Merge lp:~hopem/charms/precise/mysql/lp1251560 into lp:charms/mysql

Proposed by Edward Hope-Morley
Status: Merged
Merged at revision: 111
Proposed branch: lp:~hopem/charms/precise/mysql/lp1251560
Merge into: lp:charms/mysql
Diff against target: 145 lines (+73/-13)
4 files modified
config.yaml (+10/-0)
hooks/ha_relations.py (+3/-1)
hooks/lib/ceph_utils.py (+59/-11)
revision (+1/-1)
To merge this branch: bzr merge lp:~hopem/charms/precise/mysql/lp1251560
Reviewer         Review Type    Date Requested    Status
Marco Ceppi      community                        Approve
charmers                                          Pending
James Page                                        Pending
Review via email: mp+203687@code.launchpad.net

This proposal supersedes a proposal from 2013-12-09.

Ante Karamatić (ivoks) wrote (posted in a previous version of this proposal):

FWIW, I've tested this charm and it works as intended.

James Page (james-page) wrote (posted in a previous version of this proposal):

This is fine with > argonaut; I recently landed some changes into charm-helpers to deal with <= argonaut:

https://code.launchpad.net/~james-page/charm-helpers/folsom-ceph-support/+merge/192557

review: Needs Fixing
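
For context on the version concern raised here: the get_osds() helper added in this branch only queries the OSD list on Ceph 0.56 (bobtail) or later, so an argonaut-era (0.48.x) cluster falls back to the fixed placement-group count in create_pool(). Below is a minimal standalone sketch of that gate; the function name can_list_osds and the sample version strings are illustrative and not part of the charm.

    # Simplified illustration of the version gate used by get_osds() in this
    # branch; can_list_osds() and the sample version strings are hypothetical.
    def can_list_osds(version):
        # 'ceph osd ls' is only usable from 0.56 (bobtail) onwards; argonaut
        # (0.48.x) and older cannot be queried, so callers fall back to a
        # fixed pg count of 200.
        return version is not None and version >= '0.56'

    for v in ['0.48.3', '0.56.7', '0.67.1', None]:
        action = 'derive pg count from OSDs' if can_list_osds(v) else 'use fixed pg count 200'
        print('%s: %s' % (v, action))
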
Marco Ceppi (marcoceppi) wrote:

LGTM, +1

review: Approve

Preview Diff

=== modified file 'config.yaml'
--- config.yaml 2013-12-12 17:07:59 +0000
+++ config.yaml 2014-01-29 09:37:41 +0000
@@ -68,3 +68,13 @@
       The name that will be used to create the Ceph's RBD image with. If the
       image name exists in Ceph, it will be re-used and the data will be
       overwritten.
+  ceph-osd-replication-count:
+    default: 2
+    type: int
+    description: |
+      This value dictates the number of replicas ceph must make of any
+      object it stores within the mysql rbd pool. Of course, this only
+      applies if using Ceph as a backend store. Note that once the mysql
+      rbd pool has been created, changing this value will not have any
+      effect (although it can be changed in ceph by manually configuring
+      your ceph cluster).

=== modified file 'hooks/ha_relations.py'
--- hooks/ha_relations.py 2013-03-18 10:35:17 +0000
+++ hooks/ha_relations.py 2014-01-29 09:37:41 +0000
@@ -110,11 +110,13 @@
         sizemb = int(utils.config_get('block-size')) * 1024
         rbd_img = utils.config_get('rbd-name')
         blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
+        rbd_pool_rep_count = utils.config_get('ceph-osd-replication-count')
         ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
                                  rbd_img=rbd_img, sizemb=sizemb,
                                  fstype='ext4', mount_point=DATA_SRC_DST,
                                  blk_device=blk_device,
-                                 system_services=['mysql'])
+                                 system_services=['mysql'],
+                                 rbd_pool_replicas=rbd_pool_rep_count)
     else:
         utils.juju_log('INFO',
                        'This is not the peer leader. Not configuring RBD.')

=== modified file 'hooks/lib/ceph_utils.py'
--- hooks/lib/ceph_utils.py 2013-08-08 21:50:40 +0000
+++ hooks/lib/ceph_utils.py 2014-01-29 09:37:41 +0000
@@ -9,6 +9,7 @@
 #
 
 import commands
+import json
 import subprocess
 import os
 import shutil
@@ -66,15 +67,61 @@
     return name in out
 
 
-def create_pool(service, name):
-    cmd = [
-        'rados',
-        '--id',
-        service,
-        'mkpool',
-        name
-    ]
-    execute(cmd)
+def ceph_version():
+    ''' Retrieve the local version of ceph '''
+    if os.path.exists('/usr/bin/ceph'):
+        cmd = ['ceph', '-v']
+        output = subprocess.check_output(cmd)
+        output = output.split()
+        if len(output) > 3:
+            return output[2]
+        else:
+            return None
+    else:
+        return None
+
+
+def get_osds(service):
+    '''
+    Return a list of all Ceph Object Storage Daemons
+    currently in the cluster
+    '''
+    version = ceph_version()
+    if version and version >= '0.56':
+        cmd = ['ceph', '--id', service, 'osd', 'ls', '--format=json']
+        return json.loads(subprocess.check_output(cmd))
+    else:
+        return None
+
+
+def create_pool(service, name, replicas=2):
+    ''' Create a new RADOS pool '''
+    if pool_exists(service, name):
+        utils.juju_log('WARNING',
+                       "Ceph pool {} already exists, "
+                       "skipping creation".format(name))
+        return
+
+    osds = get_osds(service)
+    if osds:
+        pgnum = (len(osds) * 100 / replicas)
+    else:
+        # NOTE(james-page): Default to 200 for older ceph versions
+        # which don't support OSD query from cli
+        pgnum = 200
+
+    cmd = [
+        'ceph', '--id', service,
+        'osd', 'pool', 'create',
+        name, str(pgnum)
+    ]
+    subprocess.check_call(cmd)
+    cmd = [
+        'ceph', '--id', service,
+        'osd', 'pool', 'set', name,
+        'size', str(replicas)
+    ]
+    subprocess.check_call(cmd)
 
 
 def keyfile_path(service):
@@ -220,7 +267,8 @@
 
 
 def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
-                        blk_device, fstype, system_services=[]):
+                        blk_device, fstype, system_services=[],
+                        rbd_pool_replicas=2):
     """
     To be called from the current cluster leader.
     Ensures given pool and RBD image exists, is mapped to a block device,
@@ -235,7 +283,7 @@
     # Ensure pool, RBD image, RBD mappings are in place.
    if not pool_exists(service, pool):
         utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool)
-        create_pool(service, pool)
+        create_pool(service, pool, replicas=rbd_pool_replicas)
 
     if not rbd_exists(service, pool, rbd_img):
         utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img)

=== modified file 'revision'
--- revision 2013-08-08 21:50:40 +0000
+++ revision 2014-01-29 09:37:41 +0000
@@ -1,1 +1,1 @@
-309
+310
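
As a usage note on the create_pool() change above: the placement-group count for the new pool is derived as (number of OSDs * 100) / replicas, with a fallback of 200 when the OSD list cannot be queried (pre-0.56 Ceph). A minimal standalone sketch of that arithmetic follows, using hypothetical OSD counts rather than values from a real cluster.

    # Sketch of the pg-count heuristic introduced in create_pool() above.
    # The OSD id lists below are made-up examples; '//' keeps the result an
    # integer under both Python 2 and 3 (the charm code runs under Python 2,
    # where '/' on ints behaves the same way).
    def pg_count(osds, replicas=2):
        if osds:
            return len(osds) * 100 // replicas
        # Older ceph releases cannot be asked for their OSD list from the CLI.
        return 200

    print(pg_count([0, 1, 2], replicas=2))    # 3 OSDs, default 2 replicas -> 150
    print(pg_count(range(12), replicas=3))    # 12 OSDs, 3 replicas -> 400
    print(pg_count(None))                     # no OSD listing available -> 200

With the default ceph-osd-replication-count of 2 this matches the behaviour introduced by the new config.yaml option; as its description notes, changing the value after the mysql rbd pool exists has no effect unless the pool is adjusted manually in ceph.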
