Merge lp:~gandelman-a/charms/precise/mysql/ceph_refactor into lp:~openstack-charmers/charms/precise/mysql/ha-support

Proposed by Adam Gandelman
Status: Merged
Merged at revision: 102
Proposed branch: lp:~gandelman-a/charms/precise/mysql/ceph_refactor
Merge into: lp:~openstack-charmers/charms/precise/mysql/ha-support
Diff against target: 446 lines (+173/-150)
3 files modified
hooks/ceph.py (+144/-12)
hooks/ha-relations (+28/-137)
revision (+1/-1)
To merge this branch: bzr merge lp:~gandelman-a/charms/precise/mysql/ceph_refactor
Reviewer: OpenStack Charmers, status: Pending
Review via email: mp+151344@code.launchpad.net

Description of the change

Refactored the Ceph work into a ceph.py module that can be easily reused across charms that need a Ceph-backed block device.

Charms can now specify the pool to be created for RBD images; the mysql charm creates a pool that matches the service name.

Various other fixes here and there.
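
For illustration only, a rough sketch of how another charm's ceph-relation-changed hook might consume the refactored ceph.py; the relation handling, pool-per-service naming and function signatures mirror the mysql hook changes below, while the image name, mount point and service name ('data', '/var/lib/myservice', 'myservice') are purely hypothetical:

    # Hypothetical ceph-relation-changed hook in some other charm, reusing
    # the refactored hooks/ceph.py from this branch.
    import os
    import ceph    # hooks/ceph.py from this branch
    import utils   # the charm's existing helpers, as used in hooks/ha-relations

    SERVICE_NAME = os.getenv('JUJU_UNIT_NAME').split('/')[0]
    POOL_NAME = SERVICE_NAME   # pool now matches the service name

    def ceph_changed():
        auth = utils.relation_get('auth')
        key = utils.relation_get('key')
        if None in [auth, key]:
            return   # relation data not complete yet

        # Writes the keyring and keyfile, renders /etc/ceph/ceph.conf and
        # loads the rbd kernel module.
        ceph.configure(service=SERVICE_NAME, key=key, auth=auth)

        if utils.eligible_leader():
            # Creates the pool and RBD image if needed, maps it, formats it
            # and migrates any existing data to the new device.
            ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
                                     rbd_img='data', sizemb=10 * 1024,
                                     fstype='ext4',
                                     mount_point='/var/lib/myservice',
                                     blk_device='/dev/rbd/%s/data' % POOL_NAME,
                                     system_services=['myservice'])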


Preview Diff

=== modified file 'hooks/ceph.py'
--- hooks/ceph.py 2013-01-28 17:59:02 +0000
+++ hooks/ceph.py 2013-03-02 03:51:18 +0000
@@ -6,6 +6,14 @@
 import os
 import shutil

+KEYRING = '/etc/ceph/ceph.client.%s.keyring'
+KEYFILE = '/etc/ceph/ceph.client.%s.key'
+
+CEPH_CONF = """[global]
+ auth supported = %(auth)s
+ keyring = %(keyring)s
+ mon host = %(mon_hosts)s
+"""

 def execute(cmd):
     subprocess.check_call(cmd)
@@ -15,33 +23,59 @@
     subprocess.check_call(cmd, shell=True)


-def create_image(service, image, sizemb):
+def install():
+    ceph_dir = "/etc/ceph"
+    if not os.path.isdir(ceph_dir):
+        os.mkdir(ceph_dir)
+    utils.install('ceph-common')
+
+
+def rbd_exists(service, pool, rbd_img):
+    (rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %\
+                                         (service, pool))
+    return rbd_img in out
+
+
+def create_rbd_image(service, pool, image, sizemb):
     cmd = [
         'rbd',
         'create',
         image,
         '--size',
-        sizemb,
+        str(sizemb),
         '--id',
         service,
         '--pool',
-        'images'
+        pool
     ]
     execute(cmd)


-def create_image_pool(service):
+def pool_exists(service, name):
+    (rc, out) = commands.getstatusoutput("rados --id %s lspools" % service)
+    return name in out
+
+def create_pool(service, name):
     cmd = [
         'rados',
         '--id',
         service,
         'mkpool',
-        'images'
+        name
     ]
     execute(cmd)


-def create_keyring(service, keyring, key):
+def keyfile_path(service):
+    return KEYFILE % service
+
+def keyring_path(service):
+    return KEYRING % service
+
+def create_keyring(service, key):
+    keyring = keyring_path(service)
+    if os.path.exists(keyring):
+        utils.juju_log('INFO', 'ceph: Keyring exists at %s.' % keyring)
     cmd = [
         'ceph-authtool',
         keyring,
@@ -50,22 +84,64 @@
         '--add-key=%s' % key
     ]
     execute(cmd)
-
-
-def map_block_storage(service, image, keyfile):
+    utils.juju_log('INFO', 'ceph: Created new ring at %s.' % keyring)
+
+
+def create_key_file(service, key):
+    # create a file containing the key
+    keyfile = keyfile_path(service)
+    if os.path.exists(keyfile):
+        utils.juju_log('INFO', 'ceph: Keyfile exists at %s.' % keyfile)
+    fd = open(keyfile, 'w')
+    fd.write(key)
+    fd.close()
+    utils.juju_log('INFO', 'ceph: Created new keyfile at %s.' % keyfile)
+
+
+def get_ceph_nodes():
+    hosts = []
+    for r_id in utils.relation_ids('ceph'):
+        for unit in utils.relation_list(r_id):
+            hosts.append(utils.relation_get('private-address',
+                                            unit=unit, rid=r_id))
+    return hosts
+
+
+def configure(service, key, auth):
+    create_keyring(service, key)
+    create_key_file(service, key)
+    hosts = get_ceph_nodes()
+    mon_hosts = ",".join(map(str, hosts))
+    keyring = keyring_path(service)
+    with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
+        ceph_conf.write(CEPH_CONF % locals())
+    modprobe_kernel_module('rbd')
+
+
+def image_mapped(image_name):
+    (rc, out) = commands.getstatusoutput('rbd showmapped')
+    return image_name in out
+
+def map_block_storage(service, pool, image):
     cmd = [
         'rbd',
         'map',
-        'images/%s' % image,
+        '%s/%s' % (pool, image),
         '--user',
         service,
         '--secret',
-        keyfile,
+        keyfile_path(service),
     ]
     execute(cmd)


-def make_filesystem(service, blk_device, fstype='ext4'):
+def filesystem_mounted(fs):
+    return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0
+
+def make_filesystem(blk_device, fstype='ext4'):
+    utils.juju_log('INFO',
+                   'ceph: Formatting block device %s as filesystem %s.' %\
+                   (blk_device, fstype))
     cmd = ['mkfs', '-t', fstype, blk_device]
     execute(cmd)

@@ -85,13 +161,21 @@
     cmd = ['umount', '/mnt']
     execute(cmd)

+    _dir = os.stat(data_src_dst)
+    uid = _dir.st_uid
+    gid = _dir.st_gid
+
     # re-mount where the data should originally be
     cmd = ['mount', '-t', fstype, blk_device, data_src_dst]
     execute(cmd)

+    # ensure original ownership of new mount.
+    cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst]
+    execute(cmd)

 # TODO: re-use
 def modprobe_kernel_module(module):
+    utils.juju_log('INFO','Loading kernel module')
     cmd = ['modprobe', module]
     execute(cmd)
     cmd = 'echo %s >> /etc/modules' % module
@@ -106,3 +190,51 @@
             shutil.copytree(s, d, symlinks, ignore)
         else:
             shutil.copy2(s, d)
+
+def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
+                        blk_device, fstype, system_services=[]):
+    """
+    To be called from the current cluster leader.
+    Ensures given pool and RBD image exists, is mapped to a block device,
+    and the device is formatted and mounted at the given mount_point.
+
+    If formatting a device for the first time, data existing at mount_point
+    will be migrated to the RBD device before being remounted.
+
+    All services listed in system_services will be stopped prior to data
+    migration and restarted when complete.
+    """
+    # Ensure pool, RBD image, RBD mappings are in place.
+    if not pool_exists(service, pool):
+        utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool)
+        create_pool(service, pool)
+
+    if not rbd_exists(service, pool, rbd_img):
+        utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img)
+        create_rbd_image(service, pool, rbd_img, sizemb)
+
+    if not image_mapped(rbd_img):
+        utils.juju_log('INFO', 'ceph: Mapping RBD Image as a Block Device.')
+        map_block_storage(service, pool, rbd_img)
+
+    # make file system
+    # TODO: What happens if for whatever reason this is run again and
+    # the data is already in the rbd device and/or is mounted??
+    # When it is mounted already, it will fail to make the fs
+    # XXX: This is really sketchy! Need to at least add an fstab entry
+    # otherwise this hook will blow away existing data if its executed
+    # after a reboot.
+    if not filesystem_mounted(mount_point):
+        make_filesystem(blk_device, fstype)
+
+        for svc in system_services:
+            if utils.running(svc):
+                utils.juju_log('INFO',
+                               'Stopping services %s prior to migrating '\
+                               'data' % svc)
+                utils.stop(svc)
+
+        place_data_on_ceph(service, blk_device, mount_point, fstype)
+
+        for svc in system_services:
+            utils.start(svc)

=== modified file 'hooks/ha-relations'
--- hooks/ha-relations 2013-02-19 23:11:24 +0000
+++ hooks/ha-relations 2013-03-02 03:51:18 +0000
@@ -14,10 +14,9 @@

 # CEPH
 DATA_SRC_DST = '/var/lib/mysql'
-SERVICE_NAME = utils.get_unit_name().replace('-','/').split('/')[0]
-KEYRING = "/etc/ceph/ceph.client.%s.keyring" % SERVICE_NAME
-KEYFILE = "/etc/ceph/ceph.client.%s.key" % SERVICE_NAME

+SERVICE_NAME = os.getenv('JUJU_UNIT_NAME').split('/')[0]
+POOL_NAME = SERVICE_NAME

 config=json.loads(subprocess.check_output(['config-get','--format=json']))

@@ -69,10 +68,10 @@
     }

     resource_params = {
-        'res_mysql_rbd':'params name="%s" pool="images" user="%s" secret="%s"' % (
-                         config['rbd-name'], SERVICE_NAME, KEYFILE),
-        'res_mysql_fs':'params device="/dev/rbd/images/%s" directory="%s" fstype="ext4" op start start-delay="10s"' % (
-                        config['rbd-name'], DATA_SRC_DST),
+        'res_mysql_rbd':'params name="%s" pool="%s" user="%s" secret="%s"' % (
+                         config['rbd-name'], POOL_NAME, SERVICE_NAME, ceph.keyfile_path(SERVICE_NAME)),
+        'res_mysql_fs':'params device="/dev/rbd/%s/%s" directory="%s" fstype="ext4" op start start-delay="10s"' % (
+                        POOL_NAME, config['rbd-name'], DATA_SRC_DST),
         'res_mysql_vip':'params ip="%s" cidr_netmask="%s" nic="%s"' % (config['vip'],
                          config['vip_cidr'], config['vip_iface']),
         'res_mysqld':'op start start-delay="5s" op monitor interval="5s"',
@@ -107,12 +106,7 @@

 def ceph_joined():
     utils.juju_log('INFO', 'Start Ceph Relation Joined')
-
-    ceph_dir = "/etc/ceph"
-    if not os.path.isdir(ceph_dir):
-        os.mkdir(ceph_dir)
-    utils.install('ceph-common')
-
+    ceph.install()
     utils.juju_log('INFO', 'Finish Ceph Relation Joined')


@@ -121,42 +115,32 @@

     # TODO: ask james: What happens if the relation data has changed?
     # do we reconfigure ceph? What do we do with the data?
+    auth = utils.relation_get('auth')
     key = utils.relation_get('key')
-
-    if key:
-        # create KEYRING file
-        if not os.path.exists(KEYRING):
-            ceph.create_keyring(SERVICE_NAME, KEYRING, key)
-        # create a file containing the key
-        if not os.path.exists(KEYFILE):
-            fd = open(KEYFILE, 'w')
-            fd.write(key)
-            fd.close()
-    else:
+    if None in [auth, key]:
+        utils.juju_log('INFO', 'Missing key or auth in relation')
         sys.exit(0)

-    # emit ceph config
-    hosts = get_ceph_nodes()
-    mon_hosts = ",".join(map(str, hosts))
-    conf_context = {
-        'auth': utils.relation_get('auth'),
-        'keyring': KEYRING,
-        'mon_hosts': mon_hosts,
-    }
-    with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
-        ceph_conf.write(utils.render_template('ceph.conf',
-                                              conf_context))
-
-    # Create the images pool if it does not already exist
+
+    ceph.configure(service=SERVICE_NAME, key=key, auth=auth)
+
     if utils.eligible_leader():
-        (status, output) = commands.getstatusoutput("rados --id %s lspools" % SERVICE_NAME)
-        pools = "images" in output
-        if not pools:
-            utils.juju_log('INFO','Creating image pool')
-            ceph.create_image_pool(SERVICE_NAME)
+        sizemb = int(config['block-size'].split('G')[0]) * 1024
+        rbd_img = config['rbd-name']
+        blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
+        ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
+                                 rbd_img=rbd_img, sizemb=sizemb,
+                                 fstype='ext4', mount_point=DATA_SRC_DST,
+                                 blk_device=blk_device,
+                                 system_services=['mysql'])
+    else:
+        utils.juju_log('INFO',
+                       'This is not the peer leader. Not configuring RBD.')
+        # Stopping MySQL
+        if utils.running('mysql'):
+            utils.juju_log('INFO','Stopping MySQL...')
+            utils.stop('mysql')

-    # Configure ceph()
-    configure_ceph()

     # If 'ha' relation has been made before the 'ceph' relation
     # it is important to make sure the ha-relation data is being
@@ -173,99 +157,6 @@
     utils.juju_log('INFO', 'Finish Ceph Relation Changed')


-def configure_ceph():
-    utils.juju_log('INFO', 'Start Ceph Configuration')
-
-    block_sizemb = int(config['block-size'].split('G')[0]) * 1024
-    image_name = config['rbd-name']
-    fstype = 'ext4'
-    data_src = DATA_SRC_DST
-    blk_device = '/dev/rbd/images/%s' % image_name
-
-    # modprobe the kernel module
-    utils.juju_log('INFO','Loading kernel module')
-    ceph.modprobe_kernel_module('rbd')
-
-
-    # configure mysql for ceph storage options
-    if not utils.eligible_leader():
-        utils.juju_log('INFO','This is not the peer leader. Not configuring RBD.')
-        # Stopping MySQL
-        if utils.running('mysql'):
-            utils.juju_log('INFO','Stopping MySQL...')
-            utils.stop('mysql')
-        return
-
-    elif utils.eligible_leader():
-        # create an image/block device
-        (status, output) = commands.getstatusoutput('rbd list --id %s --pool images' % SERVICE_NAME)
-        rbd = image_name in output
-        if not rbd:
-            utils.juju_log('INFO', 'Creating RBD Image...')
-            ceph.create_image(SERVICE_NAME, image_name, str(block_sizemb))
-        else:
-            utils.juju_log('INFO',
-                           'Looks like RBD already exists. Not creating a new one.')
-
-        # map the image to a block device if not already mapped.
-        (status, output) = commands.getstatusoutput('rbd showmapped')
-        mapped = image_name in output
-        if not mapped:
-            # map block storage
-            utils.juju_log('INFO', 'Mapping RBD Image as a Block Device')
-            ceph.map_block_storage(SERVICE_NAME, image_name, KEYFILE)
-        else:
-            utils.juju_log('INFO',
-                           'Looks like RBD is already mapped. Not re-mapping.')
-
-        # make file system
-        # TODO: What happens if for whatever reason this is run again and
-        # the data is already in the rbd device and/or is mounted??
-        # When it is mounted already, it will fail to make the fs
-        utils.juju_log('INFO', 'Trying to move data over to RBD.')
-        if not filesystem_mounted(data_src):
-            utils.juju_log('INFO', 'Formating RBD.')
-            ceph.make_filesystem(SERVICE_NAME, blk_device, fstype)
-
-            # Stopping MySQL
-            if utils.running('mysql'):
-                utils.juju_log('INFO','Stopping MySQL before moving data to RBD.')
-                utils.stop('mysql')
-
-            # mount block device to temporary location and copy the data
-            utils.juju_log('INFO', 'Copying MySQL data to RBD.')
-            ceph.place_data_on_ceph(SERVICE_NAME, blk_device, data_src, fstype)
-
-            # Make files be owned by mysql user/pass
-            cmd = ['chown', '-R', 'mysql:mysql', data_src]
-            subprocess.check_call(cmd)
-        else:
-            utils.juju_log('INFO',
-                           'Looks like data is already on the RBD, skipping...')
-
-        if not utils.running('mysql'):
-            utils.start('mysql')
-
-    else:
-        return
-
-    utils.juju_log('INFO', 'Finish Ceph Configuration')
-
-
-def filesystem_mounted(fs):
-    return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0
-
-
-def get_ceph_nodes():
-    hosts = []
-    for r_id in utils.relation_ids('ceph'):
-        for unit in utils.relation_list(r_id):
-            #hosts.append(utils.relation_get_dict(relation_id=r_id,
-            #                                     remote_unit=unit)['private-address'])
-            hosts.append(utils.relation_get('private-address', unit=unit, rid=r_id))
-    return hosts
-
-
 def cluster_changed():
     utils.juju_log('INFO', 'Begin cluster changed hook.')

=== modified file 'revision'
--- revision 2013-02-20 15:55:13 +0000
+++ revision 2013-03-02 03:51:18 +0000
@@ -1,1 +1,1 @@
-293
+305
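
As a side note on the CEPH_CONF template added in hooks/ceph.py above: rendered with illustrative values (a cephx auth setting supplied over the relation, the mysql client keyring, and three hypothetical monitor addresses), the resulting /etc/ceph/ceph.conf would look roughly like:

    [global]
     auth supported = cephx
     keyring = /etc/ceph/ceph.client.mysql.keyring
     mon host = 10.0.0.1,10.0.0.2,10.0.0.3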
