Merge lp:~hopem/charms/precise/mysql/lp1251560 into lp:charms/mysql

Proposed by Edward Hope-Morley
Status: Merged
Merged at revision: 111
Proposed branch: lp:~hopem/charms/precise/mysql/lp1251560
Merge into: lp:charms/mysql
Diff against target: 145 lines (+73/-13)
4 files modified
config.yaml (+10/-0)
hooks/ha_relations.py (+3/-1)
hooks/lib/ceph_utils.py (+59/-11)
revision (+1/-1)
To merge this branch: bzr merge lp:~hopem/charms/precise/mysql/lp1251560
Reviewer Review Type Date Requested Status
Marco Ceppi (community) Approve
charmers Pending
James Page Pending
Review via email: mp+203687@code.launchpad.net

This proposal supersedes a proposal from 2013-12-09.

Ante Karamatić (ivoks) wrote (posted in a previous version of this proposal):

FWIW, I've tested this charm and it works as intended.

James Page (james-page) wrote (posted in a previous version of this proposal):

This is fine with > argonaut; I recently landed some changes into charm-helpers to deal with <= argonaut:

https://code.launchpad.net/~james-page/charm-helpers/folsom-ceph-support/+merge/192557

review: Needs Fixing
Marco Ceppi (marcoceppi) wrote:

LGTM, +1

review: Approve

Preview Diff

=== modified file 'config.yaml'
--- config.yaml 2013-12-12 17:07:59 +0000
+++ config.yaml 2014-01-29 09:37:41 +0000
@@ -68,3 +68,13 @@
       The name that will be used to create the Ceph's RBD image with. If the
       image name exists in Ceph, it will be re-used and the data will be
       overwritten.
+  ceph-osd-replication-count:
+    default: 2
+    type: int
+    description: |
+      This value dictates the number of replicas ceph must make of any
+      object it stores within the mysql rbd pool. Of course, this only
+      applies if using Ceph as a backend store. Note that once the mysql
+      rbd pool has been created, changing this value will not have any
+      effect (although it can be changed in ceph by manually configuring
+      your ceph cluster).
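
Aside on that last sentence: once the mysql pool exists, the charm never resizes it, so raising the replica count later means talking to Ceph directly. A minimal sketch of that out-of-band step, in the same subprocess style the charm uses; the 'mysql' pool name and the 'admin' cephx id are illustrative assumptions, not values taken from this diff:

import subprocess

def set_pool_replicas(pool='mysql', replicas=3, ceph_id='admin'):
    # 'ceph osd pool set <pool> size <n>' is the same command the new
    # create_pool() issues right after creating the pool.
    cmd = ['ceph', '--id', ceph_id,
           'osd', 'pool', 'set', pool, 'size', str(replicas)]
    subprocess.check_call(cmd)

# e.g. set_pool_replicas(replicas=3), run on a node with ceph admin access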
=== modified file 'hooks/ha_relations.py'
--- hooks/ha_relations.py 2013-03-18 10:35:17 +0000
+++ hooks/ha_relations.py 2014-01-29 09:37:41 +0000
@@ -110,11 +110,13 @@
         sizemb = int(utils.config_get('block-size')) * 1024
         rbd_img = utils.config_get('rbd-name')
         blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
+        rbd_pool_rep_count = utils.config_get('ceph-osd-replication-count')
         ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
                                  rbd_img=rbd_img, sizemb=sizemb,
                                  fstype='ext4', mount_point=DATA_SRC_DST,
                                  blk_device=blk_device,
-                                 system_services=['mysql'])
+                                 system_services=['mysql'],
+                                 rbd_pool_replicas=rbd_pool_rep_count)
     else:
         utils.juju_log('INFO',
                        'This is not the peer leader. Not configuring RBD.')
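
Passing the new value as a keyword with a default (rather than a new positional argument) keeps ensure_ceph_storage() backward compatible: callers that predate this branch still get two replicas. A toy stand-in (not the real signature) showing the same pattern:

def ensure_storage(pool, rbd_pool_replicas=2):
    return 'pool=%s replicas=%d' % (pool, rbd_pool_replicas)

print(ensure_storage('mysql'))                       # older callers: 2 replicas
print(ensure_storage('mysql', rbd_pool_replicas=3))  # config-driven path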
=== modified file 'hooks/lib/ceph_utils.py'
--- hooks/lib/ceph_utils.py 2013-08-08 21:50:40 +0000
+++ hooks/lib/ceph_utils.py 2014-01-29 09:37:41 +0000
@@ -9,6 +9,7 @@
 #
 
 import commands
+import json
 import subprocess
 import os
 import shutil
@@ -66,15 +67,61 @@
     return name in out
 
 
-def create_pool(service, name):
-    cmd = [
-        'rados',
-        '--id',
-        service,
-        'mkpool',
-        name
-    ]
-    execute(cmd)
+def ceph_version():
+    ''' Retrieve the local version of ceph '''
+    if os.path.exists('/usr/bin/ceph'):
+        cmd = ['ceph', '-v']
+        output = subprocess.check_output(cmd)
+        output = output.split()
+        if len(output) > 3:
+            return output[2]
+        else:
+            return None
+    else:
+        return None
+
+
+def get_osds(service):
+    '''
+    Return a list of all Ceph Object Storage Daemons
+    currently in the cluster
+    '''
+    version = ceph_version()
+    if version and version >= '0.56':
+        cmd = ['ceph', '--id', service, 'osd', 'ls', '--format=json']
+        return json.loads(subprocess.check_output(cmd))
+    else:
+        return None
+
+
+def create_pool(service, name, replicas=2):
+    ''' Create a new RADOS pool '''
+    if pool_exists(service, name):
+        utils.juju_log('WARNING',
+                       "Ceph pool {} already exists, "
+                       "skipping creation".format(name))
+        return
+
+    osds = get_osds(service)
+    if osds:
+        pgnum = (len(osds) * 100 / replicas)
+    else:
+        # NOTE(james-page): Default to 200 for older ceph versions
+        # which don't support OSD query from cli
+        pgnum = 200
+
+    cmd = [
+        'ceph', '--id', service,
+        'osd', 'pool', 'create',
+        name, str(pgnum)
+    ]
+    subprocess.check_call(cmd)
+    cmd = [
+        'ceph', '--id', service,
+        'osd', 'pool', 'set', name,
+        'size', str(replicas)
+    ]
+    subprocess.check_call(cmd)
 
 
 def keyfile_path(service):
@@ -220,7 +267,8 @@
 
 
 def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
-                        blk_device, fstype, system_services=[]):
+                        blk_device, fstype, system_services=[],
+                        rbd_pool_replicas=2):
     """
     To be called from the current cluster leader.
     Ensures given pool and RBD image exists, is mapped to a block device,
@@ -235,7 +283,7 @@
     # Ensure pool, RBD image, RBD mappings are in place.
     if not pool_exists(service, pool):
         utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool)
-        create_pool(service, pool)
+        create_pool(service, pool, replicas=rbd_pool_replicas)
 
     if not rbd_exists(service, pool, rbd_img):
         utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img)
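
The new create_pool() sizes the pool with the common heuristic of roughly 100 placement groups per OSD, shared across the replicas, and falls back to 200 placement groups when the OSD list cannot be queried (ceph releases before 0.56, where get_osds() returns None). A short worked sketch of that arithmetic, plus the 'ceph -v' parsing ceph_version() depends on; the sample version string is illustrative, not captured output:

def placement_groups(osd_count, replicas):
    # Same calculation as the diff: (len(osds) * 100 / replicas).
    return (osd_count * 100) // replicas

print(placement_groups(3, 2))    # 3 OSDs, 2 replicas  -> 150 PGs
print(placement_groups(12, 3))   # 12 OSDs, 3 replicas -> 400 PGs
# If get_osds() returns None (pre-0.56 ceph), the charm uses 200 instead.

# ceph_version() splits the 'ceph -v' output and takes the third field:
sample = 'ceph version 0.56.7 (hash)'
fields = sample.split()
if len(fields) > 3:
    print(fields[2])             # -> '0.56.7'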
=== modified file 'revision'
--- revision 2013-08-08 21:50:40 +0000
+++ revision 2014-01-29 09:37:41 +0000
@@ -1,1 +1,1 @@
-1309
+1310
