Merge lp:~james-page/charms/precise/ceph/dir-support into lp:~charmers/charms/precise/ceph/trunk

Proposed by James Page
Status: Merged
Approved by: Mark Mims
Approved revision: 72
Merge reported by: Mark Mims
Merged at revision: not available
Proposed branch: lp:~james-page/charms/precise/ceph/dir-support
Merge into: lp:~charmers/charms/precise/ceph/trunk
Diff against target: 264 lines (+102/-32)
5 files modified
README.md (+8/-7)
config.yaml (+3/-0)
hooks/ceph.py (+73/-17)
hooks/hooks.py (+17/-7)
revision (+1/-1)
To merge this branch: bzr merge lp:~james-page/charms/precise/ceph/dir-support
Reviewer Review Type Date Requested Status
Mark Mims (community) Approve
Review via email: mp+182607@code.launchpad.net

Description of the change

Add support for using directories to host OSD filesystems instead of
requiring block devices for every OSD.

This allows use with the Juju local provider.
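
For illustration only, a minimal sketch of a single-node deployment on the
local provider; the file name ceph.yaml, the /srv/ceph-osd path and the
placeholder secrets below are assumptions, not part of this proposal:

    # ceph.yaml (values are placeholders)
    ceph:
      fsid: '<uuid>'
      monitor-secret: '<cephx monitor secret>'
      monitor-count: 1
      osd-devices: '/srv/ceph-osd'    # a directory rather than a block device

    juju deploy --config ceph.yaml ceph

Because /srv/ceph-osd does not start with /dev, the charm treats it as a
directory and sets it up with ceph-disk-prepare --data-dir and
ceph-disk-activate, which requires ceph >= 0.56.6 (Raring or the Grizzly
Cloud Archive).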

Revision history for this message
Mark Mims (mark-mims) wrote :

lgtm

review: Approve

Preview Diff

=== modified file 'README.md'
--- README.md 2013-07-17 18:11:40 +0000
+++ README.md 2013-08-28 11:43:37 +0000
@@ -27,15 +27,20 @@
 to do this will cause a reconfiguration error and new service units will not join
 the existing ceph cluster.
 
 The charm also supports the specification of storage devices to be used in the
 ceph cluster.
 
     osd-devices:
         A list of devices that the charm will attempt to detect, initialise and
         activate as ceph storage.
 
-        This this can be a superset of the actual storage devices presented to
-        each service unit and can be changed post ceph bootstrap using `juju set`.
+        This can be a superset of the actual storage devices presented to each
+        service unit and can be changed post ceph bootstrap using `juju set`.
+
+        The full path of each device must be provided, e.g. /dev/vdb.
+
+        For Ceph >= 0.56.6 (Raring or the Grizzly Cloud Archive) use of
+        directories instead of devices is also supported.
 
 At a minimum you must provide a juju config file during initial deployment
 with the fsid and monitor-secret options (contents of cepy.yaml below):
@@ -66,10 +71,6 @@
 Technical Bootnotes
 ===================
 
-This charm is currently deliberately inflexible and potentially destructive.
-It is designed to deploy on exactly three machines. Each machine will run mon
-and osd.
-
 This charm uses the new-style Ceph deployment as reverse-engineered from the
 Chef cookbook at https://github.com/ceph/ceph-cookbooks, although we selected
 a different strategy to form the monitor cluster. Since we don't know the
=== modified file 'config.yaml'
--- config.yaml 2013-06-10 13:48:52 +0000
+++ config.yaml 2013-08-28 11:43:37 +0000
@@ -39,6 +39,9 @@
       .
       These devices are the range of devices that will be checked for and
       used across all service units.
+      .
+      For ceph >= 0.56.6 these can also be directories instead of devices - the
+      charm assumes anything not starting with /dev is a directory instead.
   osd-journal:
     type: string
     description: |
=== modified file 'hooks/ceph.py'
--- hooks/ceph.py 2013-07-08 08:32:38 +0000
+++ hooks/ceph.py 2013-08-28 11:43:37 +0000
@@ -15,14 +15,17 @@
 from charmhelpers.core.host import (
     mkdir,
     service_restart,
-    log
+)
+from charmhelpers.core.hookenv import (
+    log,
+    ERROR,
 )
 from charmhelpers.contrib.storage.linux.utils import (
     zap_disk,
-    is_block_device
+    is_block_device,
 )
 from utils import (
-    get_unit_hostname
+    get_unit_hostname,
 )
 
 LEADER = 'leader'
@@ -119,6 +122,16 @@
     return False
 
 
+def start_osds(devices):
+    # Scan for ceph block devices
+    rescan_osd_devices()
+    if get_ceph_version() >= "0.56.6":
+        # Use ceph-disk-activate for directory based OSD's
+        for dev_or_path in devices:
+            if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path):
+                subprocess.check_call(['ceph-disk-activate', dev_or_path])
+
+
 def rescan_osd_devices():
     cmd = [
         'udevadm', 'trigger',
@@ -161,9 +174,38 @@
     ]
 }
 
+_osd_bootstrap_caps_profile = {
+    'mon': [
+        'allow profile bootstrap-osd'
+    ]
+}
+
+
+def parse_key(raw_key):
+    # get-or-create appears to have different output depending
+    # on whether its 'get' or 'create'
+    # 'create' just returns the key, 'get' is more verbose and
+    # needs parsing
+    key = None
+    if len(raw_key.splitlines()) == 1:
+        key = raw_key
+    else:
+        for element in raw_key.splitlines():
+            if 'key' in element:
+                key = element.split(' = ')[1].strip() # IGNORE:E1103
+    return key
+
 
 def get_osd_bootstrap_key():
-    return get_named_key('bootstrap-osd', _osd_bootstrap_caps)
+    try:
+        # Attempt to get/create a key using the OSD bootstrap profile first
+        key = get_named_key('bootstrap-osd',
+                            _osd_bootstrap_caps_profile)
+    except:
+        # If that fails try with the older style permissions
+        key = get_named_key('bootstrap-osd',
+                            _osd_bootstrap_caps)
+    return key
 
 
 _radosgw_keyring = "/etc/ceph/keyring.rados.gateway"
@@ -214,19 +256,7 @@
             subsystem,
             '; '.join(subcaps),
         ])
-    output = subprocess.check_output(cmd).strip() # IGNORE:E1103
-    # get-or-create appears to have different output depending
-    # on whether its 'get' or 'create'
-    # 'create' just returns the key, 'get' is more verbose and
-    # needs parsing
-    key = None
-    if len(output.splitlines()) == 1:
-        key = output
-    else:
-        for element in output.splitlines():
-            if 'key' in element:
-                key = element.split(' = ')[1].strip() # IGNORE:E1103
-    return key
+    return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103
 
 
 def bootstrap_monitor_cluster(secret):
@@ -291,6 +321,13 @@
 
 
 def osdize(dev, osd_format, osd_journal, reformat_osd=False):
+    if dev.startswith('/dev'):
+        osdize_dev(dev, osd_format, osd_journal, reformat_osd)
+    else:
+        osdize_dir(dev)
+
+
+def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False):
     if not os.path.exists(dev):
         log('Path {} does not exist - bailing'.format(dev))
         return
@@ -327,6 +364,25 @@
     subprocess.check_call(cmd)
 
 
+def osdize_dir(path):
+    if os.path.exists(os.path.join(path, 'upstart')):
+        log('Path {} is already configured as an OSD - bailing'.format(path))
+        return
+
+    if get_ceph_version() < "0.56.6":
+        log('Unable to use directories for OSDs with ceph < 0.56.6',
+            level=ERROR)
+        raise
+
+    mkdir(path)
+    cmd = [
+        'ceph-disk-prepare',
+        '--data-dir',
+        path
+    ]
+    subprocess.check_call(cmd)
+
+
 def device_mounted(dev):
     return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0
 
=== modified file 'hooks/hooks.py'
--- hooks/hooks.py 2013-06-25 11:03:02 +0000
+++ hooks/hooks.py 2013-08-28 11:43:37 +0000
@@ -102,17 +102,16 @@
         with open(JOURNAL_ZAPPED, 'w') as zapped:
             zapped.write('DONE')
 
-    for dev in config('osd-devices').split(' '):
-        ceph.osdize(dev, config('osd-format'), config('osd-journal'),
-                    reformat_osd())
-
     # Support use of single node ceph
     if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1):
         ceph.bootstrap_monitor_cluster(config('monitor-secret'))
         ceph.wait_for_bootstrap()
 
     if ceph.is_bootstrapped():
-        ceph.rescan_osd_devices()
+        for dev in get_devices():
+            ceph.osdize(dev, config('osd-format'), config('osd-journal'),
+                        reformat_osd())
+        ceph.start_osds(get_devices())
 
     log('End config-changed hook.')
 
@@ -139,6 +138,13 @@
     return False
 
 
+def get_devices():
+    if config('osd-devices'):
+        return config('osd-devices').split(' ')
+    else:
+        return []
+
+
 @hooks.hook('mon-relation-departed',
             'mon-relation-joined')
 def mon_relation():
@@ -149,7 +155,10 @@
     if len(get_mon_hosts()) >= moncount:
         ceph.bootstrap_monitor_cluster(config('monitor-secret'))
         ceph.wait_for_bootstrap()
-        ceph.rescan_osd_devices()
+        for dev in get_devices():
+            ceph.osdize(dev, config('osd-format'), config('osd-journal'),
+                        reformat_osd())
+        ceph.start_osds(get_devices())
         notify_osds()
         notify_radosgws()
         notify_client()
@@ -258,7 +267,8 @@
     # In case we're being redeployed to the same machines, try
     # to make sure everything is running as soon as possible.
     service_restart('ceph-mon-all')
-    ceph.rescan_osd_devices()
+    if ceph.is_bootstrapped():
+        ceph.start_osds(get_devices())
 
 
 if __name__ == '__main__':
=== modified file 'revision'
--- revision 2013-06-20 23:58:01 +0000
+++ revision 2013-08-28 11:43:37 +0000
@@ -1,1 +1,1 @@
-92
+100
