Merge lp:~james-page/charms/precise/ceph/dir-support into lp:~charmers/charms/precise/ceph/trunk

Proposed by James Page
Status: Merged
Approved by: Mark Mims
Approved revision: 72
Merge reported by: Mark Mims
Merged at revision: not available
Proposed branch: lp:~james-page/charms/precise/ceph/dir-support
Merge into: lp:~charmers/charms/precise/ceph/trunk
Diff against target: 264 lines (+102/-32)
5 files modified
README.md (+8/-7)
config.yaml (+3/-0)
hooks/ceph.py (+73/-17)
hooks/hooks.py (+17/-7)
revision (+1/-1)
To merge this branch: bzr merge lp:~james-page/charms/precise/ceph/dir-support
Reviewer: Mark Mims (community)
Status: Approve
Review via email: mp+182607@code.launchpad.net

Description of the change

Add support for using directories to host OSD filesystems instead of
requiring block devices for every OSD.

This allows use with the Juju local provider.
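
For example, with the local provider the osd-devices option can point at a
directory rather than a block device (a hypothetical snippet; the path is a
placeholder):

    ceph:
      osd-devices: '/srv/ceph-osd'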

Revision history for this message
Mark Mims (mark-mims) wrote:

lgtm

review: Approve

Preview Diff

=== modified file 'README.md'
--- README.md 2013-07-17 18:11:40 +0000
+++ README.md 2013-08-28 11:43:37 +0000
@@ -27,15 +27,20 @@
 to do this will cause a reconfiguration error and new service units will not join
 the existing ceph cluster.
 
-The charm also supports the specification of storage devices to be used in the
+The charm also supports the specification of storage devices to be used in the
 ceph cluster.
 
 osd-devices:
     A list of devices that the charm will attempt to detect, initialise and
     activate as ceph storage.
 
-    This this can be a superset of the actual storage devices presented to
-    each service unit and can be changed post ceph bootstrap using `juju set`.
+    This can be a superset of the actual storage devices presented to each
+    service unit and can be changed post ceph bootstrap using `juju set`.
+
+    The full path of each device must be provided, e.g. /dev/vdb.
+
+    For Ceph >= 0.56.6 (Raring or the Grizzly Cloud Archive) use of
+    directories instead of devices is also supported.
 
 At a minimum you must provide a juju config file during initial deployment
 with the fsid and monitor-secret options (contents of cepy.yaml below):
@@ -66,10 +71,6 @@
 Technical Bootnotes
 ===================
 
-This charm is currently deliberately inflexible and potentially destructive.
-It is designed to deploy on exactly three machines. Each machine will run mon
-and osd.
-
 This charm uses the new-style Ceph deployment as reverse-engineered from the
 Chef cookbook at https://github.com/ceph/ceph-cookbooks, although we selected
 a different strategy to form the monitor cluster. Since we don't know the
=== modified file 'config.yaml'
--- config.yaml 2013-06-10 13:48:52 +0000
+++ config.yaml 2013-08-28 11:43:37 +0000
@@ -39,6 +39,9 @@
       .
       These devices are the range of devices that will be checked for and
       used across all service units.
+      .
+      For ceph >= 0.56.6 these can also be directories instead of devices - the
+      charm assumes anything not starting with /dev is a directory instead.
   osd-journal:
     type: string
     description: |
=== modified file 'hooks/ceph.py'
--- hooks/ceph.py 2013-07-08 08:32:38 +0000
+++ hooks/ceph.py 2013-08-28 11:43:37 +0000
@@ -15,14 +15,17 @@
 from charmhelpers.core.host import (
     mkdir,
     service_restart,
-    log
+)
+from charmhelpers.core.hookenv import (
+    log,
+    ERROR,
 )
 from charmhelpers.contrib.storage.linux.utils import (
     zap_disk,
-    is_block_device
+    is_block_device,
 )
 from utils import (
-    get_unit_hostname
+    get_unit_hostname,
 )
 
 LEADER = 'leader'
@@ -119,6 +122,16 @@
     return False
 
 
+def start_osds(devices):
+    # Scan for ceph block devices
+    rescan_osd_devices()
+    if get_ceph_version() >= "0.56.6":
+        # Use ceph-disk-activate for directory based OSD's
+        for dev_or_path in devices:
+            if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path):
+                subprocess.check_call(['ceph-disk-activate', dev_or_path])
+
+
 def rescan_osd_devices():
     cmd = [
         'udevadm', 'trigger',
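
Note: the get_ceph_version() >= "0.56.6" test above (and its counterpart in
osdize_dir() below) compares version strings lexicographically, which happens
to work for the releases in play but fails in general. A safer sketch,
assuming purely numeric dotted version strings:

    def version_tuple(v):
        # '0.56.6' -> (0, 56, 6)
        return tuple(int(part) for part in v.split('.'))

    '0.100.0' >= '0.56.6'                                # False as strings
    version_tuple('0.100.0') >= version_tuple('0.56.6')  # True numerically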
@@ -161,9 +174,38 @@
     ]
 }
 
+_osd_bootstrap_caps_profile = {
+    'mon': [
+        'allow profile bootstrap-osd'
+    ]
+}
+
+
+def parse_key(raw_key):
+    # get-or-create appears to have different output depending
+    # on whether its 'get' or 'create'
+    # 'create' just returns the key, 'get' is more verbose and
+    # needs parsing
+    key = None
+    if len(raw_key.splitlines()) == 1:
+        key = raw_key
+    else:
+        for element in raw_key.splitlines():
+            if 'key' in element:
+                key = element.split(' = ')[1].strip()  # IGNORE:E1103
+    return key
+
 
 def get_osd_bootstrap_key():
-    return get_named_key('bootstrap-osd', _osd_bootstrap_caps)
+    try:
+        # Attempt to get/create a key using the OSD bootstrap profile first
+        key = get_named_key('bootstrap-osd',
+                            _osd_bootstrap_caps_profile)
+    except:
+        # If that fails try with the older style permissions
+        key = get_named_key('bootstrap-osd',
+                            _osd_bootstrap_caps)
+    return key
 
 
 _radosgw_keyring = "/etc/ceph/keyring.rados.gateway"
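
Note: parse_key() handles the two output shapes of 'ceph auth get-or-create',
and get_osd_bootstrap_key() prefers the newer 'allow profile bootstrap-osd'
capability, falling back to the explicit caps on older releases. Illustrative
calls (the key material is a placeholder):

    parse_key('AQBxxPLACEHOLDERxxKEYxx==')
    # 'create' output: a single line, returned verbatim

    parse_key('[client.bootstrap-osd]\n\tkey = AQBxxPLACEHOLDERxxKEYxx==')
    # 'get' output: verbose keyring text; the 'key = ' line is extracted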
@@ -214,19 +256,7 @@
             subsystem,
             '; '.join(subcaps),
         ])
-    output = subprocess.check_output(cmd).strip()  # IGNORE:E1103
-    # get-or-create appears to have different output depending
-    # on whether its 'get' or 'create'
-    # 'create' just returns the key, 'get' is more verbose and
-    # needs parsing
-    key = None
-    if len(output.splitlines()) == 1:
-        key = output
-    else:
-        for element in output.splitlines():
-            if 'key' in element:
-                key = element.split(' = ')[1].strip()  # IGNORE:E1103
-    return key
+    return parse_key(subprocess.check_output(cmd).strip())  # IGNORE:E1103
 
 
 def bootstrap_monitor_cluster(secret):
@@ -291,6 +321,13 @@
 
 
 def osdize(dev, osd_format, osd_journal, reformat_osd=False):
+    if dev.startswith('/dev'):
+        osdize_dev(dev, osd_format, osd_journal, reformat_osd)
+    else:
+        osdize_dir(dev)
+
+
+def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False):
     if not os.path.exists(dev):
         log('Path {} does not exist - bailing'.format(dev))
         return
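
Note: illustrative calls for the new dispatch (arguments hypothetical); as the
next hunk shows, osdize_dir() takes no format or journal arguments:

    osdize('/dev/vdb', 'xfs', '/dev/vdc')  # under /dev    -> osdize_dev()
    osdize('/srv/ceph-osd', 'xfs', None)   # anything else -> osdize_dir()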
@@ -327,6 +364,25 @@
     subprocess.check_call(cmd)
 
 
+def osdize_dir(path):
+    if os.path.exists(os.path.join(path, 'upstart')):
+        log('Path {} is already configured as an OSD - bailing'.format(path))
+        return
+
+    if get_ceph_version() < "0.56.6":
+        log('Unable to use directories for OSDs with ceph < 0.56.6',
+            level=ERROR)
+        raise
+
+    mkdir(path)
+    cmd = [
+        'ceph-disk-prepare',
+        '--data-dir',
+        path
+    ]
+    subprocess.check_call(cmd)
+
+
 def device_mounted(dev):
     return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0
 
 
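
Note: the bare raise on the version guard in osdize_dir() has no active
exception to re-raise on this path, so it will itself error out; the hook
still aborts, but with a confusing traceback. A more explicit sketch of the
intended guard (my wording, not from the branch):

    if get_ceph_version() < "0.56.6":
        msg = 'Unable to use directories for OSDs with ceph < 0.56.6'
        log(msg, level=ERROR)
        raise Exception(msg)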
=== modified file 'hooks/hooks.py'
--- hooks/hooks.py 2013-06-25 11:03:02 +0000
+++ hooks/hooks.py 2013-08-28 11:43:37 +0000
@@ -102,17 +102,16 @@
         with open(JOURNAL_ZAPPED, 'w') as zapped:
             zapped.write('DONE')
 
-    for dev in config('osd-devices').split(' '):
-        ceph.osdize(dev, config('osd-format'), config('osd-journal'),
-                    reformat_osd())
-
     # Support use of single node ceph
     if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1):
         ceph.bootstrap_monitor_cluster(config('monitor-secret'))
         ceph.wait_for_bootstrap()
 
     if ceph.is_bootstrapped():
-        ceph.rescan_osd_devices()
+        for dev in get_devices():
+            ceph.osdize(dev, config('osd-format'), config('osd-journal'),
+                        reformat_osd())
+        ceph.start_osds(get_devices())
 
     log('End config-changed hook.')
 
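
Note: the net effect of this hunk is that OSD preparation now happens only
once the monitor cluster reports bootstrapped, instead of unconditionally at
the top of the hook. The resulting flow, paraphrased:

    # config-changed, simplified:
    #   if not bootstrapped and monitor-count == 1:
    #       bootstrap_monitor_cluster(); wait_for_bootstrap()
    #   if bootstrapped:
    #       osdize() each entry of osd-devices
    #       start_osds(): udev rescan, then ceph-disk-activate directory OSDs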
@@ -139,6 +138,13 @@
     return False
 
 
+def get_devices():
+    if config('osd-devices'):
+        return config('osd-devices').split(' ')
+    else:
+        return []
+
+
 @hooks.hook('mon-relation-departed',
             'mon-relation-joined')
 def mon_relation():
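
Note: get_devices() also guards against an unset osd-devices option, which
the old inline split(' ') did not. Illustrative behaviour (values
hypothetical):

    # with osd-devices set to '/dev/vdb /srv/ceph-osd'
    get_devices()   # -> ['/dev/vdb', '/srv/ceph-osd']
    # with osd-devices unset or empty
    get_devices()   # -> []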
@@ -149,7 +155,10 @@
     if len(get_mon_hosts()) >= moncount:
         ceph.bootstrap_monitor_cluster(config('monitor-secret'))
         ceph.wait_for_bootstrap()
-        ceph.rescan_osd_devices()
+        for dev in get_devices():
+            ceph.osdize(dev, config('osd-format'), config('osd-journal'),
+                        reformat_osd())
+        ceph.start_osds(get_devices())
         notify_osds()
         notify_radosgws()
         notify_client()
@@ -258,7 +267,8 @@
     # In case we're being redeployed to the same machines, try
     # to make sure everything is running as soon as possible.
     service_restart('ceph-mon-all')
-    ceph.rescan_osd_devices()
+    if ceph.is_bootstrapped():
+        ceph.start_osds(get_devices())
 
 
 if __name__ == '__main__':
 
=== modified file 'revision'
--- revision 2013-06-20 23:58:01 +0000
+++ revision 2013-08-28 11:43:37 +0000
@@ -1,1 +1,1 @@
-92
+100
