Merge lp:~chris.macnaughton/charms/trusty/ceph-osd/add-infernalis into lp:~openstack-charmers-archive/charms/trusty/ceph-osd/next

Proposed by Chris MacNaughton
Status: Merged
Merged at revision: 56
Proposed branch: lp:~chris.macnaughton/charms/trusty/ceph-osd/add-infernalis
Merge into: lp:~openstack-charmers-archive/charms/trusty/ceph-osd/next
Diff against target: 268 lines (+110/-19)
2 files modified
hooks/ceph.py (+108/-18)
hooks/ceph_hooks.py (+2/-1)
To merge this branch: bzr merge lp:~chris.macnaughton/charms/trusty/ceph-osd/add-infernalis
Reviewer Review Type Date Requested Status
Chris Holcombe (community) Approve
OpenStack Charmers Pending
Review via email: mp+282304@code.launchpad.net

Description of the change

Add support for the Ceph Infernalis release: run the daemons as the dedicated 'ceph' user where the installed package requires it, invoke ceph-disk via its 'prepare'/'activate' subcommands, and handle systemd-based Ubuntu releases (vivid and later) alongside upstart.
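
In short, Infernalis packages run the Ceph daemons as a dedicated 'ceph' user rather than root, so directories the charm creates need matching ownership and admin-socket commands need to run as that user. A minimal sketch of the pattern this branch applies throughout hooks/ceph.py (simplified; get_version() below is a stub standing in for the apt-based probe in the diff):

# Sketch of the ownership/privilege pattern from this branch (simplified).
# Assumption: get_version() returns the upstream Ceph version as a float,
# as the diff implements; Infernalis is 9.x, so any value > 1 indicates a
# package layout that runs daemons as the 'ceph' user.
import subprocess

def get_version():
    # Stub for the apt-cache probe in hooks/ceph.py
    return 9.2  # example: Infernalis

def ceph_user():
    return 'ceph' if get_version() > 1 else 'root'

def mon_status(hostname):
    # Admin-socket queries must run as the daemon's own user on Infernalis
    asok = '/var/run/ceph/ceph-mon.{}.asok'.format(hostname)
    cmd = ['sudo', '-u', ceph_user(),
           'ceph', '--admin-daemon', asok, 'mon_status']
    return subprocess.check_output(cmd)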

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_lint_check #17181 ceph-osd-next for chris.macnaughton mp282304
    LINT OK: passed

Build: http://10.245.162.77:8080/job/charm_lint_check/17181/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_unit_test #16051 ceph-osd-next for chris.macnaughton mp282304
    UNIT OK: passed

Build: http://10.245.162.77:8080/job/charm_unit_test/16051/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_amulet_test #8732 ceph-osd-next for chris.macnaughton mp282304
    AMULET FAIL: amulet-test failed

AMULET Results (max last 2 lines):
make: *** [functional_test] Error 1
ERROR:root:Make target returned non-zero.

Full amulet test output: http://paste.ubuntu.com/14479989/
Build: http://10.245.162.77:8080/job/charm_amulet_test/8732/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_amulet_test #8737 ceph-osd-next for chris.macnaughton mp282304
    AMULET OK: passed

Build: http://10.245.162.77:8080/job/charm_amulet_test/8737/

Revision history for this message
Chris Holcombe (xfactor973) wrote :

Land it

review: Approve
Revision history for this message
Corey Bryant (corey.bryant) wrote :

It seems like the common code between ceph and ceph-osd should get moved to charm-helpers (perhaps to http://bazaar.launchpad.net/~charm-helpers/charm-helpers/devel/view/head:/charmhelpers/contrib/storage/linux/ceph.py), but since there's already a precedent for this, I think it could be done as a separate task.
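
To illustrate, the end state of that refactor might look like the following import in both charms (hypothetical: these helpers do not exist in charm-helpers at the time of this proposal):

# Hypothetical refactor sketch: rather than each charm carrying a private
# copy of hooks/ceph.py, the shared helpers would live in charm-helpers and
# both the ceph and ceph-osd charms would import them. The names mirror the
# functions added in this diff; the module path is the one linked above.
from charmhelpers.contrib.storage.linux.ceph import (
    ceph_user,
    get_version,
)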

Preview Diff

=== modified file 'hooks/ceph.py'
--- hooks/ceph.py 2015-10-06 10:44:28 +0000
+++ hooks/ceph.py 2016-01-12 14:19:26 +0000
@@ -11,16 +11,25 @@
 import subprocess
 import time
 import os
+import re
+import sys
 from charmhelpers.core.host import (
     mkdir,
+    chownr,
     service_restart,
-    cmp_pkgrevno
+    cmp_pkgrevno,
+    lsb_release
 )
 from charmhelpers.core.hookenv import (
     log,
-    ERROR, WARNING,
+    ERROR,
+    WARNING,
+    cached,
     status_set,
 )
+from charmhelpers.fetch import (
+    apt_cache
+)
 from charmhelpers.contrib.storage.linux.utils import (
     zap_disk,
     is_block_device,
@@ -37,9 +46,55 @@
 PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs']
 
 
+def ceph_user():
+    if get_version() > 1:
+        return 'ceph'
+    else:
+        return "root"
+
+
+def get_version():
+    '''Derive Ceph release from an installed package.'''
+    import apt_pkg as apt
+
+    cache = apt_cache()
+    package = "ceph"
+    try:
+        pkg = cache[package]
+    except:
+        # the package is unknown to the current apt cache.
+        e = 'Could not determine version of package with no installation '\
+            'candidate: %s' % package
+        error_out(e)
+
+    if not pkg.current_ver:
+        # package is known, but no version is currently installed.
+        e = 'Could not determine version of uninstalled package: %s' % package
+        error_out(e)
+
+    vers = apt.upstream_version(pkg.current_ver.ver_str)
+
+    # x.y match only for 20XX.X
+    # and ignore patch level for other packages
+    match = re.match('^(\d+)\.(\d+)', vers)
+
+    if match:
+        vers = match.group(0)
+    return float(vers)
+
+
+def error_out(msg):
+    log("FATAL ERROR: %s" % msg,
+        level=ERROR)
+    sys.exit(1)
+
+
 def is_quorum():
     asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname())
     cmd = [
+        "sudo",
+        "-u",
+        ceph_user(),
         "ceph",
         "--admin-daemon",
         asok,
@@ -64,6 +119,9 @@
 def is_leader():
     asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname())
     cmd = [
+        "sudo",
+        "-u",
+        ceph_user(),
         "ceph",
         "--admin-daemon",
         asok,
@@ -93,6 +151,9 @@
 def add_bootstrap_hint(peer):
     asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname())
     cmd = [
+        "sudo",
+        "-u",
+        ceph_user(),
         "ceph",
         "--admin-daemon",
         asok,
@@ -127,11 +188,11 @@
 def start_osds(devices):
     # Scan for ceph block devices
     rescan_osd_devices()
-    if cmp_pkgrevno('ceph', '0.56.6') >= 0:
-        # Use ceph-disk-activate for directory based OSD's
+    if cmp_pkgrevno('ceph', "0.56.6") >= 0:
+        # Use ceph-disk activate for directory based OSD's
         for dev_or_path in devices:
             if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path):
-                subprocess.check_call(['ceph-disk-activate', dev_or_path])
+                subprocess.check_call(['ceph-disk', 'activate', dev_or_path])
 
 
 def rescan_osd_devices():
@@ -158,6 +219,9 @@
 def import_osd_bootstrap_key(key):
     if not os.path.exists(_bootstrap_keyring):
         cmd = [
+            "sudo",
+            "-u",
+            ceph_user(),
             'ceph-authtool',
             _bootstrap_keyring,
             '--create-keyring',
@@ -216,6 +280,9 @@
 def import_radosgw_key(key):
     if not os.path.exists(_radosgw_keyring):
         cmd = [
+            "sudo",
+            "-u",
+            ceph_user(),
             'ceph-authtool',
             _radosgw_keyring,
             '--create-keyring',
@@ -244,6 +311,9 @@
 def get_named_key(name, caps=None):
     caps = caps or _default_caps
     cmd = [
+        "sudo",
+        "-u",
+        ceph_user(),
         'ceph',
         '--name', 'mon.',
         '--keyring',
@@ -261,19 +331,29 @@
     return parse_key(subprocess.check_output(cmd).strip())  # IGNORE:E1103
 
 
+@cached
+def systemd():
+    return (lsb_release()['DISTRIB_CODENAME'] >= 'vivid')
+
+
 def bootstrap_monitor_cluster(secret):
     hostname = get_unit_hostname()
     path = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
     done = '{}/done'.format(path)
-    upstart = '{}/upstart'.format(path)
+    if systemd():
+        init_marker = '{}/systemd'.format(path)
+    else:
+        init_marker = '{}/upstart'.format(path)
+
     keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname)
 
     if os.path.exists(done):
         log('bootstrap_monitor_cluster: mon already initialized.')
     else:
         # Ceph >= 0.61.3 needs this for ceph-mon fs creation
-        mkdir('/var/run/ceph', perms=0755)
-        mkdir(path)
+        mkdir('/var/run/ceph', owner=ceph_user(),
+              group=ceph_user(), perms=0o755)
+        mkdir(path, owner=ceph_user(), group=ceph_user())
         # end changes for Ceph >= 0.61.3
         try:
             subprocess.check_call(['ceph-authtool', keyring,
@@ -284,13 +364,17 @@
             subprocess.check_call(['ceph-mon', '--mkfs',
                                    '-i', hostname,
                                    '--keyring', keyring])
-
+            chownr(path, ceph_user(), ceph_user())
             with open(done, 'w'):
                 pass
-            with open(upstart, 'w'):
+            with open(init_marker, 'w'):
                 pass
 
-            service_restart('ceph-mon-all')
+            if systemd():
+                subprocess.check_call(['systemctl', 'enable', 'ceph-mon'])
+                service_restart('ceph-mon')
+            else:
+                service_restart('ceph-mon-all')
         except:
             raise
         finally:
@@ -300,11 +384,14 @@
 def update_monfs():
     hostname = get_unit_hostname()
     monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
-    upstart = '{}/upstart'.format(monfs)
-    if os.path.exists(monfs) and not os.path.exists(upstart):
+    if systemd():
+        init_marker = '{}/systemd'.format(monfs)
+    else:
+        init_marker = '{}/upstart'.format(monfs)
+    if os.path.exists(monfs) and not os.path.exists(init_marker):
         # Mark mon as managed by upstart so that
         # it gets start correctly on reboots
-        with open(upstart, 'w'):
+        with open(init_marker, 'w'):
             pass
 
 
@@ -335,7 +422,7 @@
         return
 
     status_set('maintenance', 'Initializing device {}'.format(dev))
-    cmd = ['ceph-disk-prepare']
+    cmd = ['ceph-disk', 'prepare']
     # Later versions of ceph support more options
     if cmp_pkgrevno('ceph', '0.48.3') >= 0:
         if osd_format:
@@ -368,14 +455,17 @@
         log('Path {} is already configured as an OSD - bailing'.format(path))
         return
 
-    if cmp_pkgrevno('ceph', '0.56.6') < 0:
+    if cmp_pkgrevno('ceph', "0.56.6") < 0:
         log('Unable to use directories for OSDs with ceph < 0.56.6',
             level=ERROR)
        raise
 
-    mkdir(path)
+    mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755)
+    chownr('/var/lib/ceph', ceph_user(), ceph_user())
     cmd = [
-        'ceph-disk-prepare',
+        'sudo', '-u', ceph_user(),
+        'ceph-disk',
+        'prepare',
         '--data-dir',
         path
     ]

=== modified file 'hooks/ceph_hooks.py'
--- hooks/ceph_hooks.py 2015-10-30 02:22:54 +0000
+++ hooks/ceph_hooks.py 2016-01-12 14:19:26 +0000
@@ -95,7 +95,8 @@
     # Install ceph.conf as an alternative to support
     # co-existence with other charms that write this file
     charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
-    mkdir(os.path.dirname(charm_ceph_conf))
+    mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(),
+          group=ceph.ceph_user())
     with open(charm_ceph_conf, 'w') as cephconf:
         cephconf.write(render_template('ceph.conf', cephcontext))
     install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
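
A note for readers following the init-system changes in the diff: everything keys off a single codename comparison in systemd(). A standalone illustration of that gate (assuming, as the charm does, that vivid and later Ubuntu releases boot with systemd):

# Standalone sketch of the init-system gate used above. Assumption (the
# same one systemd() makes): codenames compare lexically in release order
# for the releases the charm targets (trusty < utopic < vivid < wily),
# and vivid onward boot with systemd rather than upstart.
from charmhelpers.core.host import lsb_release

def uses_systemd():
    # Local name to avoid shadowing the charm's systemd(); same logic.
    return lsb_release()['DISTRIB_CODENAME'] >= 'vivid'

init_marker = 'systemd' if uses_systemd() else 'upstart'
print('ceph-mon directory will be marked with: {}'.format(init_marker))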
