Merge lp:~hopem/charms/precise/rabbitmq-server/lp1251560 into lp:charms/rabbitmq-server

Proposed by Edward Hope-Morley
Status: Merged
Merged at revision: 48
Proposed branch: lp:~hopem/charms/precise/rabbitmq-server/lp1251560
Merge into: lp:charms/rabbitmq-server
Diff against target: 144 lines (+73/-12)
4 files modified
config.yaml (+10/-0)
hooks/lib/ceph_utils.py (+59/-10)
hooks/rabbitmq_server_relations.py (+3/-1)
revision (+1/-1)
To merge this branch: bzr merge lp:~hopem/charms/precise/rabbitmq-server/lp1251560
Reviewer             Review Type    Date Requested    Status
James Page                                            Approve
Marco Ceppi                                           Pending
OpenStack Charmers                                    Pending
Review via email: mp+205540@code.launchpad.net

This proposal supersedes a proposal from 2014-01-29.

Revision history for this message
Ante Karamatić (ivoks) wrote : Posted in a previous version of this proposal

FWIW, I've tested this charm and it works as intended.

Revision history for this message
James Page (james-page) wrote : Posted in a previous version of this proposal

This is fine with > argonaut; I recently landed some changes into charm-helpers to deal with <= argonaut:

https://code.launchpad.net/~james-page/charm-helpers/folsom-ceph-support/+merge/192557

review: Needs Fixing
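
(Editorial note, for context on the argonaut point above: argonaut is Ceph 0.48.x, and the JSON-formatted "ceph osd ls" query used later in this diff is only relied on from 0.56/bobtail onward. Below is a minimal sketch of that version-gating pattern; the helper name and the 0.56 threshold mirror the diff, everything else is illustrative.)

    import subprocess


    def ceph_version():
        '''Return the locally installed Ceph version string (e.g. '0.56.7'), or None.'''
        try:
            out = subprocess.check_output(['ceph', '-v']).decode().split()
        except (OSError, subprocess.CalledProcessError):
            return None  # ceph CLI not installed or not usable
        # 'ceph version 0.56.7 (...)' -> ['ceph', 'version', '0.56.7', '(...)']
        return out[2] if len(out) > 3 else None


    version = ceph_version()
    # String comparison is adequate for the 0.4x/0.5x releases in question.
    if version and version >= '0.56':
        print('bobtail or newer: OSD list can be queried from the CLI')
    else:
        print('argonaut or older: fall back to a fixed placement group count')
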
Revision history for this message
Marco Ceppi (marcoceppi) wrote : Posted in a previous version of this proposal

There are merge conflicts in this proposal. Needs fixing, but LGTM otherwise.

review: Needs Fixing
Revision history for this message
James Page (james-page):
review: Approve

Preview Diff

=== modified file 'config.yaml'
--- config.yaml 2014-01-16 13:47:32 +0000
+++ config.yaml 2014-02-10 09:26:47 +0000
@@ -67,3 +67,13 @@
       The name that will be used to create the Ceph's RBD image with. If the
       image name exists in Ceph, it will be re-used and the data will be
       overwritten.
+  ceph-osd-replication-count:
+    default: 2
+    type: int
+    description: |
+      This value dictates the number of replicas ceph must make of any
+      object it stores within the rabbitmq rbd pool. Of course, this only
+      applies if using Ceph as a backend store. Note that once the rabbitmq
+      rbd pool has been created, changing this value will not have any
+      effect (although it can be changed in ceph by manually configuring
+      your ceph cluster).

=== modified file 'hooks/lib/ceph_utils.py'
--- hooks/lib/ceph_utils.py 2013-12-12 16:45:24 +0000
+++ hooks/lib/ceph_utils.py 2014-02-10 09:26:47 +0000
@@ -9,6 +9,7 @@
 #
 
 import commands
+import json
 import subprocess
 import os
 import shutil
@@ -65,14 +66,61 @@
     return name in out
 
 
-def create_pool(service, name):
-    cmd = [
-        'rados',
-        '--id',
-        service,
-        'mkpool',
-        name]
-    execute(cmd)
+def ceph_version():
+    ''' Retrieve the local version of ceph '''
+    if os.path.exists('/usr/bin/ceph'):
+        cmd = ['ceph', '-v']
+        output = subprocess.check_output(cmd)
+        output = output.split()
+        if len(output) > 3:
+            return output[2]
+        else:
+            return None
+    else:
+        return None
+
+
+def get_osds(service):
+    '''
+    Return a list of all Ceph Object Storage Daemons
+    currently in the cluster
+    '''
+    version = ceph_version()
+    if version and version >= '0.56':
+        cmd = ['ceph', '--id', service, 'osd', 'ls', '--format=json']
+        return json.loads(subprocess.check_output(cmd))
+    else:
+        return None
+
+
+def create_pool(service, name, replicas=2):
+    ''' Create a new RADOS pool '''
+    if pool_exists(service, name):
+        utils.juju_log('WARNING',
+                       "Ceph pool {} already exists, "
+                       "skipping creation".format(name))
+        return
+
+    osds = get_osds(service)
+    if osds:
+        pgnum = (len(osds) * 100 / replicas)
+    else:
+        # NOTE(james-page): Default to 200 for older ceph versions
+        # which don't support OSD query from cli
+        pgnum = 200
+
+    cmd = [
+        'ceph', '--id', service,
+        'osd', 'pool', 'create',
+        name, str(pgnum)
+    ]
+    subprocess.check_call(cmd)
+    cmd = [
+        'ceph', '--id', service,
+        'osd', 'pool', 'set', name,
+        'size', str(replicas)
+    ]
+    subprocess.check_call(cmd)
 
 
 def keyfile_path(service):
@@ -218,7 +266,8 @@
 
 
 def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
-                        blk_device, fstype, system_services=[]):
+                        blk_device, fstype, system_services=[],
+                        rbd_pool_replicas=2):
     """
    To be called from the current cluster leader.
    Ensures given pool and RBD image exists, is mapped to a block device,
@@ -233,7 +282,7 @@
     # Ensure pool, RBD image, RBD mappings are in place.
     if not pool_exists(service, pool):
         utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool)
-        create_pool(service, pool)
+        create_pool(service, pool, replicas=rbd_pool_replicas)
 
     if not rbd_exists(service, pool, rbd_img):
         utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img)

=== modified file 'hooks/rabbitmq_server_relations.py'
--- hooks/rabbitmq_server_relations.py 2014-01-13 11:58:04 +0000
+++ hooks/rabbitmq_server_relations.py 2014-02-10 09:26:47 +0000
@@ -290,11 +290,13 @@
         rbd_size = utils.config_get('rbd-size')
         sizemb = int(rbd_size.split('G')[0]) * 1024
         blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
+        rbd_pool_rep_count = utils.config_get('ceph-osd-replication-count')
         ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
                                  rbd_img=rbd_img, sizemb=sizemb,
                                  fstype='ext4', mount_point=RABBIT_DIR,
                                  blk_device=blk_device,
-                                 system_services=['rabbitmq-server'])
+                                 system_services=['rabbitmq-server'],
+                                 rbd_pool_replicas=rbd_pool_rep_count)
     else:
         utils.juju_log('INFO',
                        'This is not the peer leader. Not configuring RBD.')

=== modified file 'revision'
--- revision 2014-01-16 13:47:32 +0000
+++ revision 2014-02-10 09:26:47 +0000
@@ -1,1 +1,1 @@
-113
+114
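
For reference, a worked example of the placement-group heuristic introduced in create_pool() above. The numbers are illustrative; the fallback of 200 matches the diff's default for older ceph releases whose OSD list cannot be queried from the CLI.

    # Illustrative values only; mirrors the pgnum logic added in ceph_utils.py.
    replicas = 2                 # the new ceph-osd-replication-count default
    osds = [0, 1, 2, 3, 4, 5]    # e.g. 'ceph osd ls' reporting six OSDs

    if osds:
        # The charm runs under Python 2, where / on ints is integer division;
        # // keeps this sketch equivalent on either interpreter.
        pgnum = len(osds) * 100 // replicas   # 6 * 100 // 2 = 300 placement groups
    else:
        pgnum = 200                           # pre-bobtail fallback, as in the diff

    print(pgnum)   # -> 300

At deploy time the new option would typically be adjusted with something like "juju set rabbitmq-server ceph-osd-replication-count=3" (juju 1.x syntax) before the ceph relation is joined, since the config.yaml description above notes that changing it after the rabbitmq rbd pool exists has no effect.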
