Merge lp:~openstack-charmers/charms/precise/quantum-gateway/ods_merge into lp:~charmers/charms/precise/quantum-gateway/trunk
- Precise Pangolin (12.04)
- ods_merge
- Merge into trunk
Proposed by
Adam Gandelman
Status: | Merged |
---|---|
Merged at revision: | 40 |
Proposed branch: | lp:~openstack-charmers/charms/precise/quantum-gateway/ods_merge |
Merge into: | lp:~charmers/charms/precise/quantum-gateway/trunk |
Diff against target: |
1490 lines (+860/-62) 16 files modified
charm-helpers-sync.yaml (+1/-0) hooks/charmhelpers/contrib/network/ovs/__init__.py (+4/-1) hooks/charmhelpers/contrib/openstack/alternatives.py (+17/-0) hooks/charmhelpers/contrib/openstack/context.py (+20/-1) hooks/charmhelpers/contrib/openstack/neutron.py (+20/-0) hooks/charmhelpers/contrib/openstack/utils.py (+81/-10) hooks/charmhelpers/contrib/storage/linux/ceph.py (+383/-0) hooks/charmhelpers/contrib/storage/linux/loopback.py (+62/-0) hooks/charmhelpers/contrib/storage/linux/lvm.py (+88/-0) hooks/charmhelpers/contrib/storage/linux/utils.py (+25/-0) hooks/charmhelpers/core/hookenv.py (+78/-23) hooks/charmhelpers/core/host.py (+15/-9) hooks/charmhelpers/fetch/__init__.py (+53/-5) hooks/charmhelpers/fetch/bzrurl.py (+1/-1) hooks/quantum_utils.py (+3/-6) unit_tests/test_quantum_utils.py (+9/-6) |
To merge this branch: | bzr merge lp:~openstack-charmers/charms/precise/quantum-gateway/ods_merge |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Marco Ceppi (community) | Abstain | ||
OpenStack Charmers | Pending | ||
Review via email: mp+194065@code.launchpad.net |
Commit message
Description of the change
Adds Neutron NVP support
To post a comment you must log in.
- 42. By James Page
-
Rebase on trunk, resync helpers and add missing deps
Revision history for this message
Marco Ceppi (marcoceppi) : | # |
review:
Abstain
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'charm-helpers-sync.yaml' | |||
2 | --- charm-helpers-sync.yaml 2013-10-15 01:32:42 +0000 | |||
3 | +++ charm-helpers-sync.yaml 2013-11-08 05:56:36 +0000 | |||
4 | @@ -6,4 +6,5 @@ | |||
5 | 6 | - contrib.openstack | 6 | - contrib.openstack |
6 | 7 | - contrib.hahelpers | 7 | - contrib.hahelpers |
7 | 8 | - contrib.network.ovs | 8 | - contrib.network.ovs |
8 | 9 | - contrib.storage.linux | ||
9 | 9 | - payload.execd | 10 | - payload.execd |
10 | 10 | 11 | ||
11 | === modified file 'hooks/charmhelpers/contrib/network/ovs/__init__.py' | |||
12 | --- hooks/charmhelpers/contrib/network/ovs/__init__.py 2013-07-10 14:52:15 +0000 | |||
13 | +++ hooks/charmhelpers/contrib/network/ovs/__init__.py 2013-11-08 05:56:36 +0000 | |||
14 | @@ -69,4 +69,7 @@ | |||
15 | 69 | 69 | ||
16 | 70 | def full_restart(): | 70 | def full_restart(): |
17 | 71 | ''' Full restart and reload of openvswitch ''' | 71 | ''' Full restart and reload of openvswitch ''' |
19 | 72 | service('force-reload-kmod', 'openvswitch-switch') | 72 | if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'): |
20 | 73 | service('start', 'openvswitch-force-reload-kmod') | ||
21 | 74 | else: | ||
22 | 75 | service('force-reload-kmod', 'openvswitch-switch') | ||
23 | 73 | 76 | ||
24 | === added file 'hooks/charmhelpers/contrib/openstack/alternatives.py' | |||
25 | --- hooks/charmhelpers/contrib/openstack/alternatives.py 1970-01-01 00:00:00 +0000 | |||
26 | +++ hooks/charmhelpers/contrib/openstack/alternatives.py 2013-11-08 05:56:36 +0000 | |||
27 | @@ -0,0 +1,17 @@ | |||
28 | 1 | ''' Helper for managing alternatives for file conflict resolution ''' | ||
29 | 2 | |||
30 | 3 | import subprocess | ||
31 | 4 | import shutil | ||
32 | 5 | import os | ||
33 | 6 | |||
34 | 7 | |||
35 | 8 | def install_alternative(name, target, source, priority=50): | ||
36 | 9 | ''' Install alternative configuration ''' | ||
37 | 10 | if (os.path.exists(target) and not os.path.islink(target)): | ||
38 | 11 | # Move existing file/directory away before installing | ||
39 | 12 | shutil.move(target, '{}.bak'.format(target)) | ||
40 | 13 | cmd = [ | ||
41 | 14 | 'update-alternatives', '--force', '--install', | ||
42 | 15 | target, name, source, str(priority) | ||
43 | 16 | ] | ||
44 | 17 | subprocess.check_call(cmd) | ||
45 | 0 | 18 | ||
46 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
47 | --- hooks/charmhelpers/contrib/openstack/context.py 2013-10-15 01:32:42 +0000 | |||
48 | +++ hooks/charmhelpers/contrib/openstack/context.py 2013-11-08 05:56:36 +0000 | |||
49 | @@ -385,16 +385,33 @@ | |||
50 | 385 | def ovs_ctxt(self): | 385 | def ovs_ctxt(self): |
51 | 386 | driver = neutron_plugin_attribute(self.plugin, 'driver', | 386 | driver = neutron_plugin_attribute(self.plugin, 'driver', |
52 | 387 | self.network_manager) | 387 | self.network_manager) |
54 | 388 | 388 | config = neutron_plugin_attribute(self.plugin, 'config', | |
55 | 389 | self.network_manager) | ||
56 | 389 | ovs_ctxt = { | 390 | ovs_ctxt = { |
57 | 390 | 'core_plugin': driver, | 391 | 'core_plugin': driver, |
58 | 391 | 'neutron_plugin': 'ovs', | 392 | 'neutron_plugin': 'ovs', |
59 | 392 | 'neutron_security_groups': self.neutron_security_groups, | 393 | 'neutron_security_groups': self.neutron_security_groups, |
60 | 393 | 'local_ip': unit_private_ip(), | 394 | 'local_ip': unit_private_ip(), |
61 | 395 | 'config': config | ||
62 | 394 | } | 396 | } |
63 | 395 | 397 | ||
64 | 396 | return ovs_ctxt | 398 | return ovs_ctxt |
65 | 397 | 399 | ||
66 | 400 | def nvp_ctxt(self): | ||
67 | 401 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
68 | 402 | self.network_manager) | ||
69 | 403 | config = neutron_plugin_attribute(self.plugin, 'config', | ||
70 | 404 | self.network_manager) | ||
71 | 405 | nvp_ctxt = { | ||
72 | 406 | 'core_plugin': driver, | ||
73 | 407 | 'neutron_plugin': 'nvp', | ||
74 | 408 | 'neutron_security_groups': self.neutron_security_groups, | ||
75 | 409 | 'local_ip': unit_private_ip(), | ||
76 | 410 | 'config': config | ||
77 | 411 | } | ||
78 | 412 | |||
79 | 413 | return nvp_ctxt | ||
80 | 414 | |||
81 | 398 | def __call__(self): | 415 | def __call__(self): |
82 | 399 | self._ensure_packages() | 416 | self._ensure_packages() |
83 | 400 | 417 | ||
84 | @@ -408,6 +425,8 @@ | |||
85 | 408 | 425 | ||
86 | 409 | if self.plugin == 'ovs': | 426 | if self.plugin == 'ovs': |
87 | 410 | ctxt.update(self.ovs_ctxt()) | 427 | ctxt.update(self.ovs_ctxt()) |
88 | 428 | elif self.plugin == 'nvp': | ||
89 | 429 | ctxt.update(self.nvp_ctxt()) | ||
90 | 411 | 430 | ||
91 | 412 | self._save_flag_file() | 431 | self._save_flag_file() |
92 | 413 | return ctxt | 432 | return ctxt |
93 | 414 | 433 | ||
94 | === modified file 'hooks/charmhelpers/contrib/openstack/neutron.py' | |||
95 | --- hooks/charmhelpers/contrib/openstack/neutron.py 2013-10-15 01:32:42 +0000 | |||
96 | +++ hooks/charmhelpers/contrib/openstack/neutron.py 2013-11-08 05:56:36 +0000 | |||
97 | @@ -34,13 +34,23 @@ | |||
98 | 34 | 'services': ['quantum-plugin-openvswitch-agent'], | 34 | 'services': ['quantum-plugin-openvswitch-agent'], |
99 | 35 | 'packages': [[headers_package(), 'openvswitch-datapath-dkms'], | 35 | 'packages': [[headers_package(), 'openvswitch-datapath-dkms'], |
100 | 36 | ['quantum-plugin-openvswitch-agent']], | 36 | ['quantum-plugin-openvswitch-agent']], |
101 | 37 | 'server_packages': ['quantum-server', | ||
102 | 38 | 'quantum-plugin-openvswitch'], | ||
103 | 39 | 'server_services': ['quantum-server'] | ||
104 | 37 | }, | 40 | }, |
105 | 38 | 'nvp': { | 41 | 'nvp': { |
106 | 39 | 'config': '/etc/quantum/plugins/nicira/nvp.ini', | 42 | 'config': '/etc/quantum/plugins/nicira/nvp.ini', |
107 | 40 | 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' | 43 | 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' |
108 | 41 | 'QuantumPlugin.NvpPluginV2', | 44 | 'QuantumPlugin.NvpPluginV2', |
109 | 45 | 'contexts': [ | ||
110 | 46 | context.SharedDBContext(user=config('neutron-database-user'), | ||
111 | 47 | database=config('neutron-database'), | ||
112 | 48 | relation_prefix='neutron')], | ||
113 | 42 | 'services': [], | 49 | 'services': [], |
114 | 43 | 'packages': [], | 50 | 'packages': [], |
115 | 51 | 'server_packages': ['quantum-server', | ||
116 | 52 | 'quantum-plugin-nicira'], | ||
117 | 53 | 'server_services': ['quantum-server'] | ||
118 | 44 | } | 54 | } |
119 | 45 | } | 55 | } |
120 | 46 | 56 | ||
121 | @@ -60,13 +70,23 @@ | |||
122 | 60 | 'services': ['neutron-plugin-openvswitch-agent'], | 70 | 'services': ['neutron-plugin-openvswitch-agent'], |
123 | 61 | 'packages': [[headers_package(), 'openvswitch-datapath-dkms'], | 71 | 'packages': [[headers_package(), 'openvswitch-datapath-dkms'], |
124 | 62 | ['quantum-plugin-openvswitch-agent']], | 72 | ['quantum-plugin-openvswitch-agent']], |
125 | 73 | 'server_packages': ['neutron-server', | ||
126 | 74 | 'neutron-plugin-openvswitch'], | ||
127 | 75 | 'server_services': ['neutron-server'] | ||
128 | 63 | }, | 76 | }, |
129 | 64 | 'nvp': { | 77 | 'nvp': { |
130 | 65 | 'config': '/etc/neutron/plugins/nicira/nvp.ini', | 78 | 'config': '/etc/neutron/plugins/nicira/nvp.ini', |
131 | 66 | 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' | 79 | 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' |
132 | 67 | 'NeutronPlugin.NvpPluginV2', | 80 | 'NeutronPlugin.NvpPluginV2', |
133 | 81 | 'contexts': [ | ||
134 | 82 | context.SharedDBContext(user=config('neutron-database-user'), | ||
135 | 83 | database=config('neutron-database'), | ||
136 | 84 | relation_prefix='neutron')], | ||
137 | 68 | 'services': [], | 85 | 'services': [], |
138 | 69 | 'packages': [], | 86 | 'packages': [], |
139 | 87 | 'server_packages': ['neutron-server', | ||
140 | 88 | 'neutron-plugin-nicira'], | ||
141 | 89 | 'server_services': ['neutron-server'] | ||
142 | 70 | } | 90 | } |
143 | 71 | } | 91 | } |
144 | 72 | 92 | ||
145 | 73 | 93 | ||
146 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' | |||
147 | --- hooks/charmhelpers/contrib/openstack/utils.py 2013-10-15 01:32:42 +0000 | |||
148 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2013-11-08 05:56:36 +0000 | |||
149 | @@ -13,19 +13,28 @@ | |||
150 | 13 | config, | 13 | config, |
151 | 14 | log as juju_log, | 14 | log as juju_log, |
152 | 15 | charm_dir, | 15 | charm_dir, |
162 | 16 | ) | 16 | ERROR, |
163 | 17 | 17 | INFO | |
164 | 18 | from charmhelpers.core.host import ( | 18 | ) |
165 | 19 | lsb_release, | 19 | |
166 | 20 | ) | 20 | from charmhelpers.contrib.storage.linux.lvm import ( |
167 | 21 | 21 | deactivate_lvm_volume_group, | |
168 | 22 | from charmhelpers.fetch import ( | 22 | is_lvm_physical_volume, |
169 | 23 | apt_install, | 23 | remove_lvm_physical_volume, |
170 | 24 | ) | 24 | ) |
171 | 25 | |||
172 | 26 | from charmhelpers.core.host import lsb_release, mounts, umount | ||
173 | 27 | from charmhelpers.fetch import apt_install | ||
174 | 28 | from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk | ||
175 | 29 | from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device | ||
176 | 25 | 30 | ||
177 | 26 | CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" | 31 | CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" |
178 | 27 | CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' | 32 | CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' |
179 | 28 | 33 | ||
180 | 34 | DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' | ||
181 | 35 | 'restricted main multiverse universe') | ||
182 | 36 | |||
183 | 37 | |||
184 | 29 | UBUNTU_OPENSTACK_RELEASE = OrderedDict([ | 38 | UBUNTU_OPENSTACK_RELEASE = OrderedDict([ |
185 | 30 | ('oneiric', 'diablo'), | 39 | ('oneiric', 'diablo'), |
186 | 31 | ('precise', 'essex'), | 40 | ('precise', 'essex'), |
187 | @@ -57,6 +66,8 @@ | |||
188 | 57 | ('1.9.0', 'havana'), | 66 | ('1.9.0', 'havana'), |
189 | 58 | ]) | 67 | ]) |
190 | 59 | 68 | ||
191 | 69 | DEFAULT_LOOPBACK_SIZE = '5G' | ||
192 | 70 | |||
193 | 60 | 71 | ||
194 | 61 | def error_out(msg): | 72 | def error_out(msg): |
195 | 62 | juju_log("FATAL ERROR: %s" % msg, level='ERROR') | 73 | juju_log("FATAL ERROR: %s" % msg, level='ERROR') |
196 | @@ -67,7 +78,7 @@ | |||
197 | 67 | '''Derive OpenStack release codename from a given installation source.''' | 78 | '''Derive OpenStack release codename from a given installation source.''' |
198 | 68 | ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] | 79 | ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] |
199 | 69 | rel = '' | 80 | rel = '' |
201 | 70 | if src == 'distro': | 81 | if src in ['distro', 'distro-proposed']: |
202 | 71 | try: | 82 | try: |
203 | 72 | rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] | 83 | rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] |
204 | 73 | except KeyError: | 84 | except KeyError: |
205 | @@ -202,6 +213,10 @@ | |||
206 | 202 | '''Configure apt installation source.''' | 213 | '''Configure apt installation source.''' |
207 | 203 | if rel == 'distro': | 214 | if rel == 'distro': |
208 | 204 | return | 215 | return |
209 | 216 | elif rel == 'distro-proposed': | ||
210 | 217 | ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] | ||
211 | 218 | with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: | ||
212 | 219 | f.write(DISTRO_PROPOSED % ubuntu_rel) | ||
213 | 205 | elif rel[:4] == "ppa:": | 220 | elif rel[:4] == "ppa:": |
214 | 206 | src = rel | 221 | src = rel |
215 | 207 | subprocess.check_call(["add-apt-repository", "-y", src]) | 222 | subprocess.check_call(["add-apt-repository", "-y", src]) |
216 | @@ -299,6 +314,62 @@ | |||
217 | 299 | return apt.version_compare(available_vers, cur_vers) == 1 | 314 | return apt.version_compare(available_vers, cur_vers) == 1 |
218 | 300 | 315 | ||
219 | 301 | 316 | ||
220 | 317 | def ensure_block_device(block_device): | ||
221 | 318 | ''' | ||
222 | 319 | Confirm block_device, create as loopback if necessary. | ||
223 | 320 | |||
224 | 321 | :param block_device: str: Full path of block device to ensure. | ||
225 | 322 | |||
226 | 323 | :returns: str: Full path of ensured block device. | ||
227 | 324 | ''' | ||
228 | 325 | _none = ['None', 'none', None] | ||
229 | 326 | if (block_device in _none): | ||
230 | 327 | error_out('prepare_storage(): Missing required input: ' | ||
231 | 328 | 'block_device=%s.' % block_device, level=ERROR) | ||
232 | 329 | |||
233 | 330 | if block_device.startswith('/dev/'): | ||
234 | 331 | bdev = block_device | ||
235 | 332 | elif block_device.startswith('/'): | ||
236 | 333 | _bd = block_device.split('|') | ||
237 | 334 | if len(_bd) == 2: | ||
238 | 335 | bdev, size = _bd | ||
239 | 336 | else: | ||
240 | 337 | bdev = block_device | ||
241 | 338 | size = DEFAULT_LOOPBACK_SIZE | ||
242 | 339 | bdev = ensure_loopback_device(bdev, size) | ||
243 | 340 | else: | ||
244 | 341 | bdev = '/dev/%s' % block_device | ||
245 | 342 | |||
246 | 343 | if not is_block_device(bdev): | ||
247 | 344 | error_out('Failed to locate valid block device at %s' % bdev, | ||
248 | 345 | level=ERROR) | ||
249 | 346 | |||
250 | 347 | return bdev | ||
251 | 348 | |||
252 | 349 | |||
253 | 350 | def clean_storage(block_device): | ||
254 | 351 | ''' | ||
255 | 352 | Ensures a block device is clean. That is: | ||
256 | 353 | - unmounted | ||
257 | 354 | - any lvm volume groups are deactivated | ||
258 | 355 | - any lvm physical device signatures removed | ||
259 | 356 | - partition table wiped | ||
260 | 357 | |||
261 | 358 | :param block_device: str: Full path to block device to clean. | ||
262 | 359 | ''' | ||
263 | 360 | for mp, d in mounts(): | ||
264 | 361 | if d == block_device: | ||
265 | 362 | juju_log('clean_storage(): %s is mounted @ %s, unmounting.' % | ||
266 | 363 | (d, mp), level=INFO) | ||
267 | 364 | umount(mp, persist=True) | ||
268 | 365 | |||
269 | 366 | if is_lvm_physical_volume(block_device): | ||
270 | 367 | deactivate_lvm_volume_group(block_device) | ||
271 | 368 | remove_lvm_physical_volume(block_device) | ||
272 | 369 | else: | ||
273 | 370 | zap_disk(block_device) | ||
274 | 371 | |||
275 | 372 | |||
276 | 302 | def is_ip(address): | 373 | def is_ip(address): |
277 | 303 | """ | 374 | """ |
278 | 304 | Returns True if address is a valid IP address. | 375 | Returns True if address is a valid IP address. |
279 | 305 | 376 | ||
280 | === added directory 'hooks/charmhelpers/contrib/storage' | |||
281 | === added file 'hooks/charmhelpers/contrib/storage/__init__.py' | |||
282 | === added directory 'hooks/charmhelpers/contrib/storage/linux' | |||
283 | === added file 'hooks/charmhelpers/contrib/storage/linux/__init__.py' | |||
284 | === added file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' | |||
285 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 1970-01-01 00:00:00 +0000 | |||
286 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2013-11-08 05:56:36 +0000 | |||
287 | @@ -0,0 +1,383 @@ | |||
288 | 1 | # | ||
289 | 2 | # Copyright 2012 Canonical Ltd. | ||
290 | 3 | # | ||
291 | 4 | # This file is sourced from lp:openstack-charm-helpers | ||
292 | 5 | # | ||
293 | 6 | # Authors: | ||
294 | 7 | # James Page <james.page@ubuntu.com> | ||
295 | 8 | # Adam Gandelman <adamg@ubuntu.com> | ||
296 | 9 | # | ||
297 | 10 | |||
298 | 11 | import os | ||
299 | 12 | import shutil | ||
300 | 13 | import json | ||
301 | 14 | import time | ||
302 | 15 | |||
303 | 16 | from subprocess import ( | ||
304 | 17 | check_call, | ||
305 | 18 | check_output, | ||
306 | 19 | CalledProcessError | ||
307 | 20 | ) | ||
308 | 21 | |||
309 | 22 | from charmhelpers.core.hookenv import ( | ||
310 | 23 | relation_get, | ||
311 | 24 | relation_ids, | ||
312 | 25 | related_units, | ||
313 | 26 | log, | ||
314 | 27 | INFO, | ||
315 | 28 | WARNING, | ||
316 | 29 | ERROR | ||
317 | 30 | ) | ||
318 | 31 | |||
319 | 32 | from charmhelpers.core.host import ( | ||
320 | 33 | mount, | ||
321 | 34 | mounts, | ||
322 | 35 | service_start, | ||
323 | 36 | service_stop, | ||
324 | 37 | service_running, | ||
325 | 38 | umount, | ||
326 | 39 | ) | ||
327 | 40 | |||
328 | 41 | from charmhelpers.fetch import ( | ||
329 | 42 | apt_install, | ||
330 | 43 | ) | ||
331 | 44 | |||
332 | 45 | KEYRING = '/etc/ceph/ceph.client.{}.keyring' | ||
333 | 46 | KEYFILE = '/etc/ceph/ceph.client.{}.key' | ||
334 | 47 | |||
335 | 48 | CEPH_CONF = """[global] | ||
336 | 49 | auth supported = {auth} | ||
337 | 50 | keyring = {keyring} | ||
338 | 51 | mon host = {mon_hosts} | ||
339 | 52 | """ | ||
340 | 53 | |||
341 | 54 | |||
342 | 55 | def install(): | ||
343 | 56 | ''' Basic Ceph client installation ''' | ||
344 | 57 | ceph_dir = "/etc/ceph" | ||
345 | 58 | if not os.path.exists(ceph_dir): | ||
346 | 59 | os.mkdir(ceph_dir) | ||
347 | 60 | apt_install('ceph-common', fatal=True) | ||
348 | 61 | |||
349 | 62 | |||
350 | 63 | def rbd_exists(service, pool, rbd_img): | ||
351 | 64 | ''' Check to see if a RADOS block device exists ''' | ||
352 | 65 | try: | ||
353 | 66 | out = check_output(['rbd', 'list', '--id', service, | ||
354 | 67 | '--pool', pool]) | ||
355 | 68 | except CalledProcessError: | ||
356 | 69 | return False | ||
357 | 70 | else: | ||
358 | 71 | return rbd_img in out | ||
359 | 72 | |||
360 | 73 | |||
361 | 74 | def create_rbd_image(service, pool, image, sizemb): | ||
362 | 75 | ''' Create a new RADOS block device ''' | ||
363 | 76 | cmd = [ | ||
364 | 77 | 'rbd', | ||
365 | 78 | 'create', | ||
366 | 79 | image, | ||
367 | 80 | '--size', | ||
368 | 81 | str(sizemb), | ||
369 | 82 | '--id', | ||
370 | 83 | service, | ||
371 | 84 | '--pool', | ||
372 | 85 | pool | ||
373 | 86 | ] | ||
374 | 87 | check_call(cmd) | ||
375 | 88 | |||
376 | 89 | |||
377 | 90 | def pool_exists(service, name): | ||
378 | 91 | ''' Check to see if a RADOS pool already exists ''' | ||
379 | 92 | try: | ||
380 | 93 | out = check_output(['rados', '--id', service, 'lspools']) | ||
381 | 94 | except CalledProcessError: | ||
382 | 95 | return False | ||
383 | 96 | else: | ||
384 | 97 | return name in out | ||
385 | 98 | |||
386 | 99 | |||
387 | 100 | def get_osds(service): | ||
388 | 101 | ''' | ||
389 | 102 | Return a list of all Ceph Object Storage Daemons | ||
390 | 103 | currently in the cluster | ||
391 | 104 | ''' | ||
392 | 105 | version = ceph_version() | ||
393 | 106 | if version and version >= '0.56': | ||
394 | 107 | return json.loads(check_output(['ceph', '--id', service, | ||
395 | 108 | 'osd', 'ls', '--format=json'])) | ||
396 | 109 | else: | ||
397 | 110 | return None | ||
398 | 111 | |||
399 | 112 | |||
400 | 113 | def create_pool(service, name, replicas=2): | ||
401 | 114 | ''' Create a new RADOS pool ''' | ||
402 | 115 | if pool_exists(service, name): | ||
403 | 116 | log("Ceph pool {} already exists, skipping creation".format(name), | ||
404 | 117 | level=WARNING) | ||
405 | 118 | return | ||
406 | 119 | # Calculate the number of placement groups based | ||
407 | 120 | # on upstream recommended best practices. | ||
408 | 121 | osds = get_osds(service) | ||
409 | 122 | if osds: | ||
410 | 123 | pgnum = (len(osds) * 100 / replicas) | ||
411 | 124 | else: | ||
412 | 125 | # NOTE(james-page): Default to 200 for older ceph versions | ||
413 | 126 | # which don't support OSD query from cli | ||
414 | 127 | pgnum = 200 | ||
415 | 128 | cmd = [ | ||
416 | 129 | 'ceph', '--id', service, | ||
417 | 130 | 'osd', 'pool', 'create', | ||
418 | 131 | name, str(pgnum) | ||
419 | 132 | ] | ||
420 | 133 | check_call(cmd) | ||
421 | 134 | cmd = [ | ||
422 | 135 | 'ceph', '--id', service, | ||
423 | 136 | 'osd', 'pool', 'set', name, | ||
424 | 137 | 'size', str(replicas) | ||
425 | 138 | ] | ||
426 | 139 | check_call(cmd) | ||
427 | 140 | |||
428 | 141 | |||
429 | 142 | def delete_pool(service, name): | ||
430 | 143 | ''' Delete a RADOS pool from ceph ''' | ||
431 | 144 | cmd = [ | ||
432 | 145 | 'ceph', '--id', service, | ||
433 | 146 | 'osd', 'pool', 'delete', | ||
434 | 147 | name, '--yes-i-really-really-mean-it' | ||
435 | 148 | ] | ||
436 | 149 | check_call(cmd) | ||
437 | 150 | |||
438 | 151 | |||
439 | 152 | def _keyfile_path(service): | ||
440 | 153 | return KEYFILE.format(service) | ||
441 | 154 | |||
442 | 155 | |||
443 | 156 | def _keyring_path(service): | ||
444 | 157 | return KEYRING.format(service) | ||
445 | 158 | |||
446 | 159 | |||
447 | 160 | def create_keyring(service, key): | ||
448 | 161 | ''' Create a new Ceph keyring containing key''' | ||
449 | 162 | keyring = _keyring_path(service) | ||
450 | 163 | if os.path.exists(keyring): | ||
451 | 164 | log('ceph: Keyring exists at %s.' % keyring, level=WARNING) | ||
452 | 165 | return | ||
453 | 166 | cmd = [ | ||
454 | 167 | 'ceph-authtool', | ||
455 | 168 | keyring, | ||
456 | 169 | '--create-keyring', | ||
457 | 170 | '--name=client.{}'.format(service), | ||
458 | 171 | '--add-key={}'.format(key) | ||
459 | 172 | ] | ||
460 | 173 | check_call(cmd) | ||
461 | 174 | log('ceph: Created new ring at %s.' % keyring, level=INFO) | ||
462 | 175 | |||
463 | 176 | |||
464 | 177 | def create_key_file(service, key): | ||
465 | 178 | ''' Create a file containing key ''' | ||
466 | 179 | keyfile = _keyfile_path(service) | ||
467 | 180 | if os.path.exists(keyfile): | ||
468 | 181 | log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING) | ||
469 | 182 | return | ||
470 | 183 | with open(keyfile, 'w') as fd: | ||
471 | 184 | fd.write(key) | ||
472 | 185 | log('ceph: Created new keyfile at %s.' % keyfile, level=INFO) | ||
473 | 186 | |||
474 | 187 | |||
475 | 188 | def get_ceph_nodes(): | ||
476 | 189 | ''' Query named relation 'ceph' to determine current nodes ''' | ||
477 | 190 | hosts = [] | ||
478 | 191 | for r_id in relation_ids('ceph'): | ||
479 | 192 | for unit in related_units(r_id): | ||
480 | 193 | hosts.append(relation_get('private-address', unit=unit, rid=r_id)) | ||
481 | 194 | return hosts | ||
482 | 195 | |||
483 | 196 | |||
484 | 197 | def configure(service, key, auth): | ||
485 | 198 | ''' Perform basic configuration of Ceph ''' | ||
486 | 199 | create_keyring(service, key) | ||
487 | 200 | create_key_file(service, key) | ||
488 | 201 | hosts = get_ceph_nodes() | ||
489 | 202 | with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: | ||
490 | 203 | ceph_conf.write(CEPH_CONF.format(auth=auth, | ||
491 | 204 | keyring=_keyring_path(service), | ||
492 | 205 | mon_hosts=",".join(map(str, hosts)))) | ||
493 | 206 | modprobe('rbd') | ||
494 | 207 | |||
495 | 208 | |||
496 | 209 | def image_mapped(name): | ||
497 | 210 | ''' Determine whether a RADOS block device is mapped locally ''' | ||
498 | 211 | try: | ||
499 | 212 | out = check_output(['rbd', 'showmapped']) | ||
500 | 213 | except CalledProcessError: | ||
501 | 214 | return False | ||
502 | 215 | else: | ||
503 | 216 | return name in out | ||
504 | 217 | |||
505 | 218 | |||
506 | 219 | def map_block_storage(service, pool, image): | ||
507 | 220 | ''' Map a RADOS block device for local use ''' | ||
508 | 221 | cmd = [ | ||
509 | 222 | 'rbd', | ||
510 | 223 | 'map', | ||
511 | 224 | '{}/{}'.format(pool, image), | ||
512 | 225 | '--user', | ||
513 | 226 | service, | ||
514 | 227 | '--secret', | ||
515 | 228 | _keyfile_path(service), | ||
516 | 229 | ] | ||
517 | 230 | check_call(cmd) | ||
518 | 231 | |||
519 | 232 | |||
520 | 233 | def filesystem_mounted(fs): | ||
521 | 234 | ''' Determine whether a filesystem is already mounted ''' | ||
522 | 235 | return fs in [f for f, m in mounts()] | ||
523 | 236 | |||
524 | 237 | |||
525 | 238 | def make_filesystem(blk_device, fstype='ext4', timeout=10): | ||
526 | 239 | ''' Make a new filesystem on the specified block device ''' | ||
527 | 240 | count = 0 | ||
528 | 241 | e_noent = os.errno.ENOENT | ||
529 | 242 | while not os.path.exists(blk_device): | ||
530 | 243 | if count >= timeout: | ||
531 | 244 | log('ceph: gave up waiting on block device %s' % blk_device, | ||
532 | 245 | level=ERROR) | ||
533 | 246 | raise IOError(e_noent, os.strerror(e_noent), blk_device) | ||
534 | 247 | log('ceph: waiting for block device %s to appear' % blk_device, | ||
535 | 248 | level=INFO) | ||
536 | 249 | count += 1 | ||
537 | 250 | time.sleep(1) | ||
538 | 251 | else: | ||
539 | 252 | log('ceph: Formatting block device %s as filesystem %s.' % | ||
540 | 253 | (blk_device, fstype), level=INFO) | ||
541 | 254 | check_call(['mkfs', '-t', fstype, blk_device]) | ||
542 | 255 | |||
543 | 256 | |||
544 | 257 | def place_data_on_block_device(blk_device, data_src_dst): | ||
545 | 258 | ''' Migrate data in data_src_dst to blk_device and then remount ''' | ||
546 | 259 | # mount block device into /mnt | ||
547 | 260 | mount(blk_device, '/mnt') | ||
548 | 261 | # copy data to /mnt | ||
549 | 262 | copy_files(data_src_dst, '/mnt') | ||
550 | 263 | # umount block device | ||
551 | 264 | umount('/mnt') | ||
552 | 265 | # Grab user/group ID's from original source | ||
553 | 266 | _dir = os.stat(data_src_dst) | ||
554 | 267 | uid = _dir.st_uid | ||
555 | 268 | gid = _dir.st_gid | ||
556 | 269 | # re-mount where the data should originally be | ||
557 | 270 | # TODO: persist is currently a NO-OP in core.host | ||
558 | 271 | mount(blk_device, data_src_dst, persist=True) | ||
559 | 272 | # ensure original ownership of new mount. | ||
560 | 273 | os.chown(data_src_dst, uid, gid) | ||
561 | 274 | |||
562 | 275 | |||
563 | 276 | # TODO: re-use | ||
564 | 277 | def modprobe(module): | ||
565 | 278 | ''' Load a kernel module and configure for auto-load on reboot ''' | ||
566 | 279 | log('ceph: Loading kernel module', level=INFO) | ||
567 | 280 | cmd = ['modprobe', module] | ||
568 | 281 | check_call(cmd) | ||
569 | 282 | with open('/etc/modules', 'r+') as modules: | ||
570 | 283 | if module not in modules.read(): | ||
571 | 284 | modules.write(module) | ||
572 | 285 | |||
573 | 286 | |||
574 | 287 | def copy_files(src, dst, symlinks=False, ignore=None): | ||
575 | 288 | ''' Copy files from src to dst ''' | ||
576 | 289 | for item in os.listdir(src): | ||
577 | 290 | s = os.path.join(src, item) | ||
578 | 291 | d = os.path.join(dst, item) | ||
579 | 292 | if os.path.isdir(s): | ||
580 | 293 | shutil.copytree(s, d, symlinks, ignore) | ||
581 | 294 | else: | ||
582 | 295 | shutil.copy2(s, d) | ||
583 | 296 | |||
584 | 297 | |||
585 | 298 | def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, | ||
586 | 299 | blk_device, fstype, system_services=[]): | ||
587 | 300 | """ | ||
588 | 301 | NOTE: This function must only be called from a single service unit for | ||
589 | 302 | the same rbd_img otherwise data loss will occur. | ||
590 | 303 | |||
591 | 304 | Ensures given pool and RBD image exists, is mapped to a block device, | ||
592 | 305 | and the device is formatted and mounted at the given mount_point. | ||
593 | 306 | |||
594 | 307 | If formatting a device for the first time, data existing at mount_point | ||
595 | 308 | will be migrated to the RBD device before being re-mounted. | ||
596 | 309 | |||
597 | 310 | All services listed in system_services will be stopped prior to data | ||
598 | 311 | migration and restarted when complete. | ||
599 | 312 | """ | ||
600 | 313 | # Ensure pool, RBD image, RBD mappings are in place. | ||
601 | 314 | if not pool_exists(service, pool): | ||
602 | 315 | log('ceph: Creating new pool {}.'.format(pool)) | ||
603 | 316 | create_pool(service, pool) | ||
604 | 317 | |||
605 | 318 | if not rbd_exists(service, pool, rbd_img): | ||
606 | 319 | log('ceph: Creating RBD image ({}).'.format(rbd_img)) | ||
607 | 320 | create_rbd_image(service, pool, rbd_img, sizemb) | ||
608 | 321 | |||
609 | 322 | if not image_mapped(rbd_img): | ||
610 | 323 | log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img)) | ||
611 | 324 | map_block_storage(service, pool, rbd_img) | ||
612 | 325 | |||
613 | 326 | # make file system | ||
614 | 327 | # TODO: What happens if for whatever reason this is run again and | ||
615 | 328 | # the data is already in the rbd device and/or is mounted?? | ||
616 | 329 | # When it is mounted already, it will fail to make the fs | ||
617 | 330 | # XXX: This is really sketchy! Need to at least add an fstab entry | ||
618 | 331 | # otherwise this hook will blow away existing data if its executed | ||
619 | 332 | # after a reboot. | ||
620 | 333 | if not filesystem_mounted(mount_point): | ||
621 | 334 | make_filesystem(blk_device, fstype) | ||
622 | 335 | |||
623 | 336 | for svc in system_services: | ||
624 | 337 | if service_running(svc): | ||
625 | 338 | log('ceph: Stopping services {} prior to migrating data.' | ||
626 | 339 | .format(svc)) | ||
627 | 340 | service_stop(svc) | ||
628 | 341 | |||
629 | 342 | place_data_on_block_device(blk_device, mount_point) | ||
630 | 343 | |||
631 | 344 | for svc in system_services: | ||
632 | 345 | log('ceph: Starting service {} after migrating data.' | ||
633 | 346 | .format(svc)) | ||
634 | 347 | service_start(svc) | ||
635 | 348 | |||
636 | 349 | |||
637 | 350 | def ensure_ceph_keyring(service, user=None, group=None): | ||
638 | 351 | ''' | ||
639 | 352 | Ensures a ceph keyring is created for a named service | ||
640 | 353 | and optionally ensures user and group ownership. | ||
641 | 354 | |||
642 | 355 | Returns False if no ceph key is available in relation state. | ||
643 | 356 | ''' | ||
644 | 357 | key = None | ||
645 | 358 | for rid in relation_ids('ceph'): | ||
646 | 359 | for unit in related_units(rid): | ||
647 | 360 | key = relation_get('key', rid=rid, unit=unit) | ||
648 | 361 | if key: | ||
649 | 362 | break | ||
650 | 363 | if not key: | ||
651 | 364 | return False | ||
652 | 365 | create_keyring(service=service, key=key) | ||
653 | 366 | keyring = _keyring_path(service) | ||
654 | 367 | if user and group: | ||
655 | 368 | check_call(['chown', '%s.%s' % (user, group), keyring]) | ||
656 | 369 | return True | ||
657 | 370 | |||
658 | 371 | |||
def ceph_version():
    """Retrieve the locally installed version of ceph.

    :returns: str: version string parsed from 'ceph -v' output, or None
              when the ceph client binary is absent or the output does
              not look as expected.
    """
    # Guard clause: no client binary means no version to report.
    if not os.path.exists('/usr/bin/ceph'):
        return None
    output = check_output(['ceph', '-v']).split()
    # Expected shape: 'ceph version X.Y.Z (...)' -> token index 2.
    if len(output) > 3:
        return output[2]
    return None
671 | 0 | 384 | ||
672 | === added file 'hooks/charmhelpers/contrib/storage/linux/loopback.py' | |||
673 | --- hooks/charmhelpers/contrib/storage/linux/loopback.py 1970-01-01 00:00:00 +0000 | |||
674 | +++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2013-11-08 05:56:36 +0000 | |||
675 | @@ -0,0 +1,62 @@ | |||
676 | 1 | |||
677 | 2 | import os | ||
678 | 3 | import re | ||
679 | 4 | |||
680 | 5 | from subprocess import ( | ||
681 | 6 | check_call, | ||
682 | 7 | check_output, | ||
683 | 8 | ) | ||
684 | 9 | |||
685 | 10 | |||
686 | 11 | ################################################## | ||
687 | 12 | # loopback device helpers. | ||
688 | 13 | ################################################## | ||
def loopback_devices():
    '''
    Parse through 'losetup -a' output to determine currently mapped
    loopback devices. Output is expected to look like:

    /dev/loop0: [0807]:961814 (/tmp/my.img)

    :returns: dict: a dict mapping {loopback_dev: backing_file}
    '''
    loopbacks = {}
    cmd = ['losetup', '-a']
    # maxsplit=2 keeps the parenthesised backing-file field intact even
    # when the file path itself contains spaces; without it the 3-way
    # unpack below raises ValueError for such paths.
    devs = [d.strip().split(' ', 2) for d in
            check_output(cmd).splitlines() if d != '']
    for dev, _, f in devs:
        # Raw string for the regex; '.+' (not '\S+') so that backing
        # file paths containing spaces are still captured.
        loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0]
    return loopbacks
705 | 30 | |||
706 | 31 | |||
def create_loopback(file_path):
    '''
    Map a new loopback device onto a given backing file.

    :returns: str: Full path to new loopback device (eg, /dev/loop0)
    '''
    file_path = os.path.abspath(file_path)
    check_call(['losetup', '--find', file_path])
    # 'losetup --find' does not report which device it picked, so look
    # the mapping up by backing file afterwards.
    for device, backing_file in loopback_devices().iteritems():
        if backing_file == file_path:
            return device
718 | 43 | |||
719 | 44 | |||
def ensure_loopback_device(path, size):
    '''
    Ensure a loopback device exists for a given backing file path and size.
    If no loopback device is mapped to the file, a new one will be created.

    TODO: Confirm size of found loopback device.

    :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
    '''
    # Reuse an existing mapping when one already points at this file.
    for device, backing_file in loopback_devices().iteritems():
        if backing_file == path:
            return device

    if not os.path.exists(path):
        # 'truncate' creates a sparse backing file of the requested size.
        check_call(['truncate', '--size', size, path])

    return create_loopback(path)
738 | 0 | 63 | ||
739 | === added file 'hooks/charmhelpers/contrib/storage/linux/lvm.py' | |||
740 | --- hooks/charmhelpers/contrib/storage/linux/lvm.py 1970-01-01 00:00:00 +0000 | |||
741 | +++ hooks/charmhelpers/contrib/storage/linux/lvm.py 2013-11-08 05:56:36 +0000 | |||
742 | @@ -0,0 +1,88 @@ | |||
743 | 1 | from subprocess import ( | ||
744 | 2 | CalledProcessError, | ||
745 | 3 | check_call, | ||
746 | 4 | check_output, | ||
747 | 5 | Popen, | ||
748 | 6 | PIPE, | ||
749 | 7 | ) | ||
750 | 8 | |||
751 | 9 | |||
752 | 10 | ################################################## | ||
753 | 11 | # LVM helpers. | ||
754 | 12 | ################################################## | ||
def deactivate_lvm_volume_group(block_device):
    '''
    Deactivate any volume group associated with an LVM physical volume.

    :param block_device: str: Full path to LVM physical volume
    '''
    volume_group = list_lvm_volume_group(block_device)
    if volume_group:
        check_call(['vgchange', '-an', volume_group])
765 | 23 | |||
766 | 24 | |||
def is_lvm_physical_volume(block_device):
    '''
    Determine whether a block device is initialized as an LVM PV.

    :param block_device: str: Full path of block device to inspect.

    :returns: boolean: True if block device is a PV, False if not.
    '''
    try:
        check_output(['pvdisplay', block_device])
    except CalledProcessError:
        # pvdisplay exits non-zero when the device carries no PV label.
        return False
    return True
780 | 38 | |||
781 | 39 | |||
def remove_lvm_physical_volume(block_device):
    '''
    Remove LVM PV signatures from a given block device.

    :param block_device: str: Full path of block device to scrub.
    '''
    # 'pvremove -ff' prompts for confirmation, so feed it a 'y' on stdin.
    proc = Popen(['pvremove', '-ff', block_device], stdin=PIPE)
    proc.communicate(input='y\n')
791 | 49 | |||
792 | 50 | |||
def list_lvm_volume_group(block_device):
    '''
    List LVM volume group associated with a given block device.

    Assumes block device is a valid LVM PV.

    :param block_device: str: Full path of block device to inspect.

    :returns: str: Name of volume group associated with block device or None
    '''
    vg = None
    for line in check_output(['pvdisplay', block_device]).splitlines():
        if line.strip().startswith('VG Name'):
            # The VG name is the final whitespace-separated token, e.g.
            # '  VG Name               my-vg'
            vg = line.split().pop()
    return vg
809 | 67 | |||
810 | 68 | |||
def create_lvm_physical_volume(block_device):
    '''
    Initialize a block device as an LVM physical volume.

    :param block_device: str: Full path of block device to initialize.

    '''
    cmd = ['pvcreate', block_device]
    check_call(cmd)
820 | 78 | |||
def create_lvm_volume_group(volume_group, block_device):
    '''
    Create an LVM volume group backed by a given block device.

    Assumes block device has already been initialized as an LVM PV.

    :param volume_group: str: Name of volume group to create.
    :param block_device: str: Full path of PV-initialized block device.
    '''
    cmd = ['vgcreate', volume_group, block_device]
    check_call(cmd)
831 | 0 | 89 | ||
832 | === added file 'hooks/charmhelpers/contrib/storage/linux/utils.py' | |||
833 | --- hooks/charmhelpers/contrib/storage/linux/utils.py 1970-01-01 00:00:00 +0000 | |||
834 | +++ hooks/charmhelpers/contrib/storage/linux/utils.py 2013-11-08 05:56:36 +0000 | |||
835 | @@ -0,0 +1,25 @@ | |||
836 | 1 | from os import stat | ||
837 | 2 | from stat import S_ISBLK | ||
838 | 3 | |||
839 | 4 | from subprocess import ( | ||
840 | 5 | check_call | ||
841 | 6 | ) | ||
842 | 7 | |||
843 | 8 | |||
def is_block_device(path):
    '''
    Confirm device at path is a valid block device node.

    :returns: boolean: True if path is a block device, False if not.
    '''
    mode = stat(path).st_mode
    return S_ISBLK(mode)
851 | 16 | |||
852 | 17 | |||
def zap_disk(block_device):
    '''
    Clear a block device of its partition table. Relies on sgdisk, which
    is installed as part of the 'gdisk' package in Ubuntu.

    :param block_device: str: Full path of block device to clean.
    '''
    cmd = ['sgdisk', '--zap-all', block_device]
    check_call(cmd)
861 | 0 | 26 | ||
862 | === modified file 'hooks/charmhelpers/core/hookenv.py' | |||
863 | --- hooks/charmhelpers/core/hookenv.py 2013-07-19 09:46:25 +0000 | |||
864 | +++ hooks/charmhelpers/core/hookenv.py 2013-11-08 05:56:36 +0000 | |||
865 | @@ -9,6 +9,7 @@ | |||
866 | 9 | import yaml | 9 | import yaml |
867 | 10 | import subprocess | 10 | import subprocess |
868 | 11 | import UserDict | 11 | import UserDict |
869 | 12 | from subprocess import CalledProcessError | ||
870 | 12 | 13 | ||
871 | 13 | CRITICAL = "CRITICAL" | 14 | CRITICAL = "CRITICAL" |
872 | 14 | ERROR = "ERROR" | 15 | ERROR = "ERROR" |
873 | @@ -21,7 +22,7 @@ | |||
874 | 21 | 22 | ||
875 | 22 | 23 | ||
876 | 23 | def cached(func): | 24 | def cached(func): |
878 | 24 | ''' Cache return values for multiple executions of func + args | 25 | """Cache return values for multiple executions of func + args |
879 | 25 | 26 | ||
880 | 26 | For example: | 27 | For example: |
881 | 27 | 28 | ||
882 | @@ -32,7 +33,7 @@ | |||
883 | 32 | unit_get('test') | 33 | unit_get('test') |
884 | 33 | 34 | ||
885 | 34 | will cache the result of unit_get + 'test' for future calls. | 35 | will cache the result of unit_get + 'test' for future calls. |
887 | 35 | ''' | 36 | """ |
888 | 36 | def wrapper(*args, **kwargs): | 37 | def wrapper(*args, **kwargs): |
889 | 37 | global cache | 38 | global cache |
890 | 38 | key = str((func, args, kwargs)) | 39 | key = str((func, args, kwargs)) |
891 | @@ -46,8 +47,8 @@ | |||
892 | 46 | 47 | ||
893 | 47 | 48 | ||
894 | 48 | def flush(key): | 49 | def flush(key): |
897 | 49 | ''' Flushes any entries from function cache where the | 50 | """Flushes any entries from function cache where the |
898 | 50 | key is found in the function+args ''' | 51 | key is found in the function+args """ |
899 | 51 | flush_list = [] | 52 | flush_list = [] |
900 | 52 | for item in cache: | 53 | for item in cache: |
901 | 53 | if key in item: | 54 | if key in item: |
902 | @@ -57,7 +58,7 @@ | |||
903 | 57 | 58 | ||
904 | 58 | 59 | ||
905 | 59 | def log(message, level=None): | 60 | def log(message, level=None): |
907 | 60 | "Write a message to the juju log" | 61 | """Write a message to the juju log""" |
908 | 61 | command = ['juju-log'] | 62 | command = ['juju-log'] |
909 | 62 | if level: | 63 | if level: |
910 | 63 | command += ['-l', level] | 64 | command += ['-l', level] |
911 | @@ -66,7 +67,7 @@ | |||
912 | 66 | 67 | ||
913 | 67 | 68 | ||
914 | 68 | class Serializable(UserDict.IterableUserDict): | 69 | class Serializable(UserDict.IterableUserDict): |
916 | 69 | "Wrapper, an object that can be serialized to yaml or json" | 70 | """Wrapper, an object that can be serialized to yaml or json""" |
917 | 70 | 71 | ||
918 | 71 | def __init__(self, obj): | 72 | def __init__(self, obj): |
919 | 72 | # wrap the object | 73 | # wrap the object |
920 | @@ -96,11 +97,11 @@ | |||
921 | 96 | self.data = state | 97 | self.data = state |
922 | 97 | 98 | ||
923 | 98 | def json(self): | 99 | def json(self): |
925 | 99 | "Serialize the object to json" | 100 | """Serialize the object to json""" |
926 | 100 | return json.dumps(self.data) | 101 | return json.dumps(self.data) |
927 | 101 | 102 | ||
928 | 102 | def yaml(self): | 103 | def yaml(self): |
930 | 103 | "Serialize the object to yaml" | 104 | """Serialize the object to yaml""" |
931 | 104 | return yaml.dump(self.data) | 105 | return yaml.dump(self.data) |
932 | 105 | 106 | ||
933 | 106 | 107 | ||
934 | @@ -119,38 +120,38 @@ | |||
935 | 119 | 120 | ||
936 | 120 | 121 | ||
937 | 121 | def in_relation_hook(): | 122 | def in_relation_hook(): |
939 | 122 | "Determine whether we're running in a relation hook" | 123 | """Determine whether we're running in a relation hook""" |
940 | 123 | return 'JUJU_RELATION' in os.environ | 124 | return 'JUJU_RELATION' in os.environ |
941 | 124 | 125 | ||
942 | 125 | 126 | ||
943 | 126 | def relation_type(): | 127 | def relation_type(): |
945 | 127 | "The scope for the current relation hook" | 128 | """The scope for the current relation hook""" |
946 | 128 | return os.environ.get('JUJU_RELATION', None) | 129 | return os.environ.get('JUJU_RELATION', None) |
947 | 129 | 130 | ||
948 | 130 | 131 | ||
949 | 131 | def relation_id(): | 132 | def relation_id(): |
951 | 132 | "The relation ID for the current relation hook" | 133 | """The relation ID for the current relation hook""" |
952 | 133 | return os.environ.get('JUJU_RELATION_ID', None) | 134 | return os.environ.get('JUJU_RELATION_ID', None) |
953 | 134 | 135 | ||
954 | 135 | 136 | ||
955 | 136 | def local_unit(): | 137 | def local_unit(): |
957 | 137 | "Local unit ID" | 138 | """Local unit ID""" |
958 | 138 | return os.environ['JUJU_UNIT_NAME'] | 139 | return os.environ['JUJU_UNIT_NAME'] |
959 | 139 | 140 | ||
960 | 140 | 141 | ||
961 | 141 | def remote_unit(): | 142 | def remote_unit(): |
963 | 142 | "The remote unit for the current relation hook" | 143 | """The remote unit for the current relation hook""" |
964 | 143 | return os.environ['JUJU_REMOTE_UNIT'] | 144 | return os.environ['JUJU_REMOTE_UNIT'] |
965 | 144 | 145 | ||
966 | 145 | 146 | ||
967 | 146 | def service_name(): | 147 | def service_name(): |
969 | 147 | "The name service group this unit belongs to" | 148 | """The name service group this unit belongs to""" |
970 | 148 | return local_unit().split('/')[0] | 149 | return local_unit().split('/')[0] |
971 | 149 | 150 | ||
972 | 150 | 151 | ||
973 | 151 | @cached | 152 | @cached |
974 | 152 | def config(scope=None): | 153 | def config(scope=None): |
976 | 153 | "Juju charm configuration" | 154 | """Juju charm configuration""" |
977 | 154 | config_cmd_line = ['config-get'] | 155 | config_cmd_line = ['config-get'] |
978 | 155 | if scope is not None: | 156 | if scope is not None: |
979 | 156 | config_cmd_line.append(scope) | 157 | config_cmd_line.append(scope) |
980 | @@ -163,6 +164,7 @@ | |||
981 | 163 | 164 | ||
982 | 164 | @cached | 165 | @cached |
983 | 165 | def relation_get(attribute=None, unit=None, rid=None): | 166 | def relation_get(attribute=None, unit=None, rid=None): |
984 | 167 | """Get relation information""" | ||
985 | 166 | _args = ['relation-get', '--format=json'] | 168 | _args = ['relation-get', '--format=json'] |
986 | 167 | if rid: | 169 | if rid: |
987 | 168 | _args.append('-r') | 170 | _args.append('-r') |
988 | @@ -174,9 +176,14 @@ | |||
989 | 174 | return json.loads(subprocess.check_output(_args)) | 176 | return json.loads(subprocess.check_output(_args)) |
990 | 175 | except ValueError: | 177 | except ValueError: |
991 | 176 | return None | 178 | return None |
992 | 179 | except CalledProcessError, e: | ||
993 | 180 | if e.returncode == 2: | ||
994 | 181 | return None | ||
995 | 182 | raise | ||
996 | 177 | 183 | ||
997 | 178 | 184 | ||
998 | 179 | def relation_set(relation_id=None, relation_settings={}, **kwargs): | 185 | def relation_set(relation_id=None, relation_settings={}, **kwargs): |
999 | 186 | """Set relation information for the current unit""" | ||
1000 | 180 | relation_cmd_line = ['relation-set'] | 187 | relation_cmd_line = ['relation-set'] |
1001 | 181 | if relation_id is not None: | 188 | if relation_id is not None: |
1002 | 182 | relation_cmd_line.extend(('-r', relation_id)) | 189 | relation_cmd_line.extend(('-r', relation_id)) |
1003 | @@ -192,7 +199,7 @@ | |||
1004 | 192 | 199 | ||
1005 | 193 | @cached | 200 | @cached |
1006 | 194 | def relation_ids(reltype=None): | 201 | def relation_ids(reltype=None): |
1008 | 195 | "A list of relation_ids" | 202 | """A list of relation_ids""" |
1009 | 196 | reltype = reltype or relation_type() | 203 | reltype = reltype or relation_type() |
1010 | 197 | relid_cmd_line = ['relation-ids', '--format=json'] | 204 | relid_cmd_line = ['relation-ids', '--format=json'] |
1011 | 198 | if reltype is not None: | 205 | if reltype is not None: |
1012 | @@ -203,7 +210,7 @@ | |||
1013 | 203 | 210 | ||
1014 | 204 | @cached | 211 | @cached |
1015 | 205 | def related_units(relid=None): | 212 | def related_units(relid=None): |
1017 | 206 | "A list of related units" | 213 | """A list of related units""" |
1018 | 207 | relid = relid or relation_id() | 214 | relid = relid or relation_id() |
1019 | 208 | units_cmd_line = ['relation-list', '--format=json'] | 215 | units_cmd_line = ['relation-list', '--format=json'] |
1020 | 209 | if relid is not None: | 216 | if relid is not None: |
1021 | @@ -213,7 +220,7 @@ | |||
1022 | 213 | 220 | ||
1023 | 214 | @cached | 221 | @cached |
1024 | 215 | def relation_for_unit(unit=None, rid=None): | 222 | def relation_for_unit(unit=None, rid=None): |
1026 | 216 | "Get the json represenation of a unit's relation" | 223 | """Get the json represenation of a unit's relation""" |
1027 | 217 | unit = unit or remote_unit() | 224 | unit = unit or remote_unit() |
1028 | 218 | relation = relation_get(unit=unit, rid=rid) | 225 | relation = relation_get(unit=unit, rid=rid) |
1029 | 219 | for key in relation: | 226 | for key in relation: |
1030 | @@ -225,7 +232,7 @@ | |||
1031 | 225 | 232 | ||
1032 | 226 | @cached | 233 | @cached |
1033 | 227 | def relations_for_id(relid=None): | 234 | def relations_for_id(relid=None): |
1035 | 228 | "Get relations of a specific relation ID" | 235 | """Get relations of a specific relation ID""" |
1036 | 229 | relation_data = [] | 236 | relation_data = [] |
1037 | 230 | relid = relid or relation_ids() | 237 | relid = relid or relation_ids() |
1038 | 231 | for unit in related_units(relid): | 238 | for unit in related_units(relid): |
1039 | @@ -237,7 +244,7 @@ | |||
1040 | 237 | 244 | ||
1041 | 238 | @cached | 245 | @cached |
1042 | 239 | def relations_of_type(reltype=None): | 246 | def relations_of_type(reltype=None): |
1044 | 240 | "Get relations of a specific type" | 247 | """Get relations of a specific type""" |
1045 | 241 | relation_data = [] | 248 | relation_data = [] |
1046 | 242 | reltype = reltype or relation_type() | 249 | reltype = reltype or relation_type() |
1047 | 243 | for relid in relation_ids(reltype): | 250 | for relid in relation_ids(reltype): |
1048 | @@ -249,7 +256,7 @@ | |||
1049 | 249 | 256 | ||
1050 | 250 | @cached | 257 | @cached |
1051 | 251 | def relation_types(): | 258 | def relation_types(): |
1053 | 252 | "Get a list of relation types supported by this charm" | 259 | """Get a list of relation types supported by this charm""" |
1054 | 253 | charmdir = os.environ.get('CHARM_DIR', '') | 260 | charmdir = os.environ.get('CHARM_DIR', '') |
1055 | 254 | mdf = open(os.path.join(charmdir, 'metadata.yaml')) | 261 | mdf = open(os.path.join(charmdir, 'metadata.yaml')) |
1056 | 255 | md = yaml.safe_load(mdf) | 262 | md = yaml.safe_load(mdf) |
1057 | @@ -264,6 +271,7 @@ | |||
1058 | 264 | 271 | ||
1059 | 265 | @cached | 272 | @cached |
1060 | 266 | def relations(): | 273 | def relations(): |
1061 | 274 | """Get a nested dictionary of relation data for all related units""" | ||
1062 | 267 | rels = {} | 275 | rels = {} |
1063 | 268 | for reltype in relation_types(): | 276 | for reltype in relation_types(): |
1064 | 269 | relids = {} | 277 | relids = {} |
1065 | @@ -277,15 +285,35 @@ | |||
1066 | 277 | return rels | 285 | return rels |
1067 | 278 | 286 | ||
1068 | 279 | 287 | ||
1069 | 288 | @cached | ||
1070 | 289 | def is_relation_made(relation, keys='private-address'): | ||
1071 | 290 | ''' | ||
1072 | 291 | Determine whether a relation is established by checking for | ||
1073 | 292 | presence of key(s). If a list of keys is provided, they | ||
1074 | 293 | must all be present for the relation to be identified as made | ||
1075 | 294 | ''' | ||
1076 | 295 | if isinstance(keys, str): | ||
1077 | 296 | keys = [keys] | ||
1078 | 297 | for r_id in relation_ids(relation): | ||
1079 | 298 | for unit in related_units(r_id): | ||
1080 | 299 | context = {} | ||
1081 | 300 | for k in keys: | ||
1082 | 301 | context[k] = relation_get(k, rid=r_id, | ||
1083 | 302 | unit=unit) | ||
1084 | 303 | if None not in context.values(): | ||
1085 | 304 | return True | ||
1086 | 305 | return False | ||
1087 | 306 | |||
1088 | 307 | |||
1089 | 280 | def open_port(port, protocol="TCP"): | 308 | def open_port(port, protocol="TCP"): |
1091 | 281 | "Open a service network port" | 309 | """Open a service network port""" |
1092 | 282 | _args = ['open-port'] | 310 | _args = ['open-port'] |
1093 | 283 | _args.append('{}/{}'.format(port, protocol)) | 311 | _args.append('{}/{}'.format(port, protocol)) |
1094 | 284 | subprocess.check_call(_args) | 312 | subprocess.check_call(_args) |
1095 | 285 | 313 | ||
1096 | 286 | 314 | ||
1097 | 287 | def close_port(port, protocol="TCP"): | 315 | def close_port(port, protocol="TCP"): |
1099 | 288 | "Close a service network port" | 316 | """Close a service network port""" |
1100 | 289 | _args = ['close-port'] | 317 | _args = ['close-port'] |
1101 | 290 | _args.append('{}/{}'.format(port, protocol)) | 318 | _args.append('{}/{}'.format(port, protocol)) |
1102 | 291 | subprocess.check_call(_args) | 319 | subprocess.check_call(_args) |
1103 | @@ -293,6 +321,7 @@ | |||
1104 | 293 | 321 | ||
1105 | 294 | @cached | 322 | @cached |
1106 | 295 | def unit_get(attribute): | 323 | def unit_get(attribute): |
1107 | 324 | """Get the unit ID for the remote unit""" | ||
1108 | 296 | _args = ['unit-get', '--format=json', attribute] | 325 | _args = ['unit-get', '--format=json', attribute] |
1109 | 297 | try: | 326 | try: |
1110 | 298 | return json.loads(subprocess.check_output(_args)) | 327 | return json.loads(subprocess.check_output(_args)) |
1111 | @@ -301,22 +330,46 @@ | |||
1112 | 301 | 330 | ||
1113 | 302 | 331 | ||
1114 | 303 | def unit_private_ip(): | 332 | def unit_private_ip(): |
1115 | 333 | """Get this unit's private IP address""" | ||
1116 | 304 | return unit_get('private-address') | 334 | return unit_get('private-address') |
1117 | 305 | 335 | ||
1118 | 306 | 336 | ||
1119 | 307 | class UnregisteredHookError(Exception): | 337 | class UnregisteredHookError(Exception): |
1120 | 338 | """Raised when an undefined hook is called""" | ||
1121 | 308 | pass | 339 | pass |
1122 | 309 | 340 | ||
1123 | 310 | 341 | ||
1124 | 311 | class Hooks(object): | 342 | class Hooks(object): |
1125 | 343 | """A convenient handler for hook functions. | ||
1126 | 344 | |||
1127 | 345 | Example: | ||
1128 | 346 | hooks = Hooks() | ||
1129 | 347 | |||
1130 | 348 | # register a hook, taking its name from the function name | ||
1131 | 349 | @hooks.hook() | ||
1132 | 350 | def install(): | ||
1133 | 351 | ... | ||
1134 | 352 | |||
1135 | 353 | # register a hook, providing a custom hook name | ||
1136 | 354 | @hooks.hook("config-changed") | ||
1137 | 355 | def config_changed(): | ||
1138 | 356 | ... | ||
1139 | 357 | |||
1140 | 358 | if __name__ == "__main__": | ||
1141 | 359 | # execute a hook based on the name the program is called by | ||
1142 | 360 | hooks.execute(sys.argv) | ||
1143 | 361 | """ | ||
1144 | 362 | |||
1145 | 312 | def __init__(self): | 363 | def __init__(self): |
1146 | 313 | super(Hooks, self).__init__() | 364 | super(Hooks, self).__init__() |
1147 | 314 | self._hooks = {} | 365 | self._hooks = {} |
1148 | 315 | 366 | ||
1149 | 316 | def register(self, name, function): | 367 | def register(self, name, function): |
1150 | 368 | """Register a hook""" | ||
1151 | 317 | self._hooks[name] = function | 369 | self._hooks[name] = function |
1152 | 318 | 370 | ||
1153 | 319 | def execute(self, args): | 371 | def execute(self, args): |
1154 | 372 | """Execute a registered hook based on args[0]""" | ||
1155 | 320 | hook_name = os.path.basename(args[0]) | 373 | hook_name = os.path.basename(args[0]) |
1156 | 321 | if hook_name in self._hooks: | 374 | if hook_name in self._hooks: |
1157 | 322 | self._hooks[hook_name]() | 375 | self._hooks[hook_name]() |
1158 | @@ -324,6 +377,7 @@ | |||
1159 | 324 | raise UnregisteredHookError(hook_name) | 377 | raise UnregisteredHookError(hook_name) |
1160 | 325 | 378 | ||
1161 | 326 | def hook(self, *hook_names): | 379 | def hook(self, *hook_names): |
1162 | 380 | """Decorator, registering them as hooks""" | ||
1163 | 327 | def wrapper(decorated): | 381 | def wrapper(decorated): |
1164 | 328 | for hook_name in hook_names: | 382 | for hook_name in hook_names: |
1165 | 329 | self.register(hook_name, decorated) | 383 | self.register(hook_name, decorated) |
1166 | @@ -337,4 +391,5 @@ | |||
1167 | 337 | 391 | ||
1168 | 338 | 392 | ||
1169 | 339 | def charm_dir(): | 393 | def charm_dir(): |
1170 | 394 | """Return the root directory of the current charm""" | ||
1171 | 340 | return os.environ.get('CHARM_DIR') | 395 | return os.environ.get('CHARM_DIR') |
1172 | 341 | 396 | ||
1173 | === modified file 'hooks/charmhelpers/core/host.py' | |||
1174 | --- hooks/charmhelpers/core/host.py 2013-09-02 16:14:27 +0000 | |||
1175 | +++ hooks/charmhelpers/core/host.py 2013-11-08 05:56:36 +0000 | |||
1176 | @@ -19,18 +19,22 @@ | |||
1177 | 19 | 19 | ||
1178 | 20 | 20 | ||
1179 | 21 | def service_start(service_name): | 21 | def service_start(service_name): |
1180 | 22 | """Start a system service""" | ||
1181 | 22 | return service('start', service_name) | 23 | return service('start', service_name) |
1182 | 23 | 24 | ||
1183 | 24 | 25 | ||
1184 | 25 | def service_stop(service_name): | 26 | def service_stop(service_name): |
1185 | 27 | """Stop a system service""" | ||
1186 | 26 | return service('stop', service_name) | 28 | return service('stop', service_name) |
1187 | 27 | 29 | ||
1188 | 28 | 30 | ||
1189 | 29 | def service_restart(service_name): | 31 | def service_restart(service_name): |
1190 | 32 | """Restart a system service""" | ||
1191 | 30 | return service('restart', service_name) | 33 | return service('restart', service_name) |
1192 | 31 | 34 | ||
1193 | 32 | 35 | ||
1194 | 33 | def service_reload(service_name, restart_on_failure=False): | 36 | def service_reload(service_name, restart_on_failure=False): |
1195 | 37 | """Reload a system service, optionally falling back to restart if reload fails""" | ||
1196 | 34 | service_result = service('reload', service_name) | 38 | service_result = service('reload', service_name) |
1197 | 35 | if not service_result and restart_on_failure: | 39 | if not service_result and restart_on_failure: |
1198 | 36 | service_result = service('restart', service_name) | 40 | service_result = service('restart', service_name) |
1199 | @@ -38,11 +42,13 @@ | |||
1200 | 38 | 42 | ||
1201 | 39 | 43 | ||
1202 | 40 | def service(action, service_name): | 44 | def service(action, service_name): |
1203 | 45 | """Control a system service""" | ||
1204 | 41 | cmd = ['service', service_name, action] | 46 | cmd = ['service', service_name, action] |
1205 | 42 | return subprocess.call(cmd) == 0 | 47 | return subprocess.call(cmd) == 0 |
1206 | 43 | 48 | ||
1207 | 44 | 49 | ||
1208 | 45 | def service_running(service): | 50 | def service_running(service): |
1209 | 51 | """Determine whether a system service is running""" | ||
1210 | 46 | try: | 52 | try: |
1211 | 47 | output = subprocess.check_output(['service', service, 'status']) | 53 | output = subprocess.check_output(['service', service, 'status']) |
1212 | 48 | except subprocess.CalledProcessError: | 54 | except subprocess.CalledProcessError: |
1213 | @@ -55,7 +61,7 @@ | |||
1214 | 55 | 61 | ||
1215 | 56 | 62 | ||
1216 | 57 | def adduser(username, password=None, shell='/bin/bash', system_user=False): | 63 | def adduser(username, password=None, shell='/bin/bash', system_user=False): |
1218 | 58 | """Add a user""" | 64 | """Add a user to the system""" |
1219 | 59 | try: | 65 | try: |
1220 | 60 | user_info = pwd.getpwnam(username) | 66 | user_info = pwd.getpwnam(username) |
1221 | 61 | log('user {0} already exists!'.format(username)) | 67 | log('user {0} already exists!'.format(username)) |
1222 | @@ -138,7 +144,7 @@ | |||
1223 | 138 | 144 | ||
1224 | 139 | 145 | ||
1225 | 140 | def mount(device, mountpoint, options=None, persist=False): | 146 | def mount(device, mountpoint, options=None, persist=False): |
1227 | 141 | '''Mount a filesystem''' | 147 | """Mount a filesystem at a particular mountpoint""" |
1228 | 142 | cmd_args = ['mount'] | 148 | cmd_args = ['mount'] |
1229 | 143 | if options is not None: | 149 | if options is not None: |
1230 | 144 | cmd_args.extend(['-o', options]) | 150 | cmd_args.extend(['-o', options]) |
1231 | @@ -155,7 +161,7 @@ | |||
1232 | 155 | 161 | ||
1233 | 156 | 162 | ||
1234 | 157 | def umount(mountpoint, persist=False): | 163 | def umount(mountpoint, persist=False): |
1236 | 158 | '''Unmount a filesystem''' | 164 | """Unmount a filesystem""" |
1237 | 159 | cmd_args = ['umount', mountpoint] | 165 | cmd_args = ['umount', mountpoint] |
1238 | 160 | try: | 166 | try: |
1239 | 161 | subprocess.check_output(cmd_args) | 167 | subprocess.check_output(cmd_args) |
1240 | @@ -169,7 +175,7 @@ | |||
1241 | 169 | 175 | ||
1242 | 170 | 176 | ||
1243 | 171 | def mounts(): | 177 | def mounts(): |
1245 | 172 | '''List of all mounted volumes as [[mountpoint,device],[...]]''' | 178 | """Get a list of all mounted volumes as [[mountpoint,device],[...]]""" |
1246 | 173 | with open('/proc/mounts') as f: | 179 | with open('/proc/mounts') as f: |
1247 | 174 | # [['/mount/point','/dev/path'],[...]] | 180 | # [['/mount/point','/dev/path'],[...]] |
1248 | 175 | system_mounts = [m[1::-1] for m in [l.strip().split() | 181 | system_mounts = [m[1::-1] for m in [l.strip().split() |
1249 | @@ -178,7 +184,7 @@ | |||
1250 | 178 | 184 | ||
1251 | 179 | 185 | ||
1252 | 180 | def file_hash(path): | 186 | def file_hash(path): |
1254 | 181 | ''' Generate a md5 hash of the contents of 'path' or None if not found ''' | 187 | """Generate a md5 hash of the contents of 'path' or None if not found """ |
1255 | 182 | if os.path.exists(path): | 188 | if os.path.exists(path): |
1256 | 183 | h = hashlib.md5() | 189 | h = hashlib.md5() |
1257 | 184 | with open(path, 'r') as source: | 190 | with open(path, 'r') as source: |
1258 | @@ -189,7 +195,7 @@ | |||
1259 | 189 | 195 | ||
1260 | 190 | 196 | ||
1261 | 191 | def restart_on_change(restart_map): | 197 | def restart_on_change(restart_map): |
1263 | 192 | ''' Restart services based on configuration files changing | 198 | """Restart services based on configuration files changing |
1264 | 193 | 199 | ||
1265 | 194 | This function is used a decorator, for example | 200 | This function is used a decorator, for example |
1266 | 195 | 201 | ||
1267 | @@ -202,7 +208,7 @@ | |||
1268 | 202 | In this example, the cinder-api and cinder-volume services | 208 | In this example, the cinder-api and cinder-volume services |
1269 | 203 | would be restarted if /etc/ceph/ceph.conf is changed by the | 209 | would be restarted if /etc/ceph/ceph.conf is changed by the |
1270 | 204 | ceph_client_changed function. | 210 | ceph_client_changed function. |
1272 | 205 | ''' | 211 | """ |
1273 | 206 | def wrap(f): | 212 | def wrap(f): |
1274 | 207 | def wrapped_f(*args): | 213 | def wrapped_f(*args): |
1275 | 208 | checksums = {} | 214 | checksums = {} |
1276 | @@ -220,7 +226,7 @@ | |||
1277 | 220 | 226 | ||
1278 | 221 | 227 | ||
1279 | 222 | def lsb_release(): | 228 | def lsb_release(): |
1281 | 223 | '''Return /etc/lsb-release in a dict''' | 229 | """Return /etc/lsb-release in a dict""" |
1282 | 224 | d = {} | 230 | d = {} |
1283 | 225 | with open('/etc/lsb-release', 'r') as lsb: | 231 | with open('/etc/lsb-release', 'r') as lsb: |
1284 | 226 | for l in lsb: | 232 | for l in lsb: |
1285 | @@ -230,7 +236,7 @@ | |||
1286 | 230 | 236 | ||
1287 | 231 | 237 | ||
1288 | 232 | def pwgen(length=None): | 238 | def pwgen(length=None): |
1290 | 233 | '''Generate a random pasword.''' | 239 | """Generate a random pasword.""" |
1291 | 234 | if length is None: | 240 | if length is None: |
1292 | 235 | length = random.choice(range(35, 45)) | 241 | length = random.choice(range(35, 45)) |
1293 | 236 | alphanumeric_chars = [ | 242 | alphanumeric_chars = [ |
1294 | 237 | 243 | ||
1295 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
1296 | --- hooks/charmhelpers/fetch/__init__.py 2013-09-02 16:36:50 +0000 | |||
1297 | +++ hooks/charmhelpers/fetch/__init__.py 2013-11-08 05:56:36 +0000 | |||
1298 | @@ -20,6 +20,32 @@ | |||
1299 | 20 | PROPOSED_POCKET = """# Proposed | 20 | PROPOSED_POCKET = """# Proposed |
1300 | 21 | deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted | 21 | deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted |
1301 | 22 | """ | 22 | """ |
1302 | 23 | CLOUD_ARCHIVE_POCKETS = { | ||
1303 | 24 | # Folsom | ||
1304 | 25 | 'folsom': 'precise-updates/folsom', | ||
1305 | 26 | 'precise-folsom': 'precise-updates/folsom', | ||
1306 | 27 | 'precise-folsom/updates': 'precise-updates/folsom', | ||
1307 | 28 | 'precise-updates/folsom': 'precise-updates/folsom', | ||
1308 | 29 | 'folsom/proposed': 'precise-proposed/folsom', | ||
1309 | 30 | 'precise-folsom/proposed': 'precise-proposed/folsom', | ||
1310 | 31 | 'precise-proposed/folsom': 'precise-proposed/folsom', | ||
1311 | 32 | # Grizzly | ||
1312 | 33 | 'grizzly': 'precise-updates/grizzly', | ||
1313 | 34 | 'precise-grizzly': 'precise-updates/grizzly', | ||
1314 | 35 | 'precise-grizzly/updates': 'precise-updates/grizzly', | ||
1315 | 36 | 'precise-updates/grizzly': 'precise-updates/grizzly', | ||
1316 | 37 | 'grizzly/proposed': 'precise-proposed/grizzly', | ||
1317 | 38 | 'precise-grizzly/proposed': 'precise-proposed/grizzly', | ||
1318 | 39 | 'precise-proposed/grizzly': 'precise-proposed/grizzly', | ||
1319 | 40 | # Havana | ||
1320 | 41 | 'havana': 'precise-updates/havana', | ||
1321 | 42 | 'precise-havana': 'precise-updates/havana', | ||
1322 | 43 | 'precise-havana/updates': 'precise-updates/havana', | ||
1323 | 44 | 'precise-updates/havana': 'precise-updates/havana', | ||
1324 | 45 | 'havana/proposed': 'precise-proposed/havana', | ||
1325 | 46 | 'precise-havana/proposed': 'precise-proposed/havana', | ||
1326 | 47 | 'precise-proposed/havana': 'precise-proposed/havana', | ||
1327 | 48 | } | ||
1328 | 23 | 49 | ||
1329 | 24 | 50 | ||
1330 | 25 | def filter_installed_packages(packages): | 51 | def filter_installed_packages(packages): |
1331 | @@ -79,16 +105,35 @@ | |||
1332 | 79 | subprocess.call(cmd) | 105 | subprocess.call(cmd) |
1333 | 80 | 106 | ||
1334 | 81 | 107 | ||
1335 | 108 | def apt_hold(packages, fatal=False): | ||
1336 | 109 | """Hold one or more packages""" | ||
1337 | 110 | cmd = ['apt-mark', 'hold'] | ||
1338 | 111 | if isinstance(packages, basestring): | ||
1339 | 112 | cmd.append(packages) | ||
1340 | 113 | else: | ||
1341 | 114 | cmd.extend(packages) | ||
1342 | 115 | log("Holding {}".format(packages)) | ||
1343 | 116 | if fatal: | ||
1344 | 117 | subprocess.check_call(cmd) | ||
1345 | 118 | else: | ||
1346 | 119 | subprocess.call(cmd) | ||
1347 | 120 | |||
1348 | 121 | |||
1349 | 82 | def add_source(source, key=None): | 122 | def add_source(source, key=None): |
1352 | 83 | if ((source.startswith('ppa:') or | 123 | if (source.startswith('ppa:') or |
1353 | 84 | source.startswith('http:'))): | 124 | source.startswith('http:') or |
1354 | 125 | source.startswith('deb ') or | ||
1355 | 126 | source.startswith('cloud-archive:')): | ||
1356 | 85 | subprocess.check_call(['add-apt-repository', '--yes', source]) | 127 | subprocess.check_call(['add-apt-repository', '--yes', source]) |
1357 | 86 | elif source.startswith('cloud:'): | 128 | elif source.startswith('cloud:'): |
1358 | 87 | apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), | 129 | apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), |
1359 | 88 | fatal=True) | 130 | fatal=True) |
1360 | 89 | pocket = source.split(':')[-1] | 131 | pocket = source.split(':')[-1] |
1361 | 132 | if pocket not in CLOUD_ARCHIVE_POCKETS: | ||
1362 | 133 | raise SourceConfigError('Unsupported cloud: source option %s' % pocket) | ||
1363 | 134 | actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] | ||
1364 | 90 | with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: | 135 | with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: |
1366 | 91 | apt.write(CLOUD_ARCHIVE.format(pocket)) | 136 | apt.write(CLOUD_ARCHIVE.format(actual_pocket)) |
1367 | 92 | elif source == 'proposed': | 137 | elif source == 'proposed': |
1368 | 93 | release = lsb_release()['DISTRIB_CODENAME'] | 138 | release = lsb_release()['DISTRIB_CODENAME'] |
1369 | 94 | with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: | 139 | with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: |
1370 | @@ -118,8 +163,11 @@ | |||
1371 | 118 | Note that 'null' (a.k.a. None) should not be quoted. | 163 | Note that 'null' (a.k.a. None) should not be quoted. |
1372 | 119 | """ | 164 | """ |
1373 | 120 | sources = safe_load(config(sources_var)) | 165 | sources = safe_load(config(sources_var)) |
1376 | 121 | keys = safe_load(config(keys_var)) | 166 | keys = config(keys_var) |
1377 | 122 | if isinstance(sources, basestring) and isinstance(keys, basestring): | 167 | if keys is not None: |
1378 | 168 | keys = safe_load(keys) | ||
1379 | 169 | if isinstance(sources, basestring) and ( | ||
1380 | 170 | keys is None or isinstance(keys, basestring)): | ||
1381 | 123 | add_source(sources, keys) | 171 | add_source(sources, keys) |
1382 | 124 | else: | 172 | else: |
1383 | 125 | if not len(sources) == len(keys): | 173 | if not len(sources) == len(keys): |
1384 | 126 | 174 | ||
1385 | === modified file 'hooks/charmhelpers/fetch/bzrurl.py' | |||
1386 | --- hooks/charmhelpers/fetch/bzrurl.py 2013-09-02 16:36:50 +0000 | |||
1387 | +++ hooks/charmhelpers/fetch/bzrurl.py 2013-11-08 05:56:36 +0000 | |||
1388 | @@ -12,6 +12,7 @@ | |||
1389 | 12 | apt_install("python-bzrlib") | 12 | apt_install("python-bzrlib") |
1390 | 13 | from bzrlib.branch import Branch | 13 | from bzrlib.branch import Branch |
1391 | 14 | 14 | ||
1392 | 15 | |||
1393 | 15 | class BzrUrlFetchHandler(BaseFetchHandler): | 16 | class BzrUrlFetchHandler(BaseFetchHandler): |
1394 | 16 | """Handler for bazaar branches via generic and lp URLs""" | 17 | """Handler for bazaar branches via generic and lp URLs""" |
1395 | 17 | def can_handle(self, source): | 18 | def can_handle(self, source): |
1396 | @@ -46,4 +47,3 @@ | |||
1397 | 46 | except OSError as e: | 47 | except OSError as e: |
1398 | 47 | raise UnhandledSource(e.strerror) | 48 | raise UnhandledSource(e.strerror) |
1399 | 48 | return dest_dir | 49 | return dest_dir |
1400 | 49 | |||
1401 | 50 | 50 | ||
1402 | === modified file 'hooks/quantum_utils.py' | |||
1403 | --- hooks/quantum_utils.py 2013-10-16 12:19:22 +0000 | |||
1404 | +++ hooks/quantum_utils.py 2013-11-08 05:56:36 +0000 | |||
1405 | @@ -80,7 +80,6 @@ | |||
1406 | 80 | "nova-api-metadata" | 80 | "nova-api-metadata" |
1407 | 81 | ], | 81 | ], |
1408 | 82 | NVP: [ | 82 | NVP: [ |
1409 | 83 | "openvswitch-switch", | ||
1410 | 84 | "neutron-dhcp-agent", | 83 | "neutron-dhcp-agent", |
1411 | 85 | 'python-mysqldb', | 84 | 'python-mysqldb', |
1412 | 86 | 'python-oslo.config', # Force upgrade | 85 | 'python-oslo.config', # Force upgrade |
1413 | @@ -95,7 +94,7 @@ | |||
1414 | 95 | 94 | ||
1415 | 96 | EARLY_PACKAGES = { | 95 | EARLY_PACKAGES = { |
1416 | 97 | OVS: ['openvswitch-datapath-dkms'], | 96 | OVS: ['openvswitch-datapath-dkms'], |
1418 | 98 | NVP: ['openvswitch-datapath-dkms'] | 97 | NVP: [] |
1419 | 99 | } | 98 | } |
1420 | 100 | 99 | ||
1421 | 101 | 100 | ||
1422 | @@ -396,13 +395,11 @@ | |||
1423 | 396 | 395 | ||
1424 | 397 | 396 | ||
1425 | 398 | def configure_ovs(): | 397 | def configure_ovs(): |
1426 | 399 | if not service_running('openvswitch-switch'): | ||
1427 | 400 | full_restart() | ||
1428 | 401 | if config('plugin') == OVS: | 398 | if config('plugin') == OVS: |
1429 | 399 | if not service_running('openvswitch-switch'): | ||
1430 | 400 | full_restart() | ||
1431 | 402 | add_bridge(INT_BRIDGE) | 401 | add_bridge(INT_BRIDGE) |
1432 | 403 | add_bridge(EXT_BRIDGE) | 402 | add_bridge(EXT_BRIDGE) |
1433 | 404 | ext_port = config('ext-port') | 403 | ext_port = config('ext-port') |
1434 | 405 | if ext_port: | 404 | if ext_port: |
1435 | 406 | add_bridge_port(EXT_BRIDGE, ext_port) | 405 | add_bridge_port(EXT_BRIDGE, ext_port) |
1436 | 407 | if config('plugin') == NVP: | ||
1437 | 408 | add_bridge(INT_BRIDGE) | ||
1438 | 409 | 406 | ||
1439 | === modified file 'unit_tests/test_quantum_utils.py' | |||
1440 | --- unit_tests/test_quantum_utils.py 2013-10-19 15:19:41 +0000 | |||
1441 | +++ unit_tests/test_quantum_utils.py 2013-11-08 05:56:36 +0000 | |||
1442 | @@ -6,6 +6,11 @@ | |||
1443 | 6 | 6 | ||
1444 | 7 | import quantum_utils | 7 | import quantum_utils |
1445 | 8 | 8 | ||
1446 | 9 | try: | ||
1447 | 10 | import neutronclient | ||
1448 | 11 | except ImportError: | ||
1449 | 12 | neutronclient = None | ||
1450 | 13 | |||
1451 | 9 | from test_utils import ( | 14 | from test_utils import ( |
1452 | 10 | CharmTestCase | 15 | CharmTestCase |
1453 | 11 | ) | 16 | ) |
1454 | @@ -60,7 +65,7 @@ | |||
1455 | 60 | self.config.return_value = 'nvp' | 65 | self.config.return_value = 'nvp' |
1456 | 61 | self.assertEquals( | 66 | self.assertEquals( |
1457 | 62 | quantum_utils.get_early_packages(), | 67 | quantum_utils.get_early_packages(), |
1459 | 63 | ['openvswitch-datapath-dkms', 'linux-headers-2.6.18']) | 68 | []) |
1460 | 64 | 69 | ||
1461 | 65 | @patch.object(quantum_utils, 'EARLY_PACKAGES') | 70 | @patch.object(quantum_utils, 'EARLY_PACKAGES') |
1462 | 66 | def test_get_early_packages_no_dkms(self, _early_packages): | 71 | def test_get_early_packages_no_dkms(self, _early_packages): |
1463 | @@ -76,6 +81,7 @@ | |||
1464 | 76 | self.assertNotEqual(quantum_utils.get_packages(), []) | 81 | self.assertNotEqual(quantum_utils.get_packages(), []) |
1465 | 77 | 82 | ||
1466 | 78 | def test_configure_ovs_starts_service_if_required(self): | 83 | def test_configure_ovs_starts_service_if_required(self): |
1467 | 84 | self.config.return_value = 'ovs' | ||
1468 | 79 | self.service_running.return_value = False | 85 | self.service_running.return_value = False |
1469 | 80 | quantum_utils.configure_ovs() | 86 | quantum_utils.configure_ovs() |
1470 | 81 | self.assertTrue(self.full_restart.called) | 87 | self.assertTrue(self.full_restart.called) |
1471 | @@ -96,11 +102,6 @@ | |||
1472 | 96 | ]) | 102 | ]) |
1473 | 97 | self.add_bridge_port.assert_called_with('br-ex', 'eth0') | 103 | self.add_bridge_port.assert_called_with('br-ex', 'eth0') |
1474 | 98 | 104 | ||
1475 | 99 | def test_configure_ovs_nvp(self): | ||
1476 | 100 | self.config.return_value = 'nvp' | ||
1477 | 101 | quantum_utils.configure_ovs() | ||
1478 | 102 | self.add_bridge.assert_called_with('br-int') | ||
1479 | 103 | |||
1480 | 104 | def test_do_openstack_upgrade(self): | 105 | def test_do_openstack_upgrade(self): |
1481 | 105 | self.config.side_effect = self.test_config.get | 106 | self.config.side_effect = self.test_config.get |
1482 | 106 | self.test_config.set('openstack-origin', 'cloud:precise-havana') | 107 | self.test_config.set('openstack-origin', 'cloud:precise-havana') |
1483 | @@ -293,6 +294,8 @@ | |||
1484 | 293 | 294 | ||
1485 | 294 | class TestQuantumAgentReallocation(CharmTestCase): | 295 | class TestQuantumAgentReallocation(CharmTestCase): |
1486 | 295 | def setUp(self): | 296 | def setUp(self): |
1487 | 297 | if not neutronclient: | ||
1488 | 298 | self.skipTest('Skipping, no neutronclient installed') | ||
1489 | 296 | super(TestQuantumAgentReallocation, self).setUp(quantum_utils, | 299 | super(TestQuantumAgentReallocation, self).setUp(quantum_utils, |
1490 | 297 | TO_PATCH) | 300 | TO_PATCH) |
1491 | 298 | 301 |