Merge lp:~openstack-charmers/charms/precise/nova-compute/python-redux into lp:~charmers/charms/precise/nova-compute/trunk

Proposed by Adam Gandelman
Status: Merged
Merged at revision: 47
Proposed branch: lp:~openstack-charmers/charms/precise/nova-compute/python-redux
Merge into: lp:~charmers/charms/precise/nova-compute/trunk
Diff against target: 8327 lines (+6091/-1846)
59 files modified
.coveragerc (+6/-0)
.project (+17/-0)
.pydevproject (+9/-0)
Makefile (+14/-0)
charm-helpers.yaml (+12/-0)
config.yaml (+10/-3)
hooks/charmhelpers/contrib/hahelpers/apache.py (+58/-0)
hooks/charmhelpers/contrib/hahelpers/ceph.py (+294/-0)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+183/-0)
hooks/charmhelpers/contrib/network/ovs/__init__.py (+72/-0)
hooks/charmhelpers/contrib/openstack/context.py (+522/-0)
hooks/charmhelpers/contrib/openstack/neutron.py (+117/-0)
hooks/charmhelpers/contrib/openstack/templates/__init__.py (+2/-0)
hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+11/-0)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+37/-0)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend (+23/-0)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf (+23/-0)
hooks/charmhelpers/contrib/openstack/templating.py (+280/-0)
hooks/charmhelpers/contrib/openstack/utils.py (+365/-0)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+359/-0)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+62/-0)
hooks/charmhelpers/contrib/storage/linux/lvm.py (+88/-0)
hooks/charmhelpers/contrib/storage/linux/utils.py (+25/-0)
hooks/charmhelpers/core/hookenv.py (+340/-0)
hooks/charmhelpers/core/host.py (+241/-0)
hooks/charmhelpers/fetch/__init__.py (+209/-0)
hooks/charmhelpers/fetch/archiveurl.py (+48/-0)
hooks/charmhelpers/fetch/bzrurl.py (+49/-0)
hooks/charmhelpers/payload/__init__.py (+1/-0)
hooks/charmhelpers/payload/execd.py (+50/-0)
hooks/lib/nova/essex (+0/-43)
hooks/lib/nova/folsom (+0/-135)
hooks/lib/nova/grizzly (+0/-97)
hooks/lib/nova/nova-common (+0/-148)
hooks/lib/openstack-common (+0/-781)
hooks/nova-compute-common (+0/-309)
hooks/nova-compute-relations (+0/-329)
hooks/nova_compute_context.py (+328/-0)
hooks/nova_compute_hooks.py (+210/-0)
hooks/nova_compute_utils.py (+376/-0)
metadata.yaml (+2/-0)
revision (+1/-1)
templates/essex/nova.conf (+33/-0)
templates/folsom/nova.conf (+79/-0)
templates/folsom/ovs_quantum_plugin.ini (+27/-0)
templates/folsom/quantum.conf (+37/-0)
templates/grizzly/nova.conf (+81/-0)
templates/havana/neutron.conf (+39/-0)
templates/havana/nova.conf (+81/-0)
templates/havana/ovs_neutron_plugin.ini (+27/-0)
templates/libvirt-bin (+16/-0)
templates/libvirtd.conf (+400/-0)
templates/qemu.conf (+12/-0)
templates/secret.xml (+8/-0)
unit_tests/__init__.py (+3/-0)
unit_tests/test_nova_compute_contexts.py (+164/-0)
unit_tests/test_nova_compute_hooks.py (+278/-0)
unit_tests/test_nova_compute_utils.py (+244/-0)
unit_tests/test_utils.py (+118/-0)
To merge this branch: bzr merge lp:~openstack-charmers/charms/precise/nova-compute/python-redux
Reviewer: charmers
Status: Pending
Review via email: mp+191084@code.launchpad.net

Description of the change

Update of all Havana / Saucy / python-redux work:

* Full python rewrite using new OpenStack charm-helpers (see the dispatch sketch below).

* Test coverage

* Havana support
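
For reviewers new to the layout: the shell implementation (hooks/nova-compute-common, hooks/nova-compute-relations, hooks/lib/*) is removed and replaced by Python modules (nova_compute_hooks.py, nova_compute_utils.py, nova_compute_context.py) dispatched through charm-helpers. A minimal sketch of the dispatch pattern, assuming the standard charmhelpers Hooks class; the handler name and body here are illustrative, see hooks/nova_compute_hooks.py in the diff for the real handlers:

    import sys
    from charmhelpers.core.hookenv import Hooks, UnregisteredHookError, log

    hooks = Hooks()

    @hooks.hook('amqp-relation-changed')
    def amqp_changed():
        # re-render any config files whose contexts are now complete
        log('amqp relation changed; re-rendering nova.conf')

    if __name__ == '__main__':
        try:
            hooks.execute(sys.argv)
        except UnregisteredHookError as e:
            log('Unknown hook {} - skipping.'.format(e))

All relation symlinks under hooks/ now point at this single entry point, as the symlink changes in the diff below show.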


Preview Diff

1=== added file '.coveragerc'
2--- .coveragerc 1970-01-01 00:00:00 +0000
3+++ .coveragerc 2013-10-15 01:35:28 +0000
4@@ -0,0 +1,6 @@
5+[report]
6+# Regexes for lines to exclude from consideration
7+exclude_lines =
8+ if __name__ == .__main__.:
9+include=
10+ hooks/nova_*
11
12=== added file '.project'
13--- .project 1970-01-01 00:00:00 +0000
14+++ .project 2013-10-15 01:35:28 +0000
15@@ -0,0 +1,17 @@
16+<?xml version="1.0" encoding="UTF-8"?>
17+<projectDescription>
18+ <name>nova-compute</name>
19+ <comment></comment>
20+ <projects>
21+ </projects>
22+ <buildSpec>
23+ <buildCommand>
24+ <name>org.python.pydev.PyDevBuilder</name>
25+ <arguments>
26+ </arguments>
27+ </buildCommand>
28+ </buildSpec>
29+ <natures>
30+ <nature>org.python.pydev.pythonNature</nature>
31+ </natures>
32+</projectDescription>
33
34=== added file '.pydevproject'
35--- .pydevproject 1970-01-01 00:00:00 +0000
36+++ .pydevproject 2013-10-15 01:35:28 +0000
37@@ -0,0 +1,9 @@
38+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
39+<?eclipse-pydev version="1.0"?><pydev_project>
40+<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
41+<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
42+<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
43+<path>/nova-compute/hooks</path>
44+<path>/nova-compute/unit_tests</path>
45+</pydev_pathproperty>
46+</pydev_project>
47
48=== added file 'Makefile'
49--- Makefile 1970-01-01 00:00:00 +0000
50+++ Makefile 2013-10-15 01:35:28 +0000
51@@ -0,0 +1,14 @@
52+#!/usr/bin/make
53+PYTHON := /usr/bin/env python
54+
55+lint:
56+ @flake8 --exclude hooks/charmhelpers hooks
57+ @flake8 --exclude hooks/charmhelpers unit_tests
58+ @charm proof
59+
60+test:
61+ @echo Starting tests...
62+ @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
63+
64+sync:
65+ @charm-helper-sync -c charm-helpers.yaml
66
67=== added file 'charm-helpers.yaml'
68--- charm-helpers.yaml 1970-01-01 00:00:00 +0000
69+++ charm-helpers.yaml 2013-10-15 01:35:28 +0000
70@@ -0,0 +1,12 @@
71+branch: lp:charm-helpers
72+destination: hooks/charmhelpers
73+include:
74+ - core
75+ - fetch
76+ - contrib.openstack|inc=*
77+ - contrib.storage
78+ - contrib.hahelpers:
79+ - apache
80+ - cluster
81+ - contrib.network.ovs
82+ - payload.execd
83
84=== modified file 'config.yaml'
85--- config.yaml 2013-05-20 11:28:14 +0000
86+++ config.yaml 2013-10-15 01:35:28 +0000
87@@ -26,14 +26,22 @@
88 default: nova
89 type: string
90 description: RabbitMQ vhost
91- db-user:
92+ database-user:
93 default: nova
94 type: string
95 description: Username for database access
96- nova-db:
97+ database:
98 default: nova
99 type: string
100 description: Database name
101+ neutron-database-user:
102+ default: neutron
103+ type: string
104+ description: Username for Neutron database access (if enabled)
105+ neutron-database:
106+ default: neutron
107+ type: string
108+ description: Database name for Neutron (if enabled)
109 virt-type:
110 default: kvm
111 type: string
112@@ -71,7 +79,6 @@
113 type: string
114 description: Network interface on which to build bridge
115 config-flags:
116- default: None
117 type: string
118 description: Comma separated list of key=value config flags to be set in nova.conf.
119 nagios_context:
120
121=== added file 'hooks/__init__.py'
122=== added symlink 'hooks/amqp-relation-broken'
123=== target is u'nova_compute_hooks.py'
124=== modified symlink 'hooks/amqp-relation-changed'
125=== target changed u'nova-compute-relations' => u'nova_compute_hooks.py'
126=== modified symlink 'hooks/amqp-relation-joined'
127=== target changed u'nova-compute-relations' => u'nova_compute_hooks.py'
128=== added symlink 'hooks/ceph-relation-broken'
129=== target is u'nova_compute_hooks.py'
130=== modified symlink 'hooks/ceph-relation-changed'
131=== target changed u'nova-compute-relations' => u'nova_compute_hooks.py'
132=== modified symlink 'hooks/ceph-relation-joined'
133=== target changed u'nova-compute-relations' => u'nova_compute_hooks.py'
134=== added directory 'hooks/charmhelpers'
135=== added file 'hooks/charmhelpers/__init__.py'
136=== added directory 'hooks/charmhelpers/contrib'
137=== added file 'hooks/charmhelpers/contrib/__init__.py'
138=== added directory 'hooks/charmhelpers/contrib/hahelpers'
139=== added file 'hooks/charmhelpers/contrib/hahelpers/__init__.py'
140=== added file 'hooks/charmhelpers/contrib/hahelpers/apache.py'
141--- hooks/charmhelpers/contrib/hahelpers/apache.py 1970-01-01 00:00:00 +0000
142+++ hooks/charmhelpers/contrib/hahelpers/apache.py 2013-10-15 01:35:28 +0000
143@@ -0,0 +1,58 @@
144+#
145+# Copyright 2012 Canonical Ltd.
146+#
147+# This file is sourced from lp:openstack-charm-helpers
148+#
149+# Authors:
150+# James Page <james.page@ubuntu.com>
151+# Adam Gandelman <adamg@ubuntu.com>
152+#
153+
154+import subprocess
155+
156+from charmhelpers.core.hookenv import (
157+ config as config_get,
158+ relation_get,
159+ relation_ids,
160+ related_units as relation_list,
161+ log,
162+ INFO,
163+)
164+
165+
166+def get_cert():
167+ cert = config_get('ssl_cert')
168+ key = config_get('ssl_key')
169+ if not (cert and key):
170+ log("Inspecting identity-service relations for SSL certificate.",
171+ level=INFO)
172+ cert = key = None
173+ for r_id in relation_ids('identity-service'):
174+ for unit in relation_list(r_id):
175+ if not cert:
176+ cert = relation_get('ssl_cert',
177+ rid=r_id, unit=unit)
178+ if not key:
179+ key = relation_get('ssl_key',
180+ rid=r_id, unit=unit)
181+ return (cert, key)
182+
183+
184+def get_ca_cert():
185+ ca_cert = None
186+ log("Inspecting identity-service relations for CA SSL certificate.",
187+ level=INFO)
188+ for r_id in relation_ids('identity-service'):
189+ for unit in relation_list(r_id):
190+ if not ca_cert:
191+ ca_cert = relation_get('ca_cert',
192+ rid=r_id, unit=unit)
193+ return ca_cert
194+
195+
196+def install_ca_cert(ca_cert):
197+ if ca_cert:
198+ with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
199+ 'w') as crt:
200+ crt.write(ca_cert)
201+ subprocess.check_call(['update-ca-certificates', '--fresh'])
202
203=== added file 'hooks/charmhelpers/contrib/hahelpers/ceph.py'
204--- hooks/charmhelpers/contrib/hahelpers/ceph.py 1970-01-01 00:00:00 +0000
205+++ hooks/charmhelpers/contrib/hahelpers/ceph.py 2013-10-15 01:35:28 +0000
206@@ -0,0 +1,294 @@
207+#
208+# Copyright 2012 Canonical Ltd.
209+#
210+# This file is sourced from lp:openstack-charm-helpers
211+#
212+# Authors:
213+# James Page <james.page@ubuntu.com>
214+# Adam Gandelman <adamg@ubuntu.com>
215+#
216+
217+import commands
218+import os
219+import shutil
220+import time
221+
222+from subprocess import (
223+ check_call,
224+ check_output,
225+ CalledProcessError
226+)
227+
228+from charmhelpers.core.hookenv import (
229+ relation_get,
230+ relation_ids,
231+ related_units,
232+ log,
233+ INFO,
234+ ERROR
235+)
236+
237+from charmhelpers.fetch import (
238+ apt_install,
239+)
240+
241+from charmhelpers.core.host import (
242+ mount,
243+ mounts,
244+ service_start,
245+ service_stop,
246+ umount,
247+)
248+
249+KEYRING = '/etc/ceph/ceph.client.%s.keyring'
250+KEYFILE = '/etc/ceph/ceph.client.%s.key'
251+
252+CEPH_CONF = """[global]
253+ auth supported = %(auth)s
254+ keyring = %(keyring)s
255+ mon host = %(mon_hosts)s
256+"""
257+
258+
259+def running(service):
260+ # this local util can be dropped as soon as the following branch lands
261+ # in lp:charm-helpers
262+ # https://code.launchpad.net/~gandelman-a/charm-helpers/service_running/
263+ try:
264+ output = check_output(['service', service, 'status'])
265+ except CalledProcessError:
266+ return False
267+ else:
268+ if ("start/running" in output or "is running" in output):
269+ return True
270+ else:
271+ return False
272+
273+
274+def install():
275+ ceph_dir = "/etc/ceph"
276+ if not os.path.isdir(ceph_dir):
277+ os.mkdir(ceph_dir)
278+ apt_install('ceph-common', fatal=True)
279+
280+
281+def rbd_exists(service, pool, rbd_img):
282+ (rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %
283+ (service, pool))
284+ return rbd_img in out
285+
286+
287+def create_rbd_image(service, pool, image, sizemb):
288+ cmd = [
289+ 'rbd',
290+ 'create',
291+ image,
292+ '--size',
293+ str(sizemb),
294+ '--id',
295+ service,
296+ '--pool',
297+ pool
298+ ]
299+ check_call(cmd)
300+
301+
302+def pool_exists(service, name):
303+ (rc, out) = commands.getstatusoutput("rados --id %s lspools" % service)
304+ return name in out
305+
306+
307+def create_pool(service, name):
308+ cmd = [
309+ 'rados',
310+ '--id',
311+ service,
312+ 'mkpool',
313+ name
314+ ]
315+ check_call(cmd)
316+
317+
318+def keyfile_path(service):
319+ return KEYFILE % service
320+
321+
322+def keyring_path(service):
323+ return KEYRING % service
324+
325+
326+def create_keyring(service, key):
327+ keyring = keyring_path(service)
328+ if os.path.exists(keyring):
329+ log('ceph: Keyring exists at %s.' % keyring, level=INFO)
330+ cmd = [
331+ 'ceph-authtool',
332+ keyring,
333+ '--create-keyring',
334+ '--name=client.%s' % service,
335+ '--add-key=%s' % key
336+ ]
337+ check_call(cmd)
338+ log('ceph: Created new ring at %s.' % keyring, level=INFO)
339+
340+
341+def create_key_file(service, key):
342+ # create a file containing the key
343+ keyfile = keyfile_path(service)
344+ if os.path.exists(keyfile):
345+ log('ceph: Keyfile exists at %s.' % keyfile, level=INFO)
346+ fd = open(keyfile, 'w')
347+ fd.write(key)
348+ fd.close()
349+ log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
350+
351+
352+def get_ceph_nodes():
353+ hosts = []
354+ for r_id in relation_ids('ceph'):
355+ for unit in related_units(r_id):
356+ hosts.append(relation_get('private-address', unit=unit, rid=r_id))
357+ return hosts
358+
359+
360+def configure(service, key, auth):
361+ create_keyring(service, key)
362+ create_key_file(service, key)
363+ hosts = get_ceph_nodes()
364+ mon_hosts = ",".join(map(str, hosts))
365+ keyring = keyring_path(service)
366+ with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
367+ ceph_conf.write(CEPH_CONF % locals())
368+ modprobe_kernel_module('rbd')
369+
370+
371+def image_mapped(image_name):
372+ (rc, out) = commands.getstatusoutput('rbd showmapped')
373+ return image_name in out
374+
375+
376+def map_block_storage(service, pool, image):
377+ cmd = [
378+ 'rbd',
379+ 'map',
380+ '%s/%s' % (pool, image),
381+ '--user',
382+ service,
383+ '--secret',
384+ keyfile_path(service),
385+ ]
386+ check_call(cmd)
387+
388+
389+def filesystem_mounted(fs):
390+ return fs in [f for m, f in mounts()]
391+
392+
393+def make_filesystem(blk_device, fstype='ext4', timeout=10):
394+ count = 0
395+ e_noent = os.errno.ENOENT
396+ while not os.path.exists(blk_device):
397+ if count >= timeout:
398+ log('ceph: gave up waiting on block device %s' % blk_device,
399+ level=ERROR)
400+ raise IOError(e_noent, os.strerror(e_noent), blk_device)
401+ log('ceph: waiting for block device %s to appear' % blk_device,
402+ level=INFO)
403+ count += 1
404+ time.sleep(1)
405+ else:
406+ log('ceph: Formatting block device %s as filesystem %s.' %
407+ (blk_device, fstype), level=INFO)
408+ check_call(['mkfs', '-t', fstype, blk_device])
409+
410+
411+def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'):
412+ # mount block device into /mnt
413+ mount(blk_device, '/mnt')
414+
415+ # copy data to /mnt
416+ try:
417+ copy_files(data_src_dst, '/mnt')
418+ except:
419+ pass
420+
421+ # umount block device
422+ umount('/mnt')
423+
424+ _dir = os.stat(data_src_dst)
425+ uid = _dir.st_uid
426+ gid = _dir.st_gid
427+
428+ # re-mount where the data should originally be
429+ mount(blk_device, data_src_dst, persist=True)
430+
431+ # ensure original ownership of new mount.
432+ cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst]
433+ check_call(cmd)
434+
435+
436+# TODO: re-use
437+def modprobe_kernel_module(module):
438+ log('ceph: Loading kernel module', level=INFO)
439+ cmd = ['modprobe', module]
440+ check_call(cmd)
441+ cmd = 'echo %s >> /etc/modules' % module
442+ check_call(cmd, shell=True)
443+
444+
445+def copy_files(src, dst, symlinks=False, ignore=None):
446+ for item in os.listdir(src):
447+ s = os.path.join(src, item)
448+ d = os.path.join(dst, item)
449+ if os.path.isdir(s):
450+ shutil.copytree(s, d, symlinks, ignore)
451+ else:
452+ shutil.copy2(s, d)
453+
454+
455+def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
456+ blk_device, fstype, system_services=[]):
457+ """
458+ To be called from the current cluster leader.
459+ Ensures given pool and RBD image exists, is mapped to a block device,
460+ and the device is formatted and mounted at the given mount_point.
461+
462+ If formatting a device for the first time, data existing at mount_point
463+ will be migrated to the RBD device before being remounted.
464+
465+ All services listed in system_services will be stopped prior to data
466+ migration and restarted when complete.
467+ """
468+ # Ensure pool, RBD image, RBD mappings are in place.
469+ if not pool_exists(service, pool):
470+ log('ceph: Creating new pool %s.' % pool, level=INFO)
471+ create_pool(service, pool)
472+
473+ if not rbd_exists(service, pool, rbd_img):
474+ log('ceph: Creating RBD image (%s).' % rbd_img, level=INFO)
475+ create_rbd_image(service, pool, rbd_img, sizemb)
476+
477+ if not image_mapped(rbd_img):
478+ log('ceph: Mapping RBD Image as a Block Device.', level=INFO)
479+ map_block_storage(service, pool, rbd_img)
480+
481+ # make file system
482+ # TODO: What happens if for whatever reason this is run again and
483+ # the data is already in the rbd device and/or is mounted??
484+ # When it is mounted already, it will fail to make the fs
485+ # XXX: This is really sketchy! Need to at least add an fstab entry
486+ # otherwise this hook will blow away existing data if its executed
487+ # after a reboot.
488+ if not filesystem_mounted(mount_point):
489+ make_filesystem(blk_device, fstype)
490+
491+ for svc in system_services:
492+ if running(svc):
493+ log('Stopping services %s prior to migrating data.' % svc,
494+ level=INFO)
495+ service_stop(svc)
496+
497+ place_data_on_ceph(service, blk_device, mount_point, fstype)
498+
499+ for svc in system_services:
500+ service_start(svc)
501
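Note: ensure_ceph_storage() above wraps the whole pool -> image -> map -> format -> mount flow. A hypothetical invocation from a leader's ceph-relation-changed hook; every argument value here is illustrative:

    ensure_ceph_storage(service='nova-compute', pool='nova',
                        rbd_img='nova', sizemb=1024,
                        mount_point='/var/lib/nova',
                        blk_device='/dev/rbd1', fstype='ext4',
                        system_services=['nova-compute'])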
502=== added file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
503--- hooks/charmhelpers/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000
504+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2013-10-15 01:35:28 +0000
505@@ -0,0 +1,183 @@
506+#
507+# Copyright 2012 Canonical Ltd.
508+#
509+# Authors:
510+# James Page <james.page@ubuntu.com>
511+# Adam Gandelman <adamg@ubuntu.com>
512+#
513+
514+import subprocess
515+import os
516+
517+from socket import gethostname as get_unit_hostname
518+
519+from charmhelpers.core.hookenv import (
520+ log,
521+ relation_ids,
522+ related_units as relation_list,
523+ relation_get,
524+ config as config_get,
525+ INFO,
526+ ERROR,
527+ unit_get,
528+)
529+
530+
531+class HAIncompleteConfig(Exception):
532+ pass
533+
534+
535+def is_clustered():
536+ for r_id in (relation_ids('ha') or []):
537+ for unit in (relation_list(r_id) or []):
538+ clustered = relation_get('clustered',
539+ rid=r_id,
540+ unit=unit)
541+ if clustered:
542+ return True
543+ return False
544+
545+
546+def is_leader(resource):
547+ cmd = [
548+ "crm", "resource",
549+ "show", resource
550+ ]
551+ try:
552+ status = subprocess.check_output(cmd)
553+ except subprocess.CalledProcessError:
554+ return False
555+ else:
556+ if get_unit_hostname() in status:
557+ return True
558+ else:
559+ return False
560+
561+
562+def peer_units():
563+ peers = []
564+ for r_id in (relation_ids('cluster') or []):
565+ for unit in (relation_list(r_id) or []):
566+ peers.append(unit)
567+ return peers
568+
569+
570+def oldest_peer(peers):
571+ local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
572+ for peer in peers:
573+ remote_unit_no = int(peer.split('/')[1])
574+ if remote_unit_no < local_unit_no:
575+ return False
576+ return True
577+
578+
579+def eligible_leader(resource):
580+ if is_clustered():
581+ if not is_leader(resource):
582+ log('Deferring action to CRM leader.', level=INFO)
583+ return False
584+ else:
585+ peers = peer_units()
586+ if peers and not oldest_peer(peers):
587+ log('Deferring action to oldest service unit.', level=INFO)
588+ return False
589+ return True
590+
591+
592+def https():
593+ '''
594+ Determines whether enough data has been provided in configuration
595+ or relation data to configure HTTPS.
596+
597+ returns: boolean
598+ '''
599+ if config_get('use-https') == "yes":
600+ return True
601+ if config_get('ssl_cert') and config_get('ssl_key'):
602+ return True
603+ for r_id in relation_ids('identity-service'):
604+ for unit in relation_list(r_id):
605+ rel_state = [
606+ relation_get('https_keystone', rid=r_id, unit=unit),
607+ relation_get('ssl_cert', rid=r_id, unit=unit),
608+ relation_get('ssl_key', rid=r_id, unit=unit),
609+ relation_get('ca_cert', rid=r_id, unit=unit),
610+ ]
611+ # NOTE: works around (LP: #1203241)
612+ if (None not in rel_state) and ('' not in rel_state):
613+ return True
614+ return False
615+
616+
617+def determine_api_port(public_port):
618+ '''
619+ Determine correct API server listening port based on
620+ existence of HTTPS reverse proxy and/or haproxy.
621+
622+ public_port: int: standard public port for given service
623+
624+ returns: int: the correct listening port for the API service
625+ '''
626+ i = 0
627+ if len(peer_units()) > 0 or is_clustered():
628+ i += 1
629+ if https():
630+ i += 1
631+ return public_port - (i * 10)
632+
633+
634+def determine_haproxy_port(public_port):
635+ '''
636+ Description: Determine correct proxy listening port based on the
637+ public port and the existence of an HTTPS reverse proxy.
638+
639+ public_port: int: standard public port for given service
640+
641+ returns: int: the correct listening port for the HAProxy service
642+ '''
643+ i = 0
644+ if https():
645+ i += 1
646+ return public_port - (i * 10)
647+
648+
649+def get_hacluster_config():
650+ '''
651+ Obtains all relevant configuration from charm configuration required
652+ for initiating a relation to hacluster:
653+
654+ ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
655+
656+ returns: dict: A dict containing settings keyed by setting name.
657+ raises: HAIncompleteConfig if settings are missing.
658+ '''
659+ settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
660+ conf = {}
661+ for setting in settings:
662+ conf[setting] = config_get(setting)
663+ missing = []
664+ [missing.append(s) for s, v in conf.iteritems() if v is None]
665+ if missing:
666+ log('Insufficient config data to configure hacluster.', level=ERROR)
667+ raise HAIncompleteConfig
668+ return conf
669+
670+
671+def canonical_url(configs, vip_setting='vip'):
672+ '''
673+ Returns the correct HTTP URL to this host given the state of HTTPS
674+ configuration and hacluster.
675+
676+ :configs : OSConfigRenderer: A config templating object to inspect for
677+ a complete https context.
678+ :vip_setting: str: Setting in charm config that specifies
679+ VIP address.
680+ '''
681+ scheme = 'http'
682+ if 'https' in configs.complete_contexts():
683+ scheme = 'https'
684+ if is_clustered():
685+ addr = config_get(vip_setting)
686+ else:
687+ addr = unit_get('private-address')
688+ return '%s://%s' % (scheme, addr)
689
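Note: determine_haproxy_port() and determine_api_port() implement a simple layering convention: each proxy in front of a service claims a port 10 below the outer one. A worked example, taking nova-api's public port 8774 as an illustrative value:

    # apache HTTPS frontend : 8774 (the public port, when https() is True)
    # haproxy               : 8764 = determine_haproxy_port(8774)
    # API server            : 8754 = determine_api_port(8774)
    #                         (with peers/clustered and https() both true)
    # With no peers and no HTTPS, both helpers return 8774 unchanged.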
690=== added directory 'hooks/charmhelpers/contrib/network'
691=== added file 'hooks/charmhelpers/contrib/network/__init__.py'
692=== added directory 'hooks/charmhelpers/contrib/network/ovs'
693=== added file 'hooks/charmhelpers/contrib/network/ovs/__init__.py'
694--- hooks/charmhelpers/contrib/network/ovs/__init__.py 1970-01-01 00:00:00 +0000
695+++ hooks/charmhelpers/contrib/network/ovs/__init__.py 2013-10-15 01:35:28 +0000
696@@ -0,0 +1,72 @@
697+''' Helpers for interacting with OpenvSwitch '''
698+import subprocess
699+import os
700+from charmhelpers.core.hookenv import (
701+ log, WARNING
702+)
703+from charmhelpers.core.host import (
704+ service
705+)
706+
707+
708+def add_bridge(name):
709+ ''' Add the named bridge to openvswitch '''
710+ log('Creating bridge {}'.format(name))
711+ subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name])
712+
713+
714+def del_bridge(name):
715+ ''' Delete the named bridge from openvswitch '''
716+ log('Deleting bridge {}'.format(name))
717+ subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])
718+
719+
720+def add_bridge_port(name, port):
721+ ''' Add a port to the named openvswitch bridge '''
722+ log('Adding port {} to bridge {}'.format(port, name))
723+ subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
724+ name, port])
725+ subprocess.check_call(["ip", "link", "set", port, "up"])
726+
727+
728+def del_bridge_port(name, port):
729+ ''' Delete a port from the named openvswitch bridge '''
730+ log('Deleting port {} from bridge {}'.format(port, name))
731+ subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
732+ name, port])
733+ subprocess.check_call(["ip", "link", "set", port, "down"])
734+
735+
736+def set_manager(manager):
737+ ''' Set the controller for the local openvswitch '''
738+ log('Setting manager for local ovs to {}'.format(manager))
739+ subprocess.check_call(['ovs-vsctl', 'set-manager',
740+ 'ssl:{}'.format(manager)])
741+
742+
743+CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
744+
745+
746+def get_certificate():
747+ ''' Read openvswitch certificate from disk '''
748+ if os.path.exists(CERT_PATH):
749+ log('Reading ovs certificate from {}'.format(CERT_PATH))
750+ with open(CERT_PATH, 'r') as cert:
751+ full_cert = cert.read()
752+ begin_marker = "-----BEGIN CERTIFICATE-----"
753+ end_marker = "-----END CERTIFICATE-----"
754+ begin_index = full_cert.find(begin_marker)
755+ end_index = full_cert.rfind(end_marker)
756+ if end_index == -1 or begin_index == -1:
757+ raise RuntimeError("Certificate does not contain valid begin"
758+ " and end markers.")
759+ full_cert = full_cert[begin_index:(end_index + len(end_marker))]
760+ return full_cert
761+ else:
762+ log('Certificate not found', level=WARNING)
763+ return None
764+
765+
766+def full_restart():
767+ ''' Full restart and reload of openvswitch '''
768+ service('force-reload-kmod', 'openvswitch-switch')
769
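Note: typical use of the OVS helpers above, e.g. from a plugin charm's config-changed hook ('br-int' and 'eth1' are illustrative names):

    add_bridge('br-int')               # idempotent via --may-exist
    add_bridge_port('br-int', 'eth1')  # also brings the link up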
770=== added directory 'hooks/charmhelpers/contrib/openstack'
771=== added file 'hooks/charmhelpers/contrib/openstack/__init__.py'
772=== added file 'hooks/charmhelpers/contrib/openstack/context.py'
773--- hooks/charmhelpers/contrib/openstack/context.py 1970-01-01 00:00:00 +0000
774+++ hooks/charmhelpers/contrib/openstack/context.py 2013-10-15 01:35:28 +0000
775@@ -0,0 +1,522 @@
776+import json
777+import os
778+
779+from base64 import b64decode
780+
781+from subprocess import (
782+ check_call
783+)
784+
785+
786+from charmhelpers.fetch import (
787+ apt_install,
788+ filter_installed_packages,
789+)
790+
791+from charmhelpers.core.hookenv import (
792+ config,
793+ local_unit,
794+ log,
795+ relation_get,
796+ relation_ids,
797+ related_units,
798+ unit_get,
799+ unit_private_ip,
800+ ERROR,
801+ WARNING,
802+)
803+
804+from charmhelpers.contrib.hahelpers.cluster import (
805+ determine_api_port,
806+ determine_haproxy_port,
807+ https,
808+ is_clustered,
809+ peer_units,
810+)
811+
812+from charmhelpers.contrib.hahelpers.apache import (
813+ get_cert,
814+ get_ca_cert,
815+)
816+
817+from charmhelpers.contrib.openstack.neutron import (
818+ neutron_plugin_attribute,
819+)
820+
821+CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
822+
823+
824+class OSContextError(Exception):
825+ pass
826+
827+
828+def ensure_packages(packages):
829+ '''Install but do not upgrade required plugin packages'''
830+ required = filter_installed_packages(packages)
831+ if required:
832+ apt_install(required, fatal=True)
833+
834+
835+def context_complete(ctxt):
836+ _missing = []
837+ for k, v in ctxt.iteritems():
838+ if v is None or v == '':
839+ _missing.append(k)
840+ if _missing:
841+ log('Missing required data: %s' % ' '.join(_missing), level='INFO')
842+ return False
843+ return True
844+
845+
846+class OSContextGenerator(object):
847+ interfaces = []
848+
849+ def __call__(self):
850+ raise NotImplementedError
851+
852+
853+class SharedDBContext(OSContextGenerator):
854+ interfaces = ['shared-db']
855+
856+ def __init__(self, database=None, user=None, relation_prefix=None):
857+ '''
858+ Allows inspecting relation for settings prefixed with relation_prefix.
859+ This is useful for parsing access for multiple databases returned via
860+ the shared-db interface (eg, nova_password, quantum_password)
861+ '''
862+ self.relation_prefix = relation_prefix
863+ self.database = database
864+ self.user = user
865+
866+ def __call__(self):
867+ self.database = self.database or config('database')
868+ self.user = self.user or config('database-user')
869+ if None in [self.database, self.user]:
870+ log('Could not generate shared_db context. '
871+ 'Missing required charm config options. '
872+ '(database name and user)')
873+ raise OSContextError
874+ ctxt = {}
875+
876+ password_setting = 'password'
877+ if self.relation_prefix:
878+ password_setting = self.relation_prefix + '_password'
879+
880+ for rid in relation_ids('shared-db'):
881+ for unit in related_units(rid):
882+ passwd = relation_get(password_setting, rid=rid, unit=unit)
883+ ctxt = {
884+ 'database_host': relation_get('db_host', rid=rid,
885+ unit=unit),
886+ 'database': self.database,
887+ 'database_user': self.user,
888+ 'database_password': passwd,
889+ }
890+ if context_complete(ctxt):
891+ return ctxt
892+ return {}
893+
894+
895+class IdentityServiceContext(OSContextGenerator):
896+ interfaces = ['identity-service']
897+
898+ def __call__(self):
899+ log('Generating template context for identity-service')
900+ ctxt = {}
901+
902+ for rid in relation_ids('identity-service'):
903+ for unit in related_units(rid):
904+ ctxt = {
905+ 'service_port': relation_get('service_port', rid=rid,
906+ unit=unit),
907+ 'service_host': relation_get('service_host', rid=rid,
908+ unit=unit),
909+ 'auth_host': relation_get('auth_host', rid=rid, unit=unit),
910+ 'auth_port': relation_get('auth_port', rid=rid, unit=unit),
911+ 'admin_tenant_name': relation_get('service_tenant',
912+ rid=rid, unit=unit),
913+ 'admin_user': relation_get('service_username', rid=rid,
914+ unit=unit),
915+ 'admin_password': relation_get('service_password', rid=rid,
916+ unit=unit),
917+ # XXX: Hard-coded http.
918+ 'service_protocol': 'http',
919+ 'auth_protocol': 'http',
920+ }
921+ if context_complete(ctxt):
922+ return ctxt
923+ return {}
924+
925+
926+class AMQPContext(OSContextGenerator):
927+ interfaces = ['amqp']
928+
929+ def __call__(self):
930+ log('Generating template context for amqp')
931+ conf = config()
932+ try:
933+ username = conf['rabbit-user']
934+ vhost = conf['rabbit-vhost']
935+ except KeyError as e:
936+ log('Could not generate amqp context. '
937+ 'Missing required charm config options: %s.' % e)
938+ raise OSContextError
939+
940+ ctxt = {}
941+ for rid in relation_ids('amqp'):
942+ for unit in related_units(rid):
943+ if relation_get('clustered', rid=rid, unit=unit):
944+ ctxt['clustered'] = True
945+ ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
946+ unit=unit)
947+ else:
948+ ctxt['rabbitmq_host'] = relation_get('private-address',
949+ rid=rid, unit=unit)
950+ ctxt.update({
951+ 'rabbitmq_user': username,
952+ 'rabbitmq_password': relation_get('password', rid=rid,
953+ unit=unit),
954+ 'rabbitmq_virtual_host': vhost,
955+ })
956+ if context_complete(ctxt):
957+ # Sufficient information found = break out!
958+ break
959+ # Used for active/active rabbitmq >= grizzly
960+ ctxt['rabbitmq_hosts'] = []
961+ for unit in related_units(rid):
962+ ctxt['rabbitmq_hosts'].append(relation_get('private-address',
963+ rid=rid, unit=unit))
964+ if not context_complete(ctxt):
965+ return {}
966+ else:
967+ return ctxt
968+
969+
970+class CephContext(OSContextGenerator):
971+ interfaces = ['ceph']
972+
973+ def __call__(self):
974+ '''This generates context for /etc/ceph/ceph.conf templates'''
975+ if not relation_ids('ceph'):
976+ return {}
977+ log('Generating template context for ceph')
978+ mon_hosts = []
979+ auth = None
980+ key = None
981+ for rid in relation_ids('ceph'):
982+ for unit in related_units(rid):
983+ mon_hosts.append(relation_get('private-address', rid=rid,
984+ unit=unit))
985+ auth = relation_get('auth', rid=rid, unit=unit)
986+ key = relation_get('key', rid=rid, unit=unit)
987+
988+ ctxt = {
989+ 'mon_hosts': ' '.join(mon_hosts),
990+ 'auth': auth,
991+ 'key': key,
992+ }
993+
994+ if not os.path.isdir('/etc/ceph'):
995+ os.mkdir('/etc/ceph')
996+
997+ if not context_complete(ctxt):
998+ return {}
999+
1000+ ensure_packages(['ceph-common'])
1001+
1002+ return ctxt
1003+
1004+
1005+class HAProxyContext(OSContextGenerator):
1006+ interfaces = ['cluster']
1007+
1008+ def __call__(self):
1009+ '''
1010+ Builds half a context for the haproxy template, which describes
1011+ all peers to be included in the cluster. Each charm needs to include
1012+ its own context generator that describes the port mapping.
1013+ '''
1014+ if not relation_ids('cluster'):
1015+ return {}
1016+
1017+ cluster_hosts = {}
1018+ l_unit = local_unit().replace('/', '-')
1019+ cluster_hosts[l_unit] = unit_get('private-address')
1020+
1021+ for rid in relation_ids('cluster'):
1022+ for unit in related_units(rid):
1023+ _unit = unit.replace('/', '-')
1024+ addr = relation_get('private-address', rid=rid, unit=unit)
1025+ cluster_hosts[_unit] = addr
1026+
1027+ ctxt = {
1028+ 'units': cluster_hosts,
1029+ }
1030+ if len(cluster_hosts.keys()) > 1:
1031+ # Enable haproxy when we have enough peers.
1032+ log('Ensuring haproxy enabled in /etc/default/haproxy.')
1033+ with open('/etc/default/haproxy', 'w') as out:
1034+ out.write('ENABLED=1\n')
1035+ return ctxt
1036+ log('HAProxy context is incomplete, this unit has no peers.')
1037+ return {}
1038+
1039+
1040+class ImageServiceContext(OSContextGenerator):
1041+ interfaces = ['image-service']
1042+
1043+ def __call__(self):
1044+ '''
1045+ Obtains the glance API server from the image-service relation. Useful
1046+ in nova and cinder (currently).
1047+ '''
1048+ log('Generating template context for image-service.')
1049+ rids = relation_ids('image-service')
1050+ if not rids:
1051+ return {}
1052+ for rid in rids:
1053+ for unit in related_units(rid):
1054+ api_server = relation_get('glance-api-server',
1055+ rid=rid, unit=unit)
1056+ if api_server:
1057+ return {'glance_api_servers': api_server}
1058+ log('ImageService context is incomplete. '
1059+ 'Missing required relation data.')
1060+ return {}
1061+
1062+
1063+class ApacheSSLContext(OSContextGenerator):
1064+ """
1065+ Generates a context for an apache vhost configuration that configures
1066+ HTTPS reverse proxying for one or many endpoints. Generated context
1067+ looks something like:
1068+ {
1069+ 'namespace': 'cinder',
1070+ 'private_address': 'iscsi.mycinderhost.com',
1071+ 'endpoints': [(8776, 8766), (8777, 8767)]
1072+ }
1073+
1074+ The endpoints list consists of tuples mapping external ports
1075+ to internal ports.
1076+ """
1077+ interfaces = ['https']
1078+
1079+ # charms should inherit this context and set external ports
1080+ # and service namespace accordingly.
1081+ external_ports = []
1082+ service_namespace = None
1083+
1084+ def enable_modules(self):
1085+ cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
1086+ check_call(cmd)
1087+
1088+ def configure_cert(self):
1089+ if not os.path.isdir('/etc/apache2/ssl'):
1090+ os.mkdir('/etc/apache2/ssl')
1091+ ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
1092+ if not os.path.isdir(ssl_dir):
1093+ os.mkdir(ssl_dir)
1094+ cert, key = get_cert()
1095+ with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
1096+ cert_out.write(b64decode(cert))
1097+ with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
1098+ key_out.write(b64decode(key))
1099+ ca_cert = get_ca_cert()
1100+ if ca_cert:
1101+ with open(CA_CERT_PATH, 'w') as ca_out:
1102+ ca_out.write(b64decode(ca_cert))
1103+ check_call(['update-ca-certificates'])
1104+
1105+ def __call__(self):
1106+ if isinstance(self.external_ports, basestring):
1107+ self.external_ports = [self.external_ports]
1108+ if (not self.external_ports or not https()):
1109+ return {}
1110+
1111+ self.configure_cert()
1112+ self.enable_modules()
1113+
1114+ ctxt = {
1115+ 'namespace': self.service_namespace,
1116+ 'private_address': unit_get('private-address'),
1117+ 'endpoints': []
1118+ }
1119+ for ext_port in self.external_ports:
1120+ if peer_units() or is_clustered():
1121+ int_port = determine_haproxy_port(ext_port)
1122+ else:
1123+ int_port = determine_api_port(ext_port)
1124+ portmap = (int(ext_port), int(int_port))
1125+ ctxt['endpoints'].append(portmap)
1126+ return ctxt
1127+
1128+
1129+class NeutronContext(object):
1130+ interfaces = []
1131+
1132+ @property
1133+ def plugin(self):
1134+ return None
1135+
1136+ @property
1137+ def network_manager(self):
1138+ return None
1139+
1140+ @property
1141+ def packages(self):
1142+ return neutron_plugin_attribute(
1143+ self.plugin, 'packages', self.network_manager)
1144+
1145+ @property
1146+ def neutron_security_groups(self):
1147+ return None
1148+
1149+ def _ensure_packages(self):
1150+ [ensure_packages(pkgs) for pkgs in self.packages]
1151+
1152+ def _save_flag_file(self):
1153+ if self.network_manager == 'quantum':
1154+ _file = '/etc/nova/quantum_plugin.conf'
1155+ else:
1156+ _file = '/etc/nova/neutron_plugin.conf'
1157+ with open(_file, 'wb') as out:
1158+ out.write(self.plugin + '\n')
1159+
1160+ def ovs_ctxt(self):
1161+ driver = neutron_plugin_attribute(self.plugin, 'driver',
1162+ self.network_manager)
1163+
1164+ ovs_ctxt = {
1165+ 'core_plugin': driver,
1166+ 'neutron_plugin': 'ovs',
1167+ 'neutron_security_groups': self.neutron_security_groups,
1168+ 'local_ip': unit_private_ip(),
1169+ }
1170+
1171+ return ovs_ctxt
1172+
1173+ def __call__(self):
1174+ self._ensure_packages()
1175+
1176+ if self.network_manager not in ['quantum', 'neutron']:
1177+ return {}
1178+
1179+ if not self.plugin:
1180+ return {}
1181+
1182+ ctxt = {'network_manager': self.network_manager}
1183+
1184+ if self.plugin == 'ovs':
1185+ ctxt.update(self.ovs_ctxt())
1186+
1187+ self._save_flag_file()
1188+ return ctxt
1189+
1190+
1191+class OSConfigFlagContext(OSContextGenerator):
1192+ '''
1193+ Responsible for adding user-defined config-flags from charm config
1194+ to a template context.
1195+ '''
1196+ def __call__(self):
1197+ config_flags = config('config-flags')
1198+ if not config_flags or config_flags in ['None', '']:
1199+ return {}
1200+ config_flags = config_flags.split(',')
1201+ flags = {}
1202+ for flag in config_flags:
1203+ if '=' not in flag:
1204+ log('Improperly formatted config-flag, expected k=v '
1205+ 'got %s' % flag, level=WARNING)
1206+ continue
1207+ k, v = flag.split('=')
1208+ flags[k.strip()] = v
1209+ ctxt = {'user_config_flags': flags}
1210+ return ctxt
1211+
1212+
1213+class SubordinateConfigContext(OSContextGenerator):
1214+ """
1215+ Responsible for inspecting relations to subordinates that
1216+ may be exporting required config via a json blob.
1217+
1218+ The subordinate interface allows subordinates to export their
1219+ configuration requirements to the principal for multiple config
1220+ files and multiple services. E.g., a subordinate that has interfaces
1221+ to both glance and nova may export the following yaml blob as json:
1222+
1223+ glance:
1224+ /etc/glance/glance-api.conf:
1225+ sections:
1226+ DEFAULT:
1227+ - [key1, value1]
1228+ /etc/glance/glance-registry.conf:
1229+ MYSECTION:
1230+ - [key2, value2]
1231+ nova:
1232+ /etc/nova/nova.conf:
1233+ sections:
1234+ DEFAULT:
1235+ - [key3, value3]
1236+
1237+
1238+ It is then up to the principal charm to subscribe this context to
1239+ the service+config file it is interested in. Configuration data will
1240+ be available in the template context, in glance's case, as:
1241+ ctxt = {
1242+ ... other context ...
1243+ 'subordinate_config': {
1244+ 'DEFAULT': {
1245+ 'key1': 'value1',
1246+ },
1247+ 'MYSECTION': {
1248+ 'key2': 'value2',
1249+ },
1250+ }
1251+ }
1252+
1253+ """
1254+ def __init__(self, service, config_file, interface):
1255+ """
1256+ :param service : Service name key to query in any subordinate
1257+ data found
1258+ :param config_file : Service's config file to query sections
1259+ :param interface : Subordinate interface to inspect
1260+ """
1261+ self.service = service
1262+ self.config_file = config_file
1263+ self.interface = interface
1264+
1265+ def __call__(self):
1266+ ctxt = {}
1267+ for rid in relation_ids(self.interface):
1268+ for unit in related_units(rid):
1269+ sub_config = relation_get('subordinate_configuration',
1270+ rid=rid, unit=unit)
1271+ if sub_config and sub_config != '':
1272+ try:
1273+ sub_config = json.loads(sub_config)
1274+ except:
1275+ log('Could not parse JSON from subordinate_config '
1276+ 'setting from %s' % rid, level=ERROR)
1277+ continue
1278+
1279+ if self.service not in sub_config:
1280+ log('Found subordinate_config on %s but it contained '
1281+ 'nothing for %s service' % (rid, self.service))
1282+ continue
1283+
1284+ sub_config = sub_config[self.service]
1285+ if self.config_file not in sub_config:
1286+ log('Found subordinate_config on %s but it contained '
1287+ 'nothing for %s' % (rid, self.config_file))
1288+ continue
1289+
1290+ sub_config = sub_config[self.config_file]
1291+ for k, v in sub_config.iteritems():
1292+ ctxt[k] = v
1293+
1294+ if not ctxt:
1295+ ctxt['sections'] = {}
1296+
1297+ return ctxt
1298
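Note: OSConfigFlagContext turns the charm's comma-separated config-flags option into a dict for the templates. A standalone illustration of the same parsing (the real class reads the value via config(); the flag values are made up):

    config_flags = 'metadata_host=10.0.0.1,use_syslog=True'  # example value
    flags = {}
    for flag in config_flags.split(','):
        k, v = flag.split('=')
        flags[k.strip()] = v
    ctxt = {'user_config_flags': flags}
    # -> {'user_config_flags': {'metadata_host': '10.0.0.1',
    #                           'use_syslog': 'True'}}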
1299=== added file 'hooks/charmhelpers/contrib/openstack/neutron.py'
1300--- hooks/charmhelpers/contrib/openstack/neutron.py 1970-01-01 00:00:00 +0000
1301+++ hooks/charmhelpers/contrib/openstack/neutron.py 2013-10-15 01:35:28 +0000
1302@@ -0,0 +1,117 @@
1303+# Various utilities for dealing with Neutron and the renaming from Quantum.
1304+
1305+from subprocess import check_output
1306+
1307+from charmhelpers.core.hookenv import (
1308+ config,
1309+ log,
1310+ ERROR,
1311+)
1312+
1313+from charmhelpers.contrib.openstack.utils import os_release
1314+
1315+
1316+def headers_package():
1317+ """Ensures correct linux-headers for running kernel are installed,
1318+ for building DKMS package"""
1319+ kver = check_output(['uname', '-r']).strip()
1320+ return 'linux-headers-%s' % kver
1321+
1322+
1323+# legacy
1324+def quantum_plugins():
1325+ from charmhelpers.contrib.openstack import context
1326+ return {
1327+ 'ovs': {
1328+ 'config': '/etc/quantum/plugins/openvswitch/'
1329+ 'ovs_quantum_plugin.ini',
1330+ 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
1331+ 'OVSQuantumPluginV2',
1332+ 'contexts': [
1333+ context.SharedDBContext(user=config('neutron-database-user'),
1334+ database=config('neutron-database'),
1335+ relation_prefix='neutron')],
1336+ 'services': ['quantum-plugin-openvswitch-agent'],
1337+ 'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
1338+ ['quantum-plugin-openvswitch-agent']],
1339+ },
1340+ 'nvp': {
1341+ 'config': '/etc/quantum/plugins/nicira/nvp.ini',
1342+ 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
1343+ 'QuantumPlugin.NvpPluginV2',
1344+ 'services': [],
1345+ 'packages': [],
1346+ }
1347+ }
1348+
1349+
1350+def neutron_plugins():
1351+ from charmhelpers.contrib.openstack import context
1352+ return {
1353+ 'ovs': {
1354+ 'config': '/etc/neutron/plugins/openvswitch/'
1355+ 'ovs_neutron_plugin.ini',
1356+ 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
1357+ 'OVSNeutronPluginV2',
1358+ 'contexts': [
1359+ context.SharedDBContext(user=config('neutron-database-user'),
1360+ database=config('neutron-database'),
1361+ relation_prefix='neutron')],
1362+ 'services': ['neutron-plugin-openvswitch-agent'],
1363+ 'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
1364+ ['quantum-plugin-openvswitch-agent']],
1365+ },
1366+ 'nvp': {
1367+ 'config': '/etc/neutron/plugins/nicira/nvp.ini',
1368+ 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
1369+ 'NeutronPlugin.NvpPluginV2',
1370+ 'services': [],
1371+ 'packages': [],
1372+ }
1373+ }
1374+
1375+
1376+def neutron_plugin_attribute(plugin, attr, net_manager=None):
1377+ manager = net_manager or network_manager()
1378+ if manager == 'quantum':
1379+ plugins = quantum_plugins()
1380+ elif manager == 'neutron':
1381+ plugins = neutron_plugins()
1382+ else:
1383+ log('Error: Network manager does not support plugins.')
1384+ raise Exception
1385+
1386+ try:
1387+ _plugin = plugins[plugin]
1388+ except KeyError:
1389+ log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
1390+ raise Exception
1391+
1392+ try:
1393+ return _plugin[attr]
1394+ except KeyError:
1395+ return None
1396+
1397+
1398+def network_manager():
1399+ '''
1400+ Deals with the renaming of Quantum to Neutron in H and any situations
1401+ that require compatibility (eg, deploying H with network-manager=quantum,
1402+ upgrading from G).
1403+ '''
1404+ release = os_release('nova-common')
1405+ manager = config('network-manager').lower()
1406+
1407+ if manager not in ['quantum', 'neutron']:
1408+ return manager
1409+
1410+ if release in ['essex']:
1411+ # E does not support neutron
1412+ log('Neutron networking not supported in Essex.', level=ERROR)
1413+ raise Exception
1414+ elif release in ['folsom', 'grizzly']:
1415+ # neutron is named quantum in F and G
1416+ return 'quantum'
1417+ else:
1418+ # ensure accurate naming for all releases post-H
1419+ return 'neutron'
1420
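Note: neutron_plugin_attribute() is the single lookup point into the plugin maps above. Illustrative lookups (net_manager passed explicitly; these must run inside a hook, since building the maps calls config()):

    neutron_plugin_attribute('ovs', 'config', net_manager='neutron')
    # -> '/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini'
    neutron_plugin_attribute('ovs', 'services', net_manager='quantum')
    # -> ['quantum-plugin-openvswitch-agent']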
1421=== added directory 'hooks/charmhelpers/contrib/openstack/templates'
1422=== added file 'hooks/charmhelpers/contrib/openstack/templates/__init__.py'
1423--- hooks/charmhelpers/contrib/openstack/templates/__init__.py 1970-01-01 00:00:00 +0000
1424+++ hooks/charmhelpers/contrib/openstack/templates/__init__.py 2013-10-15 01:35:28 +0000
1425@@ -0,0 +1,2 @@
1426+# dummy __init__.py to fool syncer into thinking this is a syncable python
1427+# module
1428
1429=== added file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf'
1430--- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 1970-01-01 00:00:00 +0000
1431+++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2013-10-15 01:35:28 +0000
1432@@ -0,0 +1,11 @@
1433+###############################################################################
1434+# [ WARNING ]
1435+# ceph configuration file maintained by Juju
1436+# local changes may be overwritten.
1437+###############################################################################
1438+{% if auth -%}
1439+[global]
1440+ auth_supported = {{ auth }}
1441+ keyring = /etc/ceph/$cluster.$name.keyring
1442+ mon host = {{ mon_hosts }}
1443+{% endif -%}
1444
1445=== added file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
1446--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 1970-01-01 00:00:00 +0000
1447+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2013-10-15 01:35:28 +0000
1448@@ -0,0 +1,37 @@
1449+global
1450+ log 127.0.0.1 local0
1451+ log 127.0.0.1 local1 notice
1452+ maxconn 20000
1453+ user haproxy
1454+ group haproxy
1455+ spread-checks 0
1456+
1457+defaults
1458+ log global
1459+ mode http
1460+ option httplog
1461+ option dontlognull
1462+ retries 3
1463+ timeout queue 1000
1464+ timeout connect 1000
1465+ timeout client 30000
1466+ timeout server 30000
1467+
1468+listen stats :8888
1469+ mode http
1470+ stats enable
1471+ stats hide-version
1472+ stats realm Haproxy\ Statistics
1473+ stats uri /
1474+ stats auth admin:password
1475+
1476+{% if units -%}
1477+{% for service, ports in service_ports.iteritems() -%}
1478+listen {{ service }} 0.0.0.0:{{ ports[0] }}
1479+ balance roundrobin
1480+ option tcplog
1481+ {% for unit, address in units.iteritems() -%}
1482+ server {{ unit }} {{ address }}:{{ ports[1] }} check
1483+ {% endfor %}
1484+{% endfor -%}
1485+{% endif -%}
1486
1487=== added file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend'
1488--- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 1970-01-01 00:00:00 +0000
1489+++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 2013-10-15 01:35:28 +0000
1490@@ -0,0 +1,23 @@
1491+{% if endpoints -%}
1492+{% for ext, int in endpoints -%}
1493+Listen {{ ext }}
1494+NameVirtualHost *:{{ ext }}
1495+<VirtualHost *:{{ ext }}>
1496+ ServerName {{ private_address }}
1497+ SSLEngine on
1498+ SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
1499+ SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
1500+ ProxyPass / http://localhost:{{ int }}/
1501+ ProxyPassReverse / http://localhost:{{ int }}/
1502+ ProxyPreserveHost on
1503+</VirtualHost>
1504+<Proxy *>
1505+ Order deny,allow
1506+ Allow from all
1507+</Proxy>
1508+<Location />
1509+ Order allow,deny
1510+ Allow from all
1511+</Location>
1512+{% endfor -%}
1513+{% endif -%}
1514
1515=== added file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf'
1516--- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 1970-01-01 00:00:00 +0000
1517+++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 2013-10-15 01:35:28 +0000
1518@@ -0,0 +1,23 @@
1519+{% if endpoints -%}
1520+{% for ext, int in endpoints -%}
1521+Listen {{ ext }}
1522+NameVirtualHost *:{{ ext }}
1523+<VirtualHost *:{{ ext }}>
1524+ ServerName {{ private_address }}
1525+ SSLEngine on
1526+ SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
1527+ SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
1528+ ProxyPass / http://localhost:{{ int }}/
1529+ ProxyPassReverse / http://localhost:{{ int }}/
1530+ ProxyPreserveHost on
1531+</VirtualHost>
1532+<Proxy *>
1533+ Order deny,allow
1534+ Allow from all
1535+</Proxy>
1536+<Location />
1537+ Order allow,deny
1538+ Allow from all
1539+</Location>
1540+{% endfor -%}
1541+{% endif -%}
1542
1543=== added file 'hooks/charmhelpers/contrib/openstack/templating.py'
1544--- hooks/charmhelpers/contrib/openstack/templating.py 1970-01-01 00:00:00 +0000
1545+++ hooks/charmhelpers/contrib/openstack/templating.py 2013-10-15 01:35:28 +0000
1546@@ -0,0 +1,280 @@
1547+import os
1548+
1549+from charmhelpers.fetch import apt_install
1550+
1551+from charmhelpers.core.hookenv import (
1552+ log,
1553+ ERROR,
1554+ INFO
1555+)
1556+
1557+from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
1558+
1559+try:
1560+ from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
1561+except ImportError:
1562+ # python-jinja2 may not be installed yet, or we're running unittests.
1563+ FileSystemLoader = ChoiceLoader = Environment = exceptions = None
1564+
1565+
1566+class OSConfigException(Exception):
1567+ pass
1568+
1569+
1570+def get_loader(templates_dir, os_release):
1571+ """
1572+ Create a jinja2.ChoiceLoader containing template dirs up to
1573+ and including os_release. If directory template directory
1574+ is missing at templates_dir, it will be omitted from the loader.
1575+ templates_dir is added to the bottom of the search list as a base
1576+ loading dir.
1577+
1578+ A charm may also ship a templates dir with this module
1579+ and it will be appended to the bottom of the search list, eg:
1580+ hooks/charmhelpers/contrib/openstack/templates.
1581+
1582+ :param templates_dir: str: Base template directory containing release
1583+ sub-directories.
1584+ :param os_release : str: OpenStack release codename to construct template
1585+ loader.
1586+
1587+ :returns : jinja2.ChoiceLoader constructed with a list of
1588+ jinja2.FilesystemLoaders, ordered in descending
1589+ order by OpenStack release.
1590+ """
1591+ tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
1592+ for rel in OPENSTACK_CODENAMES.itervalues()]
1593+
1594+ if not os.path.isdir(templates_dir):
1595+ log('Templates directory not found @ %s.' % templates_dir,
1596+ level=ERROR)
1597+ raise OSConfigException
1598+
1599+ # the bottom contains templates_dir and possibly a common templates dir
1600+ # shipped with the helper.
1601+ loaders = [FileSystemLoader(templates_dir)]
1602+ helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
1603+ if os.path.isdir(helper_templates):
1604+ loaders.append(FileSystemLoader(helper_templates))
1605+
1606+ for rel, tmpl_dir in tmpl_dirs:
1607+ if os.path.isdir(tmpl_dir):
1608+ loaders.insert(0, FileSystemLoader(tmpl_dir))
1609+ if rel == os_release:
1610+ break
1611+ log('Creating choice loader with dirs: %s' %
1612+ [l.searchpath for l in loaders], level=INFO)
1613+ return ChoiceLoader(loaders)
1614+
1615+
1616+class OSConfigTemplate(object):
1617+ """
1618+ Associates a config file template with a list of context generators.
1619+ Responsible for constructing a template context based on those generators.
1620+ """
1621+ def __init__(self, config_file, contexts):
1622+ self.config_file = config_file
1623+
1624+ if hasattr(contexts, '__call__'):
1625+ self.contexts = [contexts]
1626+ else:
1627+ self.contexts = contexts
1628+
1629+ self._complete_contexts = []
1630+
1631+ def context(self):
1632+ ctxt = {}
1633+ for context in self.contexts:
1634+ _ctxt = context()
1635+ if _ctxt:
1636+ ctxt.update(_ctxt)
1637+ # track interfaces for every complete context.
1638+ [self._complete_contexts.append(interface)
1639+ for interface in context.interfaces
1640+ if interface not in self._complete_contexts]
1641+ return ctxt
1642+
1643+ def complete_contexts(self):
1644+ '''
1645+ Return a list of interfaces that have satisfied contexts.
1646+ '''
1647+ if self._complete_contexts:
1648+ return self._complete_contexts
1649+ self.context()
1650+ return self._complete_contexts
1651+
1652+
1653+class OSConfigRenderer(object):
1654+ """
1655+ This class provides a common templating system to be used by OpenStack
1656+ charms. It is intended to help charms share common code and templates,
1657+ and ease the burden of managing config templates across multiple OpenStack
1658+ releases.
1659+
1660+ Basic usage:
1661+ # import some common context generators from charmhelpers
1662+ from charmhelpers.contrib.openstack import context
1663+
1664+ # Create a renderer object for a specific OS release.
1665+ configs = OSConfigRenderer(templates_dir='/tmp/templates',
1666+ openstack_release='folsom')
1667+ # register some config files with context generators.
1668+ configs.register(config_file='/etc/nova/nova.conf',
1669+ contexts=[context.SharedDBContext(),
1670+ context.AMQPContext()])
1671+ configs.register(config_file='/etc/nova/api-paste.ini',
1672+ contexts=[context.IdentityServiceContext()])
1673+ configs.register(config_file='/etc/haproxy/haproxy.conf',
1674+ contexts=[context.HAProxyContext()])
1675+ # write out a single config
1676+ configs.write('/etc/nova/nova.conf')
1677+ # write out all registered configs
1678+ configs.write_all()
1679+
1680+ Details:
1681+
1682+ OpenStack Releases and template loading
1683+ ---------------------------------------
1684+ When the object is instantiated, it is associated with a specific OS
1685+ release. This dictates how the template loader will be constructed.
1686+
1687+ The constructed loader attempts to load the template from several places
1688+ in the following order:
1689+ - from the most recent OS release-specific template dir (if one exists)
1690+ - the base templates_dir
1691+ - a template directory shipped in the charm with this helper file.
1692+
1693+
1694+ For the example above, '/tmp/templates' contains the following structure:
1695+ /tmp/templates/nova.conf
1696+ /tmp/templates/api-paste.ini
1697+ /tmp/templates/grizzly/api-paste.ini
1698+ /tmp/templates/havana/api-paste.ini
1699+
1700+ Since it was registered with the grizzly release, it first searches
1701+ the grizzly directory for nova.conf, then the templates dir.
1702+
1703+ When writing api-paste.ini, it will find the template in the grizzly
1704+ directory.
1705+
1706+ If the object were created with folsom, it would fall back to the
1707+ base templates dir for its api-paste.ini template.
1708+
1709+ This system should help manage changes in config files through
1710+ openstack releases, allowing charms to fall back to the most recently
1711+ updated config template for a given release.
1712+
1713+ The haproxy.conf, since it is not shipped in the templates dir, will
1714+ be loaded from the module directory's template directory, eg
1715+ $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
1716+ us to ship common templates (haproxy, apache) with the helpers.
1717+
1718+ Context generators
1719+ ---------------------------------------
1720+ Context generators are used to generate template contexts during hook
1721+ execution. Doing so may require inspecting service relations, charm
1722+ config, etc. When registered, a config file is associated with a list
1723+ of generators. When a template is rendered and written, all context
1724+ generators are called in a chain to generate the context dictionary
1725+ passed to the jinja2 template. See context.py for more info.
1726+ """
1727+ def __init__(self, templates_dir, openstack_release):
1728+ if not os.path.isdir(templates_dir):
1729+ log('Could not locate templates dir %s' % templates_dir,
1730+ level=ERROR)
1731+ raise OSConfigException
1732+
1733+ self.templates_dir = templates_dir
1734+ self.openstack_release = openstack_release
1735+ self.templates = {}
1736+ self._tmpl_env = None
1737+
1738+ if None in [Environment, ChoiceLoader, FileSystemLoader]:
1739+ # if this code is running, the object is created pre-install hook.
1740+ # jinja2 shouldn't get touched until the module is reloaded on next
1741+ # hook execution, with proper jinja2 bits successfully imported.
1742+ apt_install('python-jinja2')
1743+
1744+ def register(self, config_file, contexts):
1745+ """
1746+ Register a config file with a list of context generators to be called
1747+ during rendering.
1748+ """
1749+ self.templates[config_file] = OSConfigTemplate(config_file=config_file,
1750+ contexts=contexts)
1751+ log('Registered config file: %s' % config_file, level=INFO)
1752+
1753+ def _get_tmpl_env(self):
1754+ if not self._tmpl_env:
1755+ loader = get_loader(self.templates_dir, self.openstack_release)
1756+ self._tmpl_env = Environment(loader=loader)
1757+
1758+ def _get_template(self, template):
1759+ self._get_tmpl_env()
1760+ template = self._tmpl_env.get_template(template)
1761+ log('Loaded template from %s' % template.filename, level=INFO)
1762+ return template
1763+
1764+ def render(self, config_file):
1765+ if config_file not in self.templates:
1766+ log('Config not registered: %s' % config_file, level=ERROR)
1767+ raise OSConfigException
1768+ ctxt = self.templates[config_file].context()
1769+
1770+ _tmpl = os.path.basename(config_file)
1771+ try:
1772+ template = self._get_template(_tmpl)
1773+ except exceptions.TemplateNotFound:
1774+ # if no template is found with basename, try looking for it
1775+ # using a munged full path, eg:
1776+ # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
1777+ _tmpl = '_'.join(config_file.split('/')[1:])
1778+ try:
1779+ template = self._get_template(_tmpl)
1780+ except exceptions.TemplateNotFound as e:
1781+                log('Could not load template from %s using %s or %s.' %
1782+ (self.templates_dir, os.path.basename(config_file), _tmpl),
1783+ level=ERROR)
1784+ raise e
1785+
1786+ log('Rendering from template: %s' % _tmpl, level=INFO)
1787+ return template.render(ctxt)
1788+
1789+ def write(self, config_file):
1790+ """
1791+ Write a single config file, raises if config file is not registered.
1792+ """
1793+ if config_file not in self.templates:
1794+ log('Config not registered: %s' % config_file, level=ERROR)
1795+ raise OSConfigException
1796+
1797+ _out = self.render(config_file)
1798+
1799+ with open(config_file, 'wb') as out:
1800+ out.write(_out)
1801+
1802+        log('Wrote config file %s from template.' % config_file, level=INFO)
1803+
1804+ def write_all(self):
1805+ """
1806+ Write out all registered config files.
1807+ """
1808+ [self.write(k) for k in self.templates.iterkeys()]
1809+
1810+ def set_release(self, openstack_release):
1811+ """
1812+ Resets the template environment and generates a new template loader
1813+        based on the new openstack release.
1814+ """
1815+ self._tmpl_env = None
1816+ self.openstack_release = openstack_release
1817+ self._get_tmpl_env()
1818+
1819+ def complete_contexts(self):
1820+ '''
1821+ Returns a list of context interfaces that yield a complete context.
1822+ '''
1823+ interfaces = []
1824+ [interfaces.extend(i.complete_contexts())
1825+ for i in self.templates.itervalues()]
1826+ return interfaces
1827
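Example: a charm's upgrade path can drive the renderer above directly; a minimal sketch (the template dir, context class and release names are illustrative, not taken from this charm):

    from charmhelpers.contrib.openstack import context, templating

    # build a renderer for the currently installed release
    configs = templating.OSConfigRenderer(templates_dir='templates/',
                                          openstack_release='folsom')
    configs.register('/etc/nova/nova.conf', [context.AMQPContext()])

    # after an OpenStack upgrade, point the loader at the new release's
    # template directories and re-render everything that was registered
    configs.set_release(openstack_release='grizzly')
    configs.write_all()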
1828=== added file 'hooks/charmhelpers/contrib/openstack/utils.py'
1829--- hooks/charmhelpers/contrib/openstack/utils.py 1970-01-01 00:00:00 +0000
1830+++ hooks/charmhelpers/contrib/openstack/utils.py 2013-10-15 01:35:28 +0000
1831@@ -0,0 +1,365 @@
1832+#!/usr/bin/python
1833+
1834+# Common python helper functions used for OpenStack charms.
1835+from collections import OrderedDict
1836+
1837+import apt_pkg as apt
1838+import subprocess
1839+import os
1840+import socket
1841+import sys
1842+
1843+from charmhelpers.core.hookenv import (
1844+ config,
1845+ log as juju_log,
1846+ charm_dir,
1847+)
1848+
1849+from charmhelpers.core.host import (
1850+ lsb_release,
1851+)
1852+
1853+from charmhelpers.fetch import (
1854+ apt_install,
1855+)
1856+
1857+CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
1858+CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
1859+
1860+UBUNTU_OPENSTACK_RELEASE = OrderedDict([
1861+ ('oneiric', 'diablo'),
1862+ ('precise', 'essex'),
1863+ ('quantal', 'folsom'),
1864+ ('raring', 'grizzly'),
1865+ ('saucy', 'havana'),
1866+])
1867+
1868+
1869+OPENSTACK_CODENAMES = OrderedDict([
1870+ ('2011.2', 'diablo'),
1871+ ('2012.1', 'essex'),
1872+ ('2012.2', 'folsom'),
1873+ ('2013.1', 'grizzly'),
1874+ ('2013.2', 'havana'),
1875+ ('2014.1', 'icehouse'),
1876+])
1877+
1878+# The ugly duckling
1879+SWIFT_CODENAMES = OrderedDict([
1880+ ('1.4.3', 'diablo'),
1881+ ('1.4.8', 'essex'),
1882+ ('1.7.4', 'folsom'),
1883+ ('1.8.0', 'grizzly'),
1884+ ('1.7.7', 'grizzly'),
1885+ ('1.7.6', 'grizzly'),
1886+ ('1.10.0', 'havana'),
1887+ ('1.9.1', 'havana'),
1888+ ('1.9.0', 'havana'),
1889+])
1890+
1891+
1892+def error_out(msg):
1893+ juju_log("FATAL ERROR: %s" % msg, level='ERROR')
1894+ sys.exit(1)
1895+
1896+
1897+def get_os_codename_install_source(src):
1898+ '''Derive OpenStack release codename from a given installation source.'''
1899+ ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
1900+ rel = ''
1901+ if src == 'distro':
1902+ try:
1903+ rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
1904+ except KeyError:
1905+ e = 'Could not derive openstack release for '\
1906+ 'this Ubuntu release: %s' % ubuntu_rel
1907+ error_out(e)
1908+ return rel
1909+
1910+ if src.startswith('cloud:'):
1911+ ca_rel = src.split(':')[1]
1912+ ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
1913+ return ca_rel
1914+
1915+ # Best guess match based on deb string provided
1916+ if src.startswith('deb') or src.startswith('ppa'):
1917+ for k, v in OPENSTACK_CODENAMES.iteritems():
1918+ if v in src:
1919+ return v
1920+
1921+
1922+def get_os_version_install_source(src):
1923+ codename = get_os_codename_install_source(src)
1924+ return get_os_version_codename(codename)
1925+
1926+
1927+def get_os_codename_version(vers):
1928+ '''Determine OpenStack codename from version number.'''
1929+ try:
1930+ return OPENSTACK_CODENAMES[vers]
1931+ except KeyError:
1932+ e = 'Could not determine OpenStack codename for version %s' % vers
1933+ error_out(e)
1934+
1935+
1936+def get_os_version_codename(codename):
1937+ '''Determine OpenStack version number from codename.'''
1938+ for k, v in OPENSTACK_CODENAMES.iteritems():
1939+ if v == codename:
1940+ return k
1941+ e = 'Could not derive OpenStack version for '\
1942+ 'codename: %s' % codename
1943+ error_out(e)
1944+
1945+
1946+def get_os_codename_package(package, fatal=True):
1947+ '''Derive OpenStack release codename from an installed package.'''
1948+ apt.init()
1949+ cache = apt.Cache()
1950+
1951+ try:
1952+ pkg = cache[package]
1953+    except KeyError:
1954+ if not fatal:
1955+ return None
1956+ # the package is unknown to the current apt cache.
1957+ e = 'Could not determine version of package with no installation '\
1958+ 'candidate: %s' % package
1959+ error_out(e)
1960+
1961+ if not pkg.current_ver:
1962+ if not fatal:
1963+ return None
1964+ # package is known, but no version is currently installed.
1965+ e = 'Could not determine version of uninstalled package: %s' % package
1966+ error_out(e)
1967+
1968+ vers = apt.upstream_version(pkg.current_ver.ver_str)
1969+
1970+ try:
1971+ if 'swift' in pkg.name:
1972+ swift_vers = vers[:5]
1973+ if swift_vers not in SWIFT_CODENAMES:
1974+ # Deal with 1.10.0 upward
1975+ swift_vers = vers[:6]
1976+ return SWIFT_CODENAMES[swift_vers]
1977+ else:
1978+ vers = vers[:6]
1979+ return OPENSTACK_CODENAMES[vers]
1980+ except KeyError:
1981+ e = 'Could not determine OpenStack codename for version %s' % vers
1982+ error_out(e)
1983+
1984+
1985+def get_os_version_package(pkg, fatal=True):
1986+ '''Derive OpenStack version number from an installed package.'''
1987+ codename = get_os_codename_package(pkg, fatal=fatal)
1988+
1989+ if not codename:
1990+ return None
1991+
1992+ if 'swift' in pkg:
1993+ vers_map = SWIFT_CODENAMES
1994+ else:
1995+ vers_map = OPENSTACK_CODENAMES
1996+
1997+ for version, cname in vers_map.iteritems():
1998+ if cname == codename:
1999+ return version
2000+ #e = "Could not determine OpenStack version for package: %s" % pkg
2001+ #error_out(e)
2002+
2003+
2004+os_rel = None
2005+
2006+
2007+def os_release(package, base='essex'):
2008+ '''
2009+ Returns OpenStack release codename from a cached global.
2010+    If the codename cannot be determined from either an installed package or
2011+ the installation source, the earliest release supported by the charm should
2012+ be returned.
2013+ '''
2014+ global os_rel
2015+ if os_rel:
2016+ return os_rel
2017+ os_rel = (get_os_codename_package(package, fatal=False) or
2018+ get_os_codename_install_source(config('openstack-origin')) or
2019+ base)
2020+ return os_rel
2021+
2022+
2023+def import_key(keyid):
2024+ cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \
2025+ "--recv-keys %s" % keyid
2026+ try:
2027+ subprocess.check_call(cmd.split(' '))
2028+ except subprocess.CalledProcessError:
2029+ error_out("Error importing repo key %s" % keyid)
2030+
2031+
2032+def configure_installation_source(rel):
2033+ '''Configure apt installation source.'''
2034+ if rel == 'distro':
2035+ return
2036+ elif rel[:4] == "ppa:":
2037+ src = rel
2038+ subprocess.check_call(["add-apt-repository", "-y", src])
2039+ elif rel[:3] == "deb":
2040+ l = len(rel.split('|'))
2041+ if l == 2:
2042+ src, key = rel.split('|')
2043+            juju_log("Importing repo key from keyserver for %s" % src)
2044+ import_key(key)
2045+ elif l == 1:
2046+ src = rel
2047+ with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
2048+ f.write(src)
2049+ elif rel[:6] == 'cloud:':
2050+ ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
2051+ rel = rel.split(':')[1]
2052+ u_rel = rel.split('-')[0]
2053+ ca_rel = rel.split('-')[1]
2054+
2055+ if u_rel != ubuntu_rel:
2056+ e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
2057+ 'version (%s)' % (ca_rel, ubuntu_rel)
2058+ error_out(e)
2059+
2060+ if 'staging' in ca_rel:
2061+ # staging is just a regular PPA.
2062+ os_rel = ca_rel.split('/')[0]
2063+ ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
2064+ cmd = 'add-apt-repository -y %s' % ppa
2065+ subprocess.check_call(cmd.split(' '))
2066+ return
2067+
2068+ # map charm config options to actual archive pockets.
2069+ pockets = {
2070+ 'folsom': 'precise-updates/folsom',
2071+ 'folsom/updates': 'precise-updates/folsom',
2072+ 'folsom/proposed': 'precise-proposed/folsom',
2073+ 'grizzly': 'precise-updates/grizzly',
2074+ 'grizzly/updates': 'precise-updates/grizzly',
2075+ 'grizzly/proposed': 'precise-proposed/grizzly',
2076+ 'havana': 'precise-updates/havana',
2077+ 'havana/updates': 'precise-updates/havana',
2078+ 'havana/proposed': 'precise-proposed/havana',
2079+ }
2080+
2081+ try:
2082+ pocket = pockets[ca_rel]
2083+ except KeyError:
2084+ e = 'Invalid Cloud Archive release specified: %s' % rel
2085+ error_out(e)
2086+
2087+ src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
2088+ apt_install('ubuntu-cloud-keyring', fatal=True)
2089+
2090+ with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
2091+ f.write(src)
2092+ else:
2093+        error_out("Invalid openstack-origin specified: %s" % rel)
2094+
2095+
2096+def save_script_rc(script_path="scripts/scriptrc", **env_vars):
2097+ """
2098+ Write an rc file in the charm-delivered directory containing
2099+ exported environment variables provided by env_vars. Any charm scripts run
2100+ outside the juju hook environment can source this scriptrc to obtain
2101+ updated config information necessary to perform health checks or
2102+ service changes.
2103+ """
2104+ juju_rc_path = "%s/%s" % (charm_dir(), script_path)
2105+ if not os.path.exists(os.path.dirname(juju_rc_path)):
2106+ os.mkdir(os.path.dirname(juju_rc_path))
2107+ with open(juju_rc_path, 'wb') as rc_script:
2108+ rc_script.write(
2109+ "#!/bin/bash\n")
2110+ [rc_script.write('export %s=%s\n' % (u, p))
2111+ for u, p in env_vars.iteritems() if u != "script_path"]
2112+
2113+
2114+def openstack_upgrade_available(package):
2115+ """
2116+ Determines if an OpenStack upgrade is available from installation
2117+ source, based on version of installed package.
2118+
2119+ :param package: str: Name of installed package.
2120+
2121+    :returns: bool: True if the configured installation source offers
2122+                    a newer version of the package.
2123+
2124+ """
2125+
2126+ src = config('openstack-origin')
2127+ cur_vers = get_os_version_package(package)
2128+ available_vers = get_os_version_install_source(src)
2129+ apt.init()
2130+ return apt.version_compare(available_vers, cur_vers) == 1
2131+
2132+
2133+def is_ip(address):
2134+ """
2135+ Returns True if address is a valid IP address.
2136+ """
2137+ try:
2138+ # Test to see if already an IPv4 address
2139+ socket.inet_aton(address)
2140+ return True
2141+ except socket.error:
2142+ return False
2143+
2144+
2145+def ns_query(address):
2146+ try:
2147+ import dns.resolver
2148+ except ImportError:
2149+ apt_install('python-dnspython')
2150+ import dns.resolver
2151+
2152+ if isinstance(address, dns.name.Name):
2153+ rtype = 'PTR'
2154+    else:
2155+        rtype = 'A'
2156+
2157+ answers = dns.resolver.query(address, rtype)
2158+ if answers:
2159+ return str(answers[0])
2160+ return None
2161+
2162+
2163+def get_host_ip(hostname):
2164+ """
2165+ Resolves the IP for a given hostname, or returns
2166+ the input if it is already an IP.
2167+ """
2168+ if is_ip(hostname):
2169+ return hostname
2170+
2171+ return ns_query(hostname)
2172+
2173+
2174+def get_hostname(address):
2175+ """
2176+ Resolves hostname for given IP, or returns the input
2177+ if it is already a hostname.
2178+ """
2179+ if not is_ip(address):
2180+ return address
2181+
2182+ try:
2183+ import dns.reversename
2184+ except ImportError:
2185+ apt_install('python-dnspython')
2186+ import dns.reversename
2187+
2188+ rev = dns.reversename.from_address(address)
2189+ result = ns_query(rev)
2190+ if not result:
2191+ return None
2192+
2193+ # strip trailing .
2194+ if result.endswith('.'):
2195+ return result[:-1]
2196+ return result
2197
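A config-changed hook might chain these helpers roughly as follows (a sketch; the package name and origin value are illustrative assumptions):

    from charmhelpers.contrib.openstack.utils import (
        configure_installation_source,
        openstack_upgrade_available,
        os_release,
    )

    # write apt sources for the configured origin, e.g. a Cloud Archive pocket
    configure_installation_source('cloud:precise-havana')

    # compare the installed package against the version the origin offers
    if openstack_upgrade_available('nova-common'):
        # a newer OpenStack is available; an upgrade could be performed here
        pass

    release = os_release('nova-common')  # e.g. 'havana'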
2198=== added directory 'hooks/charmhelpers/contrib/storage'
2199=== added file 'hooks/charmhelpers/contrib/storage/__init__.py'
2200=== added directory 'hooks/charmhelpers/contrib/storage/linux'
2201=== added file 'hooks/charmhelpers/contrib/storage/linux/__init__.py'
2202=== added file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
2203--- hooks/charmhelpers/contrib/storage/linux/ceph.py 1970-01-01 00:00:00 +0000
2204+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2013-10-15 01:35:28 +0000
2205@@ -0,0 +1,359 @@
2206+#
2207+# Copyright 2012 Canonical Ltd.
2208+#
2209+# This file is sourced from lp:openstack-charm-helpers
2210+#
2211+# Authors:
2212+# James Page <james.page@ubuntu.com>
2213+# Adam Gandelman <adamg@ubuntu.com>
2214+#
2215+
2216+import os
2217+import shutil
2218+import json
2219+import time
2220+
2221+from subprocess import (
2222+ check_call,
2223+ check_output,
2224+ CalledProcessError
2225+)
2226+
2227+from charmhelpers.core.hookenv import (
2228+ relation_get,
2229+ relation_ids,
2230+ related_units,
2231+ log,
2232+ INFO,
2233+ WARNING,
2234+ ERROR
2235+)
2236+
2237+from charmhelpers.core.host import (
2238+ mount,
2239+ mounts,
2240+ service_start,
2241+ service_stop,
2242+ service_running,
2243+ umount,
2244+)
2245+
2246+from charmhelpers.fetch import (
2247+ apt_install,
2248+)
2249+
2250+KEYRING = '/etc/ceph/ceph.client.{}.keyring'
2251+KEYFILE = '/etc/ceph/ceph.client.{}.key'
2252+
2253+CEPH_CONF = """[global]
2254+ auth supported = {auth}
2255+ keyring = {keyring}
2256+ mon host = {mon_hosts}
2257+"""
2258+
2259+
2260+def install():
2261+ ''' Basic Ceph client installation '''
2262+ ceph_dir = "/etc/ceph"
2263+ if not os.path.exists(ceph_dir):
2264+ os.mkdir(ceph_dir)
2265+ apt_install('ceph-common', fatal=True)
2266+
2267+
2268+def rbd_exists(service, pool, rbd_img):
2269+ ''' Check to see if a RADOS block device exists '''
2270+ try:
2271+ out = check_output(['rbd', 'list', '--id', service,
2272+ '--pool', pool])
2273+ except CalledProcessError:
2274+ return False
2275+ else:
2276+ return rbd_img in out
2277+
2278+
2279+def create_rbd_image(service, pool, image, sizemb):
2280+ ''' Create a new RADOS block device '''
2281+ cmd = [
2282+ 'rbd',
2283+ 'create',
2284+ image,
2285+ '--size',
2286+ str(sizemb),
2287+ '--id',
2288+ service,
2289+ '--pool',
2290+ pool
2291+ ]
2292+ check_call(cmd)
2293+
2294+
2295+def pool_exists(service, name):
2296+ ''' Check to see if a RADOS pool already exists '''
2297+ try:
2298+ out = check_output(['rados', '--id', service, 'lspools'])
2299+ except CalledProcessError:
2300+ return False
2301+ else:
2302+ return name in out
2303+
2304+
2305+def get_osds(service):
2306+ '''
2307+ Return a list of all Ceph Object Storage Daemons
2308+ currently in the cluster
2309+ '''
2310+ return json.loads(check_output(['ceph', '--id', service,
2311+ 'osd', 'ls', '--format=json']))
2312+
2313+
2314+def create_pool(service, name, replicas=2):
2315+ ''' Create a new RADOS pool '''
2316+ if pool_exists(service, name):
2317+ log("Ceph pool {} already exists, skipping creation".format(name),
2318+ level=WARNING)
2319+ return
2320+ # Calculate the number of placement groups based
2321+ # on upstream recommended best practices.
2322+ pgnum = (len(get_osds(service)) * 100 / replicas)
2323+ cmd = [
2324+ 'ceph', '--id', service,
2325+ 'osd', 'pool', 'create',
2326+ name, str(pgnum)
2327+ ]
2328+ check_call(cmd)
2329+ cmd = [
2330+ 'ceph', '--id', service,
2331+ 'osd', 'pool', 'set', name,
2332+ 'size', str(replicas)
2333+ ]
2334+ check_call(cmd)
2335+
2336+
2337+def delete_pool(service, name):
2338+ ''' Delete a RADOS pool from ceph '''
2339+ cmd = [
2340+ 'ceph', '--id', service,
2341+ 'osd', 'pool', 'delete',
2342+ name, '--yes-i-really-really-mean-it'
2343+ ]
2344+ check_call(cmd)
2345+
2346+
2347+def _keyfile_path(service):
2348+ return KEYFILE.format(service)
2349+
2350+
2351+def _keyring_path(service):
2352+ return KEYRING.format(service)
2353+
2354+
2355+def create_keyring(service, key):
2356+ ''' Create a new Ceph keyring containing key'''
2357+ keyring = _keyring_path(service)
2358+ if os.path.exists(keyring):
2359+ log('ceph: Keyring exists at %s.' % keyring, level=WARNING)
2360+ return
2361+ cmd = [
2362+ 'ceph-authtool',
2363+ keyring,
2364+ '--create-keyring',
2365+ '--name=client.{}'.format(service),
2366+ '--add-key={}'.format(key)
2367+ ]
2368+ check_call(cmd)
2369+    log('ceph: Created new keyring at %s.' % keyring, level=INFO)
2370+
2371+
2372+def create_key_file(service, key):
2373+ ''' Create a file containing key '''
2374+ keyfile = _keyfile_path(service)
2375+ if os.path.exists(keyfile):
2376+ log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING)
2377+ return
2378+ with open(keyfile, 'w') as fd:
2379+ fd.write(key)
2380+ log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
2381+
2382+
2383+def get_ceph_nodes():
2384+    ''' Query named relation 'ceph' to determine current nodes '''
2385+ hosts = []
2386+ for r_id in relation_ids('ceph'):
2387+ for unit in related_units(r_id):
2388+ hosts.append(relation_get('private-address', unit=unit, rid=r_id))
2389+ return hosts
2390+
2391+
2392+def configure(service, key, auth):
2393+ ''' Perform basic configuration of Ceph '''
2394+ create_keyring(service, key)
2395+ create_key_file(service, key)
2396+ hosts = get_ceph_nodes()
2397+ with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
2398+ ceph_conf.write(CEPH_CONF.format(auth=auth,
2399+ keyring=_keyring_path(service),
2400+ mon_hosts=",".join(map(str, hosts))))
2401+ modprobe('rbd')
2402+
2403+
2404+def image_mapped(name):
2405+ ''' Determine whether a RADOS block device is mapped locally '''
2406+ try:
2407+ out = check_output(['rbd', 'showmapped'])
2408+ except CalledProcessError:
2409+ return False
2410+ else:
2411+ return name in out
2412+
2413+
2414+def map_block_storage(service, pool, image):
2415+ ''' Map a RADOS block device for local use '''
2416+ cmd = [
2417+ 'rbd',
2418+ 'map',
2419+ '{}/{}'.format(pool, image),
2420+ '--user',
2421+ service,
2422+ '--secret',
2423+ _keyfile_path(service),
2424+ ]
2425+ check_call(cmd)
2426+
2427+
2428+def filesystem_mounted(fs):
2429+    ''' Determine whether a filesystem is already mounted '''
2430+ return fs in [f for f, m in mounts()]
2431+
2432+
2433+def make_filesystem(blk_device, fstype='ext4', timeout=10):
2434+ ''' Make a new filesystem on the specified block device '''
2435+ count = 0
2436+ e_noent = os.errno.ENOENT
2437+ while not os.path.exists(blk_device):
2438+ if count >= timeout:
2439+ log('ceph: gave up waiting on block device %s' % blk_device,
2440+ level=ERROR)
2441+ raise IOError(e_noent, os.strerror(e_noent), blk_device)
2442+ log('ceph: waiting for block device %s to appear' % blk_device,
2443+ level=INFO)
2444+ count += 1
2445+ time.sleep(1)
2446+ else:
2447+ log('ceph: Formatting block device %s as filesystem %s.' %
2448+ (blk_device, fstype), level=INFO)
2449+ check_call(['mkfs', '-t', fstype, blk_device])
2450+
2451+
2452+def place_data_on_block_device(blk_device, data_src_dst):
2453+ ''' Migrate data in data_src_dst to blk_device and then remount '''
2454+ # mount block device into /mnt
2455+ mount(blk_device, '/mnt')
2456+ # copy data to /mnt
2457+ copy_files(data_src_dst, '/mnt')
2458+ # umount block device
2459+ umount('/mnt')
2460+ # Grab user/group ID's from original source
2461+ _dir = os.stat(data_src_dst)
2462+ uid = _dir.st_uid
2463+ gid = _dir.st_gid
2464+ # re-mount where the data should originally be
2465+ # TODO: persist is currently a NO-OP in core.host
2466+ mount(blk_device, data_src_dst, persist=True)
2467+ # ensure original ownership of new mount.
2468+ os.chown(data_src_dst, uid, gid)
2469+
2470+
2471+# TODO: re-use
2472+def modprobe(module):
2473+ ''' Load a kernel module and configure for auto-load on reboot '''
2474+    log('ceph: Loading kernel module %s' % module, level=INFO)
2475+ cmd = ['modprobe', module]
2476+ check_call(cmd)
2477+ with open('/etc/modules', 'r+') as modules:
2478+ if module not in modules.read():
2479+ modules.write(module)
2480+
2481+
2482+def copy_files(src, dst, symlinks=False, ignore=None):
2483+ ''' Copy files from src to dst '''
2484+ for item in os.listdir(src):
2485+ s = os.path.join(src, item)
2486+ d = os.path.join(dst, item)
2487+ if os.path.isdir(s):
2488+ shutil.copytree(s, d, symlinks, ignore)
2489+ else:
2490+ shutil.copy2(s, d)
2491+
2492+
2493+def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
2494+ blk_device, fstype, system_services=[]):
2495+ """
2496+ NOTE: This function must only be called from a single service unit for
2497+ the same rbd_img otherwise data loss will occur.
2498+
2499+ Ensures given pool and RBD image exists, is mapped to a block device,
2500+ and the device is formatted and mounted at the given mount_point.
2501+
2502+ If formatting a device for the first time, data existing at mount_point
2503+ will be migrated to the RBD device before being re-mounted.
2504+
2505+ All services listed in system_services will be stopped prior to data
2506+ migration and restarted when complete.
2507+ """
2508+ # Ensure pool, RBD image, RBD mappings are in place.
2509+ if not pool_exists(service, pool):
2510+ log('ceph: Creating new pool {}.'.format(pool))
2511+ create_pool(service, pool)
2512+
2513+ if not rbd_exists(service, pool, rbd_img):
2514+ log('ceph: Creating RBD image ({}).'.format(rbd_img))
2515+ create_rbd_image(service, pool, rbd_img, sizemb)
2516+
2517+ if not image_mapped(rbd_img):
2518+ log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))
2519+ map_block_storage(service, pool, rbd_img)
2520+
2521+ # make file system
2522+ # TODO: What happens if for whatever reason this is run again and
2523+ # the data is already in the rbd device and/or is mounted??
2524+ # When it is mounted already, it will fail to make the fs
2525+ # XXX: This is really sketchy! Need to at least add an fstab entry
2526+ # otherwise this hook will blow away existing data if its executed
2527+ # after a reboot.
2528+ if not filesystem_mounted(mount_point):
2529+ make_filesystem(blk_device, fstype)
2530+
2531+ for svc in system_services:
2532+ if service_running(svc):
2533+            log('ceph: Stopping service {} prior to migrating data.'
2534+ .format(svc))
2535+ service_stop(svc)
2536+
2537+ place_data_on_block_device(blk_device, mount_point)
2538+
2539+ for svc in system_services:
2540+ log('ceph: Starting service {} after migrating data.'
2541+ .format(svc))
2542+ service_start(svc)
2543+
2544+
2545+def ensure_ceph_keyring(service, user=None, group=None):
2546+ '''
2547+ Ensures a ceph keyring is created for a named service
2548+ and optionally ensures user and group ownership.
2549+
2550+ Returns False if no ceph key is available in relation state.
2551+ '''
2552+ key = None
2553+ for rid in relation_ids('ceph'):
2554+ for unit in related_units(rid):
2555+ key = relation_get('key', rid=rid, unit=unit)
2556+ if key:
2557+ break
2558+ if not key:
2559+ return False
2560+ create_keyring(service=service, key=key)
2561+ keyring = _keyring_path(service)
2562+ if user and group:
2563+ check_call(['chown', '%s.%s' % (user, group), keyring])
2564+ return True
2565
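Taken together, a ceph-relation-changed hook could use these helpers roughly like this (a sketch; the cephx client name, pool, image, size and paths are illustrative assumptions):

    from charmhelpers.contrib.storage.linux.ceph import (
        ensure_ceph_keyring,
        ensure_ceph_storage,
    )

    SERVICE = 'nova-compute'  # hypothetical cephx client/service name

    def ceph_changed():
        if not ensure_ceph_keyring(service=SERVICE):
            # no key in relation state yet; retry on a later hook
            return
        ensure_ceph_storage(service=SERVICE, pool='nova', rbd_img='nova',
                            sizemb=10240, mount_point='/var/lib/nova',
                            blk_device='/dev/rbd1', fstype='ext4',
                            system_services=['nova-compute'])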
2566=== added file 'hooks/charmhelpers/contrib/storage/linux/loopback.py'
2567--- hooks/charmhelpers/contrib/storage/linux/loopback.py 1970-01-01 00:00:00 +0000
2568+++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2013-10-15 01:35:28 +0000
2569@@ -0,0 +1,62 @@
2570+
2571+import os
2572+import re
2573+
2574+from subprocess import (
2575+ check_call,
2576+ check_output,
2577+)
2578+
2579+
2580+##################################################
2581+# loopback device helpers.
2582+##################################################
2583+def loopback_devices():
2584+ '''
2585+ Parse through 'losetup -a' output to determine currently mapped
2586+ loopback devices. Output is expected to look like:
2587+
2588+ /dev/loop0: [0807]:961814 (/tmp/my.img)
2589+
2590+ :returns: dict: a dict mapping {loopback_dev: backing_file}
2591+ '''
2592+ loopbacks = {}
2593+ cmd = ['losetup', '-a']
2594+ devs = [d.strip().split(' ') for d in
2595+ check_output(cmd).splitlines() if d != '']
2596+ for dev, _, f in devs:
2597+        loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
2598+ return loopbacks
2599+
2600+
2601+def create_loopback(file_path):
2602+ '''
2603+ Create a loopback device for a given backing file.
2604+
2605+ :returns: str: Full path to new loopback device (eg, /dev/loop0)
2606+ '''
2607+ file_path = os.path.abspath(file_path)
2608+ check_call(['losetup', '--find', file_path])
2609+ for d, f in loopback_devices().iteritems():
2610+ if f == file_path:
2611+ return d
2612+
2613+
2614+def ensure_loopback_device(path, size):
2615+ '''
2616+ Ensure a loopback device exists for a given backing file path and size.
2617+    If a loopback device is not already mapped to the file, a new one will be created.
2618+
2619+ TODO: Confirm size of found loopback device.
2620+
2621+ :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
2622+ '''
2623+ for d, f in loopback_devices().iteritems():
2624+ if f == path:
2625+ return d
2626+
2627+ if not os.path.exists(path):
2628+ cmd = ['truncate', '--size', size, path]
2629+ check_call(cmd)
2630+
2631+ return create_loopback(path)
2632
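For instance (path and size are illustrative; the size string is passed straight through to truncate(1), so suffixes such as '5G' are expected):

    from charmhelpers.contrib.storage.linux.loopback import (
        ensure_loopback_device,
    )

    # creates /srv/images.img if needed and maps it to a free loop device
    dev = ensure_loopback_device('/srv/images.img', '5G')
    # dev is e.g. '/dev/loop0', ready to be formatted or attached elsewhere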
2633=== added file 'hooks/charmhelpers/contrib/storage/linux/lvm.py'
2634--- hooks/charmhelpers/contrib/storage/linux/lvm.py 1970-01-01 00:00:00 +0000
2635+++ hooks/charmhelpers/contrib/storage/linux/lvm.py 2013-10-15 01:35:28 +0000
2636@@ -0,0 +1,88 @@
2637+from subprocess import (
2638+ CalledProcessError,
2639+ check_call,
2640+ check_output,
2641+ Popen,
2642+ PIPE,
2643+)
2644+
2645+
2646+##################################################
2647+# LVM helpers.
2648+##################################################
2649+def deactivate_lvm_volume_group(block_device):
2650+ '''
2651+    Deactivate any volume group associated with an LVM physical volume.
2652+
2653+ :param block_device: str: Full path to LVM physical volume
2654+ '''
2655+ vg = list_lvm_volume_group(block_device)
2656+ if vg:
2657+ cmd = ['vgchange', '-an', vg]
2658+ check_call(cmd)
2659+
2660+
2661+def is_lvm_physical_volume(block_device):
2662+ '''
2663+ Determine whether a block device is initialized as an LVM PV.
2664+
2665+ :param block_device: str: Full path of block device to inspect.
2666+
2667+ :returns: boolean: True if block device is a PV, False if not.
2668+ '''
2669+ try:
2670+ check_output(['pvdisplay', block_device])
2671+ return True
2672+ except CalledProcessError:
2673+ return False
2674+
2675+
2676+def remove_lvm_physical_volume(block_device):
2677+ '''
2678+ Remove LVM PV signatures from a given block device.
2679+
2680+ :param block_device: str: Full path of block device to scrub.
2681+ '''
2682+ p = Popen(['pvremove', '-ff', block_device],
2683+ stdin=PIPE)
2684+ p.communicate(input='y\n')
2685+
2686+
2687+def list_lvm_volume_group(block_device):
2688+ '''
2689+ List LVM volume group associated with a given block device.
2690+
2691+ Assumes block device is a valid LVM PV.
2692+
2693+ :param block_device: str: Full path of block device to inspect.
2694+
2695+ :returns: str: Name of volume group associated with block device or None
2696+ '''
2697+ vg = None
2698+ pvd = check_output(['pvdisplay', block_device]).splitlines()
2699+ for l in pvd:
2700+ if l.strip().startswith('VG Name'):
2701+ vg = ' '.join(l.split()).split(' ').pop()
2702+ return vg
2703+
2704+
2705+def create_lvm_physical_volume(block_device):
2706+ '''
2707+ Initialize a block device as an LVM physical volume.
2708+
2709+ :param block_device: str: Full path of block device to initialize.
2710+
2711+ '''
2712+ check_call(['pvcreate', block_device])
2713+
2714+
2715+def create_lvm_volume_group(volume_group, block_device):
2716+ '''
2717+ Create an LVM volume group backed by a given block device.
2718+
2719+ Assumes block device has already been initialized as an LVM PV.
2720+
2721+ :param volume_group: str: Name of volume group to create.
2722+ :block_device: str: Full path of PV-initialized block device.
2723+ '''
2724+ check_call(['vgcreate', volume_group, block_device])
2725
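These compose into the usual reclaim-and-prepare flow for a block device, e.g. (device and volume group names are illustrative):

    from charmhelpers.contrib.storage.linux.lvm import (
        create_lvm_physical_volume,
        create_lvm_volume_group,
        deactivate_lvm_volume_group,
        is_lvm_physical_volume,
        remove_lvm_physical_volume,
    )

    dev = '/dev/vdb'
    if is_lvm_physical_volume(dev):
        # scrub any previous LVM use of the device before reclaiming it
        deactivate_lvm_volume_group(dev)
        remove_lvm_physical_volume(dev)
    create_lvm_physical_volume(dev)
    create_lvm_volume_group('cinder-volumes', dev)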
2726=== added file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
2727--- hooks/charmhelpers/contrib/storage/linux/utils.py 1970-01-01 00:00:00 +0000
2728+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2013-10-15 01:35:28 +0000
2729@@ -0,0 +1,25 @@
2730+from os import stat
2731+from stat import S_ISBLK
2732+
2733+from subprocess import (
2734+ check_call
2735+)
2736+
2737+
2738+def is_block_device(path):
2739+ '''
2740+ Confirm device at path is a valid block device node.
2741+
2742+ :returns: boolean: True if path is a block device, False if not.
2743+ '''
2744+ return S_ISBLK(stat(path).st_mode)
2745+
2746+
2747+def zap_disk(block_device):
2748+ '''
2749+    Clear a block device of its partition table. Relies on sgdisk, which is
2750+    installed as part of the 'gdisk' package in Ubuntu.
2751+
2752+ :param block_device: str: Full path of block device to clean.
2753+ '''
2754+ check_call(['sgdisk', '--zap-all', block_device])
2755
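A charm preparing a raw disk would typically guard zap_disk behind a sanity check (a sketch; the device path is illustrative):

    import os
    from charmhelpers.contrib.storage.linux.utils import (
        is_block_device,
        zap_disk,
    )

    dev = '/dev/vdb'
    if os.path.exists(dev) and is_block_device(dev):
        zap_disk(dev)  # destroys the partition table; irreversible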
2756=== added directory 'hooks/charmhelpers/core'
2757=== added file 'hooks/charmhelpers/core/__init__.py'
2758=== added file 'hooks/charmhelpers/core/hookenv.py'
2759--- hooks/charmhelpers/core/hookenv.py 1970-01-01 00:00:00 +0000
2760+++ hooks/charmhelpers/core/hookenv.py 2013-10-15 01:35:28 +0000
2761@@ -0,0 +1,340 @@
2762+"Interactions with the Juju environment"
2763+# Copyright 2013 Canonical Ltd.
2764+#
2765+# Authors:
2766+# Charm Helpers Developers <juju@lists.ubuntu.com>
2767+
2768+import os
2769+import json
2770+import yaml
2771+import subprocess
2772+import UserDict
2773+
2774+CRITICAL = "CRITICAL"
2775+ERROR = "ERROR"
2776+WARNING = "WARNING"
2777+INFO = "INFO"
2778+DEBUG = "DEBUG"
2779+MARKER = object()
2780+
2781+cache = {}
2782+
2783+
2784+def cached(func):
2785+ ''' Cache return values for multiple executions of func + args
2786+
2787+ For example:
2788+
2789+ @cached
2790+ def unit_get(attribute):
2791+ pass
2792+
2793+ unit_get('test')
2794+
2795+ will cache the result of unit_get + 'test' for future calls.
2796+ '''
2797+ def wrapper(*args, **kwargs):
2798+ global cache
2799+ key = str((func, args, kwargs))
2800+ try:
2801+ return cache[key]
2802+ except KeyError:
2803+ res = func(*args, **kwargs)
2804+ cache[key] = res
2805+ return res
2806+ return wrapper
2807+
2808+
2809+def flush(key):
2810+ ''' Flushes any entries from function cache where the
2811+ key is found in the function+args '''
2812+ flush_list = []
2813+ for item in cache:
2814+ if key in item:
2815+ flush_list.append(item)
2816+ for item in flush_list:
2817+ del cache[item]
2818+
2819+
2820+def log(message, level=None):
2821+ "Write a message to the juju log"
2822+ command = ['juju-log']
2823+ if level:
2824+ command += ['-l', level]
2825+ command += [message]
2826+ subprocess.call(command)
2827+
2828+
2829+class Serializable(UserDict.IterableUserDict):
2830+ "Wrapper, an object that can be serialized to yaml or json"
2831+
2832+ def __init__(self, obj):
2833+ # wrap the object
2834+ UserDict.IterableUserDict.__init__(self)
2835+ self.data = obj
2836+
2837+ def __getattr__(self, attr):
2838+ # See if this object has attribute.
2839+ if attr in ("json", "yaml", "data"):
2840+ return self.__dict__[attr]
2841+ # Check for attribute in wrapped object.
2842+ got = getattr(self.data, attr, MARKER)
2843+ if got is not MARKER:
2844+ return got
2845+ # Proxy to the wrapped object via dict interface.
2846+ try:
2847+ return self.data[attr]
2848+ except KeyError:
2849+ raise AttributeError(attr)
2850+
2851+ def __getstate__(self):
2852+ # Pickle as a standard dictionary.
2853+ return self.data
2854+
2855+ def __setstate__(self, state):
2856+ # Unpickle into our wrapper.
2857+ self.data = state
2858+
2859+ def json(self):
2860+ "Serialize the object to json"
2861+ return json.dumps(self.data)
2862+
2863+ def yaml(self):
2864+ "Serialize the object to yaml"
2865+ return yaml.dump(self.data)
2866+
2867+
2868+def execution_environment():
2869+ """A convenient bundling of the current execution context"""
2870+ context = {}
2871+ context['conf'] = config()
2872+ if relation_id():
2873+ context['reltype'] = relation_type()
2874+ context['relid'] = relation_id()
2875+ context['rel'] = relation_get()
2876+ context['unit'] = local_unit()
2877+ context['rels'] = relations()
2878+ context['env'] = os.environ
2879+ return context
2880+
2881+
2882+def in_relation_hook():
2883+ "Determine whether we're running in a relation hook"
2884+ return 'JUJU_RELATION' in os.environ
2885+
2886+
2887+def relation_type():
2888+ "The scope for the current relation hook"
2889+ return os.environ.get('JUJU_RELATION', None)
2890+
2891+
2892+def relation_id():
2893+ "The relation ID for the current relation hook"
2894+ return os.environ.get('JUJU_RELATION_ID', None)
2895+
2896+
2897+def local_unit():
2898+ "Local unit ID"
2899+ return os.environ['JUJU_UNIT_NAME']
2900+
2901+
2902+def remote_unit():
2903+ "The remote unit for the current relation hook"
2904+ return os.environ['JUJU_REMOTE_UNIT']
2905+
2906+
2907+def service_name():
2908+    "The name of the service group this unit belongs to"
2909+ return local_unit().split('/')[0]
2910+
2911+
2912+@cached
2913+def config(scope=None):
2914+ "Juju charm configuration"
2915+ config_cmd_line = ['config-get']
2916+ if scope is not None:
2917+ config_cmd_line.append(scope)
2918+ config_cmd_line.append('--format=json')
2919+ try:
2920+ return json.loads(subprocess.check_output(config_cmd_line))
2921+ except ValueError:
2922+ return None
2923+
2924+
2925+@cached
2926+def relation_get(attribute=None, unit=None, rid=None):
2927+ _args = ['relation-get', '--format=json']
2928+ if rid:
2929+ _args.append('-r')
2930+ _args.append(rid)
2931+ _args.append(attribute or '-')
2932+ if unit:
2933+ _args.append(unit)
2934+ try:
2935+ return json.loads(subprocess.check_output(_args))
2936+ except ValueError:
2937+ return None
2938+
2939+
2940+def relation_set(relation_id=None, relation_settings={}, **kwargs):
2941+ relation_cmd_line = ['relation-set']
2942+ if relation_id is not None:
2943+ relation_cmd_line.extend(('-r', relation_id))
2944+ for k, v in (relation_settings.items() + kwargs.items()):
2945+ if v is None:
2946+ relation_cmd_line.append('{}='.format(k))
2947+ else:
2948+ relation_cmd_line.append('{}={}'.format(k, v))
2949+ subprocess.check_call(relation_cmd_line)
2950+ # Flush cache of any relation-gets for local unit
2951+ flush(local_unit())
2952+
2953+
2954+@cached
2955+def relation_ids(reltype=None):
2956+ "A list of relation_ids"
2957+ reltype = reltype or relation_type()
2958+ relid_cmd_line = ['relation-ids', '--format=json']
2959+ if reltype is not None:
2960+ relid_cmd_line.append(reltype)
2961+    return json.loads(subprocess.check_output(relid_cmd_line)) or []
2963+
2964+
2965+@cached
2966+def related_units(relid=None):
2967+ "A list of related units"
2968+ relid = relid or relation_id()
2969+ units_cmd_line = ['relation-list', '--format=json']
2970+ if relid is not None:
2971+ units_cmd_line.extend(('-r', relid))
2972+ return json.loads(subprocess.check_output(units_cmd_line)) or []
2973+
2974+
2975+@cached
2976+def relation_for_unit(unit=None, rid=None):
2977+    "Get the json representation of a unit's relation"
2978+ unit = unit or remote_unit()
2979+ relation = relation_get(unit=unit, rid=rid)
2980+ for key in relation:
2981+ if key.endswith('-list'):
2982+ relation[key] = relation[key].split()
2983+ relation['__unit__'] = unit
2984+ return relation
2985+
2986+
2987+@cached
2988+def relations_for_id(relid=None):
2989+ "Get relations of a specific relation ID"
2990+ relation_data = []
2991+    relid = relid or relation_id()
2992+ for unit in related_units(relid):
2993+ unit_data = relation_for_unit(unit, relid)
2994+ unit_data['__relid__'] = relid
2995+ relation_data.append(unit_data)
2996+ return relation_data
2997+
2998+
2999+@cached
3000+def relations_of_type(reltype=None):
3001+ "Get relations of a specific type"
3002+ relation_data = []
3003+ reltype = reltype or relation_type()
3004+ for relid in relation_ids(reltype):
3005+ for relation in relations_for_id(relid):
3006+ relation['__relid__'] = relid
3007+ relation_data.append(relation)
3008+ return relation_data
3009+
3010+
3011+@cached
3012+def relation_types():
3013+ "Get a list of relation types supported by this charm"
3014+ charmdir = os.environ.get('CHARM_DIR', '')
3015+ mdf = open(os.path.join(charmdir, 'metadata.yaml'))
3016+ md = yaml.safe_load(mdf)
3017+ rel_types = []
3018+ for key in ('provides', 'requires', 'peers'):
3019+ section = md.get(key)
3020+ if section:
3021+ rel_types.extend(section.keys())
3022+ mdf.close()
3023+ return rel_types
3024+
3025+
3026+@cached
3027+def relations():
3028+ rels = {}
3029+ for reltype in relation_types():
3030+ relids = {}
3031+ for relid in relation_ids(reltype):
3032+ units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
3033+ for unit in related_units(relid):
3034+ reldata = relation_get(unit=unit, rid=relid)
3035+ units[unit] = reldata
3036+ relids[relid] = units
3037+ rels[reltype] = relids
3038+ return rels
3039+
3040+
3041+def open_port(port, protocol="TCP"):
3042+ "Open a service network port"
3043+ _args = ['open-port']
3044+ _args.append('{}/{}'.format(port, protocol))
3045+ subprocess.check_call(_args)
3046+
3047+
3048+def close_port(port, protocol="TCP"):
3049+ "Close a service network port"
3050+ _args = ['close-port']
3051+ _args.append('{}/{}'.format(port, protocol))
3052+ subprocess.check_call(_args)
3053+
3054+
3055+@cached
3056+def unit_get(attribute):
3057+ _args = ['unit-get', '--format=json', attribute]
3058+ try:
3059+ return json.loads(subprocess.check_output(_args))
3060+ except ValueError:
3061+ return None
3062+
3063+
3064+def unit_private_ip():
3065+ return unit_get('private-address')
3066+
3067+
3068+class UnregisteredHookError(Exception):
3069+ pass
3070+
3071+
3072+class Hooks(object):
3073+ def __init__(self):
3074+ super(Hooks, self).__init__()
3075+ self._hooks = {}
3076+
3077+ def register(self, name, function):
3078+ self._hooks[name] = function
3079+
3080+ def execute(self, args):
3081+ hook_name = os.path.basename(args[0])
3082+ if hook_name in self._hooks:
3083+ self._hooks[hook_name]()
3084+ else:
3085+ raise UnregisteredHookError(hook_name)
3086+
3087+ def hook(self, *hook_names):
3088+ def wrapper(decorated):
3089+ for hook_name in hook_names:
3090+ self.register(hook_name, decorated)
3091+            # no break above, so also register under the function's own name
3092+            self.register(decorated.__name__, decorated)
3093+            if '_' in decorated.__name__:
3094+                self.register(
3095+                    decorated.__name__.replace('_', '-'), decorated)
3096+ return decorated
3097+ return wrapper
3098+
3099+
3100+def charm_dir():
3101+ return os.environ.get('CHARM_DIR')
3102
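The Hooks class lets one Python file service every hook via symlinks; a minimal hook script looks roughly like this (hook names are illustrative):

    import sys
    from charmhelpers.core.hookenv import Hooks, UnregisteredHookError, log

    hooks = Hooks()

    @hooks.hook('config-changed', 'upgrade-charm')
    def config_changed():
        log('reconfiguring service')

    if __name__ == '__main__':
        try:
            # dispatch on the basename of the invoked symlink
            hooks.execute(sys.argv)
        except UnregisteredHookError as e:
            log('Unknown hook {} - skipping.'.format(e))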
3103=== added file 'hooks/charmhelpers/core/host.py'
3104--- hooks/charmhelpers/core/host.py 1970-01-01 00:00:00 +0000
3105+++ hooks/charmhelpers/core/host.py 2013-10-15 01:35:28 +0000
3106@@ -0,0 +1,241 @@
3107+"""Tools for working with the host system"""
3108+# Copyright 2012 Canonical Ltd.
3109+#
3110+# Authors:
3111+# Nick Moffitt <nick.moffitt@canonical.com>
3112+# Matthew Wedgwood <matthew.wedgwood@canonical.com>
3113+
3114+import os
3115+import pwd
3116+import grp
3117+import random
3118+import string
3119+import subprocess
3120+import hashlib
3121+
3122+from collections import OrderedDict
3123+
3124+from hookenv import log
3125+
3126+
3127+def service_start(service_name):
3128+ return service('start', service_name)
3129+
3130+
3131+def service_stop(service_name):
3132+ return service('stop', service_name)
3133+
3134+
3135+def service_restart(service_name):
3136+ return service('restart', service_name)
3137+
3138+
3139+def service_reload(service_name, restart_on_failure=False):
3140+ service_result = service('reload', service_name)
3141+ if not service_result and restart_on_failure:
3142+ service_result = service('restart', service_name)
3143+ return service_result
3144+
3145+
3146+def service(action, service_name):
3147+ cmd = ['service', service_name, action]
3148+ return subprocess.call(cmd) == 0
3149+
3150+
3151+def service_running(service):
3152+ try:
3153+ output = subprocess.check_output(['service', service, 'status'])
3154+ except subprocess.CalledProcessError:
3155+ return False
3156+ else:
3157+ if ("start/running" in output or "is running" in output):
3158+ return True
3159+ else:
3160+ return False
3161+
3162+
3163+def adduser(username, password=None, shell='/bin/bash', system_user=False):
3164+ """Add a user"""
3165+ try:
3166+ user_info = pwd.getpwnam(username)
3167+ log('user {0} already exists!'.format(username))
3168+ except KeyError:
3169+ log('creating user {0}'.format(username))
3170+ cmd = ['useradd']
3171+ if system_user or password is None:
3172+ cmd.append('--system')
3173+ else:
3174+ cmd.extend([
3175+ '--create-home',
3176+ '--shell', shell,
3177+ '--password', password,
3178+ ])
3179+ cmd.append(username)
3180+ subprocess.check_call(cmd)
3181+ user_info = pwd.getpwnam(username)
3182+ return user_info
3183+
3184+
3185+def add_user_to_group(username, group):
3186+ """Add a user to a group"""
3187+ cmd = [
3188+ 'gpasswd', '-a',
3189+ username,
3190+ group
3191+ ]
3192+ log("Adding user {} to group {}".format(username, group))
3193+ subprocess.check_call(cmd)
3194+
3195+
3196+def rsync(from_path, to_path, flags='-r', options=None):
3197+ """Replicate the contents of a path"""
3198+ options = options or ['--delete', '--executability']
3199+ cmd = ['/usr/bin/rsync', flags]
3200+ cmd.extend(options)
3201+ cmd.append(from_path)
3202+ cmd.append(to_path)
3203+ log(" ".join(cmd))
3204+ return subprocess.check_output(cmd).strip()
3205+
3206+
3207+def symlink(source, destination):
3208+ """Create a symbolic link"""
3209+ log("Symlinking {} as {}".format(source, destination))
3210+ cmd = [
3211+ 'ln',
3212+ '-sf',
3213+ source,
3214+ destination,
3215+ ]
3216+ subprocess.check_call(cmd)
3217+
3218+
3219+def mkdir(path, owner='root', group='root', perms=0555, force=False):
3220+ """Create a directory"""
3221+ log("Making dir {} {}:{} {:o}".format(path, owner, group,
3222+ perms))
3223+ uid = pwd.getpwnam(owner).pw_uid
3224+ gid = grp.getgrnam(group).gr_gid
3225+ realpath = os.path.abspath(path)
3226+    if os.path.exists(realpath):
3227+        if force and not os.path.isdir(realpath):
3228+            log("Removing non-directory file {} prior to mkdir()".format(path))
3229+            os.unlink(realpath)
3230+    if not os.path.exists(realpath):
3231+        os.makedirs(realpath, perms)
3232+ os.chown(realpath, uid, gid)
3233+
3234+
3235+def write_file(path, content, owner='root', group='root', perms=0444):
3236+ """Create or overwrite a file with the contents of a string"""
3237+ log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
3238+ uid = pwd.getpwnam(owner).pw_uid
3239+ gid = grp.getgrnam(group).gr_gid
3240+ with open(path, 'w') as target:
3241+ os.fchown(target.fileno(), uid, gid)
3242+ os.fchmod(target.fileno(), perms)
3243+ target.write(content)
3244+
3245+
3246+def mount(device, mountpoint, options=None, persist=False):
3247+ '''Mount a filesystem'''
3248+ cmd_args = ['mount']
3249+ if options is not None:
3250+ cmd_args.extend(['-o', options])
3251+ cmd_args.extend([device, mountpoint])
3252+ try:
3253+ subprocess.check_output(cmd_args)
3254+ except subprocess.CalledProcessError, e:
3255+ log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
3256+ return False
3257+ if persist:
3258+ # TODO: update fstab
3259+ pass
3260+ return True
3261+
3262+
3263+def umount(mountpoint, persist=False):
3264+ '''Unmount a filesystem'''
3265+ cmd_args = ['umount', mountpoint]
3266+ try:
3267+ subprocess.check_output(cmd_args)
3268+ except subprocess.CalledProcessError, e:
3269+ log('Error unmounting {}\n{}'.format(mountpoint, e.output))
3270+ return False
3271+ if persist:
3272+ # TODO: update fstab
3273+ pass
3274+ return True
3275+
3276+
3277+def mounts():
3278+ '''List of all mounted volumes as [[mountpoint,device],[...]]'''
3279+ with open('/proc/mounts') as f:
3280+ # [['/mount/point','/dev/path'],[...]]
3281+ system_mounts = [m[1::-1] for m in [l.strip().split()
3282+ for l in f.readlines()]]
3283+ return system_mounts
3284+
3285+
3286+def file_hash(path):
3287+    ''' Generate an md5 hash of the contents of 'path' or None if not found '''
3288+ if os.path.exists(path):
3289+ h = hashlib.md5()
3290+ with open(path, 'r') as source:
3291+ h.update(source.read()) # IGNORE:E1101 - it does have update
3292+ return h.hexdigest()
3293+ else:
3294+ return None
3295+
3296+
3297+def restart_on_change(restart_map):
3298+ ''' Restart services based on configuration files changing
3299+
3300+    This function is used as a decorator, for example
3301+
3302+ @restart_on_change({
3303+ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
3304+ })
3305+ def ceph_client_changed():
3306+ ...
3307+
3308+ In this example, the cinder-api and cinder-volume services
3309+ would be restarted if /etc/ceph/ceph.conf is changed by the
3310+ ceph_client_changed function.
3311+ '''
3312+ def wrap(f):
3313+ def wrapped_f(*args):
3314+ checksums = {}
3315+ for path in restart_map:
3316+ checksums[path] = file_hash(path)
3317+ f(*args)
3318+ restarts = []
3319+ for path in restart_map:
3320+ if checksums[path] != file_hash(path):
3321+ restarts += restart_map[path]
3322+ for service_name in list(OrderedDict.fromkeys(restarts)):
3323+ service('restart', service_name)
3324+ return wrapped_f
3325+ return wrap
3326+
3327+
3328+def lsb_release():
3329+ '''Return /etc/lsb-release in a dict'''
3330+ d = {}
3331+ with open('/etc/lsb-release', 'r') as lsb:
3332+ for l in lsb:
3333+ k, v = l.split('=')
3334+ d[k.strip()] = v.strip()
3335+ return d
3336+
3337+
3338+def pwgen(length=None):
3339+    '''Generate a random password.'''
3340+ if length is None:
3341+ length = random.choice(range(35, 45))
3342+ alphanumeric_chars = [
3343+ l for l in (string.letters + string.digits)
3344+ if l not in 'l0QD1vAEIOUaeiou']
3345+ random_chars = [
3346+ random.choice(alphanumeric_chars) for _ in range(length)]
3347+ return(''.join(random_chars))
3348
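A typical install-time sequence built from these primitives might look like this (the user, group, paths and file content are illustrative assumptions):

    from charmhelpers.core.host import (
        adduser, add_user_to_group, mkdir, write_file, service_restart,
    )

    adduser('svcuser', system_user=True)
    add_user_to_group('svcuser', 'adm')
    mkdir('/etc/svc', owner='svcuser', group='adm', perms=0755)
    write_file('/etc/svc/svc.conf', 'verbose = true\n',
               owner='svcuser', group='adm', perms=0644)
    service_restart('svc')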
3349=== added directory 'hooks/charmhelpers/fetch'
3350=== added file 'hooks/charmhelpers/fetch/__init__.py'
3351--- hooks/charmhelpers/fetch/__init__.py 1970-01-01 00:00:00 +0000
3352+++ hooks/charmhelpers/fetch/__init__.py 2013-10-15 01:35:28 +0000
3353@@ -0,0 +1,209 @@
3354+import importlib
3355+from yaml import safe_load
3356+from charmhelpers.core.host import (
3357+ lsb_release
3358+)
3359+from urlparse import (
3360+ urlparse,
3361+ urlunparse,
3362+)
3363+import subprocess
3364+from charmhelpers.core.hookenv import (
3365+ config,
3366+ log,
3367+)
3368+import apt_pkg
3369+
3370+CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
3371+deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
3372+"""
3373+PROPOSED_POCKET = """# Proposed
3374+deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
3375+"""
3376+
3377+
3378+def filter_installed_packages(packages):
3379+ """Returns a list of packages that require installation"""
3380+ apt_pkg.init()
3381+ cache = apt_pkg.Cache()
3382+ _pkgs = []
3383+ for package in packages:
3384+ try:
3385+ p = cache[package]
3386+ p.current_ver or _pkgs.append(package)
3387+ except KeyError:
3388+ log('Package {} has no installation candidate.'.format(package),
3389+ level='WARNING')
3390+ _pkgs.append(package)
3391+ return _pkgs
3392+
3393+
3394+def apt_install(packages, options=None, fatal=False):
3395+ """Install one or more packages"""
3396+ options = options or []
3397+ cmd = ['apt-get', '-y']
3398+ cmd.extend(options)
3399+ cmd.append('install')
3400+ if isinstance(packages, basestring):
3401+ cmd.append(packages)
3402+ else:
3403+ cmd.extend(packages)
3404+ log("Installing {} with options: {}".format(packages,
3405+ options))
3406+ if fatal:
3407+ subprocess.check_call(cmd)
3408+ else:
3409+ subprocess.call(cmd)
3410+
3411+
3412+def apt_update(fatal=False):
3413+ """Update local apt cache"""
3414+ cmd = ['apt-get', 'update']
3415+ if fatal:
3416+ subprocess.check_call(cmd)
3417+ else:
3418+ subprocess.call(cmd)
3419+
3420+
3421+def apt_purge(packages, fatal=False):
3422+ """Purge one or more packages"""
3423+ cmd = ['apt-get', '-y', 'purge']
3424+ if isinstance(packages, basestring):
3425+ cmd.append(packages)
3426+ else:
3427+ cmd.extend(packages)
3428+ log("Purging {}".format(packages))
3429+ if fatal:
3430+ subprocess.check_call(cmd)
3431+ else:
3432+ subprocess.call(cmd)
3433+
3434+
3435+def add_source(source, key=None):
3436+ if ((source.startswith('ppa:') or
3437+ source.startswith('http:'))):
3438+ subprocess.check_call(['add-apt-repository', '--yes', source])
3439+ elif source.startswith('cloud:'):
3440+ apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
3441+ fatal=True)
3442+ pocket = source.split(':')[-1]
3443+ with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
3444+ apt.write(CLOUD_ARCHIVE.format(pocket))
3445+ elif source == 'proposed':
3446+ release = lsb_release()['DISTRIB_CODENAME']
3447+ with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
3448+ apt.write(PROPOSED_POCKET.format(release))
3449+ if key:
3450+ subprocess.check_call(['apt-key', 'import', key])
3451+
3452+
3453+class SourceConfigError(Exception):
3454+ pass
3455+
3456+
3457+def configure_sources(update=False,
3458+ sources_var='install_sources',
3459+ keys_var='install_keys'):
3460+ """
3461+ Configure multiple sources from charm configuration
3462+
3463+ Example config:
3464+ install_sources:
3465+ - "ppa:foo"
3466+ - "http://example.com/repo precise main"
3467+ install_keys:
3468+ - null
3469+ - "a1b2c3d4"
3470+
3471+ Note that 'null' (a.k.a. None) should not be quoted.
3472+ """
3473+ sources = safe_load(config(sources_var))
3474+ keys = safe_load(config(keys_var))
3475+ if isinstance(sources, basestring) and isinstance(keys, basestring):
3476+ add_source(sources, keys)
3477+ else:
3478+        if len(sources) != len(keys):
3479+ msg = 'Install sources and keys lists are different lengths'
3480+ raise SourceConfigError(msg)
3481+ for src_num in range(len(sources)):
3482+ add_source(sources[src_num], keys[src_num])
3483+ if update:
3484+ apt_update(fatal=True)
3485+
3486+# The order of this list is very important. Handlers should be listed from
3487+# least- to most-specific URL matching.
3488+FETCH_HANDLERS = (
3489+ 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
3490+ 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
3491+)
3492+
3493+
3494+class UnhandledSource(Exception):
3495+ pass
3496+
3497+
3498+def install_remote(source):
3499+ """
3500+ Install a file tree from a remote source
3501+
3502+ The specified source should be a url of the form:
3503+ scheme://[host]/path[#[option=value][&...]]
3504+
3505+    Schemes supported are based on this module's submodules.
3506+ Options supported are submodule-specific"""
3507+ # We ONLY check for True here because can_handle may return a string
3508+ # explaining why it can't handle a given source.
3509+ handlers = [h for h in plugins() if h.can_handle(source) is True]
3510+ installed_to = None
3511+ for handler in handlers:
3512+ try:
3513+ installed_to = handler.install(source)
3514+ except UnhandledSource:
3515+ pass
3516+ if not installed_to:
3517+ raise UnhandledSource("No handler found for source {}".format(source))
3518+ return installed_to
3519+
3520+
3521+def install_from_config(config_var_name):
3522+ charm_config = config()
3523+ source = charm_config[config_var_name]
3524+ return install_remote(source)
3525+
3526+
3527+class BaseFetchHandler(object):
3528+ """Base class for FetchHandler implementations in fetch plugins"""
3529+ def can_handle(self, source):
3530+ """Returns True if the source can be handled. Otherwise returns
3531+ a string explaining why it cannot"""
3532+ return "Wrong source type"
3533+
3534+ def install(self, source):
3535+ """Try to download and unpack the source. Return the path to the
3536+ unpacked files or raise UnhandledSource."""
3537+ raise UnhandledSource("Wrong source type {}".format(source))
3538+
3539+ def parse_url(self, url):
3540+ return urlparse(url)
3541+
3542+ def base_url(self, url):
3543+ """Return url without querystring or fragment"""
3544+ parts = list(self.parse_url(url))
3545+ parts[4:] = ['' for i in parts[4:]]
3546+ return urlunparse(parts)
3547+
3548+
3549+def plugins(fetch_handlers=None):
3550+ if not fetch_handlers:
3551+ fetch_handlers = FETCH_HANDLERS
3552+ plugin_list = []
3553+ for handler_name in fetch_handlers:
3554+ package, classname = handler_name.rsplit('.', 1)
3555+ try:
3556+ handler_class = getattr(importlib.import_module(package), classname)
3557+ plugin_list.append(handler_class())
3558+ except (ImportError, AttributeError):
3559+ # Skip missing plugins so that they can be ommitted from
3560+ # installation if desired
3561+ log("FetchHandler {} not found, skipping plugin".format(handler_name))
3562+ return plugin_list
3563
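In an install hook these usually chain together (package names and the pocket are illustrative):

    from charmhelpers.fetch import (
        add_source, apt_update, apt_install, filter_installed_packages,
    )

    add_source('cloud:precise-updates/havana')
    apt_update(fatal=True)
    # only install what is not already present
    apt_install(filter_installed_packages(['nova-compute', 'ceph-common']),
                fatal=True)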
3564=== added file 'hooks/charmhelpers/fetch/archiveurl.py'
3565--- hooks/charmhelpers/fetch/archiveurl.py 1970-01-01 00:00:00 +0000
3566+++ hooks/charmhelpers/fetch/archiveurl.py 2013-10-15 01:35:28 +0000
3567@@ -0,0 +1,48 @@
3568+import os
3569+import urllib2
3570+from charmhelpers.fetch import (
3571+ BaseFetchHandler,
3572+ UnhandledSource
3573+)
3574+from charmhelpers.payload.archive import (
3575+ get_archive_handler,
3576+ extract,
3577+)
3578+from charmhelpers.core.host import mkdir
3579+
3580+
3581+class ArchiveUrlFetchHandler(BaseFetchHandler):
3582+ """Handler for archives via generic URLs"""
3583+ def can_handle(self, source):
3584+ url_parts = self.parse_url(source)
3585+ if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
3586+ return "Wrong source type"
3587+ if get_archive_handler(self.base_url(source)):
3588+ return True
3589+ return False
3590+
3591+ def download(self, source, dest):
3592+        # propagate all exceptions
3593+ # URLError, OSError, etc
3594+ response = urllib2.urlopen(source)
3595+ try:
3596+ with open(dest, 'w') as dest_file:
3597+ dest_file.write(response.read())
3598+        except Exception:
3599+            if os.path.isfile(dest):
3600+                os.unlink(dest)
3601+            raise
3602+
3603+ def install(self, source):
3604+ url_parts = self.parse_url(source)
3605+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
3606+ if not os.path.exists(dest_dir):
3607+ mkdir(dest_dir, perms=0755)
3608+ dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
3609+ try:
3610+ self.download(source, dld_file)
3611+ except urllib2.URLError as e:
3612+ raise UnhandledSource(e.reason)
3613+ except OSError as e:
3614+ raise UnhandledSource(e.strerror)
3615+ return extract(dld_file)
3616
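A minimal usage sketch of the handler above, assuming a hook-style environment where CHARM_DIR is set; the URL is hypothetical:

    import os
    from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

    os.environ.setdefault('CHARM_DIR', '/tmp/charm')   # normally set by juju
    handler = ArchiveUrlFetchHandler()
    source = 'http://example.com/payload.tar.gz'
    if handler.can_handle(source) is True:
        # Downloads to $CHARM_DIR/fetched/payload.tar.gz, then extracts it
        # and returns the path to the unpacked files.
        path = handler.install(source)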
3617=== added file 'hooks/charmhelpers/fetch/bzrurl.py'
3618--- hooks/charmhelpers/fetch/bzrurl.py 1970-01-01 00:00:00 +0000
3619+++ hooks/charmhelpers/fetch/bzrurl.py 2013-10-15 01:35:28 +0000
3620@@ -0,0 +1,49 @@
3621+import os
3622+from charmhelpers.fetch import (
3623+ BaseFetchHandler,
3624+ UnhandledSource
3625+)
3626+from charmhelpers.core.host import mkdir
3627+
3628+try:
3629+ from bzrlib.branch import Branch
3630+except ImportError:
3631+ from charmhelpers.fetch import apt_install
3632+ apt_install("python-bzrlib")
3633+ from bzrlib.branch import Branch
3634+
3635+class BzrUrlFetchHandler(BaseFetchHandler):
3636+ """Handler for bazaar branches via generic and lp URLs"""
3637+ def can_handle(self, source):
3638+ url_parts = self.parse_url(source)
3639+ if url_parts.scheme not in ('bzr+ssh', 'lp'):
3640+ return False
3641+ else:
3642+ return True
3643+
3644+ def branch(self, source, dest):
3645+ url_parts = self.parse_url(source)
3646+ # If we use lp:branchname scheme we need to load plugins
3647+ if not self.can_handle(source):
3648+ raise UnhandledSource("Cannot handle {}".format(source))
3649+ if url_parts.scheme == "lp":
3650+ from bzrlib.plugin import load_plugins
3651+ load_plugins()
3652+ try:
3653+ remote_branch = Branch.open(source)
3654+ remote_branch.bzrdir.sprout(dest).open_branch()
3655+ except Exception as e:
3656+ raise e
3657+
3658+ def install(self, source):
3659+ url_parts = self.parse_url(source)
3660+ branch_name = url_parts.path.strip("/").split("/")[-1]
3661+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name)
3662+ if not os.path.exists(dest_dir):
3663+ mkdir(dest_dir, perms=0755)
3664+ try:
3665+ self.branch(source, dest_dir)
3666+ except OSError as e:
3667+ raise UnhandledSource(e.strerror)
3668+ return dest_dir
3669+
3670
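Note that can_handle() here returns plain booleans rather than the "string on rejection" convention of BaseFetchHandler; install_remote() only tests for True, so dispatch behaves the same. A usage sketch (branch URL hypothetical):

    from charmhelpers.fetch.bzrurl import BzrUrlFetchHandler

    handler = BzrUrlFetchHandler()
    if handler.can_handle('lp:some-branch'):
        # Branches into $CHARM_DIR/fetched/some-branch and returns that path.
        dest = handler.install('lp:some-branch')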
3671=== added directory 'hooks/charmhelpers/payload'
3672=== added file 'hooks/charmhelpers/payload/__init__.py'
3673--- hooks/charmhelpers/payload/__init__.py 1970-01-01 00:00:00 +0000
3674+++ hooks/charmhelpers/payload/__init__.py 2013-10-15 01:35:28 +0000
3675@@ -0,0 +1,1 @@
3676+"Tools for working with files injected into a charm just before deployment."
3677
3678=== added file 'hooks/charmhelpers/payload/execd.py'
3679--- hooks/charmhelpers/payload/execd.py 1970-01-01 00:00:00 +0000
3680+++ hooks/charmhelpers/payload/execd.py 2013-10-15 01:35:28 +0000
3681@@ -0,0 +1,50 @@
3682+#!/usr/bin/env python
3683+
3684+import os
3685+import sys
3686+import subprocess
3687+from charmhelpers.core import hookenv
3688+
3689+
3690+def default_execd_dir():
3691+ return os.path.join(os.environ['CHARM_DIR'], 'exec.d')
3692+
3693+
3694+def execd_module_paths(execd_dir=None):
3695+ """Generate a list of full paths to modules within execd_dir."""
3696+ if not execd_dir:
3697+ execd_dir = default_execd_dir()
3698+
3699+ if not os.path.exists(execd_dir):
3700+ return
3701+
3702+ for subpath in os.listdir(execd_dir):
3703+ module = os.path.join(execd_dir, subpath)
3704+ if os.path.isdir(module):
3705+ yield module
3706+
3707+
3708+def execd_submodule_paths(command, execd_dir=None):
3709+ """Generate a list of full paths to the specified command within execd_dir.
3710+ """
3711+ for module_path in execd_module_paths(execd_dir):
3712+ path = os.path.join(module_path, command)
3713+ if os.access(path, os.X_OK) and os.path.isfile(path):
3714+ yield path
3715+
3716+
3717+def execd_run(command, execd_dir=None, die_on_error=False, stderr=None):
3718+ """Run command for each module within execd_dir which defines it."""
3719+ for submodule_path in execd_submodule_paths(command, execd_dir):
3720+ try:
3721+ subprocess.check_call(submodule_path, shell=True, stderr=stderr)
3722+ except subprocess.CalledProcessError as e:
3723+ hookenv.log("Error ({}) running {}. Output: {}".format(
3724+ e.returncode, e.cmd, e.output))
3725+ if die_on_error:
3726+ sys.exit(e.returncode)
3727+
3728+
3729+def execd_preinstall(execd_dir=None):
3730+ """Run charm-pre-install for each module within execd_dir."""
3731+ execd_run('charm-pre-install', execd_dir=execd_dir)
3732
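A sketch of the layout execd_preinstall() consumes; the directory names are hypothetical, only the exec.d/*/charm-pre-install convention matters:

    $CHARM_DIR/
        exec.d/
            10-add-repo/
                charm-pre-install    # must be an executable file
            20-tweaks/
                charm-pre-install

Each executable found one level below exec.d/ is run via subprocess.check_call(); failures are logged through hookenv.log() and, when die_on_error=True is passed to execd_run(), terminate the hook with the command's return code.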
3733=== modified symlink 'hooks/cloud-compute-relation-changed'
3734=== target changed u'nova-compute-relations' => u'nova_compute_hooks.py'
3735=== modified symlink 'hooks/cloud-compute-relation-joined'
3736=== target changed u'nova-compute-relations' => u'nova_compute_hooks.py'
3737=== modified symlink 'hooks/config-changed'
3738=== target changed u'nova-compute-relations' => u'nova_compute_hooks.py'
3739=== added symlink 'hooks/image-service-relation-broken'
3740=== target is u'nova_compute_hooks.py'
3741=== modified symlink 'hooks/image-service-relation-changed'
3742=== target changed u'nova-compute-relations' => u'nova_compute_hooks.py'
3743=== removed symlink 'hooks/image-service-relation-joined'
3744=== target was u'nova-compute-relations'
3745=== added symlink 'hooks/install'
3746=== target is u'nova_compute_hooks.py'
3747=== removed symlink 'hooks/install'
3748=== target was u'nova-compute-relations'
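All of the hook symlinks above now point at a single Python entry point. The usual charmhelpers pattern for that, sketched here for context (the real nova_compute_hooks.py appears elsewhere in this diff), dispatches on the name of the invoked symlink:

    import sys
    from charmhelpers.core.hookenv import Hooks, UnregisteredHookError, log

    hooks = Hooks()

    @hooks.hook('config-changed')
    def config_changed():
        log('config-changed fired')

    if __name__ == '__main__':
        try:
            # Dispatch based on the basename of the invoked symlink.
            hooks.execute(sys.argv)
        except UnregisteredHookError as e:
            log('Unknown hook {} - skipping.'.format(e))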
3749=== removed directory 'hooks/lib'
3750=== removed directory 'hooks/lib/nova'
3751=== removed file 'hooks/lib/nova/essex'
3752--- hooks/lib/nova/essex 2013-04-04 23:34:30 +0000
3753+++ hooks/lib/nova/essex 1970-01-01 00:00:00 +0000
3754@@ -1,43 +0,0 @@
3755-#!/bin/bash -e
3756-
3757-# Essex-specific functions
3758-
3759-nova_set_or_update() {
3760- # Set a config option in nova.conf or api-paste.ini, depending
3761- # Defaults to updating nova.conf
3762- local key=$1
3763- local value=$2
3764- local conf_file=$3
3765- local pattern=""
3766-
3767- local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf}
3768- local api_conf=${API_CONF:-/etc/nova/api-paste.ini}
3769- local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf}
3770- [[ -z $key ]] && juju-log "$CHARM set_or_update: value $value missing key" && exit 1
3771- [[ -z $value ]] && juju-log "$CHARM set_or_update: key $key missing value" && exit 1
3772- [[ -z "$conf_file" ]] && conf_file=$nova_conf
3773-
3774- case "$conf_file" in
3775- "$nova_conf") match="\-\-$key="
3776- pattern="--$key="
3777- out=$pattern
3778- ;;
3779- "$api_conf"|"$libvirtd_conf") match="^$key = "
3780- pattern="$match"
3781- out="$key = "
3782- ;;
3783- *) error_out "ERROR: set_or_update: Invalid conf_file ($conf_file)"
3784- esac
3785-
3786- cat $conf_file | grep "$match$value" >/dev/null &&
3787- juju-log "$CHARM: $key=$value already set in $conf_file" \
3788- && return 0
3789- if cat $conf_file | grep "$match" >/dev/null ; then
3790- juju-log "$CHARM: Updating $conf_file, $key=$value"
3791- sed -i "s|\($pattern\).*|\1$value|" $conf_file
3792- else
3793- juju-log "$CHARM: Setting new option $key=$value in $conf_file"
3794- echo "$out$value" >>$conf_file
3795- fi
3796- CONFIG_CHANGED=True
3797-}
3798
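For reference, a worked example of the helper removed above: nova_set_or_update verbose true (with no conf_file argument) targets /etc/nova/nova.conf, matches the Essex gflags pattern --verbose=, and either rewrites the existing line in place with sed or appends a new entry:

    --verbose=true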
3799=== removed file 'hooks/lib/nova/folsom'
3800--- hooks/lib/nova/folsom 2013-04-04 23:34:30 +0000
3801+++ hooks/lib/nova/folsom 1970-01-01 00:00:00 +0000
3802@@ -1,135 +0,0 @@
3803-#!/bin/bash -e
3804-
3805-# Folsom-specific functions
3806-
3807-nova_set_or_update() {
3808- # Set a config option in nova.conf or api-paste.ini, depending
3809- # Defaults to updating nova.conf
3810- local key="$1"
3811- local value="$2"
3812- local conf_file="$3"
3813- local section="${4:-DEFAULT}"
3814-
3815- local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf}
3816- local api_conf=${API_CONF:-/etc/nova/api-paste.ini}
3817- local quantum_conf=${QUANTUM_CONF:-/etc/quantum/quantum.conf}
3818- local quantum_api_conf=${QUANTUM_API_CONF:-/etc/quantum/api-paste.ini}
3819- local quantum_plugin_conf=${QUANTUM_PLUGIN_CONF:-/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini}
3820- local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf}
3821-
3822- [[ -z $key ]] && juju-log "$CHARM: set_or_update: value $value missing key" && exit 1
3823- [[ -z $value ]] && juju-log "$CHARM: set_or_update: key $key missing value" && exit 1
3824-
3825- [[ -z "$conf_file" ]] && conf_file=$nova_conf
3826-
3827- local pattern=""
3828- case "$conf_file" in
3829- "$nova_conf") match="^$key="
3830- pattern="$key="
3831- out=$pattern
3832- ;;
3833- "$api_conf"|"$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf"| \
3834- "$libvirtd_conf")
3835- match="^$key = "
3836- pattern="$match"
3837- out="$key = "
3838- ;;
3839- *) juju-log "$CHARM ERROR: set_or_update: Invalid conf_file ($conf_file)"
3840- esac
3841-
3842- cat $conf_file | grep "$match$value" >/dev/null &&
3843- juju-log "$CHARM: $key=$value already set in $conf_file" \
3844- && return 0
3845-
3846- case $conf_file in
3847- "$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf")
3848- python -c "
3849-import ConfigParser
3850-config = ConfigParser.RawConfigParser()
3851-config.read('$conf_file')
3852-config.set('$section','$key','$value')
3853-with open('$conf_file', 'wb') as configfile:
3854- config.write(configfile)
3855-"
3856- ;;
3857- *)
3858- if cat $conf_file | grep "$match" >/dev/null ; then
3859- juju-log "$CHARM: Updating $conf_file, $key=$value"
3860- sed -i "s|\($pattern\).*|\1$value|" $conf_file
3861- else
3862- juju-log "$CHARM: Setting new option $key=$value in $conf_file"
3863- echo "$out$value" >>$conf_file
3864- fi
3865- ;;
3866- esac
3867- CONFIG_CHANGED="True"
3868-}
3869-
3870-# Upgrade Helpers
3871-nova_pre_upgrade() {
3872- # Pre-upgrade helper. Caller should pass the version of OpenStack we are
3873- # upgrading from.
3874- return 0 # Nothing to do here, yet.
3875-}
3876-
3877-nova_post_upgrade() {
3878- # Post-upgrade helper. Caller should pass the version of OpenStack we are
3879- # upgrading from.
3880- local upgrade_from="$1"
3881- juju-log "$CHARM: Running post-upgrade hook: $upgrade_from -> folsom."
3882- # We only support essex -> folsom, currently.
3883- [[ "$upgrade_from" != "essex" ]] &&
3884- error_out "Unsupported upgrade: $upgrade_from -> folsom"
3885-
3886- # This may be dangerous, if we are upgrading a number of units at once
3887- # and they all begin the same migration concurrently. Migrate only from
3888- # the cloud controller(s).
3889- if [[ "$CHARM" == "nova-cloud-controller" ]] ; then
3890- juju-log "$CHARM: Migrating nova database."
3891- /usr/bin/nova-manage db sync
3892-
3893- # Trigger a service restart on all other nova nodes.
3894- trigger_remote_service_restarts
3895- fi
3896-
3897- # Packaging currently takes care of converting the Essex gflags format
3898- # to .ini, but we need to update the api-paste.ini manually. It can be
3899- # updated directly from keystone, via the identity-service relation,
3900- # if it exists. Only services that require keystone credentials will
3901- # have modified api-paste.ini, and only those services will have a .dpkg-dist
3902- # version present.
3903- local r_id=$(relation-ids identity-service)
3904- if [[ -n "$r_id" ]] && [[ -e "$CONF_DIR/api-paste.ini.dpkg-dist" ]] ; then
3905- # Backup the last api config, update the stock packaged version
3906- # with our current Keystone info.
3907- mv $API_CONF $CONF_DIR/api-paste.ini.juju-last
3908- mv $CONF_DIR/api-paste.ini.dpkg-dist $CONF_DIR/api-paste.ini
3909-
3910- unit=$(relation-list -r $r_id | head -n1)
3911- # Note, this should never be called from an relation hook, only config-changed.
3912- export JUJU_REMOTE_UNIT=$unit
3913- service_port=$(relation-get -r $r_id service_port)
3914- auth_port=$(relation-get -r $r_id auth_port)
3915- service_username=$(relation-get -r $r_id service_username)
3916- service_password=$(relation-get -r $r_id service_password)
3917- service_tenant=$(relation-get -r $r_id service_tenant)
3918- keystone_host=$(relation-get -r $r_id private-address)
3919- unset JUJU_REMOTE_UNIT
3920-
3921- juju-log "$CHARM: Updating new api-paste.ini with keystone data from $unit:$r_id"
3922- set_or_update "service_host" "$keystone_host" "$API_CONF"
3923- set_or_update "service_port" "$service_port" "$API_CONF"
3924- set_or_update "auth_host" "$keystone_host" "$API_CONF"
3925- set_or_update "auth_port" "$auth_port" "$API_CONF"
3926- set_or_update "auth_uri" "http://$keystone_host:$service_port/" "$API_CONF"
3927- set_or_update "admin_tenant_name" "$service_tenant" "$API_CONF"
3928- set_or_update "admin_user" "$service_username" "$API_CONF"
3929- set_or_update "admin_password" "$service_password" "$API_CONF"
3930- fi
3931-
3932- # TEMPORARY
3933- # RC3 packaging in cloud archive doesn't have this in postinst. Do it here
3934- sed -e "s,^root_helper=.\+,rootwrap_config=/etc/nova/rootwrap.conf," -i /etc/nova/nova.conf
3935-
3936- juju-log "$CHARM: Post-upgrade hook complete: $upgrade_from -> folsom."
3937-}
3938
3939=== removed file 'hooks/lib/nova/grizzly'
3940--- hooks/lib/nova/grizzly 2013-06-13 18:45:55 +0000
3941+++ hooks/lib/nova/grizzly 1970-01-01 00:00:00 +0000
3942@@ -1,97 +0,0 @@
3943-#!/bin/bash -e
3944-
3945-# Grizzly-specific functions
3946-
3947-nova_set_or_update() {
3948- # TODO: This needs to be shared among folsom, grizzly and beyond.
3949- # Set a config option in nova.conf or api-paste.ini, depending
3950- # Defaults to updating nova.conf
3951- local key="$1"
3952- local value="$2"
3953- local conf_file="$3"
3954- local section="${4:-DEFAULT}"
3955-
3956- local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf}
3957- local api_conf=${API_CONF:-/etc/nova/api-paste.ini}
3958- local quantum_conf=${QUANTUM_CONF:-/etc/quantum/quantum.conf}
3959- local quantum_api_conf=${QUANTUM_API_CONF:-/etc/quantum/api-paste.ini}
3960- local quantum_plugin_conf=${QUANTUM_PLUGIN_CONF:-/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini}
3961- local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf}
3962-
3963- [[ -z $key ]] && juju-log "$CHARM: set_or_update: value $value missing key" && exit 1
3964- [[ -z $value ]] && juju-log "$CHARM: set_or_update: key $key missing value" && exit 1
3965-
3966- [[ -z "$conf_file" ]] && conf_file=$nova_conf
3967-
3968- local pattern=""
3969- case "$conf_file" in
3970- "$nova_conf") match="^$key="
3971- pattern="$key="
3972- out=$pattern
3973- ;;
3974- "$api_conf"|"$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf"| \
3975- "$libvirtd_conf")
3976- match="^$key = "
3977- pattern="$match"
3978- out="$key = "
3979- ;;
3980- *) juju-log "$CHARM ERROR: set_or_update: Invalid conf_file ($conf_file)"
3981- esac
3982-
3983- cat $conf_file | grep "$match$value" >/dev/null &&
3984- juju-log "$CHARM: $key=$value already set in $conf_file" \
3985- && return 0
3986-
3987- case $conf_file in
3988- "$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf")
3989- python -c "
3990-import ConfigParser
3991-config = ConfigParser.RawConfigParser()
3992-config.read('$conf_file')
3993-config.set('$section','$key','$value')
3994-with open('$conf_file', 'wb') as configfile:
3995- config.write(configfile)
3996-"
3997- ;;
3998- *)
3999- if cat $conf_file | grep "$match" >/dev/null ; then
4000- juju-log "$CHARM: Updating $conf_file, $key=$value"
4001- sed -i "s|\($pattern\).*|\1$value|" $conf_file
4002- else
4003- juju-log "$CHARM: Setting new option $key=$value in $conf_file"
4004- echo "$out$value" >>$conf_file
4005- fi
4006- ;;
4007- esac
4008- CONFIG_CHANGED="True"
4009-}
4010-
4011-# Upgrade Helpers
4012-nova_pre_upgrade() {
4013- # Pre-upgrade helper. Caller should pass the version of OpenStack we are
4014- # upgrading from.
4015- return 0 # Nothing to do here, yet.
4016-}
4017-
4018-nova_post_upgrade() {
4019- # Post-upgrade helper. Caller should pass the version of OpenStack we are
4020- # upgrading from.
4021- local upgrade_from="$1"
4022- juju-log "$CHARM: Running post-upgrade hook: $upgrade_from -> grizzly."
4023- # We only support folsom -> grizzly, currently.
4024- [[ "$upgrade_from" != "folsom" ]] &&
4025- error_out "Unsupported upgrade: $upgrade_from -> grizzly"
4026-
4027- # This may be dangerous, if we are upgrading a number of units at once
4028- # and they all begin the same migration concurrently. Migrate only from
4029- # the cloud controller(s).
4030- if [[ "$CHARM" == "nova-cloud-controller" ]] ; then
4031- juju-log "$CHARM: Migrating nova database."
4032- /usr/bin/nova-manage db sync
4033-
4034- # Trigger a service restart on all other nova nodes.
4035- trigger_remote_service_restarts
4036- fi
4037-
4038- juju-log "$CHARM: Post-upgrade hook complete: $upgrade_from -> grizzly."
4039-}
4040
4041=== removed file 'hooks/lib/nova/nova-common'
4042--- hooks/lib/nova/nova-common 2013-04-26 13:06:20 +0000
4043+++ hooks/lib/nova/nova-common 1970-01-01 00:00:00 +0000
4044@@ -1,148 +0,0 @@
4045-#!/bin/bash -e
4046-
4047-# Common utility functions used across all nova charms.
4048-
4049-CONFIG_CHANGED=False
4050-HOOKS_DIR="$CHARM_DIR/hooks"
4051-
4052-# Load the common OpenStack helper library.
4053-if [[ -e $HOOKS_DIR/lib/openstack-common ]] ; then
4054- . $HOOKS_DIR/lib/openstack-common
4055-else
4056- juju-log "Couldn't load $HOOKS_DIR/lib/openstack-common." && exit 1
4057-fi
4058-
4059-set_or_update() {
4060- # Update config flags in nova.conf or api-paste.ini.
4061- # Config layout changed in Folsom, so this is now OpenStack release specific.
4062- local rel=$(get_os_codename_package "nova-common")
4063- . $HOOKS_DIR/lib/nova/$rel
4064- nova_set_or_update $@
4065-}
4066-
4067-function set_config_flags() {
4068- # Set user-defined nova.conf flags from deployment config
4069- juju-log "$CHARM: Processing config-flags."
4070- flags=$(config-get config-flags)
4071- if [[ "$flags" != "None" && -n "$flags" ]] ; then
4072- for f in $(echo $flags | sed -e 's/,/ /g') ; do
4073- k=$(echo $f | cut -d= -f1)
4074- v=$(echo $f | cut -d= -f2)
4075- set_or_update "$k" "$v"
4076- done
4077- fi
4078-}
4079-
4080-configure_volume_service() {
4081- local svc="$1"
4082- local cur_vers="$(get_os_codename_package "nova-common")"
4083- case "$svc" in
4084- "cinder")
4085- set_or_update "volume_api_class" "nova.volume.cinder.API" ;;
4086- "nova-volume")
4087- # nova-volume only supported before grizzly.
4088- [[ "$cur_vers" == "essex" ]] || [[ "$cur_vers" == "folsom" ]] &&
4089- set_or_update "volume_api_class" "nova.volume.api.API"
4090- ;;
4091- *) juju-log "$CHARM ERROR - configure_volume_service: Invalid service $svc"
4092- return 1 ;;
4093- esac
4094-}
4095-
4096-function configure_network_manager {
4097- local manager="$1"
4098- echo "$CHARM: configuring $manager network manager"
4099- case $1 in
4100- "FlatManager")
4101- set_or_update "network_manager" "nova.network.manager.FlatManager"
4102- ;;
4103- "FlatDHCPManager")
4104- set_or_update "network_manager" "nova.network.manager.FlatDHCPManager"
4105-
4106- if [[ "$CHARM" == "nova-compute" ]] ; then
4107- local flat_interface=$(config-get flat-interface)
4108- local ec2_host=$(relation-get ec2_host)
4109- set_or_update flat_interface "$flat_interface"
4110- set_or_update ec2_dmz_host "$ec2_host"
4111-
4112- # Ensure flat_interface has link.
4113- if ip link show $flat_interface >/dev/null 2>&1 ; then
4114- ip link set $flat_interface up
4115- fi
4116-
4117- # work around (LP: #1035172)
4118- if [[ -e /dev/vhost-net ]] ; then
4119- iptables -A POSTROUTING -t mangle -p udp --dport 68 -j CHECKSUM \
4120- --checksum-fill
4121- fi
4122- fi
4123-
4124- ;;
4125- "Quantum")
4126- local local_ip=$(get_ip `unit-get private-address`)
4127- [[ -n $local_ip ]] || {
4128- juju-log "Unable to resolve local IP address"
4129- exit 1
4130- }
4131- set_or_update "network_api_class" "nova.network.quantumv2.api.API"
4132- set_or_update "quantum_auth_strategy" "keystone"
4133- set_or_update "core_plugin" "$QUANTUM_CORE_PLUGIN" "$QUANTUM_CONF"
4134- set_or_update "bind_host" "0.0.0.0" "$QUANTUM_CONF"
4135- if [ "$QUANTUM_PLUGIN" == "ovs" ]; then
4136- set_or_update "tenant_network_type" "gre" $QUANTUM_PLUGIN_CONF "OVS"
4137- set_or_update "enable_tunneling" "True" $QUANTUM_PLUGIN_CONF "OVS"
4138- set_or_update "tunnel_id_ranges" "1:1000" $QUANTUM_PLUGIN_CONF "OVS"
4139- set_or_update "local_ip" "$local_ip" $QUANTUM_PLUGIN_CONF "OVS"
4140- fi
4141- ;;
4142- *) juju-log "ERROR: Invalid network manager $1" && exit 1 ;;
4143- esac
4144-}
4145-
4146-function trigger_remote_service_restarts() {
4147- # Trigger a service restart on all other nova nodes that have a relation
4148- # via the cloud-controller interface.
4149-
4150- # possible relations to other nova services.
4151- local relations="cloud-compute nova-volume-service"
4152-
4153- for rel in $relations; do
4154- local r_ids=$(relation-ids $rel)
4155- for r_id in $r_ids ; do
4156- juju-log "$CHARM: Triggering a service restart on relation $r_id."
4157- relation-set -r $r_id restart-trigger=$(uuid)
4158- done
4159- done
4160-}
4161-
4162-do_openstack_upgrade() {
4163- # update openstack components to those provided by a new installation source
4164- # it is assumed the calling hook has confirmed that the upgrade is sane.
4165- local rel="$1"
4166- shift
4167- local packages=$@
4168-
4169- orig_os_rel=$(get_os_codename_package "nova-common")
4170- new_rel=$(get_os_codename_install_source "$rel")
4171-
4172- # Backup the config directory.
4173- local stamp=$(date +"%Y%m%d%M%S")
4174- tar -pcf /var/lib/juju/$CHARM-backup-$stamp.tar $CONF_DIR
4175-
4176- # load the release helper library for pre/post upgrade hooks specific to the
4177- # release we are upgrading to.
4178- . $HOOKS_DIR/lib/nova/$new_rel
4179-
4180- # new release specific pre-upgrade hook
4181- nova_pre_upgrade "$orig_os_rel"
4182-
4183- # Setup apt repository access and kick off the actual package upgrade.
4184- configure_install_source "$rel"
4185- apt-get update
4186- DEBIAN_FRONTEND=noninteractive apt-get --option Dpkg::Options::=--force-confold -y \
4187- install --no-install-recommends $packages
4188-
4189- # new release specific post-upgrade hook
4190- nova_post_upgrade "$orig_os_rel"
4191-
4192-}
4193
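The set_config_flags helper removed above splits the config-flags option on commas and then on '='; roughly, in Python terms (values hypothetical):

    flags = 'cpu_mode=host-passthrough,force_config_drive=true'
    pairs = [f.split('=', 1) for f in flags.split(',')]
    # -> [['cpu_mode', 'host-passthrough'], ['force_config_drive', 'true']]
    # Note: the bash version used cut -d= -f1/-f2, so keys or values that
    # themselves contained ',' or '=' were not supported.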
4194=== removed file 'hooks/lib/openstack-common'
4195--- hooks/lib/openstack-common 2013-04-26 13:06:20 +0000
4196+++ hooks/lib/openstack-common 1970-01-01 00:00:00 +0000
4197@@ -1,781 +0,0 @@
4198-#!/bin/bash -e
4199-
4200-# Common utility functions used across all OpenStack charms.
4201-
4202-error_out() {
4203- juju-log "$CHARM ERROR: $@"
4204- exit 1
4205-}
4206-
4207-function service_ctl_status {
4208- # Return 0 if a service is running, 1 otherwise.
4209- local svc="$1"
4210- local status=$(service $svc status | cut -d/ -f1 | awk '{ print $2 }')
4211- case $status in
4212- "start") return 0 ;;
4213- "stop") return 1 ;;
4214- *) error_out "Unexpected status of service $svc: $status" ;;
4215- esac
4216-}
4217-
4218-function service_ctl {
4219- # control a specific service, or all (as defined by $SERVICES)
4220- # service restarts will only occur depending on global $CONFIG_CHANGED,
4221- # which should be updated in charm's set_or_update().
4222- local config_changed=${CONFIG_CHANGED:-True}
4223- if [[ $1 == "all" ]] ; then
4224- ctl="$SERVICES"
4225- else
4226- ctl="$1"
4227- fi
4228- action="$2"
4229- if [[ -z "$ctl" ]] || [[ -z "$action" ]] ; then
4230- error_out "ERROR service_ctl: Not enough arguments"
4231- fi
4232-
4233- for i in $ctl ; do
4234- case $action in
4235- "start")
4236- service_ctl_status $i || service $i start ;;
4237- "stop")
4238- service_ctl_status $i && service $i stop || return 0 ;;
4239- "restart")
4240- if [[ "$config_changed" == "True" ]] ; then
4241- service_ctl_status $i && service $i restart || service $i start
4242- fi
4243- ;;
4244- esac
4245- if [[ $? != 0 ]] ; then
4246- juju-log "$CHARM: service_ctl ERROR - Service $i failed to $action"
4247- fi
4248- done
4249- # all configs should have been reloaded on restart of all services, reset
4250- # flag if its being used.
4251- if [[ "$action" == "restart" ]] && [[ -n "$CONFIG_CHANGED" ]] &&
4252- [[ "$ctl" == "all" ]]; then
4253- CONFIG_CHANGED="False"
4254- fi
4255-}
4256-
4257-function configure_install_source {
4258- # Setup and configure installation source based on a config flag.
4259- local src="$1"
4260-
4261- # Default to installing from the main Ubuntu archive.
4262- [[ $src == "distro" ]] || [[ -z "$src" ]] && return 0
4263-
4264- . /etc/lsb-release
4265-
4266- # standard 'ppa:someppa/name' format.
4267- if [[ "${src:0:4}" == "ppa:" ]] ; then
4268- juju-log "$CHARM: Configuring installation from custom src ($src)"
4269- add-apt-repository -y "$src" || error_out "Could not configure PPA access."
4270- return 0
4271- fi
4272-
4273- # standard 'deb http://url/ubuntu main' entries. gpg key ids must
4274- # be appended to the end of url after a |, ie:
4275- # 'deb http://url/ubuntu main|$GPGKEYID'
4276- if [[ "${src:0:3}" == "deb" ]] ; then
4277- juju-log "$CHARM: Configuring installation from custom src URL ($src)"
4278- if echo "$src" | grep -q "|" ; then
4279- # gpg key id tagged to end of url followed by a |
4280- url=$(echo $src | cut -d'|' -f1)
4281- key=$(echo $src | cut -d'|' -f2)
4282- juju-log "$CHARM: Importing repository key: $key"
4283- apt-key adv --keyserver keyserver.ubuntu.com --recv-keys "$key" || \
4284- juju-log "$CHARM WARN: Could not import key from keyserver: $key"
4285- else
4286- juju-log "$CHARM No repository key specified."
4287- url="$src"
4288- fi
4289- echo "$url" > /etc/apt/sources.list.d/juju_deb.list
4290- return 0
4291- fi
4292-
4293- # Cloud Archive
4294- if [[ "${src:0:6}" == "cloud:" ]] ; then
4295-
4296- # current os releases supported by the UCA.
4297- local cloud_archive_versions="folsom grizzly"
4298-
4299- local ca_rel=$(echo $src | cut -d: -f2)
4300- local u_rel=$(echo $ca_rel | cut -d- -f1)
4301- local os_rel=$(echo $ca_rel | cut -d- -f2 | cut -d/ -f1)
4302-
4303- [[ "$u_rel" != "$DISTRIB_CODENAME" ]] &&
4304- error_out "Cannot install from Cloud Archive pocket $src " \
4305- "on this Ubuntu version ($DISTRIB_CODENAME)!"
4306-
4307- valid_release=""
4308- for rel in $cloud_archive_versions ; do
4309- if [[ "$os_rel" == "$rel" ]] ; then
4310- valid_release=1
4311- juju-log "Installing OpenStack ($os_rel) from the Ubuntu Cloud Archive."
4312- fi
4313- done
4314- if [[ -z "$valid_release" ]] ; then
4315- error_out "OpenStack release ($os_rel) not supported by "\
4316- "the Ubuntu Cloud Archive."
4317- fi
4318-
4319- # CA staging repos are standard PPAs.
4320- if echo $ca_rel | grep -q "staging" ; then
4321- add-apt-repository -y ppa:ubuntu-cloud-archive/${os_rel}-staging
4322- return 0
4323- fi
4324-
4325- # the others are LP-external deb repos.
4326- case "$ca_rel" in
4327- "$u_rel-$os_rel"|"$u_rel-$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;;
4328- "$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;;
4329- "$u_rel-$os_rel"|"$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;;
4330- "$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;;
4331- *) error_out "Invalid Cloud Archive repo specified: $src"
4332- esac
4333-
4334- apt-get -y install ubuntu-cloud-keyring
4335- entry="deb http://ubuntu-cloud.archive.canonical.com/ubuntu $pocket main"
4336- echo "$entry" \
4337- >/etc/apt/sources.list.d/ubuntu-cloud-archive-$DISTRIB_CODENAME.list
4338- return 0
4339- fi
4340-
4341- error_out "Invalid installation source specified in config: $src"
4342-
4343-}
4344-
4345-get_os_codename_install_source() {
4346- # derive the openstack release provided by a supported installation source.
4347- local rel="$1"
4348- local codename="unknown"
4349- . /etc/lsb-release
4350-
4351- # map ubuntu releases to the openstack version shipped with it.
4352- if [[ "$rel" == "distro" ]] ; then
4353- case "$DISTRIB_CODENAME" in
4354- "oneiric") codename="diablo" ;;
4355- "precise") codename="essex" ;;
4356- "quantal") codename="folsom" ;;
4357- "raring") codename="grizzly" ;;
4358- esac
4359- fi
4360-
4361- # derive version from cloud archive strings.
4362- if [[ "${rel:0:6}" == "cloud:" ]] ; then
4363- rel=$(echo $rel | cut -d: -f2)
4364- local u_rel=$(echo $rel | cut -d- -f1)
4365- local ca_rel=$(echo $rel | cut -d- -f2)
4366- if [[ "$u_rel" == "$DISTRIB_CODENAME" ]] ; then
4367- case "$ca_rel" in
4368- "folsom"|"folsom/updates"|"folsom/proposed"|"folsom/staging")
4369- codename="folsom" ;;
4370- "grizzly"|"grizzly/updates"|"grizzly/proposed"|"grizzly/staging")
4371- codename="grizzly" ;;
4372- esac
4373- fi
4374- fi
4375-
4376- # have a guess based on the deb string provided
4377- if [[ "${rel:0:3}" == "deb" ]] || \
4378- [[ "${rel:0:3}" == "ppa" ]] ; then
4379- CODENAMES="diablo essex folsom grizzly havana"
4380- for cname in $CODENAMES; do
4381- if echo $rel | grep -q $cname; then
4382- codename=$cname
4383- fi
4384- done
4385- fi
4386- echo $codename
4387-}
4388-
4389-get_os_codename_package() {
4390- local pkg_vers=$(dpkg -l | grep "$1" | awk '{ print $3 }') || echo "none"
4391- pkg_vers=$(echo $pkg_vers | cut -d: -f2) # epochs
4392- case "${pkg_vers:0:6}" in
4393- "2011.2") echo "diablo" ;;
4394- "2012.1") echo "essex" ;;
4395- "2012.2") echo "folsom" ;;
4396- "2013.1") echo "grizzly" ;;
4397- "2013.2") echo "havana" ;;
4398- esac
4399-}
4400-
4401-get_os_version_codename() {
4402- case "$1" in
4403- "diablo") echo "2011.2" ;;
4404- "essex") echo "2012.1" ;;
4405- "folsom") echo "2012.2" ;;
4406- "grizzly") echo "2013.1" ;;
4407- "havana") echo "2013.2" ;;
4408- esac
4409-}
4410-
4411-get_ip() {
4412- dpkg -l | grep -q python-dnspython || {
4413- apt-get -y install python-dnspython 2>&1 > /dev/null
4414- }
4415- hostname=$1
4416- python -c "
4417-import dns.resolver
4418-import socket
4419-try:
4420- # Test to see if already an IPv4 address
4421- socket.inet_aton('$hostname')
4422- print '$hostname'
4423-except socket.error:
4424- try:
4425- answers = dns.resolver.query('$hostname', 'A')
4426- if answers:
4427- print answers[0].address
4428- except dns.resolver.NXDOMAIN:
4429- pass
4430-"
4431-}
4432-
4433-# Common storage routines used by cinder, nova-volume and swift-storage.
4434-clean_storage() {
4435- # if configured to overwrite existing storage, we unmount the block-dev
4436- # if mounted and clear any previous pv signatures
4437- local block_dev="$1"
4438- juju-log "Cleaning storage '$block_dev'"
4439- if grep -q "^$block_dev" /proc/mounts ; then
4440- mp=$(grep "^$block_dev" /proc/mounts | awk '{ print $2 }')
4441- juju-log "Unmounting $block_dev from $mp"
4442- umount "$mp" || error_out "ERROR: Could not unmount storage from $mp"
4443- fi
4444- if pvdisplay "$block_dev" >/dev/null 2>&1 ; then
4445- juju-log "Removing existing LVM PV signatures from $block_dev"
4446-
4447- # deactivate any volgroups that may be built on this dev
4448- vg=$(pvdisplay $block_dev | grep "VG Name" | awk '{ print $3 }')
4449- if [[ -n "$vg" ]] ; then
4450- juju-log "Deactivating existing volume group: $vg"
4451- vgchange -an "$vg" ||
4452- error_out "ERROR: Could not deactivate volgroup $vg. Is it in use?"
4453- fi
4454- echo "yes" | pvremove -ff "$block_dev" ||
4455- error_out "Could not pvremove $block_dev"
4456- else
4457- juju-log "Zapping disk of all GPT and MBR structures"
4458- sgdisk --zap-all $block_dev ||
4459- error_out "Unable to zap $block_dev"
4460- fi
4461-}
4462-
4463-function get_block_device() {
4464- # given a string, return full path to the block device for that
4465- # if input is not a block device, find a loopback device
4466- local input="$1"
4467-
4468- case "$input" in
4469- /dev/*) [[ ! -b "$input" ]] && error_out "$input does not exist."
4470- echo "$input"; return 0;;
4471- /*) :;;
4472- *) [[ ! -b "/dev/$input" ]] && error_out "/dev/$input does not exist."
4473- echo "/dev/$input"; return 0;;
4474- esac
4475-
4476- # this represents a file
4477- # support "/path/to/file|5G"
4478- local fpath size oifs="$IFS"
4479- if [ "${input#*|}" != "${input}" ]; then
4480- size=${input##*|}
4481- fpath=${input%|*}
4482- else
4483- fpath=${input}
4484- size=5G
4485- fi
4486-
4487- ## loop devices are not namespaced. This is bad for containers.
4488- ## it means that the output of 'losetup' may have the given $fpath
4489- ## in it, but that may not represent this containers $fpath, but
4490- ## another containers. To address that, we really need to
4491- ## allow some uniq container-id to be expanded within path.
4492- ## TODO: find a unique container-id that will be consistent for
4493- ## this container throughout its lifetime and expand it
4494- ## in the fpath.
4495- # fpath=${fpath//%{id}/$THAT_ID}
4496-
4497- local found=""
4498- # parse through 'losetup -a' output, looking for this file
4499- # output is expected to look like:
4500- # /dev/loop0: [0807]:961814 (/tmp/my.img)
4501- found=$(losetup -a |
4502- awk 'BEGIN { found=0; }
4503- $3 == f { sub(/:$/,"",$1); print $1; found=found+1; }
4504- END { if( found == 0 || found == 1 ) { exit(0); }; exit(1); }' \
4505- f="($fpath)")
4506-
4507- if [ $? -ne 0 ]; then
4508- echo "multiple devices found for $fpath: $found" 1>&2
4509- return 1;
4510- fi
4511-
4512- [ -n "$found" -a -b "$found" ] && { echo "$found"; return 1; }
4513-
4514- if [ -n "$found" ]; then
4515- echo "confused, $found is not a block device for $fpath";
4516- return 1;
4517- fi
4518-
4519- # no existing device was found, create one
4520- mkdir -p "${fpath%/*}"
4521- truncate --size "$size" "$fpath" ||
4522- { echo "failed to create $fpath of size $size"; return 1; }
4523-
4524- found=$(losetup --find --show "$fpath") ||
4525- { echo "failed to setup loop device for $fpath" 1>&2; return 1; }
4526-
4527- echo "$found"
4528- return 0
4529-}
4530-
4531-HAPROXY_CFG=/etc/haproxy/haproxy.cfg
4532-HAPROXY_DEFAULT=/etc/default/haproxy
4533-##########################################################################
4534-# Description: Configures HAProxy services for Openstack API's
4535-# Parameters:
4536-# Space delimited list of service:port:mode combinations for which
4537-# Space-delimited list of service:haproxy_port:api_port:mode combinations
4538-# for which haproxy service configuration should be generated. The function
4539-# service unit in the peer relation is running the same services.
4540-#
4541-# Services that do not specify :mode in parameter will default to http.
4542-#
4543-# Example
4544-# configure_haproxy cinder_api:8776:8756:tcp nova_api:8774:8764:http
4545-##########################################################################
4546-configure_haproxy() {
4547- local address=`unit-get private-address`
4548- local name=${JUJU_UNIT_NAME////-}
4549- cat > $HAPROXY_CFG << EOF
4550-global
4551- log 127.0.0.1 local0
4552- log 127.0.0.1 local1 notice
4553- maxconn 20000
4554- user haproxy
4555- group haproxy
4556- spread-checks 0
4557-
4558-defaults
4559- log global
4560- mode http
4561- option httplog
4562- option dontlognull
4563- retries 3
4564- timeout queue 1000
4565- timeout connect 1000
4566- timeout client 30000
4567- timeout server 30000
4568-
4569-listen stats :8888
4570- mode http
4571- stats enable
4572- stats hide-version
4573- stats realm Haproxy\ Statistics
4574- stats uri /
4575- stats auth admin:password
4576-
4577-EOF
4578- for service in $@; do
4579- local service_name=$(echo $service | cut -d : -f 1)
4580- local haproxy_listen_port=$(echo $service | cut -d : -f 2)
4581- local api_listen_port=$(echo $service | cut -d : -f 3)
4582- local mode=$(echo $service | cut -d : -f 4)
4583- [[ -z "$mode" ]] && mode="http"
4584- juju-log "Adding haproxy configuration entry for $service "\
4585- "($haproxy_listen_port -> $api_listen_port)"
4586- cat >> $HAPROXY_CFG << EOF
4587-listen $service_name 0.0.0.0:$haproxy_listen_port
4588- balance roundrobin
4589- mode $mode
4590- option ${mode}log
4591- server $name $address:$api_listen_port check
4592-EOF
4593- local r_id=""
4594- local unit=""
4595- for r_id in `relation-ids cluster`; do
4596- for unit in `relation-list -r $r_id`; do
4597- local unit_name=${unit////-}
4598- local unit_address=`relation-get -r $r_id private-address $unit`
4599- if [ -n "$unit_address" ]; then
4600- echo " server $unit_name $unit_address:$api_listen_port check" \
4601- >> $HAPROXY_CFG
4602- fi
4603- done
4604- done
4605- done
4606- echo "ENABLED=1" > $HAPROXY_DEFAULT
4607- service haproxy restart
4608-}
4609-
4610-##########################################################################
4611-# Description: Query HA interface to determine if cluster is configured
4612-# Returns: 0 if configured, 1 if not configured
4613-##########################################################################
4614-is_clustered() {
4615- local r_id=""
4616- local unit=""
4617- for r_id in $(relation-ids ha); do
4618- if [ -n "$r_id" ]; then
4619- for unit in $(relation-list -r $r_id); do
4620- clustered=$(relation-get -r $r_id clustered $unit)
4621- if [ -n "$clustered" ]; then
4622- juju-log "Unit is haclustered"
4623- return 0
4624- fi
4625- done
4626- fi
4627- done
4628- juju-log "Unit is not haclustered"
4629- return 1
4630-}
4631-
4632-##########################################################################
4633-# Description: Return a list of all peers in cluster relations
4634-##########################################################################
4635-peer_units() {
4636- local peers=""
4637- local r_id=""
4638- for r_id in $(relation-ids cluster); do
4639- peers="$peers $(relation-list -r $r_id)"
4640- done
4641- echo $peers
4642-}
4643-
4644-##########################################################################
4645-# Description: Determines whether the current unit is the oldest of all
4646-# its peers - supports partial leader election
4647-# Returns: 0 if oldest, 1 if not
4648-##########################################################################
4649-oldest_peer() {
4650- peers=$1
4651- local l_unit_no=$(echo $JUJU_UNIT_NAME | cut -d / -f 2)
4652- for peer in $peers; do
4653- echo "Comparing $JUJU_UNIT_NAME with peers: $peers"
4654- local r_unit_no=$(echo $peer | cut -d / -f 2)
4655- if (($r_unit_no<$l_unit_no)); then
4656- juju-log "Not oldest peer; deferring"
4657- return 1
4658- fi
4659- done
4660- juju-log "Oldest peer; might take charge?"
4661- return 0
4662-}
4663-
4664-##########################################################################
4665-# Description: Determines whether the current service unit is the
4666-# leader within a) a cluster of its peers or b) across a
4667-# set of unclustered peers.
4668-# Parameters: CRM resource to check ownership of if clustered
4669-# Returns: 0 if leader, 1 if not
4670-##########################################################################
4671-eligible_leader() {
4672- if is_clustered; then
4673- if ! is_leader $1; then
4674- juju-log 'Deferring action to CRM leader'
4675- return 1
4676- fi
4677- else
4678- peers=$(peer_units)
4679- if [ -n "$peers" ] && ! oldest_peer "$peers"; then
4680- juju-log 'Deferring action to oldest service unit.'
4681- return 1
4682- fi
4683- fi
4684- return 0
4685-}
4686-
4687-##########################################################################
4688-# Description: Query Cluster peer interface to see if peered
4689-# Returns: 0 if peered, 1 if not peered
4690-##########################################################################
4691-is_peered() {
4692- local r_id=$(relation-ids cluster)
4693- if [ -n "$r_id" ]; then
4694- if [ -n "$(relation-list -r $r_id)" ]; then
4695- juju-log "Unit peered"
4696- return 0
4697- fi
4698- fi
4699- juju-log "Unit not peered"
4700- return 1
4701-}
4702-
4703-##########################################################################
4704-# Description: Determines whether host is owner of clustered services
4705-# Parameters: Name of CRM resource to check ownership of
4706-# Returns: 0 if leader, 1 if not leader
4707-##########################################################################
4708-is_leader() {
4709- hostname=`hostname`
4710- if [ -x /usr/sbin/crm ]; then
4711- if crm resource show $1 | grep -q $hostname; then
4712- juju-log "$hostname is cluster leader."
4713- return 0
4714- fi
4715- fi
4716- juju-log "$hostname is not cluster leader."
4717- return 1
4718-}
4719-
4720-##########################################################################
4721-# Description: Determines whether enough data has been provided in
4722-# configuration or relation data to configure HTTPS.
4723-# Parameters: None
4724-# Returns: 0 if HTTPS can be configured, 1 if not.
4725-##########################################################################
4726-https() {
4727- local r_id=""
4728- if [[ -n "$(config-get ssl_cert)" ]] &&
4729- [[ -n "$(config-get ssl_key)" ]] ; then
4730- return 0
4731- fi
4732- for r_id in $(relation-ids identity-service) ; do
4733- for unit in $(relation-list -r $r_id) ; do
4734- if [[ "$(relation-get -r $r_id https_keystone $unit)" == "True" ]] &&
4735- [[ -n "$(relation-get -r $r_id ssl_cert $unit)" ]] &&
4736- [[ -n "$(relation-get -r $r_id ssl_key $unit)" ]] &&
4737- [[ -n "$(relation-get -r $r_id ca_cert $unit)" ]] ; then
4738- return 0
4739- fi
4740- done
4741- done
4742- return 1
4743-}
4744-
4745-##########################################################################
4746-# Description: For a given number of port mappings, configures apache2
4747-# HTTPS local reverse proxying using certificates and keys provided in
4748-# either configuration data (preferred) or relation data. Assumes ports
4749-# are not in use (calling charm should ensure that).
4750-# Parameters: Variable number of proxy port mappings as
4751-# $internal:$external.
4752-# Returns: 0 if reverse proxy(s) have been configured, 1 if not.
4753-##########################################################################
4754-enable_https() {
4755- local port_maps="$@"
4756- local http_restart=""
4757- juju-log "Enabling HTTPS for port mappings: $port_maps."
4758-
4759- # allow overriding of keystone provided certs with those set manually
4760- # in config.
4761- local cert=$(config-get ssl_cert)
4762- local key=$(config-get ssl_key)
4763- local ca_cert=""
4764- if [[ -z "$cert" ]] || [[ -z "$key" ]] ; then
4765- juju-log "Inspecting identity-service relations for SSL certificate."
4766- local r_id=""
4767- cert=""
4768- key=""
4769- ca_cert=""
4770- for r_id in $(relation-ids identity-service) ; do
4771- for unit in $(relation-list -r $r_id) ; do
4772- [[ -z "$cert" ]] && cert="$(relation-get -r $r_id ssl_cert $unit)"
4773- [[ -z "$key" ]] && key="$(relation-get -r $r_id ssl_key $unit)"
4774- [[ -z "$ca_cert" ]] && ca_cert="$(relation-get -r $r_id ca_cert $unit)"
4775- done
4776- done
4777- [[ -n "$cert" ]] && cert=$(echo $cert | base64 -di)
4778- [[ -n "$key" ]] && key=$(echo $key | base64 -di)
4779- [[ -n "$ca_cert" ]] && ca_cert=$(echo $ca_cert | base64 -di)
4780- else
4781- juju-log "Using SSL certificate provided in service config."
4782- fi
4783-
4784- [[ -z "$cert" ]] || [[ -z "$key" ]] &&
4785- juju-log "Expected but could not find SSL certificate data, not "\
4786- "configuring HTTPS!" && return 1
4787-
4788- apt-get -y install apache2
4789- a2enmod ssl proxy proxy_http | grep -v "To activate the new configuration" &&
4790- http_restart=1
4791-
4792- mkdir -p /etc/apache2/ssl/$CHARM
4793- echo "$cert" >/etc/apache2/ssl/$CHARM/cert
4794- echo "$key" >/etc/apache2/ssl/$CHARM/key
4795- if [[ -n "$ca_cert" ]] ; then
4796- juju-log "Installing Keystone supplied CA cert."
4797- echo "$ca_cert" >/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
4798- update-ca-certificates --fresh
4799-
4800- # XXX TODO: Find a better way of exporting this?
4801- if [[ "$CHARM" == "nova-cloud-controller" ]] ; then
4802- [[ -e /var/www/keystone_juju_ca_cert.crt ]] &&
4803- rm -rf /var/www/keystone_juju_ca_cert.crt
4804- ln -s /usr/local/share/ca-certificates/keystone_juju_ca_cert.crt \
4805- /var/www/keystone_juju_ca_cert.crt
4806- fi
4807-
4808- fi
4809- for port_map in $port_maps ; do
4810- local ext_port=$(echo $port_map | cut -d: -f1)
4811- local int_port=$(echo $port_map | cut -d: -f2)
4812- juju-log "Creating apache2 reverse proxy vhost for $port_map."
4813- cat >/etc/apache2/sites-available/${CHARM}_${ext_port} <<END
4814-Listen $ext_port
4815-NameVirtualHost *:$ext_port
4816-<VirtualHost *:$ext_port>
4817- ServerName $(unit-get private-address)
4818- SSLEngine on
4819- SSLCertificateFile /etc/apache2/ssl/$CHARM/cert
4820- SSLCertificateKeyFile /etc/apache2/ssl/$CHARM/key
4821- ProxyPass / http://localhost:$int_port/
4822- ProxyPassReverse / http://localhost:$int_port/
4823- ProxyPreserveHost on
4824-</VirtualHost>
4825-<Proxy *>
4826- Order deny,allow
4827- Allow from all
4828-</Proxy>
4829-<Location />
4830- Order allow,deny
4831- Allow from all
4832-</Location>
4833-END
4834- a2ensite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" &&
4835- http_restart=1
4836- done
4837- if [[ -n "$http_restart" ]] ; then
4838- service apache2 restart
4839- fi
4840-}
4841-
4842-##########################################################################
4843-# Description: Ensure HTTPS reverse proxying is disabled for given port
4844-# mappings.
4845-# Parameters: Variable number of proxy port mappings as
4846-# $internal:$external.
4847-# Returns: 0 if reverse proxy is not active for all portmaps, 1 on error.
4848-##########################################################################
4849-disable_https() {
4850- local port_maps="$@"
4851- local http_restart=""
4852- juju-log "Ensuring HTTPS disabled for $port_maps."
4853- ( [[ ! -d /etc/apache2 ]] || [[ ! -d /etc/apache2/ssl/$CHARM ]] ) && return 0
4854- for port_map in $port_maps ; do
4855- local ext_port=$(echo $port_map | cut -d: -f1)
4856- local int_port=$(echo $port_map | cut -d: -f2)
4857- if [[ -e /etc/apache2/sites-available/${CHARM}_${ext_port} ]] ; then
4858- juju-log "Disabling HTTPS reverse proxy for $CHARM $port_map."
4859- a2dissite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" &&
4860- http_restart=1
4861- fi
4862- done
4863- if [[ -n "$http_restart" ]] ; then
4864- service apache2 restart
4865- fi
4866-}
4867-
4868-
4869-##########################################################################
4870-# Description: Ensures HTTPS is either enabled or disabled for given port
4871-# mapping.
4872-# Parameters: Variable number of proxy port mappings as
4873-# $internal:$external.
4874-# Returns: 0 if HTTPS reverse proxy is in place, 1 if it is not.
4875-##########################################################################
4876-setup_https() {
4877- # configure https via apache reverse proxying either
4878- # using certs provided by config or keystone.
4879- [[ -z "$CHARM" ]] &&
4880- error_out "setup_https(): CHARM not set."
4881- if ! https ; then
4882- disable_https $@
4883- else
4884- enable_https $@
4885- fi
4886-}
4887-
4888-##########################################################################
4889-# Description: Determine correct API server listening port based on
4890-# existence of HTTPS reverse proxy and/or haproxy.
4891-# Parameters: The standard public port for given service.
4892-# Returns: The correct listening port for API service.
4893-##########################################################################
4894-determine_api_port() {
4895- local public_port="$1"
4896- local i=0
4897- ( [[ -n "$(peer_units)" ]] || is_clustered >/dev/null 2>&1 ) && i=$[$i + 1]
4898- https >/dev/null 2>&1 && i=$[$i + 1]
4899- echo $[$public_port - $[$i * 10]]
4900-}
4901-
4902-##########################################################################
4903-# Description: Determine correct proxy listening port based on public IP +
4904-# existence of HTTPS reverse proxy.
4905-# Parameters: The standard public port for given service.
4906-# Returns: The correct listening port for haproxy service public address.
4907-##########################################################################
4908-determine_haproxy_port() {
4909- local public_port="$1"
4910- local i=0
4911- https >/dev/null 2>&1 && i=$[$i + 1]
4912- echo $[$public_port - $[$i * 10]]
4913-}
4914-
4915-##########################################################################
4916-# Description: Print the value for a given config option in an OpenStack
4917-# .ini style configuration file.
4918-# Parameters: File path, option to retrieve, optional
4919-# section name (default=DEFAULT)
4920-# Returns: Prints value if set, prints nothing otherwise.
4921-##########################################################################
4922-local_config_get() {
4923- # return config values set in openstack .ini config files.
4924- # values still carrying a default placeholder (eg, %AUTH_HOST%) are
4925- # treated as unset.
4926- local file="$1"
4927- local option="$2"
4928- local section="$3"
4929- [[ -z "$section" ]] && section="DEFAULT"
4930- python -c "
4931-import ConfigParser
4932-config = ConfigParser.RawConfigParser()
4933-config.read('$file')
4934-try:
4935- value = config.get('$section', '$option')
4936-except:
4937- print ''
4938- exit(0)
4939-if value.startswith('%'): exit(0)
4940-print value
4941-"
4942-}
4943-
4944-##########################################################################
4945-# Description: Creates an rc file exporting environment variables to a
4946-# script_path local to the charm's installed directory.
4947-# Any charm scripts run outside the juju hook environment can source this
4948-# scriptrc to obtain updated config information necessary to perform health
4949-# checks or service changes
4950-#
4951-# Parameters:
4952-# An array of '=' delimited ENV_VAR:value combinations to export.
4953-# If optional script_path key is not provided in the array, script_path
4954-# defaults to scripts/scriptrc
4955-##########################################################################
4956-function save_script_rc {
4957- if [ ! -n "$JUJU_UNIT_NAME" ]; then
4958- echo "Error: Missing JUJU_UNIT_NAME environment variable"
4959- exit 1
4960- fi
4961- # our default unit_path
4962- unit_path="$CHARM_DIR/scripts/scriptrc"
4963- echo $unit_path
4964- tmp_rc="/tmp/${JUJU_UNIT_NAME/\//-}rc"
4965-
4966- echo "#!/bin/bash" > $tmp_rc
4967- for env_var in "${@}"
4968- do
4969- if `echo $env_var | grep -q script_path`; then
4970- # well then we need to reset the new unit-local script path
4971- unit_path="$CHARM_DIR/${env_var/script_path=/}"
4972- else
4973- echo "export $env_var" >> $tmp_rc
4974- fi
4975- done
4976- chmod 755 $tmp_rc
4977- mv $tmp_rc $unit_path
4978-}
4979
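A worked example of the port arithmetic in determine_api_port() and determine_haproxy_port() above: each proxy layer in front of the API steps the listen port down by 10. For a public port of 8774, a unit that is peered/clustered and has HTTPS enabled serves the API itself on 8754 (8774 - 2*10), haproxy listens on 8764 (8774 - 1*10, the HTTPS step only), and the apache reverse proxy holds the public 8774.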
4980=== removed file 'hooks/nova-compute-common'
4981--- hooks/nova-compute-common 2013-06-04 14:06:37 +0000
4982+++ hooks/nova-compute-common 1970-01-01 00:00:00 +0000
4983@@ -1,309 +0,0 @@
4984-#!/bin/bash -e
4985-
4986-CHARM="nova-compute"
4987-PACKAGES="nova-compute python-keystone genisoimage"
4988-SERVICES="nova-compute"
4989-CONF_DIR="/etc/nova"
4990-NOVA_CONF=$(config-get nova-config)
4991-API_CONF="/etc/nova/api-paste.ini"
4992-QUANTUM_CONF="/etc/quantum/quantum.conf"
4993-LIBVIRTD_CONF="/etc/libvirt/libvirtd.conf"
4994-HOOKS_DIR="$CHARM_DIR/hooks"
4995-MULTI_HOST=$(config-get multi-host)
4996-
4997-if [ -f /etc/nova/nm.conf ]; then
4998- NET_MANAGER=$(cat /etc/nova/nm.conf)
4999-fi
5000-case $NET_MANAGER in
The diff has been truncated for viewing.
