Merge lp:~james-page/charms/trusty/nova-cloud-controller/ram-allocation-ratio into lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/trunk

Proposed by James Page
Status: Superseded
Proposed branch: lp:~james-page/charms/trusty/nova-cloud-controller/ram-allocation-ratio
Merge into: lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/trunk
Diff against target: 5890 lines (+4147/-361) (has conflicts)
49 files modified
.bzrignore (+2/-0)
Makefile (+24/-1)
README.txt (+10/-0)
charm-helpers-hooks.yaml (+12/-0)
charm-helpers-tests.yaml (+5/-0)
config.yaml (+100/-0)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+3/-2)
hooks/charmhelpers/contrib/network/ip.py (+174/-0)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+61/-0)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+275/-0)
hooks/charmhelpers/contrib/openstack/context.py (+121/-25)
hooks/charmhelpers/contrib/openstack/ip.py (+79/-0)
hooks/charmhelpers/contrib/openstack/neutron.py (+14/-0)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+9/-4)
hooks/charmhelpers/contrib/openstack/templating.py (+22/-23)
hooks/charmhelpers/contrib/openstack/utils.py (+18/-5)
hooks/charmhelpers/contrib/peerstorage/__init__.py (+83/-0)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+1/-1)
hooks/charmhelpers/core/fstab.py (+116/-0)
hooks/charmhelpers/core/hookenv.py (+7/-5)
hooks/charmhelpers/core/host.py (+47/-8)
hooks/charmhelpers/core/services/__init__.py (+2/-0)
hooks/charmhelpers/core/services/base.py (+310/-0)
hooks/charmhelpers/core/services/helpers.py (+125/-0)
hooks/charmhelpers/core/templating.py (+51/-0)
hooks/charmhelpers/fetch/__init__.py (+97/-28)
hooks/nova_cc_context.py (+51/-2)
hooks/nova_cc_hooks.py (+286/-61)
hooks/nova_cc_utils.py (+369/-163)
metadata.yaml (+2/-0)
revision (+1/-1)
templates/havana/nova.conf (+11/-2)
templates/icehouse/neutron.conf (+5/-0)
templates/icehouse/nova.conf (+18/-2)
tests/00-setup (+10/-0)
tests/10-basic-precise-essex (+10/-0)
tests/11-basic-precise-folsom (+18/-0)
tests/12-basic-precise-grizzly (+12/-0)
tests/13-basic-precise-havana (+12/-0)
tests/14-basic-precise-icehouse (+12/-0)
tests/15-basic-trusty-icehouse (+10/-0)
tests/README (+47/-0)
tests/basic_deployment.py (+520/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+71/-0)
tests/charmhelpers/contrib/amulet/utils.py (+176/-0)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+61/-0)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+275/-0)
unit_tests/test_nova_cc_hooks.py (+262/-13)
unit_tests/test_nova_cc_utils.py (+140/-15)
Conflict adding file .bzrignore.  Moved existing file to .bzrignore.moved.
Text conflict in Makefile
Contents conflict in charm-helpers.yaml
Text conflict in config.yaml
Text conflict in hooks/charmhelpers/contrib/openstack/context.py
Text conflict in hooks/charmhelpers/contrib/openstack/utils.py
Conflict adding file hooks/charmhelpers/core/fstab.py.  Moved existing file to hooks/charmhelpers/core/fstab.py.moved.
Text conflict in hooks/charmhelpers/core/host.py
Text conflict in hooks/charmhelpers/fetch/__init__.py
Text conflict in hooks/nova_cc_hooks.py
Text conflict in hooks/nova_cc_utils.py
Text conflict in templates/havana/nova.conf
Text conflict in templates/icehouse/neutron.conf
Text conflict in templates/icehouse/nova.conf
To merge this branch: bzr merge lp:~james-page/charms/trusty/nova-cloud-controller/ram-allocation-ratio
Reviewer: OpenStack Charmers
Status: Pending
Review via email: mp+234781@code.launchpad.net

Preview Diff

1=== added file '.bzrignore'
2--- .bzrignore 1970-01-01 00:00:00 +0000
3+++ .bzrignore 2014-09-16 09:08:32 +0000
4@@ -0,0 +1,2 @@
5+bin
6+.coverage
7
8=== renamed file '.bzrignore' => '.bzrignore.moved'
9=== modified file 'Makefile'
10--- Makefile 2014-09-09 23:43:43 +0000
11+++ Makefile 2014-09-16 09:08:32 +0000
12@@ -2,9 +2,10 @@
13 PYTHON := /usr/bin/env python
14
15 lint:
16- @flake8 --exclude hooks/charmhelpers hooks unit_tests
17+ @flake8 --exclude hooks/charmhelpers hooks unit_tests tests
18 @charm proof
19
20+<<<<<<< TREE
21 test: .venv
22 @echo Starting tests...
23 .venv/bin/nosetests --nologcapture --with-coverage unit_tests
24@@ -18,6 +19,28 @@
25 @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml
26
27 publish: lint test
28+=======
29+unit_test:
30+ @echo Starting unit tests...
31+ @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
32+
33+bin/charm_helpers_sync.py:
34+ @mkdir -p bin
35+ @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
36+ > bin/charm_helpers_sync.py
37+test:
38+ @echo Starting Amulet tests...
39+ # coreycb note: The -v should only be temporary until Amulet sends
40+ # raise_status() messages to stderr:
41+ # https://bugs.launchpad.net/amulet/+bug/1320357
42+ @juju test -v -p AMULET_HTTP_PROXY
43+
44+sync: bin/charm_helpers_sync.py
45+ @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
46+ @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
47+
48+publish: lint unit_test
49+>>>>>>> MERGE-SOURCE
50 bzr push lp:charms/nova-cloud-controller
51 bzr push lp:charms/trusty/nova-cloud-controller
52
53
54=== modified file 'README.txt'
55--- README.txt 2014-03-25 09:11:04 +0000
56+++ README.txt 2014-09-16 09:08:32 +0000
57@@ -4,6 +4,16 @@
58
59 Cloud controller node for Openstack nova. Contains nova-schedule, nova-api, nova-network and nova-objectstore.
60
61+The neutron-api interface can be used to join this charm with an external neutron-api server. If this is done
62+then this charm will shut down its neutron-api service and the external charm will be registered as the
63+neutron-api endpoint in keystone. It will also use the quantum-security-groups setting which is passed to
64+it by the api service rather than its own quantum-security-groups setting.
65+
66+If console access is required then console-proxy-ip should be set to a client-accessible IP that resolves
67+to the nova-cloud-controller. If running in HA mode then the public vip is used if console-proxy-ip is set
68+to local. Note: the console access protocol is baked into a guest when it is created; if you change it,
69+console access for existing guests will stop working.
70+
71 ******************************************************
72 Special considerations to be deployed using Postgresql
73 ******************************************************
74
75=== added file 'charm-helpers-hooks.yaml'
76--- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000
77+++ charm-helpers-hooks.yaml 2014-09-16 09:08:32 +0000
78@@ -0,0 +1,12 @@
79+branch: lp:charm-helpers
80+destination: hooks/charmhelpers
81+include:
82+ - core
83+ - fetch
84+ - contrib.openstack|inc=*
85+ - contrib.storage
86+ - contrib.peerstorage
87+ - contrib.hahelpers:
88+ - apache
89+ - payload.execd
90+ - contrib.network.ip
91
92=== added file 'charm-helpers-tests.yaml'
93--- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000
94+++ charm-helpers-tests.yaml 2014-09-16 09:08:32 +0000
95@@ -0,0 +1,5 @@
96+branch: lp:charm-helpers
97+destination: tests/charmhelpers
98+include:
99+ - contrib.amulet
100+ - contrib.openstack.amulet
101
102=== renamed file 'charm-helpers.yaml' => 'charm-helpers.yaml.THIS'
103=== modified file 'config.yaml'
104--- config.yaml 2014-09-09 23:43:43 +0000
105+++ config.yaml 2014-09-16 09:08:32 +0000
106@@ -97,6 +97,7 @@
107 # HA configuration settings
108 vip:
109 type: string
110+<<<<<<< TREE
111 default:
112 description: "Virtual IP to use to front API services in ha configuration"
113 vip_iface:
114@@ -107,6 +108,13 @@
115 type: int
116 default: 24
117 description: "Netmask that will be used for the Virtual IP"
118+=======
119+ description: |
120+ Virtual IP(s) to use to front API services in HA configuration.
121+ .
122+ If multiple networks are being used, a VIP should be provided for each
123+ network, separated by spaces.
124+>>>>>>> MERGE-SOURCE
125 ha-bindiface:
126 type: string
127 default: eth0
128@@ -145,8 +153,12 @@
129 # Neutron NVP and VMware NSX plugin configuration
130 nvp-controllers:
131 type: string
132+<<<<<<< TREE
133 default:
134 description: Space delimited addresses of NVP/NSX controllers
135+=======
136+ description: Space delimited addresses of NVP/NSX controllers
137+>>>>>>> MERGE-SOURCE
138 nvp-username:
139 type: string
140 default: admin
141@@ -168,6 +180,7 @@
142 in NVP before starting Quantum with the nvp plugin.
143 nvp-l3-uuid:
144 type: string
145+<<<<<<< TREE
146 default:
147 description: |
148 This is uuid of the default NVP/NSX L3 Gateway Service.
149@@ -191,3 +204,90 @@
150 * shared-db or (pgsql-nova-db, pgsql-neutron-db)
151 * amqp
152 * identity-service
153+=======
154+ description: |
155+ This is uuid of the default NVP/NSX L3 Gateway Service.
156+ # end of NVP/NSX configuration
157+ # Network configuration options
158+ # by default all access is over 'private-address'
159+ os-admin-network:
160+ type: string
161+ description: |
162+ The IP address and netmask of the OpenStack Admin network (e.g.,
163+ 192.168.0.0/24)
164+ .
165+ This network will be used for admin endpoints.
166+ os-internal-network:
167+ type: string
168+ description: |
169+ The IP address and netmask of the OpenStack Internal network (e.g.,
170+ 192.168.0.0/24)
171+ .
172+ This network will be used for internal endpoints.
173+ os-public-network:
174+ type: string
175+ description: |
176+ The IP address and netmask of the OpenStack Public network (e.g.,
177+ 192.168.0.0/24)
178+ .
179+ This network will be used for public endpoints.
180+ service-guard:
181+ type: boolean
182+ default: false
183+ description: |
184+ Ensure required relations are made and complete before allowing services
185+ to be started
186+ .
187+ By default, services may be up and accepting API requests from install
188+ onwards.
189+ .
190+ Enabling this flag ensures that services will not be started until the
191+ minimum 'core relations' have been made between this charm and other
192+ charms.
193+ .
194+ For this charm the following relations must be made:
195+ .
196+ * shared-db or (pgsql-nova-db, pgsql-neutron-db)
197+ * amqp
198+ * identity-service
199+ console-access-protocol:
200+ type: string
201+ description: |
202+ Protocol to use when accessing virtual machine console. Supported types
203+ are None, spice, xvpvnc, novnc and vnc (for both xvpvnc and novnc)
204+ console-proxy-ip:
205+ type: string
206+ default: local
207+ description: |
208+ If console-access-protocol != None then this is the IP published to
209+ clients for access to the console proxy. Set to 'local' to use the IP
210+ address of the nova-cloud-controller unit serving the request.
211+ console-keymap:
212+ type: string
213+ default: 'en-us'
214+ description: |
215+ Console keymap
216+ worker-multiplier:
217+ type: int
218+ default: 2
219+ description: |
220+ The CPU core multiplier to use when configuring worker processes for
221+ Nova and Neutron. By default, the number of workers for each daemon
222+ is set to twice the number of CPU cores a service unit has.
223+ cpu-allocation-ratio:
224+ type: float
225+ default: 16.0
226+ description: |
227+ The per physical core -> virtual core ratio to use in the Nova scheduler.
228+ .
229+ Increasing this value will increase instance density on compute nodes
230+ at the expense of instance performance.
231+ ram-allocation-ratio:
232+ type: float
233+ default: 1.5
234+ description: |
235+ The physical ram -> virtual ram ratio to use in the Nova scheduler.
236+ .
237+ Increasing this value will increase instance density on compute nodes
238+ at the potential expense of instance performance.
239+>>>>>>> MERGE-SOURCE
240
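The three scheduler-related options introduced above (worker-multiplier, cpu-allocation-ratio, ram-allocation-ratio) are simple multipliers. A minimal Python sketch of the arithmetic they imply, using the charm defaults from the descriptions above and a hypothetical 8-core, 32 GiB compute node:

# Illustrative arithmetic only, not charm code. The ratios and multiplier are
# the charm defaults documented above; the node size is hypothetical.
cpu_allocation_ratio = 16.0   # virtual cores per physical core
ram_allocation_ratio = 1.5    # virtual RAM per physical RAM
worker_multiplier = 2         # worker processes per CPU core

physical_cores = 8            # hypothetical compute node
physical_ram_mb = 32768

schedulable_vcpus = physical_cores * cpu_allocation_ratio    # 128.0 vCPUs
schedulable_ram_mb = physical_ram_mb * ram_allocation_ratio  # 49152.0 MB
api_workers = physical_cores * worker_multiplier             # 16 workers per daemon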
241=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
242--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-02-17 12:10:27 +0000
243+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-09-16 09:08:32 +0000
244@@ -146,12 +146,12 @@
245 Obtains all relevant configuration from charm configuration required
246 for initiating a relation to hacluster:
247
248- ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
249+ ha-bindiface, ha-mcastport, vip
250
251 returns: dict: A dict containing settings keyed by setting name.
252 raises: HAIncompleteConfig if settings are missing.
253 '''
254- settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
255+ settings = ['ha-bindiface', 'ha-mcastport', 'vip']
256 conf = {}
257 for setting in settings:
258 conf[setting] = config_get(setting)
259@@ -170,6 +170,7 @@
260
261 :configs : OSTemplateRenderer: A config tempating object to inspect for
262 a complete https context.
263+
264 :vip_setting: str: Setting in charm config that specifies
265 VIP address.
266 '''
267
268=== added directory 'hooks/charmhelpers/contrib/network'
269=== added file 'hooks/charmhelpers/contrib/network/__init__.py'
270=== added file 'hooks/charmhelpers/contrib/network/ip.py'
271--- hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000
272+++ hooks/charmhelpers/contrib/network/ip.py 2014-09-16 09:08:32 +0000
273@@ -0,0 +1,174 @@
274+import sys
275+
276+from functools import partial
277+
278+from charmhelpers.fetch import apt_install
279+from charmhelpers.core.hookenv import (
280+ ERROR, log, config,
281+)
282+
283+try:
284+ import netifaces
285+except ImportError:
286+ apt_install('python-netifaces')
287+ import netifaces
288+
289+try:
290+ import netaddr
291+except ImportError:
292+ apt_install('python-netaddr')
293+ import netaddr
294+
295+
296+def _validate_cidr(network):
297+ try:
298+ netaddr.IPNetwork(network)
299+ except (netaddr.core.AddrFormatError, ValueError):
300+ raise ValueError("Network (%s) is not in CIDR presentation format" %
301+ network)
302+
303+
304+def get_address_in_network(network, fallback=None, fatal=False):
305+ """
306+ Get an IPv4 or IPv6 address within the network from the host.
307+
308+ :param network (str): CIDR presentation format. For example,
309+ '192.168.1.0/24'.
310+ :param fallback (str): If no address is found, return fallback.
311+ :param fatal (boolean): If no address is found, fallback is not
312+ set and fatal is True then exit(1).
313+
314+ """
315+
316+ def not_found_error_out():
317+ log("No IP address found in network: %s" % network,
318+ level=ERROR)
319+ sys.exit(1)
320+
321+ if network is None:
322+ if fallback is not None:
323+ return fallback
324+ else:
325+ if fatal:
326+ not_found_error_out()
327+
328+ _validate_cidr(network)
329+ network = netaddr.IPNetwork(network)
330+ for iface in netifaces.interfaces():
331+ addresses = netifaces.ifaddresses(iface)
332+ if network.version == 4 and netifaces.AF_INET in addresses:
333+ addr = addresses[netifaces.AF_INET][0]['addr']
334+ netmask = addresses[netifaces.AF_INET][0]['netmask']
335+ cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
336+ if cidr in network:
337+ return str(cidr.ip)
338+ if network.version == 6 and netifaces.AF_INET6 in addresses:
339+ for addr in addresses[netifaces.AF_INET6]:
340+ if not addr['addr'].startswith('fe80'):
341+ cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
342+ addr['netmask']))
343+ if cidr in network:
344+ return str(cidr.ip)
345+
346+ if fallback is not None:
347+ return fallback
348+
349+ if fatal:
350+ not_found_error_out()
351+
352+ return None
353+
354+
355+def is_ipv6(address):
356+ '''Determine whether provided address is IPv6 or not'''
357+ try:
358+ address = netaddr.IPAddress(address)
359+ except netaddr.AddrFormatError:
360+ # probably a hostname - so not an address at all!
361+ return False
362+ else:
363+ return address.version == 6
364+
365+
366+def is_address_in_network(network, address):
367+ """
368+ Determine whether the provided address is within a network range.
369+
370+ :param network (str): CIDR presentation format. For example,
371+ '192.168.1.0/24'.
372+ :param address: An individual IPv4 or IPv6 address without a net
373+ mask or subnet prefix. For example, '192.168.1.1'.
374+ :returns boolean: Flag indicating whether address is in network.
375+ """
376+ try:
377+ network = netaddr.IPNetwork(network)
378+ except (netaddr.core.AddrFormatError, ValueError):
379+ raise ValueError("Network (%s) is not in CIDR presentation format" %
380+ network)
381+ try:
382+ address = netaddr.IPAddress(address)
383+ except (netaddr.core.AddrFormatError, ValueError):
384+ raise ValueError("Address (%s) is not in correct presentation format" %
385+ address)
386+ if address in network:
387+ return True
388+ else:
389+ return False
390+
391+
392+def _get_for_address(address, key):
393+ """Retrieve an attribute of or the physical interface that
394+ the IP address provided could be bound to.
395+
396+ :param address (str): An individual IPv4 or IPv6 address without a net
397+ mask or subnet prefix. For example, '192.168.1.1'.
398+ :param key: 'iface' for the physical interface name or an attribute
399+ of the configured interface, for example 'netmask'.
400+ :returns str: Requested attribute or None if address is not bindable.
401+ """
402+ address = netaddr.IPAddress(address)
403+ for iface in netifaces.interfaces():
404+ addresses = netifaces.ifaddresses(iface)
405+ if address.version == 4 and netifaces.AF_INET in addresses:
406+ addr = addresses[netifaces.AF_INET][0]['addr']
407+ netmask = addresses[netifaces.AF_INET][0]['netmask']
408+ cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
409+ if address in cidr:
410+ if key == 'iface':
411+ return iface
412+ else:
413+ return addresses[netifaces.AF_INET][0][key]
414+ if address.version == 6 and netifaces.AF_INET6 in addresses:
415+ for addr in addresses[netifaces.AF_INET6]:
416+ if not addr['addr'].startswith('fe80'):
417+ cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
418+ addr['netmask']))
419+ if address in cidr:
420+ if key == 'iface':
421+ return iface
422+ else:
423+ return addr[key]
424+ return None
425+
426+
427+get_iface_for_address = partial(_get_for_address, key='iface')
428+
429+get_netmask_for_address = partial(_get_for_address, key='netmask')
430+
431+
432+def get_ipv6_addr(iface="eth0"):
433+ try:
434+ iface_addrs = netifaces.ifaddresses(iface)
435+ if netifaces.AF_INET6 not in iface_addrs:
436+ raise Exception("Interface '%s' doesn't have an ipv6 address." % iface)
437+
438+ addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6]
439+ ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80')
440+ and config('vip') != a['addr']]
441+ if not ipv6_addr:
442+ raise Exception("Interface '%s' doesn't have global ipv6 address." % iface)
443+
444+ return ipv6_addr[0]
445+
446+ except ValueError:
447+ raise ValueError("Invalid interface '%s'" % iface)
448
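For reference, a rough sketch of how the new contrib.network.ip helpers are called from hook code. It assumes charm-helpers has been synced into hooks/charmhelpers and that it runs on a deployed unit; the CIDRs, addresses and fallback below are placeholders, not values from this charm:

# Sketch only: networks and addresses are placeholders.
from charmhelpers.contrib.network.ip import (
    get_address_in_network,
    is_address_in_network,
    get_netmask_for_address,
)

# Local address that falls inside a (hypothetical) os-internal-network range,
# falling back to the unit's known private address if none matches.
addr = get_address_in_network('192.168.21.0/24', fallback='10.5.0.10')

# Check whether a candidate VIP belongs to a given network before using it.
if is_address_in_network('10.5.100.0/24', '10.5.100.1'):
    netmask = get_netmask_for_address('10.5.100.1')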
449=== added directory 'hooks/charmhelpers/contrib/openstack/amulet'
450=== added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py'
451=== added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
452--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
453+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-09-16 09:08:32 +0000
454@@ -0,0 +1,61 @@
455+from charmhelpers.contrib.amulet.deployment import (
456+ AmuletDeployment
457+)
458+
459+
460+class OpenStackAmuletDeployment(AmuletDeployment):
461+ """OpenStack amulet deployment.
462+
463+ This class inherits from AmuletDeployment and has additional support
464+ that is specifically for use by OpenStack charms.
465+ """
466+
467+ def __init__(self, series=None, openstack=None, source=None):
468+ """Initialize the deployment environment."""
469+ super(OpenStackAmuletDeployment, self).__init__(series)
470+ self.openstack = openstack
471+ self.source = source
472+
473+ def _add_services(self, this_service, other_services):
474+ """Add services to the deployment and set openstack-origin."""
475+ super(OpenStackAmuletDeployment, self)._add_services(this_service,
476+ other_services)
477+ name = 0
478+ services = other_services
479+ services.append(this_service)
480+ use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
481+
482+ if self.openstack:
483+ for svc in services:
484+ if svc[name] not in use_source:
485+ config = {'openstack-origin': self.openstack}
486+ self.d.configure(svc[name], config)
487+
488+ if self.source:
489+ for svc in services:
490+ if svc[name] in use_source:
491+ config = {'source': self.source}
492+ self.d.configure(svc[name], config)
493+
494+ def _configure_services(self, configs):
495+ """Configure all of the services."""
496+ for service, config in configs.iteritems():
497+ self.d.configure(service, config)
498+
499+ def _get_openstack_release(self):
500+ """Get openstack release.
501+
502+ Return an integer representing the enum value of the openstack
503+ release.
504+ """
505+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
506+ self.precise_havana, self.precise_icehouse,
507+ self.trusty_icehouse) = range(6)
508+ releases = {
509+ ('precise', None): self.precise_essex,
510+ ('precise', 'cloud:precise-folsom'): self.precise_folsom,
511+ ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
512+ ('precise', 'cloud:precise-havana'): self.precise_havana,
513+ ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
514+ ('trusty', None): self.trusty_icehouse}
515+ return releases[(self.series, self.openstack)]
516
517=== added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
518--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
519+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-09-16 09:08:32 +0000
520@@ -0,0 +1,275 @@
521+import logging
522+import os
523+import time
524+import urllib
525+
526+import glanceclient.v1.client as glance_client
527+import keystoneclient.v2_0 as keystone_client
528+import novaclient.v1_1.client as nova_client
529+
530+from charmhelpers.contrib.amulet.utils import (
531+ AmuletUtils
532+)
533+
534+DEBUG = logging.DEBUG
535+ERROR = logging.ERROR
536+
537+
538+class OpenStackAmuletUtils(AmuletUtils):
539+ """OpenStack amulet utilities.
540+
541+ This class inherits from AmuletUtils and has additional support
542+ that is specifically for use by OpenStack charms.
543+ """
544+
545+ def __init__(self, log_level=ERROR):
546+ """Initialize the deployment environment."""
547+ super(OpenStackAmuletUtils, self).__init__(log_level)
548+
549+ def validate_endpoint_data(self, endpoints, admin_port, internal_port,
550+ public_port, expected):
551+ """Validate endpoint data.
552+
553+ Validate actual endpoint data vs expected endpoint data. The ports
554+ are used to find the matching endpoint.
555+ """
556+ found = False
557+ for ep in endpoints:
558+ self.log.debug('endpoint: {}'.format(repr(ep)))
559+ if (admin_port in ep.adminurl and
560+ internal_port in ep.internalurl and
561+ public_port in ep.publicurl):
562+ found = True
563+ actual = {'id': ep.id,
564+ 'region': ep.region,
565+ 'adminurl': ep.adminurl,
566+ 'internalurl': ep.internalurl,
567+ 'publicurl': ep.publicurl,
568+ 'service_id': ep.service_id}
569+ ret = self._validate_dict_data(expected, actual)
570+ if ret:
571+ return 'unexpected endpoint data - {}'.format(ret)
572+
573+ if not found:
574+ return 'endpoint not found'
575+
576+ def validate_svc_catalog_endpoint_data(self, expected, actual):
577+ """Validate service catalog endpoint data.
578+
579+ Validate a list of actual service catalog endpoints vs a list of
580+ expected service catalog endpoints.
581+ """
582+ self.log.debug('actual: {}'.format(repr(actual)))
583+ for k, v in expected.iteritems():
584+ if k in actual:
585+ ret = self._validate_dict_data(expected[k][0], actual[k][0])
586+ if ret:
587+ return self.endpoint_error(k, ret)
588+ else:
589+ return "endpoint {} does not exist".format(k)
590+ return ret
591+
592+ def validate_tenant_data(self, expected, actual):
593+ """Validate tenant data.
594+
595+ Validate a list of actual tenant data vs list of expected tenant
596+ data.
597+ """
598+ self.log.debug('actual: {}'.format(repr(actual)))
599+ for e in expected:
600+ found = False
601+ for act in actual:
602+ a = {'enabled': act.enabled, 'description': act.description,
603+ 'name': act.name, 'id': act.id}
604+ if e['name'] == a['name']:
605+ found = True
606+ ret = self._validate_dict_data(e, a)
607+ if ret:
608+ return "unexpected tenant data - {}".format(ret)
609+ if not found:
610+ return "tenant {} does not exist".format(e['name'])
611+ return ret
612+
613+ def validate_role_data(self, expected, actual):
614+ """Validate role data.
615+
616+ Validate a list of actual role data vs a list of expected role
617+ data.
618+ """
619+ self.log.debug('actual: {}'.format(repr(actual)))
620+ for e in expected:
621+ found = False
622+ for act in actual:
623+ a = {'name': act.name, 'id': act.id}
624+ if e['name'] == a['name']:
625+ found = True
626+ ret = self._validate_dict_data(e, a)
627+ if ret:
628+ return "unexpected role data - {}".format(ret)
629+ if not found:
630+ return "role {} does not exist".format(e['name'])
631+ return ret
632+
633+ def validate_user_data(self, expected, actual):
634+ """Validate user data.
635+
636+ Validate a list of actual user data vs a list of expected user
637+ data.
638+ """
639+ self.log.debug('actual: {}'.format(repr(actual)))
640+ for e in expected:
641+ found = False
642+ for act in actual:
643+ a = {'enabled': act.enabled, 'name': act.name,
644+ 'email': act.email, 'tenantId': act.tenantId,
645+ 'id': act.id}
646+ if e['name'] == a['name']:
647+ found = True
648+ ret = self._validate_dict_data(e, a)
649+ if ret:
650+ return "unexpected user data - {}".format(ret)
651+ if not found:
652+ return "user {} does not exist".format(e['name'])
653+ return ret
654+
655+ def validate_flavor_data(self, expected, actual):
656+ """Validate flavor data.
657+
658+ Validate a list of actual flavors vs a list of expected flavors.
659+ """
660+ self.log.debug('actual: {}'.format(repr(actual)))
661+ act = [a.name for a in actual]
662+ return self._validate_list_data(expected, act)
663+
664+ def tenant_exists(self, keystone, tenant):
665+ """Return True if tenant exists."""
666+ return tenant in [t.name for t in keystone.tenants.list()]
667+
668+ def authenticate_keystone_admin(self, keystone_sentry, user, password,
669+ tenant):
670+ """Authenticates admin user with the keystone admin endpoint."""
671+ unit = keystone_sentry
672+ service_ip = unit.relation('shared-db',
673+ 'mysql:shared-db')['private-address']
674+ ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
675+ return keystone_client.Client(username=user, password=password,
676+ tenant_name=tenant, auth_url=ep)
677+
678+ def authenticate_keystone_user(self, keystone, user, password, tenant):
679+ """Authenticates a regular user with the keystone public endpoint."""
680+ ep = keystone.service_catalog.url_for(service_type='identity',
681+ endpoint_type='publicURL')
682+ return keystone_client.Client(username=user, password=password,
683+ tenant_name=tenant, auth_url=ep)
684+
685+ def authenticate_glance_admin(self, keystone):
686+ """Authenticates admin user with glance."""
687+ ep = keystone.service_catalog.url_for(service_type='image',
688+ endpoint_type='adminURL')
689+ return glance_client.Client(ep, token=keystone.auth_token)
690+
691+ def authenticate_nova_user(self, keystone, user, password, tenant):
692+ """Authenticates a regular user with nova-api."""
693+ ep = keystone.service_catalog.url_for(service_type='identity',
694+ endpoint_type='publicURL')
695+ return nova_client.Client(username=user, api_key=password,
696+ project_id=tenant, auth_url=ep)
697+
698+ def create_cirros_image(self, glance, image_name):
699+ """Download the latest cirros image and upload it to glance."""
700+ http_proxy = os.getenv('AMULET_HTTP_PROXY')
701+ self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
702+ if http_proxy:
703+ proxies = {'http': http_proxy}
704+ opener = urllib.FancyURLopener(proxies)
705+ else:
706+ opener = urllib.FancyURLopener()
707+
708+ f = opener.open("http://download.cirros-cloud.net/version/released")
709+ version = f.read().strip()
710+ cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
711+
712+ if not os.path.exists(cirros_img):
713+ cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
714+ version, cirros_img)
715+ opener.retrieve(cirros_url, cirros_img)
716+ f.close()
717+
718+ with open(cirros_img) as f:
719+ image = glance.images.create(name=image_name, is_public=True,
720+ disk_format='qcow2',
721+ container_format='bare', data=f)
722+ count = 1
723+ status = image.status
724+ while status != 'active' and count < 10:
725+ time.sleep(3)
726+ image = glance.images.get(image.id)
727+ status = image.status
728+ self.log.debug('image status: {}'.format(status))
729+ count += 1
730+
731+ if status != 'active':
732+ self.log.error('image creation timed out')
733+ return None
734+
735+ return image
736+
737+ def delete_image(self, glance, image):
738+ """Delete the specified image."""
739+ num_before = len(list(glance.images.list()))
740+ glance.images.delete(image)
741+
742+ count = 1
743+ num_after = len(list(glance.images.list()))
744+ while num_after != (num_before - 1) and count < 10:
745+ time.sleep(3)
746+ num_after = len(list(glance.images.list()))
747+ self.log.debug('number of images: {}'.format(num_after))
748+ count += 1
749+
750+ if num_after != (num_before - 1):
751+ self.log.error('image deletion timed out')
752+ return False
753+
754+ return True
755+
756+ def create_instance(self, nova, image_name, instance_name, flavor):
757+ """Create the specified instance."""
758+ image = nova.images.find(name=image_name)
759+ flavor = nova.flavors.find(name=flavor)
760+ instance = nova.servers.create(name=instance_name, image=image,
761+ flavor=flavor)
762+
763+ count = 1
764+ status = instance.status
765+ while status != 'ACTIVE' and count < 60:
766+ time.sleep(3)
767+ instance = nova.servers.get(instance.id)
768+ status = instance.status
769+ self.log.debug('instance status: {}'.format(status))
770+ count += 1
771+
772+ if status != 'ACTIVE':
773+ self.log.error('instance creation timed out')
774+ return None
775+
776+ return instance
777+
778+ def delete_instance(self, nova, instance):
779+ """Delete the specified instance."""
780+ num_before = len(list(nova.servers.list()))
781+ nova.servers.delete(instance)
782+
783+ count = 1
784+ num_after = len(list(nova.servers.list()))
785+ while num_after != (num_before - 1) and count < 10:
786+ time.sleep(3)
787+ num_after = len(list(nova.servers.list()))
788+ self.log.debug('number of instances: {}'.format(num_after))
789+ count += 1
790+
791+ if num_after != (num_before - 1):
792+ self.log.error('instance deletion timed out')
793+ return False
794+
795+ return True
796
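These utilities are consumed by the new Amulet tests (tests/basic_deployment.py). A sketch of the typical flow using only the methods defined above; `d` is assumed to be an Amulet deployment whose sentries include a keystone unit, and the unit name, credentials and flavor are placeholders:

# Sketch only: d, credentials and names are assumptions, not values from
# this merge proposal.
import logging
from charmhelpers.contrib.openstack.amulet.utils import OpenStackAmuletUtils


def exercise_cloud(d):
    """d: an Amulet deployment with a keystone/0 sentry (assumed)."""
    u = OpenStackAmuletUtils(logging.DEBUG)
    keystone_sentry = d.sentry.unit['keystone/0']
    keystone = u.authenticate_keystone_admin(keystone_sentry, user='admin',
                                             password='openstack',
                                             tenant='admin')
    glance = u.authenticate_glance_admin(keystone)
    nova = u.authenticate_nova_user(keystone, user='demo',
                                    password='password', tenant='demo')

    image = u.create_cirros_image(glance, 'cirros-test')
    instance = u.create_instance(nova, 'cirros-test', 'test-server', 'm1.tiny')
    u.delete_instance(nova, instance)
    u.delete_image(glance, image)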
797=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
798--- hooks/charmhelpers/contrib/openstack/context.py 2014-08-13 15:53:46 +0000
799+++ hooks/charmhelpers/contrib/openstack/context.py 2014-09-16 09:08:32 +0000
800@@ -21,6 +21,7 @@
801 relation_get,
802 relation_ids,
803 related_units,
804+ relation_set,
805 unit_get,
806 unit_private_ip,
807 ERROR,
808@@ -43,6 +44,11 @@
809 neutron_plugin_attribute,
810 )
811
812+from charmhelpers.contrib.network.ip import (
813+ get_address_in_network,
814+ get_ipv6_addr,
815+)
816+
817 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
818
819
820@@ -135,8 +141,26 @@
821 'Missing required charm config options. '
822 '(database name and user)')
823 raise OSContextError
824+
825 ctxt = {}
826
827+ # NOTE(jamespage) if mysql charm provides a network upon which
828+ # access to the database should be made, reconfigure relation
829+ # with the service units local address and defer execution
830+ access_network = relation_get('access-network')
831+ if access_network is not None:
832+ if self.relation_prefix is not None:
833+ hostname_key = "{}_hostname".format(self.relation_prefix)
834+ else:
835+ hostname_key = "hostname"
836+ access_hostname = get_address_in_network(access_network,
837+ unit_get('private-address'))
838+ set_hostname = relation_get(attribute=hostname_key,
839+ unit=local_unit())
840+ if set_hostname != access_hostname:
841+ relation_set(relation_settings={hostname_key: access_hostname})
842+ return ctxt # Defer any further hook execution for now....
843+
844 password_setting = 'password'
845 if self.relation_prefix:
846 password_setting = self.relation_prefix + '_password'
847@@ -244,23 +268,31 @@
848
849
850 class AMQPContext(OSContextGenerator):
851- interfaces = ['amqp']
852
853- def __init__(self, ssl_dir=None):
854+ def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
855 self.ssl_dir = ssl_dir
856+ self.rel_name = rel_name
857+ self.relation_prefix = relation_prefix
858+ self.interfaces = [rel_name]
859
860 def __call__(self):
861 log('Generating template context for amqp')
862 conf = config()
863+ user_setting = 'rabbit-user'
864+ vhost_setting = 'rabbit-vhost'
865+ if self.relation_prefix:
866+ user_setting = self.relation_prefix + '-rabbit-user'
867+ vhost_setting = self.relation_prefix + '-rabbit-vhost'
868+
869 try:
870- username = conf['rabbit-user']
871- vhost = conf['rabbit-vhost']
872+ username = conf[user_setting]
873+ vhost = conf[vhost_setting]
874 except KeyError as e:
875 log('Could not generate shared_db context. '
876 'Missing required charm config options: %s.' % e)
877 raise OSContextError
878 ctxt = {}
879- for rid in relation_ids('amqp'):
880+ for rid in relation_ids(self.rel_name):
881 ha_vip_only = False
882 for unit in related_units(rid):
883 if relation_get('clustered', rid=rid, unit=unit):
884@@ -333,10 +365,12 @@
885 use_syslog = str(config('use-syslog')).lower()
886 for rid in relation_ids('ceph'):
887 for unit in related_units(rid):
888- mon_hosts.append(relation_get('private-address', rid=rid,
889- unit=unit))
890 auth = relation_get('auth', rid=rid, unit=unit)
891 key = relation_get('key', rid=rid, unit=unit)
892+ ceph_addr = \
893+ relation_get('ceph-public-address', rid=rid, unit=unit) or \
894+ relation_get('private-address', rid=rid, unit=unit)
895+ mon_hosts.append(ceph_addr)
896
897 ctxt = {
898 'mon_hosts': ' '.join(mon_hosts),
899@@ -370,7 +404,12 @@
900
901 cluster_hosts = {}
902 l_unit = local_unit().replace('/', '-')
903- cluster_hosts[l_unit] = unit_get('private-address')
904+ if config('prefer-ipv6'):
905+ addr = get_ipv6_addr()
906+ else:
907+ addr = unit_get('private-address')
908+ cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),
909+ addr)
910
911 for rid in relation_ids('cluster'):
912 for unit in related_units(rid):
913@@ -381,6 +420,16 @@
914 ctxt = {
915 'units': cluster_hosts,
916 }
917+
918+ if config('prefer-ipv6'):
919+ ctxt['local_host'] = 'ip6-localhost'
920+ ctxt['haproxy_host'] = '::'
921+ ctxt['stat_port'] = ':::8888'
922+ else:
923+ ctxt['local_host'] = '127.0.0.1'
924+ ctxt['haproxy_host'] = '0.0.0.0'
925+ ctxt['stat_port'] = ':8888'
926+
927 if len(cluster_hosts.keys()) > 1:
928 # Enable haproxy when we have enough peers.
929 log('Ensuring haproxy enabled in /etc/default/haproxy.')
930@@ -419,12 +468,13 @@
931 """
932 Generates a context for an apache vhost configuration that configures
933 HTTPS reverse proxying for one or many endpoints. Generated context
934- looks something like:
935- {
936- 'namespace': 'cinder',
937- 'private_address': 'iscsi.mycinderhost.com',
938- 'endpoints': [(8776, 8766), (8777, 8767)]
939- }
940+ looks something like::
941+
942+ {
943+ 'namespace': 'cinder',
944+ 'private_address': 'iscsi.mycinderhost.com',
945+ 'endpoints': [(8776, 8766), (8777, 8767)]
946+ }
947
948 The endpoints list consists of a tuples mapping external ports
949 to internal ports.
950@@ -542,6 +592,26 @@
951
952 return nvp_ctxt
953
954+ def n1kv_ctxt(self):
955+ driver = neutron_plugin_attribute(self.plugin, 'driver',
956+ self.network_manager)
957+ n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
958+ self.network_manager)
959+ n1kv_ctxt = {
960+ 'core_plugin': driver,
961+ 'neutron_plugin': 'n1kv',
962+ 'neutron_security_groups': self.neutron_security_groups,
963+ 'local_ip': unit_private_ip(),
964+ 'config': n1kv_config,
965+ 'vsm_ip': config('n1kv-vsm-ip'),
966+ 'vsm_username': config('n1kv-vsm-username'),
967+ 'vsm_password': config('n1kv-vsm-password'),
968+ 'restrict_policy_profiles': config(
969+ 'n1kv_restrict_policy_profiles'),
970+ }
971+
972+ return n1kv_ctxt
973+
974 def neutron_ctxt(self):
975 if https():
976 proto = 'https'
977@@ -573,6 +643,8 @@
978 ctxt.update(self.ovs_ctxt())
979 elif self.plugin in ['nvp', 'nsx']:
980 ctxt.update(self.nvp_ctxt())
981+ elif self.plugin == 'n1kv':
982+ ctxt.update(self.n1kv_ctxt())
983
984 alchemy_flags = config('neutron-alchemy-flags')
985 if alchemy_flags:
986@@ -612,7 +684,7 @@
987 The subordinate interface allows subordinates to export their
988 configuration requirements to the principle for multiple config
989 files and multiple serivces. Ie, a subordinate that has interfaces
990- to both glance and nova may export to following yaml blob as json:
991+ to both glance and nova may export to following yaml blob as json::
992
993 glance:
994 /etc/glance/glance-api.conf:
995@@ -631,7 +703,8 @@
996
997 It is then up to the principle charms to subscribe this context to
998 the service+config file it is interestd in. Configuration data will
999- be available in the template context, in glance's case, as:
1000+ be available in the template context, in glance's case, as::
1001+
1002 ctxt = {
1003 ... other context ...
1004 'subordinate_config': {
1005@@ -684,15 +757,38 @@
1006
1007 sub_config = sub_config[self.config_file]
1008 for k, v in sub_config.iteritems():
1009- if k == 'sections':
1010- for section, config_dict in v.iteritems():
1011- log("adding section '%s'" % (section))
1012- ctxt[k][section] = config_dict
1013- else:
1014- ctxt[k] = v
1015-
1016- log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
1017-
1018+<<<<<<< TREE
1019+ if k == 'sections':
1020+ for section, config_dict in v.iteritems():
1021+ log("adding section '%s'" % (section))
1022+ ctxt[k][section] = config_dict
1023+ else:
1024+ ctxt[k] = v
1025+
1026+ log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
1027+
1028+=======
1029+ if k == 'sections':
1030+ for section, config_dict in v.iteritems():
1031+ log("adding section '%s'" % (section))
1032+ ctxt[k][section] = config_dict
1033+ else:
1034+ ctxt[k] = v
1035+
1036+ log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
1037+
1038+ return ctxt
1039+
1040+
1041+class LogLevelContext(OSContextGenerator):
1042+
1043+ def __call__(self):
1044+ ctxt = {}
1045+ ctxt['debug'] = \
1046+ False if config('debug') is None else config('debug')
1047+ ctxt['verbose'] = \
1048+ False if config('verbose') is None else config('verbose')
1049+>>>>>>> MERGE-SOURCE
1050 return ctxt
1051
1052
1053
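The AMQPContext rework above adds rel_name and relation_prefix so a charm can drive more than one AMQP relation. A sketch of how that might be instantiated; the relation name 'amqp-neutron', the prefix 'neutron' and the ssl_dir path are hypothetical, chosen only to show that the prefixed form reads the 'neutron-rabbit-user' and 'neutron-rabbit-vhost' config options instead of 'rabbit-user' and 'rabbit-vhost':

# Sketch only: relation name, prefix and ssl_dir are placeholders.
from charmhelpers.contrib.openstack import context

default_amqp = context.AMQPContext(ssl_dir='/etc/nova')
neutron_amqp = context.AMQPContext(rel_name='amqp-neutron',
                                   relation_prefix='neutron')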
1054=== added file 'hooks/charmhelpers/contrib/openstack/ip.py'
1055--- hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
1056+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-09-16 09:08:32 +0000
1057@@ -0,0 +1,79 @@
1058+from charmhelpers.core.hookenv import (
1059+ config,
1060+ unit_get,
1061+)
1062+
1063+from charmhelpers.contrib.network.ip import (
1064+ get_address_in_network,
1065+ is_address_in_network,
1066+ is_ipv6,
1067+ get_ipv6_addr,
1068+)
1069+
1070+from charmhelpers.contrib.hahelpers.cluster import is_clustered
1071+
1072+PUBLIC = 'public'
1073+INTERNAL = 'int'
1074+ADMIN = 'admin'
1075+
1076+_address_map = {
1077+ PUBLIC: {
1078+ 'config': 'os-public-network',
1079+ 'fallback': 'public-address'
1080+ },
1081+ INTERNAL: {
1082+ 'config': 'os-internal-network',
1083+ 'fallback': 'private-address'
1084+ },
1085+ ADMIN: {
1086+ 'config': 'os-admin-network',
1087+ 'fallback': 'private-address'
1088+ }
1089+}
1090+
1091+
1092+def canonical_url(configs, endpoint_type=PUBLIC):
1093+ '''
1094+ Returns the correct HTTP URL to this host given the state of HTTPS
1095+ configuration, hacluster and charm configuration.
1096+
1097+ :configs OSTemplateRenderer: A config tempating object to inspect for
1098+ a complete https context.
1099+ :endpoint_type str: The endpoint type to resolve.
1100+
1101+ :returns str: Base URL for services on the current service unit.
1102+ '''
1103+ scheme = 'http'
1104+ if 'https' in configs.complete_contexts():
1105+ scheme = 'https'
1106+ address = resolve_address(endpoint_type)
1107+ if is_ipv6(address):
1108+ address = "[{}]".format(address)
1109+ return '%s://%s' % (scheme, address)
1110+
1111+
1112+def resolve_address(endpoint_type=PUBLIC):
1113+ resolved_address = None
1114+ if is_clustered():
1115+ if config(_address_map[endpoint_type]['config']) is None:
1116+ # Assume vip is simple and pass back directly
1117+ resolved_address = config('vip')
1118+ else:
1119+ for vip in config('vip').split():
1120+ if is_address_in_network(
1121+ config(_address_map[endpoint_type]['config']),
1122+ vip):
1123+ resolved_address = vip
1124+ else:
1125+ if config('prefer-ipv6'):
1126+ fallback_addr = get_ipv6_addr()
1127+ else:
1128+ fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
1129+ resolved_address = get_address_in_network(
1130+ config(_address_map[endpoint_type]['config']), fallback_addr)
1131+
1132+ if resolved_address is None:
1133+ raise ValueError('Unable to resolve a suitable IP address'
1134+ ' based on charm state and configuration')
1135+ else:
1136+ return resolved_address
1137
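A sketch of how the new canonical_url/resolve_address helpers are typically used when building endpoint URLs; register_configs() is assumed here to be the charm's own helper returning the OSConfigRenderer used for HTTPS detection, and 8774 is just the usual nova-api port used for illustration:

# Sketch only: register_configs() and the port are assumptions.
from charmhelpers.contrib.openstack.ip import canonical_url, PUBLIC, INTERNAL, ADMIN
from nova_cc_utils import register_configs  # assumed charm helper

CONFIGS = register_configs()
public_url = '{}:{}'.format(canonical_url(CONFIGS, PUBLIC), 8774)
internal_url = '{}:{}'.format(canonical_url(CONFIGS, INTERNAL), 8774)
admin_url = '{}:{}'.format(canonical_url(CONFIGS, ADMIN), 8774)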
1138=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
1139--- hooks/charmhelpers/contrib/openstack/neutron.py 2014-05-19 11:38:09 +0000
1140+++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-09-16 09:08:32 +0000
1141@@ -128,6 +128,20 @@
1142 'server_packages': ['neutron-server',
1143 'neutron-plugin-vmware'],
1144 'server_services': ['neutron-server']
1145+ },
1146+ 'n1kv': {
1147+ 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
1148+ 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
1149+ 'contexts': [
1150+ context.SharedDBContext(user=config('neutron-database-user'),
1151+ database=config('neutron-database'),
1152+ relation_prefix='neutron',
1153+ ssl_dir=NEUTRON_CONF_DIR)],
1154+ 'services': [],
1155+ 'packages': [['neutron-plugin-cisco']],
1156+ 'server_packages': ['neutron-server',
1157+ 'neutron-plugin-cisco'],
1158+ 'server_services': ['neutron-server']
1159 }
1160 }
1161 if release >= 'icehouse':
1162
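The new 'n1kv' entry is resolved through neutron_plugin_attribute(), the same lookup the context.py changes above already import. A sketch, assuming a 'neutron' (rather than 'quantum') network manager; the expected values in the comments simply restate the plugin map above:

# Sketch only: the 'neutron' network manager argument is an assumption.
from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute

config_file = neutron_plugin_attribute('n1kv', 'config', 'neutron')
# -> '/etc/neutron/plugins/cisco/cisco_plugins.ini'
server_pkgs = neutron_plugin_attribute('n1kv', 'server_packages', 'neutron')
# -> ['neutron-server', 'neutron-plugin-cisco']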
1163=== modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
1164--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-02-27 09:26:38 +0000
1165+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-09-16 09:08:32 +0000
1166@@ -1,6 +1,6 @@
1167 global
1168- log 127.0.0.1 local0
1169- log 127.0.0.1 local1 notice
1170+ log {{ local_host }} local0
1171+ log {{ local_host }} local1 notice
1172 maxconn 20000
1173 user haproxy
1174 group haproxy
1175@@ -17,7 +17,7 @@
1176 timeout client 30000
1177 timeout server 30000
1178
1179-listen stats :8888
1180+listen stats {{ stat_port }}
1181 mode http
1182 stats enable
1183 stats hide-version
1184@@ -27,7 +27,12 @@
1185
1186 {% if units -%}
1187 {% for service, ports in service_ports.iteritems() -%}
1188-listen {{ service }} 0.0.0.0:{{ ports[0] }}
1189+listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }}
1190+ balance roundrobin
1191+ {% for unit, address in units.iteritems() -%}
1192+ server {{ unit }} {{ address }}:{{ ports[1] }} check
1193+ {% endfor %}
1194+listen {{ service }}_ipv6 :::{{ ports[0] }}
1195 balance roundrobin
1196 {% for unit, address in units.iteritems() -%}
1197 server {{ unit }} {{ address }}:{{ ports[1] }} check
1198
1199=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
1200--- hooks/charmhelpers/contrib/openstack/templating.py 2014-02-24 19:31:57 +0000
1201+++ hooks/charmhelpers/contrib/openstack/templating.py 2014-09-16 09:08:32 +0000
1202@@ -30,17 +30,17 @@
1203 loading dir.
1204
1205 A charm may also ship a templates dir with this module
1206- and it will be appended to the bottom of the search list, eg:
1207- hooks/charmhelpers/contrib/openstack/templates.
1208-
1209- :param templates_dir: str: Base template directory containing release
1210- sub-directories.
1211- :param os_release : str: OpenStack release codename to construct template
1212- loader.
1213-
1214- :returns : jinja2.ChoiceLoader constructed with a list of
1215- jinja2.FilesystemLoaders, ordered in descending
1216- order by OpenStack release.
1217+ and it will be appended to the bottom of the search list, eg::
1218+
1219+ hooks/charmhelpers/contrib/openstack/templates
1220+
1221+ :param templates_dir (str): Base template directory containing release
1222+ sub-directories.
1223+ :param os_release (str): OpenStack release codename to construct template
1224+ loader.
1225+ :returns: jinja2.ChoiceLoader constructed with a list of
1226+ jinja2.FilesystemLoaders, ordered in descending
1227+ order by OpenStack release.
1228 """
1229 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
1230 for rel in OPENSTACK_CODENAMES.itervalues()]
1231@@ -111,7 +111,8 @@
1232 and ease the burden of managing config templates across multiple OpenStack
1233 releases.
1234
1235- Basic usage:
1236+ Basic usage::
1237+
1238 # import some common context generates from charmhelpers
1239 from charmhelpers.contrib.openstack import context
1240
1241@@ -131,21 +132,19 @@
1242 # write out all registered configs
1243 configs.write_all()
1244
1245- Details:
1246+ **OpenStack Releases and template loading**
1247
1248- OpenStack Releases and template loading
1249- ---------------------------------------
1250 When the object is instantiated, it is associated with a specific OS
1251 release. This dictates how the template loader will be constructed.
1252
1253 The constructed loader attempts to load the template from several places
1254 in the following order:
1255- - from the most recent OS release-specific template dir (if one exists)
1256- - the base templates_dir
1257- - a template directory shipped in the charm with this helper file.
1258-
1259-
1260- For the example above, '/tmp/templates' contains the following structure:
1261+ - from the most recent OS release-specific template dir (if one exists)
1262+ - the base templates_dir
1263+ - a template directory shipped in the charm with this helper file.
1264+
1265+ For the example above, '/tmp/templates' contains the following structure::
1266+
1267 /tmp/templates/nova.conf
1268 /tmp/templates/api-paste.ini
1269 /tmp/templates/grizzly/api-paste.ini
1270@@ -169,8 +168,8 @@
1271 $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
1272 us to ship common templates (haproxy, apache) with the helpers.
1273
1274- Context generators
1275- ---------------------------------------
1276+ **Context generators**
1277+
1278 Context generators are used to generate template contexts during hook
1279 execution. Doing so may require inspecting service relations, charm
1280 config, etc. When registered, a config file is associated with a list
1281
1282=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
1283--- hooks/charmhelpers/contrib/openstack/utils.py 2014-08-27 07:14:03 +0000
1284+++ hooks/charmhelpers/contrib/openstack/utils.py 2014-09-16 09:08:32 +0000
1285@@ -3,7 +3,6 @@
1286 # Common python helper functions used for OpenStack charms.
1287 from collections import OrderedDict
1288
1289-import apt_pkg as apt
1290 import subprocess
1291 import os
1292 import socket
1293@@ -41,7 +40,8 @@
1294 ('quantal', 'folsom'),
1295 ('raring', 'grizzly'),
1296 ('saucy', 'havana'),
1297- ('trusty', 'icehouse')
1298+ ('trusty', 'icehouse'),
1299+ ('utopic', 'juno'),
1300 ])
1301
1302
1303@@ -52,6 +52,7 @@
1304 ('2013.1', 'grizzly'),
1305 ('2013.2', 'havana'),
1306 ('2014.1', 'icehouse'),
1307+ ('2014.2', 'juno'),
1308 ])
1309
1310 # The ugly duckling
1311@@ -83,6 +84,8 @@
1312 '''Derive OpenStack release codename from a given installation source.'''
1313 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
1314 rel = ''
1315+ if src is None:
1316+ return rel
1317 if src in ['distro', 'distro-proposed']:
1318 try:
1319 rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
1320@@ -130,8 +133,14 @@
1321
1322 def get_os_codename_package(package, fatal=True):
1323 '''Derive OpenStack release codename from an installed package.'''
1324-
1325- cache = apt_cache()
1326+<<<<<<< TREE
1327+
1328+ cache = apt_cache()
1329+=======
1330+ import apt_pkg as apt
1331+
1332+ cache = apt_cache()
1333+>>>>>>> MERGE-SOURCE
1334
1335 try:
1336 pkg = cache[package]
1337@@ -182,7 +191,7 @@
1338 for version, cname in vers_map.iteritems():
1339 if cname == codename:
1340 return version
1341- #e = "Could not determine OpenStack version for package: %s" % pkg
1342+ # e = "Could not determine OpenStack version for package: %s" % pkg
1343 # error_out(e)
1344
1345
1346@@ -268,6 +277,9 @@
1347 'icehouse': 'precise-updates/icehouse',
1348 'icehouse/updates': 'precise-updates/icehouse',
1349 'icehouse/proposed': 'precise-proposed/icehouse',
1350+ 'juno': 'trusty-updates/juno',
1351+ 'juno/updates': 'trusty-updates/juno',
1352+ 'juno/proposed': 'trusty-proposed/juno',
1353 }
1354
1355 try:
1356@@ -315,6 +327,7 @@
1357
1358 """
1359
1360+ import apt_pkg as apt
1361 src = config('openstack-origin')
1362 cur_vers = get_os_version_package(package)
1363 available_vers = get_os_version_install_source(src)
1364
1365=== added directory 'hooks/charmhelpers/contrib/peerstorage'
1366=== added file 'hooks/charmhelpers/contrib/peerstorage/__init__.py'
1367--- hooks/charmhelpers/contrib/peerstorage/__init__.py 1970-01-01 00:00:00 +0000
1368+++ hooks/charmhelpers/contrib/peerstorage/__init__.py 2014-09-16 09:08:32 +0000
1369@@ -0,0 +1,83 @@
1370+from charmhelpers.core.hookenv import (
1371+ relation_ids,
1372+ relation_get,
1373+ local_unit,
1374+ relation_set,
1375+)
1376+
1377+"""
1378+This helper provides functions to support use of a peer relation
1379+for basic key/value storage, with the added benefit that all storage
1380+can be replicated across peer units, so this is really useful for
1381+services that issue usernames/passwords to remote services.
1382+
1383+def shared_db_changed()
1384+ # Only the lead unit should create passwords
1385+ if not is_leader():
1386+ return
1387+ username = relation_get('username')
1388+ key = '{}.password'.format(username)
1389+ # Attempt to retrieve any existing password for this user
1390+ password = peer_retrieve(key)
1391+ if password is None:
1392+ # New user, create password and store
1393+ password = pwgen(length=64)
1394+ peer_store(key, password)
1395+ create_access(username, password)
1396+ relation_set(password=password)
1397+
1398+
1399+def cluster_changed()
1400+ # Echo any relation data other that *-address
1401+ # back onto the peer relation so all units have
1402+ # all *.password keys stored on their local relation
1403+ # for later retrieval.
1404+ peer_echo()
1405+
1406+"""
1407+
1408+
1409+def peer_retrieve(key, relation_name='cluster'):
1410+ """ Retrieve a named key from peer relation relation_name """
1411+ cluster_rels = relation_ids(relation_name)
1412+ if len(cluster_rels) > 0:
1413+ cluster_rid = cluster_rels[0]
1414+ return relation_get(attribute=key, rid=cluster_rid,
1415+ unit=local_unit())
1416+ else:
1417+ raise ValueError('Unable to detect'
1418+ 'peer relation {}'.format(relation_name))
1419+
1420+
1421+def peer_store(key, value, relation_name='cluster'):
1422+ """ Store the key/value pair on the named peer relation relation_name """
1423+ cluster_rels = relation_ids(relation_name)
1424+ if len(cluster_rels) > 0:
1425+ cluster_rid = cluster_rels[0]
1426+ relation_set(relation_id=cluster_rid,
1427+ relation_settings={key: value})
1428+ else:
1429+ raise ValueError('Unable to detect '
1430+ 'peer relation {}'.format(relation_name))
1431+
1432+
1433+def peer_echo(includes=None):
1434+ """Echo filtered attributes back onto the same relation for storage
1435+
1436+ Note that this helper must only be called within a peer relation
1437+ changed hook
1438+ """
1439+ rdata = relation_get()
1440+ echo_data = {}
1441+ if includes is None:
1442+ echo_data = rdata.copy()
1443+ for ex in ['private-address', 'public-address']:
1444+ if ex in echo_data:
1445+ echo_data.pop(ex)
1446+ else:
1447+ for attribute, value in rdata.iteritems():
1448+ for include in includes:
1449+ if include in attribute:
1450+ echo_data[attribute] = value
1451+ if len(echo_data) > 0:
1452+ relation_set(relation_settings=echo_data)
1453
1454=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
1455--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-03-27 11:02:24 +0000
1456+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-09-16 09:08:32 +0000
1457@@ -303,7 +303,7 @@
1458 blk_device, fstype, system_services=[]):
1459 """
1460 NOTE: This function must only be called from a single service unit for
1461- the same rbd_img otherwise data loss will occur.
1462+ the same rbd_img otherwise data loss will occur.
1463
1464 Ensures given pool and RBD image exists, is mapped to a block device,
1465 and the device is formatted and mounted at the given mount_point.
1466
1467=== added file 'hooks/charmhelpers/core/fstab.py'
1468--- hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000
1469+++ hooks/charmhelpers/core/fstab.py 2014-09-16 09:08:32 +0000
1470@@ -0,0 +1,116 @@
1471+#!/usr/bin/env python
1472+# -*- coding: utf-8 -*-
1473+
1474+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
1475+
1476+import os
1477+
1478+
1479+class Fstab(file):
1480+ """This class extends file in order to implement a file reader/writer
1481+ for file `/etc/fstab`
1482+ """
1483+
1484+ class Entry(object):
1485+ """Entry class represents a non-comment line on the `/etc/fstab` file
1486+ """
1487+ def __init__(self, device, mountpoint, filesystem,
1488+ options, d=0, p=0):
1489+ self.device = device
1490+ self.mountpoint = mountpoint
1491+ self.filesystem = filesystem
1492+
1493+ if not options:
1494+ options = "defaults"
1495+
1496+ self.options = options
1497+ self.d = d
1498+ self.p = p
1499+
1500+ def __eq__(self, o):
1501+ return str(self) == str(o)
1502+
1503+ def __str__(self):
1504+ return "{} {} {} {} {} {}".format(self.device,
1505+ self.mountpoint,
1506+ self.filesystem,
1507+ self.options,
1508+ self.d,
1509+ self.p)
1510+
1511+ DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
1512+
1513+ def __init__(self, path=None):
1514+ if path:
1515+ self._path = path
1516+ else:
1517+ self._path = self.DEFAULT_PATH
1518+ file.__init__(self, self._path, 'r+')
1519+
1520+ def _hydrate_entry(self, line):
1521+ # NOTE: use split with no arguments to split on any
1522+ # whitespace including tabs
1523+ return Fstab.Entry(*filter(
1524+ lambda x: x not in ('', None),
1525+ line.strip("\n").split()))
1526+
1527+ @property
1528+ def entries(self):
1529+ self.seek(0)
1530+ for line in self.readlines():
1531+ try:
1532+ if not line.startswith("#"):
1533+ yield self._hydrate_entry(line)
1534+ except ValueError:
1535+ pass
1536+
1537+ def get_entry_by_attr(self, attr, value):
1538+ for entry in self.entries:
1539+ e_attr = getattr(entry, attr)
1540+ if e_attr == value:
1541+ return entry
1542+ return None
1543+
1544+ def add_entry(self, entry):
1545+ if self.get_entry_by_attr('device', entry.device):
1546+ return False
1547+
1548+ self.write(str(entry) + '\n')
1549+ self.truncate()
1550+ return entry
1551+
1552+ def remove_entry(self, entry):
1553+ self.seek(0)
1554+
1555+ lines = self.readlines()
1556+
1557+ found = False
1558+ for index, line in enumerate(lines):
1559+ if not line.startswith("#"):
1560+ if self._hydrate_entry(line) == entry:
1561+ found = True
1562+ break
1563+
1564+ if not found:
1565+ return False
1566+
1567+ lines.remove(line)
1568+
1569+ self.seek(0)
1570+ self.write(''.join(lines))
1571+ self.truncate()
1572+ return True
1573+
1574+ @classmethod
1575+ def remove_by_mountpoint(cls, mountpoint, path=None):
1576+ fstab = cls(path=path)
1577+ entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
1578+ if entry:
1579+ return fstab.remove_entry(entry)
1580+ return False
1581+
1582+ @classmethod
1583+ def add(cls, device, mountpoint, filesystem, options=None, path=None):
1584+ return cls(path=path).add_entry(Fstab.Entry(device,
1585+ mountpoint, filesystem,
1586+ options=options))
1587
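A short usage sketch for the new Fstab helper. It works on a throwaway copy of /etc/fstab so nothing real is modified; the device and mount point are placeholders:

# Sketch only: device, mount point and the copy path are placeholders.
import shutil
from charmhelpers.core.fstab import Fstab

shutil.copy('/etc/fstab', '/tmp/fstab.test')

# Add an entry (returns the entry, or False if the device already has one).
Fstab.add('/dev/vdb', '/srv/data', 'ext4',
          options='defaults,noatime', path='/tmp/fstab.test')

# Remove it again by mount point.
Fstab.remove_by_mountpoint('/srv/data', path='/tmp/fstab.test')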
1588=== renamed file 'hooks/charmhelpers/core/fstab.py' => 'hooks/charmhelpers/core/fstab.py.moved'
1589=== modified file 'hooks/charmhelpers/core/hookenv.py'
1590--- hooks/charmhelpers/core/hookenv.py 2014-05-19 11:38:09 +0000
1591+++ hooks/charmhelpers/core/hookenv.py 2014-09-16 09:08:32 +0000
1592@@ -25,7 +25,7 @@
1593 def cached(func):
1594 """Cache return values for multiple executions of func + args
1595
1596- For example:
1597+ For example::
1598
1599 @cached
1600 def unit_get(attribute):
1601@@ -285,8 +285,9 @@
1602 raise
1603
1604
1605-def relation_set(relation_id=None, relation_settings={}, **kwargs):
1606+def relation_set(relation_id=None, relation_settings=None, **kwargs):
1607 """Set relation information for the current unit"""
1608+ relation_settings = relation_settings if relation_settings else {}
1609 relation_cmd_line = ['relation-set']
1610 if relation_id is not None:
1611 relation_cmd_line.extend(('-r', relation_id))
1612@@ -445,18 +446,19 @@
1613 class Hooks(object):
1614 """A convenient handler for hook functions.
1615
1616- Example:
1617+ Example::
1618+
1619 hooks = Hooks()
1620
1621 # register a hook, taking its name from the function name
1622 @hooks.hook()
1623 def install():
1624- ...
1625+ pass # your code here
1626
1627 # register a hook, providing a custom hook name
1628 @hooks.hook("config-changed")
1629 def config_changed():
1630- ...
1631+ pass # your code here
1632
1633 if __name__ == "__main__":
1634 # execute a hook based on the name the program is called by
1635
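The relation_set() change above only removes a mutable default argument; the calling convention is unchanged. A small, hypothetical example of both supported forms::

    from charmhelpers.core.hookenv import relation_set

    # settings can be passed as a dict ...
    relation_set(relation_id='cloud-compute:0',
                 relation_settings={'restart_trigger': 'some-uuid'})

    # ... or as keyword arguments on the current relation
    relation_set(private_address='10.0.0.10')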
1636=== modified file 'hooks/charmhelpers/core/host.py'
1637--- hooks/charmhelpers/core/host.py 2014-08-27 07:14:03 +0000
1638+++ hooks/charmhelpers/core/host.py 2014-09-16 09:08:32 +0000
1639@@ -12,7 +12,8 @@
1640 import string
1641 import subprocess
1642 import hashlib
1643-import apt_pkg
1644+import shutil
1645+from contextlib import contextmanager
1646
1647 from collections import OrderedDict
1648
1649@@ -53,7 +54,7 @@
1650 def service_running(service):
1651 """Determine whether a system service is running"""
1652 try:
1653- output = subprocess.check_output(['service', service, 'status'])
1654+ output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
1655 except subprocess.CalledProcessError:
1656 return False
1657 else:
1658@@ -63,6 +64,16 @@
1659 return False
1660
1661
1662+def service_available(service_name):
1663+ """Determine whether a system service is available"""
1664+ try:
1665+ subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
1666+ except subprocess.CalledProcessError:
1667+ return False
1668+ else:
1669+ return True
1670+
1671+
1672 def adduser(username, password=None, shell='/bin/bash', system_user=False):
1673 """Add a user to the system"""
1674 try:
1675@@ -212,13 +223,13 @@
1676 def restart_on_change(restart_map, stopstart=False):
1677 """Restart services based on configuration files changing
1678
1679- This function is used a decorator, for example
1680+ This function is used as a decorator, for example::
1681
1682 @restart_on_change({
1683 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
1684 })
1685 def ceph_client_changed():
1686- ...
1687+ pass # your code here
1688
1689 In this example, the cinder-api and cinder-volume services
1690 would be restarted if /etc/ceph/ceph.conf is changed by the
1691@@ -314,12 +325,40 @@
1692
1693 def cmp_pkgrevno(package, revno, pkgcache=None):
1694 '''Compare supplied revno with the revno of the installed package
1695- 1 => Installed revno is greater than supplied arg
1696- 0 => Installed revno is the same as supplied arg
1697- -1 => Installed revno is less than supplied arg
1698+
1699+ * 1 => Installed revno is greater than supplied arg
1700+ * 0 => Installed revno is the same as supplied arg
1701+ * -1 => Installed revno is less than supplied arg
1702+
1703 '''
1704- from charmhelpers.fetch import apt_cache
1705+<<<<<<< TREE
1706+ from charmhelpers.fetch import apt_cache
1707+=======
1708+ import apt_pkg
1709+ from charmhelpers.fetch import apt_cache
1710+>>>>>>> MERGE-SOURCE
1711 if not pkgcache:
1712 pkgcache = apt_cache()
1713 pkg = pkgcache[package]
1714 return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
1715+
1716+
1717+@contextmanager
1718+def chdir(d):
1719+ cur = os.getcwd()
1720+ try:
1721+ yield os.chdir(d)
1722+ finally:
1723+ os.chdir(cur)
1724+
1725+
1726+def chownr(path, owner, group):
1727+ uid = pwd.getpwnam(owner).pw_uid
1728+ gid = grp.getgrnam(group).gr_gid
1729+
1730+ for root, dirs, files in os.walk(path):
1731+ for name in dirs + files:
1732+ full = os.path.join(root, name)
1733+ broken_symlink = os.path.lexists(full) and not os.path.exists(full)
1734+ if not broken_symlink:
1735+ os.chown(full, uid, gid)
1736
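A short sketch of the helpers added to host.py above; the service and path names are illustrative rather than taken from this charm::

    import os
    from charmhelpers.core import host

    # service_available() checks whether the init system knows the service
    if host.service_available('nova-api-ec2'):
        host.service_restart('nova-api-ec2')

    # chdir() is a context manager that restores the previous cwd on exit
    with host.chdir('/var/lib/nova'):
        instances = os.listdir('instances')

    # chownr() recursively chowns a tree, skipping broken symlinks
    host.chownr('/var/lib/nova', owner='nova', group='nova')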
1737=== added directory 'hooks/charmhelpers/core/services'
1738=== added file 'hooks/charmhelpers/core/services/__init__.py'
1739--- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000
1740+++ hooks/charmhelpers/core/services/__init__.py 2014-09-16 09:08:32 +0000
1741@@ -0,0 +1,2 @@
1742+from .base import *
1743+from .helpers import *
1744
1745=== added file 'hooks/charmhelpers/core/services/base.py'
1746--- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000
1747+++ hooks/charmhelpers/core/services/base.py 2014-09-16 09:08:32 +0000
1748@@ -0,0 +1,310 @@
1749+import os
1750+import re
1751+import json
1752+from collections import Iterable
1753+
1754+from charmhelpers.core import host
1755+from charmhelpers.core import hookenv
1756+
1757+
1758+__all__ = ['ServiceManager', 'ManagerCallback',
1759+ 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
1760+ 'service_restart', 'service_stop']
1761+
1762+
1763+class ServiceManager(object):
1764+ def __init__(self, services=None):
1765+ """
1766+ Register a list of services, given their definitions.
1767+
1768+ Service definitions are dicts in the following formats (all keys except
1769+ 'service' are optional)::
1770+
1771+ {
1772+ "service": <service name>,
1773+ "required_data": <list of required data contexts>,
1774+ "provided_data": <list of provided data contexts>,
1775+ "data_ready": <one or more callbacks>,
1776+ "data_lost": <one or more callbacks>,
1777+ "start": <one or more callbacks>,
1778+ "stop": <one or more callbacks>,
1779+ "ports": <list of ports to manage>,
1780+ }
1781+
1782+ The 'required_data' list should contain dicts of required data (or
1783+ dependency managers that act like dicts and know how to collect the data).
1784+ Only when all items in the 'required_data' list are populated is the list
1785+ of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
1786+ information.
1787+
1788+ The 'provided_data' list should contain relation data providers, most likely
1789+ a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
1790+ that will indicate a set of data to set on a given relation.
1791+
1792+ The 'data_ready' value should be either a single callback, or a list of
1793+ callbacks, to be called when all items in 'required_data' pass `is_ready()`.
1794+ Each callback will be called with the service name as the only parameter.
1795+ After all of the 'data_ready' callbacks are called, the 'start' callbacks
1796+ are fired.
1797+
1798+ The 'data_lost' value should be either a single callback, or a list of
1799+ callbacks, to be called when a 'required_data' item no longer passes
1800+ `is_ready()`. Each callback will be called with the service name as the
1801+ only parameter. After all of the 'data_lost' callbacks are called,
1802+ the 'stop' callbacks are fired.
1803+
1804+ The 'start' value should be either a single callback, or a list of
1805+ callbacks, to be called when starting the service, after the 'data_ready'
1806+ callbacks are complete. Each callback will be called with the service
1807+ name as the only parameter. This defaults to
1808+ `[host.service_start, services.open_ports]`.
1809+
1810+ The 'stop' value should be either a single callback, or a list of
1811+ callbacks, to be called when stopping the service. If the service is
1812+ being stopped because it no longer has all of its 'required_data', this
1813+ will be called after all of the 'data_lost' callbacks are complete.
1814+ Each callback will be called with the service name as the only parameter.
1815+ This defaults to `[services.close_ports, host.service_stop]`.
1816+
1817+ The 'ports' value should be a list of ports to manage. The default
1818+ 'start' handler will open the ports after the service is started,
1819+ and the default 'stop' handler will close the ports prior to stopping
1820+ the service.
1821+
1822+
1823+ Examples:
1824+
1825+ The following registers an Upstart service called bingod that depends on
1826+ a mongodb relation and which runs a custom `db_migrate` function prior to
1827+ restarting the service, and a Runit service called spadesd::
1828+
1829+ manager = services.ServiceManager([
1830+ {
1831+ 'service': 'bingod',
1832+ 'ports': [80, 443],
1833+ 'required_data': [MongoRelation(), config(), {'my': 'data'}],
1834+ 'data_ready': [
1835+ services.template(source='bingod.conf'),
1836+ services.template(source='bingod.ini',
1837+ target='/etc/bingod.ini',
1838+ owner='bingo', perms=0400),
1839+ ],
1840+ },
1841+ {
1842+ 'service': 'spadesd',
1843+ 'data_ready': services.template(source='spadesd_run.j2',
1844+ target='/etc/sv/spadesd/run',
1845+ perms=0555),
1846+ 'start': runit_start,
1847+ 'stop': runit_stop,
1848+ },
1849+ ])
1850+ manager.manage()
1851+ """
1852+ self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
1853+ self._ready = None
1854+ self.services = {}
1855+ for service in services or []:
1856+ service_name = service['service']
1857+ self.services[service_name] = service
1858+
1859+ def manage(self):
1860+ """
1861+ Handle the current hook by doing The Right Thing with the registered services.
1862+ """
1863+ hook_name = hookenv.hook_name()
1864+ if hook_name == 'stop':
1865+ self.stop_services()
1866+ else:
1867+ self.provide_data()
1868+ self.reconfigure_services()
1869+
1870+ def provide_data(self):
1871+ """
1872+ Set the relation data for each provider in the ``provided_data`` list.
1873+
1874+ A provider must have a `name` attribute, which indicates which relation
1875+ to set data on, and a `provide_data()` method, which returns a dict of
1876+ data to set.
1877+ """
1878+ hook_name = hookenv.hook_name()
1879+ for service in self.services.values():
1880+ for provider in service.get('provided_data', []):
1881+ if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
1882+ data = provider.provide_data()
1883+ _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
1884+ if _ready:
1885+ hookenv.relation_set(None, data)
1886+
1887+ def reconfigure_services(self, *service_names):
1888+ """
1889+ Update all files for one or more registered services, and,
1890+ if ready, optionally restart them.
1891+
1892+ If no service names are given, reconfigures all registered services.
1893+ """
1894+ for service_name in service_names or self.services.keys():
1895+ if self.is_ready(service_name):
1896+ self.fire_event('data_ready', service_name)
1897+ self.fire_event('start', service_name, default=[
1898+ service_restart,
1899+ manage_ports])
1900+ self.save_ready(service_name)
1901+ else:
1902+ if self.was_ready(service_name):
1903+ self.fire_event('data_lost', service_name)
1904+ self.fire_event('stop', service_name, default=[
1905+ manage_ports,
1906+ service_stop])
1907+ self.save_lost(service_name)
1908+
1909+ def stop_services(self, *service_names):
1910+ """
1911+ Stop one or more registered services, by name.
1912+
1913+ If no service names are given, stops all registered services.
1914+ """
1915+ for service_name in service_names or self.services.keys():
1916+ self.fire_event('stop', service_name, default=[
1917+ manage_ports,
1918+ service_stop])
1919+
1920+ def get_service(self, service_name):
1921+ """
1922+ Given the name of a registered service, return its service definition.
1923+ """
1924+ service = self.services.get(service_name)
1925+ if not service:
1926+ raise KeyError('Service not registered: %s' % service_name)
1927+ return service
1928+
1929+ def fire_event(self, event_name, service_name, default=None):
1930+ """
1931+ Fire a data_ready, data_lost, start, or stop event on a given service.
1932+ """
1933+ service = self.get_service(service_name)
1934+ callbacks = service.get(event_name, default)
1935+ if not callbacks:
1936+ return
1937+ if not isinstance(callbacks, Iterable):
1938+ callbacks = [callbacks]
1939+ for callback in callbacks:
1940+ if isinstance(callback, ManagerCallback):
1941+ callback(self, service_name, event_name)
1942+ else:
1943+ callback(service_name)
1944+
1945+ def is_ready(self, service_name):
1946+ """
1947+ Determine if a registered service is ready, by checking its 'required_data'.
1948+
1949+ A 'required_data' item can be any mapping type, and is considered ready
1950+ if `bool(item)` evaluates as True.
1951+ """
1952+ service = self.get_service(service_name)
1953+ reqs = service.get('required_data', [])
1954+ return all(bool(req) for req in reqs)
1955+
1956+ def _load_ready_file(self):
1957+ if self._ready is not None:
1958+ return
1959+ if os.path.exists(self._ready_file):
1960+ with open(self._ready_file) as fp:
1961+ self._ready = set(json.load(fp))
1962+ else:
1963+ self._ready = set()
1964+
1965+ def _save_ready_file(self):
1966+ if self._ready is None:
1967+ return
1968+ with open(self._ready_file, 'w') as fp:
1969+ json.dump(list(self._ready), fp)
1970+
1971+ def save_ready(self, service_name):
1972+ """
1973+ Save an indicator that the given service is now data_ready.
1974+ """
1975+ self._load_ready_file()
1976+ self._ready.add(service_name)
1977+ self._save_ready_file()
1978+
1979+ def save_lost(self, service_name):
1980+ """
1981+ Save an indicator that the given service is no longer data_ready.
1982+ """
1983+ self._load_ready_file()
1984+ self._ready.discard(service_name)
1985+ self._save_ready_file()
1986+
1987+ def was_ready(self, service_name):
1988+ """
1989+ Determine if the given service was previously data_ready.
1990+ """
1991+ self._load_ready_file()
1992+ return service_name in self._ready
1993+
1994+
1995+class ManagerCallback(object):
1996+ """
1997+ Special case of a callback that takes the `ServiceManager` instance
1998+ in addition to the service name.
1999+
2000+ Subclasses should implement `__call__` which should accept three parameters:
2001+
2002+ * `manager` The `ServiceManager` instance
2003+ * `service_name` The name of the service it's being triggered for
2004+ * `event_name` The name of the event that this callback is handling
2005+ """
2006+ def __call__(self, manager, service_name, event_name):
2007+ raise NotImplementedError()
2008+
2009+
2010+class PortManagerCallback(ManagerCallback):
2011+ """
2012+ Callback class that will open or close ports, for use as either
2013+ a start or stop action.
2014+ """
2015+ def __call__(self, manager, service_name, event_name):
2016+ service = manager.get_service(service_name)
2017+ new_ports = service.get('ports', [])
2018+ port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
2019+ if os.path.exists(port_file):
2020+ with open(port_file) as fp:
2021+ old_ports = fp.read().split(',')
2022+ for old_port in old_ports:
2023+ if bool(old_port):
2024+ old_port = int(old_port)
2025+ if old_port not in new_ports:
2026+ hookenv.close_port(old_port)
2027+ with open(port_file, 'w') as fp:
2028+ fp.write(','.join(str(port) for port in new_ports))
2029+ for port in new_ports:
2030+ if event_name == 'start':
2031+ hookenv.open_port(port)
2032+ elif event_name == 'stop':
2033+ hookenv.close_port(port)
2034+
2035+
2036+def service_stop(service_name):
2037+ """
2038+ Wrapper around host.service_stop to prevent spurious "unknown service"
2039+ messages in the logs.
2040+ """
2041+ if host.service_running(service_name):
2042+ host.service_stop(service_name)
2043+
2044+
2045+def service_restart(service_name):
2046+ """
2047+ Wrapper around host.service_restart to prevent spurious "unknown service"
2048+ messages in the logs.
2049+ """
2050+ if host.service_available(service_name):
2051+ if host.service_running(service_name):
2052+ host.service_restart(service_name)
2053+ else:
2054+ host.service_start(service_name)
2055+
2056+
2057+# Convenience aliases
2058+open_ports = close_ports = manage_ports = PortManagerCallback()
2059
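Beyond the docstring example above, the usual wiring is a single hook script that builds a ServiceManager and calls manage() on every hook invocation; a minimal, hypothetical sketch::

    #!/usr/bin/env python
    from charmhelpers.core import services

    # required_data items only need to be dict-like and truthy when ready;
    # a plain dict works for static data.
    manager = services.ServiceManager([{
        'service': 'nova-api-os-compute',   # illustrative service name
        'ports': [8774],
        'required_data': [{'workers': 4}],
        'data_ready': [services.template(source='nova.conf.j2',
                                         target='/etc/nova/nova.conf')],
    }])
    manager.manage()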
2060=== added file 'hooks/charmhelpers/core/services/helpers.py'
2061--- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000
2062+++ hooks/charmhelpers/core/services/helpers.py 2014-09-16 09:08:32 +0000
2063@@ -0,0 +1,125 @@
2064+from charmhelpers.core import hookenv
2065+from charmhelpers.core import templating
2066+
2067+from charmhelpers.core.services.base import ManagerCallback
2068+
2069+
2070+__all__ = ['RelationContext', 'TemplateCallback',
2071+ 'render_template', 'template']
2072+
2073+
2074+class RelationContext(dict):
2075+ """
2076+ Base class for a context generator that gets relation data from juju.
2077+
2078+ Subclasses must provide the attributes `name`, which is the name of the
2079+ interface of interest, `interface`, which is the type of the interface of
2080+ interest, and `required_keys`, which is the set of keys required for the
2081+ relation to be considered complete. The data for all interfaces matching
2082+ the `name` attribute that are complete will be used to populate the dictionary
2083+ values (see `get_data`, below).
2084+
2085+ The generated context will be namespaced under the interface type, to prevent
2086+ potential naming conflicts.
2087+ """
2088+ name = None
2089+ interface = None
2090+ required_keys = []
2091+
2092+ def __init__(self, *args, **kwargs):
2093+ super(RelationContext, self).__init__(*args, **kwargs)
2094+ self.get_data()
2095+
2096+ def __bool__(self):
2097+ """
2098+ Returns True if all of the required_keys are available.
2099+ """
2100+ return self.is_ready()
2101+
2102+ __nonzero__ = __bool__
2103+
2104+ def __repr__(self):
2105+ return super(RelationContext, self).__repr__()
2106+
2107+ def is_ready(self):
2108+ """
2109+ Returns True if all of the `required_keys` are available from any units.
2110+ """
2111+ ready = len(self.get(self.name, [])) > 0
2112+ if not ready:
2113+ hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
2114+ return ready
2115+
2116+ def _is_ready(self, unit_data):
2117+ """
2118+ Helper method that tests a set of relation data and returns True if
2119+ all of the `required_keys` are present.
2120+ """
2121+ return set(unit_data.keys()).issuperset(set(self.required_keys))
2122+
2123+ def get_data(self):
2124+ """
2125+ Retrieve the relation data for each unit involved in a relation and,
2126+ if complete, store it in a list under `self[self.name]`. This
2127+ is automatically called when the RelationContext is instantiated.
2128+
2129+ The units are sorted lexicographically first by the service ID, then by
2130+ the unit ID. Thus, if an interface has two other services, 'db:1'
2131+ and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
2132+ and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
2133+ set of data, the relation data for the units will be stored in the
2134+ order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
2135+
2136+ If you only care about a single unit on the relation, you can just
2137+ access it as `{{ interface[0]['key'] }}`. However, if you can at all
2138+ support multiple units on a relation, you should iterate over the list,
2139+ like::
2140+
2141+ {% for unit in interface -%}
2142+ {{ unit['key'] }}{% if not loop.last %},{% endif %}
2143+ {%- endfor %}
2144+
2145+ Note that since all sets of relation data from all related services and
2146+ units are in a single list, if you need to know which service or unit a
2147+ set of data came from, you'll need to extend this class to preserve
2148+ that information.
2149+ """
2150+ if not hookenv.relation_ids(self.name):
2151+ return
2152+
2153+ ns = self.setdefault(self.name, [])
2154+ for rid in sorted(hookenv.relation_ids(self.name)):
2155+ for unit in sorted(hookenv.related_units(rid)):
2156+ reldata = hookenv.relation_get(rid=rid, unit=unit)
2157+ if self._is_ready(reldata):
2158+ ns.append(reldata)
2159+
2160+ def provide_data(self):
2161+ """
2162+ Return data to be relation_set for this interface.
2163+ """
2164+ return {}
2165+
2166+
2167+class TemplateCallback(ManagerCallback):
2168+ """
2169+ Callback class that will render a template, for use as a ready action.
2170+ """
2171+ def __init__(self, source, target, owner='root', group='root', perms=0444):
2172+ self.source = source
2173+ self.target = target
2174+ self.owner = owner
2175+ self.group = group
2176+ self.perms = perms
2177+
2178+ def __call__(self, manager, service_name, event_name):
2179+ service = manager.get_service(service_name)
2180+ context = {}
2181+ for ctx in service.get('required_data', []):
2182+ context.update(ctx)
2183+ templating.render(self.source, self.target, context,
2184+ self.owner, self.group, self.perms)
2185+
2186+
2187+# Convenience aliases for templates
2188+render_template = template = TemplateCallback
2189
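A RelationContext subclass is typically all a charm defines per interface; a hypothetical example for an identity-service style relation, suitable as a 'required_data' item::

    from charmhelpers.core.services.helpers import RelationContext

    class IdentityRelation(RelationContext):
        # all three attributes are required by the base class; the key
        # names here are assumptions, not taken from this charm
        name = 'identity-service'
        interface = 'keystone'
        required_keys = ['service_host', 'service_port', 'admin_token']

    ctxt = IdentityRelation()
    if ctxt:
        # complete unit data is collected under ctxt['identity-service']
        first_unit = ctxt['identity-service'][0]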
2190=== added file 'hooks/charmhelpers/core/templating.py'
2191--- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000
2192+++ hooks/charmhelpers/core/templating.py 2014-09-16 09:08:32 +0000
2193@@ -0,0 +1,51 @@
2194+import os
2195+
2196+from charmhelpers.core import host
2197+from charmhelpers.core import hookenv
2198+
2199+
2200+def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
2201+ """
2202+ Render a template.
2203+
2204+ The `source` path, if not absolute, is relative to the `templates_dir`.
2205+
2206+ The `target` path should be absolute.
2207+
2208+ The context should be a dict containing the values to be replaced in the
2209+ template.
2210+
2211+ The `owner`, `group`, and `perms` options will be passed to `write_file`.
2212+
2213+ If omitted, `templates_dir` defaults to the `templates` folder in the charm.
2214+
2215+ Note: Using this requires python-jinja2; if it is not installed, calling
2216+ this will attempt to use charmhelpers.fetch.apt_install to install it.
2217+ """
2218+ try:
2219+ from jinja2 import FileSystemLoader, Environment, exceptions
2220+ except ImportError:
2221+ try:
2222+ from charmhelpers.fetch import apt_install
2223+ except ImportError:
2224+ hookenv.log('Could not import jinja2, and could not import '
2225+ 'charmhelpers.fetch to install it',
2226+ level=hookenv.ERROR)
2227+ raise
2228+ apt_install('python-jinja2', fatal=True)
2229+ from jinja2 import FileSystemLoader, Environment, exceptions
2230+
2231+ if templates_dir is None:
2232+ templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
2233+ loader = Environment(loader=FileSystemLoader(templates_dir))
2234+ try:
2235+ source = source
2236+ template = loader.get_template(source)
2237+ except exceptions.TemplateNotFound as e:
2238+ hookenv.log('Could not load template %s from %s.' %
2239+ (source, templates_dir),
2240+ level=hookenv.ERROR)
2241+ raise e
2242+ content = template.render(context)
2243+ host.mkdir(os.path.dirname(target))
2244+ host.write_file(target, content, owner, group, perms)
2245
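render() above is a thin jinja2 wrapper; a hedged example of a hook calling it (template and target names are illustrative)::

    from charmhelpers.core.templating import render

    render(source='nova.conf.j2',          # resolved under templates/
           target='/etc/nova/nova.conf',
           context={'workers': 4},
           owner='nova', group='nova', perms=0640)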
2246=== modified file 'hooks/charmhelpers/fetch/__init__.py'
2247--- hooks/charmhelpers/fetch/__init__.py 2014-08-27 07:14:03 +0000
2248+++ hooks/charmhelpers/fetch/__init__.py 2014-09-16 09:08:32 +0000
2249@@ -1,4 +1,5 @@
2250 import importlib
2251+from tempfile import NamedTemporaryFile
2252 import time
2253 from yaml import safe_load
2254 from charmhelpers.core.host import (
2255@@ -13,7 +14,6 @@
2256 config,
2257 log,
2258 )
2259-import apt_pkg
2260 import os
2261
2262
2263@@ -56,6 +56,15 @@
2264 'icehouse/proposed': 'precise-proposed/icehouse',
2265 'precise-icehouse/proposed': 'precise-proposed/icehouse',
2266 'precise-proposed/icehouse': 'precise-proposed/icehouse',
2267+ # Juno
2268+ 'juno': 'trusty-updates/juno',
2269+ 'trusty-juno': 'trusty-updates/juno',
2270+ 'trusty-juno/updates': 'trusty-updates/juno',
2271+ 'trusty-updates/juno': 'trusty-updates/juno',
2272+ 'juno/proposed': 'trusty-proposed/juno',
2273+ 'juno/proposed': 'trusty-proposed/juno',
2274+ 'trusty-juno/proposed': 'trusty-proposed/juno',
2275+ 'trusty-proposed/juno': 'trusty-proposed/juno',
2276 }
2277
2278 # The order of this list is very important. Handlers should be listed in from
2279@@ -108,8 +117,12 @@
2280
2281 def filter_installed_packages(packages):
2282 """Returns a list of packages that require installation"""
2283+<<<<<<< TREE
2284
2285 cache = apt_cache()
2286+=======
2287+ cache = apt_cache()
2288+>>>>>>> MERGE-SOURCE
2289 _pkgs = []
2290 for package in packages:
2291 try:
2292@@ -122,15 +135,28 @@
2293 return _pkgs
2294
2295
2296-def apt_cache(in_memory=True):
2297- """Build and return an apt cache"""
2298- apt_pkg.init()
2299- if in_memory:
2300- apt_pkg.config.set("Dir::Cache::pkgcache", "")
2301- apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
2302- return apt_pkg.Cache()
2303-
2304-
2305+<<<<<<< TREE
2306+def apt_cache(in_memory=True):
2307+ """Build and return an apt cache"""
2308+ apt_pkg.init()
2309+ if in_memory:
2310+ apt_pkg.config.set("Dir::Cache::pkgcache", "")
2311+ apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
2312+ return apt_pkg.Cache()
2313+
2314+
2315+=======
2316+def apt_cache(in_memory=True):
2317+ """Build and return an apt cache"""
2318+ import apt_pkg
2319+ apt_pkg.init()
2320+ if in_memory:
2321+ apt_pkg.config.set("Dir::Cache::pkgcache", "")
2322+ apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
2323+ return apt_pkg.Cache()
2324+
2325+
2326+>>>>>>> MERGE-SOURCE
2327 def apt_install(packages, options=None, fatal=False):
2328 """Install one or more packages"""
2329 if options is None:
2330@@ -196,6 +222,27 @@
2331
2332
2333 def add_source(source, key=None):
2334+ """Add a package source to this system.
2335+
2336+ @param source: a URL or sources.list entry, as supported by
2337+ add-apt-repository(1). Examples:
2338+ ppa:charmers/example
2339+ deb https://stub:key@private.example.com/ubuntu trusty main
2340+
2341+ In addition:
2342+ 'proposed:' may be used to enable the standard 'proposed'
2343+ pocket for the release.
2344+ 'cloud:' may be used to activate official cloud archive pockets,
2345+ such as 'cloud:icehouse'
2346+
2347+ @param key: A key to be added to the system's APT keyring and used
2348+ to verify the signatures on packages. Ideally, this should be an
2349+ ASCII format GPG public key including the block headers. A GPG key
2350+ id may also be used, but be aware that only insecure protocols are
2351+ available to retrieve the actual public key from a public keyserver
2352+ placing your Juju environment at risk. ppa and cloud archive keys
2353+ are securely added automtically, so sould not be provided.
2354+ """
2355 if source is None:
2356 log('Source is not present. Skipping')
2357 return
2358@@ -220,41 +267,63 @@
2359 release = lsb_release()['DISTRIB_CODENAME']
2360 with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
2361 apt.write(PROPOSED_POCKET.format(release))
2362+ else:
2363+ raise SourceConfigError("Unknown source: {!r}".format(source))
2364+
2365 if key:
2366- subprocess.check_call(['apt-key', 'adv', '--keyserver',
2367- 'hkp://keyserver.ubuntu.com:80', '--recv',
2368- key])
2369+ if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
2370+ with NamedTemporaryFile() as key_file:
2371+ key_file.write(key)
2372+ key_file.flush()
2373+ key_file.seek(0)
2374+ subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
2375+ else:
2376+ # Note that hkp: is in no way a secure protocol. Using a
2377+ # GPG key id is pointless from a security POV unless you
2378+ # absolutely trust your network and DNS.
2379+ subprocess.check_call(['apt-key', 'adv', '--keyserver',
2380+ 'hkp://keyserver.ubuntu.com:80', '--recv',
2381+ key])
2382
2383
2384 def configure_sources(update=False,
2385 sources_var='install_sources',
2386 keys_var='install_keys'):
2387 """
2388- Configure multiple sources from charm configuration
2389+ Configure multiple sources from charm configuration.
2390+
2391+ The lists are encoded as yaml fragments in the configuration.
2392+ The fragment needs to be included as a string. Sources and their
2393+ corresponding keys are of the types supported by add_source().
2394
2395 Example config:
2396- install_sources:
2397+ install_sources: |
2398 - "ppa:foo"
2399 - "http://example.com/repo precise main"
2400- install_keys:
2401+ install_keys: |
2402 - null
2403 - "a1b2c3d4"
2404
2405 Note that 'null' (a.k.a. None) should not be quoted.
2406 """
2407- sources = safe_load(config(sources_var))
2408- keys = config(keys_var)
2409- if keys is not None:
2410- keys = safe_load(keys)
2411- if isinstance(sources, basestring) and (
2412- keys is None or isinstance(keys, basestring)):
2413- add_source(sources, keys)
2414+ sources = safe_load((config(sources_var) or '').strip()) or []
2415+ keys = safe_load((config(keys_var) or '').strip()) or None
2416+
2417+ if isinstance(sources, basestring):
2418+ sources = [sources]
2419+
2420+ if keys is None:
2421+ for source in sources:
2422+ add_source(source, None)
2423 else:
2424- if not len(sources) == len(keys):
2425- msg = 'Install sources and keys lists are different lengths'
2426- raise SourceConfigError(msg)
2427- for src_num in range(len(sources)):
2428- add_source(sources[src_num], keys[src_num])
2429+ if isinstance(keys, basestring):
2430+ keys = [keys]
2431+
2432+ if len(sources) != len(keys):
2433+ raise SourceConfigError(
2434+ 'Install sources and keys lists are different lengths')
2435+ for source, key in zip(sources, keys):
2436+ add_source(source, key)
2437 if update:
2438 apt_update(fatal=True)
2439
2440
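As a concrete illustration of the new Juno pocket mapping and key handling above, a hook could enable a cloud archive pocket directly; the pocket name follows the table added above::

    from charmhelpers.fetch import add_source, apt_update

    # cloud archive pockets are keyed securely, so no key is passed
    add_source('cloud:trusty-updates/juno')
    apt_update(fatal=True)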
2441=== added symlink 'hooks/neutron-api-relation-broken'
2442=== target is u'nova_cc_hooks.py'
2443=== added symlink 'hooks/neutron-api-relation-changed'
2444=== target is u'nova_cc_hooks.py'
2445=== added symlink 'hooks/neutron-api-relation-departed'
2446=== target is u'nova_cc_hooks.py'
2447=== added symlink 'hooks/neutron-api-relation-joined'
2448=== target is u'nova_cc_hooks.py'
2449=== modified file 'hooks/nova_cc_context.py'
2450--- hooks/nova_cc_context.py 2014-06-17 10:01:21 +0000
2451+++ hooks/nova_cc_context.py 2014-09-16 09:08:32 +0000
2452@@ -1,7 +1,6 @@
2453-
2454 from charmhelpers.core.hookenv import (
2455 config, relation_ids, relation_set, log, ERROR,
2456- unit_get)
2457+ unit_get, related_units, relation_get)
2458
2459 from charmhelpers.fetch import apt_install, filter_installed_packages
2460 from charmhelpers.contrib.openstack import context, neutron, utils
2461@@ -14,6 +13,17 @@
2462 )
2463
2464
2465+def context_complete(ctxt):
2466+ _missing = []
2467+ for k, v in ctxt.iteritems():
2468+ if v is None or v == '':
2469+ _missing.append(k)
2470+ if _missing:
2471+ log('Missing required data: %s' % ' '.join(_missing), level='INFO')
2472+ return False
2473+ return True
2474+
2475+
2476 class ApacheSSLContext(context.ApacheSSLContext):
2477
2478 interfaces = ['https']
2479@@ -27,6 +37,26 @@
2480 return super(ApacheSSLContext, self).__call__()
2481
2482
2483+class NeutronAPIContext(context.OSContextGenerator):
2484+
2485+ def __call__(self):
2486+ log('Generating template context from neutron api relation')
2487+ ctxt = {}
2488+ for rid in relation_ids('neutron-api'):
2489+ for unit in related_units(rid):
2490+ rdata = relation_get(rid=rid, unit=unit)
2491+ ctxt = {
2492+ 'neutron_url': rdata.get('neutron-url'),
2493+ 'neutron_plugin': rdata.get('neutron-plugin'),
2494+ 'neutron_security_groups':
2495+ rdata.get('neutron-security-groups'),
2496+ 'network_manager': 'neutron',
2497+ }
2498+ if context_complete(ctxt):
2499+ return ctxt
2500+ return {}
2501+
2502+
2503 class VolumeServiceContext(context.OSContextGenerator):
2504 interfaces = []
2505
2506@@ -204,3 +234,22 @@
2507 def __init__(self):
2508 super(NeutronPostgresqlDBContext,
2509 self).__init__(config('neutron-database'))
2510+
2511+
2512+class WorkerConfigContext(context.OSContextGenerator):
2513+
2514+ def __call__(self):
2515+ import psutil
2516+ multiplier = config('worker-multiplier') or 1
2517+ ctxt = {
2518+ "workers": psutil.NUM_CPUS * multiplier
2519+ }
2520+ return ctxt
2521+
2522+
2523+class NovaConfigContext(WorkerConfigContext):
2524+ def __call__(self):
2525+ ctxt = super(NovaConfigContext, self).__call__()
2526+ ctxt['cpu_allocation_ratio'] = config('cpu-allocation-ratio')
2527+ ctxt['ram_allocation_ratio'] = config('ram-allocation-ratio')
2528+ return ctxt
2529
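The new NovaConfigContext above only surfaces worker and allocation-ratio settings; with hypothetical charm options it would yield roughly the following, which the nova.conf templates can then interpolate::

    from nova_cc_context import NovaConfigContext

    # assuming config: worker-multiplier=2, cpu-allocation-ratio=16.0,
    # ram-allocation-ratio=0.98 (values illustrative)
    ctxt = NovaConfigContext()()
    # ctxt == {'workers': psutil.NUM_CPUS * 2,
    #          'cpu_allocation_ratio': 16.0,
    #          'ram_allocation_ratio': 0.98}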
2530=== modified file 'hooks/nova_cc_hooks.py'
2531--- hooks/nova_cc_hooks.py 2014-08-01 11:13:57 +0000
2532+++ hooks/nova_cc_hooks.py 2014-09-16 09:08:32 +0000
2533@@ -15,20 +15,25 @@
2534 charm_dir,
2535 is_relation_made,
2536 log,
2537+ local_unit,
2538 ERROR,
2539 relation_get,
2540 relation_ids,
2541 relation_set,
2542+ related_units,
2543 open_port,
2544 unit_get,
2545 )
2546
2547 from charmhelpers.core.host import (
2548- restart_on_change
2549+ restart_on_change,
2550+ service_running,
2551+ service_stop,
2552 )
2553
2554 from charmhelpers.fetch import (
2555- apt_install, apt_update
2556+ apt_install, apt_update,
2557+ filter_installed_packages
2558 )
2559
2560 from charmhelpers.contrib.openstack.utils import (
2561@@ -41,21 +46,33 @@
2562 neutron_plugin_attribute,
2563 )
2564
2565+from nova_cc_context import (
2566+ NeutronAPIContext
2567+)
2568+
2569+from charmhelpers.contrib.peerstorage import (
2570+ peer_retrieve,
2571+ peer_echo,
2572+)
2573+
2574 from nova_cc_utils import (
2575 api_port,
2576 auth_token_config,
2577+ cmd_all_services,
2578 determine_endpoints,
2579 determine_packages,
2580 determine_ports,
2581+ disable_services,
2582 do_openstack_upgrade,
2583+ enable_services,
2584 keystone_ca_cert_b64,
2585 migrate_database,
2586 neutron_plugin,
2587 save_script_rc,
2588 ssh_compute_add,
2589 ssh_compute_remove,
2590- ssh_known_hosts_b64,
2591- ssh_authorized_keys_b64,
2592+ ssh_known_hosts_lines,
2593+ ssh_authorized_keys_lines,
2594 register_configs,
2595 restart_map,
2596 volume_service,
2597@@ -63,13 +80,19 @@
2598 NOVA_CONF,
2599 QUANTUM_CONF,
2600 NEUTRON_CONF,
2601- QUANTUM_API_PASTE,
2602- service_guard,
2603- guard_map,
2604+<<<<<<< TREE
2605+ QUANTUM_API_PASTE,
2606+ service_guard,
2607+ guard_map,
2608+=======
2609+ QUANTUM_API_PASTE,
2610+ console_attributes,
2611+ service_guard,
2612+ guard_map,
2613+>>>>>>> MERGE-SOURCE
2614 )
2615
2616 from charmhelpers.contrib.hahelpers.cluster import (
2617- canonical_url,
2618 eligible_leader,
2619 get_hacluster_config,
2620 is_leader,
2621@@ -77,6 +100,16 @@
2622
2623 from charmhelpers.payload.execd import execd_preinstall
2624
2625+from charmhelpers.contrib.openstack.ip import (
2626+ canonical_url,
2627+ PUBLIC, INTERNAL, ADMIN
2628+)
2629+
2630+from charmhelpers.contrib.network.ip import (
2631+ get_iface_for_address,
2632+ get_netmask_for_address
2633+)
2634+
2635 hooks = Hooks()
2636 CONFIGS = register_configs()
2637
2638@@ -95,6 +128,9 @@
2639 log('Installing %s to /usr/bin' % f)
2640 shutil.copy2(f, '/usr/bin')
2641 [open_port(port) for port in determine_ports()]
2642+ log('Disabling services until db relation is joined')
2643+ disable_services()
2644+ cmd_all_services('stop')
2645
2646
2647 @hooks.hook('config-changed')
2648@@ -108,6 +144,13 @@
2649 save_script_rc()
2650 configure_https()
2651 CONFIGS.write_all()
2652+ if console_attributes('protocol'):
2653+ apt_update()
2654+ apt_install(console_attributes('packages'), fatal=True)
2655+ [compute_joined(rid=rid)
2656+ for rid in relation_ids('cloud-compute')]
2657+ for r_id in relation_ids('identity-service'):
2658+ identity_joined(rid=r_id)
2659
2660
2661 @hooks.hook('amqp-relation-joined')
2662@@ -126,10 +169,11 @@
2663 log('amqp relation incomplete. Peer not ready?')
2664 return
2665 CONFIGS.write(NOVA_CONF)
2666- if network_manager() == 'quantum':
2667- CONFIGS.write(QUANTUM_CONF)
2668- if network_manager() == 'neutron':
2669- CONFIGS.write(NEUTRON_CONF)
2670+ if not is_relation_made('neutron-api'):
2671+ if network_manager() == 'quantum':
2672+ CONFIGS.write(QUANTUM_CONF)
2673+ if network_manager() == 'neutron':
2674+ CONFIGS.write(NEUTRON_CONF)
2675
2676
2677 @hooks.hook('shared-db-relation-joined')
2678@@ -187,6 +231,13 @@
2679 CONFIGS.write_all()
2680
2681 if eligible_leader(CLUSTER_RES):
2682+ # Bugs 1353135 & 1187508. Dbs can appear to be ready before the units
2683+ # acl entry has been added. So, if the db supports passing a list of
2684+ # permitted units then check if we're in the list.
2685+ allowed_units = relation_get('nova_allowed_units')
2686+ if allowed_units and local_unit() not in allowed_units.split():
2687+ log('Allowed_units list provided and this unit not present')
2688+ return
2689 migrate_database()
2690 log('Triggering remote cloud-compute restarts.')
2691 [compute_joined(rid=rid, remote_restart=True)
2692@@ -237,8 +288,12 @@
2693 def identity_joined(rid=None):
2694 if not eligible_leader(CLUSTER_RES):
2695 return
2696- base_url = canonical_url(CONFIGS)
2697- relation_set(relation_id=rid, **determine_endpoints(base_url))
2698+ public_url = canonical_url(CONFIGS, PUBLIC)
2699+ internal_url = canonical_url(CONFIGS, INTERNAL)
2700+ admin_url = canonical_url(CONFIGS, ADMIN)
2701+ relation_set(relation_id=rid, **determine_endpoints(public_url,
2702+ internal_url,
2703+ admin_url))
2704
2705
2706 @hooks.hook('identity-service-relation-changed')
2707@@ -251,15 +306,17 @@
2708 return
2709 CONFIGS.write('/etc/nova/api-paste.ini')
2710 CONFIGS.write(NOVA_CONF)
2711- if network_manager() == 'quantum':
2712- CONFIGS.write(QUANTUM_API_PASTE)
2713- CONFIGS.write(QUANTUM_CONF)
2714- save_novarc()
2715- if network_manager() == 'neutron':
2716- CONFIGS.write(NEUTRON_CONF)
2717+ if not is_relation_made('neutron-api'):
2718+ if network_manager() == 'quantum':
2719+ CONFIGS.write(QUANTUM_API_PASTE)
2720+ CONFIGS.write(QUANTUM_CONF)
2721+ save_novarc()
2722+ if network_manager() == 'neutron':
2723+ CONFIGS.write(NEUTRON_CONF)
2724 [compute_joined(rid) for rid in relation_ids('cloud-compute')]
2725 [quantum_joined(rid) for rid in relation_ids('quantum-network-service')]
2726 [nova_vmware_relation_joined(rid) for rid in relation_ids('nova-vmware')]
2727+ [neutron_api_relation_joined(rid) for rid in relation_ids('neutron-api')]
2728 configure_https()
2729
2730
2731@@ -311,6 +368,33 @@
2732 out.write('export OS_REGION_NAME=%s\n' % config('region'))
2733
2734
2735+def neutron_settings():
2736+ neutron_settings = {}
2737+ if is_relation_made('neutron-api', 'neutron-plugin'):
2738+ neutron_api_info = NeutronAPIContext()()
2739+ neutron_settings.update({
2740+ # XXX: Rename these relations settings?
2741+ 'quantum_plugin': neutron_api_info['neutron_plugin'],
2742+ 'region': config('region'),
2743+ 'quantum_security_groups':
2744+ neutron_api_info['neutron_security_groups'],
2745+ 'quantum_url': neutron_api_info['neutron_url'],
2746+ })
2747+ else:
2748+ neutron_settings.update({
2749+ # XXX: Rename these relation settings?
2750+ 'quantum_plugin': neutron_plugin(),
2751+ 'region': config('region'),
2752+ 'quantum_security_groups': config('quantum-security-groups'),
2753+ 'quantum_url': "{}:{}".format(canonical_url(CONFIGS, INTERNAL),
2754+ str(api_port('neutron-server'))),
2755+ })
2756+ neutron_url = urlparse(neutron_settings['quantum_url'])
2757+ neutron_settings['quantum_host'] = neutron_url.hostname
2758+ neutron_settings['quantum_port'] = neutron_url.port
2759+ return neutron_settings
2760+
2761+
2762 def keystone_compute_settings():
2763 ks_auth_config = _auth_config()
2764 rel_settings = {}
2765@@ -318,25 +402,45 @@
2766 if network_manager() in ['quantum', 'neutron']:
2767 if ks_auth_config:
2768 rel_settings.update(ks_auth_config)
2769-
2770- rel_settings.update({
2771- # XXX: Rename these relations settings?
2772- 'quantum_plugin': neutron_plugin(),
2773- 'region': config('region'),
2774- 'quantum_security_groups': config('quantum-security-groups'),
2775- 'quantum_url': (canonical_url(CONFIGS) + ':' +
2776- str(api_port('neutron-server'))),
2777- })
2778-
2779+ rel_settings.update(neutron_settings())
2780 ks_ca = keystone_ca_cert_b64()
2781 if ks_auth_config and ks_ca:
2782 rel_settings['ca_cert'] = ks_ca
2783+ return rel_settings
2784+
2785+
2786+def console_settings():
2787+ rel_settings = {}
2788+ proto = console_attributes('protocol')
2789+ if not proto:
2790+ return {}
2791+ rel_settings['console_keymap'] = config('console-keymap')
2792+ rel_settings['console_access_protocol'] = proto
2793+ if config('console-proxy-ip') == 'local':
2794+ proxy_base_addr = canonical_url(CONFIGS, PUBLIC)
2795+ else:
2796+ proxy_base_addr = "http://" + config('console-proxy-ip')
2797+ if proto == 'vnc':
2798+ protocols = ['novnc', 'xvpvnc']
2799+ else:
2800+ protocols = [proto]
2801+ for _proto in protocols:
2802+ rel_settings['console_proxy_%s_address' % (_proto)] = \
2803+ "%s:%s%s" % (proxy_base_addr,
2804+ console_attributes('proxy-port', proto=_proto),
2805+ console_attributes('proxy-page', proto=_proto))
2806+ rel_settings['console_proxy_%s_host' % (_proto)] = \
2807+ urlparse(proxy_base_addr).hostname
2808+ rel_settings['console_proxy_%s_port' % (_proto)] = \
2809+ console_attributes('proxy-port', proto=_proto)
2810
2811 return rel_settings
2812
2813
2814 @hooks.hook('cloud-compute-relation-joined')
2815 def compute_joined(rid=None, remote_restart=False):
2816+ cons_settings = console_settings()
2817+ relation_set(relation_id=rid, **cons_settings)
2818 if not eligible_leader(CLUSTER_RES):
2819 return
2820 rel_settings = {
2821@@ -346,7 +450,6 @@
2822 # this may not even be needed.
2823 'ec2_host': unit_get('private-address'),
2824 }
2825-
2826 # update relation setting if we're attempting to restart remote
2827 # services
2828 if remote_restart:
2829@@ -357,21 +460,63 @@
2830
2831
2832 @hooks.hook('cloud-compute-relation-changed')
2833-def compute_changed():
2834- migration_auth = relation_get('migration_auth_type')
2835- if migration_auth == 'ssh':
2836- key = relation_get('ssh_public_key')
2837+def compute_changed(rid=None, unit=None):
2838+ rel_settings = relation_get(rid=rid, unit=unit)
2839+ if 'migration_auth_type' not in rel_settings:
2840+ return
2841+ if rel_settings['migration_auth_type'] == 'ssh':
2842+ key = rel_settings.get('ssh_public_key')
2843 if not key:
2844 log('SSH migration set but peer did not publish key.')
2845 return
2846- ssh_compute_add(key)
2847- relation_set(known_hosts=ssh_known_hosts_b64(),
2848- authorized_keys=ssh_authorized_keys_b64())
2849- if relation_get('nova_ssh_public_key'):
2850- key = relation_get('nova_ssh_public_key')
2851- ssh_compute_add(key, user='nova')
2852- relation_set(nova_known_hosts=ssh_known_hosts_b64(user='nova'),
2853- nova_authorized_keys=ssh_authorized_keys_b64(user='nova'))
2854+ ssh_compute_add(key, rid=rid, unit=unit)
2855+ index = 0
2856+ for line in ssh_known_hosts_lines(unit=unit):
2857+ relation_set(
2858+ relation_id=rid,
2859+ relation_settings={
2860+ 'known_hosts_{}'.format(index): line})
2861+ index += 1
2862+ relation_set(relation_id=rid, known_hosts_max_index=index)
2863+ index = 0
2864+ for line in ssh_authorized_keys_lines(unit=unit):
2865+ relation_set(
2866+ relation_id=rid,
2867+ relation_settings={
2868+ 'authorized_keys_{}'.format(index): line})
2869+ index += 1
2870+ relation_set(relation_id=rid, authorized_keys_max_index=index)
2871+ if 'nova_ssh_public_key' not in rel_settings:
2872+ return
2873+ if rel_settings['nova_ssh_public_key']:
2874+ ssh_compute_add(rel_settings['nova_ssh_public_key'],
2875+ rid=rid, unit=unit, user='nova')
2876+ index = 0
2877+ for line in ssh_known_hosts_lines(unit=unit, user='nova'):
2878+ relation_set(
2879+ relation_id=rid,
2880+ relation_settings={
2881+ '{}_known_hosts_{}'.format(
2882+ 'nova',
2883+ index): line})
2884+ index += 1
2885+ relation_set(
2886+ relation_id=rid,
2887+ relation_settings={
2888+ '{}_known_hosts_max_index'.format('nova'): index})
2889+ index = 0
2890+ for line in ssh_authorized_keys_lines(unit=unit, user='nova'):
2891+ relation_set(
2892+ relation_id=rid,
2893+ relation_settings={
2894+ '{}_authorized_keys_{}'.format(
2895+ 'nova',
2896+ index): line})
2897+ index += 1
2898+ relation_set(
2899+ relation_id=rid,
2900+ relation_settings={
2901+ '{}_authorized_keys_max_index'.format('nova'): index})
2902
2903
2904 @hooks.hook('cloud-compute-relation-departed')
2905@@ -385,15 +530,7 @@
2906 if not eligible_leader(CLUSTER_RES):
2907 return
2908
2909- url = canonical_url(CONFIGS) + ':9696'
2910- # XXX: Can we rename to neutron_*?
2911- rel_settings = {
2912- 'quantum_host': urlparse(url).hostname,
2913- 'quantum_url': url,
2914- 'quantum_port': 9696,
2915- 'quantum_plugin': neutron_plugin(),
2916- 'region': config('region')
2917- }
2918+ rel_settings = neutron_settings()
2919
2920 # inform quantum about local keystone auth config
2921 ks_auth_config = _auth_config()
2922@@ -403,7 +540,6 @@
2923 ks_ca = keystone_ca_cert_b64()
2924 if ks_auth_config and ks_ca:
2925 rel_settings['ca_cert'] = ks_ca
2926-
2927 relation_set(relation_id=rid, **rel_settings)
2928
2929
2930@@ -414,21 +550,44 @@
2931 @restart_on_change(restart_map(), stopstart=True)
2932 def cluster_changed():
2933 CONFIGS.write_all()
2934+ if is_relation_made('cluster'):
2935+ peer_echo(includes='dbsync_state')
2936+ dbsync_state = peer_retrieve('dbsync_state')
2937+ if dbsync_state == 'complete':
2938+ enable_services()
2939+ cmd_all_services('start')
2940+ else:
2941+ log('Database sync not ready. Shutting down services')
2942+ disable_services()
2943+ cmd_all_services('stop')
2944
2945
2946 @hooks.hook('ha-relation-joined')
2947 def ha_joined():
2948 config = get_hacluster_config()
2949 resources = {
2950- 'res_nova_vip': 'ocf:heartbeat:IPaddr2',
2951 'res_nova_haproxy': 'lsb:haproxy',
2952 }
2953- vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
2954- (config['vip'], config['vip_cidr'], config['vip_iface'])
2955 resource_params = {
2956- 'res_nova_vip': vip_params,
2957 'res_nova_haproxy': 'op monitor interval="5s"'
2958 }
2959+ vip_group = []
2960+ for vip in config['vip'].split():
2961+ iface = get_iface_for_address(vip)
2962+ if iface is not None:
2963+ vip_key = 'res_nova_{}_vip'.format(iface)
2964+ resources[vip_key] = 'ocf:heartbeat:IPaddr2'
2965+ resource_params[vip_key] = (
2966+ 'params ip="{vip}" cidr_netmask="{netmask}"'
2967+ ' nic="{iface}"'.format(vip=vip,
2968+ iface=iface,
2969+ netmask=get_netmask_for_address(vip))
2970+ )
2971+ vip_group.append(vip_key)
2972+
2973+ if len(vip_group) >= 1:
2974+ relation_set(groups={'grp_nova_vips': ' '.join(vip_group)})
2975+
2976 init_services = {
2977 'res_nova_haproxy': 'haproxy'
2978 }
2979@@ -449,6 +608,7 @@
2980 if not clustered or clustered in [None, 'None', '']:
2981 log('ha_changed: hacluster subordinate not fully clustered.')
2982 return
2983+<<<<<<< TREE
2984
2985 CONFIGS.write(NOVA_CONF)
2986 if network_manager() == 'quantum':
2987@@ -456,6 +616,16 @@
2988 if network_manager() == 'neutron':
2989 CONFIGS.write(NEUTRON_CONF)
2990
2991+=======
2992+
2993+ CONFIGS.write(NOVA_CONF)
2994+ if not is_relation_made('neutron-api'):
2995+ if network_manager() == 'quantum':
2996+ CONFIGS.write(QUANTUM_CONF)
2997+ if network_manager() == 'neutron':
2998+ CONFIGS.write(NEUTRON_CONF)
2999+
3000+>>>>>>> MERGE-SOURCE
3001 if not is_leader(CLUSTER_RES):
3002 log('ha_changed: hacluster complete but we are not leader.')
3003 return
3004@@ -465,13 +635,23 @@
3005 identity_joined(rid=rid)
3006
3007
3008+@hooks.hook('shared-db-relation-broken',
3009+ 'pgsql-nova-db-relation-broken')
3010+@service_guard(guard_map(), CONFIGS,
3011+ active=config('service-guard'))
3012+def db_departed():
3013+ CONFIGS.write_all()
3014+ for r_id in relation_ids('cluster'):
3015+ relation_set(relation_id=r_id, dbsync_state='incomplete')
3016+ disable_services()
3017+ cmd_all_services('stop')
3018+
3019+
3020 @hooks.hook('amqp-relation-broken',
3021 'cinder-volume-service-relation-broken',
3022 'identity-service-relation-broken',
3023 'image-service-relation-broken',
3024 'nova-volume-service-relation-broken',
3025- 'shared-db-relation-broken',
3026- 'pgsql-nova-db-relation-broken',
3027 'pgsql-neutron-db-relation-broken',
3028 'quantum-network-service-relation-broken')
3029 @service_guard(guard_map(), CONFIGS,
3030@@ -509,8 +689,8 @@
3031 rel_settings.update({
3032 'quantum_plugin': neutron_plugin(),
3033 'quantum_security_groups': config('quantum-security-groups'),
3034- 'quantum_url': (canonical_url(CONFIGS) + ':' +
3035- str(api_port('neutron-server')))})
3036+ 'quantum_url': "{}:{}".format(canonical_url(CONFIGS, INTERNAL),
3037+ str(api_port('neutron-server')))})
3038
3039 relation_set(relation_id=rid, **rel_settings)
3040
3041@@ -525,10 +705,55 @@
3042
3043 @hooks.hook('upgrade-charm')
3044 def upgrade_charm():
3045+ apt_install(filter_installed_packages(determine_packages()),
3046+ fatal=True)
3047 for r_id in relation_ids('amqp'):
3048 amqp_joined(relation_id=r_id)
3049 for r_id in relation_ids('identity-service'):
3050 identity_joined(rid=r_id)
3051+ for r_id in relation_ids('cloud-compute'):
3052+ for unit in related_units(r_id):
3053+ compute_changed(r_id, unit)
3054+
3055+
3056+@hooks.hook('neutron-api-relation-joined')
3057+def neutron_api_relation_joined(rid=None):
3058+ with open('/etc/init/neutron-server.override', 'wb') as out:
3059+ out.write('manual\n')
3060+ if os.path.isfile(NEUTRON_CONF):
3061+ os.rename(NEUTRON_CONF, NEUTRON_CONF + '_unused')
3062+ if service_running('neutron-server'):
3063+ service_stop('neutron-server')
3064+ for id_rid in relation_ids('identity-service'):
3065+ identity_joined(rid=id_rid)
3066+ nova_url = canonical_url(CONFIGS, INTERNAL) + ":8774/v2"
3067+ relation_set(relation_id=rid, nova_url=nova_url)
3068+
3069+
3070+@hooks.hook('neutron-api-relation-changed')
3071+@service_guard(guard_map(), CONFIGS,
3072+ active=config('service-guard'))
3073+@restart_on_change(restart_map())
3074+def neutron_api_relation_changed():
3075+ CONFIGS.write(NOVA_CONF)
3076+ for rid in relation_ids('cloud-compute'):
3077+ compute_joined(rid=rid)
3078+ for rid in relation_ids('quantum-network-service'):
3079+ quantum_joined(rid=rid)
3080+
3081+
3082+@hooks.hook('neutron-api-relation-broken')
3083+@service_guard(guard_map(), CONFIGS,
3084+ active=config('service-guard'))
3085+@restart_on_change(restart_map())
3086+def neutron_api_relation_broken():
3087+ if os.path.isfile('/etc/init/neutron-server.override'):
3088+ os.remove('/etc/init/neutron-server.override')
3089+ CONFIGS.write_all()
3090+ for rid in relation_ids('cloud-compute'):
3091+ compute_joined(rid=rid)
3092+ for rid in relation_ids('quantum-network-service'):
3093+ quantum_joined(rid=rid)
3094
3095
3096 def main():
3097
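compute_changed() above now publishes SSH data as indexed relation keys; the remote compute charm would reassemble them along these lines (a sketch based only on the key names set above)::

    from charmhelpers.core.hookenv import relation_get

    max_index = relation_get('known_hosts_max_index')
    if max_index:
        known_hosts = '\n'.join(
            relation_get('known_hosts_{}'.format(i))
            for i in range(int(max_index)))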
3098=== modified file 'hooks/nova_cc_utils.py'
3099--- hooks/nova_cc_utils.py 2014-07-29 15:05:01 +0000
3100+++ hooks/nova_cc_utils.py 2014-09-16 09:08:32 +0000
3101@@ -12,6 +12,8 @@
3102
3103 from charmhelpers.contrib.hahelpers.cluster import eligible_leader
3104
3105+from charmhelpers.contrib.peerstorage import peer_store
3106+
3107 from charmhelpers.contrib.openstack.utils import (
3108 configure_installation_source,
3109 get_host_ip,
3110@@ -39,17 +41,23 @@
3111 )
3112
3113 from charmhelpers.core.host import (
3114- service_start,
3115- service_stop,
3116- service_running
3117+<<<<<<< TREE
3118+ service_start,
3119+ service_stop,
3120+ service_running
3121+=======
3122+ service,
3123+ service_start,
3124+ service_stop,
3125+ service_running
3126+>>>>>>> MERGE-SOURCE
3127 )
3128
3129-
3130 import nova_cc_context
3131
3132 TEMPLATES = 'templates/'
3133
3134-CLUSTER_RES = 'res_nova_vip'
3135+CLUSTER_RES = 'grp_nova_vips'
3136
3137 # removed from original: charm-helper-sh
3138 BASE_PACKAGES = [
3139@@ -58,6 +66,7 @@
3140 'python-keystoneclient',
3141 'python-mysqldb',
3142 'python-psycopg2',
3143+ 'python-psutil',
3144 'uuid',
3145 ]
3146
3147@@ -110,7 +119,8 @@
3148 nova_cc_context.HAProxyContext(),
3149 nova_cc_context.IdentityServiceContext(),
3150 nova_cc_context.VolumeServiceContext(),
3151- nova_cc_context.NeutronCCContext()],
3152+ nova_cc_context.NeutronCCContext(),
3153+ nova_cc_context.NovaConfigContext()],
3154 }),
3155 (NOVA_API_PASTE, {
3156 'services': [s for s in BASE_SERVICES if 'api' in s],
3157@@ -150,7 +160,8 @@
3158 nova_cc_context.IdentityServiceContext(),
3159 nova_cc_context.NeutronCCContext(),
3160 nova_cc_context.HAProxyContext(),
3161- context.SyslogContext()],
3162+ context.SyslogContext(),
3163+ nova_cc_context.NovaConfigContext()],
3164 }),
3165 (NEUTRON_DEFAULT, {
3166 'services': ['neutron-server'],
3167@@ -175,6 +186,27 @@
3168
3169 NOVA_SSH_DIR = '/etc/nova/compute_ssh/'
3170
3171+CONSOLE_CONFIG = {
3172+ 'spice': {
3173+ 'packages': ['nova-spiceproxy', 'nova-consoleauth'],
3174+ 'services': ['nova-spiceproxy', 'nova-consoleauth'],
3175+ 'proxy-page': '/spice_auto.html',
3176+ 'proxy-port': 6082,
3177+ },
3178+ 'novnc': {
3179+ 'packages': ['nova-novncproxy', 'nova-consoleauth'],
3180+ 'services': ['nova-novncproxy', 'nova-consoleauth'],
3181+ 'proxy-page': '/vnc_auto.html',
3182+ 'proxy-port': 6080,
3183+ },
3184+ 'xvpvnc': {
3185+ 'packages': ['nova-xvpvncproxy', 'nova-consoleauth'],
3186+ 'services': ['nova-xvpvncproxy', 'nova-consoleauth'],
3187+ 'proxy-page': '/console',
3188+ 'proxy-port': 6081,
3189+ },
3190+}
3191+
3192
3193 def resource_map():
3194 '''
3195@@ -191,44 +223,56 @@
3196
3197 net_manager = network_manager()
3198
3199- # pop out irrelevant resources from the OrderedDict (easier than adding
3200- # them late)
3201- if net_manager != 'quantum':
3202- [resource_map.pop(k) for k in list(resource_map.iterkeys())
3203- if 'quantum' in k]
3204- if net_manager != 'neutron':
3205- [resource_map.pop(k) for k in list(resource_map.iterkeys())
3206- if 'neutron' in k]
3207-
3208 if os.path.exists('/etc/apache2/conf-available'):
3209 resource_map.pop(APACHE_CONF)
3210 else:
3211 resource_map.pop(APACHE_24_CONF)
3212
3213- # add neutron plugin requirements. nova-c-c only needs the neutron-server
3214- # associated with configs, not the plugin agent.
3215- if net_manager in ['quantum', 'neutron']:
3216- plugin = neutron_plugin()
3217- if plugin:
3218- conf = neutron_plugin_attribute(plugin, 'config', net_manager)
3219- ctxts = (neutron_plugin_attribute(plugin, 'contexts', net_manager)
3220- or [])
3221- services = neutron_plugin_attribute(plugin, 'server_services',
3222- net_manager)
3223- resource_map[conf] = {}
3224- resource_map[conf]['services'] = services
3225- resource_map[conf]['contexts'] = ctxts
3226- resource_map[conf]['contexts'].append(
3227- nova_cc_context.NeutronCCContext())
3228+ if is_relation_made('neutron-api'):
3229+ [resource_map.pop(k) for k in list(resource_map.iterkeys())
3230+ if 'quantum' in k or 'neutron' in k]
3231+ resource_map[NOVA_CONF]['contexts'].append(
3232+ nova_cc_context.NeutronAPIContext())
3233+ else:
3234+ resource_map[NOVA_CONF]['contexts'].append(
3235+ nova_cc_context.NeutronCCContext())
3236+ # pop out irrelevant resources from the OrderedDict (easier than adding
3237+ # them late)
3238+ if net_manager != 'quantum':
3239+ [resource_map.pop(k) for k in list(resource_map.iterkeys())
3240+ if 'quantum' in k]
3241+ if net_manager != 'neutron':
3242+ [resource_map.pop(k) for k in list(resource_map.iterkeys())
3243+ if 'neutron' in k]
3244+ # add neutron plugin requirements. nova-c-c only needs the
3245+ # neutron-server associated with configs, not the plugin agent.
3246+ if net_manager in ['quantum', 'neutron']:
3247+ plugin = neutron_plugin()
3248+ if plugin:
3249+ conf = neutron_plugin_attribute(plugin, 'config', net_manager)
3250+ ctxts = (neutron_plugin_attribute(plugin, 'contexts',
3251+ net_manager)
3252+ or [])
3253+ services = neutron_plugin_attribute(plugin, 'server_services',
3254+ net_manager)
3255+ resource_map[conf] = {}
3256+ resource_map[conf]['services'] = services
3257+ resource_map[conf]['contexts'] = ctxts
3258+ resource_map[conf]['contexts'].append(
3259+ nova_cc_context.NeutronCCContext())
3260
3261- # update for postgres
3262- resource_map[conf]['contexts'].append(
3263- nova_cc_context.NeutronPostgresqlDBContext())
3264+ # update for postgres
3265+ resource_map[conf]['contexts'].append(
3266+ nova_cc_context.NeutronPostgresqlDBContext())
3267
3268 # nova-conductor for releases >= G.
3269 if os_release('nova-common') not in ['essex', 'folsom']:
3270 resource_map['/etc/nova/nova.conf']['services'] += ['nova-conductor']
3271
3272+ if console_attributes('services'):
3273+ resource_map['/etc/nova/nova.conf']['services'] += \
3274+ console_attributes('services')
3275+
3276 # also manage any configs that are being updated by subordinates.
3277 vmware_ctxt = context.SubordinateConfigContext(interface='nova-vmware',
3278 service='nova',
3279@@ -238,6 +282,7 @@
3280 for s in vmware_ctxt['services']:
3281 if s not in resource_map[NOVA_CONF]['services']:
3282 resource_map[NOVA_CONF]['services'].append(s)
3283+
3284 return resource_map
3285
3286
3287@@ -268,9 +313,9 @@
3288 '''Assemble a list of API ports for services we are managing'''
3289 ports = []
3290 for services in restart_map().values():
3291- for service in services:
3292+ for svc in services:
3293 try:
3294- ports.append(API_PORTS[service])
3295+ ports.append(API_PORTS[svc])
3296 except KeyError:
3297 pass
3298 return list(set(ports))
3299@@ -280,6 +325,27 @@
3300 return API_PORTS[service]
3301
3302
3303+def console_attributes(attr, proto=None):
3304+ '''Leave proto unset to query attributes of the protocol specified at
3305+ runtime'''
3306+ if proto:
3307+ console_proto = proto
3308+ else:
3309+ console_proto = config('console-access-protocol')
3310+ if attr == 'protocol':
3311+ return console_proto
3312+ # 'vnc' is a virtual type made up of novnc and xvpvnc
3313+ if console_proto == 'vnc':
3314+ if attr in ['packages', 'services']:
3315+ return list(set(CONSOLE_CONFIG['novnc'][attr] +
3316+ CONSOLE_CONFIG['xvpvnc'][attr]))
3317+ else:
3318+ return None
3319+ if console_proto in CONSOLE_CONFIG:
3320+ return CONSOLE_CONFIG[console_proto][attr]
3321+ return None
3322+
3323+
3324 def determine_packages():
3325 # currently all packages match service names
3326 packages = [] + BASE_PACKAGES
3327@@ -289,6 +355,8 @@
3328 pkgs = neutron_plugin_attribute(neutron_plugin(), 'server_packages',
3329 network_manager())
3330 packages.extend(pkgs)
3331+ if console_attributes('packages'):
3332+ packages.extend(console_attributes('packages'))
3333 return list(set(packages))
3334
3335
3336@@ -486,6 +554,12 @@
3337 log('Migrating the nova database.', level=INFO)
3338 cmd = ['nova-manage', 'db', 'sync']
3339 subprocess.check_output(cmd)
3340+ if is_relation_made('cluster'):
3341+ log('Informing peers that dbsync is complete', level=INFO)
3342+ peer_store('dbsync_state', 'complete')
3343+ log('Enabling services', level=INFO)
3344+ enable_services()
3345+ cmd_all_services('start')
3346
3347
3348 def auth_token_config(setting):
3349@@ -512,8 +586,11 @@
3350 return b64encode(_in.read())
3351
3352
3353-def ssh_directory_for_unit(user=None):
3354- remote_service = remote_unit().split('/')[0]
3355+def ssh_directory_for_unit(unit=None, user=None):
3356+ if unit:
3357+ remote_service = unit.split('/')[0]
3358+ else:
3359+ remote_service = remote_unit().split('/')[0]
3360 if user:
3361 remote_service = "{}_{}".format(remote_service, user)
3362 _dir = os.path.join(NOVA_SSH_DIR, remote_service)
3363@@ -527,29 +604,29 @@
3364 return _dir
3365
3366
3367-def known_hosts(user=None):
3368- return os.path.join(ssh_directory_for_unit(user), 'known_hosts')
3369-
3370-
3371-def authorized_keys(user=None):
3372- return os.path.join(ssh_directory_for_unit(user), 'authorized_keys')
3373-
3374-
3375-def ssh_known_host_key(host, user=None):
3376- cmd = ['ssh-keygen', '-f', known_hosts(user), '-H', '-F', host]
3377+def known_hosts(unit=None, user=None):
3378+ return os.path.join(ssh_directory_for_unit(unit, user), 'known_hosts')
3379+
3380+
3381+def authorized_keys(unit=None, user=None):
3382+ return os.path.join(ssh_directory_for_unit(unit, user), 'authorized_keys')
3383+
3384+
3385+def ssh_known_host_key(host, unit=None, user=None):
3386+ cmd = ['ssh-keygen', '-f', known_hosts(unit, user), '-H', '-F', host]
3387 try:
3388 return subprocess.check_output(cmd).strip()
3389 except subprocess.CalledProcessError:
3390 return None
3391
3392
3393-def remove_known_host(host, user=None):
3394+def remove_known_host(host, unit=None, user=None):
3395 log('Removing SSH known host entry for compute host at %s' % host)
3396- cmd = ['ssh-keygen', '-f', known_hosts(user), '-R', host]
3397+ cmd = ['ssh-keygen', '-f', known_hosts(unit, user), '-R', host]
3398 subprocess.check_call(cmd)
3399
3400
3401-def add_known_host(host, user=None):
3402+def add_known_host(host, unit=None, user=None):
3403 '''Add variations of host to a known hosts file.'''
3404 cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
3405 try:
3406@@ -558,34 +635,37 @@
3407 log('Could not obtain SSH host key from %s' % host, level=ERROR)
3408 raise e
3409
3410- current_key = ssh_known_host_key(host, user)
3411+ current_key = ssh_known_host_key(host, unit, user)
3412 if current_key:
3413 if remote_key == current_key:
3414 log('Known host key for compute host %s up to date.' % host)
3415 return
3416 else:
3417- remove_known_host(host, user)
3418+ remove_known_host(host, unit, user)
3419
3420 log('Adding SSH host key to known hosts for compute node at %s.' % host)
3421- with open(known_hosts(user), 'a') as out:
3422+ with open(known_hosts(unit, user), 'a') as out:
3423 out.write(remote_key + '\n')
3424
3425
3426-def ssh_authorized_key_exists(public_key, user=None):
3427- with open(authorized_keys(user)) as keys:
3428+def ssh_authorized_key_exists(public_key, unit=None, user=None):
3429+ with open(authorized_keys(unit, user)) as keys:
3430 return (' %s ' % public_key) in keys.read()
3431
3432
3433-def add_authorized_key(public_key, user=None):
3434- with open(authorized_keys(user), 'a') as keys:
3435+def add_authorized_key(public_key, unit=None, user=None):
3436+ with open(authorized_keys(unit, user), 'a') as keys:
3437 keys.write(public_key + '\n')
3438
3439
3440-def ssh_compute_add(public_key, user=None):
3441+def ssh_compute_add(public_key, rid=None, unit=None, user=None):
3442 # If remote compute node hands us a hostname, ensure we have a
3443 # known hosts entry for its IP, hostname and FQDN.
3444- private_address = relation_get('private-address')
3445+ private_address = relation_get(rid=rid, unit=unit,
3446+ attribute='private-address')
3447 hosts = [private_address]
3448+ if relation_get('hostname'):
3449+ hosts.append(relation_get('hostname'))
3450
3451 if not is_ip(private_address):
3452 hosts.append(get_host_ip(private_address))
3453@@ -596,31 +676,41 @@
3454 hosts.append(hn.split('.')[0])
3455
3456 for host in list(set(hosts)):
3457- if not ssh_known_host_key(host, user):
3458- add_known_host(host, user)
3459+ if not ssh_known_host_key(host, unit, user):
3460+ add_known_host(host, unit, user)
3461
3462- if not ssh_authorized_key_exists(public_key, user):
3463+ if not ssh_authorized_key_exists(public_key, unit, user):
3464 log('Saving SSH authorized key for compute host at %s.' %
3465 private_address)
3466- add_authorized_key(public_key, user)
3467-
3468-
3469-def ssh_known_hosts_b64(user=None):
3470- with open(known_hosts(user)) as hosts:
3471- return b64encode(hosts.read())
3472-
3473-
3474-def ssh_authorized_keys_b64(user=None):
3475- with open(authorized_keys(user)) as keys:
3476- return b64encode(keys.read())
3477-
3478-
3479-def ssh_compute_remove(public_key, user=None):
3480- if not (os.path.isfile(authorized_keys(user)) or
3481- os.path.isfile(known_hosts(user))):
3482+ add_authorized_key(public_key, unit, user)
3483+
3484+
3485+def ssh_known_hosts_lines(unit=None, user=None):
3486+ known_hosts_list = []
3487+
3488+ with open(known_hosts(unit, user)) as hosts:
3489+ for hosts_line in hosts:
3490+ if hosts_line.rstrip():
3491+ known_hosts_list.append(hosts_line.rstrip())
3492+ return known_hosts_list
3493+
3494+
3495+def ssh_authorized_keys_lines(unit=None, user=None):
3496+ authorized_keys_list = []
3497+
3498+ with open(authorized_keys(unit, user)) as keys:
3499+ for authkey_line in keys:
3500+ if authkey_line.rstrip():
3501+ authorized_keys_list.append(authkey_line.rstrip())
3502+ return(authorized_keys_list)
3503+ return authorized_keys_list
3504+
3505+def ssh_compute_remove(public_key, unit=None, user=None):
3506+ if not (os.path.isfile(authorized_keys(unit, user)) or
3507+ os.path.isfile(known_hosts(unit, user))):
3508 return
3509
3510- with open(authorized_keys(user)) as _keys:
3511+ with open(authorized_keys(unit, user)) as _keys:
3512 keys = [k.strip() for k in _keys.readlines()]
3513
3514 if public_key not in keys:
3515@@ -628,67 +718,101 @@
3516
3517 [keys.remove(key) for key in keys if key == public_key]
3518
3519- with open(authorized_keys(user), 'w') as _keys:
3520+ with open(authorized_keys(unit, user), 'w') as _keys:
3521 keys = '\n'.join(keys)
3522 if not keys.endswith('\n'):
3523 keys += '\n'
3524 _keys.write(keys)
3525
3526
3527-def determine_endpoints(url):
3528+def determine_endpoints(public_url, internal_url, admin_url):
3529 '''Generates a dictionary containing all relevant endpoints to be
3530 passed to keystone as relation settings.'''
3531 region = config('region')
3532 os_rel = os_release('nova-common')
3533
3534 if os_rel >= 'grizzly':
3535- nova_url = ('%s:%s/v2/$(tenant_id)s' %
3536- (url, api_port('nova-api-os-compute')))
3537+ nova_public_url = ('%s:%s/v2/$(tenant_id)s' %
3538+ (public_url, api_port('nova-api-os-compute')))
3539+ nova_internal_url = ('%s:%s/v2/$(tenant_id)s' %
3540+ (internal_url, api_port('nova-api-os-compute')))
3541+ nova_admin_url = ('%s:%s/v2/$(tenant_id)s' %
3542+ (admin_url, api_port('nova-api-os-compute')))
3543 else:
3544- nova_url = ('%s:%s/v1.1/$(tenant_id)s' %
3545- (url, api_port('nova-api-os-compute')))
3546- ec2_url = '%s:%s/services/Cloud' % (url, api_port('nova-api-ec2'))
3547- nova_volume_url = ('%s:%s/v1/$(tenant_id)s' %
3548- (url, api_port('nova-api-os-compute')))
3549- neutron_url = '%s:%s' % (url, api_port('neutron-server'))
3550- s3_url = '%s:%s' % (url, api_port('nova-objectstore'))
3551+ nova_public_url = ('%s:%s/v1.1/$(tenant_id)s' %
3552+ (public_url, api_port('nova-api-os-compute')))
3553+ nova_internal_url = ('%s:%s/v1.1/$(tenant_id)s' %
3554+ (internal_url, api_port('nova-api-os-compute')))
3555+ nova_admin_url = ('%s:%s/v1.1/$(tenant_id)s' %
3556+ (admin_url, api_port('nova-api-os-compute')))
3557+
3558+ ec2_public_url = '%s:%s/services/Cloud' % (
3559+ public_url, api_port('nova-api-ec2'))
3560+ ec2_internal_url = '%s:%s/services/Cloud' % (
3561+ internal_url, api_port('nova-api-ec2'))
3562+ ec2_admin_url = '%s:%s/services/Cloud' % (admin_url,
3563+ api_port('nova-api-ec2'))
3564+
3565+ nova_volume_public_url = ('%s:%s/v1/$(tenant_id)s' %
3566+ (public_url, api_port('nova-api-os-compute')))
3567+ nova_volume_internal_url = ('%s:%s/v1/$(tenant_id)s' %
3568+ (internal_url,
3569+ api_port('nova-api-os-compute')))
3570+ nova_volume_admin_url = ('%s:%s/v1/$(tenant_id)s' %
3571+ (admin_url, api_port('nova-api-os-compute')))
3572+
3573+ neutron_public_url = '%s:%s' % (public_url, api_port('neutron-server'))
3574+ neutron_internal_url = '%s:%s' % (internal_url, api_port('neutron-server'))
3575+ neutron_admin_url = '%s:%s' % (admin_url, api_port('neutron-server'))
3576+
3577+ s3_public_url = '%s:%s' % (public_url, api_port('nova-objectstore'))
3578+ s3_internal_url = '%s:%s' % (internal_url, api_port('nova-objectstore'))
3579+ s3_admin_url = '%s:%s' % (admin_url, api_port('nova-objectstore'))
3580
3581 # the base endpoints
3582 endpoints = {
3583 'nova_service': 'nova',
3584 'nova_region': region,
3585- 'nova_public_url': nova_url,
3586- 'nova_admin_url': nova_url,
3587- 'nova_internal_url': nova_url,
3588+ 'nova_public_url': nova_public_url,
3589+ 'nova_admin_url': nova_admin_url,
3590+ 'nova_internal_url': nova_internal_url,
3591 'ec2_service': 'ec2',
3592 'ec2_region': region,
3593- 'ec2_public_url': ec2_url,
3594- 'ec2_admin_url': ec2_url,
3595- 'ec2_internal_url': ec2_url,
3596+ 'ec2_public_url': ec2_public_url,
3597+ 'ec2_admin_url': ec2_admin_url,
3598+ 'ec2_internal_url': ec2_internal_url,
3599 's3_service': 's3',
3600 's3_region': region,
3601- 's3_public_url': s3_url,
3602- 's3_admin_url': s3_url,
3603- 's3_internal_url': s3_url,
3604+ 's3_public_url': s3_public_url,
3605+ 's3_admin_url': s3_admin_url,
3606+ 's3_internal_url': s3_internal_url,
3607 }
3608
3609 if relation_ids('nova-volume-service'):
3610 endpoints.update({
3611 'nova-volume_service': 'nova-volume',
3612 'nova-volume_region': region,
3613- 'nova-volume_public_url': nova_volume_url,
3614- 'nova-volume_admin_url': nova_volume_url,
3615- 'nova-volume_internal_url': nova_volume_url,
3616+ 'nova-volume_public_url': nova_volume_public_url,
3617+ 'nova-volume_admin_url': nova_volume_admin_url,
3618+ 'nova-volume_internal_url': nova_volume_internal_url,
3619 })
3620
3621 # XXX: Keep these relations named quantum_*??
3622- if network_manager() in ['quantum', 'neutron']:
3623+ if is_relation_made('neutron-api'):
3624+ endpoints.update({
3625+ 'quantum_service': None,
3626+ 'quantum_region': None,
3627+ 'quantum_public_url': None,
3628+ 'quantum_admin_url': None,
3629+ 'quantum_internal_url': None,
3630+ })
3631+ elif network_manager() in ['quantum', 'neutron']:
3632 endpoints.update({
3633 'quantum_service': 'quantum',
3634 'quantum_region': region,
3635- 'quantum_public_url': neutron_url,
3636- 'quantum_admin_url': neutron_url,
3637- 'quantum_internal_url': neutron_url,
3638+ 'quantum_public_url': neutron_public_url,
3639+ 'quantum_admin_url': neutron_admin_url,
3640+ 'quantum_internal_url': neutron_internal_url,
3641 })
3642
3643 return endpoints
3644@@ -698,59 +822,141 @@
3645 # quantum-plugin config setting can be safely overridden
3646 # as we only supported OVS in G/neutron
3647 return config('neutron-plugin') or config('quantum-plugin')
3648-
3649-
3650-def guard_map():
3651- '''Map of services and required interfaces that must be present before
3652- the service should be allowed to start'''
3653- gmap = {}
3654- nova_services = deepcopy(BASE_SERVICES)
3655- if os_release('nova-common') not in ['essex', 'folsom']:
3656- nova_services.append('nova-conductor')
3657-
3658- nova_interfaces = ['identity-service', 'amqp']
3659- if relation_ids('pgsql-nova-db'):
3660- nova_interfaces.append('pgsql-nova-db')
3661- else:
3662- nova_interfaces.append('shared-db')
3663-
3664- for svc in nova_services:
3665- gmap[svc] = nova_interfaces
3666-
3667- net_manager = network_manager()
3668- if net_manager in ['neutron', 'quantum'] and \
3669- not is_relation_made('neutron-api'):
3670- neutron_interfaces = ['identity-service', 'amqp']
3671- if relation_ids('pgsql-neutron-db'):
3672- neutron_interfaces.append('pgsql-neutron-db')
3673- else:
3674- neutron_interfaces.append('shared-db')
3675- if network_manager() == 'quantum':
3676- gmap['quantum-server'] = neutron_interfaces
3677- else:
3678- gmap['neutron-server'] = neutron_interfaces
3679-
3680- return gmap
3681-
3682-
3683-def service_guard(guard_map, contexts, active=False):
3684- '''Inhibit services in guard_map from running unless
3685- required interfaces are found complete in contexts.'''
3686- def wrap(f):
3687- def wrapped_f(*args):
3688- if active is True:
3689- incomplete_services = []
3690- for svc in guard_map:
3691- for interface in guard_map[svc]:
3692- if interface not in contexts.complete_contexts():
3693- incomplete_services.append(svc)
3694- f(*args)
3695- for svc in incomplete_services:
3696- if service_running(svc):
3697- log('Service {} has unfulfilled '
3698- 'interface requirements, stopping.'.format(svc))
3699- service_stop(svc)
3700- else:
3701- f(*args)
3702- return wrapped_f
3703- return wrap
3704+<<<<<<< TREE
3705+
3706+
3707+def guard_map():
3708+ '''Map of services and required interfaces that must be present before
3709+ the service should be allowed to start'''
3710+ gmap = {}
3711+ nova_services = deepcopy(BASE_SERVICES)
3712+ if os_release('nova-common') not in ['essex', 'folsom']:
3713+ nova_services.append('nova-conductor')
3714+
3715+ nova_interfaces = ['identity-service', 'amqp']
3716+ if relation_ids('pgsql-nova-db'):
3717+ nova_interfaces.append('pgsql-nova-db')
3718+ else:
3719+ nova_interfaces.append('shared-db')
3720+
3721+ for svc in nova_services:
3722+ gmap[svc] = nova_interfaces
3723+
3724+ net_manager = network_manager()
3725+ if net_manager in ['neutron', 'quantum'] and \
3726+ not is_relation_made('neutron-api'):
3727+ neutron_interfaces = ['identity-service', 'amqp']
3728+ if relation_ids('pgsql-neutron-db'):
3729+ neutron_interfaces.append('pgsql-neutron-db')
3730+ else:
3731+ neutron_interfaces.append('shared-db')
3732+ if network_manager() == 'quantum':
3733+ gmap['quantum-server'] = neutron_interfaces
3734+ else:
3735+ gmap['neutron-server'] = neutron_interfaces
3736+
3737+ return gmap
3738+
3739+
3740+def service_guard(guard_map, contexts, active=False):
3741+ '''Inhibit services in guard_map from running unless
3742+ required interfaces are found complete in contexts.'''
3743+ def wrap(f):
3744+ def wrapped_f(*args):
3745+ if active is True:
3746+ incomplete_services = []
3747+ for svc in guard_map:
3748+ for interface in guard_map[svc]:
3749+ if interface not in contexts.complete_contexts():
3750+ incomplete_services.append(svc)
3751+ f(*args)
3752+ for svc in incomplete_services:
3753+ if service_running(svc):
3754+ log('Service {} has unfulfilled '
3755+ 'interface requirements, stopping.'.format(svc))
3756+ service_stop(svc)
3757+ else:
3758+ f(*args)
3759+ return wrapped_f
3760+ return wrap
3761+=======
3762+
3763+
3764+def guard_map():
3765+ '''Map of services and required interfaces that must be present before
3766+ the service should be allowed to start'''
3767+ gmap = {}
3768+ nova_services = deepcopy(BASE_SERVICES)
3769+ if os_release('nova-common') not in ['essex', 'folsom']:
3770+ nova_services.append('nova-conductor')
3771+
3772+ nova_interfaces = ['identity-service', 'amqp']
3773+ if relation_ids('pgsql-nova-db'):
3774+ nova_interfaces.append('pgsql-nova-db')
3775+ else:
3776+ nova_interfaces.append('shared-db')
3777+
3778+ for svc in nova_services:
3779+ gmap[svc] = nova_interfaces
3780+
3781+ net_manager = network_manager()
3782+ if net_manager in ['neutron', 'quantum'] and \
3783+ not is_relation_made('neutron-api'):
3784+ neutron_interfaces = ['identity-service', 'amqp']
3785+ if relation_ids('pgsql-neutron-db'):
3786+ neutron_interfaces.append('pgsql-neutron-db')
3787+ else:
3788+ neutron_interfaces.append('shared-db')
3789+ if network_manager() == 'quantum':
3790+ gmap['quantum-server'] = neutron_interfaces
3791+ else:
3792+ gmap['neutron-server'] = neutron_interfaces
3793+
3794+ return gmap
3795+
3796+
3797+def service_guard(guard_map, contexts, active=False):
3798+ '''Inhibit services in guard_map from running unless
3799+ required interfaces are found complete in contexts.'''
3800+ def wrap(f):
3801+ def wrapped_f(*args):
3802+ if active is True:
3803+ incomplete_services = []
3804+ for svc in guard_map:
3805+ for interface in guard_map[svc]:
3806+ if interface not in contexts.complete_contexts():
3807+ incomplete_services.append(svc)
3808+ f(*args)
3809+ for svc in incomplete_services:
3810+ if service_running(svc):
3811+ log('Service {} has unfulfilled '
3812+ 'interface requirements, stopping.'.format(svc))
3813+ service_stop(svc)
3814+ else:
3815+ f(*args)
3816+ return wrapped_f
3817+ return wrap
3818+
3819+
3820+def cmd_all_services(cmd):
3821+ if cmd == 'start':
3822+ for svc in services():
3823+ if not service_running(svc):
3824+ service_start(svc)
3825+ else:
3826+ for svc in services():
3827+ service(cmd, svc)
3828+
3829+
3830+def disable_services():
3831+ for svc in services():
3832+ with open('/etc/init/{}.override'.format(svc), 'wb') as out:
3833+ out.write('exec true\n')
3834+
3835+
3836+def enable_services():
3837+ for svc in services():
3838+ override_file = '/etc/init/{}.override'.format(svc)
3839+ if os.path.isfile(override_file):
3840+ os.remove(override_file)
3841+>>>>>>> MERGE-SOURCE
3842
3843=== modified file 'metadata.yaml'
3844--- metadata.yaml 2014-03-31 11:56:09 +0000
3845+++ metadata.yaml 2014-09-16 09:08:32 +0000
3846@@ -30,6 +30,8 @@
3847 interface: nova-volume
3848 quantum-network-service:
3849 interface: quantum
3850+ neutron-api:
3851+ interface: neutron-api
3852 ha:
3853 interface: hacluster
3854 scope: container
3855
3856=== modified file 'revision'
3857--- revision 2014-04-16 08:25:14 +0000
3858+++ revision 2014-09-16 09:08:32 +0000
3859@@ -1,1 +1,1 @@
3860-315
3861+500
3862
3863=== modified file 'templates/havana/nova.conf'
3864--- templates/havana/nova.conf 2014-08-01 11:04:31 +0000
3865+++ templates/havana/nova.conf 2014-09-16 09:08:32 +0000
3866@@ -20,8 +20,17 @@
3867 enabled_apis=ec2,osapi_compute,metadata
3868 auth_strategy=keystone
3869 compute_driver=libvirt.LibvirtDriver
3870-use_syslog={{ use_syslog }}
3871-
3872+<<<<<<< TREE
3873+use_syslog={{ use_syslog }}
3874+
3875+=======
3876+osapi_compute_workers = {{ workers }}
3877+ec2_workers = {{ workers }}
3878+scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,CoreFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
3879+cpu_allocation_ratio = {{ cpu_allocation_ratio }}
3880+use_syslog={{ use_syslog }}
3881+
3882+>>>>>>> MERGE-SOURCE
3883 {% if keystone_ec2_url -%}
3884 keystone_ec2_url = {{ keystone_ec2_url }}
3885 {% endif -%}
3886
3887=== modified file 'templates/icehouse/neutron.conf'
3888--- templates/icehouse/neutron.conf 2014-08-01 11:04:31 +0000
3889+++ templates/icehouse/neutron.conf 2014-09-16 09:08:32 +0000
3890@@ -8,7 +8,12 @@
3891 bind_host = 0.0.0.0
3892 auth_strategy = keystone
3893 notification_driver = neutron.openstack.common.notifier.rpc_notifier
3894+<<<<<<< TREE
3895 use_syslog={{ use_syslog }}
3896+=======
3897+api_workers = {{ workers }}
3898+use_syslog = {{ use_syslog }}
3899+>>>>>>> MERGE-SOURCE
3900
3901 {% if neutron_bind_port -%}
3902 bind_port = {{ neutron_bind_port }}
3903
3904=== modified file 'templates/icehouse/nova.conf'
3905--- templates/icehouse/nova.conf 2014-08-01 11:04:31 +0000
3906+++ templates/icehouse/nova.conf 2014-09-16 09:08:32 +0000
3907@@ -1,3 +1,4 @@
3908+# icehouse
3909 ###############################################################################
3910 # [ WARNING ]
3911 # Configuration file maintained by Juju. Local changes may be overwritten.
3912@@ -20,8 +21,21 @@
3913 enabled_apis=ec2,osapi_compute,metadata
3914 auth_strategy=keystone
3915 compute_driver=libvirt.LibvirtDriver
3916-use_syslog={{ use_syslog }}
3917-
3918+<<<<<<< TREE
3919+use_syslog={{ use_syslog }}
3920+
3921+=======
3922+
3923+osapi_compute_workers = {{ workers }}
3924+ec2_workers = {{ workers }}
3925+
3926+scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,CoreFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
3927+cpu_allocation_ratio = {{ cpu_allocation_ratio }}
3928+ram_allocation_ratio = {{ ram_allocation_ratio }}
3929+
3930+use_syslog={{ use_syslog }}
3931+
3932+>>>>>>> MERGE-SOURCE
3933 {% if keystone_ec2_url -%}
3934 keystone_ec2_url = {{ keystone_ec2_url }}
3935 {% endif -%}
3936@@ -130,3 +144,5 @@
3937 [osapi_v3]
3938 enabled=True
3939
3940+[conductor]
3941+workers = {{ workers }}
3942
3943=== added directory 'tests'
3944=== added file 'tests/00-setup'
3945--- tests/00-setup 1970-01-01 00:00:00 +0000
3946+++ tests/00-setup 2014-09-16 09:08:32 +0000
3947@@ -0,0 +1,10 @@
3948+#!/bin/bash
3949+
3950+set -ex
3951+
3952+sudo add-apt-repository --yes ppa:juju/stable
3953+sudo apt-get update --yes
3954+sudo apt-get install --yes python-amulet
3955+sudo apt-get install --yes python-glanceclient
3956+sudo apt-get install --yes python-keystoneclient
3957+sudo apt-get install --yes python-novaclient
3958
3959=== added file 'tests/10-basic-precise-essex'
3960--- tests/10-basic-precise-essex 1970-01-01 00:00:00 +0000
3961+++ tests/10-basic-precise-essex 2014-09-16 09:08:32 +0000
3962@@ -0,0 +1,10 @@
3963+#!/usr/bin/python
3964+
3965+"""Amulet tests on a basic nova cloud controller deployment on
3966+ precise-essex."""
3967+
3968+from basic_deployment import NovaCCBasicDeployment
3969+
3970+if __name__ == '__main__':
3971+ deployment = NovaCCBasicDeployment(series='precise')
3972+ deployment.run_tests()
3973
3974=== added file 'tests/11-basic-precise-folsom'
3975--- tests/11-basic-precise-folsom 1970-01-01 00:00:00 +0000
3976+++ tests/11-basic-precise-folsom 2014-09-16 09:08:32 +0000
3977@@ -0,0 +1,18 @@
3978+#!/usr/bin/python
3979+
3980+"""Amulet tests on a basic nova cloud controller deployment on
3981+ precise-folsom."""
3982+
3983+import amulet
3984+from basic_deployment import NovaCCBasicDeployment
3985+
3986+if __name__ == '__main__':
3987+ # NOTE(coreycb): Skipping failing test until resolved. 'nova-manage db sync'
3988+ # fails in shared-db-relation-changed (only fails on folsom)
3989+ message = "Skipping failing test until resolved"
3990+ amulet.raise_status(amulet.SKIP, msg=message)
3991+
3992+ deployment = NovaCCBasicDeployment(series='precise',
3993+ openstack='cloud:precise-folsom',
3994+ source='cloud:precise-updates/folsom')
3995+ deployment.run_tests()
3996
3997=== added file 'tests/12-basic-precise-grizzly'
3998--- tests/12-basic-precise-grizzly 1970-01-01 00:00:00 +0000
3999+++ tests/12-basic-precise-grizzly 2014-09-16 09:08:32 +0000
4000@@ -0,0 +1,12 @@
4001+#!/usr/bin/python
4002+
4003+"""Amulet tests on a basic nova cloud controller deployment on
4004+ precise-grizzly."""
4005+
4006+from basic_deployment import NovaCCBasicDeployment
4007+
4008+if __name__ == '__main__':
4009+ deployment = NovaCCBasicDeployment(series='precise',
4010+ openstack='cloud:precise-grizzly',
4011+ source='cloud:precise-updates/grizzly')
4012+ deployment.run_tests()
4013
4014=== added file 'tests/13-basic-precise-havana'
4015--- tests/13-basic-precise-havana 1970-01-01 00:00:00 +0000
4016+++ tests/13-basic-precise-havana 2014-09-16 09:08:32 +0000
4017@@ -0,0 +1,12 @@
4018+#!/usr/bin/python
4019+
4020+"""Amulet tests on a basic nova cloud controller deployment on
4021+ precise-havana."""
4022+
4023+from basic_deployment import NovaCCBasicDeployment
4024+
4025+if __name__ == '__main__':
4026+ deployment = NovaCCBasicDeployment(series='precise',
4027+ openstack='cloud:precise-havana',
4028+ source='cloud:precise-updates/havana')
4029+ deployment.run_tests()
4030
4031=== added file 'tests/14-basic-precise-icehouse'
4032--- tests/14-basic-precise-icehouse 1970-01-01 00:00:00 +0000
4033+++ tests/14-basic-precise-icehouse 2014-09-16 09:08:32 +0000
4034@@ -0,0 +1,12 @@
4035+#!/usr/bin/python
4036+
4037+"""Amulet tests on a basic nova cloud controller deployment on
4038+ precise-icehouse."""
4039+
4040+from basic_deployment import NovaCCBasicDeployment
4041+
4042+if __name__ == '__main__':
4043+ deployment = NovaCCBasicDeployment(series='precise',
4044+ openstack='cloud:precise-icehouse',
4045+ source='cloud:precise-updates/icehouse')
4046+ deployment.run_tests()
4047
4048=== added file 'tests/15-basic-trusty-icehouse'
4049--- tests/15-basic-trusty-icehouse 1970-01-01 00:00:00 +0000
4050+++ tests/15-basic-trusty-icehouse 2014-09-16 09:08:32 +0000
4051@@ -0,0 +1,10 @@
4052+#!/usr/bin/python
4053+
4054+"""Amulet tests on a basic nova cloud controller deployment on
4055+ trusty-icehouse."""
4056+
4057+from basic_deployment import NovaCCBasicDeployment
4058+
4059+if __name__ == '__main__':
4060+ deployment = NovaCCBasicDeployment(series='trusty')
4061+ deployment.run_tests()
4062
4063=== added file 'tests/README'
4064--- tests/README 1970-01-01 00:00:00 +0000
4065+++ tests/README 2014-09-16 09:08:32 +0000
4066@@ -0,0 +1,47 @@
4067+This directory provides Amulet tests that focus on verification of Nova Cloud
4068+Controller deployments.
4069+
4070+If you use a web proxy server to access the web, you'll need to set the
4071+AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.
4072+
4073+The following examples demonstrate different ways that tests can be executed.
4074+All examples are run from the charm's root directory.
4075+
4076+ * To run all tests (starting with 00-setup):
4077+
4078+ make test
4079+
4080+ * To run a specific test module (or modules):
4081+
4082+ juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
4083+
4084+ * To run a specific test module (or modules), and keep the environment
4085+ deployed after a failure:
4086+
4087+ juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
4088+
4089+ * To re-run a test module against an already deployed environment (one
4090+ that was deployed by a previous call to 'juju test --set-e'):
4091+
4092+ ./tests/15-basic-trusty-icehouse
4093+
4094+For debugging and test development purposes, all code should be idempotent.
4095+In other words, the code should have the ability to be re-run without changing
4096+the results beyond the initial run. This enables editing and re-running of a
4097+test module against an already deployed environment, as described above.
4098+
4099+Manual debugging tips:
4100+
4101+ * Set the following env vars before using the OpenStack CLI as admin:
4102+ export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
4103+ export OS_TENANT_NAME=admin
4104+ export OS_USERNAME=admin
4105+ export OS_PASSWORD=openstack
4106+ export OS_REGION_NAME=RegionOne
4107+
4108+ * Set the following env vars before using the OpenStack CLI as demoUser:
4109+ export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
4110+ export OS_TENANT_NAME=demoTenant
4111+ export OS_USERNAME=demoUser
4112+ export OS_PASSWORD=password
4113+ export OS_REGION_NAME=RegionOne
4114
4115=== added file 'tests/basic_deployment.py'
4116--- tests/basic_deployment.py 1970-01-01 00:00:00 +0000
4117+++ tests/basic_deployment.py 2014-09-16 09:08:32 +0000
4118@@ -0,0 +1,520 @@
4119+#!/usr/bin/python
4120+
4121+import amulet
4122+
4123+from charmhelpers.contrib.openstack.amulet.deployment import (
4124+ OpenStackAmuletDeployment
4125+)
4126+
4127+from charmhelpers.contrib.openstack.amulet.utils import (
4128+ OpenStackAmuletUtils,
4129+ DEBUG, # flake8: noqa
4130+ ERROR
4131+)
4132+
4133+# Use DEBUG to turn on debug logging
4134+u = OpenStackAmuletUtils(ERROR)
4135+
4136+
4137+class NovaCCBasicDeployment(OpenStackAmuletDeployment):
4138+ """Amulet tests on a basic nova cloud controller deployment."""
4139+
4140+ def __init__(self, series=None, openstack=None, source=None):
4141+ """Deploy the entire test environment."""
4142+ super(NovaCCBasicDeployment, self).__init__(series, openstack, source)
4143+ self._add_services()
4144+ self._add_relations()
4145+ self._configure_services()
4146+ self._deploy()
4147+ self._initialize_tests()
4148+
4149+ def _add_services(self):
4150+ """Add the service that we're testing, including the number of units,
4151+ where nova-cloud-controller is local, and the other charms are from
4152+ the charm store."""
4153+ this_service = ('nova-cloud-controller', 1)
4154+ other_services = [('mysql', 1), ('rabbitmq-server', 1),
4155+ ('nova-compute', 2), ('keystone', 1), ('glance', 1)]
4156+ super(NovaCCBasicDeployment, self)._add_services(this_service,
4157+ other_services)
4158+
4159+ def _add_relations(self):
4160+ """Add all of the relations for the services."""
4161+ relations = {
4162+ 'nova-cloud-controller:shared-db': 'mysql:shared-db',
4163+ 'nova-cloud-controller:identity-service': 'keystone:identity-service',
4164+ 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp',
4165+ 'nova-cloud-controller:cloud-compute': 'nova-compute:cloud-compute',
4166+ 'nova-cloud-controller:image-service': 'glance:image-service',
4167+ 'nova-compute:image-service': 'glance:image-service',
4168+ 'nova-compute:shared-db': 'mysql:shared-db',
4169+ 'nova-compute:amqp': 'rabbitmq-server:amqp',
4170+ 'keystone:shared-db': 'mysql:shared-db',
4171+ 'glance:identity-service': 'keystone:identity-service',
4172+ 'glance:shared-db': 'mysql:shared-db',
4173+ 'glance:amqp': 'rabbitmq-server:amqp'
4174+ }
4175+ super(NovaCCBasicDeployment, self)._add_relations(relations)
4176+
4177+ def _configure_services(self):
4178+ """Configure all of the services."""
4179+ keystone_config = {'admin-password': 'openstack',
4180+ 'admin-token': 'ubuntutesting'}
4181+ configs = {'keystone': keystone_config}
4182+ super(NovaCCBasicDeployment, self)._configure_services(configs)
4183+
4184+ def _initialize_tests(self):
4185+ """Perform final initialization before tests get run."""
4186+ # Access the sentries for inspecting service units
4187+ self.mysql_sentry = self.d.sentry.unit['mysql/0']
4188+ self.keystone_sentry = self.d.sentry.unit['keystone/0']
4189+ self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
4190+ self.nova_cc_sentry = self.d.sentry.unit['nova-cloud-controller/0']
4191+ self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0']
4192+ self.glance_sentry = self.d.sentry.unit['glance/0']
4193+
4194+ # Authenticate admin with keystone
4195+ self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
4196+ user='admin',
4197+ password='openstack',
4198+ tenant='admin')
4199+
4200+ # Authenticate admin with glance endpoint
4201+ self.glance = u.authenticate_glance_admin(self.keystone)
4202+
4203+ # Create a demo tenant/role/user
4204+ self.demo_tenant = 'demoTenant'
4205+ self.demo_role = 'demoRole'
4206+ self.demo_user = 'demoUser'
4207+ if not u.tenant_exists(self.keystone, self.demo_tenant):
4208+ tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
4209+ description='demo tenant',
4210+ enabled=True)
4211+ self.keystone.roles.create(name=self.demo_role)
4212+ self.keystone.users.create(name=self.demo_user,
4213+ password='password',
4214+ tenant_id=tenant.id,
4215+ email='demo@demo.com')
4216+
4217+ # Authenticate demo user with keystone
4218+ self.keystone_demo = \
4219+ u.authenticate_keystone_user(self.keystone, user=self.demo_user,
4220+ password='password',
4221+ tenant=self.demo_tenant)
4222+
4223+ # Authenticate demo user with nova-api
4224+ self.nova_demo = u.authenticate_nova_user(self.keystone,
4225+ user=self.demo_user,
4226+ password='password',
4227+ tenant=self.demo_tenant)
4228+
4229+ def test_services(self):
4230+ """Verify the expected services are running on the corresponding
4231+ service units."""
4232+ commands = {
4233+ self.mysql_sentry: ['status mysql'],
4234+ self.rabbitmq_sentry: ['sudo service rabbitmq-server status'],
4235+ self.nova_cc_sentry: ['status nova-api-ec2',
4236+ 'status nova-api-os-compute',
4237+ 'status nova-objectstore',
4238+ 'status nova-cert',
4239+ 'status nova-scheduler'],
4240+ self.nova_compute_sentry: ['status nova-compute',
4241+ 'status nova-network',
4242+ 'status nova-api'],
4243+ self.keystone_sentry: ['status keystone'],
4244+ self.glance_sentry: ['status glance-registry', 'status glance-api']
4245+ }
4246+ if self._get_openstack_release() >= self.precise_grizzly:
4247+ commands[self.nova_cc_sentry] = ['status nova-conductor']
4248+
4249+ ret = u.validate_services(commands)
4250+ if ret:
4251+ amulet.raise_status(amulet.FAIL, msg=ret)
4252+
4253+ def test_service_catalog(self):
4254+ """Verify that the service catalog endpoint data is valid."""
4255+ endpoint_vol = {'adminURL': u.valid_url,
4256+ 'region': 'RegionOne',
4257+ 'publicURL': u.valid_url,
4258+ 'internalURL': u.valid_url}
4259+ endpoint_id = {'adminURL': u.valid_url,
4260+ 'region': 'RegionOne',
4261+ 'publicURL': u.valid_url,
4262+ 'internalURL': u.valid_url}
4263+ if self._get_openstack_release() >= self.precise_folsom:
4264+ endpoint_vol['id'] = u.not_null
4265+ endpoint_id['id'] = u.not_null
4266+ expected = {'s3': [endpoint_vol], 'compute': [endpoint_vol],
4267+ 'ec2': [endpoint_vol], 'identity': [endpoint_id]}
4268+ actual = self.keystone_demo.service_catalog.get_endpoints()
4269+
4270+ ret = u.validate_svc_catalog_endpoint_data(expected, actual)
4271+ if ret:
4272+ amulet.raise_status(amulet.FAIL, msg=ret)
4273+
4274+ def test_openstack_compute_api_endpoint(self):
4275+ """Verify the openstack compute api (osapi) endpoint data."""
4276+ endpoints = self.keystone.endpoints.list()
4277+ admin_port = internal_port = public_port = '8774'
4278+ expected = {'id': u.not_null,
4279+ 'region': 'RegionOne',
4280+ 'adminurl': u.valid_url,
4281+ 'internalurl': u.valid_url,
4282+ 'publicurl': u.valid_url,
4283+ 'service_id': u.not_null}
4284+
4285+ ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
4286+ public_port, expected)
4287+ if ret:
4288+ message = 'osapi endpoint: {}'.format(ret)
4289+ amulet.raise_status(amulet.FAIL, msg=message)
4290+
4291+ def test_ec2_api_endpoint(self):
4292+ """Verify the EC2 api endpoint data."""
4293+ endpoints = self.keystone.endpoints.list()
4294+ admin_port = internal_port = public_port = '8773'
4295+ expected = {'id': u.not_null,
4296+ 'region': 'RegionOne',
4297+ 'adminurl': u.valid_url,
4298+ 'internalurl': u.valid_url,
4299+ 'publicurl': u.valid_url,
4300+ 'service_id': u.not_null}
4301+
4302+ ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
4303+ public_port, expected)
4304+ if ret:
4305+ message = 'EC2 endpoint: {}'.format(ret)
4306+ amulet.raise_status(amulet.FAIL, msg=message)
4307+
4308+ def test_s3_api_endpoint(self):
4309+ """Verify the S3 api endpoint data."""
4310+ endpoints = self.keystone.endpoints.list()
4311+ admin_port = internal_port = public_port = '3333'
4312+ expected = {'id': u.not_null,
4313+ 'region': 'RegionOne',
4314+ 'adminurl': u.valid_url,
4315+ 'internalurl': u.valid_url,
4316+ 'publicurl': u.valid_url,
4317+ 'service_id': u.not_null}
4318+
4319+ ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
4320+ public_port, expected)
4321+ if ret:
4322+ message = 'S3 endpoint: {}'.format(ret)
4323+ amulet.raise_status(amulet.FAIL, msg=message)
4324+
4325+ def test_nova_cc_shared_db_relation(self):
4326+ """Verify the nova-cc to mysql shared-db relation data"""
4327+ unit = self.nova_cc_sentry
4328+ relation = ['shared-db', 'mysql:shared-db']
4329+ expected = {
4330+ 'private-address': u.valid_ip,
4331+ 'nova_database': 'nova',
4332+ 'nova_username': 'nova',
4333+ 'nova_hostname': u.valid_ip
4334+ }
4335+
4336+ ret = u.validate_relation_data(unit, relation, expected)
4337+ if ret:
4338+ message = u.relation_error('nova-cc shared-db', ret)
4339+ amulet.raise_status(amulet.FAIL, msg=message)
4340+
4341+ def test_mysql_shared_db_relation(self):
4342+ """Verify the mysql to nova-cc shared-db relation data"""
4343+ unit = self.mysql_sentry
4344+ relation = ['shared-db', 'nova-cloud-controller:shared-db']
4345+ expected = {
4346+ 'private-address': u.valid_ip,
4347+ 'nova_password': u.not_null,
4348+ 'db_host': u.valid_ip
4349+ }
4350+
4351+ ret = u.validate_relation_data(unit, relation, expected)
4352+ if ret:
4353+ message = u.relation_error('mysql shared-db', ret)
4354+ amulet.raise_status(amulet.FAIL, msg=message)
4355+
4356+ def test_nova_cc_identity_service_relation(self):
4357+ """Verify the nova-cc to keystone identity-service relation data"""
4358+ unit = self.nova_cc_sentry
4359+ relation = ['identity-service', 'keystone:identity-service']
4360+ expected = {
4361+ 'nova_internal_url': u.valid_url,
4362+ 'nova_public_url': u.valid_url,
4363+ 's3_public_url': u.valid_url,
4364+ 's3_service': 's3',
4365+ 'ec2_admin_url': u.valid_url,
4366+ 'ec2_internal_url': u.valid_url,
4367+ 'nova_service': 'nova',
4368+ 's3_region': 'RegionOne',
4369+ 'private-address': u.valid_ip,
4370+ 'nova_region': 'RegionOne',
4371+ 'ec2_public_url': u.valid_url,
4372+ 'ec2_region': 'RegionOne',
4373+ 's3_internal_url': u.valid_url,
4374+ 's3_admin_url': u.valid_url,
4375+ 'nova_admin_url': u.valid_url,
4376+ 'ec2_service': 'ec2'
4377+ }
4378+
4379+ ret = u.validate_relation_data(unit, relation, expected)
4380+ if ret:
4381+ message = u.relation_error('nova-cc identity-service', ret)
4382+ amulet.raise_status(amulet.FAIL, msg=message)
4383+
4384+ def test_keystone_identity_service_relation(self):
4385+ """Verify the keystone to nova-cc identity-service relation data"""
4386+ unit = self.keystone_sentry
4387+ relation = ['identity-service',
4388+ 'nova-cloud-controller:identity-service']
4389+ expected = {
4390+ 'service_protocol': 'http',
4391+ 'service_tenant': 'services',
4392+ 'admin_token': 'ubuntutesting',
4393+ 'service_password': u.not_null,
4394+ 'service_port': '5000',
4395+ 'auth_port': '35357',
4396+ 'auth_protocol': 'http',
4397+ 'private-address': u.valid_ip,
4398+ 'https_keystone': 'False',
4399+ 'auth_host': u.valid_ip,
4400+ 'service_username': 's3_ec2_nova',
4401+ 'service_tenant_id': u.not_null,
4402+ 'service_host': u.valid_ip
4403+ }
4404+
4405+ ret = u.validate_relation_data(unit, relation, expected)
4406+ if ret:
4407+ message = u.relation_error('keystone identity-service', ret)
4408+ amulet.raise_status(amulet.FAIL, msg=message)
4409+
4410+ def test_nova_cc_amqp_relation(self):
4411+ """Verify the nova-cc to rabbitmq-server amqp relation data"""
4412+ unit = self.nova_cc_sentry
4413+ relation = ['amqp', 'rabbitmq-server:amqp']
4414+ expected = {
4415+ 'username': 'nova',
4416+ 'private-address': u.valid_ip,
4417+ 'vhost': 'openstack'
4418+ }
4419+
4420+ ret = u.validate_relation_data(unit, relation, expected)
4421+ if ret:
4422+ message = u.relation_error('nova-cc amqp', ret)
4423+ amulet.raise_status(amulet.FAIL, msg=message)
4424+
4425+ def test_rabbitmq_amqp_relation(self):
4426+ """Verify the rabbitmq-server to nova-cc amqp relation data"""
4427+ unit = self.rabbitmq_sentry
4428+ relation = ['amqp', 'nova-cloud-controller:amqp']
4429+ expected = {
4430+ 'private-address': u.valid_ip,
4431+ 'password': u.not_null,
4432+ 'hostname': u.valid_ip
4433+ }
4434+
4435+ ret = u.validate_relation_data(unit, relation, expected)
4436+ if ret:
4437+ message = u.relation_error('rabbitmq amqp', ret)
4438+ amulet.raise_status(amulet.FAIL, msg=message)
4439+
4440+ def test_nova_cc_cloud_compute_relation(self):
4441+ """Verify the nova-cc to nova-compute cloud-compute relation data"""
4442+ unit = self.nova_cc_sentry
4443+ relation = ['cloud-compute', 'nova-compute:cloud-compute']
4444+ expected = {
4445+ 'volume_service': 'cinder',
4446+ 'network_manager': 'flatdhcpmanager',
4447+ 'ec2_host': u.valid_ip,
4448+ 'private-address': u.valid_ip,
4449+ 'restart_trigger': u.not_null
4450+ }
4451+ if self._get_openstack_release() == self.precise_essex:
4452+ expected['volume_service'] = 'nova-volume'
4453+
4454+ ret = u.validate_relation_data(unit, relation, expected)
4455+ if ret:
4456+ message = u.relation_error('nova-cc cloud-compute', ret)
4457+ amulet.raise_status(amulet.FAIL, msg=message)
4458+
4459+ def test_nova_cloud_compute_relation(self):
4460+ """Verify the nova-compute to nova-cc cloud-compute relation data"""
4461+ unit = self.nova_compute_sentry
4462+ relation = ['cloud-compute', 'nova-cloud-controller:cloud-compute']
4463+ expected = {
4464+ 'private-address': u.valid_ip,
4465+ }
4466+
4467+ ret = u.validate_relation_data(unit, relation, expected)
4468+ if ret:
4469+ message = u.relation_error('nova-compute cloud-compute', ret)
4470+ amulet.raise_status(amulet.FAIL, msg=message)
4471+
4472+ def test_nova_cc_image_service_relation(self):
4473+ """Verify the nova-cc to glance image-service relation data"""
4474+ unit = self.nova_cc_sentry
4475+ relation = ['image-service', 'glance:image-service']
4476+ expected = {
4477+ 'private-address': u.valid_ip,
4478+ }
4479+
4480+ ret = u.validate_relation_data(unit, relation, expected)
4481+ if ret:
4482+ message = u.relation_error('nova-cc image-service', ret)
4483+ amulet.raise_status(amulet.FAIL, msg=message)
4484+
4485+ def test_glance_image_service_relation(self):
4486+ """Verify the glance to nova-cc image-service relation data"""
4487+ unit = self.glance_sentry
4488+ relation = ['image-service', 'nova-cloud-controller:image-service']
4489+ expected = {
4490+ 'private-address': u.valid_ip,
4491+ 'glance-api-server': u.valid_url
4492+ }
4493+
4494+ ret = u.validate_relation_data(unit, relation, expected)
4495+ if ret:
4496+ message = u.relation_error('glance image-service', ret)
4497+ amulet.raise_status(amulet.FAIL, msg=message)
4498+
4499+ def test_restart_on_config_change(self):
4500+ """Verify that the specified services are restarted when the config
4501+ is changed."""
4502+ # NOTE(coreycb): Skipping failing test on essex until resolved.
4503+ # config-flags don't take effect on essex.
4504+ if self._get_openstack_release() == self.precise_essex:
4505+ u.log.error("Skipping failing test until resolved")
4506+ return
4507+
4508+ services = ['nova-api-ec2', 'nova-api-os-compute', 'nova-objectstore',
4509+ 'nova-cert', 'nova-scheduler', 'nova-conductor']
4510+ self.d.configure('nova-cloud-controller',
4511+ {'config-flags': 'quota_cores=20,quota_instances=40,quota_ram=102400'})
4512+ pgrep_full = True
4513+
4514+ time = 20
4515+ conf = '/etc/nova/nova.conf'
4516+ for s in services:
4517+ if not u.service_restarted(self.nova_cc_sentry, s, conf,
4518+ pgrep_full=True, sleep_time=time):
4519+ msg = "service {} didn't restart after config change".format(s)
4520+ amulet.raise_status(amulet.FAIL, msg=msg)
4521+ time = 0
4522+
4523+ def test_nova_default_config(self):
4524+ """Verify the data in the nova config file's default section."""
4525+ # NOTE(coreycb): Currently no way to test on essex because config file
4526+ # has no section headers.
4527+ if self._get_openstack_release() == self.precise_essex:
4528+ return
4529+
4530+ unit = self.nova_cc_sentry
4531+ conf = '/etc/nova/nova.conf'
4532+ rabbitmq_relation = self.rabbitmq_sentry.relation('amqp',
4533+ 'nova-cloud-controller:amqp')
4534+ glance_relation = self.glance_sentry.relation('image-service',
4535+ 'nova-cloud-controller:image-service')
4536+ mysql_relation = self.mysql_sentry.relation('shared-db',
4537+ 'nova-cloud-controller:shared-db')
4538+ db_uri = "mysql://{}:{}@{}/{}".format('nova',
4539+ mysql_relation['nova_password'],
4540+ mysql_relation['db_host'],
4541+ 'nova')
4542+ keystone_ep = self.keystone_demo.service_catalog.url_for(\
4543+ service_type='identity',
4544+ endpoint_type='publicURL')
4545+ keystone_ec2 = "{}/ec2tokens".format(keystone_ep)
4546+
4547+ expected = {'dhcpbridge_flagfile': '/etc/nova/nova.conf',
4548+ 'dhcpbridge': '/usr/bin/nova-dhcpbridge',
4549+ 'logdir': '/var/log/nova',
4550+ 'state_path': '/var/lib/nova',
4551+ 'lock_path': '/var/lock/nova',
4552+ 'force_dhcp_release': 'True',
4553+ 'iscsi_helper': 'tgtadm',
4554+ 'libvirt_use_virtio_for_bridges': 'True',
4555+ 'connection_type': 'libvirt',
4556+ 'root_helper': 'sudo nova-rootwrap /etc/nova/rootwrap.conf',
4557+ 'verbose': 'True',
4558+ 'ec2_private_dns_show_ip': 'True',
4559+ 'api_paste_config': '/etc/nova/api-paste.ini',
4560+ 'volumes_path': '/var/lib/nova/volumes',
4561+ 'enabled_apis': 'ec2,osapi_compute,metadata',
4562+ 'auth_strategy': 'keystone',
4563+ 'compute_driver': 'libvirt.LibvirtDriver',
4564+ 'keystone_ec2_url': keystone_ec2,
4565+ 'sql_connection': db_uri,
4566+ 'rabbit_userid': 'nova',
4567+ 'rabbit_virtual_host': 'openstack',
4568+ 'rabbit_password': rabbitmq_relation['password'],
4569+ 'rabbit_host': rabbitmq_relation['hostname'],
4570+ 'glance_api_servers': glance_relation['glance-api-server'],
4571+ 'network_manager': 'nova.network.manager.FlatDHCPManager',
4572+ 's3_listen_port': '3333',
4573+ 'osapi_compute_listen_port': '8774',
4574+ 'ec2_listen_port': '8773'}
4575+
4576+ ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
4577+ if ret:
4578+ message = "nova config error: {}".format(ret)
4579+ amulet.raise_status(amulet.FAIL, msg=message)
4580+
4581+
4582+ def test_nova_keystone_authtoken_config(self):
4583+ """Verify the data in the nova config file's keystone_authtoken
4584+ section. This data only exists since icehouse."""
4585+ if self._get_openstack_release() < self.precise_icehouse:
4586+ return
4587+
4588+ unit = self.nova_cc_sentry
4589+ conf = '/etc/nova/nova.conf'
4590+ keystone_relation = self.keystone_sentry.relation('identity-service',
4591+ 'nova-cloud-controller:identity-service')
4592+ keystone_uri = "http://{}:{}/".format(keystone_relation['service_host'],
4593+ keystone_relation['service_port'])
4594+ expected = {'auth_uri': keystone_uri,
4595+ 'auth_host': keystone_relation['service_host'],
4596+ 'auth_port': keystone_relation['auth_port'],
4597+ 'auth_protocol': keystone_relation['auth_protocol'],
4598+ 'admin_tenant_name': keystone_relation['service_tenant'],
4599+ 'admin_user': keystone_relation['service_username'],
4600+ 'admin_password': keystone_relation['service_password']}
4601+
4602+ ret = u.validate_config_data(unit, conf, 'keystone_authtoken', expected)
4603+ if ret:
4604+ message = "nova config error: {}".format(ret)
4605+ amulet.raise_status(amulet.FAIL, msg=message)
4606+
4607+ def test_image_instance_create(self):
4608+ """Create an image/instance, verify they exist, and delete them."""
4609+ # NOTE(coreycb): Skipping failing test on essex until resolved. essex
4610+ # nova API calls are getting "Malformed request url (HTTP
4611+ # 400)".
4612+ if self._get_openstack_release() == self.precise_essex:
4613+ u.log.error("Skipping failing test until resolved")
4614+ return
4615+
4616+ image = u.create_cirros_image(self.glance, "cirros-image")
4617+ if not image:
4618+ amulet.raise_status(amulet.FAIL, msg="Image create failed")
4619+
4620+ instance = u.create_instance(self.nova_demo, "cirros-image", "cirros",
4621+ "m1.tiny")
4622+ if not instance:
4623+ amulet.raise_status(amulet.FAIL, msg="Instance create failed")
4624+
4625+ found = False
4626+ for instance in self.nova_demo.servers.list():
4627+ if instance.name == 'cirros':
4628+ found = True
4629+ if instance.status != 'ACTIVE':
4630+ msg = "cirros instance is not active"
4631+ amulet.raise_status(amulet.FAIL, msg=msg)
4632+
4633+ if not found:
4634+ message = "nova cirros instance does not exist"
4635+ amulet.raise_status(amulet.FAIL, msg=message)
4636+
4637+ u.delete_image(self.glance, image)
4638+ u.delete_instance(self.nova_demo, instance)
4639
4640=== added directory 'tests/charmhelpers'
4641=== added file 'tests/charmhelpers/__init__.py'
4642=== added directory 'tests/charmhelpers/contrib'
4643=== added file 'tests/charmhelpers/contrib/__init__.py'
4644=== added directory 'tests/charmhelpers/contrib/amulet'
4645=== added file 'tests/charmhelpers/contrib/amulet/__init__.py'
4646=== added file 'tests/charmhelpers/contrib/amulet/deployment.py'
4647--- tests/charmhelpers/contrib/amulet/deployment.py 1970-01-01 00:00:00 +0000
4648+++ tests/charmhelpers/contrib/amulet/deployment.py 2014-09-16 09:08:32 +0000
4649@@ -0,0 +1,71 @@
4650+import amulet
4651+
4652+import os
4653+
4654+
4655+class AmuletDeployment(object):
4656+ """Amulet deployment.
4657+
4658+ This class provides generic Amulet deployment and test runner
4659+ methods.
4660+ """
4661+
4662+ def __init__(self, series=None):
4663+ """Initialize the deployment environment."""
4664+ self.series = None
4665+
4666+ if series:
4667+ self.series = series
4668+ self.d = amulet.Deployment(series=self.series)
4669+ else:
4670+ self.d = amulet.Deployment()
4671+
4672+ def _add_services(self, this_service, other_services):
4673+ """Add services.
4674+
4675+ Add services to the deployment where this_service is the local charm
4676+ that we're focused on testing and other_services are the other
4677+ charms that come from the charm store.
4678+ """
4679+ name, units = range(2)
4680+
4681+ if this_service[name] != os.path.basename(os.getcwd()):
4682+ s = this_service[name]
4683+ msg = "The charm's root directory name needs to be {}".format(s)
4684+ amulet.raise_status(amulet.FAIL, msg=msg)
4685+
4686+ self.d.add(this_service[name], units=this_service[units])
4687+
4688+ for svc in other_services:
4689+ if self.series:
4690+ self.d.add(svc[name],
4691+ charm='cs:{}/{}'.format(self.series, svc[name]),
4692+ units=svc[units])
4693+ else:
4694+ self.d.add(svc[name], units=svc[units])
4695+
4696+ def _add_relations(self, relations):
4697+ """Add all of the relations for the services."""
4698+ for k, v in relations.iteritems():
4699+ self.d.relate(k, v)
4700+
4701+ def _configure_services(self, configs):
4702+ """Configure all of the services."""
4703+ for service, config in configs.iteritems():
4704+ self.d.configure(service, config)
4705+
4706+ def _deploy(self):
4707+ """Deploy environment and wait for all hooks to finish executing."""
4708+ try:
4709+ self.d.setup()
4710+ self.d.sentry.wait(timeout=900)
4711+ except amulet.helpers.TimeoutError:
4712+ amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
4713+ except Exception:
4714+ raise
4715+
4716+ def run_tests(self):
4717+ """Run all of the methods that are prefixed with 'test_'."""
4718+ for test in dir(self):
4719+ if test.startswith('test_'):
4720+ getattr(self, test)()
4721
4722=== added file 'tests/charmhelpers/contrib/amulet/utils.py'
4723--- tests/charmhelpers/contrib/amulet/utils.py 1970-01-01 00:00:00 +0000
4724+++ tests/charmhelpers/contrib/amulet/utils.py 2014-09-16 09:08:32 +0000
4725@@ -0,0 +1,176 @@
4726+import ConfigParser
4727+import io
4728+import logging
4729+import re
4730+import sys
4731+import time
4732+
4733+
4734+class AmuletUtils(object):
4735+ """Amulet utilities.
4736+
4737+ This class provides common utility functions that are used by Amulet
4738+ tests.
4739+ """
4740+
4741+ def __init__(self, log_level=logging.ERROR):
4742+ self.log = self.get_logger(level=log_level)
4743+
4744+ def get_logger(self, name="amulet-logger", level=logging.DEBUG):
4745+ """Get a logger object that will log to stdout."""
4746+ log = logging
4747+ logger = log.getLogger(name)
4748+ fmt = log.Formatter("%(asctime)s %(funcName)s "
4749+ "%(levelname)s: %(message)s")
4750+
4751+ handler = log.StreamHandler(stream=sys.stdout)
4752+ handler.setLevel(level)
4753+ handler.setFormatter(fmt)
4754+
4755+ logger.addHandler(handler)
4756+ logger.setLevel(level)
4757+
4758+ return logger
4759+
4760+ def valid_ip(self, ip):
4761+ if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
4762+ return True
4763+ else:
4764+ return False
4765+
4766+ def valid_url(self, url):
4767+ p = re.compile(
4768+ r'^(?:http|ftp)s?://'
4769+ r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
4770+ r'localhost|'
4771+ r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
4772+ r'(?::\d+)?'
4773+ r'(?:/?|[/?]\S+)$',
4774+ re.IGNORECASE)
4775+ if p.match(url):
4776+ return True
4777+ else:
4778+ return False
4779+
4780+ def validate_services(self, commands):
4781+ """Validate services.
4782+
4783+ Verify the specified services are running on the corresponding
4784+ service units.
4785+ """
4786+ for k, v in commands.iteritems():
4787+ for cmd in v:
4788+ output, code = k.run(cmd)
4789+ if code != 0:
4790+ return "command `{}` returned {}".format(cmd, str(code))
4791+ return None
4792+
4793+ def _get_config(self, unit, filename):
4794+ """Get a ConfigParser object for parsing a unit's config file."""
4795+ file_contents = unit.file_contents(filename)
4796+ config = ConfigParser.ConfigParser()
4797+ config.readfp(io.StringIO(file_contents))
4798+ return config
4799+
4800+ def validate_config_data(self, sentry_unit, config_file, section,
4801+ expected):
4802+ """Validate config file data.
4803+
4804+ Verify that the specified section of the config file contains
4805+ the expected option key:value pairs.
4806+ """
4807+ config = self._get_config(sentry_unit, config_file)
4808+
4809+ if section != 'DEFAULT' and not config.has_section(section):
4810+ return "section [{}] does not exist".format(section)
4811+
4812+ for k in expected.keys():
4813+ if not config.has_option(section, k):
4814+ return "section [{}] is missing option {}".format(section, k)
4815+ if config.get(section, k) != expected[k]:
4816+ return "section [{}] {}:{} != expected {}:{}".format(
4817+ section, k, config.get(section, k), k, expected[k])
4818+ return None
4819+
4820+ def _validate_dict_data(self, expected, actual):
4821+ """Validate dictionary data.
4822+
4823+ Compare expected dictionary data vs actual dictionary data.
4824+ The values in the 'expected' dictionary can be strings, bools, ints,
4825+ longs, or can be a function that evaluates a variable and returns a
4826+ bool.
4827+ """
4828+ for k, v in expected.iteritems():
4829+ if k in actual:
4830+ if (isinstance(v, basestring) or
4831+ isinstance(v, bool) or
4832+ isinstance(v, (int, long))):
4833+ if v != actual[k]:
4834+ return "{}:{}".format(k, actual[k])
4835+ elif not v(actual[k]):
4836+ return "{}:{}".format(k, actual[k])
4837+ else:
4838+ return "key '{}' does not exist".format(k)
4839+ return None
4840+
4841+ def validate_relation_data(self, sentry_unit, relation, expected):
4842+ """Validate actual relation data based on expected relation data."""
4843+ actual = sentry_unit.relation(relation[0], relation[1])
4844+ self.log.debug('actual: {}'.format(repr(actual)))
4845+ return self._validate_dict_data(expected, actual)
4846+
4847+ def _validate_list_data(self, expected, actual):
4848+ """Compare expected list vs actual list data."""
4849+ for e in expected:
4850+ if e not in actual:
4851+ return "expected item {} not found in actual list".format(e)
4852+ return None
4853+
4854+ def not_null(self, string):
4855+ if string is not None:
4856+ return True
4857+ else:
4858+ return False
4859+
4860+ def _get_file_mtime(self, sentry_unit, filename):
4861+ """Get last modification time of file."""
4862+ return sentry_unit.file_stat(filename)['mtime']
4863+
4864+ def _get_dir_mtime(self, sentry_unit, directory):
4865+ """Get last modification time of directory."""
4866+ return sentry_unit.directory_stat(directory)['mtime']
4867+
4868+ def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
4869+ """Get process' start time.
4870+
4871+ Determine start time of the process based on the last modification
4872+ time of the /proc/pid directory. If pgrep_full is True, the process
4873+ name is matched against the full command line.
4874+ """
4875+ if pgrep_full:
4876+ cmd = 'pgrep -o -f {}'.format(service)
4877+ else:
4878+ cmd = 'pgrep -o {}'.format(service)
4879+ proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
4880+ return self._get_dir_mtime(sentry_unit, proc_dir)
4881+
4882+ def service_restarted(self, sentry_unit, service, filename,
4883+ pgrep_full=False, sleep_time=20):
4884+ """Check if service was restarted.
4885+
4886+ Compare a service's start time vs a file's last modification time
4887+ (such as a config file for that service) to determine if the service
4888+ has been restarted.
4889+ """
4890+ time.sleep(sleep_time)
4891+ if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
4892+ self._get_file_mtime(sentry_unit, filename)):
4893+ return True
4894+ else:
4895+ return False
4896+
4897+ def relation_error(self, name, data):
4898+ return 'unexpected relation data in {} - {}'.format(name, data)
4899+
4900+ def endpoint_error(self, name, data):
4901+ return 'unexpected endpoint data in {} - {}'.format(name, data)
4902
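The validation helpers above are what the new amulet tests build on; purely as an illustration (the service names, config path and expected values here are hypothetical, and the sentry unit would come from an already set up amulet deployment), they might be driven like this:

    import logging

    import amulet
    from charmhelpers.contrib.amulet.utils import AmuletUtils

    u = AmuletUtils(logging.DEBUG)


    def check_services(unit):
        # Map the sentry unit to the upstart status commands that must
        # succeed on it; validate_services() returns None on success.
        commands = {unit: ['status nova-scheduler',
                           'status nova-api-os-compute']}
        return u.validate_services(commands)


    def check_nova_default_config(unit):
        # validate_config_data() likewise returns None when every expected
        # key:value pair is present in the [DEFAULT] section.
        expected = {'verbose': 'True', 'use_syslog': 'False'}
        return u.validate_config_data(unit, '/etc/nova/nova.conf',
                                      'DEFAULT', expected)


    # e.g. from a test, with `d` an amulet.Deployment that has been deployed:
    #     ret = check_services(d.sentry.unit['nova-cloud-controller/0'])
    #     if ret:
    #         amulet.raise_status(amulet.FAIL, msg=ret)
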
4903=== added directory 'tests/charmhelpers/contrib/openstack'
4904=== added file 'tests/charmhelpers/contrib/openstack/__init__.py'
4905=== added directory 'tests/charmhelpers/contrib/openstack/amulet'
4906=== added file 'tests/charmhelpers/contrib/openstack/amulet/__init__.py'
4907=== added file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
4908--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
4909+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-09-16 09:08:32 +0000
4910@@ -0,0 +1,61 @@
4911+from charmhelpers.contrib.amulet.deployment import (
4912+ AmuletDeployment
4913+)
4914+
4915+
4916+class OpenStackAmuletDeployment(AmuletDeployment):
4917+ """OpenStack amulet deployment.
4918+
4919+ This class inherits from AmuletDeployment and has additional support
4920+ that is specifically for use by OpenStack charms.
4921+ """
4922+
4923+ def __init__(self, series=None, openstack=None, source=None):
4924+ """Initialize the deployment environment."""
4925+ super(OpenStackAmuletDeployment, self).__init__(series)
4926+ self.openstack = openstack
4927+ self.source = source
4928+
4929+ def _add_services(self, this_service, other_services):
4930+ """Add services to the deployment and set openstack-origin."""
4931+ super(OpenStackAmuletDeployment, self)._add_services(this_service,
4932+ other_services)
4933+ name = 0
4934+ services = other_services
4935+ services.append(this_service)
4936+ use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
4937+
4938+ if self.openstack:
4939+ for svc in services:
4940+ if svc[name] not in use_source:
4941+ config = {'openstack-origin': self.openstack}
4942+ self.d.configure(svc[name], config)
4943+
4944+ if self.source:
4945+ for svc in services:
4946+ if svc[name] in use_source:
4947+ config = {'source': self.source}
4948+ self.d.configure(svc[name], config)
4949+
4950+ def _configure_services(self, configs):
4951+ """Configure all of the services."""
4952+ for service, config in configs.iteritems():
4953+ self.d.configure(service, config)
4954+
4955+ def _get_openstack_release(self):
4956+ """Get openstack release.
4957+
4958+ Return an integer representing the enum value of the openstack
4959+ release.
4960+ """
4961+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
4962+ self.precise_havana, self.precise_icehouse,
4963+ self.trusty_icehouse) = range(6)
4964+ releases = {
4965+ ('precise', None): self.precise_essex,
4966+ ('precise', 'cloud:precise-folsom'): self.precise_folsom,
4967+ ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
4968+ ('precise', 'cloud:precise-havana'): self.precise_havana,
4969+ ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
4970+ ('trusty', None): self.trusty_icehouse}
4971+ return releases[(self.series, self.openstack)]
4972
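In practice a charm's test harness subclasses this deployment class rather than using it directly. A rough sketch only, assuming the base AmuletDeployment (added earlier in this diff) supplies _deploy(), and using an illustrative (name, units) tuple form that matches the name = 0 indexing above:

    from charmhelpers.contrib.openstack.amulet.deployment import (
        OpenStackAmuletDeployment
    )


    class ExampleBasicDeployment(OpenStackAmuletDeployment):
        """Hypothetical amulet harness; the service list is illustrative."""

        def __init__(self, series=None, openstack=None, source=None):
            super(ExampleBasicDeployment, self).__init__(series, openstack,
                                                         source)
            # (service-name, unit-count) pairs; _add_services() above sets
            # openstack-origin on everything except the use_source services.
            this_service = ('nova-cloud-controller', 1)
            other_services = [('mysql', 1), ('rabbitmq-server', 1)]
            self._add_services(this_service, other_services)
            self._deploy()

            # Tests can then branch on the release enum, e.g. to skip
            # checks that only apply from icehouse onwards.
            if self._get_openstack_release() >= self.precise_icehouse:
                pass
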
4973=== added file 'tests/charmhelpers/contrib/openstack/amulet/utils.py'
4974--- tests/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
4975+++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-09-16 09:08:32 +0000
4976@@ -0,0 +1,275 @@
4977+import logging
4978+import os
4979+import time
4980+import urllib
4981+
4982+import glanceclient.v1.client as glance_client
4983+import keystoneclient.v2_0 as keystone_client
4984+import novaclient.v1_1.client as nova_client
4985+
4986+from charmhelpers.contrib.amulet.utils import (
4987+ AmuletUtils
4988+)
4989+
4990+DEBUG = logging.DEBUG
4991+ERROR = logging.ERROR
4992+
4993+
4994+class OpenStackAmuletUtils(AmuletUtils):
4995+ """OpenStack amulet utilities.
4996+
4997+ This class inherits from AmuletUtils and has additional support
4998+ that is specifically for use by OpenStack charms.
4999+ """
5000+
The diff has been truncated for viewing.
