Merge lp:~james-page/charms/trusty/nova-compute/fixup-secgroups into lp:~openstack-charmers-archive/charms/trusty/nova-compute/trunk

Proposed by James Page
Status: Superseded
Proposed branch: lp:~james-page/charms/trusty/nova-compute/fixup-secgroups
Merge into: lp:~openstack-charmers-archive/charms/trusty/nova-compute/trunk
Diff against target: 5335 lines (+3738/-274) (has conflicts)
52 files modified
.bzrignore (+2/-0)
Makefile (+25/-1)
charm-helpers-hooks.yaml (+12/-0)
charm-helpers-tests.yaml (+5/-0)
config.yaml (+34/-2)
hooks/charmhelpers/contrib/hahelpers/apache.py (+10/-3)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+59/-17)
hooks/charmhelpers/contrib/network/ip.py (+343/-0)
hooks/charmhelpers/contrib/network/ovs/__init__.py (+6/-1)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+94/-0)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+276/-0)
hooks/charmhelpers/contrib/openstack/context.py (+237/-59)
hooks/charmhelpers/contrib/openstack/ip.py (+79/-0)
hooks/charmhelpers/contrib/openstack/neutron.py (+14/-0)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+18/-4)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend (+9/-8)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf (+9/-8)
hooks/charmhelpers/contrib/openstack/templating.py (+22/-23)
hooks/charmhelpers/contrib/openstack/utils.py (+45/-6)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+1/-1)
hooks/charmhelpers/core/fstab.py (+116/-0)
hooks/charmhelpers/core/hookenv.py (+49/-21)
hooks/charmhelpers/core/host.py (+75/-11)
hooks/charmhelpers/core/services/__init__.py (+2/-0)
hooks/charmhelpers/core/services/base.py (+313/-0)
hooks/charmhelpers/core/services/helpers.py (+239/-0)
hooks/charmhelpers/core/templating.py (+51/-0)
hooks/charmhelpers/fetch/__init__.py (+115/-32)
hooks/charmhelpers/fetch/archiveurl.py (+49/-4)
hooks/nova_compute_context.py (+48/-3)
hooks/nova_compute_hooks.py (+26/-14)
hooks/nova_compute_utils.py (+69/-20)
metadata.yaml (+3/-0)
templates/grizzly/nova.conf (+0/-4)
templates/havana/nova.conf (+29/-4)
templates/icehouse/ml2_conf.ini (+1/-1)
tests/00-setup (+10/-0)
tests/10-basic-precise-essex (+9/-0)
tests/11-basic-precise-folsom (+17/-0)
tests/12-basic-precise-grizzly (+11/-0)
tests/13-basic-precise-havana (+11/-0)
tests/14-basic-precise-icehouse (+11/-0)
tests/15-basic-trusty-icehouse (+9/-0)
tests/README (+47/-0)
tests/basic_deployment.py (+406/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+72/-0)
tests/charmhelpers/contrib/amulet/utils.py (+176/-0)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+94/-0)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+276/-0)
unit_tests/test_nova_compute_contexts.py (+16/-0)
unit_tests/test_nova_compute_hooks.py (+22/-5)
unit_tests/test_nova_compute_utils.py (+66/-22)
Conflict adding file .bzrignore.  Moved existing file to .bzrignore.moved.
Text conflict in Makefile
Contents conflict in charm-helpers.yaml
Text conflict in hooks/charmhelpers/contrib/openstack/context.py
Text conflict in hooks/charmhelpers/contrib/openstack/utils.py
Conflict adding file hooks/charmhelpers/core/fstab.py.  Moved existing file to hooks/charmhelpers/core/fstab.py.moved.
Text conflict in hooks/charmhelpers/core/host.py
Text conflict in hooks/charmhelpers/fetch/__init__.py
To merge this branch: bzr merge lp:~james-page/charms/trusty/nova-compute/fixup-secgroups
Reviewer: OpenStack Charmers (status: Pending)
Review via email: mp+236873@code.launchpad.net

This proposal has been superseded by a proposal from 2014-10-02.



Preview Diff

1=== added file '.bzrignore'
2--- .bzrignore 1970-01-01 00:00:00 +0000
3+++ .bzrignore 2014-10-02 12:57:31 +0000
4@@ -0,0 +1,2 @@
5+bin
6+.coverage
7
8=== renamed file '.bzrignore' => '.bzrignore.moved'
9=== modified file 'Makefile'
10--- Makefile 2014-09-10 00:00:24 +0000
11+++ Makefile 2014-10-02 12:57:31 +0000
12@@ -2,9 +2,10 @@
13 PYTHON := /usr/bin/env python
14
15 lint:
16- @flake8 --exclude hooks/charmhelpers hooks unit_tests
17+ @flake8 --exclude hooks/charmhelpers hooks unit_tests tests
18 @charm proof
19
20+<<<<<<< TREE
21 test: .venv
22 @echo Starting tests...
23 .venv/bin/nosetests --nologcapture --with-coverage unit_tests
24@@ -18,6 +19,29 @@
25 @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml
26
27 publish: lint test
28+=======
29+unit_test:
30+ @echo Starting unit tests...
31+ @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
32+
33+bin/charm_helpers_sync.py:
34+ @mkdir -p bin
35+ @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
36+ > bin/charm_helpers_sync.py
37+
38+sync: bin/charm_helpers_sync.py
39+ @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
40+ @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
41+
42+test:
43+ @echo Starting Amulet tests...
44+ # coreycb note: The -v should only be temporary until Amulet sends
45+ # raise_status() messages to stderr:
46+ # https://bugs.launchpad.net/amulet/+bug/1320357
47+ @juju test -v -p AMULET_HTTP_PROXY
48+
49+publish: lint unit_test
50+>>>>>>> MERGE-SOURCE
51 bzr push lp:charms/nova-compute
52 bzr push lp:charms/trusty/nova-compute
53
54
55=== added file 'charm-helpers-hooks.yaml'
56--- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000
57+++ charm-helpers-hooks.yaml 2014-10-02 12:57:31 +0000
58@@ -0,0 +1,12 @@
59+branch: lp:charm-helpers
60+destination: hooks/charmhelpers
61+include:
62+ - core
63+ - fetch
64+ - contrib.openstack|inc=*
65+ - contrib.storage
66+ - contrib.hahelpers:
67+ - apache
68+ - cluster
69+ - contrib.network
70+ - payload.execd
71
72=== added file 'charm-helpers-tests.yaml'
73--- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000
74+++ charm-helpers-tests.yaml 2014-10-02 12:57:31 +0000
75@@ -0,0 +1,5 @@
76+branch: lp:charm-helpers
77+destination: tests/charmhelpers
78+include:
79+ - contrib.amulet
80+ - contrib.openstack.amulet
81
82=== renamed file 'charm-helpers.yaml' => 'charm-helpers.yaml.THIS'
83=== modified file 'config.yaml'
84--- config.yaml 2014-09-10 00:00:24 +0000
85+++ config.yaml 2014-10-02 12:57:31 +0000
86@@ -66,11 +66,11 @@
87 type: boolean
88 description: Configure libvirt for live migration.
89 migration-auth-type:
90- default: sasl
91 type: string
92+ default:
93 description: |
94 TCP authentication scheme for libvirt live migration. Available options
95- include sasl or none.
96+ include ssh.
97 # needed if using flatmanager
98 bridge-interface:
99 default: br100
100@@ -107,3 +107,35 @@
101 juju-myservice-0
102 If you're running multiple environments with the same services in them
103 this allows you to differentiate between them.
104+ disable-neutron-security-groups:
105+ type: boolean
106+ default:
107+ description: |
108+ Disable neutron-based security groups - setting this configuration option
109+ will override any settings configured via the nova-cloud-controller charm.
110+ .
111+ BE CAREFUL - this option allows you to disable all port-level security within
112+ an OpenStack cloud.
113+ # Network configuration options
114+ # by default all access is over 'private-address'
115+ os-data-network:
116+ type: string
117+ default:
118+ description: |
119+ The IP address and netmask of the OpenStack Data network (e.g.,
120+ 192.168.0.0/24)
121+ .
122+ This network will be used for tenant network traffic in overlay
123+ networks.
124+ prefer-ipv6:
125+ type: boolean
126+ default: False
127+ description: |
128+ If True enables IPv6 support. The charm will expect network interfaces
129+ to be configured with an IPv6 address. If set to False (default) IPv4
130+ is expected.
131+ .
132+ NOTE: these charms do not currently support the IPv6 privacy extension. In
133+ order for this charm to function correctly, the privacy extension must be
134+ disabled and a non-temporary address must be configured/available on
135+ your network interface.
136
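
Usage note (not part of the diff): a minimal sketch of how hook code might consume the new option via charmhelpers; the helper name and fallback behaviour shown here are illustrative, not this branch's implementation.

    from charmhelpers.core.hookenv import config

    def neutron_security_groups_enabled(relation_default=True):
        # The charm-level override, when set, wins over whatever
        # nova-cloud-controller negotiated on the relation.
        if config('disable-neutron-security-groups'):
            return False
        return relation_default
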
137=== modified file 'hooks/charmhelpers/contrib/hahelpers/apache.py'
138--- hooks/charmhelpers/contrib/hahelpers/apache.py 2014-03-27 11:08:20 +0000
139+++ hooks/charmhelpers/contrib/hahelpers/apache.py 2014-10-02 12:57:31 +0000
140@@ -20,20 +20,27 @@
141 )
142
143
144-def get_cert():
145+def get_cert(cn=None):
146+ # TODO: deal with multiple https endpoints via charm config
147 cert = config_get('ssl_cert')
148 key = config_get('ssl_key')
149 if not (cert and key):
150 log("Inspecting identity-service relations for SSL certificate.",
151 level=INFO)
152 cert = key = None
153+ if cn:
154+ ssl_cert_attr = 'ssl_cert_{}'.format(cn)
155+ ssl_key_attr = 'ssl_key_{}'.format(cn)
156+ else:
157+ ssl_cert_attr = 'ssl_cert'
158+ ssl_key_attr = 'ssl_key'
159 for r_id in relation_ids('identity-service'):
160 for unit in relation_list(r_id):
161 if not cert:
162- cert = relation_get('ssl_cert',
163+ cert = relation_get(ssl_cert_attr,
164 rid=r_id, unit=unit)
165 if not key:
166- key = relation_get('ssl_key',
167+ key = relation_get(ssl_key_attr,
168 rid=r_id, unit=unit)
169 return (cert, key)
170
171
172=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
173--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-04-04 16:45:38 +0000
174+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-10-02 12:57:31 +0000
175@@ -6,6 +6,11 @@
176 # Adam Gandelman <adamg@ubuntu.com>
177 #
178
179+"""
180+Helpers for clustering and determining "cluster leadership" and other
181+clustering-related helpers.
182+"""
183+
184 import subprocess
185 import os
186
187@@ -19,6 +24,7 @@
188 config as config_get,
189 INFO,
190 ERROR,
191+ WARNING,
192 unit_get,
193 )
194
195@@ -27,6 +33,29 @@
196 pass
197
198
199+def is_elected_leader(resource):
200+ """
201+ Returns True if the charm executing this is the elected cluster leader.
202+
203+ It relies on two mechanisms to determine leadership:
204+ 1. If the charm is part of a corosync cluster, call corosync to
205+ determine leadership.
206+ 2. If the charm is not part of a corosync cluster, the leader is
207+ determined as being "the alive unit with the lowest unit number". In
208+ other words, the oldest surviving unit.
209+ """
210+ if is_clustered():
211+ if not is_crm_leader(resource):
212+ log('Deferring action to CRM leader.', level=INFO)
213+ return False
214+ else:
215+ peers = peer_units()
216+ if peers and not oldest_peer(peers):
217+ log('Deferring action to oldest service unit.', level=INFO)
218+ return False
219+ return True
220+
221+
222 def is_clustered():
223 for r_id in (relation_ids('ha') or []):
224 for unit in (relation_list(r_id) or []):
225@@ -38,7 +67,11 @@
226 return False
227
228
229-def is_leader(resource):
230+def is_crm_leader(resource):
231+ """
232+ Returns True if the charm calling this is the elected corosync leader,
233+ as returned by calling the external "crm" command.
234+ """
235 cmd = [
236 "crm", "resource",
237 "show", resource
238@@ -54,15 +87,31 @@
239 return False
240
241
242-def peer_units():
243+def is_leader(resource):
244+ log("is_leader is deprecated. Please consider using is_crm_leader "
245+ "instead.", level=WARNING)
246+ return is_crm_leader(resource)
247+
248+
249+def peer_units(peer_relation="cluster"):
250 peers = []
251- for r_id in (relation_ids('cluster') or []):
252+ for r_id in (relation_ids(peer_relation) or []):
253 for unit in (relation_list(r_id) or []):
254 peers.append(unit)
255 return peers
256
257
258+def peer_ips(peer_relation='cluster', addr_key='private-address'):
259+ '''Return a dict of peers and their private-address'''
260+ peers = {}
261+ for r_id in relation_ids(peer_relation):
262+ for unit in relation_list(r_id):
263+ peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
264+ return peers
265+
266+
267 def oldest_peer(peers):
268+ """Determines who the oldest peer is by comparing unit numbers."""
269 local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
270 for peer in peers:
271 remote_unit_no = int(peer.split('/')[1])
272@@ -72,16 +121,9 @@
273
274
275 def eligible_leader(resource):
276- if is_clustered():
277- if not is_leader(resource):
278- log('Deferring action to CRM leader.', level=INFO)
279- return False
280- else:
281- peers = peer_units()
282- if peers and not oldest_peer(peers):
283- log('Deferring action to oldest service unit.', level=INFO)
284- return False
285- return True
286+ log("eligible_leader is deprecated. Please consider using "
287+ "is_elected_leader instead.", level=WARNING)
288+ return is_elected_leader(resource)
289
290
291 def https():
292@@ -97,10 +139,9 @@
293 return True
294 for r_id in relation_ids('identity-service'):
295 for unit in relation_list(r_id):
296+ # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
297 rel_state = [
298 relation_get('https_keystone', rid=r_id, unit=unit),
299- relation_get('ssl_cert', rid=r_id, unit=unit),
300- relation_get('ssl_key', rid=r_id, unit=unit),
301 relation_get('ca_cert', rid=r_id, unit=unit),
302 ]
303 # NOTE: works around (LP: #1203241)
304@@ -146,12 +187,12 @@
305 Obtains all relevant configuration from charm configuration required
306 for initiating a relation to hacluster:
307
308- ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
309+ ha-bindiface, ha-mcastport, vip
310
311 returns: dict: A dict containing settings keyed by setting name.
312 raises: HAIncompleteConfig if settings are missing.
313 '''
314- settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
315+ settings = ['ha-bindiface', 'ha-mcastport', 'vip']
316 conf = {}
317 for setting in settings:
318 conf[setting] = config_get(setting)
319@@ -170,6 +211,7 @@
320
321 :configs : OSTemplateRenderer: A config tempating object to inspect for
322 a complete https context.
323+
324 :vip_setting: str: Setting in charm config that specifies
325 VIP address.
326 '''
327
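
Usage note (not part of the diff): the intended call pattern for the renamed helper; the resource name is illustrative.

    from charmhelpers.contrib.hahelpers.cluster import is_elected_leader

    def run_once_per_cluster():
        # Only one unit should perform cluster-wide actions such as
        # schema migrations; the others defer to the elected leader.
        if not is_elected_leader('res_nova_vip'):
            return
        # ... perform the one-time action here ...
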
328=== added file 'hooks/charmhelpers/contrib/network/ip.py'
329--- hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000
330+++ hooks/charmhelpers/contrib/network/ip.py 2014-10-02 12:57:31 +0000
331@@ -0,0 +1,343 @@
332+import glob
333+import re
334+import subprocess
335+import sys
336+
337+from functools import partial
338+
339+from charmhelpers.core.hookenv import unit_get
340+from charmhelpers.fetch import apt_install
341+from charmhelpers.core.hookenv import (
342+ WARNING,
343+ ERROR,
344+ log
345+)
346+
347+try:
348+ import netifaces
349+except ImportError:
350+ apt_install('python-netifaces')
351+ import netifaces
352+
353+try:
354+ import netaddr
355+except ImportError:
356+ apt_install('python-netaddr')
357+ import netaddr
358+
359+
360+def _validate_cidr(network):
361+ try:
362+ netaddr.IPNetwork(network)
363+ except (netaddr.core.AddrFormatError, ValueError):
364+ raise ValueError("Network (%s) is not in CIDR presentation format" %
365+ network)
366+
367+
368+def get_address_in_network(network, fallback=None, fatal=False):
369+ """
370+ Get an IPv4 or IPv6 address within the network from the host.
371+
372+ :param network (str): CIDR presentation format. For example,
373+ '192.168.1.0/24'.
374+ :param fallback (str): If no address is found, return fallback.
375+ :param fatal (boolean): If no address is found, fallback is not
376+ set and fatal is True then exit(1).
377+
378+ """
379+
380+ def not_found_error_out():
381+ log("No IP address found in network: %s" % network,
382+ level=ERROR)
383+ sys.exit(1)
384+
385+ if network is None:
386+ if fallback is not None:
387+ return fallback
388+ else:
389+ if fatal:
390+ not_found_error_out()
391+
392+ _validate_cidr(network)
393+ network = netaddr.IPNetwork(network)
394+ for iface in netifaces.interfaces():
395+ addresses = netifaces.ifaddresses(iface)
396+ if network.version == 4 and netifaces.AF_INET in addresses:
397+ addr = addresses[netifaces.AF_INET][0]['addr']
398+ netmask = addresses[netifaces.AF_INET][0]['netmask']
399+ cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
400+ if cidr in network:
401+ return str(cidr.ip)
402+ if network.version == 6 and netifaces.AF_INET6 in addresses:
403+ for addr in addresses[netifaces.AF_INET6]:
404+ if not addr['addr'].startswith('fe80'):
405+ cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
406+ addr['netmask']))
407+ if cidr in network:
408+ return str(cidr.ip)
409+
410+ if fallback is not None:
411+ return fallback
412+
413+ if fatal:
414+ not_found_error_out()
415+
416+ return None
417+
418+
419+def is_ipv6(address):
420+ '''Determine whether provided address is IPv6 or not'''
421+ try:
422+ address = netaddr.IPAddress(address)
423+ except netaddr.AddrFormatError:
424+ # probably a hostname - so not an address at all!
425+ return False
426+ else:
427+ return address.version == 6
428+
429+
430+def is_address_in_network(network, address):
431+ """
432+ Determine whether the provided address is within a network range.
433+
434+ :param network (str): CIDR presentation format. For example,
435+ '192.168.1.0/24'.
436+ :param address: An individual IPv4 or IPv6 address without a net
437+ mask or subnet prefix. For example, '192.168.1.1'.
438+ :returns boolean: Flag indicating whether address is in network.
439+ """
440+ try:
441+ network = netaddr.IPNetwork(network)
442+ except (netaddr.core.AddrFormatError, ValueError):
443+ raise ValueError("Network (%s) is not in CIDR presentation format" %
444+ network)
445+ try:
446+ address = netaddr.IPAddress(address)
447+ except (netaddr.core.AddrFormatError, ValueError):
448+ raise ValueError("Address (%s) is not in correct presentation format" %
449+ address)
450+ if address in network:
451+ return True
452+ else:
453+ return False
454+
455+
456+def _get_for_address(address, key):
457+ """Retrieve an attribute of or the physical interface that
458+ the IP address provided could be bound to.
459+
460+ :param address (str): An individual IPv4 or IPv6 address without a net
461+ mask or subnet prefix. For example, '192.168.1.1'.
462+ :param key: 'iface' for the physical interface name or an attribute
463+ of the configured interface, for example 'netmask'.
464+ :returns str: Requested attribute or None if address is not bindable.
465+ """
466+ address = netaddr.IPAddress(address)
467+ for iface in netifaces.interfaces():
468+ addresses = netifaces.ifaddresses(iface)
469+ if address.version == 4 and netifaces.AF_INET in addresses:
470+ addr = addresses[netifaces.AF_INET][0]['addr']
471+ netmask = addresses[netifaces.AF_INET][0]['netmask']
472+ cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
473+ if address in cidr:
474+ if key == 'iface':
475+ return iface
476+ else:
477+ return addresses[netifaces.AF_INET][0][key]
478+ if address.version == 6 and netifaces.AF_INET6 in addresses:
479+ for addr in addresses[netifaces.AF_INET6]:
480+ if not addr['addr'].startswith('fe80'):
481+ cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
482+ addr['netmask']))
483+ if address in cidr:
484+ if key == 'iface':
485+ return iface
486+ else:
487+ return addr[key]
488+ return None
489+
490+
491+get_iface_for_address = partial(_get_for_address, key='iface')
492+
493+get_netmask_for_address = partial(_get_for_address, key='netmask')
494+
495+
496+def format_ipv6_addr(address):
497+ """
498+ IPv6 addresses need to be wrapped in [] in URLs to parse correctly.
499+ """
500+ if is_ipv6(address):
501+ address = "[%s]" % address
502+ else:
503+ log("Not a valid ipv6 address: %s" % address, level=WARNING)
504+ address = None
505+
506+ return address
507+
508+
509+def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
510+ fatal=True, exc_list=None):
511+ """
512+ Return the assigned IP address for a given interface, if any, or [].
513+ """
514+ # Extract nic if passed /dev/ethX
515+ if '/' in iface:
516+ iface = iface.split('/')[-1]
517+ if not exc_list:
518+ exc_list = []
519+ try:
520+ inet_num = getattr(netifaces, inet_type)
521+ except AttributeError:
522+ raise Exception('Unknown inet type ' + str(inet_type))
523+
524+ interfaces = netifaces.interfaces()
525+ if inc_aliases:
526+ ifaces = []
527+ for _iface in interfaces:
528+ if iface == _iface or _iface.split(':')[0] == iface:
529+ ifaces.append(_iface)
530+ if fatal and not ifaces:
531+ raise Exception("Invalid interface '%s'" % iface)
532+ ifaces.sort()
533+ else:
534+ if iface not in interfaces:
535+ if fatal:
536+ raise Exception("%s not found " % (iface))
537+ else:
538+ return []
539+ else:
540+ ifaces = [iface]
541+
542+ addresses = []
543+ for netiface in ifaces:
544+ net_info = netifaces.ifaddresses(netiface)
545+ if inet_num in net_info:
546+ for entry in net_info[inet_num]:
547+ if 'addr' in entry and entry['addr'] not in exc_list:
548+ addresses.append(entry['addr'])
549+ if fatal and not addresses:
550+ raise Exception("Interface '%s' doesn't have any %s addresses." %
551+ (iface, inet_type))
552+ return addresses
553+
554+get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
555+
556+
557+def get_iface_from_addr(addr):
558+ """Work out on which interface the provided address is configured."""
559+ for iface in netifaces.interfaces():
560+ addresses = netifaces.ifaddresses(iface)
561+ for inet_type in addresses:
562+ for _addr in addresses[inet_type]:
563+ _addr = _addr['addr']
564+ # link local
565+ ll_key = re.compile("(.+)%.*")
566+ raw = re.match(ll_key, _addr)
567+ if raw:
568+ _addr = raw.group(1)
569+ if _addr == addr:
570+ log("Address '%s' is configured on iface '%s'" %
571+ (addr, iface))
572+ return iface
573+
574+ msg = "Unable to infer net iface on which '%s' is configured" % (addr)
575+ raise Exception(msg)
576+
577+
578+def sniff_iface(f):
579+ """If no iface provided, inject net iface inferred from unit private
580+ address.
581+ """
582+ def iface_sniffer(*args, **kwargs):
583+ if not kwargs.get('iface', None):
584+ kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
585+
586+ return f(*args, **kwargs)
587+
588+ return iface_sniffer
589+
590+
591+@sniff_iface
592+def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
593+ dynamic_only=True):
594+ """Get assigned IPv6 address for a given interface.
595+
596+ Returns list of addresses found. If no address found, returns empty list.
597+
598+ If iface is None, we infer the current primary interface by doing a reverse
599+ lookup on the unit private-address.
600+
601+ We currently only support scope global IPv6 addresses i.e. non-temporary
602+ addresses. If no global IPv6 address is found, return the first one found
603+ in the ipv6 address list.
604+ """
605+ addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
606+ inc_aliases=inc_aliases, fatal=fatal,
607+ exc_list=exc_list)
608+
609+ if addresses:
610+ global_addrs = []
611+ for addr in addresses:
612+ key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
613+ m = re.match(key_scope_link_local, addr)
614+ if m:
615+ eui_64_mac = m.group(1)
616+ iface = m.group(2)
617+ else:
618+ global_addrs.append(addr)
619+
620+ if global_addrs:
621+ # Make sure any found global addresses are not temporary
622+ cmd = ['ip', 'addr', 'show', iface]
623+ out = subprocess.check_output(cmd)
624+ if dynamic_only:
625+ key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
626+ else:
627+ key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
628+
629+ addrs = []
630+ for line in out.split('\n'):
631+ line = line.strip()
632+ m = re.match(key, line)
633+ if m and 'temporary' not in line:
634+ # Return the first valid address we find
635+ for addr in global_addrs:
636+ if m.group(1) == addr:
637+ if not dynamic_only or \
638+ m.group(1).endswith(eui_64_mac):
639+ addrs.append(addr)
640+
641+ if addrs:
642+ return addrs
643+
644+ if fatal:
645+ raise Exception("Interface '%s' doesn't have a scope global "
646+ "non-temporary ipv6 address." % iface)
647+
648+ return []
649+
650+
651+def get_bridges(vnic_dir='/sys/devices/virtual/net'):
652+ """
653+ Return a list of bridges on the system or []
654+ """
655+ b_rgex = vnic_dir + '/*/bridge'
656+ return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)]
657+
658+
659+def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
660+ """
661+ Return a list of nics comprising a given bridge on the system or []
662+ """
663+ brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge)
664+ return [x.split('/')[-1] for x in glob.glob(brif_rgex)]
665+
666+
667+def is_bridge_member(nic):
668+ """
669+ Check if a given nic is a member of a bridge
670+ """
671+ for bridge in get_bridges():
672+ if nic in get_bridge_nics(bridge):
673+ return True
674+ return False
675
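
Usage note (not part of the diff): a short sketch of the new address helpers; the CIDR and addresses are examples only.

    from charmhelpers.contrib.network.ip import (
        get_address_in_network,
        is_address_in_network,
    )

    # Pick the local address that falls inside the OS data network,
    # falling back to the unit's private-address if none matches.
    addr = get_address_in_network('192.168.0.0/24', fallback='10.0.0.5')

    assert is_address_in_network('192.168.0.0/24', '192.168.0.10')
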
676=== modified file 'hooks/charmhelpers/contrib/network/ovs/__init__.py'
677--- hooks/charmhelpers/contrib/network/ovs/__init__.py 2013-10-22 23:02:45 +0000
678+++ hooks/charmhelpers/contrib/network/ovs/__init__.py 2014-10-02 12:57:31 +0000
679@@ -21,12 +21,16 @@
680 subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])
681
682
683-def add_bridge_port(name, port):
684+def add_bridge_port(name, port, promisc=False):
685 ''' Add a port to the named openvswitch bridge '''
686 log('Adding port {} to bridge {}'.format(port, name))
687 subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
688 name, port])
689 subprocess.check_call(["ip", "link", "set", port, "up"])
690+ if promisc:
691+ subprocess.check_call(["ip", "link", "set", port, "promisc", "on"])
692+ else:
693+ subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
694
695
696 def del_bridge_port(name, port):
697@@ -35,6 +39,7 @@
698 subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
699 name, port])
700 subprocess.check_call(["ip", "link", "set", port, "down"])
701+ subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
702
703
704 def set_manager(manager):
705
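
Usage note (not part of the diff): the extended port API in use; the bridge and NIC names are illustrative and the call requires Open vSwitch on the host.

    from charmhelpers.contrib.network.ovs import add_bridge_port

    # promisc=True additionally puts the port into promiscuous mode,
    # as flat/provider networking setups typically require.
    add_bridge_port('br-data', 'eth1', promisc=True)
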
706=== added directory 'hooks/charmhelpers/contrib/openstack/amulet'
707=== added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py'
708=== added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
709--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
710+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-10-02 12:57:31 +0000
711@@ -0,0 +1,94 @@
712+from bzrlib.branch import Branch
713+import os
714+import re
715+from charmhelpers.contrib.amulet.deployment import (
716+ AmuletDeployment
717+)
718+
719+
720+class OpenStackAmuletDeployment(AmuletDeployment):
721+ """OpenStack amulet deployment.
722+
723+ This class inherits from AmuletDeployment and has additional support
724+ that is specifically for use by OpenStack charms.
725+ """
726+
727+ def __init__(self, series=None, openstack=None, source=None):
728+ """Initialize the deployment environment."""
729+ super(OpenStackAmuletDeployment, self).__init__(series)
730+ self.openstack = openstack
731+ self.source = source
732+
733+ def _is_dev_branch(self):
734+ """Determine if branch being tested is a dev (i.e. next) branch."""
735+ branch = Branch.open(os.getcwd())
736+ parent = branch.get_parent()
737+ pattern = re.compile("^.*/next/$")
738+ if (pattern.match(parent)):
739+ return True
740+ else:
741+ return False
742+
743+ def _determine_branch_locations(self, other_services):
744+ """Determine the branch locations for the other services.
745+
746+ If the branch being tested is a dev branch, then determine the
747+ development branch locations for the other services. Otherwise,
748+ the default charm store branches will be used."""
749+ name = 0
750+ if self._is_dev_branch():
751+ updated_services = []
752+ for svc in other_services:
753+ if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']:
754+ location = 'lp:charms/{}'.format(svc[name])
755+ else:
756+ temp = 'lp:~openstack-charmers/charms/trusty/{}/next'
757+ location = temp.format(svc[name])
758+ updated_services.append(svc + (location,))
759+ other_services = updated_services
760+ return other_services
761+
762+ def _add_services(self, this_service, other_services):
763+ """Add services to the deployment and set openstack-origin/source."""
764+ name = 0
765+ other_services = self._determine_branch_locations(other_services)
766+ super(OpenStackAmuletDeployment, self)._add_services(this_service,
767+ other_services)
768+ services = other_services
769+ services.append(this_service)
770+ use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
771+
772+ if self.openstack:
773+ for svc in services:
774+ if svc[name] not in use_source:
775+ config = {'openstack-origin': self.openstack}
776+ self.d.configure(svc[name], config)
777+
778+ if self.source:
779+ for svc in services:
780+ if svc[name] in use_source:
781+ config = {'source': self.source}
782+ self.d.configure(svc[name], config)
783+
784+ def _configure_services(self, configs):
785+ """Configure all of the services."""
786+ for service, config in configs.iteritems():
787+ self.d.configure(service, config)
788+
789+ def _get_openstack_release(self):
790+ """Get openstack release.
791+
792+ Return an integer representing the enum value of the openstack
793+ release.
794+ """
795+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
796+ self.precise_havana, self.precise_icehouse,
797+ self.trusty_icehouse) = range(6)
798+ releases = {
799+ ('precise', None): self.precise_essex,
800+ ('precise', 'cloud:precise-folsom'): self.precise_folsom,
801+ ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
802+ ('precise', 'cloud:precise-havana'): self.precise_havana,
803+ ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
804+ ('trusty', None): self.trusty_icehouse}
805+ return releases[(self.series, self.openstack)]
806
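
Usage note (not part of the diff): a minimal sketch of how a charm's Amulet test builds on this class; service names are illustrative, and the service tuples follow the convention used above.

    from charmhelpers.contrib.openstack.amulet.deployment import (
        OpenStackAmuletDeployment
    )

    class NovaComputeBasicDeployment(OpenStackAmuletDeployment):
        def __init__(self, series, openstack=None, source=None):
            super(NovaComputeBasicDeployment, self).__init__(series,
                                                             openstack,
                                                             source)
            this_service = ('nova-compute',)
            other_services = [('mysql',), ('rabbitmq-server',),
                              ('nova-cloud-controller',), ('keystone',),
                              ('glance',)]
            self._add_services(this_service, other_services)
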
807=== added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
808--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
809+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-10-02 12:57:31 +0000
810@@ -0,0 +1,276 @@
811+import logging
812+import os
813+import time
814+import urllib
815+
816+import glanceclient.v1.client as glance_client
817+import keystoneclient.v2_0 as keystone_client
818+import novaclient.v1_1.client as nova_client
819+
820+from charmhelpers.contrib.amulet.utils import (
821+ AmuletUtils
822+)
823+
824+DEBUG = logging.DEBUG
825+ERROR = logging.ERROR
826+
827+
828+class OpenStackAmuletUtils(AmuletUtils):
829+ """OpenStack amulet utilities.
830+
831+ This class inherits from AmuletUtils and has additional support
832+ that is specifically for use by OpenStack charms.
833+ """
834+
835+ def __init__(self, log_level=ERROR):
836+ """Initialize the deployment environment."""
837+ super(OpenStackAmuletUtils, self).__init__(log_level)
838+
839+ def validate_endpoint_data(self, endpoints, admin_port, internal_port,
840+ public_port, expected):
841+ """Validate endpoint data.
842+
843+ Validate actual endpoint data vs expected endpoint data. The ports
844+ are used to find the matching endpoint.
845+ """
846+ found = False
847+ for ep in endpoints:
848+ self.log.debug('endpoint: {}'.format(repr(ep)))
849+ if (admin_port in ep.adminurl and
850+ internal_port in ep.internalurl and
851+ public_port in ep.publicurl):
852+ found = True
853+ actual = {'id': ep.id,
854+ 'region': ep.region,
855+ 'adminurl': ep.adminurl,
856+ 'internalurl': ep.internalurl,
857+ 'publicurl': ep.publicurl,
858+ 'service_id': ep.service_id}
859+ ret = self._validate_dict_data(expected, actual)
860+ if ret:
861+ return 'unexpected endpoint data - {}'.format(ret)
862+
863+ if not found:
864+ return 'endpoint not found'
865+
866+ def validate_svc_catalog_endpoint_data(self, expected, actual):
867+ """Validate service catalog endpoint data.
868+
869+ Validate a list of actual service catalog endpoints vs a list of
870+ expected service catalog endpoints.
871+ """
872+ self.log.debug('actual: {}'.format(repr(actual)))
873+ for k, v in expected.iteritems():
874+ if k in actual:
875+ ret = self._validate_dict_data(expected[k][0], actual[k][0])
876+ if ret:
877+ return self.endpoint_error(k, ret)
878+ else:
879+ return "endpoint {} does not exist".format(k)
880+ return ret
881+
882+ def validate_tenant_data(self, expected, actual):
883+ """Validate tenant data.
884+
885+ Validate a list of actual tenant data vs list of expected tenant
886+ data.
887+ """
888+ self.log.debug('actual: {}'.format(repr(actual)))
889+ for e in expected:
890+ found = False
891+ for act in actual:
892+ a = {'enabled': act.enabled, 'description': act.description,
893+ 'name': act.name, 'id': act.id}
894+ if e['name'] == a['name']:
895+ found = True
896+ ret = self._validate_dict_data(e, a)
897+ if ret:
898+ return "unexpected tenant data - {}".format(ret)
899+ if not found:
900+ return "tenant {} does not exist".format(e['name'])
901+ return ret
902+
903+ def validate_role_data(self, expected, actual):
904+ """Validate role data.
905+
906+ Validate a list of actual role data vs a list of expected role
907+ data.
908+ """
909+ self.log.debug('actual: {}'.format(repr(actual)))
910+ for e in expected:
911+ found = False
912+ for act in actual:
913+ a = {'name': act.name, 'id': act.id}
914+ if e['name'] == a['name']:
915+ found = True
916+ ret = self._validate_dict_data(e, a)
917+ if ret:
918+ return "unexpected role data - {}".format(ret)
919+ if not found:
920+ return "role {} does not exist".format(e['name'])
921+ return ret
922+
923+ def validate_user_data(self, expected, actual):
924+ """Validate user data.
925+
926+ Validate a list of actual user data vs a list of expected user
927+ data.
928+ """
929+ self.log.debug('actual: {}'.format(repr(actual)))
930+ for e in expected:
931+ found = False
932+ for act in actual:
933+ a = {'enabled': act.enabled, 'name': act.name,
934+ 'email': act.email, 'tenantId': act.tenantId,
935+ 'id': act.id}
936+ if e['name'] == a['name']:
937+ found = True
938+ ret = self._validate_dict_data(e, a)
939+ if ret:
940+ return "unexpected user data - {}".format(ret)
941+ if not found:
942+ return "user {} does not exist".format(e['name'])
943+ return ret
944+
945+ def validate_flavor_data(self, expected, actual):
946+ """Validate flavor data.
947+
948+ Validate a list of actual flavors vs a list of expected flavors.
949+ """
950+ self.log.debug('actual: {}'.format(repr(actual)))
951+ act = [a.name for a in actual]
952+ return self._validate_list_data(expected, act)
953+
954+ def tenant_exists(self, keystone, tenant):
955+ """Return True if tenant exists."""
956+ return tenant in [t.name for t in keystone.tenants.list()]
957+
958+ def authenticate_keystone_admin(self, keystone_sentry, user, password,
959+ tenant):
960+ """Authenticates admin user with the keystone admin endpoint."""
961+ unit = keystone_sentry
962+ service_ip = unit.relation('shared-db',
963+ 'mysql:shared-db')['private-address']
964+ ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
965+ return keystone_client.Client(username=user, password=password,
966+ tenant_name=tenant, auth_url=ep)
967+
968+ def authenticate_keystone_user(self, keystone, user, password, tenant):
969+ """Authenticates a regular user with the keystone public endpoint."""
970+ ep = keystone.service_catalog.url_for(service_type='identity',
971+ endpoint_type='publicURL')
972+ return keystone_client.Client(username=user, password=password,
973+ tenant_name=tenant, auth_url=ep)
974+
975+ def authenticate_glance_admin(self, keystone):
976+ """Authenticates admin user with glance."""
977+ ep = keystone.service_catalog.url_for(service_type='image',
978+ endpoint_type='adminURL')
979+ return glance_client.Client(ep, token=keystone.auth_token)
980+
981+ def authenticate_nova_user(self, keystone, user, password, tenant):
982+ """Authenticates a regular user with nova-api."""
983+ ep = keystone.service_catalog.url_for(service_type='identity',
984+ endpoint_type='publicURL')
985+ return nova_client.Client(username=user, api_key=password,
986+ project_id=tenant, auth_url=ep)
987+
988+ def create_cirros_image(self, glance, image_name):
989+ """Download the latest cirros image and upload it to glance."""
990+ http_proxy = os.getenv('AMULET_HTTP_PROXY')
991+ self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
992+ if http_proxy:
993+ proxies = {'http': http_proxy}
994+ opener = urllib.FancyURLopener(proxies)
995+ else:
996+ opener = urllib.FancyURLopener()
997+
998+ f = opener.open("http://download.cirros-cloud.net/version/released")
999+ version = f.read().strip()
1000+ cirros_img = "cirros-{}-x86_64-disk.img".format(version)
1001+ local_path = os.path.join('tests', cirros_img)
1002+
1003+ if not os.path.exists(local_path):
1004+ cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
1005+ version, cirros_img)
1006+ opener.retrieve(cirros_url, local_path)
1007+ f.close()
1008+
1009+ with open(local_path) as f:
1010+ image = glance.images.create(name=image_name, is_public=True,
1011+ disk_format='qcow2',
1012+ container_format='bare', data=f)
1013+ count = 1
1014+ status = image.status
1015+ while status != 'active' and count < 10:
1016+ time.sleep(3)
1017+ image = glance.images.get(image.id)
1018+ status = image.status
1019+ self.log.debug('image status: {}'.format(status))
1020+ count += 1
1021+
1022+ if status != 'active':
1023+ self.log.error('image creation timed out')
1024+ return None
1025+
1026+ return image
1027+
1028+ def delete_image(self, glance, image):
1029+ """Delete the specified image."""
1030+ num_before = len(list(glance.images.list()))
1031+ glance.images.delete(image)
1032+
1033+ count = 1
1034+ num_after = len(list(glance.images.list()))
1035+ while num_after != (num_before - 1) and count < 10:
1036+ time.sleep(3)
1037+ num_after = len(list(glance.images.list()))
1038+ self.log.debug('number of images: {}'.format(num_after))
1039+ count += 1
1040+
1041+ if num_after != (num_before - 1):
1042+ self.log.error('image deletion timed out')
1043+ return False
1044+
1045+ return True
1046+
1047+ def create_instance(self, nova, image_name, instance_name, flavor):
1048+ """Create the specified instance."""
1049+ image = nova.images.find(name=image_name)
1050+ flavor = nova.flavors.find(name=flavor)
1051+ instance = nova.servers.create(name=instance_name, image=image,
1052+ flavor=flavor)
1053+
1054+ count = 1
1055+ status = instance.status
1056+ while status != 'ACTIVE' and count < 60:
1057+ time.sleep(3)
1058+ instance = nova.servers.get(instance.id)
1059+ status = instance.status
1060+ self.log.debug('instance status: {}'.format(status))
1061+ count += 1
1062+
1063+ if status != 'ACTIVE':
1064+ self.log.error('instance creation timed out')
1065+ return None
1066+
1067+ return instance
1068+
1069+ def delete_instance(self, nova, instance):
1070+ """Delete the specified instance."""
1071+ num_before = len(list(nova.servers.list()))
1072+ nova.servers.delete(instance)
1073+
1074+ count = 1
1075+ num_after = len(list(nova.servers.list()))
1076+ while num_after != (num_before - 1) and count < 10:
1077+ time.sleep(3)
1078+ num_after = len(list(nova.servers.list()))
1079+ self.log.debug('number of instances: {}'.format(num_after))
1080+ count += 1
1081+
1082+ if num_after != (num_before - 1):
1083+ self.log.error('instance deletion timed out')
1084+ return False
1085+
1086+ return True
1087
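
Usage note (not part of the diff): a sketch of a test flow through these utilities; keystone_sentry is the keystone unit's Amulet sentry (a placeholder here) and all credentials and names are placeholders too.

    from charmhelpers.contrib.openstack.amulet.utils import (
        OpenStackAmuletUtils,
        DEBUG,
    )

    u = OpenStackAmuletUtils(DEBUG)
    keystone = u.authenticate_keystone_admin(keystone_sentry, user='admin',
                                             password='openstack',
                                             tenant='admin')
    glance = u.authenticate_glance_admin(keystone)
    nova = u.authenticate_nova_user(keystone, user='demo',
                                    password='password', tenant='demo')
    image = u.create_cirros_image(glance, 'cirros-test')
    server = u.create_instance(nova, 'cirros-test', 'test-server', 'm1.tiny')
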
1088=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
1089--- hooks/charmhelpers/contrib/openstack/context.py 2014-08-13 15:55:07 +0000
1090+++ hooks/charmhelpers/contrib/openstack/context.py 2014-10-02 12:57:31 +0000
1091@@ -8,7 +8,6 @@
1092 check_call
1093 )
1094
1095-
1096 from charmhelpers.fetch import (
1097 apt_install,
1098 filter_installed_packages,
1099@@ -21,10 +20,20 @@
1100 relation_get,
1101 relation_ids,
1102 related_units,
1103+ relation_set,
1104 unit_get,
1105 unit_private_ip,
1106 ERROR,
1107- INFO
1108+<<<<<<< TREE
1109+ INFO
1110+=======
1111+ INFO
1112+)
1113+
1114+from charmhelpers.core.host import (
1115+ mkdir,
1116+ write_file
1117+>>>>>>> MERGE-SOURCE
1118 )
1119
1120 from charmhelpers.contrib.hahelpers.cluster import (
1121@@ -37,12 +46,20 @@
1122 from charmhelpers.contrib.hahelpers.apache import (
1123 get_cert,
1124 get_ca_cert,
1125+ install_ca_cert,
1126 )
1127
1128 from charmhelpers.contrib.openstack.neutron import (
1129 neutron_plugin_attribute,
1130 )
1131
1132+from charmhelpers.contrib.network.ip import (
1133+ get_address_in_network,
1134+ get_ipv6_addr,
1135+ format_ipv6_addr,
1136+ is_address_in_network
1137+)
1138+
1139 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
1140
1141
1142@@ -135,8 +152,26 @@
1143 'Missing required charm config options. '
1144 '(database name and user)')
1145 raise OSContextError
1146+
1147 ctxt = {}
1148
1149+ # NOTE(jamespage) if mysql charm provides a network upon which
1150+ # access to the database should be made, reconfigure relation
1151+ # with the service units local address and defer execution
1152+ access_network = relation_get('access-network')
1153+ if access_network is not None:
1154+ if self.relation_prefix is not None:
1155+ hostname_key = "{}_hostname".format(self.relation_prefix)
1156+ else:
1157+ hostname_key = "hostname"
1158+ access_hostname = get_address_in_network(access_network,
1159+ unit_get('private-address'))
1160+ set_hostname = relation_get(attribute=hostname_key,
1161+ unit=local_unit())
1162+ if set_hostname != access_hostname:
1163+ relation_set(relation_settings={hostname_key: access_hostname})
1164+ return ctxt # Defer any further hook execution for now....
1165+
1166 password_setting = 'password'
1167 if self.relation_prefix:
1168 password_setting = self.relation_prefix + '_password'
1169@@ -144,8 +179,10 @@
1170 for rid in relation_ids('shared-db'):
1171 for unit in related_units(rid):
1172 rdata = relation_get(rid=rid, unit=unit)
1173+ host = rdata.get('db_host')
1174+ host = format_ipv6_addr(host) or host
1175 ctxt = {
1176- 'database_host': rdata.get('db_host'),
1177+ 'database_host': host,
1178 'database': self.database,
1179 'database_user': self.user,
1180 'database_password': rdata.get(password_setting),
1181@@ -221,10 +258,15 @@
1182 for rid in relation_ids('identity-service'):
1183 for unit in related_units(rid):
1184 rdata = relation_get(rid=rid, unit=unit)
1185+ serv_host = rdata.get('service_host')
1186+ serv_host = format_ipv6_addr(serv_host) or serv_host
1187+ auth_host = rdata.get('auth_host')
1188+ auth_host = format_ipv6_addr(auth_host) or auth_host
1189+
1190 ctxt = {
1191 'service_port': rdata.get('service_port'),
1192- 'service_host': rdata.get('service_host'),
1193- 'auth_host': rdata.get('auth_host'),
1194+ 'service_host': serv_host,
1195+ 'auth_host': auth_host,
1196 'auth_port': rdata.get('auth_port'),
1197 'admin_tenant_name': rdata.get('service_tenant'),
1198 'admin_user': rdata.get('service_username'),
1199@@ -244,32 +286,42 @@
1200
1201
1202 class AMQPContext(OSContextGenerator):
1203- interfaces = ['amqp']
1204
1205- def __init__(self, ssl_dir=None):
1206+ def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
1207 self.ssl_dir = ssl_dir
1208+ self.rel_name = rel_name
1209+ self.relation_prefix = relation_prefix
1210+ self.interfaces = [rel_name]
1211
1212 def __call__(self):
1213 log('Generating template context for amqp')
1214 conf = config()
1215+ user_setting = 'rabbit-user'
1216+ vhost_setting = 'rabbit-vhost'
1217+ if self.relation_prefix:
1218+ user_setting = self.relation_prefix + '-rabbit-user'
1219+ vhost_setting = self.relation_prefix + '-rabbit-vhost'
1220+
1221 try:
1222- username = conf['rabbit-user']
1223- vhost = conf['rabbit-vhost']
1224+ username = conf[user_setting]
1225+ vhost = conf[vhost_setting]
1226 except KeyError as e:
1227 log('Could not generate amqp context. '
1228 'Missing required charm config options: %s.' % e)
1229 raise OSContextError
1230 ctxt = {}
1231- for rid in relation_ids('amqp'):
1232+ for rid in relation_ids(self.rel_name):
1233 ha_vip_only = False
1234 for unit in related_units(rid):
1235 if relation_get('clustered', rid=rid, unit=unit):
1236 ctxt['clustered'] = True
1237- ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
1238- unit=unit)
1239+ vip = relation_get('vip', rid=rid, unit=unit)
1240+ vip = format_ipv6_addr(vip) or vip
1241+ ctxt['rabbitmq_host'] = vip
1242 else:
1243- ctxt['rabbitmq_host'] = relation_get('private-address',
1244- rid=rid, unit=unit)
1245+ host = relation_get('private-address', rid=rid, unit=unit)
1246+ host = format_ipv6_addr(host) or host
1247+ ctxt['rabbitmq_host'] = host
1248 ctxt.update({
1249 'rabbitmq_user': username,
1250 'rabbitmq_password': relation_get('password', rid=rid,
1251@@ -308,8 +360,9 @@
1252 and len(related_units(rid)) > 1:
1253 rabbitmq_hosts = []
1254 for unit in related_units(rid):
1255- rabbitmq_hosts.append(relation_get('private-address',
1256- rid=rid, unit=unit))
1257+ host = relation_get('private-address', rid=rid, unit=unit)
1258+ host = format_ipv6_addr(host) or host
1259+ rabbitmq_hosts.append(host)
1260 ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
1261 if not context_complete(ctxt):
1262 return {}
1263@@ -333,10 +386,13 @@
1264 use_syslog = str(config('use-syslog')).lower()
1265 for rid in relation_ids('ceph'):
1266 for unit in related_units(rid):
1267- mon_hosts.append(relation_get('private-address', rid=rid,
1268- unit=unit))
1269 auth = relation_get('auth', rid=rid, unit=unit)
1270 key = relation_get('key', rid=rid, unit=unit)
1271+ ceph_addr = \
1272+ relation_get('ceph-public-address', rid=rid, unit=unit) or \
1273+ relation_get('private-address', rid=rid, unit=unit)
1274+ ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
1275+ mon_hosts.append(ceph_addr)
1276
1277 ctxt = {
1278 'mon_hosts': ' '.join(mon_hosts),
1279@@ -370,7 +426,14 @@
1280
1281 cluster_hosts = {}
1282 l_unit = local_unit().replace('/', '-')
1283- cluster_hosts[l_unit] = unit_get('private-address')
1284+
1285+ if config('prefer-ipv6'):
1286+ addr = get_ipv6_addr(exc_list=[config('vip')])[0]
1287+ else:
1288+ addr = unit_get('private-address')
1289+
1290+ cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),
1291+ addr)
1292
1293 for rid in relation_ids('cluster'):
1294 for unit in related_units(rid):
1295@@ -381,6 +444,21 @@
1296 ctxt = {
1297 'units': cluster_hosts,
1298 }
1299+
1300+ if config('haproxy-server-timeout'):
1301+ ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
1302+ if config('haproxy-client-timeout'):
1303+ ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
1304+
1305+ if config('prefer-ipv6'):
1306+ ctxt['local_host'] = 'ip6-localhost'
1307+ ctxt['haproxy_host'] = '::'
1308+ ctxt['stat_port'] = ':::8888'
1309+ else:
1310+ ctxt['local_host'] = '127.0.0.1'
1311+ ctxt['haproxy_host'] = '0.0.0.0'
1312+ ctxt['stat_port'] = ':8888'
1313+
1314 if len(cluster_hosts.keys()) > 1:
1315 # Enable haproxy when we have enough peers.
1316 log('Ensuring haproxy enabled in /etc/default/haproxy.')
1317@@ -419,12 +497,13 @@
1318 """
1319 Generates a context for an apache vhost configuration that configures
1320 HTTPS reverse proxying for one or many endpoints. Generated context
1321- looks something like:
1322- {
1323- 'namespace': 'cinder',
1324- 'private_address': 'iscsi.mycinderhost.com',
1325- 'endpoints': [(8776, 8766), (8777, 8767)]
1326- }
1327+ looks something like::
1328+
1329+ {
1330+ 'namespace': 'cinder',
1331+ 'private_address': 'iscsi.mycinderhost.com',
1332+ 'endpoints': [(8776, 8766), (8777, 8767)]
1333+ }
1334
1335 The endpoints list consists of tuples mapping external ports
1336 to internal ports.
1337@@ -440,22 +519,36 @@
1338 cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
1339 check_call(cmd)
1340
1341- def configure_cert(self):
1342- if not os.path.isdir('/etc/apache2/ssl'):
1343- os.mkdir('/etc/apache2/ssl')
1344+ def configure_cert(self, cn=None):
1345 ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
1346- if not os.path.isdir(ssl_dir):
1347- os.mkdir(ssl_dir)
1348- cert, key = get_cert()
1349- with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
1350- cert_out.write(b64decode(cert))
1351- with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
1352- key_out.write(b64decode(key))
1353+ mkdir(path=ssl_dir)
1354+ cert, key = get_cert(cn)
1355+ if cn:
1356+ cert_filename = 'cert_{}'.format(cn)
1357+ key_filename = 'key_{}'.format(cn)
1358+ else:
1359+ cert_filename = 'cert'
1360+ key_filename = 'key'
1361+ write_file(path=os.path.join(ssl_dir, cert_filename),
1362+ content=b64decode(cert))
1363+ write_file(path=os.path.join(ssl_dir, key_filename),
1364+ content=b64decode(key))
1365+
1366+ def configure_ca(self):
1367 ca_cert = get_ca_cert()
1368 if ca_cert:
1369- with open(CA_CERT_PATH, 'w') as ca_out:
1370- ca_out.write(b64decode(ca_cert))
1371- check_call(['update-ca-certificates'])
1372+ install_ca_cert(b64decode(ca_cert))
1373+
1374+ def canonical_names(self):
1375+ '''Figure out which canonical names clients will use to access this service'''
1376+ cns = []
1377+ for r_id in relation_ids('identity-service'):
1378+ for unit in related_units(r_id):
1379+ rdata = relation_get(rid=r_id, unit=unit)
1380+ for k in rdata:
1381+ if k.startswith('ssl_key_'):
1382+ cns.append(k.lstrip('ssl_key_'))
1383+ return list(set(cns))
1384
1385 def __call__(self):
1386 if isinstance(self.external_ports, basestring):
1387@@ -463,21 +556,47 @@
1388 if (not self.external_ports or not https()):
1389 return {}
1390
1391- self.configure_cert()
1392+ self.configure_ca()
1393 self.enable_modules()
1394
1395 ctxt = {
1396 'namespace': self.service_namespace,
1397- 'private_address': unit_get('private-address'),
1398- 'endpoints': []
1399+ 'endpoints': [],
1400+ 'ext_ports': []
1401 }
1402- if is_clustered():
1403- ctxt['private_address'] = config('vip')
1404- for api_port in self.external_ports:
1405- ext_port = determine_apache_port(api_port)
1406- int_port = determine_api_port(api_port)
1407- portmap = (int(ext_port), int(int_port))
1408- ctxt['endpoints'].append(portmap)
1409+
1410+ for cn in self.canonical_names():
1411+ self.configure_cert(cn)
1412+
1413+ addresses = []
1414+ vips = []
1415+ if config('vip'):
1416+ vips = config('vip').split()
1417+
1418+ for network_type in ['os-internal-network',
1419+ 'os-admin-network',
1420+ 'os-public-network']:
1421+ address = get_address_in_network(config(network_type),
1422+ unit_get('private-address'))
1423+ if len(vips) > 0 and is_clustered():
1424+ for vip in vips:
1425+ if is_address_in_network(config(network_type),
1426+ vip):
1427+ addresses.append((address, vip))
1428+ break
1429+ elif is_clustered():
1430+ addresses.append((address, config('vip')))
1431+ else:
1432+ addresses.append((address, address))
1433+
1434+ for address, endpoint in set(addresses):
1435+ for api_port in self.external_ports:
1436+ ext_port = determine_apache_port(api_port)
1437+ int_port = determine_api_port(api_port)
1438+ portmap = (address, endpoint, int(ext_port), int(int_port))
1439+ ctxt['endpoints'].append(portmap)
1440+ ctxt['ext_ports'].append(int(ext_port))
1441+ ctxt['ext_ports'] = list(set(ctxt['ext_ports']))
1442 return ctxt
1443
1444
1445@@ -542,6 +661,26 @@
1446
1447 return nvp_ctxt
1448
1449+ def n1kv_ctxt(self):
1450+ driver = neutron_plugin_attribute(self.plugin, 'driver',
1451+ self.network_manager)
1452+ n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
1453+ self.network_manager)
1454+ n1kv_ctxt = {
1455+ 'core_plugin': driver,
1456+ 'neutron_plugin': 'n1kv',
1457+ 'neutron_security_groups': self.neutron_security_groups,
1458+ 'local_ip': unit_private_ip(),
1459+ 'config': n1kv_config,
1460+ 'vsm_ip': config('n1kv-vsm-ip'),
1461+ 'vsm_username': config('n1kv-vsm-username'),
1462+ 'vsm_password': config('n1kv-vsm-password'),
1463+ 'restrict_policy_profiles': config(
1464+ 'n1kv_restrict_policy_profiles'),
1465+ }
1466+
1467+ return n1kv_ctxt
1468+
1469 def neutron_ctxt(self):
1470 if https():
1471 proto = 'https'
1472@@ -573,6 +712,8 @@
1473 ctxt.update(self.ovs_ctxt())
1474 elif self.plugin in ['nvp', 'nsx']:
1475 ctxt.update(self.nvp_ctxt())
1476+ elif self.plugin == 'n1kv':
1477+ ctxt.update(self.n1kv_ctxt())
1478
1479 alchemy_flags = config('neutron-alchemy-flags')
1480 if alchemy_flags:
1481@@ -612,7 +753,7 @@
1482 The subordinate interface allows subordinates to export their
1483 configuration requirements to the principal for multiple config
1484 files and multiple services. I.e., a subordinate that has interfaces
1485- to both glance and nova may export the following yaml blob as json:
1486+ to both glance and nova may export the following yaml blob as json::
1487
1488 glance:
1489 /etc/glance/glance-api.conf:
1490@@ -631,7 +772,8 @@
1491
1492 It is then up to the principal charms to subscribe this context to
1493 the service+config file it is interested in. Configuration data will
1494- be available in the template context, in glance's case, as:
1495+ be available in the template context, in glance's case, as::
1496+
1497 ctxt = {
1498 ... other context ...
1499 'subordinate_config': {
1500@@ -684,15 +826,38 @@
1501
1502 sub_config = sub_config[self.config_file]
1503 for k, v in sub_config.iteritems():
1504- if k == 'sections':
1505- for section, config_dict in v.iteritems():
1506- log("adding section '%s'" % (section))
1507- ctxt[k][section] = config_dict
1508- else:
1509- ctxt[k] = v
1510-
1511- log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
1512-
1513+<<<<<<< TREE
1514+ if k == 'sections':
1515+ for section, config_dict in v.iteritems():
1516+ log("adding section '%s'" % (section))
1517+ ctxt[k][section] = config_dict
1518+ else:
1519+ ctxt[k] = v
1520+
1521+ log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
1522+
1523+=======
1524+ if k == 'sections':
1525+ for section, config_dict in v.iteritems():
1526+ log("adding section '%s'" % (section))
1527+ ctxt[k][section] = config_dict
1528+ else:
1529+ ctxt[k] = v
1530+
1531+ log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
1532+
1533+ return ctxt
1534+
1535+
1536+class LogLevelContext(OSContextGenerator):
1537+
1538+ def __call__(self):
1539+ ctxt = {}
1540+ ctxt['debug'] = \
1541+ False if config('debug') is None else config('debug')
1542+ ctxt['verbose'] = \
1543+ False if config('verbose') is None else config('verbose')
1544+>>>>>>> MERGE-SOURCE
1545 return ctxt
1546
1547
1548@@ -703,3 +868,16 @@
1549 'use_syslog': config('use-syslog')
1550 }
1551 return ctxt
1552+
1553+
1554+class BindHostContext(OSContextGenerator):
1555+
1556+ def __call__(self):
1557+ if config('prefer-ipv6'):
1558+ return {
1559+ 'bind_host': '::'
1560+ }
1561+ else:
1562+ return {
1563+ 'bind_host': '0.0.0.0'
1564+ }
1565
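
Usage note (not part of the diff): how a charm typically wires these generators into its config registry; the paths, release and chosen contexts are illustrative.

    from charmhelpers.contrib.openstack import context, templating

    configs = templating.OSConfigRenderer(templates_dir='templates',
                                          openstack_release='icehouse')
    configs.register('/etc/nova/nova.conf',
                     [context.AMQPContext(rel_name='amqp'),
                      context.BindHostContext()])
    configs.write('/etc/nova/nova.conf')
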
1566=== added file 'hooks/charmhelpers/contrib/openstack/ip.py'
1567--- hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
1568+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-10-02 12:57:31 +0000
1569@@ -0,0 +1,79 @@
1570+from charmhelpers.core.hookenv import (
1571+ config,
1572+ unit_get,
1573+)
1574+
1575+from charmhelpers.contrib.network.ip import (
1576+ get_address_in_network,
1577+ is_address_in_network,
1578+ is_ipv6,
1579+ get_ipv6_addr,
1580+)
1581+
1582+from charmhelpers.contrib.hahelpers.cluster import is_clustered
1583+
1584+PUBLIC = 'public'
1585+INTERNAL = 'int'
1586+ADMIN = 'admin'
1587+
1588+_address_map = {
1589+ PUBLIC: {
1590+ 'config': 'os-public-network',
1591+ 'fallback': 'public-address'
1592+ },
1593+ INTERNAL: {
1594+ 'config': 'os-internal-network',
1595+ 'fallback': 'private-address'
1596+ },
1597+ ADMIN: {
1598+ 'config': 'os-admin-network',
1599+ 'fallback': 'private-address'
1600+ }
1601+}
1602+
1603+
1604+def canonical_url(configs, endpoint_type=PUBLIC):
1605+ '''
1606+ Returns the correct HTTP URL to this host given the state of HTTPS
1607+ configuration, hacluster and charm configuration.
1608+
1609+ :configs OSTemplateRenderer: A config tempating object to inspect for
1610+ a complete https context.
1611+ :endpoint_type str: The endpoint type to resolve.
1612+
1613+ :returns str: Base URL for services on the current service unit.
1614+ '''
1615+ scheme = 'http'
1616+ if 'https' in configs.complete_contexts():
1617+ scheme = 'https'
1618+ address = resolve_address(endpoint_type)
1619+ if is_ipv6(address):
1620+ address = "[{}]".format(address)
1621+ return '%s://%s' % (scheme, address)
1622+
1623+
1624+def resolve_address(endpoint_type=PUBLIC):
1625+ resolved_address = None
1626+ if is_clustered():
1627+ if config(_address_map[endpoint_type]['config']) is None:
1628+ # Assume vip is simple and pass back directly
1629+ resolved_address = config('vip')
1630+ else:
1631+ for vip in config('vip').split():
1632+ if is_address_in_network(
1633+ config(_address_map[endpoint_type]['config']),
1634+ vip):
1635+ resolved_address = vip
1636+ else:
1637+ if config('prefer-ipv6'):
1638+ fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
1639+ else:
1640+ fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
1641+ resolved_address = get_address_in_network(
1642+ config(_address_map[endpoint_type]['config']), fallback_addr)
1643+
1644+ if resolved_address is None:
1645+ raise ValueError('Unable to resolve a suitable IP address'
1646+ ' based on charm state and configuration')
1647+ else:
1648+ return resolved_address
1649
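
A minimal usage sketch for the new helper, assuming a charm's OSConfigRenderer instance named 'configs' and an illustrative API port (neither is part of this diff)::

    from charmhelpers.contrib.openstack.ip import canonical_url, PUBLIC, INTERNAL

    # 'configs' is the charm's config renderer (illustrative).
    public_url = '%s:8774' % canonical_url(configs, PUBLIC)
    internal_url = '%s:8774' % canonical_url(configs, INTERNAL)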
1650=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
1651--- hooks/charmhelpers/contrib/openstack/neutron.py 2014-05-19 11:41:02 +0000
1652+++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-10-02 12:57:31 +0000
1653@@ -128,6 +128,20 @@
1654 'server_packages': ['neutron-server',
1655 'neutron-plugin-vmware'],
1656 'server_services': ['neutron-server']
1657+ },
1658+ 'n1kv': {
1659+ 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
1660+ 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
1661+ 'contexts': [
1662+ context.SharedDBContext(user=config('neutron-database-user'),
1663+ database=config('neutron-database'),
1664+ relation_prefix='neutron',
1665+ ssl_dir=NEUTRON_CONF_DIR)],
1666+ 'services': [],
1667+ 'packages': [['neutron-plugin-cisco']],
1668+ 'server_packages': ['neutron-server',
1669+ 'neutron-plugin-cisco'],
1670+ 'server_services': ['neutron-server']
1671 }
1672 }
1673 if release >= 'icehouse':
1674
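
For context, a sketch of how the new 'n1kv' entry would be consumed, assuming the neutron_plugins() accessor that wraps this dictionary::

    from charmhelpers.contrib.openstack.neutron import neutron_plugins

    n1kv = neutron_plugins()['n1kv']
    n1kv['config']           # '/etc/neutron/plugins/cisco/cisco_plugins.ini'
    n1kv['server_packages']  # ['neutron-server', 'neutron-plugin-cisco']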
1675=== modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
1676--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-04-04 16:45:38 +0000
1677+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-10-02 12:57:31 +0000
1678@@ -1,6 +1,6 @@
1679 global
1680- log 127.0.0.1 local0
1681- log 127.0.0.1 local1 notice
1682+ log {{ local_host }} local0
1683+ log {{ local_host }} local1 notice
1684 maxconn 20000
1685 user haproxy
1686 group haproxy
1687@@ -14,10 +14,19 @@
1688 retries 3
1689 timeout queue 1000
1690 timeout connect 1000
1691+{% if haproxy_client_timeout -%}
1692+ timeout client {{ haproxy_client_timeout }}
1693+{% else -%}
1694 timeout client 30000
1695+{% endif -%}
1696+
1697+{% if haproxy_server_timeout -%}
1698+ timeout server {{ haproxy_server_timeout }}
1699+{% else -%}
1700 timeout server 30000
1701+{% endif -%}
1702
1703-listen stats :8888
1704+listen stats {{ stat_port }}
1705 mode http
1706 stats enable
1707 stats hide-version
1708@@ -27,7 +36,12 @@
1709
1710 {% if units -%}
1711 {% for service, ports in service_ports.iteritems() -%}
1712-listen {{ service }} 0.0.0.0:{{ ports[0] }}
1713+listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }}
1714+ balance roundrobin
1715+ {% for unit, address in units.iteritems() -%}
1716+ server {{ unit }} {{ address }}:{{ ports[1] }} check
1717+ {% endfor %}
1718+listen {{ service }}_ipv6 :::{{ ports[0] }}
1719 balance roundrobin
1720 {% for unit, address in units.iteritems() -%}
1721 server {{ unit }} {{ address }}:{{ ports[1] }} check
1722
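
For reference, the context keys the reworked template now expects, taken from the template variables above; all values below are illustrative::

    ctxt = {
        'local_host': '127.0.0.1',
        'stat_port': ':8888',                 # note the leading colon
        'haproxy_client_timeout': 30000,      # optional, template defaults to 30000
        'haproxy_server_timeout': 30000,      # optional, template defaults to 30000
        'service_ports': {'nova-api': [8774, 8764]},
        'units': {'nova-cloud-controller-0': '10.0.0.1'},
    }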
1723=== modified file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend'
1724--- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 2013-08-12 21:48:24 +0000
1725+++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 2014-10-02 12:57:31 +0000
1726@@ -1,16 +1,18 @@
1727 {% if endpoints -%}
1728-{% for ext, int in endpoints -%}
1729-Listen {{ ext }}
1730-NameVirtualHost *:{{ ext }}
1731-<VirtualHost *:{{ ext }}>
1732- ServerName {{ private_address }}
1733+{% for ext_port in ext_ports -%}
1734+Listen {{ ext_port }}
1735+{% endfor -%}
1736+{% for address, endpoint, ext, int in endpoints -%}
1737+<VirtualHost {{ address }}:{{ ext }}>
1738+ ServerName {{ endpoint }}
1739 SSLEngine on
1740- SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
1741- SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
1742+ SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
1743+ SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
1744 ProxyPass / http://localhost:{{ int }}/
1745 ProxyPassReverse / http://localhost:{{ int }}/
1746 ProxyPreserveHost on
1747 </VirtualHost>
1748+{% endfor -%}
1749 <Proxy *>
1750 Order deny,allow
1751 Allow from all
1752@@ -19,5 +21,4 @@
1753 Order allow,deny
1754 Allow from all
1755 </Location>
1756-{% endfor -%}
1757 {% endif -%}
1758
1759=== modified file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf'
1760--- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 2013-09-27 16:20:42 +0000
1761+++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 2014-10-02 12:57:31 +0000
1762@@ -1,16 +1,18 @@
1763 {% if endpoints -%}
1764-{% for ext, int in endpoints -%}
1765-Listen {{ ext }}
1766-NameVirtualHost *:{{ ext }}
1767-<VirtualHost *:{{ ext }}>
1768- ServerName {{ private_address }}
1769+{% for ext_port in ext_ports -%}
1770+Listen {{ ext_port }}
1771+{% endfor -%}
1772+{% for address, endpoint, ext, int in endpoints -%}
1773+<VirtualHost {{ address }}:{{ ext }}>
1774+ ServerName {{ endpoint }}
1775 SSLEngine on
1776- SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
1777- SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
1778+ SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
1779+ SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
1780 ProxyPass / http://localhost:{{ int }}/
1781 ProxyPassReverse / http://localhost:{{ int }}/
1782 ProxyPreserveHost on
1783 </VirtualHost>
1784+{% endfor -%}
1785 <Proxy *>
1786 Order deny,allow
1787 Allow from all
1788@@ -19,5 +21,4 @@
1789 Order allow,deny
1790 Allow from all
1791 </Location>
1792-{% endfor -%}
1793 {% endif -%}
1794
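
The shape of the 'endpoints' context changes with this template rework; a sketch with illustrative values::

    # Old shape: one vhost per (ext_port, int_port) pair.
    endpoints = [(8776, 8766)]

    # New shape: one vhost per address/endpoint, with Listen ports separate.
    ext_ports = [8776]
    endpoints = [('10.0.0.10', 'cinder.example.com', 8776, 8766)]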
1795=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
1796--- hooks/charmhelpers/contrib/openstack/templating.py 2014-02-24 19:32:20 +0000
1797+++ hooks/charmhelpers/contrib/openstack/templating.py 2014-10-02 12:57:31 +0000
1798@@ -30,17 +30,17 @@
1799 loading dir.
1800
1801 A charm may also ship a templates dir with this module
1802- and it will be appended to the bottom of the search list, eg:
1803- hooks/charmhelpers/contrib/openstack/templates.
1804-
1805- :param templates_dir: str: Base template directory containing release
1806- sub-directories.
1807- :param os_release : str: OpenStack release codename to construct template
1808- loader.
1809-
1810- :returns : jinja2.ChoiceLoader constructed with a list of
1811- jinja2.FilesystemLoaders, ordered in descending
1812- order by OpenStack release.
1813+ and it will be appended to the bottom of the search list, eg::
1814+
1815+ hooks/charmhelpers/contrib/openstack/templates
1816+
1817+ :param templates_dir (str): Base template directory containing release
1818+ sub-directories.
1819+ :param os_release (str): OpenStack release codename to construct template
1820+ loader.
1821+ :returns: jinja2.ChoiceLoader constructed with a list of
1822+ jinja2.FilesystemLoaders, ordered in descending
1823+ order by OpenStack release.
1824 """
1825 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
1826 for rel in OPENSTACK_CODENAMES.itervalues()]
1827@@ -111,7 +111,8 @@
1828 and ease the burden of managing config templates across multiple OpenStack
1829 releases.
1830
1831- Basic usage:
1832+ Basic usage::
1833+
1834 # import some common context generates from charmhelpers
1835 from charmhelpers.contrib.openstack import context
1836
1837@@ -131,21 +132,19 @@
1838 # write out all registered configs
1839 configs.write_all()
1840
1841- Details:
1842+ **OpenStack Releases and template loading**
1843
1844- OpenStack Releases and template loading
1845- ---------------------------------------
1846 When the object is instantiated, it is associated with a specific OS
1847 release. This dictates how the template loader will be constructed.
1848
1849 The constructed loader attempts to load the template from several places
1850 in the following order:
1851- - from the most recent OS release-specific template dir (if one exists)
1852- - the base templates_dir
1853- - a template directory shipped in the charm with this helper file.
1854-
1855-
1856- For the example above, '/tmp/templates' contains the following structure:
1857+ - from the most recent OS release-specific template dir (if one exists)
1858+ - the base templates_dir
1859+ - a template directory shipped in the charm with this helper file.
1860+
1861+ For the example above, '/tmp/templates' contains the following structure::
1862+
1863 /tmp/templates/nova.conf
1864 /tmp/templates/api-paste.ini
1865 /tmp/templates/grizzly/api-paste.ini
1866@@ -169,8 +168,8 @@
1867 $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
1868 us to ship common templates (haproxy, apache) with the helpers.
1869
1870- Context generators
1871- ---------------------------------------
1872+ **Context generators**
1873+
1874 Context generators are used to generate template contexts during hook
1875 execution. Doing so may require inspecting service relations, charm
1876 config, etc. When registered, a config file is associated with a list
1877
1878=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
1879--- hooks/charmhelpers/contrib/openstack/utils.py 2014-08-27 07:15:10 +0000
1880+++ hooks/charmhelpers/contrib/openstack/utils.py 2014-10-02 12:57:31 +0000
1881@@ -3,8 +3,8 @@
1882 # Common python helper functions used for OpenStack charms.
1883 from collections import OrderedDict
1884
1885-import apt_pkg as apt
1886 import subprocess
1887+import json
1888 import os
1889 import socket
1890 import sys
1891@@ -14,7 +14,9 @@
1892 log as juju_log,
1893 charm_dir,
1894 ERROR,
1895- INFO
1896+ INFO,
1897+ relation_ids,
1898+ relation_set
1899 )
1900
1901 from charmhelpers.contrib.storage.linux.lvm import (
1902@@ -23,6 +25,10 @@
1903 remove_lvm_physical_volume,
1904 )
1905
1906+from charmhelpers.contrib.network.ip import (
1907+ get_ipv6_addr
1908+)
1909+
1910 from charmhelpers.core.host import lsb_release, mounts, umount
1911 from charmhelpers.fetch import apt_install, apt_cache
1912 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
1913@@ -41,7 +47,8 @@
1914 ('quantal', 'folsom'),
1915 ('raring', 'grizzly'),
1916 ('saucy', 'havana'),
1917- ('trusty', 'icehouse')
1918+ ('trusty', 'icehouse'),
1919+ ('utopic', 'juno'),
1920 ])
1921
1922
1923@@ -52,6 +59,7 @@
1924 ('2013.1', 'grizzly'),
1925 ('2013.2', 'havana'),
1926 ('2014.1', 'icehouse'),
1927+ ('2014.2', 'juno'),
1928 ])
1929
1930 # The ugly duckling
1931@@ -69,6 +77,7 @@
1932 ('1.13.0', 'icehouse'),
1933 ('1.12.0', 'icehouse'),
1934 ('1.11.0', 'icehouse'),
1935+ ('2.0.0', 'juno'),
1936 ])
1937
1938 DEFAULT_LOOPBACK_SIZE = '5G'
1939@@ -83,6 +92,8 @@
1940 '''Derive OpenStack release codename from a given installation source.'''
1941 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
1942 rel = ''
1943+ if src is None:
1944+ return rel
1945 if src in ['distro', 'distro-proposed']:
1946 try:
1947 rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
1948@@ -130,8 +141,14 @@
1949
1950 def get_os_codename_package(package, fatal=True):
1951 '''Derive OpenStack release codename from an installed package.'''
1952-
1953- cache = apt_cache()
1954+<<<<<<< TREE
1955+
1956+ cache = apt_cache()
1957+=======
1958+ import apt_pkg as apt
1959+
1960+ cache = apt_cache()
1961+>>>>>>> MERGE-SOURCE
1962
1963 try:
1964 pkg = cache[package]
1965@@ -182,7 +199,7 @@
1966 for version, cname in vers_map.iteritems():
1967 if cname == codename:
1968 return version
1969- #e = "Could not determine OpenStack version for package: %s" % pkg
1970+ # e = "Could not determine OpenStack version for package: %s" % pkg
1971 # error_out(e)
1972
1973
1974@@ -268,6 +285,9 @@
1975 'icehouse': 'precise-updates/icehouse',
1976 'icehouse/updates': 'precise-updates/icehouse',
1977 'icehouse/proposed': 'precise-proposed/icehouse',
1978+ 'juno': 'trusty-updates/juno',
1979+ 'juno/updates': 'trusty-updates/juno',
1980+ 'juno/proposed': 'trusty-proposed/juno',
1981 }
1982
1983 try:
1984@@ -315,6 +335,7 @@
1985
1986 """
1987
1988+ import apt_pkg as apt
1989 src = config('openstack-origin')
1990 cur_vers = get_os_version_package(package)
1991 available_vers = get_os_version_install_source(src)
1992@@ -448,3 +469,21 @@
1993 return result
1994 else:
1995 return result.split('.')[0]
1996+
1997+
1998+def sync_db_with_multi_ipv6_addresses(database, database_user,
1999+ relation_prefix=None):
2000+ hosts = get_ipv6_addr(dynamic_only=False)
2001+
2002+ kwargs = {'database': database,
2003+ 'username': database_user,
2004+ 'hostname': json.dumps(hosts)}
2005+
2006+ if relation_prefix:
2007+ keys = kwargs.keys()
2008+ for key in keys:
2009+ kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
2010+ del kwargs[key]
2011+
2012+ for rid in relation_ids('shared-db'):
2013+ relation_set(relation_id=rid, **kwargs)
2014
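
A sketch of the relation data the new sync_db_with_multi_ipv6_addresses() helper ends up setting, for an illustrative call with relation_prefix='nova' and two discovered addresses::

    # sync_db_with_multi_ipv6_addresses('nova', 'nova', relation_prefix='nova')
    settings = {
        'nova_database': 'nova',
        'nova_username': 'nova',
        'nova_hostname': '["2001:db8::1", "2001:db8::2"]',  # json.dumps(hosts)
    }
    # relation_set(relation_id=rid, **settings) for each shared-db relation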
2015=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
2016--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-04-04 16:45:38 +0000
2017+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-10-02 12:57:31 +0000
2018@@ -303,7 +303,7 @@
2019 blk_device, fstype, system_services=[]):
2020 """
2021 NOTE: This function must only be called from a single service unit for
2022- the same rbd_img otherwise data loss will occur.
2023+ the same rbd_img otherwise data loss will occur.
2024
2025 Ensures given pool and RBD image exists, is mapped to a block device,
2026 and the device is formatted and mounted at the given mount_point.
2027
2028=== added file 'hooks/charmhelpers/core/fstab.py'
2029--- hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000
2030+++ hooks/charmhelpers/core/fstab.py 2014-10-02 12:57:31 +0000
2031@@ -0,0 +1,116 @@
2032+#!/usr/bin/env python
2033+# -*- coding: utf-8 -*-
2034+
2035+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
2036+
2037+import os
2038+
2039+
2040+class Fstab(file):
2041+ """This class extends file in order to implement a file reader/writer
2042+ for file `/etc/fstab`
2043+ """
2044+
2045+ class Entry(object):
2046+ """Entry class represents a non-comment line on the `/etc/fstab` file
2047+ """
2048+ def __init__(self, device, mountpoint, filesystem,
2049+ options, d=0, p=0):
2050+ self.device = device
2051+ self.mountpoint = mountpoint
2052+ self.filesystem = filesystem
2053+
2054+ if not options:
2055+ options = "defaults"
2056+
2057+ self.options = options
2058+ self.d = d
2059+ self.p = p
2060+
2061+ def __eq__(self, o):
2062+ return str(self) == str(o)
2063+
2064+ def __str__(self):
2065+ return "{} {} {} {} {} {}".format(self.device,
2066+ self.mountpoint,
2067+ self.filesystem,
2068+ self.options,
2069+ self.d,
2070+ self.p)
2071+
2072+ DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
2073+
2074+ def __init__(self, path=None):
2075+ if path:
2076+ self._path = path
2077+ else:
2078+ self._path = self.DEFAULT_PATH
2079+ file.__init__(self, self._path, 'r+')
2080+
2081+ def _hydrate_entry(self, line):
2082+ # NOTE: use split with no arguments to split on any
2083+ # whitespace including tabs
2084+ return Fstab.Entry(*filter(
2085+ lambda x: x not in ('', None),
2086+ line.strip("\n").split()))
2087+
2088+ @property
2089+ def entries(self):
2090+ self.seek(0)
2091+ for line in self.readlines():
2092+ try:
2093+ if not line.startswith("#"):
2094+ yield self._hydrate_entry(line)
2095+ except ValueError:
2096+ pass
2097+
2098+ def get_entry_by_attr(self, attr, value):
2099+ for entry in self.entries:
2100+ e_attr = getattr(entry, attr)
2101+ if e_attr == value:
2102+ return entry
2103+ return None
2104+
2105+ def add_entry(self, entry):
2106+ if self.get_entry_by_attr('device', entry.device):
2107+ return False
2108+
2109+ self.write(str(entry) + '\n')
2110+ self.truncate()
2111+ return entry
2112+
2113+ def remove_entry(self, entry):
2114+ self.seek(0)
2115+
2116+ lines = self.readlines()
2117+
2118+ found = False
2119+ for index, line in enumerate(lines):
2120+ if not line.startswith("#"):
2121+ if self._hydrate_entry(line) == entry:
2122+ found = True
2123+ break
2124+
2125+ if not found:
2126+ return False
2127+
2128+ lines.remove(line)
2129+
2130+ self.seek(0)
2131+ self.write(''.join(lines))
2132+ self.truncate()
2133+ return True
2134+
2135+ @classmethod
2136+ def remove_by_mountpoint(cls, mountpoint, path=None):
2137+ fstab = cls(path=path)
2138+ entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
2139+ if entry:
2140+ return fstab.remove_entry(entry)
2141+ return False
2142+
2143+ @classmethod
2144+ def add(cls, device, mountpoint, filesystem, options=None, path=None):
2145+ return cls(path=path).add_entry(Fstab.Entry(device,
2146+ mountpoint, filesystem,
2147+ options=options))
2148
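
A minimal usage sketch for the new Fstab helper; the device and mount point are illustrative::

    from charmhelpers.core.fstab import Fstab

    Fstab.add('/dev/vdb', '/mnt/data', 'ext4', options='noatime')
    Fstab.remove_by_mountpoint('/mnt/data')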
2149=== renamed file 'hooks/charmhelpers/core/fstab.py' => 'hooks/charmhelpers/core/fstab.py.moved'
2150=== modified file 'hooks/charmhelpers/core/hookenv.py'
2151--- hooks/charmhelpers/core/hookenv.py 2014-05-19 11:41:02 +0000
2152+++ hooks/charmhelpers/core/hookenv.py 2014-10-02 12:57:31 +0000
2153@@ -25,7 +25,7 @@
2154 def cached(func):
2155 """Cache return values for multiple executions of func + args
2156
2157- For example:
2158+ For example::
2159
2160 @cached
2161 def unit_get(attribute):
2162@@ -156,12 +156,15 @@
2163
2164
2165 class Config(dict):
2166- """A Juju charm config dictionary that can write itself to
2167- disk (as json) and track which values have changed since
2168- the previous hook invocation.
2169-
2170- Do not instantiate this object directly - instead call
2171- ``hookenv.config()``
2172+ """A dictionary representation of the charm's config.yaml, with some
2173+ extra features:
2174+
2175+ - See which values in the dictionary have changed since the previous hook.
2176+ - For values that have changed, see what the previous value was.
2177+ - Store arbitrary data for use in a later hook.
2178+
2179+ NOTE: Do not instantiate this object directly - instead call
2180+ ``hookenv.config()``, which will return an instance of :class:`Config`.
2181
2182 Example usage::
2183
2184@@ -170,8 +173,8 @@
2185 >>> config = hookenv.config()
2186 >>> config['foo']
2187 'bar'
2188+ >>> # store a new key/value for later use
2189 >>> config['mykey'] = 'myval'
2190- >>> config.save()
2191
2192
2193 >>> # user runs `juju set mycharm foo=baz`
2194@@ -188,22 +191,34 @@
2195 >>> # keys/values that we add are preserved across hooks
2196 >>> config['mykey']
2197 'myval'
2198- >>> # don't forget to save at the end of hook!
2199- >>> config.save()
2200
2201 """
2202 CONFIG_FILE_NAME = '.juju-persistent-config'
2203
2204 def __init__(self, *args, **kw):
2205 super(Config, self).__init__(*args, **kw)
2206+ self.implicit_save = True
2207 self._prev_dict = None
2208 self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
2209 if os.path.exists(self.path):
2210 self.load_previous()
2211
2212+ def __getitem__(self, key):
2213+ """For regular dict lookups, check the current juju config first,
2214+ then the previous (saved) copy. This ensures that user-saved values
2215+ will be returned by a dict lookup.
2216+
2217+ """
2218+ try:
2219+ return dict.__getitem__(self, key)
2220+ except KeyError:
2221+ return (self._prev_dict or {})[key]
2222+
2223 def load_previous(self, path=None):
2224- """Load previous copy of config from disk so that current values
2225- can be compared to previous values.
2226+ """Load previous copy of config from disk.
2227+
2228+ In normal usage you don't need to call this method directly - it
2229+ is called automatically at object initialization.
2230
2231 :param path:
2232
2233@@ -218,8 +233,8 @@
2234 self._prev_dict = json.load(f)
2235
2236 def changed(self, key):
2237- """Return true if the value for this key has changed since
2238- the last save.
2239+ """Return True if the current value for this key is different from
2240+ the previous value.
2241
2242 """
2243 if self._prev_dict is None:
2244@@ -228,7 +243,7 @@
2245
2246 def previous(self, key):
2247 """Return previous value for this key, or None if there
2248- is no "previous" value.
2249+ is no previous value.
2250
2251 """
2252 if self._prev_dict:
2253@@ -238,7 +253,13 @@
2254 def save(self):
2255 """Save this config to disk.
2256
2257- Preserves items in _prev_dict that do not exist in self.
2258+ If the charm is using the :mod:`Services Framework <services.base>`
2259+ or :meth:`@hook <Hooks.hook>` decorator, this
2260+ is called automatically at the end of successful hook execution.
2261+ Otherwise, it should be called directly by user code.
2262+
2263+ To disable automatic saves, set ``implicit_save=False`` on this
2264+ instance.
2265
2266 """
2267 if self._prev_dict:
2268@@ -285,8 +306,9 @@
2269 raise
2270
2271
2272-def relation_set(relation_id=None, relation_settings={}, **kwargs):
2273+def relation_set(relation_id=None, relation_settings=None, **kwargs):
2274 """Set relation information for the current unit"""
2275+ relation_settings = relation_settings if relation_settings else {}
2276 relation_cmd_line = ['relation-set']
2277 if relation_id is not None:
2278 relation_cmd_line.extend(('-r', relation_id))
2279@@ -445,27 +467,29 @@
2280 class Hooks(object):
2281 """A convenient handler for hook functions.
2282
2283- Example:
2284+ Example::
2285+
2286 hooks = Hooks()
2287
2288 # register a hook, taking its name from the function name
2289 @hooks.hook()
2290 def install():
2291- ...
2292+ pass # your code here
2293
2294 # register a hook, providing a custom hook name
2295 @hooks.hook("config-changed")
2296 def config_changed():
2297- ...
2298+ pass # your code here
2299
2300 if __name__ == "__main__":
2301 # execute a hook based on the name the program is called by
2302 hooks.execute(sys.argv)
2303 """
2304
2305- def __init__(self):
2306+ def __init__(self, config_save=True):
2307 super(Hooks, self).__init__()
2308 self._hooks = {}
2309+ self._config_save = config_save
2310
2311 def register(self, name, function):
2312 """Register a hook"""
2313@@ -476,6 +500,10 @@
2314 hook_name = os.path.basename(args[0])
2315 if hook_name in self._hooks:
2316 self._hooks[hook_name]()
2317+ if self._config_save:
2318+ cfg = config()
2319+ if cfg.implicit_save:
2320+ cfg.save()
2321 else:
2322 raise UnregisteredHookError(hook_name)
2323
2324
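
A sketch of the new implicit-save behaviour: with the @hooks.hook() decorator, config() is now persisted automatically after a successful hook, so the explicit save() calls removed from the docstring above are no longer required::

    from charmhelpers.core.hookenv import Hooks, config

    hooks = Hooks()

    @hooks.hook('config-changed')
    def config_changed():
        cfg = config()
        cfg['mykey'] = 'myval'  # persisted automatically when the hook exits

    # A charm can still opt out and manage persistence itself:
    cfg = config()
    cfg.implicit_save = False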
2325=== modified file 'hooks/charmhelpers/core/host.py'
2326--- hooks/charmhelpers/core/host.py 2014-08-27 07:15:10 +0000
2327+++ hooks/charmhelpers/core/host.py 2014-10-02 12:57:31 +0000
2328@@ -12,7 +12,8 @@
2329 import string
2330 import subprocess
2331 import hashlib
2332-import apt_pkg
2333+import shutil
2334+from contextlib import contextmanager
2335
2336 from collections import OrderedDict
2337
2338@@ -53,7 +54,7 @@
2339 def service_running(service):
2340 """Determine whether a system service is running"""
2341 try:
2342- output = subprocess.check_output(['service', service, 'status'])
2343+ output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
2344 except subprocess.CalledProcessError:
2345 return False
2346 else:
2347@@ -63,6 +64,16 @@
2348 return False
2349
2350
2351+def service_available(service_name):
2352+ """Determine whether a system service is available"""
2353+ try:
2354+ subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
2355+ except subprocess.CalledProcessError as e:
2356+ return 'unrecognized service' not in e.output
2357+ else:
2358+ return True
2359+
2360+
2361 def adduser(username, password=None, shell='/bin/bash', system_user=False):
2362 """Add a user to the system"""
2363 try:
2364@@ -198,10 +209,15 @@
2365 return system_mounts
2366
2367
2368-def file_hash(path):
2369- """Generate a md5 hash of the contents of 'path' or None if not found """
2370+def file_hash(path, hash_type='md5'):
2371+ """
2372+ Generate a hash checksum of the contents of 'path' or None if not found.
2373+
2374+ :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
2375+ such as md5, sha1, sha256, sha512, etc.
2376+ """
2377 if os.path.exists(path):
2378- h = hashlib.md5()
2379+ h = getattr(hashlib, hash_type)()
2380 with open(path, 'r') as source:
2381 h.update(source.read()) # IGNORE:E1101 - it does have update
2382 return h.hexdigest()
2383@@ -209,16 +225,36 @@
2384 return None
2385
2386
2387+def check_hash(path, checksum, hash_type='md5'):
2388+ """
2389+ Validate a file using a cryptographic checksum.
2390+
2391+ :param str checksum: Value of the checksum used to validate the file.
2392+ :param str hash_type: Hash algorithm used to generate `checksum`.
2393+ Can be any hash algorithm supported by :mod:`hashlib`,
2394+ such as md5, sha1, sha256, sha512, etc.
2395+ :raises ChecksumError: If the file fails the checksum
2396+
2397+ """
2398+ actual_checksum = file_hash(path, hash_type)
2399+ if checksum != actual_checksum:
2400+ raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
2401+
2402+
2403+class ChecksumError(ValueError):
2404+ pass
2405+
2406+
2407 def restart_on_change(restart_map, stopstart=False):
2408 """Restart services based on configuration files changing
2409
2410- This function is used a decorator, for example
2411+ This function is used as a decorator, for example::
2412
2413 @restart_on_change({
2414 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
2415 })
2416 def ceph_client_changed():
2417- ...
2418+ pass # your code here
2419
2420 In this example, the cinder-api and cinder-volume services
2421 would be restarted if /etc/ceph/ceph.conf is changed by the
2422@@ -314,12 +350,40 @@
2423
2424 def cmp_pkgrevno(package, revno, pkgcache=None):
2425 '''Compare supplied revno with the revno of the installed package
2426- 1 => Installed revno is greater than supplied arg
2427- 0 => Installed revno is the same as supplied arg
2428- -1 => Installed revno is less than supplied arg
2429+
2430+ * 1 => Installed revno is greater than supplied arg
2431+ * 0 => Installed revno is the same as supplied arg
2432+ * -1 => Installed revno is less than supplied arg
2433+
2434 '''
2435- from charmhelpers.fetch import apt_cache
2436+<<<<<<< TREE
2437+ from charmhelpers.fetch import apt_cache
2438+=======
2439+ import apt_pkg
2440+ from charmhelpers.fetch import apt_cache
2441+>>>>>>> MERGE-SOURCE
2442 if not pkgcache:
2443 pkgcache = apt_cache()
2444 pkg = pkgcache[package]
2445 return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
2446+
2447+
2448+@contextmanager
2449+def chdir(d):
2450+ cur = os.getcwd()
2451+ try:
2452+ yield os.chdir(d)
2453+ finally:
2454+ os.chdir(cur)
2455+
2456+
2457+def chownr(path, owner, group):
2458+ uid = pwd.getpwnam(owner).pw_uid
2459+ gid = grp.getgrnam(group).gr_gid
2460+
2461+ for root, dirs, files in os.walk(path):
2462+ for name in dirs + files:
2463+ full = os.path.join(root, name)
2464+ broken_symlink = os.path.lexists(full) and not os.path.exists(full)
2465+ if not broken_symlink:
2466+ os.chown(full, uid, gid)
2467
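
A short sketch of the new checksum helpers; the path and digest are illustrative::

    from charmhelpers.core.host import check_hash, ChecksumError

    try:
        check_hash('/tmp/payload.tgz', 'b5bb9d8014a0f9b1...', hash_type='sha256')
    except ChecksumError:
        pass  # handle the corrupt download here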
2468=== added directory 'hooks/charmhelpers/core/services'
2469=== added file 'hooks/charmhelpers/core/services/__init__.py'
2470--- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000
2471+++ hooks/charmhelpers/core/services/__init__.py 2014-10-02 12:57:31 +0000
2472@@ -0,0 +1,2 @@
2473+from .base import *
2474+from .helpers import *
2475
2476=== added file 'hooks/charmhelpers/core/services/base.py'
2477--- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000
2478+++ hooks/charmhelpers/core/services/base.py 2014-10-02 12:57:31 +0000
2479@@ -0,0 +1,313 @@
2480+import os
2481+import re
2482+import json
2483+from collections import Iterable
2484+
2485+from charmhelpers.core import host
2486+from charmhelpers.core import hookenv
2487+
2488+
2489+__all__ = ['ServiceManager', 'ManagerCallback',
2490+ 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
2491+ 'service_restart', 'service_stop']
2492+
2493+
2494+class ServiceManager(object):
2495+ def __init__(self, services=None):
2496+ """
2497+ Register a list of services, given their definitions.
2498+
2499+ Service definitions are dicts in the following formats (all keys except
2500+ 'service' are optional)::
2501+
2502+ {
2503+ "service": <service name>,
2504+ "required_data": <list of required data contexts>,
2505+ "provided_data": <list of provided data contexts>,
2506+ "data_ready": <one or more callbacks>,
2507+ "data_lost": <one or more callbacks>,
2508+ "start": <one or more callbacks>,
2509+ "stop": <one or more callbacks>,
2510+ "ports": <list of ports to manage>,
2511+ }
2512+
2513+ The 'required_data' list should contain dicts of required data (or
2514+ dependency managers that act like dicts and know how to collect the data).
2515+ Only when all items in the 'required_data' list are populated are the lists
2516+ of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
2517+ information.
2518+
2519+ The 'provided_data' list should contain relation data providers, most likely
2520+ a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
2521+ that will indicate a set of data to set on a given relation.
2522+
2523+ The 'data_ready' value should be either a single callback, or a list of
2524+ callbacks, to be called when all items in 'required_data' pass `is_ready()`.
2525+ Each callback will be called with the service name as the only parameter.
2526+ After all of the 'data_ready' callbacks are called, the 'start' callbacks
2527+ are fired.
2528+
2529+ The 'data_lost' value should be either a single callback, or a list of
2530+ callbacks, to be called when a 'required_data' item no longer passes
2531+ `is_ready()`. Each callback will be called with the service name as the
2532+ only parameter. After all of the 'data_lost' callbacks are called,
2533+ the 'stop' callbacks are fired.
2534+
2535+ The 'start' value should be either a single callback, or a list of
2536+ callbacks, to be called when starting the service, after the 'data_ready'
2537+ callbacks are complete. Each callback will be called with the service
2538+ name as the only parameter. This defaults to
2539+ `[host.service_start, services.open_ports]`.
2540+
2541+ The 'stop' value should be either a single callback, or a list of
2542+ callbacks, to be called when stopping the service. If the service is
2543+ being stopped because it no longer has all of its 'required_data', this
2544+ will be called after all of the 'data_lost' callbacks are complete.
2545+ Each callback will be called with the service name as the only parameter.
2546+ This defaults to `[services.close_ports, host.service_stop]`.
2547+
2548+ The 'ports' value should be a list of ports to manage. The default
2549+ 'start' handler will open the ports after the service is started,
2550+ and the default 'stop' handler will close the ports prior to stopping
2551+ the service.
2552+
2553+
2554+ Examples:
2555+
2556+ The following registers an Upstart service called bingod that depends on
2557+ a mongodb relation and which runs a custom `db_migrate` function prior to
2558+ restarting the service, and a Runit service called spadesd::
2559+
2560+ manager = services.ServiceManager([
2561+ {
2562+ 'service': 'bingod',
2563+ 'ports': [80, 443],
2564+ 'required_data': [MongoRelation(), config(), {'my': 'data'}],
2565+ 'data_ready': [
2566+ services.template(source='bingod.conf'),
2567+ services.template(source='bingod.ini',
2568+ target='/etc/bingod.ini',
2569+ owner='bingo', perms=0400),
2570+ ],
2571+ },
2572+ {
2573+ 'service': 'spadesd',
2574+ 'data_ready': services.template(source='spadesd_run.j2',
2575+ target='/etc/sv/spadesd/run',
2576+ perms=0555),
2577+ 'start': runit_start,
2578+ 'stop': runit_stop,
2579+ },
2580+ ])
2581+ manager.manage()
2582+ """
2583+ self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
2584+ self._ready = None
2585+ self.services = {}
2586+ for service in services or []:
2587+ service_name = service['service']
2588+ self.services[service_name] = service
2589+
2590+ def manage(self):
2591+ """
2592+ Handle the current hook by doing The Right Thing with the registered services.
2593+ """
2594+ hook_name = hookenv.hook_name()
2595+ if hook_name == 'stop':
2596+ self.stop_services()
2597+ else:
2598+ self.provide_data()
2599+ self.reconfigure_services()
2600+ cfg = hookenv.config()
2601+ if cfg.implicit_save:
2602+ cfg.save()
2603+
2604+ def provide_data(self):
2605+ """
2606+ Set the relation data for each provider in the ``provided_data`` list.
2607+
2608+ A provider must have a `name` attribute, which indicates which relation
2609+ to set data on, and a `provide_data()` method, which returns a dict of
2610+ data to set.
2611+ """
2612+ hook_name = hookenv.hook_name()
2613+ for service in self.services.values():
2614+ for provider in service.get('provided_data', []):
2615+ if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
2616+ data = provider.provide_data()
2617+ _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
2618+ if _ready:
2619+ hookenv.relation_set(None, data)
2620+
2621+ def reconfigure_services(self, *service_names):
2622+ """
2623+ Update all files for one or more registered services, and,
2624+ if ready, optionally restart them.
2625+
2626+ If no service names are given, reconfigures all registered services.
2627+ """
2628+ for service_name in service_names or self.services.keys():
2629+ if self.is_ready(service_name):
2630+ self.fire_event('data_ready', service_name)
2631+ self.fire_event('start', service_name, default=[
2632+ service_restart,
2633+ manage_ports])
2634+ self.save_ready(service_name)
2635+ else:
2636+ if self.was_ready(service_name):
2637+ self.fire_event('data_lost', service_name)
2638+ self.fire_event('stop', service_name, default=[
2639+ manage_ports,
2640+ service_stop])
2641+ self.save_lost(service_name)
2642+
2643+ def stop_services(self, *service_names):
2644+ """
2645+ Stop one or more registered services, by name.
2646+
2647+ If no service names are given, stops all registered services.
2648+ """
2649+ for service_name in service_names or self.services.keys():
2650+ self.fire_event('stop', service_name, default=[
2651+ manage_ports,
2652+ service_stop])
2653+
2654+ def get_service(self, service_name):
2655+ """
2656+ Given the name of a registered service, return its service definition.
2657+ """
2658+ service = self.services.get(service_name)
2659+ if not service:
2660+ raise KeyError('Service not registered: %s' % service_name)
2661+ return service
2662+
2663+ def fire_event(self, event_name, service_name, default=None):
2664+ """
2665+ Fire a data_ready, data_lost, start, or stop event on a given service.
2666+ """
2667+ service = self.get_service(service_name)
2668+ callbacks = service.get(event_name, default)
2669+ if not callbacks:
2670+ return
2671+ if not isinstance(callbacks, Iterable):
2672+ callbacks = [callbacks]
2673+ for callback in callbacks:
2674+ if isinstance(callback, ManagerCallback):
2675+ callback(self, service_name, event_name)
2676+ else:
2677+ callback(service_name)
2678+
2679+ def is_ready(self, service_name):
2680+ """
2681+ Determine if a registered service is ready, by checking its 'required_data'.
2682+
2683+ A 'required_data' item can be any mapping type, and is considered ready
2684+ if `bool(item)` evaluates as True.
2685+ """
2686+ service = self.get_service(service_name)
2687+ reqs = service.get('required_data', [])
2688+ return all(bool(req) for req in reqs)
2689+
2690+ def _load_ready_file(self):
2691+ if self._ready is not None:
2692+ return
2693+ if os.path.exists(self._ready_file):
2694+ with open(self._ready_file) as fp:
2695+ self._ready = set(json.load(fp))
2696+ else:
2697+ self._ready = set()
2698+
2699+ def _save_ready_file(self):
2700+ if self._ready is None:
2701+ return
2702+ with open(self._ready_file, 'w') as fp:
2703+ json.dump(list(self._ready), fp)
2704+
2705+ def save_ready(self, service_name):
2706+ """
2707+ Save an indicator that the given service is now data_ready.
2708+ """
2709+ self._load_ready_file()
2710+ self._ready.add(service_name)
2711+ self._save_ready_file()
2712+
2713+ def save_lost(self, service_name):
2714+ """
2715+ Save an indicator that the given service is no longer data_ready.
2716+ """
2717+ self._load_ready_file()
2718+ self._ready.discard(service_name)
2719+ self._save_ready_file()
2720+
2721+ def was_ready(self, service_name):
2722+ """
2723+ Determine if the given service was previously data_ready.
2724+ """
2725+ self._load_ready_file()
2726+ return service_name in self._ready
2727+
2728+
2729+class ManagerCallback(object):
2730+ """
2731+ Special case of a callback that takes the `ServiceManager` instance
2732+ in addition to the service name.
2733+
2734+ Subclasses should implement `__call__` which should accept three parameters:
2735+
2736+ * `manager` The `ServiceManager` instance
2737+ * `service_name` The name of the service it's being triggered for
2738+ * `event_name` The name of the event that this callback is handling
2739+ """
2740+ def __call__(self, manager, service_name, event_name):
2741+ raise NotImplementedError()
2742+
2743+
2744+class PortManagerCallback(ManagerCallback):
2745+ """
2746+ Callback class that will open or close ports, for use as either
2747+ a start or stop action.
2748+ """
2749+ def __call__(self, manager, service_name, event_name):
2750+ service = manager.get_service(service_name)
2751+ new_ports = service.get('ports', [])
2752+ port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
2753+ if os.path.exists(port_file):
2754+ with open(port_file) as fp:
2755+ old_ports = fp.read().split(',')
2756+ for old_port in old_ports:
2757+ if bool(old_port):
2758+ old_port = int(old_port)
2759+ if old_port not in new_ports:
2760+ hookenv.close_port(old_port)
2761+ with open(port_file, 'w') as fp:
2762+ fp.write(','.join(str(port) for port in new_ports))
2763+ for port in new_ports:
2764+ if event_name == 'start':
2765+ hookenv.open_port(port)
2766+ elif event_name == 'stop':
2767+ hookenv.close_port(port)
2768+
2769+
2770+def service_stop(service_name):
2771+ """
2772+ Wrapper around host.service_stop to prevent spurious "unknown service"
2773+ messages in the logs.
2774+ """
2775+ if host.service_running(service_name):
2776+ host.service_stop(service_name)
2777+
2778+
2779+def service_restart(service_name):
2780+ """
2781+ Wrapper around host.service_restart to prevent spurious "unknown service"
2782+ messages in the logs.
2783+ """
2784+ if host.service_available(service_name):
2785+ if host.service_running(service_name):
2786+ host.service_restart(service_name)
2787+ else:
2788+ host.service_start(service_name)
2789+
2790+
2791+# Convenience aliases
2792+open_ports = close_ports = manage_ports = PortManagerCallback()
2793
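
In a charm built on this framework, every hook is typically a symlink to a single script that ends by handing control to the manager; a minimal sketch, reusing a SERVICES-style definition list as in the docstring example above::

    from charmhelpers.core.services import ServiceManager

    manager = ServiceManager(SERVICES)  # list of service definition dicts
    manager.manage()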
2794=== added file 'hooks/charmhelpers/core/services/helpers.py'
2795--- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000
2796+++ hooks/charmhelpers/core/services/helpers.py 2014-10-02 12:57:31 +0000
2797@@ -0,0 +1,239 @@
2798+import os
2799+import yaml
2800+from charmhelpers.core import hookenv
2801+from charmhelpers.core import templating
2802+
2803+from charmhelpers.core.services.base import ManagerCallback
2804+
2805+
2806+__all__ = ['RelationContext', 'TemplateCallback',
2807+ 'render_template', 'template']
2808+
2809+
2810+class RelationContext(dict):
2811+ """
2812+ Base class for a context generator that gets relation data from juju.
2813+
2814+ Subclasses must provide the attributes `name`, which is the name of the
2815+ interface of interest, `interface`, which is the type of the interface of
2816+ interest, and `required_keys`, which is the set of keys required for the
2817+ relation to be considered complete. The data for all interfaces matching
2818+ the `name` attribute that are complete will be used to populate the dictionary
2819+ values (see `get_data`, below).
2820+
2821+ The generated context will be namespaced under the relation :attr:`name`,
2822+ to prevent potential naming conflicts.
2823+
2824+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
2825+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
2826+ """
2827+ name = None
2828+ interface = None
2829+ required_keys = []
2830+
2831+ def __init__(self, name=None, additional_required_keys=None):
2832+ if name is not None:
2833+ self.name = name
2834+ if additional_required_keys is not None:
2835+ self.required_keys.extend(additional_required_keys)
2836+ self.get_data()
2837+
2838+ def __bool__(self):
2839+ """
2840+ Returns True if all of the required_keys are available.
2841+ """
2842+ return self.is_ready()
2843+
2844+ __nonzero__ = __bool__
2845+
2846+ def __repr__(self):
2847+ return super(RelationContext, self).__repr__()
2848+
2849+ def is_ready(self):
2850+ """
2851+ Returns True if all of the `required_keys` are available from any units.
2852+ """
2853+ ready = len(self.get(self.name, [])) > 0
2854+ if not ready:
2855+ hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
2856+ return ready
2857+
2858+ def _is_ready(self, unit_data):
2859+ """
2860+ Helper method that tests a set of relation data and returns True if
2861+ all of the `required_keys` are present.
2862+ """
2863+ return set(unit_data.keys()).issuperset(set(self.required_keys))
2864+
2865+ def get_data(self):
2866+ """
2867+ Retrieve the relation data for each unit involved in a relation and,
2868+ if complete, store it in a list under `self[self.name]`. This
2869+ is automatically called when the RelationContext is instantiated.
2870+
2871+ The units are sorted lexicographically first by the service ID, then by
2872+ the unit ID. Thus, if an interface has two other services, 'db:1'
2873+ and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
2874+ and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
2875+ set of data, the relation data for the units will be stored in the
2876+ order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
2877+
2878+ If you only care about a single unit on the relation, you can just
2879+ access it as `{{ interface[0]['key'] }}`. However, if you can at all
2880+ support multiple units on a relation, you should iterate over the list,
2881+ like::
2882+
2883+ {% for unit in interface -%}
2884+ {{ unit['key'] }}{% if not loop.last %},{% endif %}
2885+ {%- endfor %}
2886+
2887+ Note that since all sets of relation data from all related services and
2888+ units are in a single list, if you need to know which service or unit a
2889+ set of data came from, you'll need to extend this class to preserve
2890+ that information.
2891+ """
2892+ if not hookenv.relation_ids(self.name):
2893+ return
2894+
2895+ ns = self.setdefault(self.name, [])
2896+ for rid in sorted(hookenv.relation_ids(self.name)):
2897+ for unit in sorted(hookenv.related_units(rid)):
2898+ reldata = hookenv.relation_get(rid=rid, unit=unit)
2899+ if self._is_ready(reldata):
2900+ ns.append(reldata)
2901+
2902+ def provide_data(self):
2903+ """
2904+ Return data to be relation_set for this interface.
2905+ """
2906+ return {}
2907+
2908+
2909+class MysqlRelation(RelationContext):
2910+ """
2911+ Relation context for the `mysql` interface.
2912+
2913+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
2914+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
2915+ """
2916+ name = 'db'
2917+ interface = 'mysql'
2918+ required_keys = ['host', 'user', 'password', 'database']
2919+
2920+
2921+class HttpRelation(RelationContext):
2922+ """
2923+ Relation context for the `http` interface.
2924+
2925+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
2926+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
2927+ """
2928+ name = 'website'
2929+ interface = 'http'
2930+ required_keys = ['host', 'port']
2931+
2932+ def provide_data(self):
2933+ return {
2934+ 'host': hookenv.unit_get('private-address'),
2935+ 'port': 80,
2936+ }
2937+
2938+
2939+class RequiredConfig(dict):
2940+ """
2941+ Data context that loads config options with one or more mandatory options.
2942+
2943+ Once the required options have been changed from their default values, all
2944+ config options will be available, namespaced under `config` to prevent
2945+ potential naming conflicts (for example, between a config option and a
2946+ relation property).
2947+
2948+ :param list *args: List of options that must be changed from their default values.
2949+ """
2950+
2951+ def __init__(self, *args):
2952+ self.required_options = args
2953+ self['config'] = hookenv.config()
2954+ with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
2955+ self.config = yaml.load(fp).get('options', {})
2956+
2957+ def __bool__(self):
2958+ for option in self.required_options:
2959+ if option not in self['config']:
2960+ return False
2961+ current_value = self['config'][option]
2962+ default_value = self.config[option].get('default')
2963+ if current_value == default_value:
2964+ return False
2965+ if current_value in (None, '') and default_value in (None, ''):
2966+ return False
2967+ return True
2968+
2969+ def __nonzero__(self):
2970+ return self.__bool__()
2971+
2972+
2973+class StoredContext(dict):
2974+ """
2975+ A data context that always returns the data that it was first created with.
2976+
2977+ This is useful to do a one-time generation of things like passwords, which
2978+ will thereafter use the same value that was originally generated, instead
2979+ of generating a new value each time it is run.
2980+ """
2981+ def __init__(self, file_name, config_data):
2982+ """
2983+ If the file exists, populate `self` with the data from the file.
2984+ Otherwise, populate with the given data and persist it to the file.
2985+ """
2986+ if os.path.exists(file_name):
2987+ self.update(self.read_context(file_name))
2988+ else:
2989+ self.store_context(file_name, config_data)
2990+ self.update(config_data)
2991+
2992+ def store_context(self, file_name, config_data):
2993+ if not os.path.isabs(file_name):
2994+ file_name = os.path.join(hookenv.charm_dir(), file_name)
2995+ with open(file_name, 'w') as file_stream:
2996+ os.fchmod(file_stream.fileno(), 0600)
2997+ yaml.dump(config_data, file_stream)
2998+
2999+ def read_context(self, file_name):
3000+ if not os.path.isabs(file_name):
3001+ file_name = os.path.join(hookenv.charm_dir(), file_name)
3002+ with open(file_name, 'r') as file_stream:
3003+ data = yaml.load(file_stream)
3004+ if not data:
3005+ raise OSError("%s is empty" % file_name)
3006+ return data
3007+
3008+
3009+class TemplateCallback(ManagerCallback):
3010+ """
3011+ Callback class that will render a Jinja2 template, for use as a ready action.
3012+
3013+ :param str source: The template source file, relative to `$CHARM_DIR/templates`
3014+ :param str target: The target to write the rendered template to
3015+ :param str owner: The owner of the rendered file
3016+ :param str group: The group of the rendered file
3017+ :param int perms: The permissions of the rendered file
3018+ """
3019+ def __init__(self, source, target, owner='root', group='root', perms=0444):
3020+ self.source = source
3021+ self.target = target
3022+ self.owner = owner
3023+ self.group = group
3024+ self.perms = perms
3025+
3026+ def __call__(self, manager, service_name, event_name):
3027+ service = manager.get_service(service_name)
3028+ context = {}
3029+ for ctx in service.get('required_data', []):
3030+ context.update(ctx)
3031+ templating.render(self.source, self.target, context,
3032+ self.owner, self.group, self.perms)
3033+
3034+
3035+# Convenience aliases for templates
3036+render_template = template = TemplateCallback
3037
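
A sketch of a charm-specific RelationContext subclass; the interface name and keys are illustrative::

    from charmhelpers.core.services.helpers import RelationContext

    class AmqpRelation(RelationContext):
        name = 'amqp'
        interface = 'rabbitmq'
        required_keys = ['hostname', 'password']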
3038=== added file 'hooks/charmhelpers/core/templating.py'
3039--- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000
3040+++ hooks/charmhelpers/core/templating.py 2014-10-02 12:57:31 +0000
3041@@ -0,0 +1,51 @@
3042+import os
3043+
3044+from charmhelpers.core import host
3045+from charmhelpers.core import hookenv
3046+
3047+
3048+def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
3049+ """
3050+ Render a template.
3051+
3052+ The `source` path, if not absolute, is relative to the `templates_dir`.
3053+
3054+ The `target` path should be absolute.
3055+
3056+ The context should be a dict containing the values to be replaced in the
3057+ template.
3058+
3059+ The `owner`, `group`, and `perms` options will be passed to `write_file`.
3060+
3061+ If omitted, `templates_dir` defaults to the `templates` folder in the charm.
3062+
3063+ Note: Using this requires python-jinja2; if it is not installed, calling
3064+ this will attempt to use charmhelpers.fetch.apt_install to install it.
3065+ """
3066+ try:
3067+ from jinja2 import FileSystemLoader, Environment, exceptions
3068+ except ImportError:
3069+ try:
3070+ from charmhelpers.fetch import apt_install
3071+ except ImportError:
3072+ hookenv.log('Could not import jinja2, and could not import '
3073+ 'charmhelpers.fetch to install it',
3074+ level=hookenv.ERROR)
3075+ raise
3076+ apt_install('python-jinja2', fatal=True)
3077+ from jinja2 import FileSystemLoader, Environment, exceptions
3078+
3079+ if templates_dir is None:
3080+ templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
3081+ loader = Environment(loader=FileSystemLoader(templates_dir))
3082+ try:
3083+ source = source
3084+ template = loader.get_template(source)
3085+ except exceptions.TemplateNotFound as e:
3086+ hookenv.log('Could not load template %s from %s.' %
3087+ (source, templates_dir),
3088+ level=hookenv.ERROR)
3089+ raise e
3090+ content = template.render(context)
3091+ host.mkdir(os.path.dirname(target))
3092+ host.write_file(target, content, owner, group, perms)
3093
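
A minimal usage sketch for the new render() helper; the template name, target path and context are illustrative::

    from charmhelpers.core.templating import render

    render('nova.conf', '/etc/nova/nova.conf', {'verbose': True},
           owner='nova', group='nova', perms=0640)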
3094=== modified file 'hooks/charmhelpers/fetch/__init__.py'
3095--- hooks/charmhelpers/fetch/__init__.py 2014-08-27 07:15:10 +0000
3096+++ hooks/charmhelpers/fetch/__init__.py 2014-10-02 12:57:31 +0000
3097@@ -1,4 +1,5 @@
3098 import importlib
3099+from tempfile import NamedTemporaryFile
3100 import time
3101 from yaml import safe_load
3102 from charmhelpers.core.host import (
3103@@ -13,7 +14,6 @@
3104 config,
3105 log,
3106 )
3107-import apt_pkg
3108 import os
3109
3110
3111@@ -56,6 +56,15 @@
3112 'icehouse/proposed': 'precise-proposed/icehouse',
3113 'precise-icehouse/proposed': 'precise-proposed/icehouse',
3114 'precise-proposed/icehouse': 'precise-proposed/icehouse',
3115+ # Juno
3116+ 'juno': 'trusty-updates/juno',
3117+ 'trusty-juno': 'trusty-updates/juno',
3118+ 'trusty-juno/updates': 'trusty-updates/juno',
3119+ 'trusty-updates/juno': 'trusty-updates/juno',
3120+ 'juno/proposed': 'trusty-proposed/juno',
3121+ 'juno/proposed': 'trusty-proposed/juno',
3122+ 'trusty-juno/proposed': 'trusty-proposed/juno',
3123+ 'trusty-proposed/juno': 'trusty-proposed/juno',
3124 }
3125
3126 # The order of this list is very important. Handlers should be listed in from
3127@@ -108,8 +117,12 @@
3128
3129 def filter_installed_packages(packages):
3130 """Returns a list of packages that require installation"""
3131+<<<<<<< TREE
3132
3133 cache = apt_cache()
3134+=======
3135+ cache = apt_cache()
3136+>>>>>>> MERGE-SOURCE
3137 _pkgs = []
3138 for package in packages:
3139 try:
3140@@ -122,15 +135,28 @@
3141 return _pkgs
3142
3143
3144-def apt_cache(in_memory=True):
3145- """Build and return an apt cache"""
3146- apt_pkg.init()
3147- if in_memory:
3148- apt_pkg.config.set("Dir::Cache::pkgcache", "")
3149- apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
3150- return apt_pkg.Cache()
3151-
3152-
3153+<<<<<<< TREE
3154+def apt_cache(in_memory=True):
3155+ """Build and return an apt cache"""
3156+ apt_pkg.init()
3157+ if in_memory:
3158+ apt_pkg.config.set("Dir::Cache::pkgcache", "")
3159+ apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
3160+ return apt_pkg.Cache()
3161+
3162+
3163+=======
3164+def apt_cache(in_memory=True):
3165+ """Build and return an apt cache"""
3166+ import apt_pkg
3167+ apt_pkg.init()
3168+ if in_memory:
3169+ apt_pkg.config.set("Dir::Cache::pkgcache", "")
3170+ apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
3171+ return apt_pkg.Cache()
3172+
3173+
3174+>>>>>>> MERGE-SOURCE
3175 def apt_install(packages, options=None, fatal=False):
3176 """Install one or more packages"""
3177 if options is None:
3178@@ -196,6 +222,28 @@
3179
3180
3181 def add_source(source, key=None):
3182+ """Add a package source to this system.
3183+
3184+ @param source: a URL or sources.list entry, as supported by
3185+ add-apt-repository(1). Examples::
3186+
3187+ ppa:charmers/example
3188+ deb https://stub:key@private.example.com/ubuntu trusty main
3189+
3190+ In addition:
3191+ 'proposed:' may be used to enable the standard 'proposed'
3192+ pocket for the release.
3193+ 'cloud:' may be used to activate official cloud archive pockets,
3194+ such as 'cloud:icehouse'
3195+
3196+ @param key: A key to be added to the system's APT keyring and used
3197+ to verify the signatures on packages. Ideally, this should be an
3198+ ASCII format GPG public key including the block headers. A GPG key
3199+ id may also be used, but be aware that only insecure protocols are
3200+ available to retrieve the actual public key from a public keyserver
3201+ placing your Juju environment at risk. ppa and cloud archive keys
3202+ are securely added automatically, so should not be provided.
3203+ """
3204 if source is None:
3205 log('Source is not present. Skipping')
3206 return
3207@@ -220,61 +268,96 @@
3208 release = lsb_release()['DISTRIB_CODENAME']
3209 with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
3210 apt.write(PROPOSED_POCKET.format(release))
3211+ else:
3212+ raise SourceConfigError("Unknown source: {!r}".format(source))
3213+
3214 if key:
3215- subprocess.check_call(['apt-key', 'adv', '--keyserver',
3216- 'hkp://keyserver.ubuntu.com:80', '--recv',
3217- key])
3218+ if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
3219+ with NamedTemporaryFile() as key_file:
3220+ key_file.write(key)
3221+ key_file.flush()
3222+ key_file.seek(0)
3223+ subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
3224+ else:
3225+ # Note that hkp: is in no way a secure protocol. Using a
3226+ # GPG key id is pointless from a security POV unless you
3227+ # absolutely trust your network and DNS.
3228+ subprocess.check_call(['apt-key', 'adv', '--keyserver',
3229+ 'hkp://keyserver.ubuntu.com:80', '--recv',
3230+ key])
3231
3232
3233 def configure_sources(update=False,
3234 sources_var='install_sources',
3235 keys_var='install_keys'):
3236 """
3237- Configure multiple sources from charm configuration
3238+ Configure multiple sources from charm configuration.
3239+
3240+ The lists are encoded as yaml fragments in the configuration.
3241+ The fragment needs to be included as a string. Sources and their
3242+ corresponding keys are of the types supported by add_source().
3243
3244 Example config:
3245- install_sources:
3246+ install_sources: |
3247 - "ppa:foo"
3248 - "http://example.com/repo precise main"
3249- install_keys:
3250+ install_keys: |
3251 - null
3252 - "a1b2c3d4"
3253
3254 Note that 'null' (a.k.a. None) should not be quoted.
3255 """
3256- sources = safe_load(config(sources_var))
3257- keys = config(keys_var)
3258- if keys is not None:
3259- keys = safe_load(keys)
3260- if isinstance(sources, basestring) and (
3261- keys is None or isinstance(keys, basestring)):
3262- add_source(sources, keys)
3263+ sources = safe_load((config(sources_var) or '').strip()) or []
3264+ keys = safe_load((config(keys_var) or '').strip()) or None
3265+
3266+ if isinstance(sources, basestring):
3267+ sources = [sources]
3268+
3269+ if keys is None:
3270+ for source in sources:
3271+ add_source(source, None)
3272 else:
3273- if not len(sources) == len(keys):
3274- msg = 'Install sources and keys lists are different lengths'
3275- raise SourceConfigError(msg)
3276- for src_num in range(len(sources)):
3277- add_source(sources[src_num], keys[src_num])
3278+ if isinstance(keys, basestring):
3279+ keys = [keys]
3280+
3281+ if len(sources) != len(keys):
3282+ raise SourceConfigError(
3283+ 'Install sources and keys lists are different lengths')
3284+ for source, key in zip(sources, keys):
3285+ add_source(source, key)
3286 if update:
3287 apt_update(fatal=True)
3288
3289
3290-def install_remote(source):
3291+def install_remote(source, *args, **kwargs):
3292 """
3293 Install a file tree from a remote source
3294
3295 The specified source should be a url of the form:
3296 scheme://[host]/path[#[option=value][&...]]
3297
3298- Schemes supported are based on this modules submodules
3299- Options supported are submodule-specific"""
3300+ Schemes supported are based on this module's submodules.
3301+ Options supported are submodule-specific.
3302+ Additional arguments are passed through to the submodule.
3303+
3304+ For example::
3305+
3306+ dest = install_remote('http://example.com/archive.tgz',
3307+ checksum='deadbeef',
3308+ hash_type='sha1')
3309+
3310+ This will download `archive.tgz`, validate it using SHA1 and, if
3311+ the file is ok, extract it and return the directory in which it
3312+ was extracted. If the checksum fails, it will raise
3313+ :class:`charmhelpers.core.host.ChecksumError`.
3314+ """
3315 # We ONLY check for True here because can_handle may return a string
3316 # explaining why it can't handle a given source.
3317 handlers = [h for h in plugins() if h.can_handle(source) is True]
3318 installed_to = None
3319 for handler in handlers:
3320 try:
3321- installed_to = handler.install(source)
3322+ installed_to = handler.install(source, *args, **kwargs)
3323 except UnhandledSource:
3324 pass
3325 if not installed_to:
3326
3327=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
3328--- hooks/charmhelpers/fetch/archiveurl.py 2014-04-04 16:45:38 +0000
3329+++ hooks/charmhelpers/fetch/archiveurl.py 2014-10-02 12:57:31 +0000
3330@@ -1,6 +1,8 @@
3331 import os
3332 import urllib2
3333+from urllib import urlretrieve
3334 import urlparse
3335+import hashlib
3336
3337 from charmhelpers.fetch import (
3338 BaseFetchHandler,
3339@@ -10,11 +12,19 @@
3340 get_archive_handler,
3341 extract,
3342 )
3343-from charmhelpers.core.host import mkdir
3344+from charmhelpers.core.host import mkdir, check_hash
3345
3346
3347 class ArchiveUrlFetchHandler(BaseFetchHandler):
3348- """Handler for archives via generic URLs"""
3349+ """
3350+ Handler to download archive files from arbitrary URLs.
3351+
3352+ Can fetch from http, https, ftp, and file URLs.
3353+
3354+ Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
3355+
3356+ Installs the contents of the archive in $CHARM_DIR/fetched/.
3357+ """
3358 def can_handle(self, source):
3359 url_parts = self.parse_url(source)
3360 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
3361@@ -24,6 +34,12 @@
3362 return False
3363
3364 def download(self, source, dest):
3365+ """
3366+ Download an archive file.
3367+
3368+ :param str source: URL pointing to an archive file.
3369+ :param str dest: Local path location to download archive file to.
3370+ """
3371         # propagate all exceptions
3372 # URLError, OSError, etc
3373 proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
3374@@ -48,7 +64,30 @@
3375 os.unlink(dest)
3376 raise e
3377
3378- def install(self, source):
3379+ # Mandatory file validation via Sha1 or MD5 hashing.
3380+ def download_and_validate(self, url, hashsum, validate="sha1"):
3381+ tempfile, headers = urlretrieve(url)
3382+ check_hash(tempfile, hashsum, validate)
3383+ return tempfile
3384+
3385+ def install(self, source, dest=None, checksum=None, hash_type='sha1'):
3386+ """
3387+ Download and install an archive file, with optional checksum validation.
3388+
3389+ The checksum can also be given on the `source` URL's fragment.
3390+ For example::
3391+
3392+ handler.install('http://example.com/file.tgz#sha1=deadbeef')
3393+
3394+ :param str source: URL pointing to an archive file.
3395+ :param str dest: Local destination path to install to. If not given,
3396+        installs to `$CHARM_DIR/fetched/`.
3397+ :param str checksum: If given, validate the archive file after download.
3398+ :param str hash_type: Algorithm used to generate `checksum`.
3399+        Can be any hash algorithm supported by :mod:`hashlib`,
3400+ such as md5, sha1, sha256, sha512, etc.
3401+
3402+ """
3403 url_parts = self.parse_url(source)
3404 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
3405 if not os.path.exists(dest_dir):
3406@@ -60,4 +99,10 @@
3407 raise UnhandledSource(e.reason)
3408 except OSError as e:
3409 raise UnhandledSource(e.strerror)
3410- return extract(dld_file)
3411+ options = urlparse.parse_qs(url_parts.fragment)
3412+ for key, value in options.items():
3413+ if key in hashlib.algorithms:
3414+                check_hash(dld_file, value[0], key)  # parse_qs values are lists
3415+ if checksum:
3416+ check_hash(dld_file, checksum, hash_type)
3417+ return extract(dld_file, dest)
3418
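For illustration, the keyword form is equivalent to the URL-fragment form shown in the docstring (the checksum value is a placeholder):

    handler = ArchiveUrlFetchHandler()
    dest = handler.install('http://example.com/file.tgz',
                           checksum='deadbeef', hash_type='sha1')
    # same effect as: handler.install('http://example.com/file.tgz#sha1=deadbeef')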
3419=== added symlink 'hooks/neutron-plugin-relation-changed'
3420=== target is u'nova_compute_hooks.py'
3421=== added symlink 'hooks/neutron-plugin-relation-departed'
3422=== target is u'nova_compute_hooks.py'
3423=== added symlink 'hooks/neutron-plugin-relation-joined'
3424=== target is u'nova_compute_hooks.py'
3425=== modified file 'hooks/nova_compute_context.py'
3426--- hooks/nova_compute_context.py 2014-06-01 14:26:17 +0000
3427+++ hooks/nova_compute_context.py 2014-10-02 12:57:31 +0000
3428@@ -17,7 +17,7 @@
3429
3430 from charmhelpers.contrib.openstack.utils import get_host_ip, os_release
3431 from charmhelpers.contrib.network.ovs import add_bridge
3432-
3433+from charmhelpers.contrib.network.ip import get_address_in_network
3434
3435 # This is just a label and it must be consistent across
3436 # nova-compute nodes to support live migration.
3437@@ -310,7 +310,42 @@
3438
3439 if self.restart_trigger():
3440 ctxt['restart_trigger'] = self.restart_trigger()
3441-
3442+ return ctxt
3443+
3444+
3445+class InstanceConsoleContext(context.OSContextGenerator):
3446+ interfaces = []
3447+
3448+ def get_console_info(self, proto, **kwargs):
3449+ console_settings = {
3450+ proto + '_proxy_address':
3451+ relation_get('console_proxy_%s_address' % (proto), **kwargs),
3452+ proto + '_proxy_host':
3453+ relation_get('console_proxy_%s_host' % (proto), **kwargs),
3454+ proto + '_proxy_port':
3455+ relation_get('console_proxy_%s_port' % (proto), **kwargs),
3456+ }
3457+ return console_settings
3458+
3459+ def __call__(self):
3460+ ctxt = {}
3461+ for rid in relation_ids('cloud-compute'):
3462+ for unit in related_units(rid):
3463+ rel = {'rid': rid, 'unit': unit}
3464+ proto = relation_get('console_access_protocol', **rel)
3465+ if not proto:
3466+ # only bother with units that have a proto set.
3467+ continue
3468+ ctxt['console_keymap'] = relation_get('console_keymap', **rel)
3469+ ctxt['console_access_protocol'] = proto
3470+ ctxt['console_vnc_type'] = True if 'vnc' in proto else False
3471+ if proto == 'vnc':
3472+ ctxt = dict(ctxt, **self.get_console_info('xvpvnc', **rel))
3473+ ctxt = dict(ctxt, **self.get_console_info('novnc', **rel))
3474+ else:
3475+ ctxt = dict(ctxt, **self.get_console_info(proto, **rel))
3476+ break
3477+ ctxt['console_listen_addr'] = get_host_ip(unit_get('private-address'))
3478 return ctxt
3479
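For illustration, a sketch of the context this generator yields for a unit advertising spice (all values are placeholders):

    # {'console_access_protocol': 'spice',
    #  'console_keymap': 'en-us',
    #  'console_vnc_type': False,
    #  'spice_proxy_address': 'http://10.5.0.10:6082/spice_auto.html',
    #  'spice_proxy_host': '10.5.0.10',
    #  'spice_proxy_port': '6082',
    #  'console_listen_addr': '10.5.0.21'}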
3480
3481@@ -344,5 +379,15 @@
3482
3483 self._ensure_bridge()
3484
3485- ovs_ctxt['local_ip'] = get_host_ip(unit_get('private-address'))
3486+ ovs_ctxt['local_ip'] = \
3487+ get_address_in_network(config('os-data-network'),
3488+ get_host_ip(unit_get('private-address')))
3489 return ovs_ctxt
3490+
3491+ def __call__(self):
3492+ ctxt = super(NeutronComputeContext, self).__call__()
3493+ # NOTE(jamespage) support override of neutron security via config
3494+ if config('disable-neutron-security-groups') is not None:
3495+ ctxt['disable_neutron_security_groups'] = \
3496+ config('disable-neutron-security-groups')
3497+ return ctxt
3498
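A sketch of the override in effect, assuming the new boolean option is defined in this branch's config.yaml; the value simply flows through to the ml2_conf.ini template further below:

    # after: juju set nova-compute disable-neutron-security-groups=true
    ctxt = NeutronComputeContext()()
    ctxt.get('disable_neutron_security_groups')   # -> True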
3499=== modified file 'hooks/nova_compute_hooks.py'
3500--- hooks/nova_compute_hooks.py 2014-06-01 14:26:17 +0000
3501+++ hooks/nova_compute_hooks.py 2014-10-02 12:57:31 +0000
3502@@ -15,7 +15,6 @@
3503 unit_get,
3504 UnregisteredHookError,
3505 )
3506-
3507 from charmhelpers.core.host import (
3508 restart_on_change,
3509 )
3510@@ -33,7 +32,6 @@
3511
3512 from charmhelpers.contrib.storage.linux.ceph import ensure_ceph_keyring
3513 from charmhelpers.payload.execd import execd_preinstall
3514-
3515 from nova_compute_utils import (
3516 create_libvirt_secret,
3517 determine_packages,
3518@@ -51,10 +49,16 @@
3519 QUANTUM_CONF, NEUTRON_CONF,
3520 ceph_config_file, CEPH_SECRET,
3521 enable_shell, disable_shell,
3522- fix_path_ownership
3523+ fix_path_ownership,
3524+ assert_charm_supports_ipv6
3525+)
3526+
3527+from charmhelpers.contrib.network.ip import (
3528+ get_ipv6_addr
3529 )
3530
3531 from nova_compute_context import CEPH_SECRET_UUID
3532+from socket import gethostname
3533
3534 hooks = Hooks()
3535 CONFIGS = register_configs()
3536@@ -71,6 +75,9 @@
3537 @hooks.hook('config-changed')
3538 @restart_on_change(restart_map())
3539 def config_changed():
3540+ if config('prefer-ipv6'):
3541+ assert_charm_supports_ipv6()
3542+
3543 global CONFIGS
3544 if openstack_upgrade_available('nova-common'):
3545 CONFIGS = do_openstack_upgrade()
3546@@ -110,11 +117,12 @@
3547 log('amqp relation incomplete. Peer not ready?')
3548 return
3549 CONFIGS.write(NOVA_CONF)
3550-
3551- if network_manager() == 'quantum' and neutron_plugin() == 'ovs':
3552- CONFIGS.write(QUANTUM_CONF)
3553- if network_manager() == 'neutron' and neutron_plugin() == 'ovs':
3554- CONFIGS.write(NEUTRON_CONF)
3555+ # No need to write NEUTRON_CONF if neutron-plugin is managing it
3556+ if not relation_ids('neutron-plugin'):
3557+ if network_manager() == 'quantum' and neutron_plugin() == 'ovs':
3558+ CONFIGS.write(QUANTUM_CONF)
3559+ if network_manager() == 'neutron' and neutron_plugin() == 'ovs':
3560+ CONFIGS.write(NEUTRON_CONF)
3561
3562
3563 @hooks.hook('shared-db-relation-joined')
3564@@ -173,18 +181,22 @@
3565
3566 @hooks.hook('cloud-compute-relation-joined')
3567 def compute_joined(rid=None):
3568+ # NOTE(james-page) in MAAS environments the actual hostname is a CNAME
3569+ # record so won't get scanned based on private-address which is an IP
3570+ # add the hostname configured locally to the relation.
3571+ settings = {
3572+ 'hostname': gethostname()
3573+ }
3574+ if config('prefer-ipv6'):
3575+ settings = {'private-address': get_ipv6_addr()[0]}
3576 if migration_enabled():
3577 auth_type = config('migration-auth-type')
3578- settings = {
3579- 'migration_auth_type': auth_type
3580- }
3581+ settings['migration_auth_type'] = auth_type
3582 if auth_type == 'ssh':
3583 settings['ssh_public_key'] = public_ssh_key()
3584 relation_set(relation_id=rid, **settings)
3585 if config('enable-resize'):
3586- settings = {
3587- 'nova_ssh_public_key': public_ssh_key(user='nova')
3588- }
3589+ settings['nova_ssh_public_key'] = public_ssh_key(user='nova')
3590 relation_set(relation_id=rid, **settings)
3591
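For illustration, the relation data set by compute_joined() with ssh live-migration and resize enabled (hostname and key material are placeholders):

    # hostname:             juju-machine-3
    # migration_auth_type:  ssh
    # ssh_public_key:       ssh-rsa AAAA... root@juju-machine-3
    # nova_ssh_public_key:  ssh-rsa AAAA... nova@juju-machine-3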
3592
3593
3594=== modified file 'hooks/nova_compute_utils.py'
3595--- hooks/nova_compute_utils.py 2014-06-01 14:26:17 +0000
3596+++ hooks/nova_compute_utils.py 2014-10-02 12:57:31 +0000
3597@@ -5,8 +5,18 @@
3598 from copy import deepcopy
3599 from subprocess import check_call, check_output
3600
3601-from charmhelpers.fetch import apt_update, apt_upgrade, apt_install
3602-from charmhelpers.core.host import mkdir, service_restart
3603+from charmhelpers.fetch import (
3604+ apt_update,
3605+ apt_upgrade,
3606+ apt_install
3607+)
3608+
3609+from charmhelpers.core.host import (
3610+ mkdir,
3611+ service_restart,
3612+ lsb_release
3613+)
3614+
3615 from charmhelpers.core.hookenv import (
3616 config,
3617 log,
3618@@ -14,7 +24,7 @@
3619 relation_ids,
3620 relation_get,
3621 DEBUG,
3622- service_name
3623+ service_name,
3624 )
3625
3626 from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute
3627@@ -32,6 +42,7 @@
3628 NovaComputeLibvirtContext,
3629 NovaComputeCephContext,
3630 NeutronComputeContext,
3631+ InstanceConsoleContext,
3632 )
3633
3634 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
3635@@ -77,7 +88,8 @@
3636 context.SubordinateConfigContext(
3637 interface='nova-ceilometer',
3638 service='nova',
3639- config_file=NOVA_CONF)],
3640+ config_file=NOVA_CONF),
3641+ InstanceConsoleContext(), ],
3642 },
3643 }
3644
3645@@ -126,6 +138,15 @@
3646 'lxc': ['nova-compute-lxc'],
3647 }
3648
3649+# Maps virt-type config to a libvirt URI.
3650+LIBVIRT_URIS = {
3651+ 'kvm': 'qemu:///system',
3652+ 'qemu': 'qemu:///system',
3653+ 'xen': 'xen:///',
3654+ 'uml': 'uml:///system',
3655+ 'lxc': 'lxc:///',
3656+}
3657+
3658
3659 def ceph_config_file():
3660 return CHARM_CEPH_CONF.format(service_name())
3661@@ -153,7 +174,7 @@
3662 # depending on the plugin used.
3663 # NOTE(james-page): only required for ovs plugin right now
3664 if net_manager in ['neutron', 'quantum']:
3665- if plugin == 'ovs':
3666+ if not relation_ids('neutron-plugin') and plugin == 'ovs':
3667 if net_manager == 'quantum':
3668 nm_rsc = QUANTUM_RESOURCES
3669 if net_manager == 'neutron':
3670@@ -337,26 +358,46 @@
3671 """Import SSH authorized_keys + known_hosts from a cloud-compute relation
3672 and store in user's $HOME/.ssh.
3673 """
3674+ known_hosts = []
3675+ authorized_keys = []
3676 if prefix:
3677- hosts = relation_get('{}_known_hosts'.format(prefix))
3678- auth_keys = relation_get('{}_authorized_keys'.format(prefix))
3679+ known_hosts_index = relation_get(
3680+ '{}_known_hosts_max_index'.format(prefix))
3681+ if known_hosts_index:
3682+ for index in range(0, int(known_hosts_index)):
3683+ known_hosts.append(relation_get(
3684+ '{}_known_hosts_{}'.format(prefix, index)))
3685+ authorized_keys_index = relation_get(
3686+ '{}_authorized_keys_max_index'.format(prefix))
3687+ if authorized_keys_index:
3688+ for index in range(0, int(authorized_keys_index)):
3689+ authorized_keys.append(relation_get(
3690+ '{}_authorized_keys_{}'.format(prefix, index)))
3691 else:
3692 # XXX: Should this be managed via templates + contexts?
3693- hosts = relation_get('known_hosts')
3694- auth_keys = relation_get('authorized_keys')
3695+ known_hosts_index = relation_get('known_hosts_max_index')
3696+ if known_hosts_index:
3697+ for index in range(0, int(known_hosts_index)):
3698+ known_hosts.append(relation_get(
3699+ 'known_hosts_{}'.format(index)))
3700+ authorized_keys_index = relation_get('authorized_keys_max_index')
3701+ if authorized_keys_index:
3702+ for index in range(0, int(authorized_keys_index)):
3703+ authorized_keys.append(relation_get(
3704+ 'authorized_keys_{}'.format(index)))
3705
3706- # XXX: Need to fix charm-helpers to return None for empty settings,
3707- # in all cases.
3708- if not hosts or not auth_keys:
3709+ # XXX: Should partial return of known_hosts or authorized_keys
3710+    # be allowed?
3711+    if not known_hosts or not authorized_keys:
3712 return
3713-
3714 dest = os.path.join(pwd.getpwnam(user).pw_dir, '.ssh')
3715 log('Saving new known_hosts and authorized_keys file to: %s.' % dest)
3716-
3717+ with open(os.path.join(dest, 'known_hosts'), 'wb') as _hosts:
3718+ for index in range(0, int(known_hosts_index)):
3719+ _hosts.write('{}\n'.format(known_hosts[index]))
3720 with open(os.path.join(dest, 'authorized_keys'), 'wb') as _keys:
3721- _keys.write(b64decode(auth_keys))
3722- with open(os.path.join(dest, 'known_hosts'), 'wb') as _hosts:
3723- _hosts.write(b64decode(hosts))
3724+ for index in range(0, int(authorized_keys_index)):
3725+ _keys.write('{}\n'.format(authorized_keys[index]))
3726
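A sketch of the indexed relation layout the loops above consume, replacing the old single base64-encoded known_hosts/authorized_keys values (all values are placeholders):

    # known_hosts_max_index:     '2'
    # known_hosts_0:             '10.5.0.1 ssh-rsa AAAA...'
    # known_hosts_1:             '10.5.0.2 ssh-rsa AAAA...'
    # authorized_keys_max_index: '1'
    # authorized_keys_0:         'ssh-rsa AAAA... nova@cloud-controller'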
3727
3728 def do_openstack_upgrade():
3729@@ -399,14 +440,15 @@
3730
3731
3732 def create_libvirt_secret(secret_file, secret_uuid, key):
3733- if secret_uuid in check_output(['virsh', 'secret-list']):
3734+ uri = LIBVIRT_URIS[config('virt-type')]
3735+ if secret_uuid in check_output(['virsh', '-c', uri, 'secret-list']):
3736 log('Libvirt secret already exists for uuid %s.' % secret_uuid,
3737 level=DEBUG)
3738 return
3739 log('Defining new libvirt secret for uuid %s.' % secret_uuid)
3740- cmd = ['virsh', 'secret-define', '--file', secret_file]
3741+ cmd = ['virsh', '-c', uri, 'secret-define', '--file', secret_file]
3742 check_call(cmd)
3743- cmd = ['virsh', 'secret-set-value', '--secret', secret_uuid,
3744+ cmd = ['virsh', '-c', uri, 'secret-set-value', '--secret', secret_uuid,
3745 '--base64', key]
3746 check_call(cmd)
3747
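For illustration, with virt-type 'kvm' the calls above reduce to the following virsh invocations (uuid, file path and key are placeholders):

    # virsh -c qemu:///system secret-list
    # virsh -c qemu:///system secret-define --file /etc/ceph/secret.xml
    # virsh -c qemu:///system secret-set-value --secret <uuid> --base64 <key>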
3748@@ -424,3 +466,10 @@
3749 def fix_path_ownership(path, user='nova'):
3750 cmd = ['chown', user, path]
3751 check_call(cmd)
3752+
3753+
3754+def assert_charm_supports_ipv6():
3755+ """Check whether we are able to support charms ipv6."""
3756+ if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty":
3757+ raise Exception("IPv6 is not supported in the charms for Ubuntu "
3758+ "versions less than Trusty 14.04")
3759
3760=== modified file 'metadata.yaml'
3761--- metadata.yaml 2014-09-02 15:00:20 +0000
3762+++ metadata.yaml 2014-10-02 12:57:31 +0000
3763@@ -27,6 +27,9 @@
3764 nova-ceilometer:
3765 interface: nova-ceilometer
3766 scope: container
3767+ neutron-plugin:
3768+ interface: neutron-plugin
3769+ scope: container
3770 peers:
3771 compute-peer:
3772 interface: nova
3773
3774=== modified file 'templates/grizzly/nova.conf'
3775--- templates/grizzly/nova.conf 2014-04-16 08:26:38 +0000
3776+++ templates/grizzly/nova.conf 2014-10-02 12:57:31 +0000
3777@@ -13,15 +13,11 @@
3778 state_path=/var/lib/nova
3779 lock_path=/var/lock/nova
3780 force_dhcp_release=True
3781-iscsi_helper=tgtadm
3782 libvirt_use_virtio_for_bridges=True
3783-connection_type=libvirt
3784-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
3785 verbose=True
3786 use_syslog = {{ use_syslog }}
3787 ec2_private_dns_show_ip=True
3788 api_paste_config=/etc/nova/api-paste.ini
3789-volumes_path=/var/lib/nova/volumes
3790 enabled_apis=ec2,osapi_compute,metadata
3791 auth_strategy=keystone
3792 compute_driver=libvirt.LibvirtDriver
3793
3794=== modified file 'templates/havana/nova.conf'
3795--- templates/havana/nova.conf 2014-06-17 09:51:57 +0000
3796+++ templates/havana/nova.conf 2014-10-02 12:57:31 +0000
3797@@ -13,15 +13,11 @@
3798 state_path=/var/lib/nova
3799 lock_path=/var/lock/nova
3800 force_dhcp_release=True
3801-iscsi_helper=tgtadm
3802 libvirt_use_virtio_for_bridges=True
3803-connection_type=libvirt
3804-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
3805 verbose=True
3806 use_syslog = {{ use_syslog }}
3807 ec2_private_dns_show_ip=True
3808 api_paste_config=/etc/nova/api-paste.ini
3809-volumes_path=/var/lib/nova/volumes
3810 enabled_apis=ec2,osapi_compute,metadata
3811 auth_strategy=keystone
3812 compute_driver=libvirt.LibvirtDriver
3813@@ -40,6 +36,25 @@
3814 rbd_secret_uuid = {{ rbd_secret_uuid }}
3815 {% endif -%}
3816
3817+{% if console_vnc_type -%}
3818+vnc_enabled = True
3819+novnc_enabled = True
3820+vnc_keymap = {{ console_keymap }}
3821+vncserver_listen = 0.0.0.0
3822+vncserver_proxyclient_address = {{ console_listen_addr }}
3823+{% if console_access_protocol == 'novnc' or console_access_protocol == 'vnc' -%}
3824+novncproxy_base_url = {{ novnc_proxy_address }}
3825+{% endif -%}
3826+{% if console_access_protocol == 'xvpvnc' or console_access_protocol == 'vnc' -%}
3827+xvpvncproxy_port = {{ xvpvnc_proxy_port }}
3828+xvpvncproxy_host = {{ xvpvnc_proxy_host }}
3829+xvpvncproxy_base_url = {{ xvpvnc_proxy_address }}
3830+{% endif -%}
3831+{% else -%}
3832+vnc_enabled = False
3833+novnc_enabled = False
3834+{% endif -%}
3835+
3836 {% if neutron_plugin and neutron_plugin == 'ovs' -%}
3837 libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver
3838 {% if neutron_security_groups -%}
3839@@ -89,3 +104,13 @@
3840 {{ key }} = {{ value }}
3841 {% endfor -%}
3842 {% endif -%}
3843+
3844+{% if console_access_protocol == 'spice' -%}
3845+[spice]
3846+agent_enabled = True
3847+enabled = True
3848+html5proxy_base_url = {{ spice_proxy_address }}
3849+keymap = {{ console_keymap }}
3850+server_listen = 0.0.0.0
3851+server_proxyclient_address = {{ console_listen_addr }}
3852+{% endif -%}
3853
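For illustration, with console_access_protocol set to 'spice' the new block renders roughly as follows (addresses and keymap are placeholder values):

    [spice]
    agent_enabled = True
    enabled = True
    html5proxy_base_url = http://10.5.0.10:6082/spice_auto.html
    keymap = en-us
    server_listen = 0.0.0.0
    server_proxyclient_address = 10.5.0.21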
3854=== modified file 'templates/icehouse/ml2_conf.ini'
3855--- templates/icehouse/ml2_conf.ini 2014-04-14 09:11:10 +0000
3856+++ templates/icehouse/ml2_conf.ini 2014-10-02 12:57:31 +0000
3857@@ -22,7 +22,7 @@
3858 tunnel_types = gre
3859
3860 [securitygroup]
3861-{% if neutron_security_groups -%}
3862+{% if neutron_security_groups and not disable_neutron_security_groups -%}
3863 enable_security_group = True
3864 firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
3865 {% else -%}
3866
3867=== added directory 'tests'
3868=== added file 'tests/00-setup'
3869--- tests/00-setup 1970-01-01 00:00:00 +0000
3870+++ tests/00-setup 2014-10-02 12:57:31 +0000
3871@@ -0,0 +1,10 @@
3872+#!/bin/bash
3873+
3874+set -ex
3875+
3876+sudo add-apt-repository --yes ppa:juju/stable
3877+sudo apt-get update --yes
3878+sudo apt-get install --yes python-amulet
3879+sudo apt-get install --yes python-glanceclient
3880+sudo apt-get install --yes python-keystoneclient
3881+sudo apt-get install --yes python-novaclient
3882
3883=== added file 'tests/10-basic-precise-essex'
3884--- tests/10-basic-precise-essex 1970-01-01 00:00:00 +0000
3885+++ tests/10-basic-precise-essex 2014-10-02 12:57:31 +0000
3886@@ -0,0 +1,9 @@
3887+#!/usr/bin/python
3888+
3889+"""Amulet tests on a basic nova compute deployment on precise-essex."""
3890+
3891+from basic_deployment import NovaBasicDeployment
3892+
3893+if __name__ == '__main__':
3894+ deployment = NovaBasicDeployment(series='precise')
3895+ deployment.run_tests()
3896
3897=== added file 'tests/11-basic-precise-folsom'
3898--- tests/11-basic-precise-folsom 1970-01-01 00:00:00 +0000
3899+++ tests/11-basic-precise-folsom 2014-10-02 12:57:31 +0000
3900@@ -0,0 +1,17 @@
3901+#!/usr/bin/python
3902+
3903+"""Amulet tests on a basic nova compute deployment on precise-folsom."""
3904+
3905+import amulet
3906+from basic_deployment import NovaBasicDeployment
3907+
3908+if __name__ == '__main__':
3909+ # NOTE(coreycb): Skipping failing test until resolved. 'nova-manage db sync'
3910+ # fails in shared-db-relation-changed (only fails on folsom)
3911+ message = "Skipping failing test until resolved"
3912+ amulet.raise_status(amulet.SKIP, msg=message)
3913+
3914+ deployment = NovaBasicDeployment(series='precise',
3915+ openstack='cloud:precise-folsom',
3916+ source='cloud:precise-updates/folsom')
3917+ deployment.run_tests()
3918
3919=== added file 'tests/12-basic-precise-grizzly'
3920--- tests/12-basic-precise-grizzly 1970-01-01 00:00:00 +0000
3921+++ tests/12-basic-precise-grizzly 2014-10-02 12:57:31 +0000
3922@@ -0,0 +1,11 @@
3923+#!/usr/bin/python
3924+
3925+"""Amulet tests on a basic nova compute deployment on precise-grizzly."""
3926+
3927+from basic_deployment import NovaBasicDeployment
3928+
3929+if __name__ == '__main__':
3930+ deployment = NovaBasicDeployment(series='precise',
3931+ openstack='cloud:precise-grizzly',
3932+ source='cloud:precise-updates/grizzly')
3933+ deployment.run_tests()
3934
3935=== added file 'tests/13-basic-precise-havana'
3936--- tests/13-basic-precise-havana 1970-01-01 00:00:00 +0000
3937+++ tests/13-basic-precise-havana 2014-10-02 12:57:31 +0000
3938@@ -0,0 +1,11 @@
3939+#!/usr/bin/python
3940+
3941+"""Amulet tests on a basic nova compute deployment on precise-havana."""
3942+
3943+from basic_deployment import NovaBasicDeployment
3944+
3945+if __name__ == '__main__':
3946+ deployment = NovaBasicDeployment(series='precise',
3947+ openstack='cloud:precise-havana',
3948+ source='cloud:precise-updates/havana')
3949+ deployment.run_tests()
3950
3951=== added file 'tests/14-basic-precise-icehouse'
3952--- tests/14-basic-precise-icehouse 1970-01-01 00:00:00 +0000
3953+++ tests/14-basic-precise-icehouse 2014-10-02 12:57:31 +0000
3954@@ -0,0 +1,11 @@
3955+#!/usr/bin/python
3956+
3957+"""Amulet tests on a basic nova compute deployment on precise-icehouse."""
3958+
3959+from basic_deployment import NovaBasicDeployment
3960+
3961+if __name__ == '__main__':
3962+ deployment = NovaBasicDeployment(series='precise',
3963+ openstack='cloud:precise-icehouse',
3964+ source='cloud:precise-updates/icehouse')
3965+ deployment.run_tests()
3966
3967=== added file 'tests/15-basic-trusty-icehouse'
3968--- tests/15-basic-trusty-icehouse 1970-01-01 00:00:00 +0000
3969+++ tests/15-basic-trusty-icehouse 2014-10-02 12:57:31 +0000
3970@@ -0,0 +1,9 @@
3971+#!/usr/bin/python
3972+
3973+"""Amulet tests on a basic nova compute deployment on trusty-icehouse."""
3974+
3975+from basic_deployment import NovaBasicDeployment
3976+
3977+if __name__ == '__main__':
3978+ deployment = NovaBasicDeployment(series='trusty')
3979+ deployment.run_tests()
3980
3981=== added file 'tests/README'
3982--- tests/README 1970-01-01 00:00:00 +0000
3983+++ tests/README 2014-10-02 12:57:31 +0000
3984@@ -0,0 +1,47 @@
3985+This directory provides Amulet tests that focus on verification of nova-compute
3986+deployments.
3987+
3988+If you use a web proxy server to access the web, you'll need to set the
3989+AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.
3990+
3991+The following examples demonstrate different ways that tests can be executed.
3992+All examples are run from the charm's root directory.
3993+
3994+ * To run all tests (starting with 00-setup):
3995+
3996+ make test
3997+
3998+ * To run a specific test module (or modules):
3999+
4000+ juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
4001+
4002+ * To run a specific test module (or modules), and keep the environment
4003+ deployed after a failure:
4004+
4005+ juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
4006+
4007+ * To re-run a test module against an already deployed environment (one
4008+ that was deployed by a previous call to 'juju test --set-e'):
4009+
4010+ ./tests/15-basic-trusty-icehouse
4011+
4012+For debugging and test development purposes, all code should be idempotent.
4013+In other words, the code should have the ability to be re-run without changing
4014+the results beyond the initial run. This enables editing and re-running of a
4015+test module against an already deployed environment, as described above.
4016+
4017+Manual debugging tips:
4018+
4019+ * Set the following env vars before using the OpenStack CLI as admin:
4020+ export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
4021+ export OS_TENANT_NAME=admin
4022+ export OS_USERNAME=admin
4023+ export OS_PASSWORD=openstack
4024+ export OS_REGION_NAME=RegionOne
4025+
4026+ * Set the following env vars before using the OpenStack CLI as demoUser:
4027+ export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
4028+ export OS_TENANT_NAME=demoTenant
4029+ export OS_USERNAME=demoUser
4030+ export OS_PASSWORD=password
4031+ export OS_REGION_NAME=RegionOne
4032
4033=== added file 'tests/basic_deployment.py'
4034--- tests/basic_deployment.py 1970-01-01 00:00:00 +0000
4035+++ tests/basic_deployment.py 2014-10-02 12:57:31 +0000
4036@@ -0,0 +1,406 @@
4037+#!/usr/bin/python
4038+
4039+import amulet
4040+
4041+from charmhelpers.contrib.openstack.amulet.deployment import (
4042+ OpenStackAmuletDeployment
4043+)
4044+
4045+from charmhelpers.contrib.openstack.amulet.utils import (
4046+ OpenStackAmuletUtils,
4047+ DEBUG, # flake8: noqa
4048+ ERROR
4049+)
4050+
4051+# Use DEBUG to turn on debug logging
4052+u = OpenStackAmuletUtils(ERROR)
4053+
4054+
4055+class NovaBasicDeployment(OpenStackAmuletDeployment):
4056+ """Amulet tests on a basic nova compute deployment."""
4057+
4058+ def __init__(self, series=None, openstack=None, source=None):
4059+ """Deploy the entire test environment."""
4060+ super(NovaBasicDeployment, self).__init__(series, openstack, source)
4061+ self._add_services()
4062+ self._add_relations()
4063+ self._configure_services()
4064+ self._deploy()
4065+ self._initialize_tests()
4066+
4067+ def _add_services(self):
4068+ """Add the service that we're testing, including the number of units,
4069+ where nova-compute is local, and the other charms are from
4070+ the charm store."""
4071+ this_service = ('nova-compute', 1)
4072+ other_services = [('mysql', 1), ('rabbitmq-server', 1),
4073+ ('nova-cloud-controller', 1), ('keystone', 1),
4074+ ('glance', 1)]
4075+ super(NovaBasicDeployment, self)._add_services(this_service,
4076+ other_services)
4077+
4078+ def _add_relations(self):
4079+ """Add all of the relations for the services."""
4080+ relations = {
4081+ 'nova-compute:image-service': 'glance:image-service',
4082+ 'nova-compute:shared-db': 'mysql:shared-db',
4083+ 'nova-compute:amqp': 'rabbitmq-server:amqp',
4084+ 'nova-cloud-controller:shared-db': 'mysql:shared-db',
4085+ 'nova-cloud-controller:identity-service': 'keystone:identity-service',
4086+ 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp',
4087+ 'nova-cloud-controller:cloud-compute': 'nova-compute:cloud-compute',
4088+ 'nova-cloud-controller:image-service': 'glance:image-service',
4089+ 'keystone:shared-db': 'mysql:shared-db',
4090+ 'glance:identity-service': 'keystone:identity-service',
4091+ 'glance:shared-db': 'mysql:shared-db',
4092+ 'glance:amqp': 'rabbitmq-server:amqp'
4093+ }
4094+ super(NovaBasicDeployment, self)._add_relations(relations)
4095+
4096+ def _configure_services(self):
4097+ """Configure all of the services."""
4098+ nova_config = {'config-flags': 'auto_assign_floating_ip=False',
4099+ 'enable-live-migration': 'False'}
4100+ keystone_config = {'admin-password': 'openstack',
4101+ 'admin-token': 'ubuntutesting'}
4102+ configs = {'nova-compute': nova_config, 'keystone': keystone_config}
4103+ super(NovaBasicDeployment, self)._configure_services(configs)
4104+
4105+ def _initialize_tests(self):
4106+ """Perform final initialization before tests get run."""
4107+ # Access the sentries for inspecting service units
4108+ self.mysql_sentry = self.d.sentry.unit['mysql/0']
4109+ self.keystone_sentry = self.d.sentry.unit['keystone/0']
4110+ self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
4111+ self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0']
4112+ self.nova_cc_sentry = self.d.sentry.unit['nova-cloud-controller/0']
4113+ self.glance_sentry = self.d.sentry.unit['glance/0']
4114+
4115+ # Authenticate admin with keystone
4116+ self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
4117+ user='admin',
4118+ password='openstack',
4119+ tenant='admin')
4120+
4121+ # Authenticate admin with glance endpoint
4122+ self.glance = u.authenticate_glance_admin(self.keystone)
4123+
4124+ # Create a demo tenant/role/user
4125+ self.demo_tenant = 'demoTenant'
4126+ self.demo_role = 'demoRole'
4127+ self.demo_user = 'demoUser'
4128+ if not u.tenant_exists(self.keystone, self.demo_tenant):
4129+ tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
4130+ description='demo tenant',
4131+ enabled=True)
4132+ self.keystone.roles.create(name=self.demo_role)
4133+ self.keystone.users.create(name=self.demo_user,
4134+ password='password',
4135+ tenant_id=tenant.id,
4136+ email='demo@demo.com')
4137+
4138+ # Authenticate demo user with keystone
4139+ self.keystone_demo = \
4140+ u.authenticate_keystone_user(self.keystone, user=self.demo_user,
4141+ password='password',
4142+ tenant=self.demo_tenant)
4143+
4144+ # Authenticate demo user with nova-api
4145+ self.nova_demo = u.authenticate_nova_user(self.keystone,
4146+ user=self.demo_user,
4147+ password='password',
4148+ tenant=self.demo_tenant)
4149+
4150+ def test_services(self):
4151+ """Verify the expected services are running on the corresponding
4152+ service units."""
4153+ commands = {
4154+ self.mysql_sentry: ['status mysql'],
4155+ self.rabbitmq_sentry: ['sudo service rabbitmq-server status'],
4156+ self.nova_compute_sentry: ['status nova-compute',
4157+ 'status nova-network',
4158+ 'status nova-api'],
4159+ self.nova_cc_sentry: ['status nova-api-ec2',
4160+ 'status nova-api-os-compute',
4161+ 'status nova-objectstore',
4162+ 'status nova-cert',
4163+ 'status nova-scheduler'],
4164+ self.keystone_sentry: ['status keystone'],
4165+ self.glance_sentry: ['status glance-registry', 'status glance-api']
4166+ }
4167+ if self._get_openstack_release() >= self.precise_grizzly:
4168+            commands[self.nova_cc_sentry].append('status nova-conductor')
4169+
4170+ ret = u.validate_services(commands)
4171+ if ret:
4172+ amulet.raise_status(amulet.FAIL, msg=ret)
4173+
4174+ def test_service_catalog(self):
4175+ """Verify that the service catalog endpoint data is valid."""
4176+ endpoint_vol = {'adminURL': u.valid_url,
4177+ 'region': 'RegionOne',
4178+ 'publicURL': u.valid_url,
4179+ 'internalURL': u.valid_url}
4180+ endpoint_id = {'adminURL': u.valid_url,
4181+ 'region': 'RegionOne',
4182+ 'publicURL': u.valid_url,
4183+ 'internalURL': u.valid_url}
4184+ if self._get_openstack_release() >= self.precise_folsom:
4185+ endpoint_vol['id'] = u.not_null
4186+ endpoint_id['id'] = u.not_null
4187+ expected = {'s3': [endpoint_vol], 'compute': [endpoint_vol],
4188+ 'ec2': [endpoint_vol], 'identity': [endpoint_id]}
4189+ actual = self.keystone_demo.service_catalog.get_endpoints()
4190+
4191+ ret = u.validate_svc_catalog_endpoint_data(expected, actual)
4192+ if ret:
4193+ amulet.raise_status(amulet.FAIL, msg=ret)
4194+
4195+ def test_openstack_compute_api_endpoint(self):
4196+ """Verify the openstack compute api (osapi) endpoint data."""
4197+ endpoints = self.keystone.endpoints.list()
4198+ admin_port = internal_port = public_port = '8774'
4199+ expected = {'id': u.not_null,
4200+ 'region': 'RegionOne',
4201+ 'adminurl': u.valid_url,
4202+ 'internalurl': u.valid_url,
4203+ 'publicurl': u.valid_url,
4204+ 'service_id': u.not_null}
4205+
4206+ ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
4207+ public_port, expected)
4208+ if ret:
4209+ message = 'osapi endpoint: {}'.format(ret)
4210+ amulet.raise_status(amulet.FAIL, msg=message)
4211+
4212+ def test_ec2_api_endpoint(self):
4213+ """Verify the EC2 api endpoint data."""
4214+ endpoints = self.keystone.endpoints.list()
4215+ admin_port = internal_port = public_port = '8773'
4216+ expected = {'id': u.not_null,
4217+ 'region': 'RegionOne',
4218+ 'adminurl': u.valid_url,
4219+ 'internalurl': u.valid_url,
4220+ 'publicurl': u.valid_url,
4221+ 'service_id': u.not_null}
4222+
4223+ ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
4224+ public_port, expected)
4225+ if ret:
4226+ message = 'EC2 endpoint: {}'.format(ret)
4227+ amulet.raise_status(amulet.FAIL, msg=message)
4228+
4229+ def test_s3_api_endpoint(self):
4230+ """Verify the S3 api endpoint data."""
4231+ endpoints = self.keystone.endpoints.list()
4232+ admin_port = internal_port = public_port = '3333'
4233+ expected = {'id': u.not_null,
4234+ 'region': 'RegionOne',
4235+ 'adminurl': u.valid_url,
4236+ 'internalurl': u.valid_url,
4237+ 'publicurl': u.valid_url,
4238+ 'service_id': u.not_null}
4239+
4240+ ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
4241+ public_port, expected)
4242+ if ret:
4243+ message = 'S3 endpoint: {}'.format(ret)
4244+ amulet.raise_status(amulet.FAIL, msg=message)
4245+
4246+ def test_nova_shared_db_relation(self):
4247+ """Verify the nova-compute to mysql shared-db relation data"""
4248+ unit = self.nova_compute_sentry
4249+ relation = ['shared-db', 'mysql:shared-db']
4250+ expected = {
4251+ 'private-address': u.valid_ip,
4252+ 'nova_database': 'nova',
4253+ 'nova_username': 'nova',
4254+ 'nova_hostname': u.valid_ip
4255+ }
4256+
4257+ ret = u.validate_relation_data(unit, relation, expected)
4258+ if ret:
4259+ message = u.relation_error('nova-compute shared-db', ret)
4260+ amulet.raise_status(amulet.FAIL, msg=message)
4261+
4262+ def test_mysql_shared_db_relation(self):
4263+ """Verify the mysql to nova-compute shared-db relation data"""
4264+ unit = self.mysql_sentry
4265+ relation = ['shared-db', 'nova-compute:shared-db']
4266+ expected = {
4267+ 'private-address': u.valid_ip,
4268+ 'nova_password': u.not_null,
4269+ 'db_host': u.valid_ip
4270+ }
4271+
4272+ ret = u.validate_relation_data(unit, relation, expected)
4273+ if ret:
4274+ message = u.relation_error('mysql shared-db', ret)
4275+ amulet.raise_status(amulet.FAIL, msg=message)
4276+
4277+ def test_nova_amqp_relation(self):
4278+ """Verify the nova-compute to rabbitmq-server amqp relation data"""
4279+ unit = self.nova_compute_sentry
4280+ relation = ['amqp', 'rabbitmq-server:amqp']
4281+ expected = {
4282+ 'username': 'nova',
4283+ 'private-address': u.valid_ip,
4284+ 'vhost': 'openstack'
4285+ }
4286+
4287+ ret = u.validate_relation_data(unit, relation, expected)
4288+ if ret:
4289+ message = u.relation_error('nova-compute amqp', ret)
4290+ amulet.raise_status(amulet.FAIL, msg=message)
4291+
4292+ def test_rabbitmq_amqp_relation(self):
4293+ """Verify the rabbitmq-server to nova-compute amqp relation data"""
4294+ unit = self.rabbitmq_sentry
4295+ relation = ['amqp', 'nova-compute:amqp']
4296+ expected = {
4297+ 'private-address': u.valid_ip,
4298+ 'password': u.not_null,
4299+ 'hostname': u.valid_ip
4300+ }
4301+
4302+ ret = u.validate_relation_data(unit, relation, expected)
4303+ if ret:
4304+ message = u.relation_error('rabbitmq amqp', ret)
4305+ amulet.raise_status(amulet.FAIL, msg=message)
4306+
4307+ def test_nova_cloud_compute_relation(self):
4308+ """Verify the nova-compute to nova-cc cloud-compute relation data"""
4309+ unit = self.nova_compute_sentry
4310+ relation = ['cloud-compute', 'nova-cloud-controller:cloud-compute']
4311+ expected = {
4312+ 'private-address': u.valid_ip,
4313+ }
4314+
4315+ ret = u.validate_relation_data(unit, relation, expected)
4316+ if ret:
4317+ message = u.relation_error('nova-compute cloud-compute', ret)
4318+ amulet.raise_status(amulet.FAIL, msg=message)
4319+
4320+ def test_nova_cc_cloud_compute_relation(self):
4321+ """Verify the nova-cc to nova-compute cloud-compute relation data"""
4322+ unit = self.nova_cc_sentry
4323+ relation = ['cloud-compute', 'nova-compute:cloud-compute']
4324+ expected = {
4325+ 'volume_service': 'cinder',
4326+ 'network_manager': 'flatdhcpmanager',
4327+ 'ec2_host': u.valid_ip,
4328+ 'private-address': u.valid_ip,
4329+ 'restart_trigger': u.not_null
4330+ }
4331+ if self._get_openstack_release() == self.precise_essex:
4332+ expected['volume_service'] = 'nova-volume'
4333+
4334+ ret = u.validate_relation_data(unit, relation, expected)
4335+ if ret:
4336+ message = u.relation_error('nova-cc cloud-compute', ret)
4337+ amulet.raise_status(amulet.FAIL, msg=message)
4338+
4339+ def test_restart_on_config_change(self):
4340+ """Verify that the specified services are restarted when the config
4341+ is changed."""
4342+ # NOTE(coreycb): Skipping failing test on essex until resolved.
4343+ # config-flags don't take effect on essex.
4344+ if self._get_openstack_release() == self.precise_essex:
4345+ u.log.error("Skipping failing test until resolved")
4346+ return
4347+
4348+ services = ['nova-compute', 'nova-api', 'nova-network']
4349+ self.d.configure('nova-compute', {'config-flags': 'verbose=False'})
4350+
4351+ time = 20
4352+ for s in services:
4353+ if not u.service_restarted(self.nova_compute_sentry, s,
4354+ '/etc/nova/nova.conf', sleep_time=time):
4355+ msg = "service {} didn't restart after config change".format(s)
4356+ amulet.raise_status(amulet.FAIL, msg=msg)
4357+ time = 0
4358+
4359+ self.d.configure('nova-compute', {'config-flags': 'verbose=True'})
4360+
4361+ def test_nova_config(self):
4362+ """Verify the data in the nova config file."""
4363+ # NOTE(coreycb): Currently no way to test on essex because config file
4364+ # has no section headers.
4365+ if self._get_openstack_release() == self.precise_essex:
4366+ return
4367+
4368+ unit = self.nova_compute_sentry
4369+ conf = '/etc/nova/nova.conf'
4370+ rabbitmq_relation = self.rabbitmq_sentry.relation('amqp',
4371+ 'nova-compute:amqp')
4372+ glance_relation = self.glance_sentry.relation('image-service',
4373+ 'nova-compute:image-service')
4374+ mysql_relation = self.mysql_sentry.relation('shared-db',
4375+ 'nova-compute:shared-db')
4376+ db_uri = "mysql://{}:{}@{}/{}".format('nova',
4377+ mysql_relation['nova_password'],
4378+ mysql_relation['db_host'],
4379+ 'nova')
4380+
4381+ expected = {'dhcpbridge_flagfile': '/etc/nova/nova.conf',
4382+ 'dhcpbridge': '/usr/bin/nova-dhcpbridge',
4383+ 'logdir': '/var/log/nova',
4384+ 'state_path': '/var/lib/nova',
4385+ 'lock_path': '/var/lock/nova',
4386+ 'force_dhcp_release': 'True',
4387+ 'libvirt_use_virtio_for_bridges': 'True',
4388+ 'verbose': 'True',
4389+ 'use_syslog': 'False',
4390+ 'ec2_private_dns_show_ip': 'True',
4391+ 'api_paste_config': '/etc/nova/api-paste.ini',
4392+ 'enabled_apis': 'ec2,osapi_compute,metadata',
4393+ 'auth_strategy': 'keystone',
4394+ 'compute_driver': 'libvirt.LibvirtDriver',
4395+ 'sql_connection': db_uri,
4396+ 'rabbit_userid': 'nova',
4397+ 'rabbit_virtual_host': 'openstack',
4398+ 'rabbit_password': rabbitmq_relation['password'],
4399+ 'rabbit_host': rabbitmq_relation['hostname'],
4400+ 'glance_api_servers': glance_relation['glance-api-server'],
4401+ 'flat_interface': 'eth1',
4402+ 'network_manager': 'nova.network.manager.FlatDHCPManager',
4403+ 'volume_api_class': 'nova.volume.cinder.API',
4404+ 'verbose': 'True'}
4405+
4406+ ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
4407+ if ret:
4408+ message = "nova config error: {}".format(ret)
4409+ amulet.raise_status(amulet.FAIL, msg=message)
4410+
4411+ def test_image_instance_create(self):
4412+ """Create an image/instance, verify they exist, and delete them."""
4413+ # NOTE(coreycb): Skipping failing test on essex until resolved. essex
4414+ # nova API calls are getting "Malformed request url (HTTP
4415+ # 400)".
4416+ if self._get_openstack_release() == self.precise_essex:
4417+ u.log.error("Skipping failing test until resolved")
4418+ return
4419+
4420+ image = u.create_cirros_image(self.glance, "cirros-image")
4421+ if not image:
4422+ amulet.raise_status(amulet.FAIL, msg="Image create failed")
4423+
4424+ instance = u.create_instance(self.nova_demo, "cirros-image", "cirros",
4425+ "m1.tiny")
4426+ if not instance:
4427+ amulet.raise_status(amulet.FAIL, msg="Instance create failed")
4428+
4429+ found = False
4430+ for instance in self.nova_demo.servers.list():
4431+ if instance.name == 'cirros':
4432+ found = True
4433+ if instance.status != 'ACTIVE':
4434+ msg = "cirros instance is not active"
4435+                    amulet.raise_status(amulet.FAIL, msg=msg)
4436+
4437+ if not found:
4438+ message = "nova cirros instance does not exist"
4439+ amulet.raise_status(amulet.FAIL, msg=message)
4440+
4441+ u.delete_image(self.glance, image)
4442+ u.delete_instance(self.nova_demo, instance)
4443
4444=== added directory 'tests/charmhelpers'
4445=== added file 'tests/charmhelpers/__init__.py'
4446=== added directory 'tests/charmhelpers/contrib'
4447=== added file 'tests/charmhelpers/contrib/__init__.py'
4448=== added directory 'tests/charmhelpers/contrib/amulet'
4449=== added file 'tests/charmhelpers/contrib/amulet/__init__.py'
4450=== added file 'tests/charmhelpers/contrib/amulet/deployment.py'
4451--- tests/charmhelpers/contrib/amulet/deployment.py 1970-01-01 00:00:00 +0000
4452+++ tests/charmhelpers/contrib/amulet/deployment.py 2014-10-02 12:57:31 +0000
4453@@ -0,0 +1,72 @@
4454+import amulet
4455+
4456+import os
4457+
4458+
4459+class AmuletDeployment(object):
4460+ """Amulet deployment.
4461+
4462+ This class provides generic Amulet deployment and test runner
4463+ methods.
4464+ """
4465+
4466+ def __init__(self, series=None):
4467+ """Initialize the deployment environment."""
4468+ self.series = None
4469+
4470+ if series:
4471+ self.series = series
4472+ self.d = amulet.Deployment(series=self.series)
4473+ else:
4474+ self.d = amulet.Deployment()
4475+
4476+ def _add_services(self, this_service, other_services):
4477+ """Add services.
4478+
4479+ Add services to the deployment where this_service is the local charm
4480+ that we're testing and other_services are the other services that
4481+ are being used in the amulet tests.
4482+ """
4483+ name, units, location = range(3)
4484+
4485+ if this_service[name] != os.path.basename(os.getcwd()):
4486+ s = this_service[name]
4487+ msg = "The charm's root directory name needs to be {}".format(s)
4488+ amulet.raise_status(amulet.FAIL, msg=msg)
4489+
4490+ self.d.add(this_service[name], units=this_service[units])
4491+
4492+ for svc in other_services:
4493+ if len(svc) > 2:
4494+ branch_location = svc[location]
4495+ elif self.series:
4496+                branch_location = 'cs:{}/{}'.format(self.series, svc[name])
4497+ else:
4498+ branch_location = None
4499+ self.d.add(svc[name], charm=branch_location, units=svc[units])
4500+
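For illustration, service tuples are (name, units) with an optional third branch-location element (the branch URL is a placeholder):

    # self._add_services(('nova-compute', 1),
    #                    [('mysql', 1),
    #                     ('keystone', 1, 'lp:~me/charms/trusty/keystone/next')])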
4501+ def _add_relations(self, relations):
4502+ """Add all of the relations for the services."""
4503+ for k, v in relations.iteritems():
4504+ self.d.relate(k, v)
4505+
4506+ def _configure_services(self, configs):
4507+ """Configure all of the services."""
4508+ for service, config in configs.iteritems():
4509+ self.d.configure(service, config)
4510+
4511+ def _deploy(self):
4512+ """Deploy environment and wait for all hooks to finish executing."""
4513+ try:
4514+ self.d.setup(timeout=900)
4515+ self.d.sentry.wait(timeout=900)
4516+ except amulet.helpers.TimeoutError:
4517+ amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
4518+ except Exception:
4519+ raise
4520+
4521+ def run_tests(self):
4522+ """Run all of the methods that are prefixed with 'test_'."""
4523+ for test in dir(self):
4524+ if test.startswith('test_'):
4525+ getattr(self, test)()
4526
4527=== added file 'tests/charmhelpers/contrib/amulet/utils.py'
4528--- tests/charmhelpers/contrib/amulet/utils.py 1970-01-01 00:00:00 +0000
4529+++ tests/charmhelpers/contrib/amulet/utils.py 2014-10-02 12:57:31 +0000
4530@@ -0,0 +1,176 @@
4531+import ConfigParser
4532+import io
4533+import logging
4534+import re
4535+import sys
4536+import time
4537+
4538+
4539+class AmuletUtils(object):
4540+ """Amulet utilities.
4541+
4542+ This class provides common utility functions that are used by Amulet
4543+ tests.
4544+ """
4545+
4546+ def __init__(self, log_level=logging.ERROR):
4547+ self.log = self.get_logger(level=log_level)
4548+
4549+ def get_logger(self, name="amulet-logger", level=logging.DEBUG):
4550+ """Get a logger object that will log to stdout."""
4551+ log = logging
4552+ logger = log.getLogger(name)
4553+ fmt = log.Formatter("%(asctime)s %(funcName)s "
4554+ "%(levelname)s: %(message)s")
4555+
4556+ handler = log.StreamHandler(stream=sys.stdout)
4557+ handler.setLevel(level)
4558+ handler.setFormatter(fmt)
4559+
4560+ logger.addHandler(handler)
4561+ logger.setLevel(level)
4562+
4563+ return logger
4564+
4565+ def valid_ip(self, ip):
4566+ if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
4567+ return True
4568+ else:
4569+ return False
4570+
4571+ def valid_url(self, url):
4572+ p = re.compile(
4573+ r'^(?:http|ftp)s?://'
4574+ r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
4575+ r'localhost|'
4576+ r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
4577+ r'(?::\d+)?'
4578+ r'(?:/?|[/?]\S+)$',
4579+ re.IGNORECASE)
4580+ if p.match(url):
4581+ return True
4582+ else:
4583+ return False
4584+
4585+ def validate_services(self, commands):
4586+ """Validate services.
4587+
4588+ Verify the specified services are running on the corresponding
4589+ service units.
4590+ """
4591+ for k, v in commands.iteritems():
4592+ for cmd in v:
4593+ output, code = k.run(cmd)
4594+ if code != 0:
4595+ return "command `{}` returned {}".format(cmd, str(code))
4596+ return None
4597+
4598+ def _get_config(self, unit, filename):
4599+ """Get a ConfigParser object for parsing a unit's config file."""
4600+ file_contents = unit.file_contents(filename)
4601+ config = ConfigParser.ConfigParser()
4602+ config.readfp(io.StringIO(file_contents))
4603+ return config
4604+
4605+ def validate_config_data(self, sentry_unit, config_file, section,
4606+ expected):
4607+ """Validate config file data.
4608+
4609+ Verify that the specified section of the config file contains
4610+ the expected option key:value pairs.
4611+ """
4612+ config = self._get_config(sentry_unit, config_file)
4613+
4614+ if section != 'DEFAULT' and not config.has_section(section):
4615+ return "section [{}] does not exist".format(section)
4616+
4617+ for k in expected.keys():
4618+ if not config.has_option(section, k):
4619+ return "section [{}] is missing option {}".format(section, k)
4620+ if config.get(section, k) != expected[k]:
4621+ return "section [{}] {}:{} != expected {}:{}".format(
4622+ section, k, config.get(section, k), k, expected[k])
4623+ return None
4624+
4625+ def _validate_dict_data(self, expected, actual):
4626+ """Validate dictionary data.
4627+
4628+ Compare expected dictionary data vs actual dictionary data.
4629+ The values in the 'expected' dictionary can be strings, bools, ints,
4630+ longs, or can be a function that evaluate a variable and returns a
4631+ bool.
4632+ """
4633+ for k, v in expected.iteritems():
4634+ if k in actual:
4635+ if (isinstance(v, basestring) or
4636+ isinstance(v, bool) or
4637+ isinstance(v, (int, long))):
4638+ if v != actual[k]:
4639+ return "{}:{}".format(k, actual[k])
4640+ elif not v(actual[k]):
4641+ return "{}:{}".format(k, actual[k])
4642+ else:
4643+ return "key '{}' does not exist".format(k)
4644+ return None
4645+
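For illustration, expected values may be literals or callables that validate the actual value (the address is a placeholder):

    # expected = {'vhost': 'openstack', 'private-address': u.valid_ip}
    # actual   = {'vhost': 'openstack', 'private-address': '10.5.0.2'}
    # u._validate_dict_data(expected, actual)   # -> None (all checks pass)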
4646+ def validate_relation_data(self, sentry_unit, relation, expected):
4647+ """Validate actual relation data based on expected relation data."""
4648+ actual = sentry_unit.relation(relation[0], relation[1])
4649+ self.log.debug('actual: {}'.format(repr(actual)))
4650+ return self._validate_dict_data(expected, actual)
4651+
4652+ def _validate_list_data(self, expected, actual):
4653+ """Compare expected list vs actual list data."""
4654+ for e in expected:
4655+ if e not in actual:
4656+ return "expected item {} not found in actual list".format(e)
4657+ return None
4658+
4659+ def not_null(self, string):
4660+ if string is not None:
4661+ return True
4662+ else:
4663+ return False
4664+
4665+ def _get_file_mtime(self, sentry_unit, filename):
4666+ """Get last modification time of file."""
4667+ return sentry_unit.file_stat(filename)['mtime']
4668+
4669+ def _get_dir_mtime(self, sentry_unit, directory):
4670+ """Get last modification time of directory."""
4671+ return sentry_unit.directory_stat(directory)['mtime']
4672+
4673+ def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
4674+ """Get process' start time.
4675+
4676+ Determine start time of the process based on the last modification
4677+ time of the /proc/pid directory. If pgrep_full is True, the process
4678+ name is matched against the full command line.
4679+ """
4680+ if pgrep_full:
4681+ cmd = 'pgrep -o -f {}'.format(service)
4682+ else:
4683+ cmd = 'pgrep -o {}'.format(service)
4684+ proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
4685+ return self._get_dir_mtime(sentry_unit, proc_dir)
4686+
4687+ def service_restarted(self, sentry_unit, service, filename,
4688+ pgrep_full=False, sleep_time=20):
4689+ """Check if service was restarted.
4690+
4691+ Compare a service's start time vs a file's last modification time
4692+ (such as a config file for that service) to determine if the service
4693+ has been restarted.
4694+ """
4695+ time.sleep(sleep_time)
4696+ if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
4697+ self._get_file_mtime(sentry_unit, filename)):
4698+ return True
4699+ else:
4700+ return False
4701+
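For illustration, the check treats a service as restarted only if its oldest matching process started at or after the watched file's mtime:

    # u.service_restarted(unit, 'nova-compute', '/etc/nova/nova.conf',
    #                     sleep_time=20)   # -> True on restart-after-write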
4702+ def relation_error(self, name, data):
4703+ return 'unexpected relation data in {} - {}'.format(name, data)
4704+
4705+ def endpoint_error(self, name, data):
4706+ return 'unexpected endpoint data in {} - {}'.format(name, data)
4707
4708=== added directory 'tests/charmhelpers/contrib/openstack'
4709=== added file 'tests/charmhelpers/contrib/openstack/__init__.py'
4710=== added directory 'tests/charmhelpers/contrib/openstack/amulet'
4711=== added file 'tests/charmhelpers/contrib/openstack/amulet/__init__.py'
4712=== added file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
4713--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
4714+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-10-02 12:57:31 +0000
4715@@ -0,0 +1,94 @@
4716+from bzrlib.branch import Branch
4717+import os
4718+import re
4719+from charmhelpers.contrib.amulet.deployment import (
4720+ AmuletDeployment
4721+)
4722+
4723+
4724+class OpenStackAmuletDeployment(AmuletDeployment):
4725+ """OpenStack amulet deployment.
4726+
4727+ This class inherits from AmuletDeployment and has additional support
4728+ that is specifically for use by OpenStack charms.
4729+ """
4730+
4731+ def __init__(self, series=None, openstack=None, source=None):
4732+ """Initialize the deployment environment."""
4733+ super(OpenStackAmuletDeployment, self).__init__(series)
4734+ self.openstack = openstack
4735+ self.source = source
4736+
4737+ def _is_dev_branch(self):
4738+ """Determine if branch being tested is a dev (i.e. next) branch."""
4739+ branch = Branch.open(os.getcwd())
4740+ parent = branch.get_parent()
4741+ pattern = re.compile("^.*/next/$")
4742+ if (pattern.match(parent)):
4743+ return True
4744+ else:
4745+ return False
4746+
4747+ def _determine_branch_locations(self, other_services):
4748+ """Determine the branch locations for the other services.
4749+
4750+ If the branch being tested is a dev branch, then determine the
4751+ development branch locations for the other services. Otherwise,
4752+ the default charm store branches will be used."""
4753+ name = 0
4754+ if self._is_dev_branch():
4755+ updated_services = []
4756+ for svc in other_services:
4757+ if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']:
4758+ location = 'lp:charms/{}'.format(svc[name])
4759+ else:
4760+ temp = 'lp:~openstack-charmers/charms/trusty/{}/next'
4761+ location = temp.format(svc[name])
4762+ updated_services.append(svc + (location,))
4763+ other_services = updated_services
4764+ return other_services
4765+
4766+ def _add_services(self, this_service, other_services):
4767+ """Add services to the deployment and set openstack-origin/source."""
4768+ name = 0
4769+ other_services = self._determine_branch_locations(other_services)
4770+ super(OpenStackAmuletDeployment, self)._add_services(this_service,
4771+ other_services)
4772+ services = other_services
4773+ services.append(this_service)
4774+ use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
4775+
4776+ if self.openstack:
4777+ for svc in services:
4778+ if svc[name] not in use_source:
4779+ config = {'openstack-origin': self.openstack}
4780+ self.d.configure(svc[name], config)
4781+
4782+ if self.source:
4783+ for svc in services:
4784+ if svc[name] in use_source:
4785+ config = {'source': self.source}
4786+ self.d.configure(svc[name], config)
4787+
4788+ def _configure_services(self, configs):
4789+ """Configure all of the services."""
4790+ for service, config in configs.iteritems():
4791+ self.d.configure(service, config)
4792+
4793+ def _get_openstack_release(self):
4794+ """Get openstack release.
4795+
4796+ Return an integer representing the enum value of the openstack
4797+ release.
4798+ """
4799+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
4800+ self.precise_havana, self.precise_icehouse,
4801+ self.trusty_icehouse) = range(6)
4802+ releases = {
4803+ ('precise', None): self.precise_essex,
4804+ ('precise', 'cloud:precise-folsom'): self.precise_folsom,
4805+ ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
4806+ ('precise', 'cloud:precise-havana'): self.precise_havana,
4807+ ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
4808+ ('trusty', None): self.trusty_icehouse}
4809+ return releases[(self.series, self.openstack)]
4810
4811=== added file 'tests/charmhelpers/contrib/openstack/amulet/utils.py'
4812--- tests/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
4813+++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-10-02 12:57:31 +0000
4814@@ -0,0 +1,276 @@
4815+import logging
4816+import os
4817+import time
4818+import urllib
4819+
4820+import glanceclient.v1.client as glance_client
4821+import keystoneclient.v2_0 as keystone_client
4822+import novaclient.v1_1.client as nova_client
4823+
4824+from charmhelpers.contrib.amulet.utils import (
4825+ AmuletUtils
4826+)
4827+
4828+DEBUG = logging.DEBUG
4829+ERROR = logging.ERROR
4830+
4831+
4832+class OpenStackAmuletUtils(AmuletUtils):
4833+ """OpenStack amulet utilities.
4834+
4835+ This class inherits from AmuletUtils and has additional support
4836+ that is specifically for use by OpenStack charms.
4837+ """
4838+
4839+ def __init__(self, log_level=ERROR):
4840+ """Initialize the deployment environment."""
4841+ super(OpenStackAmuletUtils, self).__init__(log_level)
4842+
4843+ def validate_endpoint_data(self, endpoints, admin_port, internal_port,
4844+ public_port, expected):
4845+ """Validate endpoint data.
4846+
4847+ Validate actual endpoint data vs expected endpoint data. The ports
4848+ are used to find the matching endpoint.
4849+ """
4850+ found = False
4851+ for ep in endpoints:
4852+ self.log.debug('endpoint: {}'.format(repr(ep)))
4853+ if (admin_port in ep.adminurl and
4854+ internal_port in ep.internalurl and
4855+ public_port in ep.publicurl):
4856+ found = True
4857+ actual = {'id': ep.id,
4858+ 'region': ep.region,
4859+ 'adminurl': ep.adminurl,
4860+ 'internalurl': ep.internalurl,
4861+ 'publicurl': ep.publicurl,
4862+ 'service_id': ep.service_id}
4863+ ret = self._validate_dict_data(expected, actual)
4864+ if ret:
4865+ return 'unexpected endpoint data - {}'.format(ret)
4866+
4867+ if not found:
4868+ return 'endpoint not found'
4869+
4870+ def validate_svc_catalog_endpoint_data(self, expected, actual):
4871+ """Validate service catalog endpoint data.
4872+
4873+ Validate a list of actual service catalog endpoints vs a list of
4874+ expected service catalog endpoints.
4875+ """
4876+ self.log.debug('actual: {}'.format(repr(actual)))
4877+ for k, v in expected.iteritems():
4878+ if k in actual:
4879+ ret = self._validate_dict_data(expected[k][0], actual[k][0])
4880+ if ret:
4881+ return self.endpoint_error(k, ret)
4882+ else:
4883+ return "endpoint {} does not exist".format(k)
4884+ return None  # every expected endpoint validated cleanly
4885+
4886+ def validate_tenant_data(self, expected, actual):
4887+ """Validate tenant data.
4888+
4889+ Validate a list of actual tenant data vs a list of expected
4890+ tenant data.
4891+ """
4892+ self.log.debug('actual: {}'.format(repr(actual)))
4893+ for e in expected:
4894+ found = False
4895+ for act in actual:
4896+ a = {'enabled': act.enabled, 'description': act.description,
4897+ 'name': act.name, 'id': act.id}
4898+ if e['name'] == a['name']:
4899+ found = True
4900+ ret = self._validate_dict_data(e, a)
4901+ if ret:
4902+ return "unexpected tenant data - {}".format(ret)
4903+ if not found:
4904+ return "tenant {} does not exist".format(e['name'])
4905+ return None  # all expected tenants matched
4906+
4907+ def validate_role_data(self, expected, actual):
4908+ """Validate role data.
4909+
4910+ Validate a list of actual role data vs a list of expected role
4911+ data.
4912+ """
4913+ self.log.debug('actual: {}'.format(repr(actual)))
4914+ for e in expected:
4915+ found = False
4916+ for act in actual:
4917+ a = {'name': act.name, 'id': act.id}
4918+ if e['name'] == a['name']:
4919+ found = True
4920+ ret = self._validate_dict_data(e, a)
4921+ if ret:
4922+ return "unexpected role data - {}".format(ret)
4923+ if not found:
4924+ return "role {} does not exist".format(e['name'])
4925+ return None  # all expected roles matched
4926+
4927+ def validate_user_data(self, expected, actual):
4928+ """Validate user data.
4929+
4930+ Validate a list of actual user data vs a list of expected user
4931+ data.
4932+ """
4933+ self.log.debug('actual: {}'.format(repr(actual)))
4934+ for e in expected:
4935+ found = False
4936+ for act in actual:
4937+ a = {'enabled': act.enabled, 'name': act.name,
4938+ 'email': act.email, 'tenantId': act.tenantId,
4939+ 'id': act.id}
4940+ if e['name'] == a['name']:
4941+ found = True
4942+ ret = self._validate_dict_data(e, a)
4943+ if ret:
4944+ return "unexpected user data - {}".format(ret)
4945+ if not found:
4946+ return "user {} does not exist".format(e['name'])
4947+ return None  # all expected users matched
4948+
4949+ def validate_flavor_data(self, expected, actual):
4950+ """Validate flavor data.
4951+
4952+ Validate a list of actual flavors vs a list of expected flavors.
4953+ """
4954+ self.log.debug('actual: {}'.format(repr(actual)))
4955+ act = [a.name for a in actual]
4956+ return self._validate_list_data(expected, act)
4957+
4958+ def tenant_exists(self, keystone, tenant):
4959+ """Return True if tenant exists."""
4960+ return tenant in [t.name for t in keystone.tenants.list()]
4961+
4962+ def authenticate_keystone_admin(self, keystone_sentry, user, password,
4963+ tenant):
4964+ """Authenticates admin user with the keystone admin endpoint."""
4965+ unit = keystone_sentry  # keystone's address is read off its mysql relation data below
4966+ service_ip = unit.relation('shared-db',
4967+ 'mysql:shared-db')['private-address']
4968+ ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
4969+ return keystone_client.Client(username=user, password=password,
4970+ tenant_name=tenant, auth_url=ep)
4971+
4972+ def authenticate_keystone_user(self, keystone, user, password, tenant):
4973+ """Authenticates a regular user with the keystone public endpoint."""
4974+ ep = keystone.service_catalog.url_for(service_type='identity',
4975+ endpoint_type='publicURL')
4976+ return keystone_client.Client(username=user, password=password,
4977+ tenant_name=tenant, auth_url=ep)
4978+
4979+ def authenticate_glance_admin(self, keystone):
4980+ """Authenticates admin user with glance."""
4981+ ep = keystone.service_catalog.url_for(service_type='image',
4982+ endpoint_type='adminURL')
4983+ return glance_client.Client(ep, token=keystone.auth_token)
4984+
4985+ def authenticate_nova_user(self, keystone, user, password, tenant):
4986+ """Authenticates a regular user with nova-api."""
4987+ ep = keystone.service_catalog.url_for(service_type='identity',
4988+ endpoint_type='publicURL')
4989+ return nova_client.Client(username=user, api_key=password,
4990+ project_id=tenant, auth_url=ep)
4991+
4992+ def create_cirros_image(self, glance, image_name):
4993+ """Download the latest cirros image and upload it to glance."""
4994+ http_proxy = os.getenv('AMULET_HTTP_PROXY')
4995+ self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
4996+ if http_proxy:
4997+ proxies = {'http': http_proxy}
4998+ opener = urllib.FancyURLopener(proxies)
4999+ else:
5000+ opener = urllib.FancyURLopener()
The diff has been truncated for viewing.
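
Similarly, a minimal sketch (again, not part of the proposed branch) of how a
test might use these utilities once a deployment settles; the sentry handle,
credentials, image name, and expected values are all hypothetical:

    from charmhelpers.contrib.openstack.amulet.utils import (
        OpenStackAmuletUtils,
        DEBUG,
    )


    def example_checks(keystone_sentry):
        """Hypothetical post-deploy checks; keystone_sentry would come from
        the amulet sentry map, e.g. deployment.d.sentry.unit['keystone/0'].
        """
        u = OpenStackAmuletUtils(DEBUG)

        # Build authenticated clients with the helpers above.
        keystone = u.authenticate_keystone_admin(keystone_sentry,
                                                 user='admin',
                                                 password='openstack',
                                                 tenant='admin')
        glance = u.authenticate_glance_admin(keystone)
        nova = u.authenticate_nova_user(keystone, user='demo',
                                        password='password', tenant='demo')

        # Upload the cirros test image, then spot-check flavor and tenant
        # data; the validators return a falsy value when everything matches.
        u.create_cirros_image(glance, 'cirros-image')
        err = u.validate_flavor_data(expected=['m1.tiny'],
                                     actual=nova.flavors.list())
        if err:
            raise Exception(err)
        err = u.validate_tenant_data(
            expected=[{'name': 'admin', 'enabled': True,
                       'description': 'Created by Juju'}],
            actual=keystone.tenants.list())
        if err:
            raise Exception(err)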
