Merge lp:~gnuoy/charms/trusty/neutron-api-odl/ch-sync into lp:~openstack-charmers/charms/trusty/neutron-api-odl/vpp

Proposed by Liam Young
Status: Merged
Merged at revision: 5
Proposed branch: lp:~gnuoy/charms/trusty/neutron-api-odl/ch-sync
Merge into: lp:~openstack-charmers/charms/trusty/neutron-api-odl/vpp
Diff against target: 5751 lines (+3992/-388)
35 files modified
charm-helpers-hooks.yaml (+1/-1)
hooks/charmhelpers/contrib/network/ip.py (+10/-4)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+157/-15)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+742/-51)
hooks/charmhelpers/contrib/openstack/context.py (+178/-63)
hooks/charmhelpers/contrib/openstack/neutron.py (+57/-16)
hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+12/-6)
hooks/charmhelpers/contrib/openstack/templating.py (+32/-29)
hooks/charmhelpers/contrib/openstack/utils.py (+324/-33)
hooks/charmhelpers/contrib/python/packages.py (+2/-0)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+232/-19)
hooks/charmhelpers/contrib/storage/linux/utils.py (+4/-3)
hooks/charmhelpers/core/files.py (+45/-0)
hooks/charmhelpers/core/hookenv.py (+249/-49)
hooks/charmhelpers/core/host.py (+144/-19)
hooks/charmhelpers/core/hugepage.py (+33/-16)
hooks/charmhelpers/core/kernel.py (+68/-0)
hooks/charmhelpers/core/services/base.py (+12/-9)
hooks/charmhelpers/core/services/helpers.py (+9/-7)
hooks/charmhelpers/core/strutils.py (+30/-0)
hooks/charmhelpers/core/templating.py (+12/-12)
hooks/charmhelpers/core/unitdata.py (+61/-17)
hooks/charmhelpers/fetch/__init__.py (+31/-14)
hooks/charmhelpers/fetch/archiveurl.py (+7/-1)
hooks/charmhelpers/fetch/giturl.py (+1/-1)
hooks/services.py (+2/-3)
tests/charmhelpers/__init__.py (+38/-0)
tests/charmhelpers/contrib/__init__.py (+15/-0)
tests/charmhelpers/contrib/amulet/__init__.py (+15/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+94/-0)
tests/charmhelpers/contrib/amulet/utils.py (+553/-0)
tests/charmhelpers/contrib/openstack/__init__.py (+15/-0)
tests/charmhelpers/contrib/openstack/amulet/__init__.py (+15/-0)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+188/-0)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+604/-0)
To merge this branch: bzr merge lp:~gnuoy/charms/trusty/neutron-api-odl/ch-sync
Reviewer: James Page
Status: Approve
Review via email: mp+276520@code.launchpad.net
James Page (james-page):
review: Approve
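
For reference, a charm-helpers resync such as this one is normally regenerated from charm-helpers-hooks.yaml rather than edited by hand; this proposal also points that file back at lp:charm-helpers instead of the personal cisco-vpp branch (first hunk below). A minimal sketch of the usual workflow, assuming the charm follows the common OpenStack-charm convention of syncing with charm_helpers_sync.py (the script's exact location, or a 'make sync' wrapper, varies per charm and is not confirmed here):

    # Branch the charm, re-run the sync against the config file, and commit.
    bzr branch lp:~openstack-charmers/charms/trusty/neutron-api-odl/vpp neutron-api-odl
    cd neutron-api-odl
    python charm_helpers_sync.py -c charm-helpers-hooks.yaml
    bzr commit -m 'charmhelpers sync'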

Preview Diff

1=== modified file 'charm-helpers-hooks.yaml'
2--- charm-helpers-hooks.yaml 2015-06-24 12:22:08 +0000
3+++ charm-helpers-hooks.yaml 2015-11-03 12:30:15 +0000
4@@ -1,4 +1,4 @@
5-branch: lp:~gnuoy/charm-helpers/cisco-vpp/
6+branch: lp:charm-helpers
7 destination: hooks/charmhelpers
8 include:
9 - core
10
11=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
12--- hooks/charmhelpers/contrib/network/ip.py 2015-06-24 12:22:08 +0000
13+++ hooks/charmhelpers/contrib/network/ip.py 2015-11-03 12:30:15 +0000
14@@ -23,7 +23,7 @@
15 from functools import partial
16
17 from charmhelpers.core.hookenv import unit_get
18-from charmhelpers.fetch import apt_install
19+from charmhelpers.fetch import apt_install, apt_update
20 from charmhelpers.core.hookenv import (
21 log,
22 WARNING,
23@@ -32,13 +32,15 @@
24 try:
25 import netifaces
26 except ImportError:
27- apt_install('python-netifaces')
28+ apt_update(fatal=True)
29+ apt_install('python-netifaces', fatal=True)
30 import netifaces
31
32 try:
33 import netaddr
34 except ImportError:
35- apt_install('python-netaddr')
36+ apt_update(fatal=True)
37+ apt_install('python-netaddr', fatal=True)
38 import netaddr
39
40
41@@ -435,8 +437,12 @@
42
43 rev = dns.reversename.from_address(address)
44 result = ns_query(rev)
45+
46 if not result:
47- return None
48+ try:
49+ result = socket.gethostbyaddr(address)[0]
50+ except:
51+ return None
52 else:
53 result = address
54
55
56=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
57--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-06-24 12:22:08 +0000
58+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-11-03 12:30:15 +0000
59@@ -14,12 +14,18 @@
60 # You should have received a copy of the GNU Lesser General Public License
61 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
62
63+import logging
64+import re
65+import sys
66 import six
67 from collections import OrderedDict
68 from charmhelpers.contrib.amulet.deployment import (
69 AmuletDeployment
70 )
71
72+DEBUG = logging.DEBUG
73+ERROR = logging.ERROR
74+
75
76 class OpenStackAmuletDeployment(AmuletDeployment):
77 """OpenStack amulet deployment.
78@@ -28,9 +34,12 @@
79 that is specifically for use by OpenStack charms.
80 """
81
82- def __init__(self, series=None, openstack=None, source=None, stable=True):
83+ def __init__(self, series=None, openstack=None, source=None,
84+ stable=True, log_level=DEBUG):
85 """Initialize the deployment environment."""
86 super(OpenStackAmuletDeployment, self).__init__(series)
87+ self.log = self.get_logger(level=log_level)
88+ self.log.info('OpenStackAmuletDeployment: init')
89 self.openstack = openstack
90 self.source = source
91 self.stable = stable
92@@ -38,30 +47,55 @@
93 # out.
94 self.current_next = "trusty"
95
96+ def get_logger(self, name="deployment-logger", level=logging.DEBUG):
97+ """Get a logger object that will log to stdout."""
98+ log = logging
99+ logger = log.getLogger(name)
100+ fmt = log.Formatter("%(asctime)s %(funcName)s "
101+ "%(levelname)s: %(message)s")
102+
103+ handler = log.StreamHandler(stream=sys.stdout)
104+ handler.setLevel(level)
105+ handler.setFormatter(fmt)
106+
107+ logger.addHandler(handler)
108+ logger.setLevel(level)
109+
110+ return logger
111+
112 def _determine_branch_locations(self, other_services):
113 """Determine the branch locations for the other services.
114
115 Determine if the local branch being tested is derived from its
116 stable or next (dev) branch, and based on this, use the corresonding
117 stable or next branches for the other_services."""
118- base_charms = ['mysql', 'mongodb']
119+
120+ self.log.info('OpenStackAmuletDeployment: determine branch locations')
121+
122+ # Charms outside the lp:~openstack-charmers namespace
123+ base_charms = ['mysql', 'mongodb', 'nrpe']
124+
125+ # Force these charms to current series even when using an older series.
126+ # ie. Use trusty/nrpe even when series is precise, as the P charm
127+ # does not possess the necessary external master config and hooks.
128+ force_series_current = ['nrpe']
129
130 if self.series in ['precise', 'trusty']:
131 base_series = self.series
132 else:
133 base_series = self.current_next
134
135- if self.stable:
136- for svc in other_services:
137- if svc.get('location'):
138- continue
139+ for svc in other_services:
140+ if svc['name'] in force_series_current:
141+ base_series = self.current_next
142+ # If a location has been explicitly set, use it
143+ if svc.get('location'):
144+ continue
145+ if self.stable:
146 temp = 'lp:charms/{}/{}'
147 svc['location'] = temp.format(base_series,
148 svc['name'])
149- else:
150- for svc in other_services:
151- if svc.get('location'):
152- continue
153+ else:
154 if svc['name'] in base_charms:
155 temp = 'lp:charms/{}/{}'
156 svc['location'] = temp.format(base_series,
157@@ -70,10 +104,13 @@
158 temp = 'lp:~openstack-charmers/charms/{}/{}/next'
159 svc['location'] = temp.format(self.current_next,
160 svc['name'])
161+
162 return other_services
163
164 def _add_services(self, this_service, other_services):
165 """Add services to the deployment and set openstack-origin/source."""
166+ self.log.info('OpenStackAmuletDeployment: adding services')
167+
168 other_services = self._determine_branch_locations(other_services)
169
170 super(OpenStackAmuletDeployment, self)._add_services(this_service,
171@@ -81,29 +118,101 @@
172
173 services = other_services
174 services.append(this_service)
175+
176+ # Charms which should use the source config option
177 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
178 'ceph-osd', 'ceph-radosgw']
179- # Openstack subordinate charms do not expose an origin option as that
180- # is controlled by the principle
181- ignore = ['neutron-openvswitch', 'cisco-vpp']
182+
183+ # Charms which can not use openstack-origin, ie. many subordinates
184+ no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
185
186 if self.openstack:
187 for svc in services:
188- if svc['name'] not in use_source + ignore:
189+ if svc['name'] not in use_source + no_origin:
190 config = {'openstack-origin': self.openstack}
191 self.d.configure(svc['name'], config)
192
193 if self.source:
194 for svc in services:
195- if svc['name'] in use_source and svc['name'] not in ignore:
196+ if svc['name'] in use_source and svc['name'] not in no_origin:
197 config = {'source': self.source}
198 self.d.configure(svc['name'], config)
199
200 def _configure_services(self, configs):
201 """Configure all of the services."""
202+ self.log.info('OpenStackAmuletDeployment: configure services')
203 for service, config in six.iteritems(configs):
204 self.d.configure(service, config)
205
206+ def _auto_wait_for_status(self, message=None, exclude_services=None,
207+ include_only=None, timeout=1800):
208+ """Wait for all units to have a specific extended status, except
209+ for any defined as excluded. Unless specified via message, any
210+ status containing any case of 'ready' will be considered a match.
211+
212+ Examples of message usage:
213+
214+ Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
215+ message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
216+
217+ Wait for all units to reach this status (exact match):
218+ message = re.compile('^Unit is ready and clustered$')
219+
220+ Wait for all units to reach any one of these (exact match):
221+ message = re.compile('Unit is ready|OK|Ready')
222+
223+ Wait for at least one unit to reach this status (exact match):
224+ message = {'ready'}
225+
226+ See Amulet's sentry.wait_for_messages() for message usage detail.
227+ https://github.com/juju/amulet/blob/master/amulet/sentry.py
228+
229+ :param message: Expected status match
230+ :param exclude_services: List of juju service names to ignore,
231+ not to be used in conjuction with include_only.
232+ :param include_only: List of juju service names to exclusively check,
233+ not to be used in conjuction with exclude_services.
234+ :param timeout: Maximum time in seconds to wait for status match
235+ :returns: None. Raises if timeout is hit.
236+ """
237+ self.log.info('Waiting for extended status on units...')
238+
239+ all_services = self.d.services.keys()
240+
241+ if exclude_services and include_only:
242+ raise ValueError('exclude_services can not be used '
243+ 'with include_only')
244+
245+ if message:
246+ if isinstance(message, re._pattern_type):
247+ match = message.pattern
248+ else:
249+ match = message
250+
251+ self.log.debug('Custom extended status wait match: '
252+ '{}'.format(match))
253+ else:
254+ self.log.debug('Default extended status wait match: contains '
255+ 'READY (case-insensitive)')
256+ message = re.compile('.*ready.*', re.IGNORECASE)
257+
258+ if exclude_services:
259+ self.log.debug('Excluding services from extended status match: '
260+ '{}'.format(exclude_services))
261+ else:
262+ exclude_services = []
263+
264+ if include_only:
265+ services = include_only
266+ else:
267+ services = list(set(all_services) - set(exclude_services))
268+
269+ self.log.debug('Waiting up to {}s for extended status on services: '
270+ '{}'.format(timeout, services))
271+ service_messages = {service: message for service in services}
272+ self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
273+ self.log.info('OK')
274+
275 def _get_openstack_release(self):
276 """Get openstack release.
277
278@@ -152,3 +261,36 @@
279 return os_origin.split('%s-' % self.series)[1].split('/')[0]
280 else:
281 return releases[self.series]
282+
283+ def get_ceph_expected_pools(self, radosgw=False):
284+ """Return a list of expected ceph pools in a ceph + cinder + glance
285+ test scenario, based on OpenStack release and whether ceph radosgw
286+ is flagged as present or not."""
287+
288+ if self._get_openstack_release() >= self.trusty_kilo:
289+ # Kilo or later
290+ pools = [
291+ 'rbd',
292+ 'cinder',
293+ 'glance'
294+ ]
295+ else:
296+ # Juno or earlier
297+ pools = [
298+ 'data',
299+ 'metadata',
300+ 'rbd',
301+ 'cinder',
302+ 'glance'
303+ ]
304+
305+ if radosgw:
306+ pools.extend([
307+ '.rgw.root',
308+ '.rgw.control',
309+ '.rgw',
310+ '.rgw.gc',
311+ '.users.uid'
312+ ])
313+
314+ return pools
315
316=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
317--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-06-24 12:22:08 +0000
318+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-11-03 12:30:15 +0000
319@@ -14,16 +14,22 @@
320 # You should have received a copy of the GNU Lesser General Public License
321 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
322
323+import amulet
324+import json
325 import logging
326 import os
327+import re
328+import six
329 import time
330 import urllib
331
332+import cinderclient.v1.client as cinder_client
333 import glanceclient.v1.client as glance_client
334+import heatclient.v1.client as heat_client
335 import keystoneclient.v2_0 as keystone_client
336 import novaclient.v1_1.client as nova_client
337-
338-import six
339+import pika
340+import swiftclient
341
342 from charmhelpers.contrib.amulet.utils import (
343 AmuletUtils
344@@ -37,7 +43,7 @@
345 """OpenStack amulet utilities.
346
347 This class inherits from AmuletUtils and has additional support
348- that is specifically for use by OpenStack charms.
349+ that is specifically for use by OpenStack charm tests.
350 """
351
352 def __init__(self, log_level=ERROR):
353@@ -51,6 +57,8 @@
354 Validate actual endpoint data vs expected endpoint data. The ports
355 are used to find the matching endpoint.
356 """
357+ self.log.debug('Validating endpoint data...')
358+ self.log.debug('actual: {}'.format(repr(endpoints)))
359 found = False
360 for ep in endpoints:
361 self.log.debug('endpoint: {}'.format(repr(ep)))
362@@ -77,6 +85,7 @@
363 Validate a list of actual service catalog endpoints vs a list of
364 expected service catalog endpoints.
365 """
366+ self.log.debug('Validating service catalog endpoint data...')
367 self.log.debug('actual: {}'.format(repr(actual)))
368 for k, v in six.iteritems(expected):
369 if k in actual:
370@@ -93,6 +102,7 @@
371 Validate a list of actual tenant data vs list of expected tenant
372 data.
373 """
374+ self.log.debug('Validating tenant data...')
375 self.log.debug('actual: {}'.format(repr(actual)))
376 for e in expected:
377 found = False
378@@ -114,6 +124,7 @@
379 Validate a list of actual role data vs a list of expected role
380 data.
381 """
382+ self.log.debug('Validating role data...')
383 self.log.debug('actual: {}'.format(repr(actual)))
384 for e in expected:
385 found = False
386@@ -134,6 +145,7 @@
387 Validate a list of actual user data vs a list of expected user
388 data.
389 """
390+ self.log.debug('Validating user data...')
391 self.log.debug('actual: {}'.format(repr(actual)))
392 for e in expected:
393 found = False
394@@ -155,17 +167,30 @@
395
396 Validate a list of actual flavors vs a list of expected flavors.
397 """
398+ self.log.debug('Validating flavor data...')
399 self.log.debug('actual: {}'.format(repr(actual)))
400 act = [a.name for a in actual]
401 return self._validate_list_data(expected, act)
402
403 def tenant_exists(self, keystone, tenant):
404 """Return True if tenant exists."""
405+ self.log.debug('Checking if tenant exists ({})...'.format(tenant))
406 return tenant in [t.name for t in keystone.tenants.list()]
407
408+ def authenticate_cinder_admin(self, keystone_sentry, username,
409+ password, tenant):
410+ """Authenticates admin user with cinder."""
411+ # NOTE(beisner): cinder python client doesn't accept tokens.
412+ service_ip = \
413+ keystone_sentry.relation('shared-db',
414+ 'mysql:shared-db')['private-address']
415+ ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
416+ return cinder_client.Client(username, password, tenant, ept)
417+
418 def authenticate_keystone_admin(self, keystone_sentry, user, password,
419 tenant):
420 """Authenticates admin user with the keystone admin endpoint."""
421+ self.log.debug('Authenticating keystone admin...')
422 unit = keystone_sentry
423 service_ip = unit.relation('shared-db',
424 'mysql:shared-db')['private-address']
425@@ -175,6 +200,7 @@
426
427 def authenticate_keystone_user(self, keystone, user, password, tenant):
428 """Authenticates a regular user with the keystone public endpoint."""
429+ self.log.debug('Authenticating keystone user ({})...'.format(user))
430 ep = keystone.service_catalog.url_for(service_type='identity',
431 endpoint_type='publicURL')
432 return keystone_client.Client(username=user, password=password,
433@@ -182,19 +208,49 @@
434
435 def authenticate_glance_admin(self, keystone):
436 """Authenticates admin user with glance."""
437+ self.log.debug('Authenticating glance admin...')
438 ep = keystone.service_catalog.url_for(service_type='image',
439 endpoint_type='adminURL')
440 return glance_client.Client(ep, token=keystone.auth_token)
441
442+ def authenticate_heat_admin(self, keystone):
443+ """Authenticates the admin user with heat."""
444+ self.log.debug('Authenticating heat admin...')
445+ ep = keystone.service_catalog.url_for(service_type='orchestration',
446+ endpoint_type='publicURL')
447+ return heat_client.Client(endpoint=ep, token=keystone.auth_token)
448+
449 def authenticate_nova_user(self, keystone, user, password, tenant):
450 """Authenticates a regular user with nova-api."""
451+ self.log.debug('Authenticating nova user ({})...'.format(user))
452 ep = keystone.service_catalog.url_for(service_type='identity',
453 endpoint_type='publicURL')
454 return nova_client.Client(username=user, api_key=password,
455 project_id=tenant, auth_url=ep)
456
457+ def authenticate_swift_user(self, keystone, user, password, tenant):
458+ """Authenticates a regular user with swift api."""
459+ self.log.debug('Authenticating swift user ({})...'.format(user))
460+ ep = keystone.service_catalog.url_for(service_type='identity',
461+ endpoint_type='publicURL')
462+ return swiftclient.Connection(authurl=ep,
463+ user=user,
464+ key=password,
465+ tenant_name=tenant,
466+ auth_version='2.0')
467+
468 def create_cirros_image(self, glance, image_name):
469- """Download the latest cirros image and upload it to glance."""
470+ """Download the latest cirros image and upload it to glance,
471+ validate and return a resource pointer.
472+
473+ :param glance: pointer to authenticated glance connection
474+ :param image_name: display name for new image
475+ :returns: glance image pointer
476+ """
477+ self.log.debug('Creating glance cirros image '
478+ '({})...'.format(image_name))
479+
480+ # Download cirros image
481 http_proxy = os.getenv('AMULET_HTTP_PROXY')
482 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
483 if http_proxy:
484@@ -203,57 +259,67 @@
485 else:
486 opener = urllib.FancyURLopener()
487
488- f = opener.open("http://download.cirros-cloud.net/version/released")
489+ f = opener.open('http://download.cirros-cloud.net/version/released')
490 version = f.read().strip()
491- cirros_img = "cirros-{}-x86_64-disk.img".format(version)
492+ cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
493 local_path = os.path.join('tests', cirros_img)
494
495 if not os.path.exists(local_path):
496- cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
497+ cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
498 version, cirros_img)
499 opener.retrieve(cirros_url, local_path)
500 f.close()
501
502+ # Create glance image
503 with open(local_path) as f:
504 image = glance.images.create(name=image_name, is_public=True,
505 disk_format='qcow2',
506 container_format='bare', data=f)
507- count = 1
508- status = image.status
509- while status != 'active' and count < 10:
510- time.sleep(3)
511- image = glance.images.get(image.id)
512- status = image.status
513- self.log.debug('image status: {}'.format(status))
514- count += 1
515-
516- if status != 'active':
517- self.log.error('image creation timed out')
518- return None
519+
520+ # Wait for image to reach active status
521+ img_id = image.id
522+ ret = self.resource_reaches_status(glance.images, img_id,
523+ expected_stat='active',
524+ msg='Image status wait')
525+ if not ret:
526+ msg = 'Glance image failed to reach expected state.'
527+ amulet.raise_status(amulet.FAIL, msg=msg)
528+
529+ # Re-validate new image
530+ self.log.debug('Validating image attributes...')
531+ val_img_name = glance.images.get(img_id).name
532+ val_img_stat = glance.images.get(img_id).status
533+ val_img_pub = glance.images.get(img_id).is_public
534+ val_img_cfmt = glance.images.get(img_id).container_format
535+ val_img_dfmt = glance.images.get(img_id).disk_format
536+ msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
537+ 'container fmt:{} disk fmt:{}'.format(
538+ val_img_name, val_img_pub, img_id,
539+ val_img_stat, val_img_cfmt, val_img_dfmt))
540+
541+ if val_img_name == image_name and val_img_stat == 'active' \
542+ and val_img_pub is True and val_img_cfmt == 'bare' \
543+ and val_img_dfmt == 'qcow2':
544+ self.log.debug(msg_attr)
545+ else:
546+ msg = ('Volume validation failed, {}'.format(msg_attr))
547+ amulet.raise_status(amulet.FAIL, msg=msg)
548
549 return image
550
551 def delete_image(self, glance, image):
552 """Delete the specified image."""
553- num_before = len(list(glance.images.list()))
554- glance.images.delete(image)
555-
556- count = 1
557- num_after = len(list(glance.images.list()))
558- while num_after != (num_before - 1) and count < 10:
559- time.sleep(3)
560- num_after = len(list(glance.images.list()))
561- self.log.debug('number of images: {}'.format(num_after))
562- count += 1
563-
564- if num_after != (num_before - 1):
565- self.log.error('image deletion timed out')
566- return False
567-
568- return True
569+
570+ # /!\ DEPRECATION WARNING
571+ self.log.warn('/!\\ DEPRECATION WARNING: use '
572+ 'delete_resource instead of delete_image.')
573+ self.log.debug('Deleting glance image ({})...'.format(image))
574+ return self.delete_resource(glance.images, image, msg='glance image')
575
576 def create_instance(self, nova, image_name, instance_name, flavor):
577 """Create the specified instance."""
578+ self.log.debug('Creating instance '
579+ '({}|{}|{})'.format(instance_name, image_name, flavor))
580 image = nova.images.find(name=image_name)
581 flavor = nova.flavors.find(name=flavor)
582 instance = nova.servers.create(name=instance_name, image=image,
583@@ -276,19 +342,644 @@
584
585 def delete_instance(self, nova, instance):
586 """Delete the specified instance."""
587- num_before = len(list(nova.servers.list()))
588- nova.servers.delete(instance)
589-
590- count = 1
591- num_after = len(list(nova.servers.list()))
592- while num_after != (num_before - 1) and count < 10:
593- time.sleep(3)
594- num_after = len(list(nova.servers.list()))
595- self.log.debug('number of instances: {}'.format(num_after))
596- count += 1
597-
598- if num_after != (num_before - 1):
599- self.log.error('instance deletion timed out')
600- return False
601-
602- return True
603+
604+ # /!\ DEPRECATION WARNING
605+ self.log.warn('/!\\ DEPRECATION WARNING: use '
606+ 'delete_resource instead of delete_instance.')
607+ self.log.debug('Deleting instance ({})...'.format(instance))
608+ return self.delete_resource(nova.servers, instance,
609+ msg='nova instance')
610+
611+ def create_or_get_keypair(self, nova, keypair_name="testkey"):
612+ """Create a new keypair, or return pointer if it already exists."""
613+ try:
614+ _keypair = nova.keypairs.get(keypair_name)
615+ self.log.debug('Keypair ({}) already exists, '
616+ 'using it.'.format(keypair_name))
617+ return _keypair
618+ except:
619+ self.log.debug('Keypair ({}) does not exist, '
620+ 'creating it.'.format(keypair_name))
621+
622+ _keypair = nova.keypairs.create(name=keypair_name)
623+ return _keypair
624+
625+ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
626+ img_id=None, src_vol_id=None, snap_id=None):
627+ """Create cinder volume, optionally from a glance image, OR
628+ optionally as a clone of an existing volume, OR optionally
629+ from a snapshot. Wait for the new volume status to reach
630+ the expected status, validate and return a resource pointer.
631+
632+ :param vol_name: cinder volume display name
633+ :param vol_size: size in gigabytes
634+ :param img_id: optional glance image id
635+ :param src_vol_id: optional source volume id to clone
636+ :param snap_id: optional snapshot id to use
637+ :returns: cinder volume pointer
638+ """
639+ # Handle parameter input and avoid impossible combinations
640+ if img_id and not src_vol_id and not snap_id:
641+ # Create volume from image
642+ self.log.debug('Creating cinder volume from glance image...')
643+ bootable = 'true'
644+ elif src_vol_id and not img_id and not snap_id:
645+ # Clone an existing volume
646+ self.log.debug('Cloning cinder volume...')
647+ bootable = cinder.volumes.get(src_vol_id).bootable
648+ elif snap_id and not src_vol_id and not img_id:
649+ # Create volume from snapshot
650+ self.log.debug('Creating cinder volume from snapshot...')
651+ snap = cinder.volume_snapshots.find(id=snap_id)
652+ vol_size = snap.size
653+ snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
654+ bootable = cinder.volumes.get(snap_vol_id).bootable
655+ elif not img_id and not src_vol_id and not snap_id:
656+ # Create volume
657+ self.log.debug('Creating cinder volume...')
658+ bootable = 'false'
659+ else:
660+ # Impossible combination of parameters
661+ msg = ('Invalid method use - name:{} size:{} img_id:{} '
662+ 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
663+ img_id, src_vol_id,
664+ snap_id))
665+ amulet.raise_status(amulet.FAIL, msg=msg)
666+
667+ # Create new volume
668+ try:
669+ vol_new = cinder.volumes.create(display_name=vol_name,
670+ imageRef=img_id,
671+ size=vol_size,
672+ source_volid=src_vol_id,
673+ snapshot_id=snap_id)
674+ vol_id = vol_new.id
675+ except Exception as e:
676+ msg = 'Failed to create volume: {}'.format(e)
677+ amulet.raise_status(amulet.FAIL, msg=msg)
678+
679+ # Wait for volume to reach available status
680+ ret = self.resource_reaches_status(cinder.volumes, vol_id,
681+ expected_stat="available",
682+ msg="Volume status wait")
683+ if not ret:
684+ msg = 'Cinder volume failed to reach expected state.'
685+ amulet.raise_status(amulet.FAIL, msg=msg)
686+
687+ # Re-validate new volume
688+ self.log.debug('Validating volume attributes...')
689+ val_vol_name = cinder.volumes.get(vol_id).display_name
690+ val_vol_boot = cinder.volumes.get(vol_id).bootable
691+ val_vol_stat = cinder.volumes.get(vol_id).status
692+ val_vol_size = cinder.volumes.get(vol_id).size
693+ msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
694+ '{} size:{}'.format(val_vol_name, vol_id,
695+ val_vol_stat, val_vol_boot,
696+ val_vol_size))
697+
698+ if val_vol_boot == bootable and val_vol_stat == 'available' \
699+ and val_vol_name == vol_name and val_vol_size == vol_size:
700+ self.log.debug(msg_attr)
701+ else:
702+ msg = ('Volume validation failed, {}'.format(msg_attr))
703+ amulet.raise_status(amulet.FAIL, msg=msg)
704+
705+ return vol_new
706+
707+ def delete_resource(self, resource, resource_id,
708+ msg="resource", max_wait=120):
709+ """Delete one openstack resource, such as one instance, keypair,
710+ image, volume, stack, etc., and confirm deletion within max wait time.
711+
712+ :param resource: pointer to os resource type, ex:glance_client.images
713+ :param resource_id: unique name or id for the openstack resource
714+ :param msg: text to identify purpose in logging
715+ :param max_wait: maximum wait time in seconds
716+ :returns: True if successful, otherwise False
717+ """
718+ self.log.debug('Deleting OpenStack resource '
719+ '{} ({})'.format(resource_id, msg))
720+ num_before = len(list(resource.list()))
721+ resource.delete(resource_id)
722+
723+ tries = 0
724+ num_after = len(list(resource.list()))
725+ while num_after != (num_before - 1) and tries < (max_wait / 4):
726+ self.log.debug('{} delete check: '
727+ '{} [{}:{}] {}'.format(msg, tries,
728+ num_before,
729+ num_after,
730+ resource_id))
731+ time.sleep(4)
732+ num_after = len(list(resource.list()))
733+ tries += 1
734+
735+ self.log.debug('{}: expected, actual count = {}, '
736+ '{}'.format(msg, num_before - 1, num_after))
737+
738+ if num_after == (num_before - 1):
739+ return True
740+ else:
741+ self.log.error('{} delete timed out'.format(msg))
742+ return False
743+
744+ def resource_reaches_status(self, resource, resource_id,
745+ expected_stat='available',
746+ msg='resource', max_wait=120):
747+ """Wait for an openstack resources status to reach an
748+ expected status within a specified time. Useful to confirm that
749+ nova instances, cinder vols, snapshots, glance images, heat stacks
750+ and other resources eventually reach the expected status.
751+
752+ :param resource: pointer to os resource type, ex: heat_client.stacks
753+ :param resource_id: unique id for the openstack resource
754+ :param expected_stat: status to expect resource to reach
755+ :param msg: text to identify purpose in logging
756+ :param max_wait: maximum wait time in seconds
757+ :returns: True if successful, False if status is not reached
758+ """
759+
760+ tries = 0
761+ resource_stat = resource.get(resource_id).status
762+ while resource_stat != expected_stat and tries < (max_wait / 4):
763+ self.log.debug('{} status check: '
764+ '{} [{}:{}] {}'.format(msg, tries,
765+ resource_stat,
766+ expected_stat,
767+ resource_id))
768+ time.sleep(4)
769+ resource_stat = resource.get(resource_id).status
770+ tries += 1
771+
772+ self.log.debug('{}: expected, actual status = {}, '
773+ '{}'.format(msg, resource_stat, expected_stat))
774+
775+ if resource_stat == expected_stat:
776+ return True
777+ else:
778+ self.log.debug('{} never reached expected status: '
779+ '{}'.format(resource_id, expected_stat))
780+ return False
781+
782+ def get_ceph_osd_id_cmd(self, index):
783+ """Produce a shell command that will return a ceph-osd id."""
784+ return ("`initctl list | grep 'ceph-osd ' | "
785+ "awk 'NR=={} {{ print $2 }}' | "
786+ "grep -o '[0-9]*'`".format(index + 1))
787+
788+ def get_ceph_pools(self, sentry_unit):
789+ """Return a dict of ceph pools from a single ceph unit, with
790+ pool name as keys, pool id as vals."""
791+ pools = {}
792+ cmd = 'sudo ceph osd lspools'
793+ output, code = sentry_unit.run(cmd)
794+ if code != 0:
795+ msg = ('{} `{}` returned {} '
796+ '{}'.format(sentry_unit.info['unit_name'],
797+ cmd, code, output))
798+ amulet.raise_status(amulet.FAIL, msg=msg)
799+
800+ # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
801+ for pool in str(output).split(','):
802+ pool_id_name = pool.split(' ')
803+ if len(pool_id_name) == 2:
804+ pool_id = pool_id_name[0]
805+ pool_name = pool_id_name[1]
806+ pools[pool_name] = int(pool_id)
807+
808+ self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
809+ pools))
810+ return pools
811+
812+ def get_ceph_df(self, sentry_unit):
813+ """Return dict of ceph df json output, including ceph pool state.
814+
815+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
816+ :returns: Dict of ceph df output
817+ """
818+ cmd = 'sudo ceph df --format=json'
819+ output, code = sentry_unit.run(cmd)
820+ if code != 0:
821+ msg = ('{} `{}` returned {} '
822+ '{}'.format(sentry_unit.info['unit_name'],
823+ cmd, code, output))
824+ amulet.raise_status(amulet.FAIL, msg=msg)
825+ return json.loads(output)
826+
827+ def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
828+ """Take a sample of attributes of a ceph pool, returning ceph
829+ pool name, object count and disk space used for the specified
830+ pool ID number.
831+
832+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
833+ :param pool_id: Ceph pool ID
834+ :returns: List of pool name, object count, kb disk space used
835+ """
836+ df = self.get_ceph_df(sentry_unit)
837+ pool_name = df['pools'][pool_id]['name']
838+ obj_count = df['pools'][pool_id]['stats']['objects']
839+ kb_used = df['pools'][pool_id]['stats']['kb_used']
840+ self.log.debug('Ceph {} pool (ID {}): {} objects, '
841+ '{} kb used'.format(pool_name, pool_id,
842+ obj_count, kb_used))
843+ return pool_name, obj_count, kb_used
844+
845+ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
846+ """Validate ceph pool samples taken over time, such as pool
847+ object counts or pool kb used, before adding, after adding, and
848+ after deleting items which affect those pool attributes. The
849+ 2nd element is expected to be greater than the 1st; 3rd is expected
850+ to be less than the 2nd.
851+
852+ :param samples: List containing 3 data samples
853+ :param sample_type: String for logging and usage context
854+ :returns: None if successful, Failure message otherwise
855+ """
856+ original, created, deleted = range(3)
857+ if samples[created] <= samples[original] or \
858+ samples[deleted] >= samples[created]:
859+ return ('Ceph {} samples ({}) '
860+ 'unexpected.'.format(sample_type, samples))
861+ else:
862+ self.log.debug('Ceph {} samples (OK): '
863+ '{}'.format(sample_type, samples))
864+ return None
865+
866+ # rabbitmq/amqp specific helpers:
867+
868+ def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
869+ """Wait for rmq units extended status to show cluster readiness,
870+ after an optional initial sleep period. Initial sleep is likely
871+ necessary to be effective following a config change, as status
872+ message may not instantly update to non-ready."""
873+
874+ if init_sleep:
875+ time.sleep(init_sleep)
876+
877+ message = re.compile('^Unit is ready and clustered$')
878+ deployment._auto_wait_for_status(message=message,
879+ timeout=timeout,
880+ include_only=['rabbitmq-server'])
881+
882+ def add_rmq_test_user(self, sentry_units,
883+ username="testuser1", password="changeme"):
884+ """Add a test user via the first rmq juju unit, check connection as
885+ the new user against all sentry units.
886+
887+ :param sentry_units: list of sentry unit pointers
888+ :param username: amqp user name, default to testuser1
889+ :param password: amqp user password
890+ :returns: None if successful. Raise on error.
891+ """
892+ self.log.debug('Adding rmq user ({})...'.format(username))
893+
894+ # Check that user does not already exist
895+ cmd_user_list = 'rabbitmqctl list_users'
896+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
897+ if username in output:
898+ self.log.warning('User ({}) already exists, returning '
899+ 'gracefully.'.format(username))
900+ return
901+
902+ perms = '".*" ".*" ".*"'
903+ cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
904+ 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
905+
906+ # Add user via first unit
907+ for cmd in cmds:
908+ output, _ = self.run_cmd_unit(sentry_units[0], cmd)
909+
910+ # Check connection against the other sentry_units
911+ self.log.debug('Checking user connect against units...')
912+ for sentry_unit in sentry_units:
913+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
914+ username=username,
915+ password=password)
916+ connection.close()
917+
918+ def delete_rmq_test_user(self, sentry_units, username="testuser1"):
919+ """Delete a rabbitmq user via the first rmq juju unit.
920+
921+ :param sentry_units: list of sentry unit pointers
922+ :param username: amqp user name, default to testuser1
923+ :param password: amqp user password
924+ :returns: None if successful or no such user.
925+ """
926+ self.log.debug('Deleting rmq user ({})...'.format(username))
927+
928+ # Check that the user exists
929+ cmd_user_list = 'rabbitmqctl list_users'
930+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
931+
932+ if username not in output:
933+ self.log.warning('User ({}) does not exist, returning '
934+ 'gracefully.'.format(username))
935+ return
936+
937+ # Delete the user
938+ cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
939+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
940+
941+ def get_rmq_cluster_status(self, sentry_unit):
942+ """Execute rabbitmq cluster status command on a unit and return
943+ the full output.
944+
945+ :param unit: sentry unit
946+ :returns: String containing console output of cluster status command
947+ """
948+ cmd = 'rabbitmqctl cluster_status'
949+ output, _ = self.run_cmd_unit(sentry_unit, cmd)
950+ self.log.debug('{} cluster_status:\n{}'.format(
951+ sentry_unit.info['unit_name'], output))
952+ return str(output)
953+
954+ def get_rmq_cluster_running_nodes(self, sentry_unit):
955+ """Parse rabbitmqctl cluster_status output string, return list of
956+ running rabbitmq cluster nodes.
957+
958+ :param unit: sentry unit
959+ :returns: List containing node names of running nodes
960+ """
961+ # NOTE(beisner): rabbitmqctl cluster_status output is not
962+ # json-parsable, do string chop foo, then json.loads that.
963+ str_stat = self.get_rmq_cluster_status(sentry_unit)
964+ if 'running_nodes' in str_stat:
965+ pos_start = str_stat.find("{running_nodes,") + 15
966+ pos_end = str_stat.find("]},", pos_start) + 1
967+ str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
968+ run_nodes = json.loads(str_run_nodes)
969+ return run_nodes
970+ else:
971+ return []
972+
973+ def validate_rmq_cluster_running_nodes(self, sentry_units):
974+ """Check that all rmq unit hostnames are represented in the
975+ cluster_status output of all units.
976+
977+ :param host_names: dict of juju unit names to host names
978+ :param units: list of sentry unit pointers (all rmq units)
979+ :returns: None if successful, otherwise return error message
980+ """
981+ host_names = self.get_unit_hostnames(sentry_units)
982+ errors = []
983+
984+ # Query every unit for cluster_status running nodes
985+ for query_unit in sentry_units:
986+ query_unit_name = query_unit.info['unit_name']
987+ running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
988+
989+ # Confirm that every unit is represented in the queried unit's
990+ # cluster_status running nodes output.
991+ for validate_unit in sentry_units:
992+ val_host_name = host_names[validate_unit.info['unit_name']]
993+ val_node_name = 'rabbit@{}'.format(val_host_name)
994+
995+ if val_node_name not in running_nodes:
996+ errors.append('Cluster member check failed on {}: {} not '
997+ 'in {}\n'.format(query_unit_name,
998+ val_node_name,
999+ running_nodes))
1000+ if errors:
1001+ return ''.join(errors)
1002+
1003+ def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
1004+ """Check a single juju rmq unit for ssl and port in the config file."""
1005+ host = sentry_unit.info['public-address']
1006+ unit_name = sentry_unit.info['unit_name']
1007+
1008+ conf_file = '/etc/rabbitmq/rabbitmq.config'
1009+ conf_contents = str(self.file_contents_safe(sentry_unit,
1010+ conf_file, max_wait=16))
1011+ # Checks
1012+ conf_ssl = 'ssl' in conf_contents
1013+ conf_port = str(port) in conf_contents
1014+
1015+ # Port explicitly checked in config
1016+ if port and conf_port and conf_ssl:
1017+ self.log.debug('SSL is enabled @{}:{} '
1018+ '({})'.format(host, port, unit_name))
1019+ return True
1020+ elif port and not conf_port and conf_ssl:
1021+ self.log.debug('SSL is enabled @{} but not on port {} '
1022+ '({})'.format(host, port, unit_name))
1023+ return False
1024+ # Port not checked (useful when checking that ssl is disabled)
1025+ elif not port and conf_ssl:
1026+ self.log.debug('SSL is enabled @{}:{} '
1027+ '({})'.format(host, port, unit_name))
1028+ return True
1029+ elif not conf_ssl:
1030+ self.log.debug('SSL not enabled @{}:{} '
1031+ '({})'.format(host, port, unit_name))
1032+ return False
1033+ else:
1034+ msg = ('Unknown condition when checking SSL status @{}:{} '
1035+ '({})'.format(host, port, unit_name))
1036+ amulet.raise_status(amulet.FAIL, msg)
1037+
1038+ def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
1039+ """Check that ssl is enabled on rmq juju sentry units.
1040+
1041+ :param sentry_units: list of all rmq sentry units
1042+ :param port: optional ssl port override to validate
1043+ :returns: None if successful, otherwise return error message
1044+ """
1045+ for sentry_unit in sentry_units:
1046+ if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
1047+ return ('Unexpected condition: ssl is disabled on unit '
1048+ '({})'.format(sentry_unit.info['unit_name']))
1049+ return None
1050+
1051+ def validate_rmq_ssl_disabled_units(self, sentry_units):
1052+ """Check that ssl is enabled on listed rmq juju sentry units.
1053+
1054+ :param sentry_units: list of all rmq sentry units
1055+ :returns: True if successful. Raise on error.
1056+ """
1057+ for sentry_unit in sentry_units:
1058+ if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
1059+ return ('Unexpected condition: ssl is enabled on unit '
1060+ '({})'.format(sentry_unit.info['unit_name']))
1061+ return None
1062+
1063+ def configure_rmq_ssl_on(self, sentry_units, deployment,
1064+ port=None, max_wait=60):
1065+ """Turn ssl charm config option on, with optional non-default
1066+ ssl port specification. Confirm that it is enabled on every
1067+ unit.
1068+
1069+ :param sentry_units: list of sentry units
1070+ :param deployment: amulet deployment object pointer
1071+ :param port: amqp port, use defaults if None
1072+ :param max_wait: maximum time to wait in seconds to confirm
1073+ :returns: None if successful. Raise on error.
1074+ """
1075+ self.log.debug('Setting ssl charm config option: on')
1076+
1077+ # Enable RMQ SSL
1078+ config = {'ssl': 'on'}
1079+ if port:
1080+ config['ssl_port'] = port
1081+
1082+ deployment.d.configure('rabbitmq-server', config)
1083+
1084+ # Wait for unit status
1085+ self.rmq_wait_for_cluster(deployment)
1086+
1087+ # Confirm
1088+ tries = 0
1089+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
1090+ while ret and tries < (max_wait / 4):
1091+ time.sleep(4)
1092+ self.log.debug('Attempt {}: {}'.format(tries, ret))
1093+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
1094+ tries += 1
1095+
1096+ if ret:
1097+ amulet.raise_status(amulet.FAIL, ret)
1098+
1099+ def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
1100+ """Turn ssl charm config option off, confirm that it is disabled
1101+ on every unit.
1102+
1103+ :param sentry_units: list of sentry units
1104+ :param deployment: amulet deployment object pointer
1105+ :param max_wait: maximum time to wait in seconds to confirm
1106+ :returns: None if successful. Raise on error.
1107+ """
1108+ self.log.debug('Setting ssl charm config option: off')
1109+
1110+ # Disable RMQ SSL
1111+ config = {'ssl': 'off'}
1112+ deployment.d.configure('rabbitmq-server', config)
1113+
1114+ # Wait for unit status
1115+ self.rmq_wait_for_cluster(deployment)
1116+
1117+ # Confirm
1118+ tries = 0
1119+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
1120+ while ret and tries < (max_wait / 4):
1121+ time.sleep(4)
1122+ self.log.debug('Attempt {}: {}'.format(tries, ret))
1123+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
1124+ tries += 1
1125+
1126+ if ret:
1127+ amulet.raise_status(amulet.FAIL, ret)
1128+
1129+ def connect_amqp_by_unit(self, sentry_unit, ssl=False,
1130+ port=None, fatal=True,
1131+ username="testuser1", password="changeme"):
1132+ """Establish and return a pika amqp connection to the rabbitmq service
1133+ running on a rmq juju unit.
1134+
1135+ :param sentry_unit: sentry unit pointer
1136+ :param ssl: boolean, default to False
1137+ :param port: amqp port, use defaults if None
1138+ :param fatal: boolean, default to True (raises on connect error)
1139+ :param username: amqp user name, default to testuser1
1140+ :param password: amqp user password
1141+ :returns: pika amqp connection pointer or None if failed and non-fatal
1142+ """
1143+ host = sentry_unit.info['public-address']
1144+ unit_name = sentry_unit.info['unit_name']
1145+
1146+ # Default port logic if port is not specified
1147+ if ssl and not port:
1148+ port = 5671
1149+ elif not ssl and not port:
1150+ port = 5672
1151+
1152+ self.log.debug('Connecting to amqp on {}:{} ({}) as '
1153+ '{}...'.format(host, port, unit_name, username))
1154+
1155+ try:
1156+ credentials = pika.PlainCredentials(username, password)
1157+ parameters = pika.ConnectionParameters(host=host, port=port,
1158+ credentials=credentials,
1159+ ssl=ssl,
1160+ connection_attempts=3,
1161+ retry_delay=5,
1162+ socket_timeout=1)
1163+ connection = pika.BlockingConnection(parameters)
1164+ assert connection.server_properties['product'] == 'RabbitMQ'
1165+ self.log.debug('Connect OK')
1166+ return connection
1167+ except Exception as e:
1168+ msg = ('amqp connection failed to {}:{} as '
1169+ '{} ({})'.format(host, port, username, str(e)))
1170+ if fatal:
1171+ amulet.raise_status(amulet.FAIL, msg)
1172+ else:
1173+ self.log.warn(msg)
1174+ return None
1175+
1176+ def publish_amqp_message_by_unit(self, sentry_unit, message,
1177+ queue="test", ssl=False,
1178+ username="testuser1",
1179+ password="changeme",
1180+ port=None):
1181+ """Publish an amqp message to a rmq juju unit.
1182+
1183+ :param sentry_unit: sentry unit pointer
1184+ :param message: amqp message string
1185+ :param queue: message queue, default to test
1186+ :param username: amqp user name, default to testuser1
1187+ :param password: amqp user password
1188+ :param ssl: boolean, default to False
1189+ :param port: amqp port, use defaults if None
1190+ :returns: None. Raises exception if publish failed.
1191+ """
1192+ self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
1193+ message))
1194+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
1195+ port=port,
1196+ username=username,
1197+ password=password)
1198+
1199+ # NOTE(beisner): extra debug here re: pika hang potential:
1200+ # https://github.com/pika/pika/issues/297
1201+ # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
1202+ self.log.debug('Defining channel...')
1203+ channel = connection.channel()
1204+ self.log.debug('Declaring queue...')
1205+ channel.queue_declare(queue=queue, auto_delete=False, durable=True)
1206+ self.log.debug('Publishing message...')
1207+ channel.basic_publish(exchange='', routing_key=queue, body=message)
1208+ self.log.debug('Closing channel...')
1209+ channel.close()
1210+ self.log.debug('Closing connection...')
1211+ connection.close()
1212+
1213+ def get_amqp_message_by_unit(self, sentry_unit, queue="test",
1214+ username="testuser1",
1215+ password="changeme",
1216+ ssl=False, port=None):
1217+ """Get an amqp message from a rmq juju unit.
1218+
1219+ :param sentry_unit: sentry unit pointer
1220+ :param queue: message queue, default to test
1221+ :param username: amqp user name, default to testuser1
1222+ :param password: amqp user password
1223+ :param ssl: boolean, default to False
1224+ :param port: amqp port, use defaults if None
1225+ :returns: amqp message body as string. Raise if get fails.
1226+ """
1227+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
1228+ port=port,
1229+ username=username,
1230+ password=password)
1231+ channel = connection.channel()
1232+ method_frame, _, body = channel.basic_get(queue)
1233+
1234+ if method_frame:
1235+ self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
1236+ body))
1237+ channel.basic_ack(method_frame.delivery_tag)
1238+ channel.close()
1239+ connection.close()
1240+ return body
1241+ else:
1242+ msg = 'No message retrieved.'
1243+ amulet.raise_status(amulet.FAIL, msg)
1244
1245=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
1246--- hooks/charmhelpers/contrib/openstack/context.py 2015-06-24 12:22:08 +0000
1247+++ hooks/charmhelpers/contrib/openstack/context.py 2015-11-03 12:30:15 +0000
1248@@ -14,6 +14,7 @@
1249 # You should have received a copy of the GNU Lesser General Public License
1250 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1251
1252+import glob
1253 import json
1254 import os
1255 import re
1256@@ -50,6 +51,8 @@
1257 from charmhelpers.core.strutils import bool_from_string
1258
1259 from charmhelpers.core.host import (
1260+ get_bond_master,
1261+ is_phy_iface,
1262 list_nics,
1263 get_nic_hwaddr,
1264 mkdir,
1265@@ -122,21 +125,24 @@
1266 of specifying multiple key value pairs within the same string. For
1267 example, a string in the format of 'key1=value1, key2=value2' will
1268 return a dict of:
1269- {'key1': 'value1',
1270- 'key2': 'value2'}.
1271+
1272+ {'key1': 'value1',
1273+ 'key2': 'value2'}.
1274
1275 2. A string in the above format, but supporting a comma-delimited list
1276 of values for the same key. For example, a string in the format of
1277 'key1=value1, key2=value3,value4,value5' will return a dict of:
1278- {'key1', 'value1',
1279- 'key2', 'value2,value3,value4'}
1280+
1281+ {'key1', 'value1',
1282+ 'key2', 'value2,value3,value4'}
1283
1284 3. A string containing a colon character (:) prior to an equal
1285 character (=) will be treated as yaml and parsed as such. This can be
1286 used to specify more complex key value pairs. For example,
1287 a string in the format of 'key1: subkey1=value1, subkey2=value2' will
1288 return a dict of:
1289- {'key1', 'subkey1=value1, subkey2=value2'}
1290+
1291+ {'key1', 'subkey1=value1, subkey2=value2'}
1292
1293 The provided config_flags string may be a list of comma-separated values
1294 which themselves may be comma-separated list of values.
1295@@ -189,10 +195,50 @@
1296 class OSContextGenerator(object):
1297 """Base class for all context generators."""
1298 interfaces = []
1299+ related = False
1300+ complete = False
1301+ missing_data = []
1302
1303 def __call__(self):
1304 raise NotImplementedError
1305
1306+ def context_complete(self, ctxt):
1307+ """Check for missing data for the required context data.
1308+ Set self.missing_data if it exists and return False.
1309+ Set self.complete if no missing data and return True.
1310+ """
1311+ # Fresh start
1312+ self.complete = False
1313+ self.missing_data = []
1314+ for k, v in six.iteritems(ctxt):
1315+ if v is None or v == '':
1316+ if k not in self.missing_data:
1317+ self.missing_data.append(k)
1318+
1319+ if self.missing_data:
1320+ self.complete = False
1321+ log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
1322+ else:
1323+ self.complete = True
1324+ return self.complete
1325+
1326+ def get_related(self):
1327+ """Check if any of the context interfaces have relation ids.
1328+ Set self.related and return True if one of the interfaces
1329+ has relation ids.
1330+ """
1331+ # Fresh start
1332+ self.related = False
1333+ try:
1334+ for interface in self.interfaces:
1335+ if relation_ids(interface):
1336+ self.related = True
1337+ return self.related
1338+ except AttributeError as e:
1339+ log("{} {}"
1340+ "".format(self, e), 'INFO')
1341+ return self.related
1342+
1343
1344 class SharedDBContext(OSContextGenerator):
1345 interfaces = ['shared-db']
1346@@ -208,6 +254,7 @@
1347 self.database = database
1348 self.user = user
1349 self.ssl_dir = ssl_dir
1350+ self.rel_name = self.interfaces[0]
1351
1352 def __call__(self):
1353 self.database = self.database or config('database')
1354@@ -240,7 +287,8 @@
1355 if self.relation_prefix:
1356 password_setting = self.relation_prefix + '_password'
1357
1358- for rid in relation_ids('shared-db'):
1359+ for rid in relation_ids(self.interfaces[0]):
1360+ self.related = True
1361 for unit in related_units(rid):
1362 rdata = relation_get(rid=rid, unit=unit)
1363 host = rdata.get('db_host')
1364@@ -252,7 +300,7 @@
1365 'database_password': rdata.get(password_setting),
1366 'database_type': 'mysql'
1367 }
1368- if context_complete(ctxt):
1369+ if self.context_complete(ctxt):
1370 db_ssl(rdata, ctxt, self.ssl_dir)
1371 return ctxt
1372 return {}
1373@@ -273,6 +321,7 @@
1374
1375 ctxt = {}
1376 for rid in relation_ids(self.interfaces[0]):
1377+ self.related = True
1378 for unit in related_units(rid):
1379 rel_host = relation_get('host', rid=rid, unit=unit)
1380 rel_user = relation_get('user', rid=rid, unit=unit)
1381@@ -282,7 +331,7 @@
1382 'database_user': rel_user,
1383 'database_password': rel_passwd,
1384 'database_type': 'postgresql'}
1385- if context_complete(ctxt):
1386+ if self.context_complete(ctxt):
1387 return ctxt
1388
1389 return {}
1390@@ -343,6 +392,7 @@
1391 ctxt['signing_dir'] = cachedir
1392
1393 for rid in relation_ids(self.rel_name):
1394+ self.related = True
1395 for unit in related_units(rid):
1396 rdata = relation_get(rid=rid, unit=unit)
1397 serv_host = rdata.get('service_host')
1398@@ -361,7 +411,7 @@
1399 'service_protocol': svc_protocol,
1400 'auth_protocol': auth_protocol})
1401
1402- if context_complete(ctxt):
1403+ if self.context_complete(ctxt):
1404 # NOTE(jamespage) this is required for >= icehouse
1405 # so a missing value just indicates keystone needs
1406 # upgrading
1407@@ -400,6 +450,7 @@
1408 ctxt = {}
1409 for rid in relation_ids(self.rel_name):
1410 ha_vip_only = False
1411+ self.related = True
1412 for unit in related_units(rid):
1413 if relation_get('clustered', rid=rid, unit=unit):
1414 ctxt['clustered'] = True
1415@@ -432,7 +483,7 @@
1416 ha_vip_only = relation_get('ha-vip-only',
1417 rid=rid, unit=unit) is not None
1418
1419- if context_complete(ctxt):
1420+ if self.context_complete(ctxt):
1421 if 'rabbit_ssl_ca' in ctxt:
1422 if not self.ssl_dir:
1423 log("Charm not setup for ssl support but ssl ca "
1424@@ -464,7 +515,7 @@
1425 ctxt['oslo_messaging_flags'] = config_flags_parser(
1426 oslo_messaging_flags)
1427
1428- if not context_complete(ctxt):
1429+ if not self.complete:
1430 return {}
1431
1432 return ctxt
1433@@ -480,13 +531,15 @@
1434
1435 log('Generating template context for ceph', level=DEBUG)
1436 mon_hosts = []
1437- auth = None
1438- key = None
1439- use_syslog = str(config('use-syslog')).lower()
1440+ ctxt = {
1441+ 'use_syslog': str(config('use-syslog')).lower()
1442+ }
1443 for rid in relation_ids('ceph'):
1444 for unit in related_units(rid):
1445- auth = relation_get('auth', rid=rid, unit=unit)
1446- key = relation_get('key', rid=rid, unit=unit)
1447+ if not ctxt.get('auth'):
1448+ ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
1449+ if not ctxt.get('key'):
1450+ ctxt['key'] = relation_get('key', rid=rid, unit=unit)
1451 ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
1452 unit=unit)
1453 unit_priv_addr = relation_get('private-address', rid=rid,
1454@@ -495,15 +548,12 @@
1455 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
1456 mon_hosts.append(ceph_addr)
1457
1458- ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
1459- 'auth': auth,
1460- 'key': key,
1461- 'use_syslog': use_syslog}
1462+ ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
1463
1464 if not os.path.isdir('/etc/ceph'):
1465 os.mkdir('/etc/ceph')
1466
1467- if not context_complete(ctxt):
1468+ if not self.context_complete(ctxt):
1469 return {}
1470
1471 ensure_packages(['ceph-common'])
1472@@ -890,9 +940,32 @@
1473 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
1474 return ctxt
1475
1476+ def pg_ctxt(self):
1477+ driver = neutron_plugin_attribute(self.plugin, 'driver',
1478+ self.network_manager)
1479+ config = neutron_plugin_attribute(self.plugin, 'config',
1480+ self.network_manager)
1481+ ovs_ctxt = {'core_plugin': driver,
1482+ 'neutron_plugin': 'plumgrid',
1483+ 'neutron_security_groups': self.neutron_security_groups,
1484+ 'local_ip': unit_private_ip(),
1485+ 'config': config}
1486+ return ovs_ctxt
1487+
1488+ def midonet_ctxt(self):
1489+ driver = neutron_plugin_attribute(self.plugin, 'driver',
1490+ self.network_manager)
1491+ midonet_config = neutron_plugin_attribute(self.plugin, 'config',
1492+ self.network_manager)
1493+ mido_ctxt = {'core_plugin': driver,
1494+ 'neutron_plugin': 'midonet',
1495+ 'neutron_security_groups': self.neutron_security_groups,
1496+ 'local_ip': unit_private_ip(),
1497+ 'config': midonet_config}
1498+
1499+ return mido_ctxt
1500+
1501 def __call__(self):
1502- self._ensure_packages()
1503-
1504 if self.network_manager not in ['quantum', 'neutron']:
1505 return {}
1506
1507@@ -911,6 +984,10 @@
1508 ctxt.update(self.calico_ctxt())
1509 elif self.plugin == 'vsp':
1510 ctxt.update(self.nuage_ctxt())
1511+ elif self.plugin == 'plumgrid':
1512+ ctxt.update(self.pg_ctxt())
1513+ elif self.plugin == 'midonet':
1514+ ctxt.update(self.midonet_ctxt())
1515
1516 alchemy_flags = config('neutron-alchemy-flags')
1517 if alchemy_flags:
1518@@ -922,7 +999,6 @@
1519
1520
1521 class NeutronPortContext(OSContextGenerator):
1522- NIC_PREFIXES = ['eth', 'bond']
1523
1524 def resolve_ports(self, ports):
1525 """Resolve NICs not yet bound to bridge(s)
1526@@ -934,7 +1010,18 @@
1527
1528 hwaddr_to_nic = {}
1529 hwaddr_to_ip = {}
1530- for nic in list_nics(self.NIC_PREFIXES):
1531+ for nic in list_nics():
1532+ # Ignore virtual interfaces (bond masters will be identified from
1533+ # their slaves)
1534+ if not is_phy_iface(nic):
1535+ continue
1536+
1537+ _nic = get_bond_master(nic)
1538+ if _nic:
1539+ log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
1540+ level=DEBUG)
1541+ nic = _nic
1542+
1543 hwaddr = get_nic_hwaddr(nic)
1544 hwaddr_to_nic[hwaddr] = nic
1545 addresses = get_ipv4_addr(nic, fatal=False)
1546@@ -960,7 +1047,8 @@
1547 # trust it to be the real external network).
1548 resolved.append(entry)
1549
1550- return resolved
1551+ # Ensure no duplicates
1552+ return list(set(resolved))
1553
1554
1555 class OSConfigFlagContext(OSContextGenerator):
1556@@ -1032,7 +1120,7 @@
1557
1558 ctxt = {
1559 ... other context ...
1560- 'subordinate_config': {
1561+ 'subordinate_configuration': {
1562 'DEFAULT': {
1563 'key1': 'value1',
1564 },
1565@@ -1050,13 +1138,22 @@
1566 :param config_file : Service's config file to query sections
1567 :param interface : Subordinate interface to inspect
1568 """
1569- self.service = service
1570 self.config_file = config_file
1571- self.interface = interface
1572+ if isinstance(service, list):
1573+ self.services = service
1574+ else:
1575+ self.services = [service]
1576+ if isinstance(interface, list):
1577+ self.interfaces = interface
1578+ else:
1579+ self.interfaces = [interface]
1580
1581 def __call__(self):
1582 ctxt = {'sections': {}}
1583- for rid in relation_ids(self.interface):
1584+ rids = []
1585+ for interface in self.interfaces:
1586+ rids.extend(relation_ids(interface))
1587+ for rid in rids:
1588 for unit in related_units(rid):
1589 sub_config = relation_get('subordinate_configuration',
1590 rid=rid, unit=unit)
1591@@ -1064,33 +1161,37 @@
1592 try:
1593 sub_config = json.loads(sub_config)
1594 except:
1595- log('Could not parse JSON from subordinate_config '
1596- 'setting from %s' % rid, level=ERROR)
1597- continue
1598-
1599- if self.service not in sub_config:
1600- log('Found subordinate_config on %s but it contained'
1601- 'nothing for %s service' % (rid, self.service),
1602- level=INFO)
1603- continue
1604-
1605- sub_config = sub_config[self.service]
1606- if self.config_file not in sub_config:
1607- log('Found subordinate_config on %s but it contained'
1608- 'nothing for %s' % (rid, self.config_file),
1609- level=INFO)
1610- continue
1611-
1612- sub_config = sub_config[self.config_file]
1613- for k, v in six.iteritems(sub_config):
1614- if k == 'sections':
1615- for section, config_dict in six.iteritems(v):
1616- log("adding section '%s'" % (section),
1617- level=DEBUG)
1618- ctxt[k][section] = config_dict
1619- else:
1620- ctxt[k] = v
1621-
1622+ log('Could not parse JSON from '
1623+ 'subordinate_configuration setting from %s'
1624+ % rid, level=ERROR)
1625+ continue
1626+
1627+ for service in self.services:
1628+ if service not in sub_config:
1629+ log('Found subordinate_configuration on %s but it '
1630+ 'contained nothing for %s service'
1631+ % (rid, service), level=INFO)
1632+ continue
1633+
1634+ sub_config = sub_config[service]
1635+ if self.config_file not in sub_config:
1636+ log('Found subordinate_configuration on %s but it '
1637+ 'contained nothing for %s'
1638+ % (rid, self.config_file), level=INFO)
1639+ continue
1640+
1641+ sub_config = sub_config[self.config_file]
1642+ for k, v in six.iteritems(sub_config):
1643+ if k == 'sections':
1644+ for section, config_list in six.iteritems(v):
1645+ log("adding section '%s'" % (section),
1646+ level=DEBUG)
1647+ if ctxt[k].get(section):
1648+ ctxt[k][section].extend(config_list)
1649+ else:
1650+ ctxt[k][section] = config_list
1651+ else:
1652+ ctxt[k] = v
1653 log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
1654 return ctxt
1655
1656@@ -1267,15 +1368,19 @@
1657 def __call__(self):
1658 ports = config('data-port')
1659 if ports:
1660+ # Map of {port/mac:bridge}
1661 portmap = parse_data_port_mappings(ports)
1662- ports = portmap.values()
1663+ ports = portmap.keys()
1664+ # Resolve provided ports or mac addresses and filter out those
1665+ # already attached to a bridge.
1666 resolved = self.resolve_ports(ports)
1667+ # FIXME: is this necessary?
1668 normalized = {get_nic_hwaddr(port): port for port in resolved
1669 if port not in ports}
1670 normalized.update({port: port for port in resolved
1671 if port in ports})
1672 if resolved:
1673- return {bridge: normalized[port] for bridge, port in
1674+ return {normalized[port]: bridge for port, bridge in
1675 six.iteritems(portmap) if port in normalized.keys()}
1676
1677 return None
1678@@ -1286,12 +1391,22 @@
1679 def __call__(self):
1680 ctxt = {}
1681 mappings = super(PhyNICMTUContext, self).__call__()
1682- if mappings and mappings.values():
1683- ports = mappings.values()
1684+ if mappings and mappings.keys():
1685+ ports = sorted(mappings.keys())
1686 napi_settings = NeutronAPIContext()()
1687 mtu = napi_settings.get('network_device_mtu')
1688+ all_ports = set()
1689+ # If any of ports is a vlan device, its underlying device must have
1690+ # mtu applied first.
1691+ for port in ports:
1692+ for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
1693+ lport = os.path.basename(lport)
1694+ all_ports.add(lport.split('_')[1])
1695+
1696+ all_ports = list(all_ports)
1697+ all_ports.extend(ports)
1698 if mtu:
1699- ctxt["devs"] = '\\n'.join(ports)
1700+ ctxt["devs"] = '\\n'.join(all_ports)
1701 ctxt['mtu'] = mtu
1702
1703 return ctxt
1704@@ -1323,6 +1438,6 @@
1705 'auth_protocol':
1706 rdata.get('auth_protocol') or 'http',
1707 }
1708- if context_complete(ctxt):
1709+ if self.context_complete(ctxt):
1710 return ctxt
1711 return {}
1712
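For reference, a minimal usage sketch of the reworked SubordinateConfigContext above, which now accepts lists for both service and interface; the relation and service names below are only illustrative, and it assumes the code runs inside a Juju hook with this charm-helpers tree on the path:

    from charmhelpers.contrib.openstack.context import SubordinateConfigContext

    # Aggregate 'subordinate_configuration' blobs published on either of two
    # subordinate interfaces (names are placeholders) into one context for
    # /etc/neutron/neutron.conf; when several units publish the same section,
    # the per-section lists are extended rather than overwritten.
    sub_ctxt = SubordinateConfigContext(
        service=['neutron-api', 'neutron-plugin-ml2'],
        config_file='/etc/neutron/neutron.conf',
        interface=['neutron-plugin-api-subordinate', 'neutron-control'])

    print(sub_ctxt()['sections'])
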
1713=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
1714--- hooks/charmhelpers/contrib/openstack/neutron.py 2015-06-24 12:22:08 +0000
1715+++ hooks/charmhelpers/contrib/openstack/neutron.py 2015-11-03 12:30:15 +0000
1716@@ -195,6 +195,34 @@
1717 'packages': [],
1718 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
1719 'server_services': ['neutron-server']
1720+ },
1721+ 'plumgrid': {
1722+ 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
1723+ 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
1724+ 'contexts': [
1725+ context.SharedDBContext(user=config('database-user'),
1726+ database=config('database'),
1727+ ssl_dir=NEUTRON_CONF_DIR)],
1728+ 'services': [],
1729+ 'packages': [['plumgrid-lxc'],
1730+ ['iovisor-dkms']],
1731+ 'server_packages': ['neutron-server',
1732+ 'neutron-plugin-plumgrid'],
1733+ 'server_services': ['neutron-server']
1734+ },
1735+ 'midonet': {
1736+ 'config': '/etc/neutron/plugins/midonet/midonet.ini',
1737+ 'driver': 'midonet.neutron.plugin.MidonetPluginV2',
1738+ 'contexts': [
1739+ context.SharedDBContext(user=config('neutron-database-user'),
1740+ database=config('neutron-database'),
1741+ relation_prefix='neutron',
1742+ ssl_dir=NEUTRON_CONF_DIR)],
1743+ 'services': [],
1744+ 'packages': [[headers_package()] + determine_dkms_package()],
1745+ 'server_packages': ['neutron-server',
1746+ 'python-neutron-plugin-midonet'],
1747+ 'server_services': ['neutron-server']
1748 }
1749 }
1750 if release >= 'icehouse':
1751@@ -255,17 +283,30 @@
1752 return 'neutron'
1753
1754
1755-def parse_mappings(mappings):
1756+def parse_mappings(mappings, key_rvalue=False):
1757+ """By default mappings are lvalue keyed.
1758+
1759+ If key_rvalue is True, the mapping will be reversed to allow multiple
1760+ configs for the same lvalue.
1761+ """
1762 parsed = {}
1763 if mappings:
1764 mappings = mappings.split()
1765 for m in mappings:
1766 p = m.partition(':')
1767- key = p[0].strip()
1768- if p[1]:
1769- parsed[key] = p[2].strip()
1770+
1771+ if key_rvalue:
1772+ key_index = 2
1773+ val_index = 0
1774+ # if there is no rvalue skip to next
1775+ if not p[1]:
1776+ continue
1777 else:
1778- parsed[key] = ''
1779+ key_index = 0
1780+ val_index = 2
1781+
1782+ key = p[key_index].strip()
1783+ parsed[key] = p[val_index].strip()
1784
1785 return parsed
1786
1787@@ -283,25 +324,25 @@
1788 def parse_data_port_mappings(mappings, default_bridge='br-data'):
1789 """Parse data port mappings.
1790
1791- Mappings must be a space-delimited list of bridge:port mappings.
1792+ Mappings must be a space-delimited list of bridge:port.
1793
1794- Returns dict of the form {bridge:port}.
1795+ Returns dict of the form {port:bridge} where ports may be mac addresses or
1796+ interface names.
1797 """
1798- _mappings = parse_mappings(mappings)
1799+
1800+ # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
1801+ # proposed for <port> since it may be a mac address which will differ
1802+ # across units this allowing first-known-good to be chosen.
1803+ _mappings = parse_mappings(mappings, key_rvalue=True)
1804 if not _mappings or list(_mappings.values()) == ['']:
1805 if not mappings:
1806 return {}
1807
1808 # For backwards-compatibility we need to support port-only provided in
1809 # config.
1810- _mappings = {default_bridge: mappings.split()[0]}
1811-
1812- bridges = _mappings.keys()
1813- ports = _mappings.values()
1814- if len(set(bridges)) != len(bridges):
1815- raise Exception("It is not allowed to have more than one port "
1816- "configured on the same bridge")
1817-
1818+ _mappings = {mappings.split()[0]: default_bridge}
1819+
1820+ ports = _mappings.keys()
1821 if len(set(ports)) != len(ports):
1822 raise Exception("It is not allowed to have the same port configured "
1823 "on more than one bridge")
1824
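A short sketch of what the reworked data-port parsing above is expected to return; ports are now the keys, so several ports (or per-unit MAC addresses) may target the same bridge. The interface and MAC values below are placeholders:

    from charmhelpers.contrib.openstack.neutron import parse_data_port_mappings

    # bridge:port pairs; a port may be a NIC name or a MAC address.
    print(parse_data_port_mappings('br-data:eth1 br-data:aa:bb:cc:dd:ee:ff'))
    # {'eth1': 'br-data', 'aa:bb:cc:dd:ee:ff': 'br-data'}

    # Backwards compatibility: a bare port maps to the default bridge.
    print(parse_data_port_mappings('eth1'))
    # {'eth1': 'br-data'}
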
1825=== modified file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf'
1826--- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-06-24 12:22:08 +0000
1827+++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-11-03 12:30:15 +0000
1828@@ -5,11 +5,17 @@
1829 ###############################################################################
1830 [global]
1831 {% if auth -%}
1832- auth_supported = {{ auth }}
1833- keyring = /etc/ceph/$cluster.$name.keyring
1834- mon host = {{ mon_hosts }}
1835+auth_supported = {{ auth }}
1836+keyring = /etc/ceph/$cluster.$name.keyring
1837+mon host = {{ mon_hosts }}
1838 {% endif -%}
1839- log to syslog = {{ use_syslog }}
1840- err to syslog = {{ use_syslog }}
1841- clog to syslog = {{ use_syslog }}
1842+log to syslog = {{ use_syslog }}
1843+err to syslog = {{ use_syslog }}
1844+clog to syslog = {{ use_syslog }}
1845
1846+[client]
1847+{% if rbd_client_cache_settings -%}
1848+{% for key, value in rbd_client_cache_settings.iteritems() -%}
1849+{{ key }} = {{ value }}
1850+{% endfor -%}
1851+{%- endif %}
1852\ No newline at end of file
1853
1854=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
1855--- hooks/charmhelpers/contrib/openstack/templating.py 2015-06-24 12:22:08 +0000
1856+++ hooks/charmhelpers/contrib/openstack/templating.py 2015-11-03 12:30:15 +0000
1857@@ -18,7 +18,7 @@
1858
1859 import six
1860
1861-from charmhelpers.fetch import apt_install
1862+from charmhelpers.fetch import apt_install, apt_update
1863 from charmhelpers.core.hookenv import (
1864 log,
1865 ERROR,
1866@@ -29,39 +29,15 @@
1867 try:
1868 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
1869 except ImportError:
1870- # python-jinja2 may not be installed yet, or we're running unittests.
1871- FileSystemLoader = ChoiceLoader = Environment = exceptions = None
1872+ apt_update(fatal=True)
1873+ apt_install('python-jinja2', fatal=True)
1874+ from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
1875
1876
1877 class OSConfigException(Exception):
1878 pass
1879
1880
1881-def os_template_dirs(templates_dir, os_release):
1882- tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
1883- for rel in six.itervalues(OPENSTACK_CODENAMES)]
1884-
1885- if not os.path.isdir(templates_dir):
1886- log('Templates directory not found @ %s.' % templates_dir,
1887- level=ERROR)
1888- raise OSConfigException
1889- dirs = [templates_dir]
1890- helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
1891- if os.path.isdir(helper_templates):
1892- dirs.append(helper_templates)
1893-
1894- for rel, tmpl_dir in tmpl_dirs:
1895- if os.path.isdir(tmpl_dir):
1896- dirs.insert(0, tmpl_dir)
1897- if rel == os_release:
1898- break
1899- ch_templates = os.path.dirname(__file__) + '/charmhelpers/contrib/openstack/templates'
1900- dirs.append(ch_templates)
1901- log('Template search path: %s' %
1902- ' '.join(dirs), level=INFO)
1903- return dirs
1904-
1905-
1906 def get_loader(templates_dir, os_release):
1907 """
1908 Create a jinja2.ChoiceLoader containing template dirs up to
1909@@ -137,7 +113,7 @@
1910
1911 def complete_contexts(self):
1912 '''
1913- Return a list of interfaces that have atisfied contexts.
1914+ Return a list of interfaces that have satisfied contexts.
1915 '''
1916 if self._complete_contexts:
1917 return self._complete_contexts
1918@@ -318,3 +294,30 @@
1919 [interfaces.extend(i.complete_contexts())
1920 for i in six.itervalues(self.templates)]
1921 return interfaces
1922+
1923+ def get_incomplete_context_data(self, interfaces):
1924+ '''
1925+ Return dictionary of relation status of interfaces and any missing
1926+ required context data. Example:
1927+ {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
1928+ 'zeromq-configuration': {'related': False}}
1929+ '''
1930+ incomplete_context_data = {}
1931+
1932+ for i in six.itervalues(self.templates):
1933+ for context in i.contexts:
1934+ for interface in interfaces:
1935+ related = False
1936+ if interface in context.interfaces:
1937+ related = context.get_related()
1938+ missing_data = context.missing_data
1939+ if missing_data:
1940+ incomplete_context_data[interface] = {'missing_data': missing_data}
1941+ if related:
1942+ if incomplete_context_data.get(interface):
1943+ incomplete_context_data[interface].update({'related': True})
1944+ else:
1945+ incomplete_context_data[interface] = {'related': True}
1946+ else:
1947+ incomplete_context_data[interface] = {'related': False}
1948+ return incomplete_context_data
1949
1950=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
1951--- hooks/charmhelpers/contrib/openstack/utils.py 2015-06-24 12:22:08 +0000
1952+++ hooks/charmhelpers/contrib/openstack/utils.py 2015-11-03 12:30:15 +0000
1953@@ -1,5 +1,3 @@
1954-#!/usr/bin/python
1955-
1956 # Copyright 2014-2015 Canonical Limited.
1957 #
1958 # This file is part of charm-helpers.
1959@@ -24,9 +22,11 @@
1960 import json
1961 import os
1962 import sys
1963+import re
1964+
1965+import six
1966+import traceback
1967 import uuid
1968-
1969-import six
1970 import yaml
1971
1972 from charmhelpers.contrib.network import ip
1973@@ -36,13 +36,17 @@
1974 )
1975
1976 from charmhelpers.core.hookenv import (
1977+ action_fail,
1978+ action_set,
1979 config,
1980 log as juju_log,
1981 charm_dir,
1982 INFO,
1983+ related_units,
1984 relation_ids,
1985- related_units,
1986 relation_set,
1987+ status_set,
1988+ hook_name
1989 )
1990
1991 from charmhelpers.contrib.storage.linux.lvm import (
1992@@ -52,7 +56,8 @@
1993 )
1994
1995 from charmhelpers.contrib.network.ip import (
1996- get_ipv6_addr
1997+ get_ipv6_addr,
1998+ is_ipv6,
1999 )
2000
2001 from charmhelpers.contrib.python.packages import (
2002@@ -71,7 +76,6 @@
2003 DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
2004 'restricted main multiverse universe')
2005
2006-
2007 UBUNTU_OPENSTACK_RELEASE = OrderedDict([
2008 ('oneiric', 'diablo'),
2009 ('precise', 'essex'),
2010@@ -81,6 +85,7 @@
2011 ('trusty', 'icehouse'),
2012 ('utopic', 'juno'),
2013 ('vivid', 'kilo'),
2014+ ('wily', 'liberty'),
2015 ])
2016
2017
2018@@ -93,6 +98,7 @@
2019 ('2014.1', 'icehouse'),
2020 ('2014.2', 'juno'),
2021 ('2015.1', 'kilo'),
2022+ ('2015.2', 'liberty'),
2023 ])
2024
2025 # The ugly duckling
2026@@ -115,8 +121,42 @@
2027 ('2.2.0', 'juno'),
2028 ('2.2.1', 'kilo'),
2029 ('2.2.2', 'kilo'),
2030+ ('2.3.0', 'liberty'),
2031+ ('2.4.0', 'liberty'),
2032+ ('2.5.0', 'liberty'),
2033 ])
2034
2035+# >= Liberty version->codename mapping
2036+PACKAGE_CODENAMES = {
2037+ 'nova-common': OrderedDict([
2038+ ('12.0.0', 'liberty'),
2039+ ]),
2040+ 'neutron-common': OrderedDict([
2041+ ('7.0.0', 'liberty'),
2042+ ]),
2043+ 'cinder-common': OrderedDict([
2044+ ('7.0.0', 'liberty'),
2045+ ]),
2046+ 'keystone': OrderedDict([
2047+ ('8.0.0', 'liberty'),
2048+ ]),
2049+ 'horizon-common': OrderedDict([
2050+ ('8.0.0', 'liberty'),
2051+ ]),
2052+ 'ceilometer-common': OrderedDict([
2053+ ('5.0.0', 'liberty'),
2054+ ]),
2055+ 'heat-common': OrderedDict([
2056+ ('5.0.0', 'liberty'),
2057+ ]),
2058+ 'glance-common': OrderedDict([
2059+ ('11.0.0', 'liberty'),
2060+ ]),
2061+ 'openstack-dashboard': OrderedDict([
2062+ ('8.0.0', 'liberty'),
2063+ ]),
2064+}
2065+
2066 DEFAULT_LOOPBACK_SIZE = '5G'
2067
2068
2069@@ -166,9 +206,9 @@
2070 error_out(e)
2071
2072
2073-def get_os_version_codename(codename):
2074+def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
2075 '''Determine OpenStack version number from codename.'''
2076- for k, v in six.iteritems(OPENSTACK_CODENAMES):
2077+ for k, v in six.iteritems(version_map):
2078 if v == codename:
2079 return k
2080 e = 'Could not derive OpenStack version for '\
2081@@ -200,20 +240,31 @@
2082 error_out(e)
2083
2084 vers = apt.upstream_version(pkg.current_ver.ver_str)
2085+ match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
2086+ if match:
2087+ vers = match.group(0)
2088
2089- try:
2090- if 'swift' in pkg.name:
2091- swift_vers = vers[:5]
2092- if swift_vers not in SWIFT_CODENAMES:
2093- # Deal with 1.10.0 upward
2094- swift_vers = vers[:6]
2095- return SWIFT_CODENAMES[swift_vers]
2096- else:
2097- vers = vers[:6]
2098- return OPENSTACK_CODENAMES[vers]
2099- except KeyError:
2100- e = 'Could not determine OpenStack codename for version %s' % vers
2101- error_out(e)
2102+ # >= Liberty independent project versions
2103+ if (package in PACKAGE_CODENAMES and
2104+ vers in PACKAGE_CODENAMES[package]):
2105+ return PACKAGE_CODENAMES[package][vers]
2106+ else:
2107+ # < Liberty co-ordinated project versions
2108+ try:
2109+ if 'swift' in pkg.name:
2110+ swift_vers = vers[:5]
2111+ if swift_vers not in SWIFT_CODENAMES:
2112+ # Deal with 1.10.0 upward
2113+ swift_vers = vers[:6]
2114+ return SWIFT_CODENAMES[swift_vers]
2115+ else:
2116+ vers = vers[:6]
2117+ return OPENSTACK_CODENAMES[vers]
2118+ except KeyError:
2119+ if not fatal:
2120+ return None
2121+ e = 'Could not determine OpenStack codename for version %s' % vers
2122+ error_out(e)
2123
2124
2125 def get_os_version_package(pkg, fatal=True):
2126@@ -323,6 +374,9 @@
2127 'kilo': 'trusty-updates/kilo',
2128 'kilo/updates': 'trusty-updates/kilo',
2129 'kilo/proposed': 'trusty-proposed/kilo',
2130+ 'liberty': 'trusty-updates/liberty',
2131+ 'liberty/updates': 'trusty-updates/liberty',
2132+ 'liberty/proposed': 'trusty-proposed/liberty',
2133 }
2134
2135 try:
2136@@ -388,7 +442,11 @@
2137 import apt_pkg as apt
2138 src = config('openstack-origin')
2139 cur_vers = get_os_version_package(package)
2140- available_vers = get_os_version_install_source(src)
2141+ if "swift" in package:
2142+ codename = get_os_codename_install_source(src)
2143+ available_vers = get_os_version_codename(codename, SWIFT_CODENAMES)
2144+ else:
2145+ available_vers = get_os_version_install_source(src)
2146 apt.init()
2147 return apt.version_compare(available_vers, cur_vers) == 1
2148
2149@@ -465,6 +523,12 @@
2150 relation_prefix=None):
2151 hosts = get_ipv6_addr(dynamic_only=False)
2152
2153+ if config('vip'):
2154+ vips = config('vip').split()
2155+ for vip in vips:
2156+ if vip and is_ipv6(vip):
2157+ hosts.append(vip)
2158+
2159 kwargs = {'database': database,
2160 'username': database_user,
2161 'hostname': json.dumps(hosts)}
2162@@ -518,6 +582,7 @@
2163 Clone/install all specified OpenStack repositories.
2164
2165 The expected format of projects_yaml is:
2166+
2167 repositories:
2168 - {name: keystone,
2169 repository: 'git://git.openstack.org/openstack/keystone.git',
2170@@ -525,11 +590,13 @@
2171 - {name: requirements,
2172 repository: 'git://git.openstack.org/openstack/requirements.git',
2173 branch: 'stable/icehouse'}
2174+
2175 directory: /mnt/openstack-git
2176 http_proxy: squid-proxy-url
2177 https_proxy: squid-proxy-url
2178
2179- The directory, http_proxy, and https_proxy keys are optional.
2180+ The directory, http_proxy, and https_proxy keys are optional.
2181+
2182 """
2183 global requirements_dir
2184 parent_dir = '/mnt/openstack-git'
2185@@ -551,6 +618,12 @@
2186
2187 pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
2188
2189+ # Upgrade setuptools and pip from default virtualenv versions. The default
2190+ # versions in trusty break master OpenStack branch deployments.
2191+ for p in ['pip', 'setuptools']:
2192+ pip_install(p, upgrade=True, proxy=http_proxy,
2193+ venv=os.path.join(parent_dir, 'venv'))
2194+
2195 for p in projects['repositories']:
2196 repo = p['repository']
2197 branch = p['branch']
2198@@ -612,24 +685,24 @@
2199 else:
2200 repo_dir = dest_dir
2201
2202+ venv = os.path.join(parent_dir, 'venv')
2203+
2204 if update_requirements:
2205 if not requirements_dir:
2206 error_out('requirements repo must be cloned before '
2207 'updating from global requirements.')
2208- _git_update_requirements(repo_dir, requirements_dir)
2209+ _git_update_requirements(venv, repo_dir, requirements_dir)
2210
2211 juju_log('Installing git repo from dir: {}'.format(repo_dir))
2212 if http_proxy:
2213- pip_install(repo_dir, proxy=http_proxy,
2214- venv=os.path.join(parent_dir, 'venv'))
2215+ pip_install(repo_dir, proxy=http_proxy, venv=venv)
2216 else:
2217- pip_install(repo_dir,
2218- venv=os.path.join(parent_dir, 'venv'))
2219+ pip_install(repo_dir, venv=venv)
2220
2221 return repo_dir
2222
2223
2224-def _git_update_requirements(package_dir, reqs_dir):
2225+def _git_update_requirements(venv, package_dir, reqs_dir):
2226 """
2227 Update from global requirements.
2228
2229@@ -638,12 +711,14 @@
2230 """
2231 orig_dir = os.getcwd()
2232 os.chdir(reqs_dir)
2233- cmd = ['python', 'update.py', package_dir]
2234+ python = os.path.join(venv, 'bin/python')
2235+ cmd = [python, 'update.py', package_dir]
2236 try:
2237 subprocess.check_call(cmd)
2238 except subprocess.CalledProcessError:
2239 package = os.path.basename(package_dir)
2240- error_out("Error updating {} from global-requirements.txt".format(package))
2241+ error_out("Error updating {} from "
2242+ "global-requirements.txt".format(package))
2243 os.chdir(orig_dir)
2244
2245
2246@@ -691,6 +766,222 @@
2247 return None
2248
2249
2250+def os_workload_status(configs, required_interfaces, charm_func=None):
2251+ """
2252+ Decorator to set workload status based on complete contexts
2253+ """
2254+ def wrap(f):
2255+ @wraps(f)
2256+ def wrapped_f(*args, **kwargs):
2257+ # Run the original function first
2258+ f(*args, **kwargs)
2259+ # Set workload status now that contexts have been
2260+ # acted on
2261+ set_os_workload_status(configs, required_interfaces, charm_func)
2262+ return wrapped_f
2263+ return wrap
2264+
2265+
2266+def set_os_workload_status(configs, required_interfaces, charm_func=None):
2267+ """
2268+ Set workload status based on complete contexts.
2269+ status-set missing or incomplete contexts
2270+ and juju-log details of missing required data.
2271+ charm_func is a charm specific function to run checking
2272+ for charm specific requirements such as a VIP setting.
2273+ """
2274+ incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
2275+ state = 'active'
2276+ missing_relations = []
2277+ incomplete_relations = []
2278+ message = None
2279+ charm_state = None
2280+ charm_message = None
2281+
2282+ for generic_interface in incomplete_rel_data.keys():
2283+ related_interface = None
2284+ missing_data = {}
2285+ # Related or not?
2286+ for interface in incomplete_rel_data[generic_interface]:
2287+ if incomplete_rel_data[generic_interface][interface].get('related'):
2288+ related_interface = interface
2289+ missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data')
2290+ # No relation ID for the generic_interface
2291+ if not related_interface:
2292+ juju_log("{} relation is missing and must be related for "
2293+ "functionality. ".format(generic_interface), 'WARN')
2294+ state = 'blocked'
2295+ if generic_interface not in missing_relations:
2296+ missing_relations.append(generic_interface)
2297+ else:
2298+ # Relation ID exists but no related unit
2299+ if not missing_data:
2300+ # Edge case relation ID exists but departing
2301+ if ('departed' in hook_name() or 'broken' in hook_name()) \
2302+ and related_interface in hook_name():
2303+ state = 'blocked'
2304+ if generic_interface not in missing_relations:
2305+ missing_relations.append(generic_interface)
2306+ juju_log("{} relation's interface, {}, "
2307+ "relationship is departed or broken "
2308+ "and is required for functionality."
2309+ "".format(generic_interface, related_interface), "WARN")
2310+ # Normal case relation ID exists but no related unit
2311+ # (joining)
2312+ else:
2313+                    juju_log("{} relation's interface, {}, is related but has "
2314+ "no units in the relation."
2315+ "".format(generic_interface, related_interface), "INFO")
2316+ # Related unit exists and data missing on the relation
2317+ else:
2318+ juju_log("{} relation's interface, {}, is related awaiting "
2319+ "the following data from the relationship: {}. "
2320+ "".format(generic_interface, related_interface,
2321+ ", ".join(missing_data)), "INFO")
2322+ if state != 'blocked':
2323+ state = 'waiting'
2324+ if generic_interface not in incomplete_relations \
2325+ and generic_interface not in missing_relations:
2326+ incomplete_relations.append(generic_interface)
2327+
2328+ if missing_relations:
2329+ message = "Missing relations: {}".format(", ".join(missing_relations))
2330+ if incomplete_relations:
2331+ message += "; incomplete relations: {}" \
2332+ "".format(", ".join(incomplete_relations))
2333+ state = 'blocked'
2334+ elif incomplete_relations:
2335+ message = "Incomplete relations: {}" \
2336+ "".format(", ".join(incomplete_relations))
2337+ state = 'waiting'
2338+
2339+ # Run charm specific checks
2340+ if charm_func:
2341+ charm_state, charm_message = charm_func(configs)
2342+ if charm_state != 'active' and charm_state != 'unknown':
2343+ state = workload_state_compare(state, charm_state)
2344+ if message:
2345+ charm_message = charm_message.replace("Incomplete relations: ",
2346+ "")
2347+ message = "{}, {}".format(message, charm_message)
2348+ else:
2349+ message = charm_message
2350+
2351+ # Set to active if all requirements have been met
2352+ if state == 'active':
2353+ message = "Unit is ready"
2354+ juju_log(message, "INFO")
2355+
2356+ status_set(state, message)
2357+
2358+
2359+def workload_state_compare(current_workload_state, workload_state):
2360+ """ Return highest priority of two states"""
2361+ hierarchy = {'unknown': -1,
2362+ 'active': 0,
2363+ 'maintenance': 1,
2364+ 'waiting': 2,
2365+ 'blocked': 3,
2366+ }
2367+
2368+ if hierarchy.get(workload_state) is None:
2369+ workload_state = 'unknown'
2370+ if hierarchy.get(current_workload_state) is None:
2371+ current_workload_state = 'unknown'
2372+
2373+ # Set workload_state based on hierarchy of statuses
2374+ if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
2375+ return current_workload_state
2376+ else:
2377+ return workload_state
2378+
2379+
2380+def incomplete_relation_data(configs, required_interfaces):
2381+ """
2382+ Check complete contexts against required_interfaces
2383+ Return dictionary of incomplete relation data.
2384+
2385+ configs is an OSConfigRenderer object with configs registered
2386+
2387+ required_interfaces is a dictionary of required general interfaces
2388+ with dictionary values of possible specific interfaces.
2389+ Example:
2390+ required_interfaces = {'database': ['shared-db', 'pgsql-db']}
2391+
2392+    The interface is said to be satisfied if any one of the interfaces in the
2393+ list has a complete context.
2394+
2395+ Return dictionary of incomplete or missing required contexts with relation
2396+ status of interfaces and any missing data points. Example:
2397+ {'message':
2398+ {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
2399+ 'zeromq-configuration': {'related': False}},
2400+ 'identity':
2401+ {'identity-service': {'related': False}},
2402+ 'database':
2403+ {'pgsql-db': {'related': False},
2404+ 'shared-db': {'related': True}}}
2405+ """
2406+ complete_ctxts = configs.complete_contexts()
2407+ incomplete_relations = []
2408+ for svc_type in required_interfaces.keys():
2409+ # Avoid duplicates
2410+ found_ctxt = False
2411+ for interface in required_interfaces[svc_type]:
2412+ if interface in complete_ctxts:
2413+ found_ctxt = True
2414+ if not found_ctxt:
2415+ incomplete_relations.append(svc_type)
2416+ incomplete_context_data = {}
2417+ for i in incomplete_relations:
2418+ incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i])
2419+ return incomplete_context_data
2420+
2421+
2422+def do_action_openstack_upgrade(package, upgrade_callback, configs):
2423+ """Perform action-managed OpenStack upgrade.
2424+
2425+ Upgrades packages to the configured openstack-origin version and sets
2426+ the corresponding action status as a result.
2427+
2428+ If the charm was installed from source we cannot upgrade it.
2429+ For backwards compatibility a config flag (action-managed-upgrade) must
2430+ be set for this code to run, otherwise a full service level upgrade will
2431+ fire on config-changed.
2432+
2433+ @param package: package name for determining if upgrade available
2434+ @param upgrade_callback: function callback to charm's upgrade function
2435+ @param configs: templating object derived from OSConfigRenderer class
2436+
2437+ @return: True if upgrade successful; False if upgrade failed or skipped
2438+ """
2439+ ret = False
2440+
2441+ if git_install_requested():
2442+ action_set({'outcome': 'installed from source, skipped upgrade.'})
2443+ else:
2444+ if openstack_upgrade_available(package):
2445+ if config('action-managed-upgrade'):
2446+ juju_log('Upgrading OpenStack release')
2447+
2448+ try:
2449+ upgrade_callback(configs=configs)
2450+ action_set({'outcome': 'success, upgrade completed.'})
2451+ ret = True
2452+ except:
2453+ action_set({'outcome': 'upgrade failed, see traceback.'})
2454+ action_set({'traceback': traceback.format_exc()})
2455+ action_fail('do_openstack_upgrade resulted in an '
2456+ 'unexpected error')
2457+ else:
2458+ action_set({'outcome': 'action-managed-upgrade config is '
2459+ 'False, skipped upgrade.'})
2460+ else:
2461+ action_set({'outcome': 'no upgrade available.'})
2462+
2463+ return ret
2464+
2465+
2466 def remote_restart(rel_name, remote_service=None):
2467 trigger = {
2468 'restart-trigger': str(uuid.uuid4()),
2469@@ -700,7 +991,7 @@
2470 for rid in relation_ids(rel_name):
2471 # This subordinate can be related to two seperate services using
2472 # different subordinate relations so only issue the restart if
2473- # thr principle is conencted down the relation we think it is
2474+    # the principal is connected down the relation we think it is
2475 if related_units(relid=rid):
2476 relation_set(relation_id=rid,
2477 relation_settings=trigger,
2478
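The workload-status helpers above are intended to wrap hook functions so that missing relations surface as 'blocked' and incomplete ones as 'waiting'. A rough sketch of how a charm might wire this up; the template directory, release name and interface map are illustrative assumptions, not taken from this branch:

    from charmhelpers.contrib.openstack.templating import OSConfigRenderer
    from charmhelpers.contrib.openstack.utils import os_workload_status

    CONFIGS = OSConfigRenderer(templates_dir='templates',
                               openstack_release='liberty')
    # ... CONFIGS.register(...) calls for the charm's config files go here ...

    # Generic interface name -> specific interfaces that can satisfy it.
    REQUIRED_INTERFACES = {
        'messaging': ['amqp', 'zeromq-configuration'],
        'identity': ['identity-service'],
    }

    @os_workload_status(CONFIGS, REQUIRED_INTERFACES)
    def config_changed():
        # status-set happens after the hook body, once contexts are acted on.
        CONFIGS.write_all()
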
2479=== modified file 'hooks/charmhelpers/contrib/python/packages.py'
2480--- hooks/charmhelpers/contrib/python/packages.py 2015-06-24 12:22:08 +0000
2481+++ hooks/charmhelpers/contrib/python/packages.py 2015-11-03 12:30:15 +0000
2482@@ -36,6 +36,8 @@
2483 def parse_options(given, available):
2484 """Given a set of options, check if available"""
2485 for key, value in sorted(given.items()):
2486+ if not value:
2487+ continue
2488 if key in available:
2489 yield "--{0}={1}".format(key, value)
2490
2491
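The small parse_options change above drops options whose value is falsy instead of emitting empty flags; roughly, and with a placeholder index URL:

    from charmhelpers.contrib.python.packages import parse_options

    opts = {'proxy': None, 'index-url': 'http://pypi.example.com/simple'}
    print(list(parse_options(opts, ['proxy', 'index-url'])))
    # ['--index-url=http://pypi.example.com/simple']  (the empty proxy is skipped)
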
2492=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
2493--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-06-24 12:22:08 +0000
2494+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-11-03 12:30:15 +0000
2495@@ -28,6 +28,7 @@
2496 import shutil
2497 import json
2498 import time
2499+import uuid
2500
2501 from subprocess import (
2502 check_call,
2503@@ -35,8 +36,10 @@
2504 CalledProcessError,
2505 )
2506 from charmhelpers.core.hookenv import (
2507+ local_unit,
2508 relation_get,
2509 relation_ids,
2510+ relation_set,
2511 related_units,
2512 log,
2513 DEBUG,
2514@@ -56,16 +59,18 @@
2515 apt_install,
2516 )
2517
2518+from charmhelpers.core.kernel import modprobe
2519+
2520 KEYRING = '/etc/ceph/ceph.client.{}.keyring'
2521 KEYFILE = '/etc/ceph/ceph.client.{}.key'
2522
2523 CEPH_CONF = """[global]
2524- auth supported = {auth}
2525- keyring = {keyring}
2526- mon host = {mon_hosts}
2527- log to syslog = {use_syslog}
2528- err to syslog = {use_syslog}
2529- clog to syslog = {use_syslog}
2530+auth supported = {auth}
2531+keyring = {keyring}
2532+mon host = {mon_hosts}
2533+log to syslog = {use_syslog}
2534+err to syslog = {use_syslog}
2535+clog to syslog = {use_syslog}
2536 """
2537
2538
2539@@ -288,17 +293,6 @@
2540 os.chown(data_src_dst, uid, gid)
2541
2542
2543-# TODO: re-use
2544-def modprobe(module):
2545- """Load a kernel module and configure for auto-load on reboot."""
2546- log('Loading kernel module', level=INFO)
2547- cmd = ['modprobe', module]
2548- check_call(cmd)
2549- with open('/etc/modules', 'r+') as modules:
2550- if module not in modules.read():
2551- modules.write(module)
2552-
2553-
2554 def copy_files(src, dst, symlinks=False, ignore=None):
2555 """Copy files from src to dst."""
2556 for item in os.listdir(src):
2557@@ -411,17 +405,52 @@
2558
2559 The API is versioned and defaults to version 1.
2560 """
2561- def __init__(self, api_version=1):
2562+ def __init__(self, api_version=1, request_id=None):
2563 self.api_version = api_version
2564+ if request_id:
2565+ self.request_id = request_id
2566+ else:
2567+ self.request_id = str(uuid.uuid1())
2568 self.ops = []
2569
2570 def add_op_create_pool(self, name, replica_count=3):
2571 self.ops.append({'op': 'create-pool', 'name': name,
2572 'replicas': replica_count})
2573
2574+ def set_ops(self, ops):
2575+ """Set request ops to provided value.
2576+
2577+ Useful for injecting ops that come from a previous request
2578+ to allow comparisons to ensure validity.
2579+ """
2580+ self.ops = ops
2581+
2582 @property
2583 def request(self):
2584- return json.dumps({'api-version': self.api_version, 'ops': self.ops})
2585+ return json.dumps({'api-version': self.api_version, 'ops': self.ops,
2586+ 'request-id': self.request_id})
2587+
2588+ def _ops_equal(self, other):
2589+ if len(self.ops) == len(other.ops):
2590+ for req_no in range(0, len(self.ops)):
2591+ for key in ['replicas', 'name', 'op']:
2592+ if self.ops[req_no][key] != other.ops[req_no][key]:
2593+ return False
2594+ else:
2595+ return False
2596+ return True
2597+
2598+ def __eq__(self, other):
2599+ if not isinstance(other, self.__class__):
2600+ return False
2601+ if self.api_version == other.api_version and \
2602+ self._ops_equal(other):
2603+ return True
2604+ else:
2605+ return False
2606+
2607+ def __ne__(self, other):
2608+ return not self.__eq__(other)
2609
2610
2611 class CephBrokerRsp(object):
2612@@ -431,14 +460,198 @@
2613
2614 The API is versioned and defaults to version 1.
2615 """
2616+
2617 def __init__(self, encoded_rsp):
2618 self.api_version = None
2619 self.rsp = json.loads(encoded_rsp)
2620
2621 @property
2622+ def request_id(self):
2623+ return self.rsp.get('request-id')
2624+
2625+ @property
2626 def exit_code(self):
2627 return self.rsp.get('exit-code')
2628
2629 @property
2630 def exit_msg(self):
2631 return self.rsp.get('stderr')
2632+
2633+
2634+# Ceph Broker Conversation:
2635+# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
2636+# and send that request to ceph via the ceph relation. The CephBrokerRq has a
2637+# unique id so that the client can identify which CephBrokerRsp is associated
2638+# with the request. Ceph will also respond to each client unit individually
2639+# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
2640+# via key broker-rsp-glance-0
2641+#
2642+# To use this the charm can just do something like:
2643+#
2644+# from charmhelpers.contrib.storage.linux.ceph import (
2645+# send_request_if_needed,
2646+# is_request_complete,
2647+# CephBrokerRq,
2648+# )
2649+#
2650+# @hooks.hook('ceph-relation-changed')
2651+# def ceph_changed():
2652+# rq = CephBrokerRq()
2653+# rq.add_op_create_pool(name='poolname', replica_count=3)
2654+#
2655+# if is_request_complete(rq):
2656+# <Request complete actions>
2657+# else:
2658+# send_request_if_needed(get_ceph_request())
2659+#
2660+# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
2661+# of glance having sent a request to ceph which ceph has successfully processed
2662+# 'ceph:8': {
2663+# 'ceph/0': {
2664+# 'auth': 'cephx',
2665+# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
2666+# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
2667+# 'ceph-public-address': '10.5.44.103',
2668+# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
2669+# 'private-address': '10.5.44.103',
2670+# },
2671+# 'glance/0': {
2672+# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
2673+# '"ops": [{"replicas": 3, "name": "glance", '
2674+# '"op": "create-pool"}]}'),
2675+# 'private-address': '10.5.44.109',
2676+# },
2677+# }
2678+
2679+def get_previous_request(rid):
2680+ """Return the last ceph broker request sent on a given relation
2681+
2682+ @param rid: Relation id to query for request
2683+ """
2684+ request = None
2685+ broker_req = relation_get(attribute='broker_req', rid=rid,
2686+ unit=local_unit())
2687+ if broker_req:
2688+ request_data = json.loads(broker_req)
2689+ request = CephBrokerRq(api_version=request_data['api-version'],
2690+ request_id=request_data['request-id'])
2691+ request.set_ops(request_data['ops'])
2692+
2693+ return request
2694+
2695+
2696+def get_request_states(request):
2697+ """Return a dict of requests per relation id with their corresponding
2698+ completion state.
2699+
2700+ This allows a charm, which has a request for ceph, to see whether there is
2701+ an equivalent request already being processed and if so what state that
2702+ request is in.
2703+
2704+ @param request: A CephBrokerRq object
2705+ """
2706+ complete = []
2707+ requests = {}
2708+ for rid in relation_ids('ceph'):
2709+ complete = False
2710+ previous_request = get_previous_request(rid)
2711+ if request == previous_request:
2712+ sent = True
2713+ complete = is_request_complete_for_rid(previous_request, rid)
2714+ else:
2715+ sent = False
2716+ complete = False
2717+
2718+ requests[rid] = {
2719+ 'sent': sent,
2720+ 'complete': complete,
2721+ }
2722+
2723+ return requests
2724+
2725+
2726+def is_request_sent(request):
2727+ """Check to see if a functionally equivalent request has already been sent
2728+
2729+    Returns True if a similar request has been sent
2730+
2731+ @param request: A CephBrokerRq object
2732+ """
2733+ states = get_request_states(request)
2734+ for rid in states.keys():
2735+ if not states[rid]['sent']:
2736+ return False
2737+
2738+ return True
2739+
2740+
2741+def is_request_complete(request):
2742+ """Check to see if a functionally equivalent request has already been
2743+ completed
2744+
2745+    Returns True if a similar request has been completed
2746+
2747+ @param request: A CephBrokerRq object
2748+ """
2749+ states = get_request_states(request)
2750+ for rid in states.keys():
2751+ if not states[rid]['complete']:
2752+ return False
2753+
2754+ return True
2755+
2756+
2757+def is_request_complete_for_rid(request, rid):
2758+ """Check if a given request has been completed on the given relation
2759+
2760+ @param request: A CephBrokerRq object
2761+ @param rid: Relation ID
2762+ """
2763+ broker_key = get_broker_rsp_key()
2764+ for unit in related_units(rid):
2765+ rdata = relation_get(rid=rid, unit=unit)
2766+ if rdata.get(broker_key):
2767+ rsp = CephBrokerRsp(rdata.get(broker_key))
2768+ if rsp.request_id == request.request_id:
2769+ if not rsp.exit_code:
2770+ return True
2771+ else:
2772+ # The remote unit sent no reply targeted at this unit so either the
2773+ # remote ceph cluster does not support unit targeted replies or it
2774+ # has not processed our request yet.
2775+ if rdata.get('broker_rsp'):
2776+ request_data = json.loads(rdata['broker_rsp'])
2777+ if request_data.get('request-id'):
2778+ log('Ignoring legacy broker_rsp without unit key as remote '
2779+ 'service supports unit specific replies', level=DEBUG)
2780+ else:
2781+ log('Using legacy broker_rsp as remote service does not '
2782+ 'supports unit specific replies', level=DEBUG)
2783+                        'support unit specific replies', level=DEBUG)
2784+ if not rsp.exit_code:
2785+ return True
2786+
2787+ return False
2788+
2789+
2790+def get_broker_rsp_key():
2791+ """Return broker response key for this unit
2792+
2793+ This is the key that ceph is going to use to pass request status
2794+ information back to this unit
2795+ """
2796+ return 'broker-rsp-' + local_unit().replace('/', '-')
2797+
2798+
2799+def send_request_if_needed(request):
2800+ """Send broker request if an equivalent request has not already been sent
2801+
2802+ @param request: A CephBrokerRq object
2803+ """
2804+ if is_request_sent(request):
2805+ log('Request already sent but not complete, not sending new request',
2806+ level=DEBUG)
2807+ else:
2808+ for rid in relation_ids('ceph'):
2809+ log('Sending request {}'.format(request.request_id), level=DEBUG)
2810+ relation_set(relation_id=rid, broker_req=request.request)
2811
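One detail of the broker changes above worth spelling out: request equality ignores the request-id and compares only the api-version and ops, so a request rebuilt in a later hook still matches the one already sent. A small sketch with an illustrative pool name:

    from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

    rq1 = CephBrokerRq()
    rq1.add_op_create_pool(name='glance', replica_count=3)

    rq2 = CephBrokerRq()   # gets a different auto-generated request-id
    rq2.add_op_create_pool(name='glance', replica_count=3)

    assert rq1 == rq2                          # same ops, so treated as equal
    assert rq1.request_id != rq2.request_id    # ids still differ
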
2812=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
2813--- hooks/charmhelpers/contrib/storage/linux/utils.py 2015-06-24 12:22:08 +0000
2814+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2015-11-03 12:30:15 +0000
2815@@ -43,9 +43,10 @@
2816
2817 :param block_device: str: Full path of block device to clean.
2818 '''
2819+ # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
2820 # sometimes sgdisk exits non-zero; this is OK, dd will clean up
2821- call(['sgdisk', '--zap-all', '--mbrtogpt',
2822- '--clear', block_device])
2823+ call(['sgdisk', '--zap-all', '--', block_device])
2824+ call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
2825 dev_end = check_output(['blockdev', '--getsz',
2826 block_device]).decode('UTF-8')
2827 gpt_end = int(dev_end.split()[0]) - 100
2828@@ -67,4 +68,4 @@
2829 out = check_output(['mount']).decode('UTF-8')
2830 if is_partition:
2831 return bool(re.search(device + r"\b", out))
2832- return bool(re.search(device + r"[0-9]+\b", out))
2833+ return bool(re.search(device + r"[0-9]*\b", out))
2834
2835=== added file 'hooks/charmhelpers/core/files.py'
2836--- hooks/charmhelpers/core/files.py 1970-01-01 00:00:00 +0000
2837+++ hooks/charmhelpers/core/files.py 2015-11-03 12:30:15 +0000
2838@@ -0,0 +1,45 @@
2839+#!/usr/bin/env python
2840+# -*- coding: utf-8 -*-
2841+
2842+# Copyright 2014-2015 Canonical Limited.
2843+#
2844+# This file is part of charm-helpers.
2845+#
2846+# charm-helpers is free software: you can redistribute it and/or modify
2847+# it under the terms of the GNU Lesser General Public License version 3 as
2848+# published by the Free Software Foundation.
2849+#
2850+# charm-helpers is distributed in the hope that it will be useful,
2851+# but WITHOUT ANY WARRANTY; without even the implied warranty of
2852+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2853+# GNU Lesser General Public License for more details.
2854+#
2855+# You should have received a copy of the GNU Lesser General Public License
2856+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
2857+
2858+__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
2859+
2860+import os
2861+import subprocess
2862+
2863+
2864+def sed(filename, before, after, flags='g'):
2865+ """
2866+ Search and replaces the given pattern on filename.
2867+
2868+ :param filename: relative or absolute file path.
2869+ :param before: expression to be replaced (see 'man sed')
2870+ :param after: expression to replace with (see 'man sed')
2871+    :param flags: sed-compatible regex flags; for example, to make
2872+ the search and replace case insensitive, specify ``flags="i"``.
2873+ The ``g`` flag is always specified regardless, so you do not
2874+ need to remember to include it when overriding this parameter.
2875+ :returns: If the sed command exit code was zero then return,
2876+ otherwise raise CalledProcessError.
2877+ """
2878+ expression = r's/{0}/{1}/{2}'.format(before,
2879+ after, flags)
2880+
2881+ return subprocess.check_call(["sed", "-i", "-r", "-e",
2882+ expression,
2883+ os.path.expanduser(filename)])
2884
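The new files.sed() helper shells out to sed -i -r, so the before and after arguments are extended regular expressions. A one-line usage sketch; the path and pattern are only examples:

    from charmhelpers.core.files import sed

    # In-place replace; raises CalledProcessError if sed exits non-zero.
    sed('/etc/ssh/sshd_config', 'PermitRootLogin yes', 'PermitRootLogin no')
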
2885=== modified file 'hooks/charmhelpers/core/hookenv.py'
2886--- hooks/charmhelpers/core/hookenv.py 2015-06-24 12:22:08 +0000
2887+++ hooks/charmhelpers/core/hookenv.py 2015-11-03 12:30:15 +0000
2888@@ -21,7 +21,10 @@
2889 # Charm Helpers Developers <juju@lists.ubuntu.com>
2890
2891 from __future__ import print_function
2892+import copy
2893+from distutils.version import LooseVersion
2894 from functools import wraps
2895+import glob
2896 import os
2897 import json
2898 import yaml
2899@@ -71,6 +74,7 @@
2900 res = func(*args, **kwargs)
2901 cache[key] = res
2902 return res
2903+ wrapper._wrapped = func
2904 return wrapper
2905
2906
2907@@ -170,9 +174,19 @@
2908 return os.environ.get('JUJU_RELATION', None)
2909
2910
2911-def relation_id():
2912- """The relation ID for the current relation hook"""
2913- return os.environ.get('JUJU_RELATION_ID', None)
2914+@cached
2915+def relation_id(relation_name=None, service_or_unit=None):
2916+ """The relation ID for the current or a specified relation"""
2917+ if not relation_name and not service_or_unit:
2918+ return os.environ.get('JUJU_RELATION_ID', None)
2919+ elif relation_name and service_or_unit:
2920+ service_name = service_or_unit.split('/')[0]
2921+ for relid in relation_ids(relation_name):
2922+ remote_service = remote_service_name(relid)
2923+ if remote_service == service_name:
2924+ return relid
2925+ else:
2926+ raise ValueError('Must specify neither or both of relation_name and service_or_unit')
2927
2928
2929 def local_unit():
2930@@ -190,9 +204,20 @@
2931 return local_unit().split('/')[0]
2932
2933
2934+@cached
2935+def remote_service_name(relid=None):
2936+ """The remote service name for a given relation-id (or the current relation)"""
2937+ if relid is None:
2938+ unit = remote_unit()
2939+ else:
2940+ units = related_units(relid)
2941+ unit = units[0] if units else None
2942+ return unit.split('/')[0] if unit else None
2943+
2944+
2945 def hook_name():
2946 """The name of the currently executing hook"""
2947- return os.path.basename(sys.argv[0])
2948+ return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
2949
2950
2951 class Config(dict):
2952@@ -242,29 +267,7 @@
2953 self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
2954 if os.path.exists(self.path):
2955 self.load_previous()
2956-
2957- def __getitem__(self, key):
2958- """For regular dict lookups, check the current juju config first,
2959- then the previous (saved) copy. This ensures that user-saved values
2960- will be returned by a dict lookup.
2961-
2962- """
2963- try:
2964- return dict.__getitem__(self, key)
2965- except KeyError:
2966- return (self._prev_dict or {})[key]
2967-
2968- def get(self, key, default=None):
2969- try:
2970- return self[key]
2971- except KeyError:
2972- return default
2973-
2974- def keys(self):
2975- prev_keys = []
2976- if self._prev_dict is not None:
2977- prev_keys = self._prev_dict.keys()
2978- return list(set(prev_keys + list(dict.keys(self))))
2979+ atexit(self._implicit_save)
2980
2981 def load_previous(self, path=None):
2982 """Load previous copy of config from disk.
2983@@ -283,6 +286,9 @@
2984 self.path = path or self.path
2985 with open(self.path) as f:
2986 self._prev_dict = json.load(f)
2987+ for k, v in copy.deepcopy(self._prev_dict).items():
2988+ if k not in self:
2989+ self[k] = v
2990
2991 def changed(self, key):
2992 """Return True if the current value for this key is different from
2993@@ -314,13 +320,13 @@
2994 instance.
2995
2996 """
2997- if self._prev_dict:
2998- for k, v in six.iteritems(self._prev_dict):
2999- if k not in self:
3000- self[k] = v
3001 with open(self.path, 'w') as f:
3002 json.dump(self, f)
3003
3004+ def _implicit_save(self):
3005+ if self.implicit_save:
3006+ self.save()
3007+
3008
3009 @cached
3010 def config(scope=None):
3011@@ -485,6 +491,76 @@
3012
3013
3014 @cached
3015+def peer_relation_id():
3016+ '''Get a peer relation id if a peer relation has been joined, else None.'''
3017+ md = metadata()
3018+ section = md.get('peers')
3019+ if section:
3020+ for key in section:
3021+ relids = relation_ids(key)
3022+ if relids:
3023+ return relids[0]
3024+ return None
3025+
3026+
3027+@cached
3028+def relation_to_interface(relation_name):
3029+ """
3030+ Given the name of a relation, return the interface that relation uses.
3031+
3032+ :returns: The interface name, or ``None``.
3033+ """
3034+ return relation_to_role_and_interface(relation_name)[1]
3035+
3036+
3037+@cached
3038+def relation_to_role_and_interface(relation_name):
3039+ """
3040+ Given the name of a relation, return the role and the name of the interface
3041+ that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).
3042+
3043+ :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
3044+ """
3045+ _metadata = metadata()
3046+ for role in ('provides', 'requires', 'peer'):
3047+ interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
3048+ if interface:
3049+ return role, interface
3050+ return None, None
3051+
3052+
3053+@cached
3054+def role_and_interface_to_relations(role, interface_name):
3055+ """
3056+ Given a role and interface name, return a list of relation names for the
3057+ current charm that use that interface under that role (where role is one
3058+ of ``provides``, ``requires``, or ``peer``).
3059+
3060+ :returns: A list of relation names.
3061+ """
3062+ _metadata = metadata()
3063+ results = []
3064+ for relation_name, relation in _metadata.get(role, {}).items():
3065+ if relation['interface'] == interface_name:
3066+ results.append(relation_name)
3067+ return results
3068+
3069+
3070+@cached
3071+def interface_to_relations(interface_name):
3072+ """
3073+ Given an interface, return a list of relation names for the current
3074+ charm that use that interface.
3075+
3076+ :returns: A list of relation names.
3077+ """
3078+ results = []
3079+ for role in ('provides', 'requires', 'peer'):
3080+ results.extend(role_and_interface_to_relations(role, interface_name))
3081+ return results
3082+
3083+
3084+@cached
3085 def charm_name():
3086 """Get the name of the current charm as is specified on metadata.yaml"""
3087 return metadata().get('name')
3088@@ -560,6 +636,38 @@
3089 return unit_get('private-address')
3090
3091
3092+@cached
3093+def storage_get(attribute="", storage_id=""):
3094+ """Get storage attributes"""
3095+ _args = ['storage-get', '--format=json']
3096+ if storage_id:
3097+ _args.extend(('-s', storage_id))
3098+ if attribute:
3099+ _args.append(attribute)
3100+ try:
3101+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
3102+ except ValueError:
3103+ return None
3104+
3105+
3106+@cached
3107+def storage_list(storage_name=""):
3108+ """List the storage IDs for the unit"""
3109+ _args = ['storage-list', '--format=json']
3110+ if storage_name:
3111+ _args.append(storage_name)
3112+ try:
3113+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
3114+ except ValueError:
3115+ return None
3116+ except OSError as e:
3117+ import errno
3118+ if e.errno == errno.ENOENT:
3119+ # storage-list does not exist
3120+ return []
3121+ raise
3122+
3123+
3124 class UnregisteredHookError(Exception):
3125 """Raised when an undefined hook is called"""
3126 pass
3127@@ -587,10 +695,14 @@
3128 hooks.execute(sys.argv)
3129 """
3130
3131- def __init__(self, config_save=True):
3132+ def __init__(self, config_save=None):
3133 super(Hooks, self).__init__()
3134 self._hooks = {}
3135- self._config_save = config_save
3136+
3137+ # For unknown reasons, we allow the Hooks constructor to override
3138+ # config().implicit_save.
3139+ if config_save is not None:
3140+ config().implicit_save = config_save
3141
3142 def register(self, name, function):
3143 """Register a hook"""
3144@@ -598,13 +710,16 @@
3145
3146 def execute(self, args):
3147 """Execute a registered hook based on args[0]"""
3148+ _run_atstart()
3149 hook_name = os.path.basename(args[0])
3150 if hook_name in self._hooks:
3151- self._hooks[hook_name]()
3152- if self._config_save:
3153- cfg = config()
3154- if cfg.implicit_save:
3155- cfg.save()
3156+ try:
3157+ self._hooks[hook_name]()
3158+ except SystemExit as x:
3159+ if x.code is None or x.code == 0:
3160+ _run_atexit()
3161+ raise
3162+ _run_atexit()
3163 else:
3164 raise UnregisteredHookError(hook_name)
3165
3166@@ -653,6 +768,21 @@
3167 subprocess.check_call(['action-fail', message])
3168
3169
3170+def action_name():
3171+ """Get the name of the currently executing action."""
3172+ return os.environ.get('JUJU_ACTION_NAME')
3173+
3174+
3175+def action_uuid():
3176+ """Get the UUID of the currently executing action."""
3177+ return os.environ.get('JUJU_ACTION_UUID')
3178+
3179+
3180+def action_tag():
3181+ """Get the tag for the currently executing action."""
3182+ return os.environ.get('JUJU_ACTION_TAG')
3183+
3184+
3185 def status_set(workload_state, message):
3186 """Set the workload state with a message
3187
3188@@ -682,25 +812,28 @@
3189
3190
3191 def status_get():
3192- """Retrieve the previously set juju workload state
3193-
3194- If the status-set command is not found then assume this is juju < 1.23 and
3195- return 'unknown'
3196+ """Retrieve the previously set juju workload state and message
3197+
3198+ If the status-get command is not found then assume this is juju < 1.23 and
3199+ return 'unknown', ""
3200+
3201 """
3202- cmd = ['status-get']
3203+ cmd = ['status-get', "--format=json", "--include-data"]
3204 try:
3205- raw_status = subprocess.check_output(cmd, universal_newlines=True)
3206- status = raw_status.rstrip()
3207- return status
3208+ raw_status = subprocess.check_output(cmd)
3209 except OSError as e:
3210 if e.errno == errno.ENOENT:
3211- return 'unknown'
3212+ return ('unknown', "")
3213 else:
3214 raise
3215+ else:
3216+ status = json.loads(raw_status.decode("UTF-8"))
3217+ return (status["status"], status["message"])
3218
3219
3220 def translate_exc(from_exc, to_exc):
3221 def inner_translate_exc1(f):
3222+ @wraps(f)
3223 def inner_translate_exc2(*args, **kwargs):
3224 try:
3225 return f(*args, **kwargs)
3226@@ -732,13 +865,80 @@
3227 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
3228 def leader_set(settings=None, **kwargs):
3229 """Juju leader set value(s)"""
3230- log("Juju leader-set '%s'" % (settings), level=DEBUG)
3231+ # Don't log secrets.
3232+ # log("Juju leader-set '%s'" % (settings), level=DEBUG)
3233 cmd = ['leader-set']
3234 settings = settings or {}
3235 settings.update(kwargs)
3236- for k, v in settings.iteritems():
3237+ for k, v in settings.items():
3238 if v is None:
3239 cmd.append('{}='.format(k))
3240 else:
3241 cmd.append('{}={}'.format(k, v))
3242 subprocess.check_call(cmd)
3243+
3244+
3245+@cached
3246+def juju_version():
3247+ """Full version string (eg. '1.23.3.1-trusty-amd64')"""
3248+ # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
3249+ jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
3250+ return subprocess.check_output([jujud, 'version'],
3251+ universal_newlines=True).strip()
3252+
3253+
3254+@cached
3255+def has_juju_version(minimum_version):
3256+ """Return True if the Juju version is at least the provided version"""
3257+ return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
3258+
3259+
3260+_atexit = []
3261+_atstart = []
3262+
3263+
3264+def atstart(callback, *args, **kwargs):
3265+ '''Schedule a callback to run before the main hook.
3266+
3267+ Callbacks are run in the order they were added.
3268+
3269+ This is useful for modules and classes to perform initialization
3270+ and inject behavior. In particular:
3271+
3272+ - Run common code before all of your hooks, such as logging
3273+ the hook name or interesting relation data.
3274+ - Defer object or module initialization that requires a hook
3275+ context until we know there actually is a hook context,
3276+ making testing easier.
3277+ - Rather than requiring charm authors to include boilerplate to
3278+ invoke your helper's behavior, have it run automatically if
3279+ your object is instantiated or module imported.
3280+
3281+ This is not at all useful after your hook framework has been launched.
3282+ '''
3283+ global _atstart
3284+ _atstart.append((callback, args, kwargs))
3285+
3286+
3287+def atexit(callback, *args, **kwargs):
3288+ '''Schedule a callback to run on successful hook completion.
3289+
3290+ Callbacks are run in the reverse order that they were added.'''
3291+ _atexit.append((callback, args, kwargs))
3292+
3293+
3294+def _run_atstart():
3295+ '''Hook frameworks must invoke this before running the main hook body.'''
3296+ global _atstart
3297+ for callback, args, kwargs in _atstart:
3298+ callback(*args, **kwargs)
3299+ del _atstart[:]
3300+
3301+
3302+def _run_atexit():
3303+ '''Hook frameworks must invoke this after the main hook body has
3304+ successfully completed. Do not invoke it if the hook fails.'''
3305+ global _atexit
3306+ for callback, args, kwargs in reversed(_atexit):
3307+ callback(*args, **kwargs)
3308+ del _atexit[:]
3309
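A rough usage sketch of the new hookenv helpers above — the tuple-returning status_get() and the atexit() registration — assuming a normal Juju hook execution context; the hook name and helper function below are hypothetical:

    import sys
    from charmhelpers.core.hookenv import Hooks, atexit, status_get, status_set

    hooks = Hooks()

    def _report():
        # Runs after the hook body completes successfully (callbacks run in
        # reverse order of registration).
        state, message = status_get()  # now a (state, message) tuple
        if state == 'unknown':         # juju < 1.23, status-get unavailable
            return
        status_set('active', 'unit ready')

    @hooks.hook('config-changed')
    def config_changed():
        atexit(_report)

    if __name__ == '__main__':
        hooks.execute(sys.argv)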
3310=== modified file 'hooks/charmhelpers/core/host.py'
3311--- hooks/charmhelpers/core/host.py 2015-06-24 12:22:08 +0000
3312+++ hooks/charmhelpers/core/host.py 2015-11-03 12:30:15 +0000
3313@@ -63,6 +63,52 @@
3314 return service_result
3315
3316
3317+def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
3318+ """Pause a system service.
3319+
3320+ Stop it, and prevent it from starting again at boot."""
3321+ stopped = service_stop(service_name)
3322+ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
3323+ sysv_file = os.path.join(initd_dir, service_name)
3324+ if os.path.exists(upstart_file):
3325+ override_path = os.path.join(
3326+ init_dir, '{}.override'.format(service_name))
3327+ with open(override_path, 'w') as fh:
3328+ fh.write("manual\n")
3329+ elif os.path.exists(sysv_file):
3330+ subprocess.check_call(["update-rc.d", service_name, "disable"])
3331+ else:
3332+ # XXX: Support SystemD too
3333+ raise ValueError(
3334+ "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
3335+ service_name, upstart_file, sysv_file))
3336+ return stopped
3337+
3338+
3339+def service_resume(service_name, init_dir="/etc/init",
3340+ initd_dir="/etc/init.d"):
3341+ """Resume a system service.
3342+
3343+ Reenable starting again at boot. Start the service"""
3344+ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
3345+ sysv_file = os.path.join(initd_dir, service_name)
3346+ if os.path.exists(upstart_file):
3347+ override_path = os.path.join(
3348+ init_dir, '{}.override'.format(service_name))
3349+ if os.path.exists(override_path):
3350+ os.unlink(override_path)
3351+ elif os.path.exists(sysv_file):
3352+ subprocess.check_call(["update-rc.d", service_name, "enable"])
3353+ else:
3354+ # XXX: Support SystemD too
3355+ raise ValueError(
3356+ "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
3357+ service_name, upstart_file, sysv_file))
3358+
3359+ started = service_start(service_name)
3360+ return started
3361+
3362+
3363 def service(action, service_name):
3364 """Control a system service"""
3365 cmd = ['service', service_name, action]
3366@@ -119,8 +165,9 @@
3367
3368
3369 def user_exists(username):
3370+ """Check if a user exists"""
3371 try:
3372- user_info = pwd.getpwnam(username)
3373+ pwd.getpwnam(username)
3374 user_exists = True
3375 except KeyError:
3376 user_exists = False
3377@@ -149,11 +196,7 @@
3378
3379 def add_user_to_group(username, group):
3380 """Add a user to a group"""
3381- cmd = [
3382- 'gpasswd', '-a',
3383- username,
3384- group
3385- ]
3386+ cmd = ['gpasswd', '-a', username, group]
3387 log("Adding user {} to group {}".format(username, group))
3388 subprocess.check_call(cmd)
3389
3390@@ -263,8 +306,8 @@
3391 return system_mounts
3392
3393
3394-
3395 def fstab_mount(mountpoint):
3396+ """Mount filesystem using fstab"""
3397 cmd_args = ['mount', mountpoint]
3398 try:
3399 subprocess.check_output(cmd_args)
3400@@ -390,25 +433,80 @@
3401 return(''.join(random_chars))
3402
3403
3404-def list_nics(nic_type):
3405+def is_phy_iface(interface):
3406+ """Returns True if interface is not virtual, otherwise False."""
3407+ if interface:
3408+ sys_net = '/sys/class/net'
3409+ if os.path.isdir(sys_net):
3410+ for iface in glob.glob(os.path.join(sys_net, '*')):
3411+ if '/virtual/' in os.path.realpath(iface):
3412+ continue
3413+
3414+ if interface == os.path.basename(iface):
3415+ return True
3416+
3417+ return False
3418+
3419+
3420+def get_bond_master(interface):
3421+ """Returns bond master if interface is bond slave otherwise None.
3422+
3423+ NOTE: the provided interface is expected to be physical
3424+ """
3425+ if interface:
3426+ iface_path = '/sys/class/net/%s' % (interface)
3427+ if os.path.exists(iface_path):
3428+ if '/virtual/' in os.path.realpath(iface_path):
3429+ return None
3430+
3431+ master = os.path.join(iface_path, 'master')
3432+ if os.path.exists(master):
3433+ master = os.path.realpath(master)
3434+ # make sure it is a bond master
3435+ if os.path.exists(os.path.join(master, 'bonding')):
3436+ return os.path.basename(master)
3437+
3438+ return None
3439+
3440+
3441+def list_nics(nic_type=None):
3442 '''Return a list of nics of given type(s)'''
3443 if isinstance(nic_type, six.string_types):
3444 int_types = [nic_type]
3445 else:
3446 int_types = nic_type
3447+
3448 interfaces = []
3449- for int_type in int_types:
3450- cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
3451+ if nic_type:
3452+ for int_type in int_types:
3453+ cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
3454+ ip_output = subprocess.check_output(cmd).decode('UTF-8')
3455+ ip_output = ip_output.split('\n')
3456+ ip_output = (line for line in ip_output if line)
3457+ for line in ip_output:
3458+ if line.split()[1].startswith(int_type):
3459+ matched = re.search('.*: (' + int_type +
3460+ r'[0-9]+\.[0-9]+)@.*', line)
3461+ if matched:
3462+ iface = matched.groups()[0]
3463+ else:
3464+ iface = line.split()[1].replace(":", "")
3465+
3466+ if iface not in interfaces:
3467+ interfaces.append(iface)
3468+ else:
3469+ cmd = ['ip', 'a']
3470 ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
3471- ip_output = (line for line in ip_output if line)
3472+ ip_output = (line.strip() for line in ip_output if line)
3473+
3474+ key = re.compile('^[0-9]+:\s+(.+):')
3475 for line in ip_output:
3476- if line.split()[1].startswith(int_type):
3477- matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line)
3478- if matched:
3479- interface = matched.groups()[0]
3480- else:
3481- interface = line.split()[1].replace(":", "")
3482- interfaces.append(interface)
3483+ matched = re.search(key, line)
3484+ if matched:
3485+ iface = matched.group(1)
3486+ iface = iface.partition("@")[0]
3487+ if iface not in interfaces:
3488+ interfaces.append(iface)
3489
3490 return interfaces
3491
3492@@ -468,7 +566,14 @@
3493 os.chdir(cur)
3494
3495
3496-def chownr(path, owner, group, follow_links=True):
3497+def chownr(path, owner, group, follow_links=True, chowntopdir=False):
3498+ """
3499+ Recursively change user and group ownership of files and directories
3500+ in given path. Doesn't chown path itself by default, only its children.
3501+
3502+ :param bool follow_links: Also chown links if True
3503+ :param bool chowntopdir: Also chown path itself if True
3504+ """
3505 uid = pwd.getpwnam(owner).pw_uid
3506 gid = grp.getgrnam(group).gr_gid
3507 if follow_links:
3508@@ -476,6 +581,10 @@
3509 else:
3510 chown = os.lchown
3511
3512+ if chowntopdir:
3513+ broken_symlink = os.path.lexists(path) and not os.path.exists(path)
3514+ if not broken_symlink:
3515+ chown(path, uid, gid)
3516 for root, dirs, files in os.walk(path):
3517 for name in dirs + files:
3518 full = os.path.join(root, name)
3519@@ -486,3 +595,19 @@
3520
3521 def lchownr(path, owner, group):
3522 chownr(path, owner, group, follow_links=False)
3523+
3524+
3525+def get_total_ram():
3526+ '''The total amount of system RAM in bytes.
3527+
3528+ This is what is reported by the OS, and may be overcommitted when
3529+ there are multiple containers hosted on the same machine.
3530+ '''
3531+ with open('/proc/meminfo', 'r') as f:
3532+ for line in f.readlines():
3533+ if line:
3534+ key, value, unit = line.split()
3535+ if key == 'MemTotal:':
3536+ assert unit == 'kB', 'Unknown unit'
3537+ return int(value) * 1024 # Classic, not KiB.
3538+ raise NotImplementedError()
3539
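A minimal sketch of the new service_pause()/service_resume() pair, which stop a service and keep it from starting at boot, then reverse that; the service name is hypothetical and both calls assume an Upstart or SysV-managed service, since a missing init/initd file raises ValueError (systemd is not yet supported):

    from charmhelpers.core.host import service_pause, service_resume

    if service_pause('neutron-server'):
        # ... perform maintenance while the service is stopped and masked ...
        service_resume('neutron-server')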
3540=== modified file 'hooks/charmhelpers/core/hugepage.py'
3541--- hooks/charmhelpers/core/hugepage.py 2015-06-24 12:22:08 +0000
3542+++ hooks/charmhelpers/core/hugepage.py 2015-11-03 12:30:15 +0000
3543@@ -1,5 +1,3 @@
3544-
3545-#!/usr/bin/env python
3546 # -*- coding: utf-8 -*-
3547
3548 # Copyright 2014-2015 Canonical Limited.
3549@@ -19,36 +17,55 @@
3550 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3551
3552 import yaml
3553-from charmhelpers.core.fstab import Fstab
3554-from charmhelpers.core.sysctl import (
3555- create,
3556-)
3557+from charmhelpers.core import fstab
3558+from charmhelpers.core import sysctl
3559 from charmhelpers.core.host import (
3560 add_group,
3561 add_user_to_group,
3562 fstab_mount,
3563 mkdir,
3564 )
3565+from charmhelpers.core.strutils import bytes_from_string
3566+from subprocess import check_output
3567+
3568
3569 def hugepage_support(user, group='hugetlb', nr_hugepages=256,
3570- max_map_count=65536, mnt_point='/hugepages',
3571- pagesize='2MB', mount=True):
3572+ max_map_count=65536, mnt_point='/run/hugepages/kvm',
3573+ pagesize='2MB', mount=True, set_shmmax=False):
3574+ """Enable hugepages on system.
3575+
3576+ Args:
3577+ user (str) -- Username to allow access to hugepages to
3578+ group (str) -- Group name to own hugepages
3579+ nr_hugepages (int) -- Number of pages to reserve
3580+ max_map_count (int) -- Number of Virtual Memory Areas a process can own
3581+ mnt_point (str) -- Directory to mount hugepages on
3582+ pagesize (str) -- Size of hugepages
3583+ mount (bool) -- Whether to mount hugepages
3584+ """
3585 group_info = add_group(group)
3586 gid = group_info.gr_gid
3587 add_user_to_group(user, group)
3588+ if max_map_count < 2 * nr_hugepages:
3589+ max_map_count = 2 * nr_hugepages
3590 sysctl_settings = {
3591 'vm.nr_hugepages': nr_hugepages,
3592- 'vm.max_map_count': max_map_count, # 1GB
3593+ 'vm.max_map_count': max_map_count,
3594 'vm.hugetlb_shm_group': gid,
3595 }
3596- create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
3597+ if set_shmmax:
3598+ shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
3599+ shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
3600+ if shmmax_minsize > shmmax_current:
3601+ sysctl_settings['kernel.shmmax'] = shmmax_minsize
3602+ sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
3603 mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
3604- fstab = Fstab()
3605- fstab_entry = fstab.get_entry_by_attr('mountpoint', mnt_point)
3606+ lfstab = fstab.Fstab()
3607+ fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
3608 if fstab_entry:
3609- fstab.remove_entry(fstab_entry)
3610- entry = fstab.Entry('nodev', mnt_point, 'hugetlbfs',
3611- 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
3612- fstab.add_entry(entry)
3613+ lfstab.remove_entry(fstab_entry)
3614+ entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
3615+ 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
3616+ lfstab.add_entry(entry)
3617 if mount:
3618 fstab_mount(mnt_point)
3619
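A hedged sketch of calling the reworked hugepage_support() with the new set_shmmax flag and the new default mount point; the user name is hypothetical and the call assumes root privileges on the unit:

    from charmhelpers.core.hugepage import hugepage_support

    # Reserve 1024 x 2MB hugepages for a hypothetical 'libvirt-qemu' user,
    # mounted at the new default /run/hugepages/kvm, raising kernel.shmmax
    # only if the current value is too small for the reservation.
    hugepage_support('libvirt-qemu', group='hugetlb', nr_hugepages=1024,
                     pagesize='2MB', mount=True, set_shmmax=True)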
3620=== added file 'hooks/charmhelpers/core/kernel.py'
3621--- hooks/charmhelpers/core/kernel.py 1970-01-01 00:00:00 +0000
3622+++ hooks/charmhelpers/core/kernel.py 2015-11-03 12:30:15 +0000
3623@@ -0,0 +1,68 @@
3624+#!/usr/bin/env python
3625+# -*- coding: utf-8 -*-
3626+
3627+# Copyright 2014-2015 Canonical Limited.
3628+#
3629+# This file is part of charm-helpers.
3630+#
3631+# charm-helpers is free software: you can redistribute it and/or modify
3632+# it under the terms of the GNU Lesser General Public License version 3 as
3633+# published by the Free Software Foundation.
3634+#
3635+# charm-helpers is distributed in the hope that it will be useful,
3636+# but WITHOUT ANY WARRANTY; without even the implied warranty of
3637+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3638+# GNU Lesser General Public License for more details.
3639+#
3640+# You should have received a copy of the GNU Lesser General Public License
3641+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3642+
3643+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
3644+
3645+from charmhelpers.core.hookenv import (
3646+ log,
3647+ INFO
3648+)
3649+
3650+from subprocess import check_call, check_output
3651+import re
3652+
3653+
3654+def modprobe(module, persist=True):
3655+ """Load a kernel module and configure for auto-load on reboot."""
3656+ cmd = ['modprobe', module]
3657+
3658+ log('Loading kernel module %s' % module, level=INFO)
3659+
3660+ check_call(cmd)
3661+ if persist:
3662+ with open('/etc/modules', 'r+') as modules:
3663+ if module not in modules.read():
3664+ modules.write(module)
3665+
3666+
3667+def rmmod(module, force=False):
3668+ """Remove a module from the linux kernel"""
3669+ cmd = ['rmmod']
3670+ if force:
3671+ cmd.append('-f')
3672+ cmd.append(module)
3673+ log('Removing kernel module %s' % module, level=INFO)
3674+ return check_call(cmd)
3675+
3676+
3677+def lsmod():
3678+ """Shows what kernel modules are currently loaded"""
3679+ return check_output(['lsmod'],
3680+ universal_newlines=True)
3681+
3682+
3683+def is_module_loaded(module):
3684+ """Checks if a kernel module is already loaded"""
3685+ matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
3686+ return len(matches) > 0
3687+
3688+
3689+def update_initramfs(version='all'):
3690+ """Updates an initramfs image"""
3691+ return check_call(["update-initramfs", "-k", version, "-u"])
3692
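A short sketch of the newly added kernel helpers; the module name is illustrative and the calls assume root on a Debian/Ubuntu host:

    from charmhelpers.core.kernel import is_module_loaded, modprobe

    if not is_module_loaded('bonding'):
        modprobe('bonding', persist=True)  # also appends the module to /etc/modules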
3693=== modified file 'hooks/charmhelpers/core/services/base.py'
3694--- hooks/charmhelpers/core/services/base.py 2015-06-24 12:22:08 +0000
3695+++ hooks/charmhelpers/core/services/base.py 2015-11-03 12:30:15 +0000
3696@@ -128,15 +128,18 @@
3697 """
3698 Handle the current hook by doing The Right Thing with the registered services.
3699 """
3700- hook_name = hookenv.hook_name()
3701- if hook_name == 'stop':
3702- self.stop_services()
3703- else:
3704- self.reconfigure_services()
3705- self.provide_data()
3706- cfg = hookenv.config()
3707- if cfg.implicit_save:
3708- cfg.save()
3709+ hookenv._run_atstart()
3710+ try:
3711+ hook_name = hookenv.hook_name()
3712+ if hook_name == 'stop':
3713+ self.stop_services()
3714+ else:
3715+ self.reconfigure_services()
3716+ self.provide_data()
3717+ except SystemExit as x:
3718+ if x.code is None or x.code == 0:
3719+ hookenv._run_atexit()
3720+ hookenv._run_atexit()
3721
3722 def provide_data(self):
3723 """
3724
3725=== modified file 'hooks/charmhelpers/core/services/helpers.py'
3726--- hooks/charmhelpers/core/services/helpers.py 2015-06-24 12:22:08 +0000
3727+++ hooks/charmhelpers/core/services/helpers.py 2015-11-03 12:30:15 +0000
3728@@ -16,6 +16,7 @@
3729
3730 import os
3731 import yaml
3732+
3733 from charmhelpers.core import hookenv
3734 from charmhelpers.core import host
3735 from charmhelpers.core import templating
3736@@ -240,42 +241,43 @@
3737 action.
3738
3739 :param str source: The template source file, relative to
3740- `$CHARM_DIR/templates`
3741+ `$CHARM_DIR/templates`
3742
3743 :param str target: The target to write the rendered template to
3744 :param str owner: The owner of the rendered file
3745 :param str group: The group of the rendered file
3746 :param int perms: The permissions of the rendered file
3747- :param list template_searchpath: List of paths to search for template in
3748 :param partial on_change_action: functools partial to be executed when
3749 rendered file changes
3750+ :param jinja2 loader template_loader: A jinja2 template loader
3751 """
3752 def __init__(self, source, target,
3753 owner='root', group='root', perms=0o444,
3754- template_searchpath=None, on_change_action=None):
3755+ on_change_action=None, template_loader=None):
3756 self.source = source
3757 self.target = target
3758 self.owner = owner
3759 self.group = group
3760 self.perms = perms
3761- self.template_searchpath = template_searchpath
3762 self.on_change_action = on_change_action
3763+ self.template_loader = template_loader
3764
3765 def __call__(self, manager, service_name, event_name):
3766 pre_checksum = ''
3767 if self.on_change_action and os.path.isfile(self.target):
3768 pre_checksum = host.file_hash(self.target)
3769- print pre_checksum
3770 service = manager.get_service(service_name)
3771 context = {}
3772 for ctx in service.get('required_data', []):
3773 context.update(ctx)
3774 templating.render(self.source, self.target, context,
3775 self.owner, self.group, self.perms,
3776- self.template_searchpath)
3777+ template_loader=self.template_loader)
3778 if self.on_change_action:
3779 if pre_checksum == host.file_hash(self.target):
3780- print "No change detected " + self.target
3781+ hookenv.log(
3782+ 'No change detected: {}'.format(self.target),
3783+ hookenv.DEBUG)
3784 else:
3785 self.on_change_action()
3786
3787
3788=== modified file 'hooks/charmhelpers/core/strutils.py'
3789--- hooks/charmhelpers/core/strutils.py 2015-06-24 12:22:08 +0000
3790+++ hooks/charmhelpers/core/strutils.py 2015-11-03 12:30:15 +0000
3791@@ -18,6 +18,7 @@
3792 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3793
3794 import six
3795+import re
3796
3797
3798 def bool_from_string(value):
3799@@ -40,3 +41,32 @@
3800
3801 msg = "Unable to interpret string value '%s' as boolean" % (value)
3802 raise ValueError(msg)
3803+
3804+
3805+def bytes_from_string(value):
3806+ """Interpret human readable string value as bytes.
3807+
3808+ Returns int
3809+ """
3810+ BYTE_POWER = {
3811+ 'K': 1,
3812+ 'KB': 1,
3813+ 'M': 2,
3814+ 'MB': 2,
3815+ 'G': 3,
3816+ 'GB': 3,
3817+ 'T': 4,
3818+ 'TB': 4,
3819+ 'P': 5,
3820+ 'PB': 5,
3821+ }
3822+ if isinstance(value, six.string_types):
3823+ value = six.text_type(value)
3824+ else:
3825+ msg = "Unable to interpret non-string value '%s' as boolean" % (value)
3826+ raise ValueError(msg)
3827+ matches = re.match("([0-9]+)([a-zA-Z]+)", value)
3828+ if not matches:
3829+ msg = "Unable to interpret string value '%s' as bytes" % (value)
3830+ raise ValueError(msg)
3831+ return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
3832
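Worked examples of bytes_from_string(), which hugepage_support() uses above to compute the kernel.shmmax floor (values per the BYTE_POWER table, powers of 1024):

    from charmhelpers.core.strutils import bytes_from_string

    bytes_from_string('2MB')   # 2 * 1024**2 == 2097152
    bytes_from_string('1G')    # 1 * 1024**3 == 1073741824
    bytes_from_string('512K')  # 512 * 1024  == 524288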
3833=== modified file 'hooks/charmhelpers/core/templating.py'
3834--- hooks/charmhelpers/core/templating.py 2015-06-24 12:22:08 +0000
3835+++ hooks/charmhelpers/core/templating.py 2015-11-03 12:30:15 +0000
3836@@ -21,8 +21,7 @@
3837
3838
3839 def render(source, target, context, owner='root', group='root',
3840- perms=0o444, templates_dir=None, encoding='UTF-8',
3841- template_searchpath=None):
3842+ perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
3843 """
3844 Render a template.
3845
3846@@ -41,7 +40,7 @@
3847 this will attempt to use charmhelpers.fetch.apt_install to install it.
3848 """
3849 try:
3850- from jinja2 import ChoiceLoader, FileSystemLoader, Environment, exceptions
3851+ from jinja2 import FileSystemLoader, Environment, exceptions
3852 except ImportError:
3853 try:
3854 from charmhelpers.fetch import apt_install
3855@@ -51,25 +50,26 @@
3856 level=hookenv.ERROR)
3857 raise
3858 apt_install('python-jinja2', fatal=True)
3859- from jinja2 import ChoiceLoader, FileSystemLoader, Environment, exceptions
3860+ from jinja2 import FileSystemLoader, Environment, exceptions
3861
3862- if template_searchpath:
3863- fs_loaders = []
3864- for tmpl_dir in template_searchpath:
3865- fs_loaders.append(FileSystemLoader(tmpl_dir))
3866- loader = ChoiceLoader(fs_loaders)
3867+ if template_loader:
3868+ template_env = Environment(loader=template_loader)
3869 else:
3870 if templates_dir is None:
3871 templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
3872- loader = Environment(loader=FileSystemLoader(templates_dir))
3873+ template_env = Environment(loader=FileSystemLoader(templates_dir))
3874 try:
3875 source = source
3876- template = loader.get_template(source)
3877+ template = template_env.get_template(source)
3878 except exceptions.TemplateNotFound as e:
3879 hookenv.log('Could not load template %s from %s.' %
3880 (source, templates_dir),
3881 level=hookenv.ERROR)
3882 raise e
3883 content = template.render(context)
3884- host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
3885+ target_dir = os.path.dirname(target)
3886+ if not os.path.exists(target_dir):
3887+ # This is a terrible default directory permission, as the file
3888+ # or its siblings will often contain secrets.
3889+ host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
3890 host.write_file(target, content.encode(encoding), owner, group, perms)
3891
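A sketch of rendering with the new template_loader parameter, which replaces the removed template_searchpath; the search paths, context values, and owner/group are hypothetical, and the call assumes root and that the named owner/group exist on the unit:

    from jinja2 import ChoiceLoader, FileSystemLoader
    from charmhelpers.core.templating import render

    loader = ChoiceLoader([FileSystemLoader('templates/liberty'),
                           FileSystemLoader('templates')])
    render('ml2_conf.ini', '/etc/neutron/plugins/ml2/ml2_conf.ini',
           context={'overlay_network_type': 'gre'},
           owner='root', group='neutron', perms=0o440,
           template_loader=loader)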
3892=== modified file 'hooks/charmhelpers/core/unitdata.py'
3893--- hooks/charmhelpers/core/unitdata.py 2015-06-24 12:22:08 +0000
3894+++ hooks/charmhelpers/core/unitdata.py 2015-11-03 12:30:15 +0000
3895@@ -152,6 +152,7 @@
3896 import collections
3897 import contextlib
3898 import datetime
3899+import itertools
3900 import json
3901 import os
3902 import pprint
3903@@ -164,8 +165,7 @@
3904 class Storage(object):
3905 """Simple key value database for local unit state within charms.
3906
3907- Modifications are automatically committed at hook exit. That's
3908- currently regardless of exit code.
3909+ Modifications are not persisted unless :meth:`flush` is called.
3910
3911 To support dicts, lists, integer, floats, and booleans values
3912 are automatically json encoded/decoded.
3913@@ -173,8 +173,11 @@
3914 def __init__(self, path=None):
3915 self.db_path = path
3916 if path is None:
3917- self.db_path = os.path.join(
3918- os.environ.get('CHARM_DIR', ''), '.unit-state.db')
3919+ if 'UNIT_STATE_DB' in os.environ:
3920+ self.db_path = os.environ['UNIT_STATE_DB']
3921+ else:
3922+ self.db_path = os.path.join(
3923+ os.environ.get('CHARM_DIR', ''), '.unit-state.db')
3924 self.conn = sqlite3.connect('%s' % self.db_path)
3925 self.cursor = self.conn.cursor()
3926 self.revision = None
3927@@ -189,15 +192,8 @@
3928 self.conn.close()
3929 self._closed = True
3930
3931- def _scoped_query(self, stmt, params=None):
3932- if params is None:
3933- params = []
3934- return stmt, params
3935-
3936 def get(self, key, default=None, record=False):
3937- self.cursor.execute(
3938- *self._scoped_query(
3939- 'select data from kv where key=?', [key]))
3940+ self.cursor.execute('select data from kv where key=?', [key])
3941 result = self.cursor.fetchone()
3942 if not result:
3943 return default
3944@@ -206,33 +202,81 @@
3945 return json.loads(result[0])
3946
3947 def getrange(self, key_prefix, strip=False):
3948- stmt = "select key, data from kv where key like '%s%%'" % key_prefix
3949- self.cursor.execute(*self._scoped_query(stmt))
3950+ """
3951+ Get a range of keys starting with a common prefix as a mapping of
3952+ keys to values.
3953+
3954+ :param str key_prefix: Common prefix among all keys
3955+ :param bool strip: Optionally strip the common prefix from the key
3956+ names in the returned dict
3957+ :return dict: A (possibly empty) dict of key-value mappings
3958+ """
3959+ self.cursor.execute("select key, data from kv where key like ?",
3960+ ['%s%%' % key_prefix])
3961 result = self.cursor.fetchall()
3962
3963 if not result:
3964- return None
3965+ return {}
3966 if not strip:
3967 key_prefix = ''
3968 return dict([
3969 (k[len(key_prefix):], json.loads(v)) for k, v in result])
3970
3971 def update(self, mapping, prefix=""):
3972+ """
3973+ Set the values of multiple keys at once.
3974+
3975+ :param dict mapping: Mapping of keys to values
3976+ :param str prefix: Optional prefix to apply to all keys in `mapping`
3977+ before setting
3978+ """
3979 for k, v in mapping.items():
3980 self.set("%s%s" % (prefix, k), v)
3981
3982 def unset(self, key):
3983+ """
3984+ Remove a key from the database entirely.
3985+ """
3986 self.cursor.execute('delete from kv where key=?', [key])
3987 if self.revision and self.cursor.rowcount:
3988 self.cursor.execute(
3989 'insert into kv_revisions values (?, ?, ?)',
3990 [key, self.revision, json.dumps('DELETED')])
3991
3992+ def unsetrange(self, keys=None, prefix=""):
3993+ """
3994+ Remove a range of keys starting with a common prefix, from the database
3995+ entirely.
3996+
3997+ :param list keys: List of keys to remove.
3998+ :param str prefix: Optional prefix to apply to all keys in ``keys``
3999+ before removing.
4000+ """
4001+ if keys is not None:
4002+ keys = ['%s%s' % (prefix, key) for key in keys]
4003+ self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
4004+ if self.revision and self.cursor.rowcount:
4005+ self.cursor.execute(
4006+ 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
4007+ list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
4008+ else:
4009+ self.cursor.execute('delete from kv where key like ?',
4010+ ['%s%%' % prefix])
4011+ if self.revision and self.cursor.rowcount:
4012+ self.cursor.execute(
4013+ 'insert into kv_revisions values (?, ?, ?)',
4014+ ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
4015+
4016 def set(self, key, value):
4017+ """
4018+ Set a value in the database.
4019+
4020+ :param str key: Key to set the value for
4021+ :param value: Any JSON-serializable value to be set
4022+ """
4023 serialized = json.dumps(value)
4024
4025- self.cursor.execute(
4026- 'select data from kv where key=?', [key])
4027+ self.cursor.execute('select data from kv where key=?', [key])
4028 exists = self.cursor.fetchone()
4029
4030 # Skip mutations to the same value
4031
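A minimal sketch of the unitdata.Storage behaviour documented above — changes are only persisted on flush(), getrange() now returns an empty dict rather than None, and unsetrange() removes a keyed prefix; the keys and values are illustrative:

    from charmhelpers.core.unitdata import Storage

    db = Storage(':memory:')            # omit the path to use $UNIT_STATE_DB or .unit-state.db
    db.update({'vip': '10.0.0.10', 'port': 8080}, prefix='config.')
    db.getrange('config.', strip=True)  # {'vip': '10.0.0.10', 'port': 8080}
    db.unsetrange(prefix='config.')     # removes both keys
    db.flush()                          # nothing is persisted until flush()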
4032=== modified file 'hooks/charmhelpers/fetch/__init__.py'
4033--- hooks/charmhelpers/fetch/__init__.py 2015-06-24 12:22:08 +0000
4034+++ hooks/charmhelpers/fetch/__init__.py 2015-11-03 12:30:15 +0000
4035@@ -90,6 +90,14 @@
4036 'kilo/proposed': 'trusty-proposed/kilo',
4037 'trusty-kilo/proposed': 'trusty-proposed/kilo',
4038 'trusty-proposed/kilo': 'trusty-proposed/kilo',
4039+ # Liberty
4040+ 'liberty': 'trusty-updates/liberty',
4041+ 'trusty-liberty': 'trusty-updates/liberty',
4042+ 'trusty-liberty/updates': 'trusty-updates/liberty',
4043+ 'trusty-updates/liberty': 'trusty-updates/liberty',
4044+ 'liberty/proposed': 'trusty-proposed/liberty',
4045+ 'trusty-liberty/proposed': 'trusty-proposed/liberty',
4046+ 'trusty-proposed/liberty': 'trusty-proposed/liberty',
4047 }
4048
4049 # The order of this list is very important. Handlers should be listed in from
4050@@ -215,19 +223,27 @@
4051 _run_apt_command(cmd, fatal)
4052
4053
4054+def apt_mark(packages, mark, fatal=False):
4055+ """Flag one or more packages using apt-mark"""
4056+ log("Marking {} as {}".format(packages, mark))
4057+ cmd = ['apt-mark', mark]
4058+ if isinstance(packages, six.string_types):
4059+ cmd.append(packages)
4060+ else:
4061+ cmd.extend(packages)
4062+
4063+ if fatal:
4064+ subprocess.check_call(cmd, universal_newlines=True)
4065+ else:
4066+ subprocess.call(cmd, universal_newlines=True)
4067+
4068+
4069 def apt_hold(packages, fatal=False):
4070- """Hold one or more packages"""
4071- cmd = ['apt-mark', 'hold']
4072- if isinstance(packages, six.string_types):
4073- cmd.append(packages)
4074- else:
4075- cmd.extend(packages)
4076- log("Holding {}".format(packages))
4077-
4078- if fatal:
4079- subprocess.check_call(cmd)
4080- else:
4081- subprocess.call(cmd)
4082+ return apt_mark(packages, 'hold', fatal=fatal)
4083+
4084+
4085+def apt_unhold(packages, fatal=False):
4086+ return apt_mark(packages, 'unhold', fatal=fatal)
4087
4088
4089 def add_source(source, key=None):
4090@@ -370,8 +386,9 @@
4091 for handler in handlers:
4092 try:
4093 installed_to = handler.install(source, *args, **kwargs)
4094- except UnhandledSource:
4095- pass
4096+ except UnhandledSource as e:
4097+ log('Install source attempt unsuccessful: {}'.format(e),
4098+ level='WARNING')
4099 if not installed_to:
4100 raise UnhandledSource("No handler found for source {}".format(source))
4101 return installed_to
4102
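A short sketch of the refactored apt-mark helpers; the package names are hypothetical and the calls assume root on a Debian/Ubuntu host:

    from charmhelpers.fetch import apt_hold, apt_mark, apt_unhold

    # Pin packages during an upgrade window, then release them.
    apt_hold(['neutron-server', 'neutron-plugin-ml2'], fatal=True)
    # ... perform the upgrade steps ...
    apt_unhold(['neutron-server', 'neutron-plugin-ml2'])
    # apt_mark() can apply any other mark directly, e.g. 'manual':
    apt_mark('python-six', 'manual')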
4103=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
4104--- hooks/charmhelpers/fetch/archiveurl.py 2015-06-24 12:22:08 +0000
4105+++ hooks/charmhelpers/fetch/archiveurl.py 2015-11-03 12:30:15 +0000
4106@@ -77,6 +77,8 @@
4107 def can_handle(self, source):
4108 url_parts = self.parse_url(source)
4109 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
4110+ # XXX: Why is this returning a boolean and a string? It's
4111+ # doomed to fail since "bool(can_handle('foo://'))" will be True.
4112 return "Wrong source type"
4113 if get_archive_handler(self.base_url(source)):
4114 return True
4115@@ -155,7 +157,11 @@
4116 else:
4117 algorithms = hashlib.algorithms_available
4118 if key in algorithms:
4119- check_hash(dld_file, value, key)
4120+ if len(value) != 1:
4121+ raise TypeError(
4122+ "Expected 1 hash value, not %d" % len(value))
4123+ expected = value[0]
4124+ check_hash(dld_file, expected, key)
4125 if checksum:
4126 check_hash(dld_file, checksum, hash_type)
4127 return extract(dld_file, dest)
4128
4129=== modified file 'hooks/charmhelpers/fetch/giturl.py'
4130--- hooks/charmhelpers/fetch/giturl.py 2015-06-24 12:22:08 +0000
4131+++ hooks/charmhelpers/fetch/giturl.py 2015-11-03 12:30:15 +0000
4132@@ -67,7 +67,7 @@
4133 try:
4134 self.clone(source, dest_dir, branch, depth)
4135 except GitCommandError as e:
4136- raise UnhandledSource(e.message)
4137+ raise UnhandledSource(e)
4138 except OSError as e:
4139 raise UnhandledSource(e.strerror)
4140 return dest_dir
4141
4142=== modified file 'hooks/services.py'
4143--- hooks/services.py 2015-06-24 12:44:33 +0000
4144+++ hooks/services.py 2015-11-03 12:30:15 +0000
4145@@ -2,7 +2,7 @@
4146 from charmhelpers.core import hookenv
4147 from charmhelpers.core.services.base import ServiceManager
4148 from charmhelpers.core.services import helpers
4149-from charmhelpers.contrib.openstack.templating import os_template_dirs
4150+from charmhelpers.contrib.openstack.templating import get_loader
4151 from charmhelpers.contrib.openstack.utils import os_release, remote_restart
4152
4153 import odl_utils
4154@@ -33,8 +33,7 @@
4155 'data_ready': [
4156 helpers.render_template(
4157 source='ml2_conf.ini',
4158- template_searchpath=os_template_dirs('templates/',
4159- release),
4160+ template_loader=get_loader('templates/', release),
4161 target='/etc/neutron/plugins/ml2/ml2_conf.ini',
4162 on_change_action=(partial(remote_restart,
4163 'neutron-plugin-api-subordinate',
4164
4165=== added directory 'tests'
4166=== added directory 'tests/charmhelpers'
4167=== added file 'tests/charmhelpers/__init__.py'
4168--- tests/charmhelpers/__init__.py 1970-01-01 00:00:00 +0000
4169+++ tests/charmhelpers/__init__.py 2015-11-03 12:30:15 +0000
4170@@ -0,0 +1,38 @@
4171+# Copyright 2014-2015 Canonical Limited.
4172+#
4173+# This file is part of charm-helpers.
4174+#
4175+# charm-helpers is free software: you can redistribute it and/or modify
4176+# it under the terms of the GNU Lesser General Public License version 3 as
4177+# published by the Free Software Foundation.
4178+#
4179+# charm-helpers is distributed in the hope that it will be useful,
4180+# but WITHOUT ANY WARRANTY; without even the implied warranty of
4181+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4182+# GNU Lesser General Public License for more details.
4183+#
4184+# You should have received a copy of the GNU Lesser General Public License
4185+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4186+
4187+# Bootstrap charm-helpers, installing its dependencies if necessary using
4188+# only standard libraries.
4189+import subprocess
4190+import sys
4191+
4192+try:
4193+ import six # flake8: noqa
4194+except ImportError:
4195+ if sys.version_info.major == 2:
4196+ subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
4197+ else:
4198+ subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
4199+ import six # flake8: noqa
4200+
4201+try:
4202+ import yaml # flake8: noqa
4203+except ImportError:
4204+ if sys.version_info.major == 2:
4205+ subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
4206+ else:
4207+ subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
4208+ import yaml # flake8: noqa
4209
4210=== added directory 'tests/charmhelpers/contrib'
4211=== added file 'tests/charmhelpers/contrib/__init__.py'
4212--- tests/charmhelpers/contrib/__init__.py 1970-01-01 00:00:00 +0000
4213+++ tests/charmhelpers/contrib/__init__.py 2015-11-03 12:30:15 +0000
4214@@ -0,0 +1,15 @@
4215+# Copyright 2014-2015 Canonical Limited.
4216+#
4217+# This file is part of charm-helpers.
4218+#
4219+# charm-helpers is free software: you can redistribute it and/or modify
4220+# it under the terms of the GNU Lesser General Public License version 3 as
4221+# published by the Free Software Foundation.
4222+#
4223+# charm-helpers is distributed in the hope that it will be useful,
4224+# but WITHOUT ANY WARRANTY; without even the implied warranty of
4225+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4226+# GNU Lesser General Public License for more details.
4227+#
4228+# You should have received a copy of the GNU Lesser General Public License
4229+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4230
4231=== added directory 'tests/charmhelpers/contrib/amulet'
4232=== added file 'tests/charmhelpers/contrib/amulet/__init__.py'
4233--- tests/charmhelpers/contrib/amulet/__init__.py 1970-01-01 00:00:00 +0000
4234+++ tests/charmhelpers/contrib/amulet/__init__.py 2015-11-03 12:30:15 +0000
4235@@ -0,0 +1,15 @@
4236+# Copyright 2014-2015 Canonical Limited.
4237+#
4238+# This file is part of charm-helpers.
4239+#
4240+# charm-helpers is free software: you can redistribute it and/or modify
4241+# it under the terms of the GNU Lesser General Public License version 3 as
4242+# published by the Free Software Foundation.
4243+#
4244+# charm-helpers is distributed in the hope that it will be useful,
4245+# but WITHOUT ANY WARRANTY; without even the implied warranty of
4246+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4247+# GNU Lesser General Public License for more details.
4248+#
4249+# You should have received a copy of the GNU Lesser General Public License
4250+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4251
4252=== added file 'tests/charmhelpers/contrib/amulet/deployment.py'
4253--- tests/charmhelpers/contrib/amulet/deployment.py 1970-01-01 00:00:00 +0000
4254+++ tests/charmhelpers/contrib/amulet/deployment.py 2015-11-03 12:30:15 +0000
4255@@ -0,0 +1,94 @@
4256+# Copyright 2014-2015 Canonical Limited.
4257+#
4258+# This file is part of charm-helpers.
4259+#
4260+# charm-helpers is free software: you can redistribute it and/or modify
4261+# it under the terms of the GNU Lesser General Public License version 3 as
4262+# published by the Free Software Foundation.
4263+#
4264+# charm-helpers is distributed in the hope that it will be useful,
4265+# but WITHOUT ANY WARRANTY; without even the implied warranty of
4266+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4267+# GNU Lesser General Public License for more details.
4268+#
4269+# You should have received a copy of the GNU Lesser General Public License
4270+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4271+
4272+import amulet
4273+import os
4274+import six
4275+
4276+
4277+class AmuletDeployment(object):
4278+ """Amulet deployment.
4279+
4280+ This class provides generic Amulet deployment and test runner
4281+ methods.
4282+ """
4283+
4284+ def __init__(self, series=None):
4285+ """Initialize the deployment environment."""
4286+ self.series = None
4287+
4288+ if series:
4289+ self.series = series
4290+ self.d = amulet.Deployment(series=self.series)
4291+ else:
4292+ self.d = amulet.Deployment()
4293+
4294+ def _add_services(self, this_service, other_services):
4295+ """Add services.
4296+
4297+ Add services to the deployment where this_service is the local charm
4298+ that we're testing and other_services are the other services that
4299+ are being used in the local amulet tests.
4300+ """
4301+ if this_service['name'] != os.path.basename(os.getcwd()):
4302+ s = this_service['name']
4303+ msg = "The charm's root directory name needs to be {}".format(s)
4304+ amulet.raise_status(amulet.FAIL, msg=msg)
4305+
4306+ if 'units' not in this_service:
4307+ this_service['units'] = 1
4308+
4309+ self.d.add(this_service['name'], units=this_service['units'],
4310+ constraints=this_service.get('constraints'))
4311+
4312+ for svc in other_services:
4313+ if 'location' in svc:
4314+ branch_location = svc['location']
4315+ elif self.series:
4316+ branch_location = 'cs:{}/{}'.format(self.series, svc['name']),
4317+ else:
4318+ branch_location = None
4319+
4320+ if 'units' not in svc:
4321+ svc['units'] = 1
4322+ self.d.add(svc['name'], charm=branch_location, units=svc['units'],
4323+ constraints=svc.get('constraints'))
4324+
4325+ def _add_relations(self, relations):
4326+ """Add all of the relations for the services."""
4327+ for k, v in six.iteritems(relations):
4328+ self.d.relate(k, v)
4329+
4330+ def _configure_services(self, configs):
4331+ """Configure all of the services."""
4332+ for service, config in six.iteritems(configs):
4333+ self.d.configure(service, config)
4334+
4335+ def _deploy(self):
4336+ """Deploy environment and wait for all hooks to finish executing."""
4337+ try:
4338+ self.d.setup(timeout=900)
4339+ self.d.sentry.wait(timeout=900)
4340+ except amulet.helpers.TimeoutError:
4341+ amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
4342+ except Exception:
4343+ raise
4344+
4345+ def run_tests(self):
4346+ """Run all of the methods that are prefixed with 'test_'."""
4347+ for test in dir(self):
4348+ if test.startswith('test_'):
4349+ getattr(self, test)()
4350
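A hedged sketch of how a charm's amulet tests might build on the new AmuletDeployment base class; the charm, relation, and test names are illustrative only, _add_services() expects the charm's root directory to match this_service['name'], and a bootstrapped Juju environment is assumed:

    from charmhelpers.contrib.amulet.deployment import AmuletDeployment

    class ExampleDeployment(AmuletDeployment):
        def __init__(self):
            super(ExampleDeployment, self).__init__(series='trusty')
            self._add_services({'name': 'neutron-api-odl'},
                               [{'name': 'neutron-api', 'units': 1}])
            self._add_relations({'neutron-api-odl:neutron-plugin-api-subordinate':
                                 'neutron-api:neutron-plugin-api-subordinate'})
            self._deploy()

        def test_100_services(self):
            pass  # individual checks would go here

    ExampleDeployment().run_tests()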
4351=== added file 'tests/charmhelpers/contrib/amulet/utils.py'
4352--- tests/charmhelpers/contrib/amulet/utils.py 1970-01-01 00:00:00 +0000
4353+++ tests/charmhelpers/contrib/amulet/utils.py 2015-11-03 12:30:15 +0000
4354@@ -0,0 +1,553 @@
4355+# Copyright 2014-2015 Canonical Limited.
4356+#
4357+# This file is part of charm-helpers.
4358+#
4359+# charm-helpers is free software: you can redistribute it and/or modify
4360+# it under the terms of the GNU Lesser General Public License version 3 as
4361+# published by the Free Software Foundation.
4362+#
4363+# charm-helpers is distributed in the hope that it will be useful,
4364+# but WITHOUT ANY WARRANTY; without even the implied warranty of
4365+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4366+# GNU Lesser General Public License for more details.
4367+#
4368+# You should have received a copy of the GNU Lesser General Public License
4369+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4370+
4371+import io
4372+import logging
4373+import os
4374+import re
4375+import sys
4376+import time
4377+
4378+import amulet
4379+import distro_info
4380+import six
4381+from six.moves import configparser
4382+if six.PY3:
4383+ from urllib import parse as urlparse
4384+else:
4385+ import urlparse
4386+
4387+
4388+class AmuletUtils(object):
4389+ """Amulet utilities.
4390+
4391+ This class provides common utility functions that are used by Amulet
4392+ tests.
4393+ """
4394+
4395+ def __init__(self, log_level=logging.ERROR):
4396+ self.log = self.get_logger(level=log_level)
4397+ self.ubuntu_releases = self.get_ubuntu_releases()
4398+
4399+ def get_logger(self, name="amulet-logger", level=logging.DEBUG):
4400+ """Get a logger object that will log to stdout."""
4401+ log = logging
4402+ logger = log.getLogger(name)
4403+ fmt = log.Formatter("%(asctime)s %(funcName)s "
4404+ "%(levelname)s: %(message)s")
4405+
4406+ handler = log.StreamHandler(stream=sys.stdout)
4407+ handler.setLevel(level)
4408+ handler.setFormatter(fmt)
4409+
4410+ logger.addHandler(handler)
4411+ logger.setLevel(level)
4412+
4413+ return logger
4414+
4415+ def valid_ip(self, ip):
4416+ if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
4417+ return True
4418+ else:
4419+ return False
4420+
4421+ def valid_url(self, url):
4422+ p = re.compile(
4423+ r'^(?:http|ftp)s?://'
4424+ r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
4425+ r'localhost|'
4426+ r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
4427+ r'(?::\d+)?'
4428+ r'(?:/?|[/?]\S+)$',
4429+ re.IGNORECASE)
4430+ if p.match(url):
4431+ return True
4432+ else:
4433+ return False
4434+
4435+ def get_ubuntu_release_from_sentry(self, sentry_unit):
4436+ """Get Ubuntu release codename from sentry unit.
4437+
4438+ :param sentry_unit: amulet sentry/service unit pointer
4439+ :returns: list of strings - release codename, failure message
4440+ """
4441+ msg = None
4442+ cmd = 'lsb_release -cs'
4443+ release, code = sentry_unit.run(cmd)
4444+ if code == 0:
4445+ self.log.debug('{} lsb_release: {}'.format(
4446+ sentry_unit.info['unit_name'], release))
4447+ else:
4448+ msg = ('{} `{}` returned {} '
4449+ '{}'.format(sentry_unit.info['unit_name'],
4450+ cmd, release, code))
4451+ if release not in self.ubuntu_releases:
4452+ msg = ("Release ({}) not found in Ubuntu releases "
4453+ "({})".format(release, self.ubuntu_releases))
4454+ return release, msg
4455+
4456+ def validate_services(self, commands):
4457+ """Validate that lists of commands succeed on service units. Can be
4458+ used to verify system services are running on the corresponding
4459+ service units.
4460+
4461+ :param commands: dict with sentry keys and arbitrary command list vals
4462+ :returns: None if successful, Failure string message otherwise
4463+ """
4464+ self.log.debug('Checking status of system services...')
4465+
4466+ # /!\ DEPRECATION WARNING (beisner):
4467+ # New and existing tests should be rewritten to use
4468+ # validate_services_by_name() as it is aware of init systems.
4469+ self.log.warn('/!\\ DEPRECATION WARNING: use '
4470+ 'validate_services_by_name instead of validate_services '
4471+ 'due to init system differences.')
4472+
4473+ for k, v in six.iteritems(commands):
4474+ for cmd in v:
4475+ output, code = k.run(cmd)
4476+ self.log.debug('{} `{}` returned '
4477+ '{}'.format(k.info['unit_name'],
4478+ cmd, code))
4479+ if code != 0:
4480+ return "command `{}` returned {}".format(cmd, str(code))
4481+ return None
4482+
4483+ def validate_services_by_name(self, sentry_services):
4484+ """Validate system service status by service name, automatically
4485+ detecting init system based on Ubuntu release codename.
4486+
4487+ :param sentry_services: dict with sentry keys and svc list values
4488+ :returns: None if successful, Failure string message otherwise
4489+ """
4490+ self.log.debug('Checking status of system services...')
4491+
4492+ # Point at which systemd became a thing
4493+ systemd_switch = self.ubuntu_releases.index('vivid')
4494+
4495+ for sentry_unit, services_list in six.iteritems(sentry_services):
4496+ # Get lsb_release codename from unit
4497+ release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
4498+ if ret:
4499+ return ret
4500+
4501+ for service_name in services_list:
4502+ if (self.ubuntu_releases.index(release) >= systemd_switch or
4503+ service_name in ['rabbitmq-server', 'apache2']):
4504+ # init is systemd (or regular sysv)
4505+ cmd = 'sudo service {} status'.format(service_name)
4506+ output, code = sentry_unit.run(cmd)
4507+ service_running = code == 0
4508+ elif self.ubuntu_releases.index(release) < systemd_switch:
4509+ # init is upstart
4510+ cmd = 'sudo status {}'.format(service_name)
4511+ output, code = sentry_unit.run(cmd)
4512+ service_running = code == 0 and "start/running" in output
4513+
4514+ self.log.debug('{} `{}` returned '
4515+ '{}'.format(sentry_unit.info['unit_name'],
4516+ cmd, code))
4517+ if not service_running:
4518+ return u"command `{}` returned {} {}".format(
4519+ cmd, output, str(code))
4520+ return None
4521+
4522+ def _get_config(self, unit, filename):
4523+ """Get a ConfigParser object for parsing a unit's config file."""
4524+ file_contents = unit.file_contents(filename)
4525+
4526+ # NOTE(beisner): by default, ConfigParser does not handle options
4527+ # with no value, such as the flags used in the mysql my.cnf file.
4528+ # https://bugs.python.org/issue7005
4529+ config = configparser.ConfigParser(allow_no_value=True)
4530+ config.readfp(io.StringIO(file_contents))
4531+ return config
4532+
4533+ def validate_config_data(self, sentry_unit, config_file, section,
4534+ expected):
4535+ """Validate config file data.
4536+
4537+ Verify that the specified section of the config file contains
4538+ the expected option key:value pairs.
4539+
4540+ Compare expected dictionary data vs actual dictionary data.
4541+ The values in the 'expected' dictionary can be strings, bools, ints,
4542+ longs, or can be a function that evaluates a variable and returns a
4543+ bool.
4544+ """
4545+ self.log.debug('Validating config file data ({} in {} on {})'
4546+ '...'.format(section, config_file,
4547+ sentry_unit.info['unit_name']))
4548+ config = self._get_config(sentry_unit, config_file)
4549+
4550+ if section != 'DEFAULT' and not config.has_section(section):
4551+ return "section [{}] does not exist".format(section)
4552+
4553+ for k in expected.keys():
4554+ if not config.has_option(section, k):
4555+ return "section [{}] is missing option {}".format(section, k)
4556+
4557+ actual = config.get(section, k)
4558+ v = expected[k]
4559+ if (isinstance(v, six.string_types) or
4560+ isinstance(v, bool) or
4561+ isinstance(v, six.integer_types)):
4562+ # handle explicit values
4563+ if actual != v:
4564+ return "section [{}] {}:{} != expected {}:{}".format(
4565+ section, k, actual, k, expected[k])
4566+ # handle function pointers, such as not_null or valid_ip
4567+ elif not v(actual):
4568+ return "section [{}] {}:{} != expected {}:{}".format(
4569+ section, k, actual, k, expected[k])
4570+ return None
4571+
4572+ def _validate_dict_data(self, expected, actual):
4573+ """Validate dictionary data.
4574+
4575+ Compare expected dictionary data vs actual dictionary data.
4576+ The values in the 'expected' dictionary can be strings, bools, ints,
4577+ longs, or can be a function that evaluates a variable and returns a
4578+ bool.
4579+ """
4580+ self.log.debug('actual: {}'.format(repr(actual)))
4581+ self.log.debug('expected: {}'.format(repr(expected)))
4582+
4583+ for k, v in six.iteritems(expected):
4584+ if k in actual:
4585+ if (isinstance(v, six.string_types) or
4586+ isinstance(v, bool) or
4587+ isinstance(v, six.integer_types)):
4588+ # handle explicit values
4589+ if v != actual[k]:
4590+ return "{}:{}".format(k, actual[k])
4591+ # handle function pointers, such as not_null or valid_ip
4592+ elif not v(actual[k]):
4593+ return "{}:{}".format(k, actual[k])
4594+ else:
4595+ return "key '{}' does not exist".format(k)
4596+ return None
4597+
4598+ def validate_relation_data(self, sentry_unit, relation, expected):
4599+ """Validate actual relation data based on expected relation data."""
4600+ actual = sentry_unit.relation(relation[0], relation[1])
4601+ return self._validate_dict_data(expected, actual)
4602+
4603+ def _validate_list_data(self, expected, actual):
4604+ """Compare expected list vs actual list data."""
4605+ for e in expected:
4606+ if e not in actual:
4607+ return "expected item {} not found in actual list".format(e)
4608+ return None
4609+
4610+ def not_null(self, string):
4611+ if string is not None:
4612+ return True
4613+ else:
4614+ return False
4615+
4616+ def _get_file_mtime(self, sentry_unit, filename):
4617+ """Get last modification time of file."""
4618+ return sentry_unit.file_stat(filename)['mtime']
4619+
4620+ def _get_dir_mtime(self, sentry_unit, directory):
4621+ """Get last modification time of directory."""
4622+ return sentry_unit.directory_stat(directory)['mtime']
4623+
4624+ def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
4625+ """Get process' start time.
4626+
4627+ Determine start time of the process based on the last modification
4628+ time of the /proc/pid directory. If pgrep_full is True, the process
4629+ name is matched against the full command line.
4630+ """
4631+ if pgrep_full:
4632+ cmd = 'pgrep -o -f {}'.format(service)
4633+ else:
4634+ cmd = 'pgrep -o {}'.format(service)
4635+ cmd = cmd + ' | grep -v pgrep || exit 0'
4636+ cmd_out = sentry_unit.run(cmd)
4637+ self.log.debug('CMDout: ' + str(cmd_out))
4638+ if cmd_out[0]:
4639+ self.log.debug('Pid for %s %s' % (service, str(cmd_out[0])))
4640+ proc_dir = '/proc/{}'.format(cmd_out[0].strip())
4641+ return self._get_dir_mtime(sentry_unit, proc_dir)
4642+
4643+ def service_restarted(self, sentry_unit, service, filename,
4644+ pgrep_full=False, sleep_time=20):
4645+ """Check if service was restarted.
4646+
4647+ Compare a service's start time vs a file's last modification time
4648+ (such as a config file for that service) to determine if the service
4649+ has been restarted.
4650+ """
4651+ time.sleep(sleep_time)
4652+ if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
4653+ self._get_file_mtime(sentry_unit, filename)):
4654+ return True
4655+ else:
4656+ return False
4657+
4658+ def service_restarted_since(self, sentry_unit, mtime, service,
4659+ pgrep_full=False, sleep_time=20,
4660+ retry_count=2):
4661+ """Check if service was been started after a given time.
4662+
4663+ Args:
4664+ sentry_unit (sentry): The sentry unit to check for the service on
4665+ mtime (float): The epoch time to check against
4666+ service (string): service name to look for in process table
4667+ pgrep_full (boolean): Use full command line search mode with pgrep
4668+ sleep_time (int): Seconds to sleep before looking for process
4669+ retry_count (int): If service is not found, how many times to retry
4670+
4671+ Returns:
4672+ bool: True if service found and its start time is newer than mtime,
4673+ False if service is older than mtime or if service was
4674+ not found.
4675+ """
4676+ self.log.debug('Checking %s restarted since %s' % (service, mtime))
4677+ time.sleep(sleep_time)
4678+ proc_start_time = self._get_proc_start_time(sentry_unit, service,
4679+ pgrep_full)
4680+ while retry_count > 0 and not proc_start_time:
4681+ self.log.debug('No pid file found for service %s, will retry %i '
4682+ 'more times' % (service, retry_count))
4683+ time.sleep(30)
4684+ proc_start_time = self._get_proc_start_time(sentry_unit, service,
4685+ pgrep_full)
4686+ retry_count = retry_count - 1
4687+
4688+ if not proc_start_time:
4689+ self.log.warn('No proc start time found, assuming service did '
4690+ 'not start')
4691+ return False
4692+ if proc_start_time >= mtime:
4693+ self.log.debug('proc start time is newer than provided mtime'
4694+ '(%s >= %s)' % (proc_start_time, mtime))
4695+ return True
4696+ else:
4697+ self.log.warn('proc start time (%s) is older than provided mtime '
4698+ '(%s), service did not restart' % (proc_start_time,
4699+ mtime))
4700+ return False
4701+
4702+ def config_updated_since(self, sentry_unit, filename, mtime,
4703+ sleep_time=20):
4704+ """Check if file was modified after a given time.
4705+
4706+ Args:
4707+ sentry_unit (sentry): The sentry unit to check the file mtime on
4708+ filename (string): The file to check mtime of
4709+ mtime (float): The epoch time to check against
4710+ sleep_time (int): Seconds to sleep before looking for process
4711+
4712+ Returns:
4713+ bool: True if file was modified more recently than mtime, False if
4714+ file was modified before mtime,
4715+ """
4716+ self.log.debug('Checking %s updated since %s' % (filename, mtime))
4717+ time.sleep(sleep_time)
4718+ file_mtime = self._get_file_mtime(sentry_unit, filename)
4719+ if file_mtime >= mtime:
4720+ self.log.debug('File mtime is newer than provided mtime '
4721+ '(%s >= %s)' % (file_mtime, mtime))
4722+ return True
4723+ else:
4724+ self.log.warn('File mtime %s is older than provided mtime %s'
4725+ % (file_mtime, mtime))
4726+ return False
4727+
4728+ def validate_service_config_changed(self, sentry_unit, mtime, service,
4729+ filename, pgrep_full=False,
4730+ sleep_time=20, retry_count=2):
4731+ """Check service and file were updated after mtime
4732+
4733+ Args:
4734+ sentry_unit (sentry): The sentry unit to check for the service on
4735+ mtime (float): The epoch time to check against
4736+ service (string): service name to look for in process table
4737+ filename (string): The file to check mtime of
4738+ pgrep_full (boolean): Use full command line search mode with pgrep
4739+ sleep_time (int): Seconds to sleep before looking for process
4740+ retry_count (int): If service is not found, how many times to retry
4741+
4742+ Typical Usage:
4743+ u = OpenStackAmuletUtils(ERROR)
4744+ ...
4745+ mtime = u.get_sentry_time(self.cinder_sentry)
4746+ self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
4747+ if not u.validate_service_config_changed(self.cinder_sentry,
4748+ mtime,
4749+ 'cinder-api',
4750+ '/etc/cinder/cinder.conf')
4751+ amulet.raise_status(amulet.FAIL, msg='update failed')
4752+ Returns:
4753+ bool: True if both service and file where updated/restarted after
4754+ mtime, False if service is older than mtime or if service was
4755+ not found or if filename was modified before mtime.
4756+ """
4757+ self.log.debug('Checking %s restarted since %s' % (service, mtime))
4758+ time.sleep(sleep_time)
4759+ service_restart = self.service_restarted_since(sentry_unit, mtime,
4760+ service,
4761+ pgrep_full=pgrep_full,
4762+ sleep_time=0,
4763+ retry_count=retry_count)
4764+ config_update = self.config_updated_since(sentry_unit, filename, mtime,
4765+ sleep_time=0)
4766+ return service_restart and config_update
4767+
4768+ def get_sentry_time(self, sentry_unit):
4769+ """Return current epoch time on a sentry"""
4770+ cmd = "date +'%s'"
4771+ return float(sentry_unit.run(cmd)[0])
4772+
4773+ def relation_error(self, name, data):
4774+ return 'unexpected relation data in {} - {}'.format(name, data)
4775+
4776+ def endpoint_error(self, name, data):
4777+ return 'unexpected endpoint data in {} - {}'.format(name, data)
4778+
4779+ def get_ubuntu_releases(self):
4780+ """Return a list of all Ubuntu releases in order of release."""
4781+ _d = distro_info.UbuntuDistroInfo()
4782+ _release_list = _d.all
4783+ self.log.debug('Ubuntu release list: {}'.format(_release_list))
4784+ return _release_list
4785+
4786+ def file_to_url(self, file_rel_path):
4787+ """Convert a relative file path to a file URL."""
4788+ _abs_path = os.path.abspath(file_rel_path)
4789+ return urlparse.urlparse(_abs_path, scheme='file').geturl()
4790+
4791+ def check_commands_on_units(self, commands, sentry_units):
4792+ """Check that all commands in a list exit zero on all
4793+ sentry units in a list.
4794+
4795+ :param commands: list of bash commands
4796+ :param sentry_units: list of sentry unit pointers
4797+ :returns: None if successful; Failure message otherwise
4798+ """
4799+ self.log.debug('Checking exit codes for {} commands on {} '
4800+ 'sentry units...'.format(len(commands),
4801+ len(sentry_units)))
4802+ for sentry_unit in sentry_units:
4803+ for cmd in commands:
4804+ output, code = sentry_unit.run(cmd)
4805+ if code == 0:
4806+ self.log.debug('{} `{}` returned {} '
4807+ '(OK)'.format(sentry_unit.info['unit_name'],
4808+ cmd, code))
4809+ else:
4810+ return ('{} `{}` returned {} '
4811+ '{}'.format(sentry_unit.info['unit_name'],
4812+ cmd, code, output))
4813+ return None
4814+
4815+ def get_process_id_list(self, sentry_unit, process_name):
4816+ """Get a list of process ID(s) from a single sentry juju unit
4817+ for a single process name.
4818+
4819+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
4820+ :param process_name: Process name
4821+ :returns: List of process IDs
4822+ """
4823+ cmd = 'pidof {}'.format(process_name)
4824+ output, code = sentry_unit.run(cmd)
4825+ if code != 0:
4826+ msg = ('{} `{}` returned {} '
4827+ '{}'.format(sentry_unit.info['unit_name'],
4828+ cmd, code, output))
4829+ amulet.raise_status(amulet.FAIL, msg=msg)
4830+ return str(output).split()
4831+
4832+ def get_unit_process_ids(self, unit_processes):
4833+ """Construct a dict containing unit sentries, process names, and
4834+ process IDs."""
4835+ pid_dict = {}
4836+ for sentry_unit, process_list in unit_processes.iteritems():
4837+ pid_dict[sentry_unit] = {}
4838+ for process in process_list:
4839+ pids = self.get_process_id_list(sentry_unit, process)
4840+ pid_dict[sentry_unit].update({process: pids})
4841+ return pid_dict
4842+
4843+ def validate_unit_process_ids(self, expected, actual):
4844+ """Validate process id quantities for services on units."""
4845+ self.log.debug('Checking units for running processes...')
4846+ self.log.debug('Expected PIDs: {}'.format(expected))
4847+ self.log.debug('Actual PIDs: {}'.format(actual))
4848+
4849+ if len(actual) != len(expected):
4850+ return ('Unit count mismatch. expected, actual: {}, '
4851+ '{} '.format(len(expected), len(actual)))
4852+
4853+ for (e_sentry, e_proc_names) in expected.iteritems():
4854+ e_sentry_name = e_sentry.info['unit_name']
4855+ if e_sentry in actual.keys():
4856+ a_proc_names = actual[e_sentry]
4857+ else:
4858+ return ('Expected sentry ({}) not found in actual dict data.'
4859+ '{}'.format(e_sentry_name, e_sentry))
4860+
4861+ if len(e_proc_names.keys()) != len(a_proc_names.keys()):
4862+ return ('Process name count mismatch. expected, actual: {}, '
4863+ '{}'.format(len(e_proc_names), len(a_proc_names)))
4864+
4865+ for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
4866+ zip(e_proc_names.items(), a_proc_names.items()):
4867+ if e_proc_name != a_proc_name:
4868+ return ('Process name mismatch. expected, actual: {}, '
4869+ '{}'.format(e_proc_name, a_proc_name))
4870+
4871+ a_pids_length = len(a_pids)
4872+ fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
4873+ '{}, {} ({})'.format(e_sentry_name, e_proc_name,
4874+ e_pids_length, a_pids_length,
4875+ a_pids))
4876+
4877+ # If expected is not bool, ensure PID quantities match
4878+ if not isinstance(e_pids_length, bool) and \
4879+ a_pids_length != e_pids_length:
4880+ return fail_msg
4881+ # If expected is bool True, ensure 1 or more PIDs exist
4882+ elif isinstance(e_pids_length, bool) and \
4883+ e_pids_length is True and a_pids_length < 1:
4884+ return fail_msg
4885+ # If expected is bool False, ensure 0 PIDs exist
4886+ elif isinstance(e_pids_length, bool) and \
4887+ e_pids_length is False and a_pids_length != 0:
4888+ return fail_msg
4889+ else:
4890+ self.log.debug('PID check OK: {} {} {}: '
4891+ '{}'.format(e_sentry_name, e_proc_name,
4892+ e_pids_length, a_pids))
4893+ return None
4894+
4895+ def validate_list_of_identical_dicts(self, list_of_dicts):
4896+ """Check that all dicts within a list are identical."""
4897+ hashes = []
4898+ for _dict in list_of_dicts:
4899+ hashes.append(hash(frozenset(_dict.items())))
4900+
4901+ self.log.debug('Hashes: {}'.format(hashes))
4902+ if len(set(hashes)) == 1:
4903+ self.log.debug('Dicts within list are identical')
4904+ else:
4905+ return 'Dicts within list are not identical'
4906+
4907+ return None
4908
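The utils helpers added above share two conventions worth keeping in mind when writing this charm's amulet tests: the validators return None on success and a failure-message string otherwise, and an expected PID value may be an exact count (int) or a boolean (True meaning one or more PIDs). The following is a minimal, illustrative sketch of how a charm test might drive get_unit_process_ids(), validate_unit_process_ids() and check_commands_on_units(); the process names, command and sentry handle are assumptions for illustration only, not taken from this charm:

    import amulet

    from charmhelpers.contrib.openstack.amulet.utils import (
        OpenStackAmuletUtils,
        ERROR,
    )

    u = OpenStackAmuletUtils(ERROR)


    def check_processes_and_commands(sentry_unit):
        # Expected PID values: an int means "exactly this many PIDs",
        # True means "one or more" (names below are illustrative).
        expected = {
            sentry_unit: {
                'neutron-server': True,
            },
        }
        # get_unit_process_ids() only iterates the process names, so the
        # same mapping can be reused for both calls.
        actual = u.get_unit_process_ids(expected)
        ret = u.validate_unit_process_ids(expected, actual)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

        # check_commands_on_units() likewise returns None on success.
        ret = u.check_commands_on_units(['ls /etc/neutron'], [sentry_unit])
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)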
4909=== added directory 'tests/charmhelpers/contrib/openstack'
4910=== added file 'tests/charmhelpers/contrib/openstack/__init__.py'
4911--- tests/charmhelpers/contrib/openstack/__init__.py 1970-01-01 00:00:00 +0000
4912+++ tests/charmhelpers/contrib/openstack/__init__.py 2015-11-03 12:30:15 +0000
4913@@ -0,0 +1,15 @@
4914+# Copyright 2014-2015 Canonical Limited.
4915+#
4916+# This file is part of charm-helpers.
4917+#
4918+# charm-helpers is free software: you can redistribute it and/or modify
4919+# it under the terms of the GNU Lesser General Public License version 3 as
4920+# published by the Free Software Foundation.
4921+#
4922+# charm-helpers is distributed in the hope that it will be useful,
4923+# but WITHOUT ANY WARRANTY; without even the implied warranty of
4924+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4925+# GNU Lesser General Public License for more details.
4926+#
4927+# You should have received a copy of the GNU Lesser General Public License
4928+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4929
4930=== added directory 'tests/charmhelpers/contrib/openstack/amulet'
4931=== added file 'tests/charmhelpers/contrib/openstack/amulet/__init__.py'
4932--- tests/charmhelpers/contrib/openstack/amulet/__init__.py 1970-01-01 00:00:00 +0000
4933+++ tests/charmhelpers/contrib/openstack/amulet/__init__.py 2015-11-03 12:30:15 +0000
4934@@ -0,0 +1,15 @@
4935+# Copyright 2014-2015 Canonical Limited.
4936+#
4937+# This file is part of charm-helpers.
4938+#
4939+# charm-helpers is free software: you can redistribute it and/or modify
4940+# it under the terms of the GNU Lesser General Public License version 3 as
4941+# published by the Free Software Foundation.
4942+#
4943+# charm-helpers is distributed in the hope that it will be useful,
4944+# but WITHOUT ANY WARRANTY; without even the implied warranty of
4945+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4946+# GNU Lesser General Public License for more details.
4947+#
4948+# You should have received a copy of the GNU Lesser General Public License
4949+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4950
4951=== added file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
4952--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
4953+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-11-03 12:30:15 +0000
4954@@ -0,0 +1,188 @@
4955+# Copyright 2014-2015 Canonical Limited.
4956+#
4957+# This file is part of charm-helpers.
4958+#
4959+# charm-helpers is free software: you can redistribute it and/or modify
4960+# it under the terms of the GNU Lesser General Public License version 3 as
4961+# published by the Free Software Foundation.
4962+#
4963+# charm-helpers is distributed in the hope that it will be useful,
4964+# but WITHOUT ANY WARRANTY; without even the implied warranty of
4965+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4966+# GNU Lesser General Public License for more details.
4967+#
4968+# You should have received a copy of the GNU Lesser General Public License
4969+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4970+
4971+import six
4972+from collections import OrderedDict
4973+from charmhelpers.contrib.amulet.deployment import (
4974+ AmuletDeployment
4975+)
4976+
4977+
4978+class OpenStackAmuletDeployment(AmuletDeployment):
4979+ """OpenStack amulet deployment.
4980+
4981+ This class inherits from AmuletDeployment and has additional support
4982+ that is specifically for use by OpenStack charms.
4983+ """
4984+
4985+ def __init__(self, series=None, openstack=None, source=None, stable=True):
4986+ """Initialize the deployment environment."""
4987+ super(OpenStackAmuletDeployment, self).__init__(series)
4988+ self.openstack = openstack
4989+ self.source = source
4990+ self.stable = stable
4991+ # Note(coreycb): this needs to be changed when new next branches come
4992+ # out.
4993+ self.current_next = "trusty"
4994+
4995+ def _determine_branch_locations(self, other_services):
4996+ """Determine the branch locations for the other services.
4997+
4998+ Determine if the local branch being tested is derived from its
4999+ stable or next (dev) branch, and based on this, use the corresponding
5000+ stable or next branches for the other_services."""
The diff has been truncated for viewing.
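Although the preview diff is truncated at this point, the deployment helper above is consumed in the usual way: the charm's amulet test subclasses OpenStackAmuletDeployment, and the base class works out stable versus next branch locations for the other services via _determine_branch_locations(). A minimal sketch of that pattern follows; the class, service and relation names are illustrative assumptions, not taken from this merge:

    from charmhelpers.contrib.openstack.amulet.deployment import (
        OpenStackAmuletDeployment,
    )


    class NeutronApiOdlBasicDeployment(OpenStackAmuletDeployment):
        """Illustrative amulet deployment for this charm."""

        def __init__(self, series='trusty', openstack=None, source=None,
                     stable=False):
            super(NeutronApiOdlBasicDeployment, self).__init__(
                series, openstack, source, stable)
            self._add_services()
            self._add_relations()
            self._configure_services()
            self._deploy()

        def _add_services(self):
            # The base class fills in stable vs. next branch locations
            # for the other services before deploying them.
            this_service = {'name': 'neutron-api-odl'}
            other_services = [{'name': 'neutron-api'}]
            super(NeutronApiOdlBasicDeployment, self)._add_services(
                this_service, other_services)

        def _add_relations(self):
            # Relation endpoints here are assumptions for illustration.
            relations = {
                'neutron-api-odl:neutron-plugin-api-subordinate':
                    'neutron-api:neutron-plugin-api-subordinate',
            }
            super(NeutronApiOdlBasicDeployment, self)._add_relations(
                relations)

        def _configure_services(self):
            super(NeutronApiOdlBasicDeployment, self)._configure_services({})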
