Merge lp:~gnuoy/charms/trusty/cisco-vpp/dhcp into lp:~openstack-charmers/charms/trusty/cisco-vpp/next

Proposed by Liam Young
Status: Merged
Merged at revision: 116
Proposed branch: lp:~gnuoy/charms/trusty/cisco-vpp/dhcp
Merge into: lp:~openstack-charmers/charms/trusty/cisco-vpp/next
Diff against target: 5609 lines (+3361/-493)
40 files modified
charm-helpers-hooks.yaml (+1/-1)
config.yaml (+8/-0)
hooks/ODL.py (+2/-1)
hooks/charmhelpers/contrib/network/ip.py (+10/-4)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+158/-15)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+742/-51)
hooks/charmhelpers/contrib/openstack/context.py (+192/-63)
hooks/charmhelpers/contrib/openstack/neutron.py (+57/-16)
hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+12/-6)
hooks/charmhelpers/contrib/openstack/templating.py (+32/-29)
hooks/charmhelpers/contrib/openstack/utils.py (+324/-33)
hooks/charmhelpers/contrib/python/packages.py (+2/-0)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+272/-43)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+10/-0)
hooks/charmhelpers/contrib/storage/linux/utils.py (+4/-3)
hooks/charmhelpers/core/files.py (+45/-0)
hooks/charmhelpers/core/hookenv.py (+249/-49)
hooks/charmhelpers/core/host.py (+148/-36)
hooks/charmhelpers/core/hugepage.py (+33/-16)
hooks/charmhelpers/core/kernel.py (+68/-0)
hooks/charmhelpers/core/services/base.py (+12/-9)
hooks/charmhelpers/core/services/helpers.py (+9/-7)
hooks/charmhelpers/core/strutils.py (+30/-0)
hooks/charmhelpers/core/templating.py (+12/-12)
hooks/charmhelpers/core/unitdata.py (+61/-17)
hooks/charmhelpers/fetch/__init__.py (+31/-14)
hooks/charmhelpers/fetch/archiveurl.py (+7/-1)
hooks/charmhelpers/fetch/giturl.py (+1/-1)
hooks/services.py (+28/-0)
hooks/vpp_data.py (+75/-0)
hooks/vpp_utils.py (+2/-1)
metadata.yaml (+4/-0)
templates/icehouse/dhcp_agent.ini (+13/-0)
templates/icehouse/metadata_agent.ini (+15/-0)
templates/icehouse/neutron.conf (+31/-0)
templates/parts/rabbitmq (+21/-0)
tests/charmhelpers/contrib/amulet/utils.py (+239/-9)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+38/-4)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+361/-51)
unit_tests/test_vpp_utils.py (+2/-1)
To merge this branch: bzr merge lp:~gnuoy/charms/trusty/cisco-vpp/dhcp
Reviewer: James Page (status: Approve)
Review via email: mp+278819@code.launchpad.net

Description of the change

This merge proposal adds support for serving DHCP and metadata requests to guests.
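
As orientation for the diff below, here is a minimal illustrative sketch (not the literal contents of the hooks/services.py or hooks/vpp_data.py changes in this proposal) of how a charm built on the charm-helpers services framework can render agent configuration such as the new dhcp_agent.ini and metadata_agent.ini templates once the required relation data is present. The service name, context class and required keys shown are assumptions for illustration only:

    # Illustrative sketch only; the real definitions live in hooks/services.py
    # and hooks/vpp_data.py, which are not reproduced here.
    from charmhelpers.core.services.base import ServiceManager
    from charmhelpers.core.services import helpers


    class AmqpRelation(helpers.RelationContext):
        # Assumed relation/interface names and keys for a rabbitmq-server peer.
        name = 'amqp'
        interface = 'rabbitmq'
        required_keys = ['hostname', 'password']


    def manage():
        manager = ServiceManager([
            {
                'service': 'neutron-dhcp-agent',   # assumed service name
                'required_data': [AmqpRelation()],
                'data_ready': [
                    helpers.render_template(
                        source='dhcp_agent.ini',
                        target='/etc/neutron/dhcp_agent.ini'),
                    helpers.render_template(
                        source='metadata_agent.ini',
                        target='/etc/neutron/metadata_agent.ini'),
                ],
            },
        ])
        manager.manage()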

James Page (james-page):
review: Needs Information
121. By Liam Young

Fix typo in context that was returning the wrong IP for keystone

122. By Liam Young

ODL initially returns 404s when querying nodes, so back off and retry the node query
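
The change in revision 122 simply passes retry_rcs=[requests.codes.not_found] into the charm's existing contact_odl() helper (visible in the hooks/ODL.py hunk of the diff below). Purely to illustrate the behaviour described, a minimal standalone sketch of retrying a GET while the response code is in a retry list, with the attempt count and delay chosen arbitrarily:

    # Illustrative only; the real retry handling is inside contact_odl() in
    # hooks/ODL.py and is not shown in full in this diff.
    import time

    import requests


    def get_with_retry(url, retry_rcs=None, attempts=5, delay=10):
        """GET url, retrying while the response status code is in retry_rcs."""
        retry_rcs = retry_rcs or []
        response = requests.get(url)
        for _ in range(attempts):
            if response.status_code not in retry_rcs:
                break
            time.sleep(delay)
            response = requests.get(url)
        return response

    # Example: tolerate ODL's initial 404s when querying registered nodes.
    # nodes = get_with_retry(node_query_url,
    #                        retry_rcs=[requests.codes.not_found]).json()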

123. By Liam Young

Fix bug causing the charm to fail if /etc/neutron does not exist

James Page (james-page) wrote:

Just a few niggles - but they need taking care of.

review: Needs Fixing
124. By Liam Young

General tidy-up/fixes from MP feedback from James Page

James Page (james-page):
review: Approve

Preview Diff

1=== modified file 'charm-helpers-hooks.yaml'
2--- charm-helpers-hooks.yaml 2015-06-24 09:56:23 +0000
3+++ charm-helpers-hooks.yaml 2015-12-01 15:05:49 +0000
4@@ -1,4 +1,4 @@
5-branch: lp:~gnuoy/charm-helpers/cisco-vpp/
6+branch: lp:charm-helpers
7 destination: hooks/charmhelpers
8 include:
9 - core
10
11=== modified file 'config.yaml'
12--- config.yaml 2015-08-14 07:27:33 +0000
13+++ config.yaml 2015-12-01 15:05:49 +0000
14@@ -41,3 +41,11 @@
15 mac-network-map:
16 default: ''
17 type: string
18+ rabbit-user:
19+ default: neutron
20+ type: string
21+ description: Username used to access RabbitMQ queue
22+ rabbit-vhost:
23+ default: openstack
24+ type: string
25+ description: RabbitMQ vhost
26
27=== modified file 'hooks/ODL.py'
28--- hooks/ODL.py 2015-09-14 16:44:47 +0000
29+++ hooks/ODL.py 2015-12-01 15:05:49 +0000
30@@ -73,7 +73,8 @@
31
32 def get_odl_registered_nodes(self):
33 log('Querying nodes registered with odl')
34- odl_req = self.contact_odl('GET', self.node_query_url)
35+ odl_req = self.contact_odl('GET', self.node_query_url,
36+ retry_rcs=[requests.codes.not_found])
37 odl_json = odl_req.json()
38 odl_node_ids = []
39 if odl_json.get('nodes'):
40
41=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
42--- hooks/charmhelpers/contrib/network/ip.py 2015-06-10 15:45:48 +0000
43+++ hooks/charmhelpers/contrib/network/ip.py 2015-12-01 15:05:49 +0000
44@@ -23,7 +23,7 @@
45 from functools import partial
46
47 from charmhelpers.core.hookenv import unit_get
48-from charmhelpers.fetch import apt_install
49+from charmhelpers.fetch import apt_install, apt_update
50 from charmhelpers.core.hookenv import (
51 log,
52 WARNING,
53@@ -32,13 +32,15 @@
54 try:
55 import netifaces
56 except ImportError:
57- apt_install('python-netifaces')
58+ apt_update(fatal=True)
59+ apt_install('python-netifaces', fatal=True)
60 import netifaces
61
62 try:
63 import netaddr
64 except ImportError:
65- apt_install('python-netaddr')
66+ apt_update(fatal=True)
67+ apt_install('python-netaddr', fatal=True)
68 import netaddr
69
70
71@@ -435,8 +437,12 @@
72
73 rev = dns.reversename.from_address(address)
74 result = ns_query(rev)
75+
76 if not result:
77- return None
78+ try:
79+ result = socket.gethostbyaddr(address)[0]
80+ except:
81+ return None
82 else:
83 result = address
84
85
86=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
87--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-06-16 07:53:15 +0000
88+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-12-01 15:05:49 +0000
89@@ -14,12 +14,18 @@
90 # You should have received a copy of the GNU Lesser General Public License
91 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
92
93+import logging
94+import re
95+import sys
96 import six
97 from collections import OrderedDict
98 from charmhelpers.contrib.amulet.deployment import (
99 AmuletDeployment
100 )
101
102+DEBUG = logging.DEBUG
103+ERROR = logging.ERROR
104+
105
106 class OpenStackAmuletDeployment(AmuletDeployment):
107 """OpenStack amulet deployment.
108@@ -28,9 +34,12 @@
109 that is specifically for use by OpenStack charms.
110 """
111
112- def __init__(self, series=None, openstack=None, source=None, stable=True):
113+ def __init__(self, series=None, openstack=None, source=None,
114+ stable=True, log_level=DEBUG):
115 """Initialize the deployment environment."""
116 super(OpenStackAmuletDeployment, self).__init__(series)
117+ self.log = self.get_logger(level=log_level)
118+ self.log.info('OpenStackAmuletDeployment: init')
119 self.openstack = openstack
120 self.source = source
121 self.stable = stable
122@@ -38,30 +47,55 @@
123 # out.
124 self.current_next = "trusty"
125
126+ def get_logger(self, name="deployment-logger", level=logging.DEBUG):
127+ """Get a logger object that will log to stdout."""
128+ log = logging
129+ logger = log.getLogger(name)
130+ fmt = log.Formatter("%(asctime)s %(funcName)s "
131+ "%(levelname)s: %(message)s")
132+
133+ handler = log.StreamHandler(stream=sys.stdout)
134+ handler.setLevel(level)
135+ handler.setFormatter(fmt)
136+
137+ logger.addHandler(handler)
138+ logger.setLevel(level)
139+
140+ return logger
141+
142 def _determine_branch_locations(self, other_services):
143 """Determine the branch locations for the other services.
144
145 Determine if the local branch being tested is derived from its
146 stable or next (dev) branch, and based on this, use the corresonding
147 stable or next branches for the other_services."""
148- base_charms = ['mysql', 'mongodb']
149+
150+ self.log.info('OpenStackAmuletDeployment: determine branch locations')
151+
152+ # Charms outside the lp:~openstack-charmers namespace
153+ base_charms = ['mysql', 'mongodb', 'nrpe']
154+
155+ # Force these charms to current series even when using an older series.
156+ # ie. Use trusty/nrpe even when series is precise, as the P charm
157+ # does not possess the necessary external master config and hooks.
158+ force_series_current = ['nrpe']
159
160 if self.series in ['precise', 'trusty']:
161 base_series = self.series
162 else:
163 base_series = self.current_next
164
165- if self.stable:
166- for svc in other_services:
167- if svc.get('location'):
168- continue
169+ for svc in other_services:
170+ if svc['name'] in force_series_current:
171+ base_series = self.current_next
172+ # If a location has been explicitly set, use it
173+ if svc.get('location'):
174+ continue
175+ if self.stable:
176 temp = 'lp:charms/{}/{}'
177 svc['location'] = temp.format(base_series,
178 svc['name'])
179- else:
180- for svc in other_services:
181- if svc.get('location'):
182- continue
183+ else:
184 if svc['name'] in base_charms:
185 temp = 'lp:charms/{}/{}'
186 svc['location'] = temp.format(base_series,
187@@ -70,10 +104,13 @@
188 temp = 'lp:~openstack-charmers/charms/{}/{}/next'
189 svc['location'] = temp.format(self.current_next,
190 svc['name'])
191+
192 return other_services
193
194 def _add_services(self, this_service, other_services):
195 """Add services to the deployment and set openstack-origin/source."""
196+ self.log.info('OpenStackAmuletDeployment: adding services')
197+
198 other_services = self._determine_branch_locations(other_services)
199
200 super(OpenStackAmuletDeployment, self)._add_services(this_service,
201@@ -81,29 +118,102 @@
202
203 services = other_services
204 services.append(this_service)
205+
206+ # Charms which should use the source config option
207 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
208 'ceph-osd', 'ceph-radosgw']
209- # Openstack subordinate charms do not expose an origin option as that
210- # is controlled by the principle
211- ignore = ['neutron-openvswitch', 'cisco-vpp']
212+
213+ # Charms which can not use openstack-origin, ie. many subordinates
214+ no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
215+ 'openvswitch-odl', 'neutron-api-odl', 'odl-controller']
216
217 if self.openstack:
218 for svc in services:
219- if svc['name'] not in use_source + ignore:
220+ if svc['name'] not in use_source + no_origin:
221 config = {'openstack-origin': self.openstack}
222 self.d.configure(svc['name'], config)
223
224 if self.source:
225 for svc in services:
226- if svc['name'] in use_source and svc['name'] not in ignore:
227+ if svc['name'] in use_source and svc['name'] not in no_origin:
228 config = {'source': self.source}
229 self.d.configure(svc['name'], config)
230
231 def _configure_services(self, configs):
232 """Configure all of the services."""
233+ self.log.info('OpenStackAmuletDeployment: configure services')
234 for service, config in six.iteritems(configs):
235 self.d.configure(service, config)
236
237+ def _auto_wait_for_status(self, message=None, exclude_services=None,
238+ include_only=None, timeout=1800):
239+ """Wait for all units to have a specific extended status, except
240+ for any defined as excluded. Unless specified via message, any
241+ status containing any case of 'ready' will be considered a match.
242+
243+ Examples of message usage:
244+
245+ Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
246+ message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
247+
248+ Wait for all units to reach this status (exact match):
249+ message = re.compile('^Unit is ready and clustered$')
250+
251+ Wait for all units to reach any one of these (exact match):
252+ message = re.compile('Unit is ready|OK|Ready')
253+
254+ Wait for at least one unit to reach this status (exact match):
255+ message = {'ready'}
256+
257+ See Amulet's sentry.wait_for_messages() for message usage detail.
258+ https://github.com/juju/amulet/blob/master/amulet/sentry.py
259+
260+ :param message: Expected status match
261+ :param exclude_services: List of juju service names to ignore,
262+ not to be used in conjuction with include_only.
263+ :param include_only: List of juju service names to exclusively check,
264+ not to be used in conjuction with exclude_services.
265+ :param timeout: Maximum time in seconds to wait for status match
266+ :returns: None. Raises if timeout is hit.
267+ """
268+ self.log.info('Waiting for extended status on units...')
269+
270+ all_services = self.d.services.keys()
271+
272+ if exclude_services and include_only:
273+ raise ValueError('exclude_services can not be used '
274+ 'with include_only')
275+
276+ if message:
277+ if isinstance(message, re._pattern_type):
278+ match = message.pattern
279+ else:
280+ match = message
281+
282+ self.log.debug('Custom extended status wait match: '
283+ '{}'.format(match))
284+ else:
285+ self.log.debug('Default extended status wait match: contains '
286+ 'READY (case-insensitive)')
287+ message = re.compile('.*ready.*', re.IGNORECASE)
288+
289+ if exclude_services:
290+ self.log.debug('Excluding services from extended status match: '
291+ '{}'.format(exclude_services))
292+ else:
293+ exclude_services = []
294+
295+ if include_only:
296+ services = include_only
297+ else:
298+ services = list(set(all_services) - set(exclude_services))
299+
300+ self.log.debug('Waiting up to {}s for extended status on services: '
301+ '{}'.format(timeout, services))
302+ service_messages = {service: message for service in services}
303+ self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
304+ self.log.info('OK')
305+
306 def _get_openstack_release(self):
307 """Get openstack release.
308
309@@ -152,3 +262,36 @@
310 return os_origin.split('%s-' % self.series)[1].split('/')[0]
311 else:
312 return releases[self.series]
313+
314+ def get_ceph_expected_pools(self, radosgw=False):
315+ """Return a list of expected ceph pools in a ceph + cinder + glance
316+ test scenario, based on OpenStack release and whether ceph radosgw
317+ is flagged as present or not."""
318+
319+ if self._get_openstack_release() >= self.trusty_kilo:
320+ # Kilo or later
321+ pools = [
322+ 'rbd',
323+ 'cinder',
324+ 'glance'
325+ ]
326+ else:
327+ # Juno or earlier
328+ pools = [
329+ 'data',
330+ 'metadata',
331+ 'rbd',
332+ 'cinder',
333+ 'glance'
334+ ]
335+
336+ if radosgw:
337+ pools.extend([
338+ '.rgw.root',
339+ '.rgw.control',
340+ '.rgw',
341+ '.rgw.gc',
342+ '.users.uid'
343+ ])
344+
345+ return pools
346
347=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
348--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-06-10 07:35:12 +0000
349+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-12-01 15:05:49 +0000
350@@ -14,16 +14,22 @@
351 # You should have received a copy of the GNU Lesser General Public License
352 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
353
354+import amulet
355+import json
356 import logging
357 import os
358+import re
359+import six
360 import time
361 import urllib
362
363+import cinderclient.v1.client as cinder_client
364 import glanceclient.v1.client as glance_client
365+import heatclient.v1.client as heat_client
366 import keystoneclient.v2_0 as keystone_client
367 import novaclient.v1_1.client as nova_client
368-
369-import six
370+import pika
371+import swiftclient
372
373 from charmhelpers.contrib.amulet.utils import (
374 AmuletUtils
375@@ -37,7 +43,7 @@
376 """OpenStack amulet utilities.
377
378 This class inherits from AmuletUtils and has additional support
379- that is specifically for use by OpenStack charms.
380+ that is specifically for use by OpenStack charm tests.
381 """
382
383 def __init__(self, log_level=ERROR):
384@@ -51,6 +57,8 @@
385 Validate actual endpoint data vs expected endpoint data. The ports
386 are used to find the matching endpoint.
387 """
388+ self.log.debug('Validating endpoint data...')
389+ self.log.debug('actual: {}'.format(repr(endpoints)))
390 found = False
391 for ep in endpoints:
392 self.log.debug('endpoint: {}'.format(repr(ep)))
393@@ -77,6 +85,7 @@
394 Validate a list of actual service catalog endpoints vs a list of
395 expected service catalog endpoints.
396 """
397+ self.log.debug('Validating service catalog endpoint data...')
398 self.log.debug('actual: {}'.format(repr(actual)))
399 for k, v in six.iteritems(expected):
400 if k in actual:
401@@ -93,6 +102,7 @@
402 Validate a list of actual tenant data vs list of expected tenant
403 data.
404 """
405+ self.log.debug('Validating tenant data...')
406 self.log.debug('actual: {}'.format(repr(actual)))
407 for e in expected:
408 found = False
409@@ -114,6 +124,7 @@
410 Validate a list of actual role data vs a list of expected role
411 data.
412 """
413+ self.log.debug('Validating role data...')
414 self.log.debug('actual: {}'.format(repr(actual)))
415 for e in expected:
416 found = False
417@@ -134,6 +145,7 @@
418 Validate a list of actual user data vs a list of expected user
419 data.
420 """
421+ self.log.debug('Validating user data...')
422 self.log.debug('actual: {}'.format(repr(actual)))
423 for e in expected:
424 found = False
425@@ -155,17 +167,30 @@
426
427 Validate a list of actual flavors vs a list of expected flavors.
428 """
429+ self.log.debug('Validating flavor data...')
430 self.log.debug('actual: {}'.format(repr(actual)))
431 act = [a.name for a in actual]
432 return self._validate_list_data(expected, act)
433
434 def tenant_exists(self, keystone, tenant):
435 """Return True if tenant exists."""
436+ self.log.debug('Checking if tenant exists ({})...'.format(tenant))
437 return tenant in [t.name for t in keystone.tenants.list()]
438
439+ def authenticate_cinder_admin(self, keystone_sentry, username,
440+ password, tenant):
441+ """Authenticates admin user with cinder."""
442+ # NOTE(beisner): cinder python client doesn't accept tokens.
443+ service_ip = \
444+ keystone_sentry.relation('shared-db',
445+ 'mysql:shared-db')['private-address']
446+ ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
447+ return cinder_client.Client(username, password, tenant, ept)
448+
449 def authenticate_keystone_admin(self, keystone_sentry, user, password,
450 tenant):
451 """Authenticates admin user with the keystone admin endpoint."""
452+ self.log.debug('Authenticating keystone admin...')
453 unit = keystone_sentry
454 service_ip = unit.relation('shared-db',
455 'mysql:shared-db')['private-address']
456@@ -175,6 +200,7 @@
457
458 def authenticate_keystone_user(self, keystone, user, password, tenant):
459 """Authenticates a regular user with the keystone public endpoint."""
460+ self.log.debug('Authenticating keystone user ({})...'.format(user))
461 ep = keystone.service_catalog.url_for(service_type='identity',
462 endpoint_type='publicURL')
463 return keystone_client.Client(username=user, password=password,
464@@ -182,19 +208,49 @@
465
466 def authenticate_glance_admin(self, keystone):
467 """Authenticates admin user with glance."""
468+ self.log.debug('Authenticating glance admin...')
469 ep = keystone.service_catalog.url_for(service_type='image',
470 endpoint_type='adminURL')
471 return glance_client.Client(ep, token=keystone.auth_token)
472
473+ def authenticate_heat_admin(self, keystone):
474+ """Authenticates the admin user with heat."""
475+ self.log.debug('Authenticating heat admin...')
476+ ep = keystone.service_catalog.url_for(service_type='orchestration',
477+ endpoint_type='publicURL')
478+ return heat_client.Client(endpoint=ep, token=keystone.auth_token)
479+
480 def authenticate_nova_user(self, keystone, user, password, tenant):
481 """Authenticates a regular user with nova-api."""
482+ self.log.debug('Authenticating nova user ({})...'.format(user))
483 ep = keystone.service_catalog.url_for(service_type='identity',
484 endpoint_type='publicURL')
485 return nova_client.Client(username=user, api_key=password,
486 project_id=tenant, auth_url=ep)
487
488+ def authenticate_swift_user(self, keystone, user, password, tenant):
489+ """Authenticates a regular user with swift api."""
490+ self.log.debug('Authenticating swift user ({})...'.format(user))
491+ ep = keystone.service_catalog.url_for(service_type='identity',
492+ endpoint_type='publicURL')
493+ return swiftclient.Connection(authurl=ep,
494+ user=user,
495+ key=password,
496+ tenant_name=tenant,
497+ auth_version='2.0')
498+
499 def create_cirros_image(self, glance, image_name):
500- """Download the latest cirros image and upload it to glance."""
501+ """Download the latest cirros image and upload it to glance,
502+ validate and return a resource pointer.
503+
504+ :param glance: pointer to authenticated glance connection
505+ :param image_name: display name for new image
506+ :returns: glance image pointer
507+ """
508+ self.log.debug('Creating glance cirros image '
509+ '({})...'.format(image_name))
510+
511+ # Download cirros image
512 http_proxy = os.getenv('AMULET_HTTP_PROXY')
513 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
514 if http_proxy:
515@@ -203,57 +259,67 @@
516 else:
517 opener = urllib.FancyURLopener()
518
519- f = opener.open("http://download.cirros-cloud.net/version/released")
520+ f = opener.open('http://download.cirros-cloud.net/version/released')
521 version = f.read().strip()
522- cirros_img = "cirros-{}-x86_64-disk.img".format(version)
523+ cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
524 local_path = os.path.join('tests', cirros_img)
525
526 if not os.path.exists(local_path):
527- cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
528+ cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
529 version, cirros_img)
530 opener.retrieve(cirros_url, local_path)
531 f.close()
532
533+ # Create glance image
534 with open(local_path) as f:
535 image = glance.images.create(name=image_name, is_public=True,
536 disk_format='qcow2',
537 container_format='bare', data=f)
538- count = 1
539- status = image.status
540- while status != 'active' and count < 10:
541- time.sleep(3)
542- image = glance.images.get(image.id)
543- status = image.status
544- self.log.debug('image status: {}'.format(status))
545- count += 1
546-
547- if status != 'active':
548- self.log.error('image creation timed out')
549- return None
550+
551+ # Wait for image to reach active status
552+ img_id = image.id
553+ ret = self.resource_reaches_status(glance.images, img_id,
554+ expected_stat='active',
555+ msg='Image status wait')
556+ if not ret:
557+ msg = 'Glance image failed to reach expected state.'
558+ amulet.raise_status(amulet.FAIL, msg=msg)
559+
560+ # Re-validate new image
561+ self.log.debug('Validating image attributes...')
562+ val_img_name = glance.images.get(img_id).name
563+ val_img_stat = glance.images.get(img_id).status
564+ val_img_pub = glance.images.get(img_id).is_public
565+ val_img_cfmt = glance.images.get(img_id).container_format
566+ val_img_dfmt = glance.images.get(img_id).disk_format
567+ msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
568+ 'container fmt:{} disk fmt:{}'.format(
569+ val_img_name, val_img_pub, img_id,
570+ val_img_stat, val_img_cfmt, val_img_dfmt))
571+
572+ if val_img_name == image_name and val_img_stat == 'active' \
573+ and val_img_pub is True and val_img_cfmt == 'bare' \
574+ and val_img_dfmt == 'qcow2':
575+ self.log.debug(msg_attr)
576+ else:
577+ msg = ('Volume validation failed, {}'.format(msg_attr))
578+ amulet.raise_status(amulet.FAIL, msg=msg)
579
580 return image
581
582 def delete_image(self, glance, image):
583 """Delete the specified image."""
584- num_before = len(list(glance.images.list()))
585- glance.images.delete(image)
586-
587- count = 1
588- num_after = len(list(glance.images.list()))
589- while num_after != (num_before - 1) and count < 10:
590- time.sleep(3)
591- num_after = len(list(glance.images.list()))
592- self.log.debug('number of images: {}'.format(num_after))
593- count += 1
594-
595- if num_after != (num_before - 1):
596- self.log.error('image deletion timed out')
597- return False
598-
599- return True
600+
601+ # /!\ DEPRECATION WARNING
602+ self.log.warn('/!\\ DEPRECATION WARNING: use '
603+ 'delete_resource instead of delete_image.')
604+ self.log.debug('Deleting glance image ({})...'.format(image))
605+ return self.delete_resource(glance.images, image, msg='glance image')
606
607 def create_instance(self, nova, image_name, instance_name, flavor):
608 """Create the specified instance."""
609+ self.log.debug('Creating instance '
610+ '({}|{}|{})'.format(instance_name, image_name, flavor))
611 image = nova.images.find(name=image_name)
612 flavor = nova.flavors.find(name=flavor)
613 instance = nova.servers.create(name=instance_name, image=image,
614@@ -276,19 +342,644 @@
615
616 def delete_instance(self, nova, instance):
617 """Delete the specified instance."""
618- num_before = len(list(nova.servers.list()))
619- nova.servers.delete(instance)
620-
621- count = 1
622- num_after = len(list(nova.servers.list()))
623- while num_after != (num_before - 1) and count < 10:
624- time.sleep(3)
625- num_after = len(list(nova.servers.list()))
626- self.log.debug('number of instances: {}'.format(num_after))
627- count += 1
628-
629- if num_after != (num_before - 1):
630- self.log.error('instance deletion timed out')
631- return False
632-
633- return True
634+
635+ # /!\ DEPRECATION WARNING
636+ self.log.warn('/!\\ DEPRECATION WARNING: use '
637+ 'delete_resource instead of delete_instance.')
638+ self.log.debug('Deleting instance ({})...'.format(instance))
639+ return self.delete_resource(nova.servers, instance,
640+ msg='nova instance')
641+
642+ def create_or_get_keypair(self, nova, keypair_name="testkey"):
643+ """Create a new keypair, or return pointer if it already exists."""
644+ try:
645+ _keypair = nova.keypairs.get(keypair_name)
646+ self.log.debug('Keypair ({}) already exists, '
647+ 'using it.'.format(keypair_name))
648+ return _keypair
649+ except:
650+ self.log.debug('Keypair ({}) does not exist, '
651+ 'creating it.'.format(keypair_name))
652+
653+ _keypair = nova.keypairs.create(name=keypair_name)
654+ return _keypair
655+
656+ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
657+ img_id=None, src_vol_id=None, snap_id=None):
658+ """Create cinder volume, optionally from a glance image, OR
659+ optionally as a clone of an existing volume, OR optionally
660+ from a snapshot. Wait for the new volume status to reach
661+ the expected status, validate and return a resource pointer.
662+
663+ :param vol_name: cinder volume display name
664+ :param vol_size: size in gigabytes
665+ :param img_id: optional glance image id
666+ :param src_vol_id: optional source volume id to clone
667+ :param snap_id: optional snapshot id to use
668+ :returns: cinder volume pointer
669+ """
670+ # Handle parameter input and avoid impossible combinations
671+ if img_id and not src_vol_id and not snap_id:
672+ # Create volume from image
673+ self.log.debug('Creating cinder volume from glance image...')
674+ bootable = 'true'
675+ elif src_vol_id and not img_id and not snap_id:
676+ # Clone an existing volume
677+ self.log.debug('Cloning cinder volume...')
678+ bootable = cinder.volumes.get(src_vol_id).bootable
679+ elif snap_id and not src_vol_id and not img_id:
680+ # Create volume from snapshot
681+ self.log.debug('Creating cinder volume from snapshot...')
682+ snap = cinder.volume_snapshots.find(id=snap_id)
683+ vol_size = snap.size
684+ snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
685+ bootable = cinder.volumes.get(snap_vol_id).bootable
686+ elif not img_id and not src_vol_id and not snap_id:
687+ # Create volume
688+ self.log.debug('Creating cinder volume...')
689+ bootable = 'false'
690+ else:
691+ # Impossible combination of parameters
692+ msg = ('Invalid method use - name:{} size:{} img_id:{} '
693+ 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
694+ img_id, src_vol_id,
695+ snap_id))
696+ amulet.raise_status(amulet.FAIL, msg=msg)
697+
698+ # Create new volume
699+ try:
700+ vol_new = cinder.volumes.create(display_name=vol_name,
701+ imageRef=img_id,
702+ size=vol_size,
703+ source_volid=src_vol_id,
704+ snapshot_id=snap_id)
705+ vol_id = vol_new.id
706+ except Exception as e:
707+ msg = 'Failed to create volume: {}'.format(e)
708+ amulet.raise_status(amulet.FAIL, msg=msg)
709+
710+ # Wait for volume to reach available status
711+ ret = self.resource_reaches_status(cinder.volumes, vol_id,
712+ expected_stat="available",
713+ msg="Volume status wait")
714+ if not ret:
715+ msg = 'Cinder volume failed to reach expected state.'
716+ amulet.raise_status(amulet.FAIL, msg=msg)
717+
718+ # Re-validate new volume
719+ self.log.debug('Validating volume attributes...')
720+ val_vol_name = cinder.volumes.get(vol_id).display_name
721+ val_vol_boot = cinder.volumes.get(vol_id).bootable
722+ val_vol_stat = cinder.volumes.get(vol_id).status
723+ val_vol_size = cinder.volumes.get(vol_id).size
724+ msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
725+ '{} size:{}'.format(val_vol_name, vol_id,
726+ val_vol_stat, val_vol_boot,
727+ val_vol_size))
728+
729+ if val_vol_boot == bootable and val_vol_stat == 'available' \
730+ and val_vol_name == vol_name and val_vol_size == vol_size:
731+ self.log.debug(msg_attr)
732+ else:
733+ msg = ('Volume validation failed, {}'.format(msg_attr))
734+ amulet.raise_status(amulet.FAIL, msg=msg)
735+
736+ return vol_new
737+
738+ def delete_resource(self, resource, resource_id,
739+ msg="resource", max_wait=120):
740+ """Delete one openstack resource, such as one instance, keypair,
741+ image, volume, stack, etc., and confirm deletion within max wait time.
742+
743+ :param resource: pointer to os resource type, ex:glance_client.images
744+ :param resource_id: unique name or id for the openstack resource
745+ :param msg: text to identify purpose in logging
746+ :param max_wait: maximum wait time in seconds
747+ :returns: True if successful, otherwise False
748+ """
749+ self.log.debug('Deleting OpenStack resource '
750+ '{} ({})'.format(resource_id, msg))
751+ num_before = len(list(resource.list()))
752+ resource.delete(resource_id)
753+
754+ tries = 0
755+ num_after = len(list(resource.list()))
756+ while num_after != (num_before - 1) and tries < (max_wait / 4):
757+ self.log.debug('{} delete check: '
758+ '{} [{}:{}] {}'.format(msg, tries,
759+ num_before,
760+ num_after,
761+ resource_id))
762+ time.sleep(4)
763+ num_after = len(list(resource.list()))
764+ tries += 1
765+
766+ self.log.debug('{}: expected, actual count = {}, '
767+ '{}'.format(msg, num_before - 1, num_after))
768+
769+ if num_after == (num_before - 1):
770+ return True
771+ else:
772+ self.log.error('{} delete timed out'.format(msg))
773+ return False
774+
775+ def resource_reaches_status(self, resource, resource_id,
776+ expected_stat='available',
777+ msg='resource', max_wait=120):
778+ """Wait for an openstack resources status to reach an
779+ expected status within a specified time. Useful to confirm that
780+ nova instances, cinder vols, snapshots, glance images, heat stacks
781+ and other resources eventually reach the expected status.
782+
783+ :param resource: pointer to os resource type, ex: heat_client.stacks
784+ :param resource_id: unique id for the openstack resource
785+ :param expected_stat: status to expect resource to reach
786+ :param msg: text to identify purpose in logging
787+ :param max_wait: maximum wait time in seconds
788+ :returns: True if successful, False if status is not reached
789+ """
790+
791+ tries = 0
792+ resource_stat = resource.get(resource_id).status
793+ while resource_stat != expected_stat and tries < (max_wait / 4):
794+ self.log.debug('{} status check: '
795+ '{} [{}:{}] {}'.format(msg, tries,
796+ resource_stat,
797+ expected_stat,
798+ resource_id))
799+ time.sleep(4)
800+ resource_stat = resource.get(resource_id).status
801+ tries += 1
802+
803+ self.log.debug('{}: expected, actual status = {}, '
804+ '{}'.format(msg, resource_stat, expected_stat))
805+
806+ if resource_stat == expected_stat:
807+ return True
808+ else:
809+ self.log.debug('{} never reached expected status: '
810+ '{}'.format(resource_id, expected_stat))
811+ return False
812+
813+ def get_ceph_osd_id_cmd(self, index):
814+ """Produce a shell command that will return a ceph-osd id."""
815+ return ("`initctl list | grep 'ceph-osd ' | "
816+ "awk 'NR=={} {{ print $2 }}' | "
817+ "grep -o '[0-9]*'`".format(index + 1))
818+
819+ def get_ceph_pools(self, sentry_unit):
820+ """Return a dict of ceph pools from a single ceph unit, with
821+ pool name as keys, pool id as vals."""
822+ pools = {}
823+ cmd = 'sudo ceph osd lspools'
824+ output, code = sentry_unit.run(cmd)
825+ if code != 0:
826+ msg = ('{} `{}` returned {} '
827+ '{}'.format(sentry_unit.info['unit_name'],
828+ cmd, code, output))
829+ amulet.raise_status(amulet.FAIL, msg=msg)
830+
831+ # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
832+ for pool in str(output).split(','):
833+ pool_id_name = pool.split(' ')
834+ if len(pool_id_name) == 2:
835+ pool_id = pool_id_name[0]
836+ pool_name = pool_id_name[1]
837+ pools[pool_name] = int(pool_id)
838+
839+ self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
840+ pools))
841+ return pools
842+
843+ def get_ceph_df(self, sentry_unit):
844+ """Return dict of ceph df json output, including ceph pool state.
845+
846+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
847+ :returns: Dict of ceph df output
848+ """
849+ cmd = 'sudo ceph df --format=json'
850+ output, code = sentry_unit.run(cmd)
851+ if code != 0:
852+ msg = ('{} `{}` returned {} '
853+ '{}'.format(sentry_unit.info['unit_name'],
854+ cmd, code, output))
855+ amulet.raise_status(amulet.FAIL, msg=msg)
856+ return json.loads(output)
857+
858+ def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
859+ """Take a sample of attributes of a ceph pool, returning ceph
860+ pool name, object count and disk space used for the specified
861+ pool ID number.
862+
863+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
864+ :param pool_id: Ceph pool ID
865+ :returns: List of pool name, object count, kb disk space used
866+ """
867+ df = self.get_ceph_df(sentry_unit)
868+ pool_name = df['pools'][pool_id]['name']
869+ obj_count = df['pools'][pool_id]['stats']['objects']
870+ kb_used = df['pools'][pool_id]['stats']['kb_used']
871+ self.log.debug('Ceph {} pool (ID {}): {} objects, '
872+ '{} kb used'.format(pool_name, pool_id,
873+ obj_count, kb_used))
874+ return pool_name, obj_count, kb_used
875+
876+ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
877+ """Validate ceph pool samples taken over time, such as pool
878+ object counts or pool kb used, before adding, after adding, and
879+ after deleting items which affect those pool attributes. The
880+ 2nd element is expected to be greater than the 1st; 3rd is expected
881+ to be less than the 2nd.
882+
883+ :param samples: List containing 3 data samples
884+ :param sample_type: String for logging and usage context
885+ :returns: None if successful, Failure message otherwise
886+ """
887+ original, created, deleted = range(3)
888+ if samples[created] <= samples[original] or \
889+ samples[deleted] >= samples[created]:
890+ return ('Ceph {} samples ({}) '
891+ 'unexpected.'.format(sample_type, samples))
892+ else:
893+ self.log.debug('Ceph {} samples (OK): '
894+ '{}'.format(sample_type, samples))
895+ return None
896+
897+ # rabbitmq/amqp specific helpers:
898+
899+ def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
900+ """Wait for rmq units extended status to show cluster readiness,
901+ after an optional initial sleep period. Initial sleep is likely
902+ necessary to be effective following a config change, as status
903+ message may not instantly update to non-ready."""
904+
905+ if init_sleep:
906+ time.sleep(init_sleep)
907+
908+ message = re.compile('^Unit is ready and clustered$')
909+ deployment._auto_wait_for_status(message=message,
910+ timeout=timeout,
911+ include_only=['rabbitmq-server'])
912+
913+ def add_rmq_test_user(self, sentry_units,
914+ username="testuser1", password="changeme"):
915+ """Add a test user via the first rmq juju unit, check connection as
916+ the new user against all sentry units.
917+
918+ :param sentry_units: list of sentry unit pointers
919+ :param username: amqp user name, default to testuser1
920+ :param password: amqp user password
921+ :returns: None if successful. Raise on error.
922+ """
923+ self.log.debug('Adding rmq user ({})...'.format(username))
924+
925+ # Check that user does not already exist
926+ cmd_user_list = 'rabbitmqctl list_users'
927+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
928+ if username in output:
929+ self.log.warning('User ({}) already exists, returning '
930+ 'gracefully.'.format(username))
931+ return
932+
933+ perms = '".*" ".*" ".*"'
934+ cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
935+ 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
936+
937+ # Add user via first unit
938+ for cmd in cmds:
939+ output, _ = self.run_cmd_unit(sentry_units[0], cmd)
940+
941+ # Check connection against the other sentry_units
942+ self.log.debug('Checking user connect against units...')
943+ for sentry_unit in sentry_units:
944+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
945+ username=username,
946+ password=password)
947+ connection.close()
948+
949+ def delete_rmq_test_user(self, sentry_units, username="testuser1"):
950+ """Delete a rabbitmq user via the first rmq juju unit.
951+
952+ :param sentry_units: list of sentry unit pointers
953+ :param username: amqp user name, default to testuser1
954+ :param password: amqp user password
955+ :returns: None if successful or no such user.
956+ """
957+ self.log.debug('Deleting rmq user ({})...'.format(username))
958+
959+ # Check that the user exists
960+ cmd_user_list = 'rabbitmqctl list_users'
961+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
962+
963+ if username not in output:
964+ self.log.warning('User ({}) does not exist, returning '
965+ 'gracefully.'.format(username))
966+ return
967+
968+ # Delete the user
969+ cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
970+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
971+
972+ def get_rmq_cluster_status(self, sentry_unit):
973+ """Execute rabbitmq cluster status command on a unit and return
974+ the full output.
975+
976+ :param unit: sentry unit
977+ :returns: String containing console output of cluster status command
978+ """
979+ cmd = 'rabbitmqctl cluster_status'
980+ output, _ = self.run_cmd_unit(sentry_unit, cmd)
981+ self.log.debug('{} cluster_status:\n{}'.format(
982+ sentry_unit.info['unit_name'], output))
983+ return str(output)
984+
985+ def get_rmq_cluster_running_nodes(self, sentry_unit):
986+ """Parse rabbitmqctl cluster_status output string, return list of
987+ running rabbitmq cluster nodes.
988+
989+ :param unit: sentry unit
990+ :returns: List containing node names of running nodes
991+ """
992+ # NOTE(beisner): rabbitmqctl cluster_status output is not
993+ # json-parsable, do string chop foo, then json.loads that.
994+ str_stat = self.get_rmq_cluster_status(sentry_unit)
995+ if 'running_nodes' in str_stat:
996+ pos_start = str_stat.find("{running_nodes,") + 15
997+ pos_end = str_stat.find("]},", pos_start) + 1
998+ str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
999+ run_nodes = json.loads(str_run_nodes)
1000+ return run_nodes
1001+ else:
1002+ return []
1003+
1004+ def validate_rmq_cluster_running_nodes(self, sentry_units):
1005+ """Check that all rmq unit hostnames are represented in the
1006+ cluster_status output of all units.
1007+
1008+ :param host_names: dict of juju unit names to host names
1009+ :param units: list of sentry unit pointers (all rmq units)
1010+ :returns: None if successful, otherwise return error message
1011+ """
1012+ host_names = self.get_unit_hostnames(sentry_units)
1013+ errors = []
1014+
1015+ # Query every unit for cluster_status running nodes
1016+ for query_unit in sentry_units:
1017+ query_unit_name = query_unit.info['unit_name']
1018+ running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
1019+
1020+ # Confirm that every unit is represented in the queried unit's
1021+ # cluster_status running nodes output.
1022+ for validate_unit in sentry_units:
1023+ val_host_name = host_names[validate_unit.info['unit_name']]
1024+ val_node_name = 'rabbit@{}'.format(val_host_name)
1025+
1026+ if val_node_name not in running_nodes:
1027+ errors.append('Cluster member check failed on {}: {} not '
1028+ 'in {}\n'.format(query_unit_name,
1029+ val_node_name,
1030+ running_nodes))
1031+ if errors:
1032+ return ''.join(errors)
1033+
1034+ def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
1035+ """Check a single juju rmq unit for ssl and port in the config file."""
1036+ host = sentry_unit.info['public-address']
1037+ unit_name = sentry_unit.info['unit_name']
1038+
1039+ conf_file = '/etc/rabbitmq/rabbitmq.config'
1040+ conf_contents = str(self.file_contents_safe(sentry_unit,
1041+ conf_file, max_wait=16))
1042+ # Checks
1043+ conf_ssl = 'ssl' in conf_contents
1044+ conf_port = str(port) in conf_contents
1045+
1046+ # Port explicitly checked in config
1047+ if port and conf_port and conf_ssl:
1048+ self.log.debug('SSL is enabled @{}:{} '
1049+ '({})'.format(host, port, unit_name))
1050+ return True
1051+ elif port and not conf_port and conf_ssl:
1052+ self.log.debug('SSL is enabled @{} but not on port {} '
1053+ '({})'.format(host, port, unit_name))
1054+ return False
1055+ # Port not checked (useful when checking that ssl is disabled)
1056+ elif not port and conf_ssl:
1057+ self.log.debug('SSL is enabled @{}:{} '
1058+ '({})'.format(host, port, unit_name))
1059+ return True
1060+ elif not conf_ssl:
1061+ self.log.debug('SSL not enabled @{}:{} '
1062+ '({})'.format(host, port, unit_name))
1063+ return False
1064+ else:
1065+ msg = ('Unknown condition when checking SSL status @{}:{} '
1066+ '({})'.format(host, port, unit_name))
1067+ amulet.raise_status(amulet.FAIL, msg)
1068+
1069+ def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
1070+ """Check that ssl is enabled on rmq juju sentry units.
1071+
1072+ :param sentry_units: list of all rmq sentry units
1073+ :param port: optional ssl port override to validate
1074+ :returns: None if successful, otherwise return error message
1075+ """
1076+ for sentry_unit in sentry_units:
1077+ if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
1078+ return ('Unexpected condition: ssl is disabled on unit '
1079+ '({})'.format(sentry_unit.info['unit_name']))
1080+ return None
1081+
1082+ def validate_rmq_ssl_disabled_units(self, sentry_units):
1083+ """Check that ssl is enabled on listed rmq juju sentry units.
1084+
1085+ :param sentry_units: list of all rmq sentry units
1086+ :returns: True if successful. Raise on error.
1087+ """
1088+ for sentry_unit in sentry_units:
1089+ if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
1090+ return ('Unexpected condition: ssl is enabled on unit '
1091+ '({})'.format(sentry_unit.info['unit_name']))
1092+ return None
1093+
1094+ def configure_rmq_ssl_on(self, sentry_units, deployment,
1095+ port=None, max_wait=60):
1096+ """Turn ssl charm config option on, with optional non-default
1097+ ssl port specification. Confirm that it is enabled on every
1098+ unit.
1099+
1100+ :param sentry_units: list of sentry units
1101+ :param deployment: amulet deployment object pointer
1102+ :param port: amqp port, use defaults if None
1103+ :param max_wait: maximum time to wait in seconds to confirm
1104+ :returns: None if successful. Raise on error.
1105+ """
1106+ self.log.debug('Setting ssl charm config option: on')
1107+
1108+ # Enable RMQ SSL
1109+ config = {'ssl': 'on'}
1110+ if port:
1111+ config['ssl_port'] = port
1112+
1113+ deployment.d.configure('rabbitmq-server', config)
1114+
1115+ # Wait for unit status
1116+ self.rmq_wait_for_cluster(deployment)
1117+
1118+ # Confirm
1119+ tries = 0
1120+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
1121+ while ret and tries < (max_wait / 4):
1122+ time.sleep(4)
1123+ self.log.debug('Attempt {}: {}'.format(tries, ret))
1124+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
1125+ tries += 1
1126+
1127+ if ret:
1128+ amulet.raise_status(amulet.FAIL, ret)
1129+
1130+ def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
1131+ """Turn ssl charm config option off, confirm that it is disabled
1132+ on every unit.
1133+
1134+ :param sentry_units: list of sentry units
1135+ :param deployment: amulet deployment object pointer
1136+ :param max_wait: maximum time to wait in seconds to confirm
1137+ :returns: None if successful. Raise on error.
1138+ """
1139+ self.log.debug('Setting ssl charm config option: off')
1140+
1141+ # Disable RMQ SSL
1142+ config = {'ssl': 'off'}
1143+ deployment.d.configure('rabbitmq-server', config)
1144+
1145+ # Wait for unit status
1146+ self.rmq_wait_for_cluster(deployment)
1147+
1148+ # Confirm
1149+ tries = 0
1150+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
1151+ while ret and tries < (max_wait / 4):
1152+ time.sleep(4)
1153+ self.log.debug('Attempt {}: {}'.format(tries, ret))
1154+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
1155+ tries += 1
1156+
1157+ if ret:
1158+ amulet.raise_status(amulet.FAIL, ret)
1159+
1160+ def connect_amqp_by_unit(self, sentry_unit, ssl=False,
1161+ port=None, fatal=True,
1162+ username="testuser1", password="changeme"):
1163+ """Establish and return a pika amqp connection to the rabbitmq service
1164+ running on a rmq juju unit.
1165+
1166+ :param sentry_unit: sentry unit pointer
1167+ :param ssl: boolean, default to False
1168+ :param port: amqp port, use defaults if None
1169+ :param fatal: boolean, default to True (raises on connect error)
1170+ :param username: amqp user name, default to testuser1
1171+ :param password: amqp user password
1172+ :returns: pika amqp connection pointer or None if failed and non-fatal
1173+ """
1174+ host = sentry_unit.info['public-address']
1175+ unit_name = sentry_unit.info['unit_name']
1176+
1177+ # Default port logic if port is not specified
1178+ if ssl and not port:
1179+ port = 5671
1180+ elif not ssl and not port:
1181+ port = 5672
1182+
1183+ self.log.debug('Connecting to amqp on {}:{} ({}) as '
1184+ '{}...'.format(host, port, unit_name, username))
1185+
1186+ try:
1187+ credentials = pika.PlainCredentials(username, password)
1188+ parameters = pika.ConnectionParameters(host=host, port=port,
1189+ credentials=credentials,
1190+ ssl=ssl,
1191+ connection_attempts=3,
1192+ retry_delay=5,
1193+ socket_timeout=1)
1194+ connection = pika.BlockingConnection(parameters)
1195+ assert connection.server_properties['product'] == 'RabbitMQ'
1196+ self.log.debug('Connect OK')
1197+ return connection
1198+ except Exception as e:
1199+ msg = ('amqp connection failed to {}:{} as '
1200+ '{} ({})'.format(host, port, username, str(e)))
1201+ if fatal:
1202+ amulet.raise_status(amulet.FAIL, msg)
1203+ else:
1204+ self.log.warn(msg)
1205+ return None
1206+
1207+ def publish_amqp_message_by_unit(self, sentry_unit, message,
1208+ queue="test", ssl=False,
1209+ username="testuser1",
1210+ password="changeme",
1211+ port=None):
1212+ """Publish an amqp message to a rmq juju unit.
1213+
1214+ :param sentry_unit: sentry unit pointer
1215+ :param message: amqp message string
1216+ :param queue: message queue, default to test
1217+ :param username: amqp user name, default to testuser1
1218+ :param password: amqp user password
1219+ :param ssl: boolean, default to False
1220+ :param port: amqp port, use defaults if None
1221+ :returns: None. Raises exception if publish failed.
1222+ """
1223+ self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
1224+ message))
1225+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
1226+ port=port,
1227+ username=username,
1228+ password=password)
1229+
1230+ # NOTE(beisner): extra debug here re: pika hang potential:
1231+ # https://github.com/pika/pika/issues/297
1232+ # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
1233+ self.log.debug('Defining channel...')
1234+ channel = connection.channel()
1235+ self.log.debug('Declaring queue...')
1236+ channel.queue_declare(queue=queue, auto_delete=False, durable=True)
1237+ self.log.debug('Publishing message...')
1238+ channel.basic_publish(exchange='', routing_key=queue, body=message)
1239+ self.log.debug('Closing channel...')
1240+ channel.close()
1241+ self.log.debug('Closing connection...')
1242+ connection.close()
1243+
1244+ def get_amqp_message_by_unit(self, sentry_unit, queue="test",
1245+ username="testuser1",
1246+ password="changeme",
1247+ ssl=False, port=None):
1248+ """Get an amqp message from a rmq juju unit.
1249+
1250+ :param sentry_unit: sentry unit pointer
1251+ :param queue: message queue, default to test
1252+ :param username: amqp user name, default to testuser1
1253+ :param password: amqp user password
1254+ :param ssl: boolean, default to False
1255+ :param port: amqp port, use defaults if None
1256+ :returns: amqp message body as string. Raise if get fails.
1257+ """
1258+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
1259+ port=port,
1260+ username=username,
1261+ password=password)
1262+ channel = connection.channel()
1263+ method_frame, _, body = channel.basic_get(queue)
1264+
1265+ if method_frame:
1266+ self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
1267+ body))
1268+ channel.basic_ack(method_frame.delivery_tag)
1269+ channel.close()
1270+ connection.close()
1271+ return body
1272+ else:
1273+ msg = 'No message retrieved.'
1274+ amulet.raise_status(amulet.FAIL, msg)
1275
1276=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
1277--- hooks/charmhelpers/contrib/openstack/context.py 2015-06-10 07:35:12 +0000
1278+++ hooks/charmhelpers/contrib/openstack/context.py 2015-12-01 15:05:49 +0000
1279@@ -14,6 +14,7 @@
1280 # You should have received a copy of the GNU Lesser General Public License
1281 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1282
1283+import glob
1284 import json
1285 import os
1286 import re
1287@@ -50,6 +51,8 @@
1288 from charmhelpers.core.strutils import bool_from_string
1289
1290 from charmhelpers.core.host import (
1291+ get_bond_master,
1292+ is_phy_iface,
1293 list_nics,
1294 get_nic_hwaddr,
1295 mkdir,
1296@@ -122,21 +125,24 @@
1297 of specifying multiple key value pairs within the same string. For
1298 example, a string in the format of 'key1=value1, key2=value2' will
1299 return a dict of:
1300- {'key1': 'value1',
1301- 'key2': 'value2'}.
1302+
1303+ {'key1': 'value1',
1304+ 'key2': 'value2'}.
1305
1306 2. A string in the above format, but supporting a comma-delimited list
1307 of values for the same key. For example, a string in the format of
1308 'key1=value1, key2=value3,value4,value5' will return a dict of:
1309- {'key1', 'value1',
1310- 'key2', 'value2,value3,value4'}
1311+
1312+ {'key1', 'value1',
1313+ 'key2', 'value2,value3,value4'}
1314
1315 3. A string containing a colon character (:) prior to an equal
1316 character (=) will be treated as yaml and parsed as such. This can be
1317 used to specify more complex key value pairs. For example,
1318 a string in the format of 'key1: subkey1=value1, subkey2=value2' will
1319 return a dict of:
1320- {'key1', 'subkey1=value1, subkey2=value2'}
1321+
1322+ {'key1', 'subkey1=value1, subkey2=value2'}
1323
1324 The provided config_flags string may be a list of comma-separated values
1325 which themselves may be comma-separated list of values.
1326@@ -189,10 +195,50 @@
1327 class OSContextGenerator(object):
1328 """Base class for all context generators."""
1329 interfaces = []
1330+ related = False
1331+ complete = False
1332+ missing_data = []
1333
1334 def __call__(self):
1335 raise NotImplementedError
1336
1337+ def context_complete(self, ctxt):
1338+ """Check for missing data for the required context data.
1339+ Set self.missing_data if it exists and return False.
1340+ Set self.complete if no missing data and return True.
1341+ """
1342+ # Fresh start
1343+ self.complete = False
1344+ self.missing_data = []
1345+ for k, v in six.iteritems(ctxt):
1346+ if v is None or v == '':
1347+ if k not in self.missing_data:
1348+ self.missing_data.append(k)
1349+
1350+ if self.missing_data:
1351+ self.complete = False
1352+ log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
1353+ else:
1354+ self.complete = True
1355+ return self.complete
1356+
1357+ def get_related(self):
1358+ """Check if any of the context interfaces have relation ids.
1359+ Set self.related and return True if one of the interfaces
1360+ has relation ids.
1361+ """
1362+ # Fresh start
1363+ self.related = False
1364+ try:
1365+ for interface in self.interfaces:
1366+ if relation_ids(interface):
1367+ self.related = True
1368+ return self.related
1369+ except AttributeError as e:
1370+ log("{} {}"
1371+ "".format(self, e), 'INFO')
1372+ return self.related
1373+
1374
1375 class SharedDBContext(OSContextGenerator):
1376 interfaces = ['shared-db']
1377@@ -208,6 +254,7 @@
1378 self.database = database
1379 self.user = user
1380 self.ssl_dir = ssl_dir
1381+ self.rel_name = self.interfaces[0]
1382
1383 def __call__(self):
1384 self.database = self.database or config('database')
1385@@ -240,7 +287,8 @@
1386 if self.relation_prefix:
1387 password_setting = self.relation_prefix + '_password'
1388
1389- for rid in relation_ids('shared-db'):
1390+ for rid in relation_ids(self.interfaces[0]):
1391+ self.related = True
1392 for unit in related_units(rid):
1393 rdata = relation_get(rid=rid, unit=unit)
1394 host = rdata.get('db_host')
1395@@ -252,7 +300,7 @@
1396 'database_password': rdata.get(password_setting),
1397 'database_type': 'mysql'
1398 }
1399- if context_complete(ctxt):
1400+ if self.context_complete(ctxt):
1401 db_ssl(rdata, ctxt, self.ssl_dir)
1402 return ctxt
1403 return {}
1404@@ -273,6 +321,7 @@
1405
1406 ctxt = {}
1407 for rid in relation_ids(self.interfaces[0]):
1408+ self.related = True
1409 for unit in related_units(rid):
1410 rel_host = relation_get('host', rid=rid, unit=unit)
1411 rel_user = relation_get('user', rid=rid, unit=unit)
1412@@ -282,7 +331,7 @@
1413 'database_user': rel_user,
1414 'database_password': rel_passwd,
1415 'database_type': 'postgresql'}
1416- if context_complete(ctxt):
1417+ if self.context_complete(ctxt):
1418 return ctxt
1419
1420 return {}
1421@@ -343,6 +392,7 @@
1422 ctxt['signing_dir'] = cachedir
1423
1424 for rid in relation_ids(self.rel_name):
1425+ self.related = True
1426 for unit in related_units(rid):
1427 rdata = relation_get(rid=rid, unit=unit)
1428 serv_host = rdata.get('service_host')
1429@@ -361,7 +411,7 @@
1430 'service_protocol': svc_protocol,
1431 'auth_protocol': auth_protocol})
1432
1433- if context_complete(ctxt):
1434+ if self.context_complete(ctxt):
1435 # NOTE(jamespage) this is required for >= icehouse
1436 # so a missing value just indicates keystone needs
1437 # upgrading
1438@@ -400,6 +450,7 @@
1439 ctxt = {}
1440 for rid in relation_ids(self.rel_name):
1441 ha_vip_only = False
1442+ self.related = True
1443 for unit in related_units(rid):
1444 if relation_get('clustered', rid=rid, unit=unit):
1445 ctxt['clustered'] = True
1446@@ -432,7 +483,7 @@
1447 ha_vip_only = relation_get('ha-vip-only',
1448 rid=rid, unit=unit) is not None
1449
1450- if context_complete(ctxt):
1451+ if self.context_complete(ctxt):
1452 if 'rabbit_ssl_ca' in ctxt:
1453 if not self.ssl_dir:
1454 log("Charm not setup for ssl support but ssl ca "
1455@@ -464,7 +515,7 @@
1456 ctxt['oslo_messaging_flags'] = config_flags_parser(
1457 oslo_messaging_flags)
1458
1459- if not context_complete(ctxt):
1460+ if not self.complete:
1461 return {}
1462
1463 return ctxt
1464@@ -480,13 +531,15 @@
1465
1466 log('Generating template context for ceph', level=DEBUG)
1467 mon_hosts = []
1468- auth = None
1469- key = None
1470- use_syslog = str(config('use-syslog')).lower()
1471+ ctxt = {
1472+ 'use_syslog': str(config('use-syslog')).lower()
1473+ }
1474 for rid in relation_ids('ceph'):
1475 for unit in related_units(rid):
1476- auth = relation_get('auth', rid=rid, unit=unit)
1477- key = relation_get('key', rid=rid, unit=unit)
1478+ if not ctxt.get('auth'):
1479+ ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
1480+ if not ctxt.get('key'):
1481+ ctxt['key'] = relation_get('key', rid=rid, unit=unit)
1482 ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
1483 unit=unit)
1484 unit_priv_addr = relation_get('private-address', rid=rid,
1485@@ -495,15 +548,12 @@
1486 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
1487 mon_hosts.append(ceph_addr)
1488
1489- ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
1490- 'auth': auth,
1491- 'key': key,
1492- 'use_syslog': use_syslog}
1493+ ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
1494
1495 if not os.path.isdir('/etc/ceph'):
1496 os.mkdir('/etc/ceph')
1497
1498- if not context_complete(ctxt):
1499+ if not self.context_complete(ctxt):
1500 return {}
1501
1502 ensure_packages(['ceph-common'])
1503@@ -890,9 +940,32 @@
1504 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
1505 return ctxt
1506
1507+ def pg_ctxt(self):
1508+ driver = neutron_plugin_attribute(self.plugin, 'driver',
1509+ self.network_manager)
1510+ config = neutron_plugin_attribute(self.plugin, 'config',
1511+ self.network_manager)
1512+ ovs_ctxt = {'core_plugin': driver,
1513+ 'neutron_plugin': 'plumgrid',
1514+ 'neutron_security_groups': self.neutron_security_groups,
1515+ 'local_ip': unit_private_ip(),
1516+ 'config': config}
1517+ return ovs_ctxt
1518+
1519+ def midonet_ctxt(self):
1520+ driver = neutron_plugin_attribute(self.plugin, 'driver',
1521+ self.network_manager)
1522+ midonet_config = neutron_plugin_attribute(self.plugin, 'config',
1523+ self.network_manager)
1524+ mido_ctxt = {'core_plugin': driver,
1525+ 'neutron_plugin': 'midonet',
1526+ 'neutron_security_groups': self.neutron_security_groups,
1527+ 'local_ip': unit_private_ip(),
1528+ 'config': midonet_config}
1529+
1530+ return mido_ctxt
1531+
1532 def __call__(self):
1533- self._ensure_packages()
1534-
1535 if self.network_manager not in ['quantum', 'neutron']:
1536 return {}
1537
1538@@ -911,6 +984,10 @@
1539 ctxt.update(self.calico_ctxt())
1540 elif self.plugin == 'vsp':
1541 ctxt.update(self.nuage_ctxt())
1542+ elif self.plugin == 'plumgrid':
1543+ ctxt.update(self.pg_ctxt())
1544+ elif self.plugin == 'midonet':
1545+ ctxt.update(self.midonet_ctxt())
1546
1547 alchemy_flags = config('neutron-alchemy-flags')
1548 if alchemy_flags:
1549@@ -922,7 +999,6 @@
1550
1551
1552 class NeutronPortContext(OSContextGenerator):
1553- NIC_PREFIXES = ['eth', 'bond']
1554
1555 def resolve_ports(self, ports):
1556 """Resolve NICs not yet bound to bridge(s)
1557@@ -934,7 +1010,18 @@
1558
1559 hwaddr_to_nic = {}
1560 hwaddr_to_ip = {}
1561- for nic in list_nics(self.NIC_PREFIXES):
1562+ for nic in list_nics():
1563+ # Ignore virtual interfaces (bond masters will be identified from
1564+ # their slaves)
1565+ if not is_phy_iface(nic):
1566+ continue
1567+
1568+ _nic = get_bond_master(nic)
1569+ if _nic:
1570+ log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
1571+ level=DEBUG)
1572+ nic = _nic
1573+
1574 hwaddr = get_nic_hwaddr(nic)
1575 hwaddr_to_nic[hwaddr] = nic
1576 addresses = get_ipv4_addr(nic, fatal=False)
1577@@ -960,7 +1047,8 @@
1578 # trust it to be the real external network).
1579 resolved.append(entry)
1580
1581- return resolved
1582+ # Ensure no duplicates
1583+ return list(set(resolved))
1584
1585
1586 class OSConfigFlagContext(OSContextGenerator):
1587@@ -1000,6 +1088,20 @@
1588 config_flags_parser(config_flags)}
1589
1590
1591+class LibvirtConfigFlagsContext(OSContextGenerator):
1592+ """
1593+ This context provides support for extending
1594+ the libvirt section through user-defined flags.
1595+ """
1596+ def __call__(self):
1597+ ctxt = {}
1598+ libvirt_flags = config('libvirt-flags')
1599+ if libvirt_flags:
1600+ ctxt['libvirt_flags'] = config_flags_parser(
1601+ libvirt_flags)
1602+ return ctxt
1603+
1604+
1605 class SubordinateConfigContext(OSContextGenerator):
1606
1607 """
1608@@ -1032,7 +1134,7 @@
1609
1610 ctxt = {
1611 ... other context ...
1612- 'subordinate_config': {
1613+ 'subordinate_configuration': {
1614 'DEFAULT': {
1615 'key1': 'value1',
1616 },
1617@@ -1050,13 +1152,22 @@
1618 :param config_file : Service's config file to query sections
1619 :param interface : Subordinate interface to inspect
1620 """
1621- self.service = service
1622 self.config_file = config_file
1623- self.interface = interface
1624+ if isinstance(service, list):
1625+ self.services = service
1626+ else:
1627+ self.services = [service]
1628+ if isinstance(interface, list):
1629+ self.interfaces = interface
1630+ else:
1631+ self.interfaces = [interface]
1632
1633 def __call__(self):
1634 ctxt = {'sections': {}}
1635- for rid in relation_ids(self.interface):
1636+ rids = []
1637+ for interface in self.interfaces:
1638+ rids.extend(relation_ids(interface))
1639+ for rid in rids:
1640 for unit in related_units(rid):
1641 sub_config = relation_get('subordinate_configuration',
1642 rid=rid, unit=unit)
1643@@ -1064,33 +1175,37 @@
1644 try:
1645 sub_config = json.loads(sub_config)
1646 except:
1647- log('Could not parse JSON from subordinate_config '
1648- 'setting from %s' % rid, level=ERROR)
1649- continue
1650-
1651- if self.service not in sub_config:
1652- log('Found subordinate_config on %s but it contained'
1653- 'nothing for %s service' % (rid, self.service),
1654- level=INFO)
1655- continue
1656-
1657- sub_config = sub_config[self.service]
1658- if self.config_file not in sub_config:
1659- log('Found subordinate_config on %s but it contained'
1660- 'nothing for %s' % (rid, self.config_file),
1661- level=INFO)
1662- continue
1663-
1664- sub_config = sub_config[self.config_file]
1665- for k, v in six.iteritems(sub_config):
1666- if k == 'sections':
1667- for section, config_dict in six.iteritems(v):
1668- log("adding section '%s'" % (section),
1669- level=DEBUG)
1670- ctxt[k][section] = config_dict
1671- else:
1672- ctxt[k] = v
1673-
1674+ log('Could not parse JSON from '
1675+ 'subordinate_configuration setting from %s'
1676+ % rid, level=ERROR)
1677+ continue
1678+
1679+ for service in self.services:
1680+ if service not in sub_config:
1681+ log('Found subordinate_configuration on %s but it '
1682+ 'contained nothing for %s service'
1683+ % (rid, service), level=INFO)
1684+ continue
1685+
1686+ sub_config = sub_config[service]
1687+ if self.config_file not in sub_config:
1688+ log('Found subordinate_configuration on %s but it '
1689+ 'contained nothing for %s'
1690+ % (rid, self.config_file), level=INFO)
1691+ continue
1692+
1693+ sub_config = sub_config[self.config_file]
1694+ for k, v in six.iteritems(sub_config):
1695+ if k == 'sections':
1696+ for section, config_list in six.iteritems(v):
1697+ log("adding section '%s'" % (section),
1698+ level=DEBUG)
1699+ if ctxt[k].get(section):
1700+ ctxt[k][section].extend(config_list)
1701+ else:
1702+ ctxt[k][section] = config_list
1703+ else:
1704+ ctxt[k] = v
1705 log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
1706 return ctxt
1707
1708@@ -1267,15 +1382,19 @@
1709 def __call__(self):
1710 ports = config('data-port')
1711 if ports:
1712+ # Map of {port/mac:bridge}
1713 portmap = parse_data_port_mappings(ports)
1714- ports = portmap.values()
1715+ ports = portmap.keys()
1716+ # Resolve provided ports or mac addresses and filter out those
1717+ # already attached to a bridge.
1718 resolved = self.resolve_ports(ports)
1719+ # FIXME: is this necessary?
1720 normalized = {get_nic_hwaddr(port): port for port in resolved
1721 if port not in ports}
1722 normalized.update({port: port for port in resolved
1723 if port in ports})
1724 if resolved:
1725- return {bridge: normalized[port] for bridge, port in
1726+ return {normalized[port]: bridge for port, bridge in
1727 six.iteritems(portmap) if port in normalized.keys()}
1728
1729 return None
1730@@ -1286,12 +1405,22 @@
1731 def __call__(self):
1732 ctxt = {}
1733 mappings = super(PhyNICMTUContext, self).__call__()
1734- if mappings and mappings.values():
1735- ports = mappings.values()
1736+ if mappings and mappings.keys():
1737+ ports = sorted(mappings.keys())
1738 napi_settings = NeutronAPIContext()()
1739 mtu = napi_settings.get('network_device_mtu')
1740+ all_ports = set()
1741+            # If any of the ports is a vlan device, its underlying device must have
1742+ # mtu applied first.
1743+ for port in ports:
1744+ for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
1745+ lport = os.path.basename(lport)
1746+ all_ports.add(lport.split('_')[1])
1747+
1748+ all_ports = list(all_ports)
1749+ all_ports.extend(ports)
1750 if mtu:
1751- ctxt["devs"] = '\\n'.join(ports)
1752+ ctxt["devs"] = '\\n'.join(all_ports)
1753 ctxt['mtu'] = mtu
1754
1755 return ctxt
1756@@ -1323,6 +1452,6 @@
1757 'auth_protocol':
1758 rdata.get('auth_protocol') or 'http',
1759 }
1760- if context_complete(ctxt):
1761+ if self.context_complete(ctxt):
1762 return ctxt
1763 return {}
1764
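The SubordinateConfigContext rework above accepts either a single service/interface or a list of each, and merges per-section config lists across relations. A minimal sketch of how a principal charm might drive it, using illustrative service and interface names rather than anything defined in this merge:

    from charmhelpers.contrib.openstack.context import SubordinateConfigContext

    # Merge subordinate_configuration from two interfaces into one context
    # used when rendering /etc/neutron/neutron.conf (names are examples).
    sub_ctxt = SubordinateConfigContext(
        service=['neutron', 'neutron-api'],
        config_file='/etc/neutron/neutron.conf',
        interface=['neutron-plugin', 'neutron-plugin-api-subordinate'])
    ctxt = sub_ctxt()
    # ctxt == {'sections': {'DEFAULT': [...], ...}} once related units have
    # published their subordinate_configuration JSON.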
1765=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
1766--- hooks/charmhelpers/contrib/openstack/neutron.py 2015-06-10 15:45:48 +0000
1767+++ hooks/charmhelpers/contrib/openstack/neutron.py 2015-12-01 15:05:49 +0000
1768@@ -195,6 +195,34 @@
1769 'packages': [],
1770 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
1771 'server_services': ['neutron-server']
1772+ },
1773+ 'plumgrid': {
1774+ 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
1775+ 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
1776+ 'contexts': [
1777+ context.SharedDBContext(user=config('database-user'),
1778+ database=config('database'),
1779+ ssl_dir=NEUTRON_CONF_DIR)],
1780+ 'services': [],
1781+ 'packages': ['plumgrid-lxc',
1782+ 'iovisor-dkms'],
1783+ 'server_packages': ['neutron-server',
1784+ 'neutron-plugin-plumgrid'],
1785+ 'server_services': ['neutron-server']
1786+ },
1787+ 'midonet': {
1788+ 'config': '/etc/neutron/plugins/midonet/midonet.ini',
1789+ 'driver': 'midonet.neutron.plugin.MidonetPluginV2',
1790+ 'contexts': [
1791+ context.SharedDBContext(user=config('neutron-database-user'),
1792+ database=config('neutron-database'),
1793+ relation_prefix='neutron',
1794+ ssl_dir=NEUTRON_CONF_DIR)],
1795+ 'services': [],
1796+ 'packages': [[headers_package()] + determine_dkms_package()],
1797+ 'server_packages': ['neutron-server',
1798+ 'python-neutron-plugin-midonet'],
1799+ 'server_services': ['neutron-server']
1800 }
1801 }
1802 if release >= 'icehouse':
1803@@ -255,17 +283,30 @@
1804 return 'neutron'
1805
1806
1807-def parse_mappings(mappings):
1808+def parse_mappings(mappings, key_rvalue=False):
1809+ """By default mappings are lvalue keyed.
1810+
1811+ If key_rvalue is True, the mapping will be reversed to allow multiple
1812+ configs for the same lvalue.
1813+ """
1814 parsed = {}
1815 if mappings:
1816 mappings = mappings.split()
1817 for m in mappings:
1818 p = m.partition(':')
1819- key = p[0].strip()
1820- if p[1]:
1821- parsed[key] = p[2].strip()
1822+
1823+ if key_rvalue:
1824+ key_index = 2
1825+ val_index = 0
1826+ # if there is no rvalue skip to next
1827+ if not p[1]:
1828+ continue
1829 else:
1830- parsed[key] = ''
1831+ key_index = 0
1832+ val_index = 2
1833+
1834+ key = p[key_index].strip()
1835+ parsed[key] = p[val_index].strip()
1836
1837 return parsed
1838
1839@@ -283,25 +324,25 @@
1840 def parse_data_port_mappings(mappings, default_bridge='br-data'):
1841 """Parse data port mappings.
1842
1843- Mappings must be a space-delimited list of bridge:port mappings.
1844+ Mappings must be a space-delimited list of bridge:port.
1845
1846- Returns dict of the form {bridge:port}.
1847+ Returns dict of the form {port:bridge} where ports may be mac addresses or
1848+ interface names.
1849 """
1850- _mappings = parse_mappings(mappings)
1851+
1852+ # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
1853+ # proposed for <port> since it may be a mac address which will differ
1854+    # across units thus allowing first-known-good to be chosen.
1855+ _mappings = parse_mappings(mappings, key_rvalue=True)
1856 if not _mappings or list(_mappings.values()) == ['']:
1857 if not mappings:
1858 return {}
1859
1860 # For backwards-compatibility we need to support port-only provided in
1861 # config.
1862- _mappings = {default_bridge: mappings.split()[0]}
1863-
1864- bridges = _mappings.keys()
1865- ports = _mappings.values()
1866- if len(set(bridges)) != len(bridges):
1867- raise Exception("It is not allowed to have more than one port "
1868- "configured on the same bridge")
1869-
1870+ _mappings = {mappings.split()[0]: default_bridge}
1871+
1872+ ports = _mappings.keys()
1873 if len(set(ports)) != len(ports):
1874 raise Exception("It is not allowed to have the same port configured "
1875 "on more than one bridge")
1876
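For reference, the reversed mapping direction introduced above behaves roughly as follows (values are illustrative; a data-port entry may also be a MAC address, which is why the port is now the key):

    from charmhelpers.contrib.openstack.neutron import parse_data_port_mappings

    # data-port config: "br-data:eth1 br-ex:aa:bb:cc:dd:ee:ff"
    mappings = parse_data_port_mappings('br-data:eth1 br-ex:aa:bb:cc:dd:ee:ff')
    # Old form: {'br-data': 'eth1', 'br-ex': 'aa:bb:cc:dd:ee:ff'}
    # New form: {'eth1': 'br-data', 'aa:bb:cc:dd:ee:ff': 'br-ex'}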
1877=== modified file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf'
1878--- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-06-10 07:35:12 +0000
1879+++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-12-01 15:05:49 +0000
1880@@ -5,11 +5,17 @@
1881 ###############################################################################
1882 [global]
1883 {% if auth -%}
1884- auth_supported = {{ auth }}
1885- keyring = /etc/ceph/$cluster.$name.keyring
1886- mon host = {{ mon_hosts }}
1887+auth_supported = {{ auth }}
1888+keyring = /etc/ceph/$cluster.$name.keyring
1889+mon host = {{ mon_hosts }}
1890 {% endif -%}
1891- log to syslog = {{ use_syslog }}
1892- err to syslog = {{ use_syslog }}
1893- clog to syslog = {{ use_syslog }}
1894+log to syslog = {{ use_syslog }}
1895+err to syslog = {{ use_syslog }}
1896+clog to syslog = {{ use_syslog }}
1897
1898+[client]
1899+{% if rbd_client_cache_settings -%}
1900+{% for key, value in rbd_client_cache_settings.iteritems() -%}
1901+{{ key }} = {{ value }}
1902+{% endfor -%}
1903+{%- endif %}
1904\ No newline at end of file
1905
1906=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
1907--- hooks/charmhelpers/contrib/openstack/templating.py 2015-06-11 14:20:09 +0000
1908+++ hooks/charmhelpers/contrib/openstack/templating.py 2015-12-01 15:05:49 +0000
1909@@ -18,7 +18,7 @@
1910
1911 import six
1912
1913-from charmhelpers.fetch import apt_install
1914+from charmhelpers.fetch import apt_install, apt_update
1915 from charmhelpers.core.hookenv import (
1916 log,
1917 ERROR,
1918@@ -29,39 +29,15 @@
1919 try:
1920 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
1921 except ImportError:
1922- # python-jinja2 may not be installed yet, or we're running unittests.
1923- FileSystemLoader = ChoiceLoader = Environment = exceptions = None
1924+ apt_update(fatal=True)
1925+ apt_install('python-jinja2', fatal=True)
1926+ from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
1927
1928
1929 class OSConfigException(Exception):
1930 pass
1931
1932
1933-def os_template_dirs(templates_dir, os_release):
1934- tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
1935- for rel in six.itervalues(OPENSTACK_CODENAMES)]
1936-
1937- if not os.path.isdir(templates_dir):
1938- log('Templates directory not found @ %s.' % templates_dir,
1939- level=ERROR)
1940- raise OSConfigException
1941- dirs = [templates_dir]
1942- helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
1943- if os.path.isdir(helper_templates):
1944- dirs.append(helper_templates)
1945-
1946- for rel, tmpl_dir in tmpl_dirs:
1947- if os.path.isdir(tmpl_dir):
1948- dirs.insert(0, tmpl_dir)
1949- if rel == os_release:
1950- break
1951- ch_templates = os.path.dirname(__file__) + '/charmhelpers/contrib/openstack/templates'
1952- dirs.append(ch_templates)
1953- log('Template search path: %s' %
1954- ' '.join(dirs), level=INFO)
1955- return dirs
1956-
1957-
1958 def get_loader(templates_dir, os_release):
1959 """
1960 Create a jinja2.ChoiceLoader containing template dirs up to
1961@@ -137,7 +113,7 @@
1962
1963 def complete_contexts(self):
1964 '''
1965- Return a list of interfaces that have atisfied contexts.
1966+ Return a list of interfaces that have satisfied contexts.
1967 '''
1968 if self._complete_contexts:
1969 return self._complete_contexts
1970@@ -318,3 +294,30 @@
1971 [interfaces.extend(i.complete_contexts())
1972 for i in six.itervalues(self.templates)]
1973 return interfaces
1974+
1975+ def get_incomplete_context_data(self, interfaces):
1976+ '''
1977+ Return dictionary of relation status of interfaces and any missing
1978+ required context data. Example:
1979+ {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
1980+ 'zeromq-configuration': {'related': False}}
1981+ '''
1982+ incomplete_context_data = {}
1983+
1984+ for i in six.itervalues(self.templates):
1985+ for context in i.contexts:
1986+ for interface in interfaces:
1987+ related = False
1988+ if interface in context.interfaces:
1989+ related = context.get_related()
1990+ missing_data = context.missing_data
1991+ if missing_data:
1992+ incomplete_context_data[interface] = {'missing_data': missing_data}
1993+ if related:
1994+ if incomplete_context_data.get(interface):
1995+ incomplete_context_data[interface].update({'related': True})
1996+ else:
1997+ incomplete_context_data[interface] = {'related': True}
1998+ else:
1999+ incomplete_context_data[interface] = {'related': False}
2000+ return incomplete_context_data
2001
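The new get_incomplete_context_data() is intended to be queried from a charm's registered OSConfigRenderer; a hedged sketch follows (interface names and template paths are examples, and this only works inside a Juju hook environment):

    from charmhelpers.contrib.openstack import context, templating

    configs = templating.OSConfigRenderer(templates_dir='templates/',
                                          openstack_release='icehouse')
    configs.register('/etc/neutron/neutron.conf',
                     [context.AMQPContext(), context.SharedDBContext()])

    # Which required interfaces are related, and what data is still missing?
    missing = configs.get_incomplete_context_data(['amqp', 'shared-db'])
    # e.g. {'amqp': {'related': True, 'missing_data': ['rabbitmq_password']},
    #       'shared-db': {'related': False}}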
2002=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
2003--- hooks/charmhelpers/contrib/openstack/utils.py 2015-06-17 12:23:31 +0000
2004+++ hooks/charmhelpers/contrib/openstack/utils.py 2015-12-01 15:05:49 +0000
2005@@ -1,5 +1,3 @@
2006-#!/usr/bin/python
2007-
2008 # Copyright 2014-2015 Canonical Limited.
2009 #
2010 # This file is part of charm-helpers.
2011@@ -24,9 +22,11 @@
2012 import json
2013 import os
2014 import sys
2015+import re
2016+
2017+import six
2018+import traceback
2019 import uuid
2020-
2021-import six
2022 import yaml
2023
2024 from charmhelpers.contrib.network import ip
2025@@ -36,13 +36,17 @@
2026 )
2027
2028 from charmhelpers.core.hookenv import (
2029+ action_fail,
2030+ action_set,
2031 config,
2032 log as juju_log,
2033 charm_dir,
2034 INFO,
2035+ related_units,
2036 relation_ids,
2037- related_units,
2038 relation_set,
2039+ status_set,
2040+ hook_name
2041 )
2042
2043 from charmhelpers.contrib.storage.linux.lvm import (
2044@@ -52,7 +56,8 @@
2045 )
2046
2047 from charmhelpers.contrib.network.ip import (
2048- get_ipv6_addr
2049+ get_ipv6_addr,
2050+ is_ipv6,
2051 )
2052
2053 from charmhelpers.contrib.python.packages import (
2054@@ -71,7 +76,6 @@
2055 DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
2056 'restricted main multiverse universe')
2057
2058-
2059 UBUNTU_OPENSTACK_RELEASE = OrderedDict([
2060 ('oneiric', 'diablo'),
2061 ('precise', 'essex'),
2062@@ -81,6 +85,7 @@
2063 ('trusty', 'icehouse'),
2064 ('utopic', 'juno'),
2065 ('vivid', 'kilo'),
2066+ ('wily', 'liberty'),
2067 ])
2068
2069
2070@@ -93,6 +98,7 @@
2071 ('2014.1', 'icehouse'),
2072 ('2014.2', 'juno'),
2073 ('2015.1', 'kilo'),
2074+ ('2015.2', 'liberty'),
2075 ])
2076
2077 # The ugly duckling
2078@@ -115,8 +121,42 @@
2079 ('2.2.0', 'juno'),
2080 ('2.2.1', 'kilo'),
2081 ('2.2.2', 'kilo'),
2082+ ('2.3.0', 'liberty'),
2083+ ('2.4.0', 'liberty'),
2084+ ('2.5.0', 'liberty'),
2085 ])
2086
2087+# >= Liberty version->codename mapping
2088+PACKAGE_CODENAMES = {
2089+ 'nova-common': OrderedDict([
2090+ ('12.0.0', 'liberty'),
2091+ ]),
2092+ 'neutron-common': OrderedDict([
2093+ ('7.0.0', 'liberty'),
2094+ ]),
2095+ 'cinder-common': OrderedDict([
2096+ ('7.0.0', 'liberty'),
2097+ ]),
2098+ 'keystone': OrderedDict([
2099+ ('8.0.0', 'liberty'),
2100+ ]),
2101+ 'horizon-common': OrderedDict([
2102+ ('8.0.0', 'liberty'),
2103+ ]),
2104+ 'ceilometer-common': OrderedDict([
2105+ ('5.0.0', 'liberty'),
2106+ ]),
2107+ 'heat-common': OrderedDict([
2108+ ('5.0.0', 'liberty'),
2109+ ]),
2110+ 'glance-common': OrderedDict([
2111+ ('11.0.0', 'liberty'),
2112+ ]),
2113+ 'openstack-dashboard': OrderedDict([
2114+ ('8.0.0', 'liberty'),
2115+ ]),
2116+}
2117+
2118 DEFAULT_LOOPBACK_SIZE = '5G'
2119
2120
2121@@ -166,9 +206,9 @@
2122 error_out(e)
2123
2124
2125-def get_os_version_codename(codename):
2126+def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
2127 '''Determine OpenStack version number from codename.'''
2128- for k, v in six.iteritems(OPENSTACK_CODENAMES):
2129+ for k, v in six.iteritems(version_map):
2130 if v == codename:
2131 return k
2132 e = 'Could not derive OpenStack version for '\
2133@@ -200,20 +240,31 @@
2134 error_out(e)
2135
2136 vers = apt.upstream_version(pkg.current_ver.ver_str)
2137+ match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
2138+ if match:
2139+ vers = match.group(0)
2140
2141- try:
2142- if 'swift' in pkg.name:
2143- swift_vers = vers[:5]
2144- if swift_vers not in SWIFT_CODENAMES:
2145- # Deal with 1.10.0 upward
2146- swift_vers = vers[:6]
2147- return SWIFT_CODENAMES[swift_vers]
2148- else:
2149- vers = vers[:6]
2150- return OPENSTACK_CODENAMES[vers]
2151- except KeyError:
2152- e = 'Could not determine OpenStack codename for version %s' % vers
2153- error_out(e)
2154+ # >= Liberty independent project versions
2155+ if (package in PACKAGE_CODENAMES and
2156+ vers in PACKAGE_CODENAMES[package]):
2157+ return PACKAGE_CODENAMES[package][vers]
2158+ else:
2159+ # < Liberty co-ordinated project versions
2160+ try:
2161+ if 'swift' in pkg.name:
2162+ swift_vers = vers[:5]
2163+ if swift_vers not in SWIFT_CODENAMES:
2164+ # Deal with 1.10.0 upward
2165+ swift_vers = vers[:6]
2166+ return SWIFT_CODENAMES[swift_vers]
2167+ else:
2168+ vers = vers[:6]
2169+ return OPENSTACK_CODENAMES[vers]
2170+ except KeyError:
2171+ if not fatal:
2172+ return None
2173+ e = 'Could not determine OpenStack codename for version %s' % vers
2174+ error_out(e)
2175
2176
2177 def get_os_version_package(pkg, fatal=True):
2178@@ -323,6 +374,9 @@
2179 'kilo': 'trusty-updates/kilo',
2180 'kilo/updates': 'trusty-updates/kilo',
2181 'kilo/proposed': 'trusty-proposed/kilo',
2182+ 'liberty': 'trusty-updates/liberty',
2183+ 'liberty/updates': 'trusty-updates/liberty',
2184+ 'liberty/proposed': 'trusty-proposed/liberty',
2185 }
2186
2187 try:
2188@@ -388,7 +442,11 @@
2189 import apt_pkg as apt
2190 src = config('openstack-origin')
2191 cur_vers = get_os_version_package(package)
2192- available_vers = get_os_version_install_source(src)
2193+ if "swift" in package:
2194+ codename = get_os_codename_install_source(src)
2195+ available_vers = get_os_version_codename(codename, SWIFT_CODENAMES)
2196+ else:
2197+ available_vers = get_os_version_install_source(src)
2198 apt.init()
2199 return apt.version_compare(available_vers, cur_vers) == 1
2200
2201@@ -465,6 +523,12 @@
2202 relation_prefix=None):
2203 hosts = get_ipv6_addr(dynamic_only=False)
2204
2205+ if config('vip'):
2206+ vips = config('vip').split()
2207+ for vip in vips:
2208+ if vip and is_ipv6(vip):
2209+ hosts.append(vip)
2210+
2211 kwargs = {'database': database,
2212 'username': database_user,
2213 'hostname': json.dumps(hosts)}
2214@@ -518,6 +582,7 @@
2215 Clone/install all specified OpenStack repositories.
2216
2217 The expected format of projects_yaml is:
2218+
2219 repositories:
2220 - {name: keystone,
2221 repository: 'git://git.openstack.org/openstack/keystone.git',
2222@@ -525,11 +590,13 @@
2223 - {name: requirements,
2224 repository: 'git://git.openstack.org/openstack/requirements.git',
2225 branch: 'stable/icehouse'}
2226+
2227 directory: /mnt/openstack-git
2228 http_proxy: squid-proxy-url
2229 https_proxy: squid-proxy-url
2230
2231- The directory, http_proxy, and https_proxy keys are optional.
2232+ The directory, http_proxy, and https_proxy keys are optional.
2233+
2234 """
2235 global requirements_dir
2236 parent_dir = '/mnt/openstack-git'
2237@@ -551,6 +618,12 @@
2238
2239 pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
2240
2241+ # Upgrade setuptools and pip from default virtualenv versions. The default
2242+ # versions in trusty break master OpenStack branch deployments.
2243+ for p in ['pip', 'setuptools']:
2244+ pip_install(p, upgrade=True, proxy=http_proxy,
2245+ venv=os.path.join(parent_dir, 'venv'))
2246+
2247 for p in projects['repositories']:
2248 repo = p['repository']
2249 branch = p['branch']
2250@@ -612,24 +685,24 @@
2251 else:
2252 repo_dir = dest_dir
2253
2254+ venv = os.path.join(parent_dir, 'venv')
2255+
2256 if update_requirements:
2257 if not requirements_dir:
2258 error_out('requirements repo must be cloned before '
2259 'updating from global requirements.')
2260- _git_update_requirements(repo_dir, requirements_dir)
2261+ _git_update_requirements(venv, repo_dir, requirements_dir)
2262
2263 juju_log('Installing git repo from dir: {}'.format(repo_dir))
2264 if http_proxy:
2265- pip_install(repo_dir, proxy=http_proxy,
2266- venv=os.path.join(parent_dir, 'venv'))
2267+ pip_install(repo_dir, proxy=http_proxy, venv=venv)
2268 else:
2269- pip_install(repo_dir,
2270- venv=os.path.join(parent_dir, 'venv'))
2271+ pip_install(repo_dir, venv=venv)
2272
2273 return repo_dir
2274
2275
2276-def _git_update_requirements(package_dir, reqs_dir):
2277+def _git_update_requirements(venv, package_dir, reqs_dir):
2278 """
2279 Update from global requirements.
2280
2281@@ -638,12 +711,14 @@
2282 """
2283 orig_dir = os.getcwd()
2284 os.chdir(reqs_dir)
2285- cmd = ['python', 'update.py', package_dir]
2286+ python = os.path.join(venv, 'bin/python')
2287+ cmd = [python, 'update.py', package_dir]
2288 try:
2289 subprocess.check_call(cmd)
2290 except subprocess.CalledProcessError:
2291 package = os.path.basename(package_dir)
2292- error_out("Error updating {} from global-requirements.txt".format(package))
2293+ error_out("Error updating {} from "
2294+ "global-requirements.txt".format(package))
2295 os.chdir(orig_dir)
2296
2297
2298@@ -691,6 +766,222 @@
2299 return None
2300
2301
2302+def os_workload_status(configs, required_interfaces, charm_func=None):
2303+ """
2304+ Decorator to set workload status based on complete contexts
2305+ """
2306+ def wrap(f):
2307+ @wraps(f)
2308+ def wrapped_f(*args, **kwargs):
2309+ # Run the original function first
2310+ f(*args, **kwargs)
2311+ # Set workload status now that contexts have been
2312+ # acted on
2313+ set_os_workload_status(configs, required_interfaces, charm_func)
2314+ return wrapped_f
2315+ return wrap
2316+
2317+
2318+def set_os_workload_status(configs, required_interfaces, charm_func=None):
2319+ """
2320+ Set workload status based on complete contexts.
2321+ status-set missing or incomplete contexts
2322+ and juju-log details of missing required data.
2323+ charm_func is a charm specific function to run checking
2324+ for charm specific requirements such as a VIP setting.
2325+ """
2326+ incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
2327+ state = 'active'
2328+ missing_relations = []
2329+ incomplete_relations = []
2330+ message = None
2331+ charm_state = None
2332+ charm_message = None
2333+
2334+ for generic_interface in incomplete_rel_data.keys():
2335+ related_interface = None
2336+ missing_data = {}
2337+ # Related or not?
2338+ for interface in incomplete_rel_data[generic_interface]:
2339+ if incomplete_rel_data[generic_interface][interface].get('related'):
2340+ related_interface = interface
2341+ missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data')
2342+ # No relation ID for the generic_interface
2343+ if not related_interface:
2344+ juju_log("{} relation is missing and must be related for "
2345+ "functionality. ".format(generic_interface), 'WARN')
2346+ state = 'blocked'
2347+ if generic_interface not in missing_relations:
2348+ missing_relations.append(generic_interface)
2349+ else:
2350+ # Relation ID exists but no related unit
2351+ if not missing_data:
2352+ # Edge case relation ID exists but departing
2353+ if ('departed' in hook_name() or 'broken' in hook_name()) \
2354+ and related_interface in hook_name():
2355+ state = 'blocked'
2356+ if generic_interface not in missing_relations:
2357+ missing_relations.append(generic_interface)
2358+ juju_log("{} relation's interface, {}, "
2359+ "relationship is departed or broken "
2360+ "and is required for functionality."
2361+ "".format(generic_interface, related_interface), "WARN")
2362+ # Normal case relation ID exists but no related unit
2363+ # (joining)
2364+ else:
2365+                    juju_log("{} relation's interface, {}, is related but has "
2366+ "no units in the relation."
2367+ "".format(generic_interface, related_interface), "INFO")
2368+ # Related unit exists and data missing on the relation
2369+ else:
2370+ juju_log("{} relation's interface, {}, is related awaiting "
2371+ "the following data from the relationship: {}. "
2372+ "".format(generic_interface, related_interface,
2373+ ", ".join(missing_data)), "INFO")
2374+ if state != 'blocked':
2375+ state = 'waiting'
2376+ if generic_interface not in incomplete_relations \
2377+ and generic_interface not in missing_relations:
2378+ incomplete_relations.append(generic_interface)
2379+
2380+ if missing_relations:
2381+ message = "Missing relations: {}".format(", ".join(missing_relations))
2382+ if incomplete_relations:
2383+ message += "; incomplete relations: {}" \
2384+ "".format(", ".join(incomplete_relations))
2385+ state = 'blocked'
2386+ elif incomplete_relations:
2387+ message = "Incomplete relations: {}" \
2388+ "".format(", ".join(incomplete_relations))
2389+ state = 'waiting'
2390+
2391+ # Run charm specific checks
2392+ if charm_func:
2393+ charm_state, charm_message = charm_func(configs)
2394+ if charm_state != 'active' and charm_state != 'unknown':
2395+ state = workload_state_compare(state, charm_state)
2396+ if message:
2397+ charm_message = charm_message.replace("Incomplete relations: ",
2398+ "")
2399+ message = "{}, {}".format(message, charm_message)
2400+ else:
2401+ message = charm_message
2402+
2403+ # Set to active if all requirements have been met
2404+ if state == 'active':
2405+ message = "Unit is ready"
2406+ juju_log(message, "INFO")
2407+
2408+ status_set(state, message)
2409+
2410+
2411+def workload_state_compare(current_workload_state, workload_state):
2412+ """ Return highest priority of two states"""
2413+ hierarchy = {'unknown': -1,
2414+ 'active': 0,
2415+ 'maintenance': 1,
2416+ 'waiting': 2,
2417+ 'blocked': 3,
2418+ }
2419+
2420+ if hierarchy.get(workload_state) is None:
2421+ workload_state = 'unknown'
2422+ if hierarchy.get(current_workload_state) is None:
2423+ current_workload_state = 'unknown'
2424+
2425+ # Set workload_state based on hierarchy of statuses
2426+ if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
2427+ return current_workload_state
2428+ else:
2429+ return workload_state
2430+
2431+
2432+def incomplete_relation_data(configs, required_interfaces):
2433+ """
2434+ Check complete contexts against required_interfaces
2435+ Return dictionary of incomplete relation data.
2436+
2437+ configs is an OSConfigRenderer object with configs registered
2438+
2439+ required_interfaces is a dictionary of required general interfaces
2440+ with dictionary values of possible specific interfaces.
2441+ Example:
2442+ required_interfaces = {'database': ['shared-db', 'pgsql-db']}
2443+
2444+    The interface is said to be satisfied if any one of the interfaces in the
2445+ list has a complete context.
2446+
2447+ Return dictionary of incomplete or missing required contexts with relation
2448+ status of interfaces and any missing data points. Example:
2449+ {'message':
2450+ {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
2451+ 'zeromq-configuration': {'related': False}},
2452+ 'identity':
2453+ {'identity-service': {'related': False}},
2454+ 'database':
2455+ {'pgsql-db': {'related': False},
2456+ 'shared-db': {'related': True}}}
2457+ """
2458+ complete_ctxts = configs.complete_contexts()
2459+ incomplete_relations = []
2460+ for svc_type in required_interfaces.keys():
2461+ # Avoid duplicates
2462+ found_ctxt = False
2463+ for interface in required_interfaces[svc_type]:
2464+ if interface in complete_ctxts:
2465+ found_ctxt = True
2466+ if not found_ctxt:
2467+ incomplete_relations.append(svc_type)
2468+ incomplete_context_data = {}
2469+ for i in incomplete_relations:
2470+ incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i])
2471+ return incomplete_context_data
2472+
2473+
2474+def do_action_openstack_upgrade(package, upgrade_callback, configs):
2475+ """Perform action-managed OpenStack upgrade.
2476+
2477+ Upgrades packages to the configured openstack-origin version and sets
2478+ the corresponding action status as a result.
2479+
2480+ If the charm was installed from source we cannot upgrade it.
2481+ For backwards compatibility a config flag (action-managed-upgrade) must
2482+ be set for this code to run, otherwise a full service level upgrade will
2483+ fire on config-changed.
2484+
2485+ @param package: package name for determining if upgrade available
2486+ @param upgrade_callback: function callback to charm's upgrade function
2487+ @param configs: templating object derived from OSConfigRenderer class
2488+
2489+ @return: True if upgrade successful; False if upgrade failed or skipped
2490+ """
2491+ ret = False
2492+
2493+ if git_install_requested():
2494+ action_set({'outcome': 'installed from source, skipped upgrade.'})
2495+ else:
2496+ if openstack_upgrade_available(package):
2497+ if config('action-managed-upgrade'):
2498+ juju_log('Upgrading OpenStack release')
2499+
2500+ try:
2501+ upgrade_callback(configs=configs)
2502+ action_set({'outcome': 'success, upgrade completed.'})
2503+ ret = True
2504+ except:
2505+ action_set({'outcome': 'upgrade failed, see traceback.'})
2506+ action_set({'traceback': traceback.format_exc()})
2507+ action_fail('do_openstack_upgrade resulted in an '
2508+ 'unexpected error')
2509+ else:
2510+ action_set({'outcome': 'action-managed-upgrade config is '
2511+ 'False, skipped upgrade.'})
2512+ else:
2513+ action_set({'outcome': 'no upgrade available.'})
2514+
2515+ return ret
2516+
2517+
2518 def remote_restart(rel_name, remote_service=None):
2519 trigger = {
2520 'restart-trigger': str(uuid.uuid4()),
2521@@ -700,7 +991,7 @@
2522 for rid in relation_ids(rel_name):
2523 # This subordinate can be related to two seperate services using
2524 # different subordinate relations so only issue the restart if
2525- # thr principle is conencted down the relation we think it is
2526+        # the principal is connected down the relation we think it is
2527 if related_units(relid=rid):
2528 relation_set(relation_id=rid,
2529 relation_settings=trigger,
2530
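A sketch of how a charm's hooks might adopt the workload-status helpers added above; REQUIRED_INTERFACES and CONFIGS here are assumptions standing in for the charm's own interface map and OSConfigRenderer, not names taken from this merge:

    from charmhelpers.contrib.openstack.utils import (
        os_workload_status,
        set_os_workload_status,
    )

    # Generic interface types mapped to the relations that can satisfy them.
    REQUIRED_INTERFACES = {
        'messaging': ['amqp'],
        'identity': ['identity-service'],
    }

    @os_workload_status(CONFIGS, REQUIRED_INTERFACES)
    def config_changed():
        CONFIGS.write_all()

    # Or call it explicitly at the end of any hook:
    set_os_workload_status(CONFIGS, REQUIRED_INTERFACES)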
2531=== modified file 'hooks/charmhelpers/contrib/python/packages.py'
2532--- hooks/charmhelpers/contrib/python/packages.py 2015-06-10 15:45:48 +0000
2533+++ hooks/charmhelpers/contrib/python/packages.py 2015-12-01 15:05:49 +0000
2534@@ -36,6 +36,8 @@
2535 def parse_options(given, available):
2536 """Given a set of options, check if available"""
2537 for key, value in sorted(given.items()):
2538+ if not value:
2539+ continue
2540 if key in available:
2541 yield "--{0}={1}".format(key, value)
2542
2543
2544=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
2545--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-06-10 15:45:48 +0000
2546+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-12-01 15:05:49 +0000
2547@@ -26,8 +26,10 @@
2548
2549 import os
2550 import shutil
2551+import six
2552 import json
2553 import time
2554+import uuid
2555
2556 from subprocess import (
2557 check_call,
2558@@ -35,8 +37,10 @@
2559 CalledProcessError,
2560 )
2561 from charmhelpers.core.hookenv import (
2562+ local_unit,
2563 relation_get,
2564 relation_ids,
2565+ relation_set,
2566 related_units,
2567 log,
2568 DEBUG,
2569@@ -56,16 +60,18 @@
2570 apt_install,
2571 )
2572
2573+from charmhelpers.core.kernel import modprobe
2574+
2575 KEYRING = '/etc/ceph/ceph.client.{}.keyring'
2576 KEYFILE = '/etc/ceph/ceph.client.{}.key'
2577
2578 CEPH_CONF = """[global]
2579- auth supported = {auth}
2580- keyring = {keyring}
2581- mon host = {mon_hosts}
2582- log to syslog = {use_syslog}
2583- err to syslog = {use_syslog}
2584- clog to syslog = {use_syslog}
2585+auth supported = {auth}
2586+keyring = {keyring}
2587+mon host = {mon_hosts}
2588+log to syslog = {use_syslog}
2589+err to syslog = {use_syslog}
2590+clog to syslog = {use_syslog}
2591 """
2592
2593
2594@@ -120,29 +126,37 @@
2595 return None
2596
2597
2598-def create_pool(service, name, replicas=3):
2599+def update_pool(client, pool, settings):
2600+ cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
2601+ for k, v in six.iteritems(settings):
2602+ cmd.append(k)
2603+ cmd.append(v)
2604+
2605+ check_call(cmd)
2606+
2607+
2608+def create_pool(service, name, replicas=3, pg_num=None):
2609 """Create a new RADOS pool."""
2610 if pool_exists(service, name):
2611 log("Ceph pool {} already exists, skipping creation".format(name),
2612 level=WARNING)
2613 return
2614
2615- # Calculate the number of placement groups based
2616- # on upstream recommended best practices.
2617- osds = get_osds(service)
2618- if osds:
2619- pgnum = (len(osds) * 100 // replicas)
2620- else:
2621- # NOTE(james-page): Default to 200 for older ceph versions
2622- # which don't support OSD query from cli
2623- pgnum = 200
2624-
2625- cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
2626- check_call(cmd)
2627-
2628- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
2629- str(replicas)]
2630- check_call(cmd)
2631+ if not pg_num:
2632+ # Calculate the number of placement groups based
2633+ # on upstream recommended best practices.
2634+ osds = get_osds(service)
2635+ if osds:
2636+ pg_num = (len(osds) * 100 // replicas)
2637+ else:
2638+ # NOTE(james-page): Default to 200 for older ceph versions
2639+ # which don't support OSD query from cli
2640+ pg_num = 200
2641+
2642+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)]
2643+ check_call(cmd)
2644+
2645+ update_pool(service, name, settings={'size': str(replicas)})
2646
2647
2648 def delete_pool(service, name):
2649@@ -197,10 +211,10 @@
2650 log('Created new keyfile at %s.' % keyfile, level=INFO)
2651
2652
2653-def get_ceph_nodes():
2654- """Query named relation 'ceph' to determine current nodes."""
2655+def get_ceph_nodes(relation='ceph'):
2656+ """Query named relation to determine current nodes."""
2657 hosts = []
2658- for r_id in relation_ids('ceph'):
2659+ for r_id in relation_ids(relation):
2660 for unit in related_units(r_id):
2661 hosts.append(relation_get('private-address', unit=unit, rid=r_id))
2662
2663@@ -288,17 +302,6 @@
2664 os.chown(data_src_dst, uid, gid)
2665
2666
2667-# TODO: re-use
2668-def modprobe(module):
2669- """Load a kernel module and configure for auto-load on reboot."""
2670- log('Loading kernel module', level=INFO)
2671- cmd = ['modprobe', module]
2672- check_call(cmd)
2673- with open('/etc/modules', 'r+') as modules:
2674- if module not in modules.read():
2675- modules.write(module)
2676-
2677-
2678 def copy_files(src, dst, symlinks=False, ignore=None):
2679 """Copy files from src to dst."""
2680 for item in os.listdir(src):
2681@@ -363,14 +366,14 @@
2682 service_start(svc)
2683
2684
2685-def ensure_ceph_keyring(service, user=None, group=None):
2686+def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'):
2687 """Ensures a ceph keyring is created for a named service and optionally
2688 ensures user and group ownership.
2689
2690 Returns False if no ceph key is available in relation state.
2691 """
2692 key = None
2693- for rid in relation_ids('ceph'):
2694+ for rid in relation_ids(relation):
2695 for unit in related_units(rid):
2696 key = relation_get('key', rid=rid, unit=unit)
2697 if key:
2698@@ -411,17 +414,59 @@
2699
2700 The API is versioned and defaults to version 1.
2701 """
2702- def __init__(self, api_version=1):
2703+ def __init__(self, api_version=1, request_id=None):
2704 self.api_version = api_version
2705+ if request_id:
2706+ self.request_id = request_id
2707+ else:
2708+ self.request_id = str(uuid.uuid1())
2709 self.ops = []
2710
2711- def add_op_create_pool(self, name, replica_count=3):
2712+ def add_op_create_pool(self, name, replica_count=3, pg_num=None):
2713+ """Adds an operation to create a pool.
2714+
2715+ @param pg_num setting: optional setting. If not provided, this value
2716+ will be calculated by the broker based on how many OSDs are in the
2717+ cluster at the time of creation. Note that, if provided, this value
2718+ will be capped at the current available maximum.
2719+ """
2720 self.ops.append({'op': 'create-pool', 'name': name,
2721- 'replicas': replica_count})
2722+ 'replicas': replica_count, 'pg_num': pg_num})
2723+
2724+ def set_ops(self, ops):
2725+ """Set request ops to provided value.
2726+
2727+ Useful for injecting ops that come from a previous request
2728+ to allow comparisons to ensure validity.
2729+ """
2730+ self.ops = ops
2731
2732 @property
2733 def request(self):
2734- return json.dumps({'api-version': self.api_version, 'ops': self.ops})
2735+ return json.dumps({'api-version': self.api_version, 'ops': self.ops,
2736+ 'request-id': self.request_id})
2737+
2738+ def _ops_equal(self, other):
2739+ if len(self.ops) == len(other.ops):
2740+ for req_no in range(0, len(self.ops)):
2741+ for key in ['replicas', 'name', 'op', 'pg_num']:
2742+ if self.ops[req_no].get(key) != other.ops[req_no].get(key):
2743+ return False
2744+ else:
2745+ return False
2746+ return True
2747+
2748+ def __eq__(self, other):
2749+ if not isinstance(other, self.__class__):
2750+ return False
2751+ if self.api_version == other.api_version and \
2752+ self._ops_equal(other):
2753+ return True
2754+ else:
2755+ return False
2756+
2757+ def __ne__(self, other):
2758+ return not self.__eq__(other)
2759
2760
2761 class CephBrokerRsp(object):
2762@@ -431,14 +476,198 @@
2763
2764 The API is versioned and defaults to version 1.
2765 """
2766+
2767 def __init__(self, encoded_rsp):
2768 self.api_version = None
2769 self.rsp = json.loads(encoded_rsp)
2770
2771 @property
2772+ def request_id(self):
2773+ return self.rsp.get('request-id')
2774+
2775+ @property
2776 def exit_code(self):
2777 return self.rsp.get('exit-code')
2778
2779 @property
2780 def exit_msg(self):
2781 return self.rsp.get('stderr')
2782+
2783+
2784+# Ceph Broker Conversation:
2785+# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
2786+# and send that request to ceph via the ceph relation. The CephBrokerRq has a
2787+# unique id so that the client can identify which CephBrokerRsp is associated
2788+# with the request. Ceph will also respond to each client unit individually
2789+# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
2790+# via key broker-rsp-glance-0
2791+#
2792+# To use this the charm can just do something like:
2793+#
2794+# from charmhelpers.contrib.storage.linux.ceph import (
2795+# send_request_if_needed,
2796+# is_request_complete,
2797+# CephBrokerRq,
2798+# )
2799+#
2800+# @hooks.hook('ceph-relation-changed')
2801+# def ceph_changed():
2802+# rq = CephBrokerRq()
2803+# rq.add_op_create_pool(name='poolname', replica_count=3)
2804+#
2805+# if is_request_complete(rq):
2806+# <Request complete actions>
2807+# else:
2808+# send_request_if_needed(get_ceph_request())
2809+#
2810+# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
2811+# of glance having sent a request to ceph which ceph has successfully processed
2812+# 'ceph:8': {
2813+# 'ceph/0': {
2814+# 'auth': 'cephx',
2815+# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
2816+# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
2817+# 'ceph-public-address': '10.5.44.103',
2818+# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
2819+# 'private-address': '10.5.44.103',
2820+# },
2821+# 'glance/0': {
2822+# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
2823+# '"ops": [{"replicas": 3, "name": "glance", '
2824+# '"op": "create-pool"}]}'),
2825+# 'private-address': '10.5.44.109',
2826+# },
2827+# }
2828+
2829+def get_previous_request(rid):
2830+ """Return the last ceph broker request sent on a given relation
2831+
2832+ @param rid: Relation id to query for request
2833+ """
2834+ request = None
2835+ broker_req = relation_get(attribute='broker_req', rid=rid,
2836+ unit=local_unit())
2837+ if broker_req:
2838+ request_data = json.loads(broker_req)
2839+ request = CephBrokerRq(api_version=request_data['api-version'],
2840+ request_id=request_data['request-id'])
2841+ request.set_ops(request_data['ops'])
2842+
2843+ return request
2844+
2845+
2846+def get_request_states(request, relation='ceph'):
2847+ """Return a dict of requests per relation id with their corresponding
2848+ completion state.
2849+
2850+ This allows a charm, which has a request for ceph, to see whether there is
2851+ an equivalent request already being processed and if so what state that
2852+ request is in.
2853+
2854+ @param request: A CephBrokerRq object
2855+ """
2856+ complete = []
2857+ requests = {}
2858+ for rid in relation_ids(relation):
2859+ complete = False
2860+ previous_request = get_previous_request(rid)
2861+ if request == previous_request:
2862+ sent = True
2863+ complete = is_request_complete_for_rid(previous_request, rid)
2864+ else:
2865+ sent = False
2866+ complete = False
2867+
2868+ requests[rid] = {
2869+ 'sent': sent,
2870+ 'complete': complete,
2871+ }
2872+
2873+ return requests
2874+
2875+
2876+def is_request_sent(request, relation='ceph'):
2877+ """Check to see if a functionally equivalent request has already been sent
2878+
2879+    Returns True if a similar request has been sent
2880+
2881+ @param request: A CephBrokerRq object
2882+ """
2883+ states = get_request_states(request, relation=relation)
2884+ for rid in states.keys():
2885+ if not states[rid]['sent']:
2886+ return False
2887+
2888+ return True
2889+
2890+
2891+def is_request_complete(request, relation='ceph'):
2892+ """Check to see if a functionally equivalent request has already been
2893+ completed
2894+
2895+    Returns True if a similar request has been completed
2896+
2897+ @param request: A CephBrokerRq object
2898+ """
2899+ states = get_request_states(request, relation=relation)
2900+ for rid in states.keys():
2901+ if not states[rid]['complete']:
2902+ return False
2903+
2904+ return True
2905+
2906+
2907+def is_request_complete_for_rid(request, rid):
2908+ """Check if a given request has been completed on the given relation
2909+
2910+ @param request: A CephBrokerRq object
2911+ @param rid: Relation ID
2912+ """
2913+ broker_key = get_broker_rsp_key()
2914+ for unit in related_units(rid):
2915+ rdata = relation_get(rid=rid, unit=unit)
2916+ if rdata.get(broker_key):
2917+ rsp = CephBrokerRsp(rdata.get(broker_key))
2918+ if rsp.request_id == request.request_id:
2919+ if not rsp.exit_code:
2920+ return True
2921+ else:
2922+ # The remote unit sent no reply targeted at this unit so either the
2923+ # remote ceph cluster does not support unit targeted replies or it
2924+ # has not processed our request yet.
2925+ if rdata.get('broker_rsp'):
2926+ request_data = json.loads(rdata['broker_rsp'])
2927+ if request_data.get('request-id'):
2928+ log('Ignoring legacy broker_rsp without unit key as remote '
2929+ 'service supports unit specific replies', level=DEBUG)
2930+ else:
2931+ log('Using legacy broker_rsp as remote service does not '
2932+                    'support unit specific replies', level=DEBUG)
2933+ rsp = CephBrokerRsp(rdata['broker_rsp'])
2934+ if not rsp.exit_code:
2935+ return True
2936+
2937+ return False
2938+
2939+
2940+def get_broker_rsp_key():
2941+ """Return broker response key for this unit
2942+
2943+ This is the key that ceph is going to use to pass request status
2944+ information back to this unit
2945+ """
2946+ return 'broker-rsp-' + local_unit().replace('/', '-')
2947+
2948+
2949+def send_request_if_needed(request, relation='ceph'):
2950+ """Send broker request if an equivalent request has not already been sent
2951+
2952+ @param request: A CephBrokerRq object
2953+ """
2954+ if is_request_sent(request, relation=relation):
2955+ log('Request already sent but not complete, not sending new request',
2956+ level=DEBUG)
2957+ else:
2958+ for rid in relation_ids(relation):
2959+ log('Sending request {}'.format(request.request_id), level=DEBUG)
2960+ relation_set(relation_id=rid, broker_req=request.request)
2961
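The broker-conversation helpers above are meant to be used along the lines already outlined in the module comments; a slightly fuller sketch for a ceph client charm, where the pool name and the follow-up call are illustrative only:

    from charmhelpers.contrib.storage.linux.ceph import (
        CephBrokerRq,
        is_request_complete,
        send_request_if_needed,
    )

    def get_ceph_request():
        rq = CephBrokerRq()
        # pg_num is optional; left as None the broker derives it from OSD count.
        rq.add_op_create_pool(name='cinder-ceph', replica_count=3, pg_num=None)
        return rq

    # In the ceph-relation-changed hook:
    if is_request_complete(get_ceph_request()):
        configure_storage_backend()   # charm-specific follow-up (example only)
    else:
        send_request_if_needed(get_ceph_request())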
2962=== modified file 'hooks/charmhelpers/contrib/storage/linux/loopback.py'
2963--- hooks/charmhelpers/contrib/storage/linux/loopback.py 2015-06-10 15:45:48 +0000
2964+++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2015-12-01 15:05:49 +0000
2965@@ -76,3 +76,13 @@
2966 check_call(cmd)
2967
2968 return create_loopback(path)
2969+
2970+
2971+def is_mapped_loopback_device(device):
2972+ """
2973+ Checks if a given device name is an existing/mapped loopback device.
2974+ :param device: str: Full path to the device (eg, /dev/loop1).
2975+ :returns: str: Path to the backing file if is a loopback device
2976+ empty string otherwise
2977+ """
2978+ return loopback_devices().get(device, "")
2979
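A small illustration of the new helper; the device path is an example:

    from charmhelpers.contrib.storage.linux.loopback import (
        is_mapped_loopback_device,
    )

    backing_file = is_mapped_loopback_device('/dev/loop1')
    if backing_file:
        print('loop1 is backed by %s' % backing_file)
    else:
        print('loop1 is not a mapped loopback device')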
2980=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
2981--- hooks/charmhelpers/contrib/storage/linux/utils.py 2015-06-10 15:45:48 +0000
2982+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2015-12-01 15:05:49 +0000
2983@@ -43,9 +43,10 @@
2984
2985 :param block_device: str: Full path of block device to clean.
2986 '''
2987+ # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
2988 # sometimes sgdisk exits non-zero; this is OK, dd will clean up
2989- call(['sgdisk', '--zap-all', '--mbrtogpt',
2990- '--clear', block_device])
2991+ call(['sgdisk', '--zap-all', '--', block_device])
2992+ call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
2993 dev_end = check_output(['blockdev', '--getsz',
2994 block_device]).decode('UTF-8')
2995 gpt_end = int(dev_end.split()[0]) - 100
2996@@ -67,4 +68,4 @@
2997 out = check_output(['mount']).decode('UTF-8')
2998 if is_partition:
2999 return bool(re.search(device + r"\b", out))
3000- return bool(re.search(device + r"[0-9]+\b", out))
3001+ return bool(re.search(device + r"[0-9]*\b", out))
3002
3003=== added file 'hooks/charmhelpers/core/files.py'
3004--- hooks/charmhelpers/core/files.py 1970-01-01 00:00:00 +0000
3005+++ hooks/charmhelpers/core/files.py 2015-12-01 15:05:49 +0000
3006@@ -0,0 +1,45 @@
3007+#!/usr/bin/env python
3008+# -*- coding: utf-8 -*-
3009+
3010+# Copyright 2014-2015 Canonical Limited.
3011+#
3012+# This file is part of charm-helpers.
3013+#
3014+# charm-helpers is free software: you can redistribute it and/or modify
3015+# it under the terms of the GNU Lesser General Public License version 3 as
3016+# published by the Free Software Foundation.
3017+#
3018+# charm-helpers is distributed in the hope that it will be useful,
3019+# but WITHOUT ANY WARRANTY; without even the implied warranty of
3020+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3021+# GNU Lesser General Public License for more details.
3022+#
3023+# You should have received a copy of the GNU Lesser General Public License
3024+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3025+
3026+__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
3027+
3028+import os
3029+import subprocess
3030+
3031+
3032+def sed(filename, before, after, flags='g'):
3033+ """
3034+    Searches for and replaces the given pattern in filename.
3035+
3036+ :param filename: relative or absolute file path.
3037+ :param before: expression to be replaced (see 'man sed')
3038+ :param after: expression to replace with (see 'man sed')
3039+    :param flags: sed-compatible regex flags; for example, to make
3040+ the search and replace case insensitive, specify ``flags="i"``.
3041+ The ``g`` flag is always specified regardless, so you do not
3042+ need to remember to include it when overriding this parameter.
3043+ :returns: If the sed command exit code was zero then return,
3044+ otherwise raise CalledProcessError.
3045+ """
3046+ expression = r's/{0}/{1}/{2}'.format(before,
3047+ after, flags)
3048+
3049+ return subprocess.check_call(["sed", "-i", "-r", "-e",
3050+ expression,
3051+ os.path.expanduser(filename)])
3052
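For illustration, the new sed() helper could be used like this; the path and pattern are examples only:

    from charmhelpers.core.files import sed

    # Flip 'verbose = False' to 'verbose = True' (the 'i' flag makes the match
    # case-insensitive; 'g' is applied by default).
    sed('/etc/neutron/neutron.conf', r'verbose\s*=\s*False',
        'verbose = True', flags='gi')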
3053=== modified file 'hooks/charmhelpers/core/hookenv.py'
3054--- hooks/charmhelpers/core/hookenv.py 2015-06-10 07:35:12 +0000
3055+++ hooks/charmhelpers/core/hookenv.py 2015-12-01 15:05:49 +0000
3056@@ -21,7 +21,10 @@
3057 # Charm Helpers Developers <juju@lists.ubuntu.com>
3058
3059 from __future__ import print_function
3060+import copy
3061+from distutils.version import LooseVersion
3062 from functools import wraps
3063+import glob
3064 import os
3065 import json
3066 import yaml
3067@@ -71,6 +74,7 @@
3068 res = func(*args, **kwargs)
3069 cache[key] = res
3070 return res
3071+ wrapper._wrapped = func
3072 return wrapper
3073
3074
3075@@ -170,9 +174,19 @@
3076 return os.environ.get('JUJU_RELATION', None)
3077
3078
3079-def relation_id():
3080- """The relation ID for the current relation hook"""
3081- return os.environ.get('JUJU_RELATION_ID', None)
3082+@cached
3083+def relation_id(relation_name=None, service_or_unit=None):
3084+ """The relation ID for the current or a specified relation"""
3085+ if not relation_name and not service_or_unit:
3086+ return os.environ.get('JUJU_RELATION_ID', None)
3087+ elif relation_name and service_or_unit:
3088+ service_name = service_or_unit.split('/')[0]
3089+ for relid in relation_ids(relation_name):
3090+ remote_service = remote_service_name(relid)
3091+ if remote_service == service_name:
3092+ return relid
3093+ else:
3094+ raise ValueError('Must specify neither or both of relation_name and service_or_unit')
3095
3096
3097 def local_unit():
3098@@ -190,9 +204,20 @@
3099 return local_unit().split('/')[0]
3100
3101
3102+@cached
3103+def remote_service_name(relid=None):
3104+ """The remote service name for a given relation-id (or the current relation)"""
3105+ if relid is None:
3106+ unit = remote_unit()
3107+ else:
3108+ units = related_units(relid)
3109+ unit = units[0] if units else None
3110+ return unit.split('/')[0] if unit else None
3111+
3112+
3113 def hook_name():
3114 """The name of the currently executing hook"""
3115- return os.path.basename(sys.argv[0])
3116+ return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
3117
3118
3119 class Config(dict):
3120@@ -242,29 +267,7 @@
3121 self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
3122 if os.path.exists(self.path):
3123 self.load_previous()
3124-
3125- def __getitem__(self, key):
3126- """For regular dict lookups, check the current juju config first,
3127- then the previous (saved) copy. This ensures that user-saved values
3128- will be returned by a dict lookup.
3129-
3130- """
3131- try:
3132- return dict.__getitem__(self, key)
3133- except KeyError:
3134- return (self._prev_dict or {})[key]
3135-
3136- def get(self, key, default=None):
3137- try:
3138- return self[key]
3139- except KeyError:
3140- return default
3141-
3142- def keys(self):
3143- prev_keys = []
3144- if self._prev_dict is not None:
3145- prev_keys = self._prev_dict.keys()
3146- return list(set(prev_keys + list(dict.keys(self))))
3147+ atexit(self._implicit_save)
3148
3149 def load_previous(self, path=None):
3150 """Load previous copy of config from disk.
3151@@ -283,6 +286,9 @@
3152 self.path = path or self.path
3153 with open(self.path) as f:
3154 self._prev_dict = json.load(f)
3155+ for k, v in copy.deepcopy(self._prev_dict).items():
3156+ if k not in self:
3157+ self[k] = v
3158
3159 def changed(self, key):
3160 """Return True if the current value for this key is different from
3161@@ -314,13 +320,13 @@
3162 instance.
3163
3164 """
3165- if self._prev_dict:
3166- for k, v in six.iteritems(self._prev_dict):
3167- if k not in self:
3168- self[k] = v
3169 with open(self.path, 'w') as f:
3170 json.dump(self, f)
3171
3172+ def _implicit_save(self):
3173+ if self.implicit_save:
3174+ self.save()
3175+
3176
3177 @cached
3178 def config(scope=None):
3179@@ -485,6 +491,76 @@
3180
3181
3182 @cached
3183+def peer_relation_id():
3184+ '''Get a peer relation id if a peer relation has been joined, else None.'''
3185+ md = metadata()
3186+ section = md.get('peers')
3187+ if section:
3188+ for key in section:
3189+ relids = relation_ids(key)
3190+ if relids:
3191+ return relids[0]
3192+ return None
3193+
3194+
3195+@cached
3196+def relation_to_interface(relation_name):
3197+ """
3198+ Given the name of a relation, return the interface that relation uses.
3199+
3200+ :returns: The interface name, or ``None``.
3201+ """
3202+ return relation_to_role_and_interface(relation_name)[1]
3203+
3204+
3205+@cached
3206+def relation_to_role_and_interface(relation_name):
3207+ """
3208+ Given the name of a relation, return the role and the name of the interface
3209+ that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).
3210+
3211+ :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
3212+ """
3213+ _metadata = metadata()
3214+ for role in ('provides', 'requires', 'peer'):
3215+ interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
3216+ if interface:
3217+ return role, interface
3218+ return None, None
3219+
3220+
3221+@cached
3222+def role_and_interface_to_relations(role, interface_name):
3223+ """
3224+ Given a role and interface name, return a list of relation names for the
3225+ current charm that use that interface under that role (where role is one
3226+ of ``provides``, ``requires``, or ``peer``).
3227+
3228+ :returns: A list of relation names.
3229+ """
3230+ _metadata = metadata()
3231+ results = []
3232+ for relation_name, relation in _metadata.get(role, {}).items():
3233+ if relation['interface'] == interface_name:
3234+ results.append(relation_name)
3235+ return results
3236+
3237+
3238+@cached
3239+def interface_to_relations(interface_name):
3240+ """
3241+ Given an interface, return a list of relation names for the current
3242+ charm that use that interface.
3243+
3244+ :returns: A list of relation names.
3245+ """
3246+ results = []
3247+ for role in ('provides', 'requires', 'peer'):
3248+ results.extend(role_and_interface_to_relations(role, interface_name))
3249+ return results
3250+
3251+
3252+@cached
3253 def charm_name():
3254 """Get the name of the current charm as is specified on metadata.yaml"""
3255 return metadata().get('name')
3256@@ -560,6 +636,38 @@
3257 return unit_get('private-address')
3258
3259
3260+@cached
3261+def storage_get(attribute="", storage_id=""):
3262+ """Get storage attributes"""
3263+ _args = ['storage-get', '--format=json']
3264+ if storage_id:
3265+ _args.extend(('-s', storage_id))
3266+ if attribute:
3267+ _args.append(attribute)
3268+ try:
3269+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
3270+ except ValueError:
3271+ return None
3272+
3273+
3274+@cached
3275+def storage_list(storage_name=""):
3276+ """List the storage IDs for the unit"""
3277+ _args = ['storage-list', '--format=json']
3278+ if storage_name:
3279+ _args.append(storage_name)
3280+ try:
3281+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
3282+ except ValueError:
3283+ return None
3284+ except OSError as e:
3285+ import errno
3286+ if e.errno == errno.ENOENT:
3287+ # storage-list does not exist
3288+ return []
3289+ raise
3290+
3291+
3292 class UnregisteredHookError(Exception):
3293 """Raised when an undefined hook is called"""
3294 pass
3295@@ -587,10 +695,14 @@
3296 hooks.execute(sys.argv)
3297 """
3298
3299- def __init__(self, config_save=True):
3300+ def __init__(self, config_save=None):
3301 super(Hooks, self).__init__()
3302 self._hooks = {}
3303- self._config_save = config_save
3304+
3305+ # For unknown reasons, we allow the Hooks constructor to override
3306+ # config().implicit_save.
3307+ if config_save is not None:
3308+ config().implicit_save = config_save
3309
3310 def register(self, name, function):
3311 """Register a hook"""
3312@@ -598,13 +710,16 @@
3313
3314 def execute(self, args):
3315 """Execute a registered hook based on args[0]"""
3316+ _run_atstart()
3317 hook_name = os.path.basename(args[0])
3318 if hook_name in self._hooks:
3319- self._hooks[hook_name]()
3320- if self._config_save:
3321- cfg = config()
3322- if cfg.implicit_save:
3323- cfg.save()
3324+ try:
3325+ self._hooks[hook_name]()
3326+ except SystemExit as x:
3327+ if x.code is None or x.code == 0:
3328+ _run_atexit()
3329+ raise
3330+ _run_atexit()
3331 else:
3332 raise UnregisteredHookError(hook_name)
3333
3334@@ -653,6 +768,21 @@
3335 subprocess.check_call(['action-fail', message])
3336
3337
3338+def action_name():
3339+ """Get the name of the currently executing action."""
3340+ return os.environ.get('JUJU_ACTION_NAME')
3341+
3342+
3343+def action_uuid():
3344+ """Get the UUID of the currently executing action."""
3345+ return os.environ.get('JUJU_ACTION_UUID')
3346+
3347+
3348+def action_tag():
3349+ """Get the tag for the currently executing action."""
3350+ return os.environ.get('JUJU_ACTION_TAG')
3351+
3352+
3353 def status_set(workload_state, message):
3354 """Set the workload state with a message
3355
3356@@ -682,25 +812,28 @@
3357
3358
3359 def status_get():
3360- """Retrieve the previously set juju workload state
3361-
3362- If the status-set command is not found then assume this is juju < 1.23 and
3363- return 'unknown'
3364+ """Retrieve the previously set juju workload state and message
3365+
3366+ If the status-get command is not found then assume this is juju < 1.23 and
3367+ return 'unknown', ""
3368+
3369 """
3370- cmd = ['status-get']
3371+ cmd = ['status-get', "--format=json", "--include-data"]
3372 try:
3373- raw_status = subprocess.check_output(cmd, universal_newlines=True)
3374- status = raw_status.rstrip()
3375- return status
3376+ raw_status = subprocess.check_output(cmd)
3377 except OSError as e:
3378 if e.errno == errno.ENOENT:
3379- return 'unknown'
3380+ return ('unknown', "")
3381 else:
3382 raise
3383+ else:
3384+ status = json.loads(raw_status.decode("UTF-8"))
3385+ return (status["status"], status["message"])
3386
3387
3388 def translate_exc(from_exc, to_exc):
3389 def inner_translate_exc1(f):
3390+ @wraps(f)
3391 def inner_translate_exc2(*args, **kwargs):
3392 try:
3393 return f(*args, **kwargs)
3394@@ -732,13 +865,80 @@
3395 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
3396 def leader_set(settings=None, **kwargs):
3397 """Juju leader set value(s)"""
3398- log("Juju leader-set '%s'" % (settings), level=DEBUG)
3399+ # Don't log secrets.
3400+ # log("Juju leader-set '%s'" % (settings), level=DEBUG)
3401 cmd = ['leader-set']
3402 settings = settings or {}
3403 settings.update(kwargs)
3404- for k, v in settings.iteritems():
3405+ for k, v in settings.items():
3406 if v is None:
3407 cmd.append('{}='.format(k))
3408 else:
3409 cmd.append('{}={}'.format(k, v))
3410 subprocess.check_call(cmd)
3411+
3412+
3413+@cached
3414+def juju_version():
3415+ """Full version string (eg. '1.23.3.1-trusty-amd64')"""
3416+ # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
3417+ jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
3418+ return subprocess.check_output([jujud, 'version'],
3419+ universal_newlines=True).strip()
3420+
3421+
3422+@cached
3423+def has_juju_version(minimum_version):
3424+ """Return True if the Juju version is at least the provided version"""
3425+ return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
3426+
3427+
3428+_atexit = []
3429+_atstart = []
3430+
3431+
3432+def atstart(callback, *args, **kwargs):
3433+ '''Schedule a callback to run before the main hook.
3434+
3435+ Callbacks are run in the order they were added.
3436+
3437+ This is useful for modules and classes to perform initialization
3438+ and inject behavior. In particular:
3439+
3440+ - Run common code before all of your hooks, such as logging
3441+ the hook name or interesting relation data.
3442+ - Defer object or module initialization that requires a hook
3443+ context until we know there actually is a hook context,
3444+ making testing easier.
3445+ - Rather than requiring charm authors to include boilerplate to
3446+ invoke your helper's behavior, have it run automatically if
3447+ your object is instantiated or module imported.
3448+
3449+    This is not at all useful after your hook framework has been launched.
3450+ '''
3451+ global _atstart
3452+ _atstart.append((callback, args, kwargs))
3453+
3454+
3455+def atexit(callback, *args, **kwargs):
3456+ '''Schedule a callback to run on successful hook completion.
3457+
3458+ Callbacks are run in the reverse order that they were added.'''
3459+ _atexit.append((callback, args, kwargs))
3460+
3461+
3462+def _run_atstart():
3463+ '''Hook frameworks must invoke this before running the main hook body.'''
3464+ global _atstart
3465+ for callback, args, kwargs in _atstart:
3466+ callback(*args, **kwargs)
3467+ del _atstart[:]
3468+
3469+
3470+def _run_atexit():
3471+ '''Hook frameworks must invoke this after the main hook body has
3472+ successfully completed. Do not invoke it if the hook fails.'''
3473+ global _atexit
3474+ for callback, args, kwargs in reversed(_atexit):
3475+ callback(*args, **kwargs)
3476+ del _atexit[:]
3477
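As a minimal sketch (charm module layout and the logged messages here are illustrative, not part of this branch), a hook script could use the new atstart/atexit plumbing like this:

    import sys
    from charmhelpers.core import hookenv

    def log_hook_start():
        # Invoked via _run_atstart() before the registered hook body runs.
        hookenv.log('Starting hook: %s' % hookenv.hook_name())

    hookenv.atstart(log_hook_start)
    # atexit() callbacks run in reverse order, and only on clean completion.
    hookenv.atexit(hookenv.log, 'Hook finished cleanly')

    hooks = hookenv.Hooks()

    def config_changed():
        pass

    hooks.register('config-changed', config_changed)

    if __name__ == '__main__':
        hooks.execute(sys.argv)

Note that config() now registers its own atexit save, which is why Hooks.execute no longer saves config explicitly.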
3478=== modified file 'hooks/charmhelpers/core/host.py'
3479--- hooks/charmhelpers/core/host.py 2015-07-01 13:35:47 +0000
3480+++ hooks/charmhelpers/core/host.py 2015-12-01 15:05:49 +0000
3481@@ -63,6 +63,56 @@
3482 return service_result
3483
3484
3485+def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
3486+ """Pause a system service.
3487+
3488+ Stop it, and prevent it from starting again at boot."""
3489+ stopped = True
3490+ if service_running(service_name):
3491+ stopped = service_stop(service_name)
3492+ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
3493+ sysv_file = os.path.join(initd_dir, service_name)
3494+ if os.path.exists(upstart_file):
3495+ override_path = os.path.join(
3496+ init_dir, '{}.override'.format(service_name))
3497+ with open(override_path, 'w') as fh:
3498+ fh.write("manual\n")
3499+ elif os.path.exists(sysv_file):
3500+ subprocess.check_call(["update-rc.d", service_name, "disable"])
3501+ else:
3502+ # XXX: Support SystemD too
3503+ raise ValueError(
3504+ "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
3505+ service_name, upstart_file, sysv_file))
3506+ return stopped
3507+
3508+
3509+def service_resume(service_name, init_dir="/etc/init",
3510+ initd_dir="/etc/init.d"):
3511+ """Resume a system service.
3512+
3513+    Re-enable starting at boot, then start the service."""
3514+ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
3515+ sysv_file = os.path.join(initd_dir, service_name)
3516+ if os.path.exists(upstart_file):
3517+ override_path = os.path.join(
3518+ init_dir, '{}.override'.format(service_name))
3519+ if os.path.exists(override_path):
3520+ os.unlink(override_path)
3521+ elif os.path.exists(sysv_file):
3522+ subprocess.check_call(["update-rc.d", service_name, "enable"])
3523+ else:
3524+ # XXX: Support SystemD too
3525+ raise ValueError(
3526+ "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
3527+ service_name, upstart_file, sysv_file))
3528+
3529+ started = service_running(service_name)
3530+ if not started:
3531+ started = service_start(service_name)
3532+ return started
3533+
3534+
3535 def service(action, service_name):
3536 """Control a system service"""
3537 cmd = ['service', service_name, action]
3538@@ -119,8 +169,9 @@
3539
3540
3541 def user_exists(username):
3542+ """Check if a user exists"""
3543 try:
3544- user_info = pwd.getpwnam(username)
3545+ pwd.getpwnam(username)
3546 user_exists = True
3547 except KeyError:
3548 user_exists = False
3549@@ -149,11 +200,7 @@
3550
3551 def add_user_to_group(username, group):
3552 """Add a user to a group"""
3553- cmd = [
3554- 'gpasswd', '-a',
3555- username,
3556- group
3557- ]
3558+ cmd = ['gpasswd', '-a', username, group]
3559 log("Adding user {} to group {}".format(username, group))
3560 subprocess.check_call(cmd)
3561
3562@@ -263,8 +310,8 @@
3563 return system_mounts
3564
3565
3566-
3567 def fstab_mount(mountpoint):
3568+ """Mount filesystem using fstab"""
3569 cmd_args = ['mount', mountpoint]
3570 try:
3571 subprocess.check_output(cmd_args)
3572@@ -390,25 +437,80 @@
3573 return(''.join(random_chars))
3574
3575
3576-def list_nics(nic_type):
3577+def is_phy_iface(interface):
3578+ """Returns True if interface is not virtual, otherwise False."""
3579+ if interface:
3580+ sys_net = '/sys/class/net'
3581+ if os.path.isdir(sys_net):
3582+ for iface in glob.glob(os.path.join(sys_net, '*')):
3583+ if '/virtual/' in os.path.realpath(iface):
3584+ continue
3585+
3586+ if interface == os.path.basename(iface):
3587+ return True
3588+
3589+ return False
3590+
3591+
3592+def get_bond_master(interface):
3593+ """Returns bond master if interface is bond slave otherwise None.
3594+
3595+ NOTE: the provided interface is expected to be physical
3596+ """
3597+ if interface:
3598+ iface_path = '/sys/class/net/%s' % (interface)
3599+ if os.path.exists(iface_path):
3600+ if '/virtual/' in os.path.realpath(iface_path):
3601+ return None
3602+
3603+ master = os.path.join(iface_path, 'master')
3604+ if os.path.exists(master):
3605+ master = os.path.realpath(master)
3606+ # make sure it is a bond master
3607+ if os.path.exists(os.path.join(master, 'bonding')):
3608+ return os.path.basename(master)
3609+
3610+ return None
3611+
3612+
3613+def list_nics(nic_type=None):
3614 '''Return a list of nics of given type(s)'''
3615 if isinstance(nic_type, six.string_types):
3616 int_types = [nic_type]
3617 else:
3618 int_types = nic_type
3619+
3620 interfaces = []
3621- for int_type in int_types:
3622- cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
3623+ if nic_type:
3624+ for int_type in int_types:
3625+ cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
3626+ ip_output = subprocess.check_output(cmd).decode('UTF-8')
3627+ ip_output = ip_output.split('\n')
3628+ ip_output = (line for line in ip_output if line)
3629+ for line in ip_output:
3630+ if line.split()[1].startswith(int_type):
3631+ matched = re.search('.*: (' + int_type +
3632+ r'[0-9]+\.[0-9]+)@.*', line)
3633+ if matched:
3634+ iface = matched.groups()[0]
3635+ else:
3636+ iface = line.split()[1].replace(":", "")
3637+
3638+ if iface not in interfaces:
3639+ interfaces.append(iface)
3640+ else:
3641+ cmd = ['ip', 'a']
3642 ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
3643- ip_output = (line for line in ip_output if line)
3644+ ip_output = (line.strip() for line in ip_output if line)
3645+
3646+ key = re.compile('^[0-9]+:\s+(.+):')
3647 for line in ip_output:
3648- if line.split()[1].startswith(int_type):
3649- matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line)
3650- if matched:
3651- interface = matched.groups()[0]
3652- else:
3653- interface = line.split()[1].replace(":", "")
3654- interfaces.append(interface)
3655+ matched = re.search(key, line)
3656+ if matched:
3657+ iface = matched.group(1)
3658+ iface = iface.partition("@")[0]
3659+ if iface not in interfaces:
3660+ interfaces.append(iface)
3661
3662 return interfaces
3663
3664@@ -440,23 +542,6 @@
3665 return hwaddr
3666
3667
3668-def get_mac_nic_map():
3669- '''Return a dict of macs and their corresponding nics'''
3670- cmd = ['ip', '-o', '-0', 'addr', 'list']
3671- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
3672- mac_nic_map = {}
3673- for line in ip_output:
3674- columns = line.split()
3675- if 'link/ether' in columns:
3676- hwaddr = columns[columns.index('link/ether') + 1]
3677- nic = columns[1].replace(':', '')
3678- if mac_nic_map.get(hwaddr):
3679- mac_nic_map[hwaddr].append(nic)
3680- else:
3681- mac_nic_map[hwaddr] = [nic]
3682- return mac_nic_map
3683-
3684-
3685 def cmp_pkgrevno(package, revno, pkgcache=None):
3686 '''Compare supplied revno with the revno of the installed package
3687
3688@@ -485,7 +570,14 @@
3689 os.chdir(cur)
3690
3691
3692-def chownr(path, owner, group, follow_links=True):
3693+def chownr(path, owner, group, follow_links=True, chowntopdir=False):
3694+ """
3695+ Recursively change user and group ownership of files and directories
3696+ in given path. Doesn't chown path itself by default, only its children.
3697+
3698+    :param bool follow_links: Also chown links if True
3699+ :param bool chowntopdir: Also chown path itself if True
3700+ """
3701 uid = pwd.getpwnam(owner).pw_uid
3702 gid = grp.getgrnam(group).gr_gid
3703 if follow_links:
3704@@ -493,6 +585,10 @@
3705 else:
3706 chown = os.lchown
3707
3708+ if chowntopdir:
3709+ broken_symlink = os.path.lexists(path) and not os.path.exists(path)
3710+ if not broken_symlink:
3711+ chown(path, uid, gid)
3712 for root, dirs, files in os.walk(path):
3713 for name in dirs + files:
3714 full = os.path.join(root, name)
3715@@ -503,3 +599,19 @@
3716
3717 def lchownr(path, owner, group):
3718 chownr(path, owner, group, follow_links=False)
3719+
3720+
3721+def get_total_ram():
3722+ '''The total amount of system RAM in bytes.
3723+
3724+ This is what is reported by the OS, and may be overcommitted when
3725+ there are multiple containers hosted on the same machine.
3726+ '''
3727+ with open('/proc/meminfo', 'r') as f:
3728+ for line in f.readlines():
3729+ if line:
3730+ key, value, unit = line.split()
3731+ if key == 'MemTotal:':
3732+ assert unit == 'kB', 'Unknown unit'
3733+ return int(value) * 1024 # Classic, not KiB.
3734+ raise NotImplementedError()
3735
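A quick sketch of the new pause/resume and RAM helpers (the service name is purely illustrative):

    from charmhelpers.core.host import (
        service_pause,
        service_resume,
        get_total_ram,
    )

    # 'neutron-dhcp-agent' is only an example service name.
    if service_pause('neutron-dhcp-agent'):
        # ... perform maintenance while the service is stopped and
        # disabled at boot (upstart override or update-rc.d disable) ...
        service_resume('neutron-dhcp-agent')

    ram_gib = get_total_ram() // (1024 ** 3)  # whole GiB reported by the OS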
3736=== modified file 'hooks/charmhelpers/core/hugepage.py'
3737--- hooks/charmhelpers/core/hugepage.py 2015-06-22 09:26:28 +0000
3738+++ hooks/charmhelpers/core/hugepage.py 2015-12-01 15:05:49 +0000
3739@@ -1,5 +1,3 @@
3740-
3741-#!/usr/bin/env python
3742 # -*- coding: utf-8 -*-
3743
3744 # Copyright 2014-2015 Canonical Limited.
3745@@ -19,36 +17,55 @@
3746 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3747
3748 import yaml
3749-from charmhelpers.core.fstab import Fstab
3750-from charmhelpers.core.sysctl import (
3751- create,
3752-)
3753+from charmhelpers.core import fstab
3754+from charmhelpers.core import sysctl
3755 from charmhelpers.core.host import (
3756 add_group,
3757 add_user_to_group,
3758 fstab_mount,
3759 mkdir,
3760 )
3761+from charmhelpers.core.strutils import bytes_from_string
3762+from subprocess import check_output
3763+
3764
3765 def hugepage_support(user, group='hugetlb', nr_hugepages=256,
3766- max_map_count=65536, mnt_point='/hugepages',
3767- pagesize='2MB', mount=True):
3768+ max_map_count=65536, mnt_point='/run/hugepages/kvm',
3769+ pagesize='2MB', mount=True, set_shmmax=False):
3770+ """Enable hugepages on system.
3771+
3772+ Args:
3773+ user (str) -- Username to allow access to hugepages to
3774+ group (str) -- Group name to own hugepages
3775+ nr_hugepages (int) -- Number of pages to reserve
3776+ max_map_count (int) -- Number of Virtual Memory Areas a process can own
3777+ mnt_point (str) -- Directory to mount hugepages on
3778+ pagesize (str) -- Size of hugepages
3779+        mount (bool) -- Whether to mount hugepages
3780+ """
3781 group_info = add_group(group)
3782 gid = group_info.gr_gid
3783 add_user_to_group(user, group)
3784+ if max_map_count < 2 * nr_hugepages:
3785+ max_map_count = 2 * nr_hugepages
3786 sysctl_settings = {
3787 'vm.nr_hugepages': nr_hugepages,
3788- 'vm.max_map_count': max_map_count, # 1GB
3789+ 'vm.max_map_count': max_map_count,
3790 'vm.hugetlb_shm_group': gid,
3791 }
3792- create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
3793+ if set_shmmax:
3794+ shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
3795+ shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
3796+ if shmmax_minsize > shmmax_current:
3797+ sysctl_settings['kernel.shmmax'] = shmmax_minsize
3798+ sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
3799 mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
3800- fstab = Fstab()
3801- fstab_entry = fstab.get_entry_by_attr('mountpoint', mnt_point)
3802+ lfstab = fstab.Fstab()
3803+ fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
3804 if fstab_entry:
3805- fstab.remove_entry(fstab_entry)
3806- entry = fstab.Entry('nodev', mnt_point, 'hugetlbfs',
3807- 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
3808- fstab.add_entry(entry)
3809+ lfstab.remove_entry(fstab_entry)
3810+ entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
3811+ 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
3812+ lfstab.add_entry(entry)
3813 if mount:
3814 fstab_mount(mnt_point)
3815
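A sketch of the reworked hugepage_support() call; the user name and page count below are illustrative values, not defaults from this branch:

    from charmhelpers.core.hugepage import hugepage_support

    # Reserve 1024 x 2MB hugepages for a hypothetical 'libvirt-qemu' user,
    # mount them at the new default /run/hugepages/kvm, and raise
    # kernel.shmmax if the reservation would exceed the current value.
    hugepage_support('libvirt-qemu', group='hugetlb', nr_hugepages=1024,
                     mnt_point='/run/hugepages/kvm', pagesize='2MB',
                     mount=True, set_shmmax=True)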
3816=== added file 'hooks/charmhelpers/core/kernel.py'
3817--- hooks/charmhelpers/core/kernel.py 1970-01-01 00:00:00 +0000
3818+++ hooks/charmhelpers/core/kernel.py 2015-12-01 15:05:49 +0000
3819@@ -0,0 +1,68 @@
3820+#!/usr/bin/env python
3821+# -*- coding: utf-8 -*-
3822+
3823+# Copyright 2014-2015 Canonical Limited.
3824+#
3825+# This file is part of charm-helpers.
3826+#
3827+# charm-helpers is free software: you can redistribute it and/or modify
3828+# it under the terms of the GNU Lesser General Public License version 3 as
3829+# published by the Free Software Foundation.
3830+#
3831+# charm-helpers is distributed in the hope that it will be useful,
3832+# but WITHOUT ANY WARRANTY; without even the implied warranty of
3833+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3834+# GNU Lesser General Public License for more details.
3835+#
3836+# You should have received a copy of the GNU Lesser General Public License
3837+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3838+
3839+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
3840+
3841+from charmhelpers.core.hookenv import (
3842+ log,
3843+ INFO
3844+)
3845+
3846+from subprocess import check_call, check_output
3847+import re
3848+
3849+
3850+def modprobe(module, persist=True):
3851+ """Load a kernel module and configure for auto-load on reboot."""
3852+ cmd = ['modprobe', module]
3853+
3854+ log('Loading kernel module %s' % module, level=INFO)
3855+
3856+ check_call(cmd)
3857+ if persist:
3858+ with open('/etc/modules', 'r+') as modules:
3859+ if module not in modules.read():
3860+ modules.write(module)
3861+
3862+
3863+def rmmod(module, force=False):
3864+ """Remove a module from the linux kernel"""
3865+ cmd = ['rmmod']
3866+ if force:
3867+ cmd.append('-f')
3868+ cmd.append(module)
3869+ log('Removing kernel module %s' % module, level=INFO)
3870+ return check_call(cmd)
3871+
3872+
3873+def lsmod():
3874+ """Shows what kernel modules are currently loaded"""
3875+ return check_output(['lsmod'],
3876+ universal_newlines=True)
3877+
3878+
3879+def is_module_loaded(module):
3880+ """Checks if a kernel module is already loaded"""
3881+ matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
3882+ return len(matches) > 0
3883+
3884+
3885+def update_initramfs(version='all'):
3886+ """Updates an initramfs image"""
3887+ return check_call(["update-initramfs", "-k", version, "-u"])
3888
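The new kernel helpers can be used along these lines ('dummy' is only an example module name):

    from charmhelpers.core import kernel

    if not kernel.is_module_loaded('dummy'):
        kernel.modprobe('dummy')      # loads now, persists via /etc/modules

    modules_text = kernel.lsmod()     # raw `lsmod` output as a string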
3889=== modified file 'hooks/charmhelpers/core/services/base.py'
3890--- hooks/charmhelpers/core/services/base.py 2015-06-10 07:35:12 +0000
3891+++ hooks/charmhelpers/core/services/base.py 2015-12-01 15:05:49 +0000
3892@@ -128,15 +128,18 @@
3893 """
3894 Handle the current hook by doing The Right Thing with the registered services.
3895 """
3896- hook_name = hookenv.hook_name()
3897- if hook_name == 'stop':
3898- self.stop_services()
3899- else:
3900- self.reconfigure_services()
3901- self.provide_data()
3902- cfg = hookenv.config()
3903- if cfg.implicit_save:
3904- cfg.save()
3905+ hookenv._run_atstart()
3906+ try:
3907+ hook_name = hookenv.hook_name()
3908+ if hook_name == 'stop':
3909+ self.stop_services()
3910+ else:
3911+ self.reconfigure_services()
3912+ self.provide_data()
3913+ except SystemExit as x:
3914+ if x.code is None or x.code == 0:
3915+ hookenv._run_atexit()
3916+ hookenv._run_atexit()
3917
3918 def provide_data(self):
3919 """
3920
3921=== modified file 'hooks/charmhelpers/core/services/helpers.py'
3922--- hooks/charmhelpers/core/services/helpers.py 2015-06-12 12:22:51 +0000
3923+++ hooks/charmhelpers/core/services/helpers.py 2015-12-01 15:05:49 +0000
3924@@ -16,6 +16,7 @@
3925
3926 import os
3927 import yaml
3928+
3929 from charmhelpers.core import hookenv
3930 from charmhelpers.core import host
3931 from charmhelpers.core import templating
3932@@ -240,42 +241,43 @@
3933 action.
3934
3935 :param str source: The template source file, relative to
3936- `$CHARM_DIR/templates`
3937+ `$CHARM_DIR/templates`
3938
3939 :param str target: The target to write the rendered template to
3940 :param str owner: The owner of the rendered file
3941 :param str group: The group of the rendered file
3942 :param int perms: The permissions of the rendered file
3943- :param list template_searchpath: List of paths to search for template in
3944 :param partial on_change_action: functools partial to be executed when
3945 rendered file changes
3946+ :param jinja2 loader template_loader: A jinja2 template loader
3947 """
3948 def __init__(self, source, target,
3949 owner='root', group='root', perms=0o444,
3950- template_searchpath=None, on_change_action=None):
3951+ on_change_action=None, template_loader=None):
3952 self.source = source
3953 self.target = target
3954 self.owner = owner
3955 self.group = group
3956 self.perms = perms
3957- self.template_searchpath = template_searchpath
3958 self.on_change_action = on_change_action
3959+ self.template_loader = template_loader
3960
3961 def __call__(self, manager, service_name, event_name):
3962 pre_checksum = ''
3963 if self.on_change_action and os.path.isfile(self.target):
3964 pre_checksum = host.file_hash(self.target)
3965- print pre_checksum
3966 service = manager.get_service(service_name)
3967 context = {}
3968 for ctx in service.get('required_data', []):
3969 context.update(ctx)
3970 templating.render(self.source, self.target, context,
3971 self.owner, self.group, self.perms,
3972- self.template_searchpath)
3973+ template_loader=self.template_loader)
3974 if self.on_change_action:
3975 if pre_checksum == host.file_hash(self.target):
3976- print "No change detected " + self.target
3977+ hookenv.log(
3978+ 'No change detected: {}'.format(self.target),
3979+ hookenv.DEBUG)
3980 else:
3981 self.on_change_action()
3982
3983
3984=== modified file 'hooks/charmhelpers/core/strutils.py'
3985--- hooks/charmhelpers/core/strutils.py 2015-06-10 07:35:12 +0000
3986+++ hooks/charmhelpers/core/strutils.py 2015-12-01 15:05:49 +0000
3987@@ -18,6 +18,7 @@
3988 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3989
3990 import six
3991+import re
3992
3993
3994 def bool_from_string(value):
3995@@ -40,3 +41,32 @@
3996
3997 msg = "Unable to interpret string value '%s' as boolean" % (value)
3998 raise ValueError(msg)
3999+
4000+
4001+def bytes_from_string(value):
4002+ """Interpret human readable string value as bytes.
4003+
4004+ Returns int
4005+ """
4006+ BYTE_POWER = {
4007+ 'K': 1,
4008+ 'KB': 1,
4009+ 'M': 2,
4010+ 'MB': 2,
4011+ 'G': 3,
4012+ 'GB': 3,
4013+ 'T': 4,
4014+ 'TB': 4,
4015+ 'P': 5,
4016+ 'PB': 5,
4017+ }
4018+ if isinstance(value, six.string_types):
4019+ value = six.text_type(value)
4020+ else:
4021+        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
4022+ raise ValueError(msg)
4023+ matches = re.match("([0-9]+)([a-zA-Z]+)", value)
4024+ if not matches:
4025+ msg = "Unable to interpret string value '%s' as bytes" % (value)
4026+ raise ValueError(msg)
4027+ return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
4028
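For reference, bytes_from_string() resolves the suffix as a power of 1024:

    from charmhelpers.core.strutils import bytes_from_string

    bytes_from_string('2MB')   # 2097152
    bytes_from_string('1G')    # 1073741824
    bytes_from_string('foo')   # raises ValueError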
4029=== modified file 'hooks/charmhelpers/core/templating.py'
4030--- hooks/charmhelpers/core/templating.py 2015-06-17 12:23:31 +0000
4031+++ hooks/charmhelpers/core/templating.py 2015-12-01 15:05:49 +0000
4032@@ -21,8 +21,7 @@
4033
4034
4035 def render(source, target, context, owner='root', group='root',
4036- perms=0o444, templates_dir=None, encoding='UTF-8',
4037- template_searchpath=None):
4038+ perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
4039 """
4040 Render a template.
4041
4042@@ -41,7 +40,7 @@
4043 this will attempt to use charmhelpers.fetch.apt_install to install it.
4044 """
4045 try:
4046- from jinja2 import ChoiceLoader, FileSystemLoader, Environment, exceptions
4047+ from jinja2 import FileSystemLoader, Environment, exceptions
4048 except ImportError:
4049 try:
4050 from charmhelpers.fetch import apt_install
4051@@ -51,25 +50,26 @@
4052 level=hookenv.ERROR)
4053 raise
4054 apt_install('python-jinja2', fatal=True)
4055- from jinja2 import ChoiceLoader, FileSystemLoader, Environment, exceptions
4056+ from jinja2 import FileSystemLoader, Environment, exceptions
4057
4058- if template_searchpath:
4059- fs_loaders = []
4060- for tmpl_dir in template_searchpath:
4061- fs_loaders.append(FileSystemLoader(tmpl_dir))
4062- loader = ChoiceLoader(fs_loaders)
4063+ if template_loader:
4064+ template_env = Environment(loader=template_loader)
4065 else:
4066 if templates_dir is None:
4067 templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
4068- loader = Environment(loader=FileSystemLoader(templates_dir))
4069+ template_env = Environment(loader=FileSystemLoader(templates_dir))
4070 try:
4071 source = source
4072- template = loader.get_template(source)
4073+ template = template_env.get_template(source)
4074 except exceptions.TemplateNotFound as e:
4075 hookenv.log('Could not load template %s from %s.' %
4076 (source, templates_dir),
4077 level=hookenv.ERROR)
4078 raise e
4079 content = template.render(context)
4080- host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
4081+ target_dir = os.path.dirname(target)
4082+ if not os.path.exists(target_dir):
4083+ # This is a terrible default directory permission, as the file
4084+ # or its siblings will often contain secrets.
4085+ host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
4086 host.write_file(target, content.encode(encoding), owner, group, perms)
4087
4088=== modified file 'hooks/charmhelpers/core/unitdata.py'
4089--- hooks/charmhelpers/core/unitdata.py 2015-06-10 07:35:12 +0000
4090+++ hooks/charmhelpers/core/unitdata.py 2015-12-01 15:05:49 +0000
4091@@ -152,6 +152,7 @@
4092 import collections
4093 import contextlib
4094 import datetime
4095+import itertools
4096 import json
4097 import os
4098 import pprint
4099@@ -164,8 +165,7 @@
4100 class Storage(object):
4101 """Simple key value database for local unit state within charms.
4102
4103- Modifications are automatically committed at hook exit. That's
4104- currently regardless of exit code.
4105+ Modifications are not persisted unless :meth:`flush` is called.
4106
4107 To support dicts, lists, integer, floats, and booleans values
4108 are automatically json encoded/decoded.
4109@@ -173,8 +173,11 @@
4110 def __init__(self, path=None):
4111 self.db_path = path
4112 if path is None:
4113- self.db_path = os.path.join(
4114- os.environ.get('CHARM_DIR', ''), '.unit-state.db')
4115+ if 'UNIT_STATE_DB' in os.environ:
4116+ self.db_path = os.environ['UNIT_STATE_DB']
4117+ else:
4118+ self.db_path = os.path.join(
4119+ os.environ.get('CHARM_DIR', ''), '.unit-state.db')
4120 self.conn = sqlite3.connect('%s' % self.db_path)
4121 self.cursor = self.conn.cursor()
4122 self.revision = None
4123@@ -189,15 +192,8 @@
4124 self.conn.close()
4125 self._closed = True
4126
4127- def _scoped_query(self, stmt, params=None):
4128- if params is None:
4129- params = []
4130- return stmt, params
4131-
4132 def get(self, key, default=None, record=False):
4133- self.cursor.execute(
4134- *self._scoped_query(
4135- 'select data from kv where key=?', [key]))
4136+ self.cursor.execute('select data from kv where key=?', [key])
4137 result = self.cursor.fetchone()
4138 if not result:
4139 return default
4140@@ -206,33 +202,81 @@
4141 return json.loads(result[0])
4142
4143 def getrange(self, key_prefix, strip=False):
4144- stmt = "select key, data from kv where key like '%s%%'" % key_prefix
4145- self.cursor.execute(*self._scoped_query(stmt))
4146+ """
4147+ Get a range of keys starting with a common prefix as a mapping of
4148+ keys to values.
4149+
4150+ :param str key_prefix: Common prefix among all keys
4151+ :param bool strip: Optionally strip the common prefix from the key
4152+ names in the returned dict
4153+ :return dict: A (possibly empty) dict of key-value mappings
4154+ """
4155+ self.cursor.execute("select key, data from kv where key like ?",
4156+ ['%s%%' % key_prefix])
4157 result = self.cursor.fetchall()
4158
4159 if not result:
4160- return None
4161+ return {}
4162 if not strip:
4163 key_prefix = ''
4164 return dict([
4165 (k[len(key_prefix):], json.loads(v)) for k, v in result])
4166
4167 def update(self, mapping, prefix=""):
4168+ """
4169+ Set the values of multiple keys at once.
4170+
4171+ :param dict mapping: Mapping of keys to values
4172+ :param str prefix: Optional prefix to apply to all keys in `mapping`
4173+ before setting
4174+ """
4175 for k, v in mapping.items():
4176 self.set("%s%s" % (prefix, k), v)
4177
4178 def unset(self, key):
4179+ """
4180+ Remove a key from the database entirely.
4181+ """
4182 self.cursor.execute('delete from kv where key=?', [key])
4183 if self.revision and self.cursor.rowcount:
4184 self.cursor.execute(
4185 'insert into kv_revisions values (?, ?, ?)',
4186 [key, self.revision, json.dumps('DELETED')])
4187
4188+ def unsetrange(self, keys=None, prefix=""):
4189+ """
4190+ Remove a range of keys starting with a common prefix, from the database
4191+ entirely.
4192+
4193+ :param list keys: List of keys to remove.
4194+ :param str prefix: Optional prefix to apply to all keys in ``keys``
4195+ before removing.
4196+ """
4197+ if keys is not None:
4198+ keys = ['%s%s' % (prefix, key) for key in keys]
4199+ self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
4200+ if self.revision and self.cursor.rowcount:
4201+ self.cursor.execute(
4202+ 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
4203+ list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
4204+ else:
4205+ self.cursor.execute('delete from kv where key like ?',
4206+ ['%s%%' % prefix])
4207+ if self.revision and self.cursor.rowcount:
4208+ self.cursor.execute(
4209+ 'insert into kv_revisions values (?, ?, ?)',
4210+ ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
4211+
4212 def set(self, key, value):
4213+ """
4214+ Set a value in the database.
4215+
4216+ :param str key: Key to set the value for
4217+ :param value: Any JSON-serializable value to be set
4218+ """
4219 serialized = json.dumps(value)
4220
4221- self.cursor.execute(
4222- 'select data from kv where key=?', [key])
4223+ self.cursor.execute('select data from kv where key=?', [key])
4224 exists = self.cursor.fetchone()
4225
4226 # Skip mutations to the same value
4227
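A small sketch of the updated Storage semantics (the on-disk path is hypothetical; charms normally default to $CHARM_DIR/.unit-state.db or $UNIT_STATE_DB):

    from charmhelpers.core import unitdata

    db = unitdata.Storage('/tmp/example-state.db')
    db.update({'host': '10.0.0.1', 'port': 9696}, prefix='api.')
    db.getrange('api.', strip=True)   # {'host': '10.0.0.1', 'port': 9696}
    db.unsetrange(prefix='api.')
    db.flush()                        # nothing persists until flush()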
4228=== modified file 'hooks/charmhelpers/fetch/__init__.py'
4229--- hooks/charmhelpers/fetch/__init__.py 2015-06-10 07:35:12 +0000
4230+++ hooks/charmhelpers/fetch/__init__.py 2015-12-01 15:05:49 +0000
4231@@ -90,6 +90,14 @@
4232 'kilo/proposed': 'trusty-proposed/kilo',
4233 'trusty-kilo/proposed': 'trusty-proposed/kilo',
4234 'trusty-proposed/kilo': 'trusty-proposed/kilo',
4235+ # Liberty
4236+ 'liberty': 'trusty-updates/liberty',
4237+ 'trusty-liberty': 'trusty-updates/liberty',
4238+ 'trusty-liberty/updates': 'trusty-updates/liberty',
4239+ 'trusty-updates/liberty': 'trusty-updates/liberty',
4240+ 'liberty/proposed': 'trusty-proposed/liberty',
4241+ 'trusty-liberty/proposed': 'trusty-proposed/liberty',
4242+ 'trusty-proposed/liberty': 'trusty-proposed/liberty',
4243 }
4244
4245 # The order of this list is very important. Handlers should be listed in from
4246@@ -215,19 +223,27 @@
4247 _run_apt_command(cmd, fatal)
4248
4249
4250+def apt_mark(packages, mark, fatal=False):
4251+ """Flag one or more packages using apt-mark"""
4252+ log("Marking {} as {}".format(packages, mark))
4253+ cmd = ['apt-mark', mark]
4254+ if isinstance(packages, six.string_types):
4255+ cmd.append(packages)
4256+ else:
4257+ cmd.extend(packages)
4258+
4259+ if fatal:
4260+ subprocess.check_call(cmd, universal_newlines=True)
4261+ else:
4262+ subprocess.call(cmd, universal_newlines=True)
4263+
4264+
4265 def apt_hold(packages, fatal=False):
4266- """Hold one or more packages"""
4267- cmd = ['apt-mark', 'hold']
4268- if isinstance(packages, six.string_types):
4269- cmd.append(packages)
4270- else:
4271- cmd.extend(packages)
4272- log("Holding {}".format(packages))
4273-
4274- if fatal:
4275- subprocess.check_call(cmd)
4276- else:
4277- subprocess.call(cmd)
4278+ return apt_mark(packages, 'hold', fatal=fatal)
4279+
4280+
4281+def apt_unhold(packages, fatal=False):
4282+ return apt_mark(packages, 'unhold', fatal=fatal)
4283
4284
4285 def add_source(source, key=None):
4286@@ -370,8 +386,9 @@
4287 for handler in handlers:
4288 try:
4289 installed_to = handler.install(source, *args, **kwargs)
4290- except UnhandledSource:
4291- pass
4292+ except UnhandledSource as e:
4293+ log('Install source attempt unsuccessful: {}'.format(e),
4294+ level='WARNING')
4295 if not installed_to:
4296 raise UnhandledSource("No handler found for source {}".format(source))
4297 return installed_to
4298
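Usage of the refactored apt-mark helpers, roughly:

    from charmhelpers.fetch import apt_hold, apt_unhold

    apt_hold('qemu')                  # shorthand for apt_mark('qemu', 'hold')
    apt_unhold(['qemu', 'seabios'])   # accepts a single name or a list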
4299=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
4300--- hooks/charmhelpers/fetch/archiveurl.py 2015-06-10 07:35:12 +0000
4301+++ hooks/charmhelpers/fetch/archiveurl.py 2015-12-01 15:05:49 +0000
4302@@ -77,6 +77,8 @@
4303 def can_handle(self, source):
4304 url_parts = self.parse_url(source)
4305 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
4306+ # XXX: Why is this returning a boolean and a string? It's
4307+ # doomed to fail since "bool(can_handle('foo://'))" will be True.
4308 return "Wrong source type"
4309 if get_archive_handler(self.base_url(source)):
4310 return True
4311@@ -155,7 +157,11 @@
4312 else:
4313 algorithms = hashlib.algorithms_available
4314 if key in algorithms:
4315- check_hash(dld_file, value, key)
4316+ if len(value) != 1:
4317+ raise TypeError(
4318+ "Expected 1 hash value, not %d" % len(value))
4319+ expected = value[0]
4320+ check_hash(dld_file, expected, key)
4321 if checksum:
4322 check_hash(dld_file, checksum, hash_type)
4323 return extract(dld_file, dest)
4324
4325=== modified file 'hooks/charmhelpers/fetch/giturl.py'
4326--- hooks/charmhelpers/fetch/giturl.py 2015-06-10 07:35:12 +0000
4327+++ hooks/charmhelpers/fetch/giturl.py 2015-12-01 15:05:49 +0000
4328@@ -67,7 +67,7 @@
4329 try:
4330 self.clone(source, dest_dir, branch, depth)
4331 except GitCommandError as e:
4332- raise UnhandledSource(e.message)
4333+ raise UnhandledSource(e)
4334 except OSError as e:
4335 raise UnhandledSource(e.strerror)
4336 return dest_dir
4337
4338=== modified file 'hooks/services.py'
4339--- hooks/services.py 2015-09-14 16:44:47 +0000
4340+++ hooks/services.py 2015-12-01 15:05:49 +0000
4341@@ -2,6 +2,9 @@
4342 from charmhelpers.core import hookenv
4343 from charmhelpers.core.services.base import ServiceManager
4344 from charmhelpers.core.services import helpers
4345+from charmhelpers.contrib.openstack.templating import get_loader
4346+from charmhelpers.core.services.base import service_restart
4347+from charmhelpers.contrib.openstack.utils import os_release
4348
4349 import vpp_utils
4350 import vpp_data
4351@@ -9,6 +12,7 @@
4352
4353 def manage():
4354 config = hookenv.config()
4355+ release = os_release('neutron-common')
4356 manager = ServiceManager([
4357 # Actions which have no prerequisites and can be rerun
4358 {
4359@@ -18,6 +22,7 @@
4360 ],
4361 'provided_data': [
4362 vpp_data.NeutronPluginRelation(),
4363+ vpp_data.AMQPRelation(),
4364 ],
4365 },
4366 # Install hugepages and components reliant on huge pages
4367@@ -34,12 +39,14 @@
4368 {
4369 'service': 'vpp-compute-render',
4370 'required_data': [
4371+ vpp_data.AMQPRelation(),
4372 vpp_data.SystemResources(),
4373 vpp_data.NeutronPluginRelation(),
4374 vpp_data.ODLControllerRelation(),
4375 config,
4376 vpp_data.ConfigTranslation(),
4377 vpp_data.PCIInfo(),
4378+ vpp_data.NeutronPluginAPIRelation(),
4379 ],
4380 'data_ready': [
4381 vpp_utils.bind_orphaned_net_interfaces,
4382@@ -53,6 +60,27 @@
4383 target='/etc/apparmor.d/libvirt/TEMPLATE.qemu',
4384 on_change_action=(partial(vpp_utils.reload_apparmor)),
4385 ),
4386+ helpers.render_template(
4387+ source='neutron.conf',
4388+ template_loader=get_loader('templates/', release),
4389+ target='/etc/neutron/neutron.conf',
4390+ on_change_action=(partial(service_restart,
4391+ 'neutron-dhcp-agent')),
4392+ ),
4393+ helpers.render_template(
4394+ source='dhcp_agent.ini',
4395+ template_loader=get_loader('templates/', release),
4396+ target='/etc/neutron/dhcp_agent.ini',
4397+ on_change_action=(partial(service_restart,
4398+ 'neutron-dhcp-agent')),
4399+ ),
4400+ helpers.render_template(
4401+ source='metadata_agent.ini',
4402+ template_loader=get_loader('templates/', release),
4403+ target='/etc/neutron/metadata_agent.ini',
4404+ on_change_action=(partial(service_restart,
4405+ 'neutron-metadata-agent')),
4406+ ),
4407 vpp_utils.odl_node_registration,
4408 vpp_utils.odl_register_macs,
4409 vpp_utils.bind_orphaned_net_interfaces,
4410
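The added templates are rendered through the OpenStack template loader; roughly (the context value here is made up for illustration):

    from charmhelpers.contrib.openstack.templating import get_loader
    from charmhelpers.core import templating

    # get_loader() returns a Jinja2 loader that searches the per-release
    # template directories, so templates/icehouse applies from icehouse on.
    loader = get_loader('templates/', 'icehouse')
    templating.render('dhcp_agent.ini', '/etc/neutron/dhcp_agent.ini',
                      {'debug': False}, template_loader=loader)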
4411=== modified file 'hooks/vpp_data.py'
4412--- hooks/vpp_data.py 2015-08-17 06:56:01 +0000
4413+++ hooks/vpp_data.py 2015-12-01 15:05:49 +0000
4414@@ -2,16 +2,47 @@
4415 import glob
4416 import os
4417 import json
4418+from charmhelpers.contrib.openstack import context
4419 from charmhelpers.core.services import helpers
4420 from charmhelpers.core.hookenv import(
4421 config,
4422 log,
4423 )
4424+import uuid
4425
4426 VLAN = 'vlan'
4427 VXLAN = 'vxlan'
4428 GRE = 'gre'
4429 OVERLAY_NET_TYPES = [VXLAN, GRE]
4430+NEUTRON_CONF_DIR = "/etc/neutron"
4431+SHARED_SECRET_FILE = "/etc/neutron/secret.txt"
4432+
4433+class NeutronPluginAPIRelation(helpers.RelationContext):
4434+ name = 'neutron-plugin-api'
4435+ interface = 'neutron-plugin-api'
4436+
4437+ def get_first_data(self):
4438+ if self.get('neutron-plugin-api') and len(self['neutron-plugin-api']):
4439+ return self['neutron-plugin-api'][0]
4440+ else:
4441+ return {}
4442+
4443+ def get_data(self):
4444+ super(NeutronPluginAPIRelation, self).get_data()
4445+ api_server = self.get_first_data()
4446+ self['service_host'] = api_server.get('service_host')
4447+ self['service_protocol'] = api_server.get('service_protocol', 'http')
4448+ self['service_port'] = api_server.get('service_port')
4449+ self['admin_tenant_name'] = api_server.get('service_tenant')
4450+ self['admin_user'] = api_server.get('service_username')
4451+ self['admin_password'] = api_server.get('service_password')
4452+ self['region'] = api_server.get('region')
4453+
4454+ def is_ready(self):
4455+ if 'service_password' in self.get_first_data():
4456+ return True
4457+ else:
4458+ return False
4459
4460
4461 class ODLControllerRelation(helpers.RelationContext):
4462@@ -47,6 +78,22 @@
4463 name = 'neutron-plugin'
4464 interface = 'neutron-plugin-api-subordinate'
4465
4466+ def __init__(self, *args, **kwargs):
4467+ super(NeutronPluginRelation, self).__init__(*args, **kwargs)
4468+ self['shared_secret'] = self.get_metadata_secret()
4469+
4470+ def get_metadata_secret(self):
4471+ secret = None
4472+ if os.path.exists(os.path.dirname(SHARED_SECRET_FILE)):
4473+ if os.path.exists(SHARED_SECRET_FILE):
4474+ with open(SHARED_SECRET_FILE, 'r') as secret_file:
4475+ secret = secret_file.read().strip()
4476+ else:
4477+ secret = str(uuid.uuid4())
4478+ with open(SHARED_SECRET_FILE, 'w') as secret_file:
4479+ secret_file.write(secret)
4480+ return secret
4481+
4482 def provide_data(self):
4483 # Add sections and tuples to insert values into neutron-server's
4484 # neutron.conf e.g.
4485@@ -83,6 +130,7 @@
4486 relation_info = {
4487 'neutron-plugin': 'odl',
4488 'subordinate_configuration': json.dumps(principle_config),
4489+ 'metadata-shared-secret': self['shared_secret'],
4490 }
4491 return relation_info
4492
4493@@ -179,3 +227,30 @@
4494 'net': tmp_dict.get('net'),
4495 }]
4496 return mac_net_config
4497+
4498+
4499+class AMQPRelation(helpers.RelationContext):
4500+ name = 'amqp'
4501+ interface = 'rabbitmq'
4502+
4503+ def __init__(self, *args, **kwargs):
4504+ self.ctxt = context.AMQPContext(ssl_dir=NEUTRON_CONF_DIR)()
4505+ super(AMQPRelation, self).__init__(*args, **kwargs)
4506+
4507+ def get_data(self):
4508+ super(AMQPRelation, self).get_data()
4509+ for key, value in self.ctxt.iteritems():
4510+ self[key] = value
4511+
4512+ def provide_data(self):
4513+ relation_info = {
4514+ 'username': config('rabbit-user'),
4515+ 'vhost': config('rabbit-vhost'),
4516+ }
4517+ return relation_info
4518+
4519+ def is_ready(self):
4520+ if self.ctxt.get('rabbitmq_password'):
4521+ return True
4522+ else:
4523+ return False
4524
4525=== modified file 'hooks/vpp_utils.py'
4526--- hooks/vpp_utils.py 2015-09-14 16:44:47 +0000
4527+++ hooks/vpp_utils.py 2015-12-01 15:05:49 +0000
4528@@ -24,7 +24,8 @@
4529 ODL_MOUNT_PATH = ('/restconf/config/opendaylight-inventory:nodes/node/'
4530 'controller-config/yang-ext:mount/config:modules')
4531
4532-PACKAGES = ['python-psutil', 'gcc', 'make', 'python-crypto', 'qemu', 'seabios']
4533+PACKAGES = ['python-psutil', 'gcc', 'make', 'python-crypto', 'qemu', 'seabios',
4534+ 'neutron-dhcp-agent']
4535
4536
4537 def install_packages(servicename):
4538
4539=== modified file 'metadata.yaml'
4540--- metadata.yaml 2015-07-21 15:18:07 +0000
4541+++ metadata.yaml 2015-12-01 15:05:49 +0000
4542@@ -17,3 +17,7 @@
4543 container:
4544 interface: juju-info
4545 scope: container
4546+ amqp:
4547+ interface: rabbitmq
4548+ neutron-plugin-api:
4549+ interface: neutron-plugin-api
4550
4551=== added directory 'templates/icehouse'
4552=== added file 'templates/icehouse/dhcp_agent.ini'
4553--- templates/icehouse/dhcp_agent.ini 1970-01-01 00:00:00 +0000
4554+++ templates/icehouse/dhcp_agent.ini 2015-12-01 15:05:49 +0000
4555@@ -0,0 +1,13 @@
4556+###############################################################################
4557+# [ WARNING ]
4558+# Configuration file maintained by Juju. Local changes may be overwritten.
4559+###############################################################################
4560+[DEFAULT]
4561+debug = {{ debug }}
4562+resync_interval = 5
4563+interface_driver = neutron.agent.linux.interface.NSNullDriver
4564+dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
4565+use_namespaces = True
4566+enable_isolated_metadata = True
4567+enable_metadata_network = True
4568+
4569
4570=== added file 'templates/icehouse/metadata_agent.ini'
4571--- templates/icehouse/metadata_agent.ini 1970-01-01 00:00:00 +0000
4572+++ templates/icehouse/metadata_agent.ini 2015-12-01 15:05:49 +0000
4573@@ -0,0 +1,15 @@
4574+###############################################################################
4575+# [ WARNING ]
4576+# Configuration file maintained by Juju. Local changes may be overwritten.
4577+###############################################################################
4578+# Metadata service seems to cache the neutron api url from keystone, so
4579+# trigger a restart of the metadata agent if it changes.
4579+
4580+[DEFAULT]
4581+auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v2.0
4582+auth_region = {{ region }}
4583+admin_tenant_name = {{ admin_tenant_name }}
4584+admin_user = {{ admin_user }}
4585+admin_password = {{ admin_password }}
4586+nova_metadata_port = 8775
4587+metadata_proxy_shared_secret = {{ shared_secret }}
4588+cache_url = memory://?default_ttl=5
4589
4590=== added file 'templates/icehouse/neutron.conf'
4591--- templates/icehouse/neutron.conf 1970-01-01 00:00:00 +0000
4592+++ templates/icehouse/neutron.conf 2015-12-01 15:05:49 +0000
4593@@ -0,0 +1,31 @@
4594+# icehouse
4595+###############################################################################
4596+# [ WARNING ]
4597+# Configuration file maintained by Juju. Local changes may be overwritten.
4598+# Config managed by neutron-openvswitch charm
4599+###############################################################################
4600+[DEFAULT]
4601+verbose = {{ verbose }}
4602+debug = {{ debug }}
4603+state_path = /var/lib/neutron
4604+lock_path = $state_path/lock
4605+bind_host = 0.0.0.0
4606+bind_port = 9696
4607+
4608+api_paste_config = /etc/neutron/api-paste.ini
4609+auth_strategy = keystone
4610+default_notification_level = INFO
4611+notification_topics = notifications
4612+
4613+{% include "parts/rabbitmq" %}
4614+
4615+[QUOTAS]
4616+
4617+[DEFAULT_SERVICETYPE]
4618+
4619+[AGENT]
4620+root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
4621+
4622+[keystone_authtoken]
4623+signing_dir = /var/lib/neutron/keystone-signing
4624+
4625
4626=== added directory 'templates/parts'
4627=== added file 'templates/parts/rabbitmq'
4628--- templates/parts/rabbitmq 1970-01-01 00:00:00 +0000
4629+++ templates/parts/rabbitmq 2015-12-01 15:05:49 +0000
4630@@ -0,0 +1,21 @@
4631+{% if rabbitmq_host or rabbitmq_hosts -%}
4632+rabbit_userid = {{ rabbitmq_user }}
4633+rabbit_virtual_host = {{ rabbitmq_virtual_host }}
4634+rabbit_password = {{ rabbitmq_password }}
4635+{% if rabbitmq_hosts -%}
4636+rabbit_hosts = {{ rabbitmq_hosts }}
4637+{% if rabbitmq_ha_queues -%}
4638+rabbit_ha_queues = True
4639+rabbit_durable_queues = False
4640+{% endif -%}
4641+{% else -%}
4642+rabbit_host = {{ rabbitmq_host }}
4643+{% endif -%}
4644+{% if rabbit_ssl_port -%}
4645+rabbit_use_ssl = True
4646+rabbit_port = {{ rabbit_ssl_port }}
4647+{% if rabbit_ssl_ca -%}
4648+kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
4649+{% endif -%}
4650+{% endif -%}
4651+{% endif -%}
4652\ No newline at end of file
4653
4654=== modified file 'tests/charmhelpers/contrib/amulet/utils.py'
4655--- tests/charmhelpers/contrib/amulet/utils.py 2015-06-16 07:53:15 +0000
4656+++ tests/charmhelpers/contrib/amulet/utils.py 2015-12-01 15:05:49 +0000
4657@@ -14,14 +14,21 @@
4658 # You should have received a copy of the GNU Lesser General Public License
4659 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4660
4661-import ConfigParser
4662 import io
4663 import logging
4664+import os
4665 import re
4666 import sys
4667 import time
4668
4669+import amulet
4670+import distro_info
4671 import six
4672+from six.moves import configparser
4673+if six.PY3:
4674+ from urllib import parse as urlparse
4675+else:
4676+ import urlparse
4677
4678
4679 class AmuletUtils(object):
4680@@ -33,6 +40,7 @@
4681
4682 def __init__(self, log_level=logging.ERROR):
4683 self.log = self.get_logger(level=log_level)
4684+ self.ubuntu_releases = self.get_ubuntu_releases()
4685
4686 def get_logger(self, name="amulet-logger", level=logging.DEBUG):
4687 """Get a logger object that will log to stdout."""
4688@@ -70,12 +78,44 @@
4689 else:
4690 return False
4691
4692+ def get_ubuntu_release_from_sentry(self, sentry_unit):
4693+ """Get Ubuntu release codename from sentry unit.
4694+
4695+ :param sentry_unit: amulet sentry/service unit pointer
4696+ :returns: list of strings - release codename, failure message
4697+ """
4698+ msg = None
4699+ cmd = 'lsb_release -cs'
4700+ release, code = sentry_unit.run(cmd)
4701+ if code == 0:
4702+ self.log.debug('{} lsb_release: {}'.format(
4703+ sentry_unit.info['unit_name'], release))
4704+ else:
4705+ msg = ('{} `{}` returned {} '
4706+ '{}'.format(sentry_unit.info['unit_name'],
4707+ cmd, release, code))
4708+ if release not in self.ubuntu_releases:
4709+ msg = ("Release ({}) not found in Ubuntu releases "
4710+ "({})".format(release, self.ubuntu_releases))
4711+ return release, msg
4712+
4713 def validate_services(self, commands):
4714- """Validate services.
4715-
4716- Verify the specified services are running on the corresponding
4717+ """Validate that lists of commands succeed on service units. Can be
4718+ used to verify system services are running on the corresponding
4719 service units.
4720- """
4721+
4722+ :param commands: dict with sentry keys and arbitrary command list vals
4723+ :returns: None if successful, Failure string message otherwise
4724+ """
4725+ self.log.debug('Checking status of system services...')
4726+
4727+ # /!\ DEPRECATION WARNING (beisner):
4728+ # New and existing tests should be rewritten to use
4729+ # validate_services_by_name() as it is aware of init systems.
4730+ self.log.warn('/!\\ DEPRECATION WARNING: use '
4731+ 'validate_services_by_name instead of validate_services '
4732+ 'due to init system differences.')
4733+
4734 for k, v in six.iteritems(commands):
4735 for cmd in v:
4736 output, code = k.run(cmd)
4737@@ -86,6 +126,45 @@
4738 return "command `{}` returned {}".format(cmd, str(code))
4739 return None
4740
4741+ def validate_services_by_name(self, sentry_services):
4742+ """Validate system service status by service name, automatically
4743+ detecting init system based on Ubuntu release codename.
4744+
4745+ :param sentry_services: dict with sentry keys and svc list values
4746+ :returns: None if successful, Failure string message otherwise
4747+ """
4748+ self.log.debug('Checking status of system services...')
4749+
4750+ # Point at which systemd became a thing
4751+ systemd_switch = self.ubuntu_releases.index('vivid')
4752+
4753+ for sentry_unit, services_list in six.iteritems(sentry_services):
4754+ # Get lsb_release codename from unit
4755+ release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
4756+ if ret:
4757+ return ret
4758+
4759+ for service_name in services_list:
4760+ if (self.ubuntu_releases.index(release) >= systemd_switch or
4761+ service_name in ['rabbitmq-server', 'apache2']):
4762+ # init is systemd (or regular sysv)
4763+ cmd = 'sudo service {} status'.format(service_name)
4764+ output, code = sentry_unit.run(cmd)
4765+ service_running = code == 0
4766+ elif self.ubuntu_releases.index(release) < systemd_switch:
4767+ # init is upstart
4768+ cmd = 'sudo status {}'.format(service_name)
4769+ output, code = sentry_unit.run(cmd)
4770+ service_running = code == 0 and "start/running" in output
4771+
4772+ self.log.debug('{} `{}` returned '
4773+ '{}'.format(sentry_unit.info['unit_name'],
4774+ cmd, code))
4775+ if not service_running:
4776+ return u"command `{}` returned {} {}".format(
4777+ cmd, output, str(code))
4778+ return None
4779+
4780 def _get_config(self, unit, filename):
4781 """Get a ConfigParser object for parsing a unit's config file."""
4782 file_contents = unit.file_contents(filename)
4783@@ -93,7 +172,7 @@
4784 # NOTE(beisner): by default, ConfigParser does not handle options
4785 # with no value, such as the flags used in the mysql my.cnf file.
4786 # https://bugs.python.org/issue7005
4787- config = ConfigParser.ConfigParser(allow_no_value=True)
4788+ config = configparser.ConfigParser(allow_no_value=True)
4789 config.readfp(io.StringIO(file_contents))
4790 return config
4791
4792@@ -103,7 +182,15 @@
4793
4794 Verify that the specified section of the config file contains
4795 the expected option key:value pairs.
4796+
4797+ Compare expected dictionary data vs actual dictionary data.
4798+ The values in the 'expected' dictionary can be strings, bools, ints,
4799+ longs, or can be a function that evaluates a variable and returns a
4800+ bool.
4801 """
4802+ self.log.debug('Validating config file data ({} in {} on {})'
4803+ '...'.format(section, config_file,
4804+ sentry_unit.info['unit_name']))
4805 config = self._get_config(sentry_unit, config_file)
4806
4807 if section != 'DEFAULT' and not config.has_section(section):
4808@@ -112,9 +199,20 @@
4809 for k in expected.keys():
4810 if not config.has_option(section, k):
4811 return "section [{}] is missing option {}".format(section, k)
4812- if config.get(section, k) != expected[k]:
4813+
4814+ actual = config.get(section, k)
4815+ v = expected[k]
4816+ if (isinstance(v, six.string_types) or
4817+ isinstance(v, bool) or
4818+ isinstance(v, six.integer_types)):
4819+ # handle explicit values
4820+ if actual != v:
4821+ return "section [{}] {}:{} != expected {}:{}".format(
4822+ section, k, actual, k, expected[k])
4823+ # handle function pointers, such as not_null or valid_ip
4824+ elif not v(actual):
4825 return "section [{}] {}:{} != expected {}:{}".format(
4826- section, k, config.get(section, k), k, expected[k])
4827+ section, k, actual, k, expected[k])
4828 return None
4829
4830 def _validate_dict_data(self, expected, actual):
4831@@ -122,7 +220,7 @@
4832
4833 Compare expected dictionary data vs actual dictionary data.
4834 The values in the 'expected' dictionary can be strings, bools, ints,
4835- longs, or can be a function that evaluate a variable and returns a
4836+ longs, or can be a function that evaluates a variable and returns a
4837 bool.
4838 """
4839 self.log.debug('actual: {}'.format(repr(actual)))
4840@@ -133,8 +231,10 @@
4841 if (isinstance(v, six.string_types) or
4842 isinstance(v, bool) or
4843 isinstance(v, six.integer_types)):
4844+ # handle explicit values
4845 if v != actual[k]:
4846 return "{}:{}".format(k, actual[k])
4847+ # handle function pointers, such as not_null or valid_ip
4848 elif not v(actual[k]):
4849 return "{}:{}".format(k, actual[k])
4850 else:
4851@@ -321,3 +421,133 @@
4852
4853 def endpoint_error(self, name, data):
4854 return 'unexpected endpoint data in {} - {}'.format(name, data)
4855+
4856+ def get_ubuntu_releases(self):
4857+ """Return a list of all Ubuntu releases in order of release."""
4858+ _d = distro_info.UbuntuDistroInfo()
4859+ _release_list = _d.all
4860+ self.log.debug('Ubuntu release list: {}'.format(_release_list))
4861+ return _release_list
4862+
4863+ def file_to_url(self, file_rel_path):
4864+ """Convert a relative file path to a file URL."""
4865+ _abs_path = os.path.abspath(file_rel_path)
4866+ return urlparse.urlparse(_abs_path, scheme='file').geturl()
4867+
4868+ def check_commands_on_units(self, commands, sentry_units):
4869+ """Check that all commands in a list exit zero on all
4870+ sentry units in a list.
4871+
4872+ :param commands: list of bash commands
4873+ :param sentry_units: list of sentry unit pointers
4874+ :returns: None if successful; Failure message otherwise
4875+ """
4876+ self.log.debug('Checking exit codes for {} commands on {} '
4877+ 'sentry units...'.format(len(commands),
4878+ len(sentry_units)))
4879+ for sentry_unit in sentry_units:
4880+ for cmd in commands:
4881+ output, code = sentry_unit.run(cmd)
4882+ if code == 0:
4883+ self.log.debug('{} `{}` returned {} '
4884+ '(OK)'.format(sentry_unit.info['unit_name'],
4885+ cmd, code))
4886+ else:
4887+ return ('{} `{}` returned {} '
4888+ '{}'.format(sentry_unit.info['unit_name'],
4889+ cmd, code, output))
4890+ return None
4891+
4892+ def get_process_id_list(self, sentry_unit, process_name):
4893+ """Get a list of process ID(s) from a single sentry juju unit
4894+ for a single process name.
4895+
4896+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
4897+ :param process_name: Process name
4898+ :returns: List of process IDs
4899+ """
4900+ cmd = 'pidof {}'.format(process_name)
4901+ output, code = sentry_unit.run(cmd)
4902+ if code != 0:
4903+ msg = ('{} `{}` returned {} '
4904+ '{}'.format(sentry_unit.info['unit_name'],
4905+ cmd, code, output))
4906+ amulet.raise_status(amulet.FAIL, msg=msg)
4907+ return str(output).split()
4908+
4909+ def get_unit_process_ids(self, unit_processes):
4910+ """Construct a dict containing unit sentries, process names, and
4911+ process IDs."""
4912+ pid_dict = {}
4913+ for sentry_unit, process_list in unit_processes.iteritems():
4914+ pid_dict[sentry_unit] = {}
4915+ for process in process_list:
4916+ pids = self.get_process_id_list(sentry_unit, process)
4917+ pid_dict[sentry_unit].update({process: pids})
4918+ return pid_dict
4919+
4920+ def validate_unit_process_ids(self, expected, actual):
4921+ """Validate process id quantities for services on units."""
4922+ self.log.debug('Checking units for running processes...')
4923+ self.log.debug('Expected PIDs: {}'.format(expected))
4924+ self.log.debug('Actual PIDs: {}'.format(actual))
4925+
4926+ if len(actual) != len(expected):
4927+ return ('Unit count mismatch. expected, actual: {}, '
4928+ '{} '.format(len(expected), len(actual)))
4929+
4930+ for (e_sentry, e_proc_names) in expected.iteritems():
4931+ e_sentry_name = e_sentry.info['unit_name']
4932+ if e_sentry in actual.keys():
4933+ a_proc_names = actual[e_sentry]
4934+ else:
4935+ return ('Expected sentry ({}) not found in actual dict data.'
4936+ '{}'.format(e_sentry_name, e_sentry))
4937+
4938+ if len(e_proc_names.keys()) != len(a_proc_names.keys()):
4939+ return ('Process name count mismatch. expected, actual: {}, '
4940+ '{}'.format(len(expected), len(actual)))
4941+
4942+ for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
4943+ zip(e_proc_names.items(), a_proc_names.items()):
4944+ if e_proc_name != a_proc_name:
4945+ return ('Process name mismatch. expected, actual: {}, '
4946+ '{}'.format(e_proc_name, a_proc_name))
4947+
4948+ a_pids_length = len(a_pids)
4949+ fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
4950+ '{}, {} ({})'.format(e_sentry_name, e_proc_name,
4951+ e_pids_length, a_pids_length,
4952+ a_pids))
4953+
4954+ # If expected is not bool, ensure PID quantities match
4955+ if not isinstance(e_pids_length, bool) and \
4956+ a_pids_length != e_pids_length:
4957+ return fail_msg
4958+ # If expected is bool True, ensure 1 or more PIDs exist
4959+ elif isinstance(e_pids_length, bool) and \
4960+ e_pids_length is True and a_pids_length < 1:
4961+ return fail_msg
4962+ # If expected is bool False, ensure 0 PIDs exist
4963+ elif isinstance(e_pids_length, bool) and \
4964+ e_pids_length is False and a_pids_length != 0:
4965+ return fail_msg
4966+ else:
4967+ self.log.debug('PID check OK: {} {} {}: '
4968+ '{}'.format(e_sentry_name, e_proc_name,
4969+ e_pids_length, a_pids))
4970+ return None
4971+
4972+ def validate_list_of_identical_dicts(self, list_of_dicts):
4973+ """Check that all dicts within a list are identical."""
4974+ hashes = []
4975+ for _dict in list_of_dicts:
4976+ hashes.append(hash(frozenset(_dict.items())))
4977+
4978+ self.log.debug('Hashes: {}'.format(hashes))
4979+ if len(set(hashes)) == 1:
4980+ self.log.debug('Dicts within list are identical')
4981+ else:
4982+ return 'Dicts within list are not identical'
4983+
4984+ return None
4985
4986=== modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
4987--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-06-24 08:25:28 +0000
4988+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-12-01 15:05:49 +0000
4989@@ -44,7 +44,7 @@
4990 Determine if the local branch being tested is derived from its
4991 stable or next (dev) branch, and based on this, use the corresonding
4992 stable or next branches for the other_services."""
4993- base_charms = ['mysql', 'mongodb']
4994+ base_charms = ['mysql', 'mongodb', 'nrpe']
4995
4996 if self.series in ['precise', 'trusty']:
4997 base_series = self.series
4998@@ -83,9 +83,10 @@
4999 services.append(this_service)
5000 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
The diff has been truncated for viewing.
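
The process-check helpers added to the amulet utilities above (get_process_id_list, get_unit_process_ids and validate_unit_process_ids) are normally driven from a charm's amulet test. The following is an illustrative sketch only, not part of the proposed branch: `u` is assumed to be a charmhelpers.contrib.amulet.utils.AmuletUtils instance, `dhcp_sentry` an amulet sentry for the unit under test, and the process names are placeholders.

    # Illustrative sketch only -- not part of the proposed diff.
    import amulet

    # Map each sentry to the processes it should be running. An int means
    # exactly that many PIDs are expected; True means "one or more PIDs".
    expected_processes = {
        dhcp_sentry: {
            'neutron-dhcp-agent': 1,
            'neutron-metadata-agent': True,
        },
    }

    # get_unit_process_ids() only iterates the process names, so the same
    # mapping can be passed in; it returns {sentry: {process: [pids]}}.
    actual_pids = u.get_unit_process_ids(expected_processes)

    ret = u.validate_unit_process_ids(expected_processes, actual_pids)
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)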
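Similarly, the change to the expected-value handling in the config validation helper (and in _validate_dict_data) means an expected option value can now be a callable as well as a literal. A hedged sketch of how a test might use that, again assuming `u` is the AmuletUtils instance, that the patched method is validate_config_data, and that the not_null/valid_ip validators referenced in the diff comment are available; the config path and option names are hypothetical.

    # Illustrative sketch only -- not part of the proposed diff.
    expected = {
        'debug': 'False',                            # literal string compare
        'nova_metadata_ip': u.valid_ip,              # callable: any well-formed IP passes
        'metadata_proxy_shared_secret': u.not_null,  # callable: any non-empty value passes
    }
    ret = u.validate_config_data(dhcp_sentry, '/etc/neutron/metadata_agent.ini',
                                 'DEFAULT', expected)
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)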
