Merge lp:~gnuoy/charms/trusty/cisco-vpp/dhcp into lp:~openstack-charmers/charms/trusty/cisco-vpp/next

Proposed by Liam Young
Status: Merged
Merged at revision: 116
Proposed branch: lp:~gnuoy/charms/trusty/cisco-vpp/dhcp
Merge into: lp:~openstack-charmers/charms/trusty/cisco-vpp/next
Diff against target: 5609 lines (+3361/-493)
40 files modified
charm-helpers-hooks.yaml (+1/-1)
config.yaml (+8/-0)
hooks/ODL.py (+2/-1)
hooks/charmhelpers/contrib/network/ip.py (+10/-4)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+158/-15)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+742/-51)
hooks/charmhelpers/contrib/openstack/context.py (+192/-63)
hooks/charmhelpers/contrib/openstack/neutron.py (+57/-16)
hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+12/-6)
hooks/charmhelpers/contrib/openstack/templating.py (+32/-29)
hooks/charmhelpers/contrib/openstack/utils.py (+324/-33)
hooks/charmhelpers/contrib/python/packages.py (+2/-0)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+272/-43)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+10/-0)
hooks/charmhelpers/contrib/storage/linux/utils.py (+4/-3)
hooks/charmhelpers/core/files.py (+45/-0)
hooks/charmhelpers/core/hookenv.py (+249/-49)
hooks/charmhelpers/core/host.py (+148/-36)
hooks/charmhelpers/core/hugepage.py (+33/-16)
hooks/charmhelpers/core/kernel.py (+68/-0)
hooks/charmhelpers/core/services/base.py (+12/-9)
hooks/charmhelpers/core/services/helpers.py (+9/-7)
hooks/charmhelpers/core/strutils.py (+30/-0)
hooks/charmhelpers/core/templating.py (+12/-12)
hooks/charmhelpers/core/unitdata.py (+61/-17)
hooks/charmhelpers/fetch/__init__.py (+31/-14)
hooks/charmhelpers/fetch/archiveurl.py (+7/-1)
hooks/charmhelpers/fetch/giturl.py (+1/-1)
hooks/services.py (+28/-0)
hooks/vpp_data.py (+75/-0)
hooks/vpp_utils.py (+2/-1)
metadata.yaml (+4/-0)
templates/icehouse/dhcp_agent.ini (+13/-0)
templates/icehouse/metadata_agent.ini (+15/-0)
templates/icehouse/neutron.conf (+31/-0)
templates/parts/rabbitmq (+21/-0)
tests/charmhelpers/contrib/amulet/utils.py (+239/-9)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+38/-4)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+361/-51)
unit_tests/test_vpp_utils.py (+2/-1)
To merge this branch: bzr merge lp:~gnuoy/charms/trusty/cisco-vpp/dhcp
Reviewer Review Type Date Requested Status
James Page Approve
Review via email: mp+278819@code.launchpad.net

Description of the change

This merge proposal adds support for serving dhcp and metadata requests to guests.

To post a comment you must log in.
Revision history for this message
James Page (james-page) :
review: Needs Information
121. By Liam Young

Fix typo in context that was returning the wrong IP for keystone

122. By Liam Young

ODL initially returns 404s when querying nodes, so back off and retry the node query

123. By Liam Young

Fix bug causing charm to fail if /etc/neutron does not exist

Revision history for this message
James Page (james-page) wrote :

Just a few niggles - but they need taking care of.

review: Needs Fixing
124. By Liam Young

General tidyup/fixes from mp feedback from JamesPage

Revision history for this message
James Page (james-page) :
review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== modified file 'charm-helpers-hooks.yaml'
--- charm-helpers-hooks.yaml 2015-06-24 09:56:23 +0000
+++ charm-helpers-hooks.yaml 2015-12-01 15:05:49 +0000
@@ -1,4 +1,4 @@
1branch: lp:~gnuoy/charm-helpers/cisco-vpp/1branch: lp:charm-helpers
2destination: hooks/charmhelpers2destination: hooks/charmhelpers
3include:3include:
4 - core4 - core
55
=== modified file 'config.yaml'
--- config.yaml 2015-08-14 07:27:33 +0000
+++ config.yaml 2015-12-01 15:05:49 +0000
@@ -41,3 +41,11 @@
41 mac-network-map:41 mac-network-map:
42 default: ''42 default: ''
43 type: string43 type: string
44 rabbit-user:
45 default: neutron
46 type: string
47 description: Username used to access RabbitMQ queue
48 rabbit-vhost:
49 default: openstack
50 type: string
51 description: RabbitMQ vhost
4452
=== modified file 'hooks/ODL.py'
--- hooks/ODL.py 2015-09-14 16:44:47 +0000
+++ hooks/ODL.py 2015-12-01 15:05:49 +0000
@@ -73,7 +73,8 @@
7373
74 def get_odl_registered_nodes(self):74 def get_odl_registered_nodes(self):
75 log('Querying nodes registered with odl')75 log('Querying nodes registered with odl')
76 odl_req = self.contact_odl('GET', self.node_query_url)76 odl_req = self.contact_odl('GET', self.node_query_url,
77 retry_rcs=[requests.codes.not_found])
77 odl_json = odl_req.json()78 odl_json = odl_req.json()
78 odl_node_ids = []79 odl_node_ids = []
79 if odl_json.get('nodes'):80 if odl_json.get('nodes'):
8081
=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
--- hooks/charmhelpers/contrib/network/ip.py 2015-06-10 15:45:48 +0000
+++ hooks/charmhelpers/contrib/network/ip.py 2015-12-01 15:05:49 +0000
@@ -23,7 +23,7 @@
23from functools import partial23from functools import partial
2424
25from charmhelpers.core.hookenv import unit_get25from charmhelpers.core.hookenv import unit_get
26from charmhelpers.fetch import apt_install26from charmhelpers.fetch import apt_install, apt_update
27from charmhelpers.core.hookenv import (27from charmhelpers.core.hookenv import (
28 log,28 log,
29 WARNING,29 WARNING,
@@ -32,13 +32,15 @@
32try:32try:
33 import netifaces33 import netifaces
34except ImportError:34except ImportError:
35 apt_install('python-netifaces')35 apt_update(fatal=True)
36 apt_install('python-netifaces', fatal=True)
36 import netifaces37 import netifaces
3738
38try:39try:
39 import netaddr40 import netaddr
40except ImportError:41except ImportError:
41 apt_install('python-netaddr')42 apt_update(fatal=True)
43 apt_install('python-netaddr', fatal=True)
42 import netaddr44 import netaddr
4345
4446
@@ -435,8 +437,12 @@
435437
436 rev = dns.reversename.from_address(address)438 rev = dns.reversename.from_address(address)
437 result = ns_query(rev)439 result = ns_query(rev)
440
438 if not result:441 if not result:
439 return None442 try:
443 result = socket.gethostbyaddr(address)[0]
444 except:
445 return None
440 else:446 else:
441 result = address447 result = address
442448
443449
=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-06-16 07:53:15 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-12-01 15:05:49 +0000
@@ -14,12 +14,18 @@
14# You should have received a copy of the GNU Lesser General Public License14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1616
17import logging
18import re
19import sys
17import six20import six
18from collections import OrderedDict21from collections import OrderedDict
19from charmhelpers.contrib.amulet.deployment import (22from charmhelpers.contrib.amulet.deployment import (
20 AmuletDeployment23 AmuletDeployment
21)24)
2225
26DEBUG = logging.DEBUG
27ERROR = logging.ERROR
28
2329
24class OpenStackAmuletDeployment(AmuletDeployment):30class OpenStackAmuletDeployment(AmuletDeployment):
25 """OpenStack amulet deployment.31 """OpenStack amulet deployment.
@@ -28,9 +34,12 @@
28 that is specifically for use by OpenStack charms.34 that is specifically for use by OpenStack charms.
29 """35 """
3036
31 def __init__(self, series=None, openstack=None, source=None, stable=True):37 def __init__(self, series=None, openstack=None, source=None,
38 stable=True, log_level=DEBUG):
32 """Initialize the deployment environment."""39 """Initialize the deployment environment."""
33 super(OpenStackAmuletDeployment, self).__init__(series)40 super(OpenStackAmuletDeployment, self).__init__(series)
41 self.log = self.get_logger(level=log_level)
42 self.log.info('OpenStackAmuletDeployment: init')
34 self.openstack = openstack43 self.openstack = openstack
35 self.source = source44 self.source = source
36 self.stable = stable45 self.stable = stable
@@ -38,30 +47,55 @@
38 # out.47 # out.
39 self.current_next = "trusty"48 self.current_next = "trusty"
4049
50 def get_logger(self, name="deployment-logger", level=logging.DEBUG):
51 """Get a logger object that will log to stdout."""
52 log = logging
53 logger = log.getLogger(name)
54 fmt = log.Formatter("%(asctime)s %(funcName)s "
55 "%(levelname)s: %(message)s")
56
57 handler = log.StreamHandler(stream=sys.stdout)
58 handler.setLevel(level)
59 handler.setFormatter(fmt)
60
61 logger.addHandler(handler)
62 logger.setLevel(level)
63
64 return logger
65
41 def _determine_branch_locations(self, other_services):66 def _determine_branch_locations(self, other_services):
42 """Determine the branch locations for the other services.67 """Determine the branch locations for the other services.
4368
44 Determine if the local branch being tested is derived from its69 Determine if the local branch being tested is derived from its
45 stable or next (dev) branch, and based on this, use the corresonding70 stable or next (dev) branch, and based on this, use the corresonding
46 stable or next branches for the other_services."""71 stable or next branches for the other_services."""
47 base_charms = ['mysql', 'mongodb']72
73 self.log.info('OpenStackAmuletDeployment: determine branch locations')
74
75 # Charms outside the lp:~openstack-charmers namespace
76 base_charms = ['mysql', 'mongodb', 'nrpe']
77
78 # Force these charms to current series even when using an older series.
79 # ie. Use trusty/nrpe even when series is precise, as the P charm
80 # does not possess the necessary external master config and hooks.
81 force_series_current = ['nrpe']
4882
49 if self.series in ['precise', 'trusty']:83 if self.series in ['precise', 'trusty']:
50 base_series = self.series84 base_series = self.series
51 else:85 else:
52 base_series = self.current_next86 base_series = self.current_next
5387
54 if self.stable:88 for svc in other_services:
55 for svc in other_services:89 if svc['name'] in force_series_current:
56 if svc.get('location'):90 base_series = self.current_next
57 continue91 # If a location has been explicitly set, use it
92 if svc.get('location'):
93 continue
94 if self.stable:
58 temp = 'lp:charms/{}/{}'95 temp = 'lp:charms/{}/{}'
59 svc['location'] = temp.format(base_series,96 svc['location'] = temp.format(base_series,
60 svc['name'])97 svc['name'])
61 else:98 else:
62 for svc in other_services:
63 if svc.get('location'):
64 continue
65 if svc['name'] in base_charms:99 if svc['name'] in base_charms:
66 temp = 'lp:charms/{}/{}'100 temp = 'lp:charms/{}/{}'
67 svc['location'] = temp.format(base_series,101 svc['location'] = temp.format(base_series,
@@ -70,10 +104,13 @@
70 temp = 'lp:~openstack-charmers/charms/{}/{}/next'104 temp = 'lp:~openstack-charmers/charms/{}/{}/next'
71 svc['location'] = temp.format(self.current_next,105 svc['location'] = temp.format(self.current_next,
72 svc['name'])106 svc['name'])
107
73 return other_services108 return other_services
74109
75 def _add_services(self, this_service, other_services):110 def _add_services(self, this_service, other_services):
76 """Add services to the deployment and set openstack-origin/source."""111 """Add services to the deployment and set openstack-origin/source."""
112 self.log.info('OpenStackAmuletDeployment: adding services')
113
77 other_services = self._determine_branch_locations(other_services)114 other_services = self._determine_branch_locations(other_services)
78115
79 super(OpenStackAmuletDeployment, self)._add_services(this_service,116 super(OpenStackAmuletDeployment, self)._add_services(this_service,
@@ -81,29 +118,102 @@
81118
82 services = other_services119 services = other_services
83 services.append(this_service)120 services.append(this_service)
121
122 # Charms which should use the source config option
84 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',123 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
85 'ceph-osd', 'ceph-radosgw']124 'ceph-osd', 'ceph-radosgw']
86 # Openstack subordinate charms do not expose an origin option as that125
87 # is controlled by the principle126 # Charms which can not use openstack-origin, ie. many subordinates
88 ignore = ['neutron-openvswitch', 'cisco-vpp']127 no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
128 'openvswitch-odl', 'neutron-api-odl', 'odl-controller']
89129
90 if self.openstack:130 if self.openstack:
91 for svc in services:131 for svc in services:
92 if svc['name'] not in use_source + ignore:132 if svc['name'] not in use_source + no_origin:
93 config = {'openstack-origin': self.openstack}133 config = {'openstack-origin': self.openstack}
94 self.d.configure(svc['name'], config)134 self.d.configure(svc['name'], config)
95135
96 if self.source:136 if self.source:
97 for svc in services:137 for svc in services:
98 if svc['name'] in use_source and svc['name'] not in ignore:138 if svc['name'] in use_source and svc['name'] not in no_origin:
99 config = {'source': self.source}139 config = {'source': self.source}
100 self.d.configure(svc['name'], config)140 self.d.configure(svc['name'], config)
101141
102 def _configure_services(self, configs):142 def _configure_services(self, configs):
103 """Configure all of the services."""143 """Configure all of the services."""
144 self.log.info('OpenStackAmuletDeployment: configure services')
104 for service, config in six.iteritems(configs):145 for service, config in six.iteritems(configs):
105 self.d.configure(service, config)146 self.d.configure(service, config)
106147
148 def _auto_wait_for_status(self, message=None, exclude_services=None,
149 include_only=None, timeout=1800):
150 """Wait for all units to have a specific extended status, except
151 for any defined as excluded. Unless specified via message, any
152 status containing any case of 'ready' will be considered a match.
153
154 Examples of message usage:
155
156 Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
157 message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
158
159 Wait for all units to reach this status (exact match):
160 message = re.compile('^Unit is ready and clustered$')
161
162 Wait for all units to reach any one of these (exact match):
163 message = re.compile('Unit is ready|OK|Ready')
164
165 Wait for at least one unit to reach this status (exact match):
166 message = {'ready'}
167
168 See Amulet's sentry.wait_for_messages() for message usage detail.
169 https://github.com/juju/amulet/blob/master/amulet/sentry.py
170
171 :param message: Expected status match
172 :param exclude_services: List of juju service names to ignore,
173 not to be used in conjuction with include_only.
174 :param include_only: List of juju service names to exclusively check,
175 not to be used in conjuction with exclude_services.
176 :param timeout: Maximum time in seconds to wait for status match
177 :returns: None. Raises if timeout is hit.
178 """
179 self.log.info('Waiting for extended status on units...')
180
181 all_services = self.d.services.keys()
182
183 if exclude_services and include_only:
184 raise ValueError('exclude_services can not be used '
185 'with include_only')
186
187 if message:
188 if isinstance(message, re._pattern_type):
189 match = message.pattern
190 else:
191 match = message
192
193 self.log.debug('Custom extended status wait match: '
194 '{}'.format(match))
195 else:
196 self.log.debug('Default extended status wait match: contains '
197 'READY (case-insensitive)')
198 message = re.compile('.*ready.*', re.IGNORECASE)
199
200 if exclude_services:
201 self.log.debug('Excluding services from extended status match: '
202 '{}'.format(exclude_services))
203 else:
204 exclude_services = []
205
206 if include_only:
207 services = include_only
208 else:
209 services = list(set(all_services) - set(exclude_services))
210
211 self.log.debug('Waiting up to {}s for extended status on services: '
212 '{}'.format(timeout, services))
213 service_messages = {service: message for service in services}
214 self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
215 self.log.info('OK')
216
107 def _get_openstack_release(self):217 def _get_openstack_release(self):
108 """Get openstack release.218 """Get openstack release.
109219
@@ -152,3 +262,36 @@
152 return os_origin.split('%s-' % self.series)[1].split('/')[0]262 return os_origin.split('%s-' % self.series)[1].split('/')[0]
153 else:263 else:
154 return releases[self.series]264 return releases[self.series]
265
266 def get_ceph_expected_pools(self, radosgw=False):
267 """Return a list of expected ceph pools in a ceph + cinder + glance
268 test scenario, based on OpenStack release and whether ceph radosgw
269 is flagged as present or not."""
270
271 if self._get_openstack_release() >= self.trusty_kilo:
272 # Kilo or later
273 pools = [
274 'rbd',
275 'cinder',
276 'glance'
277 ]
278 else:
279 # Juno or earlier
280 pools = [
281 'data',
282 'metadata',
283 'rbd',
284 'cinder',
285 'glance'
286 ]
287
288 if radosgw:
289 pools.extend([
290 '.rgw.root',
291 '.rgw.control',
292 '.rgw',
293 '.rgw.gc',
294 '.users.uid'
295 ])
296
297 return pools
155298
=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-06-10 07:35:12 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-12-01 15:05:49 +0000
@@ -14,16 +14,22 @@
14# You should have received a copy of the GNU Lesser General Public License14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1616
17import amulet
18import json
17import logging19import logging
18import os20import os
21import re
22import six
19import time23import time
20import urllib24import urllib
2125
26import cinderclient.v1.client as cinder_client
22import glanceclient.v1.client as glance_client27import glanceclient.v1.client as glance_client
28import heatclient.v1.client as heat_client
23import keystoneclient.v2_0 as keystone_client29import keystoneclient.v2_0 as keystone_client
24import novaclient.v1_1.client as nova_client30import novaclient.v1_1.client as nova_client
2531import pika
26import six32import swiftclient
2733
28from charmhelpers.contrib.amulet.utils import (34from charmhelpers.contrib.amulet.utils import (
29 AmuletUtils35 AmuletUtils
@@ -37,7 +43,7 @@
37 """OpenStack amulet utilities.43 """OpenStack amulet utilities.
3844
39 This class inherits from AmuletUtils and has additional support45 This class inherits from AmuletUtils and has additional support
40 that is specifically for use by OpenStack charms.46 that is specifically for use by OpenStack charm tests.
41 """47 """
4248
43 def __init__(self, log_level=ERROR):49 def __init__(self, log_level=ERROR):
@@ -51,6 +57,8 @@
51 Validate actual endpoint data vs expected endpoint data. The ports57 Validate actual endpoint data vs expected endpoint data. The ports
52 are used to find the matching endpoint.58 are used to find the matching endpoint.
53 """59 """
60 self.log.debug('Validating endpoint data...')
61 self.log.debug('actual: {}'.format(repr(endpoints)))
54 found = False62 found = False
55 for ep in endpoints:63 for ep in endpoints:
56 self.log.debug('endpoint: {}'.format(repr(ep)))64 self.log.debug('endpoint: {}'.format(repr(ep)))
@@ -77,6 +85,7 @@
77 Validate a list of actual service catalog endpoints vs a list of85 Validate a list of actual service catalog endpoints vs a list of
78 expected service catalog endpoints.86 expected service catalog endpoints.
79 """87 """
88 self.log.debug('Validating service catalog endpoint data...')
80 self.log.debug('actual: {}'.format(repr(actual)))89 self.log.debug('actual: {}'.format(repr(actual)))
81 for k, v in six.iteritems(expected):90 for k, v in six.iteritems(expected):
82 if k in actual:91 if k in actual:
@@ -93,6 +102,7 @@
93 Validate a list of actual tenant data vs list of expected tenant102 Validate a list of actual tenant data vs list of expected tenant
94 data.103 data.
95 """104 """
105 self.log.debug('Validating tenant data...')
96 self.log.debug('actual: {}'.format(repr(actual)))106 self.log.debug('actual: {}'.format(repr(actual)))
97 for e in expected:107 for e in expected:
98 found = False108 found = False
@@ -114,6 +124,7 @@
114 Validate a list of actual role data vs a list of expected role124 Validate a list of actual role data vs a list of expected role
115 data.125 data.
116 """126 """
127 self.log.debug('Validating role data...')
117 self.log.debug('actual: {}'.format(repr(actual)))128 self.log.debug('actual: {}'.format(repr(actual)))
118 for e in expected:129 for e in expected:
119 found = False130 found = False
@@ -134,6 +145,7 @@
134 Validate a list of actual user data vs a list of expected user145 Validate a list of actual user data vs a list of expected user
135 data.146 data.
136 """147 """
148 self.log.debug('Validating user data...')
137 self.log.debug('actual: {}'.format(repr(actual)))149 self.log.debug('actual: {}'.format(repr(actual)))
138 for e in expected:150 for e in expected:
139 found = False151 found = False
@@ -155,17 +167,30 @@
155167
156 Validate a list of actual flavors vs a list of expected flavors.168 Validate a list of actual flavors vs a list of expected flavors.
157 """169 """
170 self.log.debug('Validating flavor data...')
158 self.log.debug('actual: {}'.format(repr(actual)))171 self.log.debug('actual: {}'.format(repr(actual)))
159 act = [a.name for a in actual]172 act = [a.name for a in actual]
160 return self._validate_list_data(expected, act)173 return self._validate_list_data(expected, act)
161174
162 def tenant_exists(self, keystone, tenant):175 def tenant_exists(self, keystone, tenant):
163 """Return True if tenant exists."""176 """Return True if tenant exists."""
177 self.log.debug('Checking if tenant exists ({})...'.format(tenant))
164 return tenant in [t.name for t in keystone.tenants.list()]178 return tenant in [t.name for t in keystone.tenants.list()]
165179
180 def authenticate_cinder_admin(self, keystone_sentry, username,
181 password, tenant):
182 """Authenticates admin user with cinder."""
183 # NOTE(beisner): cinder python client doesn't accept tokens.
184 service_ip = \
185 keystone_sentry.relation('shared-db',
186 'mysql:shared-db')['private-address']
187 ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
188 return cinder_client.Client(username, password, tenant, ept)
189
166 def authenticate_keystone_admin(self, keystone_sentry, user, password,190 def authenticate_keystone_admin(self, keystone_sentry, user, password,
167 tenant):191 tenant):
168 """Authenticates admin user with the keystone admin endpoint."""192 """Authenticates admin user with the keystone admin endpoint."""
193 self.log.debug('Authenticating keystone admin...')
169 unit = keystone_sentry194 unit = keystone_sentry
170 service_ip = unit.relation('shared-db',195 service_ip = unit.relation('shared-db',
171 'mysql:shared-db')['private-address']196 'mysql:shared-db')['private-address']
@@ -175,6 +200,7 @@
175200
176 def authenticate_keystone_user(self, keystone, user, password, tenant):201 def authenticate_keystone_user(self, keystone, user, password, tenant):
177 """Authenticates a regular user with the keystone public endpoint."""202 """Authenticates a regular user with the keystone public endpoint."""
203 self.log.debug('Authenticating keystone user ({})...'.format(user))
178 ep = keystone.service_catalog.url_for(service_type='identity',204 ep = keystone.service_catalog.url_for(service_type='identity',
179 endpoint_type='publicURL')205 endpoint_type='publicURL')
180 return keystone_client.Client(username=user, password=password,206 return keystone_client.Client(username=user, password=password,
@@ -182,19 +208,49 @@
182208
183 def authenticate_glance_admin(self, keystone):209 def authenticate_glance_admin(self, keystone):
184 """Authenticates admin user with glance."""210 """Authenticates admin user with glance."""
211 self.log.debug('Authenticating glance admin...')
185 ep = keystone.service_catalog.url_for(service_type='image',212 ep = keystone.service_catalog.url_for(service_type='image',
186 endpoint_type='adminURL')213 endpoint_type='adminURL')
187 return glance_client.Client(ep, token=keystone.auth_token)214 return glance_client.Client(ep, token=keystone.auth_token)
188215
216 def authenticate_heat_admin(self, keystone):
217 """Authenticates the admin user with heat."""
218 self.log.debug('Authenticating heat admin...')
219 ep = keystone.service_catalog.url_for(service_type='orchestration',
220 endpoint_type='publicURL')
221 return heat_client.Client(endpoint=ep, token=keystone.auth_token)
222
189 def authenticate_nova_user(self, keystone, user, password, tenant):223 def authenticate_nova_user(self, keystone, user, password, tenant):
190 """Authenticates a regular user with nova-api."""224 """Authenticates a regular user with nova-api."""
225 self.log.debug('Authenticating nova user ({})...'.format(user))
191 ep = keystone.service_catalog.url_for(service_type='identity',226 ep = keystone.service_catalog.url_for(service_type='identity',
192 endpoint_type='publicURL')227 endpoint_type='publicURL')
193 return nova_client.Client(username=user, api_key=password,228 return nova_client.Client(username=user, api_key=password,
194 project_id=tenant, auth_url=ep)229 project_id=tenant, auth_url=ep)
195230
231 def authenticate_swift_user(self, keystone, user, password, tenant):
232 """Authenticates a regular user with swift api."""
233 self.log.debug('Authenticating swift user ({})...'.format(user))
234 ep = keystone.service_catalog.url_for(service_type='identity',
235 endpoint_type='publicURL')
236 return swiftclient.Connection(authurl=ep,
237 user=user,
238 key=password,
239 tenant_name=tenant,
240 auth_version='2.0')
241
196 def create_cirros_image(self, glance, image_name):242 def create_cirros_image(self, glance, image_name):
197 """Download the latest cirros image and upload it to glance."""243 """Download the latest cirros image and upload it to glance,
244 validate and return a resource pointer.
245
246 :param glance: pointer to authenticated glance connection
247 :param image_name: display name for new image
248 :returns: glance image pointer
249 """
250 self.log.debug('Creating glance cirros image '
251 '({})...'.format(image_name))
252
253 # Download cirros image
198 http_proxy = os.getenv('AMULET_HTTP_PROXY')254 http_proxy = os.getenv('AMULET_HTTP_PROXY')
199 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))255 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
200 if http_proxy:256 if http_proxy:
@@ -203,57 +259,67 @@
203 else:259 else:
204 opener = urllib.FancyURLopener()260 opener = urllib.FancyURLopener()
205261
206 f = opener.open("http://download.cirros-cloud.net/version/released")262 f = opener.open('http://download.cirros-cloud.net/version/released')
207 version = f.read().strip()263 version = f.read().strip()
208 cirros_img = "cirros-{}-x86_64-disk.img".format(version)264 cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
209 local_path = os.path.join('tests', cirros_img)265 local_path = os.path.join('tests', cirros_img)
210266
211 if not os.path.exists(local_path):267 if not os.path.exists(local_path):
212 cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",268 cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
213 version, cirros_img)269 version, cirros_img)
214 opener.retrieve(cirros_url, local_path)270 opener.retrieve(cirros_url, local_path)
215 f.close()271 f.close()
216272
273 # Create glance image
217 with open(local_path) as f:274 with open(local_path) as f:
218 image = glance.images.create(name=image_name, is_public=True,275 image = glance.images.create(name=image_name, is_public=True,
219 disk_format='qcow2',276 disk_format='qcow2',
220 container_format='bare', data=f)277 container_format='bare', data=f)
221 count = 1278
222 status = image.status279 # Wait for image to reach active status
223 while status != 'active' and count < 10:280 img_id = image.id
224 time.sleep(3)281 ret = self.resource_reaches_status(glance.images, img_id,
225 image = glance.images.get(image.id)282 expected_stat='active',
226 status = image.status283 msg='Image status wait')
227 self.log.debug('image status: {}'.format(status))284 if not ret:
228 count += 1285 msg = 'Glance image failed to reach expected state.'
229286 amulet.raise_status(amulet.FAIL, msg=msg)
230 if status != 'active':287
231 self.log.error('image creation timed out')288 # Re-validate new image
232 return None289 self.log.debug('Validating image attributes...')
290 val_img_name = glance.images.get(img_id).name
291 val_img_stat = glance.images.get(img_id).status
292 val_img_pub = glance.images.get(img_id).is_public
293 val_img_cfmt = glance.images.get(img_id).container_format
294 val_img_dfmt = glance.images.get(img_id).disk_format
295 msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
296 'container fmt:{} disk fmt:{}'.format(
297 val_img_name, val_img_pub, img_id,
298 val_img_stat, val_img_cfmt, val_img_dfmt))
299
300 if val_img_name == image_name and val_img_stat == 'active' \
301 and val_img_pub is True and val_img_cfmt == 'bare' \
302 and val_img_dfmt == 'qcow2':
303 self.log.debug(msg_attr)
304 else:
305 msg = ('Volume validation failed, {}'.format(msg_attr))
306 amulet.raise_status(amulet.FAIL, msg=msg)
233307
234 return image308 return image
235309
236 def delete_image(self, glance, image):310 def delete_image(self, glance, image):
237 """Delete the specified image."""311 """Delete the specified image."""
238 num_before = len(list(glance.images.list()))312
239 glance.images.delete(image)313 # /!\ DEPRECATION WARNING
240314 self.log.warn('/!\\ DEPRECATION WARNING: use '
241 count = 1315 'delete_resource instead of delete_image.')
242 num_after = len(list(glance.images.list()))316 self.log.debug('Deleting glance image ({})...'.format(image))
243 while num_after != (num_before - 1) and count < 10:317 return self.delete_resource(glance.images, image, msg='glance image')
244 time.sleep(3)
245 num_after = len(list(glance.images.list()))
246 self.log.debug('number of images: {}'.format(num_after))
247 count += 1
248
249 if num_after != (num_before - 1):
250 self.log.error('image deletion timed out')
251 return False
252
253 return True
254318
255 def create_instance(self, nova, image_name, instance_name, flavor):319 def create_instance(self, nova, image_name, instance_name, flavor):
256 """Create the specified instance."""320 """Create the specified instance."""
321 self.log.debug('Creating instance '
322 '({}|{}|{})'.format(instance_name, image_name, flavor))
257 image = nova.images.find(name=image_name)323 image = nova.images.find(name=image_name)
258 flavor = nova.flavors.find(name=flavor)324 flavor = nova.flavors.find(name=flavor)
259 instance = nova.servers.create(name=instance_name, image=image,325 instance = nova.servers.create(name=instance_name, image=image,
@@ -276,19 +342,644 @@
276342
277 def delete_instance(self, nova, instance):343 def delete_instance(self, nova, instance):
278 """Delete the specified instance."""344 """Delete the specified instance."""
279 num_before = len(list(nova.servers.list()))345
280 nova.servers.delete(instance)346 # /!\ DEPRECATION WARNING
281347 self.log.warn('/!\\ DEPRECATION WARNING: use '
282 count = 1348 'delete_resource instead of delete_instance.')
283 num_after = len(list(nova.servers.list()))349 self.log.debug('Deleting instance ({})...'.format(instance))
284 while num_after != (num_before - 1) and count < 10:350 return self.delete_resource(nova.servers, instance,
285 time.sleep(3)351 msg='nova instance')
286 num_after = len(list(nova.servers.list()))352
287 self.log.debug('number of instances: {}'.format(num_after))353 def create_or_get_keypair(self, nova, keypair_name="testkey"):
288 count += 1354 """Create a new keypair, or return pointer if it already exists."""
289355 try:
290 if num_after != (num_before - 1):356 _keypair = nova.keypairs.get(keypair_name)
291 self.log.error('instance deletion timed out')357 self.log.debug('Keypair ({}) already exists, '
292 return False358 'using it.'.format(keypair_name))
293359 return _keypair
294 return True360 except:
361 self.log.debug('Keypair ({}) does not exist, '
362 'creating it.'.format(keypair_name))
363
364 _keypair = nova.keypairs.create(name=keypair_name)
365 return _keypair
366
367 def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
368 img_id=None, src_vol_id=None, snap_id=None):
369 """Create cinder volume, optionally from a glance image, OR
370 optionally as a clone of an existing volume, OR optionally
371 from a snapshot. Wait for the new volume status to reach
372 the expected status, validate and return a resource pointer.
373
374 :param vol_name: cinder volume display name
375 :param vol_size: size in gigabytes
376 :param img_id: optional glance image id
377 :param src_vol_id: optional source volume id to clone
378 :param snap_id: optional snapshot id to use
379 :returns: cinder volume pointer
380 """
381 # Handle parameter input and avoid impossible combinations
382 if img_id and not src_vol_id and not snap_id:
383 # Create volume from image
384 self.log.debug('Creating cinder volume from glance image...')
385 bootable = 'true'
386 elif src_vol_id and not img_id and not snap_id:
387 # Clone an existing volume
388 self.log.debug('Cloning cinder volume...')
389 bootable = cinder.volumes.get(src_vol_id).bootable
390 elif snap_id and not src_vol_id and not img_id:
391 # Create volume from snapshot
392 self.log.debug('Creating cinder volume from snapshot...')
393 snap = cinder.volume_snapshots.find(id=snap_id)
394 vol_size = snap.size
395 snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
396 bootable = cinder.volumes.get(snap_vol_id).bootable
397 elif not img_id and not src_vol_id and not snap_id:
398 # Create volume
399 self.log.debug('Creating cinder volume...')
400 bootable = 'false'
401 else:
402 # Impossible combination of parameters
403 msg = ('Invalid method use - name:{} size:{} img_id:{} '
404 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
405 img_id, src_vol_id,
406 snap_id))
407 amulet.raise_status(amulet.FAIL, msg=msg)
408
409 # Create new volume
410 try:
411 vol_new = cinder.volumes.create(display_name=vol_name,
412 imageRef=img_id,
413 size=vol_size,
414 source_volid=src_vol_id,
415 snapshot_id=snap_id)
416 vol_id = vol_new.id
417 except Exception as e:
418 msg = 'Failed to create volume: {}'.format(e)
419 amulet.raise_status(amulet.FAIL, msg=msg)
420
421 # Wait for volume to reach available status
422 ret = self.resource_reaches_status(cinder.volumes, vol_id,
423 expected_stat="available",
424 msg="Volume status wait")
425 if not ret:
426 msg = 'Cinder volume failed to reach expected state.'
427 amulet.raise_status(amulet.FAIL, msg=msg)
428
429 # Re-validate new volume
430 self.log.debug('Validating volume attributes...')
431 val_vol_name = cinder.volumes.get(vol_id).display_name
432 val_vol_boot = cinder.volumes.get(vol_id).bootable
433 val_vol_stat = cinder.volumes.get(vol_id).status
434 val_vol_size = cinder.volumes.get(vol_id).size
435 msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
436 '{} size:{}'.format(val_vol_name, vol_id,
437 val_vol_stat, val_vol_boot,
438 val_vol_size))
439
440 if val_vol_boot == bootable and val_vol_stat == 'available' \
441 and val_vol_name == vol_name and val_vol_size == vol_size:
442 self.log.debug(msg_attr)
443 else:
444 msg = ('Volume validation failed, {}'.format(msg_attr))
445 amulet.raise_status(amulet.FAIL, msg=msg)
446
447 return vol_new
448
449 def delete_resource(self, resource, resource_id,
450 msg="resource", max_wait=120):
451 """Delete one openstack resource, such as one instance, keypair,
452 image, volume, stack, etc., and confirm deletion within max wait time.
453
454 :param resource: pointer to os resource type, ex:glance_client.images
455 :param resource_id: unique name or id for the openstack resource
456 :param msg: text to identify purpose in logging
457 :param max_wait: maximum wait time in seconds
458 :returns: True if successful, otherwise False
459 """
460 self.log.debug('Deleting OpenStack resource '
461 '{} ({})'.format(resource_id, msg))
462 num_before = len(list(resource.list()))
463 resource.delete(resource_id)
464
465 tries = 0
466 num_after = len(list(resource.list()))
467 while num_after != (num_before - 1) and tries < (max_wait / 4):
468 self.log.debug('{} delete check: '
469 '{} [{}:{}] {}'.format(msg, tries,
470 num_before,
471 num_after,
472 resource_id))
473 time.sleep(4)
474 num_after = len(list(resource.list()))
475 tries += 1
476
477 self.log.debug('{}: expected, actual count = {}, '
478 '{}'.format(msg, num_before - 1, num_after))
479
480 if num_after == (num_before - 1):
481 return True
482 else:
483 self.log.error('{} delete timed out'.format(msg))
484 return False
485
486 def resource_reaches_status(self, resource, resource_id,
487 expected_stat='available',
488 msg='resource', max_wait=120):
489 """Wait for an openstack resources status to reach an
490 expected status within a specified time. Useful to confirm that
491 nova instances, cinder vols, snapshots, glance images, heat stacks
492 and other resources eventually reach the expected status.
493
494 :param resource: pointer to os resource type, ex: heat_client.stacks
495 :param resource_id: unique id for the openstack resource
496 :param expected_stat: status to expect resource to reach
497 :param msg: text to identify purpose in logging
498 :param max_wait: maximum wait time in seconds
499 :returns: True if successful, False if status is not reached
500 """
501
502 tries = 0
503 resource_stat = resource.get(resource_id).status
504 while resource_stat != expected_stat and tries < (max_wait / 4):
505 self.log.debug('{} status check: '
506 '{} [{}:{}] {}'.format(msg, tries,
507 resource_stat,
508 expected_stat,
509 resource_id))
510 time.sleep(4)
511 resource_stat = resource.get(resource_id).status
512 tries += 1
513
514 self.log.debug('{}: expected, actual status = {}, '
515 '{}'.format(msg, resource_stat, expected_stat))
516
517 if resource_stat == expected_stat:
518 return True
519 else:
520 self.log.debug('{} never reached expected status: '
521 '{}'.format(resource_id, expected_stat))
522 return False
523
524 def get_ceph_osd_id_cmd(self, index):
525 """Produce a shell command that will return a ceph-osd id."""
526 return ("`initctl list | grep 'ceph-osd ' | "
527 "awk 'NR=={} {{ print $2 }}' | "
528 "grep -o '[0-9]*'`".format(index + 1))
529
530 def get_ceph_pools(self, sentry_unit):
531 """Return a dict of ceph pools from a single ceph unit, with
532 pool name as keys, pool id as vals."""
533 pools = {}
534 cmd = 'sudo ceph osd lspools'
535 output, code = sentry_unit.run(cmd)
536 if code != 0:
537 msg = ('{} `{}` returned {} '
538 '{}'.format(sentry_unit.info['unit_name'],
539 cmd, code, output))
540 amulet.raise_status(amulet.FAIL, msg=msg)
541
542 # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
543 for pool in str(output).split(','):
544 pool_id_name = pool.split(' ')
545 if len(pool_id_name) == 2:
546 pool_id = pool_id_name[0]
547 pool_name = pool_id_name[1]
548 pools[pool_name] = int(pool_id)
549
550 self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
551 pools))
552 return pools
553
554 def get_ceph_df(self, sentry_unit):
555 """Return dict of ceph df json output, including ceph pool state.
556
557 :param sentry_unit: Pointer to amulet sentry instance (juju unit)
558 :returns: Dict of ceph df output
559 """
560 cmd = 'sudo ceph df --format=json'
561 output, code = sentry_unit.run(cmd)
562 if code != 0:
563 msg = ('{} `{}` returned {} '
564 '{}'.format(sentry_unit.info['unit_name'],
565 cmd, code, output))
566 amulet.raise_status(amulet.FAIL, msg=msg)
567 return json.loads(output)
568
569 def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
570 """Take a sample of attributes of a ceph pool, returning ceph
571 pool name, object count and disk space used for the specified
572 pool ID number.
573
574 :param sentry_unit: Pointer to amulet sentry instance (juju unit)
575 :param pool_id: Ceph pool ID
576 :returns: List of pool name, object count, kb disk space used
577 """
578 df = self.get_ceph_df(sentry_unit)
579 pool_name = df['pools'][pool_id]['name']
580 obj_count = df['pools'][pool_id]['stats']['objects']
581 kb_used = df['pools'][pool_id]['stats']['kb_used']
582 self.log.debug('Ceph {} pool (ID {}): {} objects, '
583 '{} kb used'.format(pool_name, pool_id,
584 obj_count, kb_used))
585 return pool_name, obj_count, kb_used
586
587 def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
588 """Validate ceph pool samples taken over time, such as pool
589 object counts or pool kb used, before adding, after adding, and
590 after deleting items which affect those pool attributes. The
591 2nd element is expected to be greater than the 1st; 3rd is expected
592 to be less than the 2nd.
593
594 :param samples: List containing 3 data samples
595 :param sample_type: String for logging and usage context
596 :returns: None if successful, Failure message otherwise
597 """
598 original, created, deleted = range(3)
599 if samples[created] <= samples[original] or \
600 samples[deleted] >= samples[created]:
601 return ('Ceph {} samples ({}) '
602 'unexpected.'.format(sample_type, samples))
603 else:
604 self.log.debug('Ceph {} samples (OK): '
605 '{}'.format(sample_type, samples))
606 return None
607
608 # rabbitmq/amqp specific helpers:
609
610 def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
611 """Wait for rmq units extended status to show cluster readiness,
612 after an optional initial sleep period. Initial sleep is likely
613 necessary to be effective following a config change, as status
614 message may not instantly update to non-ready."""
615
616 if init_sleep:
617 time.sleep(init_sleep)
618
619 message = re.compile('^Unit is ready and clustered$')
620 deployment._auto_wait_for_status(message=message,
621 timeout=timeout,
622 include_only=['rabbitmq-server'])
623
624 def add_rmq_test_user(self, sentry_units,
625 username="testuser1", password="changeme"):
626 """Add a test user via the first rmq juju unit, check connection as
627 the new user against all sentry units.
628
629 :param sentry_units: list of sentry unit pointers
630 :param username: amqp user name, default to testuser1
631 :param password: amqp user password
632 :returns: None if successful. Raise on error.
633 """
634 self.log.debug('Adding rmq user ({})...'.format(username))
635
636 # Check that user does not already exist
637 cmd_user_list = 'rabbitmqctl list_users'
638 output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
639 if username in output:
640 self.log.warning('User ({}) already exists, returning '
641 'gracefully.'.format(username))
642 return
643
644 perms = '".*" ".*" ".*"'
645 cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
646 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
647
648 # Add user via first unit
649 for cmd in cmds:
650 output, _ = self.run_cmd_unit(sentry_units[0], cmd)
651
652 # Check connection against the other sentry_units
653 self.log.debug('Checking user connect against units...')
654 for sentry_unit in sentry_units:
655 connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
656 username=username,
657 password=password)
658 connection.close()
659
660 def delete_rmq_test_user(self, sentry_units, username="testuser1"):
661 """Delete a rabbitmq user via the first rmq juju unit.
662
663 :param sentry_units: list of sentry unit pointers
664 :param username: amqp user name, default to testuser1
665 :param password: amqp user password
666 :returns: None if successful or no such user.
667 """
668 self.log.debug('Deleting rmq user ({})...'.format(username))
669
670 # Check that the user exists
671 cmd_user_list = 'rabbitmqctl list_users'
672 output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
673
674 if username not in output:
675 self.log.warning('User ({}) does not exist, returning '
676 'gracefully.'.format(username))
677 return
678
679 # Delete the user
680 cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
681 output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
682
683 def get_rmq_cluster_status(self, sentry_unit):
684 """Execute rabbitmq cluster status command on a unit and return
685 the full output.
686
687 :param unit: sentry unit
688 :returns: String containing console output of cluster status command
689 """
690 cmd = 'rabbitmqctl cluster_status'
691 output, _ = self.run_cmd_unit(sentry_unit, cmd)
692 self.log.debug('{} cluster_status:\n{}'.format(
693 sentry_unit.info['unit_name'], output))
694 return str(output)
695
696 def get_rmq_cluster_running_nodes(self, sentry_unit):
697 """Parse rabbitmqctl cluster_status output string, return list of
698 running rabbitmq cluster nodes.
699
700 :param unit: sentry unit
701 :returns: List containing node names of running nodes
702 """
703 # NOTE(beisner): rabbitmqctl cluster_status output is not
704 # json-parsable, do string chop foo, then json.loads that.
705 str_stat = self.get_rmq_cluster_status(sentry_unit)
706 if 'running_nodes' in str_stat:
707 pos_start = str_stat.find("{running_nodes,") + 15
708 pos_end = str_stat.find("]},", pos_start) + 1
709 str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
710 run_nodes = json.loads(str_run_nodes)
711 return run_nodes
712 else:
713 return []
714
715 def validate_rmq_cluster_running_nodes(self, sentry_units):
716 """Check that all rmq unit hostnames are represented in the
717 cluster_status output of all units.
718
719 :param host_names: dict of juju unit names to host names
720 :param units: list of sentry unit pointers (all rmq units)
721 :returns: None if successful, otherwise return error message
722 """
723 host_names = self.get_unit_hostnames(sentry_units)
724 errors = []
725
726 # Query every unit for cluster_status running nodes
727 for query_unit in sentry_units:
728 query_unit_name = query_unit.info['unit_name']
729 running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
730
731 # Confirm that every unit is represented in the queried unit's
732 # cluster_status running nodes output.
733 for validate_unit in sentry_units:
734 val_host_name = host_names[validate_unit.info['unit_name']]
735 val_node_name = 'rabbit@{}'.format(val_host_name)
736
737 if val_node_name not in running_nodes:
738 errors.append('Cluster member check failed on {}: {} not '
739 'in {}\n'.format(query_unit_name,
740 val_node_name,
741 running_nodes))
742 if errors:
743 return ''.join(errors)
744
745 def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
746 """Check a single juju rmq unit for ssl and port in the config file."""
747 host = sentry_unit.info['public-address']
748 unit_name = sentry_unit.info['unit_name']
749
750 conf_file = '/etc/rabbitmq/rabbitmq.config'
751 conf_contents = str(self.file_contents_safe(sentry_unit,
752 conf_file, max_wait=16))
753 # Checks
754 conf_ssl = 'ssl' in conf_contents
755 conf_port = str(port) in conf_contents
756
757 # Port explicitly checked in config
758 if port and conf_port and conf_ssl:
759 self.log.debug('SSL is enabled @{}:{} '
760 '({})'.format(host, port, unit_name))
761 return True
762 elif port and not conf_port and conf_ssl:
763 self.log.debug('SSL is enabled @{} but not on port {} '
764 '({})'.format(host, port, unit_name))
765 return False
766 # Port not checked (useful when checking that ssl is disabled)
767 elif not port and conf_ssl:
768 self.log.debug('SSL is enabled @{}:{} '
769 '({})'.format(host, port, unit_name))
770 return True
771 elif not conf_ssl:
772 self.log.debug('SSL not enabled @{}:{} '
773 '({})'.format(host, port, unit_name))
774 return False
775 else:
776 msg = ('Unknown condition when checking SSL status @{}:{} '
777 '({})'.format(host, port, unit_name))
778 amulet.raise_status(amulet.FAIL, msg)
779
780 def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
781 """Check that ssl is enabled on rmq juju sentry units.
782
783 :param sentry_units: list of all rmq sentry units
784 :param port: optional ssl port override to validate
785 :returns: None if successful, otherwise return error message
786 """
787 for sentry_unit in sentry_units:
788 if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
789 return ('Unexpected condition: ssl is disabled on unit '
790 '({})'.format(sentry_unit.info['unit_name']))
791 return None
792
793 def validate_rmq_ssl_disabled_units(self, sentry_units):
794 """Check that ssl is enabled on listed rmq juju sentry units.
795
796 :param sentry_units: list of all rmq sentry units
797 :returns: True if successful. Raise on error.
798 """
799 for sentry_unit in sentry_units:
800 if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
801 return ('Unexpected condition: ssl is enabled on unit '
802 '({})'.format(sentry_unit.info['unit_name']))
803 return None
804
805 def configure_rmq_ssl_on(self, sentry_units, deployment,
806 port=None, max_wait=60):
807 """Turn ssl charm config option on, with optional non-default
808 ssl port specification. Confirm that it is enabled on every
809 unit.
810
811 :param sentry_units: list of sentry units
812 :param deployment: amulet deployment object pointer
813 :param port: amqp port, use defaults if None
814 :param max_wait: maximum time to wait in seconds to confirm
815 :returns: None if successful. Raise on error.
816 """
817 self.log.debug('Setting ssl charm config option: on')
818
819 # Enable RMQ SSL
820 config = {'ssl': 'on'}
821 if port:
822 config['ssl_port'] = port
823
824 deployment.d.configure('rabbitmq-server', config)
825
826 # Wait for unit status
827 self.rmq_wait_for_cluster(deployment)
828
829 # Confirm
830 tries = 0
831 ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
832 while ret and tries < (max_wait / 4):
833 time.sleep(4)
834 self.log.debug('Attempt {}: {}'.format(tries, ret))
835 ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
836 tries += 1
837
838 if ret:
839 amulet.raise_status(amulet.FAIL, ret)
840
841 def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
842 """Turn ssl charm config option off, confirm that it is disabled
843 on every unit.
844
845 :param sentry_units: list of sentry units
846 :param deployment: amulet deployment object pointer
847 :param max_wait: maximum time to wait in seconds to confirm
848 :returns: None if successful. Raise on error.
849 """
850 self.log.debug('Setting ssl charm config option: off')
851
852 # Disable RMQ SSL
853 config = {'ssl': 'off'}
854 deployment.d.configure('rabbitmq-server', config)
855
856 # Wait for unit status
857 self.rmq_wait_for_cluster(deployment)
858
859 # Confirm
860 tries = 0
861 ret = self.validate_rmq_ssl_disabled_units(sentry_units)
862 while ret and tries < (max_wait / 4):
863 time.sleep(4)
864 self.log.debug('Attempt {}: {}'.format(tries, ret))
865 ret = self.validate_rmq_ssl_disabled_units(sentry_units)
866 tries += 1
867
868 if ret:
869 amulet.raise_status(amulet.FAIL, ret)
870
871 def connect_amqp_by_unit(self, sentry_unit, ssl=False,
872 port=None, fatal=True,
873 username="testuser1", password="changeme"):
874 """Establish and return a pika amqp connection to the rabbitmq service
875 running on a rmq juju unit.
876
877 :param sentry_unit: sentry unit pointer
878 :param ssl: boolean, default to False
879 :param port: amqp port, use defaults if None
880 :param fatal: boolean, default to True (raises on connect error)
881 :param username: amqp user name, default to testuser1
882 :param password: amqp user password
883 :returns: pika amqp connection pointer or None if failed and non-fatal
884 """
885 host = sentry_unit.info['public-address']
886 unit_name = sentry_unit.info['unit_name']
887
888 # Default port logic if port is not specified
889 if ssl and not port:
890 port = 5671
891 elif not ssl and not port:
892 port = 5672
893
894 self.log.debug('Connecting to amqp on {}:{} ({}) as '
895 '{}...'.format(host, port, unit_name, username))
896
897 try:
898 credentials = pika.PlainCredentials(username, password)
899 parameters = pika.ConnectionParameters(host=host, port=port,
900 credentials=credentials,
901 ssl=ssl,
902 connection_attempts=3,
903 retry_delay=5,
904 socket_timeout=1)
905 connection = pika.BlockingConnection(parameters)
906 assert connection.server_properties['product'] == 'RabbitMQ'
907 self.log.debug('Connect OK')
908 return connection
909 except Exception as e:
910 msg = ('amqp connection failed to {}:{} as '
911 '{} ({})'.format(host, port, username, str(e)))
912 if fatal:
913 amulet.raise_status(amulet.FAIL, msg)
914 else:
915 self.log.warn(msg)
916 return None
917
918 def publish_amqp_message_by_unit(self, sentry_unit, message,
919 queue="test", ssl=False,
920 username="testuser1",
921 password="changeme",
922 port=None):
923 """Publish an amqp message to a rmq juju unit.
924
925 :param sentry_unit: sentry unit pointer
926 :param message: amqp message string
927 :param queue: message queue, default to test
928 :param username: amqp user name, default to testuser1
929 :param password: amqp user password
930 :param ssl: boolean, default to False
931 :param port: amqp port, use defaults if None
932 :returns: None. Raises exception if publish failed.
933 """
934 self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
935 message))
936 connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
937 port=port,
938 username=username,
939 password=password)
940
941 # NOTE(beisner): extra debug here re: pika hang potential:
942 # https://github.com/pika/pika/issues/297
943 # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
944 self.log.debug('Defining channel...')
945 channel = connection.channel()
946 self.log.debug('Declaring queue...')
947 channel.queue_declare(queue=queue, auto_delete=False, durable=True)
948 self.log.debug('Publishing message...')
949 channel.basic_publish(exchange='', routing_key=queue, body=message)
950 self.log.debug('Closing channel...')
951 channel.close()
952 self.log.debug('Closing connection...')
953 connection.close()
954
955 def get_amqp_message_by_unit(self, sentry_unit, queue="test",
956 username="testuser1",
957 password="changeme",
958 ssl=False, port=None):
959 """Get an amqp message from a rmq juju unit.
960
961 :param sentry_unit: sentry unit pointer
962 :param queue: message queue, default to test
963 :param username: amqp user name, default to testuser1
964 :param password: amqp user password
965 :param ssl: boolean, default to False
966 :param port: amqp port, use defaults if None
967 :returns: amqp message body as string. Raise if get fails.
968 """
969 connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
970 port=port,
971 username=username,
972 password=password)
973 channel = connection.channel()
974 method_frame, _, body = channel.basic_get(queue)
975
976 if method_frame:
977 self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
978 body))
979 channel.basic_ack(method_frame.delivery_tag)
980 channel.close()
981 connection.close()
982 return body
983 else:
984 msg = 'No message retrieved.'
985 amulet.raise_status(amulet.FAIL, msg)
295986
=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
--- hooks/charmhelpers/contrib/openstack/context.py 2015-06-10 07:35:12 +0000
+++ hooks/charmhelpers/contrib/openstack/context.py 2015-12-01 15:05:49 +0000
@@ -14,6 +14,7 @@
14# You should have received a copy of the GNU Lesser General Public License14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1616
17import glob
17import json18import json
18import os19import os
19import re20import re
@@ -50,6 +51,8 @@
50from charmhelpers.core.strutils import bool_from_string51from charmhelpers.core.strutils import bool_from_string
5152
52from charmhelpers.core.host import (53from charmhelpers.core.host import (
54 get_bond_master,
55 is_phy_iface,
53 list_nics,56 list_nics,
54 get_nic_hwaddr,57 get_nic_hwaddr,
55 mkdir,58 mkdir,
@@ -122,21 +125,24 @@
122 of specifying multiple key value pairs within the same string. For125 of specifying multiple key value pairs within the same string. For
123 example, a string in the format of 'key1=value1, key2=value2' will126 example, a string in the format of 'key1=value1, key2=value2' will
124 return a dict of:127 return a dict of:
125 {'key1': 'value1',128
126 'key2': 'value2'}.129 {'key1': 'value1',
130 'key2': 'value2'}.
127131
128 2. A string in the above format, but supporting a comma-delimited list132 2. A string in the above format, but supporting a comma-delimited list
129 of values for the same key. For example, a string in the format of133 of values for the same key. For example, a string in the format of
130 'key1=value1, key2=value3,value4,value5' will return a dict of:134 'key1=value1, key2=value3,value4,value5' will return a dict of:
131 {'key1', 'value1',135
132 'key2', 'value2,value3,value4'}136 {'key1', 'value1',
137 'key2', 'value2,value3,value4'}
133138
134 3. A string containing a colon character (:) prior to an equal139 3. A string containing a colon character (:) prior to an equal
135 character (=) will be treated as yaml and parsed as such. This can be140 character (=) will be treated as yaml and parsed as such. This can be
136 used to specify more complex key value pairs. For example,141 used to specify more complex key value pairs. For example,
137 a string in the format of 'key1: subkey1=value1, subkey2=value2' will142 a string in the format of 'key1: subkey1=value1, subkey2=value2' will
138 return a dict of:143 return a dict of:
139 {'key1', 'subkey1=value1, subkey2=value2'}144
145 {'key1', 'subkey1=value1, subkey2=value2'}
140146
141 The provided config_flags string may be a list of comma-separated values147 The provided config_flags string may be a list of comma-separated values
142 which themselves may be comma-separated list of values.148 which themselves may be comma-separated list of values.
@@ -189,10 +195,50 @@
189class OSContextGenerator(object):195class OSContextGenerator(object):
190 """Base class for all context generators."""196 """Base class for all context generators."""
191 interfaces = []197 interfaces = []
198 related = False
199 complete = False
200 missing_data = []
192201
193 def __call__(self):202 def __call__(self):
194 raise NotImplementedError203 raise NotImplementedError
195204
205 def context_complete(self, ctxt):
206 """Check for missing data for the required context data.
207 Set self.missing_data if it exists and return False.
208 Set self.complete if no missing data and return True.
209 """
210 # Fresh start
211 self.complete = False
212 self.missing_data = []
213 for k, v in six.iteritems(ctxt):
214 if v is None or v == '':
215 if k not in self.missing_data:
216 self.missing_data.append(k)
217
218 if self.missing_data:
219 self.complete = False
220 log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
221 else:
222 self.complete = True
223 return self.complete
224
225 def get_related(self):
226 """Check if any of the context interfaces have relation ids.
227 Set self.related and return True if one of the interfaces
228 has relation ids.
229 """
230 # Fresh start
231 self.related = False
232 try:
233 for interface in self.interfaces:
234 if relation_ids(interface):
235 self.related = True
236 return self.related
237 except AttributeError as e:
238 log("{} {}"
239 "".format(self, e), 'INFO')
240 return self.related
241
196242
197class SharedDBContext(OSContextGenerator):243class SharedDBContext(OSContextGenerator):
198 interfaces = ['shared-db']244 interfaces = ['shared-db']
@@ -208,6 +254,7 @@
208 self.database = database254 self.database = database
209 self.user = user255 self.user = user
210 self.ssl_dir = ssl_dir256 self.ssl_dir = ssl_dir
257 self.rel_name = self.interfaces[0]
211258
212 def __call__(self):259 def __call__(self):
213 self.database = self.database or config('database')260 self.database = self.database or config('database')
@@ -240,7 +287,8 @@
240 if self.relation_prefix:287 if self.relation_prefix:
241 password_setting = self.relation_prefix + '_password'288 password_setting = self.relation_prefix + '_password'
242289
243 for rid in relation_ids('shared-db'):290 for rid in relation_ids(self.interfaces[0]):
291 self.related = True
244 for unit in related_units(rid):292 for unit in related_units(rid):
245 rdata = relation_get(rid=rid, unit=unit)293 rdata = relation_get(rid=rid, unit=unit)
246 host = rdata.get('db_host')294 host = rdata.get('db_host')
@@ -252,7 +300,7 @@
252 'database_password': rdata.get(password_setting),300 'database_password': rdata.get(password_setting),
253 'database_type': 'mysql'301 'database_type': 'mysql'
254 }302 }
255 if context_complete(ctxt):303 if self.context_complete(ctxt):
256 db_ssl(rdata, ctxt, self.ssl_dir)304 db_ssl(rdata, ctxt, self.ssl_dir)
257 return ctxt305 return ctxt
258 return {}306 return {}
@@ -273,6 +321,7 @@
273321
274 ctxt = {}322 ctxt = {}
275 for rid in relation_ids(self.interfaces[0]):323 for rid in relation_ids(self.interfaces[0]):
324 self.related = True
276 for unit in related_units(rid):325 for unit in related_units(rid):
277 rel_host = relation_get('host', rid=rid, unit=unit)326 rel_host = relation_get('host', rid=rid, unit=unit)
278 rel_user = relation_get('user', rid=rid, unit=unit)327 rel_user = relation_get('user', rid=rid, unit=unit)
@@ -282,7 +331,7 @@
282 'database_user': rel_user,331 'database_user': rel_user,
283 'database_password': rel_passwd,332 'database_password': rel_passwd,
284 'database_type': 'postgresql'}333 'database_type': 'postgresql'}
285 if context_complete(ctxt):334 if self.context_complete(ctxt):
286 return ctxt335 return ctxt
287336
288 return {}337 return {}
@@ -343,6 +392,7 @@
343 ctxt['signing_dir'] = cachedir392 ctxt['signing_dir'] = cachedir
344393
345 for rid in relation_ids(self.rel_name):394 for rid in relation_ids(self.rel_name):
395 self.related = True
346 for unit in related_units(rid):396 for unit in related_units(rid):
347 rdata = relation_get(rid=rid, unit=unit)397 rdata = relation_get(rid=rid, unit=unit)
348 serv_host = rdata.get('service_host')398 serv_host = rdata.get('service_host')
@@ -361,7 +411,7 @@
361 'service_protocol': svc_protocol,411 'service_protocol': svc_protocol,
362 'auth_protocol': auth_protocol})412 'auth_protocol': auth_protocol})
363413
364 if context_complete(ctxt):414 if self.context_complete(ctxt):
365 # NOTE(jamespage) this is required for >= icehouse415 # NOTE(jamespage) this is required for >= icehouse
366 # so a missing value just indicates keystone needs416 # so a missing value just indicates keystone needs
367 # upgrading417 # upgrading
@@ -400,6 +450,7 @@
400 ctxt = {}450 ctxt = {}
401 for rid in relation_ids(self.rel_name):451 for rid in relation_ids(self.rel_name):
402 ha_vip_only = False452 ha_vip_only = False
453 self.related = True
403 for unit in related_units(rid):454 for unit in related_units(rid):
404 if relation_get('clustered', rid=rid, unit=unit):455 if relation_get('clustered', rid=rid, unit=unit):
405 ctxt['clustered'] = True456 ctxt['clustered'] = True
@@ -432,7 +483,7 @@
432 ha_vip_only = relation_get('ha-vip-only',483 ha_vip_only = relation_get('ha-vip-only',
433 rid=rid, unit=unit) is not None484 rid=rid, unit=unit) is not None
434485
435 if context_complete(ctxt):486 if self.context_complete(ctxt):
436 if 'rabbit_ssl_ca' in ctxt:487 if 'rabbit_ssl_ca' in ctxt:
437 if not self.ssl_dir:488 if not self.ssl_dir:
438 log("Charm not setup for ssl support but ssl ca "489 log("Charm not setup for ssl support but ssl ca "
@@ -464,7 +515,7 @@
464 ctxt['oslo_messaging_flags'] = config_flags_parser(515 ctxt['oslo_messaging_flags'] = config_flags_parser(
465 oslo_messaging_flags)516 oslo_messaging_flags)
466517
467 if not context_complete(ctxt):518 if not self.complete:
468 return {}519 return {}
469520
470 return ctxt521 return ctxt
@@ -480,13 +531,15 @@
480531
481 log('Generating template context for ceph', level=DEBUG)532 log('Generating template context for ceph', level=DEBUG)
482 mon_hosts = []533 mon_hosts = []
483 auth = None534 ctxt = {
484 key = None535 'use_syslog': str(config('use-syslog')).lower()
485 use_syslog = str(config('use-syslog')).lower()536 }
486 for rid in relation_ids('ceph'):537 for rid in relation_ids('ceph'):
487 for unit in related_units(rid):538 for unit in related_units(rid):
488 auth = relation_get('auth', rid=rid, unit=unit)539 if not ctxt.get('auth'):
489 key = relation_get('key', rid=rid, unit=unit)540 ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
541 if not ctxt.get('key'):
542 ctxt['key'] = relation_get('key', rid=rid, unit=unit)
490 ceph_pub_addr = relation_get('ceph-public-address', rid=rid,543 ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
491 unit=unit)544 unit=unit)
492 unit_priv_addr = relation_get('private-address', rid=rid,545 unit_priv_addr = relation_get('private-address', rid=rid,
@@ -495,15 +548,12 @@
495 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr548 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
496 mon_hosts.append(ceph_addr)549 mon_hosts.append(ceph_addr)
497550
498 ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),551 ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
499 'auth': auth,
500 'key': key,
501 'use_syslog': use_syslog}
502552
503 if not os.path.isdir('/etc/ceph'):553 if not os.path.isdir('/etc/ceph'):
504 os.mkdir('/etc/ceph')554 os.mkdir('/etc/ceph')
505555
506 if not context_complete(ctxt):556 if not self.context_complete(ctxt):
507 return {}557 return {}
508558
509 ensure_packages(['ceph-common'])559 ensure_packages(['ceph-common'])
@@ -890,9 +940,32 @@
890 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}940 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
891 return ctxt941 return ctxt
892942
943 def pg_ctxt(self):
944 driver = neutron_plugin_attribute(self.plugin, 'driver',
945 self.network_manager)
946 config = neutron_plugin_attribute(self.plugin, 'config',
947 self.network_manager)
948 ovs_ctxt = {'core_plugin': driver,
949 'neutron_plugin': 'plumgrid',
950 'neutron_security_groups': self.neutron_security_groups,
951 'local_ip': unit_private_ip(),
952 'config': config}
953 return ovs_ctxt
954
955 def midonet_ctxt(self):
956 driver = neutron_plugin_attribute(self.plugin, 'driver',
957 self.network_manager)
958 midonet_config = neutron_plugin_attribute(self.plugin, 'config',
959 self.network_manager)
960 mido_ctxt = {'core_plugin': driver,
961 'neutron_plugin': 'midonet',
962 'neutron_security_groups': self.neutron_security_groups,
963 'local_ip': unit_private_ip(),
964 'config': midonet_config}
965
966 return mido_ctxt
967
893 def __call__(self):968 def __call__(self):
894 self._ensure_packages()
895
896 if self.network_manager not in ['quantum', 'neutron']:969 if self.network_manager not in ['quantum', 'neutron']:
897 return {}970 return {}
898971
@@ -911,6 +984,10 @@
911 ctxt.update(self.calico_ctxt())984 ctxt.update(self.calico_ctxt())
912 elif self.plugin == 'vsp':985 elif self.plugin == 'vsp':
913 ctxt.update(self.nuage_ctxt())986 ctxt.update(self.nuage_ctxt())
987 elif self.plugin == 'plumgrid':
988 ctxt.update(self.pg_ctxt())
989 elif self.plugin == 'midonet':
990 ctxt.update(self.midonet_ctxt())
914991
915 alchemy_flags = config('neutron-alchemy-flags')992 alchemy_flags = config('neutron-alchemy-flags')
916 if alchemy_flags:993 if alchemy_flags:
@@ -922,7 +999,6 @@
922999
9231000
924class NeutronPortContext(OSContextGenerator):1001class NeutronPortContext(OSContextGenerator):
925 NIC_PREFIXES = ['eth', 'bond']
9261002
927 def resolve_ports(self, ports):1003 def resolve_ports(self, ports):
928 """Resolve NICs not yet bound to bridge(s)1004 """Resolve NICs not yet bound to bridge(s)
@@ -934,7 +1010,18 @@
9341010
935 hwaddr_to_nic = {}1011 hwaddr_to_nic = {}
936 hwaddr_to_ip = {}1012 hwaddr_to_ip = {}
937 for nic in list_nics(self.NIC_PREFIXES):1013 for nic in list_nics():
1014 # Ignore virtual interfaces (bond masters will be identified from
1015 # their slaves)
1016 if not is_phy_iface(nic):
1017 continue
1018
1019 _nic = get_bond_master(nic)
1020 if _nic:
1021 log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
1022 level=DEBUG)
1023 nic = _nic
1024
938 hwaddr = get_nic_hwaddr(nic)1025 hwaddr = get_nic_hwaddr(nic)
939 hwaddr_to_nic[hwaddr] = nic1026 hwaddr_to_nic[hwaddr] = nic
940 addresses = get_ipv4_addr(nic, fatal=False)1027 addresses = get_ipv4_addr(nic, fatal=False)
@@ -960,7 +1047,8 @@
960 # trust it to be the real external network).1047 # trust it to be the real external network).
961 resolved.append(entry)1048 resolved.append(entry)
9621049
963 return resolved1050 # Ensure no duplicates
1051 return list(set(resolved))
9641052
9651053
966class OSConfigFlagContext(OSContextGenerator):1054class OSConfigFlagContext(OSContextGenerator):
@@ -1000,6 +1088,20 @@
1000 config_flags_parser(config_flags)}1088 config_flags_parser(config_flags)}
10011089
10021090
1091class LibvirtConfigFlagsContext(OSContextGenerator):
1092 """
1093 This context provides support for extending
1094 the libvirt section through user-defined flags.
1095 """
1096 def __call__(self):
1097 ctxt = {}
1098 libvirt_flags = config('libvirt-flags')
1099 if libvirt_flags:
1100 ctxt['libvirt_flags'] = config_flags_parser(
1101 libvirt_flags)
1102 return ctxt
1103
1104
1003class SubordinateConfigContext(OSContextGenerator):1105class SubordinateConfigContext(OSContextGenerator):
10041106
1005 """1107 """
@@ -1032,7 +1134,7 @@
10321134
1033 ctxt = {1135 ctxt = {
1034 ... other context ...1136 ... other context ...
1035 'subordinate_config': {1137 'subordinate_configuration': {
1036 'DEFAULT': {1138 'DEFAULT': {
1037 'key1': 'value1',1139 'key1': 'value1',
1038 },1140 },
@@ -1050,13 +1152,22 @@
1050 :param config_file : Service's config file to query sections1152 :param config_file : Service's config file to query sections
1051 :param interface : Subordinate interface to inspect1153 :param interface : Subordinate interface to inspect
1052 """1154 """
1053 self.service = service
1054 self.config_file = config_file1155 self.config_file = config_file
1055 self.interface = interface1156 if isinstance(service, list):
1157 self.services = service
1158 else:
1159 self.services = [service]
1160 if isinstance(interface, list):
1161 self.interfaces = interface
1162 else:
1163 self.interfaces = [interface]
10561164
1057 def __call__(self):1165 def __call__(self):
1058 ctxt = {'sections': {}}1166 ctxt = {'sections': {}}
1059 for rid in relation_ids(self.interface):1167 rids = []
1168 for interface in self.interfaces:
1169 rids.extend(relation_ids(interface))
1170 for rid in rids:
1060 for unit in related_units(rid):1171 for unit in related_units(rid):
1061 sub_config = relation_get('subordinate_configuration',1172 sub_config = relation_get('subordinate_configuration',
1062 rid=rid, unit=unit)1173 rid=rid, unit=unit)
@@ -1064,33 +1175,37 @@
1064 try:1175 try:
1065 sub_config = json.loads(sub_config)1176 sub_config = json.loads(sub_config)
1066 except:1177 except:
1067 log('Could not parse JSON from subordinate_config '1178 log('Could not parse JSON from '
1068 'setting from %s' % rid, level=ERROR)1179 'subordinate_configuration setting from %s'
1069 continue1180 % rid, level=ERROR)
10701181 continue
1071 if self.service not in sub_config:1182
1072 log('Found subordinate_config on %s but it contained'1183 for service in self.services:
1073 'nothing for %s service' % (rid, self.service),1184 if service not in sub_config:
1074 level=INFO)1185 log('Found subordinate_configuration on %s but it '
1075 continue1186 'contained nothing for %s service'
10761187 % (rid, service), level=INFO)
1077 sub_config = sub_config[self.service]1188 continue
1078 if self.config_file not in sub_config:1189
1079 log('Found subordinate_config on %s but it contained'1190 sub_config = sub_config[service]
1080 'nothing for %s' % (rid, self.config_file),1191 if self.config_file not in sub_config:
1081 level=INFO)1192 log('Found subordinate_configuration on %s but it '
1082 continue1193 'contained nothing for %s'
10831194 % (rid, self.config_file), level=INFO)
1084 sub_config = sub_config[self.config_file]1195 continue
1085 for k, v in six.iteritems(sub_config):1196
1086 if k == 'sections':1197 sub_config = sub_config[self.config_file]
1087 for section, config_dict in six.iteritems(v):1198 for k, v in six.iteritems(sub_config):
1088 log("adding section '%s'" % (section),1199 if k == 'sections':
1089 level=DEBUG)1200 for section, config_list in six.iteritems(v):
1090 ctxt[k][section] = config_dict1201 log("adding section '%s'" % (section),
1091 else:1202 level=DEBUG)
1092 ctxt[k] = v1203 if ctxt[k].get(section):
10931204 ctxt[k][section].extend(config_list)
1205 else:
1206 ctxt[k][section] = config_list
1207 else:
1208 ctxt[k] = v
1094 log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)1209 log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
1095 return ctxt1210 return ctxt
10961211
@@ -1267,15 +1382,19 @@
1267 def __call__(self):1382 def __call__(self):
1268 ports = config('data-port')1383 ports = config('data-port')
1269 if ports:1384 if ports:
1385 # Map of {port/mac:bridge}
1270 portmap = parse_data_port_mappings(ports)1386 portmap = parse_data_port_mappings(ports)
1271 ports = portmap.values()1387 ports = portmap.keys()
1388 # Resolve provided ports or mac addresses and filter out those
1389 # already attached to a bridge.
1272 resolved = self.resolve_ports(ports)1390 resolved = self.resolve_ports(ports)
1391 # FIXME: is this necessary?
1273 normalized = {get_nic_hwaddr(port): port for port in resolved1392 normalized = {get_nic_hwaddr(port): port for port in resolved
1274 if port not in ports}1393 if port not in ports}
1275 normalized.update({port: port for port in resolved1394 normalized.update({port: port for port in resolved
1276 if port in ports})1395 if port in ports})
1277 if resolved:1396 if resolved:
1278 return {bridge: normalized[port] for bridge, port in1397 return {normalized[port]: bridge for port, bridge in
1279 six.iteritems(portmap) if port in normalized.keys()}1398 six.iteritems(portmap) if port in normalized.keys()}
12801399
1281 return None1400 return None
@@ -1286,12 +1405,22 @@
1286 def __call__(self):1405 def __call__(self):
1287 ctxt = {}1406 ctxt = {}
1288 mappings = super(PhyNICMTUContext, self).__call__()1407 mappings = super(PhyNICMTUContext, self).__call__()
1289 if mappings and mappings.values():1408 if mappings and mappings.keys():
1290 ports = mappings.values()1409 ports = sorted(mappings.keys())
1291 napi_settings = NeutronAPIContext()()1410 napi_settings = NeutronAPIContext()()
1292 mtu = napi_settings.get('network_device_mtu')1411 mtu = napi_settings.get('network_device_mtu')
1412 all_ports = set()
1413 # If any of ports is a vlan device, its underlying device must have
1414 # mtu applied first.
1415 for port in ports:
1416 for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
1417 lport = os.path.basename(lport)
1418 all_ports.add(lport.split('_')[1])
1419
1420 all_ports = list(all_ports)
1421 all_ports.extend(ports)
1293 if mtu:1422 if mtu:
1294 ctxt["devs"] = '\\n'.join(ports)1423 ctxt["devs"] = '\\n'.join(all_ports)
1295 ctxt['mtu'] = mtu1424 ctxt['mtu'] = mtu
12961425
1297 return ctxt1426 return ctxt
@@ -1323,6 +1452,6 @@
1323 'auth_protocol':1452 'auth_protocol':
1324 rdata.get('auth_protocol') or 'http',1453 rdata.get('auth_protocol') or 'http',
1325 }1454 }
1326 if context_complete(ctxt):1455 if self.context_complete(ctxt):
1327 return ctxt1456 return ctxt
1328 return {}1457 return {}
13291458
=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
--- hooks/charmhelpers/contrib/openstack/neutron.py 2015-06-10 15:45:48 +0000
+++ hooks/charmhelpers/contrib/openstack/neutron.py 2015-12-01 15:05:49 +0000
@@ -195,6 +195,34 @@
195 'packages': [],195 'packages': [],
196 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],196 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
197 'server_services': ['neutron-server']197 'server_services': ['neutron-server']
198 },
199 'plumgrid': {
200 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
201 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
202 'contexts': [
203 context.SharedDBContext(user=config('database-user'),
204 database=config('database'),
205 ssl_dir=NEUTRON_CONF_DIR)],
206 'services': [],
207 'packages': ['plumgrid-lxc',
208 'iovisor-dkms'],
209 'server_packages': ['neutron-server',
210 'neutron-plugin-plumgrid'],
211 'server_services': ['neutron-server']
212 },
213 'midonet': {
214 'config': '/etc/neutron/plugins/midonet/midonet.ini',
215 'driver': 'midonet.neutron.plugin.MidonetPluginV2',
216 'contexts': [
217 context.SharedDBContext(user=config('neutron-database-user'),
218 database=config('neutron-database'),
219 relation_prefix='neutron',
220 ssl_dir=NEUTRON_CONF_DIR)],
221 'services': [],
222 'packages': [[headers_package()] + determine_dkms_package()],
223 'server_packages': ['neutron-server',
224 'python-neutron-plugin-midonet'],
225 'server_services': ['neutron-server']
198 }226 }
199 }227 }
200 if release >= 'icehouse':228 if release >= 'icehouse':
@@ -255,17 +283,30 @@
255 return 'neutron'283 return 'neutron'
256284
257285
258def parse_mappings(mappings):286def parse_mappings(mappings, key_rvalue=False):
287 """By default mappings are lvalue keyed.
288
289 If key_rvalue is True, the mapping will be reversed to allow multiple
290 configs for the same lvalue.
291 """
259 parsed = {}292 parsed = {}
260 if mappings:293 if mappings:
261 mappings = mappings.split()294 mappings = mappings.split()
262 for m in mappings:295 for m in mappings:
263 p = m.partition(':')296 p = m.partition(':')
264 key = p[0].strip()297
265 if p[1]:298 if key_rvalue:
266 parsed[key] = p[2].strip()299 key_index = 2
300 val_index = 0
301 # if there is no rvalue skip to next
302 if not p[1]:
303 continue
267 else:304 else:
268 parsed[key] = ''305 key_index = 0
306 val_index = 2
307
308 key = p[key_index].strip()
309 parsed[key] = p[val_index].strip()
269310
270 return parsed311 return parsed
271312
@@ -283,25 +324,25 @@
283def parse_data_port_mappings(mappings, default_bridge='br-data'):324def parse_data_port_mappings(mappings, default_bridge='br-data'):
284 """Parse data port mappings.325 """Parse data port mappings.
285326
286 Mappings must be a space-delimited list of bridge:port mappings.327 Mappings must be a space-delimited list of bridge:port.
287328
288 Returns dict of the form {bridge:port}.329 Returns dict of the form {port:bridge} where ports may be mac addresses or
330 interface names.
289 """331 """
290 _mappings = parse_mappings(mappings)332
333 # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
334 # proposed for <port> since it may be a mac address which will differ
335 # across units this allowing first-known-good to be chosen.
336 _mappings = parse_mappings(mappings, key_rvalue=True)
291 if not _mappings or list(_mappings.values()) == ['']:337 if not _mappings or list(_mappings.values()) == ['']:
292 if not mappings:338 if not mappings:
293 return {}339 return {}
294340
295 # For backwards-compatibility we need to support port-only provided in341 # For backwards-compatibility we need to support port-only provided in
296 # config.342 # config.
297 _mappings = {default_bridge: mappings.split()[0]}343 _mappings = {mappings.split()[0]: default_bridge}
298344
299 bridges = _mappings.keys()345 ports = _mappings.keys()
300 ports = _mappings.values()
301 if len(set(bridges)) != len(bridges):
302 raise Exception("It is not allowed to have more than one port "
303 "configured on the same bridge")
304
305 if len(set(ports)) != len(ports):346 if len(set(ports)) != len(ports):
306 raise Exception("It is not allowed to have the same port configured "347 raise Exception("It is not allowed to have the same port configured "
307 "on more than one bridge")348 "on more than one bridge")
308349
=== modified file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf'
--- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-06-10 07:35:12 +0000
+++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-12-01 15:05:49 +0000
@@ -5,11 +5,17 @@
5###############################################################################5###############################################################################
6[global]6[global]
7{% if auth -%}7{% if auth -%}
8 auth_supported = {{ auth }}8auth_supported = {{ auth }}
9 keyring = /etc/ceph/$cluster.$name.keyring9keyring = /etc/ceph/$cluster.$name.keyring
10 mon host = {{ mon_hosts }}10mon host = {{ mon_hosts }}
11{% endif -%}11{% endif -%}
12 log to syslog = {{ use_syslog }}12log to syslog = {{ use_syslog }}
13 err to syslog = {{ use_syslog }}13err to syslog = {{ use_syslog }}
14 clog to syslog = {{ use_syslog }}14clog to syslog = {{ use_syslog }}
1515
16[client]
17{% if rbd_client_cache_settings -%}
18{% for key, value in rbd_client_cache_settings.iteritems() -%}
19{{ key }} = {{ value }}
20{% endfor -%}
21{%- endif %}
16\ No newline at end of file22\ No newline at end of file
1723
=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
--- hooks/charmhelpers/contrib/openstack/templating.py 2015-06-11 14:20:09 +0000
+++ hooks/charmhelpers/contrib/openstack/templating.py 2015-12-01 15:05:49 +0000
@@ -18,7 +18,7 @@
1818
19import six19import six
2020
21from charmhelpers.fetch import apt_install21from charmhelpers.fetch import apt_install, apt_update
22from charmhelpers.core.hookenv import (22from charmhelpers.core.hookenv import (
23 log,23 log,
24 ERROR,24 ERROR,
@@ -29,39 +29,15 @@
29try:29try:
30 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions30 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
31except ImportError:31except ImportError:
32 # python-jinja2 may not be installed yet, or we're running unittests.32 apt_update(fatal=True)
33 FileSystemLoader = ChoiceLoader = Environment = exceptions = None33 apt_install('python-jinja2', fatal=True)
34 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
3435
3536
36class OSConfigException(Exception):37class OSConfigException(Exception):
37 pass38 pass
3839
3940
40def os_template_dirs(templates_dir, os_release):
41 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
42 for rel in six.itervalues(OPENSTACK_CODENAMES)]
43
44 if not os.path.isdir(templates_dir):
45 log('Templates directory not found @ %s.' % templates_dir,
46 level=ERROR)
47 raise OSConfigException
48 dirs = [templates_dir]
49 helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
50 if os.path.isdir(helper_templates):
51 dirs.append(helper_templates)
52
53 for rel, tmpl_dir in tmpl_dirs:
54 if os.path.isdir(tmpl_dir):
55 dirs.insert(0, tmpl_dir)
56 if rel == os_release:
57 break
58 ch_templates = os.path.dirname(__file__) + '/charmhelpers/contrib/openstack/templates'
59 dirs.append(ch_templates)
60 log('Template search path: %s' %
61 ' '.join(dirs), level=INFO)
62 return dirs
63
64
65def get_loader(templates_dir, os_release):41def get_loader(templates_dir, os_release):
66 """42 """
67 Create a jinja2.ChoiceLoader containing template dirs up to43 Create a jinja2.ChoiceLoader containing template dirs up to
@@ -137,7 +113,7 @@
137113
138 def complete_contexts(self):114 def complete_contexts(self):
139 '''115 '''
140 Return a list of interfaces that have atisfied contexts.116 Return a list of interfaces that have satisfied contexts.
141 '''117 '''
142 if self._complete_contexts:118 if self._complete_contexts:
143 return self._complete_contexts119 return self._complete_contexts
@@ -318,3 +294,30 @@
318 [interfaces.extend(i.complete_contexts())294 [interfaces.extend(i.complete_contexts())
319 for i in six.itervalues(self.templates)]295 for i in six.itervalues(self.templates)]
320 return interfaces296 return interfaces
297
298 def get_incomplete_context_data(self, interfaces):
299 '''
300 Return dictionary of relation status of interfaces and any missing
301 required context data. Example:
302 {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
303 'zeromq-configuration': {'related': False}}
304 '''
305 incomplete_context_data = {}
306
307 for i in six.itervalues(self.templates):
308 for context in i.contexts:
309 for interface in interfaces:
310 related = False
311 if interface in context.interfaces:
312 related = context.get_related()
313 missing_data = context.missing_data
314 if missing_data:
315 incomplete_context_data[interface] = {'missing_data': missing_data}
316 if related:
317 if incomplete_context_data.get(interface):
318 incomplete_context_data[interface].update({'related': True})
319 else:
320 incomplete_context_data[interface] = {'related': True}
321 else:
322 incomplete_context_data[interface] = {'related': False}
323 return incomplete_context_data
321324
=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
--- hooks/charmhelpers/contrib/openstack/utils.py 2015-06-17 12:23:31 +0000
+++ hooks/charmhelpers/contrib/openstack/utils.py 2015-12-01 15:05:49 +0000
@@ -1,5 +1,3 @@
1#!/usr/bin/python
2
3# Copyright 2014-2015 Canonical Limited.1# Copyright 2014-2015 Canonical Limited.
4#2#
5# This file is part of charm-helpers.3# This file is part of charm-helpers.
@@ -24,9 +22,11 @@
24import json22import json
25import os23import os
26import sys24import sys
25import re
26
27import six
28import traceback
27import uuid29import uuid
28
29import six
30import yaml30import yaml
3131
32from charmhelpers.contrib.network import ip32from charmhelpers.contrib.network import ip
@@ -36,13 +36,17 @@
36)36)
3737
38from charmhelpers.core.hookenv import (38from charmhelpers.core.hookenv import (
39 action_fail,
40 action_set,
39 config,41 config,
40 log as juju_log,42 log as juju_log,
41 charm_dir,43 charm_dir,
42 INFO,44 INFO,
45 related_units,
43 relation_ids,46 relation_ids,
44 related_units,
45 relation_set,47 relation_set,
48 status_set,
49 hook_name
46)50)
4751
48from charmhelpers.contrib.storage.linux.lvm import (52from charmhelpers.contrib.storage.linux.lvm import (
@@ -52,7 +56,8 @@
52)56)
5357
54from charmhelpers.contrib.network.ip import (58from charmhelpers.contrib.network.ip import (
55 get_ipv6_addr59 get_ipv6_addr,
60 is_ipv6,
56)61)
5762
58from charmhelpers.contrib.python.packages import (63from charmhelpers.contrib.python.packages import (
@@ -71,7 +76,6 @@
71DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '76DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
72 'restricted main multiverse universe')77 'restricted main multiverse universe')
7378
74
75UBUNTU_OPENSTACK_RELEASE = OrderedDict([79UBUNTU_OPENSTACK_RELEASE = OrderedDict([
76 ('oneiric', 'diablo'),80 ('oneiric', 'diablo'),
77 ('precise', 'essex'),81 ('precise', 'essex'),
@@ -81,6 +85,7 @@
81 ('trusty', 'icehouse'),85 ('trusty', 'icehouse'),
82 ('utopic', 'juno'),86 ('utopic', 'juno'),
83 ('vivid', 'kilo'),87 ('vivid', 'kilo'),
88 ('wily', 'liberty'),
84])89])
8590
8691
@@ -93,6 +98,7 @@
93 ('2014.1', 'icehouse'),98 ('2014.1', 'icehouse'),
94 ('2014.2', 'juno'),99 ('2014.2', 'juno'),
95 ('2015.1', 'kilo'),100 ('2015.1', 'kilo'),
101 ('2015.2', 'liberty'),
96])102])
97103
98# The ugly duckling104# The ugly duckling
@@ -115,8 +121,42 @@
115 ('2.2.0', 'juno'),121 ('2.2.0', 'juno'),
116 ('2.2.1', 'kilo'),122 ('2.2.1', 'kilo'),
117 ('2.2.2', 'kilo'),123 ('2.2.2', 'kilo'),
124 ('2.3.0', 'liberty'),
125 ('2.4.0', 'liberty'),
126 ('2.5.0', 'liberty'),
118])127])
119128
129# >= Liberty version->codename mapping
130PACKAGE_CODENAMES = {
131 'nova-common': OrderedDict([
132 ('12.0.0', 'liberty'),
133 ]),
134 'neutron-common': OrderedDict([
135 ('7.0.0', 'liberty'),
136 ]),
137 'cinder-common': OrderedDict([
138 ('7.0.0', 'liberty'),
139 ]),
140 'keystone': OrderedDict([
141 ('8.0.0', 'liberty'),
142 ]),
143 'horizon-common': OrderedDict([
144 ('8.0.0', 'liberty'),
145 ]),
146 'ceilometer-common': OrderedDict([
147 ('5.0.0', 'liberty'),
148 ]),
149 'heat-common': OrderedDict([
150 ('5.0.0', 'liberty'),
151 ]),
152 'glance-common': OrderedDict([
153 ('11.0.0', 'liberty'),
154 ]),
155 'openstack-dashboard': OrderedDict([
156 ('8.0.0', 'liberty'),
157 ]),
158}
159
120DEFAULT_LOOPBACK_SIZE = '5G'160DEFAULT_LOOPBACK_SIZE = '5G'
121161
122162
@@ -166,9 +206,9 @@
166 error_out(e)206 error_out(e)
167207
168208
169def get_os_version_codename(codename):209def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
170 '''Determine OpenStack version number from codename.'''210 '''Determine OpenStack version number from codename.'''
171 for k, v in six.iteritems(OPENSTACK_CODENAMES):211 for k, v in six.iteritems(version_map):
172 if v == codename:212 if v == codename:
173 return k213 return k
174 e = 'Could not derive OpenStack version for '\214 e = 'Could not derive OpenStack version for '\
@@ -200,20 +240,31 @@
200 error_out(e)240 error_out(e)
201241
202 vers = apt.upstream_version(pkg.current_ver.ver_str)242 vers = apt.upstream_version(pkg.current_ver.ver_str)
243 match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
244 if match:
245 vers = match.group(0)
203246
204 try:247 # >= Liberty independent project versions
205 if 'swift' in pkg.name:248 if (package in PACKAGE_CODENAMES and
206 swift_vers = vers[:5]249 vers in PACKAGE_CODENAMES[package]):
207 if swift_vers not in SWIFT_CODENAMES:250 return PACKAGE_CODENAMES[package][vers]
208 # Deal with 1.10.0 upward251 else:
209 swift_vers = vers[:6]252 # < Liberty co-ordinated project versions
210 return SWIFT_CODENAMES[swift_vers]253 try:
211 else:254 if 'swift' in pkg.name:
212 vers = vers[:6]255 swift_vers = vers[:5]
213 return OPENSTACK_CODENAMES[vers]256 if swift_vers not in SWIFT_CODENAMES:
214 except KeyError:257 # Deal with 1.10.0 upward
215 e = 'Could not determine OpenStack codename for version %s' % vers258 swift_vers = vers[:6]
216 error_out(e)259 return SWIFT_CODENAMES[swift_vers]
260 else:
261 vers = vers[:6]
262 return OPENSTACK_CODENAMES[vers]
263 except KeyError:
264 if not fatal:
265 return None
266 e = 'Could not determine OpenStack codename for version %s' % vers
267 error_out(e)
217268
218269
219def get_os_version_package(pkg, fatal=True):270def get_os_version_package(pkg, fatal=True):
@@ -323,6 +374,9 @@
323 'kilo': 'trusty-updates/kilo',374 'kilo': 'trusty-updates/kilo',
324 'kilo/updates': 'trusty-updates/kilo',375 'kilo/updates': 'trusty-updates/kilo',
325 'kilo/proposed': 'trusty-proposed/kilo',376 'kilo/proposed': 'trusty-proposed/kilo',
377 'liberty': 'trusty-updates/liberty',
378 'liberty/updates': 'trusty-updates/liberty',
379 'liberty/proposed': 'trusty-proposed/liberty',
326 }380 }
327381
328 try:382 try:
@@ -388,7 +442,11 @@
388 import apt_pkg as apt442 import apt_pkg as apt
389 src = config('openstack-origin')443 src = config('openstack-origin')
390 cur_vers = get_os_version_package(package)444 cur_vers = get_os_version_package(package)
391 available_vers = get_os_version_install_source(src)445 if "swift" in package:
446 codename = get_os_codename_install_source(src)
447 available_vers = get_os_version_codename(codename, SWIFT_CODENAMES)
448 else:
449 available_vers = get_os_version_install_source(src)
392 apt.init()450 apt.init()
393 return apt.version_compare(available_vers, cur_vers) == 1451 return apt.version_compare(available_vers, cur_vers) == 1
394452
@@ -465,6 +523,12 @@
465 relation_prefix=None):523 relation_prefix=None):
466 hosts = get_ipv6_addr(dynamic_only=False)524 hosts = get_ipv6_addr(dynamic_only=False)
467525
526 if config('vip'):
527 vips = config('vip').split()
528 for vip in vips:
529 if vip and is_ipv6(vip):
530 hosts.append(vip)
531
468 kwargs = {'database': database,532 kwargs = {'database': database,
469 'username': database_user,533 'username': database_user,
470 'hostname': json.dumps(hosts)}534 'hostname': json.dumps(hosts)}
@@ -518,6 +582,7 @@
518 Clone/install all specified OpenStack repositories.582 Clone/install all specified OpenStack repositories.
519583
520 The expected format of projects_yaml is:584 The expected format of projects_yaml is:
585
521 repositories:586 repositories:
522 - {name: keystone,587 - {name: keystone,
523 repository: 'git://git.openstack.org/openstack/keystone.git',588 repository: 'git://git.openstack.org/openstack/keystone.git',
@@ -525,11 +590,13 @@
525 - {name: requirements,590 - {name: requirements,
526 repository: 'git://git.openstack.org/openstack/requirements.git',591 repository: 'git://git.openstack.org/openstack/requirements.git',
527 branch: 'stable/icehouse'}592 branch: 'stable/icehouse'}
593
528 directory: /mnt/openstack-git594 directory: /mnt/openstack-git
529 http_proxy: squid-proxy-url595 http_proxy: squid-proxy-url
530 https_proxy: squid-proxy-url596 https_proxy: squid-proxy-url
531597
532 The directory, http_proxy, and https_proxy keys are optional.598 The directory, http_proxy, and https_proxy keys are optional.
599
533 """600 """
534 global requirements_dir601 global requirements_dir
535 parent_dir = '/mnt/openstack-git'602 parent_dir = '/mnt/openstack-git'
@@ -551,6 +618,12 @@
551618
552 pip_create_virtualenv(os.path.join(parent_dir, 'venv'))619 pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
553620
621 # Upgrade setuptools and pip from default virtualenv versions. The default
622 # versions in trusty break master OpenStack branch deployments.
623 for p in ['pip', 'setuptools']:
624 pip_install(p, upgrade=True, proxy=http_proxy,
625 venv=os.path.join(parent_dir, 'venv'))
626
554 for p in projects['repositories']:627 for p in projects['repositories']:
555 repo = p['repository']628 repo = p['repository']
556 branch = p['branch']629 branch = p['branch']
@@ -612,24 +685,24 @@
612 else:685 else:
613 repo_dir = dest_dir686 repo_dir = dest_dir
614687
688 venv = os.path.join(parent_dir, 'venv')
689
615 if update_requirements:690 if update_requirements:
616 if not requirements_dir:691 if not requirements_dir:
617 error_out('requirements repo must be cloned before '692 error_out('requirements repo must be cloned before '
618 'updating from global requirements.')693 'updating from global requirements.')
619 _git_update_requirements(repo_dir, requirements_dir)694 _git_update_requirements(venv, repo_dir, requirements_dir)
620695
621 juju_log('Installing git repo from dir: {}'.format(repo_dir))696 juju_log('Installing git repo from dir: {}'.format(repo_dir))
622 if http_proxy:697 if http_proxy:
623 pip_install(repo_dir, proxy=http_proxy,698 pip_install(repo_dir, proxy=http_proxy, venv=venv)
624 venv=os.path.join(parent_dir, 'venv'))
625 else:699 else:
626 pip_install(repo_dir,700 pip_install(repo_dir, venv=venv)
627 venv=os.path.join(parent_dir, 'venv'))
628701
629 return repo_dir702 return repo_dir
630703
631704
def _git_update_requirements(venv, package_dir, reqs_dir):
    """
    Update from global requirements.

    Runs the requirements repo's update.py (with the virtualenv's python)
    against the given package checkout so its requirements.txt and
    test-requirements.txt match global-requirements.txt.

    :param venv: path to the virtualenv whose python should run update.py
    :param package_dir: path to the git checkout to update
    :param reqs_dir: path to the cloned requirements repository
    """
    orig_dir = os.getcwd()
    os.chdir(reqs_dir)
    python = os.path.join(venv, 'bin/python')
    cmd = [python, 'update.py', package_dir]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        package = os.path.basename(package_dir)
        error_out("Error updating {} from "
                  "global-requirements.txt".format(package))
    finally:
        # Restore the working directory even if update.py raises something
        # other than CalledProcessError (previously the chdir leaked).
        os.chdir(orig_dir)
648723
649724
@@ -691,6 +766,222 @@
691 return None766 return None
692767
693768
def os_workload_status(configs, required_interfaces, charm_func=None):
    """Decorator that refreshes workload status after the wrapped hook runs.

    Once the decorated function has completed, the unit's workload status is
    recalculated from the (now updated) contexts via set_os_workload_status.
    """
    def _decorator(func):
        @wraps(func)
        def _inner(*args, **kwargs):
            # Execute the hook body first so contexts reflect its effects.
            func(*args, **kwargs)
            # Recompute and publish workload status from complete contexts.
            set_os_workload_status(configs, required_interfaces, charm_func)
        return _inner
    return _decorator
783
784
def set_os_workload_status(configs, required_interfaces, charm_func=None):
    """
    Set workload status based on complete contexts.
    status-set missing or incomplete contexts
    and juju-log details of missing required data.
    charm_func is a charm specific function to run checking
    for charm specific requirements such as a VIP setting.

    :param configs: OSConfigRenderer with registered configs
    :param required_interfaces: dict of generic interface name ->
        list of candidate specific interfaces (see incomplete_relation_data)
    :param charm_func: optional callable(configs) -> (state, message) for
        charm-specific checks; its state is merged via workload_state_compare
    """
    incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
    state = 'active'
    missing_relations = []
    incomplete_relations = []
    message = None
    charm_state = None
    charm_message = None

    for generic_interface in incomplete_rel_data.keys():
        related_interface = None
        missing_data = {}
        # Related or not?
        for interface in incomplete_rel_data[generic_interface]:
            if incomplete_rel_data[generic_interface][interface].get('related'):
                related_interface = interface
                missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data')
        # No relation ID for the generic_interface
        if not related_interface:
            juju_log("{} relation is missing and must be related for "
                     "functionality. ".format(generic_interface), 'WARN')
            state = 'blocked'
            if generic_interface not in missing_relations:
                missing_relations.append(generic_interface)
        else:
            # Relation ID exists but no related unit
            if not missing_data:
                # Edge case relation ID exists but departing
                if ('departed' in hook_name() or 'broken' in hook_name()) \
                        and related_interface in hook_name():
                    state = 'blocked'
                    if generic_interface not in missing_relations:
                        missing_relations.append(generic_interface)
                    juju_log("{} relation's interface, {}, "
                             "relationship is departed or broken "
                             "and is required for functionality."
                             "".format(generic_interface, related_interface), "WARN")
                # Normal case relation ID exists but no related unit
                # (joining)
                else:
                    juju_log("{} relations's interface, {}, is related but has "
                             "no units in the relation."
                             "".format(generic_interface, related_interface), "INFO")
            # Related unit exists and data missing on the relation
            else:
                juju_log("{} relation's interface, {}, is related awaiting "
                         "the following data from the relationship: {}. "
                         "".format(generic_interface, related_interface,
                                   ", ".join(missing_data)), "INFO")
            if state != 'blocked':
                state = 'waiting'
            if generic_interface not in incomplete_relations \
                    and generic_interface not in missing_relations:
                incomplete_relations.append(generic_interface)

    # Build the human readable status message; missing relations take
    # precedence (blocked) over incomplete ones (waiting).
    if missing_relations:
        message = "Missing relations: {}".format(", ".join(missing_relations))
        if incomplete_relations:
            message += "; incomplete relations: {}" \
                       "".format(", ".join(incomplete_relations))
        state = 'blocked'
    elif incomplete_relations:
        message = "Incomplete relations: {}" \
                  "".format(", ".join(incomplete_relations))
        state = 'waiting'

    # Run charm specific checks
    if charm_func:
        charm_state, charm_message = charm_func(configs)
        if charm_state != 'active' and charm_state != 'unknown':
            state = workload_state_compare(state, charm_state)
            if message:
                charm_message = charm_message.replace("Incomplete relations: ",
                                                      "")
                message = "{}, {}".format(message, charm_message)
            else:
                message = charm_message

    # Set to active if all requirements have been met
    if state == 'active':
        message = "Unit is ready"
        juju_log(message, "INFO")

    status_set(state, message)
876
877
def workload_state_compare(current_workload_state, workload_state):
    """Return whichever of two workload states has the higher priority."""
    # Priority ordering; states not listed here are treated as 'unknown'.
    priority = {'unknown': -1,
                'active': 0,
                'maintenance': 1,
                'waiting': 2,
                'blocked': 3}

    # Normalise unrecognised states to 'unknown'.
    if workload_state not in priority:
        workload_state = 'unknown'
    if current_workload_state not in priority:
        current_workload_state = 'unknown'

    # On a tie the proposed (second) state wins, matching historic behaviour.
    if priority[current_workload_state] > priority[workload_state]:
        return current_workload_state
    return workload_state
897
898
def incomplete_relation_data(configs, required_interfaces):
    """
    Check complete contexts against required_interfaces
    Return dictionary of incomplete relation data.

    configs is an OSConfigRenderer object with configs registered

    required_interfaces is a dictionary of required general interfaces
    with dictionary values of possible specific interfaces.
    Example:
    required_interfaces = {'database': ['shared-db', 'pgsql-db']}

    The interface is said to be satisfied if any one of the interfaces in
    the list has a complete context.

    Return dictionary of incomplete or missing required contexts with relation
    status of interfaces and any missing data points. Example:
    {'message':
         {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
          'zeromq-configuration': {'related': False}},
     'identity':
         {'identity-service': {'related': False}},
     'database':
         {'pgsql-db': {'related': False},
          'shared-db': {'related': True}}}
    """
    complete = configs.complete_contexts()
    # A generic interface is satisfied when any of its candidate specific
    # interfaces has a complete context.
    unsatisfied = [svc for svc, ifaces in required_interfaces.items()
                   if not any(iface in complete for iface in ifaces)]
    return {svc: configs.get_incomplete_context_data(required_interfaces[svc])
            for svc in unsatisfied}
939
940
def do_action_openstack_upgrade(package, upgrade_callback, configs):
    """Perform action-managed OpenStack upgrade.

    Upgrades packages to the configured openstack-origin version and sets
    the corresponding action status as a result.

    If the charm was installed from source we cannot upgrade it.
    For backwards compatibility a config flag (action-managed-upgrade) must
    be set for this code to run, otherwise a full service level upgrade will
    fire on config-changed.

    @param package: package name for determining if upgrade available
    @param upgrade_callback: function callback to charm's upgrade function
    @param configs: templating object derived from OSConfigRenderer class

    @return: True if upgrade successful; False if upgrade failed or skipped
    """
    ret = False

    if git_install_requested():
        action_set({'outcome': 'installed from source, skipped upgrade.'})
    else:
        if openstack_upgrade_available(package):
            if config('action-managed-upgrade'):
                juju_log('Upgrading OpenStack release')

                try:
                    upgrade_callback(configs=configs)
                    action_set({'outcome': 'success, upgrade completed.'})
                    ret = True
                except Exception:
                    # NOTE: previously a bare 'except:' which would also
                    # swallow SystemExit/KeyboardInterrupt; only trap real
                    # errors and report them through the action.
                    action_set({'outcome': 'upgrade failed, see traceback.'})
                    action_set({'traceback': traceback.format_exc()})
                    action_fail('do_openstack_upgrade resulted in an '
                                'unexpected error')
            else:
                action_set({'outcome': 'action-managed-upgrade config is '
                                       'False, skipped upgrade.'})
        else:
            action_set({'outcome': 'no upgrade available.'})

    return ret
983
984
694def remote_restart(rel_name, remote_service=None):985def remote_restart(rel_name, remote_service=None):
695 trigger = {986 trigger = {
696 'restart-trigger': str(uuid.uuid4()),987 'restart-trigger': str(uuid.uuid4()),
@@ -700,7 +991,7 @@
700 for rid in relation_ids(rel_name):991 for rid in relation_ids(rel_name):
701 # This subordinate can be related to two separate services using992
702 # different subordinate relations so only issue the restart if993
703 # the principle is connected down the relation we think it is994
704 if related_units(relid=rid):995 if related_units(relid=rid):
705 relation_set(relation_id=rid,996 relation_set(relation_id=rid,
706 relation_settings=trigger,997 relation_settings=trigger,
707998
=== modified file 'hooks/charmhelpers/contrib/python/packages.py'
--- hooks/charmhelpers/contrib/python/packages.py 2015-06-10 15:45:48 +0000
+++ hooks/charmhelpers/contrib/python/packages.py 2015-12-01 15:05:49 +0000
@@ -36,6 +36,8 @@
def parse_options(given, available):
    """Yield "--key=value" strings for given options that are available.

    Options with falsy values are skipped, as are options not listed in
    *available*. Output is ordered by option name.
    """
    for key in sorted(given):
        value = given[key]
        # Skip unset/falsy options and ones we do not recognise.
        if value and key in available:
            yield "--{0}={1}".format(key, value)
4143
4244
=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-06-10 15:45:48 +0000
+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-12-01 15:05:49 +0000
@@ -26,8 +26,10 @@
2626
27import os27import os
28import shutil28import shutil
29import six
29import json30import json
30import time31import time
32import uuid
3133
32from subprocess import (34from subprocess import (
33 check_call,35 check_call,
@@ -35,8 +37,10 @@
35 CalledProcessError,37 CalledProcessError,
36)38)
37from charmhelpers.core.hookenv import (39from charmhelpers.core.hookenv import (
40 local_unit,
38 relation_get,41 relation_get,
39 relation_ids,42 relation_ids,
43 relation_set,
40 related_units,44 related_units,
41 log,45 log,
42 DEBUG,46 DEBUG,
@@ -56,16 +60,18 @@
56 apt_install,60 apt_install,
57)61)
5862
63from charmhelpers.core.kernel import modprobe
64
59KEYRING = '/etc/ceph/ceph.client.{}.keyring'65KEYRING = '/etc/ceph/ceph.client.{}.keyring'
60KEYFILE = '/etc/ceph/ceph.client.{}.key'66KEYFILE = '/etc/ceph/ceph.client.{}.key'
6167
62CEPH_CONF = """[global]68CEPH_CONF = """[global]
63 auth supported = {auth}69auth supported = {auth}
64 keyring = {keyring}70keyring = {keyring}
65 mon host = {mon_hosts}71mon host = {mon_hosts}
66 log to syslog = {use_syslog}72log to syslog = {use_syslog}
67 err to syslog = {use_syslog}73err to syslog = {use_syslog}
68 clog to syslog = {use_syslog}74clog to syslog = {use_syslog}
69"""75"""
7076
7177
@@ -120,29 +126,37 @@
120 return None126 return None
121127
122128
def update_pool(client, pool, settings):
    """Apply key/value settings to an existing RADOS pool.

    :param client: ceph client id used for authentication (--id).
    :param pool: name of the pool to modify.
    :param settings: dict of pool option name -> value; values must already
        be strings suitable for the ceph CLI.
    """
    cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
    for key, value in six.iteritems(settings):
        cmd.extend([key, value])

    check_call(cmd)
136
137
def create_pool(service, name, replicas=3, pg_num=None):
    """Create a new RADOS pool.

    :param service: ceph client id used to authenticate (--id).
    :param name: name of the pool to create; creation is skipped (with a
        warning) if the pool already exists.
    :param replicas: replica count applied afterwards via the pool 'size'
        setting.
    :param pg_num: optional placement group count; when not given it is
        derived from the current OSD count, or defaults to 200 on older
        ceph versions where OSDs cannot be queried from the cli.
    """
    if pool_exists(service, name):
        log("Ceph pool {} already exists, skipping creation".format(name),
            level=WARNING)
        return

    if not pg_num:
        # Calculate the number of placement groups based
        # on upstream recommended best practices.
        osds = get_osds(service)
        if osds:
            pg_num = (len(osds) * 100 // replicas)
        else:
            # NOTE(james-page): Default to 200 for older ceph versions
            # which don't support OSD query from cli
            pg_num = 200

    cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)]
    check_call(cmd)

    update_pool(service, name, settings={'size': str(replicas)})
146160
147161
148def delete_pool(service, name):162def delete_pool(service, name):
@@ -197,10 +211,10 @@
197 log('Created new keyfile at %s.' % keyfile, level=INFO)211 log('Created new keyfile at %s.' % keyfile, level=INFO)
198212
199213
def get_ceph_nodes(relation='ceph'):
    """Return the private addresses of all units on the named relation."""
    return [relation_get('private-address', unit=unit, rid=r_id)
            for r_id in relation_ids(relation)
            for unit in related_units(r_id)]
206220
@@ -288,17 +302,6 @@
288 os.chown(data_src_dst, uid, gid)302 os.chown(data_src_dst, uid, gid)
289303
290304
291# TODO: re-use
292def modprobe(module):
293 """Load a kernel module and configure for auto-load on reboot."""
294 log('Loading kernel module', level=INFO)
295 cmd = ['modprobe', module]
296 check_call(cmd)
297 with open('/etc/modules', 'r+') as modules:
298 if module not in modules.read():
299 modules.write(module)
300
301
302def copy_files(src, dst, symlinks=False, ignore=None):305def copy_files(src, dst, symlinks=False, ignore=None):
303 """Copy files from src to dst."""306 """Copy files from src to dst."""
304 for item in os.listdir(src):307 for item in os.listdir(src):
@@ -363,14 +366,14 @@
363 service_start(svc)366 service_start(svc)
364367
365368
366def ensure_ceph_keyring(service, user=None, group=None):369def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'):
367 """Ensures a ceph keyring is created for a named service and optionally370 """Ensures a ceph keyring is created for a named service and optionally
368 ensures user and group ownership.371 ensures user and group ownership.
369372
370 Returns False if no ceph key is available in relation state.373 Returns False if no ceph key is available in relation state.
371 """374 """
372 key = None375 key = None
373 for rid in relation_ids('ceph'):376 for rid in relation_ids(relation):
374 for unit in related_units(rid):377 for unit in related_units(rid):
375 key = relation_get('key', rid=rid, unit=unit)378 key = relation_get('key', rid=rid, unit=unit)
376 if key:379 if key:
@@ -411,17 +414,59 @@
411414
412 The API is versioned and defaults to version 1.415 The API is versioned and defaults to version 1.
413 """416 """
414 def __init__(self, api_version=1):417 def __init__(self, api_version=1, request_id=None):
415 self.api_version = api_version418 self.api_version = api_version
419 if request_id:
420 self.request_id = request_id
421 else:
422 self.request_id = str(uuid.uuid1())
416 self.ops = []423 self.ops = []
417424
    def add_op_create_pool(self, name, replica_count=3, pg_num=None):
        """Adds an operation to create a pool.

        @param name: name of the pool to create.
        @param replica_count: desired replica count for the pool.
        @param pg_num setting: optional setting. If not provided, this value
        will be calculated by the broker based on how many OSDs are in the
        cluster at the time of creation. Note that, if provided, this value
        will be capped at the current available maximum.
        """
        self.ops.append({'op': 'create-pool', 'name': name,
                         'replicas': replica_count, 'pg_num': pg_num})
435
    def set_ops(self, ops):
        """Set request ops to provided value.

        Useful for injecting ops that come from a previous request
        to allow comparisons to ensure validity.

        @param ops: list of op dicts, replacing any ops already queued.
        """
        self.ops = ops
421443
422 @property444 @property
423 def request(self):445 def request(self):
424 return json.dumps({'api-version': self.api_version, 'ops': self.ops})446 return json.dumps({'api-version': self.api_version, 'ops': self.ops,
447 'request-id': self.request_id})
448
449 def _ops_equal(self, other):
450 if len(self.ops) == len(other.ops):
451 for req_no in range(0, len(self.ops)):
452 for key in ['replicas', 'name', 'op', 'pg_num']:
453 if self.ops[req_no].get(key) != other.ops[req_no].get(key):
454 return False
455 else:
456 return False
457 return True
458
459 def __eq__(self, other):
460 if not isinstance(other, self.__class__):
461 return False
462 if self.api_version == other.api_version and \
463 self._ops_equal(other):
464 return True
465 else:
466 return False
467
468 def __ne__(self, other):
469 return not self.__eq__(other)
425470
426471
class CephBrokerRsp(object):
    """Ceph broker response.

    Wraps a JSON-encoded broker reply received over the ceph relation and
    exposes its fields as read-only properties.

    The API is versioned and defaults to version 1.
    """

    def __init__(self, encoded_rsp):
        self.api_version = None
        self.rsp = json.loads(encoded_rsp)

    @property
    def request_id(self):
        """Id of the CephBrokerRq this response corresponds to."""
        return self.rsp.get('request-id')

    @property
    def exit_code(self):
        """Broker exit code; falsy/0 indicates success."""
        return self.rsp.get('exit-code')

    @property
    def exit_msg(self):
        """stderr output from the broker, if any."""
        return self.rsp.get('stderr')
495
496
497# Ceph Broker Conversation:
498# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
499# and send that request to ceph via the ceph relation. The CephBrokerRq has a
500# unique id so that the client can identity which CephBrokerRsp is associated
501# with the request. Ceph will also respond to each client unit individually
502# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
503# via key broker-rsp-glance-0
504#
505# To use this the charm can just do something like:
506#
507# from charmhelpers.contrib.storage.linux.ceph import (
508# send_request_if_needed,
509# is_request_complete,
510# CephBrokerRq,
511# )
512#
513# @hooks.hook('ceph-relation-changed')
514# def ceph_changed():
515# rq = CephBrokerRq()
516# rq.add_op_create_pool(name='poolname', replica_count=3)
517#
518# if is_request_complete(rq):
519# <Request complete actions>
520# else:
521# send_request_if_needed(get_ceph_request())
522#
523# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
524# of glance having sent a request to ceph which ceph has successfully processed
525# 'ceph:8': {
526# 'ceph/0': {
527# 'auth': 'cephx',
528# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
529# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
530# 'ceph-public-address': '10.5.44.103',
531# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
532# 'private-address': '10.5.44.103',
533# },
534# 'glance/0': {
535# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
536# '"ops": [{"replicas": 3, "name": "glance", '
537# '"op": "create-pool"}]}'),
538# 'private-address': '10.5.44.109',
539# },
540# }
541
def get_previous_request(rid):
    """Return the last ceph broker request sent on a given relation

    @param rid: Relation id to query for request
    @return: CephBrokerRq reconstructed from this unit's 'broker_req'
        relation data, or None if no request has been published.
    """
    request = None
    # The request this unit last published lives in its own relation data.
    broker_req = relation_get(attribute='broker_req', rid=rid,
                              unit=local_unit())
    if broker_req:
        request_data = json.loads(broker_req)
        request = CephBrokerRq(api_version=request_data['api-version'],
                               request_id=request_data['request-id'])
        request.set_ops(request_data['ops'])

    return request
557
558
def get_request_states(request, relation='ceph'):
    """Return a dict of requests per relation id with their corresponding
    completion state.

    This allows a charm, which has a request for ceph, to see whether there is
    an equivalent request already being processed and if so what state that
    request is in.

    @param request: A CephBrokerRq object
    @param relation: relation name to inspect (default 'ceph')
    @return: dict mapping relation id -> {'sent': bool, 'complete': bool}
    """
    # NOTE: dropped the dead 'complete = []' initialiser and the redundant
    # per-iteration reset; both branches below assign 'complete' explicitly.
    requests = {}
    for rid in relation_ids(relation):
        # A request counts as sent only if it is functionally equivalent to
        # the one previously published on this relation.
        previous_request = get_previous_request(rid)
        if request == previous_request:
            sent = True
            complete = is_request_complete_for_rid(previous_request, rid)
        else:
            sent = False
            complete = False

        requests[rid] = {
            'sent': sent,
            'complete': complete,
        }

    return requests
587
588
def is_request_sent(request, relation='ceph'):
    """Check to see if a functionally equivalent request has already been sent

    Returns True only when a similar request has been sent on every
    relation id of the named relation.

    @param request: A CephBrokerRq object
    """
    states = get_request_states(request, relation=relation)
    return all(state['sent'] for state in states.values())
602
603
def is_request_complete(request, relation='ceph'):
    """Check to see if a functionally equivalent request has already been
    completed

    Returns True only when a similar request has completed on every
    relation id of the named relation.

    @param request: A CephBrokerRq object
    """
    states = get_request_states(request, relation=relation)
    return all(state['complete'] for state in states.values())
618
619
def is_request_complete_for_rid(request, rid):
    """Check if a given request has been completed on the given relation

    @param request: A CephBrokerRq object
    @param rid: Relation ID
    @return: True if any unit on the relation reported a matching response
        with a falsy exit-code (success); False otherwise.
    """
    broker_key = get_broker_rsp_key()
    for unit in related_units(rid):
        rdata = relation_get(rid=rid, unit=unit)
        if rdata.get(broker_key):
            # Unit-targeted reply: only counts when it matches our request
            # id and reports success.
            rsp = CephBrokerRsp(rdata.get(broker_key))
            if rsp.request_id == request.request_id:
                if not rsp.exit_code:
                    return True
        else:
            # The remote unit sent no reply targeted at this unit so either the
            # remote ceph cluster does not support unit targeted replies or it
            # has not processed our request yet.
            if rdata.get('broker_rsp'):
                request_data = json.loads(rdata['broker_rsp'])
                if request_data.get('request-id'):
                    log('Ignoring legacy broker_rsp without unit key as remote '
                        'service supports unit specific replies', level=DEBUG)
                else:
                    log('Using legacy broker_rsp as remote service does not '
                        'supports unit specific replies', level=DEBUG)
                    rsp = CephBrokerRsp(rdata['broker_rsp'])
                    if not rsp.exit_code:
                        return True

    return False
651
652
def get_broker_rsp_key():
    """Return broker response key for this unit

    This is the key that ceph is going to use to pass request status
    information back to this unit
    """
    unit_key = local_unit().replace('/', '-')
    return 'broker-rsp-' + unit_key
660
661
def send_request_if_needed(request, relation='ceph'):
    """Publish a broker request unless an equivalent one is already pending.

    @param request: A CephBrokerRq object
    """
    if is_request_sent(request, relation=relation):
        log('Request already sent but not complete, not sending new request',
            level=DEBUG)
        return
    # Publish the serialised request on every relation id.
    for rid in relation_ids(relation):
        log('Sending request {}'.format(request.request_id), level=DEBUG)
        relation_set(relation_id=rid, broker_req=request.request)
445674
=== modified file 'hooks/charmhelpers/contrib/storage/linux/loopback.py'
--- hooks/charmhelpers/contrib/storage/linux/loopback.py 2015-06-10 15:45:48 +0000
+++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2015-12-01 15:05:49 +0000
@@ -76,3 +76,13 @@
76 check_call(cmd)76 check_call(cmd)
7777
78 return create_loopback(path)78 return create_loopback(path)
79
80
def is_mapped_loopback_device(device):
    """Check whether a device path is a currently mapped loopback device.

    :param device: str: full path to the device (e.g. /dev/loop1).
    :returns: str: path to the backing file when the device is a mapped
        loopback device, otherwise an empty string.
    """
    mapping = loopback_devices()
    return mapping.get(device, "")
7989
=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
--- hooks/charmhelpers/contrib/storage/linux/utils.py 2015-06-10 15:45:48 +0000
+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2015-12-01 15:05:49 +0000
@@ -43,9 +43,10 @@
4343
44 :param block_device: str: Full path of block device to clean.44 :param block_device: str: Full path of block device to clean.
45 '''45 '''
46 # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
46 # sometimes sgdisk exits non-zero; this is OK, dd will clean up47 # sometimes sgdisk exits non-zero; this is OK, dd will clean up
47 call(['sgdisk', '--zap-all', '--mbrtogpt',48 call(['sgdisk', '--zap-all', '--', block_device])
48 '--clear', block_device])49 call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
49 dev_end = check_output(['blockdev', '--getsz',50 dev_end = check_output(['blockdev', '--getsz',
50 block_device]).decode('UTF-8')51 block_device]).decode('UTF-8')
51 gpt_end = int(dev_end.split()[0]) - 10052 gpt_end = int(dev_end.split()[0]) - 100
@@ -67,4 +68,4 @@
67 out = check_output(['mount']).decode('UTF-8')68 out = check_output(['mount']).decode('UTF-8')
68 if is_partition:69 if is_partition:
69 return bool(re.search(device + r"\b", out))70 return bool(re.search(device + r"\b", out))
70 return bool(re.search(device + r"[0-9]+\b", out))71 return bool(re.search(device + r"[0-9]*\b", out))
7172
=== added file 'hooks/charmhelpers/core/files.py'
--- hooks/charmhelpers/core/files.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/files.py 2015-12-01 15:05:49 +0000
@@ -0,0 +1,45 @@
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3
4# Copyright 2014-2015 Canonical Limited.
5#
6# This file is part of charm-helpers.
7#
8# charm-helpers is free software: you can redistribute it and/or modify
9# it under the terms of the GNU Lesser General Public License version 3 as
10# published by the Free Software Foundation.
11#
12# charm-helpers is distributed in the hope that it will be useful,
13# but WITHOUT ANY WARRANTY; without even the implied warranty of
14# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15# GNU Lesser General Public License for more details.
16#
17# You should have received a copy of the GNU Lesser General Public License
18# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
19
20__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
21
22import os
23import subprocess
24
25
def sed(filename, before, after, flags='g'):
    """Search and replace *before* with *after* in filename using sed -i.

    :param filename: relative or absolute file path ('~' is expanded).
    :param before: expression to be replaced (see 'man sed').
    :param after: expression to replace with (see 'man sed').
    :param flags: sed-compatible regex flags, e.g. ``flags="i"`` for a
        case-insensitive match. The ``g`` flag is always specified
        regardless, so you do not need to remember to include it when
        overriding this parameter.
    :returns: If the sed command exit code was zero then return,
        otherwise raise CalledProcessError.
    """
    target = os.path.expanduser(filename)
    expression = r's/{0}/{1}/{2}'.format(before, after, flags)

    return subprocess.check_call(["sed", "-i", "-r", "-e", expression, target])
046
=== modified file 'hooks/charmhelpers/core/hookenv.py'
--- hooks/charmhelpers/core/hookenv.py 2015-06-10 07:35:12 +0000
+++ hooks/charmhelpers/core/hookenv.py 2015-12-01 15:05:49 +0000
@@ -21,7 +21,10 @@
21# Charm Helpers Developers <juju@lists.ubuntu.com>21# Charm Helpers Developers <juju@lists.ubuntu.com>
2222
23from __future__ import print_function23from __future__ import print_function
24import copy
25from distutils.version import LooseVersion
24from functools import wraps26from functools import wraps
27import glob
25import os28import os
26import json29import json
27import yaml30import yaml
@@ -71,6 +74,7 @@
71 res = func(*args, **kwargs)74 res = func(*args, **kwargs)
72 cache[key] = res75 cache[key] = res
73 return res76 return res
77 wrapper._wrapped = func
74 return wrapper78 return wrapper
7579
7680
@@ -170,9 +174,19 @@
170 return os.environ.get('JUJU_RELATION', None)174 return os.environ.get('JUJU_RELATION', None)
171175
172176
@cached
def relation_id(relation_name=None, service_or_unit=None):
    """The relation ID for the current or a specified relation

    With no arguments, returns the id of the relation the current hook is
    running for (JUJU_RELATION_ID). With both relation_name and
    service_or_unit, returns the id of the named relation whose remote
    service matches service_or_unit (a service or unit name).

    :raises ValueError: if exactly one of the two arguments is given.
    """
    if not relation_name and not service_or_unit:
        return os.environ.get('JUJU_RELATION_ID', None)
    elif relation_name and service_or_unit:
        # Normalise "service/0" -> "service" before comparing.
        service_name = service_or_unit.split('/')[0]
        for relid in relation_ids(relation_name):
            remote_service = remote_service_name(relid)
            if remote_service == service_name:
                return relid
    else:
        raise ValueError('Must specify neither or both of relation_name and service_or_unit')
176190
177191
178def local_unit():192def local_unit():
@@ -190,9 +204,20 @@
190 return local_unit().split('/')[0]204 return local_unit().split('/')[0]
191205
192206
@cached
def remote_service_name(relid=None):
    """The remote service name for a given relation-id (or the current relation)"""
    if relid is None:
        unit = remote_unit()
    else:
        units = related_units(relid)
        unit = units[0] if units else None
    if not unit:
        return None
    return unit.split('/')[0]
216
217
def hook_name():
    """The name of the currently executing hook.

    Prefers the JUJU_HOOK_NAME environment variable (set by juju) and
    falls back to the basename of the running script.
    """
    fallback = os.path.basename(sys.argv[0])
    return os.environ.get('JUJU_HOOK_NAME', fallback)
196221
197222
198class Config(dict):223class Config(dict):
@@ -242,29 +267,7 @@
242 self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)267 self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
243 if os.path.exists(self.path):268 if os.path.exists(self.path):
244 self.load_previous()269 self.load_previous()
245270 atexit(self._implicit_save)
246 def __getitem__(self, key):
247 """For regular dict lookups, check the current juju config first,
248 then the previous (saved) copy. This ensures that user-saved values
249 will be returned by a dict lookup.
250
251 """
252 try:
253 return dict.__getitem__(self, key)
254 except KeyError:
255 return (self._prev_dict or {})[key]
256
257 def get(self, key, default=None):
258 try:
259 return self[key]
260 except KeyError:
261 return default
262
263 def keys(self):
264 prev_keys = []
265 if self._prev_dict is not None:
266 prev_keys = self._prev_dict.keys()
267 return list(set(prev_keys + list(dict.keys(self))))
268271
269 def load_previous(self, path=None):272 def load_previous(self, path=None):
270 """Load previous copy of config from disk.273 """Load previous copy of config from disk.
@@ -283,6 +286,9 @@
283 self.path = path or self.path286 self.path = path or self.path
284 with open(self.path) as f:287 with open(self.path) as f:
285 self._prev_dict = json.load(f)288 self._prev_dict = json.load(f)
289 for k, v in copy.deepcopy(self._prev_dict).items():
290 if k not in self:
291 self[k] = v
286292
287 def changed(self, key):293 def changed(self, key):
288 """Return True if the current value for this key is different from294 """Return True if the current value for this key is different from
@@ -314,13 +320,13 @@
314 instance.320 instance.
315321
316 """322 """
317 if self._prev_dict:
318 for k, v in six.iteritems(self._prev_dict):
319 if k not in self:
320 self[k] = v
321 with open(self.path, 'w') as f:323 with open(self.path, 'w') as f:
322 json.dump(self, f)324 json.dump(self, f)
323325
326 def _implicit_save(self):
327 if self.implicit_save:
328 self.save()
329
324330
325@cached331@cached
326def config(scope=None):332def config(scope=None):
@@ -485,6 +491,76 @@
485491
486492
487@cached493@cached
494def peer_relation_id():
495 '''Get a peer relation id if a peer relation has been joined, else None.'''
496 md = metadata()
497 section = md.get('peers')
498 if section:
499 for key in section:
500 relids = relation_ids(key)
501 if relids:
502 return relids[0]
503 return None
504
505
506@cached
507def relation_to_interface(relation_name):
508 """
509 Given the name of a relation, return the interface that relation uses.
510
511 :returns: The interface name, or ``None``.
512 """
513 return relation_to_role_and_interface(relation_name)[1]
514
515
516@cached
517def relation_to_role_and_interface(relation_name):
518 """
519 Given the name of a relation, return the role and the name of the interface
520 that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).
521
522 :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
523 """
524 _metadata = metadata()
525 for role in ('provides', 'requires', 'peer'):
526 interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
527 if interface:
528 return role, interface
529 return None, None
530
531
532@cached
533def role_and_interface_to_relations(role, interface_name):
534 """
535 Given a role and interface name, return a list of relation names for the
536 current charm that use that interface under that role (where role is one
537 of ``provides``, ``requires``, or ``peer``).
538
539 :returns: A list of relation names.
540 """
541 _metadata = metadata()
542 results = []
543 for relation_name, relation in _metadata.get(role, {}).items():
544 if relation['interface'] == interface_name:
545 results.append(relation_name)
546 return results
547
548
549@cached
550def interface_to_relations(interface_name):
551 """
552 Given an interface, return a list of relation names for the current
553 charm that use that interface.
554
555 :returns: A list of relation names.
556 """
557 results = []
558 for role in ('provides', 'requires', 'peer'):
559 results.extend(role_and_interface_to_relations(role, interface_name))
560 return results
561
562
563@cached
488def charm_name():564def charm_name():
489 """Get the name of the current charm as is specified on metadata.yaml"""565 """Get the name of the current charm as is specified on metadata.yaml"""
490 return metadata().get('name')566 return metadata().get('name')
@@ -560,6 +636,38 @@
560 return unit_get('private-address')636 return unit_get('private-address')
561637
562638
639@cached
640def storage_get(attribute="", storage_id=""):
641 """Get storage attributes"""
642 _args = ['storage-get', '--format=json']
643 if storage_id:
644 _args.extend(('-s', storage_id))
645 if attribute:
646 _args.append(attribute)
647 try:
648 return json.loads(subprocess.check_output(_args).decode('UTF-8'))
649 except ValueError:
650 return None
651
652
653@cached
654def storage_list(storage_name=""):
655 """List the storage IDs for the unit"""
656 _args = ['storage-list', '--format=json']
657 if storage_name:
658 _args.append(storage_name)
659 try:
660 return json.loads(subprocess.check_output(_args).decode('UTF-8'))
661 except ValueError:
662 return None
663 except OSError as e:
664 import errno
665 if e.errno == errno.ENOENT:
666 # storage-list does not exist
667 return []
668 raise
669
670
563class UnregisteredHookError(Exception):671class UnregisteredHookError(Exception):
564 """Raised when an undefined hook is called"""672 """Raised when an undefined hook is called"""
565 pass673 pass
@@ -587,10 +695,14 @@
587 hooks.execute(sys.argv)695 hooks.execute(sys.argv)
588 """696 """
589697
590 def __init__(self, config_save=True):698 def __init__(self, config_save=None):
591 super(Hooks, self).__init__()699 super(Hooks, self).__init__()
592 self._hooks = {}700 self._hooks = {}
593 self._config_save = config_save701
702 # For unknown reasons, we allow the Hooks constructor to override
703 # config().implicit_save.
704 if config_save is not None:
705 config().implicit_save = config_save
594706
595 def register(self, name, function):707 def register(self, name, function):
596 """Register a hook"""708 """Register a hook"""
@@ -598,13 +710,16 @@
598710
599 def execute(self, args):711 def execute(self, args):
600 """Execute a registered hook based on args[0]"""712 """Execute a registered hook based on args[0]"""
713 _run_atstart()
601 hook_name = os.path.basename(args[0])714 hook_name = os.path.basename(args[0])
602 if hook_name in self._hooks:715 if hook_name in self._hooks:
603 self._hooks[hook_name]()716 try:
604 if self._config_save:717 self._hooks[hook_name]()
605 cfg = config()718 except SystemExit as x:
606 if cfg.implicit_save:719 if x.code is None or x.code == 0:
607 cfg.save()720 _run_atexit()
721 raise
722 _run_atexit()
608 else:723 else:
609 raise UnregisteredHookError(hook_name)724 raise UnregisteredHookError(hook_name)
610725
@@ -653,6 +768,21 @@
653 subprocess.check_call(['action-fail', message])768 subprocess.check_call(['action-fail', message])
654769
655770
771def action_name():
772 """Get the name of the currently executing action."""
773 return os.environ.get('JUJU_ACTION_NAME')
774
775
776def action_uuid():
777 """Get the UUID of the currently executing action."""
778 return os.environ.get('JUJU_ACTION_UUID')
779
780
781def action_tag():
782 """Get the tag for the currently executing action."""
783 return os.environ.get('JUJU_ACTION_TAG')
784
785
656def status_set(workload_state, message):786def status_set(workload_state, message):
657 """Set the workload state with a message787 """Set the workload state with a message
658788
@@ -682,25 +812,28 @@
682812
683813
684def status_get():814def status_get():
685 """Retrieve the previously set juju workload state815 """Retrieve the previously set juju workload state and message
686816
687 If the status-set command is not found then assume this is juju < 1.23 and817 If the status-get command is not found then assume this is juju < 1.23 and
688 return 'unknown'818 return 'unknown', ""
819
689 """820 """
690 cmd = ['status-get']821 cmd = ['status-get', "--format=json", "--include-data"]
691 try:822 try:
692 raw_status = subprocess.check_output(cmd, universal_newlines=True)823 raw_status = subprocess.check_output(cmd)
693 status = raw_status.rstrip()
694 return status
695 except OSError as e:824 except OSError as e:
696 if e.errno == errno.ENOENT:825 if e.errno == errno.ENOENT:
697 return 'unknown'826 return ('unknown', "")
698 else:827 else:
699 raise828 raise
829 else:
830 status = json.loads(raw_status.decode("UTF-8"))
831 return (status["status"], status["message"])
700832
701833
702def translate_exc(from_exc, to_exc):834def translate_exc(from_exc, to_exc):
703 def inner_translate_exc1(f):835 def inner_translate_exc1(f):
836 @wraps(f)
704 def inner_translate_exc2(*args, **kwargs):837 def inner_translate_exc2(*args, **kwargs):
705 try:838 try:
706 return f(*args, **kwargs)839 return f(*args, **kwargs)
@@ -732,13 +865,80 @@
732@translate_exc(from_exc=OSError, to_exc=NotImplementedError)865@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
733def leader_set(settings=None, **kwargs):866def leader_set(settings=None, **kwargs):
734 """Juju leader set value(s)"""867 """Juju leader set value(s)"""
735 log("Juju leader-set '%s'" % (settings), level=DEBUG)868 # Don't log secrets.
869 # log("Juju leader-set '%s'" % (settings), level=DEBUG)
736 cmd = ['leader-set']870 cmd = ['leader-set']
737 settings = settings or {}871 settings = settings or {}
738 settings.update(kwargs)872 settings.update(kwargs)
739 for k, v in settings.iteritems():873 for k, v in settings.items():
740 if v is None:874 if v is None:
741 cmd.append('{}='.format(k))875 cmd.append('{}='.format(k))
742 else:876 else:
743 cmd.append('{}={}'.format(k, v))877 cmd.append('{}={}'.format(k, v))
744 subprocess.check_call(cmd)878 subprocess.check_call(cmd)
879
880
881@cached
882def juju_version():
883 """Full version string (eg. '1.23.3.1-trusty-amd64')"""
884 # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
885 jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
886 return subprocess.check_output([jujud, 'version'],
887 universal_newlines=True).strip()
888
889
890@cached
891def has_juju_version(minimum_version):
892 """Return True if the Juju version is at least the provided version"""
893 return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
894
895
896_atexit = []
897_atstart = []
898
899
900def atstart(callback, *args, **kwargs):
901 '''Schedule a callback to run before the main hook.
902
903 Callbacks are run in the order they were added.
904
905 This is useful for modules and classes to perform initialization
906 and inject behavior. In particular:
907
908 - Run common code before all of your hooks, such as logging
909 the hook name or interesting relation data.
910 - Defer object or module initialization that requires a hook
911 context until we know there actually is a hook context,
912 making testing easier.
913 - Rather than requiring charm authors to include boilerplate to
914 invoke your helper's behavior, have it run automatically if
915 your object is instantiated or module imported.
916
917 This is not at all useful after your hook framework as been launched.
918 '''
919 global _atstart
920 _atstart.append((callback, args, kwargs))
921
922
923def atexit(callback, *args, **kwargs):
924 '''Schedule a callback to run on successful hook completion.
925
926 Callbacks are run in the reverse order that they were added.'''
927 _atexit.append((callback, args, kwargs))
928
929
930def _run_atstart():
931 '''Hook frameworks must invoke this before running the main hook body.'''
932 global _atstart
933 for callback, args, kwargs in _atstart:
934 callback(*args, **kwargs)
935 del _atstart[:]
936
937
938def _run_atexit():
939 '''Hook frameworks must invoke this after the main hook body has
940 successfully completed. Do not invoke it if the hook fails.'''
941 global _atexit
942 for callback, args, kwargs in reversed(_atexit):
943 callback(*args, **kwargs)
944 del _atexit[:]
745945
=== modified file 'hooks/charmhelpers/core/host.py'
--- hooks/charmhelpers/core/host.py 2015-07-01 13:35:47 +0000
+++ hooks/charmhelpers/core/host.py 2015-12-01 15:05:49 +0000
@@ -63,6 +63,56 @@
63 return service_result63 return service_result
6464
6565
66def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
67 """Pause a system service.
68
69 Stop it, and prevent it from starting again at boot."""
70 stopped = True
71 if service_running(service_name):
72 stopped = service_stop(service_name)
73 upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
74 sysv_file = os.path.join(initd_dir, service_name)
75 if os.path.exists(upstart_file):
76 override_path = os.path.join(
77 init_dir, '{}.override'.format(service_name))
78 with open(override_path, 'w') as fh:
79 fh.write("manual\n")
80 elif os.path.exists(sysv_file):
81 subprocess.check_call(["update-rc.d", service_name, "disable"])
82 else:
83 # XXX: Support SystemD too
84 raise ValueError(
85 "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
86 service_name, upstart_file, sysv_file))
87 return stopped
88
89
90def service_resume(service_name, init_dir="/etc/init",
91 initd_dir="/etc/init.d"):
92 """Resume a system service.
93
94 Reenable starting again at boot. Start the service"""
95 upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
96 sysv_file = os.path.join(initd_dir, service_name)
97 if os.path.exists(upstart_file):
98 override_path = os.path.join(
99 init_dir, '{}.override'.format(service_name))
100 if os.path.exists(override_path):
101 os.unlink(override_path)
102 elif os.path.exists(sysv_file):
103 subprocess.check_call(["update-rc.d", service_name, "enable"])
104 else:
105 # XXX: Support SystemD too
106 raise ValueError(
107 "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
108 service_name, upstart_file, sysv_file))
109
110 started = service_running(service_name)
111 if not started:
112 started = service_start(service_name)
113 return started
114
115
66def service(action, service_name):116def service(action, service_name):
67 """Control a system service"""117 """Control a system service"""
68 cmd = ['service', service_name, action]118 cmd = ['service', service_name, action]
@@ -119,8 +169,9 @@
119169
120170
121def user_exists(username):171def user_exists(username):
172 """Check if a user exists"""
122 try:173 try:
123 user_info = pwd.getpwnam(username)174 pwd.getpwnam(username)
124 user_exists = True175 user_exists = True
125 except KeyError:176 except KeyError:
126 user_exists = False177 user_exists = False
@@ -149,11 +200,7 @@
149200
150def add_user_to_group(username, group):201def add_user_to_group(username, group):
151 """Add a user to a group"""202 """Add a user to a group"""
152 cmd = [203 cmd = ['gpasswd', '-a', username, group]
153 'gpasswd', '-a',
154 username,
155 group
156 ]
157 log("Adding user {} to group {}".format(username, group))204 log("Adding user {} to group {}".format(username, group))
158 subprocess.check_call(cmd)205 subprocess.check_call(cmd)
159206
@@ -263,8 +310,8 @@
263 return system_mounts310 return system_mounts
264311
265312
266
267def fstab_mount(mountpoint):313def fstab_mount(mountpoint):
314 """Mount filesystem using fstab"""
268 cmd_args = ['mount', mountpoint]315 cmd_args = ['mount', mountpoint]
269 try:316 try:
270 subprocess.check_output(cmd_args)317 subprocess.check_output(cmd_args)
@@ -390,25 +437,80 @@
390 return(''.join(random_chars))437 return(''.join(random_chars))
391438
392439
393def list_nics(nic_type):440def is_phy_iface(interface):
441 """Returns True if interface is not virtual, otherwise False."""
442 if interface:
443 sys_net = '/sys/class/net'
444 if os.path.isdir(sys_net):
445 for iface in glob.glob(os.path.join(sys_net, '*')):
446 if '/virtual/' in os.path.realpath(iface):
447 continue
448
449 if interface == os.path.basename(iface):
450 return True
451
452 return False
453
454
455def get_bond_master(interface):
456 """Returns bond master if interface is bond slave otherwise None.
457
458 NOTE: the provided interface is expected to be physical
459 """
460 if interface:
461 iface_path = '/sys/class/net/%s' % (interface)
462 if os.path.exists(iface_path):
463 if '/virtual/' in os.path.realpath(iface_path):
464 return None
465
466 master = os.path.join(iface_path, 'master')
467 if os.path.exists(master):
468 master = os.path.realpath(master)
469 # make sure it is a bond master
470 if os.path.exists(os.path.join(master, 'bonding')):
471 return os.path.basename(master)
472
473 return None
474
475
476def list_nics(nic_type=None):
394 '''Return a list of nics of given type(s)'''477 '''Return a list of nics of given type(s)'''
395 if isinstance(nic_type, six.string_types):478 if isinstance(nic_type, six.string_types):
396 int_types = [nic_type]479 int_types = [nic_type]
397 else:480 else:
398 int_types = nic_type481 int_types = nic_type
482
399 interfaces = []483 interfaces = []
400 for int_type in int_types:484 if nic_type:
401 cmd = ['ip', 'addr', 'show', 'label', int_type + '*']485 for int_type in int_types:
486 cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
487 ip_output = subprocess.check_output(cmd).decode('UTF-8')
488 ip_output = ip_output.split('\n')
489 ip_output = (line for line in ip_output if line)
490 for line in ip_output:
491 if line.split()[1].startswith(int_type):
492 matched = re.search('.*: (' + int_type +
493 r'[0-9]+\.[0-9]+)@.*', line)
494 if matched:
495 iface = matched.groups()[0]
496 else:
497 iface = line.split()[1].replace(":", "")
498
499 if iface not in interfaces:
500 interfaces.append(iface)
501 else:
502 cmd = ['ip', 'a']
402 ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')503 ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
403 ip_output = (line for line in ip_output if line)504 ip_output = (line.strip() for line in ip_output if line)
505
506 key = re.compile('^[0-9]+:\s+(.+):')
404 for line in ip_output:507 for line in ip_output:
405 if line.split()[1].startswith(int_type):508 matched = re.search(key, line)
406 matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line)509 if matched:
407 if matched:510 iface = matched.group(1)
408 interface = matched.groups()[0]511 iface = iface.partition("@")[0]
409 else:512 if iface not in interfaces:
410 interface = line.split()[1].replace(":", "")513 interfaces.append(iface)
411 interfaces.append(interface)
412514
413 return interfaces515 return interfaces
414516
@@ -440,23 +542,6 @@
440 return hwaddr542 return hwaddr
441543
442544
443def get_mac_nic_map():
444 '''Return a dict of macs and their corresponding nics'''
445 cmd = ['ip', '-o', '-0', 'addr', 'list']
446 ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
447 mac_nic_map = {}
448 for line in ip_output:
449 columns = line.split()
450 if 'link/ether' in columns:
451 hwaddr = columns[columns.index('link/ether') + 1]
452 nic = columns[1].replace(':', '')
453 if mac_nic_map.get(hwaddr):
454 mac_nic_map[hwaddr].append(nic)
455 else:
456 mac_nic_map[hwaddr] = [nic]
457 return mac_nic_map
458
459
460def cmp_pkgrevno(package, revno, pkgcache=None):545def cmp_pkgrevno(package, revno, pkgcache=None):
461 '''Compare supplied revno with the revno of the installed package546 '''Compare supplied revno with the revno of the installed package
462547
@@ -485,7 +570,14 @@
485 os.chdir(cur)570 os.chdir(cur)
486571
487572
488def chownr(path, owner, group, follow_links=True):573def chownr(path, owner, group, follow_links=True, chowntopdir=False):
574 """
575 Recursively change user and group ownership of files and directories
576 in given path. Doesn't chown path itself by default, only its children.
577
578 :param bool follow_links: Also Chown links if True
579 :param bool chowntopdir: Also chown path itself if True
580 """
489 uid = pwd.getpwnam(owner).pw_uid581 uid = pwd.getpwnam(owner).pw_uid
490 gid = grp.getgrnam(group).gr_gid582 gid = grp.getgrnam(group).gr_gid
491 if follow_links:583 if follow_links:
@@ -493,6 +585,10 @@
493 else:585 else:
494 chown = os.lchown586 chown = os.lchown
495587
588 if chowntopdir:
589 broken_symlink = os.path.lexists(path) and not os.path.exists(path)
590 if not broken_symlink:
591 chown(path, uid, gid)
496 for root, dirs, files in os.walk(path):592 for root, dirs, files in os.walk(path):
497 for name in dirs + files:593 for name in dirs + files:
498 full = os.path.join(root, name)594 full = os.path.join(root, name)
@@ -503,3 +599,19 @@
503599
504def lchownr(path, owner, group):600def lchownr(path, owner, group):
505 chownr(path, owner, group, follow_links=False)601 chownr(path, owner, group, follow_links=False)
602
603
604def get_total_ram():
605 '''The total amount of system RAM in bytes.
606
607 This is what is reported by the OS, and may be overcommitted when
608 there are multiple containers hosted on the same machine.
609 '''
610 with open('/proc/meminfo', 'r') as f:
611 for line in f.readlines():
612 if line:
613 key, value, unit = line.split()
614 if key == 'MemTotal:':
615 assert unit == 'kB', 'Unknown unit'
616 return int(value) * 1024 # Classic, not KiB.
617 raise NotImplementedError()
506618
=== modified file 'hooks/charmhelpers/core/hugepage.py'
--- hooks/charmhelpers/core/hugepage.py 2015-06-22 09:26:28 +0000
+++ hooks/charmhelpers/core/hugepage.py 2015-12-01 15:05:49 +0000
@@ -1,5 +1,3 @@
1
2#!/usr/bin/env python
3# -*- coding: utf-8 -*-1# -*- coding: utf-8 -*-
42
5# Copyright 2014-2015 Canonical Limited.3# Copyright 2014-2015 Canonical Limited.
@@ -19,36 +17,55 @@
19# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.17# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
2018
21import yaml19import yaml
22from charmhelpers.core.fstab import Fstab20from charmhelpers.core import fstab
23from charmhelpers.core.sysctl import (21from charmhelpers.core import sysctl
24 create,
25)
26from charmhelpers.core.host import (22from charmhelpers.core.host import (
27 add_group,23 add_group,
28 add_user_to_group,24 add_user_to_group,
29 fstab_mount,25 fstab_mount,
30 mkdir,26 mkdir,
31)27)
28from charmhelpers.core.strutils import bytes_from_string
29from subprocess import check_output
30
3231
33def hugepage_support(user, group='hugetlb', nr_hugepages=256,32def hugepage_support(user, group='hugetlb', nr_hugepages=256,
34 max_map_count=65536, mnt_point='/hugepages',33 max_map_count=65536, mnt_point='/run/hugepages/kvm',
35 pagesize='2MB', mount=True):34 pagesize='2MB', mount=True, set_shmmax=False):
35 """Enable hugepages on system.
36
37 Args:
38 user (str) -- Username to allow access to hugepages to
39 group (str) -- Group name to own hugepages
40 nr_hugepages (int) -- Number of pages to reserve
41 max_map_count (int) -- Number of Virtual Memory Areas a process can own
42 mnt_point (str) -- Directory to mount hugepages on
43 pagesize (str) -- Size of hugepages
44 mount (bool) -- Whether to Mount hugepages
45 """
36 group_info = add_group(group)46 group_info = add_group(group)
37 gid = group_info.gr_gid47 gid = group_info.gr_gid
38 add_user_to_group(user, group)48 add_user_to_group(user, group)
49 if max_map_count < 2 * nr_hugepages:
50 max_map_count = 2 * nr_hugepages
39 sysctl_settings = {51 sysctl_settings = {
40 'vm.nr_hugepages': nr_hugepages,52 'vm.nr_hugepages': nr_hugepages,
41 'vm.max_map_count': max_map_count, # 1GB53 'vm.max_map_count': max_map_count,
42 'vm.hugetlb_shm_group': gid,54 'vm.hugetlb_shm_group': gid,
43 }55 }
44 create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')56 if set_shmmax:
57 shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
58 shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
59 if shmmax_minsize > shmmax_current:
60 sysctl_settings['kernel.shmmax'] = shmmax_minsize
61 sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
45 mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)62 mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
46 fstab = Fstab()63 lfstab = fstab.Fstab()
47 fstab_entry = fstab.get_entry_by_attr('mountpoint', mnt_point)64 fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
48 if fstab_entry:65 if fstab_entry:
49 fstab.remove_entry(fstab_entry)66 lfstab.remove_entry(fstab_entry)
50 entry = fstab.Entry('nodev', mnt_point, 'hugetlbfs',67 entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
51 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)68 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
52 fstab.add_entry(entry)69 lfstab.add_entry(entry)
53 if mount:70 if mount:
54 fstab_mount(mnt_point)71 fstab_mount(mnt_point)
5572
=== added file 'hooks/charmhelpers/core/kernel.py'
--- hooks/charmhelpers/core/kernel.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/kernel.py 2015-12-01 15:05:49 +0000
@@ -0,0 +1,68 @@
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3
4# Copyright 2014-2015 Canonical Limited.
5#
6# This file is part of charm-helpers.
7#
8# charm-helpers is free software: you can redistribute it and/or modify
9# it under the terms of the GNU Lesser General Public License version 3 as
10# published by the Free Software Foundation.
11#
12# charm-helpers is distributed in the hope that it will be useful,
13# but WITHOUT ANY WARRANTY; without even the implied warranty of
14# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15# GNU Lesser General Public License for more details.
16#
17# You should have received a copy of the GNU Lesser General Public License
18# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
19
20__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
21
22from charmhelpers.core.hookenv import (
23 log,
24 INFO
25)
26
27from subprocess import check_call, check_output
28import re
29
30
31def modprobe(module, persist=True):
32 """Load a kernel module and configure for auto-load on reboot."""
33 cmd = ['modprobe', module]
34
35 log('Loading kernel module %s' % module, level=INFO)
36
37 check_call(cmd)
38 if persist:
39 with open('/etc/modules', 'r+') as modules:
40 if module not in modules.read():
41 modules.write(module)
42
43
44def rmmod(module, force=False):
45 """Remove a module from the linux kernel"""
46 cmd = ['rmmod']
47 if force:
48 cmd.append('-f')
49 cmd.append(module)
50 log('Removing kernel module %s' % module, level=INFO)
51 return check_call(cmd)
52
53
54def lsmod():
55 """Shows what kernel modules are currently loaded"""
56 return check_output(['lsmod'],
57 universal_newlines=True)
58
59
60def is_module_loaded(module):
61 """Checks if a kernel module is already loaded"""
62 matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
63 return len(matches) > 0
64
65
66def update_initramfs(version='all'):
67 """Updates an initramfs image"""
68 return check_call(["update-initramfs", "-k", version, "-u"])
069
=== modified file 'hooks/charmhelpers/core/services/base.py'
--- hooks/charmhelpers/core/services/base.py 2015-06-10 07:35:12 +0000
+++ hooks/charmhelpers/core/services/base.py 2015-12-01 15:05:49 +0000
@@ -128,15 +128,18 @@
128 """128 """
129 Handle the current hook by doing The Right Thing with the registered services.129 Handle the current hook by doing The Right Thing with the registered services.
130 """130 """
131 hook_name = hookenv.hook_name()131 hookenv._run_atstart()
132 if hook_name == 'stop':132 try:
133 self.stop_services()133 hook_name = hookenv.hook_name()
134 else:134 if hook_name == 'stop':
135 self.reconfigure_services()135 self.stop_services()
136 self.provide_data()136 else:
137 cfg = hookenv.config()137 self.reconfigure_services()
138 if cfg.implicit_save:138 self.provide_data()
139 cfg.save()139 except SystemExit as x:
140 if x.code is None or x.code == 0:
141 hookenv._run_atexit()
142 hookenv._run_atexit()
140143
141 def provide_data(self):144 def provide_data(self):
142 """145 """
143146
=== modified file 'hooks/charmhelpers/core/services/helpers.py'
--- hooks/charmhelpers/core/services/helpers.py 2015-06-12 12:22:51 +0000
+++ hooks/charmhelpers/core/services/helpers.py 2015-12-01 15:05:49 +0000
@@ -16,6 +16,7 @@
1616
17import os17import os
18import yaml18import yaml
19
19from charmhelpers.core import hookenv20from charmhelpers.core import hookenv
20from charmhelpers.core import host21from charmhelpers.core import host
21from charmhelpers.core import templating22from charmhelpers.core import templating
@@ -240,42 +241,43 @@
240 action.241 action.
241242
242 :param str source: The template source file, relative to243 :param str source: The template source file, relative to
243 `$CHARM_DIR/templates`244 `$CHARM_DIR/templates`
244245
245 :param str target: The target to write the rendered template to246 :param str target: The target to write the rendered template to
246 :param str owner: The owner of the rendered file247 :param str owner: The owner of the rendered file
247 :param str group: The group of the rendered file248 :param str group: The group of the rendered file
248 :param int perms: The permissions of the rendered file249 :param int perms: The permissions of the rendered file
249 :param list template_searchpath: List of paths to search for template in
250 :param partial on_change_action: functools partial to be executed when250 :param partial on_change_action: functools partial to be executed when
251 rendered file changes251 rendered file changes
252 :param jinja2 loader template_loader: A jinja2 template loader
252 """253 """
253 def __init__(self, source, target,254 def __init__(self, source, target,
254 owner='root', group='root', perms=0o444,255 owner='root', group='root', perms=0o444,
255 template_searchpath=None, on_change_action=None):256 on_change_action=None, template_loader=None):
256 self.source = source257 self.source = source
257 self.target = target258 self.target = target
258 self.owner = owner259 self.owner = owner
259 self.group = group260 self.group = group
260 self.perms = perms261 self.perms = perms
261 self.template_searchpath = template_searchpath
262 self.on_change_action = on_change_action262 self.on_change_action = on_change_action
263 self.template_loader = template_loader
263264
264 def __call__(self, manager, service_name, event_name):265 def __call__(self, manager, service_name, event_name):
265 pre_checksum = ''266 pre_checksum = ''
266 if self.on_change_action and os.path.isfile(self.target):267 if self.on_change_action and os.path.isfile(self.target):
267 pre_checksum = host.file_hash(self.target)268 pre_checksum = host.file_hash(self.target)
268 print pre_checksum
269 service = manager.get_service(service_name)269 service = manager.get_service(service_name)
270 context = {}270 context = {}
271 for ctx in service.get('required_data', []):271 for ctx in service.get('required_data', []):
272 context.update(ctx)272 context.update(ctx)
273 templating.render(self.source, self.target, context,273 templating.render(self.source, self.target, context,
274 self.owner, self.group, self.perms,274 self.owner, self.group, self.perms,
275 self.template_searchpath)275 template_loader=self.template_loader)
276 if self.on_change_action:276 if self.on_change_action:
277 if pre_checksum == host.file_hash(self.target):277 if pre_checksum == host.file_hash(self.target):
278 print "No change detected " + self.target278 hookenv.log(
279 'No change detected: {}'.format(self.target),
280 hookenv.DEBUG)
279 else:281 else:
280 self.on_change_action()282 self.on_change_action()
281283
282284
=== modified file 'hooks/charmhelpers/core/strutils.py'
--- hooks/charmhelpers/core/strutils.py 2015-06-10 07:35:12 +0000
+++ hooks/charmhelpers/core/strutils.py 2015-12-01 15:05:49 +0000
@@ -18,6 +18,7 @@
18# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.18# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1919
20import six20import six
21import re
2122
2223
23def bool_from_string(value):24def bool_from_string(value):
@@ -40,3 +41,32 @@
4041
41 msg = "Unable to interpret string value '%s' as boolean" % (value)42 msg = "Unable to interpret string value '%s' as boolean" % (value)
42 raise ValueError(msg)43 raise ValueError(msg)
44
45
46def bytes_from_string(value):
47 """Interpret human readable string value as bytes.
48
49 Returns int
50 """
51 BYTE_POWER = {
52 'K': 1,
53 'KB': 1,
54 'M': 2,
55 'MB': 2,
56 'G': 3,
57 'GB': 3,
58 'T': 4,
59 'TB': 4,
60 'P': 5,
61 'PB': 5,
62 }
63 if isinstance(value, six.string_types):
64 value = six.text_type(value)
65 else:
66 msg = "Unable to interpret non-string value '%s' as boolean" % (value)
67 raise ValueError(msg)
68 matches = re.match("([0-9]+)([a-zA-Z]+)", value)
69 if not matches:
70 msg = "Unable to interpret string value '%s' as bytes" % (value)
71 raise ValueError(msg)
72 return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
4373
=== modified file 'hooks/charmhelpers/core/templating.py'
--- hooks/charmhelpers/core/templating.py 2015-06-17 12:23:31 +0000
+++ hooks/charmhelpers/core/templating.py 2015-12-01 15:05:49 +0000
@@ -21,8 +21,7 @@
2121
2222
23def render(source, target, context, owner='root', group='root',23def render(source, target, context, owner='root', group='root',
24 perms=0o444, templates_dir=None, encoding='UTF-8',24 perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
25 template_searchpath=None):
26 """25 """
27 Render a template.26 Render a template.
2827
@@ -41,7 +40,7 @@
41 this will attempt to use charmhelpers.fetch.apt_install to install it.40 this will attempt to use charmhelpers.fetch.apt_install to install it.
42 """41 """
43 try:42 try:
44 from jinja2 import ChoiceLoader, FileSystemLoader, Environment, exceptions43 from jinja2 import FileSystemLoader, Environment, exceptions
45 except ImportError:44 except ImportError:
46 try:45 try:
47 from charmhelpers.fetch import apt_install46 from charmhelpers.fetch import apt_install
@@ -51,25 +50,26 @@
51 level=hookenv.ERROR)50 level=hookenv.ERROR)
52 raise51 raise
53 apt_install('python-jinja2', fatal=True)52 apt_install('python-jinja2', fatal=True)
54 from jinja2 import ChoiceLoader, FileSystemLoader, Environment, exceptions53 from jinja2 import FileSystemLoader, Environment, exceptions
5554
56 if template_searchpath:55 if template_loader:
57 fs_loaders = []56 template_env = Environment(loader=template_loader)
58 for tmpl_dir in template_searchpath:
59 fs_loaders.append(FileSystemLoader(tmpl_dir))
60 loader = ChoiceLoader(fs_loaders)
61 else:57 else:
62 if templates_dir is None:58 if templates_dir is None:
63 templates_dir = os.path.join(hookenv.charm_dir(), 'templates')59 templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
64 loader = Environment(loader=FileSystemLoader(templates_dir))60 template_env = Environment(loader=FileSystemLoader(templates_dir))
65 try:61 try:
66 source = source62 source = source
67 template = loader.get_template(source)63 template = template_env.get_template(source)
68 except exceptions.TemplateNotFound as e:64 except exceptions.TemplateNotFound as e:
69 hookenv.log('Could not load template %s from %s.' %65 hookenv.log('Could not load template %s from %s.' %
70 (source, templates_dir),66 (source, templates_dir),
71 level=hookenv.ERROR)67 level=hookenv.ERROR)
72 raise e68 raise e
73 content = template.render(context)69 content = template.render(context)
74 host.mkdir(os.path.dirname(target), owner, group, perms=0o755)70 target_dir = os.path.dirname(target)
71 if not os.path.exists(target_dir):
72 # This is a terrible default directory permission, as the file
73 # or its siblings will often contain secrets.
74 host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
75 host.write_file(target, content.encode(encoding), owner, group, perms)75 host.write_file(target, content.encode(encoding), owner, group, perms)
7676
=== modified file 'hooks/charmhelpers/core/unitdata.py'
--- hooks/charmhelpers/core/unitdata.py 2015-06-10 07:35:12 +0000
+++ hooks/charmhelpers/core/unitdata.py 2015-12-01 15:05:49 +0000
@@ -152,6 +152,7 @@
152import collections152import collections
153import contextlib153import contextlib
154import datetime154import datetime
155import itertools
155import json156import json
156import os157import os
157import pprint158import pprint
@@ -164,8 +165,7 @@
164class Storage(object):165class Storage(object):
165 """Simple key value database for local unit state within charms.166 """Simple key value database for local unit state within charms.
166167
167 Modifications are automatically committed at hook exit. That's168 Modifications are not persisted unless :meth:`flush` is called.
168 currently regardless of exit code.
169169
170 To support dicts, lists, integer, floats, and booleans values170 To support dicts, lists, integer, floats, and booleans values
171 are automatically json encoded/decoded.171 are automatically json encoded/decoded.
@@ -173,8 +173,11 @@
173 def __init__(self, path=None):173 def __init__(self, path=None):
174 self.db_path = path174 self.db_path = path
175 if path is None:175 if path is None:
176 self.db_path = os.path.join(176 if 'UNIT_STATE_DB' in os.environ:
177 os.environ.get('CHARM_DIR', ''), '.unit-state.db')177 self.db_path = os.environ['UNIT_STATE_DB']
178 else:
179 self.db_path = os.path.join(
180 os.environ.get('CHARM_DIR', ''), '.unit-state.db')
178 self.conn = sqlite3.connect('%s' % self.db_path)181 self.conn = sqlite3.connect('%s' % self.db_path)
179 self.cursor = self.conn.cursor()182 self.cursor = self.conn.cursor()
180 self.revision = None183 self.revision = None
@@ -189,15 +192,8 @@
189 self.conn.close()192 self.conn.close()
190 self._closed = True193 self._closed = True
191194
192 def _scoped_query(self, stmt, params=None):
193 if params is None:
194 params = []
195 return stmt, params
196
197 def get(self, key, default=None, record=False):195 def get(self, key, default=None, record=False):
198 self.cursor.execute(196 self.cursor.execute('select data from kv where key=?', [key])
199 *self._scoped_query(
200 'select data from kv where key=?', [key]))
201 result = self.cursor.fetchone()197 result = self.cursor.fetchone()
202 if not result:198 if not result:
203 return default199 return default
@@ -206,33 +202,81 @@
206 return json.loads(result[0])202 return json.loads(result[0])
207203
208 def getrange(self, key_prefix, strip=False):204 def getrange(self, key_prefix, strip=False):
209 stmt = "select key, data from kv where key like '%s%%'" % key_prefix205 """
210 self.cursor.execute(*self._scoped_query(stmt))206 Get a range of keys starting with a common prefix as a mapping of
207 keys to values.
208
209 :param str key_prefix: Common prefix among all keys
210 :param bool strip: Optionally strip the common prefix from the key
211 names in the returned dict
212 :return dict: A (possibly empty) dict of key-value mappings
213 """
214 self.cursor.execute("select key, data from kv where key like ?",
215 ['%s%%' % key_prefix])
211 result = self.cursor.fetchall()216 result = self.cursor.fetchall()
212217
213 if not result:218 if not result:
214 return None219 return {}
215 if not strip:220 if not strip:
216 key_prefix = ''221 key_prefix = ''
217 return dict([222 return dict([
218 (k[len(key_prefix):], json.loads(v)) for k, v in result])223 (k[len(key_prefix):], json.loads(v)) for k, v in result])
219224
220 def update(self, mapping, prefix=""):225 def update(self, mapping, prefix=""):
226 """
227 Set the values of multiple keys at once.
228
229 :param dict mapping: Mapping of keys to values
230 :param str prefix: Optional prefix to apply to all keys in `mapping`
231 before setting
232 """
221 for k, v in mapping.items():233 for k, v in mapping.items():
222 self.set("%s%s" % (prefix, k), v)234 self.set("%s%s" % (prefix, k), v)
223235
224 def unset(self, key):236 def unset(self, key):
237 """
238 Remove a key from the database entirely.
239 """
225 self.cursor.execute('delete from kv where key=?', [key])240 self.cursor.execute('delete from kv where key=?', [key])
226 if self.revision and self.cursor.rowcount:241 if self.revision and self.cursor.rowcount:
227 self.cursor.execute(242 self.cursor.execute(
228 'insert into kv_revisions values (?, ?, ?)',243 'insert into kv_revisions values (?, ?, ?)',
229 [key, self.revision, json.dumps('DELETED')])244 [key, self.revision, json.dumps('DELETED')])
230245
246 def unsetrange(self, keys=None, prefix=""):
247 """
248 Remove a range of keys starting with a common prefix, from the database
249 entirely.
250
251 :param list keys: List of keys to remove.
252 :param str prefix: Optional prefix to apply to all keys in ``keys``
253 before removing.
254 """
255 if keys is not None:
256 keys = ['%s%s' % (prefix, key) for key in keys]
257 self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
258 if self.revision and self.cursor.rowcount:
259 self.cursor.execute(
260 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
261 list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
262 else:
263 self.cursor.execute('delete from kv where key like ?',
264 ['%s%%' % prefix])
265 if self.revision and self.cursor.rowcount:
266 self.cursor.execute(
267 'insert into kv_revisions values (?, ?, ?)',
268 ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
269
231 def set(self, key, value):270 def set(self, key, value):
271 """
272 Set a value in the database.
273
274 :param str key: Key to set the value for
275 :param value: Any JSON-serializable value to be set
276 """
232 serialized = json.dumps(value)277 serialized = json.dumps(value)
233278
234 self.cursor.execute(279 self.cursor.execute('select data from kv where key=?', [key])
235 'select data from kv where key=?', [key])
236 exists = self.cursor.fetchone()280 exists = self.cursor.fetchone()
237281
238 # Skip mutations to the same value282 # Skip mutations to the same value
239283
=== modified file 'hooks/charmhelpers/fetch/__init__.py'
--- hooks/charmhelpers/fetch/__init__.py 2015-06-10 07:35:12 +0000
+++ hooks/charmhelpers/fetch/__init__.py 2015-12-01 15:05:49 +0000
@@ -90,6 +90,14 @@
90 'kilo/proposed': 'trusty-proposed/kilo',90 'kilo/proposed': 'trusty-proposed/kilo',
91 'trusty-kilo/proposed': 'trusty-proposed/kilo',91 'trusty-kilo/proposed': 'trusty-proposed/kilo',
92 'trusty-proposed/kilo': 'trusty-proposed/kilo',92 'trusty-proposed/kilo': 'trusty-proposed/kilo',
93 # Liberty
94 'liberty': 'trusty-updates/liberty',
95 'trusty-liberty': 'trusty-updates/liberty',
96 'trusty-liberty/updates': 'trusty-updates/liberty',
97 'trusty-updates/liberty': 'trusty-updates/liberty',
98 'liberty/proposed': 'trusty-proposed/liberty',
99 'trusty-liberty/proposed': 'trusty-proposed/liberty',
100 'trusty-proposed/liberty': 'trusty-proposed/liberty',
93}101}
94102
95# The order of this list is very important. Handlers should be listed in from103# The order of this list is very important. Handlers should be listed in from
@@ -215,19 +223,27 @@
215 _run_apt_command(cmd, fatal)223 _run_apt_command(cmd, fatal)
216224
217225
226def apt_mark(packages, mark, fatal=False):
227 """Flag one or more packages using apt-mark"""
228 log("Marking {} as {}".format(packages, mark))
229 cmd = ['apt-mark', mark]
230 if isinstance(packages, six.string_types):
231 cmd.append(packages)
232 else:
233 cmd.extend(packages)
234
235 if fatal:
236 subprocess.check_call(cmd, universal_newlines=True)
237 else:
238 subprocess.call(cmd, universal_newlines=True)
239
240
218def apt_hold(packages, fatal=False):241def apt_hold(packages, fatal=False):
219 """Hold one or more packages"""242 return apt_mark(packages, 'hold', fatal=fatal)
220 cmd = ['apt-mark', 'hold']243
221 if isinstance(packages, six.string_types):244
222 cmd.append(packages)245def apt_unhold(packages, fatal=False):
223 else:246 return apt_mark(packages, 'unhold', fatal=fatal)
224 cmd.extend(packages)
225 log("Holding {}".format(packages))
226
227 if fatal:
228 subprocess.check_call(cmd)
229 else:
230 subprocess.call(cmd)
231247
232248
233def add_source(source, key=None):249def add_source(source, key=None):
@@ -370,8 +386,9 @@
370 for handler in handlers:386 for handler in handlers:
371 try:387 try:
372 installed_to = handler.install(source, *args, **kwargs)388 installed_to = handler.install(source, *args, **kwargs)
373 except UnhandledSource:389 except UnhandledSource as e:
374 pass390 log('Install source attempt unsuccessful: {}'.format(e),
391 level='WARNING')
375 if not installed_to:392 if not installed_to:
376 raise UnhandledSource("No handler found for source {}".format(source))393 raise UnhandledSource("No handler found for source {}".format(source))
377 return installed_to394 return installed_to
378395
=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
--- hooks/charmhelpers/fetch/archiveurl.py 2015-06-10 07:35:12 +0000
+++ hooks/charmhelpers/fetch/archiveurl.py 2015-12-01 15:05:49 +0000
@@ -77,6 +77,8 @@
77 def can_handle(self, source):77 def can_handle(self, source):
78 url_parts = self.parse_url(source)78 url_parts = self.parse_url(source)
79 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):79 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
80 # XXX: Why is this returning a boolean and a string? It's
81 # doomed to fail since "bool(can_handle('foo://'))" will be True.
80 return "Wrong source type"82 return "Wrong source type"
81 if get_archive_handler(self.base_url(source)):83 if get_archive_handler(self.base_url(source)):
82 return True84 return True
@@ -155,7 +157,11 @@
155 else:157 else:
156 algorithms = hashlib.algorithms_available158 algorithms = hashlib.algorithms_available
157 if key in algorithms:159 if key in algorithms:
158 check_hash(dld_file, value, key)160 if len(value) != 1:
161 raise TypeError(
162 "Expected 1 hash value, not %d" % len(value))
163 expected = value[0]
164 check_hash(dld_file, expected, key)
159 if checksum:165 if checksum:
160 check_hash(dld_file, checksum, hash_type)166 check_hash(dld_file, checksum, hash_type)
161 return extract(dld_file, dest)167 return extract(dld_file, dest)
162168
=== modified file 'hooks/charmhelpers/fetch/giturl.py'
--- hooks/charmhelpers/fetch/giturl.py 2015-06-10 07:35:12 +0000
+++ hooks/charmhelpers/fetch/giturl.py 2015-12-01 15:05:49 +0000
@@ -67,7 +67,7 @@
67 try:67 try:
68 self.clone(source, dest_dir, branch, depth)68 self.clone(source, dest_dir, branch, depth)
69 except GitCommandError as e:69 except GitCommandError as e:
70 raise UnhandledSource(e.message)70 raise UnhandledSource(e)
71 except OSError as e:71 except OSError as e:
72 raise UnhandledSource(e.strerror)72 raise UnhandledSource(e.strerror)
73 return dest_dir73 return dest_dir
7474
=== modified file 'hooks/services.py'
--- hooks/services.py 2015-09-14 16:44:47 +0000
+++ hooks/services.py 2015-12-01 15:05:49 +0000
@@ -2,6 +2,9 @@
2from charmhelpers.core import hookenv2from charmhelpers.core import hookenv
3from charmhelpers.core.services.base import ServiceManager3from charmhelpers.core.services.base import ServiceManager
4from charmhelpers.core.services import helpers4from charmhelpers.core.services import helpers
5from charmhelpers.contrib.openstack.templating import get_loader
6from charmhelpers.core.services.base import service_restart
7from charmhelpers.contrib.openstack.utils import os_release
58
6import vpp_utils9import vpp_utils
7import vpp_data10import vpp_data
@@ -9,6 +12,7 @@
912
10def manage():13def manage():
11 config = hookenv.config()14 config = hookenv.config()
15 release = os_release('neutron-common')
12 manager = ServiceManager([16 manager = ServiceManager([
13 # Actions which have no prerequisites and can be rerun17 # Actions which have no prerequisites and can be rerun
14 {18 {
@@ -18,6 +22,7 @@
18 ],22 ],
19 'provided_data': [23 'provided_data': [
20 vpp_data.NeutronPluginRelation(),24 vpp_data.NeutronPluginRelation(),
25 vpp_data.AMQPRelation(),
21 ],26 ],
22 },27 },
23 # Install hugepages and components reliant on huge pages28 # Install hugepages and components reliant on huge pages
@@ -34,12 +39,14 @@
34 {39 {
35 'service': 'vpp-compute-render',40 'service': 'vpp-compute-render',
36 'required_data': [41 'required_data': [
42 vpp_data.AMQPRelation(),
37 vpp_data.SystemResources(),43 vpp_data.SystemResources(),
38 vpp_data.NeutronPluginRelation(),44 vpp_data.NeutronPluginRelation(),
39 vpp_data.ODLControllerRelation(),45 vpp_data.ODLControllerRelation(),
40 config,46 config,
41 vpp_data.ConfigTranslation(),47 vpp_data.ConfigTranslation(),
42 vpp_data.PCIInfo(),48 vpp_data.PCIInfo(),
49 vpp_data.NeutronPluginAPIRelation(),
43 ],50 ],
44 'data_ready': [51 'data_ready': [
45 vpp_utils.bind_orphaned_net_interfaces,52 vpp_utils.bind_orphaned_net_interfaces,
@@ -53,6 +60,27 @@
53 target='/etc/apparmor.d/libvirt/TEMPLATE.qemu',60 target='/etc/apparmor.d/libvirt/TEMPLATE.qemu',
54 on_change_action=(partial(vpp_utils.reload_apparmor)),61 on_change_action=(partial(vpp_utils.reload_apparmor)),
55 ),62 ),
63 helpers.render_template(
64 source='neutron.conf',
65 template_loader=get_loader('templates/', release),
66 target='/etc/neutron/neutron.conf',
67 on_change_action=(partial(service_restart,
68 'neutron-dhcp-agent')),
69 ),
70 helpers.render_template(
71 source='dhcp_agent.ini',
72 template_loader=get_loader('templates/', release),
73 target='/etc/neutron/dhcp_agent.ini',
74 on_change_action=(partial(service_restart,
75 'neutron-dhcp-agent')),
76 ),
77 helpers.render_template(
78 source='metadata_agent.ini',
79 template_loader=get_loader('templates/', release),
80 target='/etc/neutron/metadata_agent.ini',
81 on_change_action=(partial(service_restart,
82 'neutron-metadata-agent')),
83 ),
56 vpp_utils.odl_node_registration,84 vpp_utils.odl_node_registration,
57 vpp_utils.odl_register_macs,85 vpp_utils.odl_register_macs,
58 vpp_utils.bind_orphaned_net_interfaces,86 vpp_utils.bind_orphaned_net_interfaces,
5987
=== modified file 'hooks/vpp_data.py'
--- hooks/vpp_data.py 2015-08-17 06:56:01 +0000
+++ hooks/vpp_data.py 2015-12-01 15:05:49 +0000
@@ -2,16 +2,47 @@
2import glob2import glob
3import os3import os
4import json4import json
5from charmhelpers.contrib.openstack import context
5from charmhelpers.core.services import helpers6from charmhelpers.core.services import helpers
6from charmhelpers.core.hookenv import(7from charmhelpers.core.hookenv import(
7 config,8 config,
8 log,9 log,
9)10)
11import uuid
1012
11VLAN = 'vlan'13VLAN = 'vlan'
12VXLAN = 'vxlan'14VXLAN = 'vxlan'
13GRE = 'gre'15GRE = 'gre'
14OVERLAY_NET_TYPES = [VXLAN, GRE]16OVERLAY_NET_TYPES = [VXLAN, GRE]
17NEUTRON_CONF_DIR = "/etc/neutron"
18SHARED_SECRET_FILE = "/etc/neutron/secret.txt"
19
20class NeutronPluginAPIRelation(helpers.RelationContext):
21 name = 'neutron-plugin-api'
22 interface = 'neutron-plugin-api'
23
24 def get_first_data(self):
25 if self.get('neutron-plugin-api') and len(self['neutron-plugin-api']):
26 return self['neutron-plugin-api'][0]
27 else:
28 return {}
29
30 def get_data(self):
31 super(NeutronPluginAPIRelation, self).get_data()
32 api_server = self.get_first_data()
33 self['service_host'] = api_server.get('service_host')
34 self['service_protocol'] = api_server.get('service_protocol', 'http')
35 self['service_port'] = api_server.get('service_port')
36 self['admin_tenant_name'] = api_server.get('service_tenant')
37 self['admin_user'] = api_server.get('service_username')
38 self['admin_password'] = api_server.get('service_password')
39 self['region'] = api_server.get('region')
40
41 def is_ready(self):
42 if 'service_password' in self.get_first_data():
43 return True
44 else:
45 return False
1546
1647
17class ODLControllerRelation(helpers.RelationContext):48class ODLControllerRelation(helpers.RelationContext):
@@ -47,6 +78,22 @@
47 name = 'neutron-plugin'78 name = 'neutron-plugin'
48 interface = 'neutron-plugin-api-subordinate'79 interface = 'neutron-plugin-api-subordinate'
4980
81 def __init__(self, *args, **kwargs):
82 super(NeutronPluginRelation, self).__init__(*args, **kwargs)
83 self['shared_secret'] = self.get_metadata_secret()
84
85 def get_metadata_secret(self):
86 secret = None
87 if os.path.exists(os.path.dirname(SHARED_SECRET_FILE)):
88 if os.path.exists(SHARED_SECRET_FILE):
89 with open(SHARED_SECRET_FILE, 'r') as secret_file:
90 secret = secret_file.read().strip()
91 else:
92 secret = str(uuid.uuid4())
93 with open(SHARED_SECRET_FILE, 'w') as secret_file:
94 secret_file.write(secret)
95 return secret
96
50 def provide_data(self):97 def provide_data(self):
51 # Add sections and tuples to insert values into neutron-server's98 # Add sections and tuples to insert values into neutron-server's
52 # neutron.conf e.g.99 # neutron.conf e.g.
@@ -83,6 +130,7 @@
83 relation_info = {130 relation_info = {
84 'neutron-plugin': 'odl',131 'neutron-plugin': 'odl',
85 'subordinate_configuration': json.dumps(principle_config),132 'subordinate_configuration': json.dumps(principle_config),
133 'metadata-shared-secret': self['shared_secret'],
86 }134 }
87 return relation_info135 return relation_info
88136
@@ -179,3 +227,30 @@
179 'net': tmp_dict.get('net'),227 'net': tmp_dict.get('net'),
180 }]228 }]
181 return mac_net_config229 return mac_net_config
230
231
232class AMQPRelation(helpers.RelationContext):
233 name = 'amqp'
234 interface = 'rabbitmq'
235
236 def __init__(self, *args, **kwargs):
237 self.ctxt = context.AMQPContext(ssl_dir=NEUTRON_CONF_DIR)()
238 super(AMQPRelation, self).__init__(*args, **kwargs)
239
240 def get_data(self):
241 super(AMQPRelation, self).get_data()
242 for key, value in self.ctxt.iteritems():
243 self[key] = value
244
245 def provide_data(self):
246 relation_info = {
247 'username': config('rabbit-user'),
248 'vhost': config('rabbit-vhost'),
249 }
250 return relation_info
251
252 def is_ready(self):
253 if self.ctxt.get('rabbitmq_password'):
254 return True
255 else:
256 return False
182257
=== modified file 'hooks/vpp_utils.py'
--- hooks/vpp_utils.py 2015-09-14 16:44:47 +0000
+++ hooks/vpp_utils.py 2015-12-01 15:05:49 +0000
@@ -24,7 +24,8 @@
24ODL_MOUNT_PATH = ('/restconf/config/opendaylight-inventory:nodes/node/'24ODL_MOUNT_PATH = ('/restconf/config/opendaylight-inventory:nodes/node/'
25 'controller-config/yang-ext:mount/config:modules')25 'controller-config/yang-ext:mount/config:modules')
2626
27PACKAGES = ['python-psutil', 'gcc', 'make', 'python-crypto', 'qemu', 'seabios']27PACKAGES = ['python-psutil', 'gcc', 'make', 'python-crypto', 'qemu', 'seabios',
28 'neutron-dhcp-agent']
2829
2930
30def install_packages(servicename):31def install_packages(servicename):
3132
=== modified file 'metadata.yaml'
--- metadata.yaml 2015-07-21 15:18:07 +0000
+++ metadata.yaml 2015-12-01 15:05:49 +0000
@@ -17,3 +17,7 @@
17 container:17 container:
18 interface: juju-info18 interface: juju-info
19 scope: container19 scope: container
20 amqp:
21 interface: rabbitmq
22 neutron-plugin-api:
23 interface: neutron-plugin-api
2024
=== added directory 'templates/icehouse'
=== added file 'templates/icehouse/dhcp_agent.ini'
--- templates/icehouse/dhcp_agent.ini 1970-01-01 00:00:00 +0000
+++ templates/icehouse/dhcp_agent.ini 2015-12-01 15:05:49 +0000
@@ -0,0 +1,13 @@
1###############################################################################
2# [ WARNING ]
3# Configuration file maintained by Juju. Local changes may be overwritten.
4###############################################################################
5[DEFAULT]
6debug = {{ debug }}
7resync_interval = 5
8interface_driver = neutron.agent.linux.interface.NSNullDriver
9dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
10use_namespaces = True
11enable_isolated_metadata = True
12enable_metadata_network = True
13
014
=== added file 'templates/icehouse/metadata_agent.ini'
--- templates/icehouse/metadata_agent.ini 1970-01-01 00:00:00 +0000
+++ templates/icehouse/metadata_agent.ini 2015-12-01 15:05:49 +0000
@@ -0,0 +1,15 @@
1###############################################################################
2# [ WARNING ]
3# Configuration file maintained by Juju. Local changes may be overwritten.
4###############################################################################
5# Metadata service seems to cache neutron api url from keystone so trigger
6
7[DEFAULT]
8auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v2.0
9auth_region = {{ region }}
10admin_tenant_name = {{ admin_tenant_name }}
11admin_user = {{ admin_user }}
12admin_password = {{ admin_password }}
13nova_metadata_port = 8775
14metadata_proxy_shared_secret = {{ shared_secret }}
15cache_url = memory://?default_ttl=5
016
=== added file 'templates/icehouse/neutron.conf'
--- templates/icehouse/neutron.conf 1970-01-01 00:00:00 +0000
+++ templates/icehouse/neutron.conf 2015-12-01 15:05:49 +0000
@@ -0,0 +1,31 @@
1# icehouse
2###############################################################################
3# [ WARNING ]
4# Configuration file maintained by Juju. Local changes may be overwritten.
5# Config managed by neutron-openvswitch charm
6###############################################################################
7[DEFAULT]
8verbose = {{ verbose }}
9debug = {{ debug }}
10state_path = /var/lib/neutron
11lock_path = $state_path/lock
12bind_host = 0.0.0.0
13bind_port = 9696
14
15api_paste_config = /etc/neutron/api-paste.ini
16auth_strategy = keystone
17default_notification_level = INFO
18notification_topics = notifications
19
20{% include "parts/rabbitmq" %}
21
22[QUOTAS]
23
24[DEFAULT_SERVICETYPE]
25
26[AGENT]
27root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
28
29[keystone_authtoken]
30signing_dir = /var/lib/neutron/keystone-signing
31
032
=== added directory 'templates/parts'
=== added file 'templates/parts/rabbitmq'
--- templates/parts/rabbitmq 1970-01-01 00:00:00 +0000
+++ templates/parts/rabbitmq 2015-12-01 15:05:49 +0000
@@ -0,0 +1,21 @@
1{% if rabbitmq_host or rabbitmq_hosts -%}
2rabbit_userid = {{ rabbitmq_user }}
3rabbit_virtual_host = {{ rabbitmq_virtual_host }}
4rabbit_password = {{ rabbitmq_password }}
5{% if rabbitmq_hosts -%}
6rabbit_hosts = {{ rabbitmq_hosts }}
7{% if rabbitmq_ha_queues -%}
8rabbit_ha_queues = True
9rabbit_durable_queues = False
10{% endif -%}
11{% else -%}
12rabbit_host = {{ rabbitmq_host }}
13{% endif -%}
14{% if rabbit_ssl_port -%}
15rabbit_use_ssl = True
16rabbit_port = {{ rabbit_ssl_port }}
17{% if rabbit_ssl_ca -%}
18kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
19{% endif -%}
20{% endif -%}
21{% endif -%}
0\ No newline at end of file22\ No newline at end of file
123
=== modified file 'tests/charmhelpers/contrib/amulet/utils.py'
--- tests/charmhelpers/contrib/amulet/utils.py 2015-06-16 07:53:15 +0000
+++ tests/charmhelpers/contrib/amulet/utils.py 2015-12-01 15:05:49 +0000
@@ -14,14 +14,21 @@
14# You should have received a copy of the GNU Lesser General Public License14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1616
17import ConfigParser
18import io17import io
19import logging18import logging
19import os
20import re20import re
21import sys21import sys
22import time22import time
2323
24import amulet
25import distro_info
24import six26import six
27from six.moves import configparser
28if six.PY3:
29 from urllib import parse as urlparse
30else:
31 import urlparse
2532
2633
27class AmuletUtils(object):34class AmuletUtils(object):
@@ -33,6 +40,7 @@
3340
34 def __init__(self, log_level=logging.ERROR):41 def __init__(self, log_level=logging.ERROR):
35 self.log = self.get_logger(level=log_level)42 self.log = self.get_logger(level=log_level)
43 self.ubuntu_releases = self.get_ubuntu_releases()
3644
37 def get_logger(self, name="amulet-logger", level=logging.DEBUG):45 def get_logger(self, name="amulet-logger", level=logging.DEBUG):
38 """Get a logger object that will log to stdout."""46 """Get a logger object that will log to stdout."""
@@ -70,12 +78,44 @@
70 else:78 else:
71 return False79 return False
7280
81 def get_ubuntu_release_from_sentry(self, sentry_unit):
82 """Get Ubuntu release codename from sentry unit.
83
84 :param sentry_unit: amulet sentry/service unit pointer
85 :returns: list of strings - release codename, failure message
86 """
87 msg = None
88 cmd = 'lsb_release -cs'
89 release, code = sentry_unit.run(cmd)
90 if code == 0:
91 self.log.debug('{} lsb_release: {}'.format(
92 sentry_unit.info['unit_name'], release))
93 else:
94 msg = ('{} `{}` returned {} '
95 '{}'.format(sentry_unit.info['unit_name'],
96 cmd, release, code))
97 if release not in self.ubuntu_releases:
98 msg = ("Release ({}) not found in Ubuntu releases "
99 "({})".format(release, self.ubuntu_releases))
100 return release, msg
101
73 def validate_services(self, commands):102 def validate_services(self, commands):
74 """Validate services.103 """Validate that lists of commands succeed on service units. Can be
75104 used to verify system services are running on the corresponding
76 Verify the specified services are running on the corresponding
77 service units.105 service units.
78 """106
107 :param commands: dict with sentry keys and arbitrary command list vals
108 :returns: None if successful, Failure string message otherwise
109 """
110 self.log.debug('Checking status of system services...')
111
112 # /!\ DEPRECATION WARNING (beisner):
113 # New and existing tests should be rewritten to use
114 # validate_services_by_name() as it is aware of init systems.
115 self.log.warn('/!\\ DEPRECATION WARNING: use '
116 'validate_services_by_name instead of validate_services '
117 'due to init system differences.')
118
79 for k, v in six.iteritems(commands):119 for k, v in six.iteritems(commands):
80 for cmd in v:120 for cmd in v:
81 output, code = k.run(cmd)121 output, code = k.run(cmd)
@@ -86,6 +126,45 @@
86 return "command `{}` returned {}".format(cmd, str(code))126 return "command `{}` returned {}".format(cmd, str(code))
87 return None127 return None
88128
129 def validate_services_by_name(self, sentry_services):
130 """Validate system service status by service name, automatically
131 detecting init system based on Ubuntu release codename.
132
133 :param sentry_services: dict with sentry keys and svc list values
134 :returns: None if successful, Failure string message otherwise
135 """
136 self.log.debug('Checking status of system services...')
137
138 # Point at which systemd became a thing
139 systemd_switch = self.ubuntu_releases.index('vivid')
140
141 for sentry_unit, services_list in six.iteritems(sentry_services):
142 # Get lsb_release codename from unit
143 release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
144 if ret:
145 return ret
146
147 for service_name in services_list:
148 if (self.ubuntu_releases.index(release) >= systemd_switch or
149 service_name in ['rabbitmq-server', 'apache2']):
150 # init is systemd (or regular sysv)
151 cmd = 'sudo service {} status'.format(service_name)
152 output, code = sentry_unit.run(cmd)
153 service_running = code == 0
154 elif self.ubuntu_releases.index(release) < systemd_switch:
155 # init is upstart
156 cmd = 'sudo status {}'.format(service_name)
157 output, code = sentry_unit.run(cmd)
158 service_running = code == 0 and "start/running" in output
159
160 self.log.debug('{} `{}` returned '
161 '{}'.format(sentry_unit.info['unit_name'],
162 cmd, code))
163 if not service_running:
164 return u"command `{}` returned {} {}".format(
165 cmd, output, str(code))
166 return None
167
89 def _get_config(self, unit, filename):168 def _get_config(self, unit, filename):
90 """Get a ConfigParser object for parsing a unit's config file."""169 """Get a ConfigParser object for parsing a unit's config file."""
91 file_contents = unit.file_contents(filename)170 file_contents = unit.file_contents(filename)
@@ -93,7 +172,7 @@
93 # NOTE(beisner): by default, ConfigParser does not handle options172 # NOTE(beisner): by default, ConfigParser does not handle options
94 # with no value, such as the flags used in the mysql my.cnf file.173 # with no value, such as the flags used in the mysql my.cnf file.
95 # https://bugs.python.org/issue7005174 # https://bugs.python.org/issue7005
96 config = ConfigParser.ConfigParser(allow_no_value=True)175 config = configparser.ConfigParser(allow_no_value=True)
97 config.readfp(io.StringIO(file_contents))176 config.readfp(io.StringIO(file_contents))
98 return config177 return config
99178
@@ -103,7 +182,15 @@
103182
104 Verify that the specified section of the config file contains183 Verify that the specified section of the config file contains
105 the expected option key:value pairs.184 the expected option key:value pairs.
185
186 Compare expected dictionary data vs actual dictionary data.
187 The values in the 'expected' dictionary can be strings, bools, ints,
188 longs, or can be a function that evaluates a variable and returns a
189 bool.
106 """190 """
191 self.log.debug('Validating config file data ({} in {} on {})'
192 '...'.format(section, config_file,
193 sentry_unit.info['unit_name']))
107 config = self._get_config(sentry_unit, config_file)194 config = self._get_config(sentry_unit, config_file)
108195
109 if section != 'DEFAULT' and not config.has_section(section):196 if section != 'DEFAULT' and not config.has_section(section):
@@ -112,9 +199,20 @@
112 for k in expected.keys():199 for k in expected.keys():
113 if not config.has_option(section, k):200 if not config.has_option(section, k):
114 return "section [{}] is missing option {}".format(section, k)201 return "section [{}] is missing option {}".format(section, k)
115 if config.get(section, k) != expected[k]:202
203 actual = config.get(section, k)
204 v = expected[k]
205 if (isinstance(v, six.string_types) or
206 isinstance(v, bool) or
207 isinstance(v, six.integer_types)):
208 # handle explicit values
209 if actual != v:
210 return "section [{}] {}:{} != expected {}:{}".format(
211 section, k, actual, k, expected[k])
212 # handle function pointers, such as not_null or valid_ip
213 elif not v(actual):
116 return "section [{}] {}:{} != expected {}:{}".format(214 return "section [{}] {}:{} != expected {}:{}".format(
117 section, k, config.get(section, k), k, expected[k])215 section, k, actual, k, expected[k])
118 return None216 return None
119217
120 def _validate_dict_data(self, expected, actual):218 def _validate_dict_data(self, expected, actual):
@@ -122,7 +220,7 @@
122220
123 Compare expected dictionary data vs actual dictionary data.221 Compare expected dictionary data vs actual dictionary data.
124 The values in the 'expected' dictionary can be strings, bools, ints,222 The values in the 'expected' dictionary can be strings, bools, ints,
125 longs, or can be a function that evaluate a variable and returns a223 longs, or can be a function that evaluates a variable and returns a
126 bool.224 bool.
127 """225 """
128 self.log.debug('actual: {}'.format(repr(actual)))226 self.log.debug('actual: {}'.format(repr(actual)))
@@ -133,8 +231,10 @@
133 if (isinstance(v, six.string_types) or231 if (isinstance(v, six.string_types) or
134 isinstance(v, bool) or232 isinstance(v, bool) or
135 isinstance(v, six.integer_types)):233 isinstance(v, six.integer_types)):
234 # handle explicit values
136 if v != actual[k]:235 if v != actual[k]:
137 return "{}:{}".format(k, actual[k])236 return "{}:{}".format(k, actual[k])
237 # handle function pointers, such as not_null or valid_ip
138 elif not v(actual[k]):238 elif not v(actual[k]):
139 return "{}:{}".format(k, actual[k])239 return "{}:{}".format(k, actual[k])
140 else:240 else:
@@ -321,3 +421,133 @@
321421
322 def endpoint_error(self, name, data):422 def endpoint_error(self, name, data):
323 return 'unexpected endpoint data in {} - {}'.format(name, data)423 return 'unexpected endpoint data in {} - {}'.format(name, data)
424
425 def get_ubuntu_releases(self):
426 """Return a list of all Ubuntu releases in order of release."""
427 _d = distro_info.UbuntuDistroInfo()
428 _release_list = _d.all
429 self.log.debug('Ubuntu release list: {}'.format(_release_list))
430 return _release_list
431
432 def file_to_url(self, file_rel_path):
433 """Convert a relative file path to a file URL."""
434 _abs_path = os.path.abspath(file_rel_path)
435 return urlparse.urlparse(_abs_path, scheme='file').geturl()
436
437 def check_commands_on_units(self, commands, sentry_units):
438 """Check that all commands in a list exit zero on all
439 sentry units in a list.
440
441 :param commands: list of bash commands
442 :param sentry_units: list of sentry unit pointers
443 :returns: None if successful; Failure message otherwise
444 """
445 self.log.debug('Checking exit codes for {} commands on {} '
446 'sentry units...'.format(len(commands),
447 len(sentry_units)))
448 for sentry_unit in sentry_units:
449 for cmd in commands:
450 output, code = sentry_unit.run(cmd)
451 if code == 0:
452 self.log.debug('{} `{}` returned {} '
453 '(OK)'.format(sentry_unit.info['unit_name'],
454 cmd, code))
455 else:
456 return ('{} `{}` returned {} '
457 '{}'.format(sentry_unit.info['unit_name'],
458 cmd, code, output))
459 return None
460
461 def get_process_id_list(self, sentry_unit, process_name):
462 """Get a list of process ID(s) from a single sentry juju unit
463 for a single process name.
464
465 :param sentry_unit: Pointer to amulet sentry instance (juju unit)
466 :param process_name: Process name
467 :returns: List of process IDs
468 """
469 cmd = 'pidof {}'.format(process_name)
470 output, code = sentry_unit.run(cmd)
471 if code != 0:
472 msg = ('{} `{}` returned {} '
473 '{}'.format(sentry_unit.info['unit_name'],
474 cmd, code, output))
475 amulet.raise_status(amulet.FAIL, msg=msg)
476 return str(output).split()
477
478 def get_unit_process_ids(self, unit_processes):
479 """Construct a dict containing unit sentries, process names, and
480 process IDs."""
481 pid_dict = {}
482 for sentry_unit, process_list in unit_processes.iteritems():
483 pid_dict[sentry_unit] = {}
484 for process in process_list:
485 pids = self.get_process_id_list(sentry_unit, process)
486 pid_dict[sentry_unit].update({process: pids})
487 return pid_dict
488
489 def validate_unit_process_ids(self, expected, actual):
490 """Validate process id quantities for services on units."""
491 self.log.debug('Checking units for running processes...')
492 self.log.debug('Expected PIDs: {}'.format(expected))
493 self.log.debug('Actual PIDs: {}'.format(actual))
494
495 if len(actual) != len(expected):
496 return ('Unit count mismatch. expected, actual: {}, '
497 '{} '.format(len(expected), len(actual)))
498
499 for (e_sentry, e_proc_names) in expected.iteritems():
500 e_sentry_name = e_sentry.info['unit_name']
501 if e_sentry in actual.keys():
502 a_proc_names = actual[e_sentry]
503 else:
504 return ('Expected sentry ({}) not found in actual dict data.'
505 '{}'.format(e_sentry_name, e_sentry))
506
507 if len(e_proc_names.keys()) != len(a_proc_names.keys()):
508 return ('Process name count mismatch. expected, actual: {}, '
509 '{}'.format(len(expected), len(actual)))
510
511 for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
512 zip(e_proc_names.items(), a_proc_names.items()):
513 if e_proc_name != a_proc_name:
514 return ('Process name mismatch. expected, actual: {}, '
515 '{}'.format(e_proc_name, a_proc_name))
516
517 a_pids_length = len(a_pids)
518 fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
519 '{}, {} ({})'.format(e_sentry_name, e_proc_name,
520 e_pids_length, a_pids_length,
521 a_pids))
522
523 # If expected is not bool, ensure PID quantities match
524 if not isinstance(e_pids_length, bool) and \
525 a_pids_length != e_pids_length:
526 return fail_msg
527 # If expected is bool True, ensure 1 or more PIDs exist
528 elif isinstance(e_pids_length, bool) and \
529 e_pids_length is True and a_pids_length < 1:
530 return fail_msg
531 # If expected is bool False, ensure 0 PIDs exist
532 elif isinstance(e_pids_length, bool) and \
533 e_pids_length is False and a_pids_length != 0:
534 return fail_msg
535 else:
536 self.log.debug('PID check OK: {} {} {}: '
537 '{}'.format(e_sentry_name, e_proc_name,
538 e_pids_length, a_pids))
539 return None
540
541 def validate_list_of_identical_dicts(self, list_of_dicts):
542 """Check that all dicts within a list are identical."""
543 hashes = []
544 for _dict in list_of_dicts:
545 hashes.append(hash(frozenset(_dict.items())))
546
547 self.log.debug('Hashes: {}'.format(hashes))
548 if len(set(hashes)) == 1:
549 self.log.debug('Dicts within list are identical')
550 else:
551 return 'Dicts within list are not identical'
552
553 return None
324554
=== modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-06-24 08:25:28 +0000
+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-12-01 15:05:49 +0000
@@ -44,7 +44,7 @@
44 Determine if the local branch being tested is derived from its44 Determine if the local branch being tested is derived from its
45 stable or next (dev) branch, and based on this, use the corresonding45 stable or next (dev) branch, and based on this, use the corresonding
46 stable or next branches for the other_services."""46 stable or next branches for the other_services."""
47 base_charms = ['mysql', 'mongodb']47 base_charms = ['mysql', 'mongodb', 'nrpe']
4848
49 if self.series in ['precise', 'trusty']:49 if self.series in ['precise', 'trusty']:
50 base_series = self.series50 base_series = self.series
@@ -83,9 +83,10 @@
83 services.append(this_service)83 services.append(this_service)
84 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',84 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches

to all changes: