Merge lp:~bbaqar/charms/trusty/neutron-api-plumgrid/plumgrid-team-liberty into lp:~plumgrid-team/charms/trusty/neutron-api-plumgrid/trunk

Proposed by Bilal Baqar
Status: Merged
Merged at revision: 19
Proposed branch: lp:~bbaqar/charms/trusty/neutron-api-plumgrid/plumgrid-team-liberty
Merge into: lp:~plumgrid-team/charms/trusty/neutron-api-plumgrid/trunk
Diff against target: 6215 lines (+3726/-609)
36 files modified
config.yaml (+19/-0)
hooks/charmhelpers/contrib/amulet/deployment.py (+4/-2)
hooks/charmhelpers/contrib/amulet/utils.py (+368/-83)
hooks/charmhelpers/contrib/charmsupport/nrpe.py (+52/-14)
hooks/charmhelpers/contrib/mellanox/infiniband.py (+151/-0)
hooks/charmhelpers/contrib/network/ip.py (+46/-23)
hooks/charmhelpers/contrib/network/ufw.py (+5/-6)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+133/-14)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+381/-0)
hooks/charmhelpers/contrib/openstack/context.py (+215/-78)
hooks/charmhelpers/contrib/openstack/neutron.py (+64/-23)
hooks/charmhelpers/contrib/openstack/templating.py (+30/-2)
hooks/charmhelpers/contrib/openstack/utils.py (+511/-70)
hooks/charmhelpers/contrib/peerstorage/__init__.py (+5/-4)
hooks/charmhelpers/contrib/python/packages.py (+35/-11)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+812/-61)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+10/-0)
hooks/charmhelpers/contrib/storage/linux/utils.py (+3/-2)
hooks/charmhelpers/contrib/templating/jinja.py (+4/-3)
hooks/charmhelpers/core/hookenv.py (+189/-13)
hooks/charmhelpers/core/host.py (+240/-61)
hooks/charmhelpers/core/hugepage.py (+71/-0)
hooks/charmhelpers/core/kernel.py (+68/-0)
hooks/charmhelpers/core/services/helpers.py (+30/-5)
hooks/charmhelpers/core/strutils.py (+30/-0)
hooks/charmhelpers/core/templating.py (+21/-8)
hooks/charmhelpers/core/unitdata.py (+61/-17)
hooks/charmhelpers/fetch/__init__.py (+18/-2)
hooks/charmhelpers/fetch/archiveurl.py (+1/-1)
hooks/charmhelpers/fetch/bzrurl.py (+22/-32)
hooks/charmhelpers/fetch/giturl.py (+20/-23)
hooks/neutron_plumgrid_context.py (+26/-42)
hooks/neutron_plumgrid_hooks.py (+21/-5)
hooks/neutron_plumgrid_utils.py (+34/-3)
metadata.yaml (+6/-1)
templates/kilo/plumgrid.ini (+20/-0)
To merge this branch: bzr merge lp:~bbaqar/charms/trusty/neutron-api-plumgrid/plumgrid-team-liberty
Reviewer: Bilal Baqar (status: Pending)
Review via email: mp+287392@code.launchpad.net

Commit message

WIP

Description of the change

Liberty changes:
1. Synced charmhelpers.
2. Removed the neutron-api-plugin relation.
3. Added a subordinate relation with neutron-api.
4. Added a relation with keystone (a rough hook sketch follows this list).
5. Added plumgrid.ini.
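
A minimal, hypothetical sketch of what the keystone relation hook can look
like with charmhelpers; the hook name and relation keys below are assumptions
for illustration, not code taken from this branch:

#!/usr/bin/env python
# Hypothetical sketch only: reading keystone relation data in a
# -relation-changed hook. Real key names depend on the keystone charm.
import sys

from charmhelpers.core.hookenv import Hooks, log, relation_get

hooks = Hooks()

@hooks.hook('identity-admin-relation-changed')  # relation name assumed
def identity_admin_changed():
    # relation_get() returns the remote unit's setting for each key.
    creds = {key: relation_get(key)
             for key in ('service_hostname', 'service_username')}
    log('keystone settings received: {}'.format(sorted(creds)))

if __name__ == '__main__':
    hooks.execute(sys.argv)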

12. By Bilal Baqar

Second commit:

13. By Bilal Baqar

3rd commit

14. By Bilal Baqar

Commit

15. By Bilal Baqar

Fixes

16. By Bilal Baqar

Updating quota driver

Preview Diff

1=== modified file 'config.yaml'
2--- config.yaml 2016-01-15 20:56:20 +0000
3+++ config.yaml 2016-03-09 12:07:21 +0000
4@@ -21,3 +21,22 @@
5 type: string
6 description: |
7 Provide the version of networking-plumgrid package that needs to be installed
8+ plumgrid-username:
9+ default: plumgrid
10+ type: string
11+ description: Username to access PLUMgrid Director
12+ plumgrid-password:
13+ default: plumgrid
14+ type: string
15+ description: Password to access PLUMgrid Director
16+ plumgrid-virtual-ip:
17+ default:
18+ type: string
19+ description: IP address of PLUMgrid Director
20+ # end of PLUMgrid configuration
21+ manage-neutron-plugin-legacy-mode:
22+ type: boolean
23+ default: False
24+ description: |
25+ If True, the neutron-api charm will install neutron packages for the
26+ configured plugin. This must also be set in the neutron-api charm.
27
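
The options above are consumed in the charm's Python through charmhelpers'
config() accessor; a minimal sketch (the helper function is illustrative, not
code from this branch):

from charmhelpers.core.hookenv import config

def director_settings():
    # Option names match the config.yaml hunk above.
    return {
        'virtual_ip': config('plumgrid-virtual-ip'),
        'username': config('plumgrid-username'),
        'password': config('plumgrid-password'),
    }
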
28=== modified file 'hooks/charmhelpers/contrib/amulet/deployment.py'
29--- hooks/charmhelpers/contrib/amulet/deployment.py 2015-07-29 18:35:16 +0000
30+++ hooks/charmhelpers/contrib/amulet/deployment.py 2016-03-09 12:07:21 +0000
31@@ -51,7 +51,8 @@
32 if 'units' not in this_service:
33 this_service['units'] = 1
34
35- self.d.add(this_service['name'], units=this_service['units'])
36+ self.d.add(this_service['name'], units=this_service['units'],
37+ constraints=this_service.get('constraints'))
38
39 for svc in other_services:
40 if 'location' in svc:
41@@ -64,7 +65,8 @@
42 if 'units' not in svc:
43 svc['units'] = 1
44
45- self.d.add(svc['name'], charm=branch_location, units=svc['units'])
46+ self.d.add(svc['name'], charm=branch_location, units=svc['units'],
47+ constraints=svc.get('constraints'))
48
49 def _add_relations(self, relations):
50 """Add all of the relations for the services."""
51
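
With the change above, a test's service dicts may carry an optional
'constraints' entry that is forwarded to deployer.add(); a hedged usage
sketch (service names and constraint values assumed):

this_service = {'name': 'neutron-api-plumgrid', 'units': 1,
                'constraints': {'mem': '2G'}}
other_services = [{'name': 'neutron-api'}, {'name': 'keystone'}]
# _add_services(this_service, other_services) then passes
# constraints=this_service.get('constraints') through to self.d.add().
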
52=== modified file 'hooks/charmhelpers/contrib/amulet/utils.py'
53--- hooks/charmhelpers/contrib/amulet/utils.py 2015-07-29 18:35:16 +0000
54+++ hooks/charmhelpers/contrib/amulet/utils.py 2016-03-09 12:07:21 +0000
55@@ -14,17 +14,25 @@
56 # You should have received a copy of the GNU Lesser General Public License
57 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
58
59-import amulet
60-import ConfigParser
61-import distro_info
62 import io
63+import json
64 import logging
65 import os
66 import re
67-import six
68+import socket
69+import subprocess
70 import sys
71 import time
72-import urlparse
73+import uuid
74+
75+import amulet
76+import distro_info
77+import six
78+from six.moves import configparser
79+if six.PY3:
80+ from urllib import parse as urlparse
81+else:
82+ import urlparse
83
84
85 class AmuletUtils(object):
86@@ -108,7 +116,7 @@
87 # /!\ DEPRECATION WARNING (beisner):
88 # New and existing tests should be rewritten to use
89 # validate_services_by_name() as it is aware of init systems.
90- self.log.warn('/!\\ DEPRECATION WARNING: use '
91+ self.log.warn('DEPRECATION WARNING: use '
92 'validate_services_by_name instead of validate_services '
93 'due to init system differences.')
94
95@@ -142,19 +150,23 @@
96
97 for service_name in services_list:
98 if (self.ubuntu_releases.index(release) >= systemd_switch or
99- service_name == "rabbitmq-server"):
100- # init is systemd
101+ service_name in ['rabbitmq-server', 'apache2']):
102+ # init is systemd (or regular sysv)
103 cmd = 'sudo service {} status'.format(service_name)
104+ output, code = sentry_unit.run(cmd)
105+ service_running = code == 0
106 elif self.ubuntu_releases.index(release) < systemd_switch:
107 # init is upstart
108 cmd = 'sudo status {}'.format(service_name)
109+ output, code = sentry_unit.run(cmd)
110+ service_running = code == 0 and "start/running" in output
111
112- output, code = sentry_unit.run(cmd)
113 self.log.debug('{} `{}` returned '
114 '{}'.format(sentry_unit.info['unit_name'],
115 cmd, code))
116- if code != 0:
117- return "command `{}` returned {}".format(cmd, str(code))
118+ if not service_running:
119+ return u"command `{}` returned {} {}".format(
120+ cmd, output, str(code))
121 return None
122
123 def _get_config(self, unit, filename):
124@@ -164,7 +176,7 @@
125 # NOTE(beisner): by default, ConfigParser does not handle options
126 # with no value, such as the flags used in the mysql my.cnf file.
127 # https://bugs.python.org/issue7005
128- config = ConfigParser.ConfigParser(allow_no_value=True)
129+ config = configparser.ConfigParser(allow_no_value=True)
130 config.readfp(io.StringIO(file_contents))
131 return config
132
133@@ -259,33 +271,52 @@
134 """Get last modification time of directory."""
135 return sentry_unit.directory_stat(directory)['mtime']
136
137- def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
138- """Get process' start time.
139-
140- Determine start time of the process based on the last modification
141- time of the /proc/pid directory. If pgrep_full is True, the process
142- name is matched against the full command line.
143- """
144- if pgrep_full:
145- cmd = 'pgrep -o -f {}'.format(service)
146- else:
147- cmd = 'pgrep -o {}'.format(service)
148- cmd = cmd + ' | grep -v pgrep || exit 0'
149- cmd_out = sentry_unit.run(cmd)
150- self.log.debug('CMDout: ' + str(cmd_out))
151- if cmd_out[0]:
152- self.log.debug('Pid for %s %s' % (service, str(cmd_out[0])))
153- proc_dir = '/proc/{}'.format(cmd_out[0].strip())
154- return self._get_dir_mtime(sentry_unit, proc_dir)
155+ def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
156+ """Get start time of a process based on the last modification time
157+ of the /proc/pid directory.
158+
159+ :sentry_unit: The sentry unit to check for the service on
160+ :service: service name to look for in process table
161+ :pgrep_full: [Deprecated] Use full command line search mode with pgrep
162+ :returns: epoch time of service process start
166+ """
167+ if pgrep_full is not None:
168+ # /!\ DEPRECATION WARNING (beisner):
169+ # No longer implemented, as pidof is now used instead of pgrep.
170+ # https://bugs.launchpad.net/charm-helpers/+bug/1474030
171+ self.log.warn('DEPRECATION WARNING: pgrep_full bool is no '
172+ 'longer implemented re: lp 1474030.')
173+
174+ pid_list = self.get_process_id_list(sentry_unit, service)
175+ pid = pid_list[0]
176+ proc_dir = '/proc/{}'.format(pid)
177+ self.log.debug('Pid for {} on {}: {}'.format(
178+ service, sentry_unit.info['unit_name'], pid))
179+
180+ return self._get_dir_mtime(sentry_unit, proc_dir)
181
182 def service_restarted(self, sentry_unit, service, filename,
183- pgrep_full=False, sleep_time=20):
184+ pgrep_full=None, sleep_time=20):
185 """Check if service was restarted.
186
187 Compare a service's start time vs a file's last modification time
188 (such as a config file for that service) to determine if the service
189 has been restarted.
190 """
191+ # /!\ DEPRECATION WARNING (beisner):
192+ # This method is prone to races in that no before-time is known.
193+ # Use validate_service_config_changed instead.
194+
195+ # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
196+ # used instead of pgrep. pgrep_full is still passed through to ensure
197+ # deprecation WARNS. lp1474030
198+ self.log.warn('DEPRECATION WARNING: use '
199+ 'validate_service_config_changed instead of '
200+ 'service_restarted due to known races.')
201+
202 time.sleep(sleep_time)
203 if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
204 self._get_file_mtime(sentry_unit, filename)):
205@@ -294,78 +325,122 @@
206 return False
207
208 def service_restarted_since(self, sentry_unit, mtime, service,
209- pgrep_full=False, sleep_time=20,
210- retry_count=2):
211+ pgrep_full=None, sleep_time=20,
212+ retry_count=30, retry_sleep_time=10):
213 """Check if service has been started after a given time.
214
215 Args:
216 sentry_unit (sentry): The sentry unit to check for the service on
217 mtime (float): The epoch time to check against
218 service (string): service name to look for in process table
219- pgrep_full (boolean): Use full command line search mode with pgrep
220- sleep_time (int): Seconds to sleep before looking for process
221- retry_count (int): If service is not found, how many times to retry
222+ pgrep_full: [Deprecated] Use full command line search mode with pgrep
223+ sleep_time (int): Initial sleep time (s) before looking for file
224+ retry_sleep_time (int): Time (s) to sleep between retries
225+ retry_count (int): If file is not found, how many times to retry
226
227 Returns:
228 bool: True if service found and its start time is newer than mtime,
229 False if service is older than mtime or if service was
230 not found.
231 """
232- self.log.debug('Checking %s restarted since %s' % (service, mtime))
233+ # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
234+ # used instead of pgrep. pgrep_full is still passed through to ensure
235+ # deprecation WARNS. lp1474030
236+
237+ unit_name = sentry_unit.info['unit_name']
238+ self.log.debug('Checking that %s service restarted since %s on '
239+ '%s' % (service, mtime, unit_name))
240 time.sleep(sleep_time)
241- proc_start_time = self._get_proc_start_time(sentry_unit, service,
242- pgrep_full)
243- while retry_count > 0 and not proc_start_time:
244- self.log.debug('No pid file found for service %s, will retry %i '
245- 'more times' % (service, retry_count))
246- time.sleep(30)
247- proc_start_time = self._get_proc_start_time(sentry_unit, service,
248- pgrep_full)
249- retry_count = retry_count - 1
250+ proc_start_time = None
251+ tries = 0
252+ while tries <= retry_count and not proc_start_time:
253+ try:
254+ proc_start_time = self._get_proc_start_time(sentry_unit,
255+ service,
256+ pgrep_full)
257+ self.log.debug('Attempt {} to get {} proc start time on {} '
258+ 'OK'.format(tries, service, unit_name))
259+ except IOError as e:
260+ # NOTE(beisner) - race avoidance, proc may not exist yet.
261+ # https://bugs.launchpad.net/charm-helpers/+bug/1474030
262+ self.log.debug('Attempt {} to get {} proc start time on {} '
263+ 'failed\n{}'.format(tries, service,
264+ unit_name, e))
265+ time.sleep(retry_sleep_time)
266+ tries += 1
267
268 if not proc_start_time:
269 self.log.warn('No proc start time found, assuming service did '
270 'not start')
271 return False
272 if proc_start_time >= mtime:
273- self.log.debug('proc start time is newer than provided mtime'
274- '(%s >= %s)' % (proc_start_time, mtime))
275+ self.log.debug('Proc start time is newer than provided mtime '
276+ '(%s >= %s) on %s (OK)' % (proc_start_time,
277+ mtime, unit_name))
278 return True
279 else:
280- self.log.warn('proc start time (%s) is older than provided mtime '
281- '(%s), service did not restart' % (proc_start_time,
282- mtime))
283+ self.log.warn('Proc start time (%s) is older than provided mtime '
284+ '(%s) on %s, service did not '
285+ 'restart' % (proc_start_time, mtime, unit_name))
286 return False
287
288 def config_updated_since(self, sentry_unit, filename, mtime,
289- sleep_time=20):
290+ sleep_time=20, retry_count=30,
291+ retry_sleep_time=10):
292 """Check if file was modified after a given time.
293
294 Args:
295 sentry_unit (sentry): The sentry unit to check the file mtime on
296 filename (string): The file to check mtime of
297 mtime (float): The epoch time to check against
298- sleep_time (int): Seconds to sleep before looking for process
299+ sleep_time (int): Initial sleep time (s) before looking for file
300+ retry_sleep_time (int): Time (s) to sleep between retries
301+ retry_count (int): If file is not found, how many times to retry
302
303 Returns:
304 bool: True if file was modified more recently than mtime, False if
305- file was modified before mtime,
306+ file was modified before mtime, or if file not found.
307 """
308- self.log.debug('Checking %s updated since %s' % (filename, mtime))
309+ unit_name = sentry_unit.info['unit_name']
310+ self.log.debug('Checking that %s updated since %s on '
311+ '%s' % (filename, mtime, unit_name))
312 time.sleep(sleep_time)
313- file_mtime = self._get_file_mtime(sentry_unit, filename)
314+ file_mtime = None
315+ tries = 0
316+ while tries <= retry_count and not file_mtime:
317+ try:
318+ file_mtime = self._get_file_mtime(sentry_unit, filename)
319+ self.log.debug('Attempt {} to get {} file mtime on {} '
320+ 'OK'.format(tries, filename, unit_name))
321+ except IOError as e:
322+ # NOTE(beisner) - race avoidance, file may not exist yet.
323+ # https://bugs.launchpad.net/charm-helpers/+bug/1474030
324+ self.log.debug('Attempt {} to get {} file mtime on {} '
325+ 'failed\n{}'.format(tries, filename,
326+ unit_name, e))
327+ time.sleep(retry_sleep_time)
328+ tries += 1
329+
330+ if not file_mtime:
331+ self.log.warn('Could not determine file mtime, assuming '
332+ 'file does not exist')
333+ return False
334+
335 if file_mtime >= mtime:
336 self.log.debug('File mtime is newer than provided mtime '
337- '(%s >= %s)' % (file_mtime, mtime))
338+ '(%s >= %s) on %s (OK)' % (file_mtime,
339+ mtime, unit_name))
340 return True
341 else:
342- self.log.warn('File mtime %s is older than provided mtime %s'
343- % (file_mtime, mtime))
344+ self.log.warn('File mtime is older than provided mtime '
345+ '(%s < %s) on %s' % (file_mtime,
346+ mtime, unit_name))
347 return False
348
349 def validate_service_config_changed(self, sentry_unit, mtime, service,
350- filename, pgrep_full=False,
351- sleep_time=20, retry_count=2):
352+ filename, pgrep_full=None,
353+ sleep_time=20, retry_count=30,
354+ retry_sleep_time=10):
355 """Check service and file were updated after mtime
356
357 Args:
358@@ -373,9 +448,10 @@
359 mtime (float): The epoch time to check against
360 service (string): service name to look for in process table
361 filename (string): The file to check mtime of
362- pgrep_full (boolean): Use full command line search mode with pgrep
363- sleep_time (int): Seconds to sleep before looking for process
364+ pgrep_full: [Deprecated] Use full command line search mode with pgrep
365+ sleep_time (int): Initial sleep in seconds to pass to test helpers
366 retry_count (int): If service is not found, how many times to retry
367+ retry_sleep_time (int): Time in seconds to wait between retries
368
369 Typical Usage:
370 u = OpenStackAmuletUtils(ERROR)
371@@ -392,15 +468,27 @@
372 mtime, False if service is older than mtime or if service was
373 not found or if filename was modified before mtime.
374 """
375- self.log.debug('Checking %s restarted since %s' % (service, mtime))
376- time.sleep(sleep_time)
377- service_restart = self.service_restarted_since(sentry_unit, mtime,
378- service,
379- pgrep_full=pgrep_full,
380- sleep_time=0,
381- retry_count=retry_count)
382- config_update = self.config_updated_since(sentry_unit, filename, mtime,
383- sleep_time=0)
384+
385+ # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
386+ # used instead of pgrep. pgrep_full is still passed through to ensure
387+ # deprecation WARNS. lp1474030
388+
389+ service_restart = self.service_restarted_since(
390+ sentry_unit, mtime,
391+ service,
392+ pgrep_full=pgrep_full,
393+ sleep_time=sleep_time,
394+ retry_count=retry_count,
395+ retry_sleep_time=retry_sleep_time)
396+
397+ config_update = self.config_updated_since(
398+ sentry_unit,
399+ filename,
400+ mtime,
401+ sleep_time=sleep_time,
402+ retry_count=retry_count,
403+ retry_sleep_time=retry_sleep_time)
404+
405 return service_restart and config_update
406
407 def get_sentry_time(self, sentry_unit):
408@@ -418,7 +506,6 @@
409 """Return a list of all Ubuntu releases in order of release."""
410 _d = distro_info.UbuntuDistroInfo()
411 _release_list = _d.all
412- self.log.debug('Ubuntu release list: {}'.format(_release_list))
413 return _release_list
414
415 def file_to_url(self, file_rel_path):
416@@ -450,15 +537,20 @@
417 cmd, code, output))
418 return None
419
420- def get_process_id_list(self, sentry_unit, process_name):
421+ def get_process_id_list(self, sentry_unit, process_name,
422+ expect_success=True):
423 """Get a list of process ID(s) from a single sentry juju unit
424 for a single process name.
425
426- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
427+ :param sentry_unit: Amulet sentry instance (juju unit)
428 :param process_name: Process name
429+ :param expect_success: If False, expect the PID to be missing,
430+ raise if it is present.
431 :returns: List of process IDs
432 """
433- cmd = 'pidof {}'.format(process_name)
434+ cmd = 'pidof -x {}'.format(process_name)
435+ if not expect_success:
436+ cmd += " || exit 0 && exit 1"
437 output, code = sentry_unit.run(cmd)
438 if code != 0:
439 msg = ('{} `{}` returned {} '
440@@ -467,14 +559,23 @@
441 amulet.raise_status(amulet.FAIL, msg=msg)
442 return str(output).split()
443
444- def get_unit_process_ids(self, unit_processes):
445+ def get_unit_process_ids(self, unit_processes, expect_success=True):
446 """Construct a dict containing unit sentries, process names, and
447- process IDs."""
448+ process IDs.
449+
450+ :param unit_processes: A dictionary of Amulet sentry instance
451+ to list of process names.
452+ :param expect_success: if False expect the processes to not be
453+ running, raise if they are.
454+ :returns: Dictionary of Amulet sentry instance to dictionary
455+ of process names to PIDs.
456+ """
457 pid_dict = {}
458- for sentry_unit, process_list in unit_processes.iteritems():
459+ for sentry_unit, process_list in six.iteritems(unit_processes):
460 pid_dict[sentry_unit] = {}
461 for process in process_list:
462- pids = self.get_process_id_list(sentry_unit, process)
463+ pids = self.get_process_id_list(
464+ sentry_unit, process, expect_success=expect_success)
465 pid_dict[sentry_unit].update({process: pids})
466 return pid_dict
467
468@@ -488,7 +589,7 @@
469 return ('Unit count mismatch. expected, actual: {}, '
470 '{} '.format(len(expected), len(actual)))
471
472- for (e_sentry, e_proc_names) in expected.iteritems():
473+ for (e_sentry, e_proc_names) in six.iteritems(expected):
474 e_sentry_name = e_sentry.info['unit_name']
475 if e_sentry in actual.keys():
476 a_proc_names = actual[e_sentry]
477@@ -507,11 +608,23 @@
478 '{}'.format(e_proc_name, a_proc_name))
479
480 a_pids_length = len(a_pids)
481- if e_pids_length != a_pids_length:
482- return ('PID count mismatch. {} ({}) expected, actual: '
483+ fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
484 '{}, {} ({})'.format(e_sentry_name, e_proc_name,
485 e_pids_length, a_pids_length,
486 a_pids))
487+
488+ # If expected is not bool, ensure PID quantities match
489+ if not isinstance(e_pids_length, bool) and \
490+ a_pids_length != e_pids_length:
491+ return fail_msg
492+ # If expected is bool True, ensure 1 or more PIDs exist
493+ elif isinstance(e_pids_length, bool) and \
494+ e_pids_length is True and a_pids_length < 1:
495+ return fail_msg
496+ # If expected is bool False, ensure 0 PIDs exist
497+ elif isinstance(e_pids_length, bool) and \
498+ e_pids_length is False and a_pids_length != 0:
499+ return fail_msg
500 else:
501 self.log.debug('PID check OK: {} {} {}: '
502 '{}'.format(e_sentry_name, e_proc_name,
503@@ -531,3 +644,175 @@
504 return 'Dicts within list are not identical'
505
506 return None
507+
508+ def validate_sectionless_conf(self, file_contents, expected):
509+ """A crude conf parser. Useful to inspect configuration files which
510+ do not have section headers (as would be necessary in order to use
511+ the configparser). Such as openstack-dashboard or rabbitmq confs."""
512+ for line in file_contents.split('\n'):
513+ if '=' in line:
514+ args = line.split('=')
515+ if len(args) <= 1:
516+ continue
517+ key = args[0].strip()
518+ value = args[1].strip()
519+ if key in expected.keys():
520+ if expected[key] != value:
521+ msg = ('Config mismatch. Expected, actual: {}, '
522+ '{}'.format(expected[key], value))
523+ amulet.raise_status(amulet.FAIL, msg=msg)
524+
525+ def get_unit_hostnames(self, units):
526+ """Return a dict of juju unit names to hostnames."""
527+ host_names = {}
528+ for unit in units:
529+ host_names[unit.info['unit_name']] = \
530+ str(unit.file_contents('/etc/hostname').strip())
531+ self.log.debug('Unit host names: {}'.format(host_names))
532+ return host_names
533+
534+ def run_cmd_unit(self, sentry_unit, cmd):
535+ """Run a command on a unit, return the output and exit code."""
536+ output, code = sentry_unit.run(cmd)
537+ if code == 0:
538+ self.log.debug('{} `{}` command returned {} '
539+ '(OK)'.format(sentry_unit.info['unit_name'],
540+ cmd, code))
541+ else:
542+ msg = ('{} `{}` command returned {} '
543+ '{}'.format(sentry_unit.info['unit_name'],
544+ cmd, code, output))
545+ amulet.raise_status(amulet.FAIL, msg=msg)
546+ return str(output), code
547+
548+ def file_exists_on_unit(self, sentry_unit, file_name):
549+ """Check if a file exists on a unit."""
550+ try:
551+ sentry_unit.file_stat(file_name)
552+ return True
553+ except IOError:
554+ return False
555+ except Exception as e:
556+ msg = 'Error checking file {}: {}'.format(file_name, e)
557+ amulet.raise_status(amulet.FAIL, msg=msg)
558+
559+ def file_contents_safe(self, sentry_unit, file_name,
560+ max_wait=60, fatal=False):
561+ """Get file contents from a sentry unit. Wrap amulet file_contents
562+ with retry logic to address races where a file checks as existing,
563+ but no longer exists by the time file_contents is called.
564+ Return None if file not found. Optionally raise if fatal is True."""
565+ unit_name = sentry_unit.info['unit_name']
566+ file_contents = False
567+ tries = 0
568+ while not file_contents and tries < (max_wait / 4):
569+ try:
570+ file_contents = sentry_unit.file_contents(file_name)
571+ except IOError:
572+ self.log.debug('Attempt {} to open file {} from {} '
573+ 'failed'.format(tries, file_name,
574+ unit_name))
575+ time.sleep(4)
576+ tries += 1
577+
578+ if file_contents:
579+ return file_contents
580+ elif not fatal:
581+ return None
582+ elif fatal:
583+ msg = 'Failed to get file contents from unit.'
584+ amulet.raise_status(amulet.FAIL, msg)
585+
586+ def port_knock_tcp(self, host="localhost", port=22, timeout=15):
587+ """Open a TCP socket to check for a listening service on a host.
588+
589+ :param host: host name or IP address, default to localhost
590+ :param port: TCP port number, default to 22
591+ :param timeout: Connect timeout, default to 15 seconds
592+ :returns: True if successful, False if connect failed
593+ """
594+
595+ # Resolve host name if possible
596+ try:
597+ connect_host = socket.gethostbyname(host)
598+ host_human = "{} ({})".format(connect_host, host)
599+ except socket.error as e:
600+ self.log.warn('Unable to resolve address: '
601+ '{} ({}) Trying anyway!'.format(host, e))
602+ connect_host = host
603+ host_human = connect_host
604+
605+ # Attempt socket connection
606+ try:
607+ knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
608+ knock.settimeout(timeout)
609+ knock.connect((connect_host, port))
610+ knock.close()
611+ self.log.debug('Socket connect OK for host '
612+ '{} on port {}.'.format(host_human, port))
613+ return True
614+ except socket.error as e:
615+ self.log.debug('Socket connect FAIL for'
616+ ' {} port {} ({})'.format(host_human, port, e))
617+ return False
618+
619+ def port_knock_units(self, sentry_units, port=22,
620+ timeout=15, expect_success=True):
621+ """Open a TCP socket to check for a listening sevice on each
622+ listed juju unit.
623+
624+ :param sentry_units: list of sentry unit pointers
625+ :param port: TCP port number, default to 22
626+ :param timeout: Connect timeout, default to 15 seconds
627+ :expect_success: True by default, set False to invert logic
628+ :returns: None if successful, Failure message otherwise
629+ """
630+ for unit in sentry_units:
631+ host = unit.info['public-address']
632+ connected = self.port_knock_tcp(host, port, timeout)
633+ if not connected and expect_success:
634+ return 'Socket connect failed.'
635+ elif connected and not expect_success:
636+ return 'Socket connected unexpectedly.'
637+
638+ def get_uuid_epoch_stamp(self):
639+ """Returns a stamp string based on uuid4 and epoch time. Useful in
640+ generating test messages which need to be unique-ish."""
641+ return '[{}-{}]'.format(uuid.uuid4(), time.time())
642+
643+# amulet juju action helpers:
644+ def run_action(self, unit_sentry, action,
645+ _check_output=subprocess.check_output):
646+ """Run the named action on a given unit sentry.
647+
648+ _check_output parameter is used for dependency injection.
649+
650+ @return action_id.
651+ """
652+ unit_id = unit_sentry.info["unit_name"]
653+ command = ["juju", "action", "do", "--format=json", unit_id, action]
654+ self.log.info("Running command: %s\n" % " ".join(command))
655+ output = _check_output(command, universal_newlines=True)
656+ data = json.loads(output)
657+ action_id = data[u'Action queued with id']
658+ return action_id
659+
660+ def wait_on_action(self, action_id, _check_output=subprocess.check_output):
661+ """Wait for a given action, returning if it completed or not.
662+
663+ _check_output parameter is used for dependency injection.
664+ """
665+ command = ["juju", "action", "fetch", "--format=json", "--wait=0",
666+ action_id]
667+ output = _check_output(command, universal_newlines=True)
668+ data = json.loads(output)
669+ return data.get(u"status") == "completed"
670+
671+ def status_get(self, unit):
672+ """Return the current service status of this unit."""
673+ raw_status, return_code = unit.run(
674+ "status-get --format=json --include-data")
675+ if return_code != 0:
676+ return ("unknown", "")
677+ status = json.loads(raw_status)
678+ return (status["status"], status["message"])
679
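
For reference, the connection check introduced in port_knock_tcp() above
reduces to this standalone sketch:

import socket

def port_open(host, port, timeout=15):
    # Mirrors port_knock_tcp(): connect with a timeout, report success.
    try:
        knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        knock.settimeout(timeout)
        knock.connect((host, port))
        knock.close()
        return True
    except socket.error:
        return False

print(port_open('localhost', 22))  # True where sshd is listening
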
680=== modified file 'hooks/charmhelpers/contrib/charmsupport/nrpe.py'
681--- hooks/charmhelpers/contrib/charmsupport/nrpe.py 2015-07-29 18:35:16 +0000
682+++ hooks/charmhelpers/contrib/charmsupport/nrpe.py 2016-03-09 12:07:21 +0000
683@@ -148,6 +148,13 @@
684 self.description = description
685 self.check_cmd = self._locate_cmd(check_cmd)
686
687+ def _get_check_filename(self):
688+ return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
689+
690+ def _get_service_filename(self, hostname):
691+ return os.path.join(NRPE.nagios_exportdir,
692+ 'service__{}_{}.cfg'.format(hostname, self.command))
693+
694 def _locate_cmd(self, check_cmd):
695 search_path = (
696 '/usr/lib/nagios/plugins',
697@@ -163,9 +170,21 @@
698 log('Check command not found: {}'.format(parts[0]))
699 return ''
700
701+ def _remove_service_files(self):
702+ if not os.path.exists(NRPE.nagios_exportdir):
703+ return
704+ for f in os.listdir(NRPE.nagios_exportdir):
705+ if f.endswith('_{}.cfg'.format(self.command)):
706+ os.remove(os.path.join(NRPE.nagios_exportdir, f))
707+
708+ def remove(self, hostname):
709+ nrpe_check_file = self._get_check_filename()
710+ if os.path.exists(nrpe_check_file):
711+ os.remove(nrpe_check_file)
712+ self._remove_service_files()
713+
714 def write(self, nagios_context, hostname, nagios_servicegroups):
715- nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
716- self.command)
717+ nrpe_check_file = self._get_check_filename()
718 with open(nrpe_check_file, 'w') as nrpe_check_config:
719 nrpe_check_config.write("# check {}\n".format(self.shortname))
720 nrpe_check_config.write("command[{}]={}\n".format(
721@@ -180,9 +199,7 @@
722
723 def write_service_config(self, nagios_context, hostname,
724 nagios_servicegroups):
725- for f in os.listdir(NRPE.nagios_exportdir):
726- if re.search('.*{}.cfg'.format(self.command), f):
727- os.remove(os.path.join(NRPE.nagios_exportdir, f))
728+ self._remove_service_files()
729
730 templ_vars = {
731 'nagios_hostname': hostname,
732@@ -192,8 +209,7 @@
733 'command': self.command,
734 }
735 nrpe_service_text = Check.service_template.format(**templ_vars)
736- nrpe_service_file = '{}/service__{}_{}.cfg'.format(
737- NRPE.nagios_exportdir, hostname, self.command)
738+ nrpe_service_file = self._get_service_filename(hostname)
739 with open(nrpe_service_file, 'w') as nrpe_service_config:
740 nrpe_service_config.write(str(nrpe_service_text))
741
742@@ -218,12 +234,32 @@
743 if hostname:
744 self.hostname = hostname
745 else:
746- self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
747+ nagios_hostname = get_nagios_hostname()
748+ if nagios_hostname:
749+ self.hostname = nagios_hostname
750+ else:
751+ self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
752 self.checks = []
753
754 def add_check(self, *args, **kwargs):
755 self.checks.append(Check(*args, **kwargs))
756
757+ def remove_check(self, *args, **kwargs):
758+ if kwargs.get('shortname') is None:
759+ raise ValueError('shortname of check must be specified')
760+
761+ # Use sensible defaults if they're not specified - these are not
762+ # actually used during removal, but they're required for constructing
763+ # the Check object; check_disk is chosen because it's part of the
764+ # nagios-plugins-basic package.
765+ if kwargs.get('check_cmd') is None:
766+ kwargs['check_cmd'] = 'check_disk'
767+ if kwargs.get('description') is None:
768+ kwargs['description'] = ''
769+
770+ check = Check(*args, **kwargs)
771+ check.remove(self.hostname)
772+
773 def write(self):
774 try:
775 nagios_uid = pwd.getpwnam('nagios').pw_uid
776@@ -260,7 +296,7 @@
777 :param str relation_name: Name of relation nrpe sub joined to
778 """
779 for rel in relations_of_type(relation_name):
780- if 'nagios_hostname' in rel:
781+ if 'nagios_host_context' in rel:
782 return rel['nagios_host_context']
783
784
785@@ -301,11 +337,13 @@
786 upstart_init = '/etc/init/%s.conf' % svc
787 sysv_init = '/etc/init.d/%s' % svc
788 if os.path.exists(upstart_init):
789- nrpe.add_check(
790- shortname=svc,
791- description='process check {%s}' % unit_name,
792- check_cmd='check_upstart_job %s' % svc
793- )
794+ # Don't add a check for these services from neutron-gateway
795+ if svc not in ['ext-port', 'os-charm-phy-nic-mtu']:
796+ nrpe.add_check(
797+ shortname=svc,
798+ description='process check {%s}' % unit_name,
799+ check_cmd='check_upstart_job %s' % svc
800+ )
801 elif os.path.exists(sysv_init):
802 cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
803 cron_file = ('*/5 * * * * root '
804
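
A hedged usage sketch for the new remove_check() API above, as it might be
called from within a charm hook (the shortname is illustrative):

from charmhelpers.contrib.charmsupport.nrpe import NRPE

nrpe = NRPE()
# Only shortname is required; check_cmd and description get safe defaults
# during removal, per the change above.
nrpe.remove_check(shortname='neutron-server')
nrpe.write()
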
805=== added directory 'hooks/charmhelpers/contrib/mellanox'
806=== added file 'hooks/charmhelpers/contrib/mellanox/__init__.py'
807=== added file 'hooks/charmhelpers/contrib/mellanox/infiniband.py'
808--- hooks/charmhelpers/contrib/mellanox/infiniband.py 1970-01-01 00:00:00 +0000
809+++ hooks/charmhelpers/contrib/mellanox/infiniband.py 2016-03-09 12:07:21 +0000
810@@ -0,0 +1,151 @@
811+#!/usr/bin/env python
812+# -*- coding: utf-8 -*-
813+
814+# Copyright 2014-2015 Canonical Limited.
815+#
816+# This file is part of charm-helpers.
817+#
818+# charm-helpers is free software: you can redistribute it and/or modify
819+# it under the terms of the GNU Lesser General Public License version 3 as
820+# published by the Free Software Foundation.
821+#
822+# charm-helpers is distributed in the hope that it will be useful,
823+# but WITHOUT ANY WARRANTY; without even the implied warranty of
824+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
825+# GNU Lesser General Public License for more details.
826+#
827+# You should have received a copy of the GNU Lesser General Public License
828+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
829+
830+
831+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
832+
833+from charmhelpers.fetch import (
834+ apt_install,
835+ apt_update,
836+)
837+
838+from charmhelpers.core.hookenv import (
839+ log,
840+ INFO,
841+)
842+
843+try:
844+ from netifaces import interfaces as network_interfaces
845+except ImportError:
846+ apt_install('python-netifaces')
847+ from netifaces import interfaces as network_interfaces
848+
849+import os
850+import re
851+import subprocess
852+
853+from charmhelpers.core.kernel import modprobe
854+
855+REQUIRED_MODULES = (
856+ "mlx4_ib",
857+ "mlx4_en",
858+ "mlx4_core",
859+ "ib_ipath",
860+ "ib_mthca",
861+ "ib_srpt",
862+ "ib_srp",
863+ "ib_ucm",
864+ "ib_isert",
865+ "ib_iser",
866+ "ib_ipoib",
867+ "ib_cm",
868+ "ib_uverbs",
869+ "ib_umad",
870+ "ib_sa",
871+ "ib_mad",
872+ "ib_core",
873+ "ib_addr",
874+ "rdma_ucm",
875+)
876+
877+REQUIRED_PACKAGES = (
878+ "ibutils",
879+ "infiniband-diags",
880+ "ibverbs-utils",
881+)
882+
883+IPOIB_DRIVERS = (
884+ "ib_ipoib",
885+)
886+
887+ABI_VERSION_FILE = "/sys/class/infiniband_mad/abi_version"
888+
889+
890+class DeviceInfo(object):
891+ pass
892+
893+
894+def install_packages():
895+ apt_update()
896+ apt_install(REQUIRED_PACKAGES, fatal=True)
897+
898+
899+def load_modules():
900+ for module in REQUIRED_MODULES:
901+ modprobe(module, persist=True)
902+
903+
904+def is_enabled():
905+ """Check if infiniband is loaded on the system"""
906+ return os.path.exists(ABI_VERSION_FILE)
907+
908+
909+def stat():
910+ """Return full output of ibstat"""
911+ return subprocess.check_output(["ibstat"])
912+
913+
914+def devices():
915+ """Returns a list of IB enabled devices"""
916+ return subprocess.check_output(['ibstat', '-l']).splitlines()
917+
918+
919+def device_info(device):
920+ """Returns a DeviceInfo object with the current device settings"""
921+
922+ status = subprocess.check_output([
923+ 'ibstat', device, '-s']).splitlines()
924+
925+ regexes = {
926+ "CA type: (.*)": "device_type",
927+ "Number of ports: (.*)": "num_ports",
928+ "Firmware version: (.*)": "fw_ver",
929+ "Hardware version: (.*)": "hw_ver",
930+ "Node GUID: (.*)": "node_guid",
931+ "System image GUID: (.*)": "sys_guid",
932+ }
933+
934+ device = DeviceInfo()
935+
936+ for line in status:
937+ for expression, key in regexes.items():
938+ matches = re.search(expression, line)
939+ if matches:
940+ setattr(device, key, matches.group(1))
941+
942+ return device
943+
944+
945+def ipoib_interfaces():
946+ """Return a list of IPOIB capable ethernet interfaces"""
947+ interfaces = []
948+
949+ for interface in network_interfaces():
950+ try:
951+ driver = re.search('^driver: (.+)$', subprocess.check_output([
952+ 'ethtool', '-i',
953+ interface]), re.M).group(1)
954+
955+ if driver in IPOIB_DRIVERS:
956+ interfaces.append(interface)
957+ except:
958+ log("Skipping interface %s" % interface, level=INFO)
959+ continue
960+
961+ return interfaces
962
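
A hedged usage sketch for the new module above, on a host with Infiniband
hardware present:

from charmhelpers.contrib.mellanox import infiniband

if infiniband.is_enabled():
    for dev in infiniband.devices():
        info = infiniband.device_info(dev)
        # Attributes come from the ibstat regexes above and are only
        # set when ibstat reports the corresponding field.
        print(dev, info.device_type, info.fw_ver)
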
963=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
964--- hooks/charmhelpers/contrib/network/ip.py 2015-07-29 18:35:16 +0000
965+++ hooks/charmhelpers/contrib/network/ip.py 2016-03-09 12:07:21 +0000
966@@ -23,7 +23,7 @@
967 from functools import partial
968
969 from charmhelpers.core.hookenv import unit_get
970-from charmhelpers.fetch import apt_install
971+from charmhelpers.fetch import apt_install, apt_update
972 from charmhelpers.core.hookenv import (
973 log,
974 WARNING,
975@@ -32,13 +32,15 @@
976 try:
977 import netifaces
978 except ImportError:
979- apt_install('python-netifaces')
980+ apt_update(fatal=True)
981+ apt_install('python-netifaces', fatal=True)
982 import netifaces
983
984 try:
985 import netaddr
986 except ImportError:
987- apt_install('python-netaddr')
988+ apt_update(fatal=True)
989+ apt_install('python-netaddr', fatal=True)
990 import netaddr
991
992
993@@ -51,7 +53,7 @@
994
995
996 def no_ip_found_error_out(network):
997- errmsg = ("No IP address found in network: %s" % network)
998+ errmsg = ("No IP address found in network(s): %s" % network)
999 raise ValueError(errmsg)
1000
1001
1002@@ -59,7 +61,7 @@
1003 """Get an IPv4 or IPv6 address within the network from the host.
1004
1005 :param network (str): CIDR presentation format. For example,
1006- '192.168.1.0/24'.
1007+ '192.168.1.0/24'. Supports multiple networks as a space-delimited list.
1008 :param fallback (str): If no address is found, return fallback.
1009 :param fatal (boolean): If no address is found, fallback is not
1010 set and fatal is True then exit(1).
1011@@ -73,24 +75,26 @@
1012 else:
1013 return None
1014
1015- _validate_cidr(network)
1016- network = netaddr.IPNetwork(network)
1017- for iface in netifaces.interfaces():
1018- addresses = netifaces.ifaddresses(iface)
1019- if network.version == 4 and netifaces.AF_INET in addresses:
1020- addr = addresses[netifaces.AF_INET][0]['addr']
1021- netmask = addresses[netifaces.AF_INET][0]['netmask']
1022- cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
1023- if cidr in network:
1024- return str(cidr.ip)
1025+ networks = network.split() or [network]
1026+ for network in networks:
1027+ _validate_cidr(network)
1028+ network = netaddr.IPNetwork(network)
1029+ for iface in netifaces.interfaces():
1030+ addresses = netifaces.ifaddresses(iface)
1031+ if network.version == 4 and netifaces.AF_INET in addresses:
1032+ addr = addresses[netifaces.AF_INET][0]['addr']
1033+ netmask = addresses[netifaces.AF_INET][0]['netmask']
1034+ cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
1035+ if cidr in network:
1036+ return str(cidr.ip)
1037
1038- if network.version == 6 and netifaces.AF_INET6 in addresses:
1039- for addr in addresses[netifaces.AF_INET6]:
1040- if not addr['addr'].startswith('fe80'):
1041- cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
1042- addr['netmask']))
1043- if cidr in network:
1044- return str(cidr.ip)
1045+ if network.version == 6 and netifaces.AF_INET6 in addresses:
1046+ for addr in addresses[netifaces.AF_INET6]:
1047+ if not addr['addr'].startswith('fe80'):
1048+ cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
1049+ addr['netmask']))
1050+ if cidr in network:
1051+ return str(cidr.ip)
1052
1053 if fallback is not None:
1054 return fallback
1055@@ -435,8 +439,12 @@
1056
1057 rev = dns.reversename.from_address(address)
1058 result = ns_query(rev)
1059+
1060 if not result:
1061- return None
1062+ try:
1063+ result = socket.gethostbyaddr(address)[0]
1064+ except:
1065+ return None
1066 else:
1067 result = address
1068
1069@@ -448,3 +456,18 @@
1070 return result
1071 else:
1072 return result.split('.')[0]
1073+
1074+
1075+def port_has_listener(address, port):
1076+ """
1077+ Returns True if the address:port is open and being listened to,
1078+ else False.
1079+
1080+ @param address: an IP address or hostname
1081+ @param port: integer port
1082+
1083+ Note: calls 'nc' via subprocess
1084+ """
1085+ cmd = ['nc', '-z', address, str(port)]
1086+ result = subprocess.call(cmd)
1087+ return not(bool(result))
1088
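
A hedged example of the widened get_address_in_network() above, which now
accepts a space-delimited list of CIDRs (first match wins), together with the
new port_has_listener() helper:

from charmhelpers.contrib.network.ip import (
    get_address_in_network,
    port_has_listener,
)

addr = get_address_in_network('192.168.1.0/24 10.0.0.0/16',
                              fallback='127.0.0.1')
print(addr, port_has_listener(addr, 22))
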
1089=== modified file 'hooks/charmhelpers/contrib/network/ufw.py'
1090--- hooks/charmhelpers/contrib/network/ufw.py 2015-07-29 18:35:16 +0000
1091+++ hooks/charmhelpers/contrib/network/ufw.py 2016-03-09 12:07:21 +0000
1092@@ -40,7 +40,9 @@
1093 import re
1094 import os
1095 import subprocess
1096+
1097 from charmhelpers.core import hookenv
1098+from charmhelpers.core.kernel import modprobe, is_module_loaded
1099
1100 __author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
1101
1102@@ -82,14 +84,11 @@
1103 # do we have IPv6 in the machine?
1104 if os.path.isdir('/proc/sys/net/ipv6'):
1105 # is ip6tables kernel module loaded?
1106- lsmod = subprocess.check_output(['lsmod'], universal_newlines=True)
1107- matches = re.findall('^ip6_tables[ ]+', lsmod, re.M)
1108- if len(matches) == 0:
1109+ if not is_module_loaded('ip6_tables'):
1110 # ip6tables support isn't complete, let's try to load it
1111 try:
1112- subprocess.check_output(['modprobe', 'ip6_tables'],
1113- universal_newlines=True)
1114- # great, we could load the module
1115+ modprobe('ip6_tables')
1116+ # great, we can load the module
1117 return True
1118 except subprocess.CalledProcessError as ex:
1119 hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
1120
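
The refactor above delegates to the charmhelpers.core.kernel module added
elsewhere in this diff; a minimal sketch of the same check-then-load logic
(persist flag as used by the infiniband helpers above):

from charmhelpers.core.kernel import is_module_loaded, modprobe

if not is_module_loaded('ip6_tables'):
    modprobe('ip6_tables', persist=True)
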
1121=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
1122--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-07-29 18:35:16 +0000
1123+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2016-03-09 12:07:21 +0000
1124@@ -14,12 +14,18 @@
1125 # You should have received a copy of the GNU Lesser General Public License
1126 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1127
1128+import logging
1129+import re
1130+import sys
1131 import six
1132 from collections import OrderedDict
1133 from charmhelpers.contrib.amulet.deployment import (
1134 AmuletDeployment
1135 )
1136
1137+DEBUG = logging.DEBUG
1138+ERROR = logging.ERROR
1139+
1140
1141 class OpenStackAmuletDeployment(AmuletDeployment):
1142 """OpenStack amulet deployment.
1143@@ -28,9 +34,12 @@
1144 that is specifically for use by OpenStack charms.
1145 """
1146
1147- def __init__(self, series=None, openstack=None, source=None, stable=True):
1148+ def __init__(self, series=None, openstack=None, source=None,
1149+ stable=True, log_level=DEBUG):
1150 """Initialize the deployment environment."""
1151 super(OpenStackAmuletDeployment, self).__init__(series)
1152+ self.log = self.get_logger(level=log_level)
1153+ self.log.info('OpenStackAmuletDeployment: init')
1154 self.openstack = openstack
1155 self.source = source
1156 self.stable = stable
1157@@ -38,26 +47,55 @@
1158 # out.
1159 self.current_next = "trusty"
1160
1161+ def get_logger(self, name="deployment-logger", level=logging.DEBUG):
1162+ """Get a logger object that will log to stdout."""
1163+ log = logging
1164+ logger = log.getLogger(name)
1165+ fmt = log.Formatter("%(asctime)s %(funcName)s "
1166+ "%(levelname)s: %(message)s")
1167+
1168+ handler = log.StreamHandler(stream=sys.stdout)
1169+ handler.setLevel(level)
1170+ handler.setFormatter(fmt)
1171+
1172+ logger.addHandler(handler)
1173+ logger.setLevel(level)
1174+
1175+ return logger
1176+
1177 def _determine_branch_locations(self, other_services):
1178 """Determine the branch locations for the other services.
1179
1180 Determine if the local branch being tested is derived from its
181 stable or next (dev) branch, and based on this, use the corresponding
1182 stable or next branches for the other_services."""
1183- base_charms = ['mysql', 'mongodb']
1184+
1185+ self.log.info('OpenStackAmuletDeployment: determine branch locations')
1186+
1187+ # Charms outside the lp:~openstack-charmers namespace
1188+ base_charms = ['mysql', 'mongodb', 'nrpe']
1189+
1190+ # Force these charms to current series even when using an older series.
1191+ # ie. Use trusty/nrpe even when series is precise, as the P charm
1192+ # does not possess the necessary external master config and hooks.
1193+ force_series_current = ['nrpe']
1194
1195 if self.series in ['precise', 'trusty']:
1196 base_series = self.series
1197 else:
1198 base_series = self.current_next
1199
1200- if self.stable:
1201- for svc in other_services:
1202+ for svc in other_services:
1203+ if svc['name'] in force_series_current:
1204+ base_series = self.current_next
1205+ # If a location has been explicitly set, use it
1206+ if svc.get('location'):
1207+ continue
1208+ if self.stable:
1209 temp = 'lp:charms/{}/{}'
1210 svc['location'] = temp.format(base_series,
1211 svc['name'])
1212- else:
1213- for svc in other_services:
1214+ else:
1215 if svc['name'] in base_charms:
1216 temp = 'lp:charms/{}/{}'
1217 svc['location'] = temp.format(base_series,
1218@@ -66,10 +104,13 @@
1219 temp = 'lp:~openstack-charmers/charms/{}/{}/next'
1220 svc['location'] = temp.format(self.current_next,
1221 svc['name'])
1222+
1223 return other_services
1224
1225 def _add_services(self, this_service, other_services):
1226 """Add services to the deployment and set openstack-origin/source."""
1227+ self.log.info('OpenStackAmuletDeployment: adding services')
1228+
1229 other_services = self._determine_branch_locations(other_services)
1230
1231 super(OpenStackAmuletDeployment, self)._add_services(this_service,
1232@@ -77,29 +118,103 @@
1233
1234 services = other_services
1235 services.append(this_service)
1236+
1237+ # Charms which should use the source config option
1238 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
1239- 'ceph-osd', 'ceph-radosgw']
1240- # Most OpenStack subordinate charms do not expose an origin option
1241- # as that is controlled by the principle.
1242- ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch']
1243+ 'ceph-osd', 'ceph-radosgw', 'ceph-mon']
1244+
1245+ # Charms which can not use openstack-origin, ie. many subordinates
1246+ no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
1247+ 'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
1248+ 'cinder-backup']
1249
1250 if self.openstack:
1251 for svc in services:
1252- if svc['name'] not in use_source + ignore:
1253+ if svc['name'] not in use_source + no_origin:
1254 config = {'openstack-origin': self.openstack}
1255 self.d.configure(svc['name'], config)
1256
1257 if self.source:
1258 for svc in services:
1259- if svc['name'] in use_source and svc['name'] not in ignore:
1260+ if svc['name'] in use_source and svc['name'] not in no_origin:
1261 config = {'source': self.source}
1262 self.d.configure(svc['name'], config)
1263
1264 def _configure_services(self, configs):
1265 """Configure all of the services."""
1266+ self.log.info('OpenStackAmuletDeployment: configure services')
1267 for service, config in six.iteritems(configs):
1268 self.d.configure(service, config)
1269
1270+ def _auto_wait_for_status(self, message=None, exclude_services=None,
1271+ include_only=None, timeout=1800):
1272+ """Wait for all units to have a specific extended status, except
1273+ for any defined as excluded. Unless specified via message, any
1274+ status containing any case of 'ready' will be considered a match.
1275+
1276+ Examples of message usage:
1277+
1278+ Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
1279+ message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
1280+
1281+ Wait for all units to reach this status (exact match):
1282+ message = re.compile('^Unit is ready and clustered$')
1283+
1284+ Wait for all units to reach any one of these (exact match):
1285+ message = re.compile('Unit is ready|OK|Ready')
1286+
1287+ Wait for at least one unit to reach this status (exact match):
1288+ message = {'ready'}
1289+
1290+ See Amulet's sentry.wait_for_messages() for message usage detail.
1291+ https://github.com/juju/amulet/blob/master/amulet/sentry.py
1292+
1293+ :param message: Expected status match
1294+ :param exclude_services: List of juju service names to ignore,
1295+ not to be used in conjunction with include_only.
1296+ :param include_only: List of juju service names to exclusively check,
1297+ not to be used in conjunction with exclude_services.
1298+ :param timeout: Maximum time in seconds to wait for status match
1299+ :returns: None. Raises if timeout is hit.
1300+ """
1301+ self.log.info('Waiting for extended status on units...')
1302+
1303+ all_services = self.d.services.keys()
1304+
1305+ if exclude_services and include_only:
1306+ raise ValueError('exclude_services can not be used '
1307+ 'with include_only')
1308+
1309+ if message:
1310+ if isinstance(message, re._pattern_type):
1311+ match = message.pattern
1312+ else:
1313+ match = message
1314+
1315+ self.log.debug('Custom extended status wait match: '
1316+ '{}'.format(match))
1317+ else:
1318+ self.log.debug('Default extended status wait match: contains '
1319+ 'READY (case-insensitive)')
1320+ message = re.compile('.*ready.*', re.IGNORECASE)
1321+
1322+ if exclude_services:
1323+ self.log.debug('Excluding services from extended status match: '
1324+ '{}'.format(exclude_services))
1325+ else:
1326+ exclude_services = []
1327+
1328+ if include_only:
1329+ services = include_only
1330+ else:
1331+ services = list(set(all_services) - set(exclude_services))
1332+
1333+ self.log.debug('Waiting up to {}s for extended status on services: '
1334+ '{}'.format(timeout, services))
1335+ service_messages = {service: message for service in services}
1336+ self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
1337+ self.log.info('OK')
1338+
1339 def _get_openstack_release(self):
1340 """Get openstack release.
1341
1342@@ -111,7 +226,8 @@
1343 self.precise_havana, self.precise_icehouse,
1344 self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
1345 self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
1346- self.wily_liberty) = range(12)
1347+ self.wily_liberty, self.trusty_mitaka,
1348+ self.xenial_mitaka) = range(14)
1349
1350 releases = {
1351 ('precise', None): self.precise_essex,
1352@@ -123,9 +239,11 @@
1353 ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
1354 ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
1355 ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
1356+ ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
1357 ('utopic', None): self.utopic_juno,
1358 ('vivid', None): self.vivid_kilo,
1359- ('wily', None): self.wily_liberty}
1360+ ('wily', None): self.wily_liberty,
1361+ ('xenial', None): self.xenial_mitaka}
1362 return releases[(self.series, self.openstack)]
1363
1364 def _get_openstack_release_string(self):
1365@@ -142,6 +260,7 @@
1366 ('utopic', 'juno'),
1367 ('vivid', 'kilo'),
1368 ('wily', 'liberty'),
1369+ ('xenial', 'mitaka'),
1370 ])
1371 if self.openstack:
1372 os_origin = self.openstack.split(':')[1]
1373
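
A usage sketch for the new _auto_wait_for_status() helper, following the
examples in its docstring above (requires a bootstrapped juju environment,
as with any amulet test):

import re

from charmhelpers.contrib.openstack.amulet.deployment import (
    OpenStackAmuletDeployment,
)

d = OpenStackAmuletDeployment(series='trusty')
# Wait for every unit except mysql to report exactly 'Unit is ready'.
d._auto_wait_for_status(message=re.compile('^Unit is ready$'),
                        exclude_services=['mysql'])
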
1374=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
1375--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-07-29 18:35:16 +0000
1376+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2016-03-09 12:07:21 +0000
1377@@ -18,6 +18,7 @@
1378 import json
1379 import logging
1380 import os
1381+import re
1382 import six
1383 import time
1384 import urllib
1385@@ -27,6 +28,7 @@
1386 import heatclient.v1.client as heat_client
1387 import keystoneclient.v2_0 as keystone_client
1388 import novaclient.v1_1.client as nova_client
1389+import pika
1390 import swiftclient
1391
1392 from charmhelpers.contrib.amulet.utils import (
1393@@ -602,3 +604,382 @@
1394 self.log.debug('Ceph {} samples (OK): '
1395 '{}'.format(sample_type, samples))
1396 return None
1397+
1398+ # rabbitmq/amqp specific helpers:
1399+
1400+ def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
1401+ """Wait for rmq units extended status to show cluster readiness,
1402+ after an optional initial sleep period. Initial sleep is likely
1403+ necessary to be effective following a config change, as status
1404+ message may not instantly update to non-ready."""
1405+
1406+ if init_sleep:
1407+ time.sleep(init_sleep)
1408+
1409+ message = re.compile('^Unit is ready and clustered$')
1410+ deployment._auto_wait_for_status(message=message,
1411+ timeout=timeout,
1412+ include_only=['rabbitmq-server'])
1413+
1414+ def add_rmq_test_user(self, sentry_units,
1415+ username="testuser1", password="changeme"):
1416+ """Add a test user via the first rmq juju unit, check connection as
1417+ the new user against all sentry units.
1418+
1419+ :param sentry_units: list of sentry unit pointers
1420+ :param username: amqp user name, default to testuser1
1421+ :param password: amqp user password
1422+ :returns: None if successful. Raise on error.
1423+ """
1424+ self.log.debug('Adding rmq user ({})...'.format(username))
1425+
1426+ # Check that user does not already exist
1427+ cmd_user_list = 'rabbitmqctl list_users'
1428+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
1429+ if username in output:
1430+ self.log.warning('User ({}) already exists, returning '
1431+ 'gracefully.'.format(username))
1432+ return
1433+
1434+ perms = '".*" ".*" ".*"'
1435+ cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
1436+ 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
1437+
1438+ # Add user via first unit
1439+ for cmd in cmds:
1440+ output, _ = self.run_cmd_unit(sentry_units[0], cmd)
1441+
1442+ # Check connection against the other sentry_units
1443+ self.log.debug('Checking user connect against units...')
1444+ for sentry_unit in sentry_units:
1445+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
1446+ username=username,
1447+ password=password)
1448+ connection.close()
1449+
1450+ def delete_rmq_test_user(self, sentry_units, username="testuser1"):
1451+ """Delete a rabbitmq user via the first rmq juju unit.
1452+
1453+ :param sentry_units: list of sentry unit pointers
1454+ :param username: amqp user name, default to testuser1
1455+ :param password: amqp user password
1456+ :returns: None if successful or no such user.
1457+ """
1458+ self.log.debug('Deleting rmq user ({})...'.format(username))
1459+
1460+ # Check that the user exists
1461+ cmd_user_list = 'rabbitmqctl list_users'
1462+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
1463+
1464+ if username not in output:
1465+ self.log.warning('User ({}) does not exist, returning '
1466+ 'gracefully.'.format(username))
1467+ return
1468+
1469+ # Delete the user
1470+ cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
1471+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
1472+
1473+ def get_rmq_cluster_status(self, sentry_unit):
1474+ """Execute rabbitmq cluster status command on a unit and return
1475+ the full output.
1476+
1477+ :param unit: sentry unit
1478+ :returns: String containing console output of cluster status command
1479+ """
1480+ cmd = 'rabbitmqctl cluster_status'
1481+ output, _ = self.run_cmd_unit(sentry_unit, cmd)
1482+ self.log.debug('{} cluster_status:\n{}'.format(
1483+ sentry_unit.info['unit_name'], output))
1484+ return str(output)
1485+
1486+ def get_rmq_cluster_running_nodes(self, sentry_unit):
1487+ """Parse rabbitmqctl cluster_status output string, return list of
1488+ running rabbitmq cluster nodes.
1489+
1490+ :param unit: sentry unit
1491+ :returns: List containing node names of running nodes
1492+ """
1493+ # NOTE(beisner): rabbitmqctl cluster_status output is not
1494+ # json-parsable, do string chop foo, then json.loads that.
1495+ str_stat = self.get_rmq_cluster_status(sentry_unit)
1496+ if 'running_nodes' in str_stat:
1497+ pos_start = str_stat.find("{running_nodes,") + 15
1498+ pos_end = str_stat.find("]},", pos_start) + 1
1499+ str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
1500+ run_nodes = json.loads(str_run_nodes)
1501+ return run_nodes
1502+ else:
1503+ return []
1504+
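Note: rabbitmqctl cluster_status emits Erlang terms, not JSON, hence the string chop above. A standalone sketch of the same extraction against a hypothetical two-node status string:

    import json

    str_stat = ("[{nodes,[{disc,['rabbit@juju-0','rabbit@juju-1']}]},"
                "{running_nodes,['rabbit@juju-0','rabbit@juju-1']},"
                "{partitions,[]}]")
    pos_start = str_stat.find("{running_nodes,") + 15  # skip past the tag
    pos_end = str_stat.find("]},", pos_start) + 1      # keep the closing ']'
    run_nodes = json.loads(str_stat[pos_start:pos_end].replace("'", '"'))
    # run_nodes == ['rabbit@juju-0', 'rabbit@juju-1']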
1505+ def validate_rmq_cluster_running_nodes(self, sentry_units):
1506+ """Check that all rmq unit hostnames are represented in the
1507+ cluster_status output of all units.
1508+
1509+ :param sentry_units: list of sentry unit pointers
1510+ (all rmq units)
1511+ :returns: None if successful, otherwise return error message
1512+ """
1513+ host_names = self.get_unit_hostnames(sentry_units)
1514+ errors = []
1515+
1516+ # Query every unit for cluster_status running nodes
1517+ for query_unit in sentry_units:
1518+ query_unit_name = query_unit.info['unit_name']
1519+ running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
1520+
1521+ # Confirm that every unit is represented in the queried unit's
1522+ # cluster_status running nodes output.
1523+ for validate_unit in sentry_units:
1524+ val_host_name = host_names[validate_unit.info['unit_name']]
1525+ val_node_name = 'rabbit@{}'.format(val_host_name)
1526+
1527+ if val_node_name not in running_nodes:
1528+ errors.append('Cluster member check failed on {}: {} not '
1529+ 'in {}\n'.format(query_unit_name,
1530+ val_node_name,
1531+ running_nodes))
1532+ if errors:
1533+ return ''.join(errors)
1534+
1535+ def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
1536+ """Check a single juju rmq unit for ssl and port in the config file."""
1537+ host = sentry_unit.info['public-address']
1538+ unit_name = sentry_unit.info['unit_name']
1539+
1540+ conf_file = '/etc/rabbitmq/rabbitmq.config'
1541+ conf_contents = str(self.file_contents_safe(sentry_unit,
1542+ conf_file, max_wait=16))
1543+ # Checks
1544+ conf_ssl = 'ssl' in conf_contents
1545+ conf_port = str(port) in conf_contents
1546+
1547+ # Port explicitly checked in config
1548+ if port and conf_port and conf_ssl:
1549+ self.log.debug('SSL is enabled @{}:{} '
1550+ '({})'.format(host, port, unit_name))
1551+ return True
1552+ elif port and not conf_port and conf_ssl:
1553+ self.log.debug('SSL is enabled @{} but not on port {} '
1554+ '({})'.format(host, port, unit_name))
1555+ return False
1556+ # Port not checked (useful when checking that ssl is disabled)
1557+ elif not port and conf_ssl:
1558+ self.log.debug('SSL is enabled @{}:{} '
1559+ '({})'.format(host, port, unit_name))
1560+ return True
1561+ elif not conf_ssl:
1562+ self.log.debug('SSL not enabled @{}:{} '
1563+ '({})'.format(host, port, unit_name))
1564+ return False
1565+ else:
1566+ msg = ('Unknown condition when checking SSL status @{}:{} '
1567+ '({})'.format(host, port, unit_name))
1568+ amulet.raise_status(amulet.FAIL, msg)
1569+
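For readability, the branching above reduces to this decision table (the final else appears to be defensive only, since the four branches are exhaustive):

    port arg   'ssl' in conf   port in conf   result
    given      yes             yes            True
    given      yes             no             False
    omitted    yes             n/a            True
    any        no              any            False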
1570+ def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
1571+ """Check that ssl is enabled on rmq juju sentry units.
1572+
1573+ :param sentry_units: list of all rmq sentry units
1574+ :param port: optional ssl port override to validate
1575+ :returns: None if successful, otherwise return error message
1576+ """
1577+ for sentry_unit in sentry_units:
1578+ if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
1579+ return ('Unexpected condition: ssl is disabled on unit '
1580+ '({})'.format(sentry_unit.info['unit_name']))
1581+ return None
1582+
1583+ def validate_rmq_ssl_disabled_units(self, sentry_units):
1584+ """Check that ssl is enabled on listed rmq juju sentry units.
1585+
1586+ :param sentry_units: list of all rmq sentry units
1587+ :returns: None if successful, otherwise return error message
1588+ """
1589+ for sentry_unit in sentry_units:
1590+ if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
1591+ return ('Unexpected condition: ssl is enabled on unit '
1592+ '({})'.format(sentry_unit.info['unit_name']))
1593+ return None
1594+
1595+ def configure_rmq_ssl_on(self, sentry_units, deployment,
1596+ port=None, max_wait=60):
1597+ """Turn ssl charm config option on, with optional non-default
1598+ ssl port specification. Confirm that it is enabled on every
1599+ unit.
1600+
1601+ :param sentry_units: list of sentry units
1602+ :param deployment: amulet deployment object pointer
1603+ :param port: amqp port, use defaults if None
1604+ :param max_wait: maximum time to wait in seconds to confirm
1605+ :returns: None if successful. Raise on error.
1606+ """
1607+ self.log.debug('Setting ssl charm config option: on')
1608+
1609+ # Enable RMQ SSL
1610+ config = {'ssl': 'on'}
1611+ if port:
1612+ config['ssl_port'] = port
1613+
1614+ deployment.d.configure('rabbitmq-server', config)
1615+
1616+ # Wait for unit status
1617+ self.rmq_wait_for_cluster(deployment)
1618+
1619+ # Confirm
1620+ tries = 0
1621+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
1622+ while ret and tries < (max_wait / 4):
1623+ time.sleep(4)
1624+ self.log.debug('Attempt {}: {}'.format(tries, ret))
1625+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
1626+ tries += 1
1627+
1628+ if ret:
1629+ amulet.raise_status(amulet.FAIL, ret)
1630+
1631+ def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
1632+ """Turn ssl charm config option off, confirm that it is disabled
1633+ on every unit.
1634+
1635+ :param sentry_units: list of sentry units
1636+ :param deployment: amulet deployment object pointer
1637+ :param max_wait: maximum time to wait in seconds to confirm
1638+ :returns: None if successful. Raise on error.
1639+ """
1640+ self.log.debug('Setting ssl charm config option: off')
1641+
1642+ # Disable RMQ SSL
1643+ config = {'ssl': 'off'}
1644+ deployment.d.configure('rabbitmq-server', config)
1645+
1646+ # Wait for unit status
1647+ self.rmq_wait_for_cluster(deployment)
1648+
1649+ # Confirm
1650+ tries = 0
1651+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
1652+ while ret and tries < (max_wait / 4):
1653+ time.sleep(4)
1654+ self.log.debug('Attempt {}: {}'.format(tries, ret))
1655+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
1656+ tries += 1
1657+
1658+ if ret:
1659+ amulet.raise_status(amulet.FAIL, ret)
1660+
1661+ def connect_amqp_by_unit(self, sentry_unit, ssl=False,
1662+ port=None, fatal=True,
1663+ username="testuser1", password="changeme"):
1664+ """Establish and return a pika amqp connection to the rabbitmq service
1665+ running on a rmq juju unit.
1666+
1667+ :param sentry_unit: sentry unit pointer
1668+ :param ssl: boolean, default to False
1669+ :param port: amqp port, use defaults if None
1670+ :param fatal: boolean, default to True (raises on connect error)
1671+ :param username: amqp user name, default to testuser1
1672+ :param password: amqp user password
1673+ :returns: pika amqp connection pointer or None if failed and non-fatal
1674+ """
1675+ host = sentry_unit.info['public-address']
1676+ unit_name = sentry_unit.info['unit_name']
1677+
1678+ # Default port logic if port is not specified
1679+ if ssl and not port:
1680+ port = 5671
1681+ elif not ssl and not port:
1682+ port = 5672
1683+
1684+ self.log.debug('Connecting to amqp on {}:{} ({}) as '
1685+ '{}...'.format(host, port, unit_name, username))
1686+
1687+ try:
1688+ credentials = pika.PlainCredentials(username, password)
1689+ parameters = pika.ConnectionParameters(host=host, port=port,
1690+ credentials=credentials,
1691+ ssl=ssl,
1692+ connection_attempts=3,
1693+ retry_delay=5,
1694+ socket_timeout=1)
1695+ connection = pika.BlockingConnection(parameters)
1696+ assert connection.server_properties['product'] == 'RabbitMQ'
1697+ self.log.debug('Connect OK')
1698+ return connection
1699+ except Exception as e:
1700+ msg = ('amqp connection failed to {}:{} as '
1701+ '{} ({})'.format(host, port, username, str(e)))
1702+ if fatal:
1703+ amulet.raise_status(amulet.FAIL, msg)
1704+ else:
1705+ self.log.warn(msg)
1706+ return None
1707+
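The port defaults mirror the standard AMQP ports (5672 plaintext, 5671 TLS). A usage sketch, assuming u is an instance of this utils class and sentry_units an amulet sentry list:

    # Non-fatal probe: returns None on failure instead of raising
    conn = u.connect_amqp_by_unit(sentry_units[0], ssl=True, fatal=False)
    if conn:  # port defaulted to 5671 because ssl=True
        conn.close()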
1708+ def publish_amqp_message_by_unit(self, sentry_unit, message,
1709+ queue="test", ssl=False,
1710+ username="testuser1",
1711+ password="changeme",
1712+ port=None):
1713+ """Publish an amqp message to a rmq juju unit.
1714+
1715+ :param sentry_unit: sentry unit pointer
1716+ :param message: amqp message string
1717+ :param queue: message queue, default to test
1718+ :param username: amqp user name, default to testuser1
1719+ :param password: amqp user password
1720+ :param ssl: boolean, default to False
1721+ :param port: amqp port, use defaults if None
1722+ :returns: None. Raises exception if publish failed.
1723+ """
1724+ self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
1725+ message))
1726+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
1727+ port=port,
1728+ username=username,
1729+ password=password)
1730+
1731+ # NOTE(beisner): extra debug here re: pika hang potential:
1732+ # https://github.com/pika/pika/issues/297
1733+ # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
1734+ self.log.debug('Defining channel...')
1735+ channel = connection.channel()
1736+ self.log.debug('Declaring queue...')
1737+ channel.queue_declare(queue=queue, auto_delete=False, durable=True)
1738+ self.log.debug('Publishing message...')
1739+ channel.basic_publish(exchange='', routing_key=queue, body=message)
1740+ self.log.debug('Closing channel...')
1741+ channel.close()
1742+ self.log.debug('Closing connection...')
1743+ connection.close()
1744+
1745+ def get_amqp_message_by_unit(self, sentry_unit, queue="test",
1746+ username="testuser1",
1747+ password="changeme",
1748+ ssl=False, port=None):
1749+ """Get an amqp message from a rmq juju unit.
1750+
1751+ :param sentry_unit: sentry unit pointer
1752+ :param queue: message queue, default to test
1753+ :param username: amqp user name, default to testuser1
1754+ :param password: amqp user password
1755+ :param ssl: boolean, default to False
1756+ :param port: amqp port, use defaults if None
1757+ :returns: amqp message body as string. Raise if get fails.
1758+ """
1759+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
1760+ port=port,
1761+ username=username,
1762+ password=password)
1763+ channel = connection.channel()
1764+ method_frame, _, body = channel.basic_get(queue)
1765+
1766+ if method_frame:
1767+ self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
1768+ body))
1769+ channel.basic_ack(method_frame.delivery_tag)
1770+ channel.close()
1771+ connection.close()
1772+ return body
1773+ else:
1774+ msg = 'No message retrieved.'
1775+ amulet.raise_status(amulet.FAIL, msg)
1776
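Together, the two helpers above give a publish/consume round trip for smoke tests, e.g. (a sketch; u is an instance of this utils class, unit an rmq sentry):

    u.publish_amqp_message_by_unit(unit, 'hello world', queue='smoke-test')
    body = u.get_amqp_message_by_unit(unit, queue='smoke-test')
    assert body == 'hello world'  # the get raises amulet.FAIL on an empty queue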
1777=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
1778--- hooks/charmhelpers/contrib/openstack/context.py 2015-07-29 18:35:16 +0000
1779+++ hooks/charmhelpers/contrib/openstack/context.py 2016-03-09 12:07:21 +0000
1780@@ -14,6 +14,7 @@
1781 # You should have received a copy of the GNU Lesser General Public License
1782 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1783
1784+import glob
1785 import json
1786 import os
1787 import re
1788@@ -50,10 +51,13 @@
1789 from charmhelpers.core.strutils import bool_from_string
1790
1791 from charmhelpers.core.host import (
1792+ get_bond_master,
1793+ is_phy_iface,
1794 list_nics,
1795 get_nic_hwaddr,
1796 mkdir,
1797 write_file,
1798+ pwgen,
1799 )
1800 from charmhelpers.contrib.hahelpers.cluster import (
1801 determine_apache_port,
1802@@ -84,6 +88,14 @@
1803 is_bridge_member,
1804 )
1805 from charmhelpers.contrib.openstack.utils import get_host_ip
1806+from charmhelpers.core.unitdata import kv
1807+
1808+try:
1809+ import psutil
1810+except ImportError:
1811+ apt_install('python-psutil', fatal=True)
1812+ import psutil
1813+
1814 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
1815 ADDRESS_TYPES = ['admin', 'internal', 'public']
1816
1817@@ -192,10 +204,50 @@
1818 class OSContextGenerator(object):
1819 """Base class for all context generators."""
1820 interfaces = []
1821+ related = False
1822+ complete = False
1823+ missing_data = []
1824
1825 def __call__(self):
1826 raise NotImplementedError
1827
1828+ def context_complete(self, ctxt):
1829+ """Check for missing data for the required context data.
1830+ Set self.missing_data if it exists and return False.
1831+ Set self.complete if no missing data and return True.
1832+ """
1833+ # Fresh start
1834+ self.complete = False
1835+ self.missing_data = []
1836+ for k, v in six.iteritems(ctxt):
1837+ if v is None or v == '':
1838+ if k not in self.missing_data:
1839+ self.missing_data.append(k)
1840+
1841+ if self.missing_data:
1842+ self.complete = False
1843+ log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
1844+ else:
1845+ self.complete = True
1846+ return self.complete
1847+
1848+ def get_related(self):
1849+ """Check if any of the context interfaces have relation ids.
1850+ Set self.related and return True if one of the interfaces
1851+ has relation ids.
1852+ """
1853+ # Fresh start
1854+ self.related = False
1855+ try:
1856+ for interface in self.interfaces:
1857+ if relation_ids(interface):
1858+ self.related = True
1859+ return self.related
1860+ except AttributeError as e:
1861+ log("{} {}"
1862+ "".format(self, e), 'INFO')
1863+ return self.related
1864+
1865
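The new base-class attributes let generators report partial relation data instead of silently returning {}. A minimal sketch of a subclass using them (hypothetical interface and keys):

    class ExampleContext(OSContextGenerator):
        interfaces = ['example-relation']

        def __call__(self):
            ctxt = {'host': relation_get('host'),
                    'password': relation_get('password')}
            if self.context_complete(ctxt):  # also sets self.complete
                return ctxt
            return {}  # self.missing_data now names the empty keys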
1866 class SharedDBContext(OSContextGenerator):
1867 interfaces = ['shared-db']
1868@@ -211,6 +263,7 @@
1869 self.database = database
1870 self.user = user
1871 self.ssl_dir = ssl_dir
1872+ self.rel_name = self.interfaces[0]
1873
1874 def __call__(self):
1875 self.database = self.database or config('database')
1876@@ -244,6 +297,7 @@
1877 password_setting = self.relation_prefix + '_password'
1878
1879 for rid in relation_ids(self.interfaces[0]):
1880+ self.related = True
1881 for unit in related_units(rid):
1882 rdata = relation_get(rid=rid, unit=unit)
1883 host = rdata.get('db_host')
1884@@ -255,7 +309,7 @@
1885 'database_password': rdata.get(password_setting),
1886 'database_type': 'mysql'
1887 }
1888- if context_complete(ctxt):
1889+ if self.context_complete(ctxt):
1890 db_ssl(rdata, ctxt, self.ssl_dir)
1891 return ctxt
1892 return {}
1893@@ -276,6 +330,7 @@
1894
1895 ctxt = {}
1896 for rid in relation_ids(self.interfaces[0]):
1897+ self.related = True
1898 for unit in related_units(rid):
1899 rel_host = relation_get('host', rid=rid, unit=unit)
1900 rel_user = relation_get('user', rid=rid, unit=unit)
1901@@ -285,7 +340,7 @@
1902 'database_user': rel_user,
1903 'database_password': rel_passwd,
1904 'database_type': 'postgresql'}
1905- if context_complete(ctxt):
1906+ if self.context_complete(ctxt):
1907 return ctxt
1908
1909 return {}
1910@@ -346,6 +401,7 @@
1911 ctxt['signing_dir'] = cachedir
1912
1913 for rid in relation_ids(self.rel_name):
1914+ self.related = True
1915 for unit in related_units(rid):
1916 rdata = relation_get(rid=rid, unit=unit)
1917 serv_host = rdata.get('service_host')
1918@@ -354,6 +410,7 @@
1919 auth_host = format_ipv6_addr(auth_host) or auth_host
1920 svc_protocol = rdata.get('service_protocol') or 'http'
1921 auth_protocol = rdata.get('auth_protocol') or 'http'
1922+ api_version = rdata.get('api_version') or '2.0'
1923 ctxt.update({'service_port': rdata.get('service_port'),
1924 'service_host': serv_host,
1925 'auth_host': auth_host,
1926@@ -362,9 +419,10 @@
1927 'admin_user': rdata.get('service_username'),
1928 'admin_password': rdata.get('service_password'),
1929 'service_protocol': svc_protocol,
1930- 'auth_protocol': auth_protocol})
1931+ 'auth_protocol': auth_protocol,
1932+ 'api_version': api_version})
1933
1934- if context_complete(ctxt):
1935+ if self.context_complete(ctxt):
1936 # NOTE(jamespage) this is required for >= icehouse
1937 # so a missing value just indicates keystone needs
1938 # upgrading
1939@@ -403,6 +461,7 @@
1940 ctxt = {}
1941 for rid in relation_ids(self.rel_name):
1942 ha_vip_only = False
1943+ self.related = True
1944 for unit in related_units(rid):
1945 if relation_get('clustered', rid=rid, unit=unit):
1946 ctxt['clustered'] = True
1947@@ -435,7 +494,7 @@
1948 ha_vip_only = relation_get('ha-vip-only',
1949 rid=rid, unit=unit) is not None
1950
1951- if context_complete(ctxt):
1952+ if self.context_complete(ctxt):
1953 if 'rabbit_ssl_ca' in ctxt:
1954 if not self.ssl_dir:
1955 log("Charm not setup for ssl support but ssl ca "
1956@@ -467,7 +526,7 @@
1957 ctxt['oslo_messaging_flags'] = config_flags_parser(
1958 oslo_messaging_flags)
1959
1960- if not context_complete(ctxt):
1961+ if not self.complete:
1962 return {}
1963
1964 return ctxt
1965@@ -483,13 +542,15 @@
1966
1967 log('Generating template context for ceph', level=DEBUG)
1968 mon_hosts = []
1969- auth = None
1970- key = None
1971- use_syslog = str(config('use-syslog')).lower()
1972+ ctxt = {
1973+ 'use_syslog': str(config('use-syslog')).lower()
1974+ }
1975 for rid in relation_ids('ceph'):
1976 for unit in related_units(rid):
1977- auth = relation_get('auth', rid=rid, unit=unit)
1978- key = relation_get('key', rid=rid, unit=unit)
1979+ if not ctxt.get('auth'):
1980+ ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
1981+ if not ctxt.get('key'):
1982+ ctxt['key'] = relation_get('key', rid=rid, unit=unit)
1983 ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
1984 unit=unit)
1985 unit_priv_addr = relation_get('private-address', rid=rid,
1986@@ -498,15 +559,12 @@
1987 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
1988 mon_hosts.append(ceph_addr)
1989
1990- ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
1991- 'auth': auth,
1992- 'key': key,
1993- 'use_syslog': use_syslog}
1994+ ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
1995
1996 if not os.path.isdir('/etc/ceph'):
1997 os.mkdir('/etc/ceph')
1998
1999- if not context_complete(ctxt):
2000+ if not self.context_complete(ctxt):
2001 return {}
2002
2003 ensure_packages(['ceph-common'])
2004@@ -579,15 +637,28 @@
2005 if config('haproxy-client-timeout'):
2006 ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
2007
2008+ if config('haproxy-queue-timeout'):
2009+ ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout')
2010+
2011+ if config('haproxy-connect-timeout'):
2012+ ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')
2013+
2014 if config('prefer-ipv6'):
2015 ctxt['ipv6'] = True
2016 ctxt['local_host'] = 'ip6-localhost'
2017 ctxt['haproxy_host'] = '::'
2018- ctxt['stat_port'] = ':::8888'
2019 else:
2020 ctxt['local_host'] = '127.0.0.1'
2021 ctxt['haproxy_host'] = '0.0.0.0'
2022- ctxt['stat_port'] = ':8888'
2023+
2024+ ctxt['stat_port'] = '8888'
2025+
2026+ db = kv()
2027+ ctxt['stat_password'] = db.get('stat-password')
2028+ if not ctxt['stat_password']:
2029+ ctxt['stat_password'] = db.set('stat-password',
2030+ pwgen(32))
2031+ db.flush()
2032
2033 for frontend in cluster_hosts:
2034 if (len(cluster_hosts[frontend]['backends']) > 1 or
2035@@ -878,19 +949,6 @@
2036
2037 return calico_ctxt
2038
2039- def pg_ctxt(self):
2040- driver = neutron_plugin_attribute(self.plugin, 'driver',
2041- self.network_manager)
2042- config = neutron_plugin_attribute(self.plugin, 'config',
2043- self.network_manager)
2044- ovs_ctxt = {'core_plugin': driver,
2045- 'neutron_plugin': 'plumgrid',
2046- 'neutron_security_groups': self.neutron_security_groups,
2047- 'local_ip': unit_private_ip(),
2048- 'config': config}
2049-
2050- return ovs_ctxt
2051-
2052 def neutron_ctxt(self):
2053 if https():
2054 proto = 'https'
2055@@ -906,6 +964,31 @@
2056 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
2057 return ctxt
2058
2059+ def pg_ctxt(self):
2060+ driver = neutron_plugin_attribute(self.plugin, 'driver',
2061+ self.network_manager)
2062+ config = neutron_plugin_attribute(self.plugin, 'config',
2063+ self.network_manager)
2064+ ovs_ctxt = {'core_plugin': driver,
2065+ 'neutron_plugin': 'plumgrid',
2066+ 'neutron_security_groups': self.neutron_security_groups,
2067+ 'local_ip': unit_private_ip(),
2068+ 'config': config}
2069+ return ovs_ctxt
2070+
2071+ def midonet_ctxt(self):
2072+ driver = neutron_plugin_attribute(self.plugin, 'driver',
2073+ self.network_manager)
2074+ midonet_config = neutron_plugin_attribute(self.plugin, 'config',
2075+ self.network_manager)
2076+ mido_ctxt = {'core_plugin': driver,
2077+ 'neutron_plugin': 'midonet',
2078+ 'neutron_security_groups': self.neutron_security_groups,
2079+ 'local_ip': unit_private_ip(),
2080+ 'config': midonet_config}
2081+
2082+ return mido_ctxt
2083+
2084 def __call__(self):
2085 if self.network_manager not in ['quantum', 'neutron']:
2086 return {}
2087@@ -927,6 +1010,8 @@
2088 ctxt.update(self.nuage_ctxt())
2089 elif self.plugin == 'plumgrid':
2090 ctxt.update(self.pg_ctxt())
2091+ elif self.plugin == 'midonet':
2092+ ctxt.update(self.midonet_ctxt())
2093
2094 alchemy_flags = config('neutron-alchemy-flags')
2095 if alchemy_flags:
2096@@ -938,7 +1023,6 @@
2097
2098
2099 class NeutronPortContext(OSContextGenerator):
2100- NIC_PREFIXES = ['eth', 'bond']
2101
2102 def resolve_ports(self, ports):
2103 """Resolve NICs not yet bound to bridge(s)
2104@@ -950,7 +1034,18 @@
2105
2106 hwaddr_to_nic = {}
2107 hwaddr_to_ip = {}
2108- for nic in list_nics(self.NIC_PREFIXES):
2109+ for nic in list_nics():
2110+ # Ignore virtual interfaces (bond masters will be identified from
2111+ # their slaves)
2112+ if not is_phy_iface(nic):
2113+ continue
2114+
2115+ _nic = get_bond_master(nic)
2116+ if _nic:
2117+ log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
2118+ level=DEBUG)
2119+ nic = _nic
2120+
2121 hwaddr = get_nic_hwaddr(nic)
2122 hwaddr_to_nic[hwaddr] = nic
2123 addresses = get_ipv4_addr(nic, fatal=False)
2124@@ -976,7 +1071,8 @@
2125 # trust it to be the real external network).
2126 resolved.append(entry)
2127
2128- return resolved
2129+ # Ensure no duplicates
2130+ return list(set(resolved))
2131
2132
2133 class OSConfigFlagContext(OSContextGenerator):
2134@@ -1016,6 +1112,20 @@
2135 config_flags_parser(config_flags)}
2136
2137
2138+class LibvirtConfigFlagsContext(OSContextGenerator):
2139+ """
2140+ This context provides support for extending
2141+ the libvirt section through user-defined flags.
2142+ """
2143+ def __call__(self):
2144+ ctxt = {}
2145+ libvirt_flags = config('libvirt-flags')
2146+ if libvirt_flags:
2147+ ctxt['libvirt_flags'] = config_flags_parser(
2148+ libvirt_flags)
2149+ return ctxt
2150+
2151+
2152 class SubordinateConfigContext(OSContextGenerator):
2153
2154 """
2155@@ -1048,7 +1158,7 @@
2156
2157 ctxt = {
2158 ... other context ...
2159- 'subordinate_config': {
2160+ 'subordinate_configuration': {
2161 'DEFAULT': {
2162 'key1': 'value1',
2163 },
2164@@ -1066,13 +1176,22 @@
2165 :param config_file : Service's config file to query sections
2166 :param interface : Subordinate interface to inspect
2167 """
2168- self.service = service
2169 self.config_file = config_file
2170- self.interface = interface
2171+ if isinstance(service, list):
2172+ self.services = service
2173+ else:
2174+ self.services = [service]
2175+ if isinstance(interface, list):
2176+ self.interfaces = interface
2177+ else:
2178+ self.interfaces = [interface]
2179
2180 def __call__(self):
2181 ctxt = {'sections': {}}
2182- for rid in relation_ids(self.interface):
2183+ rids = []
2184+ for interface in self.interfaces:
2185+ rids.extend(relation_ids(interface))
2186+ for rid in rids:
2187 for unit in related_units(rid):
2188 sub_config = relation_get('subordinate_configuration',
2189 rid=rid, unit=unit)
2190@@ -1080,33 +1199,37 @@
2191 try:
2192 sub_config = json.loads(sub_config)
2193 except:
2194- log('Could not parse JSON from subordinate_config '
2195- 'setting from %s' % rid, level=ERROR)
2196- continue
2197-
2198- if self.service not in sub_config:
2199- log('Found subordinate_config on %s but it contained'
2200- 'nothing for %s service' % (rid, self.service),
2201- level=INFO)
2202- continue
2203-
2204- sub_config = sub_config[self.service]
2205- if self.config_file not in sub_config:
2206- log('Found subordinate_config on %s but it contained'
2207- 'nothing for %s' % (rid, self.config_file),
2208- level=INFO)
2209- continue
2210-
2211- sub_config = sub_config[self.config_file]
2212- for k, v in six.iteritems(sub_config):
2213- if k == 'sections':
2214- for section, config_dict in six.iteritems(v):
2215- log("adding section '%s'" % (section),
2216- level=DEBUG)
2217- ctxt[k][section] = config_dict
2218- else:
2219- ctxt[k] = v
2220-
2221+ log('Could not parse JSON from '
2222+ 'subordinate_configuration setting from %s'
2223+ % rid, level=ERROR)
2224+ continue
2225+
2226+ for service in self.services:
2227+ if service not in sub_config:
2228+ log('Found subordinate_configuration on %s but it '
2229+ 'contained nothing for %s service'
2230+ % (rid, service), level=INFO)
2231+ continue
2232+
2233+ sub_config = sub_config[service]
2234+ if self.config_file not in sub_config:
2235+ log('Found subordinate_configuration on %s but it '
2236+ 'contained nothing for %s'
2237+ % (rid, self.config_file), level=INFO)
2238+ continue
2239+
2240+ sub_config = sub_config[self.config_file]
2241+ for k, v in six.iteritems(sub_config):
2242+ if k == 'sections':
2243+ for section, config_list in six.iteritems(v):
2244+ log("adding section '%s'" % (section),
2245+ level=DEBUG)
2246+ if ctxt[k].get(section):
2247+ ctxt[k][section].extend(config_list)
2248+ else:
2249+ ctxt[k][section] = config_list
2250+ else:
2251+ ctxt[k] = v
2252 log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
2253 return ctxt
2254
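For reference, the subordinate_configuration relation setting consumed above is JSON nested as service -> config file -> {'sections': ...}; with this change the per-section values are lists so that entries from multiple subordinates can be merged via extend(). A sketch of a minimal payload (hypothetical values):

    {
        "neutron-api": {
            "/etc/neutron/neutron.conf": {
                "sections": {
                    "DEFAULT": [["key1", "value1"]]
                }
            }
        }
    }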
2255@@ -1143,13 +1266,11 @@
2256
2257 @property
2258 def num_cpus(self):
2259- try:
2260- from psutil import NUM_CPUS
2261- except ImportError:
2262- apt_install('python-psutil', fatal=True)
2263- from psutil import NUM_CPUS
2264-
2265- return NUM_CPUS
2266+ # NOTE: use cpu_count if present (16.04 support)
2267+ if hasattr(psutil, 'cpu_count'):
2268+ return psutil.cpu_count()
2269+ else:
2270+ return psutil.NUM_CPUS
2271
2272 def __call__(self):
2273 multiplier = config('worker-multiplier') or 0
2274@@ -1283,15 +1404,19 @@
2275 def __call__(self):
2276 ports = config('data-port')
2277 if ports:
2278+ # Map of {port/mac:bridge}
2279 portmap = parse_data_port_mappings(ports)
2280- ports = portmap.values()
2281+ ports = portmap.keys()
2282+ # Resolve provided ports or mac addresses and filter out those
2283+ # already attached to a bridge.
2284 resolved = self.resolve_ports(ports)
2285+ # FIXME: is this necessary?
2286 normalized = {get_nic_hwaddr(port): port for port in resolved
2287 if port not in ports}
2288 normalized.update({port: port for port in resolved
2289 if port in ports})
2290 if resolved:
2291- return {bridge: normalized[port] for bridge, port in
2292+ return {normalized[port]: bridge for port, bridge in
2293 six.iteritems(portmap) if port in normalized.keys()}
2294
2295 return None
2296@@ -1302,12 +1427,22 @@
2297 def __call__(self):
2298 ctxt = {}
2299 mappings = super(PhyNICMTUContext, self).__call__()
2300- if mappings and mappings.values():
2301- ports = mappings.values()
2302+ if mappings and mappings.keys():
2303+ ports = sorted(mappings.keys())
2304 napi_settings = NeutronAPIContext()()
2305 mtu = napi_settings.get('network_device_mtu')
2306+ all_ports = set()
2307+ # If any of the ports is a vlan device, its underlying device must have
2308+ # mtu applied first.
2309+ for port in ports:
2310+ for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
2311+ lport = os.path.basename(lport)
2312+ all_ports.add(lport.split('_')[1])
2313+
2314+ all_ports = list(all_ports)
2315+ all_ports.extend(ports)
2316 if mtu:
2317- ctxt["devs"] = '\\n'.join(ports)
2318+ ctxt["devs"] = '\\n'.join(all_ports)
2319 ctxt['mtu'] = mtu
2320
2321 return ctxt
2322@@ -1338,7 +1473,9 @@
2323 rdata.get('service_protocol') or 'http',
2324 'auth_protocol':
2325 rdata.get('auth_protocol') or 'http',
2326+ 'api_version':
2327+ rdata.get('api_version') or '2.0',
2328 }
2329- if context_complete(ctxt):
2330+ if self.context_complete(ctxt):
2331 return ctxt
2332 return {}
2333
2334=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
2335--- hooks/charmhelpers/contrib/openstack/neutron.py 2015-07-29 18:35:16 +0000
2336+++ hooks/charmhelpers/contrib/openstack/neutron.py 2016-03-09 12:07:21 +0000
2337@@ -50,7 +50,7 @@
2338 if kernel_version() >= (3, 13):
2339 return []
2340 else:
2341- return ['openvswitch-datapath-dkms']
2342+ return [headers_package(), 'openvswitch-datapath-dkms']
2343
2344
2345 # legacy
2346@@ -70,7 +70,7 @@
2347 relation_prefix='neutron',
2348 ssl_dir=QUANTUM_CONF_DIR)],
2349 'services': ['quantum-plugin-openvswitch-agent'],
2350- 'packages': [[headers_package()] + determine_dkms_package(),
2351+ 'packages': [determine_dkms_package(),
2352 ['quantum-plugin-openvswitch-agent']],
2353 'server_packages': ['quantum-server',
2354 'quantum-plugin-openvswitch'],
2355@@ -111,7 +111,7 @@
2356 relation_prefix='neutron',
2357 ssl_dir=NEUTRON_CONF_DIR)],
2358 'services': ['neutron-plugin-openvswitch-agent'],
2359- 'packages': [[headers_package()] + determine_dkms_package(),
2360+ 'packages': [determine_dkms_package(),
2361 ['neutron-plugin-openvswitch-agent']],
2362 'server_packages': ['neutron-server',
2363 'neutron-plugin-openvswitch'],
2364@@ -155,7 +155,7 @@
2365 relation_prefix='neutron',
2366 ssl_dir=NEUTRON_CONF_DIR)],
2367 'services': [],
2368- 'packages': [[headers_package()] + determine_dkms_package(),
2369+ 'packages': [determine_dkms_package(),
2370 ['neutron-plugin-cisco']],
2371 'server_packages': ['neutron-server',
2372 'neutron-plugin-cisco'],
2373@@ -174,7 +174,7 @@
2374 'neutron-dhcp-agent',
2375 'nova-api-metadata',
2376 'etcd'],
2377- 'packages': [[headers_package()] + determine_dkms_package(),
2378+ 'packages': [determine_dkms_package(),
2379 ['calico-compute',
2380 'bird',
2381 'neutron-dhcp-agent',
2382@@ -204,11 +204,25 @@
2383 database=config('database'),
2384 ssl_dir=NEUTRON_CONF_DIR)],
2385 'services': [],
2386- 'packages': [['plumgrid-lxc'],
2387- ['iovisor-dkms']],
2388+ 'packages': ['plumgrid-lxc',
2389+ 'iovisor-dkms'],
2390 'server_packages': ['neutron-server',
2391 'neutron-plugin-plumgrid'],
2392 'server_services': ['neutron-server']
2393+ },
2394+ 'midonet': {
2395+ 'config': '/etc/neutron/plugins/midonet/midonet.ini',
2396+ 'driver': 'midonet.neutron.plugin.MidonetPluginV2',
2397+ 'contexts': [
2398+ context.SharedDBContext(user=config('neutron-database-user'),
2399+ database=config('neutron-database'),
2400+ relation_prefix='neutron',
2401+ ssl_dir=NEUTRON_CONF_DIR)],
2402+ 'services': [],
2403+ 'packages': [determine_dkms_package()],
2404+ 'server_packages': ['neutron-server',
2405+ 'python-neutron-plugin-midonet'],
2406+ 'server_services': ['neutron-server']
2407 }
2408 }
2409 if release >= 'icehouse':
2410@@ -219,6 +233,20 @@
2411 'neutron-plugin-ml2']
2412 # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
2413 plugins['nvp'] = plugins['nsx']
2414+ if release >= 'kilo':
2415+ plugins['midonet']['driver'] = (
2416+ 'neutron.plugins.midonet.plugin.MidonetPluginV2')
2417+ if release >= 'liberty':
2418+ plugins['midonet']['driver'] = (
2419+ 'midonet.neutron.plugin_v1.MidonetPluginV2')
2420+ plugins['midonet']['server_packages'].remove(
2421+ 'python-neutron-plugin-midonet')
2422+ plugins['midonet']['server_packages'].append(
2423+ 'python-networking-midonet')
2424+ plugins['plumgrid']['driver'] = (
2425+ 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2')
2426+ plugins['plumgrid']['server_packages'].remove(
2427+ 'neutron-plugin-plumgrid')
2428 return plugins
2429
2430
2431@@ -269,17 +297,30 @@
2432 return 'neutron'
2433
2434
2435-def parse_mappings(mappings):
2436+def parse_mappings(mappings, key_rvalue=False):
2437+ """By default mappings are lvalue keyed.
2438+
2439+ If key_rvalue is True, the mapping will be reversed to allow multiple
2440+ configs for the same lvalue.
2441+ """
2442 parsed = {}
2443 if mappings:
2444 mappings = mappings.split()
2445 for m in mappings:
2446 p = m.partition(':')
2447- key = p[0].strip()
2448- if p[1]:
2449- parsed[key] = p[2].strip()
2450+
2451+ if key_rvalue:
2452+ key_index = 2
2453+ val_index = 0
2454+ # if there is no rvalue skip to next
2455+ if not p[1]:
2456+ continue
2457 else:
2458- parsed[key] = ''
2459+ key_index = 0
2460+ val_index = 2
2461+
2462+ key = p[key_index].strip()
2463+ parsed[key] = p[val_index].strip()
2464
2465 return parsed
2466
2467@@ -297,25 +338,25 @@
2468 def parse_data_port_mappings(mappings, default_bridge='br-data'):
2469 """Parse data port mappings.
2470
2471- Mappings must be a space-delimited list of bridge:port mappings.
2472+ Mappings must be a space-delimited list of bridge:port.
2473
2474- Returns dict of the form {bridge:port}.
2475+ Returns dict of the form {port:bridge} where ports may be mac addresses or
2476+ interface names.
2477 """
2478- _mappings = parse_mappings(mappings)
2479+
2480+ # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
2481+ # proposed for <port> since it may be a mac address which will differ
2482+ # across units this allowing first-known-good to be chosen.
2483+ _mappings = parse_mappings(mappings, key_rvalue=True)
2484 if not _mappings or list(_mappings.values()) == ['']:
2485 if not mappings:
2486 return {}
2487
2488 # For backwards-compatibility we need to support port-only provided in
2489 # config.
2490- _mappings = {default_bridge: mappings.split()[0]}
2491-
2492- bridges = _mappings.keys()
2493- ports = _mappings.values()
2494- if len(set(bridges)) != len(bridges):
2495- raise Exception("It is not allowed to have more than one port "
2496- "configured on the same bridge")
2497-
2498+ _mappings = {mappings.split()[0]: default_bridge}
2499+
2500+ ports = _mappings.keys()
2501 if len(set(ports)) != len(ports):
2502 raise Exception("It is not allowed to have the same port configured "
2503 "on more than one bridge")
2504
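With the reversal, data-port configs now parse to {port: bridge}, so one bridge may legitimately carry several ports while a port still may not appear twice. A quick illustration:

    parse_data_port_mappings('br-ex:eth1')
    # {'eth1': 'br-ex'}
    parse_data_port_mappings('br-ex:eth1 br-ex:eth2')
    # {'eth1': 'br-ex', 'eth2': 'br-ex'}  (now allowed)
    parse_data_port_mappings('eth1')
    # {'eth1': 'br-data'}  (bare port falls back to default_bridge)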
2505=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
2506--- hooks/charmhelpers/contrib/openstack/templating.py 2015-07-29 18:35:16 +0000
2507+++ hooks/charmhelpers/contrib/openstack/templating.py 2016-03-09 12:07:21 +0000
2508@@ -18,7 +18,7 @@
2509
2510 import six
2511
2512-from charmhelpers.fetch import apt_install
2513+from charmhelpers.fetch import apt_install, apt_update
2514 from charmhelpers.core.hookenv import (
2515 log,
2516 ERROR,
2517@@ -29,6 +29,7 @@
2518 try:
2519 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
2520 except ImportError:
2521+ apt_update(fatal=True)
2522 apt_install('python-jinja2', fatal=True)
2523 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
2524
2525@@ -112,7 +113,7 @@
2526
2527 def complete_contexts(self):
2528 '''
2529- Return a list of interfaces that have atisfied contexts.
2530+ Return a list of interfaces that have satisfied contexts.
2531 '''
2532 if self._complete_contexts:
2533 return self._complete_contexts
2534@@ -293,3 +294,30 @@
2535 [interfaces.extend(i.complete_contexts())
2536 for i in six.itervalues(self.templates)]
2537 return interfaces
2538+
2539+ def get_incomplete_context_data(self, interfaces):
2540+ '''
2541+ Return dictionary of relation status of interfaces and any missing
2542+ required context data. Example:
2543+ {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
2544+ 'zeromq-configuration': {'related': False}}
2545+ '''
2546+ incomplete_context_data = {}
2547+
2548+ for i in six.itervalues(self.templates):
2549+ for context in i.contexts:
2550+ for interface in interfaces:
2551+ related = False
2552+ if interface in context.interfaces:
2553+ related = context.get_related()
2554+ missing_data = context.missing_data
2555+ if missing_data:
2556+ incomplete_context_data[interface] = {'missing_data': missing_data}
2557+ if related:
2558+ if incomplete_context_data.get(interface):
2559+ incomplete_context_data[interface].update({'related': True})
2560+ else:
2561+ incomplete_context_data[interface] = {'related': True}
2562+ else:
2563+ incomplete_context_data[interface] = {'related': False}
2564+ return incomplete_context_data
2565
2566=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
2567--- hooks/charmhelpers/contrib/openstack/utils.py 2015-07-29 18:35:16 +0000
2568+++ hooks/charmhelpers/contrib/openstack/utils.py 2016-03-09 12:07:21 +0000
2569@@ -1,5 +1,3 @@
2570-#!/usr/bin/python
2571-
2572 # Copyright 2014-2015 Canonical Limited.
2573 #
2574 # This file is part of charm-helpers.
2575@@ -24,8 +22,13 @@
2576 import json
2577 import os
2578 import sys
2579+import re
2580+import itertools
2581
2582 import six
2583+import tempfile
2584+import traceback
2585+import uuid
2586 import yaml
2587
2588 from charmhelpers.contrib.network import ip
2589@@ -35,12 +38,18 @@
2590 )
2591
2592 from charmhelpers.core.hookenv import (
2593+ action_fail,
2594+ action_set,
2595 config,
2596 log as juju_log,
2597 charm_dir,
2598+ DEBUG,
2599 INFO,
2600+ related_units,
2601 relation_ids,
2602- relation_set
2603+ relation_set,
2604+ status_set,
2605+ hook_name
2606 )
2607
2608 from charmhelpers.contrib.storage.linux.lvm import (
2609@@ -50,7 +59,9 @@
2610 )
2611
2612 from charmhelpers.contrib.network.ip import (
2613- get_ipv6_addr
2614+ get_ipv6_addr,
2615+ is_ipv6,
2616+ port_has_listener,
2617 )
2618
2619 from charmhelpers.contrib.python.packages import (
2620@@ -58,7 +69,7 @@
2621 pip_install,
2622 )
2623
2624-from charmhelpers.core.host import lsb_release, mounts, umount
2625+from charmhelpers.core.host import lsb_release, mounts, umount, service_running
2626 from charmhelpers.fetch import apt_install, apt_cache, install_remote
2627 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
2628 from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
2629@@ -69,7 +80,6 @@
2630 DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
2631 'restricted main multiverse universe')
2632
2633-
2634 UBUNTU_OPENSTACK_RELEASE = OrderedDict([
2635 ('oneiric', 'diablo'),
2636 ('precise', 'essex'),
2637@@ -80,6 +90,7 @@
2638 ('utopic', 'juno'),
2639 ('vivid', 'kilo'),
2640 ('wily', 'liberty'),
2641+ ('xenial', 'mitaka'),
2642 ])
2643
2644
2645@@ -93,31 +104,73 @@
2646 ('2014.2', 'juno'),
2647 ('2015.1', 'kilo'),
2648 ('2015.2', 'liberty'),
2649+ ('2016.1', 'mitaka'),
2650 ])
2651
2652-# The ugly duckling
2653+# The ugly duckling - must list releases oldest to newest
2654 SWIFT_CODENAMES = OrderedDict([
2655- ('1.4.3', 'diablo'),
2656- ('1.4.8', 'essex'),
2657- ('1.7.4', 'folsom'),
2658- ('1.8.0', 'grizzly'),
2659- ('1.7.7', 'grizzly'),
2660- ('1.7.6', 'grizzly'),
2661- ('1.10.0', 'havana'),
2662- ('1.9.1', 'havana'),
2663- ('1.9.0', 'havana'),
2664- ('1.13.1', 'icehouse'),
2665- ('1.13.0', 'icehouse'),
2666- ('1.12.0', 'icehouse'),
2667- ('1.11.0', 'icehouse'),
2668- ('2.0.0', 'juno'),
2669- ('2.1.0', 'juno'),
2670- ('2.2.0', 'juno'),
2671- ('2.2.1', 'kilo'),
2672- ('2.2.2', 'kilo'),
2673- ('2.3.0', 'liberty'),
2674+ ('diablo',
2675+ ['1.4.3']),
2676+ ('essex',
2677+ ['1.4.8']),
2678+ ('folsom',
2679+ ['1.7.4']),
2680+ ('grizzly',
2681+ ['1.7.6', '1.7.7', '1.8.0']),
2682+ ('havana',
2683+ ['1.9.0', '1.9.1', '1.10.0']),
2684+ ('icehouse',
2685+ ['1.11.0', '1.12.0', '1.13.0', '1.13.1']),
2686+ ('juno',
2687+ ['2.0.0', '2.1.0', '2.2.0']),
2688+ ('kilo',
2689+ ['2.2.1', '2.2.2']),
2690+ ('liberty',
2691+ ['2.3.0', '2.4.0', '2.5.0']),
2692+ ('mitaka',
2693+ ['2.5.0']),
2694 ])
2695
2696+# >= Liberty version->codename mapping
2697+PACKAGE_CODENAMES = {
2698+ 'nova-common': OrderedDict([
2699+ ('12.0', 'liberty'),
2700+ ('13.0', 'mitaka'),
2701+ ]),
2702+ 'neutron-common': OrderedDict([
2703+ ('7.0', 'liberty'),
2704+ ('8.0', 'mitaka'),
2705+ ]),
2706+ 'cinder-common': OrderedDict([
2707+ ('7.0', 'liberty'),
2708+ ('8.0', 'mitaka'),
2709+ ]),
2710+ 'keystone': OrderedDict([
2711+ ('8.0', 'liberty'),
2712+ ('9.0', 'mitaka'),
2713+ ]),
2714+ 'horizon-common': OrderedDict([
2715+ ('8.0', 'liberty'),
2716+ ('9.0', 'mitaka'),
2717+ ]),
2718+ 'ceilometer-common': OrderedDict([
2719+ ('5.0', 'liberty'),
2720+ ('6.0', 'mitaka'),
2721+ ]),
2722+ 'heat-common': OrderedDict([
2723+ ('5.0', 'liberty'),
2724+ ('6.0', 'mitaka'),
2725+ ]),
2726+ 'glance-common': OrderedDict([
2727+ ('11.0', 'liberty'),
2728+ ('12.0', 'mitaka'),
2729+ ]),
2730+ 'openstack-dashboard': OrderedDict([
2731+ ('8.0', 'liberty'),
2732+ ('9.0', 'mitaka'),
2733+ ]),
2734+}
2735+
2736 DEFAULT_LOOPBACK_SIZE = '5G'
2737
2738
2739@@ -167,9 +220,9 @@
2740 error_out(e)
2741
2742
2743-def get_os_version_codename(codename):
2744+def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
2745 '''Determine OpenStack version number from codename.'''
2746- for k, v in six.iteritems(OPENSTACK_CODENAMES):
2747+ for k, v in six.iteritems(version_map):
2748 if v == codename:
2749 return k
2750 e = 'Could not derive OpenStack version for '\
2751@@ -177,6 +230,33 @@
2752 error_out(e)
2753
2754
2755+def get_os_version_codename_swift(codename):
2756+ '''Determine OpenStack version number of swift from codename.'''
2757+ for k, v in six.iteritems(SWIFT_CODENAMES):
2758+ if k == codename:
2759+ return v[-1]
2760+ e = 'Could not derive swift version for '\
2761+ 'codename: %s' % codename
2762+ error_out(e)
2763+
2764+
2765+def get_swift_codename(version):
2766+ '''Determine OpenStack codename that corresponds to swift version.'''
2767+ codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
2768+ if len(codenames) > 1:
2769+ # If more than one release codename contains this version we determine
2770+ # the actual codename based on the highest available install source.
2771+ for codename in reversed(codenames):
2772+ releases = UBUNTU_OPENSTACK_RELEASE
2773+ release = [k for k, v in six.iteritems(releases) if codename in v]
2774+ ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
2775+ if codename in ret or release[0] in ret:
2776+ return codename
2777+ elif len(codenames) == 1:
2778+ return codenames[0]
2779+ return None
2780+
2781+
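get_swift_codename() exists because a swift version can map to more than one release above (2.5.0 appears under both liberty and mitaka), so the version alone is ambiguous and apt-cache policy breaks the tie. A sketch:

    get_swift_codename('2.3.0')  # 'liberty' -- unambiguous
    get_swift_codename('2.5.0')  # 'liberty' or 'mitaka', whichever release
                                 # the configured install source provides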
2782 def get_os_codename_package(package, fatal=True):
2783 '''Derive OpenStack release codename from an installed package.'''
2784 import apt_pkg as apt
2785@@ -201,20 +281,33 @@
2786 error_out(e)
2787
2788 vers = apt.upstream_version(pkg.current_ver.ver_str)
2789-
2790- try:
2791- if 'swift' in pkg.name:
2792- swift_vers = vers[:5]
2793- if swift_vers not in SWIFT_CODENAMES:
2794- # Deal with 1.10.0 upward
2795- swift_vers = vers[:6]
2796- return SWIFT_CODENAMES[swift_vers]
2797- else:
2798- vers = vers[:6]
2799- return OPENSTACK_CODENAMES[vers]
2800- except KeyError:
2801- e = 'Could not determine OpenStack codename for version %s' % vers
2802- error_out(e)
2803+ if 'swift' in pkg.name:
2804+ # Full x.y.z match for swift versions
2805+ match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
2806+ else:
2807+ # x.y match only for 20XX.X
2808+ # and ignore patch level for other packages
2809+ match = re.match('^(\d+)\.(\d+)', vers)
2810+
2811+ if match:
2812+ vers = match.group(0)
2813+
2814+ # >= Liberty independent project versions
2815+ if (package in PACKAGE_CODENAMES and
2816+ vers in PACKAGE_CODENAMES[package]):
2817+ return PACKAGE_CODENAMES[package][vers]
2818+ else:
2819+ # < Liberty co-ordinated project versions
2820+ try:
2821+ if 'swift' in pkg.name:
2822+ return get_swift_codename(vers)
2823+ else:
2824+ return OPENSTACK_CODENAMES[vers]
2825+ except KeyError:
2826+ if not fatal:
2827+ return None
2828+ e = 'Could not determine OpenStack codename for version %s' % vers
2829+ error_out(e)
2830
2831
2832 def get_os_version_package(pkg, fatal=True):
2833@@ -226,12 +319,14 @@
2834
2835 if 'swift' in pkg:
2836 vers_map = SWIFT_CODENAMES
2837+ for cname, version in six.iteritems(vers_map):
2838+ if cname == codename:
2839+ return version[-1]
2840 else:
2841 vers_map = OPENSTACK_CODENAMES
2842-
2843- for version, cname in six.iteritems(vers_map):
2844- if cname == codename:
2845- return version
2846+ for version, cname in six.iteritems(vers_map):
2847+ if cname == codename:
2848+ return version
2849 # e = "Could not determine OpenStack version for package: %s" % pkg
2850 # error_out(e)
2851
2852@@ -256,12 +351,42 @@
2853
2854
2855 def import_key(keyid):
2856- cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
2857- "--recv-keys %s" % keyid
2858- try:
2859- subprocess.check_call(cmd.split(' '))
2860- except subprocess.CalledProcessError:
2861- error_out("Error importing repo key %s" % keyid)
2862+ key = keyid.strip()
2863+ if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and
2864+ key.endswith('-----END PGP PUBLIC KEY BLOCK-----')):
2865+ juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
2866+ juju_log("Importing ASCII Armor PGP key", level=DEBUG)
2867+ with tempfile.NamedTemporaryFile() as keyfile:
2868+ with open(keyfile.name, 'w') as fd:
2869+ fd.write(key)
2870+ fd.write("\n")
2871+
2872+ cmd = ['apt-key', 'add', keyfile.name]
2873+ try:
2874+ subprocess.check_call(cmd)
2875+ except subprocess.CalledProcessError:
2876+ error_out("Error importing PGP key '%s'" % key)
2877+ else:
2878+ juju_log("PGP key found (looks like Radix64 format)", level=DEBUG)
2879+ juju_log("Importing PGP key from keyserver", level=DEBUG)
2880+ cmd = ['apt-key', 'adv', '--keyserver',
2881+ 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
2882+ try:
2883+ subprocess.check_call(cmd)
2884+ except subprocess.CalledProcessError:
2885+ error_out("Error importing PGP key '%s'" % key)
2886+
2887+
2888+def get_source_and_pgp_key(input):
2889+ """Look for a pgp key ID or ascii-armor key in the given input."""
2890+ input = input.strip()
2891+ index = input.rfind('|')
2892+ if index < 0:
2893+ return input, None
2894+
2895+ key = input[index + 1:].strip('|')
2896+ source = input[:index]
2897+ return source, key
2898
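The helper splits an install source of the form '<source>|<key>', where the key may be a keyserver ID or an inline ASCII-armor block (both handled by import_key above). For example, with a hypothetical key ID:

    src, key = get_source_and_pgp_key(
        'deb http://ppa.example.com/ubuntu trusty main|4C1CBE14')
    # src == 'deb http://ppa.example.com/ubuntu trusty main'
    # key == '4C1CBE14'
    src, key = get_source_and_pgp_key('ppa:some-team/some-ppa')
    # key is None -- no '|' present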
2899
2900 def configure_installation_source(rel):
2901@@ -273,16 +398,16 @@
2902 with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
2903 f.write(DISTRO_PROPOSED % ubuntu_rel)
2904 elif rel[:4] == "ppa:":
2905- src = rel
2906+ src, key = get_source_and_pgp_key(rel)
2907+ if key:
2908+ import_key(key)
2909+
2910 subprocess.check_call(["add-apt-repository", "-y", src])
2911 elif rel[:3] == "deb":
2912- l = len(rel.split('|'))
2913- if l == 2:
2914- src, key = rel.split('|')
2915- juju_log("Importing PPA key from keyserver for %s" % src)
2916+ src, key = get_source_and_pgp_key(rel)
2917+ if key:
2918 import_key(key)
2919- elif l == 1:
2920- src = rel
2921+
2922 with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
2923 f.write(src)
2924 elif rel[:6] == 'cloud:':
2925@@ -327,6 +452,9 @@
2926 'liberty': 'trusty-updates/liberty',
2927 'liberty/updates': 'trusty-updates/liberty',
2928 'liberty/proposed': 'trusty-proposed/liberty',
2929+ 'mitaka': 'trusty-updates/mitaka',
2930+ 'mitaka/updates': 'trusty-updates/mitaka',
2931+ 'mitaka/proposed': 'trusty-proposed/mitaka',
2932 }
2933
2934 try:
2935@@ -392,9 +520,18 @@
2936 import apt_pkg as apt
2937 src = config('openstack-origin')
2938 cur_vers = get_os_version_package(package)
2939- available_vers = get_os_version_install_source(src)
2940+ if "swift" in package:
2941+ codename = get_os_codename_install_source(src)
2942+ avail_vers = get_os_version_codename_swift(codename)
2943+ else:
2944+ avail_vers = get_os_version_install_source(src)
2945 apt.init()
2946- return apt.version_compare(available_vers, cur_vers) == 1
2947+ if "swift" in package:
2948+ major_cur_vers = cur_vers.split('.', 1)[0]
2949+ major_avail_vers = avail_vers.split('.', 1)[0]
2950+ major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
2951+ return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
2952+ return apt.version_compare(avail_vers, cur_vers) == 1
2953
2954
2955 def ensure_block_device(block_device):
2956@@ -469,6 +606,12 @@
2957 relation_prefix=None):
2958 hosts = get_ipv6_addr(dynamic_only=False)
2959
2960+ if config('vip'):
2961+ vips = config('vip').split()
2962+ for vip in vips:
2963+ if vip and is_ipv6(vip):
2964+ hosts.append(vip)
2965+
2966 kwargs = {'database': database,
2967 'username': database_user,
2968 'hostname': json.dumps(hosts)}
2969@@ -517,7 +660,7 @@
2970 return yaml.load(projects_yaml)
2971
2972
2973-def git_clone_and_install(projects_yaml, core_project, depth=1):
2974+def git_clone_and_install(projects_yaml, core_project):
2975 """
2976 Clone/install all specified OpenStack repositories.
2977
2978@@ -567,6 +710,9 @@
2979 for p in projects['repositories']:
2980 repo = p['repository']
2981 branch = p['branch']
2982+ depth = '1'
2983+ if 'depth' in p.keys():
2984+ depth = p['depth']
2985 if p['name'] == 'requirements':
2986 repo_dir = _git_clone_and_install_single(repo, branch, depth,
2987 parent_dir, http_proxy,
2988@@ -611,19 +757,13 @@
2989 """
2990 Clone and install a single git repository.
2991 """
2992- dest_dir = os.path.join(parent_dir, os.path.basename(repo))
2993-
2994 if not os.path.exists(parent_dir):
2995 juju_log('Directory does not exist at {}. '
2996 'Creating it.'.format(parent_dir))
2997 os.mkdir(parent_dir)
2998
2999- if not os.path.exists(dest_dir):
3000- juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
3001- repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
3002- depth=depth)
3003- else:
3004- repo_dir = dest_dir
3005+ juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
3006+ repo_dir = install_remote(repo, dest=parent_dir, branch=branch, depth=depth)
3007
3008 venv = os.path.join(parent_dir, 'venv')
3009
3010@@ -704,3 +844,304 @@
3011 return projects[key]
3012
3013 return None
3014+
3015+
3016+def os_workload_status(configs, required_interfaces, charm_func=None):
3017+ """
3018+ Decorator to set workload status based on complete contexts
3019+ """
3020+ def wrap(f):
3021+ @wraps(f)
3022+ def wrapped_f(*args, **kwargs):
3023+ # Run the original function first
3024+ f(*args, **kwargs)
3025+ # Set workload status now that contexts have been
3026+ # acted on
3027+ set_os_workload_status(configs, required_interfaces, charm_func)
3028+ return wrapped_f
3029+ return wrap
3030+
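A sketch of how a charm hook might apply the decorator (hypothetical interface map; CONFIGS is the charm's OSConfigRenderer instance):

    REQUIRED_INTERFACES = {
        'messaging': ['amqp'],
        'identity': ['identity-service'],
    }

    @os_workload_status(CONFIGS, REQUIRED_INTERFACES)
    def config_changed():
        CONFIGS.write_all()  # workload status is set after the hook body runs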
3031+
3032+def set_os_workload_status(configs, required_interfaces, charm_func=None, services=None, ports=None):
3033+ """
3034+ Set workload status based on complete contexts.
3035+ status-set missing or incomplete contexts
3036+ and juju-log details of missing required data.
3037+ charm_func is a charm specific function to run checking
3038+ for charm specific requirements such as a VIP setting.
3039+
3040+ This function also checks for whether the services defined are ACTUALLY
3041+ running and that the ports they advertise are open and being listened to.
3042+
3043+ @param services - OPTIONAL: a [{'service': <string>, 'ports': [<int>]}]
3044+ The ports are optional.
3045+ If services is a [<string>] then ports are ignored.
3046+ @param ports - OPTIONAL: an [<int>] representing ports that should be
3047+ open.
3048+ @returns None
3049+ """
3050+ incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
3051+ state = 'active'
3052+ missing_relations = []
3053+ incomplete_relations = []
3054+ message = None
3055+ charm_state = None
3056+ charm_message = None
3057+
3058+ for generic_interface in incomplete_rel_data.keys():
3059+ related_interface = None
3060+ missing_data = {}
3061+ # Related or not?
3062+ for interface in incomplete_rel_data[generic_interface]:
3063+ if incomplete_rel_data[generic_interface][interface].get('related'):
3064+ related_interface = interface
3065+ missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data')
3066+ # No relation ID for the generic_interface
3067+ if not related_interface:
3068+ juju_log("{} relation is missing and must be related for "
3069+ "functionality. ".format(generic_interface), 'WARN')
3070+ state = 'blocked'
3071+ if generic_interface not in missing_relations:
3072+ missing_relations.append(generic_interface)
3073+ else:
3074+ # Relation ID exists but no related unit
3075+ if not missing_data:
3076+ # Edge case relation ID exists but departing
3077+ if ('departed' in hook_name() or 'broken' in hook_name()) \
3078+ and related_interface in hook_name():
3079+ state = 'blocked'
3080+ if generic_interface not in missing_relations:
3081+ missing_relations.append(generic_interface)
3082+ juju_log("{} relation's interface, {}, "
3083+ "relationship is departed or broken "
3084+ "and is required for functionality."
3085+ "".format(generic_interface, related_interface), "WARN")
3086+ # Normal case relation ID exists but no related unit
3087+ # (joining)
3088+ else:
3089+ juju_log("{} relations's interface, {}, is related but has "
3090+ "no units in the relation."
3091+ "".format(generic_interface, related_interface), "INFO")
3092+ # Related unit exists and data missing on the relation
3093+ else:
3094+ juju_log("{} relation's interface, {}, is related awaiting "
3095+ "the following data from the relationship: {}. "
3096+ "".format(generic_interface, related_interface,
3097+ ", ".join(missing_data)), "INFO")
3098+ if state != 'blocked':
3099+ state = 'waiting'
3100+ if generic_interface not in incomplete_relations \
3101+ and generic_interface not in missing_relations:
3102+ incomplete_relations.append(generic_interface)
3103+
3104+ if missing_relations:
3105+ message = "Missing relations: {}".format(", ".join(missing_relations))
3106+ if incomplete_relations:
3107+ message += "; incomplete relations: {}" \
3108+ "".format(", ".join(incomplete_relations))
3109+ state = 'blocked'
3110+ elif incomplete_relations:
3111+ message = "Incomplete relations: {}" \
3112+ "".format(", ".join(incomplete_relations))
3113+ state = 'waiting'
3114+
3115+ # Run charm specific checks
3116+ if charm_func:
3117+ charm_state, charm_message = charm_func(configs)
3118+ if charm_state != 'active' and charm_state != 'unknown':
3119+ state = workload_state_compare(state, charm_state)
3120+ if message:
3121+ charm_message = charm_message.replace("Incomplete relations: ",
3122+ "")
3123+ message = "{}, {}".format(message, charm_message)
3124+ else:
3125+ message = charm_message
3126+
3127+ # If the charm thinks the unit is active, check that the actual services
3128+ # really are active.
3129+ if services is not None and state == 'active':
3130+ # if we're passed the dict() then just grab the values as a list.
3131+ if isinstance(services, dict):
3132+ services = services.values()
3133+ # either extract the list of services from the dictionary, or if
3134+ # it is a simple string, use that. i.e. works with mixed lists.
3135+ _s = []
3136+ for s in services:
3137+ if isinstance(s, dict) and 'service' in s:
3138+ _s.append(s['service'])
3139+ if isinstance(s, str):
3140+ _s.append(s)
3141+ services_running = [service_running(s) for s in _s]
3142+ if not all(services_running):
3143+ not_running = [s for s, running in zip(_s, services_running)
3144+ if not running]
3145+ message = ("Services not running that should be: {}"
3146+ .format(", ".join(not_running)))
3147+ state = 'blocked'
3148+ # also verify that the ports that should be open are open
3149+ # NB, that ServiceManager objects only OPTIONALLY have ports
3150+ port_map = OrderedDict([(s['service'], s['ports'])
3151+ for s in services if 'ports' in s])
3152+ if state == 'active' and port_map:
3153+ all_ports = list(itertools.chain(*port_map.values()))
3154+ ports_open = [port_has_listener('0.0.0.0', p)
3155+ for p in all_ports]
3156+ if not all(ports_open):
3157+ not_opened = [p for p, opened in zip(all_ports, ports_open)
3158+ if not opened]
3159+ map_not_open = OrderedDict()
3160+ for service, ports in port_map.items():
3161+ closed_ports = set(ports).intersection(not_opened)
3162+ if closed_ports:
3163+ map_not_open[service] = closed_ports
3164+ # find which service has missing ports. They are in service
3165+ # order which makes it a bit easier.
3166+ message = (
3167+ "Services with ports not open that should be: {}"
3168+ .format(
3169+ ", ".join([
3170+ "{}: [{}]".format(
3171+ service,
3172+ ", ".join([str(v) for v in ports]))
3173+ for service, ports in map_not_open.items()])))
3174+ state = 'blocked'
3175+
3176+ if ports is not None and state == 'active':
3177+ # and we can also check ports which we don't know the service for
3178+ ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
3179+ if not all(ports_open):
3180+ message = (
3181+ "Ports which should be open, but are not: {}"
3182+ .format(", ".join([str(p) for p, v in zip(ports, ports_open)
3183+ if not v])))
3184+ state = 'blocked'
3185+
3186+ # Set to active if all requirements have been met
3187+ if state == 'active':
3188+ message = "Unit is ready"
3189+ juju_log(message, "INFO")
3190+
3191+ status_set(state, message)
3192+
3193+
3194+def workload_state_compare(current_workload_state, workload_state):
3195+ """ Return highest priority of two states"""
3196+ hierarchy = {'unknown': -1,
3197+ 'active': 0,
3198+ 'maintenance': 1,
3199+ 'waiting': 2,
3200+ 'blocked': 3,
3201+ }
3202+
3203+ if hierarchy.get(workload_state) is None:
3204+ workload_state = 'unknown'
3205+ if hierarchy.get(current_workload_state) is None:
3206+ current_workload_state = 'unknown'
3207+
3208+ # Set workload_state based on hierarchy of statuses
3209+ if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
3210+ return current_workload_state
3211+ else:
3212+ return workload_state
3213+
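For illustration, a minimal sketch of how the hierarchy resolves two competing states; an unrecognised state degrades to 'unknown' before comparison:

    assert workload_state_compare('waiting', 'blocked') == 'blocked'
    assert workload_state_compare('active', 'maintenance') == 'maintenance'
    assert workload_state_compare('bogus', 'active') == 'active'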
3214+
3215+def incomplete_relation_data(configs, required_interfaces):
3216+ """
3217+ Check complete contexts against required_interfaces
3218+ Return dictionary of incomplete relation data.
3219+
3220+ configs is an OSConfigRenderer object with configs registered
3221+
3222+ required_interfaces is a dictionary of required general interfaces
3223+ with dictionary values of possible specific interfaces.
3224+ Example:
3225+ required_interfaces = {'database': ['shared-db', 'pgsql-db']}
3226+
3227+    The interface is said to be satisfied if any one of the interfaces in the
3228+ list has a complete context.
3229+
3230+ Return dictionary of incomplete or missing required contexts with relation
3231+ status of interfaces and any missing data points. Example:
3232+ {'message':
3233+ {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
3234+ 'zeromq-configuration': {'related': False}},
3235+ 'identity':
3236+ {'identity-service': {'related': False}},
3237+ 'database':
3238+ {'pgsql-db': {'related': False},
3239+ 'shared-db': {'related': True}}}
3240+ """
3241+ complete_ctxts = configs.complete_contexts()
3242+ incomplete_relations = []
3243+ for svc_type in required_interfaces.keys():
3244+ # Avoid duplicates
3245+ found_ctxt = False
3246+ for interface in required_interfaces[svc_type]:
3247+ if interface in complete_ctxts:
3248+ found_ctxt = True
3249+ if not found_ctxt:
3250+ incomplete_relations.append(svc_type)
3251+ incomplete_context_data = {}
3252+ for i in incomplete_relations:
3253+ incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i])
3254+ return incomplete_context_data
3255+
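A hedged usage sketch, assuming configs is an OSConfigRenderer with its contexts registered; the interface names are illustrative:

    required_interfaces = {'database': ['shared-db', 'pgsql-db'],
                           'identity': ['identity-service']}
    incomplete = incomplete_relation_data(configs, required_interfaces)
    for svc_type, interfaces in incomplete.items():
        # e.g. 'identity' -> {'identity-service': {'related': False}}
        juju_log('incomplete: {} -> {}'.format(svc_type, interfaces), 'DEBUG')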
3256+
3257+def do_action_openstack_upgrade(package, upgrade_callback, configs):
3258+ """Perform action-managed OpenStack upgrade.
3259+
3260+ Upgrades packages to the configured openstack-origin version and sets
3261+ the corresponding action status as a result.
3262+
3263+ If the charm was installed from source we cannot upgrade it.
3264+ For backwards compatibility a config flag (action-managed-upgrade) must
3265+ be set for this code to run, otherwise a full service level upgrade will
3266+ fire on config-changed.
3267+
3268+ @param package: package name for determining if upgrade available
3269+ @param upgrade_callback: function callback to charm's upgrade function
3270+ @param configs: templating object derived from OSConfigRenderer class
3271+
3272+ @return: True if upgrade successful; False if upgrade failed or skipped
3273+ """
3274+ ret = False
3275+
3276+ if git_install_requested():
3277+ action_set({'outcome': 'installed from source, skipped upgrade.'})
3278+ else:
3279+ if openstack_upgrade_available(package):
3280+ if config('action-managed-upgrade'):
3281+ juju_log('Upgrading OpenStack release')
3282+
3283+ try:
3284+ upgrade_callback(configs=configs)
3285+ action_set({'outcome': 'success, upgrade completed.'})
3286+ ret = True
3287+                except Exception:
3288+ action_set({'outcome': 'upgrade failed, see traceback.'})
3289+ action_set({'traceback': traceback.format_exc()})
3290+ action_fail('do_openstack_upgrade resulted in an '
3291+ 'unexpected error')
3292+ else:
3293+ action_set({'outcome': 'action-managed-upgrade config is '
3294+ 'False, skipped upgrade.'})
3295+ else:
3296+ action_set({'outcome': 'no upgrade available.'})
3297+
3298+ return ret
3299+
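A sketch of how a charm's openstack-upgrade action might wire this helper up; do_openstack_upgrade, register_configs and config_changed stand in for the charm's own functions:

    def openstack_upgrade():
        # Only re-run config-changed if packages actually moved forward.
        if do_action_openstack_upgrade('neutron-server',
                                       do_openstack_upgrade,
                                       register_configs()):
            config_changed()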
3300+
3301+def remote_restart(rel_name, remote_service=None):
3302+ trigger = {
3303+ 'restart-trigger': str(uuid.uuid4()),
3304+ }
3305+ if remote_service:
3306+ trigger['remote-service'] = remote_service
3307+ for rid in relation_ids(rel_name):
3308+        # This subordinate can be related to two separate services using
3309+        # different subordinate relations so only issue the restart if
3310+        # the principal is connected down the relation we think it is
3311+ if related_units(relid=rid):
3312+ relation_set(relation_id=rid,
3313+ relation_settings=trigger,
3314+ )
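Because the trigger carries a fresh uuid, every call changes the relation data and therefore fires the principal's -changed hook. A hedged sketch, with an illustrative relation name:

    remote_restart('neutron-plugin-api-subordinate',
                   remote_service='neutron-server')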
3315
3316=== modified file 'hooks/charmhelpers/contrib/peerstorage/__init__.py'
3317--- hooks/charmhelpers/contrib/peerstorage/__init__.py 2015-07-29 18:35:16 +0000
3318+++ hooks/charmhelpers/contrib/peerstorage/__init__.py 2016-03-09 12:07:21 +0000
3319@@ -59,7 +59,7 @@
3320 """
3321
3322
3323-def leader_get(attribute=None):
3324+def leader_get(attribute=None, rid=None):
3325 """Wrapper to ensure that settings are migrated from the peer relation.
3326
3327 This is to support upgrading an environment that does not support
3328@@ -94,7 +94,8 @@
3329 # If attribute not present in leader db, check if this unit has set
3330 # the attribute in the peer relation
3331 if not leader_settings:
3332- peer_setting = relation_get(attribute=attribute, unit=local_unit())
3333+ peer_setting = _relation_get(attribute=attribute, unit=local_unit(),
3334+ rid=rid)
3335 if peer_setting:
3336 leader_set(settings={attribute: peer_setting})
3337 leader_settings = peer_setting
3338@@ -103,7 +104,7 @@
3339 settings_migrated = True
3340 migrated.add(attribute)
3341 else:
3342- r_settings = relation_get(unit=local_unit())
3343+ r_settings = _relation_get(unit=local_unit(), rid=rid)
3344 if r_settings:
3345 for key in set(r_settings.keys()).difference(migrated):
3346 # Leader setting wins
3347@@ -151,7 +152,7 @@
3348 """
3349 try:
3350 if rid in relation_ids('cluster'):
3351- return leader_get(attribute)
3352+ return leader_get(attribute, rid)
3353 else:
3354 raise NotImplementedError
3355 except NotImplementedError:
3356
3357=== modified file 'hooks/charmhelpers/contrib/python/packages.py'
3358--- hooks/charmhelpers/contrib/python/packages.py 2015-07-29 18:35:16 +0000
3359+++ hooks/charmhelpers/contrib/python/packages.py 2016-03-09 12:07:21 +0000
3360@@ -19,20 +19,35 @@
3361
3362 import os
3363 import subprocess
3364+import sys
3365
3366 from charmhelpers.fetch import apt_install, apt_update
3367 from charmhelpers.core.hookenv import charm_dir, log
3368
3369-try:
3370- from pip import main as pip_execute
3371-except ImportError:
3372- apt_update()
3373- apt_install('python-pip')
3374- from pip import main as pip_execute
3375-
3376 __author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
3377
3378
3379+def pip_execute(*args, **kwargs):
3380+    """Overridden pip_execute() to stop sys.path being changed.
3381+
3382+    The act of importing main from the pip module seems to add wheels
3383+    from /usr/share/python-wheels, which are installed by various tools.
3384+ This function ensures that sys.path remains the same after the call is
3385+ executed.
3386+ """
3387+ try:
3388+ _path = sys.path
3389+ try:
3390+ from pip import main as _pip_execute
3391+ except ImportError:
3392+ apt_update()
3393+ apt_install('python-pip')
3394+ from pip import main as _pip_execute
3395+ _pip_execute(*args, **kwargs)
3396+ finally:
3397+ sys.path = _path
3398+
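Callers use the wrapper exactly as they would pip.main; a minimal sketch (package name illustrative):

    # sys.path is saved and restored around the deferred pip import.
    pip_execute(['install', '--upgrade', 'networking-plumgrid'])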
3399+
3400 def parse_options(given, available):
3401 """Given a set of options, check if available"""
3402 for key, value in sorted(given.items()):
3403@@ -42,8 +57,12 @@
3404 yield "--{0}={1}".format(key, value)
3405
3406
3407-def pip_install_requirements(requirements, **options):
3408- """Install a requirements file """
3409+def pip_install_requirements(requirements, constraints=None, **options):
3410+ """Install a requirements file.
3411+
3412+ :param constraints: Path to pip constraints file.
3413+ http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
3414+ """
3415 command = ["install"]
3416
3417 available_options = ('proxy', 'src', 'log', )
3418@@ -51,8 +70,13 @@
3419 command.append(option)
3420
3421 command.append("-r {0}".format(requirements))
3422- log("Installing from file: {} with options: {}".format(requirements,
3423- command))
3424+ if constraints:
3425+ command.append("-c {0}".format(constraints))
3426+ log("Installing from file: {} with constraints {} "
3427+ "and options: {}".format(requirements, constraints, command))
3428+ else:
3429+ log("Installing from file: {} with options: {}".format(requirements,
3430+ command))
3431 pip_execute(command)
3432
3433
3434
3435=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
3436--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-07-29 18:35:16 +0000
3437+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2016-03-09 12:07:21 +0000
3438@@ -23,11 +23,16 @@
3439 # James Page <james.page@ubuntu.com>
3440 # Adam Gandelman <adamg@ubuntu.com>
3441 #
3442+import bisect
3443+import errno
3444+import hashlib
3445+import six
3446
3447 import os
3448 import shutil
3449 import json
3450 import time
3451+import uuid
3452
3453 from subprocess import (
3454 check_call,
3455@@ -35,8 +40,10 @@
3456 CalledProcessError,
3457 )
3458 from charmhelpers.core.hookenv import (
3459+ local_unit,
3460 relation_get,
3461 relation_ids,
3462+ relation_set,
3463 related_units,
3464 log,
3465 DEBUG,
3466@@ -56,6 +63,8 @@
3467 apt_install,
3468 )
3469
3470+from charmhelpers.core.kernel import modprobe
3471+
3472 KEYRING = '/etc/ceph/ceph.client.{}.keyring'
3473 KEYFILE = '/etc/ceph/ceph.client.{}.key'
3474
3475@@ -67,6 +76,548 @@
3476 err to syslog = {use_syslog}
3477 clog to syslog = {use_syslog}
3478 """
3479+# For roughly 50 to 240,000 OSDs (about 1 exabyte at 6 TB per OSD)
3480+powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608]
3481+
3482+
3483+def validator(value, valid_type, valid_range=None):
3484+ """
3485+ Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
3486+ Example input:
3487+ validator(value=1,
3488+ valid_type=int,
3489+ valid_range=[0, 2])
3490+    This says value=1 must be an int in the inclusive range [0, 2].
3491+
3492+    :param value: The value to validate
3493+    :param valid_type: The type that value should be.
3494+    :param valid_range: A range of values that value can assume.
3495+    :return: None. Raises AssertionError or ValueError on failure.
3496+ """
3497+ assert isinstance(value, valid_type), "{} is not a {}".format(
3498+ value,
3499+ valid_type)
3500+ if valid_range is not None:
3501+ assert isinstance(valid_range, list), \
3502+ "valid_range must be a list, was given {}".format(valid_range)
3503+ # If we're dealing with strings
3504+ if valid_type is six.string_types:
3505+ assert value in valid_range, \
3506+ "{} is not in the list {}".format(value, valid_range)
3507+ # Integer, float should have a min and max
3508+ else:
3509+ if len(valid_range) != 2:
3510+ raise ValueError(
3511+ "Invalid valid_range list of {} for {}. "
3512+ "List must be [min,max]".format(valid_range, value))
3513+ assert value >= valid_range[0], \
3514+ "{} is less than minimum allowed value of {}".format(
3515+ value, valid_range[0])
3516+ assert value <= valid_range[1], \
3517+ "{} is greater than maximum allowed value of {}".format(
3518+ value, valid_range[1])
3519+
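Two hedged examples of the call shape, one per branch of the range check:

    # Strings are validated by membership in valid_range...
    validator(value='writeback', valid_type=six.string_types,
              valid_range=['readonly', 'writeback'])
    # ...while numbers are validated against an inclusive [min, max].
    validator(value=3, valid_type=int, valid_range=[0, 10])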
3520+
3521+class PoolCreationError(Exception):
3522+ """
3523+ A custom error to inform the caller that a pool creation failed. Provides an error message
3524+ """
3525+
3526+ def __init__(self, message):
3527+ super(PoolCreationError, self).__init__(message)
3528+
3529+
3530+class Pool(object):
3531+ """
3532+ An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool.
3533+ Do not call create() on this base class as it will not do anything. Instantiate a child class and call create().
3534+ """
3535+
3536+ def __init__(self, service, name):
3537+ self.service = service
3538+ self.name = name
3539+
3540+ # Create the pool if it doesn't exist already
3541+ # To be implemented by subclasses
3542+ def create(self):
3543+ pass
3544+
3545+ def add_cache_tier(self, cache_pool, mode):
3546+ """
3547+ Adds a new cache tier to an existing pool.
3548+ :param cache_pool: six.string_types. The cache tier pool name to add.
3549+ :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"]
3550+ :return: None
3551+ """
3552+ # Check the input types and values
3553+ validator(value=cache_pool, valid_type=six.string_types)
3554+ validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"])
3555+
3556+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool])
3557+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode])
3558+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool])
3559+ check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom'])
3560+
3561+ def remove_cache_tier(self, cache_pool):
3562+ """
3563+ Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete.
3564+ :param cache_pool: six.string_types. The cache tier pool name to remove.
3565+ :return: None
3566+ """
3567+ # read-only is easy, writeback is much harder
3568+ mode = get_cache_mode(self.service, cache_pool)
3569+ if mode == 'readonly':
3570+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
3571+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
3572+
3573+ elif mode == 'writeback':
3574+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'])
3575+ # Flush the cache and wait for it to return
3576+ check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
3577+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
3578+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
3579+
3580+ def get_pgs(self, pool_size):
3581+ """
3582+ :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for
3583+ erasure coded pools
3584+ :return: int. The number of pgs to use.
3585+ """
3586+ validator(value=pool_size, valid_type=int)
3587+ osd_list = get_osds(self.service)
3588+ if not osd_list:
3589+ # NOTE(james-page): Default to 200 for older ceph versions
3590+ # which don't support OSD query from cli
3591+ return 200
3592+
3593+ osd_list_length = len(osd_list)
3594+ # Calculate based on Ceph best practices
3595+ if osd_list_length < 5:
3596+ return 128
3597+        elif 5 <= osd_list_length < 10:
3598+            return 512
3599+        elif 10 <= osd_list_length < 50:
3600+ return 4096
3601+ else:
3602+ estimate = (osd_list_length * 100) / pool_size
3603+ # Return the next nearest power of 2
3604+ index = bisect.bisect_right(powers_of_two, estimate)
3605+ return powers_of_two[index]
3606+
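A worked instance of the else branch: 120 OSDs with 3 replicas give an estimate of 4000 placement groups, which bisect rounds up to the next entry in powers_of_two:

    estimate = (120 * 100) / 3                      # 4000.0
    index = bisect.bisect_right(powers_of_two, estimate)
    assert powers_of_two[index] == 8192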
3607+
3608+class ReplicatedPool(Pool):
3609+ def __init__(self, service, name, pg_num=None, replicas=2):
3610+ super(ReplicatedPool, self).__init__(service=service, name=name)
3611+ self.replicas = replicas
3612+ if pg_num is None:
3613+ self.pg_num = self.get_pgs(self.replicas)
3614+ else:
3615+ self.pg_num = pg_num
3616+
3617+ def create(self):
3618+ if not pool_exists(self.service, self.name):
3619+ # Create it
3620+ cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
3621+ self.name, str(self.pg_num)]
3622+ try:
3623+ check_call(cmd)
3624+ except CalledProcessError:
3625+ raise
3626+
3627+
3628+# Default jerasure erasure coded pool
3629+class ErasurePool(Pool):
3630+ def __init__(self, service, name, erasure_code_profile="default"):
3631+ super(ErasurePool, self).__init__(service=service, name=name)
3632+ self.erasure_code_profile = erasure_code_profile
3633+
3634+ def create(self):
3635+ if not pool_exists(self.service, self.name):
3636+ # Try to find the erasure profile information so we can properly size the pgs
3637+ erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile)
3638+
3639+ # Check for errors
3640+ if erasure_profile is None:
3641+ log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile),
3642+ level=ERROR)
3643+ raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile))
3644+ if 'k' not in erasure_profile or 'm' not in erasure_profile:
3645+ # Error
3646+ log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile),
3647+ level=ERROR)
3648+ raise PoolCreationError(
3649+ message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile))
3650+
3651+ pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m']))
3652+ # Create it
3653+ cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs),
3654+ 'erasure', self.erasure_code_profile]
3655+ try:
3656+ check_call(cmd)
3657+ except CalledProcessError:
3658+ raise
3659+
3660+
3661+
3662+
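A hedged usage sketch of the two pool classes; the service and pool names are illustrative:

    ReplicatedPool(service='admin', name='images', replicas=3).create()
    ErasurePool(service='admin', name='backups',
                erasure_code_profile='default').create()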
3663+
3664+def get_mon_map(service):
3665+ """
3666+ Returns the current monitor map.
3667+ :param service: six.string_types. The Ceph user name to run the command under
3668+    :return: dict. The parsed monitor map. :raise: ValueError if the monmap fails to parse.
3669+ Also raises CalledProcessError if our ceph command fails
3670+ """
3671+ try:
3672+ mon_status = check_output(
3673+ ['ceph', '--id', service,
3674+             'mon_status', '--format=json'])
3675+ try:
3676+ return json.loads(mon_status)
3677+ except ValueError as v:
3678+ log("Unable to parse mon_status json: {}. Error: {}".format(
3679+                mon_status, str(v)))
3680+ raise
3681+ except CalledProcessError as e:
3682+ log("mon_status command failed with message: {}".format(
3683+            str(e)))
3684+ raise
3685+
3686+
3687+def hash_monitor_names(service):
3688+ """
3689+ Uses the get_mon_map() function to get information about the monitor
3690+ cluster.
3691+ Hash the name of each monitor. Return a sorted list of monitor hashes
3692+ in an ascending order.
3693+ :param service: six.string_types. The Ceph user name to run the command under
3694+ :rtype : dict. json dict of monitor name, ip address and rank
3695+    :rtype : list. Sorted list of hashed monitor names. Each monmap
3696+    entry that gets hashed looks like: {
3697+        'name': 'ip-172-31-13-165',
3698+        'rank': 0,
3699+        'addr': '172.31.13.165:6789/0'}
3700+ try:
3701+ hash_list = []
3702+ monitor_list = get_mon_map(service=service)
3703+ if monitor_list['monmap']['mons']:
3704+ for mon in monitor_list['monmap']['mons']:
3705+ hash_list.append(
3706+ hashlib.sha224(mon['name'].encode('utf-8')).hexdigest())
3707+ return sorted(hash_list)
3708+ else:
3709+ return None
3710+ except (ValueError, CalledProcessError):
3711+ raise
3712+
3713+
3714+def monitor_key_delete(service, key):
3715+ """
3716+    Delete a key and value pair from the monitor cluster.
3717+
3718+    :param service: six.string_types. The Ceph user name to run the command under
3719+    :param key: six.string_types. The key to delete.
3720+ """
3721+ try:
3722+ check_output(
3723+ ['ceph', '--id', service,
3724+         'config-key', 'del', str(key)])
3725+ except CalledProcessError as e:
3726+        log("Monitor config-key del failed with message: {}".format(
3727+ e.output))
3728+ raise
3729+
3730+
3731+def monitor_key_set(service, key, value):
3732+ """
3733+ Sets a key value pair on the monitor cluster.
3734+ :param service: six.string_types. The Ceph user name to run the command under
3735+ :param key: six.string_types. The key to set.
3736+ :param value: The value to set. This will be converted to a string
3737+ before setting
3738+ """
3739+ try:
3740+ check_output(
3741+ ['ceph', '--id', service,
3742+             'config-key', 'put', str(key), str(value)])
3743+ except CalledProcessError as e:
3744+ log("Monitor config-key put failed with message: {}".format(
3745+ e.output))
3746+ raise
3747+
3748+
3749+def monitor_key_get(service, key):
3750+ """
3751+ Gets the value of an existing key in the monitor cluster.
3752+ :param service: six.string_types. The Ceph user name to run the command under
3753+ :param key: six.string_types. The key to search for.
3754+ :return: Returns the value of that key or None if not found.
3755+ """
3756+ try:
3757+ output = check_output(
3758+ ['ceph', '--id', service,
3759+             'config-key', 'get', str(key)])
3760+ return output
3761+ except CalledProcessError as e:
3762+ log("Monitor config-key get failed with message: {}".format(
3763+ e.output))
3764+ return None
3765+
3766+
3767+def monitor_key_exists(service, key):
3768+ """
3769+ Searches for the existence of a key in the monitor cluster.
3770+ :param service: six.string_types. The Ceph user name to run the command under
3771+ :param key: six.string_types. The key to search for
3772+ :return: Returns True if the key exists, False if not and raises an
3773+ exception if an unknown error occurs. :raise: CalledProcessError if
3774+ an unknown error occurs
3775+ """
3776+ try:
3777+ check_call(
3778+ ['ceph', '--id', service,
3779+ 'config-key', 'exists', str(key)])
3780+        # If check_call did not raise, the key exists; a missing key
3781+        # makes ceph exit with ENOENT, which is handled below
3782+ return True
3783+ except CalledProcessError as e:
3784+ if e.returncode == errno.ENOENT:
3785+ return False
3786+ else:
3787+            log("Unknown error from ceph config-key exists: {} {}".format(
3788+ e.returncode, e.output))
3789+ raise
3790+
3791+
3792+def get_erasure_profile(service, name):
3793+ """
3794+ :param service: six.string_types. The Ceph user name to run the command under
3795+    :param name: six.string_types. The profile name to look up.
3796+    :return: dict of the erasure profile (json formatted output), or None on error.
3797+ """
3798+ try:
3799+ out = check_output(['ceph', '--id', service,
3800+ 'osd', 'erasure-code-profile', 'get',
3801+ name, '--format=json'])
3802+ return json.loads(out)
3803+ except (CalledProcessError, OSError, ValueError):
3804+ return None
3805+
3806+
3807+def pool_set(service, pool_name, key, value):
3808+ """
3809+ Sets a value for a RADOS pool in ceph.
3810+ :param service: six.string_types. The Ceph user name to run the command under
3811+ :param pool_name: six.string_types
3812+ :param key: six.string_types
3813+    :param value: The value to set for the given key
3814+ :return: None. Can raise CalledProcessError
3815+ """
3816+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
3817+ try:
3818+ check_call(cmd)
3819+ except CalledProcessError:
3820+ raise
3821+
3822+
3823+def snapshot_pool(service, pool_name, snapshot_name):
3824+ """
3825+ Snapshots a RADOS pool in ceph.
3826+ :param service: six.string_types. The Ceph user name to run the command under
3827+ :param pool_name: six.string_types
3828+ :param snapshot_name: six.string_types
3829+ :return: None. Can raise CalledProcessError
3830+ """
3831+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name]
3832+ try:
3833+ check_call(cmd)
3834+ except CalledProcessError:
3835+ raise
3836+
3837+
3838+def remove_pool_snapshot(service, pool_name, snapshot_name):
3839+ """
3840+ Remove a snapshot from a RADOS pool in ceph.
3841+ :param service: six.string_types. The Ceph user name to run the command under
3842+ :param pool_name: six.string_types
3843+ :param snapshot_name: six.string_types
3844+ :return: None. Can raise CalledProcessError
3845+ """
3846+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
3847+ try:
3848+ check_call(cmd)
3849+ except CalledProcessError:
3850+ raise
3851+
3852+
3853+# max_bytes should be an int or long
3854+def set_pool_quota(service, pool_name, max_bytes):
3855+ """
3856+ :param service: six.string_types. The Ceph user name to run the command under
3857+ :param pool_name: six.string_types
3858+ :param max_bytes: int or long
3859+ :return: None. Can raise CalledProcessError
3860+ """
3861+ # Set a byte quota on a RADOS pool in ceph.
3862+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
3863+ 'max_bytes', str(max_bytes)]
3864+ try:
3865+ check_call(cmd)
3866+ except CalledProcessError:
3867+ raise
3868+
3869+
3870+def remove_pool_quota(service, pool_name):
3871+ """
3872+    Remove the byte quota from a RADOS pool in ceph.
3873+ :param service: six.string_types. The Ceph user name to run the command under
3874+ :param pool_name: six.string_types
3875+ :return: None. Can raise CalledProcessError
3876+ """
3877+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
3878+ try:
3879+ check_call(cmd)
3880+ except CalledProcessError:
3881+ raise
3882+
3883+
3884+def remove_erasure_profile(service, profile_name):
3885+ """
3886+    Remove an erasure code profile. Please see
3887+    http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
3888+    for more details
3889+ :param service: six.string_types. The Ceph user name to run the command under
3890+ :param profile_name: six.string_types
3891+ :return: None. Can raise CalledProcessError
3892+ """
3893+ cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm',
3894+ profile_name]
3895+ try:
3896+ check_call(cmd)
3897+ except CalledProcessError:
3898+ raise
3899+
3900+
3901+def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
3902+ failure_domain='host',
3903+ data_chunks=2, coding_chunks=1,
3904+ locality=None, durability_estimator=None):
3905+ """
3906+ Create a new erasure code profile if one does not already exist for it. Updates
3907+ the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
3908+ for more details
3909+ :param service: six.string_types. The Ceph user name to run the command under
3910+ :param profile_name: six.string_types
3911+ :param erasure_plugin_name: six.string_types
3912+ :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region',
3913+ 'room', 'root', 'row'])
3914+ :param data_chunks: int
3915+ :param coding_chunks: int
3916+ :param locality: int
3917+ :param durability_estimator: int
3918+ :return: None. Can raise CalledProcessError
3919+ """
3920+ # Ensure this failure_domain is allowed by Ceph
3921+ validator(failure_domain, six.string_types,
3922+ ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
3923+
3924+ cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
3925+ 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
3926+ 'ruleset_failure_domain=' + failure_domain]
3927+ if locality is not None and durability_estimator is not None:
3928+ raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
3929+
3930+ # Add plugin specific information
3931+ if locality is not None:
3932+ # For local erasure codes
3933+ cmd.append('l=' + str(locality))
3934+ if durability_estimator is not None:
3935+ # For Shec erasure codes
3936+ cmd.append('c=' + str(durability_estimator))
3937+
3938+ if erasure_profile_exists(service, profile_name):
3939+ cmd.append('--force')
3940+
3941+ try:
3942+ check_call(cmd)
3943+ except CalledProcessError:
3944+ raise
3945+
3946+
3947+def rename_pool(service, old_name, new_name):
3948+ """
3949+ Rename a Ceph pool from old_name to new_name
3950+ :param service: six.string_types. The Ceph user name to run the command under
3951+ :param old_name: six.string_types
3952+ :param new_name: six.string_types
3953+ :return: None
3954+ """
3955+ validator(value=old_name, valid_type=six.string_types)
3956+ validator(value=new_name, valid_type=six.string_types)
3957+
3958+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
3959+ check_call(cmd)
3960+
3961+
3962+def erasure_profile_exists(service, name):
3963+ """
3964+ Check to see if an Erasure code profile already exists.
3965+ :param service: six.string_types. The Ceph user name to run the command under
3966+ :param name: six.string_types
3967+    :return: bool
3968+ """
3969+ validator(value=name, valid_type=six.string_types)
3970+ try:
3971+ check_call(['ceph', '--id', service,
3972+ 'osd', 'erasure-code-profile', 'get',
3973+ name])
3974+ return True
3975+ except CalledProcessError:
3976+ return False
3977+
3978+
3979+def get_cache_mode(service, pool_name):
3980+ """
3981+ Find the current caching mode of the pool_name given.
3982+ :param service: six.string_types. The Ceph user name to run the command under
3983+ :param pool_name: six.string_types
3984+    :return: six.string_types or None
3985+ """
3986+ validator(value=service, valid_type=six.string_types)
3987+ validator(value=pool_name, valid_type=six.string_types)
3988+ out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json'])
3989+ try:
3990+ osd_json = json.loads(out)
3991+ for pool in osd_json['pools']:
3992+ if pool['pool_name'] == pool_name:
3993+ return pool['cache_mode']
3994+ return None
3995+ except ValueError:
3996+ raise
3997+
3998+
3999+def pool_exists(service, name):
4000+ """Check to see if a RADOS pool already exists."""
4001+ try:
4002+ out = check_output(['rados', '--id', service,
4003+ 'lspools']).decode('UTF-8')
4004+ except CalledProcessError:
4005+ return False
4006+
4007+ return name in out
4008+
4009+
4010+def get_osds(service):
4011+ """Return a list of all Ceph Object Storage Daemons currently in the
4012+ cluster.
4013+ """
4014+ version = ceph_version()
4015+ if version and version >= '0.56':
4016+ return json.loads(check_output(['ceph', '--id', service,
4017+ 'osd', 'ls',
4018+ '--format=json']).decode('UTF-8'))
4019+
4020+ return None
4021
4022
4023 def install():
4024@@ -96,53 +647,37 @@
4025 check_call(cmd)
4026
4027
4028-def pool_exists(service, name):
4029- """Check to see if a RADOS pool already exists."""
4030- try:
4031- out = check_output(['rados', '--id', service,
4032- 'lspools']).decode('UTF-8')
4033- except CalledProcessError:
4034- return False
4035-
4036- return name in out
4037-
4038-
4039-def get_osds(service):
4040- """Return a list of all Ceph Object Storage Daemons currently in the
4041- cluster.
4042- """
4043- version = ceph_version()
4044- if version and version >= '0.56':
4045- return json.loads(check_output(['ceph', '--id', service,
4046- 'osd', 'ls',
4047- '--format=json']).decode('UTF-8'))
4048-
4049- return None
4050-
4051-
4052-def create_pool(service, name, replicas=3):
4053+def update_pool(client, pool, settings):
4054+ cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
4055+ for k, v in six.iteritems(settings):
4056+ cmd.append(k)
4057+ cmd.append(v)
4058+
4059+ check_call(cmd)
4060+
4061+
4062+def create_pool(service, name, replicas=3, pg_num=None):
4063 """Create a new RADOS pool."""
4064 if pool_exists(service, name):
4065 log("Ceph pool {} already exists, skipping creation".format(name),
4066 level=WARNING)
4067 return
4068
4069- # Calculate the number of placement groups based
4070- # on upstream recommended best practices.
4071- osds = get_osds(service)
4072- if osds:
4073- pgnum = (len(osds) * 100 // replicas)
4074- else:
4075- # NOTE(james-page): Default to 200 for older ceph versions
4076- # which don't support OSD query from cli
4077- pgnum = 200
4078-
4079- cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
4080- check_call(cmd)
4081-
4082- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
4083- str(replicas)]
4084- check_call(cmd)
4085+ if not pg_num:
4086+ # Calculate the number of placement groups based
4087+ # on upstream recommended best practices.
4088+ osds = get_osds(service)
4089+ if osds:
4090+ pg_num = (len(osds) * 100 // replicas)
4091+ else:
4092+ # NOTE(james-page): Default to 200 for older ceph versions
4093+ # which don't support OSD query from cli
4094+ pg_num = 200
4095+
4096+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)]
4097+ check_call(cmd)
4098+
4099+ update_pool(service, name, settings={'size': str(replicas)})
4100
4101
4102 def delete_pool(service, name):
4103@@ -197,10 +732,10 @@
4104 log('Created new keyfile at %s.' % keyfile, level=INFO)
4105
4106
4107-def get_ceph_nodes():
4108- """Query named relation 'ceph' to determine current nodes."""
4109+def get_ceph_nodes(relation='ceph'):
4110+ """Query named relation to determine current nodes."""
4111 hosts = []
4112- for r_id in relation_ids('ceph'):
4113+ for r_id in relation_ids(relation):
4114 for unit in related_units(r_id):
4115 hosts.append(relation_get('private-address', unit=unit, rid=r_id))
4116
4117@@ -288,17 +823,6 @@
4118 os.chown(data_src_dst, uid, gid)
4119
4120
4121-# TODO: re-use
4122-def modprobe(module):
4123- """Load a kernel module and configure for auto-load on reboot."""
4124- log('Loading kernel module', level=INFO)
4125- cmd = ['modprobe', module]
4126- check_call(cmd)
4127- with open('/etc/modules', 'r+') as modules:
4128- if module not in modules.read():
4129- modules.write(module)
4130-
4131-
4132 def copy_files(src, dst, symlinks=False, ignore=None):
4133 """Copy files from src to dst."""
4134 for item in os.listdir(src):
4135@@ -363,14 +887,14 @@
4136 service_start(svc)
4137
4138
4139-def ensure_ceph_keyring(service, user=None, group=None):
4140+def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'):
4141 """Ensures a ceph keyring is created for a named service and optionally
4142 ensures user and group ownership.
4143
4144 Returns False if no ceph key is available in relation state.
4145 """
4146 key = None
4147- for rid in relation_ids('ceph'):
4148+ for rid in relation_ids(relation):
4149 for unit in related_units(rid):
4150 key = relation_get('key', rid=rid, unit=unit)
4151 if key:
4152@@ -411,17 +935,60 @@
4153
4154 The API is versioned and defaults to version 1.
4155 """
4156- def __init__(self, api_version=1):
4157+
4158+ def __init__(self, api_version=1, request_id=None):
4159 self.api_version = api_version
4160+ if request_id:
4161+ self.request_id = request_id
4162+ else:
4163+ self.request_id = str(uuid.uuid1())
4164 self.ops = []
4165
4166- def add_op_create_pool(self, name, replica_count=3):
4167+ def add_op_create_pool(self, name, replica_count=3, pg_num=None):
4168+ """Adds an operation to create a pool.
4169+
4170+        @param pg_num: optional setting. If not provided, this value
4171+ will be calculated by the broker based on how many OSDs are in the
4172+ cluster at the time of creation. Note that, if provided, this value
4173+ will be capped at the current available maximum.
4174+ """
4175 self.ops.append({'op': 'create-pool', 'name': name,
4176- 'replicas': replica_count})
4177+ 'replicas': replica_count, 'pg_num': pg_num})
4178+
4179+ def set_ops(self, ops):
4180+ """Set request ops to provided value.
4181+
4182+ Useful for injecting ops that come from a previous request
4183+ to allow comparisons to ensure validity.
4184+ """
4185+ self.ops = ops
4186
4187 @property
4188 def request(self):
4189- return json.dumps({'api-version': self.api_version, 'ops': self.ops})
4190+ return json.dumps({'api-version': self.api_version, 'ops': self.ops,
4191+ 'request-id': self.request_id})
4192+
4193+ def _ops_equal(self, other):
4194+ if len(self.ops) == len(other.ops):
4195+ for req_no in range(0, len(self.ops)):
4196+ for key in ['replicas', 'name', 'op', 'pg_num']:
4197+ if self.ops[req_no].get(key) != other.ops[req_no].get(key):
4198+ return False
4199+ else:
4200+ return False
4201+ return True
4202+
4203+ def __eq__(self, other):
4204+ if not isinstance(other, self.__class__):
4205+ return False
4206+ if self.api_version == other.api_version and \
4207+ self._ops_equal(other):
4208+ return True
4209+ else:
4210+ return False
4211+
4212+ def __ne__(self, other):
4213+ return not self.__eq__(other)
4214
4215
4216 class CephBrokerRsp(object):
4217@@ -431,14 +998,198 @@
4218
4219 The API is versioned and defaults to version 1.
4220 """
4221+
4222 def __init__(self, encoded_rsp):
4223 self.api_version = None
4224 self.rsp = json.loads(encoded_rsp)
4225
4226 @property
4227+ def request_id(self):
4228+ return self.rsp.get('request-id')
4229+
4230+ @property
4231 def exit_code(self):
4232 return self.rsp.get('exit-code')
4233
4234 @property
4235 def exit_msg(self):
4236 return self.rsp.get('stderr')
4237+
4238+
4239+# Ceph Broker Conversation:
4240+# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
4241+# and send that request to ceph via the ceph relation. The CephBrokerRq has a
4242+# unique id so that the client can identify which CephBrokerRsp is associated
4243+# with the request. Ceph will also respond to each client unit individually
4244+# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
4245+# via key broker-rsp-glance-0
4246+#
4247+# To use this the charm can just do something like:
4248+#
4249+# from charmhelpers.contrib.storage.linux.ceph import (
4250+# send_request_if_needed,
4251+# is_request_complete,
4252+# CephBrokerRq,
4253+# )
4254+#
4255+# @hooks.hook('ceph-relation-changed')
4256+# def ceph_changed():
4257+# rq = CephBrokerRq()
4258+# rq.add_op_create_pool(name='poolname', replica_count=3)
4259+#
4260+# if is_request_complete(rq):
4261+# <Request complete actions>
4262+# else:
4263+# send_request_if_needed(get_ceph_request())
4264+#
4265+# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
4266+# of glance having sent a request to ceph which ceph has successfully processed
4267+# 'ceph:8': {
4268+# 'ceph/0': {
4269+# 'auth': 'cephx',
4270+# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
4271+# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
4272+# 'ceph-public-address': '10.5.44.103',
4273+# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
4274+# 'private-address': '10.5.44.103',
4275+# },
4276+# 'glance/0': {
4277+# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
4278+# '"ops": [{"replicas": 3, "name": "glance", '
4279+# '"op": "create-pool"}]}'),
4280+# 'private-address': '10.5.44.109',
4281+# },
4282+# }
4283+
4284+def get_previous_request(rid):
4285+ """Return the last ceph broker request sent on a given relation
4286+
4287+ @param rid: Relation id to query for request
4288+ """
4289+ request = None
4290+ broker_req = relation_get(attribute='broker_req', rid=rid,
4291+ unit=local_unit())
4292+ if broker_req:
4293+ request_data = json.loads(broker_req)
4294+ request = CephBrokerRq(api_version=request_data['api-version'],
4295+ request_id=request_data['request-id'])
4296+ request.set_ops(request_data['ops'])
4297+
4298+ return request
4299+
4300+
4301+def get_request_states(request, relation='ceph'):
4302+ """Return a dict of requests per relation id with their corresponding
4303+ completion state.
4304+
4305+ This allows a charm, which has a request for ceph, to see whether there is
4306+ an equivalent request already being processed and if so what state that
4307+ request is in.
4308+
4309+ @param request: A CephBrokerRq object
4310+ """
4311+ complete = []
4312+ requests = {}
4313+ for rid in relation_ids(relation):
4314+ complete = False
4315+ previous_request = get_previous_request(rid)
4316+ if request == previous_request:
4317+ sent = True
4318+ complete = is_request_complete_for_rid(previous_request, rid)
4319+ else:
4320+ sent = False
4321+ complete = False
4322+
4323+ requests[rid] = {
4324+ 'sent': sent,
4325+ 'complete': complete,
4326+ }
4327+
4328+ return requests
4329+
4330+
4331+def is_request_sent(request, relation='ceph'):
4332+ """Check to see if a functionally equivalent request has already been sent
4333+
4334+    Returns True if a similar request has been sent
4335+
4336+ @param request: A CephBrokerRq object
4337+ """
4338+ states = get_request_states(request, relation=relation)
4339+ for rid in states.keys():
4340+ if not states[rid]['sent']:
4341+ return False
4342+
4343+ return True
4344+
4345+
4346+def is_request_complete(request, relation='ceph'):
4347+ """Check to see if a functionally equivalent request has already been
4348+ completed
4349+
4350+    Returns True if a similar request has been completed
4351+
4352+ @param request: A CephBrokerRq object
4353+ """
4354+ states = get_request_states(request, relation=relation)
4355+ for rid in states.keys():
4356+ if not states[rid]['complete']:
4357+ return False
4358+
4359+ return True
4360+
4361+
4362+def is_request_complete_for_rid(request, rid):
4363+ """Check if a given request has been completed on the given relation
4364+
4365+ @param request: A CephBrokerRq object
4366+ @param rid: Relation ID
4367+ """
4368+ broker_key = get_broker_rsp_key()
4369+ for unit in related_units(rid):
4370+ rdata = relation_get(rid=rid, unit=unit)
4371+ if rdata.get(broker_key):
4372+ rsp = CephBrokerRsp(rdata.get(broker_key))
4373+ if rsp.request_id == request.request_id:
4374+ if not rsp.exit_code:
4375+ return True
4376+ else:
4377+ # The remote unit sent no reply targeted at this unit so either the
4378+ # remote ceph cluster does not support unit targeted replies or it
4379+ # has not processed our request yet.
4380+ if rdata.get('broker_rsp'):
4381+ request_data = json.loads(rdata['broker_rsp'])
4382+ if request_data.get('request-id'):
4383+ log('Ignoring legacy broker_rsp without unit key as remote '
4384+ 'service supports unit specific replies', level=DEBUG)
4385+ else:
4386+ log('Using legacy broker_rsp as remote service does not '
4387+                    'support unit specific replies', level=DEBUG)
4388+ rsp = CephBrokerRsp(rdata['broker_rsp'])
4389+ if not rsp.exit_code:
4390+ return True
4391+
4392+ return False
4393+
4394+
4395+def get_broker_rsp_key():
4396+ """Return broker response key for this unit
4397+
4398+ This is the key that ceph is going to use to pass request status
4399+ information back to this unit
4400+ """
4401+ return 'broker-rsp-' + local_unit().replace('/', '-')
4402+
4403+
4404+def send_request_if_needed(request, relation='ceph'):
4405+ """Send broker request if an equivalent request has not already been sent
4406+
4407+ @param request: A CephBrokerRq object
4408+ """
4409+ if is_request_sent(request, relation=relation):
4410+ log('Request already sent but not complete, not sending new request',
4411+ level=DEBUG)
4412+ else:
4413+ for rid in relation_ids(relation):
4414+ log('Sending request {}'.format(request.request_id), level=DEBUG)
4415+ relation_set(relation_id=rid, broker_req=request.request)
4416
4417=== modified file 'hooks/charmhelpers/contrib/storage/linux/loopback.py'
4418--- hooks/charmhelpers/contrib/storage/linux/loopback.py 2015-07-29 18:35:16 +0000
4419+++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2016-03-09 12:07:21 +0000
4420@@ -76,3 +76,13 @@
4421 check_call(cmd)
4422
4423 return create_loopback(path)
4424+
4425+
4426+def is_mapped_loopback_device(device):
4427+ """
4428+ Checks if a given device name is an existing/mapped loopback device.
4429+ :param device: str: Full path to the device (eg, /dev/loop1).
4430+    :returns: str: Path to the backing file if it is a loopback device,
4431+    empty string otherwise
4432+ """
4433+ return loopback_devices().get(device, "")
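An illustrative guard before a destructive device operation (the device path is hypothetical):

    backing_file = is_mapped_loopback_device('/dev/loop1')
    if backing_file:
        print('loop device is backed by {}'.format(backing_file))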
4434
4435=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
4436--- hooks/charmhelpers/contrib/storage/linux/utils.py 2015-07-29 18:35:16 +0000
4437+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2016-03-09 12:07:21 +0000
4438@@ -43,9 +43,10 @@
4439
4440 :param block_device: str: Full path of block device to clean.
4441 '''
4442+ # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
4443 # sometimes sgdisk exits non-zero; this is OK, dd will clean up
4444- call(['sgdisk', '--zap-all', '--mbrtogpt',
4445- '--clear', block_device])
4446+ call(['sgdisk', '--zap-all', '--', block_device])
4447+ call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
4448 dev_end = check_output(['blockdev', '--getsz',
4449 block_device]).decode('UTF-8')
4450 gpt_end = int(dev_end.split()[0]) - 100
4451
4452=== modified file 'hooks/charmhelpers/contrib/templating/jinja.py'
4453--- hooks/charmhelpers/contrib/templating/jinja.py 2015-07-29 18:35:16 +0000
4454+++ hooks/charmhelpers/contrib/templating/jinja.py 2016-03-09 12:07:21 +0000
4455@@ -18,14 +18,15 @@
4456 Templating using the python-jinja2 package.
4457 """
4458 import six
4459-from charmhelpers.fetch import apt_install
4460+from charmhelpers.fetch import apt_install, apt_update
4461 try:
4462 import jinja2
4463 except ImportError:
4464+ apt_update(fatal=True)
4465 if six.PY3:
4466- apt_install(["python3-jinja2"])
4467+ apt_install(["python3-jinja2"], fatal=True)
4468 else:
4469- apt_install(["python-jinja2"])
4470+ apt_install(["python-jinja2"], fatal=True)
4471 import jinja2
4472
4473
4474
4475=== modified file 'hooks/charmhelpers/core/hookenv.py'
4476--- hooks/charmhelpers/core/hookenv.py 2015-07-29 18:35:16 +0000
4477+++ hooks/charmhelpers/core/hookenv.py 2016-03-09 12:07:21 +0000
4478@@ -74,6 +74,7 @@
4479 res = func(*args, **kwargs)
4480 cache[key] = res
4481 return res
4482+ wrapper._wrapped = func
4483 return wrapper
4484
4485
4486@@ -173,9 +174,19 @@
4487 return os.environ.get('JUJU_RELATION', None)
4488
4489
4490-def relation_id():
4491- """The relation ID for the current relation hook"""
4492- return os.environ.get('JUJU_RELATION_ID', None)
4493+@cached
4494+def relation_id(relation_name=None, service_or_unit=None):
4495+ """The relation ID for the current or a specified relation"""
4496+ if not relation_name and not service_or_unit:
4497+ return os.environ.get('JUJU_RELATION_ID', None)
4498+ elif relation_name and service_or_unit:
4499+ service_name = service_or_unit.split('/')[0]
4500+ for relid in relation_ids(relation_name):
4501+ remote_service = remote_service_name(relid)
4502+ if remote_service == service_name:
4503+ return relid
4504+ else:
4505+        raise ValueError('Must specify both or neither of relation_name and service_or_unit')
4506
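A sketch of the new lookup form, resolving the relation id that joins a named relation to a given remote service (names illustrative):

    rid = relation_id(relation_name='identity-service',
                      service_or_unit='keystone/0')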
4507
4508 def local_unit():
4509@@ -193,9 +204,20 @@
4510 return local_unit().split('/')[0]
4511
4512
4513+@cached
4514+def remote_service_name(relid=None):
4515+ """The remote service name for a given relation-id (or the current relation)"""
4516+ if relid is None:
4517+ unit = remote_unit()
4518+ else:
4519+ units = related_units(relid)
4520+ unit = units[0] if units else None
4521+ return unit.split('/')[0] if unit else None
4522+
4523+
4524 def hook_name():
4525 """The name of the currently executing hook"""
4526- return os.path.basename(sys.argv[0])
4527+ return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
4528
4529
4530 class Config(dict):
4531@@ -469,6 +491,76 @@
4532
4533
4534 @cached
4535+def peer_relation_id():
4536+ '''Get the peers relation id if a peers relation has been joined, else None.'''
4537+ md = metadata()
4538+ section = md.get('peers')
4539+ if section:
4540+ for key in section:
4541+ relids = relation_ids(key)
4542+ if relids:
4543+ return relids[0]
4544+ return None
4545+
4546+
4547+@cached
4548+def relation_to_interface(relation_name):
4549+ """
4550+ Given the name of a relation, return the interface that relation uses.
4551+
4552+ :returns: The interface name, or ``None``.
4553+ """
4554+ return relation_to_role_and_interface(relation_name)[1]
4555+
4556+
4557+@cached
4558+def relation_to_role_and_interface(relation_name):
4559+ """
4560+ Given the name of a relation, return the role and the name of the interface
4561+ that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
4562+
4563+ :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
4564+ """
4565+ _metadata = metadata()
4566+ for role in ('provides', 'requires', 'peers'):
4567+ interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
4568+ if interface:
4569+ return role, interface
4570+ return None, None
4571+
4572+
4573+@cached
4574+def role_and_interface_to_relations(role, interface_name):
4575+ """
4576+ Given a role and interface name, return a list of relation names for the
4577+ current charm that use that interface under that role (where role is one
4578+ of ``provides``, ``requires``, or ``peers``).
4579+
4580+ :returns: A list of relation names.
4581+ """
4582+ _metadata = metadata()
4583+ results = []
4584+ for relation_name, relation in _metadata.get(role, {}).items():
4585+ if relation['interface'] == interface_name:
4586+ results.append(relation_name)
4587+ return results
4588+
4589+
4590+@cached
4591+def interface_to_relations(interface_name):
4592+ """
4593+ Given an interface, return a list of relation names for the current
4594+ charm that use that interface.
4595+
4596+ :returns: A list of relation names.
4597+ """
4598+ results = []
4599+ for role in ('provides', 'requires', 'peers'):
4600+ results.extend(role_and_interface_to_relations(role, interface_name))
4601+ return results
4602+
4603+
4604+@cached
4605 def charm_name():
4606 """Get the name of the current charm as is specified on metadata.yaml"""
4607 return metadata().get('name')
4608@@ -544,6 +636,38 @@
4609 return unit_get('private-address')
4610
4611
4612+@cached
4613+def storage_get(attribute=None, storage_id=None):
4614+ """Get storage attributes"""
4615+ _args = ['storage-get', '--format=json']
4616+ if storage_id:
4617+ _args.extend(('-s', storage_id))
4618+ if attribute:
4619+ _args.append(attribute)
4620+ try:
4621+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
4622+ except ValueError:
4623+ return None
4624+
4625+
4626+@cached
4627+def storage_list(storage_name=None):
4628+ """List the storage IDs for the unit"""
4629+ _args = ['storage-list', '--format=json']
4630+ if storage_name:
4631+ _args.append(storage_name)
4632+ try:
4633+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
4634+ except ValueError:
4635+ return None
4636+ except OSError as e:
4637+ import errno
4638+ if e.errno == errno.ENOENT:
4639+ # storage-list does not exist
4640+ return []
4641+ raise
4642+
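A hedged sketch of the two storage helpers together; the storage name 'data' is illustrative:

    for sid in storage_list('data'):
        location = storage_get('location', sid)
        log('storage {} mounted at {}'.format(sid, location))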
4643+
4644 class UnregisteredHookError(Exception):
4645 """Raised when an undefined hook is called"""
4646 pass
4647@@ -644,6 +768,21 @@
4648 subprocess.check_call(['action-fail', message])
4649
4650
4651+def action_name():
4652+ """Get the name of the currently executing action."""
4653+ return os.environ.get('JUJU_ACTION_NAME')
4654+
4655+
4656+def action_uuid():
4657+ """Get the UUID of the currently executing action."""
4658+ return os.environ.get('JUJU_ACTION_UUID')
4659+
4660+
4661+def action_tag():
4662+ """Get the tag for the currently executing action."""
4663+ return os.environ.get('JUJU_ACTION_TAG')
4664+
4665+
4666 def status_set(workload_state, message):
4667 """Set the workload state with a message
4668
4669@@ -673,25 +812,28 @@
4670
4671
4672 def status_get():
4673- """Retrieve the previously set juju workload state
4674-
4675- If the status-set command is not found then assume this is juju < 1.23 and
4676- return 'unknown'
4677+ """Retrieve the previously set juju workload state and message
4678+
4679+ If the status-get command is not found then assume this is juju < 1.23 and
4680+ return 'unknown', ""
4681+
4682 """
4683- cmd = ['status-get']
4684+ cmd = ['status-get', "--format=json", "--include-data"]
4685 try:
4686- raw_status = subprocess.check_output(cmd, universal_newlines=True)
4687- status = raw_status.rstrip()
4688- return status
4689+ raw_status = subprocess.check_output(cmd)
4690 except OSError as e:
4691 if e.errno == errno.ENOENT:
4692- return 'unknown'
4693+ return ('unknown', "")
4694 else:
4695 raise
4696+ else:
4697+ status = json.loads(raw_status.decode("UTF-8"))
4698+ return (status["status"], status["message"])
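Callers now unpack a (state, message) tuple instead of a bare string; a minimal sketch:

    state, message = status_get()
    if state == 'blocked':
        log('unit is blocked: {}'.format(message))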
4699
4700
4701 def translate_exc(from_exc, to_exc):
4702 def inner_translate_exc1(f):
4703+ @wraps(f)
4704 def inner_translate_exc2(*args, **kwargs):
4705 try:
4706 return f(*args, **kwargs)
4707@@ -736,6 +878,40 @@
4708 subprocess.check_call(cmd)
4709
4710
4711+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
4712+def payload_register(ptype, klass, pid):
4713+    """Used while a hook is running to let Juju know that a
4714+ payload has been started."""
4715+ cmd = ['payload-register']
4716+ for x in [ptype, klass, pid]:
4717+ cmd.append(x)
4718+ subprocess.check_call(cmd)
4719+
4720+
4721+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
4722+def payload_unregister(klass, pid):
4723+    """Used while a hook is running to let Juju know
4724+ that a payload has been manually stopped. The <class> and <id> provided
4725+ must match a payload that has been previously registered with juju using
4726+ payload-register."""
4727+ cmd = ['payload-unregister']
4728+ for x in [klass, pid]:
4729+ cmd.append(x)
4730+ subprocess.check_call(cmd)
4731+
4732+
4733+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
4734+def payload_status_set(klass, pid, status):
4735+    """Used to update the current status of a registered payload.
4736+    The <class> and <id> provided must match a payload that has been previously
4737+    registered with juju using payload-register. The <status> must be one of the
4738+    following: starting, started, stopping, stopped"""
4739+ cmd = ['payload-status-set']
4740+ for x in [klass, pid, status]:
4741+ cmd.append(x)
4742+ subprocess.check_call(cmd)
4743+
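A hedged sketch of the payload lifecycle these three helpers cover; the class and id values are hypothetical:

    payload_register('docker', 'web', 'abc123')
    payload_status_set('web', 'abc123', 'started')
    payload_unregister('web', 'abc123')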
4744+
4745 @cached
4746 def juju_version():
4747 """Full version string (eg. '1.23.3.1-trusty-amd64')"""
4748
4749=== modified file 'hooks/charmhelpers/core/host.py'
4750--- hooks/charmhelpers/core/host.py 2015-07-29 18:35:16 +0000
4751+++ hooks/charmhelpers/core/host.py 2016-03-09 12:07:21 +0000
4752@@ -63,55 +63,86 @@
4753 return service_result
4754
4755
4756-def service_pause(service_name, init_dir=None):
4757+def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
4758 """Pause a system service.
4759
4760 Stop it, and prevent it from starting again at boot."""
4761- if init_dir is None:
4762- init_dir = "/etc/init"
4763- stopped = service_stop(service_name)
4764- # XXX: Support systemd too
4765- override_path = os.path.join(
4766- init_dir, '{}.conf.override'.format(service_name))
4767- with open(override_path, 'w') as fh:
4768- fh.write("manual\n")
4769+ stopped = True
4770+ if service_running(service_name):
4771+ stopped = service_stop(service_name)
4772+ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
4773+ sysv_file = os.path.join(initd_dir, service_name)
4774+ if init_is_systemd():
4775+ service('disable', service_name)
4776+ elif os.path.exists(upstart_file):
4777+ override_path = os.path.join(
4778+ init_dir, '{}.override'.format(service_name))
4779+ with open(override_path, 'w') as fh:
4780+ fh.write("manual\n")
4781+ elif os.path.exists(sysv_file):
4782+ subprocess.check_call(["update-rc.d", service_name, "disable"])
4783+ else:
4784+ raise ValueError(
4785+ "Unable to detect {0} as SystemD, Upstart {1} or"
4786+            "Unable to detect {0} as systemd, Upstart {1} or"
4787+ service_name, upstart_file, sysv_file))
4788 return stopped
4789
4790
4791-def service_resume(service_name, init_dir=None):
4792+def service_resume(service_name, init_dir="/etc/init",
4793+ initd_dir="/etc/init.d"):
4794 """Resume a system service.
4795
4796 Reenable starting again at boot. Start the service"""
4797- # XXX: Support systemd too
4798- if init_dir is None:
4799- init_dir = "/etc/init"
4800- override_path = os.path.join(
4801- init_dir, '{}.conf.override'.format(service_name))
4802- if os.path.exists(override_path):
4803- os.unlink(override_path)
4804- started = service_start(service_name)
4805+ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
4806+ sysv_file = os.path.join(initd_dir, service_name)
4807+ if init_is_systemd():
4808+ service('enable', service_name)
4809+ elif os.path.exists(upstart_file):
4810+ override_path = os.path.join(
4811+ init_dir, '{}.override'.format(service_name))
4812+ if os.path.exists(override_path):
4813+ os.unlink(override_path)
4814+ elif os.path.exists(sysv_file):
4815+ subprocess.check_call(["update-rc.d", service_name, "enable"])
4816+ else:
4817+ raise ValueError(
4818+            "Unable to detect {0} as systemd, Upstart {1} or"
4819+ " SysV {2}".format(
4820+ service_name, upstart_file, sysv_file))
4821+
4822+ started = service_running(service_name)
4823+ if not started:
4824+ started = service_start(service_name)
4825 return started
4826
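A sketch of the pause/resume pair around maintenance work; the service name is illustrative, and the right init system is detected automatically:

    service_pause('neutron-server')
    # ... perform maintenance while the unit cannot restart it ...
    service_resume('neutron-server')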
4827
4828 def service(action, service_name):
4829 """Control a system service"""
4830- cmd = ['service', service_name, action]
4831+ if init_is_systemd():
4832+ cmd = ['systemctl', action, service_name]
4833+ else:
4834+ cmd = ['service', service_name, action]
4835 return subprocess.call(cmd) == 0
4836
4837
4838-def service_running(service):
4839+def service_running(service_name):
4840 """Determine whether a system service is running"""
4841- try:
4842- output = subprocess.check_output(
4843- ['service', service, 'status'],
4844- stderr=subprocess.STDOUT).decode('UTF-8')
4845- except subprocess.CalledProcessError:
4846- return False
4847+ if init_is_systemd():
4848+ return service('is-active', service_name)
4849 else:
4850- if ("start/running" in output or "is running" in output):
4851- return True
4852- else:
4853+ try:
4854+ output = subprocess.check_output(
4855+ ['service', service_name, 'status'],
4856+ stderr=subprocess.STDOUT).decode('UTF-8')
4857+ except subprocess.CalledProcessError:
4858 return False
4859+ else:
4860+ if ("start/running" in output or "is running" in output or
4861+ "up and running" in output):
4862+ return True
4863+ else:
4864+ return False
4865
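With this dispatch, every existing call site gains systemd support transparently. A sketch of the two code paths (service name again illustrative):

    from charmhelpers.core.host import service, service_running

    # On a systemd host this runs `systemctl restart neutron-server`;
    # elsewhere it falls back to `service neutron-server restart`.
    service('restart', 'neutron-server')

    # Under systemd, service_running() delegates to `systemctl is-active`,
    # whose exit status already encodes the answer as a boolean.
    if not service_running('neutron-server'):
        service('start', 'neutron-server')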
4866
4867 def service_available(service_name):
4868@@ -126,8 +157,29 @@
4869 return True
4870
4871
4872-def adduser(username, password=None, shell='/bin/bash', system_user=False):
4873- """Add a user to the system"""
4874+SYSTEMD_SYSTEM = '/run/systemd/system'
4875+
4876+
4877+def init_is_systemd():
4878+ """Return True if the host system uses systemd, False otherwise."""
4879+ return os.path.isdir(SYSTEMD_SYSTEM)
4880+
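Worth noting for reviewers: /run/systemd/system exists only when systemd is actually running as init, not merely installed, so this is a reliable probe. E.g.:

    from charmhelpers.core.host import init_is_systemd

    # True only when systemd is PID 1 (the directory is created at boot,
    # mirroring systemd's own sd_booted() check).
    if init_is_systemd():
        print('systemd host')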
4881+
4882+def adduser(username, password=None, shell='/bin/bash', system_user=False,
4883+ primary_group=None, secondary_groups=None):
4884+ """Add a user to the system.
4885+
4886+ Will log but otherwise succeed if the user already exists.
4887+
4888+ :param str username: Username to create
4889+ :param str password: Password for user; if ``None``, create a system user
4890+ :param str shell: The default shell for the user
4891+ :param bool system_user: Whether to create a login or system user
4892+ :param str primary_group: Primary group for user; defaults to username
4893+ :param list secondary_groups: Optional list of additional groups
4894+
4895+ :returns: The password database entry struct, as returned by `pwd.getpwnam`
4896+ """
4897 try:
4898 user_info = pwd.getpwnam(username)
4899 log('user {0} already exists!'.format(username))
4900@@ -142,12 +194,32 @@
4901 '--shell', shell,
4902 '--password', password,
4903 ])
4904+ if not primary_group:
4905+ try:
4906+ grp.getgrnam(username)
4907+ primary_group = username # avoid "group exists" error
4908+ except KeyError:
4909+ pass
4910+ if primary_group:
4911+ cmd.extend(['-g', primary_group])
4912+ if secondary_groups:
4913+ cmd.extend(['-G', ','.join(secondary_groups)])
4914 cmd.append(username)
4915 subprocess.check_call(cmd)
4916 user_info = pwd.getpwnam(username)
4917 return user_info
4918
4919
4920+def user_exists(username):
4921+ """Check if a user exists"""
4922+ try:
4923+ pwd.getpwnam(username)
4924+ user_exists = True
4925+ except KeyError:
4926+ user_exists = False
4927+ return user_exists
4928+
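The new group arguments map onto useradd's -g and -G flags. A minimal sketch, with all names invented for illustration:

    from charmhelpers.core.host import adduser, user_exists

    # If primary_group is omitted, the helper falls back to a group named
    # after the user when one already exists, avoiding a "group exists" error.
    if not user_exists('plumgrid'):          # hypothetical user name
        adduser('plumgrid', system_user=True,
                primary_group='plumgrid',
                secondary_groups=['libvirtd'])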
4929+
4930 def add_group(group_name, system_group=False):
4931 """Add a group to the system"""
4932 try:
4933@@ -229,14 +301,12 @@
4934
4935
4936 def fstab_remove(mp):
4937- """Remove the given mountpoint entry from /etc/fstab
4938- """
4939+ """Remove the given mountpoint entry from /etc/fstab"""
4940 return Fstab.remove_by_mountpoint(mp)
4941
4942
4943 def fstab_add(dev, mp, fs, options=None):
4944- """Adds the given device entry to the /etc/fstab file
4945- """
4946+ """Adds the given device entry to the /etc/fstab file"""
4947 return Fstab.add(dev, mp, fs, options=options)
4948
4949
4950@@ -280,9 +350,19 @@
4951 return system_mounts
4952
4953
4954+def fstab_mount(mountpoint):
4955+ """Mount filesystem using fstab"""
4956+ cmd_args = ['mount', mountpoint]
4957+ try:
4958+ subprocess.check_output(cmd_args)
4959+ except subprocess.CalledProcessError as e:
4960+ log('Error mounting {}\n{}'.format(mountpoint, e.output))
4961+ return False
4962+ return True
4963+
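fstab_mount() complements the existing fstab helpers: the /etc/fstab entry supplies the device and options, so only the mountpoint is passed. A sketch with an invented device and path:

    from charmhelpers.core.host import fstab_add, fstab_mount

    # fstab_add() records the entry; fstab_mount() then mounts by mountpoint
    # alone, logging and returning False on failure rather than raising.
    fstab_add('/dev/vdb1', '/mnt/data', 'ext4', options='defaults,noatime')
    fstab_mount('/mnt/data')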
4964+
4965 def file_hash(path, hash_type='md5'):
4966- """
4967- Generate a hash checksum of the contents of 'path' or None if not found.
4968+ """Generate a hash checksum of the contents of 'path' or None if not found.
4969
4970 :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
4971 such as md5, sha1, sha256, sha512, etc.
4972@@ -297,10 +377,9 @@
4973
4974
4975 def path_hash(path):
4976- """
4977- Generate a hash checksum of all files matching 'path'. Standard wildcards
4978- like '*' and '?' are supported, see documentation for the 'glob' module for
4979- more information.
4980+ """Generate a hash checksum of all files matching 'path'. Standard
4981+ wildcards like '*' and '?' are supported; see the 'glob'
4982+ module for more information.
4983
4984 :return: dict: A { filename: hash } dictionary for all matched files.
4985 Empty if none found.
4986@@ -312,8 +391,7 @@
4987
4988
4989 def check_hash(path, checksum, hash_type='md5'):
4990- """
4991- Validate a file using a cryptographic checksum.
4992+ """Validate a file using a cryptographic checksum.
4993
4994 :param str checksum: Value of the checksum used to validate the file.
4995 :param str hash_type: Hash algorithm used to generate `checksum`.
4996@@ -328,6 +406,7 @@
4997
4998
4999 class ChecksumError(ValueError):
5000+ """A class derived from Value error to indicate the checksum failed."""
The diff has been truncated for viewing.
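The hashing helpers in this file change only in docstring layout; for completeness, a usage sketch (the path is illustrative):

    from charmhelpers.core.host import ChecksumError, check_hash, file_hash

    ini = '/etc/neutron/plugins/plumgrid/plumgrid.ini'   # hypothetical path
    digest = file_hash(ini, hash_type='sha256')          # None if file missing
    try:
        check_hash(ini, digest, hash_type='sha256')
    except ChecksumError:
        pass  # contents changed since the digest was taken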
