Merge lp:~james-page/charm-helpers/vpp-rebase into lp:~gnuoy/charm-helpers/cisco-vpp

Proposed by James Page
Status: Merged
Merged at revision: 399
Proposed branch: lp:~james-page/charm-helpers/vpp-rebase
Merge into: lp:~gnuoy/charm-helpers/cisco-vpp
Diff against target: 5730 lines (+3512/-430)
67 files modified
VERSION (+1/-1)
charmhelpers/cli/__init__.py (+32/-5)
charmhelpers/cli/commands.py (+5/-4)
charmhelpers/cli/hookenv.py (+23/-0)
charmhelpers/contrib/amulet/utils.py (+239/-9)
charmhelpers/contrib/benchmark/__init__.py (+3/-1)
charmhelpers/contrib/database/mysql.py (+3/-0)
charmhelpers/contrib/network/ufw.py (+46/-3)
charmhelpers/contrib/openstack/amulet/deployment.py (+38/-4)
charmhelpers/contrib/openstack/amulet/utils.py (+361/-51)
charmhelpers/contrib/openstack/context.py (+47/-34)
charmhelpers/contrib/openstack/templates/ceph.conf (+6/-6)
charmhelpers/contrib/openstack/templating.py (+2/-2)
charmhelpers/contrib/openstack/utils.py (+77/-23)
charmhelpers/contrib/peerstorage/__init__.py (+5/-4)
charmhelpers/contrib/python/packages.py (+2/-0)
charmhelpers/contrib/storage/linux/ceph.py (+6/-6)
charmhelpers/contrib/storage/linux/utils.py (+4/-3)
charmhelpers/contrib/unison/__init__.py (+23/-8)
charmhelpers/coordinator.py (+607/-0)
charmhelpers/core/files.py (+45/-0)
charmhelpers/core/hookenv.py (+192/-40)
charmhelpers/core/host.py (+31/-5)
charmhelpers/core/services/base.py (+12/-9)
charmhelpers/core/services/helpers.py (+1/-2)
charmhelpers/core/unitdata.py (+61/-17)
charmhelpers/fetch/__init__.py (+31/-14)
charmhelpers/fetch/archiveurl.py (+7/-1)
charmhelpers/fetch/giturl.py (+1/-1)
docs/_extensions/automembersummary.py (+86/-0)
docs/api/charmhelpers.coordinator.rst (+10/-0)
docs/api/charmhelpers.core.decorators.rst (+7/-0)
docs/api/charmhelpers.core.fstab.rst (+7/-0)
docs/api/charmhelpers.core.hookenv.rst (+12/-0)
docs/api/charmhelpers.core.host.rst (+12/-0)
docs/api/charmhelpers.core.rst (+11/-38)
docs/api/charmhelpers.core.services.base.rst (+12/-0)
docs/api/charmhelpers.core.services.helpers.rst (+12/-0)
docs/api/charmhelpers.core.services.rst (+12/-0)
docs/api/charmhelpers.core.strutils.rst (+7/-0)
docs/api/charmhelpers.core.sysctl.rst (+7/-0)
docs/api/charmhelpers.core.templating.rst (+7/-0)
docs/api/charmhelpers.core.unitdata.rst (+7/-0)
docs/api/charmhelpers.rst (+4/-2)
docs/api/modules.rst (+0/-7)
docs/conf.py (+4/-1)
setup.py (+22/-1)
test_requirements.txt (+3/-1)
tests/cli/test_cmdline.py (+56/-9)
tests/contrib/amulet/test_utils.py (+105/-0)
tests/contrib/benchmark/test_benchmark.py (+17/-13)
tests/contrib/hahelpers/test_apache_utils.py (+1/-1)
tests/contrib/network/test_ufw.py (+72/-0)
tests/contrib/openstack/test_openstack_utils.py (+34/-10)
tests/contrib/openstack/test_os_contexts.py (+60/-1)
tests/contrib/peerstorage/test_peerstorage.py (+7/-7)
tests/contrib/python/test_debug.py (+1/-1)
tests/contrib/storage/test_linux_ceph.py (+11/-11)
tests/contrib/storage/test_linux_storage_utils.py (+11/-2)
tests/contrib/unison/test_unison.py (+58/-1)
tests/coordinator/test_coordinator.py (+535/-0)
tests/core/test_files.py (+32/-0)
tests/core/test_hookenv.py (+232/-31)
tests/core/test_host.py (+33/-1)
tests/core/test_services.py (+13/-7)
tests/fetch/test_archiveurl.py (+21/-3)
tests/fetch/test_fetch.py (+60/-29)
To merge this branch: bzr merge lp:~james-page/charm-helpers/vpp-rebase
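For reference, a minimal local sequence for applying this proposal would look like the following (a sketch only, assuming bzr is installed and both branches are readable; the commit message is illustrative):

  bzr branch lp:~gnuoy/charm-helpers/cisco-vpp
  cd cisco-vpp
  bzr merge lp:~james-page/charm-helpers/vpp-rebase
  bzr commit -m "Merge vpp-rebase into cisco-vpp"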
Reviewer: Liam Young
Review status: Approve
Review via email: mp+267916@code.launchpad.net
Revision history for this message
Liam Young (gnuoy) wrote:

lgtm

review: Approve

Preview Diff

1=== modified file 'VERSION'
2--- VERSION 2015-05-20 14:31:33 +0000
3+++ VERSION 2015-08-13 08:33:21 +0000
4@@ -1,1 +1,1 @@
5-0.3.2
6+0.5.0
7
8=== modified file 'charmhelpers/cli/__init__.py'
9--- charmhelpers/cli/__init__.py 2015-01-22 06:06:03 +0000
10+++ charmhelpers/cli/__init__.py 2015-08-13 08:33:21 +0000
11@@ -20,6 +20,8 @@
12
13 from six.moves import zip
14
15+from charmhelpers.core import unitdata
16+
17
18 class OutputFormatter(object):
19 def __init__(self, outfile=sys.stdout):
20@@ -53,6 +55,8 @@
21
22 def raw(self, output):
23 """Output data as raw string (default)"""
24+ if isinstance(output, (list, tuple)):
25+ output = '\n'.join(map(str, output))
26 self.outfile.write(str(output))
27
28 def py(self, output):
29@@ -91,6 +95,7 @@
30 argument_parser = None
31 subparsers = None
32 formatter = None
33+ exit_code = 0
34
35 def __init__(self):
36 if not self.argument_parser:
37@@ -115,6 +120,21 @@
38 return decorated
39 return wrapper
40
41+ def test_command(self, decorated):
42+ """
43+ Subcommand is a boolean test function, so bool return values should be
44+ converted to a 0/1 exit code.
45+ """
46+ decorated._cli_test_command = True
47+ return decorated
48+
49+ def no_output(self, decorated):
50+ """
51+ Subcommand is not expected to return a value, so don't print a spurious None.
52+ """
53+ decorated._cli_no_output = True
54+ return decorated
55+
56 def subcommand_builder(self, command_name, description=None):
57 """
58 Decorate a function that builds a subcommand. Builders should accept a
59@@ -132,12 +152,19 @@
60 arguments = self.argument_parser.parse_args()
61 argspec = inspect.getargspec(arguments.func)
62 vargs = []
63- kwargs = {}
64+ for arg in argspec.args:
65+ vargs.append(getattr(arguments, arg))
66 if argspec.varargs:
67- vargs = getattr(arguments, argspec.varargs)
68- for arg in argspec.args:
69- kwargs[arg] = getattr(arguments, arg)
70- self.formatter.format_output(arguments.func(*vargs, **kwargs), arguments.format)
71+ vargs.extend(getattr(arguments, argspec.varargs))
72+ output = arguments.func(*vargs)
73+ if getattr(arguments.func, '_cli_test_command', False):
74+ self.exit_code = 0 if output else 1
75+ output = ''
76+ if getattr(arguments.func, '_cli_no_output', False):
77+ output = ''
78+ self.formatter.format_output(output, arguments.format)
79+ if unitdata._KV:
80+ unitdata._KV.flush()
81
82
83 cmdline = CommandLine()
84
85=== modified file 'charmhelpers/cli/commands.py'
86--- charmhelpers/cli/commands.py 2015-05-13 20:44:19 +0000
87+++ charmhelpers/cli/commands.py 2015-08-13 08:33:21 +0000
88@@ -24,8 +24,9 @@
89 from . import CommandLine # noqa
90
91 """
92-Import the sub-modules to be included by chlp.
93+Import the sub-modules which have decorated subcommands to register with chlp.
94 """
95-import host # noqa
96-import benchmark # noqa
97-import unitdata # noqa
98+from . import host # noqa
99+from . import benchmark # noqa
100+from . import unitdata # noqa
101+from . import hookenv # noqa
102
103=== added file 'charmhelpers/cli/hookenv.py'
104--- charmhelpers/cli/hookenv.py 1970-01-01 00:00:00 +0000
105+++ charmhelpers/cli/hookenv.py 2015-08-13 08:33:21 +0000
106@@ -0,0 +1,23 @@
107+# Copyright 2014-2015 Canonical Limited.
108+#
109+# This file is part of charm-helpers.
110+#
111+# charm-helpers is free software: you can redistribute it and/or modify
112+# it under the terms of the GNU Lesser General Public License version 3 as
113+# published by the Free Software Foundation.
114+#
115+# charm-helpers is distributed in the hope that it will be useful,
116+# but WITHOUT ANY WARRANTY; without even the implied warranty of
117+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
118+# GNU Lesser General Public License for more details.
119+#
120+# You should have received a copy of the GNU Lesser General Public License
121+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
122+
123+from . import cmdline
124+from charmhelpers.core import hookenv
125+
126+
127+cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
128+cmdline.subcommand('service-name')(hookenv.service_name)
129+cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)
130
131=== modified file 'charmhelpers/contrib/amulet/utils.py'
132--- charmhelpers/contrib/amulet/utils.py 2015-04-21 15:40:51 +0000
133+++ charmhelpers/contrib/amulet/utils.py 2015-08-13 08:33:21 +0000
134@@ -14,14 +14,21 @@
135 # You should have received a copy of the GNU Lesser General Public License
136 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
137
138-import ConfigParser
139 import io
140 import logging
141+import os
142 import re
143 import sys
144 import time
145
146+import amulet
147+import distro_info
148 import six
149+from six.moves import configparser
150+if six.PY3:
151+ from urllib import parse as urlparse
152+else:
153+ import urlparse
154
155
156 class AmuletUtils(object):
157@@ -33,6 +40,7 @@
158
159 def __init__(self, log_level=logging.ERROR):
160 self.log = self.get_logger(level=log_level)
161+ self.ubuntu_releases = self.get_ubuntu_releases()
162
163 def get_logger(self, name="amulet-logger", level=logging.DEBUG):
164 """Get a logger object that will log to stdout."""
165@@ -70,12 +78,44 @@
166 else:
167 return False
168
169+ def get_ubuntu_release_from_sentry(self, sentry_unit):
170+ """Get Ubuntu release codename from sentry unit.
171+
172+ :param sentry_unit: amulet sentry/service unit pointer
173+ :returns: list of strings - release codename, failure message
174+ """
175+ msg = None
176+ cmd = 'lsb_release -cs'
177+ release, code = sentry_unit.run(cmd)
178+ if code == 0:
179+ self.log.debug('{} lsb_release: {}'.format(
180+ sentry_unit.info['unit_name'], release))
181+ else:
182+ msg = ('{} `{}` returned {} '
183+ '{}'.format(sentry_unit.info['unit_name'],
184+ cmd, release, code))
185+ if release not in self.ubuntu_releases:
186+ msg = ("Release ({}) not found in Ubuntu releases "
187+ "({})".format(release, self.ubuntu_releases))
188+ return release, msg
189+
190 def validate_services(self, commands):
191- """Validate services.
192-
193- Verify the specified services are running on the corresponding
194+ """Validate that lists of commands succeed on service units. Can be
195+ used to verify system services are running on the corresponding
196 service units.
197- """
198+
199+ :param commands: dict with sentry keys and arbitrary command list vals
200+ :returns: None if successful, Failure string message otherwise
201+ """
202+ self.log.debug('Checking status of system services...')
203+
204+ # /!\ DEPRECATION WARNING (beisner):
205+ # New and existing tests should be rewritten to use
206+ # validate_services_by_name() as it is aware of init systems.
207+ self.log.warn('/!\\ DEPRECATION WARNING: use '
208+ 'validate_services_by_name instead of validate_services '
209+ 'due to init system differences.')
210+
211 for k, v in six.iteritems(commands):
212 for cmd in v:
213 output, code = k.run(cmd)
214@@ -86,6 +126,45 @@
215 return "command `{}` returned {}".format(cmd, str(code))
216 return None
217
218+ def validate_services_by_name(self, sentry_services):
219+ """Validate system service status by service name, automatically
220+ detecting init system based on Ubuntu release codename.
221+
222+ :param sentry_services: dict with sentry keys and svc list values
223+ :returns: None if successful, Failure string message otherwise
224+ """
225+ self.log.debug('Checking status of system services...')
226+
227+ # Point at which systemd became a thing
228+ systemd_switch = self.ubuntu_releases.index('vivid')
229+
230+ for sentry_unit, services_list in six.iteritems(sentry_services):
231+ # Get lsb_release codename from unit
232+ release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
233+ if ret:
234+ return ret
235+
236+ for service_name in services_list:
237+ if (self.ubuntu_releases.index(release) >= systemd_switch or
238+ service_name in ['rabbitmq-server', 'apache2']):
239+ # init is systemd (or regular sysv)
240+ cmd = 'sudo service {} status'.format(service_name)
241+ output, code = sentry_unit.run(cmd)
242+ service_running = code == 0
243+ elif self.ubuntu_releases.index(release) < systemd_switch:
244+ # init is upstart
245+ cmd = 'sudo status {}'.format(service_name)
246+ output, code = sentry_unit.run(cmd)
247+ service_running = code == 0 and "start/running" in output
248+
249+ self.log.debug('{} `{}` returned '
250+ '{}'.format(sentry_unit.info['unit_name'],
251+ cmd, code))
252+ if not service_running:
253+ return u"command `{}` returned {} {}".format(
254+ cmd, output, str(code))
255+ return None
256+
257 def _get_config(self, unit, filename):
258 """Get a ConfigParser object for parsing a unit's config file."""
259 file_contents = unit.file_contents(filename)
260@@ -93,7 +172,7 @@
261 # NOTE(beisner): by default, ConfigParser does not handle options
262 # with no value, such as the flags used in the mysql my.cnf file.
263 # https://bugs.python.org/issue7005
264- config = ConfigParser.ConfigParser(allow_no_value=True)
265+ config = configparser.ConfigParser(allow_no_value=True)
266 config.readfp(io.StringIO(file_contents))
267 return config
268
269@@ -103,7 +182,15 @@
270
271 Verify that the specified section of the config file contains
272 the expected option key:value pairs.
273+
274+ Compare expected dictionary data vs actual dictionary data.
275+ The values in the 'expected' dictionary can be strings, bools, ints,
276+ longs, or can be a function that evaluates a variable and returns a
277+ bool.
278 """
279+ self.log.debug('Validating config file data ({} in {} on {})'
280+ '...'.format(section, config_file,
281+ sentry_unit.info['unit_name']))
282 config = self._get_config(sentry_unit, config_file)
283
284 if section != 'DEFAULT' and not config.has_section(section):
285@@ -112,9 +199,20 @@
286 for k in expected.keys():
287 if not config.has_option(section, k):
288 return "section [{}] is missing option {}".format(section, k)
289- if config.get(section, k) != expected[k]:
290+
291+ actual = config.get(section, k)
292+ v = expected[k]
293+ if (isinstance(v, six.string_types) or
294+ isinstance(v, bool) or
295+ isinstance(v, six.integer_types)):
296+ # handle explicit values
297+ if actual != v:
298+ return "section [{}] {}:{} != expected {}:{}".format(
299+ section, k, actual, k, expected[k])
300+ # handle function pointers, such as not_null or valid_ip
301+ elif not v(actual):
302 return "section [{}] {}:{} != expected {}:{}".format(
303- section, k, config.get(section, k), k, expected[k])
304+ section, k, actual, k, expected[k])
305 return None
306
307 def _validate_dict_data(self, expected, actual):
308@@ -122,7 +220,7 @@
309
310 Compare expected dictionary data vs actual dictionary data.
311 The values in the 'expected' dictionary can be strings, bools, ints,
312- longs, or can be a function that evaluate a variable and returns a
313+ longs, or can be a function that evaluates a variable and returns a
314 bool.
315 """
316 self.log.debug('actual: {}'.format(repr(actual)))
317@@ -133,8 +231,10 @@
318 if (isinstance(v, six.string_types) or
319 isinstance(v, bool) or
320 isinstance(v, six.integer_types)):
321+ # handle explicit values
322 if v != actual[k]:
323 return "{}:{}".format(k, actual[k])
324+ # handle function pointers, such as not_null or valid_ip
325 elif not v(actual[k]):
326 return "{}:{}".format(k, actual[k])
327 else:
328@@ -321,3 +421,133 @@
329
330 def endpoint_error(self, name, data):
331 return 'unexpected endpoint data in {} - {}'.format(name, data)
332+
333+ def get_ubuntu_releases(self):
334+ """Return a list of all Ubuntu releases in order of release."""
335+ _d = distro_info.UbuntuDistroInfo()
336+ _release_list = _d.all
337+ self.log.debug('Ubuntu release list: {}'.format(_release_list))
338+ return _release_list
339+
340+ def file_to_url(self, file_rel_path):
341+ """Convert a relative file path to a file URL."""
342+ _abs_path = os.path.abspath(file_rel_path)
343+ return urlparse.urlparse(_abs_path, scheme='file').geturl()
344+
345+ def check_commands_on_units(self, commands, sentry_units):
346+ """Check that all commands in a list exit zero on all
347+ sentry units in a list.
348+
349+ :param commands: list of bash commands
350+ :param sentry_units: list of sentry unit pointers
351+ :returns: None if successful; Failure message otherwise
352+ """
353+ self.log.debug('Checking exit codes for {} commands on {} '
354+ 'sentry units...'.format(len(commands),
355+ len(sentry_units)))
356+ for sentry_unit in sentry_units:
357+ for cmd in commands:
358+ output, code = sentry_unit.run(cmd)
359+ if code == 0:
360+ self.log.debug('{} `{}` returned {} '
361+ '(OK)'.format(sentry_unit.info['unit_name'],
362+ cmd, code))
363+ else:
364+ return ('{} `{}` returned {} '
365+ '{}'.format(sentry_unit.info['unit_name'],
366+ cmd, code, output))
367+ return None
368+
369+ def get_process_id_list(self, sentry_unit, process_name):
370+ """Get a list of process ID(s) from a single sentry juju unit
371+ for a single process name.
372+
373+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
374+ :param process_name: Process name
375+ :returns: List of process IDs
376+ """
377+ cmd = 'pidof {}'.format(process_name)
378+ output, code = sentry_unit.run(cmd)
379+ if code != 0:
380+ msg = ('{} `{}` returned {} '
381+ '{}'.format(sentry_unit.info['unit_name'],
382+ cmd, code, output))
383+ amulet.raise_status(amulet.FAIL, msg=msg)
384+ return str(output).split()
385+
386+ def get_unit_process_ids(self, unit_processes):
387+ """Construct a dict containing unit sentries, process names, and
388+ process IDs."""
389+ pid_dict = {}
390+ for sentry_unit, process_list in unit_processes.iteritems():
391+ pid_dict[sentry_unit] = {}
392+ for process in process_list:
393+ pids = self.get_process_id_list(sentry_unit, process)
394+ pid_dict[sentry_unit].update({process: pids})
395+ return pid_dict
396+
397+ def validate_unit_process_ids(self, expected, actual):
398+ """Validate process id quantities for services on units."""
399+ self.log.debug('Checking units for running processes...')
400+ self.log.debug('Expected PIDs: {}'.format(expected))
401+ self.log.debug('Actual PIDs: {}'.format(actual))
402+
403+ if len(actual) != len(expected):
404+ return ('Unit count mismatch. expected, actual: {}, '
405+ '{} '.format(len(expected), len(actual)))
406+
407+ for (e_sentry, e_proc_names) in expected.iteritems():
408+ e_sentry_name = e_sentry.info['unit_name']
409+ if e_sentry in actual.keys():
410+ a_proc_names = actual[e_sentry]
411+ else:
412+ return ('Expected sentry ({}) not found in actual dict data.'
413+ '{}'.format(e_sentry_name, e_sentry))
414+
415+ if len(e_proc_names.keys()) != len(a_proc_names.keys()):
416+ return ('Process name count mismatch. expected, actual: {}, '
417+ '{}'.format(len(expected), len(actual)))
418+
419+ for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
420+ zip(e_proc_names.items(), a_proc_names.items()):
421+ if e_proc_name != a_proc_name:
422+ return ('Process name mismatch. expected, actual: {}, '
423+ '{}'.format(e_proc_name, a_proc_name))
424+
425+ a_pids_length = len(a_pids)
426+ fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
427+ '{}, {} ({})'.format(e_sentry_name, e_proc_name,
428+ e_pids_length, a_pids_length,
429+ a_pids))
430+
431+ # If expected is not bool, ensure PID quantities match
432+ if not isinstance(e_pids_length, bool) and \
433+ a_pids_length != e_pids_length:
434+ return fail_msg
435+ # If expected is bool True, ensure 1 or more PIDs exist
436+ elif isinstance(e_pids_length, bool) and \
437+ e_pids_length is True and a_pids_length < 1:
438+ return fail_msg
439+ # If expected is bool False, ensure 0 PIDs exist
440+ elif isinstance(e_pids_length, bool) and \
441+ e_pids_length is False and a_pids_length != 0:
442+ return fail_msg
443+ else:
444+ self.log.debug('PID check OK: {} {} {}: '
445+ '{}'.format(e_sentry_name, e_proc_name,
446+ e_pids_length, a_pids))
447+ return None
448+
449+ def validate_list_of_identical_dicts(self, list_of_dicts):
450+ """Check that all dicts within a list are identical."""
451+ hashes = []
452+ for _dict in list_of_dicts:
453+ hashes.append(hash(frozenset(_dict.items())))
454+
455+ self.log.debug('Hashes: {}'.format(hashes))
456+ if len(set(hashes)) == 1:
457+ self.log.debug('Dicts within list are identical')
458+ else:
459+ return 'Dicts within list are not identical'
460+
461+ return None
462
463=== modified file 'charmhelpers/contrib/benchmark/__init__.py'
464--- charmhelpers/contrib/benchmark/__init__.py 2015-04-24 16:18:42 +0000
465+++ charmhelpers/contrib/benchmark/__init__.py 2015-08-13 08:33:21 +0000
466@@ -63,6 +63,8 @@
467
468 """
469
470+ BENCHMARK_CONF = '/etc/benchmark.conf' # Replaced in testing
471+
472 required_keys = [
473 'hostname',
474 'port',
475@@ -91,7 +93,7 @@
476 break
477
478 if len(config):
479- with open('/etc/benchmark.conf', 'w') as f:
480+ with open(self.BENCHMARK_CONF, 'w') as f:
481 for key, val in iter(config.items()):
482 f.write("%s=%s\n" % (key, val))
483
484
485=== modified file 'charmhelpers/contrib/database/mysql.py'
486--- charmhelpers/contrib/database/mysql.py 2015-06-03 20:31:29 +0000
487+++ charmhelpers/contrib/database/mysql.py 2015-08-13 08:33:21 +0000
488@@ -381,6 +381,9 @@
489 if 'wait-timeout' in config:
490 mysql_config['wait_timeout'] = config['wait-timeout']
491
492+ if 'innodb-flush-log-at-trx-commit' in config:
493+ mysql_config['innodb_flush_log_at_trx_commit'] = config['innodb-flush-log-at-trx-commit']
494+
495 # Set a sane default key_buffer size
496 mysql_config['key_buffer'] = self.human_to_bytes('32M')
497 total_memory = self.human_to_bytes(self.get_mem_total())
498
499=== modified file 'charmhelpers/contrib/network/ufw.py'
500--- charmhelpers/contrib/network/ufw.py 2015-02-12 20:08:28 +0000
501+++ charmhelpers/contrib/network/ufw.py 2015-08-13 08:33:21 +0000
502@@ -180,7 +180,43 @@
503 return True
504
505
506-def modify_access(src, dst='any', port=None, proto=None, action='allow'):
507+def default_policy(policy='deny', direction='incoming'):
508+ """
509+ Changes the default policy for traffic `direction`
510+
511+ :param policy: allow, deny or reject
512+ :param direction: traffic direction, possible values: incoming, outgoing,
513+ routed
514+ """
515+ if policy not in ['allow', 'deny', 'reject']:
516+ raise UFWError(('Unknown policy %s, valid values: '
517+ 'allow, deny, reject') % policy)
518+
519+ if direction not in ['incoming', 'outgoing', 'routed']:
520+ raise UFWError(('Unknown direction %s, valid values: '
521+ 'incoming, outgoing, routed') % direction)
522+
523+ output = subprocess.check_output(['ufw', 'default', policy, direction],
524+ universal_newlines=True,
525+ env={'LANG': 'en_US',
526+ 'PATH': os.environ['PATH']})
527+ hookenv.log(output, level='DEBUG')
528+
529+ m = re.findall("^Default %s policy changed to '%s'\n" % (direction,
530+ policy),
531+ output, re.M)
532+ if len(m) == 0:
533+ hookenv.log("ufw couldn't change the default policy to %s for %s"
534+ % (policy, direction), level='WARN')
535+ return False
536+ else:
537+ hookenv.log("ufw default policy for %s changed to %s"
538+ % (direction, policy), level='INFO')
539+ return True
540+
541+
542+def modify_access(src, dst='any', port=None, proto=None, action='allow',
543+ index=None):
544 """
545 Grant access to an address or subnet
546
547@@ -192,6 +228,8 @@
548 :param port: destiny port
549 :param proto: protocol (tcp or udp)
550 :param action: `allow` or `delete`
551+ :param index: if different from None the rule is inserted at the given
552+ `index`.
553 """
554 if not is_enabled():
555 hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
556@@ -199,6 +237,8 @@
557
558 if action == 'delete':
559 cmd = ['ufw', 'delete', 'allow']
560+ elif index is not None:
561+ cmd = ['ufw', 'insert', str(index), action]
562 else:
563 cmd = ['ufw', action]
564
565@@ -227,7 +267,7 @@
566 level='ERROR')
567
568
569-def grant_access(src, dst='any', port=None, proto=None):
570+def grant_access(src, dst='any', port=None, proto=None, index=None):
571 """
572 Grant access to an address or subnet
573
574@@ -238,8 +278,11 @@
575 field has to be set.
576 :param port: destiny port
577 :param proto: protocol (tcp or udp)
578+ :param index: if different from None the rule is inserted at the given
579+ `index`.
580 """
581- return modify_access(src, dst=dst, port=port, proto=proto, action='allow')
582+ return modify_access(src, dst=dst, port=port, proto=proto, action='allow',
583+ index=index)
584
585
586 def revoke_access(src, dst='any', port=None, proto=None):
587
588=== modified file 'charmhelpers/contrib/openstack/amulet/deployment.py'
589--- charmhelpers/contrib/openstack/amulet/deployment.py 2015-08-12 07:50:34 +0000
590+++ charmhelpers/contrib/openstack/amulet/deployment.py 2015-08-13 08:33:21 +0000
591@@ -44,7 +44,7 @@
592 Determine if the local branch being tested is derived from its
593 stable or next (dev) branch, and based on this, use the corresonding
594 stable or next branches for the other_services."""
595- base_charms = ['mysql', 'mongodb']
596+ base_charms = ['mysql', 'mongodb', 'nrpe']
597
598 if self.series in ['precise', 'trusty']:
599 base_series = self.series
600@@ -83,9 +83,10 @@
601 services.append(this_service)
602 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
603 'ceph-osd', 'ceph-radosgw']
604- # Openstack subordinate charms do not expose an origin option as that
605- # is controlled by the principle
606- ignore = ['neutron-openvswitch', 'cisco-vpp', 'odl-controller']
607+ # Most OpenStack subordinate charms do not expose an origin option
608+ # as that is controlled by the principle.
609+ ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
610+ 'cisco-vpp', 'odl-controller']
611
612 if self.openstack:
613 for svc in services:
614@@ -152,3 +153,36 @@
615 return os_origin.split('%s-' % self.series)[1].split('/')[0]
616 else:
617 return releases[self.series]
618+
619+ def get_ceph_expected_pools(self, radosgw=False):
620+ """Return a list of expected ceph pools in a ceph + cinder + glance
621+ test scenario, based on OpenStack release and whether ceph radosgw
622+ is flagged as present or not."""
623+
624+ if self._get_openstack_release() >= self.trusty_kilo:
625+ # Kilo or later
626+ pools = [
627+ 'rbd',
628+ 'cinder',
629+ 'glance'
630+ ]
631+ else:
632+ # Juno or earlier
633+ pools = [
634+ 'data',
635+ 'metadata',
636+ 'rbd',
637+ 'cinder',
638+ 'glance'
639+ ]
640+
641+ if radosgw:
642+ pools.extend([
643+ '.rgw.root',
644+ '.rgw.control',
645+ '.rgw',
646+ '.rgw.gc',
647+ '.users.uid'
648+ ])
649+
650+ return pools
651
652=== modified file 'charmhelpers/contrib/openstack/amulet/utils.py'
653--- charmhelpers/contrib/openstack/amulet/utils.py 2015-01-22 06:06:03 +0000
654+++ charmhelpers/contrib/openstack/amulet/utils.py 2015-08-13 08:33:21 +0000
655@@ -14,16 +14,20 @@
656 # You should have received a copy of the GNU Lesser General Public License
657 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
658
659+import amulet
660+import json
661 import logging
662 import os
663+import six
664 import time
665 import urllib
666
667+import cinderclient.v1.client as cinder_client
668 import glanceclient.v1.client as glance_client
669+import heatclient.v1.client as heat_client
670 import keystoneclient.v2_0 as keystone_client
671 import novaclient.v1_1.client as nova_client
672-
673-import six
674+import swiftclient
675
676 from charmhelpers.contrib.amulet.utils import (
677 AmuletUtils
678@@ -37,7 +41,7 @@
679 """OpenStack amulet utilities.
680
681 This class inherits from AmuletUtils and has additional support
682- that is specifically for use by OpenStack charms.
683+ that is specifically for use by OpenStack charm tests.
684 """
685
686 def __init__(self, log_level=ERROR):
687@@ -51,6 +55,8 @@
688 Validate actual endpoint data vs expected endpoint data. The ports
689 are used to find the matching endpoint.
690 """
691+ self.log.debug('Validating endpoint data...')
692+ self.log.debug('actual: {}'.format(repr(endpoints)))
693 found = False
694 for ep in endpoints:
695 self.log.debug('endpoint: {}'.format(repr(ep)))
696@@ -77,6 +83,7 @@
697 Validate a list of actual service catalog endpoints vs a list of
698 expected service catalog endpoints.
699 """
700+ self.log.debug('Validating service catalog endpoint data...')
701 self.log.debug('actual: {}'.format(repr(actual)))
702 for k, v in six.iteritems(expected):
703 if k in actual:
704@@ -93,6 +100,7 @@
705 Validate a list of actual tenant data vs list of expected tenant
706 data.
707 """
708+ self.log.debug('Validating tenant data...')
709 self.log.debug('actual: {}'.format(repr(actual)))
710 for e in expected:
711 found = False
712@@ -114,6 +122,7 @@
713 Validate a list of actual role data vs a list of expected role
714 data.
715 """
716+ self.log.debug('Validating role data...')
717 self.log.debug('actual: {}'.format(repr(actual)))
718 for e in expected:
719 found = False
720@@ -134,6 +143,7 @@
721 Validate a list of actual user data vs a list of expected user
722 data.
723 """
724+ self.log.debug('Validating user data...')
725 self.log.debug('actual: {}'.format(repr(actual)))
726 for e in expected:
727 found = False
728@@ -155,17 +165,30 @@
729
730 Validate a list of actual flavors vs a list of expected flavors.
731 """
732+ self.log.debug('Validating flavor data...')
733 self.log.debug('actual: {}'.format(repr(actual)))
734 act = [a.name for a in actual]
735 return self._validate_list_data(expected, act)
736
737 def tenant_exists(self, keystone, tenant):
738 """Return True if tenant exists."""
739+ self.log.debug('Checking if tenant exists ({})...'.format(tenant))
740 return tenant in [t.name for t in keystone.tenants.list()]
741
742+ def authenticate_cinder_admin(self, keystone_sentry, username,
743+ password, tenant):
744+ """Authenticates admin user with cinder."""
745+ # NOTE(beisner): cinder python client doesn't accept tokens.
746+ service_ip = \
747+ keystone_sentry.relation('shared-db',
748+ 'mysql:shared-db')['private-address']
749+ ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
750+ return cinder_client.Client(username, password, tenant, ept)
751+
752 def authenticate_keystone_admin(self, keystone_sentry, user, password,
753 tenant):
754 """Authenticates admin user with the keystone admin endpoint."""
755+ self.log.debug('Authenticating keystone admin...')
756 unit = keystone_sentry
757 service_ip = unit.relation('shared-db',
758 'mysql:shared-db')['private-address']
759@@ -175,6 +198,7 @@
760
761 def authenticate_keystone_user(self, keystone, user, password, tenant):
762 """Authenticates a regular user with the keystone public endpoint."""
763+ self.log.debug('Authenticating keystone user ({})...'.format(user))
764 ep = keystone.service_catalog.url_for(service_type='identity',
765 endpoint_type='publicURL')
766 return keystone_client.Client(username=user, password=password,
767@@ -182,19 +206,49 @@
768
769 def authenticate_glance_admin(self, keystone):
770 """Authenticates admin user with glance."""
771+ self.log.debug('Authenticating glance admin...')
772 ep = keystone.service_catalog.url_for(service_type='image',
773 endpoint_type='adminURL')
774 return glance_client.Client(ep, token=keystone.auth_token)
775
776+ def authenticate_heat_admin(self, keystone):
777+ """Authenticates the admin user with heat."""
778+ self.log.debug('Authenticating heat admin...')
779+ ep = keystone.service_catalog.url_for(service_type='orchestration',
780+ endpoint_type='publicURL')
781+ return heat_client.Client(endpoint=ep, token=keystone.auth_token)
782+
783 def authenticate_nova_user(self, keystone, user, password, tenant):
784 """Authenticates a regular user with nova-api."""
785+ self.log.debug('Authenticating nova user ({})...'.format(user))
786 ep = keystone.service_catalog.url_for(service_type='identity',
787 endpoint_type='publicURL')
788 return nova_client.Client(username=user, api_key=password,
789 project_id=tenant, auth_url=ep)
790
791+ def authenticate_swift_user(self, keystone, user, password, tenant):
792+ """Authenticates a regular user with swift api."""
793+ self.log.debug('Authenticating swift user ({})...'.format(user))
794+ ep = keystone.service_catalog.url_for(service_type='identity',
795+ endpoint_type='publicURL')
796+ return swiftclient.Connection(authurl=ep,
797+ user=user,
798+ key=password,
799+ tenant_name=tenant,
800+ auth_version='2.0')
801+
802 def create_cirros_image(self, glance, image_name):
803- """Download the latest cirros image and upload it to glance."""
804+ """Download the latest cirros image and upload it to glance,
805+ validate and return a resource pointer.
806+
807+ :param glance: pointer to authenticated glance connection
808+ :param image_name: display name for new image
809+ :returns: glance image pointer
810+ """
811+ self.log.debug('Creating glance cirros image '
812+ '({})...'.format(image_name))
813+
814+ # Download cirros image
815 http_proxy = os.getenv('AMULET_HTTP_PROXY')
816 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
817 if http_proxy:
818@@ -203,57 +257,67 @@
819 else:
820 opener = urllib.FancyURLopener()
821
822- f = opener.open("http://download.cirros-cloud.net/version/released")
823+ f = opener.open('http://download.cirros-cloud.net/version/released')
824 version = f.read().strip()
825- cirros_img = "cirros-{}-x86_64-disk.img".format(version)
826+ cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
827 local_path = os.path.join('tests', cirros_img)
828
829 if not os.path.exists(local_path):
830- cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
831+ cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
832 version, cirros_img)
833 opener.retrieve(cirros_url, local_path)
834 f.close()
835
836+ # Create glance image
837 with open(local_path) as f:
838 image = glance.images.create(name=image_name, is_public=True,
839 disk_format='qcow2',
840 container_format='bare', data=f)
841- count = 1
842- status = image.status
843- while status != 'active' and count < 10:
844- time.sleep(3)
845- image = glance.images.get(image.id)
846- status = image.status
847- self.log.debug('image status: {}'.format(status))
848- count += 1
849-
850- if status != 'active':
851- self.log.error('image creation timed out')
852- return None
853+
854+ # Wait for image to reach active status
855+ img_id = image.id
856+ ret = self.resource_reaches_status(glance.images, img_id,
857+ expected_stat='active',
858+ msg='Image status wait')
859+ if not ret:
860+ msg = 'Glance image failed to reach expected state.'
861+ amulet.raise_status(amulet.FAIL, msg=msg)
862+
863+ # Re-validate new image
864+ self.log.debug('Validating image attributes...')
865+ val_img_name = glance.images.get(img_id).name
866+ val_img_stat = glance.images.get(img_id).status
867+ val_img_pub = glance.images.get(img_id).is_public
868+ val_img_cfmt = glance.images.get(img_id).container_format
869+ val_img_dfmt = glance.images.get(img_id).disk_format
870+ msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
871+ 'container fmt:{} disk fmt:{}'.format(
872+ val_img_name, val_img_pub, img_id,
873+ val_img_stat, val_img_cfmt, val_img_dfmt))
874+
875+ if val_img_name == image_name and val_img_stat == 'active' \
876+ and val_img_pub is True and val_img_cfmt == 'bare' \
877+ and val_img_dfmt == 'qcow2':
878+ self.log.debug(msg_attr)
879+ else:
880+ msg = ('Volume validation failed, {}'.format(msg_attr))
881+ amulet.raise_status(amulet.FAIL, msg=msg)
882
883 return image
884
885 def delete_image(self, glance, image):
886 """Delete the specified image."""
887- num_before = len(list(glance.images.list()))
888- glance.images.delete(image)
889-
890- count = 1
891- num_after = len(list(glance.images.list()))
892- while num_after != (num_before - 1) and count < 10:
893- time.sleep(3)
894- num_after = len(list(glance.images.list()))
895- self.log.debug('number of images: {}'.format(num_after))
896- count += 1
897-
898- if num_after != (num_before - 1):
899- self.log.error('image deletion timed out')
900- return False
901-
902- return True
903+
904+ # /!\ DEPRECATION WARNING
905+ self.log.warn('/!\\ DEPRECATION WARNING: use '
906+ 'delete_resource instead of delete_image.')
907+ self.log.debug('Deleting glance image ({})...'.format(image))
908+ return self.delete_resource(glance.images, image, msg='glance image')
909
910 def create_instance(self, nova, image_name, instance_name, flavor):
911 """Create the specified instance."""
912+ self.log.debug('Creating instance '
913+ '({}|{}|{})'.format(instance_name, image_name, flavor))
914 image = nova.images.find(name=image_name)
915 flavor = nova.flavors.find(name=flavor)
916 instance = nova.servers.create(name=instance_name, image=image,
917@@ -276,19 +340,265 @@
918
919 def delete_instance(self, nova, instance):
920 """Delete the specified instance."""
921- num_before = len(list(nova.servers.list()))
922- nova.servers.delete(instance)
923-
924- count = 1
925- num_after = len(list(nova.servers.list()))
926- while num_after != (num_before - 1) and count < 10:
927- time.sleep(3)
928- num_after = len(list(nova.servers.list()))
929- self.log.debug('number of instances: {}'.format(num_after))
930- count += 1
931-
932- if num_after != (num_before - 1):
933- self.log.error('instance deletion timed out')
934- return False
935-
936- return True
937+
938+ # /!\ DEPRECATION WARNING
939+ self.log.warn('/!\\ DEPRECATION WARNING: use '
940+ 'delete_resource instead of delete_instance.')
941+ self.log.debug('Deleting instance ({})...'.format(instance))
942+ return self.delete_resource(nova.servers, instance,
943+ msg='nova instance')
944+
945+ def create_or_get_keypair(self, nova, keypair_name="testkey"):
946+ """Create a new keypair, or return pointer if it already exists."""
947+ try:
948+ _keypair = nova.keypairs.get(keypair_name)
949+ self.log.debug('Keypair ({}) already exists, '
950+ 'using it.'.format(keypair_name))
951+ return _keypair
952+ except:
953+ self.log.debug('Keypair ({}) does not exist, '
954+ 'creating it.'.format(keypair_name))
955+
956+ _keypair = nova.keypairs.create(name=keypair_name)
957+ return _keypair
958+
959+ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
960+ img_id=None, src_vol_id=None, snap_id=None):
961+ """Create cinder volume, optionally from a glance image, OR
962+ optionally as a clone of an existing volume, OR optionally
963+ from a snapshot. Wait for the new volume status to reach
964+ the expected status, validate and return a resource pointer.
965+
966+ :param vol_name: cinder volume display name
967+ :param vol_size: size in gigabytes
968+ :param img_id: optional glance image id
969+ :param src_vol_id: optional source volume id to clone
970+ :param snap_id: optional snapshot id to use
971+ :returns: cinder volume pointer
972+ """
973+ # Handle parameter input and avoid impossible combinations
974+ if img_id and not src_vol_id and not snap_id:
975+ # Create volume from image
976+ self.log.debug('Creating cinder volume from glance image...')
977+ bootable = 'true'
978+ elif src_vol_id and not img_id and not snap_id:
979+ # Clone an existing volume
980+ self.log.debug('Cloning cinder volume...')
981+ bootable = cinder.volumes.get(src_vol_id).bootable
982+ elif snap_id and not src_vol_id and not img_id:
983+ # Create volume from snapshot
984+ self.log.debug('Creating cinder volume from snapshot...')
985+ snap = cinder.volume_snapshots.find(id=snap_id)
986+ vol_size = snap.size
987+ snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
988+ bootable = cinder.volumes.get(snap_vol_id).bootable
989+ elif not img_id and not src_vol_id and not snap_id:
990+ # Create volume
991+ self.log.debug('Creating cinder volume...')
992+ bootable = 'false'
993+ else:
994+ # Impossible combination of parameters
995+ msg = ('Invalid method use - name:{} size:{} img_id:{} '
996+ 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
997+ img_id, src_vol_id,
998+ snap_id))
999+ amulet.raise_status(amulet.FAIL, msg=msg)
1000+
1001+ # Create new volume
1002+ try:
1003+ vol_new = cinder.volumes.create(display_name=vol_name,
1004+ imageRef=img_id,
1005+ size=vol_size,
1006+ source_volid=src_vol_id,
1007+ snapshot_id=snap_id)
1008+ vol_id = vol_new.id
1009+ except Exception as e:
1010+ msg = 'Failed to create volume: {}'.format(e)
1011+ amulet.raise_status(amulet.FAIL, msg=msg)
1012+
1013+ # Wait for volume to reach available status
1014+ ret = self.resource_reaches_status(cinder.volumes, vol_id,
1015+ expected_stat="available",
1016+ msg="Volume status wait")
1017+ if not ret:
1018+ msg = 'Cinder volume failed to reach expected state.'
1019+ amulet.raise_status(amulet.FAIL, msg=msg)
1020+
1021+ # Re-validate new volume
1022+ self.log.debug('Validating volume attributes...')
1023+ val_vol_name = cinder.volumes.get(vol_id).display_name
1024+ val_vol_boot = cinder.volumes.get(vol_id).bootable
1025+ val_vol_stat = cinder.volumes.get(vol_id).status
1026+ val_vol_size = cinder.volumes.get(vol_id).size
1027+ msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
1028+ '{} size:{}'.format(val_vol_name, vol_id,
1029+ val_vol_stat, val_vol_boot,
1030+ val_vol_size))
1031+
1032+ if val_vol_boot == bootable and val_vol_stat == 'available' \
1033+ and val_vol_name == vol_name and val_vol_size == vol_size:
1034+ self.log.debug(msg_attr)
1035+ else:
1036+ msg = ('Volume validation failed, {}'.format(msg_attr))
1037+ amulet.raise_status(amulet.FAIL, msg=msg)
1038+
1039+ return vol_new
1040+
1041+ def delete_resource(self, resource, resource_id,
1042+ msg="resource", max_wait=120):
1043+ """Delete one openstack resource, such as one instance, keypair,
1044+ image, volume, stack, etc., and confirm deletion within max wait time.
1045+
1046+ :param resource: pointer to os resource type, ex:glance_client.images
1047+ :param resource_id: unique name or id for the openstack resource
1048+ :param msg: text to identify purpose in logging
1049+ :param max_wait: maximum wait time in seconds
1050+ :returns: True if successful, otherwise False
1051+ """
1052+ self.log.debug('Deleting OpenStack resource '
1053+ '{} ({})'.format(resource_id, msg))
1054+ num_before = len(list(resource.list()))
1055+ resource.delete(resource_id)
1056+
1057+ tries = 0
1058+ num_after = len(list(resource.list()))
1059+ while num_after != (num_before - 1) and tries < (max_wait / 4):
1060+ self.log.debug('{} delete check: '
1061+ '{} [{}:{}] {}'.format(msg, tries,
1062+ num_before,
1063+ num_after,
1064+ resource_id))
1065+ time.sleep(4)
1066+ num_after = len(list(resource.list()))
1067+ tries += 1
1068+
1069+ self.log.debug('{}: expected, actual count = {}, '
1070+ '{}'.format(msg, num_before - 1, num_after))
1071+
1072+ if num_after == (num_before - 1):
1073+ return True
1074+ else:
1075+ self.log.error('{} delete timed out'.format(msg))
1076+ return False
1077+
1078+ def resource_reaches_status(self, resource, resource_id,
1079+ expected_stat='available',
1080+ msg='resource', max_wait=120):
1081+ """Wait for an openstack resources status to reach an
1082+ expected status within a specified time. Useful to confirm that
1083+ nova instances, cinder vols, snapshots, glance images, heat stacks
1084+ and other resources eventually reach the expected status.
1085+
1086+ :param resource: pointer to os resource type, ex: heat_client.stacks
1087+ :param resource_id: unique id for the openstack resource
1088+ :param expected_stat: status to expect resource to reach
1089+ :param msg: text to identify purpose in logging
1090+ :param max_wait: maximum wait time in seconds
1091+ :returns: True if successful, False if status is not reached
1092+ """
1093+
1094+ tries = 0
1095+ resource_stat = resource.get(resource_id).status
1096+ while resource_stat != expected_stat and tries < (max_wait / 4):
1097+ self.log.debug('{} status check: '
1098+ '{} [{}:{}] {}'.format(msg, tries,
1099+ resource_stat,
1100+ expected_stat,
1101+ resource_id))
1102+ time.sleep(4)
1103+ resource_stat = resource.get(resource_id).status
1104+ tries += 1
1105+
1106+ self.log.debug('{}: expected, actual status = {}, '
1107+ '{}'.format(msg, resource_stat, expected_stat))
1108+
1109+ if resource_stat == expected_stat:
1110+ return True
1111+ else:
1112+ self.log.debug('{} never reached expected status: '
1113+ '{}'.format(resource_id, expected_stat))
1114+ return False
1115+
1116+ def get_ceph_osd_id_cmd(self, index):
1117+ """Produce a shell command that will return a ceph-osd id."""
1118+ return ("`initctl list | grep 'ceph-osd ' | "
1119+ "awk 'NR=={} {{ print $2 }}' | "
1120+ "grep -o '[0-9]*'`".format(index + 1))
1121+
1122+ def get_ceph_pools(self, sentry_unit):
1123+ """Return a dict of ceph pools from a single ceph unit, with
1124+ pool name as keys, pool id as vals."""
1125+ pools = {}
1126+ cmd = 'sudo ceph osd lspools'
1127+ output, code = sentry_unit.run(cmd)
1128+ if code != 0:
1129+ msg = ('{} `{}` returned {} '
1130+ '{}'.format(sentry_unit.info['unit_name'],
1131+ cmd, code, output))
1132+ amulet.raise_status(amulet.FAIL, msg=msg)
1133+
1134+ # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
1135+ for pool in str(output).split(','):
1136+ pool_id_name = pool.split(' ')
1137+ if len(pool_id_name) == 2:
1138+ pool_id = pool_id_name[0]
1139+ pool_name = pool_id_name[1]
1140+ pools[pool_name] = int(pool_id)
1141+
1142+ self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
1143+ pools))
1144+ return pools
1145+
1146+ def get_ceph_df(self, sentry_unit):
1147+ """Return dict of ceph df json output, including ceph pool state.
1148+
1149+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
1150+ :returns: Dict of ceph df output
1151+ """
1152+ cmd = 'sudo ceph df --format=json'
1153+ output, code = sentry_unit.run(cmd)
1154+ if code != 0:
1155+ msg = ('{} `{}` returned {} '
1156+ '{}'.format(sentry_unit.info['unit_name'],
1157+ cmd, code, output))
1158+ amulet.raise_status(amulet.FAIL, msg=msg)
1159+ return json.loads(output)
1160+
1161+ def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
1162+ """Take a sample of attributes of a ceph pool, returning ceph
1163+ pool name, object count and disk space used for the specified
1164+ pool ID number.
1165+
1166+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
1167+ :param pool_id: Ceph pool ID
1168+ :returns: List of pool name, object count, kb disk space used
1169+ """
1170+ df = self.get_ceph_df(sentry_unit)
1171+ pool_name = df['pools'][pool_id]['name']
1172+ obj_count = df['pools'][pool_id]['stats']['objects']
1173+ kb_used = df['pools'][pool_id]['stats']['kb_used']
1174+ self.log.debug('Ceph {} pool (ID {}): {} objects, '
1175+ '{} kb used'.format(pool_name, pool_id,
1176+ obj_count, kb_used))
1177+ return pool_name, obj_count, kb_used
1178+
1179+ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
1180+ """Validate ceph pool samples taken over time, such as pool
1181+ object counts or pool kb used, before adding, after adding, and
1182+ after deleting items which affect those pool attributes. The
1183+ 2nd element is expected to be greater than the 1st; 3rd is expected
1184+ to be less than the 2nd.
1185+
1186+ :param samples: List containing 3 data samples
1187+ :param sample_type: String for logging and usage context
1188+ :returns: None if successful, Failure message otherwise
1189+ """
1190+ original, created, deleted = range(3)
1191+ if samples[created] <= samples[original] or \
1192+ samples[deleted] >= samples[created]:
1193+ return ('Ceph {} samples ({}) '
1194+ 'unexpected.'.format(sample_type, samples))
1195+ else:
1196+ self.log.debug('Ceph {} samples (OK): '
1197+ '{}'.format(sample_type, samples))
1198+ return None
1199
1200=== modified file 'charmhelpers/contrib/openstack/context.py'
1201--- charmhelpers/contrib/openstack/context.py 2015-04-16 19:19:18 +0000
1202+++ charmhelpers/contrib/openstack/context.py 2015-08-13 08:33:21 +0000
1203@@ -122,21 +122,24 @@
1204 of specifying multiple key value pairs within the same string. For
1205 example, a string in the format of 'key1=value1, key2=value2' will
1206 return a dict of:
1207- {'key1': 'value1',
1208- 'key2': 'value2'}.
1209+
1210+ {'key1': 'value1',
1211+ 'key2': 'value2'}.
1212
1213 2. A string in the above format, but supporting a comma-delimited list
1214 of values for the same key. For example, a string in the format of
1215 'key1=value1, key2=value3,value4,value5' will return a dict of:
1216- {'key1', 'value1',
1217- 'key2', 'value2,value3,value4'}
1218+
1219+ {'key1', 'value1',
1220+ 'key2', 'value2,value3,value4'}
1221
1222 3. A string containing a colon character (:) prior to an equal
1223 character (=) will be treated as yaml and parsed as such. This can be
1224 used to specify more complex key value pairs. For example,
1225 a string in the format of 'key1: subkey1=value1, subkey2=value2' will
1226 return a dict of:
1227- {'key1', 'subkey1=value1, subkey2=value2'}
1228+
1229+ {'key1', 'subkey1=value1, subkey2=value2'}
1230
1231 The provided config_flags string may be a list of comma-separated values
1232 which themselves may be comma-separated list of values.
1233@@ -240,7 +243,7 @@
1234 if self.relation_prefix:
1235 password_setting = self.relation_prefix + '_password'
1236
1237- for rid in relation_ids('shared-db'):
1238+ for rid in relation_ids(self.interfaces[0]):
1239 for unit in related_units(rid):
1240 rdata = relation_get(rid=rid, unit=unit)
1241 host = rdata.get('db_host')
1242@@ -891,8 +894,6 @@
1243 return ctxt
1244
1245 def __call__(self):
1246- self._ensure_packages()
1247-
1248 if self.network_manager not in ['quantum', 'neutron']:
1249 return {}
1250
1251@@ -1050,13 +1051,22 @@
1252 :param config_file : Service's config file to query sections
1253 :param interface : Subordinate interface to inspect
1254 """
1255- self.service = service
1256 self.config_file = config_file
1257- self.interface = interface
1258+ if isinstance(service, list):
1259+ self.services = service
1260+ else:
1261+ self.services = [service]
1262+ if isinstance(interface, list):
1263+ self.interfaces = interface
1264+ else:
1265+ self.interfaces = [interface]
1266
1267 def __call__(self):
1268 ctxt = {'sections': {}}
1269- for rid in relation_ids(self.interface):
1270+ rids = []
1271+ for interface in self.interfaces:
1272+ rids.extend(relation_ids(interface))
1273+ for rid in rids:
1274 for unit in related_units(rid):
1275 sub_config = relation_get('subordinate_configuration',
1276 rid=rid, unit=unit)
1277@@ -1068,29 +1078,32 @@
1278 'setting from %s' % rid, level=ERROR)
1279 continue
1280
1281- if self.service not in sub_config:
1282- log('Found subordinate_config on %s but it contained'
1283- 'nothing for %s service' % (rid, self.service),
1284- level=INFO)
1285- continue
1286-
1287- sub_config = sub_config[self.service]
1288- if self.config_file not in sub_config:
1289- log('Found subordinate_config on %s but it contained'
1290- 'nothing for %s' % (rid, self.config_file),
1291- level=INFO)
1292- continue
1293-
1294- sub_config = sub_config[self.config_file]
1295- for k, v in six.iteritems(sub_config):
1296- if k == 'sections':
1297- for section, config_dict in six.iteritems(v):
1298- log("adding section '%s'" % (section),
1299- level=DEBUG)
1300- ctxt[k][section] = config_dict
1301- else:
1302- ctxt[k] = v
1303-
1304+ for service in self.services:
1305+ if service not in sub_config:
1306+ log('Found subordinate_config on %s but it contained'
1307+ 'nothing for %s service' % (rid, service),
1308+ level=INFO)
1309+ continue
1310+
1311+ sub_config = sub_config[service]
1312+ if self.config_file not in sub_config:
1313+ log('Found subordinate_config on %s but it contained'
1314+ 'nothing for %s' % (rid, self.config_file),
1315+ level=INFO)
1316+ continue
1317+
1318+ sub_config = sub_config[self.config_file]
1319+ for k, v in six.iteritems(sub_config):
1320+ if k == 'sections':
1321+ for section, config_list in six.iteritems(v):
1322+ log("adding section '%s'" % (section),
1323+ level=DEBUG)
1324+ if ctxt[k].get(section):
1325+ ctxt[k][section].extend(config_list)
1326+ else:
1327+ ctxt[k][section] = config_list
1328+ else:
1329+ ctxt[k] = v
1330 log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
1331 return ctxt
1332
1333
1334=== modified file 'charmhelpers/contrib/openstack/templates/ceph.conf'
1335--- charmhelpers/contrib/openstack/templates/ceph.conf 2014-03-26 10:26:36 +0000
1336+++ charmhelpers/contrib/openstack/templates/ceph.conf 2015-08-13 08:33:21 +0000
1337@@ -5,11 +5,11 @@
1338 ###############################################################################
1339 [global]
1340 {% if auth -%}
1341- auth_supported = {{ auth }}
1342- keyring = /etc/ceph/$cluster.$name.keyring
1343- mon host = {{ mon_hosts }}
1344+auth_supported = {{ auth }}
1345+keyring = /etc/ceph/$cluster.$name.keyring
1346+mon host = {{ mon_hosts }}
1347 {% endif -%}
1348- log to syslog = {{ use_syslog }}
1349- err to syslog = {{ use_syslog }}
1350- clog to syslog = {{ use_syslog }}
1351+log to syslog = {{ use_syslog }}
1352+err to syslog = {{ use_syslog }}
1353+clog to syslog = {{ use_syslog }}
1354
1355
1356=== modified file 'charmhelpers/contrib/openstack/templating.py'
1357--- charmhelpers/contrib/openstack/templating.py 2015-06-11 09:00:37 +0000
1358+++ charmhelpers/contrib/openstack/templating.py 2015-08-13 08:33:21 +0000
1359@@ -29,8 +29,8 @@
1360 try:
1361 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
1362 except ImportError:
1363- # python-jinja2 may not be installed yet, or we're running unittests.
1364- FileSystemLoader = ChoiceLoader = Environment = exceptions = None
1365+ apt_install('python-jinja2', fatal=True)
1366+ from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
1367
1368
1369 class OSConfigException(Exception):
1370
1371=== modified file 'charmhelpers/contrib/openstack/utils.py'
1372--- charmhelpers/contrib/openstack/utils.py 2015-06-17 12:22:29 +0000
1373+++ charmhelpers/contrib/openstack/utils.py 2015-08-13 08:33:21 +0000
1374@@ -25,6 +25,7 @@
1375 import os
1376 import sys
1377 import uuid
1378+import re
1379
1380 import six
1381 import yaml
1382@@ -71,7 +72,6 @@
1383 DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
1384 'restricted main multiverse universe')
1385
1386-
1387 UBUNTU_OPENSTACK_RELEASE = OrderedDict([
1388 ('oneiric', 'diablo'),
1389 ('precise', 'essex'),
1390@@ -81,6 +81,7 @@
1391 ('trusty', 'icehouse'),
1392 ('utopic', 'juno'),
1393 ('vivid', 'kilo'),
1394+ ('wily', 'liberty'),
1395 ])
1396
1397
1398@@ -93,6 +94,7 @@
1399 ('2014.1', 'icehouse'),
1400 ('2014.2', 'juno'),
1401 ('2015.1', 'kilo'),
1402+ ('2015.2', 'liberty'),
1403 ])
1404
1405 # The ugly duckling
1406@@ -115,8 +117,37 @@
1407 ('2.2.0', 'juno'),
1408 ('2.2.1', 'kilo'),
1409 ('2.2.2', 'kilo'),
1410+ ('2.3.0', 'liberty'),
1411 ])
1412
1413+# >= Liberty version->codename mapping
1414+PACKAGE_CODENAMES = {
1415+ 'nova-common': OrderedDict([
1416+ ('12.0.0', 'liberty'),
1417+ ]),
1418+ 'neutron-common': OrderedDict([
1419+ ('7.0.0', 'liberty'),
1420+ ]),
1421+ 'cinder-common': OrderedDict([
1422+ ('7.0.0', 'liberty'),
1423+ ]),
1424+ 'keystone': OrderedDict([
1425+ ('8.0.0', 'liberty'),
1426+ ]),
1427+ 'horizon-common': OrderedDict([
1428+ ('8.0.0', 'liberty'),
1429+ ]),
1430+ 'ceilometer-common': OrderedDict([
1431+ ('5.0.0', 'liberty'),
1432+ ]),
1433+ 'heat-common': OrderedDict([
1434+ ('5.0.0', 'liberty'),
1435+ ]),
1436+ 'glance-common': OrderedDict([
1437+ ('11.0.0', 'liberty'),
1438+ ]),
1439+}
1440+
1441 DEFAULT_LOOPBACK_SIZE = '5G'
1442
1443
1444@@ -200,20 +231,29 @@
1445 error_out(e)
1446
1447 vers = apt.upstream_version(pkg.current_ver.ver_str)
1448+ match = re.match('^(\d)\.(\d)\.(\d)', vers)
1449+ if match:
1450+ vers = match.group(0)
1451
1452- try:
1453- if 'swift' in pkg.name:
1454- swift_vers = vers[:5]
1455- if swift_vers not in SWIFT_CODENAMES:
1456- # Deal with 1.10.0 upward
1457- swift_vers = vers[:6]
1458- return SWIFT_CODENAMES[swift_vers]
1459- else:
1460- vers = vers[:6]
1461- return OPENSTACK_CODENAMES[vers]
1462- except KeyError:
1463- e = 'Could not determine OpenStack codename for version %s' % vers
1464- error_out(e)
1465+ # >= Liberty independent project versions
1466+ if (package in PACKAGE_CODENAMES and
1467+ vers in PACKAGE_CODENAMES[package]):
1468+ return PACKAGE_CODENAMES[package][vers]
1469+ else:
1470+ # < Liberty co-ordinated project versions
1471+ try:
1472+ if 'swift' in pkg.name:
1473+ swift_vers = vers[:5]
1474+ if swift_vers not in SWIFT_CODENAMES:
1475+ # Deal with 1.10.0 upward
1476+ swift_vers = vers[:6]
1477+ return SWIFT_CODENAMES[swift_vers]
1478+ else:
1479+ vers = vers[:6]
1480+ return OPENSTACK_CODENAMES[vers]
1481+ except KeyError:
1482+ e = 'Could not determine OpenStack codename for version %s' % vers
1483+ error_out(e)
1484
1485
1486 def get_os_version_package(pkg, fatal=True):
1487@@ -323,6 +363,9 @@
1488 'kilo': 'trusty-updates/kilo',
1489 'kilo/updates': 'trusty-updates/kilo',
1490 'kilo/proposed': 'trusty-proposed/kilo',
1491+ 'liberty': 'trusty-updates/liberty',
1492+ 'liberty/updates': 'trusty-updates/liberty',
1493+ 'liberty/proposed': 'trusty-proposed/liberty',
1494 }
1495
1496 try:
1497@@ -518,6 +561,7 @@
1498 Clone/install all specified OpenStack repositories.
1499
1500 The expected format of projects_yaml is:
1501+
1502 repositories:
1503 - {name: keystone,
1504 repository: 'git://git.openstack.org/openstack/keystone.git',
1505@@ -525,11 +569,13 @@
1506 - {name: requirements,
1507 repository: 'git://git.openstack.org/openstack/requirements.git',
1508 branch: 'stable/icehouse'}
1509+
1510 directory: /mnt/openstack-git
1511 http_proxy: squid-proxy-url
1512 https_proxy: squid-proxy-url
1513
1514- The directory, http_proxy, and https_proxy keys are optional.
1515+ The directory, http_proxy, and https_proxy keys are optional.
1516+
1517 """
1518 global requirements_dir
1519 parent_dir = '/mnt/openstack-git'
1520@@ -551,6 +597,12 @@
1521
1522 pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
1523
1524+ # Upgrade setuptools and pip from default virtualenv versions. The default
1525+ # versions in trusty break master OpenStack branch deployments.
1526+ for p in ['pip', 'setuptools']:
1527+ pip_install(p, upgrade=True, proxy=http_proxy,
1528+ venv=os.path.join(parent_dir, 'venv'))
1529+
1530 for p in projects['repositories']:
1531 repo = p['repository']
1532 branch = p['branch']
1533@@ -612,24 +664,24 @@
1534 else:
1535 repo_dir = dest_dir
1536
1537+ venv = os.path.join(parent_dir, 'venv')
1538+
1539 if update_requirements:
1540 if not requirements_dir:
1541 error_out('requirements repo must be cloned before '
1542 'updating from global requirements.')
1543- _git_update_requirements(repo_dir, requirements_dir)
1544+ _git_update_requirements(venv, repo_dir, requirements_dir)
1545
1546 juju_log('Installing git repo from dir: {}'.format(repo_dir))
1547 if http_proxy:
1548- pip_install(repo_dir, proxy=http_proxy,
1549- venv=os.path.join(parent_dir, 'venv'))
1550+ pip_install(repo_dir, proxy=http_proxy, venv=venv)
1551 else:
1552- pip_install(repo_dir,
1553- venv=os.path.join(parent_dir, 'venv'))
1554+ pip_install(repo_dir, venv=venv)
1555
1556 return repo_dir
1557
1558
1559-def _git_update_requirements(package_dir, reqs_dir):
1560+def _git_update_requirements(venv, package_dir, reqs_dir):
1561 """
1562 Update from global requirements.
1563
1564@@ -638,12 +690,14 @@
1565 """
1566 orig_dir = os.getcwd()
1567 os.chdir(reqs_dir)
1568- cmd = ['python', 'update.py', package_dir]
1569+ python = os.path.join(venv, 'bin/python')
1570+ cmd = [python, 'update.py', package_dir]
1571 try:
1572 subprocess.check_call(cmd)
1573 except subprocess.CalledProcessError:
1574 package = os.path.basename(package_dir)
1575- error_out("Error updating {} from global-requirements.txt".format(package))
1576+ error_out("Error updating {} from "
1577+ "global-requirements.txt".format(package))
1578 os.chdir(orig_dir)
1579
1580
1581
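The new lookup order is easier to see as a minimal standalone sketch (the package name, versions, and trimmed-down tables below are illustrative, not the full mappings): versions are first normalised to X.Y.Z, checked against the per-package PACKAGE_CODENAMES map, and only then fall back to the pre-Liberty co-ordinated release tables::

    import re
    from collections import OrderedDict

    PACKAGE_CODENAMES = {'keystone': OrderedDict([('8.0.0', 'liberty')])}
    OPENSTACK_CODENAMES = OrderedDict([('2015.1', 'kilo')])

    def codename_for(package, raw_version):
        # Normalise e.g. '8.0.0~b1' down to '8.0.0'.
        match = re.match(r'^(\d+)\.(\d+)\.(\d+)', raw_version)
        vers = match.group(0) if match else raw_version
        # >= Liberty: independent per-project versions win first.
        if package in PACKAGE_CODENAMES and vers in PACKAGE_CODENAMES[package]:
            return PACKAGE_CODENAMES[package][vers]
        # < Liberty: fall back to the co-ordinated release table.
        return OPENSTACK_CODENAMES.get(raw_version[:6])

    print(codename_for('keystone', '8.0.0~b1'))  # liberty
    print(codename_for('keystone', '2015.1.0'))  # kilo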
1582=== modified file 'charmhelpers/contrib/peerstorage/__init__.py'
1583--- charmhelpers/contrib/peerstorage/__init__.py 2015-06-03 13:09:25 +0000
1584+++ charmhelpers/contrib/peerstorage/__init__.py 2015-08-13 08:33:21 +0000
1585@@ -59,7 +59,7 @@
1586 """
1587
1588
1589-def leader_get(attribute=None):
1590+def leader_get(attribute=None, rid=None):
1591 """Wrapper to ensure that settings are migrated from the peer relation.
1592
1593 This is to support upgrading an environment that does not support
1594@@ -94,7 +94,8 @@
1595 # If attribute not present in leader db, check if this unit has set
1596 # the attribute in the peer relation
1597 if not leader_settings:
1598- peer_setting = relation_get(attribute=attribute, unit=local_unit())
1599+ peer_setting = _relation_get(attribute=attribute, unit=local_unit(),
1600+ rid=rid)
1601 if peer_setting:
1602 leader_set(settings={attribute: peer_setting})
1603 leader_settings = peer_setting
1604@@ -103,7 +104,7 @@
1605 settings_migrated = True
1606 migrated.add(attribute)
1607 else:
1608- r_settings = relation_get(unit=local_unit())
1609+ r_settings = _relation_get(unit=local_unit(), rid=rid)
1610 if r_settings:
1611 for key in set(r_settings.keys()).difference(migrated):
1612 # Leader setting wins
1613@@ -151,7 +152,7 @@
1614 """
1615 try:
1616 if rid in relation_ids('cluster'):
1617- return leader_get(attribute)
1618+ return leader_get(attribute, rid)
1619 else:
1620 raise NotImplementedError
1621 except NotImplementedError:
1622
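A hedged usage sketch of the widened signature (the relation id below is invented for illustration): passing rid lets the migration path read this unit's peer data for a specific cluster relation rather than only the relation of the current hook::

    from charmhelpers.contrib.peerstorage import leader_get

    # 'cluster:0' is an assumed peer relation id.
    admin_password = leader_get('admin-password', rid='cluster:0')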
1623=== modified file 'charmhelpers/contrib/python/packages.py'
1624--- charmhelpers/contrib/python/packages.py 2015-05-07 18:12:54 +0000
1625+++ charmhelpers/contrib/python/packages.py 2015-08-13 08:33:21 +0000
1626@@ -36,6 +36,8 @@
1627 def parse_options(given, available):
1628 """Given a set of options, check if available"""
1629 for key, value in sorted(given.items()):
1630+ if not value:
1631+ continue
1632 if key in available:
1633 yield "--{0}={1}".format(key, value)
1634
1635
1636=== modified file 'charmhelpers/contrib/storage/linux/ceph.py'
1637--- charmhelpers/contrib/storage/linux/ceph.py 2015-01-22 06:11:15 +0000
1638+++ charmhelpers/contrib/storage/linux/ceph.py 2015-08-13 08:33:21 +0000
1639@@ -60,12 +60,12 @@
1640 KEYFILE = '/etc/ceph/ceph.client.{}.key'
1641
1642 CEPH_CONF = """[global]
1643- auth supported = {auth}
1644- keyring = {keyring}
1645- mon host = {mon_hosts}
1646- log to syslog = {use_syslog}
1647- err to syslog = {use_syslog}
1648- clog to syslog = {use_syslog}
1649+auth supported = {auth}
1650+keyring = {keyring}
1651+mon host = {mon_hosts}
1652+log to syslog = {use_syslog}
1653+err to syslog = {use_syslog}
1654+clog to syslog = {use_syslog}
1655 """
1656
1657
1658
1659=== modified file 'charmhelpers/contrib/storage/linux/utils.py'
1660--- charmhelpers/contrib/storage/linux/utils.py 2015-01-22 06:06:03 +0000
1661+++ charmhelpers/contrib/storage/linux/utils.py 2015-08-13 08:33:21 +0000
1662@@ -43,9 +43,10 @@
1663
1664 :param block_device: str: Full path of block device to clean.
1665 '''
1666+ # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
1667 # sometimes sgdisk exits non-zero; this is OK, dd will clean up
1668- call(['sgdisk', '--zap-all', '--mbrtogpt',
1669- '--clear', block_device])
1670+ call(['sgdisk', '--zap-all', '--', block_device])
1671+ call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
1672 dev_end = check_output(['blockdev', '--getsz',
1673 block_device]).decode('UTF-8')
1674 gpt_end = int(dev_end.split()[0]) - 100
1675@@ -67,4 +68,4 @@
1676 out = check_output(['mount']).decode('UTF-8')
1677 if is_partition:
1678 return bool(re.search(device + r"\b", out))
1679- return bool(re.search(device + r"[0-9]+\b", out))
1680+ return bool(re.search(device + r"[0-9]*\b", out))
1681
1682=== modified file 'charmhelpers/contrib/unison/__init__.py'
1683--- charmhelpers/contrib/unison/__init__.py 2015-04-03 15:23:46 +0000
1684+++ charmhelpers/contrib/unison/__init__.py 2015-08-13 08:33:21 +0000
1685@@ -16,7 +16,7 @@
1686
1687 # Easy file synchronization among peer units using ssh + unison.
1688 #
1689-# From *both* peer relation -joined and -changed, add a call to
1690+# For the -joined, -changed, and -departed peer relations, add a call to
1691 # ssh_authorized_peers() describing the peer relation and the desired
1692 # user + group. After all peer relations have settled, all hosts should
1693 # be able to connect to on another via key auth'd ssh as the specified user.
1694@@ -30,14 +30,21 @@
1695 # ...
1696 # ssh_authorized_peers(peer_interface='cluster',
1697 # user='juju_ssh', group='juju_ssh',
1698-# ensure_user=True)
1699+# ensure_local_user=True)
1700 # ...
1701 #
1702 # cluster-relation-changed:
1703 # ...
1704 # ssh_authorized_peers(peer_interface='cluster',
1705 # user='juju_ssh', group='juju_ssh',
1706-# ensure_user=True)
1707+# ensure_local_user=True)
1708+# ...
1709+#
1710+# cluster-relation-departed:
1711+# ...
1712+# ssh_authorized_peers(peer_interface='cluster',
1713+# user='juju_ssh', group='juju_ssh',
1714+# ensure_local_user=True)
1715 # ...
1716 #
1717 # Hooks are now free to sync files as easily as:
1718@@ -92,11 +99,18 @@
1719 raise Exception
1720
1721
1722-def create_private_key(user, priv_key_path):
1723+def create_private_key(user, priv_key_path, key_type='rsa'):
1724+ types_bits = {
1725+ 'rsa': '2048',
1726+ 'ecdsa': '521',
1727+ }
1728+ if key_type not in types_bits:
1729+ log('Unknown ssh key type {}, using rsa'.format(key_type), ERROR)
1730+ key_type = 'rsa'
1731 if not os.path.isfile(priv_key_path):
1732 log('Generating new SSH key for user %s.' % user)
1733- cmd = ['ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048',
1734- '-f', priv_key_path]
1735+ cmd = ['ssh-keygen', '-q', '-N', '', '-t', key_type,
1736+ '-b', types_bits[key_type], '-f', priv_key_path]
1737 check_call(cmd)
1738 else:
1739 log('SSH key already exists at %s.' % priv_key_path)
1740@@ -152,7 +166,7 @@
1741 known_hosts = os.path.join(ssh_dir, 'known_hosts')
1742 khosts = []
1743 for host in hosts:
1744- cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
1745+ cmd = ['ssh-keyscan', host]
1746 remote_key = check_output(cmd, universal_newlines=True).strip()
1747 khosts.append(remote_key)
1748 log('Syncing known_hosts @ %s.' % known_hosts)
1749@@ -179,7 +193,8 @@
1750 hook = hook_name()
1751 if hook == '%s-relation-joined' % peer_interface:
1752 relation_set(ssh_pub_key=pub_key)
1753- elif hook == '%s-relation-changed' % peer_interface:
1754+ elif hook == '%s-relation-changed' % peer_interface or \
1755+ hook == '%s-relation-departed' % peer_interface:
1756 hosts = []
1757 keys = []
1758
1759
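A short sketch of wiring these helpers up under the new guidance, assuming a peer relation named 'cluster', a 'juju_ssh' user, and an illustrative key path::

    from charmhelpers.contrib.unison import create_private_key, ssh_authorized_peers

    # Call from the -joined, -changed *and* -departed hooks of the peer
    # relation so keys for departing units are rebuilt as well.
    ssh_authorized_peers(peer_interface='cluster',
                         user='juju_ssh', group='juju_ssh',
                         ensure_local_user=True)

    # The new key_type argument also allows ECDSA keys (521-bit curve).
    create_private_key('juju_ssh', '/home/juju_ssh/.ssh/id_ecdsa',
                       key_type='ecdsa')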
1760=== added file 'charmhelpers/coordinator.py'
1761--- charmhelpers/coordinator.py 1970-01-01 00:00:00 +0000
1762+++ charmhelpers/coordinator.py 2015-08-13 08:33:21 +0000
1763@@ -0,0 +1,607 @@
1764+# Copyright 2014-2015 Canonical Limited.
1765+#
1766+# This file is part of charm-helpers.
1767+#
1768+# charm-helpers is free software: you can redistribute it and/or modify
1769+# it under the terms of the GNU Lesser General Public License version 3 as
1770+# published by the Free Software Foundation.
1771+#
1772+# charm-helpers is distributed in the hope that it will be useful,
1773+# but WITHOUT ANY WARRANTY; without even the implied warranty of
1774+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1775+# GNU Lesser General Public License for more details.
1776+#
1777+# You should have received a copy of the GNU Lesser General Public License
1778+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1779+'''
1780+The coordinator module allows you to use Juju's leadership feature to
1781+coordinate operations between units of a service.
1782+
1783+Behavior is defined in subclasses of coordinator.BaseCoordinator.
1784+One implementation is provided (coordinator.Serial), which allows an
1785+operation to be run on a single unit at a time, on a first come, first
1786+served basis. You can trivially define more complex behavior by
1787+subclassing BaseCoordinator or Serial.
1788+
1789+:author: Stuart Bishop <stuart.bishop@canonical.com>
1790+
1791+
1792+Services Framework Usage
1793+========================
1794+
1795+Ensure a peer relation is defined in metadata.yaml. Instantiate a
1796+BaseCoordinator subclass before invoking ServiceManager.manage().
1797+Ensure that ServiceManager.manage() is wired up to the leader-elected,
1798+leader-settings-changed, peer relation-changed and peer
1799+relation-departed hooks in addition to any other hooks you need, or your
1800+service will deadlock.
1801+
1802+Ensure calls to acquire() are guarded, so that locks are only requested
1803+when they are really needed (and thus hooks only triggered when necessary).
1804+Failing to do this and calling acquire() unconditionally will put your unit
1805+into a hook loop. Calls to granted() do not need to be guarded.
1806+
1807+For example::
1808+
1809+ from charmhelpers.core import hookenv, services
1810+ from charmhelpers import coordinator
1811+
1812+ def maybe_restart(servicename):
1813+ serial = coordinator.Serial()
1814+ if needs_restart():
1815+ serial.acquire('restart')
1816+ if serial.granted('restart'):
1817+ hookenv.service_restart(servicename)
1818+
1819+ services = [dict(service='servicename',
1820+ data_ready=[maybe_restart])]
1821+
1822+ if __name__ == '__main__':
1823+ _ = coordinator.Serial() # Must instantiate before manager.manage()
1824+ manager = services.ServiceManager(services)
1825+ manager.manage()
1826+
1827+
1828+You can implement a similar pattern using a decorator. If the lock has
1829+not been granted, an attempt to acquire() it will be made if the guard
1830+function returns True. If the lock has been granted, the decorated function
1831+is run as normal::
1832+
1833+ from charmhelpers.core import hookenv, services
1834+ from charmhelpers import coordinator
1835+
1836+    serial = coordinator.Serial()  # Global, instantiated on module import.
1837+
1838+ def needs_restart():
1839+ [ ... Introspect state. Return True if restart is needed ... ]
1840+
1841+ @serial.require('restart', needs_restart)
1842+ def maybe_restart(servicename):
1843+ hookenv.service_restart(servicename)
1844+
1845+ services = [dict(service='servicename',
1846+ data_ready=[maybe_restart])]
1847+
1848+ if __name__ == '__main__':
1849+ manager = services.ServiceManager(services)
1850+ manager.manage()
1851+
1852+
1853+Traditional Usage
1854+=================
1855+
1856+Ensure a peer relation is defined in metadata.yaml.
1857+
1858+If you are using charmhelpers.core.hookenv.Hooks, ensure that a
1859+BaseCoordinator subclass is instantiated before calling Hooks.execute.
1860+
1861+If you are not using charmhelpers.core.hookenv.Hooks, ensure
1862+that a BaseCoordinator subclass is instantiated and its handle()
1863+method called at the start of all your hooks.
1864+
1865+For example::
1866+
1867+ import sys
1868+ from charmhelpers.core import hookenv
1869+ from charmhelpers import coordinator
1870+
1871+ hooks = hookenv.Hooks()
1872+
1873+ def maybe_restart():
1874+ serial = coordinator.Serial()
1875+ if serial.granted('restart'):
1876+ hookenv.service_restart('myservice')
1877+
1878+ @hooks.hook
1879+ def config_changed():
1880+ update_config()
1881+ serial = coordinator.Serial()
1882+ if needs_restart():
1883+            serial.acquire('restart')
1884+ maybe_restart()
1885+
1886+ # Cluster hooks must be wired up.
1887+ @hooks.hook('cluster-relation-changed', 'cluster-relation-departed')
1888+ def cluster_relation_changed():
1889+ maybe_restart()
1890+
1891+ # Leader hooks must be wired up.
1892+ @hooks.hook('leader-elected', 'leader-settings-changed')
1893+ def leader_settings_changed():
1894+ maybe_restart()
1895+
1896+ [ ... repeat for *all* other hooks you are using ... ]
1897+
1898+ if __name__ == '__main__':
1899+ _ = coordinator.Serial() # Must instantiate before execute()
1900+ hooks.execute(sys.argv)
1901+
1902+
1903+You can also use the require decorator. If the lock has not been granted,
1904+an attempt to acquire() it will be made if the guard function returns True.
1905+If the lock has been granted, the decorated function is run as normal::
1906+
1907+ from charmhelpers.core import hookenv
1908+
1909+ hooks = hookenv.Hooks()
1910+ serial = coordinator.Serial() # Must instantiate before execute()
1911+
1912+    @serial.require('restart', needs_restart)
1913+ def maybe_restart():
1914+ hookenv.service_restart('myservice')
1915+
1916+ @hooks.hook('install', 'config-changed', 'upgrade-charm',
1917+ # Peer and leader hooks must be wired up.
1918+ 'cluster-relation-changed', 'cluster-relation-departed',
1919+ 'leader-elected', 'leader-settings-changed')
1920+ def default_hook():
1921+ [...]
1922+ maybe_restart()
1923+
1924+ if __name__ == '__main__':
1925+        hooks.execute(sys.argv)
1926+
1927+
1928+Details
1929+=======
1930+
1931+A simple API is provided similar to traditional locking APIs. A lock
1932+may be requested using the acquire() method, and the granted() method
1933+may be used to check if a lock previously requested by acquire() has
1934+been granted. It doesn't matter how many times acquire() is called in a
1935+hook.
1936+
1937+Locks are released at the end of the hook they are acquired in. This may
1938+be the current hook if the unit is leader and the lock is free. It is
1939+more likely a future hook (probably leader-settings-changed, possibly
1940+the peer relation-changed or departed hook, potentially any hook).
1941+
1942+Whenever a charm needs to perform a coordinated action it will acquire()
1943+the lock and perform the action immediately if acquisition is
1944+successful. It will also need to perform the same action in every other
1945+hook if the lock has been granted.
1946+
1947+
1948+Grubby Details
1949+--------------
1950+
1951+Why do you need to be able to perform the same action in every hook?
1952+If the unit is the leader, then it may be able to grant its own lock
1953+and perform the action immediately in the source hook. If the unit is
1954+the leader and cannot immediately grant the lock, then its only
1955+guaranteed chance of acquiring the lock is in the peer relation-joined,
1956+relation-changed or peer relation-departed hooks when another unit has
1957+released it (the only channel to communicate to the leader is the peer
1958+relation). If the unit is not the leader, then it is unlikely the lock
1959+is granted in the source hook (a previous hook must have also made the
1960+request for this to happen). A non-leader is notified about the lock via
1961+leader settings. These changes may be visible in any hook, even before
1962+the leader-settings-changed hook has been invoked. Or the requesting
1963+unit may be promoted to leader after making a request, in which case the
1964+lock may be granted in leader-elected or in a future peer
1965+relation-changed or relation-departed hook.
1966+
1967+This could be simpler if leader-settings-changed was invoked on the
1968+leader. We could then never grant locks except in
1969+leader-settings-changed hooks giving one place for the operation to be
1970+performed. Unfortunately this is not the case with Juju 1.23 leadership.
1971+
1972+But of course, this doesn't really matter to most people, who seem to
1973+prefer the Services Framework or similar reset-the-world
1974+approaches, rather than the twisty maze of attempting to deduce what
1975+should be done based on what hook happens to be running (which always
1976+seems to evolve into reset-the-world anyway when the charm grows beyond
1977+the trivial).
1978+
1979+I chose not to implement a callback model, where a callback was passed
1980+to acquire to be executed when the lock is granted, because the callback
1981+may become invalid between making the request and the lock being granted
1982+due to an upgrade-charm being run in the interim. And it would create
1983+restrictions, such as no lambdas, callbacks defined at the top level of a
1984+module, etc. Still, we could implement it on top of what is here, eg.
1985+by adding a defer decorator that stores a pickle of itself to disk and
1986+have BaseCoordinator unpickle and execute them when the locks are granted.
1987+'''
1988+from datetime import datetime
1989+from functools import wraps
1990+import json
1991+import os.path
1992+
1993+from six import with_metaclass
1994+
1995+from charmhelpers.core import hookenv
1996+
1997+
1998+# We make BaseCoordinator and subclasses singletons, so that if we
1999+# need to spill to local storage then only a single instance does so,
2000+# rather than having multiple instances stomp over each other.
2001+class Singleton(type):
2002+ _instances = {}
2003+
2004+ def __call__(cls, *args, **kwargs):
2005+ if cls not in cls._instances:
2006+ cls._instances[cls] = super(Singleton, cls).__call__(*args,
2007+ **kwargs)
2008+ return cls._instances[cls]
2009+
2010+
2011+class BaseCoordinator(with_metaclass(Singleton, object)):
2012+ relid = None # Peer relation-id, set by __init__
2013+ relname = None
2014+
2015+ grants = None # self.grants[unit][lock] == timestamp
2016+ requests = None # self.requests[unit][lock] == timestamp
2017+
2018+ def __init__(self, relation_key='coordinator', peer_relation_name=None):
2019+        '''Instantiate a Coordinator.
2020+
2021+ Data is stored on the peer relation and in leadership storage
2022+ under the provided relation_key.
2023+
2024+ The peer relation is identified by peer_relation_name, and defaults
2025+ to the first one found in metadata.yaml.
2026+ '''
2027+ # Most initialization is deferred, since invoking hook tools from
2028+ # the constructor makes testing hard.
2029+ self.key = relation_key
2030+ self.relname = peer_relation_name
2031+ hookenv.atstart(self.initialize)
2032+
2033+ # Ensure that handle() is called, without placing that burden on
2034+ # the charm author. They still need to do this manually if they
2035+ # are not using a hook framework.
2036+ hookenv.atstart(self.handle)
2037+
2038+ def initialize(self):
2039+ if self.requests is not None:
2040+ return # Already initialized.
2041+
2042+ assert hookenv.has_juju_version('1.23'), 'Needs Juju 1.23+'
2043+
2044+ if self.relname is None:
2045+ self.relname = _implicit_peer_relation_name()
2046+
2047+ relids = hookenv.relation_ids(self.relname)
2048+ if relids:
2049+ self.relid = sorted(relids)[0]
2050+
2051+ # Load our state, from leadership, the peer relationship, and maybe
2052+ # local state as a fallback. Populates self.requests and self.grants.
2053+ self._load_state()
2054+ self._emit_state()
2055+
2056+ # Save our state if the hook completes successfully.
2057+ hookenv.atexit(self._save_state)
2058+
2059+ # Schedule release of granted locks for the end of the hook.
2060+ # This needs to be the last of our atexit callbacks to ensure
2061+ # it will be run first when the hook is complete, because there
2062+ # is no point mutating our state after it has been saved.
2063+ hookenv.atexit(self._release_granted)
2064+
2065+ def acquire(self, lock):
2066+ '''Acquire the named lock, non-blocking.
2067+
2068+ The lock may be granted immediately, or in a future hook.
2069+
2070+ Returns True if the lock has been granted. The lock will be
2071+ automatically released at the end of the hook in which it is
2072+ granted.
2073+
2074+ Do not mindlessly call this method, as it triggers a cascade of
2075+ hooks. For example, if you call acquire() every time in your
2076+ peer relation-changed hook you will end up with an infinite loop
2077+ of hooks. It should almost always be guarded by some condition.
2078+ '''
2079+ unit = hookenv.local_unit()
2080+ ts = self.requests[unit].get(lock)
2081+ if not ts:
2082+ # If there is no outstanding request on the peer relation,
2083+ # create one.
2084+ self.requests.setdefault(lock, {})
2085+ self.requests[unit][lock] = _timestamp()
2086+ self.msg('Requested {}'.format(lock))
2087+
2088+ # If the leader has granted the lock, yay.
2089+ if self.granted(lock):
2090+ self.msg('Acquired {}'.format(lock))
2091+ return True
2092+
2093+ # If the unit making the request also happens to be the
2094+ # leader, it must handle the request now. Even though the
2095+ # request has been stored on the peer relation, the peer
2096+ # relation-changed hook will not be triggered.
2097+ if hookenv.is_leader():
2098+ return self.grant(lock, unit)
2099+
2100+ return False # Can't acquire lock, yet. Maybe next hook.
2101+
2102+ def granted(self, lock):
2103+ '''Return True if a previously requested lock has been granted'''
2104+ unit = hookenv.local_unit()
2105+ ts = self.requests[unit].get(lock)
2106+ if ts and self.grants.get(unit, {}).get(lock) == ts:
2107+ return True
2108+ return False
2109+
2110+ def requested(self, lock):
2111+ '''Return True if we are in the queue for the lock'''
2112+ return lock in self.requests[hookenv.local_unit()]
2113+
2114+ def request_timestamp(self, lock):
2115+ '''Return the timestamp of our outstanding request for lock, or None.
2116+
2117+ Returns a datetime.datetime() UTC timestamp, with no tzinfo attribute.
2118+ '''
2119+ ts = self.requests[hookenv.local_unit()].get(lock, None)
2120+ if ts is not None:
2121+ return datetime.strptime(ts, _timestamp_format)
2122+
2123+ def handle(self):
2124+ if not hookenv.is_leader():
2125+ return # Only the leader can grant requests.
2126+
2127+ self.msg('Leader handling coordinator requests')
2128+
2129+ # Clear our grants that have been released.
2130+ for unit in self.grants.keys():
2131+ for lock, grant_ts in list(self.grants[unit].items()):
2132+ req_ts = self.requests.get(unit, {}).get(lock)
2133+ if req_ts != grant_ts:
2134+ # The request timestamp does not match the granted
2135+ # timestamp. Several hooks on 'unit' may have run
2136+ # before the leader got a chance to make a decision,
2137+ # and 'unit' may have released its lock and attempted
2138+ # to reacquire it. This will change the timestamp,
2139+ # and we correctly revoke the old grant putting it
2140+ # to the end of the queue.
2141+ ts = datetime.strptime(self.grants[unit][lock],
2142+ _timestamp_format)
2143+ del self.grants[unit][lock]
2144+ self.released(unit, lock, ts)
2145+
2146+ # Grant locks
2147+ for unit in self.requests.keys():
2148+ for lock in self.requests[unit]:
2149+ self.grant(lock, unit)
2150+
2151+ def grant(self, lock, unit):
2152+ '''Maybe grant the lock to a unit.
2153+
2154+ The decision to grant the lock or not is made for $lock
2155+ by a corresponding method grant_$lock, which you may define
2156+ in a subclass. If no such method is defined, the default_grant
2157+ method is used. See Serial.default_grant() for details.
2158+ '''
2159+ if not hookenv.is_leader():
2160+ return False # Not the leader, so we cannot grant.
2161+
2162+ # Set of units already granted the lock.
2163+ granted = set()
2164+ for u in self.grants:
2165+ if lock in self.grants[u]:
2166+ granted.add(u)
2167+ if unit in granted:
2168+ return True # Already granted.
2169+
2170+ # Ordered list of units waiting for the lock.
2171+ reqs = set()
2172+ for u in self.requests:
2173+ if u in granted:
2174+ continue # In the granted set. Not wanted in the req list.
2175+ for l, ts in self.requests[u].items():
2176+ if l == lock:
2177+ reqs.add((ts, u))
2178+ queue = [t[1] for t in sorted(reqs)]
2179+ if unit not in queue:
2180+ return False # Unit has not requested the lock.
2181+
2182+ # Locate custom logic, or fallback to the default.
2183+ grant_func = getattr(self, 'grant_{}'.format(lock), self.default_grant)
2184+
2185+ if grant_func(lock, unit, granted, queue):
2186+ # Grant the lock.
2187+ self.msg('Leader grants {} to {}'.format(lock, unit))
2188+ self.grants.setdefault(unit, {})[lock] = self.requests[unit][lock]
2189+ return True
2190+
2191+ return False
2192+
2193+ def released(self, unit, lock, timestamp):
2194+ '''Called on the leader when it has released a lock.
2195+
2196+ By default, does nothing but log messages. Override if you
2197+ need to perform additional housekeeping when a lock is released,
2198+ for example recording timestamps.
2199+ '''
2200+ interval = _utcnow() - timestamp
2201+ self.msg('Leader released {} from {}, held {}'.format(lock, unit,
2202+ interval))
2203+
2204+ def require(self, lock, guard_func, *guard_args, **guard_kw):
2205+ """Decorate a function to be run only when a lock is acquired.
2206+
2207+ The lock is requested if the guard function returns True.
2208+
2209+ The decorated function is called if the lock has been granted.
2210+ """
2211+ def decorator(f):
2212+ @wraps(f)
2213+ def wrapper(*args, **kw):
2214+ if self.granted(lock):
2215+ self.msg('Granted {}'.format(lock))
2216+ return f(*args, **kw)
2217+ if guard_func(*guard_args, **guard_kw) and self.acquire(lock):
2218+ return f(*args, **kw)
2219+ return None
2220+ return wrapper
2221+ return decorator
2222+
2223+ def msg(self, msg):
2224+ '''Emit a message. Override to customize log spam.'''
2225+ hookenv.log('coordinator.{} {}'.format(self._name(), msg),
2226+ level=hookenv.INFO)
2227+
2228+ def _name(self):
2229+ return self.__class__.__name__
2230+
2231+ def _load_state(self):
2232+ self.msg('Loading state'.format(self._name()))
2233+
2234+ # All responses must be stored in the leadership settings.
2235+ # The leader cannot use local state, as a different unit may
2236+ # be leader next time. Which is fine, as the leadership
2237+ # settings are always available.
2238+ self.grants = json.loads(hookenv.leader_get(self.key) or '{}')
2239+
2240+ local_unit = hookenv.local_unit()
2241+
2242+ # All requests must be stored on the peer relation. This is
2243+ # the only channel units have to communicate with the leader.
2244+ # Even the leader needs to store its requests here, as a
2245+ # different unit may be leader by the time the request can be
2246+ # granted.
2247+ if self.relid is None:
2248+ # The peer relation is not available. Maybe we are early in
2249+            # the unit's lifecycle. Maybe this unit is standalone.
2250+ # Fallback to using local state.
2251+ self.msg('No peer relation. Loading local state')
2252+ self.requests = {local_unit: self._load_local_state()}
2253+ else:
2254+ self.requests = self._load_peer_state()
2255+ if local_unit not in self.requests:
2256+ # The peer relation has just been joined. Update any state
2257+ # loaded from our peers with our local state.
2258+ self.msg('New peer relation. Merging local state')
2259+ self.requests[local_unit] = self._load_local_state()
2260+
2261+ def _emit_state(self):
2262+        # Emit this unit's lock status.
2263+ for lock in sorted(self.requests[hookenv.local_unit()].keys()):
2264+ if self.granted(lock):
2265+ self.msg('Granted {}'.format(lock))
2266+ else:
2267+ self.msg('Waiting on {}'.format(lock))
2268+
2269+ def _save_state(self):
2270+ self.msg('Publishing state'.format(self._name()))
2271+ if hookenv.is_leader():
2272+ # sort_keys to ensure stability.
2273+ raw = json.dumps(self.grants, sort_keys=True)
2274+ hookenv.leader_set({self.key: raw})
2275+
2276+ local_unit = hookenv.local_unit()
2277+
2278+ if self.relid is None:
2279+ # No peer relation yet. Fallback to local state.
2280+ self.msg('No peer relation. Saving local state')
2281+ self._save_local_state(self.requests[local_unit])
2282+ else:
2283+ # sort_keys to ensure stability.
2284+ raw = json.dumps(self.requests[local_unit], sort_keys=True)
2285+ hookenv.relation_set(self.relid, relation_settings={self.key: raw})
2286+
2287+ def _load_peer_state(self):
2288+ requests = {}
2289+ units = set(hookenv.related_units(self.relid))
2290+ units.add(hookenv.local_unit())
2291+ for unit in units:
2292+ raw = hookenv.relation_get(self.key, unit, self.relid)
2293+ if raw:
2294+ requests[unit] = json.loads(raw)
2295+ return requests
2296+
2297+ def _local_state_filename(self):
2298+ # Include the class name. We allow multiple BaseCoordinator
2299+ # subclasses to be instantiated, and they are singletons, so
2300+ # this avoids conflicts (unless someone creates and uses two
2301+ # BaseCoordinator subclasses with the same class name, so don't
2302+ # do that).
2303+ return '.charmhelpers.coordinator.{}'.format(self._name())
2304+
2305+ def _load_local_state(self):
2306+ fn = self._local_state_filename()
2307+ if os.path.exists(fn):
2308+ with open(fn, 'r') as f:
2309+ return json.load(f)
2310+ return {}
2311+
2312+ def _save_local_state(self, state):
2313+ fn = self._local_state_filename()
2314+ with open(fn, 'w') as f:
2315+ json.dump(state, f)
2316+
2317+ def _release_granted(self):
2318+ # At the end of every hook, release all locks granted to
2319+ # this unit. If a hook neglects to make use of what it
2320+ # requested, it will just have to make the request again.
2321+ # Implicit release is the only way this will work, as
2322+ # if the unit is standalone there may be no future triggers
2323+ # called to do a manual release.
2324+ unit = hookenv.local_unit()
2325+ for lock in list(self.requests[unit].keys()):
2326+ if self.granted(lock):
2327+ self.msg('Released local {} lock'.format(lock))
2328+ del self.requests[unit][lock]
2329+
2330+
2331+class Serial(BaseCoordinator):
2332+ def default_grant(self, lock, unit, granted, queue):
2333+ '''Default logic to grant a lock to a unit. Unless overridden,
2334+ only one unit may hold the lock and it will be granted to the
2335+ earliest queued request.
2336+
2337+ To define custom logic for $lock, create a subclass and
2338+ define a grant_$lock method.
2339+
2340+ `unit` is the unit name making the request.
2341+
2342+ `granted` is the set of units already granted the lock. It will
2343+ never include `unit`. It may be empty.
2344+
2345+ `queue` is the list of units waiting for the lock, ordered by time
2346+ of request. It will always include `unit`, but `unit` is not
2347+ necessarily first.
2348+
2349+ Returns True if the lock should be granted to `unit`.
2350+ '''
2351+ return unit == queue[0] and not granted
2352+
2353+
2354+def _implicit_peer_relation_name():
2355+ md = hookenv.metadata()
2356+ assert 'peers' in md, 'No peer relations in metadata.yaml'
2357+ return sorted(md['peers'].keys())[0]
2358+
2359+
2360+# A human readable, sortable UTC timestamp format.
2361+_timestamp_format = '%Y-%m-%d %H:%M:%S.%fZ'
2362+
2363+
2364+def _utcnow(): # pragma: no cover
2365+ # This wrapper exists as mocking datetime methods is problematic.
2366+ return datetime.utcnow()
2367+
2368+
2369+def _timestamp():
2370+ return _utcnow().strftime(_timestamp_format)
2371
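The grant()/default_grant() docstrings describe per-lock policy via a grant_$lock method; a hedged sketch of what that can look like in a charm (the class, lock name, and concurrency limit are invented for illustration)::

    from charmhelpers import coordinator

    class LimitedParallel(coordinator.Serial):
        # Custom policy for the 'upgrade' lock only; every other lock
        # still uses Serial.default_grant (one holder at a time).
        def grant_upgrade(self, lock, unit, granted, queue):
            # Allow up to two concurrent holders, earliest requests first.
            return len(granted) < 2 and unit in queue[:2]

    serial = LimitedParallel()  # Instantiate before the hook framework runs.

    def maybe_upgrade(needed):
        # Guard acquire(), as the module docstring advises, to avoid hook loops.
        if needed:
            serial.acquire('upgrade')
        if serial.granted('upgrade'):
            pass  # perform the coordinated upgrade here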
2372=== added file 'charmhelpers/core/files.py'
2373--- charmhelpers/core/files.py 1970-01-01 00:00:00 +0000
2374+++ charmhelpers/core/files.py 2015-08-13 08:33:21 +0000
2375@@ -0,0 +1,45 @@
2376+#!/usr/bin/env python
2377+# -*- coding: utf-8 -*-
2378+
2379+# Copyright 2014-2015 Canonical Limited.
2380+#
2381+# This file is part of charm-helpers.
2382+#
2383+# charm-helpers is free software: you can redistribute it and/or modify
2384+# it under the terms of the GNU Lesser General Public License version 3 as
2385+# published by the Free Software Foundation.
2386+#
2387+# charm-helpers is distributed in the hope that it will be useful,
2388+# but WITHOUT ANY WARRANTY; without even the implied warranty of
2389+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2390+# GNU Lesser General Public License for more details.
2391+#
2392+# You should have received a copy of the GNU Lesser General Public License
2393+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
2394+
2395+__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
2396+
2397+import os
2398+import subprocess
2399+
2400+
2401+def sed(filename, before, after, flags='g'):
2402+ """
2403+ Search and replaces the given pattern on filename.
2404+
2405+ :param filename: relative or absolute file path.
2406+ :param before: expression to be replaced (see 'man sed')
2407+ :param after: expression to replace with (see 'man sed')
2408+    :param flags: sed-compatible regex flags; for example, to make
2409+        the search and replace case insensitive, specify ``flags="i"``.
2410+        The ``g`` flag is only the default; include it in any override
2411+        (e.g. ``flags="gi"``) if global replacement is still wanted.
2412+    :returns: zero if the sed command exited successfully,
2413+        otherwise raises CalledProcessError.
2414+ """
2415+ expression = r's/{0}/{1}/{2}'.format(before,
2416+ after, flags)
2417+
2418+ return subprocess.check_call(["sed", "-i", "-r", "-e",
2419+ expression,
2420+ os.path.expanduser(filename)])
2421
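Since sed() just shells out to GNU ``sed -r -i``, anything valid in an extended regex works; a small usage sketch, with an assumed file path and pattern::

    from charmhelpers.core.files import sed

    # Rewrite a listen address in an illustrative config file,
    # case-insensitively, keeping 'g' so every match is replaced.
    sed('/etc/myservice/myservice.conf',
        r'^listen_host\s*=.*',
        'listen_host = 0.0.0.0',
        flags='gi')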
2422=== modified file 'charmhelpers/core/hookenv.py'
2423--- charmhelpers/core/hookenv.py 2015-06-02 13:46:29 +0000
2424+++ charmhelpers/core/hookenv.py 2015-08-13 08:33:21 +0000
2425@@ -21,7 +21,10 @@
2426 # Charm Helpers Developers <juju@lists.ubuntu.com>
2427
2428 from __future__ import print_function
2429+import copy
2430+from distutils.version import LooseVersion
2431 from functools import wraps
2432+import glob
2433 import os
2434 import json
2435 import yaml
2436@@ -71,6 +74,7 @@
2437 res = func(*args, **kwargs)
2438 cache[key] = res
2439 return res
2440+ wrapper._wrapped = func
2441 return wrapper
2442
2443
2444@@ -170,9 +174,19 @@
2445 return os.environ.get('JUJU_RELATION', None)
2446
2447
2448-def relation_id():
2449- """The relation ID for the current relation hook"""
2450- return os.environ.get('JUJU_RELATION_ID', None)
2451+@cached
2452+def relation_id(relation_name=None, service_or_unit=None):
2453+ """The relation ID for the current or a specified relation"""
2454+ if not relation_name and not service_or_unit:
2455+ return os.environ.get('JUJU_RELATION_ID', None)
2456+ elif relation_name and service_or_unit:
2457+ service_name = service_or_unit.split('/')[0]
2458+ for relid in relation_ids(relation_name):
2459+ remote_service = remote_service_name(relid)
2460+ if remote_service == service_name:
2461+ return relid
2462+ else:
2463+ raise ValueError('Must specify neither or both of relation_name and service_or_unit')
2464
2465
2466 def local_unit():
2467@@ -190,9 +204,20 @@
2468 return local_unit().split('/')[0]
2469
2470
2471+@cached
2472+def remote_service_name(relid=None):
2473+ """The remote service name for a given relation-id (or the current relation)"""
2474+ if relid is None:
2475+ unit = remote_unit()
2476+ else:
2477+ units = related_units(relid)
2478+ unit = units[0] if units else None
2479+ return unit.split('/')[0] if unit else None
2480+
2481+
2482 def hook_name():
2483 """The name of the currently executing hook"""
2484- return os.path.basename(sys.argv[0])
2485+ return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
2486
2487
2488 class Config(dict):
2489@@ -242,29 +267,7 @@
2490 self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
2491 if os.path.exists(self.path):
2492 self.load_previous()
2493-
2494- def __getitem__(self, key):
2495- """For regular dict lookups, check the current juju config first,
2496- then the previous (saved) copy. This ensures that user-saved values
2497- will be returned by a dict lookup.
2498-
2499- """
2500- try:
2501- return dict.__getitem__(self, key)
2502- except KeyError:
2503- return (self._prev_dict or {})[key]
2504-
2505- def get(self, key, default=None):
2506- try:
2507- return self[key]
2508- except KeyError:
2509- return default
2510-
2511- def keys(self):
2512- prev_keys = []
2513- if self._prev_dict is not None:
2514- prev_keys = self._prev_dict.keys()
2515- return list(set(prev_keys + list(dict.keys(self))))
2516+ atexit(self._implicit_save)
2517
2518 def load_previous(self, path=None):
2519 """Load previous copy of config from disk.
2520@@ -283,6 +286,9 @@
2521 self.path = path or self.path
2522 with open(self.path) as f:
2523 self._prev_dict = json.load(f)
2524+ for k, v in copy.deepcopy(self._prev_dict).items():
2525+ if k not in self:
2526+ self[k] = v
2527
2528 def changed(self, key):
2529 """Return True if the current value for this key is different from
2530@@ -314,13 +320,13 @@
2531 instance.
2532
2533 """
2534- if self._prev_dict:
2535- for k, v in six.iteritems(self._prev_dict):
2536- if k not in self:
2537- self[k] = v
2538 with open(self.path, 'w') as f:
2539 json.dump(self, f)
2540
2541+ def _implicit_save(self):
2542+ if self.implicit_save:
2543+ self.save()
2544+
2545
2546 @cached
2547 def config(scope=None):
2548@@ -485,6 +491,63 @@
2549
2550
2551 @cached
2552+def relation_to_interface(relation_name):
2553+ """
2554+ Given the name of a relation, return the interface that relation uses.
2555+
2556+ :returns: The interface name, or ``None``.
2557+ """
2558+ return relation_to_role_and_interface(relation_name)[1]
2559+
2560+
2561+@cached
2562+def relation_to_role_and_interface(relation_name):
2563+ """
2564+ Given the name of a relation, return the role and the name of the interface
2565+    that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
2566+
2567+ :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
2568+ """
2569+ _metadata = metadata()
2570+    for role in ('provides', 'requires', 'peers'):
2571+ interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
2572+ if interface:
2573+ return role, interface
2574+ return None, None
2575+
2576+
2577+@cached
2578+def role_and_interface_to_relations(role, interface_name):
2579+ """
2580+ Given a role and interface name, return a list of relation names for the
2581+ current charm that use that interface under that role (where role is one
2582+    of ``provides``, ``requires``, or ``peers``).
2583+
2584+ :returns: A list of relation names.
2585+ """
2586+ _metadata = metadata()
2587+ results = []
2588+ for relation_name, relation in _metadata.get(role, {}).items():
2589+ if relation['interface'] == interface_name:
2590+ results.append(relation_name)
2591+ return results
2592+
2593+
2594+@cached
2595+def interface_to_relations(interface_name):
2596+ """
2597+ Given an interface, return a list of relation names for the current
2598+ charm that use that interface.
2599+
2600+ :returns: A list of relation names.
2601+ """
2602+ results = []
2603+    for role in ('provides', 'requires', 'peers'):
2604+ results.extend(role_and_interface_to_relations(role, interface_name))
2605+ return results
2606+
2607+
2608+@cached
2609 def charm_name():
2610 """Get the name of the current charm as is specified on metadata.yaml"""
2611 return metadata().get('name')
2612@@ -587,10 +650,14 @@
2613 hooks.execute(sys.argv)
2614 """
2615
2616- def __init__(self, config_save=True):
2617+ def __init__(self, config_save=None):
2618 super(Hooks, self).__init__()
2619 self._hooks = {}
2620- self._config_save = config_save
2621+
2622+ # For unknown reasons, we allow the Hooks constructor to override
2623+ # config().implicit_save.
2624+ if config_save is not None:
2625+ config().implicit_save = config_save
2626
2627 def register(self, name, function):
2628 """Register a hook"""
2629@@ -598,13 +665,16 @@
2630
2631 def execute(self, args):
2632 """Execute a registered hook based on args[0]"""
2633+ _run_atstart()
2634 hook_name = os.path.basename(args[0])
2635 if hook_name in self._hooks:
2636- self._hooks[hook_name]()
2637- if self._config_save:
2638- cfg = config()
2639- if cfg.implicit_save:
2640- cfg.save()
2641+ try:
2642+ self._hooks[hook_name]()
2643+ except SystemExit as x:
2644+ if x.code is None or x.code == 0:
2645+ _run_atexit()
2646+ raise
2647+ _run_atexit()
2648 else:
2649 raise UnregisteredHookError(hook_name)
2650
2651@@ -653,6 +723,21 @@
2652 subprocess.check_call(['action-fail', message])
2653
2654
2655+def action_name():
2656+ """Get the name of the currently executing action."""
2657+ return os.environ.get('JUJU_ACTION_NAME')
2658+
2659+
2660+def action_uuid():
2661+ """Get the UUID of the currently executing action."""
2662+ return os.environ.get('JUJU_ACTION_UUID')
2663+
2664+
2665+def action_tag():
2666+ """Get the tag for the currently executing action."""
2667+ return os.environ.get('JUJU_ACTION_TAG')
2668+
2669+
2670 def status_set(workload_state, message):
2671 """Set the workload state with a message
2672
2673@@ -732,13 +817,80 @@
2674 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
2675 def leader_set(settings=None, **kwargs):
2676 """Juju leader set value(s)"""
2677- log("Juju leader-set '%s'" % (settings), level=DEBUG)
2678+ # Don't log secrets.
2679+ # log("Juju leader-set '%s'" % (settings), level=DEBUG)
2680 cmd = ['leader-set']
2681 settings = settings or {}
2682 settings.update(kwargs)
2683- for k, v in settings.iteritems():
2684+ for k, v in settings.items():
2685 if v is None:
2686 cmd.append('{}='.format(k))
2687 else:
2688 cmd.append('{}={}'.format(k, v))
2689 subprocess.check_call(cmd)
2690+
2691+
2692+@cached
2693+def juju_version():
2694+ """Full version string (eg. '1.23.3.1-trusty-amd64')"""
2695+ # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
2696+ jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
2697+ return subprocess.check_output([jujud, 'version'],
2698+ universal_newlines=True).strip()
2699+
2700+
2701+@cached
2702+def has_juju_version(minimum_version):
2703+ """Return True if the Juju version is at least the provided version"""
2704+ return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
2705+
2706+
2707+_atexit = []
2708+_atstart = []
2709+
2710+
2711+def atstart(callback, *args, **kwargs):
2712+ '''Schedule a callback to run before the main hook.
2713+
2714+ Callbacks are run in the order they were added.
2715+
2716+ This is useful for modules and classes to perform initialization
2717+ and inject behavior. In particular:
2718+
2719+ - Run common code before all of your hooks, such as logging
2720+ the hook name or interesting relation data.
2721+ - Defer object or module initialization that requires a hook
2722+ context until we know there actually is a hook context,
2723+ making testing easier.
2724+ - Rather than requiring charm authors to include boilerplate to
2725+ invoke your helper's behavior, have it run automatically if
2726+ your object is instantiated or module imported.
2727+
2728+    This is not at all useful after your hook framework has been launched.
2729+ '''
2730+ global _atstart
2731+ _atstart.append((callback, args, kwargs))
2732+
2733+
2734+def atexit(callback, *args, **kwargs):
2735+ '''Schedule a callback to run on successful hook completion.
2736+
2737+ Callbacks are run in the reverse order that they were added.'''
2738+ _atexit.append((callback, args, kwargs))
2739+
2740+
2741+def _run_atstart():
2742+ '''Hook frameworks must invoke this before running the main hook body.'''
2743+ global _atstart
2744+ for callback, args, kwargs in _atstart:
2745+ callback(*args, **kwargs)
2746+ del _atstart[:]
2747+
2748+
2749+def _run_atexit():
2750+ '''Hook frameworks must invoke this after the main hook body has
2751+ successfully completed. Do not invoke it if the hook fails.'''
2752+ global _atexit
2753+ for callback, args, kwargs in reversed(_atexit):
2754+ callback(*args, **kwargs)
2755+ del _atexit[:]
2756
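A brief sketch of the new atstart()/atexit() plumbing from a charm author's point of view (the hook name, relation name and log text are invented); Hooks.execute() and ServiceManager.manage() now call _run_atstart()/_run_atexit() on the author's behalf::

    import sys
    from charmhelpers.core import hookenv

    hooks = hookenv.Hooks()

    # Runs before whichever hook body executes.
    hookenv.atstart(hookenv.log, 'hook starting', level=hookenv.INFO)

    # Runs only if the hook completes successfully.
    hookenv.atexit(hookenv.log, 'hook finished cleanly', level=hookenv.INFO)

    @hooks.hook('config-changed')
    def config_changed():
        # 'db' is an assumed relation name from metadata.yaml.
        hookenv.log('db interface: %s' % hookenv.relation_to_interface('db'))

    if __name__ == '__main__':
        hooks.execute(sys.argv)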
2757=== modified file 'charmhelpers/core/host.py'
2758--- charmhelpers/core/host.py 2015-06-11 09:03:58 +0000
2759+++ charmhelpers/core/host.py 2015-08-13 08:33:21 +0000
2760@@ -63,6 +63,36 @@
2761 return service_result
2762
2763
2764+def service_pause(service_name, init_dir=None):
2765+ """Pause a system service.
2766+
2767+ Stop it, and prevent it from starting again at boot."""
2768+ if init_dir is None:
2769+ init_dir = "/etc/init"
2770+ stopped = service_stop(service_name)
2771+ # XXX: Support systemd too
2772+ override_path = os.path.join(
2773+ init_dir, '{}.override'.format(service_name))
2774+ with open(override_path, 'w') as fh:
2775+ fh.write("manual\n")
2776+ return stopped
2777+
2778+
2779+def service_resume(service_name, init_dir=None):
2780+ """Resume a system service.
2781+
2782+    Re-enable starting at boot, then start the service."""
2783+ # XXX: Support systemd too
2784+ if init_dir is None:
2785+ init_dir = "/etc/init"
2786+ override_path = os.path.join(
2787+ init_dir, '{}.override'.format(service_name))
2788+ if os.path.exists(override_path):
2789+ os.unlink(override_path)
2790+ started = service_start(service_name)
2791+ return started
2792+
2793+
2794 def service(action, service_name):
2795 """Control a system service"""
2796 cmd = ['service', service_name, action]
2797@@ -149,11 +179,7 @@
2798
2799 def add_user_to_group(username, group):
2800 """Add a user to a group"""
2801- cmd = [
2802- 'gpasswd', '-a',
2803- username,
2804- group
2805- ]
2806+ cmd = ['gpasswd', '-a', username, group]
2807 log("Adding user {} to group {}".format(username, group))
2808 subprocess.check_call(cmd)
2809
2810
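Illustrative usage of the new helpers (the service name is assumed); as the XXX comments note, only upstart override files are written for now, so systemd units are not yet covered::

    from charmhelpers.core.host import service_pause, service_resume

    # Stop the service and write /etc/init/myservice.override so it
    # is not started again at boot.
    service_pause('myservice')

    # Later: remove the override and start the service again.
    service_resume('myservice')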
2811=== modified file 'charmhelpers/core/services/base.py'
2812--- charmhelpers/core/services/base.py 2015-05-20 14:52:29 +0000
2813+++ charmhelpers/core/services/base.py 2015-08-13 08:33:21 +0000
2814@@ -128,15 +128,18 @@
2815 """
2816 Handle the current hook by doing The Right Thing with the registered services.
2817 """
2818- hook_name = hookenv.hook_name()
2819- if hook_name == 'stop':
2820- self.stop_services()
2821- else:
2822- self.reconfigure_services()
2823- self.provide_data()
2824- cfg = hookenv.config()
2825- if cfg.implicit_save:
2826- cfg.save()
2827+ hookenv._run_atstart()
2828+ try:
2829+ hook_name = hookenv.hook_name()
2830+ if hook_name == 'stop':
2831+ self.stop_services()
2832+ else:
2833+ self.reconfigure_services()
2834+ self.provide_data()
2835+ except SystemExit as x:
2836+ if x.code is None or x.code == 0:
2837+ hookenv._run_atexit()
2838+ hookenv._run_atexit()
2839
2840 def provide_data(self):
2841 """
2842
2843=== modified file 'charmhelpers/core/services/helpers.py'
2844--- charmhelpers/core/services/helpers.py 2015-06-12 11:27:22 +0000
2845+++ charmhelpers/core/services/helpers.py 2015-08-13 08:33:21 +0000
2846@@ -240,8 +240,7 @@
2847 action.
2848
2849 :param str source: The template source file, relative to
2850- `$CHARM_DIR/templates`
2851-
2852+ `$CHARM_DIR/templates`
2853 :param str target: The target to write the rendered template to
2854 :param str owner: The owner of the rendered file
2855 :param str group: The group of the rendered file
2856
2857=== modified file 'charmhelpers/core/unitdata.py'
2858--- charmhelpers/core/unitdata.py 2015-03-18 15:51:22 +0000
2859+++ charmhelpers/core/unitdata.py 2015-08-13 08:33:21 +0000
2860@@ -152,6 +152,7 @@
2861 import collections
2862 import contextlib
2863 import datetime
2864+import itertools
2865 import json
2866 import os
2867 import pprint
2868@@ -164,8 +165,7 @@
2869 class Storage(object):
2870 """Simple key value database for local unit state within charms.
2871
2872- Modifications are automatically committed at hook exit. That's
2873- currently regardless of exit code.
2874+ Modifications are not persisted unless :meth:`flush` is called.
2875
2876 To support dicts, lists, integer, floats, and booleans values
2877 are automatically json encoded/decoded.
2878@@ -173,8 +173,11 @@
2879 def __init__(self, path=None):
2880 self.db_path = path
2881 if path is None:
2882- self.db_path = os.path.join(
2883- os.environ.get('CHARM_DIR', ''), '.unit-state.db')
2884+ if 'UNIT_STATE_DB' in os.environ:
2885+ self.db_path = os.environ['UNIT_STATE_DB']
2886+ else:
2887+ self.db_path = os.path.join(
2888+ os.environ.get('CHARM_DIR', ''), '.unit-state.db')
2889 self.conn = sqlite3.connect('%s' % self.db_path)
2890 self.cursor = self.conn.cursor()
2891 self.revision = None
2892@@ -189,15 +192,8 @@
2893 self.conn.close()
2894 self._closed = True
2895
2896- def _scoped_query(self, stmt, params=None):
2897- if params is None:
2898- params = []
2899- return stmt, params
2900-
2901 def get(self, key, default=None, record=False):
2902- self.cursor.execute(
2903- *self._scoped_query(
2904- 'select data from kv where key=?', [key]))
2905+ self.cursor.execute('select data from kv where key=?', [key])
2906 result = self.cursor.fetchone()
2907 if not result:
2908 return default
2909@@ -206,33 +202,81 @@
2910 return json.loads(result[0])
2911
2912 def getrange(self, key_prefix, strip=False):
2913- stmt = "select key, data from kv where key like '%s%%'" % key_prefix
2914- self.cursor.execute(*self._scoped_query(stmt))
2915+ """
2916+ Get a range of keys starting with a common prefix as a mapping of
2917+ keys to values.
2918+
2919+ :param str key_prefix: Common prefix among all keys
2920+ :param bool strip: Optionally strip the common prefix from the key
2921+ names in the returned dict
2922+ :return dict: A (possibly empty) dict of key-value mappings
2923+ """
2924+ self.cursor.execute("select key, data from kv where key like ?",
2925+ ['%s%%' % key_prefix])
2926 result = self.cursor.fetchall()
2927
2928 if not result:
2929- return None
2930+ return {}
2931 if not strip:
2932 key_prefix = ''
2933 return dict([
2934 (k[len(key_prefix):], json.loads(v)) for k, v in result])
2935
2936 def update(self, mapping, prefix=""):
2937+ """
2938+ Set the values of multiple keys at once.
2939+
2940+ :param dict mapping: Mapping of keys to values
2941+ :param str prefix: Optional prefix to apply to all keys in `mapping`
2942+ before setting
2943+ """
2944 for k, v in mapping.items():
2945 self.set("%s%s" % (prefix, k), v)
2946
2947 def unset(self, key):
2948+ """
2949+ Remove a key from the database entirely.
2950+ """
2951 self.cursor.execute('delete from kv where key=?', [key])
2952 if self.revision and self.cursor.rowcount:
2953 self.cursor.execute(
2954 'insert into kv_revisions values (?, ?, ?)',
2955 [key, self.revision, json.dumps('DELETED')])
2956
2957+ def unsetrange(self, keys=None, prefix=""):
2958+ """
2959+ Remove a range of keys starting with a common prefix, from the database
2960+ entirely.
2961+
2962+ :param list keys: List of keys to remove.
2963+ :param str prefix: Optional prefix to apply to all keys in ``keys``
2964+ before removing.
2965+ """
2966+ if keys is not None:
2967+ keys = ['%s%s' % (prefix, key) for key in keys]
2968+ self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
2969+ if self.revision and self.cursor.rowcount:
2970+ self.cursor.execute(
2971+ 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
2972+ list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
2973+ else:
2974+ self.cursor.execute('delete from kv where key like ?',
2975+ ['%s%%' % prefix])
2976+ if self.revision and self.cursor.rowcount:
2977+ self.cursor.execute(
2978+ 'insert into kv_revisions values (?, ?, ?)',
2979+ ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
2980+
2981 def set(self, key, value):
2982+ """
2983+ Set a value in the database.
2984+
2985+ :param str key: Key to set the value for
2986+ :param value: Any JSON-serializable value to be set
2987+ """
2988 serialized = json.dumps(value)
2989
2990- self.cursor.execute(
2991- 'select data from kv where key=?', [key])
2992+ self.cursor.execute('select data from kv where key=?', [key])
2993 exists = self.cursor.fetchone()
2994
2995 # Skip mutations to the same value
2996
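A short sketch of the prefix-oriented helpers above (keys and values are invented); note the docstring change: nothing is persisted until flush() is called::

    from charmhelpers.core import unitdata

    db = unitdata.kv()
    db.update({'host': '10.0.0.1', 'port': 5432}, prefix='db.')

    # getrange() now returns {} rather than None when nothing matches.
    print(db.getrange('db.', strip=True))  # {'host': '10.0.0.1', 'port': 5432}

    db.unsetrange(prefix='db.')  # drop everything under the prefix
    db.flush()                   # persist the changes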
2997=== modified file 'charmhelpers/fetch/__init__.py'
2998--- charmhelpers/fetch/__init__.py 2015-04-29 12:52:18 +0000
2999+++ charmhelpers/fetch/__init__.py 2015-08-13 08:33:21 +0000
3000@@ -90,6 +90,14 @@
3001 'kilo/proposed': 'trusty-proposed/kilo',
3002 'trusty-kilo/proposed': 'trusty-proposed/kilo',
3003 'trusty-proposed/kilo': 'trusty-proposed/kilo',
3004+ # Liberty
3005+ 'liberty': 'trusty-updates/liberty',
3006+ 'trusty-liberty': 'trusty-updates/liberty',
3007+ 'trusty-liberty/updates': 'trusty-updates/liberty',
3008+ 'trusty-updates/liberty': 'trusty-updates/liberty',
3009+ 'liberty/proposed': 'trusty-proposed/liberty',
3010+ 'trusty-liberty/proposed': 'trusty-proposed/liberty',
3011+ 'trusty-proposed/liberty': 'trusty-proposed/liberty',
3012 }
3013
3014 # The order of this list is very important. Handlers should be listed in from
3015@@ -215,19 +223,27 @@
3016 _run_apt_command(cmd, fatal)
3017
3018
3019+def apt_mark(packages, mark, fatal=False):
3020+ """Flag one or more packages using apt-mark"""
3021+ cmd = ['apt-mark', mark]
3022+ if isinstance(packages, six.string_types):
3023+ cmd.append(packages)
3024+ else:
3025+ cmd.extend(packages)
3026+    log("Marking {} as {}".format(packages, mark))
3027+
3028+ if fatal:
3029+ subprocess.check_call(cmd, universal_newlines=True)
3030+ else:
3031+ subprocess.call(cmd, universal_newlines=True)
3032+
3033+
3034 def apt_hold(packages, fatal=False):
3035- """Hold one or more packages"""
3036- cmd = ['apt-mark', 'hold']
3037- if isinstance(packages, six.string_types):
3038- cmd.append(packages)
3039- else:
3040- cmd.extend(packages)
3041- log("Holding {}".format(packages))
3042-
3043- if fatal:
3044- subprocess.check_call(cmd)
3045- else:
3046- subprocess.call(cmd)
3047+ return apt_mark(packages, 'hold', fatal=fatal)
3048+
3049+
3050+def apt_unhold(packages, fatal=False):
3051+ return apt_mark(packages, 'unhold', fatal=fatal)
3052
3053
3054 def add_source(source, key=None):
3055@@ -370,8 +386,9 @@
3056 for handler in handlers:
3057 try:
3058 installed_to = handler.install(source, *args, **kwargs)
3059- except UnhandledSource:
3060- pass
3061+ except UnhandledSource as e:
3062+ log('Install source attempt unsuccessful: {}'.format(e),
3063+ level='WARNING')
3064 if not installed_to:
3065 raise UnhandledSource("No handler found for source {}".format(source))
3066 return installed_to
3067
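apt_hold() and apt_unhold() are now thin wrappers over apt_mark(); a hedged usage sketch with assumed package names::

    from charmhelpers.fetch import apt_hold, apt_mark, apt_unhold

    # Pin packages during a managed upgrade window ...
    apt_hold(['nova-common', 'neutron-common'], fatal=True)

    # ... and release the hold afterwards.
    apt_unhold(['nova-common', 'neutron-common'])

    # apt_mark() accepts any action apt-mark itself supports.
    apt_mark('ceph-common', 'auto')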
3068=== modified file 'charmhelpers/fetch/archiveurl.py'
3069--- charmhelpers/fetch/archiveurl.py 2015-02-11 21:41:57 +0000
3070+++ charmhelpers/fetch/archiveurl.py 2015-08-13 08:33:21 +0000
3071@@ -77,6 +77,8 @@
3072 def can_handle(self, source):
3073 url_parts = self.parse_url(source)
3074 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
3075+ # XXX: Why is this returning a boolean and a string? It's
3076+ # doomed to fail since "bool(can_handle('foo://'))" will be True.
3077 return "Wrong source type"
3078 if get_archive_handler(self.base_url(source)):
3079 return True
3080@@ -155,7 +157,11 @@
3081 else:
3082 algorithms = hashlib.algorithms_available
3083 if key in algorithms:
3084- check_hash(dld_file, value, key)
3085+ if len(value) != 1:
3086+ raise TypeError(
3087+ "Expected 1 hash value, not %d" % len(value))
3088+ expected = value[0]
3089+ check_hash(dld_file, expected, key)
3090 if checksum:
3091 check_hash(dld_file, checksum, hash_type)
3092 return extract(dld_file, dest)
3093
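For reference, the check_hash() call used in the fix above comes from charmhelpers.core.host; a minimal sketch with a made-up path and digest:

    from charmhelpers.core.host import check_hash

    # Raises ChecksumError if the file's digest does not match; hash_type
    # may be any algorithm known to hashlib (md5, sha1, sha256, ...).
    check_hash('/tmp/pkg.tar.gz', 'd41d8cd98f00b204e9800998ecf8427e',
               hash_type='md5')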
3094=== modified file 'charmhelpers/fetch/giturl.py'
3095--- charmhelpers/fetch/giturl.py 2015-05-27 12:55:44 +0000
3096+++ charmhelpers/fetch/giturl.py 2015-08-13 08:33:21 +0000
3097@@ -67,7 +67,7 @@
3098 try:
3099 self.clone(source, dest_dir, branch, depth)
3100 except GitCommandError as e:
3101- raise UnhandledSource(e.message)
3102+ raise UnhandledSource(e)
3103 except OSError as e:
3104 raise UnhandledSource(e.strerror)
3105 return dest_dir
3106
3107=== added directory 'docs/_extensions'
3108=== added file 'docs/_extensions/automembersummary.py'
3109--- docs/_extensions/automembersummary.py 1970-01-01 00:00:00 +0000
3110+++ docs/_extensions/automembersummary.py 2015-08-13 08:33:21 +0000
3111@@ -0,0 +1,86 @@
3112+# Copyright 2014-2015 Canonical Limited.
3113+#
3114+# This file is part of charm-helpers.
3115+#
3116+# charm-helpers is free software: you can redistribute it and/or modify
3117+# it under the terms of the GNU Lesser General Public License version 3 as
3118+# published by the Free Software Foundation.
3119+#
3120+# charm-helpers is distributed in the hope that it will be useful,
3121+# but WITHOUT ANY WARRANTY; without even the implied warranty of
3122+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3123+# GNU Lesser General Public License for more details.
3124+#
3125+# You should have received a copy of the GNU Lesser General Public License
3126+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3127+
3128+
3129+import inspect
3130+
3131+from docutils.parsers.rst import directives
3132+from sphinx.ext.autosummary import Autosummary
3133+from sphinx.ext.autosummary import get_import_prefixes_from_env
3134+from sphinx.ext.autosummary import import_by_name
3135+
3136+
3137+class AutoMemberSummary(Autosummary):
3138+ required_arguments = 0
3139+ optional_arguments = 0
3140+ final_argument_whitespace = False
3141+ has_content = True
3142+ option_spec = {
3143+ 'toctree': directives.unchanged,
3144+ 'nosignatures': directives.flag,
3145+ 'template': directives.unchanged,
3146+ }
3147+
3148+ def get_items(self, names):
3149+ env = self.state.document.settings.env
3150+ prefixes = get_import_prefixes_from_env(env)
3151+
3152+ items = []
3153+ prefix = ''
3154+ shorten = ''
3155+
3156+ def _get_items(name):
3157+ _items = super(AutoMemberSummary, self).get_items([shorten + name])
3158+ for dn, sig, summary, rn in _items:
3159+ items.append(('%s%s' % (prefix, dn), sig, summary, rn))
3160+
3161+ for name in names:
3162+ if '~' in name:
3163+ prefix, name = name.split('~')
3164+ shorten = '~'
3165+ else:
3166+ prefix = ''
3167+ shorten = ''
3168+
3169+ try:
3170+ real_name, obj, parent, _ = import_by_name(name, prefixes=prefixes)
3171+ except ImportError:
3172+ self.warn('failed to import %s' % name)
3173+ continue
3174+
3175+ if not inspect.ismodule(obj):
3176+ _get_items(name)
3177+ continue
3178+
3179+ for member in dir(obj):
3180+ if member.startswith('_'):
3181+ continue
3182+ mobj = getattr(obj, member)
3183+ if hasattr(mobj, '__module__'):
3184+ if not mobj.__module__.startswith(real_name):
3185+ continue # skip imported classes & functions
3186+ elif hasattr(mobj, '__name__'):
3187+ if not mobj.__name__.startswith(real_name):
3188+ continue # skip imported modules
3189+ else:
3190+ continue # skip instances
3191+ _get_items('%s.%s' % (name, member))
3192+
3193+ return items
3194+
3195+
3196+def setup(app):
3197+ app.add_directive('automembersummary', AutoMemberSummary)
3198
3199=== added file 'docs/api/charmhelpers.coordinator.rst'
3200--- docs/api/charmhelpers.coordinator.rst 1970-01-01 00:00:00 +0000
3201+++ docs/api/charmhelpers.coordinator.rst 2015-08-13 08:33:21 +0000
3202@@ -0,0 +1,10 @@
3203+charmhelpers.coordinator package
3204+================================
3205+
3206+charmhelpers.coordinator module
3207+-------------------------------
3208+
3209+.. automodule:: charmhelpers.coordinator
3210+ :members:
3211+ :undoc-members:
3212+ :show-inheritance:
3213
3214=== added file 'docs/api/charmhelpers.core.decorators.rst'
3215--- docs/api/charmhelpers.core.decorators.rst 1970-01-01 00:00:00 +0000
3216+++ docs/api/charmhelpers.core.decorators.rst 2015-08-13 08:33:21 +0000
3217@@ -0,0 +1,7 @@
3218+charmhelpers.core.decorators
3219+============================
3220+
3221+.. automodule:: charmhelpers.core.decorators
3222+ :members:
3223+ :undoc-members:
3224+ :show-inheritance:
3225
3226=== added file 'docs/api/charmhelpers.core.fstab.rst'
3227--- docs/api/charmhelpers.core.fstab.rst 1970-01-01 00:00:00 +0000
3228+++ docs/api/charmhelpers.core.fstab.rst 2015-08-13 08:33:21 +0000
3229@@ -0,0 +1,7 @@
3230+charmhelpers.core.fstab
3231+=======================
3232+
3233+.. automodule:: charmhelpers.core.fstab
3234+ :members:
3235+ :undoc-members:
3236+ :show-inheritance:
3237
3238=== added file 'docs/api/charmhelpers.core.hookenv.rst'
3239--- docs/api/charmhelpers.core.hookenv.rst 1970-01-01 00:00:00 +0000
3240+++ docs/api/charmhelpers.core.hookenv.rst 2015-08-13 08:33:21 +0000
3241@@ -0,0 +1,12 @@
3242+charmhelpers.core.hookenv
3243+=========================
3244+
3245+.. automembersummary::
3246+ :nosignatures:
3247+
3248+ ~charmhelpers.core.hookenv
3249+
3250+.. automodule:: charmhelpers.core.hookenv
3251+ :members:
3252+ :undoc-members:
3253+ :show-inheritance:
3254
3255=== added file 'docs/api/charmhelpers.core.host.rst'
3256--- docs/api/charmhelpers.core.host.rst 1970-01-01 00:00:00 +0000
3257+++ docs/api/charmhelpers.core.host.rst 2015-08-13 08:33:21 +0000
3258@@ -0,0 +1,12 @@
3259+charmhelpers.core.host
3260+======================
3261+
3262+.. automembersummary::
3263+ :nosignatures:
3264+
3265+ ~charmhelpers.core.host
3266+
3267+.. automodule:: charmhelpers.core.host
3268+ :members:
3269+ :undoc-members:
3270+ :show-inheritance:
3271
3272=== modified file 'docs/api/charmhelpers.core.rst'
3273--- docs/api/charmhelpers.core.rst 2014-08-05 21:28:01 +0000
3274+++ docs/api/charmhelpers.core.rst 2015-08-13 08:33:21 +0000
3275@@ -1,44 +1,17 @@
3276 charmhelpers.core package
3277 =========================
3278
3279-charmhelpers.core.fstab module
3280-------------------------------
3281-
3282-.. automodule:: charmhelpers.core.fstab
3283- :members:
3284- :undoc-members:
3285- :show-inheritance:
3286-
3287-charmhelpers.core.hookenv module
3288---------------------------------
3289-
3290-.. automodule:: charmhelpers.core.hookenv
3291- :members:
3292- :undoc-members:
3293- :show-inheritance:
3294-
3295-charmhelpers.core.host module
3296------------------------------
3297-
3298-.. automodule:: charmhelpers.core.host
3299- :members:
3300- :undoc-members:
3301- :show-inheritance:
3302-
3303-charmhelpers.core.services package
3304-----------------------------------
3305-
3306-.. automodule:: charmhelpers.core.services.base
3307- :members:
3308- :undoc-members:
3309- :show-inheritance:
3310- :special-members: __init__
3311-
3312-.. automodule:: charmhelpers.core.services.helpers
3313- :members:
3314- :undoc-members:
3315- :show-inheritance:
3316-
3317+.. toctree::
3318+
3319+ charmhelpers.core.decorators
3320+ charmhelpers.core.fstab
3321+ charmhelpers.core.hookenv
3322+ charmhelpers.core.host
3323+ charmhelpers.core.strutils
3324+ charmhelpers.core.sysctl
3325+ charmhelpers.core.templating
3326+ charmhelpers.core.unitdata
3327+ charmhelpers.core.services
3328
3329 .. automodule:: charmhelpers.core
3330 :members:
3331
3332=== added file 'docs/api/charmhelpers.core.services.base.rst'
3333--- docs/api/charmhelpers.core.services.base.rst 1970-01-01 00:00:00 +0000
3334+++ docs/api/charmhelpers.core.services.base.rst 2015-08-13 08:33:21 +0000
3335@@ -0,0 +1,12 @@
3336+charmhelpers.core.services.base
3337+===============================
3338+
3339+.. automembersummary::
3340+ :nosignatures:
3341+
3342+ ~charmhelpers.core.services.base
3343+
3344+.. automodule:: charmhelpers.core.services.base
3345+ :members:
3346+ :undoc-members:
3347+ :show-inheritance:
3348
3349=== added file 'docs/api/charmhelpers.core.services.helpers.rst'
3350--- docs/api/charmhelpers.core.services.helpers.rst 1970-01-01 00:00:00 +0000
3351+++ docs/api/charmhelpers.core.services.helpers.rst 2015-08-13 08:33:21 +0000
3352@@ -0,0 +1,12 @@
3353+charmhelpers.core.services.helpers
3354+==================================
3355+
3356+.. automembersummary::
3357+ :nosignatures:
3358+
3359+ ~charmhelpers.core.services.helpers
3360+
3361+.. automodule:: charmhelpers.core.services.helpers
3362+ :members:
3363+ :undoc-members:
3364+ :show-inheritance:
3365
3366=== added file 'docs/api/charmhelpers.core.services.rst'
3367--- docs/api/charmhelpers.core.services.rst 1970-01-01 00:00:00 +0000
3368+++ docs/api/charmhelpers.core.services.rst 2015-08-13 08:33:21 +0000
3369@@ -0,0 +1,12 @@
3370+charmhelpers.core.services
3371+==========================
3372+
3373+.. toctree::
3374+
3375+ charmhelpers.core.services.base
3376+ charmhelpers.core.services.helpers
3377+
3378+.. automodule:: charmhelpers.core.services
3379+ :members:
3380+ :undoc-members:
3381+ :show-inheritance:
3382
3383=== added file 'docs/api/charmhelpers.core.strutils.rst'
3384--- docs/api/charmhelpers.core.strutils.rst 1970-01-01 00:00:00 +0000
3385+++ docs/api/charmhelpers.core.strutils.rst 2015-08-13 08:33:21 +0000
3386@@ -0,0 +1,7 @@
3387+charmhelpers.core.strutils
3388+============================
3389+
3390+.. automodule:: charmhelpers.core.strutils
3391+ :members:
3392+ :undoc-members:
3393+ :show-inheritance:
3394
3395=== added file 'docs/api/charmhelpers.core.sysctl.rst'
3396--- docs/api/charmhelpers.core.sysctl.rst 1970-01-01 00:00:00 +0000
3397+++ docs/api/charmhelpers.core.sysctl.rst 2015-08-13 08:33:21 +0000
3398@@ -0,0 +1,7 @@
3399+charmhelpers.core.sysctl
3400+============================
3401+
3402+.. automodule:: charmhelpers.core.sysctl
3403+ :members:
3404+ :undoc-members:
3405+ :show-inheritance:
3406
3407=== added file 'docs/api/charmhelpers.core.templating.rst'
3408--- docs/api/charmhelpers.core.templating.rst 1970-01-01 00:00:00 +0000
3409+++ docs/api/charmhelpers.core.templating.rst 2015-08-13 08:33:21 +0000
3410@@ -0,0 +1,7 @@
3411+charmhelpers.core.templating
3412+============================
3413+
3414+.. automodule:: charmhelpers.core.templating
3415+ :members:
3416+ :undoc-members:
3417+ :show-inheritance:
3418
3419=== added file 'docs/api/charmhelpers.core.unitdata.rst'
3420--- docs/api/charmhelpers.core.unitdata.rst 1970-01-01 00:00:00 +0000
3421+++ docs/api/charmhelpers.core.unitdata.rst 2015-08-13 08:33:21 +0000
3422@@ -0,0 +1,7 @@
3423+charmhelpers.core.unitdata
3424+==========================
3425+
3426+.. automodule:: charmhelpers.core.unitdata
3427+ :members:
3428+ :undoc-members:
3429+ :show-inheritance:
3430
3431=== modified file 'docs/api/charmhelpers.rst'
3432--- docs/api/charmhelpers.rst 2014-06-09 17:10:38 +0000
3433+++ docs/api/charmhelpers.rst 2015-08-13 08:33:21 +0000
3434@@ -2,12 +2,14 @@
3435 =================
3436
3437 .. toctree::
3438- :maxdepth: 2
3439+ :maxdepth: 3
3440
3441+ charmhelpers.core
3442 charmhelpers.contrib
3443- charmhelpers.core
3444 charmhelpers.fetch
3445 charmhelpers.payload
3446+ charmhelpers.cli
3447+ charmhelpers.coordinator
3448
3449 .. automodule:: charmhelpers
3450 :members:
3451
3452=== removed file 'docs/api/modules.rst'
3453--- docs/api/modules.rst 2014-06-09 14:56:35 +0000
3454+++ docs/api/modules.rst 1970-01-01 00:00:00 +0000
3455@@ -1,7 +0,0 @@
3456-charmhelpers
3457-============
3458-
3459-.. toctree::
3460- :maxdepth: 4
3461-
3462- charmhelpers
3463
3464=== modified file 'docs/conf.py'
3465--- docs/conf.py 2014-09-23 16:34:54 +0000
3466+++ docs/conf.py 2015-08-13 08:33:21 +0000
3467@@ -19,6 +19,7 @@
3468 # add these directories to sys.path here. If the directory is relative to the
3469 # documentation root, use os.path.abspath to make it absolute, like shown here.
3470 sys.path.insert(0, os.path.abspath('../'))
3471+sys.path.append(os.path.abspath('_extensions/'))
3472
3473 # -- General configuration ------------------------------------------------
3474
3475@@ -30,6 +31,8 @@
3476 # ones.
3477 extensions = [
3478 'sphinx.ext.autodoc',
3479+ 'sphinx.ext.autosummary',
3480+ 'automembersummary',
3481 ]
3482
3483 # Add any paths that contain templates here, relative to this directory.
3484@@ -72,7 +75,7 @@
3485
3486 # List of patterns, relative to source directory, that match files and
3487 # directories to ignore when looking for source files.
3488-exclude_patterns = ['_build']
3489+exclude_patterns = ['_build', '_extensions']
3490
3491 # The reST default role (used for this markup: `text`) to use for all
3492 # documents.
3493
3494=== modified file 'setup.py'
3495--- setup.py 2015-03-04 16:15:18 +0000
3496+++ setup.py 2015-08-13 08:33:21 +0000
3497@@ -14,6 +14,13 @@
3498 'author': "Ubuntu Developers",
3499 'author_email': "ubuntu-devel-discuss@lists.ubuntu.com",
3500 'url': "https://code.launchpad.net/charm-helpers",
3501+ 'install_requires': [
3502+ 'netaddr',
3503+ 'PyYAML',
3504+ 'Tempita',
3505+ 'Jinja2',
3506+ 'six',
3507+ ],
3508 'packages': [
3509 "charmhelpers",
3510 "charmhelpers.cli",
3511@@ -22,13 +29,27 @@
3512 "charmhelpers.fetch",
3513 "charmhelpers.payload",
3514 "charmhelpers.contrib",
3515+ "charmhelpers.contrib.amulet",
3516 "charmhelpers.contrib.ansible",
3517 "charmhelpers.contrib.benchmark",
3518 "charmhelpers.contrib.charmhelpers",
3519 "charmhelpers.contrib.charmsupport",
3520+ "charmhelpers.contrib.database",
3521+ "charmhelpers.contrib.hahelpers",
3522+ "charmhelpers.contrib.network",
3523+ "charmhelpers.contrib.network.ovs",
3524+ "charmhelpers.contrib.openstack",
3525+ "charmhelpers.contrib.openstack.amulet",
3526+ "charmhelpers.contrib.openstack.files",
3527+ "charmhelpers.contrib.openstack.templates",
3528+ "charmhelpers.contrib.peerstorage",
3529+ "charmhelpers.contrib.python",
3530 "charmhelpers.contrib.saltstack",
3531- "charmhelpers.contrib.hahelpers",
3532+ "charmhelpers.contrib.ssl",
3533+ "charmhelpers.contrib.storage",
3534+ "charmhelpers.contrib.storage.linux",
3535 "charmhelpers.contrib.templating",
3536+ "charmhelpers.contrib.unison",
3537 ],
3538 'scripts': [
3539 "bin/chlp",
3540
3541=== modified file 'test_requirements.txt'
3542--- test_requirements.txt 2014-11-25 15:07:02 +0000
3543+++ test_requirements.txt 2015-08-13 08:33:21 +0000
3544@@ -3,10 +3,12 @@
3545 pip
3546 distribute
3547 coverage>=3.6
3548-mock>=1.0.1
3549+mock>=1.0.1,<1.1.0
3550 nose>=1.3.1
3551 flake8
3552 testtools==0.9.14 # Before dependent on modern 'six'
3553+amulet
3554+distro-info
3555 #
3556 # Specify precise versions of runtime dependencies where possible.
3557 netaddr==0.7.10 # trusty. precise is 0.7.5, but not in pypi.
3558
3559=== modified file 'tests/cli/test_cmdline.py'
3560--- tests/cli/test_cmdline.py 2014-11-25 15:04:52 +0000
3561+++ tests/cli/test_cmdline.py 2015-08-13 08:33:21 +0000
3562@@ -5,6 +5,7 @@
3563 from mock import (
3564 patch,
3565 MagicMock,
3566+ ANY,
3567 )
3568 import json
3569 from pprint import pformat
3570@@ -87,15 +88,61 @@
3571 @self.cl.subcommand()
3572 def bar(x, y=None, *vargs):
3573 "A function that does work."
3574- self.bar_called = True
3575- return "qux"
3576-
3577- args = ['foo', 'bar', 'baz']
3578- self.cl.formatter = MagicMock()
3579- with patch("sys.argv", args):
3580- self.cl.run()
3581- self.assertTrue(self.bar_called)
3582- self.assertTrue(self.cl.formatter.format_output.called)
3583+ self.assertEqual(x, 'baz')
3584+ self.assertEqual(y, 'why')
3585+ self.assertEqual(vargs, ('mux', 'zob'))
3586+ self.bar_called = True
3587+ return "qux"
3588+
3589+ args = ['chlp', 'bar', '--y', 'why', 'baz', 'mux', 'zob']
3590+ self.cl.formatter = MagicMock()
3591+ with patch("sys.argv", args):
3592+ with patch("charmhelpers.core.unitdata._KV") as _KV:
3593+ self.cl.run()
3594+ assert _KV.flush.called
3595+ self.assertTrue(self.bar_called)
3596+ self.cl.formatter.format_output.assert_called_once_with('qux', ANY)
3597+
3598+ def test_no_output(self):
3599+ self.bar_called = False
3600+
3601+ @self.cl.subcommand()
3602+ @self.cl.no_output
3603+ def bar(x, y=None, *vargs):
3604+ "A function that does work."
3605+ self.bar_called = True
3606+ return "qux"
3607+
3608+ args = ['foo', 'bar', 'baz']
3609+ self.cl.formatter = MagicMock()
3610+ with patch("sys.argv", args):
3611+ self.cl.run()
3612+ self.assertTrue(self.bar_called)
3613+ self.cl.formatter.format_output.assert_called_once_with('', ANY)
3614+
3615+ def test_test_command(self):
3616+ self.bar_called = False
3617+ self.bar_result = True
3618+
3619+ @self.cl.subcommand()
3620+ @self.cl.test_command
3621+ def bar(x, y=None, *vargs):
3622+ "A function that does work."
3623+ self.bar_called = True
3624+ return self.bar_result
3625+
3626+ args = ['foo', 'bar', 'baz']
3627+ self.cl.formatter = MagicMock()
3628+ with patch("sys.argv", args):
3629+ self.cl.run()
3630+ self.assertTrue(self.bar_called)
3631+ self.assertEqual(self.cl.exit_code, 0)
3632+ self.cl.formatter.format_output.assert_called_once_with('', ANY)
3633+
3634+ self.bar_result = False
3635+ with patch("sys.argv", args):
3636+ self.cl.run()
3637+ self.assertEqual(self.cl.exit_code, 1)
3638
3639
3640 class OutputFormatterTest(TestCase):
3641
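The new no_output and test_command cases above exercise the CLI decorators; a short sketch of how a charm script might use them, assuming the shared cmdline instance exposed by charmhelpers.cli (command names are illustrative):

    from charmhelpers.cli import cmdline

    @cmdline.subcommand()
    def unit_id(prefix):
        """Return a prefixed identifier; its value is printed by the CLI."""
        return '{}-unit'.format(prefix)

    @cmdline.subcommand()
    @cmdline.test_command
    def is_ready():
        """Exit 0 when ready, 1 otherwise; prints no output."""
        return True

    if __name__ == '__main__':
        cmdline.run()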
3642=== added directory 'tests/contrib/amulet'
3643=== added file 'tests/contrib/amulet/test_utils.py'
3644--- tests/contrib/amulet/test_utils.py 1970-01-01 00:00:00 +0000
3645+++ tests/contrib/amulet/test_utils.py 2015-08-13 08:33:21 +0000
3646@@ -0,0 +1,105 @@
3647+# Copyright 2015 Canonical Ltd.
3648+#
3649+# Authors:
3650+# Adam Collard <adam.collard@canonical.com>
3651+
3652+import unittest
3653+
3654+from charmhelpers.contrib.amulet.utils import AmuletUtils
3655+
3656+
3657+class FakeSentry(object):
3658+
3659+ commands = {}
3660+
3661+ info = {"unit_name": "foo"}
3662+
3663+ def run(self, command):
3664+ return self.commands[command]
3665+
3666+
3667+class ValidateServicesByNameTestCase(unittest.TestCase):
3668+
3669+ def setUp(self):
3670+ self.utils = AmuletUtils()
3671+ self.sentry_unit = FakeSentry()
3672+
3673+ def test_errors_for_unknown_upstart_service(self):
3674+ """
3675+ Returns a message if the Upstart service is unknown.
3676+ """
3677+ self.sentry_unit.commands["lsb_release -cs"] = "trusty", 0
3678+ self.sentry_unit.commands["sudo status foo"] = (
3679+ "status: Unknown job: foo", 1)
3680+
3681+ result = self.utils.validate_services_by_name(
3682+ {self.sentry_unit: ["foo"]})
3683+ self.assertIsNotNone(result)
3684+
3685+ def test_none_for_started_upstart_service(self):
3686+ """
3687+ Returns None if the Upstart service is running.
3688+ """
3689+ self.sentry_unit.commands["lsb_release -cs"] = "trusty", 0
3690+ self.sentry_unit.commands["sudo status foo"] = (
3691+ "foo start/running, process 42", 0)
3692+
3693+ result = self.utils.validate_services_by_name(
3694+ {self.sentry_unit: ["foo"]})
3695+ self.assertIsNone(result)
3696+
3697+ def test_errors_for_stopped_upstart_service(self):
3698+ """
3699+ Returns a message if the Upstart service is stopped.
3700+ """
3701+ self.sentry_unit.commands["lsb_release -cs"] = "trusty", 0
3702+ self.sentry_unit.commands["sudo status foo"] = "foo stop/waiting", 0
3703+
3704+ result = self.utils.validate_services_by_name(
3705+ {self.sentry_unit: ["foo"]})
3706+ self.assertIsNotNone(result)
3707+
3708+ def test_errors_for_unknown_systemd_service(self):
3709+ """
3710+ Returns a message if a systemd service is unknown.
3711+ """
3712+ self.sentry_unit.commands["lsb_release -cs"] = "vivid", 0
3713+ self.sentry_unit.commands["sudo service foo status"] = (u"""\
3714+\u25cf foo.service
3715+ Loaded: not-found (Reason: No such file or directory)
3716+ Active: inactive (dead)
3717+""", 3)
3718+
3719+ result = self.utils.validate_services_by_name({
3720+ self.sentry_unit: ["foo"]})
3721+ self.assertIsNotNone(result)
3722+
3723+ def test_none_for_started_systemd_service(self):
3724+ """
3725+ Returns None if a systemd service is running.
3726+ """
3727+ self.sentry_unit.commands["lsb_release -cs"] = "vivid", 0
3728+ self.sentry_unit.commands["sudo service foo status"] = (u"""\
3729+\u25cf foo.service - Foo
3730+ Loaded: loaded (/lib/systemd/system/foo.service; enabled)
3731+ Active: active (exited) since Thu 1970-01-01 00:00:00 UTC; 42h 42min ago
3732+ Main PID: 3 (code=exited, status=0/SUCCESS)
3733+ CGroup: /system.slice/foo.service
3734+""", 0)
3735+ result = self.utils.validate_services_by_name(
3736+ {self.sentry_unit: ["foo"]})
3737+ self.assertIsNone(result)
3738+
3739+ def test_errors_for_stopped_systemd_service(self):
3740+ """
3741+ Returns a message if a systemd service is stopped.
3742+ """
3743+ self.sentry_unit.commands["lsb_release -cs"] = "vivid", 0
3744+ self.sentry_unit.commands["sudo service foo status"] = (u"""\
3745+\u25cf foo.service - Foo
3746+ Loaded: loaded (/lib/systemd/system/foo.service; disabled)
3747+ Active: inactive (dead)
3748+""", 3)
3749+ result = self.utils.validate_services_by_name(
3750+ {self.sentry_unit: ["foo"]})
3751+ self.assertIsNotNone(result)
3752
3753=== modified file 'tests/contrib/benchmark/test_benchmark.py'
3754--- tests/contrib/benchmark/test_benchmark.py 2015-04-24 16:07:29 +0000
3755+++ tests/contrib/benchmark/test_benchmark.py 2015-08-13 08:33:21 +0000
3756@@ -1,3 +1,8 @@
3757+from functools import partial
3758+from os.path import join
3759+from tempfile import mkdtemp
3760+from shutil import rmtree
3761+
3762 import mock
3763 from testtools import TestCase
3764 # import unittest
3765@@ -33,7 +38,8 @@
3766 self.fake_relation = FakeRelation(FAKE_RELATION)
3767 # self.hook_name.return_value = 'benchmark-relation-changed'
3768
3769- self.relation_get.side_effect = self.fake_relation.get
3770+ self.relation_get.side_effect = partial(
3771+ self.fake_relation.get, rid="benchmark:0", unit="benchmark/0")
3772 self.relation_ids.side_effect = self.fake_relation.relation_ids
3773
3774 def _patch(self, method):
3775@@ -87,34 +93,32 @@
3776 check_call.assert_any_call(['action-set', 'baz.foo=1'])
3777 check_call.assert_any_call(['action-set', 'baz.bar=2'])
3778
3779- @mock.patch('charmhelpers.contrib.benchmark.relation_get')
3780- @mock.patch('charmhelpers.contrib.benchmark.relation_set')
3781 @mock.patch('charmhelpers.contrib.benchmark.relation_ids')
3782 @mock.patch('charmhelpers.contrib.benchmark.in_relation_hook')
3783- def test_benchmark_init(self, in_relation_hook, relation_ids, relation_set, relation_get):
3784+ def test_benchmark_init(self, in_relation_hook, relation_ids):
3785
3786 in_relation_hook.return_value = True
3787 relation_ids.return_value = ['benchmark:0']
3788 actions = ['asdf', 'foobar']
3789
3790- with patch_open() as (_open, _file):
3791+ tempdir = mkdtemp(prefix=self.__class__.__name__)
3792+ self.addCleanup(rmtree, tempdir)
3793+ conf_path = join(tempdir, "benchmark.conf")
3794+ with mock.patch.object(Benchmark, "BENCHMARK_CONF", conf_path):
3795 b = Benchmark(actions)
3796
3797 self.assertIsInstance(b, Benchmark)
3798
3799- self.assertTrue(relation_get.called)
3800- self.assertTrue(relation_set.called)
3801+ self.assertTrue(self.relation_get.called)
3802+ self.assertTrue(self.relation_set.called)
3803
3804 relation_ids.assert_called_once_with('benchmark')
3805
3806- for key in b.required_keys:
3807- relation_get.assert_any_call(key)
3808-
3809- relation_set.assert_called_once_with(
3810+ self.relation_set.assert_called_once_with(
3811 relation_id='benchmark:0',
3812 relation_settings={'benchmarks': ",".join(actions)}
3813 )
3814
3815- _open.assert_called_with('/etc/benchmark.conf', 'w')
3816+ conf_contents = open(conf_path).readlines()
3817 for key, val in iter(FAKE_RELATION['benchmark:0']['benchmark/0'].items()):
3818- _file.write.assert_any_called("%s=%s\n" % (key, val))
3819+ self.assertIn("%s=%s\n" % (key, val), conf_contents)
3820
3821=== modified file 'tests/contrib/hahelpers/test_apache_utils.py'
3822--- tests/contrib/hahelpers/test_apache_utils.py 2014-09-24 09:42:52 +0000
3823+++ tests/contrib/hahelpers/test_apache_utils.py 2015-08-13 08:33:21 +0000
3824@@ -115,4 +115,4 @@
3825 apache_utils.install_ca_cert(cert)
3826 _open.assert_called_with('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', 'w')
3827 _file.write.assert_called_with(cert)
3828- self.subprocess.assertCalledWith(['update-ca-certificates', '--fresh'])
3829+ self.subprocess.check_call.assert_called_with(['update-ca-certificates', '--fresh'])
3830
3831=== modified file 'tests/contrib/network/test_ufw.py'
3832--- tests/contrib/network/test_ufw.py 2015-02-12 20:08:28 +0000
3833+++ tests/contrib/network/test_ufw.py 2015-08-13 08:33:21 +0000
3834@@ -31,6 +31,12 @@
3835 xt_LOG 17702 0
3836 xt_limit 12711 0
3837 """
3838+DEFAULT_POLICY_OUTPUT = """Default incoming policy changed to 'deny'
3839+(be sure to update your rules accordingly)
3840+"""
3841+DEFAULT_POLICY_OUTPUT_OUTGOING = """Default outgoing policy changed to 'allow'
3842+(be sure to update your rules accordingly)
3843+"""
3844
3845
3846 class TestUFW(unittest.TestCase):
3847@@ -194,6 +200,24 @@
3848 @mock.patch('charmhelpers.contrib.network.ufw.is_enabled')
3849 @mock.patch('charmhelpers.core.hookenv.log')
3850 @mock.patch('subprocess.Popen')
3851+ def test_modify_access_with_index(self, popen, log, is_enabled):
3852+ is_enabled.return_value = True
3853+ p = mock.Mock()
3854+ p.configure_mock(**{'communicate.return_value': ('stdout', 'stderr'),
3855+ 'returncode': 0})
3856+ popen.return_value = p
3857+
3858+ ufw.modify_access('127.0.0.1', dst='127.0.0.1', port='80', index=1)
3859+ popen.assert_any_call(['ufw', 'insert', '1', 'allow', 'from',
3860+ '127.0.0.1', 'to', '127.0.0.1', 'port', '80'],
3861+ stdout=subprocess.PIPE)
3862+ log.assert_any_call(('ufw allow: ufw insert 1 allow from 127.0.0.1 '
3863+ 'to 127.0.0.1 port 80'), level='DEBUG')
3864+ log.assert_any_call('stdout', level='INFO')
3865+
3866+ @mock.patch('charmhelpers.contrib.network.ufw.is_enabled')
3867+ @mock.patch('charmhelpers.core.hookenv.log')
3868+ @mock.patch('subprocess.Popen')
3869 def test_grant_access(self, popen, log, is_enabled):
3870 is_enabled.return_value = True
3871 p = mock.Mock()
3872@@ -212,6 +236,24 @@
3873 @mock.patch('charmhelpers.contrib.network.ufw.is_enabled')
3874 @mock.patch('charmhelpers.core.hookenv.log')
3875 @mock.patch('subprocess.Popen')
3876+ def test_grant_access_with_index(self, popen, log, is_enabled):
3877+ is_enabled.return_value = True
3878+ p = mock.Mock()
3879+ p.configure_mock(**{'communicate.return_value': ('stdout', 'stderr'),
3880+ 'returncode': 0})
3881+ popen.return_value = p
3882+
3883+ ufw.grant_access('127.0.0.1', dst='127.0.0.1', port='80', index=1)
3884+ popen.assert_any_call(['ufw', 'insert', '1', 'allow', 'from',
3885+ '127.0.0.1', 'to', '127.0.0.1', 'port', '80'],
3886+ stdout=subprocess.PIPE)
3887+ log.assert_any_call(('ufw allow: ufw insert 1 allow from 127.0.0.1 '
3888+ 'to 127.0.0.1 port 80'), level='DEBUG')
3889+ log.assert_any_call('stdout', level='INFO')
3890+
3891+ @mock.patch('charmhelpers.contrib.network.ufw.is_enabled')
3892+ @mock.patch('charmhelpers.core.hookenv.log')
3893+ @mock.patch('subprocess.Popen')
3894 def test_revoke_access(self, popen, log, is_enabled):
3895 is_enabled.return_value = True
3896 p = mock.Mock()
3897@@ -366,3 +408,33 @@
3898 is_enabled.return_value = False
3899 isdir.return_value = True
3900 ufw.enable()
3901+
3902+ @mock.patch('charmhelpers.core.hookenv.log')
3903+ @mock.patch('subprocess.check_output')
3904+ def test_change_default_policy(self, check_output, log):
3905+ check_output.return_value = DEFAULT_POLICY_OUTPUT
3906+ self.assertTrue(ufw.default_policy())
3907+        check_output.assert_any_call(['ufw', 'default', 'deny', 'incoming'])
3908+
3909+ @mock.patch('charmhelpers.core.hookenv.log')
3910+ @mock.patch('subprocess.check_output')
3911+ def test_change_default_policy_allow_outgoing(self, check_output, log):
3912+ check_output.return_value = DEFAULT_POLICY_OUTPUT_OUTGOING
3913+ self.assertTrue(ufw.default_policy('allow', 'outgoing'))
3914+        check_output.assert_any_call(['ufw', 'default', 'allow', 'outgoing'])
3915+
3916+ @mock.patch('charmhelpers.core.hookenv.log')
3917+ @mock.patch('subprocess.check_output')
3918+ def test_change_default_policy_unexpected_output(self, check_output, log):
3919+ check_output.return_value = "asdf"
3920+ self.assertFalse(ufw.default_policy())
3921+
3922+ @mock.patch('charmhelpers.core.hookenv.log')
3923+ @mock.patch('subprocess.check_output')
3924+ def test_change_default_policy_wrong_policy(self, check_output, log):
3925+ self.assertRaises(ufw.UFWError, ufw.default_policy, 'asdf')
3926+
3927+ @mock.patch('charmhelpers.core.hookenv.log')
3928+ @mock.patch('subprocess.check_output')
3929+ def test_change_default_policy_wrong_direction(self, check_output, log):
3930+ self.assertRaises(ufw.UFWError, ufw.default_policy, 'allow', 'asdf')
3931
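A short sketch of the default_policy helper these new tests cover; the calls mirror the test fixtures:

    from charmhelpers.contrib.network import ufw

    # Deny incoming traffic by default; the helper returns True when ufw
    # acknowledges the change and False on unexpected output.
    ufw.default_policy('deny', 'incoming')
    ufw.default_policy('allow', 'outgoing')

    # Invalid policy or direction values raise ufw.UFWError.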
3932=== modified file 'tests/contrib/openstack/test_openstack_utils.py'
3933--- tests/contrib/openstack/test_openstack_utils.py 2015-05-11 18:53:44 +0000
3934+++ tests/contrib/openstack/test_openstack_utils.py 2015-08-13 08:33:21 +0000
3935@@ -26,6 +26,16 @@
3936 }
3937
3938 FAKE_REPO = {
3939+ 'neutron-common': {
3940+ 'pkg_vers': '2:7.0.0-0ubuntu1',
3941+ 'os_release': 'liberty',
3942+ 'os_version': '2015.2'
3943+ },
3944+ 'nova-common': {
3945+ 'pkg_vers': '2:12.0.0~b1-0ubuntu1',
3946+ 'os_release': 'liberty',
3947+ 'os_version': '2015.2'
3948+ },
3949 'nova-common': {
3950 'pkg_vers': '2012.2.3-0ubuntu2.1',
3951 'os_release': 'folsom',
3952@@ -489,8 +499,8 @@
3953 expected_f = '/var/lib/juju/units/testing-foo-0/charm/scripts/scriptrc'
3954 _open.assert_called_with(expected_f, 'wb')
3955 _mkdir.assert_called_with(os.path.dirname(expected_f))
3956- for line in scriptrc:
3957- _file.__enter__().write.assert_has_calls(call(line))
3958+ _file.__enter__().write.assert_has_calls(
3959+ list(call(line) for line in scriptrc), any_order=True)
3960
3961 @patch.object(openstack, 'lsb_release')
3962 @patch.object(openstack, 'get_os_version_package')
3963@@ -642,11 +652,13 @@
3964 error_out.assert_called_with(
3965 'openstack-origin-git key \'%s\' is missing' % key)
3966
3967+ @patch('os.path.join')
3968 @patch.object(openstack, 'error_out')
3969 @patch.object(openstack, '_git_clone_and_install_single')
3970+ @patch.object(openstack, 'pip_install')
3971 @patch.object(openstack, 'pip_create_virtualenv')
3972- def test_git_clone_and_install_errors(self, pip_venv, git_install_single,
3973- error_out):
3974+ def test_git_clone_and_install_errors(self, pip_venv, pip_install,
3975+ git_install_single, error_out, join):
3976 git_missing_repos = """
3977 repostories:
3978 - {name: requirements,
3979@@ -704,19 +716,26 @@
3980 openstack.git_clone_and_install(git_wrong_order_2, 'keystone', depth=1)
3981 error_out.assert_called_with('requirements git repo must be specified first')
3982
3983+ @patch('os.path.join')
3984 @patch.object(openstack, 'charm_dir')
3985 @patch.object(openstack, 'error_out')
3986 @patch.object(openstack, '_git_clone_and_install_single')
3987+ @patch.object(openstack, 'pip_install')
3988 @patch.object(openstack, 'pip_create_virtualenv')
3989- def test_git_clone_and_install_success(self, pip_venv, _git_install_single,
3990- error_out, charm_dir):
3991+ def test_git_clone_and_install_success(self, pip_venv, pip_install,
3992+ _git_install_single, error_out,
3993+ charm_dir, join):
3994 proj = 'keystone'
3995 charm_dir.return_value = '/var/lib/juju/units/testing-foo-0/charm'
3996 # the following sets the global requirements_dir
3997 _git_install_single.return_value = '/mnt/openstack-git/requirements'
3998+ join.return_value = '/mnt/openstack-git/venv'
3999
4000 openstack.git_clone_and_install(openstack_origin_git, proj, depth=1)
4001 self.assertTrue(pip_venv.called)
4002+ pip_install.assert_called_with('setuptools', upgrade=True,
4003+ proxy=None,
4004+ venv='/mnt/openstack-git/venv')
4005 self.assertTrue(_git_install_single.call_count == 2)
4006 expected = [
4007 call('git://git.openstack.org/openstack/requirements',
4008@@ -775,6 +794,7 @@
4009 parent_dir = '/mnt/openstack-git/'
4010 http_proxy = 'http://squid-proxy-url'
4011 dest_dir = '/mnt/openstack-git'
4012+ venv_dir = '/mnt/openstack-git'
4013 reqs_dir = '/mnt/openstack-git/requirements-dir'
4014 join.return_value = dest_dir
4015 openstack.requirements_dir = reqs_dir
4016@@ -786,23 +806,27 @@
4017 mkdir.assert_called_with(parent_dir)
4018 install_remote.assert_called_with(repo, dest=parent_dir, depth=1,
4019 branch=branch)
4020- _git_update_reqs.assert_called_with(dest_dir, reqs_dir)
4021+ _git_update_reqs.assert_called_with(venv_dir, dest_dir, reqs_dir)
4022 pip_install.assert_called_with(dest_dir, venv='/mnt/openstack-git',
4023 proxy='http://squid-proxy-url')
4024
4025+ @patch('os.path.join')
4026 @patch('os.getcwd')
4027 @patch('os.chdir')
4028 @patch('subprocess.check_call')
4029- def test_git_update_requirements(self, check_call, chdir, getcwd):
4030+ def test_git_update_requirements(self, check_call, chdir, getcwd, join):
4031 pkg_dir = '/mnt/openstack-git/repo-dir'
4032 reqs_dir = '/mnt/openstack-git/reqs-dir'
4033 orig_dir = '/var/lib/juju/units/testing-foo-0/charm'
4034+ venv_dir = '/mnt/openstack-git/venv'
4035 getcwd.return_value = orig_dir
4036+ join.return_value = '/mnt/openstack-git/venv/python'
4037
4038- openstack._git_update_requirements(pkg_dir, reqs_dir)
4039+ openstack._git_update_requirements(venv_dir, pkg_dir, reqs_dir)
4040 expected = [call(reqs_dir), call(orig_dir)]
4041 self.assertEquals(expected, chdir.call_args_list)
4042- check_call.assert_called_with(['python', 'update.py', pkg_dir])
4043+ check_call.assert_called_with(['/mnt/openstack-git/venv/python',
4044+ 'update.py', pkg_dir])
4045
4046 @patch('os.path.join')
4047 @patch('subprocess.check_call')
4048
4049=== modified file 'tests/contrib/openstack/test_os_contexts.py'
4050--- tests/contrib/openstack/test_os_contexts.py 2015-04-29 12:52:18 +0000
4051+++ tests/contrib/openstack/test_os_contexts.py 2015-08-13 08:33:21 +0000
4052@@ -73,7 +73,11 @@
4053 return None
4054
4055 def relation_ids(self, relation):
4056- return self.relation_data.keys()
4057+ rids = []
4058+ for rid in self.relation_data.keys():
4059+ if relation + ':' in rid:
4060+ rids.append(rid)
4061+ return rids
4062
4063 def relation_units(self, relation_id):
4064 if relation_id not in self.relation_data:
4065@@ -325,6 +329,25 @@
4066 - [glance-key2, value2]
4067 """
4068
4069+NOVA_SUB_CONFIG1 = """
4070+nova:
4071+ /etc/nova/nova.conf:
4072+ sections:
4073+ DEFAULT:
4074+ - [nova-key1, value1]
4075+ - [nova-key2, value2]
4076+"""
4077+
4078+
4079+NOVA_SUB_CONFIG2 = """
4080+nova-compute:
4081+ /etc/nova/nova.conf:
4082+ sections:
4083+ DEFAULT:
4084+ - [nova-key3, value3]
4085+ - [nova-key4, value4]
4086+"""
4087+
4088 CINDER_SUB_CONFIG1 = """
4089 cinder:
4090 /etc/cinder/cinder.conf:
4091@@ -376,6 +399,21 @@
4092 },
4093 }
4094
4095+SUB_CONFIG_RELATION2 = {
4096+ 'nova-ceilometer:6': {
4097+ 'ceilometer-agent/0': {
4098+ 'private-address': 'nova_node1',
4099+ 'subordinate_configuration': json.dumps(yaml.load(NOVA_SUB_CONFIG1)),
4100+ },
4101+ },
4102+ 'neutron-plugin:3': {
4103+ 'neutron-ovs-plugin/0': {
4104+ 'private-address': 'nova_node1',
4105+ 'subordinate_configuration': json.dumps(yaml.load(NOVA_SUB_CONFIG2)),
4106+ },
4107+ }
4108+}
4109+
4110 NONET_CONFIG = {
4111 'vip': 'cinderhost1vip',
4112 'os-internal-network': None,
4113@@ -2053,6 +2091,27 @@
4114 # subordinate supplies bad input
4115 self.assertEquals(foo_sub_ctxt(), {'sections': {}})
4116
4117+ def test_os_subordinate_config_context_multiple(self):
4118+ relation = FakeRelation(relation_data=SUB_CONFIG_RELATION2)
4119+ self.relation_get.side_effect = relation.get
4120+ self.relation_ids.side_effect = relation.relation_ids
4121+ self.related_units.side_effect = relation.relation_units
4122+ nova_sub_ctxt = context.SubordinateConfigContext(
4123+ service=['nova', 'nova-compute'],
4124+ config_file='/etc/nova/nova.conf',
4125+ interface=['nova-ceilometer', 'neutron-plugin'],
4126+ )
4127+ self.assertEquals(
4128+ nova_sub_ctxt(),
4129+ {'sections': {
4130+ 'DEFAULT': [
4131+ ['nova-key1', 'value1'],
4132+ ['nova-key2', 'value2'],
4133+ ['nova-key3', 'value3'],
4134+ ['nova-key4', 'value4']]
4135+ }}
4136+ )
4137+
4138 def test_syslog_context(self):
4139 self.config.side_effect = fake_config({'use-syslog': 'foo'})
4140 syslog = context.SyslogContext()
4141
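A sketch of how a principal charm would consume subordinate configuration from several interfaces at once, mirroring test_os_subordinate_config_context_multiple above:

    from charmhelpers.contrib.openstack import context

    # Sections supplied by each related subordinate unit are merged into a
    # single 'sections' dict for /etc/nova/nova.conf.
    nova_sub_ctxt = context.SubordinateConfigContext(
        service=['nova', 'nova-compute'],
        config_file='/etc/nova/nova.conf',
        interface=['nova-ceilometer', 'neutron-plugin'],
    )
    sections = nova_sub_ctxt().get('sections', {})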
4142=== modified file 'tests/contrib/peerstorage/test_peerstorage.py'
4143--- tests/contrib/peerstorage/test_peerstorage.py 2015-06-03 14:46:50 +0000
4144+++ tests/contrib/peerstorage/test_peerstorage.py 2015-08-13 08:33:21 +0000
4145@@ -202,7 +202,7 @@
4146 l_settings = {'s3': 3}
4147 r_settings = {'s1': 1, 's2': 2}
4148
4149- def mock_relation_get(attribute=None, unit=None):
4150+ def mock_relation_get(attribute=None, unit=None, rid=None):
4151 if attribute:
4152 if attribute in r_settings:
4153 return r_settings.get(attribute)
4154@@ -237,11 +237,11 @@
4155 self.assertEqual(_dicta, _dictb)
4156
4157 migration_key = '__leader_get_migrated_settings__'
4158- self.relation_get.side_effect = mock_relation_get
4159+ self._relation_get.side_effect = mock_relation_get
4160 self._leader_get.side_effect = mock_leader_get
4161 self.leader_set.side_effect = mock_leader_set
4162
4163- self.assertEqual({'s1': 1, 's2': 2}, peerstorage.relation_get())
4164+ self.assertEqual({'s1': 1, 's2': 2}, peerstorage._relation_get())
4165 self.assertEqual({'s3': 3}, peerstorage._leader_get())
4166 self.assertEqual({'s1': 1, 's2': 2, 's3': 3}, peerstorage.leader_get())
4167 check_leader_db({'s1': 1, 's2': 2, 's3': 3,
4168@@ -274,7 +274,7 @@
4169
4170 peerstorage.leader_set.reset_mock()
4171 self.assertEqual({'s1': 1, 's2': 2, 's3': 2, 's4': 3},
4172- peerstorage.relation_get())
4173+ peerstorage._relation_get())
4174 check_leader_db({'s1': 1, 's3': 3, 's4': 4,
4175 migration_key: '["s1", "s4"]'},
4176 peerstorage._leader_get())
4177@@ -290,7 +290,7 @@
4178 l_settings = {'s3': 3}
4179 r_settings = {'s1': 1, 's2': 2}
4180
4181- def mock_relation_get(attribute=None, unit=None):
4182+ def mock_relation_get(attribute=None, unit=None, rid=None):
4183 if attribute:
4184 if attribute in r_settings:
4185 return r_settings.get(attribute)
4186@@ -314,10 +314,10 @@
4187
4188 l_settings.update(kwargs)
4189
4190- self.relation_get.side_effect = mock_relation_get
4191+ self._relation_get.side_effect = mock_relation_get
4192 self._leader_get.side_effect = mock_leader_get
4193 self.leader_set.side_effect = mock_leader_set
4194- self.assertEqual({'s1': 1, 's2': 2}, peerstorage.relation_get())
4195+ self.assertEqual({'s1': 1, 's2': 2}, peerstorage._relation_get())
4196 self.assertEqual({'s3': 3}, peerstorage._leader_get())
4197 self.assertEqual({'s3': 3}, peerstorage.leader_get())
4198 self.assertEqual({'s3': 3}, l_settings)
4199
4200=== modified file 'tests/contrib/python/test_debug.py' (properties changed: -x to +x)
4201--- tests/contrib/python/test_debug.py 2015-02-11 21:41:57 +0000
4202+++ tests/contrib/python/test_debug.py 2015-08-13 08:33:21 +0000
4203@@ -51,4 +51,4 @@
4204 """
4205 self.set_trace()
4206 self.Rpdb.set_trace.side_effect = Exception()
4207- self._error.assert_called_once()
4208+ self.assertTrue(self._error.called)
4209
4210=== modified file 'tests/contrib/storage/test_linux_ceph.py'
4211--- tests/contrib/storage/test_linux_ceph.py 2015-01-13 11:17:57 +0000
4212+++ tests/contrib/storage/test_linux_ceph.py 2015-08-13 08:33:21 +0000
4213@@ -62,7 +62,7 @@
4214 '''It creates a new ceph keyring'''
4215 _exists.return_value = True
4216 ceph_utils.create_keyring('cinder', 'cephkey')
4217- self.log.assert_called()
4218+ self.assertTrue(self.log.called)
4219 self.check_call.assert_not_called()
4220
4221 @patch('os.remove')
4222@@ -72,7 +72,7 @@
4223 _exists.return_value = True
4224 ceph_utils.delete_keyring('cinder')
4225 _remove.assert_called_with('/etc/ceph/ceph.client.cinder.keyring')
4226- self.log.assert_called()
4227+ self.assertTrue(self.log.called)
4228
4229 @patch('os.remove')
4230 @patch('os.path.exists')
4231@@ -80,7 +80,7 @@
4232 '''It creates a new ceph keyring.'''
4233 _exists.return_value = False
4234 ceph_utils.delete_keyring('cinder')
4235- self.log.assert_called()
4236+ self.assertTrue(self.log.called)
4237 _remove.assert_not_called()
4238
4239 @patch('os.path.exists')
4240@@ -90,14 +90,14 @@
4241 with patch_open() as (_open, _file):
4242 ceph_utils.create_key_file('cinder', 'cephkey')
4243 _file.write.assert_called_with('cephkey')
4244- self.log.assert_called()
4245+ self.assertTrue(self.log.called)
4246
4247 @patch('os.path.exists')
4248 def test_create_key_file_already_exists(self, _exists):
4249 '''It creates a new ceph keyring'''
4250 _exists.return_value = True
4251 ceph_utils.create_key_file('cinder', 'cephkey')
4252- self.log.assert_called()
4253+ self.assertTrue(self.log.called)
4254
4255 @patch('os.mkdir')
4256 @patch.object(ceph_utils, 'apt_install')
4257@@ -171,7 +171,7 @@
4258 self._patch('pool_exists')
4259 self.pool_exists.return_value = True
4260 ceph_utils.create_pool(service='cinder', name='foo')
4261- self.log.assert_called()
4262+ self.assertTrue(self.log.called)
4263 self.check_call.assert_not_called()
4264
4265 def test_keyring_path(self):
4266@@ -202,14 +202,14 @@
4267 def test_rbd_exists(self):
4268 self.check_output.return_value = LS_RBDS
4269 self.assertTrue(ceph_utils.rbd_exists('service', 'pool', 'rbd1'))
4270- self.check_output.assert_call_with(
4271+ self.check_output.assert_called_with(
4272 ['rbd', 'list', '--id', 'service', '--pool', 'pool']
4273 )
4274
4275 def test_rbd_does_not_exist(self):
4276 self.check_output.return_value = LS_RBDS
4277 self.assertFalse(ceph_utils.rbd_exists('service', 'pool', 'rbd4'))
4278- self.check_output.assert_call_with(
4279+ self.check_output.assert_called_with(
4280 ['rbd', 'list', '--id', 'service', '--pool', 'pool']
4281 )
4282
4283@@ -304,7 +304,7 @@
4284 _file.read.return_value = 'anothermod\n'
4285 ceph_utils.modprobe('mymod')
4286 _open.assert_called_with('/etc/modules', 'r+')
4287- _file.read.assert_called()
4288+ _file.read.assert_called_with()
4289 _file.write.assert_called_with('mymod')
4290 self.check_call.assert_called_with(['modprobe', 'mymod'])
4291
4292@@ -318,14 +318,14 @@
4293 def test_make_filesystem(self, _exists):
4294 _exists.return_value = True
4295 ceph_utils.make_filesystem('/dev/sdd')
4296- self.log.assert_called()
4297+ self.assertTrue(self.log.called)
4298 self.check_call.assert_called_with(['mkfs', '-t', 'ext4', '/dev/sdd'])
4299
4300 @patch('os.path.exists')
4301 def test_make_filesystem_xfs(self, _exists):
4302 _exists.return_value = True
4303 ceph_utils.make_filesystem('/dev/sdd', 'xfs')
4304- self.log.assert_called()
4305+ self.assertTrue(self.log.called)
4306 self.check_call.assert_called_with(['mkfs', '-t', 'xfs', '/dev/sdd'])
4307
4308 @patch('os.chown')
4309
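The assertion changes in this file all address the same pitfall: with the mock release pinned by this branch (mock<1.1.0), calling a misspelled or non-existent assert_* method on a Mock merely creates a child mock and silently passes. A small illustration of the safer pattern used above:

    from mock import MagicMock

    m = MagicMock()
    # On mock<1.1.0, m.assert_called() is just another auto-created
    # attribute, so it would "pass" even though m was never called.
    # Checking the recorded state explicitly verifies real behaviour:
    assert m.called is False
    m('hello')
    assert m.called is True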
4310=== modified file 'tests/contrib/storage/test_linux_storage_utils.py'
4311--- tests/contrib/storage/test_linux_storage_utils.py 2014-11-25 13:38:01 +0000
4312+++ tests/contrib/storage/test_linux_storage_utils.py 2015-08-13 08:33:21 +0000
4313@@ -16,8 +16,9 @@
4314 '''It calls sgdisk correctly to zap disk'''
4315 check_output.return_value = b'200\n'
4316 storage_utils.zap_disk('/dev/foo')
4317- call.assert_any_call(['sgdisk', '--zap-all', '--mbrtogpt',
4318- '--clear', '/dev/foo'])
4319+ call.assert_any_call(['sgdisk', '--zap-all', '--', '/dev/foo'])
4320+ call.assert_any_call(['sgdisk', '--clear', '--mbrtogpt',
4321+ '--', '/dev/foo'])
4322 check_output.assert_any_call(['blockdev', '--getsz', '/dev/foo'])
4323 check_call.assert_any_call(['dd', 'if=/dev/zero', 'of=/dev/foo',
4324 'bs=1M', 'count=1'])
4325@@ -88,6 +89,14 @@
4326 self.assertFalse(result)
4327
4328 @patch(STORAGE_LINUX_UTILS + '.check_output')
4329+ def test_is_device_mounted_full_disks(self, check_output):
4330+ '''It detects mounted full disks as mounted.'''
4331+ check_output.return_value = (
4332+ b"/dev/sda on / type ext4 (rw,errors=remount-ro)\n")
4333+ result = storage_utils.is_device_mounted('/dev/sda')
4334+ self.assertTrue(result)
4335+
4336+ @patch(STORAGE_LINUX_UTILS + '.check_output')
4337 def test_is_device_mounted_cciss(self, check_output):
4338 '''It detects mounted cciss partitions as mounted.'''
4339 check_output.return_value = (
4340
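A brief sketch of the storage helpers these tests cover; the device path is illustrative:

    from charmhelpers.contrib.storage.linux.utils import (
        is_device_mounted,
        zap_disk,
    )

    # Only wipe partition tables on devices that are not currently mounted.
    if not is_device_mounted('/dev/sdb'):
        zap_disk('/dev/sdb')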
4341=== modified file 'tests/contrib/unison/test_unison.py'
4342--- tests/contrib/unison/test_unison.py 2015-04-03 15:23:46 +0000
4343+++ tests/contrib/unison/test_unison.py 2015-08-13 08:33:21 +0000
4344@@ -74,7 +74,7 @@
4345 self.assertIn(call(_call), self.check_call.call_args_list)
4346
4347 @patch('os.path.isfile')
4348- def test_create_private_key(self, isfile):
4349+ def test_create_private_key_rsa(self, isfile):
4350 create_cmd = [
4351 'ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048',
4352 '-f', '/home/foo/.ssh/id_rsa']
4353@@ -100,6 +100,36 @@
4354 _ensure_perms()
4355
4356 @patch('os.path.isfile')
4357+ def test_create_private_key_ecdsa(self, isfile):
4358+ create_cmd = [
4359+ 'ssh-keygen', '-q', '-N', '', '-t', 'ecdsa', '-b', '521',
4360+ '-f', '/home/foo/.ssh/id_ecdsa']
4361+
4362+ def _ensure_perms():
4363+ cmds = [
4364+ ['chown', 'foo', '/home/foo/.ssh/id_ecdsa'],
4365+ ['chmod', '0600', '/home/foo/.ssh/id_ecdsa'],
4366+ ]
4367+ self._ensure_calls_in(cmds)
4368+
4369+ isfile.return_value = False
4370+ unison.create_private_key(
4371+ user='foo',
4372+ priv_key_path='/home/foo/.ssh/id_ecdsa',
4373+ key_type='ecdsa')
4374+ self.assertIn(call(create_cmd), self.check_call.call_args_list)
4375+ _ensure_perms()
4376+ self.check_call.call_args_list = []
4377+
4378+ isfile.return_value = True
4379+ unison.create_private_key(
4380+ user='foo',
4381+ priv_key_path='/home/foo/.ssh/id_ecdsa',
4382+ key_type='ecdsa')
4383+ self.assertNotIn(call(create_cmd), self.check_call.call_args_list)
4384+ _ensure_perms()
4385+
4386+ @patch('os.path.isfile')
4387 def test_create_public_key(self, isfile):
4388 create_cmd = ['ssh-keygen', '-y', '-f', '/home/foo/.ssh/id_rsa']
4389 isfile.return_value = True
4390@@ -273,6 +303,33 @@
4391 write_hosts.assert_called_with('foo', ['host1', 'host2'])
4392 self.relation_set.assert_called_with(ssh_authorized_hosts='host1:host2')
4393
4394+ @patch.object(unison, 'write_known_hosts')
4395+ @patch.object(unison, 'write_authorized_keys')
4396+ @patch.object(unison, 'get_keypair')
4397+ @patch.object(unison, 'ensure_user')
4398+ def test_ssh_auth_peer_departed(self, ensure_user, get_keypair,
4399+ write_keys, write_hosts):
4400+ get_keypair.return_value = ('privkey', 'pubkey')
4401+
4402+ self.hook_name.return_value = 'cluster-relation-departed'
4403+
4404+ self.relation_get.side_effect = [
4405+ 'key1',
4406+ 'host1',
4407+ 'key2',
4408+ 'host2',
4409+ '', ''
4410+ ]
4411+ unison.ssh_authorized_peers(peer_interface='cluster',
4412+ user='foo', group='foo',
4413+ ensure_local_user=True)
4414+
4415+ ensure_user.assert_called_with('foo', 'foo')
4416+ get_keypair.assert_called_with('foo')
4417+ write_keys.assert_called_with('foo', ['key1', 'key2'])
4418+ write_hosts.assert_called_with('foo', ['host1', 'host2'])
4419+ self.relation_set.assert_called_with(ssh_authorized_hosts='host1:host2')
4420+
4421 def test_collect_authed_hosts(self):
4422 # only one of the hosts in fake environment has auth'd
4423 # the local peer
4424
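A hedged sketch of the peer SSH authorisation flow the departed-hook test above exercises; the user and group names are illustrative:

    from charmhelpers.contrib import unison

    # Typically invoked from the cluster relation's joined, changed and
    # departed hooks so authorized keys and known hosts stay in sync.
    unison.ssh_authorized_peers(peer_interface='cluster',
                                user='juju_sync', group='juju_sync',
                                ensure_local_user=True)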
4425=== added directory 'tests/coordinator'
4426=== added file 'tests/coordinator/__init__.py'
4427=== added file 'tests/coordinator/test_coordinator.py'
4428--- tests/coordinator/test_coordinator.py 1970-01-01 00:00:00 +0000
4429+++ tests/coordinator/test_coordinator.py 2015-08-13 08:33:21 +0000
4430@@ -0,0 +1,535 @@
4431+# Copyright 2014-2015 Canonical Limited.
4432+#
4433+# This file is part of charm-helpers.
4434+#
4435+# charm-helpers is free software: you can redistribute it and/or modify
4436+# it under the terms of the GNU Lesser General Public License version 3 as
4437+# published by the Free Software Foundation.
4438+#
4439+# charm-helpers is distributed in the hope that it will be useful,
4440+# but WITHOUT ANY WARRANTY; without even the implied warranty of
4441+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4442+# GNU Lesser General Public License for more details.
4443+#
4444+# You should have received a copy of the GNU Lesser General Public License
4445+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4446+from datetime import datetime, timedelta
4447+import json
4448+import tempfile
4449+import unittest
4450+from mock import call, MagicMock, patch, sentinel
4451+
4452+from charmhelpers import coordinator
4453+from charmhelpers.core import hookenv
4454+
4455+
4456+class TestCoordinator(unittest.TestCase):
4457+
4458+ def setUp(self):
4459+ del hookenv._atstart[:]
4460+ del hookenv._atexit[:]
4461+ hookenv.cache.clear()
4462+ coordinator.Singleton._instances.clear()
4463+
4464+ def install(patch):
4465+ patch.start()
4466+ self.addCleanup(patch.stop)
4467+
4468+ install(patch.object(hookenv, 'local_unit', return_value='foo/1'))
4469+ install(patch.object(hookenv, 'is_leader', return_value=False))
4470+ install(patch.object(hookenv, 'metadata',
4471+ return_value={'peers': {'cluster': None}}))
4472+ install(patch.object(hookenv, 'log'))
4473+
4474+ # Ensure _timestamp always increases.
4475+ install(patch.object(coordinator, '_utcnow',
4476+ side_effect=self._utcnow))
4477+
4478+ _last_utcnow = datetime(2015, 1, 1, 00, 00)
4479+
4480+ def _utcnow(self, ts=coordinator._timestamp):
4481+ self._last_utcnow += timedelta(minutes=1)
4482+ return self._last_utcnow
4483+
4484+ def test_is_singleton(self):
4485+ # BaseCoordinator and subclasses are singletons. Placing this
4486+ # burden on charm authors is impractical, particularly if
4487+ # libraries start wanting to use coordinator instances.
4488+ # With singletons, we don't need to worry about sharing state
4489+ # between instances or have them stomping on each other when they
4490+ # need to serialize their state.
4491+ self.assertTrue(coordinator.BaseCoordinator()
4492+ is coordinator.BaseCoordinator())
4493+ self.assertTrue(coordinator.Serial() is coordinator.Serial())
4494+ self.assertFalse(coordinator.BaseCoordinator() is coordinator.Serial())
4495+
4496+ @patch.object(hookenv, 'atstart')
4497+ def test_implicit_initialize_and_handle(self, atstart):
4498+ # When you construct a BaseCoordinator(), its initialize() and
4499+ # handle() method are invoked automatically every hook. This
4500+ # is done using hookenv.atstart
4501+ c = coordinator.BaseCoordinator()
4502+ atstart.assert_has_calls([call(c.initialize), call(c.handle)])
4503+
4504+ @patch.object(hookenv, 'has_juju_version', return_value=False)
4505+ def test_initialize_enforces_juju_version(self, has_juju_version):
4506+ c = coordinator.BaseCoordinator()
4507+ with self.assertRaises(AssertionError):
4508+ c.initialize()
4509+ has_juju_version.assert_called_once_with('1.23')
4510+
4511+ @patch.object(hookenv, 'atexit')
4512+ @patch.object(hookenv, 'has_juju_version', return_value=True)
4513+ @patch.object(hookenv, 'relation_ids')
4514+ def test_initialize(self, relation_ids, ver, atexit):
4515+        # The first initialization is done before there is a peer relation.
4516+ relation_ids.return_value = []
4517+ c = coordinator.BaseCoordinator()
4518+
4519+ with patch.object(c, '_load_state') as _load_state, \
4520+ patch.object(c, '_emit_state') as _emit_state: # IGNORE: E127
4521+ c.initialize()
4522+ _load_state.assert_called_once_with()
4523+ _emit_state.assert_called_once_with()
4524+
4525+ self.assertEqual(c.relname, 'cluster')
4526+ self.assertIsNone(c.relid)
4527+ relation_ids.assert_called_once_with('cluster')
4528+
4529+ # Methods installed to save state and release locks if the
4530+ # hook is successful.
4531+ atexit.assert_has_calls([call(c._save_state),
4532+ call(c._release_granted)])
4533+
4534+ # If we have a peer relation, the id is stored.
4535+ relation_ids.return_value = ['cluster:1']
4536+ c = coordinator.BaseCoordinator()
4537+ with patch.object(c, '_load_state'), patch.object(c, '_emit_state'):
4538+ c.initialize()
4539+ self.assertEqual(c.relid, 'cluster:1')
4540+
4541+ # If we are already initialized, nothing happens.
4542+ c.grants = {}
4543+ c.requests = {}
4544+ c.initialize()
4545+
4546+ def test_acquire(self):
4547+ c = coordinator.BaseCoordinator()
4548+ lock = 'mylock'
4549+ c.grants = {}
4550+ c.requests = {hookenv.local_unit(): {}}
4551+
4552+ # We are not the leader, so first acquire will return False.
4553+ self.assertFalse(c.acquire(lock))
4554+
4555+ # But the request is in the queue.
4556+ self.assertTrue(c.requested(lock))
4557+ ts = c.request_timestamp(lock)
4558+
4559+        # Further attempts to acquire the lock do nothing,
4560+ # and the timestamp of the request remains unchanged.
4561+ self.assertFalse(c.acquire(lock))
4562+ self.assertEqual(ts, c.request_timestamp(lock))
4563+
4564+ # Once the leader has granted the lock, acquire returns True.
4565+ with patch.object(c, 'granted') as granted:
4566+ granted.return_value = True
4567+ self.assertTrue(c.acquire(lock))
4568+ granted.assert_called_once_with(lock)
4569+
4570+ def test_acquire_leader(self):
4571+ # When acquire() is called by the leader, it needs
4572+ # to make a grant decision immediately. It can't defer
4573+ # making the decision until a future hook, as no future
4574+ # hooks will be triggered.
4575+ hookenv.is_leader.return_value = True
4576+ c = coordinator.Serial() # Not Base. Test hooks into default_grant.
4577+ lock = 'mylock'
4578+ unit = hookenv.local_unit()
4579+ c.grants = {}
4580+ c.requests = {unit: {}}
4581+ with patch.object(c, 'default_grant') as default_grant:
4582+ default_grant.side_effect = iter([False, True])
4583+
4584+ self.assertFalse(c.acquire(lock))
4585+ ts = c.request_timestamp(lock)
4586+
4587+ self.assertTrue(c.acquire(lock))
4588+ self.assertEqual(ts, c.request_timestamp(lock))
4589+
4590+        # If it is granted, the leader doesn't make a decision again.
4591+ self.assertTrue(c.acquire(lock))
4592+ self.assertEqual(ts, c.request_timestamp(lock))
4593+
4594+ self.assertEqual(default_grant.call_count, 2)
4595+
4596+ def test_granted(self):
4597+ c = coordinator.BaseCoordinator()
4598+ unit = hookenv.local_unit()
4599+ lock = 'mylock'
4600+ ts = coordinator._timestamp()
4601+ c.grants = {}
4602+
4603+ # Unit makes a request, but it isn't granted
4604+ c.requests = {unit: {lock: ts}}
4605+ self.assertFalse(c.granted(lock))
4606+
4607+ # Once the leader has granted the request, all good.
4608+ # It does this by mirroring the request timestamp.
4609+ c.grants = {unit: {lock: ts}}
4610+ self.assertTrue(c.granted(lock))
4611+
4612+ # The unit releases the lock by removing the request.
4613+ c.requests = {unit: {}}
4614+ self.assertFalse(c.granted(lock))
4615+
4616+ # If the unit makes a new request before the leader
4617+ # has had a chance to do its housekeeping, the timestamps
4618+        # do not match and the lock is not considered granted.
4619+ ts = coordinator._timestamp()
4620+ c.requests = {unit: {lock: ts}}
4621+ self.assertFalse(c.granted(lock))
4622+
4623+ # Until the leader gets around to its duties.
4624+ c.grants = {unit: {lock: ts}}
4625+ self.assertTrue(c.granted(lock))
4626+
4627+ def test_requested(self):
4628+ c = coordinator.BaseCoordinator()
4629+ lock = 'mylock'
4630+ c.requests = {hookenv.local_unit(): {}}
4631+ c.grants = {}
4632+
4633+ self.assertFalse(c.requested(lock))
4634+ c.acquire(lock)
4635+ self.assertTrue(c.requested(lock))
4636+
4637+ def test_request_timestamp(self):
4638+ c = coordinator.BaseCoordinator()
4639+ lock = 'mylock'
4640+ unit = hookenv.local_unit()
4641+
4642+ c.requests = {unit: {}}
4643+ c.grants = {}
4644+ self.assertIsNone(c.request_timestamp(lock))
4645+
4646+ now = datetime.utcnow()
4647+ fmt = coordinator._timestamp_format
4648+ c.requests = {hookenv.local_unit(): {lock: now.strftime(fmt)}}
4649+
4650+ self.assertEqual(c.request_timestamp(lock), now)
4651+
4652+ def test_handle_not_leader(self):
4653+ c = coordinator.BaseCoordinator()
4654+ # If we are not the leader, handle does nothing. We know this,
4655+ # because without mocks or initialization it would otherwise crash.
4656+ c.handle()
4657+
4658+ def test_handle(self):
4659+ hookenv.is_leader.return_value = True
4660+ lock = 'mylock'
4661+ c = coordinator.BaseCoordinator()
4662+ c.relid = 'cluster:1'
4663+
4664+ ts = coordinator._timestamp
4665+ ts1, ts2, ts3 = ts(), ts(), ts()
4666+
4667+ # Grant one of these requests.
4668+ requests = {'foo/1': {lock: ts1},
4669+ 'foo/2': {lock: ts2},
4670+ 'foo/3': {lock: ts3}}
4671+ c.requests = requests.copy()
4672+ # An existing grant whose timestamp matches no request should be released.
4673+ c.grants = {'foo/2': {lock: ts()}} # Stale grant, to be released.
4674+
4675+ with patch.object(c, 'grant') as grant:
4676+ c.handle()
4677+
4678+ # The requests are unchanged. This is normally state on the
4679+ # peer relation, and only the units themselves can change it.
4680+ self.assertDictEqual(requests, c.requests)
4681+
4682+ # The grant without a corresponding request was released.
4683+ self.assertDictEqual({'foo/2': {}}, c.grants)
4684+
4685+ # A potential grant was made for each of the outstanding requests.
4686+ grant.assert_has_calls([call(lock, 'foo/1'),
4687+ call(lock, 'foo/2'),
4688+ call(lock, 'foo/3')], any_order=True)
4689+
4690+ def test_grant_not_leader(self):
4691+ c = coordinator.BaseCoordinator()
4692+ c.grant(sentinel.whatever, sentinel.whatever) # Nothing happens.
4693+
4694+ def test_grant(self):
4695+ hookenv.is_leader.return_value = True
4696+ c = coordinator.BaseCoordinator()
4697+ c.default_grant = MagicMock()
4698+ c.grant_other = MagicMock()
4699+
4700+ ts = coordinator._timestamp
4701+ ts1, ts2 = ts(), ts()
4702+
4703+ c.requests = {'foo/1': {'mylock': ts1, 'other': ts()},
4704+ 'foo/2': {'mylock': ts2},
4705+ 'foo/3': {'mylock': ts()}}
4706+ grants = {'foo/1': {'mylock': ts1}}
4707+ c.grants = grants.copy()
4708+
4709+ # foo/1 already has a granted mylock, so returns True.
4710+ self.assertTrue(c.grant('mylock', 'foo/1'))
4711+
4712+ # foo/2 does not have a granted mylock. default_grant will
4713+ # be called to make a decision (no).
4714+ c.default_grant.return_value = False
4715+ self.assertFalse(c.grant('mylock', 'foo/2'))
4716+ self.assertDictEqual(grants, c.grants)
4717+ c.default_grant.assert_called_once_with('mylock', 'foo/2',
4718+ set(['foo/1']),
4719+ ['foo/2', 'foo/3'])
4720+ c.default_grant.reset_mock()
4721+
4722+ # Let's say yes.
4723+ c.default_grant.return_value = True
4724+ self.assertTrue(c.grant('mylock', 'foo/2'))
4725+ grants = {'foo/1': {'mylock': ts1}, 'foo/2': {'mylock': ts2}}
4726+ self.assertDictEqual(grants, c.grants)
4727+ c.default_grant.assert_called_once_with('mylock', 'foo/2',
4728+ set(['foo/1']),
4729+ ['foo/2', 'foo/3'])
4730+
4731+ # The other lock has custom logic, in the form of the overridden
4732+ # grant_other method.
4733+ c.grant_other.return_value = False
4734+ self.assertFalse(c.grant('other', 'foo/1'))
4735+ c.grant_other.assert_called_once_with('other', 'foo/1',
4736+ set(), ['foo/1'])
4737+
4738+ # If there is no request, grant returns False
4739+ c.grant_other.return_value = True
4740+ self.assertFalse(c.grant('other', 'foo/2'))
4741+
4742+ def test_released(self):
4743+ c = coordinator.BaseCoordinator()
4744+ with patch.object(c, 'msg') as msg:
4745+ c.released('foo/2', 'mylock', coordinator._utcnow())
4746+ expected = 'Leader released mylock from foo/2, held 0:01:00'
4747+ msg.assert_called_once_with(expected)
4748+
4749+ def test_require(self):
4750+ c = coordinator.BaseCoordinator()
4751+ c.acquire = MagicMock()
4752+ c.granted = MagicMock()
4753+ guard = MagicMock()
4754+
4755+ wrapped = MagicMock()
4756+
4757+ @c.require('mylock', guard)
4758+ def func(*args, **kw):
4759+ wrapped(*args, **kw)
4760+
4761+ # If the lock is granted, the wrapped function is called.
4762+ c.granted.return_value = True
4763+ func(arg=True)
4764+ wrapped.assert_called_once_with(arg=True)
4765+ wrapped.reset_mock()
4766+
4767+ # If the lock is not granted, and the guard returns False,
4768+ # the lock is not acquired.
4769+ c.acquire.return_value = False
4770+ c.granted.return_value = False
4771+ guard.return_value = False
4772+ func()
4773+ self.assertFalse(wrapped.called)
4774+ self.assertFalse(c.acquire.called)
4775+
4776+ # If the lock is not granted, and the guard returns True,
4777+ # the lock is acquired. But the function still isn't called if
4778+ # it cannot be acquired immediately.
4779+ guard.return_value = True
4780+ func()
4781+ self.assertFalse(wrapped.called)
4782+ c.acquire.assert_called_once_with('mylock')
4783+
4784+ # Finally, if the lock is not granted, and the guard returns True,
4785+ # and the lock is acquired immediately, the function is called.
4786+ c.acquire.return_value = True
4787+ func(sentinel.arg)
4788+ wrapped.assert_called_once_with(sentinel.arg)
4789+
4790+ def test_msg(self):
4791+ c = coordinator.BaseCoordinator()
4792+ # Just a wrapper around hookenv.log
4793+ c.msg('hi')
4794+ hookenv.log.assert_called_once_with('coordinator.BaseCoordinator hi',
4795+ level=hookenv.INFO)
4796+
4797+ def test_name(self):
4798+ # We use the class name in a few places to avoid conflicts.
4799+ # We assume we won't be using multiple BaseCoordinator subclasses
4800+ # with the same name at the same time.
4801+ c = coordinator.BaseCoordinator()
4802+ self.assertEqual(c._name(), 'BaseCoordinator')
4803+ c = coordinator.Serial()
4804+ self.assertEqual(c._name(), 'Serial')
4805+
4806+ @patch.object(hookenv, 'leader_get')
4807+ def test_load_state(self, leader_get):
4808+ c = coordinator.BaseCoordinator()
4809+ unit = hookenv.local_unit()
4810+
4811+ # c.grants is just the leader_get value, JSON decoded.
4812+ leader_get.return_value = '{"json": true}'
4813+ c._load_state()
4814+ self.assertDictEqual(c.grants, {'json': True})
4815+
4816+ # With no relid, there is no peer relation so request state
4817+ # is pulled from a local stash.
4818+ with patch.object(c, '_load_local_state') as loc_state:
4819+ loc_state.return_value = {'local': True}
4820+ c._load_state()
4821+ self.assertDictEqual(c.requests, {unit: {'local': True}})
4822+
4823+ # With a relid, request details are pulled from the peer relation.
4824+ # If there is no data in the peer relation from the local unit,
4825+ # we still pull it from the local stash as it means this is the
4826+ # first time we have joined.
4827+ c.relid = 'cluster:1'
4828+ with patch.object(c, '_load_local_state') as loc_state, \
4829+ patch.object(c, '_load_peer_state') as peer_state:
4830+ loc_state.return_value = {'local': True}
4831+ peer_state.return_value = {'foo/2': {'mylock': 'whatever'}}
4832+ c._load_state()
4833+ self.assertDictEqual(c.requests, {unit: {'local': True},
4834+ 'foo/2': {'mylock': 'whatever'}})
4835+
4836+ # If there are local details in the peer relation, the local
4837+ # stash is ignored.
4838+ with patch.object(c, '_load_local_state') as loc_state, \
4839+ patch.object(c, '_load_peer_state') as peer_state:
4840+ loc_state.return_value = {'local': True}
4841+ peer_state.return_value = {unit: {},
4842+ 'foo/2': {'mylock': 'whatever'}}
4843+ c._load_state()
4844+ self.assertDictEqual(c.requests, {unit: {},
4845+ 'foo/2': {'mylock': 'whatever'}})
4846+
4847+ def test_emit_state(self):
4848+ c = coordinator.BaseCoordinator()
4849+ unit = hookenv.local_unit()
4850+ c.requests = {unit: {'lock_a': sentinel.ts,
4851+ 'lock_b': sentinel.ts,
4852+ 'lock_c': sentinel.ts}}
4853+ c.grants = {unit: {'lock_a': sentinel.ts,
4854+ 'lock_b': sentinel.ts2}}
4855+ with patch.object(c, 'msg') as msg:
4856+ c._emit_state()
4857+ msg.assert_has_calls([call('Granted lock_a'),
4858+ call('Waiting on lock_b'),
4859+ call('Waiting on lock_c')],
4860+ any_order=True)
4861+
4862+ @patch.object(hookenv, 'relation_set')
4863+ @patch.object(hookenv, 'leader_set')
4864+ def test_save_state(self, leader_set, relation_set):
4865+ c = coordinator.BaseCoordinator()
4866+ unit = hookenv.local_unit()
4867+ c.grants = {'directdump': True}
4868+ c.requests = {unit: 'data1', 'foo/2': 'data2'}
4869+
4870+ # grants is dumped to leadership settings if the unit is the leader.
4871+ with patch.object(c, '_save_local_state') as save_loc:
4872+ c._save_state()
4873+ self.assertFalse(leader_set.called)
4874+ hookenv.is_leader.return_value = True
4875+ c._save_state()
4876+ leader_set.assert_called_once_with({c.key: '{"directdump": true}'})
4877+
4878+ # If there is no relation id, the local unit's requests are dumped
4879+ # to a local stash.
4880+ with patch.object(c, '_save_local_state') as save_loc:
4881+ c._save_state()
4882+ save_loc.assert_called_once_with('data1')
4883+
4884+ # If there is a relation id, the local unit's requests are dumped
4885+ # to the peer relation.
4886+ with patch.object(c, '_save_local_state') as save_loc:
4887+ c.relid = 'cluster:1'
4888+ c._save_state()
4889+ self.assertFalse(save_loc.called)
4890+ relation_set.assert_called_once_with(
4891+ c.relid, relation_settings={c.key: '"data1"'}) # JSON encoded
4892+
4893+ @patch.object(hookenv, 'relation_get')
4894+ @patch.object(hookenv, 'related_units')
4895+ def test_load_peer_state(self, related_units, relation_get):
4896+ # Standard relation-get loops, decoding results from JSON.
4897+ c = coordinator.BaseCoordinator()
4898+ c.key = sentinel.key
4899+ c.relid = sentinel.relid
4900+ related_units.return_value = ['foo/2', 'foo/3']
4901+ d = {'foo/1': {'foo/1': True},
4902+ 'foo/2': {'foo/2': True},
4903+ 'foo/3': {'foo/3': True}}
4904+
4905+ def _get(key, unit, relid):
4906+ assert key == sentinel.key
4907+ assert relid == sentinel.relid
4908+ return json.dumps(d[unit])
4909+ relation_get.side_effect = _get
4910+
4911+ self.assertDictEqual(c._load_peer_state(), d)
4912+
4913+ def test_local_state_filename(self):
4914+ c = coordinator.BaseCoordinator()
4915+ self.assertEqual(c._local_state_filename(),
4916+ '.charmhelpers.coordinator.BaseCoordinator')
4917+
4918+ def test_load_local_state(self):
4919+ c = coordinator.BaseCoordinator()
4920+ with tempfile.NamedTemporaryFile(mode='w') as f:
4921+ with patch.object(c, '_local_state_filename') as fn:
4922+ fn.return_value = f.name
4923+ d = 'some data'
4924+ json.dump(d, f)
4925+ f.flush()
4926+ d2 = c._load_local_state()
4927+ self.assertEqual(d, d2)
4928+
4929+ def test_save_local_state(self):
4930+ c = coordinator.BaseCoordinator()
4931+ with tempfile.NamedTemporaryFile(mode='r') as f:
4932+ with patch.object(c, '_local_state_filename') as fn:
4933+ fn.return_value = f.name
4934+ c._save_local_state('some data')
4935+ self.assertEqual(json.load(f), 'some data')
4936+
4937+ def test_release_granted(self):
4938+ c = coordinator.BaseCoordinator()
4939+ unit = hookenv.local_unit()
4940+ c.requests = {unit: {'lock1': sentinel.ts, 'lock2': sentinel.ts},
4941+ 'foo/2': {'lock1': sentinel.ts}}
4942+ c.grants = {unit: {'lock1': sentinel.ts},
4943+ 'foo/2': {'lock1': sentinel.ts}}
4944+ # The granted lock for the local unit is released.
4945+ c._release_granted()
4946+ self.assertDictEqual(c.requests, {unit: {'lock2': sentinel.ts},
4947+ 'foo/2': {'lock1': sentinel.ts}})
4948+
4949+ def test_implicit_peer_relation_name(self):
4950+ self.assertEqual(coordinator._implicit_peer_relation_name(),
4951+ 'cluster')
4952+
4953+ def test_default_grant(self):
4954+ c = coordinator.Serial()
4955+ # Lock not granted. First in the queue.
4956+ self.assertTrue(c.default_grant(sentinel.lock, sentinel.u1,
4957+ set(), [sentinel.u1, sentinel.u2]))
4958+
4959+ # Lock not granted. Later in the queue.
4960+ self.assertFalse(c.default_grant(sentinel.lock, sentinel.u1,
4961+ set(), [sentinel.u2, sentinel.u1]))
4962+
4963+ # Lock already granted
4964+ self.assertFalse(c.default_grant(sentinel.lock, sentinel.u1,
4965+ set([sentinel.u2]), [sentinel.u1]))
4966
4967=== added file 'tests/core/test_files.py'
4968--- tests/core/test_files.py 1970-01-01 00:00:00 +0000
4969+++ tests/core/test_files.py 2015-08-13 08:33:21 +0000
4970@@ -0,0 +1,32 @@
4971+#!/usr/bin/env python
4972+# -*- coding: utf-8 -*-
4973+
4974+from charmhelpers.core import files
4975+
4976+import mock
4977+import unittest
4978+import tempfile
4979+import os
4980+
4981+
4982+class FileTests(unittest.TestCase):
4983+
4984+ @mock.patch("subprocess.check_call")
4985+ def test_sed(self, check_call):
4986+ files.sed("/tmp/test-sed-file", "replace", "this")
4987+ check_call.assert_called_once_with(
4988+ ['sed', '-i', '-r', '-e', 's/replace/this/g',
4989+ '/tmp/test-sed-file']
4990+ )
4991+
4992+ def test_sed_file(self):
4993+ tmp = tempfile.NamedTemporaryFile(mode='w', delete=False)
4994+ tmp.write("IPV6=yes")
4995+ tmp.close()
4996+
4997+ files.sed(tmp.name, "IPV6=.*", "IPV6=no")
4998+
4999+ with open(tmp.name) as tmp:
5000+ self.assertEqual(tmp.read(), "IPV6=no")
The diff has been truncated for viewing.
