Merge lp:~james-page/charm-helpers/vpp-rebase into lp:~gnuoy/charm-helpers/cisco-vpp

Proposed by James Page
Status: Merged
Merged at revision: 399
Proposed branch: lp:~james-page/charm-helpers/vpp-rebase
Merge into: lp:~gnuoy/charm-helpers/cisco-vpp
Diff against target: 5730 lines (+3512/-430)
67 files modified
VERSION (+1/-1)
charmhelpers/cli/__init__.py (+32/-5)
charmhelpers/cli/commands.py (+5/-4)
charmhelpers/cli/hookenv.py (+23/-0)
charmhelpers/contrib/amulet/utils.py (+239/-9)
charmhelpers/contrib/benchmark/__init__.py (+3/-1)
charmhelpers/contrib/database/mysql.py (+3/-0)
charmhelpers/contrib/network/ufw.py (+46/-3)
charmhelpers/contrib/openstack/amulet/deployment.py (+38/-4)
charmhelpers/contrib/openstack/amulet/utils.py (+361/-51)
charmhelpers/contrib/openstack/context.py (+47/-34)
charmhelpers/contrib/openstack/templates/ceph.conf (+6/-6)
charmhelpers/contrib/openstack/templating.py (+2/-2)
charmhelpers/contrib/openstack/utils.py (+77/-23)
charmhelpers/contrib/peerstorage/__init__.py (+5/-4)
charmhelpers/contrib/python/packages.py (+2/-0)
charmhelpers/contrib/storage/linux/ceph.py (+6/-6)
charmhelpers/contrib/storage/linux/utils.py (+4/-3)
charmhelpers/contrib/unison/__init__.py (+23/-8)
charmhelpers/coordinator.py (+607/-0)
charmhelpers/core/files.py (+45/-0)
charmhelpers/core/hookenv.py (+192/-40)
charmhelpers/core/host.py (+31/-5)
charmhelpers/core/services/base.py (+12/-9)
charmhelpers/core/services/helpers.py (+1/-2)
charmhelpers/core/unitdata.py (+61/-17)
charmhelpers/fetch/__init__.py (+31/-14)
charmhelpers/fetch/archiveurl.py (+7/-1)
charmhelpers/fetch/giturl.py (+1/-1)
docs/_extensions/automembersummary.py (+86/-0)
docs/api/charmhelpers.coordinator.rst (+10/-0)
docs/api/charmhelpers.core.decorators.rst (+7/-0)
docs/api/charmhelpers.core.fstab.rst (+7/-0)
docs/api/charmhelpers.core.hookenv.rst (+12/-0)
docs/api/charmhelpers.core.host.rst (+12/-0)
docs/api/charmhelpers.core.rst (+11/-38)
docs/api/charmhelpers.core.services.base.rst (+12/-0)
docs/api/charmhelpers.core.services.helpers.rst (+12/-0)
docs/api/charmhelpers.core.services.rst (+12/-0)
docs/api/charmhelpers.core.strutils.rst (+7/-0)
docs/api/charmhelpers.core.sysctl.rst (+7/-0)
docs/api/charmhelpers.core.templating.rst (+7/-0)
docs/api/charmhelpers.core.unitdata.rst (+7/-0)
docs/api/charmhelpers.rst (+4/-2)
docs/api/modules.rst (+0/-7)
docs/conf.py (+4/-1)
setup.py (+22/-1)
test_requirements.txt (+3/-1)
tests/cli/test_cmdline.py (+56/-9)
tests/contrib/amulet/test_utils.py (+105/-0)
tests/contrib/benchmark/test_benchmark.py (+17/-13)
tests/contrib/hahelpers/test_apache_utils.py (+1/-1)
tests/contrib/network/test_ufw.py (+72/-0)
tests/contrib/openstack/test_openstack_utils.py (+34/-10)
tests/contrib/openstack/test_os_contexts.py (+60/-1)
tests/contrib/peerstorage/test_peerstorage.py (+7/-7)
tests/contrib/python/test_debug.py (+1/-1)
tests/contrib/storage/test_linux_ceph.py (+11/-11)
tests/contrib/storage/test_linux_storage_utils.py (+11/-2)
tests/contrib/unison/test_unison.py (+58/-1)
tests/coordinator/test_coordinator.py (+535/-0)
tests/core/test_files.py (+32/-0)
tests/core/test_hookenv.py (+232/-31)
tests/core/test_host.py (+33/-1)
tests/core/test_services.py (+13/-7)
tests/fetch/test_archiveurl.py (+21/-3)
tests/fetch/test_fetch.py (+60/-29)
To merge this branch: bzr merge lp:~james-page/charm-helpers/vpp-rebase
Reviewer Review Type Date Requested Status
Liam Young Approve
Review via email: mp+267916@code.launchpad.net
To post a comment you must log in.
Revision history for this message
Liam Young (gnuoy) wrote :

lgtm

review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== modified file 'VERSION'
--- VERSION 2015-05-20 14:31:33 +0000
+++ VERSION 2015-08-13 08:33:21 +0000
@@ -1,1 +1,1 @@
10.3.210.5.0
22
=== modified file 'charmhelpers/cli/__init__.py'
--- charmhelpers/cli/__init__.py 2015-01-22 06:06:03 +0000
+++ charmhelpers/cli/__init__.py 2015-08-13 08:33:21 +0000
@@ -20,6 +20,8 @@
2020
21from six.moves import zip21from six.moves import zip
2222
23from charmhelpers.core import unitdata
24
2325
24class OutputFormatter(object):26class OutputFormatter(object):
25 def __init__(self, outfile=sys.stdout):27 def __init__(self, outfile=sys.stdout):
@@ -53,6 +55,8 @@
5355
54 def raw(self, output):56 def raw(self, output):
55 """Output data as raw string (default)"""57 """Output data as raw string (default)"""
58 if isinstance(output, (list, tuple)):
59 output = '\n'.join(map(str, output))
56 self.outfile.write(str(output))60 self.outfile.write(str(output))
5761
58 def py(self, output):62 def py(self, output):
@@ -91,6 +95,7 @@
91 argument_parser = None95 argument_parser = None
92 subparsers = None96 subparsers = None
93 formatter = None97 formatter = None
98 exit_code = 0
9499
95 def __init__(self):100 def __init__(self):
96 if not self.argument_parser:101 if not self.argument_parser:
@@ -115,6 +120,21 @@
115 return decorated120 return decorated
116 return wrapper121 return wrapper
117122
123 def test_command(self, decorated):
124 """
125 Subcommand is a boolean test function, so bool return values should be
126 converted to a 0/1 exit code.
127 """
128 decorated._cli_test_command = True
129 return decorated
130
131 def no_output(self, decorated):
132 """
133 Subcommand is not expected to return a value, so don't print a spurious None.
134 """
135 decorated._cli_no_output = True
136 return decorated
137
118 def subcommand_builder(self, command_name, description=None):138 def subcommand_builder(self, command_name, description=None):
119 """139 """
120 Decorate a function that builds a subcommand. Builders should accept a140 Decorate a function that builds a subcommand. Builders should accept a
@@ -132,12 +152,19 @@
132 arguments = self.argument_parser.parse_args()152 arguments = self.argument_parser.parse_args()
133 argspec = inspect.getargspec(arguments.func)153 argspec = inspect.getargspec(arguments.func)
134 vargs = []154 vargs = []
135 kwargs = {}155 for arg in argspec.args:
156 vargs.append(getattr(arguments, arg))
136 if argspec.varargs:157 if argspec.varargs:
137 vargs = getattr(arguments, argspec.varargs)158 vargs.extend(getattr(arguments, argspec.varargs))
138 for arg in argspec.args:159 output = arguments.func(*vargs)
139 kwargs[arg] = getattr(arguments, arg)160 if getattr(arguments.func, '_cli_test_command', False):
140 self.formatter.format_output(arguments.func(*vargs, **kwargs), arguments.format)161 self.exit_code = 0 if output else 1
162 output = ''
163 if getattr(arguments.func, '_cli_no_output', False):
164 output = ''
165 self.formatter.format_output(output, arguments.format)
166 if unitdata._KV:
167 unitdata._KV.flush()
141168
142169
143cmdline = CommandLine()170cmdline = CommandLine()
144171
=== modified file 'charmhelpers/cli/commands.py'
--- charmhelpers/cli/commands.py 2015-05-13 20:44:19 +0000
+++ charmhelpers/cli/commands.py 2015-08-13 08:33:21 +0000
@@ -24,8 +24,9 @@
24from . import CommandLine # noqa24from . import CommandLine # noqa
2525
26"""26"""
27Import the sub-modules to be included by chlp.27Import the sub-modules which have decorated subcommands to register with chlp.
28"""28"""
29import host # noqa29from . import host # noqa
30import benchmark # noqa30from . import benchmark # noqa
31import unitdata # noqa31from . import unitdata # noqa
32from . import hookenv # noqa
3233
=== added file 'charmhelpers/cli/hookenv.py'
--- charmhelpers/cli/hookenv.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/cli/hookenv.py 2015-08-13 08:33:21 +0000
@@ -0,0 +1,23 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17from . import cmdline
18from charmhelpers.core import hookenv
19
20
21cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
22cmdline.subcommand('service-name')(hookenv.service_name)
23cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)
024
=== modified file 'charmhelpers/contrib/amulet/utils.py'
--- charmhelpers/contrib/amulet/utils.py 2015-04-21 15:40:51 +0000
+++ charmhelpers/contrib/amulet/utils.py 2015-08-13 08:33:21 +0000
@@ -14,14 +14,21 @@
14# You should have received a copy of the GNU Lesser General Public License14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1616
17import ConfigParser
18import io17import io
19import logging18import logging
19import os
20import re20import re
21import sys21import sys
22import time22import time
2323
24import amulet
25import distro_info
24import six26import six
27from six.moves import configparser
28if six.PY3:
29 from urllib import parse as urlparse
30else:
31 import urlparse
2532
2633
27class AmuletUtils(object):34class AmuletUtils(object):
@@ -33,6 +40,7 @@
3340
34 def __init__(self, log_level=logging.ERROR):41 def __init__(self, log_level=logging.ERROR):
35 self.log = self.get_logger(level=log_level)42 self.log = self.get_logger(level=log_level)
43 self.ubuntu_releases = self.get_ubuntu_releases()
3644
37 def get_logger(self, name="amulet-logger", level=logging.DEBUG):45 def get_logger(self, name="amulet-logger", level=logging.DEBUG):
38 """Get a logger object that will log to stdout."""46 """Get a logger object that will log to stdout."""
@@ -70,12 +78,44 @@
70 else:78 else:
71 return False79 return False
7280
81 def get_ubuntu_release_from_sentry(self, sentry_unit):
82 """Get Ubuntu release codename from sentry unit.
83
84 :param sentry_unit: amulet sentry/service unit pointer
85 :returns: list of strings - release codename, failure message
86 """
87 msg = None
88 cmd = 'lsb_release -cs'
89 release, code = sentry_unit.run(cmd)
90 if code == 0:
91 self.log.debug('{} lsb_release: {}'.format(
92 sentry_unit.info['unit_name'], release))
93 else:
94 msg = ('{} `{}` returned {} '
95 '{}'.format(sentry_unit.info['unit_name'],
96 cmd, release, code))
97 if release not in self.ubuntu_releases:
98 msg = ("Release ({}) not found in Ubuntu releases "
99 "({})".format(release, self.ubuntu_releases))
100 return release, msg
101
73 def validate_services(self, commands):102 def validate_services(self, commands):
74 """Validate services.103 """Validate that lists of commands succeed on service units. Can be
75104 used to verify system services are running on the corresponding
76 Verify the specified services are running on the corresponding
77 service units.105 service units.
78 """106
107 :param commands: dict with sentry keys and arbitrary command list vals
108 :returns: None if successful, Failure string message otherwise
109 """
110 self.log.debug('Checking status of system services...')
111
112 # /!\ DEPRECATION WARNING (beisner):
113 # New and existing tests should be rewritten to use
114 # validate_services_by_name() as it is aware of init systems.
115 self.log.warn('/!\\ DEPRECATION WARNING: use '
116 'validate_services_by_name instead of validate_services '
117 'due to init system differences.')
118
79 for k, v in six.iteritems(commands):119 for k, v in six.iteritems(commands):
80 for cmd in v:120 for cmd in v:
81 output, code = k.run(cmd)121 output, code = k.run(cmd)
@@ -86,6 +126,45 @@
86 return "command `{}` returned {}".format(cmd, str(code))126 return "command `{}` returned {}".format(cmd, str(code))
87 return None127 return None
88128
129 def validate_services_by_name(self, sentry_services):
130 """Validate system service status by service name, automatically
131 detecting init system based on Ubuntu release codename.
132
133 :param sentry_services: dict with sentry keys and svc list values
134 :returns: None if successful, Failure string message otherwise
135 """
136 self.log.debug('Checking status of system services...')
137
138 # Point at which systemd became a thing
139 systemd_switch = self.ubuntu_releases.index('vivid')
140
141 for sentry_unit, services_list in six.iteritems(sentry_services):
142 # Get lsb_release codename from unit
143 release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
144 if ret:
145 return ret
146
147 for service_name in services_list:
148 if (self.ubuntu_releases.index(release) >= systemd_switch or
149 service_name in ['rabbitmq-server', 'apache2']):
150 # init is systemd (or regular sysv)
151 cmd = 'sudo service {} status'.format(service_name)
152 output, code = sentry_unit.run(cmd)
153 service_running = code == 0
154 elif self.ubuntu_releases.index(release) < systemd_switch:
155 # init is upstart
156 cmd = 'sudo status {}'.format(service_name)
157 output, code = sentry_unit.run(cmd)
158 service_running = code == 0 and "start/running" in output
159
160 self.log.debug('{} `{}` returned '
161 '{}'.format(sentry_unit.info['unit_name'],
162 cmd, code))
163 if not service_running:
164 return u"command `{}` returned {} {}".format(
165 cmd, output, str(code))
166 return None
167
89 def _get_config(self, unit, filename):168 def _get_config(self, unit, filename):
90 """Get a ConfigParser object for parsing a unit's config file."""169 """Get a ConfigParser object for parsing a unit's config file."""
91 file_contents = unit.file_contents(filename)170 file_contents = unit.file_contents(filename)
@@ -93,7 +172,7 @@
93 # NOTE(beisner): by default, ConfigParser does not handle options172 # NOTE(beisner): by default, ConfigParser does not handle options
94 # with no value, such as the flags used in the mysql my.cnf file.173 # with no value, such as the flags used in the mysql my.cnf file.
95 # https://bugs.python.org/issue7005174 # https://bugs.python.org/issue7005
96 config = ConfigParser.ConfigParser(allow_no_value=True)175 config = configparser.ConfigParser(allow_no_value=True)
97 config.readfp(io.StringIO(file_contents))176 config.readfp(io.StringIO(file_contents))
98 return config177 return config
99178
@@ -103,7 +182,15 @@
103182
104 Verify that the specified section of the config file contains183 Verify that the specified section of the config file contains
105 the expected option key:value pairs.184 the expected option key:value pairs.
185
186 Compare expected dictionary data vs actual dictionary data.
187 The values in the 'expected' dictionary can be strings, bools, ints,
188 longs, or can be a function that evaluates a variable and returns a
189 bool.
106 """190 """
191 self.log.debug('Validating config file data ({} in {} on {})'
192 '...'.format(section, config_file,
193 sentry_unit.info['unit_name']))
107 config = self._get_config(sentry_unit, config_file)194 config = self._get_config(sentry_unit, config_file)
108195
109 if section != 'DEFAULT' and not config.has_section(section):196 if section != 'DEFAULT' and not config.has_section(section):
@@ -112,9 +199,20 @@
112 for k in expected.keys():199 for k in expected.keys():
113 if not config.has_option(section, k):200 if not config.has_option(section, k):
114 return "section [{}] is missing option {}".format(section, k)201 return "section [{}] is missing option {}".format(section, k)
115 if config.get(section, k) != expected[k]:202
203 actual = config.get(section, k)
204 v = expected[k]
205 if (isinstance(v, six.string_types) or
206 isinstance(v, bool) or
207 isinstance(v, six.integer_types)):
208 # handle explicit values
209 if actual != v:
210 return "section [{}] {}:{} != expected {}:{}".format(
211 section, k, actual, k, expected[k])
212 # handle function pointers, such as not_null or valid_ip
213 elif not v(actual):
116 return "section [{}] {}:{} != expected {}:{}".format(214 return "section [{}] {}:{} != expected {}:{}".format(
117 section, k, config.get(section, k), k, expected[k])215 section, k, actual, k, expected[k])
118 return None216 return None
119217
120 def _validate_dict_data(self, expected, actual):218 def _validate_dict_data(self, expected, actual):
@@ -122,7 +220,7 @@
122220
123 Compare expected dictionary data vs actual dictionary data.221 Compare expected dictionary data vs actual dictionary data.
124 The values in the 'expected' dictionary can be strings, bools, ints,222 The values in the 'expected' dictionary can be strings, bools, ints,
125 longs, or can be a function that evaluate a variable and returns a223 longs, or can be a function that evaluates a variable and returns a
126 bool.224 bool.
127 """225 """
128 self.log.debug('actual: {}'.format(repr(actual)))226 self.log.debug('actual: {}'.format(repr(actual)))
@@ -133,8 +231,10 @@
133 if (isinstance(v, six.string_types) or231 if (isinstance(v, six.string_types) or
134 isinstance(v, bool) or232 isinstance(v, bool) or
135 isinstance(v, six.integer_types)):233 isinstance(v, six.integer_types)):
234 # handle explicit values
136 if v != actual[k]:235 if v != actual[k]:
137 return "{}:{}".format(k, actual[k])236 return "{}:{}".format(k, actual[k])
237 # handle function pointers, such as not_null or valid_ip
138 elif not v(actual[k]):238 elif not v(actual[k]):
139 return "{}:{}".format(k, actual[k])239 return "{}:{}".format(k, actual[k])
140 else:240 else:
@@ -321,3 +421,133 @@
321421
322 def endpoint_error(self, name, data):422 def endpoint_error(self, name, data):
323 return 'unexpected endpoint data in {} - {}'.format(name, data)423 return 'unexpected endpoint data in {} - {}'.format(name, data)
424
425 def get_ubuntu_releases(self):
426 """Return a list of all Ubuntu releases in order of release."""
427 _d = distro_info.UbuntuDistroInfo()
428 _release_list = _d.all
429 self.log.debug('Ubuntu release list: {}'.format(_release_list))
430 return _release_list
431
432 def file_to_url(self, file_rel_path):
433 """Convert a relative file path to a file URL."""
434 _abs_path = os.path.abspath(file_rel_path)
435 return urlparse.urlparse(_abs_path, scheme='file').geturl()
436
437 def check_commands_on_units(self, commands, sentry_units):
438 """Check that all commands in a list exit zero on all
439 sentry units in a list.
440
441 :param commands: list of bash commands
442 :param sentry_units: list of sentry unit pointers
443 :returns: None if successful; Failure message otherwise
444 """
445 self.log.debug('Checking exit codes for {} commands on {} '
446 'sentry units...'.format(len(commands),
447 len(sentry_units)))
448 for sentry_unit in sentry_units:
449 for cmd in commands:
450 output, code = sentry_unit.run(cmd)
451 if code == 0:
452 self.log.debug('{} `{}` returned {} '
453 '(OK)'.format(sentry_unit.info['unit_name'],
454 cmd, code))
455 else:
456 return ('{} `{}` returned {} '
457 '{}'.format(sentry_unit.info['unit_name'],
458 cmd, code, output))
459 return None
460
461 def get_process_id_list(self, sentry_unit, process_name):
462 """Get a list of process ID(s) from a single sentry juju unit
463 for a single process name.
464
465 :param sentry_unit: Pointer to amulet sentry instance (juju unit)
466 :param process_name: Process name
467 :returns: List of process IDs
468 """
469 cmd = 'pidof {}'.format(process_name)
470 output, code = sentry_unit.run(cmd)
471 if code != 0:
472 msg = ('{} `{}` returned {} '
473 '{}'.format(sentry_unit.info['unit_name'],
474 cmd, code, output))
475 amulet.raise_status(amulet.FAIL, msg=msg)
476 return str(output).split()
477
478 def get_unit_process_ids(self, unit_processes):
479 """Construct a dict containing unit sentries, process names, and
480 process IDs."""
481 pid_dict = {}
482 for sentry_unit, process_list in unit_processes.iteritems():
483 pid_dict[sentry_unit] = {}
484 for process in process_list:
485 pids = self.get_process_id_list(sentry_unit, process)
486 pid_dict[sentry_unit].update({process: pids})
487 return pid_dict
488
489 def validate_unit_process_ids(self, expected, actual):
490 """Validate process id quantities for services on units."""
491 self.log.debug('Checking units for running processes...')
492 self.log.debug('Expected PIDs: {}'.format(expected))
493 self.log.debug('Actual PIDs: {}'.format(actual))
494
495 if len(actual) != len(expected):
496 return ('Unit count mismatch. expected, actual: {}, '
497 '{} '.format(len(expected), len(actual)))
498
499 for (e_sentry, e_proc_names) in expected.iteritems():
500 e_sentry_name = e_sentry.info['unit_name']
501 if e_sentry in actual.keys():
502 a_proc_names = actual[e_sentry]
503 else:
504 return ('Expected sentry ({}) not found in actual dict data.'
505 '{}'.format(e_sentry_name, e_sentry))
506
507 if len(e_proc_names.keys()) != len(a_proc_names.keys()):
508 return ('Process name count mismatch. expected, actual: {}, '
509 '{}'.format(len(expected), len(actual)))
510
511 for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
512 zip(e_proc_names.items(), a_proc_names.items()):
513 if e_proc_name != a_proc_name:
514 return ('Process name mismatch. expected, actual: {}, '
515 '{}'.format(e_proc_name, a_proc_name))
516
517 a_pids_length = len(a_pids)
518 fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
519 '{}, {} ({})'.format(e_sentry_name, e_proc_name,
520 e_pids_length, a_pids_length,
521 a_pids))
522
523 # If expected is not bool, ensure PID quantities match
524 if not isinstance(e_pids_length, bool) and \
525 a_pids_length != e_pids_length:
526 return fail_msg
527 # If expected is bool True, ensure 1 or more PIDs exist
528 elif isinstance(e_pids_length, bool) and \
529 e_pids_length is True and a_pids_length < 1:
530 return fail_msg
531 # If expected is bool False, ensure 0 PIDs exist
532 elif isinstance(e_pids_length, bool) and \
533 e_pids_length is False and a_pids_length != 0:
534 return fail_msg
535 else:
536 self.log.debug('PID check OK: {} {} {}: '
537 '{}'.format(e_sentry_name, e_proc_name,
538 e_pids_length, a_pids))
539 return None
540
541 def validate_list_of_identical_dicts(self, list_of_dicts):
542 """Check that all dicts within a list are identical."""
543 hashes = []
544 for _dict in list_of_dicts:
545 hashes.append(hash(frozenset(_dict.items())))
546
547 self.log.debug('Hashes: {}'.format(hashes))
548 if len(set(hashes)) == 1:
549 self.log.debug('Dicts within list are identical')
550 else:
551 return 'Dicts within list are not identical'
552
553 return None
324554
=== modified file 'charmhelpers/contrib/benchmark/__init__.py'
--- charmhelpers/contrib/benchmark/__init__.py 2015-04-24 16:18:42 +0000
+++ charmhelpers/contrib/benchmark/__init__.py 2015-08-13 08:33:21 +0000
@@ -63,6 +63,8 @@
6363
64 """64 """
6565
66 BENCHMARK_CONF = '/etc/benchmark.conf' # Replaced in testing
67
66 required_keys = [68 required_keys = [
67 'hostname',69 'hostname',
68 'port',70 'port',
@@ -91,7 +93,7 @@
91 break93 break
9294
93 if len(config):95 if len(config):
94 with open('/etc/benchmark.conf', 'w') as f:96 with open(self.BENCHMARK_CONF, 'w') as f:
95 for key, val in iter(config.items()):97 for key, val in iter(config.items()):
96 f.write("%s=%s\n" % (key, val))98 f.write("%s=%s\n" % (key, val))
9799
98100
=== modified file 'charmhelpers/contrib/database/mysql.py'
--- charmhelpers/contrib/database/mysql.py 2015-06-03 20:31:29 +0000
+++ charmhelpers/contrib/database/mysql.py 2015-08-13 08:33:21 +0000
@@ -381,6 +381,9 @@
381 if 'wait-timeout' in config:381 if 'wait-timeout' in config:
382 mysql_config['wait_timeout'] = config['wait-timeout']382 mysql_config['wait_timeout'] = config['wait-timeout']
383383
384 if 'innodb-flush-log-at-trx-commit' in config:
385 mysql_config['innodb_flush_log_at_trx_commit'] = config['innodb-flush-log-at-trx-commit']
386
384 # Set a sane default key_buffer size387 # Set a sane default key_buffer size
385 mysql_config['key_buffer'] = self.human_to_bytes('32M')388 mysql_config['key_buffer'] = self.human_to_bytes('32M')
386 total_memory = self.human_to_bytes(self.get_mem_total())389 total_memory = self.human_to_bytes(self.get_mem_total())
387390
=== modified file 'charmhelpers/contrib/network/ufw.py'
--- charmhelpers/contrib/network/ufw.py 2015-02-12 20:08:28 +0000
+++ charmhelpers/contrib/network/ufw.py 2015-08-13 08:33:21 +0000
@@ -180,7 +180,43 @@
180 return True180 return True
181181
182182
183def modify_access(src, dst='any', port=None, proto=None, action='allow'):183def default_policy(policy='deny', direction='incoming'):
184 """
185 Changes the default policy for traffic `direction`
186
187 :param policy: allow, deny or reject
188 :param direction: traffic direction, possible values: incoming, outgoing,
189 routed
190 """
191 if policy not in ['allow', 'deny', 'reject']:
192 raise UFWError(('Unknown policy %s, valid values: '
193 'allow, deny, reject') % policy)
194
195 if direction not in ['incoming', 'outgoing', 'routed']:
196 raise UFWError(('Unknown direction %s, valid values: '
197 'incoming, outgoing, routed') % direction)
198
199 output = subprocess.check_output(['ufw', 'default', policy, direction],
200 universal_newlines=True,
201 env={'LANG': 'en_US',
202 'PATH': os.environ['PATH']})
203 hookenv.log(output, level='DEBUG')
204
205 m = re.findall("^Default %s policy changed to '%s'\n" % (direction,
206 policy),
207 output, re.M)
208 if len(m) == 0:
209 hookenv.log("ufw couldn't change the default policy to %s for %s"
210 % (policy, direction), level='WARN')
211 return False
212 else:
213 hookenv.log("ufw default policy for %s changed to %s"
214 % (direction, policy), level='INFO')
215 return True
216
217
218def modify_access(src, dst='any', port=None, proto=None, action='allow',
219 index=None):
184 """220 """
185 Grant access to an address or subnet221 Grant access to an address or subnet
186222
@@ -192,6 +228,8 @@
192 :param port: destination port228 :param port: destination port
193 :param proto: protocol (tcp or udp)229 :param proto: protocol (tcp or udp)
194 :param action: `allow` or `delete`230 :param action: `allow` or `delete`
231 :param index: if different from None the rule is inserted at the given
232 `index`.
195 """233 """
196 if not is_enabled():234 if not is_enabled():
197 hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')235 hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
@@ -199,6 +237,8 @@
199237
200 if action == 'delete':238 if action == 'delete':
201 cmd = ['ufw', 'delete', 'allow']239 cmd = ['ufw', 'delete', 'allow']
240 elif index is not None:
241 cmd = ['ufw', 'insert', str(index), action]
202 else:242 else:
203 cmd = ['ufw', action]243 cmd = ['ufw', action]
204244
@@ -227,7 +267,7 @@
227 level='ERROR')267 level='ERROR')
228268
229269
230def grant_access(src, dst='any', port=None, proto=None):270def grant_access(src, dst='any', port=None, proto=None, index=None):
231 """271 """
232 Grant access to an address or subnet272 Grant access to an address or subnet
233273
@@ -238,8 +278,11 @@
238 field has to be set.278 field has to be set.
239 :param port: destination port279 :param port: destination port
240 :param proto: protocol (tcp or udp)280 :param proto: protocol (tcp or udp)
281 :param index: if different from None the rule is inserted at the given
282 `index`.
241 """283 """
242 return modify_access(src, dst=dst, port=port, proto=proto, action='allow')284 return modify_access(src, dst=dst, port=port, proto=proto, action='allow',
285 index=index)
243286
244287
245def revoke_access(src, dst='any', port=None, proto=None):288def revoke_access(src, dst='any', port=None, proto=None):
246289
=== modified file 'charmhelpers/contrib/openstack/amulet/deployment.py'
--- charmhelpers/contrib/openstack/amulet/deployment.py 2015-08-12 07:50:34 +0000
+++ charmhelpers/contrib/openstack/amulet/deployment.py 2015-08-13 08:33:21 +0000
@@ -44,7 +44,7 @@
44 Determine if the local branch being tested is derived from its44 Determine if the local branch being tested is derived from its
45 stable or next (dev) branch, and based on this, use the corresonding45 stable or next (dev) branch, and based on this, use the corresonding
46 stable or next branches for the other_services."""46 stable or next branches for the other_services."""
47 base_charms = ['mysql', 'mongodb']47 base_charms = ['mysql', 'mongodb', 'nrpe']
4848
49 if self.series in ['precise', 'trusty']:49 if self.series in ['precise', 'trusty']:
50 base_series = self.series50 base_series = self.series
@@ -83,9 +83,10 @@
83 services.append(this_service)83 services.append(this_service)
84 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',84 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
85 'ceph-osd', 'ceph-radosgw']85 'ceph-osd', 'ceph-radosgw']
86 # Openstack subordinate charms do not expose an origin option as that86 # Most OpenStack subordinate charms do not expose an origin option
87 # is controlled by the principal87 # as that is controlled by the principal.
88 ignore = ['neutron-openvswitch', 'cisco-vpp', 'odl-controller']88 ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
89 'cisco-vpp', 'odl-controller']
8990
90 if self.openstack:91 if self.openstack:
91 for svc in services:92 for svc in services:
@@ -152,3 +153,36 @@
152 return os_origin.split('%s-' % self.series)[1].split('/')[0]153 return os_origin.split('%s-' % self.series)[1].split('/')[0]
153 else:154 else:
154 return releases[self.series]155 return releases[self.series]
156
157 def get_ceph_expected_pools(self, radosgw=False):
158 """Return a list of expected ceph pools in a ceph + cinder + glance
159 test scenario, based on OpenStack release and whether ceph radosgw
160 is flagged as present or not."""
161
162 if self._get_openstack_release() >= self.trusty_kilo:
163 # Kilo or later
164 pools = [
165 'rbd',
166 'cinder',
167 'glance'
168 ]
169 else:
170 # Juno or earlier
171 pools = [
172 'data',
173 'metadata',
174 'rbd',
175 'cinder',
176 'glance'
177 ]
178
179 if radosgw:
180 pools.extend([
181 '.rgw.root',
182 '.rgw.control',
183 '.rgw',
184 '.rgw.gc',
185 '.users.uid'
186 ])
187
188 return pools
155189
=== modified file 'charmhelpers/contrib/openstack/amulet/utils.py'
--- charmhelpers/contrib/openstack/amulet/utils.py 2015-01-22 06:06:03 +0000
+++ charmhelpers/contrib/openstack/amulet/utils.py 2015-08-13 08:33:21 +0000
@@ -14,16 +14,20 @@
14# You should have received a copy of the GNU Lesser General Public License14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1616
17import amulet
18import json
17import logging19import logging
18import os20import os
21import six
19import time22import time
20import urllib23import urllib
2124
25import cinderclient.v1.client as cinder_client
22import glanceclient.v1.client as glance_client26import glanceclient.v1.client as glance_client
27import heatclient.v1.client as heat_client
23import keystoneclient.v2_0 as keystone_client28import keystoneclient.v2_0 as keystone_client
24import novaclient.v1_1.client as nova_client29import novaclient.v1_1.client as nova_client
2530import swiftclient
26import six
2731
28from charmhelpers.contrib.amulet.utils import (32from charmhelpers.contrib.amulet.utils import (
29 AmuletUtils33 AmuletUtils
@@ -37,7 +41,7 @@
37 """OpenStack amulet utilities.41 """OpenStack amulet utilities.
3842
39 This class inherits from AmuletUtils and has additional support43 This class inherits from AmuletUtils and has additional support
40 that is specifically for use by OpenStack charms.44 that is specifically for use by OpenStack charm tests.
41 """45 """
4246
43 def __init__(self, log_level=ERROR):47 def __init__(self, log_level=ERROR):
@@ -51,6 +55,8 @@
51 Validate actual endpoint data vs expected endpoint data. The ports55 Validate actual endpoint data vs expected endpoint data. The ports
52 are used to find the matching endpoint.56 are used to find the matching endpoint.
53 """57 """
58 self.log.debug('Validating endpoint data...')
59 self.log.debug('actual: {}'.format(repr(endpoints)))
54 found = False60 found = False
55 for ep in endpoints:61 for ep in endpoints:
56 self.log.debug('endpoint: {}'.format(repr(ep)))62 self.log.debug('endpoint: {}'.format(repr(ep)))
@@ -77,6 +83,7 @@
77 Validate a list of actual service catalog endpoints vs a list of83 Validate a list of actual service catalog endpoints vs a list of
78 expected service catalog endpoints.84 expected service catalog endpoints.
79 """85 """
86 self.log.debug('Validating service catalog endpoint data...')
80 self.log.debug('actual: {}'.format(repr(actual)))87 self.log.debug('actual: {}'.format(repr(actual)))
81 for k, v in six.iteritems(expected):88 for k, v in six.iteritems(expected):
82 if k in actual:89 if k in actual:
@@ -93,6 +100,7 @@
93 Validate a list of actual tenant data vs list of expected tenant100 Validate a list of actual tenant data vs list of expected tenant
94 data.101 data.
95 """102 """
103 self.log.debug('Validating tenant data...')
96 self.log.debug('actual: {}'.format(repr(actual)))104 self.log.debug('actual: {}'.format(repr(actual)))
97 for e in expected:105 for e in expected:
98 found = False106 found = False
@@ -114,6 +122,7 @@
114 Validate a list of actual role data vs a list of expected role122 Validate a list of actual role data vs a list of expected role
115 data.123 data.
116 """124 """
125 self.log.debug('Validating role data...')
117 self.log.debug('actual: {}'.format(repr(actual)))126 self.log.debug('actual: {}'.format(repr(actual)))
118 for e in expected:127 for e in expected:
119 found = False128 found = False
@@ -134,6 +143,7 @@
134 Validate a list of actual user data vs a list of expected user143 Validate a list of actual user data vs a list of expected user
135 data.144 data.
136 """145 """
146 self.log.debug('Validating user data...')
137 self.log.debug('actual: {}'.format(repr(actual)))147 self.log.debug('actual: {}'.format(repr(actual)))
138 for e in expected:148 for e in expected:
139 found = False149 found = False
@@ -155,17 +165,30 @@
155165
156 Validate a list of actual flavors vs a list of expected flavors.166 Validate a list of actual flavors vs a list of expected flavors.
157 """167 """
168 self.log.debug('Validating flavor data...')
158 self.log.debug('actual: {}'.format(repr(actual)))169 self.log.debug('actual: {}'.format(repr(actual)))
159 act = [a.name for a in actual]170 act = [a.name for a in actual]
160 return self._validate_list_data(expected, act)171 return self._validate_list_data(expected, act)
161172
162 def tenant_exists(self, keystone, tenant):173 def tenant_exists(self, keystone, tenant):
163 """Return True if tenant exists."""174 """Return True if tenant exists."""
175 self.log.debug('Checking if tenant exists ({})...'.format(tenant))
164 return tenant in [t.name for t in keystone.tenants.list()]176 return tenant in [t.name for t in keystone.tenants.list()]
165177
178 def authenticate_cinder_admin(self, keystone_sentry, username,
179 password, tenant):
180 """Authenticates admin user with cinder."""
181 # NOTE(beisner): cinder python client doesn't accept tokens.
182 service_ip = \
183 keystone_sentry.relation('shared-db',
184 'mysql:shared-db')['private-address']
185 ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
186 return cinder_client.Client(username, password, tenant, ept)
187
166 def authenticate_keystone_admin(self, keystone_sentry, user, password,188 def authenticate_keystone_admin(self, keystone_sentry, user, password,
167 tenant):189 tenant):
168 """Authenticates admin user with the keystone admin endpoint."""190 """Authenticates admin user with the keystone admin endpoint."""
191 self.log.debug('Authenticating keystone admin...')
169 unit = keystone_sentry192 unit = keystone_sentry
170 service_ip = unit.relation('shared-db',193 service_ip = unit.relation('shared-db',
171 'mysql:shared-db')['private-address']194 'mysql:shared-db')['private-address']
@@ -175,6 +198,7 @@
175198
176 def authenticate_keystone_user(self, keystone, user, password, tenant):199 def authenticate_keystone_user(self, keystone, user, password, tenant):
177 """Authenticates a regular user with the keystone public endpoint."""200 """Authenticates a regular user with the keystone public endpoint."""
201 self.log.debug('Authenticating keystone user ({})...'.format(user))
178 ep = keystone.service_catalog.url_for(service_type='identity',202 ep = keystone.service_catalog.url_for(service_type='identity',
179 endpoint_type='publicURL')203 endpoint_type='publicURL')
180 return keystone_client.Client(username=user, password=password,204 return keystone_client.Client(username=user, password=password,
@@ -182,19 +206,49 @@
182206
183 def authenticate_glance_admin(self, keystone):207 def authenticate_glance_admin(self, keystone):
184 """Authenticates admin user with glance."""208 """Authenticates admin user with glance."""
209 self.log.debug('Authenticating glance admin...')
185 ep = keystone.service_catalog.url_for(service_type='image',210 ep = keystone.service_catalog.url_for(service_type='image',
186 endpoint_type='adminURL')211 endpoint_type='adminURL')
187 return glance_client.Client(ep, token=keystone.auth_token)212 return glance_client.Client(ep, token=keystone.auth_token)
188213
214 def authenticate_heat_admin(self, keystone):
215 """Authenticates the admin user with heat."""
216 self.log.debug('Authenticating heat admin...')
217 ep = keystone.service_catalog.url_for(service_type='orchestration',
218 endpoint_type='publicURL')
219 return heat_client.Client(endpoint=ep, token=keystone.auth_token)
220
189 def authenticate_nova_user(self, keystone, user, password, tenant):221 def authenticate_nova_user(self, keystone, user, password, tenant):
190 """Authenticates a regular user with nova-api."""222 """Authenticates a regular user with nova-api."""
223 self.log.debug('Authenticating nova user ({})...'.format(user))
191 ep = keystone.service_catalog.url_for(service_type='identity',224 ep = keystone.service_catalog.url_for(service_type='identity',
192 endpoint_type='publicURL')225 endpoint_type='publicURL')
193 return nova_client.Client(username=user, api_key=password,226 return nova_client.Client(username=user, api_key=password,
194 project_id=tenant, auth_url=ep)227 project_id=tenant, auth_url=ep)
195228
229 def authenticate_swift_user(self, keystone, user, password, tenant):
230 """Authenticates a regular user with swift api."""
231 self.log.debug('Authenticating swift user ({})...'.format(user))
232 ep = keystone.service_catalog.url_for(service_type='identity',
233 endpoint_type='publicURL')
234 return swiftclient.Connection(authurl=ep,
235 user=user,
236 key=password,
237 tenant_name=tenant,
238 auth_version='2.0')
239
196 def create_cirros_image(self, glance, image_name):240 def create_cirros_image(self, glance, image_name):
197 """Download the latest cirros image and upload it to glance."""241 """Download the latest cirros image and upload it to glance,
242 validate and return a resource pointer.
243
244 :param glance: pointer to authenticated glance connection
245 :param image_name: display name for new image
246 :returns: glance image pointer
247 """
248 self.log.debug('Creating glance cirros image '
249 '({})...'.format(image_name))
250
251 # Download cirros image
198 http_proxy = os.getenv('AMULET_HTTP_PROXY')252 http_proxy = os.getenv('AMULET_HTTP_PROXY')
199 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))253 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
200 if http_proxy:254 if http_proxy:
@@ -203,57 +257,67 @@
203 else:257 else:
204 opener = urllib.FancyURLopener()258 opener = urllib.FancyURLopener()
205259
206 f = opener.open("http://download.cirros-cloud.net/version/released")260 f = opener.open('http://download.cirros-cloud.net/version/released')
207 version = f.read().strip()261 version = f.read().strip()
208 cirros_img = "cirros-{}-x86_64-disk.img".format(version)262 cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
209 local_path = os.path.join('tests', cirros_img)263 local_path = os.path.join('tests', cirros_img)
210264
211 if not os.path.exists(local_path):265 if not os.path.exists(local_path):
212 cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",266 cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
213 version, cirros_img)267 version, cirros_img)
214 opener.retrieve(cirros_url, local_path)268 opener.retrieve(cirros_url, local_path)
215 f.close()269 f.close()
216270
271 # Create glance image
217 with open(local_path) as f:272 with open(local_path) as f:
218 image = glance.images.create(name=image_name, is_public=True,273 image = glance.images.create(name=image_name, is_public=True,
219 disk_format='qcow2',274 disk_format='qcow2',
220 container_format='bare', data=f)275 container_format='bare', data=f)
221 count = 1276
222 status = image.status277 # Wait for image to reach active status
223 while status != 'active' and count < 10:278 img_id = image.id
224 time.sleep(3)279 ret = self.resource_reaches_status(glance.images, img_id,
225 image = glance.images.get(image.id)280 expected_stat='active',
226 status = image.status281 msg='Image status wait')
227 self.log.debug('image status: {}'.format(status))282 if not ret:
228 count += 1283 msg = 'Glance image failed to reach expected state.'
229284 amulet.raise_status(amulet.FAIL, msg=msg)
230 if status != 'active':285
231 self.log.error('image creation timed out')286 # Re-validate new image
232 return None287 self.log.debug('Validating image attributes...')
288 val_img_name = glance.images.get(img_id).name
289 val_img_stat = glance.images.get(img_id).status
290 val_img_pub = glance.images.get(img_id).is_public
291 val_img_cfmt = glance.images.get(img_id).container_format
292 val_img_dfmt = glance.images.get(img_id).disk_format
293 msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
294 'container fmt:{} disk fmt:{}'.format(
295 val_img_name, val_img_pub, img_id,
296 val_img_stat, val_img_cfmt, val_img_dfmt))
297
298 if val_img_name == image_name and val_img_stat == 'active' \
299 and val_img_pub is True and val_img_cfmt == 'bare' \
300 and val_img_dfmt == 'qcow2':
301 self.log.debug(msg_attr)
302 else:
303 msg = ('Volume validation failed, {}'.format(msg_attr))
304 amulet.raise_status(amulet.FAIL, msg=msg)
233305
234 return image306 return image
235307
236 def delete_image(self, glance, image):308 def delete_image(self, glance, image):
237 """Delete the specified image."""309 """Delete the specified image."""
238 num_before = len(list(glance.images.list()))310
239 glance.images.delete(image)311 # /!\ DEPRECATION WARNING
240312 self.log.warn('/!\\ DEPRECATION WARNING: use '
241 count = 1313 'delete_resource instead of delete_image.')
242 num_after = len(list(glance.images.list()))314 self.log.debug('Deleting glance image ({})...'.format(image))
243 while num_after != (num_before - 1) and count < 10:315 return self.delete_resource(glance.images, image, msg='glance image')
244 time.sleep(3)
245 num_after = len(list(glance.images.list()))
246 self.log.debug('number of images: {}'.format(num_after))
247 count += 1
248
249 if num_after != (num_before - 1):
250 self.log.error('image deletion timed out')
251 return False
252
253 return True
254316
255 def create_instance(self, nova, image_name, instance_name, flavor):317 def create_instance(self, nova, image_name, instance_name, flavor):
256 """Create the specified instance."""318 """Create the specified instance."""
319 self.log.debug('Creating instance '
320 '({}|{}|{})'.format(instance_name, image_name, flavor))
257 image = nova.images.find(name=image_name)321 image = nova.images.find(name=image_name)
258 flavor = nova.flavors.find(name=flavor)322 flavor = nova.flavors.find(name=flavor)
259 instance = nova.servers.create(name=instance_name, image=image,323 instance = nova.servers.create(name=instance_name, image=image,
@@ -276,19 +340,265 @@
276340
277 def delete_instance(self, nova, instance):341 def delete_instance(self, nova, instance):
278 """Delete the specified instance."""342 """Delete the specified instance."""
279 num_before = len(list(nova.servers.list()))343
280 nova.servers.delete(instance)344 # /!\ DEPRECATION WARNING
281345 self.log.warn('/!\\ DEPRECATION WARNING: use '
282 count = 1346 'delete_resource instead of delete_instance.')
283 num_after = len(list(nova.servers.list()))347 self.log.debug('Deleting instance ({})...'.format(instance))
284 while num_after != (num_before - 1) and count < 10:348 return self.delete_resource(nova.servers, instance,
285 time.sleep(3)349 msg='nova instance')
286 num_after = len(list(nova.servers.list()))350
287 self.log.debug('number of instances: {}'.format(num_after))351 def create_or_get_keypair(self, nova, keypair_name="testkey"):
288 count += 1352 """Create a new keypair, or return pointer if it already exists."""
289353 try:
290 if num_after != (num_before - 1):354 _keypair = nova.keypairs.get(keypair_name)
291 self.log.error('instance deletion timed out')355 self.log.debug('Keypair ({}) already exists, '
292 return False356 'using it.'.format(keypair_name))
293357 return _keypair
294 return True358 except:
359 self.log.debug('Keypair ({}) does not exist, '
360 'creating it.'.format(keypair_name))
361
362 _keypair = nova.keypairs.create(name=keypair_name)
363 return _keypair
364
365 def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
366 img_id=None, src_vol_id=None, snap_id=None):
367 """Create cinder volume, optionally from a glance image, OR
368 optionally as a clone of an existing volume, OR optionally
369 from a snapshot. Wait for the new volume status to reach
370 the expected status, validate and return a resource pointer.
371
372 :param vol_name: cinder volume display name
373 :param vol_size: size in gigabytes
374 :param img_id: optional glance image id
375 :param src_vol_id: optional source volume id to clone
376 :param snap_id: optional snapshot id to use
377 :returns: cinder volume pointer
378 """
379 # Handle parameter input and avoid impossible combinations
380 if img_id and not src_vol_id and not snap_id:
381 # Create volume from image
382 self.log.debug('Creating cinder volume from glance image...')
383 bootable = 'true'
384 elif src_vol_id and not img_id and not snap_id:
385 # Clone an existing volume
386 self.log.debug('Cloning cinder volume...')
387 bootable = cinder.volumes.get(src_vol_id).bootable
388 elif snap_id and not src_vol_id and not img_id:
389 # Create volume from snapshot
390 self.log.debug('Creating cinder volume from snapshot...')
391 snap = cinder.volume_snapshots.find(id=snap_id)
392 vol_size = snap.size
393 snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
394 bootable = cinder.volumes.get(snap_vol_id).bootable
395 elif not img_id and not src_vol_id and not snap_id:
396 # Create volume
397 self.log.debug('Creating cinder volume...')
398 bootable = 'false'
399 else:
400 # Impossible combination of parameters
401 msg = ('Invalid method use - name:{} size:{} img_id:{} '
402 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
403 img_id, src_vol_id,
404 snap_id))
405 amulet.raise_status(amulet.FAIL, msg=msg)
406
407 # Create new volume
408 try:
409 vol_new = cinder.volumes.create(display_name=vol_name,
410 imageRef=img_id,
411 size=vol_size,
412 source_volid=src_vol_id,
413 snapshot_id=snap_id)
414 vol_id = vol_new.id
415 except Exception as e:
416 msg = 'Failed to create volume: {}'.format(e)
417 amulet.raise_status(amulet.FAIL, msg=msg)
418
419 # Wait for volume to reach available status
420 ret = self.resource_reaches_status(cinder.volumes, vol_id,
421 expected_stat="available",
422 msg="Volume status wait")
423 if not ret:
424 msg = 'Cinder volume failed to reach expected state.'
425 amulet.raise_status(amulet.FAIL, msg=msg)
426
427 # Re-validate new volume
428 self.log.debug('Validating volume attributes...')
429 val_vol_name = cinder.volumes.get(vol_id).display_name
430 val_vol_boot = cinder.volumes.get(vol_id).bootable
431 val_vol_stat = cinder.volumes.get(vol_id).status
432 val_vol_size = cinder.volumes.get(vol_id).size
433 msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
434 '{} size:{}'.format(val_vol_name, vol_id,
435 val_vol_stat, val_vol_boot,
436 val_vol_size))
437
438 if val_vol_boot == bootable and val_vol_stat == 'available' \
439 and val_vol_name == vol_name and val_vol_size == vol_size:
440 self.log.debug(msg_attr)
441 else:
442 msg = ('Volume validation failed, {}'.format(msg_attr))
443 amulet.raise_status(amulet.FAIL, msg=msg)
444
445 return vol_new
446
447 def delete_resource(self, resource, resource_id,
448 msg="resource", max_wait=120):
449 """Delete one openstack resource, such as one instance, keypair,
450 image, volume, stack, etc., and confirm deletion within max wait time.
451
452 :param resource: pointer to os resource type, ex:glance_client.images
453 :param resource_id: unique name or id for the openstack resource
454 :param msg: text to identify purpose in logging
455 :param max_wait: maximum wait time in seconds
456 :returns: True if successful, otherwise False
457 """
458 self.log.debug('Deleting OpenStack resource '
459 '{} ({})'.format(resource_id, msg))
460 num_before = len(list(resource.list()))
461 resource.delete(resource_id)
462
463 tries = 0
464 num_after = len(list(resource.list()))
465 while num_after != (num_before - 1) and tries < (max_wait / 4):
466 self.log.debug('{} delete check: '
467 '{} [{}:{}] {}'.format(msg, tries,
468 num_before,
469 num_after,
470 resource_id))
471 time.sleep(4)
472 num_after = len(list(resource.list()))
473 tries += 1
474
475 self.log.debug('{}: expected, actual count = {}, '
476 '{}'.format(msg, num_before - 1, num_after))
477
478 if num_after == (num_before - 1):
479 return True
480 else:
481 self.log.error('{} delete timed out'.format(msg))
482 return False
483
484 def resource_reaches_status(self, resource, resource_id,
485 expected_stat='available',
486 msg='resource', max_wait=120):
487 """Wait for an openstack resources status to reach an
488 expected status within a specified time. Useful to confirm that
489 nova instances, cinder vols, snapshots, glance images, heat stacks
490 and other resources eventually reach the expected status.
491
492 :param resource: pointer to os resource type, ex: heat_client.stacks
493 :param resource_id: unique id for the openstack resource
494 :param expected_stat: status to expect resource to reach
495 :param msg: text to identify purpose in logging
496 :param max_wait: maximum wait time in seconds
497 :returns: True if successful, False if status is not reached
498 """
499
500 tries = 0
501 resource_stat = resource.get(resource_id).status
502 while resource_stat != expected_stat and tries < (max_wait / 4):
503 self.log.debug('{} status check: '
504 '{} [{}:{}] {}'.format(msg, tries,
505 resource_stat,
506 expected_stat,
507 resource_id))
508 time.sleep(4)
509 resource_stat = resource.get(resource_id).status
510 tries += 1
511
512 self.log.debug('{}: expected, actual status = {}, '
513 '{}'.format(msg, resource_stat, expected_stat))
514
515 if resource_stat == expected_stat:
516 return True
517 else:
518 self.log.debug('{} never reached expected status: '
519 '{}'.format(resource_id, expected_stat))
520 return False
521
522 def get_ceph_osd_id_cmd(self, index):
523 """Produce a shell command that will return a ceph-osd id."""
524 return ("`initctl list | grep 'ceph-osd ' | "
525 "awk 'NR=={} {{ print $2 }}' | "
526 "grep -o '[0-9]*'`".format(index + 1))
527
528 def get_ceph_pools(self, sentry_unit):
529 """Return a dict of ceph pools from a single ceph unit, with
530 pool name as keys, pool id as vals."""
531 pools = {}
532 cmd = 'sudo ceph osd lspools'
533 output, code = sentry_unit.run(cmd)
534 if code != 0:
535 msg = ('{} `{}` returned {} '
536 '{}'.format(sentry_unit.info['unit_name'],
537 cmd, code, output))
538 amulet.raise_status(amulet.FAIL, msg=msg)
539
540 # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
541 for pool in str(output).split(','):
542 pool_id_name = pool.split(' ')
543 if len(pool_id_name) == 2:
544 pool_id = pool_id_name[0]
545 pool_name = pool_id_name[1]
546 pools[pool_name] = int(pool_id)
547
548 self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
549 pools))
550 return pools
551
552 def get_ceph_df(self, sentry_unit):
553 """Return dict of ceph df json output, including ceph pool state.
554
555 :param sentry_unit: Pointer to amulet sentry instance (juju unit)
556 :returns: Dict of ceph df output
557 """
558 cmd = 'sudo ceph df --format=json'
559 output, code = sentry_unit.run(cmd)
560 if code != 0:
561 msg = ('{} `{}` returned {} '
562 '{}'.format(sentry_unit.info['unit_name'],
563 cmd, code, output))
564 amulet.raise_status(amulet.FAIL, msg=msg)
565 return json.loads(output)
566
567 def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
568 """Take a sample of attributes of a ceph pool, returning ceph
569 pool name, object count and disk space used for the specified
570 pool ID number.
571
572 :param sentry_unit: Pointer to amulet sentry instance (juju unit)
573 :param pool_id: Ceph pool ID
574 :returns: List of pool name, object count, kb disk space used
575 """
576 df = self.get_ceph_df(sentry_unit)
577 pool_name = df['pools'][pool_id]['name']
578 obj_count = df['pools'][pool_id]['stats']['objects']
579 kb_used = df['pools'][pool_id]['stats']['kb_used']
580 self.log.debug('Ceph {} pool (ID {}): {} objects, '
581 '{} kb used'.format(pool_name, pool_id,
582 obj_count, kb_used))
583 return pool_name, obj_count, kb_used
584
585 def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
586 """Validate ceph pool samples taken over time, such as pool
587 object counts or pool kb used, before adding, after adding, and
588 after deleting items which affect those pool attributes. The
589 2nd element is expected to be greater than the 1st; 3rd is expected
590 to be less than the 2nd.
591
592 :param samples: List containing 3 data samples
593 :param sample_type: String for logging and usage context
594 :returns: None if successful, Failure message otherwise
595 """
596 original, created, deleted = range(3)
597 if samples[created] <= samples[original] or \
598 samples[deleted] >= samples[created]:
599 return ('Ceph {} samples ({}) '
600 'unexpected.'.format(sample_type, samples))
601 else:
602 self.log.debug('Ceph {} samples (OK): '
603 '{}'.format(sample_type, samples))
604 return None
295605
=== modified file 'charmhelpers/contrib/openstack/context.py'
--- charmhelpers/contrib/openstack/context.py 2015-04-16 19:19:18 +0000
+++ charmhelpers/contrib/openstack/context.py 2015-08-13 08:33:21 +0000
@@ -122,21 +122,24 @@
122 of specifying multiple key value pairs within the same string. For122 of specifying multiple key value pairs within the same string. For
123 example, a string in the format of 'key1=value1, key2=value2' will123 example, a string in the format of 'key1=value1, key2=value2' will
124 return a dict of:124 return a dict of:
125 {'key1': 'value1',125
126 'key2': 'value2'}.126 {'key1': 'value1',
127 'key2': 'value2'}.
127128
128 2. A string in the above format, but supporting a comma-delimited list129 2. A string in the above format, but supporting a comma-delimited list
129 of values for the same key. For example, a string in the format of130 of values for the same key. For example, a string in the format of
130 'key1=value1, key2=value3,value4,value5' will return a dict of:131 'key1=value1, key2=value3,value4,value5' will return a dict of:
131 {'key1', 'value1',132
132 'key2', 'value2,value3,value4'}133 {'key1', 'value1',
134 'key2', 'value2,value3,value4'}
133135
134 3. A string containing a colon character (:) prior to an equal136 3. A string containing a colon character (:) prior to an equal
135 character (=) will be treated as yaml and parsed as such. This can be137 character (=) will be treated as yaml and parsed as such. This can be
136 used to specify more complex key value pairs. For example,138 used to specify more complex key value pairs. For example,
137 a string in the format of 'key1: subkey1=value1, subkey2=value2' will139 a string in the format of 'key1: subkey1=value1, subkey2=value2' will
138 return a dict of:140 return a dict of:
139 {'key1', 'subkey1=value1, subkey2=value2'}141
142 {'key1', 'subkey1=value1, subkey2=value2'}
140143
141 The provided config_flags string may be a list of comma-separated values144 The provided config_flags string may be a list of comma-separated values
142 which themselves may be comma-separated list of values.145 which themselves may be comma-separated list of values.
@@ -240,7 +243,7 @@
240 if self.relation_prefix:243 if self.relation_prefix:
241 password_setting = self.relation_prefix + '_password'244 password_setting = self.relation_prefix + '_password'
242245
243 for rid in relation_ids('shared-db'):246 for rid in relation_ids(self.interfaces[0]):
244 for unit in related_units(rid):247 for unit in related_units(rid):
245 rdata = relation_get(rid=rid, unit=unit)248 rdata = relation_get(rid=rid, unit=unit)
246 host = rdata.get('db_host')249 host = rdata.get('db_host')
@@ -891,8 +894,6 @@
891 return ctxt894 return ctxt
892895
893 def __call__(self):896 def __call__(self):
894 self._ensure_packages()
895
896 if self.network_manager not in ['quantum', 'neutron']:897 if self.network_manager not in ['quantum', 'neutron']:
897 return {}898 return {}
898899
@@ -1050,13 +1051,22 @@
1050 :param config_file : Service's config file to query sections1051 :param config_file : Service's config file to query sections
1051 :param interface : Subordinate interface to inspect1052 :param interface : Subordinate interface to inspect
1052 """1053 """
1053 self.service = service
1054 self.config_file = config_file1054 self.config_file = config_file
1055 self.interface = interface1055 if isinstance(service, list):
1056 self.services = service
1057 else:
1058 self.services = [service]
1059 if isinstance(interface, list):
1060 self.interfaces = interface
1061 else:
1062 self.interfaces = [interface]
10561063
1057 def __call__(self):1064 def __call__(self):
1058 ctxt = {'sections': {}}1065 ctxt = {'sections': {}}
1059 for rid in relation_ids(self.interface):1066 rids = []
1067 for interface in self.interfaces:
1068 rids.extend(relation_ids(interface))
1069 for rid in rids:
1060 for unit in related_units(rid):1070 for unit in related_units(rid):
1061 sub_config = relation_get('subordinate_configuration',1071 sub_config = relation_get('subordinate_configuration',
1062 rid=rid, unit=unit)1072 rid=rid, unit=unit)
@@ -1068,29 +1078,32 @@
1068 'setting from %s' % rid, level=ERROR)1078 'setting from %s' % rid, level=ERROR)
1069 continue1079 continue
10701080
1071 if self.service not in sub_config:1081 for service in self.services:
1072 log('Found subordinate_config on %s but it contained'1082 if service not in sub_config:
1073 'nothing for %s service' % (rid, self.service),1083 log('Found subordinate_config on %s but it contained'
1074 level=INFO)1084 'nothing for %s service' % (rid, service),
1075 continue1085 level=INFO)
10761086 continue
1077 sub_config = sub_config[self.service]1087
1078 if self.config_file not in sub_config:1088 sub_config = sub_config[service]
1079 log('Found subordinate_config on %s but it contained'1089 if self.config_file not in sub_config:
1080 'nothing for %s' % (rid, self.config_file),1090 log('Found subordinate_config on %s but it contained'
1081 level=INFO)1091 'nothing for %s' % (rid, self.config_file),
1082 continue1092 level=INFO)
10831093 continue
1084 sub_config = sub_config[self.config_file]1094
1085 for k, v in six.iteritems(sub_config):1095 sub_config = sub_config[self.config_file]
1086 if k == 'sections':1096 for k, v in six.iteritems(sub_config):
1087 for section, config_dict in six.iteritems(v):1097 if k == 'sections':
1088 log("adding section '%s'" % (section),1098 for section, config_list in six.iteritems(v):
1089 level=DEBUG)1099 log("adding section '%s'" % (section),
1090 ctxt[k][section] = config_dict1100 level=DEBUG)
1091 else:1101 if ctxt[k].get(section):
1092 ctxt[k] = v1102 ctxt[k][section].extend(config_list)
10931103 else:
1104 ctxt[k][section] = config_list
1105 else:
1106 ctxt[k] = v
1094 log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)1107 log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
1095 return ctxt1108 return ctxt
10961109
10971110
=== modified file 'charmhelpers/contrib/openstack/templates/ceph.conf'
--- charmhelpers/contrib/openstack/templates/ceph.conf 2014-03-26 10:26:36 +0000
+++ charmhelpers/contrib/openstack/templates/ceph.conf 2015-08-13 08:33:21 +0000
@@ -5,11 +5,11 @@
5###############################################################################5###############################################################################
6[global]6[global]
7{% if auth -%}7{% if auth -%}
8 auth_supported = {{ auth }}8auth_supported = {{ auth }}
9 keyring = /etc/ceph/$cluster.$name.keyring9keyring = /etc/ceph/$cluster.$name.keyring
10 mon host = {{ mon_hosts }}10mon host = {{ mon_hosts }}
11{% endif -%}11{% endif -%}
12 log to syslog = {{ use_syslog }}12log to syslog = {{ use_syslog }}
13 err to syslog = {{ use_syslog }}13err to syslog = {{ use_syslog }}
14 clog to syslog = {{ use_syslog }}14clog to syslog = {{ use_syslog }}
1515
1616
=== modified file 'charmhelpers/contrib/openstack/templating.py'
--- charmhelpers/contrib/openstack/templating.py 2015-06-11 09:00:37 +0000
+++ charmhelpers/contrib/openstack/templating.py 2015-08-13 08:33:21 +0000
@@ -29,8 +29,8 @@
29try:29try:
30 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions30 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
31except ImportError:31except ImportError:
32 # python-jinja2 may not be installed yet, or we're running unittests.32 apt_install('python-jinja2', fatal=True)
33 FileSystemLoader = ChoiceLoader = Environment = exceptions = None33 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
3434
3535
36class OSConfigException(Exception):36class OSConfigException(Exception):
3737
=== modified file 'charmhelpers/contrib/openstack/utils.py'
--- charmhelpers/contrib/openstack/utils.py 2015-06-17 12:22:29 +0000
+++ charmhelpers/contrib/openstack/utils.py 2015-08-13 08:33:21 +0000
@@ -25,6 +25,7 @@
25import os25import os
26import sys26import sys
27import uuid27import uuid
28import re
2829
29import six30import six
30import yaml31import yaml
@@ -71,7 +72,6 @@
71DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '72DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
72 'restricted main multiverse universe')73 'restricted main multiverse universe')
7374
74
75UBUNTU_OPENSTACK_RELEASE = OrderedDict([75UBUNTU_OPENSTACK_RELEASE = OrderedDict([
76 ('oneiric', 'diablo'),76 ('oneiric', 'diablo'),
77 ('precise', 'essex'),77 ('precise', 'essex'),
@@ -81,6 +81,7 @@
81 ('trusty', 'icehouse'),81 ('trusty', 'icehouse'),
82 ('utopic', 'juno'),82 ('utopic', 'juno'),
83 ('vivid', 'kilo'),83 ('vivid', 'kilo'),
84 ('wily', 'liberty'),
84])85])
8586
8687
@@ -93,6 +94,7 @@
93 ('2014.1', 'icehouse'),94 ('2014.1', 'icehouse'),
94 ('2014.2', 'juno'),95 ('2014.2', 'juno'),
95 ('2015.1', 'kilo'),96 ('2015.1', 'kilo'),
97 ('2015.2', 'liberty'),
96])98])
9799
98# The ugly duckling100# The ugly duckling
@@ -115,8 +117,37 @@
115 ('2.2.0', 'juno'),117 ('2.2.0', 'juno'),
116 ('2.2.1', 'kilo'),118 ('2.2.1', 'kilo'),
117 ('2.2.2', 'kilo'),119 ('2.2.2', 'kilo'),
120 ('2.3.0', 'liberty'),
118])121])
119122
123# >= Liberty version->codename mapping
124PACKAGE_CODENAMES = {
125 'nova-common': OrderedDict([
126 ('12.0.0', 'liberty'),
127 ]),
128 'neutron-common': OrderedDict([
129 ('7.0.0', 'liberty'),
130 ]),
131 'cinder-common': OrderedDict([
132 ('7.0.0', 'liberty'),
133 ]),
134 'keystone': OrderedDict([
135 ('8.0.0', 'liberty'),
136 ]),
137 'horizon-common': OrderedDict([
138 ('8.0.0', 'liberty'),
139 ]),
140 'ceilometer-common': OrderedDict([
141 ('5.0.0', 'liberty'),
142 ]),
143 'heat-common': OrderedDict([
144 ('5.0.0', 'liberty'),
145 ]),
146 'glance-common': OrderedDict([
147 ('11.0.0', 'liberty'),
148 ]),
149}
150
120DEFAULT_LOOPBACK_SIZE = '5G'151DEFAULT_LOOPBACK_SIZE = '5G'
121152
122153
@@ -200,20 +231,29 @@
200 error_out(e)231 error_out(e)
201232
202 vers = apt.upstream_version(pkg.current_ver.ver_str)233 vers = apt.upstream_version(pkg.current_ver.ver_str)
234 match = re.match('^(\d)\.(\d)\.(\d)', vers)
235 if match:
236 vers = match.group(0)
203237
204 try:238 # >= Liberty independent project versions
205 if 'swift' in pkg.name:239 if (package in PACKAGE_CODENAMES and
206 swift_vers = vers[:5]240 vers in PACKAGE_CODENAMES[package]):
207 if swift_vers not in SWIFT_CODENAMES:241 return PACKAGE_CODENAMES[package][vers]
208 # Deal with 1.10.0 upward242 else:
209 swift_vers = vers[:6]243 # < Liberty co-ordinated project versions
210 return SWIFT_CODENAMES[swift_vers]244 try:
211 else:245 if 'swift' in pkg.name:
212 vers = vers[:6]246 swift_vers = vers[:5]
213 return OPENSTACK_CODENAMES[vers]247 if swift_vers not in SWIFT_CODENAMES:
214 except KeyError:248 # Deal with 1.10.0 upward
215 e = 'Could not determine OpenStack codename for version %s' % vers249 swift_vers = vers[:6]
216 error_out(e)250 return SWIFT_CODENAMES[swift_vers]
251 else:
252 vers = vers[:6]
253 return OPENSTACK_CODENAMES[vers]
254 except KeyError:
255 e = 'Could not determine OpenStack codename for version %s' % vers
256 error_out(e)
217257
218258
219def get_os_version_package(pkg, fatal=True):259def get_os_version_package(pkg, fatal=True):
@@ -323,6 +363,9 @@
323 'kilo': 'trusty-updates/kilo',363 'kilo': 'trusty-updates/kilo',
324 'kilo/updates': 'trusty-updates/kilo',364 'kilo/updates': 'trusty-updates/kilo',
325 'kilo/proposed': 'trusty-proposed/kilo',365 'kilo/proposed': 'trusty-proposed/kilo',
366 'liberty': 'trusty-updates/liberty',
367 'liberty/updates': 'trusty-updates/liberty',
368 'liberty/proposed': 'trusty-proposed/liberty',
326 }369 }
327370
328 try:371 try:
@@ -518,6 +561,7 @@
518 Clone/install all specified OpenStack repositories.561 Clone/install all specified OpenStack repositories.
519562
520 The expected format of projects_yaml is:563 The expected format of projects_yaml is:
564
521 repositories:565 repositories:
522 - {name: keystone,566 - {name: keystone,
523 repository: 'git://git.openstack.org/openstack/keystone.git',567 repository: 'git://git.openstack.org/openstack/keystone.git',
@@ -525,11 +569,13 @@
525 - {name: requirements,569 - {name: requirements,
526 repository: 'git://git.openstack.org/openstack/requirements.git',570 repository: 'git://git.openstack.org/openstack/requirements.git',
527 branch: 'stable/icehouse'}571 branch: 'stable/icehouse'}
572
528 directory: /mnt/openstack-git573 directory: /mnt/openstack-git
529 http_proxy: squid-proxy-url574 http_proxy: squid-proxy-url
530 https_proxy: squid-proxy-url575 https_proxy: squid-proxy-url
531576
532 The directory, http_proxy, and https_proxy keys are optional.577 The directory, http_proxy, and https_proxy keys are optional.
578
533 """579 """
534 global requirements_dir580 global requirements_dir
535 parent_dir = '/mnt/openstack-git'581 parent_dir = '/mnt/openstack-git'
@@ -551,6 +597,12 @@
551597
552 pip_create_virtualenv(os.path.join(parent_dir, 'venv'))598 pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
553599
600 # Upgrade setuptools and pip from default virtualenv versions. The default
601 # versions in trusty break master OpenStack branch deployments.
602 for p in ['pip', 'setuptools']:
603 pip_install(p, upgrade=True, proxy=http_proxy,
604 venv=os.path.join(parent_dir, 'venv'))
605
554 for p in projects['repositories']:606 for p in projects['repositories']:
555 repo = p['repository']607 repo = p['repository']
556 branch = p['branch']608 branch = p['branch']
@@ -612,24 +664,24 @@
612 else:664 else:
613 repo_dir = dest_dir665 repo_dir = dest_dir
614666
667 venv = os.path.join(parent_dir, 'venv')
668
615 if update_requirements:669 if update_requirements:
616 if not requirements_dir:670 if not requirements_dir:
617 error_out('requirements repo must be cloned before '671 error_out('requirements repo must be cloned before '
618 'updating from global requirements.')672 'updating from global requirements.')
619 _git_update_requirements(repo_dir, requirements_dir)673 _git_update_requirements(venv, repo_dir, requirements_dir)
620674
621 juju_log('Installing git repo from dir: {}'.format(repo_dir))675 juju_log('Installing git repo from dir: {}'.format(repo_dir))
622 if http_proxy:676 if http_proxy:
623 pip_install(repo_dir, proxy=http_proxy,677 pip_install(repo_dir, proxy=http_proxy, venv=venv)
624 venv=os.path.join(parent_dir, 'venv'))
625 else:678 else:
626 pip_install(repo_dir,679 pip_install(repo_dir, venv=venv)
627 venv=os.path.join(parent_dir, 'venv'))
628680
629 return repo_dir681 return repo_dir
630682
631683
632def _git_update_requirements(package_dir, reqs_dir):684def _git_update_requirements(venv, package_dir, reqs_dir):
633 """685 """
634 Update from global requirements.686 Update from global requirements.
635687
@@ -638,12 +690,14 @@
638 """690 """
639 orig_dir = os.getcwd()691 orig_dir = os.getcwd()
640 os.chdir(reqs_dir)692 os.chdir(reqs_dir)
641 cmd = ['python', 'update.py', package_dir]693 python = os.path.join(venv, 'bin/python')
694 cmd = [python, 'update.py', package_dir]
642 try:695 try:
643 subprocess.check_call(cmd)696 subprocess.check_call(cmd)
644 except subprocess.CalledProcessError:697 except subprocess.CalledProcessError:
645 package = os.path.basename(package_dir)698 package = os.path.basename(package_dir)
646 error_out("Error updating {} from global-requirements.txt".format(package))699 error_out("Error updating {} from "
700 "global-requirements.txt".format(package))
647 os.chdir(orig_dir)701 os.chdir(orig_dir)
648702
649703
650704
=== modified file 'charmhelpers/contrib/peerstorage/__init__.py'
--- charmhelpers/contrib/peerstorage/__init__.py 2015-06-03 13:09:25 +0000
+++ charmhelpers/contrib/peerstorage/__init__.py 2015-08-13 08:33:21 +0000
@@ -59,7 +59,7 @@
59"""59"""
6060
6161
62def leader_get(attribute=None):62def leader_get(attribute=None, rid=None):
63 """Wrapper to ensure that settings are migrated from the peer relation.63 """Wrapper to ensure that settings are migrated from the peer relation.
6464
65 This is to support upgrading an environment that does not support65 This is to support upgrading an environment that does not support
@@ -94,7 +94,8 @@
94 # If attribute not present in leader db, check if this unit has set94 # If attribute not present in leader db, check if this unit has set
95 # the attribute in the peer relation95 # the attribute in the peer relation
96 if not leader_settings:96 if not leader_settings:
97 peer_setting = relation_get(attribute=attribute, unit=local_unit())97 peer_setting = _relation_get(attribute=attribute, unit=local_unit(),
98 rid=rid)
98 if peer_setting:99 if peer_setting:
99 leader_set(settings={attribute: peer_setting})100 leader_set(settings={attribute: peer_setting})
100 leader_settings = peer_setting101 leader_settings = peer_setting
@@ -103,7 +104,7 @@
103 settings_migrated = True104 settings_migrated = True
104 migrated.add(attribute)105 migrated.add(attribute)
105 else:106 else:
106 r_settings = relation_get(unit=local_unit())107 r_settings = _relation_get(unit=local_unit(), rid=rid)
107 if r_settings:108 if r_settings:
108 for key in set(r_settings.keys()).difference(migrated):109 for key in set(r_settings.keys()).difference(migrated):
109 # Leader setting wins110 # Leader setting wins
@@ -151,7 +152,7 @@
151 """152 """
152 try:153 try:
153 if rid in relation_ids('cluster'):154 if rid in relation_ids('cluster'):
154 return leader_get(attribute)155 return leader_get(attribute, rid)
155 else:156 else:
156 raise NotImplementedError157 raise NotImplementedError
157 except NotImplementedError:158 except NotImplementedError:
158159
=== modified file 'charmhelpers/contrib/python/packages.py'
--- charmhelpers/contrib/python/packages.py 2015-05-07 18:12:54 +0000
+++ charmhelpers/contrib/python/packages.py 2015-08-13 08:33:21 +0000
@@ -36,6 +36,8 @@
36def parse_options(given, available):36def parse_options(given, available):
37 """Given a set of options, check if available"""37 """Given a set of options, check if available"""
38 for key, value in sorted(given.items()):38 for key, value in sorted(given.items()):
39 if not value:
40 continue
39 if key in available:41 if key in available:
40 yield "--{0}={1}".format(key, value)42 yield "--{0}={1}".format(key, value)
4143
4244
=== modified file 'charmhelpers/contrib/storage/linux/ceph.py'
--- charmhelpers/contrib/storage/linux/ceph.py 2015-01-22 06:11:15 +0000
+++ charmhelpers/contrib/storage/linux/ceph.py 2015-08-13 08:33:21 +0000
@@ -60,12 +60,12 @@
60KEYFILE = '/etc/ceph/ceph.client.{}.key'60KEYFILE = '/etc/ceph/ceph.client.{}.key'
6161
62CEPH_CONF = """[global]62CEPH_CONF = """[global]
63 auth supported = {auth}63auth supported = {auth}
64 keyring = {keyring}64keyring = {keyring}
65 mon host = {mon_hosts}65mon host = {mon_hosts}
66 log to syslog = {use_syslog}66log to syslog = {use_syslog}
67 err to syslog = {use_syslog}67err to syslog = {use_syslog}
68 clog to syslog = {use_syslog}68clog to syslog = {use_syslog}
69"""69"""
7070
7171
7272
=== modified file 'charmhelpers/contrib/storage/linux/utils.py'
--- charmhelpers/contrib/storage/linux/utils.py 2015-01-22 06:06:03 +0000
+++ charmhelpers/contrib/storage/linux/utils.py 2015-08-13 08:33:21 +0000
@@ -43,9 +43,10 @@
4343
44 :param block_device: str: Full path of block device to clean.44 :param block_device: str: Full path of block device to clean.
45 '''45 '''
46 # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
46 # sometimes sgdisk exits non-zero; this is OK, dd will clean up47 # sometimes sgdisk exits non-zero; this is OK, dd will clean up
47 call(['sgdisk', '--zap-all', '--mbrtogpt',48 call(['sgdisk', '--zap-all', '--', block_device])
48 '--clear', block_device])49 call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
49 dev_end = check_output(['blockdev', '--getsz',50 dev_end = check_output(['blockdev', '--getsz',
50 block_device]).decode('UTF-8')51 block_device]).decode('UTF-8')
51 gpt_end = int(dev_end.split()[0]) - 10052 gpt_end = int(dev_end.split()[0]) - 100
@@ -67,4 +68,4 @@
67 out = check_output(['mount']).decode('UTF-8')68 out = check_output(['mount']).decode('UTF-8')
68 if is_partition:69 if is_partition:
69 return bool(re.search(device + r"\b", out))70 return bool(re.search(device + r"\b", out))
70 return bool(re.search(device + r"[0-9]+\b", out))71 return bool(re.search(device + r"[0-9]*\b", out))
7172
=== modified file 'charmhelpers/contrib/unison/__init__.py'
--- charmhelpers/contrib/unison/__init__.py 2015-04-03 15:23:46 +0000
+++ charmhelpers/contrib/unison/__init__.py 2015-08-13 08:33:21 +0000
@@ -16,7 +16,7 @@
1616
17# Easy file synchronization among peer units using ssh + unison.17# Easy file synchronization among peer units using ssh + unison.
18#18#
19# From *both* peer relation -joined and -changed, add a call to19# For the -joined, -changed, and -departed peer relations, add a call to
20# ssh_authorized_peers() describing the peer relation and the desired20# ssh_authorized_peers() describing the peer relation and the desired
21# user + group. After all peer relations have settled, all hosts should21# user + group. After all peer relations have settled, all hosts should
22# be able to connect to one another via key auth'd ssh as the specified user.22# be able to connect to one another via key auth'd ssh as the specified user.
@@ -30,14 +30,21 @@
30# ...30# ...
31# ssh_authorized_peers(peer_interface='cluster',31# ssh_authorized_peers(peer_interface='cluster',
32# user='juju_ssh', group='juju_ssh',32# user='juju_ssh', group='juju_ssh',
33# ensure_user=True)33# ensure_local_user=True)
34# ...34# ...
35#35#
36# cluster-relation-changed:36# cluster-relation-changed:
37# ...37# ...
38# ssh_authorized_peers(peer_interface='cluster',38# ssh_authorized_peers(peer_interface='cluster',
39# user='juju_ssh', group='juju_ssh',39# user='juju_ssh', group='juju_ssh',
40# ensure_user=True)40# ensure_local_user=True)
41# ...
42#
43# cluster-relation-departed:
44# ...
45# ssh_authorized_peers(peer_interface='cluster',
46# user='juju_ssh', group='juju_ssh',
47# ensure_local_user=True)
41# ...48# ...
42#49#
43# Hooks are now free to sync files as easily as:50# Hooks are now free to sync files as easily as:
@@ -92,11 +99,18 @@
92 raise Exception99 raise Exception
93100
94101
95def create_private_key(user, priv_key_path):102def create_private_key(user, priv_key_path, key_type='rsa'):
103 types_bits = {
104 'rsa': '2048',
105 'ecdsa': '521',
106 }
107 if key_type not in types_bits:
108 log('Unknown ssh key type {}, using rsa'.format(key_type), ERROR)
109 key_type = 'rsa'
96 if not os.path.isfile(priv_key_path):110 if not os.path.isfile(priv_key_path):
97 log('Generating new SSH key for user %s.' % user)111 log('Generating new SSH key for user %s.' % user)
98 cmd = ['ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048',112 cmd = ['ssh-keygen', '-q', '-N', '', '-t', key_type,
99 '-f', priv_key_path]113 '-b', types_bits[key_type], '-f', priv_key_path]
100 check_call(cmd)114 check_call(cmd)
101 else:115 else:
102 log('SSH key already exists at %s.' % priv_key_path)116 log('SSH key already exists at %s.' % priv_key_path)
@@ -152,7 +166,7 @@
152 known_hosts = os.path.join(ssh_dir, 'known_hosts')166 known_hosts = os.path.join(ssh_dir, 'known_hosts')
153 khosts = []167 khosts = []
154 for host in hosts:168 for host in hosts:
155 cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]169 cmd = ['ssh-keyscan', host]
156 remote_key = check_output(cmd, universal_newlines=True).strip()170 remote_key = check_output(cmd, universal_newlines=True).strip()
157 khosts.append(remote_key)171 khosts.append(remote_key)
158 log('Syncing known_hosts @ %s.' % known_hosts)172 log('Syncing known_hosts @ %s.' % known_hosts)
@@ -179,7 +193,8 @@
179 hook = hook_name()193 hook = hook_name()
180 if hook == '%s-relation-joined' % peer_interface:194 if hook == '%s-relation-joined' % peer_interface:
181 relation_set(ssh_pub_key=pub_key)195 relation_set(ssh_pub_key=pub_key)
182 elif hook == '%s-relation-changed' % peer_interface:196 elif hook == '%s-relation-changed' % peer_interface or \
197 hook == '%s-relation-departed' % peer_interface:
183 hosts = []198 hosts = []
184 keys = []199 keys = []
185200
186201
=== added file 'charmhelpers/coordinator.py'
--- charmhelpers/coordinator.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/coordinator.py 2015-08-13 08:33:21 +0000
@@ -0,0 +1,607 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16'''
17The coordinator module allows you to use Juju's leadership feature to
18coordinate operations between units of a service.
19
20Behavior is defined in subclasses of coordinator.BaseCoordinator.
21One implementation is provided (coordinator.Serial), which allows an
22operation to be run on a single unit at a time, on a first come, first
23served basis. You can trivially define more complex behavior by
24subclassing BaseCoordinator or Serial.
25
26:author: Stuart Bishop <stuart.bishop@canonical.com>
27
28
29Services Framework Usage
30========================
31
32Ensure a peer relation is defined in metadata.yaml. Instantiate a
33BaseCoordinator subclass before invoking ServiceManager.manage().
34Ensure that ServiceManager.manage() is wired up to the leader-elected,
35leader-settings-changed, peer relation-changed and peer
36relation-departed hooks in addition to any other hooks you need, or your
37service will deadlock.
38
39Ensure calls to acquire() are guarded, so that locks are only requested
40when they are really needed (and thus hooks only triggered when necessary).
41Failing to do this and calling acquire() unconditionally will put your unit
42into a hook loop. Calls to granted() do not need to be guarded.
43
44For example::
45
46 from charmhelpers.core import hookenv, services
47 from charmhelpers import coordinator
48
49 def maybe_restart(servicename):
50 serial = coordinator.Serial()
51 if needs_restart():
52 serial.acquire('restart')
53 if serial.granted('restart'):
54 hookenv.service_restart(servicename)
55
56 services = [dict(service='servicename',
57 data_ready=[maybe_restart])]
58
59 if __name__ == '__main__':
60 _ = coordinator.Serial() # Must instantiate before manager.manage()
61 manager = services.ServiceManager(services)
62 manager.manage()
63
64
65You can implement a similar pattern using a decorator. If the lock has
66not been granted, an attempt to acquire() it will be made if the guard
67function returns True. If the lock has been granted, the decorated function
68is run as normal::
69
70 from charmhelpers.core import hookenv, services
71 from charmhelpers import coordinator
72
 73 serial = coordinator.Serial() # Global, instantiated on module import.
74
75 def needs_restart():
76 [ ... Introspect state. Return True if restart is needed ... ]
77
78 @serial.require('restart', needs_restart)
79 def maybe_restart(servicename):
80 hookenv.service_restart(servicename)
81
82 services = [dict(service='servicename',
83 data_ready=[maybe_restart])]
84
85 if __name__ == '__main__':
86 manager = services.ServiceManager(services)
87 manager.manage()
88
89
90Traditional Usage
91=================
92
 93Ensure a peer relation is defined in metadata.yaml.
94
95If you are using charmhelpers.core.hookenv.Hooks, ensure that a
96BaseCoordinator subclass is instantiated before calling Hooks.execute.
97
98If you are not using charmhelpers.core.hookenv.Hooks, ensure
99that a BaseCoordinator subclass is instantiated and its handle()
100method called at the start of all your hooks.
101
102For example::
103
104 import sys
105 from charmhelpers.core import hookenv
106 from charmhelpers import coordinator
107
108 hooks = hookenv.Hooks()
109
110 def maybe_restart():
111 serial = coordinator.Serial()
112 if serial.granted('restart'):
113 hookenv.service_restart('myservice')
114
115 @hooks.hook
116 def config_changed():
117 update_config()
118 serial = coordinator.Serial()
119 if needs_restart():
 120 serial.acquire('restart')
121 maybe_restart()
122
123 # Cluster hooks must be wired up.
124 @hooks.hook('cluster-relation-changed', 'cluster-relation-departed')
125 def cluster_relation_changed():
126 maybe_restart()
127
128 # Leader hooks must be wired up.
129 @hooks.hook('leader-elected', 'leader-settings-changed')
130 def leader_settings_changed():
131 maybe_restart()
132
133 [ ... repeat for *all* other hooks you are using ... ]
134
135 if __name__ == '__main__':
136 _ = coordinator.Serial() # Must instantiate before execute()
137 hooks.execute(sys.argv)
138
139
140You can also use the require decorator. If the lock has not been granted,
141an attempt to acquire() it will be made if the guard function returns True.
142If the lock has been granted, the decorated function is run as normal::
143
144 from charmhelpers.core import hookenv
145
146 hooks = hookenv.Hooks()
147 serial = coordinator.Serial() # Must instantiate before execute()
148
149 @require('restart', needs_restart)
150 def maybe_restart():
151 hookenv.service_restart('myservice')
152
153 @hooks.hook('install', 'config-changed', 'upgrade-charm',
154 # Peer and leader hooks must be wired up.
155 'cluster-relation-changed', 'cluster-relation-departed',
156 'leader-elected', 'leader-settings-changed')
157 def default_hook():
158 [...]
159 maybe_restart()
160
161 if __name__ == '__main__':
162 hooks.execute()
163
164
165Details
166=======
167
168A simple API is provided similar to traditional locking APIs. A lock
169may be requested using the acquire() method, and the granted() method
 170may be used to check if a lock previously requested by acquire() has
171been granted. It doesn't matter how many times acquire() is called in a
172hook.
173
174Locks are released at the end of the hook they are acquired in. This may
175be the current hook if the unit is leader and the lock is free. It is
176more likely a future hook (probably leader-settings-changed, possibly
177the peer relation-changed or departed hook, potentially any hook).
178
179Whenever a charm needs to perform a coordinated action it will acquire()
180the lock and perform the action immediately if acquisition is
181successful. It will also need to perform the same action in every other
182hook if the lock has been granted.
183
184
185Grubby Details
186--------------
187
188Why do you need to be able to perform the same action in every hook?
189If the unit is the leader, then it may be able to grant its own lock
190and perform the action immediately in the source hook. If the unit is
191the leader and cannot immediately grant the lock, then its only
192guaranteed chance of acquiring the lock is in the peer relation-joined,
193relation-changed or peer relation-departed hooks when another unit has
194released it (the only channel to communicate to the leader is the peer
195relation). If the unit is not the leader, then it is unlikely the lock
196is granted in the source hook (a previous hook must have also made the
197request for this to happen). A non-leader is notified about the lock via
198leader settings. These changes may be visible in any hook, even before
199the leader-settings-changed hook has been invoked. Or the requesting
200unit may be promoted to leader after making a request, in which case the
201lock may be granted in leader-elected or in a future peer
202relation-changed or relation-departed hook.
203
204This could be simpler if leader-settings-changed was invoked on the
205leader. We could then never grant locks except in
206leader-settings-changed hooks giving one place for the operation to be
207performed. Unfortunately this is not the case with Juju 1.23 leadership.
208
209But of course, this doesn't really matter to most people as most people
210seem to prefer the Services Framework or similar reset-the-world
211approaches, rather than the twisty maze of attempting to deduce what
212should be done based on what hook happens to be running (which always
213seems to evolve into reset-the-world anyway when the charm grows beyond
214the trivial).
215
216I chose not to implement a callback model, where a callback was passed
217to acquire to be executed when the lock is granted, because the callback
218may become invalid between making the request and the lock being granted
219due to an upgrade-charm being run in the interim. And it would create
 220restrictions, such as no lambdas, callback defined at the top level of a
221module, etc. Still, we could implement it on top of what is here, eg.
222by adding a defer decorator that stores a pickle of itself to disk and
223have BaseCoordinator unpickle and execute them when the locks are granted.
224'''
225from datetime import datetime
226from functools import wraps
227import json
228import os.path
229
230from six import with_metaclass
231
232from charmhelpers.core import hookenv
233
234
235# We make BaseCoordinator and subclasses singletons, so that if we
236# need to spill to local storage then only a single instance does so,
237# rather than having multiple instances stomp over each other.
238class Singleton(type):
239 _instances = {}
240
241 def __call__(cls, *args, **kwargs):
242 if cls not in cls._instances:
243 cls._instances[cls] = super(Singleton, cls).__call__(*args,
244 **kwargs)
245 return cls._instances[cls]
246
247
248class BaseCoordinator(with_metaclass(Singleton, object)):
249 relid = None # Peer relation-id, set by __init__
250 relname = None
251
252 grants = None # self.grants[unit][lock] == timestamp
253 requests = None # self.requests[unit][lock] == timestamp
254
255 def __init__(self, relation_key='coordinator', peer_relation_name=None):
 256 '''Instantiate a Coordinator.
257
258 Data is stored on the peer relation and in leadership storage
259 under the provided relation_key.
260
261 The peer relation is identified by peer_relation_name, and defaults
262 to the first one found in metadata.yaml.
263 '''
264 # Most initialization is deferred, since invoking hook tools from
265 # the constructor makes testing hard.
266 self.key = relation_key
267 self.relname = peer_relation_name
268 hookenv.atstart(self.initialize)
269
270 # Ensure that handle() is called, without placing that burden on
271 # the charm author. They still need to do this manually if they
272 # are not using a hook framework.
273 hookenv.atstart(self.handle)
274
275 def initialize(self):
276 if self.requests is not None:
277 return # Already initialized.
278
279 assert hookenv.has_juju_version('1.23'), 'Needs Juju 1.23+'
280
281 if self.relname is None:
282 self.relname = _implicit_peer_relation_name()
283
284 relids = hookenv.relation_ids(self.relname)
285 if relids:
286 self.relid = sorted(relids)[0]
287
288 # Load our state, from leadership, the peer relationship, and maybe
289 # local state as a fallback. Populates self.requests and self.grants.
290 self._load_state()
291 self._emit_state()
292
293 # Save our state if the hook completes successfully.
294 hookenv.atexit(self._save_state)
295
296 # Schedule release of granted locks for the end of the hook.
297 # This needs to be the last of our atexit callbacks to ensure
298 # it will be run first when the hook is complete, because there
299 # is no point mutating our state after it has been saved.
300 hookenv.atexit(self._release_granted)
301
302 def acquire(self, lock):
303 '''Acquire the named lock, non-blocking.
304
305 The lock may be granted immediately, or in a future hook.
306
307 Returns True if the lock has been granted. The lock will be
308 automatically released at the end of the hook in which it is
309 granted.
310
311 Do not mindlessly call this method, as it triggers a cascade of
312 hooks. For example, if you call acquire() every time in your
313 peer relation-changed hook you will end up with an infinite loop
314 of hooks. It should almost always be guarded by some condition.
315 '''
316 unit = hookenv.local_unit()
317 ts = self.requests[unit].get(lock)
318 if not ts:
319 # If there is no outstanding request on the peer relation,
320 # create one.
321 self.requests.setdefault(lock, {})
322 self.requests[unit][lock] = _timestamp()
323 self.msg('Requested {}'.format(lock))
324
325 # If the leader has granted the lock, yay.
326 if self.granted(lock):
327 self.msg('Acquired {}'.format(lock))
328 return True
329
330 # If the unit making the request also happens to be the
331 # leader, it must handle the request now. Even though the
332 # request has been stored on the peer relation, the peer
333 # relation-changed hook will not be triggered.
334 if hookenv.is_leader():
335 return self.grant(lock, unit)
336
337 return False # Can't acquire lock, yet. Maybe next hook.
338
339 def granted(self, lock):
340 '''Return True if a previously requested lock has been granted'''
341 unit = hookenv.local_unit()
342 ts = self.requests[unit].get(lock)
343 if ts and self.grants.get(unit, {}).get(lock) == ts:
344 return True
345 return False
346
347 def requested(self, lock):
348 '''Return True if we are in the queue for the lock'''
349 return lock in self.requests[hookenv.local_unit()]
350
351 def request_timestamp(self, lock):
352 '''Return the timestamp of our outstanding request for lock, or None.
353
354 Returns a datetime.datetime() UTC timestamp, with no tzinfo attribute.
355 '''
356 ts = self.requests[hookenv.local_unit()].get(lock, None)
357 if ts is not None:
358 return datetime.strptime(ts, _timestamp_format)
359
360 def handle(self):
361 if not hookenv.is_leader():
362 return # Only the leader can grant requests.
363
364 self.msg('Leader handling coordinator requests')
365
366 # Clear our grants that have been released.
367 for unit in self.grants.keys():
368 for lock, grant_ts in list(self.grants[unit].items()):
369 req_ts = self.requests.get(unit, {}).get(lock)
370 if req_ts != grant_ts:
371 # The request timestamp does not match the granted
372 # timestamp. Several hooks on 'unit' may have run
373 # before the leader got a chance to make a decision,
374 # and 'unit' may have released its lock and attempted
375 # to reacquire it. This will change the timestamp,
376 # and we correctly revoke the old grant putting it
377 # to the end of the queue.
378 ts = datetime.strptime(self.grants[unit][lock],
379 _timestamp_format)
380 del self.grants[unit][lock]
381 self.released(unit, lock, ts)
382
383 # Grant locks
384 for unit in self.requests.keys():
385 for lock in self.requests[unit]:
386 self.grant(lock, unit)
387
388 def grant(self, lock, unit):
389 '''Maybe grant the lock to a unit.
390
391 The decision to grant the lock or not is made for $lock
392 by a corresponding method grant_$lock, which you may define
393 in a subclass. If no such method is defined, the default_grant
394 method is used. See Serial.default_grant() for details.
395 '''
396 if not hookenv.is_leader():
397 return False # Not the leader, so we cannot grant.
398
399 # Set of units already granted the lock.
400 granted = set()
401 for u in self.grants:
402 if lock in self.grants[u]:
403 granted.add(u)
404 if unit in granted:
405 return True # Already granted.
406
407 # Ordered list of units waiting for the lock.
408 reqs = set()
409 for u in self.requests:
410 if u in granted:
411 continue # In the granted set. Not wanted in the req list.
412 for l, ts in self.requests[u].items():
413 if l == lock:
414 reqs.add((ts, u))
415 queue = [t[1] for t in sorted(reqs)]
416 if unit not in queue:
417 return False # Unit has not requested the lock.
418
419 # Locate custom logic, or fallback to the default.
420 grant_func = getattr(self, 'grant_{}'.format(lock), self.default_grant)
421
422 if grant_func(lock, unit, granted, queue):
423 # Grant the lock.
424 self.msg('Leader grants {} to {}'.format(lock, unit))
425 self.grants.setdefault(unit, {})[lock] = self.requests[unit][lock]
426 return True
427
428 return False
429
430 def released(self, unit, lock, timestamp):
431 '''Called on the leader when it has released a lock.
432
433 By default, does nothing but log messages. Override if you
434 need to perform additional housekeeping when a lock is released,
435 for example recording timestamps.
436 '''
437 interval = _utcnow() - timestamp
438 self.msg('Leader released {} from {}, held {}'.format(lock, unit,
439 interval))
440
441 def require(self, lock, guard_func, *guard_args, **guard_kw):
442 """Decorate a function to be run only when a lock is acquired.
443
444 The lock is requested if the guard function returns True.
445
446 The decorated function is called if the lock has been granted.
447 """
448 def decorator(f):
449 @wraps(f)
450 def wrapper(*args, **kw):
451 if self.granted(lock):
452 self.msg('Granted {}'.format(lock))
453 return f(*args, **kw)
454 if guard_func(*guard_args, **guard_kw) and self.acquire(lock):
455 return f(*args, **kw)
456 return None
457 return wrapper
458 return decorator
459
460 def msg(self, msg):
461 '''Emit a message. Override to customize log spam.'''
462 hookenv.log('coordinator.{} {}'.format(self._name(), msg),
463 level=hookenv.INFO)
464
465 def _name(self):
466 return self.__class__.__name__
467
468 def _load_state(self):
469 self.msg('Loading state'.format(self._name()))
470
471 # All responses must be stored in the leadership settings.
472 # The leader cannot use local state, as a different unit may
473 # be leader next time. Which is fine, as the leadership
474 # settings are always available.
475 self.grants = json.loads(hookenv.leader_get(self.key) or '{}')
476
477 local_unit = hookenv.local_unit()
478
479 # All requests must be stored on the peer relation. This is
480 # the only channel units have to communicate with the leader.
481 # Even the leader needs to store its requests here, as a
482 # different unit may be leader by the time the request can be
483 # granted.
484 if self.relid is None:
485 # The peer relation is not available. Maybe we are early in
486 # the units's lifecycle. Maybe this unit is standalone.
487 # Fallback to using local state.
488 self.msg('No peer relation. Loading local state')
489 self.requests = {local_unit: self._load_local_state()}
490 else:
491 self.requests = self._load_peer_state()
492 if local_unit not in self.requests:
493 # The peer relation has just been joined. Update any state
494 # loaded from our peers with our local state.
495 self.msg('New peer relation. Merging local state')
496 self.requests[local_unit] = self._load_local_state()
497
498 def _emit_state(self):
499 # Emit this units lock status.
500 for lock in sorted(self.requests[hookenv.local_unit()].keys()):
501 if self.granted(lock):
502 self.msg('Granted {}'.format(lock))
503 else:
504 self.msg('Waiting on {}'.format(lock))
505
506 def _save_state(self):
507 self.msg('Publishing state'.format(self._name()))
508 if hookenv.is_leader():
509 # sort_keys to ensure stability.
510 raw = json.dumps(self.grants, sort_keys=True)
511 hookenv.leader_set({self.key: raw})
512
513 local_unit = hookenv.local_unit()
514
515 if self.relid is None:
516 # No peer relation yet. Fallback to local state.
517 self.msg('No peer relation. Saving local state')
518 self._save_local_state(self.requests[local_unit])
519 else:
520 # sort_keys to ensure stability.
521 raw = json.dumps(self.requests[local_unit], sort_keys=True)
522 hookenv.relation_set(self.relid, relation_settings={self.key: raw})
523
524 def _load_peer_state(self):
525 requests = {}
526 units = set(hookenv.related_units(self.relid))
527 units.add(hookenv.local_unit())
528 for unit in units:
529 raw = hookenv.relation_get(self.key, unit, self.relid)
530 if raw:
531 requests[unit] = json.loads(raw)
532 return requests
533
534 def _local_state_filename(self):
535 # Include the class name. We allow multiple BaseCoordinator
536 # subclasses to be instantiated, and they are singletons, so
537 # this avoids conflicts (unless someone creates and uses two
538 # BaseCoordinator subclasses with the same class name, so don't
539 # do that).
540 return '.charmhelpers.coordinator.{}'.format(self._name())
541
542 def _load_local_state(self):
543 fn = self._local_state_filename()
544 if os.path.exists(fn):
545 with open(fn, 'r') as f:
546 return json.load(f)
547 return {}
548
549 def _save_local_state(self, state):
550 fn = self._local_state_filename()
551 with open(fn, 'w') as f:
552 json.dump(state, f)
553
554 def _release_granted(self):
555 # At the end of every hook, release all locks granted to
556 # this unit. If a hook neglects to make use of what it
557 # requested, it will just have to make the request again.
558 # Implicit release is the only way this will work, as
559 # if the unit is standalone there may be no future triggers
560 # called to do a manual release.
561 unit = hookenv.local_unit()
562 for lock in list(self.requests[unit].keys()):
563 if self.granted(lock):
564 self.msg('Released local {} lock'.format(lock))
565 del self.requests[unit][lock]
566
567
class Serial(BaseCoordinator):
    def default_grant(self, lock, unit, granted, queue):
        '''Default logic to grant a lock to a unit. Unless overridden,
        only one unit may hold the lock and it will be granted to the
        earliest queued request.

        To define custom logic for $lock, create a subclass and
        define a grant_$lock method.

        `unit` is the unit name making the request.

        `granted` is the set of units already granted the lock. It will
        never include `unit`. It may be empty.

        `queue` is the list of units waiting for the lock, ordered by time
        of request. It will always include `unit`, but `unit` is not
        necessarily first.

        Returns True if the lock should be granted to `unit`.
        '''
        if granted:
            # Serial semantics: only one holder at a time.
            return False
        return queue[0] == unit
589
590
def _implicit_peer_relation_name():
    # Default to the alphabetically first peer relation declared in
    # metadata.yaml when the caller did not name one explicitly.
    md = hookenv.metadata()
    assert 'peers' in md, 'No peer relations in metadata.yaml'
    return sorted(md['peers'].keys())[0]
595
596
597# A human readable, sortable UTC timestamp format.
598_timestamp_format = '%Y-%m-%d %H:%M:%S.%fZ'
599
600
def _utcnow():  # pragma: no cover
    '''Return the current (naive) UTC time.

    Exists as a seam: mocking datetime methods directly is problematic,
    so tests patch this wrapper instead.
    '''
    now = datetime.utcnow()
    return now
604
605
def _timestamp():
    '''Current UTC time rendered with _timestamp_format.'''
    now = _utcnow()
    return now.strftime(_timestamp_format)
0608
=== added file 'charmhelpers/core/files.py'
--- charmhelpers/core/files.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/core/files.py 2015-08-13 08:33:21 +0000
@@ -0,0 +1,45 @@
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3
4# Copyright 2014-2015 Canonical Limited.
5#
6# This file is part of charm-helpers.
7#
8# charm-helpers is free software: you can redistribute it and/or modify
9# it under the terms of the GNU Lesser General Public License version 3 as
10# published by the Free Software Foundation.
11#
12# charm-helpers is distributed in the hope that it will be useful,
13# but WITHOUT ANY WARRANTY; without even the implied warranty of
14# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15# GNU Lesser General Public License for more details.
16#
17# You should have received a copy of the GNU Lesser General Public License
18# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
19
20__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
21
22import os
23import subprocess
24
25
def sed(filename, before, after, flags='g'):
    """
    Search for and replace the given pattern in filename, in place.

    :param filename: relative or absolute file path.
    :param before: expression to be replaced (see 'man sed')
    :param after: expression to replace with (see 'man sed')
    :param flags: sed-compatible regex flags in example, to make
        the search and replace case insensitive, specify ``flags="i"``.
        The ``g`` flag is always specified regardless, so you do not
        need to remember to include it when overriding this parameter.
    :returns: If the sed command exit code was zero then return,
        otherwise raise CalledProcessError.
    """
    expression = 's/%s/%s/%s' % (before, after, flags)
    target = os.path.expanduser(filename)
    # -r enables extended regular expressions; -i edits in place.
    return subprocess.check_call(
        ["sed", "-i", "-r", "-e", expression, target])
046
=== modified file 'charmhelpers/core/hookenv.py'
--- charmhelpers/core/hookenv.py 2015-06-02 13:46:29 +0000
+++ charmhelpers/core/hookenv.py 2015-08-13 08:33:21 +0000
@@ -21,7 +21,10 @@
21# Charm Helpers Developers <juju@lists.ubuntu.com>21# Charm Helpers Developers <juju@lists.ubuntu.com>
2222
23from __future__ import print_function23from __future__ import print_function
24import copy
25from distutils.version import LooseVersion
24from functools import wraps26from functools import wraps
27import glob
25import os28import os
26import json29import json
27import yaml30import yaml
@@ -71,6 +74,7 @@
71 res = func(*args, **kwargs)74 res = func(*args, **kwargs)
72 cache[key] = res75 cache[key] = res
73 return res76 return res
77 wrapper._wrapped = func
74 return wrapper78 return wrapper
7579
7680
@@ -170,9 +174,19 @@
170 return os.environ.get('JUJU_RELATION', None)174 return os.environ.get('JUJU_RELATION', None)
171175
172176
173def relation_id():177@cached
174 """The relation ID for the current relation hook"""178def relation_id(relation_name=None, service_or_unit=None):
175 return os.environ.get('JUJU_RELATION_ID', None)179 """The relation ID for the current or a specified relation"""
180 if not relation_name and not service_or_unit:
181 return os.environ.get('JUJU_RELATION_ID', None)
182 elif relation_name and service_or_unit:
183 service_name = service_or_unit.split('/')[0]
184 for relid in relation_ids(relation_name):
185 remote_service = remote_service_name(relid)
186 if remote_service == service_name:
187 return relid
188 else:
189 raise ValueError('Must specify neither or both of relation_name and service_or_unit')
176190
177191
178def local_unit():192def local_unit():
@@ -190,9 +204,20 @@
190 return local_unit().split('/')[0]204 return local_unit().split('/')[0]
191205
192206
207@cached
208def remote_service_name(relid=None):
209 """The remote service name for a given relation-id (or the current relation)"""
210 if relid is None:
211 unit = remote_unit()
212 else:
213 units = related_units(relid)
214 unit = units[0] if units else None
215 return unit.split('/')[0] if unit else None
216
217
193def hook_name():218def hook_name():
194 """The name of the currently executing hook"""219 """The name of the currently executing hook"""
195 return os.path.basename(sys.argv[0])220 return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
196221
197222
198class Config(dict):223class Config(dict):
@@ -242,29 +267,7 @@
242 self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)267 self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
243 if os.path.exists(self.path):268 if os.path.exists(self.path):
244 self.load_previous()269 self.load_previous()
245270 atexit(self._implicit_save)
246 def __getitem__(self, key):
247 """For regular dict lookups, check the current juju config first,
248 then the previous (saved) copy. This ensures that user-saved values
249 will be returned by a dict lookup.
250
251 """
252 try:
253 return dict.__getitem__(self, key)
254 except KeyError:
255 return (self._prev_dict or {})[key]
256
257 def get(self, key, default=None):
258 try:
259 return self[key]
260 except KeyError:
261 return default
262
263 def keys(self):
264 prev_keys = []
265 if self._prev_dict is not None:
266 prev_keys = self._prev_dict.keys()
267 return list(set(prev_keys + list(dict.keys(self))))
268271
269 def load_previous(self, path=None):272 def load_previous(self, path=None):
270 """Load previous copy of config from disk.273 """Load previous copy of config from disk.
@@ -283,6 +286,9 @@
283 self.path = path or self.path286 self.path = path or self.path
284 with open(self.path) as f:287 with open(self.path) as f:
285 self._prev_dict = json.load(f)288 self._prev_dict = json.load(f)
289 for k, v in copy.deepcopy(self._prev_dict).items():
290 if k not in self:
291 self[k] = v
286292
287 def changed(self, key):293 def changed(self, key):
288 """Return True if the current value for this key is different from294 """Return True if the current value for this key is different from
@@ -314,13 +320,13 @@
314 instance.320 instance.
315321
316 """322 """
317 if self._prev_dict:
318 for k, v in six.iteritems(self._prev_dict):
319 if k not in self:
320 self[k] = v
321 with open(self.path, 'w') as f:323 with open(self.path, 'w') as f:
322 json.dump(self, f)324 json.dump(self, f)
323325
326 def _implicit_save(self):
327 if self.implicit_save:
328 self.save()
329
324330
325@cached331@cached
326def config(scope=None):332def config(scope=None):
@@ -485,6 +491,63 @@
485491
486492
487@cached493@cached
494def relation_to_interface(relation_name):
495 """
496 Given the name of a relation, return the interface that relation uses.
497
498 :returns: The interface name, or ``None``.
499 """
500 return relation_to_role_and_interface(relation_name)[1]
501
502
503@cached
504def relation_to_role_and_interface(relation_name):
505 """
506 Given the name of a relation, return the role and the name of the interface
507 that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).
508
509 :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
510 """
511 _metadata = metadata()
512 for role in ('provides', 'requires', 'peer'):
513 interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
514 if interface:
515 return role, interface
516 return None, None
517
518
519@cached
520def role_and_interface_to_relations(role, interface_name):
521 """
522 Given a role and interface name, return a list of relation names for the
523 current charm that use that interface under that role (where role is one
524 of ``provides``, ``requires``, or ``peer``).
525
526 :returns: A list of relation names.
527 """
528 _metadata = metadata()
529 results = []
530 for relation_name, relation in _metadata.get(role, {}).items():
531 if relation['interface'] == interface_name:
532 results.append(relation_name)
533 return results
534
535
536@cached
537def interface_to_relations(interface_name):
538 """
539 Given an interface, return a list of relation names for the current
540 charm that use that interface.
541
542 :returns: A list of relation names.
543 """
544 results = []
545 for role in ('provides', 'requires', 'peer'):
546 results.extend(role_and_interface_to_relations(role, interface_name))
547 return results
548
549
550@cached
488def charm_name():551def charm_name():
489 """Get the name of the current charm as is specified on metadata.yaml"""552 """Get the name of the current charm as is specified on metadata.yaml"""
490 return metadata().get('name')553 return metadata().get('name')
@@ -587,10 +650,14 @@
587 hooks.execute(sys.argv)650 hooks.execute(sys.argv)
588 """651 """
589652
590 def __init__(self, config_save=True):653 def __init__(self, config_save=None):
591 super(Hooks, self).__init__()654 super(Hooks, self).__init__()
592 self._hooks = {}655 self._hooks = {}
593 self._config_save = config_save656
657 # For unknown reasons, we allow the Hooks constructor to override
658 # config().implicit_save.
659 if config_save is not None:
660 config().implicit_save = config_save
594661
595 def register(self, name, function):662 def register(self, name, function):
596 """Register a hook"""663 """Register a hook"""
@@ -598,13 +665,16 @@
598665
599 def execute(self, args):666 def execute(self, args):
600 """Execute a registered hook based on args[0]"""667 """Execute a registered hook based on args[0]"""
668 _run_atstart()
601 hook_name = os.path.basename(args[0])669 hook_name = os.path.basename(args[0])
602 if hook_name in self._hooks:670 if hook_name in self._hooks:
603 self._hooks[hook_name]()671 try:
604 if self._config_save:672 self._hooks[hook_name]()
605 cfg = config()673 except SystemExit as x:
606 if cfg.implicit_save:674 if x.code is None or x.code == 0:
607 cfg.save()675 _run_atexit()
676 raise
677 _run_atexit()
608 else:678 else:
609 raise UnregisteredHookError(hook_name)679 raise UnregisteredHookError(hook_name)
610680
@@ -653,6 +723,21 @@
653 subprocess.check_call(['action-fail', message])723 subprocess.check_call(['action-fail', message])
654724
655725
726def action_name():
727 """Get the name of the currently executing action."""
728 return os.environ.get('JUJU_ACTION_NAME')
729
730
731def action_uuid():
732 """Get the UUID of the currently executing action."""
733 return os.environ.get('JUJU_ACTION_UUID')
734
735
736def action_tag():
737 """Get the tag for the currently executing action."""
738 return os.environ.get('JUJU_ACTION_TAG')
739
740
656def status_set(workload_state, message):741def status_set(workload_state, message):
657 """Set the workload state with a message742 """Set the workload state with a message
658743
@@ -732,13 +817,80 @@
732@translate_exc(from_exc=OSError, to_exc=NotImplementedError)817@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
733def leader_set(settings=None, **kwargs):818def leader_set(settings=None, **kwargs):
734 """Juju leader set value(s)"""819 """Juju leader set value(s)"""
735 log("Juju leader-set '%s'" % (settings), level=DEBUG)820 # Don't log secrets.
821 # log("Juju leader-set '%s'" % (settings), level=DEBUG)
736 cmd = ['leader-set']822 cmd = ['leader-set']
737 settings = settings or {}823 settings = settings or {}
738 settings.update(kwargs)824 settings.update(kwargs)
739 for k, v in settings.iteritems():825 for k, v in settings.items():
740 if v is None:826 if v is None:
741 cmd.append('{}='.format(k))827 cmd.append('{}='.format(k))
742 else:828 else:
743 cmd.append('{}={}'.format(k, v))829 cmd.append('{}={}'.format(k, v))
744 subprocess.check_call(cmd)830 subprocess.check_call(cmd)
831
832
833@cached
834def juju_version():
835 """Full version string (eg. '1.23.3.1-trusty-amd64')"""
836 # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
837 jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
838 return subprocess.check_output([jujud, 'version'],
839 universal_newlines=True).strip()
840
841
842@cached
843def has_juju_version(minimum_version):
844 """Return True if the Juju version is at least the provided version"""
845 return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
846
847
848_atexit = []
849_atstart = []
850
851
852def atstart(callback, *args, **kwargs):
853 '''Schedule a callback to run before the main hook.
854
855 Callbacks are run in the order they were added.
856
857 This is useful for modules and classes to perform initialization
858 and inject behavior. In particular:
859
860 - Run common code before all of your hooks, such as logging
861 the hook name or interesting relation data.
862 - Defer object or module initialization that requires a hook
863 context until we know there actually is a hook context,
864 making testing easier.
865 - Rather than requiring charm authors to include boilerplate to
866 invoke your helper's behavior, have it run automatically if
867 your object is instantiated or module imported.
868
869 This is not at all useful after your hook framework as been launched.
870 '''
871 global _atstart
872 _atstart.append((callback, args, kwargs))
873
874
875def atexit(callback, *args, **kwargs):
876 '''Schedule a callback to run on successful hook completion.
877
878 Callbacks are run in the reverse order that they were added.'''
879 _atexit.append((callback, args, kwargs))
880
881
882def _run_atstart():
883 '''Hook frameworks must invoke this before running the main hook body.'''
884 global _atstart
885 for callback, args, kwargs in _atstart:
886 callback(*args, **kwargs)
887 del _atstart[:]
888
889
890def _run_atexit():
891 '''Hook frameworks must invoke this after the main hook body has
892 successfully completed. Do not invoke it if the hook fails.'''
893 global _atexit
894 for callback, args, kwargs in reversed(_atexit):
895 callback(*args, **kwargs)
896 del _atexit[:]
745897
=== modified file 'charmhelpers/core/host.py'
--- charmhelpers/core/host.py 2015-06-11 09:03:58 +0000
+++ charmhelpers/core/host.py 2015-08-13 08:33:21 +0000
@@ -63,6 +63,36 @@
63 return service_result63 return service_result
6464
6565
66def service_pause(service_name, init_dir=None):
67 """Pause a system service.
68
69 Stop it, and prevent it from starting again at boot."""
70 if init_dir is None:
71 init_dir = "/etc/init"
72 stopped = service_stop(service_name)
73 # XXX: Support systemd too
74 override_path = os.path.join(
75 init_dir, '{}.override'.format(service_name))
76 with open(override_path, 'w') as fh:
77 fh.write("manual\n")
78 return stopped
79
80
81def service_resume(service_name, init_dir=None):
82 """Resume a system service.
83
84 Reenable starting again at boot. Start the service"""
85 # XXX: Support systemd too
86 if init_dir is None:
87 init_dir = "/etc/init"
88 override_path = os.path.join(
89 init_dir, '{}.override'.format(service_name))
90 if os.path.exists(override_path):
91 os.unlink(override_path)
92 started = service_start(service_name)
93 return started
94
95
66def service(action, service_name):96def service(action, service_name):
67 """Control a system service"""97 """Control a system service"""
68 cmd = ['service', service_name, action]98 cmd = ['service', service_name, action]
@@ -149,11 +179,7 @@
149179
150def add_user_to_group(username, group):180def add_user_to_group(username, group):
151 """Add a user to a group"""181 """Add a user to a group"""
152 cmd = [182 cmd = ['gpasswd', '-a', username, group]
153 'gpasswd', '-a',
154 username,
155 group
156 ]
157 log("Adding user {} to group {}".format(username, group))183 log("Adding user {} to group {}".format(username, group))
158 subprocess.check_call(cmd)184 subprocess.check_call(cmd)
159185
160186
=== modified file 'charmhelpers/core/services/base.py'
--- charmhelpers/core/services/base.py 2015-05-20 14:52:29 +0000
+++ charmhelpers/core/services/base.py 2015-08-13 08:33:21 +0000
@@ -128,15 +128,18 @@
128 """128 """
129 Handle the current hook by doing The Right Thing with the registered services.129 Handle the current hook by doing The Right Thing with the registered services.
130 """130 """
131 hook_name = hookenv.hook_name()131 hookenv._run_atstart()
132 if hook_name == 'stop':132 try:
133 self.stop_services()133 hook_name = hookenv.hook_name()
134 else:134 if hook_name == 'stop':
135 self.reconfigure_services()135 self.stop_services()
136 self.provide_data()136 else:
137 cfg = hookenv.config()137 self.reconfigure_services()
138 if cfg.implicit_save:138 self.provide_data()
139 cfg.save()139 except SystemExit as x:
140 if x.code is None or x.code == 0:
141 hookenv._run_atexit()
142 hookenv._run_atexit()
140143
141 def provide_data(self):144 def provide_data(self):
142 """145 """
143146
=== modified file 'charmhelpers/core/services/helpers.py'
--- charmhelpers/core/services/helpers.py 2015-06-12 11:27:22 +0000
+++ charmhelpers/core/services/helpers.py 2015-08-13 08:33:21 +0000
@@ -240,8 +240,7 @@
240 action.240 action.
241241
242 :param str source: The template source file, relative to242 :param str source: The template source file, relative to
243 `$CHARM_DIR/templates`243 `$CHARM_DIR/templates`
244
245 :param str target: The target to write the rendered template to244 :param str target: The target to write the rendered template to
246 :param str owner: The owner of the rendered file245 :param str owner: The owner of the rendered file
247 :param str group: The group of the rendered file246 :param str group: The group of the rendered file
248247
=== modified file 'charmhelpers/core/unitdata.py'
--- charmhelpers/core/unitdata.py 2015-03-18 15:51:22 +0000
+++ charmhelpers/core/unitdata.py 2015-08-13 08:33:21 +0000
@@ -152,6 +152,7 @@
152import collections152import collections
153import contextlib153import contextlib
154import datetime154import datetime
155import itertools
155import json156import json
156import os157import os
157import pprint158import pprint
@@ -164,8 +165,7 @@
164class Storage(object):165class Storage(object):
165 """Simple key value database for local unit state within charms.166 """Simple key value database for local unit state within charms.
166167
167 Modifications are automatically committed at hook exit. That's168 Modifications are not persisted unless :meth:`flush` is called.
168 currently regardless of exit code.
169169
170 To support dicts, lists, integer, floats, and booleans values170 To support dicts, lists, integer, floats, and booleans values
171 are automatically json encoded/decoded.171 are automatically json encoded/decoded.
@@ -173,8 +173,11 @@
173 def __init__(self, path=None):173 def __init__(self, path=None):
174 self.db_path = path174 self.db_path = path
175 if path is None:175 if path is None:
176 self.db_path = os.path.join(176 if 'UNIT_STATE_DB' in os.environ:
177 os.environ.get('CHARM_DIR', ''), '.unit-state.db')177 self.db_path = os.environ['UNIT_STATE_DB']
178 else:
179 self.db_path = os.path.join(
180 os.environ.get('CHARM_DIR', ''), '.unit-state.db')
178 self.conn = sqlite3.connect('%s' % self.db_path)181 self.conn = sqlite3.connect('%s' % self.db_path)
179 self.cursor = self.conn.cursor()182 self.cursor = self.conn.cursor()
180 self.revision = None183 self.revision = None
@@ -189,15 +192,8 @@
189 self.conn.close()192 self.conn.close()
190 self._closed = True193 self._closed = True
191194
192 def _scoped_query(self, stmt, params=None):
193 if params is None:
194 params = []
195 return stmt, params
196
197 def get(self, key, default=None, record=False):195 def get(self, key, default=None, record=False):
198 self.cursor.execute(196 self.cursor.execute('select data from kv where key=?', [key])
199 *self._scoped_query(
200 'select data from kv where key=?', [key]))
201 result = self.cursor.fetchone()197 result = self.cursor.fetchone()
202 if not result:198 if not result:
203 return default199 return default
@@ -206,33 +202,81 @@
206 return json.loads(result[0])202 return json.loads(result[0])
207203
208 def getrange(self, key_prefix, strip=False):204 def getrange(self, key_prefix, strip=False):
209 stmt = "select key, data from kv where key like '%s%%'" % key_prefix205 """
210 self.cursor.execute(*self._scoped_query(stmt))206 Get a range of keys starting with a common prefix as a mapping of
207 keys to values.
208
209 :param str key_prefix: Common prefix among all keys
210 :param bool strip: Optionally strip the common prefix from the key
211 names in the returned dict
212 :return dict: A (possibly empty) dict of key-value mappings
213 """
214 self.cursor.execute("select key, data from kv where key like ?",
215 ['%s%%' % key_prefix])
211 result = self.cursor.fetchall()216 result = self.cursor.fetchall()
212217
213 if not result:218 if not result:
214 return None219 return {}
215 if not strip:220 if not strip:
216 key_prefix = ''221 key_prefix = ''
217 return dict([222 return dict([
218 (k[len(key_prefix):], json.loads(v)) for k, v in result])223 (k[len(key_prefix):], json.loads(v)) for k, v in result])
219224
220 def update(self, mapping, prefix=""):225 def update(self, mapping, prefix=""):
226 """
227 Set the values of multiple keys at once.
228
229 :param dict mapping: Mapping of keys to values
230 :param str prefix: Optional prefix to apply to all keys in `mapping`
231 before setting
232 """
221 for k, v in mapping.items():233 for k, v in mapping.items():
222 self.set("%s%s" % (prefix, k), v)234 self.set("%s%s" % (prefix, k), v)
223235
224 def unset(self, key):236 def unset(self, key):
237 """
238 Remove a key from the database entirely.
239 """
225 self.cursor.execute('delete from kv where key=?', [key])240 self.cursor.execute('delete from kv where key=?', [key])
226 if self.revision and self.cursor.rowcount:241 if self.revision and self.cursor.rowcount:
227 self.cursor.execute(242 self.cursor.execute(
228 'insert into kv_revisions values (?, ?, ?)',243 'insert into kv_revisions values (?, ?, ?)',
229 [key, self.revision, json.dumps('DELETED')])244 [key, self.revision, json.dumps('DELETED')])
230245
246 def unsetrange(self, keys=None, prefix=""):
247 """
248 Remove a range of keys starting with a common prefix, from the database
249 entirely.
250
251 :param list keys: List of keys to remove.
252 :param str prefix: Optional prefix to apply to all keys in ``keys``
253 before removing.
254 """
255 if keys is not None:
256 keys = ['%s%s' % (prefix, key) for key in keys]
257 self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
258 if self.revision and self.cursor.rowcount:
259 self.cursor.execute(
260 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
261 list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
262 else:
263 self.cursor.execute('delete from kv where key like ?',
264 ['%s%%' % prefix])
265 if self.revision and self.cursor.rowcount:
266 self.cursor.execute(
267 'insert into kv_revisions values (?, ?, ?)',
268 ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
269
231 def set(self, key, value):270 def set(self, key, value):
271 """
272 Set a value in the database.
273
274 :param str key: Key to set the value for
275 :param value: Any JSON-serializable value to be set
276 """
232 serialized = json.dumps(value)277 serialized = json.dumps(value)
233278
234 self.cursor.execute(279 self.cursor.execute('select data from kv where key=?', [key])
235 'select data from kv where key=?', [key])
236 exists = self.cursor.fetchone()280 exists = self.cursor.fetchone()
237281
238 # Skip mutations to the same value282 # Skip mutations to the same value
239283
=== modified file 'charmhelpers/fetch/__init__.py'
--- charmhelpers/fetch/__init__.py 2015-04-29 12:52:18 +0000
+++ charmhelpers/fetch/__init__.py 2015-08-13 08:33:21 +0000
@@ -90,6 +90,14 @@
90 'kilo/proposed': 'trusty-proposed/kilo',90 'kilo/proposed': 'trusty-proposed/kilo',
91 'trusty-kilo/proposed': 'trusty-proposed/kilo',91 'trusty-kilo/proposed': 'trusty-proposed/kilo',
92 'trusty-proposed/kilo': 'trusty-proposed/kilo',92 'trusty-proposed/kilo': 'trusty-proposed/kilo',
93 # Liberty
94 'liberty': 'trusty-updates/liberty',
95 'trusty-liberty': 'trusty-updates/liberty',
96 'trusty-liberty/updates': 'trusty-updates/liberty',
97 'trusty-updates/liberty': 'trusty-updates/liberty',
98 'liberty/proposed': 'trusty-proposed/liberty',
99 'trusty-liberty/proposed': 'trusty-proposed/liberty',
100 'trusty-proposed/liberty': 'trusty-proposed/liberty',
93}101}
94102
95# The order of this list is very important. Handlers should be listed in from103# The order of this list is very important. Handlers should be listed in from
@@ -215,19 +223,27 @@
215 _run_apt_command(cmd, fatal)223 _run_apt_command(cmd, fatal)
216224
217225
226def apt_mark(packages, mark, fatal=False):
227 """Flag one or more packages using apt-mark"""
228 cmd = ['apt-mark', mark]
229 if isinstance(packages, six.string_types):
230 cmd.append(packages)
231 else:
232 cmd.extend(packages)
233 log("Holding {}".format(packages))
234
235 if fatal:
236 subprocess.check_call(cmd, universal_newlines=True)
237 else:
238 subprocess.call(cmd, universal_newlines=True)
239
240
218def apt_hold(packages, fatal=False):241def apt_hold(packages, fatal=False):
219 """Hold one or more packages"""242 return apt_mark(packages, 'hold', fatal=fatal)
220 cmd = ['apt-mark', 'hold']243
221 if isinstance(packages, six.string_types):244
222 cmd.append(packages)245def apt_unhold(packages, fatal=False):
223 else:246 return apt_mark(packages, 'unhold', fatal=fatal)
224 cmd.extend(packages)
225 log("Holding {}".format(packages))
226
227 if fatal:
228 subprocess.check_call(cmd)
229 else:
230 subprocess.call(cmd)
231247
232248
233def add_source(source, key=None):249def add_source(source, key=None):
@@ -370,8 +386,9 @@
370 for handler in handlers:386 for handler in handlers:
371 try:387 try:
372 installed_to = handler.install(source, *args, **kwargs)388 installed_to = handler.install(source, *args, **kwargs)
373 except UnhandledSource:389 except UnhandledSource as e:
374 pass390 log('Install source attempt unsuccessful: {}'.format(e),
391 level='WARNING')
375 if not installed_to:392 if not installed_to:
376 raise UnhandledSource("No handler found for source {}".format(source))393 raise UnhandledSource("No handler found for source {}".format(source))
377 return installed_to394 return installed_to
378395
=== modified file 'charmhelpers/fetch/archiveurl.py'
--- charmhelpers/fetch/archiveurl.py 2015-02-11 21:41:57 +0000
+++ charmhelpers/fetch/archiveurl.py 2015-08-13 08:33:21 +0000
@@ -77,6 +77,8 @@
77 def can_handle(self, source):77 def can_handle(self, source):
78 url_parts = self.parse_url(source)78 url_parts = self.parse_url(source)
79 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):79 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
80 # XXX: Why is this returning a boolean and a string? It's
81 # doomed to fail since "bool(can_handle('foo://'))" will be True.
80 return "Wrong source type"82 return "Wrong source type"
81 if get_archive_handler(self.base_url(source)):83 if get_archive_handler(self.base_url(source)):
82 return True84 return True
@@ -155,7 +157,11 @@
155 else:157 else:
156 algorithms = hashlib.algorithms_available158 algorithms = hashlib.algorithms_available
157 if key in algorithms:159 if key in algorithms:
158 check_hash(dld_file, value, key)160 if len(value) != 1:
161 raise TypeError(
162 "Expected 1 hash value, not %d" % len(value))
163 expected = value[0]
164 check_hash(dld_file, expected, key)
159 if checksum:165 if checksum:
160 check_hash(dld_file, checksum, hash_type)166 check_hash(dld_file, checksum, hash_type)
161 return extract(dld_file, dest)167 return extract(dld_file, dest)
162168
=== modified file 'charmhelpers/fetch/giturl.py'
--- charmhelpers/fetch/giturl.py 2015-05-27 12:55:44 +0000
+++ charmhelpers/fetch/giturl.py 2015-08-13 08:33:21 +0000
@@ -67,7 +67,7 @@
67 try:67 try:
68 self.clone(source, dest_dir, branch, depth)68 self.clone(source, dest_dir, branch, depth)
69 except GitCommandError as e:69 except GitCommandError as e:
70 raise UnhandledSource(e.message)70 raise UnhandledSource(e)
71 except OSError as e:71 except OSError as e:
72 raise UnhandledSource(e.strerror)72 raise UnhandledSource(e.strerror)
73 return dest_dir73 return dest_dir
7474
=== added directory 'docs/_extensions'
=== added file 'docs/_extensions/automembersummary.py'
--- docs/_extensions/automembersummary.py 1970-01-01 00:00:00 +0000
+++ docs/_extensions/automembersummary.py 2015-08-13 08:33:21 +0000
@@ -0,0 +1,86 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17
18import inspect
19
20from docutils.parsers.rst import directives
21from sphinx.ext.autosummary import Autosummary
22from sphinx.ext.autosummary import get_import_prefixes_from_env
23from sphinx.ext.autosummary import import_by_name
24
25
26class AutoMemberSummary(Autosummary):
27 required_arguments = 0
28 optional_arguments = 0
29 final_argument_whitespace = False
30 has_content = True
31 option_spec = {
32 'toctree': directives.unchanged,
33 'nosignatures': directives.flag,
34 'template': directives.unchanged,
35 }
36
37 def get_items(self, names):
38 env = self.state.document.settings.env
39 prefixes = get_import_prefixes_from_env(env)
40
41 items = []
42 prefix = ''
43 shorten = ''
44
45 def _get_items(name):
46 _items = super(AutoMemberSummary, self).get_items([shorten + name])
47 for dn, sig, summary, rn in _items:
48 items.append(('%s%s' % (prefix, dn), sig, summary, rn))
49
50 for name in names:
51 if '~' in name:
52 prefix, name = name.split('~')
53 shorten = '~'
54 else:
55 prefix = ''
56 shorten = ''
57
58 try:
59 real_name, obj, parent, _ = import_by_name(name, prefixes=prefixes)
60 except ImportError:
61 self.warn('failed to import %s' % name)
62 continue
63
64 if not inspect.ismodule(obj):
65 _get_items(name)
66 continue
67
68 for member in dir(obj):
69 if member.startswith('_'):
70 continue
71 mobj = getattr(obj, member)
72 if hasattr(mobj, '__module__'):
73 if not mobj.__module__.startswith(real_name):
74 continue # skip imported classes & functions
75 elif hasattr(mobj, '__name__'):
76 if not mobj.__name__.startswith(real_name):
77 continue # skip imported modules
78 else:
79 continue # skip instances
80 _get_items('%s.%s' % (name, member))
81
82 return items
83
84
85def setup(app):
86 app.add_directive('automembersummary', AutoMemberSummary)
087
=== added file 'docs/api/charmhelpers.coordinator.rst'
--- docs/api/charmhelpers.coordinator.rst 1970-01-01 00:00:00 +0000
+++ docs/api/charmhelpers.coordinator.rst 2015-08-13 08:33:21 +0000
@@ -0,0 +1,10 @@
1charmhelpers.coordinator package
2================================
3
4charmhelpers.coordinator module
5-------------------------------
6
7.. automodule:: charmhelpers.coordinator
8 :members:
9 :undoc-members:
10 :show-inheritance:
011
=== added file 'docs/api/charmhelpers.core.decorators.rst'
--- docs/api/charmhelpers.core.decorators.rst 1970-01-01 00:00:00 +0000
+++ docs/api/charmhelpers.core.decorators.rst 2015-08-13 08:33:21 +0000
@@ -0,0 +1,7 @@
1charmhelpers.core.decorators
2============================
3
4.. automodule:: charmhelpers.core.decorators
5 :members:
6 :undoc-members:
7 :show-inheritance:
08
=== added file 'docs/api/charmhelpers.core.fstab.rst'
--- docs/api/charmhelpers.core.fstab.rst 1970-01-01 00:00:00 +0000
+++ docs/api/charmhelpers.core.fstab.rst 2015-08-13 08:33:21 +0000
@@ -0,0 +1,7 @@
1charmhelpers.core.fstab
2=======================
3
4.. automodule:: charmhelpers.core.fstab
5 :members:
6 :undoc-members:
7 :show-inheritance:
08
=== added file 'docs/api/charmhelpers.core.hookenv.rst'
--- docs/api/charmhelpers.core.hookenv.rst 1970-01-01 00:00:00 +0000
+++ docs/api/charmhelpers.core.hookenv.rst 2015-08-13 08:33:21 +0000
@@ -0,0 +1,12 @@
1charmhelpers.core.hookenv
2=========================
3
4.. automembersummary::
5 :nosignatures:
6
7 ~charmhelpers.core.hookenv
8
9.. automodule:: charmhelpers.core.hookenv
10 :members:
11 :undoc-members:
12 :show-inheritance:
013
=== added file 'docs/api/charmhelpers.core.host.rst'
--- docs/api/charmhelpers.core.host.rst 1970-01-01 00:00:00 +0000
+++ docs/api/charmhelpers.core.host.rst 2015-08-13 08:33:21 +0000
@@ -0,0 +1,12 @@
1charmhelpers.core.host
2======================
3
4.. automembersummary::
5 :nosignatures:
6
7 ~charmhelpers.core.host
8
9.. automodule:: charmhelpers.core.host
10 :members:
11 :undoc-members:
12 :show-inheritance:
013
=== modified file 'docs/api/charmhelpers.core.rst'
--- docs/api/charmhelpers.core.rst 2014-08-05 21:28:01 +0000
+++ docs/api/charmhelpers.core.rst 2015-08-13 08:33:21 +0000
@@ -1,44 +1,17 @@
1charmhelpers.core package1charmhelpers.core package
2=========================2=========================
33
4charmhelpers.core.fstab module4.. toctree::
5------------------------------5
66 charmhelpers.core.decorators
7.. automodule:: charmhelpers.core.fstab7 charmhelpers.core.fstab
8 :members:8 charmhelpers.core.hookenv
9 :undoc-members:9 charmhelpers.core.host
10 :show-inheritance:10 charmhelpers.core.strutils
1111 charmhelpers.core.sysctl
12charmhelpers.core.hookenv module12 charmhelpers.core.templating
13--------------------------------13 charmhelpers.core.unitdata
1414 charmhelpers.core.services
15.. automodule:: charmhelpers.core.hookenv
16 :members:
17 :undoc-members:
18 :show-inheritance:
19
20charmhelpers.core.host module
21-----------------------------
22
23.. automodule:: charmhelpers.core.host
24 :members:
25 :undoc-members:
26 :show-inheritance:
27
28charmhelpers.core.services package
29----------------------------------
30
31.. automodule:: charmhelpers.core.services.base
32 :members:
33 :undoc-members:
34 :show-inheritance:
35 :special-members: __init__
36
37.. automodule:: charmhelpers.core.services.helpers
38 :members:
39 :undoc-members:
40 :show-inheritance:
41
4215
43.. automodule:: charmhelpers.core16.. automodule:: charmhelpers.core
44 :members:17 :members:
4518
=== added file 'docs/api/charmhelpers.core.services.base.rst'
--- docs/api/charmhelpers.core.services.base.rst 1970-01-01 00:00:00 +0000
+++ docs/api/charmhelpers.core.services.base.rst 2015-08-13 08:33:21 +0000
@@ -0,0 +1,12 @@
1charmhelpers.core.services.base
2===============================
3
4.. automembersummary::
5 :nosignatures:
6
7 ~charmhelpers.core.services.base
8
9.. automodule:: charmhelpers.core.services.base
10 :members:
11 :undoc-members:
12 :show-inheritance:
013
=== added file 'docs/api/charmhelpers.core.services.helpers.rst'
--- docs/api/charmhelpers.core.services.helpers.rst 1970-01-01 00:00:00 +0000
+++ docs/api/charmhelpers.core.services.helpers.rst 2015-08-13 08:33:21 +0000
@@ -0,0 +1,12 @@
1charmhelpers.core.services.helpers
2==================================
3
4.. automembersummary::
5 :nosignatures:
6
7 ~charmhelpers.core.services.helpers
8
9.. automodule:: charmhelpers.core.services.helpers
10 :members:
11 :undoc-members:
12 :show-inheritance:
013
=== added file 'docs/api/charmhelpers.core.services.rst'
--- docs/api/charmhelpers.core.services.rst 1970-01-01 00:00:00 +0000
+++ docs/api/charmhelpers.core.services.rst 2015-08-13 08:33:21 +0000
@@ -0,0 +1,12 @@
1charmhelpers.core.services
2==========================
3
4.. toctree::
5
6 charmhelpers.core.services.base
7 charmhelpers.core.services.helpers
8
9.. automodule:: charmhelpers.core.services
10 :members:
11 :undoc-members:
12 :show-inheritance:
013
=== added file 'docs/api/charmhelpers.core.strutils.rst'
--- docs/api/charmhelpers.core.strutils.rst 1970-01-01 00:00:00 +0000
+++ docs/api/charmhelpers.core.strutils.rst 2015-08-13 08:33:21 +0000
@@ -0,0 +1,7 @@
1charmhelpers.core.strutils
2============================
3
4.. automodule:: charmhelpers.core.strutils
5 :members:
6 :undoc-members:
7 :show-inheritance:
08
=== added file 'docs/api/charmhelpers.core.sysctl.rst'
--- docs/api/charmhelpers.core.sysctl.rst 1970-01-01 00:00:00 +0000
+++ docs/api/charmhelpers.core.sysctl.rst 2015-08-13 08:33:21 +0000
@@ -0,0 +1,7 @@
1charmhelpers.core.sysctl
2============================
3
4.. automodule:: charmhelpers.core.sysctl
5 :members:
6 :undoc-members:
7 :show-inheritance:
08
=== added file 'docs/api/charmhelpers.core.templating.rst'
--- docs/api/charmhelpers.core.templating.rst 1970-01-01 00:00:00 +0000
+++ docs/api/charmhelpers.core.templating.rst 2015-08-13 08:33:21 +0000
@@ -0,0 +1,7 @@
1charmhelpers.core.templating
2============================
3
4.. automodule:: charmhelpers.core.templating
5 :members:
6 :undoc-members:
7 :show-inheritance:
08
=== added file 'docs/api/charmhelpers.core.unitdata.rst'
--- docs/api/charmhelpers.core.unitdata.rst 1970-01-01 00:00:00 +0000
+++ docs/api/charmhelpers.core.unitdata.rst 2015-08-13 08:33:21 +0000
@@ -0,0 +1,7 @@
1charmhelpers.core.unitdata
2==========================
3
4.. automodule:: charmhelpers.core.unitdata
5 :members:
6 :undoc-members:
7 :show-inheritance:
08
=== modified file 'docs/api/charmhelpers.rst'
--- docs/api/charmhelpers.rst 2014-06-09 17:10:38 +0000
+++ docs/api/charmhelpers.rst 2015-08-13 08:33:21 +0000
@@ -2,12 +2,14 @@
2=================2=================
33
4.. toctree::4.. toctree::
5 :maxdepth: 25 :maxdepth: 3
66
7 charmhelpers.core
7 charmhelpers.contrib8 charmhelpers.contrib
8 charmhelpers.core
9 charmhelpers.fetch9 charmhelpers.fetch
10 charmhelpers.payload10 charmhelpers.payload
11 charmhelpers.cli
12 charmhelpers.coordinator
1113
12.. automodule:: charmhelpers14.. automodule:: charmhelpers
13 :members:15 :members:
1416
=== removed file 'docs/api/modules.rst'
--- docs/api/modules.rst 2014-06-09 14:56:35 +0000
+++ docs/api/modules.rst 1970-01-01 00:00:00 +0000
@@ -1,7 +0,0 @@
1charmhelpers
2============
3
4.. toctree::
5 :maxdepth: 4
6
7 charmhelpers
80
=== modified file 'docs/conf.py'
--- docs/conf.py 2014-09-23 16:34:54 +0000
+++ docs/conf.py 2015-08-13 08:33:21 +0000
@@ -19,6 +19,7 @@
19# add these directories to sys.path here. If the directory is relative to the19# add these directories to sys.path here. If the directory is relative to the
20# documentation root, use os.path.abspath to make it absolute, like shown here.20# documentation root, use os.path.abspath to make it absolute, like shown here.
21sys.path.insert(0, os.path.abspath('../'))21sys.path.insert(0, os.path.abspath('../'))
22sys.path.append(os.path.abspath('_extensions/'))
2223
23# -- General configuration ------------------------------------------------24# -- General configuration ------------------------------------------------
2425
@@ -30,6 +31,8 @@
30# ones.31# ones.
31extensions = [32extensions = [
32 'sphinx.ext.autodoc',33 'sphinx.ext.autodoc',
34 'sphinx.ext.autosummary',
35 'automembersummary',
33]36]
3437
35# Add any paths that contain templates here, relative to this directory.38# Add any paths that contain templates here, relative to this directory.
@@ -72,7 +75,7 @@
7275
73# List of patterns, relative to source directory, that match files and76# List of patterns, relative to source directory, that match files and
74# directories to ignore when looking for source files.77# directories to ignore when looking for source files.
75exclude_patterns = ['_build']78exclude_patterns = ['_build', '_extensions']
7679
77# The reST default role (used for this markup: `text`) to use for all80# The reST default role (used for this markup: `text`) to use for all
78# documents.81# documents.
7982
=== modified file 'setup.py'
--- setup.py 2015-03-04 16:15:18 +0000
+++ setup.py 2015-08-13 08:33:21 +0000
@@ -14,6 +14,13 @@
14 'author': "Ubuntu Developers",14 'author': "Ubuntu Developers",
15 'author_email': "ubuntu-devel-discuss@lists.ubuntu.com",15 'author_email': "ubuntu-devel-discuss@lists.ubuntu.com",
16 'url': "https://code.launchpad.net/charm-helpers",16 'url': "https://code.launchpad.net/charm-helpers",
17 'install_requires': [
18 'netaddr',
19 'PyYAML',
20 'Tempita',
21 'Jinja2',
22 'six',
23 ],
17 'packages': [24 'packages': [
18 "charmhelpers",25 "charmhelpers",
19 "charmhelpers.cli",26 "charmhelpers.cli",
@@ -22,13 +29,27 @@
22 "charmhelpers.fetch",29 "charmhelpers.fetch",
23 "charmhelpers.payload",30 "charmhelpers.payload",
24 "charmhelpers.contrib",31 "charmhelpers.contrib",
32 "charmhelpers.contrib.amulet",
25 "charmhelpers.contrib.ansible",33 "charmhelpers.contrib.ansible",
26 "charmhelpers.contrib.benchmark",34 "charmhelpers.contrib.benchmark",
27 "charmhelpers.contrib.charmhelpers",35 "charmhelpers.contrib.charmhelpers",
28 "charmhelpers.contrib.charmsupport",36 "charmhelpers.contrib.charmsupport",
37 "charmhelpers.contrib.database",
38 "charmhelpers.contrib.hahelpers",
39 "charmhelpers.contrib.network",
40 "charmhelpers.contrib.network.ovs",
41 "charmhelpers.contrib.openstack",
42 "charmhelpers.contrib.openstack.amulet",
43 "charmhelpers.contrib.openstack.files",
44 "charmhelpers.contrib.openstack.templates",
45 "charmhelpers.contrib.peerstorage",
46 "charmhelpers.contrib.python",
29 "charmhelpers.contrib.saltstack",47 "charmhelpers.contrib.saltstack",
30 "charmhelpers.contrib.hahelpers",48 "charmhelpers.contrib.ssl",
49 "charmhelpers.contrib.storage",
50 "charmhelpers.contrib.storage.linux",
31 "charmhelpers.contrib.templating",51 "charmhelpers.contrib.templating",
52 "charmhelpers.contrib.unison",
32 ],53 ],
33 'scripts': [54 'scripts': [
34 "bin/chlp",55 "bin/chlp",
3556
=== modified file 'test_requirements.txt'
--- test_requirements.txt 2014-11-25 15:07:02 +0000
+++ test_requirements.txt 2015-08-13 08:33:21 +0000
@@ -3,10 +3,12 @@
3pip3pip
4distribute4distribute
5coverage>=3.65coverage>=3.6
6mock>=1.0.16mock>=1.0.1,<1.1.0
7nose>=1.3.17nose>=1.3.1
8flake88flake8
9testtools==0.9.14 # Before dependent on modern 'six'9testtools==0.9.14 # Before dependent on modern 'six'
10amulet
11distro-info
10#12#
11# Specify precise versions of runtime dependencies where possible.13# Specify precise versions of runtime dependencies where possible.
12netaddr==0.7.10 # trusty. precise is 0.7.5, but not in pypi.14netaddr==0.7.10 # trusty. precise is 0.7.5, but not in pypi.
1315
=== modified file 'tests/cli/test_cmdline.py'
--- tests/cli/test_cmdline.py 2014-11-25 15:04:52 +0000
+++ tests/cli/test_cmdline.py 2015-08-13 08:33:21 +0000
@@ -5,6 +5,7 @@
5from mock import (5from mock import (
6 patch,6 patch,
7 MagicMock,7 MagicMock,
8 ANY,
8)9)
9import json10import json
10from pprint import pformat11from pprint import pformat
@@ -87,15 +88,61 @@
87 @self.cl.subcommand()88 @self.cl.subcommand()
88 def bar(x, y=None, *vargs):89 def bar(x, y=None, *vargs):
89 "A function that does work."90 "A function that does work."
90 self.bar_called = True91 self.assertEqual(x, 'baz')
91 return "qux"92 self.assertEqual(y, 'why')
9293 self.assertEqual(vargs, ('mux', 'zob'))
93 args = ['foo', 'bar', 'baz']94 self.bar_called = True
94 self.cl.formatter = MagicMock()95 return "qux"
95 with patch("sys.argv", args):96
96 self.cl.run()97 args = ['chlp', 'bar', '--y', 'why', 'baz', 'mux', 'zob']
97 self.assertTrue(self.bar_called)98 self.cl.formatter = MagicMock()
98 self.assertTrue(self.cl.formatter.format_output.called)99 with patch("sys.argv", args):
100 with patch("charmhelpers.core.unitdata._KV") as _KV:
101 self.cl.run()
102 assert _KV.flush.called
103 self.assertTrue(self.bar_called)
104 self.cl.formatter.format_output.assert_called_once_with('qux', ANY)
105
106 def test_no_output(self):
107 self.bar_called = False
108
109 @self.cl.subcommand()
110 @self.cl.no_output
111 def bar(x, y=None, *vargs):
112 "A function that does work."
113 self.bar_called = True
114 return "qux"
115
116 args = ['foo', 'bar', 'baz']
117 self.cl.formatter = MagicMock()
118 with patch("sys.argv", args):
119 self.cl.run()
120 self.assertTrue(self.bar_called)
121 self.cl.formatter.format_output.assert_called_once_with('', ANY)
122
123 def test_test_command(self):
124 self.bar_called = False
125 self.bar_result = True
126
127 @self.cl.subcommand()
128 @self.cl.test_command
129 def bar(x, y=None, *vargs):
130 "A function that does work."
131 self.bar_called = True
132 return self.bar_result
133
134 args = ['foo', 'bar', 'baz']
135 self.cl.formatter = MagicMock()
136 with patch("sys.argv", args):
137 self.cl.run()
138 self.assertTrue(self.bar_called)
139 self.assertEqual(self.cl.exit_code, 0)
140 self.cl.formatter.format_output.assert_called_once_with('', ANY)
141
142 self.bar_result = False
143 with patch("sys.argv", args):
144 self.cl.run()
145 self.assertEqual(self.cl.exit_code, 1)
99146
100147
101class OutputFormatterTest(TestCase):148class OutputFormatterTest(TestCase):
102149
=== added directory 'tests/contrib/amulet'
=== added file 'tests/contrib/amulet/test_utils.py'
--- tests/contrib/amulet/test_utils.py 1970-01-01 00:00:00 +0000
+++ tests/contrib/amulet/test_utils.py 2015-08-13 08:33:21 +0000
@@ -0,0 +1,105 @@
1# Copyright 2015 Canonical Ltd.
2#
3# Authors:
4# Adam Collard <adam.collard@canonical.com>
5
6import unittest
7
8from charmhelpers.contrib.amulet.utils import AmuletUtils
9
10
11class FakeSentry(object):
12
13 commands = {}
14
15 info = {"unit_name": "foo"}
16
17 def run(self, command):
18 return self.commands[command]
19
20
21class ValidateServicesByNameTestCase(unittest.TestCase):
22
23 def setUp(self):
24 self.utils = AmuletUtils()
25 self.sentry_unit = FakeSentry()
26
27 def test_errors_for_unknown_upstart_service(self):
28 """
29 Returns a message if the Upstart service is unknown.
30 """
31 self.sentry_unit.commands["lsb_release -cs"] = "trusty", 0
32 self.sentry_unit.commands["sudo status foo"] = (
33 "status: Unknown job: foo", 1)
34
35 result = self.utils.validate_services_by_name(
36 {self.sentry_unit: ["foo"]})
37 self.assertIsNotNone(result)
38
39 def test_none_for_started_upstart_service(self):
40 """
41 Returns None if the Upstart service is running.
42 """
43 self.sentry_unit.commands["lsb_release -cs"] = "trusty", 0
44 self.sentry_unit.commands["sudo status foo"] = (
45 "foo start/running, process 42", 0)
46
47 result = self.utils.validate_services_by_name(
48 {self.sentry_unit: ["foo"]})
49 self.assertIsNone(result)
50
51 def test_errors_for_stopped_upstart_service(self):
52 """
53 Returns a message if the Upstart service is stopped.
54 """
55 self.sentry_unit.commands["lsb_release -cs"] = "trusty", 0
56 self.sentry_unit.commands["sudo status foo"] = "foo stop/waiting", 0
57
58 result = self.utils.validate_services_by_name(
59 {self.sentry_unit: ["foo"]})
60 self.assertIsNotNone(result)
61
62 def test_errors_for_unknown_systemd_service(self):
63 """
64 Returns a message if a systemd service is unknown.
65 """
66 self.sentry_unit.commands["lsb_release -cs"] = "vivid", 0
67 self.sentry_unit.commands["sudo service foo status"] = (u"""\
68\u25cf foo.service
69 Loaded: not-found (Reason: No such file or directory)
70 Active: inactive (dead)
71""", 3)
72
73 result = self.utils.validate_services_by_name({
74 self.sentry_unit: ["foo"]})
75 self.assertIsNotNone(result)
76
77 def test_none_for_started_systemd_service(self):
78 """
79 Returns None if a systemd service is running.
80 """
81 self.sentry_unit.commands["lsb_release -cs"] = "vivid", 0
82 self.sentry_unit.commands["sudo service foo status"] = (u"""\
83\u25cf foo.service - Foo
84 Loaded: loaded (/lib/systemd/system/foo.service; enabled)
85 Active: active (exited) since Thu 1970-01-01 00:00:00 UTC; 42h 42min ago
86 Main PID: 3 (code=exited, status=0/SUCCESS)
87 CGroup: /system.slice/foo.service
88""", 0)
89 result = self.utils.validate_services_by_name(
90 {self.sentry_unit: ["foo"]})
91 self.assertIsNone(result)
92
93 def test_errors_for_stopped_systemd_service(self):
94 """
95 Returns a message if a systemd service is stopped.
96 """
97 self.sentry_unit.commands["lsb_release -cs"] = "vivid", 0
98 self.sentry_unit.commands["sudo service foo status"] = (u"""\
99\u25cf foo.service - Foo
100 Loaded: loaded (/lib/systemd/system/foo.service; disabled)
101 Active: inactive (dead)
102""", 3)
103 result = self.utils.validate_services_by_name(
104 {self.sentry_unit: ["foo"]})
105 self.assertIsNotNone(result)
0106
=== modified file 'tests/contrib/benchmark/test_benchmark.py'
--- tests/contrib/benchmark/test_benchmark.py 2015-04-24 16:07:29 +0000
+++ tests/contrib/benchmark/test_benchmark.py 2015-08-13 08:33:21 +0000
@@ -1,3 +1,8 @@
1from functools import partial
2from os.path import join
3from tempfile import mkdtemp
4from shutil import rmtree
5
1import mock6import mock
2from testtools import TestCase7from testtools import TestCase
3# import unittest8# import unittest
@@ -33,7 +38,8 @@
33 self.fake_relation = FakeRelation(FAKE_RELATION)38 self.fake_relation = FakeRelation(FAKE_RELATION)
34 # self.hook_name.return_value = 'benchmark-relation-changed'39 # self.hook_name.return_value = 'benchmark-relation-changed'
3540
36 self.relation_get.side_effect = self.fake_relation.get41 self.relation_get.side_effect = partial(
42 self.fake_relation.get, rid="benchmark:0", unit="benchmark/0")
37 self.relation_ids.side_effect = self.fake_relation.relation_ids43 self.relation_ids.side_effect = self.fake_relation.relation_ids
3844
39 def _patch(self, method):45 def _patch(self, method):
@@ -87,34 +93,32 @@
87 check_call.assert_any_call(['action-set', 'baz.foo=1'])93 check_call.assert_any_call(['action-set', 'baz.foo=1'])
88 check_call.assert_any_call(['action-set', 'baz.bar=2'])94 check_call.assert_any_call(['action-set', 'baz.bar=2'])
8995
90 @mock.patch('charmhelpers.contrib.benchmark.relation_get')
91 @mock.patch('charmhelpers.contrib.benchmark.relation_set')
92 @mock.patch('charmhelpers.contrib.benchmark.relation_ids')96 @mock.patch('charmhelpers.contrib.benchmark.relation_ids')
93 @mock.patch('charmhelpers.contrib.benchmark.in_relation_hook')97 @mock.patch('charmhelpers.contrib.benchmark.in_relation_hook')
94 def test_benchmark_init(self, in_relation_hook, relation_ids, relation_set, relation_get):98 def test_benchmark_init(self, in_relation_hook, relation_ids):
9599
96 in_relation_hook.return_value = True100 in_relation_hook.return_value = True
97 relation_ids.return_value = ['benchmark:0']101 relation_ids.return_value = ['benchmark:0']
98 actions = ['asdf', 'foobar']102 actions = ['asdf', 'foobar']
99103
100 with patch_open() as (_open, _file):104 tempdir = mkdtemp(prefix=self.__class__.__name__)
105 self.addCleanup(rmtree, tempdir)
106 conf_path = join(tempdir, "benchmark.conf")
107 with mock.patch.object(Benchmark, "BENCHMARK_CONF", conf_path):
101 b = Benchmark(actions)108 b = Benchmark(actions)
102109
103 self.assertIsInstance(b, Benchmark)110 self.assertIsInstance(b, Benchmark)
104111
105 self.assertTrue(relation_get.called)112 self.assertTrue(self.relation_get.called)
106 self.assertTrue(relation_set.called)113 self.assertTrue(self.relation_set.called)
107114
108 relation_ids.assert_called_once_with('benchmark')115 relation_ids.assert_called_once_with('benchmark')
109116
110 for key in b.required_keys:117 self.relation_set.assert_called_once_with(
111 relation_get.assert_any_call(key)
112
113 relation_set.assert_called_once_with(
114 relation_id='benchmark:0',118 relation_id='benchmark:0',
115 relation_settings={'benchmarks': ",".join(actions)}119 relation_settings={'benchmarks': ",".join(actions)}
116 )120 )
117121
118 _open.assert_called_with('/etc/benchmark.conf', 'w')122 conf_contents = open(conf_path).readlines()
119 for key, val in iter(FAKE_RELATION['benchmark:0']['benchmark/0'].items()):123 for key, val in iter(FAKE_RELATION['benchmark:0']['benchmark/0'].items()):
120 _file.write.assert_any_called("%s=%s\n" % (key, val))124 self.assertIn("%s=%s\n" % (key, val), conf_contents)
121125
=== modified file 'tests/contrib/hahelpers/test_apache_utils.py'
--- tests/contrib/hahelpers/test_apache_utils.py 2014-09-24 09:42:52 +0000
+++ tests/contrib/hahelpers/test_apache_utils.py 2015-08-13 08:33:21 +0000
@@ -115,4 +115,4 @@
115 apache_utils.install_ca_cert(cert)115 apache_utils.install_ca_cert(cert)
116 _open.assert_called_with('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', 'w')116 _open.assert_called_with('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', 'w')
117 _file.write.assert_called_with(cert)117 _file.write.assert_called_with(cert)
118 self.subprocess.assertCalledWith(['update-ca-certificates', '--fresh'])118 self.subprocess.check_call.assert_called_with(['update-ca-certificates', '--fresh'])
119119
=== modified file 'tests/contrib/network/test_ufw.py'
--- tests/contrib/network/test_ufw.py 2015-02-12 20:08:28 +0000
+++ tests/contrib/network/test_ufw.py 2015-08-13 08:33:21 +0000
@@ -31,6 +31,12 @@
31xt_LOG 17702 031xt_LOG 17702 0
32xt_limit 12711 032xt_limit 12711 0
33"""33"""
34DEFAULT_POLICY_OUTPUT = """Default incoming policy changed to 'deny'
35(be sure to update your rules accordingly)
36"""
37DEFAULT_POLICY_OUTPUT_OUTGOING = """Default outgoing policy changed to 'allow'
38(be sure to update your rules accordingly)
39"""
3440
3541
36class TestUFW(unittest.TestCase):42class TestUFW(unittest.TestCase):
@@ -194,6 +200,24 @@
194 @mock.patch('charmhelpers.contrib.network.ufw.is_enabled')200 @mock.patch('charmhelpers.contrib.network.ufw.is_enabled')
195 @mock.patch('charmhelpers.core.hookenv.log')201 @mock.patch('charmhelpers.core.hookenv.log')
196 @mock.patch('subprocess.Popen')202 @mock.patch('subprocess.Popen')
203 def test_modify_access_with_index(self, popen, log, is_enabled):
204 is_enabled.return_value = True
205 p = mock.Mock()
206 p.configure_mock(**{'communicate.return_value': ('stdout', 'stderr'),
207 'returncode': 0})
208 popen.return_value = p
209
210 ufw.modify_access('127.0.0.1', dst='127.0.0.1', port='80', index=1)
211 popen.assert_any_call(['ufw', 'insert', '1', 'allow', 'from',
212 '127.0.0.1', 'to', '127.0.0.1', 'port', '80'],
213 stdout=subprocess.PIPE)
214 log.assert_any_call(('ufw allow: ufw insert 1 allow from 127.0.0.1 '
215 'to 127.0.0.1 port 80'), level='DEBUG')
216 log.assert_any_call('stdout', level='INFO')
217
218 @mock.patch('charmhelpers.contrib.network.ufw.is_enabled')
219 @mock.patch('charmhelpers.core.hookenv.log')
220 @mock.patch('subprocess.Popen')
197 def test_grant_access(self, popen, log, is_enabled):221 def test_grant_access(self, popen, log, is_enabled):
198 is_enabled.return_value = True222 is_enabled.return_value = True
199 p = mock.Mock()223 p = mock.Mock()
@@ -212,6 +236,24 @@
212 @mock.patch('charmhelpers.contrib.network.ufw.is_enabled')236 @mock.patch('charmhelpers.contrib.network.ufw.is_enabled')
213 @mock.patch('charmhelpers.core.hookenv.log')237 @mock.patch('charmhelpers.core.hookenv.log')
214 @mock.patch('subprocess.Popen')238 @mock.patch('subprocess.Popen')
239 def test_grant_access_with_index(self, popen, log, is_enabled):
240 is_enabled.return_value = True
241 p = mock.Mock()
242 p.configure_mock(**{'communicate.return_value': ('stdout', 'stderr'),
243 'returncode': 0})
244 popen.return_value = p
245
246 ufw.grant_access('127.0.0.1', dst='127.0.0.1', port='80', index=1)
247 popen.assert_any_call(['ufw', 'insert', '1', 'allow', 'from',
248 '127.0.0.1', 'to', '127.0.0.1', 'port', '80'],
249 stdout=subprocess.PIPE)
250 log.assert_any_call(('ufw allow: ufw insert 1 allow from 127.0.0.1 '
251 'to 127.0.0.1 port 80'), level='DEBUG')
252 log.assert_any_call('stdout', level='INFO')
253
254 @mock.patch('charmhelpers.contrib.network.ufw.is_enabled')
255 @mock.patch('charmhelpers.core.hookenv.log')
256 @mock.patch('subprocess.Popen')
215 def test_revoke_access(self, popen, log, is_enabled):257 def test_revoke_access(self, popen, log, is_enabled):
216 is_enabled.return_value = True258 is_enabled.return_value = True
217 p = mock.Mock()259 p = mock.Mock()
@@ -366,3 +408,33 @@
366 is_enabled.return_value = False408 is_enabled.return_value = False
367 isdir.return_value = True409 isdir.return_value = True
368 ufw.enable()410 ufw.enable()
411
412 @mock.patch('charmhelpers.core.hookenv.log')
413 @mock.patch('subprocess.check_output')
414 def test_change_default_policy(self, check_output, log):
415 check_output.return_value = DEFAULT_POLICY_OUTPUT
416 self.assertTrue(ufw.default_policy())
 417 check_output.assert_any_call(['ufw', 'default', 'deny', 'incoming'])
418
419 @mock.patch('charmhelpers.core.hookenv.log')
420 @mock.patch('subprocess.check_output')
421 def test_change_default_policy_allow_outgoing(self, check_output, log):
422 check_output.return_value = DEFAULT_POLICY_OUTPUT_OUTGOING
423 self.assertTrue(ufw.default_policy('allow', 'outgoing'))
 424 check_output.assert_any_call(['ufw', 'default', 'allow', 'outgoing'])
425
426 @mock.patch('charmhelpers.core.hookenv.log')
427 @mock.patch('subprocess.check_output')
428 def test_change_default_policy_unexpected_output(self, check_output, log):
429 check_output.return_value = "asdf"
430 self.assertFalse(ufw.default_policy())
431
432 @mock.patch('charmhelpers.core.hookenv.log')
433 @mock.patch('subprocess.check_output')
434 def test_change_default_policy_wrong_policy(self, check_output, log):
435 self.assertRaises(ufw.UFWError, ufw.default_policy, 'asdf')
436
437 @mock.patch('charmhelpers.core.hookenv.log')
438 @mock.patch('subprocess.check_output')
439 def test_change_default_policy_wrong_direction(self, check_output, log):
440 self.assertRaises(ufw.UFWError, ufw.default_policy, 'allow', 'asdf')
369441
=== modified file 'tests/contrib/openstack/test_openstack_utils.py'
--- tests/contrib/openstack/test_openstack_utils.py 2015-05-11 18:53:44 +0000
+++ tests/contrib/openstack/test_openstack_utils.py 2015-08-13 08:33:21 +0000
@@ -26,6 +26,16 @@
26}26}
2727
28FAKE_REPO = {28FAKE_REPO = {
29 'neutron-common': {
30 'pkg_vers': '2:7.0.0-0ubuntu1',
31 'os_release': 'liberty',
32 'os_version': '2015.2'
33 },
34 'nova-common': {
35 'pkg_vers': '2:12.0.0~b1-0ubuntu1',
36 'os_release': 'liberty',
37 'os_version': '2015.2'
38 },
29 'nova-common': {39 'nova-common': {
30 'pkg_vers': '2012.2.3-0ubuntu2.1',40 'pkg_vers': '2012.2.3-0ubuntu2.1',
31 'os_release': 'folsom',41 'os_release': 'folsom',
@@ -489,8 +499,8 @@
489 expected_f = '/var/lib/juju/units/testing-foo-0/charm/scripts/scriptrc'499 expected_f = '/var/lib/juju/units/testing-foo-0/charm/scripts/scriptrc'
490 _open.assert_called_with(expected_f, 'wb')500 _open.assert_called_with(expected_f, 'wb')
491 _mkdir.assert_called_with(os.path.dirname(expected_f))501 _mkdir.assert_called_with(os.path.dirname(expected_f))
492 for line in scriptrc:502 _file.__enter__().write.assert_has_calls(
493 _file.__enter__().write.assert_has_calls(call(line))503 list(call(line) for line in scriptrc), any_order=True)
494504
495 @patch.object(openstack, 'lsb_release')505 @patch.object(openstack, 'lsb_release')
496 @patch.object(openstack, 'get_os_version_package')506 @patch.object(openstack, 'get_os_version_package')
@@ -642,11 +652,13 @@
642 error_out.assert_called_with(652 error_out.assert_called_with(
643 'openstack-origin-git key \'%s\' is missing' % key)653 'openstack-origin-git key \'%s\' is missing' % key)
644654
655 @patch('os.path.join')
645 @patch.object(openstack, 'error_out')656 @patch.object(openstack, 'error_out')
646 @patch.object(openstack, '_git_clone_and_install_single')657 @patch.object(openstack, '_git_clone_and_install_single')
658 @patch.object(openstack, 'pip_install')
647 @patch.object(openstack, 'pip_create_virtualenv')659 @patch.object(openstack, 'pip_create_virtualenv')
648 def test_git_clone_and_install_errors(self, pip_venv, git_install_single,660 def test_git_clone_and_install_errors(self, pip_venv, pip_install,
649 error_out):661 git_install_single, error_out, join):
650 git_missing_repos = """662 git_missing_repos = """
651 repostories:663 repostories:
652 - {name: requirements,664 - {name: requirements,
@@ -704,19 +716,26 @@
704 openstack.git_clone_and_install(git_wrong_order_2, 'keystone', depth=1)716 openstack.git_clone_and_install(git_wrong_order_2, 'keystone', depth=1)
705 error_out.assert_called_with('requirements git repo must be specified first')717 error_out.assert_called_with('requirements git repo must be specified first')
706718
719 @patch('os.path.join')
707 @patch.object(openstack, 'charm_dir')720 @patch.object(openstack, 'charm_dir')
708 @patch.object(openstack, 'error_out')721 @patch.object(openstack, 'error_out')
709 @patch.object(openstack, '_git_clone_and_install_single')722 @patch.object(openstack, '_git_clone_and_install_single')
723 @patch.object(openstack, 'pip_install')
710 @patch.object(openstack, 'pip_create_virtualenv')724 @patch.object(openstack, 'pip_create_virtualenv')
711 def test_git_clone_and_install_success(self, pip_venv, _git_install_single,725 def test_git_clone_and_install_success(self, pip_venv, pip_install,
712 error_out, charm_dir):726 _git_install_single, error_out,
727 charm_dir, join):
713 proj = 'keystone'728 proj = 'keystone'
714 charm_dir.return_value = '/var/lib/juju/units/testing-foo-0/charm'729 charm_dir.return_value = '/var/lib/juju/units/testing-foo-0/charm'
715 # the following sets the global requirements_dir730 # the following sets the global requirements_dir
716 _git_install_single.return_value = '/mnt/openstack-git/requirements'731 _git_install_single.return_value = '/mnt/openstack-git/requirements'
732 join.return_value = '/mnt/openstack-git/venv'
717733
718 openstack.git_clone_and_install(openstack_origin_git, proj, depth=1)734 openstack.git_clone_and_install(openstack_origin_git, proj, depth=1)
719 self.assertTrue(pip_venv.called)735 self.assertTrue(pip_venv.called)
736 pip_install.assert_called_with('setuptools', upgrade=True,
737 proxy=None,
738 venv='/mnt/openstack-git/venv')
720 self.assertTrue(_git_install_single.call_count == 2)739 self.assertTrue(_git_install_single.call_count == 2)
721 expected = [740 expected = [
722 call('git://git.openstack.org/openstack/requirements',741 call('git://git.openstack.org/openstack/requirements',
@@ -775,6 +794,7 @@
775 parent_dir = '/mnt/openstack-git/'794 parent_dir = '/mnt/openstack-git/'
776 http_proxy = 'http://squid-proxy-url'795 http_proxy = 'http://squid-proxy-url'
777 dest_dir = '/mnt/openstack-git'796 dest_dir = '/mnt/openstack-git'
797 venv_dir = '/mnt/openstack-git'
778 reqs_dir = '/mnt/openstack-git/requirements-dir'798 reqs_dir = '/mnt/openstack-git/requirements-dir'
779 join.return_value = dest_dir799 join.return_value = dest_dir
780 openstack.requirements_dir = reqs_dir800 openstack.requirements_dir = reqs_dir
@@ -786,23 +806,27 @@
786 mkdir.assert_called_with(parent_dir)806 mkdir.assert_called_with(parent_dir)
787 install_remote.assert_called_with(repo, dest=parent_dir, depth=1,807 install_remote.assert_called_with(repo, dest=parent_dir, depth=1,
788 branch=branch)808 branch=branch)
789 _git_update_reqs.assert_called_with(dest_dir, reqs_dir)809 _git_update_reqs.assert_called_with(venv_dir, dest_dir, reqs_dir)
790 pip_install.assert_called_with(dest_dir, venv='/mnt/openstack-git',810 pip_install.assert_called_with(dest_dir, venv='/mnt/openstack-git',
791 proxy='http://squid-proxy-url')811 proxy='http://squid-proxy-url')
792812
813 @patch('os.path.join')
793 @patch('os.getcwd')814 @patch('os.getcwd')
794 @patch('os.chdir')815 @patch('os.chdir')
795 @patch('subprocess.check_call')816 @patch('subprocess.check_call')
796 def test_git_update_requirements(self, check_call, chdir, getcwd):817 def test_git_update_requirements(self, check_call, chdir, getcwd, join):
797 pkg_dir = '/mnt/openstack-git/repo-dir'818 pkg_dir = '/mnt/openstack-git/repo-dir'
798 reqs_dir = '/mnt/openstack-git/reqs-dir'819 reqs_dir = '/mnt/openstack-git/reqs-dir'
799 orig_dir = '/var/lib/juju/units/testing-foo-0/charm'820 orig_dir = '/var/lib/juju/units/testing-foo-0/charm'
821 venv_dir = '/mnt/openstack-git/venv'
800 getcwd.return_value = orig_dir822 getcwd.return_value = orig_dir
823 join.return_value = '/mnt/openstack-git/venv/python'
801824
802 openstack._git_update_requirements(pkg_dir, reqs_dir)825 openstack._git_update_requirements(venv_dir, pkg_dir, reqs_dir)
803 expected = [call(reqs_dir), call(orig_dir)]826 expected = [call(reqs_dir), call(orig_dir)]
804 self.assertEquals(expected, chdir.call_args_list)827 self.assertEquals(expected, chdir.call_args_list)
805 check_call.assert_called_with(['python', 'update.py', pkg_dir])828 check_call.assert_called_with(['/mnt/openstack-git/venv/python',
829 'update.py', pkg_dir])
806830
807 @patch('os.path.join')831 @patch('os.path.join')
808 @patch('subprocess.check_call')832 @patch('subprocess.check_call')
809833
=== modified file 'tests/contrib/openstack/test_os_contexts.py'
--- tests/contrib/openstack/test_os_contexts.py 2015-04-29 12:52:18 +0000
+++ tests/contrib/openstack/test_os_contexts.py 2015-08-13 08:33:21 +0000
@@ -73,7 +73,11 @@
73 return None73 return None
7474
    def relation_ids(self, relation):
        """Return only the relation ids belonging to *relation*.

        Filters self.relation_data keys by the 'name:id' naming
        convention so a fake environment holding several relations does
        not leak ids from unrelated interfaces.

        NOTE(review): this is a substring test, so a relation name that
        is a suffix of another (e.g. 'plugin' vs 'neutron-plugin') would
        also match; rid.startswith(relation + ':') would be stricter —
        confirm no existing test relies on the loose match.
        """
        rids = []
        for rid in self.relation_data.keys():
            if relation + ':' in rid:
                rids.append(rid)
        return rids
7781
78 def relation_units(self, relation_id):82 def relation_units(self, relation_id):
79 if relation_id not in self.relation_data:83 if relation_id not in self.relation_data:
@@ -325,6 +329,25 @@
325 - [glance-key2, value2]329 - [glance-key2, value2]
326"""330"""
327331
# YAML fixtures for SubordinateConfigContext: two subordinate charms
# ('nova' and 'nova-compute' principals) each injecting key/value pairs
# into the DEFAULT section of /etc/nova/nova.conf.
NOVA_SUB_CONFIG1 = """
nova:
    /etc/nova/nova.conf:
        sections:
            DEFAULT:
                - [nova-key1, value1]
                - [nova-key2, value2]
"""


NOVA_SUB_CONFIG2 = """
nova-compute:
    /etc/nova/nova.conf:
        sections:
            DEFAULT:
                - [nova-key3, value3]
                - [nova-key4, value4]
"""
350
328CINDER_SUB_CONFIG1 = """351CINDER_SUB_CONFIG1 = """
329cinder:352cinder:
330 /etc/cinder/cinder.conf:353 /etc/cinder/cinder.conf:
@@ -376,6 +399,21 @@
376 },399 },
377}400}
378401
# Fake relation data spanning two distinct subordinate relations; used by
# test_os_subordinate_config_context_multiple to prove settings from
# multiple interfaces are merged.
# NOTE(review): yaml.load without an explicit Loader is deprecated in
# newer PyYAML; harmless here for trusted literals, but safe_load would
# be equivalent — confirm against the project's PyYAML baseline.
SUB_CONFIG_RELATION2 = {
    'nova-ceilometer:6': {
        'ceilometer-agent/0': {
            'private-address': 'nova_node1',
            'subordinate_configuration': json.dumps(yaml.load(NOVA_SUB_CONFIG1)),
        },
    },
    'neutron-plugin:3': {
        'neutron-ovs-plugin/0': {
            'private-address': 'nova_node1',
            'subordinate_configuration': json.dumps(yaml.load(NOVA_SUB_CONFIG2)),
        },
    }
}
416
379NONET_CONFIG = {417NONET_CONFIG = {
380 'vip': 'cinderhost1vip',418 'vip': 'cinderhost1vip',
381 'os-internal-network': None,419 'os-internal-network': None,
@@ -2053,6 +2091,27 @@
2053 # subordinate supplies bad input2091 # subordinate supplies bad input
2054 self.assertEquals(foo_sub_ctxt(), {'sections': {}})2092 self.assertEquals(foo_sub_ctxt(), {'sections': {}})
20552093
    def test_os_subordinate_config_context_multiple(self):
        """Settings from several subordinate relations are merged.

        Two interfaces ('nova-ceilometer' and 'neutron-plugin') each
        supply a subordinate_configuration blob targeting
        /etc/nova/nova.conf; the rendered context must contain the union
        of their DEFAULT-section entries.
        """
        relation = FakeRelation(relation_data=SUB_CONFIG_RELATION2)
        self.relation_get.side_effect = relation.get
        self.relation_ids.side_effect = relation.relation_ids
        self.related_units.side_effect = relation.relation_units
        nova_sub_ctxt = context.SubordinateConfigContext(
            service=['nova', 'nova-compute'],
            config_file='/etc/nova/nova.conf',
            interface=['nova-ceilometer', 'neutron-plugin'],
        )
        self.assertEquals(
            nova_sub_ctxt(),
            {'sections': {
                'DEFAULT': [
                    ['nova-key1', 'value1'],
                    ['nova-key2', 'value2'],
                    ['nova-key3', 'value3'],
                    ['nova-key4', 'value4']]
            }}
        )
2114
2056 def test_syslog_context(self):2115 def test_syslog_context(self):
2057 self.config.side_effect = fake_config({'use-syslog': 'foo'})2116 self.config.side_effect = fake_config({'use-syslog': 'foo'})
2058 syslog = context.SyslogContext()2117 syslog = context.SyslogContext()
20592118
=== modified file 'tests/contrib/peerstorage/test_peerstorage.py'
--- tests/contrib/peerstorage/test_peerstorage.py 2015-06-03 14:46:50 +0000
+++ tests/contrib/peerstorage/test_peerstorage.py 2015-08-13 08:33:21 +0000
@@ -202,7 +202,7 @@
202 l_settings = {'s3': 3}202 l_settings = {'s3': 3}
203 r_settings = {'s1': 1, 's2': 2}203 r_settings = {'s1': 1, 's2': 2}
204204
205 def mock_relation_get(attribute=None, unit=None):205 def mock_relation_get(attribute=None, unit=None, rid=None):
206 if attribute:206 if attribute:
207 if attribute in r_settings:207 if attribute in r_settings:
208 return r_settings.get(attribute)208 return r_settings.get(attribute)
@@ -237,11 +237,11 @@
237 self.assertEqual(_dicta, _dictb)237 self.assertEqual(_dicta, _dictb)
238238
239 migration_key = '__leader_get_migrated_settings__'239 migration_key = '__leader_get_migrated_settings__'
240 self.relation_get.side_effect = mock_relation_get240 self._relation_get.side_effect = mock_relation_get
241 self._leader_get.side_effect = mock_leader_get241 self._leader_get.side_effect = mock_leader_get
242 self.leader_set.side_effect = mock_leader_set242 self.leader_set.side_effect = mock_leader_set
243243
244 self.assertEqual({'s1': 1, 's2': 2}, peerstorage.relation_get())244 self.assertEqual({'s1': 1, 's2': 2}, peerstorage._relation_get())
245 self.assertEqual({'s3': 3}, peerstorage._leader_get())245 self.assertEqual({'s3': 3}, peerstorage._leader_get())
246 self.assertEqual({'s1': 1, 's2': 2, 's3': 3}, peerstorage.leader_get())246 self.assertEqual({'s1': 1, 's2': 2, 's3': 3}, peerstorage.leader_get())
247 check_leader_db({'s1': 1, 's2': 2, 's3': 3,247 check_leader_db({'s1': 1, 's2': 2, 's3': 3,
@@ -274,7 +274,7 @@
274274
275 peerstorage.leader_set.reset_mock()275 peerstorage.leader_set.reset_mock()
276 self.assertEqual({'s1': 1, 's2': 2, 's3': 2, 's4': 3},276 self.assertEqual({'s1': 1, 's2': 2, 's3': 2, 's4': 3},
277 peerstorage.relation_get())277 peerstorage._relation_get())
278 check_leader_db({'s1': 1, 's3': 3, 's4': 4,278 check_leader_db({'s1': 1, 's3': 3, 's4': 4,
279 migration_key: '["s1", "s4"]'},279 migration_key: '["s1", "s4"]'},
280 peerstorage._leader_get())280 peerstorage._leader_get())
@@ -290,7 +290,7 @@
290 l_settings = {'s3': 3}290 l_settings = {'s3': 3}
291 r_settings = {'s1': 1, 's2': 2}291 r_settings = {'s1': 1, 's2': 2}
292292
293 def mock_relation_get(attribute=None, unit=None):293 def mock_relation_get(attribute=None, unit=None, rid=None):
294 if attribute:294 if attribute:
295 if attribute in r_settings:295 if attribute in r_settings:
296 return r_settings.get(attribute)296 return r_settings.get(attribute)
@@ -314,10 +314,10 @@
314314
315 l_settings.update(kwargs)315 l_settings.update(kwargs)
316316
317 self.relation_get.side_effect = mock_relation_get317 self._relation_get.side_effect = mock_relation_get
318 self._leader_get.side_effect = mock_leader_get318 self._leader_get.side_effect = mock_leader_get
319 self.leader_set.side_effect = mock_leader_set319 self.leader_set.side_effect = mock_leader_set
320 self.assertEqual({'s1': 1, 's2': 2}, peerstorage.relation_get())320 self.assertEqual({'s1': 1, 's2': 2}, peerstorage._relation_get())
321 self.assertEqual({'s3': 3}, peerstorage._leader_get())321 self.assertEqual({'s3': 3}, peerstorage._leader_get())
322 self.assertEqual({'s3': 3}, peerstorage.leader_get())322 self.assertEqual({'s3': 3}, peerstorage.leader_get())
323 self.assertEqual({'s3': 3}, l_settings)323 self.assertEqual({'s3': 3}, l_settings)
324324
=== modified file 'tests/contrib/python/test_debug.py' (properties changed: -x to +x)
--- tests/contrib/python/test_debug.py 2015-02-11 21:41:57 +0000
+++ tests/contrib/python/test_debug.py 2015-08-13 08:33:21 +0000
@@ -51,4 +51,4 @@
51 """51 """
52 self.set_trace()52 self.set_trace()
53 self.Rpdb.set_trace.side_effect = Exception()53 self.Rpdb.set_trace.side_effect = Exception()
54 self._error.assert_called_once()54 self.assertTrue(self._error.called)
5555
=== modified file 'tests/contrib/storage/test_linux_ceph.py'
--- tests/contrib/storage/test_linux_ceph.py 2015-01-13 11:17:57 +0000
+++ tests/contrib/storage/test_linux_ceph.py 2015-08-13 08:33:21 +0000
@@ -62,7 +62,7 @@
62 '''It creates a new ceph keyring'''62 '''It creates a new ceph keyring'''
63 _exists.return_value = True63 _exists.return_value = True
64 ceph_utils.create_keyring('cinder', 'cephkey')64 ceph_utils.create_keyring('cinder', 'cephkey')
65 self.log.assert_called()65 self.assertTrue(self.log.called)
66 self.check_call.assert_not_called()66 self.check_call.assert_not_called()
6767
68 @patch('os.remove')68 @patch('os.remove')
@@ -72,7 +72,7 @@
72 _exists.return_value = True72 _exists.return_value = True
73 ceph_utils.delete_keyring('cinder')73 ceph_utils.delete_keyring('cinder')
74 _remove.assert_called_with('/etc/ceph/ceph.client.cinder.keyring')74 _remove.assert_called_with('/etc/ceph/ceph.client.cinder.keyring')
75 self.log.assert_called()75 self.assertTrue(self.log.called)
7676
77 @patch('os.remove')77 @patch('os.remove')
78 @patch('os.path.exists')78 @patch('os.path.exists')
@@ -80,7 +80,7 @@
80 '''It creates a new ceph keyring.'''80 '''It creates a new ceph keyring.'''
81 _exists.return_value = False81 _exists.return_value = False
82 ceph_utils.delete_keyring('cinder')82 ceph_utils.delete_keyring('cinder')
83 self.log.assert_called()83 self.assertTrue(self.log.called)
84 _remove.assert_not_called()84 _remove.assert_not_called()
8585
86 @patch('os.path.exists')86 @patch('os.path.exists')
@@ -90,14 +90,14 @@
90 with patch_open() as (_open, _file):90 with patch_open() as (_open, _file):
91 ceph_utils.create_key_file('cinder', 'cephkey')91 ceph_utils.create_key_file('cinder', 'cephkey')
92 _file.write.assert_called_with('cephkey')92 _file.write.assert_called_with('cephkey')
93 self.log.assert_called()93 self.assertTrue(self.log.called)
9494
95 @patch('os.path.exists')95 @patch('os.path.exists')
96 def test_create_key_file_already_exists(self, _exists):96 def test_create_key_file_already_exists(self, _exists):
97 '''It creates a new ceph keyring'''97 '''It creates a new ceph keyring'''
98 _exists.return_value = True98 _exists.return_value = True
99 ceph_utils.create_key_file('cinder', 'cephkey')99 ceph_utils.create_key_file('cinder', 'cephkey')
100 self.log.assert_called()100 self.assertTrue(self.log.called)
101101
102 @patch('os.mkdir')102 @patch('os.mkdir')
103 @patch.object(ceph_utils, 'apt_install')103 @patch.object(ceph_utils, 'apt_install')
@@ -171,7 +171,7 @@
171 self._patch('pool_exists')171 self._patch('pool_exists')
172 self.pool_exists.return_value = True172 self.pool_exists.return_value = True
173 ceph_utils.create_pool(service='cinder', name='foo')173 ceph_utils.create_pool(service='cinder', name='foo')
174 self.log.assert_called()174 self.assertTrue(self.log.called)
175 self.check_call.assert_not_called()175 self.check_call.assert_not_called()
176176
177 def test_keyring_path(self):177 def test_keyring_path(self):
@@ -202,14 +202,14 @@
202 def test_rbd_exists(self):202 def test_rbd_exists(self):
203 self.check_output.return_value = LS_RBDS203 self.check_output.return_value = LS_RBDS
204 self.assertTrue(ceph_utils.rbd_exists('service', 'pool', 'rbd1'))204 self.assertTrue(ceph_utils.rbd_exists('service', 'pool', 'rbd1'))
205 self.check_output.assert_call_with(205 self.check_output.assert_called_with(
206 ['rbd', 'list', '--id', 'service', '--pool', 'pool']206 ['rbd', 'list', '--id', 'service', '--pool', 'pool']
207 )207 )
208208
209 def test_rbd_does_not_exist(self):209 def test_rbd_does_not_exist(self):
210 self.check_output.return_value = LS_RBDS210 self.check_output.return_value = LS_RBDS
211 self.assertFalse(ceph_utils.rbd_exists('service', 'pool', 'rbd4'))211 self.assertFalse(ceph_utils.rbd_exists('service', 'pool', 'rbd4'))
212 self.check_output.assert_call_with(212 self.check_output.assert_called_with(
213 ['rbd', 'list', '--id', 'service', '--pool', 'pool']213 ['rbd', 'list', '--id', 'service', '--pool', 'pool']
214 )214 )
215215
@@ -304,7 +304,7 @@
304 _file.read.return_value = 'anothermod\n'304 _file.read.return_value = 'anothermod\n'
305 ceph_utils.modprobe('mymod')305 ceph_utils.modprobe('mymod')
306 _open.assert_called_with('/etc/modules', 'r+')306 _open.assert_called_with('/etc/modules', 'r+')
307 _file.read.assert_called()307 _file.read.assert_called_with()
308 _file.write.assert_called_with('mymod')308 _file.write.assert_called_with('mymod')
309 self.check_call.assert_called_with(['modprobe', 'mymod'])309 self.check_call.assert_called_with(['modprobe', 'mymod'])
310310
@@ -318,14 +318,14 @@
318 def test_make_filesystem(self, _exists):318 def test_make_filesystem(self, _exists):
319 _exists.return_value = True319 _exists.return_value = True
320 ceph_utils.make_filesystem('/dev/sdd')320 ceph_utils.make_filesystem('/dev/sdd')
321 self.log.assert_called()321 self.assertTrue(self.log.called)
322 self.check_call.assert_called_with(['mkfs', '-t', 'ext4', '/dev/sdd'])322 self.check_call.assert_called_with(['mkfs', '-t', 'ext4', '/dev/sdd'])
323323
324 @patch('os.path.exists')324 @patch('os.path.exists')
325 def test_make_filesystem_xfs(self, _exists):325 def test_make_filesystem_xfs(self, _exists):
326 _exists.return_value = True326 _exists.return_value = True
327 ceph_utils.make_filesystem('/dev/sdd', 'xfs')327 ceph_utils.make_filesystem('/dev/sdd', 'xfs')
328 self.log.assert_called()328 self.assertTrue(self.log.called)
329 self.check_call.assert_called_with(['mkfs', '-t', 'xfs', '/dev/sdd'])329 self.check_call.assert_called_with(['mkfs', '-t', 'xfs', '/dev/sdd'])
330330
331 @patch('os.chown')331 @patch('os.chown')
332332
=== modified file 'tests/contrib/storage/test_linux_storage_utils.py'
--- tests/contrib/storage/test_linux_storage_utils.py 2014-11-25 13:38:01 +0000
+++ tests/contrib/storage/test_linux_storage_utils.py 2015-08-13 08:33:21 +0000
@@ -16,8 +16,9 @@
16 '''It calls sgdisk correctly to zap disk'''16 '''It calls sgdisk correctly to zap disk'''
17 check_output.return_value = b'200\n'17 check_output.return_value = b'200\n'
18 storage_utils.zap_disk('/dev/foo')18 storage_utils.zap_disk('/dev/foo')
19 call.assert_any_call(['sgdisk', '--zap-all', '--mbrtogpt',19 call.assert_any_call(['sgdisk', '--zap-all', '--', '/dev/foo'])
20 '--clear', '/dev/foo'])20 call.assert_any_call(['sgdisk', '--clear', '--mbrtogpt',
21 '--', '/dev/foo'])
21 check_output.assert_any_call(['blockdev', '--getsz', '/dev/foo'])22 check_output.assert_any_call(['blockdev', '--getsz', '/dev/foo'])
22 check_call.assert_any_call(['dd', 'if=/dev/zero', 'of=/dev/foo',23 check_call.assert_any_call(['dd', 'if=/dev/zero', 'of=/dev/foo',
23 'bs=1M', 'count=1'])24 'bs=1M', 'count=1'])
@@ -88,6 +89,14 @@
88 self.assertFalse(result)89 self.assertFalse(result)
8990
90 @patch(STORAGE_LINUX_UTILS + '.check_output')91 @patch(STORAGE_LINUX_UTILS + '.check_output')
92 def test_is_device_mounted_full_disks(self, check_output):
93 '''It detects mounted full disks as mounted.'''
94 check_output.return_value = (
95 b"/dev/sda on / type ext4 (rw,errors=remount-ro)\n")
96 result = storage_utils.is_device_mounted('/dev/sda')
97 self.assertTrue(result)
98
99 @patch(STORAGE_LINUX_UTILS + '.check_output')
91 def test_is_device_mounted_cciss(self, check_output):100 def test_is_device_mounted_cciss(self, check_output):
92 '''It detects mounted cciss partitions as mounted.'''101 '''It detects mounted cciss partitions as mounted.'''
93 check_output.return_value = (102 check_output.return_value = (
94103
=== modified file 'tests/contrib/unison/test_unison.py'
--- tests/contrib/unison/test_unison.py 2015-04-03 15:23:46 +0000
+++ tests/contrib/unison/test_unison.py 2015-08-13 08:33:21 +0000
@@ -74,7 +74,7 @@
74 self.assertIn(call(_call), self.check_call.call_args_list)74 self.assertIn(call(_call), self.check_call.call_args_list)
7575
76 @patch('os.path.isfile')76 @patch('os.path.isfile')
77 def test_create_private_key(self, isfile):77 def test_create_private_key_rsa(self, isfile):
78 create_cmd = [78 create_cmd = [
79 'ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048',79 'ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048',
80 '-f', '/home/foo/.ssh/id_rsa']80 '-f', '/home/foo/.ssh/id_rsa']
@@ -100,6 +100,36 @@
100 _ensure_perms()100 _ensure_perms()
101101
102 @patch('os.path.isfile')102 @patch('os.path.isfile')
    def test_create_private_key_ecdsa(self, isfile):
        """create_private_key honours key_type='ecdsa' (521-bit key).

        ``isfile`` is the mocked os.path.isfile from the decorator above.
        """
        create_cmd = [
            'ssh-keygen', '-q', '-N', '', '-t', 'ecdsa', '-b', '521',
            '-f', '/home/foo/.ssh/id_ecdsa']

        def _ensure_perms():
            # Ownership and mode must be fixed up whether or not the key
            # was freshly generated.
            cmds = [
                ['chown', 'foo', '/home/foo/.ssh/id_ecdsa'],
                ['chmod', '0600', '/home/foo/.ssh/id_ecdsa'],
            ]
            self._ensure_calls_in(cmds)

        # Key file absent: ssh-keygen must be invoked.
        isfile.return_value = False
        unison.create_private_key(
            user='foo',
            priv_key_path='/home/foo/.ssh/id_ecdsa',
            key_type='ecdsa')
        self.assertIn(call(create_cmd), self.check_call.call_args_list)
        _ensure_perms()
        # NOTE(review): assigning to call_args_list reaches into mock
        # internals; reset_mock() is the supported API — confirm before
        # changing, as reset_mock also clears call counts.
        self.check_call.call_args_list = []

        # Key file present: generation must be skipped.
        isfile.return_value = True
        unison.create_private_key(
            user='foo',
            priv_key_path='/home/foo/.ssh/id_ecdsa',
            key_type='ecdsa')
        self.assertNotIn(call(create_cmd), self.check_call.call_args_list)
        _ensure_perms()
132 @patch('os.path.isfile')
103 def test_create_public_key(self, isfile):133 def test_create_public_key(self, isfile):
104 create_cmd = ['ssh-keygen', '-y', '-f', '/home/foo/.ssh/id_rsa']134 create_cmd = ['ssh-keygen', '-y', '-f', '/home/foo/.ssh/id_rsa']
105 isfile.return_value = True135 isfile.return_value = True
@@ -273,6 +303,33 @@
273 write_hosts.assert_called_with('foo', ['host1', 'host2'])303 write_hosts.assert_called_with('foo', ['host1', 'host2'])
274 self.relation_set.assert_called_with(ssh_authorized_hosts='host1:host2')304 self.relation_set.assert_called_with(ssh_authorized_hosts='host1:host2')
275305
    @patch.object(unison, 'write_known_hosts')
    @patch.object(unison, 'write_authorized_keys')
    @patch.object(unison, 'get_keypair')
    @patch.object(unison, 'ensure_user')
    def test_ssh_auth_peer_departed(self, ensure_user, get_keypair,
                                    write_keys, write_hosts):
        """ssh_authorized_peers rewrites keys/hosts on -departed hooks too."""
        get_keypair.return_value = ('privkey', 'pubkey')

        self.hook_name.return_value = 'cluster-relation-departed'

        # Consumed in order by successive relation_get calls; appears to
        # feed (key, host) pairs for the two remaining peers, then empty
        # strings — verify ordering against ssh_authorized_peers before
        # reordering.
        self.relation_get.side_effect = [
            'key1',
            'host1',
            'key2',
            'host2',
            '', ''
        ]
        unison.ssh_authorized_peers(peer_interface='cluster',
                                    user='foo', group='foo',
                                    ensure_local_user=True)

        ensure_user.assert_called_with('foo', 'foo')
        get_keypair.assert_called_with('foo')
        write_keys.assert_called_with('foo', ['key1', 'key2'])
        write_hosts.assert_called_with('foo', ['host1', 'host2'])
        self.relation_set.assert_called_with(ssh_authorized_hosts='host1:host2')
332
276 def test_collect_authed_hosts(self):333 def test_collect_authed_hosts(self):
277 # only one of the hosts in fake environment has auth'd334 # only one of the hosts in fake environment has auth'd
278 # the local peer335 # the local peer
279336
=== added directory 'tests/coordinator'
=== added file 'tests/coordinator/__init__.py'
=== added file 'tests/coordinator/test_coordinator.py'
--- tests/coordinator/test_coordinator.py 1970-01-01 00:00:00 +0000
+++ tests/coordinator/test_coordinator.py 2015-08-13 08:33:21 +0000
@@ -0,0 +1,535 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16from datetime import datetime, timedelta
17import json
18import tempfile
19import unittest
20from mock import call, MagicMock, patch, sentinel
21
22from charmhelpers import coordinator
23from charmhelpers.core import hookenv
24
25
26class TestCoordinator(unittest.TestCase):
27
    def setUp(self):
        """Reset hookenv/coordinator module-level state and install mocks."""
        # These module-level registries persist across tests; clear them so
        # one test's callbacks, cached values or singleton instances cannot
        # leak into the next.
        del hookenv._atstart[:]
        del hookenv._atexit[:]
        hookenv.cache.clear()
        coordinator.Singleton._instances.clear()

        def install(patch):
            # Start the patch now and guarantee it is undone at teardown.
            patch.start()
            self.addCleanup(patch.stop)

        install(patch.object(hookenv, 'local_unit', return_value='foo/1'))
        install(patch.object(hookenv, 'is_leader', return_value=False))
        install(patch.object(hookenv, 'metadata',
                             return_value={'peers': {'cluster': None}}))
        install(patch.object(hookenv, 'log'))

        # Ensure _timestamp always increases.
        install(patch.object(coordinator, '_utcnow',
                             side_effect=self._utcnow))
47
    # Monotonic fake clock shared by every test in this class; advanced by
    # one minute per _utcnow() call.
    _last_utcnow = datetime(2015, 1, 1, 00, 00)

    def _utcnow(self, ts=coordinator._timestamp):
        # NOTE(review): the default argument binds coordinator._timestamp
        # at class-definition time (before setUp patches the module), but
        # 'ts' is never used in the body — presumably intentional to pin
        # the original; confirm before removing.
        self._last_utcnow += timedelta(minutes=1)
        return self._last_utcnow
53
54 def test_is_singleton(self):
55 # BaseCoordinator and subclasses are singletons. Placing this
56 # burden on charm authors is impractical, particularly if
57 # libraries start wanting to use coordinator instances.
58 # With singletons, we don't need to worry about sharing state
59 # between instances or have them stomping on each other when they
60 # need to serialize their state.
61 self.assertTrue(coordinator.BaseCoordinator()
62 is coordinator.BaseCoordinator())
63 self.assertTrue(coordinator.Serial() is coordinator.Serial())
64 self.assertFalse(coordinator.BaseCoordinator() is coordinator.Serial())
65
    @patch.object(hookenv, 'atstart')
    def test_implicit_initialize_and_handle(self, atstart):
        """Constructing a coordinator registers initialize+handle atstart."""
        # When you construct a BaseCoordinator(), its initialize() and
        # handle() method are invoked automatically every hook. This
        # is done using hookenv.atstart
        c = coordinator.BaseCoordinator()
        atstart.assert_has_calls([call(c.initialize), call(c.handle)])
73
74 @patch.object(hookenv, 'has_juju_version', return_value=False)
75 def test_initialize_enforces_juju_version(self, has_juju_version):
76 c = coordinator.BaseCoordinator()
77 with self.assertRaises(AssertionError):
78 c.initialize()
79 has_juju_version.assert_called_once_with('1.23')
80
    @patch.object(hookenv, 'atexit')
    @patch.object(hookenv, 'has_juju_version', return_value=True)
    @patch.object(hookenv, 'relation_ids')
    def test_initialize(self, relation_ids, ver, atexit):
        """initialize() loads/emits state and wires up atexit handlers."""
        # First initialization are done before there is a peer relation.
        relation_ids.return_value = []
        c = coordinator.BaseCoordinator()

        with patch.object(c, '_load_state') as _load_state, \
                patch.object(c, '_emit_state') as _emit_state:  # IGNORE: E127
            c.initialize()
            _load_state.assert_called_once_with()
            _emit_state.assert_called_once_with()

        self.assertEqual(c.relname, 'cluster')
        self.assertIsNone(c.relid)
        relation_ids.assert_called_once_with('cluster')

        # Methods installed to save state and release locks if the
        # hook is successful.
        atexit.assert_has_calls([call(c._save_state),
                                 call(c._release_granted)])

        # If we have a peer relation, the id is stored.
        relation_ids.return_value = ['cluster:1']
        c = coordinator.BaseCoordinator()
        with patch.object(c, '_load_state'), patch.object(c, '_emit_state'):
            c.initialize()
        self.assertEqual(c.relid, 'cluster:1')

        # If we are already initialized, nothing happens.
        c.grants = {}
        c.requests = {}
        c.initialize()
115
    def test_acquire(self):
        """acquire() queues a request and reports whether it was granted."""
        c = coordinator.BaseCoordinator()
        lock = 'mylock'
        c.grants = {}
        c.requests = {hookenv.local_unit(): {}}

        # We are not the leader, so first acquire will return False.
        self.assertFalse(c.acquire(lock))

        # But the request is in the queue.
        self.assertTrue(c.requested(lock))
        ts = c.request_timestamp(lock)

        # Further attempts at acquiring the lock do nothing,
        # and the timestamp of the request remains unchanged.
        self.assertFalse(c.acquire(lock))
        self.assertEqual(ts, c.request_timestamp(lock))

        # Once the leader has granted the lock, acquire returns True.
        with patch.object(c, 'granted') as granted:
            granted.return_value = True
            self.assertTrue(c.acquire(lock))
            granted.assert_called_once_with(lock)
139
    def test_acquire_leader(self):
        """The leader decides its own acquire() requests synchronously."""
        # When acquire() is called by the leader, it needs
        # to make a grant decision immediately. It can't defer
        # making the decision until a future hook, as no future
        # hooks will be triggered.
        hookenv.is_leader.return_value = True
        c = coordinator.Serial()  # Not Base. Test hooks into default_grant.
        lock = 'mylock'
        unit = hookenv.local_unit()
        c.grants = {}
        c.requests = {unit: {}}
        with patch.object(c, 'default_grant') as default_grant:
            # First decision denies, second grants.
            default_grant.side_effect = iter([False, True])

            self.assertFalse(c.acquire(lock))
            ts = c.request_timestamp(lock)

            self.assertTrue(c.acquire(lock))
            self.assertEqual(ts, c.request_timestamp(lock))

            # If it is granted, the leader doesn't make a decision again.
            self.assertTrue(c.acquire(lock))
            self.assertEqual(ts, c.request_timestamp(lock))

            self.assertEqual(default_grant.call_count, 2)
165
    def test_granted(self):
        """granted() is true only while grant and request timestamps match."""
        c = coordinator.BaseCoordinator()
        unit = hookenv.local_unit()
        lock = 'mylock'
        ts = coordinator._timestamp()
        c.grants = {}

        # Unit makes a request, but it isn't granted
        c.requests = {unit: {lock: ts}}
        self.assertFalse(c.granted(lock))

        # Once the leader has granted the request, all good.
        # It does this by mirroring the request timestamp.
        c.grants = {unit: {lock: ts}}
        self.assertTrue(c.granted(lock))

        # The unit releases the lock by removing the request.
        c.requests = {unit: {}}
        self.assertFalse(c.granted(lock))

        # If the unit makes a new request before the leader
        # has had a chance to do its housekeeping, the timestamps
        # do not match and the lock not considered granted.
        ts = coordinator._timestamp()
        c.requests = {unit: {lock: ts}}
        self.assertFalse(c.granted(lock))

        # Until the leader gets around to its duties.
        c.grants = {unit: {lock: ts}}
        self.assertTrue(c.granted(lock))
196
197 def test_requested(self):
198 c = coordinator.BaseCoordinator()
199 lock = 'mylock'
200 c.requests = {hookenv.local_unit(): {}}
201 c.grants = {}
202
203 self.assertFalse(c.requested(lock))
204 c.acquire(lock)
205 self.assertTrue(c.requested(lock))
206
207 def test_request_timestamp(self):
208 c = coordinator.BaseCoordinator()
209 lock = 'mylock'
210 unit = hookenv.local_unit()
211
212 c.requests = {unit: {}}
213 c.grants = {}
214 self.assertIsNone(c.request_timestamp(lock))
215
216 now = datetime.utcnow()
217 fmt = coordinator._timestamp_format
218 c.requests = {hookenv.local_unit(): {lock: now.strftime(fmt)}}
219
220 self.assertEqual(c.request_timestamp(lock), now)
221
222 def test_handle_not_leader(self):
223 c = coordinator.BaseCoordinator()
224 # If we are not the leader, handle does nothing. We know this,
225 # because without mocks or initialization it would otherwise crash.
226 c.handle()
227
228 def test_handle(self):
229 hookenv.is_leader.return_value = True
230 lock = 'mylock'
231 c = coordinator.BaseCoordinator()
232 c.relid = 'cluster:1'
233
234 ts = coordinator._timestamp
235 ts1, ts2, ts3 = ts(), ts(), ts()
236
237 # Grant one of these requests.
238 requests = {'foo/1': {lock: ts1},
239 'foo/2': {lock: ts2},
240 'foo/3': {lock: ts3}}
241 c.requests = requests.copy()
242 # Because the existing grant should be released.
243 c.grants = {'foo/2': {lock: ts()}} # No request, release.
244
245 with patch.object(c, 'grant') as grant:
246 c.handle()
247
248 # The requests are unchanged. This is normally state on the
249 # peer relation, and only the units themselves can change it.
250 self.assertDictEqual(requests, c.requests)
251
252 # The grant without a corresponding requests was released.
253 self.assertDictEqual({'foo/2': {}}, c.grants)
254
255 # A potential grant was made for each of the outstanding requests.
256 grant.assert_has_calls([call(lock, 'foo/1'),
257 call(lock, 'foo/2'),
258 call(lock, 'foo/3')], any_order=True)
259
260 def test_grant_not_leader(self):
261 c = coordinator.BaseCoordinator()
262 c.grant(sentinel.whatever, sentinel.whatever) # Nothing happens.
263
    def test_grant(self):
        """grant() records decisions made by default_grant, or by a
        lock-specific grant_<lockname> override when one exists.
        """
        hookenv.is_leader.return_value = True
        c = coordinator.BaseCoordinator()
        c.default_grant = MagicMock()
        # Custom decision logic for the 'other' lock; the assertions
        # below show grant() dispatches to it by lock name.
        c.grant_other = MagicMock()

        ts = coordinator._timestamp
        ts1, ts2 = ts(), ts()

        c.requests = {'foo/1': {'mylock': ts1, 'other': ts()},
                      'foo/2': {'mylock': ts2},
                      'foo/3': {'mylock': ts()}}
        grants = {'foo/1': {'mylock': ts1}}
        c.grants = grants.copy()

        # foo/1 already has a granted mylock, so grant() returns True
        # without consulting the decision hooks.
        self.assertTrue(c.grant('mylock', 'foo/1'))

        # foo/2 does not have a granted mylock. default_grant will
        # be called to make a decision (no). The hook receives the
        # current holders and the request queue.
        c.default_grant.return_value = False
        self.assertFalse(c.grant('mylock', 'foo/2'))
        self.assertDictEqual(grants, c.grants)
        c.default_grant.assert_called_once_with('mylock', 'foo/2',
                                                set(['foo/1']),
                                                ['foo/2', 'foo/3'])
        c.default_grant.reset_mock()

        # Let's say yes: the grant mirroring foo/2's request timestamp
        # is recorded.
        c.default_grant.return_value = True
        self.assertTrue(c.grant('mylock', 'foo/2'))
        grants = {'foo/1': {'mylock': ts1}, 'foo/2': {'mylock': ts2}}
        self.assertDictEqual(grants, c.grants)
        c.default_grant.assert_called_once_with('mylock', 'foo/2',
                                                set(['foo/1']),
                                                ['foo/2', 'foo/3'])

        # The other lock has custom logic, in the form of the overridden
        # grant_other method.
        c.grant_other.return_value = False
        self.assertFalse(c.grant('other', 'foo/1'))
        c.grant_other.assert_called_once_with('other', 'foo/1',
                                              set(), ['foo/1'])

        # If there is no request, grant() returns False even when the
        # decision hook would say yes.
        c.grant_other.return_value = True
        self.assertFalse(c.grant('other', 'foo/2'))
311
312 def test_released(self):
313 c = coordinator.BaseCoordinator()
314 with patch.object(c, 'msg') as msg:
315 c.released('foo/2', 'mylock', coordinator._utcnow())
316 expected = 'Leader released mylock from foo/2, held 0:01:00'
317 msg.assert_called_once_with(expected)
318
    def test_require(self):
        """require() decorates a function so it only runs while holding
        the named lock, attempting acquisition when the guard allows.
        """
        c = coordinator.BaseCoordinator()
        c.acquire = MagicMock()
        c.granted = MagicMock()
        guard = MagicMock()

        # Stand-in for the real body, so calls can be asserted.
        wrapped = MagicMock()

        @c.require('mylock', guard)
        def func(*args, **kw):
            wrapped(*args, **kw)

        # If the lock is granted, the wrapped function is called.
        c.granted.return_value = True
        func(arg=True)
        wrapped.assert_called_once_with(arg=True)
        wrapped.reset_mock()

        # If the lock is not granted, and the guard returns False,
        # the lock is not acquired.
        c.acquire.return_value = False
        c.granted.return_value = False
        guard.return_value = False
        func()
        self.assertFalse(wrapped.called)
        self.assertFalse(c.acquire.called)

        # If the lock is not granted, and the guard returns True,
        # the lock is acquired. But the function still isn't called if
        # it cannot be acquired immediately.
        guard.return_value = True
        func()
        self.assertFalse(wrapped.called)
        c.acquire.assert_called_once_with('mylock')

        # Finally, if the lock is not granted, the guard returns True,
        # and the lock is acquired immediately, the function is called.
        c.acquire.return_value = True
        func(sentinel.arg)
        wrapped.assert_called_once_with(sentinel.arg)
359
360 def test_msg(self):
361 c = coordinator.BaseCoordinator()
362 # Just a wrapper around hookenv.log
363 c.msg('hi')
364 hookenv.log.assert_called_once_with('coordinator.BaseCoordinator hi',
365 level=hookenv.INFO)
366
367 def test_name(self):
368 # We use the class name in a few places to avoid conflicts.
369 # We assume we won't be using multiple BaseCoordinator subclasses
370 # with the same name at the same time.
371 c = coordinator.BaseCoordinator()
372 self.assertEqual(c._name(), 'BaseCoordinator')
373 c = coordinator.Serial()
374 self.assertEqual(c._name(), 'Serial')
375
    @patch.object(hookenv, 'leader_get')
    def test_load_state(self, leader_get):
        """_load_state() pulls grants from leadership settings and
        requests from the peer relation, falling back to a local stash.
        """
        c = coordinator.BaseCoordinator()
        unit = hookenv.local_unit()

        # c.grants is just the leader_get result, JSON decoded.
        leader_get.return_value = '{"json": true}'
        c._load_state()
        self.assertDictEqual(c.grants, {'json': True})

        # With no relid, there is no peer relation so request state
        # is pulled from a local stash.
        with patch.object(c, '_load_local_state') as loc_state:
            loc_state.return_value = {'local': True}
            c._load_state()
            self.assertDictEqual(c.requests, {unit: {'local': True}})

        # With a relid, request details are pulled from the peer relation.
        # If there is no data in the peer relation from the local unit,
        # we still pull it from the local stash as it means this is the
        # first time we have joined.
        c.relid = 'cluster:1'
        with patch.object(c, '_load_local_state') as loc_state, \
                patch.object(c, '_load_peer_state') as peer_state:
            loc_state.return_value = {'local': True}
            peer_state.return_value = {'foo/2': {'mylock': 'whatever'}}
            c._load_state()
            self.assertDictEqual(c.requests, {unit: {'local': True},
                                              'foo/2': {'mylock': 'whatever'}})

        # If there are local details in the peer relation, the local
        # stash is ignored.
        with patch.object(c, '_load_local_state') as loc_state, \
                patch.object(c, '_load_peer_state') as peer_state:
            loc_state.return_value = {'local': True}
            peer_state.return_value = {unit: {},
                                       'foo/2': {'mylock': 'whatever'}}
            c._load_state()
            self.assertDictEqual(c.requests, {unit: {},
                                              'foo/2': {'mylock': 'whatever'}})
416
417 def test_emit_state(self):
418 c = coordinator.BaseCoordinator()
419 unit = hookenv.local_unit()
420 c.requests = {unit: {'lock_a': sentinel.ts,
421 'lock_b': sentinel.ts,
422 'lock_c': sentinel.ts}}
423 c.grants = {unit: {'lock_a': sentinel.ts,
424 'lock_b': sentinel.ts2}}
425 with patch.object(c, 'msg') as msg:
426 c._emit_state()
427 msg.assert_has_calls([call('Granted lock_a'),
428 call('Waiting on lock_b'),
429 call('Waiting on lock_c')],
430 any_order=True)
431
    @patch.object(hookenv, 'relation_set')
    @patch.object(hookenv, 'leader_set')
    def test_save_state(self, leader_set, relation_set):
        """_save_state() dumps grants to leadership settings (leader
        only) and the local unit's requests to the peer relation, or to
        a local stash when there is no peer relation yet.
        """
        c = coordinator.BaseCoordinator()
        unit = hookenv.local_unit()
        c.grants = {'directdump': True}
        c.requests = {unit: 'data1', 'foo/2': 'data2'}

        # grants is dumped to leadership settings, if the unit is leader.
        # _save_local_state is stubbed so no stash file is written.
        with patch.object(c, '_save_local_state') as save_loc:
            c._save_state()
            self.assertFalse(leader_set.called)
            hookenv.is_leader.return_value = True
            c._save_state()
            leader_set.assert_called_once_with({c.key: '{"directdump": true}'})

        # If there is no relation id, the local unit's requests are
        # dumped to a local stash.
        with patch.object(c, '_save_local_state') as save_loc:
            c._save_state()
            save_loc.assert_called_once_with('data1')

        # If there is a relation id, the local unit's requests are
        # dumped to the peer relation instead of the stash.
        with patch.object(c, '_save_local_state') as save_loc:
            c.relid = 'cluster:1'
            c._save_state()
            self.assertFalse(save_loc.called)
            relation_set.assert_called_once_with(
                c.relid, relation_settings={c.key: '"data1"'})  # JSON encoded
462
463 @patch.object(hookenv, 'relation_get')
464 @patch.object(hookenv, 'related_units')
465 def test_load_peer_state(self, related_units, relation_get):
466 # Standard relation-get loops, decoding results from JSON.
467 c = coordinator.BaseCoordinator()
468 c.key = sentinel.key
469 c.relid = sentinel.relid
470 related_units.return_value = ['foo/2', 'foo/3']
471 d = {'foo/1': {'foo/1': True},
472 'foo/2': {'foo/2': True},
473 'foo/3': {'foo/3': True}}
474
475 def _get(key, unit, relid):
476 assert key == sentinel.key
477 assert relid == sentinel.relid
478 return json.dumps(d[unit])
479 relation_get.side_effect = _get
480
481 self.assertDictEqual(c._load_peer_state(), d)
482
483 def test_local_state_filename(self):
484 c = coordinator.BaseCoordinator()
485 self.assertEqual(c._local_state_filename(),
486 '.charmhelpers.coordinator.BaseCoordinator')
487
488 def test_load_local_state(self):
489 c = coordinator.BaseCoordinator()
490 with tempfile.NamedTemporaryFile(mode='w') as f:
491 with patch.object(c, '_local_state_filename') as fn:
492 fn.return_value = f.name
493 d = 'some data'
494 json.dump(d, f)
495 f.flush()
496 d2 = c._load_local_state()
497 self.assertEqual(d, d2)
498
499 def test_save_local_state(self):
500 c = coordinator.BaseCoordinator()
501 with tempfile.NamedTemporaryFile(mode='r') as f:
502 with patch.object(c, '_local_state_filename') as fn:
503 fn.return_value = f.name
504 c._save_local_state('some data')
505 self.assertEqual(json.load(f), 'some data')
506
507 def test_release_granted(self):
508 c = coordinator.BaseCoordinator()
509 unit = hookenv.local_unit()
510 c.requests = {unit: {'lock1': sentinel.ts, 'lock2': sentinel.ts},
511 'foo/2': {'lock1': sentinel.ts}}
512 c.grants = {unit: {'lock1': sentinel.ts},
513 'foo/2': {'lock1': sentinel.ts}}
514 # The granted lock for the local unit is released.
515 c._release_granted()
516 self.assertDictEqual(c.requests, {unit: {'lock2': sentinel.ts},
517 'foo/2': {'lock1': sentinel.ts}})
518
519 def test_implicit_peer_relation_name(self):
520 self.assertEqual(coordinator._implicit_peer_relation_name(),
521 'cluster')
522
523 def test_default_grant(self):
524 c = coordinator.Serial()
525 # Lock not granted. First in the queue.
526 self.assertTrue(c.default_grant(sentinel.lock, sentinel.u1,
527 set(), [sentinel.u1, sentinel.u2]))
528
529 # Lock not granted. Later in the queue.
530 self.assertFalse(c.default_grant(sentinel.lock, sentinel.u1,
531 set(), [sentinel.u2, sentinel.u1]))
532
533 # Lock already granted
534 self.assertFalse(c.default_grant(sentinel.lock, sentinel.u1,
535 set([sentinel.u2]), [sentinel.u1]))
0536
=== added file 'tests/core/test_files.py'
--- tests/core/test_files.py 1970-01-01 00:00:00 +0000
+++ tests/core/test_files.py 2015-08-13 08:33:21 +0000
@@ -0,0 +1,32 @@
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3
4from charmhelpers.core import files
5
6import mock
7import unittest
8import tempfile
9import os
10
11
12class FileTests(unittest.TestCase):
13
14 @mock.patch("subprocess.check_call")
15 def test_sed(self, check_call):
16 files.sed("/tmp/test-sed-file", "replace", "this")
17 check_call.assert_called_once_with(
18 ['sed', '-i', '-r', '-e', 's/replace/this/g',
19 '/tmp/test-sed-file']
20 )
21
22 def test_sed_file(self):
23 tmp = tempfile.NamedTemporaryFile(mode='w', delete=False)
24 tmp.write("IPV6=yes")
25 tmp.close()
26
27 files.sed(tmp.name, "IPV6=.*", "IPV6=no")
28
29 with open(tmp.name) as tmp:
30 self.assertEquals(tmp.read(), "IPV6=no")
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches

to all changes: