Merge lp:~gnuoy/charms/trusty/hacluster/1604 into lp:charms/trusty/hacluster

Proposed by Liam Young on 2016-04-21
Status: Merged
Merged at revision: 56
Proposed branch: lp:~gnuoy/charms/trusty/hacluster/1604
Merge into: lp:charms/trusty/hacluster
Diff against target: 4568 lines (+2941/-434)
30 files modified
actions.yaml (+5/-0)
actions/actions.py (+41/-0)
hooks/charmhelpers/cli/__init__.py (+4/-8)
hooks/charmhelpers/cli/commands.py (+4/-4)
hooks/charmhelpers/cli/hookenv.py (+23/-0)
hooks/charmhelpers/contrib/charmsupport/nrpe.py (+52/-14)
hooks/charmhelpers/contrib/network/ip.py (+55/-23)
hooks/charmhelpers/contrib/openstack/utils.py (+939/-70)
hooks/charmhelpers/contrib/python/packages.py (+35/-11)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+812/-61)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+10/-0)
hooks/charmhelpers/contrib/storage/linux/utils.py (+3/-2)
hooks/charmhelpers/core/hookenv.py (+127/-33)
hooks/charmhelpers/core/host.py (+298/-75)
hooks/charmhelpers/core/hugepage.py (+71/-0)
hooks/charmhelpers/core/kernel.py (+68/-0)
hooks/charmhelpers/core/services/helpers.py (+30/-5)
hooks/charmhelpers/core/strutils.py (+30/-0)
hooks/charmhelpers/core/templating.py (+21/-8)
hooks/charmhelpers/fetch/__init__.py (+18/-2)
hooks/charmhelpers/fetch/archiveurl.py (+1/-1)
hooks/charmhelpers/fetch/bzrurl.py (+22/-32)
hooks/charmhelpers/fetch/giturl.py (+20/-23)
hooks/hooks.py (+2/-14)
hooks/utils.py (+137/-5)
tests/00-setup (+0/-17)
tests/basic_deployment.py (+46/-0)
tests/charmhelpers/contrib/amulet/utils.py (+22/-11)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+5/-2)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+40/-13)
To merge this branch: bzr merge lp:~gnuoy/charms/trusty/hacluster/1604
Reviewer Review Type Date Requested Status
charmers 2016-04-21 Pending
Review via email: mp+292493@code.launchpad.net
To post a comment you must log in.

charm_unit_test #1858 hacluster for gnuoy mp292493
    UNIT OK: passed

Build: http://10.245.162.36:8080/job/charm_unit_test/1858/

charm_lint_check #2353 hacluster for gnuoy mp292493
    LINT OK: passed

Build: http://10.245.162.36:8080/job/charm_lint_check/2353/

57. By Liam Young on 2016-04-21

Point charmhelper sync and amulet at stable

58. By Liam Young on 2016-04-21

Point charm-helpers-tests.yaml at stable ch as well

charm_unit_test #1859 hacluster for gnuoy mp292493
    UNIT OK: passed

Build: http://10.245.162.36:8080/job/charm_unit_test/1859/

charm_lint_check #2354 hacluster for gnuoy mp292493
    LINT OK: passed

Build: http://10.245.162.36:8080/job/charm_lint_check/2354/

charm_amulet_test #717 hacluster for gnuoy mp292493
    AMULET FAIL: amulet-test failed

AMULET Results (max last 2 lines):
make: *** [functional_test] Error 1
ERROR:root:Make target returned non-zero.

Full amulet test output: http://paste.ubuntu.com/15963347/
Build: http://10.245.162.36:8080/job/charm_amulet_test/717/

charm_amulet_test #718 hacluster for gnuoy mp292493
    AMULET OK: passed

Build: http://10.245.162.36:8080/job/charm_amulet_test/718/

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== added directory 'actions'
2=== added file 'actions.yaml'
3--- actions.yaml 1970-01-01 00:00:00 +0000
4+++ actions.yaml 2016-04-21 10:41:47 +0000
5@@ -0,0 +1,5 @@
6+pause:
7+ description: Put hacluster unit in crm standby mode which migrates resources
8+ from this unit to another unit in the hacluster
9+resume:
10+ description: Take hacluster unit out of standby mode
11
12=== added file 'actions/actions.py'
13--- actions/actions.py 1970-01-01 00:00:00 +0000
14+++ actions/actions.py 2016-04-21 10:41:47 +0000
15@@ -0,0 +1,41 @@
16+#!/usr/bin/python
17+
18+import sys
19+import os
20+sys.path.append('hooks/')
21+import subprocess
22+from charmhelpers.core.hookenv import action_fail
23+from utils import (
24+ pause_unit,
25+ resume_unit,
26+)
27+
28+def pause(args):
29+ """Pause the hacluster services.
30+ @raises Exception should the service fail to stop.
31+ """
32+ pause_unit()
33+
34+def resume(args):
35+ """Resume the hacluster services.
36+ @raises Exception should the service fail to start."""
37+ resume_unit()
38+
39+
40+ACTIONS = {"pause": pause, "resume": resume}
41+
42+def main(args):
43+ action_name = os.path.basename(args[0])
44+ try:
45+ action = ACTIONS[action_name]
46+ except KeyError:
47+ return "Action %s undefined" % action_name
48+ else:
49+ try:
50+ action(args)
51+ except Exception as e:
52+ action_fail(str(e))
53+
54+
55+if __name__ == "__main__":
56+ sys.exit(main(sys.argv))
57
58=== added symlink 'actions/pause'
59=== target is u'actions.py'
60=== added symlink 'actions/resume'
61=== target is u'actions.py'
62=== modified file 'hooks/charmhelpers/cli/__init__.py'
63--- hooks/charmhelpers/cli/__init__.py 2015-10-22 13:26:06 +0000
64+++ hooks/charmhelpers/cli/__init__.py 2016-04-21 10:41:47 +0000
65@@ -20,7 +20,7 @@
66
67 from six.moves import zip
68
69-from charmhelpers.core import unitdata
70+import charmhelpers.core.unitdata
71
72
73 class OutputFormatter(object):
74@@ -152,23 +152,19 @@
75 arguments = self.argument_parser.parse_args()
76 argspec = inspect.getargspec(arguments.func)
77 vargs = []
78- kwargs = {}
79 for arg in argspec.args:
80 vargs.append(getattr(arguments, arg))
81 if argspec.varargs:
82 vargs.extend(getattr(arguments, argspec.varargs))
83- if argspec.keywords:
84- for kwarg in argspec.keywords.items():
85- kwargs[kwarg] = getattr(arguments, kwarg)
86- output = arguments.func(*vargs, **kwargs)
87+ output = arguments.func(*vargs)
88 if getattr(arguments.func, '_cli_test_command', False):
89 self.exit_code = 0 if output else 1
90 output = ''
91 if getattr(arguments.func, '_cli_no_output', False):
92 output = ''
93 self.formatter.format_output(output, arguments.format)
94- if unitdata._KV:
95- unitdata._KV.flush()
96+ if charmhelpers.core.unitdata._KV:
97+ charmhelpers.core.unitdata._KV.flush()
98
99
100 cmdline = CommandLine()
101
102=== modified file 'hooks/charmhelpers/cli/commands.py'
103--- hooks/charmhelpers/cli/commands.py 2015-10-22 13:26:06 +0000
104+++ hooks/charmhelpers/cli/commands.py 2016-04-21 10:41:47 +0000
105@@ -26,7 +26,7 @@
106 """
107 Import the sub-modules which have decorated subcommands to register with chlp.
108 """
109-import host # noqa
110-import benchmark # noqa
111-import unitdata # noqa
112-from charmhelpers.core import hookenv # noqa
113+from . import host # noqa
114+from . import benchmark # noqa
115+from . import unitdata # noqa
116+from . import hookenv # noqa
117
118=== added file 'hooks/charmhelpers/cli/hookenv.py'
119--- hooks/charmhelpers/cli/hookenv.py 1970-01-01 00:00:00 +0000
120+++ hooks/charmhelpers/cli/hookenv.py 2016-04-21 10:41:47 +0000
121@@ -0,0 +1,23 @@
122+# Copyright 2014-2015 Canonical Limited.
123+#
124+# This file is part of charm-helpers.
125+#
126+# charm-helpers is free software: you can redistribute it and/or modify
127+# it under the terms of the GNU Lesser General Public License version 3 as
128+# published by the Free Software Foundation.
129+#
130+# charm-helpers is distributed in the hope that it will be useful,
131+# but WITHOUT ANY WARRANTY; without even the implied warranty of
132+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
133+# GNU Lesser General Public License for more details.
134+#
135+# You should have received a copy of the GNU Lesser General Public License
136+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
137+
138+from . import cmdline
139+from charmhelpers.core import hookenv
140+
141+
142+cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
143+cmdline.subcommand('service-name')(hookenv.service_name)
144+cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)
145
146=== modified file 'hooks/charmhelpers/contrib/charmsupport/nrpe.py'
147--- hooks/charmhelpers/contrib/charmsupport/nrpe.py 2015-04-20 00:39:03 +0000
148+++ hooks/charmhelpers/contrib/charmsupport/nrpe.py 2016-04-21 10:41:47 +0000
149@@ -148,6 +148,13 @@
150 self.description = description
151 self.check_cmd = self._locate_cmd(check_cmd)
152
153+ def _get_check_filename(self):
154+ return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
155+
156+ def _get_service_filename(self, hostname):
157+ return os.path.join(NRPE.nagios_exportdir,
158+ 'service__{}_{}.cfg'.format(hostname, self.command))
159+
160 def _locate_cmd(self, check_cmd):
161 search_path = (
162 '/usr/lib/nagios/plugins',
163@@ -163,9 +170,21 @@
164 log('Check command not found: {}'.format(parts[0]))
165 return ''
166
167+ def _remove_service_files(self):
168+ if not os.path.exists(NRPE.nagios_exportdir):
169+ return
170+ for f in os.listdir(NRPE.nagios_exportdir):
171+ if f.endswith('_{}.cfg'.format(self.command)):
172+ os.remove(os.path.join(NRPE.nagios_exportdir, f))
173+
174+ def remove(self, hostname):
175+ nrpe_check_file = self._get_check_filename()
176+ if os.path.exists(nrpe_check_file):
177+ os.remove(nrpe_check_file)
178+ self._remove_service_files()
179+
180 def write(self, nagios_context, hostname, nagios_servicegroups):
181- nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
182- self.command)
183+ nrpe_check_file = self._get_check_filename()
184 with open(nrpe_check_file, 'w') as nrpe_check_config:
185 nrpe_check_config.write("# check {}\n".format(self.shortname))
186 nrpe_check_config.write("command[{}]={}\n".format(
187@@ -180,9 +199,7 @@
188
189 def write_service_config(self, nagios_context, hostname,
190 nagios_servicegroups):
191- for f in os.listdir(NRPE.nagios_exportdir):
192- if re.search('.*{}.cfg'.format(self.command), f):
193- os.remove(os.path.join(NRPE.nagios_exportdir, f))
194+ self._remove_service_files()
195
196 templ_vars = {
197 'nagios_hostname': hostname,
198@@ -192,8 +209,7 @@
199 'command': self.command,
200 }
201 nrpe_service_text = Check.service_template.format(**templ_vars)
202- nrpe_service_file = '{}/service__{}_{}.cfg'.format(
203- NRPE.nagios_exportdir, hostname, self.command)
204+ nrpe_service_file = self._get_service_filename(hostname)
205 with open(nrpe_service_file, 'w') as nrpe_service_config:
206 nrpe_service_config.write(str(nrpe_service_text))
207
208@@ -218,12 +234,32 @@
209 if hostname:
210 self.hostname = hostname
211 else:
212- self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
213+ nagios_hostname = get_nagios_hostname()
214+ if nagios_hostname:
215+ self.hostname = nagios_hostname
216+ else:
217+ self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
218 self.checks = []
219
220 def add_check(self, *args, **kwargs):
221 self.checks.append(Check(*args, **kwargs))
222
223+ def remove_check(self, *args, **kwargs):
224+ if kwargs.get('shortname') is None:
225+ raise ValueError('shortname of check must be specified')
226+
227+ # Use sensible defaults if they're not specified - these are not
228+ # actually used during removal, but they're required for constructing
229+ # the Check object; check_disk is chosen because it's part of the
230+ # nagios-plugins-basic package.
231+ if kwargs.get('check_cmd') is None:
232+ kwargs['check_cmd'] = 'check_disk'
233+ if kwargs.get('description') is None:
234+ kwargs['description'] = ''
235+
236+ check = Check(*args, **kwargs)
237+ check.remove(self.hostname)
238+
239 def write(self):
240 try:
241 nagios_uid = pwd.getpwnam('nagios').pw_uid
242@@ -260,7 +296,7 @@
243 :param str relation_name: Name of relation nrpe sub joined to
244 """
245 for rel in relations_of_type(relation_name):
246- if 'nagios_hostname' in rel:
247+ if 'nagios_host_context' in rel:
248 return rel['nagios_host_context']
249
250
251@@ -301,11 +337,13 @@
252 upstart_init = '/etc/init/%s.conf' % svc
253 sysv_init = '/etc/init.d/%s' % svc
254 if os.path.exists(upstart_init):
255- nrpe.add_check(
256- shortname=svc,
257- description='process check {%s}' % unit_name,
258- check_cmd='check_upstart_job %s' % svc
259- )
260+ # Don't add a check for these services from neutron-gateway
261+ if svc not in ['ext-port', 'os-charm-phy-nic-mtu']:
262+ nrpe.add_check(
263+ shortname=svc,
264+ description='process check {%s}' % unit_name,
265+ check_cmd='check_upstart_job %s' % svc
266+ )
267 elif os.path.exists(sysv_init):
268 cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
269 cron_file = ('*/5 * * * * root '
270
271=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
272--- hooks/charmhelpers/contrib/network/ip.py 2015-10-22 13:26:06 +0000
273+++ hooks/charmhelpers/contrib/network/ip.py 2016-04-21 10:41:47 +0000
274@@ -23,7 +23,7 @@
275 from functools import partial
276
277 from charmhelpers.core.hookenv import unit_get
278-from charmhelpers.fetch import apt_install
279+from charmhelpers.fetch import apt_install, apt_update
280 from charmhelpers.core.hookenv import (
281 log,
282 WARNING,
283@@ -32,13 +32,15 @@
284 try:
285 import netifaces
286 except ImportError:
287- apt_install('python-netifaces')
288+ apt_update(fatal=True)
289+ apt_install('python-netifaces', fatal=True)
290 import netifaces
291
292 try:
293 import netaddr
294 except ImportError:
295- apt_install('python-netaddr')
296+ apt_update(fatal=True)
297+ apt_install('python-netaddr', fatal=True)
298 import netaddr
299
300
301@@ -51,7 +53,7 @@
302
303
304 def no_ip_found_error_out(network):
305- errmsg = ("No IP address found in network: %s" % network)
306+ errmsg = ("No IP address found in network(s): %s" % network)
307 raise ValueError(errmsg)
308
309
310@@ -59,7 +61,7 @@
311 """Get an IPv4 or IPv6 address within the network from the host.
312
313 :param network (str): CIDR presentation format. For example,
314- '192.168.1.0/24'.
315+ '192.168.1.0/24'. Supports multiple networks as a space-delimited list.
316 :param fallback (str): If no address is found, return fallback.
317 :param fatal (boolean): If no address is found, fallback is not
318 set and fatal is True then exit(1).
319@@ -73,24 +75,26 @@
320 else:
321 return None
322
323- _validate_cidr(network)
324- network = netaddr.IPNetwork(network)
325- for iface in netifaces.interfaces():
326- addresses = netifaces.ifaddresses(iface)
327- if network.version == 4 and netifaces.AF_INET in addresses:
328- addr = addresses[netifaces.AF_INET][0]['addr']
329- netmask = addresses[netifaces.AF_INET][0]['netmask']
330- cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
331- if cidr in network:
332- return str(cidr.ip)
333+ networks = network.split() or [network]
334+ for network in networks:
335+ _validate_cidr(network)
336+ network = netaddr.IPNetwork(network)
337+ for iface in netifaces.interfaces():
338+ addresses = netifaces.ifaddresses(iface)
339+ if network.version == 4 and netifaces.AF_INET in addresses:
340+ addr = addresses[netifaces.AF_INET][0]['addr']
341+ netmask = addresses[netifaces.AF_INET][0]['netmask']
342+ cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
343+ if cidr in network:
344+ return str(cidr.ip)
345
346- if network.version == 6 and netifaces.AF_INET6 in addresses:
347- for addr in addresses[netifaces.AF_INET6]:
348- if not addr['addr'].startswith('fe80'):
349- cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
350- addr['netmask']))
351- if cidr in network:
352- return str(cidr.ip)
353+ if network.version == 6 and netifaces.AF_INET6 in addresses:
354+ for addr in addresses[netifaces.AF_INET6]:
355+ if not addr['addr'].startswith('fe80'):
356+ cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
357+ addr['netmask']))
358+ if cidr in network:
359+ return str(cidr.ip)
360
361 if fallback is not None:
362 return fallback
363@@ -187,6 +191,15 @@
364 get_netmask_for_address = partial(_get_for_address, key='netmask')
365
366
367+def resolve_network_cidr(ip_address):
368+ '''
369+ Resolves the full address cidr of an ip_address based on
370+ configured network interfaces
371+ '''
372+ netmask = get_netmask_for_address(ip_address)
373+ return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
374+
375+
376 def format_ipv6_addr(address):
377 """If address is IPv6, wrap it in '[]' otherwise return None.
378
379@@ -435,8 +448,12 @@
380
381 rev = dns.reversename.from_address(address)
382 result = ns_query(rev)
383+
384 if not result:
385- return None
386+ try:
387+ result = socket.gethostbyaddr(address)[0]
388+ except:
389+ return None
390 else:
391 result = address
392
393@@ -448,3 +465,18 @@
394 return result
395 else:
396 return result.split('.')[0]
397+
398+
399+def port_has_listener(address, port):
400+ """
401+ Returns True if the address:port is open and being listened to,
402+ else False.
403+
404+ @param address: an IP address or hostname
405+ @param port: integer port
406+
407+ Note calls 'nc' via a subprocess shell
408+ """
409+ cmd = ['nc', '-z', address, str(port)]
410+ result = subprocess.call(cmd)
411+ return not(bool(result))
412
413=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
414--- hooks/charmhelpers/contrib/openstack/utils.py 2015-10-22 13:26:06 +0000
415+++ hooks/charmhelpers/contrib/openstack/utils.py 2016-04-21 10:41:47 +0000
416@@ -1,5 +1,3 @@
417-#!/usr/bin/python
418-
419 # Copyright 2014-2015 Canonical Limited.
420 #
421 # This file is part of charm-helpers.
422@@ -24,8 +22,14 @@
423 import json
424 import os
425 import sys
426+import re
427+import itertools
428+import functools
429
430 import six
431+import tempfile
432+import traceback
433+import uuid
434 import yaml
435
436 from charmhelpers.contrib.network import ip
437@@ -35,12 +39,18 @@
438 )
439
440 from charmhelpers.core.hookenv import (
441+ action_fail,
442+ action_set,
443 config,
444 log as juju_log,
445 charm_dir,
446+ DEBUG,
447 INFO,
448+ related_units,
449 relation_ids,
450- relation_set
451+ relation_set,
452+ status_set,
453+ hook_name
454 )
455
456 from charmhelpers.contrib.storage.linux.lvm import (
457@@ -50,7 +60,9 @@
458 )
459
460 from charmhelpers.contrib.network.ip import (
461- get_ipv6_addr
462+ get_ipv6_addr,
463+ is_ipv6,
464+ port_has_listener,
465 )
466
467 from charmhelpers.contrib.python.packages import (
468@@ -58,7 +70,15 @@
469 pip_install,
470 )
471
472-from charmhelpers.core.host import lsb_release, mounts, umount
473+from charmhelpers.core.host import (
474+ lsb_release,
475+ mounts,
476+ umount,
477+ service_running,
478+ service_pause,
479+ service_resume,
480+ restart_on_change_helper,
481+)
482 from charmhelpers.fetch import apt_install, apt_cache, install_remote
483 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
484 from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
485@@ -69,7 +89,6 @@
486 DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
487 'restricted main multiverse universe')
488
489-
490 UBUNTU_OPENSTACK_RELEASE = OrderedDict([
491 ('oneiric', 'diablo'),
492 ('precise', 'essex'),
493@@ -80,6 +99,7 @@
494 ('utopic', 'juno'),
495 ('vivid', 'kilo'),
496 ('wily', 'liberty'),
497+ ('xenial', 'mitaka'),
498 ])
499
500
501@@ -93,31 +113,74 @@
502 ('2014.2', 'juno'),
503 ('2015.1', 'kilo'),
504 ('2015.2', 'liberty'),
505+ ('2016.1', 'mitaka'),
506 ])
507
508-# The ugly duckling
509+# The ugly duckling - must list releases oldest to newest
510 SWIFT_CODENAMES = OrderedDict([
511- ('1.4.3', 'diablo'),
512- ('1.4.8', 'essex'),
513- ('1.7.4', 'folsom'),
514- ('1.8.0', 'grizzly'),
515- ('1.7.7', 'grizzly'),
516- ('1.7.6', 'grizzly'),
517- ('1.10.0', 'havana'),
518- ('1.9.1', 'havana'),
519- ('1.9.0', 'havana'),
520- ('1.13.1', 'icehouse'),
521- ('1.13.0', 'icehouse'),
522- ('1.12.0', 'icehouse'),
523- ('1.11.0', 'icehouse'),
524- ('2.0.0', 'juno'),
525- ('2.1.0', 'juno'),
526- ('2.2.0', 'juno'),
527- ('2.2.1', 'kilo'),
528- ('2.2.2', 'kilo'),
529- ('2.3.0', 'liberty'),
530+ ('diablo',
531+ ['1.4.3']),
532+ ('essex',
533+ ['1.4.8']),
534+ ('folsom',
535+ ['1.7.4']),
536+ ('grizzly',
537+ ['1.7.6', '1.7.7', '1.8.0']),
538+ ('havana',
539+ ['1.9.0', '1.9.1', '1.10.0']),
540+ ('icehouse',
541+ ['1.11.0', '1.12.0', '1.13.0', '1.13.1']),
542+ ('juno',
543+ ['2.0.0', '2.1.0', '2.2.0']),
544+ ('kilo',
545+ ['2.2.1', '2.2.2']),
546+ ('liberty',
547+ ['2.3.0', '2.4.0', '2.5.0']),
548+ ('mitaka',
549+ ['2.5.0', '2.6.0', '2.7.0']),
550 ])
551
552+# >= Liberty version->codename mapping
553+PACKAGE_CODENAMES = {
554+ 'nova-common': OrderedDict([
555+ ('12.0', 'liberty'),
556+ ('13.0', 'mitaka'),
557+ ]),
558+ 'neutron-common': OrderedDict([
559+ ('7.0', 'liberty'),
560+ ('8.0', 'mitaka'),
561+ ]),
562+ 'cinder-common': OrderedDict([
563+ ('7.0', 'liberty'),
564+ ('8.0', 'mitaka'),
565+ ]),
566+ 'keystone': OrderedDict([
567+ ('8.0', 'liberty'),
568+ ('8.1', 'liberty'),
569+ ('9.0', 'mitaka'),
570+ ]),
571+ 'horizon-common': OrderedDict([
572+ ('8.0', 'liberty'),
573+ ('9.0', 'mitaka'),
574+ ]),
575+ 'ceilometer-common': OrderedDict([
576+ ('5.0', 'liberty'),
577+ ('6.0', 'mitaka'),
578+ ]),
579+ 'heat-common': OrderedDict([
580+ ('5.0', 'liberty'),
581+ ('6.0', 'mitaka'),
582+ ]),
583+ 'glance-common': OrderedDict([
584+ ('11.0', 'liberty'),
585+ ('12.0', 'mitaka'),
586+ ]),
587+ 'openstack-dashboard': OrderedDict([
588+ ('8.0', 'liberty'),
589+ ('9.0', 'mitaka'),
590+ ]),
591+}
592+
593 DEFAULT_LOOPBACK_SIZE = '5G'
594
595
596@@ -167,9 +230,9 @@
597 error_out(e)
598
599
600-def get_os_version_codename(codename):
601+def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
602 '''Determine OpenStack version number from codename.'''
603- for k, v in six.iteritems(OPENSTACK_CODENAMES):
604+ for k, v in six.iteritems(version_map):
605 if v == codename:
606 return k
607 e = 'Could not derive OpenStack version for '\
608@@ -177,6 +240,33 @@
609 error_out(e)
610
611
612+def get_os_version_codename_swift(codename):
613+ '''Determine OpenStack version number of swift from codename.'''
614+ for k, v in six.iteritems(SWIFT_CODENAMES):
615+ if k == codename:
616+ return v[-1]
617+ e = 'Could not derive swift version for '\
618+ 'codename: %s' % codename
619+ error_out(e)
620+
621+
622+def get_swift_codename(version):
623+ '''Determine OpenStack codename that corresponds to swift version.'''
624+ codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
625+ if len(codenames) > 1:
626+ # If more than one release codename contains this version we determine
627+ # the actual codename based on the highest available install source.
628+ for codename in reversed(codenames):
629+ releases = UBUNTU_OPENSTACK_RELEASE
630+ release = [k for k, v in six.iteritems(releases) if codename in v]
631+ ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
632+ if codename in ret or release[0] in ret:
633+ return codename
634+ elif len(codenames) == 1:
635+ return codenames[0]
636+ return None
637+
638+
639 def get_os_codename_package(package, fatal=True):
640 '''Derive OpenStack release codename from an installed package.'''
641 import apt_pkg as apt
642@@ -201,20 +291,33 @@
643 error_out(e)
644
645 vers = apt.upstream_version(pkg.current_ver.ver_str)
646-
647- try:
648- if 'swift' in pkg.name:
649- swift_vers = vers[:5]
650- if swift_vers not in SWIFT_CODENAMES:
651- # Deal with 1.10.0 upward
652- swift_vers = vers[:6]
653- return SWIFT_CODENAMES[swift_vers]
654- else:
655- vers = vers[:6]
656- return OPENSTACK_CODENAMES[vers]
657- except KeyError:
658- e = 'Could not determine OpenStack codename for version %s' % vers
659- error_out(e)
660+ if 'swift' in pkg.name:
661+ # Fully x.y.z match for swift versions
662+ match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
663+ else:
664+ # x.y match only for 20XX.X
665+ # and ignore patch level for other packages
666+ match = re.match('^(\d+)\.(\d+)', vers)
667+
668+ if match:
669+ vers = match.group(0)
670+
671+ # >= Liberty independent project versions
672+ if (package in PACKAGE_CODENAMES and
673+ vers in PACKAGE_CODENAMES[package]):
674+ return PACKAGE_CODENAMES[package][vers]
675+ else:
676+ # < Liberty co-ordinated project versions
677+ try:
678+ if 'swift' in pkg.name:
679+ return get_swift_codename(vers)
680+ else:
681+ return OPENSTACK_CODENAMES[vers]
682+ except KeyError:
683+ if not fatal:
684+ return None
685+ e = 'Could not determine OpenStack codename for version %s' % vers
686+ error_out(e)
687
688
689 def get_os_version_package(pkg, fatal=True):
690@@ -226,12 +329,14 @@
691
692 if 'swift' in pkg:
693 vers_map = SWIFT_CODENAMES
694+ for cname, version in six.iteritems(vers_map):
695+ if cname == codename:
696+ return version[-1]
697 else:
698 vers_map = OPENSTACK_CODENAMES
699-
700- for version, cname in six.iteritems(vers_map):
701- if cname == codename:
702- return version
703+ for version, cname in six.iteritems(vers_map):
704+ if cname == codename:
705+ return version
706 # e = "Could not determine OpenStack version for package: %s" % pkg
707 # error_out(e)
708
709@@ -256,12 +361,42 @@
710
711
712 def import_key(keyid):
713- cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
714- "--recv-keys %s" % keyid
715- try:
716- subprocess.check_call(cmd.split(' '))
717- except subprocess.CalledProcessError:
718- error_out("Error importing repo key %s" % keyid)
719+ key = keyid.strip()
720+ if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and
721+ key.endswith('-----END PGP PUBLIC KEY BLOCK-----')):
722+ juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
723+ juju_log("Importing ASCII Armor PGP key", level=DEBUG)
724+ with tempfile.NamedTemporaryFile() as keyfile:
725+ with open(keyfile.name, 'w') as fd:
726+ fd.write(key)
727+ fd.write("\n")
728+
729+ cmd = ['apt-key', 'add', keyfile.name]
730+ try:
731+ subprocess.check_call(cmd)
732+ except subprocess.CalledProcessError:
733+ error_out("Error importing PGP key '%s'" % key)
734+ else:
735+ juju_log("PGP key found (looks like Radix64 format)", level=DEBUG)
736+ juju_log("Importing PGP key from keyserver", level=DEBUG)
737+ cmd = ['apt-key', 'adv', '--keyserver',
738+ 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
739+ try:
740+ subprocess.check_call(cmd)
741+ except subprocess.CalledProcessError:
742+ error_out("Error importing PGP key '%s'" % key)
743+
744+
745+def get_source_and_pgp_key(input):
746+ """Look for a pgp key ID or ascii-armor key in the given input."""
747+ index = input.strip()
748+ index = input.rfind('|')
749+ if index < 0:
750+ return input, None
751+
752+ key = input[index + 1:].strip('|')
753+ source = input[:index]
754+ return source, key
755
756
757 def configure_installation_source(rel):
758@@ -273,16 +408,16 @@
759 with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
760 f.write(DISTRO_PROPOSED % ubuntu_rel)
761 elif rel[:4] == "ppa:":
762- src = rel
763+ src, key = get_source_and_pgp_key(rel)
764+ if key:
765+ import_key(key)
766+
767 subprocess.check_call(["add-apt-repository", "-y", src])
768 elif rel[:3] == "deb":
769- l = len(rel.split('|'))
770- if l == 2:
771- src, key = rel.split('|')
772- juju_log("Importing PPA key from keyserver for %s" % src)
773+ src, key = get_source_and_pgp_key(rel)
774+ if key:
775 import_key(key)
776- elif l == 1:
777- src = rel
778+
779 with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
780 f.write(src)
781 elif rel[:6] == 'cloud:':
782@@ -327,6 +462,9 @@
783 'liberty': 'trusty-updates/liberty',
784 'liberty/updates': 'trusty-updates/liberty',
785 'liberty/proposed': 'trusty-proposed/liberty',
786+ 'mitaka': 'trusty-updates/mitaka',
787+ 'mitaka/updates': 'trusty-updates/mitaka',
788+ 'mitaka/proposed': 'trusty-proposed/mitaka',
789 }
790
791 try:
792@@ -392,9 +530,18 @@
793 import apt_pkg as apt
794 src = config('openstack-origin')
795 cur_vers = get_os_version_package(package)
796- available_vers = get_os_version_install_source(src)
797+ if "swift" in package:
798+ codename = get_os_codename_install_source(src)
799+ avail_vers = get_os_version_codename_swift(codename)
800+ else:
801+ avail_vers = get_os_version_install_source(src)
802 apt.init()
803- return apt.version_compare(available_vers, cur_vers) == 1
804+ if "swift" in package:
805+ major_cur_vers = cur_vers.split('.', 1)[0]
806+ major_avail_vers = avail_vers.split('.', 1)[0]
807+ major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
808+ return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
809+ return apt.version_compare(avail_vers, cur_vers) == 1
810
811
812 def ensure_block_device(block_device):
813@@ -469,6 +616,12 @@
814 relation_prefix=None):
815 hosts = get_ipv6_addr(dynamic_only=False)
816
817+ if config('vip'):
818+ vips = config('vip').split()
819+ for vip in vips:
820+ if vip and is_ipv6(vip):
821+ hosts.append(vip)
822+
823 kwargs = {'database': database,
824 'username': database_user,
825 'hostname': json.dumps(hosts)}
826@@ -517,7 +670,7 @@
827 return yaml.load(projects_yaml)
828
829
830-def git_clone_and_install(projects_yaml, core_project, depth=1):
831+def git_clone_and_install(projects_yaml, core_project):
832 """
833 Clone/install all specified OpenStack repositories.
834
835@@ -567,6 +720,9 @@
836 for p in projects['repositories']:
837 repo = p['repository']
838 branch = p['branch']
839+ depth = '1'
840+ if 'depth' in p.keys():
841+ depth = p['depth']
842 if p['name'] == 'requirements':
843 repo_dir = _git_clone_and_install_single(repo, branch, depth,
844 parent_dir, http_proxy,
845@@ -611,19 +767,14 @@
846 """
847 Clone and install a single git repository.
848 """
849- dest_dir = os.path.join(parent_dir, os.path.basename(repo))
850-
851 if not os.path.exists(parent_dir):
852 juju_log('Directory already exists at {}. '
853 'No need to create directory.'.format(parent_dir))
854 os.mkdir(parent_dir)
855
856- if not os.path.exists(dest_dir):
857- juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
858- repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
859- depth=depth)
860- else:
861- repo_dir = dest_dir
862+ juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
863+ repo_dir = install_remote(
864+ repo, dest=parent_dir, branch=branch, depth=depth)
865
866 venv = os.path.join(parent_dir, 'venv')
867
868@@ -704,3 +855,721 @@
869 return projects[key]
870
871 return None
872+
873+
874+def os_workload_status(configs, required_interfaces, charm_func=None):
875+ """
876+ Decorator to set workload status based on complete contexts
877+ """
878+ def wrap(f):
879+ @wraps(f)
880+ def wrapped_f(*args, **kwargs):
881+ # Run the original function first
882+ f(*args, **kwargs)
883+ # Set workload status now that contexts have been
884+ # acted on
885+ set_os_workload_status(configs, required_interfaces, charm_func)
886+ return wrapped_f
887+ return wrap
888+
889+
890+def set_os_workload_status(configs, required_interfaces, charm_func=None,
891+ services=None, ports=None):
892+ """Set the state of the workload status for the charm.
893+
894+ This calls _determine_os_workload_status() to get the new state, message
895+ and sets the status using status_set()
896+
897+ @param configs: a templating.OSConfigRenderer() object
898+ @param required_interfaces: {generic: [specific, specific2, ...]}
899+ @param charm_func: a callable function that returns state, message. The
900+ signature is charm_func(configs) -> (state, message)
901+ @param services: list of strings OR dictionary specifying services/ports
902+ @param ports: OPTIONAL list of port numbers.
903+ @returns state, message: the new workload status, user message
904+ """
905+ state, message = _determine_os_workload_status(
906+ configs, required_interfaces, charm_func, services, ports)
907+ status_set(state, message)
908+
909+
910+def _determine_os_workload_status(
911+ configs, required_interfaces, charm_func=None,
912+ services=None, ports=None):
913+ """Determine the state of the workload status for the charm.
914+
915+ This function returns the new workload status for the charm based
916+ on the state of the interfaces, the paused state and whether the
917+ services are actually running and any specified ports are open.
918+
919+ This checks:
920+
 921+ 1. if the unit should be paused, that it is actually paused. If so the
 922+ state is 'maintenance' + message, else 'blocked'.
 923+ 2. that the interfaces/relations are complete. If they are not then
 924+ it sets the state to either 'blocked' or 'waiting' and an appropriate
 925+ message.
 926+ 3. If all the relation data is set, then it checks that the actual
 927+ services really are running. If not it sets the state to 'blocked'.
928+
929+ If everything is okay then the state returns 'active'.
930+
931+ @param configs: a templating.OSConfigRenderer() object
932+ @param required_interfaces: {generic: [specific, specific2, ...]}
933+ @param charm_func: a callable function that returns state, message. The
934+ signature is charm_func(configs) -> (state, message)
935+ @param services: list of strings OR dictionary specifying services/ports
936+ @param ports: OPTIONAL list of port numbers.
937+ @returns state, message: the new workload status, user message
938+ """
939+ state, message = _ows_check_if_paused(services, ports)
940+
941+ if state is None:
942+ state, message = _ows_check_generic_interfaces(
943+ configs, required_interfaces)
944+
945+ if state != 'maintenance' and charm_func:
946+ # _ows_check_charm_func() may modify the state, message
947+ state, message = _ows_check_charm_func(
948+ state, message, lambda: charm_func(configs))
949+
950+ if state is None:
951+ state, message = _ows_check_services_running(services, ports)
952+
953+ if state is None:
954+ state = 'active'
955+ message = "Unit is ready"
956+ juju_log(message, 'INFO')
957+
958+ return state, message
959+
960+
961+def _ows_check_if_paused(services=None, ports=None):
962+ """Check if the unit is supposed to be paused, and if so check that the
963+ services/ports (if passed) are actually stopped/not being listened to.
964+
965+ if the unit isn't supposed to be paused, just return None, None
966+
967+ @param services: OPTIONAL services spec or list of service names.
968+ @param ports: OPTIONAL list of port numbers.
969+ @returns state, message or None, None
970+ """
971+ if is_unit_paused_set():
972+ state, message = check_actually_paused(services=services,
973+ ports=ports)
974+ if state is None:
975+ # we're paused okay, so set maintenance and return
976+ state = "maintenance"
977+ message = "Paused. Use 'resume' action to resume normal service."
978+ return state, message
979+ return None, None
980+
981+
982+def _ows_check_generic_interfaces(configs, required_interfaces):
983+ """Check the complete contexts to determine the workload status.
984+
985+ - Checks for missing or incomplete contexts
986+ - juju log details of missing required data.
987+ - determines the correct workload status
988+ - creates an appropriate message for status_set(...)
989+
990+ if there are no problems then the function returns None, None
991+
992+ @param configs: a templating.OSConfigRenderer() object
993+ @params required_interfaces: {generic_interface: [specific_interface], }
994+ @returns state, message or None, None
995+ """
996+ incomplete_rel_data = incomplete_relation_data(configs,
997+ required_interfaces)
998+ state = None
999+ message = None
1000+ missing_relations = set()
1001+ incomplete_relations = set()
1002+
1003+ for generic_interface, relations_states in incomplete_rel_data.items():
1004+ related_interface = None
1005+ missing_data = {}
1006+ # Related or not?
1007+ for interface, relation_state in relations_states.items():
1008+ if relation_state.get('related'):
1009+ related_interface = interface
1010+ missing_data = relation_state.get('missing_data')
1011+ break
1012+ # No relation ID for the generic_interface?
1013+ if not related_interface:
1014+ juju_log("{} relation is missing and must be related for "
1015+ "functionality. ".format(generic_interface), 'WARN')
1016+ state = 'blocked'
1017+ missing_relations.add(generic_interface)
1018+ else:
 1019+ # Relation ID exists but no related unit
1020+ if not missing_data:
 1021+ # Edge case - relation ID exists but departing
1022+ _hook_name = hook_name()
1023+ if (('departed' in _hook_name or 'broken' in _hook_name) and
1024+ related_interface in _hook_name):
1025+ state = 'blocked'
1026+ missing_relations.add(generic_interface)
1027+ juju_log("{} relation's interface, {}, "
1028+ "relationship is departed or broken "
1029+ "and is required for functionality."
1030+ "".format(generic_interface, related_interface),
1031+ "WARN")
1032+ # Normal case relation ID exists but no related unit
1033+ # (joining)
1034+ else:
 1035+ juju_log("{} relation's interface, {}, is related but has
1036+ " no units in the relation."
1037+ "".format(generic_interface, related_interface),
1038+ "INFO")
1039+ # Related unit exists and data missing on the relation
1040+ else:
1041+ juju_log("{} relation's interface, {}, is related awaiting "
1042+ "the following data from the relationship: {}. "
1043+ "".format(generic_interface, related_interface,
1044+ ", ".join(missing_data)), "INFO")
1045+ if state != 'blocked':
1046+ state = 'waiting'
1047+ if generic_interface not in missing_relations:
1048+ incomplete_relations.add(generic_interface)
1049+
1050+ if missing_relations:
1051+ message = "Missing relations: {}".format(", ".join(missing_relations))
1052+ if incomplete_relations:
1053+ message += "; incomplete relations: {}" \
1054+ "".format(", ".join(incomplete_relations))
1055+ state = 'blocked'
1056+ elif incomplete_relations:
1057+ message = "Incomplete relations: {}" \
1058+ "".format(", ".join(incomplete_relations))
1059+ state = 'waiting'
1060+
1061+ return state, message
1062+
1063+
1064+def _ows_check_charm_func(state, message, charm_func_with_configs):
1065+ """Run a custom check function for the charm to see if it wants to
1066+ change the state. This is only run if not in 'maintenance' and
1067+ tests to see if the new state is more important that the previous
1068+ one determined by the interfaces/relations check.
1069+
1070+ @param state: the previously determined state so far.
1071+ @param message: the user orientated message so far.
 1072+ @param charm_func_with_configs: a callable function that returns state, message
1073+ @returns state, message strings.
1074+ """
1075+ if charm_func_with_configs:
1076+ charm_state, charm_message = charm_func_with_configs()
1077+ if charm_state != 'active' and charm_state != 'unknown':
1078+ state = workload_state_compare(state, charm_state)
1079+ if message:
1080+ charm_message = charm_message.replace("Incomplete relations: ",
1081+ "")
1082+ message = "{}, {}".format(message, charm_message)
1083+ else:
1084+ message = charm_message
1085+ return state, message
1086+
1087+
1088+def _ows_check_services_running(services, ports):
1089+ """Check that the services that should be running are actually running
1090+ and that any ports specified are being listened to.
1091+
1092+ @param services: list of strings OR dictionary specifying services/ports
1093+ @param ports: list of ports
1094+ @returns state, message: strings or None, None
1095+ """
1096+ messages = []
1097+ state = None
1098+ if services is not None:
1099+ services = _extract_services_list_helper(services)
1100+ services_running, running = _check_running_services(services)
1101+ if not all(running):
1102+ messages.append(
1103+ "Services not running that should be: {}"
1104+ .format(", ".join(_filter_tuples(services_running, False))))
1105+ state = 'blocked'
1106+ # also verify that the ports that should be open are open
1107+ # NB, that ServiceManager objects only OPTIONALLY have ports
1108+ map_not_open, ports_open = (
1109+ _check_listening_on_services_ports(services))
1110+ if not all(ports_open):
1111+ # find which service has missing ports. They are in service
1112+ # order which makes it a bit easier.
1113+ message_parts = {service: ", ".join([str(v) for v in open_ports])
1114+ for service, open_ports in map_not_open.items()}
1115+ message = ", ".join(
1116+ ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
1117+ messages.append(
1118+ "Services with ports not open that should be: {}"
1119+ .format(message))
1120+ state = 'blocked'
1121+
1122+ if ports is not None:
1123+ # and we can also check ports which we don't know the service for
1124+ ports_open, ports_open_bools = _check_listening_on_ports_list(ports)
1125+ if not all(ports_open_bools):
1126+ messages.append(
1127+ "Ports which should be open, but are not: {}"
1128+ .format(", ".join([str(p) for p, v in ports_open
1129+ if not v])))
1130+ state = 'blocked'
1131+
1132+ if state is not None:
1133+ message = "; ".join(messages)
1134+ return state, message
1135+
1136+ return None, None
1137+
1138+
1139+def _extract_services_list_helper(services):
1140+ """Extract a OrderedDict of {service: [ports]} of the supplied services
1141+ for use by the other functions.
1142+
1143+ The services object can either be:
1144+ - None : no services were passed (an empty dict is returned)
1145+ - a list of strings
1146+ - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
1147+ - An array of [{'service': service_name, ...}, ...]
1148+
1149+ @param services: see above
1150+ @returns OrderedDict(service: [ports], ...)
1151+ """
1152+ if services is None:
1153+ return {}
1154+ if isinstance(services, dict):
1155+ services = services.values()
1156+ # either extract the list of services from the dictionary, or if
1157+ # it is a simple string, use that. i.e. works with mixed lists.
1158+ _s = OrderedDict()
1159+ for s in services:
1160+ if isinstance(s, dict) and 'service' in s:
1161+ _s[s['service']] = s.get('ports', [])
1162+ if isinstance(s, str):
1163+ _s[s] = []
1164+ return _s
1165+
1166+
1167+def _check_running_services(services):
1168+ """Check that the services dict provided is actually running and provide
1169+ a list of (service, boolean) tuples for each service.
1170+
1171+ Returns both a zipped list of (service, boolean) and a list of booleans
1172+ in the same order as the services.
1173+
1174+ @param services: OrderedDict of strings: [ports], one for each service to
1175+ check.
1176+ @returns [(service, boolean), ...], : results for checks
1177+ [boolean] : just the result of the service checks
1178+ """
1179+ services_running = [service_running(s) for s in services]
1180+ return list(zip(services, services_running)), services_running
1181+
1182+
1183+def _check_listening_on_services_ports(services, test=False):
1184+ """Check that the unit is actually listening (has the port open) on the
1185+ ports that the service specifies are open. If test is True then the
1186+ function returns the services with ports that are open rather than
1187+ closed.
1188+
1189+ Returns an OrderedDict of service: ports and a list of booleans
1190+
1191+ @param services: OrderedDict(service: [port, ...], ...)
1192+ @param test: default=False, if False, test for closed, otherwise open.
1193+ @returns OrderedDict(service: [port-not-open, ...]...), [boolean]
1194+ """
1195+ test = not(not(test)) # ensure test is True or False
1196+ all_ports = list(itertools.chain(*services.values()))
1197+ ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
1198+ map_ports = OrderedDict()
1199+ matched_ports = [p for p, opened in zip(all_ports, ports_states)
1200+ if opened == test] # essentially opened xor test
1201+ for service, ports in services.items():
1202+ set_ports = set(ports).intersection(matched_ports)
1203+ if set_ports:
1204+ map_ports[service] = set_ports
1205+ return map_ports, ports_states
1206+
1207+
1208+def _check_listening_on_ports_list(ports):
1209+ """Check that the ports list given are being listened to
1210+
1211+ Returns a list of ports being listened to and a list of the
1212+ booleans.
1213+
 1214+ @param ports: LIST of port numbers.
1215+ @returns [(port_num, boolean), ...], [boolean]
1216+ """
1217+ ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
1218+ return zip(ports, ports_open), ports_open
1219+
1220+
1221+def _filter_tuples(services_states, state):
1222+ """Return a simple list from a list of tuples according to the condition
1223+
1224+ @param services_states: LIST of (string, boolean): service and running
1225+ state.
1226+ @param state: Boolean to match the tuple against.
1227+ @returns [LIST of strings] that matched the tuple RHS.
1228+ """
1229+ return [s for s, b in services_states if b == state]
1230+
1231+
1232+def workload_state_compare(current_workload_state, workload_state):
1233+ """ Return highest priority of two states"""
1234+ hierarchy = {'unknown': -1,
1235+ 'active': 0,
1236+ 'maintenance': 1,
1237+ 'waiting': 2,
1238+ 'blocked': 3,
1239+ }
1240+
1241+ if hierarchy.get(workload_state) is None:
1242+ workload_state = 'unknown'
1243+ if hierarchy.get(current_workload_state) is None:
1244+ current_workload_state = 'unknown'
1245+
1246+ # Set workload_state based on hierarchy of statuses
1247+ if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
1248+ return current_workload_state
1249+ else:
1250+ return workload_state
1251+
1252+
1253+def incomplete_relation_data(configs, required_interfaces):
1254+ """Check complete contexts against required_interfaces
1255+ Return dictionary of incomplete relation data.
1256+
1257+ configs is an OSConfigRenderer object with configs registered
1258+
1259+ required_interfaces is a dictionary of required general interfaces
1260+ with dictionary values of possible specific interfaces.
1261+ Example:
1262+ required_interfaces = {'database': ['shared-db', 'pgsql-db']}
1263+
1264+ The interface is said to be satisfied if anyone of the interfaces in the
1265+ list has a complete context.
1266+
1267+ Return dictionary of incomplete or missing required contexts with relation
1268+ status of interfaces and any missing data points. Example:
1269+ {'message':
1270+ {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
1271+ 'zeromq-configuration': {'related': False}},
1272+ 'identity':
1273+ {'identity-service': {'related': False}},
1274+ 'database':
1275+ {'pgsql-db': {'related': False},
1276+ 'shared-db': {'related': True}}}
1277+ """
1278+ complete_ctxts = configs.complete_contexts()
1279+ incomplete_relations = [
1280+ svc_type
1281+ for svc_type, interfaces in required_interfaces.items()
1282+ if not set(interfaces).intersection(complete_ctxts)]
1283+ return {
1284+ i: configs.get_incomplete_context_data(required_interfaces[i])
1285+ for i in incomplete_relations}
1286+
1287+
1288+def do_action_openstack_upgrade(package, upgrade_callback, configs):
1289+ """Perform action-managed OpenStack upgrade.
1290+
1291+ Upgrades packages to the configured openstack-origin version and sets
1292+ the corresponding action status as a result.
1293+
1294+ If the charm was installed from source we cannot upgrade it.
1295+ For backwards compatibility a config flag (action-managed-upgrade) must
1296+ be set for this code to run, otherwise a full service level upgrade will
1297+ fire on config-changed.
1298+
1299+ @param package: package name for determining if upgrade available
1300+ @param upgrade_callback: function callback to charm's upgrade function
1301+ @param configs: templating object derived from OSConfigRenderer class
1302+
1303+ @return: True if upgrade successful; False if upgrade failed or skipped
1304+ """
1305+ ret = False
1306+
1307+ if git_install_requested():
1308+ action_set({'outcome': 'installed from source, skipped upgrade.'})
1309+ else:
1310+ if openstack_upgrade_available(package):
1311+ if config('action-managed-upgrade'):
1312+ juju_log('Upgrading OpenStack release')
1313+
1314+ try:
1315+ upgrade_callback(configs=configs)
1316+ action_set({'outcome': 'success, upgrade completed.'})
1317+ ret = True
1318+ except:
1319+ action_set({'outcome': 'upgrade failed, see traceback.'})
1320+ action_set({'traceback': traceback.format_exc()})
1321+ action_fail('do_openstack_upgrade resulted in an '
1322+ 'unexpected error')
1323+ else:
1324+ action_set({'outcome': 'action-managed-upgrade config is '
1325+ 'False, skipped upgrade.'})
1326+ else:
1327+ action_set({'outcome': 'no upgrade available.'})
1328+
1329+ return ret
1330+
1331+
1332+def remote_restart(rel_name, remote_service=None):
1333+ trigger = {
1334+ 'restart-trigger': str(uuid.uuid4()),
1335+ }
1336+ if remote_service:
1337+ trigger['remote-service'] = remote_service
1338+ for rid in relation_ids(rel_name):
 1339+ # This subordinate can be related to two separate services using
 1340+ # different subordinate relations so only issue the restart if
 1341+ # the principal is connected down the relation we think it is
1342+ if related_units(relid=rid):
1343+ relation_set(relation_id=rid,
1344+ relation_settings=trigger,
1345+ )
1346+
1347+
1348+def check_actually_paused(services=None, ports=None):
 1349+ """Check that services listed in the services object and ports
1350+ are actually closed (not listened to), to verify that the unit is
1351+ properly paused.
1352+
1353+ @param services: See _extract_services_list_helper
1354+ @returns status, : string for status (None if okay)
1355+ message : string for problem for status_set
1356+ """
1357+ state = None
1358+ message = None
1359+ messages = []
1360+ if services is not None:
1361+ services = _extract_services_list_helper(services)
1362+ services_running, services_states = _check_running_services(services)
1363+ if any(services_states):
1364+ # there shouldn't be any running so this is a problem
1365+ messages.append("these services running: {}"
1366+ .format(", ".join(
1367+ _filter_tuples(services_running, True))))
1368+ state = "blocked"
1369+ ports_open, ports_open_bools = (
1370+ _check_listening_on_services_ports(services, True))
1371+ if any(ports_open_bools):
1372+ message_parts = {service: ", ".join([str(v) for v in open_ports])
1373+ for service, open_ports in ports_open.items()}
1374+ message = ", ".join(
1375+ ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
1376+ messages.append(
1377+ "these service:ports are open: {}".format(message))
1378+ state = 'blocked'
1379+ if ports is not None:
1380+ ports_open, bools = _check_listening_on_ports_list(ports)
1381+ if any(bools):
1382+ messages.append(
1383+ "these ports which should be closed, but are open: {}"
1384+ .format(", ".join([str(p) for p, v in ports_open if v])))
1385+ state = 'blocked'
1386+ if messages:
1387+ message = ("Services should be paused but {}"
1388+ .format(", ".join(messages)))
1389+ return state, message
1390+
1391+
1392+def set_unit_paused():
1393+ """Set the unit to a paused state in the local kv() store.
1394+ This does NOT actually pause the unit
1395+ """
1396+ with unitdata.HookData()() as t:
1397+ kv = t[0]
1398+ kv.set('unit-paused', True)
1399+
1400+
1401+def clear_unit_paused():
1402+ """Clear the unit from a paused state in the local kv() store
1403+ This does NOT actually restart any services - it only clears the
1404+ local state.
1405+ """
1406+ with unitdata.HookData()() as t:
1407+ kv = t[0]
1408+ kv.set('unit-paused', False)
1409+
1410+
1411+def is_unit_paused_set():
1412+ """Return the state of the kv().get('unit-paused').
1413+ This does NOT verify that the unit really is paused.
1414+
1415+ To help with units that don't have HookData() (testing)
1416+ if it excepts, return False
1417+ """
1418+ try:
1419+ with unitdata.HookData()() as t:
1420+ kv = t[0]
1421+ # transform something truth-y into a Boolean.
1422+ return not(not(kv.get('unit-paused')))
1423+ except:
1424+ return False
1425+
1426+
1427+def pause_unit(assess_status_func, services=None, ports=None,
1428+ charm_func=None):
1429+ """Pause a unit by stopping the services and setting 'unit-paused'
1430+ in the local kv() store.
1431+
1432+ Also checks that the services have stopped and ports are no longer
1433+ being listened to.
1434+
1435+ An optional charm_func() can be called that can either raise an
1436+ Exception or return non None, None to indicate that the unit
1437+ didn't pause cleanly.
1438+
1439+ The signature for charm_func is:
1440+ charm_func() -> message: string
1441+
1442+ charm_func() is executed after any services are stopped, if supplied.
1443+
1444+ The services object can either be:
1445+ - None : no services were passed (an empty dict is returned)
1446+ - a list of strings
1447+ - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
1448+ - An array of [{'service': service_name, ...}, ...]
1449+
1450+ @param assess_status_func: (f() -> message: string | None) or None
1451+ @param services: OPTIONAL see above
1452+ @param ports: OPTIONAL list of port
1453+ @param charm_func: function to run for custom charm pausing.
1454+ @returns None
1455+ @raises Exception(message) on an error for action_fail().
1456+ """
1457+ services = _extract_services_list_helper(services)
1458+ messages = []
1459+ if services:
1460+ for service in services.keys():
1461+ stopped = service_pause(service)
1462+ if not stopped:
1463+ messages.append("{} didn't stop cleanly.".format(service))
1464+ if charm_func:
1465+ try:
1466+ message = charm_func()
1467+ if message:
1468+ messages.append(message)
1469+ except Exception as e:
 1470+ messages.append(str(e))
1471+ set_unit_paused()
1472+ if assess_status_func:
1473+ message = assess_status_func()
1474+ if message:
1475+ messages.append(message)
1476+ if messages:
1477+ raise Exception("Couldn't pause: {}".format("; ".join(messages)))
1478+
1479+
1480+def resume_unit(assess_status_func, services=None, ports=None,
1481+ charm_func=None):
 1482+ """Resume a unit by starting the services and clearing 'unit-paused'
1483+ in the local kv() store.
1484+
1485+ Also checks that the services have started and ports are being listened to.
1486+
1487+ An optional charm_func() can be called that can either raise an
1488+ Exception or return non None to indicate that the unit
1489+ didn't resume cleanly.
1490+
1491+ The signature for charm_func is:
1492+ charm_func() -> message: string
1493+
1494+ charm_func() is executed after any services are started, if supplied.
1495+
1496+ The services object can either be:
1497+ - None : no services were passed (an empty dict is returned)
1498+ - a list of strings
1499+ - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
1500+ - An array of [{'service': service_name, ...}, ...]
1501+
1502+ @param assess_status_func: (f() -> message: string | None) or None
1503+ @param services: OPTIONAL see above
1504+ @param ports: OPTIONAL list of port
1505+ @param charm_func: function to run for custom charm resuming.
1506+ @returns None
1507+ @raises Exception(message) on an error for action_fail().
1508+ """
1509+ services = _extract_services_list_helper(services)
1510+ messages = []
1511+ if services:
1512+ for service in services.keys():
1513+ started = service_resume(service)
1514+ if not started:
1515+ messages.append("{} didn't start cleanly.".format(service))
1516+ if charm_func:
1517+ try:
1518+ message = charm_func()
1519+ if message:
1520+ messages.append(message)
1521+ except Exception as e:
 1522+ messages.append(str(e))
1523+ clear_unit_paused()
1524+ if assess_status_func:
1525+ message = assess_status_func()
1526+ if message:
1527+ messages.append(message)
1528+ if messages:
1529+ raise Exception("Couldn't resume: {}".format("; ".join(messages)))
1530+
1531+
1532+def make_assess_status_func(*args, **kwargs):
1533+ """Creates an assess_status_func() suitable for handing to pause_unit()
1534+ and resume_unit().
1535+
1536+ This uses the _determine_os_workload_status(...) function to determine
1537+ what the workload_status should be for the unit. If the unit is
1538+ not in maintenance or active states, then the message is returned to
1539+ the caller. This is so an action that doesn't result in either a
1540+ complete pause or complete resume can signal failure with an action_fail()
1541+ """
1542+ def _assess_status_func():
1543+ state, message = _determine_os_workload_status(*args, **kwargs)
1544+ status_set(state, message)
1545+ if state not in ['maintenance', 'active']:
1546+ return message
1547+ return None
1548+
1549+ return _assess_status_func
1550+
1551+
1552+def pausable_restart_on_change(restart_map, stopstart=False,
1553+ restart_functions=None):
1554+ """A restart_on_change decorator that checks to see if the unit is
1555+ paused. If it is paused then the decorated function doesn't fire.
1556+
1557+ This is provided as a helper, as the @restart_on_change(...) decorator
1558+ is in core.host, yet the openstack specific helpers are in this file
1559+ (contrib.openstack.utils). Thus, this needs to be an optional feature
1560+ for openstack charms (or charms that wish to use the openstack
1561+ pause/resume type features).
1562+
1563+ It is used as follows:
1564+
1565+ from contrib.openstack.utils import (
1566+ pausable_restart_on_change as restart_on_change)
1567+
1568+ @restart_on_change(restart_map, stopstart=<boolean>)
1569+ def some_hook(...):
1570+ pass
1571+
1572+ see core.utils.restart_on_change() for more details.
1573+
1574+ @param f: the function to decorate
1575+ @param restart_map: the restart map {conf_file: [services]}
1576+ @param stopstart: DEFAULT false; whether to stop, start or just restart
1577+ @returns decorator to use a restart_on_change with pausability
1578+ """
1579+ def wrap(f):
1580+ @functools.wraps(f)
1581+ def wrapped_f(*args, **kwargs):
1582+ if is_unit_paused_set():
1583+ return f(*args, **kwargs)
1584+ # otherwise, normal restart_on_change functionality
1585+ return restart_on_change_helper(
1586+ (lambda: f(*args, **kwargs)), restart_map, stopstart,
1587+ restart_functions)
1588+ return wrapped_f
1589+ return wrap
1590
1591=== modified file 'hooks/charmhelpers/contrib/python/packages.py'
1592--- hooks/charmhelpers/contrib/python/packages.py 2015-08-10 16:40:00 +0000
1593+++ hooks/charmhelpers/contrib/python/packages.py 2016-04-21 10:41:47 +0000
1594@@ -19,20 +19,35 @@
1595
1596 import os
1597 import subprocess
1598+import sys
1599
1600 from charmhelpers.fetch import apt_install, apt_update
1601 from charmhelpers.core.hookenv import charm_dir, log
1602
1603-try:
1604- from pip import main as pip_execute
1605-except ImportError:
1606- apt_update()
1607- apt_install('python-pip')
1608- from pip import main as pip_execute
1609-
1610 __author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
1611
1612
1613+def pip_execute(*args, **kwargs):
1614+ """Overriden pip_execute() to stop sys.path being changed.
1615+
1616+ The act of importing main from the pip module seems to cause add wheels
1617+ from the /usr/share/python-wheels which are installed by various tools.
1618+ This function ensures that sys.path remains the same after the call is
1619+ executed.
1620+ """
1621+ try:
1622+ _path = sys.path
1623+ try:
1624+ from pip import main as _pip_execute
1625+ except ImportError:
1626+ apt_update()
1627+ apt_install('python-pip')
1628+ from pip import main as _pip_execute
1629+ _pip_execute(*args, **kwargs)
1630+ finally:
1631+ sys.path = _path
1632+
1633+
1634 def parse_options(given, available):
1635 """Given a set of options, check if available"""
1636 for key, value in sorted(given.items()):
1637@@ -42,8 +57,12 @@
1638 yield "--{0}={1}".format(key, value)
1639
1640
1641-def pip_install_requirements(requirements, **options):
1642- """Install a requirements file """
1643+def pip_install_requirements(requirements, constraints=None, **options):
1644+ """Install a requirements file.
1645+
1646+ :param constraints: Path to pip constraints file.
1647+ http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
1648+ """
1649 command = ["install"]
1650
1651 available_options = ('proxy', 'src', 'log', )
1652@@ -51,8 +70,13 @@
1653 command.append(option)
1654
1655 command.append("-r {0}".format(requirements))
1656- log("Installing from file: {} with options: {}".format(requirements,
1657- command))
1658+ if constraints:
1659+ command.append("-c {0}".format(constraints))
1660+ log("Installing from file: {} with constraints {} "
1661+ "and options: {}".format(requirements, constraints, command))
1662+ else:
1663+ log("Installing from file: {} with options: {}".format(requirements,
1664+ command))
1665 pip_execute(command)
1666
1667
1668
1669=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
1670--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-10-22 13:26:06 +0000
1671+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2016-04-21 10:41:47 +0000
1672@@ -23,11 +23,16 @@
1673 # James Page <james.page@ubuntu.com>
1674 # Adam Gandelman <adamg@ubuntu.com>
1675 #
1676+import bisect
1677+import errno
1678+import hashlib
1679+import six
1680
1681 import os
1682 import shutil
1683 import json
1684 import time
1685+import uuid
1686
1687 from subprocess import (
1688 check_call,
1689@@ -35,8 +40,10 @@
1690 CalledProcessError,
1691 )
1692 from charmhelpers.core.hookenv import (
1693+ local_unit,
1694 relation_get,
1695 relation_ids,
1696+ relation_set,
1697 related_units,
1698 log,
1699 DEBUG,
1700@@ -56,6 +63,8 @@
1701 apt_install,
1702 )
1703
1704+from charmhelpers.core.kernel import modprobe
1705+
1706 KEYRING = '/etc/ceph/ceph.client.{}.keyring'
1707 KEYFILE = '/etc/ceph/ceph.client.{}.key'
1708
1709@@ -67,6 +76,548 @@
1710 err to syslog = {use_syslog}
1711 clog to syslog = {use_syslog}
1712 """
1713+# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs)
1714+powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608]
1715+
1716+
1717+def validator(value, valid_type, valid_range=None):
1718+ """
1719+ Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
1720+ Example input:
1721+ validator(value=1,
1722+ valid_type=int,
1723+ valid_range=[0, 2])
1724+ This says I'm testing value=1. It must be an int inclusive in [0,2]
1725+
1726+ :param value: The value to validate
1727+ :param valid_type: The type that value should be.
1728+ :param valid_range: A range of values that value can assume.
1729+ :return:
1730+ """
1731+ assert isinstance(value, valid_type), "{} is not a {}".format(
1732+ value,
1733+ valid_type)
1734+ if valid_range is not None:
1735+ assert isinstance(valid_range, list), \
1736+ "valid_range must be a list, was given {}".format(valid_range)
1737+ # If we're dealing with strings
1738+ if valid_type is six.string_types:
1739+ assert value in valid_range, \
1740+ "{} is not in the list {}".format(value, valid_range)
1741+ # Integer, float should have a min and max
1742+ else:
1743+ if len(valid_range) != 2:
1744+ raise ValueError(
1745+ "Invalid valid_range list of {} for {}. "
1746+ "List must be [min,max]".format(valid_range, value))
1747+ assert value >= valid_range[0], \
1748+ "{} is less than minimum allowed value of {}".format(
1749+ value, valid_range[0])
1750+ assert value <= valid_range[1], \
1751+ "{} is greater than maximum allowed value of {}".format(
1752+ value, valid_range[1])
1753+
1754+
1755+class PoolCreationError(Exception):
1756+ """
1757+ A custom error to inform the caller that a pool creation failed. Provides an error message
1758+ """
1759+
1760+ def __init__(self, message):
1761+ super(PoolCreationError, self).__init__(message)
1762+
1763+
1764+class Pool(object):
1765+ """
1766+ An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool.
1767+ Do not call create() on this base class as it will not do anything. Instantiate a child class and call create().
1768+ """
1769+
1770+ def __init__(self, service, name):
1771+ self.service = service
1772+ self.name = name
1773+
1774+ # Create the pool if it doesn't exist already
1775+ # To be implemented by subclasses
1776+ def create(self):
1777+ pass
1778+
1779+ def add_cache_tier(self, cache_pool, mode):
1780+ """
1781+ Adds a new cache tier to an existing pool.
1782+ :param cache_pool: six.string_types. The cache tier pool name to add.
1783+ :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"]
1784+ :return: None
1785+ """
1786+ # Check the input types and values
1787+ validator(value=cache_pool, valid_type=six.string_types)
1788+ validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"])
1789+
1790+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool])
1791+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode])
1792+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool])
1793+ check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom'])
1794+
1795+ def remove_cache_tier(self, cache_pool):
1796+ """
1797+ Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete.
1798+ :param cache_pool: six.string_types. The cache tier pool name to remove.
1799+ :return: None
1800+ """
1801+ # read-only is easy, writeback is much harder
1802+ mode = get_cache_mode(self.service, cache_pool)
1803+ if mode == 'readonly':
1804+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
1805+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
1806+
1807+ elif mode == 'writeback':
1808+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'])
1809+ # Flush the cache and wait for it to return
1810+ check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
1811+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
1812+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
1813+
1814+ def get_pgs(self, pool_size):
1815+ """
1816+ :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for
1817+ erasure coded pools
1818+ :return: int. The number of pgs to use.
1819+ """
1820+ validator(value=pool_size, valid_type=int)
1821+ osd_list = get_osds(self.service)
1822+ if not osd_list:
1823+ # NOTE(james-page): Default to 200 for older ceph versions
1824+ # which don't support OSD query from cli
1825+ return 200
1826+
1827+ osd_list_length = len(osd_list)
1828+ # Calculate based on Ceph best practices
1829+ if osd_list_length < 5:
1830+ return 128
1831+ elif 5 < osd_list_length < 10:
1832+ return 512
1833+ elif 10 < osd_list_length < 50:
1834+ return 4096
1835+ else:
1836+ estimate = (osd_list_length * 100) / pool_size
1837+ # Return the next nearest power of 2
1838+ index = bisect.bisect_right(powers_of_two, estimate)
1839+ return powers_of_two[index]
1840+
1841+
1842+class ReplicatedPool(Pool):
1843+ def __init__(self, service, name, pg_num=None, replicas=2):
1844+ super(ReplicatedPool, self).__init__(service=service, name=name)
1845+ self.replicas = replicas
1846+ if pg_num is None:
1847+ self.pg_num = self.get_pgs(self.replicas)
1848+ else:
1849+ self.pg_num = pg_num
1850+
1851+ def create(self):
1852+ if not pool_exists(self.service, self.name):
1853+ # Create it
1854+ cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
1855+ self.name, str(self.pg_num)]
1856+ try:
1857+ check_call(cmd)
1858+ except CalledProcessError:
1859+ raise
1860+
1861+
1862+# Default jerasure erasure coded pool
1863+class ErasurePool(Pool):
1864+ def __init__(self, service, name, erasure_code_profile="default"):
1865+ super(ErasurePool, self).__init__(service=service, name=name)
1866+ self.erasure_code_profile = erasure_code_profile
1867+
1868+ def create(self):
1869+ if not pool_exists(self.service, self.name):
1870+ # Try to find the erasure profile information so we can properly size the pgs
1871+ erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile)
1872+
1873+ # Check for errors
1874+ if erasure_profile is None:
1875+ log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile),
1876+ level=ERROR)
1877+ raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile))
1878+ if 'k' not in erasure_profile or 'm' not in erasure_profile:
1879+ # Error
1880+ log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile),
1881+ level=ERROR)
1882+ raise PoolCreationError(
1883+ message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile))
1884+
1885+ pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m']))
1886+ # Create it
1887+ cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs),
1888+ 'erasure', self.erasure_code_profile]
1889+ try:
1890+ check_call(cmd)
1891+ except CalledProcessError:
1892+ raise
1893+
1894+ """Get an existing erasure code profile if it already exists.
1895+ Returns json formatted output"""
1896+
1897+
1898+def get_mon_map(service):
1899+ """
1900+ Returns the current monitor map.
1901+ :param service: six.string_types. The Ceph user name to run the command under
1902+    :return: dict. The parsed monitor map. :raise: ValueError if the monmap fails to parse.
1903+ Also raises CalledProcessError if our ceph command fails
1904+ """
1905+ try:
1906+ mon_status = check_output(
1907+ ['ceph', '--id', service,
1908+ 'mon_status', '--format=json'])
1909+ try:
1910+ return json.loads(mon_status)
1911+ except ValueError as v:
1912+ log("Unable to parse mon_status json: {}. Error: {}".format(
1913+ mon_status, v.message))
1914+ raise
1915+ except CalledProcessError as e:
1916+ log("mon_status command failed with message: {}".format(
1917+ e.message))
1918+ raise
1919+
1920+
1921+def hash_monitor_names(service):
1922+ """
1923+ Uses the get_mon_map() function to get information about the monitor
1924+ cluster.
1925+ Hash the name of each monitor. Return a sorted list of monitor hashes
1926+ in an ascending order.
1927+ :param service: six.string_types. The Ceph user name to run the command under
1928+    :rtype : list. Sorted list of hexadecimal sha224 hashes of the
1929+    monitor names, in ascending order, or None if the monitor map
1930+    contains no monitors.
1931+    example: ['0a1b...', '4c5d...']
1932+    :raises: ValueError or CalledProcessError on failure.
1933+ """
1934+ try:
1935+ hash_list = []
1936+ monitor_list = get_mon_map(service=service)
1937+ if monitor_list['monmap']['mons']:
1938+ for mon in monitor_list['monmap']['mons']:
1939+ hash_list.append(
1940+ hashlib.sha224(mon['name'].encode('utf-8')).hexdigest())
1941+ return sorted(hash_list)
1942+ else:
1943+ return None
1944+ except (ValueError, CalledProcessError):
1945+ raise
1946+
1947+
1948+def monitor_key_delete(service, key):
1949+ """
1950+    Delete a key and value pair from the monitor cluster.
1951+    :param service: six.string_types. The Ceph user name to run the command under
1952+    :param key: six.string_types. The key to delete.
1953+    :return: None. Can raise CalledProcessError
1954+ """
1955+ try:
1956+ check_output(
1957+ ['ceph', '--id', service,
1958+ 'config-key', 'del', str(key)])
1959+ except CalledProcessError as e:
1960+ log("Monitor config-key put failed with message: {}".format(
1961+ e.output))
1962+ raise
1963+
1964+
1965+def monitor_key_set(service, key, value):
1966+ """
1967+ Sets a key value pair on the monitor cluster.
1968+ :param service: six.string_types. The Ceph user name to run the command under
1969+ :param key: six.string_types. The key to set.
1970+ :param value: The value to set. This will be converted to a string
1971+ before setting
1972+ """
1973+ try:
1974+ check_output(
1975+ ['ceph', '--id', service,
1976+ 'config-key', 'put', str(key), str(value)])
1977+ except CalledProcessError as e:
1978+ log("Monitor config-key put failed with message: {}".format(
1979+ e.output))
1980+ raise
1981+
1982+
1983+def monitor_key_get(service, key):
1984+ """
1985+ Gets the value of an existing key in the monitor cluster.
1986+ :param service: six.string_types. The Ceph user name to run the command under
1987+ :param key: six.string_types. The key to search for.
1988+ :return: Returns the value of that key or None if not found.
1989+ """
1990+ try:
1991+ output = check_output(
1992+ ['ceph', '--id', service,
1993+ 'config-key', 'get', str(key)])
1994+ return output
1995+ except CalledProcessError as e:
1996+ log("Monitor config-key get failed with message: {}".format(
1997+ e.output))
1998+ return None
1999+
2000+
2001+def monitor_key_exists(service, key):
2002+ """
2003+ Searches for the existence of a key in the monitor cluster.
2004+ :param service: six.string_types. The Ceph user name to run the command under
2005+ :param key: six.string_types. The key to search for
2006+ :return: Returns True if the key exists, False if not and raises an
2007+ exception if an unknown error occurs. :raise: CalledProcessError if
2008+ an unknown error occurs
2009+ """
2010+ try:
2011+ check_call(
2012+ ['ceph', '--id', service,
2013+ 'config-key', 'exists', str(key)])
2014+ # I can return true here regardless because Ceph returns
2015+ # ENOENT if the key wasn't found
2016+ return True
2017+ except CalledProcessError as e:
2018+ if e.returncode == errno.ENOENT:
2019+ return False
2020+ else:
2021+ log("Unknown error from ceph config-get exists: {} {}".format(
2022+ e.returncode, e.output))
2023+ raise
2024+
2025+
2026+def get_erasure_profile(service, name):
2027+ """
2028+ :param service: six.string_types. The Ceph user name to run the command under
2029+    :param name: six.string_types. The name of the erasure code profile to fetch.
2030+    :return: dict of the profile settings, or None if the profile cannot be read.
2031+ """
2032+ try:
2033+ out = check_output(['ceph', '--id', service,
2034+ 'osd', 'erasure-code-profile', 'get',
2035+ name, '--format=json'])
2036+ return json.loads(out)
2037+ except (CalledProcessError, OSError, ValueError):
2038+ return None
2039+
2040+
2041+def pool_set(service, pool_name, key, value):
2042+ """
2043+ Sets a value for a RADOS pool in ceph.
2044+ :param service: six.string_types. The Ceph user name to run the command under
2045+ :param pool_name: six.string_types
2046+ :param key: six.string_types
2047+ :param value:
2048+ :return: None. Can raise CalledProcessError
2049+ """
2050+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
2051+ try:
2052+ check_call(cmd)
2053+ except CalledProcessError:
2054+ raise
2055+
2056+
2057+def snapshot_pool(service, pool_name, snapshot_name):
2058+ """
2059+ Snapshots a RADOS pool in ceph.
2060+ :param service: six.string_types. The Ceph user name to run the command under
2061+ :param pool_name: six.string_types
2062+ :param snapshot_name: six.string_types
2063+ :return: None. Can raise CalledProcessError
2064+ """
2065+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name]
2066+ try:
2067+ check_call(cmd)
2068+ except CalledProcessError:
2069+ raise
2070+
2071+
2072+def remove_pool_snapshot(service, pool_name, snapshot_name):
2073+ """
2074+ Remove a snapshot from a RADOS pool in ceph.
2075+ :param service: six.string_types. The Ceph user name to run the command under
2076+ :param pool_name: six.string_types
2077+ :param snapshot_name: six.string_types
2078+ :return: None. Can raise CalledProcessError
2079+ """
2080+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
2081+ try:
2082+ check_call(cmd)
2083+ except CalledProcessError:
2084+ raise
2085+
2086+
2087+# max_bytes should be an int or long
2088+def set_pool_quota(service, pool_name, max_bytes):
2089+ """
2090+ :param service: six.string_types. The Ceph user name to run the command under
2091+ :param pool_name: six.string_types
2092+ :param max_bytes: int or long
2093+ :return: None. Can raise CalledProcessError
2094+ """
2095+ # Set a byte quota on a RADOS pool in ceph.
2096+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
2097+ 'max_bytes', str(max_bytes)]
2098+ try:
2099+ check_call(cmd)
2100+ except CalledProcessError:
2101+ raise
2102+
2103+
2104+def remove_pool_quota(service, pool_name):
2105+ """
2106+    Remove the byte quota from a RADOS pool in ceph.
2107+ :param service: six.string_types. The Ceph user name to run the command under
2108+ :param pool_name: six.string_types
2109+ :return: None. Can raise CalledProcessError
2110+ """
2111+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
2112+ try:
2113+ check_call(cmd)
2114+ except CalledProcessError:
2115+ raise
2116+
2117+
2118+def remove_erasure_profile(service, profile_name):
2119+ """
2120+    Remove an existing erasure code profile. Please see
2121+    http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
2122+    for more details
2123+ :param service: six.string_types. The Ceph user name to run the command under
2124+ :param profile_name: six.string_types
2125+ :return: None. Can raise CalledProcessError
2126+ """
2127+ cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm',
2128+ profile_name]
2129+ try:
2130+ check_call(cmd)
2131+ except CalledProcessError:
2132+ raise
2133+
2134+
2135+def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
2136+ failure_domain='host',
2137+ data_chunks=2, coding_chunks=1,
2138+ locality=None, durability_estimator=None):
2139+ """
2140+ Create a new erasure code profile if one does not already exist for it. Updates
2141+ the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
2142+ for more details
2143+ :param service: six.string_types. The Ceph user name to run the command under
2144+ :param profile_name: six.string_types
2145+ :param erasure_plugin_name: six.string_types
2146+ :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region',
2147+ 'room', 'root', 'row'])
2148+ :param data_chunks: int
2149+ :param coding_chunks: int
2150+ :param locality: int
2151+ :param durability_estimator: int
2152+ :return: None. Can raise CalledProcessError
2153+ """
2154+ # Ensure this failure_domain is allowed by Ceph
2155+ validator(failure_domain, six.string_types,
2156+ ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
2157+
2158+ cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
2159+ 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
2160+ 'ruleset_failure_domain=' + failure_domain]
2161+ if locality is not None and durability_estimator is not None:
2162+ raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
2163+
2164+ # Add plugin specific information
2165+ if locality is not None:
2166+ # For local erasure codes
2167+ cmd.append('l=' + str(locality))
2168+ if durability_estimator is not None:
2169+ # For Shec erasure codes
2170+ cmd.append('c=' + str(durability_estimator))
2171+
2172+ if erasure_profile_exists(service, profile_name):
2173+ cmd.append('--force')
2174+
2175+ try:
2176+ check_call(cmd)
2177+ except CalledProcessError:
2178+ raise
2179+
2180+
2181+def rename_pool(service, old_name, new_name):
2182+ """
2183+ Rename a Ceph pool from old_name to new_name
2184+ :param service: six.string_types. The Ceph user name to run the command under
2185+ :param old_name: six.string_types
2186+ :param new_name: six.string_types
2187+ :return: None
2188+ """
2189+ validator(value=old_name, valid_type=six.string_types)
2190+ validator(value=new_name, valid_type=six.string_types)
2191+
2192+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
2193+ check_call(cmd)
2194+
2195+
2196+def erasure_profile_exists(service, name):
2197+ """
2198+ Check to see if an Erasure code profile already exists.
2199+ :param service: six.string_types. The Ceph user name to run the command under
2200+ :param name: six.string_types
2201+    :return: bool. True if the profile exists, False otherwise.
2202+ """
2203+ validator(value=name, valid_type=six.string_types)
2204+ try:
2205+ check_call(['ceph', '--id', service,
2206+ 'osd', 'erasure-code-profile', 'get',
2207+ name])
2208+ return True
2209+ except CalledProcessError:
2210+ return False
2211+
2212+
2213+def get_cache_mode(service, pool_name):
2214+ """
2215+ Find the current caching mode of the pool_name given.
2216+ :param service: six.string_types. The Ceph user name to run the command under
2217+ :param pool_name: six.string_types
2218+    :return: six.string_types or None. The cache mode of the pool, or None if not found.
2219+ """
2220+ validator(value=service, valid_type=six.string_types)
2221+ validator(value=pool_name, valid_type=six.string_types)
2222+ out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json'])
2223+ try:
2224+ osd_json = json.loads(out)
2225+ for pool in osd_json['pools']:
2226+ if pool['pool_name'] == pool_name:
2227+ return pool['cache_mode']
2228+ return None
2229+ except ValueError:
2230+ raise
2231+
2232+
2233+def pool_exists(service, name):
2234+ """Check to see if a RADOS pool already exists."""
2235+ try:
2236+ out = check_output(['rados', '--id', service,
2237+ 'lspools']).decode('UTF-8')
2238+ except CalledProcessError:
2239+ return False
2240+
2241+ return name in out
2242+
2243+
2244+def get_osds(service):
2245+ """Return a list of all Ceph Object Storage Daemons currently in the
2246+ cluster.
2247+ """
2248+ version = ceph_version()
2249+ if version and version >= '0.56':
2250+ return json.loads(check_output(['ceph', '--id', service,
2251+ 'osd', 'ls',
2252+ '--format=json']).decode('UTF-8'))
2253+
2254+ return None
2255
2256
2257 def install():
2258@@ -96,53 +647,37 @@
2259 check_call(cmd)
2260
2261
2262-def pool_exists(service, name):
2263- """Check to see if a RADOS pool already exists."""
2264- try:
2265- out = check_output(['rados', '--id', service,
2266- 'lspools']).decode('UTF-8')
2267- except CalledProcessError:
2268- return False
2269-
2270- return name in out
2271-
2272-
2273-def get_osds(service):
2274- """Return a list of all Ceph Object Storage Daemons currently in the
2275- cluster.
2276- """
2277- version = ceph_version()
2278- if version and version >= '0.56':
2279- return json.loads(check_output(['ceph', '--id', service,
2280- 'osd', 'ls',
2281- '--format=json']).decode('UTF-8'))
2282-
2283- return None
2284-
2285-
2286-def create_pool(service, name, replicas=3):
2287+def update_pool(client, pool, settings):
2288+ cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
2289+ for k, v in six.iteritems(settings):
2290+ cmd.append(k)
2291+ cmd.append(v)
2292+
2293+ check_call(cmd)
2294+
2295+
2296+def create_pool(service, name, replicas=3, pg_num=None):
2297 """Create a new RADOS pool."""
2298 if pool_exists(service, name):
2299 log("Ceph pool {} already exists, skipping creation".format(name),
2300 level=WARNING)
2301 return
2302
2303- # Calculate the number of placement groups based
2304- # on upstream recommended best practices.
2305- osds = get_osds(service)
2306- if osds:
2307- pgnum = (len(osds) * 100 // replicas)
2308- else:
2309- # NOTE(james-page): Default to 200 for older ceph versions
2310- # which don't support OSD query from cli
2311- pgnum = 200
2312-
2313- cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
2314- check_call(cmd)
2315-
2316- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
2317- str(replicas)]
2318- check_call(cmd)
2319+ if not pg_num:
2320+ # Calculate the number of placement groups based
2321+ # on upstream recommended best practices.
2322+ osds = get_osds(service)
2323+ if osds:
2324+ pg_num = (len(osds) * 100 // replicas)
2325+ else:
2326+ # NOTE(james-page): Default to 200 for older ceph versions
2327+ # which don't support OSD query from cli
2328+ pg_num = 200
2329+
2330+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)]
2331+ check_call(cmd)
2332+
2333+ update_pool(service, name, settings={'size': str(replicas)})
2334
2335
2336 def delete_pool(service, name):
2337@@ -197,10 +732,10 @@
2338 log('Created new keyfile at %s.' % keyfile, level=INFO)
2339
2340
2341-def get_ceph_nodes():
2342- """Query named relation 'ceph' to determine current nodes."""
2343+def get_ceph_nodes(relation='ceph'):
2344+ """Query named relation to determine current nodes."""
2345 hosts = []
2346- for r_id in relation_ids('ceph'):
2347+ for r_id in relation_ids(relation):
2348 for unit in related_units(r_id):
2349 hosts.append(relation_get('private-address', unit=unit, rid=r_id))
2350
2351@@ -288,17 +823,6 @@
2352 os.chown(data_src_dst, uid, gid)
2353
2354
2355-# TODO: re-use
2356-def modprobe(module):
2357- """Load a kernel module and configure for auto-load on reboot."""
2358- log('Loading kernel module', level=INFO)
2359- cmd = ['modprobe', module]
2360- check_call(cmd)
2361- with open('/etc/modules', 'r+') as modules:
2362- if module not in modules.read():
2363- modules.write(module)
2364-
2365-
2366 def copy_files(src, dst, symlinks=False, ignore=None):
2367 """Copy files from src to dst."""
2368 for item in os.listdir(src):
2369@@ -363,14 +887,14 @@
2370 service_start(svc)
2371
2372
2373-def ensure_ceph_keyring(service, user=None, group=None):
2374+def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'):
2375 """Ensures a ceph keyring is created for a named service and optionally
2376 ensures user and group ownership.
2377
2378 Returns False if no ceph key is available in relation state.
2379 """
2380 key = None
2381- for rid in relation_ids('ceph'):
2382+ for rid in relation_ids(relation):
2383 for unit in related_units(rid):
2384 key = relation_get('key', rid=rid, unit=unit)
2385 if key:
2386@@ -411,17 +935,60 @@
2387
2388 The API is versioned and defaults to version 1.
2389 """
2390- def __init__(self, api_version=1):
2391+
2392+ def __init__(self, api_version=1, request_id=None):
2393 self.api_version = api_version
2394+ if request_id:
2395+ self.request_id = request_id
2396+ else:
2397+ self.request_id = str(uuid.uuid1())
2398 self.ops = []
2399
2400- def add_op_create_pool(self, name, replica_count=3):
2401+ def add_op_create_pool(self, name, replica_count=3, pg_num=None):
2402+ """Adds an operation to create a pool.
2403+
2404+        @param pg_num: optional setting. If not provided, this value
2405+ will be calculated by the broker based on how many OSDs are in the
2406+ cluster at the time of creation. Note that, if provided, this value
2407+ will be capped at the current available maximum.
2408+ """
2409 self.ops.append({'op': 'create-pool', 'name': name,
2410- 'replicas': replica_count})
2411+ 'replicas': replica_count, 'pg_num': pg_num})
2412+
2413+ def set_ops(self, ops):
2414+ """Set request ops to provided value.
2415+
2416+ Useful for injecting ops that come from a previous request
2417+ to allow comparisons to ensure validity.
2418+ """
2419+ self.ops = ops
2420
2421 @property
2422 def request(self):
2423- return json.dumps({'api-version': self.api_version, 'ops': self.ops})
2424+ return json.dumps({'api-version': self.api_version, 'ops': self.ops,
2425+ 'request-id': self.request_id})
2426+
2427+ def _ops_equal(self, other):
2428+ if len(self.ops) == len(other.ops):
2429+ for req_no in range(0, len(self.ops)):
2430+ for key in ['replicas', 'name', 'op', 'pg_num']:
2431+ if self.ops[req_no].get(key) != other.ops[req_no].get(key):
2432+ return False
2433+ else:
2434+ return False
2435+ return True
2436+
2437+ def __eq__(self, other):
2438+ if not isinstance(other, self.__class__):
2439+ return False
2440+ if self.api_version == other.api_version and \
2441+ self._ops_equal(other):
2442+ return True
2443+ else:
2444+ return False
2445+
2446+ def __ne__(self, other):
2447+ return not self.__eq__(other)
2448
2449
2450 class CephBrokerRsp(object):
2451@@ -431,14 +998,198 @@
2452
2453 The API is versioned and defaults to version 1.
2454 """
2455+
2456 def __init__(self, encoded_rsp):
2457 self.api_version = None
2458 self.rsp = json.loads(encoded_rsp)
2459
2460 @property
2461+ def request_id(self):
2462+ return self.rsp.get('request-id')
2463+
2464+ @property
2465 def exit_code(self):
2466 return self.rsp.get('exit-code')
2467
2468 @property
2469 def exit_msg(self):
2470 return self.rsp.get('stderr')
2471+
2472+
2473+# Ceph Broker Conversation:
2474+# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
2475+# and send that request to ceph via the ceph relation. The CephBrokerRq has a
2476+# unique id so that the client can identify which CephBrokerRsp is associated
2477+# with the request. Ceph will also respond to each client unit individually
2478+# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
2479+# via key broker-rsp-glance-0
2480+#
2481+# To use this the charm can just do something like:
2482+#
2483+# from charmhelpers.contrib.storage.linux.ceph import (
2484+# send_request_if_needed,
2485+# is_request_complete,
2486+# CephBrokerRq,
2487+# )
2488+#
2489+# @hooks.hook('ceph-relation-changed')
2490+# def ceph_changed():
2491+# rq = CephBrokerRq()
2492+# rq.add_op_create_pool(name='poolname', replica_count=3)
2493+#
2494+# if is_request_complete(rq):
2495+# <Request complete actions>
2496+# else:
2497+# send_request_if_needed(get_ceph_request())
2498+#
2499+# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
2500+# of glance having sent a request to ceph which ceph has successfully processed
2501+# 'ceph:8': {
2502+# 'ceph/0': {
2503+# 'auth': 'cephx',
2504+# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
2505+# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
2506+# 'ceph-public-address': '10.5.44.103',
2507+# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
2508+# 'private-address': '10.5.44.103',
2509+# },
2510+# 'glance/0': {
2511+# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
2512+# '"ops": [{"replicas": 3, "name": "glance", '
2513+# '"op": "create-pool"}]}'),
2514+# 'private-address': '10.5.44.109',
2515+# },
2516+# }
2517+
2518+def get_previous_request(rid):
2519+ """Return the last ceph broker request sent on a given relation
2520+
2521+ @param rid: Relation id to query for request
2522+ """
2523+ request = None
2524+ broker_req = relation_get(attribute='broker_req', rid=rid,
2525+ unit=local_unit())
2526+ if broker_req:
2527+ request_data = json.loads(broker_req)
2528+ request = CephBrokerRq(api_version=request_data['api-version'],
2529+ request_id=request_data['request-id'])
2530+ request.set_ops(request_data['ops'])
2531+
2532+ return request
2533+
2534+
2535+def get_request_states(request, relation='ceph'):
2536+ """Return a dict of requests per relation id with their corresponding
2537+ completion state.
2538+
2539+ This allows a charm, which has a request for ceph, to see whether there is
2540+ an equivalent request already being processed and if so what state that
2541+ request is in.
2542+
2543+ @param request: A CephBrokerRq object
2544+ """
2545+ complete = []
2546+ requests = {}
2547+ for rid in relation_ids(relation):
2548+ complete = False
2549+ previous_request = get_previous_request(rid)
2550+ if request == previous_request:
2551+ sent = True
2552+ complete = is_request_complete_for_rid(previous_request, rid)
2553+ else:
2554+ sent = False
2555+ complete = False
2556+
2557+ requests[rid] = {
2558+ 'sent': sent,
2559+ 'complete': complete,
2560+ }
2561+
2562+ return requests
2563+
2564+
2565+def is_request_sent(request, relation='ceph'):
2566+ """Check to see if a functionally equivalent request has already been sent
2567+
2568+    Returns True if a similar request has been sent
2569+
2570+ @param request: A CephBrokerRq object
2571+ """
2572+ states = get_request_states(request, relation=relation)
2573+ for rid in states.keys():
2574+ if not states[rid]['sent']:
2575+ return False
2576+
2577+ return True
2578+
2579+
2580+def is_request_complete(request, relation='ceph'):
2581+ """Check to see if a functionally equivalent request has already been
2582+ completed
2583+
2584+    Returns True if a similar request has been completed
2585+
2586+ @param request: A CephBrokerRq object
2587+ """
2588+ states = get_request_states(request, relation=relation)
2589+ for rid in states.keys():
2590+ if not states[rid]['complete']:
2591+ return False
2592+
2593+ return True
2594+
2595+
2596+def is_request_complete_for_rid(request, rid):
2597+ """Check if a given request has been completed on the given relation
2598+
2599+ @param request: A CephBrokerRq object
2600+ @param rid: Relation ID
2601+ """
2602+ broker_key = get_broker_rsp_key()
2603+ for unit in related_units(rid):
2604+ rdata = relation_get(rid=rid, unit=unit)
2605+ if rdata.get(broker_key):
2606+ rsp = CephBrokerRsp(rdata.get(broker_key))
2607+ if rsp.request_id == request.request_id:
2608+ if not rsp.exit_code:
2609+ return True
2610+ else:
2611+ # The remote unit sent no reply targeted at this unit so either the
2612+ # remote ceph cluster does not support unit targeted replies or it
2613+ # has not processed our request yet.
2614+ if rdata.get('broker_rsp'):
2615+ request_data = json.loads(rdata['broker_rsp'])
2616+ if request_data.get('request-id'):
2617+ log('Ignoring legacy broker_rsp without unit key as remote '
2618+ 'service supports unit specific replies', level=DEBUG)
2619+ else:
2620+ log('Using legacy broker_rsp as remote service does not '
2621+ 'supports unit specific replies', level=DEBUG)
2622+ rsp = CephBrokerRsp(rdata['broker_rsp'])
2623+ if not rsp.exit_code:
2624+ return True
2625+
2626+ return False
2627+
2628+
2629+def get_broker_rsp_key():
2630+ """Return broker response key for this unit
2631+
2632+ This is the key that ceph is going to use to pass request status
2633+ information back to this unit
2634+ """
2635+ return 'broker-rsp-' + local_unit().replace('/', '-')
2636+
2637+
2638+def send_request_if_needed(request, relation='ceph'):
2639+ """Send broker request if an equivalent request has not already been sent
2640+
2641+ @param request: A CephBrokerRq object
2642+ """
2643+ if is_request_sent(request, relation=relation):
2644+ log('Request already sent but not complete, not sending new request',
2645+ level=DEBUG)
2646+ else:
2647+ for rid in relation_ids(relation):
2648+ log('Sending request {}'.format(request.request_id), level=DEBUG)
2649+ relation_set(relation_id=rid, broker_req=request.request)
2650
2651=== modified file 'hooks/charmhelpers/contrib/storage/linux/loopback.py'
2652--- hooks/charmhelpers/contrib/storage/linux/loopback.py 2015-01-26 10:44:46 +0000
2653+++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2016-04-21 10:41:47 +0000
2654@@ -76,3 +76,13 @@
2655 check_call(cmd)
2656
2657 return create_loopback(path)
2658+
2659+
2660+def is_mapped_loopback_device(device):
2661+ """
2662+ Checks if a given device name is an existing/mapped loopback device.
2663+ :param device: str: Full path to the device (eg, /dev/loop1).
2664+ :returns: str: Path to the backing file if is a loopback device
2665+ empty string otherwise
2666+ """
2667+ return loopback_devices().get(device, "")
2668
2669=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
2670--- hooks/charmhelpers/contrib/storage/linux/utils.py 2015-10-22 13:26:06 +0000
2671+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2016-04-21 10:41:47 +0000
2672@@ -43,9 +43,10 @@
2673
2674 :param block_device: str: Full path of block device to clean.
2675 '''
2676+ # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
2677 # sometimes sgdisk exits non-zero; this is OK, dd will clean up
2678- call(['sgdisk', '--zap-all', '--mbrtogpt',
2679- '--clear', block_device])
2680+ call(['sgdisk', '--zap-all', '--', block_device])
2681+ call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
2682 dev_end = check_output(['blockdev', '--getsz',
2683 block_device]).decode('UTF-8')
2684 gpt_end = int(dev_end.split()[0]) - 100
2685
2686=== modified file 'hooks/charmhelpers/core/hookenv.py'
2687--- hooks/charmhelpers/core/hookenv.py 2015-10-22 13:26:06 +0000
2688+++ hooks/charmhelpers/core/hookenv.py 2016-04-21 10:41:47 +0000
2689@@ -34,23 +34,6 @@
2690 import tempfile
2691 from subprocess import CalledProcessError
2692
2693-try:
2694- from charmhelpers.cli import cmdline
2695-except ImportError as e:
2696- # due to the anti-pattern of partially synching charmhelpers directly
2697- # into charms, it's possible that charmhelpers.cli is not available;
2698- # if that's the case, they don't really care about using the cli anyway,
2699- # so mock it out
2700- if str(e) == 'No module named cli':
2701- class cmdline(object):
2702- @classmethod
2703- def subcommand(cls, *args, **kwargs):
2704- def _wrap(func):
2705- return func
2706- return _wrap
2707- else:
2708- raise
2709-
2710 import six
2711 if not six.PY3:
2712 from UserDict import UserDict
2713@@ -91,6 +74,7 @@
2714 res = func(*args, **kwargs)
2715 cache[key] = res
2716 return res
2717+ wrapper._wrapped = func
2718 return wrapper
2719
2720
2721@@ -190,7 +174,6 @@
2722 return os.environ.get('JUJU_RELATION', None)
2723
2724
2725-@cmdline.subcommand()
2726 @cached
2727 def relation_id(relation_name=None, service_or_unit=None):
2728 """The relation ID for the current or a specified relation"""
2729@@ -216,13 +199,11 @@
2730 return os.environ.get('JUJU_REMOTE_UNIT', None)
2731
2732
2733-@cmdline.subcommand()
2734 def service_name():
2735 """The name service group this unit belongs to"""
2736 return local_unit().split('/')[0]
2737
2738
2739-@cmdline.subcommand()
2740 @cached
2741 def remote_service_name(relid=None):
2742 """The remote service name for a given relation-id (or the current relation)"""
2743@@ -510,6 +491,19 @@
2744
2745
2746 @cached
2747+def peer_relation_id():
2748+ '''Get the peers relation id if a peers relation has been joined, else None.'''
2749+ md = metadata()
2750+ section = md.get('peers')
2751+ if section:
2752+ for key in section:
2753+ relids = relation_ids(key)
2754+ if relids:
2755+ return relids[0]
2756+ return None
2757+
2758+
2759+@cached
2760 def relation_to_interface(relation_name):
2761 """
2762 Given the name of a relation, return the interface that relation uses.
2763@@ -523,12 +517,12 @@
2764 def relation_to_role_and_interface(relation_name):
2765 """
2766 Given the name of a relation, return the role and the name of the interface
2767- that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).
2768+ that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
2769
2770 :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
2771 """
2772 _metadata = metadata()
2773- for role in ('provides', 'requires', 'peer'):
2774+ for role in ('provides', 'requires', 'peers'):
2775 interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
2776 if interface:
2777 return role, interface
2778@@ -540,7 +534,7 @@
2779 """
2780 Given a role and interface name, return a list of relation names for the
2781 current charm that use that interface under that role (where role is one
2782- of ``provides``, ``requires``, or ``peer``).
2783+ of ``provides``, ``requires``, or ``peers``).
2784
2785 :returns: A list of relation names.
2786 """
2787@@ -561,7 +555,7 @@
2788 :returns: A list of relation names.
2789 """
2790 results = []
2791- for role in ('provides', 'requires', 'peer'):
2792+ for role in ('provides', 'requires', 'peers'):
2793 results.extend(role_and_interface_to_relations(role, interface_name))
2794 return results
2795
2796@@ -642,6 +636,38 @@
2797 return unit_get('private-address')
2798
2799
2800+@cached
2801+def storage_get(attribute=None, storage_id=None):
2802+ """Get storage attributes"""
2803+ _args = ['storage-get', '--format=json']
2804+ if storage_id:
2805+ _args.extend(('-s', storage_id))
2806+ if attribute:
2807+ _args.append(attribute)
2808+ try:
2809+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
2810+ except ValueError:
2811+ return None
2812+
2813+
2814+@cached
2815+def storage_list(storage_name=None):
2816+ """List the storage IDs for the unit"""
2817+ _args = ['storage-list', '--format=json']
2818+ if storage_name:
2819+ _args.append(storage_name)
2820+ try:
2821+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
2822+ except ValueError:
2823+ return None
2824+ except OSError as e:
2825+ import errno
2826+ if e.errno == errno.ENOENT:
2827+ # storage-list does not exist
2828+ return []
2829+ raise
2830+
2831+
2832 class UnregisteredHookError(Exception):
2833 """Raised when an undefined hook is called"""
2834 pass
2835@@ -786,25 +812,28 @@
2836
2837
2838 def status_get():
2839- """Retrieve the previously set juju workload state
2840-
2841- If the status-set command is not found then assume this is juju < 1.23 and
2842- return 'unknown'
2843+ """Retrieve the previously set juju workload state and message
2844+
2845+ If the status-get command is not found then assume this is juju < 1.23 and
2846+ return 'unknown', ""
2847+
2848 """
2849- cmd = ['status-get']
2850+ cmd = ['status-get', "--format=json", "--include-data"]
2851 try:
2852- raw_status = subprocess.check_output(cmd, universal_newlines=True)
2853- status = raw_status.rstrip()
2854- return status
2855+ raw_status = subprocess.check_output(cmd)
2856 except OSError as e:
2857 if e.errno == errno.ENOENT:
2858- return 'unknown'
2859+ return ('unknown', "")
2860 else:
2861 raise
2862+ else:
2863+ status = json.loads(raw_status.decode("UTF-8"))
2864+ return (status["status"], status["message"])
2865
2866
2867 def translate_exc(from_exc, to_exc):
2868 def inner_translate_exc1(f):
2869+ @wraps(f)
2870 def inner_translate_exc2(*args, **kwargs):
2871 try:
2872 return f(*args, **kwargs)
2873@@ -849,6 +878,58 @@
2874 subprocess.check_call(cmd)
2875
2876
2877+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
2878+def payload_register(ptype, klass, pid):
2879+ """ is used while a hook is running to let Juju know that a
2880+ payload has been started."""
2881+ cmd = ['payload-register']
2882+ for x in [ptype, klass, pid]:
2883+ cmd.append(x)
2884+ subprocess.check_call(cmd)
2885+
2886+
2887+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
2888+def payload_unregister(klass, pid):
2889+ """ is used while a hook is running to let Juju know
2890+ that a payload has been manually stopped. The <class> and <id> provided
2891+ must match a payload that has been previously registered with juju using
2892+ payload-register."""
2893+ cmd = ['payload-unregister']
2894+ for x in [klass, pid]:
2895+ cmd.append(x)
2896+ subprocess.check_call(cmd)
2897+
2898+
2899+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
2900+def payload_status_set(klass, pid, status):
2901+ """is used to update the current status of a registered payload.
2902+ The <class> and <id> provided must match a payload that has been previously
2903+ registered with juju using payload-register. The <status> must be one of the
2904+ follow: starting, started, stopping, stopped"""
2905+ cmd = ['payload-status-set']
2906+ for x in [klass, pid, status]:
2907+ cmd.append(x)
2908+ subprocess.check_call(cmd)
2909+
2910+
2911+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
2912+def resource_get(name):
2913+ """used to fetch the resource path of the given name.
2914+
2915+ <name> must match a name of defined resource in metadata.yaml
2916+
2917+ returns either a path or False if resource not available
2918+ """
2919+ if not name:
2920+ return False
2921+
2922+ cmd = ['resource-get', name]
2923+ try:
2924+ return subprocess.check_output(cmd).decode('UTF-8')
2925+ except subprocess.CalledProcessError:
2926+ return False
2927+
2928+
2929 @cached
2930 def juju_version():
2931 """Full version string (eg. '1.23.3.1-trusty-amd64')"""
2932@@ -913,3 +994,16 @@
2933 for callback, args, kwargs in reversed(_atexit):
2934 callback(*args, **kwargs)
2935 del _atexit[:]
2936+
2937+
2938+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
2939+def network_get_primary_address(binding):
2940+ '''
2941+ Retrieve the primary network address for a named binding
2942+
2943+ :param binding: string. The name of a relation of extra-binding
2944+ :return: string. The primary IP address for the named binding
2945+ :raise: NotImplementedError if run on Juju < 2.0
2946+ '''
2947+ cmd = ['network-get', '--primary-address', binding]
2948+ return subprocess.check_output(cmd).decode('UTF-8').strip()
2949
2950=== modified file 'hooks/charmhelpers/core/host.py'
2951--- hooks/charmhelpers/core/host.py 2015-10-22 13:26:06 +0000
2952+++ hooks/charmhelpers/core/host.py 2016-04-21 10:41:47 +0000
2953@@ -30,6 +30,8 @@
2954 import string
2955 import subprocess
2956 import hashlib
2957+import functools
2958+import itertools
2959 from contextlib import contextmanager
2960 from collections import OrderedDict
2961
2962@@ -63,54 +65,96 @@
2963 return service_result
2964
2965
2966-def service_pause(service_name, init_dir=None):
2967+def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
2968 """Pause a system service.
2969
2970 Stop it, and prevent it from starting again at boot."""
2971- if init_dir is None:
2972- init_dir = "/etc/init"
2973- stopped = service_stop(service_name)
2974- # XXX: Support systemd too
2975- override_path = os.path.join(
2976- init_dir, '{}.conf.override'.format(service_name))
2977- with open(override_path, 'w') as fh:
2978- fh.write("manual\n")
2979+ stopped = True
2980+ if service_running(service_name):
2981+ stopped = service_stop(service_name)
2982+ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
2983+ sysv_file = os.path.join(initd_dir, service_name)
2984+ if init_is_systemd():
2985+ service('disable', service_name)
2986+ elif os.path.exists(upstart_file):
2987+ override_path = os.path.join(
2988+ init_dir, '{}.override'.format(service_name))
2989+ with open(override_path, 'w') as fh:
2990+ fh.write("manual\n")
2991+ elif os.path.exists(sysv_file):
2992+ subprocess.check_call(["update-rc.d", service_name, "disable"])
2993+ else:
2994+ raise ValueError(
2995+ "Unable to detect {0} as SystemD, Upstart {1} or"
2996+ " SysV {2}".format(
2997+ service_name, upstart_file, sysv_file))
2998 return stopped
2999
3000
3001-def service_resume(service_name, init_dir=None):
3002+def service_resume(service_name, init_dir="/etc/init",
3003+ initd_dir="/etc/init.d"):
3004 """Resume a system service.
3005
3006 Reenable starting again at boot. Start the service"""
3007- # XXX: Support systemd too
3008- if init_dir is None:
3009- init_dir = "/etc/init"
3010- override_path = os.path.join(
3011- init_dir, '{}.conf.override'.format(service_name))
3012- if os.path.exists(override_path):
3013- os.unlink(override_path)
3014- started = service_start(service_name)
3015+ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
3016+ sysv_file = os.path.join(initd_dir, service_name)
3017+ if init_is_systemd():
3018+ service('enable', service_name)
3019+ elif os.path.exists(upstart_file):
3020+ override_path = os.path.join(
3021+ init_dir, '{}.override'.format(service_name))
3022+ if os.path.exists(override_path):
3023+ os.unlink(override_path)
3024+ elif os.path.exists(sysv_file):
3025+ subprocess.check_call(["update-rc.d", service_name, "enable"])
3026+ else:
3027+ raise ValueError(
3028+ "Unable to detect {0} as SystemD, Upstart {1} or"
3029+ " SysV {2}".format(
3030+ service_name, upstart_file, sysv_file))
3031+
3032+ started = service_running(service_name)
3033+ if not started:
3034+ started = service_start(service_name)
3035 return started
3036
3037
3038 def service(action, service_name):
3039 """Control a system service"""
3040- cmd = ['service', service_name, action]
3041+ if init_is_systemd():
3042+ cmd = ['systemctl', action, service_name]
3043+ else:
3044+ cmd = ['service', service_name, action]
3045 return subprocess.call(cmd) == 0
3046
3047
3048-def service_running(service):
3049+def systemv_services_running():
3050+ output = subprocess.check_output(
3051+ ['service', '--status-all'],
3052+ stderr=subprocess.STDOUT).decode('UTF-8')
3053+ return [row.split()[-1] for row in output.split('\n') if '[ + ]' in row]
3054+
3055+
3056+def service_running(service_name):
3057 """Determine whether a system service is running"""
3058- try:
3059- output = subprocess.check_output(
3060- ['service', service, 'status'],
3061- stderr=subprocess.STDOUT).decode('UTF-8')
3062- except subprocess.CalledProcessError:
3063- return False
3064+ if init_is_systemd():
3065+ return service('is-active', service_name)
3066 else:
3067- if ("start/running" in output or "is running" in output):
3068- return True
3069+ try:
3070+ output = subprocess.check_output(
3071+ ['service', service_name, 'status'],
3072+ stderr=subprocess.STDOUT).decode('UTF-8')
3073+ except subprocess.CalledProcessError:
3074+ return False
3075 else:
3076+ # This works for upstart scripts where the 'service' command
3077+ # returns a consistent string to represent running 'start/running'
3078+ if ("start/running" in output or "is running" in output or
3079+ "up and running" in output):
3080+ return True
3081+ # Check System V scripts init script return codes
3082+ if service_name in systemv_services_running():
3083+ return True
3084 return False
3085
3086
3087@@ -126,8 +170,29 @@
3088 return True
3089
3090
3091-def adduser(username, password=None, shell='/bin/bash', system_user=False):
3092- """Add a user to the system"""
3093+SYSTEMD_SYSTEM = '/run/systemd/system'
3094+
3095+
3096+def init_is_systemd():
3097+ """Return True if the host system uses systemd, False otherwise."""
3098+ return os.path.isdir(SYSTEMD_SYSTEM)
3099+
3100+
3101+def adduser(username, password=None, shell='/bin/bash', system_user=False,
3102+ primary_group=None, secondary_groups=None):
3103+ """Add a user to the system.
3104+
3105+ Will log but otherwise succeed if the user already exists.
3106+
3107+ :param str username: Username to create
3108+ :param str password: Password for user; if ``None``, create a system user
3109+ :param str shell: The default shell for the user
3110+ :param bool system_user: Whether to create a login or system user
3111+ :param str primary_group: Primary group for user; defaults to username
3112+ :param list secondary_groups: Optional list of additional groups
3113+
3114+ :returns: The password database entry struct, as returned by `pwd.getpwnam`
3115+ """
3116 try:
3117 user_info = pwd.getpwnam(username)
3118 log('user {0} already exists!'.format(username))
3119@@ -142,12 +207,32 @@
3120 '--shell', shell,
3121 '--password', password,
3122 ])
3123+ if not primary_group:
3124+ try:
3125+ grp.getgrnam(username)
3126+ primary_group = username # avoid "group exists" error
3127+ except KeyError:
3128+ pass
3129+ if primary_group:
3130+ cmd.extend(['-g', primary_group])
3131+ if secondary_groups:
3132+ cmd.extend(['-G', ','.join(secondary_groups)])
3133 cmd.append(username)
3134 subprocess.check_call(cmd)
3135 user_info = pwd.getpwnam(username)
3136 return user_info
3137
3138
3139+def user_exists(username):
3140+ """Check if a user exists"""
3141+ try:
3142+ pwd.getpwnam(username)
3143+ user_exists = True
3144+ except KeyError:
3145+ user_exists = False
3146+ return user_exists
3147+
3148+
3149 def add_group(group_name, system_group=False):
3150 """Add a group to the system"""
3151 try:
3152@@ -229,14 +314,12 @@
3153
3154
3155 def fstab_remove(mp):
3156- """Remove the given mountpoint entry from /etc/fstab
3157- """
3158+ """Remove the given mountpoint entry from /etc/fstab"""
3159 return Fstab.remove_by_mountpoint(mp)
3160
3161
3162 def fstab_add(dev, mp, fs, options=None):
3163- """Adds the given device entry to the /etc/fstab file
3164- """
3165+ """Adds the given device entry to the /etc/fstab file"""
3166 return Fstab.add(dev, mp, fs, options=options)
3167
3168
3169@@ -280,9 +363,19 @@
3170 return system_mounts
3171
3172
3173+def fstab_mount(mountpoint):
3174+ """Mount filesystem using fstab"""
3175+ cmd_args = ['mount', mountpoint]
3176+ try:
3177+ subprocess.check_output(cmd_args)
3178+ except subprocess.CalledProcessError as e:
3179+ log('Error mounting {}\n{}'.format(mountpoint, e.output))
3180+ return False
3181+ return True
3182+
3183+
3184 def file_hash(path, hash_type='md5'):
3185- """
3186- Generate a hash checksum of the contents of 'path' or None if not found.
3187+ """Generate a hash checksum of the contents of 'path' or None if not found.
3188
3189 :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
3190 such as md5, sha1, sha256, sha512, etc.
3191@@ -297,10 +390,9 @@
3192
3193
3194 def path_hash(path):
3195- """
3196- Generate a hash checksum of all files matching 'path'. Standard wildcards
3197- like '*' and '?' are supported, see documentation for the 'glob' module for
3198- more information.
3199+ """Generate a hash checksum of all files matching 'path'. Standard
3200+ wildcards like '*' and '?' are supported, see documentation for the 'glob'
3201+ module for more information.
3202
3203 :return: dict: A { filename: hash } dictionary for all matched files.
3204 Empty if none found.
3205@@ -312,8 +404,7 @@
3206
3207
3208 def check_hash(path, checksum, hash_type='md5'):
3209- """
3210- Validate a file using a cryptographic checksum.
3211+ """Validate a file using a cryptographic checksum.
3212
3213 :param str checksum: Value of the checksum used to validate the file.
3214 :param str hash_type: Hash algorithm used to generate `checksum`.
3215@@ -328,10 +419,11 @@
3216
3217
3218 class ChecksumError(ValueError):
3219+ """A class derived from Value error to indicate the checksum failed."""
3220 pass
3221
3222
3223-def restart_on_change(restart_map, stopstart=False):
3224+def restart_on_change(restart_map, stopstart=False, restart_functions=None):
3225 """Restart services based on configuration files changing
3226
3227 This function is used a decorator, for example::
3228@@ -349,27 +441,58 @@
3229 restarted if any file matching the pattern got changed, created
3230 or removed. Standard wildcards are supported, see documentation
3231 for the 'glob' module for more information.
3232+
3233+ @param restart_map: {path_file_name: [service_name, ...]
3234+ @param stopstart: DEFAULT false; whether to stop, start OR restart
3235+ @param restart_functions: nonstandard functions to use to restart services
3236+ {svc: func, ...}
3237+ @returns result from decorated function
3238 """
3239 def wrap(f):
3240+ @functools.wraps(f)
3241 def wrapped_f(*args, **kwargs):
3242- checksums = {path: path_hash(path) for path in restart_map}
3243- f(*args, **kwargs)
3244- restarts = []
3245- for path in restart_map:
3246- if path_hash(path) != checksums[path]:
3247- restarts += restart_map[path]
3248- services_list = list(OrderedDict.fromkeys(restarts))
3249- if not stopstart:
3250- for service_name in services_list:
3251- service('restart', service_name)
3252- else:
3253- for action in ['stop', 'start']:
3254- for service_name in services_list:
3255- service(action, service_name)
3256+ return restart_on_change_helper(
3257+ (lambda: f(*args, **kwargs)), restart_map, stopstart,
3258+ restart_functions)
3259 return wrapped_f
3260 return wrap
3261
3262
3263+def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
3264+ restart_functions=None):
3265+ """Helper function to perform the restart_on_change function.
3266+
3267+ This is provided for decorators to restart services if files described
3268+ in the restart_map have changed after an invocation of lambda_f().
3269+
3270+ @param lambda_f: function to call.
3271+ @param restart_map: {file: [service, ...]}
3272+ @param stopstart: whether to stop, start or restart a service
3273+ @param restart_functions: nonstandard functions to use to restart services
3274+ {svc: func, ...}
3275+ @returns result of lambda_f()
3276+ """
3277+ if restart_functions is None:
3278+ restart_functions = {}
3279+ checksums = {path: path_hash(path) for path in restart_map}
3280+ r = lambda_f()
3281+ # create a list of lists of the services to restart
3282+ restarts = [restart_map[path]
3283+ for path in restart_map
3284+ if path_hash(path) != checksums[path]]
3285+ # create a flat list of ordered services without duplicates from lists
3286+ services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
3287+ if services_list:
3288+ actions = ('stop', 'start') if stopstart else ('restart',)
3289+ for service_name in services_list:
3290+ if service_name in restart_functions:
3291+ restart_functions[service_name](service_name)
3292+ else:
3293+ for action in actions:
3294+ service(action, service_name)
3295+ return r
3296+
3297+
3298 def lsb_release():
3299 """Return /etc/lsb-release in a dict"""
3300 d = {}
3301@@ -396,36 +519,92 @@
3302 return(''.join(random_chars))
3303
3304
3305-def list_nics(nic_type):
3306- '''Return a list of nics of given type(s)'''
3307+def is_phy_iface(interface):
3308+ """Returns True if interface is not virtual, otherwise False."""
3309+ if interface:
3310+ sys_net = '/sys/class/net'
3311+ if os.path.isdir(sys_net):
3312+ for iface in glob.glob(os.path.join(sys_net, '*')):
3313+ if '/virtual/' in os.path.realpath(iface):
3314+ continue
3315+
3316+ if interface == os.path.basename(iface):
3317+ return True
3318+
3319+ return False
3320+
3321+
3322+def get_bond_master(interface):
3323+ """Returns bond master if interface is bond slave otherwise None.
3324+
3325+ NOTE: the provided interface is expected to be physical
3326+ """
3327+ if interface:
3328+ iface_path = '/sys/class/net/%s' % (interface)
3329+ if os.path.exists(iface_path):
3330+ if '/virtual/' in os.path.realpath(iface_path):
3331+ return None
3332+
3333+ master = os.path.join(iface_path, 'master')
3334+ if os.path.exists(master):
3335+ master = os.path.realpath(master)
3336+ # make sure it is a bond master
3337+ if os.path.exists(os.path.join(master, 'bonding')):
3338+ return os.path.basename(master)
3339+
3340+ return None
3341+
3342+
3343+def list_nics(nic_type=None):
3344+ """Return a list of nics of given type(s)"""
3345 if isinstance(nic_type, six.string_types):
3346 int_types = [nic_type]
3347 else:
3348 int_types = nic_type
3349+
3350 interfaces = []
3351- for int_type in int_types:
3352- cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
3353+ if nic_type:
3354+ for int_type in int_types:
3355+ cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
3356+ ip_output = subprocess.check_output(cmd).decode('UTF-8')
3357+ ip_output = ip_output.split('\n')
3358+ ip_output = (line for line in ip_output if line)
3359+ for line in ip_output:
3360+ if line.split()[1].startswith(int_type):
3361+ matched = re.search('.*: (' + int_type +
3362+ r'[0-9]+\.[0-9]+)@.*', line)
3363+ if matched:
3364+ iface = matched.groups()[0]
3365+ else:
3366+ iface = line.split()[1].replace(":", "")
3367+
3368+ if iface not in interfaces:
3369+ interfaces.append(iface)
3370+ else:
3371+ cmd = ['ip', 'a']
3372 ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
3373- ip_output = (line for line in ip_output if line)
3374+ ip_output = (line.strip() for line in ip_output if line)
3375+
3376+ key = re.compile(r'^[0-9]+:\s+(.+):')
3377 for line in ip_output:
3378- if line.split()[1].startswith(int_type):
3379- matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line)
3380- if matched:
3381- interface = matched.groups()[0]
3382- else:
3383- interface = line.split()[1].replace(":", "")
3384- interfaces.append(interface)
3385+ matched = re.search(key, line)
3386+ if matched:
3387+ iface = matched.group(1)
3388+ iface = iface.partition("@")[0]
3389+ if iface not in interfaces:
3390+ interfaces.append(iface)
3391
3392 return interfaces
3393
3394
3395 def set_nic_mtu(nic, mtu):
3396- '''Set MTU on a network interface'''
3397+ """Set the Maximum Transmission Unit (MTU) on a network interface."""
3398 cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
3399 subprocess.check_call(cmd)
3400
3401
3402 def get_nic_mtu(nic):
3403+ """Return the Maximum Transmission Unit (MTU) for a network interface."""
3404 cmd = ['ip', 'addr', 'show', nic]
3405 ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
3406 mtu = ""
3407@@ -437,6 +616,7 @@
3408
3409
3410 def get_nic_hwaddr(nic):
3411+ """Return the Media Access Control (MAC) for a network interface."""
3412 cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
3413 ip_output = subprocess.check_output(cmd).decode('UTF-8')
3414 hwaddr = ""
3415@@ -447,7 +627,7 @@
3416
3417
3418 def cmp_pkgrevno(package, revno, pkgcache=None):
3419- '''Compare supplied revno with the revno of the installed package
3420+ """Compare supplied revno with the revno of the installed package
3421
3422 * 1 => Installed revno is greater than supplied arg
3423 * 0 => Installed revno is the same as supplied arg
3424@@ -456,7 +636,7 @@
3425 This function imports apt_cache function from charmhelpers.fetch if
3426 the pkgcache argument is None. Be sure to add charmhelpers.fetch if
3427 you call this function, or pass an apt_pkg.Cache() instance.
3428- '''
3429+ """
3430 import apt_pkg
3431 if not pkgcache:
3432 from charmhelpers.fetch import apt_cache
3433@@ -466,15 +646,30 @@
3434
3435
3436 @contextmanager
3437-def chdir(d):
3438+def chdir(directory):
3439+ """Change the current working directory to a different directory for a code
3440+ block and return the previous directory after the block exits. Useful to
3441+ run commands from a specified directory.
3442+
3443+ :param str directory: The directory path to change to for this context.
3444+ """
3445 cur = os.getcwd()
3446 try:
3447- yield os.chdir(d)
3448+ yield os.chdir(directory)
3449 finally:
3450 os.chdir(cur)
3451
3452
3453-def chownr(path, owner, group, follow_links=True):
3454+def chownr(path, owner, group, follow_links=True, chowntopdir=False):
3455+ """Recursively change user and group ownership of files and directories
3456+ in given path. Doesn't chown path itself by default, only its children.
3457+
3458+ :param str path: The string path to start changing ownership.
3459+ :param str owner: The owner string to use when looking up the uid.
3460+ :param str group: The group string to use when looking up the gid.
3461+ :param bool follow_links: Also Chown links if True
3462+ :param bool chowntopdir: Also chown path itself if True
3463+ """
3464 uid = pwd.getpwnam(owner).pw_uid
3465 gid = grp.getgrnam(group).gr_gid
3466 if follow_links:
3467@@ -482,6 +677,10 @@
3468 else:
3469 chown = os.lchown
3470
3471+ if chowntopdir:
3472+ broken_symlink = os.path.lexists(path) and not os.path.exists(path)
3473+ if not broken_symlink:
3474+ chown(path, uid, gid)
3475 for root, dirs, files in os.walk(path):
3476 for name in dirs + files:
3477 full = os.path.join(root, name)
3478@@ -491,4 +690,28 @@
3479
3480
3481 def lchownr(path, owner, group):
3482+ """Recursively change user and group ownership of files and directories
3483+ in a given path, not following symbolic links. See the documentation for
3484+ 'os.lchown' for more information.
3485+
3486+ :param str path: The string path to start changing ownership.
3487+ :param str owner: The owner string to use when looking up the uid.
3488+ :param str group: The group string to use when looking up the gid.
3489+ """
3490 chownr(path, owner, group, follow_links=False)
3491+
3492+
3493+def get_total_ram():
3494+ """The total amount of system RAM in bytes.
3495+
3496+ This is what is reported by the OS, and may be overcommitted when
3497+ there are multiple containers hosted on the same machine.
3498+ """
3499+ with open('/proc/meminfo', 'r') as f:
3500+ for line in f.readlines():
3501+ if line:
3502+ key, value, unit = line.split()
3503+ if key == 'MemTotal:':
3504+ assert unit == 'kB', 'Unknown unit'
3505+ return int(value) * 1024 # Classic, not KiB.
3506+ raise NotImplementedError()
3507
3508=== added file 'hooks/charmhelpers/core/hugepage.py'
3509--- hooks/charmhelpers/core/hugepage.py 1970-01-01 00:00:00 +0000
3510+++ hooks/charmhelpers/core/hugepage.py 2016-04-21 10:41:47 +0000
3511@@ -0,0 +1,71 @@
3512+# -*- coding: utf-8 -*-
3513+
3514+# Copyright 2014-2015 Canonical Limited.
3515+#
3516+# This file is part of charm-helpers.
3517+#
3518+# charm-helpers is free software: you can redistribute it and/or modify
3519+# it under the terms of the GNU Lesser General Public License version 3 as
3520+# published by the Free Software Foundation.
3521+#
3522+# charm-helpers is distributed in the hope that it will be useful,
3523+# but WITHOUT ANY WARRANTY; without even the implied warranty of
3524+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3525+# GNU Lesser General Public License for more details.
3526+#
3527+# You should have received a copy of the GNU Lesser General Public License
3528+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3529+
3530+import yaml
3531+from charmhelpers.core import fstab
3532+from charmhelpers.core import sysctl
3533+from charmhelpers.core.host import (
3534+ add_group,
3535+ add_user_to_group,
3536+ fstab_mount,
3537+ mkdir,
3538+)
3539+from charmhelpers.core.strutils import bytes_from_string
3540+from subprocess import check_output
3541+
3542+
3543+def hugepage_support(user, group='hugetlb', nr_hugepages=256,
3544+ max_map_count=65536, mnt_point='/run/hugepages/kvm',
3545+ pagesize='2MB', mount=True, set_shmmax=False):
3546+ """Enable hugepages on system.
3547+
3548+ Args:
3549+ user (str) -- Username to allow access to hugepages to
3550+ group (str) -- Group name to own hugepages
3551+ nr_hugepages (int) -- Number of pages to reserve
3552+ max_map_count (int) -- Number of Virtual Memory Areas a process can own
3553+ mnt_point (str) -- Directory to mount hugepages on
3554+ pagesize (str) -- Size of hugepages
3555+ mount (bool) -- Whether to Mount hugepages
3556+ """
3557+ group_info = add_group(group)
3558+ gid = group_info.gr_gid
3559+ add_user_to_group(user, group)
3560+ if max_map_count < 2 * nr_hugepages:
3561+ max_map_count = 2 * nr_hugepages
3562+ sysctl_settings = {
3563+ 'vm.nr_hugepages': nr_hugepages,
3564+ 'vm.max_map_count': max_map_count,
3565+ 'vm.hugetlb_shm_group': gid,
3566+ }
3567+ if set_shmmax:
3568+ shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
3569+ shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
3570+ if shmmax_minsize > shmmax_current:
3571+ sysctl_settings['kernel.shmmax'] = shmmax_minsize
3572+ sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
3573+ mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
3574+ lfstab = fstab.Fstab()
3575+ fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
3576+ if fstab_entry:
3577+ lfstab.remove_entry(fstab_entry)
3578+ entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
3579+ 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
3580+ lfstab.add_entry(entry)
3581+ if mount:
3582+ fstab_mount(mnt_point)
3583
3584=== added file 'hooks/charmhelpers/core/kernel.py'
3585--- hooks/charmhelpers/core/kernel.py 1970-01-01 00:00:00 +0000
3586+++ hooks/charmhelpers/core/kernel.py 2016-04-21 10:41:47 +0000
3587@@ -0,0 +1,68 @@
3588+#!/usr/bin/env python
3589+# -*- coding: utf-8 -*-
3590+
3591+# Copyright 2014-2015 Canonical Limited.
3592+#
3593+# This file is part of charm-helpers.
3594+#
3595+# charm-helpers is free software: you can redistribute it and/or modify
3596+# it under the terms of the GNU Lesser General Public License version 3 as
3597+# published by the Free Software Foundation.
3598+#
3599+# charm-helpers is distributed in the hope that it will be useful,
3600+# but WITHOUT ANY WARRANTY; without even the implied warranty of
3601+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3602+# GNU Lesser General Public License for more details.
3603+#
3604+# You should have received a copy of the GNU Lesser General Public License
3605+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3606+
3607+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
3608+
3609+from charmhelpers.core.hookenv import (
3610+ log,
3611+ INFO
3612+)
3613+
3614+from subprocess import check_call, check_output
3615+import re
3616+
3617+
def modprobe(module, persist=True):
    """Load a kernel module and configure for auto-load on reboot.

    :param module: name of the kernel module to load
    :param persist: if True, append the module to /etc/modules so it is
                    loaded again at boot
    :raises subprocess.CalledProcessError: if modprobe exits non-zero
    """
    cmd = ['modprobe', module]

    log('Loading kernel module %s' % module, level=INFO)

    check_call(cmd)
    if persist:
        with open('/etc/modules', 'r+') as modules:
            # Compare whole lines rather than doing a substring test so
            # that e.g. 'ip' does not shadow 'ipv6', and append with a
            # newline to keep the file one-module-per-line (the read()
            # leaves the file position at EOF, so write() appends).
            if module not in modules.read().splitlines():
                modules.write(module + '\n')
3629+
3630+
def rmmod(module, force=False):
    """Unload a module from the running kernel.

    :param module: name of the module to remove
    :param force: pass ``-f`` to rmmod (dangerous; force removal)
    """
    log('Removing kernel module %s' % module, level=INFO)
    if force:
        cmd = ['rmmod', '-f', module]
    else:
        cmd = ['rmmod', module]
    return check_call(cmd)
3639+
3640+
def lsmod():
    """Return the output of ``lsmod`` (currently loaded kernel modules)
    as a text string.
    """
    cmd = ['lsmod']
    return check_output(cmd, universal_newlines=True)
3645+
3646+
def is_module_loaded(module):
    """Return True if the named kernel module appears in ``lsmod``.

    Matches the module name at the start of a line followed by at least
    one space, i.e. the first column of lsmod output.
    """
    pattern = '^%s[ ]+' % module
    return re.search(pattern, lsmod(), re.M) is not None
3651+
3652+
def update_initramfs(version='all'):
    """Regenerate the initramfs image(s) for the given kernel version.

    :param version: kernel version to update, or 'all' for every one
    """
    cmd = ['update-initramfs', '-k', version, '-u']
    return check_call(cmd)
3656
3657=== modified file 'hooks/charmhelpers/core/services/helpers.py'
3658--- hooks/charmhelpers/core/services/helpers.py 2015-08-10 16:40:00 +0000
3659+++ hooks/charmhelpers/core/services/helpers.py 2016-04-21 10:41:47 +0000
3660@@ -16,7 +16,9 @@
3661
3662 import os
3663 import yaml
3664+
3665 from charmhelpers.core import hookenv
3666+from charmhelpers.core import host
3667 from charmhelpers.core import templating
3668
3669 from charmhelpers.core.services.base import ManagerCallback
3670@@ -240,27 +242,50 @@
3671
3672 :param str source: The template source file, relative to
3673 `$CHARM_DIR/templates`
3674- :param str target: The target to write the rendered template to
3675+
3676+ :param str target: The target to write the rendered template to (or None)
3677 :param str owner: The owner of the rendered file
3678 :param str group: The group of the rendered file
3679 :param int perms: The permissions of the rendered file
3680+ :param partial on_change_action: functools partial to be executed when
3681+ rendered file changes
3682+ :param jinja2 loader template_loader: A jinja2 template loader
3683
3684+ :return str: The rendered template
3685 """
3686 def __init__(self, source, target,
3687- owner='root', group='root', perms=0o444):
3688+ owner='root', group='root', perms=0o444,
3689+ on_change_action=None, template_loader=None):
3690 self.source = source
3691 self.target = target
3692 self.owner = owner
3693 self.group = group
3694 self.perms = perms
3695+ self.on_change_action = on_change_action
3696+ self.template_loader = template_loader
3697
3698 def __call__(self, manager, service_name, event_name):
3699+ pre_checksum = ''
3700+ if self.on_change_action and os.path.isfile(self.target):
3701+ pre_checksum = host.file_hash(self.target)
3702 service = manager.get_service(service_name)
3703- context = {}
3704+ context = {'ctx': {}}
3705 for ctx in service.get('required_data', []):
3706 context.update(ctx)
3707- templating.render(self.source, self.target, context,
3708- self.owner, self.group, self.perms)
3709+ context['ctx'].update(ctx)
3710+
3711+ result = templating.render(self.source, self.target, context,
3712+ self.owner, self.group, self.perms,
3713+ template_loader=self.template_loader)
3714+ if self.on_change_action:
3715+ if pre_checksum == host.file_hash(self.target):
3716+ hookenv.log(
3717+ 'No change detected: {}'.format(self.target),
3718+ hookenv.DEBUG)
3719+ else:
3720+ self.on_change_action()
3721+
3722+ return result
3723
3724
3725 # Convenience aliases for templates
3726
3727=== modified file 'hooks/charmhelpers/core/strutils.py'
3728--- hooks/charmhelpers/core/strutils.py 2015-04-20 00:39:03 +0000
3729+++ hooks/charmhelpers/core/strutils.py 2016-04-21 10:41:47 +0000
3730@@ -18,6 +18,7 @@
3731 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3732
3733 import six
3734+import re
3735
3736
3737 def bool_from_string(value):
3738@@ -40,3 +41,32 @@
3739
3740 msg = "Unable to interpret string value '%s' as boolean" % (value)
3741 raise ValueError(msg)
3742+
3743+
def bytes_from_string(value):
    """Interpret a human readable string value as bytes.

    e.g. '10KB' -> 10240, '2M' -> 2097152. Suffixes are powers of 1024.

    :param value: string of the form <digits><suffix>
    :raises ValueError: if value is not a string, has no recognisable
        number+suffix form, or uses an unknown suffix
    :returns: int number of bytes
    """
    BYTE_POWER = {
        'K': 1,
        'KB': 1,
        'M': 2,
        'MB': 2,
        'G': 3,
        'GB': 3,
        'T': 4,
        'TB': 4,
        'P': 5,
        'PB': 5,
    }
    if isinstance(value, six.string_types):
        value = six.text_type(value)
    else:
        # Was mislabelled "as boolean" (copy/paste from bool_from_string).
        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
        raise ValueError(msg)
    matches = re.match("([0-9]+)([a-zA-Z]+)", value)
    if not matches:
        msg = "Unable to interpret string value '%s' as bytes" % (value)
        raise ValueError(msg)
    try:
        power = BYTE_POWER[matches.group(2)]
    except KeyError:
        # Report an unknown suffix (e.g. '10XB') as a parse error rather
        # than leaking a bare KeyError to the caller.
        msg = "Unable to interpret string value '%s' as bytes" % (value)
        raise ValueError(msg)
    return int(matches.group(1)) * (1024 ** power)
3771
3772=== modified file 'hooks/charmhelpers/core/templating.py'
3773--- hooks/charmhelpers/core/templating.py 2015-04-20 08:54:49 +0000
3774+++ hooks/charmhelpers/core/templating.py 2016-04-21 10:41:47 +0000
3775@@ -21,13 +21,14 @@
3776
3777
3778 def render(source, target, context, owner='root', group='root',
3779- perms=0o444, templates_dir=None, encoding='UTF-8'):
3780+ perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
3781 """
3782 Render a template.
3783
3784 The `source` path, if not absolute, is relative to the `templates_dir`.
3785
3786- The `target` path should be absolute.
3787+ The `target` path should be absolute. It can also be `None`, in which
3788+ case no file will be written.
3789
3790 The context should be a dict containing the values to be replaced in the
3791 template.
3792@@ -36,6 +37,9 @@
3793
3794 If omitted, `templates_dir` defaults to the `templates` folder in the charm.
3795
3796+ The rendered template will be written to the file as well as being returned
3797+ as a string.
3798+
3799 Note: Using this requires python-jinja2; if it is not installed, calling
3800 this will attempt to use charmhelpers.fetch.apt_install to install it.
3801 """
3802@@ -52,17 +56,26 @@
3803 apt_install('python-jinja2', fatal=True)
3804 from jinja2 import FileSystemLoader, Environment, exceptions
3805
3806- if templates_dir is None:
3807- templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
3808- loader = Environment(loader=FileSystemLoader(templates_dir))
3809+ if template_loader:
3810+ template_env = Environment(loader=template_loader)
3811+ else:
3812+ if templates_dir is None:
3813+ templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
3814+ template_env = Environment(loader=FileSystemLoader(templates_dir))
3815 try:
3816 source = source
3817- template = loader.get_template(source)
3818+ template = template_env.get_template(source)
3819 except exceptions.TemplateNotFound as e:
3820 hookenv.log('Could not load template %s from %s.' %
3821 (source, templates_dir),
3822 level=hookenv.ERROR)
3823 raise e
3824 content = template.render(context)
3825- host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
3826- host.write_file(target, content.encode(encoding), owner, group, perms)
3827+ if target is not None:
3828+ target_dir = os.path.dirname(target)
3829+ if not os.path.exists(target_dir):
3830+ # This is a terrible default directory permission, as the file
3831+ # or its siblings will often contain secrets.
3832+ host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
3833+ host.write_file(target, content.encode(encoding), owner, group, perms)
3834+ return content
3835
3836=== modified file 'hooks/charmhelpers/fetch/__init__.py'
3837--- hooks/charmhelpers/fetch/__init__.py 2015-08-10 16:40:00 +0000
3838+++ hooks/charmhelpers/fetch/__init__.py 2016-04-21 10:41:47 +0000
3839@@ -90,6 +90,22 @@
3840 'kilo/proposed': 'trusty-proposed/kilo',
3841 'trusty-kilo/proposed': 'trusty-proposed/kilo',
3842 'trusty-proposed/kilo': 'trusty-proposed/kilo',
3843+ # Liberty
3844+ 'liberty': 'trusty-updates/liberty',
3845+ 'trusty-liberty': 'trusty-updates/liberty',
3846+ 'trusty-liberty/updates': 'trusty-updates/liberty',
3847+ 'trusty-updates/liberty': 'trusty-updates/liberty',
3848+ 'liberty/proposed': 'trusty-proposed/liberty',
3849+ 'trusty-liberty/proposed': 'trusty-proposed/liberty',
3850+ 'trusty-proposed/liberty': 'trusty-proposed/liberty',
3851+ # Mitaka
3852+ 'mitaka': 'trusty-updates/mitaka',
3853+ 'trusty-mitaka': 'trusty-updates/mitaka',
3854+ 'trusty-mitaka/updates': 'trusty-updates/mitaka',
3855+ 'trusty-updates/mitaka': 'trusty-updates/mitaka',
3856+ 'mitaka/proposed': 'trusty-proposed/mitaka',
3857+ 'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
3858+ 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
3859 }
3860
3861 # The order of this list is very important. Handlers should be listed in from
3862@@ -217,12 +233,12 @@
3863
3864 def apt_mark(packages, mark, fatal=False):
3865 """Flag one or more packages using apt-mark"""
3866+ log("Marking {} as {}".format(packages, mark))
3867 cmd = ['apt-mark', mark]
3868 if isinstance(packages, six.string_types):
3869 cmd.append(packages)
3870 else:
3871 cmd.extend(packages)
3872- log("Holding {}".format(packages))
3873
3874 if fatal:
3875 subprocess.check_call(cmd, universal_newlines=True)
3876@@ -403,7 +419,7 @@
3877 importlib.import_module(package),
3878 classname)
3879 plugin_list.append(handler_class())
3880- except (ImportError, AttributeError):
3881+ except NotImplementedError:
3882 # Skip missing plugins so that they can be ommitted from
3883 # installation if desired
3884 log("FetchHandler {} not found, skipping plugin".format(
3885
3886=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
3887--- hooks/charmhelpers/fetch/archiveurl.py 2015-08-10 16:40:00 +0000
3888+++ hooks/charmhelpers/fetch/archiveurl.py 2016-04-21 10:41:47 +0000
3889@@ -108,7 +108,7 @@
3890 install_opener(opener)
3891 response = urlopen(source)
3892 try:
3893- with open(dest, 'w') as dest_file:
3894+ with open(dest, 'wb') as dest_file:
3895 dest_file.write(response.read())
3896 except Exception as e:
3897 if os.path.isfile(dest):
3898
3899=== modified file 'hooks/charmhelpers/fetch/bzrurl.py'
3900--- hooks/charmhelpers/fetch/bzrurl.py 2015-01-26 10:44:46 +0000
3901+++ hooks/charmhelpers/fetch/bzrurl.py 2016-04-21 10:41:47 +0000
3902@@ -15,60 +15,50 @@
3903 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3904
3905 import os
3906+from subprocess import check_call
3907 from charmhelpers.fetch import (
3908 BaseFetchHandler,
3909- UnhandledSource
3910+ UnhandledSource,
3911+ filter_installed_packages,
3912+ apt_install,
3913 )
3914 from charmhelpers.core.host import mkdir
3915
3916-import six
3917-if six.PY3:
3918- raise ImportError('bzrlib does not support Python3')
3919
3920-try:
3921- from bzrlib.branch import Branch
3922- from bzrlib import bzrdir, workingtree, errors
3923-except ImportError:
3924- from charmhelpers.fetch import apt_install
3925- apt_install("python-bzrlib")
3926- from bzrlib.branch import Branch
3927- from bzrlib import bzrdir, workingtree, errors
3928+if filter_installed_packages(['bzr']) != []:
3929+ apt_install(['bzr'])
3930+ if filter_installed_packages(['bzr']) != []:
3931+ raise NotImplementedError('Unable to install bzr')
3932
3933
3934 class BzrUrlFetchHandler(BaseFetchHandler):
3935 """Handler for bazaar branches via generic and lp URLs"""
3936 def can_handle(self, source):
3937 url_parts = self.parse_url(source)
3938- if url_parts.scheme not in ('bzr+ssh', 'lp'):
3939+ if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
3940 return False
3941+ elif not url_parts.scheme:
3942+ return os.path.exists(os.path.join(source, '.bzr'))
3943 else:
3944 return True
3945
3946 def branch(self, source, dest):
3947- url_parts = self.parse_url(source)
3948- # If we use lp:branchname scheme we need to load plugins
3949 if not self.can_handle(source):
3950 raise UnhandledSource("Cannot handle {}".format(source))
3951- if url_parts.scheme == "lp":
3952- from bzrlib.plugin import load_plugins
3953- load_plugins()
3954- try:
3955- local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
3956- except errors.AlreadyControlDirError:
3957- local_branch = Branch.open(dest)
3958- try:
3959- remote_branch = Branch.open(source)
3960- remote_branch.push(local_branch)
3961- tree = workingtree.WorkingTree.open(dest)
3962- tree.update()
3963- except Exception as e:
3964- raise e
3965+ if os.path.exists(dest):
3966+ check_call(['bzr', 'pull', '--overwrite', '-d', dest, source])
3967+ else:
3968+ check_call(['bzr', 'branch', source, dest])
3969
3970- def install(self, source):
3971+ def install(self, source, dest=None):
3972 url_parts = self.parse_url(source)
3973 branch_name = url_parts.path.strip("/").split("/")[-1]
3974- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
3975- branch_name)
3976+ if dest:
3977+ dest_dir = os.path.join(dest, branch_name)
3978+ else:
3979+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
3980+ branch_name)
3981+
3982 if not os.path.exists(dest_dir):
3983 mkdir(dest_dir, perms=0o755)
3984 try:
3985
3986=== modified file 'hooks/charmhelpers/fetch/giturl.py'
3987--- hooks/charmhelpers/fetch/giturl.py 2015-08-10 16:40:00 +0000
3988+++ hooks/charmhelpers/fetch/giturl.py 2016-04-21 10:41:47 +0000
3989@@ -15,24 +15,18 @@
3990 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3991
3992 import os
3993+from subprocess import check_call, CalledProcessError
3994 from charmhelpers.fetch import (
3995 BaseFetchHandler,
3996- UnhandledSource
3997+ UnhandledSource,
3998+ filter_installed_packages,
3999+ apt_install,
4000 )
4001-from charmhelpers.core.host import mkdir
4002-
4003-import six
4004-if six.PY3:
4005- raise ImportError('GitPython does not support Python 3')
4006-
4007-try:
4008- from git import Repo
4009-except ImportError:
4010- from charmhelpers.fetch import apt_install
4011- apt_install("python-git")
4012- from git import Repo
4013-
4014-from git.exc import GitCommandError # noqa E402
4015+
4016+if filter_installed_packages(['git']) != []:
4017+ apt_install(['git'])
4018+ if filter_installed_packages(['git']) != []:
4019+ raise NotImplementedError('Unable to install git')
4020
4021
4022 class GitUrlFetchHandler(BaseFetchHandler):
4023@@ -40,19 +34,24 @@
4024 def can_handle(self, source):
4025 url_parts = self.parse_url(source)
4026 # TODO (mattyw) no support for ssh git@ yet
4027- if url_parts.scheme not in ('http', 'https', 'git'):
4028+ if url_parts.scheme not in ('http', 'https', 'git', ''):
4029 return False
4030+ elif not url_parts.scheme:
4031+ return os.path.exists(os.path.join(source, '.git'))
4032 else:
4033 return True
4034
4035- def clone(self, source, dest, branch, depth=None):
4036+ def clone(self, source, dest, branch="master", depth=None):
4037 if not self.can_handle(source):
4038 raise UnhandledSource("Cannot handle {}".format(source))
4039
4040- if depth:
4041- Repo.clone_from(source, dest, branch=branch, depth=depth)
4042+ if os.path.exists(dest):
4043+ cmd = ['git', '-C', dest, 'pull', source, branch]
4044 else:
4045- Repo.clone_from(source, dest, branch=branch)
4046+ cmd = ['git', 'clone', source, dest, '--branch', branch]
4047+ if depth:
4048+ cmd.extend(['--depth', depth])
4049+ check_call(cmd)
4050
4051 def install(self, source, branch="master", dest=None, depth=None):
4052 url_parts = self.parse_url(source)
4053@@ -62,11 +61,9 @@
4054 else:
4055 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
4056 branch_name)
4057- if not os.path.exists(dest_dir):
4058- mkdir(dest_dir, perms=0o755)
4059 try:
4060 self.clone(source, dest_dir, branch, depth)
4061- except GitCommandError as e:
4062+ except CalledProcessError as e:
4063 raise UnhandledSource(e)
4064 except OSError as e:
4065 raise UnhandledSource(e.strerror)
4066
4067=== modified file 'hooks/hooks.py'
4068--- hooks/hooks.py 2015-10-22 13:26:06 +0000
4069+++ hooks/hooks.py 2016-04-21 10:41:47 +0000
4070@@ -55,6 +55,7 @@
4071 disable_lsb_services,
4072 disable_upstart_services,
4073 get_ipv6_addr,
4074+ set_unit_status,
4075 )
4076
4077 from charmhelpers.contrib.charmsupport import nrpe
4078@@ -406,22 +407,9 @@
4079 nrpe_setup.write()
4080
4081
4082-def assess_status():
4083- '''Assess status of current unit'''
4084- node_count = int(config('cluster_count'))
4085- # not enough peers
4086- for relid in relation_ids('hanode'):
4087- if len(related_units(relid)) + 1 < node_count:
4088- status_set('blocked', 'Insufficient peer units for ha cluster '
4089- '(require {})'.format(node_count))
4090- return
4091-
4092- status_set('active', 'Unit is ready and clustered')
4093-
4094-
4095 if __name__ == '__main__':
4096 try:
4097 hooks.execute(sys.argv)
4098 except UnregisteredHookError as e:
4099 log('Unknown hook {} - skipping.'.format(e), level=DEBUG)
4100- assess_status()
4101+ set_unit_status()
4102
4103=== modified file 'hooks/utils.py'
4104--- hooks/utils.py 2015-10-22 13:26:06 +0000
4105+++ hooks/utils.py 2016-04-21 10:41:47 +0000
4106@@ -7,6 +7,7 @@
4107 import socket
4108 import fcntl
4109 import struct
4110+import xml.etree.ElementTree as ET
4111
4112 from base64 import b64decode
4113
4114@@ -23,7 +24,12 @@
4115 unit_get,
4116 status_set,
4117 )
4118-from charmhelpers.contrib.openstack.utils import get_host_ip
4119+from charmhelpers.contrib.openstack.utils import (
4120+ get_host_ip,
4121+ set_unit_paused,
4122+ clear_unit_paused,
4123+ is_unit_paused_set,
4124+)
4125 from charmhelpers.core.host import (
4126 service_start,
4127 service_stop,
4128@@ -65,7 +71,8 @@
4129 COROSYNC_CONF = '/etc/corosync/corosync.conf'
4130 COROSYNC_DEFAULT = '/etc/default/corosync'
4131 COROSYNC_AUTHKEY = '/etc/corosync/authkey'
4132-COROSYNC_HACLUSTER_ACL = '/etc/corosync/uidgid.d/hacluster'
4133+COROSYNC_HACLUSTER_ACL_DIR = '/etc/corosync/uidgid.d'
4134+COROSYNC_HACLUSTER_ACL = COROSYNC_HACLUSTER_ACL_DIR + '/hacluster'
4135 COROSYNC_CONF_FILES = [
4136 COROSYNC_DEFAULT,
4137 COROSYNC_AUTHKEY,
4138@@ -168,7 +175,7 @@
4139 def nulls(data):
4140 """Returns keys of values that are null (but not bool)"""
4141 return [k for k in data.iterkeys()
4142- if not bool == type(data[k]) and not data[k]]
4143+ if not isinstance(data[k], bool) and not data[k]]
4144
4145
4146 def get_corosync_conf():
4147@@ -252,6 +259,8 @@
4148
4149
4150 def emit_base_conf():
4151+ if not os.path.isdir(COROSYNC_HACLUSTER_ACL_DIR):
4152+ os.mkdir(COROSYNC_HACLUSTER_ACL_DIR)
4153 corosync_default_context = {'corosync_enabled': 'yes'}
4154 write_file(path=COROSYNC_DEFAULT,
4155 content=render_template('corosync',
4156@@ -503,5 +512,128 @@
4157 if service_running("pacemaker"):
4158 service_stop("pacemaker")
4159
4160- service_restart("corosync")
4161- service_start("pacemaker")
4162+ if not is_unit_paused_set():
4163+ service_restart("corosync")
4164+ service_start("pacemaker")
4165+
4166+
def is_in_standby_mode(node_name):
    """Check if node is in standby mode in pacemaker

    @param node_name: The name of the node to check
    @returns boolean - True if node_name is in standby mode
    """
    status_xml = subprocess.check_output(['crm', 'node', 'status', node_name])
    tree = ET.fromstring(status_xml)

    # The node is on standby when its attribute list carries an nvpair
    # of name='standby' with value='on'.
    return any(
        nvpair.attrib.get('name') == 'standby' and
        nvpair.attrib.get('value') == 'on'
        for nvpair in tree.iter('nvpair'))
4182+
4183+
def get_hostname():
    """Return the hostname of this unit

    @returns hostname
    """
    hostname = socket.gethostname()
    return hostname
4190+
4191+
def enter_standby_mode(node_name, duration='forever'):
    """Put this node into standby mode in pacemaker

    @param node_name: The name of the node to put on standby
    @param duration: How long the node should stay on standby
    @returns None
    """
    cmd = ['crm', 'node', 'standby', node_name, duration]
    subprocess.check_call(cmd)
4198+
4199+
def leave_standby_mode(node_name):
    """Take this node out of standby mode in pacemaker

    @param node_name: The name of the node to bring back online
    @returns None
    """
    cmd = ['crm', 'node', 'online', node_name]
    subprocess.check_call(cmd)
4206+
4207+
def node_has_resources(node_name):
    """Check if this node is running resources

    @param node_name: The name of the node to check
    @returns boolean - True if node_name has resources
    """
    status_xml = subprocess.check_output(['crm_mon', '-X'])
    tree = ET.fromstring(status_xml)
    # A resource is hosted on the node when one of its child elements
    # is a <node> whose name attribute matches.
    for resource in tree.iter('resource'):
        for element in resource:
            if element.tag == 'node' and \
                    element.attrib.get('name') == node_name:
                return True
    return False
4222+
4223+
def set_unit_status():
    """Set the workload status for this unit

    @returns None
    """
    status, message = assess_status_helper()
    status_set(status, message)
4230+
4231+
def resume_unit():
    """Resume services on this unit and update the unit's status

    @raises Exception: if the node is still on standby after the attempt
    @returns None
    """
    node_name = get_hostname()
    leave_standby_mode(node_name)
    problems = []
    if is_in_standby_mode(node_name):
        problems.append("Node still in standby mode")
    if problems:
        raise Exception("Couldn't resume: {}".format("; ".join(problems)))
    clear_unit_paused()
    set_unit_status()
4247+
4248+
def pause_unit():
    """Pause services on this unit and update the unit's status

    @raises Exception: if standby was not entered, resources are still
        running here, or the unit was not active beforehand
    @returns None
    """
    node_name = get_hostname()
    enter_standby_mode(node_name)
    problems = []
    if not is_in_standby_mode(node_name):
        problems.append("Node not in standby mode")
    if node_has_resources(node_name):
        problems.append("Resources still running on unit")
    status, message = assess_status_helper()
    if status != 'active':
        problems.append(message)
    if problems:
        raise Exception("Couldn't pause: {}".format("; ".join(problems)))
    set_unit_paused()
    status_set("maintenance",
               "Paused. Use 'resume' action to resume normal service.")
4270+
4271+
def assess_status_helper():
    """Assess status of unit

    @returns status, message - status is workload status and message is any
             corresponding messages
    """
    node_count = int(config('cluster_count'))
    # Blocked as soon as any hanode relation has fewer peers (plus this
    # unit) than the configured cluster size.
    for relid in relation_ids('hanode'):
        if len(related_units(relid)) + 1 < node_count:
            return ('blocked',
                    "Insufficient peer units for ha cluster "
                    "(require {})".format(node_count))
    return 'active', 'Unit is ready and clustered'
4287
4288=== removed file 'tests/00-setup'
4289--- tests/00-setup 2015-10-22 13:26:06 +0000
4290+++ tests/00-setup 1970-01-01 00:00:00 +0000
4291@@ -1,17 +0,0 @@
4292-#!/bin/bash
4293-
4294-set -ex
4295-
4296-sudo add-apt-repository --yes ppa:juju/stable
4297-sudo apt-get update --yes
4298-sudo apt-get install --yes amulet \
4299- distro-info-data \
4300- python-cinderclient \
4301- python-distro-info \
4302- python-glanceclient \
4303- python-heatclient \
4304- python-keystoneclient \
4305- python-neutronclient \
4306- python-novaclient \
4307- python-pika \
4308- python-swiftclient
4309
4310=== modified file 'tests/basic_deployment.py'
4311--- tests/basic_deployment.py 2016-01-28 09:03:37 +0000
4312+++ tests/basic_deployment.py 2016-04-21 10:41:47 +0000
4313@@ -1,6 +1,9 @@
4314 #!/usr/bin/env python
4315 import os
4316+import subprocess
4317+import json
4318 import amulet
4319+import time
4320
4321 import keystoneclient.v2_0 as keystone_client
4322
4323@@ -132,3 +135,46 @@
4324 user=self.demo_user,
4325 password='password',
4326 tenant=self.demo_tenant)
4327+
4328+ def _run_action(self, unit_id, action, *args):
4329+ command = ["juju", "action", "do", "--format=json", unit_id, action]
4330+ command.extend(args)
4331+ print("Running command: %s\n" % " ".join(command))
4332+ output = subprocess.check_output(command)
4333+ output_json = output.decode(encoding="UTF-8")
4334+ data = json.loads(output_json)
4335+ action_id = data[u'Action queued with id']
4336+ return action_id
4337+
4338+ def _wait_on_action(self, action_id):
4339+ command = ["juju", "action", "fetch", "--format=json", action_id]
4340+ while True:
4341+ try:
4342+ output = subprocess.check_output(command)
4343+ except Exception as e:
4344+ print(e)
4345+ return False
4346+ output_json = output.decode(encoding="UTF-8")
4347+ data = json.loads(output_json)
4348+ if data[u"status"] == "completed":
4349+ return True
4350+ elif data[u"status"] == "failed":
4351+ return False
4352+ time.sleep(2)
4353+
4354+ def test_910_pause_and_resume(self):
4355+ """The services can be paused and resumed. """
4356+ u.log.debug('Checking pause and resume actions...')
4357+ unit_name = "hacluster/0"
4358+ unit = self.d.sentry.unit[unit_name]
4359+
4360+ assert u.status_get(unit)[0] == "active"
4361+
4362+ action_id = self._run_action(unit_name, "pause")
4363+ assert self._wait_on_action(action_id), "Pause action failed."
4364+ assert u.status_get(unit)[0] == "maintenance"
4365+
4366+ action_id = self._run_action(unit_name, "resume")
4367+ assert self._wait_on_action(action_id), "Resume action failed."
4368+ assert u.status_get(unit)[0] == "active"
4369+ u.log.debug('OK')
4370
4371=== modified file 'tests/charmhelpers/contrib/amulet/utils.py'
4372--- tests/charmhelpers/contrib/amulet/utils.py 2016-01-28 09:03:37 +0000
4373+++ tests/charmhelpers/contrib/amulet/utils.py 2016-04-21 10:41:47 +0000
4374@@ -601,7 +601,7 @@
4375 return ('Process name count mismatch. expected, actual: {}, '
4376 '{}'.format(len(expected), len(actual)))
4377
4378- for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
4379+ for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
4380 zip(e_proc_names.items(), a_proc_names.items()):
4381 if e_proc_name != a_proc_name:
4382 return ('Process name mismatch. expected, actual: {}, '
4383@@ -610,25 +610,31 @@
4384 a_pids_length = len(a_pids)
4385 fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
4386 '{}, {} ({})'.format(e_sentry_name, e_proc_name,
4387- e_pids_length, a_pids_length,
4388+ e_pids, a_pids_length,
4389 a_pids))
4390
4391- # If expected is not bool, ensure PID quantities match
4392- if not isinstance(e_pids_length, bool) and \
4393- a_pids_length != e_pids_length:
4394+ # If expected is a list, ensure at least one PID quantity match
4395+ if isinstance(e_pids, list) and \
4396+ a_pids_length not in e_pids:
4397+ return fail_msg
4398+ # If expected is not bool and not list,
4399+ # ensure PID quantities match
4400+ elif not isinstance(e_pids, bool) and \
4401+ not isinstance(e_pids, list) and \
4402+ a_pids_length != e_pids:
4403 return fail_msg
4404 # If expected is bool True, ensure 1 or more PIDs exist
4405- elif isinstance(e_pids_length, bool) and \
4406- e_pids_length is True and a_pids_length < 1:
4407+ elif isinstance(e_pids, bool) and \
4408+ e_pids is True and a_pids_length < 1:
4409 return fail_msg
4410 # If expected is bool False, ensure 0 PIDs exist
4411- elif isinstance(e_pids_length, bool) and \
4412- e_pids_length is False and a_pids_length != 0:
4413+ elif isinstance(e_pids, bool) and \
4414+ e_pids is False and a_pids_length != 0:
4415 return fail_msg
4416 else:
4417 self.log.debug('PID check OK: {} {} {}: '
4418 '{}'.format(e_sentry_name, e_proc_name,
4419- e_pids_length, a_pids))
4420+ e_pids, a_pids))
4421 return None
4422
4423 def validate_list_of_identical_dicts(self, list_of_dicts):
4424@@ -782,15 +788,20 @@
4425
4426 # amulet juju action helpers:
4427 def run_action(self, unit_sentry, action,
4428- _check_output=subprocess.check_output):
4429+ _check_output=subprocess.check_output,
4430+ params=None):
4431 """Run the named action on a given unit sentry.
4432
4433+ params a dict of parameters to use
4434 _check_output parameter is used for dependency injection.
4435
4436 @return action_id.
4437 """
4438 unit_id = unit_sentry.info["unit_name"]
4439 command = ["juju", "action", "do", "--format=json", unit_id, action]
4440+ if params is not None:
4441+ for key, value in params.iteritems():
4442+ command.append("{}={}".format(key, value))
4443 self.log.info("Running command: %s\n" % " ".join(command))
4444 output = _check_output(command, universal_newlines=True)
4445 data = json.loads(output)
4446
4447=== modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
4448--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2016-01-28 09:03:37 +0000
4449+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2016-04-21 10:41:47 +0000
4450@@ -121,11 +121,14 @@
4451
4452 # Charms which should use the source config option
4453 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
4454- 'ceph-osd', 'ceph-radosgw']
4455+ 'ceph-osd', 'ceph-radosgw', 'ceph-mon']
4456
4457 # Charms which can not use openstack-origin, ie. many subordinates
4458 no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
4459- 'openvswitch-odl', 'neutron-api-odl', 'odl-controller']
4460+ 'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
4461+ 'cinder-backup', 'nexentaedge-data',
4462+ 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
4463+ 'cinder-nexentaedge', 'nexentaedge-mgmt']
4464
4465 if self.openstack:
4466 for svc in services:
4467
4468=== modified file 'tests/charmhelpers/contrib/openstack/amulet/utils.py'
4469--- tests/charmhelpers/contrib/openstack/amulet/utils.py 2016-01-28 09:03:37 +0000
4470+++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2016-04-21 10:41:47 +0000
4471@@ -27,7 +27,11 @@
4472 import glanceclient.v1.client as glance_client
4473 import heatclient.v1.client as heat_client
4474 import keystoneclient.v2_0 as keystone_client
4475-import novaclient.v1_1.client as nova_client
4476+from keystoneclient.auth.identity import v3 as keystone_id_v3
4477+from keystoneclient import session as keystone_session
4478+from keystoneclient.v3 import client as keystone_client_v3
4479+
4480+import novaclient.client as nova_client
4481 import pika
4482 import swiftclient
4483
4484@@ -38,6 +42,8 @@
4485 DEBUG = logging.DEBUG
4486 ERROR = logging.ERROR
4487
4488+NOVA_CLIENT_VERSION = "2"
4489+
4490
4491 class OpenStackAmuletUtils(AmuletUtils):
4492 """OpenStack amulet utilities.
4493@@ -139,7 +145,7 @@
4494 return "role {} does not exist".format(e['name'])
4495 return ret
4496
4497- def validate_user_data(self, expected, actual):
4498+ def validate_user_data(self, expected, actual, api_version=None):
4499 """Validate user data.
4500
4501 Validate a list of actual user data vs a list of expected user
4502@@ -150,10 +156,15 @@
4503 for e in expected:
4504 found = False
4505 for act in actual:
4506- a = {'enabled': act.enabled, 'name': act.name,
4507- 'email': act.email, 'tenantId': act.tenantId,
4508- 'id': act.id}
4509- if e['name'] == a['name']:
4510+ if e['name'] == act.name:
4511+ a = {'enabled': act.enabled, 'name': act.name,
4512+ 'email': act.email, 'id': act.id}
4513+ if api_version == 3:
4514+ a['default_project_id'] = getattr(act,
4515+ 'default_project_id',
4516+ 'none')
4517+ else:
4518+ a['tenantId'] = act.tenantId
4519 found = True
4520 ret = self._validate_dict_data(e, a)
4521 if ret:
4522@@ -188,15 +199,30 @@
4523 return cinder_client.Client(username, password, tenant, ept)
4524
4525 def authenticate_keystone_admin(self, keystone_sentry, user, password,
4526- tenant):
4527+ tenant=None, api_version=None,
4528+ keystone_ip=None):
4529 """Authenticates admin user with the keystone admin endpoint."""
4530 self.log.debug('Authenticating keystone admin...')
4531 unit = keystone_sentry
4532- service_ip = unit.relation('shared-db',
4533- 'mysql:shared-db')['private-address']
4534- ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
4535- return keystone_client.Client(username=user, password=password,
4536- tenant_name=tenant, auth_url=ep)
4537+ if not keystone_ip:
4538+ keystone_ip = unit.relation('shared-db',
4539+ 'mysql:shared-db')['private-address']
4540+ base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
4541+ if not api_version or api_version == 2:
4542+ ep = base_ep + "/v2.0"
4543+ return keystone_client.Client(username=user, password=password,
4544+ tenant_name=tenant, auth_url=ep)
4545+ else:
4546+ ep = base_ep + "/v3"
4547+ auth = keystone_id_v3.Password(
4548+ user_domain_name='admin_domain',
4549+ username=user,
4550+ password=password,
4551+ domain_name='admin_domain',
4552+ auth_url=ep,
4553+ )
4554+ sess = keystone_session.Session(auth=auth)
4555+ return keystone_client_v3.Client(session=sess)
4556
4557 def authenticate_keystone_user(self, keystone, user, password, tenant):
4558 """Authenticates a regular user with the keystone public endpoint."""
4559@@ -225,7 +251,8 @@
4560 self.log.debug('Authenticating nova user ({})...'.format(user))
4561 ep = keystone.service_catalog.url_for(service_type='identity',
4562 endpoint_type='publicURL')
4563- return nova_client.Client(username=user, api_key=password,
4564+ return nova_client.Client(NOVA_CLIENT_VERSION,
4565+ username=user, api_key=password,
4566 project_id=tenant, auth_url=ep)
4567
4568 def authenticate_swift_user(self, keystone, user, password, tenant):

Subscribers

People subscribed via source and target branches