Merge lp:~verterok/charms/trusty/tanuki-spec-manager/db-relation into lp:~tanuki/charms/trusty/tanuki-spec-manager/trunk

Proposed by Guillermo Gonzalez
Status: Superseded
Proposed branch: lp:~verterok/charms/trusty/tanuki-spec-manager/db-relation
Merge into: lp:~tanuki/charms/trusty/tanuki-spec-manager/trunk
Diff against target: 5132 lines (+3048/-485)
39 files modified
actions.yaml (+3/-0)
charm-helpers.yaml (+6/-0)
config.yaml (+16/-1)
hooks/actions.py (+116/-3)
hooks/charmhelpers/contrib/amulet/utils.py (+352/-13)
hooks/charmhelpers/contrib/ansible/__init__.py (+68/-4)
hooks/charmhelpers/contrib/benchmark/__init__.py (+124/-0)
hooks/charmhelpers/contrib/charmsupport/nrpe.py (+44/-8)
hooks/charmhelpers/contrib/database/mysql.py (+119/-79)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+52/-4)
hooks/charmhelpers/contrib/network/ip.py (+84/-1)
hooks/charmhelpers/contrib/network/ufw.py (+46/-3)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+84/-9)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+361/-51)
hooks/charmhelpers/contrib/openstack/context.py (+293/-16)
hooks/charmhelpers/contrib/openstack/files/__init__.py (+18/-0)
hooks/charmhelpers/contrib/openstack/ip.py (+49/-7)
hooks/charmhelpers/contrib/openstack/neutron.py (+93/-3)
hooks/charmhelpers/contrib/openstack/utils.py (+214/-150)
hooks/charmhelpers/contrib/peerstorage/__init__.py (+123/-3)
hooks/charmhelpers/contrib/python/packages.py (+30/-5)
hooks/charmhelpers/contrib/ssl/service.py (+13/-17)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+6/-6)
hooks/charmhelpers/contrib/templating/contexts.py (+6/-1)
hooks/charmhelpers/contrib/unison/__init__.py (+5/-4)
hooks/charmhelpers/core/fstab.py (+2/-2)
hooks/charmhelpers/core/hookenv.py (+272/-39)
hooks/charmhelpers/core/host.py (+30/-8)
hooks/charmhelpers/core/services/base.py (+43/-19)
hooks/charmhelpers/core/services/helpers.py (+14/-6)
hooks/charmhelpers/core/strutils.py (+42/-0)
hooks/charmhelpers/core/unitdata.py (+1/-1)
hooks/charmhelpers/fetch/__init__.py (+21/-13)
hooks/charmhelpers/fetch/giturl.py (+7/-5)
hooks/schema-upgrade (+53/-0)
hooks/services.py (+6/-2)
metadata.yaml (+6/-0)
scripts/charm_helpers_sync.py (+223/-0)
templates/upstart.conf (+3/-2)
To merge this branch: bzr merge lp:~verterok/charms/trusty/tanuki-spec-manager/db-relation
Reviewer Review Type Date Requested Status
Tanuki Squad Pending
Review via email: mp+265042@code.launchpad.net

Description of the change

This branch adds db[-admin] relation support, and also changes the file layout slightly, moving the logs/ directory outside the code directory.

Main changes:
 * update charmhelpers
 * add basic postgresql (db) relation support
    - use db-admin relation credentials to manage the schema (only root can read those credential files)
    - use db relation credentials for the appserver
    - add an action to run the schema upgrade

To post a comment you must log in.
13. By Guillermo Gonzalez

- don't install python3-psycopg2
- use env/bin/python to run the schema script

Unmerged revisions

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== added directory 'actions'
2=== added file 'actions.yaml'
3--- actions.yaml 1970-01-01 00:00:00 +0000
4+++ actions.yaml 2015-07-16 20:20:59 +0000
5@@ -0,0 +1,3 @@
6+schema-upgrade:
7+ description: Run the schema upgrade script
8+
9
10=== added symlink 'actions/schema-upgrade'
11=== target is u'../hooks/schema-upgrade'
12=== added file 'charm-helpers.yaml'
13--- charm-helpers.yaml 1970-01-01 00:00:00 +0000
14+++ charm-helpers.yaml 2015-07-16 20:20:59 +0000
15@@ -0,0 +1,6 @@
16+destination: lib/charmhelpers
17+branch: lp:charm-helpers
18+include:
19+ - contrib
20+ - core
21+ - fetch
22
23=== modified file 'config.yaml'
24--- config.yaml 2015-06-18 14:29:25 +0000
25+++ config.yaml 2015-07-16 20:20:59 +0000
26@@ -1,10 +1,25 @@
27 options:
28 environment:
29 type: string
30- default: "production"
31+ default: "devel"
32 description: |
33 Environment (devel, staging, production, etc.) that we're running.
34 config-file:
35 type: string
36 description: |
37 base64 encoded string with the config file for adt-cloud-worker
38+ db_name:
39+ type: string
40+ default: "specmanager"
41+ description: |
42+ database name
43+ db_user:
44+ type: string
45+ default: "specmgr_user"
46+ description: |
47+ DB user name
48+ db_roles:
49+ type: string
50+ default: "specmgr_app,appserver"
51+ description: |
52+ comma separated list of roles for the appserver/common DB user
53
54=== modified file 'hooks/actions.py'
55--- hooks/actions.py 2015-07-14 18:28:19 +0000
56+++ hooks/actions.py 2015-07-16 20:20:59 +0000
57@@ -1,6 +1,8 @@
58+import ConfigParser
59 import base64
60 import os
61 import subprocess
62+import yaml
63
64 from charmhelpers import fetch
65 from charmhelpers.core import (
66@@ -12,7 +14,7 @@
67
68
69 REQUIRED_PACKAGES = [
70- 'python-virtualenv', 'python3-dev', 'libpq5',
71+ 'python-virtualenv', 'python3-dev', 'libpq5', 'python3-psycopg2',
72 ]
73
74 WSGI_USER = 'www-data'
75@@ -20,14 +22,21 @@
76
77 config = hookenv.config()
78
79-SERVICE_DIR = '/srv/{}/spec-manager'.format(config['environment'])
80-LOG_DIR = os.path.join(SERVICE_DIR, 'logs')
81+BASE_DIR = '/srv/{}'.format(config['environment'])
82+SERVICE_DIR = os.path.join(BASE_DIR, 'spec-manager')
83+LOG_DIR = os.path.join(BASE_DIR, 'logs')
84+ETC_DIR = os.path.join(BASE_DIR, 'etc')
85
86
87 def log_start(service_name):
88 hookenv.log('spec-manager starting')
89
90
91+def ensure_directories(service_name):
92+ for dirpath in [BASE_DIR, ETC_DIR]:
93+ host.mkdir(dirpath, owner='root', group='ubuntu')
94+
95+
96 def basenode(service_name):
97 hookenv.log("Executing basenode")
98 execd.execd_preinstall()
99@@ -89,3 +98,107 @@
100 data = super(WebsiteRelation, self).provide_data()
101 data['port'] = 8000
102 return data
103+
104+
105+def write_db_config(service_name):
106+ db_rels = hookenv.relations_of_type('db')
107+ db_config_created = False
108+ db_config_path = os.path.join(ETC_DIR, 'db.yaml')
109+ db_schema_config_path = os.path.join(ETC_DIR, 'db-schema.yaml')
110+ db_admin_config_created = False
111+ db_admin_config_path = os.path.join(ETC_DIR, 'db-admin.yaml')
112+ if db_rels:
113+ # use the master/standalone server
114+ for db_rel in db_rels:
115+ if 'user' not in db_rel or 'state' not in db_rel:
116+ continue
117+ if db_rel['state'] in ('standalone', 'master'):
118+ app_db_config = {}
119+ for k in ('database', 'user', 'password', 'host', 'port'):
120+ app_db_config[k] = db_rel[k]
121+ host.write_file(db_config_path, yaml.dump(app_db_config),
122+ owner='root', group='www-data', perms=0o440)
123+ schema_db_config = {}
124+ for k in ('database', 'schema_user', 'schema_password',
125+ 'host', 'port'):
126+ schema_db_config[k] = db_rel[k]
127+ host.write_file(db_schema_config_path, yaml.dump(schema_db_config),
128+ owner='root', group='www-data', perms=0o440)
129+ db_config_created = True
130+ # update service.conf inplace
131+ parser = ConfigParser.ConfigParser()
132+ config_file = os.path.join(SERVICE_DIR, 'service.conf')
133+ parser.read([config_file])
134+ parser.set('db', 'host', value=db_rel['host'])
135+ parser.set('db', 'username', value=db_rel['user'])
136+ parser.set('db', 'password', value=db_rel['password'])
137+ parser.set('db', 'port', value=db_rel['port'])
138+ with open(config_file, 'w') as fp:
139+ parser.write(fp)
140+ fp.flush()
141+ # cleanup if nothing was created
142+ if not db_config_created:
143+ if os.path.exists(db_config_path):
144+ os.remove(db_config_path)
145+ if os.path.exists(db_schema_config_path):
146+ os.remove(db_schema_config_path)
147+ db_admin_rels = hookenv.relations_of_type('db-admin')
148+ if db_admin_rels and db_config_created:
149+ # use the master/standalone server
150+ for db_rel in db_admin_rels:
151+ if 'user' not in db_rel or 'state' not in db_rel:
152+ continue
153+ if db_rel['state'] in ('standalone', 'master'):
154+ content = yaml.dump(db_rel)
155+ host.write_file(db_admin_config_path, content,
156+ owner='root', group='root', perms=0o400)
157+ db_admin_config_created = True
158+
159+ # cleanup if nothing was updated
160+ if not db_config_created and os.path.exists(db_config_path):
161+ os.remove(db_config_path)
162+ if not db_admin_config_created and os.path.exists(db_admin_config_path):
163+ os.remove(db_admin_config_path)
164+
165+
166+class PostgresqlRelation(helpers.RelationContext):
167+ """
168+ Relation context for the `postgresql` interface.
169+
170+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
171+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
172+ """
173+ name = 'db'
174+ interface = 'pgsql'
175+ required_keys = []
176+ roles = None
177+ database = None
178+ user = None
179+
180+ def __init__(self, required=False):
181+ config = hookenv.config()
182+ self.roles = config['db_roles']
183+ self.database = config['db_name']
184+ self.user = config['db_user']
185+ if required:
186+ self.required_keys = ['host', 'user', 'password', 'database']
187+ super(PostgresqlRelation, self).__init__(name=self.name)
188+
189+ def is_ready(self):
190+ data = self.get(self.name, [])
191+ if not data:
192+ return False
193+ all_rels = []
194+ for d in data:
195+ if d.get('database') == self.database and self._unit_in(d.get('allowed-units', [])):
196+ all_rels.append(True)
197+ else:
198+ all_rels.append(False)
199+ return all(all_rels)
200+
201+ def provide_data(self):
202+ return dict(roles=self.roles, database=self.database, user=self.user)
203+
204+ def _unit_in(self, units):
205+ return hookenv.local_unit() in units
206+
207
208=== modified file 'hooks/charmhelpers/contrib/amulet/utils.py'
209--- hooks/charmhelpers/contrib/amulet/utils.py 2015-06-18 14:29:25 +0000
210+++ hooks/charmhelpers/contrib/amulet/utils.py 2015-07-16 20:20:59 +0000
211@@ -14,14 +14,17 @@
212 # You should have received a copy of the GNU Lesser General Public License
213 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
214
215+import amulet
216 import ConfigParser
217+import distro_info
218 import io
219 import logging
220+import os
221 import re
222+import six
223 import sys
224 import time
225-
226-import six
227+import urlparse
228
229
230 class AmuletUtils(object):
231@@ -33,6 +36,7 @@
232
233 def __init__(self, log_level=logging.ERROR):
234 self.log = self.get_logger(level=log_level)
235+ self.ubuntu_releases = self.get_ubuntu_releases()
236
237 def get_logger(self, name="amulet-logger", level=logging.DEBUG):
238 """Get a logger object that will log to stdout."""
239@@ -70,15 +74,85 @@
240 else:
241 return False
242
243+ def get_ubuntu_release_from_sentry(self, sentry_unit):
244+ """Get Ubuntu release codename from sentry unit.
245+
246+ :param sentry_unit: amulet sentry/service unit pointer
247+ :returns: list of strings - release codename, failure message
248+ """
249+ msg = None
250+ cmd = 'lsb_release -cs'
251+ release, code = sentry_unit.run(cmd)
252+ if code == 0:
253+ self.log.debug('{} lsb_release: {}'.format(
254+ sentry_unit.info['unit_name'], release))
255+ else:
256+ msg = ('{} `{}` returned {} '
257+ '{}'.format(sentry_unit.info['unit_name'],
258+ cmd, release, code))
259+ if release not in self.ubuntu_releases:
260+ msg = ("Release ({}) not found in Ubuntu releases "
261+ "({})".format(release, self.ubuntu_releases))
262+ return release, msg
263+
264 def validate_services(self, commands):
265- """Validate services.
266-
267- Verify the specified services are running on the corresponding
268+ """Validate that lists of commands succeed on service units. Can be
269+ used to verify system services are running on the corresponding
270 service units.
271- """
272+
273+ :param commands: dict with sentry keys and arbitrary command list vals
274+ :returns: None if successful, Failure string message otherwise
275+ """
276+ self.log.debug('Checking status of system services...')
277+
278+ # /!\ DEPRECATION WARNING (beisner):
279+ # New and existing tests should be rewritten to use
280+ # validate_services_by_name() as it is aware of init systems.
281+ self.log.warn('/!\\ DEPRECATION WARNING: use '
282+ 'validate_services_by_name instead of validate_services '
283+ 'due to init system differences.')
284+
285 for k, v in six.iteritems(commands):
286 for cmd in v:
287 output, code = k.run(cmd)
288+ self.log.debug('{} `{}` returned '
289+ '{}'.format(k.info['unit_name'],
290+ cmd, code))
291+ if code != 0:
292+ return "command `{}` returned {}".format(cmd, str(code))
293+ return None
294+
295+ def validate_services_by_name(self, sentry_services):
296+ """Validate system service status by service name, automatically
297+ detecting init system based on Ubuntu release codename.
298+
299+ :param sentry_services: dict with sentry keys and svc list values
300+ :returns: None if successful, Failure string message otherwise
301+ """
302+ self.log.debug('Checking status of system services...')
303+
304+ # Point at which systemd became a thing
305+ systemd_switch = self.ubuntu_releases.index('vivid')
306+
307+ for sentry_unit, services_list in six.iteritems(sentry_services):
308+ # Get lsb_release codename from unit
309+ release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
310+ if ret:
311+ return ret
312+
313+ for service_name in services_list:
314+ if (self.ubuntu_releases.index(release) >= systemd_switch or
315+ service_name == "rabbitmq-server"):
316+ # init is systemd
317+ cmd = 'sudo service {} status'.format(service_name)
318+ elif self.ubuntu_releases.index(release) < systemd_switch:
319+ # init is upstart
320+ cmd = 'sudo status {}'.format(service_name)
321+
322+ output, code = sentry_unit.run(cmd)
323+ self.log.debug('{} `{}` returned '
324+ '{}'.format(sentry_unit.info['unit_name'],
325+ cmd, code))
326 if code != 0:
327 return "command `{}` returned {}".format(cmd, str(code))
328 return None
329@@ -86,7 +160,11 @@
330 def _get_config(self, unit, filename):
331 """Get a ConfigParser object for parsing a unit's config file."""
332 file_contents = unit.file_contents(filename)
333- config = ConfigParser.ConfigParser()
334+
335+ # NOTE(beisner): by default, ConfigParser does not handle options
336+ # with no value, such as the flags used in the mysql my.cnf file.
337+ # https://bugs.python.org/issue7005
338+ config = ConfigParser.ConfigParser(allow_no_value=True)
339 config.readfp(io.StringIO(file_contents))
340 return config
341
342@@ -96,7 +174,15 @@
343
344 Verify that the specified section of the config file contains
345 the expected option key:value pairs.
346+
347+ Compare expected dictionary data vs actual dictionary data.
348+ The values in the 'expected' dictionary can be strings, bools, ints,
349+ longs, or can be a function that evaluates a variable and returns a
350+ bool.
351 """
352+ self.log.debug('Validating config file data ({} in {} on {})'
353+ '...'.format(section, config_file,
354+ sentry_unit.info['unit_name']))
355 config = self._get_config(sentry_unit, config_file)
356
357 if section != 'DEFAULT' and not config.has_section(section):
358@@ -105,9 +191,20 @@
359 for k in expected.keys():
360 if not config.has_option(section, k):
361 return "section [{}] is missing option {}".format(section, k)
362- if config.get(section, k) != expected[k]:
363+
364+ actual = config.get(section, k)
365+ v = expected[k]
366+ if (isinstance(v, six.string_types) or
367+ isinstance(v, bool) or
368+ isinstance(v, six.integer_types)):
369+ # handle explicit values
370+ if actual != v:
371+ return "section [{}] {}:{} != expected {}:{}".format(
372+ section, k, actual, k, expected[k])
373+ # handle function pointers, such as not_null or valid_ip
374+ elif not v(actual):
375 return "section [{}] {}:{} != expected {}:{}".format(
376- section, k, config.get(section, k), k, expected[k])
377+ section, k, actual, k, expected[k])
378 return None
379
380 def _validate_dict_data(self, expected, actual):
381@@ -115,16 +212,21 @@
382
383 Compare expected dictionary data vs actual dictionary data.
384 The values in the 'expected' dictionary can be strings, bools, ints,
385- longs, or can be a function that evaluate a variable and returns a
386+ longs, or can be a function that evaluates a variable and returns a
387 bool.
388 """
389+ self.log.debug('actual: {}'.format(repr(actual)))
390+ self.log.debug('expected: {}'.format(repr(expected)))
391+
392 for k, v in six.iteritems(expected):
393 if k in actual:
394 if (isinstance(v, six.string_types) or
395 isinstance(v, bool) or
396 isinstance(v, six.integer_types)):
397+ # handle explicit values
398 if v != actual[k]:
399 return "{}:{}".format(k, actual[k])
400+ # handle function pointers, such as not_null or valid_ip
401 elif not v(actual[k]):
402 return "{}:{}".format(k, actual[k])
403 else:
404@@ -134,7 +236,6 @@
405 def validate_relation_data(self, sentry_unit, relation, expected):
406 """Validate actual relation data based on expected relation data."""
407 actual = sentry_unit.relation(relation[0], relation[1])
408- self.log.debug('actual: {}'.format(repr(actual)))
409 return self._validate_dict_data(expected, actual)
410
411 def _validate_list_data(self, expected, actual):
412@@ -169,8 +270,13 @@
413 cmd = 'pgrep -o -f {}'.format(service)
414 else:
415 cmd = 'pgrep -o {}'.format(service)
416- proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
417- return self._get_dir_mtime(sentry_unit, proc_dir)
418+ cmd = cmd + ' | grep -v pgrep || exit 0'
419+ cmd_out = sentry_unit.run(cmd)
420+ self.log.debug('CMDout: ' + str(cmd_out))
421+ if cmd_out[0]:
422+ self.log.debug('Pid for %s %s' % (service, str(cmd_out[0])))
423+ proc_dir = '/proc/{}'.format(cmd_out[0].strip())
424+ return self._get_dir_mtime(sentry_unit, proc_dir)
425
426 def service_restarted(self, sentry_unit, service, filename,
427 pgrep_full=False, sleep_time=20):
428@@ -187,8 +293,241 @@
429 else:
430 return False
431
432+ def service_restarted_since(self, sentry_unit, mtime, service,
433+ pgrep_full=False, sleep_time=20,
434+ retry_count=2):
435+ """Check if service was been started after a given time.
436+
437+ Args:
438+ sentry_unit (sentry): The sentry unit to check for the service on
439+ mtime (float): The epoch time to check against
440+ service (string): service name to look for in process table
441+ pgrep_full (boolean): Use full command line search mode with pgrep
442+ sleep_time (int): Seconds to sleep before looking for process
443+ retry_count (int): If service is not found, how many times to retry
444+
445+ Returns:
446+ bool: True if service found and its start time it newer than mtime,
447+ False if service is older than mtime or if service was
448+ not found.
449+ """
450+ self.log.debug('Checking %s restarted since %s' % (service, mtime))
451+ time.sleep(sleep_time)
452+ proc_start_time = self._get_proc_start_time(sentry_unit, service,
453+ pgrep_full)
454+ while retry_count > 0 and not proc_start_time:
455+ self.log.debug('No pid file found for service %s, will retry %i '
456+ 'more times' % (service, retry_count))
457+ time.sleep(30)
458+ proc_start_time = self._get_proc_start_time(sentry_unit, service,
459+ pgrep_full)
460+ retry_count = retry_count - 1
461+
462+ if not proc_start_time:
463+ self.log.warn('No proc start time found, assuming service did '
464+ 'not start')
465+ return False
466+ if proc_start_time >= mtime:
467+ self.log.debug('proc start time is newer than provided mtime'
468+ '(%s >= %s)' % (proc_start_time, mtime))
469+ return True
470+ else:
471+ self.log.warn('proc start time (%s) is older than provided mtime '
472+ '(%s), service did not restart' % (proc_start_time,
473+ mtime))
474+ return False
475+
476+ def config_updated_since(self, sentry_unit, filename, mtime,
477+ sleep_time=20):
478+ """Check if file was modified after a given time.
479+
480+ Args:
481+ sentry_unit (sentry): The sentry unit to check the file mtime on
482+ filename (string): The file to check mtime of
483+ mtime (float): The epoch time to check against
484+ sleep_time (int): Seconds to sleep before looking for process
485+
486+ Returns:
487+ bool: True if file was modified more recently than mtime, False if
488+ file was modified before mtime,
489+ """
490+ self.log.debug('Checking %s updated since %s' % (filename, mtime))
491+ time.sleep(sleep_time)
492+ file_mtime = self._get_file_mtime(sentry_unit, filename)
493+ if file_mtime >= mtime:
494+ self.log.debug('File mtime is newer than provided mtime '
495+ '(%s >= %s)' % (file_mtime, mtime))
496+ return True
497+ else:
498+ self.log.warn('File mtime %s is older than provided mtime %s'
499+ % (file_mtime, mtime))
500+ return False
501+
502+ def validate_service_config_changed(self, sentry_unit, mtime, service,
503+ filename, pgrep_full=False,
504+ sleep_time=20, retry_count=2):
505+ """Check service and file were updated after mtime
506+
507+ Args:
508+ sentry_unit (sentry): The sentry unit to check for the service on
509+ mtime (float): The epoch time to check against
510+ service (string): service name to look for in process table
511+ filename (string): The file to check mtime of
512+ pgrep_full (boolean): Use full command line search mode with pgrep
513+ sleep_time (int): Seconds to sleep before looking for process
514+ retry_count (int): If service is not found, how many times to retry
515+
516+ Typical Usage:
517+ u = OpenStackAmuletUtils(ERROR)
518+ ...
519+ mtime = u.get_sentry_time(self.cinder_sentry)
520+ self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
521+ if not u.validate_service_config_changed(self.cinder_sentry,
522+ mtime,
523+ 'cinder-api',
524+ '/etc/cinder/cinder.conf')
525+ amulet.raise_status(amulet.FAIL, msg='update failed')
526+ Returns:
527+ bool: True if both service and file where updated/restarted after
528+ mtime, False if service is older than mtime or if service was
529+ not found or if filename was modified before mtime.
530+ """
531+ self.log.debug('Checking %s restarted since %s' % (service, mtime))
532+ time.sleep(sleep_time)
533+ service_restart = self.service_restarted_since(sentry_unit, mtime,
534+ service,
535+ pgrep_full=pgrep_full,
536+ sleep_time=0,
537+ retry_count=retry_count)
538+ config_update = self.config_updated_since(sentry_unit, filename, mtime,
539+ sleep_time=0)
540+ return service_restart and config_update
541+
542+ def get_sentry_time(self, sentry_unit):
543+ """Return current epoch time on a sentry"""
544+ cmd = "date +'%s'"
545+ return float(sentry_unit.run(cmd)[0])
546+
547 def relation_error(self, name, data):
548 return 'unexpected relation data in {} - {}'.format(name, data)
549
550 def endpoint_error(self, name, data):
551 return 'unexpected endpoint data in {} - {}'.format(name, data)
552+
553+ def get_ubuntu_releases(self):
554+ """Return a list of all Ubuntu releases in order of release."""
555+ _d = distro_info.UbuntuDistroInfo()
556+ _release_list = _d.all
557+ self.log.debug('Ubuntu release list: {}'.format(_release_list))
558+ return _release_list
559+
560+ def file_to_url(self, file_rel_path):
561+ """Convert a relative file path to a file URL."""
562+ _abs_path = os.path.abspath(file_rel_path)
563+ return urlparse.urlparse(_abs_path, scheme='file').geturl()
564+
565+ def check_commands_on_units(self, commands, sentry_units):
566+ """Check that all commands in a list exit zero on all
567+ sentry units in a list.
568+
569+ :param commands: list of bash commands
570+ :param sentry_units: list of sentry unit pointers
571+ :returns: None if successful; Failure message otherwise
572+ """
573+ self.log.debug('Checking exit codes for {} commands on {} '
574+ 'sentry units...'.format(len(commands),
575+ len(sentry_units)))
576+ for sentry_unit in sentry_units:
577+ for cmd in commands:
578+ output, code = sentry_unit.run(cmd)
579+ if code == 0:
580+ self.log.debug('{} `{}` returned {} '
581+ '(OK)'.format(sentry_unit.info['unit_name'],
582+ cmd, code))
583+ else:
584+ return ('{} `{}` returned {} '
585+ '{}'.format(sentry_unit.info['unit_name'],
586+ cmd, code, output))
587+ return None
588+
589+ def get_process_id_list(self, sentry_unit, process_name):
590+ """Get a list of process ID(s) from a single sentry juju unit
591+ for a single process name.
592+
593+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
594+ :param process_name: Process name
595+ :returns: List of process IDs
596+ """
597+ cmd = 'pidof {}'.format(process_name)
598+ output, code = sentry_unit.run(cmd)
599+ if code != 0:
600+ msg = ('{} `{}` returned {} '
601+ '{}'.format(sentry_unit.info['unit_name'],
602+ cmd, code, output))
603+ amulet.raise_status(amulet.FAIL, msg=msg)
604+ return str(output).split()
605+
606+ def get_unit_process_ids(self, unit_processes):
607+ """Construct a dict containing unit sentries, process names, and
608+ process IDs."""
609+ pid_dict = {}
610+ for sentry_unit, process_list in unit_processes.iteritems():
611+ pid_dict[sentry_unit] = {}
612+ for process in process_list:
613+ pids = self.get_process_id_list(sentry_unit, process)
614+ pid_dict[sentry_unit].update({process: pids})
615+ return pid_dict
616+
617+ def validate_unit_process_ids(self, expected, actual):
618+ """Validate process id quantities for services on units."""
619+ self.log.debug('Checking units for running processes...')
620+ self.log.debug('Expected PIDs: {}'.format(expected))
621+ self.log.debug('Actual PIDs: {}'.format(actual))
622+
623+ if len(actual) != len(expected):
624+ return ('Unit count mismatch. expected, actual: {}, '
625+ '{} '.format(len(expected), len(actual)))
626+
627+ for (e_sentry, e_proc_names) in expected.iteritems():
628+ e_sentry_name = e_sentry.info['unit_name']
629+ if e_sentry in actual.keys():
630+ a_proc_names = actual[e_sentry]
631+ else:
632+ return ('Expected sentry ({}) not found in actual dict data.'
633+ '{}'.format(e_sentry_name, e_sentry))
634+
635+ if len(e_proc_names.keys()) != len(a_proc_names.keys()):
636+ return ('Process name count mismatch. expected, actual: {}, '
637+ '{}'.format(len(expected), len(actual)))
638+
639+ for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
640+ zip(e_proc_names.items(), a_proc_names.items()):
641+ if e_proc_name != a_proc_name:
642+ return ('Process name mismatch. expected, actual: {}, '
643+ '{}'.format(e_proc_name, a_proc_name))
644+
645+ a_pids_length = len(a_pids)
646+ if e_pids_length != a_pids_length:
647+ return ('PID count mismatch. {} ({}) expected, actual: '
648+ '{}, {} ({})'.format(e_sentry_name, e_proc_name,
649+ e_pids_length, a_pids_length,
650+ a_pids))
651+ else:
652+ self.log.debug('PID check OK: {} {} {}: '
653+ '{}'.format(e_sentry_name, e_proc_name,
654+ e_pids_length, a_pids))
655+ return None
656+
657+ def validate_list_of_identical_dicts(self, list_of_dicts):
658+ """Check that all dicts within a list are identical."""
659+ hashes = []
660+ for _dict in list_of_dicts:
661+ hashes.append(hash(frozenset(_dict.items())))
662+
663+ self.log.debug('Hashes: {}'.format(hashes))
664+ if len(set(hashes)) == 1:
665+ self.log.debug('Dicts within list are identical')
666+ else:
667+ return 'Dicts within list are not identical'
668+
669+ return None
670
671=== modified file 'hooks/charmhelpers/contrib/ansible/__init__.py'
672--- hooks/charmhelpers/contrib/ansible/__init__.py 2015-06-18 14:29:25 +0000
673+++ hooks/charmhelpers/contrib/ansible/__init__.py 2015-07-16 20:20:59 +0000
674@@ -75,9 +75,36 @@
675 .. _playbooks: http://www.ansibleworks.com/docs/playbooks.html
676 .. _modules: http://www.ansibleworks.com/docs/modules.html
677
678+A further feature os the ansible hooks is to provide a light weight "action"
679+scripting tool. This is a decorator that you apply to a function, and that
680+function can now receive cli args, and can pass extra args to the playbook.
681+
682+e.g.
683+
684+
685+@hooks.action()
686+def some_action(amount, force="False"):
687+ "Usage: some-action AMOUNT [force=True]" # <-- shown on error
688+ # process the arguments
689+ # do some calls
690+ # return extra-vars to be passed to ansible-playbook
691+ return {
692+ 'amount': int(amount),
693+ 'type': force,
694+ }
695+
696+You can now create a symlink to hooks.py that can be invoked like a hook, but
697+with cli params:
698+
699+# link actions/some-action to hooks/hooks.py
700+
701+actions/some-action amount=10 force=true
702+
703 """
704 import os
705+import stat
706 import subprocess
707+import functools
708
709 import charmhelpers.contrib.templating.contexts
710 import charmhelpers.core.host
711@@ -112,12 +139,13 @@
712 hosts_file.write('localhost ansible_connection=local')
713
714
715-def apply_playbook(playbook, tags=None):
716+def apply_playbook(playbook, tags=None, extra_vars=None):
717 tags = tags or []
718 tags = ",".join(tags)
719 charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
720 ansible_vars_path, namespace_separator='__',
721- allow_hyphens_in_keys=False)
722+ allow_hyphens_in_keys=False, mode=(stat.S_IRUSR | stat.S_IWUSR))
723+
724 # we want ansible's log output to be unbuffered
725 env = os.environ.copy()
726 env['PYTHONUNBUFFERED'] = "1"
727@@ -129,6 +157,9 @@
728 ]
729 if tags:
730 call.extend(['--tags', '{}'.format(tags)])
731+ if extra_vars:
732+ extra = ["%s=%s" % (k, v) for k, v in extra_vars.items()]
733+ call.extend(['--extra-vars', " ".join(extra)])
734 subprocess.check_call(call, env=env)
735
736
737@@ -172,6 +203,7 @@
738 """Register any hooks handled by ansible."""
739 super(AnsibleHooks, self).__init__()
740
741+ self._actions = {}
742 self.playbook_path = playbook_path
743
744 default_hooks = default_hooks or []
745@@ -182,9 +214,41 @@
746 for hook in default_hooks:
747 self.register(hook, noop)
748
749+ def register_action(self, name, function):
750+ """Register a hook"""
751+ self._actions[name] = function
752+
753 def execute(self, args):
754 """Execute the hook followed by the playbook using the hook as tag."""
755- super(AnsibleHooks, self).execute(args)
756 hook_name = os.path.basename(args[0])
757+ extra_vars = None
758+ if hook_name in self._actions:
759+ extra_vars = self._actions[hook_name](args[1:])
760+ else:
761+ super(AnsibleHooks, self).execute(args)
762+
763 charmhelpers.contrib.ansible.apply_playbook(
764- self.playbook_path, tags=[hook_name])
765+ self.playbook_path, tags=[hook_name], extra_vars=extra_vars)
766+
767+ def action(self, *action_names):
768+ """Decorator, registering them as actions"""
769+ def action_wrapper(decorated):
770+
771+ @functools.wraps(decorated)
772+ def wrapper(argv):
773+ kwargs = dict(arg.split('=') for arg in argv)
774+ try:
775+ return decorated(**kwargs)
776+ except TypeError as e:
777+ if decorated.__doc__:
778+ e.args += (decorated.__doc__,)
779+ raise
780+
781+ self.register_action(decorated.__name__, wrapper)
782+ if '_' in decorated.__name__:
783+ self.register_action(
784+ decorated.__name__.replace('_', '-'), wrapper)
785+
786+ return wrapper
787+
788+ return action_wrapper
789
790=== added directory 'hooks/charmhelpers/contrib/benchmark'
791=== added file 'hooks/charmhelpers/contrib/benchmark/__init__.py'
792--- hooks/charmhelpers/contrib/benchmark/__init__.py 1970-01-01 00:00:00 +0000
793+++ hooks/charmhelpers/contrib/benchmark/__init__.py 2015-07-16 20:20:59 +0000
794@@ -0,0 +1,124 @@
795+# Copyright 2014-2015 Canonical Limited.
796+#
797+# This file is part of charm-helpers.
798+#
799+# charm-helpers is free software: you can redistribute it and/or modify
800+# it under the terms of the GNU Lesser General Public License version 3 as
801+# published by the Free Software Foundation.
802+#
803+# charm-helpers is distributed in the hope that it will be useful,
804+# but WITHOUT ANY WARRANTY; without even the implied warranty of
805+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
806+# GNU Lesser General Public License for more details.
807+#
808+# You should have received a copy of the GNU Lesser General Public License
809+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
810+
811+import subprocess
812+import time
813+import os
814+from distutils.spawn import find_executable
815+
816+from charmhelpers.core.hookenv import (
817+ in_relation_hook,
818+ relation_ids,
819+ relation_set,
820+ relation_get,
821+)
822+
823+
824+def action_set(key, val):
825+ if find_executable('action-set'):
826+ action_cmd = ['action-set']
827+
828+ if isinstance(val, dict):
829+ for k, v in iter(val.items()):
830+ action_set('%s.%s' % (key, k), v)
831+ return True
832+
833+ action_cmd.append('%s=%s' % (key, val))
834+ subprocess.check_call(action_cmd)
835+ return True
836+ return False
837+
838+
839+class Benchmark():
840+ """
841+ Helper class for the `benchmark` interface.
842+
843+    :param list benchmarks: List of action names that are also benchmarks
844+
845+    From inside the benchmark-relation-changed hook, you would call
846+ Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom'])
847+
848+ Examples:
849+
850+ siege = Benchmark(['siege'])
851+ siege.start()
852+ [... run siege ...]
853+ # The higher the score, the better the benchmark
854+ siege.set_composite_score(16.70, 'trans/sec', 'desc')
855+ siege.finish()
856+
857+
858+ """
859+
860+ required_keys = [
861+ 'hostname',
862+ 'port',
863+ 'graphite_port',
864+ 'graphite_endpoint',
865+ 'api_port'
866+ ]
867+
868+ def __init__(self, benchmarks=None):
869+ if in_relation_hook():
870+ if benchmarks is not None:
871+ for rid in sorted(relation_ids('benchmark')):
872+ relation_set(relation_id=rid, relation_settings={
873+ 'benchmarks': ",".join(benchmarks)
874+ })
875+
876+ # Check the relation data
877+ config = {}
878+ for key in self.required_keys:
879+ val = relation_get(key)
880+ if val is not None:
881+ config[key] = val
882+ else:
883+ # We don't have all of the required keys
884+ config = {}
885+ break
886+
887+ if len(config):
888+ with open('/etc/benchmark.conf', 'w') as f:
889+ for key, val in iter(config.items()):
890+ f.write("%s=%s\n" % (key, val))
891+
892+ @staticmethod
893+ def start():
894+ action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ'))
895+
896+ """
897+ If the collectd charm is also installed, tell it to send a snapshot
898+ of the current profile data.
899+ """
900+ COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data'
901+ if os.path.exists(COLLECT_PROFILE_DATA):
902+ subprocess.check_output([COLLECT_PROFILE_DATA])
903+
904+ @staticmethod
905+ def finish():
906+ action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ'))
907+
908+ @staticmethod
909+ def set_composite_score(value, units, direction='asc'):
910+ """
911+ Set the composite score for a benchmark run. This is a single number
912+ representative of the benchmark results. This could be the most
913+ important metric, or an amalgamation of metric scores.
914+ """
915+ return action_set(
916+ "meta.composite",
917+ {'value': value, 'units': units, 'direction': direction}
918+ )
919
920=== modified file 'hooks/charmhelpers/contrib/charmsupport/nrpe.py'
921--- hooks/charmhelpers/contrib/charmsupport/nrpe.py 2015-06-18 14:29:25 +0000
922+++ hooks/charmhelpers/contrib/charmsupport/nrpe.py 2015-07-16 20:20:59 +0000
923@@ -24,6 +24,8 @@
924 import pwd
925 import grp
926 import os
927+import glob
928+import shutil
929 import re
930 import shlex
931 import yaml
932@@ -161,7 +163,7 @@
933 log('Check command not found: {}'.format(parts[0]))
934 return ''
935
936- def write(self, nagios_context, hostname, nagios_servicegroups=None):
937+ def write(self, nagios_context, hostname, nagios_servicegroups):
938 nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
939 self.command)
940 with open(nrpe_check_file, 'w') as nrpe_check_config:
941@@ -177,14 +179,11 @@
942 nagios_servicegroups)
943
944 def write_service_config(self, nagios_context, hostname,
945- nagios_servicegroups=None):
946+ nagios_servicegroups):
947 for f in os.listdir(NRPE.nagios_exportdir):
948 if re.search('.*{}.cfg'.format(self.command), f):
949 os.remove(os.path.join(NRPE.nagios_exportdir, f))
950
951- if not nagios_servicegroups:
952- nagios_servicegroups = nagios_context
953-
954 templ_vars = {
955 'nagios_hostname': hostname,
956 'nagios_servicegroup': nagios_servicegroups,
957@@ -211,10 +210,10 @@
958 super(NRPE, self).__init__()
959 self.config = config()
960 self.nagios_context = self.config['nagios_context']
961- if 'nagios_servicegroups' in self.config:
962+ if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
963 self.nagios_servicegroups = self.config['nagios_servicegroups']
964 else:
965- self.nagios_servicegroups = 'juju'
966+ self.nagios_servicegroups = self.nagios_context
967 self.unit_name = local_unit().replace('/', '-')
968 if hostname:
969 self.hostname = hostname
970@@ -248,7 +247,9 @@
971
972 service('restart', 'nagios-nrpe-server')
973
974- for rid in relation_ids("local-monitors"):
975+ monitor_ids = relation_ids("local-monitors") + \
976+ relation_ids("nrpe-external-master")
977+ for rid in monitor_ids:
978 relation_set(relation_id=rid, monitors=yaml.dump(monitors))
979
980
981@@ -322,3 +323,38 @@
982 check_cmd='check_status_file.py -f '
983 '/var/lib/nagios/service-check-%s.txt' % svc,
984 )
985+
986+
987+def copy_nrpe_checks():
988+ """
989+ Copy the nrpe checks into place
990+
991+ """
992+ NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
993+ nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
994+ 'charmhelpers', 'contrib', 'openstack',
995+ 'files')
996+
997+ if not os.path.exists(NAGIOS_PLUGINS):
998+ os.makedirs(NAGIOS_PLUGINS)
999+ for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
1000+ if os.path.isfile(fname):
1001+ shutil.copy2(fname,
1002+ os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
1003+
1004+
1005+def add_haproxy_checks(nrpe, unit_name):
1006+ """
1007+ Add checks for each service in list
1008+
1009+ :param NRPE nrpe: NRPE object to add check to
1010+ :param str unit_name: Unit name to use in check description
1011+ """
1012+ nrpe.add_check(
1013+ shortname='haproxy_servers',
1014+ description='Check HAProxy {%s}' % unit_name,
1015+ check_cmd='check_haproxy.sh')
1016+ nrpe.add_check(
1017+ shortname='haproxy_queue',
1018+ description='Check HAProxy queue depth {%s}' % unit_name,
1019+ check_cmd='check_haproxy_queue_depth.sh')
1020
1021=== modified file 'hooks/charmhelpers/contrib/database/mysql.py'
1022--- hooks/charmhelpers/contrib/database/mysql.py 2015-06-18 14:29:25 +0000
1023+++ hooks/charmhelpers/contrib/database/mysql.py 2015-07-16 20:20:59 +0000
1024@@ -1,13 +1,12 @@
1025 """Helper for working with a MySQL database"""
1026 import json
1027-import socket
1028 import re
1029 import sys
1030 import platform
1031 import os
1032 import glob
1033
1034-from string import upper
1035+# from string import upper
1036
1037 from charmhelpers.core.host import (
1038 mkdir,
1039@@ -15,14 +14,15 @@
1040 write_file
1041 )
1042 from charmhelpers.core.hookenv import (
1043+ config as config_get,
1044 relation_get,
1045 related_units,
1046 unit_get,
1047 log,
1048 DEBUG,
1049 INFO,
1050+ WARNING,
1051 )
1052-from charmhelpers.core.hookenv import config as config_get
1053 from charmhelpers.fetch import (
1054 apt_install,
1055 apt_update,
1056@@ -32,6 +32,7 @@
1057 peer_store,
1058 peer_retrieve,
1059 )
1060+from charmhelpers.contrib.network.ip import get_host_ip
1061
1062 try:
1063 import MySQLdb
1064@@ -43,13 +44,20 @@
1065
1066 class MySQLHelper(object):
1067
1068- def __init__(self, rpasswdf_template, upasswdf_template, host='localhost'):
1069+ def __init__(self, rpasswdf_template, upasswdf_template, host='localhost',
1070+ migrate_passwd_to_peer_relation=True,
1071+ delete_ondisk_passwd_file=True):
1072 self.host = host
1073 # Password file path templates
1074 self.root_passwd_file_template = rpasswdf_template
1075 self.user_passwd_file_template = upasswdf_template
1076
1077+ self.migrate_passwd_to_peer_relation = migrate_passwd_to_peer_relation
1078+ # If we migrate we have the option to delete local copy of root passwd
1079+ self.delete_ondisk_passwd_file = delete_ondisk_passwd_file
1080+
1081 def connect(self, user='root', password=None):
1082+ log("Opening db connection for %s@%s" % (user, self.host), level=DEBUG)
1083 self.connection = MySQLdb.connect(user=user, host=self.host,
1084 passwd=password)
1085
1086@@ -126,18 +134,24 @@
1087 finally:
1088 cursor.close()
1089
1090- def migrate_passwords_to_peer_relation(self):
1091+ def migrate_passwords_to_peer_relation(self, excludes=None):
1092 """Migrate any passwords storage on disk to cluster peer relation."""
1093 dirname = os.path.dirname(self.root_passwd_file_template)
1094 path = os.path.join(dirname, '*.passwd')
1095 for f in glob.glob(path):
1096- _key = os.path.basename(f)
1097+ if excludes and f in excludes:
1098+ log("Excluding %s from peer migration" % (f), level=DEBUG)
1099+ continue
1100+
1101+ key = os.path.basename(f)
1102 with open(f, 'r') as passwd:
1103 _value = passwd.read().strip()
1104
1105 try:
1106- peer_store(_key, _value)
1107- os.unlink(f)
1108+ peer_store(key, _value)
1109+
1110+ if self.delete_ondisk_passwd_file:
1111+ os.unlink(f)
1112 except ValueError:
1113 # NOTE cluster relation not yet ready - skip for now
1114 pass
1115@@ -153,43 +167,92 @@
1116
1117 _password = None
1118 if os.path.exists(passwd_file):
1119+ log("Using existing password file '%s'" % passwd_file, level=DEBUG)
1120 with open(passwd_file, 'r') as passwd:
1121 _password = passwd.read().strip()
1122 else:
1123- mkdir(os.path.dirname(passwd_file), owner='root', group='root',
1124- perms=0o770)
1125- # Force permissions - for some reason the chmod in makedirs fails
1126- os.chmod(os.path.dirname(passwd_file), 0o770)
1127+ log("Generating new password file '%s'" % passwd_file, level=DEBUG)
1128+ if not os.path.isdir(os.path.dirname(passwd_file)):
1129+ # NOTE: need to ensure this is not mysql root dir (which needs
1130+ # to be mysql readable)
1131+ mkdir(os.path.dirname(passwd_file), owner='root', group='root',
1132+ perms=0o770)
1133+ # Force permissions - for some reason the chmod in makedirs
1134+ # fails
1135+ os.chmod(os.path.dirname(passwd_file), 0o770)
1136+
1137 _password = password or pwgen(length=32)
1138 write_file(passwd_file, _password, owner='root', group='root',
1139 perms=0o660)
1140
1141 return _password
1142
1143+ def passwd_keys(self, username):
1144+ """Generator to return keys used to store passwords in peer store.
1145+
1146+ NOTE: we support both legacy and new format to support mysql
1147+ charm prior to refactor. This is necessary to avoid LP 1451890.
1148+ """
1149+ keys = []
1150+ if username == 'mysql':
1151+ log("Bad username '%s'" % (username), level=WARNING)
1152+
1153+ if username:
1154+ # IMPORTANT: *newer* format must be returned first
1155+ keys.append('mysql-%s.passwd' % (username))
1156+ keys.append('%s.passwd' % (username))
1157+ else:
1158+ keys.append('mysql.passwd')
1159+
1160+ for key in keys:
1161+ yield key
1162+
1163 def get_mysql_password(self, username=None, password=None):
1164 """Retrieve, generate or store a mysql password for the provided
1165 username using peer relation cluster."""
1166- self.migrate_passwords_to_peer_relation()
1167- if username:
1168- _key = 'mysql-{}.passwd'.format(username)
1169- else:
1170- _key = 'mysql.passwd'
1171+ excludes = []
1172
1173+ # First check peer relation.
1174 try:
1175- _password = peer_retrieve(_key)
1176- if _password is None:
1177- _password = password or pwgen(length=32)
1178- peer_store(_key, _password)
1179+ for key in self.passwd_keys(username):
1180+ _password = peer_retrieve(key)
1181+ if _password:
1182+ break
1183+
1184+ # If root password available don't update peer relation from local
1185+ if _password and not username:
1186+ excludes.append(self.root_passwd_file_template)
1187+
1188 except ValueError:
1189 # cluster relation is not yet started; use on-disk
1190+ _password = None
1191+
1192+ # If none available, generate new one
1193+ if not _password:
1194 _password = self.get_mysql_password_on_disk(username, password)
1195
1196+ # Put on wire if required
1197+ if self.migrate_passwd_to_peer_relation:
1198+ self.migrate_passwords_to_peer_relation(excludes=excludes)
1199+
1200 return _password
1201
1202 def get_mysql_root_password(self, password=None):
1203 """Retrieve or generate mysql root password for service units."""
1204 return self.get_mysql_password(username=None, password=password)
1205
1206+ def normalize_address(self, hostname):
1207+ """Ensure that address returned is an IP address (i.e. not fqdn)"""
1208+ if config_get('prefer-ipv6'):
1209+ # TODO: add support for ipv6 dns
1210+ return hostname
1211+
1212+ if hostname != unit_get('private-address'):
1213+ return get_host_ip(hostname, fallback=hostname)
1214+
1215+ # Otherwise assume localhost
1216+ return '127.0.0.1'
1217+
1218 def get_allowed_units(self, database, username, relation_id=None):
1219 """Get list of units with access grants for database with username.
1220
1221@@ -217,6 +280,7 @@
1222
1223 if hosts:
1224 for host in hosts:
1225+ host = self.normalize_address(host)
1226 if self.grant_exists(database, username, host):
1227 log("Grant exists for host '%s' on db '%s'" %
1228 (host, database), level=DEBUG)
1229@@ -232,21 +296,11 @@
1230
1231 def configure_db(self, hostname, database, username, admin=False):
1232 """Configure access to database for username from hostname."""
1233- if config_get('prefer-ipv6'):
1234- remote_ip = hostname
1235- elif hostname != unit_get('private-address'):
1236- try:
1237- remote_ip = socket.gethostbyname(hostname)
1238- except Exception:
1239- # socket.gethostbyname doesn't support ipv6
1240- remote_ip = hostname
1241- else:
1242- remote_ip = '127.0.0.1'
1243-
1244 self.connect(password=self.get_mysql_root_password())
1245 if not self.database_exists(database):
1246 self.create_database(database)
1247
1248+ remote_ip = self.normalize_address(hostname)
1249 password = self.get_mysql_password(username)
1250 if not self.grant_exists(database, username, remote_ip):
1251 if not admin:
1252@@ -259,9 +313,11 @@
1253
1254 class PerconaClusterHelper(object):
1255
1256- # Going for the biggest page size to avoid wasted bytes. InnoDB page size is
1257- # 16MB
1258+ # Going for the biggest page size to avoid wasted bytes.
1259+ # InnoDB page size is 16MB
1260+
1261 DEFAULT_PAGE_SIZE = 16 * 1024 * 1024
1262+ DEFAULT_INNODB_BUFFER_FACTOR = 0.50
1263
1264 def human_to_bytes(self, human):
1265 """Convert human readable configuration options to bytes."""
1266@@ -313,7 +369,7 @@
1267 key, mem = line.split(':', 2)
1268 if key == 'MemTotal':
1269 mtot, modifier = mem.strip().split(' ')
1270- return '%s%s' % (mtot, upper(modifier[0]))
1271+ return '%s%s' % (mtot, modifier[0].upper())
1272
1273 def parse_config(self):
1274 """Parse charm configuration and calculate values for config files."""
1275@@ -322,51 +378,35 @@
1276 if 'max-connections' in config:
1277 mysql_config['max_connections'] = config['max-connections']
1278
1279- # Total memory available for dataset
1280- dataset_bytes = self.human_to_bytes(config['dataset-size'])
1281- mysql_config['dataset_bytes'] = dataset_bytes
1282-
1283- if 'query-cache-type' in config:
1284- # Query Cache Configuration
1285- mysql_config['query_cache_size'] = config['query-cache-size']
1286- if (config['query-cache-size'] == -1 and
1287- config['query-cache-type'] in ['ON', 'DEMAND']):
1288- # Calculate the query cache size automatically
1289- qcache_bytes = (dataset_bytes * 0.20)
1290- qcache_bytes = int(qcache_bytes -
1291- (qcache_bytes % self.DEFAULT_PAGE_SIZE))
1292- mysql_config['query_cache_size'] = qcache_bytes
1293- dataset_bytes -= qcache_bytes
1294-
1295- # 5.5 allows the words, but not 5.1
1296- if config['query-cache-type'] == 'ON':
1297- mysql_config['query_cache_type'] = 1
1298- elif config['query-cache-type'] == 'DEMAND':
1299- mysql_config['query_cache_type'] = 2
1300- else:
1301- mysql_config['query_cache_type'] = 0
1302+ if 'wait-timeout' in config:
1303+ mysql_config['wait_timeout'] = config['wait-timeout']
1304+
1305+ if 'innodb-flush-log-at-trx-commit' in config:
1306+ mysql_config['innodb_flush_log_at_trx_commit'] = config['innodb-flush-log-at-trx-commit']
1307
1308 # Set a sane default key_buffer size
1309 mysql_config['key_buffer'] = self.human_to_bytes('32M')
1310-
1311- if 'preferred-storage-engine' in config:
1312- # Storage engine configuration
1313- preferred_engines = config['preferred-storage-engine'].split(',')
1314- chunk_size = int(dataset_bytes / len(preferred_engines))
1315- mysql_config['innodb_flush_log_at_trx_commit'] = 1
1316- mysql_config['sync_binlog'] = 1
1317- if 'InnoDB' in preferred_engines:
1318- mysql_config['innodb_buffer_pool_size'] = chunk_size
1319- if config['tuning-level'] == 'fast':
1320- mysql_config['innodb_flush_log_at_trx_commit'] = 2
1321- else:
1322- mysql_config['innodb_buffer_pool_size'] = 0
1323-
1324- mysql_config['default_storage_engine'] = preferred_engines[0]
1325- if 'MyISAM' in preferred_engines:
1326- mysql_config['key_buffer'] = chunk_size
1327-
1328- if config['tuning-level'] == 'fast':
1329- mysql_config['sync_binlog'] = 0
1330-
1331+ total_memory = self.human_to_bytes(self.get_mem_total())
1332+
1333+ dataset_bytes = config.get('dataset-size', None)
1334+ innodb_buffer_pool_size = config.get('innodb-buffer-pool-size', None)
1335+
1336+ if innodb_buffer_pool_size:
1337+ innodb_buffer_pool_size = self.human_to_bytes(
1338+ innodb_buffer_pool_size)
1339+ elif dataset_bytes:
1340+            log("Option 'dataset-size' has been deprecated, please use "
1341+ "innodb_buffer_pool_size option instead", level="WARN")
1342+ innodb_buffer_pool_size = self.human_to_bytes(
1343+ dataset_bytes)
1344+ else:
1345+ innodb_buffer_pool_size = int(
1346+ total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR)
1347+
1348+ if innodb_buffer_pool_size > total_memory:
1349+            log("innodb_buffer_pool_size: {} is greater than system available memory: {}".format(
1350+ innodb_buffer_pool_size,
1351+ total_memory), level='WARN')
1352+
1353+ mysql_config['innodb_buffer_pool_size'] = innodb_buffer_pool_size
1354 return mysql_config
1355
1356=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
1357--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2015-06-18 14:29:25 +0000
1358+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2015-07-16 20:20:59 +0000
1359@@ -44,10 +44,16 @@
1360 ERROR,
1361 WARNING,
1362 unit_get,
1363+ is_leader as juju_is_leader
1364 )
1365 from charmhelpers.core.decorators import (
1366 retry_on_exception,
1367 )
1368+from charmhelpers.core.strutils import (
1369+ bool_from_string,
1370+)
1371+
1372+DC_RESOURCE_NAME = 'DC'
1373
1374
1375 class HAIncompleteConfig(Exception):
1376@@ -58,17 +64,30 @@
1377 pass
1378
1379
1380+class CRMDCNotFound(Exception):
1381+ pass
1382+
1383+
1384 def is_elected_leader(resource):
1385 """
1386 Returns True if the charm executing this is the elected cluster leader.
1387
1388 It relies on two mechanisms to determine leadership:
1389- 1. If the charm is part of a corosync cluster, call corosync to
1390+ 1. If juju is sufficiently new and leadership election is supported,
1391+ the is_leader command will be used.
1392+ 2. If the charm is part of a corosync cluster, call corosync to
1393 determine leadership.
1394- 2. If the charm is not part of a corosync cluster, the leader is
1395+ 3. If the charm is not part of a corosync cluster, the leader is
1396 determined as being "the alive unit with the lowest unit numer". In
1397 other words, the oldest surviving unit.
1398 """
1399+ try:
1400+ return juju_is_leader()
1401+ except NotImplementedError:
1402+ log('Juju leadership election feature not enabled'
1403+ ', using fallback support',
1404+ level=WARNING)
1405+
1406 if is_clustered():
1407 if not is_crm_leader(resource):
1408 log('Deferring action to CRM leader.', level=INFO)
1409@@ -92,7 +111,33 @@
1410 return False
1411
1412
1413-@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
1414+def is_crm_dc():
1415+ """
1416+ Determine leadership by querying the pacemaker Designated Controller
1417+ """
1418+ cmd = ['crm', 'status']
1419+ try:
1420+ status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
1421+ if not isinstance(status, six.text_type):
1422+ status = six.text_type(status, "utf-8")
1423+ except subprocess.CalledProcessError as ex:
1424+ raise CRMDCNotFound(str(ex))
1425+
1426+ current_dc = ''
1427+ for line in status.split('\n'):
1428+ if line.startswith('Current DC'):
1429+ # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
1430+ current_dc = line.split(':')[1].split()[0]
1431+ if current_dc == get_unit_hostname():
1432+ return True
1433+ elif current_dc == 'NONE':
1434+ raise CRMDCNotFound('Current DC: NONE')
1435+
1436+ return False
1437+
1438+
1439+@retry_on_exception(5, base_delay=2,
1440+ exc_type=(CRMResourceNotFound, CRMDCNotFound))
1441 def is_crm_leader(resource, retry=False):
1442 """
1443 Returns True if the charm calling this is the elected corosync leader,
1444@@ -101,6 +146,8 @@
1445 We allow this operation to be retried to avoid the possibility of getting a
1446 false negative. See LP #1396246 for more info.
1447 """
1448+ if resource == DC_RESOURCE_NAME:
1449+ return is_crm_dc()
1450 cmd = ['crm', 'resource', 'show', resource]
1451 try:
1452 status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
1453@@ -164,7 +211,8 @@
1454 .
1455 returns: boolean
1456 '''
1457- if config_get('use-https') == "yes":
1458+ use_https = config_get('use-https')
1459+ if use_https and bool_from_string(use_https):
1460 return True
1461 if config_get('ssl_cert') and config_get('ssl_key'):
1462 return True
1463
1464=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
1465--- hooks/charmhelpers/contrib/network/ip.py 2015-06-18 14:29:25 +0000
1466+++ hooks/charmhelpers/contrib/network/ip.py 2015-07-16 20:20:59 +0000
1467@@ -17,13 +17,16 @@
1468 import glob
1469 import re
1470 import subprocess
1471+import six
1472+import socket
1473
1474 from functools import partial
1475
1476 from charmhelpers.core.hookenv import unit_get
1477 from charmhelpers.fetch import apt_install
1478 from charmhelpers.core.hookenv import (
1479- log
1480+ log,
1481+ WARNING,
1482 )
1483
1484 try:
1485@@ -365,3 +368,83 @@
1486 return True
1487
1488 return False
1489+
1490+
1491+def is_ip(address):
1492+ """
1493+ Returns True if address is a valid IP address.
1494+ """
1495+ try:
1496+ # Test to see if already an IPv4 address
1497+ socket.inet_aton(address)
1498+ return True
1499+ except socket.error:
1500+ return False
1501+
1502+
1503+def ns_query(address):
1504+ try:
1505+ import dns.resolver
1506+ except ImportError:
1507+ apt_install('python-dnspython')
1508+ import dns.resolver
1509+
1510+ if isinstance(address, dns.name.Name):
1511+ rtype = 'PTR'
1512+ elif isinstance(address, six.string_types):
1513+ rtype = 'A'
1514+ else:
1515+ return None
1516+
1517+ answers = dns.resolver.query(address, rtype)
1518+ if answers:
1519+ return str(answers[0])
1520+ return None
1521+
1522+
1523+def get_host_ip(hostname, fallback=None):
1524+ """
1525+ Resolves the IP for a given hostname, or returns
1526+ the input if it is already an IP.
1527+ """
1528+ if is_ip(hostname):
1529+ return hostname
1530+
1531+ ip_addr = ns_query(hostname)
1532+ if not ip_addr:
1533+ try:
1534+ ip_addr = socket.gethostbyname(hostname)
1535+ except:
1536+ log("Failed to resolve hostname '%s'" % (hostname),
1537+ level=WARNING)
1538+ return fallback
1539+ return ip_addr
1540+
1541+
1542+def get_hostname(address, fqdn=True):
1543+ """
1544+ Resolves hostname for given IP, or returns the input
1545+ if it is already a hostname.
1546+ """
1547+ if is_ip(address):
1548+ try:
1549+ import dns.reversename
1550+ except ImportError:
1551+ apt_install("python-dnspython")
1552+ import dns.reversename
1553+
1554+ rev = dns.reversename.from_address(address)
1555+ result = ns_query(rev)
1556+ if not result:
1557+ return None
1558+ else:
1559+ result = address
1560+
1561+ if fqdn:
1562+ # strip trailing .
1563+ if result.endswith('.'):
1564+ return result[:-1]
1565+ else:
1566+ return result
1567+ else:
1568+ return result.split('.')[0]
1569
1570=== modified file 'hooks/charmhelpers/contrib/network/ufw.py'
1571--- hooks/charmhelpers/contrib/network/ufw.py 2015-06-18 14:29:25 +0000
1572+++ hooks/charmhelpers/contrib/network/ufw.py 2015-07-16 20:20:59 +0000
1573@@ -180,7 +180,43 @@
1574 return True
1575
1576
1577-def modify_access(src, dst='any', port=None, proto=None, action='allow'):
1578+def default_policy(policy='deny', direction='incoming'):
1579+ """
1580+ Changes the default policy for traffic `direction`
1581+
1582+ :param policy: allow, deny or reject
1583+ :param direction: traffic direction, possible values: incoming, outgoing,
1584+ routed
1585+ """
1586+ if policy not in ['allow', 'deny', 'reject']:
1587+ raise UFWError(('Unknown policy %s, valid values: '
1588+ 'allow, deny, reject') % policy)
1589+
1590+ if direction not in ['incoming', 'outgoing', 'routed']:
1591+ raise UFWError(('Unknown direction %s, valid values: '
1592+ 'incoming, outgoing, routed') % direction)
1593+
1594+ output = subprocess.check_output(['ufw', 'default', policy, direction],
1595+ universal_newlines=True,
1596+ env={'LANG': 'en_US',
1597+ 'PATH': os.environ['PATH']})
1598+ hookenv.log(output, level='DEBUG')
1599+
1600+ m = re.findall("^Default %s policy changed to '%s'\n" % (direction,
1601+ policy),
1602+ output, re.M)
1603+ if len(m) == 0:
1604+ hookenv.log("ufw couldn't change the default policy to %s for %s"
1605+ % (policy, direction), level='WARN')
1606+ return False
1607+ else:
1608+ hookenv.log("ufw default policy for %s changed to %s"
1609+ % (direction, policy), level='INFO')
1610+ return True
1611+
1612+
1613+def modify_access(src, dst='any', port=None, proto=None, action='allow',
1614+ index=None):
1615 """
1616 Grant access to an address or subnet
1617
1618@@ -192,6 +228,8 @@
1619 :param port: destiny port
1620 :param proto: protocol (tcp or udp)
1621 :param action: `allow` or `delete`
1622+ :param index: if different from None the rule is inserted at the given
1623+ `index`.
1624 """
1625 if not is_enabled():
1626 hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
1627@@ -199,6 +237,8 @@
1628
1629 if action == 'delete':
1630 cmd = ['ufw', 'delete', 'allow']
1631+ elif index is not None:
1632+ cmd = ['ufw', 'insert', str(index), action]
1633 else:
1634 cmd = ['ufw', action]
1635
1636@@ -227,7 +267,7 @@
1637 level='ERROR')
1638
1639
1640-def grant_access(src, dst='any', port=None, proto=None):
1641+def grant_access(src, dst='any', port=None, proto=None, index=None):
1642 """
1643 Grant access to an address or subnet
1644
1645@@ -238,8 +278,11 @@
1646 field has to be set.
1647 :param port: destiny port
1648 :param proto: protocol (tcp or udp)
1649+ :param index: if different from None the rule is inserted at the given
1650+ `index`.
1651 """
1652- return modify_access(src, dst=dst, port=port, proto=proto, action='allow')
1653+ return modify_access(src, dst=dst, port=port, proto=proto, action='allow',
1654+ index=index)
1655
1656
1657 def revoke_access(src, dst='any', port=None, proto=None):
1658
1659=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
1660--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-06-18 14:29:25 +0000
1661+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-07-16 20:20:59 +0000
1662@@ -15,6 +15,7 @@
1663 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1664
1665 import six
1666+from collections import OrderedDict
1667 from charmhelpers.contrib.amulet.deployment import (
1668 AmuletDeployment
1669 )
1670@@ -43,17 +44,24 @@
1671 Determine if the local branch being tested is derived from its
1672 stable or next (dev) branch, and based on this, use the corresonding
1673 stable or next branches for the other_services."""
1674- base_charms = ['mysql', 'mongodb', 'rabbitmq-server']
1675+ base_charms = ['mysql', 'mongodb']
1676+
1677+ if self.series in ['precise', 'trusty']:
1678+ base_series = self.series
1679+ else:
1680+ base_series = self.current_next
1681
1682 if self.stable:
1683 for svc in other_services:
1684- temp = 'lp:charms/{}'
1685- svc['location'] = temp.format(svc['name'])
1686+ temp = 'lp:charms/{}/{}'
1687+ svc['location'] = temp.format(base_series,
1688+ svc['name'])
1689 else:
1690 for svc in other_services:
1691 if svc['name'] in base_charms:
1692- temp = 'lp:charms/{}'
1693- svc['location'] = temp.format(svc['name'])
1694+ temp = 'lp:charms/{}/{}'
1695+ svc['location'] = temp.format(base_series,
1696+ svc['name'])
1697 else:
1698 temp = 'lp:~openstack-charmers/charms/{}/{}/next'
1699 svc['location'] = temp.format(self.current_next,
1700@@ -71,16 +79,19 @@
1701 services.append(this_service)
1702 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
1703 'ceph-osd', 'ceph-radosgw']
1704+ # Most OpenStack subordinate charms do not expose an origin option
1705+ # as that is controlled by the principle.
1706+        # as that is controlled by the principal charm.
1707
1708 if self.openstack:
1709 for svc in services:
1710- if svc['name'] not in use_source:
1711+ if svc['name'] not in use_source + ignore:
1712 config = {'openstack-origin': self.openstack}
1713 self.d.configure(svc['name'], config)
1714
1715 if self.source:
1716 for svc in services:
1717- if svc['name'] in use_source:
1718+ if svc['name'] in use_source and svc['name'] not in ignore:
1719 config = {'source': self.source}
1720 self.d.configure(svc['name'], config)
1721
1722@@ -95,14 +106,78 @@
1723 Return an integer representing the enum value of the openstack
1724 release.
1725 """
1726+ # Must be ordered by OpenStack release (not by Ubuntu release):
1727 (self.precise_essex, self.precise_folsom, self.precise_grizzly,
1728 self.precise_havana, self.precise_icehouse,
1729- self.trusty_icehouse) = range(6)
1730+ self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
1731+ self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
1732+ self.wily_liberty) = range(12)
1733+
1734 releases = {
1735 ('precise', None): self.precise_essex,
1736 ('precise', 'cloud:precise-folsom'): self.precise_folsom,
1737 ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
1738 ('precise', 'cloud:precise-havana'): self.precise_havana,
1739 ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
1740- ('trusty', None): self.trusty_icehouse}
1741+ ('trusty', None): self.trusty_icehouse,
1742+ ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
1743+ ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
1744+ ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
1745+ ('utopic', None): self.utopic_juno,
1746+ ('vivid', None): self.vivid_kilo,
1747+ ('wily', None): self.wily_liberty}
1748 return releases[(self.series, self.openstack)]
1749+
1750+ def _get_openstack_release_string(self):
1751+ """Get openstack release string.
1752+
1753+ Return a string representing the openstack release.
1754+ """
1755+ releases = OrderedDict([
1756+ ('precise', 'essex'),
1757+ ('quantal', 'folsom'),
1758+ ('raring', 'grizzly'),
1759+ ('saucy', 'havana'),
1760+ ('trusty', 'icehouse'),
1761+ ('utopic', 'juno'),
1762+ ('vivid', 'kilo'),
1763+ ('wily', 'liberty'),
1764+ ])
1765+ if self.openstack:
1766+ os_origin = self.openstack.split(':')[1]
1767+ return os_origin.split('%s-' % self.series)[1].split('/')[0]
1768+ else:
1769+ return releases[self.series]
1770+
1771+ def get_ceph_expected_pools(self, radosgw=False):
1772+ """Return a list of expected ceph pools in a ceph + cinder + glance
1773+ test scenario, based on OpenStack release and whether ceph radosgw
1774+ is flagged as present or not."""
1775+
1776+ if self._get_openstack_release() >= self.trusty_kilo:
1777+ # Kilo or later
1778+ pools = [
1779+ 'rbd',
1780+ 'cinder',
1781+ 'glance'
1782+ ]
1783+ else:
1784+ # Juno or earlier
1785+ pools = [
1786+ 'data',
1787+ 'metadata',
1788+ 'rbd',
1789+ 'cinder',
1790+ 'glance'
1791+ ]
1792+
1793+ if radosgw:
1794+ pools.extend([
1795+ '.rgw.root',
1796+ '.rgw.control',
1797+ '.rgw',
1798+ '.rgw.gc',
1799+ '.users.uid'
1800+ ])
1801+
1802+ return pools
1803
1804=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
1805--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-06-18 14:29:25 +0000
1806+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-07-16 20:20:59 +0000
1807@@ -14,16 +14,20 @@
1808 # You should have received a copy of the GNU Lesser General Public License
1809 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1810
1811+import amulet
1812+import json
1813 import logging
1814 import os
1815+import six
1816 import time
1817 import urllib
1818
1819+import cinderclient.v1.client as cinder_client
1820 import glanceclient.v1.client as glance_client
1821+import heatclient.v1.client as heat_client
1822 import keystoneclient.v2_0 as keystone_client
1823 import novaclient.v1_1.client as nova_client
1824-
1825-import six
1826+import swiftclient
1827
1828 from charmhelpers.contrib.amulet.utils import (
1829 AmuletUtils
1830@@ -37,7 +41,7 @@
1831 """OpenStack amulet utilities.
1832
1833 This class inherits from AmuletUtils and has additional support
1834- that is specifically for use by OpenStack charms.
1835+ that is specifically for use by OpenStack charm tests.
1836 """
1837
1838 def __init__(self, log_level=ERROR):
1839@@ -51,6 +55,8 @@
1840 Validate actual endpoint data vs expected endpoint data. The ports
1841 are used to find the matching endpoint.
1842 """
1843+ self.log.debug('Validating endpoint data...')
1844+ self.log.debug('actual: {}'.format(repr(endpoints)))
1845 found = False
1846 for ep in endpoints:
1847 self.log.debug('endpoint: {}'.format(repr(ep)))
1848@@ -77,6 +83,7 @@
1849 Validate a list of actual service catalog endpoints vs a list of
1850 expected service catalog endpoints.
1851 """
1852+ self.log.debug('Validating service catalog endpoint data...')
1853 self.log.debug('actual: {}'.format(repr(actual)))
1854 for k, v in six.iteritems(expected):
1855 if k in actual:
1856@@ -93,6 +100,7 @@
1857 Validate a list of actual tenant data vs list of expected tenant
1858 data.
1859 """
1860+ self.log.debug('Validating tenant data...')
1861 self.log.debug('actual: {}'.format(repr(actual)))
1862 for e in expected:
1863 found = False
1864@@ -114,6 +122,7 @@
1865 Validate a list of actual role data vs a list of expected role
1866 data.
1867 """
1868+ self.log.debug('Validating role data...')
1869 self.log.debug('actual: {}'.format(repr(actual)))
1870 for e in expected:
1871 found = False
1872@@ -134,6 +143,7 @@
1873 Validate a list of actual user data vs a list of expected user
1874 data.
1875 """
1876+ self.log.debug('Validating user data...')
1877 self.log.debug('actual: {}'.format(repr(actual)))
1878 for e in expected:
1879 found = False
1880@@ -155,17 +165,30 @@
1881
1882 Validate a list of actual flavors vs a list of expected flavors.
1883 """
1884+ self.log.debug('Validating flavor data...')
1885 self.log.debug('actual: {}'.format(repr(actual)))
1886 act = [a.name for a in actual]
1887 return self._validate_list_data(expected, act)
1888
1889 def tenant_exists(self, keystone, tenant):
1890 """Return True if tenant exists."""
1891+ self.log.debug('Checking if tenant exists ({})...'.format(tenant))
1892 return tenant in [t.name for t in keystone.tenants.list()]
1893
1894+ def authenticate_cinder_admin(self, keystone_sentry, username,
1895+ password, tenant):
1896+ """Authenticates admin user with cinder."""
1897+ # NOTE(beisner): cinder python client doesn't accept tokens.
1898+ service_ip = \
1899+ keystone_sentry.relation('shared-db',
1900+ 'mysql:shared-db')['private-address']
1901+ ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
1902+ return cinder_client.Client(username, password, tenant, ept)
1903+
1904 def authenticate_keystone_admin(self, keystone_sentry, user, password,
1905 tenant):
1906 """Authenticates admin user with the keystone admin endpoint."""
1907+ self.log.debug('Authenticating keystone admin...')
1908 unit = keystone_sentry
1909 service_ip = unit.relation('shared-db',
1910 'mysql:shared-db')['private-address']
1911@@ -175,6 +198,7 @@
1912
1913 def authenticate_keystone_user(self, keystone, user, password, tenant):
1914 """Authenticates a regular user with the keystone public endpoint."""
1915+ self.log.debug('Authenticating keystone user ({})...'.format(user))
1916 ep = keystone.service_catalog.url_for(service_type='identity',
1917 endpoint_type='publicURL')
1918 return keystone_client.Client(username=user, password=password,
1919@@ -182,19 +206,49 @@
1920
1921 def authenticate_glance_admin(self, keystone):
1922 """Authenticates admin user with glance."""
1923+ self.log.debug('Authenticating glance admin...')
1924 ep = keystone.service_catalog.url_for(service_type='image',
1925 endpoint_type='adminURL')
1926 return glance_client.Client(ep, token=keystone.auth_token)
1927
1928+ def authenticate_heat_admin(self, keystone):
1929+ """Authenticates the admin user with heat."""
1930+ self.log.debug('Authenticating heat admin...')
1931+ ep = keystone.service_catalog.url_for(service_type='orchestration',
1932+ endpoint_type='publicURL')
1933+ return heat_client.Client(endpoint=ep, token=keystone.auth_token)
1934+
1935 def authenticate_nova_user(self, keystone, user, password, tenant):
1936 """Authenticates a regular user with nova-api."""
1937+ self.log.debug('Authenticating nova user ({})...'.format(user))
1938 ep = keystone.service_catalog.url_for(service_type='identity',
1939 endpoint_type='publicURL')
1940 return nova_client.Client(username=user, api_key=password,
1941 project_id=tenant, auth_url=ep)
1942
1943+ def authenticate_swift_user(self, keystone, user, password, tenant):
1944+ """Authenticates a regular user with swift api."""
1945+ self.log.debug('Authenticating swift user ({})...'.format(user))
1946+ ep = keystone.service_catalog.url_for(service_type='identity',
1947+ endpoint_type='publicURL')
1948+ return swiftclient.Connection(authurl=ep,
1949+ user=user,
1950+ key=password,
1951+ tenant_name=tenant,
1952+ auth_version='2.0')
1953+
1954 def create_cirros_image(self, glance, image_name):
1955- """Download the latest cirros image and upload it to glance."""
1956+ """Download the latest cirros image and upload it to glance,
1957+ validate and return a resource pointer.
1958+
1959+ :param glance: pointer to authenticated glance connection
1960+ :param image_name: display name for new image
1961+ :returns: glance image pointer
1962+ """
1963+ self.log.debug('Creating glance cirros image '
1964+ '({})...'.format(image_name))
1965+
1966+ # Download cirros image
1967 http_proxy = os.getenv('AMULET_HTTP_PROXY')
1968 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
1969 if http_proxy:
1970@@ -203,57 +257,67 @@
1971 else:
1972 opener = urllib.FancyURLopener()
1973
1974- f = opener.open("http://download.cirros-cloud.net/version/released")
1975+ f = opener.open('http://download.cirros-cloud.net/version/released')
1976 version = f.read().strip()
1977- cirros_img = "cirros-{}-x86_64-disk.img".format(version)
1978+ cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
1979 local_path = os.path.join('tests', cirros_img)
1980
1981 if not os.path.exists(local_path):
1982- cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
1983+ cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
1984 version, cirros_img)
1985 opener.retrieve(cirros_url, local_path)
1986 f.close()
1987
1988+ # Create glance image
1989 with open(local_path) as f:
1990 image = glance.images.create(name=image_name, is_public=True,
1991 disk_format='qcow2',
1992 container_format='bare', data=f)
1993- count = 1
1994- status = image.status
1995- while status != 'active' and count < 10:
1996- time.sleep(3)
1997- image = glance.images.get(image.id)
1998- status = image.status
1999- self.log.debug('image status: {}'.format(status))
2000- count += 1
2001-
2002- if status != 'active':
2003- self.log.error('image creation timed out')
2004- return None
2005+
2006+ # Wait for image to reach active status
2007+ img_id = image.id
2008+ ret = self.resource_reaches_status(glance.images, img_id,
2009+ expected_stat='active',
2010+ msg='Image status wait')
2011+ if not ret:
2012+ msg = 'Glance image failed to reach expected state.'
2013+ amulet.raise_status(amulet.FAIL, msg=msg)
2014+
2015+ # Re-validate new image
2016+ self.log.debug('Validating image attributes...')
2017+ val_img_name = glance.images.get(img_id).name
2018+ val_img_stat = glance.images.get(img_id).status
2019+ val_img_pub = glance.images.get(img_id).is_public
2020+ val_img_cfmt = glance.images.get(img_id).container_format
2021+ val_img_dfmt = glance.images.get(img_id).disk_format
2022+ msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
2023+ 'container fmt:{} disk fmt:{}'.format(
2024+ val_img_name, val_img_pub, img_id,
2025+ val_img_stat, val_img_cfmt, val_img_dfmt))
2026+
2027+ if val_img_name == image_name and val_img_stat == 'active' \
2028+ and val_img_pub is True and val_img_cfmt == 'bare' \
2029+ and val_img_dfmt == 'qcow2':
2030+ self.log.debug(msg_attr)
2031+ else:
2032+ msg = ('Volume validation failed, {}'.format(msg_attr))
2033+ amulet.raise_status(amulet.FAIL, msg=msg)
2034
2035 return image
2036
2037 def delete_image(self, glance, image):
2038 """Delete the specified image."""
2039- num_before = len(list(glance.images.list()))
2040- glance.images.delete(image)
2041-
2042- count = 1
2043- num_after = len(list(glance.images.list()))
2044- while num_after != (num_before - 1) and count < 10:
2045- time.sleep(3)
2046- num_after = len(list(glance.images.list()))
2047- self.log.debug('number of images: {}'.format(num_after))
2048- count += 1
2049-
2050- if num_after != (num_before - 1):
2051- self.log.error('image deletion timed out')
2052- return False
2053-
2054- return True
2055+
2056+ # /!\ DEPRECATION WARNING
2057+ self.log.warn('/!\\ DEPRECATION WARNING: use '
2058+ 'delete_resource instead of delete_image.')
2059+ self.log.debug('Deleting glance image ({})...'.format(image))
2060+ return self.delete_resource(glance.images, image, msg='glance image')
2061
2062 def create_instance(self, nova, image_name, instance_name, flavor):
2063 """Create the specified instance."""
2064+ self.log.debug('Creating instance '
2065+ '({}|{}|{})'.format(instance_name, image_name, flavor))
2066 image = nova.images.find(name=image_name)
2067 flavor = nova.flavors.find(name=flavor)
2068 instance = nova.servers.create(name=instance_name, image=image,
2069@@ -276,19 +340,265 @@
2070
2071 def delete_instance(self, nova, instance):
2072 """Delete the specified instance."""
2073- num_before = len(list(nova.servers.list()))
2074- nova.servers.delete(instance)
2075-
2076- count = 1
2077- num_after = len(list(nova.servers.list()))
2078- while num_after != (num_before - 1) and count < 10:
2079- time.sleep(3)
2080- num_after = len(list(nova.servers.list()))
2081- self.log.debug('number of instances: {}'.format(num_after))
2082- count += 1
2083-
2084- if num_after != (num_before - 1):
2085- self.log.error('instance deletion timed out')
2086- return False
2087-
2088- return True
2089+
2090+ # /!\ DEPRECATION WARNING
2091+ self.log.warn('/!\\ DEPRECATION WARNING: use '
2092+ 'delete_resource instead of delete_instance.')
2093+ self.log.debug('Deleting instance ({})...'.format(instance))
2094+ return self.delete_resource(nova.servers, instance,
2095+ msg='nova instance')
2096+
2097+ def create_or_get_keypair(self, nova, keypair_name="testkey"):
2098+ """Create a new keypair, or return pointer if it already exists."""
2099+ try:
2100+ _keypair = nova.keypairs.get(keypair_name)
2101+ self.log.debug('Keypair ({}) already exists, '
2102+ 'using it.'.format(keypair_name))
2103+ return _keypair
2104+ except:
2105+ self.log.debug('Keypair ({}) does not exist, '
2106+ 'creating it.'.format(keypair_name))
2107+
2108+ _keypair = nova.keypairs.create(name=keypair_name)
2109+ return _keypair
2110+
2111+ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
2112+ img_id=None, src_vol_id=None, snap_id=None):
2113+ """Create cinder volume, optionally from a glance image, OR
2114+ optionally as a clone of an existing volume, OR optionally
2115+ from a snapshot. Wait for the new volume status to reach
2116+ the expected status, validate and return a resource pointer.
2117+
2118+ :param vol_name: cinder volume display name
2119+ :param vol_size: size in gigabytes
2120+ :param img_id: optional glance image id
2121+ :param src_vol_id: optional source volume id to clone
2122+ :param snap_id: optional snapshot id to use
2123+ :returns: cinder volume pointer
2124+ """
2125+ # Handle parameter input and avoid impossible combinations
2126+ if img_id and not src_vol_id and not snap_id:
2127+ # Create volume from image
2128+ self.log.debug('Creating cinder volume from glance image...')
2129+ bootable = 'true'
2130+ elif src_vol_id and not img_id and not snap_id:
2131+ # Clone an existing volume
2132+ self.log.debug('Cloning cinder volume...')
2133+ bootable = cinder.volumes.get(src_vol_id).bootable
2134+ elif snap_id and not src_vol_id and not img_id:
2135+ # Create volume from snapshot
2136+ self.log.debug('Creating cinder volume from snapshot...')
2137+ snap = cinder.volume_snapshots.find(id=snap_id)
2138+ vol_size = snap.size
2139+ snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
2140+ bootable = cinder.volumes.get(snap_vol_id).bootable
2141+ elif not img_id and not src_vol_id and not snap_id:
2142+ # Create volume
2143+ self.log.debug('Creating cinder volume...')
2144+ bootable = 'false'
2145+ else:
2146+ # Impossible combination of parameters
2147+ msg = ('Invalid method use - name:{} size:{} img_id:{} '
2148+ 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
2149+ img_id, src_vol_id,
2150+ snap_id))
2151+ amulet.raise_status(amulet.FAIL, msg=msg)
2152+
2153+ # Create new volume
2154+ try:
2155+ vol_new = cinder.volumes.create(display_name=vol_name,
2156+ imageRef=img_id,
2157+ size=vol_size,
2158+ source_volid=src_vol_id,
2159+ snapshot_id=snap_id)
2160+ vol_id = vol_new.id
2161+ except Exception as e:
2162+ msg = 'Failed to create volume: {}'.format(e)
2163+ amulet.raise_status(amulet.FAIL, msg=msg)
2164+
2165+ # Wait for volume to reach available status
2166+ ret = self.resource_reaches_status(cinder.volumes, vol_id,
2167+ expected_stat="available",
2168+ msg="Volume status wait")
2169+ if not ret:
2170+ msg = 'Cinder volume failed to reach expected state.'
2171+ amulet.raise_status(amulet.FAIL, msg=msg)
2172+
2173+ # Re-validate new volume
2174+ self.log.debug('Validating volume attributes...')
2175+ val_vol_name = cinder.volumes.get(vol_id).display_name
2176+ val_vol_boot = cinder.volumes.get(vol_id).bootable
2177+ val_vol_stat = cinder.volumes.get(vol_id).status
2178+ val_vol_size = cinder.volumes.get(vol_id).size
2179+ msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
2180+ '{} size:{}'.format(val_vol_name, vol_id,
2181+ val_vol_stat, val_vol_boot,
2182+ val_vol_size))
2183+
2184+ if val_vol_boot == bootable and val_vol_stat == 'available' \
2185+ and val_vol_name == vol_name and val_vol_size == vol_size:
2186+ self.log.debug(msg_attr)
2187+ else:
2188+ msg = ('Volume validation failed, {}'.format(msg_attr))
2189+ amulet.raise_status(amulet.FAIL, msg=msg)
2190+
2191+ return vol_new
2192+
2193+ def delete_resource(self, resource, resource_id,
2194+ msg="resource", max_wait=120):
2195+ """Delete one openstack resource, such as one instance, keypair,
2196+ image, volume, stack, etc., and confirm deletion within max wait time.
2197+
2198+ :param resource: pointer to os resource type, ex:glance_client.images
2199+ :param resource_id: unique name or id for the openstack resource
2200+ :param msg: text to identify purpose in logging
2201+ :param max_wait: maximum wait time in seconds
2202+ :returns: True if successful, otherwise False
2203+ """
2204+ self.log.debug('Deleting OpenStack resource '
2205+ '{} ({})'.format(resource_id, msg))
2206+ num_before = len(list(resource.list()))
2207+ resource.delete(resource_id)
2208+
2209+ tries = 0
2210+ num_after = len(list(resource.list()))
2211+ while num_after != (num_before - 1) and tries < (max_wait / 4):
2212+ self.log.debug('{} delete check: '
2213+ '{} [{}:{}] {}'.format(msg, tries,
2214+ num_before,
2215+ num_after,
2216+ resource_id))
2217+ time.sleep(4)
2218+ num_after = len(list(resource.list()))
2219+ tries += 1
2220+
2221+ self.log.debug('{}: expected, actual count = {}, '
2222+ '{}'.format(msg, num_before - 1, num_after))
2223+
2224+ if num_after == (num_before - 1):
2225+ return True
2226+ else:
2227+ self.log.error('{} delete timed out'.format(msg))
2228+ return False
2229+
2230+ def resource_reaches_status(self, resource, resource_id,
2231+ expected_stat='available',
2232+ msg='resource', max_wait=120):
2233+        """Wait for an openstack resource's status to reach an
2234+ expected status within a specified time. Useful to confirm that
2235+ nova instances, cinder vols, snapshots, glance images, heat stacks
2236+ and other resources eventually reach the expected status.
2237+
2238+ :param resource: pointer to os resource type, ex: heat_client.stacks
2239+ :param resource_id: unique id for the openstack resource
2240+ :param expected_stat: status to expect resource to reach
2241+ :param msg: text to identify purpose in logging
2242+ :param max_wait: maximum wait time in seconds
2243+ :returns: True if successful, False if status is not reached
2244+ """
2245+
2246+ tries = 0
2247+ resource_stat = resource.get(resource_id).status
2248+ while resource_stat != expected_stat and tries < (max_wait / 4):
2249+ self.log.debug('{} status check: '
2250+ '{} [{}:{}] {}'.format(msg, tries,
2251+ resource_stat,
2252+ expected_stat,
2253+ resource_id))
2254+ time.sleep(4)
2255+ resource_stat = resource.get(resource_id).status
2256+ tries += 1
2257+
2258+ self.log.debug('{}: expected, actual status = {}, '
2259+ '{}'.format(msg, resource_stat, expected_stat))
2260+
2261+ if resource_stat == expected_stat:
2262+ return True
2263+ else:
2264+ self.log.debug('{} never reached expected status: '
2265+ '{}'.format(resource_id, expected_stat))
2266+ return False
2267+
2268+ def get_ceph_osd_id_cmd(self, index):
2269+ """Produce a shell command that will return a ceph-osd id."""
2270+ return ("`initctl list | grep 'ceph-osd ' | "
2271+ "awk 'NR=={} {{ print $2 }}' | "
2272+ "grep -o '[0-9]*'`".format(index + 1))
2273+
2274+ def get_ceph_pools(self, sentry_unit):
2275+ """Return a dict of ceph pools from a single ceph unit, with
2276+ pool name as keys, pool id as vals."""
2277+ pools = {}
2278+ cmd = 'sudo ceph osd lspools'
2279+ output, code = sentry_unit.run(cmd)
2280+ if code != 0:
2281+ msg = ('{} `{}` returned {} '
2282+ '{}'.format(sentry_unit.info['unit_name'],
2283+ cmd, code, output))
2284+ amulet.raise_status(amulet.FAIL, msg=msg)
2285+
2286+ # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
2287+ for pool in str(output).split(','):
2288+ pool_id_name = pool.split(' ')
2289+ if len(pool_id_name) == 2:
2290+ pool_id = pool_id_name[0]
2291+ pool_name = pool_id_name[1]
2292+ pools[pool_name] = int(pool_id)
2293+
2294+ self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
2295+ pools))
2296+ return pools
2297+
2298+ def get_ceph_df(self, sentry_unit):
2299+ """Return dict of ceph df json output, including ceph pool state.
2300+
2301+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
2302+ :returns: Dict of ceph df output
2303+ """
2304+ cmd = 'sudo ceph df --format=json'
2305+ output, code = sentry_unit.run(cmd)
2306+ if code != 0:
2307+ msg = ('{} `{}` returned {} '
2308+ '{}'.format(sentry_unit.info['unit_name'],
2309+ cmd, code, output))
2310+ amulet.raise_status(amulet.FAIL, msg=msg)
2311+ return json.loads(output)
2312+
2313+ def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
2314+ """Take a sample of attributes of a ceph pool, returning ceph
2315+ pool name, object count and disk space used for the specified
2316+ pool ID number.
2317+
2318+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
2319+ :param pool_id: Ceph pool ID
2320+ :returns: List of pool name, object count, kb disk space used
2321+ """
2322+ df = self.get_ceph_df(sentry_unit)
2323+ pool_name = df['pools'][pool_id]['name']
2324+ obj_count = df['pools'][pool_id]['stats']['objects']
2325+ kb_used = df['pools'][pool_id]['stats']['kb_used']
2326+ self.log.debug('Ceph {} pool (ID {}): {} objects, '
2327+ '{} kb used'.format(pool_name, pool_id,
2328+ obj_count, kb_used))
2329+ return pool_name, obj_count, kb_used
2330+
2331+ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
2332+ """Validate ceph pool samples taken over time, such as pool
2333+ object counts or pool kb used, before adding, after adding, and
2334+ after deleting items which affect those pool attributes. The
2335+ 2nd element is expected to be greater than the 1st; 3rd is expected
2336+ to be less than the 2nd.
2337+
2338+ :param samples: List containing 3 data samples
2339+ :param sample_type: String for logging and usage context
2340+ :returns: None if successful, Failure message otherwise
2341+ """
2342+ original, created, deleted = range(3)
2343+ if samples[created] <= samples[original] or \
2344+ samples[deleted] >= samples[created]:
2345+ return ('Ceph {} samples ({}) '
2346+ 'unexpected.'.format(sample_type, samples))
2347+ else:
2348+ self.log.debug('Ceph {} samples (OK): '
2349+ '{}'.format(sample_type, samples))
2350+ return None
2351
2352=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
2353--- hooks/charmhelpers/contrib/openstack/context.py 2015-06-18 14:29:25 +0000
2354+++ hooks/charmhelpers/contrib/openstack/context.py 2015-07-16 20:20:59 +0000
2355@@ -16,11 +16,13 @@
2356
2357 import json
2358 import os
2359+import re
2360 import time
2361 from base64 import b64decode
2362 from subprocess import check_call
2363
2364 import six
2365+import yaml
2366
2367 from charmhelpers.fetch import (
2368 apt_install,
2369@@ -45,8 +47,11 @@
2370 )
2371
2372 from charmhelpers.core.sysctl import create as sysctl_create
2373+from charmhelpers.core.strutils import bool_from_string
2374
2375 from charmhelpers.core.host import (
2376+ list_nics,
2377+ get_nic_hwaddr,
2378 mkdir,
2379 write_file,
2380 )
2381@@ -63,16 +68,22 @@
2382 )
2383 from charmhelpers.contrib.openstack.neutron import (
2384 neutron_plugin_attribute,
2385+ parse_data_port_mappings,
2386+)
2387+from charmhelpers.contrib.openstack.ip import (
2388+ resolve_address,
2389+ INTERNAL,
2390 )
2391 from charmhelpers.contrib.network.ip import (
2392 get_address_in_network,
2393+ get_ipv4_addr,
2394 get_ipv6_addr,
2395 get_netmask_for_address,
2396 format_ipv6_addr,
2397 is_address_in_network,
2398+ is_bridge_member,
2399 )
2400 from charmhelpers.contrib.openstack.utils import get_host_ip
2401-
2402 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
2403 ADDRESS_TYPES = ['admin', 'internal', 'public']
2404
2405@@ -104,9 +115,44 @@
2406 def config_flags_parser(config_flags):
2407 """Parses config flags string into dict.
2408
2409+ This parsing method supports a few different formats for the config
2410+ flag values to be parsed:
2411+
2412+ 1. A string in the simple format of key=value pairs, with the possibility
2413+ of specifying multiple key value pairs within the same string. For
2414+ example, a string in the format of 'key1=value1, key2=value2' will
2415+ return a dict of:
2416+
2417+ {'key1': 'value1',
2418+ 'key2': 'value2'}.
2419+
2420+ 2. A string in the above format, but supporting a comma-delimited list
2421+ of values for the same key. For example, a string in the format of
2422+ 'key1=value1, key2=value3,value4,value5' will return a dict of:
2423+
2424+           {'key1': 'value1',
2425+            'key2': 'value3,value4,value5'}
2426+
2427+ 3. A string containing a colon character (:) prior to an equal
2428+ character (=) will be treated as yaml and parsed as such. This can be
2429+ used to specify more complex key value pairs. For example,
2430+ a string in the format of 'key1: subkey1=value1, subkey2=value2' will
2431+ return a dict of:
2432+
2433+           {'key1': 'subkey1=value1, subkey2=value2'}
2434+
2435 The provided config_flags string may be a list of comma-separated values
2436 which themselves may be comma-separated list of values.
2437 """
2438+ # If we find a colon before an equals sign then treat it as yaml.
2439+ # Note: limit it to finding the colon first since this indicates assignment
2440+ # for inline yaml.
2441+ colon = config_flags.find(':')
2442+ equals = config_flags.find('=')
2443+ if colon > 0:
2444+ if colon < equals or equals < 0:
2445+ return yaml.safe_load(config_flags)
2446+
2447 if config_flags.find('==') >= 0:
2448 log("config_flags is not in expected format (key=value)", level=ERROR)
2449 raise OSContextError
2450@@ -191,13 +237,13 @@
2451 unit=local_unit())
2452 if set_hostname != access_hostname:
2453 relation_set(relation_settings={hostname_key: access_hostname})
2454- return ctxt # Defer any further hook execution for now....
2455+ return None # Defer any further hook execution for now....
2456
2457 password_setting = 'password'
2458 if self.relation_prefix:
2459 password_setting = self.relation_prefix + '_password'
2460
2461- for rid in relation_ids('shared-db'):
2462+ for rid in relation_ids(self.interfaces[0]):
2463 for unit in related_units(rid):
2464 rdata = relation_get(rid=rid, unit=unit)
2465 host = rdata.get('db_host')
2466@@ -277,12 +323,29 @@
2467
2468
2469 class IdentityServiceContext(OSContextGenerator):
2470- interfaces = ['identity-service']
2471+
2472+ def __init__(self, service=None, service_user=None, rel_name='identity-service'):
2473+ self.service = service
2474+ self.service_user = service_user
2475+ self.rel_name = rel_name
2476+ self.interfaces = [self.rel_name]
2477
2478 def __call__(self):
2479- log('Generating template context for identity-service', level=DEBUG)
2480+ log('Generating template context for ' + self.rel_name, level=DEBUG)
2481 ctxt = {}
2482- for rid in relation_ids('identity-service'):
2483+
2484+ if self.service and self.service_user:
2485+ # This is required for pki token signing if we don't want /tmp to
2486+ # be used.
2487+ cachedir = '/var/cache/%s' % (self.service)
2488+ if not os.path.isdir(cachedir):
2489+ log("Creating service cache dir %s" % (cachedir), level=DEBUG)
2490+ mkdir(path=cachedir, owner=self.service_user,
2491+ group=self.service_user, perms=0o700)
2492+
2493+ ctxt['signing_dir'] = cachedir
2494+
2495+ for rid in relation_ids(self.rel_name):
2496 for unit in related_units(rid):
2497 rdata = relation_get(rid=rid, unit=unit)
2498 serv_host = rdata.get('service_host')
2499@@ -291,15 +354,16 @@
2500 auth_host = format_ipv6_addr(auth_host) or auth_host
2501 svc_protocol = rdata.get('service_protocol') or 'http'
2502 auth_protocol = rdata.get('auth_protocol') or 'http'
2503- ctxt = {'service_port': rdata.get('service_port'),
2504- 'service_host': serv_host,
2505- 'auth_host': auth_host,
2506- 'auth_port': rdata.get('auth_port'),
2507- 'admin_tenant_name': rdata.get('service_tenant'),
2508- 'admin_user': rdata.get('service_username'),
2509- 'admin_password': rdata.get('service_password'),
2510- 'service_protocol': svc_protocol,
2511- 'auth_protocol': auth_protocol}
2512+ ctxt.update({'service_port': rdata.get('service_port'),
2513+ 'service_host': serv_host,
2514+ 'auth_host': auth_host,
2515+ 'auth_port': rdata.get('auth_port'),
2516+ 'admin_tenant_name': rdata.get('service_tenant'),
2517+ 'admin_user': rdata.get('service_username'),
2518+ 'admin_password': rdata.get('service_password'),
2519+ 'service_protocol': svc_protocol,
2520+ 'auth_protocol': auth_protocol})
2521+
2522 if context_complete(ctxt):
2523 # NOTE(jamespage) this is required for >= icehouse
2524 # so a missing value just indicates keystone needs
2525@@ -398,6 +462,11 @@
2526
2527 ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
2528
2529+ oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
2530+ if oslo_messaging_flags:
2531+ ctxt['oslo_messaging_flags'] = config_flags_parser(
2532+ oslo_messaging_flags)
2533+
2534 if not context_complete(ctxt):
2535 return {}
2536
2537@@ -677,7 +746,14 @@
2538 'endpoints': [],
2539 'ext_ports': []}
2540
2541- for cn in self.canonical_names():
2542+ cns = self.canonical_names()
2543+ if cns:
2544+ for cn in cns:
2545+ self.configure_cert(cn)
2546+ else:
2547+ # Expect cert/key provided in config (currently assumed that ca
2548+ # uses ip for cn)
2549+ cn = resolve_address(endpoint_type=INTERNAL)
2550 self.configure_cert(cn)
2551
2552 addresses = self.get_network_addresses()
2553@@ -740,6 +816,19 @@
2554
2555 return ovs_ctxt
2556
2557+ def nuage_ctxt(self):
2558+ driver = neutron_plugin_attribute(self.plugin, 'driver',
2559+ self.network_manager)
2560+ config = neutron_plugin_attribute(self.plugin, 'config',
2561+ self.network_manager)
2562+ nuage_ctxt = {'core_plugin': driver,
2563+ 'neutron_plugin': 'vsp',
2564+ 'neutron_security_groups': self.neutron_security_groups,
2565+ 'local_ip': unit_private_ip(),
2566+ 'config': config}
2567+
2568+ return nuage_ctxt
2569+
2570 def nvp_ctxt(self):
2571 driver = neutron_plugin_attribute(self.plugin, 'driver',
2572 self.network_manager)
2573@@ -823,6 +912,8 @@
2574 ctxt.update(self.n1kv_ctxt())
2575 elif self.plugin == 'Calico':
2576 ctxt.update(self.calico_ctxt())
2577+ elif self.plugin == 'vsp':
2578+ ctxt.update(self.nuage_ctxt())
2579
2580 alchemy_flags = config('neutron-alchemy-flags')
2581 if alchemy_flags:
2582@@ -833,6 +924,48 @@
2583 return ctxt
2584
2585
2586+class NeutronPortContext(OSContextGenerator):
2587+ NIC_PREFIXES = ['eth', 'bond']
2588+
2589+ def resolve_ports(self, ports):
2590+ """Resolve NICs not yet bound to bridge(s)
2591+
2592+ If hwaddress provided then returns resolved hwaddress otherwise NIC.
2593+ """
2594+ if not ports:
2595+ return None
2596+
2597+ hwaddr_to_nic = {}
2598+ hwaddr_to_ip = {}
2599+ for nic in list_nics(self.NIC_PREFIXES):
2600+ hwaddr = get_nic_hwaddr(nic)
2601+ hwaddr_to_nic[hwaddr] = nic
2602+ addresses = get_ipv4_addr(nic, fatal=False)
2603+ addresses += get_ipv6_addr(iface=nic, fatal=False)
2604+ hwaddr_to_ip[hwaddr] = addresses
2605+
2606+ resolved = []
2607+ mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
2608+ for entry in ports:
2609+ if re.match(mac_regex, entry):
2610+                # NIC is in known NICs and does NOT have an IP address
2611+ if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
2612+ # If the nic is part of a bridge then don't use it
2613+ if is_bridge_member(hwaddr_to_nic[entry]):
2614+ continue
2615+
2616+ # Entry is a MAC address for a valid interface that doesn't
2617+ # have an IP address assigned yet.
2618+ resolved.append(hwaddr_to_nic[entry])
2619+ else:
2620+ # If the passed entry is not a MAC address, assume it's a valid
2621+ # interface, and that the user put it there on purpose (we can
2622+ # trust it to be the real external network).
2623+ resolved.append(entry)
2624+
2625+ return resolved
2626+
2627+
2628 class OSConfigFlagContext(OSContextGenerator):
2629 """Provides support for user-defined config flags.
2630
2631@@ -1021,6 +1154,8 @@
2632 for unit in related_units(rid):
2633 ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
2634 ctxt['zmq_host'] = relation_get('host', unit, rid)
2635+ ctxt['zmq_redis_address'] = relation_get(
2636+ 'zmq_redis_address', unit, rid)
2637
2638 return ctxt
2639
2640@@ -1052,3 +1187,145 @@
2641 sysctl_create(sysctl_dict,
2642 '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
2643 return {'sysctl': sysctl_dict}
2644+
2645+
2646+class NeutronAPIContext(OSContextGenerator):
2647+ '''
2648+ Inspects current neutron-plugin-api relation for neutron settings. Return
2649+ defaults if it is not present.
2650+ '''
2651+ interfaces = ['neutron-plugin-api']
2652+
2653+ def __call__(self):
2654+ self.neutron_defaults = {
2655+ 'l2_population': {
2656+ 'rel_key': 'l2-population',
2657+ 'default': False,
2658+ },
2659+ 'overlay_network_type': {
2660+ 'rel_key': 'overlay-network-type',
2661+ 'default': 'gre',
2662+ },
2663+ 'neutron_security_groups': {
2664+ 'rel_key': 'neutron-security-groups',
2665+ 'default': False,
2666+ },
2667+ 'network_device_mtu': {
2668+ 'rel_key': 'network-device-mtu',
2669+ 'default': None,
2670+ },
2671+ 'enable_dvr': {
2672+ 'rel_key': 'enable-dvr',
2673+ 'default': False,
2674+ },
2675+ 'enable_l3ha': {
2676+ 'rel_key': 'enable-l3ha',
2677+ 'default': False,
2678+ },
2679+ }
2680+ ctxt = self.get_neutron_options({})
2681+ for rid in relation_ids('neutron-plugin-api'):
2682+ for unit in related_units(rid):
2683+ rdata = relation_get(rid=rid, unit=unit)
2684+ if 'l2-population' in rdata:
2685+ ctxt.update(self.get_neutron_options(rdata))
2686+
2687+ return ctxt
2688+
2689+ def get_neutron_options(self, rdata):
2690+ settings = {}
2691+ for nkey in self.neutron_defaults.keys():
2692+ defv = self.neutron_defaults[nkey]['default']
2693+ rkey = self.neutron_defaults[nkey]['rel_key']
2694+ if rkey in rdata.keys():
2695+ if type(defv) is bool:
2696+ settings[nkey] = bool_from_string(rdata[rkey])
2697+ else:
2698+ settings[nkey] = rdata[rkey]
2699+ else:
2700+ settings[nkey] = defv
2701+ return settings
2702+
2703+
2704+class ExternalPortContext(NeutronPortContext):
2705+
2706+ def __call__(self):
2707+ ctxt = {}
2708+ ports = config('ext-port')
2709+ if ports:
2710+ ports = [p.strip() for p in ports.split()]
2711+ ports = self.resolve_ports(ports)
2712+ if ports:
2713+ ctxt = {"ext_port": ports[0]}
2714+ napi_settings = NeutronAPIContext()()
2715+ mtu = napi_settings.get('network_device_mtu')
2716+ if mtu:
2717+ ctxt['ext_port_mtu'] = mtu
2718+
2719+ return ctxt
2720+
2721+
2722+class DataPortContext(NeutronPortContext):
2723+
2724+ def __call__(self):
2725+ ports = config('data-port')
2726+ if ports:
2727+ portmap = parse_data_port_mappings(ports)
2728+ ports = portmap.values()
2729+ resolved = self.resolve_ports(ports)
2730+ normalized = {get_nic_hwaddr(port): port for port in resolved
2731+ if port not in ports}
2732+ normalized.update({port: port for port in resolved
2733+ if port in ports})
2734+ if resolved:
2735+ return {bridge: normalized[port] for bridge, port in
2736+ six.iteritems(portmap) if port in normalized.keys()}
2737+
2738+ return None
2739+
2740+
2741+class PhyNICMTUContext(DataPortContext):
2742+
2743+ def __call__(self):
2744+ ctxt = {}
2745+ mappings = super(PhyNICMTUContext, self).__call__()
2746+ if mappings and mappings.values():
2747+ ports = mappings.values()
2748+ napi_settings = NeutronAPIContext()()
2749+ mtu = napi_settings.get('network_device_mtu')
2750+ if mtu:
2751+ ctxt["devs"] = '\\n'.join(ports)
2752+ ctxt['mtu'] = mtu
2753+
2754+ return ctxt
2755+
2756+
2757+class NetworkServiceContext(OSContextGenerator):
2758+
2759+ def __init__(self, rel_name='quantum-network-service'):
2760+ self.rel_name = rel_name
2761+ self.interfaces = [rel_name]
2762+
2763+ def __call__(self):
2764+ for rid in relation_ids(self.rel_name):
2765+ for unit in related_units(rid):
2766+ rdata = relation_get(rid=rid, unit=unit)
2767+ ctxt = {
2768+ 'keystone_host': rdata.get('keystone_host'),
2769+ 'service_port': rdata.get('service_port'),
2770+ 'auth_port': rdata.get('auth_port'),
2771+ 'service_tenant': rdata.get('service_tenant'),
2772+ 'service_username': rdata.get('service_username'),
2773+ 'service_password': rdata.get('service_password'),
2774+ 'quantum_host': rdata.get('quantum_host'),
2775+ 'quantum_port': rdata.get('quantum_port'),
2776+ 'quantum_url': rdata.get('quantum_url'),
2777+ 'region': rdata.get('region'),
2778+ 'service_protocol':
2779+ rdata.get('service_protocol') or 'http',
2780+ 'auth_protocol':
2781+ rdata.get('auth_protocol') or 'http',
2782+ }
2783+ if context_complete(ctxt):
2784+ return ctxt
2785+ return {}
2786
2787=== added directory 'hooks/charmhelpers/contrib/openstack/files'
2788=== added file 'hooks/charmhelpers/contrib/openstack/files/__init__.py'
2789--- hooks/charmhelpers/contrib/openstack/files/__init__.py 1970-01-01 00:00:00 +0000
2790+++ hooks/charmhelpers/contrib/openstack/files/__init__.py 2015-07-16 20:20:59 +0000
2791@@ -0,0 +1,18 @@
2792+# Copyright 2014-2015 Canonical Limited.
2793+#
2794+# This file is part of charm-helpers.
2795+#
2796+# charm-helpers is free software: you can redistribute it and/or modify
2797+# it under the terms of the GNU Lesser General Public License version 3 as
2798+# published by the Free Software Foundation.
2799+#
2800+# charm-helpers is distributed in the hope that it will be useful,
2801+# but WITHOUT ANY WARRANTY; without even the implied warranty of
2802+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2803+# GNU Lesser General Public License for more details.
2804+#
2805+# You should have received a copy of the GNU Lesser General Public License
2806+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
2807+
2808+# dummy __init__.py to fool syncer into thinking this is a syncable python
2809+# module
2810
2811=== modified file 'hooks/charmhelpers/contrib/openstack/ip.py'
2812--- hooks/charmhelpers/contrib/openstack/ip.py 2015-06-18 14:29:25 +0000
2813+++ hooks/charmhelpers/contrib/openstack/ip.py 2015-07-16 20:20:59 +0000
2814@@ -17,6 +17,7 @@
2815 from charmhelpers.core.hookenv import (
2816 config,
2817 unit_get,
2818+ service_name,
2819 )
2820 from charmhelpers.contrib.network.ip import (
2821 get_address_in_network,
2822@@ -33,15 +34,18 @@
2823 ADDRESS_MAP = {
2824 PUBLIC: {
2825 'config': 'os-public-network',
2826- 'fallback': 'public-address'
2827+ 'fallback': 'public-address',
2828+ 'override': 'os-public-hostname',
2829 },
2830 INTERNAL: {
2831 'config': 'os-internal-network',
2832- 'fallback': 'private-address'
2833+ 'fallback': 'private-address',
2834+ 'override': 'os-internal-hostname',
2835 },
2836 ADMIN: {
2837 'config': 'os-admin-network',
2838- 'fallback': 'private-address'
2839+ 'fallback': 'private-address',
2840+ 'override': 'os-admin-hostname',
2841 }
2842 }
2843
2844@@ -55,15 +59,50 @@
2845 :param endpoint_type: str endpoint type to resolve.
2846 :param returns: str base URL for services on the current service unit.
2847 """
2848- scheme = 'http'
2849- if 'https' in configs.complete_contexts():
2850- scheme = 'https'
2851+ scheme = _get_scheme(configs)
2852+
2853 address = resolve_address(endpoint_type)
2854 if is_ipv6(address):
2855 address = "[{}]".format(address)
2856+
2857 return '%s://%s' % (scheme, address)
2858
2859
2860+def _get_scheme(configs):
2861+ """Returns the scheme to use for the url (either http or https)
2862+ depending upon whether https is in the configs value.
2863+
2864+ :param configs: OSTemplateRenderer config templating object to inspect
2865+ for a complete https context.
2866+ :returns: either 'http' or 'https' depending on whether https is
2867+ configured within the configs context.
2868+ """
2869+ scheme = 'http'
2870+ if configs and 'https' in configs.complete_contexts():
2871+ scheme = 'https'
2872+ return scheme
2873+
2874+
2875+def _get_address_override(endpoint_type=PUBLIC):
2876+ """Returns any address overrides that the user has defined based on the
2877+ endpoint type.
2878+
2879+ Note: this function allows for the service name to be inserted into the
2880+ address if the user specifies {service_name}.somehost.org.
2881+
2882+ :param endpoint_type: the type of endpoint to retrieve the override
2883+ value for.
2884+ :returns: any endpoint address or hostname that the user has overridden
2885+ or None if an override is not present.
2886+ """
2887+ override_key = ADDRESS_MAP[endpoint_type]['override']
2888+ addr_override = config(override_key)
2889+ if not addr_override:
2890+ return None
2891+ else:
2892+ return addr_override.format(service_name=service_name())
2893+
2894+
2895 def resolve_address(endpoint_type=PUBLIC):
2896 """Return unit address depending on net config.
2897
2898@@ -75,7 +114,10 @@
2899
2900 :param endpoint_type: Network endpoing type
2901 """
2902- resolved_address = None
2903+ resolved_address = _get_address_override(endpoint_type)
2904+ if resolved_address:
2905+ return resolved_address
2906+
2907 vips = config('vip')
2908 if vips:
2909 vips = vips.split()
2910
2911=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
2912--- hooks/charmhelpers/contrib/openstack/neutron.py 2015-06-18 14:29:25 +0000
2913+++ hooks/charmhelpers/contrib/openstack/neutron.py 2015-07-16 20:20:59 +0000
2914@@ -16,6 +16,7 @@
2915
2916 # Various utilies for dealing with Neutron and the renaming from Quantum.
2917
2918+import six
2919 from subprocess import check_output
2920
2921 from charmhelpers.core.hookenv import (
2922@@ -171,13 +172,28 @@
2923 'services': ['calico-felix',
2924 'bird',
2925 'neutron-dhcp-agent',
2926- 'nova-api-metadata'],
2927+ 'nova-api-metadata',
2928+ 'etcd'],
2929 'packages': [[headers_package()] + determine_dkms_package(),
2930 ['calico-compute',
2931 'bird',
2932 'neutron-dhcp-agent',
2933- 'nova-api-metadata']],
2934- 'server_packages': ['neutron-server', 'calico-control'],
2935+ 'nova-api-metadata',
2936+ 'etcd']],
2937+ 'server_packages': ['neutron-server', 'calico-control', 'etcd'],
2938+ 'server_services': ['neutron-server', 'etcd']
2939+ },
2940+ 'vsp': {
2941+ 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
2942+ 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
2943+ 'contexts': [
2944+ context.SharedDBContext(user=config('neutron-database-user'),
2945+ database=config('neutron-database'),
2946+ relation_prefix='neutron',
2947+ ssl_dir=NEUTRON_CONF_DIR)],
2948+ 'services': [],
2949+ 'packages': [],
2950+ 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
2951 'server_services': ['neutron-server']
2952 }
2953 }
2954@@ -237,3 +253,77 @@
2955 else:
2956 # ensure accurate naming for all releases post-H
2957 return 'neutron'
2958+
2959+
2960+def parse_mappings(mappings):
2961+ parsed = {}
2962+ if mappings:
2963+ mappings = mappings.split()
2964+ for m in mappings:
2965+ p = m.partition(':')
2966+ key = p[0].strip()
2967+ if p[1]:
2968+ parsed[key] = p[2].strip()
2969+ else:
2970+ parsed[key] = ''
2971+
2972+ return parsed
2973+
2974+
2975+def parse_bridge_mappings(mappings):
2976+ """Parse bridge mappings.
2977+
2978+ Mappings must be a space-delimited list of provider:bridge mappings.
2979+
2980+ Returns dict of the form {provider:bridge}.
2981+ """
2982+ return parse_mappings(mappings)
2983+
2984+
2985+def parse_data_port_mappings(mappings, default_bridge='br-data'):
2986+ """Parse data port mappings.
2987+
2988+ Mappings must be a space-delimited list of bridge:port mappings.
2989+
2990+ Returns dict of the form {bridge:port}.
2991+ """
2992+ _mappings = parse_mappings(mappings)
2993+ if not _mappings or list(_mappings.values()) == ['']:
2994+ if not mappings:
2995+ return {}
2996+
2997+ # For backwards-compatibility we need to support port-only provided in
2998+ # config.
2999+ _mappings = {default_bridge: mappings.split()[0]}
3000+
3001+ bridges = _mappings.keys()
3002+ ports = _mappings.values()
3003+ if len(set(bridges)) != len(bridges):
3004+ raise Exception("It is not allowed to have more than one port "
3005+ "configured on the same bridge")
3006+
3007+ if len(set(ports)) != len(ports):
3008+ raise Exception("It is not allowed to have the same port configured "
3009+ "on more than one bridge")
3010+
3011+ return _mappings
3012+
3013+
3014+def parse_vlan_range_mappings(mappings):
3015+ """Parse vlan range mappings.
3016+
3017+ Mappings must be a space-delimited list of provider:start:end mappings.
3018+
3019+ The start:end range is optional and may be omitted.
3020+
3021+ Returns dict of the form {provider: (start, end)}.
3022+ """
3023+ _mappings = parse_mappings(mappings)
3024+ if not _mappings:
3025+ return {}
3026+
3027+ mappings = {}
3028+ for p, r in six.iteritems(_mappings):
3029+ mappings[p] = tuple(r.split(':'))
3030+
3031+ return mappings
3032
3033=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
3034--- hooks/charmhelpers/contrib/openstack/utils.py 2015-06-18 14:29:25 +0000
3035+++ hooks/charmhelpers/contrib/openstack/utils.py 2015-07-16 20:20:59 +0000
3036@@ -23,12 +23,17 @@
3037 import subprocess
3038 import json
3039 import os
3040-import socket
3041 import sys
3042
3043 import six
3044 import yaml
3045
3046+from charmhelpers.contrib.network import ip
3047+
3048+from charmhelpers.core import (
3049+ unitdata,
3050+)
3051+
3052 from charmhelpers.core.hookenv import (
3053 config,
3054 log as juju_log,
3055@@ -48,9 +53,13 @@
3056 get_ipv6_addr
3057 )
3058
3059+from charmhelpers.contrib.python.packages import (
3060+ pip_create_virtualenv,
3061+ pip_install,
3062+)
3063+
3064 from charmhelpers.core.host import lsb_release, mounts, umount
3065 from charmhelpers.fetch import apt_install, apt_cache, install_remote
3066-from charmhelpers.contrib.python.packages import pip_install
3067 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
3068 from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
3069
3070@@ -70,6 +79,7 @@
3071 ('trusty', 'icehouse'),
3072 ('utopic', 'juno'),
3073 ('vivid', 'kilo'),
3074+ ('wily', 'liberty'),
3075 ])
3076
3077
3078@@ -82,6 +92,7 @@
3079 ('2014.1', 'icehouse'),
3080 ('2014.2', 'juno'),
3081 ('2015.1', 'kilo'),
3082+ ('2015.2', 'liberty'),
3083 ])
3084
3085 # The ugly duckling
3086@@ -103,6 +114,8 @@
3087 ('2.1.0', 'juno'),
3088 ('2.2.0', 'juno'),
3089 ('2.2.1', 'kilo'),
3090+ ('2.2.2', 'kilo'),
3091+ ('2.3.0', 'liberty'),
3092 ])
3093
3094 DEFAULT_LOOPBACK_SIZE = '5G'
3095@@ -311,6 +324,9 @@
3096 'kilo': 'trusty-updates/kilo',
3097 'kilo/updates': 'trusty-updates/kilo',
3098 'kilo/proposed': 'trusty-proposed/kilo',
3099+ 'liberty': 'trusty-updates/liberty',
3100+ 'liberty/updates': 'trusty-updates/liberty',
3101+ 'liberty/proposed': 'trusty-proposed/liberty',
3102 }
3103
3104 try:
3105@@ -328,6 +344,21 @@
3106 error_out("Invalid openstack-release specified: %s" % rel)
3107
3108
3109+def config_value_changed(option):
3110+ """
3111+ Determine if config value changed since last call to this function.
3112+ """
3113+ hook_data = unitdata.HookData()
3114+ with hook_data():
3115+ db = unitdata.kv()
3116+ current = config(option)
3117+ saved = db.get(option)
3118+ db.set(option, current)
3119+ if saved is None:
3120+ return False
3121+ return current != saved
3122+
3123+
3124 def save_script_rc(script_path="scripts/scriptrc", **env_vars):
3125 """
3126 Write an rc file in the charm-delivered directory containing
3127@@ -420,77 +451,10 @@
3128 else:
3129 zap_disk(block_device)
3130
3131-
3132-def is_ip(address):
3133- """
3134- Returns True if address is a valid IP address.
3135- """
3136- try:
3137- # Test to see if already an IPv4 address
3138- socket.inet_aton(address)
3139- return True
3140- except socket.error:
3141- return False
3142-
3143-
3144-def ns_query(address):
3145- try:
3146- import dns.resolver
3147- except ImportError:
3148- apt_install('python-dnspython')
3149- import dns.resolver
3150-
3151- if isinstance(address, dns.name.Name):
3152- rtype = 'PTR'
3153- elif isinstance(address, six.string_types):
3154- rtype = 'A'
3155- else:
3156- return None
3157-
3158- answers = dns.resolver.query(address, rtype)
3159- if answers:
3160- return str(answers[0])
3161- return None
3162-
3163-
3164-def get_host_ip(hostname):
3165- """
3166- Resolves the IP for a given hostname, or returns
3167- the input if it is already an IP.
3168- """
3169- if is_ip(hostname):
3170- return hostname
3171-
3172- return ns_query(hostname)
3173-
3174-
3175-def get_hostname(address, fqdn=True):
3176- """
3177- Resolves hostname for given IP, or returns the input
3178- if it is already a hostname.
3179- """
3180- if is_ip(address):
3181- try:
3182- import dns.reversename
3183- except ImportError:
3184- apt_install('python-dnspython')
3185- import dns.reversename
3186-
3187- rev = dns.reversename.from_address(address)
3188- result = ns_query(rev)
3189- if not result:
3190- return None
3191- else:
3192- result = address
3193-
3194- if fqdn:
3195- # strip trailing .
3196- if result.endswith('.'):
3197- return result[:-1]
3198- else:
3199- return result
3200- else:
3201- return result.split('.')[0]
3202+is_ip = ip.is_ip
3203+ns_query = ip.ns_query
3204+get_host_ip = ip.get_host_ip
3205+get_hostname = ip.get_hostname
3206
3207
3208 def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
3209@@ -534,108 +498,208 @@
3210
3211
3212 def git_install_requested():
3213- """Returns true if openstack-origin-git is specified."""
3214- return config('openstack-origin-git') != "None"
3215+ """
3216+ Returns true if openstack-origin-git is specified.
3217+ """
3218+ return config('openstack-origin-git') is not None
3219
3220
3221 requirements_dir = None
3222
3223
3224-def git_clone_and_install(file_name, core_project):
3225- """Clone/install all OpenStack repos specified in yaml config file."""
3226- global requirements_dir
3227-
3228- if file_name == "None":
3229- return
3230-
3231- yaml_file = os.path.join(charm_dir(), file_name)
3232-
3233- # clone/install the requirements project first
3234- installed = _git_clone_and_install_subset(yaml_file,
3235- whitelist=['requirements'])
3236- if 'requirements' not in installed:
3237- error_out('requirements git repository must be specified')
3238-
3239- # clone/install all other projects except requirements and the core project
3240- blacklist = ['requirements', core_project]
3241- _git_clone_and_install_subset(yaml_file, blacklist=blacklist,
3242- update_requirements=True)
3243-
3244- # clone/install the core project
3245- whitelist = [core_project]
3246- installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist,
3247- update_requirements=True)
3248- if core_project not in installed:
3249- error_out('{} git repository must be specified'.format(core_project))
3250-
3251-
3252-def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[],
3253- update_requirements=False):
3254- """Clone/install subset of OpenStack repos specified in yaml config file."""
3255- global requirements_dir
3256- installed = []
3257-
3258- with open(yaml_file, 'r') as fd:
3259- projects = yaml.load(fd)
3260- for proj, val in projects.items():
3261- # The project subset is chosen based on the following 3 rules:
3262- # 1) If project is in blacklist, we don't clone/install it, period.
3263- # 2) If whitelist is empty, we clone/install everything else.
3264- # 3) If whitelist is not empty, we clone/install everything in the
3265- # whitelist.
3266- if proj in blacklist:
3267- continue
3268- if whitelist and proj not in whitelist:
3269- continue
3270- repo = val['repository']
3271- branch = val['branch']
3272- repo_dir = _git_clone_and_install_single(repo, branch,
3273- update_requirements)
3274- if proj == 'requirements':
3275- requirements_dir = repo_dir
3276- installed.append(proj)
3277- return installed
3278-
3279-
3280-def _git_clone_and_install_single(repo, branch, update_requirements=False):
3281- """Clone and install a single git repository."""
3282- dest_parent_dir = "/mnt/openstack-git/"
3283- dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo))
3284-
3285- if not os.path.exists(dest_parent_dir):
3286- juju_log('Host dir not mounted at {}. '
3287- 'Creating directory there instead.'.format(dest_parent_dir))
3288- os.mkdir(dest_parent_dir)
3289+def _git_yaml_load(projects_yaml):
3290+ """
3291+ Load the specified yaml into a dictionary.
3292+ """
3293+ if not projects_yaml:
3294+ return None
3295+
3296+ return yaml.load(projects_yaml)
3297+
3298+
3299+def git_clone_and_install(projects_yaml, core_project, depth=1):
3300+ """
3301+ Clone/install all specified OpenStack repositories.
3302+
3303+ The expected format of projects_yaml is:
3304+
3305+ repositories:
3306+ - {name: keystone,
3307+ repository: 'git://git.openstack.org/openstack/keystone.git',
3308+ branch: 'stable/icehouse'}
3309+ - {name: requirements,
3310+ repository: 'git://git.openstack.org/openstack/requirements.git',
3311+ branch: 'stable/icehouse'}
3312+
3313+ directory: /mnt/openstack-git
3314+ http_proxy: squid-proxy-url
3315+ https_proxy: squid-proxy-url
3316+
3317+ The directory, http_proxy, and https_proxy keys are optional.
3318+
3319+ """
3320+ global requirements_dir
3321+ parent_dir = '/mnt/openstack-git'
3322+ http_proxy = None
3323+
3324+ projects = _git_yaml_load(projects_yaml)
3325+ _git_validate_projects_yaml(projects, core_project)
3326+
3327+ old_environ = dict(os.environ)
3328+
3329+ if 'http_proxy' in projects.keys():
3330+ http_proxy = projects['http_proxy']
3331+ os.environ['http_proxy'] = projects['http_proxy']
3332+ if 'https_proxy' in projects.keys():
3333+ os.environ['https_proxy'] = projects['https_proxy']
3334+
3335+ if 'directory' in projects.keys():
3336+ parent_dir = projects['directory']
3337+
3338+ pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
3339+
3340+ # Upgrade setuptools from default virtualenv version. The default version
3341+ # in trusty breaks update.py in global requirements master branch.
3342+ pip_install('setuptools', upgrade=True, proxy=http_proxy,
3343+ venv=os.path.join(parent_dir, 'venv'))
3344+
3345+ for p in projects['repositories']:
3346+ repo = p['repository']
3347+ branch = p['branch']
3348+ if p['name'] == 'requirements':
3349+ repo_dir = _git_clone_and_install_single(repo, branch, depth,
3350+ parent_dir, http_proxy,
3351+ update_requirements=False)
3352+ requirements_dir = repo_dir
3353+ else:
3354+ repo_dir = _git_clone_and_install_single(repo, branch, depth,
3355+ parent_dir, http_proxy,
3356+ update_requirements=True)
3357+
3358+ os.environ = old_environ
3359+
3360+
3361+def _git_validate_projects_yaml(projects, core_project):
3362+ """
3363+ Validate the projects yaml.
3364+ """
3365+ _git_ensure_key_exists('repositories', projects)
3366+
3367+ for project in projects['repositories']:
3368+ _git_ensure_key_exists('name', project.keys())
3369+ _git_ensure_key_exists('repository', project.keys())
3370+ _git_ensure_key_exists('branch', project.keys())
3371+
3372+ if projects['repositories'][0]['name'] != 'requirements':
3373+ error_out('{} git repo must be specified first'.format('requirements'))
3374+
3375+ if projects['repositories'][-1]['name'] != core_project:
3376+ error_out('{} git repo must be specified last'.format(core_project))
3377+
3378+
3379+def _git_ensure_key_exists(key, keys):
3380+ """
3381+ Ensure that key exists in keys.
3382+ """
3383+ if key not in keys:
3384+ error_out('openstack-origin-git key \'{}\' is missing'.format(key))
3385+
3386+
3387+def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
3388+ update_requirements):
3389+ """
3390+ Clone and install a single git repository.
3391+ """
3392+ dest_dir = os.path.join(parent_dir, os.path.basename(repo))
3393+
3394+ if not os.path.exists(parent_dir):
3395+ juju_log('Directory does not exist at {}. '
3396+ 'Creating directory.'.format(parent_dir))
3397+ os.mkdir(parent_dir)
3398
3399 if not os.path.exists(dest_dir):
3400 juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
3401- repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch)
3402+ repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
3403+ depth=depth)
3404 else:
3405 repo_dir = dest_dir
3406
3407+ venv = os.path.join(parent_dir, 'venv')
3408+
3409 if update_requirements:
3410 if not requirements_dir:
3411 error_out('requirements repo must be cloned before '
3412 'updating from global requirements.')
3413- _git_update_requirements(repo_dir, requirements_dir)
3414+ _git_update_requirements(venv, repo_dir, requirements_dir)
3415
3416 juju_log('Installing git repo from dir: {}'.format(repo_dir))
3417- pip_install(repo_dir)
3418+ if http_proxy:
3419+ pip_install(repo_dir, proxy=http_proxy, venv=venv)
3420+ else:
3421+ pip_install(repo_dir, venv=venv)
3422
3423 return repo_dir
3424
3425
3426-def _git_update_requirements(package_dir, reqs_dir):
3427- """Update from global requirements.
3428+def _git_update_requirements(venv, package_dir, reqs_dir):
3429+ """
3430+ Update from global requirements.
3431
3432- Update an OpenStack git directory's requirements.txt and
3433- test-requirements.txt from global-requirements.txt."""
3434+ Update an OpenStack git directory's requirements.txt and
3435+ test-requirements.txt from global-requirements.txt.
3436+ """
3437 orig_dir = os.getcwd()
3438 os.chdir(reqs_dir)
3439- cmd = "python update.py {}".format(package_dir)
3440+ python = os.path.join(venv, 'bin/python')
3441+ cmd = [python, 'update.py', package_dir]
3442 try:
3443- subprocess.check_call(cmd.split(' '))
3444+ subprocess.check_call(cmd)
3445 except subprocess.CalledProcessError:
3446 package = os.path.basename(package_dir)
3447- error_out("Error updating {} from global-requirements.txt".format(package))
3448+ error_out("Error updating {} from "
3449+ "global-requirements.txt".format(package))
3450 os.chdir(orig_dir)
3451+
3452+
3453+def git_pip_venv_dir(projects_yaml):
3454+ """
3455+ Return the pip virtualenv path.
3456+ """
3457+ parent_dir = '/mnt/openstack-git'
3458+
3459+ projects = _git_yaml_load(projects_yaml)
3460+
3461+ if 'directory' in projects.keys():
3462+ parent_dir = projects['directory']
3463+
3464+ return os.path.join(parent_dir, 'venv')
3465+
3466+
3467+def git_src_dir(projects_yaml, project):
3468+ """
3469+ Return the directory where the specified project's source is located.
3470+ """
3471+ parent_dir = '/mnt/openstack-git'
3472+
3473+ projects = _git_yaml_load(projects_yaml)
3474+
3475+ if 'directory' in projects.keys():
3476+ parent_dir = projects['directory']
3477+
3478+ for p in projects['repositories']:
3479+ if p['name'] == project:
3480+ return os.path.join(parent_dir, os.path.basename(p['repository']))
3481+
3482+ return None
3483+
3484+
3485+def git_yaml_value(projects_yaml, key):
3486+ """
3487+ Return the value in projects_yaml for the specified key.
3488+ """
3489+ projects = _git_yaml_load(projects_yaml)
3490+
3491+ if key in projects.keys():
3492+ return projects[key]
3493+
3494+ return None
3495
3496=== modified file 'hooks/charmhelpers/contrib/peerstorage/__init__.py'
3497--- hooks/charmhelpers/contrib/peerstorage/__init__.py 2015-06-18 14:29:25 +0000
3498+++ hooks/charmhelpers/contrib/peerstorage/__init__.py 2015-07-16 20:20:59 +0000
3499@@ -14,14 +14,19 @@
3500 # You should have received a copy of the GNU Lesser General Public License
3501 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3502
3503+import json
3504 import six
3505+
3506 from charmhelpers.core.hookenv import relation_id as current_relation_id
3507 from charmhelpers.core.hookenv import (
3508 is_relation_made,
3509 relation_ids,
3510- relation_get,
3511+ relation_get as _relation_get,
3512 local_unit,
3513- relation_set,
3514+ relation_set as _relation_set,
3515+ leader_get as _leader_get,
3516+ leader_set,
3517+ is_leader,
3518 )
3519
3520
3521@@ -54,6 +59,105 @@
3522 """
3523
3524
3525+def leader_get(attribute=None):
3526+ """Wrapper to ensure that settings are migrated from the peer relation.
3527+
3528+ This is to support upgrading an environment that does not support
3529+ Juju leadership election to one that does.
3530+
3531+ If a setting is not extant in the leader-get but is on the relation-get
3532+ peer rel, it is migrated and marked as such so that it is not re-migrated.
3533+ """
3534+ migration_key = '__leader_get_migrated_settings__'
3535+ if not is_leader():
3536+ return _leader_get(attribute=attribute)
3537+
3538+ settings_migrated = False
3539+ leader_settings = _leader_get(attribute=attribute)
3540+ previously_migrated = _leader_get(attribute=migration_key)
3541+
3542+ if previously_migrated:
3543+ migrated = set(json.loads(previously_migrated))
3544+ else:
3545+ migrated = set([])
3546+
3547+ try:
3548+ if migration_key in leader_settings:
3549+ del leader_settings[migration_key]
3550+ except TypeError:
3551+ pass
3552+
3553+ if attribute:
3554+ if attribute in migrated:
3555+ return leader_settings
3556+
3557+ # If attribute not present in leader db, check if this unit has set
3558+ # the attribute in the peer relation
3559+ if not leader_settings:
3560+ peer_setting = relation_get(attribute=attribute, unit=local_unit())
3561+ if peer_setting:
3562+ leader_set(settings={attribute: peer_setting})
3563+ leader_settings = peer_setting
3564+
3565+ if leader_settings:
3566+ settings_migrated = True
3567+ migrated.add(attribute)
3568+ else:
3569+ r_settings = relation_get(unit=local_unit())
3570+ if r_settings:
3571+ for key in set(r_settings.keys()).difference(migrated):
3572+ # Leader setting wins
3573+ if not leader_settings.get(key):
3574+ leader_settings[key] = r_settings[key]
3575+
3576+ settings_migrated = True
3577+ migrated.add(key)
3578+
3579+ if settings_migrated:
3580+ leader_set(**leader_settings)
3581+
3582+ if migrated and settings_migrated:
3583+ migrated = json.dumps(list(migrated))
3584+ leader_set(settings={migration_key: migrated})
3585+
3586+ return leader_settings
3587+
3588+
3589+def relation_set(relation_id=None, relation_settings=None, **kwargs):
3590+ """Attempt to use leader-set if supported in the current version of Juju,
3591+ otherwise falls back on relation-set.
3592+
3593+ Note that we only attempt to use leader-set if the provided relation_id is
3594+ a peer relation id or no relation id is provided (in which case we assume
3595+ we are within the peer relation context).
3596+ """
3597+ try:
3598+ if relation_id in relation_ids('cluster'):
3599+ return leader_set(settings=relation_settings, **kwargs)
3600+ else:
3601+ raise NotImplementedError
3602+ except NotImplementedError:
3603+ return _relation_set(relation_id=relation_id,
3604+ relation_settings=relation_settings, **kwargs)
3605+
3606+
3607+def relation_get(attribute=None, unit=None, rid=None):
3608+ """Attempt to use leader-get if supported in the current version of Juju,
3609+ otherwise falls back on relation-get.
3610+
3611+ Note that we only attempt to use leader-get if the provided rid is a peer
3612+ relation id or no relation id is provided (in which case we assume we are
3613+ within the peer relation context).
3614+ """
3615+ try:
3616+ if rid in relation_ids('cluster'):
3617+ return leader_get(attribute)
3618+ else:
3619+ raise NotImplementedError
3620+ except NotImplementedError:
3621+ return _relation_get(attribute=attribute, rid=rid, unit=unit)
3622+
3623+
3624 def peer_retrieve(key, relation_name='cluster'):
3625 """Retrieve a named key from peer relation `relation_name`."""
3626 cluster_rels = relation_ids(relation_name)
3627@@ -73,6 +177,8 @@
3628 exc_list = exc_list if exc_list else []
3629 peerdb_settings = peer_retrieve('-', relation_name=relation_name)
3630 matched = {}
3631+ if peerdb_settings is None:
3632+ return matched
3633 for k, v in peerdb_settings.items():
3634 full_prefix = prefix + delimiter
3635 if k.startswith(full_prefix):
3636@@ -96,12 +202,26 @@
3637 'peer relation {}'.format(relation_name))
3638
3639
3640-def peer_echo(includes=None):
3641+def peer_echo(includes=None, force=False):
3642 """Echo filtered attributes back onto the same relation for storage.
3643
3644 This is a requirement to use the peerstorage module - it needs to be called
3645 from the peer relation's changed hook.
3646+
3647+ If Juju leader support exists this will be a noop unless force is True.
3648 """
3649+ try:
3650+ is_leader()
3651+ except NotImplementedError:
3652+ pass
3653+ else:
3654+ if not force:
3655+ return # NOOP if leader-election is supported
3656+
3657+ # Use original non-leader calls
3658+ relation_get = _relation_get
3659+ relation_set = _relation_set
3660+
3661 rdata = relation_get()
3662 echo_data = {}
3663 if includes is None:
3664
3665=== modified file 'hooks/charmhelpers/contrib/python/packages.py'
3666--- hooks/charmhelpers/contrib/python/packages.py 2015-06-18 14:29:25 +0000
3667+++ hooks/charmhelpers/contrib/python/packages.py 2015-07-16 20:20:59 +0000
3668@@ -17,8 +17,11 @@
3669 # You should have received a copy of the GNU Lesser General Public License
3670 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3671
3672+import os
3673+import subprocess
3674+
3675 from charmhelpers.fetch import apt_install, apt_update
3676-from charmhelpers.core.hookenv import log
3677+from charmhelpers.core.hookenv import charm_dir, log
3678
3679 try:
3680 from pip import main as pip_execute
3681@@ -33,6 +36,8 @@
3682 def parse_options(given, available):
3683 """Given a set of options, check if available"""
3684 for key, value in sorted(given.items()):
3685+ if not value:
3686+ continue
3687 if key in available:
3688 yield "--{0}={1}".format(key, value)
3689
3690@@ -51,11 +56,15 @@
3691 pip_execute(command)
3692
3693
3694-def pip_install(package, fatal=False, upgrade=False, **options):
3695+def pip_install(package, fatal=False, upgrade=False, venv=None, **options):
3696 """Install a python package"""
3697- command = ["install"]
3698+ if venv:
3699+ venv_python = os.path.join(venv, 'bin/pip')
3700+ command = [venv_python, "install"]
3701+ else:
3702+ command = ["install"]
3703
3704- available_options = ('proxy', 'src', 'log', "index-url", )
3705+ available_options = ('proxy', 'src', 'log', 'index-url', )
3706 for option in parse_options(options, available_options):
3707 command.append(option)
3708
3709@@ -69,7 +78,10 @@
3710
3711 log("Installing {} package with options: {}".format(package,
3712 command))
3713- pip_execute(command)
3714+ if venv:
3715+ subprocess.check_call(command)
3716+ else:
3717+ pip_execute(command)
3718
3719
3720 def pip_uninstall(package, **options):
3721@@ -94,3 +106,16 @@
3722 """Returns the list of current python installed packages
3723 """
3724 return pip_execute(["list"])
3725+
3726+
3727+def pip_create_virtualenv(path=None):
3728+ """Create an isolated Python environment."""
3729+ apt_install('python-virtualenv')
3730+
3731+ if path:
3732+ venv_path = path
3733+ else:
3734+ venv_path = os.path.join(charm_dir(), 'venv')
3735+
3736+ if not os.path.exists(venv_path):
3737+ subprocess.check_call(['virtualenv', venv_path])
3738
3739=== modified file 'hooks/charmhelpers/contrib/ssl/service.py'
3740--- hooks/charmhelpers/contrib/ssl/service.py 2015-06-18 14:29:25 +0000
3741+++ hooks/charmhelpers/contrib/ssl/service.py 2015-07-16 20:20:59 +0000
3742@@ -14,16 +14,12 @@
3743 # You should have received a copy of the GNU Lesser General Public License
3744 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3745
3746-import logging
3747 import os
3748 from os.path import join as path_join
3749 from os.path import exists
3750 import subprocess
3751
3752-
3753-log = logging.getLogger("service_ca")
3754-
3755-logging.basicConfig(level=logging.DEBUG)
3756+from charmhelpers.core.hookenv import log, DEBUG
3757
3758 STD_CERT = "standard"
3759
3760@@ -62,7 +58,7 @@
3761 ###############
3762
3763 def init(self):
3764- log.debug("initializing service ca")
3765+ log("initializing service ca", level=DEBUG)
3766 if not exists(self.ca_dir):
3767 self._init_ca_dir(self.ca_dir)
3768 self._init_ca()
3769@@ -91,23 +87,23 @@
3770 os.mkdir(sd)
3771
3772 if not exists(path_join(ca_dir, 'serial')):
3773- with open(path_join(ca_dir, 'serial'), 'wb') as fh:
3774+ with open(path_join(ca_dir, 'serial'), 'w') as fh:
3775 fh.write('02\n')
3776
3777 if not exists(path_join(ca_dir, 'index.txt')):
3778- with open(path_join(ca_dir, 'index.txt'), 'wb') as fh:
3779+ with open(path_join(ca_dir, 'index.txt'), 'w') as fh:
3780 fh.write('')
3781
3782 def _init_ca(self):
3783 """Generate the root ca's cert and key.
3784 """
3785 if not exists(path_join(self.ca_dir, 'ca.cnf')):
3786- with open(path_join(self.ca_dir, 'ca.cnf'), 'wb') as fh:
3787+ with open(path_join(self.ca_dir, 'ca.cnf'), 'w') as fh:
3788 fh.write(
3789 CA_CONF_TEMPLATE % (self.get_conf_variables()))
3790
3791 if not exists(path_join(self.ca_dir, 'signing.cnf')):
3792- with open(path_join(self.ca_dir, 'signing.cnf'), 'wb') as fh:
3793+ with open(path_join(self.ca_dir, 'signing.cnf'), 'w') as fh:
3794 fh.write(
3795 SIGNING_CONF_TEMPLATE % (self.get_conf_variables()))
3796
3797@@ -119,7 +115,7 @@
3798 '-keyout', self.ca_key, '-out', self.ca_cert,
3799 '-outform', 'PEM']
3800 output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
3801- log.debug("CA Init:\n %s", output)
3802+ log("CA Init:\n %s" % output, level=DEBUG)
3803
3804 def get_conf_variables(self):
3805 return dict(
3806@@ -163,15 +159,15 @@
3807 subj = '/O=%(org_name)s/OU=%(org_unit_name)s/CN=%(common_name)s' % (
3808 template_vars)
3809
3810- log.debug("CA Create Cert %s", common_name)
3811+ log("CA Create Cert %s" % common_name, level=DEBUG)
3812 cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa:2048',
3813 '-nodes', '-days', self.default_expiry,
3814 '-keyout', key_p, '-out', csr_p, '-subj', subj]
3815- subprocess.check_call(cmd)
3816+ subprocess.check_call(cmd, stderr=subprocess.PIPE)
3817 cmd = ['openssl', 'rsa', '-in', key_p, '-out', key_p]
3818- subprocess.check_call(cmd)
3819+ subprocess.check_call(cmd, stderr=subprocess.PIPE)
3820
3821- log.debug("CA Sign Cert %s", common_name)
3822+ log("CA Sign Cert %s" % common_name, level=DEBUG)
3823 if self.cert_type == MYSQL_CERT:
3824 cmd = ['openssl', 'x509', '-req',
3825 '-in', csr_p, '-days', self.default_expiry,
3826@@ -182,8 +178,8 @@
3827 '-extensions', 'req_extensions',
3828 '-days', self.default_expiry, '-notext',
3829 '-in', csr_p, '-out', crt_p, '-subj', subj, '-batch']
3830- log.debug("running %s", " ".join(cmd))
3831- subprocess.check_call(cmd)
3832+ log("running %s" % " ".join(cmd), level=DEBUG)
3833+ subprocess.check_call(cmd, stderr=subprocess.PIPE)
3834
3835 def get_ca_bundle(self):
3836 with open(self.ca_cert) as fh:
3837
3838=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
3839--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-06-18 14:29:25 +0000
3840+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-07-16 20:20:59 +0000
3841@@ -60,12 +60,12 @@
3842 KEYFILE = '/etc/ceph/ceph.client.{}.key'
3843
3844 CEPH_CONF = """[global]
3845- auth supported = {auth}
3846- keyring = {keyring}
3847- mon host = {mon_hosts}
3848- log to syslog = {use_syslog}
3849- err to syslog = {use_syslog}
3850- clog to syslog = {use_syslog}
3851+auth supported = {auth}
3852+keyring = {keyring}
3853+mon host = {mon_hosts}
3854+log to syslog = {use_syslog}
3855+err to syslog = {use_syslog}
3856+clog to syslog = {use_syslog}
3857 """
3858
3859
3860
3861=== modified file 'hooks/charmhelpers/contrib/templating/contexts.py'
3862--- hooks/charmhelpers/contrib/templating/contexts.py 2015-06-18 14:29:25 +0000
3863+++ hooks/charmhelpers/contrib/templating/contexts.py 2015-07-16 20:20:59 +0000
3864@@ -80,7 +80,7 @@
3865
3866
3867 def juju_state_to_yaml(yaml_path, namespace_separator=':',
3868- allow_hyphens_in_keys=True):
3869+ allow_hyphens_in_keys=True, mode=None):
3870 """Update the juju config and state in a yaml file.
3871
3872 This includes any current relation-get data, and the charm
3873@@ -122,8 +122,13 @@
3874 with open(yaml_path, "r") as existing_vars_file:
3875 existing_vars = yaml.load(existing_vars_file.read())
3876 else:
3877+ with open(yaml_path, "w+"):
3878+ pass
3879 existing_vars = {}
3880
3881+ if mode is not None:
3882+ os.chmod(yaml_path, mode)
3883+
3884 if not allow_hyphens_in_keys:
3885 config = dict_keys_without_hyphens(config)
3886 existing_vars.update(config)
3887
3888=== modified file 'hooks/charmhelpers/contrib/unison/__init__.py'
3889--- hooks/charmhelpers/contrib/unison/__init__.py 2015-06-18 14:29:25 +0000
3890+++ hooks/charmhelpers/contrib/unison/__init__.py 2015-07-16 20:20:59 +0000
3891@@ -63,6 +63,7 @@
3892 from charmhelpers.core.host import (
3893 adduser,
3894 add_user_to_group,
3895+ pwgen,
3896 )
3897
3898 from charmhelpers.core.hookenv import (
3899@@ -140,7 +141,7 @@
3900 ssh_dir = os.path.join(home_dir, '.ssh')
3901 auth_keys = os.path.join(ssh_dir, 'authorized_keys')
3902 log('Syncing authorized_keys @ %s.' % auth_keys)
3903- with open(auth_keys, 'wb') as out:
3904+ with open(auth_keys, 'w') as out:
3905 for k in keys:
3906 out.write('%s\n' % k)
3907
3908@@ -152,16 +153,16 @@
3909 khosts = []
3910 for host in hosts:
3911 cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
3912- remote_key = check_output(cmd).strip()
3913+ remote_key = check_output(cmd, universal_newlines=True).strip()
3914 khosts.append(remote_key)
3915 log('Syncing known_hosts @ %s.' % known_hosts)
3916- with open(known_hosts, 'wb') as out:
3917+ with open(known_hosts, 'w') as out:
3918 for host in khosts:
3919 out.write('%s\n' % host)
3920
3921
3922 def ensure_user(user, group=None):
3923- adduser(user)
3924+ adduser(user, pwgen())
3925 if group:
3926 add_user_to_group(user, group)
3927
3928
3929=== modified file 'hooks/charmhelpers/core/fstab.py'
3930--- hooks/charmhelpers/core/fstab.py 2015-06-18 14:29:25 +0000
3931+++ hooks/charmhelpers/core/fstab.py 2015-07-16 20:20:59 +0000
3932@@ -77,7 +77,7 @@
3933 for line in self.readlines():
3934 line = line.decode('us-ascii')
3935 try:
3936- if line.strip() and not line.startswith("#"):
3937+ if line.strip() and not line.strip().startswith("#"):
3938 yield self._hydrate_entry(line)
3939 except ValueError:
3940 pass
3941@@ -104,7 +104,7 @@
3942
3943 found = False
3944 for index, line in enumerate(lines):
3945- if not line.startswith("#"):
3946+ if line.strip() and not line.strip().startswith("#"):
3947 if self._hydrate_entry(line) == entry:
3948 found = True
3949 break
3950
3951=== modified file 'hooks/charmhelpers/core/hookenv.py'
3952--- hooks/charmhelpers/core/hookenv.py 2015-06-18 14:29:25 +0000
3953+++ hooks/charmhelpers/core/hookenv.py 2015-07-16 20:20:59 +0000
3954@@ -20,11 +20,17 @@
3955 # Authors:
3956 # Charm Helpers Developers <juju@lists.ubuntu.com>
3957
3958+from __future__ import print_function
3959+from distutils.version import LooseVersion
3960+from functools import wraps
3961+import glob
3962 import os
3963 import json
3964 import yaml
3965 import subprocess
3966 import sys
3967+import errno
3968+import tempfile
3969 from subprocess import CalledProcessError
3970
3971 import six
3972@@ -56,15 +62,17 @@
3973
3974 will cache the result of unit_get + 'test' for future calls.
3975 """
3976+ @wraps(func)
3977 def wrapper(*args, **kwargs):
3978 global cache
3979 key = str((func, args, kwargs))
3980 try:
3981 return cache[key]
3982 except KeyError:
3983- res = func(*args, **kwargs)
3984- cache[key] = res
3985- return res
3986+ pass # Drop out of the exception handler scope.
3987+ res = func(*args, **kwargs)
3988+ cache[key] = res
3989+ return res
3990 return wrapper
3991
3992
3993@@ -87,7 +95,18 @@
3994 if not isinstance(message, six.string_types):
3995 message = repr(message)
3996 command += [message]
3997- subprocess.call(command)
3998+ # Missing juju-log should not cause failures in unit tests
3999+ # Send log output to stderr
4000+ try:
4001+ subprocess.call(command)
4002+ except OSError as e:
4003+ if e.errno == errno.ENOENT:
4004+ if level:
4005+ message = "{}: {}".format(level, message)
4006+ message = "juju-log: {}".format(message)
4007+ print(message, file=sys.stderr)
4008+ else:
4009+ raise
4010
4011
4012 class Serializable(UserDict):
4013@@ -165,7 +184,7 @@
4014
4015 def remote_unit():
4016 """The remote unit for the current relation hook"""
4017- return os.environ['JUJU_REMOTE_UNIT']
4018+ return os.environ.get('JUJU_REMOTE_UNIT', None)
4019
4020
4021 def service_name():
4022@@ -225,23 +244,7 @@
4023 self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
4024 if os.path.exists(self.path):
4025 self.load_previous()
4026-
4027- def __getitem__(self, key):
4028- """For regular dict lookups, check the current juju config first,
4029- then the previous (saved) copy. This ensures that user-saved values
4030- will be returned by a dict lookup.
4031-
4032- """
4033- try:
4034- return dict.__getitem__(self, key)
4035- except KeyError:
4036- return (self._prev_dict or {})[key]
4037-
4038- def keys(self):
4039- prev_keys = []
4040- if self._prev_dict is not None:
4041- prev_keys = self._prev_dict.keys()
4042- return list(set(prev_keys + list(dict.keys(self))))
4043+ atexit(self._implicit_save)
4044
4045 def load_previous(self, path=None):
4046 """Load previous copy of config from disk.
4047@@ -260,6 +263,9 @@
4048 self.path = path or self.path
4049 with open(self.path) as f:
4050 self._prev_dict = json.load(f)
4051+ for k, v in self._prev_dict.items():
4052+ if k not in self:
4053+ self[k] = v
4054
4055 def changed(self, key):
4056 """Return True if the current value for this key is different from
4057@@ -291,13 +297,13 @@
4058 instance.
4059
4060 """
4061- if self._prev_dict:
4062- for k, v in six.iteritems(self._prev_dict):
4063- if k not in self:
4064- self[k] = v
4065 with open(self.path, 'w') as f:
4066 json.dump(self, f)
4067
4068+ def _implicit_save(self):
4069+ if self.implicit_save:
4070+ self.save()
4071+
4072
4073 @cached
4074 def config(scope=None):
4075@@ -340,18 +346,49 @@
4076 """Set relation information for the current unit"""
4077 relation_settings = relation_settings if relation_settings else {}
4078 relation_cmd_line = ['relation-set']
4079+ accepts_file = "--file" in subprocess.check_output(
4080+ relation_cmd_line + ["--help"], universal_newlines=True)
4081 if relation_id is not None:
4082 relation_cmd_line.extend(('-r', relation_id))
4083- for k, v in (list(relation_settings.items()) + list(kwargs.items())):
4084- if v is None:
4085- relation_cmd_line.append('{}='.format(k))
4086- else:
4087- relation_cmd_line.append('{}={}'.format(k, v))
4088- subprocess.check_call(relation_cmd_line)
4089+ settings = relation_settings.copy()
4090+ settings.update(kwargs)
4091+ for key, value in settings.items():
4092+ # Force value to be a string: it always should, but some call
4093+ # sites pass in things like dicts or numbers.
4094+ if value is not None:
4095+ settings[key] = "{}".format(value)
4096+ if accepts_file:
4097+ # --file was introduced in Juju 1.23.2. Use it by default if
4098+ # available, since otherwise we'll break if the relation data is
4099+ # too big. Ideally we should tell relation-set to read the data from
4100+ # stdin, but that feature is broken in 1.23.2: Bug #1454678.
4101+ with tempfile.NamedTemporaryFile(delete=False) as settings_file:
4102+ settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
4103+ subprocess.check_call(
4104+ relation_cmd_line + ["--file", settings_file.name])
4105+ os.remove(settings_file.name)
4106+ else:
4107+ for key, value in settings.items():
4108+ if value is None:
4109+ relation_cmd_line.append('{}='.format(key))
4110+ else:
4111+ relation_cmd_line.append('{}={}'.format(key, value))
4112+ subprocess.check_call(relation_cmd_line)
4113 # Flush cache of any relation-gets for local unit
4114 flush(local_unit())
4115
4116
4117+def relation_clear(r_id=None):
4118+ ''' Clears any relation data already set on relation r_id '''
4119+ settings = relation_get(rid=r_id,
4120+ unit=local_unit())
4121+ for setting in settings:
4122+ if setting not in ['public-address', 'private-address']:
4123+ settings[setting] = None
4124+ relation_set(relation_id=r_id,
4125+ **settings)
4126+
4127+
4128 @cached
4129 def relation_ids(reltype=None):
4130 """A list of relation_ids"""
4131@@ -496,6 +533,11 @@
4132 return None
4133
4134
4135+def unit_public_ip():
4136+ """Get this unit's public IP address"""
4137+ return unit_get('public-address')
4138+
4139+
4140 def unit_private_ip():
4141 """Get this unit's private IP address"""
4142 return unit_get('private-address')
4143@@ -528,10 +570,14 @@
4144 hooks.execute(sys.argv)
4145 """
4146
4147- def __init__(self, config_save=True):
4148+ def __init__(self, config_save=None):
4149 super(Hooks, self).__init__()
4150 self._hooks = {}
4151- self._config_save = config_save
4152+
4153+ # For unknown reasons, we allow the Hooks constructor to override
4154+ # config().implicit_save.
4155+ if config_save is not None:
4156+ config().implicit_save = config_save
4157
4158 def register(self, name, function):
4159 """Register a hook"""
4160@@ -539,13 +585,16 @@
4161
4162 def execute(self, args):
4163 """Execute a registered hook based on args[0]"""
4164+ _run_atstart()
4165 hook_name = os.path.basename(args[0])
4166 if hook_name in self._hooks:
4167- self._hooks[hook_name]()
4168- if self._config_save:
4169- cfg = config()
4170- if cfg.implicit_save:
4171- cfg.save()
4172+ try:
4173+ self._hooks[hook_name]()
4174+ except SystemExit as x:
4175+ if x.code is None or x.code == 0:
4176+ _run_atexit()
4177+ raise
4178+ _run_atexit()
4179 else:
4180 raise UnregisteredHookError(hook_name)
4181
4182@@ -566,3 +615,187 @@
4183 def charm_dir():
4184 """Return the root directory of the current charm"""
4185 return os.environ.get('CHARM_DIR')
4186+
4187+
4188+@cached
4189+def action_get(key=None):
4190+ """Gets the value of an action parameter, or all key/value param pairs"""
4191+ cmd = ['action-get']
4192+ if key is not None:
4193+ cmd.append(key)
4194+ cmd.append('--format=json')
4195+ action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
4196+ return action_data
4197+
4198+
4199+def action_set(values):
4200+ """Sets the values to be returned after the action finishes"""
4201+ cmd = ['action-set']
4202+ for k, v in list(values.items()):
4203+ cmd.append('{}={}'.format(k, v))
4204+ subprocess.check_call(cmd)
4205+
4206+
4207+def action_fail(message):
4208+ """Sets the action status to failed and sets the error message.
4209+
4210+ The results set by action_set are preserved."""
4211+ subprocess.check_call(['action-fail', message])
4212+
4213+
4214+def status_set(workload_state, message):
4215+ """Set the workload state with a message
4216+
4217+ Use status-set to set the workload state with a message which is visible
4218+ to the user via juju status. If the status-set command is not found then
4219+ assume this is juju < 1.23 and juju-log the message unstead.
4220+
4221+ workload_state -- valid juju workload state.
4222+ message -- status update message
4223+ """
4224+ valid_states = ['maintenance', 'blocked', 'waiting', 'active']
4225+ if workload_state not in valid_states:
4226+ raise ValueError(
4227+ '{!r} is not a valid workload state'.format(workload_state)
4228+ )
4229+ cmd = ['status-set', workload_state, message]
4230+ try:
4231+ ret = subprocess.call(cmd)
4232+ if ret == 0:
4233+ return
4234+ except OSError as e:
4235+ if e.errno != errno.ENOENT:
4236+ raise
4237+ log_message = 'status-set failed: {} {}'.format(workload_state,
4238+ message)
4239+ log(log_message, level='INFO')
4240+
4241+
4242+def status_get():
4243+ """Retrieve the previously set juju workload state
4244+
4245+ If the status-set command is not found then assume this is juju < 1.23 and
4246+ return 'unknown'
4247+ """
4248+ cmd = ['status-get']
4249+ try:
4250+ raw_status = subprocess.check_output(cmd, universal_newlines=True)
4251+ status = raw_status.rstrip()
4252+ return status
4253+ except OSError as e:
4254+ if e.errno == errno.ENOENT:
4255+ return 'unknown'
4256+ else:
4257+ raise
4258+
4259+
4260+def translate_exc(from_exc, to_exc):
4261+ def inner_translate_exc1(f):
4262+ def inner_translate_exc2(*args, **kwargs):
4263+ try:
4264+ return f(*args, **kwargs)
4265+ except from_exc:
4266+ raise to_exc
4267+
4268+ return inner_translate_exc2
4269+
4270+ return inner_translate_exc1
4271+
4272+
4273+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
4274+def is_leader():
4275+ """Does the current unit hold the juju leadership
4276+
4277+ Uses juju to determine whether the current unit is the leader of its peers
4278+ """
4279+ cmd = ['is-leader', '--format=json']
4280+ return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
4281+
4282+
4283+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
4284+def leader_get(attribute=None):
4285+ """Juju leader get value(s)"""
4286+ cmd = ['leader-get', '--format=json'] + [attribute or '-']
4287+ return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
4288+
4289+
4290+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
4291+def leader_set(settings=None, **kwargs):
4292+ """Juju leader set value(s)"""
4293+ # Don't log secrets.
4294+ # log("Juju leader-set '%s'" % (settings), level=DEBUG)
4295+ cmd = ['leader-set']
4296+ settings = settings or {}
4297+ settings.update(kwargs)
4298+ for k, v in settings.items():
4299+ if v is None:
4300+ cmd.append('{}='.format(k))
4301+ else:
4302+ cmd.append('{}={}'.format(k, v))
4303+ subprocess.check_call(cmd)
4304+
4305+
4306+@cached
4307+def juju_version():
4308+ """Full version string (eg. '1.23.3.1-trusty-amd64')"""
4309+ # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
4310+ jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
4311+ return subprocess.check_output([jujud, 'version'],
4312+ universal_newlines=True).strip()
4313+
4314+
4315+@cached
4316+def has_juju_version(minimum_version):
4317+ """Return True if the Juju version is at least the provided version"""
4318+ return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
4319+
4320+
4321+_atexit = []
4322+_atstart = []
4323+
4324+
4325+def atstart(callback, *args, **kwargs):
4326+ '''Schedule a callback to run before the main hook.
4327+
4328+ Callbacks are run in the order they were added.
4329+
4330+ This is useful for modules and classes to perform initialization
4331+ and inject behavior. In particular:
4332+
4333+ - Run common code before all of your hooks, such as logging
4334+ the hook name or interesting relation data.
4335+ - Defer object or module initialization that requires a hook
4336+ context until we know there actually is a hook context,
4337+ making testing easier.
4338+ - Rather than requiring charm authors to include boilerplate to
4339+ invoke your helper's behavior, have it run automatically if
4340+ your object is instantiated or module imported.
4341+
4342+ This is not at all useful after your hook framework as been launched.
4343+ '''
4344+ global _atstart
4345+ _atstart.append((callback, args, kwargs))
4346+
4347+
4348+def atexit(callback, *args, **kwargs):
4349+ '''Schedule a callback to run on successful hook completion.
4350+
4351+ Callbacks are run in the reverse order that they were added.'''
4352+ _atexit.append((callback, args, kwargs))
4353+
4354+
4355+def _run_atstart():
4356+ '''Hook frameworks must invoke this before running the main hook body.'''
4357+ global _atstart
4358+ for callback, args, kwargs in _atstart:
4359+ callback(*args, **kwargs)
4360+ del _atstart[:]
4361+
4362+
4363+def _run_atexit():
4364+ '''Hook frameworks must invoke this after the main hook body has
4365+ successfully completed. Do not invoke it if the hook fails.'''
4366+ global _atexit
4367+ for callback, args, kwargs in reversed(_atexit):
4368+ callback(*args, **kwargs)
4369+ del _atexit[:]
4370
4371=== modified file 'hooks/charmhelpers/core/host.py'
4372--- hooks/charmhelpers/core/host.py 2015-06-18 14:29:25 +0000
4373+++ hooks/charmhelpers/core/host.py 2015-07-16 20:20:59 +0000
4374@@ -24,6 +24,7 @@
4375 import os
4376 import re
4377 import pwd
4378+import glob
4379 import grp
4380 import random
4381 import string
4382@@ -90,7 +91,7 @@
4383 ['service', service_name, 'status'],
4384 stderr=subprocess.STDOUT).decode('UTF-8')
4385 except subprocess.CalledProcessError as e:
4386- return 'unrecognized service' not in e.output
4387+ return b'unrecognized service' not in e.output
4388 else:
4389 return True
4390
4391@@ -269,6 +270,21 @@
4392 return None
4393
4394
4395+def path_hash(path):
4396+ """
4397+ Generate a hash checksum of all files matching 'path'. Standard wildcards
4398+ like '*' and '?' are supported, see documentation for the 'glob' module for
4399+ more information.
4400+
4401+ :return: dict: A { filename: hash } dictionary for all matched files.
4402+ Empty if none found.
4403+ """
4404+ return {
4405+ filename: file_hash(filename)
4406+ for filename in glob.iglob(path)
4407+ }
4408+
4409+
4410 def check_hash(path, checksum, hash_type='md5'):
4411 """
4412 Validate a file using a cryptographic checksum.
4413@@ -296,23 +312,25 @@
4414
4415 @restart_on_change({
4416 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
4417+ '/etc/apache/sites-enabled/*': [ 'apache2' ]
4418 })
4419- def ceph_client_changed():
4420+ def config_changed():
4421 pass # your code here
4422
4423 In this example, the cinder-api and cinder-volume services
4424 would be restarted if /etc/ceph/ceph.conf is changed by the
4425- ceph_client_changed function.
4426+ ceph_client_changed function. The apache2 service would be
4427+ restarted if any file matching the pattern got changed, created
4428+ or removed. Standard wildcards are supported, see documentation
4429+ for the 'glob' module for more information.
4430 """
4431 def wrap(f):
4432 def wrapped_f(*args, **kwargs):
4433- checksums = {}
4434- for path in restart_map:
4435- checksums[path] = file_hash(path)
4436+ checksums = {path: path_hash(path) for path in restart_map}
4437 f(*args, **kwargs)
4438 restarts = []
4439 for path in restart_map:
4440- if checksums[path] != file_hash(path):
4441+ if path_hash(path) != checksums[path]:
4442 restarts += restart_map[path]
4443 services_list = list(OrderedDict.fromkeys(restarts))
4444 if not stopstart:
4445@@ -339,12 +357,16 @@
4446 def pwgen(length=None):
4447 """Generate a random pasword."""
4448 if length is None:
4449+ # A random length is ok to use a weak PRNG
4450 length = random.choice(range(35, 45))
4451 alphanumeric_chars = [
4452 l for l in (string.ascii_letters + string.digits)
4453 if l not in 'l0QD1vAEIOUaeiou']
4454+ # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
4455+ # actual password
4456+ random_generator = random.SystemRandom()
4457 random_chars = [
4458- random.choice(alphanumeric_chars) for _ in range(length)]
4459+ random_generator.choice(alphanumeric_chars) for _ in range(length)]
4460 return(''.join(random_chars))
4461
4462
4463
4464=== modified file 'hooks/charmhelpers/core/services/base.py'
4465--- hooks/charmhelpers/core/services/base.py 2015-06-18 14:29:25 +0000
4466+++ hooks/charmhelpers/core/services/base.py 2015-07-16 20:20:59 +0000
4467@@ -15,9 +15,9 @@
4468 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4469
4470 import os
4471-import re
4472 import json
4473-from collections import Iterable
4474+from inspect import getargspec
4475+from collections import Iterable, OrderedDict
4476
4477 from charmhelpers.core import host
4478 from charmhelpers.core import hookenv
4479@@ -119,7 +119,7 @@
4480 """
4481 self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
4482 self._ready = None
4483- self.services = {}
4484+ self.services = OrderedDict()
4485 for service in services or []:
4486 service_name = service['service']
4487 self.services[service_name] = service
4488@@ -128,15 +128,18 @@
4489 """
4490 Handle the current hook by doing The Right Thing with the registered services.
4491 """
4492- hook_name = hookenv.hook_name()
4493- if hook_name == 'stop':
4494- self.stop_services()
4495- else:
4496- self.provide_data()
4497- self.reconfigure_services()
4498- cfg = hookenv.config()
4499- if cfg.implicit_save:
4500- cfg.save()
4501+ hookenv._run_atstart()
4502+ try:
4503+ hook_name = hookenv.hook_name()
4504+ if hook_name == 'stop':
4505+ self.stop_services()
4506+ else:
4507+ self.reconfigure_services()
4508+ self.provide_data()
4509+ except SystemExit as x:
4510+ if x.code is None or x.code == 0:
4511+ hookenv._run_atexit()
4512+ hookenv._run_atexit()
4513
4514 def provide_data(self):
4515 """
4516@@ -145,15 +148,36 @@
4517 A provider must have a `name` attribute, which indicates which relation
4518 to set data on, and a `provide_data()` method, which returns a dict of
4519 data to set.
4520+
4521+ The `provide_data()` method can optionally accept two parameters:
4522+
4523+ * ``remote_service`` The name of the remote service that the data will
4524+ be provided to. The `provide_data()` method will be called once
4525+ for each connected service (not unit). This allows the method to
4526+ tailor its data to the given service.
4527+ * ``service_ready`` Whether or not the service definition had all of
4528+ its requirements met, and thus the ``data_ready`` callbacks run.
4529+
4530+ Note that the ``provided_data`` methods are now called **after** the
4531+ ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
4532+ a chance to generate any data necessary for the providing to the remote
4533+ services.
4534 """
4535- hook_name = hookenv.hook_name()
4536- for service in self.services.values():
4537+ for service_name, service in self.services.items():
4538+ service_ready = self.is_ready(service_name)
4539 for provider in service.get('provided_data', []):
4540- if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
4541- data = provider.provide_data()
4542- _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
4543- if _ready:
4544- hookenv.relation_set(None, data)
4545+ for relid in hookenv.relation_ids(provider.name):
4546+ units = hookenv.related_units(relid)
4547+ if not units:
4548+ continue
4549+ remote_service = units[0].split('/')[0]
4550+ argspec = getargspec(provider.provide_data)
4551+ if len(argspec.args) > 1:
4552+ data = provider.provide_data(remote_service, service_ready)
4553+ else:
4554+ data = provider.provide_data()
4555+ if data:
4556+ hookenv.relation_set(relid, data)
4557
4558 def reconfigure_services(self, *service_names):
4559 """
4560
4561=== modified file 'hooks/charmhelpers/core/services/helpers.py'
4562--- hooks/charmhelpers/core/services/helpers.py 2015-06-18 14:29:25 +0000
4563+++ hooks/charmhelpers/core/services/helpers.py 2015-07-16 20:20:59 +0000
4564@@ -45,12 +45,14 @@
4565 """
4566 name = None
4567 interface = None
4568- required_keys = []
4569
4570 def __init__(self, name=None, additional_required_keys=None):
4571+ if not hasattr(self, 'required_keys'):
4572+ self.required_keys = []
4573+
4574 if name is not None:
4575 self.name = name
4576- if additional_required_keys is not None:
4577+ if additional_required_keys:
4578 self.required_keys.extend(additional_required_keys)
4579 self.get_data()
4580
4581@@ -134,7 +136,10 @@
4582 """
4583 name = 'db'
4584 interface = 'mysql'
4585- required_keys = ['host', 'user', 'password', 'database']
4586+
4587+ def __init__(self, *args, **kwargs):
4588+ self.required_keys = ['host', 'user', 'password', 'database']
4589+ RelationContext.__init__(self, *args, **kwargs)
4590
4591
4592 class HttpRelation(RelationContext):
4593@@ -146,7 +151,10 @@
4594 """
4595 name = 'website'
4596 interface = 'http'
4597- required_keys = ['host', 'port']
4598+
4599+ def __init__(self, *args, **kwargs):
4600+ self.required_keys = ['host', 'port']
4601+ RelationContext.__init__(self, *args, **kwargs)
4602
4603 def provide_data(self):
4604 return {
4605@@ -231,12 +239,12 @@
4606 action.
4607
4608 :param str source: The template source file, relative to
4609- `$CHARM_DIR/templates`
4610-
4611+ `$CHARM_DIR/templates`
4612 :param str target: The target to write the rendered template to
4613 :param str owner: The owner of the rendered file
4614 :param str group: The group of the rendered file
4615 :param int perms: The permissions of the rendered file
4616+
4617 """
4618 def __init__(self, source, target,
4619 owner='root', group='root', perms=0o444):
4620
4621=== added file 'hooks/charmhelpers/core/strutils.py'
4622--- hooks/charmhelpers/core/strutils.py 1970-01-01 00:00:00 +0000
4623+++ hooks/charmhelpers/core/strutils.py 2015-07-16 20:20:59 +0000
4624@@ -0,0 +1,42 @@
4625+#!/usr/bin/env python
4626+# -*- coding: utf-8 -*-
4627+
4628+# Copyright 2014-2015 Canonical Limited.
4629+#
4630+# This file is part of charm-helpers.
4631+#
4632+# charm-helpers is free software: you can redistribute it and/or modify
4633+# it under the terms of the GNU Lesser General Public License version 3 as
4634+# published by the Free Software Foundation.
4635+#
4636+# charm-helpers is distributed in the hope that it will be useful,
4637+# but WITHOUT ANY WARRANTY; without even the implied warranty of
4638+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4639+# GNU Lesser General Public License for more details.
4640+#
4641+# You should have received a copy of the GNU Lesser General Public License
4642+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4643+
4644+import six
4645+
4646+
def bool_from_string(value):
    """Interpret string value as boolean.

    Returns True if value translates to True otherwise False.
    Raises ValueError for non-string input or an unrecognised word.
    """
    if not isinstance(value, six.string_types):
        raise ValueError(
            "Unable to interpret non-string value '%s' as boolean" % (value))

    normalised = six.text_type(value).strip().lower()

    if normalised in ('y', 'yes', 'true', 't', 'on'):
        return True
    if normalised in ('n', 'no', 'false', 'f', 'off'):
        return False

    # NOTE: the message deliberately reports the normalised form, matching
    # the long-standing behaviour of this helper.
    raise ValueError(
        "Unable to interpret string value '%s' as boolean" % (normalised))
4667
4668=== modified file 'hooks/charmhelpers/core/unitdata.py'
4669--- hooks/charmhelpers/core/unitdata.py 2015-06-18 14:29:25 +0000
4670+++ hooks/charmhelpers/core/unitdata.py 2015-07-16 20:20:59 +0000
4671@@ -443,7 +443,7 @@
4672 data = hookenv.execution_environment()
4673 self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
4674 self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
4675- self.kv.set('env', data['env'])
4676+ self.kv.set('env', dict(data['env']))
4677 self.kv.set('unit', data['unit'])
4678 self.kv.set('relid', data.get('relid'))
4679 return conf_delta, rels_delta
4680
4681=== modified file 'hooks/charmhelpers/fetch/__init__.py'
4682--- hooks/charmhelpers/fetch/__init__.py 2015-06-18 14:29:25 +0000
4683+++ hooks/charmhelpers/fetch/__init__.py 2015-07-16 20:20:59 +0000
4684@@ -158,7 +158,7 @@
4685
4686 def apt_cache(in_memory=True):
4687 """Build and return an apt cache"""
4688- import apt_pkg
4689+ from apt import apt_pkg
4690 apt_pkg.init()
4691 if in_memory:
4692 apt_pkg.config.set("Dir::Cache::pkgcache", "")
4693@@ -215,19 +215,27 @@
4694 _run_apt_command(cmd, fatal)
4695
4696
def apt_mark(packages, mark, fatal=False):
    """Flag one or more packages using apt-mark.

    :param packages: a package name or a list of package names
    :param mark: the apt-mark selection to apply (e.g. 'hold', 'unhold')
    :param fatal: when True, raise CalledProcessError if apt-mark fails
    """
    cmd = ['apt-mark', mark]
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    # Bug fix: this generalized helper previously logged "Holding {}"
    # regardless of which mark was requested (e.g. for 'unhold').
    log("Marking {} as {}".format(packages, mark))

    if fatal:
        subprocess.check_call(cmd, universal_newlines=True)
    else:
        subprocess.call(cmd, universal_newlines=True)
4710+
4711+
def apt_hold(packages, fatal=False):
    """Hold one or more packages via apt-mark (prevents upgrades)."""
    return apt_mark(packages, 'hold', fatal=fatal)
4726+
4727+
def apt_unhold(packages, fatal=False):
    """Release an apt-mark hold on one or more packages."""
    return apt_mark(packages, 'unhold', fatal=fatal)
4730
4731
4732 def add_source(source, key=None):
4733
4734=== modified file 'hooks/charmhelpers/fetch/giturl.py'
4735--- hooks/charmhelpers/fetch/giturl.py 2015-06-18 14:29:25 +0000
4736+++ hooks/charmhelpers/fetch/giturl.py 2015-07-16 20:20:59 +0000
4737@@ -45,14 +45,16 @@
4738 else:
4739 return True
4740
4741- def clone(self, source, dest, branch):
4742+ def clone(self, source, dest, branch, depth=None):
4743 if not self.can_handle(source):
4744 raise UnhandledSource("Cannot handle {}".format(source))
4745
4746- repo = Repo.clone_from(source, dest)
4747- repo.git.checkout(branch)
4748+ if depth:
4749+ Repo.clone_from(source, dest, branch=branch, depth=depth)
4750+ else:
4751+ Repo.clone_from(source, dest, branch=branch)
4752
4753- def install(self, source, branch="master", dest=None):
4754+ def install(self, source, branch="master", dest=None, depth=None):
4755 url_parts = self.parse_url(source)
4756 branch_name = url_parts.path.strip("/").split("/")[-1]
4757 if dest:
4758@@ -63,7 +65,7 @@
4759 if not os.path.exists(dest_dir):
4760 mkdir(dest_dir, perms=0o755)
4761 try:
4762- self.clone(source, dest_dir, branch)
4763+ self.clone(source, dest_dir, branch, depth)
4764 except GitCommandError as e:
4765 raise UnhandledSource(e.message)
4766 except OSError as e:
4767
4768=== added symlink 'hooks/db-admin-relation-changed'
4769=== target is u'hooks.py'
4770=== added symlink 'hooks/db-admin-relation-departed'
4771=== target is u'hooks.py'
4772=== added symlink 'hooks/db-admin-relation-joined'
4773=== target is u'hooks.py'
4774=== added symlink 'hooks/db-relation-changed'
4775=== target is u'hooks.py'
4776=== added symlink 'hooks/db-relation-departed'
4777=== target is u'hooks.py'
4778=== added symlink 'hooks/db-relation-joined'
4779=== target is u'hooks.py'
4780=== added file 'hooks/schema-upgrade'
4781--- hooks/schema-upgrade 1970-01-01 00:00:00 +0000
4782+++ hooks/schema-upgrade 2015-07-16 20:20:59 +0000
4783@@ -0,0 +1,53 @@
#!/usr/bin/env python
"""Juju hook/action entry point: run the spec-manager DB schema upgrade."""

import os
import subprocess
import sys
import yaml

from charmhelpers.core import (
    hookenv,
)

hooks = hookenv.Hooks()
config = hookenv.config()

# Credential/config files written elsewhere by the charm's db relation hooks.
DB_FILE = '/srv/{}/etc/db-schema.yaml'.format(config['environment'])
DB_ADMIN_FILE = '/srv/{}/etc/db-admin.yaml'.format(config['environment'])
SERVICE_DIR = '/srv/{}/spec-manager'.format(config['environment'])


@hooks.hook('schema-upgrade')
def schema_upgrade():
    """Run the service's schema script against the configured database.

    Works both as a plain hook (prints output, raises on error) and as a
    juju action (reports via action_set / action_fail).
    """
    # Detect invocation via `juju action do schema-upgrade`.
    running_as_action = (
        os.environ.get('JUJU_ACTION_NAME') == 'schema-upgrade' and
        os.environ.get('JUJU_ACTION_UUID') is not None)
    # safe_load: the files are charm-written, but there is no reason to
    # allow arbitrary YAML object construction.
    with open(DB_FILE, 'r') as fd:
        db_config = yaml.safe_load(fd)
    with open(DB_ADMIN_FILE, 'r') as fd:
        db_admin_config = yaml.safe_load(fd)
    schema_script = os.path.abspath('{}/schema/schema'.format(SERVICE_DIR))
    db_config['admin_user'] = db_admin_config['user']
    db_config['admin_password'] = db_admin_config['password']
    conn_str = "dbname={database} user={admin_user} host={host} password={admin_password}".format(**db_config)
    cmd = [schema_script, conn_str]
    try:
        p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, close_fds=True)
        output, _ = p.communicate()
    except Exception as e:
        if running_as_action:
            hookenv.action_fail("Error {}".format(str(e)))
            # Bug fix: previously fell through and referenced the unset
            # `output` variable after action_fail, raising NameError.
            return
        raise
    if p.returncode != 0:
        # Bug fix: a non-zero exit from the schema script was previously
        # reported as success.
        if running_as_action:
            hookenv.action_set({'output': output})
            hookenv.action_fail(
                "schema script exited with status {}".format(p.returncode))
            return
        raise subprocess.CalledProcessError(p.returncode, cmd, output=output)
    if running_as_action:
        hookenv.action_set({'output': output})
    else:
        print(output)


# Main section #
if __name__ == "__main__":
    # execute a hook based on the name the program is called by
    hooks.execute(sys.argv)
4837
4838=== modified file 'hooks/services.py'
4839--- hooks/services.py 2015-06-19 05:32:37 +0000
4840+++ hooks/services.py 2015-07-16 20:20:59 +0000
4841@@ -12,11 +12,14 @@
4842 manager = ServiceManager([
4843 {
4844 'service': 'spec-manager',
4845- 'provided_data': [actions.WebsiteRelation()],
4846- 'required_data': [config],
4847+ 'provided_data': [actions.WebsiteRelation(),
4848+ actions.PostgresqlRelation()],
4849+ 'required_data': [config,
4850+ actions.PostgresqlRelation(required=True)],
4851 'data_ready': [
4852 actions.basenode,
4853 actions.install_packages,
4854+ actions.ensure_directories,
4855 actions.get_cloud_service_from_tarball,
4856 actions.install_python_packages,
4857 actions.get_config_file,
4858@@ -24,6 +27,7 @@
4859 helpers.render_template(
4860 source='upstart.conf',
4861 target='/etc/init/spec-manager.conf'),
4862+ actions.write_db_config,
4863 actions.log_start,
4864 ],
4865 },
4866
4867=== modified file 'metadata.yaml'
4868--- metadata.yaml 2015-06-18 14:29:25 +0000
4869+++ metadata.yaml 2015-07-16 20:20:59 +0000
4870@@ -7,3 +7,9 @@
4871 website:
4872 interface: http
4873 optional: true
4874+
4875+requires:
4876+ db:
4877+ interface: pgsql
4878+ db-admin:
4879+ interface: pgsql
4880
4881=== added directory 'scripts'
4882=== added file 'scripts/charm_helpers_sync.py'
4883--- scripts/charm_helpers_sync.py 1970-01-01 00:00:00 +0000
4884+++ scripts/charm_helpers_sync.py 2015-07-16 20:20:59 +0000
4885@@ -0,0 +1,223 @@
4886+#!/usr/bin/env python
4887+# Copyright 2013 Canonical Ltd.
4888+
4889+# Authors:
4890+# Adam Gandelman <adamg@ubuntu.com>
4891+
4892+import logging
4893+import optparse
4894+import os
4895+import subprocess
4896+import shutil
4897+import sys
4898+import tempfile
4899+import yaml
4900+
4901+from fnmatch import fnmatch
4902+
4903+CHARM_HELPERS_BRANCH = 'lp:charm-helpers'
4904+
4905+
def parse_config(conf_file):
    """Load the helper-sync YAML configuration.

    :param conf_file: path to the config file
    :return: the parsed YAML data, or False when conf_file is not a file
    """
    if not os.path.isfile(conf_file):
        logging.error('Invalid config file: %s.' % conf_file)
        return False
    # Context manager closes the handle deterministically (the original
    # leaked it); safe_load prevents arbitrary YAML object construction.
    with open(conf_file) as fd:
        return yaml.safe_load(fd)
4911+
4912+
def clone_helpers(work_dir, branch):
    """Check out the charm-helpers *branch* under *work_dir*.

    :return: the path of the new checkout
    """
    dest = os.path.join(work_dir, 'charm-helpers')
    logging.info('Checking out %s to %s.' % (branch, dest))
    subprocess.check_call(['bzr', 'branch', branch, dest])
    return dest
4919+
4920+
4921+def _module_path(module):
4922+ return os.path.join(*module.split('.'))
4923+
4924+
4925+def _src_path(src, module):
4926+ return os.path.join(src, 'charmhelpers', _module_path(module))
4927+
4928+
4929+def _dest_path(dest, module):
4930+ return os.path.join(dest, _module_path(module))
4931+
4932+
4933+def _is_pyfile(path):
4934+ return os.path.isfile(path + '.py')
4935+
4936+
def ensure_init(path):
    '''
    Ensure directories leading up to path are importable, omitting
    parent directory, eg path='/hooks/helpers/foo'/:
        hooks/
        hooks/helpers/__init__.py
        hooks/helpers/foo/__init__.py
    '''
    # Walk from the second path component down (skips the parent dir,
    # e.g. 'hooks' itself), creating any missing __init__.py files.
    root = os.path.join(*path.split('/')[:2])
    for dirpath, _subdirs, _files in os.walk(root):
        init_py = os.path.join(dirpath, '__init__.py')
        if os.path.exists(init_py):
            continue
        logging.info('Adding missing __init__.py: %s' % init_py)
        open(init_py, 'wb').close()
4950+
4951+
def sync_pyfile(src, dest):
    """Copy the single module *src* + '.py' into directory *dest*.

    Also carries the source package's __init__.py along when present, and
    ensures *dest* itself is importable.
    """
    src_file = src + '.py'
    logging.info('Syncing pyfile: %s -> %s.' % (src_file, dest))
    if not os.path.exists(dest):
        os.makedirs(dest)
    shutil.copy(src_file, dest)
    sibling_init = os.path.join(os.path.dirname(src_file), '__init__.py')
    if os.path.isfile(sibling_init):
        shutil.copy(sibling_init, dest)
    ensure_init(dest)
4963+
4964+
def get_filter(opts=None):
    """Build an ignore-callback for directory syncing.

    :param opts: list of option strings; 'inc=*' disables filtering
        entirely (returns None), while 'inc=<glob>' entries whitelist
        matching non-.py files.
    :return: None, or a callable(dir, entries) -> entries to skip
    """
    opts = opts or []
    if 'inc=*' in opts:
        # do not filter any files, include everything
        return None

    def _filter(dir, ls):
        includes = [opt.split('=').pop() for opt in opts if 'inc=' in opt]
        ignored = []
        for entry in ls:
            full = os.path.join(dir, entry)

            if not os.path.isdir(full) and not full.endswith('.py') and includes:
                if not any(fnmatch(full, pat) for pat in includes):
                    logging.debug('Not syncing %s, does not match include '
                                  'filters (%s)' % (full, includes))
                    ignored.append(entry)
                else:
                    logging.debug('Including file, which matches include '
                                  'filters (%s): %s' % (includes, full))
            elif os.path.isfile(full) and not full.endswith('.py'):
                logging.debug('Not syncing file: %s' % entry)
                ignored.append(entry)
            elif (os.path.isdir(full) and not
                  os.path.isfile(os.path.join(full, '__init__.py'))):
                logging.debug('Not syncing directory: %s' % entry)
                ignored.append(entry)
        return ignored
    return _filter
4994+
4995+
4996+def sync_directory(src, dest, opts=None):
4997+ if os.path.exists(dest):
4998+ logging.debug('Removing existing directory: %s' % dest)
4999+ shutil.rmtree(dest)
5000+ logging.info('Syncing directory: %s -> %s.' % (src, dest))
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches