Merge lp:~hopem/charms/trusty/glance/lp1499643 into lp:~openstack-charmers-archive/charms/trusty/glance/next

Proposed by Edward Hope-Morley
Status: Merged
Merged at revision: 143
Proposed branch: lp:~hopem/charms/trusty/glance/lp1499643
Merge into: lp:~openstack-charmers-archive/charms/trusty/glance/next
Diff against target: 1408 lines (+874/-79)
14 files modified
charmhelpers/contrib/network/ip.py (+5/-3)
charmhelpers/contrib/openstack/amulet/deployment.py (+23/-9)
charmhelpers/contrib/openstack/amulet/utils.py (+359/-0)
charmhelpers/contrib/openstack/context.py (+52/-7)
charmhelpers/contrib/openstack/templating.py (+30/-2)
charmhelpers/contrib/openstack/utils.py (+232/-2)
charmhelpers/contrib/storage/linux/ceph.py (+2/-11)
charmhelpers/core/hookenv.py (+32/-0)
charmhelpers/core/host.py (+32/-16)
charmhelpers/core/hugepage.py (+8/-1)
charmhelpers/core/strutils.py (+30/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+4/-2)
tests/charmhelpers/contrib/amulet/utils.py (+56/-16)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+9/-10)
To merge this branch: bzr merge lp:~hopem/charms/trusty/glance/lp1499643
Reviewer Review Type Date Requested Status
Liam Young (community) Approve
Review via email: mp+272409@code.launchpad.net
Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_lint_check #10762 glance-next for hopem mp272409
    LINT OK: passed

Build: http://10.245.162.77:8080/job/charm_lint_check/10762/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_unit_test #9942 glance-next for hopem mp272409
    UNIT FAIL: unit-test failed

UNIT Results (max last 2 lines):
make: *** [test] Error 1
ERROR:root:Make target returned non-zero.

Full unit test output: http://paste.ubuntu.com/12555285/
Build: http://10.245.162.77:8080/job/charm_unit_test/9942/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_amulet_test #6771 glance-next for hopem mp272409
    AMULET FAIL: amulet-test failed

AMULET Results (max last 2 lines):
make: *** [functional_test] Error 1
ERROR:root:Make target returned non-zero.

Full amulet test output: http://paste.ubuntu.com/12555788/
Build: http://10.245.162.77:8080/job/charm_amulet_test/6771/

Revision history for this message
Liam Young (gnuoy) wrote :

Unrelated Amulet failure. I'll tweak the unit test at merge time. Approved.

review: Approve

Preview Diff

1=== modified file 'charmhelpers/contrib/network/ip.py'
2--- charmhelpers/contrib/network/ip.py 2015-09-03 09:41:01 +0000
3+++ charmhelpers/contrib/network/ip.py 2015-09-25 14:42:27 +0000
4@@ -23,7 +23,7 @@
5 from functools import partial
6
7 from charmhelpers.core.hookenv import unit_get
8-from charmhelpers.fetch import apt_install
9+from charmhelpers.fetch import apt_install, apt_update
10 from charmhelpers.core.hookenv import (
11 log,
12 WARNING,
13@@ -32,13 +32,15 @@
14 try:
15 import netifaces
16 except ImportError:
17- apt_install('python-netifaces')
18+ apt_update(fatal=True)
19+ apt_install('python-netifaces', fatal=True)
20 import netifaces
21
22 try:
23 import netaddr
24 except ImportError:
25- apt_install('python-netaddr')
26+ apt_update(fatal=True)
27+ apt_install('python-netaddr', fatal=True)
28 import netaddr
29
30
31
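
Editor's note: the hunk above hardens the usual charmhelpers guarded-import pattern: refresh the apt index and make the fallback install fatal, so a missing Python package fails the hook immediately rather than surfacing later as an ImportError. A minimal sketch of that pattern (illustrative only, not part of the diff):

from charmhelpers.fetch import apt_install, apt_update

try:
    import netaddr
except ImportError:
    # A freshly provisioned unit can have a stale or empty apt cache,
    # so update first; fatal=True raises if either step fails.
    apt_update(fatal=True)
    apt_install('python-netaddr', fatal=True)
    import netaddr
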
32=== modified file 'charmhelpers/contrib/openstack/amulet/deployment.py'
33--- charmhelpers/contrib/openstack/amulet/deployment.py 2015-08-18 17:34:34 +0000
34+++ charmhelpers/contrib/openstack/amulet/deployment.py 2015-09-25 14:42:27 +0000
35@@ -44,20 +44,31 @@
36 Determine if the local branch being tested is derived from its
37 stable or next (dev) branch, and based on this, use the corresonding
38 stable or next branches for the other_services."""
39+
40+ # Charms outside the lp:~openstack-charmers namespace
41 base_charms = ['mysql', 'mongodb', 'nrpe']
42
43+ # Force these charms to current series even when using an older series.
44+ # ie. Use trusty/nrpe even when series is precise, as the P charm
45+ # does not possess the necessary external master config and hooks.
46+ force_series_current = ['nrpe']
47+
48 if self.series in ['precise', 'trusty']:
49 base_series = self.series
50 else:
51 base_series = self.current_next
52
53- if self.stable:
54- for svc in other_services:
55+ for svc in other_services:
56+ if svc['name'] in force_series_current:
57+ base_series = self.current_next
58+ # If a location has been explicitly set, use it
59+ if svc.get('location'):
60+ continue
61+ if self.stable:
62 temp = 'lp:charms/{}/{}'
63 svc['location'] = temp.format(base_series,
64 svc['name'])
65- else:
66- for svc in other_services:
67+ else:
68 if svc['name'] in base_charms:
69 temp = 'lp:charms/{}/{}'
70 svc['location'] = temp.format(base_series,
71@@ -66,6 +77,7 @@
72 temp = 'lp:~openstack-charmers/charms/{}/{}/next'
73 svc['location'] = temp.format(self.current_next,
74 svc['name'])
75+
76 return other_services
77
78 def _add_services(self, this_service, other_services):
79@@ -77,21 +89,23 @@
80
81 services = other_services
82 services.append(this_service)
83+
84+ # Charms which should use the source config option
85 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
86 'ceph-osd', 'ceph-radosgw']
87- # Most OpenStack subordinate charms do not expose an origin option
88- # as that is controlled by the principle.
89- ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
90+
91+ # Charms which can not use openstack-origin, ie. many subordinates
92+ no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
93
94 if self.openstack:
95 for svc in services:
96- if svc['name'] not in use_source + ignore:
97+ if svc['name'] not in use_source + no_origin:
98 config = {'openstack-origin': self.openstack}
99 self.d.configure(svc['name'], config)
100
101 if self.source:
102 for svc in services:
103- if svc['name'] in use_source and svc['name'] not in ignore:
104+ if svc['name'] in use_source and svc['name'] not in no_origin:
105 config = {'source': self.source}
106 self.d.configure(svc['name'], config)
107
108
109=== modified file 'charmhelpers/contrib/openstack/amulet/utils.py'
110--- charmhelpers/contrib/openstack/amulet/utils.py 2015-07-16 20:17:23 +0000
111+++ charmhelpers/contrib/openstack/amulet/utils.py 2015-09-25 14:42:27 +0000
112@@ -27,6 +27,7 @@
113 import heatclient.v1.client as heat_client
114 import keystoneclient.v2_0 as keystone_client
115 import novaclient.v1_1.client as nova_client
116+import pika
117 import swiftclient
118
119 from charmhelpers.contrib.amulet.utils import (
120@@ -602,3 +603,361 @@
121 self.log.debug('Ceph {} samples (OK): '
122 '{}'.format(sample_type, samples))
123 return None
124+
125+# rabbitmq/amqp specific helpers:
126+ def add_rmq_test_user(self, sentry_units,
127+ username="testuser1", password="changeme"):
128+ """Add a test user via the first rmq juju unit, check connection as
129+ the new user against all sentry units.
130+
131+ :param sentry_units: list of sentry unit pointers
132+ :param username: amqp user name, default to testuser1
133+ :param password: amqp user password
134+ :returns: None if successful. Raise on error.
135+ """
136+ self.log.debug('Adding rmq user ({})...'.format(username))
137+
138+ # Check that user does not already exist
139+ cmd_user_list = 'rabbitmqctl list_users'
140+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
141+ if username in output:
142+ self.log.warning('User ({}) already exists, returning '
143+ 'gracefully.'.format(username))
144+ return
145+
146+ perms = '".*" ".*" ".*"'
147+ cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
148+ 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
149+
150+ # Add user via first unit
151+ for cmd in cmds:
152+ output, _ = self.run_cmd_unit(sentry_units[0], cmd)
153+
154+ # Check connection against the other sentry_units
155+ self.log.debug('Checking user connect against units...')
156+ for sentry_unit in sentry_units:
157+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
158+ username=username,
159+ password=password)
160+ connection.close()
161+
162+ def delete_rmq_test_user(self, sentry_units, username="testuser1"):
163+ """Delete a rabbitmq user via the first rmq juju unit.
164+
165+ :param sentry_units: list of sentry unit pointers
166+ :param username: amqp user name, default to testuser1
167+ :param password: amqp user password
168+ :returns: None if successful or no such user.
169+ """
170+ self.log.debug('Deleting rmq user ({})...'.format(username))
171+
172+ # Check that the user exists
173+ cmd_user_list = 'rabbitmqctl list_users'
174+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
175+
176+ if username not in output:
177+ self.log.warning('User ({}) does not exist, returning '
178+ 'gracefully.'.format(username))
179+ return
180+
181+ # Delete the user
182+ cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
183+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
184+
185+ def get_rmq_cluster_status(self, sentry_unit):
186+ """Execute rabbitmq cluster status command on a unit and return
187+ the full output.
188+
189+ :param unit: sentry unit
190+ :returns: String containing console output of cluster status command
191+ """
192+ cmd = 'rabbitmqctl cluster_status'
193+ output, _ = self.run_cmd_unit(sentry_unit, cmd)
194+ self.log.debug('{} cluster_status:\n{}'.format(
195+ sentry_unit.info['unit_name'], output))
196+ return str(output)
197+
198+ def get_rmq_cluster_running_nodes(self, sentry_unit):
199+ """Parse rabbitmqctl cluster_status output string, return list of
200+ running rabbitmq cluster nodes.
201+
202+ :param unit: sentry unit
203+ :returns: List containing node names of running nodes
204+ """
205+ # NOTE(beisner): rabbitmqctl cluster_status output is not
206+ # json-parsable, do string chop foo, then json.loads that.
207+ str_stat = self.get_rmq_cluster_status(sentry_unit)
208+ if 'running_nodes' in str_stat:
209+ pos_start = str_stat.find("{running_nodes,") + 15
210+ pos_end = str_stat.find("]},", pos_start) + 1
211+ str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
212+ run_nodes = json.loads(str_run_nodes)
213+ return run_nodes
214+ else:
215+ return []
216+
217+ def validate_rmq_cluster_running_nodes(self, sentry_units):
218+ """Check that all rmq unit hostnames are represented in the
219+ cluster_status output of all units.
220+
221+ :param host_names: dict of juju unit names to host names
222+ :param units: list of sentry unit pointers (all rmq units)
223+ :returns: None if successful, otherwise return error message
224+ """
225+ host_names = self.get_unit_hostnames(sentry_units)
226+ errors = []
227+
228+ # Query every unit for cluster_status running nodes
229+ for query_unit in sentry_units:
230+ query_unit_name = query_unit.info['unit_name']
231+ running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
232+
233+ # Confirm that every unit is represented in the queried unit's
234+ # cluster_status running nodes output.
235+ for validate_unit in sentry_units:
236+ val_host_name = host_names[validate_unit.info['unit_name']]
237+ val_node_name = 'rabbit@{}'.format(val_host_name)
238+
239+ if val_node_name not in running_nodes:
240+ errors.append('Cluster member check failed on {}: {} not '
241+ 'in {}\n'.format(query_unit_name,
242+ val_node_name,
243+ running_nodes))
244+ if errors:
245+ return ''.join(errors)
246+
247+ def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
248+ """Check a single juju rmq unit for ssl and port in the config file."""
249+ host = sentry_unit.info['public-address']
250+ unit_name = sentry_unit.info['unit_name']
251+
252+ conf_file = '/etc/rabbitmq/rabbitmq.config'
253+ conf_contents = str(self.file_contents_safe(sentry_unit,
254+ conf_file, max_wait=16))
255+ # Checks
256+ conf_ssl = 'ssl' in conf_contents
257+ conf_port = str(port) in conf_contents
258+
259+ # Port explicitly checked in config
260+ if port and conf_port and conf_ssl:
261+ self.log.debug('SSL is enabled @{}:{} '
262+ '({})'.format(host, port, unit_name))
263+ return True
264+ elif port and not conf_port and conf_ssl:
265+ self.log.debug('SSL is enabled @{} but not on port {} '
266+ '({})'.format(host, port, unit_name))
267+ return False
268+ # Port not checked (useful when checking that ssl is disabled)
269+ elif not port and conf_ssl:
270+ self.log.debug('SSL is enabled @{}:{} '
271+ '({})'.format(host, port, unit_name))
272+ return True
273+ elif not port and not conf_ssl:
274+ self.log.debug('SSL not enabled @{}:{} '
275+ '({})'.format(host, port, unit_name))
276+ return False
277+ else:
278+ msg = ('Unknown condition when checking SSL status @{}:{} '
279+ '({})'.format(host, port, unit_name))
280+ amulet.raise_status(amulet.FAIL, msg)
281+
282+ def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
283+ """Check that ssl is enabled on rmq juju sentry units.
284+
285+ :param sentry_units: list of all rmq sentry units
286+ :param port: optional ssl port override to validate
287+ :returns: None if successful, otherwise return error message
288+ """
289+ for sentry_unit in sentry_units:
290+ if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
291+ return ('Unexpected condition: ssl is disabled on unit '
292+ '({})'.format(sentry_unit.info['unit_name']))
293+ return None
294+
295+ def validate_rmq_ssl_disabled_units(self, sentry_units):
296+ """Check that ssl is enabled on listed rmq juju sentry units.
297+
298+ :param sentry_units: list of all rmq sentry units
299+ :returns: True if successful. Raise on error.
300+ """
301+ for sentry_unit in sentry_units:
302+ if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
303+ return ('Unexpected condition: ssl is enabled on unit '
304+ '({})'.format(sentry_unit.info['unit_name']))
305+ return None
306+
307+ def configure_rmq_ssl_on(self, sentry_units, deployment,
308+ port=None, max_wait=60):
309+ """Turn ssl charm config option on, with optional non-default
310+ ssl port specification. Confirm that it is enabled on every
311+ unit.
312+
313+ :param sentry_units: list of sentry units
314+ :param deployment: amulet deployment object pointer
315+ :param port: amqp port, use defaults if None
316+ :param max_wait: maximum time to wait in seconds to confirm
317+ :returns: None if successful. Raise on error.
318+ """
319+ self.log.debug('Setting ssl charm config option: on')
320+
321+ # Enable RMQ SSL
322+ config = {'ssl': 'on'}
323+ if port:
324+ config['ssl_port'] = port
325+
326+ deployment.configure('rabbitmq-server', config)
327+
328+ # Confirm
329+ tries = 0
330+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
331+ while ret and tries < (max_wait / 4):
332+ time.sleep(4)
333+ self.log.debug('Attempt {}: {}'.format(tries, ret))
334+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
335+ tries += 1
336+
337+ if ret:
338+ amulet.raise_status(amulet.FAIL, ret)
339+
340+ def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
341+ """Turn ssl charm config option off, confirm that it is disabled
342+ on every unit.
343+
344+ :param sentry_units: list of sentry units
345+ :param deployment: amulet deployment object pointer
346+ :param max_wait: maximum time to wait in seconds to confirm
347+ :returns: None if successful. Raise on error.
348+ """
349+ self.log.debug('Setting ssl charm config option: off')
350+
351+ # Disable RMQ SSL
352+ config = {'ssl': 'off'}
353+ deployment.configure('rabbitmq-server', config)
354+
355+ # Confirm
356+ tries = 0
357+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
358+ while ret and tries < (max_wait / 4):
359+ time.sleep(4)
360+ self.log.debug('Attempt {}: {}'.format(tries, ret))
361+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
362+ tries += 1
363+
364+ if ret:
365+ amulet.raise_status(amulet.FAIL, ret)
366+
367+ def connect_amqp_by_unit(self, sentry_unit, ssl=False,
368+ port=None, fatal=True,
369+ username="testuser1", password="changeme"):
370+ """Establish and return a pika amqp connection to the rabbitmq service
371+ running on a rmq juju unit.
372+
373+ :param sentry_unit: sentry unit pointer
374+ :param ssl: boolean, default to False
375+ :param port: amqp port, use defaults if None
376+ :param fatal: boolean, default to True (raises on connect error)
377+ :param username: amqp user name, default to testuser1
378+ :param password: amqp user password
379+ :returns: pika amqp connection pointer or None if failed and non-fatal
380+ """
381+ host = sentry_unit.info['public-address']
382+ unit_name = sentry_unit.info['unit_name']
383+
384+ # Default port logic if port is not specified
385+ if ssl and not port:
386+ port = 5671
387+ elif not ssl and not port:
388+ port = 5672
389+
390+ self.log.debug('Connecting to amqp on {}:{} ({}) as '
391+ '{}...'.format(host, port, unit_name, username))
392+
393+ try:
394+ credentials = pika.PlainCredentials(username, password)
395+ parameters = pika.ConnectionParameters(host=host, port=port,
396+ credentials=credentials,
397+ ssl=ssl,
398+ connection_attempts=3,
399+ retry_delay=5,
400+ socket_timeout=1)
401+ connection = pika.BlockingConnection(parameters)
402+ assert connection.server_properties['product'] == 'RabbitMQ'
403+ self.log.debug('Connect OK')
404+ return connection
405+ except Exception as e:
406+ msg = ('amqp connection failed to {}:{} as '
407+ '{} ({})'.format(host, port, username, str(e)))
408+ if fatal:
409+ amulet.raise_status(amulet.FAIL, msg)
410+ else:
411+ self.log.warn(msg)
412+ return None
413+
414+ def publish_amqp_message_by_unit(self, sentry_unit, message,
415+ queue="test", ssl=False,
416+ username="testuser1",
417+ password="changeme",
418+ port=None):
419+ """Publish an amqp message to a rmq juju unit.
420+
421+ :param sentry_unit: sentry unit pointer
422+ :param message: amqp message string
423+ :param queue: message queue, default to test
424+ :param username: amqp user name, default to testuser1
425+ :param password: amqp user password
426+ :param ssl: boolean, default to False
427+ :param port: amqp port, use defaults if None
428+ :returns: None. Raises exception if publish failed.
429+ """
430+ self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
431+ message))
432+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
433+ port=port,
434+ username=username,
435+ password=password)
436+
437+ # NOTE(beisner): extra debug here re: pika hang potential:
438+ # https://github.com/pika/pika/issues/297
439+ # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
440+ self.log.debug('Defining channel...')
441+ channel = connection.channel()
442+ self.log.debug('Declaring queue...')
443+ channel.queue_declare(queue=queue, auto_delete=False, durable=True)
444+ self.log.debug('Publishing message...')
445+ channel.basic_publish(exchange='', routing_key=queue, body=message)
446+ self.log.debug('Closing channel...')
447+ channel.close()
448+ self.log.debug('Closing connection...')
449+ connection.close()
450+
451+ def get_amqp_message_by_unit(self, sentry_unit, queue="test",
452+ username="testuser1",
453+ password="changeme",
454+ ssl=False, port=None):
455+ """Get an amqp message from a rmq juju unit.
456+
457+ :param sentry_unit: sentry unit pointer
458+ :param queue: message queue, default to test
459+ :param username: amqp user name, default to testuser1
460+ :param password: amqp user password
461+ :param ssl: boolean, default to False
462+ :param port: amqp port, use defaults if None
463+ :returns: amqp message body as string. Raise if get fails.
464+ """
465+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
466+ port=port,
467+ username=username,
468+ password=password)
469+ channel = connection.channel()
470+ method_frame, _, body = channel.basic_get(queue)
471+
472+ if method_frame:
473+ self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
474+ body))
475+ channel.basic_ack(method_frame.delivery_tag)
476+ channel.close()
477+ connection.close()
478+ return body
479+ else:
480+ msg = 'No message retrieved.'
481+ amulet.raise_status(amulet.FAIL, msg)
482
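
Editor's note: a sketch of how a charm's amulet test might exercise the new rabbitmq/amqp helpers above. Only the helper names come from the diff; the utils instance u, the sentry attributes, the queue name and the port are hypothetical test fixtures.

# 'u' is assumed to be an OpenStackAmuletUtils instance; self.d an amulet
# deployment; the rmq sentry attributes are hypothetical fixtures.
rmq_units = [self.rmq0_sentry, self.rmq1_sentry, self.rmq2_sentry]

u.add_rmq_test_user(rmq_units)
assert u.validate_rmq_cluster_running_nodes(rmq_units) is None

u.publish_amqp_message_by_unit(rmq_units[0], 'hello world', queue='test')
body = u.get_amqp_message_by_unit(rmq_units[1], queue='test')
assert body == 'hello world'

# Toggle ssl via charm config and confirm on every unit, then clean up.
u.configure_rmq_ssl_on(rmq_units, self.d, port=5999)
u.configure_rmq_ssl_off(rmq_units, self.d)
u.delete_rmq_test_user(rmq_units)
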
483=== modified file 'charmhelpers/contrib/openstack/context.py'
484--- charmhelpers/contrib/openstack/context.py 2015-09-12 06:27:17 +0000
485+++ charmhelpers/contrib/openstack/context.py 2015-09-25 14:42:27 +0000
486@@ -194,10 +194,50 @@
487 class OSContextGenerator(object):
488 """Base class for all context generators."""
489 interfaces = []
490+ related = False
491+ complete = False
492+ missing_data = []
493
494 def __call__(self):
495 raise NotImplementedError
496
497+ def context_complete(self, ctxt):
498+ """Check for missing data for the required context data.
499+ Set self.missing_data if it exists and return False.
500+ Set self.complete if no missing data and return True.
501+ """
502+ # Fresh start
503+ self.complete = False
504+ self.missing_data = []
505+ for k, v in six.iteritems(ctxt):
506+ if v is None or v == '':
507+ if k not in self.missing_data:
508+ self.missing_data.append(k)
509+
510+ if self.missing_data:
511+ self.complete = False
512+ log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
513+ else:
514+ self.complete = True
515+ return self.complete
516+
517+ def get_related(self):
518+ """Check if any of the context interfaces have relation ids.
519+ Set self.related and return True if one of the interfaces
520+ has relation ids.
521+ """
522+ # Fresh start
523+ self.related = False
524+ try:
525+ for interface in self.interfaces:
526+ if relation_ids(interface):
527+ self.related = True
528+ return self.related
529+ except AttributeError as e:
530+ log("{} {}"
531+ "".format(self, e), 'INFO')
532+ return self.related
533+
534
535 class SharedDBContext(OSContextGenerator):
536 interfaces = ['shared-db']
537@@ -213,6 +253,7 @@
538 self.database = database
539 self.user = user
540 self.ssl_dir = ssl_dir
541+ self.rel_name = self.interfaces[0]
542
543 def __call__(self):
544 self.database = self.database or config('database')
545@@ -246,6 +287,7 @@
546 password_setting = self.relation_prefix + '_password'
547
548 for rid in relation_ids(self.interfaces[0]):
549+ self.related = True
550 for unit in related_units(rid):
551 rdata = relation_get(rid=rid, unit=unit)
552 host = rdata.get('db_host')
553@@ -257,7 +299,7 @@
554 'database_password': rdata.get(password_setting),
555 'database_type': 'mysql'
556 }
557- if context_complete(ctxt):
558+ if self.context_complete(ctxt):
559 db_ssl(rdata, ctxt, self.ssl_dir)
560 return ctxt
561 return {}
562@@ -278,6 +320,7 @@
563
564 ctxt = {}
565 for rid in relation_ids(self.interfaces[0]):
566+ self.related = True
567 for unit in related_units(rid):
568 rel_host = relation_get('host', rid=rid, unit=unit)
569 rel_user = relation_get('user', rid=rid, unit=unit)
570@@ -287,7 +330,7 @@
571 'database_user': rel_user,
572 'database_password': rel_passwd,
573 'database_type': 'postgresql'}
574- if context_complete(ctxt):
575+ if self.context_complete(ctxt):
576 return ctxt
577
578 return {}
579@@ -348,6 +391,7 @@
580 ctxt['signing_dir'] = cachedir
581
582 for rid in relation_ids(self.rel_name):
583+ self.related = True
584 for unit in related_units(rid):
585 rdata = relation_get(rid=rid, unit=unit)
586 serv_host = rdata.get('service_host')
587@@ -366,7 +410,7 @@
588 'service_protocol': svc_protocol,
589 'auth_protocol': auth_protocol})
590
591- if context_complete(ctxt):
592+ if self.context_complete(ctxt):
593 # NOTE(jamespage) this is required for >= icehouse
594 # so a missing value just indicates keystone needs
595 # upgrading
596@@ -405,6 +449,7 @@
597 ctxt = {}
598 for rid in relation_ids(self.rel_name):
599 ha_vip_only = False
600+ self.related = True
601 for unit in related_units(rid):
602 if relation_get('clustered', rid=rid, unit=unit):
603 ctxt['clustered'] = True
604@@ -437,7 +482,7 @@
605 ha_vip_only = relation_get('ha-vip-only',
606 rid=rid, unit=unit) is not None
607
608- if context_complete(ctxt):
609+ if self.context_complete(ctxt):
610 if 'rabbit_ssl_ca' in ctxt:
611 if not self.ssl_dir:
612 log("Charm not setup for ssl support but ssl ca "
613@@ -469,7 +514,7 @@
614 ctxt['oslo_messaging_flags'] = config_flags_parser(
615 oslo_messaging_flags)
616
617- if not context_complete(ctxt):
618+ if not self.complete:
619 return {}
620
621 return ctxt
622@@ -507,7 +552,7 @@
623 if not os.path.isdir('/etc/ceph'):
624 os.mkdir('/etc/ceph')
625
626- if not context_complete(ctxt):
627+ if not self.context_complete(ctxt):
628 return {}
629
630 ensure_packages(['ceph-common'])
631@@ -1366,6 +1411,6 @@
632 'auth_protocol':
633 rdata.get('auth_protocol') or 'http',
634 }
635- if context_complete(ctxt):
636+ if self.context_complete(ctxt):
637 return ctxt
638 return {}
639
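
Editor's note: with this change OSContextGenerator subclasses call self.context_complete() (which records missing keys in self.missing_data) instead of the module-level context_complete(), and set self.related once relation ids exist. A hypothetical subclass, illustrative only:

class ExampleServiceContext(OSContextGenerator):
    interfaces = ['example-service']  # hypothetical interface name

    def __call__(self):
        for rid in relation_ids(self.interfaces[0]):
            self.related = True
            for unit in related_units(rid):
                ctxt = {
                    'service_host': relation_get('host', rid=rid, unit=unit),
                    'service_port': relation_get('port', rid=rid, unit=unit),
                }
                # Records any None/empty values in self.missing_data and
                # sets self.complete accordingly.
                if self.context_complete(ctxt):
                    return ctxt
        return {}
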
640=== modified file 'charmhelpers/contrib/openstack/templating.py'
641--- charmhelpers/contrib/openstack/templating.py 2015-07-29 10:47:33 +0000
642+++ charmhelpers/contrib/openstack/templating.py 2015-09-25 14:42:27 +0000
643@@ -18,7 +18,7 @@
644
645 import six
646
647-from charmhelpers.fetch import apt_install
648+from charmhelpers.fetch import apt_install, apt_update
649 from charmhelpers.core.hookenv import (
650 log,
651 ERROR,
652@@ -29,6 +29,7 @@
653 try:
654 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
655 except ImportError:
656+ apt_update(fatal=True)
657 apt_install('python-jinja2', fatal=True)
658 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
659
660@@ -112,7 +113,7 @@
661
662 def complete_contexts(self):
663 '''
664- Return a list of interfaces that have atisfied contexts.
665+ Return a list of interfaces that have satisfied contexts.
666 '''
667 if self._complete_contexts:
668 return self._complete_contexts
669@@ -293,3 +294,30 @@
670 [interfaces.extend(i.complete_contexts())
671 for i in six.itervalues(self.templates)]
672 return interfaces
673+
674+ def get_incomplete_context_data(self, interfaces):
675+ '''
676+ Return dictionary of relation status of interfaces and any missing
677+ required context data. Example:
678+ {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
679+ 'zeromq-configuration': {'related': False}}
680+ '''
681+ incomplete_context_data = {}
682+
683+ for i in six.itervalues(self.templates):
684+ for context in i.contexts:
685+ for interface in interfaces:
686+ related = False
687+ if interface in context.interfaces:
688+ related = context.get_related()
689+ missing_data = context.missing_data
690+ if missing_data:
691+ incomplete_context_data[interface] = {'missing_data': missing_data}
692+ if related:
693+ if incomplete_context_data.get(interface):
694+ incomplete_context_data[interface].update({'related': True})
695+ else:
696+ incomplete_context_data[interface] = {'related': True}
697+ else:
698+ incomplete_context_data[interface] = {'related': False}
699+ return incomplete_context_data
700
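
Editor's note: an illustrative call to the new renderer method, assuming configs is an OSConfigRenderer with templates already registered:

missing = configs.get_incomplete_context_data(['amqp', 'identity-service'])
# Possible shape of the result, per the docstring above:
# {'amqp': {'related': True, 'missing_data': ['rabbitmq_password']},
#  'identity-service': {'related': False}}
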
701=== modified file 'charmhelpers/contrib/openstack/utils.py'
702--- charmhelpers/contrib/openstack/utils.py 2015-09-03 09:41:01 +0000
703+++ charmhelpers/contrib/openstack/utils.py 2015-09-25 14:42:27 +0000
704@@ -25,6 +25,7 @@
705 import re
706
707 import six
708+import traceback
709 import yaml
710
711 from charmhelpers.contrib.network import ip
712@@ -34,12 +35,16 @@
713 )
714
715 from charmhelpers.core.hookenv import (
716+ action_fail,
717+ action_set,
718 config,
719 log as juju_log,
720 charm_dir,
721 INFO,
722 relation_ids,
723- relation_set
724+ relation_set,
725+ status_set,
726+ hook_name
727 )
728
729 from charmhelpers.contrib.storage.linux.lvm import (
730@@ -49,7 +54,8 @@
731 )
732
733 from charmhelpers.contrib.network.ip import (
734- get_ipv6_addr
735+ get_ipv6_addr,
736+ is_ipv6,
737 )
738
739 from charmhelpers.contrib.python.packages import (
740@@ -114,6 +120,7 @@
741 ('2.2.1', 'kilo'),
742 ('2.2.2', 'kilo'),
743 ('2.3.0', 'liberty'),
744+ ('2.4.0', 'liberty'),
745 ])
746
747 # >= Liberty version->codename mapping
748@@ -142,6 +149,9 @@
749 'glance-common': OrderedDict([
750 ('11.0.0', 'liberty'),
751 ]),
752+ 'openstack-dashboard': OrderedDict([
753+ ('8.0.0', 'liberty'),
754+ ]),
755 }
756
757 DEFAULT_LOOPBACK_SIZE = '5G'
758@@ -510,6 +520,12 @@
759 relation_prefix=None):
760 hosts = get_ipv6_addr(dynamic_only=False)
761
762+ if config('vip'):
763+ vips = config('vip').split()
764+ for vip in vips:
765+ if vip and is_ipv6(vip):
766+ hosts.append(vip)
767+
768 kwargs = {'database': database,
769 'username': database_user,
770 'hostname': json.dumps(hosts)}
771@@ -745,3 +761,217 @@
772 return projects[key]
773
774 return None
775+
776+
777+def os_workload_status(configs, required_interfaces, charm_func=None):
778+ """
779+ Decorator to set workload status based on complete contexts
780+ """
781+ def wrap(f):
782+ @wraps(f)
783+ def wrapped_f(*args, **kwargs):
784+ # Run the original function first
785+ f(*args, **kwargs)
786+ # Set workload status now that contexts have been
787+ # acted on
788+ set_os_workload_status(configs, required_interfaces, charm_func)
789+ return wrapped_f
790+ return wrap
791+
792+
793+def set_os_workload_status(configs, required_interfaces, charm_func=None):
794+ """
795+ Set workload status based on complete contexts.
796+ status-set missing or incomplete contexts
797+ and juju-log details of missing required data.
798+ charm_func is a charm specific function to run checking
799+ for charm specific requirements such as a VIP setting.
800+ """
801+ incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
802+ state = 'active'
803+ missing_relations = []
804+ incomplete_relations = []
805+ message = None
806+ charm_state = None
807+ charm_message = None
808+
809+ for generic_interface in incomplete_rel_data.keys():
810+ related_interface = None
811+ missing_data = {}
812+ # Related or not?
813+ for interface in incomplete_rel_data[generic_interface]:
814+ if incomplete_rel_data[generic_interface][interface].get('related'):
815+ related_interface = interface
816+ missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data')
817+ # No relation ID for the generic_interface
818+ if not related_interface:
819+ juju_log("{} relation is missing and must be related for "
820+ "functionality. ".format(generic_interface), 'WARN')
821+ state = 'blocked'
822+ if generic_interface not in missing_relations:
823+ missing_relations.append(generic_interface)
824+ else:
825+ # Relation ID exists but no related unit
826+ if not missing_data:
827+ # Edge case relation ID exists but departing
828+ if ('departed' in hook_name() or 'broken' in hook_name()) \
829+ and related_interface in hook_name():
830+ state = 'blocked'
831+ if generic_interface not in missing_relations:
832+ missing_relations.append(generic_interface)
833+ juju_log("{} relation's interface, {}, "
834+ "relationship is departed or broken "
835+ "and is required for functionality."
836+ "".format(generic_interface, related_interface), "WARN")
837+ # Normal case relation ID exists but no related unit
838+ # (joining)
839+ else:
840+ juju_log("{} relation's interface, {}, is related but has "
841+ "no units in the relation."
842+ "".format(generic_interface, related_interface), "INFO")
843+ # Related unit exists and data missing on the relation
844+ else:
845+ juju_log("{} relation's interface, {}, is related awaiting "
846+ "the following data from the relationship: {}. "
847+ "".format(generic_interface, related_interface,
848+ ", ".join(missing_data)), "INFO")
849+ if state != 'blocked':
850+ state = 'waiting'
851+ if generic_interface not in incomplete_relations \
852+ and generic_interface not in missing_relations:
853+ incomplete_relations.append(generic_interface)
854+
855+ if missing_relations:
856+ message = "Missing relations: {}".format(", ".join(missing_relations))
857+ if incomplete_relations:
858+ message += "; incomplete relations: {}" \
859+ "".format(", ".join(incomplete_relations))
860+ state = 'blocked'
861+ elif incomplete_relations:
862+ message = "Incomplete relations: {}" \
863+ "".format(", ".join(incomplete_relations))
864+ state = 'waiting'
865+
866+ # Run charm specific checks
867+ if charm_func:
868+ charm_state, charm_message = charm_func(configs)
869+ if charm_state != 'active' and charm_state != 'unknown':
870+ state = workload_state_compare(state, charm_state)
871+ if message:
872+ message = "{} {}".format(message, charm_message)
873+ else:
874+ message = charm_message
875+
876+ # Set to active if all requirements have been met
877+ if state == 'active':
878+ message = "Unit is ready"
879+ juju_log(message, "INFO")
880+
881+ status_set(state, message)
882+
883+
884+def workload_state_compare(current_workload_state, workload_state):
885+ """ Return highest priority of two states"""
886+ hierarchy = {'unknown': -1,
887+ 'active': 0,
888+ 'maintenance': 1,
889+ 'waiting': 2,
890+ 'blocked': 3,
891+ }
892+
893+ if hierarchy.get(workload_state) is None:
894+ workload_state = 'unknown'
895+ if hierarchy.get(current_workload_state) is None:
896+ current_workload_state = 'unknown'
897+
898+ # Set workload_state based on hierarchy of statuses
899+ if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
900+ return current_workload_state
901+ else:
902+ return workload_state
903+
904+
905+def incomplete_relation_data(configs, required_interfaces):
906+ """
907+ Check complete contexts against required_interfaces
908+ Return dictionary of incomplete relation data.
909+
910+ configs is an OSConfigRenderer object with configs registered
911+
912+ required_interfaces is a dictionary of required general interfaces
913+ with dictionary values of possible specific interfaces.
914+ Example:
915+ required_interfaces = {'database': ['shared-db', 'pgsql-db']}
916+
917+ The interface is said to be satisfied if anyone of the interfaces in the
918+ list has a complete context.
919+
920+ Return dictionary of incomplete or missing required contexts with relation
921+ status of interfaces and any missing data points. Example:
922+ {'message':
923+ {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
924+ 'zeromq-configuration': {'related': False}},
925+ 'identity':
926+ {'identity-service': {'related': False}},
927+ 'database':
928+ {'pgsql-db': {'related': False},
929+ 'shared-db': {'related': True}}}
930+ """
931+ complete_ctxts = configs.complete_contexts()
932+ incomplete_relations = []
933+ for svc_type in required_interfaces.keys():
934+ # Avoid duplicates
935+ found_ctxt = False
936+ for interface in required_interfaces[svc_type]:
937+ if interface in complete_ctxts:
938+ found_ctxt = True
939+ if not found_ctxt:
940+ incomplete_relations.append(svc_type)
941+ incomplete_context_data = {}
942+ for i in incomplete_relations:
943+ incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i])
944+ return incomplete_context_data
945+
946+
947+def do_action_openstack_upgrade(package, upgrade_callback, configs):
948+ """Perform action-managed OpenStack upgrade.
949+
950+ Upgrades packages to the configured openstack-origin version and sets
951+ the corresponding action status as a result.
952+
953+ If the charm was installed from source we cannot upgrade it.
954+ For backwards compatibility a config flag (action-managed-upgrade) must
955+ be set for this code to run, otherwise a full service level upgrade will
956+ fire on config-changed.
957+
958+ @param package: package name for determining if upgrade available
959+ @param upgrade_callback: function callback to charm's upgrade function
960+ @param configs: templating object derived from OSConfigRenderer class
961+
962+ @return: True if upgrade successful; False if upgrade failed or skipped
963+ """
964+ ret = False
965+
966+ if git_install_requested():
967+ action_set({'outcome': 'installed from source, skipped upgrade.'})
968+ else:
969+ if openstack_upgrade_available(package):
970+ if config('action-managed-upgrade'):
971+ juju_log('Upgrading OpenStack release')
972+
973+ try:
974+ upgrade_callback(configs=configs)
975+ action_set({'outcome': 'success, upgrade completed.'})
976+ ret = True
977+ except:
978+ action_set({'outcome': 'upgrade failed, see traceback.'})
979+ action_set({'traceback': traceback.format_exc()})
980+ action_fail('do_openstack_upgrade resulted in an '
981+ 'unexpected error')
982+ else:
983+ action_set({'outcome': 'action-managed-upgrade config is '
984+ 'False, skipped upgrade.'})
985+ else:
986+ action_set({'outcome': 'no upgrade available.'})
987+
988+ return ret
989
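
Editor's note: a hypothetical charm-side sketch of the new workload-status helpers. The hook decorator, the CONFIGS object and the optional charm_func are assumptions for illustration; only os_workload_status and the required_interfaces shape come from the diff.

REQUIRED_INTERFACES = {
    'database': ['shared-db', 'pgsql-db'],
    'messaging': ['amqp'],
    'identity': ['identity-service'],
}

def check_optional_relations(configs):
    # Charm-specific extra check, e.g. insist on a vip when clustered.
    if relation_ids('ha') and not config('vip'):
        return 'blocked', 'hacluster related but vip is not set'
    return 'unknown', ''

@hooks.hook('config-changed')
@os_workload_status(CONFIGS, REQUIRED_INTERFACES,
                    charm_func=check_optional_relations)
def config_changed():
    CONFIGS.write_all()
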
990=== modified file 'charmhelpers/contrib/storage/linux/ceph.py'
991--- charmhelpers/contrib/storage/linux/ceph.py 2015-09-10 09:30:59 +0000
992+++ charmhelpers/contrib/storage/linux/ceph.py 2015-09-25 14:42:27 +0000
993@@ -59,6 +59,8 @@
994 apt_install,
995 )
996
997+from charmhelpers.core.kernel import modprobe
998+
999 KEYRING = '/etc/ceph/ceph.client.{}.keyring'
1000 KEYFILE = '/etc/ceph/ceph.client.{}.key'
1001
1002@@ -291,17 +293,6 @@
1003 os.chown(data_src_dst, uid, gid)
1004
1005
1006-# TODO: re-use
1007-def modprobe(module):
1008- """Load a kernel module and configure for auto-load on reboot."""
1009- log('Loading kernel module', level=INFO)
1010- cmd = ['modprobe', module]
1011- check_call(cmd)
1012- with open('/etc/modules', 'r+') as modules:
1013- if module not in modules.read():
1014- modules.write(module)
1015-
1016-
1017 def copy_files(src, dst, symlinks=False, ignore=None):
1018 """Copy files from src to dst."""
1019 for item in os.listdir(src):
1020
1021=== modified file 'charmhelpers/core/hookenv.py'
1022--- charmhelpers/core/hookenv.py 2015-09-03 09:41:01 +0000
1023+++ charmhelpers/core/hookenv.py 2015-09-25 14:42:27 +0000
1024@@ -623,6 +623,38 @@
1025 return unit_get('private-address')
1026
1027
1028+@cached
1029+def storage_get(attribute="", storage_id=""):
1030+ """Get storage attributes"""
1031+ _args = ['storage-get', '--format=json']
1032+ if storage_id:
1033+ _args.extend(('-s', storage_id))
1034+ if attribute:
1035+ _args.append(attribute)
1036+ try:
1037+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
1038+ except ValueError:
1039+ return None
1040+
1041+
1042+@cached
1043+def storage_list(storage_name=""):
1044+ """List the storage IDs for the unit"""
1045+ _args = ['storage-list', '--format=json']
1046+ if storage_name:
1047+ _args.append(storage_name)
1048+ try:
1049+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
1050+ except ValueError:
1051+ return None
1052+ except OSError as e:
1053+ import errno
1054+ if e.errno == errno.ENOENT:
1055+ # storage-list does not exist
1056+ return []
1057+ raise
1058+
1059+
1060 class UnregisteredHookError(Exception):
1061 """Raised when an undefined hook is called"""
1062 pass
1063
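
Editor's note: a minimal sketch of the new storage helpers, as might appear in a storage-attached hook; the 'data' storage name is hypothetical.

for sid in storage_list('data'):
    location = storage_get('location', sid)
    if location:
        log('storage {} is mounted at {}'.format(sid, location))
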
1064=== modified file 'charmhelpers/core/host.py'
1065--- charmhelpers/core/host.py 2015-08-19 13:49:22 +0000
1066+++ charmhelpers/core/host.py 2015-09-25 14:42:27 +0000
1067@@ -63,32 +63,48 @@
1068 return service_result
1069
1070
1071-def service_pause(service_name, init_dir=None):
1072+def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
1073 """Pause a system service.
1074
1075 Stop it, and prevent it from starting again at boot."""
1076- if init_dir is None:
1077- init_dir = "/etc/init"
1078 stopped = service_stop(service_name)
1079- # XXX: Support systemd too
1080- override_path = os.path.join(
1081- init_dir, '{}.override'.format(service_name))
1082- with open(override_path, 'w') as fh:
1083- fh.write("manual\n")
1084+ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
1085+ sysv_file = os.path.join(initd_dir, service_name)
1086+ if os.path.exists(upstart_file):
1087+ override_path = os.path.join(
1088+ init_dir, '{}.override'.format(service_name))
1089+ with open(override_path, 'w') as fh:
1090+ fh.write("manual\n")
1091+ elif os.path.exists(sysv_file):
1092+ subprocess.check_call(["update-rc.d", service_name, "disable"])
1093+ else:
1094+ # XXX: Support SystemD too
1095+ raise ValueError(
1096+ "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
1097+ service_name, upstart_file, sysv_file))
1098 return stopped
1099
1100
1101-def service_resume(service_name, init_dir=None):
1102+def service_resume(service_name, init_dir="/etc/init",
1103+ initd_dir="/etc/init.d"):
1104 """Resume a system service.
1105
1106 Reenable starting again at boot. Start the service"""
1107- # XXX: Support systemd too
1108- if init_dir is None:
1109- init_dir = "/etc/init"
1110- override_path = os.path.join(
1111- init_dir, '{}.override'.format(service_name))
1112- if os.path.exists(override_path):
1113- os.unlink(override_path)
1114+ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
1115+ sysv_file = os.path.join(initd_dir, service_name)
1116+ if os.path.exists(upstart_file):
1117+ override_path = os.path.join(
1118+ init_dir, '{}.override'.format(service_name))
1119+ if os.path.exists(override_path):
1120+ os.unlink(override_path)
1121+ elif os.path.exists(sysv_file):
1122+ subprocess.check_call(["update-rc.d", service_name, "enable"])
1123+ else:
1124+ # XXX: Support SystemD too
1125+ raise ValueError(
1126+ "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
1127+ service_name, upstart_file, sysv_file))
1128+
1129 started = service_start(service_name)
1130 return started
1131
1132
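
Editor's note: a sketch of pause/resume actions built on the extended helpers. On Upstart systems service_pause() writes an init override file; on SysV it runs 'update-rc.d <name> disable'; anything else raises ValueError (systemd is still unhandled, per the XXX). The action functions and service name below are illustrative.

def pause(args):
    # Hypothetical glance action; the service name is illustrative.
    if not service_pause('glance-api'):
        raise Exception('glance-api did not stop cleanly')

def resume(args):
    if not service_resume('glance-api'):
        raise Exception('glance-api did not start cleanly')
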
1133=== modified file 'charmhelpers/core/hugepage.py'
1134--- charmhelpers/core/hugepage.py 2015-08-19 13:49:22 +0000
1135+++ charmhelpers/core/hugepage.py 2015-09-25 14:42:27 +0000
1136@@ -25,11 +25,13 @@
1137 fstab_mount,
1138 mkdir,
1139 )
1140+from charmhelpers.core.strutils import bytes_from_string
1141+from subprocess import check_output
1142
1143
1144 def hugepage_support(user, group='hugetlb', nr_hugepages=256,
1145 max_map_count=65536, mnt_point='/run/hugepages/kvm',
1146- pagesize='2MB', mount=True):
1147+ pagesize='2MB', mount=True, set_shmmax=False):
1148 """Enable hugepages on system.
1149
1150 Args:
1151@@ -49,6 +51,11 @@
1152 'vm.max_map_count': max_map_count,
1153 'vm.hugetlb_shm_group': gid,
1154 }
1155+ if set_shmmax:
1156+ shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
1157+ shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
1158+ if shmmax_minsize > shmmax_current:
1159+ sysctl_settings['kernel.shmmax'] = shmmax_minsize
1160 sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
1161 mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
1162 lfstab = fstab.Fstab()
1163
1164=== modified file 'charmhelpers/core/strutils.py'
1165--- charmhelpers/core/strutils.py 2015-04-16 19:53:49 +0000
1166+++ charmhelpers/core/strutils.py 2015-09-25 14:42:27 +0000
1167@@ -18,6 +18,7 @@
1168 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1169
1170 import six
1171+import re
1172
1173
1174 def bool_from_string(value):
1175@@ -40,3 +41,32 @@
1176
1177 msg = "Unable to interpret string value '%s' as boolean" % (value)
1178 raise ValueError(msg)
1179+
1180+
1181+def bytes_from_string(value):
1182+ """Interpret human readable string value as bytes.
1183+
1184+ Returns int
1185+ """
1186+ BYTE_POWER = {
1187+ 'K': 1,
1188+ 'KB': 1,
1189+ 'M': 2,
1190+ 'MB': 2,
1191+ 'G': 3,
1192+ 'GB': 3,
1193+ 'T': 4,
1194+ 'TB': 4,
1195+ 'P': 5,
1196+ 'PB': 5,
1197+ }
1198+ if isinstance(value, six.string_types):
1199+ value = six.text_type(value)
1200+ else:
201+ msg = "Unable to interpret non-string value '%s' as bytes" % (value)
1202+ raise ValueError(msg)
1203+ matches = re.match("([0-9]+)([a-zA-Z]+)", value)
1204+ if not matches:
1205+ msg = "Unable to interpret string value '%s' as bytes" % (value)
1206+ raise ValueError(msg)
1207+ return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
1208
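
Editor's note: illustrative values for the new bytes_from_string helper, and the shmmax sizing it enables in hugepage_support(set_shmmax=True):

bytes_from_string('4K')    # 4096
bytes_from_string('2MB')   # 2097152
bytes_from_string('1G')    # 1073741824

# hugepage_support() multiplies page size by page count to derive a
# minimum kernel.shmmax, only raising it if the current value is smaller:
shmmax_minsize = bytes_from_string('2MB') * 256    # pagesize * nr_hugepages
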
1209=== modified file 'tests/charmhelpers/contrib/amulet/deployment.py'
1210--- tests/charmhelpers/contrib/amulet/deployment.py 2015-03-20 17:15:02 +0000
1211+++ tests/charmhelpers/contrib/amulet/deployment.py 2015-09-25 14:42:27 +0000
1212@@ -51,7 +51,8 @@
1213 if 'units' not in this_service:
1214 this_service['units'] = 1
1215
1216- self.d.add(this_service['name'], units=this_service['units'])
1217+ self.d.add(this_service['name'], units=this_service['units'],
1218+ constraints=this_service.get('constraints'))
1219
1220 for svc in other_services:
1221 if 'location' in svc:
1222@@ -64,7 +65,8 @@
1223 if 'units' not in svc:
1224 svc['units'] = 1
1225
1226- self.d.add(svc['name'], charm=branch_location, units=svc['units'])
1227+ self.d.add(svc['name'], charm=branch_location, units=svc['units'],
1228+ constraints=svc.get('constraints'))
1229
1230 def _add_relations(self, relations):
1231 """Add all of the relations for the services."""
1232
1233=== modified file 'tests/charmhelpers/contrib/amulet/utils.py'
1234--- tests/charmhelpers/contrib/amulet/utils.py 2015-09-10 09:30:59 +0000
1235+++ tests/charmhelpers/contrib/amulet/utils.py 2015-09-25 14:42:27 +0000
1236@@ -326,7 +326,7 @@
1237
1238 def service_restarted_since(self, sentry_unit, mtime, service,
1239 pgrep_full=None, sleep_time=20,
1240- retry_count=2, retry_sleep_time=30):
1241+ retry_count=30, retry_sleep_time=10):
1242 """Check if service was been started after a given time.
1243
1244 Args:
1245@@ -334,8 +334,9 @@
1246 mtime (float): The epoch time to check against
1247 service (string): service name to look for in process table
1248 pgrep_full: [Deprecated] Use full command line search mode with pgrep
1249- sleep_time (int): Seconds to sleep before looking for process
1250- retry_count (int): If service is not found, how many times to retry
1251+ sleep_time (int): Initial sleep time (s) before looking for file
1252+ retry_sleep_time (int): Time (s) to sleep between retries
1253+ retry_count (int): If file is not found, how many times to retry
1254
1255 Returns:
1256 bool: True if service found and its start time it newer than mtime,
1257@@ -359,11 +360,12 @@
1258 pgrep_full)
1259 self.log.debug('Attempt {} to get {} proc start time on {} '
1260 'OK'.format(tries, service, unit_name))
1261- except IOError:
1262+ except IOError as e:
1263 # NOTE(beisner) - race avoidance, proc may not exist yet.
1264 # https://bugs.launchpad.net/charm-helpers/+bug/1474030
1265 self.log.debug('Attempt {} to get {} proc start time on {} '
1266- 'failed'.format(tries, service, unit_name))
1267+ 'failed\n{}'.format(tries, service,
1268+ unit_name, e))
1269 time.sleep(retry_sleep_time)
1270 tries += 1
1271
1272@@ -383,35 +385,62 @@
1273 return False
1274
1275 def config_updated_since(self, sentry_unit, filename, mtime,
1276- sleep_time=20):
1277+ sleep_time=20, retry_count=30,
1278+ retry_sleep_time=10):
1279 """Check if file was modified after a given time.
1280
1281 Args:
1282 sentry_unit (sentry): The sentry unit to check the file mtime on
1283 filename (string): The file to check mtime of
1284 mtime (float): The epoch time to check against
1285- sleep_time (int): Seconds to sleep before looking for process
1286+ sleep_time (int): Initial sleep time (s) before looking for file
1287+ retry_sleep_time (int): Time (s) to sleep between retries
1288+ retry_count (int): If file is not found, how many times to retry
1289
1290 Returns:
1291 bool: True if file was modified more recently than mtime, False if
1292- file was modified before mtime,
1293+ file was modified before mtime, or if file not found.
1294 """
1295- self.log.debug('Checking %s updated since %s' % (filename, mtime))
1296+ unit_name = sentry_unit.info['unit_name']
1297+ self.log.debug('Checking that %s updated since %s on '
1298+ '%s' % (filename, mtime, unit_name))
1299 time.sleep(sleep_time)
1300- file_mtime = self._get_file_mtime(sentry_unit, filename)
1301+ file_mtime = None
1302+ tries = 0
1303+ while tries <= retry_count and not file_mtime:
1304+ try:
1305+ file_mtime = self._get_file_mtime(sentry_unit, filename)
1306+ self.log.debug('Attempt {} to get {} file mtime on {} '
1307+ 'OK'.format(tries, filename, unit_name))
1308+ except IOError as e:
1309+ # NOTE(beisner) - race avoidance, file may not exist yet.
1310+ # https://bugs.launchpad.net/charm-helpers/+bug/1474030
1311+ self.log.debug('Attempt {} to get {} file mtime on {} '
1312+ 'failed\n{}'.format(tries, filename,
1313+ unit_name, e))
1314+ time.sleep(retry_sleep_time)
1315+ tries += 1
1316+
1317+ if not file_mtime:
1318+ self.log.warn('Could not determine file mtime, assuming '
1319+ 'file does not exist')
1320+ return False
1321+
1322 if file_mtime >= mtime:
1323 self.log.debug('File mtime is newer than provided mtime '
1324- '(%s >= %s)' % (file_mtime, mtime))
1325+ '(%s >= %s) on %s (OK)' % (file_mtime,
1326+ mtime, unit_name))
1327 return True
1328 else:
1329- self.log.warn('File mtime %s is older than provided mtime %s'
1330- % (file_mtime, mtime))
1331+ self.log.warn('File mtime is older than provided mtime'
1332+ '(%s < on %s) on %s' % (file_mtime,
1333+ mtime, unit_name))
1334 return False
1335
1336 def validate_service_config_changed(self, sentry_unit, mtime, service,
1337 filename, pgrep_full=None,
1338- sleep_time=20, retry_count=2,
1339- retry_sleep_time=30):
1340+ sleep_time=20, retry_count=30,
1341+ retry_sleep_time=10):
1342 """Check service and file were updated after mtime
1343
1344 Args:
1345@@ -456,7 +485,9 @@
1346 sentry_unit,
1347 filename,
1348 mtime,
1349- sleep_time=0)
1350+ sleep_time=sleep_time,
1351+ retry_count=retry_count,
1352+ retry_sleep_time=retry_sleep_time)
1353
1354 return service_restart and config_update
1355
1356@@ -776,3 +807,12 @@
1357 output = _check_output(command, universal_newlines=True)
1358 data = json.loads(output)
1359 return data.get(u"status") == "completed"
1360+
1361+ def status_get(self, unit):
1362+ """Return the current service status of this unit."""
1363+ raw_status, return_code = unit.run(
1364+ "status-get --format=json --include-data")
1365+ if return_code != 0:
1366+ return ("unknown", "")
1367+ status = json.loads(raw_status)
1368+ return (status["status"], status["message"])
1369
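
Editor's note: a short illustrative use of the new status_get test helper; u and the sentry attribute are hypothetical fixtures.

state, message = u.status_get(self.glance_sentry)
assert state == 'active', 'unexpected state {}: {}'.format(state, message)
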
1370=== modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
1371--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-09-10 09:30:59 +0000
1372+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-09-25 14:42:27 +0000
1373@@ -58,19 +58,17 @@
1374 else:
1375 base_series = self.current_next
1376
1377- if self.stable:
1378- for svc in other_services:
1379- if svc['name'] in force_series_current:
1380- base_series = self.current_next
1381-
1382+ for svc in other_services:
1383+ if svc['name'] in force_series_current:
1384+ base_series = self.current_next
1385+ # If a location has been explicitly set, use it
1386+ if svc.get('location'):
1387+ continue
1388+ if self.stable:
1389 temp = 'lp:charms/{}/{}'
1390 svc['location'] = temp.format(base_series,
1391 svc['name'])
1392- else:
1393- for svc in other_services:
1394- if svc['name'] in force_series_current:
1395- base_series = self.current_next
1396-
1397+ else:
1398 if svc['name'] in base_charms:
1399 temp = 'lp:charms/{}/{}'
1400 svc['location'] = temp.format(base_series,
1401@@ -79,6 +77,7 @@
1402 temp = 'lp:~openstack-charmers/charms/{}/{}/next'
1403 svc['location'] = temp.format(self.current_next,
1404 svc['name'])
1405+
1406 return other_services
1407
1408 def _add_services(self, this_service, other_services):
