Merge lp:~james-page/charms/trusty/openstack-dashboard/status into lp:~openstack-charmers-archive/charms/trusty/openstack-dashboard/next

Proposed by James Page
Status: Merged
Merged at revision: 92
Proposed branch: lp:~james-page/charms/trusty/openstack-dashboard/status
Merge into: lp:~openstack-charmers-archive/charms/trusty/openstack-dashboard/next
Diff against target: 2668 lines (+1726/-137)
23 files modified
hooks/charmhelpers/contrib/network/ip.py (+5/-3)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+23/-9)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+359/-0)
hooks/charmhelpers/contrib/openstack/context.py (+100/-29)
hooks/charmhelpers/contrib/openstack/neutron.py (+17/-3)
hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+6/-0)
hooks/charmhelpers/contrib/openstack/templating.py (+30/-2)
hooks/charmhelpers/contrib/openstack/utils.py (+182/-2)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+226/-13)
hooks/charmhelpers/core/hookenv.py (+32/-0)
hooks/charmhelpers/core/host.py (+32/-16)
hooks/charmhelpers/core/hugepage.py (+8/-1)
hooks/charmhelpers/core/kernel.py (+68/-0)
hooks/charmhelpers/core/strutils.py (+30/-0)
hooks/horizon_hooks.py (+13/-4)
hooks/horizon_utils.py (+3/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+4/-2)
tests/charmhelpers/contrib/amulet/utils.py (+193/-19)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+23/-9)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+359/-0)
unit_tests/test_horizon_contexts.py (+2/-2)
unit_tests/test_horizon_hooks.py (+10/-22)
unit_tests/test_horizon_utils.py (+1/-1)
To merge this branch: bzr merge lp:~james-page/charms/trusty/openstack-dashboard/status
Reviewer: Liam Young (community)
Status: Approve
Review via email: mp+274060@code.launchpad.net

Description of the change

Add workload status support.

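For context, set_os_workload_status (added to charmhelpers.contrib.openstack.utils in this diff) is the helper that maps complete and incomplete relation contexts onto Juju workload status. A minimal sketch of how a charm's hook script typically wires it up follows; names such as register_configs, CONFIGS and the exact REQUIRED_INTERFACES mapping are assumptions following the usual OpenStack charm conventions, not code copied from this branch.

    # Sketch only: the usual wiring in an OpenStack charm hook script.
    import sys

    from charmhelpers.core.hookenv import Hooks, UnregisteredHookError, log
    from charmhelpers.contrib.openstack.utils import set_os_workload_status

    from horizon_utils import register_configs  # assumed charm-side helper

    hooks = Hooks()
    CONFIGS = register_configs()  # OSConfigRenderer with templates registered

    # Generic capability name -> relation interface(s) that can satisfy it.
    REQUIRED_INTERFACES = {
        'identity': ['identity-service'],
    }

    @hooks.hook('config-changed')
    def config_changed():
        CONFIGS.write_all()

    if __name__ == '__main__':
        try:
            hooks.execute(sys.argv)
        except UnregisteredHookError as e:
            log('Unknown hook {} - skipping.'.format(e))
        # Reports 'blocked' for missing relations, 'waiting' for related but
        # incomplete contexts, and 'active' ("Unit is ready") otherwise.
        set_os_workload_status(CONFIGS, REQUIRED_INTERFACES)
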
Clean up incorrect use of assert_called in the unit tests.

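The underlying pitfall: Mock objects auto-create attributes, and the mock releases in use at the time have no real assert_called() method, so calling it silently "passes" without checking anything. A hedged illustration (not the charm's actual test code):

    # Illustration of the pitfall only.
    from mock import MagicMock

    service_restart = MagicMock()

    # Wrong: this just auto-creates and calls a child mock, so it passes even
    # though service_restart was never invoked.
    service_restart.assert_called()

    # Right: use the real assertion helpers, which do fail when appropriate.
    service_restart('apache2')
    service_restart.assert_called_once_with('apache2')
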
Cleaning up these assertions also revealed that the hook registration tests fail. Those tests have been dropped: patching the hook functions is not possible because the hooks object already holds a reference to the original functions, so dispatch goes through that indirection rather than through the patched names.
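
Concretely (a sketch assuming the standard charmhelpers Hooks API): the hook() decorator stores the function object in the Hooks registry at import time, so a later mock.patch of the module attribute never reaches what hooks.execute() dispatches.

    # Sketch of why patching a registered hook function is futile.
    from mock import patch

    from charmhelpers.core.hookenv import Hooks

    hooks = Hooks()

    @hooks.hook('config-changed')
    def config_changed():
        pass

    with patch(__name__ + '.config_changed') as mocked:
        # The module-level name is rebound to the mock...
        assert config_changed is mocked
        # ...but the registry (peeking at the private dict to illustrate)
        # still holds the original function, which execute() would call.
        assert hooks._hooks['config-changed'] is not mocked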

Liam Young (gnuoy) wrote:

Approved

review: Approve

Preview Diff

1=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
2--- hooks/charmhelpers/contrib/network/ip.py 2015-09-03 09:42:35 +0000
3+++ hooks/charmhelpers/contrib/network/ip.py 2015-10-10 23:44:18 +0000
4@@ -23,7 +23,7 @@
5 from functools import partial
6
7 from charmhelpers.core.hookenv import unit_get
8-from charmhelpers.fetch import apt_install
9+from charmhelpers.fetch import apt_install, apt_update
10 from charmhelpers.core.hookenv import (
11 log,
12 WARNING,
13@@ -32,13 +32,15 @@
14 try:
15 import netifaces
16 except ImportError:
17- apt_install('python-netifaces')
18+ apt_update(fatal=True)
19+ apt_install('python-netifaces', fatal=True)
20 import netifaces
21
22 try:
23 import netaddr
24 except ImportError:
25- apt_install('python-netaddr')
26+ apt_update(fatal=True)
27+ apt_install('python-netaddr', fatal=True)
28 import netaddr
29
30
31
32=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
33--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-08-18 17:34:36 +0000
34+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-10-10 23:44:18 +0000
35@@ -44,20 +44,31 @@
36 Determine if the local branch being tested is derived from its
37 stable or next (dev) branch, and based on this, use the corresonding
38 stable or next branches for the other_services."""
39+
40+ # Charms outside the lp:~openstack-charmers namespace
41 base_charms = ['mysql', 'mongodb', 'nrpe']
42
43+ # Force these charms to current series even when using an older series.
44+ # ie. Use trusty/nrpe even when series is precise, as the P charm
45+ # does not possess the necessary external master config and hooks.
46+ force_series_current = ['nrpe']
47+
48 if self.series in ['precise', 'trusty']:
49 base_series = self.series
50 else:
51 base_series = self.current_next
52
53- if self.stable:
54- for svc in other_services:
55+ for svc in other_services:
56+ if svc['name'] in force_series_current:
57+ base_series = self.current_next
58+ # If a location has been explicitly set, use it
59+ if svc.get('location'):
60+ continue
61+ if self.stable:
62 temp = 'lp:charms/{}/{}'
63 svc['location'] = temp.format(base_series,
64 svc['name'])
65- else:
66- for svc in other_services:
67+ else:
68 if svc['name'] in base_charms:
69 temp = 'lp:charms/{}/{}'
70 svc['location'] = temp.format(base_series,
71@@ -66,6 +77,7 @@
72 temp = 'lp:~openstack-charmers/charms/{}/{}/next'
73 svc['location'] = temp.format(self.current_next,
74 svc['name'])
75+
76 return other_services
77
78 def _add_services(self, this_service, other_services):
79@@ -77,21 +89,23 @@
80
81 services = other_services
82 services.append(this_service)
83+
84+ # Charms which should use the source config option
85 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
86 'ceph-osd', 'ceph-radosgw']
87- # Most OpenStack subordinate charms do not expose an origin option
88- # as that is controlled by the principle.
89- ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
90+
91+ # Charms which can not use openstack-origin, ie. many subordinates
92+ no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
93
94 if self.openstack:
95 for svc in services:
96- if svc['name'] not in use_source + ignore:
97+ if svc['name'] not in use_source + no_origin:
98 config = {'openstack-origin': self.openstack}
99 self.d.configure(svc['name'], config)
100
101 if self.source:
102 for svc in services:
103- if svc['name'] in use_source and svc['name'] not in ignore:
104+ if svc['name'] in use_source and svc['name'] not in no_origin:
105 config = {'source': self.source}
106 self.d.configure(svc['name'], config)
107
108
109=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
110--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-07-17 13:24:05 +0000
111+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-10-10 23:44:18 +0000
112@@ -27,6 +27,7 @@
113 import heatclient.v1.client as heat_client
114 import keystoneclient.v2_0 as keystone_client
115 import novaclient.v1_1.client as nova_client
116+import pika
117 import swiftclient
118
119 from charmhelpers.contrib.amulet.utils import (
120@@ -602,3 +603,361 @@
121 self.log.debug('Ceph {} samples (OK): '
122 '{}'.format(sample_type, samples))
123 return None
124+
125+# rabbitmq/amqp specific helpers:
126+ def add_rmq_test_user(self, sentry_units,
127+ username="testuser1", password="changeme"):
128+ """Add a test user via the first rmq juju unit, check connection as
129+ the new user against all sentry units.
130+
131+ :param sentry_units: list of sentry unit pointers
132+ :param username: amqp user name, default to testuser1
133+ :param password: amqp user password
134+ :returns: None if successful. Raise on error.
135+ """
136+ self.log.debug('Adding rmq user ({})...'.format(username))
137+
138+ # Check that user does not already exist
139+ cmd_user_list = 'rabbitmqctl list_users'
140+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
141+ if username in output:
142+ self.log.warning('User ({}) already exists, returning '
143+ 'gracefully.'.format(username))
144+ return
145+
146+ perms = '".*" ".*" ".*"'
147+ cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
148+ 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
149+
150+ # Add user via first unit
151+ for cmd in cmds:
152+ output, _ = self.run_cmd_unit(sentry_units[0], cmd)
153+
154+ # Check connection against the other sentry_units
155+ self.log.debug('Checking user connect against units...')
156+ for sentry_unit in sentry_units:
157+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
158+ username=username,
159+ password=password)
160+ connection.close()
161+
162+ def delete_rmq_test_user(self, sentry_units, username="testuser1"):
163+ """Delete a rabbitmq user via the first rmq juju unit.
164+
165+ :param sentry_units: list of sentry unit pointers
166+ :param username: amqp user name, default to testuser1
167+ :param password: amqp user password
168+ :returns: None if successful or no such user.
169+ """
170+ self.log.debug('Deleting rmq user ({})...'.format(username))
171+
172+ # Check that the user exists
173+ cmd_user_list = 'rabbitmqctl list_users'
174+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
175+
176+ if username not in output:
177+ self.log.warning('User ({}) does not exist, returning '
178+ 'gracefully.'.format(username))
179+ return
180+
181+ # Delete the user
182+ cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
183+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
184+
185+ def get_rmq_cluster_status(self, sentry_unit):
186+ """Execute rabbitmq cluster status command on a unit and return
187+ the full output.
188+
189+ :param unit: sentry unit
190+ :returns: String containing console output of cluster status command
191+ """
192+ cmd = 'rabbitmqctl cluster_status'
193+ output, _ = self.run_cmd_unit(sentry_unit, cmd)
194+ self.log.debug('{} cluster_status:\n{}'.format(
195+ sentry_unit.info['unit_name'], output))
196+ return str(output)
197+
198+ def get_rmq_cluster_running_nodes(self, sentry_unit):
199+ """Parse rabbitmqctl cluster_status output string, return list of
200+ running rabbitmq cluster nodes.
201+
202+ :param unit: sentry unit
203+ :returns: List containing node names of running nodes
204+ """
205+ # NOTE(beisner): rabbitmqctl cluster_status output is not
206+ # json-parsable, do string chop foo, then json.loads that.
207+ str_stat = self.get_rmq_cluster_status(sentry_unit)
208+ if 'running_nodes' in str_stat:
209+ pos_start = str_stat.find("{running_nodes,") + 15
210+ pos_end = str_stat.find("]},", pos_start) + 1
211+ str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
212+ run_nodes = json.loads(str_run_nodes)
213+ return run_nodes
214+ else:
215+ return []
216+
217+ def validate_rmq_cluster_running_nodes(self, sentry_units):
218+ """Check that all rmq unit hostnames are represented in the
219+ cluster_status output of all units.
220+
221+ :param host_names: dict of juju unit names to host names
222+ :param units: list of sentry unit pointers (all rmq units)
223+ :returns: None if successful, otherwise return error message
224+ """
225+ host_names = self.get_unit_hostnames(sentry_units)
226+ errors = []
227+
228+ # Query every unit for cluster_status running nodes
229+ for query_unit in sentry_units:
230+ query_unit_name = query_unit.info['unit_name']
231+ running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
232+
233+ # Confirm that every unit is represented in the queried unit's
234+ # cluster_status running nodes output.
235+ for validate_unit in sentry_units:
236+ val_host_name = host_names[validate_unit.info['unit_name']]
237+ val_node_name = 'rabbit@{}'.format(val_host_name)
238+
239+ if val_node_name not in running_nodes:
240+ errors.append('Cluster member check failed on {}: {} not '
241+ 'in {}\n'.format(query_unit_name,
242+ val_node_name,
243+ running_nodes))
244+ if errors:
245+ return ''.join(errors)
246+
247+ def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
248+ """Check a single juju rmq unit for ssl and port in the config file."""
249+ host = sentry_unit.info['public-address']
250+ unit_name = sentry_unit.info['unit_name']
251+
252+ conf_file = '/etc/rabbitmq/rabbitmq.config'
253+ conf_contents = str(self.file_contents_safe(sentry_unit,
254+ conf_file, max_wait=16))
255+ # Checks
256+ conf_ssl = 'ssl' in conf_contents
257+ conf_port = str(port) in conf_contents
258+
259+ # Port explicitly checked in config
260+ if port and conf_port and conf_ssl:
261+ self.log.debug('SSL is enabled @{}:{} '
262+ '({})'.format(host, port, unit_name))
263+ return True
264+ elif port and not conf_port and conf_ssl:
265+ self.log.debug('SSL is enabled @{} but not on port {} '
266+ '({})'.format(host, port, unit_name))
267+ return False
268+ # Port not checked (useful when checking that ssl is disabled)
269+ elif not port and conf_ssl:
270+ self.log.debug('SSL is enabled @{}:{} '
271+ '({})'.format(host, port, unit_name))
272+ return True
273+ elif not conf_ssl:
274+ self.log.debug('SSL not enabled @{}:{} '
275+ '({})'.format(host, port, unit_name))
276+ return False
277+ else:
278+ msg = ('Unknown condition when checking SSL status @{}:{} '
279+ '({})'.format(host, port, unit_name))
280+ amulet.raise_status(amulet.FAIL, msg)
281+
282+ def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
283+ """Check that ssl is enabled on rmq juju sentry units.
284+
285+ :param sentry_units: list of all rmq sentry units
286+ :param port: optional ssl port override to validate
287+ :returns: None if successful, otherwise return error message
288+ """
289+ for sentry_unit in sentry_units:
290+ if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
291+ return ('Unexpected condition: ssl is disabled on unit '
292+ '({})'.format(sentry_unit.info['unit_name']))
293+ return None
294+
295+ def validate_rmq_ssl_disabled_units(self, sentry_units):
296+ """Check that ssl is enabled on listed rmq juju sentry units.
297+
298+ :param sentry_units: list of all rmq sentry units
299+ :returns: True if successful. Raise on error.
300+ """
301+ for sentry_unit in sentry_units:
302+ if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
303+ return ('Unexpected condition: ssl is enabled on unit '
304+ '({})'.format(sentry_unit.info['unit_name']))
305+ return None
306+
307+ def configure_rmq_ssl_on(self, sentry_units, deployment,
308+ port=None, max_wait=60):
309+ """Turn ssl charm config option on, with optional non-default
310+ ssl port specification. Confirm that it is enabled on every
311+ unit.
312+
313+ :param sentry_units: list of sentry units
314+ :param deployment: amulet deployment object pointer
315+ :param port: amqp port, use defaults if None
316+ :param max_wait: maximum time to wait in seconds to confirm
317+ :returns: None if successful. Raise on error.
318+ """
319+ self.log.debug('Setting ssl charm config option: on')
320+
321+ # Enable RMQ SSL
322+ config = {'ssl': 'on'}
323+ if port:
324+ config['ssl_port'] = port
325+
326+ deployment.configure('rabbitmq-server', config)
327+
328+ # Confirm
329+ tries = 0
330+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
331+ while ret and tries < (max_wait / 4):
332+ time.sleep(4)
333+ self.log.debug('Attempt {}: {}'.format(tries, ret))
334+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
335+ tries += 1
336+
337+ if ret:
338+ amulet.raise_status(amulet.FAIL, ret)
339+
340+ def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
341+ """Turn ssl charm config option off, confirm that it is disabled
342+ on every unit.
343+
344+ :param sentry_units: list of sentry units
345+ :param deployment: amulet deployment object pointer
346+ :param max_wait: maximum time to wait in seconds to confirm
347+ :returns: None if successful. Raise on error.
348+ """
349+ self.log.debug('Setting ssl charm config option: off')
350+
351+ # Disable RMQ SSL
352+ config = {'ssl': 'off'}
353+ deployment.configure('rabbitmq-server', config)
354+
355+ # Confirm
356+ tries = 0
357+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
358+ while ret and tries < (max_wait / 4):
359+ time.sleep(4)
360+ self.log.debug('Attempt {}: {}'.format(tries, ret))
361+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
362+ tries += 1
363+
364+ if ret:
365+ amulet.raise_status(amulet.FAIL, ret)
366+
367+ def connect_amqp_by_unit(self, sentry_unit, ssl=False,
368+ port=None, fatal=True,
369+ username="testuser1", password="changeme"):
370+ """Establish and return a pika amqp connection to the rabbitmq service
371+ running on a rmq juju unit.
372+
373+ :param sentry_unit: sentry unit pointer
374+ :param ssl: boolean, default to False
375+ :param port: amqp port, use defaults if None
376+ :param fatal: boolean, default to True (raises on connect error)
377+ :param username: amqp user name, default to testuser1
378+ :param password: amqp user password
379+ :returns: pika amqp connection pointer or None if failed and non-fatal
380+ """
381+ host = sentry_unit.info['public-address']
382+ unit_name = sentry_unit.info['unit_name']
383+
384+ # Default port logic if port is not specified
385+ if ssl and not port:
386+ port = 5671
387+ elif not ssl and not port:
388+ port = 5672
389+
390+ self.log.debug('Connecting to amqp on {}:{} ({}) as '
391+ '{}...'.format(host, port, unit_name, username))
392+
393+ try:
394+ credentials = pika.PlainCredentials(username, password)
395+ parameters = pika.ConnectionParameters(host=host, port=port,
396+ credentials=credentials,
397+ ssl=ssl,
398+ connection_attempts=3,
399+ retry_delay=5,
400+ socket_timeout=1)
401+ connection = pika.BlockingConnection(parameters)
402+ assert connection.server_properties['product'] == 'RabbitMQ'
403+ self.log.debug('Connect OK')
404+ return connection
405+ except Exception as e:
406+ msg = ('amqp connection failed to {}:{} as '
407+ '{} ({})'.format(host, port, username, str(e)))
408+ if fatal:
409+ amulet.raise_status(amulet.FAIL, msg)
410+ else:
411+ self.log.warn(msg)
412+ return None
413+
414+ def publish_amqp_message_by_unit(self, sentry_unit, message,
415+ queue="test", ssl=False,
416+ username="testuser1",
417+ password="changeme",
418+ port=None):
419+ """Publish an amqp message to a rmq juju unit.
420+
421+ :param sentry_unit: sentry unit pointer
422+ :param message: amqp message string
423+ :param queue: message queue, default to test
424+ :param username: amqp user name, default to testuser1
425+ :param password: amqp user password
426+ :param ssl: boolean, default to False
427+ :param port: amqp port, use defaults if None
428+ :returns: None. Raises exception if publish failed.
429+ """
430+ self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
431+ message))
432+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
433+ port=port,
434+ username=username,
435+ password=password)
436+
437+ # NOTE(beisner): extra debug here re: pika hang potential:
438+ # https://github.com/pika/pika/issues/297
439+ # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
440+ self.log.debug('Defining channel...')
441+ channel = connection.channel()
442+ self.log.debug('Declaring queue...')
443+ channel.queue_declare(queue=queue, auto_delete=False, durable=True)
444+ self.log.debug('Publishing message...')
445+ channel.basic_publish(exchange='', routing_key=queue, body=message)
446+ self.log.debug('Closing channel...')
447+ channel.close()
448+ self.log.debug('Closing connection...')
449+ connection.close()
450+
451+ def get_amqp_message_by_unit(self, sentry_unit, queue="test",
452+ username="testuser1",
453+ password="changeme",
454+ ssl=False, port=None):
455+ """Get an amqp message from a rmq juju unit.
456+
457+ :param sentry_unit: sentry unit pointer
458+ :param queue: message queue, default to test
459+ :param username: amqp user name, default to testuser1
460+ :param password: amqp user password
461+ :param ssl: boolean, default to False
462+ :param port: amqp port, use defaults if None
463+ :returns: amqp message body as string. Raise if get fails.
464+ """
465+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
466+ port=port,
467+ username=username,
468+ password=password)
469+ channel = connection.channel()
470+ method_frame, _, body = channel.basic_get(queue)
471+
472+ if method_frame:
473+ self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
474+ body))
475+ channel.basic_ack(method_frame.delivery_tag)
476+ channel.close()
477+ connection.close()
478+ return body
479+ else:
480+ msg = 'No message retrieved.'
481+ amulet.raise_status(amulet.FAIL, msg)
482
483=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
484--- hooks/charmhelpers/contrib/openstack/context.py 2015-09-12 10:58:20 +0000
485+++ hooks/charmhelpers/contrib/openstack/context.py 2015-10-10 23:44:18 +0000
486@@ -14,6 +14,7 @@
487 # You should have received a copy of the GNU Lesser General Public License
488 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
489
490+import glob
491 import json
492 import os
493 import re
494@@ -194,10 +195,50 @@
495 class OSContextGenerator(object):
496 """Base class for all context generators."""
497 interfaces = []
498+ related = False
499+ complete = False
500+ missing_data = []
501
502 def __call__(self):
503 raise NotImplementedError
504
505+ def context_complete(self, ctxt):
506+ """Check for missing data for the required context data.
507+ Set self.missing_data if it exists and return False.
508+ Set self.complete if no missing data and return True.
509+ """
510+ # Fresh start
511+ self.complete = False
512+ self.missing_data = []
513+ for k, v in six.iteritems(ctxt):
514+ if v is None or v == '':
515+ if k not in self.missing_data:
516+ self.missing_data.append(k)
517+
518+ if self.missing_data:
519+ self.complete = False
520+ log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
521+ else:
522+ self.complete = True
523+ return self.complete
524+
525+ def get_related(self):
526+ """Check if any of the context interfaces have relation ids.
527+ Set self.related and return True if one of the interfaces
528+ has relation ids.
529+ """
530+ # Fresh start
531+ self.related = False
532+ try:
533+ for interface in self.interfaces:
534+ if relation_ids(interface):
535+ self.related = True
536+ return self.related
537+ except AttributeError as e:
538+ log("{} {}"
539+ "".format(self, e), 'INFO')
540+ return self.related
541+
542
543 class SharedDBContext(OSContextGenerator):
544 interfaces = ['shared-db']
545@@ -213,6 +254,7 @@
546 self.database = database
547 self.user = user
548 self.ssl_dir = ssl_dir
549+ self.rel_name = self.interfaces[0]
550
551 def __call__(self):
552 self.database = self.database or config('database')
553@@ -246,6 +288,7 @@
554 password_setting = self.relation_prefix + '_password'
555
556 for rid in relation_ids(self.interfaces[0]):
557+ self.related = True
558 for unit in related_units(rid):
559 rdata = relation_get(rid=rid, unit=unit)
560 host = rdata.get('db_host')
561@@ -257,7 +300,7 @@
562 'database_password': rdata.get(password_setting),
563 'database_type': 'mysql'
564 }
565- if context_complete(ctxt):
566+ if self.context_complete(ctxt):
567 db_ssl(rdata, ctxt, self.ssl_dir)
568 return ctxt
569 return {}
570@@ -278,6 +321,7 @@
571
572 ctxt = {}
573 for rid in relation_ids(self.interfaces[0]):
574+ self.related = True
575 for unit in related_units(rid):
576 rel_host = relation_get('host', rid=rid, unit=unit)
577 rel_user = relation_get('user', rid=rid, unit=unit)
578@@ -287,7 +331,7 @@
579 'database_user': rel_user,
580 'database_password': rel_passwd,
581 'database_type': 'postgresql'}
582- if context_complete(ctxt):
583+ if self.context_complete(ctxt):
584 return ctxt
585
586 return {}
587@@ -348,6 +392,7 @@
588 ctxt['signing_dir'] = cachedir
589
590 for rid in relation_ids(self.rel_name):
591+ self.related = True
592 for unit in related_units(rid):
593 rdata = relation_get(rid=rid, unit=unit)
594 serv_host = rdata.get('service_host')
595@@ -366,7 +411,7 @@
596 'service_protocol': svc_protocol,
597 'auth_protocol': auth_protocol})
598
599- if context_complete(ctxt):
600+ if self.context_complete(ctxt):
601 # NOTE(jamespage) this is required for >= icehouse
602 # so a missing value just indicates keystone needs
603 # upgrading
604@@ -405,6 +450,7 @@
605 ctxt = {}
606 for rid in relation_ids(self.rel_name):
607 ha_vip_only = False
608+ self.related = True
609 for unit in related_units(rid):
610 if relation_get('clustered', rid=rid, unit=unit):
611 ctxt['clustered'] = True
612@@ -437,7 +483,7 @@
613 ha_vip_only = relation_get('ha-vip-only',
614 rid=rid, unit=unit) is not None
615
616- if context_complete(ctxt):
617+ if self.context_complete(ctxt):
618 if 'rabbit_ssl_ca' in ctxt:
619 if not self.ssl_dir:
620 log("Charm not setup for ssl support but ssl ca "
621@@ -469,7 +515,7 @@
622 ctxt['oslo_messaging_flags'] = config_flags_parser(
623 oslo_messaging_flags)
624
625- if not context_complete(ctxt):
626+ if not self.complete:
627 return {}
628
629 return ctxt
630@@ -485,13 +531,15 @@
631
632 log('Generating template context for ceph', level=DEBUG)
633 mon_hosts = []
634- auth = None
635- key = None
636- use_syslog = str(config('use-syslog')).lower()
637+ ctxt = {
638+ 'use_syslog': str(config('use-syslog')).lower()
639+ }
640 for rid in relation_ids('ceph'):
641 for unit in related_units(rid):
642- auth = relation_get('auth', rid=rid, unit=unit)
643- key = relation_get('key', rid=rid, unit=unit)
644+ if not ctxt.get('auth'):
645+ ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
646+ if not ctxt.get('key'):
647+ ctxt['key'] = relation_get('key', rid=rid, unit=unit)
648 ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
649 unit=unit)
650 unit_priv_addr = relation_get('private-address', rid=rid,
651@@ -500,15 +548,12 @@
652 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
653 mon_hosts.append(ceph_addr)
654
655- ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
656- 'auth': auth,
657- 'key': key,
658- 'use_syslog': use_syslog}
659+ ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
660
661 if not os.path.isdir('/etc/ceph'):
662 os.mkdir('/etc/ceph')
663
664- if not context_complete(ctxt):
665+ if not self.context_complete(ctxt):
666 return {}
667
668 ensure_packages(['ceph-common'])
669@@ -907,6 +952,19 @@
670 'config': config}
671 return ovs_ctxt
672
673+ def midonet_ctxt(self):
674+ driver = neutron_plugin_attribute(self.plugin, 'driver',
675+ self.network_manager)
676+ midonet_config = neutron_plugin_attribute(self.plugin, 'config',
677+ self.network_manager)
678+ mido_ctxt = {'core_plugin': driver,
679+ 'neutron_plugin': 'midonet',
680+ 'neutron_security_groups': self.neutron_security_groups,
681+ 'local_ip': unit_private_ip(),
682+ 'config': midonet_config}
683+
684+ return mido_ctxt
685+
686 def __call__(self):
687 if self.network_manager not in ['quantum', 'neutron']:
688 return {}
689@@ -928,6 +986,8 @@
690 ctxt.update(self.nuage_ctxt())
691 elif self.plugin == 'plumgrid':
692 ctxt.update(self.pg_ctxt())
693+ elif self.plugin == 'midonet':
694+ ctxt.update(self.midonet_ctxt())
695
696 alchemy_flags = config('neutron-alchemy-flags')
697 if alchemy_flags:
698@@ -1060,7 +1120,7 @@
699
700 ctxt = {
701 ... other context ...
702- 'subordinate_config': {
703+ 'subordinate_configuration': {
704 'DEFAULT': {
705 'key1': 'value1',
706 },
707@@ -1101,22 +1161,23 @@
708 try:
709 sub_config = json.loads(sub_config)
710 except:
711- log('Could not parse JSON from subordinate_config '
712- 'setting from %s' % rid, level=ERROR)
713+ log('Could not parse JSON from '
714+ 'subordinate_configuration setting from %s'
715+ % rid, level=ERROR)
716 continue
717
718 for service in self.services:
719 if service not in sub_config:
720- log('Found subordinate_config on %s but it contained'
721- 'nothing for %s service' % (rid, service),
722- level=INFO)
723+ log('Found subordinate_configuration on %s but it '
724+ 'contained nothing for %s service'
725+ % (rid, service), level=INFO)
726 continue
727
728 sub_config = sub_config[service]
729 if self.config_file not in sub_config:
730- log('Found subordinate_config on %s but it contained'
731- 'nothing for %s' % (rid, self.config_file),
732- level=INFO)
733+ log('Found subordinate_configuration on %s but it '
734+ 'contained nothing for %s'
735+ % (rid, self.config_file), level=INFO)
736 continue
737
738 sub_config = sub_config[self.config_file]
739@@ -1319,7 +1380,7 @@
740 normalized.update({port: port for port in resolved
741 if port in ports})
742 if resolved:
743- return {bridge: normalized[port] for port, bridge in
744+ return {normalized[port]: bridge for port, bridge in
745 six.iteritems(portmap) if port in normalized.keys()}
746
747 return None
748@@ -1330,12 +1391,22 @@
749 def __call__(self):
750 ctxt = {}
751 mappings = super(PhyNICMTUContext, self).__call__()
752- if mappings and mappings.values():
753- ports = mappings.values()
754+ if mappings and mappings.keys():
755+ ports = sorted(mappings.keys())
756 napi_settings = NeutronAPIContext()()
757 mtu = napi_settings.get('network_device_mtu')
758+ all_ports = set()
759+ # If any of ports is a vlan device, its underlying device must have
760+ # mtu applied first.
761+ for port in ports:
762+ for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
763+ lport = os.path.basename(lport)
764+ all_ports.add(lport.split('_')[1])
765+
766+ all_ports = list(all_ports)
767+ all_ports.extend(ports)
768 if mtu:
769- ctxt["devs"] = '\\n'.join(ports)
770+ ctxt["devs"] = '\\n'.join(all_ports)
771 ctxt['mtu'] = mtu
772
773 return ctxt
774@@ -1367,6 +1438,6 @@
775 'auth_protocol':
776 rdata.get('auth_protocol') or 'http',
777 }
778- if context_complete(ctxt):
779+ if self.context_complete(ctxt):
780 return ctxt
781 return {}
782
783=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
784--- hooks/charmhelpers/contrib/openstack/neutron.py 2015-09-03 09:42:35 +0000
785+++ hooks/charmhelpers/contrib/openstack/neutron.py 2015-10-10 23:44:18 +0000
786@@ -209,6 +209,20 @@
787 'server_packages': ['neutron-server',
788 'neutron-plugin-plumgrid'],
789 'server_services': ['neutron-server']
790+ },
791+ 'midonet': {
792+ 'config': '/etc/neutron/plugins/midonet/midonet.ini',
793+ 'driver': 'midonet.neutron.plugin.MidonetPluginV2',
794+ 'contexts': [
795+ context.SharedDBContext(user=config('neutron-database-user'),
796+ database=config('neutron-database'),
797+ relation_prefix='neutron',
798+ ssl_dir=NEUTRON_CONF_DIR)],
799+ 'services': [],
800+ 'packages': [[headers_package()] + determine_dkms_package()],
801+ 'server_packages': ['neutron-server',
802+ 'python-neutron-plugin-midonet'],
803+ 'server_services': ['neutron-server']
804 }
805 }
806 if release >= 'icehouse':
807@@ -310,10 +324,10 @@
808 def parse_data_port_mappings(mappings, default_bridge='br-data'):
809 """Parse data port mappings.
810
811- Mappings must be a space-delimited list of port:bridge mappings.
812+ Mappings must be a space-delimited list of bridge:port.
813
814- Returns dict of the form {port:bridge} where port may be an mac address or
815- interface name.
816+ Returns dict of the form {port:bridge} where ports may be mac addresses or
817+ interface names.
818 """
819
820 # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
821
822=== modified file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf'
823--- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-07-17 13:24:05 +0000
824+++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-10-10 23:44:18 +0000
825@@ -13,3 +13,9 @@
826 err to syslog = {{ use_syslog }}
827 clog to syslog = {{ use_syslog }}
828
829+[client]
830+{% if rbd_client_cache_settings -%}
831+{% for key, value in rbd_client_cache_settings.iteritems() -%}
832+{{ key }} = {{ value }}
833+{% endfor -%}
834+{%- endif %}
835\ No newline at end of file
836
837=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
838--- hooks/charmhelpers/contrib/openstack/templating.py 2015-08-27 15:02:34 +0000
839+++ hooks/charmhelpers/contrib/openstack/templating.py 2015-10-10 23:44:18 +0000
840@@ -18,7 +18,7 @@
841
842 import six
843
844-from charmhelpers.fetch import apt_install
845+from charmhelpers.fetch import apt_install, apt_update
846 from charmhelpers.core.hookenv import (
847 log,
848 ERROR,
849@@ -29,6 +29,7 @@
850 try:
851 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
852 except ImportError:
853+ apt_update(fatal=True)
854 apt_install('python-jinja2', fatal=True)
855 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
856
857@@ -112,7 +113,7 @@
858
859 def complete_contexts(self):
860 '''
861- Return a list of interfaces that have atisfied contexts.
862+ Return a list of interfaces that have satisfied contexts.
863 '''
864 if self._complete_contexts:
865 return self._complete_contexts
866@@ -293,3 +294,30 @@
867 [interfaces.extend(i.complete_contexts())
868 for i in six.itervalues(self.templates)]
869 return interfaces
870+
871+ def get_incomplete_context_data(self, interfaces):
872+ '''
873+ Return dictionary of relation status of interfaces and any missing
874+ required context data. Example:
875+ {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
876+ 'zeromq-configuration': {'related': False}}
877+ '''
878+ incomplete_context_data = {}
879+
880+ for i in six.itervalues(self.templates):
881+ for context in i.contexts:
882+ for interface in interfaces:
883+ related = False
884+ if interface in context.interfaces:
885+ related = context.get_related()
886+ missing_data = context.missing_data
887+ if missing_data:
888+ incomplete_context_data[interface] = {'missing_data': missing_data}
889+ if related:
890+ if incomplete_context_data.get(interface):
891+ incomplete_context_data[interface].update({'related': True})
892+ else:
893+ incomplete_context_data[interface] = {'related': True}
894+ else:
895+ incomplete_context_data[interface] = {'related': False}
896+ return incomplete_context_data
897
898=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
899--- hooks/charmhelpers/contrib/openstack/utils.py 2015-09-14 20:23:58 +0000
900+++ hooks/charmhelpers/contrib/openstack/utils.py 2015-10-10 23:44:18 +0000
901@@ -42,7 +42,9 @@
902 charm_dir,
903 INFO,
904 relation_ids,
905- relation_set
906+ relation_set,
907+ status_set,
908+ hook_name
909 )
910
911 from charmhelpers.contrib.storage.linux.lvm import (
912@@ -52,7 +54,8 @@
913 )
914
915 from charmhelpers.contrib.network.ip import (
916- get_ipv6_addr
917+ get_ipv6_addr,
918+ is_ipv6,
919 )
920
921 from charmhelpers.contrib.python.packages import (
922@@ -118,6 +121,7 @@
923 ('2.2.2', 'kilo'),
924 ('2.3.0', 'liberty'),
925 ('2.4.0', 'liberty'),
926+ ('2.5.0', 'liberty'),
927 ])
928
929 # >= Liberty version->codename mapping
930@@ -517,6 +521,12 @@
931 relation_prefix=None):
932 hosts = get_ipv6_addr(dynamic_only=False)
933
934+ if config('vip'):
935+ vips = config('vip').split()
936+ for vip in vips:
937+ if vip and is_ipv6(vip):
938+ hosts.append(vip)
939+
940 kwargs = {'database': database,
941 'username': database_user,
942 'hostname': json.dumps(hosts)}
943@@ -754,6 +764,176 @@
944 return None
945
946
947+def os_workload_status(configs, required_interfaces, charm_func=None):
948+ """
949+ Decorator to set workload status based on complete contexts
950+ """
951+ def wrap(f):
952+ @wraps(f)
953+ def wrapped_f(*args, **kwargs):
954+ # Run the original function first
955+ f(*args, **kwargs)
956+ # Set workload status now that contexts have been
957+ # acted on
958+ set_os_workload_status(configs, required_interfaces, charm_func)
959+ return wrapped_f
960+ return wrap
961+
962+
963+def set_os_workload_status(configs, required_interfaces, charm_func=None):
964+ """
965+ Set workload status based on complete contexts.
966+ status-set missing or incomplete contexts
967+ and juju-log details of missing required data.
968+ charm_func is a charm specific function to run checking
969+ for charm specific requirements such as a VIP setting.
970+ """
971+ incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
972+ state = 'active'
973+ missing_relations = []
974+ incomplete_relations = []
975+ message = None
976+ charm_state = None
977+ charm_message = None
978+
979+ for generic_interface in incomplete_rel_data.keys():
980+ related_interface = None
981+ missing_data = {}
982+ # Related or not?
983+ for interface in incomplete_rel_data[generic_interface]:
984+ if incomplete_rel_data[generic_interface][interface].get('related'):
985+ related_interface = interface
986+ missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data')
987+ # No relation ID for the generic_interface
988+ if not related_interface:
989+ juju_log("{} relation is missing and must be related for "
990+ "functionality. ".format(generic_interface), 'WARN')
991+ state = 'blocked'
992+ if generic_interface not in missing_relations:
993+ missing_relations.append(generic_interface)
994+ else:
995+ # Relation ID exists but no related unit
996+ if not missing_data:
997+ # Edge case relation ID exists but departing
998+ if ('departed' in hook_name() or 'broken' in hook_name()) \
999+ and related_interface in hook_name():
1000+ state = 'blocked'
1001+ if generic_interface not in missing_relations:
1002+ missing_relations.append(generic_interface)
1003+ juju_log("{} relation's interface, {}, "
1004+ "relationship is departed or broken "
1005+ "and is required for functionality."
1006+ "".format(generic_interface, related_interface), "WARN")
1007+ # Normal case relation ID exists but no related unit
1008+ # (joining)
1009+ else:
1010+ juju_log("{} relations's interface, {}, is related but has "
1011+ "no units in the relation."
1012+ "".format(generic_interface, related_interface), "INFO")
1013+ # Related unit exists and data missing on the relation
1014+ else:
1015+ juju_log("{} relation's interface, {}, is related awaiting "
1016+ "the following data from the relationship: {}. "
1017+ "".format(generic_interface, related_interface,
1018+ ", ".join(missing_data)), "INFO")
1019+ if state != 'blocked':
1020+ state = 'waiting'
1021+ if generic_interface not in incomplete_relations \
1022+ and generic_interface not in missing_relations:
1023+ incomplete_relations.append(generic_interface)
1024+
1025+ if missing_relations:
1026+ message = "Missing relations: {}".format(", ".join(missing_relations))
1027+ if incomplete_relations:
1028+ message += "; incomplete relations: {}" \
1029+ "".format(", ".join(incomplete_relations))
1030+ state = 'blocked'
1031+ elif incomplete_relations:
1032+ message = "Incomplete relations: {}" \
1033+ "".format(", ".join(incomplete_relations))
1034+ state = 'waiting'
1035+
1036+ # Run charm specific checks
1037+ if charm_func:
1038+ charm_state, charm_message = charm_func(configs)
1039+ if charm_state != 'active' and charm_state != 'unknown':
1040+ state = workload_state_compare(state, charm_state)
1041+ if message:
1042+ message = "{} {}".format(message, charm_message)
1043+ else:
1044+ message = charm_message
1045+
1046+ # Set to active if all requirements have been met
1047+ if state == 'active':
1048+ message = "Unit is ready"
1049+ juju_log(message, "INFO")
1050+
1051+ status_set(state, message)
1052+
1053+
1054+def workload_state_compare(current_workload_state, workload_state):
1055+ """ Return highest priority of two states"""
1056+ hierarchy = {'unknown': -1,
1057+ 'active': 0,
1058+ 'maintenance': 1,
1059+ 'waiting': 2,
1060+ 'blocked': 3,
1061+ }
1062+
1063+ if hierarchy.get(workload_state) is None:
1064+ workload_state = 'unknown'
1065+ if hierarchy.get(current_workload_state) is None:
1066+ current_workload_state = 'unknown'
1067+
1068+ # Set workload_state based on hierarchy of statuses
1069+ if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
1070+ return current_workload_state
1071+ else:
1072+ return workload_state
1073+
1074+
1075+def incomplete_relation_data(configs, required_interfaces):
1076+ """
1077+ Check complete contexts against required_interfaces
1078+ Return dictionary of incomplete relation data.
1079+
1080+ configs is an OSConfigRenderer object with configs registered
1081+
1082+ required_interfaces is a dictionary of required general interfaces
1083+ with dictionary values of possible specific interfaces.
1084+ Example:
1085+ required_interfaces = {'database': ['shared-db', 'pgsql-db']}
1086+
1087+ The interface is said to be satisfied if anyone of the interfaces in the
1088+ list has a complete context.
1089+
1090+ Return dictionary of incomplete or missing required contexts with relation
1091+ status of interfaces and any missing data points. Example:
1092+ {'message':
1093+ {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
1094+ 'zeromq-configuration': {'related': False}},
1095+ 'identity':
1096+ {'identity-service': {'related': False}},
1097+ 'database':
1098+ {'pgsql-db': {'related': False},
1099+ 'shared-db': {'related': True}}}
1100+ """
1101+ complete_ctxts = configs.complete_contexts()
1102+ incomplete_relations = []
1103+ for svc_type in required_interfaces.keys():
1104+ # Avoid duplicates
1105+ found_ctxt = False
1106+ for interface in required_interfaces[svc_type]:
1107+ if interface in complete_ctxts:
1108+ found_ctxt = True
1109+ if not found_ctxt:
1110+ incomplete_relations.append(svc_type)
1111+ incomplete_context_data = {}
1112+ for i in incomplete_relations:
1113+ incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i])
1114+ return incomplete_context_data
1115+
1116+
1117 def do_action_openstack_upgrade(package, upgrade_callback, configs):
1118 """Perform action-managed OpenStack upgrade.
1119
1120
1121=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
1122--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-07-17 13:24:05 +0000
1123+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-10-10 23:44:18 +0000
1124@@ -28,6 +28,7 @@
1125 import shutil
1126 import json
1127 import time
1128+import uuid
1129
1130 from subprocess import (
1131 check_call,
1132@@ -35,8 +36,10 @@
1133 CalledProcessError,
1134 )
1135 from charmhelpers.core.hookenv import (
1136+ local_unit,
1137 relation_get,
1138 relation_ids,
1139+ relation_set,
1140 related_units,
1141 log,
1142 DEBUG,
1143@@ -56,6 +59,8 @@
1144 apt_install,
1145 )
1146
1147+from charmhelpers.core.kernel import modprobe
1148+
1149 KEYRING = '/etc/ceph/ceph.client.{}.keyring'
1150 KEYFILE = '/etc/ceph/ceph.client.{}.key'
1151
1152@@ -288,17 +293,6 @@
1153 os.chown(data_src_dst, uid, gid)
1154
1155
1156-# TODO: re-use
1157-def modprobe(module):
1158- """Load a kernel module and configure for auto-load on reboot."""
1159- log('Loading kernel module', level=INFO)
1160- cmd = ['modprobe', module]
1161- check_call(cmd)
1162- with open('/etc/modules', 'r+') as modules:
1163- if module not in modules.read():
1164- modules.write(module)
1165-
1166-
1167 def copy_files(src, dst, symlinks=False, ignore=None):
1168 """Copy files from src to dst."""
1169 for item in os.listdir(src):
1170@@ -411,17 +405,52 @@
1171
1172 The API is versioned and defaults to version 1.
1173 """
1174- def __init__(self, api_version=1):
1175+ def __init__(self, api_version=1, request_id=None):
1176 self.api_version = api_version
1177+ if request_id:
1178+ self.request_id = request_id
1179+ else:
1180+ self.request_id = str(uuid.uuid1())
1181 self.ops = []
1182
1183 def add_op_create_pool(self, name, replica_count=3):
1184 self.ops.append({'op': 'create-pool', 'name': name,
1185 'replicas': replica_count})
1186
1187+ def set_ops(self, ops):
1188+ """Set request ops to provided value.
1189+
1190+ Useful for injecting ops that come from a previous request
1191+ to allow comparisons to ensure validity.
1192+ """
1193+ self.ops = ops
1194+
1195 @property
1196 def request(self):
1197- return json.dumps({'api-version': self.api_version, 'ops': self.ops})
1198+ return json.dumps({'api-version': self.api_version, 'ops': self.ops,
1199+ 'request-id': self.request_id})
1200+
1201+ def _ops_equal(self, other):
1202+ if len(self.ops) == len(other.ops):
1203+ for req_no in range(0, len(self.ops)):
1204+ for key in ['replicas', 'name', 'op']:
1205+ if self.ops[req_no][key] != other.ops[req_no][key]:
1206+ return False
1207+ else:
1208+ return False
1209+ return True
1210+
1211+ def __eq__(self, other):
1212+ if not isinstance(other, self.__class__):
1213+ return False
1214+ if self.api_version == other.api_version and \
1215+ self._ops_equal(other):
1216+ return True
1217+ else:
1218+ return False
1219+
1220+ def __ne__(self, other):
1221+ return not self.__eq__(other)
1222
1223
1224 class CephBrokerRsp(object):
1225@@ -431,14 +460,198 @@
1226
1227 The API is versioned and defaults to version 1.
1228 """
1229+
1230 def __init__(self, encoded_rsp):
1231 self.api_version = None
1232 self.rsp = json.loads(encoded_rsp)
1233
1234 @property
1235+ def request_id(self):
1236+ return self.rsp.get('request-id')
1237+
1238+ @property
1239 def exit_code(self):
1240 return self.rsp.get('exit-code')
1241
1242 @property
1243 def exit_msg(self):
1244 return self.rsp.get('stderr')
1245+
1246+
1247+# Ceph Broker Conversation:
1248+# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
1249+# and send that request to ceph via the ceph relation. The CephBrokerRq has a
1250+# unique id so that the client can identity which CephBrokerRsp is associated
1251+# with the request. Ceph will also respond to each client unit individually
1252+# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
1253+# via key broker-rsp-glance-0
1254+#
1255+# To use this the charm can just do something like:
1256+#
1257+# from charmhelpers.contrib.storage.linux.ceph import (
1258+# send_request_if_needed,
1259+# is_request_complete,
1260+# CephBrokerRq,
1261+# )
1262+#
1263+# @hooks.hook('ceph-relation-changed')
1264+# def ceph_changed():
1265+# rq = CephBrokerRq()
1266+# rq.add_op_create_pool(name='poolname', replica_count=3)
1267+#
1268+# if is_request_complete(rq):
1269+# <Request complete actions>
1270+# else:
1271+# send_request_if_needed(get_ceph_request())
1272+#
1273+# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
1274+# of glance having sent a request to ceph which ceph has successfully processed
1275+# 'ceph:8': {
1276+# 'ceph/0': {
1277+# 'auth': 'cephx',
1278+# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
1279+# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
1280+# 'ceph-public-address': '10.5.44.103',
1281+# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
1282+# 'private-address': '10.5.44.103',
1283+# },
1284+# 'glance/0': {
1285+# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
1286+# '"ops": [{"replicas": 3, "name": "glance", '
1287+# '"op": "create-pool"}]}'),
1288+# 'private-address': '10.5.44.109',
1289+# },
1290+# }
1291+
1292+def get_previous_request(rid):
1293+ """Return the last ceph broker request sent on a given relation
1294+
1295+ @param rid: Relation id to query for request
1296+ """
1297+ request = None
1298+ broker_req = relation_get(attribute='broker_req', rid=rid,
1299+ unit=local_unit())
1300+ if broker_req:
1301+ request_data = json.loads(broker_req)
1302+ request = CephBrokerRq(api_version=request_data['api-version'],
1303+ request_id=request_data['request-id'])
1304+ request.set_ops(request_data['ops'])
1305+
1306+ return request
1307+
1308+
1309+def get_request_states(request):
1310+ """Return a dict of requests per relation id with their corresponding
1311+ completion state.
1312+
1313+ This allows a charm, which has a request for ceph, to see whether there is
1314+ an equivalent request already being processed and if so what state that
1315+ request is in.
1316+
1317+ @param request: A CephBrokerRq object
1318+ """
1319+ complete = []
1320+ requests = {}
1321+ for rid in relation_ids('ceph'):
1322+ complete = False
1323+ previous_request = get_previous_request(rid)
1324+ if request == previous_request:
1325+ sent = True
1326+ complete = is_request_complete_for_rid(previous_request, rid)
1327+ else:
1328+ sent = False
1329+ complete = False
1330+
1331+ requests[rid] = {
1332+ 'sent': sent,
1333+ 'complete': complete,
1334+ }
1335+
1336+ return requests
1337+
1338+
1339+def is_request_sent(request):
1340+ """Check to see if a functionally equivalent request has already been sent
1341+
1342+ Returns True if a similair request has been sent
1343+
1344+ @param request: A CephBrokerRq object
1345+ """
1346+ states = get_request_states(request)
1347+ for rid in states.keys():
1348+ if not states[rid]['sent']:
1349+ return False
1350+
1351+ return True
1352+
1353+
1354+def is_request_complete(request):
1355+ """Check to see if a functionally equivalent request has already been
1356+ completed
1357+
1358+ Returns True if a similair request has been completed
1359+
1360+ @param request: A CephBrokerRq object
1361+ """
1362+ states = get_request_states(request)
1363+ for rid in states.keys():
1364+ if not states[rid]['complete']:
1365+ return False
1366+
1367+ return True
1368+
1369+
1370+def is_request_complete_for_rid(request, rid):
1371+ """Check if a given request has been completed on the given relation
1372+
1373+ @param request: A CephBrokerRq object
1374+ @param rid: Relation ID
1375+ """
1376+ broker_key = get_broker_rsp_key()
1377+ for unit in related_units(rid):
1378+ rdata = relation_get(rid=rid, unit=unit)
1379+ if rdata.get(broker_key):
1380+ rsp = CephBrokerRsp(rdata.get(broker_key))
1381+ if rsp.request_id == request.request_id:
1382+ if not rsp.exit_code:
1383+ return True
1384+ else:
1385+ # The remote unit sent no reply targeted at this unit so either the
1386+ # remote ceph cluster does not support unit targeted replies or it
1387+ # has not processed our request yet.
1388+ if rdata.get('broker_rsp'):
1389+ request_data = json.loads(rdata['broker_rsp'])
1390+ if request_data.get('request-id'):
1391+ log('Ignoring legacy broker_rsp without unit key as remote '
1392+ 'service supports unit specific replies', level=DEBUG)
1393+ else:
1394+ log('Using legacy broker_rsp as remote service does not '
1395+ 'supports unit specific replies', level=DEBUG)
1396+ rsp = CephBrokerRsp(rdata['broker_rsp'])
1397+ if not rsp.exit_code:
1398+ return True
1399+
1400+ return False
1401+
1402+
1403+def get_broker_rsp_key():
1404+ """Return broker response key for this unit
1405+
1406+ This is the key that ceph is going to use to pass request status
1407+ information back to this unit
1408+ """
1409+ return 'broker-rsp-' + local_unit().replace('/', '-')
1410+
1411+
1412+def send_request_if_needed(request):
1413+ """Send broker request if an equivalent request has not already been sent
1414+
1415+ @param request: A CephBrokerRq object
1416+ """
1417+ if is_request_sent(request):
1418+ log('Request already sent but not complete, not sending new request',
1419+ level=DEBUG)
1420+ else:
1421+ for rid in relation_ids('ceph'):
1422+ log('Sending request {}'.format(request.request_id), level=DEBUG)
1423+ relation_set(relation_id=rid, broker_req=request.request)
1424
1425=== modified file 'hooks/charmhelpers/core/hookenv.py'
1426--- hooks/charmhelpers/core/hookenv.py 2015-09-03 09:42:35 +0000
1427+++ hooks/charmhelpers/core/hookenv.py 2015-10-10 23:44:18 +0000
1428@@ -623,6 +623,38 @@
1429 return unit_get('private-address')
1430
1431
1432+@cached
1433+def storage_get(attribute="", storage_id=""):
1434+ """Get storage attributes"""
1435+ _args = ['storage-get', '--format=json']
1436+ if storage_id:
1437+ _args.extend(('-s', storage_id))
1438+ if attribute:
1439+ _args.append(attribute)
1440+ try:
1441+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
1442+ except ValueError:
1443+ return None
1444+
1445+
1446+@cached
1447+def storage_list(storage_name=""):
1448+ """List the storage IDs for the unit"""
1449+ _args = ['storage-list', '--format=json']
1450+ if storage_name:
1451+ _args.append(storage_name)
1452+ try:
1453+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
1454+ except ValueError:
1455+ return None
1456+ except OSError as e:
1457+ import errno
1458+ if e.errno == errno.ENOENT:
1459+ # storage-list does not exist
1460+ return []
1461+ raise
1462+
1463+
1464 class UnregisteredHookError(Exception):
1465 """Raised when an undefined hook is called"""
1466 pass
1467
1468=== modified file 'hooks/charmhelpers/core/host.py'
1469--- hooks/charmhelpers/core/host.py 2015-08-27 15:02:34 +0000
1470+++ hooks/charmhelpers/core/host.py 2015-10-10 23:44:18 +0000
1471@@ -63,32 +63,48 @@
1472 return service_result
1473
1474
1475-def service_pause(service_name, init_dir=None):
1476+def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
1477 """Pause a system service.
1478
1479 Stop it, and prevent it from starting again at boot."""
1480- if init_dir is None:
1481- init_dir = "/etc/init"
1482 stopped = service_stop(service_name)
1483- # XXX: Support systemd too
1484- override_path = os.path.join(
1485- init_dir, '{}.override'.format(service_name))
1486- with open(override_path, 'w') as fh:
1487- fh.write("manual\n")
1488+ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
1489+ sysv_file = os.path.join(initd_dir, service_name)
1490+ if os.path.exists(upstart_file):
1491+ override_path = os.path.join(
1492+ init_dir, '{}.override'.format(service_name))
1493+ with open(override_path, 'w') as fh:
1494+ fh.write("manual\n")
1495+ elif os.path.exists(sysv_file):
1496+ subprocess.check_call(["update-rc.d", service_name, "disable"])
1497+ else:
1498+ # XXX: Support SystemD too
1499+ raise ValueError(
1500+ "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
1501+ service_name, upstart_file, sysv_file))
1502 return stopped
1503
1504
1505-def service_resume(service_name, init_dir=None):
1506+def service_resume(service_name, init_dir="/etc/init",
1507+ initd_dir="/etc/init.d"):
1508 """Resume a system service.
1509
1510 Reenable starting again at boot. Start the service"""
1511- # XXX: Support systemd too
1512- if init_dir is None:
1513- init_dir = "/etc/init"
1514- override_path = os.path.join(
1515- init_dir, '{}.override'.format(service_name))
1516- if os.path.exists(override_path):
1517- os.unlink(override_path)
1518+ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
1519+ sysv_file = os.path.join(initd_dir, service_name)
1520+ if os.path.exists(upstart_file):
1521+ override_path = os.path.join(
1522+ init_dir, '{}.override'.format(service_name))
1523+ if os.path.exists(override_path):
1524+ os.unlink(override_path)
1525+ elif os.path.exists(sysv_file):
1526+ subprocess.check_call(["update-rc.d", service_name, "enable"])
1527+ else:
1528+ # XXX: Support SystemD too
1529+ raise ValueError(
1530+ "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
1531+ service_name, upstart_file, sysv_file))
1532+
1533 started = service_start(service_name)
1534 return started
1535
1536
1537=== modified file 'hooks/charmhelpers/core/hugepage.py'
1538--- hooks/charmhelpers/core/hugepage.py 2015-08-19 13:51:03 +0000
1539+++ hooks/charmhelpers/core/hugepage.py 2015-10-10 23:44:18 +0000
1540@@ -25,11 +25,13 @@
1541 fstab_mount,
1542 mkdir,
1543 )
1544+from charmhelpers.core.strutils import bytes_from_string
1545+from subprocess import check_output
1546
1547
1548 def hugepage_support(user, group='hugetlb', nr_hugepages=256,
1549 max_map_count=65536, mnt_point='/run/hugepages/kvm',
1550- pagesize='2MB', mount=True):
1551+ pagesize='2MB', mount=True, set_shmmax=False):
1552 """Enable hugepages on system.
1553
1554 Args:
1555@@ -49,6 +51,11 @@
1556 'vm.max_map_count': max_map_count,
1557 'vm.hugetlb_shm_group': gid,
1558 }
1559+ if set_shmmax:
1560+ shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
1561+ shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
1562+ if shmmax_minsize > shmmax_current:
1563+ sysctl_settings['kernel.shmmax'] = shmmax_minsize
1564 sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
1565 mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
1566 lfstab = fstab.Fstab()
1567
1568=== added file 'hooks/charmhelpers/core/kernel.py'
1569--- hooks/charmhelpers/core/kernel.py 1970-01-01 00:00:00 +0000
1570+++ hooks/charmhelpers/core/kernel.py 2015-10-10 23:44:18 +0000
1571@@ -0,0 +1,68 @@
1572+#!/usr/bin/env python
1573+# -*- coding: utf-8 -*-
1574+
1575+# Copyright 2014-2015 Canonical Limited.
1576+#
1577+# This file is part of charm-helpers.
1578+#
1579+# charm-helpers is free software: you can redistribute it and/or modify
1580+# it under the terms of the GNU Lesser General Public License version 3 as
1581+# published by the Free Software Foundation.
1582+#
1583+# charm-helpers is distributed in the hope that it will be useful,
1584+# but WITHOUT ANY WARRANTY; without even the implied warranty of
1585+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1586+# GNU Lesser General Public License for more details.
1587+#
1588+# You should have received a copy of the GNU Lesser General Public License
1589+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1590+
1591+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
1592+
1593+from charmhelpers.core.hookenv import (
1594+ log,
1595+ INFO
1596+)
1597+
1598+from subprocess import check_call, check_output
1599+import re
1600+
1601+
1602+def modprobe(module, persist=True):
1603+ """Load a kernel module and configure for auto-load on reboot."""
1604+ cmd = ['modprobe', module]
1605+
1606+ log('Loading kernel module %s' % module, level=INFO)
1607+
1608+ check_call(cmd)
1609+ if persist:
1610+ with open('/etc/modules', 'r+') as modules:
1611+ if module not in modules.read():
1612+ modules.write(module)
1613+
1614+
1615+def rmmod(module, force=False):
1616+ """Remove a module from the linux kernel"""
1617+ cmd = ['rmmod']
1618+ if force:
1619+ cmd.append('-f')
1620+ cmd.append(module)
1621+ log('Removing kernel module %s' % module, level=INFO)
1622+ return check_call(cmd)
1623+
1624+
1625+def lsmod():
1626+ """Shows what kernel modules are currently loaded"""
1627+ return check_output(['lsmod'],
1628+ universal_newlines=True)
1629+
1630+
1631+def is_module_loaded(module):
1632+ """Checks if a kernel module is already loaded"""
1633+ matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
1634+ return len(matches) > 0
1635+
1636+
1637+def update_initramfs(version='all'):
1638+ """Updates an initramfs image"""
1639+ return check_call(["update-initramfs", "-k", version, "-u"])
1640
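A brief sketch of how the new kernel helpers fit together (the module name is only an example):

    from charmhelpers.core.kernel import modprobe, is_module_loaded, rmmod

    if not is_module_loaded('ip_vs'):
        # Loads the module now and appends it to /etc/modules so it is
        # loaded again on reboot (persist=True is the default).
        modprobe('ip_vs')

    # Later, unload it again; force=True maps to `rmmod -f`.
    rmmod('ip_vs')
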
1641=== modified file 'hooks/charmhelpers/core/strutils.py'
1642--- hooks/charmhelpers/core/strutils.py 2015-04-16 20:24:28 +0000
1643+++ hooks/charmhelpers/core/strutils.py 2015-10-10 23:44:18 +0000
1644@@ -18,6 +18,7 @@
1645 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1646
1647 import six
1648+import re
1649
1650
1651 def bool_from_string(value):
1652@@ -40,3 +41,32 @@
1653
1654 msg = "Unable to interpret string value '%s' as boolean" % (value)
1655 raise ValueError(msg)
1656+
1657+
1658+def bytes_from_string(value):
1659+ """Interpret human readable string value as bytes.
1660+
1661+ Returns int
1662+ """
1663+ BYTE_POWER = {
1664+ 'K': 1,
1665+ 'KB': 1,
1666+ 'M': 2,
1667+ 'MB': 2,
1668+ 'G': 3,
1669+ 'GB': 3,
1670+ 'T': 4,
1671+ 'TB': 4,
1672+ 'P': 5,
1673+ 'PB': 5,
1674+ }
1675+ if isinstance(value, six.string_types):
1676+ value = six.text_type(value)
1677+ else:
1678+        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
1679+ raise ValueError(msg)
1680+ matches = re.match("([0-9]+)([a-zA-Z]+)", value)
1681+ if not matches:
1682+ msg = "Unable to interpret string value '%s' as bytes" % (value)
1683+ raise ValueError(msg)
1684+ return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
1685
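bytes_from_string interprets binary (1024-based) suffixes and raises ValueError for anything it cannot parse, for example:

    from charmhelpers.core.strutils import bytes_from_string

    assert bytes_from_string('2MB') == 2 * 1024 ** 2      # 2097152
    assert bytes_from_string('1G') == 1024 ** 3           # 1073741824
    # Both of these raise ValueError: no leading digits, and non-string input.
    # bytes_from_string('foo'); bytes_from_string(1024)
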
1686=== modified file 'hooks/horizon_hooks.py'
1687--- hooks/horizon_hooks.py 2015-09-28 19:15:37 +0000
1688+++ hooks/horizon_hooks.py 2015-10-10 23:44:18 +0000
1689@@ -10,7 +10,8 @@
1690 relation_set,
1691 relation_get,
1692 relation_ids,
1693- unit_get
1694+ unit_get,
1695+ status_set,
1696 )
1697 from charmhelpers.fetch import (
1698 apt_update, apt_install,
1699@@ -27,7 +28,8 @@
1700 git_pip_venv_dir,
1701 openstack_upgrade_available,
1702 os_release,
1703- save_script_rc
1704+ save_script_rc,
1705+ set_os_workload_status,
1706 )
1707 from horizon_utils import (
1708 determine_packages,
1709@@ -40,7 +42,8 @@
1710 git_install,
1711 git_post_install_late,
1712 setup_ipv6,
1713- INSTALL_DIR
1714+ INSTALL_DIR,
1715+ REQUIRED_INTERFACES,
1716 )
1717 from charmhelpers.contrib.network.ip import (
1718 get_iface_for_address,
1719@@ -70,7 +73,10 @@
1720 if lsb_release()['DISTRIB_CODENAME'] == 'precise':
1721 # Explicitly upgrade python-six Bug#1420708
1722 apt_install('python-six', fatal=True)
1723- apt_install(filter_installed_packages(packages), fatal=True)
1724+ packages = filter_installed_packages(packages)
1725+ if packages:
1726+ status_set('maintenance', 'Installing packages')
1727+ apt_install(packages, fatal=True)
1728
1729 git_install(config('openstack-origin-git'))
1730
1731@@ -108,6 +114,7 @@
1732 git_install(config('openstack-origin-git'))
1733 elif not config('action-managed-upgrade'):
1734 if openstack_upgrade_available('openstack-dashboard'):
1735+ status_set('maintenance', 'Upgrading to new OpenStack release')
1736 do_openstack_upgrade(configs=CONFIGS)
1737
1738 env_vars = {
1739@@ -265,10 +272,12 @@
1740
1741
1742 def main():
1743+ print sys.argv
1744 try:
1745 hooks.execute(sys.argv)
1746 except UnregisteredHookError as e:
1747 log('Unknown hook {} - skipping.'.format(e))
1748+ set_os_workload_status(CONFIGS, REQUIRED_INTERFACES)
1749
1750
1751 if __name__ == '__main__':
1752
1753=== modified file 'hooks/horizon_utils.py'
1754--- hooks/horizon_utils.py 2015-09-28 19:15:37 +0000
1755+++ hooks/horizon_utils.py 2015-10-10 23:44:18 +0000
1756@@ -72,6 +72,9 @@
1757 'zlib1g-dev',
1758 ]
1759
1760+REQUIRED_INTERFACES = {
1761+ 'identity': ['identity-service'],
1762+}
1763 # ubuntu packages that should not be installed when deploying from git
1764 GIT_PACKAGE_BLACKLIST = [
1765 'openstack-dashboard',
1766
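Taken together with the hook changes above, every hook run now ends with set_os_workload_status(CONFIGS, REQUIRED_INTERFACES), so the unit's workload status tracks the required 'identity' interface defined here. A simplified, self-contained sketch of the mapping (not the charmhelpers implementation; the real helper inspects CONFIGS.complete_contexts() and also reports incomplete relations):

    def summarise_status(complete_contexts, required):
        # A label is "missing" when none of its interfaces have a
        # complete context.
        missing = [name for name, ifaces in required.items()
                   if not set(ifaces) & set(complete_contexts)]
        if missing:
            return 'blocked', 'Missing relations: ' + ', '.join(missing)
        return 'active', 'Unit is ready'

    summarise_status([], {'identity': ['identity-service']})
    # -> ('blocked', 'Missing relations: identity')
    summarise_status(['identity-service'], {'identity': ['identity-service']})
    # -> ('active', 'Unit is ready')
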
1767=== added symlink 'hooks/identity-service-relation-departed'
1768=== target is u'horizon_hooks.py'
1769=== modified file 'tests/charmhelpers/contrib/amulet/deployment.py'
1770--- tests/charmhelpers/contrib/amulet/deployment.py 2015-02-10 18:50:39 +0000
1771+++ tests/charmhelpers/contrib/amulet/deployment.py 2015-10-10 23:44:18 +0000
1772@@ -51,7 +51,8 @@
1773 if 'units' not in this_service:
1774 this_service['units'] = 1
1775
1776- self.d.add(this_service['name'], units=this_service['units'])
1777+ self.d.add(this_service['name'], units=this_service['units'],
1778+ constraints=this_service.get('constraints'))
1779
1780 for svc in other_services:
1781 if 'location' in svc:
1782@@ -64,7 +65,8 @@
1783 if 'units' not in svc:
1784 svc['units'] = 1
1785
1786- self.d.add(svc['name'], charm=branch_location, units=svc['units'])
1787+ self.d.add(svc['name'], charm=branch_location, units=svc['units'],
1788+ constraints=svc.get('constraints'))
1789
1790 def _add_relations(self, relations):
1791 """Add all of the relations for the services."""
1792
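With this change a test's service dictionaries can carry an optional constraints entry that is handed to deployer unchanged; for example (the values shown are illustrative):

    this_service = {'name': 'openstack-dashboard'}
    other_services = [
        {'name': 'keystone'},
        # 'constraints' is optional and forwarded to d.add() as-is.
        {'name': 'mysql', 'constraints': {'mem': '4G'}},
    ]
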
1793=== modified file 'tests/charmhelpers/contrib/amulet/utils.py'
1794--- tests/charmhelpers/contrib/amulet/utils.py 2015-09-02 02:02:06 +0000
1795+++ tests/charmhelpers/contrib/amulet/utils.py 2015-10-10 23:44:18 +0000
1796@@ -19,9 +19,11 @@
1797 import logging
1798 import os
1799 import re
1800+import socket
1801 import subprocess
1802 import sys
1803 import time
1804+import uuid
1805
1806 import amulet
1807 import distro_info
1808@@ -324,7 +326,7 @@
1809
1810 def service_restarted_since(self, sentry_unit, mtime, service,
1811 pgrep_full=None, sleep_time=20,
1812- retry_count=2, retry_sleep_time=30):
1813+ retry_count=30, retry_sleep_time=10):
1814 """Check if service was been started after a given time.
1815
1816 Args:
1817@@ -332,8 +334,9 @@
1818 mtime (float): The epoch time to check against
1819 service (string): service name to look for in process table
1820 pgrep_full: [Deprecated] Use full command line search mode with pgrep
1821- sleep_time (int): Seconds to sleep before looking for process
1822- retry_count (int): If service is not found, how many times to retry
1823+          sleep_time (int): Initial sleep time (s) before looking for process
1824+ retry_sleep_time (int): Time (s) to sleep between retries
1825+          retry_count (int): If process is not found, how many times to retry
1826
1827 Returns:
1828 bool: True if service found and its start time it newer than mtime,
1829@@ -357,11 +360,12 @@
1830 pgrep_full)
1831 self.log.debug('Attempt {} to get {} proc start time on {} '
1832 'OK'.format(tries, service, unit_name))
1833- except IOError:
1834+ except IOError as e:
1835 # NOTE(beisner) - race avoidance, proc may not exist yet.
1836 # https://bugs.launchpad.net/charm-helpers/+bug/1474030
1837 self.log.debug('Attempt {} to get {} proc start time on {} '
1838- 'failed'.format(tries, service, unit_name))
1839+ 'failed\n{}'.format(tries, service,
1840+ unit_name, e))
1841 time.sleep(retry_sleep_time)
1842 tries += 1
1843
1844@@ -381,38 +385,62 @@
1845 return False
1846
1847 def config_updated_since(self, sentry_unit, filename, mtime,
1848- sleep_time=20):
1849+ sleep_time=20, retry_count=30,
1850+ retry_sleep_time=10):
1851 """Check if file was modified after a given time.
1852
1853 Args:
1854 sentry_unit (sentry): The sentry unit to check the file mtime on
1855 filename (string): The file to check mtime of
1856 mtime (float): The epoch time to check against
1857- sleep_time (int): Seconds to sleep before looking for process
1858+ sleep_time (int): Initial sleep time (s) before looking for file
1859+ retry_sleep_time (int): Time (s) to sleep between retries
1860+ retry_count (int): If file is not found, how many times to retry
1861
1862 Returns:
1863 bool: True if file was modified more recently than mtime, False if
1864- file was modified before mtime,
1865+ file was modified before mtime, or if file not found.
1866 """
1867- self.log.debug('Checking that %s file updated since '
1868- '%s' % (filename, mtime))
1869 unit_name = sentry_unit.info['unit_name']
1870+ self.log.debug('Checking that %s updated since %s on '
1871+ '%s' % (filename, mtime, unit_name))
1872 time.sleep(sleep_time)
1873- file_mtime = self._get_file_mtime(sentry_unit, filename)
1874+ file_mtime = None
1875+ tries = 0
1876+ while tries <= retry_count and not file_mtime:
1877+ try:
1878+ file_mtime = self._get_file_mtime(sentry_unit, filename)
1879+ self.log.debug('Attempt {} to get {} file mtime on {} '
1880+ 'OK'.format(tries, filename, unit_name))
1881+ except IOError as e:
1882+ # NOTE(beisner) - race avoidance, file may not exist yet.
1883+ # https://bugs.launchpad.net/charm-helpers/+bug/1474030
1884+ self.log.debug('Attempt {} to get {} file mtime on {} '
1885+ 'failed\n{}'.format(tries, filename,
1886+ unit_name, e))
1887+ time.sleep(retry_sleep_time)
1888+ tries += 1
1889+
1890+ if not file_mtime:
1891+ self.log.warn('Could not determine file mtime, assuming '
1892+ 'file does not exist')
1893+ return False
1894+
1895 if file_mtime >= mtime:
1896 self.log.debug('File mtime is newer than provided mtime '
1897- '(%s >= %s) on %s (OK)' % (file_mtime, mtime,
1898- unit_name))
1899+ '(%s >= %s) on %s (OK)' % (file_mtime,
1900+ mtime, unit_name))
1901 return True
1902 else:
1903- self.log.warn('File mtime %s is older than provided mtime %s'
1904- % (file_mtime, mtime))
1905+            self.log.warn('File mtime is older than provided mtime '
1906+                          '(%s < %s) on %s' % (file_mtime,
1907+                                               mtime, unit_name))
1908 return False
1909
1910 def validate_service_config_changed(self, sentry_unit, mtime, service,
1911 filename, pgrep_full=None,
1912- sleep_time=20, retry_count=2,
1913- retry_sleep_time=30):
1914+ sleep_time=20, retry_count=30,
1915+ retry_sleep_time=10):
1916 """Check service and file were updated after mtime
1917
1918 Args:
1919@@ -457,7 +485,9 @@
1920 sentry_unit,
1921 filename,
1922 mtime,
1923- sleep_time=0)
1924+ sleep_time=sleep_time,
1925+ retry_count=retry_count,
1926+ retry_sleep_time=retry_sleep_time)
1927
1928 return service_restart and config_update
1929
1930@@ -476,7 +506,6 @@
1931 """Return a list of all Ubuntu releases in order of release."""
1932 _d = distro_info.UbuntuDistroInfo()
1933 _release_list = _d.all
1934- self.log.debug('Ubuntu release list: {}'.format(_release_list))
1935 return _release_list
1936
1937 def file_to_url(self, file_rel_path):
1938@@ -616,6 +645,142 @@
1939
1940 return None
1941
1942+ def validate_sectionless_conf(self, file_contents, expected):
1943+ """A crude conf parser. Useful to inspect configuration files which
1944+ do not have section headers (as would be necessary in order to use
1945+        the configparser), such as openstack-dashboard or rabbitmq confs."""
1946+ for line in file_contents.split('\n'):
1947+ if '=' in line:
1948+ args = line.split('=')
1949+ if len(args) <= 1:
1950+ continue
1951+ key = args[0].strip()
1952+ value = args[1].strip()
1953+ if key in expected.keys():
1954+ if expected[key] != value:
1955+ msg = ('Config mismatch. Expected, actual: {}, '
1956+ '{}'.format(expected[key], value))
1957+ amulet.raise_status(amulet.FAIL, msg=msg)
1958+
1959+ def get_unit_hostnames(self, units):
1960+ """Return a dict of juju unit names to hostnames."""
1961+ host_names = {}
1962+ for unit in units:
1963+ host_names[unit.info['unit_name']] = \
1964+ str(unit.file_contents('/etc/hostname').strip())
1965+ self.log.debug('Unit host names: {}'.format(host_names))
1966+ return host_names
1967+
1968+ def run_cmd_unit(self, sentry_unit, cmd):
1969+ """Run a command on a unit, return the output and exit code."""
1970+ output, code = sentry_unit.run(cmd)
1971+ if code == 0:
1972+ self.log.debug('{} `{}` command returned {} '
1973+ '(OK)'.format(sentry_unit.info['unit_name'],
1974+ cmd, code))
1975+ else:
1976+ msg = ('{} `{}` command returned {} '
1977+ '{}'.format(sentry_unit.info['unit_name'],
1978+ cmd, code, output))
1979+ amulet.raise_status(amulet.FAIL, msg=msg)
1980+ return str(output), code
1981+
1982+ def file_exists_on_unit(self, sentry_unit, file_name):
1983+ """Check if a file exists on a unit."""
1984+ try:
1985+ sentry_unit.file_stat(file_name)
1986+ return True
1987+ except IOError:
1988+ return False
1989+ except Exception as e:
1990+ msg = 'Error checking file {}: {}'.format(file_name, e)
1991+ amulet.raise_status(amulet.FAIL, msg=msg)
1992+
1993+ def file_contents_safe(self, sentry_unit, file_name,
1994+ max_wait=60, fatal=False):
1995+ """Get file contents from a sentry unit. Wrap amulet file_contents
1996+ with retry logic to address races where a file checks as existing,
1997+ but no longer exists by the time file_contents is called.
1998+ Return None if file not found. Optionally raise if fatal is True."""
1999+ unit_name = sentry_unit.info['unit_name']
2000+ file_contents = False
2001+ tries = 0
2002+ while not file_contents and tries < (max_wait / 4):
2003+ try:
2004+ file_contents = sentry_unit.file_contents(file_name)
2005+ except IOError:
2006+ self.log.debug('Attempt {} to open file {} from {} '
2007+ 'failed'.format(tries, file_name,
2008+ unit_name))
2009+ time.sleep(4)
2010+ tries += 1
2011+
2012+ if file_contents:
2013+ return file_contents
2014+ elif not fatal:
2015+ return None
2016+ elif fatal:
2017+ msg = 'Failed to get file contents from unit.'
2018+ amulet.raise_status(amulet.FAIL, msg)
2019+
2020+ def port_knock_tcp(self, host="localhost", port=22, timeout=15):
2021+        """Open a TCP socket to check for a listening service on a host.
2022+
2023+ :param host: host name or IP address, default to localhost
2024+ :param port: TCP port number, default to 22
2025+ :param timeout: Connect timeout, default to 15 seconds
2026+ :returns: True if successful, False if connect failed
2027+ """
2028+
2029+ # Resolve host name if possible
2030+ try:
2031+ connect_host = socket.gethostbyname(host)
2032+ host_human = "{} ({})".format(connect_host, host)
2033+ except socket.error as e:
2034+ self.log.warn('Unable to resolve address: '
2035+ '{} ({}) Trying anyway!'.format(host, e))
2036+ connect_host = host
2037+ host_human = connect_host
2038+
2039+ # Attempt socket connection
2040+ try:
2041+ knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
2042+ knock.settimeout(timeout)
2043+ knock.connect((connect_host, port))
2044+ knock.close()
2045+ self.log.debug('Socket connect OK for host '
2046+ '{} on port {}.'.format(host_human, port))
2047+ return True
2048+ except socket.error as e:
2049+ self.log.debug('Socket connect FAIL for'
2050+ ' {} port {} ({})'.format(host_human, port, e))
2051+ return False
2052+
2053+ def port_knock_units(self, sentry_units, port=22,
2054+ timeout=15, expect_success=True):
2055+        """Open a TCP socket to check for a listening service on each
2056+ listed juju unit.
2057+
2058+ :param sentry_units: list of sentry unit pointers
2059+ :param port: TCP port number, default to 22
2060+ :param timeout: Connect timeout, default to 15 seconds
2061+ :expect_success: True by default, set False to invert logic
2062+        :param expect_success: True by default, set False to invert logic
2063+ """
2064+ for unit in sentry_units:
2065+ host = unit.info['public-address']
2066+ connected = self.port_knock_tcp(host, port, timeout)
2067+ if not connected and expect_success:
2068+ return 'Socket connect failed.'
2069+ elif connected and not expect_success:
2070+ return 'Socket connected unexpectedly.'
2071+
2072+ def get_uuid_epoch_stamp(self):
2073+ """Returns a stamp string based on uuid4 and epoch time. Useful in
2074+ generating test messages which need to be unique-ish."""
2075+ return '[{}-{}]'.format(uuid.uuid4(), time.time())
2076+
2077+# amulet juju action helpers:
2078 def run_action(self, unit_sentry, action,
2079 _check_output=subprocess.check_output):
2080 """Run the named action on a given unit sentry.
2081@@ -642,3 +807,12 @@
2082 output = _check_output(command, universal_newlines=True)
2083 data = json.loads(output)
2084 return data.get(u"status") == "completed"
2085+
2086+ def status_get(self, unit):
2087+ """Return the current service status of this unit."""
2088+ raw_status, return_code = unit.run(
2089+ "status-get --format=json --include-data")
2090+ if return_code != 0:
2091+ return ("unknown", "")
2092+ status = json.loads(raw_status)
2093+ return (status["status"], status["message"])
2094
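The new generic helpers are aimed at workload-status and connectivity checks from amulet tests; typical use looks like this (an excerpt from a test: `u` is the AmuletUtils instance the test already holds, and the sentry names are illustrative):

    # Workload status as reported by `status-get` on the unit.
    state, message = u.status_get(self.keystone_sentry)
    assert state == 'active', message

    # port_knock_units returns None on success, or a failure message.
    assert u.port_knock_units([self.dashboard_sentry], port=443) is None
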
2095=== modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
2096--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-08-18 17:34:36 +0000
2097+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-10-10 23:44:18 +0000
2098@@ -44,20 +44,31 @@
2099 Determine if the local branch being tested is derived from its
2100 stable or next (dev) branch, and based on this, use the corresonding
2101 stable or next branches for the other_services."""
2102+
2103+ # Charms outside the lp:~openstack-charmers namespace
2104 base_charms = ['mysql', 'mongodb', 'nrpe']
2105
2106+ # Force these charms to current series even when using an older series.
2107+ # ie. Use trusty/nrpe even when series is precise, as the P charm
2108+ # does not possess the necessary external master config and hooks.
2109+ force_series_current = ['nrpe']
2110+
2111 if self.series in ['precise', 'trusty']:
2112 base_series = self.series
2113 else:
2114 base_series = self.current_next
2115
2116- if self.stable:
2117- for svc in other_services:
2118+ for svc in other_services:
2119+ if svc['name'] in force_series_current:
2120+ base_series = self.current_next
2121+ # If a location has been explicitly set, use it
2122+ if svc.get('location'):
2123+ continue
2124+ if self.stable:
2125 temp = 'lp:charms/{}/{}'
2126 svc['location'] = temp.format(base_series,
2127 svc['name'])
2128- else:
2129- for svc in other_services:
2130+ else:
2131 if svc['name'] in base_charms:
2132 temp = 'lp:charms/{}/{}'
2133 svc['location'] = temp.format(base_series,
2134@@ -66,6 +77,7 @@
2135 temp = 'lp:~openstack-charmers/charms/{}/{}/next'
2136 svc['location'] = temp.format(self.current_next,
2137 svc['name'])
2138+
2139 return other_services
2140
2141 def _add_services(self, this_service, other_services):
2142@@ -77,21 +89,23 @@
2143
2144 services = other_services
2145 services.append(this_service)
2146+
2147+ # Charms which should use the source config option
2148 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
2149 'ceph-osd', 'ceph-radosgw']
2150- # Most OpenStack subordinate charms do not expose an origin option
2151- # as that is controlled by the principle.
2152- ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
2153+
2154+ # Charms which can not use openstack-origin, ie. many subordinates
2155+ no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
2156
2157 if self.openstack:
2158 for svc in services:
2159- if svc['name'] not in use_source + ignore:
2160+ if svc['name'] not in use_source + no_origin:
2161 config = {'openstack-origin': self.openstack}
2162 self.d.configure(svc['name'], config)
2163
2164 if self.source:
2165 for svc in services:
2166- if svc['name'] in use_source and svc['name'] not in ignore:
2167+ if svc['name'] in use_source and svc['name'] not in no_origin:
2168 config = {'source': self.source}
2169 self.d.configure(svc['name'], config)
2170
2171
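The reworked branch-selection loop means an explicit location now always wins; a service entry such as the following is left untouched (the branch URL is illustrative):

    other_services = [
        {'name': 'rabbitmq-server'},
        # Explicit location: neither the stable nor the next branch
        # template is applied to this entry.
        {'name': 'nrpe',
         'location': 'lp:~openstack-charmers/charms/trusty/nrpe/next'},
    ]
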
2172=== modified file 'tests/charmhelpers/contrib/openstack/amulet/utils.py'
2173--- tests/charmhelpers/contrib/openstack/amulet/utils.py 2015-07-17 13:24:05 +0000
2174+++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2015-10-10 23:44:18 +0000
2175@@ -27,6 +27,7 @@
2176 import heatclient.v1.client as heat_client
2177 import keystoneclient.v2_0 as keystone_client
2178 import novaclient.v1_1.client as nova_client
2179+import pika
2180 import swiftclient
2181
2182 from charmhelpers.contrib.amulet.utils import (
2183@@ -602,3 +603,361 @@
2184 self.log.debug('Ceph {} samples (OK): '
2185 '{}'.format(sample_type, samples))
2186 return None
2187+
2188+# rabbitmq/amqp specific helpers:
2189+ def add_rmq_test_user(self, sentry_units,
2190+ username="testuser1", password="changeme"):
2191+ """Add a test user via the first rmq juju unit, check connection as
2192+ the new user against all sentry units.
2193+
2194+ :param sentry_units: list of sentry unit pointers
2195+ :param username: amqp user name, default to testuser1
2196+ :param password: amqp user password
2197+ :returns: None if successful. Raise on error.
2198+ """
2199+ self.log.debug('Adding rmq user ({})...'.format(username))
2200+
2201+ # Check that user does not already exist
2202+ cmd_user_list = 'rabbitmqctl list_users'
2203+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
2204+ if username in output:
2205+ self.log.warning('User ({}) already exists, returning '
2206+ 'gracefully.'.format(username))
2207+ return
2208+
2209+ perms = '".*" ".*" ".*"'
2210+ cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
2211+ 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
2212+
2213+ # Add user via first unit
2214+ for cmd in cmds:
2215+ output, _ = self.run_cmd_unit(sentry_units[0], cmd)
2216+
2217+ # Check connection against the other sentry_units
2218+ self.log.debug('Checking user connect against units...')
2219+ for sentry_unit in sentry_units:
2220+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
2221+ username=username,
2222+ password=password)
2223+ connection.close()
2224+
2225+ def delete_rmq_test_user(self, sentry_units, username="testuser1"):
2226+ """Delete a rabbitmq user via the first rmq juju unit.
2227+
2228+ :param sentry_units: list of sentry unit pointers
2229+ :param username: amqp user name, default to testuser1
2230+ :param password: amqp user password
2231+ :returns: None if successful or no such user.
2232+ """
2233+ self.log.debug('Deleting rmq user ({})...'.format(username))
2234+
2235+ # Check that the user exists
2236+ cmd_user_list = 'rabbitmqctl list_users'
2237+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
2238+
2239+ if username not in output:
2240+ self.log.warning('User ({}) does not exist, returning '
2241+ 'gracefully.'.format(username))
2242+ return
2243+
2244+ # Delete the user
2245+ cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
2246+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
2247+
2248+ def get_rmq_cluster_status(self, sentry_unit):
2249+ """Execute rabbitmq cluster status command on a unit and return
2250+ the full output.
2251+
2252+ :param unit: sentry unit
2253+ :returns: String containing console output of cluster status command
2254+ """
2255+ cmd = 'rabbitmqctl cluster_status'
2256+ output, _ = self.run_cmd_unit(sentry_unit, cmd)
2257+ self.log.debug('{} cluster_status:\n{}'.format(
2258+ sentry_unit.info['unit_name'], output))
2259+ return str(output)
2260+
2261+ def get_rmq_cluster_running_nodes(self, sentry_unit):
2262+ """Parse rabbitmqctl cluster_status output string, return list of
2263+ running rabbitmq cluster nodes.
2264+
2265+ :param unit: sentry unit
2266+ :returns: List containing node names of running nodes
2267+ """
2268+ # NOTE(beisner): rabbitmqctl cluster_status output is not
2269+ # json-parsable, do string chop foo, then json.loads that.
2270+ str_stat = self.get_rmq_cluster_status(sentry_unit)
2271+ if 'running_nodes' in str_stat:
2272+ pos_start = str_stat.find("{running_nodes,") + 15
2273+ pos_end = str_stat.find("]},", pos_start) + 1
2274+ str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
2275+ run_nodes = json.loads(str_run_nodes)
2276+ return run_nodes
2277+ else:
2278+ return []
2279+
2280+ def validate_rmq_cluster_running_nodes(self, sentry_units):
2281+ """Check that all rmq unit hostnames are represented in the
2282+ cluster_status output of all units.
2283+
2284+        :param sentry_units: list of sentry unit pointers for all rmq units
2285+                             (their hostnames must appear as running nodes)
2286+ :returns: None if successful, otherwise return error message
2287+ """
2288+ host_names = self.get_unit_hostnames(sentry_units)
2289+ errors = []
2290+
2291+ # Query every unit for cluster_status running nodes
2292+ for query_unit in sentry_units:
2293+ query_unit_name = query_unit.info['unit_name']
2294+ running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
2295+
2296+ # Confirm that every unit is represented in the queried unit's
2297+ # cluster_status running nodes output.
2298+ for validate_unit in sentry_units:
2299+ val_host_name = host_names[validate_unit.info['unit_name']]
2300+ val_node_name = 'rabbit@{}'.format(val_host_name)
2301+
2302+ if val_node_name not in running_nodes:
2303+ errors.append('Cluster member check failed on {}: {} not '
2304+ 'in {}\n'.format(query_unit_name,
2305+ val_node_name,
2306+ running_nodes))
2307+ if errors:
2308+ return ''.join(errors)
2309+
2310+ def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
2311+ """Check a single juju rmq unit for ssl and port in the config file."""
2312+ host = sentry_unit.info['public-address']
2313+ unit_name = sentry_unit.info['unit_name']
2314+
2315+ conf_file = '/etc/rabbitmq/rabbitmq.config'
2316+ conf_contents = str(self.file_contents_safe(sentry_unit,
2317+ conf_file, max_wait=16))
2318+ # Checks
2319+ conf_ssl = 'ssl' in conf_contents
2320+ conf_port = str(port) in conf_contents
2321+
2322+ # Port explicitly checked in config
2323+ if port and conf_port and conf_ssl:
2324+ self.log.debug('SSL is enabled @{}:{} '
2325+ '({})'.format(host, port, unit_name))
2326+ return True
2327+ elif port and not conf_port and conf_ssl:
2328+ self.log.debug('SSL is enabled @{} but not on port {} '
2329+ '({})'.format(host, port, unit_name))
2330+ return False
2331+ # Port not checked (useful when checking that ssl is disabled)
2332+ elif not port and conf_ssl:
2333+ self.log.debug('SSL is enabled @{}:{} '
2334+ '({})'.format(host, port, unit_name))
2335+ return True
2336+ elif not conf_ssl:
2337+ self.log.debug('SSL not enabled @{}:{} '
2338+ '({})'.format(host, port, unit_name))
2339+ return False
2340+ else:
2341+ msg = ('Unknown condition when checking SSL status @{}:{} '
2342+ '({})'.format(host, port, unit_name))
2343+ amulet.raise_status(amulet.FAIL, msg)
2344+
2345+ def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
2346+ """Check that ssl is enabled on rmq juju sentry units.
2347+
2348+ :param sentry_units: list of all rmq sentry units
2349+ :param port: optional ssl port override to validate
2350+ :returns: None if successful, otherwise return error message
2351+ """
2352+ for sentry_unit in sentry_units:
2353+ if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
2354+ return ('Unexpected condition: ssl is disabled on unit '
2355+ '({})'.format(sentry_unit.info['unit_name']))
2356+ return None
2357+
2358+ def validate_rmq_ssl_disabled_units(self, sentry_units):
2359+        """Check that ssl is disabled on listed rmq juju sentry units.
2360+
2361+ :param sentry_units: list of all rmq sentry units
2362+        :returns: None if successful, otherwise return error message
2363+ """
2364+ for sentry_unit in sentry_units:
2365+ if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
2366+ return ('Unexpected condition: ssl is enabled on unit '
2367+ '({})'.format(sentry_unit.info['unit_name']))
2368+ return None
2369+
2370+ def configure_rmq_ssl_on(self, sentry_units, deployment,
2371+ port=None, max_wait=60):
2372+ """Turn ssl charm config option on, with optional non-default
2373+ ssl port specification. Confirm that it is enabled on every
2374+ unit.
2375+
2376+ :param sentry_units: list of sentry units
2377+ :param deployment: amulet deployment object pointer
2378+ :param port: amqp port, use defaults if None
2379+ :param max_wait: maximum time to wait in seconds to confirm
2380+ :returns: None if successful. Raise on error.
2381+ """
2382+ self.log.debug('Setting ssl charm config option: on')
2383+
2384+ # Enable RMQ SSL
2385+ config = {'ssl': 'on'}
2386+ if port:
2387+ config['ssl_port'] = port
2388+
2389+ deployment.configure('rabbitmq-server', config)
2390+
2391+ # Confirm
2392+ tries = 0
2393+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
2394+ while ret and tries < (max_wait / 4):
2395+ time.sleep(4)
2396+ self.log.debug('Attempt {}: {}'.format(tries, ret))
2397+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
2398+ tries += 1
2399+
2400+ if ret:
2401+ amulet.raise_status(amulet.FAIL, ret)
2402+
2403+ def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
2404+ """Turn ssl charm config option off, confirm that it is disabled
2405+ on every unit.
2406+
2407+ :param sentry_units: list of sentry units
2408+ :param deployment: amulet deployment object pointer
2409+ :param max_wait: maximum time to wait in seconds to confirm
2410+ :returns: None if successful. Raise on error.
2411+ """
2412+ self.log.debug('Setting ssl charm config option: off')
2413+
2414+ # Disable RMQ SSL
2415+ config = {'ssl': 'off'}
2416+ deployment.configure('rabbitmq-server', config)
2417+
2418+ # Confirm
2419+ tries = 0
2420+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
2421+ while ret and tries < (max_wait / 4):
2422+ time.sleep(4)
2423+ self.log.debug('Attempt {}: {}'.format(tries, ret))
2424+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
2425+ tries += 1
2426+
2427+ if ret:
2428+ amulet.raise_status(amulet.FAIL, ret)
2429+
2430+ def connect_amqp_by_unit(self, sentry_unit, ssl=False,
2431+ port=None, fatal=True,
2432+ username="testuser1", password="changeme"):
2433+ """Establish and return a pika amqp connection to the rabbitmq service
2434+ running on a rmq juju unit.
2435+
2436+ :param sentry_unit: sentry unit pointer
2437+ :param ssl: boolean, default to False
2438+ :param port: amqp port, use defaults if None
2439+ :param fatal: boolean, default to True (raises on connect error)
2440+ :param username: amqp user name, default to testuser1
2441+ :param password: amqp user password
2442+ :returns: pika amqp connection pointer or None if failed and non-fatal
2443+ """
2444+ host = sentry_unit.info['public-address']
2445+ unit_name = sentry_unit.info['unit_name']
2446+
2447+ # Default port logic if port is not specified
2448+ if ssl and not port:
2449+ port = 5671
2450+ elif not ssl and not port:
2451+ port = 5672
2452+
2453+ self.log.debug('Connecting to amqp on {}:{} ({}) as '
2454+ '{}...'.format(host, port, unit_name, username))
2455+
2456+ try:
2457+ credentials = pika.PlainCredentials(username, password)
2458+ parameters = pika.ConnectionParameters(host=host, port=port,
2459+ credentials=credentials,
2460+ ssl=ssl,
2461+ connection_attempts=3,
2462+ retry_delay=5,
2463+ socket_timeout=1)
2464+ connection = pika.BlockingConnection(parameters)
2465+ assert connection.server_properties['product'] == 'RabbitMQ'
2466+ self.log.debug('Connect OK')
2467+ return connection
2468+ except Exception as e:
2469+ msg = ('amqp connection failed to {}:{} as '
2470+ '{} ({})'.format(host, port, username, str(e)))
2471+ if fatal:
2472+ amulet.raise_status(amulet.FAIL, msg)
2473+ else:
2474+ self.log.warn(msg)
2475+ return None
2476+
2477+ def publish_amqp_message_by_unit(self, sentry_unit, message,
2478+ queue="test", ssl=False,
2479+ username="testuser1",
2480+ password="changeme",
2481+ port=None):
2482+ """Publish an amqp message to a rmq juju unit.
2483+
2484+ :param sentry_unit: sentry unit pointer
2485+ :param message: amqp message string
2486+ :param queue: message queue, default to test
2487+ :param username: amqp user name, default to testuser1
2488+ :param password: amqp user password
2489+ :param ssl: boolean, default to False
2490+ :param port: amqp port, use defaults if None
2491+ :returns: None. Raises exception if publish failed.
2492+ """
2493+ self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
2494+ message))
2495+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
2496+ port=port,
2497+ username=username,
2498+ password=password)
2499+
2500+ # NOTE(beisner): extra debug here re: pika hang potential:
2501+ # https://github.com/pika/pika/issues/297
2502+ # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
2503+ self.log.debug('Defining channel...')
2504+ channel = connection.channel()
2505+ self.log.debug('Declaring queue...')
2506+ channel.queue_declare(queue=queue, auto_delete=False, durable=True)
2507+ self.log.debug('Publishing message...')
2508+ channel.basic_publish(exchange='', routing_key=queue, body=message)
2509+ self.log.debug('Closing channel...')
2510+ channel.close()
2511+ self.log.debug('Closing connection...')
2512+ connection.close()
2513+
2514+ def get_amqp_message_by_unit(self, sentry_unit, queue="test",
2515+ username="testuser1",
2516+ password="changeme",
2517+ ssl=False, port=None):
2518+ """Get an amqp message from a rmq juju unit.
2519+
2520+ :param sentry_unit: sentry unit pointer
2521+ :param queue: message queue, default to test
2522+ :param username: amqp user name, default to testuser1
2523+ :param password: amqp user password
2524+ :param ssl: boolean, default to False
2525+ :param port: amqp port, use defaults if None
2526+ :returns: amqp message body as string. Raise if get fails.
2527+ """
2528+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
2529+ port=port,
2530+ username=username,
2531+ password=password)
2532+ channel = connection.channel()
2533+ method_frame, _, body = channel.basic_get(queue)
2534+
2535+ if method_frame:
2536+            self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
2537+ body))
2538+ channel.basic_ack(method_frame.delivery_tag)
2539+ channel.close()
2540+ connection.close()
2541+ return body
2542+ else:
2543+ msg = 'No message retrieved.'
2544+ amulet.raise_status(amulet.FAIL, msg)
2545
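The amqp helpers are designed to be chained into a publish/consume round trip from a test; roughly (an excerpt: `u` is the OpenStackAmuletUtils instance and `rmq_sentries` is the list of rabbitmq-server unit sentries held by the test):

    u.add_rmq_test_user(rmq_sentries)
    message = 'test message {}'.format(u.get_uuid_epoch_stamp())

    u.publish_amqp_message_by_unit(rmq_sentries[0], message)
    received = u.get_amqp_message_by_unit(rmq_sentries[1])
    assert received == message

    u.delete_rmq_test_user(rmq_sentries)
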
2546=== modified file 'unit_tests/test_horizon_contexts.py'
2547--- unit_tests/test_horizon_contexts.py 2015-09-25 02:05:05 +0000
2548+++ unit_tests/test_horizon_contexts.py 2015-10-10 23:44:18 +0000
2549@@ -239,7 +239,7 @@
2550 'dash_secure': [443, 433]},
2551 'prefer_ipv6': False})
2552 _open.assert_called_with('/etc/default/haproxy', 'w')
2553- _file.write.assert_called()
2554+ self.assertTrue(_file.write.called)
2555
2556 def test_HorizonHAProxyContext_clustered(self):
2557 self.relation_ids.return_value = ['cluster:0']
2558@@ -258,7 +258,7 @@
2559 'dash_secure': [443, 433]},
2560 'prefer_ipv6': False})
2561 _open.assert_called_with('/etc/default/haproxy', 'w')
2562- _file.write.assert_called()
2563+ self.assertTrue(_file.write.called)
2564
2565 def test_RouterSettingContext(self):
2566 self.test_config.set('profile', 'cisco')
2567
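The assert_called() cleanups in this and the following test files matter because, with the mock release these unit tests run against (assumed here to be mock 1.0.x as shipped on trusty, which predates Mock.assert_called), the old calls were not assertions at all:

    from mock import Mock   # mock 1.0.x behaviour assumed here

    m = Mock()
    m.assert_called()            # silently "passes": it merely creates and
                                 # calls an auto-generated child attribute
    assert m.called is False     # .called is the check that actually works
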
2568=== modified file 'unit_tests/test_horizon_hooks.py'
2569--- unit_tests/test_horizon_hooks.py 2015-09-28 19:48:18 +0000
2570+++ unit_tests/test_horizon_hooks.py 2015-10-10 23:44:18 +0000
2571@@ -37,6 +37,7 @@
2572 'git_post_install_late',
2573 'update_nrpe_config',
2574 'lsb_release',
2575+ 'status_set',
2576 ]
2577
2578
2579@@ -85,23 +86,23 @@
2580 def test_install_hook_icehouse_pkgs(self, _git_requested):
2581 _git_requested.return_value = False
2582 self.os_release.return_value = 'icehouse'
2583- self._call_hook('install')
2584+ self._call_hook('install.real')
2585 for pkg in ['nodejs', 'node-less']:
2586 self.assertFalse(
2587 pkg in self.filter_installed_packages.call_args[0][0]
2588 )
2589- self.apt_install.assert_called()
2590+ self.assertTrue(self.apt_install.called)
2591
2592 @patch.object(utils, 'git_install_requested')
2593 def test_install_hook_pre_icehouse_pkgs(self, _git_requested):
2594 _git_requested.return_value = False
2595 self.os_release.return_value = 'grizzly'
2596- self._call_hook('install')
2597+ self._call_hook('install.real')
2598 for pkg in ['nodejs', 'node-less']:
2599 self.assertTrue(
2600 pkg in self.filter_installed_packages.call_args[0][0]
2601 )
2602- self.apt_install.assert_called()
2603+ self.assertTrue(self.apt_install.called)
2604
2605 @patch.object(utils, 'git_install_requested')
2606 def test_install_hook_git(self, _git_requested):
2607@@ -141,7 +142,7 @@
2608 self.filter_installed_packages.return_value = ['foo']
2609 self._call_hook('upgrade-charm')
2610 self.apt_install.assert_called_with(['foo'], fatal=True)
2611- self.CONFIGS.write_all.assert_called()
2612+ self.assertTrue(self.CONFIGS.write_all.called)
2613 ex = [
2614 call('restart', 'apache2'),
2615 call('restart', 'haproxy')
2616@@ -224,10 +225,10 @@
2617 self.openstack_upgrade_available.assert_called_with(
2618 'openstack-dashboard'
2619 )
2620- self.enable_ssl.assert_called()
2621+ self.assertTrue(self.enable_ssl.called)
2622 self.do_openstack_upgrade.assert_not_called()
2623- self.save_script_rc.assert_called()
2624- self.CONFIGS.write_all.assert_called()
2625+ self.assertTrue(self.save_script_rc.called)
2626+ self.assertTrue(self.CONFIGS.write_all.called)
2627 self.open_port.assert_has_calls([call(80), call(443)])
2628
2629 @patch.object(hooks, 'git_install_requested')
2630@@ -237,7 +238,7 @@
2631 self.test_config.set('openstack-origin', 'cloud:precise-grizzly')
2632 self.openstack_upgrade_available.return_value = True
2633 self._call_hook('config-changed')
2634- self.do_openstack_upgrade.assert_called()
2635+ self.assertTrue(self.do_openstack_upgrade.called)
2636
2637 @patch.object(hooks, 'git_install_requested')
2638 @patch.object(hooks, 'config_value_changed')
2639@@ -338,16 +339,3 @@
2640 openstack_dir='/usr/share/openstack-dashboard',
2641 relation_id=None
2642 )
2643-
2644- @patch('sys.argv')
2645- @patch.object(hooks, 'install')
2646- def test_main_hook_exists(self, _install, _argv):
2647- _argv = ['hooks/install'] # NOQA
2648- hooks.main()
2649- _install.assert_called()
2650-
2651- @patch('sys.argv')
2652- def test_main_hook_missing(self, _argv):
2653- _argv = ['hooks/start'] # NOQA
2654- hooks.main()
2655- self.log.assert_called()
2656
2657=== modified file 'unit_tests/test_horizon_utils.py'
2658--- unit_tests/test_horizon_utils.py 2015-08-27 15:02:34 +0000
2659+++ unit_tests/test_horizon_utils.py 2015-10-10 23:44:18 +0000
2660@@ -66,7 +66,7 @@
2661 configs = MagicMock()
2662 horizon_utils.do_openstack_upgrade(configs)
2663 configs.set_release.assert_called_with(openstack_release='havana')
2664- self.log.assert_called()
2665+ self.assertTrue(self.log.called)
2666 self.apt_update.assert_called_with(fatal=True)
2667 dpkg_opts = [
2668 '--option', 'Dpkg::Options::=--force-confnew',

Subscribers

People subscribed via source and target branches