Merge lp:~tribaal/charms/trusty/rabbitmq-server/resync-charm-helpers into lp:charms/trusty/rabbitmq-server

Proposed by Chris Glass
Status: Merged
Merged at revision: 60
Proposed branch: lp:~tribaal/charms/trusty/rabbitmq-server/resync-charm-helpers
Merge into: lp:charms/trusty/rabbitmq-server
Diff against target: 3241 lines (+2078/-264)
25 files modified
hooks/charmhelpers/contrib/charmsupport/volumes.py (+5/-2)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+59/-17)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+94/-0)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+276/-0)
hooks/charmhelpers/contrib/openstack/context.py (+187/-47)
hooks/charmhelpers/contrib/openstack/ip.py (+79/-0)
hooks/charmhelpers/contrib/openstack/neutron.py (+31/-1)
hooks/charmhelpers/contrib/openstack/templating.py (+22/-23)
hooks/charmhelpers/contrib/openstack/utils.py (+18/-7)
hooks/charmhelpers/contrib/peerstorage/__init__.py (+77/-29)
hooks/charmhelpers/contrib/ssl/service.py (+1/-1)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+1/-1)
hooks/charmhelpers/contrib/storage/linux/lvm.py (+1/-1)
hooks/charmhelpers/contrib/storage/linux/utils.py (+23/-5)
hooks/charmhelpers/core/fstab.py (+116/-0)
hooks/charmhelpers/core/hookenv.py (+132/-7)
hooks/charmhelpers/core/host.py (+100/-12)
hooks/charmhelpers/core/services/__init__.py (+2/-0)
hooks/charmhelpers/core/services/base.py (+313/-0)
hooks/charmhelpers/core/services/helpers.py (+239/-0)
hooks/charmhelpers/core/templating.py (+51/-0)
hooks/charmhelpers/fetch/__init__.py (+192/-90)
hooks/charmhelpers/fetch/archiveurl.py (+49/-4)
hooks/charmhelpers/fetch/bzrurl.py (+2/-1)
hooks/rabbit_utils.py (+8/-16)
To merge this branch: bzr merge lp:~tribaal/charms/trusty/rabbitmq-server/resync-charm-helpers
Reviewer: David Britton (community)
Status: Approve
Review via email: mp+236072@code.launchpad.net

Description of the change

This branch resyncs charm-helpers so that the charm benefits from the in-memory apt-cache index and does not run into race conditions with other charms.

It also uses the charm-helpers package version comparison instead of its own (again, to avoid grabbing the apt index lock for nothing).

The causes and fixes are similar to https://bugs.launchpad.net/charms/+source/ceph/+bug/1346489.
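
For reference, here is a minimal sketch of the two mechanisms the description relies on: an apt cache built purely in memory, and a python-apt based version comparison. The apt_cache() helper name is visible in the diff below; the bodies shown here, and the cmp_pkgrevno() signature, are illustrative assumptions rather than verbatim code from the branch.

    # Sketch only - illustrates an in-memory apt cache and a python-apt
    # version comparison; not copied verbatim from the resynced helpers.
    import apt_pkg


    def apt_cache(in_memory=True):
        """Return an apt_pkg.Cache that never writes the on-disk binary
        cache, so a hook can inspect package state without contending with
        other charms for /var/cache/apt."""
        apt_pkg.init()
        if in_memory:
            # An empty value keeps pkgcache/srcpkgcache in memory only.
            apt_pkg.config.set("Dir::Cache::pkgcache", "")
            apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
        return apt_pkg.Cache()


    def cmp_pkgrevno(package, revno, pkgcache=None):
        """Compare the installed version of `package` against `revno`.
        Returns > 0 if the installed version is newer, 0 if equal, < 0 if
        older. Assumes the package is installed (current_ver is not None)."""
        if pkgcache is None:
            pkgcache = apt_cache()
        pkg = pkgcache[package]
        return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)


    # Hypothetical usage: gate behaviour on the installed rabbitmq-server
    # version without ever touching the apt lock.
    if cmp_pkgrevno('rabbitmq-server', '3.0.0') >= 0:
        pass  # new-style configuration

Because the binary cache files are never written under /var/cache/apt, concurrently running hooks on the same machine do not contend for the apt index lock, which is the failure mode the description and the linked ceph bug refer to.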

David Britton (dpb) wrote:

This looks great! I deployed it and it worked fine (since it was more than just a charm-helpers sync, I wanted to check). Thanks, Chris!

review: Approve
Michael Hudson-Doyle (mwhudson) wrote:

Hi, I'm afraid this broke the amqp-relation-changed hook https://bugs.launchpad.net/charms/+source/rabbitmq-server/+bug/1375084

David Britton (dpb) wrote:

Thanks @Michael, I put up a follow-on MP:

https://code.launchpad.net/~davidpbritton/charms/trusty/rabbitmq-server/compare-version-1375084/+merge/236279


Preview Diff

1=== modified file 'hooks/charmhelpers/contrib/charmsupport/volumes.py'
2--- hooks/charmhelpers/contrib/charmsupport/volumes.py 2014-03-05 12:57:20 +0000
3+++ hooks/charmhelpers/contrib/charmsupport/volumes.py 2014-09-26 08:15:24 +0000
4@@ -2,7 +2,8 @@
5 Functions for managing volumes in juju units. One volume is supported per unit.
6 Subordinates may have their own storage, provided it is on its own partition.
7
8-Configuration stanzas:
9+Configuration stanzas::
10+
11 volume-ephemeral:
12 type: boolean
13 default: true
14@@ -20,7 +21,8 @@
15 is 'true' and no volume-map value is set. Use 'juju set' to set a
16 value and 'juju resolved' to complete configuration.
17
18-Usage:
19+Usage::
20+
21 from charmsupport.volumes import configure_volume, VolumeConfigurationError
22 from charmsupport.hookenv import log, ERROR
23 def post_mount_hook():
24@@ -34,6 +36,7 @@
25 after_change=post_mount_hook)
26 except VolumeConfigurationError:
27 log('Storage could not be configured', ERROR)
28+
29 '''
30
31 # XXX: Known limitations
32
33=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
34--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-03-05 12:57:20 +0000
35+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-09-26 08:15:24 +0000
36@@ -6,6 +6,11 @@
37 # Adam Gandelman <adamg@ubuntu.com>
38 #
39
40+"""
41+Helpers for clustering and determining "cluster leadership" and other
42+clustering-related helpers.
43+"""
44+
45 import subprocess
46 import os
47
48@@ -19,6 +24,7 @@
49 config as config_get,
50 INFO,
51 ERROR,
52+ WARNING,
53 unit_get,
54 )
55
56@@ -27,6 +33,29 @@
57 pass
58
59
60+def is_elected_leader(resource):
61+ """
62+ Returns True if the charm executing this is the elected cluster leader.
63+
64+ It relies on two mechanisms to determine leadership:
65+ 1. If the charm is part of a corosync cluster, call corosync to
66+ determine leadership.
67+ 2. If the charm is not part of a corosync cluster, the leader is
68+ determined as being "the alive unit with the lowest unit numer". In
69+ other words, the oldest surviving unit.
70+ """
71+ if is_clustered():
72+ if not is_crm_leader(resource):
73+ log('Deferring action to CRM leader.', level=INFO)
74+ return False
75+ else:
76+ peers = peer_units()
77+ if peers and not oldest_peer(peers):
78+ log('Deferring action to oldest service unit.', level=INFO)
79+ return False
80+ return True
81+
82+
83 def is_clustered():
84 for r_id in (relation_ids('ha') or []):
85 for unit in (relation_list(r_id) or []):
86@@ -38,7 +67,11 @@
87 return False
88
89
90-def is_leader(resource):
91+def is_crm_leader(resource):
92+ """
93+ Returns True if the charm calling this is the elected corosync leader,
94+ as returned by calling the external "crm" command.
95+ """
96 cmd = [
97 "crm", "resource",
98 "show", resource
99@@ -54,15 +87,31 @@
100 return False
101
102
103-def peer_units():
104+def is_leader(resource):
105+ log("is_leader is deprecated. Please consider using is_crm_leader "
106+ "instead.", level=WARNING)
107+ return is_crm_leader(resource)
108+
109+
110+def peer_units(peer_relation="cluster"):
111 peers = []
112- for r_id in (relation_ids('cluster') or []):
113+ for r_id in (relation_ids(peer_relation) or []):
114 for unit in (relation_list(r_id) or []):
115 peers.append(unit)
116 return peers
117
118
119+def peer_ips(peer_relation='cluster', addr_key='private-address'):
120+ '''Return a dict of peers and their private-address'''
121+ peers = {}
122+ for r_id in relation_ids(peer_relation):
123+ for unit in relation_list(r_id):
124+ peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
125+ return peers
126+
127+
128 def oldest_peer(peers):
129+ """Determines who the oldest peer is by comparing unit numbers."""
130 local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
131 for peer in peers:
132 remote_unit_no = int(peer.split('/')[1])
133@@ -72,16 +121,9 @@
134
135
136 def eligible_leader(resource):
137- if is_clustered():
138- if not is_leader(resource):
139- log('Deferring action to CRM leader.', level=INFO)
140- return False
141- else:
142- peers = peer_units()
143- if peers and not oldest_peer(peers):
144- log('Deferring action to oldest service unit.', level=INFO)
145- return False
146- return True
147+ log("eligible_leader is deprecated. Please consider using "
148+ "is_elected_leader instead.", level=WARNING)
149+ return is_elected_leader(resource)
150
151
152 def https():
153@@ -97,10 +139,9 @@
154 return True
155 for r_id in relation_ids('identity-service'):
156 for unit in relation_list(r_id):
157+ # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
158 rel_state = [
159 relation_get('https_keystone', rid=r_id, unit=unit),
160- relation_get('ssl_cert', rid=r_id, unit=unit),
161- relation_get('ssl_key', rid=r_id, unit=unit),
162 relation_get('ca_cert', rid=r_id, unit=unit),
163 ]
164 # NOTE: works around (LP: #1203241)
165@@ -146,12 +187,12 @@
166 Obtains all relevant configuration from charm configuration required
167 for initiating a relation to hacluster:
168
169- ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
170+ ha-bindiface, ha-mcastport, vip
171
172 returns: dict: A dict containing settings keyed by setting name.
173 raises: HAIncompleteConfig if settings are missing.
174 '''
175- settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
176+ settings = ['ha-bindiface', 'ha-mcastport', 'vip']
177 conf = {}
178 for setting in settings:
179 conf[setting] = config_get(setting)
180@@ -170,6 +211,7 @@
181
182 :configs : OSTemplateRenderer: A config tempating object to inspect for
183 a complete https context.
184+
185 :vip_setting: str: Setting in charm config that specifies
186 VIP address.
187 '''
188
189=== added directory 'hooks/charmhelpers/contrib/openstack/amulet'
190=== added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py'
191=== added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
192--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
193+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-09-26 08:15:24 +0000
194@@ -0,0 +1,94 @@
195+from bzrlib.branch import Branch
196+import os
197+import re
198+from charmhelpers.contrib.amulet.deployment import (
199+ AmuletDeployment
200+)
201+
202+
203+class OpenStackAmuletDeployment(AmuletDeployment):
204+ """OpenStack amulet deployment.
205+
206+ This class inherits from AmuletDeployment and has additional support
207+ that is specifically for use by OpenStack charms.
208+ """
209+
210+ def __init__(self, series=None, openstack=None, source=None):
211+ """Initialize the deployment environment."""
212+ super(OpenStackAmuletDeployment, self).__init__(series)
213+ self.openstack = openstack
214+ self.source = source
215+
216+ def _is_dev_branch(self):
217+ """Determine if branch being tested is a dev (i.e. next) branch."""
218+ branch = Branch.open(os.getcwd())
219+ parent = branch.get_parent()
220+ pattern = re.compile("^.*/next/$")
221+ if (pattern.match(parent)):
222+ return True
223+ else:
224+ return False
225+
226+ def _determine_branch_locations(self, other_services):
227+ """Determine the branch locations for the other services.
228+
229+ If the branch being tested is a dev branch, then determine the
230+ development branch locations for the other services. Otherwise,
231+ the default charm store branches will be used."""
232+ name = 0
233+ if self._is_dev_branch():
234+ updated_services = []
235+ for svc in other_services:
236+ if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']:
237+ location = 'lp:charms/{}'.format(svc[name])
238+ else:
239+ temp = 'lp:~openstack-charmers/charms/trusty/{}/next'
240+ location = temp.format(svc[name])
241+ updated_services.append(svc + (location,))
242+ other_services = updated_services
243+ return other_services
244+
245+ def _add_services(self, this_service, other_services):
246+ """Add services to the deployment and set openstack-origin/source."""
247+ name = 0
248+ other_services = self._determine_branch_locations(other_services)
249+ super(OpenStackAmuletDeployment, self)._add_services(this_service,
250+ other_services)
251+ services = other_services
252+ services.append(this_service)
253+ use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
254+
255+ if self.openstack:
256+ for svc in services:
257+ if svc[name] not in use_source:
258+ config = {'openstack-origin': self.openstack}
259+ self.d.configure(svc[name], config)
260+
261+ if self.source:
262+ for svc in services:
263+ if svc[name] in use_source:
264+ config = {'source': self.source}
265+ self.d.configure(svc[name], config)
266+
267+ def _configure_services(self, configs):
268+ """Configure all of the services."""
269+ for service, config in configs.iteritems():
270+ self.d.configure(service, config)
271+
272+ def _get_openstack_release(self):
273+ """Get openstack release.
274+
275+ Return an integer representing the enum value of the openstack
276+ release.
277+ """
278+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
279+ self.precise_havana, self.precise_icehouse,
280+ self.trusty_icehouse) = range(6)
281+ releases = {
282+ ('precise', None): self.precise_essex,
283+ ('precise', 'cloud:precise-folsom'): self.precise_folsom,
284+ ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
285+ ('precise', 'cloud:precise-havana'): self.precise_havana,
286+ ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
287+ ('trusty', None): self.trusty_icehouse}
288+ return releases[(self.series, self.openstack)]
289
290=== added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
291--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
292+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-09-26 08:15:24 +0000
293@@ -0,0 +1,276 @@
294+import logging
295+import os
296+import time
297+import urllib
298+
299+import glanceclient.v1.client as glance_client
300+import keystoneclient.v2_0 as keystone_client
301+import novaclient.v1_1.client as nova_client
302+
303+from charmhelpers.contrib.amulet.utils import (
304+ AmuletUtils
305+)
306+
307+DEBUG = logging.DEBUG
308+ERROR = logging.ERROR
309+
310+
311+class OpenStackAmuletUtils(AmuletUtils):
312+ """OpenStack amulet utilities.
313+
314+ This class inherits from AmuletUtils and has additional support
315+ that is specifically for use by OpenStack charms.
316+ """
317+
318+ def __init__(self, log_level=ERROR):
319+ """Initialize the deployment environment."""
320+ super(OpenStackAmuletUtils, self).__init__(log_level)
321+
322+ def validate_endpoint_data(self, endpoints, admin_port, internal_port,
323+ public_port, expected):
324+ """Validate endpoint data.
325+
326+ Validate actual endpoint data vs expected endpoint data. The ports
327+ are used to find the matching endpoint.
328+ """
329+ found = False
330+ for ep in endpoints:
331+ self.log.debug('endpoint: {}'.format(repr(ep)))
332+ if (admin_port in ep.adminurl and
333+ internal_port in ep.internalurl and
334+ public_port in ep.publicurl):
335+ found = True
336+ actual = {'id': ep.id,
337+ 'region': ep.region,
338+ 'adminurl': ep.adminurl,
339+ 'internalurl': ep.internalurl,
340+ 'publicurl': ep.publicurl,
341+ 'service_id': ep.service_id}
342+ ret = self._validate_dict_data(expected, actual)
343+ if ret:
344+ return 'unexpected endpoint data - {}'.format(ret)
345+
346+ if not found:
347+ return 'endpoint not found'
348+
349+ def validate_svc_catalog_endpoint_data(self, expected, actual):
350+ """Validate service catalog endpoint data.
351+
352+ Validate a list of actual service catalog endpoints vs a list of
353+ expected service catalog endpoints.
354+ """
355+ self.log.debug('actual: {}'.format(repr(actual)))
356+ for k, v in expected.iteritems():
357+ if k in actual:
358+ ret = self._validate_dict_data(expected[k][0], actual[k][0])
359+ if ret:
360+ return self.endpoint_error(k, ret)
361+ else:
362+ return "endpoint {} does not exist".format(k)
363+ return ret
364+
365+ def validate_tenant_data(self, expected, actual):
366+ """Validate tenant data.
367+
368+ Validate a list of actual tenant data vs list of expected tenant
369+ data.
370+ """
371+ self.log.debug('actual: {}'.format(repr(actual)))
372+ for e in expected:
373+ found = False
374+ for act in actual:
375+ a = {'enabled': act.enabled, 'description': act.description,
376+ 'name': act.name, 'id': act.id}
377+ if e['name'] == a['name']:
378+ found = True
379+ ret = self._validate_dict_data(e, a)
380+ if ret:
381+ return "unexpected tenant data - {}".format(ret)
382+ if not found:
383+ return "tenant {} does not exist".format(e['name'])
384+ return ret
385+
386+ def validate_role_data(self, expected, actual):
387+ """Validate role data.
388+
389+ Validate a list of actual role data vs a list of expected role
390+ data.
391+ """
392+ self.log.debug('actual: {}'.format(repr(actual)))
393+ for e in expected:
394+ found = False
395+ for act in actual:
396+ a = {'name': act.name, 'id': act.id}
397+ if e['name'] == a['name']:
398+ found = True
399+ ret = self._validate_dict_data(e, a)
400+ if ret:
401+ return "unexpected role data - {}".format(ret)
402+ if not found:
403+ return "role {} does not exist".format(e['name'])
404+ return ret
405+
406+ def validate_user_data(self, expected, actual):
407+ """Validate user data.
408+
409+ Validate a list of actual user data vs a list of expected user
410+ data.
411+ """
412+ self.log.debug('actual: {}'.format(repr(actual)))
413+ for e in expected:
414+ found = False
415+ for act in actual:
416+ a = {'enabled': act.enabled, 'name': act.name,
417+ 'email': act.email, 'tenantId': act.tenantId,
418+ 'id': act.id}
419+ if e['name'] == a['name']:
420+ found = True
421+ ret = self._validate_dict_data(e, a)
422+ if ret:
423+ return "unexpected user data - {}".format(ret)
424+ if not found:
425+ return "user {} does not exist".format(e['name'])
426+ return ret
427+
428+ def validate_flavor_data(self, expected, actual):
429+ """Validate flavor data.
430+
431+ Validate a list of actual flavors vs a list of expected flavors.
432+ """
433+ self.log.debug('actual: {}'.format(repr(actual)))
434+ act = [a.name for a in actual]
435+ return self._validate_list_data(expected, act)
436+
437+ def tenant_exists(self, keystone, tenant):
438+ """Return True if tenant exists."""
439+ return tenant in [t.name for t in keystone.tenants.list()]
440+
441+ def authenticate_keystone_admin(self, keystone_sentry, user, password,
442+ tenant):
443+ """Authenticates admin user with the keystone admin endpoint."""
444+ unit = keystone_sentry
445+ service_ip = unit.relation('shared-db',
446+ 'mysql:shared-db')['private-address']
447+ ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
448+ return keystone_client.Client(username=user, password=password,
449+ tenant_name=tenant, auth_url=ep)
450+
451+ def authenticate_keystone_user(self, keystone, user, password, tenant):
452+ """Authenticates a regular user with the keystone public endpoint."""
453+ ep = keystone.service_catalog.url_for(service_type='identity',
454+ endpoint_type='publicURL')
455+ return keystone_client.Client(username=user, password=password,
456+ tenant_name=tenant, auth_url=ep)
457+
458+ def authenticate_glance_admin(self, keystone):
459+ """Authenticates admin user with glance."""
460+ ep = keystone.service_catalog.url_for(service_type='image',
461+ endpoint_type='adminURL')
462+ return glance_client.Client(ep, token=keystone.auth_token)
463+
464+ def authenticate_nova_user(self, keystone, user, password, tenant):
465+ """Authenticates a regular user with nova-api."""
466+ ep = keystone.service_catalog.url_for(service_type='identity',
467+ endpoint_type='publicURL')
468+ return nova_client.Client(username=user, api_key=password,
469+ project_id=tenant, auth_url=ep)
470+
471+ def create_cirros_image(self, glance, image_name):
472+ """Download the latest cirros image and upload it to glance."""
473+ http_proxy = os.getenv('AMULET_HTTP_PROXY')
474+ self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
475+ if http_proxy:
476+ proxies = {'http': http_proxy}
477+ opener = urllib.FancyURLopener(proxies)
478+ else:
479+ opener = urllib.FancyURLopener()
480+
481+ f = opener.open("http://download.cirros-cloud.net/version/released")
482+ version = f.read().strip()
483+ cirros_img = "cirros-{}-x86_64-disk.img".format(version)
484+ local_path = os.path.join('tests', cirros_img)
485+
486+ if not os.path.exists(local_path):
487+ cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
488+ version, cirros_img)
489+ opener.retrieve(cirros_url, local_path)
490+ f.close()
491+
492+ with open(local_path) as f:
493+ image = glance.images.create(name=image_name, is_public=True,
494+ disk_format='qcow2',
495+ container_format='bare', data=f)
496+ count = 1
497+ status = image.status
498+ while status != 'active' and count < 10:
499+ time.sleep(3)
500+ image = glance.images.get(image.id)
501+ status = image.status
502+ self.log.debug('image status: {}'.format(status))
503+ count += 1
504+
505+ if status != 'active':
506+ self.log.error('image creation timed out')
507+ return None
508+
509+ return image
510+
511+ def delete_image(self, glance, image):
512+ """Delete the specified image."""
513+ num_before = len(list(glance.images.list()))
514+ glance.images.delete(image)
515+
516+ count = 1
517+ num_after = len(list(glance.images.list()))
518+ while num_after != (num_before - 1) and count < 10:
519+ time.sleep(3)
520+ num_after = len(list(glance.images.list()))
521+ self.log.debug('number of images: {}'.format(num_after))
522+ count += 1
523+
524+ if num_after != (num_before - 1):
525+ self.log.error('image deletion timed out')
526+ return False
527+
528+ return True
529+
530+ def create_instance(self, nova, image_name, instance_name, flavor):
531+ """Create the specified instance."""
532+ image = nova.images.find(name=image_name)
533+ flavor = nova.flavors.find(name=flavor)
534+ instance = nova.servers.create(name=instance_name, image=image,
535+ flavor=flavor)
536+
537+ count = 1
538+ status = instance.status
539+ while status != 'ACTIVE' and count < 60:
540+ time.sleep(3)
541+ instance = nova.servers.get(instance.id)
542+ status = instance.status
543+ self.log.debug('instance status: {}'.format(status))
544+ count += 1
545+
546+ if status != 'ACTIVE':
547+ self.log.error('instance creation timed out')
548+ return None
549+
550+ return instance
551+
552+ def delete_instance(self, nova, instance):
553+ """Delete the specified instance."""
554+ num_before = len(list(nova.servers.list()))
555+ nova.servers.delete(instance)
556+
557+ count = 1
558+ num_after = len(list(nova.servers.list()))
559+ while num_after != (num_before - 1) and count < 10:
560+ time.sleep(3)
561+ num_after = len(list(nova.servers.list()))
562+ self.log.debug('number of instances: {}'.format(num_after))
563+ count += 1
564+
565+ if num_after != (num_before - 1):
566+ self.log.error('instance deletion timed out')
567+ return False
568+
569+ return True
570
571=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
572--- hooks/charmhelpers/contrib/openstack/context.py 2014-04-10 16:56:26 +0000
573+++ hooks/charmhelpers/contrib/openstack/context.py 2014-09-26 08:15:24 +0000
574@@ -8,7 +8,6 @@
575 check_call
576 )
577
578-
579 from charmhelpers.fetch import (
580 apt_install,
581 filter_installed_packages,
582@@ -21,9 +20,16 @@
583 relation_get,
584 relation_ids,
585 related_units,
586+ relation_set,
587 unit_get,
588 unit_private_ip,
589 ERROR,
590+ INFO
591+)
592+
593+from charmhelpers.core.host import (
594+ mkdir,
595+ write_file
596 )
597
598 from charmhelpers.contrib.hahelpers.cluster import (
599@@ -36,12 +42,19 @@
600 from charmhelpers.contrib.hahelpers.apache import (
601 get_cert,
602 get_ca_cert,
603+ install_ca_cert,
604 )
605
606 from charmhelpers.contrib.openstack.neutron import (
607 neutron_plugin_attribute,
608 )
609
610+from charmhelpers.contrib.network.ip import (
611+ get_address_in_network,
612+ get_ipv6_addr,
613+ is_address_in_network
614+)
615+
616 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
617
618
619@@ -134,8 +147,26 @@
620 'Missing required charm config options. '
621 '(database name and user)')
622 raise OSContextError
623+
624 ctxt = {}
625
626+ # NOTE(jamespage) if mysql charm provides a network upon which
627+ # access to the database should be made, reconfigure relation
628+ # with the service units local address and defer execution
629+ access_network = relation_get('access-network')
630+ if access_network is not None:
631+ if self.relation_prefix is not None:
632+ hostname_key = "{}_hostname".format(self.relation_prefix)
633+ else:
634+ hostname_key = "hostname"
635+ access_hostname = get_address_in_network(access_network,
636+ unit_get('private-address'))
637+ set_hostname = relation_get(attribute=hostname_key,
638+ unit=local_unit())
639+ if set_hostname != access_hostname:
640+ relation_set(relation_settings={hostname_key: access_hostname})
641+ return ctxt # Defer any further hook execution for now....
642+
643 password_setting = 'password'
644 if self.relation_prefix:
645 password_setting = self.relation_prefix + '_password'
646@@ -243,23 +274,31 @@
647
648
649 class AMQPContext(OSContextGenerator):
650- interfaces = ['amqp']
651
652- def __init__(self, ssl_dir=None):
653+ def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
654 self.ssl_dir = ssl_dir
655+ self.rel_name = rel_name
656+ self.relation_prefix = relation_prefix
657+ self.interfaces = [rel_name]
658
659 def __call__(self):
660 log('Generating template context for amqp')
661 conf = config()
662+ user_setting = 'rabbit-user'
663+ vhost_setting = 'rabbit-vhost'
664+ if self.relation_prefix:
665+ user_setting = self.relation_prefix + '-rabbit-user'
666+ vhost_setting = self.relation_prefix + '-rabbit-vhost'
667+
668 try:
669- username = conf['rabbit-user']
670- vhost = conf['rabbit-vhost']
671+ username = conf[user_setting]
672+ vhost = conf[vhost_setting]
673 except KeyError as e:
674 log('Could not generate shared_db context. '
675 'Missing required charm config options: %s.' % e)
676 raise OSContextError
677 ctxt = {}
678- for rid in relation_ids('amqp'):
679+ for rid in relation_ids(self.rel_name):
680 ha_vip_only = False
681 for unit in related_units(rid):
682 if relation_get('clustered', rid=rid, unit=unit):
683@@ -332,10 +371,12 @@
684 use_syslog = str(config('use-syslog')).lower()
685 for rid in relation_ids('ceph'):
686 for unit in related_units(rid):
687- mon_hosts.append(relation_get('private-address', rid=rid,
688- unit=unit))
689 auth = relation_get('auth', rid=rid, unit=unit)
690 key = relation_get('key', rid=rid, unit=unit)
691+ ceph_addr = \
692+ relation_get('ceph-public-address', rid=rid, unit=unit) or \
693+ relation_get('private-address', rid=rid, unit=unit)
694+ mon_hosts.append(ceph_addr)
695
696 ctxt = {
697 'mon_hosts': ' '.join(mon_hosts),
698@@ -369,7 +410,12 @@
699
700 cluster_hosts = {}
701 l_unit = local_unit().replace('/', '-')
702- cluster_hosts[l_unit] = unit_get('private-address')
703+ if config('prefer-ipv6'):
704+ addr = get_ipv6_addr()
705+ else:
706+ addr = unit_get('private-address')
707+ cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),
708+ addr)
709
710 for rid in relation_ids('cluster'):
711 for unit in related_units(rid):
712@@ -380,6 +426,21 @@
713 ctxt = {
714 'units': cluster_hosts,
715 }
716+
717+ if config('haproxy-server-timeout'):
718+ ctxt['haproxy-server-timeout'] = config('haproxy-server-timeout')
719+ if config('haproxy-client-timeout'):
720+ ctxt['haproxy-client-timeout'] = config('haproxy-client-timeout')
721+
722+ if config('prefer-ipv6'):
723+ ctxt['local_host'] = 'ip6-localhost'
724+ ctxt['haproxy_host'] = '::'
725+ ctxt['stat_port'] = ':::8888'
726+ else:
727+ ctxt['local_host'] = '127.0.0.1'
728+ ctxt['haproxy_host'] = '0.0.0.0'
729+ ctxt['stat_port'] = ':8888'
730+
731 if len(cluster_hosts.keys()) > 1:
732 # Enable haproxy when we have enough peers.
733 log('Ensuring haproxy enabled in /etc/default/haproxy.')
734@@ -418,12 +479,13 @@
735 """
736 Generates a context for an apache vhost configuration that configures
737 HTTPS reverse proxying for one or many endpoints. Generated context
738- looks something like:
739- {
740- 'namespace': 'cinder',
741- 'private_address': 'iscsi.mycinderhost.com',
742- 'endpoints': [(8776, 8766), (8777, 8767)]
743- }
744+ looks something like::
745+
746+ {
747+ 'namespace': 'cinder',
748+ 'private_address': 'iscsi.mycinderhost.com',
749+ 'endpoints': [(8776, 8766), (8777, 8767)]
750+ }
751
752 The endpoints list consists of a tuples mapping external ports
753 to internal ports.
754@@ -439,22 +501,36 @@
755 cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
756 check_call(cmd)
757
758- def configure_cert(self):
759- if not os.path.isdir('/etc/apache2/ssl'):
760- os.mkdir('/etc/apache2/ssl')
761+ def configure_cert(self, cn=None):
762 ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
763- if not os.path.isdir(ssl_dir):
764- os.mkdir(ssl_dir)
765- cert, key = get_cert()
766- with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
767- cert_out.write(b64decode(cert))
768- with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
769- key_out.write(b64decode(key))
770+ mkdir(path=ssl_dir)
771+ cert, key = get_cert(cn)
772+ if cn:
773+ cert_filename = 'cert_{}'.format(cn)
774+ key_filename = 'key_{}'.format(cn)
775+ else:
776+ cert_filename = 'cert'
777+ key_filename = 'key'
778+ write_file(path=os.path.join(ssl_dir, cert_filename),
779+ content=b64decode(cert))
780+ write_file(path=os.path.join(ssl_dir, key_filename),
781+ content=b64decode(key))
782+
783+ def configure_ca(self):
784 ca_cert = get_ca_cert()
785 if ca_cert:
786- with open(CA_CERT_PATH, 'w') as ca_out:
787- ca_out.write(b64decode(ca_cert))
788- check_call(['update-ca-certificates'])
789+ install_ca_cert(b64decode(ca_cert))
790+
791+ def canonical_names(self):
792+ '''Figure out which canonical names clients will access this service'''
793+ cns = []
794+ for r_id in relation_ids('identity-service'):
795+ for unit in related_units(r_id):
796+ rdata = relation_get(rid=r_id, unit=unit)
797+ for k in rdata:
798+ if k.startswith('ssl_key_'):
799+ cns.append(k.lstrip('ssl_key_'))
800+ return list(set(cns))
801
802 def __call__(self):
803 if isinstance(self.external_ports, basestring):
804@@ -462,21 +538,47 @@
805 if (not self.external_ports or not https()):
806 return {}
807
808- self.configure_cert()
809+ self.configure_ca()
810 self.enable_modules()
811
812 ctxt = {
813 'namespace': self.service_namespace,
814- 'private_address': unit_get('private-address'),
815- 'endpoints': []
816+ 'endpoints': [],
817+ 'ext_ports': []
818 }
819- if is_clustered():
820- ctxt['private_address'] = config('vip')
821- for api_port in self.external_ports:
822- ext_port = determine_apache_port(api_port)
823- int_port = determine_api_port(api_port)
824- portmap = (int(ext_port), int(int_port))
825- ctxt['endpoints'].append(portmap)
826+
827+ for cn in self.canonical_names():
828+ self.configure_cert(cn)
829+
830+ addresses = []
831+ vips = []
832+ if config('vip'):
833+ vips = config('vip').split()
834+
835+ for network_type in ['os-internal-network',
836+ 'os-admin-network',
837+ 'os-public-network']:
838+ address = get_address_in_network(config(network_type),
839+ unit_get('private-address'))
840+ if len(vips) > 0 and is_clustered():
841+ for vip in vips:
842+ if is_address_in_network(config(network_type),
843+ vip):
844+ addresses.append((address, vip))
845+ break
846+ elif is_clustered():
847+ addresses.append((address, config('vip')))
848+ else:
849+ addresses.append((address, address))
850+
851+ for address, endpoint in set(addresses):
852+ for api_port in self.external_ports:
853+ ext_port = determine_apache_port(api_port)
854+ int_port = determine_api_port(api_port)
855+ portmap = (address, endpoint, int(ext_port), int(int_port))
856+ ctxt['endpoints'].append(portmap)
857+ ctxt['ext_ports'].append(int(ext_port))
858+ ctxt['ext_ports'] = list(set(ctxt['ext_ports']))
859 return ctxt
860
861
862@@ -541,6 +643,26 @@
863
864 return nvp_ctxt
865
866+ def n1kv_ctxt(self):
867+ driver = neutron_plugin_attribute(self.plugin, 'driver',
868+ self.network_manager)
869+ n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
870+ self.network_manager)
871+ n1kv_ctxt = {
872+ 'core_plugin': driver,
873+ 'neutron_plugin': 'n1kv',
874+ 'neutron_security_groups': self.neutron_security_groups,
875+ 'local_ip': unit_private_ip(),
876+ 'config': n1kv_config,
877+ 'vsm_ip': config('n1kv-vsm-ip'),
878+ 'vsm_username': config('n1kv-vsm-username'),
879+ 'vsm_password': config('n1kv-vsm-password'),
880+ 'restrict_policy_profiles': config(
881+ 'n1kv_restrict_policy_profiles'),
882+ }
883+
884+ return n1kv_ctxt
885+
886 def neutron_ctxt(self):
887 if https():
888 proto = 'https'
889@@ -570,8 +692,10 @@
890
891 if self.plugin == 'ovs':
892 ctxt.update(self.ovs_ctxt())
893- elif self.plugin == 'nvp':
894+ elif self.plugin in ['nvp', 'nsx']:
895 ctxt.update(self.nvp_ctxt())
896+ elif self.plugin == 'n1kv':
897+ ctxt.update(self.n1kv_ctxt())
898
899 alchemy_flags = config('neutron-alchemy-flags')
900 if alchemy_flags:
901@@ -611,7 +735,7 @@
902 The subordinate interface allows subordinates to export their
903 configuration requirements to the principle for multiple config
904 files and multiple serivces. Ie, a subordinate that has interfaces
905- to both glance and nova may export to following yaml blob as json:
906+ to both glance and nova may export to following yaml blob as json::
907
908 glance:
909 /etc/glance/glance-api.conf:
910@@ -630,7 +754,8 @@
911
912 It is then up to the principle charms to subscribe this context to
913 the service+config file it is interestd in. Configuration data will
914- be available in the template context, in glance's case, as:
915+ be available in the template context, in glance's case, as::
916+
917 ctxt = {
918 ... other context ...
919 'subordinate_config': {
920@@ -657,7 +782,7 @@
921 self.interface = interface
922
923 def __call__(self):
924- ctxt = {}
925+ ctxt = {'sections': {}}
926 for rid in relation_ids(self.interface):
927 for unit in related_units(rid):
928 sub_config = relation_get('subordinate_configuration',
929@@ -683,11 +808,26 @@
930
931 sub_config = sub_config[self.config_file]
932 for k, v in sub_config.iteritems():
933- ctxt[k] = v
934-
935- if not ctxt:
936- ctxt['sections'] = {}
937-
938+ if k == 'sections':
939+ for section, config_dict in v.iteritems():
940+ log("adding section '%s'" % (section))
941+ ctxt[k][section] = config_dict
942+ else:
943+ ctxt[k] = v
944+
945+ log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
946+
947+ return ctxt
948+
949+
950+class LogLevelContext(OSContextGenerator):
951+
952+ def __call__(self):
953+ ctxt = {}
954+ ctxt['debug'] = \
955+ False if config('debug') is None else config('debug')
956+ ctxt['verbose'] = \
957+ False if config('verbose') is None else config('verbose')
958 return ctxt
959
960
961
962=== added file 'hooks/charmhelpers/contrib/openstack/ip.py'
963--- hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
964+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-09-26 08:15:24 +0000
965@@ -0,0 +1,79 @@
966+from charmhelpers.core.hookenv import (
967+ config,
968+ unit_get,
969+)
970+
971+from charmhelpers.contrib.network.ip import (
972+ get_address_in_network,
973+ is_address_in_network,
974+ is_ipv6,
975+ get_ipv6_addr,
976+)
977+
978+from charmhelpers.contrib.hahelpers.cluster import is_clustered
979+
980+PUBLIC = 'public'
981+INTERNAL = 'int'
982+ADMIN = 'admin'
983+
984+_address_map = {
985+ PUBLIC: {
986+ 'config': 'os-public-network',
987+ 'fallback': 'public-address'
988+ },
989+ INTERNAL: {
990+ 'config': 'os-internal-network',
991+ 'fallback': 'private-address'
992+ },
993+ ADMIN: {
994+ 'config': 'os-admin-network',
995+ 'fallback': 'private-address'
996+ }
997+}
998+
999+
1000+def canonical_url(configs, endpoint_type=PUBLIC):
1001+ '''
1002+ Returns the correct HTTP URL to this host given the state of HTTPS
1003+ configuration, hacluster and charm configuration.
1004+
1005+ :configs OSTemplateRenderer: A config tempating object to inspect for
1006+ a complete https context.
1007+ :endpoint_type str: The endpoint type to resolve.
1008+
1009+ :returns str: Base URL for services on the current service unit.
1010+ '''
1011+ scheme = 'http'
1012+ if 'https' in configs.complete_contexts():
1013+ scheme = 'https'
1014+ address = resolve_address(endpoint_type)
1015+ if is_ipv6(address):
1016+ address = "[{}]".format(address)
1017+ return '%s://%s' % (scheme, address)
1018+
1019+
1020+def resolve_address(endpoint_type=PUBLIC):
1021+ resolved_address = None
1022+ if is_clustered():
1023+ if config(_address_map[endpoint_type]['config']) is None:
1024+ # Assume vip is simple and pass back directly
1025+ resolved_address = config('vip')
1026+ else:
1027+ for vip in config('vip').split():
1028+ if is_address_in_network(
1029+ config(_address_map[endpoint_type]['config']),
1030+ vip):
1031+ resolved_address = vip
1032+ else:
1033+ if config('prefer-ipv6'):
1034+ fallback_addr = get_ipv6_addr()
1035+ else:
1036+ fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
1037+ resolved_address = get_address_in_network(
1038+ config(_address_map[endpoint_type]['config']), fallback_addr)
1039+
1040+ if resolved_address is None:
1041+ raise ValueError('Unable to resolve a suitable IP address'
1042+ ' based on charm state and configuration')
1043+ else:
1044+ return resolved_address
1045
1046=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
1047--- hooks/charmhelpers/contrib/openstack/neutron.py 2014-03-27 12:33:12 +0000
1048+++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-09-26 08:15:24 +0000
1049@@ -114,14 +114,44 @@
1050 'server_packages': ['neutron-server',
1051 'neutron-plugin-nicira'],
1052 'server_services': ['neutron-server']
1053+ },
1054+ 'nsx': {
1055+ 'config': '/etc/neutron/plugins/vmware/nsx.ini',
1056+ 'driver': 'vmware',
1057+ 'contexts': [
1058+ context.SharedDBContext(user=config('neutron-database-user'),
1059+ database=config('neutron-database'),
1060+ relation_prefix='neutron',
1061+ ssl_dir=NEUTRON_CONF_DIR)],
1062+ 'services': [],
1063+ 'packages': [],
1064+ 'server_packages': ['neutron-server',
1065+ 'neutron-plugin-vmware'],
1066+ 'server_services': ['neutron-server']
1067+ },
1068+ 'n1kv': {
1069+ 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
1070+ 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
1071+ 'contexts': [
1072+ context.SharedDBContext(user=config('neutron-database-user'),
1073+ database=config('neutron-database'),
1074+ relation_prefix='neutron',
1075+ ssl_dir=NEUTRON_CONF_DIR)],
1076+ 'services': [],
1077+ 'packages': [['neutron-plugin-cisco']],
1078+ 'server_packages': ['neutron-server',
1079+ 'neutron-plugin-cisco'],
1080+ 'server_services': ['neutron-server']
1081 }
1082 }
1083- # NOTE: patch in ml2 plugin for icehouse onwards
1084 if release >= 'icehouse':
1085+ # NOTE: patch in ml2 plugin for icehouse onwards
1086 plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
1087 plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
1088 plugins['ovs']['server_packages'] = ['neutron-server',
1089 'neutron-plugin-ml2']
1090+ # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
1091+ plugins['nvp'] = plugins['nsx']
1092 return plugins
1093
1094
1095
1096=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
1097--- hooks/charmhelpers/contrib/openstack/templating.py 2014-03-05 12:57:20 +0000
1098+++ hooks/charmhelpers/contrib/openstack/templating.py 2014-09-26 08:15:24 +0000
1099@@ -30,17 +30,17 @@
1100 loading dir.
1101
1102 A charm may also ship a templates dir with this module
1103- and it will be appended to the bottom of the search list, eg:
1104- hooks/charmhelpers/contrib/openstack/templates.
1105-
1106- :param templates_dir: str: Base template directory containing release
1107- sub-directories.
1108- :param os_release : str: OpenStack release codename to construct template
1109- loader.
1110-
1111- :returns : jinja2.ChoiceLoader constructed with a list of
1112- jinja2.FilesystemLoaders, ordered in descending
1113- order by OpenStack release.
1114+ and it will be appended to the bottom of the search list, eg::
1115+
1116+ hooks/charmhelpers/contrib/openstack/templates
1117+
1118+ :param templates_dir (str): Base template directory containing release
1119+ sub-directories.
1120+ :param os_release (str): OpenStack release codename to construct template
1121+ loader.
1122+ :returns: jinja2.ChoiceLoader constructed with a list of
1123+ jinja2.FilesystemLoaders, ordered in descending
1124+ order by OpenStack release.
1125 """
1126 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
1127 for rel in OPENSTACK_CODENAMES.itervalues()]
1128@@ -111,7 +111,8 @@
1129 and ease the burden of managing config templates across multiple OpenStack
1130 releases.
1131
1132- Basic usage:
1133+ Basic usage::
1134+
1135 # import some common context generates from charmhelpers
1136 from charmhelpers.contrib.openstack import context
1137
1138@@ -131,21 +132,19 @@
1139 # write out all registered configs
1140 configs.write_all()
1141
1142- Details:
1143+ **OpenStack Releases and template loading**
1144
1145- OpenStack Releases and template loading
1146- ---------------------------------------
1147 When the object is instantiated, it is associated with a specific OS
1148 release. This dictates how the template loader will be constructed.
1149
1150 The constructed loader attempts to load the template from several places
1151 in the following order:
1152- - from the most recent OS release-specific template dir (if one exists)
1153- - the base templates_dir
1154- - a template directory shipped in the charm with this helper file.
1155-
1156-
1157- For the example above, '/tmp/templates' contains the following structure:
1158+ - from the most recent OS release-specific template dir (if one exists)
1159+ - the base templates_dir
1160+ - a template directory shipped in the charm with this helper file.
1161+
1162+ For the example above, '/tmp/templates' contains the following structure::
1163+
1164 /tmp/templates/nova.conf
1165 /tmp/templates/api-paste.ini
1166 /tmp/templates/grizzly/api-paste.ini
1167@@ -169,8 +168,8 @@
1168 $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
1169 us to ship common templates (haproxy, apache) with the helpers.
1170
1171- Context generators
1172- ---------------------------------------
1173+ **Context generators**
1174+
1175 Context generators are used to generate template contexts during hook
1176 execution. Doing so may require inspecting service relations, charm
1177 config, etc. When registered, a config file is associated with a list
1178
1179=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
1180--- hooks/charmhelpers/contrib/openstack/utils.py 2014-04-10 16:56:26 +0000
1181+++ hooks/charmhelpers/contrib/openstack/utils.py 2014-09-26 08:15:24 +0000
1182@@ -3,7 +3,6 @@
1183 # Common python helper functions used for OpenStack charms.
1184 from collections import OrderedDict
1185
1186-import apt_pkg as apt
1187 import subprocess
1188 import os
1189 import socket
1190@@ -24,7 +23,7 @@
1191 )
1192
1193 from charmhelpers.core.host import lsb_release, mounts, umount
1194-from charmhelpers.fetch import apt_install
1195+from charmhelpers.fetch import apt_install, apt_cache
1196 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
1197 from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
1198
1199@@ -41,7 +40,8 @@
1200 ('quantal', 'folsom'),
1201 ('raring', 'grizzly'),
1202 ('saucy', 'havana'),
1203- ('trusty', 'icehouse')
1204+ ('trusty', 'icehouse'),
1205+ ('utopic', 'juno'),
1206 ])
1207
1208
1209@@ -52,6 +52,7 @@
1210 ('2013.1', 'grizzly'),
1211 ('2013.2', 'havana'),
1212 ('2014.1', 'icehouse'),
1213+ ('2014.2', 'juno'),
1214 ])
1215
1216 # The ugly duckling
1217@@ -69,6 +70,7 @@
1218 ('1.13.0', 'icehouse'),
1219 ('1.12.0', 'icehouse'),
1220 ('1.11.0', 'icehouse'),
1221+ ('2.0.0', 'juno'),
1222 ])
1223
1224 DEFAULT_LOOPBACK_SIZE = '5G'
1225@@ -83,6 +85,8 @@
1226 '''Derive OpenStack release codename from a given installation source.'''
1227 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
1228 rel = ''
1229+ if src is None:
1230+ return rel
1231 if src in ['distro', 'distro-proposed']:
1232 try:
1233 rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
1234@@ -130,8 +134,9 @@
1235
1236 def get_os_codename_package(package, fatal=True):
1237 '''Derive OpenStack release codename from an installed package.'''
1238- apt.init()
1239- cache = apt.Cache()
1240+ import apt_pkg as apt
1241+
1242+ cache = apt_cache()
1243
1244 try:
1245 pkg = cache[package]
1246@@ -182,8 +187,8 @@
1247 for version, cname in vers_map.iteritems():
1248 if cname == codename:
1249 return version
1250- #e = "Could not determine OpenStack version for package: %s" % pkg
1251- #error_out(e)
1252+ # e = "Could not determine OpenStack version for package: %s" % pkg
1253+ # error_out(e)
1254
1255
1256 os_rel = None
1257@@ -268,6 +273,9 @@
1258 'icehouse': 'precise-updates/icehouse',
1259 'icehouse/updates': 'precise-updates/icehouse',
1260 'icehouse/proposed': 'precise-proposed/icehouse',
1261+ 'juno': 'trusty-updates/juno',
1262+ 'juno/updates': 'trusty-updates/juno',
1263+ 'juno/proposed': 'trusty-proposed/juno',
1264 }
1265
1266 try:
1267@@ -315,6 +323,7 @@
1268
1269 """
1270
1271+ import apt_pkg as apt
1272 src = config('openstack-origin')
1273 cur_vers = get_os_version_package(package)
1274 available_vers = get_os_version_install_source(src)
1275@@ -401,6 +410,8 @@
1276 rtype = 'PTR'
1277 elif isinstance(address, basestring):
1278 rtype = 'A'
1279+ else:
1280+ return None
1281
1282 answers = dns.resolver.query(address, rtype)
1283 if answers:
1284
1285=== modified file 'hooks/charmhelpers/contrib/peerstorage/__init__.py'
1286--- hooks/charmhelpers/contrib/peerstorage/__init__.py 2014-03-10 11:38:19 +0000
1287+++ hooks/charmhelpers/contrib/peerstorage/__init__.py 2014-09-26 08:15:24 +0000
1288@@ -1,44 +1,44 @@
1289+from charmhelpers.core.hookenv import relation_id as current_relation_id
1290 from charmhelpers.core.hookenv import (
1291+ is_relation_made,
1292 relation_ids,
1293 relation_get,
1294 local_unit,
1295 relation_set,
1296 )
1297
1298+
1299 """
1300 This helper provides functions to support use of a peer relation
1301 for basic key/value storage, with the added benefit that all storage
1302-can be replicated across peer units, so this is really useful for
1303-services that issue usernames/passwords to remote services.
1304-
1305-def shared_db_changed()
1306- # Only the lead unit should create passwords
1307- if not is_leader():
1308- return
1309- username = relation_get('username')
1310- key = '{}.password'.format(username)
1311- # Attempt to retrieve any existing password for this user
1312- password = peer_retrieve(key)
1313- if password is None:
1314- # New user, create password and store
1315- password = pwgen(length=64)
1316- peer_store(key, password)
1317- create_access(username, password)
1318- relation_set(password=password)
1319-
1320-
1321-def cluster_changed()
1322- # Echo any relation data other that *-address
1323- # back onto the peer relation so all units have
1324- # all *.password keys stored on their local relation
1325- # for later retrieval.
1326+can be replicated across peer units.
1327+
1328+Requirement to use:
1329+
1330+To use this, the "peer_echo()" method has to be called form the peer
1331+relation's relation-changed hook:
1332+
1333+@hooks.hook("cluster-relation-changed") # Adapt the to your peer relation name
1334+def cluster_relation_changed():
1335 peer_echo()
1336
1337+Once this is done, you can use peer storage from anywhere:
1338+
1339+@hooks.hook("some-hook")
1340+def some_hook():
1341+ # You can store and retrieve key/values this way:
1342+ if is_relation_made("cluster"): # from charmhelpers.core.hookenv
1343+ # There are peers available so we can work with peer storage
1344+ peer_store("mykey", "myvalue")
1345+ value = peer_retrieve("mykey")
1346+ print value
1347+ else:
1348+ print "No peers joind the relation, cannot share key/values :("
1349 """
1350
1351
1352 def peer_retrieve(key, relation_name='cluster'):
1353- """ Retrieve a named key from peer relation relation_name """
1354+ """Retrieve a named key from peer relation `relation_name`."""
1355 cluster_rels = relation_ids(relation_name)
1356 if len(cluster_rels) > 0:
1357 cluster_rid = cluster_rels[0]
1358@@ -49,8 +49,26 @@
1359 'peer relation {}'.format(relation_name))
1360
1361
1362+def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_',
1363+ inc_list=None, exc_list=None):
1364+ """ Retrieve k/v pairs given a prefix and filter using {inc,exc}_list """
1365+ inc_list = inc_list if inc_list else []
1366+ exc_list = exc_list if exc_list else []
1367+ peerdb_settings = peer_retrieve('-', relation_name=relation_name)
1368+ matched = {}
1369+ for k, v in peerdb_settings.items():
1370+ full_prefix = prefix + delimiter
1371+ if k.startswith(full_prefix):
1372+ new_key = k.replace(full_prefix, '')
1373+ if new_key in exc_list:
1374+ continue
1375+ if new_key in inc_list or len(inc_list) == 0:
1376+ matched[new_key] = v
1377+ return matched
1378+
1379+
1380 def peer_store(key, value, relation_name='cluster'):
1381- """ Store the key/value pair on the named peer relation relation_name """
1382+ """Store the key/value pair on the named peer relation `relation_name`."""
1383 cluster_rels = relation_ids(relation_name)
1384 if len(cluster_rels) > 0:
1385 cluster_rid = cluster_rels[0]
1386@@ -62,10 +80,10 @@
1387
1388
1389 def peer_echo(includes=None):
1390- """Echo filtered attributes back onto the same relation for storage
1391+ """Echo filtered attributes back onto the same relation for storage.
1392
1393- Note that this helper must only be called within a peer relation
1394- changed hook
1395+ This is a requirement to use the peerstorage module - it needs to be called
1396+ from the peer relation's changed hook.
1397 """
1398 rdata = relation_get()
1399 echo_data = {}
1400@@ -81,3 +99,33 @@
1401 echo_data[attribute] = value
1402 if len(echo_data) > 0:
1403 relation_set(relation_settings=echo_data)
1404+
1405+
1406+def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
1407+ peer_store_fatal=False, relation_settings=None,
1408+ delimiter='_', **kwargs):
1409+ """Store passed-in arguments both in argument relation and in peer storage.
1410+
1411+ It functions like doing relation_set() and peer_store() at the same time,
1412+ with the same data.
1413+
1414+ @param relation_id: the id of the relation to store the data on. Defaults
1415+ to the current relation.
1416+ @param peer_store_fatal: Set to True, the function will raise an exception
1417+ should the peer sotrage not be avialable."""
1418+
1419+ relation_settings = relation_settings if relation_settings else {}
1420+ relation_set(relation_id=relation_id,
1421+ relation_settings=relation_settings,
1422+ **kwargs)
1423+ if is_relation_made(peer_relation_name):
1424+ for key, value in dict(kwargs.items() +
1425+ relation_settings.items()).iteritems():
1426+ key_prefix = relation_id or current_relation_id()
1427+ peer_store(key_prefix + delimiter + key,
1428+ value,
1429+ relation_name=peer_relation_name)
1430+ else:
1431+ if peer_store_fatal:
1432+ raise ValueError('Unable to detect '
1433+ 'peer relation {}'.format(peer_relation_name))
1434
1435=== modified file 'hooks/charmhelpers/contrib/ssl/service.py'
1436--- hooks/charmhelpers/contrib/ssl/service.py 2014-03-05 12:57:20 +0000
1437+++ hooks/charmhelpers/contrib/ssl/service.py 2014-09-26 08:15:24 +0000
1438@@ -127,7 +127,7 @@
1439 return self.get_certificate(common_name)
1440
1441 def get_certificate(self, common_name):
1442- if not common_name in self:
1443+ if common_name not in self:
1444 raise ValueError("No certificate for %s" % common_name)
1445 key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)
1446 crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
1447
1448=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
1449--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-03-05 12:57:20 +0000
1450+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-09-26 08:15:24 +0000
1451@@ -303,7 +303,7 @@
1452 blk_device, fstype, system_services=[]):
1453 """
1454 NOTE: This function must only be called from a single service unit for
1455- the same rbd_img otherwise data loss will occur.
1456+ the same rbd_img otherwise data loss will occur.
1457
1458 Ensures given pool and RBD image exists, is mapped to a block device,
1459 and the device is formatted and mounted at the given mount_point.
1460
1461=== modified file 'hooks/charmhelpers/contrib/storage/linux/lvm.py'
1462--- hooks/charmhelpers/contrib/storage/linux/lvm.py 2014-03-05 12:57:20 +0000
1463+++ hooks/charmhelpers/contrib/storage/linux/lvm.py 2014-09-26 08:15:24 +0000
1464@@ -62,7 +62,7 @@
1465 pvd = check_output(['pvdisplay', block_device]).splitlines()
1466 for l in pvd:
1467 if l.strip().startswith('VG Name'):
1468- vg = ' '.join(l.split()).split(' ').pop()
1469+ vg = ' '.join(l.strip().split()[2:])
1470 return vg
1471
1472
1473
1474=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
1475--- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-05-02 13:03:56 +0000
1476+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-09-26 08:15:24 +0000
1477@@ -1,4 +1,5 @@
1478-from os import stat
1479+import os
1480+import re
1481 from stat import S_ISBLK
1482
1483 from subprocess import (
1484@@ -14,7 +15,9 @@
1485
1486 :returns: boolean: True if path is a block device, False if not.
1487 '''
1488- return S_ISBLK(stat(path).st_mode)
1489+ if not os.path.exists(path):
1490+ return False
1491+ return S_ISBLK(os.stat(path).st_mode)
1492
1493
1494 def zap_disk(block_device):
1495@@ -29,7 +32,22 @@
1496 '--clear', block_device])
1497 dev_end = check_output(['blockdev', '--getsz', block_device])
1498 gpt_end = int(dev_end.split()[0]) - 100
1499- check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device),
1500+ check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
1501 'bs=1M', 'count=1'])
1502- check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device),
1503- 'bs=512', 'count=100', 'seek=%s'%(gpt_end)])
1504+ check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
1505+ 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
1506+
1507+
1508+def is_device_mounted(device):
1509+ '''Given a device path, return True if that device is mounted, and False
1510+ if it isn't.
1511+
1512+ :param device: str: Full path of the device to check.
1513+ :returns: boolean: True if the path represents a mounted device, False if
1514+ it doesn't.
1515+ '''
1516+ is_partition = bool(re.search(r".*[0-9]+\b", device))
1517+ out = check_output(['mount'])
1518+ if is_partition:
1519+ return bool(re.search(device + r"\b", out))
1520+ return bool(re.search(device + r"[0-9]+\b", out))
1521
1522=== added file 'hooks/charmhelpers/core/fstab.py'
1523--- hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000
1524+++ hooks/charmhelpers/core/fstab.py 2014-09-26 08:15:24 +0000
1525@@ -0,0 +1,116 @@
1526+#!/usr/bin/env python
1527+# -*- coding: utf-8 -*-
1528+
1529+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
1530+
1531+import os
1532+
1533+
1534+class Fstab(file):
1535+ """This class extends file in order to implement a file reader/writer
1536+ for file `/etc/fstab`
1537+ """
1538+
1539+ class Entry(object):
1540+ """Entry class represents a non-comment line on the `/etc/fstab` file
1541+ """
1542+ def __init__(self, device, mountpoint, filesystem,
1543+ options, d=0, p=0):
1544+ self.device = device
1545+ self.mountpoint = mountpoint
1546+ self.filesystem = filesystem
1547+
1548+ if not options:
1549+ options = "defaults"
1550+
1551+ self.options = options
1552+ self.d = d
1553+ self.p = p
1554+
1555+ def __eq__(self, o):
1556+ return str(self) == str(o)
1557+
1558+ def __str__(self):
1559+ return "{} {} {} {} {} {}".format(self.device,
1560+ self.mountpoint,
1561+ self.filesystem,
1562+ self.options,
1563+ self.d,
1564+ self.p)
1565+
1566+ DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
1567+
1568+ def __init__(self, path=None):
1569+ if path:
1570+ self._path = path
1571+ else:
1572+ self._path = self.DEFAULT_PATH
1573+ file.__init__(self, self._path, 'r+')
1574+
1575+ def _hydrate_entry(self, line):
1576+ # NOTE: use split with no arguments to split on any
1577+ # whitespace including tabs
1578+ return Fstab.Entry(*filter(
1579+ lambda x: x not in ('', None),
1580+ line.strip("\n").split()))
1581+
1582+ @property
1583+ def entries(self):
1584+ self.seek(0)
1585+ for line in self.readlines():
1586+ try:
1587+ if not line.startswith("#"):
1588+ yield self._hydrate_entry(line)
1589+ except ValueError:
1590+ pass
1591+
1592+ def get_entry_by_attr(self, attr, value):
1593+ for entry in self.entries:
1594+ e_attr = getattr(entry, attr)
1595+ if e_attr == value:
1596+ return entry
1597+ return None
1598+
1599+ def add_entry(self, entry):
1600+ if self.get_entry_by_attr('device', entry.device):
1601+ return False
1602+
1603+ self.write(str(entry) + '\n')
1604+ self.truncate()
1605+ return entry
1606+
1607+ def remove_entry(self, entry):
1608+ self.seek(0)
1609+
1610+ lines = self.readlines()
1611+
1612+ found = False
1613+ for index, line in enumerate(lines):
1614+ if not line.startswith("#"):
1615+ if self._hydrate_entry(line) == entry:
1616+ found = True
1617+ break
1618+
1619+ if not found:
1620+ return False
1621+
1622+ lines.remove(line)
1623+
1624+ self.seek(0)
1625+ self.write(''.join(lines))
1626+ self.truncate()
1627+ return True
1628+
1629+ @classmethod
1630+ def remove_by_mountpoint(cls, mountpoint, path=None):
1631+ fstab = cls(path=path)
1632+ entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
1633+ if entry:
1634+ return fstab.remove_entry(entry)
1635+ return False
1636+
1637+ @classmethod
1638+ def add(cls, device, mountpoint, filesystem, options=None, path=None):
1639+ return cls(path=path).add_entry(Fstab.Entry(device,
1640+ mountpoint, filesystem,
1641+ options=options))
1642
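A short usage sketch for the new Fstab helper; the device, mountpoint and filesystem are illustrative only. These classmethods back the fstab_add/fstab_remove wrappers added to core/host.py further down:

    from charmhelpers.core.fstab import Fstab

    # Persist a mount; returns False if an entry for the device already exists.
    Fstab.add('/dev/vdb', '/srv/data', 'ext4', options='defaults,noatime')

    # Remove the entry again, looked up by its mountpoint.
    Fstab.remove_by_mountpoint('/srv/data')
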
1643=== modified file 'hooks/charmhelpers/core/hookenv.py'
1644--- hooks/charmhelpers/core/hookenv.py 2014-03-05 12:57:20 +0000
1645+++ hooks/charmhelpers/core/hookenv.py 2014-09-26 08:15:24 +0000
1646@@ -25,7 +25,7 @@
1647 def cached(func):
1648 """Cache return values for multiple executions of func + args
1649
1650- For example:
1651+ For example::
1652
1653 @cached
1654 def unit_get(attribute):
1655@@ -155,6 +155,121 @@
1656 return os.path.basename(sys.argv[0])
1657
1658
1659+class Config(dict):
1660+ """A dictionary representation of the charm's config.yaml, with some
1661+ extra features:
1662+
1663+ - See which values in the dictionary have changed since the previous hook.
1664+ - For values that have changed, see what the previous value was.
1665+ - Store arbitrary data for use in a later hook.
1666+
1667+ NOTE: Do not instantiate this object directly - instead call
1668+ ``hookenv.config()``, which will return an instance of :class:`Config`.
1669+
1670+ Example usage::
1671+
1672+ >>> # inside a hook
1673+ >>> from charmhelpers.core import hookenv
1674+ >>> config = hookenv.config()
1675+ >>> config['foo']
1676+ 'bar'
1677+ >>> # store a new key/value for later use
1678+ >>> config['mykey'] = 'myval'
1679+
1680+
1681+ >>> # user runs `juju set mycharm foo=baz`
1682+ >>> # now we're inside subsequent config-changed hook
1683+ >>> config = hookenv.config()
1684+ >>> config['foo']
1685+ 'baz'
1686+ >>> # test to see if this val has changed since last hook
1687+ >>> config.changed('foo')
1688+ True
1689+ >>> # what was the previous value?
1690+ >>> config.previous('foo')
1691+ 'bar'
1692+ >>> # keys/values that we add are preserved across hooks
1693+ >>> config['mykey']
1694+ 'myval'
1695+
1696+ """
1697+ CONFIG_FILE_NAME = '.juju-persistent-config'
1698+
1699+ def __init__(self, *args, **kw):
1700+ super(Config, self).__init__(*args, **kw)
1701+ self.implicit_save = True
1702+ self._prev_dict = None
1703+ self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
1704+ if os.path.exists(self.path):
1705+ self.load_previous()
1706+
1707+ def __getitem__(self, key):
1708+ """For regular dict lookups, check the current juju config first,
1709+ then the previous (saved) copy. This ensures that user-saved values
1710+ will be returned by a dict lookup.
1711+
1712+ """
1713+ try:
1714+ return dict.__getitem__(self, key)
1715+ except KeyError:
1716+ return (self._prev_dict or {})[key]
1717+
1718+ def load_previous(self, path=None):
1719+ """Load previous copy of config from disk.
1720+
1721+ In normal usage you don't need to call this method directly - it
1722+ is called automatically at object initialization.
1723+
1724+ :param path:
1725+
1726+ File path from which to load the previous config. If `None`,
1727+ config is loaded from the default location. If `path` is
1728+ specified, subsequent `save()` calls will write to the same
1729+ path.
1730+
1731+ """
1732+ self.path = path or self.path
1733+ with open(self.path) as f:
1734+ self._prev_dict = json.load(f)
1735+
1736+ def changed(self, key):
1737+ """Return True if the current value for this key is different from
1738+ the previous value.
1739+
1740+ """
1741+ if self._prev_dict is None:
1742+ return True
1743+ return self.previous(key) != self.get(key)
1744+
1745+ def previous(self, key):
1746+ """Return previous value for this key, or None if there
1747+ is no previous value.
1748+
1749+ """
1750+ if self._prev_dict:
1751+ return self._prev_dict.get(key)
1752+ return None
1753+
1754+ def save(self):
1755+ """Save this config to disk.
1756+
1757+ If the charm is using the :mod:`Services Framework <services.base>`
1758+ or :meth:'@hook <Hooks.hook>' decorator, this
1759+ is called automatically at the end of successful hook execution.
1760+ Otherwise, it should be called directly by user code.
1761+
1762+ To disable automatic saves, set ``implicit_save=False`` on this
1763+ instance.
1764+
1765+ """
1766+ if self._prev_dict:
1767+ for k, v in self._prev_dict.iteritems():
1768+ if k not in self:
1769+ self[k] = v
1770+ with open(self.path, 'w') as f:
1771+ json.dump(self, f)
1772+
1773+
1774 @cached
1775 def config(scope=None):
1776 """Juju charm configuration"""
1777@@ -163,7 +278,10 @@
1778 config_cmd_line.append(scope)
1779 config_cmd_line.append('--format=json')
1780 try:
1781- return json.loads(subprocess.check_output(config_cmd_line))
1782+ config_data = json.loads(subprocess.check_output(config_cmd_line))
1783+ if scope is not None:
1784+ return config_data
1785+ return Config(config_data)
1786 except ValueError:
1787 return None
1788
1789@@ -188,8 +306,9 @@
1790 raise
1791
1792
1793-def relation_set(relation_id=None, relation_settings={}, **kwargs):
1794+def relation_set(relation_id=None, relation_settings=None, **kwargs):
1795 """Set relation information for the current unit"""
1796+ relation_settings = relation_settings if relation_settings else {}
1797 relation_cmd_line = ['relation-set']
1798 if relation_id is not None:
1799 relation_cmd_line.extend(('-r', relation_id))
1800@@ -348,27 +467,29 @@
1801 class Hooks(object):
1802 """A convenient handler for hook functions.
1803
1804- Example:
1805+ Example::
1806+
1807 hooks = Hooks()
1808
1809 # register a hook, taking its name from the function name
1810 @hooks.hook()
1811 def install():
1812- ...
1813+ pass # your code here
1814
1815 # register a hook, providing a custom hook name
1816 @hooks.hook("config-changed")
1817 def config_changed():
1818- ...
1819+ pass # your code here
1820
1821 if __name__ == "__main__":
1822 # execute a hook based on the name the program is called by
1823 hooks.execute(sys.argv)
1824 """
1825
1826- def __init__(self):
1827+ def __init__(self, config_save=True):
1828 super(Hooks, self).__init__()
1829 self._hooks = {}
1830+ self._config_save = config_save
1831
1832 def register(self, name, function):
1833 """Register a hook"""
1834@@ -379,6 +500,10 @@
1835 hook_name = os.path.basename(args[0])
1836 if hook_name in self._hooks:
1837 self._hooks[hook_name]()
1838+ if self._config_save:
1839+ cfg = config()
1840+ if cfg.implicit_save:
1841+ cfg.save()
1842 else:
1843 raise UnregisteredHookError(hook_name)
1844
1845
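A hedged sketch of how the new Config object and the Hooks changes combine in a hook script; the 'vip' option name is hypothetical. With implicit_save left at its default, Hooks.execute() persists the config at the end of a successful hook run:

    import sys

    from charmhelpers.core import hookenv

    hooks = hookenv.Hooks()


    @hooks.hook('config-changed')
    def config_changed():
        cfg = hookenv.config()
        if cfg.changed('vip'):  # 'vip' is a hypothetical option
            hookenv.log('vip changed from %r' % cfg.previous('vip'))
        # No explicit cfg.save() call is needed here.


    if __name__ == '__main__':
        hooks.execute(sys.argv)
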
1846=== modified file 'hooks/charmhelpers/core/host.py'
1847--- hooks/charmhelpers/core/host.py 2014-03-05 12:57:20 +0000
1848+++ hooks/charmhelpers/core/host.py 2014-09-26 08:15:24 +0000
1849@@ -12,10 +12,13 @@
1850 import string
1851 import subprocess
1852 import hashlib
1853+import shutil
1854+from contextlib import contextmanager
1855
1856 from collections import OrderedDict
1857
1858 from hookenv import log
1859+from fstab import Fstab
1860
1861
1862 def service_start(service_name):
1863@@ -34,7 +37,8 @@
1864
1865
1866 def service_reload(service_name, restart_on_failure=False):
1867- """Reload a system service, optionally falling back to restart if reload fails"""
1868+ """Reload a system service, optionally falling back to restart if
1869+ reload fails"""
1870 service_result = service('reload', service_name)
1871 if not service_result and restart_on_failure:
1872 service_result = service('restart', service_name)
1873@@ -50,7 +54,7 @@
1874 def service_running(service):
1875 """Determine whether a system service is running"""
1876 try:
1877- output = subprocess.check_output(['service', service, 'status'])
1878+ output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
1879 except subprocess.CalledProcessError:
1880 return False
1881 else:
1882@@ -60,6 +64,16 @@
1883 return False
1884
1885
1886+def service_available(service_name):
1887+ """Determine whether a system service is available"""
1888+ try:
1889+ subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
1890+ except subprocess.CalledProcessError as e:
1891+ return 'unrecognized service' not in e.output
1892+ else:
1893+ return True
1894+
1895+
1896 def adduser(username, password=None, shell='/bin/bash', system_user=False):
1897 """Add a user to the system"""
1898 try:
1899@@ -143,7 +157,19 @@
1900 target.write(content)
1901
1902
1903-def mount(device, mountpoint, options=None, persist=False):
1904+def fstab_remove(mp):
1905+ """Remove the given mountpoint entry from /etc/fstab
1906+ """
1907+ return Fstab.remove_by_mountpoint(mp)
1908+
1909+
1910+def fstab_add(dev, mp, fs, options=None):
1911+ """Adds the given device entry to the /etc/fstab file
1912+ """
1913+ return Fstab.add(dev, mp, fs, options=options)
1914+
1915+
1916+def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
1917 """Mount a filesystem at a particular mountpoint"""
1918 cmd_args = ['mount']
1919 if options is not None:
1920@@ -154,9 +180,9 @@
1921 except subprocess.CalledProcessError, e:
1922 log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
1923 return False
1924+
1925 if persist:
1926- # TODO: update fstab
1927- pass
1928+ return fstab_add(device, mountpoint, filesystem, options=options)
1929 return True
1930
1931
1932@@ -168,9 +194,9 @@
1933 except subprocess.CalledProcessError, e:
1934 log('Error unmounting {}\n{}'.format(mountpoint, e.output))
1935 return False
1936+
1937 if persist:
1938- # TODO: update fstab
1939- pass
1940+ return fstab_remove(mountpoint)
1941 return True
1942
1943
1944@@ -183,10 +209,15 @@
1945 return system_mounts
1946
1947
1948-def file_hash(path):
1949- """Generate a md5 hash of the contents of 'path' or None if not found """
1950+def file_hash(path, hash_type='md5'):
1951+ """
1952+ Generate a hash checksum of the contents of 'path' or None if not found.
1953+
1954+ :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
1955+ such as md5, sha1, sha256, sha512, etc.
1956+ """
1957 if os.path.exists(path):
1958- h = hashlib.md5()
1959+ h = getattr(hashlib, hash_type)()
1960 with open(path, 'r') as source:
1961 h.update(source.read()) # IGNORE:E1101 - it does have update
1962 return h.hexdigest()
1963@@ -194,16 +225,36 @@
1964 return None
1965
1966
1967+def check_hash(path, checksum, hash_type='md5'):
1968+ """
1969+ Validate a file using a cryptographic checksum.
1970+
1971+ :param str checksum: Value of the checksum used to validate the file.
1972+ :param str hash_type: Hash algorithm used to generate `checksum`.
1973+ Can be any hash algorithm supported by :mod:`hashlib`,
1974+ such as md5, sha1, sha256, sha512, etc.
1975+ :raises ChecksumError: If the file fails the checksum
1976+
1977+ """
1978+ actual_checksum = file_hash(path, hash_type)
1979+ if checksum != actual_checksum:
1980+ raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
1981+
1982+
1983+class ChecksumError(ValueError):
1984+ pass
1985+
1986+
1987 def restart_on_change(restart_map, stopstart=False):
1988 """Restart services based on configuration files changing
1989
1990- This function is used a decorator, for example
1991+ This function is used as a decorator, for example::
1992
1993 @restart_on_change({
1994 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
1995 })
1996 def ceph_client_changed():
1997- ...
1998+ pass # your code here
1999
2000 In this example, the cinder-api and cinder-volume services
2001 would be restarted if /etc/ceph/ceph.conf is changed by the
2002@@ -295,3 +346,40 @@
2003 if 'link/ether' in words:
2004 hwaddr = words[words.index('link/ether') + 1]
2005 return hwaddr
2006+
2007+
2008+def cmp_pkgrevno(package, revno, pkgcache=None):
2009+ '''Compare supplied revno with the revno of the installed package
2010+
2011+ * 1 => Installed revno is greater than supplied arg
2012+ * 0 => Installed revno is the same as supplied arg
2013+ * -1 => Installed revno is less than supplied arg
2014+
2015+ '''
2016+ import apt_pkg
2017+ from charmhelpers.fetch import apt_cache
2018+ if not pkgcache:
2019+ pkgcache = apt_cache()
2020+ pkg = pkgcache[package]
2021+ return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
2022+
2023+
2024+@contextmanager
2025+def chdir(d):
2026+ cur = os.getcwd()
2027+ try:
2028+ yield os.chdir(d)
2029+ finally:
2030+ os.chdir(cur)
2031+
2032+
2033+def chownr(path, owner, group):
2034+ uid = pwd.getpwnam(owner).pw_uid
2035+ gid = grp.getgrnam(group).gr_gid
2036+
2037+ for root, dirs, files in os.walk(path):
2038+ for name in dirs + files:
2039+ full = os.path.join(root, name)
2040+ broken_symlink = os.path.lexists(full) and not os.path.exists(full)
2041+ if not broken_symlink:
2042+ os.chown(full, uid, gid)
2043
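Two of the host.py additions restated as a sketch (the archive path and digest are placeholders): cmp_pkgrevno() is what rabbit_utils.py switches to at the bottom of this diff, and check_hash() backs the archive validation added to fetch/archiveurl.py:

    from charmhelpers.core.host import ChecksumError, check_hash, cmp_pkgrevno

    # Compare against the installed package version without locking the apt index.
    if cmp_pkgrevno('rabbitmq-server', '3.0.1') >= 0:
        cluster_cmd = 'join_cluster'
    else:
        cluster_cmd = 'cluster'

    # Validate a downloaded file against a known digest.
    expected = 'deadbeef'  # placeholder sha1 digest
    try:
        check_hash('/tmp/archive.tgz', expected, hash_type='sha1')
    except ChecksumError:
        pass  # handle the mismatch here
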
2044=== added directory 'hooks/charmhelpers/core/services'
2045=== added file 'hooks/charmhelpers/core/services/__init__.py'
2046--- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000
2047+++ hooks/charmhelpers/core/services/__init__.py 2014-09-26 08:15:24 +0000
2048@@ -0,0 +1,2 @@
2049+from .base import *
2050+from .helpers import *
2051
2052=== added file 'hooks/charmhelpers/core/services/base.py'
2053--- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000
2054+++ hooks/charmhelpers/core/services/base.py 2014-09-26 08:15:24 +0000
2055@@ -0,0 +1,313 @@
2056+import os
2057+import re
2058+import json
2059+from collections import Iterable
2060+
2061+from charmhelpers.core import host
2062+from charmhelpers.core import hookenv
2063+
2064+
2065+__all__ = ['ServiceManager', 'ManagerCallback',
2066+ 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
2067+ 'service_restart', 'service_stop']
2068+
2069+
2070+class ServiceManager(object):
2071+ def __init__(self, services=None):
2072+ """
2073+ Register a list of services, given their definitions.
2074+
2075+ Service definitions are dicts in the following formats (all keys except
2076+ 'service' are optional)::
2077+
2078+ {
2079+ "service": <service name>,
2080+ "required_data": <list of required data contexts>,
2081+ "provided_data": <list of provided data contexts>,
2082+ "data_ready": <one or more callbacks>,
2083+ "data_lost": <one or more callbacks>,
2084+ "start": <one or more callbacks>,
2085+ "stop": <one or more callbacks>,
2086+ "ports": <list of ports to manage>,
2087+ }
2088+
2089+ The 'required_data' list should contain dicts of required data (or
2090+ dependency managers that act like dicts and know how to collect the data).
2091+ Only when all items in the 'required_data' list are populated are the lists
2092+ of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
2093+ information.
2094+
2095+ The 'provided_data' list should contain relation data providers, most likely
2096+ a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
2097+ that will indicate a set of data to set on a given relation.
2098+
2099+ The 'data_ready' value should be either a single callback, or a list of
2100+ callbacks, to be called when all items in 'required_data' pass `is_ready()`.
2101+ Each callback will be called with the service name as the only parameter.
2102+ After all of the 'data_ready' callbacks are called, the 'start' callbacks
2103+ are fired.
2104+
2105+ The 'data_lost' value should be either a single callback, or a list of
2106+ callbacks, to be called when a 'required_data' item no longer passes
2107+ `is_ready()`. Each callback will be called with the service name as the
2108+ only parameter. After all of the 'data_lost' callbacks are called,
2109+ the 'stop' callbacks are fired.
2110+
2111+ The 'start' value should be either a single callback, or a list of
2112+ callbacks, to be called when starting the service, after the 'data_ready'
2113+ callbacks are complete. Each callback will be called with the service
2114+ name as the only parameter. This defaults to
2115+ `[host.service_start, services.open_ports]`.
2116+
2117+ The 'stop' value should be either a single callback, or a list of
2118+ callbacks, to be called when stopping the service. If the service is
2119+ being stopped because it no longer has all of its 'required_data', this
2120+ will be called after all of the 'data_lost' callbacks are complete.
2121+ Each callback will be called with the service name as the only parameter.
2122+ This defaults to `[services.close_ports, host.service_stop]`.
2123+
2124+ The 'ports' value should be a list of ports to manage. The default
2125+ 'start' handler will open the ports after the service is started,
2126+ and the default 'stop' handler will close the ports prior to stopping
2127+ the service.
2128+
2129+
2130+ Examples:
2131+
2132+ The following registers an Upstart service called bingod that depends on
2133+ a mongodb relation and which runs a custom `db_migrate` function prior to
2134+ restarting the service, and a Runit service called spadesd::
2135+
2136+ manager = services.ServiceManager([
2137+ {
2138+ 'service': 'bingod',
2139+ 'ports': [80, 443],
2140+ 'required_data': [MongoRelation(), config(), {'my': 'data'}],
2141+ 'data_ready': [
2142+ services.template(source='bingod.conf'),
2143+ services.template(source='bingod.ini',
2144+ target='/etc/bingod.ini',
2145+ owner='bingo', perms=0400),
2146+ ],
2147+ },
2148+ {
2149+ 'service': 'spadesd',
2150+ 'data_ready': services.template(source='spadesd_run.j2',
2151+ target='/etc/sv/spadesd/run',
2152+ perms=0555),
2153+ 'start': runit_start,
2154+ 'stop': runit_stop,
2155+ },
2156+ ])
2157+ manager.manage()
2158+ """
2159+ self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
2160+ self._ready = None
2161+ self.services = {}
2162+ for service in services or []:
2163+ service_name = service['service']
2164+ self.services[service_name] = service
2165+
2166+ def manage(self):
2167+ """
2168+ Handle the current hook by doing The Right Thing with the registered services.
2169+ """
2170+ hook_name = hookenv.hook_name()
2171+ if hook_name == 'stop':
2172+ self.stop_services()
2173+ else:
2174+ self.provide_data()
2175+ self.reconfigure_services()
2176+ cfg = hookenv.config()
2177+ if cfg.implicit_save:
2178+ cfg.save()
2179+
2180+ def provide_data(self):
2181+ """
2182+ Set the relation data for each provider in the ``provided_data`` list.
2183+
2184+ A provider must have a `name` attribute, which indicates which relation
2185+ to set data on, and a `provide_data()` method, which returns a dict of
2186+ data to set.
2187+ """
2188+ hook_name = hookenv.hook_name()
2189+ for service in self.services.values():
2190+ for provider in service.get('provided_data', []):
2191+ if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
2192+ data = provider.provide_data()
2193+ _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
2194+ if _ready:
2195+ hookenv.relation_set(None, data)
2196+
2197+ def reconfigure_services(self, *service_names):
2198+ """
2199+ Update all files for one or more registered services, and,
2200+ if ready, optionally restart them.
2201+
2202+ If no service names are given, reconfigures all registered services.
2203+ """
2204+ for service_name in service_names or self.services.keys():
2205+ if self.is_ready(service_name):
2206+ self.fire_event('data_ready', service_name)
2207+ self.fire_event('start', service_name, default=[
2208+ service_restart,
2209+ manage_ports])
2210+ self.save_ready(service_name)
2211+ else:
2212+ if self.was_ready(service_name):
2213+ self.fire_event('data_lost', service_name)
2214+ self.fire_event('stop', service_name, default=[
2215+ manage_ports,
2216+ service_stop])
2217+ self.save_lost(service_name)
2218+
2219+ def stop_services(self, *service_names):
2220+ """
2221+ Stop one or more registered services, by name.
2222+
2223+ If no service names are given, stops all registered services.
2224+ """
2225+ for service_name in service_names or self.services.keys():
2226+ self.fire_event('stop', service_name, default=[
2227+ manage_ports,
2228+ service_stop])
2229+
2230+ def get_service(self, service_name):
2231+ """
2232+ Given the name of a registered service, return its service definition.
2233+ """
2234+ service = self.services.get(service_name)
2235+ if not service:
2236+ raise KeyError('Service not registered: %s' % service_name)
2237+ return service
2238+
2239+ def fire_event(self, event_name, service_name, default=None):
2240+ """
2241+ Fire a data_ready, data_lost, start, or stop event on a given service.
2242+ """
2243+ service = self.get_service(service_name)
2244+ callbacks = service.get(event_name, default)
2245+ if not callbacks:
2246+ return
2247+ if not isinstance(callbacks, Iterable):
2248+ callbacks = [callbacks]
2249+ for callback in callbacks:
2250+ if isinstance(callback, ManagerCallback):
2251+ callback(self, service_name, event_name)
2252+ else:
2253+ callback(service_name)
2254+
2255+ def is_ready(self, service_name):
2256+ """
2257+ Determine if a registered service is ready, by checking its 'required_data'.
2258+
2259+ A 'required_data' item can be any mapping type, and is considered ready
2260+ if `bool(item)` evaluates as True.
2261+ """
2262+ service = self.get_service(service_name)
2263+ reqs = service.get('required_data', [])
2264+ return all(bool(req) for req in reqs)
2265+
2266+ def _load_ready_file(self):
2267+ if self._ready is not None:
2268+ return
2269+ if os.path.exists(self._ready_file):
2270+ with open(self._ready_file) as fp:
2271+ self._ready = set(json.load(fp))
2272+ else:
2273+ self._ready = set()
2274+
2275+ def _save_ready_file(self):
2276+ if self._ready is None:
2277+ return
2278+ with open(self._ready_file, 'w') as fp:
2279+ json.dump(list(self._ready), fp)
2280+
2281+ def save_ready(self, service_name):
2282+ """
2283+ Save an indicator that the given service is now data_ready.
2284+ """
2285+ self._load_ready_file()
2286+ self._ready.add(service_name)
2287+ self._save_ready_file()
2288+
2289+ def save_lost(self, service_name):
2290+ """
2291+ Save an indicator that the given service is no longer data_ready.
2292+ """
2293+ self._load_ready_file()
2294+ self._ready.discard(service_name)
2295+ self._save_ready_file()
2296+
2297+ def was_ready(self, service_name):
2298+ """
2299+ Determine if the given service was previously data_ready.
2300+ """
2301+ self._load_ready_file()
2302+ return service_name in self._ready
2303+
2304+
2305+class ManagerCallback(object):
2306+ """
2307+ Special case of a callback that takes the `ServiceManager` instance
2308+ in addition to the service name.
2309+
2310+ Subclasses should implement `__call__` which should accept three parameters:
2311+
2312+ * `manager` The `ServiceManager` instance
2313+ * `service_name` The name of the service it's being triggered for
2314+ * `event_name` The name of the event that this callback is handling
2315+ """
2316+ def __call__(self, manager, service_name, event_name):
2317+ raise NotImplementedError()
2318+
2319+
2320+class PortManagerCallback(ManagerCallback):
2321+ """
2322+ Callback class that will open or close ports, for use as either
2323+ a start or stop action.
2324+ """
2325+ def __call__(self, manager, service_name, event_name):
2326+ service = manager.get_service(service_name)
2327+ new_ports = service.get('ports', [])
2328+ port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
2329+ if os.path.exists(port_file):
2330+ with open(port_file) as fp:
2331+ old_ports = fp.read().split(',')
2332+ for old_port in old_ports:
2333+ if bool(old_port):
2334+ old_port = int(old_port)
2335+ if old_port not in new_ports:
2336+ hookenv.close_port(old_port)
2337+ with open(port_file, 'w') as fp:
2338+ fp.write(','.join(str(port) for port in new_ports))
2339+ for port in new_ports:
2340+ if event_name == 'start':
2341+ hookenv.open_port(port)
2342+ elif event_name == 'stop':
2343+ hookenv.close_port(port)
2344+
2345+
2346+def service_stop(service_name):
2347+ """
2348+ Wrapper around host.service_stop to prevent spurious "unknown service"
2349+ messages in the logs.
2350+ """
2351+ if host.service_running(service_name):
2352+ host.service_stop(service_name)
2353+
2354+
2355+def service_restart(service_name):
2356+ """
2357+ Wrapper around host.service_restart to prevent spurious "unknown service"
2358+ messages in the logs.
2359+ """
2360+ if host.service_available(service_name):
2361+ if host.service_running(service_name):
2362+ host.service_restart(service_name)
2363+ else:
2364+ host.service_start(service_name)
2365+
2366+
2367+# Convenience aliases
2368+open_ports = close_ports = manage_ports = PortManagerCallback()
2369
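The ServiceManager docstring above already carries a full example; as a complement, a hypothetical ManagerCallback subclass following the three-argument __call__ contract described for that class:

    from charmhelpers.core import hookenv
    from charmhelpers.core.services.base import ManagerCallback


    class LogEvent(ManagerCallback):
        """Hypothetical callback that records which event fired for a service."""

        def __call__(self, manager, service_name, event_name):
            hookenv.log('%s handled %s' % (service_name, event_name))
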
2370=== added file 'hooks/charmhelpers/core/services/helpers.py'
2371--- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000
2372+++ hooks/charmhelpers/core/services/helpers.py 2014-09-26 08:15:24 +0000
2373@@ -0,0 +1,239 @@
2374+import os
2375+import yaml
2376+from charmhelpers.core import hookenv
2377+from charmhelpers.core import templating
2378+
2379+from charmhelpers.core.services.base import ManagerCallback
2380+
2381+
2382+__all__ = ['RelationContext', 'TemplateCallback',
2383+ 'render_template', 'template']
2384+
2385+
2386+class RelationContext(dict):
2387+ """
2388+ Base class for a context generator that gets relation data from juju.
2389+
2390+ Subclasses must provide the attributes `name`, which is the name of the
2391+ interface of interest, `interface`, which is the type of the interface of
2392+ interest, and `required_keys`, which is the set of keys required for the
2393+ relation to be considered complete. The data for all interfaces matching
2394+ the `name` attribute that are complete will used to populate the dictionary
2395+ the `name` attribute that are complete will be used to populate the dictionary
2396+
2397+ The generated context will be namespaced under the relation :attr:`name`,
2398+ to prevent potential naming conflicts.
2399+
2400+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
2401+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
2402+ """
2403+ name = None
2404+ interface = None
2405+ required_keys = []
2406+
2407+ def __init__(self, name=None, additional_required_keys=None):
2408+ if name is not None:
2409+ self.name = name
2410+ if additional_required_keys is not None:
2411+ self.required_keys.extend(additional_required_keys)
2412+ self.get_data()
2413+
2414+ def __bool__(self):
2415+ """
2416+ Returns True if all of the required_keys are available.
2417+ """
2418+ return self.is_ready()
2419+
2420+ __nonzero__ = __bool__
2421+
2422+ def __repr__(self):
2423+ return super(RelationContext, self).__repr__()
2424+
2425+ def is_ready(self):
2426+ """
2427+ Returns True if all of the `required_keys` are available from any units.
2428+ """
2429+ ready = len(self.get(self.name, [])) > 0
2430+ if not ready:
2431+ hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
2432+ return ready
2433+
2434+ def _is_ready(self, unit_data):
2435+ """
2436+ Helper method that tests a set of relation data and returns True if
2437+ all of the `required_keys` are present.
2438+ """
2439+ return set(unit_data.keys()).issuperset(set(self.required_keys))
2440+
2441+ def get_data(self):
2442+ """
2443+ Retrieve the relation data for each unit involved in a relation and,
2444+ if complete, store it in a list under `self[self.name]`. This
2445+ is automatically called when the RelationContext is instantiated.
2446+
2447+ The units are sorted lexicographically first by the service ID, then by
2448+ the unit ID. Thus, if an interface has two other services, 'db:1'
2449+ and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
2450+ and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
2451+ set of data, the relation data for the units will be stored in the
2452+ order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
2453+
2454+ If you only care about a single unit on the relation, you can just
2455+ access it as `{{ interface[0]['key'] }}`. However, if you can at all
2456+ support multiple units on a relation, you should iterate over the list,
2457+ like::
2458+
2459+ {% for unit in interface -%}
2460+ {{ unit['key'] }}{% if not loop.last %},{% endif %}
2461+ {%- endfor %}
2462+
2463+ Note that since all sets of relation data from all related services and
2464+ units are in a single list, if you need to know which service or unit a
2465+ set of data came from, you'll need to extend this class to preserve
2466+ that information.
2467+ """
2468+ if not hookenv.relation_ids(self.name):
2469+ return
2470+
2471+ ns = self.setdefault(self.name, [])
2472+ for rid in sorted(hookenv.relation_ids(self.name)):
2473+ for unit in sorted(hookenv.related_units(rid)):
2474+ reldata = hookenv.relation_get(rid=rid, unit=unit)
2475+ if self._is_ready(reldata):
2476+ ns.append(reldata)
2477+
2478+ def provide_data(self):
2479+ """
2480+ Return data to be relation_set for this interface.
2481+ """
2482+ return {}
2483+
2484+
2485+class MysqlRelation(RelationContext):
2486+ """
2487+ Relation context for the `mysql` interface.
2488+
2489+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
2490+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
2491+ """
2492+ name = 'db'
2493+ interface = 'mysql'
2494+ required_keys = ['host', 'user', 'password', 'database']
2495+
2496+
2497+class HttpRelation(RelationContext):
2498+ """
2499+ Relation context for the `http` interface.
2500+
2501+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
2502+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
2503+ """
2504+ name = 'website'
2505+ interface = 'http'
2506+ required_keys = ['host', 'port']
2507+
2508+ def provide_data(self):
2509+ return {
2510+ 'host': hookenv.unit_get('private-address'),
2511+ 'port': 80,
2512+ }
2513+
2514+
2515+class RequiredConfig(dict):
2516+ """
2517+ Data context that loads config options with one or more mandatory options.
2518+
2519+ Once the required options have been changed from their default values, all
2520+ config options will be available, namespaced under `config` to prevent
2521+ potential naming conflicts (for example, between a config option and a
2522+ relation property).
2523+
2524+ :param list *args: List of options that must be changed from their default values.
2525+ """
2526+
2527+ def __init__(self, *args):
2528+ self.required_options = args
2529+ self['config'] = hookenv.config()
2530+ with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
2531+ self.config = yaml.load(fp).get('options', {})
2532+
2533+ def __bool__(self):
2534+ for option in self.required_options:
2535+ if option not in self['config']:
2536+ return False
2537+ current_value = self['config'][option]
2538+ default_value = self.config[option].get('default')
2539+ if current_value == default_value:
2540+ return False
2541+ if current_value in (None, '') and default_value in (None, ''):
2542+ return False
2543+ return True
2544+
2545+ def __nonzero__(self):
2546+ return self.__bool__()
2547+
2548+
2549+class StoredContext(dict):
2550+ """
2551+ A data context that always returns the data that it was first created with.
2552+
2553+ This is useful to do a one-time generation of things like passwords, that
2554+ will thereafter use the same value that was originally generated, instead
2555+ of generating a new value each time it is run.
2556+ """
2557+ def __init__(self, file_name, config_data):
2558+ """
2559+ If the file exists, populate `self` with the data from the file.
2560+ Otherwise, populate with the given data and persist it to the file.
2561+ """
2562+ if os.path.exists(file_name):
2563+ self.update(self.read_context(file_name))
2564+ else:
2565+ self.store_context(file_name, config_data)
2566+ self.update(config_data)
2567+
2568+ def store_context(self, file_name, config_data):
2569+ if not os.path.isabs(file_name):
2570+ file_name = os.path.join(hookenv.charm_dir(), file_name)
2571+ with open(file_name, 'w') as file_stream:
2572+ os.fchmod(file_stream.fileno(), 0600)
2573+ yaml.dump(config_data, file_stream)
2574+
2575+ def read_context(self, file_name):
2576+ if not os.path.isabs(file_name):
2577+ file_name = os.path.join(hookenv.charm_dir(), file_name)
2578+ with open(file_name, 'r') as file_stream:
2579+ data = yaml.load(file_stream)
2580+ if not data:
2581+ raise OSError("%s is empty" % file_name)
2582+ return data
2583+
2584+
2585+class TemplateCallback(ManagerCallback):
2586+ """
2587+ Callback class that will render a Jinja2 template, for use as a ready action.
2588+
2589+ :param str source: The template source file, relative to `$CHARM_DIR/templates`
2590+ :param str target: The target to write the rendered template to
2591+ :param str owner: The owner of the rendered file
2592+ :param str group: The group of the rendered file
2593+ :param int perms: The permissions of the rendered file
2594+ """
2595+ def __init__(self, source, target, owner='root', group='root', perms=0444):
2596+ self.source = source
2597+ self.target = target
2598+ self.owner = owner
2599+ self.group = group
2600+ self.perms = perms
2601+
2602+ def __call__(self, manager, service_name, event_name):
2603+ service = manager.get_service(service_name)
2604+ context = {}
2605+ for ctx in service.get('required_data', []):
2606+ context.update(ctx)
2607+ templating.render(self.source, self.target, context,
2608+ self.owner, self.group, self.perms)
2609+
2610+
2611+# Convenience aliases for templates
2612+render_template = template = TemplateCallback
2613
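Following the MysqlRelation/HttpRelation pattern above, a hypothetical RelationContext for an amqp interface; the relation name, interface and required keys are assumptions, not taken from this charm:

    from charmhelpers.core.services.helpers import RelationContext


    class AmqpRelation(RelationContext):
        """Hypothetical context for an `amqp` relation."""
        name = 'amqp'
        interface = 'rabbitmq'
        required_keys = ['hostname', 'password']
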
2614=== added file 'hooks/charmhelpers/core/templating.py'
2615--- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000
2616+++ hooks/charmhelpers/core/templating.py 2014-09-26 08:15:24 +0000
2617@@ -0,0 +1,51 @@
2618+import os
2619+
2620+from charmhelpers.core import host
2621+from charmhelpers.core import hookenv
2622+
2623+
2624+def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
2625+ """
2626+ Render a template.
2627+
2628+ The `source` path, if not absolute, is relative to the `templates_dir`.
2629+
2630+ The `target` path should be absolute.
2631+
2632+ The context should be a dict containing the values to be replaced in the
2633+ template.
2634+
2635+ The `owner`, `group`, and `perms` options will be passed to `write_file`.
2636+
2637+ If omitted, `templates_dir` defaults to the `templates` folder in the charm.
2638+
2639+ Note: Using this requires python-jinja2; if it is not installed, calling
2640+ this will attempt to use charmhelpers.fetch.apt_install to install it.
2641+ """
2642+ try:
2643+ from jinja2 import FileSystemLoader, Environment, exceptions
2644+ except ImportError:
2645+ try:
2646+ from charmhelpers.fetch import apt_install
2647+ except ImportError:
2648+ hookenv.log('Could not import jinja2, and could not import '
2649+ 'charmhelpers.fetch to install it',
2650+ level=hookenv.ERROR)
2651+ raise
2652+ apt_install('python-jinja2', fatal=True)
2653+ from jinja2 import FileSystemLoader, Environment, exceptions
2654+
2655+ if templates_dir is None:
2656+ templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
2657+ loader = Environment(loader=FileSystemLoader(templates_dir))
2658+ try:
2659+ source = source
2660+ template = loader.get_template(source)
2661+ except exceptions.TemplateNotFound as e:
2662+ hookenv.log('Could not load template %s from %s.' %
2663+ (source, templates_dir),
2664+ level=hookenv.ERROR)
2665+ raise e
2666+ content = template.render(context)
2667+ host.mkdir(os.path.dirname(target))
2668+ host.write_file(target, content, owner, group, perms)
2669
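A small sketch of the new core templating helper; the template name, target path and context keys are illustrative only. As the docstring notes, python-jinja2 is apt-installed on demand if missing:

    from charmhelpers.core import templating

    templating.render(
        source='rabbitmq.config',   # resolved under $CHARM_DIR/templates by default
        target='/etc/rabbitmq/rabbitmq.config',
        context={'cluster_partition_handling': 'ignore'},
        owner='root', group='root', perms=0444)
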
2670=== modified file 'hooks/charmhelpers/fetch/__init__.py'
2671--- hooks/charmhelpers/fetch/__init__.py 2014-05-02 13:03:56 +0000
2672+++ hooks/charmhelpers/fetch/__init__.py 2014-09-26 08:15:24 +0000
2673@@ -1,4 +1,6 @@
2674 import importlib
2675+from tempfile import NamedTemporaryFile
2676+import time
2677 from yaml import safe_load
2678 from charmhelpers.core.host import (
2679 lsb_release
2680@@ -12,9 +14,9 @@
2681 config,
2682 log,
2683 )
2684-import apt_pkg
2685 import os
2686
2687+
2688 CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
2689 deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
2690 """
2691@@ -54,13 +56,68 @@
2692 'icehouse/proposed': 'precise-proposed/icehouse',
2693 'precise-icehouse/proposed': 'precise-proposed/icehouse',
2694 'precise-proposed/icehouse': 'precise-proposed/icehouse',
2695+ # Juno
2696+ 'juno': 'trusty-updates/juno',
2697+ 'trusty-juno': 'trusty-updates/juno',
2698+ 'trusty-juno/updates': 'trusty-updates/juno',
2699+ 'trusty-updates/juno': 'trusty-updates/juno',
2700+ 'juno/proposed': 'trusty-proposed/juno',
2701+ 'juno/proposed': 'trusty-proposed/juno',
2702+ 'trusty-juno/proposed': 'trusty-proposed/juno',
2703+ 'trusty-proposed/juno': 'trusty-proposed/juno',
2704 }
2705
2706+# The order of this list is very important. Handlers should be listed in from
2707+# least- to most-specific URL matching.
2708+FETCH_HANDLERS = (
2709+ 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
2710+ 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
2711+)
2712+
2713+APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
2714+APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
2715+APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
2716+
2717+
2718+class SourceConfigError(Exception):
2719+ pass
2720+
2721+
2722+class UnhandledSource(Exception):
2723+ pass
2724+
2725+
2726+class AptLockError(Exception):
2727+ pass
2728+
2729+
2730+class BaseFetchHandler(object):
2731+
2732+ """Base class for FetchHandler implementations in fetch plugins"""
2733+
2734+ def can_handle(self, source):
2735+ """Returns True if the source can be handled. Otherwise returns
2736+ a string explaining why it cannot"""
2737+ return "Wrong source type"
2738+
2739+ def install(self, source):
2740+ """Try to download and unpack the source. Return the path to the
2741+ unpacked files or raise UnhandledSource."""
2742+ raise UnhandledSource("Wrong source type {}".format(source))
2743+
2744+ def parse_url(self, url):
2745+ return urlparse(url)
2746+
2747+ def base_url(self, url):
2748+ """Return url without querystring or fragment"""
2749+ parts = list(self.parse_url(url))
2750+ parts[4:] = ['' for i in parts[4:]]
2751+ return urlunparse(parts)
2752+
2753
2754 def filter_installed_packages(packages):
2755 """Returns a list of packages that require installation"""
2756- apt_pkg.init()
2757- cache = apt_pkg.Cache()
2758+ cache = apt_cache()
2759 _pkgs = []
2760 for package in packages:
2761 try:
2762@@ -73,6 +130,16 @@
2763 return _pkgs
2764
2765
2766+def apt_cache(in_memory=True):
2767+ """Build and return an apt cache"""
2768+ import apt_pkg
2769+ apt_pkg.init()
2770+ if in_memory:
2771+ apt_pkg.config.set("Dir::Cache::pkgcache", "")
2772+ apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
2773+ return apt_pkg.Cache()
2774+
2775+
2776 def apt_install(packages, options=None, fatal=False):
2777 """Install one or more packages"""
2778 if options is None:
2779@@ -87,14 +154,7 @@
2780 cmd.extend(packages)
2781 log("Installing {} with options: {}".format(packages,
2782 options))
2783- env = os.environ.copy()
2784- if 'DEBIAN_FRONTEND' not in env:
2785- env['DEBIAN_FRONTEND'] = 'noninteractive'
2786-
2787- if fatal:
2788- subprocess.check_call(cmd, env=env)
2789- else:
2790- subprocess.call(cmd, env=env)
2791+ _run_apt_command(cmd, fatal)
2792
2793
2794 def apt_upgrade(options=None, fatal=False, dist=False):
2795@@ -109,24 +169,13 @@
2796 else:
2797 cmd.append('upgrade')
2798 log("Upgrading with options: {}".format(options))
2799-
2800- env = os.environ.copy()
2801- if 'DEBIAN_FRONTEND' not in env:
2802- env['DEBIAN_FRONTEND'] = 'noninteractive'
2803-
2804- if fatal:
2805- subprocess.check_call(cmd, env=env)
2806- else:
2807- subprocess.call(cmd, env=env)
2808+ _run_apt_command(cmd, fatal)
2809
2810
2811 def apt_update(fatal=False):
2812 """Update local apt cache"""
2813 cmd = ['apt-get', 'update']
2814- if fatal:
2815- subprocess.check_call(cmd)
2816- else:
2817- subprocess.call(cmd)
2818+ _run_apt_command(cmd, fatal)
2819
2820
2821 def apt_purge(packages, fatal=False):
2822@@ -137,10 +186,7 @@
2823 else:
2824 cmd.extend(packages)
2825 log("Purging {}".format(packages))
2826- if fatal:
2827- subprocess.check_call(cmd)
2828- else:
2829- subprocess.call(cmd)
2830+ _run_apt_command(cmd, fatal)
2831
2832
2833 def apt_hold(packages, fatal=False):
2834@@ -151,6 +197,7 @@
2835 else:
2836 cmd.extend(packages)
2837 log("Holding {}".format(packages))
2838+
2839 if fatal:
2840 subprocess.check_call(cmd)
2841 else:
2842@@ -158,6 +205,28 @@
2843
2844
2845 def add_source(source, key=None):
2846+ """Add a package source to this system.
2847+
2848+ @param source: a URL or sources.list entry, as supported by
2849+ add-apt-repository(1). Examples::
2850+
2851+ ppa:charmers/example
2852+ deb https://stub:key@private.example.com/ubuntu trusty main
2853+
2854+ In addition:
2855+ 'proposed:' may be used to enable the standard 'proposed'
2856+ pocket for the release.
2857+ 'cloud:' may be used to activate official cloud archive pockets,
2858+ such as 'cloud:icehouse'
2859+
2860+ @param key: A key to be added to the system's APT keyring and used
2861+ to verify the signatures on packages. Ideally, this should be an
2862+ ASCII format GPG public key including the block headers. A GPG key
2863+ id may also be used, but be aware that only insecure protocols are
2864+ available to retrieve the actual public key from a public keyserver
2865+ available to retrieve the actual public key from a public keyserver,
2866+ placing your Juju environment at risk. ppa and cloud archive keys
2867+ are securely added automatically, so should not be provided.
2868 if source is None:
2869 log('Source is not present. Skipping')
2870 return
2871@@ -182,76 +251,96 @@
2872 release = lsb_release()['DISTRIB_CODENAME']
2873 with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
2874 apt.write(PROPOSED_POCKET.format(release))
2875+ else:
2876+ raise SourceConfigError("Unknown source: {!r}".format(source))
2877+
2878 if key:
2879- subprocess.check_call(['apt-key', 'adv', '--keyserver',
2880- 'hkp://keyserver.ubuntu.com:80', '--recv',
2881- key])
2882-
2883-
2884-class SourceConfigError(Exception):
2885- pass
2886+ if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
2887+ with NamedTemporaryFile() as key_file:
2888+ key_file.write(key)
2889+ key_file.flush()
2890+ key_file.seek(0)
2891+ subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
2892+ else:
2893+ # Note that hkp: is in no way a secure protocol. Using a
2894+ # GPG key id is pointless from a security POV unless you
2895+ # absolutely trust your network and DNS.
2896+ subprocess.check_call(['apt-key', 'adv', '--keyserver',
2897+ 'hkp://keyserver.ubuntu.com:80', '--recv',
2898+ key])
2899
2900
2901 def configure_sources(update=False,
2902 sources_var='install_sources',
2903 keys_var='install_keys'):
2904 """
2905- Configure multiple sources from charm configuration
2906+ Configure multiple sources from charm configuration.
2907+
2908+ The lists are encoded as yaml fragments in the configuration.
2909+ The fragment needs to be included as a string. Sources and their
2910+ corresponding keys are of the types supported by add_source().
2911
2912 Example config:
2913- install_sources:
2914+ install_sources: |
2915 - "ppa:foo"
2916 - "http://example.com/repo precise main"
2917- install_keys:
2918+ install_keys: |
2919 - null
2920 - "a1b2c3d4"
2921
2922 Note that 'null' (a.k.a. None) should not be quoted.
2923 """
2924- sources = safe_load(config(sources_var))
2925- keys = config(keys_var)
2926- if keys is not None:
2927- keys = safe_load(keys)
2928- if isinstance(sources, basestring) and (
2929- keys is None or isinstance(keys, basestring)):
2930- add_source(sources, keys)
2931+ sources = safe_load((config(sources_var) or '').strip()) or []
2932+ keys = safe_load((config(keys_var) or '').strip()) or None
2933+
2934+ if isinstance(sources, basestring):
2935+ sources = [sources]
2936+
2937+ if keys is None:
2938+ for source in sources:
2939+ add_source(source, None)
2940 else:
2941- if not len(sources) == len(keys):
2942- msg = 'Install sources and keys lists are different lengths'
2943- raise SourceConfigError(msg)
2944- for src_num in range(len(sources)):
2945- add_source(sources[src_num], keys[src_num])
2946+ if isinstance(keys, basestring):
2947+ keys = [keys]
2948+
2949+ if len(sources) != len(keys):
2950+ raise SourceConfigError(
2951+ 'Install sources and keys lists are different lengths')
2952+ for source, key in zip(sources, keys):
2953+ add_source(source, key)
2954 if update:
2955 apt_update(fatal=True)
2956
2957-# The order of this list is very important. Handlers should be listed in from
2958-# least- to most-specific URL matching.
2959-FETCH_HANDLERS = (
2960- 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
2961- 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
2962-)
2963-
2964-
2965-class UnhandledSource(Exception):
2966- pass
2967-
2968-
2969-def install_remote(source):
2970+
2971+def install_remote(source, *args, **kwargs):
2972 """
2973 Install a file tree from a remote source
2974
2975 The specified source should be a url of the form:
2976 scheme://[host]/path[#[option=value][&...]]
2977
2978- Schemes supported are based on this modules submodules
2979- Options supported are submodule-specific"""
2980+ Schemes supported are based on this modules submodules.
2981+ Options supported are submodule-specific.
2982+ Additional arguments are passed through to the submodule.
2983+
2984+ For example::
2985+
2986+ dest = install_remote('http://example.com/archive.tgz',
2987+ checksum='deadbeef',
2988+ hash_type='sha1')
2989+
2990+ This will download `archive.tgz`, validate it using SHA1 and, if
2991+ the file is ok, extract it and return the directory in which it
2992+ was extracted. If the checksum fails, it will raise
2993+ :class:`charmhelpers.core.host.ChecksumError`.
2994+ """
2995 # We ONLY check for True here because can_handle may return a string
2996 # explaining why it can't handle a given source.
2997 handlers = [h for h in plugins() if h.can_handle(source) is True]
2998 installed_to = None
2999 for handler in handlers:
3000 try:
3001- installed_to = handler.install(source)
3002+ installed_to = handler.install(source, *args, **kwargs)
3003 except UnhandledSource:
3004 pass
3005 if not installed_to:
3006@@ -265,30 +354,6 @@
3007 return install_remote(source)
3008
3009
3010-class BaseFetchHandler(object):
3011-
3012- """Base class for FetchHandler implementations in fetch plugins"""
3013-
3014- def can_handle(self, source):
3015- """Returns True if the source can be handled. Otherwise returns
3016- a string explaining why it cannot"""
3017- return "Wrong source type"
3018-
3019- def install(self, source):
3020- """Try to download and unpack the source. Return the path to the
3021- unpacked files or raise UnhandledSource."""
3022- raise UnhandledSource("Wrong source type {}".format(source))
3023-
3024- def parse_url(self, url):
3025- return urlparse(url)
3026-
3027- def base_url(self, url):
3028- """Return url without querystring or fragment"""
3029- parts = list(self.parse_url(url))
3030- parts[4:] = ['' for i in parts[4:]]
3031- return urlunparse(parts)
3032-
3033-
3034 def plugins(fetch_handlers=None):
3035 if not fetch_handlers:
3036 fetch_handlers = FETCH_HANDLERS
3037@@ -306,3 +371,40 @@
3038 log("FetchHandler {} not found, skipping plugin".format(
3039 handler_name))
3040 return plugin_list
3041+
3042+
3043+def _run_apt_command(cmd, fatal=False):
3044+ """
3045+ Run an APT command, checking output and retrying if the fatal flag is set
3046+ to True.
3047+
3048+ :param: cmd: str: The apt command to run.
3049+ :param: fatal: bool: Whether the command's exit status should be checked,
3050+ retrying while the apt lock is held.
3051+ """
3052+ env = os.environ.copy()
3053+
3054+ if 'DEBIAN_FRONTEND' not in env:
3055+ env['DEBIAN_FRONTEND'] = 'noninteractive'
3056+
3057+ if fatal:
3058+ retry_count = 0
3059+ result = None
3060+
3061+ # If the command is considered "fatal", we need to retry if the apt
3062+ # lock was not acquired.
3063+
3064+ while result is None or result == APT_NO_LOCK:
3065+ try:
3066+ result = subprocess.check_call(cmd, env=env)
3067+ except subprocess.CalledProcessError, e:
3068+ retry_count = retry_count + 1
3069+ if retry_count > APT_NO_LOCK_RETRY_COUNT:
3070+ raise
3071+ result = e.returncode
3072+ log("Couldn't acquire DPKG lock. Will retry in {} seconds."
3073+ "".format(APT_NO_LOCK_RETRY_DELAY))
3074+ time.sleep(APT_NO_LOCK_RETRY_DELAY)
3075+
3076+ else:
3077+ subprocess.call(cmd, env=env)
3078
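The reworked fetch API as one hedged install flow; 'cloud:icehouse' is borrowed from the add_source() docstring. The in-memory apt_cache() and the lock retry in _run_apt_command() are what keep this from racing other charms for the apt/dpkg lock:

    from charmhelpers.fetch import (
        add_source,
        apt_install,
        apt_update,
        filter_installed_packages,
    )

    missing = filter_installed_packages(['rabbitmq-server'])
    if missing:
        add_source('cloud:icehouse')  # cloud archive keys are added automatically
        apt_update(fatal=True)        # fatal commands retry while the apt lock is held
        apt_install(missing, fatal=True)
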
3079=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
3080--- hooks/charmhelpers/fetch/archiveurl.py 2014-03-27 12:33:12 +0000
3081+++ hooks/charmhelpers/fetch/archiveurl.py 2014-09-26 08:15:24 +0000
3082@@ -1,6 +1,8 @@
3083 import os
3084 import urllib2
3085+from urllib import urlretrieve
3086 import urlparse
3087+import hashlib
3088
3089 from charmhelpers.fetch import (
3090 BaseFetchHandler,
3091@@ -10,11 +12,19 @@
3092 get_archive_handler,
3093 extract,
3094 )
3095-from charmhelpers.core.host import mkdir
3096+from charmhelpers.core.host import mkdir, check_hash
3097
3098
3099 class ArchiveUrlFetchHandler(BaseFetchHandler):
3100- """Handler for archives via generic URLs"""
3101+ """
3102+ Handler to download archive files from arbitrary URLs.
3103+
3104+ Can fetch from http, https, ftp, and file URLs.
3105+
3106+ Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
3107+
3108+ Installs the contents of the archive in $CHARM_DIR/fetched/.
3109+ """
3110 def can_handle(self, source):
3111 url_parts = self.parse_url(source)
3112 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
3113@@ -24,6 +34,12 @@
3114 return False
3115
3116 def download(self, source, dest):
3117+ """
3118+ Download an archive file.
3119+
3120+ :param str source: URL pointing to an archive file.
3121+ :param str dest: Local path location to download archive file to.
3122+ """
3123 # propogate all exceptions
3124 # URLError, OSError, etc
3125 proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
3126@@ -48,7 +64,30 @@
3127 os.unlink(dest)
3128 raise e
3129
3130- def install(self, source):
3131+ # Mandatory file validation via Sha1 or MD5 hashing.
3132+ def download_and_validate(self, url, hashsum, validate="sha1"):
3133+ tempfile, headers = urlretrieve(url)
3134+ check_hash(tempfile, hashsum, validate)
3135+ return tempfile
3136+
3137+ def install(self, source, dest=None, checksum=None, hash_type='sha1'):
3138+ """
3139+ Download and install an archive file, with optional checksum validation.
3140+
3141+ The checksum can also be given on the `source` URL's fragment.
3142+ For example::
3143+
3144+ handler.install('http://example.com/file.tgz#sha1=deadbeef')
3145+
3146+ :param str source: URL pointing to an archive file.
3147+ :param str dest: Local destination path to install to. If not given,
3148+ installs to `$CHARM_DIR/archives/archive_file_name`.
3149+ :param str checksum: If given, validate the archive file after download.
3150+ :param str hash_type: Algorithm used to generate `checksum`.
3151+ Can be any hash algorithm supported by :mod:`hashlib`,
3152+ such as md5, sha1, sha256, sha512, etc.
3153+
3154+ """
3155 url_parts = self.parse_url(source)
3156 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
3157 if not os.path.exists(dest_dir):
3158@@ -60,4 +99,10 @@
3159 raise UnhandledSource(e.reason)
3160 except OSError as e:
3161 raise UnhandledSource(e.strerror)
3162- return extract(dld_file)
3163+ options = urlparse.parse_qs(url_parts.fragment)
3164+ for key, value in options.items():
3165+ if key in hashlib.algorithms:
3166+ check_hash(dld_file, value, key)
3167+ if checksum:
3168+ check_hash(dld_file, checksum, hash_type)
3169+ return extract(dld_file, dest)
3170
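The new checksum support in ArchiveUrlFetchHandler restated as a usage sketch; URLs and digests are placeholders echoing the docstrings above:

    from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

    handler = ArchiveUrlFetchHandler()

    # Checksum passed explicitly...
    dest = handler.install('http://example.com/archive.tgz',
                           checksum='deadbeef', hash_type='sha1')

    # ...or embedded in the URL fragment.
    dest = handler.install('http://example.com/file.tgz#sha1=deadbeef')
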
3171=== modified file 'hooks/charmhelpers/fetch/bzrurl.py'
3172--- hooks/charmhelpers/fetch/bzrurl.py 2014-03-05 12:57:20 +0000
3173+++ hooks/charmhelpers/fetch/bzrurl.py 2014-09-26 08:15:24 +0000
3174@@ -39,7 +39,8 @@
3175 def install(self, source):
3176 url_parts = self.parse_url(source)
3177 branch_name = url_parts.path.strip("/").split("/")[-1]
3178- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name)
3179+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
3180+ branch_name)
3181 if not os.path.exists(dest_dir):
3182 mkdir(dest_dir, perms=0755)
3183 try:
3184
3185=== modified file 'hooks/rabbit_utils.py'
3186--- hooks/rabbit_utils.py 2014-06-11 19:58:50 +0000
3187+++ hooks/rabbit_utils.py 2014-09-26 08:15:24 +0000
3188@@ -6,7 +6,6 @@
3189 import subprocess
3190 import glob
3191 from lib.utils import render_template
3192-import apt_pkg as apt
3193
3194 from charmhelpers.contrib.openstack.utils import (
3195 get_hostname,
3196@@ -21,7 +20,12 @@
3197 service_name
3198 )
3199
3200-from charmhelpers.core.host import pwgen, mkdir, write_file
3201+from charmhelpers.core.host import (
3202+ pwgen,
3203+ mkdir,
3204+ write_file,
3205+ cmp_pkgrevno,
3206+)
3207
3208 from charmhelpers.contrib.peerstorage import (
3209 peer_store,
3210@@ -103,21 +107,9 @@
3211 subprocess.check_call(cmd)
3212
3213
3214-def compare_version(base_version):
3215- apt.init()
3216- cache = apt.Cache()
3217- pkg = cache['rabbitmq-server']
3218- if pkg.current_ver:
3219- return apt.version_compare(
3220- apt.upstream_version(pkg.current_ver.ver_str),
3221- base_version)
3222- else:
3223- return False
3224-
3225-
3226 def cluster_with():
3227 log('Clustering with new node')
3228- if compare_version('3.0.1') >= 0:
3229+ if cmp_pkgrevno('rabbitmq-server', '3.0.1') >= 0:
3230 cluster_cmd = 'join_cluster'
3231 else:
3232 cluster_cmd = 'cluster'
3233@@ -167,7 +159,7 @@
3234 cmd = [RABBITMQ_CTL, 'start_app']
3235 subprocess.check_call(cmd)
3236 log('Host clustered with %s.' % node)
3237- if compare_version('3.0.1') >= 0:
3238+ if cmp_pkgrevno('rabbitmq-server', '3.0.1') >= 0:
3239 cmd = [RABBITMQ_CTL, 'set_policy', 'HA',
3240 '^(?!amq\.).*', '{"ha-mode": "all"}']
3241 subprocess.check_call(cmd)
