Merge lp:~tribaal/charms/trusty/swift-storage/trunk-fix-1350049 into lp:~openstack-charmers-archive/charms/trusty/swift-storage/trunk

Proposed by Chris Glass
Status: Merged
Merged at revision: 28
Proposed branch: lp:~tribaal/charms/trusty/swift-storage/trunk-fix-1350049
Merge into: lp:~openstack-charmers-archive/charms/trusty/swift-storage/trunk
Diff against target: 1611 lines (+968/-90)
21 files modified
Makefile (+8/-2)
charm-helpers.yaml (+1/-0)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+12/-2)
hooks/charmhelpers/contrib/network/ip.py (+156/-0)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+61/-0)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+275/-0)
hooks/charmhelpers/contrib/openstack/context.py (+95/-22)
hooks/charmhelpers/contrib/openstack/ip.py (+75/-0)
hooks/charmhelpers/contrib/openstack/neutron.py (+14/-0)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+6/-1)
hooks/charmhelpers/contrib/openstack/templating.py (+22/-23)
hooks/charmhelpers/contrib/openstack/utils.py (+11/-3)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+1/-1)
hooks/charmhelpers/contrib/storage/linux/utils.py (+4/-0)
hooks/charmhelpers/core/fstab.py (+116/-0)
hooks/charmhelpers/core/hookenv.py (+5/-4)
hooks/charmhelpers/core/host.py (+32/-12)
hooks/charmhelpers/fetch/__init__.py (+33/-16)
hooks/charmhelpers/fetch/bzrurl.py (+2/-1)
hooks/swift_storage_utils.py (+11/-3)
unit_tests/test_swift_storage_utils.py (+28/-0)
To merge this branch: bzr merge lp:~tribaal/charms/trusty/swift-storage/trunk-fix-1350049
Reviewer: Liam Young (community)
Review status: Approve
Review via email: mp+229009@code.launchpad.net

This proposal supersedes a proposal from 2014-07-31.

Description of the change

This branch fixes the swift-storage charm's "guess" config option so that disks with mounted partitions are skipped, instead of excluding devices via a hard-coded blacklist.

The linked bug has more details about the error condition; in short, the root filesystem does not always live on /dev/sda1, so the blacklist-based "detection" fails to exclude the disk that actually holds it. Testing for mounted partitions is more robust.
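
In code terms, the change boils down to one helper and one filter. The sketch below condenses the new logic in hooks/swift_storage_utils.py as it appears in the diff further down; note that the /proc/partitions parsing line is not visible in the diff hunk and is reconstructed here from the unit-test fixtures, so treat that one line as an assumption.

    import os
    import re

    from charmhelpers.contrib.storage.linux.utils import (
        is_block_device,
        is_device_mounted,
    )


    def _is_storage_ready(partition):
        # A device is only suitable for storage if it is a block device
        # and nothing on it is currently mounted.
        return is_block_device(partition) and not is_device_mounted(partition)


    def find_block_devices():
        found = []
        incl = [r'sd[a-z]', r'vd[a-z]', r'cciss\/c[0-9]d[0-9]']

        with open('/proc/partitions') as proc:
            # Assumption: skip the header lines and split each row on
            # whitespace, matching the layout of the test fixtures.
            partitions = [p.split() for p in proc.readlines()[2:]]

        for partition in [p[3] for p in partitions if p]:
            for inc in incl:
                if re.match(r'^(%s)$' % inc, partition):
                    found.append(os.path.join('/dev', partition))

        # No more hard-coded blacklist: anything with a mounted partition
        # is filtered out here.
        return [f for f in found if _is_storage_ready(f)]

Filtering on mount state rather than device names means the same code works whether the root disk is sda, vda, a cciss device, or something else entirely.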

30. By Chris Glass

Added missing network.ip charmhelpers entry.

31. By Chris Glass

Added new charmhelpers files.

Revision history for this message
Liam Young (gnuoy) wrote :

Approved

review: Approve

Preview Diff

1=== modified file 'Makefile'
2--- Makefile 2014-05-21 10:08:22 +0000
3+++ Makefile 2014-07-31 11:10:21 +0000
4@@ -8,10 +8,16 @@
5
6 test:
7 @echo Starting tests...
8+ @echo Please make sure the following deb files are installed: python-netaddr python-netifaces
9 @$(PYTHON) /usr/bin/nosetests -v --nologcapture --with-coverage unit_tests
10
11-sync:
12- @charm-helper-sync -c charm-helpers.yaml
13+bin/charm_helpers_sync.py:
14+ @mkdir -p bin
15+ @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
16+ > bin/charm_helpers_sync.py
17+
18+sync: bin/charm_helpers_sync.py
19+ @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml
20
21 publish: lint test
22 bzr push lp:charms/swift-storage
23
24=== modified file 'charm-helpers.yaml'
25--- charm-helpers.yaml 2014-03-25 17:05:07 +0000
26+++ charm-helpers.yaml 2014-07-31 11:10:21 +0000
27@@ -4,6 +4,7 @@
28 - core
29 - contrib.openstack|inc=*
30 - contrib.storage
31+ - contrib.network.ip
32 - fetch
33 - contrib.hahelpers:
34 - apache
35
36=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
37--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-02-24 17:52:34 +0000
38+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-07-31 11:10:21 +0000
39@@ -62,6 +62,15 @@
40 return peers
41
42
43+def peer_ips(peer_relation='cluster', addr_key='private-address'):
44+ '''Return a dict of peers and their private-address'''
45+ peers = {}
46+ for r_id in relation_ids(peer_relation):
47+ for unit in relation_list(r_id):
48+ peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
49+ return peers
50+
51+
52 def oldest_peer(peers):
53 local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
54 for peer in peers:
55@@ -146,12 +155,12 @@
56 Obtains all relevant configuration from charm configuration required
57 for initiating a relation to hacluster:
58
59- ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
60+ ha-bindiface, ha-mcastport, vip
61
62 returns: dict: A dict containing settings keyed by setting name.
63 raises: HAIncompleteConfig if settings are missing.
64 '''
65- settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
66+ settings = ['ha-bindiface', 'ha-mcastport', 'vip']
67 conf = {}
68 for setting in settings:
69 conf[setting] = config_get(setting)
70@@ -170,6 +179,7 @@
71
72 :configs : OSTemplateRenderer: A config tempating object to inspect for
73 a complete https context.
74+
75 :vip_setting: str: Setting in charm config that specifies
76 VIP address.
77 '''
78
79=== added directory 'hooks/charmhelpers/contrib/network'
80=== added file 'hooks/charmhelpers/contrib/network/__init__.py'
81=== added file 'hooks/charmhelpers/contrib/network/ip.py'
82--- hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000
83+++ hooks/charmhelpers/contrib/network/ip.py 2014-07-31 11:10:21 +0000
84@@ -0,0 +1,156 @@
85+import sys
86+
87+from functools import partial
88+
89+from charmhelpers.fetch import apt_install
90+from charmhelpers.core.hookenv import (
91+ ERROR, log,
92+)
93+
94+try:
95+ import netifaces
96+except ImportError:
97+ apt_install('python-netifaces')
98+ import netifaces
99+
100+try:
101+ import netaddr
102+except ImportError:
103+ apt_install('python-netaddr')
104+ import netaddr
105+
106+
107+def _validate_cidr(network):
108+ try:
109+ netaddr.IPNetwork(network)
110+ except (netaddr.core.AddrFormatError, ValueError):
111+ raise ValueError("Network (%s) is not in CIDR presentation format" %
112+ network)
113+
114+
115+def get_address_in_network(network, fallback=None, fatal=False):
116+ """
117+ Get an IPv4 or IPv6 address within the network from the host.
118+
119+ :param network (str): CIDR presentation format. For example,
120+ '192.168.1.0/24'.
121+ :param fallback (str): If no address is found, return fallback.
122+ :param fatal (boolean): If no address is found, fallback is not
123+ set and fatal is True then exit(1).
124+
125+ """
126+
127+ def not_found_error_out():
128+ log("No IP address found in network: %s" % network,
129+ level=ERROR)
130+ sys.exit(1)
131+
132+ if network is None:
133+ if fallback is not None:
134+ return fallback
135+ else:
136+ if fatal:
137+ not_found_error_out()
138+
139+ _validate_cidr(network)
140+ network = netaddr.IPNetwork(network)
141+ for iface in netifaces.interfaces():
142+ addresses = netifaces.ifaddresses(iface)
143+ if network.version == 4 and netifaces.AF_INET in addresses:
144+ addr = addresses[netifaces.AF_INET][0]['addr']
145+ netmask = addresses[netifaces.AF_INET][0]['netmask']
146+ cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
147+ if cidr in network:
148+ return str(cidr.ip)
149+ if network.version == 6 and netifaces.AF_INET6 in addresses:
150+ for addr in addresses[netifaces.AF_INET6]:
151+ if not addr['addr'].startswith('fe80'):
152+ cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
153+ addr['netmask']))
154+ if cidr in network:
155+ return str(cidr.ip)
156+
157+ if fallback is not None:
158+ return fallback
159+
160+ if fatal:
161+ not_found_error_out()
162+
163+ return None
164+
165+
166+def is_ipv6(address):
167+ '''Determine whether provided address is IPv6 or not'''
168+ try:
169+ address = netaddr.IPAddress(address)
170+ except netaddr.AddrFormatError:
171+ # probably a hostname - so not an address at all!
172+ return False
173+ else:
174+ return address.version == 6
175+
176+
177+def is_address_in_network(network, address):
178+ """
179+ Determine whether the provided address is within a network range.
180+
181+ :param network (str): CIDR presentation format. For example,
182+ '192.168.1.0/24'.
183+ :param address: An individual IPv4 or IPv6 address without a net
184+ mask or subnet prefix. For example, '192.168.1.1'.
185+ :returns boolean: Flag indicating whether address is in network.
186+ """
187+ try:
188+ network = netaddr.IPNetwork(network)
189+ except (netaddr.core.AddrFormatError, ValueError):
190+ raise ValueError("Network (%s) is not in CIDR presentation format" %
191+ network)
192+ try:
193+ address = netaddr.IPAddress(address)
194+ except (netaddr.core.AddrFormatError, ValueError):
195+ raise ValueError("Address (%s) is not in correct presentation format" %
196+ address)
197+ if address in network:
198+ return True
199+ else:
200+ return False
201+
202+
203+def _get_for_address(address, key):
204+ """Retrieve an attribute of or the physical interface that
205+ the IP address provided could be bound to.
206+
207+ :param address (str): An individual IPv4 or IPv6 address without a net
208+ mask or subnet prefix. For example, '192.168.1.1'.
209+ :param key: 'iface' for the physical interface name or an attribute
210+ of the configured interface, for example 'netmask'.
211+ :returns str: Requested attribute or None if address is not bindable.
212+ """
213+ address = netaddr.IPAddress(address)
214+ for iface in netifaces.interfaces():
215+ addresses = netifaces.ifaddresses(iface)
216+ if address.version == 4 and netifaces.AF_INET in addresses:
217+ addr = addresses[netifaces.AF_INET][0]['addr']
218+ netmask = addresses[netifaces.AF_INET][0]['netmask']
219+ cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
220+ if address in cidr:
221+ if key == 'iface':
222+ return iface
223+ else:
224+ return addresses[netifaces.AF_INET][0][key]
225+ if address.version == 6 and netifaces.AF_INET6 in addresses:
226+ for addr in addresses[netifaces.AF_INET6]:
227+ if not addr['addr'].startswith('fe80'):
228+ cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
229+ addr['netmask']))
230+ if address in cidr:
231+ if key == 'iface':
232+ return iface
233+ else:
234+ return addr[key]
235+ return None
236+
237+
238+get_iface_for_address = partial(_get_for_address, key='iface')
239+
240+get_netmask_for_address = partial(_get_for_address, key='netmask')
241
242=== added directory 'hooks/charmhelpers/contrib/openstack/amulet'
243=== added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py'
244=== added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
245--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
246+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-31 11:10:21 +0000
247@@ -0,0 +1,61 @@
248+from charmhelpers.contrib.amulet.deployment import (
249+ AmuletDeployment
250+)
251+
252+
253+class OpenStackAmuletDeployment(AmuletDeployment):
254+ """OpenStack amulet deployment.
255+
256+ This class inherits from AmuletDeployment and has additional support
257+ that is specifically for use by OpenStack charms.
258+ """
259+
260+ def __init__(self, series=None, openstack=None, source=None):
261+ """Initialize the deployment environment."""
262+ super(OpenStackAmuletDeployment, self).__init__(series)
263+ self.openstack = openstack
264+ self.source = source
265+
266+ def _add_services(self, this_service, other_services):
267+ """Add services to the deployment and set openstack-origin."""
268+ super(OpenStackAmuletDeployment, self)._add_services(this_service,
269+ other_services)
270+ name = 0
271+ services = other_services
272+ services.append(this_service)
273+ use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
274+
275+ if self.openstack:
276+ for svc in services:
277+ if svc[name] not in use_source:
278+ config = {'openstack-origin': self.openstack}
279+ self.d.configure(svc[name], config)
280+
281+ if self.source:
282+ for svc in services:
283+ if svc[name] in use_source:
284+ config = {'source': self.source}
285+ self.d.configure(svc[name], config)
286+
287+ def _configure_services(self, configs):
288+ """Configure all of the services."""
289+ for service, config in configs.iteritems():
290+ self.d.configure(service, config)
291+
292+ def _get_openstack_release(self):
293+ """Get openstack release.
294+
295+ Return an integer representing the enum value of the openstack
296+ release.
297+ """
298+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
299+ self.precise_havana, self.precise_icehouse,
300+ self.trusty_icehouse) = range(6)
301+ releases = {
302+ ('precise', None): self.precise_essex,
303+ ('precise', 'cloud:precise-folsom'): self.precise_folsom,
304+ ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
305+ ('precise', 'cloud:precise-havana'): self.precise_havana,
306+ ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
307+ ('trusty', None): self.trusty_icehouse}
308+ return releases[(self.series, self.openstack)]
309
310=== added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
311--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
312+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-31 11:10:21 +0000
313@@ -0,0 +1,275 @@
314+import logging
315+import os
316+import time
317+import urllib
318+
319+import glanceclient.v1.client as glance_client
320+import keystoneclient.v2_0 as keystone_client
321+import novaclient.v1_1.client as nova_client
322+
323+from charmhelpers.contrib.amulet.utils import (
324+ AmuletUtils
325+)
326+
327+DEBUG = logging.DEBUG
328+ERROR = logging.ERROR
329+
330+
331+class OpenStackAmuletUtils(AmuletUtils):
332+ """OpenStack amulet utilities.
333+
334+ This class inherits from AmuletUtils and has additional support
335+ that is specifically for use by OpenStack charms.
336+ """
337+
338+ def __init__(self, log_level=ERROR):
339+ """Initialize the deployment environment."""
340+ super(OpenStackAmuletUtils, self).__init__(log_level)
341+
342+ def validate_endpoint_data(self, endpoints, admin_port, internal_port,
343+ public_port, expected):
344+ """Validate endpoint data.
345+
346+ Validate actual endpoint data vs expected endpoint data. The ports
347+ are used to find the matching endpoint.
348+ """
349+ found = False
350+ for ep in endpoints:
351+ self.log.debug('endpoint: {}'.format(repr(ep)))
352+ if (admin_port in ep.adminurl and
353+ internal_port in ep.internalurl and
354+ public_port in ep.publicurl):
355+ found = True
356+ actual = {'id': ep.id,
357+ 'region': ep.region,
358+ 'adminurl': ep.adminurl,
359+ 'internalurl': ep.internalurl,
360+ 'publicurl': ep.publicurl,
361+ 'service_id': ep.service_id}
362+ ret = self._validate_dict_data(expected, actual)
363+ if ret:
364+ return 'unexpected endpoint data - {}'.format(ret)
365+
366+ if not found:
367+ return 'endpoint not found'
368+
369+ def validate_svc_catalog_endpoint_data(self, expected, actual):
370+ """Validate service catalog endpoint data.
371+
372+ Validate a list of actual service catalog endpoints vs a list of
373+ expected service catalog endpoints.
374+ """
375+ self.log.debug('actual: {}'.format(repr(actual)))
376+ for k, v in expected.iteritems():
377+ if k in actual:
378+ ret = self._validate_dict_data(expected[k][0], actual[k][0])
379+ if ret:
380+ return self.endpoint_error(k, ret)
381+ else:
382+ return "endpoint {} does not exist".format(k)
383+ return ret
384+
385+ def validate_tenant_data(self, expected, actual):
386+ """Validate tenant data.
387+
388+ Validate a list of actual tenant data vs list of expected tenant
389+ data.
390+ """
391+ self.log.debug('actual: {}'.format(repr(actual)))
392+ for e in expected:
393+ found = False
394+ for act in actual:
395+ a = {'enabled': act.enabled, 'description': act.description,
396+ 'name': act.name, 'id': act.id}
397+ if e['name'] == a['name']:
398+ found = True
399+ ret = self._validate_dict_data(e, a)
400+ if ret:
401+ return "unexpected tenant data - {}".format(ret)
402+ if not found:
403+ return "tenant {} does not exist".format(e['name'])
404+ return ret
405+
406+ def validate_role_data(self, expected, actual):
407+ """Validate role data.
408+
409+ Validate a list of actual role data vs a list of expected role
410+ data.
411+ """
412+ self.log.debug('actual: {}'.format(repr(actual)))
413+ for e in expected:
414+ found = False
415+ for act in actual:
416+ a = {'name': act.name, 'id': act.id}
417+ if e['name'] == a['name']:
418+ found = True
419+ ret = self._validate_dict_data(e, a)
420+ if ret:
421+ return "unexpected role data - {}".format(ret)
422+ if not found:
423+ return "role {} does not exist".format(e['name'])
424+ return ret
425+
426+ def validate_user_data(self, expected, actual):
427+ """Validate user data.
428+
429+ Validate a list of actual user data vs a list of expected user
430+ data.
431+ """
432+ self.log.debug('actual: {}'.format(repr(actual)))
433+ for e in expected:
434+ found = False
435+ for act in actual:
436+ a = {'enabled': act.enabled, 'name': act.name,
437+ 'email': act.email, 'tenantId': act.tenantId,
438+ 'id': act.id}
439+ if e['name'] == a['name']:
440+ found = True
441+ ret = self._validate_dict_data(e, a)
442+ if ret:
443+ return "unexpected user data - {}".format(ret)
444+ if not found:
445+ return "user {} does not exist".format(e['name'])
446+ return ret
447+
448+ def validate_flavor_data(self, expected, actual):
449+ """Validate flavor data.
450+
451+ Validate a list of actual flavors vs a list of expected flavors.
452+ """
453+ self.log.debug('actual: {}'.format(repr(actual)))
454+ act = [a.name for a in actual]
455+ return self._validate_list_data(expected, act)
456+
457+ def tenant_exists(self, keystone, tenant):
458+ """Return True if tenant exists."""
459+ return tenant in [t.name for t in keystone.tenants.list()]
460+
461+ def authenticate_keystone_admin(self, keystone_sentry, user, password,
462+ tenant):
463+ """Authenticates admin user with the keystone admin endpoint."""
464+ unit = keystone_sentry
465+ service_ip = unit.relation('shared-db',
466+ 'mysql:shared-db')['private-address']
467+ ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
468+ return keystone_client.Client(username=user, password=password,
469+ tenant_name=tenant, auth_url=ep)
470+
471+ def authenticate_keystone_user(self, keystone, user, password, tenant):
472+ """Authenticates a regular user with the keystone public endpoint."""
473+ ep = keystone.service_catalog.url_for(service_type='identity',
474+ endpoint_type='publicURL')
475+ return keystone_client.Client(username=user, password=password,
476+ tenant_name=tenant, auth_url=ep)
477+
478+ def authenticate_glance_admin(self, keystone):
479+ """Authenticates admin user with glance."""
480+ ep = keystone.service_catalog.url_for(service_type='image',
481+ endpoint_type='adminURL')
482+ return glance_client.Client(ep, token=keystone.auth_token)
483+
484+ def authenticate_nova_user(self, keystone, user, password, tenant):
485+ """Authenticates a regular user with nova-api."""
486+ ep = keystone.service_catalog.url_for(service_type='identity',
487+ endpoint_type='publicURL')
488+ return nova_client.Client(username=user, api_key=password,
489+ project_id=tenant, auth_url=ep)
490+
491+ def create_cirros_image(self, glance, image_name):
492+ """Download the latest cirros image and upload it to glance."""
493+ http_proxy = os.getenv('AMULET_HTTP_PROXY')
494+ self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
495+ if http_proxy:
496+ proxies = {'http': http_proxy}
497+ opener = urllib.FancyURLopener(proxies)
498+ else:
499+ opener = urllib.FancyURLopener()
500+
501+ f = opener.open("http://download.cirros-cloud.net/version/released")
502+ version = f.read().strip()
503+ cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
504+
505+ if not os.path.exists(cirros_img):
506+ cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
507+ version, cirros_img)
508+ opener.retrieve(cirros_url, cirros_img)
509+ f.close()
510+
511+ with open(cirros_img) as f:
512+ image = glance.images.create(name=image_name, is_public=True,
513+ disk_format='qcow2',
514+ container_format='bare', data=f)
515+ count = 1
516+ status = image.status
517+ while status != 'active' and count < 10:
518+ time.sleep(3)
519+ image = glance.images.get(image.id)
520+ status = image.status
521+ self.log.debug('image status: {}'.format(status))
522+ count += 1
523+
524+ if status != 'active':
525+ self.log.error('image creation timed out')
526+ return None
527+
528+ return image
529+
530+ def delete_image(self, glance, image):
531+ """Delete the specified image."""
532+ num_before = len(list(glance.images.list()))
533+ glance.images.delete(image)
534+
535+ count = 1
536+ num_after = len(list(glance.images.list()))
537+ while num_after != (num_before - 1) and count < 10:
538+ time.sleep(3)
539+ num_after = len(list(glance.images.list()))
540+ self.log.debug('number of images: {}'.format(num_after))
541+ count += 1
542+
543+ if num_after != (num_before - 1):
544+ self.log.error('image deletion timed out')
545+ return False
546+
547+ return True
548+
549+ def create_instance(self, nova, image_name, instance_name, flavor):
550+ """Create the specified instance."""
551+ image = nova.images.find(name=image_name)
552+ flavor = nova.flavors.find(name=flavor)
553+ instance = nova.servers.create(name=instance_name, image=image,
554+ flavor=flavor)
555+
556+ count = 1
557+ status = instance.status
558+ while status != 'ACTIVE' and count < 60:
559+ time.sleep(3)
560+ instance = nova.servers.get(instance.id)
561+ status = instance.status
562+ self.log.debug('instance status: {}'.format(status))
563+ count += 1
564+
565+ if status != 'ACTIVE':
566+ self.log.error('instance creation timed out')
567+ return None
568+
569+ return instance
570+
571+ def delete_instance(self, nova, instance):
572+ """Delete the specified instance."""
573+ num_before = len(list(nova.servers.list()))
574+ nova.servers.delete(instance)
575+
576+ count = 1
577+ num_after = len(list(nova.servers.list()))
578+ while num_after != (num_before - 1) and count < 10:
579+ time.sleep(3)
580+ num_after = len(list(nova.servers.list()))
581+ self.log.debug('number of instances: {}'.format(num_after))
582+ count += 1
583+
584+ if num_after != (num_before - 1):
585+ self.log.error('instance deletion timed out')
586+ return False
587+
588+ return True
589
590=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
591--- hooks/charmhelpers/contrib/openstack/context.py 2014-05-19 11:41:35 +0000
592+++ hooks/charmhelpers/contrib/openstack/context.py 2014-07-31 11:10:21 +0000
593@@ -21,9 +21,11 @@
594 relation_get,
595 relation_ids,
596 related_units,
597+ relation_set,
598 unit_get,
599 unit_private_ip,
600 ERROR,
601+ INFO
602 )
603
604 from charmhelpers.contrib.hahelpers.cluster import (
605@@ -42,6 +44,8 @@
606 neutron_plugin_attribute,
607 )
608
609+from charmhelpers.contrib.network.ip import get_address_in_network
610+
611 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
612
613
614@@ -134,8 +138,26 @@
615 'Missing required charm config options. '
616 '(database name and user)')
617 raise OSContextError
618+
619 ctxt = {}
620
621+ # NOTE(jamespage) if mysql charm provides a network upon which
622+ # access to the database should be made, reconfigure relation
623+ # with the service units local address and defer execution
624+ access_network = relation_get('access-network')
625+ if access_network is not None:
626+ if self.relation_prefix is not None:
627+ hostname_key = "{}_hostname".format(self.relation_prefix)
628+ else:
629+ hostname_key = "hostname"
630+ access_hostname = get_address_in_network(access_network,
631+ unit_get('private-address'))
632+ set_hostname = relation_get(attribute=hostname_key,
633+ unit=local_unit())
634+ if set_hostname != access_hostname:
635+ relation_set(relation_settings={hostname_key: access_hostname})
636+ return ctxt # Defer any further hook execution for now....
637+
638 password_setting = 'password'
639 if self.relation_prefix:
640 password_setting = self.relation_prefix + '_password'
641@@ -243,23 +265,31 @@
642
643
644 class AMQPContext(OSContextGenerator):
645- interfaces = ['amqp']
646
647- def __init__(self, ssl_dir=None):
648+ def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
649 self.ssl_dir = ssl_dir
650+ self.rel_name = rel_name
651+ self.relation_prefix = relation_prefix
652+ self.interfaces = [rel_name]
653
654 def __call__(self):
655 log('Generating template context for amqp')
656 conf = config()
657+ user_setting = 'rabbit-user'
658+ vhost_setting = 'rabbit-vhost'
659+ if self.relation_prefix:
660+ user_setting = self.relation_prefix + '-rabbit-user'
661+ vhost_setting = self.relation_prefix + '-rabbit-vhost'
662+
663 try:
664- username = conf['rabbit-user']
665- vhost = conf['rabbit-vhost']
666+ username = conf[user_setting]
667+ vhost = conf[vhost_setting]
668 except KeyError as e:
669 log('Could not generate shared_db context. '
670 'Missing required charm config options: %s.' % e)
671 raise OSContextError
672 ctxt = {}
673- for rid in relation_ids('amqp'):
674+ for rid in relation_ids(self.rel_name):
675 ha_vip_only = False
676 for unit in related_units(rid):
677 if relation_get('clustered', rid=rid, unit=unit):
678@@ -332,10 +362,12 @@
679 use_syslog = str(config('use-syslog')).lower()
680 for rid in relation_ids('ceph'):
681 for unit in related_units(rid):
682- mon_hosts.append(relation_get('private-address', rid=rid,
683- unit=unit))
684 auth = relation_get('auth', rid=rid, unit=unit)
685 key = relation_get('key', rid=rid, unit=unit)
686+ ceph_addr = \
687+ relation_get('ceph-public-address', rid=rid, unit=unit) or \
688+ relation_get('private-address', rid=rid, unit=unit)
689+ mon_hosts.append(ceph_addr)
690
691 ctxt = {
692 'mon_hosts': ' '.join(mon_hosts),
693@@ -369,7 +401,9 @@
694
695 cluster_hosts = {}
696 l_unit = local_unit().replace('/', '-')
697- cluster_hosts[l_unit] = unit_get('private-address')
698+ cluster_hosts[l_unit] = \
699+ get_address_in_network(config('os-internal-network'),
700+ unit_get('private-address'))
701
702 for rid in relation_ids('cluster'):
703 for unit in related_units(rid):
704@@ -418,12 +452,13 @@
705 """
706 Generates a context for an apache vhost configuration that configures
707 HTTPS reverse proxying for one or many endpoints. Generated context
708- looks something like:
709- {
710- 'namespace': 'cinder',
711- 'private_address': 'iscsi.mycinderhost.com',
712- 'endpoints': [(8776, 8766), (8777, 8767)]
713- }
714+ looks something like::
715+
716+ {
717+ 'namespace': 'cinder',
718+ 'private_address': 'iscsi.mycinderhost.com',
719+ 'endpoints': [(8776, 8766), (8777, 8767)]
720+ }
721
722 The endpoints list consists of a tuples mapping external ports
723 to internal ports.
724@@ -541,6 +576,26 @@
725
726 return nvp_ctxt
727
728+ def n1kv_ctxt(self):
729+ driver = neutron_plugin_attribute(self.plugin, 'driver',
730+ self.network_manager)
731+ n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
732+ self.network_manager)
733+ n1kv_ctxt = {
734+ 'core_plugin': driver,
735+ 'neutron_plugin': 'n1kv',
736+ 'neutron_security_groups': self.neutron_security_groups,
737+ 'local_ip': unit_private_ip(),
738+ 'config': n1kv_config,
739+ 'vsm_ip': config('n1kv-vsm-ip'),
740+ 'vsm_username': config('n1kv-vsm-username'),
741+ 'vsm_password': config('n1kv-vsm-password'),
742+ 'restrict_policy_profiles': config(
743+ 'n1kv_restrict_policy_profiles'),
744+ }
745+
746+ return n1kv_ctxt
747+
748 def neutron_ctxt(self):
749 if https():
750 proto = 'https'
751@@ -572,6 +627,8 @@
752 ctxt.update(self.ovs_ctxt())
753 elif self.plugin in ['nvp', 'nsx']:
754 ctxt.update(self.nvp_ctxt())
755+ elif self.plugin == 'n1kv':
756+ ctxt.update(self.n1kv_ctxt())
757
758 alchemy_flags = config('neutron-alchemy-flags')
759 if alchemy_flags:
760@@ -611,7 +668,7 @@
761 The subordinate interface allows subordinates to export their
762 configuration requirements to the principle for multiple config
763 files and multiple serivces. Ie, a subordinate that has interfaces
764- to both glance and nova may export to following yaml blob as json:
765+ to both glance and nova may export to following yaml blob as json::
766
767 glance:
768 /etc/glance/glance-api.conf:
769@@ -630,7 +687,8 @@
770
771 It is then up to the principle charms to subscribe this context to
772 the service+config file it is interestd in. Configuration data will
773- be available in the template context, in glance's case, as:
774+ be available in the template context, in glance's case, as::
775+
776 ctxt = {
777 ... other context ...
778 'subordinate_config': {
779@@ -657,7 +715,7 @@
780 self.interface = interface
781
782 def __call__(self):
783- ctxt = {}
784+ ctxt = {'sections': {}}
785 for rid in relation_ids(self.interface):
786 for unit in related_units(rid):
787 sub_config = relation_get('subordinate_configuration',
788@@ -683,11 +741,26 @@
789
790 sub_config = sub_config[self.config_file]
791 for k, v in sub_config.iteritems():
792- ctxt[k] = v
793-
794- if not ctxt:
795- ctxt['sections'] = {}
796-
797+ if k == 'sections':
798+ for section, config_dict in v.iteritems():
799+ log("adding section '%s'" % (section))
800+ ctxt[k][section] = config_dict
801+ else:
802+ ctxt[k] = v
803+
804+ log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
805+
806+ return ctxt
807+
808+
809+class LogLevelContext(OSContextGenerator):
810+
811+ def __call__(self):
812+ ctxt = {}
813+ ctxt['debug'] = \
814+ False if config('debug') is None else config('debug')
815+ ctxt['verbose'] = \
816+ False if config('verbose') is None else config('verbose')
817 return ctxt
818
819
820
821=== added file 'hooks/charmhelpers/contrib/openstack/ip.py'
822--- hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
823+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-07-31 11:10:21 +0000
824@@ -0,0 +1,75 @@
825+from charmhelpers.core.hookenv import (
826+ config,
827+ unit_get,
828+)
829+
830+from charmhelpers.contrib.network.ip import (
831+ get_address_in_network,
832+ is_address_in_network,
833+ is_ipv6,
834+)
835+
836+from charmhelpers.contrib.hahelpers.cluster import is_clustered
837+
838+PUBLIC = 'public'
839+INTERNAL = 'int'
840+ADMIN = 'admin'
841+
842+_address_map = {
843+ PUBLIC: {
844+ 'config': 'os-public-network',
845+ 'fallback': 'public-address'
846+ },
847+ INTERNAL: {
848+ 'config': 'os-internal-network',
849+ 'fallback': 'private-address'
850+ },
851+ ADMIN: {
852+ 'config': 'os-admin-network',
853+ 'fallback': 'private-address'
854+ }
855+}
856+
857+
858+def canonical_url(configs, endpoint_type=PUBLIC):
859+ '''
860+ Returns the correct HTTP URL to this host given the state of HTTPS
861+ configuration, hacluster and charm configuration.
862+
863+ :configs OSTemplateRenderer: A config tempating object to inspect for
864+ a complete https context.
865+ :endpoint_type str: The endpoint type to resolve.
866+
867+ :returns str: Base URL for services on the current service unit.
868+ '''
869+ scheme = 'http'
870+ if 'https' in configs.complete_contexts():
871+ scheme = 'https'
872+ address = resolve_address(endpoint_type)
873+ if is_ipv6(address):
874+ address = "[{}]".format(address)
875+ return '%s://%s' % (scheme, address)
876+
877+
878+def resolve_address(endpoint_type=PUBLIC):
879+ resolved_address = None
880+ if is_clustered():
881+ if config(_address_map[endpoint_type]['config']) is None:
882+ # Assume vip is simple and pass back directly
883+ resolved_address = config('vip')
884+ else:
885+ for vip in config('vip').split():
886+ if is_address_in_network(
887+ config(_address_map[endpoint_type]['config']),
888+ vip):
889+ resolved_address = vip
890+ else:
891+ resolved_address = get_address_in_network(
892+ config(_address_map[endpoint_type]['config']),
893+ unit_get(_address_map[endpoint_type]['fallback'])
894+ )
895+ if resolved_address is None:
896+ raise ValueError('Unable to resolve a suitable IP address'
897+ ' based on charm state and configuration')
898+ else:
899+ return resolved_address
900
901=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
902--- hooks/charmhelpers/contrib/openstack/neutron.py 2014-05-19 11:41:35 +0000
903+++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-07-31 11:10:21 +0000
904@@ -128,6 +128,20 @@
905 'server_packages': ['neutron-server',
906 'neutron-plugin-vmware'],
907 'server_services': ['neutron-server']
908+ },
909+ 'n1kv': {
910+ 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
911+ 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
912+ 'contexts': [
913+ context.SharedDBContext(user=config('neutron-database-user'),
914+ database=config('neutron-database'),
915+ relation_prefix='neutron',
916+ ssl_dir=NEUTRON_CONF_DIR)],
917+ 'services': [],
918+ 'packages': [['neutron-plugin-cisco']],
919+ 'server_packages': ['neutron-server',
920+ 'neutron-plugin-cisco'],
921+ 'server_services': ['neutron-server']
922 }
923 }
924 if release >= 'icehouse':
925
926=== modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
927--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-02-24 17:52:34 +0000
928+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-07-31 11:10:21 +0000
929@@ -27,7 +27,12 @@
930
931 {% if units -%}
932 {% for service, ports in service_ports.iteritems() -%}
933-listen {{ service }} 0.0.0.0:{{ ports[0] }}
934+listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }}
935+ balance roundrobin
936+ {% for unit, address in units.iteritems() -%}
937+ server {{ unit }} {{ address }}:{{ ports[1] }} check
938+ {% endfor %}
939+listen {{ service }}_ipv6 :::{{ ports[0] }}
940 balance roundrobin
941 {% for unit, address in units.iteritems() -%}
942 server {{ unit }} {{ address }}:{{ ports[1] }} check
943
944=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
945--- hooks/charmhelpers/contrib/openstack/templating.py 2013-09-23 19:01:06 +0000
946+++ hooks/charmhelpers/contrib/openstack/templating.py 2014-07-31 11:10:21 +0000
947@@ -30,17 +30,17 @@
948 loading dir.
949
950 A charm may also ship a templates dir with this module
951- and it will be appended to the bottom of the search list, eg:
952- hooks/charmhelpers/contrib/openstack/templates.
953-
954- :param templates_dir: str: Base template directory containing release
955- sub-directories.
956- :param os_release : str: OpenStack release codename to construct template
957- loader.
958-
959- :returns : jinja2.ChoiceLoader constructed with a list of
960- jinja2.FilesystemLoaders, ordered in descending
961- order by OpenStack release.
962+ and it will be appended to the bottom of the search list, eg::
963+
964+ hooks/charmhelpers/contrib/openstack/templates
965+
966+ :param templates_dir (str): Base template directory containing release
967+ sub-directories.
968+ :param os_release (str): OpenStack release codename to construct template
969+ loader.
970+ :returns: jinja2.ChoiceLoader constructed with a list of
971+ jinja2.FilesystemLoaders, ordered in descending
972+ order by OpenStack release.
973 """
974 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
975 for rel in OPENSTACK_CODENAMES.itervalues()]
976@@ -111,7 +111,8 @@
977 and ease the burden of managing config templates across multiple OpenStack
978 releases.
979
980- Basic usage:
981+ Basic usage::
982+
983 # import some common context generates from charmhelpers
984 from charmhelpers.contrib.openstack import context
985
986@@ -131,21 +132,19 @@
987 # write out all registered configs
988 configs.write_all()
989
990- Details:
991+ **OpenStack Releases and template loading**
992
993- OpenStack Releases and template loading
994- ---------------------------------------
995 When the object is instantiated, it is associated with a specific OS
996 release. This dictates how the template loader will be constructed.
997
998 The constructed loader attempts to load the template from several places
999 in the following order:
1000- - from the most recent OS release-specific template dir (if one exists)
1001- - the base templates_dir
1002- - a template directory shipped in the charm with this helper file.
1003-
1004-
1005- For the example above, '/tmp/templates' contains the following structure:
1006+ - from the most recent OS release-specific template dir (if one exists)
1007+ - the base templates_dir
1008+ - a template directory shipped in the charm with this helper file.
1009+
1010+ For the example above, '/tmp/templates' contains the following structure::
1011+
1012 /tmp/templates/nova.conf
1013 /tmp/templates/api-paste.ini
1014 /tmp/templates/grizzly/api-paste.ini
1015@@ -169,8 +168,8 @@
1016 $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
1017 us to ship common templates (haproxy, apache) with the helpers.
1018
1019- Context generators
1020- ---------------------------------------
1021+ **Context generators**
1022+
1023 Context generators are used to generate template contexts during hook
1024 execution. Doing so may require inspecting service relations, charm
1025 config, etc. When registered, a config file is associated with a list
1026
1027=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
1028--- hooks/charmhelpers/contrib/openstack/utils.py 2014-05-19 11:41:35 +0000
1029+++ hooks/charmhelpers/contrib/openstack/utils.py 2014-07-31 11:10:21 +0000
1030@@ -3,7 +3,6 @@
1031 # Common python helper functions used for OpenStack charms.
1032 from collections import OrderedDict
1033
1034-import apt_pkg as apt
1035 import subprocess
1036 import os
1037 import socket
1038@@ -41,7 +40,8 @@
1039 ('quantal', 'folsom'),
1040 ('raring', 'grizzly'),
1041 ('saucy', 'havana'),
1042- ('trusty', 'icehouse')
1043+ ('trusty', 'icehouse'),
1044+ ('utopic', 'juno'),
1045 ])
1046
1047
1048@@ -52,6 +52,7 @@
1049 ('2013.1', 'grizzly'),
1050 ('2013.2', 'havana'),
1051 ('2014.1', 'icehouse'),
1052+ ('2014.2', 'juno'),
1053 ])
1054
1055 # The ugly duckling
1056@@ -83,6 +84,8 @@
1057 '''Derive OpenStack release codename from a given installation source.'''
1058 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
1059 rel = ''
1060+ if src is None:
1061+ return rel
1062 if src in ['distro', 'distro-proposed']:
1063 try:
1064 rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
1065@@ -130,6 +133,7 @@
1066
1067 def get_os_codename_package(package, fatal=True):
1068 '''Derive OpenStack release codename from an installed package.'''
1069+ import apt_pkg as apt
1070 apt.init()
1071
1072 # Tell apt to build an in-memory cache to prevent race conditions (if
1073@@ -187,7 +191,7 @@
1074 for version, cname in vers_map.iteritems():
1075 if cname == codename:
1076 return version
1077- #e = "Could not determine OpenStack version for package: %s" % pkg
1078+ # e = "Could not determine OpenStack version for package: %s" % pkg
1079 # error_out(e)
1080
1081
1082@@ -273,6 +277,9 @@
1083 'icehouse': 'precise-updates/icehouse',
1084 'icehouse/updates': 'precise-updates/icehouse',
1085 'icehouse/proposed': 'precise-proposed/icehouse',
1086+ 'juno': 'trusty-updates/juno',
1087+ 'juno/updates': 'trusty-updates/juno',
1088+ 'juno/proposed': 'trusty-proposed/juno',
1089 }
1090
1091 try:
1092@@ -320,6 +327,7 @@
1093
1094 """
1095
1096+ import apt_pkg as apt
1097 src = config('openstack-origin')
1098 cur_vers = get_os_version_package(package)
1099 available_vers = get_os_version_install_source(src)
1100
1101=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
1102--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-02-24 17:52:34 +0000
1103+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-07-31 11:10:21 +0000
1104@@ -303,7 +303,7 @@
1105 blk_device, fstype, system_services=[]):
1106 """
1107 NOTE: This function must only be called from a single service unit for
1108- the same rbd_img otherwise data loss will occur.
1109+ the same rbd_img otherwise data loss will occur.
1110
1111 Ensures given pool and RBD image exists, is mapped to a block device,
1112 and the device is formatted and mounted at the given mount_point.
1113
1114=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
1115--- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-05-19 11:41:35 +0000
1116+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-07-31 11:10:21 +0000
1117@@ -37,6 +37,7 @@
1118 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
1119 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
1120
1121+
1122 def is_device_mounted(device):
1123 '''Given a device path, return True if that device is mounted, and False
1124 if it isn't.
1125@@ -45,5 +46,8 @@
1126 :returns: boolean: True if the path represents a mounted device, False if
1127 it doesn't.
1128 '''
1129+ is_partition = bool(re.search(r".*[0-9]+\b", device))
1130 out = check_output(['mount'])
1131+ if is_partition:
1132+ return bool(re.search(device + r"\b", out))
1133 return bool(re.search(device + r"[0-9]+\b", out))
1134
1135=== added file 'hooks/charmhelpers/core/fstab.py'
1136--- hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000
1137+++ hooks/charmhelpers/core/fstab.py 2014-07-31 11:10:21 +0000
1138@@ -0,0 +1,116 @@
1139+#!/usr/bin/env python
1140+# -*- coding: utf-8 -*-
1141+
1142+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
1143+
1144+import os
1145+
1146+
1147+class Fstab(file):
1148+ """This class extends file in order to implement a file reader/writer
1149+ for file `/etc/fstab`
1150+ """
1151+
1152+ class Entry(object):
1153+ """Entry class represents a non-comment line on the `/etc/fstab` file
1154+ """
1155+ def __init__(self, device, mountpoint, filesystem,
1156+ options, d=0, p=0):
1157+ self.device = device
1158+ self.mountpoint = mountpoint
1159+ self.filesystem = filesystem
1160+
1161+ if not options:
1162+ options = "defaults"
1163+
1164+ self.options = options
1165+ self.d = d
1166+ self.p = p
1167+
1168+ def __eq__(self, o):
1169+ return str(self) == str(o)
1170+
1171+ def __str__(self):
1172+ return "{} {} {} {} {} {}".format(self.device,
1173+ self.mountpoint,
1174+ self.filesystem,
1175+ self.options,
1176+ self.d,
1177+ self.p)
1178+
1179+ DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
1180+
1181+ def __init__(self, path=None):
1182+ if path:
1183+ self._path = path
1184+ else:
1185+ self._path = self.DEFAULT_PATH
1186+ file.__init__(self, self._path, 'r+')
1187+
1188+ def _hydrate_entry(self, line):
1189+ # NOTE: use split with no arguments to split on any
1190+ # whitespace including tabs
1191+ return Fstab.Entry(*filter(
1192+ lambda x: x not in ('', None),
1193+ line.strip("\n").split()))
1194+
1195+ @property
1196+ def entries(self):
1197+ self.seek(0)
1198+ for line in self.readlines():
1199+ try:
1200+ if not line.startswith("#"):
1201+ yield self._hydrate_entry(line)
1202+ except ValueError:
1203+ pass
1204+
1205+ def get_entry_by_attr(self, attr, value):
1206+ for entry in self.entries:
1207+ e_attr = getattr(entry, attr)
1208+ if e_attr == value:
1209+ return entry
1210+ return None
1211+
1212+ def add_entry(self, entry):
1213+ if self.get_entry_by_attr('device', entry.device):
1214+ return False
1215+
1216+ self.write(str(entry) + '\n')
1217+ self.truncate()
1218+ return entry
1219+
1220+ def remove_entry(self, entry):
1221+ self.seek(0)
1222+
1223+ lines = self.readlines()
1224+
1225+ found = False
1226+ for index, line in enumerate(lines):
1227+ if not line.startswith("#"):
1228+ if self._hydrate_entry(line) == entry:
1229+ found = True
1230+ break
1231+
1232+ if not found:
1233+ return False
1234+
1235+ lines.remove(line)
1236+
1237+ self.seek(0)
1238+ self.write(''.join(lines))
1239+ self.truncate()
1240+ return True
1241+
1242+ @classmethod
1243+ def remove_by_mountpoint(cls, mountpoint, path=None):
1244+ fstab = cls(path=path)
1245+ entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
1246+ if entry:
1247+ return fstab.remove_entry(entry)
1248+ return False
1249+
1250+ @classmethod
1251+ def add(cls, device, mountpoint, filesystem, options=None, path=None):
1252+ return cls(path=path).add_entry(Fstab.Entry(device,
1253+ mountpoint, filesystem,
1254+ options=options))
1255
1256=== modified file 'hooks/charmhelpers/core/hookenv.py'
1257--- hooks/charmhelpers/core/hookenv.py 2014-05-19 11:41:35 +0000
1258+++ hooks/charmhelpers/core/hookenv.py 2014-07-31 11:10:21 +0000
1259@@ -25,7 +25,7 @@
1260 def cached(func):
1261 """Cache return values for multiple executions of func + args
1262
1263- For example:
1264+ For example::
1265
1266 @cached
1267 def unit_get(attribute):
1268@@ -445,18 +445,19 @@
1269 class Hooks(object):
1270 """A convenient handler for hook functions.
1271
1272- Example:
1273+ Example::
1274+
1275 hooks = Hooks()
1276
1277 # register a hook, taking its name from the function name
1278 @hooks.hook()
1279 def install():
1280- ...
1281+ pass # your code here
1282
1283 # register a hook, providing a custom hook name
1284 @hooks.hook("config-changed")
1285 def config_changed():
1286- ...
1287+ pass # your code here
1288
1289 if __name__ == "__main__":
1290 # execute a hook based on the name the program is called by
1291
1292=== modified file 'hooks/charmhelpers/core/host.py'
1293--- hooks/charmhelpers/core/host.py 2014-05-19 11:41:35 +0000
1294+++ hooks/charmhelpers/core/host.py 2014-07-31 11:10:21 +0000
1295@@ -12,11 +12,11 @@
1296 import string
1297 import subprocess
1298 import hashlib
1299-import apt_pkg
1300
1301 from collections import OrderedDict
1302
1303 from hookenv import log
1304+from fstab import Fstab
1305
1306
1307 def service_start(service_name):
1308@@ -35,7 +35,8 @@
1309
1310
1311 def service_reload(service_name, restart_on_failure=False):
1312- """Reload a system service, optionally falling back to restart if reload fails"""
1313+ """Reload a system service, optionally falling back to restart if
1314+ reload fails"""
1315 service_result = service('reload', service_name)
1316 if not service_result and restart_on_failure:
1317 service_result = service('restart', service_name)
1318@@ -144,7 +145,19 @@
1319 target.write(content)
1320
1321
1322-def mount(device, mountpoint, options=None, persist=False):
1323+def fstab_remove(mp):
1324+ """Remove the given mountpoint entry from /etc/fstab
1325+ """
1326+ return Fstab.remove_by_mountpoint(mp)
1327+
1328+
1329+def fstab_add(dev, mp, fs, options=None):
1330+ """Adds the given device entry to the /etc/fstab file
1331+ """
1332+ return Fstab.add(dev, mp, fs, options=options)
1333+
1334+
1335+def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
1336 """Mount a filesystem at a particular mountpoint"""
1337 cmd_args = ['mount']
1338 if options is not None:
1339@@ -155,9 +168,9 @@
1340 except subprocess.CalledProcessError, e:
1341 log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
1342 return False
1343+
1344 if persist:
1345- # TODO: update fstab
1346- pass
1347+ return fstab_add(device, mountpoint, filesystem, options=options)
1348 return True
1349
1350
1351@@ -169,9 +182,9 @@
1352 except subprocess.CalledProcessError, e:
1353 log('Error unmounting {}\n{}'.format(mountpoint, e.output))
1354 return False
1355+
1356 if persist:
1357- # TODO: update fstab
1358- pass
1359+ return fstab_remove(mountpoint)
1360 return True
1361
1362
1363@@ -198,13 +211,13 @@
1364 def restart_on_change(restart_map, stopstart=False):
1365 """Restart services based on configuration files changing
1366
1367- This function is used a decorator, for example
1368+ This function is used a decorator, for example::
1369
1370 @restart_on_change({
1371 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
1372 })
1373 def ceph_client_changed():
1374- ...
1375+ pass # your code here
1376
1377 In this example, the cinder-api and cinder-volume services
1378 would be restarted if /etc/ceph/ceph.conf is changed by the
1379@@ -300,12 +313,19 @@
1380
1381 def cmp_pkgrevno(package, revno, pkgcache=None):
1382 '''Compare supplied revno with the revno of the installed package
1383- 1 => Installed revno is greater than supplied arg
1384- 0 => Installed revno is the same as supplied arg
1385- -1 => Installed revno is less than supplied arg
1386+
1387+ * 1 => Installed revno is greater than supplied arg
1388+ * 0 => Installed revno is the same as supplied arg
1389+ * -1 => Installed revno is less than supplied arg
1390+
1391 '''
1392+ import apt_pkg
1393 if not pkgcache:
1394 apt_pkg.init()
1395+ # Force Apt to build its cache in memory. That way we avoid race
1396+ # conditions with other applications building the cache in the same
1397+ # place.
1398+ apt_pkg.config.set("Dir::Cache::pkgcache", "")
1399 pkgcache = apt_pkg.Cache()
1400 pkg = pkgcache[package]
1401 return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
1402
1403=== modified file 'hooks/charmhelpers/fetch/__init__.py'
1404--- hooks/charmhelpers/fetch/__init__.py 2014-05-19 11:41:35 +0000
1405+++ hooks/charmhelpers/fetch/__init__.py 2014-07-31 11:10:21 +0000
1406@@ -13,7 +13,6 @@
1407 config,
1408 log,
1409 )
1410-import apt_pkg
1411 import os
1412
1413
1414@@ -56,6 +55,15 @@
1415 'icehouse/proposed': 'precise-proposed/icehouse',
1416 'precise-icehouse/proposed': 'precise-proposed/icehouse',
1417 'precise-proposed/icehouse': 'precise-proposed/icehouse',
1418+ # Juno
1419+ 'juno': 'trusty-updates/juno',
1420+ 'trusty-juno': 'trusty-updates/juno',
1421+ 'trusty-juno/updates': 'trusty-updates/juno',
1422+ 'trusty-updates/juno': 'trusty-updates/juno',
1423+ 'juno/proposed': 'trusty-proposed/juno',
1424+ 'juno/proposed': 'trusty-proposed/juno',
1425+ 'trusty-juno/proposed': 'trusty-proposed/juno',
1426+ 'trusty-proposed/juno': 'trusty-proposed/juno',
1427 }
1428
1429 # The order of this list is very important. Handlers should be listed in from
1430@@ -108,6 +116,7 @@
1431
1432 def filter_installed_packages(packages):
1433 """Returns a list of packages that require installation"""
1434+ import apt_pkg
1435 apt_pkg.init()
1436
1437 # Tell apt to build an in-memory cache to prevent race conditions (if
1438@@ -226,31 +235,39 @@
1439 sources_var='install_sources',
1440 keys_var='install_keys'):
1441 """
1442- Configure multiple sources from charm configuration
1443+ Configure multiple sources from charm configuration.
1444+
1445+ The lists are encoded as yaml fragments in the configuration.
1446+ The frament needs to be included as a string.
1447
1448 Example config:
1449- install_sources:
1450+ install_sources: |
1451 - "ppa:foo"
1452 - "http://example.com/repo precise main"
1453- install_keys:
1454+ install_keys: |
1455 - null
1456 - "a1b2c3d4"
1457
1458 Note that 'null' (a.k.a. None) should not be quoted.
1459 """
1460- sources = safe_load(config(sources_var))
1461- keys = config(keys_var)
1462- if keys is not None:
1463- keys = safe_load(keys)
1464- if isinstance(sources, basestring) and (
1465- keys is None or isinstance(keys, basestring)):
1466- add_source(sources, keys)
1467+ sources = safe_load((config(sources_var) or '').strip()) or []
1468+ keys = safe_load((config(keys_var) or '').strip()) or None
1469+
1470+ if isinstance(sources, basestring):
1471+ sources = [sources]
1472+
1473+ if keys is None:
1474+ for source in sources:
1475+ add_source(source, None)
1476 else:
1477- if not len(sources) == len(keys):
1478- msg = 'Install sources and keys lists are different lengths'
1479- raise SourceConfigError(msg)
1480- for src_num in range(len(sources)):
1481- add_source(sources[src_num], keys[src_num])
1482+ if isinstance(keys, basestring):
1483+ keys = [keys]
1484+
1485+ if len(sources) != len(keys):
1486+ raise SourceConfigError(
1487+ 'Install sources and keys lists are different lengths')
1488+ for source, key in zip(sources, keys):
1489+ add_source(source, key)
1490 if update:
1491 apt_update(fatal=True)
1492
1493
1494=== modified file 'hooks/charmhelpers/fetch/bzrurl.py'
1495--- hooks/charmhelpers/fetch/bzrurl.py 2013-12-04 09:51:46 +0000
1496+++ hooks/charmhelpers/fetch/bzrurl.py 2014-07-31 11:10:21 +0000
1497@@ -39,7 +39,8 @@
1498 def install(self, source):
1499 url_parts = self.parse_url(source)
1500 branch_name = url_parts.path.strip("/").split("/")[-1]
1501- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name)
1502+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
1503+ branch_name)
1504 if not os.path.exists(dest_dir):
1505 mkdir(dest_dir, perms=0755)
1506 try:
1507
1508=== modified file 'hooks/swift_storage_utils.py'
1509--- hooks/swift_storage_utils.py 2014-04-07 14:50:34 +0000
1510+++ hooks/swift_storage_utils.py 2014-07-31 11:10:21 +0000
1511@@ -33,6 +33,7 @@
1512
1513 from charmhelpers.contrib.storage.linux.utils import (
1514 is_block_device,
1515+ is_device_mounted,
1516 )
1517
1518 from charmhelpers.contrib.openstack.utils import (
1519@@ -135,10 +136,17 @@
1520 (ACCOUNT_SVCS + CONTAINER_SVCS + OBJECT_SVCS)]
1521
1522
1523+def _is_storage_ready(partition):
1524+ """
1525+ A small helper to determine if a given device is suitabe to be used as
1526+ a storage device.
1527+ """
1528+ return is_block_device(partition) and not is_device_mounted(partition)
1529+
1530+
1531 def find_block_devices():
1532 found = []
1533 incl = ['sd[a-z]', 'vd[a-z]', 'cciss\/c[0-9]d[0-9]']
1534- blacklist = ['sda', 'vda', 'cciss/c0d0']
1535
1536 with open('/proc/partitions') as proc:
1537 print proc
1538@@ -146,9 +154,9 @@
1539 for partition in [p[3] for p in partitions if p]:
1540 for inc in incl:
1541 _re = re.compile(r'^(%s)$' % inc)
1542- if _re.match(partition) and partition not in blacklist:
1543+ if _re.match(partition):
1544 found.append(os.path.join('/dev', partition))
1545- return [f for f in found if is_block_device(f)]
1546+ return [f for f in found if _is_storage_ready(f)]
1547
1548
1549 def determine_block_devices():
1550
1551=== modified file 'unit_tests/test_swift_storage_utils.py'
1552--- unit_tests/test_swift_storage_utils.py 2014-03-20 13:50:49 +0000
1553+++ unit_tests/test_swift_storage_utils.py 2014-07-31 11:10:21 +0000
1554@@ -17,6 +17,7 @@
1555 'ensure_block_device',
1556 'clean_storage',
1557 'is_block_device',
1558+ 'is_device_mounted',
1559 'get_os_codename_package',
1560 'get_os_codename_install_source',
1561 'unit_private_ip',
1562@@ -62,6 +63,14 @@
1563 }
1564
1565
1566+REAL_WORLD_PARTITIONS = """
1567+major minor #blocks name
1568+
1569+ 8 0 117220824 sda
1570+ 8 1 117219800 sda1
1571+ 8 16 119454720 sdb
1572+"""
1573+
1574 class SwiftStorageUtilsTests(CharmTestCase):
1575 def setUp(self):
1576 super(SwiftStorageUtilsTests, self).setUp(swift_utils, TO_PATCH)
1577@@ -156,8 +165,15 @@
1578 group='swift')
1579 self.mount.assert_called('/dev/vdb', '/srv/node/vdb', persist=True)
1580
1581+ def _fake_is_device_mounted(self, device):
1582+ if device in ["/dev/sda", "/dev/vda", "/dev/cciss/c0d0"]:
1583+ return True
1584+ else:
1585+ return False
1586+
1587 def test_find_block_devices(self):
1588 self.is_block_device.return_value = True
1589+ self.is_device_mounted.side_effect = self._fake_is_device_mounted
1590 with patch_open() as (_open, _file):
1591 _file.read.return_value = PROC_PARTITIONS
1592 _file.readlines = MagicMock()
1593@@ -166,6 +182,18 @@
1594 ex = ['/dev/sdb', '/dev/vdb', '/dev/cciss/c1d0']
1595 self.assertEquals(ex, result)
1596
1597+ def test_find_block_devices_real_world(self):
1598+ self.is_block_device.return_value = True
1599+ side_effect = lambda x: x in ["/dev/sda", "/dev/sda1"]
1600+ self.is_device_mounted.side_effect = side_effect
1601+ with patch_open() as (_open, _file):
1602+ _file.read.return_value = REAL_WORLD_PARTITIONS
1603+ _file.readlines = MagicMock()
1604+ _file.readlines.return_value = REAL_WORLD_PARTITIONS.split('\n')
1605+ result = swift_utils.find_block_devices()
1606+ expected = ["/dev/sdb"]
1607+ self.assertEquals(expected, result)
1608+
1609 def test_save_script_rc(self):
1610 self.unit_private_ip.return_value = '10.0.0.1'
1611 swift_utils.save_script_rc()
