Merge lp:~james-page/charms/trusty/nova-cloud-controller/service-guard into lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/trunk

Proposed by James Page
Status: Superseded
Proposed branch: lp:~james-page/charms/trusty/nova-cloud-controller/service-guard
Merge into: lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/trunk
Diff against target: 4341 lines (+2840/-302) (has conflicts)
45 files modified
.bzrignore (+2/-0)
Makefile (+17/-5)
README.txt (+5/-0)
charm-helpers-hooks.yaml (+11/-0)
charm-helpers-tests.yaml (+5/-0)
charm-helpers.yaml (+0/-10)
config.yaml (+54/-11)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+3/-2)
hooks/charmhelpers/contrib/network/ip.py (+156/-0)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+55/-0)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+209/-0)
hooks/charmhelpers/contrib/openstack/context.py (+95/-22)
hooks/charmhelpers/contrib/openstack/ip.py (+75/-0)
hooks/charmhelpers/contrib/openstack/neutron.py (+14/-0)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+6/-1)
hooks/charmhelpers/contrib/openstack/templating.py (+22/-23)
hooks/charmhelpers/contrib/openstack/utils.py (+11/-3)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+1/-1)
hooks/charmhelpers/contrib/storage/linux/utils.py (+1/-0)
hooks/charmhelpers/core/fstab.py (+116/-0)
hooks/charmhelpers/core/hookenv.py (+5/-4)
hooks/charmhelpers/core/host.py (+32/-12)
hooks/charmhelpers/fetch/__init__.py (+33/-16)
hooks/charmhelpers/fetch/bzrurl.py (+2/-1)
hooks/nova_cc_context.py (+32/-1)
hooks/nova_cc_hooks.py (+211/-57)
hooks/nova_cc_utils.py (+218/-103)
metadata.yaml (+2/-0)
revision (+1/-1)
tests/00-setup (+10/-0)
tests/10-basic-precise-essex (+10/-0)
tests/11-basic-precise-folsom (+18/-0)
tests/12-basic-precise-grizzly (+12/-0)
tests/13-basic-precise-havana (+12/-0)
tests/14-basic-precise-icehouse (+12/-0)
tests/15-basic-trusty-icehouse (+10/-0)
tests/README (+47/-0)
tests/basic_deployment.py (+520/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+58/-0)
tests/charmhelpers/contrib/amulet/utils.py (+157/-0)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+55/-0)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+209/-0)
unit_tests/test_nova_cc_hooks.py (+146/-12)
unit_tests/test_nova_cc_utils.py (+167/-14)
unit_tests/test_utils.py (+3/-3)
Text conflict in config.yaml
To merge this branch: bzr merge lp:~james-page/charms/trusty/nova-cloud-controller/service-guard
Reviewer: OpenStack Charmers
Status: Pending
Review via email: mp+228669@code.launchpad.net

This proposal has been superseded by a proposal from 2014-07-29.

Description of the change

Add support for a service-guard configuration option that keeps services disabled until the required relations have been completely formed.
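The sketch below is an illustrative reading of that description, not code from this branch: it shows how a guard could gate service start-up on the 'core relations' named for the new service-guard option in config.yaml (shared-db or the pgsql-* relations, amqp, identity-service). relation_ids, config, service_start and service_stop are standard charmhelpers calls that appear elsewhere in this diff; the two helper functions are made up here, and a real guard would also check that the relation data is complete, not merely that the relations exist.

    # Illustrative sketch only; not the implementation in this branch.
    from charmhelpers.core.hookenv import config, relation_ids
    from charmhelpers.core.host import service_start, service_stop


    def core_relations_complete():
        """True once the 'core relations' listed in config.yaml exist:
        shared-db or (pgsql-nova-db, pgsql-neutron-db), amqp, identity-service.
        """
        db_ok = bool(relation_ids('shared-db')) or (
            bool(relation_ids('pgsql-nova-db')) and
            bool(relation_ids('pgsql-neutron-db')))
        return (db_ok and
                bool(relation_ids('amqp')) and
                bool(relation_ids('identity-service')))


    def guard_services(services):
        """Start the given services only when the guard allows it."""
        if not config('service-guard') or core_relations_complete():
            for svc in services:
                service_start(svc)
        else:
            for svc in services:
                service_stop(svc)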

95. By James Page

Don't add neutron stuff if related to neutron-api charm

96. By James Page

Fixup unit tests

97. By James Page

Tidy lint

Unmerged revisions

Preview Diff

=== added file '.bzrignore'
--- .bzrignore 1970-01-01 00:00:00 +0000
+++ .bzrignore 2014-07-29 13:07:23 +0000
@@ -0,0 +1,2 @@
+bin
+.coverage
=== modified file 'Makefile'
--- Makefile 2014-05-21 10:14:28 +0000
+++ Makefile 2014-07-29 13:07:23 +0000
@@ -2,16 +2,28 @@
 PYTHON := /usr/bin/env python
 
 lint:
-    @flake8 --exclude hooks/charmhelpers hooks unit_tests
+    @flake8 --exclude hooks/charmhelpers hooks unit_tests tests
     @charm proof
 
+unit_test:
+    @echo Starting unit tests...
+    @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
+
+bin/charm_helpers_sync.py:
+    @mkdir -p bin
+    @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
+        > bin/charm_helpers_sync.py
 test:
-    @echo Starting tests...
-    @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
+    @echo Starting Amulet tests...
+    # coreycb note: The -v should only be temporary until Amulet sends
+    # raise_status() messages to stderr:
+    # https://bugs.launchpad.net/amulet/+bug/1320357
+    @juju test -v -p AMULET_HTTP_PROXY
 
 sync:
-    @charm-helper-sync -c charm-helpers.yaml
+    @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
+    @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
 
-publish: lint test
+publish: lint unit_test
     bzr push lp:charms/nova-cloud-controller
     bzr push lp:charms/trusty/nova-cloud-controller
 
=== modified file 'README.txt'
--- README.txt 2014-03-25 09:11:04 +0000
+++ README.txt 2014-07-29 13:07:23 +0000
@@ -4,6 +4,11 @@
 
 Cloud controller node for Openstack nova. Contains nova-schedule, nova-api, nova-network and nova-objectstore.
 
+The neutron-api interface can be used join this charm with an external neutron-api server. If this is done
+then this charm will shutdown its neutron-api service and the external charm will be registered as the
+neutron-api endpoint in keystone. It will also use the quantum-security-groups setting which is passed to
+it by the api service rather than its own quantum-security-groups setting.
+
 ******************************************************
 Special considerations to be deployed using Postgresql
 ******************************************************
 
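A hypothetical sketch of the behaviour the new README paragraph describes; the hook name and the exact service stopped are assumptions, while Hooks and service_stop are the charmhelpers calls shown elsewhere in this diff.

    # Illustrative only: hand the neutron API role to the external charm by
    # stopping the local neutron-server once a neutron-api relation is joined.
    from charmhelpers.core.hookenv import Hooks
    from charmhelpers.core.host import service_stop

    hooks = Hooks()


    @hooks.hook('neutron-api-relation-joined')  # hypothetical hook name
    def neutron_api_joined():
        service_stop('neutron-server')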
=== added file 'charm-helpers-hooks.yaml'
--- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000
+++ charm-helpers-hooks.yaml 2014-07-29 13:07:23 +0000
@@ -0,0 +1,11 @@
+branch: lp:charm-helpers
+destination: hooks/charmhelpers
+include:
+    - core
+    - fetch
+    - contrib.openstack|inc=*
+    - contrib.storage
+    - contrib.hahelpers:
+        - apache
+    - payload.execd
+    - contrib.network.ip
=== added file 'charm-helpers-tests.yaml'
--- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000
+++ charm-helpers-tests.yaml 2014-07-29 13:07:23 +0000
@@ -0,0 +1,5 @@
+branch: lp:charm-helpers
+destination: tests/charmhelpers
+include:
+    - contrib.amulet
+    - contrib.openstack.amulet
=== removed file 'charm-helpers.yaml'
--- charm-helpers.yaml 2014-04-02 17:04:22 +0000
+++ charm-helpers.yaml 1970-01-01 00:00:00 +0000
@@ -1,10 +0,0 @@
-branch: lp:charm-helpers
-destination: hooks/charmhelpers
-include:
-    - core
-    - fetch
-    - contrib.openstack|inc=*
-    - contrib.storage
-    - contrib.hahelpers:
-        - apache
-    - payload.execd
=== modified file 'config.yaml'
--- config.yaml 2014-06-17 10:01:21 +0000
+++ config.yaml 2014-07-29 13:07:23 +0000
@@ -97,15 +97,11 @@
   # HA configuration settings
   vip:
     type: string
-    description: "Virtual IP to use to front API services in ha configuration"
-  vip_iface:
-    type: string
-    default: eth0
-    description: "Network Interface where to place the Virtual IP"
-  vip_cidr:
-    type: int
-    default: 24
-    description: "Netmask that will be used for the Virtual IP"
+    description: |
+      Virtual IP(s) to use to front API services in HA configuration.
+      .
+      If multiple networks are being used, a VIP should be provided for each
+      network, separated by spaces.
   ha-bindiface:
     type: string
     default: eth0
@@ -163,5 +159,52 @@
   nvp-l3-uuid:
     type: string
     description: |
-      This is uuid of the default NVP/NSX L3 Gateway Service.
-  # end of NVP/NSX configuration
+<<<<<<< TREE
+      This is uuid of the default NVP/NSX L3 Gateway Service.
+  # end of NVP/NSX configuration
+=======
+      This is uuid of the default NVP/NSX L3 Gateway Service.
+  # end of NVP/NSX configuration
+  # Network configuration options
+  # by default all access is over 'private-address'
+  os-admin-network:
+    type: string
+    description: |
+      The IP address and netmask of the OpenStack Admin network (e.g.,
+      192.168.0.0/24)
+      .
+      This network will be used for admin endpoints.
+  os-internal-network:
+    type: string
+    description: |
+      The IP address and netmask of the OpenStack Internal network (e.g.,
+      192.168.0.0/24)
+      .
+      This network will be used for internal endpoints.
+  os-public-network:
+    type: string
+    description: |
+      The IP address and netmask of the OpenStack Public network (e.g.,
+      192.168.0.0/24)
+      .
+      This network will be used for public endpoints.
+  service-guard:
+    type: boolean
+    default: false
+    description: |
+      Ensure required relations are made and complete before allowing services
+      to be started
+      .
+      By default, services may be up and accepting API request from install
+      onwards.
+      .
+      Enabling this flag ensures that services will not be started until the
+      minimum 'core relations' have been made between this charm and other
+      charms.
+      .
+      For this charm the following relations must be made:
+      .
+      * shared-db or (pgsql-nova-db, pgsql-neutron-db)
+      * amqp
+      * identity-service
+>>>>>>> MERGE-SOURCE
 
=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-02-17 12:10:27 +0000
+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-07-29 13:07:23 +0000
@@ -146,12 +146,12 @@
     Obtains all relevant configuration from charm configuration required
     for initiating a relation to hacluster:
 
-        ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
+        ha-bindiface, ha-mcastport, vip
 
     returns: dict: A dict containing settings keyed by setting name.
     raises: HAIncompleteConfig if settings are missing.
     '''
-    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
+    settings = ['ha-bindiface', 'ha-mcastport', 'vip']
     conf = {}
     for setting in settings:
         conf[setting] = config_get(setting)
@@ -170,6 +170,7 @@
 
     :configs : OSTemplateRenderer: A config tempating object to inspect for
                a complete https context.
+
     :vip_setting: str: Setting in charm config that specifies
                   VIP address.
     '''
 
=== added directory 'hooks/charmhelpers/contrib/network'
=== added file 'hooks/charmhelpers/contrib/network/__init__.py'
=== added file 'hooks/charmhelpers/contrib/network/ip.py'
--- hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/network/ip.py 2014-07-29 13:07:23 +0000
@@ -0,0 +1,156 @@
1import sys
2
3from functools import partial
4
5from charmhelpers.fetch import apt_install
6from charmhelpers.core.hookenv import (
7 ERROR, log,
8)
9
10try:
11 import netifaces
12except ImportError:
13 apt_install('python-netifaces')
14 import netifaces
15
16try:
17 import netaddr
18except ImportError:
19 apt_install('python-netaddr')
20 import netaddr
21
22
23def _validate_cidr(network):
24 try:
25 netaddr.IPNetwork(network)
26 except (netaddr.core.AddrFormatError, ValueError):
27 raise ValueError("Network (%s) is not in CIDR presentation format" %
28 network)
29
30
31def get_address_in_network(network, fallback=None, fatal=False):
32 """
33 Get an IPv4 or IPv6 address within the network from the host.
34
35 :param network (str): CIDR presentation format. For example,
36 '192.168.1.0/24'.
37 :param fallback (str): If no address is found, return fallback.
38 :param fatal (boolean): If no address is found, fallback is not
39 set and fatal is True then exit(1).
40
41 """
42
43 def not_found_error_out():
44 log("No IP address found in network: %s" % network,
45 level=ERROR)
46 sys.exit(1)
47
48 if network is None:
49 if fallback is not None:
50 return fallback
51 else:
52 if fatal:
53 not_found_error_out()
54
55 _validate_cidr(network)
56 network = netaddr.IPNetwork(network)
57 for iface in netifaces.interfaces():
58 addresses = netifaces.ifaddresses(iface)
59 if network.version == 4 and netifaces.AF_INET in addresses:
60 addr = addresses[netifaces.AF_INET][0]['addr']
61 netmask = addresses[netifaces.AF_INET][0]['netmask']
62 cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
63 if cidr in network:
64 return str(cidr.ip)
65 if network.version == 6 and netifaces.AF_INET6 in addresses:
66 for addr in addresses[netifaces.AF_INET6]:
67 if not addr['addr'].startswith('fe80'):
68 cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
69 addr['netmask']))
70 if cidr in network:
71 return str(cidr.ip)
72
73 if fallback is not None:
74 return fallback
75
76 if fatal:
77 not_found_error_out()
78
79 return None
80
81
82def is_ipv6(address):
83 '''Determine whether provided address is IPv6 or not'''
84 try:
85 address = netaddr.IPAddress(address)
86 except netaddr.AddrFormatError:
87 # probably a hostname - so not an address at all!
88 return False
89 else:
90 return address.version == 6
91
92
93def is_address_in_network(network, address):
94 """
95 Determine whether the provided address is within a network range.
96
97 :param network (str): CIDR presentation format. For example,
98 '192.168.1.0/24'.
99 :param address: An individual IPv4 or IPv6 address without a net
100 mask or subnet prefix. For example, '192.168.1.1'.
101 :returns boolean: Flag indicating whether address is in network.
102 """
103 try:
104 network = netaddr.IPNetwork(network)
105 except (netaddr.core.AddrFormatError, ValueError):
106 raise ValueError("Network (%s) is not in CIDR presentation format" %
107 network)
108 try:
109 address = netaddr.IPAddress(address)
110 except (netaddr.core.AddrFormatError, ValueError):
111 raise ValueError("Address (%s) is not in correct presentation format" %
112 address)
113 if address in network:
114 return True
115 else:
116 return False
117
118
119def _get_for_address(address, key):
120 """Retrieve an attribute of or the physical interface that
121 the IP address provided could be bound to.
122
123 :param address (str): An individual IPv4 or IPv6 address without a net
124 mask or subnet prefix. For example, '192.168.1.1'.
125 :param key: 'iface' for the physical interface name or an attribute
126 of the configured interface, for example 'netmask'.
127 :returns str: Requested attribute or None if address is not bindable.
128 """
129 address = netaddr.IPAddress(address)
130 for iface in netifaces.interfaces():
131 addresses = netifaces.ifaddresses(iface)
132 if address.version == 4 and netifaces.AF_INET in addresses:
133 addr = addresses[netifaces.AF_INET][0]['addr']
134 netmask = addresses[netifaces.AF_INET][0]['netmask']
135 cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
136 if address in cidr:
137 if key == 'iface':
138 return iface
139 else:
140 return addresses[netifaces.AF_INET][0][key]
141 if address.version == 6 and netifaces.AF_INET6 in addresses:
142 for addr in addresses[netifaces.AF_INET6]:
143 if not addr['addr'].startswith('fe80'):
144 cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
145 addr['netmask']))
146 if address in cidr:
147 if key == 'iface':
148 return iface
149 else:
150 return addr[key]
151 return None
152
153
154get_iface_for_address = partial(_get_for_address, key='iface')
155
156get_netmask_for_address = partial(_get_for_address, key='netmask')
0157
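A brief, hypothetical usage sketch for the helpers added above; the CIDR and fallback address are invented values, and in a charm the fallback would normally come from unit_get('private-address').

    from charmhelpers.contrib.network.ip import (
        get_address_in_network,
        get_iface_for_address,
    )

    # Pick whichever local address sits inside the given network, or fall back.
    addr = get_address_in_network('192.168.21.0/24', fallback='10.0.3.15')
    if addr != '10.0.3.15':
        # Only meaningful when a local interface actually carries the address.
        print('{} is bound to {}'.format(addr, get_iface_for_address(addr)))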
=== added directory 'hooks/charmhelpers/contrib/openstack/amulet'
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py'
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-29 13:07:23 +0000
@@ -0,0 +1,55 @@
1from charmhelpers.contrib.amulet.deployment import (
2 AmuletDeployment
3)
4
5
6class OpenStackAmuletDeployment(AmuletDeployment):
7 """This class inherits from AmuletDeployment and has additional support
8 that is specifically for use by OpenStack charms."""
9
10 def __init__(self, series=None, openstack=None, source=None):
11 """Initialize the deployment environment."""
12 super(OpenStackAmuletDeployment, self).__init__(series)
13 self.openstack = openstack
14 self.source = source
15
16 def _add_services(self, this_service, other_services):
17 """Add services to the deployment and set openstack-origin."""
18 super(OpenStackAmuletDeployment, self)._add_services(this_service,
19 other_services)
20 name = 0
21 services = other_services
22 services.append(this_service)
23 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
24
25 if self.openstack:
26 for svc in services:
27 if svc[name] not in use_source:
28 config = {'openstack-origin': self.openstack}
29 self.d.configure(svc[name], config)
30
31 if self.source:
32 for svc in services:
33 if svc[name] in use_source:
34 config = {'source': self.source}
35 self.d.configure(svc[name], config)
36
37 def _configure_services(self, configs):
38 """Configure all of the services."""
39 for service, config in configs.iteritems():
40 self.d.configure(service, config)
41
42 def _get_openstack_release(self):
43 """Return an integer representing the enum value of the openstack
44 release."""
45 self.precise_essex, self.precise_folsom, self.precise_grizzly, \
46 self.precise_havana, self.precise_icehouse, \
47 self.trusty_icehouse = range(6)
48 releases = {
49 ('precise', None): self.precise_essex,
50 ('precise', 'cloud:precise-folsom'): self.precise_folsom,
51 ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
52 ('precise', 'cloud:precise-havana'): self.precise_havana,
53 ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
54 ('trusty', None): self.trusty_icehouse}
55 return releases[(self.series, self.openstack)]
056
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-29 13:07:23 +0000
@@ -0,0 +1,209 @@
1import logging
2import os
3import time
4import urllib
5
6import glanceclient.v1.client as glance_client
7import keystoneclient.v2_0 as keystone_client
8import novaclient.v1_1.client as nova_client
9
10from charmhelpers.contrib.amulet.utils import (
11 AmuletUtils
12)
13
14DEBUG = logging.DEBUG
15ERROR = logging.ERROR
16
17
18class OpenStackAmuletUtils(AmuletUtils):
19 """This class inherits from AmuletUtils and has additional support
20 that is specifically for use by OpenStack charms."""
21
22 def __init__(self, log_level=ERROR):
23 """Initialize the deployment environment."""
24 super(OpenStackAmuletUtils, self).__init__(log_level)
25
26 def validate_endpoint_data(self, endpoints, admin_port, internal_port,
27 public_port, expected):
28 """Validate actual endpoint data vs expected endpoint data. The ports
29 are used to find the matching endpoint."""
30 found = False
31 for ep in endpoints:
32 self.log.debug('endpoint: {}'.format(repr(ep)))
33 if admin_port in ep.adminurl and internal_port in ep.internalurl \
34 and public_port in ep.publicurl:
35 found = True
36 actual = {'id': ep.id,
37 'region': ep.region,
38 'adminurl': ep.adminurl,
39 'internalurl': ep.internalurl,
40 'publicurl': ep.publicurl,
41 'service_id': ep.service_id}
42 ret = self._validate_dict_data(expected, actual)
43 if ret:
44 return 'unexpected endpoint data - {}'.format(ret)
45
46 if not found:
47 return 'endpoint not found'
48
49 def validate_svc_catalog_endpoint_data(self, expected, actual):
50 """Validate a list of actual service catalog endpoints vs a list of
51 expected service catalog endpoints."""
52 self.log.debug('actual: {}'.format(repr(actual)))
53 for k, v in expected.iteritems():
54 if k in actual:
55 ret = self._validate_dict_data(expected[k][0], actual[k][0])
56 if ret:
57 return self.endpoint_error(k, ret)
58 else:
59 return "endpoint {} does not exist".format(k)
60 return ret
61
62 def validate_tenant_data(self, expected, actual):
63 """Validate a list of actual tenant data vs list of expected tenant
64 data."""
65 self.log.debug('actual: {}'.format(repr(actual)))
66 for e in expected:
67 found = False
68 for act in actual:
69 a = {'enabled': act.enabled, 'description': act.description,
70 'name': act.name, 'id': act.id}
71 if e['name'] == a['name']:
72 found = True
73 ret = self._validate_dict_data(e, a)
74 if ret:
75 return "unexpected tenant data - {}".format(ret)
76 if not found:
77 return "tenant {} does not exist".format(e['name'])
78 return ret
79
80 def validate_role_data(self, expected, actual):
81 """Validate a list of actual role data vs a list of expected role
82 data."""
83 self.log.debug('actual: {}'.format(repr(actual)))
84 for e in expected:
85 found = False
86 for act in actual:
87 a = {'name': act.name, 'id': act.id}
88 if e['name'] == a['name']:
89 found = True
90 ret = self._validate_dict_data(e, a)
91 if ret:
92 return "unexpected role data - {}".format(ret)
93 if not found:
94 return "role {} does not exist".format(e['name'])
95 return ret
96
97 def validate_user_data(self, expected, actual):
98 """Validate a list of actual user data vs a list of expected user
99 data."""
100 self.log.debug('actual: {}'.format(repr(actual)))
101 for e in expected:
102 found = False
103 for act in actual:
104 a = {'enabled': act.enabled, 'name': act.name,
105 'email': act.email, 'tenantId': act.tenantId,
106 'id': act.id}
107 if e['name'] == a['name']:
108 found = True
109 ret = self._validate_dict_data(e, a)
110 if ret:
111 return "unexpected user data - {}".format(ret)
112 if not found:
113 return "user {} does not exist".format(e['name'])
114 return ret
115
116 def validate_flavor_data(self, expected, actual):
117 """Validate a list of actual flavors vs a list of expected flavors."""
118 self.log.debug('actual: {}'.format(repr(actual)))
119 act = [a.name for a in actual]
120 return self._validate_list_data(expected, act)
121
122 def tenant_exists(self, keystone, tenant):
123 """Return True if tenant exists"""
124 return tenant in [t.name for t in keystone.tenants.list()]
125
126 def authenticate_keystone_admin(self, keystone_sentry, user, password,
127 tenant):
128 """Authenticates admin user with the keystone admin endpoint."""
129 service_ip = \
130 keystone_sentry.relation('shared-db',
131 'mysql:shared-db')['private-address']
132 ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
133 return keystone_client.Client(username=user, password=password,
134 tenant_name=tenant, auth_url=ep)
135
136 def authenticate_keystone_user(self, keystone, user, password, tenant):
137 """Authenticates a regular user with the keystone public endpoint."""
138 ep = keystone.service_catalog.url_for(service_type='identity',
139 endpoint_type='publicURL')
140 return keystone_client.Client(username=user, password=password,
141 tenant_name=tenant, auth_url=ep)
142
143 def authenticate_glance_admin(self, keystone):
144 """Authenticates admin user with glance."""
145 ep = keystone.service_catalog.url_for(service_type='image',
146 endpoint_type='adminURL')
147 return glance_client.Client(ep, token=keystone.auth_token)
148
149 def authenticate_nova_user(self, keystone, user, password, tenant):
150 """Authenticates a regular user with nova-api."""
151 ep = keystone.service_catalog.url_for(service_type='identity',
152 endpoint_type='publicURL')
153 return nova_client.Client(username=user, api_key=password,
154 project_id=tenant, auth_url=ep)
155
156 def create_cirros_image(self, glance, image_name):
157 """Download the latest cirros image and upload it to glance."""
158 http_proxy = os.getenv('AMULET_HTTP_PROXY')
159 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
160 if http_proxy:
161 proxies = {'http': http_proxy}
162 opener = urllib.FancyURLopener(proxies)
163 else:
164 opener = urllib.FancyURLopener()
165
166 f = opener.open("http://download.cirros-cloud.net/version/released")
167 version = f.read().strip()
168 cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
169
170 if not os.path.exists(cirros_img):
171 cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
172 version, cirros_img)
173 opener.retrieve(cirros_url, cirros_img)
174 f.close()
175
176 with open(cirros_img) as f:
177 image = glance.images.create(name=image_name, is_public=True,
178 disk_format='qcow2',
179 container_format='bare', data=f)
180 return image
181
182 def delete_image(self, glance, image):
183 """Delete the specified image."""
184 glance.images.delete(image)
185
186 def create_instance(self, nova, image_name, instance_name, flavor):
187 """Create the specified instance."""
188 image = nova.images.find(name=image_name)
189 flavor = nova.flavors.find(name=flavor)
190 instance = nova.servers.create(name=instance_name, image=image,
191 flavor=flavor)
192
193 count = 1
194 status = instance.status
195 while status != 'ACTIVE' and count < 60:
196 time.sleep(3)
197 instance = nova.servers.get(instance.id)
198 status = instance.status
199 self.log.debug('instance status: {}'.format(status))
200 count += 1
201
202 if status == 'BUILD':
203 return None
204
205 return instance
206
207 def delete_instance(self, nova, instance):
208 """Delete the specified instance."""
209 nova.servers.delete(instance)
0210
=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
--- hooks/charmhelpers/contrib/openstack/context.py 2014-05-19 11:38:09 +0000
+++ hooks/charmhelpers/contrib/openstack/context.py 2014-07-29 13:07:23 +0000
@@ -21,9 +21,11 @@
21 relation_get,21 relation_get,
22 relation_ids,22 relation_ids,
23 related_units,23 related_units,
24 relation_set,
24 unit_get,25 unit_get,
25 unit_private_ip,26 unit_private_ip,
26 ERROR,27 ERROR,
28 INFO
27)29)
2830
29from charmhelpers.contrib.hahelpers.cluster import (31from charmhelpers.contrib.hahelpers.cluster import (
@@ -42,6 +44,8 @@
42 neutron_plugin_attribute,44 neutron_plugin_attribute,
43)45)
4446
47from charmhelpers.contrib.network.ip import get_address_in_network
48
45CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'49CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
4650
4751
@@ -134,8 +138,26 @@
134 'Missing required charm config options. '138 'Missing required charm config options. '
135 '(database name and user)')139 '(database name and user)')
136 raise OSContextError140 raise OSContextError
141
137 ctxt = {}142 ctxt = {}
138143
144 # NOTE(jamespage) if mysql charm provides a network upon which
145 # access to the database should be made, reconfigure relation
146 # with the service units local address and defer execution
147 access_network = relation_get('access-network')
148 if access_network is not None:
149 if self.relation_prefix is not None:
150 hostname_key = "{}_hostname".format(self.relation_prefix)
151 else:
152 hostname_key = "hostname"
153 access_hostname = get_address_in_network(access_network,
154 unit_get('private-address'))
155 set_hostname = relation_get(attribute=hostname_key,
156 unit=local_unit())
157 if set_hostname != access_hostname:
158 relation_set(relation_settings={hostname_key: access_hostname})
159 return ctxt # Defer any further hook execution for now....
160
139 password_setting = 'password'161 password_setting = 'password'
140 if self.relation_prefix:162 if self.relation_prefix:
141 password_setting = self.relation_prefix + '_password'163 password_setting = self.relation_prefix + '_password'
@@ -243,23 +265,31 @@
243265
244266
245class AMQPContext(OSContextGenerator):267class AMQPContext(OSContextGenerator):
246 interfaces = ['amqp']
247268
248 def __init__(self, ssl_dir=None):269 def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
249 self.ssl_dir = ssl_dir270 self.ssl_dir = ssl_dir
271 self.rel_name = rel_name
272 self.relation_prefix = relation_prefix
273 self.interfaces = [rel_name]
250274
251 def __call__(self):275 def __call__(self):
252 log('Generating template context for amqp')276 log('Generating template context for amqp')
253 conf = config()277 conf = config()
278 user_setting = 'rabbit-user'
279 vhost_setting = 'rabbit-vhost'
280 if self.relation_prefix:
281 user_setting = self.relation_prefix + '-rabbit-user'
282 vhost_setting = self.relation_prefix + '-rabbit-vhost'
283
254 try:284 try:
255 username = conf['rabbit-user']285 username = conf[user_setting]
256 vhost = conf['rabbit-vhost']286 vhost = conf[vhost_setting]
257 except KeyError as e:287 except KeyError as e:
258 log('Could not generate shared_db context. '288 log('Could not generate shared_db context. '
259 'Missing required charm config options: %s.' % e)289 'Missing required charm config options: %s.' % e)
260 raise OSContextError290 raise OSContextError
261 ctxt = {}291 ctxt = {}
262 for rid in relation_ids('amqp'):292 for rid in relation_ids(self.rel_name):
263 ha_vip_only = False293 ha_vip_only = False
264 for unit in related_units(rid):294 for unit in related_units(rid):
265 if relation_get('clustered', rid=rid, unit=unit):295 if relation_get('clustered', rid=rid, unit=unit):
@@ -332,10 +362,12 @@
332 use_syslog = str(config('use-syslog')).lower()362 use_syslog = str(config('use-syslog')).lower()
333 for rid in relation_ids('ceph'):363 for rid in relation_ids('ceph'):
334 for unit in related_units(rid):364 for unit in related_units(rid):
335 mon_hosts.append(relation_get('private-address', rid=rid,
336 unit=unit))
337 auth = relation_get('auth', rid=rid, unit=unit)365 auth = relation_get('auth', rid=rid, unit=unit)
338 key = relation_get('key', rid=rid, unit=unit)366 key = relation_get('key', rid=rid, unit=unit)
367 ceph_addr = \
368 relation_get('ceph-public-address', rid=rid, unit=unit) or \
369 relation_get('private-address', rid=rid, unit=unit)
370 mon_hosts.append(ceph_addr)
339371
340 ctxt = {372 ctxt = {
341 'mon_hosts': ' '.join(mon_hosts),373 'mon_hosts': ' '.join(mon_hosts),
@@ -369,7 +401,9 @@
369401
370 cluster_hosts = {}402 cluster_hosts = {}
371 l_unit = local_unit().replace('/', '-')403 l_unit = local_unit().replace('/', '-')
372 cluster_hosts[l_unit] = unit_get('private-address')404 cluster_hosts[l_unit] = \
405 get_address_in_network(config('os-internal-network'),
406 unit_get('private-address'))
373407
374 for rid in relation_ids('cluster'):408 for rid in relation_ids('cluster'):
375 for unit in related_units(rid):409 for unit in related_units(rid):
@@ -418,12 +452,13 @@
418 """452 """
419 Generates a context for an apache vhost configuration that configures453 Generates a context for an apache vhost configuration that configures
420 HTTPS reverse proxying for one or many endpoints. Generated context454 HTTPS reverse proxying for one or many endpoints. Generated context
421 looks something like:455 looks something like::
422 {456
423 'namespace': 'cinder',457 {
424 'private_address': 'iscsi.mycinderhost.com',458 'namespace': 'cinder',
425 'endpoints': [(8776, 8766), (8777, 8767)]459 'private_address': 'iscsi.mycinderhost.com',
426 }460 'endpoints': [(8776, 8766), (8777, 8767)]
461 }
427462
428 The endpoints list consists of a tuples mapping external ports463 The endpoints list consists of a tuples mapping external ports
429 to internal ports.464 to internal ports.
@@ -541,6 +576,26 @@
541576
542 return nvp_ctxt577 return nvp_ctxt
543578
579 def n1kv_ctxt(self):
580 driver = neutron_plugin_attribute(self.plugin, 'driver',
581 self.network_manager)
582 n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
583 self.network_manager)
584 n1kv_ctxt = {
585 'core_plugin': driver,
586 'neutron_plugin': 'n1kv',
587 'neutron_security_groups': self.neutron_security_groups,
588 'local_ip': unit_private_ip(),
589 'config': n1kv_config,
590 'vsm_ip': config('n1kv-vsm-ip'),
591 'vsm_username': config('n1kv-vsm-username'),
592 'vsm_password': config('n1kv-vsm-password'),
593 'restrict_policy_profiles': config(
594 'n1kv_restrict_policy_profiles'),
595 }
596
597 return n1kv_ctxt
598
544 def neutron_ctxt(self):599 def neutron_ctxt(self):
545 if https():600 if https():
546 proto = 'https'601 proto = 'https'
@@ -572,6 +627,8 @@
572 ctxt.update(self.ovs_ctxt())627 ctxt.update(self.ovs_ctxt())
573 elif self.plugin in ['nvp', 'nsx']:628 elif self.plugin in ['nvp', 'nsx']:
574 ctxt.update(self.nvp_ctxt())629 ctxt.update(self.nvp_ctxt())
630 elif self.plugin == 'n1kv':
631 ctxt.update(self.n1kv_ctxt())
575632
576 alchemy_flags = config('neutron-alchemy-flags')633 alchemy_flags = config('neutron-alchemy-flags')
577 if alchemy_flags:634 if alchemy_flags:
@@ -611,7 +668,7 @@
611 The subordinate interface allows subordinates to export their668 The subordinate interface allows subordinates to export their
612 configuration requirements to the principle for multiple config669 configuration requirements to the principle for multiple config
613 files and multiple serivces. Ie, a subordinate that has interfaces670 files and multiple serivces. Ie, a subordinate that has interfaces
614 to both glance and nova may export to following yaml blob as json:671 to both glance and nova may export to following yaml blob as json::
615672
616 glance:673 glance:
617 /etc/glance/glance-api.conf:674 /etc/glance/glance-api.conf:
@@ -630,7 +687,8 @@
630687
631 It is then up to the principle charms to subscribe this context to688 It is then up to the principle charms to subscribe this context to
632 the service+config file it is interestd in. Configuration data will689 the service+config file it is interestd in. Configuration data will
633 be available in the template context, in glance's case, as:690 be available in the template context, in glance's case, as::
691
634 ctxt = {692 ctxt = {
635 ... other context ...693 ... other context ...
636 'subordinate_config': {694 'subordinate_config': {
@@ -657,7 +715,7 @@
657 self.interface = interface715 self.interface = interface
658716
659 def __call__(self):717 def __call__(self):
660 ctxt = {}718 ctxt = {'sections': {}}
661 for rid in relation_ids(self.interface):719 for rid in relation_ids(self.interface):
662 for unit in related_units(rid):720 for unit in related_units(rid):
663 sub_config = relation_get('subordinate_configuration',721 sub_config = relation_get('subordinate_configuration',
@@ -683,11 +741,26 @@
683741
684 sub_config = sub_config[self.config_file]742 sub_config = sub_config[self.config_file]
685 for k, v in sub_config.iteritems():743 for k, v in sub_config.iteritems():
686 ctxt[k] = v744 if k == 'sections':
687745 for section, config_dict in v.iteritems():
688 if not ctxt:746 log("adding section '%s'" % (section))
689 ctxt['sections'] = {}747 ctxt[k][section] = config_dict
690748 else:
749 ctxt[k] = v
750
751 log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
752
753 return ctxt
754
755
756class LogLevelContext(OSContextGenerator):
757
758 def __call__(self):
759 ctxt = {}
760 ctxt['debug'] = \
761 False if config('debug') is None else config('debug')
762 ctxt['verbose'] = \
763 False if config('verbose') is None else config('verbose')
691 return ctxt764 return ctxt
692765
693766
694767
=== added file 'hooks/charmhelpers/contrib/openstack/ip.py'
--- hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-07-29 13:07:23 +0000
@@ -0,0 +1,75 @@
1from charmhelpers.core.hookenv import (
2 config,
3 unit_get,
4)
5
6from charmhelpers.contrib.network.ip import (
7 get_address_in_network,
8 is_address_in_network,
9 is_ipv6,
10)
11
12from charmhelpers.contrib.hahelpers.cluster import is_clustered
13
14PUBLIC = 'public'
15INTERNAL = 'int'
16ADMIN = 'admin'
17
18_address_map = {
19 PUBLIC: {
20 'config': 'os-public-network',
21 'fallback': 'public-address'
22 },
23 INTERNAL: {
24 'config': 'os-internal-network',
25 'fallback': 'private-address'
26 },
27 ADMIN: {
28 'config': 'os-admin-network',
29 'fallback': 'private-address'
30 }
31}
32
33
34def canonical_url(configs, endpoint_type=PUBLIC):
35 '''
36 Returns the correct HTTP URL to this host given the state of HTTPS
37 configuration, hacluster and charm configuration.
38
39 :configs OSTemplateRenderer: A config tempating object to inspect for
40 a complete https context.
41 :endpoint_type str: The endpoint type to resolve.
42
43 :returns str: Base URL for services on the current service unit.
44 '''
45 scheme = 'http'
46 if 'https' in configs.complete_contexts():
47 scheme = 'https'
48 address = resolve_address(endpoint_type)
49 if is_ipv6(address):
50 address = "[{}]".format(address)
51 return '%s://%s' % (scheme, address)
52
53
54def resolve_address(endpoint_type=PUBLIC):
55 resolved_address = None
56 if is_clustered():
57 if config(_address_map[endpoint_type]['config']) is None:
58 # Assume vip is simple and pass back directly
59 resolved_address = config('vip')
60 else:
61 for vip in config('vip').split():
62 if is_address_in_network(
63 config(_address_map[endpoint_type]['config']),
64 vip):
65 resolved_address = vip
66 else:
67 resolved_address = get_address_in_network(
68 config(_address_map[endpoint_type]['config']),
69 unit_get(_address_map[endpoint_type]['fallback'])
70 )
71 if resolved_address is None:
72 raise ValueError('Unable to resolve a suitable IP address'
73 ' based on charm state and configuration')
74 else:
75 return resolved_address
076
=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
--- hooks/charmhelpers/contrib/openstack/neutron.py 2014-05-19 11:38:09 +0000
+++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-07-29 13:07:23 +0000
@@ -128,6 +128,20 @@
             'server_packages': ['neutron-server',
                                 'neutron-plugin-vmware'],
             'server_services': ['neutron-server']
+        },
+        'n1kv': {
+            'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
+            'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=NEUTRON_CONF_DIR)],
+            'services': [],
+            'packages': [['neutron-plugin-cisco']],
+            'server_packages': ['neutron-server',
+                                'neutron-plugin-cisco'],
+            'server_services': ['neutron-server']
         }
     }
     if release >= 'icehouse':
 
=== modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-02-27 09:26:38 +0000
+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-07-29 13:07:23 +0000
@@ -27,7 +27,12 @@
 
 {% if units -%}
 {% for service, ports in service_ports.iteritems() -%}
-listen {{ service }} 0.0.0.0:{{ ports[0] }}
+listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }}
+    balance roundrobin
+    {% for unit, address in units.iteritems() -%}
+    server {{ unit }} {{ address }}:{{ ports[1] }} check
+    {% endfor %}
+listen {{ service }}_ipv6 :::{{ ports[0] }}
     balance roundrobin
     {% for unit, address in units.iteritems() -%}
     server {{ unit }} {{ address }}:{{ ports[1] }} check
 
=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
--- hooks/charmhelpers/contrib/openstack/templating.py 2014-02-24 19:31:57 +0000
+++ hooks/charmhelpers/contrib/openstack/templating.py 2014-07-29 13:07:23 +0000
@@ -30,17 +30,17 @@
30 loading dir.30 loading dir.
3131
32 A charm may also ship a templates dir with this module32 A charm may also ship a templates dir with this module
33 and it will be appended to the bottom of the search list, eg:33 and it will be appended to the bottom of the search list, eg::
34 hooks/charmhelpers/contrib/openstack/templates.34
3535 hooks/charmhelpers/contrib/openstack/templates
36 :param templates_dir: str: Base template directory containing release36
37 sub-directories.37 :param templates_dir (str): Base template directory containing release
38 :param os_release : str: OpenStack release codename to construct template38 sub-directories.
39 loader.39 :param os_release (str): OpenStack release codename to construct template
4040 loader.
41 :returns : jinja2.ChoiceLoader constructed with a list of41 :returns: jinja2.ChoiceLoader constructed with a list of
42 jinja2.FilesystemLoaders, ordered in descending42 jinja2.FilesystemLoaders, ordered in descending
43 order by OpenStack release.43 order by OpenStack release.
44 """44 """
45 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))45 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
46 for rel in OPENSTACK_CODENAMES.itervalues()]46 for rel in OPENSTACK_CODENAMES.itervalues()]
@@ -111,7 +111,8 @@
111 and ease the burden of managing config templates across multiple OpenStack111 and ease the burden of managing config templates across multiple OpenStack
112 releases.112 releases.
113113
114 Basic usage:114 Basic usage::
115
115 # import some common context generates from charmhelpers116 # import some common context generates from charmhelpers
116 from charmhelpers.contrib.openstack import context117 from charmhelpers.contrib.openstack import context
117118
@@ -131,21 +132,19 @@
131 # write out all registered configs132 # write out all registered configs
132 configs.write_all()133 configs.write_all()
133134
134 Details:135 **OpenStack Releases and template loading**
135136
136 OpenStack Releases and template loading
137 ---------------------------------------
138 When the object is instantiated, it is associated with a specific OS137 When the object is instantiated, it is associated with a specific OS
139 release. This dictates how the template loader will be constructed.138 release. This dictates how the template loader will be constructed.
140139
141 The constructed loader attempts to load the template from several places140 The constructed loader attempts to load the template from several places
142 in the following order:141 in the following order:
143 - from the most recent OS release-specific template dir (if one exists)142 - from the most recent OS release-specific template dir (if one exists)
144 - the base templates_dir143 - the base templates_dir
145 - a template directory shipped in the charm with this helper file.144 - a template directory shipped in the charm with this helper file.
146145
147146 For the example above, '/tmp/templates' contains the following structure::
148 For the example above, '/tmp/templates' contains the following structure:147
149 /tmp/templates/nova.conf148 /tmp/templates/nova.conf
150 /tmp/templates/api-paste.ini149 /tmp/templates/api-paste.ini
151 /tmp/templates/grizzly/api-paste.ini150 /tmp/templates/grizzly/api-paste.ini
@@ -169,8 +168,8 @@
169 $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows168 $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
170 us to ship common templates (haproxy, apache) with the helpers.169 us to ship common templates (haproxy, apache) with the helpers.
171170
172 Context generators171 **Context generators**
173 ---------------------------------------172
174 Context generators are used to generate template contexts during hook173 Context generators are used to generate template contexts during hook
175 execution. Doing so may require inspecting service relations, charm174 execution. Doing so may require inspecting service relations, charm
176 config, etc. When registered, a config file is associated with a list175 config, etc. When registered, a config file is associated with a list
177176
=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
--- hooks/charmhelpers/contrib/openstack/utils.py 2014-05-19 11:38:09 +0000
+++ hooks/charmhelpers/contrib/openstack/utils.py 2014-07-29 13:07:23 +0000
@@ -3,7 +3,6 @@
 # Common python helper functions used for OpenStack charms.
 from collections import OrderedDict
 
-import apt_pkg as apt
 import subprocess
 import os
 import socket
@@ -41,7 +40,8 @@
     ('quantal', 'folsom'),
     ('raring', 'grizzly'),
     ('saucy', 'havana'),
-    ('trusty', 'icehouse')
+    ('trusty', 'icehouse'),
+    ('utopic', 'juno'),
 ])
 
 
@@ -52,6 +52,7 @@
     ('2013.1', 'grizzly'),
     ('2013.2', 'havana'),
     ('2014.1', 'icehouse'),
+    ('2014.2', 'juno'),
 ])
 
 # The ugly duckling
@@ -83,6 +84,8 @@
     '''Derive OpenStack release codename from a given installation source.'''
     ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
     rel = ''
+    if src is None:
+        return rel
     if src in ['distro', 'distro-proposed']:
         try:
             rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
@@ -130,6 +133,7 @@
 
 def get_os_codename_package(package, fatal=True):
     '''Derive OpenStack release codename from an installed package.'''
+    import apt_pkg as apt
     apt.init()
 
     # Tell apt to build an in-memory cache to prevent race conditions (if
@@ -187,7 +191,7 @@
     for version, cname in vers_map.iteritems():
         if cname == codename:
             return version
-    #e = "Could not determine OpenStack version for package: %s" % pkg
+    # e = "Could not determine OpenStack version for package: %s" % pkg
     # error_out(e)
 
 
@@ -273,6 +277,9 @@
         'icehouse': 'precise-updates/icehouse',
         'icehouse/updates': 'precise-updates/icehouse',
         'icehouse/proposed': 'precise-proposed/icehouse',
+        'juno': 'trusty-updates/juno',
+        'juno/updates': 'trusty-updates/juno',
+        'juno/proposed': 'trusty-proposed/juno',
     }
 
     try:
@@ -320,6 +327,7 @@
 
     """
 
+    import apt_pkg as apt
     src = config('openstack-origin')
     cur_vers = get_os_version_package(package)
     available_vers = get_os_version_install_source(src)
 
=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-03-27 11:02:24 +0000
+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-07-29 13:07:23 +0000
@@ -303,7 +303,7 @@
303 blk_device, fstype, system_services=[]):303 blk_device, fstype, system_services=[]):
304 """304 """
305 NOTE: This function must only be called from a single service unit for305 NOTE: This function must only be called from a single service unit for
306 the same rbd_img otherwise data loss will occur.306 the same rbd_img otherwise data loss will occur.
307307
308 Ensures given pool and RBD image exists, is mapped to a block device,308 Ensures given pool and RBD image exists, is mapped to a block device,
309 and the device is formatted and mounted at the given mount_point.309 and the device is formatted and mounted at the given mount_point.
310310
=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
--- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-05-19 11:38:09 +0000
+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-07-29 13:07:23 +0000
@@ -37,6 +37,7 @@
37 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),37 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
38 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])38 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
3939
40
40def is_device_mounted(device):41def is_device_mounted(device):
41 '''Given a device path, return True if that device is mounted, and False42 '''Given a device path, return True if that device is mounted, and False
42 if it isn't.43 if it isn't.
4344
=== added file 'hooks/charmhelpers/core/fstab.py'
--- hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/fstab.py 2014-07-29 13:07:23 +0000
@@ -0,0 +1,116 @@
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3
4__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
5
6import os
7
8
9class Fstab(file):
10 """This class extends file in order to implement a file reader/writer
11 for file `/etc/fstab`
12 """
13
14 class Entry(object):
15 """Entry class represents a non-comment line on the `/etc/fstab` file
16 """
17 def __init__(self, device, mountpoint, filesystem,
18 options, d=0, p=0):
19 self.device = device
20 self.mountpoint = mountpoint
21 self.filesystem = filesystem
22
23 if not options:
24 options = "defaults"
25
26 self.options = options
27 self.d = d
28 self.p = p
29
30 def __eq__(self, o):
31 return str(self) == str(o)
32
33 def __str__(self):
34 return "{} {} {} {} {} {}".format(self.device,
35 self.mountpoint,
36 self.filesystem,
37 self.options,
38 self.d,
39 self.p)
40
41 DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
42
43 def __init__(self, path=None):
44 if path:
45 self._path = path
46 else:
47 self._path = self.DEFAULT_PATH
48 file.__init__(self, self._path, 'r+')
49
50 def _hydrate_entry(self, line):
51 # NOTE: use split with no arguments to split on any
52 # whitespace including tabs
53 return Fstab.Entry(*filter(
54 lambda x: x not in ('', None),
55 line.strip("\n").split()))
56
57 @property
58 def entries(self):
59 self.seek(0)
60 for line in self.readlines():
61 try:
62 if not line.startswith("#"):
63 yield self._hydrate_entry(line)
64 except ValueError:
65 pass
66
67 def get_entry_by_attr(self, attr, value):
68 for entry in self.entries:
69 e_attr = getattr(entry, attr)
70 if e_attr == value:
71 return entry
72 return None
73
74 def add_entry(self, entry):
75 if self.get_entry_by_attr('device', entry.device):
76 return False
77
78 self.write(str(entry) + '\n')
79 self.truncate()
80 return entry
81
82 def remove_entry(self, entry):
83 self.seek(0)
84
85 lines = self.readlines()
86
87 found = False
88 for index, line in enumerate(lines):
89 if not line.startswith("#"):
90 if self._hydrate_entry(line) == entry:
91 found = True
92 break
93
94 if not found:
95 return False
96
97 lines.remove(line)
98
99 self.seek(0)
100 self.write(''.join(lines))
101 self.truncate()
102 return True
103
104 @classmethod
105 def remove_by_mountpoint(cls, mountpoint, path=None):
106 fstab = cls(path=path)
107 entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
108 if entry:
109 return fstab.remove_entry(entry)
110 return False
111
112 @classmethod
113 def add(cls, device, mountpoint, filesystem, options=None, path=None):
114 return cls(path=path).add_entry(Fstab.Entry(device,
115 mountpoint, filesystem,
116 options=options))
0117
=== modified file 'hooks/charmhelpers/core/hookenv.py'
--- hooks/charmhelpers/core/hookenv.py 2014-05-19 11:38:09 +0000
+++ hooks/charmhelpers/core/hookenv.py 2014-07-29 13:07:23 +0000
@@ -25,7 +25,7 @@
 def cached(func):
     """Cache return values for multiple executions of func + args
 
-    For example:
+    For example::
 
         @cached
         def unit_get(attribute):
@@ -445,18 +445,19 @@
 class Hooks(object):
     """A convenient handler for hook functions.
 
-    Example:
+    Example::
+
         hooks = Hooks()
 
         # register a hook, taking its name from the function name
         @hooks.hook()
         def install():
-            ...
+            pass  # your code here
 
         # register a hook, providing a custom hook name
         @hooks.hook("config-changed")
         def config_changed():
-            ...
+            pass  # your code here
 
         if __name__ == "__main__":
             # execute a hook based on the name the program is called by
 
=== modified file 'hooks/charmhelpers/core/host.py'
--- hooks/charmhelpers/core/host.py 2014-05-19 11:38:09 +0000
+++ hooks/charmhelpers/core/host.py 2014-07-29 13:07:23 +0000
@@ -12,11 +12,11 @@
12import string12import string
13import subprocess13import subprocess
14import hashlib14import hashlib
15import apt_pkg
1615
17from collections import OrderedDict16from collections import OrderedDict
1817
19from hookenv import log18from hookenv import log
19from fstab import Fstab
2020
2121
22def service_start(service_name):22def service_start(service_name):
@@ -35,7 +35,8 @@
3535
3636
37def service_reload(service_name, restart_on_failure=False):37def service_reload(service_name, restart_on_failure=False):
38 """Reload a system service, optionally falling back to restart if reload fails"""38 """Reload a system service, optionally falling back to restart if
39 reload fails"""
39 service_result = service('reload', service_name)40 service_result = service('reload', service_name)
40 if not service_result and restart_on_failure:41 if not service_result and restart_on_failure:
41 service_result = service('restart', service_name)42 service_result = service('restart', service_name)
@@ -144,7 +145,19 @@
144 target.write(content)145 target.write(content)
145146
146147
147def mount(device, mountpoint, options=None, persist=False):148def fstab_remove(mp):
149 """Remove the given mountpoint entry from /etc/fstab
150 """
151 return Fstab.remove_by_mountpoint(mp)
152
153
154def fstab_add(dev, mp, fs, options=None):
155 """Adds the given device entry to the /etc/fstab file
156 """
157 return Fstab.add(dev, mp, fs, options=options)
158
159
160def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
148 """Mount a filesystem at a particular mountpoint"""161 """Mount a filesystem at a particular mountpoint"""
149 cmd_args = ['mount']162 cmd_args = ['mount']
150 if options is not None:163 if options is not None:
@@ -155,9 +168,9 @@
155 except subprocess.CalledProcessError, e:168 except subprocess.CalledProcessError, e:
156 log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))169 log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
157 return False170 return False
171
158 if persist:172 if persist:
159 # TODO: update fstab173 return fstab_add(device, mountpoint, filesystem, options=options)
160 pass
161 return True174 return True
162175
163176
@@ -169,9 +182,9 @@
169 except subprocess.CalledProcessError, e:182 except subprocess.CalledProcessError, e:
170 log('Error unmounting {}\n{}'.format(mountpoint, e.output))183 log('Error unmounting {}\n{}'.format(mountpoint, e.output))
171 return False184 return False
185
172 if persist:186 if persist:
173 # TODO: update fstab187 return fstab_remove(mountpoint)
174 pass
175 return True188 return True
176189
177190
@@ -198,13 +211,13 @@
198def restart_on_change(restart_map, stopstart=False):211def restart_on_change(restart_map, stopstart=False):
199 """Restart services based on configuration files changing212 """Restart services based on configuration files changing
200213
201 This function is used a decorator, for example214 This function is used a decorator, for example::
202215
203 @restart_on_change({216 @restart_on_change({
204 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]217 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
205 })218 })
206 def ceph_client_changed():219 def ceph_client_changed():
207 ...220 pass # your code here
208221
209 In this example, the cinder-api and cinder-volume services222 In this example, the cinder-api and cinder-volume services
210 would be restarted if /etc/ceph/ceph.conf is changed by the223 would be restarted if /etc/ceph/ceph.conf is changed by the
@@ -300,12 +313,19 @@
300313
301def cmp_pkgrevno(package, revno, pkgcache=None):314def cmp_pkgrevno(package, revno, pkgcache=None):
302 '''Compare supplied revno with the revno of the installed package315 '''Compare supplied revno with the revno of the installed package
303 1 => Installed revno is greater than supplied arg316
304 0 => Installed revno is the same as supplied arg317 * 1 => Installed revno is greater than supplied arg
305 -1 => Installed revno is less than supplied arg318 * 0 => Installed revno is the same as supplied arg
319 * -1 => Installed revno is less than supplied arg
320
306 '''321 '''
322 import apt_pkg
307 if not pkgcache:323 if not pkgcache:
308 apt_pkg.init()324 apt_pkg.init()
325 # Force Apt to build its cache in memory. That way we avoid race
326 # conditions with other applications building the cache in the same
327 # place.
328 apt_pkg.config.set("Dir::Cache::pkgcache", "")
309 pkgcache = apt_pkg.Cache()329 pkgcache = apt_pkg.Cache()
310 pkg = pkgcache[package]330 pkg = pkgcache[package]
311 return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)331 return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
312332
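
The host.py change above finally fills in the persist path for mount()/umount(): instead of a TODO placeholder, a persistent mount now writes (or removes) the matching /etc/fstab entry via the new Fstab helper added in hooks/charmhelpers/core/fstab.py. Below is a rough standalone sketch of the same flow, assuming a plain file append in place of the Fstab class; it is illustrative only, not the charm-helpers implementation.

    # Standalone sketch of the persist behaviour added to mount() above; the
    # real helper delegates to charmhelpers.core.fstab.Fstab rather than
    # appending directly as this stub does.
    import subprocess

    FSTAB = '/etc/fstab'

    def fstab_add(dev, mp, fs, options=None):
        """Record a mount in /etc/fstab so it survives a reboot."""
        line = '{dev} {mp} {fs} {opts} 0 0\n'.format(
            dev=dev, mp=mp, fs=fs, opts=options or 'defaults')
        with open(FSTAB, 'a') as fstab:
            fstab.write(line)
        return True

    def mount(device, mountpoint, options=None, persist=False, filesystem='ext3'):
        """Mount a filesystem and, when persist=True, also record it in fstab."""
        cmd_args = ['mount']
        if options is not None:
            cmd_args.extend(['-o', options])
        cmd_args.extend([device, mountpoint])
        try:
            subprocess.check_output(cmd_args)
        except subprocess.CalledProcessError:
            return False
        if persist:
            # Previously a TODO/pass; now the mount is made persistent.
            return fstab_add(device, mountpoint, filesystem, options=options)
        return True
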
=== modified file 'hooks/charmhelpers/fetch/__init__.py'
--- hooks/charmhelpers/fetch/__init__.py 2014-05-19 11:38:09 +0000
+++ hooks/charmhelpers/fetch/__init__.py 2014-07-29 13:07:23 +0000
@@ -13,7 +13,6 @@
13 config,13 config,
14 log,14 log,
15)15)
16import apt_pkg
17import os16import os
1817
1918
@@ -56,6 +55,15 @@
56 'icehouse/proposed': 'precise-proposed/icehouse',55 'icehouse/proposed': 'precise-proposed/icehouse',
57 'precise-icehouse/proposed': 'precise-proposed/icehouse',56 'precise-icehouse/proposed': 'precise-proposed/icehouse',
58 'precise-proposed/icehouse': 'precise-proposed/icehouse',57 'precise-proposed/icehouse': 'precise-proposed/icehouse',
58 # Juno
59 'juno': 'trusty-updates/juno',
60 'trusty-juno': 'trusty-updates/juno',
61 'trusty-juno/updates': 'trusty-updates/juno',
62 'trusty-updates/juno': 'trusty-updates/juno',
63 'juno/proposed': 'trusty-proposed/juno',
64 'juno/proposed': 'trusty-proposed/juno',
65 'trusty-juno/proposed': 'trusty-proposed/juno',
66 'trusty-proposed/juno': 'trusty-proposed/juno',
59}67}
6068
61# The order of this list is very important. Handlers should be listed in from69# The order of this list is very important. Handlers should be listed in from
@@ -108,6 +116,7 @@
108116
109def filter_installed_packages(packages):117def filter_installed_packages(packages):
110 """Returns a list of packages that require installation"""118 """Returns a list of packages that require installation"""
119 import apt_pkg
111 apt_pkg.init()120 apt_pkg.init()
112121
113 # Tell apt to build an in-memory cache to prevent race conditions (if122 # Tell apt to build an in-memory cache to prevent race conditions (if
@@ -226,31 +235,39 @@
226 sources_var='install_sources',235 sources_var='install_sources',
227 keys_var='install_keys'):236 keys_var='install_keys'):
228 """237 """
229 Configure multiple sources from charm configuration238 Configure multiple sources from charm configuration.
239
240 The lists are encoded as yaml fragments in the configuration.
241 The fragment needs to be included as a string.
230242
231 Example config:243 Example config:
232 install_sources:244 install_sources: |
233 - "ppa:foo"245 - "ppa:foo"
234 - "http://example.com/repo precise main"246 - "http://example.com/repo precise main"
235 install_keys:247 install_keys: |
236 - null248 - null
237 - "a1b2c3d4"249 - "a1b2c3d4"
238250
239 Note that 'null' (a.k.a. None) should not be quoted.251 Note that 'null' (a.k.a. None) should not be quoted.
240 """252 """
241 sources = safe_load(config(sources_var))253 sources = safe_load((config(sources_var) or '').strip()) or []
242 keys = config(keys_var)254 keys = safe_load((config(keys_var) or '').strip()) or None
243 if keys is not None:255
244 keys = safe_load(keys)256 if isinstance(sources, basestring):
245 if isinstance(sources, basestring) and (257 sources = [sources]
246 keys is None or isinstance(keys, basestring)):258
247 add_source(sources, keys)259 if keys is None:
260 for source in sources:
261 add_source(source, None)
248 else:262 else:
249 if not len(sources) == len(keys):263 if isinstance(keys, basestring):
250 msg = 'Install sources and keys lists are different lengths'264 keys = [keys]
251 raise SourceConfigError(msg)265
252 for src_num in range(len(sources)):266 if len(sources) != len(keys):
253 add_source(sources[src_num], keys[src_num])267 raise SourceConfigError(
268 'Install sources and keys lists are different lengths')
269 for source, key in zip(sources, keys):
270 add_source(source, key)
254 if update:271 if update:
255 apt_update(fatal=True)272 apt_update(fatal=True)
256273
257274
=== modified file 'hooks/charmhelpers/fetch/bzrurl.py'
--- hooks/charmhelpers/fetch/bzrurl.py 2013-11-06 03:48:26 +0000
+++ hooks/charmhelpers/fetch/bzrurl.py 2014-07-29 13:07:23 +0000
@@ -39,7 +39,8 @@
39 def install(self, source):39 def install(self, source):
40 url_parts = self.parse_url(source)40 url_parts = self.parse_url(source)
41 branch_name = url_parts.path.strip("/").split("/")[-1]41 branch_name = url_parts.path.strip("/").split("/")[-1]
42 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name)42 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
43 branch_name)
43 if not os.path.exists(dest_dir):44 if not os.path.exists(dest_dir):
44 mkdir(dest_dir, perms=0755)45 mkdir(dest_dir, perms=0755)
45 try:46 try:
4647
=== added symlink 'hooks/neutron-api-relation-broken'
=== target is u'nova_cc_hooks.py'
=== added symlink 'hooks/neutron-api-relation-changed'
=== target is u'nova_cc_hooks.py'
=== added symlink 'hooks/neutron-api-relation-departed'
=== target is u'nova_cc_hooks.py'
=== added symlink 'hooks/neutron-api-relation-joined'
=== target is u'nova_cc_hooks.py'
=== modified file 'hooks/nova_cc_context.py'
--- hooks/nova_cc_context.py 2014-06-17 10:01:21 +0000
+++ hooks/nova_cc_context.py 2014-07-29 13:07:23 +0000
@@ -1,7 +1,7 @@
11
2from charmhelpers.core.hookenv import (2from charmhelpers.core.hookenv import (
3 config, relation_ids, relation_set, log, ERROR,3 config, relation_ids, relation_set, log, ERROR,
4 unit_get)4 unit_get, related_units, relation_get)
55
6from charmhelpers.fetch import apt_install, filter_installed_packages6from charmhelpers.fetch import apt_install, filter_installed_packages
7from charmhelpers.contrib.openstack import context, neutron, utils7from charmhelpers.contrib.openstack import context, neutron, utils
@@ -14,6 +14,17 @@
14)14)
1515
1616
17def context_complete(ctxt):
18 _missing = []
19 for k, v in ctxt.iteritems():
20 if v is None or v == '':
21 _missing.append(k)
22 if _missing:
23 log('Missing required data: %s' % ' '.join(_missing), level='INFO')
24 return False
25 return True
26
27
17class ApacheSSLContext(context.ApacheSSLContext):28class ApacheSSLContext(context.ApacheSSLContext):
1829
19 interfaces = ['https']30 interfaces = ['https']
@@ -27,6 +38,26 @@
27 return super(ApacheSSLContext, self).__call__()38 return super(ApacheSSLContext, self).__call__()
2839
2940
41class NeutronAPIContext(context.OSContextGenerator):
42
43 def __call__(self):
44 log('Generating template context from neutron api relation')
45 ctxt = {}
46 for rid in relation_ids('neutron-api'):
47 for unit in related_units(rid):
48 rdata = relation_get(rid=rid, unit=unit)
49 ctxt = {
50 'neutron_url': rdata.get('neutron-url'),
51 'neutron_plugin': rdata.get('neutron-plugin'),
52 'neutron_security_groups':
53 rdata.get('neutron-security-groups'),
54 'network_manager': 'neutron',
55 }
56 if context_complete(ctxt):
57 return ctxt
58 return {}
59
60
30class VolumeServiceContext(context.OSContextGenerator):61class VolumeServiceContext(context.OSContextGenerator):
31 interfaces = []62 interfaces = []
3263
3364
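
The context_complete() helper added above is the gate that keeps a half-formed neutron-api relation from leaking empty values into templates: any None or empty-string value marks the context incomplete and NeutronAPIContext then returns {}. A tiny standalone illustration of that check, with log() replaced by print and iteritems() by items() so it runs outside a hook; the relation values shown are invented examples.

    def context_complete(ctxt):
        """Return True only when every value in the context is populated."""
        missing = [k for k, v in ctxt.items() if v is None or v == '']
        if missing:
            print('Missing required data: %s' % ' '.join(missing))
            return False
        return True

    # Example relation data as a neutron-api unit might publish it:
    complete = {'neutron_url': 'http://10.0.0.5:9696',
                'neutron_plugin': 'ovs',
                'neutron_security_groups': 'yes',
                'network_manager': 'neutron'}
    partial = dict(complete, neutron_url=None)

    assert context_complete(complete) is True
    assert context_complete(partial) is False
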
=== modified file 'hooks/nova_cc_hooks.py'
--- hooks/nova_cc_hooks.py 2014-04-11 16:41:42 +0000
+++ hooks/nova_cc_hooks.py 2014-07-29 13:07:23 +0000
@@ -19,12 +19,15 @@
19 relation_get,19 relation_get,
20 relation_ids,20 relation_ids,
21 relation_set,21 relation_set,
22 related_units,
22 open_port,23 open_port,
23 unit_get,24 unit_get,
24)25)
2526
26from charmhelpers.core.host import (27from charmhelpers.core.host import (
27 restart_on_change28 restart_on_change,
29 service_running,
30 service_stop,
28)31)
2932
30from charmhelpers.fetch import (33from charmhelpers.fetch import (
@@ -41,6 +44,10 @@
41 neutron_plugin_attribute,44 neutron_plugin_attribute,
42)45)
4346
47from nova_cc_context import (
48 NeutronAPIContext
49)
50
44from nova_cc_utils import (51from nova_cc_utils import (
45 api_port,52 api_port,
46 auth_token_config,53 auth_token_config,
@@ -54,8 +61,8 @@
54 save_script_rc,61 save_script_rc,
55 ssh_compute_add,62 ssh_compute_add,
56 ssh_compute_remove,63 ssh_compute_remove,
57 ssh_known_hosts_b64,64 ssh_known_hosts_lines,
58 ssh_authorized_keys_b64,65 ssh_authorized_keys_lines,
59 register_configs,66 register_configs,
60 restart_map,67 restart_map,
61 volume_service,68 volume_service,
@@ -63,11 +70,12 @@
63 NOVA_CONF,70 NOVA_CONF,
64 QUANTUM_CONF,71 QUANTUM_CONF,
65 NEUTRON_CONF,72 NEUTRON_CONF,
66 QUANTUM_API_PASTE73 QUANTUM_API_PASTE,
74 service_guard,
75 guard_map,
67)76)
6877
69from charmhelpers.contrib.hahelpers.cluster import (78from charmhelpers.contrib.hahelpers.cluster import (
70 canonical_url,
71 eligible_leader,79 eligible_leader,
72 get_hacluster_config,80 get_hacluster_config,
73 is_leader,81 is_leader,
@@ -75,6 +83,16 @@
7583
76from charmhelpers.payload.execd import execd_preinstall84from charmhelpers.payload.execd import execd_preinstall
7785
86from charmhelpers.contrib.openstack.ip import (
87 canonical_url,
88 PUBLIC, INTERNAL, ADMIN
89)
90
91from charmhelpers.contrib.network.ip import (
92 get_iface_for_address,
93 get_netmask_for_address
94)
95
78hooks = Hooks()96hooks = Hooks()
79CONFIGS = register_configs()97CONFIGS = register_configs()
8098
@@ -96,6 +114,8 @@
96114
97115
98@hooks.hook('config-changed')116@hooks.hook('config-changed')
117@service_guard(guard_map(), CONFIGS,
118 active=config('service-guard'))
99@restart_on_change(restart_map(), stopstart=True)119@restart_on_change(restart_map(), stopstart=True)
100def config_changed():120def config_changed():
101 global CONFIGS121 global CONFIGS
@@ -104,6 +124,8 @@
104 save_script_rc()124 save_script_rc()
105 configure_https()125 configure_https()
106 CONFIGS.write_all()126 CONFIGS.write_all()
127 for r_id in relation_ids('identity-service'):
128 identity_joined(rid=r_id)
107129
108130
109@hooks.hook('amqp-relation-joined')131@hooks.hook('amqp-relation-joined')
@@ -114,16 +136,19 @@
114136
115@hooks.hook('amqp-relation-changed')137@hooks.hook('amqp-relation-changed')
116@hooks.hook('amqp-relation-departed')138@hooks.hook('amqp-relation-departed')
139@service_guard(guard_map(), CONFIGS,
140 active=config('service-guard'))
117@restart_on_change(restart_map())141@restart_on_change(restart_map())
118def amqp_changed():142def amqp_changed():
119 if 'amqp' not in CONFIGS.complete_contexts():143 if 'amqp' not in CONFIGS.complete_contexts():
120 log('amqp relation incomplete. Peer not ready?')144 log('amqp relation incomplete. Peer not ready?')
121 return145 return
122 CONFIGS.write(NOVA_CONF)146 CONFIGS.write(NOVA_CONF)
123 if network_manager() == 'quantum':147 if not is_relation_made('neutron-api'):
124 CONFIGS.write(QUANTUM_CONF)148 if network_manager() == 'quantum':
125 if network_manager() == 'neutron':149 CONFIGS.write(QUANTUM_CONF)
126 CONFIGS.write(NEUTRON_CONF)150 if network_manager() == 'neutron':
151 CONFIGS.write(NEUTRON_CONF)
127152
128153
129@hooks.hook('shared-db-relation-joined')154@hooks.hook('shared-db-relation-joined')
@@ -171,6 +196,8 @@
171196
172197
173@hooks.hook('shared-db-relation-changed')198@hooks.hook('shared-db-relation-changed')
199@service_guard(guard_map(), CONFIGS,
200 active=config('service-guard'))
174@restart_on_change(restart_map())201@restart_on_change(restart_map())
175def db_changed():202def db_changed():
176 if 'shared-db' not in CONFIGS.complete_contexts():203 if 'shared-db' not in CONFIGS.complete_contexts():
@@ -186,6 +213,8 @@
186213
187214
188@hooks.hook('pgsql-nova-db-relation-changed')215@hooks.hook('pgsql-nova-db-relation-changed')
216@service_guard(guard_map(), CONFIGS,
217 active=config('service-guard'))
189@restart_on_change(restart_map())218@restart_on_change(restart_map())
190def postgresql_nova_db_changed():219def postgresql_nova_db_changed():
191 if 'pgsql-nova-db' not in CONFIGS.complete_contexts():220 if 'pgsql-nova-db' not in CONFIGS.complete_contexts():
@@ -201,6 +230,8 @@
201230
202231
203@hooks.hook('pgsql-neutron-db-relation-changed')232@hooks.hook('pgsql-neutron-db-relation-changed')
233@service_guard(guard_map(), CONFIGS,
234 active=config('service-guard'))
204@restart_on_change(restart_map())235@restart_on_change(restart_map())
205def postgresql_neutron_db_changed():236def postgresql_neutron_db_changed():
206 if network_manager() in ['neutron', 'quantum']:237 if network_manager() in ['neutron', 'quantum']:
@@ -210,6 +241,8 @@
210241
211242
212@hooks.hook('image-service-relation-changed')243@hooks.hook('image-service-relation-changed')
244@service_guard(guard_map(), CONFIGS,
245 active=config('service-guard'))
213@restart_on_change(restart_map())246@restart_on_change(restart_map())
214def image_service_changed():247def image_service_changed():
215 if 'image-service' not in CONFIGS.complete_contexts():248 if 'image-service' not in CONFIGS.complete_contexts():
@@ -223,11 +256,17 @@
223def identity_joined(rid=None):256def identity_joined(rid=None):
224 if not eligible_leader(CLUSTER_RES):257 if not eligible_leader(CLUSTER_RES):
225 return258 return
226 base_url = canonical_url(CONFIGS)259 public_url = canonical_url(CONFIGS, PUBLIC)
227 relation_set(relation_id=rid, **determine_endpoints(base_url))260 internal_url = canonical_url(CONFIGS, INTERNAL)
261 admin_url = canonical_url(CONFIGS, ADMIN)
262 relation_set(relation_id=rid, **determine_endpoints(public_url,
263 internal_url,
264 admin_url))
228265
229266
230@hooks.hook('identity-service-relation-changed')267@hooks.hook('identity-service-relation-changed')
268@service_guard(guard_map(), CONFIGS,
269 active=config('service-guard'))
231@restart_on_change(restart_map())270@restart_on_change(restart_map())
232def identity_changed():271def identity_changed():
233 if 'identity-service' not in CONFIGS.complete_contexts():272 if 'identity-service' not in CONFIGS.complete_contexts():
@@ -235,20 +274,24 @@
235 return274 return
236 CONFIGS.write('/etc/nova/api-paste.ini')275 CONFIGS.write('/etc/nova/api-paste.ini')
237 CONFIGS.write(NOVA_CONF)276 CONFIGS.write(NOVA_CONF)
238 if network_manager() == 'quantum':277 if not is_relation_made('neutron-api'):
239 CONFIGS.write(QUANTUM_API_PASTE)278 if network_manager() == 'quantum':
240 CONFIGS.write(QUANTUM_CONF)279 CONFIGS.write(QUANTUM_API_PASTE)
241 save_novarc()280 CONFIGS.write(QUANTUM_CONF)
242 if network_manager() == 'neutron':281 save_novarc()
243 CONFIGS.write(NEUTRON_CONF)282 if network_manager() == 'neutron':
283 CONFIGS.write(NEUTRON_CONF)
244 [compute_joined(rid) for rid in relation_ids('cloud-compute')]284 [compute_joined(rid) for rid in relation_ids('cloud-compute')]
245 [quantum_joined(rid) for rid in relation_ids('quantum-network-service')]285 [quantum_joined(rid) for rid in relation_ids('quantum-network-service')]
246 [nova_vmware_relation_joined(rid) for rid in relation_ids('nova-vmware')]286 [nova_vmware_relation_joined(rid) for rid in relation_ids('nova-vmware')]
287 [neutron_api_relation_joined(rid) for rid in relation_ids('neutron-api')]
247 configure_https()288 configure_https()
248289
249290
250@hooks.hook('nova-volume-service-relation-joined',291@hooks.hook('nova-volume-service-relation-joined',
251 'cinder-volume-service-relation-joined')292 'cinder-volume-service-relation-joined')
293@service_guard(guard_map(), CONFIGS,
294 active=config('service-guard'))
252@restart_on_change(restart_map())295@restart_on_change(restart_map())
253def volume_joined():296def volume_joined():
254 CONFIGS.write(NOVA_CONF)297 CONFIGS.write(NOVA_CONF)
@@ -293,6 +336,33 @@
293 out.write('export OS_REGION_NAME=%s\n' % config('region'))336 out.write('export OS_REGION_NAME=%s\n' % config('region'))
294337
295338
339def neutron_settings():
340 neutron_settings = {}
341 if is_relation_made('neutron-api', 'neutron-plugin'):
342 neutron_api_info = NeutronAPIContext()()
343 neutron_settings.update({
344 # XXX: Rename these relations settings?
345 'quantum_plugin': neutron_api_info['neutron_plugin'],
346 'region': config('region'),
347 'quantum_security_groups':
348 neutron_api_info['neutron_security_groups'],
349 'quantum_url': neutron_api_info['neutron_url'],
350 })
351 else:
352 neutron_settings.update({
353 # XXX: Rename these relations settings?
354 'quantum_plugin': neutron_plugin(),
355 'region': config('region'),
356 'quantum_security_groups': config('quantum-security-groups'),
357 'quantum_url': "{}:{}".format(canonical_url(CONFIGS, INTERNAL),
358 str(api_port('neutron-server'))),
359 })
360 neutron_url = urlparse(neutron_settings['quantum_url'])
361 neutron_settings['quantum_host'] = neutron_url.hostname
362 neutron_settings['quantum_port'] = neutron_url.port
363 return neutron_settings
364
365
296def keystone_compute_settings():366def keystone_compute_settings():
297 ks_auth_config = _auth_config()367 ks_auth_config = _auth_config()
298 rel_settings = {}368 rel_settings = {}
@@ -300,20 +370,10 @@
300 if network_manager() in ['quantum', 'neutron']:370 if network_manager() in ['quantum', 'neutron']:
301 if ks_auth_config:371 if ks_auth_config:
302 rel_settings.update(ks_auth_config)372 rel_settings.update(ks_auth_config)
303373 rel_settings.update(neutron_settings())
304 rel_settings.update({
305 # XXX: Rename these relations settings?
306 'quantum_plugin': neutron_plugin(),
307 'region': config('region'),
308 'quantum_security_groups': config('quantum-security-groups'),
309 'quantum_url': (canonical_url(CONFIGS) + ':' +
310 str(api_port('neutron-server'))),
311 })
312
313 ks_ca = keystone_ca_cert_b64()374 ks_ca = keystone_ca_cert_b64()
314 if ks_auth_config and ks_ca:375 if ks_auth_config and ks_ca:
315 rel_settings['ca_cert'] = ks_ca376 rel_settings['ca_cert'] = ks_ca
316
317 return rel_settings377 return rel_settings
318378
319379
@@ -328,7 +388,6 @@
328 # this may not even be needed.388 # this may not even be needed.
329 'ec2_host': unit_get('private-address'),389 'ec2_host': unit_get('private-address'),
330 }390 }
331
332 # update relation setting if we're attempting to restart remote391 # update relation setting if we're attempting to restart remote
333 # services392 # services
334 if remote_restart:393 if remote_restart:
@@ -339,21 +398,63 @@
339398
340399
341@hooks.hook('cloud-compute-relation-changed')400@hooks.hook('cloud-compute-relation-changed')
342def compute_changed():401def compute_changed(rid=None, unit=None):
343 migration_auth = relation_get('migration_auth_type')402 rel_settings = relation_get(rid=rid, unit=unit)
344 if migration_auth == 'ssh':403 if 'migration_auth_type' not in rel_settings:
345 key = relation_get('ssh_public_key')404 return
405 if rel_settings['migration_auth_type'] == 'ssh':
406 key = rel_settings.get('ssh_public_key')
346 if not key:407 if not key:
347 log('SSH migration set but peer did not publish key.')408 log('SSH migration set but peer did not publish key.')
348 return409 return
349 ssh_compute_add(key)410 ssh_compute_add(key, rid=rid, unit=unit)
350 relation_set(known_hosts=ssh_known_hosts_b64(),411 index = 0
351 authorized_keys=ssh_authorized_keys_b64())412 for line in ssh_known_hosts_lines(unit=unit):
352 if relation_get('nova_ssh_public_key'):413 relation_set(
353 key = relation_get('nova_ssh_public_key')414 relation_id=rid,
354 ssh_compute_add(key, user='nova')415 relation_settings={
355 relation_set(nova_known_hosts=ssh_known_hosts_b64(user='nova'),416 'known_hosts_{}'.format(index): line})
356 nova_authorized_keys=ssh_authorized_keys_b64(user='nova'))417 index += 1
418 relation_set(relation_id=rid, known_hosts_max_index=index)
419 index = 0
420 for line in ssh_authorized_keys_lines(unit=unit):
421 relation_set(
422 relation_id=rid,
423 relation_settings={
424 'authorized_keys_{}'.format(index): line})
425 index += 1
426 relation_set(relation_id=rid, authorized_keys_max_index=index)
427 if 'nova_ssh_public_key' not in rel_settings:
428 return
429 if rel_settings['nova_ssh_public_key']:
430 ssh_compute_add(rel_settings['nova_ssh_public_key'],
431 rid=rid, unit=unit, user='nova')
432 index = 0
433 for line in ssh_known_hosts_lines(unit=unit, user='nova'):
434 relation_set(
435 relation_id=rid,
436 relation_settings={
437 '{}_known_hosts_{}'.format(
438 'nova',
439 index): line})
440 index += 1
441 relation_set(
442 relation_id=rid,
443 relation_settings={
444 '{}_known_hosts_max_index'.format('nova'): index})
445 index = 0
446 for line in ssh_authorized_keys_lines(unit=unit, user='nova'):
447 relation_set(
448 relation_id=rid,
449 relation_settings={
450 '{}_authorized_keys_{}'.format(
451 'nova',
452 index): line})
453 index += 1
454 relation_set(
455 relation_id=rid,
456 relation_settings={
457 '{}_authorized_keys_max_index'.format('nova'): index})
357458
358459
359@hooks.hook('cloud-compute-relation-departed')460@hooks.hook('cloud-compute-relation-departed')
@@ -367,15 +468,7 @@
367 if not eligible_leader(CLUSTER_RES):468 if not eligible_leader(CLUSTER_RES):
368 return469 return
369470
370 url = canonical_url(CONFIGS) + ':9696'471 rel_settings = neutron_settings()
371 # XXX: Can we rename to neutron_*?
372 rel_settings = {
373 'quantum_host': urlparse(url).hostname,
374 'quantum_url': url,
375 'quantum_port': 9696,
376 'quantum_plugin': neutron_plugin(),
377 'region': config('region')
378 }
379472
380 # inform quantum about local keystone auth config473 # inform quantum about local keystone auth config
381 ks_auth_config = _auth_config()474 ks_auth_config = _auth_config()
@@ -385,12 +478,13 @@
385 ks_ca = keystone_ca_cert_b64()478 ks_ca = keystone_ca_cert_b64()
386 if ks_auth_config and ks_ca:479 if ks_auth_config and ks_ca:
387 rel_settings['ca_cert'] = ks_ca480 rel_settings['ca_cert'] = ks_ca
388
389 relation_set(relation_id=rid, **rel_settings)481 relation_set(relation_id=rid, **rel_settings)
390482
391483
392@hooks.hook('cluster-relation-changed',484@hooks.hook('cluster-relation-changed',
393 'cluster-relation-departed')485 'cluster-relation-departed')
486@service_guard(guard_map(), CONFIGS,
487 active=config('service-guard'))
394@restart_on_change(restart_map(), stopstart=True)488@restart_on_change(restart_map(), stopstart=True)
395def cluster_changed():489def cluster_changed():
396 CONFIGS.write_all()490 CONFIGS.write_all()
@@ -400,15 +494,28 @@
400def ha_joined():494def ha_joined():
401 config = get_hacluster_config()495 config = get_hacluster_config()
402 resources = {496 resources = {
403 'res_nova_vip': 'ocf:heartbeat:IPaddr2',
404 'res_nova_haproxy': 'lsb:haproxy',497 'res_nova_haproxy': 'lsb:haproxy',
405 }498 }
406 vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
407 (config['vip'], config['vip_cidr'], config['vip_iface'])
408 resource_params = {499 resource_params = {
409 'res_nova_vip': vip_params,
410 'res_nova_haproxy': 'op monitor interval="5s"'500 'res_nova_haproxy': 'op monitor interval="5s"'
411 }501 }
502 vip_group = []
503 for vip in config['vip'].split():
504 iface = get_iface_for_address(vip)
505 if iface is not None:
506 vip_key = 'res_nova_{}_vip'.format(iface)
507 resources[vip_key] = 'ocf:heartbeat:IPaddr2'
508 resource_params[vip_key] = (
509 'params ip="{vip}" cidr_netmask="{netmask}"'
510 ' nic="{iface}"'.format(vip=vip,
511 iface=iface,
512 netmask=get_netmask_for_address(vip))
513 )
514 vip_group.append(vip_key)
515
516 if len(vip_group) > 1:
517 relation_set(groups={'grp_nova_vips': ' '.join(vip_group)})
518
412 init_services = {519 init_services = {
413 'res_nova_haproxy': 'haproxy'520 'res_nova_haproxy': 'haproxy'
414 }521 }
@@ -447,6 +554,8 @@
447 'pgsql-nova-db-relation-broken',554 'pgsql-nova-db-relation-broken',
448 'pgsql-neutron-db-relation-broken',555 'pgsql-neutron-db-relation-broken',
449 'quantum-network-service-relation-broken')556 'quantum-network-service-relation-broken')
557@service_guard(guard_map(), CONFIGS,
558 active=config('service-guard'))
450def relation_broken():559def relation_broken():
451 CONFIGS.write_all()560 CONFIGS.write_all()
452561
@@ -480,13 +589,15 @@
480 rel_settings.update({589 rel_settings.update({
481 'quantum_plugin': neutron_plugin(),590 'quantum_plugin': neutron_plugin(),
482 'quantum_security_groups': config('quantum-security-groups'),591 'quantum_security_groups': config('quantum-security-groups'),
483 'quantum_url': (canonical_url(CONFIGS) + ':' +592 'quantum_url': "{}:{}".format(canonical_url(CONFIGS, INTERNAL),
484 str(api_port('neutron-server')))})593 str(api_port('neutron-server')))})
485594
486 relation_set(relation_id=rid, **rel_settings)595 relation_set(relation_id=rid, **rel_settings)
487596
488597
489@hooks.hook('nova-vmware-relation-changed')598@hooks.hook('nova-vmware-relation-changed')
599@service_guard(guard_map(), CONFIGS,
600 active=config('service-guard'))
490@restart_on_change(restart_map())601@restart_on_change(restart_map())
491def nova_vmware_relation_changed():602def nova_vmware_relation_changed():
492 CONFIGS.write('/etc/nova/nova.conf')603 CONFIGS.write('/etc/nova/nova.conf')
@@ -498,6 +609,49 @@
498 amqp_joined(relation_id=r_id)609 amqp_joined(relation_id=r_id)
499 for r_id in relation_ids('identity-service'):610 for r_id in relation_ids('identity-service'):
500 identity_joined(rid=r_id)611 identity_joined(rid=r_id)
612 for r_id in relation_ids('cloud-compute'):
613 for unit in related_units(r_id):
614 compute_changed(r_id, unit)
615
616
617@hooks.hook('neutron-api-relation-joined')
618def neutron_api_relation_joined(rid=None):
619 with open('/etc/init/neutron-server.override', 'wb') as out:
620 out.write('manual\n')
621 if os.path.isfile(NEUTRON_CONF):
622 os.rename(NEUTRON_CONF, NEUTRON_CONF + '_unused')
623 if service_running('neutron-server'):
624 service_stop('neutron-server')
625 for id_rid in relation_ids('identity-service'):
626 identity_joined(rid=id_rid)
627 nova_url = canonical_url(CONFIGS, INTERNAL) + ":8774/v2"
628 relation_set(relation_id=rid, nova_url=nova_url)
629
630
631@hooks.hook('neutron-api-relation-changed')
632@service_guard(guard_map(), CONFIGS,
633 active=config('service-guard'))
634@restart_on_change(restart_map())
635def neutron_api_relation_changed():
636 CONFIGS.write(NOVA_CONF)
637 for rid in relation_ids('cloud-compute'):
638 compute_joined(rid=rid)
639 for rid in relation_ids('quantum-network-service'):
640 quantum_joined(rid=rid)
641
642
643@hooks.hook('neutron-api-relation-broken')
644@service_guard(guard_map(), CONFIGS,
645 active=config('service-guard'))
646@restart_on_change(restart_map())
647def neutron_api_relation_broken():
648 if os.path.isfile('/etc/init/neutron-server.override'):
649 os.remove('/etc/init/neutron-server.override')
650 CONFIGS.write_all()
651 for rid in relation_ids('cloud-compute'):
652 compute_joined(rid=rid)
653 for rid in relation_ids('quantum-network-service'):
654 quantum_joined(rid=rid)
501655
502656
503def main():657def main():
504658
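
The compute_changed() rework above stops shipping whole known_hosts/authorized_keys files as single base64 blobs and instead publishes one relation setting per line plus a *_max_index marker that the compute side can iterate over, which keeps large files within relation-data limits. The pattern, reduced to a runnable sketch with relation_set stubbed out; publish_lines is an invented helper for illustration, not part of the charm.

    # Sketch of the per-line publishing pattern used by compute_changed() above.
    published = {}

    def relation_set(relation_id=None, relation_settings=None, **kwargs):
        # Stub for charmhelpers.core.hookenv.relation_set: just collect settings.
        published.update(relation_settings or {})
        published.update(kwargs)

    def publish_lines(prefix, lines, rid=None):
        index = 0
        for line in lines:
            relation_set(relation_id=rid,
                         relation_settings={'{}_{}'.format(prefix, index): line})
            index += 1
        relation_set(relation_id=rid, **{'{}_max_index'.format(prefix): index})

    publish_lines('known_hosts',
                  ['|1|abc= ssh-rsa AAAAB3Nza...', '|1|def= ssh-rsa AAAAB3Nza...'])
    assert published['known_hosts_max_index'] == 2
    assert published['known_hosts_0'].startswith('|1|abc=')
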
=== modified file 'hooks/nova_cc_utils.py'
--- hooks/nova_cc_utils.py 2014-05-21 10:03:01 +0000
+++ hooks/nova_cc_utils.py 2014-07-29 13:07:23 +0000
@@ -33,20 +33,22 @@
33 relation_get,33 relation_get,
34 relation_ids,34 relation_ids,
35 remote_unit,35 remote_unit,
36 is_relation_made,
36 INFO,37 INFO,
37 ERROR,38 ERROR,
38)39)
3940
40from charmhelpers.core.host import (41from charmhelpers.core.host import (
41 service_start42 service_start,
43 service_stop,
44 service_running
42)45)
4346
44
45import nova_cc_context47import nova_cc_context
4648
47TEMPLATES = 'templates/'49TEMPLATES = 'templates/'
4850
49CLUSTER_RES = 'res_nova_vip'51CLUSTER_RES = 'grp_nova_vips'
5052
51# removed from original: charm-helper-sh53# removed from original: charm-helper-sh
52BASE_PACKAGES = [54BASE_PACKAGES = [
@@ -106,8 +108,7 @@
106 context.SyslogContext(),108 context.SyslogContext(),
107 nova_cc_context.HAProxyContext(),109 nova_cc_context.HAProxyContext(),
108 nova_cc_context.IdentityServiceContext(),110 nova_cc_context.IdentityServiceContext(),
109 nova_cc_context.VolumeServiceContext(),111 nova_cc_context.VolumeServiceContext()],
110 nova_cc_context.NeutronCCContext()],
111 }),112 }),
112 (NOVA_API_PASTE, {113 (NOVA_API_PASTE, {
113 'services': [s for s in BASE_SERVICES if 'api' in s],114 'services': [s for s in BASE_SERVICES if 'api' in s],
@@ -188,39 +189,47 @@
188189
189 net_manager = network_manager()190 net_manager = network_manager()
190191
191 # pop out irrelevant resources from the OrderedDict (easier than adding
192 # them late)
193 if net_manager != 'quantum':
194 [resource_map.pop(k) for k in list(resource_map.iterkeys())
195 if 'quantum' in k]
196 if net_manager != 'neutron':
197 [resource_map.pop(k) for k in list(resource_map.iterkeys())
198 if 'neutron' in k]
199
200 if os.path.exists('/etc/apache2/conf-available'):192 if os.path.exists('/etc/apache2/conf-available'):
201 resource_map.pop(APACHE_CONF)193 resource_map.pop(APACHE_CONF)
202 else:194 else:
203 resource_map.pop(APACHE_24_CONF)195 resource_map.pop(APACHE_24_CONF)
204196
205 # add neutron plugin requirements. nova-c-c only needs the neutron-server197 if is_relation_made('neutron-api'):
206 # associated with configs, not the plugin agent.198 [resource_map.pop(k) for k in list(resource_map.iterkeys())
207 if net_manager in ['quantum', 'neutron']:199 if 'quantum' in k or 'neutron' in k]
208 plugin = neutron_plugin()200 resource_map[NOVA_CONF]['contexts'].append(
209 if plugin:201 nova_cc_context.NeutronAPIContext())
210 conf = neutron_plugin_attribute(plugin, 'config', net_manager)202 else:
211 ctxts = (neutron_plugin_attribute(plugin, 'contexts', net_manager)203 resource_map[NOVA_CONF]['contexts'].append(
212 or [])204 nova_cc_context.NeutronCCContext())
213 services = neutron_plugin_attribute(plugin, 'server_services',205 # pop out irrelevant resources from the OrderedDict (easier than adding
214 net_manager)206 # them late)
215 resource_map[conf] = {}207 if net_manager != 'quantum':
216 resource_map[conf]['services'] = services208 [resource_map.pop(k) for k in list(resource_map.iterkeys())
217 resource_map[conf]['contexts'] = ctxts209 if 'quantum' in k]
218 resource_map[conf]['contexts'].append(210 if net_manager != 'neutron':
219 nova_cc_context.NeutronCCContext())211 [resource_map.pop(k) for k in list(resource_map.iterkeys())
212 if 'neutron' in k]
213 # add neutron plugin requirements. nova-c-c only needs the
214 # neutron-server associated with configs, not the plugin agent.
215 if net_manager in ['quantum', 'neutron']:
216 plugin = neutron_plugin()
217 if plugin:
218 conf = neutron_plugin_attribute(plugin, 'config', net_manager)
219 ctxts = (neutron_plugin_attribute(plugin, 'contexts',
220 net_manager)
221 or [])
222 services = neutron_plugin_attribute(plugin, 'server_services',
223 net_manager)
224 resource_map[conf] = {}
225 resource_map[conf]['services'] = services
226 resource_map[conf]['contexts'] = ctxts
227 resource_map[conf]['contexts'].append(
228 nova_cc_context.NeutronCCContext())
220229
221 # update for postgres230 # update for postgres
222 resource_map[conf]['contexts'].append(231 resource_map[conf]['contexts'].append(
223 nova_cc_context.NeutronPostgresqlDBContext())232 nova_cc_context.NeutronPostgresqlDBContext())
224233
225 # nova-conductor for releases >= G.234 # nova-conductor for releases >= G.
226 if os_release('nova-common') not in ['essex', 'folsom']:235 if os_release('nova-common') not in ['essex', 'folsom']:
@@ -235,6 +244,7 @@
235 for s in vmware_ctxt['services']:244 for s in vmware_ctxt['services']:
236 if s not in resource_map[NOVA_CONF]['services']:245 if s not in resource_map[NOVA_CONF]['services']:
237 resource_map[NOVA_CONF]['services'].append(s)246 resource_map[NOVA_CONF]['services'].append(s)
247
238 return resource_map248 return resource_map
239249
240250
@@ -509,8 +519,11 @@
509 return b64encode(_in.read())519 return b64encode(_in.read())
510520
511521
512def ssh_directory_for_unit(user=None):522def ssh_directory_for_unit(unit=None, user=None):
513 remote_service = remote_unit().split('/')[0]523 if unit:
524 remote_service = unit.split('/')[0]
525 else:
526 remote_service = remote_unit().split('/')[0]
514 if user:527 if user:
515 remote_service = "{}_{}".format(remote_service, user)528 remote_service = "{}_{}".format(remote_service, user)
516 _dir = os.path.join(NOVA_SSH_DIR, remote_service)529 _dir = os.path.join(NOVA_SSH_DIR, remote_service)
@@ -524,29 +537,29 @@
524 return _dir537 return _dir
525538
526539
527def known_hosts(user=None):540def known_hosts(unit=None, user=None):
528 return os.path.join(ssh_directory_for_unit(user), 'known_hosts')541 return os.path.join(ssh_directory_for_unit(unit, user), 'known_hosts')
529542
530543
531def authorized_keys(user=None):544def authorized_keys(unit=None, user=None):
532 return os.path.join(ssh_directory_for_unit(user), 'authorized_keys')545 return os.path.join(ssh_directory_for_unit(unit, user), 'authorized_keys')
533546
534547
535def ssh_known_host_key(host, user=None):548def ssh_known_host_key(host, unit=None, user=None):
536 cmd = ['ssh-keygen', '-f', known_hosts(user), '-H', '-F', host]549 cmd = ['ssh-keygen', '-f', known_hosts(unit, user), '-H', '-F', host]
537 try:550 try:
538 return subprocess.check_output(cmd).strip()551 return subprocess.check_output(cmd).strip()
539 except subprocess.CalledProcessError:552 except subprocess.CalledProcessError:
540 return None553 return None
541554
542555
543def remove_known_host(host, user=None):556def remove_known_host(host, unit=None, user=None):
544 log('Removing SSH known host entry for compute host at %s' % host)557 log('Removing SSH known host entry for compute host at %s' % host)
545 cmd = ['ssh-keygen', '-f', known_hosts(user), '-R', host]558 cmd = ['ssh-keygen', '-f', known_hosts(unit, user), '-R', host]
546 subprocess.check_call(cmd)559 subprocess.check_call(cmd)
547560
548561
549def add_known_host(host, user=None):562def add_known_host(host, unit=None, user=None):
550 '''Add variations of host to a known hosts file.'''563 '''Add variations of host to a known hosts file.'''
551 cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]564 cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
552 try:565 try:
@@ -555,34 +568,37 @@
555 log('Could not obtain SSH host key from %s' % host, level=ERROR)568 log('Could not obtain SSH host key from %s' % host, level=ERROR)
556 raise e569 raise e
557570
558 current_key = ssh_known_host_key(host, user)571 current_key = ssh_known_host_key(host, unit, user)
559 if current_key:572 if current_key:
560 if remote_key == current_key:573 if remote_key == current_key:
561 log('Known host key for compute host %s up to date.' % host)574 log('Known host key for compute host %s up to date.' % host)
562 return575 return
563 else:576 else:
564 remove_known_host(host, user)577 remove_known_host(host, unit, user)
565578
566 log('Adding SSH host key to known hosts for compute node at %s.' % host)579 log('Adding SSH host key to known hosts for compute node at %s.' % host)
567 with open(known_hosts(user), 'a') as out:580 with open(known_hosts(unit, user), 'a') as out:
568 out.write(remote_key + '\n')581 out.write(remote_key + '\n')
569582
570583
571def ssh_authorized_key_exists(public_key, user=None):584def ssh_authorized_key_exists(public_key, unit=None, user=None):
572 with open(authorized_keys(user)) as keys:585 with open(authorized_keys(unit, user)) as keys:
573 return (' %s ' % public_key) in keys.read()586 return (' %s ' % public_key) in keys.read()
574587
575588
576def add_authorized_key(public_key, user=None):589def add_authorized_key(public_key, unit=None, user=None):
577 with open(authorized_keys(user), 'a') as keys:590 with open(authorized_keys(unit, user), 'a') as keys:
578 keys.write(public_key + '\n')591 keys.write(public_key + '\n')
579592
580593
581def ssh_compute_add(public_key, user=None):594def ssh_compute_add(public_key, rid=None, unit=None, user=None):
582 # If remote compute node hands us a hostname, ensure we have a595 # If remote compute node hands us a hostname, ensure we have a
583 # known hosts entry for its IP, hostname and FQDN.596 # known hosts entry for its IP, hostname and FQDN.
584 private_address = relation_get('private-address')597 private_address = relation_get(rid=rid, unit=unit,
598 attribute='private-address')
585 hosts = [private_address]599 hosts = [private_address]
600 if relation_get('hostname'):
601 hosts.append(relation_get('hostname'))
586602
587 if not is_ip(private_address):603 if not is_ip(private_address):
588 hosts.append(get_host_ip(private_address))604 hosts.append(get_host_ip(private_address))
@@ -593,31 +609,41 @@
593 hosts.append(hn.split('.')[0])609 hosts.append(hn.split('.')[0])
594610
595 for host in list(set(hosts)):611 for host in list(set(hosts)):
596 if not ssh_known_host_key(host, user):612 if not ssh_known_host_key(host, unit, user):
597 add_known_host(host, user)613 add_known_host(host, unit, user)
598614
599 if not ssh_authorized_key_exists(public_key, user):615 if not ssh_authorized_key_exists(public_key, unit, user):
600 log('Saving SSH authorized key for compute host at %s.' %616 log('Saving SSH authorized key for compute host at %s.' %
601 private_address)617 private_address)
602 add_authorized_key(public_key, user)618 add_authorized_key(public_key, unit, user)
603619
604620
605def ssh_known_hosts_b64(user=None):621def ssh_known_hosts_lines(unit=None, user=None):
606 with open(known_hosts(user)) as hosts:622 known_hosts_list = []
607 return b64encode(hosts.read())623
608624 with open(known_hosts(unit, user)) as hosts:
609625 for hosts_line in hosts:
610def ssh_authorized_keys_b64(user=None):626 if hosts_line.rstrip():
611 with open(authorized_keys(user)) as keys:627 known_hosts_list.append(hosts_line.rstrip())
612 return b64encode(keys.read())628 return(known_hosts_list)
613629
614630
615def ssh_compute_remove(public_key, user=None):631def ssh_authorized_keys_lines(unit=None, user=None):
616 if not (os.path.isfile(authorized_keys(user)) or632 authorized_keys_list = []
617 os.path.isfile(known_hosts(user))):633
634 with open(authorized_keys(unit, user)) as keys:
635 for authkey_line in keys:
636 if authkey_line.rstrip():
637 authorized_keys_list.append(authkey_line.rstrip())
638 return(authorized_keys_list)
639
640
641def ssh_compute_remove(public_key, unit=None, user=None):
642 if not (os.path.isfile(authorized_keys(unit, user)) or
643 os.path.isfile(known_hosts(unit, user))):
618 return644 return
619645
620 with open(authorized_keys(user)) as _keys:646 with open(authorized_keys(unit, user)) as _keys:
621 keys = [k.strip() for k in _keys.readlines()]647 keys = [k.strip() for k in _keys.readlines()]
622648
623 if public_key not in keys:649 if public_key not in keys:
@@ -625,67 +651,101 @@
625651
626 [keys.remove(key) for key in keys if key == public_key]652 [keys.remove(key) for key in keys if key == public_key]
627653
628 with open(authorized_keys(user), 'w') as _keys:654 with open(authorized_keys(unit, user), 'w') as _keys:
629 keys = '\n'.join(keys)655 keys = '\n'.join(keys)
630 if not keys.endswith('\n'):656 if not keys.endswith('\n'):
631 keys += '\n'657 keys += '\n'
632 _keys.write(keys)658 _keys.write(keys)
633659
634660
635def determine_endpoints(url):661def determine_endpoints(public_url, internal_url, admin_url):
636 '''Generates a dictionary containing all relevant endpoints to be662 '''Generates a dictionary containing all relevant endpoints to be
637 passed to keystone as relation settings.'''663 passed to keystone as relation settings.'''
638 region = config('region')664 region = config('region')
639 os_rel = os_release('nova-common')665 os_rel = os_release('nova-common')
640666
641 if os_rel >= 'grizzly':667 if os_rel >= 'grizzly':
642 nova_url = ('%s:%s/v2/$(tenant_id)s' %668 nova_public_url = ('%s:%s/v2/$(tenant_id)s' %
643 (url, api_port('nova-api-os-compute')))669 (public_url, api_port('nova-api-os-compute')))
670 nova_internal_url = ('%s:%s/v2/$(tenant_id)s' %
671 (internal_url, api_port('nova-api-os-compute')))
672 nova_admin_url = ('%s:%s/v2/$(tenant_id)s' %
673 (admin_url, api_port('nova-api-os-compute')))
644 else:674 else:
645 nova_url = ('%s:%s/v1.1/$(tenant_id)s' %675 nova_public_url = ('%s:%s/v1.1/$(tenant_id)s' %
646 (url, api_port('nova-api-os-compute')))676 (public_url, api_port('nova-api-os-compute')))
647 ec2_url = '%s:%s/services/Cloud' % (url, api_port('nova-api-ec2'))677 nova_internal_url = ('%s:%s/v1.1/$(tenant_id)s' %
648 nova_volume_url = ('%s:%s/v1/$(tenant_id)s' %678 (internal_url, api_port('nova-api-os-compute')))
649 (url, api_port('nova-api-os-compute')))679 nova_admin_url = ('%s:%s/v1.1/$(tenant_id)s' %
650 neutron_url = '%s:%s' % (url, api_port('neutron-server'))680 (admin_url, api_port('nova-api-os-compute')))
651 s3_url = '%s:%s' % (url, api_port('nova-objectstore'))681
682 ec2_public_url = '%s:%s/services/Cloud' % (
683 public_url, api_port('nova-api-ec2'))
684 ec2_internal_url = '%s:%s/services/Cloud' % (
685 internal_url, api_port('nova-api-ec2'))
686 ec2_admin_url = '%s:%s/services/Cloud' % (admin_url,
687 api_port('nova-api-ec2'))
688
689 nova_volume_public_url = ('%s:%s/v1/$(tenant_id)s' %
690 (public_url, api_port('nova-api-os-compute')))
691 nova_volume_internal_url = ('%s:%s/v1/$(tenant_id)s' %
692 (internal_url,
693 api_port('nova-api-os-compute')))
694 nova_volume_admin_url = ('%s:%s/v1/$(tenant_id)s' %
695 (admin_url, api_port('nova-api-os-compute')))
696
697 neutron_public_url = '%s:%s' % (public_url, api_port('neutron-server'))
698 neutron_internal_url = '%s:%s' % (internal_url, api_port('neutron-server'))
699 neutron_admin_url = '%s:%s' % (admin_url, api_port('neutron-server'))
700
701 s3_public_url = '%s:%s' % (public_url, api_port('nova-objectstore'))
702 s3_internal_url = '%s:%s' % (internal_url, api_port('nova-objectstore'))
703 s3_admin_url = '%s:%s' % (admin_url, api_port('nova-objectstore'))
652704
653 # the base endpoints705 # the base endpoints
654 endpoints = {706 endpoints = {
655 'nova_service': 'nova',707 'nova_service': 'nova',
656 'nova_region': region,708 'nova_region': region,
657 'nova_public_url': nova_url,709 'nova_public_url': nova_public_url,
658 'nova_admin_url': nova_url,710 'nova_admin_url': nova_admin_url,
659 'nova_internal_url': nova_url,711 'nova_internal_url': nova_internal_url,
660 'ec2_service': 'ec2',712 'ec2_service': 'ec2',
661 'ec2_region': region,713 'ec2_region': region,
662 'ec2_public_url': ec2_url,714 'ec2_public_url': ec2_public_url,
663 'ec2_admin_url': ec2_url,715 'ec2_admin_url': ec2_admin_url,
664 'ec2_internal_url': ec2_url,716 'ec2_internal_url': ec2_internal_url,
665 's3_service': 's3',717 's3_service': 's3',
666 's3_region': region,718 's3_region': region,
667 's3_public_url': s3_url,719 's3_public_url': s3_public_url,
668 's3_admin_url': s3_url,720 's3_admin_url': s3_admin_url,
669 's3_internal_url': s3_url,721 's3_internal_url': s3_internal_url,
670 }722 }
671723
672 if relation_ids('nova-volume-service'):724 if relation_ids('nova-volume-service'):
673 endpoints.update({725 endpoints.update({
674 'nova-volume_service': 'nova-volume',726 'nova-volume_service': 'nova-volume',
675 'nova-volume_region': region,727 'nova-volume_region': region,
676 'nova-volume_public_url': nova_volume_url,728 'nova-volume_public_url': nova_volume_public_url,
677 'nova-volume_admin_url': nova_volume_url,729 'nova-volume_admin_url': nova_volume_admin_url,
678 'nova-volume_internal_url': nova_volume_url,730 'nova-volume_internal_url': nova_volume_internal_url,
679 })731 })
680732
681 # XXX: Keep these relations named quantum_*??733 # XXX: Keep these relations named quantum_*??
682 if network_manager() in ['quantum', 'neutron']:734 if is_relation_made('neutron-api'):
735 endpoints.update({
736 'quantum_service': None,
737 'quantum_region': None,
738 'quantum_public_url': None,
739 'quantum_admin_url': None,
740 'quantum_internal_url': None,
741 })
742 elif network_manager() in ['quantum', 'neutron']:
683 endpoints.update({743 endpoints.update({
684 'quantum_service': 'quantum',744 'quantum_service': 'quantum',
685 'quantum_region': region,745 'quantum_region': region,
686 'quantum_public_url': neutron_url,746 'quantum_public_url': neutron_public_url,
687 'quantum_admin_url': neutron_url,747 'quantum_admin_url': neutron_admin_url,
688 'quantum_internal_url': neutron_url,748 'quantum_internal_url': neutron_internal_url,
689 })749 })
690750
691 return endpoints751 return endpoints
@@ -695,3 +755,58 @@
695 # quantum-plugin config setting can be safely overriden755 # quantum-plugin config setting can be safely overriden
696 # as we only supported OVS in G/neutron756 # as we only supported OVS in G/neutron
697 return config('neutron-plugin') or config('quantum-plugin')757 return config('neutron-plugin') or config('quantum-plugin')
758
759
760def guard_map():
761 '''Map of services and required interfaces that must be present before
762 the service should be allowed to start'''
763 gmap = {}
764 nova_services = deepcopy(BASE_SERVICES)
765 if os_release('nova-common') not in ['essex', 'folsom']:
766 nova_services.append('nova-conductor')
767
768 nova_interfaces = ['identity-service', 'amqp']
769 if relation_ids('pgsql-nova-db'):
770 nova_interfaces.append('pgsql-nova-db')
771 else:
772 nova_interfaces.append('shared-db')
773
774 for svc in nova_services:
775 gmap[svc] = nova_interfaces
776
777 net_manager = network_manager()
778 if net_manager in ['neutron', 'quantum']:
779 neutron_interfaces = ['identity-service', 'amqp']
780 if relation_ids('pgsql-neutron-db'):
781 neutron_interfaces.append('pgsql-neutron-db')
782 else:
783 neutron_interfaces.append('shared-db')
784 if network_manager() == 'quantum':
785 gmap['quantum-server'] = neutron_interfaces
786 else:
787 gmap['neutron-server'] = neutron_interfaces
788
789 return gmap
790
791
792def service_guard(guard_map, contexts, active=False):
793 '''Inhibit services in guard_map from running unless
794 required interfaces are found complete in contexts.'''
795 def wrap(f):
796 def wrapped_f(*args):
797 if active is True:
798 incomplete_services = []
799 for svc in guard_map:
800 for interface in guard_map[svc]:
801 if interface not in contexts.complete_contexts():
802 incomplete_services.append(svc)
803 f(*args)
804 for svc in incomplete_services:
805 if service_running(svc):
806 log('Service {} has unfulfilled '
807 'interface requirements, stopping.'.format(svc))
808 service_stop(svc)
809 else:
810 f(*args)
811 return wrapped_f
812 return wrap
698813
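
The service_guard()/guard_map() additions above are the point of this branch: when the service-guard config option is enabled, each wrapped hook still runs, but any service whose required interfaces are not yet complete is stopped afterwards. A compact, runnable illustration with the charm-helpers service controls and the CONFIGS object stubbed; FakeConfigs, running, stopped and log below are stand-ins for this sketch, not charm code.

    # Illustrative harness for the service_guard decorator added above.
    running = {'nova-api-ec2': True}
    stopped = []

    def service_running(svc):
        return running.get(svc, False)

    def service_stop(svc):
        stopped.append(svc)
        running[svc] = False

    def log(msg):
        print(msg)

    class FakeConfigs(object):
        """Stand-in for the charm's CONFIGS templating object."""
        def __init__(self, complete):
            self._complete = complete

        def complete_contexts(self):
            return self._complete

    def service_guard(guard_map, contexts, active=False):
        def wrap(f):
            def wrapped_f(*args):
                if active is True:
                    incomplete = [svc for svc, ifaces in guard_map.items()
                                  if any(i not in contexts.complete_contexts()
                                         for i in ifaces)]
                    f(*args)
                    for svc in incomplete:
                        if service_running(svc):
                            log('Service {} has unfulfilled interface '
                                'requirements, stopping.'.format(svc))
                            service_stop(svc)
                else:
                    f(*args)
            return wrapped_f
        return wrap

    configs = FakeConfigs(complete=['amqp'])   # identity-service not yet complete

    @service_guard({'nova-api-ec2': ['identity-service', 'amqp']}, configs,
                   active=True)
    def config_changed():
        pass

    config_changed()
    assert stopped == ['nova-api-ec2']
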
=== modified file 'metadata.yaml'
--- metadata.yaml 2014-03-31 11:56:09 +0000
+++ metadata.yaml 2014-07-29 13:07:23 +0000
@@ -30,6 +30,8 @@
30 interface: nova-volume30 interface: nova-volume
31 quantum-network-service:31 quantum-network-service:
32 interface: quantum32 interface: quantum
33 neutron-api:
34 interface: neutron-api
33 ha:35 ha:
34 interface: hacluster36 interface: hacluster
35 scope: container37 scope: container
3638
=== modified file 'revision'
--- revision 2014-04-16 08:25:14 +0000
+++ revision 2014-07-29 13:07:23 +0000
@@ -1,1 +1,1 @@
13151500
22
=== added directory 'tests'
=== added file 'tests/00-setup'
--- tests/00-setup 1970-01-01 00:00:00 +0000
+++ tests/00-setup 2014-07-29 13:07:23 +0000
@@ -0,0 +1,10 @@
1#!/bin/bash
2
3set -ex
4
5sudo add-apt-repository --yes ppa:juju/stable
6sudo apt-get update --yes
7sudo apt-get install --yes python-amulet
8sudo apt-get install --yes python-glanceclient
9sudo apt-get install --yes python-keystoneclient
10sudo apt-get install --yes python-novaclient
011
=== added file 'tests/10-basic-precise-essex'
--- tests/10-basic-precise-essex 1970-01-01 00:00:00 +0000
+++ tests/10-basic-precise-essex 2014-07-29 13:07:23 +0000
@@ -0,0 +1,10 @@
1#!/usr/bin/python
2
3"""Amulet tests on a basic nova cloud controller deployment on
4 precise-essex."""
5
6from basic_deployment import NovaCCBasicDeployment
7
8if __name__ == '__main__':
9 deployment = NovaCCBasicDeployment(series='precise')
10 deployment.run_tests()
011
=== added file 'tests/11-basic-precise-folsom'
--- tests/11-basic-precise-folsom 1970-01-01 00:00:00 +0000
+++ tests/11-basic-precise-folsom 2014-07-29 13:07:23 +0000
@@ -0,0 +1,18 @@
1#!/usr/bin/python
2
3"""Amulet tests on a basic nova cloud controller deployment on
4 precise-folsom."""
5
6import amulet
7from basic_deployment import NovaCCBasicDeployment
8
9if __name__ == '__main__':
10 # NOTE(coreycb): Skipping failing test until resolved. 'nova-manage db sync'
11 # fails in shared-db-relation-changed (only fails on folsom)
12 message = "Skipping failing test until resolved"
13 amulet.raise_status(amulet.SKIP, msg=message)
14
15 deployment = NovaCCBasicDeployment(series='precise',
16 openstack='cloud:precise-folsom',
17 source='cloud:precise-updates/folsom')
18 deployment.run_tests()
019
=== added file 'tests/12-basic-precise-grizzly'
--- tests/12-basic-precise-grizzly 1970-01-01 00:00:00 +0000
+++ tests/12-basic-precise-grizzly 2014-07-29 13:07:23 +0000
@@ -0,0 +1,12 @@
1#!/usr/bin/python
2
3"""Amulet tests on a basic nova cloud controller deployment on
4 precise-grizzly."""
5
6from basic_deployment import NovaCCBasicDeployment
7
8if __name__ == '__main__':
9 deployment = NovaCCBasicDeployment(series='precise',
10 openstack='cloud:precise-grizzly',
11 source='cloud:precise-updates/grizzly')
12 deployment.run_tests()
013
=== added file 'tests/13-basic-precise-havana'
--- tests/13-basic-precise-havana 1970-01-01 00:00:00 +0000
+++ tests/13-basic-precise-havana 2014-07-29 13:07:23 +0000
@@ -0,0 +1,12 @@
1#!/usr/bin/python
2
3"""Amulet tests on a basic nova cloud controller deployment on
4 precise-havana."""
5
6from basic_deployment import NovaCCBasicDeployment
7
8if __name__ == '__main__':
9 deployment = NovaCCBasicDeployment(series='precise',
10 openstack='cloud:precise-havana',
11 source='cloud:precise-updates/havana')
12 deployment.run_tests()
013
=== added file 'tests/14-basic-precise-icehouse'
--- tests/14-basic-precise-icehouse 1970-01-01 00:00:00 +0000
+++ tests/14-basic-precise-icehouse 2014-07-29 13:07:23 +0000
@@ -0,0 +1,12 @@
1#!/usr/bin/python
2
3"""Amulet tests on a basic nova cloud controller deployment on
4 precise-icehouse."""
5
6from basic_deployment import NovaCCBasicDeployment
7
8if __name__ == '__main__':
9 deployment = NovaCCBasicDeployment(series='precise',
10 openstack='cloud:precise-icehouse',
11 source='cloud:precise-updates/icehouse')
12 deployment.run_tests()
013
=== added file 'tests/15-basic-trusty-icehouse'
--- tests/15-basic-trusty-icehouse 1970-01-01 00:00:00 +0000
+++ tests/15-basic-trusty-icehouse 2014-07-29 13:07:23 +0000
@@ -0,0 +1,10 @@
1#!/usr/bin/python
2
3"""Amulet tests on a basic nova cloud controller deployment on
4 trusty-icehouse."""
5
6from basic_deployment import NovaCCBasicDeployment
7
8if __name__ == '__main__':
9 deployment = NovaCCBasicDeployment(series='trusty')
10 deployment.run_tests()
011
=== added file 'tests/README'
--- tests/README 1970-01-01 00:00:00 +0000
+++ tests/README 2014-07-29 13:07:23 +0000
@@ -0,0 +1,47 @@
1This directory provides Amulet tests that focus on verification of Nova Cloud
2Controller deployments.
3
4If you use a web proxy server to access the web, you'll need to set the
5AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.
6
7The following examples demonstrate different ways that tests can be executed.
8All examples are run from the charm's root directory.
9
10 * To run all tests (starting with 00-setup):
11
12 make test
13
14 * To run a specific test module (or modules):
15
16 juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
17
18 * To run a specific test module (or modules), and keep the environment
19 deployed after a failure:
20
21 juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
22
23 * To re-run a test module against an already deployed environment (one
24 that was deployed by a previous call to 'juju test --set-e'):
25
26 ./tests/15-basic-trusty-icehouse
27
28For debugging and test development purposes, all code should be idempotent.
29In other words, the code should have the ability to be re-run without changing
30the results beyond the initial run. This enables editing and re-running of a
31test module against an already deployed environment, as described above.
32
33Manual debugging tips:
34
35 * Set the following env vars before using the OpenStack CLI as admin:
36 export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
37 export OS_TENANT_NAME=admin
38 export OS_USERNAME=admin
39 export OS_PASSWORD=openstack
40 export OS_REGION_NAME=RegionOne
41
42 * Set the following env vars before using the OpenStack CLI as demoUser:
43 export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
44 export OS_TENANT_NAME=demoTenant
45 export OS_USERNAME=demoUser
46 export OS_PASSWORD=password
47 export OS_REGION_NAME=RegionOne
048
=== added file 'tests/basic_deployment.py'
--- tests/basic_deployment.py 1970-01-01 00:00:00 +0000
+++ tests/basic_deployment.py 2014-07-29 13:07:23 +0000
@@ -0,0 +1,520 @@
1#!/usr/bin/python
2
3import amulet
4
5from charmhelpers.contrib.openstack.amulet.deployment import (
6 OpenStackAmuletDeployment
7)
8
9from charmhelpers.contrib.openstack.amulet.utils import (
10 OpenStackAmuletUtils,
11 DEBUG, # flake8: noqa
12 ERROR
13)
14
15# Use DEBUG to turn on debug logging
16u = OpenStackAmuletUtils(ERROR)
17
18
19class NovaCCBasicDeployment(OpenStackAmuletDeployment):
20 """Amulet tests on a basic nova cloud controller deployment."""
21
22 def __init__(self, series=None, openstack=None, source=None):
23 """Deploy the entire test environment."""
24 super(NovaCCBasicDeployment, self).__init__(series, openstack, source)
25 self._add_services()
26 self._add_relations()
27 self._configure_services()
28 self._deploy()
29 self._initialize_tests()
30
31 def _add_services(self):
32 """Add the service that we're testing, including the number of units,
33 where nova-cloud-controller is local, and the other charms are from
34 the charm store."""
35 this_service = ('nova-cloud-controller', 1)
36 other_services = [('mysql', 1), ('rabbitmq-server', 1),
37 ('nova-compute', 2), ('keystone', 1), ('glance', 1)]
38 super(NovaCCBasicDeployment, self)._add_services(this_service,
39 other_services)
40
41 def _add_relations(self):
42 """Add all of the relations for the services."""
43 relations = {
44 'nova-cloud-controller:shared-db': 'mysql:shared-db',
45 'nova-cloud-controller:identity-service': 'keystone:identity-service',
46 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp',
47 'nova-cloud-controller:cloud-compute': 'nova-compute:cloud-compute',
48 'nova-cloud-controller:image-service': 'glance:image-service',
49 'nova-compute:image-service': 'glance:image-service',
50 'nova-compute:shared-db': 'mysql:shared-db',
51 'nova-compute:amqp': 'rabbitmq-server:amqp',
52 'keystone:shared-db': 'mysql:shared-db',
53 'glance:identity-service': 'keystone:identity-service',
54 'glance:shared-db': 'mysql:shared-db',
55 'glance:amqp': 'rabbitmq-server:amqp'
56 }
57 super(NovaCCBasicDeployment, self)._add_relations(relations)
58
59 def _configure_services(self):
60 """Configure all of the services."""
61 keystone_config = {'admin-password': 'openstack',
62 'admin-token': 'ubuntutesting'}
63 configs = {'keystone': keystone_config}
64 super(NovaCCBasicDeployment, self)._configure_services(configs)
65
66 def _initialize_tests(self):
67 """Perform final initialization before tests get run."""
68 # Access the sentries for inspecting service units
69 self.mysql_sentry = self.d.sentry.unit['mysql/0']
70 self.keystone_sentry = self.d.sentry.unit['keystone/0']
71 self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
72 self.nova_cc_sentry = self.d.sentry.unit['nova-cloud-controller/0']
73 self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0']
74 self.glance_sentry = self.d.sentry.unit['glance/0']
75
76 # Authenticate admin with keystone
77 self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
78 user='admin',
79 password='openstack',
80 tenant='admin')
81
82 # Authenticate admin with glance endpoint
83 self.glance = u.authenticate_glance_admin(self.keystone)
84
85 # Create a demo tenant/role/user
86 self.demo_tenant = 'demoTenant'
87 self.demo_role = 'demoRole'
88 self.demo_user = 'demoUser'
89 if not u.tenant_exists(self.keystone, self.demo_tenant):
90 tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
91 description='demo tenant',
92 enabled=True)
93 self.keystone.roles.create(name=self.demo_role)
94 self.keystone.users.create(name=self.demo_user,
95 password='password',
96 tenant_id=tenant.id,
97 email='demo@demo.com')
98
99 # Authenticate demo user with keystone
100 self.keystone_demo = \
101 u.authenticate_keystone_user(self.keystone, user=self.demo_user,
102 password='password',
103 tenant=self.demo_tenant)
104
105 # Authenticate demo user with nova-api
106 self.nova_demo = u.authenticate_nova_user(self.keystone,
107 user=self.demo_user,
108 password='password',
109 tenant=self.demo_tenant)
110
111 def test_services(self):
112 """Verify the expected services are running on the corresponding
113 service units."""
114 commands = {
115 self.mysql_sentry: ['status mysql'],
116 self.rabbitmq_sentry: ['sudo service rabbitmq-server status'],
117 self.nova_cc_sentry: ['status nova-api-ec2',
118 'status nova-api-os-compute',
119 'status nova-objectstore',
120 'status nova-cert',
121 'status nova-scheduler'],
122 self.nova_compute_sentry: ['status nova-compute',
123 'status nova-network',
124 'status nova-api'],
125 self.keystone_sentry: ['status keystone'],
126 self.glance_sentry: ['status glance-registry', 'status glance-api']
127 }
128 if self._get_openstack_release() >= self.precise_grizzly:
129             commands[self.nova_cc_sentry].append('status nova-conductor')
130
131 ret = u.validate_services(commands)
132 if ret:
133 amulet.raise_status(amulet.FAIL, msg=ret)
134
135 def test_service_catalog(self):
136 """Verify that the service catalog endpoint data is valid."""
137 endpoint_vol = {'adminURL': u.valid_url,
138 'region': 'RegionOne',
139 'publicURL': u.valid_url,
140 'internalURL': u.valid_url}
141 endpoint_id = {'adminURL': u.valid_url,
142 'region': 'RegionOne',
143 'publicURL': u.valid_url,
144 'internalURL': u.valid_url}
145 if self._get_openstack_release() >= self.precise_folsom:
146 endpoint_vol['id'] = u.not_null
147 endpoint_id['id'] = u.not_null
148 expected = {'s3': [endpoint_vol], 'compute': [endpoint_vol],
149 'ec2': [endpoint_vol], 'identity': [endpoint_id]}
150 actual = self.keystone_demo.service_catalog.get_endpoints()
151
152 ret = u.validate_svc_catalog_endpoint_data(expected, actual)
153 if ret:
154 amulet.raise_status(amulet.FAIL, msg=ret)
155
156 def test_openstack_compute_api_endpoint(self):
157 """Verify the openstack compute api (osapi) endpoint data."""
158 endpoints = self.keystone.endpoints.list()
159 admin_port = internal_port = public_port = '8774'
160 expected = {'id': u.not_null,
161 'region': 'RegionOne',
162 'adminurl': u.valid_url,
163 'internalurl': u.valid_url,
164 'publicurl': u.valid_url,
165 'service_id': u.not_null}
166
167 ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
168 public_port, expected)
169 if ret:
170 message = 'osapi endpoint: {}'.format(ret)
171 amulet.raise_status(amulet.FAIL, msg=message)
172
173 def test_ec2_api_endpoint(self):
174 """Verify the EC2 api endpoint data."""
175 endpoints = self.keystone.endpoints.list()
176 admin_port = internal_port = public_port = '8773'
177 expected = {'id': u.not_null,
178 'region': 'RegionOne',
179 'adminurl': u.valid_url,
180 'internalurl': u.valid_url,
181 'publicurl': u.valid_url,
182 'service_id': u.not_null}
183
184 ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
185 public_port, expected)
186 if ret:
187 message = 'EC2 endpoint: {}'.format(ret)
188 amulet.raise_status(amulet.FAIL, msg=message)
189
190 def test_s3_api_endpoint(self):
191 """Verify the S3 api endpoint data."""
192 endpoints = self.keystone.endpoints.list()
193 admin_port = internal_port = public_port = '3333'
194 expected = {'id': u.not_null,
195 'region': 'RegionOne',
196 'adminurl': u.valid_url,
197 'internalurl': u.valid_url,
198 'publicurl': u.valid_url,
199 'service_id': u.not_null}
200
201 ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
202 public_port, expected)
203 if ret:
204 message = 'S3 endpoint: {}'.format(ret)
205 amulet.raise_status(amulet.FAIL, msg=message)
206
207 def test_nova_cc_shared_db_relation(self):
208 """Verify the nova-cc to mysql shared-db relation data"""
209 unit = self.nova_cc_sentry
210 relation = ['shared-db', 'mysql:shared-db']
211 expected = {
212 'private-address': u.valid_ip,
213 'nova_database': 'nova',
214 'nova_username': 'nova',
215 'nova_hostname': u.valid_ip
216 }
217
218 ret = u.validate_relation_data(unit, relation, expected)
219 if ret:
220 message = u.relation_error('nova-cc shared-db', ret)
221 amulet.raise_status(amulet.FAIL, msg=message)
222
223 def test_mysql_shared_db_relation(self):
224 """Verify the mysql to nova-cc shared-db relation data"""
225 unit = self.mysql_sentry
226 relation = ['shared-db', 'nova-cloud-controller:shared-db']
227 expected = {
228 'private-address': u.valid_ip,
229 'nova_password': u.not_null,
230 'db_host': u.valid_ip
231 }
232
233 ret = u.validate_relation_data(unit, relation, expected)
234 if ret:
235 message = u.relation_error('mysql shared-db', ret)
236 amulet.raise_status(amulet.FAIL, msg=message)
237
238 def test_nova_cc_identity_service_relation(self):
239 """Verify the nova-cc to keystone identity-service relation data"""
240 unit = self.nova_cc_sentry
241 relation = ['identity-service', 'keystone:identity-service']
242 expected = {
243 'nova_internal_url': u.valid_url,
244 'nova_public_url': u.valid_url,
245 's3_public_url': u.valid_url,
246 's3_service': 's3',
247 'ec2_admin_url': u.valid_url,
248 'ec2_internal_url': u.valid_url,
249 'nova_service': 'nova',
250 's3_region': 'RegionOne',
251 'private-address': u.valid_ip,
252 'nova_region': 'RegionOne',
253 'ec2_public_url': u.valid_url,
254 'ec2_region': 'RegionOne',
255 's3_internal_url': u.valid_url,
256 's3_admin_url': u.valid_url,
257 'nova_admin_url': u.valid_url,
258 'ec2_service': 'ec2'
259 }
260
261 ret = u.validate_relation_data(unit, relation, expected)
262 if ret:
263 message = u.relation_error('nova-cc identity-service', ret)
264 amulet.raise_status(amulet.FAIL, msg=message)
265
266 def test_keystone_identity_service_relation(self):
267 """Verify the keystone to nova-cc identity-service relation data"""
268 unit = self.keystone_sentry
269 relation = ['identity-service',
270 'nova-cloud-controller:identity-service']
271 expected = {
272 'service_protocol': 'http',
273 'service_tenant': 'services',
274 'admin_token': 'ubuntutesting',
275 'service_password': u.not_null,
276 'service_port': '5000',
277 'auth_port': '35357',
278 'auth_protocol': 'http',
279 'private-address': u.valid_ip,
280 'https_keystone': 'False',
281 'auth_host': u.valid_ip,
282 'service_username': 's3_ec2_nova',
283 'service_tenant_id': u.not_null,
284 'service_host': u.valid_ip
285 }
286
287 ret = u.validate_relation_data(unit, relation, expected)
288 if ret:
289 message = u.relation_error('keystone identity-service', ret)
290 amulet.raise_status(amulet.FAIL, msg=message)
291
292 def test_nova_cc_amqp_relation(self):
293 """Verify the nova-cc to rabbitmq-server amqp relation data"""
294 unit = self.nova_cc_sentry
295 relation = ['amqp', 'rabbitmq-server:amqp']
296 expected = {
297 'username': 'nova',
298 'private-address': u.valid_ip,
299 'vhost': 'openstack'
300 }
301
302 ret = u.validate_relation_data(unit, relation, expected)
303 if ret:
304 message = u.relation_error('nova-cc amqp', ret)
305 amulet.raise_status(amulet.FAIL, msg=message)
306
307 def test_rabbitmq_amqp_relation(self):
308 """Verify the rabbitmq-server to nova-cc amqp relation data"""
309 unit = self.rabbitmq_sentry
310 relation = ['amqp', 'nova-cloud-controller:amqp']
311 expected = {
312 'private-address': u.valid_ip,
313 'password': u.not_null,
314 'hostname': u.valid_ip
315 }
316
317 ret = u.validate_relation_data(unit, relation, expected)
318 if ret:
319 message = u.relation_error('rabbitmq amqp', ret)
320 amulet.raise_status(amulet.FAIL, msg=message)
321
322 def test_nova_cc_cloud_compute_relation(self):
323 """Verify the nova-cc to nova-compute cloud-compute relation data"""
324 unit = self.nova_cc_sentry
325 relation = ['cloud-compute', 'nova-compute:cloud-compute']
326 expected = {
327 'volume_service': 'cinder',
328 'network_manager': 'flatdhcpmanager',
329 'ec2_host': u.valid_ip,
330 'private-address': u.valid_ip,
331 'restart_trigger': u.not_null
332 }
333 if self._get_openstack_release() == self.precise_essex:
334 expected['volume_service'] = 'nova-volume'
335
336 ret = u.validate_relation_data(unit, relation, expected)
337 if ret:
338 message = u.relation_error('nova-cc cloud-compute', ret)
339 amulet.raise_status(amulet.FAIL, msg=message)
340
341 def test_nova_cloud_compute_relation(self):
342 """Verify the nova-compute to nova-cc cloud-compute relation data"""
343 unit = self.nova_compute_sentry
344 relation = ['cloud-compute', 'nova-cloud-controller:cloud-compute']
345 expected = {
346 'private-address': u.valid_ip,
347 }
348
349 ret = u.validate_relation_data(unit, relation, expected)
350 if ret:
351 message = u.relation_error('nova-compute cloud-compute', ret)
352 amulet.raise_status(amulet.FAIL, msg=message)
353
354 def test_nova_cc_image_service_relation(self):
355 """Verify the nova-cc to glance image-service relation data"""
356 unit = self.nova_cc_sentry
357 relation = ['image-service', 'glance:image-service']
358 expected = {
359 'private-address': u.valid_ip,
360 }
361
362 ret = u.validate_relation_data(unit, relation, expected)
363 if ret:
364 message = u.relation_error('nova-cc image-service', ret)
365 amulet.raise_status(amulet.FAIL, msg=message)
366
367 def test_glance_image_service_relation(self):
368 """Verify the glance to nova-cc image-service relation data"""
369 unit = self.glance_sentry
370 relation = ['image-service', 'nova-cloud-controller:image-service']
371 expected = {
372 'private-address': u.valid_ip,
373 'glance-api-server': u.valid_url
374 }
375
376 ret = u.validate_relation_data(unit, relation, expected)
377 if ret:
378 message = u.relation_error('glance image-service', ret)
379 amulet.raise_status(amulet.FAIL, msg=message)
380
381 def test_restart_on_config_change(self):
382 """Verify that the specified services are restarted when the config
383 is changed."""
384 # NOTE(coreycb): Skipping failing test on essex until resolved.
385 # config-flags don't take effect on essex.
386 if self._get_openstack_release() == self.precise_essex:
387 u.log.error("Skipping failing test until resolved")
388 return
389
390 services = ['nova-api-ec2', 'nova-api-os-compute', 'nova-objectstore',
391 'nova-cert', 'nova-scheduler', 'nova-conductor']
392 self.d.configure('nova-cloud-controller',
393 {'config-flags': 'quota_cores=20,quota_instances=40,quota_ram=102400'})
394 pgrep_full = True
395
396 time = 20
397 conf = '/etc/nova/nova.conf'
398 for s in services:
399 if not u.service_restarted(self.nova_cc_sentry, s, conf,
400                                        pgrep_full=True,
401 msg = "service {} didn't restart after config change".format(s)
402 amulet.raise_status(amulet.FAIL, msg=msg)
403 time = 0
404
405 def test_nova_default_config(self):
406 """Verify the data in the nova config file's default section."""
407 # NOTE(coreycb): Currently no way to test on essex because config file
408 # has no section headers.
409 if self._get_openstack_release() == self.precise_essex:
410 return
411
412 unit = self.nova_cc_sentry
413 conf = '/etc/nova/nova.conf'
414 rabbitmq_relation = self.rabbitmq_sentry.relation('amqp',
415 'nova-cloud-controller:amqp')
416 glance_relation = self.glance_sentry.relation('image-service',
417 'nova-cloud-controller:image-service')
418 mysql_relation = self.mysql_sentry.relation('shared-db',
419 'nova-cloud-controller:shared-db')
420 db_uri = "mysql://{}:{}@{}/{}".format('nova',
421 mysql_relation['nova_password'],
422 mysql_relation['db_host'],
423 'nova')
424 keystone_ep = self.keystone_demo.service_catalog.url_for(\
425 service_type='identity',
426 endpoint_type='publicURL')
427 keystone_ec2 = "{}/ec2tokens".format(keystone_ep)
428
429 expected = {'dhcpbridge_flagfile': '/etc/nova/nova.conf',
430 'dhcpbridge': '/usr/bin/nova-dhcpbridge',
431 'logdir': '/var/log/nova',
432 'state_path': '/var/lib/nova',
433 'lock_path': '/var/lock/nova',
434 'force_dhcp_release': 'True',
435 'iscsi_helper': 'tgtadm',
436 'libvirt_use_virtio_for_bridges': 'True',
437 'connection_type': 'libvirt',
438 'root_helper': 'sudo nova-rootwrap /etc/nova/rootwrap.conf',
439 'verbose': 'True',
440 'ec2_private_dns_show_ip': 'True',
441 'api_paste_config': '/etc/nova/api-paste.ini',
442 'volumes_path': '/var/lib/nova/volumes',
443 'enabled_apis': 'ec2,osapi_compute,metadata',
444 'auth_strategy': 'keystone',
445 'compute_driver': 'libvirt.LibvirtDriver',
446 'keystone_ec2_url': keystone_ec2,
447 'sql_connection': db_uri,
448 'rabbit_userid': 'nova',
449 'rabbit_virtual_host': 'openstack',
450 'rabbit_password': rabbitmq_relation['password'],
451 'rabbit_host': rabbitmq_relation['hostname'],
452 'glance_api_servers': glance_relation['glance-api-server'],
453 'network_manager': 'nova.network.manager.FlatDHCPManager',
454 's3_listen_port': '3333',
455 'osapi_compute_listen_port': '8774',
456 'ec2_listen_port': '8773'}
457
458 ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
459 if ret:
460 message = "nova config error: {}".format(ret)
461 amulet.raise_status(amulet.FAIL, msg=message)
462
463
464 def test_nova_keystone_authtoken_config(self):
465 """Verify the data in the nova config file's keystone_authtoken
466 section. This data only exists since icehouse."""
467 if self._get_openstack_release() < self.precise_icehouse:
468 return
469
470 unit = self.nova_cc_sentry
471 conf = '/etc/nova/nova.conf'
472 keystone_relation = self.keystone_sentry.relation('identity-service',
473 'nova-cloud-controller:identity-service')
474 keystone_uri = "http://{}:{}/".format(keystone_relation['service_host'],
475 keystone_relation['service_port'])
476 expected = {'auth_uri': keystone_uri,
477 'auth_host': keystone_relation['service_host'],
478 'auth_port': keystone_relation['auth_port'],
479 'auth_protocol': keystone_relation['auth_protocol'],
480 'admin_tenant_name': keystone_relation['service_tenant'],
481 'admin_user': keystone_relation['service_username'],
482 'admin_password': keystone_relation['service_password']}
483
484 ret = u.validate_config_data(unit, conf, 'keystone_authtoken', expected)
485 if ret:
486 message = "nova config error: {}".format(ret)
487 amulet.raise_status(amulet.FAIL, msg=message)
488
489 def test_image_instance_create(self):
490 """Create an image/instance, verify they exist, and delete them."""
491 # NOTE(coreycb): Skipping failing test on essex until resolved. essex
492 # nova API calls are getting "Malformed request url (HTTP
493 # 400)".
494 if self._get_openstack_release() == self.precise_essex:
495 u.log.error("Skipping failing test until resolved")
496 return
497
498 image = u.create_cirros_image(self.glance, "cirros-image")
499 if not image:
500 amulet.raise_status(amulet.FAIL, msg="Image create failed")
501
502 instance = u.create_instance(self.nova_demo, "cirros-image", "cirros",
503 "m1.tiny")
504 if not instance:
505 amulet.raise_status(amulet.FAIL, msg="Instance create failed")
506
507 found = False
508 for instance in self.nova_demo.servers.list():
509 if instance.name == 'cirros':
510 found = True
511 if instance.status != 'ACTIVE':
512 msg = "cirros instance is not active"
513                     amulet.raise_status(amulet.FAIL, msg=msg)
514
515 if not found:
516 message = "nova cirros instance does not exist"
517 amulet.raise_status(amulet.FAIL, msg=message)
518
519 u.delete_image(self.glance, image)
520 u.delete_instance(self.nova_demo, instance)
0521
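The numbered scripts under tests/ (e.g. tests/15-basic-trusty-icehouse) are expected to drive this class; their contents are not reproduced in this hunk, so the following is only a sketch of how such a runner presumably looks, with the series value chosen for illustration and assuming it is executed from the tests directory.

    #!/usr/bin/python
    # Illustrative runner: deploy one series/origin combination and execute
    # every test_* method defined on the deployment class.
    from basic_deployment import NovaCCBasicDeployment

    if __name__ == '__main__':
        deployment = NovaCCBasicDeployment(series='trusty')
        deployment.run_tests()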
=== added directory 'tests/charmhelpers'
=== added file 'tests/charmhelpers/__init__.py'
=== added directory 'tests/charmhelpers/contrib'
=== added file 'tests/charmhelpers/contrib/__init__.py'
=== added directory 'tests/charmhelpers/contrib/amulet'
=== added file 'tests/charmhelpers/contrib/amulet/__init__.py'
=== added file 'tests/charmhelpers/contrib/amulet/deployment.py'
--- tests/charmhelpers/contrib/amulet/deployment.py 1970-01-01 00:00:00 +0000
+++ tests/charmhelpers/contrib/amulet/deployment.py 2014-07-29 13:07:23 +0000
@@ -0,0 +1,58 @@
1import amulet
2
3
4class AmuletDeployment(object):
5 """This class provides generic Amulet deployment and test runner
6 methods."""
7
8 def __init__(self, series=None):
9 """Initialize the deployment environment."""
10 self.series = None
11
12 if series:
13 self.series = series
14 self.d = amulet.Deployment(series=self.series)
15 else:
16 self.d = amulet.Deployment()
17
18 def _add_services(self, this_service, other_services):
19 """Add services to the deployment where this_service is the local charm
20 that we're focused on testing and other_services are the other
21 charms that come from the charm store."""
22 name, units = range(2)
23 self.this_service = this_service[name]
24 self.d.add(this_service[name], units=this_service[units])
25
26 for svc in other_services:
27 if self.series:
28 self.d.add(svc[name],
29 charm='cs:{}/{}'.format(self.series, svc[name]),
30 units=svc[units])
31 else:
32 self.d.add(svc[name], units=svc[units])
33
34 def _add_relations(self, relations):
35 """Add all of the relations for the services."""
36 for k, v in relations.iteritems():
37 self.d.relate(k, v)
38
39 def _configure_services(self, configs):
40 """Configure all of the services."""
41 for service, config in configs.iteritems():
42 self.d.configure(service, config)
43
44 def _deploy(self):
45 """Deploy environment and wait for all hooks to finish executing."""
46 try:
47 self.d.setup()
48 self.d.sentry.wait()
49 except amulet.helpers.TimeoutError:
50 amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
51 except:
52 raise
53
54 def run_tests(self):
55 """Run all of the methods that are prefixed with 'test_'."""
56 for test in dir(self):
57 if test.startswith('test_'):
58 getattr(self, test)()
059
=== added file 'tests/charmhelpers/contrib/amulet/utils.py'
--- tests/charmhelpers/contrib/amulet/utils.py 1970-01-01 00:00:00 +0000
+++ tests/charmhelpers/contrib/amulet/utils.py 2014-07-29 13:07:23 +0000
@@ -0,0 +1,157 @@
1import ConfigParser
2import io
3import logging
4import re
5import sys
6from time import sleep
7
8
9class AmuletUtils(object):
10 """This class provides common utility functions that are used by Amulet
11 tests."""
12
13 def __init__(self, log_level=logging.ERROR):
14 self.log = self.get_logger(level=log_level)
15
16 def get_logger(self, name="amulet-logger", level=logging.DEBUG):
17 """Get a logger object that will log to stdout."""
18 log = logging
19 logger = log.getLogger(name)
20 fmt = \
21 log.Formatter("%(asctime)s %(funcName)s %(levelname)s: %(message)s")
22
23 handler = log.StreamHandler(stream=sys.stdout)
24 handler.setLevel(level)
25 handler.setFormatter(fmt)
26
27 logger.addHandler(handler)
28 logger.setLevel(level)
29
30 return logger
31
32 def valid_ip(self, ip):
33 if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
34 return True
35 else:
36 return False
37
38 def valid_url(self, url):
39 p = re.compile(
40 r'^(?:http|ftp)s?://'
41 r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # flake8: noqa
42 r'localhost|'
43 r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
44 r'(?::\d+)?'
45 r'(?:/?|[/?]\S+)$',
46 re.IGNORECASE)
47 if p.match(url):
48 return True
49 else:
50 return False
51
52 def validate_services(self, commands):
53 """Verify the specified services are running on the corresponding
54 service units."""
55 for k, v in commands.iteritems():
56 for cmd in v:
57 output, code = k.run(cmd)
58 if code != 0:
59 return "command `{}` returned {}".format(cmd, str(code))
60 return None
61
62 def _get_config(self, unit, filename):
63 """Get a ConfigParser object for parsing a unit's config file."""
64 file_contents = unit.file_contents(filename)
65 config = ConfigParser.ConfigParser()
66 config.readfp(io.StringIO(file_contents))
67 return config
68
69 def validate_config_data(self, sentry_unit, config_file, section, expected):
70 """Verify that the specified section of the config file contains
71 the expected option key:value pairs."""
72 config = self._get_config(sentry_unit, config_file)
73
74 if section != 'DEFAULT' and not config.has_section(section):
75 return "section [{}] does not exist".format(section)
76
77 for k in expected.keys():
78 if not config.has_option(section, k):
79 return "section [{}] is missing option {}".format(section, k)
80 if config.get(section, k) != expected[k]:
81 return "section [{}] {}:{} != expected {}:{}".format(section,
82 k, config.get(section, k), k, expected[k])
83 return None
84
85 def _validate_dict_data(self, expected, actual):
86 """Compare expected dictionary data vs actual dictionary data.
87 The values in the 'expected' dictionary can be strings, bools, ints,
88         longs, or can be a function that evaluates a variable and returns a
89 bool."""
90 for k, v in expected.iteritems():
91 if k in actual:
92 if isinstance(v, basestring) or \
93 isinstance(v, bool) or \
94 isinstance(v, (int, long)):
95 if v != actual[k]:
96 return "{}:{}".format(k, actual[k])
97 elif not v(actual[k]):
98 return "{}:{}".format(k, actual[k])
99 else:
100 return "key '{}' does not exist".format(k)
101 return None
102
103 def validate_relation_data(self, sentry_unit, relation, expected):
104 """Validate actual relation data based on expected relation data."""
105 actual = sentry_unit.relation(relation[0], relation[1])
106 self.log.debug('actual: {}'.format(repr(actual)))
107 return self._validate_dict_data(expected, actual)
108
109 def _validate_list_data(self, expected, actual):
110 """Compare expected list vs actual list data."""
111 for e in expected:
112 if e not in actual:
113 return "expected item {} not found in actual list".format(e)
114 return None
115
116 def not_null(self, string):
117         if string is not None:
118 return True
119 else:
120 return False
121
122 def _get_file_mtime(self, sentry_unit, filename):
123 """Get last modification time of file."""
124 return sentry_unit.file_stat(filename)['mtime']
125
126 def _get_dir_mtime(self, sentry_unit, directory):
127 """Get last modification time of directory."""
128 return sentry_unit.directory_stat(directory)['mtime']
129
130 def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
131 """Determine start time of the process based on the last modification
132 time of the /proc/pid directory. If pgrep_full is True, the process
133 name is matched against the full command line."""
134 if pgrep_full:
135 cmd = 'pgrep -o -f {}'.format(service)
136 else:
137 cmd = 'pgrep -o {}'.format(service)
138 proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
139 return self._get_dir_mtime(sentry_unit, proc_dir)
140
141 def service_restarted(self, sentry_unit, service, filename,
142                           pgrep_full=False, sleep_time=10):
143 """Compare a service's start time vs a file's last modification time
144 (such as a config file for that service) to determine if the service
145 has been restarted."""
146         sleep(sleep_time)
147 if self._get_proc_start_time(sentry_unit, service, pgrep_full) >= \
148 self._get_file_mtime(sentry_unit, filename):
149 return True
150 else:
151 return False
152
153 def relation_error(self, name, data):
154 return 'unexpected relation data in {} - {}'.format(name, data)
155
156 def endpoint_error(self, name, data):
157 return 'unexpected endpoint data in {} - {}'.format(name, data)
0158
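The expected dictionaries used throughout basic_deployment.py mix literal values with validator callables such as u.valid_ip and u.not_null; _validate_dict_data treats a callable as a predicate over the actual value. A minimal usage sketch with made-up relation data, not part of the branch:

    from charmhelpers.contrib.amulet.utils import AmuletUtils

    u = AmuletUtils()
    expected = {
        'private-address': u.valid_ip,  # callable: must look like an IPv4 address
        'vhost': 'openstack',           # literal: must match exactly
    }
    actual = {'private-address': '10.0.0.1', 'vhost': 'openstack'}
    # Returns None when everything matches, or a "key:value" string for the
    # first mismatch it finds.
    print(u._validate_dict_data(expected, actual))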
=== added directory 'tests/charmhelpers/contrib/openstack'
=== added file 'tests/charmhelpers/contrib/openstack/__init__.py'
=== added directory 'tests/charmhelpers/contrib/openstack/amulet'
=== added file 'tests/charmhelpers/contrib/openstack/amulet/__init__.py'
=== added file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-29 13:07:23 +0000
@@ -0,0 +1,55 @@
1from charmhelpers.contrib.amulet.deployment import (
2 AmuletDeployment
3)
4
5
6class OpenStackAmuletDeployment(AmuletDeployment):
7 """This class inherits from AmuletDeployment and has additional support
8 that is specifically for use by OpenStack charms."""
9
10 def __init__(self, series=None, openstack=None, source=None):
11 """Initialize the deployment environment."""
12 super(OpenStackAmuletDeployment, self).__init__(series)
13 self.openstack = openstack
14 self.source = source
15
16 def _add_services(self, this_service, other_services):
17 """Add services to the deployment and set openstack-origin."""
18 super(OpenStackAmuletDeployment, self)._add_services(this_service,
19 other_services)
20 name = 0
21 services = other_services
22 services.append(this_service)
23 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
24
25 if self.openstack:
26 for svc in services:
27 if svc[name] not in use_source:
28 config = {'openstack-origin': self.openstack}
29 self.d.configure(svc[name], config)
30
31 if self.source:
32 for svc in services:
33 if svc[name] in use_source:
34 config = {'source': self.source}
35 self.d.configure(svc[name], config)
36
37 def _configure_services(self, configs):
38 """Configure all of the services."""
39 for service, config in configs.iteritems():
40 self.d.configure(service, config)
41
42 def _get_openstack_release(self):
43 """Return an integer representing the enum value of the openstack
44 release."""
45 self.precise_essex, self.precise_folsom, self.precise_grizzly, \
46 self.precise_havana, self.precise_icehouse, \
47 self.trusty_icehouse = range(6)
48 releases = {
49 ('precise', None): self.precise_essex,
50 ('precise', 'cloud:precise-folsom'): self.precise_folsom,
51 ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
52 ('precise', 'cloud:precise-havana'): self.precise_havana,
53 ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
54 ('trusty', None): self.trusty_icehouse}
55 return releases[(self.series, self.openstack)]
056
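Because _get_openstack_release() maps each (series, origin) pair onto consecutive integers, the deployment tests can use ordered comparisons such as "grizzly or newer". A small sketch, with an illustrative series/origin combination:

    from charmhelpers.contrib.openstack.amulet.deployment import (
        OpenStackAmuletDeployment
    )

    d = OpenStackAmuletDeployment(series='precise',
                                  openstack='cloud:precise-havana')
    release = d._get_openstack_release()  # also defines the d.precise_* enums
    # precise-havana maps to 3 and precise-grizzly to 2, so this holds.
    if release >= d.precise_grizzly:
        print('nova-conductor expected on the cloud controller')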
=== added file 'tests/charmhelpers/contrib/openstack/amulet/utils.py'
--- tests/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
+++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-29 13:07:23 +0000
@@ -0,0 +1,209 @@
1import logging
2import os
3import time
4import urllib
5
6import glanceclient.v1.client as glance_client
7import keystoneclient.v2_0 as keystone_client
8import novaclient.v1_1.client as nova_client
9
10from charmhelpers.contrib.amulet.utils import (
11 AmuletUtils
12)
13
14DEBUG = logging.DEBUG
15ERROR = logging.ERROR
16
17
18class OpenStackAmuletUtils(AmuletUtils):
19 """This class inherits from AmuletUtils and has additional support
20 that is specifically for use by OpenStack charms."""
21
22 def __init__(self, log_level=ERROR):
23 """Initialize the deployment environment."""
24 super(OpenStackAmuletUtils, self).__init__(log_level)
25
26 def validate_endpoint_data(self, endpoints, admin_port, internal_port,
27 public_port, expected):
28 """Validate actual endpoint data vs expected endpoint data. The ports
29 are used to find the matching endpoint."""
30 found = False
31 for ep in endpoints:
32 self.log.debug('endpoint: {}'.format(repr(ep)))
33 if admin_port in ep.adminurl and internal_port in ep.internalurl \
34 and public_port in ep.publicurl:
35 found = True
36 actual = {'id': ep.id,
37 'region': ep.region,
38 'adminurl': ep.adminurl,
39 'internalurl': ep.internalurl,
40 'publicurl': ep.publicurl,
41 'service_id': ep.service_id}
42 ret = self._validate_dict_data(expected, actual)
43 if ret:
44 return 'unexpected endpoint data - {}'.format(ret)
45
46 if not found:
47 return 'endpoint not found'
48
49 def validate_svc_catalog_endpoint_data(self, expected, actual):
50 """Validate a list of actual service catalog endpoints vs a list of
51 expected service catalog endpoints."""
52 self.log.debug('actual: {}'.format(repr(actual)))
53 for k, v in expected.iteritems():
54 if k in actual:
55 ret = self._validate_dict_data(expected[k][0], actual[k][0])
56 if ret:
57 return self.endpoint_error(k, ret)
58 else:
59 return "endpoint {} does not exist".format(k)
60 return ret
61
62 def validate_tenant_data(self, expected, actual):
63 """Validate a list of actual tenant data vs list of expected tenant
64 data."""
65 self.log.debug('actual: {}'.format(repr(actual)))
66 for e in expected:
67 found = False
68 for act in actual:
69 a = {'enabled': act.enabled, 'description': act.description,
70 'name': act.name, 'id': act.id}
71 if e['name'] == a['name']:
72 found = True
73 ret = self._validate_dict_data(e, a)
74 if ret:
75 return "unexpected tenant data - {}".format(ret)
76 if not found:
77 return "tenant {} does not exist".format(e['name'])
78 return ret
79
80 def validate_role_data(self, expected, actual):
81 """Validate a list of actual role data vs a list of expected role
82 data."""
83 self.log.debug('actual: {}'.format(repr(actual)))
84 for e in expected:
85 found = False
86 for act in actual:
87 a = {'name': act.name, 'id': act.id}
88 if e['name'] == a['name']:
89 found = True
90 ret = self._validate_dict_data(e, a)
91 if ret:
92 return "unexpected role data - {}".format(ret)
93 if not found:
94 return "role {} does not exist".format(e['name'])
95 return ret
96
97 def validate_user_data(self, expected, actual):
98 """Validate a list of actual user data vs a list of expected user
99 data."""
100 self.log.debug('actual: {}'.format(repr(actual)))
101 for e in expected:
102 found = False
103 for act in actual:
104 a = {'enabled': act.enabled, 'name': act.name,
105 'email': act.email, 'tenantId': act.tenantId,
106 'id': act.id}
107 if e['name'] == a['name']:
108 found = True
109 ret = self._validate_dict_data(e, a)
110 if ret:
111 return "unexpected user data - {}".format(ret)
112 if not found:
113 return "user {} does not exist".format(e['name'])
114 return ret
115
116 def validate_flavor_data(self, expected, actual):
117 """Validate a list of actual flavors vs a list of expected flavors."""
118 self.log.debug('actual: {}'.format(repr(actual)))
119 act = [a.name for a in actual]
120 return self._validate_list_data(expected, act)
121
122 def tenant_exists(self, keystone, tenant):
123 """Return True if tenant exists"""
124 return tenant in [t.name for t in keystone.tenants.list()]
125
126 def authenticate_keystone_admin(self, keystone_sentry, user, password,
127 tenant):
128 """Authenticates admin user with the keystone admin endpoint."""
129 service_ip = \
130 keystone_sentry.relation('shared-db',
131 'mysql:shared-db')['private-address']
132 ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
133 return keystone_client.Client(username=user, password=password,
134 tenant_name=tenant, auth_url=ep)
135
136 def authenticate_keystone_user(self, keystone, user, password, tenant):
137 """Authenticates a regular user with the keystone public endpoint."""
138 ep = keystone.service_catalog.url_for(service_type='identity',
139 endpoint_type='publicURL')
140 return keystone_client.Client(username=user, password=password,
141 tenant_name=tenant, auth_url=ep)
142
143 def authenticate_glance_admin(self, keystone):
144 """Authenticates admin user with glance."""
145 ep = keystone.service_catalog.url_for(service_type='image',
146 endpoint_type='adminURL')
147 return glance_client.Client(ep, token=keystone.auth_token)
148
149 def authenticate_nova_user(self, keystone, user, password, tenant):
150 """Authenticates a regular user with nova-api."""
151 ep = keystone.service_catalog.url_for(service_type='identity',
152 endpoint_type='publicURL')
153 return nova_client.Client(username=user, api_key=password,
154 project_id=tenant, auth_url=ep)
155
156 def create_cirros_image(self, glance, image_name):
157 """Download the latest cirros image and upload it to glance."""
158 http_proxy = os.getenv('AMULET_HTTP_PROXY')
159 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
160 if http_proxy:
161 proxies = {'http': http_proxy}
162 opener = urllib.FancyURLopener(proxies)
163 else:
164 opener = urllib.FancyURLopener()
165
166 f = opener.open("http://download.cirros-cloud.net/version/released")
167 version = f.read().strip()
168 cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
169
170 if not os.path.exists(cirros_img):
171 cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
172 version, cirros_img)
173 opener.retrieve(cirros_url, cirros_img)
174 f.close()
175
176 with open(cirros_img) as f:
177 image = glance.images.create(name=image_name, is_public=True,
178 disk_format='qcow2',
179 container_format='bare', data=f)
180 return image
181
182 def delete_image(self, glance, image):
183 """Delete the specified image."""
184 glance.images.delete(image)
185
186 def create_instance(self, nova, image_name, instance_name, flavor):
187 """Create the specified instance."""
188 image = nova.images.find(name=image_name)
189 flavor = nova.flavors.find(name=flavor)
190 instance = nova.servers.create(name=instance_name, image=image,
191 flavor=flavor)
192
193 count = 1
194 status = instance.status
195 while status != 'ACTIVE' and count < 60:
196 time.sleep(3)
197 instance = nova.servers.get(instance.id)
198 status = instance.status
199 self.log.debug('instance status: {}'.format(status))
200 count += 1
201
202 if status == 'BUILD':
203 return None
204
205 return instance
206
207 def delete_instance(self, nova, instance):
208 """Delete the specified instance."""
209 nova.servers.delete(instance)
0210
=== modified file 'unit_tests/test_nova_cc_hooks.py'
--- unit_tests/test_nova_cc_hooks.py 2014-05-21 10:03:01 +0000
+++ unit_tests/test_nova_cc_hooks.py 2014-07-29 13:07:23 +0000
@@ -1,6 +1,6 @@
1from mock import MagicMock, patch1from mock import MagicMock, patch, call
2from test_utils import CharmTestCase2from test_utils import CharmTestCase, patch_open
33import os
4with patch('charmhelpers.core.hookenv.config') as config:4with patch('charmhelpers.core.hookenv.config') as config:
5 config.return_value = 'neutron'5 config.return_value = 'neutron'
6 import nova_cc_utils as utils6 import nova_cc_utils as utils
@@ -11,7 +11,11 @@
11utils.register_configs = MagicMock()11utils.register_configs = MagicMock()
12utils.restart_map = MagicMock()12utils.restart_map = MagicMock()
1313
14import nova_cc_hooks as hooks14with patch('nova_cc_utils.guard_map') as gmap:
15 with patch('charmhelpers.core.hookenv.config') as config:
16 config.return_value = False
17 gmap.return_value = {}
18 import nova_cc_hooks as hooks
1519
16utils.register_configs = _reg20utils.register_configs = _reg
17utils.restart_map = _map21utils.restart_map = _map
@@ -35,9 +39,11 @@
35 'relation_set',39 'relation_set',
36 'relation_ids',40 'relation_ids',
37 'ssh_compute_add',41 'ssh_compute_add',
38 'ssh_known_hosts_b64',42 'ssh_known_hosts_lines',
39 'ssh_authorized_keys_b64',43 'ssh_authorized_keys_lines',
40 'save_script_rc',44 'save_script_rc',
45 'service_running',
46 'service_stop',
41 'execd_preinstall',47 'execd_preinstall',
42 'network_manager',48 'network_manager',
43 'volume_service',49 'volume_service',
@@ -98,15 +104,64 @@
98 self.test_relation.set({104 self.test_relation.set({
99 'migration_auth_type': 'ssh', 'ssh_public_key': 'fookey',105 'migration_auth_type': 'ssh', 'ssh_public_key': 'fookey',
100 'private-address': '10.0.0.1'})106 'private-address': '10.0.0.1'})
101 self.ssh_known_hosts_b64.return_value = 'hosts'107 self.ssh_known_hosts_lines.return_value = [
102 self.ssh_authorized_keys_b64.return_value = 'keys'108 'k_h_0', 'k_h_1', 'k_h_2']
103 hooks.compute_changed()109 self.ssh_authorized_keys_lines.return_value = [
104 self.ssh_compute_add.assert_called_with('fookey')110 'auth_0', 'auth_1', 'auth_2']
105 self.relation_set.assert_called_with(known_hosts='hosts',111 hooks.compute_changed()
106 authorized_keys='keys')112 self.ssh_compute_add.assert_called_with('fookey', rid=None, unit=None)
113 expected_relations = [
114 call(relation_settings={'authorized_keys_0': 'auth_0'},
115 relation_id=None),
116 call(relation_settings={'authorized_keys_1': 'auth_1'},
117 relation_id=None),
118 call(relation_settings={'authorized_keys_2': 'auth_2'},
119 relation_id=None),
120 call(relation_settings={'known_hosts_0': 'k_h_0'},
121 relation_id=None),
122 call(relation_settings={'known_hosts_1': 'k_h_1'},
123 relation_id=None),
124 call(relation_settings={'known_hosts_2': 'k_h_2'},
125 relation_id=None),
126 call(authorized_keys_max_index=3, relation_id=None),
127 call(known_hosts_max_index=3, relation_id=None)]
128 self.assertEquals(sorted(self.relation_set.call_args_list),
129 sorted(expected_relations))
130
131 def test_compute_changed_nova_public_key(self):
132 self.test_relation.set({
133 'migration_auth_type': 'sasl', 'nova_ssh_public_key': 'fookey',
134 'private-address': '10.0.0.1'})
135 self.ssh_known_hosts_lines.return_value = [
136 'k_h_0', 'k_h_1', 'k_h_2']
137 self.ssh_authorized_keys_lines.return_value = [
138 'auth_0', 'auth_1', 'auth_2']
139 hooks.compute_changed()
140 self.ssh_compute_add.assert_called_with('fookey', user='nova',
141 rid=None, unit=None)
142 expected_relations = [
143 call(relation_settings={'nova_authorized_keys_0': 'auth_0'},
144 relation_id=None),
145 call(relation_settings={'nova_authorized_keys_1': 'auth_1'},
146 relation_id=None),
147 call(relation_settings={'nova_authorized_keys_2': 'auth_2'},
148 relation_id=None),
149 call(relation_settings={'nova_known_hosts_0': 'k_h_0'},
150 relation_id=None),
151 call(relation_settings={'nova_known_hosts_1': 'k_h_1'},
152 relation_id=None),
153 call(relation_settings={'nova_known_hosts_2': 'k_h_2'},
154 relation_id=None),
155 call(relation_settings={'nova_known_hosts_max_index': 3},
156 relation_id=None),
157 call(relation_settings={'nova_authorized_keys_max_index': 3},
158 relation_id=None)]
159 self.assertEquals(sorted(self.relation_set.call_args_list),
160 sorted(expected_relations))
107161
108 @patch.object(hooks, '_auth_config')162 @patch.object(hooks, '_auth_config')
109 def test_compute_joined_neutron(self, auth_config):163 def test_compute_joined_neutron(self, auth_config):
164 self.is_relation_made.return_value = False
110 self.network_manager.return_value = 'neutron'165 self.network_manager.return_value = 'neutron'
111 self.eligible_leader = True166 self.eligible_leader = True
112 self.keystone_ca_cert_b64.return_value = 'foocert64'167 self.keystone_ca_cert_b64.return_value = 'foocert64'
@@ -122,6 +177,8 @@
122 relation_id=None,177 relation_id=None,
123 quantum_url='http://nova-cc-host1:9696',178 quantum_url='http://nova-cc-host1:9696',
124 ca_cert='foocert64',179 ca_cert='foocert64',
180 quantum_port=9696,
181 quantum_host='nova-cc-host1',
125 quantum_security_groups='no',182 quantum_security_groups='no',
126 region='RegionOne',183 region='RegionOne',
127 volume_service='cinder',184 volume_service='cinder',
@@ -129,6 +186,40 @@
129 quantum_plugin='nvp',186 quantum_plugin='nvp',
130 network_manager='neutron', **FAKE_KS_AUTH_CFG)187 network_manager='neutron', **FAKE_KS_AUTH_CFG)
131188
189 @patch.object(hooks, 'NeutronAPIContext')
190 @patch.object(hooks, '_auth_config')
191 def test_compute_joined_neutron_api_rel(self, auth_config, napi):
192 def mock_NeutronAPIContext():
193 return {
194 'neutron_plugin': 'bob',
195 'neutron_security_groups': 'yes',
196 'neutron_url': 'http://nova-cc-host1:9696',
197 }
198 napi.return_value = mock_NeutronAPIContext
199 self.is_relation_made.return_value = True
200 self.network_manager.return_value = 'neutron'
201 self.eligible_leader = True
202 self.keystone_ca_cert_b64.return_value = 'foocert64'
203 self.volume_service.return_value = 'cinder'
204 self.unit_get.return_value = 'nova-cc-host1'
205 self.canonical_url.return_value = 'http://nova-cc-host1'
206 self.api_port.return_value = '9696'
207 self.neutron_plugin.return_value = 'nvp'
208 auth_config.return_value = FAKE_KS_AUTH_CFG
209 hooks.compute_joined()
210 self.relation_set.assert_called_with(
211 relation_id=None,
212 quantum_url='http://nova-cc-host1:9696',
213 ca_cert='foocert64',
214 quantum_port=9696,
215 quantum_host='nova-cc-host1',
216 quantum_security_groups='yes',
217 region='RegionOne',
218 volume_service='cinder',
219 ec2_host='nova-cc-host1',
220 quantum_plugin='bob',
221 network_manager='neutron', **FAKE_KS_AUTH_CFG)
222
132 @patch.object(hooks, '_auth_config')223 @patch.object(hooks, '_auth_config')
133 def test_nova_vmware_joined(self, auth_config):224 def test_nova_vmware_joined(self, auth_config):
134 auth_config.return_value = FAKE_KS_AUTH_CFG225 auth_config.return_value = FAKE_KS_AUTH_CFG
@@ -231,3 +322,46 @@
231 self._postgresql_db_test(configs)322 self._postgresql_db_test(configs)
232 self.assertTrue(configs.write_all.called)323 self.assertTrue(configs.write_all.called)
233 self.migrate_database.assert_called_with()324 self.migrate_database.assert_called_with()
325
326 @patch.object(os, 'rename')
327 @patch.object(os.path, 'isfile')
328 @patch.object(hooks, 'CONFIGS')
329 def test_neutron_api_relation_joined(self, configs, isfile, rename):
330 neutron_conf = '/etc/neutron/neutron.conf'
331 nova_url = 'http://novaurl:8774/v2'
332 isfile.return_value = True
333 self.service_running.return_value = True
334 _identity_joined = self.patch('identity_joined')
335 self.relation_ids.side_effect = ['relid']
336 self.canonical_url.return_value = 'http://novaurl'
337 with patch_open() as (_open, _file):
338 hooks.neutron_api_relation_joined()
339 self.service_stop.assert_called_with('neutron-server')
340 rename.assert_called_with(neutron_conf, neutron_conf + '_unused')
341 self.assertTrue(_identity_joined.called)
342 self.relation_set.assert_called_with(relation_id=None,
343 nova_url=nova_url)
344
345 @patch.object(hooks, 'CONFIGS')
346 def test_neutron_api_relation_changed(self, configs):
347 self.relation_ids.return_value = ['relid']
348 _compute_joined = self.patch('compute_joined')
349 _quantum_joined = self.patch('quantum_joined')
350 hooks.neutron_api_relation_changed()
351         configs.write.assert_called_with('/etc/nova/nova.conf')
352 self.assertTrue(_compute_joined.called)
353 self.assertTrue(_quantum_joined.called)
354
355 @patch.object(os, 'remove')
356 @patch.object(os.path, 'isfile')
357 @patch.object(hooks, 'CONFIGS')
358 def test_neutron_api_relation_broken(self, configs, isfile, remove):
359 isfile.return_value = True
360 self.relation_ids.return_value = ['relid']
361 _compute_joined = self.patch('compute_joined')
362 _quantum_joined = self.patch('quantum_joined')
363 hooks.neutron_api_relation_broken()
364 remove.assert_called_with('/etc/init/neutron-server.override')
365 self.assertTrue(configs.write_all.called)
366 self.assertTrue(_compute_joined.called)
367 self.assertTrue(_quantum_joined.called)
234368
=== modified file 'unit_tests/test_nova_cc_utils.py'
--- unit_tests/test_nova_cc_utils.py 2014-05-02 10:06:23 +0000
+++ unit_tests/test_nova_cc_utils.py 2014-07-29 13:07:23 +0000
@@ -22,6 +22,7 @@
22 'eligible_leader',22 'eligible_leader',
23 'enable_policy_rcd',23 'enable_policy_rcd',
24 'get_os_codename_install_source',24 'get_os_codename_install_source',
25 'is_relation_made',
25 'log',26 'log',
26 'ml2_migration',27 'ml2_migration',
27 'network_manager',28 'network_manager',
@@ -34,7 +35,9 @@
34 'remote_unit',35 'remote_unit',
35 '_save_script_rc',36 '_save_script_rc',
36 'service_start',37 'service_start',
37 'services'38 'services',
39 'service_running',
40 'service_stop'
38]41]
3942
40SCRIPTRC_ENV_VARS = {43SCRIPTRC_ENV_VARS = {
@@ -151,6 +154,7 @@
151154
152 @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')155 @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')
153 def test_resource_map_quantum(self, subcontext):156 def test_resource_map_quantum(self, subcontext):
157 self.is_relation_made.return_value = False
154 self._resource_map(network_manager='quantum')158 self._resource_map(network_manager='quantum')
155 _map = utils.resource_map()159 _map = utils.resource_map()
156 confs = [160 confs = [
@@ -162,6 +166,7 @@
162166
163 @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')167 @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')
164 def test_resource_map_neutron(self, subcontext):168 def test_resource_map_neutron(self, subcontext):
169 self.is_relation_made.return_value = False
165 self._resource_map(network_manager='neutron')170 self._resource_map(network_manager='neutron')
166 _map = utils.resource_map()171 _map = utils.resource_map()
167 confs = [172 confs = [
@@ -170,6 +175,17 @@
170 [self.assertIn(q_conf, _map.keys()) for q_conf in confs]175 [self.assertIn(q_conf, _map.keys()) for q_conf in confs]
171176
172 @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')177 @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')
178 def test_resource_map_neutron_api_rel(self, subcontext):
179 self.is_relation_made.return_value = True
180 self._resource_map(network_manager='neutron')
181 _map = utils.resource_map()
182 confs = [
183 '/etc/neutron/neutron.conf',
184 ]
185 for q_conf in confs:
186 self.assertFalse(q_conf in _map.keys())
187
188 @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')
173 def test_resource_map_vmware(self, subcontext):189 def test_resource_map_vmware(self, subcontext):
174 fake_context = MagicMock()190 fake_context = MagicMock()
175 fake_context.return_value = {191 fake_context.return_value = {
@@ -201,6 +217,7 @@
201 @patch('os.path.exists')217 @patch('os.path.exists')
202 @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')218 @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')
203 def test_restart_map_api_before_frontends(self, subcontext, _exists):219 def test_restart_map_api_before_frontends(self, subcontext, _exists):
220 self.is_relation_made.return_value = False
204 _exists.return_value = False221 _exists.return_value = False
205 self._resource_map(network_manager='neutron')222 self._resource_map(network_manager='neutron')
206 _map = utils.restart_map()223 _map = utils.restart_map()
@@ -226,6 +243,7 @@
226243
227 @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')244 @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')
228 def test_determine_packages_neutron(self, subcontext):245 def test_determine_packages_neutron(self, subcontext):
246 self.is_relation_made.return_value = False
229 self._resource_map(network_manager='neutron')247 self._resource_map(network_manager='neutron')
230 pkgs = utils.determine_packages()248 pkgs = utils.determine_packages()
231 self.assertIn('neutron-server', pkgs)249 self.assertIn('neutron-server', pkgs)
@@ -321,8 +339,8 @@
321 check_output.return_value = 'fookey'339 check_output.return_value = 'fookey'
322 host_key.return_value = 'fookey_old'340 host_key.return_value = 'fookey_old'
323 with patch_open() as (_open, _file):341 with patch_open() as (_open, _file):
324 utils.add_known_host('foohost')342 utils.add_known_host('foohost', None, None)
325 rm.assert_called_with('foohost', None)343 rm.assert_called_with('foohost', None, None)
326344
327 @patch.object(utils, 'known_hosts')345 @patch.object(utils, 'known_hosts')
328 @patch.object(utils, 'remove_known_host')346 @patch.object(utils, 'remove_known_host')
@@ -355,19 +373,19 @@
355 def test_known_hosts(self, ssh_dir):373 def test_known_hosts(self, ssh_dir):
356 ssh_dir.return_value = '/tmp/foo'374 ssh_dir.return_value = '/tmp/foo'
357 self.assertEquals(utils.known_hosts(), '/tmp/foo/known_hosts')375 self.assertEquals(utils.known_hosts(), '/tmp/foo/known_hosts')
358 ssh_dir.assert_called_with(None)376 ssh_dir.assert_called_with(None, None)
359 self.assertEquals(utils.known_hosts('bar'), '/tmp/foo/known_hosts')377 self.assertEquals(utils.known_hosts('bar'), '/tmp/foo/known_hosts')
360 ssh_dir.assert_called_with('bar')378 ssh_dir.assert_called_with('bar', None)
361379
362 @patch.object(utils, 'ssh_directory_for_unit')380 @patch.object(utils, 'ssh_directory_for_unit')
363 def test_authorized_keys(self, ssh_dir):381 def test_authorized_keys(self, ssh_dir):
364 ssh_dir.return_value = '/tmp/foo'382 ssh_dir.return_value = '/tmp/foo'
365 self.assertEquals(utils.authorized_keys(), '/tmp/foo/authorized_keys')383 self.assertEquals(utils.authorized_keys(), '/tmp/foo/authorized_keys')
366 ssh_dir.assert_called_with(None)384 ssh_dir.assert_called_with(None, None)
367 self.assertEquals(385 self.assertEquals(
368 utils.authorized_keys('bar'),386 utils.authorized_keys('bar'),
369 '/tmp/foo/authorized_keys')387 '/tmp/foo/authorized_keys')
370 ssh_dir.assert_called_with('bar')388 ssh_dir.assert_called_with('bar', None)
371389
372 @patch.object(utils, 'known_hosts')390 @patch.object(utils, 'known_hosts')
373 @patch('subprocess.check_call')391 @patch('subprocess.check_call')
@@ -421,11 +439,15 @@
421 self.os_release.return_value = 'folsom'439 self.os_release.return_value = 'folsom'
422440
423 def test_determine_endpoints_base(self):441 def test_determine_endpoints_base(self):
442 self.is_relation_made.return_value = False
424 self.relation_ids.return_value = []443 self.relation_ids.return_value = []
425 self.assertEquals(444 self.assertEquals(
426 BASE_ENDPOINTS, utils.determine_endpoints('http://foohost.com'))445 BASE_ENDPOINTS, utils.determine_endpoints('http://foohost.com',
446 'http://foohost.com',
447 'http://foohost.com'))
427448
428 def test_determine_endpoints_nova_volume(self):449 def test_determine_endpoints_nova_volume(self):
450 self.is_relation_made.return_value = False
429 self.relation_ids.return_value = ['nova-volume-service/0']451 self.relation_ids.return_value = ['nova-volume-service/0']
430 endpoints = deepcopy(BASE_ENDPOINTS)452 endpoints = deepcopy(BASE_ENDPOINTS)
431 endpoints.update({453 endpoints.update({
@@ -438,9 +460,12 @@
438 'nova-volume_region': 'RegionOne',460 'nova-volume_region': 'RegionOne',
439 'nova-volume_service': 'nova-volume'})461 'nova-volume_service': 'nova-volume'})
440 self.assertEquals(462 self.assertEquals(
441 endpoints, utils.determine_endpoints('http://foohost.com'))463 endpoints, utils.determine_endpoints('http://foohost.com',
464 'http://foohost.com',
465 'http://foohost.com'))
442466
443 def test_determine_endpoints_quantum_neutron(self):467 def test_determine_endpoints_quantum_neutron(self):
468 self.is_relation_made.return_value = False
444 self.relation_ids.return_value = []469 self.relation_ids.return_value = []
445 self.network_manager.return_value = 'quantum'470 self.network_manager.return_value = 'quantum'
446 endpoints = deepcopy(BASE_ENDPOINTS)471 endpoints = deepcopy(BASE_ENDPOINTS)
@@ -451,7 +476,25 @@
451 'quantum_region': 'RegionOne',476 'quantum_region': 'RegionOne',
452 'quantum_service': 'quantum'})477 'quantum_service': 'quantum'})
453 self.assertEquals(478 self.assertEquals(
454 endpoints, utils.determine_endpoints('http://foohost.com'))479 endpoints, utils.determine_endpoints('http://foohost.com',
480 'http://foohost.com',
481 'http://foohost.com'))
482
483 def test_determine_endpoints_neutron_api_rel(self):
484 self.is_relation_made.return_value = True
485 self.relation_ids.return_value = []
486 self.network_manager.return_value = 'quantum'
487 endpoints = deepcopy(BASE_ENDPOINTS)
488 endpoints.update({
489 'quantum_admin_url': None,
490 'quantum_internal_url': None,
491 'quantum_public_url': None,
492 'quantum_region': None,
493 'quantum_service': None})
494 self.assertEquals(
495 endpoints, utils.determine_endpoints('http://foohost.com',
496 'http://foohost.com',
497 'http://foohost.com'))
455498
456 @patch.object(utils, 'known_hosts')499 @patch.object(utils, 'known_hosts')
457 @patch('subprocess.check_output')500 @patch('subprocess.check_output')
@@ -461,9 +504,9 @@
461 _check_output.assert_called_with(504 _check_output.assert_called_with(
462 ['ssh-keygen', '-f', '/foo/known_hosts',505 ['ssh-keygen', '-f', '/foo/known_hosts',
463 '-H', '-F', 'test'])506 '-H', '-F', 'test'])
464 _known_hosts.assert_called_with(None)507 _known_hosts.assert_called_with(None, None)
465 utils.ssh_known_host_key('test', 'bar')508 utils.ssh_known_host_key('test', 'bar')
466 _known_hosts.assert_called_with('bar')509 _known_hosts.assert_called_with('bar', None)
467510
468 @patch.object(utils, 'known_hosts')511 @patch.object(utils, 'known_hosts')
469 @patch('subprocess.check_call')512 @patch('subprocess.check_call')
@@ -473,9 +516,9 @@
473 _check_call.assert_called_with(516 _check_call.assert_called_with(
474 ['ssh-keygen', '-f', '/foo/known_hosts',517 ['ssh-keygen', '-f', '/foo/known_hosts',
475 '-R', 'test'])518 '-R', 'test'])
476 _known_hosts.assert_called_with(None)519 _known_hosts.assert_called_with(None, None)
477 utils.remove_known_host('test', 'bar')520 utils.remove_known_host('test', 'bar')
478 _known_hosts.assert_called_with('bar')521 _known_hosts.assert_called_with('bar', None)
479522
480 @patch('subprocess.check_output')523 @patch('subprocess.check_output')
481 def test_migrate_database(self, check_output):524 def test_migrate_database(self, check_output):
@@ -555,3 +598,113 @@
555 utils.do_openstack_upgrade()598 utils.do_openstack_upgrade()
556 expected = [call('cloud:precise-icehouse')]599 expected = [call('cloud:precise-icehouse')]
557 self.assertEquals(_do_openstack_upgrade.call_args_list, expected)600 self.assertEquals(_do_openstack_upgrade.call_args_list, expected)
601
602 def test_guard_map_nova(self):
603 self.relation_ids.return_value = []
604 self.os_release.return_value = 'havana'
605 self.assertEqual(
606 {'nova-api-ec2': ['identity-service', 'amqp', 'shared-db'],
607 'nova-api-os-compute': ['identity-service', 'amqp', 'shared-db'],
608 'nova-cert': ['identity-service', 'amqp', 'shared-db'],
609 'nova-conductor': ['identity-service', 'amqp', 'shared-db'],
610 'nova-objectstore': ['identity-service', 'amqp', 'shared-db'],
611 'nova-scheduler': ['identity-service', 'amqp', 'shared-db']},
612 utils.guard_map()
613 )
614 self.os_release.return_value = 'essex'
615 self.assertEqual(
616 {'nova-api-ec2': ['identity-service', 'amqp', 'shared-db'],
617 'nova-api-os-compute': ['identity-service', 'amqp', 'shared-db'],
618 'nova-cert': ['identity-service', 'amqp', 'shared-db'],
619 'nova-objectstore': ['identity-service', 'amqp', 'shared-db'],
620 'nova-scheduler': ['identity-service', 'amqp', 'shared-db']},
621 utils.guard_map()
622 )
623
624 def test_guard_map_neutron(self):
625 self.relation_ids.return_value = []
626 self.network_manager.return_value = 'neutron'
627 self.os_release.return_value = 'icehouse'
628 self.assertEqual(
629 {'neutron-server': ['identity-service', 'amqp', 'shared-db'],
630 'nova-api-ec2': ['identity-service', 'amqp', 'shared-db'],
631 'nova-api-os-compute': ['identity-service', 'amqp', 'shared-db'],
632 'nova-cert': ['identity-service', 'amqp', 'shared-db'],
633 'nova-conductor': ['identity-service', 'amqp', 'shared-db'],
634 'nova-objectstore': ['identity-service', 'amqp', 'shared-db'],
635 'nova-scheduler': ['identity-service', 'amqp', 'shared-db'], },
636 utils.guard_map()
637 )
638 self.network_manager.return_value = 'quantum'
639 self.os_release.return_value = 'grizzly'
640 self.assertEqual(
641 {'quantum-server': ['identity-service', 'amqp', 'shared-db'],
642 'nova-api-ec2': ['identity-service', 'amqp', 'shared-db'],
643 'nova-api-os-compute': ['identity-service', 'amqp', 'shared-db'],
644 'nova-cert': ['identity-service', 'amqp', 'shared-db'],
645 'nova-conductor': ['identity-service', 'amqp', 'shared-db'],
646 'nova-objectstore': ['identity-service', 'amqp', 'shared-db'],
647 'nova-scheduler': ['identity-service', 'amqp', 'shared-db'], },
648 utils.guard_map()
649 )
650
651 def test_guard_map_pgsql(self):
652 self.relation_ids.return_value = ['pgsql:1']
653 self.network_manager.return_value = 'neutron'
654 self.os_release.return_value = 'icehouse'
655 self.assertEqual(
656 {'neutron-server': ['identity-service', 'amqp',
657 'pgsql-neutron-db'],
658 'nova-api-ec2': ['identity-service', 'amqp', 'pgsql-nova-db'],
659 'nova-api-os-compute': ['identity-service', 'amqp',
660 'pgsql-nova-db'],
661 'nova-cert': ['identity-service', 'amqp', 'pgsql-nova-db'],
662 'nova-conductor': ['identity-service', 'amqp', 'pgsql-nova-db'],
663 'nova-objectstore': ['identity-service', 'amqp',
664 'pgsql-nova-db'],
665 'nova-scheduler': ['identity-service', 'amqp',
666 'pgsql-nova-db'], },
667 utils.guard_map()
668 )
669
670 def test_service_guard_inactive(self):
671 '''Ensure that if disabled, service guards nothing'''
672 contexts = MagicMock()
673
674 @utils.service_guard({'test': ['interfacea', 'interfaceb']},
675 contexts, False)
676 def dummy_func():
677 pass
678 dummy_func()
679 self.assertFalse(self.service_running.called)
680 self.assertFalse(contexts.complete_contexts.called)
681
682 def test_service_guard_active_guard(self):
683 '''Ensure services with incomplete interfaces are stopped'''
684 contexts = MagicMock()
685 contexts.complete_contexts.return_value = ['interfacea']
686 self.service_running.return_value = True
687
688 @utils.service_guard({'test': ['interfacea', 'interfaceb']},
689 contexts, True)
690 def dummy_func():
691 pass
692 dummy_func()
693 self.service_running.assert_called_with('test')
694 self.service_stop.assert_called_with('test')
695 self.assertTrue(contexts.complete_contexts.called)
696
697 def test_service_guard_active_release(self):
698 '''Ensure services with complete interfaces are not stopped'''
699 contexts = MagicMock()
700 contexts.complete_contexts.return_value = ['interfacea',
701 'interfaceb']
702
703 @utils.service_guard({'test': ['interfacea', 'interfaceb']},
704 contexts, True)
705 def dummy_func():
706 pass
707 dummy_func()
708 self.assertFalse(self.service_running.called)
709 self.assertFalse(self.service_stop.called)
710 self.assertTrue(contexts.complete_contexts.called)
558711
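The new test_service_guard_* cases above pin down the contract of the service_guard decorator added in hooks/nova_cc_utils.py: when guarding is enabled, any running service whose required interfaces are not all complete is stopped before the wrapped hook runs, and nothing is touched when guarding is disabled or the interfaces are complete. The actual implementation lives in the nova_cc_utils.py hunk of this branch; the following is only a sketch inferred from those tests.

    from functools import wraps

    from charmhelpers.core.hookenv import log
    from charmhelpers.core.host import service_running, service_stop


    def service_guard(guard_map, contexts, active):
        # Sketch: guard_map maps service name -> list of required interfaces;
        # contexts provides complete_contexts(); active toggles the guard.
        def wrap(f):
            @wraps(f)
            def wrapped_f(*args, **kwargs):
                if active:
                    available = contexts.complete_contexts()
                    for svc, interfaces in guard_map.items():
                        if set(interfaces) - set(available):
                            # At least one required interface is missing.
                            if service_running(svc):
                                log('Stopping {}: incomplete relations'
                                    .format(svc))
                                service_stop(svc)
                return f(*args, **kwargs)
            return wrapped_f
        return wrap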
=== modified file 'unit_tests/test_utils.py'
--- unit_tests/test_utils.py 2013-11-08 05:41:39 +0000
+++ unit_tests/test_utils.py 2014-07-29 13:07:23 +0000
@@ -82,9 +82,9 @@
82 return self.config82 return self.config
8383
84 def set(self, attr, value):84 def set(self, attr, value):
85 if attr not in self.config:85 if attr not in self.config:
86 raise KeyError86 raise KeyError
87 self.config[attr] = value87 self.config[attr] = value
8888
8989
90class TestRelation(object):90class TestRelation(object):

Subscribers

People subscribed via source and target branches