Merge lp:~james-page/charms/trusty/nova-cloud-controller/ram-allocation-ratio into lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/trunk

Proposed by James Page
Status: Superseded
Proposed branch: lp:~james-page/charms/trusty/nova-cloud-controller/ram-allocation-ratio
Merge into: lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/trunk
Diff against target: 5890 lines (+4147/-361) (has conflicts)
49 files modified
.bzrignore (+2/-0)
Makefile (+24/-1)
README.txt (+10/-0)
charm-helpers-hooks.yaml (+12/-0)
charm-helpers-tests.yaml (+5/-0)
config.yaml (+100/-0)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+3/-2)
hooks/charmhelpers/contrib/network/ip.py (+174/-0)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+61/-0)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+275/-0)
hooks/charmhelpers/contrib/openstack/context.py (+121/-25)
hooks/charmhelpers/contrib/openstack/ip.py (+79/-0)
hooks/charmhelpers/contrib/openstack/neutron.py (+14/-0)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+9/-4)
hooks/charmhelpers/contrib/openstack/templating.py (+22/-23)
hooks/charmhelpers/contrib/openstack/utils.py (+18/-5)
hooks/charmhelpers/contrib/peerstorage/__init__.py (+83/-0)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+1/-1)
hooks/charmhelpers/core/fstab.py (+116/-0)
hooks/charmhelpers/core/hookenv.py (+7/-5)
hooks/charmhelpers/core/host.py (+47/-8)
hooks/charmhelpers/core/services/__init__.py (+2/-0)
hooks/charmhelpers/core/services/base.py (+310/-0)
hooks/charmhelpers/core/services/helpers.py (+125/-0)
hooks/charmhelpers/core/templating.py (+51/-0)
hooks/charmhelpers/fetch/__init__.py (+97/-28)
hooks/nova_cc_context.py (+51/-2)
hooks/nova_cc_hooks.py (+286/-61)
hooks/nova_cc_utils.py (+369/-163)
metadata.yaml (+2/-0)
revision (+1/-1)
templates/havana/nova.conf (+11/-2)
templates/icehouse/neutron.conf (+5/-0)
templates/icehouse/nova.conf (+18/-2)
tests/00-setup (+10/-0)
tests/10-basic-precise-essex (+10/-0)
tests/11-basic-precise-folsom (+18/-0)
tests/12-basic-precise-grizzly (+12/-0)
tests/13-basic-precise-havana (+12/-0)
tests/14-basic-precise-icehouse (+12/-0)
tests/15-basic-trusty-icehouse (+10/-0)
tests/README (+47/-0)
tests/basic_deployment.py (+520/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+71/-0)
tests/charmhelpers/contrib/amulet/utils.py (+176/-0)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+61/-0)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+275/-0)
unit_tests/test_nova_cc_hooks.py (+262/-13)
unit_tests/test_nova_cc_utils.py (+140/-15)
Conflict adding file .bzrignore.  Moved existing file to .bzrignore.moved.
Text conflict in Makefile
Contents conflict in charm-helpers.yaml
Text conflict in config.yaml
Text conflict in hooks/charmhelpers/contrib/openstack/context.py
Text conflict in hooks/charmhelpers/contrib/openstack/utils.py
Conflict adding file hooks/charmhelpers/core/fstab.py.  Moved existing file to hooks/charmhelpers/core/fstab.py.moved.
Text conflict in hooks/charmhelpers/core/host.py
Text conflict in hooks/charmhelpers/fetch/__init__.py
Text conflict in hooks/nova_cc_hooks.py
Text conflict in hooks/nova_cc_utils.py
Text conflict in templates/havana/nova.conf
Text conflict in templates/icehouse/neutron.conf
Text conflict in templates/icehouse/nova.conf
To merge this branch: bzr merge lp:~james-page/charms/trusty/nova-cloud-controller/ram-allocation-ratio
Reviewer                Review Type    Date Requested    Status
OpenStack Charmers                                       Pending
Review via email: mp+234781@code.launchpad.net


Preview Diff

=== added file '.bzrignore'
--- .bzrignore 1970-01-01 00:00:00 +0000
+++ .bzrignore 2014-09-16 09:08:32 +0000
@@ -0,0 +1,2 @@
+bin
+.coverage
=== renamed file '.bzrignore' => '.bzrignore.moved'
=== modified file 'Makefile'
--- Makefile 2014-09-09 23:43:43 +0000
+++ Makefile 2014-09-16 09:08:32 +0000
@@ -2,9 +2,10 @@
 PYTHON := /usr/bin/env python
 
 lint:
-	@flake8 --exclude hooks/charmhelpers hooks unit_tests
+	@flake8 --exclude hooks/charmhelpers hooks unit_tests tests
 	@charm proof
 
+<<<<<<< TREE
 test: .venv
 	@echo Starting tests...
 	.venv/bin/nosetests --nologcapture --with-coverage unit_tests
@@ -18,6 +19,28 @@
 	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml
 
 publish: lint test
+=======
+unit_test:
+	@echo Starting unit tests...
+	@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
+
+bin/charm_helpers_sync.py:
+	@mkdir -p bin
+	@bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
+		> bin/charm_helpers_sync.py
+test:
+	@echo Starting Amulet tests...
+	# coreycb note: The -v should only be temporary until Amulet sends
+	# raise_status() messages to stderr:
+	#   https://bugs.launchpad.net/amulet/+bug/1320357
+	@juju test -v -p AMULET_HTTP_PROXY
+
+sync: bin/charm_helpers_sync.py
+	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
+	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
+
+publish: lint unit_test
+>>>>>>> MERGE-SOURCE
 	bzr push lp:charms/nova-cloud-controller
 	bzr push lp:charms/trusty/nova-cloud-controller
 
 
=== modified file 'README.txt'
--- README.txt 2014-03-25 09:11:04 +0000
+++ README.txt 2014-09-16 09:08:32 +0000
@@ -4,6 +4,16 @@
 
 Cloud controller node for Openstack nova. Contains nova-schedule, nova-api, nova-network and nova-objectstore.
 
+The neutron-api interface can be used to join this charm with an external neutron-api server. If this is done
+then this charm will shut down its neutron-api service and the external charm will be registered as the
+neutron-api endpoint in keystone. It will also use the quantum-security-groups setting which is passed to
+it by the api service rather than its own quantum-security-groups setting.
+
+If console access is required then console-proxy-ip should be set to a client-accessible IP that resolves
+to the nova-cloud-controller. If running in HA mode then the public vip is used if console-proxy-ip is set
+to local. Note: the console access protocol is baked into a guest when it is created; if you change it,
+console access for existing guests will stop working.
+
 ******************************************************
 Special considerations to be deployed using Postgresql
 ******************************************************
 
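
For illustration only, the console options described above would be set with the juju 1.x CLI; the service name and protocol value here are examples, not part of this branch:

    juju set nova-cloud-controller console-access-protocol=novnc \
        console-proxy-ip=local
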
=== added file 'charm-helpers-hooks.yaml'
--- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000
+++ charm-helpers-hooks.yaml 2014-09-16 09:08:32 +0000
@@ -0,0 +1,12 @@
+branch: lp:charm-helpers
+destination: hooks/charmhelpers
+include:
+    - core
+    - fetch
+    - contrib.openstack|inc=*
+    - contrib.storage
+    - contrib.peerstorage
+    - contrib.hahelpers:
+        - apache
+    - payload.execd
+    - contrib.network.ip
=== added file 'charm-helpers-tests.yaml'
--- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000
+++ charm-helpers-tests.yaml 2014-09-16 09:08:32 +0000
@@ -0,0 +1,5 @@
+branch: lp:charm-helpers
+destination: tests/charmhelpers
+include:
+    - contrib.amulet
+    - contrib.openstack.amulet
=== renamed file 'charm-helpers.yaml' => 'charm-helpers.yaml.THIS'
=== modified file 'config.yaml'
--- config.yaml 2014-09-09 23:43:43 +0000
+++ config.yaml 2014-09-16 09:08:32 +0000
@@ -97,6 +97,7 @@
   # HA configuration settings
   vip:
     type: string
+<<<<<<< TREE
     default:
     description: "Virtual IP to use to front API services in ha configuration"
   vip_iface:
@@ -107,6 +108,13 @@
     type: int
     default: 24
     description: "Netmask that will be used for the Virtual IP"
+=======
+    description: |
+      Virtual IP(s) to use to front API services in HA configuration.
+      .
+      If multiple networks are being used, a VIP should be provided for each
+      network, separated by spaces.
+>>>>>>> MERGE-SOURCE
   ha-bindiface:
     type: string
     default: eth0
@@ -145,8 +153,12 @@
   # Neutron NVP and VMware NSX plugin configuration
   nvp-controllers:
     type: string
+<<<<<<< TREE
     default:
     description: Space delimited addresses of NVP/NSX controllers
+=======
+    description: Space delimited addresses of NVP/NSX controllers
+>>>>>>> MERGE-SOURCE
   nvp-username:
     type: string
     default: admin
@@ -168,6 +180,7 @@
       in NVP before starting Quantum with the nvp plugin.
   nvp-l3-uuid:
     type: string
+<<<<<<< TREE
     default:
     description: |
       This is uuid of the default NVP/NSX L3 Gateway Service.
@@ -191,3 +204,90 @@
       * shared-db or (pgsql-nova-db, pgsql-neutron-db)
       * amqp
       * identity-service
+=======
+    description: |
+      This is uuid of the default NVP/NSX L3 Gateway Service.
+  # end of NVP/NSX configuration
+  # Network configuration options
+  # by default all access is over 'private-address'
+  os-admin-network:
+    type: string
+    description: |
+      The IP address and netmask of the OpenStack Admin network (e.g.,
+      192.168.0.0/24)
+      .
+      This network will be used for admin endpoints.
+  os-internal-network:
+    type: string
+    description: |
+      The IP address and netmask of the OpenStack Internal network (e.g.,
+      192.168.0.0/24)
+      .
+      This network will be used for internal endpoints.
+  os-public-network:
+    type: string
+    description: |
+      The IP address and netmask of the OpenStack Public network (e.g.,
+      192.168.0.0/24)
+      .
+      This network will be used for public endpoints.
+  service-guard:
+    type: boolean
+    default: false
+    description: |
+      Ensure required relations are made and complete before allowing services
+      to be started
+      .
+      By default, services may be up and accepting API request from install
+      onwards.
+      .
+      Enabling this flag ensures that services will not be started until the
+      minimum 'core relations' have been made between this charm and other
+      charms.
+      .
+      For this charm the following relations must be made:
+      .
+      * shared-db or (pgsql-nova-db, pgsql-neutron-db)
+      * amqp
+      * identity-service
+  console-access-protocol:
+    type: string
+    description: |
+      Protocol to use when accessing virtual machine console. Supported types
+      are None, spice, xvpvnc, novnc and vnc (for both xvpvnc and novnc)
+  console-proxy-ip:
+    type: string
+    default: local
+    description: |
+      If console-access-protocol != None then this is the ip published to
+      clients for access to console proxy. Set to local for the ip address of
+      the nova-cloud-controller serving the request to be used
+  console-keymap:
+    type: string
+    default: 'en-us'
+    description: |
+      Console keymap
+  worker-multiplier:
+    type: int
+    default: 2
+    description: |
+      The CPU core multiplier to use when configuring worker processes for
+      Nova and Neutron. By default, the number of workers for each daemon
+      is set to twice the number of CPU cores a service unit has.
+  cpu-allocation-ratio:
+    type: float
+    default: 16.0
+    description: |
+      The per physical core -> virtual core ratio to use in the Nova scheduler.
+      .
+      Increasing this value will increase instance density on compute nodes
+      at the expense of instance performance.
+  ram-allocation-ratio:
+    type: float
+    default: 1.5
+    description: |
+      The physical ram -> virtual ram ratio to use in the Nova scheduler.
+      .
+      Increasing this value will increase instance density on compute nodes
+      at the potential expense of instance performance.
+>>>>>>> MERGE-SOURCE
 
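
As a sketch of the effect of the two new allocation-ratio options: nova's scheduler reads the standard cpu_allocation_ratio and ram_allocation_ratio settings, so the charm defaults above would be expected to land in nova.conf roughly as follows (the exact template output lives in templates/icehouse/nova.conf later in this diff):

    [DEFAULT]
    # from charm option cpu-allocation-ratio
    cpu_allocation_ratio = 16.0
    # from charm option ram-allocation-ratio
    ram_allocation_ratio = 1.5
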
=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-02-17 12:10:27 +0000
+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-09-16 09:08:32 +0000
@@ -146,12 +146,12 @@
     Obtains all relevant configuration from charm configuration required
     for initiating a relation to hacluster:
 
-        ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
+        ha-bindiface, ha-mcastport, vip
 
     returns: dict: A dict containing settings keyed by setting name.
     raises: HAIncompleteConfig if settings are missing.
     '''
-    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
+    settings = ['ha-bindiface', 'ha-mcastport', 'vip']
     conf = {}
     for setting in settings:
         conf[setting] = config_get(setting)
@@ -170,6 +170,7 @@
 
     :configs : OSTemplateRenderer: A config tempating object to inspect for
                                    a complete https context.
+
     :vip_setting: str: Setting in charm config that specifies
                        VIP address.
     '''
 
=== added directory 'hooks/charmhelpers/contrib/network'
=== added file 'hooks/charmhelpers/contrib/network/__init__.py'
=== added file 'hooks/charmhelpers/contrib/network/ip.py'
--- hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/network/ip.py 2014-09-16 09:08:32 +0000
@@ -0,0 +1,174 @@
+import sys
+
+from functools import partial
+
+from charmhelpers.fetch import apt_install
+from charmhelpers.core.hookenv import (
+    ERROR, log, config,
+)
+
+try:
+    import netifaces
+except ImportError:
+    apt_install('python-netifaces')
+    import netifaces
+
+try:
+    import netaddr
+except ImportError:
+    apt_install('python-netaddr')
+    import netaddr
+
+
+def _validate_cidr(network):
+    try:
+        netaddr.IPNetwork(network)
+    except (netaddr.core.AddrFormatError, ValueError):
+        raise ValueError("Network (%s) is not in CIDR presentation format" %
+                         network)
+
+
+def get_address_in_network(network, fallback=None, fatal=False):
+    """
+    Get an IPv4 or IPv6 address within the network from the host.
+
+    :param network (str): CIDR presentation format. For example,
+        '192.168.1.0/24'.
+    :param fallback (str): If no address is found, return fallback.
+    :param fatal (boolean): If no address is found, fallback is not
+        set and fatal is True then exit(1).
+
+    """
+
+    def not_found_error_out():
+        log("No IP address found in network: %s" % network,
+            level=ERROR)
+        sys.exit(1)
+
+    if network is None:
+        if fallback is not None:
+            return fallback
+        else:
+            if fatal:
+                not_found_error_out()
+
+    _validate_cidr(network)
+    network = netaddr.IPNetwork(network)
+    for iface in netifaces.interfaces():
+        addresses = netifaces.ifaddresses(iface)
+        if network.version == 4 and netifaces.AF_INET in addresses:
+            addr = addresses[netifaces.AF_INET][0]['addr']
+            netmask = addresses[netifaces.AF_INET][0]['netmask']
+            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
+            if cidr in network:
+                return str(cidr.ip)
+        if network.version == 6 and netifaces.AF_INET6 in addresses:
+            for addr in addresses[netifaces.AF_INET6]:
+                if not addr['addr'].startswith('fe80'):
+                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
+                                                        addr['netmask']))
+                    if cidr in network:
+                        return str(cidr.ip)
+
+    if fallback is not None:
+        return fallback
+
+    if fatal:
+        not_found_error_out()
+
+    return None
+
+
+def is_ipv6(address):
+    '''Determine whether provided address is IPv6 or not'''
+    try:
+        address = netaddr.IPAddress(address)
+    except netaddr.AddrFormatError:
+        # probably a hostname - so not an address at all!
+        return False
+    else:
+        return address.version == 6
+
+
+def is_address_in_network(network, address):
+    """
+    Determine whether the provided address is within a network range.
+
+    :param network (str): CIDR presentation format. For example,
+        '192.168.1.0/24'.
+    :param address: An individual IPv4 or IPv6 address without a net
+        mask or subnet prefix. For example, '192.168.1.1'.
+    :returns boolean: Flag indicating whether address is in network.
+    """
+    try:
+        network = netaddr.IPNetwork(network)
+    except (netaddr.core.AddrFormatError, ValueError):
+        raise ValueError("Network (%s) is not in CIDR presentation format" %
+                         network)
+    try:
+        address = netaddr.IPAddress(address)
+    except (netaddr.core.AddrFormatError, ValueError):
+        raise ValueError("Address (%s) is not in correct presentation format" %
+                         address)
+    if address in network:
+        return True
+    else:
+        return False
+
+
+def _get_for_address(address, key):
+    """Retrieve an attribute of or the physical interface that
+    the IP address provided could be bound to.
+
+    :param address (str): An individual IPv4 or IPv6 address without a net
+        mask or subnet prefix. For example, '192.168.1.1'.
+    :param key: 'iface' for the physical interface name or an attribute
+        of the configured interface, for example 'netmask'.
+    :returns str: Requested attribute or None if address is not bindable.
+    """
+    address = netaddr.IPAddress(address)
+    for iface in netifaces.interfaces():
+        addresses = netifaces.ifaddresses(iface)
+        if address.version == 4 and netifaces.AF_INET in addresses:
+            addr = addresses[netifaces.AF_INET][0]['addr']
+            netmask = addresses[netifaces.AF_INET][0]['netmask']
+            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
+            if address in cidr:
+                if key == 'iface':
+                    return iface
+                else:
+                    return addresses[netifaces.AF_INET][0][key]
+        if address.version == 6 and netifaces.AF_INET6 in addresses:
+            for addr in addresses[netifaces.AF_INET6]:
+                if not addr['addr'].startswith('fe80'):
+                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
+                                                        addr['netmask']))
+                    if address in cidr:
+                        if key == 'iface':
+                            return iface
+                        else:
+                            return addr[key]
+    return None
+
+
+get_iface_for_address = partial(_get_for_address, key='iface')
+
+get_netmask_for_address = partial(_get_for_address, key='netmask')
+
+
+def get_ipv6_addr(iface="eth0"):
+    try:
+        iface_addrs = netifaces.ifaddresses(iface)
+        if netifaces.AF_INET6 not in iface_addrs:
+            raise Exception("Interface '%s' doesn't have an ipv6 address." % iface)
+
+        addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6]
+        ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80')
+                     and config('vip') != a['addr']]
+        if not ipv6_addr:
+            raise Exception("Interface '%s' doesn't have global ipv6 address." % iface)
+
+        return ipv6_addr[0]
+
+    except ValueError:
+        raise ValueError("Invalid interface '%s'" % iface)
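
A minimal usage sketch for the helpers above (the CIDR and fallback values are illustrative):

    from charmhelpers.contrib.network.ip import (
        get_address_in_network,
        get_netmask_for_address,
    )

    # Pick the local address that falls inside the configured network,
    # falling back to a known address when nothing matches.
    addr = get_address_in_network('192.168.1.0/24', fallback='10.0.0.1')
    if addr is not None:
        netmask = get_netmask_for_address(addr)  # e.g. '255.255.255.0'
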
=== added directory 'hooks/charmhelpers/contrib/openstack/amulet'
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py'
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-09-16 09:08:32 +0000
@@ -0,0 +1,61 @@
+from charmhelpers.contrib.amulet.deployment import (
+    AmuletDeployment
+)
+
+
+class OpenStackAmuletDeployment(AmuletDeployment):
+    """OpenStack amulet deployment.
+
+    This class inherits from AmuletDeployment and has additional support
+    that is specifically for use by OpenStack charms.
+    """
+
+    def __init__(self, series=None, openstack=None, source=None):
+        """Initialize the deployment environment."""
+        super(OpenStackAmuletDeployment, self).__init__(series)
+        self.openstack = openstack
+        self.source = source
+
+    def _add_services(self, this_service, other_services):
+        """Add services to the deployment and set openstack-origin."""
+        super(OpenStackAmuletDeployment, self)._add_services(this_service,
+                                                             other_services)
+        name = 0
+        services = other_services
+        services.append(this_service)
+        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
+
+        if self.openstack:
+            for svc in services:
+                if svc[name] not in use_source:
+                    config = {'openstack-origin': self.openstack}
+                    self.d.configure(svc[name], config)
+
+        if self.source:
+            for svc in services:
+                if svc[name] in use_source:
+                    config = {'source': self.source}
+                    self.d.configure(svc[name], config)
+
+    def _configure_services(self, configs):
+        """Configure all of the services."""
+        for service, config in configs.iteritems():
+            self.d.configure(service, config)
+
+    def _get_openstack_release(self):
+        """Get openstack release.
+
+        Return an integer representing the enum value of the openstack
+        release.
+        """
+        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
+         self.precise_havana, self.precise_icehouse,
+         self.trusty_icehouse) = range(6)
+        releases = {
+            ('precise', None): self.precise_essex,
+            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
+            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
+            ('precise', 'cloud:precise-havana'): self.precise_havana,
+            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
+            ('trusty', None): self.trusty_icehouse}
+        return releases[(self.series, self.openstack)]
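
A sketch of how a charm's test subclasses this class (names and the service list are illustrative; in this version of the helpers services appear to be (name, unit-count) tuples, and tests/basic_deployment.py in this branch follows the same pattern):

    from charmhelpers.contrib.openstack.amulet.deployment import (
        OpenStackAmuletDeployment
    )

    class NovaCCBasicDeployment(OpenStackAmuletDeployment):
        def __init__(self, series=None, openstack=None, source=None):
            super(NovaCCBasicDeployment, self).__init__(series, openstack,
                                                        source)
            # services in the use_source list get 'source' applied
            # instead of 'openstack-origin'
            self._add_services(('nova-cloud-controller', 1),
                               [('mysql', 1), ('rabbitmq-server', 1)])
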
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-09-16 09:08:32 +0000
@@ -0,0 +1,275 @@
+import logging
+import os
+import time
+import urllib
+
+import glanceclient.v1.client as glance_client
+import keystoneclient.v2_0 as keystone_client
+import novaclient.v1_1.client as nova_client
+
+from charmhelpers.contrib.amulet.utils import (
+    AmuletUtils
+)
+
+DEBUG = logging.DEBUG
+ERROR = logging.ERROR
+
+
+class OpenStackAmuletUtils(AmuletUtils):
+    """OpenStack amulet utilities.
+
+    This class inherits from AmuletUtils and has additional support
+    that is specifically for use by OpenStack charms.
+    """
+
+    def __init__(self, log_level=ERROR):
+        """Initialize the deployment environment."""
+        super(OpenStackAmuletUtils, self).__init__(log_level)
+
+    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
+                               public_port, expected):
+        """Validate endpoint data.
+
+        Validate actual endpoint data vs expected endpoint data. The ports
+        are used to find the matching endpoint.
+        """
+        found = False
+        for ep in endpoints:
+            self.log.debug('endpoint: {}'.format(repr(ep)))
+            if (admin_port in ep.adminurl and
+                    internal_port in ep.internalurl and
+                    public_port in ep.publicurl):
+                found = True
+                actual = {'id': ep.id,
+                          'region': ep.region,
+                          'adminurl': ep.adminurl,
+                          'internalurl': ep.internalurl,
+                          'publicurl': ep.publicurl,
+                          'service_id': ep.service_id}
+                ret = self._validate_dict_data(expected, actual)
+                if ret:
+                    return 'unexpected endpoint data - {}'.format(ret)
+
+        if not found:
+            return 'endpoint not found'
+
+    def validate_svc_catalog_endpoint_data(self, expected, actual):
+        """Validate service catalog endpoint data.
+
+        Validate a list of actual service catalog endpoints vs a list of
+        expected service catalog endpoints.
+        """
+        self.log.debug('actual: {}'.format(repr(actual)))
+        for k, v in expected.iteritems():
+            if k in actual:
+                ret = self._validate_dict_data(expected[k][0], actual[k][0])
+                if ret:
+                    return self.endpoint_error(k, ret)
+            else:
+                return "endpoint {} does not exist".format(k)
+        return ret
+
+    def validate_tenant_data(self, expected, actual):
+        """Validate tenant data.
+
+        Validate a list of actual tenant data vs list of expected tenant
+        data.
+        """
+        self.log.debug('actual: {}'.format(repr(actual)))
+        for e in expected:
+            found = False
+            for act in actual:
+                a = {'enabled': act.enabled, 'description': act.description,
+                     'name': act.name, 'id': act.id}
+                if e['name'] == a['name']:
+                    found = True
+                    ret = self._validate_dict_data(e, a)
+                    if ret:
+                        return "unexpected tenant data - {}".format(ret)
+            if not found:
+                return "tenant {} does not exist".format(e['name'])
+        return ret
+
+    def validate_role_data(self, expected, actual):
+        """Validate role data.
+
+        Validate a list of actual role data vs a list of expected role
+        data.
+        """
+        self.log.debug('actual: {}'.format(repr(actual)))
+        for e in expected:
+            found = False
+            for act in actual:
+                a = {'name': act.name, 'id': act.id}
+                if e['name'] == a['name']:
+                    found = True
+                    ret = self._validate_dict_data(e, a)
+                    if ret:
+                        return "unexpected role data - {}".format(ret)
+            if not found:
+                return "role {} does not exist".format(e['name'])
+        return ret
+
+    def validate_user_data(self, expected, actual):
+        """Validate user data.
+
+        Validate a list of actual user data vs a list of expected user
+        data.
+        """
+        self.log.debug('actual: {}'.format(repr(actual)))
+        for e in expected:
+            found = False
+            for act in actual:
+                a = {'enabled': act.enabled, 'name': act.name,
+                     'email': act.email, 'tenantId': act.tenantId,
+                     'id': act.id}
+                if e['name'] == a['name']:
+                    found = True
+                    ret = self._validate_dict_data(e, a)
+                    if ret:
+                        return "unexpected user data - {}".format(ret)
+            if not found:
+                return "user {} does not exist".format(e['name'])
+        return ret
+
+    def validate_flavor_data(self, expected, actual):
+        """Validate flavor data.
+
+        Validate a list of actual flavors vs a list of expected flavors.
+        """
+        self.log.debug('actual: {}'.format(repr(actual)))
+        act = [a.name for a in actual]
+        return self._validate_list_data(expected, act)
+
+    def tenant_exists(self, keystone, tenant):
+        """Return True if tenant exists."""
+        return tenant in [t.name for t in keystone.tenants.list()]
+
+    def authenticate_keystone_admin(self, keystone_sentry, user, password,
+                                    tenant):
+        """Authenticates admin user with the keystone admin endpoint."""
+        unit = keystone_sentry
+        service_ip = unit.relation('shared-db',
+                                   'mysql:shared-db')['private-address']
+        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
+        return keystone_client.Client(username=user, password=password,
+                                      tenant_name=tenant, auth_url=ep)
+
+    def authenticate_keystone_user(self, keystone, user, password, tenant):
+        """Authenticates a regular user with the keystone public endpoint."""
+        ep = keystone.service_catalog.url_for(service_type='identity',
+                                              endpoint_type='publicURL')
+        return keystone_client.Client(username=user, password=password,
+                                      tenant_name=tenant, auth_url=ep)
+
+    def authenticate_glance_admin(self, keystone):
+        """Authenticates admin user with glance."""
+        ep = keystone.service_catalog.url_for(service_type='image',
+                                              endpoint_type='adminURL')
+        return glance_client.Client(ep, token=keystone.auth_token)
+
+    def authenticate_nova_user(self, keystone, user, password, tenant):
+        """Authenticates a regular user with nova-api."""
+        ep = keystone.service_catalog.url_for(service_type='identity',
+                                              endpoint_type='publicURL')
+        return nova_client.Client(username=user, api_key=password,
+                                  project_id=tenant, auth_url=ep)
+
+    def create_cirros_image(self, glance, image_name):
+        """Download the latest cirros image and upload it to glance."""
+        http_proxy = os.getenv('AMULET_HTTP_PROXY')
+        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
+        if http_proxy:
+            proxies = {'http': http_proxy}
+            opener = urllib.FancyURLopener(proxies)
+        else:
+            opener = urllib.FancyURLopener()
+
+        f = opener.open("http://download.cirros-cloud.net/version/released")
+        version = f.read().strip()
+        cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
+
+        if not os.path.exists(cirros_img):
+            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
+                                                  version, cirros_img)
+            opener.retrieve(cirros_url, cirros_img)
+        f.close()
+
+        with open(cirros_img) as f:
+            image = glance.images.create(name=image_name, is_public=True,
+                                         disk_format='qcow2',
+                                         container_format='bare', data=f)
+        count = 1
+        status = image.status
+        while status != 'active' and count < 10:
+            time.sleep(3)
+            image = glance.images.get(image.id)
+            status = image.status
+            self.log.debug('image status: {}'.format(status))
+            count += 1
+
+        if status != 'active':
+            self.log.error('image creation timed out')
+            return None
+
+        return image
+
+    def delete_image(self, glance, image):
+        """Delete the specified image."""
+        num_before = len(list(glance.images.list()))
+        glance.images.delete(image)
+
+        count = 1
+        num_after = len(list(glance.images.list()))
+        while num_after != (num_before - 1) and count < 10:
+            time.sleep(3)
+            num_after = len(list(glance.images.list()))
+            self.log.debug('number of images: {}'.format(num_after))
+            count += 1
+
+        if num_after != (num_before - 1):
+            self.log.error('image deletion timed out')
+            return False
+
+        return True
+
+    def create_instance(self, nova, image_name, instance_name, flavor):
+        """Create the specified instance."""
+        image = nova.images.find(name=image_name)
+        flavor = nova.flavors.find(name=flavor)
+        instance = nova.servers.create(name=instance_name, image=image,
+                                       flavor=flavor)
+
+        count = 1
+        status = instance.status
+        while status != 'ACTIVE' and count < 60:
+            time.sleep(3)
+            instance = nova.servers.get(instance.id)
+            status = instance.status
+            self.log.debug('instance status: {}'.format(status))
+            count += 1
+
+        if status != 'ACTIVE':
+            self.log.error('instance creation timed out')
+            return None
+
+        return instance
+
+    def delete_instance(self, nova, instance):
+        """Delete the specified instance."""
+        num_before = len(list(nova.servers.list()))
+        nova.servers.delete(instance)
+
+        count = 1
+        num_after = len(list(nova.servers.list()))
+        while num_after != (num_before - 1) and count < 10:
+            time.sleep(3)
+            num_after = len(list(nova.servers.list()))
+            self.log.debug('number of instances: {}'.format(num_after))
+            count += 1
+
+        if num_after != (num_before - 1):
+            self.log.error('instance deletion timed out')
+            return False
+
+        return True
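
These utilities are driven from a deployed topology's sentry units; a hedged example (the keystone_sentry variable and all credentials are placeholders):

    import logging
    from charmhelpers.contrib.openstack.amulet.utils import (
        OpenStackAmuletUtils
    )

    u = OpenStackAmuletUtils(logging.DEBUG)
    keystone = u.authenticate_keystone_admin(keystone_sentry, user='admin',
                                             password='openstack',
                                             tenant='admin')
    nova = u.authenticate_nova_user(keystone, user='demo',
                                    password='password', tenant='demo')
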
=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
--- hooks/charmhelpers/contrib/openstack/context.py 2014-08-13 15:53:46 +0000
+++ hooks/charmhelpers/contrib/openstack/context.py 2014-09-16 09:08:32 +0000
@@ -21,6 +21,7 @@
     relation_get,
     relation_ids,
     related_units,
+    relation_set,
     unit_get,
     unit_private_ip,
     ERROR,
@@ -43,6 +44,11 @@
     neutron_plugin_attribute,
 )
 
+from charmhelpers.contrib.network.ip import (
+    get_address_in_network,
+    get_ipv6_addr,
+)
+
 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
 
 
@@ -135,8 +141,26 @@
                 'Missing required charm config options. '
                 '(database name and user)')
             raise OSContextError
+
         ctxt = {}
 
+        # NOTE(jamespage) if mysql charm provides a network upon which
+        # access to the database should be made, reconfigure relation
+        # with the service units local address and defer execution
+        access_network = relation_get('access-network')
+        if access_network is not None:
+            if self.relation_prefix is not None:
+                hostname_key = "{}_hostname".format(self.relation_prefix)
+            else:
+                hostname_key = "hostname"
+            access_hostname = get_address_in_network(access_network,
+                                                     unit_get('private-address'))
+            set_hostname = relation_get(attribute=hostname_key,
+                                        unit=local_unit())
+            if set_hostname != access_hostname:
+                relation_set(relation_settings={hostname_key: access_hostname})
+                return ctxt  # Defer any further hook execution for now....
+
         password_setting = 'password'
         if self.relation_prefix:
             password_setting = self.relation_prefix + '_password'
@@ -244,23 +268,31 @@
 
 
 class AMQPContext(OSContextGenerator):
-    interfaces = ['amqp']
 
-    def __init__(self, ssl_dir=None):
+    def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
         self.ssl_dir = ssl_dir
+        self.rel_name = rel_name
+        self.relation_prefix = relation_prefix
+        self.interfaces = [rel_name]
 
     def __call__(self):
         log('Generating template context for amqp')
         conf = config()
+        user_setting = 'rabbit-user'
+        vhost_setting = 'rabbit-vhost'
+        if self.relation_prefix:
+            user_setting = self.relation_prefix + '-rabbit-user'
+            vhost_setting = self.relation_prefix + '-rabbit-vhost'
+
         try:
-            username = conf['rabbit-user']
-            vhost = conf['rabbit-vhost']
+            username = conf[user_setting]
+            vhost = conf[vhost_setting]
         except KeyError as e:
            log('Could not generate shared_db context. '
                'Missing required charm config options: %s.' % e)
            raise OSContextError
         ctxt = {}
-        for rid in relation_ids('amqp'):
+        for rid in relation_ids(self.rel_name):
             ha_vip_only = False
             for unit in related_units(rid):
                 if relation_get('clustered', rid=rid, unit=unit):
@@ -333,10 +365,12 @@
         use_syslog = str(config('use-syslog')).lower()
         for rid in relation_ids('ceph'):
             for unit in related_units(rid):
-                mon_hosts.append(relation_get('private-address', rid=rid,
-                                              unit=unit))
                 auth = relation_get('auth', rid=rid, unit=unit)
                 key = relation_get('key', rid=rid, unit=unit)
+                ceph_addr = \
+                    relation_get('ceph-public-address', rid=rid, unit=unit) or \
+                    relation_get('private-address', rid=rid, unit=unit)
+                mon_hosts.append(ceph_addr)
 
         ctxt = {
             'mon_hosts': ' '.join(mon_hosts),
@@ -370,7 +404,12 @@
 
         cluster_hosts = {}
         l_unit = local_unit().replace('/', '-')
-        cluster_hosts[l_unit] = unit_get('private-address')
+        if config('prefer-ipv6'):
+            addr = get_ipv6_addr()
+        else:
+            addr = unit_get('private-address')
+        cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),
+                                                       addr)
 
         for rid in relation_ids('cluster'):
             for unit in related_units(rid):
@@ -381,6 +420,16 @@
         ctxt = {
             'units': cluster_hosts,
         }
+
+        if config('prefer-ipv6'):
+            ctxt['local_host'] = 'ip6-localhost'
+            ctxt['haproxy_host'] = '::'
+            ctxt['stat_port'] = ':::8888'
+        else:
+            ctxt['local_host'] = '127.0.0.1'
+            ctxt['haproxy_host'] = '0.0.0.0'
+            ctxt['stat_port'] = ':8888'
+
         if len(cluster_hosts.keys()) > 1:
             # Enable haproxy when we have enough peers.
             log('Ensuring haproxy enabled in /etc/default/haproxy.')
@@ -419,12 +468,13 @@
     """
     Generates a context for an apache vhost configuration that configures
     HTTPS reverse proxying for one or many endpoints. Generated context
-    looks something like:
-    {
-        'namespace': 'cinder',
-        'private_address': 'iscsi.mycinderhost.com',
-        'endpoints': [(8776, 8766), (8777, 8767)]
-    }
+    looks something like::
+
+        {
+            'namespace': 'cinder',
+            'private_address': 'iscsi.mycinderhost.com',
+            'endpoints': [(8776, 8766), (8777, 8767)]
+        }
 
     The endpoints list consists of a tuples mapping external ports
     to internal ports.
@@ -542,6 +592,26 @@
 
         return nvp_ctxt
 
+    def n1kv_ctxt(self):
+        driver = neutron_plugin_attribute(self.plugin, 'driver',
+                                          self.network_manager)
+        n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
+                                               self.network_manager)
+        n1kv_ctxt = {
+            'core_plugin': driver,
+            'neutron_plugin': 'n1kv',
+            'neutron_security_groups': self.neutron_security_groups,
+            'local_ip': unit_private_ip(),
+            'config': n1kv_config,
+            'vsm_ip': config('n1kv-vsm-ip'),
+            'vsm_username': config('n1kv-vsm-username'),
+            'vsm_password': config('n1kv-vsm-password'),
+            'restrict_policy_profiles': config(
+                'n1kv_restrict_policy_profiles'),
+        }
+
+        return n1kv_ctxt
+
     def neutron_ctxt(self):
         if https():
             proto = 'https'
@@ -573,6 +643,8 @@
             ctxt.update(self.ovs_ctxt())
         elif self.plugin in ['nvp', 'nsx']:
             ctxt.update(self.nvp_ctxt())
+        elif self.plugin == 'n1kv':
+            ctxt.update(self.n1kv_ctxt())
 
         alchemy_flags = config('neutron-alchemy-flags')
         if alchemy_flags:
@@ -612,7 +684,7 @@
     The subordinate interface allows subordinates to export their
     configuration requirements to the principle for multiple config
     files and multiple serivces. Ie, a subordinate that has interfaces
-    to both glance and nova may export to following yaml blob as json:
+    to both glance and nova may export to following yaml blob as json::
 
         glance:
             /etc/glance/glance-api.conf:
@@ -631,7 +703,8 @@
 
     It is then up to the principle charms to subscribe this context to
     the service+config file it is interestd in. Configuration data will
-    be available in the template context, in glance's case, as:
+    be available in the template context, in glance's case, as::
+
         ctxt = {
             ... other context ...
             'subordinate_config': {
@@ -684,15 +757,38 @@
 
         sub_config = sub_config[self.config_file]
         for k, v in sub_config.iteritems():
-            if k == 'sections':
-                for section, config_dict in v.iteritems():
-                    log("adding section '%s'" % (section))
-                    ctxt[k][section] = config_dict
-            else:
-                ctxt[k] = v
-
-        log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
-
+<<<<<<< TREE
+            if k == 'sections':
+                for section, config_dict in v.iteritems():
+                    log("adding section '%s'" % (section))
+                    ctxt[k][section] = config_dict
+            else:
+                ctxt[k] = v
+
+        log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
+
+=======
+            if k == 'sections':
+                for section, config_dict in v.iteritems():
+                    log("adding section '%s'" % (section))
+                    ctxt[k][section] = config_dict
+            else:
+                ctxt[k] = v
+
+        log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
+
+        return ctxt
+
+
+class LogLevelContext(OSContextGenerator):
+
+    def __call__(self):
+        ctxt = {}
+        ctxt['debug'] = \
+            False if config('debug') is None else config('debug')
+        ctxt['verbose'] = \
+            False if config('verbose') is None else config('verbose')
+>>>>>>> MERGE-SOURCE
         return ctxt
 
 
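
The AMQPContext change above lets a single charm consume more than one rabbit relation; a sketch under assumed names (the relation name and prefix here are illustrative, not necessarily what nova-cloud-controller itself uses):

    from charmhelpers.contrib.openstack.context import AMQPContext

    # Classic single relation, unchanged behaviour:
    amqp = AMQPContext(ssl_dir='/etc/nova')

    # Second, prefixed relation: reads neutron-rabbit-user and
    # neutron-rabbit-vhost from charm config and walks the
    # 'amqp-neutron' relation ids instead of 'amqp'.
    amqp_neutron = AMQPContext(ssl_dir='/etc/neutron',
                               rel_name='amqp-neutron',
                               relation_prefix='neutron')
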
=== added file 'hooks/charmhelpers/contrib/openstack/ip.py'
--- hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-09-16 09:08:32 +0000
@@ -0,0 +1,79 @@
+from charmhelpers.core.hookenv import (
+    config,
+    unit_get,
+)
+
+from charmhelpers.contrib.network.ip import (
+    get_address_in_network,
+    is_address_in_network,
+    is_ipv6,
+    get_ipv6_addr,
+)
+
+from charmhelpers.contrib.hahelpers.cluster import is_clustered
+
+PUBLIC = 'public'
+INTERNAL = 'int'
+ADMIN = 'admin'
+
+_address_map = {
+    PUBLIC: {
+        'config': 'os-public-network',
+        'fallback': 'public-address'
+    },
+    INTERNAL: {
+        'config': 'os-internal-network',
+        'fallback': 'private-address'
+    },
+    ADMIN: {
+        'config': 'os-admin-network',
+        'fallback': 'private-address'
+    }
+}
+
+
+def canonical_url(configs, endpoint_type=PUBLIC):
+    '''
+    Returns the correct HTTP URL to this host given the state of HTTPS
+    configuration, hacluster and charm configuration.
+
+    :configs OSTemplateRenderer: A config tempating object to inspect for
+                                 a complete https context.
+    :endpoint_type str: The endpoint type to resolve.
+
+    :returns str: Base URL for services on the current service unit.
+    '''
+    scheme = 'http'
+    if 'https' in configs.complete_contexts():
+        scheme = 'https'
+    address = resolve_address(endpoint_type)
+    if is_ipv6(address):
+        address = "[{}]".format(address)
+    return '%s://%s' % (scheme, address)
+
+
+def resolve_address(endpoint_type=PUBLIC):
+    resolved_address = None
+    if is_clustered():
+        if config(_address_map[endpoint_type]['config']) is None:
+            # Assume vip is simple and pass back directly
+            resolved_address = config('vip')
+        else:
+            for vip in config('vip').split():
+                if is_address_in_network(
+                        config(_address_map[endpoint_type]['config']),
+                        vip):
+                    resolved_address = vip
+    else:
+        if config('prefer-ipv6'):
+            fallback_addr = get_ipv6_addr()
+        else:
+            fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
+        resolved_address = get_address_in_network(
+            config(_address_map[endpoint_type]['config']), fallback_addr)
+
+    if resolved_address is None:
+        raise ValueError('Unable to resolve a suitable IP address'
+                         ' based on charm state and configuration')
+    else:
+        return resolved_address
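
A sketch of how a charm would consume these helpers when registering keystone endpoints (CONFIGS and the port value are placeholders for the charm's own renderer and API port):

    from charmhelpers.contrib.openstack.ip import canonical_url, PUBLIC, ADMIN

    # CONFIGS is the charm's registered OSConfigRenderer instance
    public_url = '{}:{}'.format(canonical_url(CONFIGS, PUBLIC), 8774)
    admin_url = '{}:{}'.format(canonical_url(CONFIGS, ADMIN), 8774)
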
=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
--- hooks/charmhelpers/contrib/openstack/neutron.py 2014-05-19 11:38:09 +0000
+++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-09-16 09:08:32 +0000
@@ -128,6 +128,20 @@
             'server_packages': ['neutron-server',
                                 'neutron-plugin-vmware'],
             'server_services': ['neutron-server']
+        },
+        'n1kv': {
+            'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
+            'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=NEUTRON_CONF_DIR)],
+            'services': [],
+            'packages': [['neutron-plugin-cisco']],
+            'server_packages': ['neutron-server',
+                                'neutron-plugin-cisco'],
+            'server_services': ['neutron-server']
         }
     }
     if release >= 'icehouse':
 
=== modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-02-27 09:26:38 +0000
+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-09-16 09:08:32 +0000
@@ -1,6 +1,6 @@
 global
-    log 127.0.0.1 local0
-    log 127.0.0.1 local1 notice
+    log {{ local_host }} local0
+    log {{ local_host }} local1 notice
     maxconn 20000
     user haproxy
     group haproxy
@@ -17,7 +17,7 @@
     timeout client 30000
     timeout server 30000
 
-listen stats :8888
+listen stats {{ stat_port }}
     mode http
     stats enable
     stats hide-version
@@ -27,7 +27,12 @@
 
 {% if units -%}
 {% for service, ports in service_ports.iteritems() -%}
-listen {{ service }} 0.0.0.0:{{ ports[0] }}
+listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }}
+    balance roundrobin
+    {% for unit, address in units.iteritems() -%}
+    server {{ unit }} {{ address }}:{{ ports[1] }} check
+    {% endfor %}
+listen {{ service }}_ipv6 :::{{ ports[0] }}
     balance roundrobin
     {% for unit, address in units.iteritems() -%}
     server {{ unit }} {{ address }}:{{ ports[1] }} check
 
=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
--- hooks/charmhelpers/contrib/openstack/templating.py 2014-02-24 19:31:57 +0000
+++ hooks/charmhelpers/contrib/openstack/templating.py 2014-09-16 09:08:32 +0000
@@ -30,17 +30,17 @@
     loading dir.
 
     A charm may also ship a templates dir with this module
-    and it will be appended to the bottom of the search list, eg:
-    hooks/charmhelpers/contrib/openstack/templates.
-
-    :param templates_dir: str: Base template directory containing release
-                               sub-directories.
-    :param os_release : str: OpenStack release codename to construct template
-                             loader.
-
-    :returns : jinja2.ChoiceLoader constructed with a list of
+    and it will be appended to the bottom of the search list, eg::
+
+        hooks/charmhelpers/contrib/openstack/templates
+
+    :param templates_dir (str): Base template directory containing release
+        sub-directories.
+    :param os_release (str): OpenStack release codename to construct template
+        loader.
+    :returns: jinja2.ChoiceLoader constructed with a list of
               jinja2.FilesystemLoaders, ordered in descending
               order by OpenStack release.
     """
     tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
                  for rel in OPENSTACK_CODENAMES.itervalues()]
@@ -111,7 +111,8 @@
     and ease the burden of managing config templates across multiple OpenStack
     releases.
 
-    Basic usage:
+    Basic usage::
+
         # import some common context generates from charmhelpers
         from charmhelpers.contrib.openstack import context
 
@@ -131,21 +132,19 @@
         # write out all registered configs
         configs.write_all()
 
-    Details:
+    **OpenStack Releases and template loading**
 
-    OpenStack Releases and template loading
-    ---------------------------------------
     When the object is instantiated, it is associated with a specific OS
     release. This dictates how the template loader will be constructed.
 
     The constructed loader attempts to load the template from several places
     in the following order:
     - from the most recent OS release-specific template dir (if one exists)
     - the base templates_dir
     - a template directory shipped in the charm with this helper file.
 
-
-    For the example above, '/tmp/templates' contains the following structure:
+    For the example above, '/tmp/templates' contains the following structure::
+
         /tmp/templates/nova.conf
         /tmp/templates/api-paste.ini
         /tmp/templates/grizzly/api-paste.ini
@@ -169,8 +168,8 @@
     $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
     us to ship common templates (haproxy, apache) with the helpers.
 
-    Context generators
-    ---------------------------------------
+    **Context generators**
+
     Context generators are used to generate template contexts during hook
     execution. Doing so may require inspecting service relations, charm
     config, etc. When registered, a config file is associated with a list
 
=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
--- hooks/charmhelpers/contrib/openstack/utils.py 2014-08-27 07:14:03 +0000
+++ hooks/charmhelpers/contrib/openstack/utils.py 2014-09-16 09:08:32 +0000
@@ -3,7 +3,6 @@
 # Common python helper functions used for OpenStack charms.
 from collections import OrderedDict
 
-import apt_pkg as apt
 import subprocess
 import os
 import socket
@@ -41,7 +40,8 @@
     ('quantal', 'folsom'),
     ('raring', 'grizzly'),
     ('saucy', 'havana'),
-    ('trusty', 'icehouse')
+    ('trusty', 'icehouse'),
+    ('utopic', 'juno'),
 ])
 
 
@@ -52,6 +52,7 @@
     ('2013.1', 'grizzly'),
     ('2013.2', 'havana'),
     ('2014.1', 'icehouse'),
+    ('2014.2', 'juno'),
 ])
 
 # The ugly duckling
@@ -83,6 +84,8 @@
     '''Derive OpenStack release codename from a given installation source.'''
     ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
     rel = ''
+    if src is None:
+        return rel
     if src in ['distro', 'distro-proposed']:
         try:
             rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
@@ -130,8 +133,14 @@
 
 def get_os_codename_package(package, fatal=True):
     '''Derive OpenStack release codename from an installed package.'''
-
-    cache = apt_cache()
+<<<<<<< TREE
+
+    cache = apt_cache()
+=======
+    import apt_pkg as apt
+
+    cache = apt_cache()
+>>>>>>> MERGE-SOURCE
 
     try:
         pkg = cache[package]
@@ -182,7 +191,7 @@
     for version, cname in vers_map.iteritems():
         if cname == codename:
             return version
-    #e = "Could not determine OpenStack version for package: %s" % pkg
+    # e = "Could not determine OpenStack version for package: %s" % pkg
     # error_out(e)
 
 
@@ -268,6 +277,9 @@
         'icehouse': 'precise-updates/icehouse',
         'icehouse/updates': 'precise-updates/icehouse',
         'icehouse/proposed': 'precise-proposed/icehouse',
+        'juno': 'trusty-updates/juno',
+        'juno/updates': 'trusty-updates/juno',
+        'juno/proposed': 'trusty-proposed/juno',
     }
 
     try:
@@ -315,6 +327,7 @@
 
     """
 
+    import apt_pkg as apt
     src = config('openstack-origin')
     cur_vers = get_os_version_package(package)
     available_vers = get_os_version_install_source(src)
 
=== added directory 'hooks/charmhelpers/contrib/peerstorage'
=== added file 'hooks/charmhelpers/contrib/peerstorage/__init__.py'
--- hooks/charmhelpers/contrib/peerstorage/__init__.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/peerstorage/__init__.py 2014-09-16 09:08:32 +0000
@@ -0,0 +1,83 @@
+from charmhelpers.core.hookenv import (
+    relation_ids,
+    relation_get,
+    local_unit,
+    relation_set,
+)
+
+"""
+This helper provides functions to support use of a peer relation
+for basic key/value storage, with the added benefit that all storage
+can be replicated across peer units, so this is really useful for
+services that issue usernames/passwords to remote services.
+
+def shared_db_changed():
+    # Only the lead unit should create passwords
+    if not is_leader():
+        return
+    username = relation_get('username')
+    key = '{}.password'.format(username)
+    # Attempt to retrieve any existing password for this user
+    password = peer_retrieve(key)
+    if password is None:
+        # New user, create password and store
+        password = pwgen(length=64)
+        peer_store(key, password)
+    create_access(username, password)
+    relation_set(password=password)
+
+
+def cluster_changed():
+    # Echo any relation data other than *-address
+    # back onto the peer relation so all units have
+    # all *.password keys stored on their local relation
+    # for later retrieval.
+    peer_echo()
+
+"""
+
+
+def peer_retrieve(key, relation_name='cluster'):
+    """Retrieve a named key from peer relation relation_name."""
+    cluster_rels = relation_ids(relation_name)
+    if len(cluster_rels) > 0:
+        cluster_rid = cluster_rels[0]
+        return relation_get(attribute=key, rid=cluster_rid,
+                            unit=local_unit())
+    else:
+        raise ValueError('Unable to detect '
+                         'peer relation {}'.format(relation_name))
+
+
+def peer_store(key, value, relation_name='cluster'):
+    """Store the key/value pair on the named peer relation relation_name."""
+    cluster_rels = relation_ids(relation_name)
+    if len(cluster_rels) > 0:
+        cluster_rid = cluster_rels[0]
+        relation_set(relation_id=cluster_rid,
+                     relation_settings={key: value})
+    else:
+        raise ValueError('Unable to detect '
+                         'peer relation {}'.format(relation_name))
+
+
+def peer_echo(includes=None):
+    """Echo filtered attributes back onto the same relation for storage.
+
+    Note that this helper must only be called within a peer relation
+    changed hook.
+    """
+    rdata = relation_get()
+    echo_data = {}
+    if includes is None:
+        echo_data = rdata.copy()
+        for ex in ['private-address', 'public-address']:
+            if ex in echo_data:
+                echo_data.pop(ex)
+    else:
+        for attribute, value in rdata.iteritems():
+            for include in includes:
+                if include in attribute:
+                    echo_data[attribute] = value
+    if len(echo_data) > 0:
+        relation_set(relation_settings=echo_data)
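The module docstring above sketches the intended pattern in pseudocode. Concretely, a pair of hooks built on these helpers might look like the following sketch (illustrative only: `is_leader` and `pwgen` come from elsewhere in charmhelpers, the resource name passed to `is_leader` is an example, and the hypothetical `grant_access` stands in for whatever actually provisions the user):

    # Sketch of the peer-storage pattern; names noted above are assumptions.
    from charmhelpers.contrib.peerstorage import (
        peer_store,
        peer_retrieve,
        peer_echo,
    )
    from charmhelpers.contrib.hahelpers.cluster import is_leader
    from charmhelpers.core.host import pwgen
    from charmhelpers.core.hookenv import relation_get, relation_set


    def shared_db_changed():
        if not is_leader('res_example_vip'):  # only the leader issues passwords
            return
        username = relation_get('username')
        key = '{}.password'.format(username)
        password = peer_retrieve(key)  # reuse a previously issued password
        if password is None:
            password = pwgen(length=64)
            peer_store(key, password)  # replicate via the peer relation
        grant_access(username, password)  # hypothetical provisioning step
        relation_set(password=password)


    def cluster_changed():
        # Echo only password keys back onto the peer relation so every
        # unit can later peer_retrieve() them.
        peer_echo(includes=['password'])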
=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-03-27 11:02:24 +0000
+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-09-16 09:08:32 +0000
@@ -303,7 +303,7 @@
                        blk_device, fstype, system_services=[]):
     """
     NOTE: This function must only be called from a single service unit for
-    the same rbd_img otherwise data loss will occur.
+          the same rbd_img otherwise data loss will occur.

     Ensures given pool and RBD image exists, is mapped to a block device,
     and the device is formatted and mounted at the given mount_point.

=== added file 'hooks/charmhelpers/core/fstab.py'
--- hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/fstab.py 2014-09-16 09:08:32 +0000
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
+
+import os
+
+
+class Fstab(file):
+    """This class extends file in order to implement a file reader/writer
+    for file `/etc/fstab`
+    """
+
+    class Entry(object):
+        """Entry class represents a non-comment line on the `/etc/fstab` file
+        """
+        def __init__(self, device, mountpoint, filesystem,
+                     options, d=0, p=0):
+            self.device = device
+            self.mountpoint = mountpoint
+            self.filesystem = filesystem
+
+            if not options:
+                options = "defaults"
+
+            self.options = options
+            self.d = d
+            self.p = p
+
+        def __eq__(self, o):
+            return str(self) == str(o)
+
+        def __str__(self):
+            return "{} {} {} {} {} {}".format(self.device,
+                                              self.mountpoint,
+                                              self.filesystem,
+                                              self.options,
+                                              self.d,
+                                              self.p)
+
+    DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
+
+    def __init__(self, path=None):
+        if path:
+            self._path = path
+        else:
+            self._path = self.DEFAULT_PATH
+        file.__init__(self, self._path, 'r+')
+
+    def _hydrate_entry(self, line):
+        # NOTE: use split with no arguments to split on any
+        #       whitespace including tabs
+        return Fstab.Entry(*filter(
+            lambda x: x not in ('', None),
+            line.strip("\n").split()))
+
+    @property
+    def entries(self):
+        self.seek(0)
+        for line in self.readlines():
+            try:
+                if not line.startswith("#"):
+                    yield self._hydrate_entry(line)
+            except ValueError:
+                pass
+
+    def get_entry_by_attr(self, attr, value):
+        for entry in self.entries:
+            e_attr = getattr(entry, attr)
+            if e_attr == value:
+                return entry
+        return None
+
+    def add_entry(self, entry):
+        if self.get_entry_by_attr('device', entry.device):
+            return False
+
+        self.write(str(entry) + '\n')
+        self.truncate()
+        return entry
+
+    def remove_entry(self, entry):
+        self.seek(0)
+
+        lines = self.readlines()
+
+        found = False
+        for index, line in enumerate(lines):
+            if not line.startswith("#"):
+                if self._hydrate_entry(line) == entry:
+                    found = True
+                    break
+
+        if not found:
+            return False
+
+        lines.remove(line)
+
+        self.seek(0)
+        self.write(''.join(lines))
+        self.truncate()
+        return True
+
+    @classmethod
+    def remove_by_mountpoint(cls, mountpoint, path=None):
+        fstab = cls(path=path)
+        entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
+        if entry:
+            return fstab.remove_entry(entry)
+        return False
+
+    @classmethod
+    def add(cls, device, mountpoint, filesystem, options=None, path=None):
+        return cls(path=path).add_entry(Fstab.Entry(device,
+                                                    mountpoint, filesystem,
+                                                    options=options))
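Since Fstab subclasses the Python 2 built-in `file` opened in 'r+' mode, the target file must already exist; the two classmethods are the usual entry points. A minimal sketch (the device, mount point and scratch path are illustrative):

    # Exercise the Fstab helper against a scratch file rather than /etc/fstab.
    from charmhelpers.core.fstab import Fstab

    open('/tmp/fstab.test', 'a').close()  # 'r+' mode requires an existing file

    # Appends an entry unless one already exists for this device.
    Fstab.add('/dev/vdb', '/mnt/data', 'ext4', options='noatime',
              path='/tmp/fstab.test')

    # Drops whichever entry is mounted at /mnt/data, if any.
    Fstab.remove_by_mountpoint('/mnt/data', path='/tmp/fstab.test')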
=== renamed file 'hooks/charmhelpers/core/fstab.py' => 'hooks/charmhelpers/core/fstab.py.moved'
=== modified file 'hooks/charmhelpers/core/hookenv.py'
--- hooks/charmhelpers/core/hookenv.py 2014-05-19 11:38:09 +0000
+++ hooks/charmhelpers/core/hookenv.py 2014-09-16 09:08:32 +0000
@@ -25,7 +25,7 @@
 def cached(func):
     """Cache return values for multiple executions of func + args

-    For example:
+    For example::

         @cached
         def unit_get(attribute):
@@ -285,8 +285,9 @@
         raise


-def relation_set(relation_id=None, relation_settings={}, **kwargs):
+def relation_set(relation_id=None, relation_settings=None, **kwargs):
     """Set relation information for the current unit"""
+    relation_settings = relation_settings if relation_settings else {}
     relation_cmd_line = ['relation-set']
     if relation_id is not None:
         relation_cmd_line.extend(('-r', relation_id))
@@ -445,18 +446,19 @@
 class Hooks(object):
     """A convenient handler for hook functions.

-    Example:
+    Example::
+
         hooks = Hooks()

         # register a hook, taking its name from the function name
         @hooks.hook()
         def install():
-            ...
+            pass  # your code here

         # register a hook, providing a custom hook name
         @hooks.hook("config-changed")
         def config_changed():
-            ...
+            pass  # your code here

         if __name__ == "__main__":
             # execute a hook based on the name the program is called by

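The relation_set() change above replaces a mutable default argument, a classic Python pitfall: the default dict is created once at function definition time and then shared by every call. A self-contained illustration of the bug and the fix:

    # Why `relation_settings={}` was unsafe as a default argument.
    def broken(settings={}):
        settings['seen'] = True   # mutates the single shared default dict
        return settings

    assert broken() is broken()   # every call returns the same object

    # The pattern relation_set() now uses:
    def fixed(settings=None):
        settings = settings if settings else {}
        settings['seen'] = True
        return settings

    assert fixed() is not fixed()  # each call gets a fresh dict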
=== modified file 'hooks/charmhelpers/core/host.py'
--- hooks/charmhelpers/core/host.py 2014-08-27 07:14:03 +0000
+++ hooks/charmhelpers/core/host.py 2014-09-16 09:08:32 +0000
@@ -12,7 +12,8 @@
 import string
 import subprocess
 import hashlib
-import apt_pkg
+import shutil
+from contextlib import contextmanager

 from collections import OrderedDict

@@ -53,7 +54,7 @@
 def service_running(service):
     """Determine whether a system service is running"""
     try:
-        output = subprocess.check_output(['service', service, 'status'])
+        output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError:
         return False
     else:
@@ -63,6 +64,16 @@
         return False


+def service_available(service_name):
+    """Determine whether a system service is available"""
+    try:
+        subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError:
+        return False
+    else:
+        return True
+
+
 def adduser(username, password=None, shell='/bin/bash', system_user=False):
     """Add a user to the system"""
     try:
@@ -212,13 +223,13 @@
 def restart_on_change(restart_map, stopstart=False):
     """Restart services based on configuration files changing

-    This function is used a decorator, for example
+    This function is used as a decorator, for example::

         @restart_on_change({
             '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
         })
         def ceph_client_changed():
-            ...
+            pass  # your code here

     In this example, the cinder-api and cinder-volume services
     would be restarted if /etc/ceph/ceph.conf is changed by the
@@ -314,12 +325,40 @@

 def cmp_pkgrevno(package, revno, pkgcache=None):
     '''Compare supplied revno with the revno of the installed package
-       1 => Installed revno is greater than supplied arg
-       0 => Installed revno is the same as supplied arg
-      -1 => Installed revno is less than supplied arg
+
+    * 1 => Installed revno is greater than supplied arg
+    * 0 => Installed revno is the same as supplied arg
+    * -1 => Installed revno is less than supplied arg
+
     '''
-    from charmhelpers.fetch import apt_cache
+<<<<<<< TREE
+    from charmhelpers.fetch import apt_cache
+=======
+    import apt_pkg
+    from charmhelpers.fetch import apt_cache
+>>>>>>> MERGE-SOURCE
     if not pkgcache:
         pkgcache = apt_cache()
     pkg = pkgcache[package]
     return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
+
+
+@contextmanager
+def chdir(d):
+    cur = os.getcwd()
+    try:
+        yield os.chdir(d)
+    finally:
+        os.chdir(cur)
+
+
+def chownr(path, owner, group):
+    uid = pwd.getpwnam(owner).pw_uid
+    gid = grp.getgrnam(group).gr_gid
+
+    for root, dirs, files in os.walk(path):
+        for name in dirs + files:
+            full = os.path.join(root, name)
+            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
+            if not broken_symlink:
+                os.chown(full, uid, gid)

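Of the new helpers: service_available() only checks that `service <name> status` exits zero, service_running() additionally inspects the output, and chdir() is a context manager that restores the previous working directory even if the block raises. A short sketch (the service name is illustrative):

    from charmhelpers.core import host

    # Restart-or-start, mirroring what services.base.service_restart() does.
    if host.service_available('haproxy'):
        if host.service_running('haproxy'):
            host.service_restart('haproxy')
        else:
            host.service_start('haproxy')

    # Run a block from another directory, then return to where we were.
    with host.chdir('/tmp'):
        pass  # work relative to /tmp here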
=== added directory 'hooks/charmhelpers/core/services'
=== added file 'hooks/charmhelpers/core/services/__init__.py'
--- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/services/__init__.py 2014-09-16 09:08:32 +0000
@@ -0,0 +1,2 @@
+from .base import *
+from .helpers import *
=== added file 'hooks/charmhelpers/core/services/base.py'
--- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/services/base.py 2014-09-16 09:08:32 +0000
@@ -0,0 +1,310 @@
+import os
+import re
+import json
+from collections import Iterable
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+__all__ = ['ServiceManager', 'ManagerCallback',
+           'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
+           'service_restart', 'service_stop']
+
+
+class ServiceManager(object):
+    def __init__(self, services=None):
+        """
+        Register a list of services, given their definitions.
+
+        Service definitions are dicts in the following formats (all keys except
+        'service' are optional)::
+
+            {
+                "service": <service name>,
+                "required_data": <list of required data contexts>,
+                "provided_data": <list of provided data contexts>,
+                "data_ready": <one or more callbacks>,
+                "data_lost": <one or more callbacks>,
+                "start": <one or more callbacks>,
+                "stop": <one or more callbacks>,
+                "ports": <list of ports to manage>,
+            }
+
+        The 'required_data' list should contain dicts of required data (or
+        dependency managers that act like dicts and know how to collect the data).
+        Only when all items in the 'required_data' list are populated are the
+        'data_ready' and 'start' callbacks executed. See `is_ready()` for more
+        information.
+
+        The 'provided_data' list should contain relation data providers, most likely
+        a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
+        that will indicate a set of data to set on a given relation.
+
+        The 'data_ready' value should be either a single callback, or a list of
+        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
+        Each callback will be called with the service name as the only parameter.
+        After all of the 'data_ready' callbacks are called, the 'start' callbacks
+        are fired.
+
+        The 'data_lost' value should be either a single callback, or a list of
+        callbacks, to be called when a 'required_data' item no longer passes
+        `is_ready()`. Each callback will be called with the service name as the
+        only parameter. After all of the 'data_lost' callbacks are called,
+        the 'stop' callbacks are fired.
+
+        The 'start' value should be either a single callback, or a list of
+        callbacks, to be called when starting the service, after the 'data_ready'
+        callbacks are complete. Each callback will be called with the service
+        name as the only parameter. This defaults to
+        `[host.service_start, services.open_ports]`.
+
+        The 'stop' value should be either a single callback, or a list of
+        callbacks, to be called when stopping the service. If the service is
+        being stopped because it no longer has all of its 'required_data', this
+        will be called after all of the 'data_lost' callbacks are complete.
+        Each callback will be called with the service name as the only parameter.
+        This defaults to `[services.close_ports, host.service_stop]`.
+
+        The 'ports' value should be a list of ports to manage. The default
+        'start' handler will open the ports after the service is started,
+        and the default 'stop' handler will close the ports prior to stopping
+        the service.
+
+
+        Examples:
+
+        The following registers an Upstart service called bingod that depends on
+        a mongodb relation and which runs a custom `db_migrate` function prior to
+        restarting the service, and a Runit service called spadesd::
+
+            manager = services.ServiceManager([
+                {
+                    'service': 'bingod',
+                    'ports': [80, 443],
+                    'required_data': [MongoRelation(), config(), {'my': 'data'}],
+                    'data_ready': [
+                        services.template(source='bingod.conf'),
+                        services.template(source='bingod.ini',
+                                          target='/etc/bingod.ini',
+                                          owner='bingo', perms=0400),
+                    ],
+                },
+                {
+                    'service': 'spadesd',
+                    'data_ready': services.template(source='spadesd_run.j2',
+                                                    target='/etc/sv/spadesd/run',
+                                                    perms=0555),
+                    'start': runit_start,
+                    'stop': runit_stop,
+                },
+            ])
+            manager.manage()
+        """
+        self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
+        self._ready = None
+        self.services = {}
+        for service in services or []:
+            service_name = service['service']
+            self.services[service_name] = service
+
+    def manage(self):
+        """
+        Handle the current hook by doing The Right Thing with the registered services.
+        """
+        hook_name = hookenv.hook_name()
+        if hook_name == 'stop':
+            self.stop_services()
+        else:
+            self.provide_data()
+            self.reconfigure_services()
+
+    def provide_data(self):
+        """
+        Set the relation data for each provider in the ``provided_data`` list.
+
+        A provider must have a `name` attribute, which indicates which relation
+        to set data on, and a `provide_data()` method, which returns a dict of
+        data to set.
+        """
+        hook_name = hookenv.hook_name()
+        for service in self.services.values():
+            for provider in service.get('provided_data', []):
+                if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
+                    data = provider.provide_data()
+                    _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
+                    if _ready:
+                        hookenv.relation_set(None, data)
+
+    def reconfigure_services(self, *service_names):
+        """
+        Update all files for one or more registered services, and,
+        if ready, optionally restart them.
+
+        If no service names are given, reconfigures all registered services.
+        """
+        for service_name in service_names or self.services.keys():
+            if self.is_ready(service_name):
+                self.fire_event('data_ready', service_name)
+                self.fire_event('start', service_name, default=[
+                    service_restart,
+                    manage_ports])
+                self.save_ready(service_name)
+            else:
+                if self.was_ready(service_name):
+                    self.fire_event('data_lost', service_name)
+                self.fire_event('stop', service_name, default=[
+                    manage_ports,
+                    service_stop])
+                self.save_lost(service_name)
+
+    def stop_services(self, *service_names):
+        """
+        Stop one or more registered services, by name.
+
+        If no service names are given, stops all registered services.
+        """
+        for service_name in service_names or self.services.keys():
+            self.fire_event('stop', service_name, default=[
+                manage_ports,
+                service_stop])
+
+    def get_service(self, service_name):
+        """
+        Given the name of a registered service, return its service definition.
+        """
+        service = self.services.get(service_name)
+        if not service:
+            raise KeyError('Service not registered: %s' % service_name)
+        return service
+
+    def fire_event(self, event_name, service_name, default=None):
+        """
+        Fire a data_ready, data_lost, start, or stop event on a given service.
+        """
+        service = self.get_service(service_name)
+        callbacks = service.get(event_name, default)
+        if not callbacks:
+            return
+        if not isinstance(callbacks, Iterable):
+            callbacks = [callbacks]
+        for callback in callbacks:
+            if isinstance(callback, ManagerCallback):
+                callback(self, service_name, event_name)
+            else:
+                callback(service_name)
+
+    def is_ready(self, service_name):
+        """
+        Determine if a registered service is ready, by checking its 'required_data'.
+
+        A 'required_data' item can be any mapping type, and is considered ready
+        if `bool(item)` evaluates as True.
+        """
+        service = self.get_service(service_name)
+        reqs = service.get('required_data', [])
+        return all(bool(req) for req in reqs)
+
+    def _load_ready_file(self):
+        if self._ready is not None:
+            return
+        if os.path.exists(self._ready_file):
+            with open(self._ready_file) as fp:
+                self._ready = set(json.load(fp))
+        else:
+            self._ready = set()
+
+    def _save_ready_file(self):
+        if self._ready is None:
+            return
+        with open(self._ready_file, 'w') as fp:
+            json.dump(list(self._ready), fp)
+
+    def save_ready(self, service_name):
+        """
+        Save an indicator that the given service is now data_ready.
+        """
+        self._load_ready_file()
+        self._ready.add(service_name)
+        self._save_ready_file()
+
+    def save_lost(self, service_name):
+        """
+        Save an indicator that the given service is no longer data_ready.
+        """
+        self._load_ready_file()
+        self._ready.discard(service_name)
+        self._save_ready_file()
+
+    def was_ready(self, service_name):
+        """
+        Determine if the given service was previously data_ready.
+        """
+        self._load_ready_file()
+        return service_name in self._ready
+
+
+class ManagerCallback(object):
+    """
+    Special case of a callback that takes the `ServiceManager` instance
+    in addition to the service name.
+
+    Subclasses should implement `__call__` which should accept three parameters:
+
+    * `manager` The `ServiceManager` instance
+    * `service_name` The name of the service it's being triggered for
+    * `event_name` The name of the event that this callback is handling
+    """
+    def __call__(self, manager, service_name, event_name):
+        raise NotImplementedError()
+
+
+class PortManagerCallback(ManagerCallback):
+    """
+    Callback class that will open or close ports, for use as either
+    a start or stop action.
+    """
+    def __call__(self, manager, service_name, event_name):
+        service = manager.get_service(service_name)
+        new_ports = service.get('ports', [])
+        port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
+        if os.path.exists(port_file):
+            with open(port_file) as fp:
+                old_ports = fp.read().split(',')
+            for old_port in old_ports:
+                if bool(old_port):
+                    old_port = int(old_port)
+                    if old_port not in new_ports:
+                        hookenv.close_port(old_port)
+        with open(port_file, 'w') as fp:
+            fp.write(','.join(str(port) for port in new_ports))
+        for port in new_ports:
+            if event_name == 'start':
+                hookenv.open_port(port)
+            elif event_name == 'stop':
+                hookenv.close_port(port)
+
+
+def service_stop(service_name):
+    """
+    Wrapper around host.service_stop to prevent spurious "unknown service"
+    messages in the logs.
+    """
+    if host.service_running(service_name):
+        host.service_stop(service_name)
+
+
+def service_restart(service_name):
+    """
+    Wrapper around host.service_restart to prevent spurious "unknown service"
+    messages in the logs.
+    """
+    if host.service_available(service_name):
+        if host.service_running(service_name):
+            host.service_restart(service_name)
+        else:
+            host.service_start(service_name)
+
+
+# Convenience aliases
+open_ports = close_ports = manage_ports = PortManagerCallback()
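In a charm, all hook symlinks can point at a single script that builds the manager and calls manage(); a minimal sketch (the service name, port, template and context are illustrative):

    #!/usr/bin/env python
    # Minimal hooks file driven entirely by ServiceManager.
    from charmhelpers.core import services

    manager = services.ServiceManager([
        {
            'service': 'example-api',        # an Upstart job name (example)
            'ports': [8080],
            'required_data': [{'ready': True}],  # any truthy dict-like works
            'data_ready': [
                services.template(source='example.conf',
                                  target='/etc/example/example.conf'),
            ],
        },
    ])
    manager.manage()  # 'stop' hook -> stop_services(); otherwise reconfigure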
=== added file 'hooks/charmhelpers/core/services/helpers.py'
--- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/services/helpers.py 2014-09-16 09:08:32 +0000
@@ -0,0 +1,125 @@
+from charmhelpers.core import hookenv
+from charmhelpers.core import templating
+
+from charmhelpers.core.services.base import ManagerCallback
+
+
+__all__ = ['RelationContext', 'TemplateCallback',
+           'render_template', 'template']
+
+
+class RelationContext(dict):
+    """
+    Base class for a context generator that gets relation data from juju.
+
+    Subclasses must provide the attributes `name`, which is the name of the
+    interface of interest, `interface`, which is the type of the interface of
+    interest, and `required_keys`, which is the set of keys required for the
+    relation to be considered complete. The data for all interfaces matching
+    the `name` attribute that are complete will be used to populate the
+    dictionary values (see `get_data`, below).
+
+    The generated context will be namespaced under the relation name, to prevent
+    potential naming conflicts.
+    """
+    name = None
+    interface = None
+    required_keys = []
+
+    def __init__(self, *args, **kwargs):
+        super(RelationContext, self).__init__(*args, **kwargs)
+        self.get_data()
+
+    def __bool__(self):
+        """
+        Returns True if all of the required_keys are available.
+        """
+        return self.is_ready()
+
+    __nonzero__ = __bool__
+
+    def __repr__(self):
+        return super(RelationContext, self).__repr__()
+
+    def is_ready(self):
+        """
+        Returns True if all of the `required_keys` are available from any units.
+        """
+        ready = len(self.get(self.name, [])) > 0
+        if not ready:
+            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
+        return ready
+
+    def _is_ready(self, unit_data):
+        """
+        Helper method that tests a set of relation data and returns True if
+        all of the `required_keys` are present.
+        """
+        return set(unit_data.keys()).issuperset(set(self.required_keys))
+
+    def get_data(self):
+        """
+        Retrieve the relation data for each unit involved in a relation and,
+        if complete, store it in a list under `self[self.name]`. This
+        is automatically called when the RelationContext is instantiated.
+
+        The units are sorted lexicographically first by the service ID, then by
+        the unit ID. Thus, if an interface has two other services, 'db:1'
+        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
+        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
+        set of data, the relation data for the units will be stored in the
+        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
+
+        If you only care about a single unit on the relation, you can just
+        access it as `{{ interface[0]['key'] }}`. However, if you can at all
+        support multiple units on a relation, you should iterate over the list,
+        like::
+
+            {% for unit in interface -%}
+            {{ unit['key'] }}{% if not loop.last %},{% endif %}
+            {%- endfor %}
+
+        Note that since all sets of relation data from all related services and
+        units are in a single list, if you need to know which service or unit a
+        set of data came from, you'll need to extend this class to preserve
+        that information.
+        """
+        if not hookenv.relation_ids(self.name):
+            return
+
+        ns = self.setdefault(self.name, [])
+        for rid in sorted(hookenv.relation_ids(self.name)):
+            for unit in sorted(hookenv.related_units(rid)):
+                reldata = hookenv.relation_get(rid=rid, unit=unit)
+                if self._is_ready(reldata):
+                    ns.append(reldata)
+
+    def provide_data(self):
+        """
+        Return data to be relation_set for this interface.
+        """
+        return {}
+
+
+class TemplateCallback(ManagerCallback):
+    """
+    Callback class that will render a template, for use as a ready action.
+    """
+    def __init__(self, source, target, owner='root', group='root', perms=0444):
+        self.source = source
+        self.target = target
+        self.owner = owner
+        self.group = group
+        self.perms = perms
+
+    def __call__(self, manager, service_name, event_name):
+        service = manager.get_service(service_name)
+        context = {}
+        for ctx in service.get('required_data', []):
+            context.update(ctx)
+        templating.render(self.source, self.target, context,
+                          self.owner, self.group, self.perms)
+
+
+# Convenience aliases for templates
+render_template = template = TemplateCallback
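A typical use is a small RelationContext subclass per interface; a sketch for a hypothetical MySQL-style relation (the relation name and keys are examples):

    from charmhelpers.core.services.helpers import RelationContext


    class DatabaseRelation(RelationContext):
        name = 'database'          # relation name from metadata.yaml
        interface = 'mysql'
        required_keys = ['host', 'user', 'password', 'database']

    # Inside a hook, instantiation collects data from all complete units;
    # the context is truthy only once required_keys are all present:
    #
    #     db = DatabaseRelation()
    #     if db:                          # i.e. db.is_ready()
    #         creds = db['database'][0]   # data is stored under `name`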
=== added file 'hooks/charmhelpers/core/templating.py'
--- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/templating.py 2014-09-16 09:08:32 +0000
@@ -0,0 +1,51 @@
+import os
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
+    """
+    Render a template.
+
+    The `source` path, if not absolute, is relative to the `templates_dir`.
+
+    The `target` path should be absolute.
+
+    The context should be a dict containing the values to be replaced in the
+    template.
+
+    The `owner`, `group`, and `perms` options will be passed to `write_file`.
+
+    If omitted, `templates_dir` defaults to the `templates` folder in the charm.
+
+    Note: Using this requires python-jinja2; if it is not installed, calling
+    this will attempt to use charmhelpers.fetch.apt_install to install it.
+    """
+    try:
+        from jinja2 import FileSystemLoader, Environment, exceptions
+    except ImportError:
+        try:
+            from charmhelpers.fetch import apt_install
+        except ImportError:
+            hookenv.log('Could not import jinja2, and could not import '
+                        'charmhelpers.fetch to install it',
+                        level=hookenv.ERROR)
+            raise
+        apt_install('python-jinja2', fatal=True)
+        from jinja2 import FileSystemLoader, Environment, exceptions
+
+    if templates_dir is None:
+        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
+    loader = Environment(loader=FileSystemLoader(templates_dir))
+    try:
+        source = source
+        template = loader.get_template(source)
+    except exceptions.TemplateNotFound as e:
+        hookenv.log('Could not load template %s from %s.' %
+                    (source, templates_dir),
+                    level=hookenv.ERROR)
+        raise e
+    content = template.render(context)
+    host.mkdir(os.path.dirname(target))
+    host.write_file(target, content, owner, group, perms)
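render() resolves `source` against the charm's templates directory and writes the result with write_file(); for example (paths and context keys are illustrative):

    from charmhelpers.core.templating import render

    # Renders <charm_dir>/templates/haproxy.cfg with the given context and
    # installs it root:root 0444, creating the target directory if needed.
    render(source='haproxy.cfg',
           target='/etc/haproxy/haproxy.cfg',
           context={'backends': ['10.0.0.1:80', '10.0.0.2:80']})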
=== modified file 'hooks/charmhelpers/fetch/__init__.py'
--- hooks/charmhelpers/fetch/__init__.py 2014-08-27 07:14:03 +0000
+++ hooks/charmhelpers/fetch/__init__.py 2014-09-16 09:08:32 +0000
@@ -1,4 +1,5 @@
 import importlib
+from tempfile import NamedTemporaryFile
 import time
 from yaml import safe_load
 from charmhelpers.core.host import (
@@ -13,7 +14,6 @@
     config,
     log,
 )
-import apt_pkg
 import os


@@ -56,6 +56,15 @@
     'icehouse/proposed': 'precise-proposed/icehouse',
     'precise-icehouse/proposed': 'precise-proposed/icehouse',
     'precise-proposed/icehouse': 'precise-proposed/icehouse',
+    # Juno
+    'juno': 'trusty-updates/juno',
+    'trusty-juno': 'trusty-updates/juno',
+    'trusty-juno/updates': 'trusty-updates/juno',
+    'trusty-updates/juno': 'trusty-updates/juno',
+    'juno/proposed': 'trusty-proposed/juno',
+    'juno/updates': 'trusty-updates/juno',
+    'trusty-juno/proposed': 'trusty-proposed/juno',
+    'trusty-proposed/juno': 'trusty-proposed/juno',
 }

 # The order of this list is very important. Handlers should be listed in from
@@ -108,8 +117,12 @@

 def filter_installed_packages(packages):
     """Returns a list of packages that require installation"""
+<<<<<<< TREE

     cache = apt_cache()
+=======
+    cache = apt_cache()
+>>>>>>> MERGE-SOURCE
     _pkgs = []
     for package in packages:
         try:
@@ -122,15 +135,28 @@
     return _pkgs


-def apt_cache(in_memory=True):
-    """Build and return an apt cache"""
-    apt_pkg.init()
-    if in_memory:
-        apt_pkg.config.set("Dir::Cache::pkgcache", "")
-        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
-    return apt_pkg.Cache()
-
-
+<<<<<<< TREE
+def apt_cache(in_memory=True):
+    """Build and return an apt cache"""
+    apt_pkg.init()
+    if in_memory:
+        apt_pkg.config.set("Dir::Cache::pkgcache", "")
+        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
+    return apt_pkg.Cache()
+
+
+=======
+def apt_cache(in_memory=True):
+    """Build and return an apt cache"""
+    import apt_pkg
+    apt_pkg.init()
+    if in_memory:
+        apt_pkg.config.set("Dir::Cache::pkgcache", "")
+        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
+    return apt_pkg.Cache()
+
+
+>>>>>>> MERGE-SOURCE
 def apt_install(packages, options=None, fatal=False):
     """Install one or more packages"""
     if options is None:
@@ -196,6 +222,27 @@


 def add_source(source, key=None):
+    """Add a package source to this system.
+
+    @param source: a URL or sources.list entry, as supported by
+    add-apt-repository(1). Examples:
+        ppa:charmers/example
+        deb https://stub:key@private.example.com/ubuntu trusty main
+
+    In addition:
+        'proposed:' may be used to enable the standard 'proposed'
+        pocket for the release.
+        'cloud:' may be used to activate official cloud archive pockets,
+        such as 'cloud:icehouse'
+
+    @param key: A key to be added to the system's APT keyring and used
+    to verify the signatures on packages. Ideally, this should be an
+    ASCII format GPG public key including the block headers. A GPG key
+    id may also be used, but be aware that only insecure protocols are
+    available to retrieve the actual public key from a public keyserver,
+    placing your Juju environment at risk. ppa and cloud archive keys
+    are securely added automatically, so should not be provided.
+    """
     if source is None:
         log('Source is not present. Skipping')
         return
@@ -220,41 +267,63 @@
         release = lsb_release()['DISTRIB_CODENAME']
         with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
             apt.write(PROPOSED_POCKET.format(release))
+    else:
+        raise SourceConfigError("Unknown source: {!r}".format(source))
+
     if key:
-        subprocess.check_call(['apt-key', 'adv', '--keyserver',
-                               'hkp://keyserver.ubuntu.com:80', '--recv',
-                               key])
+        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
+            with NamedTemporaryFile() as key_file:
+                key_file.write(key)
+                key_file.flush()
+                key_file.seek(0)
+                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
+        else:
+            # Note that hkp: is in no way a secure protocol. Using a
+            # GPG key id is pointless from a security POV unless you
+            # absolutely trust your network and DNS.
+            subprocess.check_call(['apt-key', 'adv', '--keyserver',
+                                   'hkp://keyserver.ubuntu.com:80', '--recv',
+                                   key])


 def configure_sources(update=False,
                       sources_var='install_sources',
                       keys_var='install_keys'):
     """
-    Configure multiple sources from charm configuration
+    Configure multiple sources from charm configuration.
+
+    The lists are encoded as yaml fragments in the configuration.
+    The fragment needs to be included as a string. Sources and their
+    corresponding keys are of the types supported by add_source().

     Example config:
-        install_sources:
+        install_sources: |
             - "ppa:foo"
             - "http://example.com/repo precise main"
-        install_keys:
+        install_keys: |
             - null
             - "a1b2c3d4"

     Note that 'null' (a.k.a. None) should not be quoted.
     """
-    sources = safe_load(config(sources_var))
-    keys = config(keys_var)
-    if keys is not None:
-        keys = safe_load(keys)
-    if isinstance(sources, basestring) and (
-            keys is None or isinstance(keys, basestring)):
-        add_source(sources, keys)
+    sources = safe_load((config(sources_var) or '').strip()) or []
+    keys = safe_load((config(keys_var) or '').strip()) or None
+
+    if isinstance(sources, basestring):
+        sources = [sources]
+
+    if keys is None:
+        for source in sources:
+            add_source(source, None)
     else:
-        if not len(sources) == len(keys):
-            msg = 'Install sources and keys lists are different lengths'
-            raise SourceConfigError(msg)
-        for src_num in range(len(sources)):
-            add_source(sources[src_num], keys[src_num])
+        if isinstance(keys, basestring):
+            keys = [keys]
+
+        if len(sources) != len(keys):
+            raise SourceConfigError(
+                'Install sources and keys lists are different lengths')
+        for source, key in zip(sources, keys):
+            add_source(source, key)
     if update:
         apt_update(fatal=True)

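Taken together, configure_sources() normalises the two YAML fragments into parallel lists and feeds each pair to add_source(); the roughly equivalent explicit calls for a sample config (values are examples) would be:

    # Given charm config:
    #   install_sources: |
    #     - "ppa:foo"
    #     - "deb http://example.com/repo precise main"
    #   install_keys: |
    #     - null
    #     - "a1b2c3d4"
    from charmhelpers.fetch import add_source, apt_update

    add_source('ppa:foo', None)
    add_source('deb http://example.com/repo precise main', 'a1b2c3d4')
    apt_update(fatal=True)   # what update=True triggers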
=== added symlink 'hooks/neutron-api-relation-broken'
=== target is u'nova_cc_hooks.py'
=== added symlink 'hooks/neutron-api-relation-changed'
=== target is u'nova_cc_hooks.py'
=== added symlink 'hooks/neutron-api-relation-departed'
=== target is u'nova_cc_hooks.py'
=== added symlink 'hooks/neutron-api-relation-joined'
=== target is u'nova_cc_hooks.py'
=== modified file 'hooks/nova_cc_context.py'
--- hooks/nova_cc_context.py 2014-06-17 10:01:21 +0000
+++ hooks/nova_cc_context.py 2014-09-16 09:08:32 +0000
@@ -1,7 +1,6 @@
-
 from charmhelpers.core.hookenv import (
     config, relation_ids, relation_set, log, ERROR,
-    unit_get)
+    unit_get, related_units, relation_get)

 from charmhelpers.fetch import apt_install, filter_installed_packages
 from charmhelpers.contrib.openstack import context, neutron, utils
@@ -14,6 +13,17 @@
 )


+def context_complete(ctxt):
+    _missing = []
+    for k, v in ctxt.iteritems():
+        if v is None or v == '':
+            _missing.append(k)
+    if _missing:
+        log('Missing required data: %s' % ' '.join(_missing), level='INFO')
+        return False
+    return True
+
+
 class ApacheSSLContext(context.ApacheSSLContext):

     interfaces = ['https']
@@ -27,6 +37,26 @@
         return super(ApacheSSLContext, self).__call__()


+class NeutronAPIContext(context.OSContextGenerator):
+
+    def __call__(self):
+        log('Generating template context from neutron api relation')
+        ctxt = {}
+        for rid in relation_ids('neutron-api'):
+            for unit in related_units(rid):
+                rdata = relation_get(rid=rid, unit=unit)
+                ctxt = {
+                    'neutron_url': rdata.get('neutron-url'),
+                    'neutron_plugin': rdata.get('neutron-plugin'),
+                    'neutron_security_groups':
+                    rdata.get('neutron-security-groups'),
+                    'network_manager': 'neutron',
+                }
+                if context_complete(ctxt):
+                    return ctxt
+        return {}
+
+
 class VolumeServiceContext(context.OSContextGenerator):
     interfaces = []

@@ -204,3 +234,22 @@
     def __init__(self):
         super(NeutronPostgresqlDBContext,
               self).__init__(config('neutron-database'))
+
+
+class WorkerConfigContext(context.OSContextGenerator):
+
+    def __call__(self):
+        import psutil
+        multiplier = config('worker-multiplier') or 1
+        ctxt = {
+            "workers": psutil.NUM_CPUS * multiplier
+        }
+        return ctxt
+
+
+class NovaConfigContext(WorkerConfigContext):
+    def __call__(self):
+        ctxt = super(NovaConfigContext, self).__call__()
+        ctxt['cpu_allocation_ratio'] = config('cpu-allocation-ratio')
+        ctxt['ram_allocation_ratio'] = config('ram-allocation-ratio')
+        return ctxt

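NovaConfigContext is the piece this branch is named for: it extends the worker-count context with the two scheduler ratios so the nova.conf templates can render them. Assuming, purely for illustration, a 4-core unit with worker-multiplier=2, cpu-allocation-ratio=16 and ram-allocation-ratio=0.98, the context would come out as:

    # Hypothetical output of NovaConfigContext()() under the assumptions above.
    ctxt = {
        'workers': 8,                 # psutil.NUM_CPUS * worker-multiplier
        'cpu_allocation_ratio': 16.0,
        'ram_allocation_ratio': 0.98,
    }
    # which the nova.conf templates can then consume along these
    # (illustrative) lines:
    #   cpu_allocation_ratio = {{ cpu_allocation_ratio }}
    #   ram_allocation_ratio = {{ ram_allocation_ratio }}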
=== modified file 'hooks/nova_cc_hooks.py'
--- hooks/nova_cc_hooks.py 2014-08-01 11:13:57 +0000
+++ hooks/nova_cc_hooks.py 2014-09-16 09:08:32 +0000
@@ -15,20 +15,25 @@
15 charm_dir,15 charm_dir,
16 is_relation_made,16 is_relation_made,
17 log,17 log,
18 local_unit,
18 ERROR,19 ERROR,
19 relation_get,20 relation_get,
20 relation_ids,21 relation_ids,
21 relation_set,22 relation_set,
23 related_units,
22 open_port,24 open_port,
23 unit_get,25 unit_get,
24)26)
2527
26from charmhelpers.core.host import (28from charmhelpers.core.host import (
27 restart_on_change29 restart_on_change,
30 service_running,
31 service_stop,
28)32)
2933
30from charmhelpers.fetch import (34from charmhelpers.fetch import (
31 apt_install, apt_update35 apt_install, apt_update,
36 filter_installed_packages
32)37)
3338
34from charmhelpers.contrib.openstack.utils import (39from charmhelpers.contrib.openstack.utils import (
@@ -41,21 +46,33 @@
41 neutron_plugin_attribute,46 neutron_plugin_attribute,
42)47)
4348
49from nova_cc_context import (
50 NeutronAPIContext
51)
52
53from charmhelpers.contrib.peerstorage import (
54 peer_retrieve,
55 peer_echo,
56)
57
44from nova_cc_utils import (58from nova_cc_utils import (
45 api_port,59 api_port,
46 auth_token_config,60 auth_token_config,
61 cmd_all_services,
47 determine_endpoints,62 determine_endpoints,
48 determine_packages,63 determine_packages,
49 determine_ports,64 determine_ports,
65 disable_services,
50 do_openstack_upgrade,66 do_openstack_upgrade,
67 enable_services,
51 keystone_ca_cert_b64,68 keystone_ca_cert_b64,
52 migrate_database,69 migrate_database,
53 neutron_plugin,70 neutron_plugin,
54 save_script_rc,71 save_script_rc,
55 ssh_compute_add,72 ssh_compute_add,
56 ssh_compute_remove,73 ssh_compute_remove,
57 ssh_known_hosts_b64,74 ssh_known_hosts_lines,
58 ssh_authorized_keys_b64,75 ssh_authorized_keys_lines,
59 register_configs,76 register_configs,
60 restart_map,77 restart_map,
61 volume_service,78 volume_service,
@@ -63,13 +80,19 @@
63 NOVA_CONF,80 NOVA_CONF,
64 QUANTUM_CONF,81 QUANTUM_CONF,
65 NEUTRON_CONF,82 NEUTRON_CONF,
66 QUANTUM_API_PASTE,83<<<<<<< TREE
67 service_guard,84 QUANTUM_API_PASTE,
68 guard_map,85 service_guard,
86 guard_map,
87=======
88 QUANTUM_API_PASTE,
89 console_attributes,
90 service_guard,
91 guard_map,
92>>>>>>> MERGE-SOURCE
69)93)
7094
71from charmhelpers.contrib.hahelpers.cluster import (95from charmhelpers.contrib.hahelpers.cluster import (
72 canonical_url,
73 eligible_leader,96 eligible_leader,
74 get_hacluster_config,97 get_hacluster_config,
75 is_leader,98 is_leader,
@@ -77,6 +100,16 @@
77100
78from charmhelpers.payload.execd import execd_preinstall101from charmhelpers.payload.execd import execd_preinstall
79102
103from charmhelpers.contrib.openstack.ip import (
104 canonical_url,
105 PUBLIC, INTERNAL, ADMIN
106)
107
108from charmhelpers.contrib.network.ip import (
109 get_iface_for_address,
110 get_netmask_for_address
111)
112
80hooks = Hooks()113hooks = Hooks()
81CONFIGS = register_configs()114CONFIGS = register_configs()
82115
@@ -95,6 +128,9 @@
95 log('Installing %s to /usr/bin' % f)128 log('Installing %s to /usr/bin' % f)
96 shutil.copy2(f, '/usr/bin')129 shutil.copy2(f, '/usr/bin')
97 [open_port(port) for port in determine_ports()]130 [open_port(port) for port in determine_ports()]
131 log('Disabling services into db relation joined')
132 disable_services()
133 cmd_all_services('stop')
98134
99135
100@hooks.hook('config-changed')136@hooks.hook('config-changed')
@@ -108,6 +144,13 @@
108 save_script_rc()144 save_script_rc()
109 configure_https()145 configure_https()
110 CONFIGS.write_all()146 CONFIGS.write_all()
147 if console_attributes('protocol'):
148 apt_update()
149 apt_install(console_attributes('packages'), fatal=True)
150 [compute_joined(rid=rid)
151 for rid in relation_ids('cloud-compute')]
152 for r_id in relation_ids('identity-service'):
153 identity_joined(rid=r_id)
111154
112155
113@hooks.hook('amqp-relation-joined')156@hooks.hook('amqp-relation-joined')
@@ -126,10 +169,11 @@
126 log('amqp relation incomplete. Peer not ready?')169 log('amqp relation incomplete. Peer not ready?')
127 return170 return
128 CONFIGS.write(NOVA_CONF)171 CONFIGS.write(NOVA_CONF)
129 if network_manager() == 'quantum':172 if not is_relation_made('neutron-api'):
130 CONFIGS.write(QUANTUM_CONF)173 if network_manager() == 'quantum':
131 if network_manager() == 'neutron':174 CONFIGS.write(QUANTUM_CONF)
132 CONFIGS.write(NEUTRON_CONF)175 if network_manager() == 'neutron':
176 CONFIGS.write(NEUTRON_CONF)
133177
134178
135@hooks.hook('shared-db-relation-joined')179@hooks.hook('shared-db-relation-joined')
@@ -187,6 +231,13 @@
187 CONFIGS.write_all()231 CONFIGS.write_all()
188232
189 if eligible_leader(CLUSTER_RES):233 if eligible_leader(CLUSTER_RES):
234 # Bugs 1353135 & 1187508. Dbs can appear to be ready before the units
235 # acl entry has been added. So, if the db supports passing a list of
236 # permitted units then check if we're in the list.
237 allowed_units = relation_get('nova_allowed_units')
238 if allowed_units and local_unit() not in allowed_units.split():
239 log('Allowed_units list provided and this unit not present')
240 return
190 migrate_database()241 migrate_database()
191 log('Triggering remote cloud-compute restarts.')242 log('Triggering remote cloud-compute restarts.')
192 [compute_joined(rid=rid, remote_restart=True)243 [compute_joined(rid=rid, remote_restart=True)
@@ -237,8 +288,12 @@
237def identity_joined(rid=None):288def identity_joined(rid=None):
238 if not eligible_leader(CLUSTER_RES):289 if not eligible_leader(CLUSTER_RES):
239 return290 return
240 base_url = canonical_url(CONFIGS)291 public_url = canonical_url(CONFIGS, PUBLIC)
241 relation_set(relation_id=rid, **determine_endpoints(base_url))292 internal_url = canonical_url(CONFIGS, INTERNAL)
293 admin_url = canonical_url(CONFIGS, ADMIN)
294 relation_set(relation_id=rid, **determine_endpoints(public_url,
295 internal_url,
296 admin_url))
242297
243298
244@hooks.hook('identity-service-relation-changed')299@hooks.hook('identity-service-relation-changed')
@@ -251,15 +306,17 @@
251 return306 return
252 CONFIGS.write('/etc/nova/api-paste.ini')307 CONFIGS.write('/etc/nova/api-paste.ini')
253 CONFIGS.write(NOVA_CONF)308 CONFIGS.write(NOVA_CONF)
254 if network_manager() == 'quantum':309 if not is_relation_made('neutron-api'):
255 CONFIGS.write(QUANTUM_API_PASTE)310 if network_manager() == 'quantum':
256 CONFIGS.write(QUANTUM_CONF)311 CONFIGS.write(QUANTUM_API_PASTE)
257 save_novarc()312 CONFIGS.write(QUANTUM_CONF)
258 if network_manager() == 'neutron':313 save_novarc()
259 CONFIGS.write(NEUTRON_CONF)314 if network_manager() == 'neutron':
315 CONFIGS.write(NEUTRON_CONF)
260 [compute_joined(rid) for rid in relation_ids('cloud-compute')]316 [compute_joined(rid) for rid in relation_ids('cloud-compute')]
261 [quantum_joined(rid) for rid in relation_ids('quantum-network-service')]317 [quantum_joined(rid) for rid in relation_ids('quantum-network-service')]
262 [nova_vmware_relation_joined(rid) for rid in relation_ids('nova-vmware')]318 [nova_vmware_relation_joined(rid) for rid in relation_ids('nova-vmware')]
319 [neutron_api_relation_joined(rid) for rid in relation_ids('neutron-api')]
263 configure_https()320 configure_https()
264321
265322
@@ -311,6 +368,33 @@
311 out.write('export OS_REGION_NAME=%s\n' % config('region'))368 out.write('export OS_REGION_NAME=%s\n' % config('region'))
312369
313370
371def neutron_settings():
372 neutron_settings = {}
373 if is_relation_made('neutron-api', 'neutron-plugin'):
374 neutron_api_info = NeutronAPIContext()()
375 neutron_settings.update({
376 # XXX: Rename these relations settings?
377 'quantum_plugin': neutron_api_info['neutron_plugin'],
378 'region': config('region'),
379 'quantum_security_groups':
380 neutron_api_info['neutron_security_groups'],
381 'quantum_url': neutron_api_info['neutron_url'],
382 })
383 else:
384 neutron_settings.update({
385 # XXX: Rename these relations settings?
386 'quantum_plugin': neutron_plugin(),
387 'region': config('region'),
388 'quantum_security_groups': config('quantum-security-groups'),
389 'quantum_url': "{}:{}".format(canonical_url(CONFIGS, INTERNAL),
390 str(api_port('neutron-server'))),
391 })
392 neutron_url = urlparse(neutron_settings['quantum_url'])
393 neutron_settings['quantum_host'] = neutron_url.hostname
394 neutron_settings['quantum_port'] = neutron_url.port
395 return neutron_settings
396
397
314def keystone_compute_settings():398def keystone_compute_settings():
315 ks_auth_config = _auth_config()399 ks_auth_config = _auth_config()
316 rel_settings = {}400 rel_settings = {}
@@ -318,25 +402,45 @@
318 if network_manager() in ['quantum', 'neutron']:402 if network_manager() in ['quantum', 'neutron']:
319 if ks_auth_config:403 if ks_auth_config:
320 rel_settings.update(ks_auth_config)404 rel_settings.update(ks_auth_config)
321405 rel_settings.update(neutron_settings())
322 rel_settings.update({
323 # XXX: Rename these relations settings?
324 'quantum_plugin': neutron_plugin(),
325 'region': config('region'),
326 'quantum_security_groups': config('quantum-security-groups'),
327 'quantum_url': (canonical_url(CONFIGS) + ':' +
328 str(api_port('neutron-server'))),
329 })
330
331 ks_ca = keystone_ca_cert_b64()406 ks_ca = keystone_ca_cert_b64()
332 if ks_auth_config and ks_ca:407 if ks_auth_config and ks_ca:
333 rel_settings['ca_cert'] = ks_ca408 rel_settings['ca_cert'] = ks_ca
409 return rel_settings
410
411
412def console_settings():
413 rel_settings = {}
414 proto = console_attributes('protocol')
415 if not proto:
416 return {}
417 rel_settings['console_keymap'] = config('console-keymap')
418 rel_settings['console_access_protocol'] = proto
419 if config('console-proxy-ip') == 'local':
420 proxy_base_addr = canonical_url(CONFIGS, PUBLIC)
421 else:
422 proxy_base_addr = "http://" + config('console-proxy-ip')
423 if proto == 'vnc':
424 protocols = ['novnc', 'xvpvnc']
425 else:
426 protocols = [proto]
427 for _proto in protocols:
428 rel_settings['console_proxy_%s_address' % (_proto)] = \
429 "%s:%s%s" % (proxy_base_addr,
430 console_attributes('proxy-port', proto=_proto),
431 console_attributes('proxy-page', proto=_proto))
432 rel_settings['console_proxy_%s_host' % (_proto)] = \
433 urlparse(proxy_base_addr).hostname
434 rel_settings['console_proxy_%s_port' % (_proto)] = \
435 console_attributes('proxy-port', proto=_proto)
334436
335 return rel_settings437 return rel_settings
336438
337439
338@hooks.hook('cloud-compute-relation-joined')440@hooks.hook('cloud-compute-relation-joined')
339def compute_joined(rid=None, remote_restart=False):441def compute_joined(rid=None, remote_restart=False):
442 cons_settings = console_settings()
443 relation_set(relation_id=rid, **cons_settings)
340 if not eligible_leader(CLUSTER_RES):444 if not eligible_leader(CLUSTER_RES):
341 return445 return
342 rel_settings = {446 rel_settings = {
@@ -346,7 +450,6 @@
346 # this may not even be needed.450 # this may not even be needed.
347 'ec2_host': unit_get('private-address'),451 'ec2_host': unit_get('private-address'),
348 }452 }
349
350 # update relation setting if we're attempting to restart remote453 # update relation setting if we're attempting to restart remote
351 # services454 # services
352 if remote_restart:455 if remote_restart:
@@ -357,21 +460,63 @@
357460
358461
359@hooks.hook('cloud-compute-relation-changed')462@hooks.hook('cloud-compute-relation-changed')
360def compute_changed():463def compute_changed(rid=None, unit=None):
361 migration_auth = relation_get('migration_auth_type')464 rel_settings = relation_get(rid=rid, unit=unit)
362 if migration_auth == 'ssh':465 if 'migration_auth_type' not in rel_settings:
363 key = relation_get('ssh_public_key')466 return
467 if rel_settings['migration_auth_type'] == 'ssh':
468 key = rel_settings.get('ssh_public_key')
364 if not key:469 if not key:
365 log('SSH migration set but peer did not publish key.')470 log('SSH migration set but peer did not publish key.')
366 return471 return
367 ssh_compute_add(key)472 ssh_compute_add(key, rid=rid, unit=unit)
368 relation_set(known_hosts=ssh_known_hosts_b64(),473 index = 0
369 authorized_keys=ssh_authorized_keys_b64())474 for line in ssh_known_hosts_lines(unit=unit):
370 if relation_get('nova_ssh_public_key'):475 relation_set(
371 key = relation_get('nova_ssh_public_key')476 relation_id=rid,
372 ssh_compute_add(key, user='nova')477 relation_settings={
373 relation_set(nova_known_hosts=ssh_known_hosts_b64(user='nova'),478 'known_hosts_{}'.format(index): line})
374 nova_authorized_keys=ssh_authorized_keys_b64(user='nova'))479 index += 1
480 relation_set(relation_id=rid, known_hosts_max_index=index)
481 index = 0
482 for line in ssh_authorized_keys_lines(unit=unit):
483 relation_set(
484 relation_id=rid,
485 relation_settings={
486 'authorized_keys_{}'.format(index): line})
487 index += 1
488 relation_set(relation_id=rid, authorized_keys_max_index=index)
489 if 'nova_ssh_public_key' not in rel_settings:
490 return
491 if rel_settings['nova_ssh_public_key']:
492 ssh_compute_add(rel_settings['nova_ssh_public_key'],
493 rid=rid, unit=unit, user='nova')
494 index = 0
495 for line in ssh_known_hosts_lines(unit=unit, user='nova'):
496 relation_set(
497 relation_id=rid,
498 relation_settings={
499 '{}_known_hosts_{}'.format(
500 'nova',
501 index): line})
502 index += 1
503 relation_set(
504 relation_id=rid,
505 relation_settings={
506 '{}_known_hosts_max_index'.format('nova'): index})
507 index = 0
508 for line in ssh_authorized_keys_lines(unit=unit, user='nova'):
509 relation_set(
510 relation_id=rid,
511 relation_settings={
512 '{}_authorized_keys_{}'.format(
513 'nova',
514 index): line})
515 index += 1
516 relation_set(
517 relation_id=rid,
518 relation_settings={
519 '{}_authorized_keys_max_index'.format('nova'): index})
375520
376521
377@hooks.hook('cloud-compute-relation-departed')522@hooks.hook('cloud-compute-relation-departed')
@@ -385,15 +530,7 @@
385 if not eligible_leader(CLUSTER_RES):530 if not eligible_leader(CLUSTER_RES):
386 return531 return
387532
388 url = canonical_url(CONFIGS) + ':9696'533 rel_settings = neutron_settings()
389 # XXX: Can we rename to neutron_*?
390 rel_settings = {
391 'quantum_host': urlparse(url).hostname,
392 'quantum_url': url,
393 'quantum_port': 9696,
394 'quantum_plugin': neutron_plugin(),
395 'region': config('region')
396 }
397534
398 # inform quantum about local keystone auth config535 # inform quantum about local keystone auth config
399 ks_auth_config = _auth_config()536 ks_auth_config = _auth_config()
@@ -403,7 +540,6 @@
403 ks_ca = keystone_ca_cert_b64()540 ks_ca = keystone_ca_cert_b64()
404 if ks_auth_config and ks_ca:541 if ks_auth_config and ks_ca:
405 rel_settings['ca_cert'] = ks_ca542 rel_settings['ca_cert'] = ks_ca
406
407 relation_set(relation_id=rid, **rel_settings)543 relation_set(relation_id=rid, **rel_settings)
408544
409545
@@ -414,21 +550,44 @@
 @restart_on_change(restart_map(), stopstart=True)
 def cluster_changed():
     CONFIGS.write_all()
+    if is_relation_made('cluster'):
+        peer_echo(includes='dbsync_state')
+        dbsync_state = peer_retrieve('dbsync_state')
+        if dbsync_state == 'complete':
+            enable_services()
+            cmd_all_services('start')
+        else:
+            log('Database sync not ready. Shutting down services')
+            disable_services()
+            cmd_all_services('stop')
 
 
 @hooks.hook('ha-relation-joined')
 def ha_joined():
     config = get_hacluster_config()
     resources = {
-        'res_nova_vip': 'ocf:heartbeat:IPaddr2',
         'res_nova_haproxy': 'lsb:haproxy',
     }
-    vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
-        (config['vip'], config['vip_cidr'], config['vip_iface'])
     resource_params = {
-        'res_nova_vip': vip_params,
         'res_nova_haproxy': 'op monitor interval="5s"'
     }
+    vip_group = []
+    for vip in config['vip'].split():
+        iface = get_iface_for_address(vip)
+        if iface is not None:
+            vip_key = 'res_nova_{}_vip'.format(iface)
+            resources[vip_key] = 'ocf:heartbeat:IPaddr2'
+            resource_params[vip_key] = (
+                'params ip="{vip}" cidr_netmask="{netmask}"'
+                ' nic="{iface}"'.format(vip=vip,
+                                        iface=iface,
+                                        netmask=get_netmask_for_address(vip))
+            )
+            vip_group.append(vip_key)
+
+    if len(vip_group) >= 1:
+        relation_set(groups={'grp_nova_vips': ' '.join(vip_group)})
+
     init_services = {
         'res_nova_haproxy': 'haproxy'
     }
@@ -449,6 +608,7 @@
     if not clustered or clustered in [None, 'None', '']:
         log('ha_changed: hacluster subordinate not fully clustered.')
         return
+<<<<<<< TREE
 
     CONFIGS.write(NOVA_CONF)
     if network_manager() == 'quantum':
@@ -456,6 +616,16 @@
     if network_manager() == 'neutron':
         CONFIGS.write(NEUTRON_CONF)
 
+=======
+
+    CONFIGS.write(NOVA_CONF)
+    if not is_relation_made('neutron-api'):
+        if network_manager() == 'quantum':
+            CONFIGS.write(QUANTUM_CONF)
+        if network_manager() == 'neutron':
+            CONFIGS.write(NEUTRON_CONF)
+
+>>>>>>> MERGE-SOURCE
     if not is_leader(CLUSTER_RES):
         log('ha_changed: hacluster complete but we are not leader.')
         return
@@ -465,13 +635,23 @@
         identity_joined(rid=rid)
 
 
+@hooks.hook('shared-db-relation-broken',
+            'pgsql-nova-db-relation-broken')
+@service_guard(guard_map(), CONFIGS,
+               active=config('service-guard'))
+def db_departed():
+    CONFIGS.write_all()
+    for r_id in relation_ids('cluster'):
+        relation_set(relation_id=r_id, dbsync_state='incomplete')
+    disable_services()
+    cmd_all_services('stop')
+
+
 @hooks.hook('amqp-relation-broken',
             'cinder-volume-service-relation-broken',
             'identity-service-relation-broken',
             'image-service-relation-broken',
             'nova-volume-service-relation-broken',
-            'shared-db-relation-broken',
-            'pgsql-nova-db-relation-broken',
             'pgsql-neutron-db-relation-broken',
             'quantum-network-service-relation-broken')
 @service_guard(guard_map(), CONFIGS,
@@ -509,8 +689,8 @@
         rel_settings.update({
             'quantum_plugin': neutron_plugin(),
             'quantum_security_groups': config('quantum-security-groups'),
-            'quantum_url': (canonical_url(CONFIGS) + ':' +
-                            str(api_port('neutron-server')))})
+            'quantum_url': "{}:{}".format(canonical_url(CONFIGS, INTERNAL),
+                                          str(api_port('neutron-server')))})
 
     relation_set(relation_id=rid, **rel_settings)
 
@@ -525,10 +705,55 @@
 
 @hooks.hook('upgrade-charm')
 def upgrade_charm():
+    apt_install(filter_installed_packages(determine_packages()),
+                fatal=True)
     for r_id in relation_ids('amqp'):
         amqp_joined(relation_id=r_id)
     for r_id in relation_ids('identity-service'):
         identity_joined(rid=r_id)
+    for r_id in relation_ids('cloud-compute'):
+        for unit in related_units(r_id):
+            compute_changed(r_id, unit)
+
+
+@hooks.hook('neutron-api-relation-joined')
+def neutron_api_relation_joined(rid=None):
+    with open('/etc/init/neutron-server.override', 'wb') as out:
+        out.write('manual\n')
+    if os.path.isfile(NEUTRON_CONF):
+        os.rename(NEUTRON_CONF, NEUTRON_CONF + '_unused')
+    if service_running('neutron-server'):
+        service_stop('neutron-server')
+    for id_rid in relation_ids('identity-service'):
+        identity_joined(rid=id_rid)
+    nova_url = canonical_url(CONFIGS, INTERNAL) + ":8774/v2"
+    relation_set(relation_id=rid, nova_url=nova_url)
+
+
+@hooks.hook('neutron-api-relation-changed')
+@service_guard(guard_map(), CONFIGS,
+               active=config('service-guard'))
+@restart_on_change(restart_map())
+def neutron_api_relation_changed():
+    CONFIGS.write(NOVA_CONF)
+    for rid in relation_ids('cloud-compute'):
+        compute_joined(rid=rid)
+    for rid in relation_ids('quantum-network-service'):
+        quantum_joined(rid=rid)
+
+
+@hooks.hook('neutron-api-relation-broken')
+@service_guard(guard_map(), CONFIGS,
+               active=config('service-guard'))
+@restart_on_change(restart_map())
+def neutron_api_relation_broken():
+    if os.path.isfile('/etc/init/neutron-server.override'):
+        os.remove('/etc/init/neutron-server.override')
+    CONFIGS.write_all()
+    for rid in relation_ids('cloud-compute'):
+        compute_joined(rid=rid)
+    for rid in relation_ids('quantum-network-service'):
+        quantum_joined(rid=rid)
 
 
 def main():
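Note for reviewers: the cluster_changed()/db_departed() changes above gate
service startup on a 'dbsync_state' flag exchanged over the 'cluster' peer
relation: the unit that completes 'nova-manage db sync' stores 'complete',
peers echo and read it back, and only then start their services. A minimal
sketch of that round trip, assuming the peer_store()/peer_retrieve()
key-value semantics from charmhelpers.contrib.peerstorage used in this diff
(the helper function names below are illustrative, not part of the charm):

    # Sketch only: intended behaviour of the dbsync_state gate.
    from charmhelpers.contrib.peerstorage import peer_retrieve, peer_store

    def announce_dbsync_complete():
        # Run by the unit that finished 'nova-manage db sync'; the value
        # is stored on the cluster peer relation for every unit to read.
        peer_store('dbsync_state', 'complete')

    def services_may_start():
        # Run by peers in cluster-relation-changed before enabling services.
        return peer_retrieve('dbsync_state') == 'complete'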
=== modified file 'hooks/nova_cc_utils.py'
--- hooks/nova_cc_utils.py 2014-07-29 15:05:01 +0000
+++ hooks/nova_cc_utils.py 2014-09-16 09:08:32 +0000
@@ -12,6 +12,8 @@
 
 from charmhelpers.contrib.hahelpers.cluster import eligible_leader
 
+from charmhelpers.contrib.peerstorage import peer_store
+
 from charmhelpers.contrib.openstack.utils import (
     configure_installation_source,
     get_host_ip,
@@ -39,17 +41,23 @@
39)41)
4042
41from charmhelpers.core.host import (43from charmhelpers.core.host import (
42 service_start,44<<<<<<< TREE
43 service_stop,45 service_start,
44 service_running46 service_stop,
47 service_running
48=======
49 service,
50 service_start,
51 service_stop,
52 service_running
53>>>>>>> MERGE-SOURCE
45)54)
4655
47
48import nova_cc_context56import nova_cc_context
4957
50TEMPLATES = 'templates/'58TEMPLATES = 'templates/'
5159
52CLUSTER_RES = 'res_nova_vip'60CLUSTER_RES = 'grp_nova_vips'
5361
54# removed from original: charm-helper-sh62# removed from original: charm-helper-sh
55BASE_PACKAGES = [63BASE_PACKAGES = [
@@ -58,6 +66,7 @@
58 'python-keystoneclient',66 'python-keystoneclient',
59 'python-mysqldb',67 'python-mysqldb',
60 'python-psycopg2',68 'python-psycopg2',
69 'python-psutil',
61 'uuid',70 'uuid',
62]71]
6372
@@ -110,7 +119,8 @@
                      nova_cc_context.HAProxyContext(),
                      nova_cc_context.IdentityServiceContext(),
                      nova_cc_context.VolumeServiceContext(),
-                     nova_cc_context.NeutronCCContext()],
+                     nova_cc_context.NeutronCCContext(),
+                     nova_cc_context.NovaConfigContext()],
     }),
     (NOVA_API_PASTE, {
         'services': [s for s in BASE_SERVICES if 'api' in s],
@@ -150,7 +160,8 @@
                      nova_cc_context.IdentityServiceContext(),
                      nova_cc_context.NeutronCCContext(),
                      nova_cc_context.HAProxyContext(),
-                     context.SyslogContext()],
+                     context.SyslogContext(),
+                     nova_cc_context.NovaConfigContext()],
     }),
     (NEUTRON_DEFAULT, {
         'services': ['neutron-server'],
@@ -175,6 +186,27 @@
 
 NOVA_SSH_DIR = '/etc/nova/compute_ssh/'
 
+CONSOLE_CONFIG = {
+    'spice': {
+        'packages': ['nova-spiceproxy', 'nova-consoleauth'],
+        'services': ['nova-spiceproxy', 'nova-consoleauth'],
+        'proxy-page': '/spice_auto.html',
+        'proxy-port': 6082,
+    },
+    'novnc': {
+        'packages': ['nova-novncproxy', 'nova-consoleauth'],
+        'services': ['nova-novncproxy', 'nova-consoleauth'],
+        'proxy-page': '/vnc_auto.html',
+        'proxy-port': 6080,
+    },
+    'xvpvnc': {
+        'packages': ['nova-xvpvncproxy', 'nova-consoleauth'],
+        'services': ['nova-xvpvncproxy', 'nova-consoleauth'],
+        'proxy-page': '/console',
+        'proxy-port': 6081,
+    },
+}
+
 
 def resource_map():
     '''
@@ -191,44 +223,56 @@
 
     net_manager = network_manager()
 
-    # pop out irrelevant resources from the OrderedDict (easier than adding
-    # them late)
-    if net_manager != 'quantum':
-        [resource_map.pop(k) for k in list(resource_map.iterkeys())
-         if 'quantum' in k]
-    if net_manager != 'neutron':
-        [resource_map.pop(k) for k in list(resource_map.iterkeys())
-         if 'neutron' in k]
-
     if os.path.exists('/etc/apache2/conf-available'):
         resource_map.pop(APACHE_CONF)
     else:
         resource_map.pop(APACHE_24_CONF)
 
-    # add neutron plugin requirements. nova-c-c only needs the neutron-server
-    # associated with configs, not the plugin agent.
-    if net_manager in ['quantum', 'neutron']:
-        plugin = neutron_plugin()
-        if plugin:
-            conf = neutron_plugin_attribute(plugin, 'config', net_manager)
-            ctxts = (neutron_plugin_attribute(plugin, 'contexts', net_manager)
-                     or [])
-            services = neutron_plugin_attribute(plugin, 'server_services',
-                                                net_manager)
-            resource_map[conf] = {}
-            resource_map[conf]['services'] = services
-            resource_map[conf]['contexts'] = ctxts
-            resource_map[conf]['contexts'].append(
-                nova_cc_context.NeutronCCContext())
+    if is_relation_made('neutron-api'):
+        [resource_map.pop(k) for k in list(resource_map.iterkeys())
+         if 'quantum' in k or 'neutron' in k]
+        resource_map[NOVA_CONF]['contexts'].append(
+            nova_cc_context.NeutronAPIContext())
+    else:
+        resource_map[NOVA_CONF]['contexts'].append(
+            nova_cc_context.NeutronCCContext())
+        # pop out irrelevant resources from the OrderedDict (easier than
+        # adding them late)
+        if net_manager != 'quantum':
+            [resource_map.pop(k) for k in list(resource_map.iterkeys())
+             if 'quantum' in k]
+        if net_manager != 'neutron':
+            [resource_map.pop(k) for k in list(resource_map.iterkeys())
+             if 'neutron' in k]
+        # add neutron plugin requirements. nova-c-c only needs the
+        # neutron-server associated with configs, not the plugin agent.
+        if net_manager in ['quantum', 'neutron']:
+            plugin = neutron_plugin()
+            if plugin:
+                conf = neutron_plugin_attribute(plugin, 'config', net_manager)
+                ctxts = (neutron_plugin_attribute(plugin, 'contexts',
+                                                  net_manager)
+                         or [])
+                services = neutron_plugin_attribute(plugin, 'server_services',
+                                                    net_manager)
+                resource_map[conf] = {}
+                resource_map[conf]['services'] = services
+                resource_map[conf]['contexts'] = ctxts
+                resource_map[conf]['contexts'].append(
+                    nova_cc_context.NeutronCCContext())
 
             # update for postgres
             resource_map[conf]['contexts'].append(
                 nova_cc_context.NeutronPostgresqlDBContext())
 
     # nova-conductor for releases >= G.
     if os_release('nova-common') not in ['essex', 'folsom']:
         resource_map['/etc/nova/nova.conf']['services'] += ['nova-conductor']
 
+    if console_attributes('services'):
+        resource_map['/etc/nova/nova.conf']['services'] += \
+            console_attributes('services')
+
     # also manage any configs that are being updated by subordinates.
     vmware_ctxt = context.SubordinateConfigContext(interface='nova-vmware',
                                                    service='nova',
@@ -238,6 +282,7 @@
     for s in vmware_ctxt['services']:
         if s not in resource_map[NOVA_CONF]['services']:
             resource_map[NOVA_CONF]['services'].append(s)
+
     return resource_map
 
 
@@ -268,9 +313,9 @@
     '''Assemble a list of API ports for services we are managing'''
     ports = []
     for services in restart_map().values():
-        for service in services:
+        for svc in services:
             try:
-                ports.append(API_PORTS[service])
+                ports.append(API_PORTS[svc])
             except KeyError:
                 pass
     return list(set(ports))
@@ -280,6 +325,27 @@
     return API_PORTS[service]
 
 
+def console_attributes(attr, proto=None):
+    '''Leave proto unset to query attributes of the protocol specified at
+    runtime'''
+    if proto:
+        console_proto = proto
+    else:
+        console_proto = config('console-access-protocol')
+    if attr == 'protocol':
+        return console_proto
+    # 'vnc' is a virtual type made up of novnc and xvpvnc
+    if console_proto == 'vnc':
+        if attr in ['packages', 'services']:
+            return list(set(CONSOLE_CONFIG['novnc'][attr] +
+                            CONSOLE_CONFIG['xvpvnc'][attr]))
+        else:
+            return None
+    if console_proto in CONSOLE_CONFIG:
+        return CONSOLE_CONFIG[console_proto][attr]
+    return None
+
+
 def determine_packages():
     # currently all packages match service names
     packages = [] + BASE_PACKAGES
@@ -289,6 +355,8 @@
         pkgs = neutron_plugin_attribute(neutron_plugin(), 'server_packages',
                                         network_manager())
         packages.extend(pkgs)
+    if console_attributes('packages'):
+        packages.extend(console_attributes('packages'))
     return list(set(packages))
 
 
@@ -486,6 +554,12 @@
         log('Migrating the nova database.', level=INFO)
         cmd = ['nova-manage', 'db', 'sync']
         subprocess.check_output(cmd)
+        if is_relation_made('cluster'):
+            log('Informing peers that dbsync is complete', level=INFO)
+            peer_store('dbsync_state', 'complete')
+        log('Enabling services', level=INFO)
+        enable_services()
+        cmd_all_services('start')
 
 
 def auth_token_config(setting):
@@ -512,8 +586,11 @@
         return b64encode(_in.read())
 
 
-def ssh_directory_for_unit(user=None):
-    remote_service = remote_unit().split('/')[0]
+def ssh_directory_for_unit(unit=None, user=None):
+    if unit:
+        remote_service = unit.split('/')[0]
+    else:
+        remote_service = remote_unit().split('/')[0]
     if user:
         remote_service = "{}_{}".format(remote_service, user)
     _dir = os.path.join(NOVA_SSH_DIR, remote_service)
@@ -527,29 +604,29 @@
     return _dir
 
 
-def known_hosts(user=None):
-    return os.path.join(ssh_directory_for_unit(user), 'known_hosts')
+def known_hosts(unit=None, user=None):
+    return os.path.join(ssh_directory_for_unit(unit, user), 'known_hosts')
 
 
-def authorized_keys(user=None):
-    return os.path.join(ssh_directory_for_unit(user), 'authorized_keys')
+def authorized_keys(unit=None, user=None):
+    return os.path.join(ssh_directory_for_unit(unit, user), 'authorized_keys')
 
 
-def ssh_known_host_key(host, user=None):
-    cmd = ['ssh-keygen', '-f', known_hosts(user), '-H', '-F', host]
+def ssh_known_host_key(host, unit=None, user=None):
+    cmd = ['ssh-keygen', '-f', known_hosts(unit, user), '-H', '-F', host]
     try:
         return subprocess.check_output(cmd).strip()
     except subprocess.CalledProcessError:
         return None
 
 
-def remove_known_host(host, user=None):
+def remove_known_host(host, unit=None, user=None):
     log('Removing SSH known host entry for compute host at %s' % host)
-    cmd = ['ssh-keygen', '-f', known_hosts(user), '-R', host]
+    cmd = ['ssh-keygen', '-f', known_hosts(unit, user), '-R', host]
     subprocess.check_call(cmd)
 
 
-def add_known_host(host, user=None):
+def add_known_host(host, unit=None, user=None):
     '''Add variations of host to a known hosts file.'''
     cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
     try:
@@ -558,34 +635,37 @@
         log('Could not obtain SSH host key from %s' % host, level=ERROR)
         raise e
 
-    current_key = ssh_known_host_key(host, user)
+    current_key = ssh_known_host_key(host, unit, user)
     if current_key:
         if remote_key == current_key:
             log('Known host key for compute host %s up to date.' % host)
             return
         else:
-            remove_known_host(host, user)
+            remove_known_host(host, unit, user)
 
     log('Adding SSH host key to known hosts for compute node at %s.' % host)
-    with open(known_hosts(user), 'a') as out:
+    with open(known_hosts(unit, user), 'a') as out:
         out.write(remote_key + '\n')
 
 
-def ssh_authorized_key_exists(public_key, user=None):
-    with open(authorized_keys(user)) as keys:
+def ssh_authorized_key_exists(public_key, unit=None, user=None):
+    with open(authorized_keys(unit, user)) as keys:
         return (' %s ' % public_key) in keys.read()
 
 
-def add_authorized_key(public_key, user=None):
-    with open(authorized_keys(user), 'a') as keys:
+def add_authorized_key(public_key, unit=None, user=None):
+    with open(authorized_keys(unit, user), 'a') as keys:
         keys.write(public_key + '\n')
 
 
-def ssh_compute_add(public_key, user=None):
+def ssh_compute_add(public_key, rid=None, unit=None, user=None):
     # If remote compute node hands us a hostname, ensure we have a
     # known hosts entry for its IP, hostname and FQDN.
-    private_address = relation_get('private-address')
+    private_address = relation_get(rid=rid, unit=unit,
+                                   attribute='private-address')
     hosts = [private_address]
+    if relation_get('hostname'):
+        hosts.append(relation_get('hostname'))
 
     if not is_ip(private_address):
         hosts.append(get_host_ip(private_address))
@@ -596,31 +676,41 @@
             hosts.append(hn.split('.')[0])
 
     for host in list(set(hosts)):
-        if not ssh_known_host_key(host, user):
-            add_known_host(host, user)
+        if not ssh_known_host_key(host, unit, user):
+            add_known_host(host, unit, user)
 
-    if not ssh_authorized_key_exists(public_key, user):
+    if not ssh_authorized_key_exists(public_key, unit, user):
         log('Saving SSH authorized key for compute host at %s.' %
             private_address)
-        add_authorized_key(public_key, user)
+        add_authorized_key(public_key, unit, user)
 
 
-def ssh_known_hosts_b64(user=None):
-    with open(known_hosts(user)) as hosts:
-        return b64encode(hosts.read())
+def ssh_known_hosts_lines(unit=None, user=None):
+    known_hosts_list = []
+
+    with open(known_hosts(unit, user)) as hosts:
+        for hosts_line in hosts:
+            if hosts_line.rstrip():
+                known_hosts_list.append(hosts_line.rstrip())
+    return(known_hosts_list)
 
 
-def ssh_authorized_keys_b64(user=None):
-    with open(authorized_keys(user)) as keys:
-        return b64encode(keys.read())
+def ssh_authorized_keys_lines(unit=None, user=None):
+    authorized_keys_list = []
+
+    with open(authorized_keys(unit, user)) as keys:
+        for authkey_line in keys:
+            if authkey_line.rstrip():
+                authorized_keys_list.append(authkey_line.rstrip())
+    return(authorized_keys_list)
 
 
-def ssh_compute_remove(public_key, user=None):
-    if not (os.path.isfile(authorized_keys(user)) or
-            os.path.isfile(known_hosts(user))):
+def ssh_compute_remove(public_key, unit=None, user=None):
+    if not (os.path.isfile(authorized_keys(unit, user)) or
+            os.path.isfile(known_hosts(unit, user))):
         return
 
-    with open(authorized_keys(user)) as _keys:
+    with open(authorized_keys(unit, user)) as _keys:
         keys = [k.strip() for k in _keys.readlines()]
 
     if public_key not in keys:
@@ -628,67 +718,101 @@
 
     [keys.remove(key) for key in keys if key == public_key]
 
-    with open(authorized_keys(user), 'w') as _keys:
+    with open(authorized_keys(unit, user), 'w') as _keys:
         keys = '\n'.join(keys)
         if not keys.endswith('\n'):
             keys += '\n'
         _keys.write(keys)
 
 
-def determine_endpoints(url):
+def determine_endpoints(public_url, internal_url, admin_url):
     '''Generates a dictionary containing all relevant endpoints to be
     passed to keystone as relation settings.'''
     region = config('region')
     os_rel = os_release('nova-common')
 
     if os_rel >= 'grizzly':
-        nova_url = ('%s:%s/v2/$(tenant_id)s' %
-                    (url, api_port('nova-api-os-compute')))
+        nova_public_url = ('%s:%s/v2/$(tenant_id)s' %
+                           (public_url, api_port('nova-api-os-compute')))
+        nova_internal_url = ('%s:%s/v2/$(tenant_id)s' %
+                             (internal_url, api_port('nova-api-os-compute')))
+        nova_admin_url = ('%s:%s/v2/$(tenant_id)s' %
+                          (admin_url, api_port('nova-api-os-compute')))
     else:
-        nova_url = ('%s:%s/v1.1/$(tenant_id)s' %
-                    (url, api_port('nova-api-os-compute')))
-    ec2_url = '%s:%s/services/Cloud' % (url, api_port('nova-api-ec2'))
-    nova_volume_url = ('%s:%s/v1/$(tenant_id)s' %
-                       (url, api_port('nova-api-os-compute')))
-    neutron_url = '%s:%s' % (url, api_port('neutron-server'))
-    s3_url = '%s:%s' % (url, api_port('nova-objectstore'))
+        nova_public_url = ('%s:%s/v1.1/$(tenant_id)s' %
+                           (public_url, api_port('nova-api-os-compute')))
+        nova_internal_url = ('%s:%s/v1.1/$(tenant_id)s' %
+                             (internal_url, api_port('nova-api-os-compute')))
+        nova_admin_url = ('%s:%s/v1.1/$(tenant_id)s' %
+                          (admin_url, api_port('nova-api-os-compute')))
+
+    ec2_public_url = '%s:%s/services/Cloud' % (
+        public_url, api_port('nova-api-ec2'))
+    ec2_internal_url = '%s:%s/services/Cloud' % (
+        internal_url, api_port('nova-api-ec2'))
+    ec2_admin_url = '%s:%s/services/Cloud' % (admin_url,
+                                              api_port('nova-api-ec2'))
+
+    nova_volume_public_url = ('%s:%s/v1/$(tenant_id)s' %
+                              (public_url, api_port('nova-api-os-compute')))
+    nova_volume_internal_url = ('%s:%s/v1/$(tenant_id)s' %
+                                (internal_url,
+                                 api_port('nova-api-os-compute')))
+    nova_volume_admin_url = ('%s:%s/v1/$(tenant_id)s' %
+                             (admin_url, api_port('nova-api-os-compute')))
+
+    neutron_public_url = '%s:%s' % (public_url, api_port('neutron-server'))
+    neutron_internal_url = '%s:%s' % (internal_url, api_port('neutron-server'))
+    neutron_admin_url = '%s:%s' % (admin_url, api_port('neutron-server'))
+
+    s3_public_url = '%s:%s' % (public_url, api_port('nova-objectstore'))
+    s3_internal_url = '%s:%s' % (internal_url, api_port('nova-objectstore'))
+    s3_admin_url = '%s:%s' % (admin_url, api_port('nova-objectstore'))
 
     # the base endpoints
     endpoints = {
         'nova_service': 'nova',
         'nova_region': region,
-        'nova_public_url': nova_url,
-        'nova_admin_url': nova_url,
-        'nova_internal_url': nova_url,
+        'nova_public_url': nova_public_url,
+        'nova_admin_url': nova_admin_url,
+        'nova_internal_url': nova_internal_url,
         'ec2_service': 'ec2',
         'ec2_region': region,
-        'ec2_public_url': ec2_url,
-        'ec2_admin_url': ec2_url,
-        'ec2_internal_url': ec2_url,
+        'ec2_public_url': ec2_public_url,
+        'ec2_admin_url': ec2_admin_url,
+        'ec2_internal_url': ec2_internal_url,
         's3_service': 's3',
         's3_region': region,
-        's3_public_url': s3_url,
-        's3_admin_url': s3_url,
-        's3_internal_url': s3_url,
+        's3_public_url': s3_public_url,
+        's3_admin_url': s3_admin_url,
+        's3_internal_url': s3_internal_url,
     }
 
     if relation_ids('nova-volume-service'):
         endpoints.update({
             'nova-volume_service': 'nova-volume',
             'nova-volume_region': region,
-            'nova-volume_public_url': nova_volume_url,
-            'nova-volume_admin_url': nova_volume_url,
-            'nova-volume_internal_url': nova_volume_url,
+            'nova-volume_public_url': nova_volume_public_url,
+            'nova-volume_admin_url': nova_volume_admin_url,
+            'nova-volume_internal_url': nova_volume_internal_url,
         })
 
     # XXX: Keep these relations named quantum_*??
-    if network_manager() in ['quantum', 'neutron']:
+    if is_relation_made('neutron-api'):
+        endpoints.update({
+            'quantum_service': None,
+            'quantum_region': None,
+            'quantum_public_url': None,
+            'quantum_admin_url': None,
+            'quantum_internal_url': None,
+        })
+    elif network_manager() in ['quantum', 'neutron']:
         endpoints.update({
             'quantum_service': 'quantum',
             'quantum_region': region,
-            'quantum_public_url': neutron_url,
-            'quantum_admin_url': neutron_url,
-            'quantum_internal_url': neutron_url,
+            'quantum_public_url': neutron_public_url,
+            'quantum_admin_url': neutron_admin_url,
+            'quantum_internal_url': neutron_internal_url,
         })
 
     return endpoints
@@ -698,59 +822,141 @@
     # quantum-plugin config setting can be safely overriden
     # as we only supported OVS in G/neutron
     return config('neutron-plugin') or config('quantum-plugin')
-
-
-def guard_map():
-    '''Map of services and required interfaces that must be present before
-    the service should be allowed to start'''
-    gmap = {}
-    nova_services = deepcopy(BASE_SERVICES)
-    if os_release('nova-common') not in ['essex', 'folsom']:
-        nova_services.append('nova-conductor')
-
-    nova_interfaces = ['identity-service', 'amqp']
-    if relation_ids('pgsql-nova-db'):
-        nova_interfaces.append('pgsql-nova-db')
-    else:
-        nova_interfaces.append('shared-db')
-
-    for svc in nova_services:
-        gmap[svc] = nova_interfaces
-
-    net_manager = network_manager()
-    if net_manager in ['neutron', 'quantum'] and \
-       not is_relation_made('neutron-api'):
-        neutron_interfaces = ['identity-service', 'amqp']
-        if relation_ids('pgsql-neutron-db'):
-            neutron_interfaces.append('pgsql-neutron-db')
-        else:
-            neutron_interfaces.append('shared-db')
-        if network_manager() == 'quantum':
-            gmap['quantum-server'] = neutron_interfaces
-        else:
-            gmap['neutron-server'] = neutron_interfaces
-
-    return gmap
-
-
-def service_guard(guard_map, contexts, active=False):
-    '''Inhibit services in guard_map from running unless
-    required interfaces are found complete in contexts.'''
-    def wrap(f):
-        def wrapped_f(*args):
-            if active is True:
-                incomplete_services = []
-                for svc in guard_map:
-                    for interface in guard_map[svc]:
-                        if interface not in contexts.complete_contexts():
-                            incomplete_services.append(svc)
-                f(*args)
-                for svc in incomplete_services:
-                    if service_running(svc):
-                        log('Service {} has unfulfilled '
-                            'interface requirements, stopping.'.format(svc))
-                        service_stop(svc)
-            else:
-                f(*args)
-        return wrapped_f
-    return wrap
+<<<<<<< TREE
+
+
+def guard_map():
+    '''Map of services and required interfaces that must be present before
+    the service should be allowed to start'''
+    gmap = {}
+    nova_services = deepcopy(BASE_SERVICES)
+    if os_release('nova-common') not in ['essex', 'folsom']:
+        nova_services.append('nova-conductor')
+
+    nova_interfaces = ['identity-service', 'amqp']
+    if relation_ids('pgsql-nova-db'):
+        nova_interfaces.append('pgsql-nova-db')
+    else:
+        nova_interfaces.append('shared-db')
+
+    for svc in nova_services:
+        gmap[svc] = nova_interfaces
+
+    net_manager = network_manager()
+    if net_manager in ['neutron', 'quantum'] and \
+       not is_relation_made('neutron-api'):
+        neutron_interfaces = ['identity-service', 'amqp']
+        if relation_ids('pgsql-neutron-db'):
+            neutron_interfaces.append('pgsql-neutron-db')
+        else:
+            neutron_interfaces.append('shared-db')
+        if network_manager() == 'quantum':
+            gmap['quantum-server'] = neutron_interfaces
+        else:
+            gmap['neutron-server'] = neutron_interfaces
+
+    return gmap
+
+
+def service_guard(guard_map, contexts, active=False):
+    '''Inhibit services in guard_map from running unless
+    required interfaces are found complete in contexts.'''
+    def wrap(f):
+        def wrapped_f(*args):
+            if active is True:
+                incomplete_services = []
+                for svc in guard_map:
+                    for interface in guard_map[svc]:
+                        if interface not in contexts.complete_contexts():
+                            incomplete_services.append(svc)
+                f(*args)
+                for svc in incomplete_services:
+                    if service_running(svc):
+                        log('Service {} has unfulfilled '
+                            'interface requirements, stopping.'.format(svc))
+                        service_stop(svc)
+            else:
+                f(*args)
+        return wrapped_f
+    return wrap
+=======
+
+
+def guard_map():
+    '''Map of services and required interfaces that must be present before
+    the service should be allowed to start'''
+    gmap = {}
+    nova_services = deepcopy(BASE_SERVICES)
+    if os_release('nova-common') not in ['essex', 'folsom']:
+        nova_services.append('nova-conductor')
+
+    nova_interfaces = ['identity-service', 'amqp']
+    if relation_ids('pgsql-nova-db'):
+        nova_interfaces.append('pgsql-nova-db')
+    else:
+        nova_interfaces.append('shared-db')
+
+    for svc in nova_services:
+        gmap[svc] = nova_interfaces
+
+    net_manager = network_manager()
+    if net_manager in ['neutron', 'quantum'] and \
+       not is_relation_made('neutron-api'):
+        neutron_interfaces = ['identity-service', 'amqp']
+        if relation_ids('pgsql-neutron-db'):
+            neutron_interfaces.append('pgsql-neutron-db')
+        else:
+            neutron_interfaces.append('shared-db')
+        if network_manager() == 'quantum':
+            gmap['quantum-server'] = neutron_interfaces
+        else:
+            gmap['neutron-server'] = neutron_interfaces
+
+    return gmap
+
+
+def service_guard(guard_map, contexts, active=False):
+    '''Inhibit services in guard_map from running unless
+    required interfaces are found complete in contexts.'''
+    def wrap(f):
+        def wrapped_f(*args):
+            if active is True:
+                incomplete_services = []
+                for svc in guard_map:
+                    for interface in guard_map[svc]:
+                        if interface not in contexts.complete_contexts():
+                            incomplete_services.append(svc)
+                f(*args)
+                for svc in incomplete_services:
+                    if service_running(svc):
+                        log('Service {} has unfulfilled '
+                            'interface requirements, stopping.'.format(svc))
+                        service_stop(svc)
+            else:
+                f(*args)
+        return wrapped_f
+    return wrap
+
+
+def cmd_all_services(cmd):
+    if cmd == 'start':
+        for svc in services():
+            if not service_running(svc):
+                service_start(svc)
+    else:
+        for svc in services():
+            service(cmd, svc)
+
+
+def disable_services():
+    for svc in services():
+        with open('/etc/init/{}.override'.format(svc), 'wb') as out:
+            out.write('exec true\n')
+
+
+def enable_services():
+    for svc in services():
+        override_file = '/etc/init/{}.override'.format(svc)
+        if os.path.isfile(override_file):
+            os.remove(override_file)
+>>>>>>> MERGE-SOURCE
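Note for reviewers: console_attributes() above resolves per-protocol
packages, services and proxy settings from the CONSOLE_CONFIG table, treating
'vnc' as a virtual protocol that unions the 'novnc' and 'xvpvnc' entries. The
lookups it should produce, per the table added in this diff (a sketch, not
charm code):

    # Sketch only: expected console_attributes() results.
    console_attributes('packages', proto='novnc')
    # -> ['nova-novncproxy', 'nova-consoleauth']
    console_attributes('proxy-port', proto='spice')
    # -> 6082
    sorted(console_attributes('packages', proto='vnc'))
    # -> ['nova-consoleauth', 'nova-novncproxy', 'nova-xvpvncproxy']
    console_attributes('proxy-port', proto='vnc')
    # -> None ('vnc' only resolves 'packages' and 'services')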
=== modified file 'metadata.yaml'
--- metadata.yaml 2014-03-31 11:56:09 +0000
+++ metadata.yaml 2014-09-16 09:08:32 +0000
@@ -30,6 +30,8 @@
     interface: nova-volume
   quantum-network-service:
     interface: quantum
+  neutron-api:
+    interface: neutron-api
   ha:
     interface: hacluster
     scope: container
=== modified file 'revision'
--- revision 2014-04-16 08:25:14 +0000
+++ revision 2014-09-16 09:08:32 +0000
@@ -1,1 +1,1 @@
-1315
+1500
=== modified file 'templates/havana/nova.conf'
--- templates/havana/nova.conf 2014-08-01 11:04:31 +0000
+++ templates/havana/nova.conf 2014-09-16 09:08:32 +0000
@@ -20,8 +20,17 @@
 enabled_apis=ec2,osapi_compute,metadata
 auth_strategy=keystone
 compute_driver=libvirt.LibvirtDriver
-use_syslog={{ use_syslog }}
-
+<<<<<<< TREE
+use_syslog={{ use_syslog }}
+
+=======
+osapi_compute_workers = {{ workers }}
+ec2_workers = {{ workers }}
+scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,CoreFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
+cpu_allocation_ratio = {{ cpu_allocation_ratio }}
+use_syslog={{ use_syslog }}
+
+>>>>>>> MERGE-SOURCE
 {% if keystone_ec2_url -%}
 keystone_ec2_url = {{ keystone_ec2_url }}
 {% endif -%}
=== modified file 'templates/icehouse/neutron.conf'
--- templates/icehouse/neutron.conf 2014-08-01 11:04:31 +0000
+++ templates/icehouse/neutron.conf 2014-09-16 09:08:32 +0000
@@ -8,7 +8,12 @@
 bind_host = 0.0.0.0
 auth_strategy = keystone
 notification_driver = neutron.openstack.common.notifier.rpc_notifier
+<<<<<<< TREE
 use_syslog={{ use_syslog }}
+=======
+api_workers = {{ workers }}
+use_syslog = {{ use_syslog }}
+>>>>>>> MERGE-SOURCE
 
 {% if neutron_bind_port -%}
 bind_port = {{ neutron_bind_port }}
=== modified file 'templates/icehouse/nova.conf'
--- templates/icehouse/nova.conf 2014-08-01 11:04:31 +0000
+++ templates/icehouse/nova.conf 2014-09-16 09:08:32 +0000
@@ -1,3 +1,4 @@
+# icehouse
 ###############################################################################
 # [ WARNING ]
 # Configuration file maintained by Juju. Local changes may be overwritten.
@@ -20,8 +21,21 @@
 enabled_apis=ec2,osapi_compute,metadata
 auth_strategy=keystone
 compute_driver=libvirt.LibvirtDriver
-use_syslog={{ use_syslog }}
-
+<<<<<<< TREE
+use_syslog={{ use_syslog }}
+
+=======
+
+osapi_compute_workers = {{ workers }}
+ec2_workers = {{ workers }}
+
+scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,CoreFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
+cpu_allocation_ratio = {{ cpu_allocation_ratio }}
+ram_allocation_ratio = {{ ram_allocation_ratio }}
+
+use_syslog={{ use_syslog }}
+
+>>>>>>> MERGE-SOURCE
 {% if keystone_ec2_url -%}
 keystone_ec2_url = {{ keystone_ec2_url }}
 {% endif -%}
@@ -130,3 +144,5 @@
 [osapi_v3]
 enabled=True
 
+[conductor]
+workers = {{ workers }}
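Note for reviewers: once the conflict markers above are resolved, the
templates gain worker-count and allocation-ratio knobs rendered from the new
NovaConfigContext. A minimal render sketch in plain Jinja2 — the context
values here are illustrative only, not charm defaults:

    # Sketch only: how the new template variables are substituted.
    from jinja2 import Template

    snippet = ("osapi_compute_workers = {{ workers }}\n"
               "cpu_allocation_ratio = {{ cpu_allocation_ratio }}\n"
               "ram_allocation_ratio = {{ ram_allocation_ratio }}\n")
    print(Template(snippet).render(workers=4,
                                   cpu_allocation_ratio=16.0,
                                   ram_allocation_ratio=0.98))
    # osapi_compute_workers = 4
    # cpu_allocation_ratio = 16.0
    # ram_allocation_ratio = 0.98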
=== added directory 'tests'
=== added file 'tests/00-setup'
--- tests/00-setup 1970-01-01 00:00:00 +0000
+++ tests/00-setup 2014-09-16 09:08:32 +0000
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -ex
+
+sudo add-apt-repository --yes ppa:juju/stable
+sudo apt-get update --yes
+sudo apt-get install --yes python-amulet
+sudo apt-get install --yes python-glanceclient
+sudo apt-get install --yes python-keystoneclient
+sudo apt-get install --yes python-novaclient
=== added file 'tests/10-basic-precise-essex'
--- tests/10-basic-precise-essex 1970-01-01 00:00:00 +0000
+++ tests/10-basic-precise-essex 2014-09-16 09:08:32 +0000
@@ -0,0 +1,10 @@
+#!/usr/bin/python
+
+"""Amulet tests on a basic nova cloud controller deployment on
+   precise-essex."""
+
+from basic_deployment import NovaCCBasicDeployment
+
+if __name__ == '__main__':
+    deployment = NovaCCBasicDeployment(series='precise')
+    deployment.run_tests()
=== added file 'tests/11-basic-precise-folsom'
--- tests/11-basic-precise-folsom 1970-01-01 00:00:00 +0000
+++ tests/11-basic-precise-folsom 2014-09-16 09:08:32 +0000
@@ -0,0 +1,18 @@
+#!/usr/bin/python
+
+"""Amulet tests on a basic nova cloud controller deployment on
+   precise-folsom."""
+
+import amulet
+from basic_deployment import NovaCCBasicDeployment
+
+if __name__ == '__main__':
+    # NOTE(coreycb): Skipping failing test until resolved. 'nova-manage db sync'
+    #                fails in shared-db-relation-changed (only fails on folsom)
+    message = "Skipping failing test until resolved"
+    amulet.raise_status(amulet.SKIP, msg=message)
+
+    deployment = NovaCCBasicDeployment(series='precise',
+                                       openstack='cloud:precise-folsom',
+                                       source='cloud:precise-updates/folsom')
+    deployment.run_tests()
=== added file 'tests/12-basic-precise-grizzly'
--- tests/12-basic-precise-grizzly 1970-01-01 00:00:00 +0000
+++ tests/12-basic-precise-grizzly 2014-09-16 09:08:32 +0000
@@ -0,0 +1,12 @@
+#!/usr/bin/python
+
+"""Amulet tests on a basic nova cloud controller deployment on
+   precise-grizzly."""
+
+from basic_deployment import NovaCCBasicDeployment
+
+if __name__ == '__main__':
+    deployment = NovaCCBasicDeployment(series='precise',
+                                       openstack='cloud:precise-grizzly',
+                                       source='cloud:precise-updates/grizzly')
+    deployment.run_tests()
=== added file 'tests/13-basic-precise-havana'
--- tests/13-basic-precise-havana 1970-01-01 00:00:00 +0000
+++ tests/13-basic-precise-havana 2014-09-16 09:08:32 +0000
@@ -0,0 +1,12 @@
+#!/usr/bin/python
+
+"""Amulet tests on a basic nova cloud controller deployment on
+   precise-havana."""
+
+from basic_deployment import NovaCCBasicDeployment
+
+if __name__ == '__main__':
+    deployment = NovaCCBasicDeployment(series='precise',
+                                       openstack='cloud:precise-havana',
+                                       source='cloud:precise-updates/havana')
+    deployment.run_tests()
=== added file 'tests/14-basic-precise-icehouse'
--- tests/14-basic-precise-icehouse 1970-01-01 00:00:00 +0000
+++ tests/14-basic-precise-icehouse 2014-09-16 09:08:32 +0000
@@ -0,0 +1,12 @@
+#!/usr/bin/python
+
+"""Amulet tests on a basic nova cloud controller deployment on
+   precise-icehouse."""
+
+from basic_deployment import NovaCCBasicDeployment
+
+if __name__ == '__main__':
+    deployment = NovaCCBasicDeployment(series='precise',
+                                       openstack='cloud:precise-icehouse',
+                                       source='cloud:precise-updates/icehouse')
+    deployment.run_tests()
=== added file 'tests/15-basic-trusty-icehouse'
--- tests/15-basic-trusty-icehouse 1970-01-01 00:00:00 +0000
+++ tests/15-basic-trusty-icehouse 2014-09-16 09:08:32 +0000
@@ -0,0 +1,10 @@
+#!/usr/bin/python
+
+"""Amulet tests on a basic nova cloud controller deployment on
+   trusty-icehouse."""
+
+from basic_deployment import NovaCCBasicDeployment
+
+if __name__ == '__main__':
+    deployment = NovaCCBasicDeployment(series='trusty')
+    deployment.run_tests()
=== added file 'tests/README'
--- tests/README 1970-01-01 00:00:00 +0000
+++ tests/README 2014-09-16 09:08:32 +0000
@@ -0,0 +1,47 @@
+This directory provides Amulet tests that focus on verification of Nova Cloud
+Controller deployments.
+
+If you use a web proxy server to access the web, you'll need to set the
+AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.
+
+The following examples demonstrate different ways that tests can be executed.
+All examples are run from the charm's root directory.
+
+  * To run all tests (starting with 00-setup):
+
+      make test
+
+  * To run a specific test module (or modules):
+
+      juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
+
+  * To run a specific test module (or modules), and keep the environment
+    deployed after a failure:
+
+      juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
+
+  * To re-run a test module against an already deployed environment (one
+    that was deployed by a previous call to 'juju test --set-e'):
+
+      ./tests/15-basic-trusty-icehouse
+
+For debugging and test development purposes, all code should be idempotent.
+In other words, the code should have the ability to be re-run without changing
+the results beyond the initial run. This enables editing and re-running of a
+test module against an already deployed environment, as described above.
+
+Manual debugging tips:
+
+  * Set the following env vars before using the OpenStack CLI as admin:
+      export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
+      export OS_TENANT_NAME=admin
+      export OS_USERNAME=admin
+      export OS_PASSWORD=openstack
+      export OS_REGION_NAME=RegionOne
+
+  * Set the following env vars before using the OpenStack CLI as demoUser:
+      export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
+      export OS_TENANT_NAME=demoTenant
+      export OS_USERNAME=demoUser
+      export OS_PASSWORD=password
+      export OS_REGION_NAME=RegionOne
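Note for reviewers: the idempotence requirement in the README above is what
basic_deployment.py's _initialize_tests() implements with its tenant_exists()
guard. The create-if-absent pattern, reduced to a sketch (the function name
here is illustrative, not part of the test suite):

    # Sketch only: create-if-absent keeps test setup re-runnable.
    def ensure_demo_tenant(keystone, name='demoTenant'):
        if not any(t.name == name for t in keystone.tenants.list()):
            keystone.tenants.create(tenant_name=name,
                                    description='demo tenant',
                                    enabled=True)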
=== added file 'tests/basic_deployment.py'
--- tests/basic_deployment.py 1970-01-01 00:00:00 +0000
+++ tests/basic_deployment.py 2014-09-16 09:08:32 +0000
@@ -0,0 +1,520 @@
1#!/usr/bin/python
2
3import amulet
4
5from charmhelpers.contrib.openstack.amulet.deployment import (
6 OpenStackAmuletDeployment
7)
8
9from charmhelpers.contrib.openstack.amulet.utils import (
10 OpenStackAmuletUtils,
11 DEBUG, # flake8: noqa
12 ERROR
13)
14
15# Use DEBUG to turn on debug logging
16u = OpenStackAmuletUtils(ERROR)
17
18
19class NovaCCBasicDeployment(OpenStackAmuletDeployment):
20 """Amulet tests on a basic nova cloud controller deployment."""
21
22 def __init__(self, series=None, openstack=None, source=None):
23 """Deploy the entire test environment."""
24 super(NovaCCBasicDeployment, self).__init__(series, openstack, source)
25 self._add_services()
26 self._add_relations()
27 self._configure_services()
28 self._deploy()
29 self._initialize_tests()
30
31 def _add_services(self):
32 """Add the service that we're testing, including the number of units,
33 where nova-cloud-controller is local, and the other charms are from
34 the charm store."""
35 this_service = ('nova-cloud-controller', 1)
36 other_services = [('mysql', 1), ('rabbitmq-server', 1),
37 ('nova-compute', 2), ('keystone', 1), ('glance', 1)]
38 super(NovaCCBasicDeployment, self)._add_services(this_service,
39 other_services)
40
41 def _add_relations(self):
42 """Add all of the relations for the services."""
43 relations = {
44 'nova-cloud-controller:shared-db': 'mysql:shared-db',
45 'nova-cloud-controller:identity-service': 'keystone:identity-service',
46 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp',
47 'nova-cloud-controller:cloud-compute': 'nova-compute:cloud-compute',
48 'nova-cloud-controller:image-service': 'glance:image-service',
49 'nova-compute:image-service': 'glance:image-service',
50 'nova-compute:shared-db': 'mysql:shared-db',
51 'nova-compute:amqp': 'rabbitmq-server:amqp',
52 'keystone:shared-db': 'mysql:shared-db',
53 'glance:identity-service': 'keystone:identity-service',
54 'glance:shared-db': 'mysql:shared-db',
55 'glance:amqp': 'rabbitmq-server:amqp'
56 }
57 super(NovaCCBasicDeployment, self)._add_relations(relations)
58
59 def _configure_services(self):
60 """Configure all of the services."""
61 keystone_config = {'admin-password': 'openstack',
62 'admin-token': 'ubuntutesting'}
63 configs = {'keystone': keystone_config}
64 super(NovaCCBasicDeployment, self)._configure_services(configs)
65
66 def _initialize_tests(self):
67 """Perform final initialization before tests get run."""
68 # Access the sentries for inspecting service units
69 self.mysql_sentry = self.d.sentry.unit['mysql/0']
70 self.keystone_sentry = self.d.sentry.unit['keystone/0']
71 self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
72 self.nova_cc_sentry = self.d.sentry.unit['nova-cloud-controller/0']
73 self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0']
74 self.glance_sentry = self.d.sentry.unit['glance/0']
75
76 # Authenticate admin with keystone
77 self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
78 user='admin',
79 password='openstack',
80 tenant='admin')
81
82 # Authenticate admin with glance endpoint
83 self.glance = u.authenticate_glance_admin(self.keystone)
84
85 # Create a demo tenant/role/user
86 self.demo_tenant = 'demoTenant'
87 self.demo_role = 'demoRole'
88 self.demo_user = 'demoUser'
89 if not u.tenant_exists(self.keystone, self.demo_tenant):
90 tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
91 description='demo tenant',
92 enabled=True)
93 self.keystone.roles.create(name=self.demo_role)
94 self.keystone.users.create(name=self.demo_user,
95 password='password',
96 tenant_id=tenant.id,
97 email='demo@demo.com')
98
99 # Authenticate demo user with keystone
100 self.keystone_demo = \
101 u.authenticate_keystone_user(self.keystone, user=self.demo_user,
102 password='password',
103 tenant=self.demo_tenant)
104
105 # Authenticate demo user with nova-api
106 self.nova_demo = u.authenticate_nova_user(self.keystone,
107 user=self.demo_user,
108 password='password',
109 tenant=self.demo_tenant)
110
111 def test_services(self):
112 """Verify the expected services are running on the corresponding
113 service units."""
114 commands = {
115 self.mysql_sentry: ['status mysql'],
116 self.rabbitmq_sentry: ['sudo service rabbitmq-server status'],
117 self.nova_cc_sentry: ['status nova-api-ec2',
118 'status nova-api-os-compute',
119 'status nova-objectstore',
120 'status nova-cert',
121 'status nova-scheduler'],
122 self.nova_compute_sentry: ['status nova-compute',
123 'status nova-network',
124 'status nova-api'],
125 self.keystone_sentry: ['status keystone'],
126 self.glance_sentry: ['status glance-registry', 'status glance-api']
127 }
128 if self._get_openstack_release() >= self.precise_grizzly:
129 commands[self.nova_cc_sentry] = ['status nova-conductor']
130
131 ret = u.validate_services(commands)
132 if ret:
133 amulet.raise_status(amulet.FAIL, msg=ret)
134
135 def test_service_catalog(self):
136 """Verify that the service catalog endpoint data is valid."""
137 endpoint_vol = {'adminURL': u.valid_url,
138 'region': 'RegionOne',
139 'publicURL': u.valid_url,
140 'internalURL': u.valid_url}
141 endpoint_id = {'adminURL': u.valid_url,
142 'region': 'RegionOne',
143 'publicURL': u.valid_url,
144 'internalURL': u.valid_url}
145 if self._get_openstack_release() >= self.precise_folsom:
146 endpoint_vol['id'] = u.not_null
147 endpoint_id['id'] = u.not_null
148 expected = {'s3': [endpoint_vol], 'compute': [endpoint_vol],
149 'ec2': [endpoint_vol], 'identity': [endpoint_id]}
150 actual = self.keystone_demo.service_catalog.get_endpoints()
151
152 ret = u.validate_svc_catalog_endpoint_data(expected, actual)
153 if ret:
154 amulet.raise_status(amulet.FAIL, msg=ret)
155
156 def test_openstack_compute_api_endpoint(self):
157 """Verify the openstack compute api (osapi) endpoint data."""
158 endpoints = self.keystone.endpoints.list()
159 admin_port = internal_port = public_port = '8774'
160 expected = {'id': u.not_null,
161 'region': 'RegionOne',
162 'adminurl': u.valid_url,
163 'internalurl': u.valid_url,
164 'publicurl': u.valid_url,
165 'service_id': u.not_null}
166
167 ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
168 public_port, expected)
169 if ret:
170 message = 'osapi endpoint: {}'.format(ret)
171 amulet.raise_status(amulet.FAIL, msg=message)
172
173 def test_ec2_api_endpoint(self):
174 """Verify the EC2 api endpoint data."""
175 endpoints = self.keystone.endpoints.list()
176 admin_port = internal_port = public_port = '8773'
177 expected = {'id': u.not_null,
178 'region': 'RegionOne',
179 'adminurl': u.valid_url,
180 'internalurl': u.valid_url,
181 'publicurl': u.valid_url,
182 'service_id': u.not_null}
183
184 ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
185 public_port, expected)
186 if ret:
187 message = 'EC2 endpoint: {}'.format(ret)
188 amulet.raise_status(amulet.FAIL, msg=message)
189
190 def test_s3_api_endpoint(self):
191 """Verify the S3 api endpoint data."""
192 endpoints = self.keystone.endpoints.list()
193 admin_port = internal_port = public_port = '3333'
194 expected = {'id': u.not_null,
195 'region': 'RegionOne',
196 'adminurl': u.valid_url,
197 'internalurl': u.valid_url,
198 'publicurl': u.valid_url,
199 'service_id': u.not_null}
200
201 ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
202 public_port, expected)
203 if ret:
204 message = 'S3 endpoint: {}'.format(ret)
205 amulet.raise_status(amulet.FAIL, msg=message)
206
207 def test_nova_cc_shared_db_relation(self):
208 """Verify the nova-cc to mysql shared-db relation data"""
209 unit = self.nova_cc_sentry
210 relation = ['shared-db', 'mysql:shared-db']
211 expected = {
212 'private-address': u.valid_ip,
213 'nova_database': 'nova',
214 'nova_username': 'nova',
215 'nova_hostname': u.valid_ip
216 }
217
218 ret = u.validate_relation_data(unit, relation, expected)
219 if ret:
220 message = u.relation_error('nova-cc shared-db', ret)
221 amulet.raise_status(amulet.FAIL, msg=message)
222
223 def test_mysql_shared_db_relation(self):
224 """Verify the mysql to nova-cc shared-db relation data"""
225 unit = self.mysql_sentry
226 relation = ['shared-db', 'nova-cloud-controller:shared-db']
227 expected = {
228 'private-address': u.valid_ip,
229 'nova_password': u.not_null,
230 'db_host': u.valid_ip
231 }
232
233 ret = u.validate_relation_data(unit, relation, expected)
234 if ret:
235 message = u.relation_error('mysql shared-db', ret)
236 amulet.raise_status(amulet.FAIL, msg=message)
237
238 def test_nova_cc_identity_service_relation(self):
239 """Verify the nova-cc to keystone identity-service relation data"""
240 unit = self.nova_cc_sentry
241 relation = ['identity-service', 'keystone:identity-service']
242 expected = {
243 'nova_internal_url': u.valid_url,
244 'nova_public_url': u.valid_url,
245 's3_public_url': u.valid_url,
246 's3_service': 's3',
247 'ec2_admin_url': u.valid_url,
248 'ec2_internal_url': u.valid_url,
249 'nova_service': 'nova',
250 's3_region': 'RegionOne',
251 'private-address': u.valid_ip,
252 'nova_region': 'RegionOne',
253 'ec2_public_url': u.valid_url,
254 'ec2_region': 'RegionOne',
255 's3_internal_url': u.valid_url,
256 's3_admin_url': u.valid_url,
257 'nova_admin_url': u.valid_url,
258 'ec2_service': 'ec2'
259 }
260
261 ret = u.validate_relation_data(unit, relation, expected)
262 if ret:
263 message = u.relation_error('nova-cc identity-service', ret)
264 amulet.raise_status(amulet.FAIL, msg=message)
265
266 def test_keystone_identity_service_relation(self):
267 """Verify the keystone to nova-cc identity-service relation data"""
268 unit = self.keystone_sentry
269 relation = ['identity-service',
270 'nova-cloud-controller:identity-service']
271 expected = {
272 'service_protocol': 'http',
273 'service_tenant': 'services',
274 'admin_token': 'ubuntutesting',
275 'service_password': u.not_null,
276 'service_port': '5000',
277 'auth_port': '35357',
278 'auth_protocol': 'http',
279 'private-address': u.valid_ip,
280 'https_keystone': 'False',
281 'auth_host': u.valid_ip,
282 'service_username': 's3_ec2_nova',
283 'service_tenant_id': u.not_null,
284 'service_host': u.valid_ip
285 }
286
287 ret = u.validate_relation_data(unit, relation, expected)
288 if ret:
289 message = u.relation_error('keystone identity-service', ret)
290 amulet.raise_status(amulet.FAIL, msg=message)
291
292 def test_nova_cc_amqp_relation(self):
293 """Verify the nova-cc to rabbitmq-server amqp relation data"""
294 unit = self.nova_cc_sentry
295 relation = ['amqp', 'rabbitmq-server:amqp']
296 expected = {
297 'username': 'nova',
298 'private-address': u.valid_ip,
299 'vhost': 'openstack'
300 }
301
302 ret = u.validate_relation_data(unit, relation, expected)
303 if ret:
304 message = u.relation_error('nova-cc amqp', ret)
305 amulet.raise_status(amulet.FAIL, msg=message)
306
307 def test_rabbitmq_amqp_relation(self):
308 """Verify the rabbitmq-server to nova-cc amqp relation data"""
309 unit = self.rabbitmq_sentry
310 relation = ['amqp', 'nova-cloud-controller:amqp']
311 expected = {
312 'private-address': u.valid_ip,
313 'password': u.not_null,
314 'hostname': u.valid_ip
315 }
316
317 ret = u.validate_relation_data(unit, relation, expected)
318 if ret:
319 message = u.relation_error('rabbitmq amqp', ret)
320 amulet.raise_status(amulet.FAIL, msg=message)
321
322 def test_nova_cc_cloud_compute_relation(self):
323 """Verify the nova-cc to nova-compute cloud-compute relation data"""
324 unit = self.nova_cc_sentry
325 relation = ['cloud-compute', 'nova-compute:cloud-compute']
326 expected = {
327 'volume_service': 'cinder',
328 'network_manager': 'flatdhcpmanager',
329 'ec2_host': u.valid_ip,
330 'private-address': u.valid_ip,
331 'restart_trigger': u.not_null
332 }
333 if self._get_openstack_release() == self.precise_essex:
334 expected['volume_service'] = 'nova-volume'
335
336 ret = u.validate_relation_data(unit, relation, expected)
337 if ret:
338 message = u.relation_error('nova-cc cloud-compute', ret)
339 amulet.raise_status(amulet.FAIL, msg=message)
340
341 def test_nova_cloud_compute_relation(self):
342 """Verify the nova-compute to nova-cc cloud-compute relation data"""
343 unit = self.nova_compute_sentry
344 relation = ['cloud-compute', 'nova-cloud-controller:cloud-compute']
345 expected = {
346 'private-address': u.valid_ip,
347 }
348
349 ret = u.validate_relation_data(unit, relation, expected)
350 if ret:
351 message = u.relation_error('nova-compute cloud-compute', ret)
352 amulet.raise_status(amulet.FAIL, msg=message)
353
354 def test_nova_cc_image_service_relation(self):
355 """Verify the nova-cc to glance image-service relation data"""
356 unit = self.nova_cc_sentry
357 relation = ['image-service', 'glance:image-service']
358 expected = {
359 'private-address': u.valid_ip,
360 }
361
362 ret = u.validate_relation_data(unit, relation, expected)
363 if ret:
364 message = u.relation_error('nova-cc image-service', ret)
365 amulet.raise_status(amulet.FAIL, msg=message)
366
367 def test_glance_image_service_relation(self):
368 """Verify the glance to nova-cc image-service relation data"""
369 unit = self.glance_sentry
370 relation = ['image-service', 'nova-cloud-controller:image-service']
371 expected = {
372 'private-address': u.valid_ip,
373 'glance-api-server': u.valid_url
374 }
375
376 ret = u.validate_relation_data(unit, relation, expected)
377 if ret:
378 message = u.relation_error('glance image-service', ret)
379 amulet.raise_status(amulet.FAIL, msg=message)
380
381 def test_restart_on_config_change(self):
382 """Verify that the specified services are restarted when the config
383 is changed."""
384 # NOTE(coreycb): Skipping failing test on essex until resolved.
385 # config-flags don't take effect on essex.
386 if self._get_openstack_release() == self.precise_essex:
387 u.log.error("Skipping failing test until resolved")
388 return
389
390 services = ['nova-api-ec2', 'nova-api-os-compute', 'nova-objectstore',
391 'nova-cert', 'nova-scheduler', 'nova-conductor']
392 self.d.configure('nova-cloud-controller',
393 {'config-flags': 'quota_cores=20,quota_instances=40,quota_ram=102400'})
394 pgrep_full = True
395
396 time = 20
397 conf = '/etc/nova/nova.conf'
398 for s in services:
399 if not u.service_restarted(self.nova_cc_sentry, s, conf,
400 pgrep_full=True, sleep_time=time):
401 msg = "service {} didn't restart after config change".format(s)
402 amulet.raise_status(amulet.FAIL, msg=msg)
403 time = 0
404
405 def test_nova_default_config(self):
406 """Verify the data in the nova config file's default section."""
407 # NOTE(coreycb): Currently no way to test on essex because config file
408 # has no section headers.
409 if self._get_openstack_release() == self.precise_essex:
410 return
411
412 unit = self.nova_cc_sentry
413 conf = '/etc/nova/nova.conf'
414 rabbitmq_relation = self.rabbitmq_sentry.relation('amqp',
415 'nova-cloud-controller:amqp')
416 glance_relation = self.glance_sentry.relation('image-service',
417 'nova-cloud-controller:image-service')
418 mysql_relation = self.mysql_sentry.relation('shared-db',
419 'nova-cloud-controller:shared-db')
420 db_uri = "mysql://{}:{}@{}/{}".format('nova',
421 mysql_relation['nova_password'],
422 mysql_relation['db_host'],
423 'nova')
424 keystone_ep = self.keystone_demo.service_catalog.url_for(
425 service_type='identity',
426 endpoint_type='publicURL')
427 keystone_ec2 = "{}/ec2tokens".format(keystone_ep)
428
429 expected = {'dhcpbridge_flagfile': '/etc/nova/nova.conf',
430 'dhcpbridge': '/usr/bin/nova-dhcpbridge',
431 'logdir': '/var/log/nova',
432 'state_path': '/var/lib/nova',
433 'lock_path': '/var/lock/nova',
434 'force_dhcp_release': 'True',
435 'iscsi_helper': 'tgtadm',
436 'libvirt_use_virtio_for_bridges': 'True',
437 'connection_type': 'libvirt',
438 'root_helper': 'sudo nova-rootwrap /etc/nova/rootwrap.conf',
439 'verbose': 'True',
440 'ec2_private_dns_show_ip': 'True',
441 'api_paste_config': '/etc/nova/api-paste.ini',
442 'volumes_path': '/var/lib/nova/volumes',
443 'enabled_apis': 'ec2,osapi_compute,metadata',
444 'auth_strategy': 'keystone',
445 'compute_driver': 'libvirt.LibvirtDriver',
446 'keystone_ec2_url': keystone_ec2,
447 'sql_connection': db_uri,
448 'rabbit_userid': 'nova',
449 'rabbit_virtual_host': 'openstack',
450 'rabbit_password': rabbitmq_relation['password'],
451 'rabbit_host': rabbitmq_relation['hostname'],
452 'glance_api_servers': glance_relation['glance-api-server'],
453 'network_manager': 'nova.network.manager.FlatDHCPManager',
454 's3_listen_port': '3333',
455 'osapi_compute_listen_port': '8774',
456 'ec2_listen_port': '8773'}
457
458 ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
459 if ret:
460 message = "nova config error: {}".format(ret)
461 amulet.raise_status(amulet.FAIL, msg=message)
462
463
464 def test_nova_keystone_authtoken_config(self):
465 """Verify the data in the nova config file's keystone_authtoken
466 section. This data only exists since icehouse."""
467 if self._get_openstack_release() < self.precise_icehouse:
468 return
469
470 unit = self.nova_cc_sentry
471 conf = '/etc/nova/nova.conf'
472 keystone_relation = self.keystone_sentry.relation('identity-service',
473 'nova-cloud-controller:identity-service')
474 keystone_uri = "http://{}:{}/".format(keystone_relation['service_host'],
475 keystone_relation['service_port'])
476 expected = {'auth_uri': keystone_uri,
477 'auth_host': keystone_relation['service_host'],
478 'auth_port': keystone_relation['auth_port'],
479 'auth_protocol': keystone_relation['auth_protocol'],
480 'admin_tenant_name': keystone_relation['service_tenant'],
481 'admin_user': keystone_relation['service_username'],
482 'admin_password': keystone_relation['service_password']}
483
484 ret = u.validate_config_data(unit, conf, 'keystone_authtoken', expected)
485 if ret:
486 message = "nova config error: {}".format(ret)
487 amulet.raise_status(amulet.FAIL, msg=message)
488
489 def test_image_instance_create(self):
490 """Create an image/instance, verify they exist, and delete them."""
491 # NOTE(coreycb): Skipping failing test on essex until resolved. essex
492 # nova API calls are getting "Malformed request url (HTTP
493 # 400)".
494 if self._get_openstack_release() == self.precise_essex:
495 u.log.error("Skipping failing test until resolved")
496 return
497
498 image = u.create_cirros_image(self.glance, "cirros-image")
499 if not image:
500 amulet.raise_status(amulet.FAIL, msg="Image create failed")
501
502 instance = u.create_instance(self.nova_demo, "cirros-image", "cirros",
503 "m1.tiny")
504 if not instance:
505 amulet.raise_status(amulet.FAIL, msg="Instance create failed")
506
507 found = False
508 for instance in self.nova_demo.servers.list():
509 if instance.name == 'cirros':
510 found = True
511 if instance.status != 'ACTIVE':
512 msg = "cirros instance is not active"
513 amulet.raise_status(amulet.FAIL, msg=msg)
514
515 if not found:
516 message = "nova cirros instance does not exist"
517 amulet.raise_status(amulet.FAIL, msg=message)
518
519 u.delete_image(self.glance, image)
520 u.delete_instance(self.nova_demo, instance)
0521
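
The tests above all share one validation pattern: build an `expected` dict whose values are either literal strings (which must match exactly) or validator callables such as u.valid_ip and u.not_null (defined in tests/charmhelpers/contrib/amulet/utils.py below), then hand it to u.validate_relation_data. A minimal sketch of that pattern; `unit` and `relation` stand in for any of the sentries and relation pairs used above:

    # Minimal sketch of the expected-dict pattern used in the tests above.
    # Literal values must match exactly; callables are applied to the
    # actual relation value and must return True.
    expected = {
        'volume_service': 'cinder',      # literal match
        'private-address': u.valid_ip,   # callable validator
        'restart_trigger': u.not_null,   # callable validator
    }
    ret = u.validate_relation_data(unit, relation, expected)
    if ret:
        amulet.raise_status(amulet.FAIL,
                            msg=u.relation_error('example', ret))
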
=== added directory 'tests/charmhelpers'
=== added file 'tests/charmhelpers/__init__.py'
=== added directory 'tests/charmhelpers/contrib'
=== added file 'tests/charmhelpers/contrib/__init__.py'
=== added directory 'tests/charmhelpers/contrib/amulet'
=== added file 'tests/charmhelpers/contrib/amulet/__init__.py'
=== added file 'tests/charmhelpers/contrib/amulet/deployment.py'
--- tests/charmhelpers/contrib/amulet/deployment.py 1970-01-01 00:00:00 +0000
+++ tests/charmhelpers/contrib/amulet/deployment.py 2014-09-16 09:08:32 +0000
@@ -0,0 +1,71 @@
1import amulet
2
3import os
4
5
6class AmuletDeployment(object):
7 """Amulet deployment.
8
9 This class provides generic Amulet deployment and test runner
10 methods.
11 """
12
13 def __init__(self, series=None):
14 """Initialize the deployment environment."""
15 self.series = None
16
17 if series:
18 self.series = series
19 self.d = amulet.Deployment(series=self.series)
20 else:
21 self.d = amulet.Deployment()
22
23 def _add_services(self, this_service, other_services):
24 """Add services.
25
26 Add services to the deployment where this_service is the local charm
27 that we're focused on testing and other_services are the other
28 charms that come from the charm store.
29 """
30 name, units = range(2)
31
32 if this_service[name] != os.path.basename(os.getcwd()):
33 s = this_service[name]
34 msg = "The charm's root directory name needs to be {}".format(s)
35 amulet.raise_status(amulet.FAIL, msg=msg)
36
37 self.d.add(this_service[name], units=this_service[units])
38
39 for svc in other_services:
40 if self.series:
41 self.d.add(svc[name],
42 charm='cs:{}/{}'.format(self.series, svc[name]),
43 units=svc[units])
44 else:
45 self.d.add(svc[name], units=svc[units])
46
47 def _add_relations(self, relations):
48 """Add all of the relations for the services."""
49 for k, v in relations.iteritems():
50 self.d.relate(k, v)
51
52 def _configure_services(self, configs):
53 """Configure all of the services."""
54 for service, config in configs.iteritems():
55 self.d.configure(service, config)
56
57 def _deploy(self):
58 """Deploy environment and wait for all hooks to finish executing."""
59 try:
60 self.d.setup()
61 self.d.sentry.wait(timeout=900)
62 except amulet.helpers.TimeoutError:
63 amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
64 except Exception:
65 raise
66
67 def run_tests(self):
68 """Run all of the methods that are prefixed with 'test_'."""
69 for test in dir(self):
70 if test.startswith('test_'):
71 getattr(self, test)()
072
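
AmuletDeployment is meant to be subclassed; tests/basic_deployment.py earlier in this diff drives it in roughly this shape. A minimal sketch with illustrative service names, relations and config keys (not necessarily the exact ones this charm's tests use):

    from charmhelpers.contrib.amulet.deployment import AmuletDeployment

    class ExampleDeployment(AmuletDeployment):
        def __init__(self, series='trusty'):
            super(ExampleDeployment, self).__init__(series)
            # (name, units) tuples; index 0 is the charm name (checked
            # against the charm's root directory name for this_service),
            # index 1 is the unit count.
            self._add_services(('nova-cloud-controller', 1),
                               [('mysql', 1), ('rabbitmq-server', 1)])
            self._add_relations(
                {'nova-cloud-controller:shared-db': 'mysql:shared-db',
                 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp'})
            self._configure_services(
                {'nova-cloud-controller':
                 {'network-manager': 'FlatDHCPManager'}})
            self._deploy()

    ExampleDeployment().run_tests()  # runs every test_* method in order
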
=== added file 'tests/charmhelpers/contrib/amulet/utils.py'
--- tests/charmhelpers/contrib/amulet/utils.py 1970-01-01 00:00:00 +0000
+++ tests/charmhelpers/contrib/amulet/utils.py 2014-09-16 09:08:32 +0000
@@ -0,0 +1,176 @@
1import ConfigParser
2import io
3import logging
4import re
5import sys
6import time
7
8
9class AmuletUtils(object):
10 """Amulet utilities.
11
12 This class provides common utility functions that are used by Amulet
13 tests.
14 """
15
16 def __init__(self, log_level=logging.ERROR):
17 self.log = self.get_logger(level=log_level)
18
19 def get_logger(self, name="amulet-logger", level=logging.DEBUG):
20 """Get a logger object that will log to stdout."""
21 log = logging
22 logger = log.getLogger(name)
23 fmt = log.Formatter("%(asctime)s %(funcName)s "
24 "%(levelname)s: %(message)s")
25
26 handler = log.StreamHandler(stream=sys.stdout)
27 handler.setLevel(level)
28 handler.setFormatter(fmt)
29
30 logger.addHandler(handler)
31 logger.setLevel(level)
32
33 return logger
34
35 def valid_ip(self, ip):
36 if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
37 return True
38 else:
39 return False
40
41 def valid_url(self, url):
42 p = re.compile(
43 r'^(?:http|ftp)s?://'
44 r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
45 r'localhost|'
46 r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
47 r'(?::\d+)?'
48 r'(?:/?|[/?]\S+)$',
49 re.IGNORECASE)
50 if p.match(url):
51 return True
52 else:
53 return False
54
55 def validate_services(self, commands):
56 """Validate services.
57
58 Verify the specified services are running on the corresponding
59 service units.
60 """
61 for k, v in commands.iteritems():
62 for cmd in v:
63 output, code = k.run(cmd)
64 if code != 0:
65 return "command `{}` returned {}".format(cmd, str(code))
66 return None
67
68 def _get_config(self, unit, filename):
69 """Get a ConfigParser object for parsing a unit's config file."""
70 file_contents = unit.file_contents(filename)
71 config = ConfigParser.ConfigParser()
72 config.readfp(io.StringIO(file_contents))
73 return config
74
75 def validate_config_data(self, sentry_unit, config_file, section,
76 expected):
77 """Validate config file data.
78
79 Verify that the specified section of the config file contains
80 the expected option key:value pairs.
81 """
82 config = self._get_config(sentry_unit, config_file)
83
84 if section != 'DEFAULT' and not config.has_section(section):
85 return "section [{}] does not exist".format(section)
86
87 for k in expected.keys():
88 if not config.has_option(section, k):
89 return "section [{}] is missing option {}".format(section, k)
90 if config.get(section, k) != expected[k]:
91 return "section [{}] {}:{} != expected {}:{}".format(
92 section, k, config.get(section, k), k, expected[k])
93 return None
94
95 def _validate_dict_data(self, expected, actual):
96 """Validate dictionary data.
97
98 Compare expected dictionary data vs actual dictionary data.
99 The values in the 'expected' dictionary can be strings, bools, ints,
100 longs, or can be a function that evaluates a value and returns a
101 bool.
102 """
103 for k, v in expected.iteritems():
104 if k in actual:
105 if (isinstance(v, basestring) or
106 isinstance(v, bool) or
107 isinstance(v, (int, long))):
108 if v != actual[k]:
109 return "{}:{}".format(k, actual[k])
110 elif not v(actual[k]):
111 return "{}:{}".format(k, actual[k])
112 else:
113 return "key '{}' does not exist".format(k)
114 return None
115
116 def validate_relation_data(self, sentry_unit, relation, expected):
117 """Validate actual relation data based on expected relation data."""
118 actual = sentry_unit.relation(relation[0], relation[1])
119 self.log.debug('actual: {}'.format(repr(actual)))
120 return self._validate_dict_data(expected, actual)
121
122 def _validate_list_data(self, expected, actual):
123 """Compare expected list vs actual list data."""
124 for e in expected:
125 if e not in actual:
126 return "expected item {} not found in actual list".format(e)
127 return None
128
129 def not_null(self, string):
130 if string is not None:
131 return True
132 else:
133 return False
134
135 def _get_file_mtime(self, sentry_unit, filename):
136 """Get last modification time of file."""
137 return sentry_unit.file_stat(filename)['mtime']
138
139 def _get_dir_mtime(self, sentry_unit, directory):
140 """Get last modification time of directory."""
141 return sentry_unit.directory_stat(directory)['mtime']
142
143 def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
144 """Get process' start time.
145
146 Determine start time of the process based on the last modification
147 time of the /proc/pid directory. If pgrep_full is True, the process
148 name is matched against the full command line.
149 """
150 if pgrep_full:
151 cmd = 'pgrep -o -f {}'.format(service)
152 else:
153 cmd = 'pgrep -o {}'.format(service)
154 proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
155 return self._get_dir_mtime(sentry_unit, proc_dir)
156
157 def service_restarted(self, sentry_unit, service, filename,
158 pgrep_full=False, sleep_time=20):
159 """Check if service was restarted.
160
161 Compare a service's start time vs a file's last modification time
162 (such as a config file for that service) to determine if the service
163 has been restarted.
164 """
165 time.sleep(sleep_time)
166 if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
167 self._get_file_mtime(sentry_unit, filename)):
168 return True
169 else:
170 return False
171
172 def relation_error(self, name, data):
173 return 'unexpected relation data in {} - {}'.format(name, data)
174
175 def endpoint_error(self, name, data):
176 return 'unexpected endpoint data in {} - {}'.format(name, data)
0177
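
service_restarted above is the primitive behind test_restart_on_config_change earlier in this diff: it sleeps, resolves the oldest matching PID with pgrep -o, and treats the service as restarted only if the /proc/<pid> directory is newer than the given file. A minimal usage sketch, assuming amulet is imported and sentry_unit comes from a deployed environment:

    u = AmuletUtils()
    if not u.service_restarted(sentry_unit, 'nova-scheduler',
                               '/etc/nova/nova.conf',
                               pgrep_full=True, sleep_time=20):
        # The oldest matching process predates the last write to
        # nova.conf, i.e. the service was not restarted.
        amulet.raise_status(amulet.FAIL,
                            msg='nova-scheduler did not restart')
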
=== added directory 'tests/charmhelpers/contrib/openstack'
=== added file 'tests/charmhelpers/contrib/openstack/__init__.py'
=== added directory 'tests/charmhelpers/contrib/openstack/amulet'
=== added file 'tests/charmhelpers/contrib/openstack/amulet/__init__.py'
=== added file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-09-16 09:08:32 +0000
@@ -0,0 +1,61 @@
1from charmhelpers.contrib.amulet.deployment import (
2 AmuletDeployment
3)
4
5
6class OpenStackAmuletDeployment(AmuletDeployment):
7 """OpenStack amulet deployment.
8
9 This class inherits from AmuletDeployment and has additional support
10 that is specifically for use by OpenStack charms.
11 """
12
13 def __init__(self, series=None, openstack=None, source=None):
14 """Initialize the deployment environment."""
15 super(OpenStackAmuletDeployment, self).__init__(series)
16 self.openstack = openstack
17 self.source = source
18
19 def _add_services(self, this_service, other_services):
20 """Add services to the deployment and set openstack-origin."""
21 super(OpenStackAmuletDeployment, self)._add_services(this_service,
22 other_services)
23 name = 0
24 services = other_services
25 services.append(this_service)
26 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
27
28 if self.openstack:
29 for svc in services:
30 if svc[name] not in use_source:
31 config = {'openstack-origin': self.openstack}
32 self.d.configure(svc[name], config)
33
34 if self.source:
35 for svc in services:
36 if svc[name] in use_source:
37 config = {'source': self.source}
38 self.d.configure(svc[name], config)
39
40 def _configure_services(self, configs):
41 """Configure all of the services."""
42 for service, config in configs.iteritems():
43 self.d.configure(service, config)
44
45 def _get_openstack_release(self):
46 """Get openstack release.
47
48 Return an integer representing the enum value of the openstack
49 release.
50 """
51 (self.precise_essex, self.precise_folsom, self.precise_grizzly,
52 self.precise_havana, self.precise_icehouse,
53 self.trusty_icehouse) = range(6)
54 releases = {
55 ('precise', None): self.precise_essex,
56 ('precise', 'cloud:precise-folsom'): self.precise_folsom,
57 ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
58 ('precise', 'cloud:precise-havana'): self.precise_havana,
59 ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
60 ('trusty', None): self.trusty_icehouse}
61 return releases[(self.series, self.openstack)]
062
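
_get_openstack_release maps (series, openstack-origin) onto ordered integers, which is what lets the tests earlier in this diff gate behaviour with plain comparisons rather than string checks. A short sketch of that idiom, inside an OpenStackAmuletDeployment subclass:

    # The enum ordering makes "this release or newer/older" a simple
    # integer comparison.
    if self._get_openstack_release() < self.precise_icehouse:
        return  # keystone_authtoken section only exists from icehouse
    if self._get_openstack_release() == self.precise_essex:
        expected['volume_service'] = 'nova-volume'
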
=== added file 'tests/charmhelpers/contrib/openstack/amulet/utils.py'
--- tests/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
+++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-09-16 09:08:32 +0000
@@ -0,0 +1,275 @@
1import logging
2import os
3import time
4import urllib
5
6import glanceclient.v1.client as glance_client
7import keystoneclient.v2_0 as keystone_client
8import novaclient.v1_1.client as nova_client
9
10from charmhelpers.contrib.amulet.utils import (
11 AmuletUtils
12)
13
14DEBUG = logging.DEBUG
15ERROR = logging.ERROR
16
17
18class OpenStackAmuletUtils(AmuletUtils):
19 """OpenStack amulet utilities.
20
21 This class inherits from AmuletUtils and has additional support
22 that is specifically for use by OpenStack charms.
23 """
24
The diff has been truncated for viewing.
