Merge lp:~gnuoy/charms/trusty/glance/next-charm-sync into lp:~openstack-charmers-archive/charms/trusty/glance/next
- Trusty Tahr (14.04)
- next-charm-sync
- Merge into next
Proposed by
Liam Young
Status: | Merged |
---|---|
Merged at revision: | 56 |
Proposed branch: | lp:~gnuoy/charms/trusty/glance/next-charm-sync |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/glance/next |
Diff against target: |
1530 lines (+897/-113) 17 files modified
hooks/charmhelpers/contrib/hahelpers/cluster.py (+55/-13) hooks/charmhelpers/contrib/network/ip.py (+19/-1) hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+13/-7) hooks/charmhelpers/contrib/openstack/amulet/utils.py (+86/-20) hooks/charmhelpers/contrib/openstack/context.py (+31/-4) hooks/charmhelpers/contrib/openstack/ip.py (+7/-3) hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+3/-3) hooks/charmhelpers/core/host.py (+34/-1) hooks/charmhelpers/core/services/__init__.py (+2/-0) hooks/charmhelpers/core/services/base.py (+305/-0) hooks/charmhelpers/core/services/helpers.py (+125/-0) hooks/charmhelpers/core/templating.py (+51/-0) hooks/charmhelpers/fetch/__init__.py (+1/-0) tests/charmhelpers/contrib/amulet/deployment.py (+20/-7) tests/charmhelpers/contrib/amulet/utils.py (+46/-27) tests/charmhelpers/contrib/openstack/amulet/deployment.py (+13/-7) tests/charmhelpers/contrib/openstack/amulet/utils.py (+86/-20) |
To merge this branch: | bzr merge lp:~gnuoy/charms/trusty/glance/next-charm-sync |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Liam Young (community) | Approve | ||
Review via email: mp+230636@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py' | |||
2 | --- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-07-25 09:37:25 +0000 | |||
3 | +++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-08-13 13:59:42 +0000 | |||
4 | @@ -6,6 +6,11 @@ | |||
5 | 6 | # Adam Gandelman <adamg@ubuntu.com> | 6 | # Adam Gandelman <adamg@ubuntu.com> |
6 | 7 | # | 7 | # |
7 | 8 | 8 | ||
8 | 9 | """ | ||
9 | 10 | Helpers for clustering and determining "cluster leadership" and other | ||
10 | 11 | clustering-related helpers. | ||
11 | 12 | """ | ||
12 | 13 | |||
13 | 9 | import subprocess | 14 | import subprocess |
14 | 10 | import os | 15 | import os |
15 | 11 | 16 | ||
16 | @@ -19,6 +24,7 @@ | |||
17 | 19 | config as config_get, | 24 | config as config_get, |
18 | 20 | INFO, | 25 | INFO, |
19 | 21 | ERROR, | 26 | ERROR, |
20 | 27 | WARNING, | ||
21 | 22 | unit_get, | 28 | unit_get, |
22 | 23 | ) | 29 | ) |
23 | 24 | 30 | ||
24 | @@ -27,6 +33,29 @@ | |||
25 | 27 | pass | 33 | pass |
26 | 28 | 34 | ||
27 | 29 | 35 | ||
28 | 36 | def is_elected_leader(resource): | ||
29 | 37 | """ | ||
30 | 38 | Returns True if the charm executing this is the elected cluster leader. | ||
31 | 39 | |||
32 | 40 | It relies on two mechanisms to determine leadership: | ||
33 | 41 | 1. If the charm is part of a corosync cluster, call corosync to | ||
34 | 42 | determine leadership. | ||
35 | 43 | 2. If the charm is not part of a corosync cluster, the leader is | ||
36 | 44 | determined as being "the alive unit with the lowest unit number". In | ||
37 | 45 | other words, the oldest surviving unit. | ||
38 | 46 | """ | ||
39 | 47 | if is_clustered(): | ||
40 | 48 | if not is_crm_leader(resource): | ||
41 | 49 | log('Deferring action to CRM leader.', level=INFO) | ||
42 | 50 | return False | ||
43 | 51 | else: | ||
44 | 52 | peers = peer_units() | ||
45 | 53 | if peers and not oldest_peer(peers): | ||
46 | 54 | log('Deferring action to oldest service unit.', level=INFO) | ||
47 | 55 | return False | ||
48 | 56 | return True | ||
49 | 57 | |||
50 | 58 | |||
51 | 30 | def is_clustered(): | 59 | def is_clustered(): |
52 | 31 | for r_id in (relation_ids('ha') or []): | 60 | for r_id in (relation_ids('ha') or []): |
53 | 32 | for unit in (relation_list(r_id) or []): | 61 | for unit in (relation_list(r_id) or []): |
54 | @@ -38,7 +67,11 @@ | |||
55 | 38 | return False | 67 | return False |
56 | 39 | 68 | ||
57 | 40 | 69 | ||
59 | 41 | def is_leader(resource): | 70 | def is_crm_leader(resource): |
60 | 71 | """ | ||
61 | 72 | Returns True if the charm calling this is the elected corosync leader, | ||
62 | 73 | as returned by calling the external "crm" command. | ||
63 | 74 | """ | ||
64 | 42 | cmd = [ | 75 | cmd = [ |
65 | 43 | "crm", "resource", | 76 | "crm", "resource", |
66 | 44 | "show", resource | 77 | "show", resource |
67 | @@ -54,15 +87,31 @@ | |||
68 | 54 | return False | 87 | return False |
69 | 55 | 88 | ||
70 | 56 | 89 | ||
72 | 57 | def peer_units(): | 90 | def is_leader(resource): |
73 | 91 | log("is_leader is deprecated. Please consider using is_crm_leader " | ||
74 | 92 | "instead.", level=WARNING) | ||
75 | 93 | return is_crm_leader(resource) | ||
76 | 94 | |||
77 | 95 | |||
78 | 96 | def peer_units(peer_relation="cluster"): | ||
79 | 58 | peers = [] | 97 | peers = [] |
81 | 59 | for r_id in (relation_ids('cluster') or []): | 98 | for r_id in (relation_ids(peer_relation) or []): |
82 | 60 | for unit in (relation_list(r_id) or []): | 99 | for unit in (relation_list(r_id) or []): |
83 | 61 | peers.append(unit) | 100 | peers.append(unit) |
84 | 62 | return peers | 101 | return peers |
85 | 63 | 102 | ||
86 | 64 | 103 | ||
87 | 104 | def peer_ips(peer_relation='cluster', addr_key='private-address'): | ||
88 | 105 | '''Return a dict of peers and their private-address''' | ||
89 | 106 | peers = {} | ||
90 | 107 | for r_id in relation_ids(peer_relation): | ||
91 | 108 | for unit in relation_list(r_id): | ||
92 | 109 | peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) | ||
93 | 110 | return peers | ||
94 | 111 | |||
95 | 112 | |||
96 | 65 | def oldest_peer(peers): | 113 | def oldest_peer(peers): |
97 | 114 | """Determines who the oldest peer is by comparing unit numbers.""" | ||
98 | 66 | local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) | 115 | local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) |
99 | 67 | for peer in peers: | 116 | for peer in peers: |
100 | 68 | remote_unit_no = int(peer.split('/')[1]) | 117 | remote_unit_no = int(peer.split('/')[1]) |
101 | @@ -72,16 +121,9 @@ | |||
102 | 72 | 121 | ||
103 | 73 | 122 | ||
104 | 74 | def eligible_leader(resource): | 123 | def eligible_leader(resource): |
115 | 75 | if is_clustered(): | 124 | log("eligible_leader is deprecated. Please consider using " |
116 | 76 | if not is_leader(resource): | 125 | "is_elected_leader instead.", level=WARNING) |
117 | 77 | log('Deferring action to CRM leader.', level=INFO) | 126 | return is_elected_leader(resource) |
108 | 78 | return False | ||
109 | 79 | else: | ||
110 | 80 | peers = peer_units() | ||
111 | 81 | if peers and not oldest_peer(peers): | ||
112 | 82 | log('Deferring action to oldest service unit.', level=INFO) | ||
113 | 83 | return False | ||
114 | 84 | return True | ||
118 | 85 | 127 | ||
119 | 86 | 128 | ||
120 | 87 | def https(): | 129 | def https(): |
121 | 88 | 130 | ||
122 | === modified file 'hooks/charmhelpers/contrib/network/ip.py' | |||
123 | --- hooks/charmhelpers/contrib/network/ip.py 2014-07-24 10:26:34 +0000 | |||
124 | +++ hooks/charmhelpers/contrib/network/ip.py 2014-08-13 13:59:42 +0000 | |||
125 | @@ -4,7 +4,7 @@ | |||
126 | 4 | 4 | ||
127 | 5 | from charmhelpers.fetch import apt_install | 5 | from charmhelpers.fetch import apt_install |
128 | 6 | from charmhelpers.core.hookenv import ( | 6 | from charmhelpers.core.hookenv import ( |
130 | 7 | ERROR, log, | 7 | ERROR, log, config, |
131 | 8 | ) | 8 | ) |
132 | 9 | 9 | ||
133 | 10 | try: | 10 | try: |
134 | @@ -154,3 +154,21 @@ | |||
135 | 154 | get_iface_for_address = partial(_get_for_address, key='iface') | 154 | get_iface_for_address = partial(_get_for_address, key='iface') |
136 | 155 | 155 | ||
137 | 156 | get_netmask_for_address = partial(_get_for_address, key='netmask') | 156 | get_netmask_for_address = partial(_get_for_address, key='netmask') |
138 | 157 | |||
139 | 158 | |||
140 | 159 | def get_ipv6_addr(iface="eth0"): | ||
141 | 160 | try: | ||
142 | 161 | iface_addrs = netifaces.ifaddresses(iface) | ||
143 | 162 | if netifaces.AF_INET6 not in iface_addrs: | ||
144 | 163 | raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) | ||
145 | 164 | |||
146 | 165 | addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] | ||
147 | 166 | ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') | ||
148 | 167 | and config('vip') != a['addr']] | ||
149 | 168 | if not ipv6_addr: | ||
150 | 169 | raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) | ||
151 | 170 | |||
152 | 171 | return ipv6_addr[0] | ||
153 | 172 | |||
154 | 173 | except ValueError: | ||
155 | 174 | raise ValueError("Invalid interface '%s'" % iface) | ||
156 | 157 | 175 | ||
157 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
158 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-10 21:43:51 +0000 | |||
159 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-08-13 13:59:42 +0000 | |||
160 | @@ -4,8 +4,11 @@ | |||
161 | 4 | 4 | ||
162 | 5 | 5 | ||
163 | 6 | class OpenStackAmuletDeployment(AmuletDeployment): | 6 | class OpenStackAmuletDeployment(AmuletDeployment): |
166 | 7 | """This class inherits from AmuletDeployment and has additional support | 7 | """OpenStack amulet deployment. |
167 | 8 | that is specifically for use by OpenStack charms.""" | 8 | |
168 | 9 | This class inherits from AmuletDeployment and has additional support | ||
169 | 10 | that is specifically for use by OpenStack charms. | ||
170 | 11 | """ | ||
171 | 9 | 12 | ||
172 | 10 | def __init__(self, series=None, openstack=None, source=None): | 13 | def __init__(self, series=None, openstack=None, source=None): |
173 | 11 | """Initialize the deployment environment.""" | 14 | """Initialize the deployment environment.""" |
174 | @@ -40,11 +43,14 @@ | |||
175 | 40 | self.d.configure(service, config) | 43 | self.d.configure(service, config) |
176 | 41 | 44 | ||
177 | 42 | def _get_openstack_release(self): | 45 | def _get_openstack_release(self): |
183 | 43 | """Return an integer representing the enum value of the openstack | 46 | """Get openstack release. |
184 | 44 | release.""" | 47 | |
185 | 45 | self.precise_essex, self.precise_folsom, self.precise_grizzly, \ | 48 | Return an integer representing the enum value of the openstack |
186 | 46 | self.precise_havana, self.precise_icehouse, \ | 49 | release. |
187 | 47 | self.trusty_icehouse = range(6) | 50 | """ |
188 | 51 | (self.precise_essex, self.precise_folsom, self.precise_grizzly, | ||
189 | 52 | self.precise_havana, self.precise_icehouse, | ||
190 | 53 | self.trusty_icehouse) = range(6) | ||
191 | 48 | releases = { | 54 | releases = { |
192 | 49 | ('precise', None): self.precise_essex, | 55 | ('precise', None): self.precise_essex, |
193 | 50 | ('precise', 'cloud:precise-folsom'): self.precise_folsom, | 56 | ('precise', 'cloud:precise-folsom'): self.precise_folsom, |
194 | 51 | 57 | ||
195 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' | |||
196 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-10 21:43:51 +0000 | |||
197 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-08-13 13:59:42 +0000 | |||
198 | @@ -16,8 +16,11 @@ | |||
199 | 16 | 16 | ||
200 | 17 | 17 | ||
201 | 18 | class OpenStackAmuletUtils(AmuletUtils): | 18 | class OpenStackAmuletUtils(AmuletUtils): |
204 | 19 | """This class inherits from AmuletUtils and has additional support | 19 | """OpenStack amulet utilities. |
205 | 20 | that is specifically for use by OpenStack charms.""" | 20 | |
206 | 21 | This class inherits from AmuletUtils and has additional support | ||
207 | 22 | that is specifically for use by OpenStack charms. | ||
208 | 23 | """ | ||
209 | 21 | 24 | ||
210 | 22 | def __init__(self, log_level=ERROR): | 25 | def __init__(self, log_level=ERROR): |
211 | 23 | """Initialize the deployment environment.""" | 26 | """Initialize the deployment environment.""" |
212 | @@ -25,13 +28,17 @@ | |||
213 | 25 | 28 | ||
214 | 26 | def validate_endpoint_data(self, endpoints, admin_port, internal_port, | 29 | def validate_endpoint_data(self, endpoints, admin_port, internal_port, |
215 | 27 | public_port, expected): | 30 | public_port, expected): |
218 | 28 | """Validate actual endpoint data vs expected endpoint data. The ports | 31 | """Validate endpoint data. |
219 | 29 | are used to find the matching endpoint.""" | 32 | |
220 | 33 | Validate actual endpoint data vs expected endpoint data. The ports | ||
221 | 34 | are used to find the matching endpoint. | ||
222 | 35 | """ | ||
223 | 30 | found = False | 36 | found = False |
224 | 31 | for ep in endpoints: | 37 | for ep in endpoints: |
225 | 32 | self.log.debug('endpoint: {}'.format(repr(ep))) | 38 | self.log.debug('endpoint: {}'.format(repr(ep))) |
228 | 33 | if admin_port in ep.adminurl and internal_port in ep.internalurl \ | 39 | if (admin_port in ep.adminurl and |
229 | 34 | and public_port in ep.publicurl: | 40 | internal_port in ep.internalurl and |
230 | 41 | public_port in ep.publicurl): | ||
231 | 35 | found = True | 42 | found = True |
232 | 36 | actual = {'id': ep.id, | 43 | actual = {'id': ep.id, |
233 | 37 | 'region': ep.region, | 44 | 'region': ep.region, |
234 | @@ -47,8 +54,11 @@ | |||
235 | 47 | return 'endpoint not found' | 54 | return 'endpoint not found' |
236 | 48 | 55 | ||
237 | 49 | def validate_svc_catalog_endpoint_data(self, expected, actual): | 56 | def validate_svc_catalog_endpoint_data(self, expected, actual): |
240 | 50 | """Validate a list of actual service catalog endpoints vs a list of | 57 | """Validate service catalog endpoint data. |
241 | 51 | expected service catalog endpoints.""" | 58 | |
242 | 59 | Validate a list of actual service catalog endpoints vs a list of | ||
243 | 60 | expected service catalog endpoints. | ||
244 | 61 | """ | ||
245 | 52 | self.log.debug('actual: {}'.format(repr(actual))) | 62 | self.log.debug('actual: {}'.format(repr(actual))) |
246 | 53 | for k, v in expected.iteritems(): | 63 | for k, v in expected.iteritems(): |
247 | 54 | if k in actual: | 64 | if k in actual: |
248 | @@ -60,8 +70,11 @@ | |||
249 | 60 | return ret | 70 | return ret |
250 | 61 | 71 | ||
251 | 62 | def validate_tenant_data(self, expected, actual): | 72 | def validate_tenant_data(self, expected, actual): |
254 | 63 | """Validate a list of actual tenant data vs list of expected tenant | 73 | """Validate tenant data. |
255 | 64 | data.""" | 74 | |
256 | 75 | Validate a list of actual tenant data vs list of expected tenant | ||
257 | 76 | data. | ||
258 | 77 | """ | ||
259 | 65 | self.log.debug('actual: {}'.format(repr(actual))) | 78 | self.log.debug('actual: {}'.format(repr(actual))) |
260 | 66 | for e in expected: | 79 | for e in expected: |
261 | 67 | found = False | 80 | found = False |
262 | @@ -78,8 +91,11 @@ | |||
263 | 78 | return ret | 91 | return ret |
264 | 79 | 92 | ||
265 | 80 | def validate_role_data(self, expected, actual): | 93 | def validate_role_data(self, expected, actual): |
268 | 81 | """Validate a list of actual role data vs a list of expected role | 94 | """Validate role data. |
269 | 82 | data.""" | 95 | |
270 | 96 | Validate a list of actual role data vs a list of expected role | ||
271 | 97 | data. | ||
272 | 98 | """ | ||
273 | 83 | self.log.debug('actual: {}'.format(repr(actual))) | 99 | self.log.debug('actual: {}'.format(repr(actual))) |
274 | 84 | for e in expected: | 100 | for e in expected: |
275 | 85 | found = False | 101 | found = False |
276 | @@ -95,8 +111,11 @@ | |||
277 | 95 | return ret | 111 | return ret |
278 | 96 | 112 | ||
279 | 97 | def validate_user_data(self, expected, actual): | 113 | def validate_user_data(self, expected, actual): |
282 | 98 | """Validate a list of actual user data vs a list of expected user | 114 | """Validate user data. |
283 | 99 | data.""" | 115 | |
284 | 116 | Validate a list of actual user data vs a list of expected user | ||
285 | 117 | data. | ||
286 | 118 | """ | ||
287 | 100 | self.log.debug('actual: {}'.format(repr(actual))) | 119 | self.log.debug('actual: {}'.format(repr(actual))) |
288 | 101 | for e in expected: | 120 | for e in expected: |
289 | 102 | found = False | 121 | found = False |
290 | @@ -114,21 +133,24 @@ | |||
291 | 114 | return ret | 133 | return ret |
292 | 115 | 134 | ||
293 | 116 | def validate_flavor_data(self, expected, actual): | 135 | def validate_flavor_data(self, expected, actual): |
295 | 117 | """Validate a list of actual flavors vs a list of expected flavors.""" | 136 | """Validate flavor data. |
296 | 137 | |||
297 | 138 | Validate a list of actual flavors vs a list of expected flavors. | ||
298 | 139 | """ | ||
299 | 118 | self.log.debug('actual: {}'.format(repr(actual))) | 140 | self.log.debug('actual: {}'.format(repr(actual))) |
300 | 119 | act = [a.name for a in actual] | 141 | act = [a.name for a in actual] |
301 | 120 | return self._validate_list_data(expected, act) | 142 | return self._validate_list_data(expected, act) |
302 | 121 | 143 | ||
303 | 122 | def tenant_exists(self, keystone, tenant): | 144 | def tenant_exists(self, keystone, tenant): |
305 | 123 | """Return True if tenant exists""" | 145 | """Return True if tenant exists.""" |
306 | 124 | return tenant in [t.name for t in keystone.tenants.list()] | 146 | return tenant in [t.name for t in keystone.tenants.list()] |
307 | 125 | 147 | ||
308 | 126 | def authenticate_keystone_admin(self, keystone_sentry, user, password, | 148 | def authenticate_keystone_admin(self, keystone_sentry, user, password, |
309 | 127 | tenant): | 149 | tenant): |
310 | 128 | """Authenticates admin user with the keystone admin endpoint.""" | 150 | """Authenticates admin user with the keystone admin endpoint.""" |
314 | 129 | service_ip = \ | 151 | unit = keystone_sentry |
315 | 130 | keystone_sentry.relation('shared-db', | 152 | service_ip = unit.relation('shared-db', |
316 | 131 | 'mysql:shared-db')['private-address'] | 153 | 'mysql:shared-db')['private-address'] |
317 | 132 | ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) | 154 | ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) |
318 | 133 | return keystone_client.Client(username=user, password=password, | 155 | return keystone_client.Client(username=user, password=password, |
319 | 134 | tenant_name=tenant, auth_url=ep) | 156 | tenant_name=tenant, auth_url=ep) |
320 | @@ -177,12 +199,40 @@ | |||
321 | 177 | image = glance.images.create(name=image_name, is_public=True, | 199 | image = glance.images.create(name=image_name, is_public=True, |
322 | 178 | disk_format='qcow2', | 200 | disk_format='qcow2', |
323 | 179 | container_format='bare', data=f) | 201 | container_format='bare', data=f) |
324 | 202 | count = 1 | ||
325 | 203 | status = image.status | ||
326 | 204 | while status != 'active' and count < 10: | ||
327 | 205 | time.sleep(3) | ||
328 | 206 | image = glance.images.get(image.id) | ||
329 | 207 | status = image.status | ||
330 | 208 | self.log.debug('image status: {}'.format(status)) | ||
331 | 209 | count += 1 | ||
332 | 210 | |||
333 | 211 | if status != 'active': | ||
334 | 212 | self.log.error('image creation timed out') | ||
335 | 213 | return None | ||
336 | 214 | |||
337 | 180 | return image | 215 | return image |
338 | 181 | 216 | ||
339 | 182 | def delete_image(self, glance, image): | 217 | def delete_image(self, glance, image): |
340 | 183 | """Delete the specified image.""" | 218 | """Delete the specified image.""" |
341 | 219 | num_before = len(list(glance.images.list())) | ||
342 | 184 | glance.images.delete(image) | 220 | glance.images.delete(image) |
343 | 185 | 221 | ||
344 | 222 | count = 1 | ||
345 | 223 | num_after = len(list(glance.images.list())) | ||
346 | 224 | while num_after != (num_before - 1) and count < 10: | ||
347 | 225 | time.sleep(3) | ||
348 | 226 | num_after = len(list(glance.images.list())) | ||
349 | 227 | self.log.debug('number of images: {}'.format(num_after)) | ||
350 | 228 | count += 1 | ||
351 | 229 | |||
352 | 230 | if num_after != (num_before - 1): | ||
353 | 231 | self.log.error('image deletion timed out') | ||
354 | 232 | return False | ||
355 | 233 | |||
356 | 234 | return True | ||
357 | 235 | |||
358 | 186 | def create_instance(self, nova, image_name, instance_name, flavor): | 236 | def create_instance(self, nova, image_name, instance_name, flavor): |
359 | 187 | """Create the specified instance.""" | 237 | """Create the specified instance.""" |
360 | 188 | image = nova.images.find(name=image_name) | 238 | image = nova.images.find(name=image_name) |
361 | @@ -199,11 +249,27 @@ | |||
362 | 199 | self.log.debug('instance status: {}'.format(status)) | 249 | self.log.debug('instance status: {}'.format(status)) |
363 | 200 | count += 1 | 250 | count += 1 |
364 | 201 | 251 | ||
366 | 202 | if status == 'BUILD': | 252 | if status != 'ACTIVE': |
367 | 253 | self.log.error('instance creation timed out') | ||
368 | 203 | return None | 254 | return None |
369 | 204 | 255 | ||
370 | 205 | return instance | 256 | return instance |
371 | 206 | 257 | ||
372 | 207 | def delete_instance(self, nova, instance): | 258 | def delete_instance(self, nova, instance): |
373 | 208 | """Delete the specified instance.""" | 259 | """Delete the specified instance.""" |
374 | 260 | num_before = len(list(nova.servers.list())) | ||
375 | 209 | nova.servers.delete(instance) | 261 | nova.servers.delete(instance) |
376 | 262 | |||
377 | 263 | count = 1 | ||
378 | 264 | num_after = len(list(nova.servers.list())) | ||
379 | 265 | while num_after != (num_before - 1) and count < 10: | ||
380 | 266 | time.sleep(3) | ||
381 | 267 | num_after = len(list(nova.servers.list())) | ||
382 | 268 | self.log.debug('number of instances: {}'.format(num_after)) | ||
383 | 269 | count += 1 | ||
384 | 270 | |||
385 | 271 | if num_after != (num_before - 1): | ||
386 | 272 | self.log.error('instance deletion timed out') | ||
387 | 273 | return False | ||
388 | 274 | |||
389 | 275 | return True | ||
390 | 210 | 276 | ||
391 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
392 | --- hooks/charmhelpers/contrib/openstack/context.py 2014-07-25 09:37:25 +0000 | |||
393 | +++ hooks/charmhelpers/contrib/openstack/context.py 2014-08-13 13:59:42 +0000 | |||
394 | @@ -44,7 +44,10 @@ | |||
395 | 44 | neutron_plugin_attribute, | 44 | neutron_plugin_attribute, |
396 | 45 | ) | 45 | ) |
397 | 46 | 46 | ||
399 | 47 | from charmhelpers.contrib.network.ip import get_address_in_network | 47 | from charmhelpers.contrib.network.ip import ( |
400 | 48 | get_address_in_network, | ||
401 | 49 | get_ipv6_addr, | ||
402 | 50 | ) | ||
403 | 48 | 51 | ||
404 | 49 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' | 52 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
405 | 50 | 53 | ||
406 | @@ -401,9 +404,12 @@ | |||
407 | 401 | 404 | ||
408 | 402 | cluster_hosts = {} | 405 | cluster_hosts = {} |
409 | 403 | l_unit = local_unit().replace('/', '-') | 406 | l_unit = local_unit().replace('/', '-') |
413 | 404 | cluster_hosts[l_unit] = \ | 407 | if config('prefer-ipv6'): |
414 | 405 | get_address_in_network(config('os-internal-network'), | 408 | addr = get_ipv6_addr() |
415 | 406 | unit_get('private-address')) | 409 | else: |
416 | 410 | addr = unit_get('private-address') | ||
417 | 411 | cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'), | ||
418 | 412 | addr) | ||
419 | 407 | 413 | ||
420 | 408 | for rid in relation_ids('cluster'): | 414 | for rid in relation_ids('cluster'): |
421 | 409 | for unit in related_units(rid): | 415 | for unit in related_units(rid): |
422 | @@ -414,6 +420,16 @@ | |||
423 | 414 | ctxt = { | 420 | ctxt = { |
424 | 415 | 'units': cluster_hosts, | 421 | 'units': cluster_hosts, |
425 | 416 | } | 422 | } |
426 | 423 | |||
427 | 424 | if config('prefer-ipv6'): | ||
428 | 425 | ctxt['local_host'] = 'ip6-localhost' | ||
429 | 426 | ctxt['haproxy_host'] = '::' | ||
430 | 427 | ctxt['stat_port'] = ':::8888' | ||
431 | 428 | else: | ||
432 | 429 | ctxt['local_host'] = '127.0.0.1' | ||
433 | 430 | ctxt['haproxy_host'] = '0.0.0.0' | ||
434 | 431 | ctxt['stat_port'] = ':8888' | ||
435 | 432 | |||
436 | 417 | if len(cluster_hosts.keys()) > 1: | 433 | if len(cluster_hosts.keys()) > 1: |
437 | 418 | # Enable haproxy when we have enough peers. | 434 | # Enable haproxy when we have enough peers. |
438 | 419 | log('Ensuring haproxy enabled in /etc/default/haproxy.') | 435 | log('Ensuring haproxy enabled in /etc/default/haproxy.') |
439 | @@ -753,6 +769,17 @@ | |||
440 | 753 | return ctxt | 769 | return ctxt |
441 | 754 | 770 | ||
442 | 755 | 771 | ||
443 | 772 | class LogLevelContext(OSContextGenerator): | ||
444 | 773 | |||
445 | 774 | def __call__(self): | ||
446 | 775 | ctxt = {} | ||
447 | 776 | ctxt['debug'] = \ | ||
448 | 777 | False if config('debug') is None else config('debug') | ||
449 | 778 | ctxt['verbose'] = \ | ||
450 | 779 | False if config('verbose') is None else config('verbose') | ||
451 | 780 | return ctxt | ||
452 | 781 | |||
453 | 782 | |||
454 | 756 | class SyslogContext(OSContextGenerator): | 783 | class SyslogContext(OSContextGenerator): |
455 | 757 | 784 | ||
456 | 758 | def __call__(self): | 785 | def __call__(self): |
457 | 759 | 786 | ||
458 | === modified file 'hooks/charmhelpers/contrib/openstack/ip.py' | |||
459 | --- hooks/charmhelpers/contrib/openstack/ip.py 2014-07-03 15:31:54 +0000 | |||
460 | +++ hooks/charmhelpers/contrib/openstack/ip.py 2014-08-13 13:59:42 +0000 | |||
461 | @@ -7,6 +7,7 @@ | |||
462 | 7 | get_address_in_network, | 7 | get_address_in_network, |
463 | 8 | is_address_in_network, | 8 | is_address_in_network, |
464 | 9 | is_ipv6, | 9 | is_ipv6, |
465 | 10 | get_ipv6_addr, | ||
466 | 10 | ) | 11 | ) |
467 | 11 | 12 | ||
468 | 12 | from charmhelpers.contrib.hahelpers.cluster import is_clustered | 13 | from charmhelpers.contrib.hahelpers.cluster import is_clustered |
469 | @@ -64,10 +65,13 @@ | |||
470 | 64 | vip): | 65 | vip): |
471 | 65 | resolved_address = vip | 66 | resolved_address = vip |
472 | 66 | else: | 67 | else: |
473 | 68 | if config('prefer-ipv6'): | ||
474 | 69 | fallback_addr = get_ipv6_addr() | ||
475 | 70 | else: | ||
476 | 71 | fallback_addr = unit_get(_address_map[endpoint_type]['fallback']) | ||
477 | 67 | resolved_address = get_address_in_network( | 72 | resolved_address = get_address_in_network( |
481 | 68 | config(_address_map[endpoint_type]['config']), | 73 | config(_address_map[endpoint_type]['config']), fallback_addr) |
482 | 69 | unit_get(_address_map[endpoint_type]['fallback']) | 74 | |
480 | 70 | ) | ||
483 | 71 | if resolved_address is None: | 75 | if resolved_address is None: |
484 | 72 | raise ValueError('Unable to resolve a suitable IP address' | 76 | raise ValueError('Unable to resolve a suitable IP address' |
485 | 73 | ' based on charm state and configuration') | 77 | ' based on charm state and configuration') |
486 | 74 | 78 | ||
487 | === modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg' | |||
488 | --- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-07-03 13:03:26 +0000 | |||
489 | +++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-08-13 13:59:42 +0000 | |||
490 | @@ -1,6 +1,6 @@ | |||
491 | 1 | global | 1 | global |
494 | 2 | log 127.0.0.1 local0 | 2 | log {{ local_host }} local0 |
495 | 3 | log 127.0.0.1 local1 notice | 3 | log {{ local_host }} local1 notice |
496 | 4 | maxconn 20000 | 4 | maxconn 20000 |
497 | 5 | user haproxy | 5 | user haproxy |
498 | 6 | group haproxy | 6 | group haproxy |
499 | @@ -17,7 +17,7 @@ | |||
500 | 17 | timeout client 30000 | 17 | timeout client 30000 |
501 | 18 | timeout server 30000 | 18 | timeout server 30000 |
502 | 19 | 19 | ||
504 | 20 | listen stats :8888 | 20 | listen stats {{ stat_port }} |
505 | 21 | mode http | 21 | mode http |
506 | 22 | stats enable | 22 | stats enable |
507 | 23 | stats hide-version | 23 | stats hide-version |
508 | 24 | 24 | ||
509 | === modified file 'hooks/charmhelpers/core/host.py' | |||
510 | --- hooks/charmhelpers/core/host.py 2014-07-25 09:37:25 +0000 | |||
511 | +++ hooks/charmhelpers/core/host.py 2014-08-13 13:59:42 +0000 | |||
512 | @@ -12,6 +12,8 @@ | |||
513 | 12 | import string | 12 | import string |
514 | 13 | import subprocess | 13 | import subprocess |
515 | 14 | import hashlib | 14 | import hashlib |
516 | 15 | import shutil | ||
517 | 16 | from contextlib import contextmanager | ||
518 | 15 | 17 | ||
519 | 16 | from collections import OrderedDict | 18 | from collections import OrderedDict |
520 | 17 | 19 | ||
521 | @@ -52,7 +54,7 @@ | |||
522 | 52 | def service_running(service): | 54 | def service_running(service): |
523 | 53 | """Determine whether a system service is running""" | 55 | """Determine whether a system service is running""" |
524 | 54 | try: | 56 | try: |
526 | 55 | output = subprocess.check_output(['service', service, 'status']) | 57 | output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) |
527 | 56 | except subprocess.CalledProcessError: | 58 | except subprocess.CalledProcessError: |
528 | 57 | return False | 59 | return False |
529 | 58 | else: | 60 | else: |
530 | @@ -62,6 +64,16 @@ | |||
531 | 62 | return False | 64 | return False |
532 | 63 | 65 | ||
533 | 64 | 66 | ||
534 | 67 | def service_available(service_name): | ||
535 | 68 | """Determine whether a system service is available""" | ||
536 | 69 | try: | ||
537 | 70 | subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) | ||
538 | 71 | except subprocess.CalledProcessError: | ||
539 | 72 | return False | ||
540 | 73 | else: | ||
541 | 74 | return True | ||
542 | 75 | |||
543 | 76 | |||
544 | 65 | def adduser(username, password=None, shell='/bin/bash', system_user=False): | 77 | def adduser(username, password=None, shell='/bin/bash', system_user=False): |
545 | 66 | """Add a user to the system""" | 78 | """Add a user to the system""" |
546 | 67 | try: | 79 | try: |
547 | @@ -329,3 +341,24 @@ | |||
548 | 329 | pkgcache = apt_pkg.Cache() | 341 | pkgcache = apt_pkg.Cache() |
549 | 330 | pkg = pkgcache[package] | 342 | pkg = pkgcache[package] |
550 | 331 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) | 343 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) |
551 | 344 | |||
552 | 345 | |||
553 | 346 | @contextmanager | ||
554 | 347 | def chdir(d): | ||
555 | 348 | cur = os.getcwd() | ||
556 | 349 | try: | ||
557 | 350 | yield os.chdir(d) | ||
558 | 351 | finally: | ||
559 | 352 | os.chdir(cur) | ||
560 | 353 | |||
561 | 354 | |||
562 | 355 | def chownr(path, owner, group): | ||
563 | 356 | uid = pwd.getpwnam(owner).pw_uid | ||
564 | 357 | gid = grp.getgrnam(group).gr_gid | ||
565 | 358 | |||
566 | 359 | for root, dirs, files in os.walk(path): | ||
567 | 360 | for name in dirs + files: | ||
568 | 361 | full = os.path.join(root, name) | ||
569 | 362 | broken_symlink = os.path.lexists(full) and not os.path.exists(full) | ||
570 | 363 | if not broken_symlink: | ||
571 | 364 | os.chown(full, uid, gid) | ||
572 | 332 | 365 | ||
573 | === added directory 'hooks/charmhelpers/core/services' | |||
574 | === added file 'hooks/charmhelpers/core/services/__init__.py' | |||
575 | --- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000 | |||
576 | +++ hooks/charmhelpers/core/services/__init__.py 2014-08-13 13:59:42 +0000 | |||
577 | @@ -0,0 +1,2 @@ | |||
578 | 1 | from .base import * | ||
579 | 2 | from .helpers import * | ||
580 | 0 | 3 | ||
581 | === added file 'hooks/charmhelpers/core/services/base.py' | |||
582 | --- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000 | |||
583 | +++ hooks/charmhelpers/core/services/base.py 2014-08-13 13:59:42 +0000 | |||
584 | @@ -0,0 +1,305 @@ | |||
585 | 1 | import os | ||
586 | 2 | import re | ||
587 | 3 | import json | ||
588 | 4 | from collections import Iterable | ||
589 | 5 | |||
590 | 6 | from charmhelpers.core import host | ||
591 | 7 | from charmhelpers.core import hookenv | ||
592 | 8 | |||
593 | 9 | |||
594 | 10 | __all__ = ['ServiceManager', 'ManagerCallback', | ||
595 | 11 | 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', | ||
596 | 12 | 'service_restart', 'service_stop'] | ||
597 | 13 | |||
598 | 14 | |||
599 | 15 | class ServiceManager(object): | ||
600 | 16 | def __init__(self, services=None): | ||
601 | 17 | """ | ||
602 | 18 | Register a list of services, given their definitions. | ||
603 | 19 | |||
604 | 20 | Traditional charm authoring is focused on implementing hooks. That is, | ||
605 | 21 | the charm author is thinking in terms of "What hook am I handling; what | ||
606 | 22 | does this hook need to do?" However, in most cases, the real question | ||
607 | 23 | should be "Do I have the information I need to configure and start this | ||
608 | 24 | piece of software and, if so, what are the steps for doing so?" The | ||
609 | 25 | ServiceManager framework tries to bring the focus to the data and the | ||
610 | 26 | setup tasks, in the most declarative way possible. | ||
611 | 27 | |||
612 | 28 | Service definitions are dicts in the following formats (all keys except | ||
613 | 29 | 'service' are optional):: | ||
614 | 30 | |||
615 | 31 | { | ||
616 | 32 | "service": <service name>, | ||
617 | 33 | "required_data": <list of required data contexts>, | ||
618 | 34 | "data_ready": <one or more callbacks>, | ||
619 | 35 | "data_lost": <one or more callbacks>, | ||
620 | 36 | "start": <one or more callbacks>, | ||
621 | 37 | "stop": <one or more callbacks>, | ||
622 | 38 | "ports": <list of ports to manage>, | ||
623 | 39 | } | ||
624 | 40 | |||
625 | 41 | The 'required_data' list should contain dicts of required data (or | ||
626 | 42 | dependency managers that act like dicts and know how to collect the data). | ||
627 | 43 | Only when all items in the 'required_data' list are populated are the list | ||
628 | 44 | of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more | ||
629 | 45 | information. | ||
630 | 46 | |||
631 | 47 | The 'data_ready' value should be either a single callback, or a list of | ||
632 | 48 | callbacks, to be called when all items in 'required_data' pass `is_ready()`. | ||
633 | 49 | Each callback will be called with the service name as the only parameter. | ||
634 | 50 | After all of the 'data_ready' callbacks are called, the 'start' callbacks | ||
635 | 51 | are fired. | ||
636 | 52 | |||
637 | 53 | The 'data_lost' value should be either a single callback, or a list of | ||
638 | 54 | callbacks, to be called when a 'required_data' item no longer passes | ||
639 | 55 | `is_ready()`. Each callback will be called with the service name as the | ||
640 | 56 | only parameter. After all of the 'data_lost' callbacks are called, | ||
641 | 57 | the 'stop' callbacks are fired. | ||
642 | 58 | |||
643 | 59 | The 'start' value should be either a single callback, or a list of | ||
644 | 60 | callbacks, to be called when starting the service, after the 'data_ready' | ||
645 | 61 | callbacks are complete. Each callback will be called with the service | ||
646 | 62 | name as the only parameter. This defaults to | ||
647 | 63 | `[host.service_start, services.open_ports]`. | ||
648 | 64 | |||
649 | 65 | The 'stop' value should be either a single callback, or a list of | ||
650 | 66 | callbacks, to be called when stopping the service. If the service is | ||
651 | 67 | being stopped because it no longer has all of its 'required_data', this | ||
652 | 68 | will be called after all of the 'data_lost' callbacks are complete. | ||
653 | 69 | Each callback will be called with the service name as the only parameter. | ||
654 | 70 | This defaults to `[services.close_ports, host.service_stop]`. | ||
655 | 71 | |||
656 | 72 | The 'ports' value should be a list of ports to manage. The default | ||
657 | 73 | 'start' handler will open the ports after the service is started, | ||
658 | 74 | and the default 'stop' handler will close the ports prior to stopping | ||
659 | 75 | the service. | ||
660 | 76 | |||
661 | 77 | |||
662 | 78 | Examples: | ||
663 | 79 | |||
664 | 80 | The following registers an Upstart service called bingod that depends on | ||
665 | 81 | a mongodb relation and which runs a custom `db_migrate` function prior to | ||
666 | 82 | restarting the service, and a Runit service called spadesd:: | ||
667 | 83 | |||
668 | 84 | manager = services.ServiceManager([ | ||
669 | 85 | { | ||
670 | 86 | 'service': 'bingod', | ||
671 | 87 | 'ports': [80, 443], | ||
672 | 88 | 'required_data': [MongoRelation(), config(), {'my': 'data'}], | ||
673 | 89 | 'data_ready': [ | ||
674 | 90 | services.template(source='bingod.conf'), | ||
675 | 91 | services.template(source='bingod.ini', | ||
676 | 92 | target='/etc/bingod.ini', | ||
677 | 93 | owner='bingo', perms=0400), | ||
678 | 94 | ], | ||
679 | 95 | }, | ||
680 | 96 | { | ||
681 | 97 | 'service': 'spadesd', | ||
682 | 98 | 'data_ready': services.template(source='spadesd_run.j2', | ||
683 | 99 | target='/etc/sv/spadesd/run', | ||
684 | 100 | perms=0555), | ||
685 | 101 | 'start': runit_start, | ||
686 | 102 | 'stop': runit_stop, | ||
687 | 103 | }, | ||
688 | 104 | ]) | ||
689 | 105 | manager.manage() | ||
690 | 106 | """ | ||
691 | 107 | self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') | ||
692 | 108 | self._ready = None | ||
693 | 109 | self.services = {} | ||
694 | 110 | for service in services or []: | ||
695 | 111 | service_name = service['service'] | ||
696 | 112 | self.services[service_name] = service | ||
697 | 113 | |||
698 | 114 | def manage(self): | ||
699 | 115 | """ | ||
700 | 116 | Handle the current hook by doing The Right Thing with the registered services. | ||
701 | 117 | """ | ||
702 | 118 | hook_name = hookenv.hook_name() | ||
703 | 119 | if hook_name == 'stop': | ||
704 | 120 | self.stop_services() | ||
705 | 121 | else: | ||
706 | 122 | self.provide_data() | ||
707 | 123 | self.reconfigure_services() | ||
708 | 124 | |||
709 | 125 | def provide_data(self): | ||
710 | 126 | hook_name = hookenv.hook_name() | ||
711 | 127 | for service in self.services.values(): | ||
712 | 128 | for provider in service.get('provided_data', []): | ||
713 | 129 | if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): | ||
714 | 130 | data = provider.provide_data() | ||
715 | 131 | if provider._is_ready(data): | ||
716 | 132 | hookenv.relation_set(None, data) | ||
717 | 133 | |||
718 | 134 | def reconfigure_services(self, *service_names): | ||
719 | 135 | """ | ||
720 | 136 | Update all files for one or more registered services, and, | ||
721 | 137 | if ready, optionally restart them. | ||
722 | 138 | |||
723 | 139 | If no service names are given, reconfigures all registered services. | ||
724 | 140 | """ | ||
725 | 141 | for service_name in service_names or self.services.keys(): | ||
726 | 142 | if self.is_ready(service_name): | ||
727 | 143 | self.fire_event('data_ready', service_name) | ||
728 | 144 | self.fire_event('start', service_name, default=[ | ||
729 | 145 | service_restart, | ||
730 | 146 | manage_ports]) | ||
731 | 147 | self.save_ready(service_name) | ||
732 | 148 | else: | ||
733 | 149 | if self.was_ready(service_name): | ||
734 | 150 | self.fire_event('data_lost', service_name) | ||
735 | 151 | self.fire_event('stop', service_name, default=[ | ||
736 | 152 | manage_ports, | ||
737 | 153 | service_stop]) | ||
738 | 154 | self.save_lost(service_name) | ||
739 | 155 | |||
740 | 156 | def stop_services(self, *service_names): | ||
741 | 157 | """ | ||
742 | 158 | Stop one or more registered services, by name. | ||
743 | 159 | |||
744 | 160 | If no service names are given, stops all registered services. | ||
745 | 161 | """ | ||
746 | 162 | for service_name in service_names or self.services.keys(): | ||
747 | 163 | self.fire_event('stop', service_name, default=[ | ||
748 | 164 | manage_ports, | ||
749 | 165 | service_stop]) | ||
750 | 166 | |||
751 | 167 | def get_service(self, service_name): | ||
752 | 168 | """ | ||
753 | 169 | Given the name of a registered service, return its service definition. | ||
754 | 170 | """ | ||
755 | 171 | service = self.services.get(service_name) | ||
756 | 172 | if not service: | ||
757 | 173 | raise KeyError('Service not registered: %s' % service_name) | ||
758 | 174 | return service | ||
759 | 175 | |||
760 | 176 | def fire_event(self, event_name, service_name, default=None): | ||
761 | 177 | """ | ||
762 | 178 | Fire a data_ready, data_lost, start, or stop event on a given service. | ||
763 | 179 | """ | ||
764 | 180 | service = self.get_service(service_name) | ||
765 | 181 | callbacks = service.get(event_name, default) | ||
766 | 182 | if not callbacks: | ||
767 | 183 | return | ||
768 | 184 | if not isinstance(callbacks, Iterable): | ||
769 | 185 | callbacks = [callbacks] | ||
770 | 186 | for callback in callbacks: | ||
771 | 187 | if isinstance(callback, ManagerCallback): | ||
772 | 188 | callback(self, service_name, event_name) | ||
773 | 189 | else: | ||
774 | 190 | callback(service_name) | ||
775 | 191 | |||
776 | 192 | def is_ready(self, service_name): | ||
777 | 193 | """ | ||
778 | 194 | Determine if a registered service is ready, by checking its 'required_data'. | ||
779 | 195 | |||
780 | 196 | A 'required_data' item can be any mapping type, and is considered ready | ||
781 | 197 | if `bool(item)` evaluates as True. | ||
782 | 198 | """ | ||
783 | 199 | service = self.get_service(service_name) | ||
784 | 200 | reqs = service.get('required_data', []) | ||
785 | 201 | return all(bool(req) for req in reqs) | ||
786 | 202 | |||
787 | 203 | def _load_ready_file(self): | ||
788 | 204 | if self._ready is not None: | ||
789 | 205 | return | ||
790 | 206 | if os.path.exists(self._ready_file): | ||
791 | 207 | with open(self._ready_file) as fp: | ||
792 | 208 | self._ready = set(json.load(fp)) | ||
793 | 209 | else: | ||
794 | 210 | self._ready = set() | ||
795 | 211 | |||
796 | 212 | def _save_ready_file(self): | ||
797 | 213 | if self._ready is None: | ||
798 | 214 | return | ||
799 | 215 | with open(self._ready_file, 'w') as fp: | ||
800 | 216 | json.dump(list(self._ready), fp) | ||
801 | 217 | |||
802 | 218 | def save_ready(self, service_name): | ||
803 | 219 | """ | ||
804 | 220 | Save an indicator that the given service is now data_ready. | ||
805 | 221 | """ | ||
806 | 222 | self._load_ready_file() | ||
807 | 223 | self._ready.add(service_name) | ||
808 | 224 | self._save_ready_file() | ||
809 | 225 | |||
810 | 226 | def save_lost(self, service_name): | ||
811 | 227 | """ | ||
812 | 228 | Save an indicator that the given service is no longer data_ready. | ||
813 | 229 | """ | ||
814 | 230 | self._load_ready_file() | ||
815 | 231 | self._ready.discard(service_name) | ||
816 | 232 | self._save_ready_file() | ||
817 | 233 | |||
818 | 234 | def was_ready(self, service_name): | ||
819 | 235 | """ | ||
820 | 236 | Determine if the given service was previously data_ready. | ||
821 | 237 | """ | ||
822 | 238 | self._load_ready_file() | ||
823 | 239 | return service_name in self._ready | ||
824 | 240 | |||
825 | 241 | |||
826 | 242 | class ManagerCallback(object): | ||
827 | 243 | """ | ||
828 | 244 | Special case of a callback that takes the `ServiceManager` instance | ||
829 | 245 | in addition to the service name. | ||
830 | 246 | |||
831 | 247 | Subclasses should implement `__call__` which should accept three parameters: | ||
832 | 248 | |||
833 | 249 | * `manager` The `ServiceManager` instance | ||
834 | 250 | * `service_name` The name of the service it's being triggered for | ||
835 | 251 | * `event_name` The name of the event that this callback is handling | ||
836 | 252 | """ | ||
837 | 253 | def __call__(self, manager, service_name, event_name): | ||
838 | 254 | raise NotImplementedError() | ||
839 | 255 | |||
840 | 256 | |||
841 | 257 | class PortManagerCallback(ManagerCallback): | ||
842 | 258 | """ | ||
843 | 259 | Callback class that will open or close ports, for use as either | ||
844 | 260 | a start or stop action. | ||
845 | 261 | """ | ||
846 | 262 | def __call__(self, manager, service_name, event_name): | ||
847 | 263 | service = manager.get_service(service_name) | ||
848 | 264 | new_ports = service.get('ports', []) | ||
849 | 265 | port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) | ||
850 | 266 | if os.path.exists(port_file): | ||
851 | 267 | with open(port_file) as fp: | ||
852 | 268 | old_ports = fp.read().split(',') | ||
853 | 269 | for old_port in old_ports: | ||
854 | 270 | if bool(old_port): | ||
855 | 271 | old_port = int(old_port) | ||
856 | 272 | if old_port not in new_ports: | ||
857 | 273 | hookenv.close_port(old_port) | ||
858 | 274 | with open(port_file, 'w') as fp: | ||
859 | 275 | fp.write(','.join(str(port) for port in new_ports)) | ||
860 | 276 | for port in new_ports: | ||
861 | 277 | if event_name == 'start': | ||
862 | 278 | hookenv.open_port(port) | ||
863 | 279 | elif event_name == 'stop': | ||
864 | 280 | hookenv.close_port(port) | ||
865 | 281 | |||
866 | 282 | |||
867 | 283 | def service_stop(service_name): | ||
868 | 284 | """ | ||
869 | 285 | Wrapper around host.service_stop to prevent spurious "unknown service" | ||
870 | 286 | messages in the logs. | ||
871 | 287 | """ | ||
872 | 288 | if host.service_running(service_name): | ||
873 | 289 | host.service_stop(service_name) | ||
874 | 290 | |||
875 | 291 | |||
876 | 292 | def service_restart(service_name): | ||
877 | 293 | """ | ||
878 | 294 | Wrapper around host.service_restart to prevent spurious "unknown service" | ||
879 | 295 | messages in the logs. | ||
880 | 296 | """ | ||
881 | 297 | if host.service_available(service_name): | ||
882 | 298 | if host.service_running(service_name): | ||
883 | 299 | host.service_restart(service_name) | ||
884 | 300 | else: | ||
885 | 301 | host.service_start(service_name) | ||
886 | 302 | |||
887 | 303 | |||
888 | 304 | # Convenience aliases | ||
889 | 305 | open_ports = close_ports = manage_ports = PortManagerCallback() | ||
890 | 0 | 306 | ||
891 | === added file 'hooks/charmhelpers/core/services/helpers.py' | |||
892 | --- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000 | |||
893 | +++ hooks/charmhelpers/core/services/helpers.py 2014-08-13 13:59:42 +0000 | |||
894 | @@ -0,0 +1,125 @@ | |||
895 | 1 | from charmhelpers.core import hookenv | ||
896 | 2 | from charmhelpers.core import templating | ||
897 | 3 | |||
898 | 4 | from charmhelpers.core.services.base import ManagerCallback | ||
899 | 5 | |||
900 | 6 | |||
901 | 7 | __all__ = ['RelationContext', 'TemplateCallback', | ||
902 | 8 | 'render_template', 'template'] | ||
903 | 9 | |||
904 | 10 | |||
905 | 11 | class RelationContext(dict): | ||
906 | 12 | """ | ||
907 | 13 | Base class for a context generator that gets relation data from juju. | ||
908 | 14 | |||
909 | 15 | Subclasses must provide the attributes `name`, which is the name of the | ||
910 | 16 | interface of interest, `interface`, which is the type of the interface of | ||
911 | 17 | interest, and `required_keys`, which is the set of keys required for the | ||
912 | 18 | relation to be considered complete. The data for all interfaces matching | ||
913 | 19 | the `name` attribute that are complete will used to populate the dictionary | ||
914 | 20 | values (see `get_data`, below). | ||
915 | 21 | |||
916 | 22 | The generated context will be namespaced under the interface type, to prevent | ||
917 | 23 | potential naming conflicts. | ||
918 | 24 | """ | ||
919 | 25 | name = None | ||
920 | 26 | interface = None | ||
921 | 27 | required_keys = [] | ||
922 | 28 | |||
923 | 29 | def __init__(self, *args, **kwargs): | ||
924 | 30 | super(RelationContext, self).__init__(*args, **kwargs) | ||
925 | 31 | self.get_data() | ||
926 | 32 | |||
927 | 33 | def __bool__(self): | ||
928 | 34 | """ | ||
929 | 35 | Returns True if all of the required_keys are available. | ||
930 | 36 | """ | ||
931 | 37 | return self.is_ready() | ||
932 | 38 | |||
933 | 39 | __nonzero__ = __bool__ | ||
934 | 40 | |||
935 | 41 | def __repr__(self): | ||
936 | 42 | return super(RelationContext, self).__repr__() | ||
937 | 43 | |||
938 | 44 | def is_ready(self): | ||
939 | 45 | """ | ||
940 | 46 | Returns True if all of the `required_keys` are available from any units. | ||
941 | 47 | """ | ||
942 | 48 | ready = len(self.get(self.name, [])) > 0 | ||
943 | 49 | if not ready: | ||
944 | 50 | hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) | ||
945 | 51 | return ready | ||
946 | 52 | |||
947 | 53 | def _is_ready(self, unit_data): | ||
948 | 54 | """ | ||
949 | 55 | Helper method that tests a set of relation data and returns True if | ||
950 | 56 | all of the `required_keys` are present. | ||
951 | 57 | """ | ||
952 | 58 | return set(unit_data.keys()).issuperset(set(self.required_keys)) | ||
953 | 59 | |||
954 | 60 | def get_data(self): | ||
955 | 61 | """ | ||
956 | 62 | Retrieve the relation data for each unit involved in a relation and, | ||
957 | 63 | if complete, store it in a list under `self[self.name]`. This | ||
958 | 64 | is automatically called when the RelationContext is instantiated. | ||
959 | 65 | |||
960 | 66 | The units are sorted lexographically first by the service ID, then by | ||
961 | 67 | the unit ID. Thus, if an interface has two other services, 'db:1' | ||
962 | 68 | and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', | ||
963 | 69 | and 'db:2' having one unit, 'mediawiki/0', all of which have a complete | ||
964 | 70 | set of data, the relation data for the units will be stored in the | ||
965 | 71 | order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. | ||
966 | 72 | |||
967 | 73 | If you only care about a single unit on the relation, you can just | ||
968 | 74 | access it as `{{ interface[0]['key'] }}`. However, if you can at all | ||
969 | 75 | support multiple units on a relation, you should iterate over the list, | ||
970 | 76 | like:: | ||
971 | 77 | |||
972 | 78 | {% for unit in interface -%} | ||
973 | 79 | {{ unit['key'] }}{% if not loop.last %},{% endif %} | ||
974 | 80 | {%- endfor %} | ||
975 | 81 | |||
976 | 82 | Note that since all sets of relation data from all related services and | ||
977 | 83 | units are in a single list, if you need to know which service or unit a | ||
978 | 84 | set of data came from, you'll need to extend this class to preserve | ||
979 | 85 | that information. | ||
980 | 86 | """ | ||
981 | 87 | if not hookenv.relation_ids(self.name): | ||
982 | 88 | return | ||
983 | 89 | |||
984 | 90 | ns = self.setdefault(self.name, []) | ||
985 | 91 | for rid in sorted(hookenv.relation_ids(self.name)): | ||
986 | 92 | for unit in sorted(hookenv.related_units(rid)): | ||
987 | 93 | reldata = hookenv.relation_get(rid=rid, unit=unit) | ||
988 | 94 | if self._is_ready(reldata): | ||
989 | 95 | ns.append(reldata) | ||
990 | 96 | |||
991 | 97 | def provide_data(self): | ||
992 | 98 | """ | ||
993 | 99 | Return data to be relation_set for this interface. | ||
994 | 100 | """ | ||
995 | 101 | return {} | ||
996 | 102 | |||
997 | 103 | |||
998 | 104 | class TemplateCallback(ManagerCallback): | ||
999 | 105 | """ | ||
1000 | 106 | Callback class that will render a template, for use as a ready action. | ||
1001 | 107 | """ | ||
1002 | 108 | def __init__(self, source, target, owner='root', group='root', perms=0444): | ||
1003 | 109 | self.source = source | ||
1004 | 110 | self.target = target | ||
1005 | 111 | self.owner = owner | ||
1006 | 112 | self.group = group | ||
1007 | 113 | self.perms = perms | ||
1008 | 114 | |||
1009 | 115 | def __call__(self, manager, service_name, event_name): | ||
1010 | 116 | service = manager.get_service(service_name) | ||
1011 | 117 | context = {} | ||
1012 | 118 | for ctx in service.get('required_data', []): | ||
1013 | 119 | context.update(ctx) | ||
1014 | 120 | templating.render(self.source, self.target, context, | ||
1015 | 121 | self.owner, self.group, self.perms) | ||
1016 | 122 | |||
1017 | 123 | |||
1018 | 124 | # Convenience aliases for templates | ||
1019 | 125 | render_template = template = TemplateCallback | ||
1020 | 0 | 126 | ||
1021 | === added file 'hooks/charmhelpers/core/templating.py' | |||
1022 | --- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000 | |||
1023 | +++ hooks/charmhelpers/core/templating.py 2014-08-13 13:59:42 +0000 | |||
1024 | @@ -0,0 +1,51 @@ | |||
1025 | 1 | import os | ||
1026 | 2 | |||
1027 | 3 | from charmhelpers.core import host | ||
1028 | 4 | from charmhelpers.core import hookenv | ||
1029 | 5 | |||
1030 | 6 | |||
1031 | 7 | def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): | ||
1032 | 8 | """ | ||
1033 | 9 | Render a template. | ||
1034 | 10 | |||
1035 | 11 | The `source` path, if not absolute, is relative to the `templates_dir`. | ||
1036 | 12 | |||
1037 | 13 | The `target` path should be absolute. | ||
1038 | 14 | |||
1039 | 15 | The context should be a dict containing the values to be replaced in the | ||
1040 | 16 | template. | ||
1041 | 17 | |||
1042 | 18 | The `owner`, `group`, and `perms` options will be passed to `write_file`. | ||
1043 | 19 | |||
1044 | 20 | If omitted, `templates_dir` defaults to the `templates` folder in the charm. | ||
1045 | 21 | |||
1046 | 22 | Note: Using this requires python-jinja2; if it is not installed, calling | ||
1047 | 23 | this will attempt to use charmhelpers.fetch.apt_install to install it. | ||
1048 | 24 | """ | ||
1049 | 25 | try: | ||
1050 | 26 | from jinja2 import FileSystemLoader, Environment, exceptions | ||
1051 | 27 | except ImportError: | ||
1052 | 28 | try: | ||
1053 | 29 | from charmhelpers.fetch import apt_install | ||
1054 | 30 | except ImportError: | ||
1055 | 31 | hookenv.log('Could not import jinja2, and could not import ' | ||
1056 | 32 | 'charmhelpers.fetch to install it', | ||
1057 | 33 | level=hookenv.ERROR) | ||
1058 | 34 | raise | ||
1059 | 35 | apt_install('python-jinja2', fatal=True) | ||
1060 | 36 | from jinja2 import FileSystemLoader, Environment, exceptions | ||
1061 | 37 | |||
1062 | 38 | if templates_dir is None: | ||
1063 | 39 | templates_dir = os.path.join(hookenv.charm_dir(), 'templates') | ||
1064 | 40 | loader = Environment(loader=FileSystemLoader(templates_dir)) | ||
1065 | 41 | try: | ||
1066 | 42 | source = source | ||
1067 | 43 | template = loader.get_template(source) | ||
1068 | 44 | except exceptions.TemplateNotFound as e: | ||
1069 | 45 | hookenv.log('Could not load template %s from %s.' % | ||
1070 | 46 | (source, templates_dir), | ||
1071 | 47 | level=hookenv.ERROR) | ||
1072 | 48 | raise e | ||
1073 | 49 | content = template.render(context) | ||
1074 | 50 | host.mkdir(os.path.dirname(target)) | ||
1075 | 51 | host.write_file(target, content, owner, group, perms) | ||
1076 | 0 | 52 | ||
1077 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
1078 | --- hooks/charmhelpers/fetch/__init__.py 2014-07-25 09:37:25 +0000 | |||
1079 | +++ hooks/charmhelpers/fetch/__init__.py 2014-08-13 13:59:42 +0000 | |||
1080 | @@ -122,6 +122,7 @@ | |||
1081 | 122 | # Tell apt to build an in-memory cache to prevent race conditions (if | 122 | # Tell apt to build an in-memory cache to prevent race conditions (if |
1082 | 123 | # another process is already building the cache). | 123 | # another process is already building the cache). |
1083 | 124 | apt_pkg.config.set("Dir::Cache::pkgcache", "") | 124 | apt_pkg.config.set("Dir::Cache::pkgcache", "") |
1084 | 125 | apt_pkg.config.set("Dir::Cache::srcpkgcache", "") | ||
1085 | 125 | 126 | ||
1086 | 126 | cache = apt_pkg.Cache() | 127 | cache = apt_pkg.Cache() |
1087 | 127 | _pkgs = [] | 128 | _pkgs = [] |
1088 | 128 | 129 | ||
1089 | === modified file 'tests/charmhelpers/contrib/amulet/deployment.py' | |||
1090 | --- tests/charmhelpers/contrib/amulet/deployment.py 2014-07-25 09:37:25 +0000 | |||
1091 | +++ tests/charmhelpers/contrib/amulet/deployment.py 2014-08-13 13:59:42 +0000 | |||
1092 | @@ -1,9 +1,14 @@ | |||
1093 | 1 | import amulet | 1 | import amulet |
1094 | 2 | 2 | ||
1095 | 3 | import os | ||
1096 | 4 | |||
1097 | 3 | 5 | ||
1098 | 4 | class AmuletDeployment(object): | 6 | class AmuletDeployment(object): |
1101 | 5 | """This class provides generic Amulet deployment and test runner | 7 | """Amulet deployment. |
1102 | 6 | methods.""" | 8 | |
1103 | 9 | This class provides generic Amulet deployment and test runner | ||
1104 | 10 | methods. | ||
1105 | 11 | """ | ||
1106 | 7 | 12 | ||
1107 | 8 | def __init__(self, series=None): | 13 | def __init__(self, series=None): |
1108 | 9 | """Initialize the deployment environment.""" | 14 | """Initialize the deployment environment.""" |
1109 | @@ -16,11 +21,19 @@ | |||
1110 | 16 | self.d = amulet.Deployment() | 21 | self.d = amulet.Deployment() |
1111 | 17 | 22 | ||
1112 | 18 | def _add_services(self, this_service, other_services): | 23 | def _add_services(self, this_service, other_services): |
1114 | 19 | """Add services to the deployment where this_service is the local charm | 24 | """Add services. |
1115 | 25 | |||
1116 | 26 | Add services to the deployment where this_service is the local charm | ||
1117 | 20 | that we're focused on testing and other_services are the other | 27 | that we're focused on testing and other_services are the other |
1119 | 21 | charms that come from the charm store.""" | 28 | charms that come from the charm store. |
1120 | 29 | """ | ||
1121 | 22 | name, units = range(2) | 30 | name, units = range(2) |
1123 | 23 | self.this_service = this_service[name] | 31 | |
1124 | 32 | if this_service[name] != os.path.basename(os.getcwd()): | ||
1125 | 33 | s = this_service[name] | ||
1126 | 34 | msg = "The charm's root directory name needs to be {}".format(s) | ||
1127 | 35 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1128 | 36 | |||
1129 | 24 | self.d.add(this_service[name], units=this_service[units]) | 37 | self.d.add(this_service[name], units=this_service[units]) |
1130 | 25 | 38 | ||
1131 | 26 | for svc in other_services: | 39 | for svc in other_services: |
1132 | @@ -45,10 +58,10 @@ | |||
1133 | 45 | """Deploy environment and wait for all hooks to finish executing.""" | 58 | """Deploy environment and wait for all hooks to finish executing.""" |
1134 | 46 | try: | 59 | try: |
1135 | 47 | self.d.setup() | 60 | self.d.setup() |
1137 | 48 | self.d.sentry.wait() | 61 | self.d.sentry.wait(timeout=900) |
1138 | 49 | except amulet.helpers.TimeoutError: | 62 | except amulet.helpers.TimeoutError: |
1139 | 50 | amulet.raise_status(amulet.FAIL, msg="Deployment timed out") | 63 | amulet.raise_status(amulet.FAIL, msg="Deployment timed out") |
1141 | 51 | except: | 64 | except Exception: |
1142 | 52 | raise | 65 | raise |
1143 | 53 | 66 | ||
1144 | 54 | def run_tests(self): | 67 | def run_tests(self): |
1145 | 55 | 68 | ||
1146 | === modified file 'tests/charmhelpers/contrib/amulet/utils.py' | |||
1147 | --- tests/charmhelpers/contrib/amulet/utils.py 2014-07-25 09:37:25 +0000 | |||
1148 | +++ tests/charmhelpers/contrib/amulet/utils.py 2014-08-13 13:59:42 +0000 | |||
1149 | @@ -3,12 +3,15 @@ | |||
1150 | 3 | import logging | 3 | import logging |
1151 | 4 | import re | 4 | import re |
1152 | 5 | import sys | 5 | import sys |
1154 | 6 | from time import sleep | 6 | import time |
1155 | 7 | 7 | ||
1156 | 8 | 8 | ||
1157 | 9 | class AmuletUtils(object): | 9 | class AmuletUtils(object): |
1160 | 10 | """This class provides common utility functions that are used by Amulet | 10 | """Amulet utilities. |
1161 | 11 | tests.""" | 11 | |
1162 | 12 | This class provides common utility functions that are used by Amulet | ||
1163 | 13 | tests. | ||
1164 | 14 | """ | ||
1165 | 12 | 15 | ||
1166 | 13 | def __init__(self, log_level=logging.ERROR): | 16 | def __init__(self, log_level=logging.ERROR): |
1167 | 14 | self.log = self.get_logger(level=log_level) | 17 | self.log = self.get_logger(level=log_level) |
1168 | @@ -17,8 +20,8 @@ | |||
1169 | 17 | """Get a logger object that will log to stdout.""" | 20 | """Get a logger object that will log to stdout.""" |
1170 | 18 | log = logging | 21 | log = logging |
1171 | 19 | logger = log.getLogger(name) | 22 | logger = log.getLogger(name) |
1174 | 20 | fmt = \ | 23 | fmt = log.Formatter("%(asctime)s %(funcName)s " |
1175 | 21 | log.Formatter("%(asctime)s %(funcName)s %(levelname)s: %(message)s") | 24 | "%(levelname)s: %(message)s") |
1176 | 22 | 25 | ||
1177 | 23 | handler = log.StreamHandler(stream=sys.stdout) | 26 | handler = log.StreamHandler(stream=sys.stdout) |
1178 | 24 | handler.setLevel(level) | 27 | handler.setLevel(level) |
1179 | @@ -38,7 +41,7 @@ | |||
1180 | 38 | def valid_url(self, url): | 41 | def valid_url(self, url): |
1181 | 39 | p = re.compile( | 42 | p = re.compile( |
1182 | 40 | r'^(?:http|ftp)s?://' | 43 | r'^(?:http|ftp)s?://' |
1184 | 41 | r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # flake8: noqa | 44 | r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa |
1185 | 42 | r'localhost|' | 45 | r'localhost|' |
1186 | 43 | r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' | 46 | r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' |
1187 | 44 | r'(?::\d+)?' | 47 | r'(?::\d+)?' |
1188 | @@ -50,8 +53,11 @@ | |||
1189 | 50 | return False | 53 | return False |
1190 | 51 | 54 | ||
1191 | 52 | def validate_services(self, commands): | 55 | def validate_services(self, commands): |
1194 | 53 | """Verify the specified services are running on the corresponding | 56 | """Validate services. |
1195 | 54 | service units.""" | 57 | |
1196 | 58 | Verify the specified services are running on the corresponding | ||
1197 | 59 | service units. | ||
1198 | 60 | """ | ||
1199 | 55 | for k, v in commands.iteritems(): | 61 | for k, v in commands.iteritems(): |
1200 | 56 | for cmd in v: | 62 | for cmd in v: |
1201 | 57 | output, code = k.run(cmd) | 63 | output, code = k.run(cmd) |
1202 | @@ -66,9 +72,13 @@ | |||
1203 | 66 | config.readfp(io.StringIO(file_contents)) | 72 | config.readfp(io.StringIO(file_contents)) |
1204 | 67 | return config | 73 | return config |
1205 | 68 | 74 | ||
1209 | 69 | def validate_config_data(self, sentry_unit, config_file, section, expected): | 75 | def validate_config_data(self, sentry_unit, config_file, section, |
1210 | 70 | """Verify that the specified section of the config file contains | 76 | expected): |
1211 | 71 | the expected option key:value pairs.""" | 77 | """Validate config file data. |
1212 | 78 | |||
1213 | 79 | Verify that the specified section of the config file contains | ||
1214 | 80 | the expected option key:value pairs. | ||
1215 | 81 | """ | ||
1216 | 72 | config = self._get_config(sentry_unit, config_file) | 82 | config = self._get_config(sentry_unit, config_file) |
1217 | 73 | 83 | ||
1218 | 74 | if section != 'DEFAULT' and not config.has_section(section): | 84 | if section != 'DEFAULT' and not config.has_section(section): |
1219 | @@ -78,20 +88,23 @@ | |||
1220 | 78 | if not config.has_option(section, k): | 88 | if not config.has_option(section, k): |
1221 | 79 | return "section [{}] is missing option {}".format(section, k) | 89 | return "section [{}] is missing option {}".format(section, k) |
1222 | 80 | if config.get(section, k) != expected[k]: | 90 | if config.get(section, k) != expected[k]: |
1225 | 81 | return "section [{}] {}:{} != expected {}:{}".format(section, | 91 | return "section [{}] {}:{} != expected {}:{}".format( |
1226 | 82 | k, config.get(section, k), k, expected[k]) | 92 | section, k, config.get(section, k), k, expected[k]) |
1227 | 83 | return None | 93 | return None |
1228 | 84 | 94 | ||
1229 | 85 | def _validate_dict_data(self, expected, actual): | 95 | def _validate_dict_data(self, expected, actual): |
1231 | 86 | """Compare expected dictionary data vs actual dictionary data. | 96 | """Validate dictionary data. |
1232 | 97 | |||
1233 | 98 | Compare expected dictionary data vs actual dictionary data. | ||
1234 | 87 | The values in the 'expected' dictionary can be strings, bools, ints, | 99 | The values in the 'expected' dictionary can be strings, bools, ints, |
1235 | 88 | longs, or can be a function that evaluate a variable and returns a | 100 | longs, or can be a function that evaluate a variable and returns a |
1237 | 89 | bool.""" | 101 | bool. |
1238 | 102 | """ | ||
1239 | 90 | for k, v in expected.iteritems(): | 103 | for k, v in expected.iteritems(): |
1240 | 91 | if k in actual: | 104 | if k in actual: |
1244 | 92 | if isinstance(v, basestring) or \ | 105 | if (isinstance(v, basestring) or |
1245 | 93 | isinstance(v, bool) or \ | 106 | isinstance(v, bool) or |
1246 | 94 | isinstance(v, (int, long)): | 107 | isinstance(v, (int, long))): |
1247 | 95 | if v != actual[k]: | 108 | if v != actual[k]: |
1248 | 96 | return "{}:{}".format(k, actual[k]) | 109 | return "{}:{}".format(k, actual[k]) |
1249 | 97 | elif not v(actual[k]): | 110 | elif not v(actual[k]): |
1250 | @@ -114,7 +127,7 @@ | |||
1251 | 114 | return None | 127 | return None |
1252 | 115 | 128 | ||
1253 | 116 | def not_null(self, string): | 129 | def not_null(self, string): |
1255 | 117 | if string != None: | 130 | if string is not None: |
1256 | 118 | return True | 131 | return True |
1257 | 119 | else: | 132 | else: |
1258 | 120 | return False | 133 | return False |
1259 | @@ -128,9 +141,12 @@ | |||
1260 | 128 | return sentry_unit.directory_stat(directory)['mtime'] | 141 | return sentry_unit.directory_stat(directory)['mtime'] |
1261 | 129 | 142 | ||
1262 | 130 | def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): | 143 | def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): |
1264 | 131 | """Determine start time of the process based on the last modification | 144 | """Get process' start time. |
1265 | 145 | |||
1266 | 146 | Determine start time of the process based on the last modification | ||
1267 | 132 | time of the /proc/pid directory. If pgrep_full is True, the process | 147 | time of the /proc/pid directory. If pgrep_full is True, the process |
1269 | 133 | name is matched against the full command line.""" | 148 | name is matched against the full command line. |
1270 | 149 | """ | ||
1271 | 134 | if pgrep_full: | 150 | if pgrep_full: |
1272 | 135 | cmd = 'pgrep -o -f {}'.format(service) | 151 | cmd = 'pgrep -o -f {}'.format(service) |
1273 | 136 | else: | 152 | else: |
1274 | @@ -139,13 +155,16 @@ | |||
1275 | 139 | return self._get_dir_mtime(sentry_unit, proc_dir) | 155 | return self._get_dir_mtime(sentry_unit, proc_dir) |
1276 | 140 | 156 | ||
1277 | 141 | def service_restarted(self, sentry_unit, service, filename, | 157 | def service_restarted(self, sentry_unit, service, filename, |
1280 | 142 | pgrep_full=False): | 158 | pgrep_full=False, sleep_time=20): |
1281 | 143 | """Compare a service's start time vs a file's last modification time | 159 | """Check if service was restarted. |
1282 | 160 | |||
1283 | 161 | Compare a service's start time vs a file's last modification time | ||
1284 | 144 | (such as a config file for that service) to determine if the service | 162 | (such as a config file for that service) to determine if the service |
1289 | 145 | has been restarted.""" | 163 | has been restarted. |
1290 | 146 | sleep(10) | 164 | """ |
1291 | 147 | if self._get_proc_start_time(sentry_unit, service, pgrep_full) >= \ | 165 | time.sleep(sleep_time) |
1292 | 148 | self._get_file_mtime(sentry_unit, filename): | 166 | if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= |
1293 | 167 | self._get_file_mtime(sentry_unit, filename)): | ||
1294 | 149 | return True | 168 | return True |
1295 | 150 | else: | 169 | else: |
1296 | 151 | return False | 170 | return False |
1297 | 152 | 171 | ||
1298 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
1299 | --- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-25 09:37:25 +0000 | |||
1300 | +++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-08-13 13:59:42 +0000 | |||
1301 | @@ -4,8 +4,11 @@ | |||
1302 | 4 | 4 | ||
1303 | 5 | 5 | ||
1304 | 6 | class OpenStackAmuletDeployment(AmuletDeployment): | 6 | class OpenStackAmuletDeployment(AmuletDeployment): |
1307 | 7 | """This class inherits from AmuletDeployment and has additional support | 7 | """OpenStack amulet deployment. |
1308 | 8 | that is specifically for use by OpenStack charms.""" | 8 | |
1309 | 9 | This class inherits from AmuletDeployment and has additional support | ||
1310 | 10 | that is specifically for use by OpenStack charms. | ||
1311 | 11 | """ | ||
1312 | 9 | 12 | ||
1313 | 10 | def __init__(self, series=None, openstack=None, source=None): | 13 | def __init__(self, series=None, openstack=None, source=None): |
1314 | 11 | """Initialize the deployment environment.""" | 14 | """Initialize the deployment environment.""" |
1315 | @@ -40,11 +43,14 @@ | |||
1316 | 40 | self.d.configure(service, config) | 43 | self.d.configure(service, config) |
1317 | 41 | 44 | ||
1318 | 42 | def _get_openstack_release(self): | 45 | def _get_openstack_release(self): |
1324 | 43 | """Return an integer representing the enum value of the openstack | 46 | """Get openstack release. |
1325 | 44 | release.""" | 47 | |
1326 | 45 | self.precise_essex, self.precise_folsom, self.precise_grizzly, \ | 48 | Return an integer representing the enum value of the openstack |
1327 | 46 | self.precise_havana, self.precise_icehouse, \ | 49 | release. |
1328 | 47 | self.trusty_icehouse = range(6) | 50 | """ |
1329 | 51 | (self.precise_essex, self.precise_folsom, self.precise_grizzly, | ||
1330 | 52 | self.precise_havana, self.precise_icehouse, | ||
1331 | 53 | self.trusty_icehouse) = range(6) | ||
1332 | 48 | releases = { | 54 | releases = { |
1333 | 49 | ('precise', None): self.precise_essex, | 55 | ('precise', None): self.precise_essex, |
1334 | 50 | ('precise', 'cloud:precise-folsom'): self.precise_folsom, | 56 | ('precise', 'cloud:precise-folsom'): self.precise_folsom, |
1335 | 51 | 57 | ||
1336 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/utils.py' | |||
1337 | --- tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-25 09:37:25 +0000 | |||
1338 | +++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-08-13 13:59:42 +0000 | |||
1339 | @@ -16,8 +16,11 @@ | |||
1340 | 16 | 16 | ||
1341 | 17 | 17 | ||
1342 | 18 | class OpenStackAmuletUtils(AmuletUtils): | 18 | class OpenStackAmuletUtils(AmuletUtils): |
1345 | 19 | """This class inherits from AmuletUtils and has additional support | 19 | """OpenStack amulet utilities. |
1346 | 20 | that is specifically for use by OpenStack charms.""" | 20 | |
1347 | 21 | This class inherits from AmuletUtils and has additional support | ||
1348 | 22 | that is specifically for use by OpenStack charms. | ||
1349 | 23 | """ | ||
1350 | 21 | 24 | ||
1351 | 22 | def __init__(self, log_level=ERROR): | 25 | def __init__(self, log_level=ERROR): |
1352 | 23 | """Initialize the deployment environment.""" | 26 | """Initialize the deployment environment.""" |
1353 | @@ -25,13 +28,17 @@ | |||
1354 | 25 | 28 | ||
1355 | 26 | def validate_endpoint_data(self, endpoints, admin_port, internal_port, | 29 | def validate_endpoint_data(self, endpoints, admin_port, internal_port, |
1356 | 27 | public_port, expected): | 30 | public_port, expected): |
1359 | 28 | """Validate actual endpoint data vs expected endpoint data. The ports | 31 | """Validate endpoint data. |
1360 | 29 | are used to find the matching endpoint.""" | 32 | |
1361 | 33 | Validate actual endpoint data vs expected endpoint data. The ports | ||
1362 | 34 | are used to find the matching endpoint. | ||
1363 | 35 | """ | ||
1364 | 30 | found = False | 36 | found = False |
1365 | 31 | for ep in endpoints: | 37 | for ep in endpoints: |
1366 | 32 | self.log.debug('endpoint: {}'.format(repr(ep))) | 38 | self.log.debug('endpoint: {}'.format(repr(ep))) |
1369 | 33 | if admin_port in ep.adminurl and internal_port in ep.internalurl \ | 39 | if (admin_port in ep.adminurl and |
1370 | 34 | and public_port in ep.publicurl: | 40 | internal_port in ep.internalurl and |
1371 | 41 | public_port in ep.publicurl): | ||
1372 | 35 | found = True | 42 | found = True |
1373 | 36 | actual = {'id': ep.id, | 43 | actual = {'id': ep.id, |
1374 | 37 | 'region': ep.region, | 44 | 'region': ep.region, |
1375 | @@ -47,8 +54,11 @@ | |||
1376 | 47 | return 'endpoint not found' | 54 | return 'endpoint not found' |
1377 | 48 | 55 | ||
1378 | 49 | def validate_svc_catalog_endpoint_data(self, expected, actual): | 56 | def validate_svc_catalog_endpoint_data(self, expected, actual): |
1381 | 50 | """Validate a list of actual service catalog endpoints vs a list of | 57 | """Validate service catalog endpoint data. |
1382 | 51 | expected service catalog endpoints.""" | 58 | |
1383 | 59 | Validate a list of actual service catalog endpoints vs a list of | ||
1384 | 60 | expected service catalog endpoints. | ||
1385 | 61 | """ | ||
1386 | 52 | self.log.debug('actual: {}'.format(repr(actual))) | 62 | self.log.debug('actual: {}'.format(repr(actual))) |
1387 | 53 | for k, v in expected.iteritems(): | 63 | for k, v in expected.iteritems(): |
1388 | 54 | if k in actual: | 64 | if k in actual: |
1389 | @@ -60,8 +70,11 @@ | |||
1390 | 60 | return ret | 70 | return ret |
1391 | 61 | 71 | ||
1392 | 62 | def validate_tenant_data(self, expected, actual): | 72 | def validate_tenant_data(self, expected, actual): |
1395 | 63 | """Validate a list of actual tenant data vs list of expected tenant | 73 | """Validate tenant data. |
1396 | 64 | data.""" | 74 | |
1397 | 75 | Validate a list of actual tenant data vs list of expected tenant | ||
1398 | 76 | data. | ||
1399 | 77 | """ | ||
1400 | 65 | self.log.debug('actual: {}'.format(repr(actual))) | 78 | self.log.debug('actual: {}'.format(repr(actual))) |
1401 | 66 | for e in expected: | 79 | for e in expected: |
1402 | 67 | found = False | 80 | found = False |
1403 | @@ -78,8 +91,11 @@ | |||
1404 | 78 | return ret | 91 | return ret |
1405 | 79 | 92 | ||
1406 | 80 | def validate_role_data(self, expected, actual): | 93 | def validate_role_data(self, expected, actual): |
1409 | 81 | """Validate a list of actual role data vs a list of expected role | 94 | """Validate role data. |
1410 | 82 | data.""" | 95 | |
1411 | 96 | Validate a list of actual role data vs a list of expected role | ||
1412 | 97 | data. | ||
1413 | 98 | """ | ||
1414 | 83 | self.log.debug('actual: {}'.format(repr(actual))) | 99 | self.log.debug('actual: {}'.format(repr(actual))) |
1415 | 84 | for e in expected: | 100 | for e in expected: |
1416 | 85 | found = False | 101 | found = False |
1417 | @@ -95,8 +111,11 @@ | |||
1418 | 95 | return ret | 111 | return ret |
1419 | 96 | 112 | ||
1420 | 97 | def validate_user_data(self, expected, actual): | 113 | def validate_user_data(self, expected, actual): |
1423 | 98 | """Validate a list of actual user data vs a list of expected user | 114 | """Validate user data. |
1424 | 99 | data.""" | 115 | |
1425 | 116 | Validate a list of actual user data vs a list of expected user | ||
1426 | 117 | data. | ||
1427 | 118 | """ | ||
1428 | 100 | self.log.debug('actual: {}'.format(repr(actual))) | 119 | self.log.debug('actual: {}'.format(repr(actual))) |
1429 | 101 | for e in expected: | 120 | for e in expected: |
1430 | 102 | found = False | 121 | found = False |
1431 | @@ -114,21 +133,24 @@ | |||
1432 | 114 | return ret | 133 | return ret |
1433 | 115 | 134 | ||
1434 | 116 | def validate_flavor_data(self, expected, actual): | 135 | def validate_flavor_data(self, expected, actual): |
1436 | 117 | """Validate a list of actual flavors vs a list of expected flavors.""" | 136 | """Validate flavor data. |
1437 | 137 | |||
1438 | 138 | Validate a list of actual flavors vs a list of expected flavors. | ||
1439 | 139 | """ | ||
1440 | 118 | self.log.debug('actual: {}'.format(repr(actual))) | 140 | self.log.debug('actual: {}'.format(repr(actual))) |
1441 | 119 | act = [a.name for a in actual] | 141 | act = [a.name for a in actual] |
1442 | 120 | return self._validate_list_data(expected, act) | 142 | return self._validate_list_data(expected, act) |
1443 | 121 | 143 | ||
1444 | 122 | def tenant_exists(self, keystone, tenant): | 144 | def tenant_exists(self, keystone, tenant): |
1446 | 123 | """Return True if tenant exists""" | 145 | """Return True if tenant exists.""" |
1447 | 124 | return tenant in [t.name for t in keystone.tenants.list()] | 146 | return tenant in [t.name for t in keystone.tenants.list()] |
1448 | 125 | 147 | ||
1449 | 126 | def authenticate_keystone_admin(self, keystone_sentry, user, password, | 148 | def authenticate_keystone_admin(self, keystone_sentry, user, password, |
1450 | 127 | tenant): | 149 | tenant): |
1451 | 128 | """Authenticates admin user with the keystone admin endpoint.""" | 150 | """Authenticates admin user with the keystone admin endpoint.""" |
1455 | 129 | service_ip = \ | 151 | unit = keystone_sentry |
1456 | 130 | keystone_sentry.relation('shared-db', | 152 | service_ip = unit.relation('shared-db', |
1457 | 131 | 'mysql:shared-db')['private-address'] | 153 | 'mysql:shared-db')['private-address'] |
1458 | 132 | ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) | 154 | ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) |
1459 | 133 | return keystone_client.Client(username=user, password=password, | 155 | return keystone_client.Client(username=user, password=password, |
1460 | 134 | tenant_name=tenant, auth_url=ep) | 156 | tenant_name=tenant, auth_url=ep) |
1461 | @@ -177,12 +199,40 @@ | |||
1462 | 177 | image = glance.images.create(name=image_name, is_public=True, | 199 | image = glance.images.create(name=image_name, is_public=True, |
1463 | 178 | disk_format='qcow2', | 200 | disk_format='qcow2', |
1464 | 179 | container_format='bare', data=f) | 201 | container_format='bare', data=f) |
1465 | 202 | count = 1 | ||
1466 | 203 | status = image.status | ||
1467 | 204 | while status != 'active' and count < 10: | ||
1468 | 205 | time.sleep(3) | ||
1469 | 206 | image = glance.images.get(image.id) | ||
1470 | 207 | status = image.status | ||
1471 | 208 | self.log.debug('image status: {}'.format(status)) | ||
1472 | 209 | count += 1 | ||
1473 | 210 | |||
1474 | 211 | if status != 'active': | ||
1475 | 212 | self.log.error('image creation timed out') | ||
1476 | 213 | return None | ||
1477 | 214 | |||
1478 | 180 | return image | 215 | return image |
1479 | 181 | 216 | ||
1480 | 182 | def delete_image(self, glance, image): | 217 | def delete_image(self, glance, image): |
1481 | 183 | """Delete the specified image.""" | 218 | """Delete the specified image.""" |
1482 | 219 | num_before = len(list(glance.images.list())) | ||
1483 | 184 | glance.images.delete(image) | 220 | glance.images.delete(image) |
1484 | 185 | 221 | ||
1485 | 222 | count = 1 | ||
1486 | 223 | num_after = len(list(glance.images.list())) | ||
1487 | 224 | while num_after != (num_before - 1) and count < 10: | ||
1488 | 225 | time.sleep(3) | ||
1489 | 226 | num_after = len(list(glance.images.list())) | ||
1490 | 227 | self.log.debug('number of images: {}'.format(num_after)) | ||
1491 | 228 | count += 1 | ||
1492 | 229 | |||
1493 | 230 | if num_after != (num_before - 1): | ||
1494 | 231 | self.log.error('image deletion timed out') | ||
1495 | 232 | return False | ||
1496 | 233 | |||
1497 | 234 | return True | ||
1498 | 235 | |||
1499 | 186 | def create_instance(self, nova, image_name, instance_name, flavor): | 236 | def create_instance(self, nova, image_name, instance_name, flavor): |
1500 | 187 | """Create the specified instance.""" | 237 | """Create the specified instance.""" |
1501 | 188 | image = nova.images.find(name=image_name) | 238 | image = nova.images.find(name=image_name) |
1502 | @@ -199,11 +249,27 @@ | |||
1503 | 199 | self.log.debug('instance status: {}'.format(status)) | 249 | self.log.debug('instance status: {}'.format(status)) |
1504 | 200 | count += 1 | 250 | count += 1 |
1505 | 201 | 251 | ||
1507 | 202 | if status == 'BUILD': | 252 | if status != 'ACTIVE': |
1508 | 253 | self.log.error('instance creation timed out') | ||
1509 | 203 | return None | 254 | return None |
1510 | 204 | 255 | ||
1511 | 205 | return instance | 256 | return instance |
1512 | 206 | 257 | ||
1513 | 207 | def delete_instance(self, nova, instance): | 258 | def delete_instance(self, nova, instance): |
1514 | 208 | """Delete the specified instance.""" | 259 | """Delete the specified instance.""" |
1515 | 260 | num_before = len(list(nova.servers.list())) | ||
1516 | 209 | nova.servers.delete(instance) | 261 | nova.servers.delete(instance) |
1517 | 262 | |||
1518 | 263 | count = 1 | ||
1519 | 264 | num_after = len(list(nova.servers.list())) | ||
1520 | 265 | while num_after != (num_before - 1) and count < 10: | ||
1521 | 266 | time.sleep(3) | ||
1522 | 267 | num_after = len(list(nova.servers.list())) | ||
1523 | 268 | self.log.debug('number of instances: {}'.format(num_after)) | ||
1524 | 269 | count += 1 | ||
1525 | 270 | |||
1526 | 271 | if num_after != (num_before - 1): | ||
1527 | 272 | self.log.error('instance deletion timed out') | ||
1528 | 273 | return False | ||
1529 | 274 | |||
1530 | 275 | return True |
Approved by jamespage