Merge lp:~gnuoy/charms/trusty/cinder/next-charm-sync into lp:~openstack-charmers-archive/charms/trusty/cinder/next
- Trusty Tahr (14.04)
- next-charm-sync
- Merge into next
Proposed by
Liam Young
Status: | Merged |
---|---|
Merged at revision: | 41 |
Proposed branch: | lp:~gnuoy/charms/trusty/cinder/next-charm-sync |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/cinder/next |
Diff against target: |
1100 lines (+735/-52) 14 files modified
hooks/charmhelpers/contrib/hahelpers/cluster.py (+55/-13) hooks/charmhelpers/contrib/network/ip.py (+19/-1) hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+13/-7) hooks/charmhelpers/contrib/openstack/amulet/utils.py (+86/-20) hooks/charmhelpers/contrib/openstack/context.py (+31/-4) hooks/charmhelpers/contrib/openstack/ip.py (+7/-3) hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+3/-3) hooks/charmhelpers/contrib/storage/linux/utils.py (+3/-0) hooks/charmhelpers/core/host.py (+34/-1) hooks/charmhelpers/core/services/__init__.py (+2/-0) hooks/charmhelpers/core/services/base.py (+305/-0) hooks/charmhelpers/core/services/helpers.py (+125/-0) hooks/charmhelpers/core/templating.py (+51/-0) hooks/charmhelpers/fetch/__init__.py (+1/-0) |
To merge this branch: | bzr merge lp:~gnuoy/charms/trusty/cinder/next-charm-sync |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Liam Young (community) | Approve | ||
Review via email: mp+230638@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py' | |||
2 | --- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-07-25 08:11:52 +0000 | |||
3 | +++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-08-13 14:00:26 +0000 | |||
4 | @@ -6,6 +6,11 @@ | |||
5 | 6 | # Adam Gandelman <adamg@ubuntu.com> | 6 | # Adam Gandelman <adamg@ubuntu.com> |
6 | 7 | # | 7 | # |
7 | 8 | 8 | ||
8 | 9 | """ | ||
9 | 10 | Helpers for clustering and determining "cluster leadership" and other | ||
10 | 11 | clustering-related helpers. | ||
11 | 12 | """ | ||
12 | 13 | |||
13 | 9 | import subprocess | 14 | import subprocess |
14 | 10 | import os | 15 | import os |
15 | 11 | 16 | ||
16 | @@ -19,6 +24,7 @@ | |||
17 | 19 | config as config_get, | 24 | config as config_get, |
18 | 20 | INFO, | 25 | INFO, |
19 | 21 | ERROR, | 26 | ERROR, |
20 | 27 | WARNING, | ||
21 | 22 | unit_get, | 28 | unit_get, |
22 | 23 | ) | 29 | ) |
23 | 24 | 30 | ||
24 | @@ -27,6 +33,29 @@ | |||
25 | 27 | pass | 33 | pass |
26 | 28 | 34 | ||
27 | 29 | 35 | ||
28 | 36 | def is_elected_leader(resource): | ||
29 | 37 | """ | ||
30 | 38 | Returns True if the charm executing this is the elected cluster leader. | ||
31 | 39 | |||
32 | 40 | It relies on two mechanisms to determine leadership: | ||
33 | 41 | 1. If the charm is part of a corosync cluster, call corosync to | ||
34 | 42 | determine leadership. | ||
35 | 43 | 2. If the charm is not part of a corosync cluster, the leader is | ||
36 | 44 | determined as being "the alive unit with the lowest unit number". In | ||
37 | 45 | other words, the oldest surviving unit. | ||
38 | 46 | """ | ||
39 | 47 | if is_clustered(): | ||
40 | 48 | if not is_crm_leader(resource): | ||
41 | 49 | log('Deferring action to CRM leader.', level=INFO) | ||
42 | 50 | return False | ||
43 | 51 | else: | ||
44 | 52 | peers = peer_units() | ||
45 | 53 | if peers and not oldest_peer(peers): | ||
46 | 54 | log('Deferring action to oldest service unit.', level=INFO) | ||
47 | 55 | return False | ||
48 | 56 | return True | ||
49 | 57 | |||
50 | 58 | |||
51 | 30 | def is_clustered(): | 59 | def is_clustered(): |
52 | 31 | for r_id in (relation_ids('ha') or []): | 60 | for r_id in (relation_ids('ha') or []): |
53 | 32 | for unit in (relation_list(r_id) or []): | 61 | for unit in (relation_list(r_id) or []): |
54 | @@ -38,7 +67,11 @@ | |||
55 | 38 | return False | 67 | return False |
56 | 39 | 68 | ||
57 | 40 | 69 | ||
59 | 41 | def is_leader(resource): | 70 | def is_crm_leader(resource): |
60 | 71 | """ | ||
61 | 72 | Returns True if the charm calling this is the elected corosync leader, | ||
62 | 73 | as returned by calling the external "crm" command. | ||
63 | 74 | """ | ||
64 | 42 | cmd = [ | 75 | cmd = [ |
65 | 43 | "crm", "resource", | 76 | "crm", "resource", |
66 | 44 | "show", resource | 77 | "show", resource |
67 | @@ -54,15 +87,31 @@ | |||
68 | 54 | return False | 87 | return False |
69 | 55 | 88 | ||
70 | 56 | 89 | ||
72 | 57 | def peer_units(): | 90 | def is_leader(resource): |
73 | 91 | log("is_leader is deprecated. Please consider using is_crm_leader " | ||
74 | 92 | "instead.", level=WARNING) | ||
75 | 93 | return is_crm_leader(resource) | ||
76 | 94 | |||
77 | 95 | |||
78 | 96 | def peer_units(peer_relation="cluster"): | ||
79 | 58 | peers = [] | 97 | peers = [] |
81 | 59 | for r_id in (relation_ids('cluster') or []): | 98 | for r_id in (relation_ids(peer_relation) or []): |
82 | 60 | for unit in (relation_list(r_id) or []): | 99 | for unit in (relation_list(r_id) or []): |
83 | 61 | peers.append(unit) | 100 | peers.append(unit) |
84 | 62 | return peers | 101 | return peers |
85 | 63 | 102 | ||
86 | 64 | 103 | ||
87 | 104 | def peer_ips(peer_relation='cluster', addr_key='private-address'): | ||
88 | 105 | '''Return a dict of peers and their private-address''' | ||
89 | 106 | peers = {} | ||
90 | 107 | for r_id in relation_ids(peer_relation): | ||
91 | 108 | for unit in relation_list(r_id): | ||
92 | 109 | peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) | ||
93 | 110 | return peers | ||
94 | 111 | |||
95 | 112 | |||
96 | 65 | def oldest_peer(peers): | 113 | def oldest_peer(peers): |
97 | 114 | """Determines who the oldest peer is by comparing unit numbers.""" | ||
98 | 66 | local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) | 115 | local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) |
99 | 67 | for peer in peers: | 116 | for peer in peers: |
100 | 68 | remote_unit_no = int(peer.split('/')[1]) | 117 | remote_unit_no = int(peer.split('/')[1]) |
101 | @@ -72,16 +121,9 @@ | |||
102 | 72 | 121 | ||
103 | 73 | 122 | ||
104 | 74 | def eligible_leader(resource): | 123 | def eligible_leader(resource): |
115 | 75 | if is_clustered(): | 124 | log("eligible_leader is deprecated. Please consider using " |
116 | 76 | if not is_leader(resource): | 125 | "is_elected_leader instead.", level=WARNING) |
117 | 77 | log('Deferring action to CRM leader.', level=INFO) | 126 | return is_elected_leader(resource) |
108 | 78 | return False | ||
109 | 79 | else: | ||
110 | 80 | peers = peer_units() | ||
111 | 81 | if peers and not oldest_peer(peers): | ||
112 | 82 | log('Deferring action to oldest service unit.', level=INFO) | ||
113 | 83 | return False | ||
114 | 84 | return True | ||
118 | 85 | 127 | ||
119 | 86 | 128 | ||
120 | 87 | def https(): | 129 | def https(): |
121 | 88 | 130 | ||
122 | === modified file 'hooks/charmhelpers/contrib/network/ip.py' | |||
123 | --- hooks/charmhelpers/contrib/network/ip.py 2014-07-24 10:25:17 +0000 | |||
124 | +++ hooks/charmhelpers/contrib/network/ip.py 2014-08-13 14:00:26 +0000 | |||
125 | @@ -4,7 +4,7 @@ | |||
126 | 4 | 4 | ||
127 | 5 | from charmhelpers.fetch import apt_install | 5 | from charmhelpers.fetch import apt_install |
128 | 6 | from charmhelpers.core.hookenv import ( | 6 | from charmhelpers.core.hookenv import ( |
130 | 7 | ERROR, log, | 7 | ERROR, log, config, |
131 | 8 | ) | 8 | ) |
132 | 9 | 9 | ||
133 | 10 | try: | 10 | try: |
134 | @@ -154,3 +154,21 @@ | |||
135 | 154 | get_iface_for_address = partial(_get_for_address, key='iface') | 154 | get_iface_for_address = partial(_get_for_address, key='iface') |
136 | 155 | 155 | ||
137 | 156 | get_netmask_for_address = partial(_get_for_address, key='netmask') | 156 | get_netmask_for_address = partial(_get_for_address, key='netmask') |
138 | 157 | |||
139 | 158 | |||
140 | 159 | def get_ipv6_addr(iface="eth0"): | ||
141 | 160 | try: | ||
142 | 161 | iface_addrs = netifaces.ifaddresses(iface) | ||
143 | 162 | if netifaces.AF_INET6 not in iface_addrs: | ||
144 | 163 | raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) | ||
145 | 164 | |||
146 | 165 | addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] | ||
147 | 166 | ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') | ||
148 | 167 | and config('vip') != a['addr']] | ||
149 | 168 | if not ipv6_addr: | ||
150 | 169 | raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) | ||
151 | 170 | |||
152 | 171 | return ipv6_addr[0] | ||
153 | 172 | |||
154 | 173 | except ValueError: | ||
155 | 174 | raise ValueError("Invalid interface '%s'" % iface) | ||
156 | 157 | 175 | ||
157 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
158 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-03 12:44:32 +0000 | |||
159 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-08-13 14:00:26 +0000 | |||
160 | @@ -4,8 +4,11 @@ | |||
161 | 4 | 4 | ||
162 | 5 | 5 | ||
163 | 6 | class OpenStackAmuletDeployment(AmuletDeployment): | 6 | class OpenStackAmuletDeployment(AmuletDeployment): |
166 | 7 | """This class inherits from AmuletDeployment and has additional support | 7 | """OpenStack amulet deployment. |
167 | 8 | that is specifically for use by OpenStack charms.""" | 8 | |
168 | 9 | This class inherits from AmuletDeployment and has additional support | ||
169 | 10 | that is specifically for use by OpenStack charms. | ||
170 | 11 | """ | ||
171 | 9 | 12 | ||
172 | 10 | def __init__(self, series=None, openstack=None, source=None): | 13 | def __init__(self, series=None, openstack=None, source=None): |
173 | 11 | """Initialize the deployment environment.""" | 14 | """Initialize the deployment environment.""" |
174 | @@ -40,11 +43,14 @@ | |||
175 | 40 | self.d.configure(service, config) | 43 | self.d.configure(service, config) |
176 | 41 | 44 | ||
177 | 42 | def _get_openstack_release(self): | 45 | def _get_openstack_release(self): |
183 | 43 | """Return an integer representing the enum value of the openstack | 46 | """Get openstack release. |
184 | 44 | release.""" | 47 | |
185 | 45 | self.precise_essex, self.precise_folsom, self.precise_grizzly, \ | 48 | Return an integer representing the enum value of the openstack |
186 | 46 | self.precise_havana, self.precise_icehouse, \ | 49 | release. |
187 | 47 | self.trusty_icehouse = range(6) | 50 | """ |
188 | 51 | (self.precise_essex, self.precise_folsom, self.precise_grizzly, | ||
189 | 52 | self.precise_havana, self.precise_icehouse, | ||
190 | 53 | self.trusty_icehouse) = range(6) | ||
191 | 48 | releases = { | 54 | releases = { |
192 | 49 | ('precise', None): self.precise_essex, | 55 | ('precise', None): self.precise_essex, |
193 | 50 | ('precise', 'cloud:precise-folsom'): self.precise_folsom, | 56 | ('precise', 'cloud:precise-folsom'): self.precise_folsom, |
194 | 51 | 57 | ||
195 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' | |||
196 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-03 12:44:32 +0000 | |||
197 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-08-13 14:00:26 +0000 | |||
198 | @@ -16,8 +16,11 @@ | |||
199 | 16 | 16 | ||
200 | 17 | 17 | ||
201 | 18 | class OpenStackAmuletUtils(AmuletUtils): | 18 | class OpenStackAmuletUtils(AmuletUtils): |
204 | 19 | """This class inherits from AmuletUtils and has additional support | 19 | """OpenStack amulet utilities. |
205 | 20 | that is specifically for use by OpenStack charms.""" | 20 | |
206 | 21 | This class inherits from AmuletUtils and has additional support | ||
207 | 22 | that is specifically for use by OpenStack charms. | ||
208 | 23 | """ | ||
209 | 21 | 24 | ||
210 | 22 | def __init__(self, log_level=ERROR): | 25 | def __init__(self, log_level=ERROR): |
211 | 23 | """Initialize the deployment environment.""" | 26 | """Initialize the deployment environment.""" |
212 | @@ -25,13 +28,17 @@ | |||
213 | 25 | 28 | ||
214 | 26 | def validate_endpoint_data(self, endpoints, admin_port, internal_port, | 29 | def validate_endpoint_data(self, endpoints, admin_port, internal_port, |
215 | 27 | public_port, expected): | 30 | public_port, expected): |
218 | 28 | """Validate actual endpoint data vs expected endpoint data. The ports | 31 | """Validate endpoint data. |
219 | 29 | are used to find the matching endpoint.""" | 32 | |
220 | 33 | Validate actual endpoint data vs expected endpoint data. The ports | ||
221 | 34 | are used to find the matching endpoint. | ||
222 | 35 | """ | ||
223 | 30 | found = False | 36 | found = False |
224 | 31 | for ep in endpoints: | 37 | for ep in endpoints: |
225 | 32 | self.log.debug('endpoint: {}'.format(repr(ep))) | 38 | self.log.debug('endpoint: {}'.format(repr(ep))) |
228 | 33 | if admin_port in ep.adminurl and internal_port in ep.internalurl \ | 39 | if (admin_port in ep.adminurl and |
229 | 34 | and public_port in ep.publicurl: | 40 | internal_port in ep.internalurl and |
230 | 41 | public_port in ep.publicurl): | ||
231 | 35 | found = True | 42 | found = True |
232 | 36 | actual = {'id': ep.id, | 43 | actual = {'id': ep.id, |
233 | 37 | 'region': ep.region, | 44 | 'region': ep.region, |
234 | @@ -47,8 +54,11 @@ | |||
235 | 47 | return 'endpoint not found' | 54 | return 'endpoint not found' |
236 | 48 | 55 | ||
237 | 49 | def validate_svc_catalog_endpoint_data(self, expected, actual): | 56 | def validate_svc_catalog_endpoint_data(self, expected, actual): |
240 | 50 | """Validate a list of actual service catalog endpoints vs a list of | 57 | """Validate service catalog endpoint data. |
241 | 51 | expected service catalog endpoints.""" | 58 | |
242 | 59 | Validate a list of actual service catalog endpoints vs a list of | ||
243 | 60 | expected service catalog endpoints. | ||
244 | 61 | """ | ||
245 | 52 | self.log.debug('actual: {}'.format(repr(actual))) | 62 | self.log.debug('actual: {}'.format(repr(actual))) |
246 | 53 | for k, v in expected.iteritems(): | 63 | for k, v in expected.iteritems(): |
247 | 54 | if k in actual: | 64 | if k in actual: |
248 | @@ -60,8 +70,11 @@ | |||
249 | 60 | return ret | 70 | return ret |
250 | 61 | 71 | ||
251 | 62 | def validate_tenant_data(self, expected, actual): | 72 | def validate_tenant_data(self, expected, actual): |
254 | 63 | """Validate a list of actual tenant data vs list of expected tenant | 73 | """Validate tenant data. |
255 | 64 | data.""" | 74 | |
256 | 75 | Validate a list of actual tenant data vs list of expected tenant | ||
257 | 76 | data. | ||
258 | 77 | """ | ||
259 | 65 | self.log.debug('actual: {}'.format(repr(actual))) | 78 | self.log.debug('actual: {}'.format(repr(actual))) |
260 | 66 | for e in expected: | 79 | for e in expected: |
261 | 67 | found = False | 80 | found = False |
262 | @@ -78,8 +91,11 @@ | |||
263 | 78 | return ret | 91 | return ret |
264 | 79 | 92 | ||
265 | 80 | def validate_role_data(self, expected, actual): | 93 | def validate_role_data(self, expected, actual): |
268 | 81 | """Validate a list of actual role data vs a list of expected role | 94 | """Validate role data. |
269 | 82 | data.""" | 95 | |
270 | 96 | Validate a list of actual role data vs a list of expected role | ||
271 | 97 | data. | ||
272 | 98 | """ | ||
273 | 83 | self.log.debug('actual: {}'.format(repr(actual))) | 99 | self.log.debug('actual: {}'.format(repr(actual))) |
274 | 84 | for e in expected: | 100 | for e in expected: |
275 | 85 | found = False | 101 | found = False |
276 | @@ -95,8 +111,11 @@ | |||
277 | 95 | return ret | 111 | return ret |
278 | 96 | 112 | ||
279 | 97 | def validate_user_data(self, expected, actual): | 113 | def validate_user_data(self, expected, actual): |
282 | 98 | """Validate a list of actual user data vs a list of expected user | 114 | """Validate user data. |
283 | 99 | data.""" | 115 | |
284 | 116 | Validate a list of actual user data vs a list of expected user | ||
285 | 117 | data. | ||
286 | 118 | """ | ||
287 | 100 | self.log.debug('actual: {}'.format(repr(actual))) | 119 | self.log.debug('actual: {}'.format(repr(actual))) |
288 | 101 | for e in expected: | 120 | for e in expected: |
289 | 102 | found = False | 121 | found = False |
290 | @@ -114,21 +133,24 @@ | |||
291 | 114 | return ret | 133 | return ret |
292 | 115 | 134 | ||
293 | 116 | def validate_flavor_data(self, expected, actual): | 135 | def validate_flavor_data(self, expected, actual): |
295 | 117 | """Validate a list of actual flavors vs a list of expected flavors.""" | 136 | """Validate flavor data. |
296 | 137 | |||
297 | 138 | Validate a list of actual flavors vs a list of expected flavors. | ||
298 | 139 | """ | ||
299 | 118 | self.log.debug('actual: {}'.format(repr(actual))) | 140 | self.log.debug('actual: {}'.format(repr(actual))) |
300 | 119 | act = [a.name for a in actual] | 141 | act = [a.name for a in actual] |
301 | 120 | return self._validate_list_data(expected, act) | 142 | return self._validate_list_data(expected, act) |
302 | 121 | 143 | ||
303 | 122 | def tenant_exists(self, keystone, tenant): | 144 | def tenant_exists(self, keystone, tenant): |
305 | 123 | """Return True if tenant exists""" | 145 | """Return True if tenant exists.""" |
306 | 124 | return tenant in [t.name for t in keystone.tenants.list()] | 146 | return tenant in [t.name for t in keystone.tenants.list()] |
307 | 125 | 147 | ||
308 | 126 | def authenticate_keystone_admin(self, keystone_sentry, user, password, | 148 | def authenticate_keystone_admin(self, keystone_sentry, user, password, |
309 | 127 | tenant): | 149 | tenant): |
310 | 128 | """Authenticates admin user with the keystone admin endpoint.""" | 150 | """Authenticates admin user with the keystone admin endpoint.""" |
314 | 129 | service_ip = \ | 151 | unit = keystone_sentry |
315 | 130 | keystone_sentry.relation('shared-db', | 152 | service_ip = unit.relation('shared-db', |
316 | 131 | 'mysql:shared-db')['private-address'] | 153 | 'mysql:shared-db')['private-address'] |
317 | 132 | ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) | 154 | ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) |
318 | 133 | return keystone_client.Client(username=user, password=password, | 155 | return keystone_client.Client(username=user, password=password, |
319 | 134 | tenant_name=tenant, auth_url=ep) | 156 | tenant_name=tenant, auth_url=ep) |
320 | @@ -177,12 +199,40 @@ | |||
321 | 177 | image = glance.images.create(name=image_name, is_public=True, | 199 | image = glance.images.create(name=image_name, is_public=True, |
322 | 178 | disk_format='qcow2', | 200 | disk_format='qcow2', |
323 | 179 | container_format='bare', data=f) | 201 | container_format='bare', data=f) |
324 | 202 | count = 1 | ||
325 | 203 | status = image.status | ||
326 | 204 | while status != 'active' and count < 10: | ||
327 | 205 | time.sleep(3) | ||
328 | 206 | image = glance.images.get(image.id) | ||
329 | 207 | status = image.status | ||
330 | 208 | self.log.debug('image status: {}'.format(status)) | ||
331 | 209 | count += 1 | ||
332 | 210 | |||
333 | 211 | if status != 'active': | ||
334 | 212 | self.log.error('image creation timed out') | ||
335 | 213 | return None | ||
336 | 214 | |||
337 | 180 | return image | 215 | return image |
338 | 181 | 216 | ||
339 | 182 | def delete_image(self, glance, image): | 217 | def delete_image(self, glance, image): |
340 | 183 | """Delete the specified image.""" | 218 | """Delete the specified image.""" |
341 | 219 | num_before = len(list(glance.images.list())) | ||
342 | 184 | glance.images.delete(image) | 220 | glance.images.delete(image) |
343 | 185 | 221 | ||
344 | 222 | count = 1 | ||
345 | 223 | num_after = len(list(glance.images.list())) | ||
346 | 224 | while num_after != (num_before - 1) and count < 10: | ||
347 | 225 | time.sleep(3) | ||
348 | 226 | num_after = len(list(glance.images.list())) | ||
349 | 227 | self.log.debug('number of images: {}'.format(num_after)) | ||
350 | 228 | count += 1 | ||
351 | 229 | |||
352 | 230 | if num_after != (num_before - 1): | ||
353 | 231 | self.log.error('image deletion timed out') | ||
354 | 232 | return False | ||
355 | 233 | |||
356 | 234 | return True | ||
357 | 235 | |||
358 | 186 | def create_instance(self, nova, image_name, instance_name, flavor): | 236 | def create_instance(self, nova, image_name, instance_name, flavor): |
359 | 187 | """Create the specified instance.""" | 237 | """Create the specified instance.""" |
360 | 188 | image = nova.images.find(name=image_name) | 238 | image = nova.images.find(name=image_name) |
361 | @@ -199,11 +249,27 @@ | |||
362 | 199 | self.log.debug('instance status: {}'.format(status)) | 249 | self.log.debug('instance status: {}'.format(status)) |
363 | 200 | count += 1 | 250 | count += 1 |
364 | 201 | 251 | ||
366 | 202 | if status == 'BUILD': | 252 | if status != 'ACTIVE': |
367 | 253 | self.log.error('instance creation timed out') | ||
368 | 203 | return None | 254 | return None |
369 | 204 | 255 | ||
370 | 205 | return instance | 256 | return instance |
371 | 206 | 257 | ||
372 | 207 | def delete_instance(self, nova, instance): | 258 | def delete_instance(self, nova, instance): |
373 | 208 | """Delete the specified instance.""" | 259 | """Delete the specified instance.""" |
374 | 260 | num_before = len(list(nova.servers.list())) | ||
375 | 209 | nova.servers.delete(instance) | 261 | nova.servers.delete(instance) |
376 | 262 | |||
377 | 263 | count = 1 | ||
378 | 264 | num_after = len(list(nova.servers.list())) | ||
379 | 265 | while num_after != (num_before - 1) and count < 10: | ||
380 | 266 | time.sleep(3) | ||
381 | 267 | num_after = len(list(nova.servers.list())) | ||
382 | 268 | self.log.debug('number of instances: {}'.format(num_after)) | ||
383 | 269 | count += 1 | ||
384 | 270 | |||
385 | 271 | if num_after != (num_before - 1): | ||
386 | 272 | self.log.error('instance deletion timed out') | ||
387 | 273 | return False | ||
388 | 274 | |||
389 | 275 | return True | ||
390 | 210 | 276 | ||
391 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
392 | --- hooks/charmhelpers/contrib/openstack/context.py 2014-07-25 08:11:52 +0000 | |||
393 | +++ hooks/charmhelpers/contrib/openstack/context.py 2014-08-13 14:00:26 +0000 | |||
394 | @@ -44,7 +44,10 @@ | |||
395 | 44 | neutron_plugin_attribute, | 44 | neutron_plugin_attribute, |
396 | 45 | ) | 45 | ) |
397 | 46 | 46 | ||
399 | 47 | from charmhelpers.contrib.network.ip import get_address_in_network | 47 | from charmhelpers.contrib.network.ip import ( |
400 | 48 | get_address_in_network, | ||
401 | 49 | get_ipv6_addr, | ||
402 | 50 | ) | ||
403 | 48 | 51 | ||
404 | 49 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' | 52 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
405 | 50 | 53 | ||
406 | @@ -401,9 +404,12 @@ | |||
407 | 401 | 404 | ||
408 | 402 | cluster_hosts = {} | 405 | cluster_hosts = {} |
409 | 403 | l_unit = local_unit().replace('/', '-') | 406 | l_unit = local_unit().replace('/', '-') |
413 | 404 | cluster_hosts[l_unit] = \ | 407 | if config('prefer-ipv6'): |
414 | 405 | get_address_in_network(config('os-internal-network'), | 408 | addr = get_ipv6_addr() |
415 | 406 | unit_get('private-address')) | 409 | else: |
416 | 410 | addr = unit_get('private-address') | ||
417 | 411 | cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'), | ||
418 | 412 | addr) | ||
419 | 407 | 413 | ||
420 | 408 | for rid in relation_ids('cluster'): | 414 | for rid in relation_ids('cluster'): |
421 | 409 | for unit in related_units(rid): | 415 | for unit in related_units(rid): |
422 | @@ -414,6 +420,16 @@ | |||
423 | 414 | ctxt = { | 420 | ctxt = { |
424 | 415 | 'units': cluster_hosts, | 421 | 'units': cluster_hosts, |
425 | 416 | } | 422 | } |
426 | 423 | |||
427 | 424 | if config('prefer-ipv6'): | ||
428 | 425 | ctxt['local_host'] = 'ip6-localhost' | ||
429 | 426 | ctxt['haproxy_host'] = '::' | ||
430 | 427 | ctxt['stat_port'] = ':::8888' | ||
431 | 428 | else: | ||
432 | 429 | ctxt['local_host'] = '127.0.0.1' | ||
433 | 430 | ctxt['haproxy_host'] = '0.0.0.0' | ||
434 | 431 | ctxt['stat_port'] = ':8888' | ||
435 | 432 | |||
436 | 417 | if len(cluster_hosts.keys()) > 1: | 433 | if len(cluster_hosts.keys()) > 1: |
437 | 418 | # Enable haproxy when we have enough peers. | 434 | # Enable haproxy when we have enough peers. |
438 | 419 | log('Ensuring haproxy enabled in /etc/default/haproxy.') | 435 | log('Ensuring haproxy enabled in /etc/default/haproxy.') |
439 | @@ -753,6 +769,17 @@ | |||
440 | 753 | return ctxt | 769 | return ctxt |
441 | 754 | 770 | ||
442 | 755 | 771 | ||
443 | 772 | class LogLevelContext(OSContextGenerator): | ||
444 | 773 | |||
445 | 774 | def __call__(self): | ||
446 | 775 | ctxt = {} | ||
447 | 776 | ctxt['debug'] = \ | ||
448 | 777 | False if config('debug') is None else config('debug') | ||
449 | 778 | ctxt['verbose'] = \ | ||
450 | 779 | False if config('verbose') is None else config('verbose') | ||
451 | 780 | return ctxt | ||
452 | 781 | |||
453 | 782 | |||
454 | 756 | class SyslogContext(OSContextGenerator): | 783 | class SyslogContext(OSContextGenerator): |
455 | 757 | 784 | ||
456 | 758 | def __call__(self): | 785 | def __call__(self): |
457 | 759 | 786 | ||
458 | === modified file 'hooks/charmhelpers/contrib/openstack/ip.py' | |||
459 | --- hooks/charmhelpers/contrib/openstack/ip.py 2014-07-03 15:16:16 +0000 | |||
460 | +++ hooks/charmhelpers/contrib/openstack/ip.py 2014-08-13 14:00:26 +0000 | |||
461 | @@ -7,6 +7,7 @@ | |||
462 | 7 | get_address_in_network, | 7 | get_address_in_network, |
463 | 8 | is_address_in_network, | 8 | is_address_in_network, |
464 | 9 | is_ipv6, | 9 | is_ipv6, |
465 | 10 | get_ipv6_addr, | ||
466 | 10 | ) | 11 | ) |
467 | 11 | 12 | ||
468 | 12 | from charmhelpers.contrib.hahelpers.cluster import is_clustered | 13 | from charmhelpers.contrib.hahelpers.cluster import is_clustered |
469 | @@ -64,10 +65,13 @@ | |||
470 | 64 | vip): | 65 | vip): |
471 | 65 | resolved_address = vip | 66 | resolved_address = vip |
472 | 66 | else: | 67 | else: |
473 | 68 | if config('prefer-ipv6'): | ||
474 | 69 | fallback_addr = get_ipv6_addr() | ||
475 | 70 | else: | ||
476 | 71 | fallback_addr = unit_get(_address_map[endpoint_type]['fallback']) | ||
477 | 67 | resolved_address = get_address_in_network( | 72 | resolved_address = get_address_in_network( |
481 | 68 | config(_address_map[endpoint_type]['config']), | 73 | config(_address_map[endpoint_type]['config']), fallback_addr) |
482 | 69 | unit_get(_address_map[endpoint_type]['fallback']) | 74 | |
480 | 70 | ) | ||
483 | 71 | if resolved_address is None: | 75 | if resolved_address is None: |
484 | 72 | raise ValueError('Unable to resolve a suitable IP address' | 76 | raise ValueError('Unable to resolve a suitable IP address' |
485 | 73 | ' based on charm state and configuration') | 77 | ' based on charm state and configuration') |
486 | 74 | 78 | ||
487 | === modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg' | |||
488 | --- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-07-03 12:56:00 +0000 | |||
489 | +++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-08-13 14:00:26 +0000 | |||
490 | @@ -1,6 +1,6 @@ | |||
491 | 1 | global | 1 | global |
494 | 2 | log 127.0.0.1 local0 | 2 | log {{ local_host }} local0 |
495 | 3 | log 127.0.0.1 local1 notice | 3 | log {{ local_host }} local1 notice |
496 | 4 | maxconn 20000 | 4 | maxconn 20000 |
497 | 5 | user haproxy | 5 | user haproxy |
498 | 6 | group haproxy | 6 | group haproxy |
499 | @@ -17,7 +17,7 @@ | |||
500 | 17 | timeout client 30000 | 17 | timeout client 30000 |
501 | 18 | timeout server 30000 | 18 | timeout server 30000 |
502 | 19 | 19 | ||
504 | 20 | listen stats :8888 | 20 | listen stats {{ stat_port }} |
505 | 21 | mode http | 21 | mode http |
506 | 22 | stats enable | 22 | stats enable |
507 | 23 | stats hide-version | 23 | stats hide-version |
508 | 24 | 24 | ||
509 | === modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py' | |||
510 | --- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-07-25 08:11:52 +0000 | |||
511 | +++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-08-13 14:00:26 +0000 | |||
512 | @@ -46,5 +46,8 @@ | |||
513 | 46 | :returns: boolean: True if the path represents a mounted device, False if | 46 | :returns: boolean: True if the path represents a mounted device, False if |
514 | 47 | it doesn't. | 47 | it doesn't. |
515 | 48 | ''' | 48 | ''' |
516 | 49 | is_partition = bool(re.search(r".*[0-9]+\b", device)) | ||
517 | 49 | out = check_output(['mount']) | 50 | out = check_output(['mount']) |
518 | 51 | if is_partition: | ||
519 | 52 | return bool(re.search(device + r"\b", out)) | ||
520 | 50 | return bool(re.search(device + r"[0-9]+\b", out)) | 53 | return bool(re.search(device + r"[0-9]+\b", out)) |
521 | 51 | 54 | ||
522 | === modified file 'hooks/charmhelpers/core/host.py' | |||
523 | --- hooks/charmhelpers/core/host.py 2014-07-25 08:11:52 +0000 | |||
524 | +++ hooks/charmhelpers/core/host.py 2014-08-13 14:00:26 +0000 | |||
525 | @@ -12,6 +12,8 @@ | |||
526 | 12 | import string | 12 | import string |
527 | 13 | import subprocess | 13 | import subprocess |
528 | 14 | import hashlib | 14 | import hashlib |
529 | 15 | import shutil | ||
530 | 16 | from contextlib import contextmanager | ||
531 | 15 | 17 | ||
532 | 16 | from collections import OrderedDict | 18 | from collections import OrderedDict |
533 | 17 | 19 | ||
534 | @@ -52,7 +54,7 @@ | |||
535 | 52 | def service_running(service): | 54 | def service_running(service): |
536 | 53 | """Determine whether a system service is running""" | 55 | """Determine whether a system service is running""" |
537 | 54 | try: | 56 | try: |
539 | 55 | output = subprocess.check_output(['service', service, 'status']) | 57 | output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) |
540 | 56 | except subprocess.CalledProcessError: | 58 | except subprocess.CalledProcessError: |
541 | 57 | return False | 59 | return False |
542 | 58 | else: | 60 | else: |
543 | @@ -62,6 +64,16 @@ | |||
544 | 62 | return False | 64 | return False |
545 | 63 | 65 | ||
546 | 64 | 66 | ||
547 | 67 | def service_available(service_name): | ||
548 | 68 | """Determine whether a system service is available""" | ||
549 | 69 | try: | ||
550 | 70 | subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) | ||
551 | 71 | except subprocess.CalledProcessError: | ||
552 | 72 | return False | ||
553 | 73 | else: | ||
554 | 74 | return True | ||
555 | 75 | |||
556 | 76 | |||
557 | 65 | def adduser(username, password=None, shell='/bin/bash', system_user=False): | 77 | def adduser(username, password=None, shell='/bin/bash', system_user=False): |
558 | 66 | """Add a user to the system""" | 78 | """Add a user to the system""" |
559 | 67 | try: | 79 | try: |
560 | @@ -329,3 +341,24 @@ | |||
561 | 329 | pkgcache = apt_pkg.Cache() | 341 | pkgcache = apt_pkg.Cache() |
562 | 330 | pkg = pkgcache[package] | 342 | pkg = pkgcache[package] |
563 | 331 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) | 343 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) |
564 | 344 | |||
565 | 345 | |||
566 | 346 | @contextmanager | ||
567 | 347 | def chdir(d): | ||
568 | 348 | cur = os.getcwd() | ||
569 | 349 | try: | ||
570 | 350 | yield os.chdir(d) | ||
571 | 351 | finally: | ||
572 | 352 | os.chdir(cur) | ||
573 | 353 | |||
574 | 354 | |||
def chownr(path, owner, group):
    """Recursively change ownership of everything beneath *path*.

    *owner* and *group* are names (not numeric ids); they are resolved
    once up front.  Broken symlinks are skipped, since chown would fail
    on their missing targets.  Note: *path* itself is not chowned, only
    its contents.
    """
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid

    for root, dirs, files in os.walk(path):
        for name in dirs + files:
            target = os.path.join(root, name)
            # lexists-but-not-exists == dangling symlink; skip it.
            if os.path.lexists(target) and not os.path.exists(target):
                continue
            os.chown(target, uid, gid)
585 | 332 | 365 | ||
586 | === added directory 'hooks/charmhelpers/core/services' | |||
587 | === added file 'hooks/charmhelpers/core/services/__init__.py' | |||
588 | --- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000 | |||
589 | +++ hooks/charmhelpers/core/services/__init__.py 2014-08-13 14:00:26 +0000 | |||
590 | @@ -0,0 +1,2 @@ | |||
591 | 1 | from .base import * | ||
592 | 2 | from .helpers import * | ||
593 | 0 | 3 | ||
594 | === added file 'hooks/charmhelpers/core/services/base.py' | |||
595 | --- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000 | |||
596 | +++ hooks/charmhelpers/core/services/base.py 2014-08-13 14:00:26 +0000 | |||
597 | @@ -0,0 +1,305 @@ | |||
598 | 1 | import os | ||
599 | 2 | import re | ||
600 | 3 | import json | ||
601 | 4 | from collections import Iterable | ||
602 | 5 | |||
603 | 6 | from charmhelpers.core import host | ||
604 | 7 | from charmhelpers.core import hookenv | ||
605 | 8 | |||
606 | 9 | |||
607 | 10 | __all__ = ['ServiceManager', 'ManagerCallback', | ||
608 | 11 | 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', | ||
609 | 12 | 'service_restart', 'service_stop'] | ||
610 | 13 | |||
611 | 14 | |||
class ServiceManager(object):
    """Drives a charm's services from declarative service definitions.

    See `__init__` for the definition format and a usage example, and
    `manage()` for the per-hook entry point.
    """
    def __init__(self, services=None):
        """
        Register a list of services, given their definitions.

        Traditional charm authoring is focused on implementing hooks. That is,
        the charm author is thinking in terms of "What hook am I handling; what
        does this hook need to do?" However, in most cases, the real question
        should be "Do I have the information I need to configure and start this
        piece of software and, if so, what are the steps for doing so?" The
        ServiceManager framework tries to bring the focus to the data and the
        setup tasks, in the most declarative way possible.

        Service definitions are dicts in the following formats (all keys except
        'service' are optional)::

            {
                "service": <service name>,
                "required_data": <list of required data contexts>,
                "data_ready": <one or more callbacks>,
                "data_lost": <one or more callbacks>,
                "start": <one or more callbacks>,
                "stop": <one or more callbacks>,
                "ports": <list of ports to manage>,
            }

        The 'required_data' list should contain dicts of required data (or
        dependency managers that act like dicts and know how to collect the data).
        Only when all items in the 'required_data' list are populated are the list
        of 'data_ready' and 'start' callbacks executed.  See `is_ready()` for more
        information.

        The 'data_ready' value should be either a single callback, or a list of
        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
        Each callback will be called with the service name as the only parameter.
        After all of the 'data_ready' callbacks are called, the 'start' callbacks
        are fired.

        The 'data_lost' value should be either a single callback, or a list of
        callbacks, to be called when a 'required_data' item no longer passes
        `is_ready()`.  Each callback will be called with the service name as the
        only parameter.  After all of the 'data_lost' callbacks are called,
        the 'stop' callbacks are fired.

        The 'start' value should be either a single callback, or a list of
        callbacks, to be called when starting the service, after the 'data_ready'
        callbacks are complete.  Each callback will be called with the service
        name as the only parameter.  This defaults to
        `[host.service_start, services.open_ports]`.

        The 'stop' value should be either a single callback, or a list of
        callbacks, to be called when stopping the service.  If the service is
        being stopped because it no longer has all of its 'required_data', this
        will be called after all of the 'data_lost' callbacks are complete.
        Each callback will be called with the service name as the only parameter.
        This defaults to `[services.close_ports, host.service_stop]`.

        The 'ports' value should be a list of ports to manage.  The default
        'start' handler will open the ports after the service is started,
        and the default 'stop' handler will close the ports prior to stopping
        the service.


        Examples:

        The following registers an Upstart service called bingod that depends on
        a mongodb relation and which runs a custom `db_migrate` function prior to
        restarting the service, and a Runit service called spadesd::

            manager = services.ServiceManager([
                {
                    'service': 'bingod',
                    'ports': [80, 443],
                    'required_data': [MongoRelation(), config(), {'my': 'data'}],
                    'data_ready': [
                        services.template(source='bingod.conf'),
                        services.template(source='bingod.ini',
                                          target='/etc/bingod.ini',
                                          owner='bingo', perms=0400),
                    ],
                },
                {
                    'service': 'spadesd',
                    'data_ready': services.template(source='spadesd_run.j2',
                                                    target='/etc/sv/spadesd/run',
                                                    perms=0555),
                    'start': runit_start,
                    'stop': runit_stop,
                },
            ])
            manager.manage()
        """
        # Persisted record (in the charm dir) of which services were
        # data_ready on a previous invocation; backs was_ready().
        self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
        # Lazily loaded from self._ready_file by _load_ready_file().
        self._ready = None
        self.services = {}
        for service in services or []:
            service_name = service['service']
            self.services[service_name] = service

    def manage(self):
        """
        Handle the current hook by doing The Right Thing with the registered services.

        On the 'stop' hook all services are stopped; on any other hook,
        relation data is provided and all services are reconfigured.
        """
        hook_name = hookenv.hook_name()
        if hook_name == 'stop':
            self.stop_services()
        else:
            self.provide_data()
            self.reconfigure_services()

    def provide_data(self):
        """
        Set relation data for any 'provided_data' providers whose
        relation-joined or relation-changed hook is currently running.

        A provider's data is only relation_set when the provider itself
        reports it complete (via its `_is_ready` check).
        """
        hook_name = hookenv.hook_name()
        for service in self.services.values():
            for provider in service.get('provided_data', []):
                # Only fire for this provider's own joined/changed hooks.
                if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
                    data = provider.provide_data()
                    if provider._is_ready(data):
                        hookenv.relation_set(None, data)

    def reconfigure_services(self, *service_names):
        """
        Update all files for one or more registered services, and,
        if ready, optionally restart them.

        If no service names are given, reconfigures all registered services.
        """
        for service_name in service_names or self.services.keys():
            if self.is_ready(service_name):
                self.fire_event('data_ready', service_name)
                # Default start behavior: (re)start the service, then
                # reconcile its open ports.
                self.fire_event('start', service_name, default=[
                    service_restart,
                    manage_ports])
                self.save_ready(service_name)
            else:
                # Only fire data_lost/stop if the service was previously
                # ready; a never-ready service has nothing to tear down.
                if self.was_ready(service_name):
                    self.fire_event('data_lost', service_name)
                self.fire_event('stop', service_name, default=[
                    manage_ports,
                    service_stop])
                self.save_lost(service_name)

    def stop_services(self, *service_names):
        """
        Stop one or more registered services, by name.

        If no service names are given, stops all registered services.
        """
        for service_name in service_names or self.services.keys():
            self.fire_event('stop', service_name, default=[
                manage_ports,
                service_stop])

    def get_service(self, service_name):
        """
        Given the name of a registered service, return its service definition.

        Raises KeyError if the name was never registered.
        """
        service = self.services.get(service_name)
        if not service:
            raise KeyError('Service not registered: %s' % service_name)
        return service

    def fire_event(self, event_name, service_name, default=None):
        """
        Fire a data_ready, data_lost, start, or stop event on a given service.

        `default` supplies the callbacks used when the service definition
        has none for this event.  A single bare callable is wrapped in a
        list; `ManagerCallback` instances additionally receive the manager
        and the event name, plain callables receive only the service name.
        """
        service = self.get_service(service_name)
        callbacks = service.get(event_name, default)
        if not callbacks:
            return
        if not isinstance(callbacks, Iterable):
            callbacks = [callbacks]
        for callback in callbacks:
            if isinstance(callback, ManagerCallback):
                callback(self, service_name, event_name)
            else:
                callback(service_name)

    def is_ready(self, service_name):
        """
        Determine if a registered service is ready, by checking its 'required_data'.

        A 'required_data' item can be any mapping type, and is considered ready
        if `bool(item)` evaluates as True.
        """
        service = self.get_service(service_name)
        reqs = service.get('required_data', [])
        return all(bool(req) for req in reqs)

    def _load_ready_file(self):
        """Lazily load the persisted ready-set; no-op once loaded."""
        if self._ready is not None:
            return
        if os.path.exists(self._ready_file):
            with open(self._ready_file) as fp:
                self._ready = set(json.load(fp))
        else:
            self._ready = set()

    def _save_ready_file(self):
        """Persist the ready-set, if it has been loaded or modified."""
        if self._ready is None:
            return
        with open(self._ready_file, 'w') as fp:
            json.dump(list(self._ready), fp)

    def save_ready(self, service_name):
        """
        Save an indicator that the given service is now data_ready.
        """
        self._load_ready_file()
        self._ready.add(service_name)
        self._save_ready_file()

    def save_lost(self, service_name):
        """
        Save an indicator that the given service is no longer data_ready.
        """
        self._load_ready_file()
        self._ready.discard(service_name)
        self._save_ready_file()

    def was_ready(self, service_name):
        """
        Determine if the given service was previously data_ready.
        """
        self._load_ready_file()
        return service_name in self._ready
837 | 240 | |||
838 | 241 | |||
class ManagerCallback(object):
    """
    Special case of a callback that takes the `ServiceManager` instance
    in addition to the service name.

    `ServiceManager.fire_event` detects instances of this class and passes
    the extra context; plain callables only receive the service name.

    Subclasses should implement `__call__` which should accept three parameters:

        * `manager`       The `ServiceManager` instance
        * `service_name`  The name of the service it's being triggered for
        * `event_name`    The name of the event that this callback is handling
    """
    def __call__(self, manager, service_name, event_name):
        # Abstract: subclasses (e.g. PortManagerCallback) must override.
        raise NotImplementedError()
852 | 255 | |||
853 | 256 | |||
class PortManagerCallback(ManagerCallback):
    """
    Open or close a service's declared ports; usable as either a
    'start' or a 'stop' action.

    The set of ports last managed for each service is remembered in a
    dot-file in the charm directory, so any port removed from the service
    definition is closed on the next invocation.
    """
    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        wanted_ports = service.get('ports', [])
        state_path = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
        # Close any previously-managed port that is no longer wanted.
        if os.path.exists(state_path):
            with open(state_path) as state:
                for entry in state.read().split(','):
                    if not entry:
                        continue
                    previous_port = int(entry)
                    if previous_port not in wanted_ports:
                        hookenv.close_port(previous_port)
        # Record the current port set for the next run.
        with open(state_path, 'w') as state:
            state.write(','.join(str(port) for port in wanted_ports))
        # Finally open or close the wanted ports, depending on the event.
        for port in wanted_ports:
            if event_name == 'start':
                hookenv.open_port(port)
            elif event_name == 'stop':
                hookenv.close_port(port)
878 | 281 | |||
879 | 282 | |||
def service_stop(service_name):
    """
    Wrapper around host.service_stop to prevent spurious "unknown service"
    messages in the logs: only stop a service that is actually running.
    """
    if not host.service_running(service_name):
        return
    host.service_stop(service_name)
887 | 290 | |||
888 | 291 | |||
def service_restart(service_name):
    """
    Wrapper around host.service_restart to prevent spurious "unknown service"
    messages in the logs.

    An unavailable service is left untouched; a known-but-stopped service
    is started rather than restarted.
    """
    if not host.service_available(service_name):
        return
    if host.service_running(service_name):
        host.service_restart(service_name)
    else:
        host.service_start(service_name)
899 | 302 | |||
900 | 303 | |||
# Convenience aliases: one shared PortManagerCallback instance backs the
# default 'start' (open_ports), 'stop' (close_ports), and generic
# (manage_ports) handlers -- the event name passed at call time decides
# whether ports are opened or closed.
open_ports = close_ports = manage_ports = PortManagerCallback()
903 | 0 | 306 | ||
904 | === added file 'hooks/charmhelpers/core/services/helpers.py' | |||
905 | --- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000 | |||
906 | +++ hooks/charmhelpers/core/services/helpers.py 2014-08-13 14:00:26 +0000 | |||
907 | @@ -0,0 +1,125 @@ | |||
908 | 1 | from charmhelpers.core import hookenv | ||
909 | 2 | from charmhelpers.core import templating | ||
910 | 3 | |||
911 | 4 | from charmhelpers.core.services.base import ManagerCallback | ||
912 | 5 | |||
913 | 6 | |||
914 | 7 | __all__ = ['RelationContext', 'TemplateCallback', | ||
915 | 8 | 'render_template', 'template'] | ||
916 | 9 | |||
917 | 10 | |||
class RelationContext(dict):
    """
    Base class for a context generator that gets relation data from juju.

    Subclasses must provide the attributes `name`, which is the name of the
    interface of interest, `interface`, which is the type of the interface of
    interest, and `required_keys`, which is the set of keys required for the
    relation to be considered complete.  The data for all interfaces matching
    the `name` attribute that are complete will used to populate the dictionary
    values (see `get_data`, below).

    The generated context will be namespaced under the interface type, to prevent
    potential naming conflicts.
    """
    # Subclasses override these three class attributes.
    name = None
    interface = None
    required_keys = []

    def __init__(self, *args, **kwargs):
        super(RelationContext, self).__init__(*args, **kwargs)
        # Populate self from juju relation data immediately on construction.
        self.get_data()

    def __bool__(self):
        """
        Returns True if all of the required_keys are available.
        """
        return self.is_ready()

    # Python 2 truth-testing hook; keeps bool(ctx) working on both 2 and 3.
    __nonzero__ = __bool__

    def __repr__(self):
        # NOTE(review): delegates straight to dict.__repr__; the override
        # itself is a no-op kept for explicitness.
        return super(RelationContext, self).__repr__()

    def is_ready(self):
        """
        Returns True if all of the `required_keys` are available from any units.
        """
        # get_data() only stores unit data that passed _is_ready(), so a
        # non-empty list under self[self.name] implies completeness.
        ready = len(self.get(self.name, [])) > 0
        if not ready:
            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
        return ready

    def _is_ready(self, unit_data):
        """
        Helper method that tests a set of relation data and returns True if
        all of the `required_keys` are present.
        """
        return set(unit_data.keys()).issuperset(set(self.required_keys))

    def get_data(self):
        """
        Retrieve the relation data for each unit involved in a relation and,
        if complete, store it in a list under `self[self.name]`.  This
        is automatically called when the RelationContext is instantiated.

        The units are sorted lexographically first by the service ID, then by
        the unit ID.  Thus, if an interface has two other services, 'db:1'
        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
        set of data, the relation data for the units will be stored in the
        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.

        If you only care about a single unit on the relation, you can just
        access it as `{{ interface[0]['key'] }}`.  However, if you can at all
        support multiple units on a relation, you should iterate over the list,
        like::

            {% for unit in interface -%}
                {{ unit['key'] }}{% if not loop.last %},{% endif %}
            {%- endfor %}

        Note that since all sets of relation data from all related services and
        units are in a single list, if you need to know which service or unit a
        set of data came from, you'll need to extend this class to preserve
        that information.
        """
        if not hookenv.relation_ids(self.name):
            return

        # NOTE(review): setdefault+append means a second get_data() call
        # would duplicate entries; in this class it is only invoked once,
        # from __init__.
        ns = self.setdefault(self.name, [])
        for rid in sorted(hookenv.relation_ids(self.name)):
            for unit in sorted(hookenv.related_units(rid)):
                reldata = hookenv.relation_get(rid=rid, unit=unit)
                if self._is_ready(reldata):
                    ns.append(reldata)

    def provide_data(self):
        """
        Return data to be relation_set for this interface.

        Default implementation provides nothing; subclasses override to
        publish data to related units (used by ServiceManager.provide_data).
        """
        return {}
1009 | 102 | |||
1010 | 103 | |||
1011 | 104 | class TemplateCallback(ManagerCallback): | ||
1012 | 105 | """ | ||
1013 | 106 | Callback class that will render a template, for use as a ready action. | ||
1014 | 107 | """ | ||
1015 | 108 | def __init__(self, source, target, owner='root', group='root', perms=0444): | ||
1016 | 109 | self.source = source | ||
1017 | 110 | self.target = target | ||
1018 | 111 | self.owner = owner | ||
1019 | 112 | self.group = group | ||
1020 | 113 | self.perms = perms | ||
1021 | 114 | |||
1022 | 115 | def __call__(self, manager, service_name, event_name): | ||
1023 | 116 | service = manager.get_service(service_name) | ||
1024 | 117 | context = {} | ||
1025 | 118 | for ctx in service.get('required_data', []): | ||
1026 | 119 | context.update(ctx) | ||
1027 | 120 | templating.render(self.source, self.target, context, | ||
1028 | 121 | self.owner, self.group, self.perms) | ||
1029 | 122 | |||
1030 | 123 | |||
1031 | 124 | # Convenience aliases for templates | ||
1032 | 125 | render_template = template = TemplateCallback | ||
1033 | 0 | 126 | ||
1034 | === added file 'hooks/charmhelpers/core/templating.py' | |||
1035 | --- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000 | |||
1036 | +++ hooks/charmhelpers/core/templating.py 2014-08-13 14:00:26 +0000 | |||
1037 | @@ -0,0 +1,51 @@ | |||
1038 | 1 | import os | ||
1039 | 2 | |||
1040 | 3 | from charmhelpers.core import host | ||
1041 | 4 | from charmhelpers.core import hookenv | ||
1042 | 5 | |||
1043 | 6 | |||
1044 | 7 | def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): | ||
1045 | 8 | """ | ||
1046 | 9 | Render a template. | ||
1047 | 10 | |||
1048 | 11 | The `source` path, if not absolute, is relative to the `templates_dir`. | ||
1049 | 12 | |||
1050 | 13 | The `target` path should be absolute. | ||
1051 | 14 | |||
1052 | 15 | The context should be a dict containing the values to be replaced in the | ||
1053 | 16 | template. | ||
1054 | 17 | |||
1055 | 18 | The `owner`, `group`, and `perms` options will be passed to `write_file`. | ||
1056 | 19 | |||
1057 | 20 | If omitted, `templates_dir` defaults to the `templates` folder in the charm. | ||
1058 | 21 | |||
1059 | 22 | Note: Using this requires python-jinja2; if it is not installed, calling | ||
1060 | 23 | this will attempt to use charmhelpers.fetch.apt_install to install it. | ||
1061 | 24 | """ | ||
1062 | 25 | try: | ||
1063 | 26 | from jinja2 import FileSystemLoader, Environment, exceptions | ||
1064 | 27 | except ImportError: | ||
1065 | 28 | try: | ||
1066 | 29 | from charmhelpers.fetch import apt_install | ||
1067 | 30 | except ImportError: | ||
1068 | 31 | hookenv.log('Could not import jinja2, and could not import ' | ||
1069 | 32 | 'charmhelpers.fetch to install it', | ||
1070 | 33 | level=hookenv.ERROR) | ||
1071 | 34 | raise | ||
1072 | 35 | apt_install('python-jinja2', fatal=True) | ||
1073 | 36 | from jinja2 import FileSystemLoader, Environment, exceptions | ||
1074 | 37 | |||
1075 | 38 | if templates_dir is None: | ||
1076 | 39 | templates_dir = os.path.join(hookenv.charm_dir(), 'templates') | ||
1077 | 40 | loader = Environment(loader=FileSystemLoader(templates_dir)) | ||
1078 | 41 | try: | ||
1079 | 42 | source = source | ||
1080 | 43 | template = loader.get_template(source) | ||
1081 | 44 | except exceptions.TemplateNotFound as e: | ||
1082 | 45 | hookenv.log('Could not load template %s from %s.' % | ||
1083 | 46 | (source, templates_dir), | ||
1084 | 47 | level=hookenv.ERROR) | ||
1085 | 48 | raise e | ||
1086 | 49 | content = template.render(context) | ||
1087 | 50 | host.mkdir(os.path.dirname(target)) | ||
1088 | 51 | host.write_file(target, content, owner, group, perms) | ||
1089 | 0 | 52 | ||
1090 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
1091 | --- hooks/charmhelpers/fetch/__init__.py 2014-07-25 08:11:52 +0000 | |||
1092 | +++ hooks/charmhelpers/fetch/__init__.py 2014-08-13 14:00:26 +0000 | |||
1093 | @@ -122,6 +122,7 @@ | |||
1094 | 122 | # Tell apt to build an in-memory cache to prevent race conditions (if | 122 | # Tell apt to build an in-memory cache to prevent race conditions (if |
1095 | 123 | # another process is already building the cache). | 123 | # another process is already building the cache). |
1096 | 124 | apt_pkg.config.set("Dir::Cache::pkgcache", "") | 124 | apt_pkg.config.set("Dir::Cache::pkgcache", "") |
1097 | 125 | apt_pkg.config.set("Dir::Cache::srcpkgcache", "") | ||
1098 | 125 | 126 | ||
1099 | 126 | cache = apt_pkg.Cache() | 127 | cache = apt_pkg.Cache() |
1100 | 127 | _pkgs = [] | 128 | _pkgs = [] |
Approved by jamespage