Merge lp:~hopem/charms/trusty/ceph-osd/support-ipv6 into lp:~openstack-charmers-archive/charms/trusty/ceph-osd/next
- Trusty Tahr (14.04)
- support-ipv6
- Merge into next
Proposed by
Edward Hope-Morley
Status: | Merged |
---|---|
Merged at revision: | 28 |
Proposed branch: | lp:~hopem/charms/trusty/ceph-osd/support-ipv6 |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/ceph-osd/next |
Diff against target: |
1175 lines (+824/-40) 14 files modified
config.yaml (+3/-0) hooks/charmhelpers/contrib/network/ip.py (+102/-0) hooks/charmhelpers/contrib/storage/linux/utils.py (+3/-0) hooks/charmhelpers/core/hookenv.py (+41/-16) hooks/charmhelpers/core/host.py (+36/-7) hooks/charmhelpers/core/services/__init__.py (+2/-0) hooks/charmhelpers/core/services/base.py (+313/-0) hooks/charmhelpers/core/services/helpers.py (+125/-0) hooks/charmhelpers/core/templating.py (+51/-0) hooks/charmhelpers/fetch/__init__.py (+51/-12) hooks/charmhelpers/fetch/archiveurl.py (+41/-1) hooks/hooks.py (+42/-4) hooks/utils.py (+11/-0) templates/ceph.conf (+3/-0) |
To merge this branch: | bzr merge lp:~hopem/charms/trusty/ceph-osd/support-ipv6 |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Xiang Hui | Pending | ||
OpenStack Charmers | Pending | ||
Review via email: mp+235189@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
- 28. By Edward Hope-Morley
-
synced charm-helpers
- 29. By Edward Hope-Morley
-
fixed get_mon_hosts()
- 30. By Edward Hope-Morley
-
fixed get_mon_hosts()
- 31. By Edward Hope-Morley
-
synced ~xianghui/charm-helpers/format-ipv6
- 32. By Edward Hope-Morley
-
adjusted to new get_ipv6_addr
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'config.yaml' | |||
2 | --- config.yaml 2014-08-15 09:06:49 +0000 | |||
3 | +++ config.yaml 2014-09-21 19:34:01 +0000 | |||
4 | @@ -103,3 +103,6 @@ | |||
5 | 103 | description: | | 103 | description: | |
6 | 104 | The IP address and netmask of the cluster (back-side) network (e.g., | 104 | The IP address and netmask of the cluster (back-side) network (e.g., |
7 | 105 | 192.168.0.0/24) | 105 | 192.168.0.0/24) |
8 | 106 | prefer-ipv6: | ||
9 | 107 | type: boolean | ||
10 | 108 | default: False | ||
11 | 106 | \ No newline at end of file | 109 | \ No newline at end of file |
12 | 107 | 110 | ||
13 | === modified file 'hooks/charmhelpers/contrib/network/ip.py' | |||
14 | --- hooks/charmhelpers/contrib/network/ip.py 2014-07-25 08:07:41 +0000 | |||
15 | +++ hooks/charmhelpers/contrib/network/ip.py 2014-09-21 19:34:01 +0000 | |||
16 | @@ -1,3 +1,4 @@ | |||
17 | 1 | import glob | ||
18 | 1 | import sys | 2 | import sys |
19 | 2 | 3 | ||
20 | 3 | from functools import partial | 4 | from functools import partial |
21 | @@ -154,3 +155,104 @@ | |||
22 | 154 | get_iface_for_address = partial(_get_for_address, key='iface') | 155 | get_iface_for_address = partial(_get_for_address, key='iface') |
23 | 155 | 156 | ||
24 | 156 | get_netmask_for_address = partial(_get_for_address, key='netmask') | 157 | get_netmask_for_address = partial(_get_for_address, key='netmask') |
25 | 158 | |||
26 | 159 | |||
27 | 160 | def format_ipv6_addr(address): | ||
28 | 161 | """ | ||
29 | 162 | IPv6 needs to be wrapped with [] in url link to parse correctly. | ||
30 | 163 | """ | ||
31 | 164 | if is_ipv6(address): | ||
32 | 165 | address = "[%s]" % address | ||
33 | 166 | else: | ||
34 | 167 | log("Not an valid ipv6 address: %s" % address, | ||
35 | 168 | level=ERROR) | ||
36 | 169 | address = None | ||
37 | 170 | return address | ||
38 | 171 | |||
39 | 172 | |||
40 | 173 | def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): | ||
41 | 174 | """ | ||
42 | 175 | Return the assigned IP address for a given interface, if any, or []. | ||
43 | 176 | """ | ||
44 | 177 | # Extract nic if passed /dev/ethX | ||
45 | 178 | if '/' in iface: | ||
46 | 179 | iface = iface.split('/')[-1] | ||
47 | 180 | if not exc_list: | ||
48 | 181 | exc_list = [] | ||
49 | 182 | try: | ||
50 | 183 | inet_num = getattr(netifaces, inet_type) | ||
51 | 184 | except AttributeError: | ||
52 | 185 | raise Exception('Unknown inet type ' + str(inet_type)) | ||
53 | 186 | |||
54 | 187 | interfaces = netifaces.interfaces() | ||
55 | 188 | if inc_aliases: | ||
56 | 189 | ifaces = [] | ||
57 | 190 | for _iface in interfaces: | ||
58 | 191 | if iface == _iface or _iface.split(':')[0] == iface: | ||
59 | 192 | ifaces.append(_iface) | ||
60 | 193 | if fatal and not ifaces: | ||
61 | 194 | raise Exception("Invalid interface '%s'" % iface) | ||
62 | 195 | ifaces.sort() | ||
63 | 196 | else: | ||
64 | 197 | if iface not in interfaces: | ||
65 | 198 | if fatal: | ||
66 | 199 | raise Exception("%s not found " % (iface)) | ||
67 | 200 | else: | ||
68 | 201 | return [] | ||
69 | 202 | else: | ||
70 | 203 | ifaces = [iface] | ||
71 | 204 | |||
72 | 205 | addresses = [] | ||
73 | 206 | for netiface in ifaces: | ||
74 | 207 | net_info = netifaces.ifaddresses(netiface) | ||
75 | 208 | if inet_num in net_info: | ||
76 | 209 | for entry in net_info[inet_num]: | ||
77 | 210 | if 'addr' in entry and entry['addr'] not in exc_list: | ||
78 | 211 | addresses.append(entry['addr']) | ||
79 | 212 | if fatal and not addresses: | ||
80 | 213 | raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type)) | ||
81 | 214 | return addresses | ||
82 | 215 | |||
83 | 216 | get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') | ||
84 | 217 | |||
85 | 218 | |||
86 | 219 | def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None): | ||
87 | 220 | """ | ||
88 | 221 | Return the assigned IPv6 address for a given interface, if any, or []. | ||
89 | 222 | """ | ||
90 | 223 | addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', | ||
91 | 224 | inc_aliases=inc_aliases, fatal=fatal, | ||
92 | 225 | exc_list=exc_list) | ||
93 | 226 | remotly_addressable = [] | ||
94 | 227 | for address in addresses: | ||
95 | 228 | if not address.startswith('fe80'): | ||
96 | 229 | remotly_addressable.append(address) | ||
97 | 230 | if fatal and not remotly_addressable: | ||
98 | 231 | raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) | ||
99 | 232 | return remotly_addressable | ||
100 | 233 | |||
101 | 234 | |||
102 | 235 | def get_bridges(vnic_dir='/sys/devices/virtual/net'): | ||
103 | 236 | """ | ||
104 | 237 | Return a list of bridges on the system or [] | ||
105 | 238 | """ | ||
106 | 239 | b_rgex = vnic_dir + '/*/bridge' | ||
107 | 240 | return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)] | ||
108 | 241 | |||
109 | 242 | |||
110 | 243 | def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): | ||
111 | 244 | """ | ||
112 | 245 | Return a list of nics comprising a given bridge on the system or [] | ||
113 | 246 | """ | ||
114 | 247 | brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge) | ||
115 | 248 | return [x.split('/')[-1] for x in glob.glob(brif_rgex)] | ||
116 | 249 | |||
117 | 250 | |||
118 | 251 | def is_bridge_member(nic): | ||
119 | 252 | """ | ||
120 | 253 | Check if a given nic is a member of a bridge | ||
121 | 254 | """ | ||
122 | 255 | for bridge in get_bridges(): | ||
123 | 256 | if nic in get_bridge_nics(bridge): | ||
124 | 257 | return True | ||
125 | 258 | return False | ||
126 | 157 | 259 | ||
127 | === modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py' | |||
128 | --- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-07-25 08:07:41 +0000 | |||
129 | +++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-09-21 19:34:01 +0000 | |||
130 | @@ -46,5 +46,8 @@ | |||
131 | 46 | :returns: boolean: True if the path represents a mounted device, False if | 46 | :returns: boolean: True if the path represents a mounted device, False if |
132 | 47 | it doesn't. | 47 | it doesn't. |
133 | 48 | ''' | 48 | ''' |
134 | 49 | is_partition = bool(re.search(r".*[0-9]+\b", device)) | ||
135 | 49 | out = check_output(['mount']) | 50 | out = check_output(['mount']) |
136 | 51 | if is_partition: | ||
137 | 52 | return bool(re.search(device + r"\b", out)) | ||
138 | 50 | return bool(re.search(device + r"[0-9]+\b", out)) | 53 | return bool(re.search(device + r"[0-9]+\b", out)) |
139 | 51 | 54 | ||
140 | === modified file 'hooks/charmhelpers/core/hookenv.py' | |||
141 | --- hooks/charmhelpers/core/hookenv.py 2014-07-25 08:07:41 +0000 | |||
142 | +++ hooks/charmhelpers/core/hookenv.py 2014-09-21 19:34:01 +0000 | |||
143 | @@ -156,12 +156,15 @@ | |||
144 | 156 | 156 | ||
145 | 157 | 157 | ||
146 | 158 | class Config(dict): | 158 | class Config(dict): |
153 | 159 | """A Juju charm config dictionary that can write itself to | 159 | """A dictionary representation of the charm's config.yaml, with some |
154 | 160 | disk (as json) and track which values have changed since | 160 | extra features: |
155 | 161 | the previous hook invocation. | 161 | |
156 | 162 | 162 | - See which values in the dictionary have changed since the previous hook. | |
157 | 163 | Do not instantiate this object directly - instead call | 163 | - For values that have changed, see what the previous value was. |
158 | 164 | ``hookenv.config()`` | 164 | - Store arbitrary data for use in a later hook. |
159 | 165 | |||
160 | 166 | NOTE: Do not instantiate this object directly - instead call | ||
161 | 167 | ``hookenv.config()``, which will return an instance of :class:`Config`. | ||
162 | 165 | 168 | ||
163 | 166 | Example usage:: | 169 | Example usage:: |
164 | 167 | 170 | ||
165 | @@ -170,8 +173,8 @@ | |||
166 | 170 | >>> config = hookenv.config() | 173 | >>> config = hookenv.config() |
167 | 171 | >>> config['foo'] | 174 | >>> config['foo'] |
168 | 172 | 'bar' | 175 | 'bar' |
169 | 176 | >>> # store a new key/value for later use | ||
170 | 173 | >>> config['mykey'] = 'myval' | 177 | >>> config['mykey'] = 'myval' |
171 | 174 | >>> config.save() | ||
172 | 175 | 178 | ||
173 | 176 | 179 | ||
174 | 177 | >>> # user runs `juju set mycharm foo=baz` | 180 | >>> # user runs `juju set mycharm foo=baz` |
175 | @@ -188,22 +191,34 @@ | |||
176 | 188 | >>> # keys/values that we add are preserved across hooks | 191 | >>> # keys/values that we add are preserved across hooks |
177 | 189 | >>> config['mykey'] | 192 | >>> config['mykey'] |
178 | 190 | 'myval' | 193 | 'myval' |
179 | 191 | >>> # don't forget to save at the end of hook! | ||
180 | 192 | >>> config.save() | ||
181 | 193 | 194 | ||
182 | 194 | """ | 195 | """ |
183 | 195 | CONFIG_FILE_NAME = '.juju-persistent-config' | 196 | CONFIG_FILE_NAME = '.juju-persistent-config' |
184 | 196 | 197 | ||
185 | 197 | def __init__(self, *args, **kw): | 198 | def __init__(self, *args, **kw): |
186 | 198 | super(Config, self).__init__(*args, **kw) | 199 | super(Config, self).__init__(*args, **kw) |
187 | 200 | self.implicit_save = True | ||
188 | 199 | self._prev_dict = None | 201 | self._prev_dict = None |
189 | 200 | self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) | 202 | self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) |
190 | 201 | if os.path.exists(self.path): | 203 | if os.path.exists(self.path): |
191 | 202 | self.load_previous() | 204 | self.load_previous() |
192 | 203 | 205 | ||
193 | 206 | def __getitem__(self, key): | ||
194 | 207 | """For regular dict lookups, check the current juju config first, | ||
195 | 208 | then the previous (saved) copy. This ensures that user-saved values | ||
196 | 209 | will be returned by a dict lookup. | ||
197 | 210 | |||
198 | 211 | """ | ||
199 | 212 | try: | ||
200 | 213 | return dict.__getitem__(self, key) | ||
201 | 214 | except KeyError: | ||
202 | 215 | return (self._prev_dict or {})[key] | ||
203 | 216 | |||
204 | 204 | def load_previous(self, path=None): | 217 | def load_previous(self, path=None): |
207 | 205 | """Load previous copy of config from disk so that current values | 218 | """Load previous copy of config from disk. |
208 | 206 | can be compared to previous values. | 219 | |
209 | 220 | In normal usage you don't need to call this method directly - it | ||
210 | 221 | is called automatically at object initialization. | ||
211 | 207 | 222 | ||
212 | 208 | :param path: | 223 | :param path: |
213 | 209 | 224 | ||
214 | @@ -218,8 +233,8 @@ | |||
215 | 218 | self._prev_dict = json.load(f) | 233 | self._prev_dict = json.load(f) |
216 | 219 | 234 | ||
217 | 220 | def changed(self, key): | 235 | def changed(self, key): |
220 | 221 | """Return true if the value for this key has changed since | 236 | """Return True if the current value for this key is different from |
221 | 222 | the last save. | 237 | the previous value. |
222 | 223 | 238 | ||
223 | 224 | """ | 239 | """ |
224 | 225 | if self._prev_dict is None: | 240 | if self._prev_dict is None: |
225 | @@ -228,7 +243,7 @@ | |||
226 | 228 | 243 | ||
227 | 229 | def previous(self, key): | 244 | def previous(self, key): |
228 | 230 | """Return previous value for this key, or None if there | 245 | """Return previous value for this key, or None if there |
230 | 231 | is no "previous" value. | 246 | is no previous value. |
231 | 232 | 247 | ||
232 | 233 | """ | 248 | """ |
233 | 234 | if self._prev_dict: | 249 | if self._prev_dict: |
234 | @@ -238,7 +253,13 @@ | |||
235 | 238 | def save(self): | 253 | def save(self): |
236 | 239 | """Save this config to disk. | 254 | """Save this config to disk. |
237 | 240 | 255 | ||
239 | 241 | Preserves items in _prev_dict that do not exist in self. | 256 | If the charm is using the :mod:`Services Framework <services.base>` |
240 | 257 | or :meth:'@hook <Hooks.hook>' decorator, this | ||
241 | 258 | is called automatically at the end of successful hook execution. | ||
242 | 259 | Otherwise, it should be called directly by user code. | ||
243 | 260 | |||
244 | 261 | To disable automatic saves, set ``implicit_save=False`` on this | ||
245 | 262 | instance. | ||
246 | 242 | 263 | ||
247 | 243 | """ | 264 | """ |
248 | 244 | if self._prev_dict: | 265 | if self._prev_dict: |
249 | @@ -285,8 +306,9 @@ | |||
250 | 285 | raise | 306 | raise |
251 | 286 | 307 | ||
252 | 287 | 308 | ||
254 | 288 | def relation_set(relation_id=None, relation_settings={}, **kwargs): | 309 | def relation_set(relation_id=None, relation_settings=None, **kwargs): |
255 | 289 | """Set relation information for the current unit""" | 310 | """Set relation information for the current unit""" |
256 | 311 | relation_settings = relation_settings if relation_settings else {} | ||
257 | 290 | relation_cmd_line = ['relation-set'] | 312 | relation_cmd_line = ['relation-set'] |
258 | 291 | if relation_id is not None: | 313 | if relation_id is not None: |
259 | 292 | relation_cmd_line.extend(('-r', relation_id)) | 314 | relation_cmd_line.extend(('-r', relation_id)) |
260 | @@ -477,6 +499,9 @@ | |||
261 | 477 | hook_name = os.path.basename(args[0]) | 499 | hook_name = os.path.basename(args[0]) |
262 | 478 | if hook_name in self._hooks: | 500 | if hook_name in self._hooks: |
263 | 479 | self._hooks[hook_name]() | 501 | self._hooks[hook_name]() |
264 | 502 | cfg = config() | ||
265 | 503 | if cfg.implicit_save: | ||
266 | 504 | cfg.save() | ||
267 | 480 | else: | 505 | else: |
268 | 481 | raise UnregisteredHookError(hook_name) | 506 | raise UnregisteredHookError(hook_name) |
269 | 482 | 507 | ||
270 | 483 | 508 | ||
271 | === modified file 'hooks/charmhelpers/core/host.py' | |||
272 | --- hooks/charmhelpers/core/host.py 2014-07-25 08:07:41 +0000 | |||
273 | +++ hooks/charmhelpers/core/host.py 2014-09-21 19:34:01 +0000 | |||
274 | @@ -12,6 +12,8 @@ | |||
275 | 12 | import string | 12 | import string |
276 | 13 | import subprocess | 13 | import subprocess |
277 | 14 | import hashlib | 14 | import hashlib |
278 | 15 | import shutil | ||
279 | 16 | from contextlib import contextmanager | ||
280 | 15 | 17 | ||
281 | 16 | from collections import OrderedDict | 18 | from collections import OrderedDict |
282 | 17 | 19 | ||
283 | @@ -52,7 +54,7 @@ | |||
284 | 52 | def service_running(service): | 54 | def service_running(service): |
285 | 53 | """Determine whether a system service is running""" | 55 | """Determine whether a system service is running""" |
286 | 54 | try: | 56 | try: |
288 | 55 | output = subprocess.check_output(['service', service, 'status']) | 57 | output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) |
289 | 56 | except subprocess.CalledProcessError: | 58 | except subprocess.CalledProcessError: |
290 | 57 | return False | 59 | return False |
291 | 58 | else: | 60 | else: |
292 | @@ -62,6 +64,16 @@ | |||
293 | 62 | return False | 64 | return False |
294 | 63 | 65 | ||
295 | 64 | 66 | ||
296 | 67 | def service_available(service_name): | ||
297 | 68 | """Determine whether a system service is available""" | ||
298 | 69 | try: | ||
299 | 70 | subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) | ||
300 | 71 | except subprocess.CalledProcessError: | ||
301 | 72 | return False | ||
302 | 73 | else: | ||
303 | 74 | return True | ||
304 | 75 | |||
305 | 76 | |||
306 | 65 | def adduser(username, password=None, shell='/bin/bash', system_user=False): | 77 | def adduser(username, password=None, shell='/bin/bash', system_user=False): |
307 | 66 | """Add a user to the system""" | 78 | """Add a user to the system""" |
308 | 67 | try: | 79 | try: |
309 | @@ -320,12 +332,29 @@ | |||
310 | 320 | 332 | ||
311 | 321 | ''' | 333 | ''' |
312 | 322 | import apt_pkg | 334 | import apt_pkg |
313 | 335 | from charmhelpers.fetch import apt_cache | ||
314 | 323 | if not pkgcache: | 336 | if not pkgcache: |
321 | 324 | apt_pkg.init() | 337 | pkgcache = apt_cache() |
316 | 325 | # Force Apt to build its cache in memory. That way we avoid race | ||
317 | 326 | # conditions with other applications building the cache in the same | ||
318 | 327 | # place. | ||
319 | 328 | apt_pkg.config.set("Dir::Cache::pkgcache", "") | ||
320 | 329 | pkgcache = apt_pkg.Cache() | ||
322 | 330 | pkg = pkgcache[package] | 338 | pkg = pkgcache[package] |
323 | 331 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) | 339 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) |
324 | 340 | |||
325 | 341 | |||
326 | 342 | @contextmanager | ||
327 | 343 | def chdir(d): | ||
328 | 344 | cur = os.getcwd() | ||
329 | 345 | try: | ||
330 | 346 | yield os.chdir(d) | ||
331 | 347 | finally: | ||
332 | 348 | os.chdir(cur) | ||
333 | 349 | |||
334 | 350 | |||
335 | 351 | def chownr(path, owner, group): | ||
336 | 352 | uid = pwd.getpwnam(owner).pw_uid | ||
337 | 353 | gid = grp.getgrnam(group).gr_gid | ||
338 | 354 | |||
339 | 355 | for root, dirs, files in os.walk(path): | ||
340 | 356 | for name in dirs + files: | ||
341 | 357 | full = os.path.join(root, name) | ||
342 | 358 | broken_symlink = os.path.lexists(full) and not os.path.exists(full) | ||
343 | 359 | if not broken_symlink: | ||
344 | 360 | os.chown(full, uid, gid) | ||
345 | 332 | 361 | ||
346 | === added directory 'hooks/charmhelpers/core/services' | |||
347 | === added file 'hooks/charmhelpers/core/services/__init__.py' | |||
348 | --- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000 | |||
349 | +++ hooks/charmhelpers/core/services/__init__.py 2014-09-21 19:34:01 +0000 | |||
350 | @@ -0,0 +1,2 @@ | |||
351 | 1 | from .base import * | ||
352 | 2 | from .helpers import * | ||
353 | 0 | 3 | ||
354 | === added file 'hooks/charmhelpers/core/services/base.py' | |||
355 | --- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000 | |||
356 | +++ hooks/charmhelpers/core/services/base.py 2014-09-21 19:34:01 +0000 | |||
357 | @@ -0,0 +1,313 @@ | |||
358 | 1 | import os | ||
359 | 2 | import re | ||
360 | 3 | import json | ||
361 | 4 | from collections import Iterable | ||
362 | 5 | |||
363 | 6 | from charmhelpers.core import host | ||
364 | 7 | from charmhelpers.core import hookenv | ||
365 | 8 | |||
366 | 9 | |||
367 | 10 | __all__ = ['ServiceManager', 'ManagerCallback', | ||
368 | 11 | 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', | ||
369 | 12 | 'service_restart', 'service_stop'] | ||
370 | 13 | |||
371 | 14 | |||
372 | 15 | class ServiceManager(object): | ||
373 | 16 | def __init__(self, services=None): | ||
374 | 17 | """ | ||
375 | 18 | Register a list of services, given their definitions. | ||
376 | 19 | |||
377 | 20 | Service definitions are dicts in the following formats (all keys except | ||
378 | 21 | 'service' are optional):: | ||
379 | 22 | |||
380 | 23 | { | ||
381 | 24 | "service": <service name>, | ||
382 | 25 | "required_data": <list of required data contexts>, | ||
383 | 26 | "provided_data": <list of provided data contexts>, | ||
384 | 27 | "data_ready": <one or more callbacks>, | ||
385 | 28 | "data_lost": <one or more callbacks>, | ||
386 | 29 | "start": <one or more callbacks>, | ||
387 | 30 | "stop": <one or more callbacks>, | ||
388 | 31 | "ports": <list of ports to manage>, | ||
389 | 32 | } | ||
390 | 33 | |||
391 | 34 | The 'required_data' list should contain dicts of required data (or | ||
392 | 35 | dependency managers that act like dicts and know how to collect the data). | ||
393 | 36 | Only when all items in the 'required_data' list are populated are the list | ||
394 | 37 | of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more | ||
395 | 38 | information. | ||
396 | 39 | |||
397 | 40 | The 'provided_data' list should contain relation data providers, most likely | ||
398 | 41 | a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, | ||
399 | 42 | that will indicate a set of data to set on a given relation. | ||
400 | 43 | |||
401 | 44 | The 'data_ready' value should be either a single callback, or a list of | ||
402 | 45 | callbacks, to be called when all items in 'required_data' pass `is_ready()`. | ||
403 | 46 | Each callback will be called with the service name as the only parameter. | ||
404 | 47 | After all of the 'data_ready' callbacks are called, the 'start' callbacks | ||
405 | 48 | are fired. | ||
406 | 49 | |||
407 | 50 | The 'data_lost' value should be either a single callback, or a list of | ||
408 | 51 | callbacks, to be called when a 'required_data' item no longer passes | ||
409 | 52 | `is_ready()`. Each callback will be called with the service name as the | ||
410 | 53 | only parameter. After all of the 'data_lost' callbacks are called, | ||
411 | 54 | the 'stop' callbacks are fired. | ||
412 | 55 | |||
413 | 56 | The 'start' value should be either a single callback, or a list of | ||
414 | 57 | callbacks, to be called when starting the service, after the 'data_ready' | ||
415 | 58 | callbacks are complete. Each callback will be called with the service | ||
416 | 59 | name as the only parameter. This defaults to | ||
417 | 60 | `[host.service_start, services.open_ports]`. | ||
418 | 61 | |||
419 | 62 | The 'stop' value should be either a single callback, or a list of | ||
420 | 63 | callbacks, to be called when stopping the service. If the service is | ||
421 | 64 | being stopped because it no longer has all of its 'required_data', this | ||
422 | 65 | will be called after all of the 'data_lost' callbacks are complete. | ||
423 | 66 | Each callback will be called with the service name as the only parameter. | ||
424 | 67 | This defaults to `[services.close_ports, host.service_stop]`. | ||
425 | 68 | |||
426 | 69 | The 'ports' value should be a list of ports to manage. The default | ||
427 | 70 | 'start' handler will open the ports after the service is started, | ||
428 | 71 | and the default 'stop' handler will close the ports prior to stopping | ||
429 | 72 | the service. | ||
430 | 73 | |||
431 | 74 | |||
432 | 75 | Examples: | ||
433 | 76 | |||
434 | 77 | The following registers an Upstart service called bingod that depends on | ||
435 | 78 | a mongodb relation and which runs a custom `db_migrate` function prior to | ||
436 | 79 | restarting the service, and a Runit service called spadesd:: | ||
437 | 80 | |||
438 | 81 | manager = services.ServiceManager([ | ||
439 | 82 | { | ||
440 | 83 | 'service': 'bingod', | ||
441 | 84 | 'ports': [80, 443], | ||
442 | 85 | 'required_data': [MongoRelation(), config(), {'my': 'data'}], | ||
443 | 86 | 'data_ready': [ | ||
444 | 87 | services.template(source='bingod.conf'), | ||
445 | 88 | services.template(source='bingod.ini', | ||
446 | 89 | target='/etc/bingod.ini', | ||
447 | 90 | owner='bingo', perms=0400), | ||
448 | 91 | ], | ||
449 | 92 | }, | ||
450 | 93 | { | ||
451 | 94 | 'service': 'spadesd', | ||
452 | 95 | 'data_ready': services.template(source='spadesd_run.j2', | ||
453 | 96 | target='/etc/sv/spadesd/run', | ||
454 | 97 | perms=0555), | ||
455 | 98 | 'start': runit_start, | ||
456 | 99 | 'stop': runit_stop, | ||
457 | 100 | }, | ||
458 | 101 | ]) | ||
459 | 102 | manager.manage() | ||
460 | 103 | """ | ||
461 | 104 | self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') | ||
462 | 105 | self._ready = None | ||
463 | 106 | self.services = {} | ||
464 | 107 | for service in services or []: | ||
465 | 108 | service_name = service['service'] | ||
466 | 109 | self.services[service_name] = service | ||
467 | 110 | |||
468 | 111 | def manage(self): | ||
469 | 112 | """ | ||
470 | 113 | Handle the current hook by doing The Right Thing with the registered services. | ||
471 | 114 | """ | ||
472 | 115 | hook_name = hookenv.hook_name() | ||
473 | 116 | if hook_name == 'stop': | ||
474 | 117 | self.stop_services() | ||
475 | 118 | else: | ||
476 | 119 | self.provide_data() | ||
477 | 120 | self.reconfigure_services() | ||
478 | 121 | cfg = hookenv.config() | ||
479 | 122 | if cfg.implicit_save: | ||
480 | 123 | cfg.save() | ||
481 | 124 | |||
482 | 125 | def provide_data(self): | ||
483 | 126 | """ | ||
484 | 127 | Set the relation data for each provider in the ``provided_data`` list. | ||
485 | 128 | |||
486 | 129 | A provider must have a `name` attribute, which indicates which relation | ||
487 | 130 | to set data on, and a `provide_data()` method, which returns a dict of | ||
488 | 131 | data to set. | ||
489 | 132 | """ | ||
490 | 133 | hook_name = hookenv.hook_name() | ||
491 | 134 | for service in self.services.values(): | ||
492 | 135 | for provider in service.get('provided_data', []): | ||
493 | 136 | if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): | ||
494 | 137 | data = provider.provide_data() | ||
495 | 138 | _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data | ||
496 | 139 | if _ready: | ||
497 | 140 | hookenv.relation_set(None, data) | ||
498 | 141 | |||
499 | 142 | def reconfigure_services(self, *service_names): | ||
500 | 143 | """ | ||
501 | 144 | Update all files for one or more registered services, and, | ||
502 | 145 | if ready, optionally restart them. | ||
503 | 146 | |||
504 | 147 | If no service names are given, reconfigures all registered services. | ||
505 | 148 | """ | ||
506 | 149 | for service_name in service_names or self.services.keys(): | ||
507 | 150 | if self.is_ready(service_name): | ||
508 | 151 | self.fire_event('data_ready', service_name) | ||
509 | 152 | self.fire_event('start', service_name, default=[ | ||
510 | 153 | service_restart, | ||
511 | 154 | manage_ports]) | ||
512 | 155 | self.save_ready(service_name) | ||
513 | 156 | else: | ||
514 | 157 | if self.was_ready(service_name): | ||
515 | 158 | self.fire_event('data_lost', service_name) | ||
516 | 159 | self.fire_event('stop', service_name, default=[ | ||
517 | 160 | manage_ports, | ||
518 | 161 | service_stop]) | ||
519 | 162 | self.save_lost(service_name) | ||
520 | 163 | |||
521 | 164 | def stop_services(self, *service_names): | ||
522 | 165 | """ | ||
523 | 166 | Stop one or more registered services, by name. | ||
524 | 167 | |||
525 | 168 | If no service names are given, stops all registered services. | ||
526 | 169 | """ | ||
527 | 170 | for service_name in service_names or self.services.keys(): | ||
528 | 171 | self.fire_event('stop', service_name, default=[ | ||
529 | 172 | manage_ports, | ||
530 | 173 | service_stop]) | ||
531 | 174 | |||
532 | 175 | def get_service(self, service_name): | ||
533 | 176 | """ | ||
534 | 177 | Given the name of a registered service, return its service definition. | ||
535 | 178 | """ | ||
536 | 179 | service = self.services.get(service_name) | ||
537 | 180 | if not service: | ||
538 | 181 | raise KeyError('Service not registered: %s' % service_name) | ||
539 | 182 | return service | ||
540 | 183 | |||
541 | 184 | def fire_event(self, event_name, service_name, default=None): | ||
542 | 185 | """ | ||
543 | 186 | Fire a data_ready, data_lost, start, or stop event on a given service. | ||
544 | 187 | """ | ||
545 | 188 | service = self.get_service(service_name) | ||
546 | 189 | callbacks = service.get(event_name, default) | ||
547 | 190 | if not callbacks: | ||
548 | 191 | return | ||
549 | 192 | if not isinstance(callbacks, Iterable): | ||
550 | 193 | callbacks = [callbacks] | ||
551 | 194 | for callback in callbacks: | ||
552 | 195 | if isinstance(callback, ManagerCallback): | ||
553 | 196 | callback(self, service_name, event_name) | ||
554 | 197 | else: | ||
555 | 198 | callback(service_name) | ||
556 | 199 | |||
557 | 200 | def is_ready(self, service_name): | ||
558 | 201 | """ | ||
559 | 202 | Determine if a registered service is ready, by checking its 'required_data'. | ||
560 | 203 | |||
561 | 204 | A 'required_data' item can be any mapping type, and is considered ready | ||
562 | 205 | if `bool(item)` evaluates as True. | ||
563 | 206 | """ | ||
564 | 207 | service = self.get_service(service_name) | ||
565 | 208 | reqs = service.get('required_data', []) | ||
566 | 209 | return all(bool(req) for req in reqs) | ||
567 | 210 | |||
568 | 211 | def _load_ready_file(self): | ||
569 | 212 | if self._ready is not None: | ||
570 | 213 | return | ||
571 | 214 | if os.path.exists(self._ready_file): | ||
572 | 215 | with open(self._ready_file) as fp: | ||
573 | 216 | self._ready = set(json.load(fp)) | ||
574 | 217 | else: | ||
575 | 218 | self._ready = set() | ||
576 | 219 | |||
577 | 220 | def _save_ready_file(self): | ||
578 | 221 | if self._ready is None: | ||
579 | 222 | return | ||
580 | 223 | with open(self._ready_file, 'w') as fp: | ||
581 | 224 | json.dump(list(self._ready), fp) | ||
582 | 225 | |||
583 | 226 | def save_ready(self, service_name): | ||
584 | 227 | """ | ||
585 | 228 | Save an indicator that the given service is now data_ready. | ||
586 | 229 | """ | ||
587 | 230 | self._load_ready_file() | ||
588 | 231 | self._ready.add(service_name) | ||
589 | 232 | self._save_ready_file() | ||
590 | 233 | |||
591 | 234 | def save_lost(self, service_name): | ||
592 | 235 | """ | ||
593 | 236 | Save an indicator that the given service is no longer data_ready. | ||
594 | 237 | """ | ||
595 | 238 | self._load_ready_file() | ||
596 | 239 | self._ready.discard(service_name) | ||
597 | 240 | self._save_ready_file() | ||
598 | 241 | |||
599 | 242 | def was_ready(self, service_name): | ||
600 | 243 | """ | ||
601 | 244 | Determine if the given service was previously data_ready. | ||
602 | 245 | """ | ||
603 | 246 | self._load_ready_file() | ||
604 | 247 | return service_name in self._ready | ||
605 | 248 | |||
606 | 249 | |||
607 | 250 | class ManagerCallback(object): | ||
608 | 251 | """ | ||
609 | 252 | Special case of a callback that takes the `ServiceManager` instance | ||
610 | 253 | in addition to the service name. | ||
611 | 254 | |||
612 | 255 | Subclasses should implement `__call__` which should accept three parameters: | ||
613 | 256 | |||
614 | 257 | * `manager` The `ServiceManager` instance | ||
615 | 258 | * `service_name` The name of the service it's being triggered for | ||
616 | 259 | * `event_name` The name of the event that this callback is handling | ||
617 | 260 | """ | ||
618 | 261 | def __call__(self, manager, service_name, event_name): | ||
619 | 262 | raise NotImplementedError() | ||
620 | 263 | |||
621 | 264 | |||
622 | 265 | class PortManagerCallback(ManagerCallback): | ||
623 | 266 | """ | ||
624 | 267 | Callback class that will open or close ports, for use as either | ||
625 | 268 | a start or stop action. | ||
626 | 269 | """ | ||
627 | 270 | def __call__(self, manager, service_name, event_name): | ||
628 | 271 | service = manager.get_service(service_name) | ||
629 | 272 | new_ports = service.get('ports', []) | ||
630 | 273 | port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) | ||
631 | 274 | if os.path.exists(port_file): | ||
632 | 275 | with open(port_file) as fp: | ||
633 | 276 | old_ports = fp.read().split(',') | ||
634 | 277 | for old_port in old_ports: | ||
635 | 278 | if bool(old_port): | ||
636 | 279 | old_port = int(old_port) | ||
637 | 280 | if old_port not in new_ports: | ||
638 | 281 | hookenv.close_port(old_port) | ||
639 | 282 | with open(port_file, 'w') as fp: | ||
640 | 283 | fp.write(','.join(str(port) for port in new_ports)) | ||
641 | 284 | for port in new_ports: | ||
642 | 285 | if event_name == 'start': | ||
643 | 286 | hookenv.open_port(port) | ||
644 | 287 | elif event_name == 'stop': | ||
645 | 288 | hookenv.close_port(port) | ||
646 | 289 | |||
647 | 290 | |||
648 | 291 | def service_stop(service_name): | ||
649 | 292 | """ | ||
650 | 293 | Wrapper around host.service_stop to prevent spurious "unknown service" | ||
651 | 294 | messages in the logs. | ||
652 | 295 | """ | ||
653 | 296 | if host.service_running(service_name): | ||
654 | 297 | host.service_stop(service_name) | ||
655 | 298 | |||
656 | 299 | |||
657 | 300 | def service_restart(service_name): | ||
658 | 301 | """ | ||
659 | 302 | Wrapper around host.service_restart to prevent spurious "unknown service" | ||
660 | 303 | messages in the logs. | ||
661 | 304 | """ | ||
662 | 305 | if host.service_available(service_name): | ||
663 | 306 | if host.service_running(service_name): | ||
664 | 307 | host.service_restart(service_name) | ||
665 | 308 | else: | ||
666 | 309 | host.service_start(service_name) | ||
667 | 310 | |||
668 | 311 | |||
669 | 312 | # Convenience aliases | ||
670 | 313 | open_ports = close_ports = manage_ports = PortManagerCallback() | ||
671 | 0 | 314 | ||
672 | === added file 'hooks/charmhelpers/core/services/helpers.py' | |||
673 | --- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000 | |||
674 | +++ hooks/charmhelpers/core/services/helpers.py 2014-09-21 19:34:01 +0000 | |||
675 | @@ -0,0 +1,125 @@ | |||
676 | 1 | from charmhelpers.core import hookenv | ||
677 | 2 | from charmhelpers.core import templating | ||
678 | 3 | |||
679 | 4 | from charmhelpers.core.services.base import ManagerCallback | ||
680 | 5 | |||
681 | 6 | |||
682 | 7 | __all__ = ['RelationContext', 'TemplateCallback', | ||
683 | 8 | 'render_template', 'template'] | ||
684 | 9 | |||
685 | 10 | |||
686 | 11 | class RelationContext(dict): | ||
687 | 12 | """ | ||
688 | 13 | Base class for a context generator that gets relation data from juju. | ||
689 | 14 | |||
690 | 15 | Subclasses must provide the attributes `name`, which is the name of the | ||
691 | 16 | interface of interest, `interface`, which is the type of the interface of | ||
692 | 17 | interest, and `required_keys`, which is the set of keys required for the | ||
693 | 18 | relation to be considered complete. The data for all interfaces matching | ||
694 | 19 | the `name` attribute that are complete will be used to populate the dictionary
695 | 20 | values (see `get_data`, below). | ||
696 | 21 | |||
697 | 22 | The generated context will be namespaced under the interface type, to prevent | ||
698 | 23 | potential naming conflicts. | ||
699 | 24 | """ | ||
700 | 25 | name = None | ||
701 | 26 | interface = None | ||
702 | 27 | required_keys = [] | ||
703 | 28 | |||
704 | 29 | def __init__(self, *args, **kwargs): | ||
705 | 30 | super(RelationContext, self).__init__(*args, **kwargs) | ||
706 | 31 | self.get_data() | ||
707 | 32 | |||
708 | 33 | def __bool__(self): | ||
709 | 34 | """ | ||
710 | 35 | Returns True if all of the required_keys are available. | ||
711 | 36 | """ | ||
712 | 37 | return self.is_ready() | ||
713 | 38 | |||
714 | 39 | __nonzero__ = __bool__ | ||
715 | 40 | |||
716 | 41 | def __repr__(self): | ||
717 | 42 | return super(RelationContext, self).__repr__() | ||
718 | 43 | |||
719 | 44 | def is_ready(self): | ||
720 | 45 | """ | ||
721 | 46 | Returns True if all of the `required_keys` are available from any units. | ||
722 | 47 | """ | ||
723 | 48 | ready = len(self.get(self.name, [])) > 0 | ||
724 | 49 | if not ready: | ||
725 | 50 | hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) | ||
726 | 51 | return ready | ||
727 | 52 | |||
728 | 53 | def _is_ready(self, unit_data): | ||
729 | 54 | """ | ||
730 | 55 | Helper method that tests a set of relation data and returns True if | ||
731 | 56 | all of the `required_keys` are present. | ||
732 | 57 | """ | ||
733 | 58 | return set(unit_data.keys()).issuperset(set(self.required_keys)) | ||
734 | 59 | |||
735 | 60 | def get_data(self): | ||
736 | 61 | """ | ||
737 | 62 | Retrieve the relation data for each unit involved in a relation and, | ||
738 | 63 | if complete, store it in a list under `self[self.name]`. This | ||
739 | 64 | is automatically called when the RelationContext is instantiated. | ||
740 | 65 | |||
741 | 66 | The units are sorted lexicographically first by the service ID, then by
742 | 67 | the unit ID. Thus, if an interface has two other services, 'db:1' | ||
743 | 68 | and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', | ||
744 | 69 | and 'db:2' having one unit, 'mediawiki/0', all of which have a complete | ||
745 | 70 | set of data, the relation data for the units will be stored in the | ||
746 | 71 | order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. | ||
747 | 72 | |||
748 | 73 | If you only care about a single unit on the relation, you can just | ||
749 | 74 | access it as `{{ interface[0]['key'] }}`. However, if you can at all | ||
750 | 75 | support multiple units on a relation, you should iterate over the list, | ||
751 | 76 | like:: | ||
752 | 77 | |||
753 | 78 | {% for unit in interface -%} | ||
754 | 79 | {{ unit['key'] }}{% if not loop.last %},{% endif %} | ||
755 | 80 | {%- endfor %} | ||
756 | 81 | |||
757 | 82 | Note that since all sets of relation data from all related services and | ||
758 | 83 | units are in a single list, if you need to know which service or unit a | ||
759 | 84 | set of data came from, you'll need to extend this class to preserve | ||
760 | 85 | that information. | ||
761 | 86 | """ | ||
762 | 87 | if not hookenv.relation_ids(self.name): | ||
763 | 88 | return | ||
764 | 89 | |||
765 | 90 | ns = self.setdefault(self.name, []) | ||
766 | 91 | for rid in sorted(hookenv.relation_ids(self.name)): | ||
767 | 92 | for unit in sorted(hookenv.related_units(rid)): | ||
768 | 93 | reldata = hookenv.relation_get(rid=rid, unit=unit) | ||
769 | 94 | if self._is_ready(reldata): | ||
770 | 95 | ns.append(reldata) | ||
771 | 96 | |||
772 | 97 | def provide_data(self): | ||
773 | 98 | """ | ||
774 | 99 | Return data to be relation_set for this interface. | ||
775 | 100 | """ | ||
776 | 101 | return {} | ||
777 | 102 | |||
778 | 103 | |||
779 | 104 | class TemplateCallback(ManagerCallback): | ||
780 | 105 | """ | ||
781 | 106 | Callback class that will render a template, for use as a ready action. | ||
782 | 107 | """ | ||
783 | 108 | def __init__(self, source, target, owner='root', group='root', perms=0444): | ||
784 | 109 | self.source = source | ||
785 | 110 | self.target = target | ||
786 | 111 | self.owner = owner | ||
787 | 112 | self.group = group | ||
788 | 113 | self.perms = perms | ||
789 | 114 | |||
790 | 115 | def __call__(self, manager, service_name, event_name): | ||
791 | 116 | service = manager.get_service(service_name) | ||
792 | 117 | context = {} | ||
793 | 118 | for ctx in service.get('required_data', []): | ||
794 | 119 | context.update(ctx) | ||
795 | 120 | templating.render(self.source, self.target, context, | ||
796 | 121 | self.owner, self.group, self.perms) | ||
797 | 122 | |||
798 | 123 | |||
799 | 124 | # Convenience aliases for templates | ||
800 | 125 | render_template = template = TemplateCallback | ||
801 | 0 | 126 | ||
802 | === added file 'hooks/charmhelpers/core/templating.py' | |||
803 | --- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000 | |||
804 | +++ hooks/charmhelpers/core/templating.py 2014-09-21 19:34:01 +0000 | |||
805 | @@ -0,0 +1,51 @@ | |||
806 | 1 | import os | ||
807 | 2 | |||
808 | 3 | from charmhelpers.core import host | ||
809 | 4 | from charmhelpers.core import hookenv | ||
810 | 5 | |||
811 | 6 | |||
812 | 7 | def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): | ||
813 | 8 | """ | ||
814 | 9 | Render a template. | ||
815 | 10 | |||
816 | 11 | The `source` path, if not absolute, is relative to the `templates_dir`. | ||
817 | 12 | |||
818 | 13 | The `target` path should be absolute. | ||
819 | 14 | |||
820 | 15 | The context should be a dict containing the values to be replaced in the | ||
821 | 16 | template. | ||
822 | 17 | |||
823 | 18 | The `owner`, `group`, and `perms` options will be passed to `write_file`. | ||
824 | 19 | |||
825 | 20 | If omitted, `templates_dir` defaults to the `templates` folder in the charm. | ||
826 | 21 | |||
827 | 22 | Note: Using this requires python-jinja2; if it is not installed, calling | ||
828 | 23 | this will attempt to use charmhelpers.fetch.apt_install to install it. | ||
829 | 24 | """ | ||
830 | 25 | try: | ||
831 | 26 | from jinja2 import FileSystemLoader, Environment, exceptions | ||
832 | 27 | except ImportError: | ||
833 | 28 | try: | ||
834 | 29 | from charmhelpers.fetch import apt_install | ||
835 | 30 | except ImportError: | ||
836 | 31 | hookenv.log('Could not import jinja2, and could not import ' | ||
837 | 32 | 'charmhelpers.fetch to install it', | ||
838 | 33 | level=hookenv.ERROR) | ||
839 | 34 | raise | ||
840 | 35 | apt_install('python-jinja2', fatal=True) | ||
841 | 36 | from jinja2 import FileSystemLoader, Environment, exceptions | ||
842 | 37 | |||
843 | 38 | if templates_dir is None: | ||
844 | 39 | templates_dir = os.path.join(hookenv.charm_dir(), 'templates') | ||
845 | 40 | loader = Environment(loader=FileSystemLoader(templates_dir)) | ||
846 | 41 | try: | ||
847 | 42 | source = source | ||
848 | 43 | template = loader.get_template(source) | ||
849 | 44 | except exceptions.TemplateNotFound as e: | ||
850 | 45 | hookenv.log('Could not load template %s from %s.' % | ||
851 | 46 | (source, templates_dir), | ||
852 | 47 | level=hookenv.ERROR) | ||
853 | 48 | raise e | ||
854 | 49 | content = template.render(context) | ||
855 | 50 | host.mkdir(os.path.dirname(target)) | ||
856 | 51 | host.write_file(target, content, owner, group, perms) | ||
857 | 0 | 52 | ||
858 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
859 | --- hooks/charmhelpers/fetch/__init__.py 2014-07-25 08:07:41 +0000 | |||
860 | +++ hooks/charmhelpers/fetch/__init__.py 2014-09-21 19:34:01 +0000 | |||
861 | @@ -1,4 +1,5 @@ | |||
862 | 1 | import importlib | 1 | import importlib |
863 | 2 | from tempfile import NamedTemporaryFile | ||
864 | 2 | import time | 3 | import time |
865 | 3 | from yaml import safe_load | 4 | from yaml import safe_load |
866 | 4 | from charmhelpers.core.host import ( | 5 | from charmhelpers.core.host import ( |
867 | @@ -116,14 +117,7 @@ | |||
868 | 116 | 117 | ||
869 | 117 | def filter_installed_packages(packages): | 118 | def filter_installed_packages(packages): |
870 | 118 | """Returns a list of packages that require installation""" | 119 | """Returns a list of packages that require installation""" |
879 | 119 | import apt_pkg | 120 | cache = apt_cache() |
872 | 120 | apt_pkg.init() | ||
873 | 121 | |||
874 | 122 | # Tell apt to build an in-memory cache to prevent race conditions (if | ||
875 | 123 | # another process is already building the cache). | ||
876 | 124 | apt_pkg.config.set("Dir::Cache::pkgcache", "") | ||
877 | 125 | |||
878 | 126 | cache = apt_pkg.Cache() | ||
880 | 127 | _pkgs = [] | 121 | _pkgs = [] |
881 | 128 | for package in packages: | 122 | for package in packages: |
882 | 129 | try: | 123 | try: |
883 | @@ -136,6 +130,16 @@ | |||
884 | 136 | return _pkgs | 130 | return _pkgs |
885 | 137 | 131 | ||
886 | 138 | 132 | ||
887 | 133 | def apt_cache(in_memory=True): | ||
888 | 134 | """Build and return an apt cache""" | ||
889 | 135 | import apt_pkg | ||
890 | 136 | apt_pkg.init() | ||
891 | 137 | if in_memory: | ||
892 | 138 | apt_pkg.config.set("Dir::Cache::pkgcache", "") | ||
893 | 139 | apt_pkg.config.set("Dir::Cache::srcpkgcache", "") | ||
894 | 140 | return apt_pkg.Cache() | ||
895 | 141 | |||
896 | 142 | |||
897 | 139 | def apt_install(packages, options=None, fatal=False): | 143 | def apt_install(packages, options=None, fatal=False): |
898 | 140 | """Install one or more packages""" | 144 | """Install one or more packages""" |
899 | 141 | if options is None: | 145 | if options is None: |
900 | @@ -201,6 +205,27 @@ | |||
901 | 201 | 205 | ||
902 | 202 | 206 | ||
903 | 203 | def add_source(source, key=None): | 207 | def add_source(source, key=None): |
904 | 208 | """Add a package source to this system. | ||
905 | 209 | |||
906 | 210 | @param source: a URL or sources.list entry, as supported by | ||
907 | 211 | add-apt-repository(1). Examples: | ||
908 | 212 | ppa:charmers/example | ||
909 | 213 | deb https://stub:key@private.example.com/ubuntu trusty main | ||
910 | 214 | |||
911 | 215 | In addition: | ||
912 | 216 | 'proposed:' may be used to enable the standard 'proposed' | ||
913 | 217 | pocket for the release. | ||
914 | 218 | 'cloud:' may be used to activate official cloud archive pockets, | ||
915 | 219 | such as 'cloud:icehouse' | ||
916 | 220 | |||
917 | 221 | @param key: A key to be added to the system's APT keyring and used | ||
918 | 222 | to verify the signatures on packages. Ideally, this should be an | ||
919 | 223 | ASCII format GPG public key including the block headers. A GPG key | ||
920 | 224 | id may also be used, but be aware that only insecure protocols are | ||
921 | 225 | available to retrieve the actual public key from a public keyserver | ||
922 | 226 | placing your Juju environment at risk. ppa and cloud archive keys | ||
923 | 227 | are securely added automatically, so should not be provided.
924 | 228 | """ | ||
925 | 204 | if source is None: | 229 | if source is None: |
926 | 205 | log('Source is not present. Skipping') | 230 | log('Source is not present. Skipping') |
927 | 206 | return | 231 | return |
928 | @@ -225,10 +250,23 @@ | |||
929 | 225 | release = lsb_release()['DISTRIB_CODENAME'] | 250 | release = lsb_release()['DISTRIB_CODENAME'] |
930 | 226 | with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: | 251 | with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: |
931 | 227 | apt.write(PROPOSED_POCKET.format(release)) | 252 | apt.write(PROPOSED_POCKET.format(release)) |
932 | 253 | else: | ||
933 | 254 | raise SourceConfigError("Unknown source: {!r}".format(source)) | ||
934 | 255 | |||
935 | 228 | if key: | 256 | if key: |
939 | 229 | subprocess.check_call(['apt-key', 'adv', '--keyserver', | 257 | if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: |
940 | 230 | 'hkp://keyserver.ubuntu.com:80', '--recv', | 258 | with NamedTemporaryFile() as key_file: |
941 | 231 | key]) | 259 | key_file.write(key) |
942 | 260 | key_file.flush() | ||
943 | 261 | key_file.seek(0) | ||
944 | 262 | subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) | ||
945 | 263 | else: | ||
946 | 264 | # Note that hkp: is in no way a secure protocol. Using a | ||
947 | 265 | # GPG key id is pointless from a security POV unless you | ||
948 | 266 | # absolutely trust your network and DNS. | ||
949 | 267 | subprocess.check_call(['apt-key', 'adv', '--keyserver', | ||
950 | 268 | 'hkp://keyserver.ubuntu.com:80', '--recv', | ||
951 | 269 | key]) | ||
952 | 232 | 270 | ||
953 | 233 | 271 | ||
954 | 234 | def configure_sources(update=False, | 272 | def configure_sources(update=False, |
955 | @@ -238,7 +276,8 @@ | |||
956 | 238 | Configure multiple sources from charm configuration. | 276 | Configure multiple sources from charm configuration. |
957 | 239 | 277 | ||
958 | 240 | The lists are encoded as yaml fragments in the configuration. | 278 | The lists are encoded as yaml fragments in the configuration. |
960 | 241 | The fragment needs to be included as a string. | 279 | The fragment needs to be included as a string. Sources and their
961 | 280 | corresponding keys are of the types supported by add_source(). | ||
962 | 242 | 281 | ||
963 | 243 | Example config: | 282 | Example config: |
964 | 244 | install_sources: | | 283 | install_sources: | |
965 | 245 | 284 | ||
966 | === modified file 'hooks/charmhelpers/fetch/archiveurl.py' | |||
967 | --- hooks/charmhelpers/fetch/archiveurl.py 2014-04-16 08:37:56 +0000 | |||
968 | +++ hooks/charmhelpers/fetch/archiveurl.py 2014-09-21 19:34:01 +0000 | |||
969 | @@ -1,6 +1,8 @@ | |||
970 | 1 | import os | 1 | import os |
971 | 2 | import urllib2 | 2 | import urllib2 |
972 | 3 | from urllib import urlretrieve | ||
973 | 3 | import urlparse | 4 | import urlparse |
974 | 5 | import hashlib | ||
975 | 4 | 6 | ||
976 | 5 | from charmhelpers.fetch import ( | 7 | from charmhelpers.fetch import ( |
977 | 6 | BaseFetchHandler, | 8 | BaseFetchHandler, |
978 | @@ -12,7 +14,17 @@ | |||
979 | 12 | ) | 14 | ) |
980 | 13 | from charmhelpers.core.host import mkdir | 15 | from charmhelpers.core.host import mkdir |
981 | 14 | 16 | ||
983 | 15 | 17 | """ | |
984 | 18 | This class is a plugin for charmhelpers.fetch.install_remote. | ||
985 | 19 | |||
986 | 20 | It grabs, validates and installs remote archives fetched over "http", "https", "ftp" or "file" protocols. The contents of the archive are installed in $CHARM_DIR/fetched/. | ||
987 | 21 | |||
988 | 22 | Example usage: | ||
989 | 23 | install_remote("https://example.com/some/archive.tar.gz") | ||
990 | 24 | # Installs the contents of archive.tar.gz in $CHARM_DIR/fetched/. | ||
991 | 25 | |||
992 | 26 | See charmhelpers.fetch.archiveurl.get_archivehandler for supported archive types. | ||
993 | 27 | """ | ||
994 | 16 | class ArchiveUrlFetchHandler(BaseFetchHandler): | 28 | class ArchiveUrlFetchHandler(BaseFetchHandler): |
995 | 17 | """Handler for archives via generic URLs""" | 29 | """Handler for archives via generic URLs""" |
996 | 18 | def can_handle(self, source): | 30 | def can_handle(self, source): |
997 | @@ -61,3 +73,31 @@ | |||
998 | 61 | except OSError as e: | 73 | except OSError as e: |
999 | 62 | raise UnhandledSource(e.strerror) | 74 | raise UnhandledSource(e.strerror) |
1000 | 63 | return extract(dld_file) | 75 | return extract(dld_file) |
1001 | 76 | |||
1002 | 77 | # Mandatory file validation via Sha1 or MD5 hashing. | ||
1003 | 78 | def download_and_validate(self, url, hashsum, validate="sha1"): | ||
1004 | 79 | if validate == 'sha1' and len(hashsum) != 40: | ||
1005 | 80 | raise ValueError("HashSum must be = 40 characters when using sha1" | ||
1006 | 81 | " validation") | ||
1007 | 82 | if validate == 'md5' and len(hashsum) != 32: | ||
1008 | 83 | raise ValueError("HashSum must be = 32 characters when using md5" | ||
1009 | 84 | " validation") | ||
1010 | 85 | tempfile, headers = urlretrieve(url) | ||
1011 | 86 | self.validate_file(tempfile, hashsum, validate) | ||
1012 | 87 | return tempfile | ||
1013 | 88 | |||
1014 | 89 | # Predicate method that returns status of hash matching expected hash. | ||
1015 | 90 | def validate_file(self, source, hashsum, vmethod='sha1'): | ||
1016 | 91 | if vmethod != 'sha1' and vmethod != 'md5': | ||
1017 | 92 | raise ValueError("Validation Method not supported") | ||
1018 | 93 | |||
1019 | 94 | if vmethod == 'md5': | ||
1020 | 95 | m = hashlib.md5() | ||
1021 | 96 | if vmethod == 'sha1': | ||
1022 | 97 | m = hashlib.sha1() | ||
1023 | 98 | with open(source) as f: | ||
1024 | 99 | for line in f: | ||
1025 | 100 | m.update(line) | ||
1026 | 101 | if hashsum != m.hexdigest(): | ||
1027 | 102 | msg = "Hash Mismatch on {} expected {} got {}" | ||
1028 | 103 | raise ValueError(msg.format(source, hashsum, m.hexdigest())) | ||
1029 | 64 | 104 | ||
1030 | === modified file 'hooks/hooks.py' | |||
1031 | --- hooks/hooks.py 2014-08-15 09:06:49 +0000 | |||
1032 | +++ hooks/hooks.py 2014-09-21 19:34:01 +0000 | |||
1033 | @@ -15,14 +15,17 @@ | |||
1034 | 15 | import ceph | 15 | import ceph |
1035 | 16 | from charmhelpers.core.hookenv import ( | 16 | from charmhelpers.core.hookenv import ( |
1036 | 17 | log, | 17 | log, |
1037 | 18 | WARNING, | ||
1038 | 18 | ERROR, | 19 | ERROR, |
1039 | 19 | config, | 20 | config, |
1040 | 20 | relation_ids, | 21 | relation_ids, |
1041 | 21 | related_units, | 22 | related_units, |
1042 | 22 | relation_get, | 23 | relation_get, |
1043 | 24 | relation_set, | ||
1044 | 23 | Hooks, | 25 | Hooks, |
1045 | 24 | UnregisteredHookError, | 26 | UnregisteredHookError, |
1047 | 25 | service_name | 27 | service_name, |
1048 | 28 | unit_get | ||
1049 | 26 | ) | 29 | ) |
1050 | 27 | from charmhelpers.core.host import ( | 30 | from charmhelpers.core.host import ( |
1051 | 28 | umount, | 31 | umount, |
1052 | @@ -39,10 +42,14 @@ | |||
1053 | 39 | from utils import ( | 42 | from utils import ( |
1054 | 40 | render_template, | 43 | render_template, |
1055 | 41 | get_host_ip, | 44 | get_host_ip, |
1056 | 45 | setup_ipv6 | ||
1057 | 42 | ) | 46 | ) |
1058 | 43 | 47 | ||
1059 | 44 | from charmhelpers.contrib.openstack.alternatives import install_alternative | 48 | from charmhelpers.contrib.openstack.alternatives import install_alternative |
1061 | 45 | from charmhelpers.contrib.network.ip import is_ipv6 | 49 | from charmhelpers.contrib.network.ip import ( |
1062 | 50 | is_ipv6, | ||
1063 | 51 | get_ipv6_addr | ||
1064 | 52 | ) | ||
1065 | 46 | 53 | ||
1066 | 47 | hooks = Hooks() | 54 | hooks = Hooks() |
1067 | 48 | 55 | ||
1068 | @@ -58,6 +65,10 @@ | |||
1069 | 58 | def install(): | 65 | def install(): |
1070 | 59 | add_source(config('source'), config('key')) | 66 | add_source(config('source'), config('key')) |
1071 | 60 | apt_update(fatal=True) | 67 | apt_update(fatal=True) |
1072 | 68 | |||
1073 | 69 | if config('prefer-ipv6'): | ||
1074 | 70 | setup_ipv6() | ||
1075 | 71 | |||
1076 | 61 | apt_install(packages=ceph.PACKAGES, fatal=True) | 72 | apt_install(packages=ceph.PACKAGES, fatal=True) |
1077 | 62 | install_upstart_scripts() | 73 | install_upstart_scripts() |
1078 | 63 | 74 | ||
1079 | @@ -76,6 +87,14 @@ | |||
1080 | 76 | 'ceph_public_network': config('ceph-public-network'), | 87 | 'ceph_public_network': config('ceph-public-network'), |
1081 | 77 | 'ceph_cluster_network': config('ceph-cluster-network'), | 88 | 'ceph_cluster_network': config('ceph-cluster-network'), |
1082 | 78 | } | 89 | } |
1083 | 90 | |||
1084 | 91 | if config('prefer-ipv6'): | ||
1085 | 92 | host_ip = get_ipv6_addr()[0] | ||
1086 | 93 | if host_ip: | ||
1087 | 94 | cephcontext['host_ip'] = host_ip | ||
1088 | 95 | else: | ||
1089 | 96 | log("Unable to obtain host address", level=WARNING) | ||
1090 | 97 | |||
1091 | 79 | # Install ceph.conf as an alternative to support | 98 | # Install ceph.conf as an alternative to support |
1092 | 80 | # co-existence with other charms that write this file | 99 | # co-existence with other charms that write this file |
1093 | 81 | charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) | 100 | charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) |
1094 | @@ -95,6 +114,9 @@ | |||
1095 | 95 | log('Invalid OSD disk format configuration specified', level=ERROR) | 114 | log('Invalid OSD disk format configuration specified', level=ERROR) |
1096 | 96 | sys.exit(1) | 115 | sys.exit(1) |
1097 | 97 | 116 | ||
1098 | 117 | if config('prefer-ipv6'): | ||
1099 | 118 | setup_ipv6() | ||
1100 | 119 | |||
1101 | 98 | e_mountpoint = config('ephemeral-unmount') | 120 | e_mountpoint = config('ephemeral-unmount') |
1102 | 99 | if (e_mountpoint and ceph.filesystem_mounted(e_mountpoint)): | 121 | if (e_mountpoint and ceph.filesystem_mounted(e_mountpoint)): |
1103 | 100 | umount(e_mountpoint) | 122 | umount(e_mountpoint) |
1104 | @@ -120,8 +142,13 @@ | |||
1105 | 120 | hosts = [] | 142 | hosts = [] |
1106 | 121 | for relid in relation_ids('mon'): | 143 | for relid in relation_ids('mon'): |
1107 | 122 | for unit in related_units(relid): | 144 | for unit in related_units(relid): |
1110 | 123 | addr = relation_get('ceph-public-address', unit, relid) or \ | 145 | addr = relation_get('ceph-public-address', unit, relid) |
1111 | 124 | get_host_ip(relation_get('private-address', unit, relid)) | 146 | if not addr: |
1112 | 147 | if config('prefer-ipv6'): | ||
1113 | 148 | addr = relation_get('private-address', unit, relid) | ||
1114 | 149 | else: | ||
1115 | 150 | get_host_ip(relation_get('private-address', unit, relid)) | ||
1116 | 151 | |||
1117 | 125 | if addr is not None: | 152 | if addr is not None: |
1118 | 126 | if is_ipv6(addr): | 153 | if is_ipv6(addr): |
1119 | 127 | hosts.append('[{}]:6789'.format(addr)) | 154 | hosts.append('[{}]:6789'.format(addr)) |
1120 | @@ -166,6 +193,17 @@ | |||
1121 | 166 | @hooks.hook('mon-relation-changed', | 193 | @hooks.hook('mon-relation-changed', |
1122 | 167 | 'mon-relation-departed') | 194 | 'mon-relation-departed') |
1123 | 168 | def mon_relation(): | 195 | def mon_relation(): |
1124 | 196 | if config('prefer-ipv6'): | ||
1125 | 197 | host = get_ipv6_addr()[0] | ||
1126 | 198 | else: | ||
1127 | 199 | host = unit_get('private-address') | ||
1128 | 200 | |||
1129 | 201 | if host: | ||
1130 | 202 | relation_data = {'private-address': host} | ||
1131 | 203 | relation_set(**relation_data) | ||
1132 | 204 | else: | ||
1133 | 205 | log("Unable to obtain host address", level=WARNING) | ||
1134 | 206 | |||
1135 | 169 | bootstrap_key = relation_get('osd_bootstrap_key') | 207 | bootstrap_key = relation_get('osd_bootstrap_key') |
1136 | 170 | if get_fsid() and get_auth() and bootstrap_key: | 208 | if get_fsid() and get_auth() and bootstrap_key: |
1137 | 171 | log('mon has provided conf- scanning disks') | 209 | log('mon has provided conf- scanning disks') |
1138 | 172 | 210 | ||
1139 | === modified file 'hooks/utils.py' | |||
1140 | --- hooks/utils.py 2013-10-10 10:49:36 +0000 | |||
1141 | +++ hooks/utils.py 2014-09-21 19:34:01 +0000 | |||
1142 | @@ -18,6 +18,10 @@ | |||
1143 | 18 | filter_installed_packages | 18 | filter_installed_packages |
1144 | 19 | ) | 19 | ) |
1145 | 20 | 20 | ||
1146 | 21 | from charmhelpers.core.host import ( | ||
1147 | 22 | lsb_release | ||
1148 | 23 | ) | ||
1149 | 24 | |||
1150 | 21 | TEMPLATES_DIR = 'templates' | 25 | TEMPLATES_DIR = 'templates' |
1151 | 22 | 26 | ||
1152 | 23 | try: | 27 | try: |
1153 | @@ -72,3 +76,10 @@ | |||
1154 | 72 | answers = dns.resolver.query(hostname, 'A') | 76 | answers = dns.resolver.query(hostname, 'A') |
1155 | 73 | if answers: | 77 | if answers: |
1156 | 74 | return answers[0].address | 78 | return answers[0].address |
1157 | 79 | |||
1158 | 80 | |||
1159 | 81 | def setup_ipv6(): | ||
1160 | 82 | ubuntu_rel = float(lsb_release()['DISTRIB_RELEASE']) | ||
1161 | 83 | if ubuntu_rel < 14.04: | ||
1162 | 84 | raise Exception("IPv6 is not supported for Ubuntu " | ||
1163 | 85 | "versions less than Trusty 14.04") | ||
1164 | 75 | 86 | ||
1165 | === modified file 'templates/ceph.conf' | |||
1166 | --- templates/ceph.conf 2014-07-25 08:07:41 +0000 | |||
1167 | +++ templates/ceph.conf 2014-09-21 19:34:01 +0000 | |||
1168 | @@ -32,3 +32,6 @@ | |||
1169 | 32 | osd journal size = {{ osd_journal_size }} | 32 | osd journal size = {{ osd_journal_size }} |
1170 | 33 | filestore xattr use omap = true | 33 | filestore xattr use omap = true |
1171 | 34 | 34 | ||
1172 | 35 | host = {{ hostname }} | ||
1173 | 36 | public addr = {{ host_ip }} | ||
1174 | 37 | cluster addr = {{ host_ip }} | ||
1175 | 35 | \ No newline at end of file | 38 | \ No newline at end of file |