Merge lp:~stub/charms/precise/pgbouncer/charmhelpers into lp:charms/pgbouncer

Proposed by Stuart Bishop
Status: Merged
Merged at revision: 61
Proposed branch: lp:~stub/charms/precise/pgbouncer/charmhelpers
Merge into: lp:charms/pgbouncer
Diff against target: 2035 lines (+1975/-0)
11 files modified
charm-helpers.yaml (+5/-0)
hooks/charmhelpers/core/fstab.py (+116/-0)
hooks/charmhelpers/core/hookenv.py (+500/-0)
hooks/charmhelpers/core/host.py (+364/-0)
hooks/charmhelpers/core/services/__init__.py (+2/-0)
hooks/charmhelpers/core/services/base.py (+305/-0)
hooks/charmhelpers/core/services/helpers.py (+125/-0)
hooks/charmhelpers/core/templating.py (+51/-0)
hooks/charmhelpers/fetch/__init__.py (+394/-0)
hooks/charmhelpers/fetch/archiveurl.py (+63/-0)
hooks/charmhelpers/fetch/bzrurl.py (+50/-0)
To merge this branch: bzr merge lp:~stub/charms/precise/pgbouncer/charmhelpers
Reviewer Review Type Date Requested Status
Marco Ceppi (community) Approve
Stuart Bishop (community) Approve
Review via email: mp+226637@code.launchpad.net

Description of the change

Import the charm-helpers library code in preparation for the Python port of this charm.

To post a comment you must log in.
Revision history for this message
Stuart Bishop (stub) wrote :

charm-helpers sync, noop review

review: Approve
62. By Stuart Bishop

Update charm-helpers

Revision history for this message
Marco Ceppi (marcoceppi) wrote :

LGTM

review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== added file 'charm-helpers.yaml'
2--- charm-helpers.yaml 1970-01-01 00:00:00 +0000
3+++ charm-helpers.yaml 2014-08-21 11:32:42 +0000
4@@ -0,0 +1,5 @@
5+destination: hooks/charmhelpers
6+branch: lp:charm-helpers
7+include:
8+ - core
9+ - fetch
10
11=== added directory 'hooks/charmhelpers'
12=== added file 'hooks/charmhelpers/__init__.py'
13=== added directory 'hooks/charmhelpers/core'
14=== added file 'hooks/charmhelpers/core/__init__.py'
15=== added file 'hooks/charmhelpers/core/fstab.py'
16--- hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000
17+++ hooks/charmhelpers/core/fstab.py 2014-08-21 11:32:42 +0000
18@@ -0,0 +1,116 @@
19+#!/usr/bin/env python
20+# -*- coding: utf-8 -*-
21+
22+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
23+
24+import os
25+
26+
class Fstab(file):
    """A reader/writer for ``/etc/fstab``.

    Extends the (Python 2) built-in ``file`` type, keeping the fstab
    open in read/write mode for the lifetime of the instance.
    """

    class Entry(object):
        """One non-comment line of ``/etc/fstab``."""

        def __init__(self, device, mountpoint, filesystem,
                     options, d=0, p=0):
            self.device = device
            self.mountpoint = mountpoint
            self.filesystem = filesystem
            # fstab requires an options field; fall back to "defaults".
            if not options:
                options = "defaults"
            self.options = options
            self.d = d  # dump frequency field
            self.p = p  # fsck pass number field

        def __eq__(self, o):
            return str(self) == str(o)

        def __str__(self):
            return "{} {} {} {} {} {}".format(self.device,
                                              self.mountpoint,
                                              self.filesystem,
                                              self.options,
                                              self.d,
                                              self.p)

    DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')

    def __init__(self, path=None):
        if path:
            self._path = path
        else:
            self._path = self.DEFAULT_PATH
        file.__init__(self, self._path, 'r+')

    def _hydrate_entry(self, line):
        """Parse a raw fstab line into an Entry.

        split() with no arguments splits on any whitespace run,
        including tabs.
        """
        return Fstab.Entry(*filter(
            lambda x: x not in ('', None),
            line.strip("\n").split()))

    @property
    def entries(self):
        """Iterate over parsed entries, skipping comment lines."""
        self.seek(0)
        for line in self.readlines():
            try:
                if not line.startswith("#"):
                    yield self._hydrate_entry(line)
            except ValueError:
                # Malformed line; skip it.
                pass

    def get_entry_by_attr(self, attr, value):
        """Return the first entry whose *attr* equals *value*, or None."""
        for entry in self.entries:
            if getattr(entry, attr) == value:
                return entry
        return None

    def add_entry(self, entry):
        """Append *entry* unless its device is already present.

        Returns the entry on success, or False when a duplicate device
        already exists.
        """
        if self.get_entry_by_attr('device', entry.device):
            return False

        self.write(str(entry) + '\n')
        self.truncate()
        return entry

    def remove_entry(self, entry):
        """Remove the line matching *entry*; return True if found."""
        self.seek(0)

        lines = self.readlines()

        found = False
        for index, line in enumerate(lines):
            if not line.startswith("#"):
                if self._hydrate_entry(line) == entry:
                    found = True
                    break

        if not found:
            return False

        # Bug fix: delete by index. The previous lines.remove(line)
        # deleted the *first* line with identical text, which may not
        # be the matched one when duplicate lines exist.
        del lines[index]

        self.seek(0)
        self.write(''.join(lines))
        self.truncate()
        return True

    @classmethod
    def remove_by_mountpoint(cls, mountpoint, path=None):
        """Remove the entry mounted at *mountpoint*; True if removed."""
        fstab = cls(path=path)
        entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
        if entry:
            return fstab.remove_entry(entry)
        return False

    @classmethod
    def add(cls, device, mountpoint, filesystem, options=None, path=None):
        """Add a mount definition; see add_entry() for the return value."""
        return cls(path=path).add_entry(Fstab.Entry(device,
                                        mountpoint, filesystem,
                                        options=options))
135
136=== added file 'hooks/charmhelpers/core/hookenv.py'
137--- hooks/charmhelpers/core/hookenv.py 1970-01-01 00:00:00 +0000
138+++ hooks/charmhelpers/core/hookenv.py 2014-08-21 11:32:42 +0000
139@@ -0,0 +1,500 @@
140+"Interactions with the Juju environment"
141+# Copyright 2013 Canonical Ltd.
142+#
143+# Authors:
144+# Charm Helpers Developers <juju@lists.ubuntu.com>
145+
146+import os
147+import json
148+import yaml
149+import subprocess
150+import sys
151+import UserDict
152+from subprocess import CalledProcessError
153+
154+CRITICAL = "CRITICAL"
155+ERROR = "ERROR"
156+WARNING = "WARNING"
157+INFO = "INFO"
158+DEBUG = "DEBUG"
159+MARKER = object()
160+
161+cache = {}
162+
163+
def cached(func):
    """Memoise results of *func* in the module-level ``cache`` dict.

    For example::

        @cached
        def unit_get(attribute):
            pass

        unit_get('test')

    caches the result of ``unit_get('test')`` for future calls.
    """
    def wrapper(*args, **kwargs):
        global cache
        # Key on the function object plus its arguments.
        key = str((func, args, kwargs))
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]
    return wrapper


def flush(key):
    """Drop every cache entry whose key string contains *key*."""
    stale = [item for item in cache if key in item]
    for item in stale:
        del cache[item]
198+
199+
def log(message, level=None):
    """Write *message* to the juju log, optionally at *level*."""
    command = ['juju-log']
    if level:
        command.extend(['-l', level])
    command.append(message)
    subprocess.call(command)
207+
208+
class Serializable(UserDict.IterableUserDict):
    """Wrap an object so it can be serialized to YAML or JSON."""

    def __init__(self, obj):
        # Store the wrapped object as the UserDict data.
        UserDict.IterableUserDict.__init__(self)
        self.data = obj

    def __getattr__(self, attr):
        # Our own attributes win first.
        if attr in ("json", "yaml", "data"):
            return self.__dict__[attr]
        # Then real attributes of the wrapped object.
        got = getattr(self.data, attr, MARKER)
        if got is not MARKER:
            return got
        # Finally fall back to the wrapped object's dict interface.
        try:
            return self.data[attr]
        except KeyError:
            raise AttributeError(attr)

    def __getstate__(self):
        # Pickle as the plain wrapped object.
        return self.data

    def __setstate__(self, state):
        # Restore the wrapped object on unpickle.
        self.data = state

    def json(self):
        """Serialize the wrapped object to JSON."""
        return json.dumps(self.data)

    def yaml(self):
        """Serialize the wrapped object to YAML."""
        return yaml.dump(self.data)
246+
247+
def execution_environment():
    """Gather the current execution context into a single dict."""
    context = {}
    context['conf'] = config()
    if relation_id():
        context['reltype'] = relation_type()
        context['relid'] = relation_id()
        context['rel'] = relation_get()
    context['unit'] = local_unit()
    context['rels'] = relations()
    context['env'] = os.environ
    return context


def in_relation_hook():
    """True when executing inside a relation hook."""
    return 'JUJU_RELATION' in os.environ


def relation_type():
    """The relation type (scope) of the current relation hook, or None."""
    return os.environ.get('JUJU_RELATION', None)


def relation_id():
    """The relation ID of the current relation hook, or None."""
    return os.environ.get('JUJU_RELATION_ID', None)


def local_unit():
    """The name of the local unit (from $JUJU_UNIT_NAME)."""
    return os.environ['JUJU_UNIT_NAME']


def remote_unit():
    """The remote unit of the current relation hook."""
    return os.environ['JUJU_REMOTE_UNIT']


def service_name():
    """The service group this unit belongs to (unit name minus index)."""
    return local_unit().split('/')[0]


def hook_name():
    """The name of the currently executing hook (basename of argv[0])."""
    return os.path.basename(sys.argv[0])
295+
296+
class Config(dict):
    """A Juju charm config dictionary that can write itself to
    disk (as json) and track which values have changed since
    the previous hook invocation.

    Do not instantiate this object directly - instead call
    ``hookenv.config()``

    Example usage::

        >>> # inside a hook
        >>> from charmhelpers.core import hookenv
        >>> config = hookenv.config()
        >>> config['foo']
        'bar'
        >>> config['mykey'] = 'myval'
        >>> config.save()


        >>> # user runs `juju set mycharm foo=baz`
        >>> # now we're inside subsequent config-changed hook
        >>> config = hookenv.config()
        >>> config['foo']
        'baz'
        >>> # test to see if this val has changed since last hook
        >>> config.changed('foo')
        True
        >>> # what was the previous value?
        >>> config.previous('foo')
        'bar'
        >>> # keys/values that we add are preserved across hooks
        >>> config['mykey']
        'myval'
        >>> # don't forget to save at the end of hook!
        >>> config.save()

    """
    CONFIG_FILE_NAME = '.juju-persistent-config'

    def __init__(self, *args, **kw):
        super(Config, self).__init__(*args, **kw)
        self._prev_dict = None
        self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
        if os.path.exists(self.path):
            self.load_previous()

    def load_previous(self, path=None):
        """Load previous copy of config from disk so that current values
        can be compared to previous values.

        :param path:

            File path from which to load the previous config. If `None`,
            config is loaded from the default location. If `path` is
            specified, subsequent `save()` calls will write to the same
            path.

        """
        self.path = path or self.path
        with open(self.path) as f:
            self._prev_dict = json.load(f)

    def changed(self, key):
        """Return true if the value for this key has changed since
        the last save.

        """
        # With no previous config on disk, everything counts as changed.
        if self._prev_dict is None:
            return True
        return self.previous(key) != self.get(key)

    def previous(self, key):
        """Return previous value for this key, or None if there
        is no "previous" value.

        """
        if self._prev_dict:
            return self._prev_dict.get(key)
        return None

    def save(self):
        """Save this config to disk.

        Preserves items in _prev_dict that do not exist in self.

        """
        if self._prev_dict:
            # items() (not the Python-2-only iteritems()) keeps this
            # working under both Python 2 and Python 3.
            for k, v in self._prev_dict.items():
                if k not in self:
                    self[k] = v
        with open(self.path, 'w') as f:
            json.dump(self, f)
389+
390+
@cached
def config(scope=None):
    """Fetch the charm configuration via ``config-get``.

    With *scope*, return just that option; otherwise return the full
    configuration wrapped in a Config. Returns None when the output
    cannot be parsed as JSON.
    """
    cmd = ['config-get']
    if scope is not None:
        cmd.append(scope)
    cmd.append('--format=json')
    try:
        config_data = json.loads(subprocess.check_output(cmd))
        if scope is not None:
            return config_data
        return Config(config_data)
    except ValueError:
        return None
405+
406+
@cached
def relation_get(attribute=None, unit=None, rid=None):
    """Get relation information.

    :param attribute: single key to fetch, or None ('-') for all keys.
    :param unit: remote unit to query (defaults to the current one).
    :param rid: relation ID to query (defaults to the current relation).

    Returns None when the output is not valid JSON, or when
    ``relation-get`` exits with status 2 (no such relation/unit).
    """
    _args = ['relation-get', '--format=json']
    if rid:
        _args.append('-r')
        _args.append(rid)
    _args.append(attribute or '-')
    if unit:
        _args.append(unit)
    try:
        return json.loads(subprocess.check_output(_args))
    except ValueError:
        return None
    # 'except X as e' (not the Python-2-only 'except X, e') is valid
    # on both Python 2.6+ and Python 3.
    except CalledProcessError as e:
        if e.returncode == 2:
            return None
        raise
425+
426+
def relation_set(relation_id=None, relation_settings=None, **kwargs):
    """Set relation values for the current unit via ``relation-set``.

    A value of None is sent as an empty setting (``key=``), which
    unsets the key on the relation.
    """
    settings = relation_settings if relation_settings else {}
    cmd = ['relation-set']
    if relation_id is not None:
        cmd.extend(('-r', relation_id))
    # list(...) + list(...) works identically on Python 2 and 3.
    for k, v in (list(settings.items()) + list(kwargs.items())):
        if v is None:
            cmd.append('{}='.format(k))
        else:
            cmd.append('{}={}'.format(k, v))
    subprocess.check_call(cmd)
    # We just changed our own relation data; drop cached relation-gets
    # for the local unit.
    flush(local_unit())
441+
442+
@cached
def relation_ids(reltype=None):
    """A list of relation IDs for *reltype* (default: current relation).

    Returns [] when ``relation-ids`` produces no output.
    """
    reltype = reltype or relation_type()
    relid_cmd_line = ['relation-ids', '--format=json']
    if reltype is not None:
        relid_cmd_line.append(reltype)
    # The original had an unreachable second 'return []' after this
    # statement; that dead code has been removed.
    return json.loads(subprocess.check_output(relid_cmd_line)) or []
452+
453+
@cached
def related_units(relid=None):
    """List the units on the given (or current) relation ID."""
    relid = relid or relation_id()
    cmd = ['relation-list', '--format=json']
    if relid is not None:
        cmd.extend(('-r', relid))
    return json.loads(subprocess.check_output(cmd)) or []


@cached
def relation_for_unit(unit=None, rid=None):
    """Return the JSON representation of a unit's relation data.

    Keys ending in '-list' are split on whitespace into lists; the
    unit name is recorded under '__unit__'.
    """
    unit = unit or remote_unit()
    relation = relation_get(unit=unit, rid=rid)
    for key in relation:
        if key.endswith('-list'):
            relation[key] = relation[key].split()
    relation['__unit__'] = unit
    return relation
474+
475+
@cached
def relations_for_id(relid=None):
    """Return the relation data of every unit on one relation ID."""
    relation_data = []
    # NOTE(review): this fallback assigns the *list* returned by
    # relation_ids() rather than a single ID -- confirm callers always
    # pass relid explicitly.
    relid = relid or relation_ids()
    for unit in related_units(relid):
        unit_data = relation_for_unit(unit, relid)
        unit_data['__relid__'] = relid
        relation_data.append(unit_data)
    return relation_data


@cached
def relations_of_type(reltype=None):
    """Return the relation data of every unit on relations of *reltype*."""
    relation_data = []
    reltype = reltype or relation_type()
    for relid in relation_ids(reltype):
        for relation in relations_for_id(relid):
            relation['__relid__'] = relid
            relation_data.append(relation)
    return relation_data
498+
499+
@cached
def relation_types():
    """List the relation types declared in this charm's metadata.yaml."""
    charmdir = os.environ.get('CHARM_DIR', '')
    rel_types = []
    # 'with' guarantees the file handle is closed even when the YAML
    # parse raises (the original leaked the handle on error).
    with open(os.path.join(charmdir, 'metadata.yaml')) as mdf:
        md = yaml.safe_load(mdf)
    for key in ('provides', 'requires', 'peers'):
        section = md.get(key)
        if section:
            rel_types.extend(section.keys())
    return rel_types
513+
514+
@cached
def relations():
    """Nested dict of relation data: type -> relation ID -> unit -> data."""
    rels = {}
    for reltype in relation_types():
        relids = {}
        for relid in relation_ids(reltype):
            # Include the local unit's own data alongside remote units.
            units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
            for unit in related_units(relid):
                units[unit] = relation_get(unit=unit, rid=relid)
            relids[relid] = units
        rels[reltype] = relids
    return rels


@cached
def is_relation_made(relation, keys='private-address'):
    '''
    Determine whether a relation is established by checking for
    presence of key(s).  If a list of keys is provided, they
    must all be present for the relation to be identified as made
    '''
    if isinstance(keys, str):
        keys = [keys]
    for r_id in relation_ids(relation):
        for unit in related_units(r_id):
            # Every requested key must have a (non-None) value.
            if all(relation_get(k, rid=r_id, unit=unit) is not None
                   for k in keys):
                return True
    return False
549+
550+
def open_port(port, protocol="TCP"):
    """Expose a network port on this unit."""
    subprocess.check_call(['open-port', '{}/{}'.format(port, protocol)])


def close_port(port, protocol="TCP"):
    """Stop exposing a network port on this unit."""
    subprocess.check_call(['close-port', '{}/{}'.format(port, protocol)])


@cached
def unit_get(attribute):
    """Fetch an attribute of the *local* unit (e.g. 'private-address').

    Returns None when the output cannot be parsed as JSON.
    """
    cmd = ['unit-get', '--format=json', attribute]
    try:
        return json.loads(subprocess.check_output(cmd))
    except ValueError:
        return None


def unit_private_ip():
    """This unit's private IP address."""
    return unit_get('private-address')
578+
579+
class UnregisteredHookError(Exception):
    """Raised when an undefined hook is called."""
    pass


class Hooks(object):
    """A convenient handler for hook functions.

    Example::

        hooks = Hooks()

        # register a hook, taking its name from the function name
        @hooks.hook()
        def install():
            pass # your code here

        # register a hook, providing a custom hook name
        @hooks.hook("config-changed")
        def config_changed():
            pass # your code here

        if __name__ == "__main__":
            # execute a hook based on the name the program is called by
            hooks.execute(sys.argv)
    """

    def __init__(self):
        super(Hooks, self).__init__()
        self._hooks = {}

    def register(self, name, function):
        """Map hook *name* to *function*."""
        self._hooks[name] = function

    def execute(self, args):
        """Run the hook registered under basename(args[0]).

        Raises UnregisteredHookError when no such hook is registered.
        """
        hook_name = os.path.basename(args[0])
        try:
            hook = self._hooks[hook_name]
        except KeyError:
            raise UnregisteredHookError(hook_name)
        hook()

    def hook(self, *hook_names):
        """Decorator registering a function under *hook_names*.

        The function is always registered under its own name as well
        (plus a dashed variant when the name contains underscores) --
        the original's for/else did the same, since a for loop's else
        clause runs whenever the loop is not broken out of.
        """
        def wrapper(decorated):
            for name in hook_names:
                self.register(name, decorated)
            self.register(decorated.__name__, decorated)
            if '_' in decorated.__name__:
                self.register(
                    decorated.__name__.replace('_', '-'), decorated)
            return decorated
        return wrapper
635+
636+
def charm_dir():
    """Root directory of the current charm (from $CHARM_DIR), or None."""
    return os.environ.get('CHARM_DIR')
640
641=== added file 'hooks/charmhelpers/core/host.py'
642--- hooks/charmhelpers/core/host.py 1970-01-01 00:00:00 +0000
643+++ hooks/charmhelpers/core/host.py 2014-08-21 11:32:42 +0000
644@@ -0,0 +1,364 @@
645+"""Tools for working with the host system"""
646+# Copyright 2012 Canonical Ltd.
647+#
648+# Authors:
649+# Nick Moffitt <nick.moffitt@canonical.com>
650+# Matthew Wedgwood <matthew.wedgwood@canonical.com>
651+
652+import os
653+import pwd
654+import grp
655+import random
656+import string
657+import subprocess
658+import hashlib
659+import shutil
660+from contextlib import contextmanager
661+
662+from collections import OrderedDict
663+
664+from hookenv import log
665+from fstab import Fstab
666+
667+
def service_start(service_name):
    """Start a system service; True on success."""
    return service('start', service_name)


def service_stop(service_name):
    """Stop a system service; True on success."""
    return service('stop', service_name)


def service_restart(service_name):
    """Restart a system service; True on success."""
    return service('restart', service_name)


def service_reload(service_name, restart_on_failure=False):
    """Reload a system service, optionally restarting when reload fails."""
    reloaded = service('reload', service_name)
    if not reloaded and restart_on_failure:
        reloaded = service('restart', service_name)
    return reloaded


def service(action, service_name):
    """Run ``service <name> <action>``; True when it exits zero."""
    return subprocess.call(['service', service_name, action]) == 0


def service_running(service):
    """True when ``service <name> status`` reports a running service."""
    try:
        output = subprocess.check_output(['service', service, 'status'],
                                         stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        return False
    # Upstart jobs report "start/running"; SysV scripts "is running".
    if "start/running" in output or "is running" in output:
        return True
    return False


def service_available(service_name):
    """True when ``service <name> status`` exits successfully."""
    try:
        subprocess.check_output(['service', service_name, 'status'],
                                stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        return False
    return True
719+
720+
def adduser(username, password=None, shell='/bin/bash', system_user=False):
    """Ensure a system user exists; return its pwd entry."""
    try:
        user_info = pwd.getpwnam(username)
        log('user {0} already exists!'.format(username))
    except KeyError:
        log('creating user {0}'.format(username))
        cmd = ['useradd']
        if system_user or password is None:
            # No password supplied (or explicitly a system user):
            # create a system account without a home directory.
            cmd.append('--system')
        else:
            cmd.extend([
                '--create-home',
                '--shell', shell,
                '--password', password,
            ])
        cmd.append(username)
        subprocess.check_call(cmd)
        user_info = pwd.getpwnam(username)
    return user_info


def add_user_to_group(username, group):
    """Add an existing user to an existing group via gpasswd."""
    log("Adding user {} to group {}".format(username, group))
    subprocess.check_call(['gpasswd', '-a', username, group])
752+
753+
def rsync(from_path, to_path, flags='-r', options=None):
    """Replicate *from_path* to *to_path* via rsync; return its output."""
    # Falsy options (None or []) fall back to the defaults, matching
    # the original 'options or [...]' behavior.
    options = options or ['--delete', '--executability']
    cmd = ['/usr/bin/rsync', flags]
    cmd.extend(options)
    cmd.extend([from_path, to_path])
    log(" ".join(cmd))
    return subprocess.check_output(cmd).strip()


def symlink(source, destination):
    """Create (or forcibly replace) a symbolic link."""
    log("Symlinking {} as {}".format(source, destination))
    subprocess.check_call(['ln', '-sf', source, destination])
775+
776+
def mkdir(path, owner='root', group='root', perms=0o555, force=False):
    """Create a directory at *path* owned by owner:group with *perms*.

    With force=True, a non-directory already occupying *path* is
    removed and replaced by the directory.
    """
    log("Making dir {} {}:{} {:o}".format(path, owner, group,
                                          perms))
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    realpath = os.path.abspath(path)
    if os.path.exists(realpath):
        if force and not os.path.isdir(realpath):
            log("Removing non-directory file {} prior to mkdir()".format(path))
            os.unlink(realpath)
            # Bug fix: the original unlinked the blocking file but never
            # created the directory, leaving nothing at *path*.
            os.makedirs(realpath, perms)
    else:
        os.makedirs(realpath, perms)
    os.chown(realpath, uid, gid)
791+
792+
793+def write_file(path, content, owner='root', group='root', perms=0444):
794+ """Create or overwrite a file with the contents of a string"""
795+ log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
796+ uid = pwd.getpwnam(owner).pw_uid
797+ gid = grp.getgrnam(group).gr_gid
798+ with open(path, 'w') as target:
799+ os.fchown(target.fileno(), uid, gid)
800+ os.fchmod(target.fileno(), perms)
801+ target.write(content)
802+
803+
def fstab_remove(mp):
    """Remove the /etc/fstab entry for mountpoint *mp*."""
    return Fstab.remove_by_mountpoint(mp)


def fstab_add(dev, mp, fs, options=None):
    """Append an /etc/fstab entry for device *dev* mounted at *mp*."""
    return Fstab.add(dev, mp, fs, options=options)
814+
815+
def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
    """Mount *device* at *mountpoint*; optionally persist to /etc/fstab.

    Returns False when the mount command fails; otherwise True, or the
    fstab entry when persist=True.
    """
    cmd_args = ['mount']
    if options is not None:
        cmd_args.extend(['-o', options])
    cmd_args.extend([device, mountpoint])
    try:
        subprocess.check_output(cmd_args)
    # 'except X as e' is valid on Python 2.6+ and Python 3, unlike the
    # Python-2-only 'except X, e' form used previously.
    except subprocess.CalledProcessError as e:
        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
        return False

    if persist:
        return fstab_add(device, mountpoint, filesystem, options=options)
    return True


def umount(mountpoint, persist=False):
    """Unmount *mountpoint*; optionally remove it from /etc/fstab."""
    cmd_args = ['umount', mountpoint]
    try:
        subprocess.check_output(cmd_args)
    except subprocess.CalledProcessError as e:
        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
        return False

    if persist:
        return fstab_remove(mountpoint)
    return True
845+
846+
def mounts():
    """Get a list of all mounted volumes as [[mountpoint, device], ...].

    /proc/mounts lines are "device mountpoint fstype ..."; the slice
    [1::-1] takes the first two fields in reversed order.
    """
    with open('/proc/mounts') as f:
        system_mounts = [fields[1::-1]
                         for fields in (line.strip().split() for line in f)]
    return system_mounts
854+
855+
def file_hash(path):
    """Return the md5 hex digest of the file at *path*, or None if absent."""
    if not os.path.exists(path):
        return None
    h = hashlib.md5()
    # Read in binary mode: hashing requires bytes, and text mode would
    # break on non-UTF-8 content under Python 3. 'with' guarantees the
    # handle is closed promptly.
    with open(path, 'rb') as source:
        h.update(source.read())
    return h.hexdigest()
865+
866+
def restart_on_change(restart_map, stopstart=False):
    """Restart services when watched configuration files change.

    Used as a decorator, for example::

        @restart_on_change({
            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
        })
        def ceph_client_changed():
            pass # your code here

    After the wrapped function runs, any file in *restart_map* whose
    hash changed causes its services to be restarted (or stopped and
    then started when stopstart=True). Duplicate service names are
    handled once, in first-seen order.
    """
    def wrap(f):
        def wrapped_f(*args):
            # Snapshot hashes before running the wrapped function.
            before = {}
            for path in restart_map:
                before[path] = file_hash(path)
            f(*args)
            restarts = []
            for path in restart_map:
                if before[path] != file_hash(path):
                    restarts += restart_map[path]
            # De-duplicate while preserving order.
            services_list = list(OrderedDict.fromkeys(restarts))
            if stopstart:
                for action in ('stop', 'start'):
                    for svc in services_list:
                        service(action, svc)
            else:
                for svc in services_list:
                    service('restart', svc)
        return wrapped_f
    return wrap
902+
903+
def lsb_release():
    """Return /etc/lsb-release as a dict."""
    d = {}
    with open('/etc/lsb-release', 'r') as lsb:
        for line in lsb:
            # Split on the first '=' only: values such as
            # DISTRIB_DESCRIPTION may themselves contain '=' and would
            # make a bare split('=') raise ValueError.
            k, v = line.split('=', 1)
            d[k.strip()] = v.strip()
    return d
912+
913+
def pwgen(length=None):
    """Generate a random password.

    Alphanumeric, excluding easily-confused characters; defaults to a
    random length between 35 and 44 characters.
    """
    if length is None:
        length = random.choice(range(35, 45))
    # string.ascii_letters is locale-independent and exists on both
    # Python 2 and 3 (string.letters is Python-2-only and varies with
    # the locale).
    alphanumeric_chars = [
        l for l in (string.ascii_letters + string.digits)
        if l not in 'l0QD1vAEIOUaeiou']
    # NOTE(review): the 'random' module is not cryptographically
    # strong; consider random.SystemRandom for password generation.
    random_chars = [
        random.choice(alphanumeric_chars) for _ in range(length)]
    return(''.join(random_chars))
924+
925+
def list_nics(nic_type):
    '''Return a list of nics of given type(s)'''
    # Accept a single type name or an iterable of them (Python 2).
    if isinstance(nic_type, basestring):
        int_types = [nic_type]
    else:
        int_types = nic_type
    interfaces = []
    for int_type in int_types:
        cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
        for line in subprocess.check_output(cmd).split('\n'):
            if not line:
                continue
            name = line.split()[1]
            if name.startswith(int_type):
                interfaces.append(name.replace(":", ""))
    return interfaces


def set_nic_mtu(nic, mtu):
    '''Set the MTU on a network interface.'''
    subprocess.check_call(['ip', 'link', 'set', nic, 'mtu', mtu])


def get_nic_mtu(nic):
    '''Return the MTU of *nic* as reported by `ip addr show`, or "".'''
    ip_output = subprocess.check_output(['ip', 'addr', 'show', nic])
    mtu = ""
    for line in ip_output.split('\n'):
        words = line.split()
        if 'mtu' in words:
            mtu = words[words.index("mtu") + 1]
    return mtu


def get_nic_hwaddr(nic):
    '''Return the hardware (MAC) address of *nic*, or "" if not found.'''
    ip_output = subprocess.check_output(['ip', '-o', '-0', 'addr', 'show', nic])
    words = ip_output.split()
    hwaddr = ""
    if 'link/ether' in words:
        hwaddr = words[words.index('link/ether') + 1]
    return hwaddr
968+
969+
def cmp_pkgrevno(package, revno, pkgcache=None):
    '''Compare supplied revno with the revno of the installed package

    *  1 => Installed revno is greater than supplied arg
    *  0 => Installed revno is the same as supplied arg
    * -1 => Installed revno is less than supplied arg

    '''
    import apt_pkg
    if not pkgcache:
        apt_pkg.init()
        # Build the apt cache in memory to avoid racing other
        # applications that build the on-disk cache in the same place.
        apt_pkg.config.set("Dir::Cache::pkgcache", "")
        pkgcache = apt_pkg.Cache()
    pkg = pkgcache[package]
    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
988+
989+
@contextmanager
def chdir(d):
    """Context manager: execute the body with *d* as the working
    directory, restoring the previous one afterwards."""
    cur = os.getcwd()
    try:
        yield os.chdir(d)
    finally:
        os.chdir(cur)


def chownr(path, owner, group):
    """Recursively chown everything under *path* to owner:group,
    skipping dangling symlinks."""
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid

    for root, dirs, files in os.walk(path):
        for name in dirs + files:
            full = os.path.join(root, name)
            # lexists-but-not-exists means a broken symlink, which
            # os.chown would raise on.
            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
            if not broken_symlink:
                os.chown(full, uid, gid)
1009
1010=== added directory 'hooks/charmhelpers/core/services'
1011=== added file 'hooks/charmhelpers/core/services/__init__.py'
1012--- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000
1013+++ hooks/charmhelpers/core/services/__init__.py 2014-08-21 11:32:42 +0000
1014@@ -0,0 +1,2 @@
1015+from .base import *
1016+from .helpers import *
1017
1018=== added file 'hooks/charmhelpers/core/services/base.py'
1019--- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000
1020+++ hooks/charmhelpers/core/services/base.py 2014-08-21 11:32:42 +0000
1021@@ -0,0 +1,305 @@
1022+import os
1023+import re
1024+import json
1025+from collections import Iterable
1026+
1027+from charmhelpers.core import host
1028+from charmhelpers.core import hookenv
1029+
1030+
1031+__all__ = ['ServiceManager', 'ManagerCallback',
1032+ 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
1033+ 'service_restart', 'service_stop']
1034+
1035+
1036+class ServiceManager(object):
1037+ def __init__(self, services=None):
1038+ """
1039+ Register a list of services, given their definitions.
1040+
1041+ Traditional charm authoring is focused on implementing hooks. That is,
1042+ the charm author is thinking in terms of "What hook am I handling; what
1043+ does this hook need to do?" However, in most cases, the real question
1044+ should be "Do I have the information I need to configure and start this
1045+ piece of software and, if so, what are the steps for doing so?" The
1046+ ServiceManager framework tries to bring the focus to the data and the
1047+ setup tasks, in the most declarative way possible.
1048+
1049+ Service definitions are dicts in the following formats (all keys except
1050+ 'service' are optional)::
1051+
1052+ {
1053+ "service": <service name>,
1054+ "required_data": <list of required data contexts>,
1055+ "data_ready": <one or more callbacks>,
1056+ "data_lost": <one or more callbacks>,
1057+ "start": <one or more callbacks>,
1058+ "stop": <one or more callbacks>,
1059+ "ports": <list of ports to manage>,
1060+ }
1061+
1062+ The 'required_data' list should contain dicts of required data (or
1063+ dependency managers that act like dicts and know how to collect the data).
1064+ Only when all items in the 'required_data' list are populated are the list
1065+ of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
1066+ information.
1067+
1068+ The 'data_ready' value should be either a single callback, or a list of
1069+ callbacks, to be called when all items in 'required_data' pass `is_ready()`.
1070+ Each callback will be called with the service name as the only parameter.
1071+ After all of the 'data_ready' callbacks are called, the 'start' callbacks
1072+ are fired.
1073+
1074+ The 'data_lost' value should be either a single callback, or a list of
1075+ callbacks, to be called when a 'required_data' item no longer passes
1076+ `is_ready()`. Each callback will be called with the service name as the
1077+ only parameter. After all of the 'data_lost' callbacks are called,
1078+ the 'stop' callbacks are fired.
1079+
1080+ The 'start' value should be either a single callback, or a list of
1081+ callbacks, to be called when starting the service, after the 'data_ready'
1082+ callbacks are complete. Each callback will be called with the service
1083+ name as the only parameter. This defaults to
1084+ `[host.service_start, services.open_ports]`.
1085+
1086+ The 'stop' value should be either a single callback, or a list of
1087+ callbacks, to be called when stopping the service. If the service is
1088+ being stopped because it no longer has all of its 'required_data', this
1089+ will be called after all of the 'data_lost' callbacks are complete.
1090+ Each callback will be called with the service name as the only parameter.
1091+ This defaults to `[services.close_ports, host.service_stop]`.
1092+
1093+ The 'ports' value should be a list of ports to manage. The default
1094+ 'start' handler will open the ports after the service is started,
1095+ and the default 'stop' handler will close the ports prior to stopping
1096+ the service.
1097+
1098+
1099+ Examples:
1100+
1101+ The following registers an Upstart service called bingod that depends on
1102+ a mongodb relation and which runs a custom `db_migrate` function prior to
1103+ restarting the service, and a Runit service called spadesd::
1104+
1105+ manager = services.ServiceManager([
1106+ {
1107+ 'service': 'bingod',
1108+ 'ports': [80, 443],
1109+ 'required_data': [MongoRelation(), config(), {'my': 'data'}],
1110+ 'data_ready': [
1111+ services.template(source='bingod.conf'),
1112+ services.template(source='bingod.ini',
1113+ target='/etc/bingod.ini',
1114+ owner='bingo', perms=0400),
1115+ ],
1116+ },
1117+ {
1118+ 'service': 'spadesd',
1119+ 'data_ready': services.template(source='spadesd_run.j2',
1120+ target='/etc/sv/spadesd/run',
1121+ perms=0555),
1122+ 'start': runit_start,
1123+ 'stop': runit_stop,
1124+ },
1125+ ])
1126+ manager.manage()
1127+ """
1128+ self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
1129+ self._ready = None
1130+ self.services = {}
1131+ for service in services or []:
1132+ service_name = service['service']
1133+ self.services[service_name] = service
1134+
1135+ def manage(self):
1136+ """
1137+ Handle the current hook by doing The Right Thing with the registered services.
1138+ """
1139+ hook_name = hookenv.hook_name()
1140+ if hook_name == 'stop':
1141+ self.stop_services()
1142+ else:
1143+ self.provide_data()
1144+ self.reconfigure_services()
1145+
1146+ def provide_data(self):
1147+ hook_name = hookenv.hook_name()
1148+ for service in self.services.values():
1149+ for provider in service.get('provided_data', []):
1150+ if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
1151+ data = provider.provide_data()
1152+ if provider._is_ready(data):
1153+ hookenv.relation_set(None, data)
1154+
1155+ def reconfigure_services(self, *service_names):
1156+ """
1157+ Update all files for one or more registered services, and,
1158+ if ready, optionally restart them.
1159+
1160+ If no service names are given, reconfigures all registered services.
1161+ """
1162+ for service_name in service_names or self.services.keys():
1163+ if self.is_ready(service_name):
1164+ self.fire_event('data_ready', service_name)
1165+ self.fire_event('start', service_name, default=[
1166+ service_restart,
1167+ manage_ports])
1168+ self.save_ready(service_name)
1169+ else:
1170+ if self.was_ready(service_name):
1171+ self.fire_event('data_lost', service_name)
1172+ self.fire_event('stop', service_name, default=[
1173+ manage_ports,
1174+ service_stop])
1175+ self.save_lost(service_name)
1176+
1177+ def stop_services(self, *service_names):
1178+ """
1179+ Stop one or more registered services, by name.
1180+
1181+ If no service names are given, stops all registered services.
1182+ """
1183+ for service_name in service_names or self.services.keys():
1184+ self.fire_event('stop', service_name, default=[
1185+ manage_ports,
1186+ service_stop])
1187+
1188+ def get_service(self, service_name):
1189+ """
1190+ Given the name of a registered service, return its service definition.
1191+ """
1192+ service = self.services.get(service_name)
1193+ if not service:
1194+ raise KeyError('Service not registered: %s' % service_name)
1195+ return service
1196+
1197+ def fire_event(self, event_name, service_name, default=None):
1198+ """
1199+ Fire a data_ready, data_lost, start, or stop event on a given service.
1200+ """
1201+ service = self.get_service(service_name)
1202+ callbacks = service.get(event_name, default)
1203+ if not callbacks:
1204+ return
1205+ if not isinstance(callbacks, Iterable):
1206+ callbacks = [callbacks]
1207+ for callback in callbacks:
1208+ if isinstance(callback, ManagerCallback):
1209+ callback(self, service_name, event_name)
1210+ else:
1211+ callback(service_name)
1212+
1213+ def is_ready(self, service_name):
1214+ """
1215+ Determine if a registered service is ready, by checking its 'required_data'.
1216+
1217+ A 'required_data' item can be any mapping type, and is considered ready
1218+ if `bool(item)` evaluates as True.
1219+ """
1220+ service = self.get_service(service_name)
1221+ reqs = service.get('required_data', [])
1222+ return all(bool(req) for req in reqs)
1223+
1224+ def _load_ready_file(self):
1225+ if self._ready is not None:
1226+ return
1227+ if os.path.exists(self._ready_file):
1228+ with open(self._ready_file) as fp:
1229+ self._ready = set(json.load(fp))
1230+ else:
1231+ self._ready = set()
1232+
1233+ def _save_ready_file(self):
1234+ if self._ready is None:
1235+ return
1236+ with open(self._ready_file, 'w') as fp:
1237+ json.dump(list(self._ready), fp)
1238+
1239+ def save_ready(self, service_name):
1240+ """
1241+ Save an indicator that the given service is now data_ready.
1242+ """
1243+ self._load_ready_file()
1244+ self._ready.add(service_name)
1245+ self._save_ready_file()
1246+
1247+ def save_lost(self, service_name):
1248+ """
1249+ Save an indicator that the given service is no longer data_ready.
1250+ """
1251+ self._load_ready_file()
1252+ self._ready.discard(service_name)
1253+ self._save_ready_file()
1254+
1255+ def was_ready(self, service_name):
1256+ """
1257+ Determine if the given service was previously data_ready.
1258+ """
1259+ self._load_ready_file()
1260+ return service_name in self._ready
1261+
1262+
1263+class ManagerCallback(object):
1264+ """
1265+ Special case of a callback that takes the `ServiceManager` instance
1266+ in addition to the service name.
1267+
1268+ Subclasses should implement `__call__` which should accept three parameters:
1269+
1270+ * `manager` The `ServiceManager` instance
1271+ * `service_name` The name of the service it's being triggered for
1272+ * `event_name` The name of the event that this callback is handling
1273+ """
1274+ def __call__(self, manager, service_name, event_name):
1275+ raise NotImplementedError()
1276+
1277+
1278+class PortManagerCallback(ManagerCallback):
1279+ """
1280+ Callback class that will open or close ports, for use as either
1281+ a start or stop action.
1282+ """
1283+ def __call__(self, manager, service_name, event_name):
1284+ service = manager.get_service(service_name)
1285+ new_ports = service.get('ports', [])
1286+ port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
1287+ if os.path.exists(port_file):
1288+ with open(port_file) as fp:
1289+ old_ports = fp.read().split(',')
1290+ for old_port in old_ports:
1291+ if bool(old_port):
1292+ old_port = int(old_port)
1293+ if old_port not in new_ports:
1294+ hookenv.close_port(old_port)
1295+ with open(port_file, 'w') as fp:
1296+ fp.write(','.join(str(port) for port in new_ports))
1297+ for port in new_ports:
1298+ if event_name == 'start':
1299+ hookenv.open_port(port)
1300+ elif event_name == 'stop':
1301+ hookenv.close_port(port)
1302+
1303+
1304+def service_stop(service_name):
1305+ """
1306+ Wrapper around host.service_stop to prevent spurious "unknown service"
1307+ messages in the logs.
1308+ """
1309+ if host.service_running(service_name):
1310+ host.service_stop(service_name)
1311+
1312+
1313+def service_restart(service_name):
1314+ """
1315+ Wrapper around host.service_restart to prevent spurious "unknown service"
1316+ messages in the logs.
1317+ """
1318+ if host.service_available(service_name):
1319+ if host.service_running(service_name):
1320+ host.service_restart(service_name)
1321+ else:
1322+ host.service_start(service_name)
1323+
1324+
1325+# Convenience aliases
1326+open_ports = close_ports = manage_ports = PortManagerCallback()
1327
1328=== added file 'hooks/charmhelpers/core/services/helpers.py'
1329--- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000
1330+++ hooks/charmhelpers/core/services/helpers.py 2014-08-21 11:32:42 +0000
1331@@ -0,0 +1,125 @@
1332+from charmhelpers.core import hookenv
1333+from charmhelpers.core import templating
1334+
1335+from charmhelpers.core.services.base import ManagerCallback
1336+
1337+
1338+__all__ = ['RelationContext', 'TemplateCallback',
1339+ 'render_template', 'template']
1340+
1341+
1342+class RelationContext(dict):
1343+ """
1344+ Base class for a context generator that gets relation data from juju.
1345+
1346+ Subclasses must provide the attributes `name`, which is the name of the
1347+ interface of interest, `interface`, which is the type of the interface of
1348+ interest, and `required_keys`, which is the set of keys required for the
1349+ relation to be considered complete. The data for all interfaces matching
1350+ the `name` attribute that are complete will used to populate the dictionary
1351+ values (see `get_data`, below).
1352+
1353+ The generated context will be namespaced under the interface type, to prevent
1354+ potential naming conflicts.
1355+ """
1356+ name = None
1357+ interface = None
1358+ required_keys = []
1359+
1360+ def __init__(self, *args, **kwargs):
1361+ super(RelationContext, self).__init__(*args, **kwargs)
1362+ self.get_data()
1363+
1364+ def __bool__(self):
1365+ """
1366+ Returns True if all of the required_keys are available.
1367+ """
1368+ return self.is_ready()
1369+
1370+ __nonzero__ = __bool__
1371+
1372+ def __repr__(self):
1373+ return super(RelationContext, self).__repr__()
1374+
1375+ def is_ready(self):
1376+ """
1377+ Returns True if all of the `required_keys` are available from any units.
1378+ """
1379+ ready = len(self.get(self.name, [])) > 0
1380+ if not ready:
1381+ hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
1382+ return ready
1383+
1384+ def _is_ready(self, unit_data):
1385+ """
1386+ Helper method that tests a set of relation data and returns True if
1387+ all of the `required_keys` are present.
1388+ """
1389+ return set(unit_data.keys()).issuperset(set(self.required_keys))
1390+
1391+ def get_data(self):
1392+ """
1393+ Retrieve the relation data for each unit involved in a relation and,
1394+ if complete, store it in a list under `self[self.name]`. This
1395+ is automatically called when the RelationContext is instantiated.
1396+
1397+ The units are sorted lexicographically first by the service ID, then by
1398+ the unit ID. Thus, if an interface has two other services, 'db:1'
1399+ and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
1400+ and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
1401+ set of data, the relation data for the units will be stored in the
1402+ order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
1403+
1404+ If you only care about a single unit on the relation, you can just
1405+ access it as `{{ interface[0]['key'] }}`. However, if you can at all
1406+ support multiple units on a relation, you should iterate over the list,
1407+ like::
1408+
1409+ {% for unit in interface -%}
1410+ {{ unit['key'] }}{% if not loop.last %},{% endif %}
1411+ {%- endfor %}
1412+
1413+ Note that since all sets of relation data from all related services and
1414+ units are in a single list, if you need to know which service or unit a
1415+ set of data came from, you'll need to extend this class to preserve
1416+ that information.
1417+ """
1418+ if not hookenv.relation_ids(self.name):
1419+ return
1420+
1421+ ns = self.setdefault(self.name, [])
1422+ for rid in sorted(hookenv.relation_ids(self.name)):
1423+ for unit in sorted(hookenv.related_units(rid)):
1424+ reldata = hookenv.relation_get(rid=rid, unit=unit)
1425+ if self._is_ready(reldata):
1426+ ns.append(reldata)
1427+
1428+ def provide_data(self):
1429+ """
1430+ Return data to be relation_set for this interface.
1431+ """
1432+ return {}
1433+
1434+
1435+class TemplateCallback(ManagerCallback):
1436+ """
1437+ Callback class that will render a template, for use as a ready action.
1438+ """
1439+ def __init__(self, source, target, owner='root', group='root', perms=0444):
1440+ self.source = source
1441+ self.target = target
1442+ self.owner = owner
1443+ self.group = group
1444+ self.perms = perms
1445+
1446+ def __call__(self, manager, service_name, event_name):
1447+ service = manager.get_service(service_name)
1448+ context = {}
1449+ for ctx in service.get('required_data', []):
1450+ context.update(ctx)
1451+ templating.render(self.source, self.target, context,
1452+ self.owner, self.group, self.perms)
1453+
1454+
1455+# Convenience aliases for templates
1456+render_template = template = TemplateCallback
1457
1458=== added file 'hooks/charmhelpers/core/templating.py'
1459--- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000
1460+++ hooks/charmhelpers/core/templating.py 2014-08-21 11:32:42 +0000
1461@@ -0,0 +1,51 @@
1462+import os
1463+
1464+from charmhelpers.core import host
1465+from charmhelpers.core import hookenv
1466+
1467+
1468+def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
1469+ """
1470+ Render a template.
1471+
1472+ The `source` path, if not absolute, is relative to the `templates_dir`.
1473+
1474+ The `target` path should be absolute.
1475+
1476+ The context should be a dict containing the values to be replaced in the
1477+ template.
1478+
1479+ The `owner`, `group`, and `perms` options will be passed to `write_file`.
1480+
1481+ If omitted, `templates_dir` defaults to the `templates` folder in the charm.
1482+
1483+ Note: Using this requires python-jinja2; if it is not installed, calling
1484+ this will attempt to use charmhelpers.fetch.apt_install to install it.
1485+ """
1486+ try:
1487+ from jinja2 import FileSystemLoader, Environment, exceptions
1488+ except ImportError:
1489+ try:
1490+ from charmhelpers.fetch import apt_install
1491+ except ImportError:
1492+ hookenv.log('Could not import jinja2, and could not import '
1493+ 'charmhelpers.fetch to install it',
1494+ level=hookenv.ERROR)
1495+ raise
1496+ apt_install('python-jinja2', fatal=True)
1497+ from jinja2 import FileSystemLoader, Environment, exceptions
1498+
1499+ if templates_dir is None:
1500+ templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
1501+ loader = Environment(loader=FileSystemLoader(templates_dir))
1502+ try:
1503+ source = source
1504+ template = loader.get_template(source)
1505+ except exceptions.TemplateNotFound as e:
1506+ hookenv.log('Could not load template %s from %s.' %
1507+ (source, templates_dir),
1508+ level=hookenv.ERROR)
1509+ raise e
1510+ content = template.render(context)
1511+ host.mkdir(os.path.dirname(target))
1512+ host.write_file(target, content, owner, group, perms)
1513
1514=== added directory 'hooks/charmhelpers/fetch'
1515=== added file 'hooks/charmhelpers/fetch/__init__.py'
1516--- hooks/charmhelpers/fetch/__init__.py 1970-01-01 00:00:00 +0000
1517+++ hooks/charmhelpers/fetch/__init__.py 2014-08-21 11:32:42 +0000
1518@@ -0,0 +1,394 @@
1519+import importlib
1520+from tempfile import NamedTemporaryFile
1521+import time
1522+from yaml import safe_load
1523+from charmhelpers.core.host import (
1524+ lsb_release
1525+)
1526+from urlparse import (
1527+ urlparse,
1528+ urlunparse,
1529+)
1530+import subprocess
1531+from charmhelpers.core.hookenv import (
1532+ config,
1533+ log,
1534+)
1535+import os
1536+
1537+
1538+CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
1539+deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
1540+"""
1541+PROPOSED_POCKET = """# Proposed
1542+deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
1543+"""
1544+CLOUD_ARCHIVE_POCKETS = {
1545+ # Folsom
1546+ 'folsom': 'precise-updates/folsom',
1547+ 'precise-folsom': 'precise-updates/folsom',
1548+ 'precise-folsom/updates': 'precise-updates/folsom',
1549+ 'precise-updates/folsom': 'precise-updates/folsom',
1550+ 'folsom/proposed': 'precise-proposed/folsom',
1551+ 'precise-folsom/proposed': 'precise-proposed/folsom',
1552+ 'precise-proposed/folsom': 'precise-proposed/folsom',
1553+ # Grizzly
1554+ 'grizzly': 'precise-updates/grizzly',
1555+ 'precise-grizzly': 'precise-updates/grizzly',
1556+ 'precise-grizzly/updates': 'precise-updates/grizzly',
1557+ 'precise-updates/grizzly': 'precise-updates/grizzly',
1558+ 'grizzly/proposed': 'precise-proposed/grizzly',
1559+ 'precise-grizzly/proposed': 'precise-proposed/grizzly',
1560+ 'precise-proposed/grizzly': 'precise-proposed/grizzly',
1561+ # Havana
1562+ 'havana': 'precise-updates/havana',
1563+ 'precise-havana': 'precise-updates/havana',
1564+ 'precise-havana/updates': 'precise-updates/havana',
1565+ 'precise-updates/havana': 'precise-updates/havana',
1566+ 'havana/proposed': 'precise-proposed/havana',
1567+ 'precise-havana/proposed': 'precise-proposed/havana',
1568+ 'precise-proposed/havana': 'precise-proposed/havana',
1569+ # Icehouse
1570+ 'icehouse': 'precise-updates/icehouse',
1571+ 'precise-icehouse': 'precise-updates/icehouse',
1572+ 'precise-icehouse/updates': 'precise-updates/icehouse',
1573+ 'precise-updates/icehouse': 'precise-updates/icehouse',
1574+ 'icehouse/proposed': 'precise-proposed/icehouse',
1575+ 'precise-icehouse/proposed': 'precise-proposed/icehouse',
1576+ 'precise-proposed/icehouse': 'precise-proposed/icehouse',
1577+ # Juno
1578+ 'juno': 'trusty-updates/juno',
1579+ 'trusty-juno': 'trusty-updates/juno',
1580+ 'trusty-juno/updates': 'trusty-updates/juno',
1581+ 'trusty-updates/juno': 'trusty-updates/juno',
1582+ 'juno/proposed': 'trusty-proposed/juno',
1583+ 'juno/proposed': 'trusty-proposed/juno',
1584+ 'trusty-juno/proposed': 'trusty-proposed/juno',
1585+ 'trusty-proposed/juno': 'trusty-proposed/juno',
1586+}
1587+
1588+# The order of this list is very important. Handlers should be listed in from
1589+# least- to most-specific URL matching.
1590+FETCH_HANDLERS = (
1591+ 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
1592+ 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
1593+)
1594+
1595+APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
1596+APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
1597+APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
1598+
1599+
1600+class SourceConfigError(Exception):
1601+ pass
1602+
1603+
1604+class UnhandledSource(Exception):
1605+ pass
1606+
1607+
1608+class AptLockError(Exception):
1609+ pass
1610+
1611+
1612+class BaseFetchHandler(object):
1613+
1614+ """Base class for FetchHandler implementations in fetch plugins"""
1615+
1616+ def can_handle(self, source):
1617+ """Returns True if the source can be handled. Otherwise returns
1618+ a string explaining why it cannot"""
1619+ return "Wrong source type"
1620+
1621+ def install(self, source):
1622+ """Try to download and unpack the source. Return the path to the
1623+ unpacked files or raise UnhandledSource."""
1624+ raise UnhandledSource("Wrong source type {}".format(source))
1625+
1626+ def parse_url(self, url):
1627+ return urlparse(url)
1628+
1629+ def base_url(self, url):
1630+ """Return url without querystring or fragment"""
1631+ parts = list(self.parse_url(url))
1632+ parts[4:] = ['' for i in parts[4:]]
1633+ return urlunparse(parts)
1634+
1635+
1636+def filter_installed_packages(packages):
1637+ """Returns a list of packages that require installation"""
1638+ import apt_pkg
1639+ apt_pkg.init()
1640+
1641+ # Tell apt to build an in-memory cache to prevent race conditions (if
1642+ # another process is already building the cache).
1643+ apt_pkg.config.set("Dir::Cache::pkgcache", "")
1644+ apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
1645+
1646+ cache = apt_pkg.Cache()
1647+ _pkgs = []
1648+ for package in packages:
1649+ try:
1650+ p = cache[package]
1651+ p.current_ver or _pkgs.append(package)
1652+ except KeyError:
1653+ log('Package {} has no installation candidate.'.format(package),
1654+ level='WARNING')
1655+ _pkgs.append(package)
1656+ return _pkgs
1657+
1658+
1659+def apt_install(packages, options=None, fatal=False):
1660+ """Install one or more packages"""
1661+ if options is None:
1662+ options = ['--option=Dpkg::Options::=--force-confold']
1663+
1664+ cmd = ['apt-get', '--assume-yes']
1665+ cmd.extend(options)
1666+ cmd.append('install')
1667+ if isinstance(packages, basestring):
1668+ cmd.append(packages)
1669+ else:
1670+ cmd.extend(packages)
1671+ log("Installing {} with options: {}".format(packages,
1672+ options))
1673+ _run_apt_command(cmd, fatal)
1674+
1675+
1676+def apt_upgrade(options=None, fatal=False, dist=False):
1677+ """Upgrade all packages"""
1678+ if options is None:
1679+ options = ['--option=Dpkg::Options::=--force-confold']
1680+
1681+ cmd = ['apt-get', '--assume-yes']
1682+ cmd.extend(options)
1683+ if dist:
1684+ cmd.append('dist-upgrade')
1685+ else:
1686+ cmd.append('upgrade')
1687+ log("Upgrading with options: {}".format(options))
1688+ _run_apt_command(cmd, fatal)
1689+
1690+
1691+def apt_update(fatal=False):
1692+ """Update local apt cache"""
1693+ cmd = ['apt-get', 'update']
1694+ _run_apt_command(cmd, fatal)
1695+
1696+
1697+def apt_purge(packages, fatal=False):
1698+ """Purge one or more packages"""
1699+ cmd = ['apt-get', '--assume-yes', 'purge']
1700+ if isinstance(packages, basestring):
1701+ cmd.append(packages)
1702+ else:
1703+ cmd.extend(packages)
1704+ log("Purging {}".format(packages))
1705+ _run_apt_command(cmd, fatal)
1706+
1707+
1708+def apt_hold(packages, fatal=False):
1709+ """Hold one or more packages"""
1710+ cmd = ['apt-mark', 'hold']
1711+ if isinstance(packages, basestring):
1712+ cmd.append(packages)
1713+ else:
1714+ cmd.extend(packages)
1715+ log("Holding {}".format(packages))
1716+
1717+ if fatal:
1718+ subprocess.check_call(cmd)
1719+ else:
1720+ subprocess.call(cmd)
1721+
1722+
1723+def add_source(source, key=None):
1724+ """Add a package source to this system.
1725+
1726+ @param source: a URL or sources.list entry, as supported by
1727+ add-apt-repository(1). Examples:
1728+ ppa:charmers/example
1729+ deb https://stub:key@private.example.com/ubuntu trusty main
1730+
1731+ In addition:
1732+ 'proposed:' may be used to enable the standard 'proposed'
1733+ pocket for the release.
1734+ 'cloud:' may be used to activate official cloud archive pockets,
1735+ such as 'cloud:icehouse'
1736+
1737+ @param key: A key to be added to the system's APT keyring and used
1738+ to verify the signatures on packages. Ideally, this should be an
1739+ ASCII format GPG public key including the block headers. A GPG key
1740+ id may also be used, but be aware that only insecure protocols are
1741+ available to retrieve the actual public key from a public keyserver
1742+ placing your Juju environment at risk. ppa and cloud archive keys
1743+ are securely added automatically, so should not be provided.
1744+ """
1745+ if source is None:
1746+ log('Source is not present. Skipping')
1747+ return
1748+
1749+ if (source.startswith('ppa:') or
1750+ source.startswith('http') or
1751+ source.startswith('deb ') or
1752+ source.startswith('cloud-archive:')):
1753+ subprocess.check_call(['add-apt-repository', '--yes', source])
1754+ elif source.startswith('cloud:'):
1755+ apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
1756+ fatal=True)
1757+ pocket = source.split(':')[-1]
1758+ if pocket not in CLOUD_ARCHIVE_POCKETS:
1759+ raise SourceConfigError(
1760+ 'Unsupported cloud: source option %s' %
1761+ pocket)
1762+ actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
1763+ with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
1764+ apt.write(CLOUD_ARCHIVE.format(actual_pocket))
1765+ elif source == 'proposed':
1766+ release = lsb_release()['DISTRIB_CODENAME']
1767+ with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
1768+ apt.write(PROPOSED_POCKET.format(release))
1769+ else:
1770+ raise SourceConfigError("Unknown source: {!r}".format(source))
1771+
1772+ if key:
1773+ if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
1774+ with NamedTemporaryFile() as key_file:
1775+ key_file.write(key)
1776+ key_file.flush()
1777+ key_file.seek(0)
1778+ subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
1779+ else:
1780+ # Note that hkp: is in no way a secure protocol. Using a
1781+ # GPG key id is pointless from a security POV unless you
1782+ # absolutely trust your network and DNS.
1783+ subprocess.check_call(['apt-key', 'adv', '--keyserver',
1784+ 'hkp://keyserver.ubuntu.com:80', '--recv',
1785+ key])
1786+
1787+
1788+def configure_sources(update=False,
1789+ sources_var='install_sources',
1790+ keys_var='install_keys'):
1791+ """
1792+ Configure multiple sources from charm configuration.
1793+
1794+ The lists are encoded as yaml fragments in the configuration.
1795+ The fragment needs to be included as a string. Sources and their
1796+ corresponding keys are of the types supported by add_source().
1797+
1798+ Example config:
1799+ install_sources: |
1800+ - "ppa:foo"
1801+ - "http://example.com/repo precise main"
1802+ install_keys: |
1803+ - null
1804+ - "a1b2c3d4"
1805+
1806+ Note that 'null' (a.k.a. None) should not be quoted.
1807+ """
1808+ sources = safe_load((config(sources_var) or '').strip()) or []
1809+ keys = safe_load((config(keys_var) or '').strip()) or None
1810+
1811+ if isinstance(sources, basestring):
1812+ sources = [sources]
1813+
1814+ if keys is None:
1815+ for source in sources:
1816+ add_source(source, None)
1817+ else:
1818+ if isinstance(keys, basestring):
1819+ keys = [keys]
1820+
1821+ if len(sources) != len(keys):
1822+ raise SourceConfigError(
1823+ 'Install sources and keys lists are different lengths')
1824+ for source, key in zip(sources, keys):
1825+ add_source(source, key)
1826+ if update:
1827+ apt_update(fatal=True)
1828+
1829+
1830+def install_remote(source):
1831+ """
1832+ Install a file tree from a remote source
1833+
1834+ The specified source should be a url of the form:
1835+ scheme://[host]/path[#[option=value][&...]]
1836+
1837+ Schemes supported are based on this modules submodules
1838+ Options supported are submodule-specific"""
1839+ # We ONLY check for True here because can_handle may return a string
1840+ # explaining why it can't handle a given source.
1841+ handlers = [h for h in plugins() if h.can_handle(source) is True]
1842+ installed_to = None
1843+ for handler in handlers:
1844+ try:
1845+ installed_to = handler.install(source)
1846+ except UnhandledSource:
1847+ pass
1848+ if not installed_to:
1849+ raise UnhandledSource("No handler found for source {}".format(source))
1850+ return installed_to
1851+
1852+
1853+def install_from_config(config_var_name):
1854+ charm_config = config()
1855+ source = charm_config[config_var_name]
1856+ return install_remote(source)
1857+
1858+
1859+def plugins(fetch_handlers=None):
1860+ if not fetch_handlers:
1861+ fetch_handlers = FETCH_HANDLERS
1862+ plugin_list = []
1863+ for handler_name in fetch_handlers:
1864+ package, classname = handler_name.rsplit('.', 1)
1865+ try:
1866+ handler_class = getattr(
1867+ importlib.import_module(package),
1868+ classname)
1869+ plugin_list.append(handler_class())
1870+ except (ImportError, AttributeError):
1871+ # Skip missing plugins so that they can be omitted from
1872+ # installation if desired
1873+ log("FetchHandler {} not found, skipping plugin".format(
1874+ handler_name))
1875+ return plugin_list
1876+
1877+
1878+def _run_apt_command(cmd, fatal=False):
1879+ """
1880+ Run an APT command, checking output and retrying if the fatal flag is set
1881+ to True.
1882+
1883+ :param: cmd: str: The apt command to run.
1884+ :param: fatal: bool: Whether the command's output should be checked and
1885+ retried.
1886+ """
1887+ env = os.environ.copy()
1888+
1889+ if 'DEBIAN_FRONTEND' not in env:
1890+ env['DEBIAN_FRONTEND'] = 'noninteractive'
1891+
1892+ if fatal:
1893+ retry_count = 0
1894+ result = None
1895+
1896+ # If the command is considered "fatal", we need to retry if the apt
1897+ # lock was not acquired.
1898+
1899+ while result is None or result == APT_NO_LOCK:
1900+ try:
1901+ result = subprocess.check_call(cmd, env=env)
1902+ except subprocess.CalledProcessError, e:
1903+ retry_count = retry_count + 1
1904+ if retry_count > APT_NO_LOCK_RETRY_COUNT:
1905+ raise
1906+ result = e.returncode
1907+ log("Couldn't acquire DPKG lock. Will retry in {} seconds."
1908+ "".format(APT_NO_LOCK_RETRY_DELAY))
1909+ time.sleep(APT_NO_LOCK_RETRY_DELAY)
1910+
1911+ else:
1912+ subprocess.call(cmd, env=env)
1913
1914=== added file 'hooks/charmhelpers/fetch/archiveurl.py'
1915--- hooks/charmhelpers/fetch/archiveurl.py 1970-01-01 00:00:00 +0000
1916+++ hooks/charmhelpers/fetch/archiveurl.py 2014-08-21 11:32:42 +0000
1917@@ -0,0 +1,63 @@
1918+import os
1919+import urllib2
1920+import urlparse
1921+
1922+from charmhelpers.fetch import (
1923+ BaseFetchHandler,
1924+ UnhandledSource
1925+)
1926+from charmhelpers.payload.archive import (
1927+ get_archive_handler,
1928+ extract,
1929+)
1930+from charmhelpers.core.host import mkdir
1931+
1932+
1933+class ArchiveUrlFetchHandler(BaseFetchHandler):
1934+ """Handler for archives via generic URLs"""
1935+ def can_handle(self, source):
1936+ url_parts = self.parse_url(source)
1937+ if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
1938+ return "Wrong source type"
1939+ if get_archive_handler(self.base_url(source)):
1940+ return True
1941+ return False
1942+
1943+ def download(self, source, dest):
1944+ # propagate all exceptions
1945+ # URLError, OSError, etc
1946+ proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
1947+ if proto in ('http', 'https'):
1948+ auth, barehost = urllib2.splituser(netloc)
1949+ if auth is not None:
1950+ source = urlparse.urlunparse((proto, barehost, path, params, query, fragment))
1951+ username, password = urllib2.splitpasswd(auth)
1952+ passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
1953+ # Realm is set to None in add_password to force the username and password
1954+ # to be used whatever the realm
1955+ passman.add_password(None, source, username, password)
1956+ authhandler = urllib2.HTTPBasicAuthHandler(passman)
1957+ opener = urllib2.build_opener(authhandler)
1958+ urllib2.install_opener(opener)
1959+ response = urllib2.urlopen(source)
1960+ try:
1961+ with open(dest, 'w') as dest_file:
1962+ dest_file.write(response.read())
1963+ except Exception as e:
1964+ if os.path.isfile(dest):
1965+ os.unlink(dest)
1966+ raise e
1967+
1968+ def install(self, source):
1969+ url_parts = self.parse_url(source)
1970+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
1971+ if not os.path.exists(dest_dir):
1972+ mkdir(dest_dir, perms=0755)
1973+ dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
1974+ try:
1975+ self.download(source, dld_file)
1976+ except urllib2.URLError as e:
1977+ raise UnhandledSource(e.reason)
1978+ except OSError as e:
1979+ raise UnhandledSource(e.strerror)
1980+ return extract(dld_file)
1981
1982=== added file 'hooks/charmhelpers/fetch/bzrurl.py'
1983--- hooks/charmhelpers/fetch/bzrurl.py 1970-01-01 00:00:00 +0000
1984+++ hooks/charmhelpers/fetch/bzrurl.py 2014-08-21 11:32:42 +0000
1985@@ -0,0 +1,50 @@
1986+import os
1987+from charmhelpers.fetch import (
1988+ BaseFetchHandler,
1989+ UnhandledSource
1990+)
1991+from charmhelpers.core.host import mkdir
1992+
try:
    from bzrlib.branch import Branch
except ImportError:
    # bzrlib is not guaranteed to be installed on a fresh unit; pull it
    # in via apt on first import so the handler below can operate.
    # NOTE(review): apt_install here assumes the hook runs as root with
    # working apt — true on a Juju unit, confirm for other contexts.
    from charmhelpers.fetch import apt_install
    apt_install("python-bzrlib")
    from bzrlib.branch import Branch
1999+
2000+
2001+class BzrUrlFetchHandler(BaseFetchHandler):
2002+ """Handler for bazaar branches via generic and lp URLs"""
2003+ def can_handle(self, source):
2004+ url_parts = self.parse_url(source)
2005+ if url_parts.scheme not in ('bzr+ssh', 'lp'):
2006+ return False
2007+ else:
2008+ return True
2009+
2010+ def branch(self, source, dest):
2011+ url_parts = self.parse_url(source)
2012+ # If we use lp:branchname scheme we need to load plugins
2013+ if not self.can_handle(source):
2014+ raise UnhandledSource("Cannot handle {}".format(source))
2015+ if url_parts.scheme == "lp":
2016+ from bzrlib.plugin import load_plugins
2017+ load_plugins()
2018+ try:
2019+ remote_branch = Branch.open(source)
2020+ remote_branch.bzrdir.sprout(dest).open_branch()
2021+ except Exception as e:
2022+ raise e
2023+
2024+ def install(self, source):
2025+ url_parts = self.parse_url(source)
2026+ branch_name = url_parts.path.strip("/").split("/")[-1]
2027+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
2028+ branch_name)
2029+ if not os.path.exists(dest_dir):
2030+ mkdir(dest_dir, perms=0755)
2031+ try:
2032+ self.branch(source, dest_dir)
2033+ except OSError as e:
2034+ raise UnhandledSource(e.strerror)
2035+ return dest_dir

Subscribers

People subscribed via source and target branches

to all changes: