Merge lp:~thumper/charms/trusty/python-django/clean-contrib into lp:charms/python-django

Proposed by Tim Penhey
Status: Merged
Merged at revision: 39
Proposed branch: lp:~thumper/charms/trusty/python-django/clean-contrib
Merge into: lp:charms/python-django
Diff against target: 5075 lines (+0/-4900)
29 files modified
charm-helpers.yaml (+0/-1)
hooks/charmhelpers/contrib/ansible/__init__.py (+0/-165)
hooks/charmhelpers/contrib/charmhelpers/__init__.py (+0/-184)
hooks/charmhelpers/contrib/charmsupport/nrpe.py (+0/-216)
hooks/charmhelpers/contrib/charmsupport/volumes.py (+0/-156)
hooks/charmhelpers/contrib/hahelpers/apache.py (+0/-59)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+0/-183)
hooks/charmhelpers/contrib/jujugui/utils.py (+0/-602)
hooks/charmhelpers/contrib/network/ip.py (+0/-69)
hooks/charmhelpers/contrib/network/ovs/__init__.py (+0/-75)
hooks/charmhelpers/contrib/openstack/alternatives.py (+0/-17)
hooks/charmhelpers/contrib/openstack/context.py (+0/-700)
hooks/charmhelpers/contrib/openstack/neutron.py (+0/-171)
hooks/charmhelpers/contrib/openstack/templates/__init__.py (+0/-2)
hooks/charmhelpers/contrib/openstack/templating.py (+0/-280)
hooks/charmhelpers/contrib/openstack/utils.py (+0/-450)
hooks/charmhelpers/contrib/peerstorage/__init__.py (+0/-83)
hooks/charmhelpers/contrib/python/packages.py (+0/-76)
hooks/charmhelpers/contrib/python/version.py (+0/-18)
hooks/charmhelpers/contrib/saltstack/__init__.py (+0/-102)
hooks/charmhelpers/contrib/ssl/__init__.py (+0/-78)
hooks/charmhelpers/contrib/ssl/service.py (+0/-267)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+0/-387)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+0/-62)
hooks/charmhelpers/contrib/storage/linux/lvm.py (+0/-88)
hooks/charmhelpers/contrib/storage/linux/utils.py (+0/-35)
hooks/charmhelpers/contrib/templating/contexts.py (+0/-104)
hooks/charmhelpers/contrib/templating/pyformat.py (+0/-13)
hooks/charmhelpers/contrib/unison/__init__.py (+0/-257)
To merge this branch: bzr merge lp:~thumper/charms/trusty/python-django/clean-contrib
Reviewer: Tim Van Steenburgh (community), status: Approve
Review via email: mp+260388@code.launchpad.net

Description of the change

This branch removes the charmhelpers/contrib package; it is not used anywhere in the charm.
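
As a quick way to check that claim, a minimal sketch along these lines (not part of this branch; the hooks/ default path is an assumption about the charm layout) walks the charm tree and flags any remaining import of charmhelpers.contrib:

import os
import re

# Matches "import charmhelpers.contrib..." and "from charmhelpers.contrib ...".
CONTRIB_IMPORT = re.compile(r'^\s*(?:from|import)\s+charmhelpers\.contrib')


def find_contrib_imports(root='hooks'):
    """Yield (path, line number, line) for each charmhelpers.contrib import."""
    for dirpath, _, filenames in os.walk(root):
        # Skip the contrib package itself (the code this branch deletes);
        # only importers elsewhere in the charm matter.
        if 'charmhelpers/contrib' in dirpath.replace(os.sep, '/'):
            continue
        for name in filenames:
            if name.endswith('.py'):
                path = os.path.join(dirpath, name)
                with open(path) as source:
                    for lineno, line in enumerate(source, 1):
                        if CONTRIB_IMPORT.match(line):
                            yield path, lineno, line.strip()


if __name__ == '__main__':
    hits = list(find_contrib_imports())
    for path, lineno, line in hits:
        print('{0}:{1}: {2}'.format(path, lineno, line))
    print('{0} charmhelpers.contrib import(s) found'.format(len(hits)))

A run that prints zero hits backs the description, and the charm-helpers.yaml hunk below drops contrib from the include list, so future charm-helpers syncs will not pull the package back in.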

Tim Van Steenburgh (tvansteenburgh) wrote:

+1 LGTM.

review: Approve

Preview Diff

1=== modified file 'charm-helpers.yaml'
2--- charm-helpers.yaml 2013-11-26 17:12:54 +0000
3+++ charm-helpers.yaml 2015-05-27 22:03:53 +0000
4@@ -3,4 +3,3 @@
5 include:
6 - core
7 - fetch
8- - contrib
9
10=== removed directory 'hooks/charmhelpers/contrib'
11=== removed file 'hooks/charmhelpers/contrib/__init__.py'
12=== removed directory 'hooks/charmhelpers/contrib/ansible'
13=== removed file 'hooks/charmhelpers/contrib/ansible/__init__.py'
14--- hooks/charmhelpers/contrib/ansible/__init__.py 2013-11-26 17:12:54 +0000
15+++ hooks/charmhelpers/contrib/ansible/__init__.py 1970-01-01 00:00:00 +0000
16@@ -1,165 +0,0 @@
17-# Copyright 2013 Canonical Ltd.
18-#
19-# Authors:
20-# Charm Helpers Developers <juju@lists.ubuntu.com>
21-"""Charm Helpers ansible - declare the state of your machines.
22-
23-This helper enables you to declare your machine state, rather than
24-program it procedurally (and have to test each change to your procedures).
25-Your install hook can be as simple as:
26-
27-{{{
28-import charmhelpers.contrib.ansible
29-
30-
31-def install():
32- charmhelpers.contrib.ansible.install_ansible_support()
33- charmhelpers.contrib.ansible.apply_playbook('playbooks/install.yaml')
34-}}}
35-
36-and won't need to change (nor will its tests) when you change the machine
37-state.
38-
39-All of your juju config and relation-data are available as template
40-variables within your playbooks and templates. An install playbook looks
41-something like:
42-
43-{{{
44----
45-- hosts: localhost
46- user: root
47-
48- tasks:
49- - name: Add private repositories.
50- template:
51- src: ../templates/private-repositories.list.jinja2
52- dest: /etc/apt/sources.list.d/private.list
53-
54- - name: Update the cache.
55- apt: update_cache=yes
56-
57- - name: Install dependencies.
58- apt: pkg={{ item }}
59- with_items:
60- - python-mimeparse
61- - python-webob
62- - sunburnt
63-
64- - name: Setup groups.
65- group: name={{ item.name }} gid={{ item.gid }}
66- with_items:
67- - { name: 'deploy_user', gid: 1800 }
68- - { name: 'service_user', gid: 1500 }
69-
70- ...
71-}}}
72-
73-Read more online about playbooks[1] and standard ansible modules[2].
74-
75-[1] http://www.ansibleworks.com/docs/playbooks.html
76-[2] http://www.ansibleworks.com/docs/modules.html
77-"""
78-import os
79-import subprocess
80-
81-import charmhelpers.contrib.templating.contexts
82-import charmhelpers.core.host
83-import charmhelpers.core.hookenv
84-import charmhelpers.fetch
85-
86-
87-charm_dir = os.environ.get('CHARM_DIR', '')
88-ansible_hosts_path = '/etc/ansible/hosts'
89-# Ansible will automatically include any vars in the following
90-# file in its inventory when run locally.
91-ansible_vars_path = '/etc/ansible/host_vars/localhost'
92-
93-
94-def install_ansible_support(from_ppa=True):
95- """Installs the ansible package.
96-
97- By default it is installed from the PPA [1] linked from
98- the ansible website [2].
99-
100- [1] https://launchpad.net/~rquillo/+archive/ansible
101- [2] http://www.ansibleworks.com/docs/gettingstarted.html#ubuntu-and-debian
102-
103- If from_ppa is false, you must ensure that the package is available
104- from a configured repository.
105- """
106- if from_ppa:
107- charmhelpers.fetch.add_source('ppa:rquillo/ansible')
108- charmhelpers.fetch.apt_update(fatal=True)
109- charmhelpers.fetch.apt_install('ansible')
110- with open(ansible_hosts_path, 'w+') as hosts_file:
111- hosts_file.write('localhost ansible_connection=local')
112-
113-
114-def apply_playbook(playbook, tags=None):
115- tags = tags or []
116- tags = ",".join(tags)
117- charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
118- ansible_vars_path, namespace_separator='__',
119- allow_hyphens_in_keys=False)
120- call = [
121- 'ansible-playbook',
122- '-c',
123- 'local',
124- playbook,
125- ]
126- if tags:
127- call.extend(['--tags', '{}'.format(tags)])
128- subprocess.check_call(call)
129-
130-
131-class AnsibleHooks(charmhelpers.core.hookenv.Hooks):
132- """Run a playbook with the hook-name as the tag.
133-
134- This helper builds on the standard hookenv.Hooks helper,
135- but additionally runs the playbook with the hook-name specified
136- using --tags (ie. running all the tasks tagged with the hook-name).
137-
138- Example:
139- hooks = AnsibleHooks(playbook_path='playbooks/my_machine_state.yaml')
140-
141- # All the tasks within my_machine_state.yaml tagged with 'install'
142- # will be run automatically after do_custom_work()
143- @hooks.hook()
144- def install():
145- do_custom_work()
146-
147- # For most of your hooks, you won't need to do anything other
148- # than run the tagged tasks for the hook:
149- @hooks.hook('config-changed', 'start', 'stop')
150- def just_use_playbook():
151- pass
152-
153- # As a convenience, you can avoid the above noop function by specifying
154- # the hooks which are handled by ansible-only and they'll be registered
155- # for you:
156- # hooks = AnsibleHooks(
157- # 'playbooks/my_machine_state.yaml',
158- # default_hooks=['config-changed', 'start', 'stop'])
159-
160- if __name__ == "__main__":
161- # execute a hook based on the name the program is called by
162- hooks.execute(sys.argv)
163- """
164-
165- def __init__(self, playbook_path, default_hooks=None):
166- """Register any hooks handled by ansible."""
167- super(AnsibleHooks, self).__init__()
168-
169- self.playbook_path = playbook_path
170-
171- default_hooks = default_hooks or []
172- noop = lambda *args, **kwargs: None
173- for hook in default_hooks:
174- self.register(hook, noop)
175-
176- def execute(self, args):
177- """Execute the hook followed by the playbook using the hook as tag."""
178- super(AnsibleHooks, self).execute(args)
179- hook_name = os.path.basename(args[0])
180- charmhelpers.contrib.ansible.apply_playbook(
181- self.playbook_path, tags=[hook_name])
182
183=== removed directory 'hooks/charmhelpers/contrib/charmhelpers'
184=== removed file 'hooks/charmhelpers/contrib/charmhelpers/__init__.py'
185--- hooks/charmhelpers/contrib/charmhelpers/__init__.py 2013-11-26 17:12:54 +0000
186+++ hooks/charmhelpers/contrib/charmhelpers/__init__.py 1970-01-01 00:00:00 +0000
187@@ -1,184 +0,0 @@
188-# Copyright 2012 Canonical Ltd. This software is licensed under the
189-# GNU Affero General Public License version 3 (see the file LICENSE).
190-
191-import warnings
192-warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning)
193-
194-"""Helper functions for writing Juju charms in Python."""
195-
196-__metaclass__ = type
197-__all__ = [
198- #'get_config', # core.hookenv.config()
199- #'log', # core.hookenv.log()
200- #'log_entry', # core.hookenv.log()
201- #'log_exit', # core.hookenv.log()
202- #'relation_get', # core.hookenv.relation_get()
203- #'relation_set', # core.hookenv.relation_set()
204- #'relation_ids', # core.hookenv.relation_ids()
205- #'relation_list', # core.hookenv.relation_units()
206- #'config_get', # core.hookenv.config()
207- #'unit_get', # core.hookenv.unit_get()
208- #'open_port', # core.hookenv.open_port()
209- #'close_port', # core.hookenv.close_port()
210- #'service_control', # core.host.service()
211- 'unit_info', # client-side, NOT IMPLEMENTED
212- 'wait_for_machine', # client-side, NOT IMPLEMENTED
213- 'wait_for_page_contents', # client-side, NOT IMPLEMENTED
214- 'wait_for_relation', # client-side, NOT IMPLEMENTED
215- 'wait_for_unit', # client-side, NOT IMPLEMENTED
216-]
217-
218-import operator
219-from shelltoolbox import (
220- command,
221-)
222-import tempfile
223-import time
224-import urllib2
225-import yaml
226-
227-SLEEP_AMOUNT = 0.1
228-# We create a juju_status Command here because it makes testing much,
229-# much easier.
230-juju_status = lambda: command('juju')('status')
231-
232-# re-implemented as charmhelpers.fetch.configure_sources()
233-#def configure_source(update=False):
234-# source = config_get('source')
235-# if ((source.startswith('ppa:') or
236-# source.startswith('cloud:') or
237-# source.startswith('http:'))):
238-# run('add-apt-repository', source)
239-# if source.startswith("http:"):
240-# run('apt-key', 'import', config_get('key'))
241-# if update:
242-# run('apt-get', 'update')
243-
244-
245-# DEPRECATED: client-side only
246-def make_charm_config_file(charm_config):
247- charm_config_file = tempfile.NamedTemporaryFile()
248- charm_config_file.write(yaml.dump(charm_config))
249- charm_config_file.flush()
250- # The NamedTemporaryFile instance is returned instead of just the name
251- # because we want to take advantage of garbage collection-triggered
252- # deletion of the temp file when it goes out of scope in the caller.
253- return charm_config_file
254-
255-
256-# DEPRECATED: client-side only
257-def unit_info(service_name, item_name, data=None, unit=None):
258- if data is None:
259- data = yaml.safe_load(juju_status())
260- service = data['services'].get(service_name)
261- if service is None:
262- # XXX 2012-02-08 gmb:
263- # This allows us to cope with the race condition that we
264- # have between deploying a service and having it come up in
265- # `juju status`. We could probably do with cleaning it up so
266- # that it fails a bit more noisily after a while.
267- return ''
268- units = service['units']
269- if unit is not None:
270- item = units[unit][item_name]
271- else:
272- # It might seem odd to sort the units here, but we do it to
273- # ensure that when no unit is specified, the first unit for the
274- # service (or at least the one with the lowest number) is the
275- # one whose data gets returned.
276- sorted_unit_names = sorted(units.keys())
277- item = units[sorted_unit_names[0]][item_name]
278- return item
279-
280-
281-# DEPRECATED: client-side only
282-def get_machine_data():
283- return yaml.safe_load(juju_status())['machines']
284-
285-
286-# DEPRECATED: client-side only
287-def wait_for_machine(num_machines=1, timeout=300):
288- """Wait `timeout` seconds for `num_machines` machines to come up.
289-
290- This wait_for... function can be called by other wait_for functions
291- whose timeouts might be too short in situations where only a bare
292- Juju setup has been bootstrapped.
293-
294- :return: A tuple of (num_machines, time_taken). This is used for
295- testing.
296- """
297- # You may think this is a hack, and you'd be right. The easiest way
298- # to tell what environment we're working in (LXC vs EC2) is to check
299- # the dns-name of the first machine. If it's localhost we're in LXC
300- # and we can just return here.
301- if get_machine_data()[0]['dns-name'] == 'localhost':
302- return 1, 0
303- start_time = time.time()
304- while True:
305- # Drop the first machine, since it's the Zookeeper and that's
306- # not a machine that we need to wait for. This will only work
307- # for EC2 environments, which is why we return early above if
308- # we're in LXC.
309- machine_data = get_machine_data()
310- non_zookeeper_machines = [
311- machine_data[key] for key in machine_data.keys()[1:]]
312- if len(non_zookeeper_machines) >= num_machines:
313- all_machines_running = True
314- for machine in non_zookeeper_machines:
315- if machine.get('instance-state') != 'running':
316- all_machines_running = False
317- break
318- if all_machines_running:
319- break
320- if time.time() - start_time >= timeout:
321- raise RuntimeError('timeout waiting for service to start')
322- time.sleep(SLEEP_AMOUNT)
323- return num_machines, time.time() - start_time
324-
325-
326-# DEPRECATED: client-side only
327-def wait_for_unit(service_name, timeout=480):
328- """Wait `timeout` seconds for a given service name to come up."""
329- wait_for_machine(num_machines=1)
330- start_time = time.time()
331- while True:
332- state = unit_info(service_name, 'agent-state')
333- if 'error' in state or state == 'started':
334- break
335- if time.time() - start_time >= timeout:
336- raise RuntimeError('timeout waiting for service to start')
337- time.sleep(SLEEP_AMOUNT)
338- if state != 'started':
339- raise RuntimeError('unit did not start, agent-state: ' + state)
340-
341-
342-# DEPRECATED: client-side only
343-def wait_for_relation(service_name, relation_name, timeout=120):
344- """Wait `timeout` seconds for a given relation to come up."""
345- start_time = time.time()
346- while True:
347- relation = unit_info(service_name, 'relations').get(relation_name)
348- if relation is not None and relation['state'] == 'up':
349- break
350- if time.time() - start_time >= timeout:
351- raise RuntimeError('timeout waiting for relation to be up')
352- time.sleep(SLEEP_AMOUNT)
353-
354-
355-# DEPRECATED: client-side only
356-def wait_for_page_contents(url, contents, timeout=120, validate=None):
357- if validate is None:
358- validate = operator.contains
359- start_time = time.time()
360- while True:
361- try:
362- stream = urllib2.urlopen(url)
363- except (urllib2.HTTPError, urllib2.URLError):
364- pass
365- else:
366- page = stream.read()
367- if validate(page, contents):
368- return page
369- if time.time() - start_time >= timeout:
370- raise RuntimeError('timeout waiting for contents of ' + url)
371- time.sleep(SLEEP_AMOUNT)
372
373=== removed directory 'hooks/charmhelpers/contrib/charmsupport'
374=== removed file 'hooks/charmhelpers/contrib/charmsupport/__init__.py'
375=== removed file 'hooks/charmhelpers/contrib/charmsupport/nrpe.py'
376--- hooks/charmhelpers/contrib/charmsupport/nrpe.py 2013-11-26 17:12:54 +0000
377+++ hooks/charmhelpers/contrib/charmsupport/nrpe.py 1970-01-01 00:00:00 +0000
378@@ -1,216 +0,0 @@
379-"""Compatibility with the nrpe-external-master charm"""
380-# Copyright 2012 Canonical Ltd.
381-#
382-# Authors:
383-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
384-
385-import subprocess
386-import pwd
387-import grp
388-import os
389-import re
390-import shlex
391-import yaml
392-
393-from charmhelpers.core.hookenv import (
394- config,
395- local_unit,
396- log,
397- relation_ids,
398- relation_set,
399-)
400-
401-from charmhelpers.core.host import service
402-
403-# This module adds compatibility with the nrpe-external-master and plain nrpe
404-# subordinate charms. To use it in your charm:
405-#
406-# 1. Update metadata.yaml
407-#
408-# provides:
409-# (...)
410-# nrpe-external-master:
411-# interface: nrpe-external-master
412-# scope: container
413-#
414-# and/or
415-#
416-# provides:
417-# (...)
418-# local-monitors:
419-# interface: local-monitors
420-# scope: container
421-
422-#
423-# 2. Add the following to config.yaml
424-#
425-# nagios_context:
426-# default: "juju"
427-# type: string
428-# description: |
429-# Used by the nrpe subordinate charms.
430-# A string that will be prepended to instance name to set the host name
431-# in nagios. So for instance the hostname would be something like:
432-# juju-myservice-0
433-# If you're running multiple environments with the same services in them
434-# this allows you to differentiate between them.
435-#
436-# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
437-#
438-# 4. Update your hooks.py with something like this:
439-#
440-# from charmsupport.nrpe import NRPE
441-# (...)
442-# def update_nrpe_config():
443-# nrpe_compat = NRPE()
444-# nrpe_compat.add_check(
445-# shortname = "myservice",
446-# description = "Check MyService",
447-# check_cmd = "check_http -w 2 -c 10 http://localhost"
448-# )
449-# nrpe_compat.add_check(
450-# "myservice_other",
451-# "Check for widget failures",
452-# check_cmd = "/srv/myapp/scripts/widget_check"
453-# )
454-# nrpe_compat.write()
455-#
456-# def config_changed():
457-# (...)
458-# update_nrpe_config()
459-#
460-# def nrpe_external_master_relation_changed():
461-# update_nrpe_config()
462-#
463-# def local_monitors_relation_changed():
464-# update_nrpe_config()
465-#
466-# 5. ln -s hooks.py nrpe-external-master-relation-changed
467-# ln -s hooks.py local-monitors-relation-changed
468-
469-
470-class CheckException(Exception):
471- pass
472-
473-
474-class Check(object):
475- shortname_re = '[A-Za-z0-9-_]+$'
476- service_template = ("""
477-#---------------------------------------------------
478-# This file is Juju managed
479-#---------------------------------------------------
480-define service {{
481- use active-service
482- host_name {nagios_hostname}
483- service_description {nagios_hostname}[{shortname}] """
484- """{description}
485- check_command check_nrpe!{command}
486- servicegroups {nagios_servicegroup}
487-}}
488-""")
489-
490- def __init__(self, shortname, description, check_cmd):
491- super(Check, self).__init__()
492- # XXX: could be better to calculate this from the service name
493- if not re.match(self.shortname_re, shortname):
494- raise CheckException("shortname must match {}".format(
495- Check.shortname_re))
496- self.shortname = shortname
497- self.command = "check_{}".format(shortname)
498- # Note: a set of invalid characters is defined by the
499- # Nagios server config
500- # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
501- self.description = description
502- self.check_cmd = self._locate_cmd(check_cmd)
503-
504- def _locate_cmd(self, check_cmd):
505- search_path = (
506- '/usr/lib/nagios/plugins',
507- '/usr/local/lib/nagios/plugins',
508- )
509- parts = shlex.split(check_cmd)
510- for path in search_path:
511- if os.path.exists(os.path.join(path, parts[0])):
512- command = os.path.join(path, parts[0])
513- if len(parts) > 1:
514- command += " " + " ".join(parts[1:])
515- return command
516- log('Check command not found: {}'.format(parts[0]))
517- return ''
518-
519- def write(self, nagios_context, hostname):
520- nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
521- self.command)
522- with open(nrpe_check_file, 'w') as nrpe_check_config:
523- nrpe_check_config.write("# check {}\n".format(self.shortname))
524- nrpe_check_config.write("command[{}]={}\n".format(
525- self.command, self.check_cmd))
526-
527- if not os.path.exists(NRPE.nagios_exportdir):
528- log('Not writing service config as {} is not accessible'.format(
529- NRPE.nagios_exportdir))
530- else:
531- self.write_service_config(nagios_context, hostname)
532-
533- def write_service_config(self, nagios_context, hostname):
534- for f in os.listdir(NRPE.nagios_exportdir):
535- if re.search('.*{}.cfg'.format(self.command), f):
536- os.remove(os.path.join(NRPE.nagios_exportdir, f))
537-
538- templ_vars = {
539- 'nagios_hostname': hostname,
540- 'nagios_servicegroup': nagios_context,
541- 'description': self.description,
542- 'shortname': self.shortname,
543- 'command': self.command,
544- }
545- nrpe_service_text = Check.service_template.format(**templ_vars)
546- nrpe_service_file = '{}/service__{}_{}.cfg'.format(
547- NRPE.nagios_exportdir, hostname, self.command)
548- with open(nrpe_service_file, 'w') as nrpe_service_config:
549- nrpe_service_config.write(str(nrpe_service_text))
550-
551- def run(self):
552- subprocess.call(self.check_cmd)
553-
554-
555-class NRPE(object):
556- nagios_logdir = '/var/log/nagios'
557- nagios_exportdir = '/var/lib/nagios/export'
558- nrpe_confdir = '/etc/nagios/nrpe.d'
559-
560- def __init__(self):
561- super(NRPE, self).__init__()
562- self.config = config()
563- self.nagios_context = self.config['nagios_context']
564- self.unit_name = local_unit().replace('/', '-')
565- self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
566- self.checks = []
567-
568- def add_check(self, *args, **kwargs):
569- self.checks.append(Check(*args, **kwargs))
570-
571- def write(self):
572- try:
573- nagios_uid = pwd.getpwnam('nagios').pw_uid
574- nagios_gid = grp.getgrnam('nagios').gr_gid
575- except:
576- log("Nagios user not set up, nrpe checks not updated")
577- return
578-
579- if not os.path.exists(NRPE.nagios_logdir):
580- os.mkdir(NRPE.nagios_logdir)
581- os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
582-
583- nrpe_monitors = {}
584- monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
585- for nrpecheck in self.checks:
586- nrpecheck.write(self.nagios_context, self.hostname)
587- nrpe_monitors[nrpecheck.shortname] = {
588- "command": nrpecheck.command,
589- }
590-
591- service('restart', 'nagios-nrpe-server')
592-
593- for rid in relation_ids("local-monitors"):
594- relation_set(relation_id=rid, monitors=yaml.dump(monitors))
595
596=== removed file 'hooks/charmhelpers/contrib/charmsupport/volumes.py'
597--- hooks/charmhelpers/contrib/charmsupport/volumes.py 2013-11-26 17:12:54 +0000
598+++ hooks/charmhelpers/contrib/charmsupport/volumes.py 1970-01-01 00:00:00 +0000
599@@ -1,156 +0,0 @@
600-'''
601-Functions for managing volumes in juju units. One volume is supported per unit.
602-Subordinates may have their own storage, provided it is on its own partition.
603-
604-Configuration stanzas:
605- volume-ephemeral:
606- type: boolean
607- default: true
608- description: >
609- If false, a volume is mounted as sepecified in "volume-map"
610- If true, ephemeral storage will be used, meaning that log data
611- will only exist as long as the machine. YOU HAVE BEEN WARNED.
612- volume-map:
613- type: string
614- default: {}
615- description: >
616- YAML map of units to device names, e.g:
617- "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
618- Service units will raise a configure-error if volume-ephemeral
619- is 'true' and no volume-map value is set. Use 'juju set' to set a
620- value and 'juju resolved' to complete configuration.
621-
622-Usage:
623- from charmsupport.volumes import configure_volume, VolumeConfigurationError
624- from charmsupport.hookenv import log, ERROR
625- def post_mount_hook():
626- stop_service('myservice')
627- def post_mount_hook():
628- start_service('myservice')
629-
630- if __name__ == '__main__':
631- try:
632- configure_volume(before_change=pre_mount_hook,
633- after_change=post_mount_hook)
634- except VolumeConfigurationError:
635- log('Storage could not be configured', ERROR)
636-'''
637-
638-# XXX: Known limitations
639-# - fstab is neither consulted nor updated
640-
641-import os
642-from charmhelpers.core import hookenv
643-from charmhelpers.core import host
644-import yaml
645-
646-
647-MOUNT_BASE = '/srv/juju/volumes'
648-
649-
650-class VolumeConfigurationError(Exception):
651- '''Volume configuration data is missing or invalid'''
652- pass
653-
654-
655-def get_config():
656- '''Gather and sanity-check volume configuration data'''
657- volume_config = {}
658- config = hookenv.config()
659-
660- errors = False
661-
662- if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
663- volume_config['ephemeral'] = True
664- else:
665- volume_config['ephemeral'] = False
666-
667- try:
668- volume_map = yaml.safe_load(config.get('volume-map', '{}'))
669- except yaml.YAMLError as e:
670- hookenv.log("Error parsing YAML volume-map: {}".format(e),
671- hookenv.ERROR)
672- errors = True
673- if volume_map is None:
674- # probably an empty string
675- volume_map = {}
676- elif not isinstance(volume_map, dict):
677- hookenv.log("Volume-map should be a dictionary, not {}".format(
678- type(volume_map)))
679- errors = True
680-
681- volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
682- if volume_config['device'] and volume_config['ephemeral']:
683- # asked for ephemeral storage but also defined a volume ID
684- hookenv.log('A volume is defined for this unit, but ephemeral '
685- 'storage was requested', hookenv.ERROR)
686- errors = True
687- elif not volume_config['device'] and not volume_config['ephemeral']:
688- # asked for permanent storage but did not define volume ID
689- hookenv.log('Ephemeral storage was requested, but there is no volume '
690- 'defined for this unit.', hookenv.ERROR)
691- errors = True
692-
693- unit_mount_name = hookenv.local_unit().replace('/', '-')
694- volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
695-
696- if errors:
697- return None
698- return volume_config
699-
700-
701-def mount_volume(config):
702- if os.path.exists(config['mountpoint']):
703- if not os.path.isdir(config['mountpoint']):
704- hookenv.log('Not a directory: {}'.format(config['mountpoint']))
705- raise VolumeConfigurationError()
706- else:
707- host.mkdir(config['mountpoint'])
708- if os.path.ismount(config['mountpoint']):
709- unmount_volume(config)
710- if not host.mount(config['device'], config['mountpoint'], persist=True):
711- raise VolumeConfigurationError()
712-
713-
714-def unmount_volume(config):
715- if os.path.ismount(config['mountpoint']):
716- if not host.umount(config['mountpoint'], persist=True):
717- raise VolumeConfigurationError()
718-
719-
720-def managed_mounts():
721- '''List of all mounted managed volumes'''
722- return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
723-
724-
725-def configure_volume(before_change=lambda: None, after_change=lambda: None):
726- '''Set up storage (or don't) according to the charm's volume configuration.
727- Returns the mount point or "ephemeral". before_change and after_change
728- are optional functions to be called if the volume configuration changes.
729- '''
730-
731- config = get_config()
732- if not config:
733- hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
734- raise VolumeConfigurationError()
735-
736- if config['ephemeral']:
737- if os.path.ismount(config['mountpoint']):
738- before_change()
739- unmount_volume(config)
740- after_change()
741- return 'ephemeral'
742- else:
743- # persistent storage
744- if os.path.ismount(config['mountpoint']):
745- mounts = dict(managed_mounts())
746- if mounts.get(config['mountpoint']) != config['device']:
747- before_change()
748- unmount_volume(config)
749- mount_volume(config)
750- after_change()
751- else:
752- before_change()
753- mount_volume(config)
754- after_change()
755- return config['mountpoint']
756
757=== removed directory 'hooks/charmhelpers/contrib/hahelpers'
758=== removed file 'hooks/charmhelpers/contrib/hahelpers/__init__.py'
759=== removed file 'hooks/charmhelpers/contrib/hahelpers/apache.py'
760--- hooks/charmhelpers/contrib/hahelpers/apache.py 2014-05-09 20:11:59 +0000
761+++ hooks/charmhelpers/contrib/hahelpers/apache.py 1970-01-01 00:00:00 +0000
762@@ -1,59 +0,0 @@
763-#
764-# Copyright 2012 Canonical Ltd.
765-#
766-# This file is sourced from lp:openstack-charm-helpers
767-#
768-# Authors:
769-# James Page <james.page@ubuntu.com>
770-# Adam Gandelman <adamg@ubuntu.com>
771-#
772-
773-import subprocess
774-
775-from charmhelpers.core.hookenv import (
776- config as config_get,
777- relation_get,
778- relation_ids,
779- related_units as relation_list,
780- log,
781- INFO,
782-)
783-
784-
785-def get_cert():
786- cert = config_get('ssl_cert')
787- key = config_get('ssl_key')
788- if not (cert and key):
789- log("Inspecting identity-service relations for SSL certificate.",
790- level=INFO)
791- cert = key = None
792- for r_id in relation_ids('identity-service'):
793- for unit in relation_list(r_id):
794- if not cert:
795- cert = relation_get('ssl_cert',
796- rid=r_id, unit=unit)
797- if not key:
798- key = relation_get('ssl_key',
799- rid=r_id, unit=unit)
800- return (cert, key)
801-
802-
803-def get_ca_cert():
804- ca_cert = config_get('ssl_ca')
805- if ca_cert is None:
806- log("Inspecting identity-service relations for CA SSL certificate.",
807- level=INFO)
808- for r_id in relation_ids('identity-service'):
809- for unit in relation_list(r_id):
810- if ca_cert is None:
811- ca_cert = relation_get('ca_cert',
812- rid=r_id, unit=unit)
813- return ca_cert
814-
815-
816-def install_ca_cert(ca_cert):
817- if ca_cert:
818- with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
819- 'w') as crt:
820- crt.write(ca_cert)
821- subprocess.check_call(['update-ca-certificates', '--fresh'])
822
823=== removed file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
824--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-05-09 20:11:59 +0000
825+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000
826@@ -1,183 +0,0 @@
827-#
828-# Copyright 2012 Canonical Ltd.
829-#
830-# Authors:
831-# James Page <james.page@ubuntu.com>
832-# Adam Gandelman <adamg@ubuntu.com>
833-#
834-
835-import subprocess
836-import os
837-
838-from socket import gethostname as get_unit_hostname
839-
840-from charmhelpers.core.hookenv import (
841- log,
842- relation_ids,
843- related_units as relation_list,
844- relation_get,
845- config as config_get,
846- INFO,
847- ERROR,
848- unit_get,
849-)
850-
851-
852-class HAIncompleteConfig(Exception):
853- pass
854-
855-
856-def is_clustered():
857- for r_id in (relation_ids('ha') or []):
858- for unit in (relation_list(r_id) or []):
859- clustered = relation_get('clustered',
860- rid=r_id,
861- unit=unit)
862- if clustered:
863- return True
864- return False
865-
866-
867-def is_leader(resource):
868- cmd = [
869- "crm", "resource",
870- "show", resource
871- ]
872- try:
873- status = subprocess.check_output(cmd)
874- except subprocess.CalledProcessError:
875- return False
876- else:
877- if get_unit_hostname() in status:
878- return True
879- else:
880- return False
881-
882-
883-def peer_units():
884- peers = []
885- for r_id in (relation_ids('cluster') or []):
886- for unit in (relation_list(r_id) or []):
887- peers.append(unit)
888- return peers
889-
890-
891-def oldest_peer(peers):
892- local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
893- for peer in peers:
894- remote_unit_no = int(peer.split('/')[1])
895- if remote_unit_no < local_unit_no:
896- return False
897- return True
898-
899-
900-def eligible_leader(resource):
901- if is_clustered():
902- if not is_leader(resource):
903- log('Deferring action to CRM leader.', level=INFO)
904- return False
905- else:
906- peers = peer_units()
907- if peers and not oldest_peer(peers):
908- log('Deferring action to oldest service unit.', level=INFO)
909- return False
910- return True
911-
912-
913-def https():
914- '''
915- Determines whether enough data has been provided in configuration
916- or relation data to configure HTTPS
917- .
918- returns: boolean
919- '''
920- if config_get('use-https') == "yes":
921- return True
922- if config_get('ssl_cert') and config_get('ssl_key'):
923- return True
924- for r_id in relation_ids('identity-service'):
925- for unit in relation_list(r_id):
926- rel_state = [
927- relation_get('https_keystone', rid=r_id, unit=unit),
928- relation_get('ssl_cert', rid=r_id, unit=unit),
929- relation_get('ssl_key', rid=r_id, unit=unit),
930- relation_get('ca_cert', rid=r_id, unit=unit),
931- ]
932- # NOTE: works around (LP: #1203241)
933- if (None not in rel_state) and ('' not in rel_state):
934- return True
935- return False
936-
937-
938-def determine_api_port(public_port):
939- '''
940- Determine correct API server listening port based on
941- existence of HTTPS reverse proxy and/or haproxy.
942-
943- public_port: int: standard public port for given service
944-
945- returns: int: the correct listening port for the API service
946- '''
947- i = 0
948- if len(peer_units()) > 0 or is_clustered():
949- i += 1
950- if https():
951- i += 1
952- return public_port - (i * 10)
953-
954-
955-def determine_apache_port(public_port):
956- '''
957- Description: Determine correct apache listening port based on public IP +
958- state of the cluster.
959-
960- public_port: int: standard public port for given service
961-
962- returns: int: the correct listening port for the HAProxy service
963- '''
964- i = 0
965- if len(peer_units()) > 0 or is_clustered():
966- i += 1
967- return public_port - (i * 10)
968-
969-
970-def get_hacluster_config():
971- '''
972- Obtains all relevant configuration from charm configuration required
973- for initiating a relation to hacluster:
974-
975- ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
976-
977- returns: dict: A dict containing settings keyed by setting name.
978- raises: HAIncompleteConfig if settings are missing.
979- '''
980- settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
981- conf = {}
982- for setting in settings:
983- conf[setting] = config_get(setting)
984- missing = []
985- [missing.append(s) for s, v in conf.iteritems() if v is None]
986- if missing:
987- log('Insufficient config data to configure hacluster.', level=ERROR)
988- raise HAIncompleteConfig
989- return conf
990-
991-
992-def canonical_url(configs, vip_setting='vip'):
993- '''
994- Returns the correct HTTP URL to this host given the state of HTTPS
995- configuration and hacluster.
996-
997- :configs : OSTemplateRenderer: A config tempating object to inspect for
998- a complete https context.
999- :vip_setting: str: Setting in charm config that specifies
1000- VIP address.
1001- '''
1002- scheme = 'http'
1003- if 'https' in configs.complete_contexts():
1004- scheme = 'https'
1005- if is_clustered():
1006- addr = config_get(vip_setting)
1007- else:
1008- addr = unit_get('private-address')
1009- return '%s://%s' % (scheme, addr)
1010
1011=== removed directory 'hooks/charmhelpers/contrib/jujugui'
1012=== removed file 'hooks/charmhelpers/contrib/jujugui/__init__.py'
1013=== removed file 'hooks/charmhelpers/contrib/jujugui/utils.py'
1014--- hooks/charmhelpers/contrib/jujugui/utils.py 2013-11-26 17:12:54 +0000
1015+++ hooks/charmhelpers/contrib/jujugui/utils.py 1970-01-01 00:00:00 +0000
1016@@ -1,602 +0,0 @@
1017-"""Juju GUI charm utilities."""
1018-
1019-__all__ = [
1020- 'AGENT',
1021- 'APACHE',
1022- 'API_PORT',
1023- 'CURRENT_DIR',
1024- 'HAPROXY',
1025- 'IMPROV',
1026- 'JUJU_DIR',
1027- 'JUJU_GUI_DIR',
1028- 'JUJU_GUI_SITE',
1029- 'JUJU_PEM',
1030- 'WEB_PORT',
1031- 'bzr_checkout',
1032- 'chain',
1033- 'cmd_log',
1034- 'fetch_api',
1035- 'fetch_gui',
1036- 'find_missing_packages',
1037- 'first_path_in_dir',
1038- 'get_api_address',
1039- 'get_npm_cache_archive_url',
1040- 'get_release_file_url',
1041- 'get_staging_dependencies',
1042- 'get_zookeeper_address',
1043- 'legacy_juju',
1044- 'log_hook',
1045- 'merge',
1046- 'parse_source',
1047- 'prime_npm_cache',
1048- 'render_to_file',
1049- 'save_or_create_certificates',
1050- 'setup_apache',
1051- 'setup_gui',
1052- 'start_agent',
1053- 'start_gui',
1054- 'start_improv',
1055- 'write_apache_config',
1056-]
1057-
1058-from contextlib import contextmanager
1059-import errno
1060-import json
1061-import os
1062-import logging
1063-import shutil
1064-from subprocess import CalledProcessError
1065-import tempfile
1066-from urlparse import urlparse
1067-
1068-import apt
1069-import tempita
1070-
1071-from launchpadlib.launchpad import Launchpad
1072-from shelltoolbox import (
1073- Serializer,
1074- apt_get_install,
1075- command,
1076- environ,
1077- install_extra_repositories,
1078- run,
1079- script_name,
1080- search_file,
1081- su,
1082-)
1083-from charmhelpers.core.host import (
1084- service_start,
1085-)
1086-from charmhelpers.core.hookenv import (
1087- log,
1088- config,
1089- unit_get,
1090-)
1091-
1092-
1093-AGENT = 'juju-api-agent'
1094-APACHE = 'apache2'
1095-IMPROV = 'juju-api-improv'
1096-HAPROXY = 'haproxy'
1097-
1098-API_PORT = 8080
1099-WEB_PORT = 8000
1100-
1101-CURRENT_DIR = os.getcwd()
1102-JUJU_DIR = os.path.join(CURRENT_DIR, 'juju')
1103-JUJU_GUI_DIR = os.path.join(CURRENT_DIR, 'juju-gui')
1104-JUJU_GUI_SITE = '/etc/apache2/sites-available/juju-gui'
1105-JUJU_GUI_PORTS = '/etc/apache2/ports.conf'
1106-JUJU_PEM = 'juju.includes-private-key.pem'
1107-BUILD_REPOSITORIES = ('ppa:chris-lea/node.js-legacy',)
1108-DEB_BUILD_DEPENDENCIES = (
1109- 'bzr', 'imagemagick', 'make', 'nodejs', 'npm',
1110-)
1111-DEB_STAGE_DEPENDENCIES = (
1112- 'zookeeper',
1113-)
1114-
1115-
1116-# Store the configuration from on invocation to the next.
1117-config_json = Serializer('/tmp/config.json')
1118-# Bazaar checkout command.
1119-bzr_checkout = command('bzr', 'co', '--lightweight')
1120-# Whether or not the charm is deployed using juju-core.
1121-# If juju-core has been used to deploy the charm, an agent.conf file must
1122-# be present in the charm parent directory.
1123-legacy_juju = lambda: not os.path.exists(
1124- os.path.join(CURRENT_DIR, '..', 'agent.conf'))
1125-
1126-
1127-def _get_build_dependencies():
1128- """Install deb dependencies for building."""
1129- log('Installing build dependencies.')
1130- cmd_log(install_extra_repositories(*BUILD_REPOSITORIES))
1131- cmd_log(apt_get_install(*DEB_BUILD_DEPENDENCIES))
1132-
1133-
1134-def get_api_address(unit_dir):
1135- """Return the Juju API address stored in the uniter agent.conf file."""
1136- import yaml # python-yaml is only installed if juju-core is used.
1137- # XXX 2013-03-27 frankban bug=1161443:
1138- # currently the uniter agent.conf file does not include the API
1139- # address. For now retrieve it from the machine agent file.
1140- base_dir = os.path.abspath(os.path.join(unit_dir, '..'))
1141- for dirname in os.listdir(base_dir):
1142- if dirname.startswith('machine-'):
1143- agent_conf = os.path.join(base_dir, dirname, 'agent.conf')
1144- break
1145- else:
1146- raise IOError('Juju agent configuration file not found.')
1147- contents = yaml.load(open(agent_conf))
1148- return contents['apiinfo']['addrs'][0]
1149-
1150-
1151-def get_staging_dependencies():
1152- """Install deb dependencies for the stage (improv) environment."""
1153- log('Installing stage dependencies.')
1154- cmd_log(apt_get_install(*DEB_STAGE_DEPENDENCIES))
1155-
1156-
1157-def first_path_in_dir(directory):
1158- """Return the full path of the first file/dir in *directory*."""
1159- return os.path.join(directory, os.listdir(directory)[0])
1160-
1161-
1162-def _get_by_attr(collection, attr, value):
1163- """Return the first item in collection having attr == value.
1164-
1165- Return None if the item is not found.
1166- """
1167- for item in collection:
1168- if getattr(item, attr) == value:
1169- return item
1170-
1171-
1172-def get_release_file_url(project, series_name, release_version):
1173- """Return the URL of the release file hosted in Launchpad.
1174-
1175- The returned URL points to a release file for the given project, series
1176- name and release version.
1177- The argument *project* is a project object as returned by launchpadlib.
1178- The arguments *series_name* and *release_version* are strings. If
1179- *release_version* is None, the URL of the latest release will be returned.
1180- """
1181- series = _get_by_attr(project.series, 'name', series_name)
1182- if series is None:
1183- raise ValueError('%r: series not found' % series_name)
1184- # Releases are returned by Launchpad in reverse date order.
1185- releases = list(series.releases)
1186- if not releases:
1187- raise ValueError('%r: series does not contain releases' % series_name)
1188- if release_version is not None:
1189- release = _get_by_attr(releases, 'version', release_version)
1190- if release is None:
1191- raise ValueError('%r: release not found' % release_version)
1192- releases = [release]
1193- for release in releases:
1194- for file_ in release.files:
1195- if str(file_).endswith('.tgz'):
1196- return file_.file_link
1197- raise ValueError('%r: file not found' % release_version)
1198-
1199-
1200-def get_zookeeper_address(agent_file_path):
1201- """Retrieve the Zookeeper address contained in the given *agent_file_path*.
1202-
1203- The *agent_file_path* is a path to a file containing a line similar to the
1204- following::
1205-
1206- env JUJU_ZOOKEEPER="address"
1207- """
1208- line = search_file('JUJU_ZOOKEEPER', agent_file_path).strip()
1209- return line.split('=')[1].strip('"')
1210-
1211-
1212-@contextmanager
1213-def log_hook():
1214- """Log when a hook starts and stops its execution.
1215-
1216- Also log to stdout possible CalledProcessError exceptions raised executing
1217- the hook.
1218- """
1219- script = script_name()
1220- log(">>> Entering {}".format(script))
1221- try:
1222- yield
1223- except CalledProcessError as err:
1224- log('Exception caught:')
1225- log(err.output)
1226- raise
1227- finally:
1228- log("<<< Exiting {}".format(script))
1229-
1230-
1231-def parse_source(source):
1232- """Parse the ``juju-gui-source`` option.
1233-
1234- Return a tuple of two elements representing info on how to deploy Juju GUI.
1235- Examples:
1236- - ('stable', None): latest stable release;
1237- - ('stable', '0.1.0'): stable release v0.1.0;
1238- - ('trunk', None): latest trunk release;
1239- - ('trunk', '0.1.0+build.1'): trunk release v0.1.0 bzr revision 1;
1240- - ('branch', 'lp:juju-gui'): release is made from a branch;
1241- - ('url', 'http://example.com/gui'): release from a downloaded file.
1242- """
1243- if source.startswith('url:'):
1244- source = source[4:]
1245- # Support file paths, including relative paths.
1246- if urlparse(source).scheme == '':
1247- if not source.startswith('/'):
1248- source = os.path.join(os.path.abspath(CURRENT_DIR), source)
1249- source = "file://%s" % source
1250- return 'url', source
1251- if source in ('stable', 'trunk'):
1252- return source, None
1253- if source.startswith('lp:') or source.startswith('http://'):
1254- return 'branch', source
1255- if 'build' in source:
1256- return 'trunk', source
1257- return 'stable', source
1258-
1259-
1260-def render_to_file(template_name, context, destination):
1261- """Render the given *template_name* into *destination* using *context*.
1262-
1263- The tempita template language is used to render contents
1264- (see http://pythonpaste.org/tempita/).
1265- The argument *template_name* is the name or path of the template file:
1266- it may be either a path relative to ``../config`` or an absolute path.
1267- The argument *destination* is a file path.
1268- The argument *context* is a dict-like object.
1269- """
1270- template_path = os.path.abspath(template_name)
1271- template = tempita.Template.from_filename(template_path)
1272- with open(destination, 'w') as stream:
1273- stream.write(template.substitute(context))
1274-
1275-
1276-results_log = None
1277-
1278-
1279-def _setupLogging():
1280- global results_log
1281- if results_log is not None:
1282- return
1283- cfg = config()
1284- logging.basicConfig(
1285- filename=cfg['command-log-file'],
1286- level=logging.INFO,
1287- format="%(asctime)s: %(name)s@%(levelname)s %(message)s")
1288- results_log = logging.getLogger('juju-gui')
1289-
1290-
1291-def cmd_log(results):
1292- global results_log
1293- if not results:
1294- return
1295- if results_log is None:
1296- _setupLogging()
1297- # Since 'results' may be multi-line output, start it on a separate line
1298- # from the logger timestamp, etc.
1299- results_log.info('\n' + results)
1300-
1301-
1302-def start_improv(staging_env, ssl_cert_path,
1303- config_path='/etc/init/juju-api-improv.conf'):
1304- """Start a simulated juju environment using ``improv.py``."""
1305- log('Setting up staging start up script.')
1306- context = {
1307- 'juju_dir': JUJU_DIR,
1308- 'keys': ssl_cert_path,
1309- 'port': API_PORT,
1310- 'staging_env': staging_env,
1311- }
1312- render_to_file('config/juju-api-improv.conf.template', context, config_path)
1313- log('Starting the staging backend.')
1314- with su('root'):
1315- service_start(IMPROV)
1316-
1317-
1318-def start_agent(
1319- ssl_cert_path, config_path='/etc/init/juju-api-agent.conf',
1320- read_only=False):
1321- """Start the Juju agent and connect to the current environment."""
1322- # Retrieve the Zookeeper address from the start up script.
1323- unit_dir = os.path.realpath(os.path.join(CURRENT_DIR, '..'))
1324- agent_file = '/etc/init/juju-{0}.conf'.format(os.path.basename(unit_dir))
1325- zookeeper = get_zookeeper_address(agent_file)
1326- log('Setting up API agent start up script.')
1327- context = {
1328- 'juju_dir': JUJU_DIR,
1329- 'keys': ssl_cert_path,
1330- 'port': API_PORT,
1331- 'zookeeper': zookeeper,
1332- 'read_only': read_only
1333- }
1334- render_to_file('config/juju-api-agent.conf.template', context, config_path)
1335- log('Starting API agent.')
1336- with su('root'):
1337- service_start(AGENT)
1338-
1339-
1340-def start_gui(
1341- console_enabled, login_help, readonly, in_staging, ssl_cert_path,
1342- charmworld_url, serve_tests, haproxy_path='/etc/haproxy/haproxy.cfg',
1343- config_js_path=None, secure=True, sandbox=False):
1344- """Set up and start the Juju GUI server."""
1345- with su('root'):
1346- run('chown', '-R', 'ubuntu:', JUJU_GUI_DIR)
1347- # XXX 2013-02-05 frankban bug=1116320:
1348- # External insecure resources are still loaded when testing in the
1349- # debug environment. For now, switch to the production environment if
1350- # the charm is configured to serve tests.
1351- if in_staging and not serve_tests:
1352- build_dirname = 'build-debug'
1353- else:
1354- build_dirname = 'build-prod'
1355- build_dir = os.path.join(JUJU_GUI_DIR, build_dirname)
1356- log('Generating the Juju GUI configuration file.')
1357- is_legacy_juju = legacy_juju()
1358- user, password = None, None
1359- if (is_legacy_juju and in_staging) or sandbox:
1360- user, password = 'admin', 'admin'
1361- else:
1362- user, password = None, None
1363-
1364- api_backend = 'python' if is_legacy_juju else 'go'
1365- if secure:
1366- protocol = 'wss'
1367- else:
1368- log('Running in insecure mode! Port 80 will serve unencrypted.')
1369- protocol = 'ws'
1370-
1371- context = {
1372- 'raw_protocol': protocol,
1373- 'address': unit_get('public-address'),
1374- 'console_enabled': json.dumps(console_enabled),
1375- 'login_help': json.dumps(login_help),
1376- 'password': json.dumps(password),
1377- 'api_backend': json.dumps(api_backend),
1378- 'readonly': json.dumps(readonly),
1379- 'user': json.dumps(user),
1380- 'protocol': json.dumps(protocol),
1381- 'sandbox': json.dumps(sandbox),
1382- 'charmworld_url': json.dumps(charmworld_url),
1383- }
1384- if config_js_path is None:
1385- config_js_path = os.path.join(
1386- build_dir, 'juju-ui', 'assets', 'config.js')
1387- render_to_file('config/config.js.template', context, config_js_path)
1388-
1389- write_apache_config(build_dir, serve_tests)
1390-
1391- log('Generating haproxy configuration file.')
1392- if is_legacy_juju:
1393- # The PyJuju API agent is listening on localhost.
1394- api_address = '127.0.0.1:{0}'.format(API_PORT)
1395- else:
1396- # Retrieve the juju-core API server address.
1397- api_address = get_api_address(os.path.join(CURRENT_DIR, '..'))
1398- context = {
1399- 'api_address': api_address,
1400- 'api_pem': JUJU_PEM,
1401- 'legacy_juju': is_legacy_juju,
1402- 'ssl_cert_path': ssl_cert_path,
1403- # In PyJuju environments, use the same certificate for both HTTPS and
1404- # WebSocket connections. In juju-core the system already has the proper
1405- # certificate installed.
1406- 'web_pem': JUJU_PEM,
1407- 'web_port': WEB_PORT,
1408- 'secure': secure
1409- }
1410- render_to_file('config/haproxy.cfg.template', context, haproxy_path)
1411- log('Starting Juju GUI.')
1412-
1413-
1414-def write_apache_config(build_dir, serve_tests=False):
1415- log('Generating the apache site configuration file.')
1416- context = {
1417- 'port': WEB_PORT,
1418- 'serve_tests': serve_tests,
1419- 'server_root': build_dir,
1420- 'tests_root': os.path.join(JUJU_GUI_DIR, 'test', ''),
1421- }
1422- render_to_file('config/apache-ports.template', context, JUJU_GUI_PORTS)
1423- render_to_file('config/apache-site.template', context, JUJU_GUI_SITE)
1424-
1425-
1426-def get_npm_cache_archive_url(Launchpad=Launchpad):
1427- """Figure out the URL of the most recent NPM cache archive on Launchpad."""
1428- launchpad = Launchpad.login_anonymously('Juju GUI charm', 'production')
1429- project = launchpad.projects['juju-gui']
1430- # Find the URL of the most recently created NPM cache archive.
1431- npm_cache_url = get_release_file_url(project, 'npm-cache', None)
1432- return npm_cache_url
1433-
1434-
1435-def prime_npm_cache(npm_cache_url):
1436- """Download NPM cache archive and prime the NPM cache with it."""
1437- # Download the cache archive and then uncompress it into the NPM cache.
1438- npm_cache_archive = os.path.join(CURRENT_DIR, 'npm-cache.tgz')
1439- cmd_log(run('curl', '-L', '-o', npm_cache_archive, npm_cache_url))
1440- npm_cache_dir = os.path.expanduser('~/.npm')
1441- # The NPM cache directory probably does not exist, so make it if not.
1442- try:
1443- os.mkdir(npm_cache_dir)
1444- except OSError, e:
1445- # If the directory already exists then ignore the error.
1446- if e.errno != errno.EEXIST: # File exists.
1447- raise
1448- uncompress = command('tar', '-x', '-z', '-C', npm_cache_dir, '-f')
1449- cmd_log(uncompress(npm_cache_archive))
1450-
1451-
1452-def fetch_gui(juju_gui_source, logpath):
1453- """Retrieve the Juju GUI release/branch."""
1454- # Retrieve a Juju GUI release.
1455- origin, version_or_branch = parse_source(juju_gui_source)
1456- if origin == 'branch':
1457- # Make sure we have the dependencies necessary for us to actually make
1458- # a build.
1459- _get_build_dependencies()
1460- # Create a release starting from a branch.
1461- juju_gui_source_dir = os.path.join(CURRENT_DIR, 'juju-gui-source')
1462- log('Retrieving Juju GUI source checkout from %s.' % version_or_branch)
1463- cmd_log(run('rm', '-rf', juju_gui_source_dir))
1464- cmd_log(bzr_checkout(version_or_branch, juju_gui_source_dir))
1465- log('Preparing a Juju GUI release.')
1466- logdir = os.path.dirname(logpath)
1467- fd, name = tempfile.mkstemp(prefix='make-distfile-', dir=logdir)
1468- log('Output from "make distfile" sent to %s' % name)
1469- with environ(NO_BZR='1'):
1470- run('make', '-C', juju_gui_source_dir, 'distfile',
1471- stdout=fd, stderr=fd)
1472- release_tarball = first_path_in_dir(
1473- os.path.join(juju_gui_source_dir, 'releases'))
1474- else:
1475- log('Retrieving Juju GUI release.')
1476- if origin == 'url':
1477- file_url = version_or_branch
1478- else:
1479- # Retrieve a release from Launchpad.
1480- launchpad = Launchpad.login_anonymously(
1481- 'Juju GUI charm', 'production')
1482- project = launchpad.projects['juju-gui']
1483- file_url = get_release_file_url(project, origin, version_or_branch)
1484- log('Downloading release file from %s.' % file_url)
1485- release_tarball = os.path.join(CURRENT_DIR, 'release.tgz')
1486- cmd_log(run('curl', '-L', '-o', release_tarball, file_url))
1487- return release_tarball
1488-
1489-
1490-def fetch_api(juju_api_branch):
1491- """Retrieve the Juju branch."""
1492- # Retrieve Juju API source checkout.
1493- log('Retrieving Juju API source checkout.')
1494- cmd_log(run('rm', '-rf', JUJU_DIR))
1495- cmd_log(bzr_checkout(juju_api_branch, JUJU_DIR))
1496-
1497-
1498-def setup_gui(release_tarball):
1499- """Set up Juju GUI."""
1500- # Uncompress the release tarball.
1501- log('Installing Juju GUI.')
1502- release_dir = os.path.join(CURRENT_DIR, 'release')
1503- cmd_log(run('rm', '-rf', release_dir))
1504- os.mkdir(release_dir)
1505- uncompress = command('tar', '-x', '-z', '-C', release_dir, '-f')
1506- cmd_log(uncompress(release_tarball))
1507- # Link the Juju GUI dir to the contents of the release tarball.
1508- cmd_log(run('ln', '-sf', first_path_in_dir(release_dir), JUJU_GUI_DIR))
1509-
1510-
1511-def setup_apache():
1512- """Set up apache."""
1513- log('Setting up apache.')
1514- if not os.path.exists(JUJU_GUI_SITE):
1515- cmd_log(run('touch', JUJU_GUI_SITE))
1516- cmd_log(run('chown', 'ubuntu:', JUJU_GUI_SITE))
1517- cmd_log(
1518- run('ln', '-s', JUJU_GUI_SITE,
1519- '/etc/apache2/sites-enabled/juju-gui'))
1520-
1521- if not os.path.exists(JUJU_GUI_PORTS):
1522- cmd_log(run('touch', JUJU_GUI_PORTS))
1523- cmd_log(run('chown', 'ubuntu:', JUJU_GUI_PORTS))
1524-
1525- with su('root'):
1526- run('a2dissite', 'default')
1527- run('a2ensite', 'juju-gui')
1528-
1529-
1530-def save_or_create_certificates(
1531- ssl_cert_path, ssl_cert_contents, ssl_key_contents):
1532- """Generate the SSL certificates.
1533-
1534- If both *ssl_cert_contents* and *ssl_key_contents* are provided, use them
1535- as certificates; otherwise, generate them.
1536-
1537- Also create a pem file, suitable for use in the haproxy configuration,
1538- concatenating the key and the certificate files.
1539- """
1540- crt_path = os.path.join(ssl_cert_path, 'juju.crt')
1541- key_path = os.path.join(ssl_cert_path, 'juju.key')
1542- if not os.path.exists(ssl_cert_path):
1543- os.makedirs(ssl_cert_path)
1544- if ssl_cert_contents and ssl_key_contents:
1545- # Save the provided certificates.
1546- with open(crt_path, 'w') as cert_file:
1547- cert_file.write(ssl_cert_contents)
1548- with open(key_path, 'w') as key_file:
1549- key_file.write(ssl_key_contents)
1550- else:
1551- # Generate certificates.
1552- # See http://superuser.com/questions/226192/openssl-without-prompt
1553- cmd_log(run(
1554- 'openssl', 'req', '-new', '-newkey', 'rsa:4096',
1555- '-days', '365', '-nodes', '-x509', '-subj',
1556- # These are arbitrary test values for the certificate.
1557- '/C=GB/ST=Juju/L=GUI/O=Ubuntu/CN=juju.ubuntu.com',
1558- '-keyout', key_path, '-out', crt_path))
1559- # Generate the pem file.
1560- pem_path = os.path.join(ssl_cert_path, JUJU_PEM)
1561- if os.path.exists(pem_path):
1562- os.remove(pem_path)
1563- with open(pem_path, 'w') as pem_file:
1564- shutil.copyfileobj(open(key_path), pem_file)
1565- shutil.copyfileobj(open(crt_path), pem_file)
1566-
1567-
1568-def find_missing_packages(*packages):
1569- """Given a list of packages, return the packages which are not installed.
1570- """
1571- cache = apt.Cache()
1572- missing = set()
1573- for pkg_name in packages:
1574- try:
1575- pkg = cache[pkg_name]
1576- except KeyError:
1577- missing.add(pkg_name)
1578- continue
1579- if pkg.is_installed:
1580- continue
1581- missing.add(pkg_name)
1582- return missing
1583-
1584-
1585-## Backend support decorators
1586-
1587-def chain(name):
1588- """Helper method to compose a set of mixin objects into a callable.
1589-
1590- Each method is called in the context of its mixin instance, and its
1591- argument is the Backend instance.
1592- """
1593- # Chain method calls through all implementing mixins.
1594- def method(self):
1595- for mixin in self.mixins:
1596- a_callable = getattr(type(mixin), name, None)
1597- if a_callable:
1598- a_callable(mixin, self)
1599-
1600- method.__name__ = name
1601- return method
1602-
1603-
1604-def merge(name):
1605- """Helper to merge a property from a set of strategy objects
1606- into a unified set.
1607- """
1608- # Return merged property from every providing mixin as a set.
1609- @property
1610- def method(self):
1611- result = set()
1612- for mixin in self.mixins:
1613- segment = getattr(type(mixin), name, None)
1614- if segment and isinstance(segment, (list, tuple, set)):
1615- result |= set(segment)
1616-
1617- return result
1618- return method
1619
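
For reference, a minimal sketch of the mixin composition that chain() and merge() enable. The mixin names below are illustrative, and the sketch assumes the chain() and merge() definitions from the hunk above are in scope:

    class AptMixin(object):
        debs = ['curl']

        def install(self, backend):
            # Each mixin method receives the composed Backend, so it can
            # read merged state such as backend.debs.
            print('installing %s' % sorted(backend.debs))

    class GuiMixin(object):
        debs = ['haproxy']

        def install(self, backend):
            print('fetching gui release')

    class Backend(object):
        # install() calls install() on every mixin in order; debs is the
        # union of every mixin's debs list, exposed as a read-only property.
        install = chain('install')
        debs = merge('debs')

        def __init__(self, *mixins):
            self.mixins = mixins

    Backend(AptMixin(), GuiMixin()).install()
    # -> installing ['curl', 'haproxy']
    # -> fetching gui release
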
1620=== removed directory 'hooks/charmhelpers/contrib/network'
1621=== removed file 'hooks/charmhelpers/contrib/network/__init__.py'
1622=== removed file 'hooks/charmhelpers/contrib/network/ip.py'
1623--- hooks/charmhelpers/contrib/network/ip.py 2014-05-09 20:11:59 +0000
1624+++ hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000
1625@@ -1,69 +0,0 @@
1626-import sys
1627-
1628-from charmhelpers.fetch import apt_install
1629-from charmhelpers.core.hookenv import (
1630- ERROR, log,
1631-)
1632-
1633-try:
1634- import netifaces
1635-except ImportError:
1636- apt_install('python-netifaces')
1637- import netifaces
1638-
1639-try:
1640- import netaddr
1641-except ImportError:
1642- apt_install('python-netaddr')
1643- import netaddr
1644-
1645-
1646-def _validate_cidr(network):
1647- try:
1648- netaddr.IPNetwork(network)
1649- except (netaddr.core.AddrFormatError, ValueError):
1650- raise ValueError("Network (%s) is not in CIDR presentation format" %
1651- network)
1652-
1653-
1654-def get_address_in_network(network, fallback=None, fatal=False):
1655- """
1656- Get an IPv4 address within the network from the host.
1657-
1658- Args:
1659- network (str): CIDR presentation format. For example,
1660- '192.168.1.0/24'.
1661- fallback (str): If no address is found, return fallback.
1662- fatal (boolean): If no address is found, fallback is not
1663- set and fatal is True then exit(1).
1664- """
1665-
1666- def not_found_error_out():
1667- log("No IP address found in network: %s" % network,
1668- level=ERROR)
1669- sys.exit(1)
1670-
1671- if network is None:
1672- if fallback is not None:
1673- return fallback
1674- else:
1675- if fatal:
1676- not_found_error_out()
1677-
1678- _validate_cidr(network)
1679- for iface in netifaces.interfaces():
1680- addresses = netifaces.ifaddresses(iface)
1681- if netifaces.AF_INET in addresses:
1682- addr = addresses[netifaces.AF_INET][0]['addr']
1683- netmask = addresses[netifaces.AF_INET][0]['netmask']
1684- cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
1685- if cidr in netaddr.IPNetwork(network):
1686- return str(cidr.ip)
1687-
1688- if fallback is not None:
1689- return fallback
1690-
1691- if fatal:
1692- not_found_error_out()
1693-
1694- return None
1695
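
A hedged usage sketch for the helper above, as a hook might have called it before this removal (the CIDR and fallback values are illustrative; the module installs python-netifaces and python-netaddr on first import, so this only works on a deployed unit):

    from charmhelpers.contrib.network.ip import get_address_in_network

    # Return this unit's IPv4 address inside the given network, or the
    # fallback when no interface matches.
    addr = get_address_in_network('192.168.1.0/24', fallback='10.0.0.1')

    # With fatal=True and no fallback, the helper logs an ERROR and calls
    # sys.exit(1) instead of returning None.
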
1696=== removed directory 'hooks/charmhelpers/contrib/network/ovs'
1697=== removed file 'hooks/charmhelpers/contrib/network/ovs/__init__.py'
1698--- hooks/charmhelpers/contrib/network/ovs/__init__.py 2013-11-26 17:12:54 +0000
1699+++ hooks/charmhelpers/contrib/network/ovs/__init__.py 1970-01-01 00:00:00 +0000
1700@@ -1,75 +0,0 @@
1701-''' Helpers for interacting with OpenvSwitch '''
1702-import subprocess
1703-import os
1704-from charmhelpers.core.hookenv import (
1705- log, WARNING
1706-)
1707-from charmhelpers.core.host import (
1708- service
1709-)
1710-
1711-
1712-def add_bridge(name):
1713- ''' Add the named bridge to openvswitch '''
1714- log('Creating bridge {}'.format(name))
1715- subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name])
1716-
1717-
1718-def del_bridge(name):
1719- ''' Delete the named bridge from openvswitch '''
1720- log('Deleting bridge {}'.format(name))
1721- subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])
1722-
1723-
1724-def add_bridge_port(name, port):
1725- ''' Add a port to the named openvswitch bridge '''
1726- log('Adding port {} to bridge {}'.format(port, name))
1727- subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
1728- name, port])
1729- subprocess.check_call(["ip", "link", "set", port, "up"])
1730-
1731-
1732-def del_bridge_port(name, port):
1733- ''' Delete a port from the named openvswitch bridge '''
1734- log('Deleting port {} from bridge {}'.format(port, name))
1735- subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
1736- name, port])
1737- subprocess.check_call(["ip", "link", "set", port, "down"])
1738-
1739-
1740-def set_manager(manager):
1741- ''' Set the controller for the local openvswitch '''
1742- log('Setting manager for local ovs to {}'.format(manager))
1743- subprocess.check_call(['ovs-vsctl', 'set-manager',
1744- 'ssl:{}'.format(manager)])
1745-
1746-
1747-CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
1748-
1749-
1750-def get_certificate():
1751- ''' Read openvswitch certificate from disk '''
1752- if os.path.exists(CERT_PATH):
1753- log('Reading ovs certificate from {}'.format(CERT_PATH))
1754- with open(CERT_PATH, 'r') as cert:
1755- full_cert = cert.read()
1756- begin_marker = "-----BEGIN CERTIFICATE-----"
1757- end_marker = "-----END CERTIFICATE-----"
1758- begin_index = full_cert.find(begin_marker)
1759- end_index = full_cert.rfind(end_marker)
1760- if end_index == -1 or begin_index == -1:
1761- raise RuntimeError("Certificate does not contain valid begin"
1762- " and end markers.")
1763- full_cert = full_cert[begin_index:(end_index + len(end_marker))]
1764- return full_cert
1765- else:
1766- log('Certificate not found', level=WARNING)
1767- return None
1768-
1769-
1770-def full_restart():
1771- ''' Full restart and reload of openvswitch '''
1772- if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
1773- service('start', 'openvswitch-force-reload-kmod')
1774- else:
1775- service('force-reload-kmod', 'openvswitch-switch')
1776
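
A hedged sketch of how a networking hook might have used these OVS helpers (bridge, port, and manager values are illustrative; the calls shell out to ovs-vsctl and ip, so they need Open vSwitch installed and root privileges):

    from charmhelpers.contrib.network.ovs import (
        add_bridge,
        add_bridge_port,
        set_manager,
    )

    add_bridge('br-int')               # ovs-vsctl -- --may-exist add-br br-int
    add_bridge_port('br-int', 'eth1')  # adds the port, then brings eth1 up
    set_manager('10.0.0.10:6632')      # ovs-vsctl set-manager ssl:10.0.0.10:6632
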
1777=== removed directory 'hooks/charmhelpers/contrib/openstack'
1778=== removed file 'hooks/charmhelpers/contrib/openstack/__init__.py'
1779=== removed file 'hooks/charmhelpers/contrib/openstack/alternatives.py'
1780--- hooks/charmhelpers/contrib/openstack/alternatives.py 2013-11-26 17:12:54 +0000
1781+++ hooks/charmhelpers/contrib/openstack/alternatives.py 1970-01-01 00:00:00 +0000
1782@@ -1,17 +0,0 @@
1783-''' Helper for managing alternatives for file conflict resolution '''
1784-
1785-import subprocess
1786-import shutil
1787-import os
1788-
1789-
1790-def install_alternative(name, target, source, priority=50):
1791- ''' Install alternative configuration '''
1792- if (os.path.exists(target) and not os.path.islink(target)):
1793- # Move existing file/directory away before installing
1794- shutil.move(target, '{}.bak'.format(target))
1795- cmd = [
1796- 'update-alternatives', '--force', '--install',
1797- target, name, source, str(priority)
1798- ]
1799- subprocess.check_call(cmd)
1800
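
A hedged sketch of install_alternative() as a charm might have called it; the alternative name and paths are illustrative. Note that a real file already present at the target is first moved aside to <target>.bak:

    from charmhelpers.contrib.openstack.alternatives import install_alternative

    # Equivalent to: update-alternatives --force --install \
    #     /etc/foo/foo.conf foo-conf /var/lib/charm/foo/foo.conf 80
    install_alternative(name='foo-conf',
                        target='/etc/foo/foo.conf',
                        source='/var/lib/charm/foo/foo.conf',
                        priority=80)
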
1801=== removed file 'hooks/charmhelpers/contrib/openstack/context.py'
1802--- hooks/charmhelpers/contrib/openstack/context.py 2014-05-09 20:11:59 +0000
1803+++ hooks/charmhelpers/contrib/openstack/context.py 1970-01-01 00:00:00 +0000
1804@@ -1,700 +0,0 @@
1805-import json
1806-import os
1807-import time
1808-
1809-from base64 import b64decode
1810-
1811-from subprocess import (
1812- check_call
1813-)
1814-
1815-
1816-from charmhelpers.fetch import (
1817- apt_install,
1818- filter_installed_packages,
1819-)
1820-
1821-from charmhelpers.core.hookenv import (
1822- config,
1823- local_unit,
1824- log,
1825- relation_get,
1826- relation_ids,
1827- related_units,
1828- unit_get,
1829- unit_private_ip,
1830- ERROR,
1831-)
1832-
1833-from charmhelpers.contrib.hahelpers.cluster import (
1834- determine_apache_port,
1835- determine_api_port,
1836- https,
1837- is_clustered
1838-)
1839-
1840-from charmhelpers.contrib.hahelpers.apache import (
1841- get_cert,
1842- get_ca_cert,
1843-)
1844-
1845-from charmhelpers.contrib.openstack.neutron import (
1846- neutron_plugin_attribute,
1847-)
1848-
1849-CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
1850-
1851-
1852-class OSContextError(Exception):
1853- pass
1854-
1855-
1856-def ensure_packages(packages):
1857- '''Install but do not upgrade required plugin packages'''
1858- required = filter_installed_packages(packages)
1859- if required:
1860- apt_install(required, fatal=True)
1861-
1862-
1863-def context_complete(ctxt):
1864- _missing = []
1865- for k, v in ctxt.iteritems():
1866- if v is None or v == '':
1867- _missing.append(k)
1868- if _missing:
1869- log('Missing required data: %s' % ' '.join(_missing), level='INFO')
1870- return False
1871- return True
1872-
1873-
1874-def config_flags_parser(config_flags):
1875- if config_flags.find('==') >= 0:
1876- log("config_flags is not in expected format (key=value)",
1877- level=ERROR)
1878- raise OSContextError
1879- # strip the following from each value.
1880- post_strippers = ' ,'
1881- # we strip any leading/trailing '=' or ' ' from the string then
1882- # split on '='.
1883- split = config_flags.strip(' =').split('=')
1884- limit = len(split)
1885- flags = {}
1886- for i in xrange(0, limit - 1):
1887- current = split[i]
1888- next = split[i + 1]
1889- vindex = next.rfind(',')
1890- if (i == limit - 2) or (vindex < 0):
1891- value = next
1892- else:
1893- value = next[:vindex]
1894-
1895- if i == 0:
1896- key = current
1897- else:
1898- # if this not the first entry, expect an embedded key.
1899- index = current.rfind(',')
1900- if index < 0:
1901- log("invalid config value(s) at index %s" % (i),
1902- level=ERROR)
1903- raise OSContextError
1904- key = current[index + 1:]
1905-
1906- # Add to collection.
1907- flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
1908- return flags
1909-
1910-
1911-class OSContextGenerator(object):
1912- interfaces = []
1913-
1914- def __call__(self):
1915- raise NotImplementedError
1916-
1917-
1918-class SharedDBContext(OSContextGenerator):
1919- interfaces = ['shared-db']
1920-
1921- def __init__(self,
1922- database=None, user=None, relation_prefix=None, ssl_dir=None):
1923- '''
1924- Allows inspecting relation for settings prefixed with relation_prefix.
1925- This is useful for parsing access for multiple databases returned via
1926- the shared-db interface (eg, nova_password, quantum_password)
1927- '''
1928- self.relation_prefix = relation_prefix
1929- self.database = database
1930- self.user = user
1931- self.ssl_dir = ssl_dir
1932-
1933- def __call__(self):
1934- self.database = self.database or config('database')
1935- self.user = self.user or config('database-user')
1936- if None in [self.database, self.user]:
1937- log('Could not generate shared_db context. '
1938- 'Missing required charm config options. '
1939- '(database name and user)')
1940- raise OSContextError
1941- ctxt = {}
1942-
1943- password_setting = 'password'
1944- if self.relation_prefix:
1945- password_setting = self.relation_prefix + '_password'
1946-
1947- for rid in relation_ids('shared-db'):
1948- for unit in related_units(rid):
1949- rdata = relation_get(rid=rid, unit=unit)
1950- ctxt = {
1951- 'database_host': rdata.get('db_host'),
1952- 'database': self.database,
1953- 'database_user': self.user,
1954- 'database_password': rdata.get(password_setting),
1955- 'database_type': 'mysql'
1956- }
1957- if context_complete(ctxt):
1958- db_ssl(rdata, ctxt, self.ssl_dir)
1959- return ctxt
1960- return {}
1961-
1962-
1963-class PostgresqlDBContext(OSContextGenerator):
1964- interfaces = ['pgsql-db']
1965-
1966- def __init__(self, database=None):
1967- self.database = database
1968-
1969- def __call__(self):
1970- self.database = self.database or config('database')
1971- if self.database is None:
1972- log('Could not generate postgresql_db context. '
1973- 'Missing required charm config options. '
1974- '(database name)')
1975- raise OSContextError
1976- ctxt = {}
1977-
1978- for rid in relation_ids(self.interfaces[0]):
1979- for unit in related_units(rid):
1980- ctxt = {
1981- 'database_host': relation_get('host', rid=rid, unit=unit),
1982- 'database': self.database,
1983- 'database_user': relation_get('user', rid=rid, unit=unit),
1984- 'database_password': relation_get('password', rid=rid, unit=unit),
1985- 'database_type': 'postgresql',
1986- }
1987- if context_complete(ctxt):
1988- return ctxt
1989- return {}
1990-
1991-
1992-def db_ssl(rdata, ctxt, ssl_dir):
1993- if 'ssl_ca' in rdata and ssl_dir:
1994- ca_path = os.path.join(ssl_dir, 'db-client.ca')
1995- with open(ca_path, 'w') as fh:
1996- fh.write(b64decode(rdata['ssl_ca']))
1997- ctxt['database_ssl_ca'] = ca_path
1998- elif 'ssl_ca' in rdata:
1999- log("Charm not set up for ssl support but ssl ca found")
2000- return ctxt
2001- if 'ssl_cert' in rdata:
2002- cert_path = os.path.join(
2003- ssl_dir, 'db-client.cert')
2004- if not os.path.exists(cert_path):
2005- log("Waiting 1m for ssl client cert validity")
2006- time.sleep(60)
2007- with open(cert_path, 'w') as fh:
2008- fh.write(b64decode(rdata['ssl_cert']))
2009- ctxt['database_ssl_cert'] = cert_path
2010- key_path = os.path.join(ssl_dir, 'db-client.key')
2011- with open(key_path, 'w') as fh:
2012- fh.write(b64decode(rdata['ssl_key']))
2013- ctxt['database_ssl_key'] = key_path
2014- return ctxt
2015-
2016-
2017-class IdentityServiceContext(OSContextGenerator):
2018- interfaces = ['identity-service']
2019-
2020- def __call__(self):
2021- log('Generating template context for identity-service')
2022- ctxt = {}
2023-
2024- for rid in relation_ids('identity-service'):
2025- for unit in related_units(rid):
2026- rdata = relation_get(rid=rid, unit=unit)
2027- ctxt = {
2028- 'service_port': rdata.get('service_port'),
2029- 'service_host': rdata.get('service_host'),
2030- 'auth_host': rdata.get('auth_host'),
2031- 'auth_port': rdata.get('auth_port'),
2032- 'admin_tenant_name': rdata.get('service_tenant'),
2033- 'admin_user': rdata.get('service_username'),
2034- 'admin_password': rdata.get('service_password'),
2035- 'service_protocol':
2036- rdata.get('service_protocol') or 'http',
2037- 'auth_protocol':
2038- rdata.get('auth_protocol') or 'http',
2039- }
2040- if context_complete(ctxt):
2041- # NOTE(jamespage) this is required for >= icehouse
2042- # so a missing value just indicates keystone needs
2043- # upgrading
2044- ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
2045- return ctxt
2046- return {}
2047-
2048-
2049-class AMQPContext(OSContextGenerator):
2050- interfaces = ['amqp']
2051-
2052- def __init__(self, ssl_dir=None):
2053- self.ssl_dir = ssl_dir
2054-
2055- def __call__(self):
2056- log('Generating template context for amqp')
2057- conf = config()
2058- try:
2059- username = conf['rabbit-user']
2060- vhost = conf['rabbit-vhost']
2061- except KeyError as e:
2062- log('Could not generate amqp context. '
2063- 'Missing required charm config options: %s.' % e)
2064- raise OSContextError
2065- ctxt = {}
2066- for rid in relation_ids('amqp'):
2067- ha_vip_only = False
2068- for unit in related_units(rid):
2069- if relation_get('clustered', rid=rid, unit=unit):
2070- ctxt['clustered'] = True
2071- ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
2072- unit=unit)
2073- else:
2074- ctxt['rabbitmq_host'] = relation_get('private-address',
2075- rid=rid, unit=unit)
2076- ctxt.update({
2077- 'rabbitmq_user': username,
2078- 'rabbitmq_password': relation_get('password', rid=rid,
2079- unit=unit),
2080- 'rabbitmq_virtual_host': vhost,
2081- })
2082-
2083- ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
2084- if ssl_port:
2085- ctxt['rabbit_ssl_port'] = ssl_port
2086- ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
2087- if ssl_ca:
2088- ctxt['rabbit_ssl_ca'] = ssl_ca
2089-
2090- if relation_get('ha_queues', rid=rid, unit=unit) is not None:
2091- ctxt['rabbitmq_ha_queues'] = True
2092-
2093- ha_vip_only = relation_get('ha-vip-only',
2094- rid=rid, unit=unit) is not None
2095-
2096- if context_complete(ctxt):
2097- if 'rabbit_ssl_ca' in ctxt:
2098- if not self.ssl_dir:
2099- log(("Charm not set up for ssl support "
2100- "but ssl ca found"))
2101- break
2102- ca_path = os.path.join(
2103- self.ssl_dir, 'rabbit-client-ca.pem')
2104- with open(ca_path, 'w') as fh:
2105- fh.write(b64decode(ctxt['rabbit_ssl_ca']))
2106- ctxt['rabbit_ssl_ca'] = ca_path
2107- # Sufficient information found = break out!
2108- break
2109- # Used for active/active rabbitmq >= grizzly
2110- if ('clustered' not in ctxt or ha_vip_only) \
2111- and len(related_units(rid)) > 1:
2112- rabbitmq_hosts = []
2113- for unit in related_units(rid):
2114- rabbitmq_hosts.append(relation_get('private-address',
2115- rid=rid, unit=unit))
2116- ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
2117- if not context_complete(ctxt):
2118- return {}
2119- else:
2120- return ctxt
2121-
2122-
2123-class CephContext(OSContextGenerator):
2124- interfaces = ['ceph']
2125-
2126- def __call__(self):
2127- '''This generates context for /etc/ceph/ceph.conf templates'''
2128- if not relation_ids('ceph'):
2129- return {}
2130-
2131- log('Generating template context for ceph')
2132-
2133- mon_hosts = []
2134- auth = None
2135- key = None
2136- use_syslog = str(config('use-syslog')).lower()
2137- for rid in relation_ids('ceph'):
2138- for unit in related_units(rid):
2139- mon_hosts.append(relation_get('private-address', rid=rid,
2140- unit=unit))
2141- auth = relation_get('auth', rid=rid, unit=unit)
2142- key = relation_get('key', rid=rid, unit=unit)
2143-
2144- ctxt = {
2145- 'mon_hosts': ' '.join(mon_hosts),
2146- 'auth': auth,
2147- 'key': key,
2148- 'use_syslog': use_syslog
2149- }
2150-
2151- if not os.path.isdir('/etc/ceph'):
2152- os.mkdir('/etc/ceph')
2153-
2154- if not context_complete(ctxt):
2155- return {}
2156-
2157- ensure_packages(['ceph-common'])
2158-
2159- return ctxt
2160-
2161-
2162-class HAProxyContext(OSContextGenerator):
2163- interfaces = ['cluster']
2164-
2165- def __call__(self):
2166- '''
2167- Builds half a context for the haproxy template, which describes
2168- all peers to be included in the cluster. Each charm needs to include
2169- its own context generator that describes the port mapping.
2170- '''
2171- if not relation_ids('cluster'):
2172- return {}
2173-
2174- cluster_hosts = {}
2175- l_unit = local_unit().replace('/', '-')
2176- cluster_hosts[l_unit] = unit_get('private-address')
2177-
2178- for rid in relation_ids('cluster'):
2179- for unit in related_units(rid):
2180- _unit = unit.replace('/', '-')
2181- addr = relation_get('private-address', rid=rid, unit=unit)
2182- cluster_hosts[_unit] = addr
2183-
2184- ctxt = {
2185- 'units': cluster_hosts,
2186- }
2187- if len(cluster_hosts.keys()) > 1:
2188- # Enable haproxy when we have enough peers.
2189- log('Ensuring haproxy enabled in /etc/default/haproxy.')
2190- with open('/etc/default/haproxy', 'w') as out:
2191- out.write('ENABLED=1\n')
2192- return ctxt
2193- log('HAProxy context is incomplete, this unit has no peers.')
2194- return {}
2195-
2196-
2197-class ImageServiceContext(OSContextGenerator):
2198- interfaces = ['image-service']
2199-
2200- def __call__(self):
2201- '''
2202- Obtains the glance API server from the image-service relation. Useful
2203- in nova and cinder (currently).
2204- '''
2205- log('Generating template context for image-service.')
2206- rids = relation_ids('image-service')
2207- if not rids:
2208- return {}
2209- for rid in rids:
2210- for unit in related_units(rid):
2211- api_server = relation_get('glance-api-server',
2212- rid=rid, unit=unit)
2213- if api_server:
2214- return {'glance_api_servers': api_server}
2215- log('ImageService context is incomplete. '
2216- 'Missing required relation data.')
2217- return {}
2218-
2219-
2220-class ApacheSSLContext(OSContextGenerator):
2221-
2222- """
2223- Generates a context for an apache vhost configuration that configures
2224- HTTPS reverse proxying for one or many endpoints. Generated context
2225- looks something like:
2226- {
2227- 'namespace': 'cinder',
2228- 'private_address': 'iscsi.mycinderhost.com',
2229- 'endpoints': [(8776, 8766), (8777, 8767)]
2230- }
2231-
2232- The endpoints list consists of tuples mapping external ports
2233- to internal ports.
2234- """
2235- interfaces = ['https']
2236-
2237- # charms should inherit this context and set external ports
2238- # and service namespace accordingly.
2239- external_ports = []
2240- service_namespace = None
2241-
2242- def enable_modules(self):
2243- cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
2244- check_call(cmd)
2245-
2246- def configure_cert(self):
2247- if not os.path.isdir('/etc/apache2/ssl'):
2248- os.mkdir('/etc/apache2/ssl')
2249- ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
2250- if not os.path.isdir(ssl_dir):
2251- os.mkdir(ssl_dir)
2252- cert, key = get_cert()
2253- with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
2254- cert_out.write(b64decode(cert))
2255- with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
2256- key_out.write(b64decode(key))
2257- ca_cert = get_ca_cert()
2258- if ca_cert:
2259- with open(CA_CERT_PATH, 'w') as ca_out:
2260- ca_out.write(b64decode(ca_cert))
2261- check_call(['update-ca-certificates'])
2262-
2263- def __call__(self):
2264- if isinstance(self.external_ports, basestring):
2265- self.external_ports = [self.external_ports]
2266- if (not self.external_ports or not https()):
2267- return {}
2268-
2269- self.configure_cert()
2270- self.enable_modules()
2271-
2272- ctxt = {
2273- 'namespace': self.service_namespace,
2274- 'private_address': unit_get('private-address'),
2275- 'endpoints': []
2276- }
2277- if is_clustered():
2278- ctxt['private_address'] = config('vip')
2279- for api_port in self.external_ports:
2280- ext_port = determine_apache_port(api_port)
2281- int_port = determine_api_port(api_port)
2282- portmap = (int(ext_port), int(int_port))
2283- ctxt['endpoints'].append(portmap)
2284- return ctxt
2285-
2286-
2287-class NeutronContext(OSContextGenerator):
2288- interfaces = []
2289-
2290- @property
2291- def plugin(self):
2292- return None
2293-
2294- @property
2295- def network_manager(self):
2296- return None
2297-
2298- @property
2299- def packages(self):
2300- return neutron_plugin_attribute(
2301- self.plugin, 'packages', self.network_manager)
2302-
2303- @property
2304- def neutron_security_groups(self):
2305- return None
2306-
2307- def _ensure_packages(self):
2308- [ensure_packages(pkgs) for pkgs in self.packages]
2309-
2310- def _save_flag_file(self):
2311- if self.network_manager == 'quantum':
2312- _file = '/etc/nova/quantum_plugin.conf'
2313- else:
2314- _file = '/etc/nova/neutron_plugin.conf'
2315- with open(_file, 'wb') as out:
2316- out.write(self.plugin + '\n')
2317-
2318- def ovs_ctxt(self):
2319- driver = neutron_plugin_attribute(self.plugin, 'driver',
2320- self.network_manager)
2321- config = neutron_plugin_attribute(self.plugin, 'config',
2322- self.network_manager)
2323- ovs_ctxt = {
2324- 'core_plugin': driver,
2325- 'neutron_plugin': 'ovs',
2326- 'neutron_security_groups': self.neutron_security_groups,
2327- 'local_ip': unit_private_ip(),
2328- 'config': config
2329- }
2330-
2331- return ovs_ctxt
2332-
2333- def nvp_ctxt(self):
2334- driver = neutron_plugin_attribute(self.plugin, 'driver',
2335- self.network_manager)
2336- config = neutron_plugin_attribute(self.plugin, 'config',
2337- self.network_manager)
2338- nvp_ctxt = {
2339- 'core_plugin': driver,
2340- 'neutron_plugin': 'nvp',
2341- 'neutron_security_groups': self.neutron_security_groups,
2342- 'local_ip': unit_private_ip(),
2343- 'config': config
2344- }
2345-
2346- return nvp_ctxt
2347-
2348- def neutron_ctxt(self):
2349- if https():
2350- proto = 'https'
2351- else:
2352- proto = 'http'
2353- if is_clustered():
2354- host = config('vip')
2355- else:
2356- host = unit_get('private-address')
2357- url = '%s://%s:%s' % (proto, host, '9696')
2358- ctxt = {
2359- 'network_manager': self.network_manager,
2360- 'neutron_url': url,
2361- }
2362- return ctxt
2363-
2364- def __call__(self):
2365- self._ensure_packages()
2366-
2367- if self.network_manager not in ['quantum', 'neutron']:
2368- return {}
2369-
2370- if not self.plugin:
2371- return {}
2372-
2373- ctxt = self.neutron_ctxt()
2374-
2375- if self.plugin == 'ovs':
2376- ctxt.update(self.ovs_ctxt())
2377- elif self.plugin == 'nvp':
2378- ctxt.update(self.nvp_ctxt())
2379-
2380- alchemy_flags = config('neutron-alchemy-flags')
2381- if alchemy_flags:
2382- flags = config_flags_parser(alchemy_flags)
2383- ctxt['neutron_alchemy_flags'] = flags
2384-
2385- self._save_flag_file()
2386- return ctxt
2387-
2388-
2389-class OSConfigFlagContext(OSContextGenerator):
2390-
2391- """
2392- Responsible for adding user-defined config-flags in charm config to a
2393- template context.
2394-
2395- NOTE: the value of config-flags may be a comma-separated list of
2396- key=value pairs and some Openstack config files support
2397- comma-separated lists as values.
2398- """
2399-
2400- def __call__(self):
2401- config_flags = config('config-flags')
2402- if not config_flags:
2403- return {}
2404-
2405- flags = config_flags_parser(config_flags)
2406- return {'user_config_flags': flags}
2407-
2408-
2409-class SubordinateConfigContext(OSContextGenerator):
2410-
2411- """
2412- Responsible for inspecting relations to subordinates that
2413- may be exporting required config via a json blob.
2414-
2415- The subordinate interface allows subordinates to export their
2416- configuration requirements to the principal for multiple config
2417- files and multiple services. E.g., a subordinate that has interfaces
2418- to both glance and nova may export the following yaml blob as json:
2419-
2420- glance:
2421- /etc/glance/glance-api.conf:
2422- sections:
2423- DEFAULT:
2424- - [key1, value1]
2425- /etc/glance/glance-registry.conf:
2426- MYSECTION:
2427- - [key2, value2]
2428- nova:
2429- /etc/nova/nova.conf:
2430- sections:
2431- DEFAULT:
2432- - [key3, value3]
2433-
2434-
2435- It is then up to the principal charms to subscribe this context to
2436- the service+config file it is interested in. Configuration data will
2437- be available in the template context, in glance's case, as:
2438- ctxt = {
2439- ... other context ...
2440- 'subordinate_config': {
2441- 'DEFAULT': {
2442- 'key1': 'value1',
2443- },
2444- 'MYSECTION': {
2445- 'key2': 'value2',
2446- },
2447- }
2448- }
2449-
2450- """
2451-
2452- def __init__(self, service, config_file, interface):
2453- """
2454- :param service : Service name key to query in any subordinate
2455- data found
2456- :param config_file : Service's config file to query sections
2457- :param interface : Subordinate interface to inspect
2458- """
2459- self.service = service
2460- self.config_file = config_file
2461- self.interface = interface
2462-
2463- def __call__(self):
2464- ctxt = {}
2465- for rid in relation_ids(self.interface):
2466- for unit in related_units(rid):
2467- sub_config = relation_get('subordinate_configuration',
2468- rid=rid, unit=unit)
2469- if sub_config and sub_config != '':
2470- try:
2471- sub_config = json.loads(sub_config)
2472- except:
2473- log('Could not parse JSON from subordinate_config '
2474- 'setting from %s' % rid, level=ERROR)
2475- continue
2476-
2477- if self.service not in sub_config:
2478- log('Found subordinate_config on %s but it contained '
2479- 'nothing for %s service' % (rid, self.service))
2480- continue
2481-
2482- sub_config = sub_config[self.service]
2483- if self.config_file not in sub_config:
2484- log('Found subordinate_config on %s but it contained '
2485- 'nothing for %s' % (rid, self.config_file))
2486- continue
2487-
2488- sub_config = sub_config[self.config_file]
2489- for k, v in sub_config.iteritems():
2490- ctxt[k] = v
2491-
2492- if not ctxt:
2493- ctxt['sections'] = {}
2494-
2495- return ctxt
2496-
2497-
2498-class SyslogContext(OSContextGenerator):
2499-
2500- def __call__(self):
2501- ctxt = {
2502- 'use_syslog': config('use-syslog')
2503- }
2504- return ctxt
2505
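
A worked example of config_flags_parser() from the removed context.py above (flag names are illustrative; the function is assumed in scope). A comma only acts as a separator when it precedes the next key, so values may themselves contain commas:

    flags = config_flags_parser('osapi_compute_workers=4,use_syslog=True')
    # -> {'osapi_compute_workers': '4', 'use_syslog': 'True'}

    flags = config_flags_parser('enabled_apis=osapi_compute,metadata,verbose=True')
    # -> {'enabled_apis': 'osapi_compute,metadata', 'verbose': 'True'}
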
2506=== removed file 'hooks/charmhelpers/contrib/openstack/neutron.py'
2507--- hooks/charmhelpers/contrib/openstack/neutron.py 2014-05-09 20:11:59 +0000
2508+++ hooks/charmhelpers/contrib/openstack/neutron.py 1970-01-01 00:00:00 +0000
2509@@ -1,171 +0,0 @@
2510-# Various utilities for dealing with Neutron and the renaming from Quantum.
2511-
2512-from subprocess import check_output
2513-
2514-from charmhelpers.core.hookenv import (
2515- config,
2516- log,
2517- ERROR,
2518-)
2519-
2520-from charmhelpers.contrib.openstack.utils import os_release
2521-
2522-
2523-def headers_package():
2524- """Return the linux-headers package for the running kernel, used
2525- when building a DKMS package"""
2526- kver = check_output(['uname', '-r']).strip()
2527- return 'linux-headers-%s' % kver
2528-
2529-QUANTUM_CONF_DIR = '/etc/quantum'
2530-
2531-
2532-def kernel_version():
2533- """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
2534- kver = check_output(['uname', '-r']).strip()
2535- kver = kver.split('.')
2536- return (int(kver[0]), int(kver[1]))
2537-
2538-
2539-def determine_dkms_package():
2540- """ Determine which DKMS package should be used based on kernel version """
2541- # NOTE: 3.13 kernels have native support for GRE and VXLAN
2542- if kernel_version() >= (3, 13):
2543- return []
2544- else:
2545- return ['openvswitch-datapath-dkms']
2546-
2547-
2548-# legacy
2549-
2550-
2551-def quantum_plugins():
2552- from charmhelpers.contrib.openstack import context
2553- return {
2554- 'ovs': {
2555- 'config': '/etc/quantum/plugins/openvswitch/'
2556- 'ovs_quantum_plugin.ini',
2557- 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
2558- 'OVSQuantumPluginV2',
2559- 'contexts': [
2560- context.SharedDBContext(user=config('neutron-database-user'),
2561- database=config('neutron-database'),
2562- relation_prefix='neutron',
2563- ssl_dir=QUANTUM_CONF_DIR)],
2564- 'services': ['quantum-plugin-openvswitch-agent'],
2565- 'packages': [[headers_package()] + determine_dkms_package(),
2566- ['quantum-plugin-openvswitch-agent']],
2567- 'server_packages': ['quantum-server',
2568- 'quantum-plugin-openvswitch'],
2569- 'server_services': ['quantum-server']
2570- },
2571- 'nvp': {
2572- 'config': '/etc/quantum/plugins/nicira/nvp.ini',
2573- 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
2574- 'QuantumPlugin.NvpPluginV2',
2575- 'contexts': [
2576- context.SharedDBContext(user=config('neutron-database-user'),
2577- database=config('neutron-database'),
2578- relation_prefix='neutron',
2579- ssl_dir=QUANTUM_CONF_DIR)],
2580- 'services': [],
2581- 'packages': [],
2582- 'server_packages': ['quantum-server',
2583- 'quantum-plugin-nicira'],
2584- 'server_services': ['quantum-server']
2585- }
2586- }
2587-
2588-NEUTRON_CONF_DIR = '/etc/neutron'
2589-
2590-
2591-def neutron_plugins():
2592- from charmhelpers.contrib.openstack import context
2593- release = os_release('nova-common')
2594- plugins = {
2595- 'ovs': {
2596- 'config': '/etc/neutron/plugins/openvswitch/'
2597- 'ovs_neutron_plugin.ini',
2598- 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
2599- 'OVSNeutronPluginV2',
2600- 'contexts': [
2601- context.SharedDBContext(user=config('neutron-database-user'),
2602- database=config('neutron-database'),
2603- relation_prefix='neutron',
2604- ssl_dir=NEUTRON_CONF_DIR)],
2605- 'services': ['neutron-plugin-openvswitch-agent'],
2606- 'packages': [[headers_package()] + determine_dkms_package(),
2607- ['neutron-plugin-openvswitch-agent']],
2608- 'server_packages': ['neutron-server',
2609- 'neutron-plugin-openvswitch'],
2610- 'server_services': ['neutron-server']
2611- },
2612- 'nvp': {
2613- 'config': '/etc/neutron/plugins/nicira/nvp.ini',
2614- 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
2615- 'NeutronPlugin.NvpPluginV2',
2616- 'contexts': [
2617- context.SharedDBContext(user=config('neutron-database-user'),
2618- database=config('neutron-database'),
2619- relation_prefix='neutron',
2620- ssl_dir=NEUTRON_CONF_DIR)],
2621- 'services': [],
2622- 'packages': [],
2623- 'server_packages': ['neutron-server',
2624- 'neutron-plugin-nicira'],
2625- 'server_services': ['neutron-server']
2626- }
2627- }
2628- # NOTE: patch in ml2 plugin for icehouse onwards
2629- if release >= 'icehouse':
2630- plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
2631- plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
2632- plugins['ovs']['server_packages'] = ['neutron-server',
2633- 'neutron-plugin-ml2']
2634- return plugins
2635-
2636-
2637-def neutron_plugin_attribute(plugin, attr, net_manager=None):
2638- manager = net_manager or network_manager()
2639- if manager == 'quantum':
2640- plugins = quantum_plugins()
2641- elif manager == 'neutron':
2642- plugins = neutron_plugins()
2643- else:
2644- log('Error: Network manager does not support plugins.')
2645- raise Exception
2646-
2647- try:
2648- _plugin = plugins[plugin]
2649- except KeyError:
2650- log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
2651- raise Exception
2652-
2653- try:
2654- return _plugin[attr]
2655- except KeyError:
2656- return None
2657-
2658-
2659-def network_manager():
2660- '''
2661- Deals with the renaming of Quantum to Neutron in H and any situations
2662- that require compatibility (eg, deploying H with network-manager=quantum,
2663- upgrading from G).
2664- '''
2665- release = os_release('nova-common')
2666- manager = config('network-manager').lower()
2667-
2668- if manager not in ['quantum', 'neutron']:
2669- return manager
2670-
2671- if release in ['essex']:
2672- # E does not support neutron
2673- log('Neutron networking not supported in Essex.', level=ERROR)
2674- raise Exception
2675- elif release in ['folsom', 'grizzly']:
2676- # neutron is named quantum in F and G
2677- return 'quantum'
2678- else:
2679- # ensure accurate naming for all releases post-H
2680- return 'neutron'
2681
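
A hedged sketch of the plugin lookup these helpers provided. Passing net_manager explicitly skips the network_manager() call, but neutron_plugins() still reads charm config and the installed nova-common release, so this only runs inside a hook:

    from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute

    pkgs = neutron_plugin_attribute('ovs', 'packages', net_manager='neutron')
    conf = neutron_plugin_attribute('ovs', 'config', net_manager='neutron')
    # On icehouse and later, 'config' resolves to the ml2 ini rather than
    # the legacy ovs_neutron_plugin.ini (see the release patching above).
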
2682=== removed directory 'hooks/charmhelpers/contrib/openstack/templates'
2683=== removed file 'hooks/charmhelpers/contrib/openstack/templates/__init__.py'
2684--- hooks/charmhelpers/contrib/openstack/templates/__init__.py 2013-11-26 17:12:54 +0000
2685+++ hooks/charmhelpers/contrib/openstack/templates/__init__.py 1970-01-01 00:00:00 +0000
2686@@ -1,2 +0,0 @@
2687-# dummy __init__.py to fool syncer into thinking this is a syncable python
2688-# module
2689
2690=== removed file 'hooks/charmhelpers/contrib/openstack/templating.py'
2691--- hooks/charmhelpers/contrib/openstack/templating.py 2013-11-26 17:12:54 +0000
2692+++ hooks/charmhelpers/contrib/openstack/templating.py 1970-01-01 00:00:00 +0000
2693@@ -1,280 +0,0 @@
2694-import os
2695-
2696-from charmhelpers.fetch import apt_install
2697-
2698-from charmhelpers.core.hookenv import (
2699- log,
2700- ERROR,
2701- INFO
2702-)
2703-
2704-from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
2705-
2706-try:
2707- from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
2708-except ImportError:
2709- # python-jinja2 may not be installed yet, or we're running unittests.
2710- FileSystemLoader = ChoiceLoader = Environment = exceptions = None
2711-
2712-
2713-class OSConfigException(Exception):
2714- pass
2715-
2716-
2717-def get_loader(templates_dir, os_release):
2718- """
2719- Create a jinja2.ChoiceLoader containing template dirs up to
2720- and including os_release. If a release-specific template directory
2721- is missing at templates_dir, it will be omitted from the loader.
2722- templates_dir is added to the bottom of the search list as a base
2723- loading dir.
2724-
2725- A charm may also ship a templates dir with this module
2726- and it will be appended to the bottom of the search list, eg:
2727- hooks/charmhelpers/contrib/openstack/templates.
2728-
2729- :param templates_dir: str: Base template directory containing release
2730- sub-directories.
2731- :param os_release : str: OpenStack release codename to construct template
2732- loader.
2733-
2734- :returns : jinja2.ChoiceLoader constructed with a list of
2735- jinja2.FilesystemLoaders, ordered in descending
2736- order by OpenStack release.
2737- """
2738- tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
2739- for rel in OPENSTACK_CODENAMES.itervalues()]
2740-
2741- if not os.path.isdir(templates_dir):
2742- log('Templates directory not found @ %s.' % templates_dir,
2743- level=ERROR)
2744- raise OSConfigException
2745-
2746- # the bottom contains templates_dir and possibly a common templates dir
2747- # shipped with the helper.
2748- loaders = [FileSystemLoader(templates_dir)]
2749- helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
2750- if os.path.isdir(helper_templates):
2751- loaders.append(FileSystemLoader(helper_templates))
2752-
2753- for rel, tmpl_dir in tmpl_dirs:
2754- if os.path.isdir(tmpl_dir):
2755- loaders.insert(0, FileSystemLoader(tmpl_dir))
2756- if rel == os_release:
2757- break
2758- log('Creating choice loader with dirs: %s' %
2759- [l.searchpath for l in loaders], level=INFO)
2760- return ChoiceLoader(loaders)
2761-
2762-
2763-class OSConfigTemplate(object):
2764- """
2765- Associates a config file template with a list of context generators.
2766- Responsible for constructing a template context based on those generators.
2767- """
2768- def __init__(self, config_file, contexts):
2769- self.config_file = config_file
2770-
2771- if hasattr(contexts, '__call__'):
2772- self.contexts = [contexts]
2773- else:
2774- self.contexts = contexts
2775-
2776- self._complete_contexts = []
2777-
2778- def context(self):
2779- ctxt = {}
2780- for context in self.contexts:
2781- _ctxt = context()
2782- if _ctxt:
2783- ctxt.update(_ctxt)
2784- # track interfaces for every complete context.
2785- [self._complete_contexts.append(interface)
2786- for interface in context.interfaces
2787- if interface not in self._complete_contexts]
2788- return ctxt
2789-
2790- def complete_contexts(self):
2791- '''
2792- Return a list of interfaces that have satisfied contexts.
2793- '''
2794- if self._complete_contexts:
2795- return self._complete_contexts
2796- self.context()
2797- return self._complete_contexts
2798-
2799-
2800-class OSConfigRenderer(object):
2801- """
2802- This class provides a common templating system to be used by OpenStack
2803- charms. It is intended to help charms share common code and templates,
2804- and ease the burden of managing config templates across multiple OpenStack
2805- releases.
2806-
2807- Basic usage:
2808- # import some common context generators from charmhelpers
2809- from charmhelpers.contrib.openstack import context
2810-
2811- # Create a renderer object for a specific OS release.
2812- configs = OSConfigRenderer(templates_dir='/tmp/templates',
2813- openstack_release='folsom')
2814- # register some config files with context generators.
2815- configs.register(config_file='/etc/nova/nova.conf',
2816- contexts=[context.SharedDBContext(),
2817- context.AMQPContext()])
2818- configs.register(config_file='/etc/nova/api-paste.ini',
2819- contexts=[context.IdentityServiceContext()])
2820- configs.register(config_file='/etc/haproxy/haproxy.conf',
2821- contexts=[context.HAProxyContext()])
2822- # write out a single config
2823- configs.write('/etc/nova/nova.conf')
2824- # write out all registered configs
2825- configs.write_all()
2826-
2827- Details:
2828-
2829- OpenStack Releases and template loading
2830- ---------------------------------------
2831- When the object is instantiated, it is associated with a specific OS
2832- release. This dictates how the template loader will be constructed.
2833-
2834- The constructed loader attempts to load the template from several places
2835- in the following order:
2836- - from the most recent OS release-specific template dir (if one exists)
2837- - the base templates_dir
2838- - a template directory shipped in the charm with this helper file.
2839-
2840-
2841- For the example above, '/tmp/templates' contains the following structure:
2842- /tmp/templates/nova.conf
2843- /tmp/templates/api-paste.ini
2844- /tmp/templates/grizzly/api-paste.ini
2845- /tmp/templates/havana/api-paste.ini
2846-
2847- Since it was registered with the grizzly release, it first searches
2848- the grizzly directory for nova.conf, then the templates dir.
2849-
2850- When writing api-paste.ini, it will find the template in the grizzly
2851- directory.
2852-
2853- If the object were created with folsom, it would fall back to the
2854- base templates dir for its api-paste.ini template.
2855-
2856- This system should help manage changes in config files through
2857- openstack releases, allowing charms to fall back to the most recently
2858- updated config template for a given release
2859-
2860- The haproxy.conf, since it is not shipped in the templates dir, will
2861- be loaded from the module directory's template directory, eg
2862- $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
2863- us to ship common templates (haproxy, apache) with the helpers.
2864-
2865- Context generators
2866- ---------------------------------------
2867- Context generators are used to generate template contexts during hook
2868- execution. Doing so may require inspecting service relations, charm
2869- config, etc. When registered, a config file is associated with a list
2870- of generators. When a template is rendered and written, all context
2871- generates are called in a chain to generate the context dictionary
2872- generators are called in a chain to generate the context dictionary
2873- """
2874- def __init__(self, templates_dir, openstack_release):
2875- if not os.path.isdir(templates_dir):
2876- log('Could not locate templates dir %s' % templates_dir,
2877- level=ERROR)
2878- raise OSConfigException
2879-
2880- self.templates_dir = templates_dir
2881- self.openstack_release = openstack_release
2882- self.templates = {}
2883- self._tmpl_env = None
2884-
2885- if None in [Environment, ChoiceLoader, FileSystemLoader]:
2886- # if this code is running, the object is created pre-install hook.
2887- # jinja2 shouldn't get touched until the module is reloaded on next
2888- # hook execution, with proper jinja2 bits successfully imported.
2889- apt_install('python-jinja2')
2890-
2891- def register(self, config_file, contexts):
2892- """
2893- Register a config file with a list of context generators to be called
2894- during rendering.
2895- """
2896- self.templates[config_file] = OSConfigTemplate(config_file=config_file,
2897- contexts=contexts)
2898- log('Registered config file: %s' % config_file, level=INFO)
2899-
2900- def _get_tmpl_env(self):
2901- if not self._tmpl_env:
2902- loader = get_loader(self.templates_dir, self.openstack_release)
2903- self._tmpl_env = Environment(loader=loader)
2904-
2905- def _get_template(self, template):
2906- self._get_tmpl_env()
2907- template = self._tmpl_env.get_template(template)
2908- log('Loaded template from %s' % template.filename, level=INFO)
2909- return template
2910-
2911- def render(self, config_file):
2912- if config_file not in self.templates:
2913- log('Config not registered: %s' % config_file, level=ERROR)
2914- raise OSConfigException
2915- ctxt = self.templates[config_file].context()
2916-
2917- _tmpl = os.path.basename(config_file)
2918- try:
2919- template = self._get_template(_tmpl)
2920- except exceptions.TemplateNotFound:
2921- # if no template is found with basename, try looking for it
2922- # using a munged full path, eg:
2923- # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
2924- _tmpl = '_'.join(config_file.split('/')[1:])
2925- try:
2926- template = self._get_template(_tmpl)
2927- except exceptions.TemplateNotFound as e:
2928- log('Could not load template from %s by %s or %s.' %
2929- (self.templates_dir, os.path.basename(config_file), _tmpl),
2930- level=ERROR)
2931- raise e
2932-
2933- log('Rendering from template: %s' % _tmpl, level=INFO)
2934- return template.render(ctxt)
2935-
2936- def write(self, config_file):
2937- """
2938- Write a single config file, raises if config file is not registered.
2939- """
2940- if config_file not in self.templates:
2941- log('Config not registered: %s' % config_file, level=ERROR)
2942- raise OSConfigException
2943-
2944- _out = self.render(config_file)
2945-
2946- with open(config_file, 'wb') as out:
2947- out.write(_out)
2948-
2949- log('Wrote template %s.' % config_file, level=INFO)
2950-
2951- def write_all(self):
2952- """
2953- Write out all registered config files.
2954- """
2955- [self.write(k) for k in self.templates.iterkeys()]
2956-
2957- def set_release(self, openstack_release):
2958- """
2959- Resets the template environment and generates a new template loader
2960- based on the new openstack release.
2961- """
2962- self._tmpl_env = None
2963- self.openstack_release = openstack_release
2964- self._get_tmpl_env()
2965-
2966- def complete_contexts(self):
2967- '''
2968- Returns a list of context interfaces that yield a complete context.
2969- '''
2970- interfaces = []
2971- [interfaces.extend(i.complete_contexts())
2972- for i in self.templates.itervalues()]
2973- return interfaces
2974
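
For reference, a self-contained sketch of the release-ordered lookup that get_loader() builds, written with plain jinja2 (directory paths are illustrative). Loaders earlier in the list win, so a release-specific template shadows the base copy:

    from jinja2 import ChoiceLoader, Environment, FileSystemLoader

    loader = ChoiceLoader([
        FileSystemLoader('/tmp/templates/grizzly'),  # most specific, tried first
        FileSystemLoader('/tmp/templates/folsom'),
        FileSystemLoader('/tmp/templates'),          # base dir, tried last
    ])
    env = Environment(loader=loader)
    template = env.get_template('api-paste.ini')     # grizzly copy if present
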
2975=== removed file 'hooks/charmhelpers/contrib/openstack/utils.py'
2976--- hooks/charmhelpers/contrib/openstack/utils.py 2014-05-09 20:11:59 +0000
2977+++ hooks/charmhelpers/contrib/openstack/utils.py 1970-01-01 00:00:00 +0000
2978@@ -1,450 +0,0 @@
2979-#!/usr/bin/python
2980-
2981-# Common python helper functions used for OpenStack charms.
2982-from collections import OrderedDict
2983-
2984-import apt_pkg as apt
2985-import subprocess
2986-import os
2987-import socket
2988-import sys
2989-
2990-from charmhelpers.core.hookenv import (
2991- config,
2992- log as juju_log,
2993- charm_dir,
2994- ERROR,
2995- INFO
2996-)
2997-
2998-from charmhelpers.contrib.storage.linux.lvm import (
2999- deactivate_lvm_volume_group,
3000- is_lvm_physical_volume,
3001- remove_lvm_physical_volume,
3002-)
3003-
3004-from charmhelpers.core.host import lsb_release, mounts, umount
3005-from charmhelpers.fetch import apt_install
3006-from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
3007-from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
3008-
3009-CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
3010-CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
3011-
3012-DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
3013- 'restricted main multiverse universe')
3014-
3015-
3016-UBUNTU_OPENSTACK_RELEASE = OrderedDict([
3017- ('oneiric', 'diablo'),
3018- ('precise', 'essex'),
3019- ('quantal', 'folsom'),
3020- ('raring', 'grizzly'),
3021- ('saucy', 'havana'),
3022- ('trusty', 'icehouse')
3023-])
3024-
3025-
3026-OPENSTACK_CODENAMES = OrderedDict([
3027- ('2011.2', 'diablo'),
3028- ('2012.1', 'essex'),
3029- ('2012.2', 'folsom'),
3030- ('2013.1', 'grizzly'),
3031- ('2013.2', 'havana'),
3032- ('2014.1', 'icehouse'),
3033-])
3034-
3035-# The ugly duckling
3036-SWIFT_CODENAMES = OrderedDict([
3037- ('1.4.3', 'diablo'),
3038- ('1.4.8', 'essex'),
3039- ('1.7.4', 'folsom'),
3040- ('1.8.0', 'grizzly'),
3041- ('1.7.7', 'grizzly'),
3042- ('1.7.6', 'grizzly'),
3043- ('1.10.0', 'havana'),
3044- ('1.9.1', 'havana'),
3045- ('1.9.0', 'havana'),
3046- ('1.13.1', 'icehouse'),
3047- ('1.13.0', 'icehouse'),
3048- ('1.12.0', 'icehouse'),
3049- ('1.11.0', 'icehouse'),
3050-])
3051-
3052-DEFAULT_LOOPBACK_SIZE = '5G'
3053-
3054-
3055-def error_out(msg):
3056- juju_log("FATAL ERROR: %s" % msg, level='ERROR')
3057- sys.exit(1)
3058-
3059-
3060-def get_os_codename_install_source(src):
3061- '''Derive OpenStack release codename from a given installation source.'''
3062- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
3063- rel = ''
3064- if src in ['distro', 'distro-proposed']:
3065- try:
3066- rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
3067- except KeyError:
3068- e = 'Could not derive openstack release for '\
3069- 'this Ubuntu release: %s' % ubuntu_rel
3070- error_out(e)
3071- return rel
3072-
3073- if src.startswith('cloud:'):
3074- ca_rel = src.split(':')[1]
3075- ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
3076- return ca_rel
3077-
3078- # Best guess match based on deb string provided
3079- if src.startswith('deb') or src.startswith('ppa'):
3080- for k, v in OPENSTACK_CODENAMES.iteritems():
3081- if v in src:
3082- return v
3083-
3084-
3085-def get_os_version_install_source(src):
3086- codename = get_os_codename_install_source(src)
3087- return get_os_version_codename(codename)
3088-
3089-
3090-def get_os_codename_version(vers):
3091- '''Determine OpenStack codename from version number.'''
3092- try:
3093- return OPENSTACK_CODENAMES[vers]
3094- except KeyError:
3095- e = 'Could not determine OpenStack codename for version %s' % vers
3096- error_out(e)
3097-
3098-
3099-def get_os_version_codename(codename):
3100- '''Determine OpenStack version number from codename.'''
3101- for k, v in OPENSTACK_CODENAMES.iteritems():
3102- if v == codename:
3103- return k
3104- e = 'Could not derive OpenStack version for '\
3105- 'codename: %s' % codename
3106- error_out(e)
3107-
3108-
3109-def get_os_codename_package(package, fatal=True):
3110- '''Derive OpenStack release codename from an installed package.'''
3111- apt.init()
3112- cache = apt.Cache()
3113-
3114- try:
3115- pkg = cache[package]
3116- except:
3117- if not fatal:
3118- return None
3119- # the package is unknown to the current apt cache.
3120- e = 'Could not determine version of package with no installation '\
3121- 'candidate: %s' % package
3122- error_out(e)
3123-
3124- if not pkg.current_ver:
3125- if not fatal:
3126- return None
3127- # package is known, but no version is currently installed.
3128- e = 'Could not determine version of uninstalled package: %s' % package
3129- error_out(e)
3130-
3131- vers = apt.upstream_version(pkg.current_ver.ver_str)
3132-
3133- try:
3134- if 'swift' in pkg.name:
3135- swift_vers = vers[:5]
3136- if swift_vers not in SWIFT_CODENAMES:
3137- # Deal with 1.10.0 upward
3138- swift_vers = vers[:6]
3139- return SWIFT_CODENAMES[swift_vers]
3140- else:
3141- vers = vers[:6]
3142- return OPENSTACK_CODENAMES[vers]
3143- except KeyError:
3144- e = 'Could not determine OpenStack codename for version %s' % vers
3145- error_out(e)
3146-
3147-
3148-def get_os_version_package(pkg, fatal=True):
3149- '''Derive OpenStack version number from an installed package.'''
3150- codename = get_os_codename_package(pkg, fatal=fatal)
3151-
3152- if not codename:
3153- return None
3154-
3155- if 'swift' in pkg:
3156- vers_map = SWIFT_CODENAMES
3157- else:
3158- vers_map = OPENSTACK_CODENAMES
3159-
3160- for version, cname in vers_map.iteritems():
3161- if cname == codename:
3162- return version
3163- #e = "Could not determine OpenStack version for package: %s" % pkg
3164- #error_out(e)
3165-
3166-
3167-os_rel = None
3168-
3169-
3170-def os_release(package, base='essex'):
3171- '''
3172- Returns OpenStack release codename from a cached global.
3173- If the codename can not be determined from either an installed package or
3174- the installation source, the earliest release supported by the charm should
3175- be returned.
3176- '''
3177- global os_rel
3178- if os_rel:
3179- return os_rel
3180- os_rel = (get_os_codename_package(package, fatal=False) or
3181- get_os_codename_install_source(config('openstack-origin')) or
3182- base)
3183- return os_rel
3184-
3185-
3186-def import_key(keyid):
3187- cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
3188- "--recv-keys %s" % keyid
3189- try:
3190- subprocess.check_call(cmd.split(' '))
3191- except subprocess.CalledProcessError:
3192- error_out("Error importing repo key %s" % keyid)
3193-
3194-
3195-def configure_installation_source(rel):
3196- '''Configure apt installation source.'''
3197- if rel == 'distro':
3198- return
3199- elif rel == 'distro-proposed':
3200- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
3201- with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
3202- f.write(DISTRO_PROPOSED % ubuntu_rel)
3203- elif rel[:4] == "ppa:":
3204- src = rel
3205- subprocess.check_call(["add-apt-repository", "-y", src])
3206- elif rel[:3] == "deb":
3207- l = len(rel.split('|'))
3208- if l == 2:
3209- src, key = rel.split('|')
3210- juju_log("Importing PPA key from keyserver for %s" % src)
3211- import_key(key)
3212- elif l == 1:
3213- src = rel
3214- with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
3215- f.write(src)
3216- elif rel[:6] == 'cloud:':
3217- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
3218- rel = rel.split(':')[1]
3219- u_rel = rel.split('-')[0]
3220- ca_rel = rel.split('-')[1]
3221-
3222- if u_rel != ubuntu_rel:
3223- e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
3224- 'version (%s)' % (ca_rel, ubuntu_rel)
3225- error_out(e)
3226-
3227- if 'staging' in ca_rel:
3228- # staging is just a regular PPA.
3229- os_rel = ca_rel.split('/')[0]
3230- ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
3231- cmd = 'add-apt-repository -y %s' % ppa
3232- subprocess.check_call(cmd.split(' '))
3233- return
3234-
3235- # map charm config options to actual archive pockets.
3236- pockets = {
3237- 'folsom': 'precise-updates/folsom',
3238- 'folsom/updates': 'precise-updates/folsom',
3239- 'folsom/proposed': 'precise-proposed/folsom',
3240- 'grizzly': 'precise-updates/grizzly',
3241- 'grizzly/updates': 'precise-updates/grizzly',
3242- 'grizzly/proposed': 'precise-proposed/grizzly',
3243- 'havana': 'precise-updates/havana',
3244- 'havana/updates': 'precise-updates/havana',
3245- 'havana/proposed': 'precise-proposed/havana',
3246- 'icehouse': 'precise-updates/icehouse',
3247- 'icehouse/updates': 'precise-updates/icehouse',
3248- 'icehouse/proposed': 'precise-proposed/icehouse',
3249- }
3250-
3251- try:
3252- pocket = pockets[ca_rel]
3253- except KeyError:
3254- e = 'Invalid Cloud Archive release specified: %s' % rel
3255- error_out(e)
3256-
3257- src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
3258- apt_install('ubuntu-cloud-keyring', fatal=True)
3259-
3260- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
3261- f.write(src)
3262- else:
3263- error_out("Invalid openstack-release specified: %s" % rel)
3264-
3265-
3266-def save_script_rc(script_path="scripts/scriptrc", **env_vars):
3267- """
3268- Write an rc file in the charm-delivered directory containing
3269- exported environment variables provided by env_vars. Any charm scripts run
3270- outside the juju hook environment can source this scriptrc to obtain
3271- updated config information necessary to perform health checks or
3272- service changes.
3273- """
3274- juju_rc_path = "%s/%s" % (charm_dir(), script_path)
3275- if not os.path.exists(os.path.dirname(juju_rc_path)):
3276- os.mkdir(os.path.dirname(juju_rc_path))
3277- with open(juju_rc_path, 'wb') as rc_script:
3278- rc_script.write(
3279- "#!/bin/bash\n")
3280- [rc_script.write('export %s=%s\n' % (u, p))
3281- for u, p in env_vars.iteritems() if u != "script_path"]
3282-
3283-
3284-def openstack_upgrade_available(package):
3285- """
3286- Determines if an OpenStack upgrade is available from installation
3287- source, based on version of installed package.
3288-
3289- :param package: str: Name of installed package.
3290-
3291- :returns: bool: : Returns True if configured installation source offers
3292- a newer version of package.
3293-
3294- """
3295-
3296- src = config('openstack-origin')
3297- cur_vers = get_os_version_package(package)
3298- available_vers = get_os_version_install_source(src)
3299- apt.init()
3300- return apt.version_compare(available_vers, cur_vers) == 1
3301-
3302-
3303-def ensure_block_device(block_device):
3304- '''
3305- Confirm block_device, create as loopback if necessary.
3306-
3307- :param block_device: str: Full path of block device to ensure.
3308-
3309- :returns: str: Full path of ensured block device.
3310- '''
3311- _none = ['None', 'none', None]
3312- if (block_device in _none):
3313- error_out('prepare_storage(): Missing required input: '
3314- 'block_device=%s.' % block_device, level=ERROR)
3315-
3316- if block_device.startswith('/dev/'):
3317- bdev = block_device
3318- elif block_device.startswith('/'):
3319- _bd = block_device.split('|')
3320- if len(_bd) == 2:
3321- bdev, size = _bd
3322- else:
3323- bdev = block_device
3324- size = DEFAULT_LOOPBACK_SIZE
3325- bdev = ensure_loopback_device(bdev, size)
3326- else:
3327- bdev = '/dev/%s' % block_device
3328-
3329- if not is_block_device(bdev):
3330- error_out('Failed to locate valid block device at %s' % bdev,
3331- level=ERROR)
3332-
3333- return bdev
3334-
3335-
3336-def clean_storage(block_device):
3337- '''
3338- Ensures a block device is clean. That is:
3339- - unmounted
3340- - any lvm volume groups are deactivated
3341- - any lvm physical device signatures removed
3342- - partition table wiped
3343-
3344- :param block_device: str: Full path to block device to clean.
3345- '''
3346- for mp, d in mounts():
3347- if d == block_device:
3348- juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
3349- (d, mp), level=INFO)
3350- umount(mp, persist=True)
3351-
3352- if is_lvm_physical_volume(block_device):
3353- deactivate_lvm_volume_group(block_device)
3354- remove_lvm_physical_volume(block_device)
3355- else:
3356- zap_disk(block_device)
3357-
3358-
3359-def is_ip(address):
3360- """
3361- Returns True if address is a valid IP address.
3362- """
3363- try:
3364- # Test to see if already an IPv4 address
3365- socket.inet_aton(address)
3366- return True
3367- except socket.error:
3368- return False
3369-
3370-
3371-def ns_query(address):
3372- try:
3373- import dns.resolver
3374- except ImportError:
3375- apt_install('python-dnspython')
3376- import dns.resolver
3377-
3378- if isinstance(address, dns.name.Name):
3379- rtype = 'PTR'
3380- elif isinstance(address, basestring):
3381- rtype = 'A'
3382- else:
3383- return None
3384-
3385- answers = dns.resolver.query(address, rtype)
3386- if answers:
3387- return str(answers[0])
3388- return None
3389-
3390-
3391-def get_host_ip(hostname):
3392- """
3393- Resolves the IP for a given hostname, or returns
3394- the input if it is already an IP.
3395- """
3396- if is_ip(hostname):
3397- return hostname
3398-
3399- return ns_query(hostname)
3400-
3401-
3402-def get_hostname(address, fqdn=True):
3403- """
3404- Resolves hostname for given IP, or returns the input
3405- if it is already a hostname.
3406- """
3407- if is_ip(address):
3408- try:
3409- import dns.reversename
3410- except ImportError:
3411- apt_install('python-dnspython')
3412- import dns.reversename
3413-
3414- rev = dns.reversename.from_address(address)
3415- result = ns_query(rev)
3416- if not result:
3417- return None
3418- else:
3419- result = address
3420-
3421- if fqdn:
3422- # strip trailing .
3423- if result.endswith('.'):
3424- return result[:-1]
3425- else:
3426- return result
3427- else:
3428- return result.split('.')[0]
3429
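
For reference, the is_ip()/get_host_ip() pair above is the only hostname-resolution logic the charm loses with openstack/utils.py. A minimal self-contained sketch of the same behaviour, using the stdlib resolver instead of the removed python-dnspython path (lookup() is a hypothetical name, not a charmhelpers API):

    import socket

    def is_ip(address):
        # inet_aton accepts dotted-quad IPv4 addresses only
        try:
            socket.inet_aton(address)
            return True
        except socket.error:
            return False

    def lookup(hostname):
        # Return the input untouched if it is already an IPv4 address,
        # otherwise fall back to the system resolver.
        if is_ip(hostname):
            return hostname
        return socket.gethostbyname(hostname)

    print(lookup("127.0.0.1"))  # -> 127.0.0.1
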
3430=== removed directory 'hooks/charmhelpers/contrib/peerstorage'
3431=== removed file 'hooks/charmhelpers/contrib/peerstorage/__init__.py'
3432--- hooks/charmhelpers/contrib/peerstorage/__init__.py 2014-05-09 20:11:59 +0000
3433+++ hooks/charmhelpers/contrib/peerstorage/__init__.py 1970-01-01 00:00:00 +0000
3434@@ -1,83 +0,0 @@
3435-from charmhelpers.core.hookenv import (
3436- relation_ids,
3437- relation_get,
3438- local_unit,
3439- relation_set,
3440-)
3441-
3442-"""
3443-This helper provides functions to support use of a peer relation
3444-for basic key/value storage, with the added benefit that all storage
3445-can be replicated across peer units, so this is really useful for
3446-services that issue usernames/passwords to remote services.
3447-
3448-def shared_db_changed():
3449- # Only the lead unit should create passwords
3450- if not is_leader():
3451- return
3452- username = relation_get('username')
3453- key = '{}.password'.format(username)
3454- # Attempt to retrieve any existing password for this user
3455- password = peer_retrieve(key)
3456- if password is None:
3457- # New user, create password and store
3458- password = pwgen(length=64)
3459- peer_store(key, password)
3460- create_access(username, password)
3461- relation_set(password=password)
3462-
3463-
3464-def cluster_changed():
3465-    # Echo any relation data other than *-address
3466- # back onto the peer relation so all units have
3467- # all *.password keys stored on their local relation
3468- # for later retrieval.
3469- peer_echo()
3470-
3471-"""
3472-
3473-
3474-def peer_retrieve(key, relation_name='cluster'):
3475- """ Retrieve a named key from peer relation relation_name """
3476- cluster_rels = relation_ids(relation_name)
3477- if len(cluster_rels) > 0:
3478- cluster_rid = cluster_rels[0]
3479- return relation_get(attribute=key, rid=cluster_rid,
3480- unit=local_unit())
3481- else:
3482-        raise ValueError('Unable to detect '
3483-                         'peer relation {}'.format(relation_name))
3484-
3485-
3486-def peer_store(key, value, relation_name='cluster'):
3487- """ Store the key/value pair on the named peer relation relation_name """
3488- cluster_rels = relation_ids(relation_name)
3489- if len(cluster_rels) > 0:
3490- cluster_rid = cluster_rels[0]
3491- relation_set(relation_id=cluster_rid,
3492- relation_settings={key: value})
3493- else:
3494- raise ValueError('Unable to detect '
3495- 'peer relation {}'.format(relation_name))
3496-
3497-
3498-def peer_echo(includes=None):
3499- """Echo filtered attributes back onto the same relation for storage
3500-
3501- Note that this helper must only be called within a peer relation
3502- changed hook
3503- """
3504- rdata = relation_get()
3505- echo_data = {}
3506- if includes is None:
3507- echo_data = rdata.copy()
3508- for ex in ['private-address', 'public-address']:
3509- if ex in echo_data:
3510- echo_data.pop(ex)
3511- else:
3512- for attribute, value in rdata.iteritems():
3513- for include in includes:
3514- if include in attribute:
3515- echo_data[attribute] = value
3516- if len(echo_data) > 0:
3517- relation_set(relation_settings=echo_data)
3518
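
The store-or-create pattern described in the peerstorage docstring above can be exercised without a Juju environment. A runnable mock, standing a plain dict in for the peer relation (relation, pwgen and ensure_password are illustrative stand-ins, not charmhelpers APIs):

    import binascii
    import os

    relation = {}  # stands in for the 'cluster' peer relation bucket

    def peer_store(key, value):
        relation[key] = value

    def peer_retrieve(key):
        return relation.get(key)

    def pwgen(length=64):
        return binascii.hexlify(os.urandom(length // 2)).decode()

    def ensure_password(username):
        # Retrieve an existing password for this user, or create and
        # store one the first time the user is seen.
        key = '{}.password'.format(username)
        password = peer_retrieve(key)
        if password is None:
            password = pwgen()
            peer_store(key, password)
        return password

    first = ensure_password('dbuser')
    assert ensure_password('dbuser') == first  # stable on later calls
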
3519=== removed directory 'hooks/charmhelpers/contrib/python'
3520=== removed file 'hooks/charmhelpers/contrib/python/__init__.py'
3521=== removed file 'hooks/charmhelpers/contrib/python/packages.py'
3522--- hooks/charmhelpers/contrib/python/packages.py 2014-05-09 20:11:59 +0000
3523+++ hooks/charmhelpers/contrib/python/packages.py 1970-01-01 00:00:00 +0000
3524@@ -1,76 +0,0 @@
3525-#!/usr/bin/env python
3526-# coding: utf-8
3527-
3528-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
3529-
3530-from charmhelpers.fetch import apt_install
3531-from charmhelpers.core.hookenv import log
3532-
3533-try:
3534- from pip import main as pip_execute
3535-except ImportError:
3536- apt_install('python-pip')
3537- from pip import main as pip_execute
3538-
3539-
3540-def parse_options(given, available):
3541-    """Yield --key=value flags for the given options present in available"""
3542- for key, value in given.items():
3543- if key in available:
3544- yield "--{0}={1}".format(key, value)
3545-
3546-
3547-def pip_install_requirements(requirements, **options):
3548-    """Install a requirements file."""
3549- command = ["install"]
3550-
3551- available_options = ('proxy', 'src', 'log', )
3552- for option in parse_options(options, available_options):
3553- command.append(option)
3554-
3555- command.append("-r {0}".format(requirements))
3556- log("Installing from file: {} with options: {}".format(requirements,
3557- command))
3558- pip_execute(command)
3559-
3560-
3561-def pip_install(package, fatal=False, **options):
3562- """Install a python package"""
3563- command = ["install"]
3564-
3565- available_options = ('proxy', 'src', 'log', "index-url", )
3566- for option in parse_options(options, available_options):
3567- command.append(option)
3568-
3569- if isinstance(package, list):
3570- command.extend(package)
3571- else:
3572- command.append(package)
3573-
3574- log("Installing {} package with options: {}".format(package,
3575- command))
3576- pip_execute(command)
3577-
3578-
3579-def pip_uninstall(package, **options):
3580- """Uninstall a python package"""
3581- command = ["uninstall", "-q", "-y"]
3582-
3583- available_options = ('proxy', 'log', )
3584- for option in parse_options(options, available_options):
3585- command.append(option)
3586-
3587- if isinstance(package, list):
3588- command.extend(package)
3589- else:
3590- command.append(package)
3591-
3592- log("Uninstalling {} package with options: {}".format(package,
3593- command))
3594- pip_execute(command)
3595-
3596-
3597-def pip_list():
3598-    """Returns the list of currently installed python packages
3599- """
3600- return pip_execute(["list"])
3601
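
The option handling shared by the pip helpers above is just a whitelist filter. A self-contained illustration of what parse_options() yields:

    def parse_options(given, available):
        """Yield --key=value flags for options present in the whitelist."""
        for key, value in given.items():
            if key in available:
                yield "--{0}={1}".format(key, value)

    opts = {'proxy': 'http://squid.internal:3128', 'verbose': True}
    print(list(parse_options(opts, ('proxy', 'src', 'log'))))
    # -> ['--proxy=http://squid.internal:3128']  ('verbose' is dropped)
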
3602=== removed file 'hooks/charmhelpers/contrib/python/version.py'
3603--- hooks/charmhelpers/contrib/python/version.py 2014-05-09 20:11:59 +0000
3604+++ hooks/charmhelpers/contrib/python/version.py 1970-01-01 00:00:00 +0000
3605@@ -1,18 +0,0 @@
3606-#!/usr/bin/env python
3607-# coding: utf-8
3608-
3609-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
3610-
3611-import sys
3612-
3613-
3614-def current_version():
3615- """Current system python version"""
3616- return sys.version_info
3617-
3618-
3619-def current_version_string():
3620- """Current system python version as string major.minor.micro"""
3621- return "{0}.{1}.{2}".format(sys.version_info.major,
3622- sys.version_info.minor,
3623- sys.version_info.micro)
3624
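
current_version_string() above reduces to a one-line stdlib expression, shown here for reference:

    import sys

    # major.minor.micro, e.g. '2.7.6' on a trusty system python
    print("{0}.{1}.{2}".format(*sys.version_info[:3]))
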
3625=== removed directory 'hooks/charmhelpers/contrib/saltstack'
3626=== removed file 'hooks/charmhelpers/contrib/saltstack/__init__.py'
3627--- hooks/charmhelpers/contrib/saltstack/__init__.py 2013-11-26 17:12:54 +0000
3628+++ hooks/charmhelpers/contrib/saltstack/__init__.py 1970-01-01 00:00:00 +0000
3629@@ -1,102 +0,0 @@
3630-"""Charm Helpers saltstack - declare the state of your machines.
3631-
3632-This helper enables you to declare your machine state, rather than
3633-program it procedurally (and have to test each change to your procedures).
3634-Your install hook can be as simple as:
3635-
3636-{{{
3637-from charmhelpers.contrib.saltstack import (
3638- install_salt_support,
3639- update_machine_state,
3640-)
3641-
3642-
3643-def install():
3644- install_salt_support()
3645- update_machine_state('machine_states/dependencies.yaml')
3646- update_machine_state('machine_states/installed.yaml')
3647-}}}
3648-
3649-and won't need to change (nor will its tests) when you change the machine
3650-state.
3651-
3652-It uses a python package called salt-minion, which allows various formats for
3653-specifying resources, such as:
3654-
3655-{{{
3656-/srv/{{ basedir }}:
3657- file.directory:
3658- - group: ubunet
3659- - user: ubunet
3660- - require:
3661- - user: ubunet
3662- - recurse:
3663- - user
3664- - group
3665-
3666-ubunet:
3667- group.present:
3668- - gid: 1500
3669- user.present:
3670- - uid: 1500
3671- - gid: 1500
3672- - createhome: False
3673- - require:
3674- - group: ubunet
3675-}}}
3676-
3677-The docs for all the different state definitions are at:
3678- http://docs.saltstack.com/ref/states/all/
3679-
3680-
3681-TODO:
3682- * Add test helpers which will ensure that machine state definitions
3683- are functionally (but not necessarily logically) correct (ie. getting
3684-    salt to parse all state defs).
3685- * Add a link to a public bootstrap charm example / blogpost.
3686- * Find a way to obviate the need to use the grains['charm_dir'] syntax
3687- in templates.
3688-"""
3689-# Copyright 2013 Canonical Ltd.
3690-#
3691-# Authors:
3692-# Charm Helpers Developers <juju@lists.ubuntu.com>
3693-import subprocess
3694-
3695-import charmhelpers.contrib.templating.contexts
3696-import charmhelpers.core.host
3697-import charmhelpers.core.hookenv
3697-import charmhelpers.fetch
3698-
3699-
3700-salt_grains_path = '/etc/salt/grains'
3701-
3702-
3703-def install_salt_support(from_ppa=True):
3704- """Installs the salt-minion helper for machine state.
3705-
3706- By default the salt-minion package is installed from
3707- the saltstack PPA. If from_ppa is False you must ensure
3708- that the salt-minion package is available in the apt cache.
3709- """
3710- if from_ppa:
3711- subprocess.check_call([
3712- '/usr/bin/add-apt-repository',
3713- '--yes',
3714- 'ppa:saltstack/salt',
3715- ])
3716- subprocess.check_call(['/usr/bin/apt-get', 'update'])
3717- # We install salt-common as salt-minion would run the salt-minion
3718- # daemon.
3719- charmhelpers.fetch.apt_install('salt-common')
3720-
3721-
3722-def update_machine_state(state_path):
3723- """Update the machine state using the provided state declaration."""
3724- charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
3725- salt_grains_path)
3726- subprocess.check_call([
3727- 'salt-call',
3728- '--local',
3729- 'state.template',
3730- state_path,
3731- ])
3732
3733=== removed directory 'hooks/charmhelpers/contrib/ssl'
3734=== removed file 'hooks/charmhelpers/contrib/ssl/__init__.py'
3735--- hooks/charmhelpers/contrib/ssl/__init__.py 2013-11-26 17:12:54 +0000
3736+++ hooks/charmhelpers/contrib/ssl/__init__.py 1970-01-01 00:00:00 +0000
3737@@ -1,78 +0,0 @@
3738-import subprocess
3739-from charmhelpers.core import hookenv
3740-
3741-
3742-def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=None, cn=None):
3743- """Generate selfsigned SSL keypair
3744-
3745- You must provide one of the 3 optional arguments:
3746- config, subject or cn
3747- If more than one is provided the leftmost will be used
3748-
3749- Arguments:
3750- keyfile -- (required) full path to the keyfile to be created
3751- certfile -- (required) full path to the certfile to be created
3752- keysize -- (optional) SSL key length
3753- config -- (optional) openssl configuration file
3754- subject -- (optional) dictionary with SSL subject variables
3755-    cn -- (optional) certificate common name
3756-
3757- Required keys in subject dict:
3758-    cn -- Common name (eg. FQDN)
3759-
3760- Optional keys in subject dict
3761- country -- Country Name (2 letter code)
3762- state -- State or Province Name (full name)
3763- locality -- Locality Name (eg, city)
3764- organization -- Organization Name (eg, company)
3765- organizational_unit -- Organizational Unit Name (eg, section)
3766- email -- Email Address
3767- """
3768-
3769- cmd = []
3770- if config:
3771- cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
3772- "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
3773- "-keyout", keyfile,
3774- "-out", certfile, "-config", config]
3775- elif subject:
3776- ssl_subject = ""
3777- if "country" in subject:
3778- ssl_subject = ssl_subject + "/C={}".format(subject["country"])
3779- if "state" in subject:
3780- ssl_subject = ssl_subject + "/ST={}".format(subject["state"])
3781- if "locality" in subject:
3782- ssl_subject = ssl_subject + "/L={}".format(subject["locality"])
3783- if "organization" in subject:
3784- ssl_subject = ssl_subject + "/O={}".format(subject["organization"])
3785- if "organizational_unit" in subject:
3786- ssl_subject = ssl_subject + "/OU={}".format(subject["organizational_unit"])
3787- if "cn" in subject:
3788- ssl_subject = ssl_subject + "/CN={}".format(subject["cn"])
3789- else:
3790-            hookenv.log("When using the \"subject\" argument you must "
3791-                        "provide the \"cn\" field at the very least")
3792- return False
3793- if "email" in subject:
3794- ssl_subject = ssl_subject + "/emailAddress={}".format(subject["email"])
3795-
3796- cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
3797- "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
3798- "-keyout", keyfile,
3799- "-out", certfile, "-subj", ssl_subject]
3800- elif cn:
3801- cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
3802- "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
3803- "-keyout", keyfile,
3804- "-out", certfile, "-subj", "/CN={}".format(cn)]
3805-
3806- if not cmd:
3807-        hookenv.log("No config, subject or cn provided, "
3808- "unable to generate self signed SSL certificates")
3809- return False
3810- try:
3811- subprocess.check_call(cmd)
3812- return True
3813- except Exception as e:
3814- print "Execution of openssl command failed:\n{}".format(e)
3815- return False
3816
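
Before this branch removes it, generate_selfsigned() above would have been driven roughly like this (paths and subject values are examples only, and the openssl binary must be present):

    from charmhelpers.contrib.ssl import generate_selfsigned

    # Returns True on success, False if openssl fails or the subject
    # dict lacks the mandatory 'cn' key.
    ok = generate_selfsigned(
        keyfile='/tmp/example.key',
        certfile='/tmp/example.crt',
        keysize='2048',
        subject={'cn': 'example.internal', 'country': 'GB'},
    )
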
3817=== removed file 'hooks/charmhelpers/contrib/ssl/service.py'
3818--- hooks/charmhelpers/contrib/ssl/service.py 2014-05-09 20:11:59 +0000
3819+++ hooks/charmhelpers/contrib/ssl/service.py 1970-01-01 00:00:00 +0000
3820@@ -1,267 +0,0 @@
3821-import logging
3822-import os
3823-from os.path import join as path_join
3824-from os.path import exists
3825-import subprocess
3826-
3827-
3828-log = logging.getLogger("service_ca")
3829-
3830-logging.basicConfig(level=logging.DEBUG)
3831-
3832-STD_CERT = "standard"
3833-
3834-# MySQL server is fairly picky about cert creation
3835-# and types, so spec its creation separately for now.
3836-MYSQL_CERT = "mysql"
3837-
3838-
3839-class ServiceCA(object):
3840-
3841- default_expiry = str(365 * 2)
3842- default_ca_expiry = str(365 * 6)
3843-
3844- def __init__(self, name, ca_dir, cert_type=STD_CERT):
3845- self.name = name
3846- self.ca_dir = ca_dir
3847- self.cert_type = cert_type
3848-
3849- ###############
3850- # Hook Helper API
3851- @staticmethod
3852- def get_ca(type=STD_CERT):
3853- service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
3854- ca_path = os.path.join(os.environ['CHARM_DIR'], 'ca')
3855- ca = ServiceCA(service_name, ca_path, type)
3856- ca.init()
3857- return ca
3858-
3859- @classmethod
3860- def get_service_cert(cls, type=STD_CERT):
3861- service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
3862- ca = cls.get_ca()
3863- crt, key = ca.get_or_create_cert(service_name)
3864- return crt, key, ca.get_ca_bundle()
3865-
3866- ###############
3867-
3868- def init(self):
3869- log.debug("initializing service ca")
3870- if not exists(self.ca_dir):
3871- self._init_ca_dir(self.ca_dir)
3872- self._init_ca()
3873-
3874- @property
3875- def ca_key(self):
3876- return path_join(self.ca_dir, 'private', 'cacert.key')
3877-
3878- @property
3879- def ca_cert(self):
3880- return path_join(self.ca_dir, 'cacert.pem')
3881-
3882- @property
3883- def ca_conf(self):
3884- return path_join(self.ca_dir, 'ca.cnf')
3885-
3886- @property
3887- def signing_conf(self):
3888- return path_join(self.ca_dir, 'signing.cnf')
3889-
3890- def _init_ca_dir(self, ca_dir):
3891- os.mkdir(ca_dir)
3892- for i in ['certs', 'crl', 'newcerts', 'private']:
3893- sd = path_join(ca_dir, i)
3894- if not exists(sd):
3895- os.mkdir(sd)
3896-
3897- if not exists(path_join(ca_dir, 'serial')):
3898- with open(path_join(ca_dir, 'serial'), 'wb') as fh:
3899- fh.write('02\n')
3900-
3901- if not exists(path_join(ca_dir, 'index.txt')):
3902- with open(path_join(ca_dir, 'index.txt'), 'wb') as fh:
3903- fh.write('')
3904-
3905- def _init_ca(self):
3906- """Generate the root ca's cert and key.
3907- """
3908- if not exists(path_join(self.ca_dir, 'ca.cnf')):
3909- with open(path_join(self.ca_dir, 'ca.cnf'), 'wb') as fh:
3910- fh.write(
3911- CA_CONF_TEMPLATE % (self.get_conf_variables()))
3912-
3913- if not exists(path_join(self.ca_dir, 'signing.cnf')):
3914- with open(path_join(self.ca_dir, 'signing.cnf'), 'wb') as fh:
3915- fh.write(
3916- SIGNING_CONF_TEMPLATE % (self.get_conf_variables()))
3917-
3918- if exists(self.ca_cert) or exists(self.ca_key):
3919-            raise RuntimeError("init() called when CA already exists")
3920- cmd = ['openssl', 'req', '-config', self.ca_conf,
3921- '-x509', '-nodes', '-newkey', 'rsa',
3922- '-days', self.default_ca_expiry,
3923- '-keyout', self.ca_key, '-out', self.ca_cert,
3924- '-outform', 'PEM']
3925- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
3926- log.debug("CA Init:\n %s", output)
3927-
3928- def get_conf_variables(self):
3929- return dict(
3930- org_name="juju",
3931- org_unit_name="%s service" % self.name,
3932- common_name=self.name,
3933- ca_dir=self.ca_dir)
3934-
3935- def get_or_create_cert(self, common_name):
3936- if common_name in self:
3937- return self.get_certificate(common_name)
3938- return self.create_certificate(common_name)
3939-
3940- def create_certificate(self, common_name):
3941- if common_name in self:
3942- return self.get_certificate(common_name)
3943- key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)
3944- crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
3945- csr_p = path_join(self.ca_dir, "certs", "%s.csr" % common_name)
3946- self._create_certificate(common_name, key_p, csr_p, crt_p)
3947- return self.get_certificate(common_name)
3948-
3949- def get_certificate(self, common_name):
3950-        if common_name not in self:
3951- raise ValueError("No certificate for %s" % common_name)
3952- key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)
3953- crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
3954- with open(crt_p) as fh:
3955- crt = fh.read()
3956- with open(key_p) as fh:
3957- key = fh.read()
3958- return crt, key
3959-
3960- def __contains__(self, common_name):
3961- crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
3962- return exists(crt_p)
3963-
3964- def _create_certificate(self, common_name, key_p, csr_p, crt_p):
3965- template_vars = self.get_conf_variables()
3966- template_vars['common_name'] = common_name
3967- subj = '/O=%(org_name)s/OU=%(org_unit_name)s/CN=%(common_name)s' % (
3968- template_vars)
3969-
3970- log.debug("CA Create Cert %s", common_name)
3971- cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa:2048',
3972- '-nodes', '-days', self.default_expiry,
3973- '-keyout', key_p, '-out', csr_p, '-subj', subj]
3974- subprocess.check_call(cmd)
3975- cmd = ['openssl', 'rsa', '-in', key_p, '-out', key_p]
3976- subprocess.check_call(cmd)
3977-
3978- log.debug("CA Sign Cert %s", common_name)
3979- if self.cert_type == MYSQL_CERT:
3980- cmd = ['openssl', 'x509', '-req',
3981- '-in', csr_p, '-days', self.default_expiry,
3982- '-CA', self.ca_cert, '-CAkey', self.ca_key,
3983- '-set_serial', '01', '-out', crt_p]
3984- else:
3985- cmd = ['openssl', 'ca', '-config', self.signing_conf,
3986- '-extensions', 'req_extensions',
3987- '-days', self.default_expiry, '-notext',
3988- '-in', csr_p, '-out', crt_p, '-subj', subj, '-batch']
3989- log.debug("running %s", " ".join(cmd))
3990- subprocess.check_call(cmd)
3991-
3992- def get_ca_bundle(self):
3993- with open(self.ca_cert) as fh:
3994- return fh.read()
3995-
3996-
3997-CA_CONF_TEMPLATE = """
3998-[ ca ]
3999-default_ca = CA_default
4000-
4001-[ CA_default ]
4002-dir = %(ca_dir)s
4003-policy = policy_match
4004-database = $dir/index.txt
4005-serial = $dir/serial
4006-certs = $dir/certs
4007-crl_dir = $dir/crl
4008-new_certs_dir = $dir/newcerts
4009-certificate = $dir/cacert.pem
4010-private_key = $dir/private/cacert.key
4011-RANDFILE = $dir/private/.rand
4012-default_md = default
4013-
4014-[ req ]
4015-default_bits = 1024
4016-default_md = sha1
4017-
4018-prompt = no
4019-distinguished_name = ca_distinguished_name
4020-
4021-x509_extensions = ca_extensions
4022-
4023-[ ca_distinguished_name ]
4024-organizationName = %(org_name)s
4025-organizationalUnitName = %(org_unit_name)s Certificate Authority
4026-
4027-
4028-[ policy_match ]
4029-countryName = optional
4030-stateOrProvinceName = optional
4031-organizationName = match
4032-organizationalUnitName = optional
4033-commonName = supplied
4034-
4035-[ ca_extensions ]
4036-basicConstraints = critical,CA:true
4037-subjectKeyIdentifier = hash
4038-authorityKeyIdentifier = keyid:always, issuer
4039-keyUsage = cRLSign, keyCertSign
4040-"""
4041-
4042-
4043-SIGNING_CONF_TEMPLATE = """
4044-[ ca ]
4045-default_ca = CA_default
4046-
4047-[ CA_default ]
4048-dir = %(ca_dir)s
4049-policy = policy_match
4050-database = $dir/index.txt
4051-serial = $dir/serial
4052-certs = $dir/certs
4053-crl_dir = $dir/crl
4054-new_certs_dir = $dir/newcerts
4055-certificate = $dir/cacert.pem
4056-private_key = $dir/private/cacert.key
4057-RANDFILE = $dir/private/.rand
4058-default_md = default
4059-
4060-[ req ]
4061-default_bits = 1024
4062-default_md = sha1
4063-
4064-prompt = no
4065-distinguished_name = req_distinguished_name
4066-
4067-x509_extensions = req_extensions
4068-
4069-[ req_distinguished_name ]
4070-organizationName = %(org_name)s
4071-organizationalUnitName = %(org_unit_name)s machine resources
4072-commonName = %(common_name)s
4073-
4074-[ policy_match ]
4075-countryName = optional
4076-stateOrProvinceName = optional
4077-organizationName = match
4078-organizationalUnitName = optional
4079-commonName = supplied
4080-
4081-[ req_extensions ]
4082-basicConstraints = CA:false
4083-subjectKeyIdentifier = hash
4084-authorityKeyIdentifier = keyid:always, issuer
4085-keyUsage = digitalSignature, keyEncipherment, keyAgreement
4086-extendedKeyUsage = serverAuth, clientAuth
4087-"""
4088
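
For reference, ServiceCA above was designed to be called from hook code roughly as follows. JUJU_UNIT_NAME and CHARM_DIR are set by juju in a real hook (placeholder values shown), the charm directory must exist, and openssl must be installed:

    import os

    # Placeholders for the environment a real hook would provide.
    os.environ.setdefault('JUJU_UNIT_NAME', 'mysvc/0')
    os.environ.setdefault('CHARM_DIR', '/tmp/example-charm')
    if not os.path.isdir('/tmp/example-charm'):
        os.mkdir('/tmp/example-charm')

    from charmhelpers.contrib.ssl.service import ServiceCA

    # Initialises the CA on first use, then issues (or re-reads) a
    # certificate named after the service.
    crt, key, ca_bundle = ServiceCA.get_service_cert()
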
4089=== removed directory 'hooks/charmhelpers/contrib/storage'
4090=== removed file 'hooks/charmhelpers/contrib/storage/__init__.py'
4091=== removed directory 'hooks/charmhelpers/contrib/storage/linux'
4092=== removed file 'hooks/charmhelpers/contrib/storage/linux/__init__.py'
4093=== removed file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
4094--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-05-09 20:11:59 +0000
4095+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 1970-01-01 00:00:00 +0000
4096@@ -1,387 +0,0 @@
4097-#
4098-# Copyright 2012 Canonical Ltd.
4099-#
4100-# This file is sourced from lp:openstack-charm-helpers
4101-#
4102-# Authors:
4103-# James Page <james.page@ubuntu.com>
4104-# Adam Gandelman <adamg@ubuntu.com>
4105-#
4106-
4107-import os
4108-import shutil
4109-import json
4110-import time
4111-
4112-from subprocess import (
4113- check_call,
4114- check_output,
4115- CalledProcessError
4116-)
4117-
4118-from charmhelpers.core.hookenv import (
4119- relation_get,
4120- relation_ids,
4121- related_units,
4122- log,
4123- INFO,
4124- WARNING,
4125- ERROR
4126-)
4127-
4128-from charmhelpers.core.host import (
4129- mount,
4130- mounts,
4131- service_start,
4132- service_stop,
4133- service_running,
4134- umount,
4135-)
4136-
4137-from charmhelpers.fetch import (
4138- apt_install,
4139-)
4140-
4141-KEYRING = '/etc/ceph/ceph.client.{}.keyring'
4142-KEYFILE = '/etc/ceph/ceph.client.{}.key'
4143-
4144-CEPH_CONF = """[global]
4145- auth supported = {auth}
4146- keyring = {keyring}
4147- mon host = {mon_hosts}
4148- log to syslog = {use_syslog}
4149- err to syslog = {use_syslog}
4150- clog to syslog = {use_syslog}
4151-"""
4152-
4153-
4154-def install():
4155- ''' Basic Ceph client installation '''
4156- ceph_dir = "/etc/ceph"
4157- if not os.path.exists(ceph_dir):
4158- os.mkdir(ceph_dir)
4159- apt_install('ceph-common', fatal=True)
4160-
4161-
4162-def rbd_exists(service, pool, rbd_img):
4163- ''' Check to see if a RADOS block device exists '''
4164- try:
4165- out = check_output(['rbd', 'list', '--id', service,
4166- '--pool', pool])
4167- except CalledProcessError:
4168- return False
4169- else:
4170- return rbd_img in out
4171-
4172-
4173-def create_rbd_image(service, pool, image, sizemb):
4174- ''' Create a new RADOS block device '''
4175- cmd = [
4176- 'rbd',
4177- 'create',
4178- image,
4179- '--size',
4180- str(sizemb),
4181- '--id',
4182- service,
4183- '--pool',
4184- pool
4185- ]
4186- check_call(cmd)
4187-
4188-
4189-def pool_exists(service, name):
4190- ''' Check to see if a RADOS pool already exists '''
4191- try:
4192- out = check_output(['rados', '--id', service, 'lspools'])
4193- except CalledProcessError:
4194- return False
4195- else:
4196- return name in out
4197-
4198-
4199-def get_osds(service):
4200- '''
4201- Return a list of all Ceph Object Storage Daemons
4202- currently in the cluster
4203- '''
4204- version = ceph_version()
4205- if version and version >= '0.56':
4206- return json.loads(check_output(['ceph', '--id', service,
4207- 'osd', 'ls', '--format=json']))
4208- else:
4209- return None
4210-
4211-
4212-def create_pool(service, name, replicas=2):
4213- ''' Create a new RADOS pool '''
4214- if pool_exists(service, name):
4215- log("Ceph pool {} already exists, skipping creation".format(name),
4216- level=WARNING)
4217- return
4218- # Calculate the number of placement groups based
4219- # on upstream recommended best practices.
4220- osds = get_osds(service)
4221- if osds:
4222- pgnum = (len(osds) * 100 / replicas)
4223- else:
4224- # NOTE(james-page): Default to 200 for older ceph versions
4225- # which don't support OSD query from cli
4226- pgnum = 200
4227- cmd = [
4228- 'ceph', '--id', service,
4229- 'osd', 'pool', 'create',
4230- name, str(pgnum)
4231- ]
4232- check_call(cmd)
4233- cmd = [
4234- 'ceph', '--id', service,
4235- 'osd', 'pool', 'set', name,
4236- 'size', str(replicas)
4237- ]
4238- check_call(cmd)
4239-
4240-
4241-def delete_pool(service, name):
4242- ''' Delete a RADOS pool from ceph '''
4243- cmd = [
4244- 'ceph', '--id', service,
4245- 'osd', 'pool', 'delete',
4246- name, '--yes-i-really-really-mean-it'
4247- ]
4248- check_call(cmd)
4249-
4250-
4251-def _keyfile_path(service):
4252- return KEYFILE.format(service)
4253-
4254-
4255-def _keyring_path(service):
4256- return KEYRING.format(service)
4257-
4258-
4259-def create_keyring(service, key):
4260- ''' Create a new Ceph keyring containing key'''
4261- keyring = _keyring_path(service)
4262- if os.path.exists(keyring):
4263- log('ceph: Keyring exists at %s.' % keyring, level=WARNING)
4264- return
4265- cmd = [
4266- 'ceph-authtool',
4267- keyring,
4268- '--create-keyring',
4269- '--name=client.{}'.format(service),
4270- '--add-key={}'.format(key)
4271- ]
4272- check_call(cmd)
4273- log('ceph: Created new ring at %s.' % keyring, level=INFO)
4274-
4275-
4276-def create_key_file(service, key):
4277- ''' Create a file containing key '''
4278- keyfile = _keyfile_path(service)
4279- if os.path.exists(keyfile):
4280- log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING)
4281- return
4282- with open(keyfile, 'w') as fd:
4283- fd.write(key)
4284- log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
4285-
4286-
4287-def get_ceph_nodes():
4288-    ''' Query named relation 'ceph' to determine current nodes '''
4289- hosts = []
4290- for r_id in relation_ids('ceph'):
4291- for unit in related_units(r_id):
4292- hosts.append(relation_get('private-address', unit=unit, rid=r_id))
4293- return hosts
4294-
4295-
4296-def configure(service, key, auth, use_syslog):
4297- ''' Perform basic configuration of Ceph '''
4298- create_keyring(service, key)
4299- create_key_file(service, key)
4300- hosts = get_ceph_nodes()
4301- with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
4302- ceph_conf.write(CEPH_CONF.format(auth=auth,
4303- keyring=_keyring_path(service),
4304- mon_hosts=",".join(map(str, hosts)),
4305- use_syslog=use_syslog))
4306- modprobe('rbd')
4307-
4308-
4309-def image_mapped(name):
4310- ''' Determine whether a RADOS block device is mapped locally '''
4311- try:
4312- out = check_output(['rbd', 'showmapped'])
4313- except CalledProcessError:
4314- return False
4315- else:
4316- return name in out
4317-
4318-
4319-def map_block_storage(service, pool, image):
4320- ''' Map a RADOS block device for local use '''
4321- cmd = [
4322- 'rbd',
4323- 'map',
4324- '{}/{}'.format(pool, image),
4325- '--user',
4326- service,
4327- '--secret',
4328- _keyfile_path(service),
4329- ]
4330- check_call(cmd)
4331-
4332-
4333-def filesystem_mounted(fs):
4334-    ''' Determine whether a filesystem is already mounted '''
4335- return fs in [f for f, m in mounts()]
4336-
4337-
4338-def make_filesystem(blk_device, fstype='ext4', timeout=10):
4339- ''' Make a new filesystem on the specified block device '''
4340- count = 0
4341- e_noent = os.errno.ENOENT
4342- while not os.path.exists(blk_device):
4343- if count >= timeout:
4344- log('ceph: gave up waiting on block device %s' % blk_device,
4345- level=ERROR)
4346- raise IOError(e_noent, os.strerror(e_noent), blk_device)
4347- log('ceph: waiting for block device %s to appear' % blk_device,
4348- level=INFO)
4349- count += 1
4350- time.sleep(1)
4351- else:
4352- log('ceph: Formatting block device %s as filesystem %s.' %
4353- (blk_device, fstype), level=INFO)
4354- check_call(['mkfs', '-t', fstype, blk_device])
4355-
4356-
4357-def place_data_on_block_device(blk_device, data_src_dst):
4358- ''' Migrate data in data_src_dst to blk_device and then remount '''
4359- # mount block device into /mnt
4360- mount(blk_device, '/mnt')
4361- # copy data to /mnt
4362- copy_files(data_src_dst, '/mnt')
4363- # umount block device
4364- umount('/mnt')
4365- # Grab user/group ID's from original source
4366- _dir = os.stat(data_src_dst)
4367- uid = _dir.st_uid
4368- gid = _dir.st_gid
4369- # re-mount where the data should originally be
4370- # TODO: persist is currently a NO-OP in core.host
4371- mount(blk_device, data_src_dst, persist=True)
4372- # ensure original ownership of new mount.
4373- os.chown(data_src_dst, uid, gid)
4374-
4375-
4376-# TODO: re-use
4377-def modprobe(module):
4378- ''' Load a kernel module and configure for auto-load on reboot '''
4379- log('ceph: Loading kernel module', level=INFO)
4380- cmd = ['modprobe', module]
4381- check_call(cmd)
4382- with open('/etc/modules', 'r+') as modules:
4383- if module not in modules.read():
4384- modules.write(module)
4385-
4386-
4387-def copy_files(src, dst, symlinks=False, ignore=None):
4388- ''' Copy files from src to dst '''
4389- for item in os.listdir(src):
4390- s = os.path.join(src, item)
4391- d = os.path.join(dst, item)
4392- if os.path.isdir(s):
4393- shutil.copytree(s, d, symlinks, ignore)
4394- else:
4395- shutil.copy2(s, d)
4396-
4397-
4398-def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
4399- blk_device, fstype, system_services=[]):
4400- """
4401- NOTE: This function must only be called from a single service unit for
4402-    the same rbd_img, otherwise data loss will occur.
4403-
4404- Ensures given pool and RBD image exists, is mapped to a block device,
4405- and the device is formatted and mounted at the given mount_point.
4406-
4407- If formatting a device for the first time, data existing at mount_point
4408- will be migrated to the RBD device before being re-mounted.
4409-
4410- All services listed in system_services will be stopped prior to data
4411- migration and restarted when complete.
4412- """
4413- # Ensure pool, RBD image, RBD mappings are in place.
4414- if not pool_exists(service, pool):
4415- log('ceph: Creating new pool {}.'.format(pool))
4416- create_pool(service, pool)
4417-
4418- if not rbd_exists(service, pool, rbd_img):
4419- log('ceph: Creating RBD image ({}).'.format(rbd_img))
4420- create_rbd_image(service, pool, rbd_img, sizemb)
4421-
4422- if not image_mapped(rbd_img):
4423- log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))
4424- map_block_storage(service, pool, rbd_img)
4425-
4426- # make file system
4427- # TODO: What happens if for whatever reason this is run again and
4428- # the data is already in the rbd device and/or is mounted??
4429- # When it is mounted already, it will fail to make the fs
4430- # XXX: This is really sketchy! Need to at least add an fstab entry
4431- # otherwise this hook will blow away existing data if its executed
4432- # after a reboot.
4433- if not filesystem_mounted(mount_point):
4434- make_filesystem(blk_device, fstype)
4435-
4436- for svc in system_services:
4437- if service_running(svc):
4438-            log('ceph: Stopping service {} prior to migrating data.'
4439- .format(svc))
4440- service_stop(svc)
4441-
4442- place_data_on_block_device(blk_device, mount_point)
4443-
4444- for svc in system_services:
4445- log('ceph: Starting service {} after migrating data.'
4446- .format(svc))
4447- service_start(svc)
4448-
4449-
4450-def ensure_ceph_keyring(service, user=None, group=None):
4451- '''
4452- Ensures a ceph keyring is created for a named service
4453- and optionally ensures user and group ownership.
4454-
4455- Returns False if no ceph key is available in relation state.
4456- '''
4457- key = None
4458- for rid in relation_ids('ceph'):
4459- for unit in related_units(rid):
4460- key = relation_get('key', rid=rid, unit=unit)
4461- if key:
4462- break
4463- if not key:
4464- return False
4465- create_keyring(service=service, key=key)
4466- keyring = _keyring_path(service)
4467- if user and group:
4468- check_call(['chown', '%s.%s' % (user, group), keyring])
4469- return True
4470-
4471-
4472-def ceph_version():
4473- ''' Retrieve the local version of ceph '''
4474- if os.path.exists('/usr/bin/ceph'):
4475- cmd = ['ceph', '-v']
4476- output = check_output(cmd)
4477- output = output.split()
4478- if len(output) > 3:
4479- return output[2]
4480- else:
4481- return None
4482- else:
4483- return None
4484
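
The placement-group sizing inside create_pool() above is worth spelling out: with OSD data available it is len(osds) * 100 / replicas (integer division under this Python 2 code), and 200 otherwise. A runnable check of that arithmetic:

    def pg_count(num_osds, replicas=2):
        # Mirrors create_pool(): roughly 100 PGs per OSD divided
        # across replicas, with a legacy fallback when the OSD list
        # cannot be queried from the cli.
        if num_osds:
            return num_osds * 100 // replicas
        return 200

    assert pg_count(6) == 300
    assert pg_count(0) == 200   # pre-0.56 ceph: no 'osd ls' JSON
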
4485=== removed file 'hooks/charmhelpers/contrib/storage/linux/loopback.py'
4486--- hooks/charmhelpers/contrib/storage/linux/loopback.py 2013-11-26 17:12:54 +0000
4487+++ hooks/charmhelpers/contrib/storage/linux/loopback.py 1970-01-01 00:00:00 +0000
4488@@ -1,62 +0,0 @@
4489-
4490-import os
4491-import re
4492-
4493-from subprocess import (
4494- check_call,
4495- check_output,
4496-)
4497-
4498-
4499-##################################################
4500-# loopback device helpers.
4501-##################################################
4502-def loopback_devices():
4503- '''
4504- Parse through 'losetup -a' output to determine currently mapped
4505- loopback devices. Output is expected to look like:
4506-
4507- /dev/loop0: [0807]:961814 (/tmp/my.img)
4508-
4509- :returns: dict: a dict mapping {loopback_dev: backing_file}
4510- '''
4511- loopbacks = {}
4512- cmd = ['losetup', '-a']
4513- devs = [d.strip().split(' ') for d in
4514- check_output(cmd).splitlines() if d != '']
4515- for dev, _, f in devs:
4516- loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
4517- return loopbacks
4518-
4519-
4520-def create_loopback(file_path):
4521- '''
4522- Create a loopback device for a given backing file.
4523-
4524- :returns: str: Full path to new loopback device (eg, /dev/loop0)
4525- '''
4526- file_path = os.path.abspath(file_path)
4527- check_call(['losetup', '--find', file_path])
4528- for d, f in loopback_devices().iteritems():
4529- if f == file_path:
4530- return d
4531-
4532-
4533-def ensure_loopback_device(path, size):
4534- '''
4535- Ensure a loopback device exists for a given backing file path and size.
4536-    If a loopback device is not mapped to the file, a new one will be created.
4537-
4538- TODO: Confirm size of found loopback device.
4539-
4540- :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
4541- '''
4542- for d, f in loopback_devices().iteritems():
4543- if f == path:
4544- return d
4545-
4546- if not os.path.exists(path):
4547- cmd = ['truncate', '--size', size, path]
4548- check_call(cmd)
4549-
4550- return create_loopback(path)
4551
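
ensure_loopback_device() above was typically the only entry point charms used. A hedged usage sketch, as it would have been called before this removal (requires root; the backing path is an example):

    from charmhelpers.contrib.storage.linux.loopback import (
        ensure_loopback_device,
    )

    # Creates /srv/backing.img at 5G if missing, then maps it (or
    # returns the existing mapping) as e.g. /dev/loop0.
    dev = ensure_loopback_device('/srv/backing.img', '5G')
    print("backing file mapped at {0}".format(dev))
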
4552=== removed file 'hooks/charmhelpers/contrib/storage/linux/lvm.py'
4553--- hooks/charmhelpers/contrib/storage/linux/lvm.py 2013-11-26 17:12:54 +0000
4554+++ hooks/charmhelpers/contrib/storage/linux/lvm.py 1970-01-01 00:00:00 +0000
4555@@ -1,88 +0,0 @@
4556-from subprocess import (
4557- CalledProcessError,
4558- check_call,
4559- check_output,
4560- Popen,
4561- PIPE,
4562-)
4563-
4564-
4565-##################################################
4566-# LVM helpers.
4567-##################################################
4568-def deactivate_lvm_volume_group(block_device):
4569- '''
4570-    Deactivate any volume group associated with an LVM physical volume.
4571-
4572- :param block_device: str: Full path to LVM physical volume
4573- '''
4574- vg = list_lvm_volume_group(block_device)
4575- if vg:
4576- cmd = ['vgchange', '-an', vg]
4577- check_call(cmd)
4578-
4579-
4580-def is_lvm_physical_volume(block_device):
4581- '''
4582- Determine whether a block device is initialized as an LVM PV.
4583-
4584- :param block_device: str: Full path of block device to inspect.
4585-
4586- :returns: boolean: True if block device is a PV, False if not.
4587- '''
4588- try:
4589- check_output(['pvdisplay', block_device])
4590- return True
4591- except CalledProcessError:
4592- return False
4593-
4594-
4595-def remove_lvm_physical_volume(block_device):
4596- '''
4597- Remove LVM PV signatures from a given block device.
4598-
4599- :param block_device: str: Full path of block device to scrub.
4600- '''
4601- p = Popen(['pvremove', '-ff', block_device],
4602- stdin=PIPE)
4603- p.communicate(input='y\n')
4604-
4605-
4606-def list_lvm_volume_group(block_device):
4607- '''
4608- List LVM volume group associated with a given block device.
4609-
4610- Assumes block device is a valid LVM PV.
4611-
4612- :param block_device: str: Full path of block device to inspect.
4613-
4614- :returns: str: Name of volume group associated with block device or None
4615- '''
4616- vg = None
4617- pvd = check_output(['pvdisplay', block_device]).splitlines()
4618- for l in pvd:
4619- if l.strip().startswith('VG Name'):
4620- vg = ' '.join(l.split()).split(' ').pop()
4621- return vg
4622-
4623-
4624-def create_lvm_physical_volume(block_device):
4625- '''
4626- Initialize a block device as an LVM physical volume.
4627-
4628- :param block_device: str: Full path of block device to initialize.
4629-
4630- '''
4631- check_call(['pvcreate', block_device])
4632-
4633-
4634-def create_lvm_volume_group(volume_group, block_device):
4635- '''
4636- Create an LVM volume group backed by a given block device.
4637-
4638- Assumes block device has already been initialized as an LVM PV.
4639-
4640- :param volume_group: str: Name of volume group to create.
4641-    :param block_device: str: Full path of PV-initialized block device.
4642- '''
4643- check_call(['vgcreate', volume_group, block_device])
4644
4645=== removed file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
4646--- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-05-09 20:11:59 +0000
4647+++ hooks/charmhelpers/contrib/storage/linux/utils.py 1970-01-01 00:00:00 +0000
4648@@ -1,35 +0,0 @@
4649-from os import stat
4650-from stat import S_ISBLK
4651-
4652-from subprocess import (
4653- check_call,
4654- check_output,
4655- call
4656-)
4657-
4658-
4659-def is_block_device(path):
4660- '''
4661- Confirm device at path is a valid block device node.
4662-
4663- :returns: boolean: True if path is a block device, False if not.
4664- '''
4665- return S_ISBLK(stat(path).st_mode)
4666-
4667-
4668-def zap_disk(block_device):
4669- '''
4670-    Clear a block device of its partition table. Relies on sgdisk, which is
4671-    installed as part of the 'gdisk' package in Ubuntu.
4672-
4673- :param block_device: str: Full path of block device to clean.
4674- '''
4675- # sometimes sgdisk exits non-zero; this is OK, dd will clean up
4676- call(['sgdisk', '--zap-all', '--mbrtogpt',
4677- '--clear', block_device])
4678- dev_end = check_output(['blockdev', '--getsz', block_device])
4679- gpt_end = int(dev_end.split()[0]) - 100
4680- check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device),
4681- 'bs=1M', 'count=1'])
4682- check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device),
4683- 'bs=512', 'count=100', 'seek=%s'%(gpt_end)])
4684
4685=== removed directory 'hooks/charmhelpers/contrib/templating'
4686=== removed file 'hooks/charmhelpers/contrib/templating/__init__.py'
4687=== removed file 'hooks/charmhelpers/contrib/templating/contexts.py'
4688--- hooks/charmhelpers/contrib/templating/contexts.py 2014-05-09 20:11:59 +0000
4689+++ hooks/charmhelpers/contrib/templating/contexts.py 1970-01-01 00:00:00 +0000
4690@@ -1,104 +0,0 @@
4691-# Copyright 2013 Canonical Ltd.
4692-#
4693-# Authors:
4694-# Charm Helpers Developers <juju@lists.ubuntu.com>
4695-"""A helper to create a yaml cache of config with namespaced relation data."""
4696-import os
4697-import yaml
4698-
4699-import charmhelpers.core.hookenv
4700-
4701-
4702-charm_dir = os.environ.get('CHARM_DIR', '')
4703-
4704-
4705-def dict_keys_without_hyphens(a_dict):
4706-    """Return a new dict with underscores instead of hyphens in keys."""
4707- return dict(
4708- (key.replace('-', '_'), val) for key, val in a_dict.items())
4709-
4710-
4711-def update_relations(context, namespace_separator=':'):
4712- """Update the context with the relation data."""
4713- # Add any relation data prefixed with the relation type.
4714- relation_type = charmhelpers.core.hookenv.relation_type()
4715- relations = []
4716- context['current_relation'] = {}
4717- if relation_type is not None:
4718- relation_data = charmhelpers.core.hookenv.relation_get()
4719- context['current_relation'] = relation_data
4720- # Deprecated: the following use of relation data as keys
4721- # directly in the context will be removed.
4722- relation_data = dict(
4723- ("{relation_type}{namespace_separator}{key}".format(
4724- relation_type=relation_type,
4725- key=key,
4726- namespace_separator=namespace_separator), val)
4727- for key, val in relation_data.items())
4728- relation_data = dict_keys_without_hyphens(relation_data)
4729- context.update(relation_data)
4730- relations = charmhelpers.core.hookenv.relations_of_type(relation_type)
4731- relations = [dict_keys_without_hyphens(rel) for rel in relations]
4732-
4733- if 'relations_deprecated' not in context:
4734- context['relations_deprecated'] = {}
4735- if relation_type is not None:
4736- relation_type = relation_type.replace('-', '_')
4737- context['relations_deprecated'][relation_type] = relations
4738-
4739- context['relations'] = charmhelpers.core.hookenv.relations()
4740-
4741-
4742-def juju_state_to_yaml(yaml_path, namespace_separator=':',
4743- allow_hyphens_in_keys=True):
4744- """Update the juju config and state in a yaml file.
4745-
4746- This includes any current relation-get data, and the charm
4747- directory.
4748-
4749- This function was created for the ansible and saltstack
4750- support, as those libraries can use a yaml file to supply
4751- context to templates, but it may be useful generally to
4752- create and update an on-disk cache of all the config, including
4753- previous relation data.
4754-
4755- By default, hyphens are allowed in keys as this is supported
4756- by yaml, but for tools like ansible, hyphens are not valid [1].
4757-
4758- [1] http://www.ansibleworks.com/docs/playbooks_variables.html#what-makes-a-valid-variable-name
4759- """
4760- config = charmhelpers.core.hookenv.config()
4761-
4762- # Add the charm_dir which we will need to refer to charm
4763- # file resources etc.
4764- config['charm_dir'] = charm_dir
4765- config['local_unit'] = charmhelpers.core.hookenv.local_unit()
4766- config['unit_private_address'] = charmhelpers.core.hookenv.unit_private_ip()
4767- config['unit_public_address'] = charmhelpers.core.hookenv.unit_get(
4768- 'public-address'
4769- )
4770-
4771- # Don't use non-standard tags for unicode which will not
4772-    # work when salt uses yaml.safe_load.
4773- yaml.add_representer(unicode, lambda dumper,
4774- value: dumper.represent_scalar(
4775- u'tag:yaml.org,2002:str', value))
4776-
4777- yaml_dir = os.path.dirname(yaml_path)
4778- if not os.path.exists(yaml_dir):
4779- os.makedirs(yaml_dir)
4780-
4781- if os.path.exists(yaml_path):
4782- with open(yaml_path, "r") as existing_vars_file:
4783- existing_vars = yaml.load(existing_vars_file.read())
4784- else:
4785- existing_vars = {}
4786-
4787- if not allow_hyphens_in_keys:
4788- config = dict_keys_without_hyphens(config)
4789- existing_vars.update(config)
4790-
4791- update_relations(existing_vars, namespace_separator)
4792-
4793- with open(yaml_path, "w+") as fp:
4794- fp.write(yaml.dump(existing_vars, default_flow_style=False))
4795
4796=== removed file 'hooks/charmhelpers/contrib/templating/pyformat.py'
4797--- hooks/charmhelpers/contrib/templating/pyformat.py 2013-11-26 17:12:54 +0000
4798+++ hooks/charmhelpers/contrib/templating/pyformat.py 1970-01-01 00:00:00 +0000
4799@@ -1,13 +0,0 @@
4800-'''
4801-Templating using standard Python str.format() method.
4802-'''
4803-
4804-from charmhelpers.core import hookenv
4805-
4806-
4807-def render(template, extra={}, **kwargs):
4808- """Return the template rendered using Python's str.format()."""
4809- context = hookenv.execution_environment()
4810- context.update(extra)
4811- context.update(kwargs)
4812- return template.format(**context)
4813
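
Stripped of the hookenv context, pyformat's render() above is plain str.format(). The equivalent outside a hook:

    # hookenv.execution_environment() normally supplies these names;
    # here they are passed explicitly.
    template = "{unit} listens on {host}:{port}"
    print(template.format(unit="web/0", host="10.0.0.5", port=8080))
    # -> web/0 listens on 10.0.0.5:8080
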
4814=== removed directory 'hooks/charmhelpers/contrib/unison'
4815=== removed file 'hooks/charmhelpers/contrib/unison/__init__.py'
4816--- hooks/charmhelpers/contrib/unison/__init__.py 2014-05-09 20:11:59 +0000
4817+++ hooks/charmhelpers/contrib/unison/__init__.py 1970-01-01 00:00:00 +0000
4818@@ -1,257 +0,0 @@
4819-# Easy file synchronization among peer units using ssh + unison.
4820-#
4821-# From *both* peer relation -joined and -changed, add a call to
4822-# ssh_authorized_peers() describing the peer relation and the desired
4823-# user + group. After all peer relations have settled, all hosts should
4824-# be able to connect to one another via key auth'd ssh as the specified user.
4825-#
4826-# Other hooks are then free to synchronize files and directories using
4827-# sync_to_peers().
4828-#
4829-# For a peer relation named 'cluster', for example:
4830-#
4831-# cluster-relation-joined:
4832-# ...
4833-# ssh_authorized_peers(peer_interface='cluster',
4834-# user='juju_ssh', group='juju_ssh',
4835-# ensure_user=True)
4836-# ...
4837-#
4838-# cluster-relation-changed:
4839-# ...
4840-# ssh_authorized_peers(peer_interface='cluster',
4841-# user='juju_ssh', group='juju_ssh',
4842-# ensure_user=True)
4843-# ...
4844-#
4845-# Hooks are now free to sync files as easily as:
4846-#
4847-# files = ['/etc/fstab', '/etc/apt.conf.d/']
4848-# sync_to_peers(peer_interface='cluster',
4849-#               user='juju_ssh', paths=[files])
4850-#
4851-# It is assumed the charm itself has set up permissions on each unit
4852-# such that 'juju_ssh' has read + write permissions. Also assumed
4853-# that the calling charm takes care of leader delegation.
4854-#
4855-# Additionally files can be synchronized only to a specific unit:
4856-# sync_to_peer(slave_address, user='juju_ssh',
4857-# paths=[files], verbose=False)
4858-
4859-import os
4860-import pwd
4861-
4862-from copy import copy
4863-from subprocess import check_call, check_output
4864-
4865-from charmhelpers.core.host import (
4866- adduser,
4867- add_user_to_group,
4868-)
4869-
4870-from charmhelpers.core.hookenv import (
4871- log,
4872- hook_name,
4873- relation_ids,
4874- related_units,
4875- relation_set,
4876- relation_get,
4877- unit_private_ip,
4878- ERROR,
4879-)
4880-
4881-BASE_CMD = ['unison', '-auto', '-batch=true', '-confirmbigdel=false',
4882- '-fastcheck=true', '-group=false', '-owner=false',
4883- '-prefer=newer', '-times=true']
4884-
4885-
4886-def get_homedir(user):
4887- try:
4888- user = pwd.getpwnam(user)
4889- return user.pw_dir
4890- except KeyError:
4891-        log('Could not get homedir for user %s: user exists?' % user, ERROR)
4892- raise Exception
4893-
4894-
4895-def create_private_key(user, priv_key_path):
4896- if not os.path.isfile(priv_key_path):
4897- log('Generating new SSH key for user %s.' % user)
4898- cmd = ['ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048',
4899- '-f', priv_key_path]
4900- check_call(cmd)
4901- else:
4902- log('SSH key already exists at %s.' % priv_key_path)
4903- check_call(['chown', user, priv_key_path])
4904- check_call(['chmod', '0600', priv_key_path])
4905-
4906-
4907-def create_public_key(user, priv_key_path, pub_key_path):
4908- if not os.path.isfile(pub_key_path):
4909- log('Generating missing ssh public key @ %s.' % pub_key_path)
4910- cmd = ['ssh-keygen', '-y', '-f', priv_key_path]
4911- p = check_output(cmd).strip()
4912- with open(pub_key_path, 'wb') as out:
4913- out.write(p)
4914- check_call(['chown', user, pub_key_path])
4915-
4916-
4917-def get_keypair(user):
4918- home_dir = get_homedir(user)
4919- ssh_dir = os.path.join(home_dir, '.ssh')
4920- priv_key = os.path.join(ssh_dir, 'id_rsa')
4921- pub_key = '%s.pub' % priv_key
4922-
4923- if not os.path.isdir(ssh_dir):
4924- os.mkdir(ssh_dir)
4925- check_call(['chown', '-R', user, ssh_dir])
4926-
4927- create_private_key(user, priv_key)
4928- create_public_key(user, priv_key, pub_key)
4929-
4930- with open(priv_key, 'r') as p:
4931- _priv = p.read().strip()
4932-
4933- with open(pub_key, 'r') as p:
4934- _pub = p.read().strip()
4935-
4936- return (_priv, _pub)
4937-
4938-
4939-def write_authorized_keys(user, keys):
4940- home_dir = get_homedir(user)
4941- ssh_dir = os.path.join(home_dir, '.ssh')
4942- auth_keys = os.path.join(ssh_dir, 'authorized_keys')
4943- log('Syncing authorized_keys @ %s.' % auth_keys)
4944- with open(auth_keys, 'wb') as out:
4945- for k in keys:
4946- out.write('%s\n' % k)
4947-
4948-
4949-def write_known_hosts(user, hosts):
4950- home_dir = get_homedir(user)
4951- ssh_dir = os.path.join(home_dir, '.ssh')
4952- known_hosts = os.path.join(ssh_dir, 'known_hosts')
4953- khosts = []
4954- for host in hosts:
4955- cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
4956- remote_key = check_output(cmd).strip()
4957- khosts.append(remote_key)
4958- log('Syncing known_hosts @ %s.' % known_hosts)
4959- with open(known_hosts, 'wb') as out:
4960- for host in khosts:
4961- out.write('%s\n' % host)
4962-
4963-
4964-def ensure_user(user, group=None):
4965- adduser(user)
4966- if group:
4967- add_user_to_group(user, group)
4968-
4969-
4970-def ssh_authorized_peers(peer_interface, user, group=None,
4971- ensure_local_user=False):
4972- """
4973- Main setup function, should be called from both peer -changed and -joined
4974- hooks with the same parameters.
4975- """
4976- if ensure_local_user:
4977- ensure_user(user, group)
4978- priv_key, pub_key = get_keypair(user)
4979- hook = hook_name()
4980- if hook == '%s-relation-joined' % peer_interface:
4981- relation_set(ssh_pub_key=pub_key)
4982- elif hook == '%s-relation-changed' % peer_interface:
4983- hosts = []
4984- keys = []
4985-
4986- for r_id in relation_ids(peer_interface):
4987- for unit in related_units(r_id):
4988- ssh_pub_key = relation_get('ssh_pub_key',
4989- rid=r_id,
4990- unit=unit)
4991- priv_addr = relation_get('private-address',
4992- rid=r_id,
4993- unit=unit)
4994- if ssh_pub_key:
4995- keys.append(ssh_pub_key)
4996- hosts.append(priv_addr)
4997- else:
4998- log('ssh_authorized_peers(): ssh_pub_key '
4999- 'missing for unit %s, skipping.' % unit)
5000- write_authorized_keys(user, keys)
The diff has been truncated for viewing.
