Merge lp:~james-page/charms/trusty/neutron-openvswitch/lp1515008-stable into lp:~gnuoy/charms/trusty/neutron-openvswitch/neutron-refactor

Proposed by James Page
Status: Superseded
Proposed branch: lp:~james-page/charms/trusty/neutron-openvswitch/lp1515008-stable
Merge into: lp:~gnuoy/charms/trusty/neutron-openvswitch/neutron-refactor
Diff against target: 16850 lines (+12934/-1250)
122 files modified
.bzrignore (+2/-0)
.project (+17/-0)
.pydevproject (+9/-0)
Makefile (+21/-6)
README.md (+134/-18)
actions.yaml (+2/-0)
actions/git_reinstall.py (+45/-0)
charm-helpers-hooks.yaml (+13/-0)
charm-helpers-sync.yaml (+0/-10)
charm-helpers-tests.yaml (+5/-0)
config.yaml (+101/-16)
hooks/charmhelpers/__init__.py (+38/-0)
hooks/charmhelpers/cli/__init__.py (+191/-0)
hooks/charmhelpers/cli/benchmark.py (+36/-0)
hooks/charmhelpers/cli/commands.py (+32/-0)
hooks/charmhelpers/cli/hookenv.py (+23/-0)
hooks/charmhelpers/cli/host.py (+31/-0)
hooks/charmhelpers/cli/unitdata.py (+39/-0)
hooks/charmhelpers/contrib/__init__.py (+15/-0)
hooks/charmhelpers/contrib/hahelpers/__init__.py (+15/-0)
hooks/charmhelpers/contrib/hahelpers/apache.py (+26/-3)
hooks/charmhelpers/contrib/hahelpers/ceph.py (+0/-297)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+172/-39)
hooks/charmhelpers/contrib/network/__init__.py (+15/-0)
hooks/charmhelpers/contrib/network/ip.py (+456/-0)
hooks/charmhelpers/contrib/network/ovs/__init__.py (+22/-1)
hooks/charmhelpers/contrib/openstack/__init__.py (+15/-0)
hooks/charmhelpers/contrib/openstack/alternatives.py (+16/-0)
hooks/charmhelpers/contrib/openstack/amulet/__init__.py (+15/-0)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+197/-0)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+963/-0)
hooks/charmhelpers/contrib/openstack/context.py (+963/-236)
hooks/charmhelpers/contrib/openstack/files/__init__.py (+18/-0)
hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh (+32/-0)
hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh (+30/-0)
hooks/charmhelpers/contrib/openstack/ip.py (+151/-0)
hooks/charmhelpers/contrib/openstack/neutron.py (+189/-4)
hooks/charmhelpers/contrib/openstack/templates/__init__.py (+16/-0)
hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+12/-6)
hooks/charmhelpers/contrib/openstack/templates/git.upstart (+17/-0)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+30/-8)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend (+9/-8)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf (+9/-8)
hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken (+9/-0)
hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo (+22/-0)
hooks/charmhelpers/contrib/openstack/templates/section-zeromq (+14/-0)
hooks/charmhelpers/contrib/openstack/templating.py (+74/-31)
hooks/charmhelpers/contrib/openstack/utils.py (+631/-104)
hooks/charmhelpers/contrib/python/__init__.py (+15/-0)
hooks/charmhelpers/contrib/python/packages.py (+121/-0)
hooks/charmhelpers/contrib/storage/__init__.py (+15/-0)
hooks/charmhelpers/contrib/storage/linux/__init__.py (+15/-0)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+388/-118)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+19/-3)
hooks/charmhelpers/contrib/storage/linux/lvm.py (+18/-1)
hooks/charmhelpers/contrib/storage/linux/utils.py (+44/-8)
hooks/charmhelpers/core/__init__.py (+15/-0)
hooks/charmhelpers/core/decorators.py (+57/-0)
hooks/charmhelpers/core/files.py (+45/-0)
hooks/charmhelpers/core/fstab.py (+134/-0)
hooks/charmhelpers/core/hookenv.py (+566/-37)
hooks/charmhelpers/core/host.py (+342/-53)
hooks/charmhelpers/core/hugepage.py (+69/-0)
hooks/charmhelpers/core/kernel.py (+68/-0)
hooks/charmhelpers/core/services/__init__.py (+18/-0)
hooks/charmhelpers/core/services/base.py (+353/-0)
hooks/charmhelpers/core/services/helpers.py (+283/-0)
hooks/charmhelpers/core/strutils.py (+72/-0)
hooks/charmhelpers/core/sysctl.py (+56/-0)
hooks/charmhelpers/core/templating.py (+68/-0)
hooks/charmhelpers/core/unitdata.py (+521/-0)
hooks/charmhelpers/fetch/__init__.py (+255/-107)
hooks/charmhelpers/fetch/archiveurl.py (+121/-17)
hooks/charmhelpers/fetch/bzrurl.py (+32/-3)
hooks/charmhelpers/fetch/giturl.py (+73/-0)
hooks/charmhelpers/payload/__init__.py (+16/-0)
hooks/charmhelpers/payload/execd.py (+16/-0)
hooks/neutron_ovs_context.py (+102/-30)
hooks/neutron_ovs_hooks.py (+87/-9)
hooks/neutron_ovs_utils.py (+339/-2)
metadata.yaml (+17/-3)
templates/ext-port.conf (+16/-0)
templates/git/neutron_sudoers (+4/-0)
templates/git/upstart/neutron-ovs-cleanup.upstart (+17/-0)
templates/git/upstart/neutron-plugin-openvswitch-agent.upstart (+18/-0)
templates/icehouse/dhcp_agent.ini (+14/-0)
templates/icehouse/metadata_agent.ini (+20/-0)
templates/icehouse/ml2_conf.ini (+16/-5)
templates/icehouse/neutron.conf (+6/-3)
templates/juno/fwaas_driver.ini (+7/-0)
templates/juno/l3_agent.ini (+7/-0)
templates/juno/metadata_agent.ini (+20/-0)
templates/juno/ml2_conf.ini (+43/-0)
templates/kilo/fwaas_driver.ini (+8/-0)
templates/kilo/neutron.conf (+42/-0)
templates/os-charm-phy-nic-mtu.conf (+22/-0)
tests/00-setup (+17/-0)
tests/014-basic-precise-icehouse (+11/-0)
tests/015-basic-trusty-icehouse (+9/-0)
tests/016-basic-trusty-juno (+11/-0)
tests/017-basic-trusty-kilo (+11/-0)
tests/019-basic-vivid-kilo (+9/-0)
tests/050-basic-trusty-icehouse-git (+9/-0)
tests/051-basic-trusty-juno-git (+12/-0)
tests/052-basic-trusty-kilo-git (+12/-0)
tests/README (+53/-0)
tests/basic_deployment.py (+256/-0)
tests/charmhelpers/__init__.py (+38/-0)
tests/charmhelpers/contrib/__init__.py (+15/-0)
tests/charmhelpers/contrib/amulet/__init__.py (+15/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+95/-0)
tests/charmhelpers/contrib/amulet/utils.py (+818/-0)
tests/charmhelpers/contrib/openstack/__init__.py (+15/-0)
tests/charmhelpers/contrib/openstack/amulet/__init__.py (+15/-0)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+197/-0)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+963/-0)
tests/tests.yaml (+20/-0)
unit_tests/__init__.py (+2/-0)
unit_tests/test_actions_git_reinstall.py (+105/-0)
unit_tests/test_neutron_ovs_context.py (+246/-20)
unit_tests/test_neutron_ovs_hooks.py (+154/-17)
unit_tests/test_neutron_ovs_utils.py (+314/-19)
To merge this branch: bzr merge lp:~james-page/charms/trusty/neutron-openvswitch/lp1515008-stable
Reviewer: Liam Young
Status: Pending
Review via email: mp+277336@code.launchpad.net

Description of the change

Fixup handling of dvr and local dhcp configurations
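
The local DHCP/metadata handling referenced above is driven by the charm's new enable-local-dhcp-and-metadata option (see the config.yaml hunk in the preview diff below). As a minimal illustration only, using the juju 1.x CLI syntax shown elsewhere in this proposal, the option would be toggled on a deployed service with:

    juju set neutron-openvswitch enable-local-dhcp-and-metadata=True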


Unmerged revisions

73. By James Page

Fixup handling of dvr and local dhcp configurations

72. By Corey Bryant

[beisner,r=corey.bryant] Enable stable amulet tests and stable charm-helper syncs.

71. By James Page

15.10 Charm release

70. By Liam Young

Charmhelper sync

69. By Corey Bryant

[beisner,r=corey.bryant] Point charmhelper sync and amulet tests at stable branches.

68. By James Page

[gnuoy] 15.07 Charm release

67. By Corey Bryant

[corey.bryant,trivial] Update deploy from source README indentation.

66. By Corey Bryant

[corey.bryant,trivial] Update deploy from source README samples.

65. By Corey Bryant

[corey.bryant,trivial] Fix deploy from source README

64. By Liam Young

Point charmhelper sync and amulet tests at stable branches

Preview Diff

=== modified file '.bzrignore'
--- .bzrignore 2014-06-19 09:56:25 +0000
+++ .bzrignore 2015-11-12 11:46:11 +0000
@@ -1,1 +1,3 @@
+bin
 .coverage
+tags
=== added file '.project'
--- .project 1970-01-01 00:00:00 +0000
+++ .project 2015-11-12 11:46:11 +0000
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+    <name>neutron-openvswitch</name>
+    <comment></comment>
+    <projects>
+    </projects>
+    <buildSpec>
+        <buildCommand>
+            <name>org.python.pydev.PyDevBuilder</name>
+            <arguments>
+            </arguments>
+        </buildCommand>
+    </buildSpec>
+    <natures>
+        <nature>org.python.pydev.pythonNature</nature>
+    </natures>
+</projectDescription>
=== added file '.pydevproject'
--- .pydevproject 1970-01-01 00:00:00 +0000
+++ .pydevproject 2015-11-12 11:46:11 +0000
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<?eclipse-pydev version="1.0"?><pydev_project>
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
+<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
+<path>/neutron-openvswitch/hooks</path>
+<path>/neutron-openvswitch/unit_tests</path>
+</pydev_pathproperty>
+</pydev_project>
=== modified file 'Makefile'
--- Makefile 2014-06-19 09:56:25 +0000
+++ Makefile 2015-11-12 11:46:11 +0000
@@ -2,13 +2,28 @@
 PYTHON := /usr/bin/env python
 
 lint:
-	@flake8 --exclude hooks/charmhelpers hooks
-	@flake8 --exclude hooks/charmhelpers unit_tests
+	@flake8 --exclude hooks/charmhelpers,tests/charmhelpers \
+		actions hooks unit_tests tests
 	@charm proof
 
 test:
+	@# Bundletester expects unit tests here
 	@echo Starting tests...
-	@$(PYTHON) /usr/bin/nosetests --nologcapture unit_tests
+	@$(PYTHON) /usr/bin/nosetests -v --nologcapture --with-coverage unit_tests
 
-sync:
-	@charm-helper-sync -c charm-helpers-sync.yaml
+functional_test:
+	@echo Starting Amulet tests...
+	@juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700
+
+bin/charm_helpers_sync.py:
+	@mkdir -p bin
+	@bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
+		> bin/charm_helpers_sync.py
+
+sync: bin/charm_helpers_sync.py
+	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
+	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
+
+publish: lint test
+	bzr push lp:charms/neutron-openvswitch
+	bzr push lp:charms/trusty/neutron-openvswitch
=== modified file 'README.md'
--- README.md 2014-06-23 13:00:45 +0000
+++ README.md 2015-11-12 11:46:11 +0000
@@ -1,18 +1,134 @@
-Overview
---------
-
-This subordinate charm provides the Neutron OVS configuration for a compute
-node. Oncde deployed it takes over the management of the neutron configuration
-and plugin configuration on the compute node. It expects three relations:
-
-1) Relation with principle compute node
-2) Relation with message broker. If a single message broker is being used for
-   the openstack deployemnt then it can relat to that. If a seperate neutron
-   message broker is being used it should relate to that.
-3) Relation with neutron-api principle charm (not nova-cloud-controller)
-
-Restrictions:
-------------
-
-It should only be used with Icehouse and above and requires a seperate
-neutron-api service to have been deployed.
+# Overview
+
+This subordinate charm provides the Neutron OpenvSwitch configuration for a compute node.
+
+Once deployed it takes over the management of the Neutron base and plugin configuration on the compute node.
+
+# Usage
+
+To deploy (partial deployment of linked charms only):
+
+    juju deploy rabbitmq-server
+    juju deploy neutron-api
+    juju deploy nova-compute
+    juju deploy neutron-openvswitch
+    juju add-relation neutron-openvswitch nova-compute
+    juju add-relation neutron-openvswitch neutron-api
+    juju add-relation neutron-openvswitch rabbitmq-server
+
+Note that the rabbitmq-server can optionally be a different instance of the rabbitmq-server charm than used by OpenStack Nova:
+
+    juju deploy rabbitmq-server rmq-neutron
+    juju add-relation neutron-openvswitch rmq-neutron
+    juju add-relation neutron-api rmq-neutron
+
+The neutron-api and neutron-openvswitch charms must be related to the same instance of the rabbitmq-server charm.
+
+# Restrictions
+
+It should only be used with OpenStack Icehouse and above and requires a seperate neutron-api service to have been deployed.
+
+# Disabling security group management
+
+WARNING: this feature allows you to effectively disable security on your cloud!
+
+This charm has a configuration option to allow users to disable any per-instance security group management; this must used with neutron-security-groups enabled in the neutron-api charm and could be used to turn off security on selected set of compute nodes:
+
+    juju deploy neutron-openvswitch neutron-openvswitch-insecure
+    juju set neutron-openvswitch-insecure disable-security-groups=True
+    juju deploy nova-compute nova-compute-insecure
+    juju add-relation nova-compute-insecure neutron-openvswitch-insecure
+    ...
+
+These compute nodes could then be accessed by cloud users via use of host aggregates with specific flavors to target instances to hypervisors with no per-instance security.
+
+# Deploying from source
+
+The minimum openstack-origin-git config required to deploy from source is:
+
+    openstack-origin-git: include-file://neutron-juno.yaml
+
+    neutron-juno.yaml
+        repositories:
+        - {name: requirements,
+           repository: 'git://github.com/openstack/requirements',
+           branch: stable/juno}
+        - {name: neutron,
+           repository: 'git://github.com/openstack/neutron',
+           branch: stable/juno}
+
+Note that there are only two 'name' values the charm knows about: 'requirements'
+and 'neutron'. These repositories must correspond to these 'name' values.
+Additionally, the requirements repository must be specified first and the
+neutron repository must be specified last. All other repostories are installed
+in the order in which they are specified.
+
+The following is a full list of current tip repos (may not be up-to-date):
+
+    openstack-origin-git: include-file://neutron-master.yaml
+
+    neutron-master.yaml
+        repositories:
+        - {name: requirements,
+           repository: 'git://github.com/openstack/requirements',
+           branch: master}
+        - {name: oslo-concurrency,
+           repository: 'git://github.com/openstack/oslo.concurrency',
+           branch: master}
+        - {name: oslo-config,
+           repository: 'git://github.com/openstack/oslo.config',
+           branch: master}
+        - {name: oslo-context,
+           repository: 'git://github.com/openstack/oslo.context',
+           branch: master}
+        - {name: oslo-db,
+           repository: 'git://github.com/openstack/oslo.db',
+           branch: master}
+        - {name: oslo-i18n,
+           repository: 'git://github.com/openstack/oslo.i18n',
+           branch: master}
+        - {name: oslo-messaging,
+           repository: 'git://github.com/openstack/oslo.messaging',
+           branch: master}
+        - {name: oslo-middleware,
+           repository': 'git://github.com/openstack/oslo.middleware',
+           branch: master}
+        - {name: oslo-rootwrap',
+           repository: 'git://github.com/openstack/oslo.rootwrap',
+           branch: master}
+        - {name: oslo-serialization,
+           repository: 'git://github.com/openstack/oslo.serialization',
+           branch: master}
+        - {name: oslo-utils,
+           repository: 'git://github.com/openstack/oslo.utils',
+           branch: master}
+        - {name: pbr,
+           repository: 'git://github.com/openstack-dev/pbr',
+           branch: master}
+        - {name: stevedore,
+           repository: 'git://github.com/openstack/stevedore',
+           branch: 'master'}
+        - {name: python-keystoneclient,
+           repository: 'git://github.com/openstack/python-keystoneclient',
+           branch: master}
+        - {name: python-neutronclient,
+           repository: 'git://github.com/openstack/python-neutronclient',
+           branch: master}
+        - {name: python-novaclient,
+           repository': 'git://github.com/openstack/python-novaclient',
+           branch: master}
+        - {name: keystonemiddleware,
+           repository: 'git://github.com/openstack/keystonemiddleware',
+           branch: master}
+        - {name: neutron-fwaas,
+           repository': 'git://github.com/openstack/neutron-fwaas',
+           branch: master}
+        - {name: neutron-lbaas,
+           repository: 'git://github.com/openstack/neutron-lbaas',
+           branch: master}
+        - {name: neutron-vpnaas,
+           repository: 'git://github.com/openstack/neutron-vpnaas',
+           branch: master}
+        - {name: neutron,
+           repository: 'git://github.com/openstack/neutron',
+           branch: master}
=== added directory 'actions'
=== added file 'actions.yaml'
--- actions.yaml 1970-01-01 00:00:00 +0000
+++ actions.yaml 2015-11-12 11:46:11 +0000
@@ -0,0 +1,2 @@
+git-reinstall:
+  description: Reinstall neutron-openvswitch from the openstack-origin-git repositories.
=== added symlink 'actions/git-reinstall'
=== target is u'git_reinstall.py'
=== added file 'actions/git_reinstall.py'
--- actions/git_reinstall.py 1970-01-01 00:00:00 +0000
+++ actions/git_reinstall.py 2015-11-12 11:46:11 +0000
@@ -0,0 +1,45 @@
+#!/usr/bin/python
+import sys
+import traceback
+
+sys.path.append('hooks/')
+
+from charmhelpers.contrib.openstack.utils import (
+    git_install_requested,
+)
+
+from charmhelpers.core.hookenv import (
+    action_set,
+    action_fail,
+    config,
+)
+
+from neutron_ovs_utils import (
+    git_install,
+)
+
+from neutron_ovs_hooks import (
+    config_changed,
+)
+
+
+def git_reinstall():
+    """Reinstall from source and restart services.
+
+    If the openstack-origin-git config option was used to install openstack
+    from source git repositories, then this action can be used to reinstall
+    from updated git repositories, followed by a restart of services."""
+    if not git_install_requested():
+        action_fail('openstack-origin-git is not configured')
+        return
+
+    try:
+        git_install(config('openstack-origin-git'))
+        config_changed()
+    except:
+        action_set({'traceback': traceback.format_exc()})
+        action_fail('git-reinstall resulted in an unexpected error')
+
+
+if __name__ == '__main__':
+    git_reinstall()
=== added file 'charm-helpers-hooks.yaml'
--- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000
+++ charm-helpers-hooks.yaml 2015-11-12 11:46:11 +0000
@@ -0,0 +1,13 @@
+branch: lp:~openstack-charmers/charm-helpers/stable
+destination: hooks/charmhelpers
+include:
+    - core
+    - cli
+    - fetch
+    - contrib.openstack|inc=*
+    - contrib.hahelpers
+    - contrib.network.ovs
+    - contrib.storage.linux
+    - payload.execd
+    - contrib.network.ip
+    - contrib.python.packages
=== removed file 'charm-helpers-sync.yaml'
--- charm-helpers-sync.yaml 2014-06-19 09:56:25 +0000
+++ charm-helpers-sync.yaml 1970-01-01 00:00:00 +0000
@@ -1,10 +0,0 @@
-branch: lp:charm-helpers
-destination: hooks/charmhelpers
-include:
-    - core
-    - fetch
-    - contrib.openstack
-    - contrib.hahelpers
-    - contrib.network.ovs
-    - contrib.storage.linux
-    - payload.execd
=== added file 'charm-helpers-tests.yaml'
--- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000
+++ charm-helpers-tests.yaml 2015-11-12 11:46:11 +0000
@@ -0,0 +1,5 @@
+branch: lp:~openstack-charmers/charm-helpers/stable
+destination: tests/charmhelpers
+include:
+    - contrib.amulet
+    - contrib.openstack.amulet
=== modified file 'config.yaml'
--- config.yaml 2014-06-23 11:49:58 +0000
+++ config.yaml 2015-11-12 11:46:11 +0000
@@ -1,23 +1,108 @@
 options:
+  debug:
+    default: False
+    type: boolean
+    description: Enable debug logging.
+  verbose:
+    default: False
+    type: boolean
+    description: Enable verbose logging.
+  use-syslog:
+    type: boolean
+    default: False
+    description: |
+      Setting this to True will allow supporting services to log to syslog.
+  openstack-origin-git:
+    default:
+    type: string
+    description: |
+      Specifies a YAML-formatted dictionary listing the git
+      repositories and branches from which to install OpenStack and
+      its dependencies.
+
+      When openstack-origin-git is specified, openstack-specific
+      packages will be installed from source rather than from the
+      the nova-compute charm's openstack-origin repository.
+
+      Note that the installed config files will be determined based on
+      the OpenStack release of the nova-compute charm's openstack-origin
+      option.
+
+      For more details see README.md.
   rabbit-user:
     default: neutron
     type: string
-    description: Username used to access rabbitmq queue
+    description: Username used to access RabbitMQ queue
   rabbit-vhost:
     default: openstack
     type: string
-    description: Rabbitmq vhost
-  use-syslog:
-    type: boolean
-    default: False
-    description: |
-      By default, all services will log into their corresponding log files.
-      Setting this to True will force all services to log to the syslog.
-  debug:
-    default: False
-    type: boolean
-    description: Enable debug logging
-  verbose:
-    default: False
-    type: boolean
-    description: Enable verbose logging
+    description: RabbitMQ vhost
+  data-port:
+    type: string
+    default:
+    description: |
+      Space-delimited list of bridge:port mappings. Ports will be added to
+      their corresponding bridge. The bridges will allow usage of flat or
+      VLAN network types with Neutron and should match this defined in
+      bridge-mappings.
+      .
+      Ports provided can be the name or MAC address of the interface to be
+      added to the bridge. If MAC addresses are used, you may provide multiple
+      bridge:mac for the same bridge so as to be able to configure multiple
+      units. In this case the charm will run through the provided MAC addresses
+      for each bridge until it finds one it can resolve to an interface name.
+  disable-security-groups:
+    type: boolean
+    default: false
+    description: |
+      Disable neutron based security groups - setting this configuration option
+      will override any settings configured via the neutron-api charm.
+      .
+      BE CAREFUL - this option allows you to disable all port level security
+      within an OpenStack cloud.
+  bridge-mappings:
+    type: string
+    default: 'physnet1:br-data'
+    description: |
+      Space-delimited list of ML2 data bridge mappings with format
+      <provider>:<bridge>.
+  flat-network-providers:
+    type: string
+    default:
+    description: |
+      Space-delimited list of Neutron flat network providers.
+  vlan-ranges:
+    type: string
+    default: "physnet1:1000:2000"
+    description: |
+      Space-delimited list of <physical_network>:<vlan_min>:<vlan_max> or
+      <physical_network> specifying physical_network names usable for VLAN
+      provider and tenant networks, as well as ranges of VLAN tags on each
+      available for allocation to tenant networks.
+  # Network configuration options
+  # by default all access is over 'private-address'
+  os-data-network:
+    type: string
+    default:
+    description: |
+      The IP address and netmask of the OpenStack Data network (e.g.,
+      192.168.0.0/24)
+      .
+      This network will be used for tenant network traffic in overlay
+      networks.
+  ext-port:
+    type: string
+    default:
+    description: |
+      A space-separated list of external ports to use for routing of instance
+      traffic to the external public network. Valid values are either MAC
+      addresses (in which case only MAC addresses for interfaces without an IP
+      address already assigned will be used), or interfaces (eth0)
+  enable-local-dhcp-and-metadata:
+    type: boolean
+    default: false
+    description: |
+      Enable local Neutron DHCP and Metadata Agents. This is useful for deployments
+      which do not include a neutron-gateway (do not require l3, lbaas or vpnaas
+      services) and should only be used in-conjunction with flat or VLAN provider
+      networks configurations.
=== added file 'hooks/charmhelpers/__init__.py'
--- hooks/charmhelpers/__init__.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/__init__.py 2015-11-12 11:46:11 +0000
@@ -0,0 +1,38 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17# Bootstrap charm-helpers, installing its dependencies if necessary using
18# only standard libraries.
19import subprocess
20import sys
21
22try:
23 import six # flake8: noqa
24except ImportError:
25 if sys.version_info.major == 2:
26 subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
27 else:
28 subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
29 import six # flake8: noqa
30
31try:
32 import yaml # flake8: noqa
33except ImportError:
34 if sys.version_info.major == 2:
35 subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
36 else:
37 subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
38 import yaml # flake8: noqa
039
=== removed file 'hooks/charmhelpers/__init__.py'
=== added directory 'hooks/charmhelpers/cli'
=== added file 'hooks/charmhelpers/cli/__init__.py'
--- hooks/charmhelpers/cli/__init__.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/cli/__init__.py 2015-11-12 11:46:11 +0000
@@ -0,0 +1,191 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import inspect
18import argparse
19import sys
20
21from six.moves import zip
22
23from charmhelpers.core import unitdata
24
25
26class OutputFormatter(object):
27 def __init__(self, outfile=sys.stdout):
28 self.formats = (
29 "raw",
30 "json",
31 "py",
32 "yaml",
33 "csv",
34 "tab",
35 )
36 self.outfile = outfile
37
38 def add_arguments(self, argument_parser):
39 formatgroup = argument_parser.add_mutually_exclusive_group()
40 choices = self.supported_formats
41 formatgroup.add_argument("--format", metavar='FMT',
42 help="Select output format for returned data, "
43 "where FMT is one of: {}".format(choices),
44 choices=choices, default='raw')
45 for fmt in self.formats:
46 fmtfunc = getattr(self, fmt)
47 formatgroup.add_argument("-{}".format(fmt[0]),
48 "--{}".format(fmt), action='store_const',
49 const=fmt, dest='format',
50 help=fmtfunc.__doc__)
51
52 @property
53 def supported_formats(self):
54 return self.formats
55
56 def raw(self, output):
57 """Output data as raw string (default)"""
58 if isinstance(output, (list, tuple)):
59 output = '\n'.join(map(str, output))
60 self.outfile.write(str(output))
61
62 def py(self, output):
63 """Output data as a nicely-formatted python data structure"""
64 import pprint
65 pprint.pprint(output, stream=self.outfile)
66
67 def json(self, output):
68 """Output data in JSON format"""
69 import json
70 json.dump(output, self.outfile)
71
72 def yaml(self, output):
73 """Output data in YAML format"""
74 import yaml
75 yaml.safe_dump(output, self.outfile)
76
77 def csv(self, output):
78 """Output data as excel-compatible CSV"""
79 import csv
80 csvwriter = csv.writer(self.outfile)
81 csvwriter.writerows(output)
82
83 def tab(self, output):
84 """Output data in excel-compatible tab-delimited format"""
85 import csv
86 csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab)
87 csvwriter.writerows(output)
88
89 def format_output(self, output, fmt='raw'):
90 fmtfunc = getattr(self, fmt)
91 fmtfunc(output)
92
93
94class CommandLine(object):
95 argument_parser = None
96 subparsers = None
97 formatter = None
98 exit_code = 0
99
100 def __init__(self):
101 if not self.argument_parser:
102 self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks')
103 if not self.formatter:
104 self.formatter = OutputFormatter()
105 self.formatter.add_arguments(self.argument_parser)
106 if not self.subparsers:
107 self.subparsers = self.argument_parser.add_subparsers(help='Commands')
108
109 def subcommand(self, command_name=None):
110 """
111 Decorate a function as a subcommand. Use its arguments as the
112 command-line arguments"""
113 def wrapper(decorated):
114 cmd_name = command_name or decorated.__name__
115 subparser = self.subparsers.add_parser(cmd_name,
116 description=decorated.__doc__)
117 for args, kwargs in describe_arguments(decorated):
118 subparser.add_argument(*args, **kwargs)
119 subparser.set_defaults(func=decorated)
120 return decorated
121 return wrapper
122
123 def test_command(self, decorated):
124 """
125 Subcommand is a boolean test function, so bool return values should be
126 converted to a 0/1 exit code.
127 """
128 decorated._cli_test_command = True
129 return decorated
130
131 def no_output(self, decorated):
132 """
133 Subcommand is not expected to return a value, so don't print a spurious None.
134 """
135 decorated._cli_no_output = True
136 return decorated
137
138 def subcommand_builder(self, command_name, description=None):
139 """
140 Decorate a function that builds a subcommand. Builders should accept a
141 single argument (the subparser instance) and return the function to be
142 run as the command."""
143 def wrapper(decorated):
144 subparser = self.subparsers.add_parser(command_name)
145 func = decorated(subparser)
146 subparser.set_defaults(func=func)
147 subparser.description = description or func.__doc__
148 return wrapper
149
150 def run(self):
151 "Run cli, processing arguments and executing subcommands."
152 arguments = self.argument_parser.parse_args()
153 argspec = inspect.getargspec(arguments.func)
154 vargs = []
155 for arg in argspec.args:
156 vargs.append(getattr(arguments, arg))
157 if argspec.varargs:
158 vargs.extend(getattr(arguments, argspec.varargs))
159 output = arguments.func(*vargs)
160 if getattr(arguments.func, '_cli_test_command', False):
161 self.exit_code = 0 if output else 1
162 output = ''
163 if getattr(arguments.func, '_cli_no_output', False):
164 output = ''
165 self.formatter.format_output(output, arguments.format)
166 if unitdata._KV:
167 unitdata._KV.flush()
168
169
170cmdline = CommandLine()
171
172
173def describe_arguments(func):
174 """
175 Analyze a function's signature and return a data structure suitable for
176 passing in as arguments to an argparse parser's add_argument() method."""
177
178 argspec = inspect.getargspec(func)
179 # we should probably raise an exception somewhere if func includes **kwargs
180 if argspec.defaults:
181 positional_args = argspec.args[:-len(argspec.defaults)]
182 keyword_names = argspec.args[-len(argspec.defaults):]
183 for arg, default in zip(keyword_names, argspec.defaults):
184 yield ('--{}'.format(arg),), {'default': default}
185 else:
186 positional_args = argspec.args
187
188 for arg in positional_args:
189 yield (arg,), {}
190 if argspec.varargs:
191 yield (argspec.varargs,), {'nargs': '*'}
0192
=== added file 'hooks/charmhelpers/cli/benchmark.py'
--- hooks/charmhelpers/cli/benchmark.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/cli/benchmark.py 2015-11-12 11:46:11 +0000
@@ -0,0 +1,36 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17from . import cmdline
18from charmhelpers.contrib.benchmark import Benchmark
19
20
21@cmdline.subcommand(command_name='benchmark-start')
22def start():
23 Benchmark.start()
24
25
26@cmdline.subcommand(command_name='benchmark-finish')
27def finish():
28 Benchmark.finish()
29
30
31@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score")
32def service(subparser):
33 subparser.add_argument("value", help="The composite score.")
34 subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.")
35 subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.")
36 return Benchmark.set_composite_score
037
=== added file 'hooks/charmhelpers/cli/commands.py'
--- hooks/charmhelpers/cli/commands.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/cli/commands.py 2015-11-12 11:46:11 +0000
@@ -0,0 +1,32 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17"""
18This module loads sub-modules into the python runtime so they can be
19discovered via the inspect module. In order to prevent flake8 from (rightfully)
20telling us these are unused modules, throw a ' # noqa' at the end of each import
21so that the warning is suppressed.
22"""
23
24from . import CommandLine # noqa
25
26"""
27Import the sub-modules which have decorated subcommands to register with chlp.
28"""
29from . import host # noqa
30from . import benchmark # noqa
31from . import unitdata # noqa
32from . import hookenv # noqa
033
=== added file 'hooks/charmhelpers/cli/hookenv.py'
--- hooks/charmhelpers/cli/hookenv.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/cli/hookenv.py 2015-11-12 11:46:11 +0000
@@ -0,0 +1,23 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17from . import cmdline
18from charmhelpers.core import hookenv
19
20
21cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
22cmdline.subcommand('service-name')(hookenv.service_name)
23cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)
024
=== added file 'hooks/charmhelpers/cli/host.py'
--- hooks/charmhelpers/cli/host.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/cli/host.py 2015-11-12 11:46:11 +0000
@@ -0,0 +1,31 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17from . import cmdline
18from charmhelpers.core import host
19
20
21@cmdline.subcommand()
22def mounts():
23 "List mounts"
24 return host.mounts()
25
26
27@cmdline.subcommand_builder('service', description="Control system services")
28def service(subparser):
29 subparser.add_argument("action", help="The action to perform (start, stop, etc...)")
30 subparser.add_argument("service_name", help="Name of the service to control")
31 return host.service
032
=== added file 'hooks/charmhelpers/cli/unitdata.py'
--- hooks/charmhelpers/cli/unitdata.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/cli/unitdata.py 2015-11-12 11:46:11 +0000
@@ -0,0 +1,39 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17from . import cmdline
18from charmhelpers.core import unitdata
19
20
21@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
22def unitdata_cmd(subparser):
23 nested = subparser.add_subparsers()
24 get_cmd = nested.add_parser('get', help='Retrieve data')
25 get_cmd.add_argument('key', help='Key to retrieve the value of')
26 get_cmd.set_defaults(action='get', value=None)
27 set_cmd = nested.add_parser('set', help='Store data')
28 set_cmd.add_argument('key', help='Key to set')
29 set_cmd.add_argument('value', help='Value to store')
30 set_cmd.set_defaults(action='set')
31
32 def _unitdata_cmd(action, key, value):
33 if action == 'get':
34 return unitdata.kv().get(key)
35 elif action == 'set':
36 unitdata.kv().set(key, value)
37 unitdata.kv().flush()
38 return ''
39 return _unitdata_cmd
040
=== modified file 'hooks/charmhelpers/contrib/__init__.py'
--- hooks/charmhelpers/contrib/__init__.py 2014-06-05 10:59:23 +0000
+++ hooks/charmhelpers/contrib/__init__.py 2015-11-12 11:46:11 +0000
@@ -0,0 +1,15 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
016
=== modified file 'hooks/charmhelpers/contrib/hahelpers/__init__.py'
--- hooks/charmhelpers/contrib/hahelpers/__init__.py 2014-06-05 10:59:23 +0000
+++ hooks/charmhelpers/contrib/hahelpers/__init__.py 2015-11-12 11:46:11 +0000
@@ -0,0 +1,15 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
016
=== modified file 'hooks/charmhelpers/contrib/hahelpers/apache.py'
--- hooks/charmhelpers/contrib/hahelpers/apache.py 2014-06-05 10:59:23 +0000
+++ hooks/charmhelpers/contrib/hahelpers/apache.py 2015-11-12 11:46:11 +0000
@@ -1,3 +1,19 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
1#17#
2# Copyright 2012 Canonical Ltd.18# Copyright 2012 Canonical Ltd.
3#19#
@@ -20,20 +36,27 @@
20)36)
2137
2238
23def get_cert():39def get_cert(cn=None):
40 # TODO: deal with multiple https endpoints via charm config
24 cert = config_get('ssl_cert')41 cert = config_get('ssl_cert')
25 key = config_get('ssl_key')42 key = config_get('ssl_key')
26 if not (cert and key):43 if not (cert and key):
27 log("Inspecting identity-service relations for SSL certificate.",44 log("Inspecting identity-service relations for SSL certificate.",
28 level=INFO)45 level=INFO)
29 cert = key = None46 cert = key = None
47 if cn:
48 ssl_cert_attr = 'ssl_cert_{}'.format(cn)
49 ssl_key_attr = 'ssl_key_{}'.format(cn)
50 else:
51 ssl_cert_attr = 'ssl_cert'
52 ssl_key_attr = 'ssl_key'
30 for r_id in relation_ids('identity-service'):53 for r_id in relation_ids('identity-service'):
31 for unit in relation_list(r_id):54 for unit in relation_list(r_id):
32 if not cert:55 if not cert:
33 cert = relation_get('ssl_cert',56 cert = relation_get(ssl_cert_attr,
34 rid=r_id, unit=unit)57 rid=r_id, unit=unit)
35 if not key:58 if not key:
36 key = relation_get('ssl_key',59 key = relation_get(ssl_key_attr,
37 rid=r_id, unit=unit)60 rid=r_id, unit=unit)
38 return (cert, key)61 return (cert, key)
3962
4063
=== removed file 'hooks/charmhelpers/contrib/hahelpers/ceph.py'
--- hooks/charmhelpers/contrib/hahelpers/ceph.py 2014-06-05 10:59:23 +0000
+++ hooks/charmhelpers/contrib/hahelpers/ceph.py 1970-01-01 00:00:00 +0000
@@ -1,297 +0,0 @@
1#
2# Copyright 2012 Canonical Ltd.
3#
4# This file is sourced from lp:openstack-charm-helpers
5#
6# Authors:
7# James Page <james.page@ubuntu.com>
8# Adam Gandelman <adamg@ubuntu.com>
9#
10
11import commands
12import os
13import shutil
14import time
15
16from subprocess import (
17 check_call,
18 check_output,
19 CalledProcessError
20)
21
22from charmhelpers.core.hookenv import (
23 relation_get,
24 relation_ids,
25 related_units,
26 log,
27 INFO,
28 ERROR
29)
30
31from charmhelpers.fetch import (
32 apt_install,
33)
34
35from charmhelpers.core.host import (
36 mount,
37 mounts,
38 service_start,
39 service_stop,
40 umount,
41)
42
43KEYRING = '/etc/ceph/ceph.client.%s.keyring'
44KEYFILE = '/etc/ceph/ceph.client.%s.key'
45
46CEPH_CONF = """[global]
47 auth supported = %(auth)s
48 keyring = %(keyring)s
49 mon host = %(mon_hosts)s
50 log to syslog = %(use_syslog)s
51 err to syslog = %(use_syslog)s
52 clog to syslog = %(use_syslog)s
53"""
54
55
56def running(service):
57 # this local util can be dropped as soon the following branch lands
58 # in lp:charm-helpers
59 # https://code.launchpad.net/~gandelman-a/charm-helpers/service_running/
60 try:
61 output = check_output(['service', service, 'status'])
62 except CalledProcessError:
63 return False
64 else:
65 if ("start/running" in output or "is running" in output):
66 return True
67 else:
68 return False
69
70
71def install():
72 ceph_dir = "/etc/ceph"
73 if not os.path.isdir(ceph_dir):
74 os.mkdir(ceph_dir)
75 apt_install('ceph-common', fatal=True)
76
77
78def rbd_exists(service, pool, rbd_img):
79 (rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %
80 (service, pool))
81 return rbd_img in out
82
83
84def create_rbd_image(service, pool, image, sizemb):
85 cmd = [
86 'rbd',
87 'create',
88 image,
89 '--size',
90 str(sizemb),
91 '--id',
92 service,
93 '--pool',
94 pool
95 ]
96 check_call(cmd)
97
98
99def pool_exists(service, name):
100 (rc, out) = commands.getstatusoutput("rados --id %s lspools" % service)
101 return name in out
102
103
104def create_pool(service, name):
105 cmd = [
106 'rados',
107 '--id',
108 service,
109 'mkpool',
110 name
111 ]
112 check_call(cmd)
113
114
115def keyfile_path(service):
116 return KEYFILE % service
117
118
119def keyring_path(service):
120 return KEYRING % service
121
122
123def create_keyring(service, key):
124 keyring = keyring_path(service)
125 if os.path.exists(keyring):
126 log('ceph: Keyring exists at %s.' % keyring, level=INFO)
127 cmd = [
128 'ceph-authtool',
129 keyring,
130 '--create-keyring',
131 '--name=client.%s' % service,
132 '--add-key=%s' % key
133 ]
134 check_call(cmd)
135 log('ceph: Created new ring at %s.' % keyring, level=INFO)
136
137
138def create_key_file(service, key):
139 # create a file containing the key
140 keyfile = keyfile_path(service)
141 if os.path.exists(keyfile):
142 log('ceph: Keyfile exists at %s.' % keyfile, level=INFO)
143 fd = open(keyfile, 'w')
144 fd.write(key)
145 fd.close()
146 log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
147
148
149def get_ceph_nodes():
150 hosts = []
151 for r_id in relation_ids('ceph'):
152 for unit in related_units(r_id):
153 hosts.append(relation_get('private-address', unit=unit, rid=r_id))
154 return hosts
155
156
157def configure(service, key, auth):
158 create_keyring(service, key)
159 create_key_file(service, key)
160 hosts = get_ceph_nodes()
161 mon_hosts = ",".join(map(str, hosts))
162 keyring = keyring_path(service)
163 with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
164 ceph_conf.write(CEPH_CONF % locals())
165 modprobe_kernel_module('rbd')
166
167
168def image_mapped(image_name):
169 (rc, out) = commands.getstatusoutput('rbd showmapped')
170 return image_name in out
171
172
173def map_block_storage(service, pool, image):
174 cmd = [
175 'rbd',
176 'map',
177 '%s/%s' % (pool, image),
178 '--user',
179 service,
180 '--secret',
181 keyfile_path(service),
182 ]
183 check_call(cmd)
184
185
186def filesystem_mounted(fs):
187 return fs in [f for m, f in mounts()]
188
189
190def make_filesystem(blk_device, fstype='ext4', timeout=10):
191 count = 0
192 e_noent = os.errno.ENOENT
193 while not os.path.exists(blk_device):
194 if count >= timeout:
195 log('ceph: gave up waiting on block device %s' % blk_device,
196 level=ERROR)
197 raise IOError(e_noent, os.strerror(e_noent), blk_device)
198 log('ceph: waiting for block device %s to appear' % blk_device,
199 level=INFO)
200 count += 1
201 time.sleep(1)
202 else:
203 log('ceph: Formatting block device %s as filesystem %s.' %
204 (blk_device, fstype), level=INFO)
205 check_call(['mkfs', '-t', fstype, blk_device])
206
207
208def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'):
209 # mount block device into /mnt
210 mount(blk_device, '/mnt')
211
212 # copy data to /mnt
213 try:
214 copy_files(data_src_dst, '/mnt')
215 except:
216 pass
217
218 # umount block device
219 umount('/mnt')
220
221 _dir = os.stat(data_src_dst)
222 uid = _dir.st_uid
223 gid = _dir.st_gid
224
225 # re-mount where the data should originally be
226 mount(blk_device, data_src_dst, persist=True)
227
228 # ensure original ownership of new mount.
229 cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst]
230 check_call(cmd)
231
232
233# TODO: re-use
234def modprobe_kernel_module(module):
235 log('ceph: Loading kernel module', level=INFO)
236 cmd = ['modprobe', module]
237 check_call(cmd)
238 cmd = 'echo %s >> /etc/modules' % module
239 check_call(cmd, shell=True)
240
241
242def copy_files(src, dst, symlinks=False, ignore=None):
243 for item in os.listdir(src):
244 s = os.path.join(src, item)
245 d = os.path.join(dst, item)
246 if os.path.isdir(s):
247 shutil.copytree(s, d, symlinks, ignore)
248 else:
249 shutil.copy2(s, d)
250
251
252def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
253 blk_device, fstype, system_services=[]):
254 """
255 To be called from the current cluster leader.
256 Ensures given pool and RBD image exists, is mapped to a block device,
257 and the device is formatted and mounted at the given mount_point.
258
259 If formatting a device for the first time, data existing at mount_point
260 will be migrated to the RBD device before being remounted.
261
262 All services listed in system_services will be stopped prior to data
263 migration and restarted when complete.
264 """
265 # Ensure pool, RBD image, RBD mappings are in place.
266 if not pool_exists(service, pool):
267 log('ceph: Creating new pool %s.' % pool, level=INFO)
268 create_pool(service, pool)
269
270 if not rbd_exists(service, pool, rbd_img):
271 log('ceph: Creating RBD image (%s).' % rbd_img, level=INFO)
272 create_rbd_image(service, pool, rbd_img, sizemb)
273
274 if not image_mapped(rbd_img):
275 log('ceph: Mapping RBD Image as a Block Device.', level=INFO)
276 map_block_storage(service, pool, rbd_img)
277
278 # make file system
279 # TODO: What happens if for whatever reason this is run again and
280 # the data is already in the rbd device and/or is mounted??
281 # When it is mounted already, it will fail to make the fs
282 # XXX: This is really sketchy! Need to at least add an fstab entry
283 # otherwise this hook will blow away existing data if its executed
284 # after a reboot.
285 if not filesystem_mounted(mount_point):
286 make_filesystem(blk_device, fstype)
287
288 for svc in system_services:
289 if running(svc):
290 log('Stopping services %s prior to migrating data.' % svc,
291 level=INFO)
292 service_stop(svc)
293
294 place_data_on_ceph(service, blk_device, mount_point, fstype)
295
296 for svc in system_services:
297 service_start(svc)
2980
=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-06-05 10:59:23 +0000
+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2015-11-12 11:46:11 +0000
@@ -1,3 +1,19 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
1#17#
2# Copyright 2012 Canonical Ltd.18# Copyright 2012 Canonical Ltd.
3#19#
@@ -6,11 +22,18 @@
6# Adam Gandelman <adamg@ubuntu.com>22# Adam Gandelman <adamg@ubuntu.com>
7#23#
824
25"""
26Helpers for clustering and determining "cluster leadership" and other
27clustering-related helpers.
28"""
29
9import subprocess30import subprocess
10import os31import os
1132
12from socket import gethostname as get_unit_hostname33from socket import gethostname as get_unit_hostname
1334
35import six
36
14from charmhelpers.core.hookenv import (37from charmhelpers.core.hookenv import (
15 log,38 log,
16 relation_ids,39 relation_ids,
@@ -19,14 +42,64 @@
19 config as config_get,42 config as config_get,
20 INFO,43 INFO,
21 ERROR,44 ERROR,
45 WARNING,
22 unit_get,46 unit_get,
23)47 is_leader as juju_is_leader
48)
49from charmhelpers.core.decorators import (
50 retry_on_exception,
51)
52from charmhelpers.core.strutils import (
53 bool_from_string,
54)
55
56DC_RESOURCE_NAME = 'DC'
2457
2558
26class HAIncompleteConfig(Exception):59class HAIncompleteConfig(Exception):
27 pass60 pass
2861
2962
63class CRMResourceNotFound(Exception):
64 pass
65
66
67class CRMDCNotFound(Exception):
68 pass
69
70
71def is_elected_leader(resource):
72 """
73 Returns True if the charm executing this is the elected cluster leader.
74
75 It relies on two mechanisms to determine leadership:
76 1. If juju is sufficiently new and leadership election is supported,
77 the is_leader command will be used.
78 2. If the charm is part of a corosync cluster, call corosync to
79 determine leadership.
80 3. If the charm is not part of a corosync cluster, the leader is
81 determined as being "the alive unit with the lowest unit numer". In
82 other words, the oldest surviving unit.
83 """
84 try:
85 return juju_is_leader()
86 except NotImplementedError:
87 log('Juju leadership election feature not enabled'
88 ', using fallback support',
89 level=WARNING)
90
91 if is_clustered():
+        if not is_crm_leader(resource):
+            log('Deferring action to CRM leader.', level=INFO)
+            return False
+    else:
+        peers = peer_units()
+        if peers and not oldest_peer(peers):
+            log('Deferring action to oldest service unit.', level=INFO)
+            return False
+    return True
+
+
 def is_clustered():
     for r_id in (relation_ids('ha') or []):
         for unit in (relation_list(r_id) or []):
@@ -38,31 +111,85 @@
     return False


+def is_crm_dc():
+    """
+    Determine leadership by querying the pacemaker Designated Controller
+    """
+    cmd = ['crm', 'status']
+    try:
+        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+        if not isinstance(status, six.text_type):
+            status = six.text_type(status, "utf-8")
+    except subprocess.CalledProcessError as ex:
+        raise CRMDCNotFound(str(ex))
+
+    current_dc = ''
+    for line in status.split('\n'):
+        if line.startswith('Current DC'):
+            # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
+            current_dc = line.split(':')[1].split()[0]
+            if current_dc == get_unit_hostname():
+                return True
+            elif current_dc == 'NONE':
+                raise CRMDCNotFound('Current DC: NONE')
+
+    return False
+
+
+@retry_on_exception(5, base_delay=2,
+                    exc_type=(CRMResourceNotFound, CRMDCNotFound))
+def is_crm_leader(resource, retry=False):
+    """
+    Returns True if the charm calling this is the elected corosync leader,
+    as returned by calling the external "crm" command.
+
+    We allow this operation to be retried to avoid the possibility of getting a
+    false negative. See LP #1396246 for more info.
+    """
+    if resource == DC_RESOURCE_NAME:
+        return is_crm_dc()
+    cmd = ['crm', 'resource', 'show', resource]
+    try:
+        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+        if not isinstance(status, six.text_type):
+            status = six.text_type(status, "utf-8")
+    except subprocess.CalledProcessError:
+        status = None
+
+    if status and get_unit_hostname() in status:
+        return True
+
+    if status and "resource %s is NOT running" % (resource) in status:
+        raise CRMResourceNotFound("CRM resource %s not found" % (resource))
+
+    return False
+
+
 def is_leader(resource):
-    cmd = [
-        "crm", "resource",
-        "show", resource
-    ]
-    try:
-        status = subprocess.check_output(cmd)
-    except subprocess.CalledProcessError:
-        return False
-    else:
-        if get_unit_hostname() in status:
-            return True
-        else:
-            return False
-
-
-def peer_units():
+    log("is_leader is deprecated. Please consider using is_crm_leader "
+        "instead.", level=WARNING)
+    return is_crm_leader(resource)
+
+
+def peer_units(peer_relation="cluster"):
     peers = []
-    for r_id in (relation_ids('cluster') or []):
+    for r_id in (relation_ids(peer_relation) or []):
         for unit in (relation_list(r_id) or []):
             peers.append(unit)
     return peers


+def peer_ips(peer_relation='cluster', addr_key='private-address'):
+    '''Return a dict of peers and their private-address'''
+    peers = {}
+    for r_id in relation_ids(peer_relation):
+        for unit in relation_list(r_id):
+            peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
+    return peers
+
+
 def oldest_peer(peers):
+    """Determines who the oldest peer is by comparing unit numbers."""
     local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
     for peer in peers:
         remote_unit_no = int(peer.split('/')[1])
@@ -72,16 +199,9 @@


 def eligible_leader(resource):
-    if is_clustered():
-        if not is_leader(resource):
-            log('Deferring action to CRM leader.', level=INFO)
-            return False
-    else:
-        peers = peer_units()
-        if peers and not oldest_peer(peers):
-            log('Deferring action to oldest service unit.', level=INFO)
-            return False
-    return True
+    log("eligible_leader is deprecated. Please consider using "
+        "is_elected_leader instead.", level=WARNING)
+    return is_elected_leader(resource)


 def https():
@@ -91,16 +211,16 @@
     .
     returns: boolean
     '''
-    if config_get('use-https') == "yes":
+    use_https = config_get('use-https')
+    if use_https and bool_from_string(use_https):
         return True
     if config_get('ssl_cert') and config_get('ssl_key'):
         return True
     for r_id in relation_ids('identity-service'):
         for unit in relation_list(r_id):
+            # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
             rel_state = [
                 relation_get('https_keystone', rid=r_id, unit=unit),
-                relation_get('ssl_cert', rid=r_id, unit=unit),
-                relation_get('ssl_key', rid=r_id, unit=unit),
                 relation_get('ca_cert', rid=r_id, unit=unit),
             ]
             # NOTE: works around (LP: #1203241)
@@ -109,54 +229,66 @@
     return False


-def determine_api_port(public_port):
+def determine_api_port(public_port, singlenode_mode=False):
     '''
     Determine correct API server listening port based on
     existence of HTTPS reverse proxy and/or haproxy.

     public_port: int: standard public port for given service

+    singlenode_mode: boolean: Shuffle ports when only a single unit is present
+
     returns: int: the correct listening port for the API service
     '''
     i = 0
-    if len(peer_units()) > 0 or is_clustered():
+    if singlenode_mode:
+        i += 1
+    elif len(peer_units()) > 0 or is_clustered():
         i += 1
     if https():
         i += 1
     return public_port - (i * 10)


-def determine_apache_port(public_port):
+def determine_apache_port(public_port, singlenode_mode=False):
     '''
     Description: Determine correct apache listening port based on public IP +
     state of the cluster.

     public_port: int: standard public port for given service

+    singlenode_mode: boolean: Shuffle ports when only a single unit is present
+
     returns: int: the correct listening port for the HAProxy service
     '''
     i = 0
-    if len(peer_units()) > 0 or is_clustered():
+    if singlenode_mode:
+        i += 1
+    elif len(peer_units()) > 0 or is_clustered():
         i += 1
     return public_port - (i * 10)


-def get_hacluster_config():
+def get_hacluster_config(exclude_keys=None):
     '''
     Obtains all relevant configuration from charm configuration required
     for initiating a relation to hacluster:

-    ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
+    ha-bindiface, ha-mcastport, vip

+    param: exclude_keys: list of setting key(s) to be excluded.
     returns: dict: A dict containing settings keyed by setting name.
     raises: HAIncompleteConfig if settings are missing.
     '''
-    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
+    settings = ['ha-bindiface', 'ha-mcastport', 'vip']
     conf = {}
     for setting in settings:
+        if exclude_keys and setting in exclude_keys:
+            continue
+
         conf[setting] = config_get(setting)
     missing = []
-    [missing.append(s) for s, v in conf.iteritems() if v is None]
+    [missing.append(s) for s, v in six.iteritems(conf) if v is None]
     if missing:
         log('Insufficient config data to configure hacluster.', level=ERROR)
         raise HAIncompleteConfig
@@ -170,6 +302,7 @@

     :configs : OSTemplateRenderer: A config tempating object to inspect for
               a complete https context.
+
     :vip_setting: str: Setting in charm config that specifies
                   VIP address.
     '''

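A minimal usage sketch (illustrative only, not part of this diff) of how a charm hook might drive the reworked cluster helpers above; it assumes charm-helpers is importable on the unit and uses a hypothetical CRM resource name.

    # Illustrative sketch; 'res_ks_vip' is a placeholder CRM resource name.
    from charmhelpers.contrib.hahelpers.cluster import (
        is_elected_leader,
        determine_api_port,
        determine_apache_port,
    )

    RESOURCE = 'res_ks_vip'

    def configure_listeners(public_port=5000):
        # Only the elected leader should perform one-off cluster actions.
        leader = is_elected_leader(RESOURCE)

        # Ports shuffle down by 10 per proxy layer; singlenode_mode applies
        # the haproxy offset even before any peer units have joined.
        api_port = determine_api_port(public_port, singlenode_mode=True)
        haproxy_port = determine_apache_port(public_port, singlenode_mode=True)
        return leader, api_port, haproxy_port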
=== modified file 'hooks/charmhelpers/contrib/network/__init__.py'
--- hooks/charmhelpers/contrib/network/__init__.py 2014-06-05 10:59:23 +0000
+++ hooks/charmhelpers/contrib/network/__init__.py 2015-11-12 11:46:11 +0000
@@ -0,0 +1,15 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
=== added file 'hooks/charmhelpers/contrib/network/ip.py'
--- hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/network/ip.py 2015-11-12 11:46:11 +0000
@@ -0,0 +1,456 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import glob
18import re
19import subprocess
20import six
21import socket
22
23from functools import partial
24
25from charmhelpers.core.hookenv import unit_get
26from charmhelpers.fetch import apt_install, apt_update
27from charmhelpers.core.hookenv import (
28 log,
29 WARNING,
30)
31
32try:
33 import netifaces
34except ImportError:
35 apt_update(fatal=True)
36 apt_install('python-netifaces', fatal=True)
37 import netifaces
38
39try:
40 import netaddr
41except ImportError:
42 apt_update(fatal=True)
43 apt_install('python-netaddr', fatal=True)
44 import netaddr
45
46
47def _validate_cidr(network):
48 try:
49 netaddr.IPNetwork(network)
50 except (netaddr.core.AddrFormatError, ValueError):
51 raise ValueError("Network (%s) is not in CIDR presentation format" %
52 network)
53
54
55def no_ip_found_error_out(network):
56 errmsg = ("No IP address found in network: %s" % network)
57 raise ValueError(errmsg)
58
59
60def get_address_in_network(network, fallback=None, fatal=False):
61 """Get an IPv4 or IPv6 address within the network from the host.
62
63 :param network (str): CIDR presentation format. For example,
64 '192.168.1.0/24'.
65 :param fallback (str): If no address is found, return fallback.
66 :param fatal (boolean): If no address is found, fallback is not
67 set and fatal is True then exit(1).
68 """
69 if network is None:
70 if fallback is not None:
71 return fallback
72
73 if fatal:
74 no_ip_found_error_out(network)
75 else:
76 return None
77
78 _validate_cidr(network)
79 network = netaddr.IPNetwork(network)
80 for iface in netifaces.interfaces():
81 addresses = netifaces.ifaddresses(iface)
82 if network.version == 4 and netifaces.AF_INET in addresses:
83 addr = addresses[netifaces.AF_INET][0]['addr']
84 netmask = addresses[netifaces.AF_INET][0]['netmask']
85 cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
86 if cidr in network:
87 return str(cidr.ip)
88
89 if network.version == 6 and netifaces.AF_INET6 in addresses:
90 for addr in addresses[netifaces.AF_INET6]:
91 if not addr['addr'].startswith('fe80'):
92 cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
93 addr['netmask']))
94 if cidr in network:
95 return str(cidr.ip)
96
97 if fallback is not None:
98 return fallback
99
100 if fatal:
101 no_ip_found_error_out(network)
102
103 return None
104
105
106def is_ipv6(address):
107 """Determine whether provided address is IPv6 or not."""
108 try:
109 address = netaddr.IPAddress(address)
110 except netaddr.AddrFormatError:
111 # probably a hostname - so not an address at all!
112 return False
113
114 return address.version == 6
115
116
117def is_address_in_network(network, address):
118 """
119 Determine whether the provided address is within a network range.
120
121 :param network (str): CIDR presentation format. For example,
122 '192.168.1.0/24'.
123 :param address: An individual IPv4 or IPv6 address without a net
124 mask or subnet prefix. For example, '192.168.1.1'.
125 :returns boolean: Flag indicating whether address is in network.
126 """
127 try:
128 network = netaddr.IPNetwork(network)
129 except (netaddr.core.AddrFormatError, ValueError):
130 raise ValueError("Network (%s) is not in CIDR presentation format" %
131 network)
132
133 try:
134 address = netaddr.IPAddress(address)
135 except (netaddr.core.AddrFormatError, ValueError):
136 raise ValueError("Address (%s) is not in correct presentation format" %
137 address)
138
139 if address in network:
140 return True
141 else:
142 return False
143
144
145def _get_for_address(address, key):
146 """Retrieve an attribute of or the physical interface that
147 the IP address provided could be bound to.
148
149 :param address (str): An individual IPv4 or IPv6 address without a net
150 mask or subnet prefix. For example, '192.168.1.1'.
151 :param key: 'iface' for the physical interface name or an attribute
152 of the configured interface, for example 'netmask'.
153 :returns str: Requested attribute or None if address is not bindable.
154 """
155 address = netaddr.IPAddress(address)
156 for iface in netifaces.interfaces():
157 addresses = netifaces.ifaddresses(iface)
158 if address.version == 4 and netifaces.AF_INET in addresses:
159 addr = addresses[netifaces.AF_INET][0]['addr']
160 netmask = addresses[netifaces.AF_INET][0]['netmask']
161 network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
162 cidr = network.cidr
163 if address in cidr:
164 if key == 'iface':
165 return iface
166 else:
167 return addresses[netifaces.AF_INET][0][key]
168
169 if address.version == 6 and netifaces.AF_INET6 in addresses:
170 for addr in addresses[netifaces.AF_INET6]:
171 if not addr['addr'].startswith('fe80'):
172 network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
173 addr['netmask']))
174 cidr = network.cidr
175 if address in cidr:
176 if key == 'iface':
177 return iface
178 elif key == 'netmask' and cidr:
179 return str(cidr).split('/')[1]
180 else:
181 return addr[key]
182
183 return None
184
185
186get_iface_for_address = partial(_get_for_address, key='iface')
187
188
189get_netmask_for_address = partial(_get_for_address, key='netmask')
190
191
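For illustration only (not part of the diff): the two partials above resolve, for an address already bound on the host, the owning interface and its netmask; the address below is a placeholder.

    # Assumes 192.168.1.10 is configured locally; both calls return None
    # when the address is not bindable on this host.
    from charmhelpers.contrib.network.ip import (
        get_iface_for_address,
        get_netmask_for_address,
    )

    vip = '192.168.1.10'
    iface = get_iface_for_address(vip)      # e.g. 'eth0'
    netmask = get_netmask_for_address(vip)  # e.g. '255.255.255.0'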
192def format_ipv6_addr(address):
193 """If address is IPv6, wrap it in '[]' otherwise return None.
194
195 This is required by most configuration files when specifying IPv6
196 addresses.
197 """
198 if is_ipv6(address):
199 return "[%s]" % address
200
201 return None
202
203
204def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
205 fatal=True, exc_list=None):
206 """Return the assigned IP address for a given interface, if any."""
207 # Extract nic if passed /dev/ethX
208 if '/' in iface:
209 iface = iface.split('/')[-1]
210
211 if not exc_list:
212 exc_list = []
213
214 try:
215 inet_num = getattr(netifaces, inet_type)
216 except AttributeError:
217 raise Exception("Unknown inet type '%s'" % str(inet_type))
218
219 interfaces = netifaces.interfaces()
220 if inc_aliases:
221 ifaces = []
222 for _iface in interfaces:
223 if iface == _iface or _iface.split(':')[0] == iface:
224 ifaces.append(_iface)
225
226 if fatal and not ifaces:
227 raise Exception("Invalid interface '%s'" % iface)
228
229 ifaces.sort()
230 else:
231 if iface not in interfaces:
232 if fatal:
233 raise Exception("Interface '%s' not found " % (iface))
234 else:
235 return []
236
237 else:
238 ifaces = [iface]
239
240 addresses = []
241 for netiface in ifaces:
242 net_info = netifaces.ifaddresses(netiface)
243 if inet_num in net_info:
244 for entry in net_info[inet_num]:
245 if 'addr' in entry and entry['addr'] not in exc_list:
246 addresses.append(entry['addr'])
247
248 if fatal and not addresses:
249 raise Exception("Interface '%s' doesn't have any %s addresses." %
250 (iface, inet_type))
251
252 return sorted(addresses)
253
254
255get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
256
257
258def get_iface_from_addr(addr):
259 """Work out on which interface the provided address is configured."""
260 for iface in netifaces.interfaces():
261 addresses = netifaces.ifaddresses(iface)
262 for inet_type in addresses:
263 for _addr in addresses[inet_type]:
264 _addr = _addr['addr']
265 # link local
266 ll_key = re.compile("(.+)%.*")
267 raw = re.match(ll_key, _addr)
268 if raw:
269 _addr = raw.group(1)
270
271 if _addr == addr:
272 log("Address '%s' is configured on iface '%s'" %
273 (addr, iface))
274 return iface
275
276 msg = "Unable to infer net iface on which '%s' is configured" % (addr)
277 raise Exception(msg)
278
279
280def sniff_iface(f):
281 """Ensure decorated function is called with a value for iface.
282
283 If no iface provided, inject net iface inferred from unit private address.
284 """
285 def iface_sniffer(*args, **kwargs):
286 if not kwargs.get('iface', None):
287 kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
288
289 return f(*args, **kwargs)
290
291 return iface_sniffer
292
293
294@sniff_iface
295def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
296 dynamic_only=True):
297 """Get assigned IPv6 address for a given interface.
298
299 Returns list of addresses found. If no address found, returns empty list.
300
301 If iface is None, we infer the current primary interface by doing a reverse
302 lookup on the unit private-address.
303
304 We currently only support scope global IPv6 addresses i.e. non-temporary
305 addresses. If no global IPv6 address is found, return the first one found
306 in the ipv6 address list.
307 """
308 addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
309 inc_aliases=inc_aliases, fatal=fatal,
310 exc_list=exc_list)
311
312 if addresses:
313 global_addrs = []
314 for addr in addresses:
315 key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
316 m = re.match(key_scope_link_local, addr)
317 if m:
318 eui_64_mac = m.group(1)
319 iface = m.group(2)
320 else:
321 global_addrs.append(addr)
322
323 if global_addrs:
324 # Make sure any found global addresses are not temporary
325 cmd = ['ip', 'addr', 'show', iface]
326 out = subprocess.check_output(cmd).decode('UTF-8')
327 if dynamic_only:
328 key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
329 else:
330 key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
331
332 addrs = []
333 for line in out.split('\n'):
334 line = line.strip()
335 m = re.match(key, line)
336 if m and 'temporary' not in line:
337 # Return the first valid address we find
338 for addr in global_addrs:
339 if m.group(1) == addr:
340 if not dynamic_only or \
341 m.group(1).endswith(eui_64_mac):
342 addrs.append(addr)
343
344 if addrs:
345 return addrs
346
347 if fatal:
348 raise Exception("Interface '%s' does not have a scope global "
349 "non-temporary ipv6 address." % iface)
350
351 return []
352
353
354def get_bridges(vnic_dir='/sys/devices/virtual/net'):
355 """Return a list of bridges on the system."""
356 b_regex = "%s/*/bridge" % vnic_dir
357 return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
358
359
360def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
361 """Return a list of nics comprising a given bridge on the system."""
362 brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
363 return [x.split('/')[-1] for x in glob.glob(brif_regex)]
364
365
366def is_bridge_member(nic):
367 """Check if a given nic is a member of a bridge."""
368 for bridge in get_bridges():
369 if nic in get_bridge_nics(bridge):
370 return True
371
372 return False
373
374
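An illustrative check (not part of the diff) of the sysfs bridge helpers above; it assumes a host with a Linux bridge, for example a 'br0' enslaving 'eth1' (placeholder names).

    from charmhelpers.contrib.network.ip import get_bridges, is_bridge_member

    print(get_bridges())             # e.g. ['br0']
    print(is_bridge_member('eth1'))  # True if eth1 is enslaved to any bridge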
375def is_ip(address):
376 """
377 Returns True if address is a valid IP address.
378 """
379 try:
380 # Test to see if already an IPv4 address
381 socket.inet_aton(address)
382 return True
383 except socket.error:
384 return False
385
386
387def ns_query(address):
388 try:
389 import dns.resolver
390 except ImportError:
391 apt_install('python-dnspython')
392 import dns.resolver
393
394 if isinstance(address, dns.name.Name):
395 rtype = 'PTR'
396 elif isinstance(address, six.string_types):
397 rtype = 'A'
398 else:
399 return None
400
401 answers = dns.resolver.query(address, rtype)
402 if answers:
403 return str(answers[0])
404 return None
405
406
407def get_host_ip(hostname, fallback=None):
408 """
409 Resolves the IP for a given hostname, or returns
410 the input if it is already an IP.
411 """
412 if is_ip(hostname):
413 return hostname
414
415 ip_addr = ns_query(hostname)
416 if not ip_addr:
417 try:
418 ip_addr = socket.gethostbyname(hostname)
419 except:
420 log("Failed to resolve hostname '%s'" % (hostname),
421 level=WARNING)
422 return fallback
423 return ip_addr
424
425
426def get_hostname(address, fqdn=True):
427 """
428 Resolves hostname for given IP, or returns the input
429 if it is already a hostname.
430 """
431 if is_ip(address):
432 try:
433 import dns.reversename
434 except ImportError:
435 apt_install("python-dnspython")
436 import dns.reversename
437
438 rev = dns.reversename.from_address(address)
439 result = ns_query(rev)
440
441 if not result:
442 try:
443 result = socket.gethostbyaddr(address)[0]
444 except:
445 return None
446 else:
447 result = address
448
449 if fqdn:
450 # strip trailing .
451 if result.endswith('.'):
452 return result[:-1]
453 else:
454 return result
455 else:
456 return result.split('.')[0]
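A brief, hedged sketch of the address-selection and resolution helpers added in this file; it assumes a deployed unit with the charm-helpers tree above plus python-dnspython, and all addresses and hostnames are placeholders.

    from charmhelpers.contrib.network.ip import (
        get_address_in_network,
        format_ipv6_addr,
        get_host_ip,
    )

    # Pick the local address inside the data network CIDR, falling back to
    # the unit's private address when nothing matches.
    addr = get_address_in_network('10.20.0.0/16', fallback='10.0.0.5')

    # Wrap IPv6 literals in [] for config files; format_ipv6_addr() returns
    # None for IPv4, so fall back to the plain address.
    endpoint = '{}:5672'.format(format_ipv6_addr(addr) or addr)

    # Resolve a hostname via DNS, or return the input untouched if it is
    # already an IP address.
    host_ip = get_host_ip('rabbitmq-0.example.com', fallback='127.0.0.1')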
=== modified file 'hooks/charmhelpers/contrib/network/ovs/__init__.py'
--- hooks/charmhelpers/contrib/network/ovs/__init__.py 2014-06-05 10:59:23 +0000
+++ hooks/charmhelpers/contrib/network/ovs/__init__.py 2015-11-12 11:46:11 +0000
@@ -1,3 +1,19 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
 ''' Helpers for interacting with OpenvSwitch '''
 import subprocess
 import os
@@ -21,12 +37,16 @@
     subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])


-def add_bridge_port(name, port):
+def add_bridge_port(name, port, promisc=False):
     ''' Add a port to the named openvswitch bridge '''
     log('Adding port {} to bridge {}'.format(port, name))
     subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
                            name, port])
     subprocess.check_call(["ip", "link", "set", port, "up"])
+    if promisc:
+        subprocess.check_call(["ip", "link", "set", port, "promisc", "on"])
+    else:
+        subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])


 def del_bridge_port(name, port):
@@ -35,6 +55,7 @@
     subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
                            name, port])
     subprocess.check_call(["ip", "link", "set", port, "down"])
+    subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])


 def set_manager(manager):

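An illustrative call of the extended port helper above (not part of the diff); it assumes openvswitch-switch is installed and uses 'br-data' and 'eth1' as placeholder bridge and NIC names.

    from charmhelpers.contrib.network.ovs import add_bridge, add_bridge_port

    add_bridge('br-data')
    # promisc=True now also flips the port into promiscuous mode, which is
    # typically wanted when bridging an external or data network NIC.
    add_bridge_port('br-data', 'eth1', promisc=True)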
=== modified file 'hooks/charmhelpers/contrib/openstack/__init__.py'
--- hooks/charmhelpers/contrib/openstack/__init__.py 2014-06-05 10:59:23 +0000
+++ hooks/charmhelpers/contrib/openstack/__init__.py 2015-11-12 11:46:11 +0000
@@ -0,0 +1,15 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
=== modified file 'hooks/charmhelpers/contrib/openstack/alternatives.py'
--- hooks/charmhelpers/contrib/openstack/alternatives.py 2014-06-05 10:59:23 +0000
+++ hooks/charmhelpers/contrib/openstack/alternatives.py 2015-11-12 11:46:11 +0000
@@ -1,3 +1,19 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
 ''' Helper for managing alternatives for file conflict resolution '''

 import subprocess

=== added directory 'hooks/charmhelpers/contrib/openstack/amulet'
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py'
--- hooks/charmhelpers/contrib/openstack/amulet/__init__.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/__init__.py 2015-11-12 11:46:11 +0000
@@ -0,0 +1,15 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-11-12 11:46:11 +0000
@@ -0,0 +1,197 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import six
18from collections import OrderedDict
19from charmhelpers.contrib.amulet.deployment import (
20 AmuletDeployment
21)
22
23
24class OpenStackAmuletDeployment(AmuletDeployment):
25 """OpenStack amulet deployment.
26
27 This class inherits from AmuletDeployment and has additional support
28 that is specifically for use by OpenStack charms.
29 """
30
31 def __init__(self, series=None, openstack=None, source=None, stable=True):
32 """Initialize the deployment environment."""
33 super(OpenStackAmuletDeployment, self).__init__(series)
34 self.openstack = openstack
35 self.source = source
36 self.stable = stable
37 # Note(coreycb): this needs to be changed when new next branches come
38 # out.
39 self.current_next = "trusty"
40
41 def _determine_branch_locations(self, other_services):
42 """Determine the branch locations for the other services.
43
44 Determine if the local branch being tested is derived from its
45 stable or next (dev) branch, and based on this, use the corresponding
46 stable or next branches for the other_services."""
47
48 # Charms outside the lp:~openstack-charmers namespace
49 base_charms = ['mysql', 'mongodb', 'nrpe']
50
51 # Force these charms to current series even when using an older series.
52 # ie. Use trusty/nrpe even when series is precise, as the P charm
53 # does not possess the necessary external master config and hooks.
54 force_series_current = ['nrpe']
55
56 if self.series in ['precise', 'trusty']:
57 base_series = self.series
58 else:
59 base_series = self.current_next
60
61 for svc in other_services:
62 if svc['name'] in force_series_current:
63 base_series = self.current_next
64 # If a location has been explicitly set, use it
65 if svc.get('location'):
66 continue
67 if self.stable:
68 temp = 'lp:charms/{}/{}'
69 svc['location'] = temp.format(base_series,
70 svc['name'])
71 else:
72 if svc['name'] in base_charms:
73 temp = 'lp:charms/{}/{}'
74 svc['location'] = temp.format(base_series,
75 svc['name'])
76 else:
77 temp = 'lp:~openstack-charmers/charms/{}/{}/next'
78 svc['location'] = temp.format(self.current_next,
79 svc['name'])
80
81 return other_services
82
83 def _add_services(self, this_service, other_services):
84 """Add services to the deployment and set openstack-origin/source."""
85 other_services = self._determine_branch_locations(other_services)
86
87 super(OpenStackAmuletDeployment, self)._add_services(this_service,
88 other_services)
89
90 services = other_services
91 services.append(this_service)
92
93 # Charms which should use the source config option
94 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
95 'ceph-osd', 'ceph-radosgw']
96
97 # Charms which can not use openstack-origin, ie. many subordinates
98 no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
99
100 if self.openstack:
101 for svc in services:
102 if svc['name'] not in use_source + no_origin:
103 config = {'openstack-origin': self.openstack}
104 self.d.configure(svc['name'], config)
105
106 if self.source:
107 for svc in services:
108 if svc['name'] in use_source and svc['name'] not in no_origin:
109 config = {'source': self.source}
110 self.d.configure(svc['name'], config)
111
112 def _configure_services(self, configs):
113 """Configure all of the services."""
114 for service, config in six.iteritems(configs):
115 self.d.configure(service, config)
116
117 def _get_openstack_release(self):
118 """Get openstack release.
119
120 Return an integer representing the enum value of the openstack
121 release.
122 """
123 # Must be ordered by OpenStack release (not by Ubuntu release):
124 (self.precise_essex, self.precise_folsom, self.precise_grizzly,
125 self.precise_havana, self.precise_icehouse,
126 self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
127 self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
128 self.wily_liberty) = range(12)
129
130 releases = {
131 ('precise', None): self.precise_essex,
132 ('precise', 'cloud:precise-folsom'): self.precise_folsom,
133 ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
134 ('precise', 'cloud:precise-havana'): self.precise_havana,
135 ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
136 ('trusty', None): self.trusty_icehouse,
137 ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
138 ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
139 ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
140 ('utopic', None): self.utopic_juno,
141 ('vivid', None): self.vivid_kilo,
142 ('wily', None): self.wily_liberty}
143 return releases[(self.series, self.openstack)]
144
145 def _get_openstack_release_string(self):
146 """Get openstack release string.
147
148 Return a string representing the openstack release.
149 """
150 releases = OrderedDict([
151 ('precise', 'essex'),
152 ('quantal', 'folsom'),
153 ('raring', 'grizzly'),
154 ('saucy', 'havana'),
155 ('trusty', 'icehouse'),
156 ('utopic', 'juno'),
157 ('vivid', 'kilo'),
158 ('wily', 'liberty'),
159 ])
160 if self.openstack:
161 os_origin = self.openstack.split(':')[1]
162 return os_origin.split('%s-' % self.series)[1].split('/')[0]
163 else:
164 return releases[self.series]
165
166 def get_ceph_expected_pools(self, radosgw=False):
167 """Return a list of expected ceph pools in a ceph + cinder + glance
168 test scenario, based on OpenStack release and whether ceph radosgw
169 is flagged as present or not."""
170
171 if self._get_openstack_release() >= self.trusty_kilo:
172 # Kilo or later
173 pools = [
174 'rbd',
175 'cinder',
176 'glance'
177 ]
178 else:
179 # Juno or earlier
180 pools = [
181 'data',
182 'metadata',
183 'rbd',
184 'cinder',
185 'glance'
186 ]
187
188 if radosgw:
189 pools.extend([
190 '.rgw.root',
191 '.rgw.control',
192 '.rgw',
193 '.rgw.gc',
194 '.users.uid'
195 ])
196
197 return pools
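A minimal sketch of how a charm's amulet test typically subclasses the deployment helper above; service names are placeholders, the snippet assumes amulet plus the charm's tests/ tree are available, and _deploy() comes from the base AmuletDeployment class.

    from charmhelpers.contrib.openstack.amulet.deployment import (
        OpenStackAmuletDeployment,
    )


    class BasicDeployment(OpenStackAmuletDeployment):
        def __init__(self, series='trusty', openstack=None, source=None,
                     stable=False):
            super(BasicDeployment, self).__init__(series, openstack, source,
                                                  stable)
            this_service = {'name': 'neutron-openvswitch'}
            other_services = [{'name': 'nova-compute'},
                              {'name': 'rabbitmq-server'}]
            self._add_services(this_service, other_services)
            self._deploy()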
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-11-12 11:46:11 +0000
@@ -0,0 +1,963 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import amulet
18import json
19import logging
20import os
21import six
22import time
23import urllib
24
25import cinderclient.v1.client as cinder_client
26import glanceclient.v1.client as glance_client
27import heatclient.v1.client as heat_client
28import keystoneclient.v2_0 as keystone_client
29import novaclient.v1_1.client as nova_client
30import pika
31import swiftclient
32
33from charmhelpers.contrib.amulet.utils import (
34 AmuletUtils
35)
36
37DEBUG = logging.DEBUG
38ERROR = logging.ERROR
39
40
41class OpenStackAmuletUtils(AmuletUtils):
42 """OpenStack amulet utilities.
43
44 This class inherits from AmuletUtils and has additional support
45 that is specifically for use by OpenStack charm tests.
46 """
47
48 def __init__(self, log_level=ERROR):
49 """Initialize the deployment environment."""
50 super(OpenStackAmuletUtils, self).__init__(log_level)
51
52 def validate_endpoint_data(self, endpoints, admin_port, internal_port,
53 public_port, expected):
54 """Validate endpoint data.
55
56 Validate actual endpoint data vs expected endpoint data. The ports
57 are used to find the matching endpoint.
58 """
59 self.log.debug('Validating endpoint data...')
60 self.log.debug('actual: {}'.format(repr(endpoints)))
61 found = False
62 for ep in endpoints:
63 self.log.debug('endpoint: {}'.format(repr(ep)))
64 if (admin_port in ep.adminurl and
65 internal_port in ep.internalurl and
66 public_port in ep.publicurl):
67 found = True
68 actual = {'id': ep.id,
69 'region': ep.region,
70 'adminurl': ep.adminurl,
71 'internalurl': ep.internalurl,
72 'publicurl': ep.publicurl,
73 'service_id': ep.service_id}
74 ret = self._validate_dict_data(expected, actual)
75 if ret:
76 return 'unexpected endpoint data - {}'.format(ret)
77
78 if not found:
79 return 'endpoint not found'
80
81 def validate_svc_catalog_endpoint_data(self, expected, actual):
82 """Validate service catalog endpoint data.
83
84 Validate a list of actual service catalog endpoints vs a list of
85 expected service catalog endpoints.
86 """
87 self.log.debug('Validating service catalog endpoint data...')
88 self.log.debug('actual: {}'.format(repr(actual)))
89 for k, v in six.iteritems(expected):
90 if k in actual:
91 ret = self._validate_dict_data(expected[k][0], actual[k][0])
92 if ret:
93 return self.endpoint_error(k, ret)
94 else:
95 return "endpoint {} does not exist".format(k)
96 return ret
97
98 def validate_tenant_data(self, expected, actual):
99 """Validate tenant data.
100
101 Validate a list of actual tenant data vs list of expected tenant
102 data.
103 """
104 self.log.debug('Validating tenant data...')
105 self.log.debug('actual: {}'.format(repr(actual)))
106 for e in expected:
107 found = False
108 for act in actual:
109 a = {'enabled': act.enabled, 'description': act.description,
110 'name': act.name, 'id': act.id}
111 if e['name'] == a['name']:
112 found = True
113 ret = self._validate_dict_data(e, a)
114 if ret:
115 return "unexpected tenant data - {}".format(ret)
116 if not found:
117 return "tenant {} does not exist".format(e['name'])
118 return ret
119
120 def validate_role_data(self, expected, actual):
121 """Validate role data.
122
123 Validate a list of actual role data vs a list of expected role
124 data.
125 """
126 self.log.debug('Validating role data...')
127 self.log.debug('actual: {}'.format(repr(actual)))
128 for e in expected:
129 found = False
130 for act in actual:
131 a = {'name': act.name, 'id': act.id}
132 if e['name'] == a['name']:
133 found = True
134 ret = self._validate_dict_data(e, a)
135 if ret:
136 return "unexpected role data - {}".format(ret)
137 if not found:
138 return "role {} does not exist".format(e['name'])
139 return ret
140
141 def validate_user_data(self, expected, actual):
142 """Validate user data.
143
144 Validate a list of actual user data vs a list of expected user
145 data.
146 """
147 self.log.debug('Validating user data...')
148 self.log.debug('actual: {}'.format(repr(actual)))
149 for e in expected:
150 found = False
151 for act in actual:
152 a = {'enabled': act.enabled, 'name': act.name,
153 'email': act.email, 'tenantId': act.tenantId,
154 'id': act.id}
155 if e['name'] == a['name']:
156 found = True
157 ret = self._validate_dict_data(e, a)
158 if ret:
159 return "unexpected user data - {}".format(ret)
160 if not found:
161 return "user {} does not exist".format(e['name'])
162 return ret
163
164 def validate_flavor_data(self, expected, actual):
165 """Validate flavor data.
166
167 Validate a list of actual flavors vs a list of expected flavors.
168 """
169 self.log.debug('Validating flavor data...')
170 self.log.debug('actual: {}'.format(repr(actual)))
171 act = [a.name for a in actual]
172 return self._validate_list_data(expected, act)
173
174 def tenant_exists(self, keystone, tenant):
175 """Return True if tenant exists."""
176 self.log.debug('Checking if tenant exists ({})...'.format(tenant))
177 return tenant in [t.name for t in keystone.tenants.list()]
178
179 def authenticate_cinder_admin(self, keystone_sentry, username,
180 password, tenant):
181 """Authenticates admin user with cinder."""
182 # NOTE(beisner): cinder python client doesn't accept tokens.
183 service_ip = \
184 keystone_sentry.relation('shared-db',
185 'mysql:shared-db')['private-address']
186 ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
187 return cinder_client.Client(username, password, tenant, ept)
188
189 def authenticate_keystone_admin(self, keystone_sentry, user, password,
190 tenant):
191 """Authenticates admin user with the keystone admin endpoint."""
192 self.log.debug('Authenticating keystone admin...')
193 unit = keystone_sentry
194 service_ip = unit.relation('shared-db',
195 'mysql:shared-db')['private-address']
196 ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
197 return keystone_client.Client(username=user, password=password,
198 tenant_name=tenant, auth_url=ep)
199
200 def authenticate_keystone_user(self, keystone, user, password, tenant):
201 """Authenticates a regular user with the keystone public endpoint."""
202 self.log.debug('Authenticating keystone user ({})...'.format(user))
203 ep = keystone.service_catalog.url_for(service_type='identity',
204 endpoint_type='publicURL')
205 return keystone_client.Client(username=user, password=password,
206 tenant_name=tenant, auth_url=ep)
207
208 def authenticate_glance_admin(self, keystone):
209 """Authenticates admin user with glance."""
210 self.log.debug('Authenticating glance admin...')
211 ep = keystone.service_catalog.url_for(service_type='image',
212 endpoint_type='adminURL')
213 return glance_client.Client(ep, token=keystone.auth_token)
214
215 def authenticate_heat_admin(self, keystone):
216 """Authenticates the admin user with heat."""
217 self.log.debug('Authenticating heat admin...')
218 ep = keystone.service_catalog.url_for(service_type='orchestration',
219 endpoint_type='publicURL')
220 return heat_client.Client(endpoint=ep, token=keystone.auth_token)
221
222 def authenticate_nova_user(self, keystone, user, password, tenant):
223 """Authenticates a regular user with nova-api."""
224 self.log.debug('Authenticating nova user ({})...'.format(user))
225 ep = keystone.service_catalog.url_for(service_type='identity',
226 endpoint_type='publicURL')
227 return nova_client.Client(username=user, api_key=password,
228 project_id=tenant, auth_url=ep)
229
230 def authenticate_swift_user(self, keystone, user, password, tenant):
231 """Authenticates a regular user with swift api."""
232 self.log.debug('Authenticating swift user ({})...'.format(user))
233 ep = keystone.service_catalog.url_for(service_type='identity',
234 endpoint_type='publicURL')
235 return swiftclient.Connection(authurl=ep,
236 user=user,
237 key=password,
238 tenant_name=tenant,
239 auth_version='2.0')
240
241 def create_cirros_image(self, glance, image_name):
242 """Download the latest cirros image and upload it to glance,
243 validate and return a resource pointer.
244
245 :param glance: pointer to authenticated glance connection
246 :param image_name: display name for new image
247 :returns: glance image pointer
248 """
249 self.log.debug('Creating glance cirros image '
250 '({})...'.format(image_name))
251
252 # Download cirros image
253 http_proxy = os.getenv('AMULET_HTTP_PROXY')
254 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
255 if http_proxy:
256 proxies = {'http': http_proxy}
257 opener = urllib.FancyURLopener(proxies)
258 else:
259 opener = urllib.FancyURLopener()
260
261 f = opener.open('http://download.cirros-cloud.net/version/released')
262 version = f.read().strip()
263 cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
264 local_path = os.path.join('tests', cirros_img)
265
266 if not os.path.exists(local_path):
267 cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
268 version, cirros_img)
269 opener.retrieve(cirros_url, local_path)
270 f.close()
271
272 # Create glance image
273 with open(local_path) as f:
274 image = glance.images.create(name=image_name, is_public=True,
275 disk_format='qcow2',
276 container_format='bare', data=f)
277
278 # Wait for image to reach active status
279 img_id = image.id
280 ret = self.resource_reaches_status(glance.images, img_id,
281 expected_stat='active',
282 msg='Image status wait')
283 if not ret:
284 msg = 'Glance image failed to reach expected state.'
285 amulet.raise_status(amulet.FAIL, msg=msg)
286
287 # Re-validate new image
288 self.log.debug('Validating image attributes...')
289 val_img_name = glance.images.get(img_id).name
290 val_img_stat = glance.images.get(img_id).status
291 val_img_pub = glance.images.get(img_id).is_public
292 val_img_cfmt = glance.images.get(img_id).container_format
293 val_img_dfmt = glance.images.get(img_id).disk_format
294 msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
295 'container fmt:{} disk fmt:{}'.format(
296 val_img_name, val_img_pub, img_id,
297 val_img_stat, val_img_cfmt, val_img_dfmt))
298
299 if val_img_name == image_name and val_img_stat == 'active' \
300 and val_img_pub is True and val_img_cfmt == 'bare' \
301 and val_img_dfmt == 'qcow2':
302 self.log.debug(msg_attr)
303 else:
304 msg = ('Volume validation failed, {}'.format(msg_attr))
305 amulet.raise_status(amulet.FAIL, msg=msg)
306
307 return image
308
309 def delete_image(self, glance, image):
310 """Delete the specified image."""
311
312 # /!\ DEPRECATION WARNING
313 self.log.warn('/!\\ DEPRECATION WARNING: use '
314 'delete_resource instead of delete_image.')
315 self.log.debug('Deleting glance image ({})...'.format(image))
316 return self.delete_resource(glance.images, image, msg='glance image')
317
318 def create_instance(self, nova, image_name, instance_name, flavor):
319 """Create the specified instance."""
320 self.log.debug('Creating instance '
321 '({}|{}|{})'.format(instance_name, image_name, flavor))
322 image = nova.images.find(name=image_name)
323 flavor = nova.flavors.find(name=flavor)
324 instance = nova.servers.create(name=instance_name, image=image,
325 flavor=flavor)
326
327 count = 1
328 status = instance.status
329 while status != 'ACTIVE' and count < 60:
330 time.sleep(3)
331 instance = nova.servers.get(instance.id)
332 status = instance.status
333 self.log.debug('instance status: {}'.format(status))
334 count += 1
335
336 if status != 'ACTIVE':
337 self.log.error('instance creation timed out')
338 return None
339
340 return instance
341
342 def delete_instance(self, nova, instance):
343 """Delete the specified instance."""
344
345 # /!\ DEPRECATION WARNING
346 self.log.warn('/!\\ DEPRECATION WARNING: use '
347 'delete_resource instead of delete_instance.')
348 self.log.debug('Deleting instance ({})...'.format(instance))
349 return self.delete_resource(nova.servers, instance,
350 msg='nova instance')
351
352 def create_or_get_keypair(self, nova, keypair_name="testkey"):
353 """Create a new keypair, or return pointer if it already exists."""
354 try:
355 _keypair = nova.keypairs.get(keypair_name)
356 self.log.debug('Keypair ({}) already exists, '
357 'using it.'.format(keypair_name))
358 return _keypair
359 except:
360 self.log.debug('Keypair ({}) does not exist, '
361 'creating it.'.format(keypair_name))
362
363 _keypair = nova.keypairs.create(name=keypair_name)
364 return _keypair
365
366 def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
367 img_id=None, src_vol_id=None, snap_id=None):
368 """Create cinder volume, optionally from a glance image, OR
369 optionally as a clone of an existing volume, OR optionally
370 from a snapshot. Wait for the new volume status to reach
371 the expected status, validate and return a resource pointer.
372
373 :param vol_name: cinder volume display name
374 :param vol_size: size in gigabytes
375 :param img_id: optional glance image id
376 :param src_vol_id: optional source volume id to clone
377 :param snap_id: optional snapshot id to use
378 :returns: cinder volume pointer
379 """
380 # Handle parameter input and avoid impossible combinations
381 if img_id and not src_vol_id and not snap_id:
382 # Create volume from image
383 self.log.debug('Creating cinder volume from glance image...')
384 bootable = 'true'
385 elif src_vol_id and not img_id and not snap_id:
386 # Clone an existing volume
387 self.log.debug('Cloning cinder volume...')
388 bootable = cinder.volumes.get(src_vol_id).bootable
389 elif snap_id and not src_vol_id and not img_id:
390 # Create volume from snapshot
391 self.log.debug('Creating cinder volume from snapshot...')
392 snap = cinder.volume_snapshots.find(id=snap_id)
393 vol_size = snap.size
394 snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
395 bootable = cinder.volumes.get(snap_vol_id).bootable
396 elif not img_id and not src_vol_id and not snap_id:
397 # Create volume
398 self.log.debug('Creating cinder volume...')
399 bootable = 'false'
400 else:
401 # Impossible combination of parameters
402 msg = ('Invalid method use - name:{} size:{} img_id:{} '
403 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
404 img_id, src_vol_id,
405 snap_id))
406 amulet.raise_status(amulet.FAIL, msg=msg)
407
408 # Create new volume
409 try:
410 vol_new = cinder.volumes.create(display_name=vol_name,
411 imageRef=img_id,
412 size=vol_size,
413 source_volid=src_vol_id,
414 snapshot_id=snap_id)
415 vol_id = vol_new.id
416 except Exception as e:
417 msg = 'Failed to create volume: {}'.format(e)
418 amulet.raise_status(amulet.FAIL, msg=msg)
419
420 # Wait for volume to reach available status
421 ret = self.resource_reaches_status(cinder.volumes, vol_id,
422 expected_stat="available",
423 msg="Volume status wait")
424 if not ret:
425 msg = 'Cinder volume failed to reach expected state.'
426 amulet.raise_status(amulet.FAIL, msg=msg)
427
428 # Re-validate new volume
429 self.log.debug('Validating volume attributes...')
430 val_vol_name = cinder.volumes.get(vol_id).display_name
431 val_vol_boot = cinder.volumes.get(vol_id).bootable
432 val_vol_stat = cinder.volumes.get(vol_id).status
433 val_vol_size = cinder.volumes.get(vol_id).size
434 msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
435 '{} size:{}'.format(val_vol_name, vol_id,
436 val_vol_stat, val_vol_boot,
437 val_vol_size))
438
439 if val_vol_boot == bootable and val_vol_stat == 'available' \
440 and val_vol_name == vol_name and val_vol_size == vol_size:
441 self.log.debug(msg_attr)
442 else:
443 msg = ('Volume validation failed, {}'.format(msg_attr))
444 amulet.raise_status(amulet.FAIL, msg=msg)
445
446 return vol_new
447
448 def delete_resource(self, resource, resource_id,
449 msg="resource", max_wait=120):
450 """Delete one openstack resource, such as one instance, keypair,
451 image, volume, stack, etc., and confirm deletion within max wait time.
452
453 :param resource: pointer to os resource type, ex:glance_client.images
454 :param resource_id: unique name or id for the openstack resource
455 :param msg: text to identify purpose in logging
456 :param max_wait: maximum wait time in seconds
457 :returns: True if successful, otherwise False
458 """
459 self.log.debug('Deleting OpenStack resource '
460 '{} ({})'.format(resource_id, msg))
461 num_before = len(list(resource.list()))
462 resource.delete(resource_id)
463
464 tries = 0
465 num_after = len(list(resource.list()))
466 while num_after != (num_before - 1) and tries < (max_wait / 4):
467 self.log.debug('{} delete check: '
468 '{} [{}:{}] {}'.format(msg, tries,
469 num_before,
470 num_after,
471 resource_id))
472 time.sleep(4)
473 num_after = len(list(resource.list()))
474 tries += 1
475
476 self.log.debug('{}: expected, actual count = {}, '
477 '{}'.format(msg, num_before - 1, num_after))
478
479 if num_after == (num_before - 1):
480 return True
481 else:
482 self.log.error('{} delete timed out'.format(msg))
483 return False
484
485 def resource_reaches_status(self, resource, resource_id,
486 expected_stat='available',
487 msg='resource', max_wait=120):
488 """Wait for an openstack resources status to reach an
489 expected status within a specified time. Useful to confirm that
490 nova instances, cinder vols, snapshots, glance images, heat stacks
491 and other resources eventually reach the expected status.
492
493 :param resource: pointer to os resource type, ex: heat_client.stacks
494 :param resource_id: unique id for the openstack resource
495 :param expected_stat: status to expect resource to reach
496 :param msg: text to identify purpose in logging
497 :param max_wait: maximum wait time in seconds
498 :returns: True if successful, False if status is not reached
499 """
500
501 tries = 0
502 resource_stat = resource.get(resource_id).status
503 while resource_stat != expected_stat and tries < (max_wait / 4):
504 self.log.debug('{} status check: '
505 '{} [{}:{}] {}'.format(msg, tries,
506 resource_stat,
507 expected_stat,
508 resource_id))
509 time.sleep(4)
510 resource_stat = resource.get(resource_id).status
511 tries += 1
512
513 self.log.debug('{}: expected, actual status = {}, '
514 '{}'.format(msg, resource_stat, expected_stat))
515
516 if resource_stat == expected_stat:
517 return True
518 else:
519 self.log.debug('{} never reached expected status: '
520 '{}'.format(resource_id, expected_stat))
521 return False
522
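Illustrative usage of the two generic wait/delete helpers above; it assumes an amulet test that already holds an authenticated glance client and an uploaded image id (both placeholders here).

    from charmhelpers.contrib.openstack.amulet.utils import (
        OpenStackAmuletUtils,
        DEBUG,
    )

    u = OpenStackAmuletUtils(DEBUG)
    # 'glance' and 'img_id' are assumed to exist in the surrounding test.
    if u.resource_reaches_status(glance.images, img_id,
                                 expected_stat='active',
                                 msg='image status wait', max_wait=300):
        u.delete_resource(glance.images, img_id, msg='glance image')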
523 def get_ceph_osd_id_cmd(self, index):
524 """Produce a shell command that will return a ceph-osd id."""
525 return ("`initctl list | grep 'ceph-osd ' | "
526 "awk 'NR=={} {{ print $2 }}' | "
527 "grep -o '[0-9]*'`".format(index + 1))
528
529 def get_ceph_pools(self, sentry_unit):
530 """Return a dict of ceph pools from a single ceph unit, with
531 pool name as keys, pool id as vals."""
532 pools = {}
533 cmd = 'sudo ceph osd lspools'
534 output, code = sentry_unit.run(cmd)
535 if code != 0:
536 msg = ('{} `{}` returned {} '
537 '{}'.format(sentry_unit.info['unit_name'],
538 cmd, code, output))
539 amulet.raise_status(amulet.FAIL, msg=msg)
540
541 # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
542 for pool in str(output).split(','):
543 pool_id_name = pool.split(' ')
544 if len(pool_id_name) == 2:
545 pool_id = pool_id_name[0]
546 pool_name = pool_id_name[1]
547 pools[pool_name] = int(pool_id)
548
549 self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
550 pools))
551 return pools
552
553 def get_ceph_df(self, sentry_unit):
554 """Return dict of ceph df json output, including ceph pool state.
555
556 :param sentry_unit: Pointer to amulet sentry instance (juju unit)
557 :returns: Dict of ceph df output
558 """
559 cmd = 'sudo ceph df --format=json'
560 output, code = sentry_unit.run(cmd)
561 if code != 0:
562 msg = ('{} `{}` returned {} '
563 '{}'.format(sentry_unit.info['unit_name'],
564 cmd, code, output))
565 amulet.raise_status(amulet.FAIL, msg=msg)
566 return json.loads(output)
567
568 def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
569 """Take a sample of attributes of a ceph pool, returning ceph
570 pool name, object count and disk space used for the specified
571 pool ID number.
572
573 :param sentry_unit: Pointer to amulet sentry instance (juju unit)
574 :param pool_id: Ceph pool ID
575 :returns: List of pool name, object count, kb disk space used
576 """
577 df = self.get_ceph_df(sentry_unit)
578 pool_name = df['pools'][pool_id]['name']
579 obj_count = df['pools'][pool_id]['stats']['objects']
580 kb_used = df['pools'][pool_id]['stats']['kb_used']
581 self.log.debug('Ceph {} pool (ID {}): {} objects, '
582 '{} kb used'.format(pool_name, pool_id,
583 obj_count, kb_used))
584 return pool_name, obj_count, kb_used
585
586 def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
587 """Validate ceph pool samples taken over time, such as pool
588 object counts or pool kb used, before adding, after adding, and
589 after deleting items which affect those pool attributes. The
590 2nd element is expected to be greater than the 1st; 3rd is expected
591 to be less than the 2nd.
592
593 :param samples: List containing 3 data samples
594 :param sample_type: String for logging and usage context
595 :returns: None if successful, Failure message otherwise
596 """
597 original, created, deleted = range(3)
598 if samples[created] <= samples[original] or \
599 samples[deleted] >= samples[created]:
600 return ('Ceph {} samples ({}) '
601 'unexpected.'.format(sample_type, samples))
602 else:
603 self.log.debug('Ceph {} samples (OK): '
604 '{}'.format(sample_type, samples))
605 return None
606
607# rabbitmq/amqp specific helpers:
608 def add_rmq_test_user(self, sentry_units,
609 username="testuser1", password="changeme"):
610 """Add a test user via the first rmq juju unit, check connection as
611 the new user against all sentry units.
612
613 :param sentry_units: list of sentry unit pointers
614 :param username: amqp user name, default to testuser1
615 :param password: amqp user password
616 :returns: None if successful. Raise on error.
617 """
618 self.log.debug('Adding rmq user ({})...'.format(username))
619
620 # Check that user does not already exist
621 cmd_user_list = 'rabbitmqctl list_users'
622 output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
623 if username in output:
624 self.log.warning('User ({}) already exists, returning '
625 'gracefully.'.format(username))
626 return
627
628 perms = '".*" ".*" ".*"'
629 cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
630 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
631
632 # Add user via first unit
633 for cmd in cmds:
634 output, _ = self.run_cmd_unit(sentry_units[0], cmd)
635
636 # Check connection against the other sentry_units
637 self.log.debug('Checking user connect against units...')
638 for sentry_unit in sentry_units:
639 connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
640 username=username,
641 password=password)
642 connection.close()
643
644 def delete_rmq_test_user(self, sentry_units, username="testuser1"):
645 """Delete a rabbitmq user via the first rmq juju unit.
646
647 :param sentry_units: list of sentry unit pointers
648 :param username: amqp user name, default to testuser1
649 :param password: amqp user password
650 :returns: None if successful or no such user.
651 """
652 self.log.debug('Deleting rmq user ({})...'.format(username))
653
654 # Check that the user exists
655 cmd_user_list = 'rabbitmqctl list_users'
656 output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
657
658 if username not in output:
659 self.log.warning('User ({}) does not exist, returning '
660 'gracefully.'.format(username))
661 return
662
663 # Delete the user
664 cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
665 output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
666
667 def get_rmq_cluster_status(self, sentry_unit):
668 """Execute rabbitmq cluster status command on a unit and return
669 the full output.
670
671 :param sentry_unit: sentry unit
672 :returns: String containing console output of cluster status command
673 """
674 cmd = 'rabbitmqctl cluster_status'
675 output, _ = self.run_cmd_unit(sentry_unit, cmd)
676 self.log.debug('{} cluster_status:\n{}'.format(
677 sentry_unit.info['unit_name'], output))
678 return str(output)
679
680 def get_rmq_cluster_running_nodes(self, sentry_unit):
681 """Parse rabbitmqctl cluster_status output string, return list of
682 running rabbitmq cluster nodes.
683
684 :param sentry_unit: sentry unit
685 :returns: List containing node names of running nodes
686 """
687 # NOTE(beisner): rabbitmqctl cluster_status output is not
688 # json-parsable, do string chop foo, then json.loads that.
689 str_stat = self.get_rmq_cluster_status(sentry_unit)
690 if 'running_nodes' in str_stat:
691 pos_start = str_stat.find("{running_nodes,") + 15
692 pos_end = str_stat.find("]},", pos_start) + 1
693 str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
694 run_nodes = json.loads(str_run_nodes)
695 return run_nodes
696 else:
697 return []
698
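# Illustrative sketch, not from the upstream diff: the string chop performed
# above, shown on a representative cluster_status string. rabbitmqctl prints
# Erlang terms, but the running_nodes list is close enough to JSON once single
# quotes are swapped for double quotes.
import json

str_stat = "... {running_nodes,['rabbit@host-a','rabbit@host-b']}, ..."
pos_start = str_stat.find("{running_nodes,") + 15
pos_end = str_stat.find("]},", pos_start) + 1
run_nodes = json.loads(str_stat[pos_start:pos_end].replace("'", '"'))
# run_nodes == ['rabbit@host-a', 'rabbit@host-b']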
699 def validate_rmq_cluster_running_nodes(self, sentry_units):
700 """Check that all rmq unit hostnames are represented in the
701 cluster_status output of all units.
702
703 :param sentry_units: list of sentry unit pointers (all rmq units)
704 Host names are resolved internally via get_unit_hostnames().
705 :returns: None if successful, otherwise return error message
706 """
707 host_names = self.get_unit_hostnames(sentry_units)
708 errors = []
709
710 # Query every unit for cluster_status running nodes
711 for query_unit in sentry_units:
712 query_unit_name = query_unit.info['unit_name']
713 running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
714
715 # Confirm that every unit is represented in the queried unit's
716 # cluster_status running nodes output.
717 for validate_unit in sentry_units:
718 val_host_name = host_names[validate_unit.info['unit_name']]
719 val_node_name = 'rabbit@{}'.format(val_host_name)
720
721 if val_node_name not in running_nodes:
722 errors.append('Cluster member check failed on {}: {} not '
723 'in {}\n'.format(query_unit_name,
724 val_node_name,
725 running_nodes))
726 if errors:
727 return ''.join(errors)
728
729 def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
730 """Check a single juju rmq unit for ssl and port in the config file."""
731 host = sentry_unit.info['public-address']
732 unit_name = sentry_unit.info['unit_name']
733
734 conf_file = '/etc/rabbitmq/rabbitmq.config'
735 conf_contents = str(self.file_contents_safe(sentry_unit,
736 conf_file, max_wait=16))
737 # Checks
738 conf_ssl = 'ssl' in conf_contents
739 conf_port = str(port) in conf_contents
740
741 # Port explicitly checked in config
742 if port and conf_port and conf_ssl:
743 self.log.debug('SSL is enabled @{}:{} '
744 '({})'.format(host, port, unit_name))
745 return True
746 elif port and not conf_port and conf_ssl:
747 self.log.debug('SSL is enabled @{} but not on port {} '
748 '({})'.format(host, port, unit_name))
749 return False
750 # Port not checked (useful when checking that ssl is disabled)
751 elif not port and conf_ssl:
752 self.log.debug('SSL is enabled @{}:{} '
753 '({})'.format(host, port, unit_name))
754 return True
755 elif not port and not conf_ssl:
756 self.log.debug('SSL not enabled @{}:{} '
757 '({})'.format(host, port, unit_name))
758 return False
759 else:
760 msg = ('Unknown condition when checking SSL status @{}:{} '
761 '({})'.format(host, port, unit_name))
762 amulet.raise_status(amulet.FAIL, msg)
763
764 def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
765 """Check that ssl is enabled on rmq juju sentry units.
766
767 :param sentry_units: list of all rmq sentry units
768 :param port: optional ssl port override to validate
769 :returns: None if successful, otherwise return error message
770 """
771 for sentry_unit in sentry_units:
772 if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
773 return ('Unexpected condition: ssl is disabled on unit '
774 '({})'.format(sentry_unit.info['unit_name']))
775 return None
776
777 def validate_rmq_ssl_disabled_units(self, sentry_units):
778 """Check that ssl is enabled on listed rmq juju sentry units.
779
780 :param sentry_units: list of all rmq sentry units
781 :returns: None if successful, otherwise return error message
782 """
783 for sentry_unit in sentry_units:
784 if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
785 return ('Unexpected condition: ssl is enabled on unit '
786 '({})'.format(sentry_unit.info['unit_name']))
787 return None
788
789 def configure_rmq_ssl_on(self, sentry_units, deployment,
790 port=None, max_wait=60):
791 """Turn ssl charm config option on, with optional non-default
792 ssl port specification. Confirm that it is enabled on every
793 unit.
794
795 :param sentry_units: list of sentry units
796 :param deployment: amulet deployment object pointer
797 :param port: amqp port, use defaults if None
798 :param max_wait: maximum time to wait in seconds to confirm
799 :returns: None if successful. Raise on error.
800 """
801 self.log.debug('Setting ssl charm config option: on')
802
803 # Enable RMQ SSL
804 config = {'ssl': 'on'}
805 if port:
806 config['ssl_port'] = port
807
808 deployment.configure('rabbitmq-server', config)
809
810 # Confirm
811 tries = 0
812 ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
813 while ret and tries < (max_wait / 4):
814 time.sleep(4)
815 self.log.debug('Attempt {}: {}'.format(tries, ret))
816 ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
817 tries += 1
818
819 if ret:
820 amulet.raise_status(amulet.FAIL, ret)
821
822 def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
823 """Turn ssl charm config option off, confirm that it is disabled
824 on every unit.
825
826 :param sentry_units: list of sentry units
827 :param deployment: amulet deployment object pointer
828 :param max_wait: maximum time to wait in seconds to confirm
829 :returns: None if successful. Raise on error.
830 """
831 self.log.debug('Setting ssl charm config option: off')
832
833 # Disable RMQ SSL
834 config = {'ssl': 'off'}
835 deployment.configure('rabbitmq-server', config)
836
837 # Confirm
838 tries = 0
839 ret = self.validate_rmq_ssl_disabled_units(sentry_units)
840 while ret and tries < (max_wait / 4):
841 time.sleep(4)
842 self.log.debug('Attempt {}: {}'.format(tries, ret))
843 ret = self.validate_rmq_ssl_disabled_units(sentry_units)
844 tries += 1
845
846 if ret:
847 amulet.raise_status(amulet.FAIL, ret)
848
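# Illustrative sketch, not from the upstream diff: toggling rmq ssl from an
# amulet test. `u` is assumed to be an OpenStackAmuletUtils-style instance and
# `self.d` the amulet Deployment; the unit lookups follow the amulet API of
# this era and are assumptions.
rmq_units = [self.d.sentry.unit['rabbitmq-server/0'],
             self.d.sentry.unit['rabbitmq-server/1'],
             self.d.sentry.unit['rabbitmq-server/2']]
u.configure_rmq_ssl_on(rmq_units, self.d, port=5971, max_wait=120)
# ... exercise amqp-over-ssl connections here ...
u.configure_rmq_ssl_off(rmq_units, self.d, max_wait=120)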
849 def connect_amqp_by_unit(self, sentry_unit, ssl=False,
850 port=None, fatal=True,
851 username="testuser1", password="changeme"):
852 """Establish and return a pika amqp connection to the rabbitmq service
853 running on a rmq juju unit.
854
855 :param sentry_unit: sentry unit pointer
856 :param ssl: boolean, default to False
857 :param port: amqp port, use defaults if None
858 :param fatal: boolean, default to True (raises on connect error)
859 :param username: amqp user name, default to testuser1
860 :param password: amqp user password
861 :returns: pika amqp connection pointer or None if failed and non-fatal
862 """
863 host = sentry_unit.info['public-address']
864 unit_name = sentry_unit.info['unit_name']
865
866 # Default port logic if port is not specified
867 if ssl and not port:
868 port = 5671
869 elif not ssl and not port:
870 port = 5672
871
872 self.log.debug('Connecting to amqp on {}:{} ({}) as '
873 '{}...'.format(host, port, unit_name, username))
874
875 try:
876 credentials = pika.PlainCredentials(username, password)
877 parameters = pika.ConnectionParameters(host=host, port=port,
878 credentials=credentials,
879 ssl=ssl,
880 connection_attempts=3,
881 retry_delay=5,
882 socket_timeout=1)
883 connection = pika.BlockingConnection(parameters)
884 assert connection.server_properties['product'] == 'RabbitMQ'
885 self.log.debug('Connect OK')
886 return connection
887 except Exception as e:
888 msg = ('amqp connection failed to {}:{} as '
889 '{} ({})'.format(host, port, username, str(e)))
890 if fatal:
891 amulet.raise_status(amulet.FAIL, msg)
892 else:
893 self.log.warn(msg)
894 return None
895
896 def publish_amqp_message_by_unit(self, sentry_unit, message,
897 queue="test", ssl=False,
898 username="testuser1",
899 password="changeme",
900 port=None):
901 """Publish an amqp message to a rmq juju unit.
902
903 :param sentry_unit: sentry unit pointer
904 :param message: amqp message string
905 :param queue: message queue, default to test
906 :param username: amqp user name, default to testuser1
907 :param password: amqp user password
908 :param ssl: boolean, default to False
909 :param port: amqp port, use defaults if None
910 :returns: None. Raises exception if publish failed.
911 """
912 self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
913 message))
914 connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
915 port=port,
916 username=username,
917 password=password)
918
919 # NOTE(beisner): extra debug here re: pika hang potential:
920 # https://github.com/pika/pika/issues/297
921 # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
922 self.log.debug('Defining channel...')
923 channel = connection.channel()
924 self.log.debug('Declaring queue...')
925 channel.queue_declare(queue=queue, auto_delete=False, durable=True)
926 self.log.debug('Publishing message...')
927 channel.basic_publish(exchange='', routing_key=queue, body=message)
928 self.log.debug('Closing channel...')
929 channel.close()
930 self.log.debug('Closing connection...')
931 connection.close()
932
933 def get_amqp_message_by_unit(self, sentry_unit, queue="test",
934 username="testuser1",
935 password="changeme",
936 ssl=False, port=None):
937 """Get an amqp message from a rmq juju unit.
938
939 :param sentry_unit: sentry unit pointer
940 :param queue: message queue, default to test
941 :param username: amqp user name, default to testuser1
942 :param password: amqp user password
943 :param ssl: boolean, default to False
944 :param port: amqp port, use defaults if None
945 :returns: amqp message body as string. Raise if get fails.
946 """
947 connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
948 port=port,
949 username=username,
950 password=password)
951 channel = connection.channel()
952 method_frame, _, body = channel.basic_get(queue)
953
954 if method_frame:
955 self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
956 body))
957 channel.basic_ack(method_frame.delivery_tag)
958 channel.close()
959 connection.close()
960 return body
961 else:
962 msg = 'No message retrieved.'
963 amulet.raise_status(amulet.FAIL, msg)
964
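# Illustrative sketch, not from the upstream diff: a full publish/consume round
# trip with the rmq helpers above, assuming `u` is an OpenStackAmuletUtils-style
# instance and `rmq_units` is the list of rabbitmq-server sentry units gathered
# by the test.
u.add_rmq_test_user(rmq_units)
u.publish_amqp_message_by_unit(rmq_units[0], 'hello charm', queue='test')
body = u.get_amqp_message_by_unit(rmq_units[1], queue='test')
assert body == 'hello charm'
u.delete_rmq_test_user(rmq_units)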
=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
--- hooks/charmhelpers/contrib/openstack/context.py 2014-06-11 09:44:51 +0000
+++ hooks/charmhelpers/contrib/openstack/context.py 2015-11-12 11:46:11 +0000
@@ -1,48 +1,94 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import glob
1import json18import json
2import os19import os
20import re
3import time21import time
4
5from base64 import b64decode22from base64 import b64decode
623from subprocess import check_call
7from subprocess import (24
8 check_call25import six
9)26import yaml
10
1127
12from charmhelpers.fetch import (28from charmhelpers.fetch import (
13 apt_install,29 apt_install,
14 filter_installed_packages,30 filter_installed_packages,
15)31)
16
17from charmhelpers.core.hookenv import (32from charmhelpers.core.hookenv import (
18 config,33 config,
34 is_relation_made,
19 local_unit,35 local_unit,
20 log,36 log,
21 relation_get,37 relation_get,
22 relation_ids,38 relation_ids,
23 related_units,39 related_units,
40 relation_set,
24 unit_get,41 unit_get,
25 unit_private_ip,42 unit_private_ip,
43 charm_name,
44 DEBUG,
45 INFO,
46 WARNING,
26 ERROR,47 ERROR,
27)48)
2849
50from charmhelpers.core.sysctl import create as sysctl_create
51from charmhelpers.core.strutils import bool_from_string
52
53from charmhelpers.core.host import (
54 get_bond_master,
55 is_phy_iface,
56 list_nics,
57 get_nic_hwaddr,
58 mkdir,
59 write_file,
60)
29from charmhelpers.contrib.hahelpers.cluster import (61from charmhelpers.contrib.hahelpers.cluster import (
30 determine_apache_port,62 determine_apache_port,
31 determine_api_port,63 determine_api_port,
32 https,64 https,
33 is_clustered65 is_clustered,
34)66)
35
36from charmhelpers.contrib.hahelpers.apache import (67from charmhelpers.contrib.hahelpers.apache import (
37 get_cert,68 get_cert,
38 get_ca_cert,69 get_ca_cert,
70 install_ca_cert,
39)71)
40
41from charmhelpers.contrib.openstack.neutron import (72from charmhelpers.contrib.openstack.neutron import (
42 neutron_plugin_attribute,73 neutron_plugin_attribute,
43)74 parse_data_port_mappings,
4475)
76from charmhelpers.contrib.openstack.ip import (
77 resolve_address,
78 INTERNAL,
79)
80from charmhelpers.contrib.network.ip import (
81 get_address_in_network,
82 get_ipv4_addr,
83 get_ipv6_addr,
84 get_netmask_for_address,
85 format_ipv6_addr,
86 is_address_in_network,
87 is_bridge_member,
88)
89from charmhelpers.contrib.openstack.utils import get_host_ip
45CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'90CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
91ADDRESS_TYPES = ['admin', 'internal', 'public']
4692
4793
48class OSContextError(Exception):94class OSContextError(Exception):
@@ -50,7 +96,7 @@
5096
5197
52def ensure_packages(packages):98def ensure_packages(packages):
53 '''Install but do not upgrade required plugin packages'''99 """Install but do not upgrade required plugin packages."""
54 required = filter_installed_packages(packages)100 required = filter_installed_packages(packages)
55 if required:101 if required:
56 apt_install(required, fatal=True)102 apt_install(required, fatal=True)
@@ -58,20 +104,62 @@
58104
59def context_complete(ctxt):105def context_complete(ctxt):
60 _missing = []106 _missing = []
61 for k, v in ctxt.iteritems():107 for k, v in six.iteritems(ctxt):
62 if v is None or v == '':108 if v is None or v == '':
63 _missing.append(k)109 _missing.append(k)
110
64 if _missing:111 if _missing:
65 log('Missing required data: %s' % ' '.join(_missing), level='INFO')112 log('Missing required data: %s' % ' '.join(_missing), level=INFO)
66 return False113 return False
114
67 return True115 return True
68116
69117
70def config_flags_parser(config_flags):118def config_flags_parser(config_flags):
119 """Parses config flags string into dict.
120
121 This parsing method supports a few different formats for the config
122 flag values to be parsed:
123
124 1. A string in the simple format of key=value pairs, with the possibility
125 of specifying multiple key value pairs within the same string. For
126 example, a string in the format of 'key1=value1, key2=value2' will
127 return a dict of:
128
129 {'key1': 'value1',
130 'key2': 'value2'}.
131
132 2. A string in the above format, but supporting a comma-delimited list
133 of values for the same key. For example, a string in the format of
134 'key1=value1, key2=value3,value4,value5' will return a dict of:
135
136 {'key1': 'value1',
137 'key2': 'value3,value4,value5'}
138
139 3. A string containing a colon character (:) prior to an equal
140 character (=) will be treated as yaml and parsed as such. This can be
141 used to specify more complex key value pairs. For example,
142 a string in the format of 'key1: subkey1=value1, subkey2=value2' will
143 return a dict of:
144
145 {'key1': 'subkey1=value1, subkey2=value2'}
146
147 The provided config_flags string may be a list of comma-separated values
148 which themselves may be comma-separated list of values.
149 """
150 # If we find a colon before an equals sign then treat it as yaml.
151 # Note: limit it to finding the colon first since this indicates assignment
152 # for inline yaml.
153 colon = config_flags.find(':')
154 equals = config_flags.find('=')
155 if colon > 0:
156 if colon < equals or equals < 0:
157 return yaml.safe_load(config_flags)
158
71 if config_flags.find('==') >= 0:159 if config_flags.find('==') >= 0:
72 log("config_flags is not in expected format (key=value)",160 log("config_flags is not in expected format (key=value)", level=ERROR)
73 level=ERROR)
74 raise OSContextError161 raise OSContextError
162
75 # strip the following from each value.163 # strip the following from each value.
76 post_strippers = ' ,'164 post_strippers = ' ,'
77 # we strip any leading/trailing '=' or ' ' from the string then165 # we strip any leading/trailing '=' or ' ' from the string then
@@ -79,7 +167,7 @@
79 split = config_flags.strip(' =').split('=')167 split = config_flags.strip(' =').split('=')
80 limit = len(split)168 limit = len(split)
81 flags = {}169 flags = {}
82 for i in xrange(0, limit - 1):170 for i in range(0, limit - 1):
83 current = split[i]171 current = split[i]
84 next = split[i + 1]172 next = split[i + 1]
85 vindex = next.rfind(',')173 vindex = next.rfind(',')
@@ -94,63 +182,125 @@
94 # if this not the first entry, expect an embedded key.182 # if this not the first entry, expect an embedded key.
95 index = current.rfind(',')183 index = current.rfind(',')
96 if index < 0:184 if index < 0:
97 log("invalid config value(s) at index %s" % (i),185 log("Invalid config value(s) at index %s" % (i), level=ERROR)
98 level=ERROR)
99 raise OSContextError186 raise OSContextError
100 key = current[index + 1:]187 key = current[index + 1:]
101188
102 # Add to collection.189 # Add to collection.
103 flags[key.strip(post_strippers)] = value.rstrip(post_strippers)190 flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
191
104 return flags192 return flags
105193
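# Illustrative sketch, not from the upstream diff: the three input styles
# accepted by config_flags_parser() above.
from charmhelpers.contrib.openstack.context import config_flags_parser

# 1. simple key=value pairs
config_flags_parser('key1=value1, key2=value2')
# -> {'key1': 'value1', 'key2': 'value2'}

# 2. comma-delimited list of values for a single key
config_flags_parser('key1=value1, key2=value3,value4,value5')
# -> {'key1': 'value1', 'key2': 'value3,value4,value5'}

# 3. a colon before the first '=' means the string is parsed as yaml
config_flags_parser('key1: subkey1=value1, subkey2=value2')
# -> {'key1': 'subkey1=value1, subkey2=value2'}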
106194
107class OSContextGenerator(object):195class OSContextGenerator(object):
196 """Base class for all context generators."""
108 interfaces = []197 interfaces = []
198 related = False
199 complete = False
200 missing_data = []
109201
110 def __call__(self):202 def __call__(self):
111 raise NotImplementedError203 raise NotImplementedError
112204
205 def context_complete(self, ctxt):
206 """Check for missing data for the required context data.
207 Set self.missing_data if it exists and return False.
208 Set self.complete if no missing data and return True.
209 """
210 # Fresh start
211 self.complete = False
212 self.missing_data = []
213 for k, v in six.iteritems(ctxt):
214 if v is None or v == '':
215 if k not in self.missing_data:
216 self.missing_data.append(k)
217
218 if self.missing_data:
219 self.complete = False
220 log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
221 else:
222 self.complete = True
223 return self.complete
224
225 def get_related(self):
226 """Check if any of the context interfaces have relation ids.
227 Set self.related and return True if one of the interfaces
228 has relation ids.
229 """
230 # Fresh start
231 self.related = False
232 try:
233 for interface in self.interfaces:
234 if relation_ids(interface):
235 self.related = True
236 return self.related
237 except AttributeError as e:
238 log("{} {}"
239 "".format(self, e), 'INFO')
240 return self.related
241
113242
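# Illustrative sketch, not from the upstream diff: a minimal context generator
# built on the base-class helpers added above. The 'example-backend' relation
# and the setting name are hypothetical.
class ExampleBackendContext(OSContextGenerator):
    interfaces = ['example-backend']

    def __call__(self):
        for rid in relation_ids('example-backend'):
            for unit in related_units(rid):
                ctxt = {'backend_host': relation_get('private-address',
                                                     rid=rid, unit=unit)}
                # context_complete() records any empty keys in
                # self.missing_data and flips self.complete for us.
                if self.context_complete(ctxt):
                    return ctxt
        return {}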
114class SharedDBContext(OSContextGenerator):243class SharedDBContext(OSContextGenerator):
115 interfaces = ['shared-db']244 interfaces = ['shared-db']
116245
117 def __init__(self,246 def __init__(self,
118 database=None, user=None, relation_prefix=None, ssl_dir=None):247 database=None, user=None, relation_prefix=None, ssl_dir=None):
119 '''248 """Allows inspecting relation for settings prefixed with
120 Allows inspecting relation for settings prefixed with relation_prefix.249 relation_prefix. This is useful for parsing access for multiple
121 This is useful for parsing access for multiple databases returned via250 databases returned via the shared-db interface (eg, nova_password,
122 the shared-db interface (eg, nova_password, quantum_password)251 quantum_password)
123 '''252 """
124 self.relation_prefix = relation_prefix253 self.relation_prefix = relation_prefix
125 self.database = database254 self.database = database
126 self.user = user255 self.user = user
127 self.ssl_dir = ssl_dir256 self.ssl_dir = ssl_dir
257 self.rel_name = self.interfaces[0]
128258
129 def __call__(self):259 def __call__(self):
130 self.database = self.database or config('database')260 self.database = self.database or config('database')
131 self.user = self.user or config('database-user')261 self.user = self.user or config('database-user')
132 if None in [self.database, self.user]:262 if None in [self.database, self.user]:
133 log('Could not generate shared_db context. '263 log("Could not generate shared_db context. Missing required charm "
134 'Missing required charm config options. '264 "config options. (database name and user)", level=ERROR)
135 '(database name and user)')
136 raise OSContextError265 raise OSContextError
266
137 ctxt = {}267 ctxt = {}
138268
269 # NOTE(jamespage) if mysql charm provides a network upon which
270 # access to the database should be made, reconfigure relation
271 # with the service units local address and defer execution
272 access_network = relation_get('access-network')
273 if access_network is not None:
274 if self.relation_prefix is not None:
275 hostname_key = "{}_hostname".format(self.relation_prefix)
276 else:
277 hostname_key = "hostname"
278 access_hostname = get_address_in_network(access_network,
279 unit_get('private-address'))
280 set_hostname = relation_get(attribute=hostname_key,
281 unit=local_unit())
282 if set_hostname != access_hostname:
283 relation_set(relation_settings={hostname_key: access_hostname})
284 return None # Defer any further hook execution for now....
285
139 password_setting = 'password'286 password_setting = 'password'
140 if self.relation_prefix:287 if self.relation_prefix:
141 password_setting = self.relation_prefix + '_password'288 password_setting = self.relation_prefix + '_password'
142289
143 for rid in relation_ids('shared-db'):290 for rid in relation_ids(self.interfaces[0]):
291 self.related = True
144 for unit in related_units(rid):292 for unit in related_units(rid):
145 rdata = relation_get(rid=rid, unit=unit)293 rdata = relation_get(rid=rid, unit=unit)
294 host = rdata.get('db_host')
295 host = format_ipv6_addr(host) or host
146 ctxt = {296 ctxt = {
147 'database_host': rdata.get('db_host'),297 'database_host': host,
148 'database': self.database,298 'database': self.database,
149 'database_user': self.user,299 'database_user': self.user,
150 'database_password': rdata.get(password_setting),300 'database_password': rdata.get(password_setting),
151 'database_type': 'mysql'301 'database_type': 'mysql'
152 }302 }
153 if context_complete(ctxt):303 if self.context_complete(ctxt):
154 db_ssl(rdata, ctxt, self.ssl_dir)304 db_ssl(rdata, ctxt, self.ssl_dir)
155 return ctxt305 return ctxt
156 return {}306 return {}
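# Illustrative sketch, not from the upstream diff: how a charm typically
# instantiates SharedDBContext; the database/user values and ssl_dir are
# hypothetical. The call returns {} until the shared-db relation data is
# complete, and may return None (deferring the hook) while it republishes
# <prefix>_hostname when the mysql charm advertises an access-network.
nova_db_ctxt = SharedDBContext(database='nova', user='nova',
                               relation_prefix='nova', ssl_dir='/etc/nova')
ctxt = nova_db_ctxt()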
@@ -165,23 +315,25 @@
165 def __call__(self):315 def __call__(self):
166 self.database = self.database or config('database')316 self.database = self.database or config('database')
167 if self.database is None:317 if self.database is None:
168 log('Could not generate postgresql_db context. '318 log('Could not generate postgresql_db context. Missing required '
169 'Missing required charm config options. '319 'charm config options. (database name)', level=ERROR)
170 '(database name)')
171 raise OSContextError320 raise OSContextError
321
172 ctxt = {}322 ctxt = {}
173
174 for rid in relation_ids(self.interfaces[0]):323 for rid in relation_ids(self.interfaces[0]):
324 self.related = True
175 for unit in related_units(rid):325 for unit in related_units(rid):
176 ctxt = {326 rel_host = relation_get('host', rid=rid, unit=unit)
177 'database_host': relation_get('host', rid=rid, unit=unit),327 rel_user = relation_get('user', rid=rid, unit=unit)
178 'database': self.database,328 rel_passwd = relation_get('password', rid=rid, unit=unit)
179 'database_user': relation_get('user', rid=rid, unit=unit),329 ctxt = {'database_host': rel_host,
180 'database_password': relation_get('password', rid=rid, unit=unit),330 'database': self.database,
181 'database_type': 'postgresql',331 'database_user': rel_user,
182 }332 'database_password': rel_passwd,
183 if context_complete(ctxt):333 'database_type': 'postgresql'}
334 if self.context_complete(ctxt):
184 return ctxt335 return ctxt
336
185 return {}337 return {}
186338
187339
@@ -190,85 +342,126 @@
190 ca_path = os.path.join(ssl_dir, 'db-client.ca')342 ca_path = os.path.join(ssl_dir, 'db-client.ca')
191 with open(ca_path, 'w') as fh:343 with open(ca_path, 'w') as fh:
192 fh.write(b64decode(rdata['ssl_ca']))344 fh.write(b64decode(rdata['ssl_ca']))
345
193 ctxt['database_ssl_ca'] = ca_path346 ctxt['database_ssl_ca'] = ca_path
194 elif 'ssl_ca' in rdata:347 elif 'ssl_ca' in rdata:
195 log("Charm not setup for ssl support but ssl ca found")348 log("Charm not setup for ssl support but ssl ca found", level=INFO)
196 return ctxt349 return ctxt
350
197 if 'ssl_cert' in rdata:351 if 'ssl_cert' in rdata:
198 cert_path = os.path.join(352 cert_path = os.path.join(
199 ssl_dir, 'db-client.cert')353 ssl_dir, 'db-client.cert')
200 if not os.path.exists(cert_path):354 if not os.path.exists(cert_path):
201 log("Waiting 1m for ssl client cert validity")355 log("Waiting 1m for ssl client cert validity", level=INFO)
202 time.sleep(60)356 time.sleep(60)
357
203 with open(cert_path, 'w') as fh:358 with open(cert_path, 'w') as fh:
204 fh.write(b64decode(rdata['ssl_cert']))359 fh.write(b64decode(rdata['ssl_cert']))
360
205 ctxt['database_ssl_cert'] = cert_path361 ctxt['database_ssl_cert'] = cert_path
206 key_path = os.path.join(ssl_dir, 'db-client.key')362 key_path = os.path.join(ssl_dir, 'db-client.key')
207 with open(key_path, 'w') as fh:363 with open(key_path, 'w') as fh:
208 fh.write(b64decode(rdata['ssl_key']))364 fh.write(b64decode(rdata['ssl_key']))
365
209 ctxt['database_ssl_key'] = key_path366 ctxt['database_ssl_key'] = key_path
367
210 return ctxt368 return ctxt
211369
212370
213class IdentityServiceContext(OSContextGenerator):371class IdentityServiceContext(OSContextGenerator):
214 interfaces = ['identity-service']372
373 def __init__(self, service=None, service_user=None, rel_name='identity-service'):
374 self.service = service
375 self.service_user = service_user
376 self.rel_name = rel_name
377 self.interfaces = [self.rel_name]
215378
216 def __call__(self):379 def __call__(self):
217 log('Generating template context for identity-service')380 log('Generating template context for ' + self.rel_name, level=DEBUG)
218 ctxt = {}381 ctxt = {}
219382
220 for rid in relation_ids('identity-service'):383 if self.service and self.service_user:
384 # This is required for pki token signing if we don't want /tmp to
385 # be used.
386 cachedir = '/var/cache/%s' % (self.service)
387 if not os.path.isdir(cachedir):
388 log("Creating service cache dir %s" % (cachedir), level=DEBUG)
389 mkdir(path=cachedir, owner=self.service_user,
390 group=self.service_user, perms=0o700)
391
392 ctxt['signing_dir'] = cachedir
393
394 for rid in relation_ids(self.rel_name):
395 self.related = True
221 for unit in related_units(rid):396 for unit in related_units(rid):
222 rdata = relation_get(rid=rid, unit=unit)397 rdata = relation_get(rid=rid, unit=unit)
223 ctxt = {398 serv_host = rdata.get('service_host')
224 'service_port': rdata.get('service_port'),399 serv_host = format_ipv6_addr(serv_host) or serv_host
225 'service_host': rdata.get('service_host'),400 auth_host = rdata.get('auth_host')
226 'auth_host': rdata.get('auth_host'),401 auth_host = format_ipv6_addr(auth_host) or auth_host
227 'auth_port': rdata.get('auth_port'),402 svc_protocol = rdata.get('service_protocol') or 'http'
228 'admin_tenant_name': rdata.get('service_tenant'),403 auth_protocol = rdata.get('auth_protocol') or 'http'
229 'admin_user': rdata.get('service_username'),404 ctxt.update({'service_port': rdata.get('service_port'),
230 'admin_password': rdata.get('service_password'),405 'service_host': serv_host,
231 'service_protocol':406 'auth_host': auth_host,
232 rdata.get('service_protocol') or 'http',407 'auth_port': rdata.get('auth_port'),
233 'auth_protocol':408 'admin_tenant_name': rdata.get('service_tenant'),
234 rdata.get('auth_protocol') or 'http',409 'admin_user': rdata.get('service_username'),
235 }410 'admin_password': rdata.get('service_password'),
236 if context_complete(ctxt):411 'service_protocol': svc_protocol,
412 'auth_protocol': auth_protocol})
413
414 if self.context_complete(ctxt):
237 # NOTE(jamespage) this is required for >= icehouse415 # NOTE(jamespage) this is required for >= icehouse
238 # so a missing value just indicates keystone needs416 # so a missing value just indicates keystone needs
239 # upgrading417 # upgrading
240 ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')418 ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
241 return ctxt419 return ctxt
420
242 return {}421 return {}
243422
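# Illustrative sketch, not from the upstream diff: passing service/service_user
# so the context prepares a keystone signing cache dir rather than letting the
# service fall back to /tmp. The service name is hypothetical.
identity_ctxt = IdentityServiceContext(service='cinder', service_user='cinder')
ctxt = identity_ctxt()
# Once the identity-service relation data is complete, ctxt carries
# 'signing_dir': '/var/cache/cinder' alongside the usual auth/admin keys.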
244423
245class AMQPContext(OSContextGenerator):424class AMQPContext(OSContextGenerator):
246 interfaces = ['amqp']
247425
248 def __init__(self, ssl_dir=None):426 def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
249 self.ssl_dir = ssl_dir427 self.ssl_dir = ssl_dir
428 self.rel_name = rel_name
429 self.relation_prefix = relation_prefix
430 self.interfaces = [rel_name]
250431
251 def __call__(self):432 def __call__(self):
252 log('Generating template context for amqp')433 log('Generating template context for amqp', level=DEBUG)
253 conf = config()434 conf = config()
435 if self.relation_prefix:
436 user_setting = '%s-rabbit-user' % (self.relation_prefix)
437 vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
438 else:
439 user_setting = 'rabbit-user'
440 vhost_setting = 'rabbit-vhost'
441
254 try:442 try:
255 username = conf['rabbit-user']443 username = conf[user_setting]
256 vhost = conf['rabbit-vhost']444 vhost = conf[vhost_setting]
257 except KeyError as e:445 except KeyError as e:
258 log('Could not generate shared_db context. '446 log('Could not generate shared_db context. Missing required charm '
259 'Missing required charm config options: %s.' % e)447 'config options: %s.' % e, level=ERROR)
260 raise OSContextError448 raise OSContextError
449
261 ctxt = {}450 ctxt = {}
262 for rid in relation_ids('amqp'):451 for rid in relation_ids(self.rel_name):
263 ha_vip_only = False452 ha_vip_only = False
453 self.related = True
264 for unit in related_units(rid):454 for unit in related_units(rid):
265 if relation_get('clustered', rid=rid, unit=unit):455 if relation_get('clustered', rid=rid, unit=unit):
266 ctxt['clustered'] = True456 ctxt['clustered'] = True
267 ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,457 vip = relation_get('vip', rid=rid, unit=unit)
268 unit=unit)458 vip = format_ipv6_addr(vip) or vip
459 ctxt['rabbitmq_host'] = vip
269 else:460 else:
270 ctxt['rabbitmq_host'] = relation_get('private-address',461 host = relation_get('private-address', rid=rid, unit=unit)
271 rid=rid, unit=unit)462 host = format_ipv6_addr(host) or host
463 ctxt['rabbitmq_host'] = host
464
272 ctxt.update({465 ctxt.update({
273 'rabbitmq_user': username,466 'rabbitmq_user': username,
274 'rabbitmq_password': relation_get('password', rid=rid,467 'rabbitmq_password': relation_get('password', rid=rid,
@@ -279,6 +472,7 @@
279 ssl_port = relation_get('ssl_port', rid=rid, unit=unit)472 ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
280 if ssl_port:473 if ssl_port:
281 ctxt['rabbit_ssl_port'] = ssl_port474 ctxt['rabbit_ssl_port'] = ssl_port
475
282 ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)476 ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
283 if ssl_ca:477 if ssl_ca:
284 ctxt['rabbit_ssl_ca'] = ssl_ca478 ctxt['rabbit_ssl_ca'] = ssl_ca
@@ -289,104 +483,172 @@
289 ha_vip_only = relation_get('ha-vip-only',483 ha_vip_only = relation_get('ha-vip-only',
290 rid=rid, unit=unit) is not None484 rid=rid, unit=unit) is not None
291485
292 if context_complete(ctxt):486 if self.context_complete(ctxt):
293 if 'rabbit_ssl_ca' in ctxt:487 if 'rabbit_ssl_ca' in ctxt:
294 if not self.ssl_dir:488 if not self.ssl_dir:
295 log(("Charm not setup for ssl support "489 log("Charm not setup for ssl support but ssl ca "
296 "but ssl ca found"))490 "found", level=INFO)
297 break491 break
492
298 ca_path = os.path.join(493 ca_path = os.path.join(
299 self.ssl_dir, 'rabbit-client-ca.pem')494 self.ssl_dir, 'rabbit-client-ca.pem')
300 with open(ca_path, 'w') as fh:495 with open(ca_path, 'w') as fh:
301 fh.write(b64decode(ctxt['rabbit_ssl_ca']))496 fh.write(b64decode(ctxt['rabbit_ssl_ca']))
302 ctxt['rabbit_ssl_ca'] = ca_path497 ctxt['rabbit_ssl_ca'] = ca_path
498
303 # Sufficient information found = break out!499 # Sufficient information found = break out!
304 break500 break
501
305 # Used for active/active rabbitmq >= grizzly502 # Used for active/active rabbitmq >= grizzly
306 if ('clustered' not in ctxt or ha_vip_only) \503 if (('clustered' not in ctxt or ha_vip_only) and
307 and len(related_units(rid)) > 1:504 len(related_units(rid)) > 1):
308 rabbitmq_hosts = []505 rabbitmq_hosts = []
309 for unit in related_units(rid):506 for unit in related_units(rid):
310 rabbitmq_hosts.append(relation_get('private-address',507 host = relation_get('private-address', rid=rid, unit=unit)
311 rid=rid, unit=unit))508 host = format_ipv6_addr(host) or host
312 ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)509 rabbitmq_hosts.append(host)
313 if not context_complete(ctxt):510
511 ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
512
513 oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
514 if oslo_messaging_flags:
515 ctxt['oslo_messaging_flags'] = config_flags_parser(
516 oslo_messaging_flags)
517
518 if not self.complete:
314 return {}519 return {}
315 else:520
316 return ctxt521 return ctxt
317522
318523
319class CephContext(OSContextGenerator):524class CephContext(OSContextGenerator):
525 """Generates context for /etc/ceph/ceph.conf templates."""
320 interfaces = ['ceph']526 interfaces = ['ceph']
321527
322 def __call__(self):528 def __call__(self):
323 '''This generates context for /etc/ceph/ceph.conf templates'''
324 if not relation_ids('ceph'):529 if not relation_ids('ceph'):
325 return {}530 return {}
326531
327 log('Generating template context for ceph')532 log('Generating template context for ceph', level=DEBUG)
328
329 mon_hosts = []533 mon_hosts = []
330 auth = None534 ctxt = {
331 key = None535 'use_syslog': str(config('use-syslog')).lower()
332 use_syslog = str(config('use-syslog')).lower()536 }
333 for rid in relation_ids('ceph'):537 for rid in relation_ids('ceph'):
334 for unit in related_units(rid):538 for unit in related_units(rid):
335 mon_hosts.append(relation_get('private-address', rid=rid,539 if not ctxt.get('auth'):
336 unit=unit))540 ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
337 auth = relation_get('auth', rid=rid, unit=unit)541 if not ctxt.get('key'):
338 key = relation_get('key', rid=rid, unit=unit)542 ctxt['key'] = relation_get('key', rid=rid, unit=unit)
543 ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
544 unit=unit)
545 unit_priv_addr = relation_get('private-address', rid=rid,
546 unit=unit)
547 ceph_addr = ceph_pub_addr or unit_priv_addr
548 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
549 mon_hosts.append(ceph_addr)
339550
340 ctxt = {551 ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
341 'mon_hosts': ' '.join(mon_hosts),
342 'auth': auth,
343 'key': key,
344 'use_syslog': use_syslog
345 }
346552
347 if not os.path.isdir('/etc/ceph'):553 if not os.path.isdir('/etc/ceph'):
348 os.mkdir('/etc/ceph')554 os.mkdir('/etc/ceph')
349555
350 if not context_complete(ctxt):556 if not self.context_complete(ctxt):
351 return {}557 return {}
352558
353 ensure_packages(['ceph-common'])559 ensure_packages(['ceph-common'])
354
355 return ctxt560 return ctxt
356561
357562
358class HAProxyContext(OSContextGenerator):563class HAProxyContext(OSContextGenerator):
564 """Provides half a context for the haproxy template, which describes
565 all peers to be included in the cluster. Each charm needs to include
566 its own context generator that describes the port mapping.
567 """
359 interfaces = ['cluster']568 interfaces = ['cluster']
360569
570 def __init__(self, singlenode_mode=False):
571 self.singlenode_mode = singlenode_mode
572
361 def __call__(self):573 def __call__(self):
362 '''574 if not relation_ids('cluster') and not self.singlenode_mode:
363 Builds half a context for the haproxy template, which describes
364 all peers to be included in the cluster. Each charm needs to include
365 its own context generator that describes the port mapping.
366 '''
367 if not relation_ids('cluster'):
368 return {}575 return {}
369576
577 if config('prefer-ipv6'):
578 addr = get_ipv6_addr(exc_list=[config('vip')])[0]
579 else:
580 addr = get_host_ip(unit_get('private-address'))
581
582 l_unit = local_unit().replace('/', '-')
370 cluster_hosts = {}583 cluster_hosts = {}
371 l_unit = local_unit().replace('/', '-')584
372 cluster_hosts[l_unit] = unit_get('private-address')585 # NOTE(jamespage): build out map of configured network endpoints
373586 # and associated backends
587 for addr_type in ADDRESS_TYPES:
588 cfg_opt = 'os-{}-network'.format(addr_type)
589 laddr = get_address_in_network(config(cfg_opt))
590 if laddr:
591 netmask = get_netmask_for_address(laddr)
592 cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
593 netmask),
594 'backends': {l_unit: laddr}}
595 for rid in relation_ids('cluster'):
596 for unit in related_units(rid):
597 _laddr = relation_get('{}-address'.format(addr_type),
598 rid=rid, unit=unit)
599 if _laddr:
600 _unit = unit.replace('/', '-')
601 cluster_hosts[laddr]['backends'][_unit] = _laddr
602
603 # NOTE(jamespage) add backend based on private address - this
604 # will either be the only backend or the fallback if no acls
605 # match in the frontend
606 cluster_hosts[addr] = {}
607 netmask = get_netmask_for_address(addr)
608 cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
609 'backends': {l_unit: addr}}
374 for rid in relation_ids('cluster'):610 for rid in relation_ids('cluster'):
375 for unit in related_units(rid):611 for unit in related_units(rid):
376 _unit = unit.replace('/', '-')612 _laddr = relation_get('private-address',
377 addr = relation_get('private-address', rid=rid, unit=unit)613 rid=rid, unit=unit)
378 cluster_hosts[_unit] = addr614 if _laddr:
615 _unit = unit.replace('/', '-')
616 cluster_hosts[addr]['backends'][_unit] = _laddr
379617
380 ctxt = {618 ctxt = {
381 'units': cluster_hosts,619 'frontends': cluster_hosts,
620 'default_backend': addr
382 }621 }
383 if len(cluster_hosts.keys()) > 1:622
384 # Enable haproxy when we have enough peers.623 if config('haproxy-server-timeout'):
385 log('Ensuring haproxy enabled in /etc/default/haproxy.')624 ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
386 with open('/etc/default/haproxy', 'w') as out:625
387 out.write('ENABLED=1\n')626 if config('haproxy-client-timeout'):
388 return ctxt627 ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
389 log('HAProxy context is incomplete, this unit has no peers.')628
629 if config('prefer-ipv6'):
630 ctxt['ipv6'] = True
631 ctxt['local_host'] = 'ip6-localhost'
632 ctxt['haproxy_host'] = '::'
633 ctxt['stat_port'] = ':::8888'
634 else:
635 ctxt['local_host'] = '127.0.0.1'
636 ctxt['haproxy_host'] = '0.0.0.0'
637 ctxt['stat_port'] = ':8888'
638
639 for frontend in cluster_hosts:
640 if (len(cluster_hosts[frontend]['backends']) > 1 or
641 self.singlenode_mode):
642 # Enable haproxy when we have enough peers.
643 log('Ensuring haproxy enabled in /etc/default/haproxy.',
644 level=DEBUG)
645 with open('/etc/default/haproxy', 'w') as out:
646 out.write('ENABLED=1\n')
647
648 return ctxt
649
650 log('HAProxy context is incomplete, this unit has no peers.',
651 level=INFO)
390 return {}652 return {}
391653
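# Illustrative sketch, not from the upstream diff: the shape of the 'frontends'
# map built above -- one entry per configured network plus the private-address
# fallback, each carrying its netmask and a unit->address backend map. Unit
# names and addresses are made up.
example_frontends = {
    '10.5.0.10': {
        'network': '10.5.0.10/255.255.255.0',
        'backends': {'keystone-0': '10.5.0.10', 'keystone-1': '10.5.0.11'},
    },
    '192.168.20.10': {
        'network': '192.168.20.10/255.255.255.0',
        'backends': {'keystone-0': '192.168.20.10',
                     'keystone-1': '192.168.20.11'},
    },
}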
392654
@@ -394,36 +656,36 @@
394 interfaces = ['image-service']656 interfaces = ['image-service']
395657
396 def __call__(self):658 def __call__(self):
397 '''659 """Obtains the glance API server from the image-service relation.
398 Obtains the glance API server from the image-service relation. Useful660 Useful in nova and cinder (currently).
399 in nova and cinder (currently).661 """
400 '''662 log('Generating template context for image-service.', level=DEBUG)
401 log('Generating template context for image-service.')
402 rids = relation_ids('image-service')663 rids = relation_ids('image-service')
403 if not rids:664 if not rids:
404 return {}665 return {}
666
405 for rid in rids:667 for rid in rids:
406 for unit in related_units(rid):668 for unit in related_units(rid):
407 api_server = relation_get('glance-api-server',669 api_server = relation_get('glance-api-server',
408 rid=rid, unit=unit)670 rid=rid, unit=unit)
409 if api_server:671 if api_server:
410 return {'glance_api_servers': api_server}672 return {'glance_api_servers': api_server}
411 log('ImageService context is incomplete. '673
412 'Missing required relation data.')674 log("ImageService context is incomplete. Missing required relation "
675 "data.", level=INFO)
413 return {}676 return {}
414677
415678
416class ApacheSSLContext(OSContextGenerator):679class ApacheSSLContext(OSContextGenerator):
417680 """Generates a context for an apache vhost configuration that configures
418 """
419 Generates a context for an apache vhost configuration that configures
420 HTTPS reverse proxying for one or many endpoints. Generated context681 HTTPS reverse proxying for one or many endpoints. Generated context
421 looks something like:682 looks something like::
422 {683
423 'namespace': 'cinder',684 {
424 'private_address': 'iscsi.mycinderhost.com',685 'namespace': 'cinder',
425 'endpoints': [(8776, 8766), (8777, 8767)]686 'private_address': 'iscsi.mycinderhost.com',
426 }687 'endpoints': [(8776, 8766), (8777, 8767)]
688 }
427689
428 The endpoints list consists of a tuples mapping external ports690 The endpoints list consists of a tuples mapping external ports
429 to internal ports.691 to internal ports.
@@ -439,44 +701,119 @@
439 cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']701 cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
440 check_call(cmd)702 check_call(cmd)
441703
442 def configure_cert(self):704 def configure_cert(self, cn=None):
443 if not os.path.isdir('/etc/apache2/ssl'):
444 os.mkdir('/etc/apache2/ssl')
445 ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)705 ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
446 if not os.path.isdir(ssl_dir):706 mkdir(path=ssl_dir)
447 os.mkdir(ssl_dir)707 cert, key = get_cert(cn)
448 cert, key = get_cert()708 if cn:
449 with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:709 cert_filename = 'cert_{}'.format(cn)
450 cert_out.write(b64decode(cert))710 key_filename = 'key_{}'.format(cn)
451 with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:711 else:
452 key_out.write(b64decode(key))712 cert_filename = 'cert'
713 key_filename = 'key'
714
715 write_file(path=os.path.join(ssl_dir, cert_filename),
716 content=b64decode(cert))
717 write_file(path=os.path.join(ssl_dir, key_filename),
718 content=b64decode(key))
719
720 def configure_ca(self):
453 ca_cert = get_ca_cert()721 ca_cert = get_ca_cert()
454 if ca_cert:722 if ca_cert:
455 with open(CA_CERT_PATH, 'w') as ca_out:723 install_ca_cert(b64decode(ca_cert))
456 ca_out.write(b64decode(ca_cert))724
457 check_call(['update-ca-certificates'])725 def canonical_names(self):
726 """Figure out which canonical names clients will access this service.
727 """
728 cns = []
729 for r_id in relation_ids('identity-service'):
730 for unit in related_units(r_id):
731 rdata = relation_get(rid=r_id, unit=unit)
732 for k in rdata:
733 if k.startswith('ssl_key_'):
734 cns.append(k.lstrip('ssl_key_'))
735
736 return sorted(list(set(cns)))
737
738 def get_network_addresses(self):
739 """For each network configured, return corresponding address and vip
740 (if available).
741
742 Returns a list of tuples of the form:
743
744 [(address_in_net_a, vip_in_net_a),
745 (address_in_net_b, vip_in_net_b),
746 ...]
747
748 or, if no vip(s) available:
749
750 [(address_in_net_a, address_in_net_a),
751 (address_in_net_b, address_in_net_b),
752 ...]
753 """
754 addresses = []
755 if config('vip'):
756 vips = config('vip').split()
757 else:
758 vips = []
759
760 for net_type in ['os-internal-network', 'os-admin-network',
761 'os-public-network']:
762 addr = get_address_in_network(config(net_type),
763 unit_get('private-address'))
764 if len(vips) > 1 and is_clustered():
765 if not config(net_type):
766 log("Multiple networks configured but net_type "
767 "is None (%s)." % net_type, level=WARNING)
768 continue
769
770 for vip in vips:
771 if is_address_in_network(config(net_type), vip):
772 addresses.append((addr, vip))
773 break
774
775 elif is_clustered() and config('vip'):
776 addresses.append((addr, config('vip')))
777 else:
778 addresses.append((addr, addr))
779
780 return sorted(addresses)
458781
459 def __call__(self):782 def __call__(self):
460 if isinstance(self.external_ports, basestring):783 if isinstance(self.external_ports, six.string_types):
461 self.external_ports = [self.external_ports]784 self.external_ports = [self.external_ports]
462 if (not self.external_ports or not https()):785
786 if not self.external_ports or not https():
463 return {}787 return {}
464788
465 self.configure_cert()789 self.configure_ca()
466 self.enable_modules()790 self.enable_modules()
467791
468 ctxt = {792 ctxt = {'namespace': self.service_namespace,
469 'namespace': self.service_namespace,793 'endpoints': [],
470 'private_address': unit_get('private-address'),794 'ext_ports': []}
471 'endpoints': []795
472 }796 cns = self.canonical_names()
473 if is_clustered():797 if cns:
474 ctxt['private_address'] = config('vip')798 for cn in cns:
475 for api_port in self.external_ports:799 self.configure_cert(cn)
476 ext_port = determine_apache_port(api_port)800 else:
477 int_port = determine_api_port(api_port)801 # Expect cert/key provided in config (currently assumed that ca
478 portmap = (int(ext_port), int(int_port))802 # uses ip for cn)
479 ctxt['endpoints'].append(portmap)803 cn = resolve_address(endpoint_type=INTERNAL)
804 self.configure_cert(cn)
805
806 addresses = self.get_network_addresses()
807 for address, endpoint in sorted(set(addresses)):
808 for api_port in self.external_ports:
809 ext_port = determine_apache_port(api_port,
810 singlenode_mode=True)
811 int_port = determine_api_port(api_port, singlenode_mode=True)
812 portmap = (address, endpoint, int(ext_port), int(int_port))
813 ctxt['endpoints'].append(portmap)
814 ctxt['ext_ports'].append(int(ext_port))
815
816 ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
480 return ctxt817 return ctxt
481818
482819
@@ -493,21 +830,23 @@
493830
494 @property831 @property
495 def packages(self):832 def packages(self):
496 return neutron_plugin_attribute(833 return neutron_plugin_attribute(self.plugin, 'packages',
497 self.plugin, 'packages', self.network_manager)834 self.network_manager)
498835
499 @property836 @property
500 def neutron_security_groups(self):837 def neutron_security_groups(self):
501 return None838 return None
502839
503 def _ensure_packages(self):840 def _ensure_packages(self):
504 [ensure_packages(pkgs) for pkgs in self.packages]841 for pkgs in self.packages:
842 ensure_packages(pkgs)
505843
506 def _save_flag_file(self):844 def _save_flag_file(self):
507 if self.network_manager == 'quantum':845 if self.network_manager == 'quantum':
508 _file = '/etc/nova/quantum_plugin.conf'846 _file = '/etc/nova/quantum_plugin.conf'
509 else:847 else:
510 _file = '/etc/nova/neutron_plugin.conf'848 _file = '/etc/nova/neutron_plugin.conf'
849
511 with open(_file, 'wb') as out:850 with open(_file, 'wb') as out:
512 out.write(self.plugin + '\n')851 out.write(self.plugin + '\n')
513852
@@ -516,50 +855,104 @@
516 self.network_manager)855 self.network_manager)
517 config = neutron_plugin_attribute(self.plugin, 'config',856 config = neutron_plugin_attribute(self.plugin, 'config',
518 self.network_manager)857 self.network_manager)
519 ovs_ctxt = {858 ovs_ctxt = {'core_plugin': driver,
520 'core_plugin': driver,859 'neutron_plugin': 'ovs',
521 'neutron_plugin': 'ovs',860 'neutron_security_groups': self.neutron_security_groups,
522 'neutron_security_groups': self.neutron_security_groups,861 'local_ip': unit_private_ip(),
523 'local_ip': unit_private_ip(),862 'config': config}
524 'config': config
525 }
526863
527 return ovs_ctxt864 return ovs_ctxt
528865
866 def nuage_ctxt(self):
867 driver = neutron_plugin_attribute(self.plugin, 'driver',
868 self.network_manager)
869 config = neutron_plugin_attribute(self.plugin, 'config',
870 self.network_manager)
871 nuage_ctxt = {'core_plugin': driver,
872 'neutron_plugin': 'vsp',
873 'neutron_security_groups': self.neutron_security_groups,
874 'local_ip': unit_private_ip(),
875 'config': config}
876
877 return nuage_ctxt
878
529 def nvp_ctxt(self):879 def nvp_ctxt(self):
530 driver = neutron_plugin_attribute(self.plugin, 'driver',880 driver = neutron_plugin_attribute(self.plugin, 'driver',
531 self.network_manager)881 self.network_manager)
532 config = neutron_plugin_attribute(self.plugin, 'config',882 config = neutron_plugin_attribute(self.plugin, 'config',
533 self.network_manager)883 self.network_manager)
534 nvp_ctxt = {884 nvp_ctxt = {'core_plugin': driver,
535 'core_plugin': driver,885 'neutron_plugin': 'nvp',
536 'neutron_plugin': 'nvp',886 'neutron_security_groups': self.neutron_security_groups,
537 'neutron_security_groups': self.neutron_security_groups,887 'local_ip': unit_private_ip(),
538 'local_ip': unit_private_ip(),888 'config': config}
539 'config': config
540 }
541889
542 return nvp_ctxt890 return nvp_ctxt
543891
892 def n1kv_ctxt(self):
893 driver = neutron_plugin_attribute(self.plugin, 'driver',
894 self.network_manager)
895 n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
896 self.network_manager)
897 n1kv_user_config_flags = config('n1kv-config-flags')
898 restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
899 n1kv_ctxt = {'core_plugin': driver,
900 'neutron_plugin': 'n1kv',
901 'neutron_security_groups': self.neutron_security_groups,
902 'local_ip': unit_private_ip(),
903 'config': n1kv_config,
904 'vsm_ip': config('n1kv-vsm-ip'),
905 'vsm_username': config('n1kv-vsm-username'),
906 'vsm_password': config('n1kv-vsm-password'),
907 'restrict_policy_profiles': restrict_policy_profiles}
908
909 if n1kv_user_config_flags:
910 flags = config_flags_parser(n1kv_user_config_flags)
911 n1kv_ctxt['user_config_flags'] = flags
912
913 return n1kv_ctxt
914
915 def calico_ctxt(self):
916 driver = neutron_plugin_attribute(self.plugin, 'driver',
917 self.network_manager)
918 config = neutron_plugin_attribute(self.plugin, 'config',
919 self.network_manager)
920 calico_ctxt = {'core_plugin': driver,
921 'neutron_plugin': 'Calico',
922 'neutron_security_groups': self.neutron_security_groups,
923 'local_ip': unit_private_ip(),
924 'config': config}
925
926 return calico_ctxt
927
544 def neutron_ctxt(self):928 def neutron_ctxt(self):
545 if https():929 if https():
546 proto = 'https'930 proto = 'https'
547 else:931 else:
548 proto = 'http'932 proto = 'http'
933
549 if is_clustered():934 if is_clustered():
550 host = config('vip')935 host = config('vip')
551 else:936 else:
552 host = unit_get('private-address')937 host = unit_get('private-address')
553 url = '%s://%s:%s' % (proto, host, '9696')938
554 ctxt = {939 ctxt = {'network_manager': self.network_manager,
555 'network_manager': self.network_manager,940 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
556 'neutron_url': url,
557 }
558 return ctxt941 return ctxt
559942
943 def pg_ctxt(self):
944 driver = neutron_plugin_attribute(self.plugin, 'driver',
945 self.network_manager)
946 config = neutron_plugin_attribute(self.plugin, 'config',
947 self.network_manager)
948 ovs_ctxt = {'core_plugin': driver,
949 'neutron_plugin': 'plumgrid',
950 'neutron_security_groups': self.neutron_security_groups,
951 'local_ip': unit_private_ip(),
952 'config': config}
953 return ovs_ctxt
954
560 def __call__(self):955 def __call__(self):
561 self._ensure_packages()
562
563 if self.network_manager not in ['quantum', 'neutron']:956 if self.network_manager not in ['quantum', 'neutron']:
564 return {}957 return {}
565958
@@ -570,8 +963,16 @@
570963
571 if self.plugin == 'ovs':964 if self.plugin == 'ovs':
572 ctxt.update(self.ovs_ctxt())965 ctxt.update(self.ovs_ctxt())
573 elif self.plugin == 'nvp':966 elif self.plugin in ['nvp', 'nsx']:
574 ctxt.update(self.nvp_ctxt())967 ctxt.update(self.nvp_ctxt())
968 elif self.plugin == 'n1kv':
969 ctxt.update(self.n1kv_ctxt())
970 elif self.plugin == 'Calico':
971 ctxt.update(self.calico_ctxt())
972 elif self.plugin == 'vsp':
973 ctxt.update(self.nuage_ctxt())
974 elif self.plugin == 'plumgrid':
975 ctxt.update(self.pg_ctxt())
575976
576 alchemy_flags = config('neutron-alchemy-flags')977 alchemy_flags = config('neutron-alchemy-flags')
577 if alchemy_flags:978 if alchemy_flags:
@@ -582,24 +983,94 @@
582 return ctxt983 return ctxt
583984
584985
986class NeutronPortContext(OSContextGenerator):
987
988 def resolve_ports(self, ports):
989 """Resolve NICs not yet bound to bridge(s)
990
991 Entries given as a hwaddress are resolved to the owning NIC name; plain NIC names are passed through unchanged.
992 """
993 if not ports:
994 return None
995
996 hwaddr_to_nic = {}
997 hwaddr_to_ip = {}
998 for nic in list_nics():
999 # Ignore virtual interfaces (bond masters will be identified from
1000 # their slaves)
1001 if not is_phy_iface(nic):
1002 continue
1003
1004 _nic = get_bond_master(nic)
1005 if _nic:
1006 log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
1007 level=DEBUG)
1008 nic = _nic
1009
1010 hwaddr = get_nic_hwaddr(nic)
1011 hwaddr_to_nic[hwaddr] = nic
1012 addresses = get_ipv4_addr(nic, fatal=False)
1013 addresses += get_ipv6_addr(iface=nic, fatal=False)
1014 hwaddr_to_ip[hwaddr] = addresses
1015
1016 resolved = []
1017 mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
1018 for entry in ports:
1019 if re.match(mac_regex, entry):
1020 # NIC is in known NICs and does NOT have an IP address
1021 if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
1022 # If the nic is part of a bridge then don't use it
1023 if is_bridge_member(hwaddr_to_nic[entry]):
1024 continue
1025
1026 # Entry is a MAC address for a valid interface that doesn't
1027 # have an IP address assigned yet.
1028 resolved.append(hwaddr_to_nic[entry])
1029 else:
1030 # If the passed entry is not a MAC address, assume it's a valid
1031 # interface, and that the user put it there on purpose (we can
1032 # trust it to be the real external network).
1033 resolved.append(entry)
1034
1035 # Ensure no duplicates
1036 return list(set(resolved))
1037
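# Illustrative sketch, not from the upstream diff: resolving a charm's
# data-port configuration, which may mix NIC names and MAC addresses. The
# values are hypothetical.
ports = ['eth2', 'a0:36:9f:12:34:56']
resolved = NeutronPortContext().resolve_ports(ports)
# 'eth2' is passed through as-is; the MAC is translated to its NIC name only if
# that NIC exists, has no IP address and is not already a bridge member.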
1038
585class OSConfigFlagContext(OSContextGenerator):1039class OSConfigFlagContext(OSContextGenerator):
5861040 """Provides support for user-defined config flags.
587 """1041
588 Responsible for adding user-defined config-flags in charm config to a1042 Users can define a comma-separated list of key=value pairs
589 template context.1043 in the charm configuration and apply them at any point in
5901044 any file by using a template flag.
591 NOTE: the value of config-flags may be a comma-separated list of1045
592 key=value pairs and some Openstack config files support1046 Sometimes users might want config flags inserted within a
593 comma-separated lists as values.1047 specific section so this class allows users to specify the
594 """1048 template flag name, allowing for multiple template flags
5951049 (sections) within the same context.
596 def __call__(self):1050
597 config_flags = config('config-flags')1051 NOTE: the value of config-flags may be a comma-separated list of
598 if not config_flags:1052 key=value pairs and some Openstack config files support
599 return {}1053 comma-separated lists as values.
6001054 """
601 flags = config_flags_parser(config_flags)1055
602 return {'user_config_flags': flags}1056 def __init__(self, charm_flag='config-flags',
1057 template_flag='user_config_flags'):
1058 """
1059 :param charm_flag: config flags in charm configuration.
1060 :param template_flag: insert point for user-defined flags in template
1061 file.
1062 """
1063 super(OSConfigFlagContext, self).__init__()
1064 self._charm_flag = charm_flag
1065 self._template_flag = template_flag
1066
1067 def __call__(self):
1068 config_flags = config(self._charm_flag)
1069 if not config_flags:
1070 return {}
1071
1072 return {self._template_flag:
1073 config_flags_parser(config_flags)}
6031074
6041075
605class SubordinateConfigContext(OSContextGenerator):1076class SubordinateConfigContext(OSContextGenerator):
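The core of NeutronPortContext.resolve_ports() above is deciding whether each configured entry is a MAC address (to be mapped to an unconfigured, non-bridged NIC) or a plain interface name (trusted as-is). A standalone sketch of just that classification step, with the NIC lookup replaced by a static map for illustration::

    import re

    # Same pattern as used in resolve_ports() above.
    MAC_REGEX = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)

    def classify_ports(entries, hwaddr_to_nic):
        """MAC entries are looked up; plain names pass straight through."""
        resolved = []
        for entry in entries:
            if MAC_REGEX.match(entry):
                nic = hwaddr_to_nic.get(entry)
                if nic:
                    resolved.append(nic)
            else:
                # Not a MAC address; trust the operator-supplied name.
                resolved.append(entry)
        return sorted(set(resolved))  # de-duplicate, as resolve_ports() does

    # Hypothetical data for illustration only.
    print(classify_ports(['eth2', 'aa:bb:cc:dd:ee:ff'],
                         {'aa:bb:cc:dd:ee:ff': 'eth3'}))  # ['eth2', 'eth3']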
@@ -611,7 +1082,7 @@
     The subordinate interface allows subordinates to export their
     configuration requirements to the principal for multiple config
     files and multiple services. I.e., a subordinate that has interfaces
-    to both glance and nova may export to following yaml blob as json:
+    to both glance and nova may export the following yaml blob as json::

         glance:
             /etc/glance/glance-api.conf:
@@ -630,7 +1101,8 @@

     It is then up to the principal charms to subscribe this context to
     the service+config file it is interested in. Configuration data will
-    be available in the template context, in glance's case, as:
+    be available in the template context, in glance's case, as::
+
         ctxt = {
             ... other context ...
             'subordinate_config': {
@@ -642,7 +1114,6 @@
                 },
            }
        }
-
    """

    def __init__(self, service, config_file, interface):
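To make the docstring's yaml example concrete: a subordinate publishes that structure as a JSON string under the 'subordinate_configuration' relation key, and this context decodes it on the principal side. A hedged sketch of the publishing half, using the standard hook tools (the values are illustrative)::

    import json

    # The same shape as the docstring example above: service -> config file
    # -> sections -> list of [key, value] pairs.
    blob = {
        'glance': {
            '/etc/glance/glance-api.conf': {
                'sections': {
                    'DEFAULT': [['key1', 'value1']],
                },
            },
        },
    }

    # In a real subordinate hook this would be sent with:
    #   from charmhelpers.core.hookenv import relation_set
    #   relation_set(relation_settings={
    #       'subordinate_configuration': json.dumps(blob)})
    print(json.dumps(blob, indent=2))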
@@ -652,13 +1123,22 @@
         :param config_file : Service's config file to query sections
         :param interface : Subordinate interface to inspect
         """
-        self.service = service
         self.config_file = config_file
-        self.interface = interface
+        if isinstance(service, list):
+            self.services = service
+        else:
+            self.services = [service]
+        if isinstance(interface, list):
+            self.interfaces = interface
+        else:
+            self.interfaces = [interface]

     def __call__(self):
-        ctxt = {}
-        for rid in relation_ids(self.interface):
+        ctxt = {'sections': {}}
+        rids = []
+        for interface in self.interfaces:
+            rids.extend(relation_ids(interface))
+        for rid in rids:
             for unit in related_units(rid):
                 sub_config = relation_get('subordinate_configuration',
                                           rid=rid, unit=unit)
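Because the constructor now normalises both arguments to lists, a principal charm can subscribe one config file to several subordinate services and interfaces at once; the old single-value form keeps working. An illustrative instantiation (the service and interface names are hypothetical)::

    from charmhelpers.contrib.openstack.context import SubordinateConfigContext

    # Single-value form, wrapped into one-element lists internally.
    single = SubordinateConfigContext(service='nova',
                                      config_file='/etc/nova/nova.conf',
                                      interface='nova-ceilometer')

    # List form, merging sections exported over several interfaces.
    multi = SubordinateConfigContext(service=['nova', 'nova-compute'],
                                     config_file='/etc/nova/nova.conf',
                                     interface=['nova-ceilometer',
                                                'neutron-plugin'])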
@@ -670,23 +1150,44 @@
                             'setting from %s' % rid, level=ERROR)
                         continue

-                    if self.service not in sub_config:
-                        log('Found subordinate_config on %s but it contained'
-                            'nothing for %s service' % (rid, self.service))
-                        continue
-
-                    sub_config = sub_config[self.service]
-                    if self.config_file not in sub_config:
-                        log('Found subordinate_config on %s but it contained'
-                            'nothing for %s' % (rid, self.config_file))
-                        continue
-
-                    sub_config = sub_config[self.config_file]
-                    for k, v in sub_config.iteritems():
-                        ctxt[k] = v
-
-        if not ctxt:
-            ctxt['sections'] = {}
+                    for service in self.services:
+                        if service not in sub_config:
+                            log('Found subordinate_config on %s but it contained'
+                                'nothing for %s service' % (rid, service),
+                                level=INFO)
+                            continue
+
+                        sub_config = sub_config[service]
+                        if self.config_file not in sub_config:
+                            log('Found subordinate_config on %s but it contained'
+                                'nothing for %s' % (rid, self.config_file),
+                                level=INFO)
+                            continue
+
+                        sub_config = sub_config[self.config_file]
+                        for k, v in six.iteritems(sub_config):
+                            if k == 'sections':
+                                for section, config_list in six.iteritems(v):
+                                    log("adding section '%s'" % (section),
+                                        level=DEBUG)
+                                    if ctxt[k].get(section):
+                                        ctxt[k][section].extend(config_list)
+                                    else:
+                                        ctxt[k][section] = config_list
+                            else:
+                                ctxt[k] = v
+        log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
+        return ctxt
+
+
+class LogLevelContext(OSContextGenerator):
+
+    def __call__(self):
+        ctxt = {}
+        ctxt['debug'] = \
+            False if config('debug') is None else config('debug')
+        ctxt['verbose'] = \
+            False if config('verbose') is None else config('verbose')

         return ctxt

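The important behavioural change in this hunk is that per-section key/value lists from every subordinate unit are now accumulated under ctxt['sections'] instead of being flattened into the top-level context. A minimal sketch of that merge step in isolation (plain dicts, no relation data)::

    def merge_sections(ctxt, sub_config):
        """Mimic the per-unit merge performed in __call__() above."""
        for k, v in sub_config.items():
            if k == 'sections':
                for section, config_list in v.items():
                    ctxt['sections'].setdefault(section, []).extend(config_list)
            else:
                ctxt[k] = v
        return ctxt

    ctxt = {'sections': {}}
    merge_sections(ctxt, {'sections': {'DEFAULT': [['key1', 'value1']]}})
    merge_sections(ctxt, {'sections': {'DEFAULT': [['key2', 'value2']]}})
    print(ctxt)
    # {'sections': {'DEFAULT': [['key1', 'value1'], ['key2', 'value2']]}}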
@@ -694,7 +1195,233 @@
 class SyslogContext(OSContextGenerator):

     def __call__(self):
-        ctxt = {
-            'use_syslog': config('use-syslog')
+        ctxt = {'use_syslog': config('use-syslog')}
+        return ctxt
+
+
+class BindHostContext(OSContextGenerator):
+
+    def __call__(self):
+        if config('prefer-ipv6'):
+            return {'bind_host': '::'}
+        else:
+            return {'bind_host': '0.0.0.0'}
+
+
+class WorkerConfigContext(OSContextGenerator):
+
+    @property
+    def num_cpus(self):
+        try:
+            from psutil import NUM_CPUS
+        except ImportError:
+            apt_install('python-psutil', fatal=True)
+            from psutil import NUM_CPUS
+
+        return NUM_CPUS
+
+    def __call__(self):
+        multiplier = config('worker-multiplier') or 0
+        ctxt = {"workers": self.num_cpus * multiplier}
+        return ctxt
+
+
+class ZeroMQContext(OSContextGenerator):
+    interfaces = ['zeromq-configuration']
+
+    def __call__(self):
+        ctxt = {}
+        if is_relation_made('zeromq-configuration', 'host'):
+            for rid in relation_ids('zeromq-configuration'):
+                for unit in related_units(rid):
+                    ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
+                    ctxt['zmq_host'] = relation_get('host', unit, rid)
+                    ctxt['zmq_redis_address'] = relation_get(
+                        'zmq_redis_address', unit, rid)
+
+        return ctxt
+
+
+class NotificationDriverContext(OSContextGenerator):
+
+    def __init__(self, zmq_relation='zeromq-configuration',
+                 amqp_relation='amqp'):
+        """
+        :param zmq_relation: Name of Zeromq relation to check
+        """
+        self.zmq_relation = zmq_relation
+        self.amqp_relation = amqp_relation
+
+    def __call__(self):
+        ctxt = {'notifications': 'False'}
+        if is_relation_made(self.amqp_relation):
+            ctxt['notifications'] = "True"
+
+        return ctxt
+
+
+class SysctlContext(OSContextGenerator):
+    """This context checks if the 'sysctl' option exists in the configuration
+    and, if so, creates a file with the loaded contents."""
+    def __call__(self):
+        sysctl_dict = config('sysctl')
+        if sysctl_dict:
+            sysctl_create(sysctl_dict,
+                          '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
+        return {'sysctl': sysctl_dict}
+
+
+class NeutronAPIContext(OSContextGenerator):
+    '''
+    Inspects the current neutron-plugin-api relation for neutron settings.
+    Returns defaults if it is not present.
+    '''
+    interfaces = ['neutron-plugin-api']
+
+    def __call__(self):
+        self.neutron_defaults = {
+            'l2_population': {
+                'rel_key': 'l2-population',
+                'default': False,
+            },
+            'overlay_network_type': {
+                'rel_key': 'overlay-network-type',
+                'default': 'gre',
+            },
+            'neutron_security_groups': {
+                'rel_key': 'neutron-security-groups',
+                'default': False,
+            },
+            'network_device_mtu': {
+                'rel_key': 'network-device-mtu',
+                'default': None,
+            },
+            'enable_dvr': {
+                'rel_key': 'enable-dvr',
+                'default': False,
+            },
+            'enable_l3ha': {
+                'rel_key': 'enable-l3ha',
+                'default': False,
+            },
         }
-        return ctxt
+        ctxt = self.get_neutron_options({})
+        for rid in relation_ids('neutron-plugin-api'):
+            for unit in related_units(rid):
+                rdata = relation_get(rid=rid, unit=unit)
+                if 'l2-population' in rdata:
+                    ctxt.update(self.get_neutron_options(rdata))
+
+        return ctxt
+
+    def get_neutron_options(self, rdata):
+        settings = {}
+        for nkey in self.neutron_defaults.keys():
+            defv = self.neutron_defaults[nkey]['default']
+            rkey = self.neutron_defaults[nkey]['rel_key']
+            if rkey in rdata.keys():
+                if type(defv) is bool:
+                    settings[nkey] = bool_from_string(rdata[rkey])
+                else:
+                    settings[nkey] = rdata[rkey]
+            else:
+                settings[nkey] = defv
+        return settings
+
+
+class ExternalPortContext(NeutronPortContext):
+
+    def __call__(self):
+        ctxt = {}
+        ports = config('ext-port')
+        if ports:
+            ports = [p.strip() for p in ports.split()]
+            ports = self.resolve_ports(ports)
+            if ports:
+                ctxt = {"ext_port": ports[0]}
+                napi_settings = NeutronAPIContext()()
+                mtu = napi_settings.get('network_device_mtu')
+                if mtu:
+                    ctxt['ext_port_mtu'] = mtu
+
+        return ctxt
+
+
+class DataPortContext(NeutronPortContext):
+
+    def __call__(self):
+        ports = config('data-port')
+        if ports:
+            # Map of {port/mac:bridge}
+            portmap = parse_data_port_mappings(ports)
+            ports = portmap.keys()
+            # Resolve provided ports or mac addresses and filter out those
+            # already attached to a bridge.
+            resolved = self.resolve_ports(ports)
+            # FIXME: is this necessary?
+            normalized = {get_nic_hwaddr(port): port for port in resolved
+                          if port not in ports}
+            normalized.update({port: port for port in resolved
+                               if port in ports})
+            if resolved:
+                return {normalized[port]: bridge for port, bridge in
+                        six.iteritems(portmap) if port in normalized.keys()}
+
+        return None
+
+
+class PhyNICMTUContext(DataPortContext):
+
+    def __call__(self):
+        ctxt = {}
+        mappings = super(PhyNICMTUContext, self).__call__()
+        if mappings and mappings.keys():
+            ports = sorted(mappings.keys())
+            napi_settings = NeutronAPIContext()()
+            mtu = napi_settings.get('network_device_mtu')
+            all_ports = set()
+            # If any of the ports is a vlan device, its underlying device
+            # must have the mtu applied first.
+            for port in ports:
+                for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
+                    lport = os.path.basename(lport)
+                    all_ports.add(lport.split('_')[1])
+
+            all_ports = list(all_ports)
+            all_ports.extend(ports)
+            if mtu:
+                ctxt["devs"] = '\\n'.join(all_ports)
+                ctxt['mtu'] = mtu
+
+        return ctxt
+
+
+class NetworkServiceContext(OSContextGenerator):
+
+    def __init__(self, rel_name='quantum-network-service'):
+        self.rel_name = rel_name
+        self.interfaces = [rel_name]
+
+    def __call__(self):
+        for rid in relation_ids(self.rel_name):
+            for unit in related_units(rid):
+                rdata = relation_get(rid=rid, unit=unit)
+                ctxt = {
+                    'keystone_host': rdata.get('keystone_host'),
+                    'service_port': rdata.get('service_port'),
+                    'auth_port': rdata.get('auth_port'),
+                    'service_tenant': rdata.get('service_tenant'),
+                    'service_username': rdata.get('service_username'),
+                    'service_password': rdata.get('service_password'),
+                    'quantum_host': rdata.get('quantum_host'),
+                    'quantum_port': rdata.get('quantum_port'),
+                    'quantum_url': rdata.get('quantum_url'),
+                    'region': rdata.get('region'),
+                    'service_protocol':
+                    rdata.get('service_protocol') or 'http',
+                    'auth_protocol':
+                    rdata.get('auth_protocol') or 'http',
+                }
+                if self.context_complete(ctxt):
+                    return ctxt
+        return {}

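Of the contexts added in this hunk, NeutronAPIContext is the one other charms lean on most: every setting falls back to its entry in neutron_defaults unless the neutron-plugin-api relation supplies the matching rel_key, and boolean values arrive as strings and are parsed. A standalone sketch of that fallback logic, with the defaults table trimmed to two entries and strtobool standing in for charm-helpers' bool_from_string::

    from distutils.util import strtobool

    # Trimmed copy of the defaults table from the diff above.
    NEUTRON_DEFAULTS = {
        'l2_population': {'rel_key': 'l2-population', 'default': False},
        'overlay_network_type': {'rel_key': 'overlay-network-type',
                                 'default': 'gre'},
    }

    def get_neutron_options(rdata):
        """Resolve each setting from relation data, else use the default."""
        settings = {}
        for nkey, meta in NEUTRON_DEFAULTS.items():
            rkey = meta['rel_key']
            if rkey in rdata:
                if isinstance(meta['default'], bool):
                    settings[nkey] = bool(strtobool(rdata[rkey]))
                else:
                    settings[nkey] = rdata[rkey]
            else:
                settings[nkey] = meta['default']
        return settings

    print(get_neutron_options({}))  # defaults only
    print(get_neutron_options({'l2-population': 'True',
                               'overlay-network-type': 'vxlan'}))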
=== added directory 'hooks/charmhelpers/contrib/openstack/files'
=== added file 'hooks/charmhelpers/contrib/openstack/files/__init__.py'
--- hooks/charmhelpers/contrib/openstack/files/__init__.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/files/__init__.py 2015-11-12 11:46:11 +0000
@@ -0,0 +1,18 @@
The diff has been truncated for viewing.
