Merge lp:~corey.bryant/charms/trusty/quantum-gateway/end-of-life into lp:~openstack-charmers/charms/trusty/quantum-gateway/next

Proposed by Corey Bryant
Status: Merged
Merged at revision: 114
Proposed branch: lp:~corey.bryant/charms/trusty/quantum-gateway/end-of-life
Merge into: lp:~openstack-charmers/charms/trusty/quantum-gateway/next
Diff against target: 15436 lines (+344/-14080)
138 files modified
.bzrignore (+0/-1)
.coveragerc (+0/-6)
.project (+0/-17)
.pydevproject (+0/-9)
Makefile (+1/-13)
README.md (+1/-207)
actions.yaml (+0/-2)
actions/git_reinstall.py (+0/-45)
charm-helpers-hooks.yaml (+0/-9)
charm-helpers-tests.yaml (+0/-5)
files/NeutronAgentMon (+0/-155)
files/neutron-ha-monitor.conf (+0/-4)
files/neutron-ha-monitor.py (+0/-436)
hooks/charmhelpers/contrib/__init__.py (+0/-15)
hooks/charmhelpers/contrib/charmsupport/__init__.py (+0/-15)
hooks/charmhelpers/contrib/charmsupport/nrpe.py (+0/-360)
hooks/charmhelpers/contrib/charmsupport/volumes.py (+0/-175)
hooks/charmhelpers/contrib/hahelpers/__init__.py (+0/-15)
hooks/charmhelpers/contrib/hahelpers/apache.py (+0/-82)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+0/-272)
hooks/charmhelpers/contrib/network/__init__.py (+0/-15)
hooks/charmhelpers/contrib/network/ip.py (+0/-450)
hooks/charmhelpers/contrib/network/ovs/__init__.py (+0/-96)
hooks/charmhelpers/contrib/network/ufw.py (+0/-276)
hooks/charmhelpers/contrib/openstack/__init__.py (+0/-15)
hooks/charmhelpers/contrib/openstack/alternatives.py (+0/-33)
hooks/charmhelpers/contrib/openstack/amulet/__init__.py (+0/-15)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+0/-146)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+0/-294)
hooks/charmhelpers/contrib/openstack/context.py (+0/-1328)
hooks/charmhelpers/contrib/openstack/files/__init__.py (+0/-18)
hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh (+0/-32)
hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh (+0/-30)
hooks/charmhelpers/contrib/openstack/ip.py (+0/-146)
hooks/charmhelpers/contrib/openstack/neutron.py (+0/-322)
hooks/charmhelpers/contrib/openstack/templates/__init__.py (+0/-18)
hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+0/-15)
hooks/charmhelpers/contrib/openstack/templates/git.upstart (+0/-17)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+0/-58)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend (+0/-24)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf (+0/-24)
hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken (+0/-9)
hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo (+0/-22)
hooks/charmhelpers/contrib/openstack/templates/section-zeromq (+0/-14)
hooks/charmhelpers/contrib/openstack/templating.py (+0/-295)
hooks/charmhelpers/contrib/openstack/utils.py (+0/-642)
hooks/charmhelpers/contrib/python/__init__.py (+0/-15)
hooks/charmhelpers/contrib/python/debug.py (+0/-56)
hooks/charmhelpers/contrib/python/packages.py (+0/-96)
hooks/charmhelpers/contrib/python/rpdb.py (+0/-58)
hooks/charmhelpers/contrib/python/version.py (+0/-34)
hooks/charmhelpers/contrib/storage/__init__.py (+0/-15)
hooks/charmhelpers/contrib/storage/linux/__init__.py (+0/-15)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+0/-444)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+0/-78)
hooks/charmhelpers/contrib/storage/linux/lvm.py (+0/-105)
hooks/charmhelpers/contrib/storage/linux/utils.py (+0/-70)
hooks/charmhelpers/core/hookenv.py (+232/-38)
hooks/charmhelpers/core/host.py (+56/-12)
hooks/charmhelpers/core/services/base.py (+43/-19)
hooks/charmhelpers/core/services/helpers.py (+2/-2)
hooks/charmhelpers/fetch/__init__.py (+0/-439)
hooks/charmhelpers/fetch/archiveurl.py (+0/-161)
hooks/charmhelpers/fetch/bzrurl.py (+0/-78)
hooks/charmhelpers/fetch/giturl.py (+0/-71)
hooks/charmhelpers/payload/__init__.py (+0/-17)
hooks/charmhelpers/payload/execd.py (+0/-66)
hooks/quantum_contexts.py (+0/-193)
hooks/quantum_hooks.py (+7/-327)
hooks/quantum_utils.py (+0/-1153)
metadata.yaml (+2/-0)
setup.cfg (+0/-5)
templates/ext-port.conf (+0/-16)
templates/folsom/dhcp_agent.ini (+0/-10)
templates/folsom/l3_agent.ini (+0/-8)
templates/folsom/metadata_agent.ini (+0/-12)
templates/folsom/nova.conf (+0/-26)
templates/folsom/ovs_quantum_plugin.ini (+0/-8)
templates/folsom/quantum.conf (+0/-15)
templates/git/cron.d/neutron-dhcp-agent-netns-cleanup (+0/-4)
templates/git/cron.d/neutron-l3-agent-netns-cleanup (+0/-4)
templates/git/cron.d/neutron-lbaas-agent-netns-cleanup (+0/-4)
templates/git/neutron_sudoers (+0/-4)
templates/git/upstart/neutron-agent.upstart (+0/-25)
templates/git/upstart/neutron-ovs-cleanup.upstart (+0/-13)
templates/git/upstart/neutron-server.upstart (+0/-22)
templates/grizzly/nova.conf (+0/-22)
templates/grizzly/quantum.conf (+0/-11)
templates/havana/dhcp_agent.ini (+0/-27)
templates/havana/dnsmasq.conf (+0/-3)
templates/havana/fwaas_driver.ini (+0/-7)
templates/havana/l3_agent.ini (+0/-24)
templates/havana/lbaas_agent.ini (+0/-8)
templates/havana/metadata_agent.ini (+0/-18)
templates/havana/metering_agent.ini (+0/-10)
templates/havana/neutron.conf (+0/-16)
templates/havana/nova.conf (+0/-27)
templates/havana/ovs_neutron_plugin.ini (+0/-14)
templates/havana/vpn_agent.ini (+0/-8)
templates/icehouse/metadata_agent.ini (+0/-19)
templates/icehouse/ml2_conf.ini (+0/-35)
templates/icehouse/neutron.conf (+0/-18)
templates/juno/l3_agent.ini (+0/-25)
templates/juno/ml2_conf.ini (+0/-36)
templates/kilo/fwaas_driver.ini (+0/-8)
templates/kilo/lbaas_agent.ini (+0/-13)
templates/kilo/neutron.conf (+0/-25)
templates/kilo/nova.conf (+0/-33)
templates/kilo/vpn_agent.ini (+0/-9)
templates/os-charm-phy-nic-mtu.conf (+0/-22)
templates/parts/database (+0/-1)
templates/parts/rabbitmq (+0/-21)
tests/00-setup (+0/-11)
tests/014-basic-precise-icehouse (+0/-11)
tests/015-basic-trusty-icehouse (+0/-9)
tests/016-basic-trusty-juno (+0/-11)
tests/017-basic-trusty-kilo (+0/-11)
tests/018-basic-utopic-juno (+0/-9)
tests/019-basic-vivid-kilo (+0/-9)
tests/050-basic-trusty-icehouse-git (+0/-9)
tests/051-basic-trusty-juno-git (+0/-12)
tests/README (+0/-53)
tests/basic_deployment.py (+0/-656)
tests/charmhelpers/__init__.py (+0/-38)
tests/charmhelpers/contrib/__init__.py (+0/-15)
tests/charmhelpers/contrib/amulet/__init__.py (+0/-15)
tests/charmhelpers/contrib/amulet/deployment.py (+0/-93)
tests/charmhelpers/contrib/amulet/utils.py (+0/-323)
tests/charmhelpers/contrib/openstack/__init__.py (+0/-15)
tests/charmhelpers/contrib/openstack/amulet/__init__.py (+0/-15)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+0/-146)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+0/-294)
unit_tests/__init__.py (+0/-4)
unit_tests/test_actions_git_reinstall.py (+0/-105)
unit_tests/test_quantum_contexts.py (+0/-274)
unit_tests/test_quantum_hooks.py (+0/-370)
unit_tests/test_quantum_utils.py (+0/-1077)
unit_tests/test_utils.py (+0/-100)
To merge this branch: bzr merge lp:~corey.bryant/charms/trusty/quantum-gateway/end-of-life
Reviewer Review Type Date Requested Status
James Page Approve
Review via email: mp+265035@code.launchpad.net
To post a comment you must log in.
Revision history for this message
Corey Bryant (corey.bryant) wrote :

We need to let users know they shouldn't use the quantum-gateway charm. This patch uses status-set to let users know it's EOL and they should use the neutron-gateway charm instead.

The two options I debated were:
 1) sys.exit(1) in install hook (doesn't allow you to set a status message; could set log message but user has to look in log)
 2) set status to 'blocked' with corresponding message* (doesn't fail the deployment but displays a clear message to 'juju status')

* status-set can only set the state to 'maintenance', 'blocked', 'waiting', or 'active'.

I went with option 2 in this patch, so deployment will look successful and 'juju status' will show the following:

'status-get'
valid_states = ['maintenance', 'blocked', 'waiting', 'active']
  neutron-gateway:
    charm: local:trusty/quantum-gateway-64
    exposed: false
    service-status:
      current: blocked
      message: Charm has reached end-of-life. Please use neutron-gateway charm.
      since: 16 Jul 2015 18:11:14Z
    relations:
      amqp:
      - rabbitmq-server
      cluster:
      - neutron-gateway
      neutron-plugin-api:
      - neutron-api
      quantum-network-service:
      - nova-cloud-controller
      shared-db:
      - mysql
    units:
      neutron-gateway/0:
        workload-status:
          current: blocked
          message: Charm has reached end-of-life. Please use neutron-gateway charm.
          since: 16 Jul 2015 18:11:14Z
        agent-status:
          current: idle
          since: 16 Jul 2015 18:17:59Z
          version: 1.24.2
        agent-state: started
        agent-version: 1.24.2
        machine: "12"
        public-address: 10.5.3.37

Revision history for this message
Corey Bryant (corey.bryant) wrote :

I also stripped out any functionality from the charm.

115. By Corey Bryant

quantum-gateway charm has reached end-of-life

Strip all functionality from charm and issue status message
reporting end-of-life and pointing users to neutron-gateway charm.

Revision history for this message
James Page (james-page) wrote :

Aside from the amendment to the README I think this is OK.

review: Approve
Revision history for this message
Corey Bryant (corey.bryant) wrote :

James, Good point. I responded below. I hit a bug testing the charm upgrade and proposed a fix to charm-helpers.

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== modified file '.bzrignore'
--- .bzrignore 2015-04-10 14:22:04 +0000
+++ .bzrignore 2015-07-16 19:59:55 +0000
@@ -1,3 +1,2 @@
1bin1bin
2.coverage
3tags2tags
43
=== removed file '.coveragerc'
--- .coveragerc 2013-07-19 09:46:25 +0000
+++ .coveragerc 1970-01-01 00:00:00 +0000
@@ -1,6 +0,0 @@
1[report]
2# Regexes for lines to exclude from consideration
3exclude_lines =
4 if __name__ == .__main__.:
5include=
6 hooks/quantum_*
70
=== removed file '.project'
--- .project 2012-12-06 10:22:24 +0000
+++ .project 1970-01-01 00:00:00 +0000
@@ -1,17 +0,0 @@
1<?xml version="1.0" encoding="UTF-8"?>
2<projectDescription>
3 <name>quantum-gateway</name>
4 <comment></comment>
5 <projects>
6 </projects>
7 <buildSpec>
8 <buildCommand>
9 <name>org.python.pydev.PyDevBuilder</name>
10 <arguments>
11 </arguments>
12 </buildCommand>
13 </buildSpec>
14 <natures>
15 <nature>org.python.pydev.pythonNature</nature>
16 </natures>
17</projectDescription>
180
=== removed file '.pydevproject'
--- .pydevproject 2013-07-19 09:46:25 +0000
+++ .pydevproject 1970-01-01 00:00:00 +0000
@@ -1,9 +0,0 @@
1<?xml version="1.0" encoding="UTF-8" standalone="no"?>
2<?eclipse-pydev version="1.0"?><pydev_project>
3<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
4<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
5<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
6<path>/quantum-gateway/hooks</path>
7<path>/quantum-gateway/unit_tests</path>
8</pydev_pathproperty>
9</pydev_project>
100
=== modified file 'Makefile'
--- Makefile 2015-04-16 21:32:07 +0000
+++ Makefile 2015-07-16 19:59:55 +0000
@@ -2,13 +2,9 @@
2PYTHON := /usr/bin/env python2PYTHON := /usr/bin/env python
33
4lint:4lint:
5 @flake8 --exclude hooks/charmhelpers actions hooks unit_tests tests5 @flake8 --exclude hooks/charmhelpers hooks
6 @charm proof6 @charm proof
77
8unit_test:
9 @echo Starting unit tests...
10 @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
11
12bin/charm_helpers_sync.py:8bin/charm_helpers_sync.py:
13 @mkdir -p bin9 @mkdir -p bin
14 @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \10 @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
@@ -16,14 +12,6 @@
1612
17sync: bin/charm_helpers_sync.py13sync: bin/charm_helpers_sync.py
18 @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml14 @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
19 @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
20
21test:
22 @echo Starting Amulet tests...
23 # coreycb note: The -v should only be temporary until Amulet sends
24 # raise_status() messages to stderr:
25 # https://bugs.launchpad.net/amulet/+bug/1320357
26 @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700
2715
28publish: lint unit_test16publish: lint unit_test
29 bzr push lp:charms/quantum-gateway17 bzr push lp:charms/quantum-gateway
3018
=== modified file 'README.md'
--- README.md 2015-05-01 10:33:20 +0000
+++ README.md 2015-07-16 19:59:55 +0000
@@ -1,210 +1,4 @@
1Overview1Overview
2--------2--------
33
4Neutron provides flexible software defined networking (SDN) for OpenStack.4This charm has reached end-of-life. Please use the neutron-gateway charm.
5
6This charm is designed to be used in conjunction with the rest of the OpenStack
7related charms in the charm store to virtualize the network that Nova Compute
8instances plug into.
9
10It's designed as a replacement for nova-network; however it does not yet
11support all of the features of nova-network (such as multihost) so may not
12be suitable for all.
13
14Neutron supports a rich plugin/extension framework for proprietary networking
15solutions and supports (in core) Nicira NVP, NEC, Cisco and others...
16
17The Openstack charms currently only support the fully free OpenvSwitch plugin
18and implements the 'Provider Router with Private Networks' use case.
19
20See the upstream [Neutron documentation](http://docs.openstack.org/trunk/openstack-network/admin/content/use_cases_single_router.html)
21for more details.
22
23
24Usage
25-----
26
27In order to use Neutron with Openstack, you will need to deploy the
28nova-compute and nova-cloud-controller charms with the network-manager
29configuration set to 'Neutron':
30
31 nova-cloud-controller:
32 network-manager: Neutron
33
34This decision must be made prior to deploying Openstack with Juju as
35Neutron is deployed baked into these charms from install onwards:
36
37 juju deploy nova-compute
38 juju deploy --config config.yaml nova-cloud-controller
39 juju add-relation nova-compute nova-cloud-controller
40
41The Neutron Gateway can then be added to the deploying:
42
43 juju deploy quantum-gateway
44 juju add-relation quantum-gateway mysql
45 juju add-relation quantum-gateway rabbitmq-server
46 juju add-relation quantum-gateway nova-cloud-controller
47
48The gateway provides two key services; L3 network routing and DHCP services.
49
50These are both required in a fully functional Neutron Openstack deployment.
51
52See upstream [Neutron multi extnet](http://docs.openstack.org/trunk/config-reference/content/adv_cfg_l3_agent_multi_extnet.html)
53
54Configuration Options
55---------------------
56
57External Port Configuration
58===========================
59
60If the port to be used for external traffic is consistent across all physical
61servers then it can be specified by simply setting ext-port to the nic id:
62
63 quantum-gateway:
64 ext-port: eth2
65
66However, if it varies between hosts then the mac addresses of the external
67nics for each host can be passed as a space separated list:
68
69 quantum-gateway:
70 ext-port: <MAC ext port host 1> <MAC ext port host 2> <MAC ext port host 3>
71
72
73Multiple Floating Pools
74=======================
75
76If multiple floating pools are needed then an L3 agent (which corresponds to
77a quantum-gateway for the sake of this charm) is needed for each one. Each
78gateway needs to be deployed as a separate service so that the external
79network id can be set differently for each gateway e.g.
80
81 juju deploy quantum-gateway quantum-gateway-extnet1
82 juju add-relation quantum-gateway-extnet1 mysql
83 juju add-relation quantum-gateway-extnet1 rabbitmq-server
84 juju add-relation quantum-gateway-extnet1 nova-cloud-controller
85 juju deploy quantum-gateway quantum-gateway-extnet2
86 juju add-relation quantum-gateway-extnet2 mysql
87 juju add-relation quantum-gateway-extnet2 rabbitmq-server
88 juju add-relation quantum-gateway-extnet2 nova-cloud-controller
89
90 Create extnet1 and extnet2 via neutron client and take a note of their ids
91
92 juju set quantum-gateway-extnet1 "run-internal-router=leader"
93 juju set quantum-gateway-extnet2 "run-internal-router=none"
94 juju set quantum-gateway-extnet1 "external-network-id=<extnet1 id>"
95 juju set quantum-gateway-extnet2 "external-network-id=<extnet2 id>"
96
97Instance MTU
98============
99
100When using Open vSwitch plugin with GRE tunnels default MTU of 1500 can cause
101packet fragmentation due to GRE overhead. One solution is to increase the MTU on
102physical hosts and network equipment. When this is not possible or practical the
103charm's instance-mtu option can be used to reduce instance MTU via DHCP.
104
105 juju set quantum-gateway instance-mtu=1400
106
107OpenStack upstream documentation recommends an MTU value of 1400:
108[Openstack documentation](http://docs.openstack.org/admin-guide-cloud/content/openvswitch_plugin.html)
109
110Note that this option was added in Havana and will be ignored in older releases.
111
112Deploying from source
113=====================
114
115The minimum openstack-origin-git config required to deploy from source is:
116
117 openstack-origin-git: include-file://neutron-juno.yaml
118
119 neutron-juno.yaml
120 -----------------
121 repositories:
122 - {name: requirements,
123 repository: 'git://github.com/openstack/requirements',
124 branch: stable/juno}
125 - {name: neutron,
126 repository: 'git://github.com/openstack/neutron',
127 branch: stable/juno}
128
129Note that there are only two 'name' values the charm knows about: 'requirements'
130and 'neutron'. These repositories must correspond to these 'name' values.
131Additionally, the requirements repository must be specified first and the
132neutron repository must be specified last. All other repositories are installed
133in the order in which they are specified.
134
135The following is a full list of current tip repos (may not be up-to-date):
136
137 openstack-origin-git: include-file://neutron-master.yaml
138
139 neutron-master.yaml
140 -------------------
141 repositories:
142 - {name: requirements,
143 repository: 'git://github.com/openstack/requirements',
144 branch: master}
145 - {name: oslo-concurrency,
146 repository: 'git://github.com/openstack/oslo.concurrency',
147 branch: master}
148 - {name: oslo-config,
149 repository: 'git://github.com/openstack/oslo.config',
150 branch: master}
151 - {name: oslo-context,
152 repository: 'git://github.com/openstack/oslo.context',
153 branch: master}
154 - {name: oslo-db,
155 repository: 'git://github.com/openstack/oslo.db',
156 branch: master}
157 - {name: oslo-i18n,
158 repository: 'git://github.com/openstack/oslo.i18n',
159 branch: master}
160 - {name: oslo-messaging,
161 repository: 'git://github.com/openstack/oslo.messaging',
162 branch: master}
163 - {name: oslo-middleware,
164 repository: 'git://github.com/openstack/oslo.middleware',
165 branch: master}
166 - {name: oslo-rootwrap,
167 repository: 'git://github.com/openstack/oslo.rootwrap',
168 branch: master}
169 - {name: oslo-serialization,
170 repository: 'git://github.com/openstack/oslo.serialization',
171 branch: master}
172 - {name: oslo-utils,
173 repository: 'git://github.com/openstack/oslo.utils',
174 branch: master}
175 - {name: pbr,
176 repository: 'git://github.com/openstack-dev/pbr',
177 branch: master}
178 - {name: stevedore,
179 repository: 'git://github.com/openstack/stevedore',
180 branch: 'master'}
181 - {name: python-keystoneclient,
182 repository: 'git://github.com/openstack/python-keystoneclient',
183 branch: master}
184 - {name: python-neutronclient,
185 repository: 'git://github.com/openstack/python-neutronclient',
186 branch: master}
187 - {name: python-novaclient,
188 repository: 'git://github.com/openstack/python-novaclient',
189 branch: master}
190 - {name: keystonemiddleware,
191 repository: 'git://github.com/openstack/keystonemiddleware',
192 branch: master}
193 - {name: neutron-fwaas,
194 repository: 'git://github.com/openstack/neutron-fwaas',
195 branch: master}
196 - {name: neutron-lbaas,
197 repository: 'git://github.com/openstack/neutron-lbaas',
198 branch: master}
199 - {name: neutron-vpnaas,
200 repository: 'git://github.com/openstack/neutron-vpnaas',
201 branch: master}
202 - {name: neutron,
203 repository: 'git://github.com/openstack/neutron',
204 branch: master}
205
206TODO
207----
208
209 * Provide more network configuration use cases.
210 * Support VLAN in addition to GRE+OpenFlow for L2 separation.
2115
=== removed directory 'actions'
=== removed file 'actions.yaml'
--- actions.yaml 2015-04-10 14:22:04 +0000
+++ actions.yaml 1970-01-01 00:00:00 +0000
@@ -1,2 +0,0 @@
1git-reinstall:
2 description: Reinstall quantum-gateway from the openstack-origin-git repositories.
30
=== removed symlink 'actions/git-reinstall'
=== target was u'git_reinstall.py'
=== removed file 'actions/git_reinstall.py'
--- actions/git_reinstall.py 2015-04-15 16:46:22 +0000
+++ actions/git_reinstall.py 1970-01-01 00:00:00 +0000
@@ -1,45 +0,0 @@
1#!/usr/bin/python
2import sys
3import traceback
4
5sys.path.append('hooks/')
6
7from charmhelpers.contrib.openstack.utils import (
8 git_install_requested,
9)
10
11from charmhelpers.core.hookenv import (
12 action_set,
13 action_fail,
14 config,
15)
16
17from quantum_utils import (
18 git_install,
19)
20
21from quantum_hooks import (
22 config_changed,
23)
24
25
26def git_reinstall():
27 """Reinstall from source and restart services.
28
29 If the openstack-origin-git config option was used to install openstack
30 from source git repositories, then this action can be used to reinstall
31 from updated git repositories, followed by a restart of services."""
32 if not git_install_requested():
33 action_fail('openstack-origin-git is not configured')
34 return
35
36 try:
37 git_install(config('openstack-origin-git'))
38 config_changed()
39 except:
40 action_set({'traceback': traceback.format_exc()})
41 action_fail('git-reinstall resulted in an unexpected error')
42
43
44if __name__ == '__main__':
45 git_reinstall()
460
=== modified file 'charm-helpers-hooks.yaml'
--- charm-helpers-hooks.yaml 2015-05-02 22:35:06 +0000
+++ charm-helpers-hooks.yaml 2015-07-16 19:59:55 +0000
@@ -2,12 +2,3 @@
2destination: hooks/charmhelpers2destination: hooks/charmhelpers
3include:3include:
4 - core4 - core
5 - fetch
6 - contrib.openstack|inc=*
7 - contrib.hahelpers
8 - contrib.network
9 - contrib.python.packages
10 - contrib.storage.linux
11 - contrib.python
12 - payload.execd
13 - contrib.charmsupport
145
=== removed file 'charm-helpers-tests.yaml'
--- charm-helpers-tests.yaml 2015-05-02 22:35:06 +0000
+++ charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000
@@ -1,5 +0,0 @@
1branch: lp:charm-helpers
2destination: tests/charmhelpers
3include:
4 - contrib.amulet
5 - contrib.openstack.amulet
60
=== removed directory 'files'
=== removed file 'files/NeutronAgentMon'
--- files/NeutronAgentMon 2015-01-15 10:00:38 +0000
+++ files/NeutronAgentMon 1970-01-01 00:00:00 +0000
@@ -1,155 +0,0 @@
1#!/bin/sh
2#
3#
4# NeutronAgentMon OCF RA.
5# Starts crm_mon in background which logs cluster status as
6# html to the specified file.
7#
8# Copyright 2014 Canonical Ltd.
9#
10# Authors: Hui Xiang <hui.xiang@canonical.com>
11# Edward Hope-Morley <edward.hope-morley@canonical.com>
12#
13# OCF instance parameters:
14# OCF_RESKEY_file
15
16#######################################################################
17# Initialization:
18: ${OCF_FUNCTIONS=${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs}
19. ${OCF_FUNCTIONS}
20: ${__OCF_ACTION=$1}
21
22#######################################################################
23
24meta_data() {
25 cat <<END
26<?xml version="1.0"?>
27<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
28<resource-agent name="NeutronAgentMon">
29<version>1.0</version>
30
31<longdesc lang="en">
32This is a NeutronAgentMon Resource Agent.
33It monitors the 'neutron-ha-monitor daemon' status.
34</longdesc>
35<shortdesc lang="en">Monitor '/usr/local/bin/neutron-ha-monitor.py' in the background.</shortdesc>
36
37<parameters>
38
39<parameter name="file" unique="0">
40<longdesc lang="en">
41The file we want to run as a daemon.
42</longdesc>
43<shortdesc lang="en">The file we want to run as a daemon.</shortdesc>
44<content type="string" default="/usr/local/bin/neutron-ha-monitor.py" />
45</parameter>
46
47</parameters>
48
49<actions>
50<action name="start" timeout="20" />
51<action name="stop" timeout="20" />
52<action name="monitor" depth="0" timeout="20" interval="60" />
53<action name="meta-data" timeout="5" />
54<action name="validate-all" timeout="30" />
55</actions>
56</resource-agent>
57END
58}
59
60#######################################################################
61
62NeutronAgentMon_usage() {
63 cat <<END
64usage: $0 {start|stop|monitor|validate-all|meta-data}
65
66Expects to have a fully populated OCF RA-compliant environment set.
67END
68}
69
70NeutronAgentMon_exit() {
71 if [ $1 != 0 ]; then
72 exit $OCF_ERR_GENERIC
73 else
74 exit $OCF_SUCCESS
75 fi
76}
77
78NeutronAgentMon_start() {
79 pid=`sudo ps -aux | grep neutron-ha-m\[o\]nitor.py | awk -F' ' '{print $2}'`
80 if [ -z $pid ]; then
81 ocf_log info "[NeutronAgentMon_start] Start Monitor daemon."
82 sudo mkdir -p /var/log/neutron-ha
83 sudo python /usr/local/bin/neutron-ha-monitor.py \
84 --config-file /var/lib/juju-neutron-ha/neutron-ha-monitor.conf \
85 --log-file /var/log/neutron-ha/monitor.log >> /dev/null 2>&1 & echo $!
86 sleep 5
87 else
88 ocf_log warn "[NeutronAgentMon_start] Monitor daemon already running."
89 fi
90 NeutronAgentMon_exit $?
91}
92
93NeutronAgentMon_stop() {
94 pid=`sudo ps -aux | grep neutron-ha-m\[o\]nitor.py | awk -F' ' '{print $2}'`
95 if [ ! -z $pid ]; then
96 sudo kill -s 9 $pid
97 ocf_log info "[NeutronAgentMon_stop] Pid $pid is killed."
98 else
99 ocf_log warn "[NeutronAgentMon_stop] Monitor daemon already stopped."
100 fi
101 NeutronAgentMon_exit 0
102}
103
104NeutronAgentMon_monitor() {
105 pid=`sudo ps -aux | grep neutron-ha-m\[o\]nitor.py | awk -F' ' '{print $2}'`
106 if [ ! -z $pid ]; then
107 ocf_log info "[NeutronAgentMon_monitor] success."
108 exit $OCF_SUCCESS
109 fi
110 exit $OCF_NOT_RUNNING
111}
112
113NeutronAgentMon_validate() {
114# Existence of the user
115 if [ -f $OCF_RESKEY_file ]; then
116 echo "Validate OK"
117 return $OCF_SUCCESS
118 else
119 ocf_log err "The file $OCF_RESKEY_file does not exist!"
120 exit $OCF_ERR_ARGS
121 fi
122}
123
124if [ $# -ne 1 ]; then
125 NeutronAgentMon_usage
126 exit $OCF_ERR_ARGS
127fi
128
129: ${OCF_RESKEY_update:="15000"}
130: ${OCF_RESKEY_pidfile:="/tmp/NeutronAgentMon_${OCF_RESOURCE_INSTANCE}.pid"}
131: ${OCF_RESKEY_htmlfile:="/tmp/NeutronAgentMon_${OCF_RESOURCE_INSTANCE}.html"}
132
133OCF_RESKEY_update=`expr $OCF_RESKEY_update / 1000`
134
135case $__OCF_ACTION in
136meta-data) meta_data
137 exit $OCF_SUCCESS
138 ;;
139start) NeutronAgentMon_start
140 ;;
141stop) NeutronAgentMon_stop
142 ;;
143monitor) NeutronAgentMon_monitor
144 ;;
145validate-all) NeutronAgentMon_validate
146 ;;
147usage|help) NeutronAgentMon_usage
148 exit $OCF_SUCCESS
149 ;;
150*) NeutronAgentMon_usage
151 exit $OCF_ERR_UNIMPLEMENTED
152 ;;
153esac
154
155exit $?
1560
=== removed file 'files/neutron-ha-monitor.conf'
--- files/neutron-ha-monitor.conf 2015-01-15 10:00:38 +0000
+++ files/neutron-ha-monitor.conf 1970-01-01 00:00:00 +0000
@@ -1,4 +0,0 @@
1[DEFAULT]
2verbose=True
3#debug=True
4check_interval=8
50
=== removed file 'files/neutron-ha-monitor.py'
--- files/neutron-ha-monitor.py 2015-01-19 10:43:09 +0000
+++ files/neutron-ha-monitor.py 1970-01-01 00:00:00 +0000
@@ -1,436 +0,0 @@
1# Copyright 2014 Canonical Ltd.
2#
3# Authors: Hui Xiang <hui.xiang@canonical.com>
4# Joshua Zhang <joshua.zhang@canonical.com>
5# Edward Hope-Morley <edward.hope-morley@canonical.com>
6#
7
8"""
9Helpers for monitoring Neutron agents, reschedule failed agents,
10cleaned resources on failed nodes.
11"""
12
13import os
14import re
15import sys
16import signal
17import socket
18import subprocess
19import time
20
21from oslo.config import cfg
22from neutron.agent.linux import ovs_lib
23from neutron.agent.linux import ip_lib
24from neutron.common import exceptions
25from neutron.openstack.common import log as logging
26
27LOG = logging.getLogger(__name__)
28
29
30class Daemon(object):
31 """A generic daemon class.
32
33 Usage: subclass the Daemon class and override the run() method
34 """
35 def __init__(self, stdin='/dev/null', stdout='/dev/null',
36 stderr='/dev/null', procname='python'):
37 self.stdin = stdin
38 self.stdout = stdout
39 self.stderr = stderr
40 self.procname = procname
41
42 def _fork(self):
43 try:
44 pid = os.fork()
45 if pid > 0:
46 sys.exit(0)
47 except OSError:
48 LOG.exception('Fork failed')
49 sys.exit(1)
50
51 def daemonize(self):
52 """Daemonize process by doing Stevens double fork."""
53 # fork first time
54 self._fork()
55
56 # decouple from parent environment
57 os.chdir("/")
58 os.setsid()
59 os.umask(0)
60 # fork second time
61 self._fork()
62
63 # redirect standard file descriptors
64 sys.stdout.flush()
65 sys.stderr.flush()
66 stdin = open(self.stdin, 'r')
67 stdout = open(self.stdout, 'a+')
68 stderr = open(self.stderr, 'a+', 0)
69 os.dup2(stdin.fileno(), sys.stdin.fileno())
70 os.dup2(stdout.fileno(), sys.stdout.fileno())
71 os.dup2(stderr.fileno(), sys.stderr.fileno())
72
73 signal.signal(signal.SIGTERM, self.handle_sigterm)
74
75 def handle_sigterm(self, signum, frame):
76 sys.exit(0)
77
78 def start(self):
79 """Start the daemon."""
80 self.daemonize()
81 self.run()
82
83 def run(self):
84 """Override this method when subclassing Daemon.
85
86 start() will call this method after the process has daemonized.
87 """
88 pass
89
90
91class MonitorNeutronAgentsDaemon(Daemon):
92 def __init__(self):
93 super(MonitorNeutronAgentsDaemon, self).__init__()
94 logging.setup('Neuron-HA-Monitor')
95 LOG.info('Monitor Neutron Agent Loop Init')
96 self.hostname = None
97 self.env = {}
98
99 def get_env(self):
100 envrc_f = '/etc/legacy_ha_envrc'
101 envrc_f_m = False
102 if os.path.isfile(envrc_f):
103 ctime = time.ctime(os.stat(envrc_f).st_ctime)
104 mtime = time.ctime(os.stat(envrc_f).st_mtime)
105 if ctime != mtime:
106 envrc_f_m = True
107
108 if not self.env or envrc_f_m:
109 with open(envrc_f, 'r') as f:
110 for line in f:
111 data = line.strip().split('=')
112 if data and data[0] and data[1]:
113 self.env[data[0]] = data[1]
114 else:
115 raise Exception("OpenStack env data uncomplete.")
116 return self.env
117
118 def get_hostname(self):
119 if not self.hostname:
120 self.hostname = socket.gethostname()
121 return self.hostname
122
123 def get_root_helper(self):
124 return 'sudo'
125
126 def list_monitor_res(self):
127 # List crm resource 'cl_monitor' running node
128 nodes = []
129 cmd = ['crm', 'resource', 'show', 'cl_monitor']
130 output = subprocess.check_output(cmd)
131 pattern = re.compile('resource cl_monitor is running on: (.*) ')
132 nodes = pattern.findall(output)
133 return nodes
134
135 def get_crm_res_lead_node(self):
136 nodes = self.list_monitor_res()
137 if nodes:
138 return nodes[0].strip()
139 else:
140 LOG.error('Failed to get crm resource.')
141 return None
142
143 def unplug_device(self, device):
144 try:
145 device.link.delete()
146 except RuntimeError:
147 root_helper = self.get_root_helper()
148 # Maybe the device is OVS port, so try to delete
149 bridge_name = ovs_lib.get_bridge_for_iface(root_helper,
150 device.name)
151 if bridge_name:
152 bridge = ovs_lib.OVSBridge(bridge_name, root_helper)
153 bridge.delete_port(device.name)
154 else:
155 LOG.debug('Unable to find bridge for device: %s', device.name)
156
157 def get_pattern(self, key, text):
158 if not key or not text:
159 LOG.debug('Invalid key(%s) or text(%s)' % (key, text))
160 return None
161
162 pattern = re.compile('%s' % key)
163 result = pattern.findall(text)
164 return result
165
166 def _cleanup(self, key1, key2):
167 namespaces = []
168 if key1:
169 for k in key1.iterkeys():
170 namespaces.append(key2 + '-' + k)
171 else:
172 try:
173 cmd = ['sudo', 'ip', 'netns']
174 ns = subprocess.check_output(cmd)
175 namespaces = self.get_pattern('(%s.*)' % key2, ns)
176 except RuntimeError as e:
177 LOG.error('Failed to list namespace, (%s)' % e)
178
179 if namespaces:
180 LOG.info('Namespaces: %s is going to be deleted.' % namespaces)
181 self.destroy_namespaces(namespaces)
182
183 def cleanup_dhcp(self, networks):
184 self._cleanup(networks, 'qdhcp')
185
186 def cleanup_router(self, routers):
187 self._cleanup(routers, 'qrouter')
188
189 def destroy_namespaces(self, namespaces):
190 try:
191 root_helper = self.get_root_helper()
192 for namespace in namespaces:
193 ip = ip_lib.IPWrapper(root_helper, namespace)
194 if ip.netns.exists(namespace):
195 for device in ip.get_devices(exclude_loopback=True):
196 self.unplug_device(device)
197
198 ip.garbage_collect_namespace()
199 except Exception:
200 LOG.exception('Error unable to destroy namespace: %s', namespace)
201
202 def is_same_host(self, host):
203 return str(host).strip() == self.get_hostname()
204
205 def validate_reschedule(self):
206 crm_no_1_node = self.get_crm_res_lead_node()
207 if not crm_no_1_node:
208 LOG.error('No crm first node could be found.')
209 return False
210
211 if not self.is_same_host(crm_no_1_node):
212 LOG.warn('Only the first crm node %s could reschedule. '
213 % crm_no_1_node)
214 return False
215 return True
216
217 def l3_agents_reschedule(self, l3_agents, routers, quantum):
218 if not self.validate_reschedule():
219 return
220
221 index = 0
222 for router_id in routers:
223 agent = index % len(l3_agents)
224 LOG.info('Moving router %s from %s to %s' %
225 (router_id, routers[router_id], l3_agents[agent]))
226 try:
227 quantum.remove_router_from_l3_agent(l3_agent=routers[router_id],
228 router_id=router_id)
229 except exceptions.NeutronException as e:
230 LOG.error('Remove router raised exception: %s' % e)
231 try:
232 quantum.add_router_to_l3_agent(l3_agent=l3_agents[agent],
233 body={'router_id': router_id})
234 except exceptions.NeutronException as e:
235 LOG.error('Add router raised exception: %s' % e)
236 index += 1
237
238 def dhcp_agents_reschedule(self, dhcp_agents, networks, quantum):
239 if not self.validate_reschedule():
240 return
241
242 index = 0
243 for network_id in networks:
244 agent = index % len(dhcp_agents)
245 LOG.info('Moving network %s from %s to %s' % (network_id,
246 networks[network_id], dhcp_agents[agent]))
247 try:
248 quantum.remove_network_from_dhcp_agent(
249 dhcp_agent=networks[network_id], network_id=network_id)
250 except exceptions.NeutronException as e:
251 LOG.error('Remove network raised exception: %s' % e)
252 try:
253 quantum.add_network_to_dhcp_agent(
254 dhcp_agent=dhcp_agents[agent],
255 body={'network_id': network_id})
256 except exceptions.NeutronException as e:
257 LOG.error('Add network raised exception: %s' % e)
258 index += 1
259
260 def get_quantum_client(self):
261 env = self.get_env()
262 if not env:
263 LOG.info('Unable to re-assign resources at this time')
264 return None
265
266 try:
267 from quantumclient.v2_0 import client
268 except ImportError:
269 # Try to import neutronclient instead for havana+
270 from neutronclient.v2_0 import client
271
272 auth_url = '%(auth_protocol)s://%(keystone_host)s:%(auth_port)s/v2.0' \
273 % env
274 quantum = client.Client(username=env['service_username'],
275 password=env['service_password'],
276 tenant_name=env['service_tenant'],
277 auth_url=auth_url,
278 region_name=env['region'])
279 return quantum
280
281 def reassign_agent_resources(self, quantum=None):
282 """Use agent scheduler API to detect down agents and re-schedule"""
283 if not quantum:
284 LOG.error('Failed to get quantum client.')
285 return
286
287 try:
288 DHCP_AGENT = "DHCP Agent"
289 L3_AGENT = "L3 Agent"
290 agents = quantum.list_agents(agent_type=DHCP_AGENT)
291 except exceptions.NeutronException as e:
292 LOG.error('Failed to get quantum agents, %s' % e)
293 return
294
295 dhcp_agents = []
296 l3_agents = []
297 networks = {}
298 for agent in agents['agents']:
299 hosted_networks = quantum.list_networks_on_dhcp_agent(
300 agent['id'])['networks']
301 if not agent['alive']:
302 LOG.info('DHCP Agent %s down' % agent['id'])
303 for network in hosted_networks:
304 networks[network['id']] = agent['id']
305 if self.is_same_host(agent['host']):
306 self.cleanup_dhcp(networks)
307 else:
308 dhcp_agents.append(agent['id'])
309 LOG.info('Active dhcp agents: %s' % agent['id'])
310 if not hosted_networks and self.is_same_host(agent['host']):
311 self.cleanup_dhcp(None)
312
313 agents = quantum.list_agents(agent_type=L3_AGENT)
314 routers = {}
315 for agent in agents['agents']:
316 hosted_routers = quantum.list_routers_on_l3_agent(
317 agent['id'])['routers']
318 if not agent['alive']:
319 LOG.info('L3 Agent %s down' % agent['id'])
320 for router in hosted_routers:
321 routers[router['id']] = agent['id']
322 if self.is_same_host(agent['host']):
323 self.cleanup_router(routers)
324 else:
325 l3_agents.append(agent['id'])
326 LOG.info('Active l3 agents: %s' % agent['id'])
327 if not hosted_routers and self.is_same_host(agent['host']):
328 self.cleanup_router(None)
329
330 if not networks and not routers:
331 LOG.info('No networks and routers hosted on failed agents.')
332 return
333
334 if len(dhcp_agents) == 0 and len(l3_agents) == 0:
335 LOG.error('Unable to relocate resources, there are %s dhcp_agents '
336 'and %s l3_agents in this cluster' % (len(dhcp_agents),
337 len(l3_agents)))
338 return
339
340 if len(l3_agents) > 0:
341 self.l3_agents_reschedule(l3_agents, routers, quantum)
342 # new l3 node will not create a tunnel if don't restart ovs process
343
344 if len(dhcp_agents) > 0:
345 self.dhcp_agents_reschedule(dhcp_agents, networks, quantum)
346
347
348 def check_ovs_tunnel(self, quantum=None):
349 '''
350 Work around for Bug #1411163
351 No fdb entries added when failover dhcp and l3 agent together.
352 '''
353 if not quantum:
354 LOG.error('Failed to get quantum client.')
355 return
356
357 try:
358 OVS_AGENT = 'Open vSwitch agent'
359 agents = quantum.list_agents(agent_type=OVS_AGENT)
360 except exceptions.NeutronException as e:
361 LOG.error('No ovs agent found on localhost, error:%s.' % e)
362 return
363
364 for agent in agents['agents']:
365 if self.is_same_host(agent['host']) and agent['alive']:
366 conf = agent['configurations']
367 if 'gre' in conf['tunnel_types'] and conf['l2_population'] \
368 and conf['devices']:
369 LOG.debug('local ovs agent:%s' % agent)
370 ovs_output = subprocess.check_output(['ovs-vsctl',
371 'list-ports', 'br-tun'])
372 ports = ovs_output.strip().split('\n')
373 look_up_gre_port = False
374 for port in ports:
375 if port.startswith('gre-'):
376 look_up_gre_port = True
377 break
378 if not look_up_gre_port:
379 try:
380 LOG.error('Local agent has devices, but no ovs tunnel is created,'
381 'restart ovs agent.')
382 cmd = ['sudo', 'service', 'neutron-plugin-openvswitch-agent',
383 'restart']
384 subprocess.call(cmd)
385 except subprocess.CalledProcessError:
386 LOG.error('Failed to restart neutron-plugin-openvswitch-agent.')
387
388 def check_local_agents(self):
389 services = ['openvswitch-switch', 'neutron-dhcp-agent',
390 'neutron-metadata-agent', 'neutron-vpn-agent']
391 for s in services:
392 status = ['sudo', 'service', s, 'status']
393 restart = ['sudo', 'service', s, 'restart']
394 start = ['sudo', 'service', s, 'start']
395 stop = '%s stop/waiting' % s
396 try:
397 output = subprocess.check_output(status)
398 if output.strip() == stop:
399 subprocess.check_output(start)
400 LOG.error('Restart service: %s' % s)
401 if s == 'neutron-metadata-agent':
402 subprocess.check_output(['sudo', 'service',
403 'neutron-vpn-agent',
404 'restart'])
405 LOG.error('Restart neutron-vpn-agent')
406 except subprocess.CalledProcessError:
407 LOG.error('Restart service: %s' % s)
408 subprocess.check_output(restart)
409 if s == 'neutron-metadata-agent':
410 subprocess.check_output(['sudo', 'service',
411 'neutron-vpn-agent',
412 'restart'])
413
414 def run(self):
415 while True:
416 LOG.info('Monitor Neutron HA Agent Loop Start')
417 quantum = self.get_quantum_client()
418 self.reassign_agent_resources(quantum=quantum)
419 self.check_ovs_tunnel(quantum=quantum)
420 self.check_local_agents()
421 LOG.info('sleep %s' % cfg.CONF.check_interval)
422 time.sleep(float(cfg.CONF.check_interval))
423
424
425if __name__ == '__main__':
426 opts = [
427 cfg.StrOpt('check_interval',
428 default=8,
429 help='Check Neutron Agents interval.'),
430 ]
431
432 cfg.CONF.register_cli_opts(opts)
433 cfg.CONF(project='monitor_neutron_agents', default_config_files=[])
434 logging.setup('Neuron-HA-Monitor')
435 monitor_daemon = MonitorNeutronAgentsDaemon()
436 monitor_daemon.start()
4370
=== removed symlink 'hooks/amqp-nova-relation-changed'
=== target was u'quantum_hooks.py'
=== removed symlink 'hooks/amqp-nova-relation-departed'
=== target was u'quantum_hooks.py'
=== removed symlink 'hooks/amqp-nova-relation-joined'
=== target was u'quantum_hooks.py'
=== removed symlink 'hooks/amqp-relation-changed'
=== target was u'quantum_hooks.py'
=== removed symlink 'hooks/amqp-relation-departed'
=== target was u'quantum_hooks.py'
=== removed symlink 'hooks/amqp-relation-joined'
=== target was u'quantum_hooks.py'
=== removed directory 'hooks/charmhelpers/contrib'
=== removed file 'hooks/charmhelpers/contrib/__init__.py'
--- hooks/charmhelpers/contrib/__init__.py 2015-01-23 11:08:26 +0000
+++ hooks/charmhelpers/contrib/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,15 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
160
=== removed directory 'hooks/charmhelpers/contrib/charmsupport'
=== removed file 'hooks/charmhelpers/contrib/charmsupport/__init__.py'
--- hooks/charmhelpers/contrib/charmsupport/__init__.py 2015-01-23 11:08:26 +0000
+++ hooks/charmhelpers/contrib/charmsupport/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,15 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
160
=== removed file 'hooks/charmhelpers/contrib/charmsupport/nrpe.py'
--- hooks/charmhelpers/contrib/charmsupport/nrpe.py 2015-04-19 09:02:48 +0000
+++ hooks/charmhelpers/contrib/charmsupport/nrpe.py 1970-01-01 00:00:00 +0000
@@ -1,360 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17"""Compatibility with the nrpe-external-master charm"""
18# Copyright 2012 Canonical Ltd.
19#
20# Authors:
21# Matthew Wedgwood <matthew.wedgwood@canonical.com>
22
23import subprocess
24import pwd
25import grp
26import os
27import glob
28import shutil
29import re
30import shlex
31import yaml
32
33from charmhelpers.core.hookenv import (
34 config,
35 local_unit,
36 log,
37 relation_ids,
38 relation_set,
39 relations_of_type,
40)
41
42from charmhelpers.core.host import service
43
44# This module adds compatibility with the nrpe-external-master and plain nrpe
45# subordinate charms. To use it in your charm:
46#
47# 1. Update metadata.yaml
48#
49# provides:
50# (...)
51# nrpe-external-master:
52# interface: nrpe-external-master
53# scope: container
54#
55# and/or
56#
57# provides:
58# (...)
59# local-monitors:
60# interface: local-monitors
61# scope: container
62
63#
64# 2. Add the following to config.yaml
65#
66# nagios_context:
67# default: "juju"
68# type: string
69# description: |
70# Used by the nrpe subordinate charms.
71# A string that will be prepended to instance name to set the host name
72# in nagios. So for instance the hostname would be something like:
73# juju-myservice-0
74# If you're running multiple environments with the same services in them
75# this allows you to differentiate between them.
76# nagios_servicegroups:
77# default: ""
78# type: string
79# description: |
80# A comma-separated list of nagios servicegroups.
81# If left empty, the nagios_context will be used as the servicegroup
82#
83# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
84#
85# 4. Update your hooks.py with something like this:
86#
87# from charmsupport.nrpe import NRPE
88# (...)
89# def update_nrpe_config():
90# nrpe_compat = NRPE()
91# nrpe_compat.add_check(
92# shortname = "myservice",
93# description = "Check MyService",
94# check_cmd = "check_http -w 2 -c 10 http://localhost"
95# )
96# nrpe_compat.add_check(
97# "myservice_other",
98# "Check for widget failures",
99# check_cmd = "/srv/myapp/scripts/widget_check"
100# )
101# nrpe_compat.write()
102#
103# def config_changed():
104# (...)
105# update_nrpe_config()
106#
107# def nrpe_external_master_relation_changed():
108# update_nrpe_config()
109#
110# def local_monitors_relation_changed():
111# update_nrpe_config()
112#
113# 5. ln -s hooks.py nrpe-external-master-relation-changed
114# ln -s hooks.py local-monitors-relation-changed
115
116
117class CheckException(Exception):
118 pass
119
120
121class Check(object):
122 shortname_re = '[A-Za-z0-9-_]+$'
123 service_template = ("""
124#---------------------------------------------------
125# This file is Juju managed
126#---------------------------------------------------
127define service {{
128 use active-service
129 host_name {nagios_hostname}
130 service_description {nagios_hostname}[{shortname}] """
131 """{description}
132 check_command check_nrpe!{command}
133 servicegroups {nagios_servicegroup}
134}}
135""")
136
137 def __init__(self, shortname, description, check_cmd):
138 super(Check, self).__init__()
139 # XXX: could be better to calculate this from the service name
140 if not re.match(self.shortname_re, shortname):
141 raise CheckException("shortname must match {}".format(
142 Check.shortname_re))
143 self.shortname = shortname
144 self.command = "check_{}".format(shortname)
145 # Note: a set of invalid characters is defined by the
146 # Nagios server config
147 # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
148 self.description = description
149 self.check_cmd = self._locate_cmd(check_cmd)
150
151 def _locate_cmd(self, check_cmd):
152 search_path = (
153 '/usr/lib/nagios/plugins',
154 '/usr/local/lib/nagios/plugins',
155 )
156 parts = shlex.split(check_cmd)
157 for path in search_path:
158 if os.path.exists(os.path.join(path, parts[0])):
159 command = os.path.join(path, parts[0])
160 if len(parts) > 1:
161 command += " " + " ".join(parts[1:])
162 return command
163 log('Check command not found: {}'.format(parts[0]))
164 return ''
165
166 def write(self, nagios_context, hostname, nagios_servicegroups):
167 nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
168 self.command)
169 with open(nrpe_check_file, 'w') as nrpe_check_config:
170 nrpe_check_config.write("# check {}\n".format(self.shortname))
171 nrpe_check_config.write("command[{}]={}\n".format(
172 self.command, self.check_cmd))
173
174 if not os.path.exists(NRPE.nagios_exportdir):
175 log('Not writing service config as {} is not accessible'.format(
176 NRPE.nagios_exportdir))
177 else:
178 self.write_service_config(nagios_context, hostname,
179 nagios_servicegroups)
180
181 def write_service_config(self, nagios_context, hostname,
182 nagios_servicegroups):
183 for f in os.listdir(NRPE.nagios_exportdir):
184 if re.search('.*{}.cfg'.format(self.command), f):
185 os.remove(os.path.join(NRPE.nagios_exportdir, f))
186
187 templ_vars = {
188 'nagios_hostname': hostname,
189 'nagios_servicegroup': nagios_servicegroups,
190 'description': self.description,
191 'shortname': self.shortname,
192 'command': self.command,
193 }
194 nrpe_service_text = Check.service_template.format(**templ_vars)
195 nrpe_service_file = '{}/service__{}_{}.cfg'.format(
196 NRPE.nagios_exportdir, hostname, self.command)
197 with open(nrpe_service_file, 'w') as nrpe_service_config:
198 nrpe_service_config.write(str(nrpe_service_text))
199
200 def run(self):
201 subprocess.call(self.check_cmd)
202
203
204class NRPE(object):
205 nagios_logdir = '/var/log/nagios'
206 nagios_exportdir = '/var/lib/nagios/export'
207 nrpe_confdir = '/etc/nagios/nrpe.d'
208
209 def __init__(self, hostname=None):
210 super(NRPE, self).__init__()
211 self.config = config()
212 self.nagios_context = self.config['nagios_context']
213 if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
214 self.nagios_servicegroups = self.config['nagios_servicegroups']
215 else:
216 self.nagios_servicegroups = self.nagios_context
217 self.unit_name = local_unit().replace('/', '-')
218 if hostname:
219 self.hostname = hostname
220 else:
221 self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
222 self.checks = []
223
224 def add_check(self, *args, **kwargs):
225 self.checks.append(Check(*args, **kwargs))
226
227 def write(self):
228 try:
229 nagios_uid = pwd.getpwnam('nagios').pw_uid
230 nagios_gid = grp.getgrnam('nagios').gr_gid
231 except:
232 log("Nagios user not set up, nrpe checks not updated")
233 return
234
235 if not os.path.exists(NRPE.nagios_logdir):
236 os.mkdir(NRPE.nagios_logdir)
237 os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
238
239 nrpe_monitors = {}
240 monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
241 for nrpecheck in self.checks:
242 nrpecheck.write(self.nagios_context, self.hostname,
243 self.nagios_servicegroups)
244 nrpe_monitors[nrpecheck.shortname] = {
245 "command": nrpecheck.command,
246 }
247
248 service('restart', 'nagios-nrpe-server')
249
250 monitor_ids = relation_ids("local-monitors") + \
251 relation_ids("nrpe-external-master")
252 for rid in monitor_ids:
253 relation_set(relation_id=rid, monitors=yaml.dump(monitors))
254
255
256def get_nagios_hostcontext(relation_name='nrpe-external-master'):
257 """
258 Query relation with nrpe subordinate, return the nagios_host_context
259
260 :param str relation_name: Name of relation nrpe sub joined to
261 """
262 for rel in relations_of_type(relation_name):
263 if 'nagios_hostname' in rel:
264 return rel['nagios_host_context']
265
266
267def get_nagios_hostname(relation_name='nrpe-external-master'):
268 """
269 Query relation with nrpe subordinate, return the nagios_hostname
270
271 :param str relation_name: Name of relation nrpe sub joined to
272 """
273 for rel in relations_of_type(relation_name):
274 if 'nagios_hostname' in rel:
275 return rel['nagios_hostname']
276
277
278def get_nagios_unit_name(relation_name='nrpe-external-master'):
279 """
280 Return the nagios unit name prepended with host_context if needed
281
282 :param str relation_name: Name of relation nrpe sub joined to
283 """
284 host_context = get_nagios_hostcontext(relation_name)
285 if host_context:
286 unit = "%s:%s" % (host_context, local_unit())
287 else:
288 unit = local_unit()
289 return unit
290
291
292def add_init_service_checks(nrpe, services, unit_name):
293 """
294 Add checks for each service in list
295
296 :param NRPE nrpe: NRPE object to add check to
297 :param list services: List of services to check
298 :param str unit_name: Unit name to use in check description
299 """
300 for svc in services:
301 upstart_init = '/etc/init/%s.conf' % svc
302 sysv_init = '/etc/init.d/%s' % svc
303 if os.path.exists(upstart_init):
304 nrpe.add_check(
305 shortname=svc,
306 description='process check {%s}' % unit_name,
307 check_cmd='check_upstart_job %s' % svc
308 )
309 elif os.path.exists(sysv_init):
310 cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
311 cron_file = ('*/5 * * * * root '
312 '/usr/local/lib/nagios/plugins/check_exit_status.pl '
313 '-s /etc/init.d/%s status > '
314 '/var/lib/nagios/service-check-%s.txt\n' % (svc,
315 svc)
316 )
317 f = open(cronpath, 'w')
318 f.write(cron_file)
319 f.close()
320 nrpe.add_check(
321 shortname=svc,
322 description='process check {%s}' % unit_name,
323 check_cmd='check_status_file.py -f '
324 '/var/lib/nagios/service-check-%s.txt' % svc,
325 )
326
327
328def copy_nrpe_checks():
329 """
330 Copy the nrpe checks into place
331
332 """
333 NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
334 nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
335 'charmhelpers', 'contrib', 'openstack',
336 'files')
337
338 if not os.path.exists(NAGIOS_PLUGINS):
339 os.makedirs(NAGIOS_PLUGINS)
340 for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
341 if os.path.isfile(fname):
342 shutil.copy2(fname,
343 os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
344
345
346def add_haproxy_checks(nrpe, unit_name):
347 """
348 Add checks for each service in list
349
350 :param NRPE nrpe: NRPE object to add check to
351 :param str unit_name: Unit name to use in check description
352 """
353 nrpe.add_check(
354 shortname='haproxy_servers',
355 description='Check HAProxy {%s}' % unit_name,
356 check_cmd='check_haproxy.sh')
357 nrpe.add_check(
358 shortname='haproxy_queue',
359 description='Check HAProxy queue depth {%s}' % unit_name,
360 check_cmd='check_haproxy_queue_depth.sh')
3610
=== removed file 'hooks/charmhelpers/contrib/charmsupport/volumes.py'
--- hooks/charmhelpers/contrib/charmsupport/volumes.py 2015-01-23 11:08:26 +0000
+++ hooks/charmhelpers/contrib/charmsupport/volumes.py 1970-01-01 00:00:00 +0000
@@ -1,175 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17'''
18Functions for managing volumes in juju units. One volume is supported per unit.
19Subordinates may have their own storage, provided it is on its own partition.
20
21Configuration stanzas::
22
23 volume-ephemeral:
24 type: boolean
25 default: true
26 description: >
27 If false, a volume is mounted as sepecified in "volume-map"
28 If true, ephemeral storage will be used, meaning that log data
29 will only exist as long as the machine. YOU HAVE BEEN WARNED.
30 volume-map:
31 type: string
32 default: {}
33 description: >
34 YAML map of units to device names, e.g:
35 "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
36 Service units will raise a configure-error if volume-ephemeral
37 is 'true' and no volume-map value is set. Use 'juju set' to set a
38 value and 'juju resolved' to complete configuration.
39
40Usage::
41
42 from charmsupport.volumes import configure_volume, VolumeConfigurationError
43 from charmsupport.hookenv import log, ERROR
44 def post_mount_hook():
45 stop_service('myservice')
46 def post_mount_hook():
47 start_service('myservice')
48
49 if __name__ == '__main__':
50 try:
51 configure_volume(before_change=pre_mount_hook,
52 after_change=post_mount_hook)
53 except VolumeConfigurationError:
54 log('Storage could not be configured', ERROR)
55
56'''
57
58# XXX: Known limitations
59# - fstab is neither consulted nor updated
60
61import os
62from charmhelpers.core import hookenv
63from charmhelpers.core import host
64import yaml
65
66
67MOUNT_BASE = '/srv/juju/volumes'
68
69
70class VolumeConfigurationError(Exception):
71 '''Volume configuration data is missing or invalid'''
72 pass
73
74
75def get_config():
76 '''Gather and sanity-check volume configuration data'''
77 volume_config = {}
78 config = hookenv.config()
79
80 errors = False
81
82 if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
83 volume_config['ephemeral'] = True
84 else:
85 volume_config['ephemeral'] = False
86
87 try:
88 volume_map = yaml.safe_load(config.get('volume-map', '{}'))
89 except yaml.YAMLError as e:
90 hookenv.log("Error parsing YAML volume-map: {}".format(e),
91 hookenv.ERROR)
92 errors = True
93 if volume_map is None:
94 # probably an empty string
95 volume_map = {}
96 elif not isinstance(volume_map, dict):
97 hookenv.log("Volume-map should be a dictionary, not {}".format(
98 type(volume_map)))
99 errors = True
100
101 volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
102 if volume_config['device'] and volume_config['ephemeral']:
103 # asked for ephemeral storage but also defined a volume ID
104 hookenv.log('A volume is defined for this unit, but ephemeral '
105 'storage was requested', hookenv.ERROR)
106 errors = True
107 elif not volume_config['device'] and not volume_config['ephemeral']:
108 # asked for permanent storage but did not define volume ID
109 hookenv.log('Ephemeral storage was requested, but there is no volume '
110 'defined for this unit.', hookenv.ERROR)
111 errors = True
112
113 unit_mount_name = hookenv.local_unit().replace('/', '-')
114 volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
115
116 if errors:
117 return None
118 return volume_config
119
120
121def mount_volume(config):
122 if os.path.exists(config['mountpoint']):
123 if not os.path.isdir(config['mountpoint']):
124 hookenv.log('Not a directory: {}'.format(config['mountpoint']))
125 raise VolumeConfigurationError()
126 else:
127 host.mkdir(config['mountpoint'])
128 if os.path.ismount(config['mountpoint']):
129 unmount_volume(config)
130 if not host.mount(config['device'], config['mountpoint'], persist=True):
131 raise VolumeConfigurationError()
132
133
134def unmount_volume(config):
135 if os.path.ismount(config['mountpoint']):
136 if not host.umount(config['mountpoint'], persist=True):
137 raise VolumeConfigurationError()
138
139
140def managed_mounts():
141 '''List of all mounted managed volumes'''
142 return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
143
144
145def configure_volume(before_change=lambda: None, after_change=lambda: None):
146 '''Set up storage (or don't) according to the charm's volume configuration.
147 Returns the mount point or "ephemeral". before_change and after_change
148 are optional functions to be called if the volume configuration changes.
149 '''
150
151 config = get_config()
152 if not config:
153 hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
154 raise VolumeConfigurationError()
155
156 if config['ephemeral']:
157 if os.path.ismount(config['mountpoint']):
158 before_change()
159 unmount_volume(config)
160 after_change()
161 return 'ephemeral'
162 else:
163 # persistent storage
164 if os.path.ismount(config['mountpoint']):
165 mounts = dict(managed_mounts())
166 if mounts.get(config['mountpoint']) != config['device']:
167 before_change()
168 unmount_volume(config)
169 mount_volume(config)
170 after_change()
171 else:
172 before_change()
173 mount_volume(config)
174 after_change()
175 return config['mountpoint']
1760
=== removed directory 'hooks/charmhelpers/contrib/hahelpers'
=== removed file 'hooks/charmhelpers/contrib/hahelpers/__init__.py'
--- hooks/charmhelpers/contrib/hahelpers/__init__.py 2015-01-23 11:08:26 +0000
+++ hooks/charmhelpers/contrib/hahelpers/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,15 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
160
=== removed file 'hooks/charmhelpers/contrib/hahelpers/apache.py'
--- hooks/charmhelpers/contrib/hahelpers/apache.py 2015-02-24 12:07:07 +0000
+++ hooks/charmhelpers/contrib/hahelpers/apache.py 1970-01-01 00:00:00 +0000
@@ -1,82 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17#
18# Copyright 2012 Canonical Ltd.
19#
20# This file is sourced from lp:openstack-charm-helpers
21#
22# Authors:
23# James Page <james.page@ubuntu.com>
24# Adam Gandelman <adamg@ubuntu.com>
25#
26
27import subprocess
28
29from charmhelpers.core.hookenv import (
30 config as config_get,
31 relation_get,
32 relation_ids,
33 related_units as relation_list,
34 log,
35 INFO,
36)
37
38
39def get_cert(cn=None):
40 # TODO: deal with multiple https endpoints via charm config
41 cert = config_get('ssl_cert')
42 key = config_get('ssl_key')
43 if not (cert and key):
44 log("Inspecting identity-service relations for SSL certificate.",
45 level=INFO)
46 cert = key = None
47 if cn:
48 ssl_cert_attr = 'ssl_cert_{}'.format(cn)
49 ssl_key_attr = 'ssl_key_{}'.format(cn)
50 else:
51 ssl_cert_attr = 'ssl_cert'
52 ssl_key_attr = 'ssl_key'
53 for r_id in relation_ids('identity-service'):
54 for unit in relation_list(r_id):
55 if not cert:
56 cert = relation_get(ssl_cert_attr,
57 rid=r_id, unit=unit)
58 if not key:
59 key = relation_get(ssl_key_attr,
60 rid=r_id, unit=unit)
61 return (cert, key)
62
63
64def get_ca_cert():
65 ca_cert = config_get('ssl_ca')
66 if ca_cert is None:
67 log("Inspecting identity-service relations for CA SSL certificate.",
68 level=INFO)
69 for r_id in relation_ids('identity-service'):
70 for unit in relation_list(r_id):
71 if ca_cert is None:
72 ca_cert = relation_get('ca_cert',
73 rid=r_id, unit=unit)
74 return ca_cert
75
76
77def install_ca_cert(ca_cert):
78 if ca_cert:
79 with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
80 'w') as crt:
81 crt.write(ca_cert)
82 subprocess.check_call(['update-ca-certificates', '--fresh'])
830
=== removed file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2015-03-31 15:13:53 +0000
+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000
@@ -1,272 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17#
18# Copyright 2012 Canonical Ltd.
19#
20# Authors:
21# James Page <james.page@ubuntu.com>
22# Adam Gandelman <adamg@ubuntu.com>
23#
24
25"""
26Helpers for clustering and determining "cluster leadership" and other
27clustering-related helpers.
28"""
29
30import subprocess
31import os
32
33from socket import gethostname as get_unit_hostname
34
35import six
36
37from charmhelpers.core.hookenv import (
38 log,
39 relation_ids,
40 related_units as relation_list,
41 relation_get,
42 config as config_get,
43 INFO,
44 ERROR,
45 WARNING,
46 unit_get,
47)
48from charmhelpers.core.decorators import (
49 retry_on_exception,
50)
51from charmhelpers.core.strutils import (
52 bool_from_string,
53)
54
55
56class HAIncompleteConfig(Exception):
57 pass
58
59
60class CRMResourceNotFound(Exception):
61 pass
62
63
64def is_elected_leader(resource):
65 """
66 Returns True if the charm executing this is the elected cluster leader.
67
68 It relies on two mechanisms to determine leadership:
69 1. If the charm is part of a corosync cluster, call corosync to
70 determine leadership.
71 2. If the charm is not part of a corosync cluster, the leader is
72 determined as being "the alive unit with the lowest unit numer". In
73 other words, the oldest surviving unit.
74 """
75 if is_clustered():
76 if not is_crm_leader(resource):
77 log('Deferring action to CRM leader.', level=INFO)
78 return False
79 else:
80 peers = peer_units()
81 if peers and not oldest_peer(peers):
82 log('Deferring action to oldest service unit.', level=INFO)
83 return False
84 return True
85
86
87def is_clustered():
88 for r_id in (relation_ids('ha') or []):
89 for unit in (relation_list(r_id) or []):
90 clustered = relation_get('clustered',
91 rid=r_id,
92 unit=unit)
93 if clustered:
94 return True
95 return False
96
97
98@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
99def is_crm_leader(resource, retry=False):
100 """
101 Returns True if the charm calling this is the elected corosync leader,
102 as returned by calling the external "crm" command.
103
104 We allow this operation to be retried to avoid the possibility of getting a
105 false negative. See LP #1396246 for more info.
106 """
107 cmd = ['crm', 'resource', 'show', resource]
108 try:
109 status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
110 if not isinstance(status, six.text_type):
111 status = six.text_type(status, "utf-8")
112 except subprocess.CalledProcessError:
113 status = None
114
115 if status and get_unit_hostname() in status:
116 return True
117
118 if status and "resource %s is NOT running" % (resource) in status:
119 raise CRMResourceNotFound("CRM resource %s not found" % (resource))
120
121 return False
122
123
124def is_leader(resource):
125 log("is_leader is deprecated. Please consider using is_crm_leader "
126 "instead.", level=WARNING)
127 return is_crm_leader(resource)
128
129
130def peer_units(peer_relation="cluster"):
131 peers = []
132 for r_id in (relation_ids(peer_relation) or []):
133 for unit in (relation_list(r_id) or []):
134 peers.append(unit)
135 return peers
136
137
138def peer_ips(peer_relation='cluster', addr_key='private-address'):
139 '''Return a dict of peers and their private-address'''
140 peers = {}
141 for r_id in relation_ids(peer_relation):
142 for unit in relation_list(r_id):
143 peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
144 return peers
145
146
147def oldest_peer(peers):
148 """Determines who the oldest peer is by comparing unit numbers."""
149 local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
150 for peer in peers:
151 remote_unit_no = int(peer.split('/')[1])
152 if remote_unit_no < local_unit_no:
153 return False
154 return True
155
156
157def eligible_leader(resource):
158 log("eligible_leader is deprecated. Please consider using "
159 "is_elected_leader instead.", level=WARNING)
160 return is_elected_leader(resource)
161
162
163def https():
164 '''
165 Determines whether enough data has been provided in configuration
166 or relation data to configure HTTPS
167 .
168 returns: boolean
169 '''
170 use_https = config_get('use-https')
171 if use_https and bool_from_string(use_https):
172 return True
173 if config_get('ssl_cert') and config_get('ssl_key'):
174 return True
175 for r_id in relation_ids('identity-service'):
176 for unit in relation_list(r_id):
177 # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
178 rel_state = [
179 relation_get('https_keystone', rid=r_id, unit=unit),
180 relation_get('ca_cert', rid=r_id, unit=unit),
181 ]
182 # NOTE: works around (LP: #1203241)
183 if (None not in rel_state) and ('' not in rel_state):
184 return True
185 return False
186
187
188def determine_api_port(public_port, singlenode_mode=False):
189 '''
190 Determine correct API server listening port based on
191 existence of HTTPS reverse proxy and/or haproxy.
192
193 public_port: int: standard public port for given service
194
195 singlenode_mode: boolean: Shuffle ports when only a single unit is present
196
197 returns: int: the correct listening port for the API service
198 '''
199 i = 0
200 if singlenode_mode:
201 i += 1
202 elif len(peer_units()) > 0 or is_clustered():
203 i += 1
204 if https():
205 i += 1
206 return public_port - (i * 10)
207
208
209def determine_apache_port(public_port, singlenode_mode=False):
210 '''
211 Description: Determine correct apache listening port based on public IP +
212 state of the cluster.
213
214 public_port: int: standard public port for given service
215
216 singlenode_mode: boolean: Shuffle ports when only a single unit is present
217
218 returns: int: the correct listening port for the HAProxy service
219 '''
220 i = 0
221 if singlenode_mode:
222 i += 1
223 elif len(peer_units()) > 0 or is_clustered():
224 i += 1
225 return public_port - (i * 10)
226
227
228def get_hacluster_config(exclude_keys=None):
229 '''
230 Obtains all relevant configuration from charm configuration required
231 for initiating a relation to hacluster:
232
233 ha-bindiface, ha-mcastport, vip
234
235 param: exclude_keys: list of setting key(s) to be excluded.
236 returns: dict: A dict containing settings keyed by setting name.
237 raises: HAIncompleteConfig if settings are missing.
238 '''
239 settings = ['ha-bindiface', 'ha-mcastport', 'vip']
240 conf = {}
241 for setting in settings:
242 if exclude_keys and setting in exclude_keys:
243 continue
244
245 conf[setting] = config_get(setting)
246 missing = []
247 [missing.append(s) for s, v in six.iteritems(conf) if v is None]
248 if missing:
249 log('Insufficient config data to configure hacluster.', level=ERROR)
250 raise HAIncompleteConfig
251 return conf
252
253
254def canonical_url(configs, vip_setting='vip'):
255 '''
256 Returns the correct HTTP URL to this host given the state of HTTPS
257 configuration and hacluster.
258
259 :configs : OSTemplateRenderer: A config tempating object to inspect for
260 a complete https context.
261
262 :vip_setting: str: Setting in charm config that specifies
263 VIP address.
264 '''
265 scheme = 'http'
266 if 'https' in configs.complete_contexts():
267 scheme = 'https'
268 if is_clustered():
269 addr = config_get(vip_setting)
270 else:
271 addr = unit_get('private-address')
272 return '%s://%s' % (scheme, addr)
2730
=== removed directory 'hooks/charmhelpers/contrib/network'
=== removed file 'hooks/charmhelpers/contrib/network/__init__.py'
--- hooks/charmhelpers/contrib/network/__init__.py 2015-01-23 11:08:26 +0000
+++ hooks/charmhelpers/contrib/network/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,15 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
160
=== removed file 'hooks/charmhelpers/contrib/network/ip.py'
--- hooks/charmhelpers/contrib/network/ip.py 2015-03-31 15:13:53 +0000
+++ hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000
@@ -1,450 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import glob
18import re
19import subprocess
20import six
21import socket
22
23from functools import partial
24
25from charmhelpers.core.hookenv import unit_get
26from charmhelpers.fetch import apt_install
27from charmhelpers.core.hookenv import (
28 log,
29 WARNING,
30)
31
32try:
33 import netifaces
34except ImportError:
35 apt_install('python-netifaces')
36 import netifaces
37
38try:
39 import netaddr
40except ImportError:
41 apt_install('python-netaddr')
42 import netaddr
43
44
45def _validate_cidr(network):
46 try:
47 netaddr.IPNetwork(network)
48 except (netaddr.core.AddrFormatError, ValueError):
49 raise ValueError("Network (%s) is not in CIDR presentation format" %
50 network)
51
52
53def no_ip_found_error_out(network):
54 errmsg = ("No IP address found in network: %s" % network)
55 raise ValueError(errmsg)
56
57
58def get_address_in_network(network, fallback=None, fatal=False):
59 """Get an IPv4 or IPv6 address within the network from the host.
60
61 :param network (str): CIDR presentation format. For example,
62 '192.168.1.0/24'.
63 :param fallback (str): If no address is found, return fallback.
64 :param fatal (boolean): If no address is found, fallback is not
65 set and fatal is True then exit(1).
66 """
67 if network is None:
68 if fallback is not None:
69 return fallback
70
71 if fatal:
72 no_ip_found_error_out(network)
73 else:
74 return None
75
76 _validate_cidr(network)
77 network = netaddr.IPNetwork(network)
78 for iface in netifaces.interfaces():
79 addresses = netifaces.ifaddresses(iface)
80 if network.version == 4 and netifaces.AF_INET in addresses:
81 addr = addresses[netifaces.AF_INET][0]['addr']
82 netmask = addresses[netifaces.AF_INET][0]['netmask']
83 cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
84 if cidr in network:
85 return str(cidr.ip)
86
87 if network.version == 6 and netifaces.AF_INET6 in addresses:
88 for addr in addresses[netifaces.AF_INET6]:
89 if not addr['addr'].startswith('fe80'):
90 cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
91 addr['netmask']))
92 if cidr in network:
93 return str(cidr.ip)
94
95 if fallback is not None:
96 return fallback
97
98 if fatal:
99 no_ip_found_error_out(network)
100
101 return None
102
103
104def is_ipv6(address):
105 """Determine whether provided address is IPv6 or not."""
106 try:
107 address = netaddr.IPAddress(address)
108 except netaddr.AddrFormatError:
109 # probably a hostname - so not an address at all!
110 return False
111
112 return address.version == 6
113
114
115def is_address_in_network(network, address):
116 """
117 Determine whether the provided address is within a network range.
118
119 :param network (str): CIDR presentation format. For example,
120 '192.168.1.0/24'.
121 :param address: An individual IPv4 or IPv6 address without a net
122 mask or subnet prefix. For example, '192.168.1.1'.
123 :returns boolean: Flag indicating whether address is in network.
124 """
125 try:
126 network = netaddr.IPNetwork(network)
127 except (netaddr.core.AddrFormatError, ValueError):
128 raise ValueError("Network (%s) is not in CIDR presentation format" %
129 network)
130
131 try:
132 address = netaddr.IPAddress(address)
133 except (netaddr.core.AddrFormatError, ValueError):
134 raise ValueError("Address (%s) is not in correct presentation format" %
135 address)
136
137 if address in network:
138 return True
139 else:
140 return False
141
142
143def _get_for_address(address, key):
144 """Retrieve an attribute of or the physical interface that
145 the IP address provided could be bound to.
146
147 :param address (str): An individual IPv4 or IPv6 address without a net
148 mask or subnet prefix. For example, '192.168.1.1'.
149 :param key: 'iface' for the physical interface name or an attribute
150 of the configured interface, for example 'netmask'.
151 :returns str: Requested attribute or None if address is not bindable.
152 """
153 address = netaddr.IPAddress(address)
154 for iface in netifaces.interfaces():
155 addresses = netifaces.ifaddresses(iface)
156 if address.version == 4 and netifaces.AF_INET in addresses:
157 addr = addresses[netifaces.AF_INET][0]['addr']
158 netmask = addresses[netifaces.AF_INET][0]['netmask']
159 network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
160 cidr = network.cidr
161 if address in cidr:
162 if key == 'iface':
163 return iface
164 else:
165 return addresses[netifaces.AF_INET][0][key]
166
167 if address.version == 6 and netifaces.AF_INET6 in addresses:
168 for addr in addresses[netifaces.AF_INET6]:
169 if not addr['addr'].startswith('fe80'):
170 network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
171 addr['netmask']))
172 cidr = network.cidr
173 if address in cidr:
174 if key == 'iface':
175 return iface
176 elif key == 'netmask' and cidr:
177 return str(cidr).split('/')[1]
178 else:
179 return addr[key]
180
181 return None
182
183
184get_iface_for_address = partial(_get_for_address, key='iface')
185
186
187get_netmask_for_address = partial(_get_for_address, key='netmask')
188
189
190def format_ipv6_addr(address):
191 """If address is IPv6, wrap it in '[]' otherwise return None.
192
193 This is required by most configuration files when specifying IPv6
194 addresses.
195 """
196 if is_ipv6(address):
197 return "[%s]" % address
198
199 return None
200
201
202def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
203 fatal=True, exc_list=None):
204 """Return the assigned IP address for a given interface, if any."""
205 # Extract nic if passed /dev/ethX
206 if '/' in iface:
207 iface = iface.split('/')[-1]
208
209 if not exc_list:
210 exc_list = []
211
212 try:
213 inet_num = getattr(netifaces, inet_type)
214 except AttributeError:
215 raise Exception("Unknown inet type '%s'" % str(inet_type))
216
217 interfaces = netifaces.interfaces()
218 if inc_aliases:
219 ifaces = []
220 for _iface in interfaces:
221 if iface == _iface or _iface.split(':')[0] == iface:
222 ifaces.append(_iface)
223
224 if fatal and not ifaces:
225 raise Exception("Invalid interface '%s'" % iface)
226
227 ifaces.sort()
228 else:
229 if iface not in interfaces:
230 if fatal:
231 raise Exception("Interface '%s' not found " % (iface))
232 else:
233 return []
234
235 else:
236 ifaces = [iface]
237
238 addresses = []
239 for netiface in ifaces:
240 net_info = netifaces.ifaddresses(netiface)
241 if inet_num in net_info:
242 for entry in net_info[inet_num]:
243 if 'addr' in entry and entry['addr'] not in exc_list:
244 addresses.append(entry['addr'])
245
246 if fatal and not addresses:
247 raise Exception("Interface '%s' doesn't have any %s addresses." %
248 (iface, inet_type))
249
250 return sorted(addresses)
251
252
253get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
254
255
256def get_iface_from_addr(addr):
257 """Work out on which interface the provided address is configured."""
258 for iface in netifaces.interfaces():
259 addresses = netifaces.ifaddresses(iface)
260 for inet_type in addresses:
261 for _addr in addresses[inet_type]:
262 _addr = _addr['addr']
263 # link local
264 ll_key = re.compile("(.+)%.*")
265 raw = re.match(ll_key, _addr)
266 if raw:
267 _addr = raw.group(1)
268
269 if _addr == addr:
270 log("Address '%s' is configured on iface '%s'" %
271 (addr, iface))
272 return iface
273
274 msg = "Unable to infer net iface on which '%s' is configured" % (addr)
275 raise Exception(msg)
276
277
278def sniff_iface(f):
279 """Ensure decorated function is called with a value for iface.
280
281 If no iface provided, inject net iface inferred from unit private address.
282 """
283 def iface_sniffer(*args, **kwargs):
284 if not kwargs.get('iface', None):
285 kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
286
287 return f(*args, **kwargs)
288
289 return iface_sniffer
290
291
292@sniff_iface
293def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
294 dynamic_only=True):
295 """Get assigned IPv6 address for a given interface.
296
297 Returns list of addresses found. If no address found, returns empty list.
298
299 If iface is None, we infer the current primary interface by doing a reverse
300 lookup on the unit private-address.
301
302 We currently only support scope global IPv6 addresses i.e. non-temporary
303 addresses. If no global IPv6 address is found, return the first one found
304 in the ipv6 address list.
305 """
306 addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
307 inc_aliases=inc_aliases, fatal=fatal,
308 exc_list=exc_list)
309
310 if addresses:
311 global_addrs = []
312 for addr in addresses:
313 key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
314 m = re.match(key_scope_link_local, addr)
315 if m:
316 eui_64_mac = m.group(1)
317 iface = m.group(2)
318 else:
319 global_addrs.append(addr)
320
321 if global_addrs:
322 # Make sure any found global addresses are not temporary
323 cmd = ['ip', 'addr', 'show', iface]
324 out = subprocess.check_output(cmd).decode('UTF-8')
325 if dynamic_only:
326 key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
327 else:
328 key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
329
330 addrs = []
331 for line in out.split('\n'):
332 line = line.strip()
333 m = re.match(key, line)
334 if m and 'temporary' not in line:
335 # Return the first valid address we find
336 for addr in global_addrs:
337 if m.group(1) == addr:
338 if not dynamic_only or \
339 m.group(1).endswith(eui_64_mac):
340 addrs.append(addr)
341
342 if addrs:
343 return addrs
344
345 if fatal:
346 raise Exception("Interface '%s' does not have a scope global "
347 "non-temporary ipv6 address." % iface)
348
349 return []
350
351
352def get_bridges(vnic_dir='/sys/devices/virtual/net'):
353 """Return a list of bridges on the system."""
354 b_regex = "%s/*/bridge" % vnic_dir
355 return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
356
357
358def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
359 """Return a list of nics comprising a given bridge on the system."""
360 brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
361 return [x.split('/')[-1] for x in glob.glob(brif_regex)]
362
363
364def is_bridge_member(nic):
365 """Check if a given nic is a member of a bridge."""
366 for bridge in get_bridges():
367 if nic in get_bridge_nics(bridge):
368 return True
369
370 return False
371
372
373def is_ip(address):
374 """
375 Returns True if address is a valid IP address.
376 """
377 try:
378 # Test to see if already an IPv4 address
379 socket.inet_aton(address)
380 return True
381 except socket.error:
382 return False
383
384
385def ns_query(address):
386 try:
387 import dns.resolver
388 except ImportError:
389 apt_install('python-dnspython')
390 import dns.resolver
391
392 if isinstance(address, dns.name.Name):
393 rtype = 'PTR'
394 elif isinstance(address, six.string_types):
395 rtype = 'A'
396 else:
397 return None
398
399 answers = dns.resolver.query(address, rtype)
400 if answers:
401 return str(answers[0])
402 return None
403
404
405def get_host_ip(hostname, fallback=None):
406 """
407 Resolves the IP for a given hostname, or returns
408 the input if it is already an IP.
409 """
410 if is_ip(hostname):
411 return hostname
412
413 ip_addr = ns_query(hostname)
414 if not ip_addr:
415 try:
416 ip_addr = socket.gethostbyname(hostname)
417 except:
418 log("Failed to resolve hostname '%s'" % (hostname),
419 level=WARNING)
420 return fallback
421 return ip_addr
422
423
424def get_hostname(address, fqdn=True):
425 """
426 Resolves hostname for given IP, or returns the input
427 if it is already a hostname.
428 """
429 if is_ip(address):
430 try:
431 import dns.reversename
432 except ImportError:
433 apt_install("python-dnspython")
434 import dns.reversename
435
436 rev = dns.reversename.from_address(address)
437 result = ns_query(rev)
438 if not result:
439 return None
440 else:
441 result = address
442
443 if fqdn:
444 # strip trailing .
445 if result.endswith('.'):
446 return result[:-1]
447 else:
448 return result
449 else:
450 return result.split('.')[0]
4510
=== removed directory 'hooks/charmhelpers/contrib/network/ovs'
=== removed file 'hooks/charmhelpers/contrib/network/ovs/__init__.py'
--- hooks/charmhelpers/contrib/network/ovs/__init__.py 2015-01-23 11:08:26 +0000
+++ hooks/charmhelpers/contrib/network/ovs/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,96 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17''' Helpers for interacting with OpenvSwitch '''
18import subprocess
19import os
20from charmhelpers.core.hookenv import (
21 log, WARNING
22)
23from charmhelpers.core.host import (
24 service
25)
26
27
28def add_bridge(name):
29 ''' Add the named bridge to openvswitch '''
30 log('Creating bridge {}'.format(name))
31 subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name])
32
33
34def del_bridge(name):
35 ''' Delete the named bridge from openvswitch '''
36 log('Deleting bridge {}'.format(name))
37 subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])
38
39
40def add_bridge_port(name, port, promisc=False):
41 ''' Add a port to the named openvswitch bridge '''
42 log('Adding port {} to bridge {}'.format(port, name))
43 subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
44 name, port])
45 subprocess.check_call(["ip", "link", "set", port, "up"])
46 if promisc:
47 subprocess.check_call(["ip", "link", "set", port, "promisc", "on"])
48 else:
49 subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
50
51
52def del_bridge_port(name, port):
53 ''' Delete a port from the named openvswitch bridge '''
54 log('Deleting port {} from bridge {}'.format(port, name))
55 subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
56 name, port])
57 subprocess.check_call(["ip", "link", "set", port, "down"])
58 subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
59
60
61def set_manager(manager):
62 ''' Set the controller for the local openvswitch '''
63 log('Setting manager for local ovs to {}'.format(manager))
64 subprocess.check_call(['ovs-vsctl', 'set-manager',
65 'ssl:{}'.format(manager)])
66
67
68CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
69
70
71def get_certificate():
72 ''' Read openvswitch certificate from disk '''
73 if os.path.exists(CERT_PATH):
74 log('Reading ovs certificate from {}'.format(CERT_PATH))
75 with open(CERT_PATH, 'r') as cert:
76 full_cert = cert.read()
77 begin_marker = "-----BEGIN CERTIFICATE-----"
78 end_marker = "-----END CERTIFICATE-----"
79 begin_index = full_cert.find(begin_marker)
80 end_index = full_cert.rfind(end_marker)
81 if end_index == -1 or begin_index == -1:
82 raise RuntimeError("Certificate does not contain valid begin"
83 " and end markers.")
84 full_cert = full_cert[begin_index:(end_index + len(end_marker))]
85 return full_cert
86 else:
87 log('Certificate not found', level=WARNING)
88 return None
89
90
91def full_restart():
92 ''' Full restart and reload of openvswitch '''
93 if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
94 service('start', 'openvswitch-force-reload-kmod')
95 else:
96 service('force-reload-kmod', 'openvswitch-switch')
970
=== removed file 'hooks/charmhelpers/contrib/network/ufw.py'
--- hooks/charmhelpers/contrib/network/ufw.py 2015-03-23 18:25:01 +0000
+++ hooks/charmhelpers/contrib/network/ufw.py 1970-01-01 00:00:00 +0000
@@ -1,276 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17"""
18This module contains helpers to add and remove ufw rules.
19
20Examples:
21
22- open SSH port for subnet 10.0.3.0/24:
23
24 >>> from charmhelpers.contrib.network import ufw
25 >>> ufw.enable()
26 >>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp')
27
28- open service by name as defined in /etc/services:
29
30 >>> from charmhelpers.contrib.network import ufw
31 >>> ufw.enable()
32 >>> ufw.service('ssh', 'open')
33
34- close service by port number:
35
36 >>> from charmhelpers.contrib.network import ufw
37 >>> ufw.enable()
38 >>> ufw.service('4949', 'close') # munin
39"""
40import re
41import os
42import subprocess
43from charmhelpers.core import hookenv
44
45__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
46
47
48class UFWError(Exception):
49 pass
50
51
52class UFWIPv6Error(UFWError):
53 pass
54
55
56def is_enabled():
57 """
58 Check if `ufw` is enabled
59
60 :returns: True if ufw is enabled
61 """
62 output = subprocess.check_output(['ufw', 'status'],
63 universal_newlines=True,
64 env={'LANG': 'en_US',
65 'PATH': os.environ['PATH']})
66
67 m = re.findall(r'^Status: active\n', output, re.M)
68
69 return len(m) >= 1
70
71
72def is_ipv6_ok(soft_fail=False):
73 """
74 Check if IPv6 support is present and ip6tables functional
75
76 :param soft_fail: If set to True and IPv6 support is broken, then reports
77 that the host doesn't have IPv6 support, otherwise a
78 UFWIPv6Error exception is raised.
79 :returns: True if IPv6 is working, False otherwise
80 """
81
82 # do we have IPv6 in the machine?
83 if os.path.isdir('/proc/sys/net/ipv6'):
84 # is ip6tables kernel module loaded?
85 lsmod = subprocess.check_output(['lsmod'], universal_newlines=True)
86 matches = re.findall('^ip6_tables[ ]+', lsmod, re.M)
87 if len(matches) == 0:
88 # ip6tables support isn't complete, let's try to load it
89 try:
90 subprocess.check_output(['modprobe', 'ip6_tables'],
91 universal_newlines=True)
92 # great, we could load the module
93 return True
94 except subprocess.CalledProcessError as ex:
95 hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
96 level="WARN")
97 # we are in a world where ip6tables isn't working
98 if soft_fail:
99 # so we inform that the machine doesn't have IPv6
100 return False
101 else:
102 raise UFWIPv6Error("IPv6 firewall support broken")
103 else:
104 # the module is present :)
105 return True
106
107 else:
108 # the system doesn't have IPv6
109 return False
110
111
112def disable_ipv6():
113 """
114 Disable ufw IPv6 support in /etc/default/ufw
115 """
116 exit_code = subprocess.call(['sed', '-i', 's/IPV6=.*/IPV6=no/g',
117 '/etc/default/ufw'])
118 if exit_code == 0:
119 hookenv.log('IPv6 support in ufw disabled', level='INFO')
120 else:
121 hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR")
122 raise UFWError("Couldn't disable IPv6 support in ufw")
123
124
125def enable(soft_fail=False):
126 """
127 Enable ufw
128
129 :param soft_fail: If set to True silently disables IPv6 support in ufw,
130 otherwise a UFWIPv6Error exception is raised when IP6
131 support is broken.
132 :returns: True if ufw is successfully enabled
133 """
134 if is_enabled():
135 return True
136
137 if not is_ipv6_ok(soft_fail):
138 disable_ipv6()
139
140 output = subprocess.check_output(['ufw', 'enable'],
141 universal_newlines=True,
142 env={'LANG': 'en_US',
143 'PATH': os.environ['PATH']})
144
145 m = re.findall('^Firewall is active and enabled on system startup\n',
146 output, re.M)
147 hookenv.log(output, level='DEBUG')
148
149 if len(m) == 0:
150 hookenv.log("ufw couldn't be enabled", level='WARN')
151 return False
152 else:
153 hookenv.log("ufw enabled", level='INFO')
154 return True
155
156
157def disable():
158 """
159 Disable ufw
160
161 :returns: True if ufw is successfully disabled
162 """
163 if not is_enabled():
164 return True
165
166 output = subprocess.check_output(['ufw', 'disable'],
167 universal_newlines=True,
168 env={'LANG': 'en_US',
169 'PATH': os.environ['PATH']})
170
171 m = re.findall(r'^Firewall stopped and disabled on system startup\n',
172 output, re.M)
173 hookenv.log(output, level='DEBUG')
174
175 if len(m) == 0:
176 hookenv.log("ufw couldn't be disabled", level='WARN')
177 return False
178 else:
179 hookenv.log("ufw disabled", level='INFO')
180 return True
181
182
183def modify_access(src, dst='any', port=None, proto=None, action='allow'):
184 """
185 Grant access to an address or subnet
186
187 :param src: address (e.g. 192.168.1.234) or subnet
188 (e.g. 192.168.1.0/24).
189 :param dst: destiny of the connection, if the machine has multiple IPs and
190 connections to only one of those have to accepted this is the
191 field has to be set.
192 :param port: destiny port
193 :param proto: protocol (tcp or udp)
194 :param action: `allow` or `delete`
195 """
196 if not is_enabled():
197 hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
198 return
199
200 if action == 'delete':
201 cmd = ['ufw', 'delete', 'allow']
202 else:
203 cmd = ['ufw', action]
204
205 if src is not None:
206 cmd += ['from', src]
207
208 if dst is not None:
209 cmd += ['to', dst]
210
211 if port is not None:
212 cmd += ['port', str(port)]
213
214 if proto is not None:
215 cmd += ['proto', proto]
216
217 hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
218 p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
219 (stdout, stderr) = p.communicate()
220
221 hookenv.log(stdout, level='INFO')
222
223 if p.returncode != 0:
224 hookenv.log(stderr, level='ERROR')
225 hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
226 p.returncode),
227 level='ERROR')
228
229
230def grant_access(src, dst='any', port=None, proto=None):
231 """
232 Grant access to an address or subnet
233
234 :param src: address (e.g. 192.168.1.234) or subnet
235 (e.g. 192.168.1.0/24).
236 :param dst: destiny of the connection, if the machine has multiple IPs and
237 connections to only one of those have to accepted this is the
238 field has to be set.
239 :param port: destiny port
240 :param proto: protocol (tcp or udp)
241 """
242 return modify_access(src, dst=dst, port=port, proto=proto, action='allow')
243
244
245def revoke_access(src, dst='any', port=None, proto=None):
246 """
247 Revoke access to an address or subnet
248
249 :param src: address (e.g. 192.168.1.234) or subnet
250 (e.g. 192.168.1.0/24).
251 :param dst: destiny of the connection, if the machine has multiple IPs and
252 connections to only one of those have to accepted this is the
253 field has to be set.
254 :param port: destiny port
255 :param proto: protocol (tcp or udp)
256 """
257 return modify_access(src, dst=dst, port=port, proto=proto, action='delete')
258
259
260def service(name, action):
261 """
262 Open/close access to a service
263
264 :param name: could be a service name defined in `/etc/services` or a port
265 number.
266 :param action: `open` or `close`
267 """
268 if action == 'open':
269 subprocess.check_output(['ufw', 'allow', str(name)],
270 universal_newlines=True)
271 elif action == 'close':
272 subprocess.check_output(['ufw', 'delete', 'allow', str(name)],
273 universal_newlines=True)
274 else:
275 raise UFWError(("'{}' not supported, use 'allow' "
276 "or 'delete'").format(action))
2770
=== removed directory 'hooks/charmhelpers/contrib/openstack'
=== removed file 'hooks/charmhelpers/contrib/openstack/__init__.py'
--- hooks/charmhelpers/contrib/openstack/__init__.py 2015-01-23 11:08:26 +0000
+++ hooks/charmhelpers/contrib/openstack/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,15 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
160
=== removed file 'hooks/charmhelpers/contrib/openstack/alternatives.py'
--- hooks/charmhelpers/contrib/openstack/alternatives.py 2015-01-23 11:08:26 +0000
+++ hooks/charmhelpers/contrib/openstack/alternatives.py 1970-01-01 00:00:00 +0000
@@ -1,33 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17''' Helper for managing alternatives for file conflict resolution '''
18
19import subprocess
20import shutil
21import os
22
23
24def install_alternative(name, target, source, priority=50):
25 ''' Install alternative configuration '''
26 if (os.path.exists(target) and not os.path.islink(target)):
27 # Move existing file/directory away before installing
28 shutil.move(target, '{}.bak'.format(target))
29 cmd = [
30 'update-alternatives', '--force', '--install',
31 target, name, source, str(priority)
32 ]
33 subprocess.check_call(cmd)
340
=== removed directory 'hooks/charmhelpers/contrib/openstack/amulet'
=== removed file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py'
--- hooks/charmhelpers/contrib/openstack/amulet/__init__.py 2015-01-23 11:08:26 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,15 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
160
=== removed file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-04-23 14:54:24 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
@@ -1,146 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import six
18from collections import OrderedDict
19from charmhelpers.contrib.amulet.deployment import (
20 AmuletDeployment
21)
22
23
24class OpenStackAmuletDeployment(AmuletDeployment):
25 """OpenStack amulet deployment.
26
27 This class inherits from AmuletDeployment and has additional support
28 that is specifically for use by OpenStack charms.
29 """
30
31 def __init__(self, series=None, openstack=None, source=None, stable=True):
32 """Initialize the deployment environment."""
33 super(OpenStackAmuletDeployment, self).__init__(series)
34 self.openstack = openstack
35 self.source = source
36 self.stable = stable
37 # Note(coreycb): this needs to be changed when new next branches come
38 # out.
39 self.current_next = "trusty"
40
41 def _determine_branch_locations(self, other_services):
42 """Determine the branch locations for the other services.
43
44 Determine if the local branch being tested is derived from its
45 stable or next (dev) branch, and based on this, use the corresonding
46 stable or next branches for the other_services."""
47 base_charms = ['mysql', 'mongodb']
48
49 if self.series in ['precise', 'trusty']:
50 base_series = self.series
51 else:
52 base_series = self.current_next
53
54 if self.stable:
55 for svc in other_services:
56 temp = 'lp:charms/{}/{}'
57 svc['location'] = temp.format(base_series,
58 svc['name'])
59 else:
60 for svc in other_services:
61 if svc['name'] in base_charms:
62 temp = 'lp:charms/{}/{}'
63 svc['location'] = temp.format(base_series,
64 svc['name'])
65 else:
66 temp = 'lp:~openstack-charmers/charms/{}/{}/next'
67 svc['location'] = temp.format(self.current_next,
68 svc['name'])
69 return other_services
70
71 def _add_services(self, this_service, other_services):
72 """Add services to the deployment and set openstack-origin/source."""
73 other_services = self._determine_branch_locations(other_services)
74
75 super(OpenStackAmuletDeployment, self)._add_services(this_service,
76 other_services)
77
78 services = other_services
79 services.append(this_service)
80 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
81 'ceph-osd', 'ceph-radosgw']
82 # Openstack subordinate charms do not expose an origin option as that
83 # is controlled by the principle
84 ignore = ['neutron-openvswitch']
85
86 if self.openstack:
87 for svc in services:
88 if svc['name'] not in use_source + ignore:
89 config = {'openstack-origin': self.openstack}
90 self.d.configure(svc['name'], config)
91
92 if self.source:
93 for svc in services:
94 if svc['name'] in use_source and svc['name'] not in ignore:
95 config = {'source': self.source}
96 self.d.configure(svc['name'], config)
97
98 def _configure_services(self, configs):
99 """Configure all of the services."""
100 for service, config in six.iteritems(configs):
101 self.d.configure(service, config)
102
103 def _get_openstack_release(self):
104 """Get openstack release.
105
106 Return an integer representing the enum value of the openstack
107 release.
108 """
109 # Must be ordered by OpenStack release (not by Ubuntu release):
110 (self.precise_essex, self.precise_folsom, self.precise_grizzly,
111 self.precise_havana, self.precise_icehouse,
112 self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
113 self.trusty_kilo, self.vivid_kilo) = range(10)
114
115 releases = {
116 ('precise', None): self.precise_essex,
117 ('precise', 'cloud:precise-folsom'): self.precise_folsom,
118 ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
119 ('precise', 'cloud:precise-havana'): self.precise_havana,
120 ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
121 ('trusty', None): self.trusty_icehouse,
122 ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
123 ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
124 ('utopic', None): self.utopic_juno,
125 ('vivid', None): self.vivid_kilo}
126 return releases[(self.series, self.openstack)]
127
128 def _get_openstack_release_string(self):
129 """Get openstack release string.
130
131 Return a string representing the openstack release.
132 """
133 releases = OrderedDict([
134 ('precise', 'essex'),
135 ('quantal', 'folsom'),
136 ('raring', 'grizzly'),
137 ('saucy', 'havana'),
138 ('trusty', 'icehouse'),
139 ('utopic', 'juno'),
140 ('vivid', 'kilo'),
141 ])
142 if self.openstack:
143 os_origin = self.openstack.split(':')[1]
144 return os_origin.split('%s-' % self.series)[1].split('/')[0]
145 else:
146 return releases[self.series]
1470
=== removed file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-03-31 15:13:53 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
@@ -1,294 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import logging
18import os
19import time
20import urllib
21
22import glanceclient.v1.client as glance_client
23import keystoneclient.v2_0 as keystone_client
24import novaclient.v1_1.client as nova_client
25
26import six
27
28from charmhelpers.contrib.amulet.utils import (
29 AmuletUtils
30)
31
32DEBUG = logging.DEBUG
33ERROR = logging.ERROR
34
35
36class OpenStackAmuletUtils(AmuletUtils):
37 """OpenStack amulet utilities.
38
39 This class inherits from AmuletUtils and has additional support
40 that is specifically for use by OpenStack charms.
41 """
42
43 def __init__(self, log_level=ERROR):
44 """Initialize the deployment environment."""
45 super(OpenStackAmuletUtils, self).__init__(log_level)
46
47 def validate_endpoint_data(self, endpoints, admin_port, internal_port,
48 public_port, expected):
49 """Validate endpoint data.
50
51 Validate actual endpoint data vs expected endpoint data. The ports
52 are used to find the matching endpoint.
53 """
54 found = False
55 for ep in endpoints:
56 self.log.debug('endpoint: {}'.format(repr(ep)))
57 if (admin_port in ep.adminurl and
58 internal_port in ep.internalurl and
59 public_port in ep.publicurl):
60 found = True
61 actual = {'id': ep.id,
62 'region': ep.region,
63 'adminurl': ep.adminurl,
64 'internalurl': ep.internalurl,
65 'publicurl': ep.publicurl,
66 'service_id': ep.service_id}
67 ret = self._validate_dict_data(expected, actual)
68 if ret:
69 return 'unexpected endpoint data - {}'.format(ret)
70
71 if not found:
72 return 'endpoint not found'
73
74 def validate_svc_catalog_endpoint_data(self, expected, actual):
75 """Validate service catalog endpoint data.
76
77 Validate a list of actual service catalog endpoints vs a list of
78 expected service catalog endpoints.
79 """
80 self.log.debug('actual: {}'.format(repr(actual)))
81 for k, v in six.iteritems(expected):
82 if k in actual:
83 ret = self._validate_dict_data(expected[k][0], actual[k][0])
84 if ret:
85 return self.endpoint_error(k, ret)
86 else:
87 return "endpoint {} does not exist".format(k)
88 return ret
89
90 def validate_tenant_data(self, expected, actual):
91 """Validate tenant data.
92
93 Validate a list of actual tenant data vs list of expected tenant
94 data.
95 """
96 self.log.debug('actual: {}'.format(repr(actual)))
97 for e in expected:
98 found = False
99 for act in actual:
100 a = {'enabled': act.enabled, 'description': act.description,
101 'name': act.name, 'id': act.id}
102 if e['name'] == a['name']:
103 found = True
104 ret = self._validate_dict_data(e, a)
105 if ret:
106 return "unexpected tenant data - {}".format(ret)
107 if not found:
108 return "tenant {} does not exist".format(e['name'])
109 return ret
110
111 def validate_role_data(self, expected, actual):
112 """Validate role data.
113
114 Validate a list of actual role data vs a list of expected role
115 data.
116 """
117 self.log.debug('actual: {}'.format(repr(actual)))
118 for e in expected:
119 found = False
120 for act in actual:
121 a = {'name': act.name, 'id': act.id}
122 if e['name'] == a['name']:
123 found = True
124 ret = self._validate_dict_data(e, a)
125 if ret:
126 return "unexpected role data - {}".format(ret)
127 if not found:
128 return "role {} does not exist".format(e['name'])
129 return ret
130
131 def validate_user_data(self, expected, actual):
132 """Validate user data.
133
134 Validate a list of actual user data vs a list of expected user
135 data.
136 """
137 self.log.debug('actual: {}'.format(repr(actual)))
138 for e in expected:
139 found = False
140 for act in actual:
141 a = {'enabled': act.enabled, 'name': act.name,
142 'email': act.email, 'tenantId': act.tenantId,
143 'id': act.id}
144 if e['name'] == a['name']:
145 found = True
146 ret = self._validate_dict_data(e, a)
147 if ret:
148 return "unexpected user data - {}".format(ret)
149 if not found:
150 return "user {} does not exist".format(e['name'])
151 return ret
152
153 def validate_flavor_data(self, expected, actual):
154 """Validate flavor data.
155
156 Validate a list of actual flavors vs a list of expected flavors.
157 """
158 self.log.debug('actual: {}'.format(repr(actual)))
159 act = [a.name for a in actual]
160 return self._validate_list_data(expected, act)
161
162 def tenant_exists(self, keystone, tenant):
163 """Return True if tenant exists."""
164 return tenant in [t.name for t in keystone.tenants.list()]
165
166 def authenticate_keystone_admin(self, keystone_sentry, user, password,
167 tenant):
168 """Authenticates admin user with the keystone admin endpoint."""
169 unit = keystone_sentry
170 service_ip = unit.relation('shared-db',
171 'mysql:shared-db')['private-address']
172 ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
173 return keystone_client.Client(username=user, password=password,
174 tenant_name=tenant, auth_url=ep)
175
176 def authenticate_keystone_user(self, keystone, user, password, tenant):
177 """Authenticates a regular user with the keystone public endpoint."""
178 ep = keystone.service_catalog.url_for(service_type='identity',
179 endpoint_type='publicURL')
180 return keystone_client.Client(username=user, password=password,
181 tenant_name=tenant, auth_url=ep)
182
183 def authenticate_glance_admin(self, keystone):
184 """Authenticates admin user with glance."""
185 ep = keystone.service_catalog.url_for(service_type='image',
186 endpoint_type='adminURL')
187 return glance_client.Client(ep, token=keystone.auth_token)
188
189 def authenticate_nova_user(self, keystone, user, password, tenant):
190 """Authenticates a regular user with nova-api."""
191 ep = keystone.service_catalog.url_for(service_type='identity',
192 endpoint_type='publicURL')
193 return nova_client.Client(username=user, api_key=password,
194 project_id=tenant, auth_url=ep)
195
196 def create_cirros_image(self, glance, image_name):
197 """Download the latest cirros image and upload it to glance."""
198 http_proxy = os.getenv('AMULET_HTTP_PROXY')
199 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
200 if http_proxy:
201 proxies = {'http': http_proxy}
202 opener = urllib.FancyURLopener(proxies)
203 else:
204 opener = urllib.FancyURLopener()
205
206 f = opener.open("http://download.cirros-cloud.net/version/released")
207 version = f.read().strip()
208 cirros_img = "cirros-{}-x86_64-disk.img".format(version)
209 local_path = os.path.join('tests', cirros_img)
210
211 if not os.path.exists(local_path):
212 cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
213 version, cirros_img)
214 opener.retrieve(cirros_url, local_path)
215 f.close()
216
217 with open(local_path) as f:
218 image = glance.images.create(name=image_name, is_public=True,
219 disk_format='qcow2',
220 container_format='bare', data=f)
221 count = 1
222 status = image.status
223 while status != 'active' and count < 10:
224 time.sleep(3)
225 image = glance.images.get(image.id)
226 status = image.status
227 self.log.debug('image status: {}'.format(status))
228 count += 1
229
230 if status != 'active':
231 self.log.error('image creation timed out')
232 return None
233
234 return image
235
236 def delete_image(self, glance, image):
237 """Delete the specified image."""
238 num_before = len(list(glance.images.list()))
239 glance.images.delete(image)
240
241 count = 1
242 num_after = len(list(glance.images.list()))
243 while num_after != (num_before - 1) and count < 10:
244 time.sleep(3)
245 num_after = len(list(glance.images.list()))
246 self.log.debug('number of images: {}'.format(num_after))
247 count += 1
248
249 if num_after != (num_before - 1):
250 self.log.error('image deletion timed out')
251 return False
252
253 return True
254
255 def create_instance(self, nova, image_name, instance_name, flavor):
256 """Create the specified instance."""
257 image = nova.images.find(name=image_name)
258 flavor = nova.flavors.find(name=flavor)
259 instance = nova.servers.create(name=instance_name, image=image,
260 flavor=flavor)
261
262 count = 1
263 status = instance.status
264 while status != 'ACTIVE' and count < 60:
265 time.sleep(3)
266 instance = nova.servers.get(instance.id)
267 status = instance.status
268 self.log.debug('instance status: {}'.format(status))
269 count += 1
270
271 if status != 'ACTIVE':
272 self.log.error('instance creation timed out')
273 return None
274
275 return instance
276
277 def delete_instance(self, nova, instance):
278 """Delete the specified instance."""
279 num_before = len(list(nova.servers.list()))
280 nova.servers.delete(instance)
281
282 count = 1
283 num_after = len(list(nova.servers.list()))
284 while num_after != (num_before - 1) and count < 10:
285 time.sleep(3)
286 num_after = len(list(nova.servers.list()))
287 self.log.debug('number of instances: {}'.format(num_after))
288 count += 1
289
290 if num_after != (num_before - 1):
291 self.log.error('instance deletion timed out')
292 return False
293
294 return True
2950
=== removed file 'hooks/charmhelpers/contrib/openstack/context.py'
--- hooks/charmhelpers/contrib/openstack/context.py 2015-04-16 21:35:13 +0000
+++ hooks/charmhelpers/contrib/openstack/context.py 1970-01-01 00:00:00 +0000
@@ -1,1328 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import json
18import os
19import re
20import time
21from base64 import b64decode
22from subprocess import check_call
23
24import six
25import yaml
26
27from charmhelpers.fetch import (
28 apt_install,
29 filter_installed_packages,
30)
31from charmhelpers.core.hookenv import (
32 config,
33 is_relation_made,
34 local_unit,
35 log,
36 relation_get,
37 relation_ids,
38 related_units,
39 relation_set,
40 unit_get,
41 unit_private_ip,
42 charm_name,
43 DEBUG,
44 INFO,
45 WARNING,
46 ERROR,
47)
48
49from charmhelpers.core.sysctl import create as sysctl_create
50from charmhelpers.core.strutils import bool_from_string
51
52from charmhelpers.core.host import (
53 list_nics,
54 get_nic_hwaddr,
55 mkdir,
56 write_file,
57)
58from charmhelpers.contrib.hahelpers.cluster import (
59 determine_apache_port,
60 determine_api_port,
61 https,
62 is_clustered,
63)
64from charmhelpers.contrib.hahelpers.apache import (
65 get_cert,
66 get_ca_cert,
67 install_ca_cert,
68)
69from charmhelpers.contrib.openstack.neutron import (
70 neutron_plugin_attribute,
71 parse_data_port_mappings,
72)
73from charmhelpers.contrib.openstack.ip import (
74 resolve_address,
75 INTERNAL,
76)
77from charmhelpers.contrib.network.ip import (
78 get_address_in_network,
79 get_ipv4_addr,
80 get_ipv6_addr,
81 get_netmask_for_address,
82 format_ipv6_addr,
83 is_address_in_network,
84 is_bridge_member,
85)
86from charmhelpers.contrib.openstack.utils import get_host_ip
87CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
88ADDRESS_TYPES = ['admin', 'internal', 'public']
89
90
class OSContextError(Exception):
    """Raised when an OpenStack template context cannot be generated."""
93
94
def ensure_packages(packages):
    """Install any of ``packages`` that are missing, without upgrading
    packages that are already present.
    """
    missing = filter_installed_packages(packages)
    if missing:
        apt_install(missing, fatal=True)
100
101
def context_complete(ctxt):
    """Return True if every value in ``ctxt`` is populated.

    A value of None or the empty string counts as missing; any missing
    keys are logged at INFO level.
    """
    missing = [key for key, value in ctxt.items()
               if value is None or value == '']
    if missing:
        log('Missing required data: %s' % ' '.join(missing), level=INFO)
        return False

    return True
113
114
def config_flags_parser(config_flags):
    """Parses config flags string into dict.

    This parsing method supports a few different formats for the config
    flag values to be parsed:

      1. A string in the simple format of key=value pairs, with the
         possibility of specifying multiple key value pairs within the same
         string. For example, a string in the format of
         'key1=value1, key2=value2' will return a dict of:

             {'key1': 'value1', 'key2': 'value2'}.

      2. A string in the above format, but supporting a comma-delimited list
         of values for the same key. For example, a string in the format of
         'key1=value1, key2=value3,value4,value5' will return a dict of:

             {'key1': 'value1', 'key2': 'value3,value4,value5'}

      3. A string containing a colon character (:) prior to an equal
         character (=) will be treated as yaml and parsed as such. This can
         be used to specify more complex key value pairs. For example,
         a string in the format of 'key1: subkey1=value1, subkey2=value2'
         will return a dict of:

             {'key1': 'subkey1=value1, subkey2=value2'}

    The provided config_flags string may be a list of comma-separated values
    which themselves may be comma-separated list of values.

    :param config_flags: string of flags to parse
    :returns: dict of parsed flags
    :raises OSContextError: if the string is not in an expected format
    """
    # If we find a colon before an equals sign then treat it as yaml.
    # Note: limit it to finding the colon first since this indicates
    # assignment for inline yaml.
    colon = config_flags.find(':')
    equals = config_flags.find('=')
    if colon > 0:
        if colon < equals or equals < 0:
            return yaml.safe_load(config_flags)

    if config_flags.find('==') >= 0:
        log("config_flags is not in expected format (key=value)", level=ERROR)
        raise OSContextError

    # strip the following from each value.
    post_strippers = ' ,'
    # we strip any leading/trailing '=' or ' ' from the string then
    # split on '='.
    split = config_flags.strip(' =').split('=')
    limit = len(split)
    flags = {}
    for i in range(0, limit - 1):
        current = split[i]
        # NOTE: renamed from 'next' to avoid shadowing the builtin.
        nxt = split[i + 1]
        vindex = nxt.rfind(',')
        if (i == limit - 2) or (vindex < 0):
            # Last pair, or the next segment holds no embedded key:
            # the whole segment is this key's value.
            value = nxt
        else:
            value = nxt[:vindex]

        if i == 0:
            key = current
        else:
            # if this not the first entry, expect an embedded key.
            index = current.rfind(',')
            if index < 0:
                log("Invalid config value(s) at index %s" % (i), level=ERROR)
                raise OSContextError
            key = current[index + 1:]

        # Add to collection.
        flags[key.strip(post_strippers)] = value.rstrip(post_strippers)

    return flags
187
188
class OSContextGenerator(object):
    """Base class for all context generators.

    Subclasses implement ``__call__`` to return a dict of template
    context data.
    """

    # Relation interfaces this context depends upon.
    interfaces = []

    def __call__(self):
        raise NotImplementedError
195
196
class SharedDBContext(OSContextGenerator):
    # Template context for a MySQL shared-db relation.
    interfaces = ['shared-db']

    def __init__(self,
                 database=None, user=None, relation_prefix=None, ssl_dir=None):
        """Allows inspecting relation for settings prefixed with
        relation_prefix. This is useful for parsing access for multiple
        databases returned via the shared-db interface (eg, nova_password,
        quantum_password)

        :param database: database name; falls back to the 'database' charm
                         config option when unset
        :param user: database user; falls back to 'database-user' config
        :param relation_prefix: optional prefix for relation settings
        :param ssl_dir: directory into which client SSL material is written
        """
        self.relation_prefix = relation_prefix
        self.database = database
        self.user = user
        self.ssl_dir = ssl_dir

    def __call__(self):
        """Build the shared-db context.

        :returns: populated context dict, {} while relation data is
                  incomplete, or None when execution is deferred (see below)
        :raises OSContextError: if database name/user cannot be determined
        """
        self.database = self.database or config('database')
        self.user = self.user or config('database-user')
        if None in [self.database, self.user]:
            log("Could not generate shared_db context. Missing required charm "
                "config options. (database name and user)", level=ERROR)
            raise OSContextError

        ctxt = {}

        # NOTE(jamespage) if mysql charm provides a network upon which
        # access to the database should be made, reconfigure relation
        # with the service units local address and defer execution
        access_network = relation_get('access-network')
        if access_network is not None:
            if self.relation_prefix is not None:
                hostname_key = "{}_hostname".format(self.relation_prefix)
            else:
                hostname_key = "hostname"
            access_hostname = get_address_in_network(access_network,
                                                     unit_get('private-address'))
            set_hostname = relation_get(attribute=hostname_key,
                                        unit=local_unit())
            if set_hostname != access_hostname:
                relation_set(relation_settings={hostname_key: access_hostname})
                return None  # Defer any further hook execution for now....

        # The password key is optionally namespaced by the relation prefix
        # (eg 'nova_password') to support multiple databases per relation.
        password_setting = 'password'
        if self.relation_prefix:
            password_setting = self.relation_prefix + '_password'

        for rid in relation_ids('shared-db'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                host = rdata.get('db_host')
                # Wrap IPv6 addresses in [] for use in URLs/config files.
                host = format_ipv6_addr(host) or host
                ctxt = {
                    'database_host': host,
                    'database': self.database,
                    'database_user': self.user,
                    'database_password': rdata.get(password_setting),
                    'database_type': 'mysql'
                }
                if context_complete(ctxt):
                    # Only layer in SSL material once the core data is there.
                    db_ssl(rdata, ctxt, self.ssl_dir)
                    return ctxt
        return {}
259
260
class PostgresqlDBContext(OSContextGenerator):
    """Template context for a PostgreSQL database relation."""
    interfaces = ['pgsql-db']

    def __init__(self, database=None):
        # Database name; falls back to the 'database' charm config option.
        self.database = database

    def __call__(self):
        """Return connection details for the pgsql-db relation, or {} while
        the relation data is incomplete.

        :raises OSContextError: if no database name can be determined
        """
        self.database = self.database or config('database')
        if self.database is None:
            log('Could not generate postgresql_db context. Missing required '
                'charm config options. (database name)', level=ERROR)
            raise OSContextError

        ctxt = {}
        for rid in relation_ids(self.interfaces[0]):
            for unit in related_units(rid):
                ctxt = {
                    'database_host': relation_get('host', rid=rid, unit=unit),
                    'database': self.database,
                    'database_user': relation_get('user', rid=rid, unit=unit),
                    'database_password': relation_get('password', rid=rid,
                                                      unit=unit),
                    'database_type': 'postgresql',
                }
                if context_complete(ctxt):
                    return ctxt

        return {}
289
290
def db_ssl(rdata, ctxt, ssl_dir):
    """Write database client SSL material from relation data into ssl_dir.

    Adds the resulting file paths to ``ctxt`` under database_ssl_ca,
    database_ssl_cert and database_ssl_key as applicable.

    :param rdata: relation data dict (may carry base64 ssl_ca/ssl_cert/ssl_key)
    :param ctxt: context dict to augment
    :param ssl_dir: directory to write client SSL files into
    :returns: the (possibly augmented) ctxt
    """
    if 'ssl_ca' in rdata and ssl_dir:
        ca_path = os.path.join(ssl_dir, 'db-client.ca')
        # b64decode() returns bytes - write in binary mode so this works
        # under Python 3 as well as Python 2.
        with open(ca_path, 'wb') as fh:
            fh.write(b64decode(rdata['ssl_ca']))

        ctxt['database_ssl_ca'] = ca_path
    elif 'ssl_ca' in rdata:
        log("Charm not setup for ssl support but ssl ca found", level=INFO)
        return ctxt

    if 'ssl_cert' in rdata:
        cert_path = os.path.join(
            ssl_dir, 'db-client.cert')
        if not os.path.exists(cert_path):
            # Freshly issued certs may not be valid yet; give them a minute.
            log("Waiting 1m for ssl client cert validity", level=INFO)
            time.sleep(60)

        with open(cert_path, 'wb') as fh:
            fh.write(b64decode(rdata['ssl_cert']))

        ctxt['database_ssl_cert'] = cert_path
        # NOTE(review): assumes ssl_key always accompanies ssl_cert on the
        # relation - a cert without a key would raise KeyError here.
        key_path = os.path.join(ssl_dir, 'db-client.key')
        with open(key_path, 'wb') as fh:
            fh.write(b64decode(rdata['ssl_key']))

        ctxt['database_ssl_key'] = key_path

    return ctxt
320
321
class IdentityServiceContext(OSContextGenerator):
    # Template context for a keystone identity-service relation: exposes
    # the admin credentials and service/auth endpoints keystone advertises.

    def __init__(self, service=None, service_user=None, rel_name='identity-service'):
        """
        :param service: service name, used to create a per-service signing
                        cache dir for PKI token signing
        :param service_user: owner of that cache dir
        :param rel_name: relation name to inspect (defaults to
                         'identity-service')
        """
        self.service = service
        self.service_user = service_user
        self.rel_name = rel_name
        self.interfaces = [self.rel_name]

    def __call__(self):
        """Return the identity-service context, or {} while the relation
        data is incomplete.
        """
        log('Generating template context for ' + self.rel_name, level=DEBUG)
        ctxt = {}

        if self.service and self.service_user:
            # This is required for pki token signing if we don't want /tmp to
            # be used.
            cachedir = '/var/cache/%s' % (self.service)
            if not os.path.isdir(cachedir):
                log("Creating service cache dir %s" % (cachedir), level=DEBUG)
                mkdir(path=cachedir, owner=self.service_user,
                      group=self.service_user, perms=0o700)

            ctxt['signing_dir'] = cachedir

        for rid in relation_ids(self.rel_name):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                serv_host = rdata.get('service_host')
                # Wrap IPv6 addresses in [] for use in URLs/config files.
                serv_host = format_ipv6_addr(serv_host) or serv_host
                auth_host = rdata.get('auth_host')
                auth_host = format_ipv6_addr(auth_host) or auth_host
                # Default to http when the relation does not specify.
                svc_protocol = rdata.get('service_protocol') or 'http'
                auth_protocol = rdata.get('auth_protocol') or 'http'
                ctxt.update({'service_port': rdata.get('service_port'),
                             'service_host': serv_host,
                             'auth_host': auth_host,
                             'auth_port': rdata.get('auth_port'),
                             'admin_tenant_name': rdata.get('service_tenant'),
                             'admin_user': rdata.get('service_username'),
                             'admin_password': rdata.get('service_password'),
                             'service_protocol': svc_protocol,
                             'auth_protocol': auth_protocol})

                if context_complete(ctxt):
                    # NOTE(jamespage) this is required for >= icehouse
                    # so a missing value just indicates keystone needs
                    # upgrading
                    ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
                    return ctxt

        return {}
372
373
class AMQPContext(OSContextGenerator):
    # Template context for a RabbitMQ amqp relation, including optional
    # SSL, HA-queue and clustered/active-active configurations.

    def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
        """
        :param ssl_dir: directory into which the broker CA cert is written
                        when SSL is in use
        :param rel_name: relation name to inspect (defaults to 'amqp')
        :param relation_prefix: optional prefix for the rabbit user/vhost
                                charm config option names
        """
        self.ssl_dir = ssl_dir
        self.rel_name = rel_name
        self.relation_prefix = relation_prefix
        self.interfaces = [rel_name]

    def __call__(self):
        """Return the amqp context, or {} while relation data is incomplete.

        :raises OSContextError: if the rabbit user/vhost charm config
                                options are missing
        """
        log('Generating template context for amqp', level=DEBUG)
        conf = config()
        if self.relation_prefix:
            user_setting = '%s-rabbit-user' % (self.relation_prefix)
            vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
        else:
            user_setting = 'rabbit-user'
            vhost_setting = 'rabbit-vhost'

        try:
            username = conf[user_setting]
            vhost = conf[vhost_setting]
        except KeyError as e:
            log('Could not generate shared_db context. Missing required charm '
                'config options: %s.' % e, level=ERROR)
            raise OSContextError

        ctxt = {}
        for rid in relation_ids(self.rel_name):
            ha_vip_only = False
            for unit in related_units(rid):
                # Prefer the cluster VIP over a unit's private address when
                # the broker reports itself clustered.
                if relation_get('clustered', rid=rid, unit=unit):
                    ctxt['clustered'] = True
                    vip = relation_get('vip', rid=rid, unit=unit)
                    vip = format_ipv6_addr(vip) or vip
                    ctxt['rabbitmq_host'] = vip
                else:
                    host = relation_get('private-address', rid=rid, unit=unit)
                    host = format_ipv6_addr(host) or host
                    ctxt['rabbitmq_host'] = host

                ctxt.update({
                    'rabbitmq_user': username,
                    'rabbitmq_password': relation_get('password', rid=rid,
                                                      unit=unit),
                    'rabbitmq_virtual_host': vhost,
                })

                ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
                if ssl_port:
                    ctxt['rabbit_ssl_port'] = ssl_port

                ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
                if ssl_ca:
                    ctxt['rabbit_ssl_ca'] = ssl_ca

                # Presence of the key (any value) enables HA queues.
                if relation_get('ha_queues', rid=rid, unit=unit) is not None:
                    ctxt['rabbitmq_ha_queues'] = True

                ha_vip_only = relation_get('ha-vip-only',
                                           rid=rid, unit=unit) is not None

                if context_complete(ctxt):
                    if 'rabbit_ssl_ca' in ctxt:
                        if not self.ssl_dir:
                            log("Charm not setup for ssl support but ssl ca "
                                "found", level=INFO)
                            break

                        # Materialise the base64 CA cert on disk and point
                        # the context at the file instead.
                        ca_path = os.path.join(
                            self.ssl_dir, 'rabbit-client-ca.pem')
                        with open(ca_path, 'w') as fh:
                            fh.write(b64decode(ctxt['rabbit_ssl_ca']))
                        ctxt['rabbit_ssl_ca'] = ca_path

                    # Sufficient information found = break out!
                    break

            # Used for active/active rabbitmq >= grizzly
            if (('clustered' not in ctxt or ha_vip_only) and
                    len(related_units(rid)) > 1):
                rabbitmq_hosts = []
                for unit in related_units(rid):
                    host = relation_get('private-address', rid=rid, unit=unit)
                    host = format_ipv6_addr(host) or host
                    rabbitmq_hosts.append(host)

                ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))

        oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
        if oslo_messaging_flags:
            ctxt['oslo_messaging_flags'] = config_flags_parser(
                oslo_messaging_flags)

        if not context_complete(ctxt):
            return {}

        return ctxt
471
472
class CephContext(OSContextGenerator):
    """Generates context for /etc/ceph/ceph.conf templates."""
    interfaces = ['ceph']

    def __call__(self):
        """Return the ceph context (mon hosts, auth, key, syslog flag), or
        {} when there is no ceph relation or its data is incomplete.
        """
        if not relation_ids('ceph'):
            return {}

        log('Generating template context for ceph', level=DEBUG)
        mon_hosts = []
        auth = None
        key = None
        use_syslog = str(config('use-syslog')).lower()
        for rid in relation_ids('ceph'):
            for unit in related_units(rid):
                # auth/key are taken from whichever unit is seen last; all
                # ceph units are expected to publish the same values.
                auth = relation_get('auth', rid=rid, unit=unit)
                key = relation_get('key', rid=rid, unit=unit)
                # Prefer the dedicated ceph public address when provided.
                ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
                                             unit=unit)
                unit_priv_addr = relation_get('private-address', rid=rid,
                                              unit=unit)
                ceph_addr = ceph_pub_addr or unit_priv_addr
                ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
                mon_hosts.append(ceph_addr)

        ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
                'auth': auth,
                'key': key,
                'use_syslog': use_syslog}

        # ceph.conf lives here; make sure the directory exists before any
        # template is rendered into it.
        if not os.path.isdir('/etc/ceph'):
            os.mkdir('/etc/ceph')

        if not context_complete(ctxt):
            return {}

        ensure_packages(['ceph-common'])
        return ctxt
511
512
class HAProxyContext(OSContextGenerator):
    """Provides half a context for the haproxy template, which describes
    all peers to be included in the cluster. Each charm needs to include
    its own context generator that describes the port mapping.
    """
    interfaces = ['cluster']

    def __init__(self, singlenode_mode=False):
        # When True, haproxy is configured even without cluster peers.
        self.singlenode_mode = singlenode_mode

    def __call__(self):
        """Build the haproxy frontend/backend map from cluster relation data.

        Also enables haproxy via /etc/default/haproxy (side effect) once at
        least one frontend has enough backends.

        :returns: context dict, or {} when there are no peers and
                  singlenode_mode is off
        """
        if not relation_ids('cluster') and not self.singlenode_mode:
            return {}

        if config('prefer-ipv6'):
            addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            addr = get_host_ip(unit_get('private-address'))

        # Unit names are not valid haproxy identifiers ('/' is special).
        l_unit = local_unit().replace('/', '-')
        cluster_hosts = {}

        # NOTE(jamespage): build out map of configured network endpoints
        # and associated backends
        for addr_type in ADDRESS_TYPES:
            cfg_opt = 'os-{}-network'.format(addr_type)
            laddr = get_address_in_network(config(cfg_opt))
            if laddr:
                netmask = get_netmask_for_address(laddr)
                cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
                                                                  netmask),
                                        'backends': {l_unit: laddr}}
                for rid in relation_ids('cluster'):
                    for unit in related_units(rid):
                        _laddr = relation_get('{}-address'.format(addr_type),
                                              rid=rid, unit=unit)
                        if _laddr:
                            _unit = unit.replace('/', '-')
                            cluster_hosts[laddr]['backends'][_unit] = _laddr

        # NOTE(jamespage) add backend based on private address - this
        # with either be the only backend or the fallback if no acls
        # match in the frontend
        cluster_hosts[addr] = {}
        netmask = get_netmask_for_address(addr)
        cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
                               'backends': {l_unit: addr}}
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _laddr = relation_get('private-address',
                                      rid=rid, unit=unit)
                if _laddr:
                    _unit = unit.replace('/', '-')
                    cluster_hosts[addr]['backends'][_unit] = _laddr

        ctxt = {
            'frontends': cluster_hosts,
            'default_backend': addr
        }

        if config('haproxy-server-timeout'):
            ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')

        if config('haproxy-client-timeout'):
            ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')

        if config('prefer-ipv6'):
            ctxt['ipv6'] = True
            ctxt['local_host'] = 'ip6-localhost'
            ctxt['haproxy_host'] = '::'
            ctxt['stat_port'] = ':::8888'
        else:
            ctxt['local_host'] = '127.0.0.1'
            ctxt['haproxy_host'] = '0.0.0.0'
            ctxt['stat_port'] = ':8888'

        for frontend in cluster_hosts:
            if (len(cluster_hosts[frontend]['backends']) > 1 or
                    self.singlenode_mode):
                # Enable haproxy when we have enough peers.
                log('Ensuring haproxy enabled in /etc/default/haproxy.',
                    level=DEBUG)
                with open('/etc/default/haproxy', 'w') as out:
                    out.write('ENABLED=1\n')

                return ctxt

        log('HAProxy context is incomplete, this unit has no peers.',
            level=INFO)
        return {}
603
604
class ImageServiceContext(OSContextGenerator):
    interfaces = ['image-service']

    def __call__(self):
        """Obtains the glance API server from the image-service relation.
        Useful in nova and cinder (currently).
        """
        log('Generating template context for image-service.', level=DEBUG)
        relation_list = relation_ids('image-service')
        if not relation_list:
            return {}

        for rid in relation_list:
            for unit in related_units(rid):
                api_server = relation_get('glance-api-server',
                                          rid=rid, unit=unit)
                # First advertised server wins.
                if api_server:
                    return {'glance_api_servers': api_server}

        log("ImageService context is incomplete. Missing required relation "
            "data.", level=INFO)
        return {}
627
628
class ApacheSSLContext(OSContextGenerator):
    """Generates a context for an apache vhost configuration that configures
    HTTPS reverse proxying for one or many endpoints. Generated context
    looks something like::

        {
            'namespace': 'cinder',
            'private_address': 'iscsi.mycinderhost.com',
            'endpoints': [(8776, 8766), (8777, 8767)]
        }

    The endpoints list consists of a tuples mapping external ports
    to internal ports.
    """
    interfaces = ['https']

    # charms should inherit this context and set external ports
    # and service namespace accordingly.
    external_ports = []
    service_namespace = None

    def enable_modules(self):
        """Enable the apache modules required for SSL reverse proxying."""
        cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
        check_call(cmd)

    def configure_cert(self, cn=None):
        """Write the SSL cert/key for this service (optionally per-CN) into
        /etc/apache2/ssl/<namespace>.
        """
        ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
        mkdir(path=ssl_dir)
        cert, key = get_cert(cn)
        if cn:
            cert_filename = 'cert_{}'.format(cn)
            key_filename = 'key_{}'.format(cn)
        else:
            cert_filename = 'cert'
            key_filename = 'key'

        write_file(path=os.path.join(ssl_dir, cert_filename),
                   content=b64decode(cert))
        write_file(path=os.path.join(ssl_dir, key_filename),
                   content=b64decode(key))

    def configure_ca(self):
        """Install the CA cert provided over the identity relation, if any."""
        ca_cert = get_ca_cert()
        if ca_cert:
            install_ca_cert(b64decode(ca_cert))

    def canonical_names(self):
        """Figure out which canonical names clients will access this service.
        """
        cns = []
        for r_id in relation_ids('identity-service'):
            for unit in related_units(r_id):
                rdata = relation_get(rid=r_id, unit=unit)
                for k in rdata:
                    if k.startswith('ssl_key_'):
                        # NOTE: str.lstrip() strips a *character set*, not a
                        # prefix, and would mangle CNs such as
                        # 'ssl_key_keystone' -> 'tone'; slice the prefix
                        # off instead.
                        cns.append(k[len('ssl_key_'):])

        return sorted(list(set(cns)))

    def get_network_addresses(self):
        """For each network configured, return corresponding address and vip
        (if available).

        Returns a list of tuples of the form:

            [(address_in_net_a, vip_in_net_a),
             (address_in_net_b, vip_in_net_b),
             ...]

        or, if no vip(s) available:

            [(address_in_net_a, address_in_net_a),
             (address_in_net_b, address_in_net_b),
             ...]
        """
        addresses = []
        if config('vip'):
            vips = config('vip').split()
        else:
            vips = []

        for net_type in ['os-internal-network', 'os-admin-network',
                         'os-public-network']:
            addr = get_address_in_network(config(net_type),
                                          unit_get('private-address'))
            if len(vips) > 1 and is_clustered():
                # Multiple VIPs: pick the vip that lives in this network.
                if not config(net_type):
                    log("Multiple networks configured but net_type "
                        "is None (%s)." % net_type, level=WARNING)
                    continue

                for vip in vips:
                    if is_address_in_network(config(net_type), vip):
                        addresses.append((addr, vip))
                        break

            elif is_clustered() and config('vip'):
                addresses.append((addr, config('vip')))
            else:
                addresses.append((addr, addr))

        return sorted(addresses)

    def __call__(self):
        """Return the apache SSL vhost context, or {} when HTTPS is not
        in use or no external ports are configured.
        """
        if isinstance(self.external_ports, six.string_types):
            self.external_ports = [self.external_ports]

        if not self.external_ports or not https():
            return {}

        self.configure_ca()
        self.enable_modules()

        ctxt = {'namespace': self.service_namespace,
                'endpoints': [],
                'ext_ports': []}

        cns = self.canonical_names()
        if cns:
            for cn in cns:
                self.configure_cert(cn)
        else:
            # Expect cert/key provided in config (currently assumed that ca
            # uses ip for cn)
            cn = resolve_address(endpoint_type=INTERNAL)
            self.configure_cert(cn)

        addresses = self.get_network_addresses()
        for address, endpoint in sorted(set(addresses)):
            for api_port in self.external_ports:
                ext_port = determine_apache_port(api_port,
                                                 singlenode_mode=True)
                int_port = determine_api_port(api_port, singlenode_mode=True)
                portmap = (address, endpoint, int(ext_port), int(int_port))
                ctxt['endpoints'].append(portmap)
                ctxt['ext_ports'].append(int(ext_port))

        ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
        return ctxt
768
769
class NeutronContext(OSContextGenerator):
    """Base context for neutron/quantum configuration.

    Subclasses override the ``plugin`` / ``network_manager`` /
    ``neutron_security_groups`` properties; this class then assembles the
    plugin-specific context.
    """
    interfaces = []

    @property
    def plugin(self):
        # Neutron plugin name (eg 'ovs', 'nvp'); None disables the context.
        return None

    @property
    def network_manager(self):
        # 'neutron' or 'quantum'; None disables the context.
        return None

    @property
    def packages(self):
        return neutron_plugin_attribute(self.plugin, 'packages',
                                        self.network_manager)

    @property
    def neutron_security_groups(self):
        return None

    def _ensure_packages(self):
        # packages is a list of package lists, one per plugin component.
        for pkgs in self.packages:
            ensure_packages(pkgs)

    def _save_flag_file(self):
        """Record the active plugin so other hooks/services can find it."""
        if self.network_manager == 'quantum':
            _file = '/etc/nova/quantum_plugin.conf'
        else:
            _file = '/etc/nova/neutron_plugin.conf'

        # NOTE: must be text mode - self.plugin is a str and writing str to
        # a binary-mode file raises TypeError under Python 3.
        with open(_file, 'w') as out:
            out.write(self.plugin + '\n')

    def ovs_ctxt(self):
        """Context for the OpenVSwitch plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        # NOTE: named plugin_config to avoid shadowing the imported config()
        plugin_config = neutron_plugin_attribute(self.plugin, 'config',
                                                 self.network_manager)
        ovs_ctxt = {'core_plugin': driver,
                    'neutron_plugin': 'ovs',
                    'neutron_security_groups': self.neutron_security_groups,
                    'local_ip': unit_private_ip(),
                    'config': plugin_config}

        return ovs_ctxt

    def nuage_ctxt(self):
        """Context for the Nuage VSP plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        plugin_config = neutron_plugin_attribute(self.plugin, 'config',
                                                 self.network_manager)
        nuage_ctxt = {'core_plugin': driver,
                      'neutron_plugin': 'vsp',
                      'neutron_security_groups': self.neutron_security_groups,
                      'local_ip': unit_private_ip(),
                      'config': plugin_config}

        return nuage_ctxt

    def nvp_ctxt(self):
        """Context for the NVP/NSX plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        plugin_config = neutron_plugin_attribute(self.plugin, 'config',
                                                 self.network_manager)
        nvp_ctxt = {'core_plugin': driver,
                    'neutron_plugin': 'nvp',
                    'neutron_security_groups': self.neutron_security_groups,
                    'local_ip': unit_private_ip(),
                    'config': plugin_config}

        return nvp_ctxt

    def n1kv_ctxt(self):
        """Context for the Cisco N1KV plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
                                               self.network_manager)
        n1kv_user_config_flags = config('n1kv-config-flags')
        restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
        n1kv_ctxt = {'core_plugin': driver,
                     'neutron_plugin': 'n1kv',
                     'neutron_security_groups': self.neutron_security_groups,
                     'local_ip': unit_private_ip(),
                     'config': n1kv_config,
                     'vsm_ip': config('n1kv-vsm-ip'),
                     'vsm_username': config('n1kv-vsm-username'),
                     'vsm_password': config('n1kv-vsm-password'),
                     'restrict_policy_profiles': restrict_policy_profiles}

        if n1kv_user_config_flags:
            flags = config_flags_parser(n1kv_user_config_flags)
            n1kv_ctxt['user_config_flags'] = flags

        return n1kv_ctxt

    def calico_ctxt(self):
        """Context for the Calico plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        plugin_config = neutron_plugin_attribute(self.plugin, 'config',
                                                 self.network_manager)
        calico_ctxt = {'core_plugin': driver,
                       'neutron_plugin': 'Calico',
                       'neutron_security_groups': self.neutron_security_groups,
                       'local_ip': unit_private_ip(),
                       'config': plugin_config}

        return calico_ctxt

    def neutron_ctxt(self):
        """Common context: network manager and the neutron API URL."""
        if https():
            proto = 'https'
        else:
            proto = 'http'

        if is_clustered():
            host = config('vip')
        else:
            host = unit_get('private-address')

        ctxt = {'network_manager': self.network_manager,
                'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
        return ctxt

    def __call__(self):
        self._ensure_packages()

        if self.network_manager not in ['quantum', 'neutron']:
            return {}

        if not self.plugin:
            return {}

        ctxt = self.neutron_ctxt()

        # Layer in the plugin-specific settings.
        if self.plugin == 'ovs':
            ctxt.update(self.ovs_ctxt())
        elif self.plugin in ['nvp', 'nsx']:
            ctxt.update(self.nvp_ctxt())
        elif self.plugin == 'n1kv':
            ctxt.update(self.n1kv_ctxt())
        elif self.plugin == 'Calico':
            ctxt.update(self.calico_ctxt())
        elif self.plugin == 'vsp':
            ctxt.update(self.nuage_ctxt())

        alchemy_flags = config('neutron-alchemy-flags')
        if alchemy_flags:
            flags = config_flags_parser(alchemy_flags)
            ctxt['neutron_alchemy_flags'] = flags

        self._save_flag_file()
        return ctxt
922
923
class NeutronPortContext(OSContextGenerator):
    # Only NICs with these name prefixes are considered for resolution.
    NIC_PREFIXES = ['eth', 'bond']

    def resolve_ports(self, ports):
        """Resolve NICs not yet bound to bridge(s).

        If hwaddress provided then returns resolved hwaddress otherwise NIC.
        """
        if not ports:
            return None

        # Map each known NIC's hardware address to its name and to the
        # IP addresses currently assigned to it.
        hwaddr_to_nic = {}
        hwaddr_to_ip = {}
        for nic in list_nics(self.NIC_PREFIXES):
            mac = get_nic_hwaddr(nic)
            hwaddr_to_nic[mac] = nic
            nic_addresses = get_ipv4_addr(nic, fatal=False)
            nic_addresses += get_ipv6_addr(iface=nic, fatal=False)
            hwaddr_to_ip[mac] = nic_addresses

        mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
        resolved = []
        for entry in ports:
            if not re.match(mac_regex, entry):
                # If the passed entry is not a MAC address, assume it's a
                # valid interface, and that the user put it there on purpose
                # (we can trust it to be the real external network).
                resolved.append(entry)
                continue

            # Entry is a MAC address: only usable if it maps to a known NIC
            # that does NOT have an IP address assigned yet and is not part
            # of a bridge.
            if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
                if not is_bridge_member(hwaddr_to_nic[entry]):
                    resolved.append(hwaddr_to_nic[entry])

        return resolved
964
965
class OSConfigFlagContext(OSContextGenerator):
    """Provides support for user-defined config flags.

    Users can define a comma-separated list of key=value pairs
    in the charm configuration and apply them at any point in
    any file by using a template flag.

    Sometimes users might want config flags inserted within a
    specific section so this class allows users to specify the
    template flag name, allowing for multiple template flags
    (sections) within the same context.

    NOTE: the value of config-flags may be a comma-separated list of
          key=value pairs and some Openstack config files support
          comma-separated lists as values.
    """

    def __init__(self, charm_flag='config-flags',
                 template_flag='user_config_flags'):
        """
        :param charm_flag: config flags in charm configuration.
        :param template_flag: insert point for user-defined flags in template
                              file.
        """
        super(OSConfigFlagContext, self).__init__()
        self._charm_flag = charm_flag
        self._template_flag = template_flag

    def __call__(self):
        user_flags = config(self._charm_flag)
        if not user_flags:
            return {}

        return {self._template_flag: config_flags_parser(user_flags)}
1001
1002
class SubordinateConfigContext(OSContextGenerator):

    """
    Responsible for inspecting relations to subordinates that
    may be exporting required config via a json blob.

    The subordinate interface allows subordinates to export their
    configuration requirements to the principal for multiple config
    files and multiple services. Ie, a subordinate that has interfaces
    to both glance and nova may export to following yaml blob as json::

        glance:
            /etc/glance/glance-api.conf:
                sections:
                    DEFAULT:
                        - [key1, value1]
            /etc/glance/glance-registry.conf:
                    MYSECTION:
                        - [key2, value2]
        nova:
            /etc/nova/nova.conf:
                sections:
                    DEFAULT:
                        - [key3, value3]


    It is then up to the principal charms to subscribe this context to
    the service+config file it is interested in. Configuration data will
    be available in the template context, in glance's case, as::

        ctxt = {
            ... other context ...
            'subordinate_config': {
                'DEFAULT': {
                    'key1': 'value1',
                },
                'MYSECTION': {
                    'key2': 'value2',
                },
            }
        }
    """

    def __init__(self, service, config_file, interface):
        """
        :param service    : Service name key to query in any subordinate
                            data found
        :param config_file : Service's config file to query sections
        :param interface  : Subordinate interface to inspect
        """
        self.service = service
        self.config_file = config_file
        self.interface = interface

    def __call__(self):
        """Collect the exported sections for this service/config file from
        all units on the subordinate interface.
        """
        ctxt = {'sections': {}}
        for rid in relation_ids(self.interface):
            for unit in related_units(rid):
                sub_config = relation_get('subordinate_configuration',
                                          rid=rid, unit=unit)
                if sub_config and sub_config != '':
                    try:
                        sub_config = json.loads(sub_config)
                    # json.loads raises ValueError (JSONDecodeError) on bad
                    # input - catch that rather than a bare except, which
                    # would also swallow e.g. KeyboardInterrupt.
                    except ValueError:
                        log('Could not parse JSON from subordinate_config '
                            'setting from %s' % rid, level=ERROR)
                        continue

                    if self.service not in sub_config:
                        log('Found subordinate_config on %s but it contained '
                            'nothing for %s service' % (rid, self.service),
                            level=INFO)
                        continue

                    sub_config = sub_config[self.service]
                    if self.config_file not in sub_config:
                        log('Found subordinate_config on %s but it contained '
                            'nothing for %s' % (rid, self.config_file),
                            level=INFO)
                        continue

                    sub_config = sub_config[self.config_file]
                    for k, v in six.iteritems(sub_config):
                        if k == 'sections':
                            for section, config_dict in six.iteritems(v):
                                log("adding section '%s'" % (section),
                                    level=DEBUG)
                                ctxt[k][section] = config_dict
                        else:
                            ctxt[k] = v

        log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
        return ctxt
1096
1097
class LogLevelContext(OSContextGenerator):
    """Expose the charm's 'debug' and 'verbose' options, treating an
    unset option as False."""

    def __call__(self):
        debug = config('debug')
        verbose = config('verbose')
        return {
            'debug': False if debug is None else debug,
            'verbose': False if verbose is None else verbose,
        }
1108
1109
class SyslogContext(OSContextGenerator):
    """Expose whether services should log via syslog."""

    def __call__(self):
        return {'use_syslog': config('use-syslog')}
1115
1116
class BindHostContext(OSContextGenerator):
    """Provide the wildcard bind address matching the IP version in use."""

    def __call__(self):
        # Bind the IPv6 wildcard when prefer-ipv6 is set, IPv4 otherwise.
        host = '::' if config('prefer-ipv6') else '0.0.0.0'
        return {'bind_host': host}
1124
1125
class WorkerConfigContext(OSContextGenerator):
    """Compute the number of worker processes for API services from the
    CPU count and the 'worker-multiplier' charm option."""

    @property
    def num_cpus(self):
        """Number of CPUs on this unit, installing psutil on demand."""
        # psutil is only needed here, so install it lazily on first use.
        try:
            import psutil
        except ImportError:
            apt_install('python-psutil', fatal=True)
            import psutil

        try:
            # psutil >= 2.0 API.
            return psutil.cpu_count()
        except AttributeError:
            # psutil < 2.0 exposed the count as a module constant; the
            # constant was removed in 2.0, hence the dual lookup.
            return psutil.NUM_CPUS

    def __call__(self):
        # An unset multiplier yields 0 workers, i.e. the service default.
        multiplier = config('worker-multiplier') or 0
        return {"workers": self.num_cpus * multiplier}
1142
1143
class ZeroMQContext(OSContextGenerator):
    """Collect ZeroMQ connection details from the zeromq-configuration
    relation."""

    interfaces = ['zeromq-configuration']

    def __call__(self):
        ctxt = {}
        if not is_relation_made('zeromq-configuration', 'host'):
            return ctxt

        for rid in relation_ids('zeromq-configuration'):
            for unit in related_units(rid):
                # The last unit scanned wins, matching the original
                # relation-walk behaviour.
                ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
                ctxt['zmq_host'] = relation_get('host', unit, rid)
                ctxt['zmq_redis_address'] = relation_get(
                    'zmq_redis_address', unit, rid)

        return ctxt
1158
1159
class NotificationDriverContext(OSContextGenerator):
    """Report whether notifications should be enabled, based on the
    presence of an amqp relation."""

    def __init__(self, zmq_relation='zeromq-configuration',
                 amqp_relation='amqp'):
        """
        :param zmq_relation: Name of Zeromq relation to check
        :param amqp_relation: Name of the amqp relation to check
        """
        self.zmq_relation = zmq_relation
        self.amqp_relation = amqp_relation

    def __call__(self):
        # Values are the strings 'True'/'False' as consumed by templates.
        enabled = is_relation_made(self.amqp_relation)
        return {'notifications': 'True' if enabled else 'False'}
1176
1177
class SysctlContext(OSContextGenerator):
    """This context check if the 'sysctl' option exists on configuration
    then creates a file with the loaded contents"""

    def __call__(self):
        settings = config('sysctl')
        if settings:
            # Snippet is named after the charm so multiple charms on one
            # machine do not clobber each other.
            path = '/etc/sysctl.d/50-{0}.conf'.format(charm_name())
            sysctl_create(settings, path)
        return {'sysctl': settings}
1187
1188
class NeutronAPIContext(OSContextGenerator):
    '''
    Inspects current neutron-plugin-api relation for neutron settings. Return
    defaults if it is not present.
    '''
    interfaces = ['neutron-plugin-api']

    def __init__(self):
        # Map of context key -> relation key and fallback default. Built
        # once here (rather than on every __call__) so that
        # get_neutron_options() also works before the context is called.
        self.neutron_defaults = {
            'l2_population': {
                'rel_key': 'l2-population',
                'default': False,
            },
            'overlay_network_type': {
                'rel_key': 'overlay-network-type',
                'default': 'gre',
            },
            'neutron_security_groups': {
                'rel_key': 'neutron-security-groups',
                'default': False,
            },
            'network_device_mtu': {
                'rel_key': 'network-device-mtu',
                'default': None,
            },
            'enable_dvr': {
                'rel_key': 'enable-dvr',
                'default': False,
            },
            'enable_l3ha': {
                'rel_key': 'enable-l3ha',
                'default': False,
            },
        }

    def __call__(self):
        ctxt = self.get_neutron_options({})
        for rid in relation_ids('neutron-plugin-api'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                # 'l2-population' is used as a marker that neutron-api has
                # published a complete set of settings on this relation.
                if 'l2-population' in rdata:
                    ctxt.update(self.get_neutron_options(rdata))

        return ctxt

    def get_neutron_options(self, rdata):
        """Resolve each known option from relation data.

        :param rdata: dict of relation data (may be empty).
        :returns: dict of context key -> value, falling back to the
                  defaults table; boolean options are parsed from their
                  relation string form.
        """
        settings = {}
        for nkey in self.neutron_defaults:
            defv = self.neutron_defaults[nkey]['default']
            rkey = self.neutron_defaults[nkey]['rel_key']
            if rkey in rdata:
                # isinstance rather than `type(...) is bool` per idiom.
                if isinstance(defv, bool):
                    settings[nkey] = bool_from_string(rdata[rkey])
                else:
                    settings[nkey] = rdata[rkey]
            else:
                settings[nkey] = defv
        return settings
1245
1246
class ExternalPortContext(NeutronPortContext):
    """Resolve the configured 'ext-port' to a usable NIC and attach the
    MTU advertised over the neutron-plugin-api relation, if any."""

    def __call__(self):
        raw = config('ext-port')
        if not raw:
            return {}

        candidates = [entry.strip() for entry in raw.split()]
        resolved = self.resolve_ports(candidates)
        if not resolved:
            return {}

        # Only the first resolved port is used as the external port.
        ctxt = {"ext_port": resolved[0]}
        mtu = NeutronAPIContext()().get('network_device_mtu')
        if mtu:
            ctxt['ext_port_mtu'] = mtu
        return ctxt
1263
1264
class DataPortContext(NeutronPortContext):
    """Map each configured bridge to a resolved NIC.

    'data-port' entries may name a NIC directly or give its MAC address;
    both forms are normalised to the resolved NIC name.
    """

    def __call__(self):
        raw = config('data-port')
        if not raw:
            return None

        bridge_map = parse_data_port_mappings(raw)
        requested = bridge_map.values()
        resolved = self.resolve_ports(requested)
        if not resolved:
            return None

        # Keyed by whatever identifier the user supplied: the NIC name
        # itself, or the NIC's MAC address for MAC-style entries.
        by_identifier = {}
        for nic in resolved:
            if nic in requested:
                by_identifier[nic] = nic
            else:
                by_identifier[get_nic_hwaddr(nic)] = nic

        return {bridge: by_identifier[ident]
                for bridge, ident in six.iteritems(bridge_map)
                if ident in by_identifier}
1282
1283
class PhyNICMTUContext(DataPortContext):
    """Provide the data-port NICs together with the MTU they should be
    configured with, as advertised by neutron-api."""

    def __call__(self):
        ctxt = {}
        bridge_map = super(PhyNICMTUContext, self).__call__()
        if bridge_map and bridge_map.values():
            mtu = NeutronAPIContext()().get('network_device_mtu')
            if mtu:
                # A literal backslash-n separator: the consuming template
                # expands it into one device per line.
                ctxt["devs"] = '\\n'.join(bridge_map.values())
                ctxt['mtu'] = mtu

        return ctxt
1298
1299
class NetworkServiceContext(OSContextGenerator):
    """Pull quantum/keystone service details from the network-service
    relation, returning the first complete set found."""

    def __init__(self, rel_name='quantum-network-service'):
        """
        :param rel_name: name of the relation to inspect.
        """
        self.rel_name = rel_name
        self.interfaces = [rel_name]

    def __call__(self):
        passthrough_keys = ['keystone_host', 'service_port', 'auth_port',
                            'service_tenant', 'service_username',
                            'service_password', 'quantum_host',
                            'quantum_port', 'quantum_url', 'region']
        for rid in relation_ids(self.rel_name):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                ctxt = {key: rdata.get(key) for key in passthrough_keys}
                # Protocols default to plain http when unset.
                ctxt['service_protocol'] = (rdata.get('service_protocol') or
                                            'http')
                ctxt['auth_protocol'] = rdata.get('auth_protocol') or 'http'
                if context_complete(ctxt):
                    return ctxt
        return {}
13290
=== removed directory 'hooks/charmhelpers/contrib/openstack/files'
=== removed file 'hooks/charmhelpers/contrib/openstack/files/__init__.py'
--- hooks/charmhelpers/contrib/openstack/files/__init__.py 2015-02-19 05:17:57 +0000
+++ hooks/charmhelpers/contrib/openstack/files/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,18 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17# dummy __init__.py to fool syncer into thinking this is a syncable python
18# module
190
=== removed file 'hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh'
--- hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh 2015-02-25 23:34:09 +0000
+++ hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh 1970-01-01 00:00:00 +0000
@@ -1,32 +0,0 @@
#!/bin/bash
#--------------------------------------------
# This file is managed by Juju
#--------------------------------------------
#
# Copyright 2009,2012 Canonical Ltd.
# Author: Tom Haddon

# Nagios check: every haproxy backend server must report active/backup
# on the stats page; exits 2 (CRITICAL) listing the servers that do not.

CRITICAL=0
NOTACTIVE=''
LOGFILE=/var/log/nagios/check_haproxy.log
AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')

# Fixed quoting: was awk '{print $2'} which only worked by accidental
# concatenation of the quoted fragment and the bare closing brace.
for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2}');
do
    output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK')
    if [ $? != 0 ]; then
        date >> $LOGFILE
        echo $output >> $LOGFILE
        /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1
        CRITICAL=1
        NOTACTIVE="${NOTACTIVE} $appserver"
    fi
done

if [ $CRITICAL = 1 ]; then
    echo "CRITICAL:${NOTACTIVE}"
    exit 2
fi

echo "OK: All haproxy instances looking good"
exit 0
330
=== removed file 'hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh'
--- hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh 2015-02-25 23:34:09 +0000
+++ hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh 1970-01-01 00:00:00 +0000
@@ -1,30 +0,0 @@
#!/bin/bash
#--------------------------------------------
# This file is managed by Juju
#--------------------------------------------
#
# Copyright 2009,2012 Canonical Ltd.
# Author: Tom Haddon

# Nagios check: alert when any haproxy backend's current or max queue
# depth exceeds the thresholds below.

# These should be config options at some stage
CURRQthrsh=0
MAXQthrsh=100

AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')

# Fetch the stats page in CSV form once; parsed repeatedly below.
HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)

for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}')
do
    # CSV columns 3 and 4 are the current and max queue depths.
    CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3)
    MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4)

    if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then
        echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ"
        exit 2
    fi
done

echo "OK: All haproxy queue depths looking good"
exit 0
30
310
=== removed file 'hooks/charmhelpers/contrib/openstack/ip.py'
--- hooks/charmhelpers/contrib/openstack/ip.py 2015-03-31 15:13:53 +0000
+++ hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
@@ -1,146 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17from charmhelpers.core.hookenv import (
18 config,
19 unit_get,
20)
21from charmhelpers.contrib.network.ip import (
22 get_address_in_network,
23 is_address_in_network,
24 is_ipv6,
25 get_ipv6_addr,
26)
27from charmhelpers.contrib.hahelpers.cluster import is_clustered
28
29from functools import partial
30
# Endpoint scope identifiers.
PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'

# Per-scope charm config option used for net splits, plus the unit
# address to fall back to when no split is configured.
ADDRESS_MAP = {
    PUBLIC: {
        'config': 'os-public-network',
        'fallback': 'public-address',
    },
    INTERNAL: {
        'config': 'os-internal-network',
        'fallback': 'private-address',
    },
    ADMIN: {
        'config': 'os-admin-network',
        'fallback': 'private-address',
    },
}
49
50
def canonical_url(configs, endpoint_type=PUBLIC):
    """Returns the correct HTTP URL to this host given the state of HTTPS
    configuration, hacluster and charm configuration.

    :param configs: OSTemplateRenderer config templating object to inspect
                    for a complete https context.
    :param endpoint_type: str endpoint type to resolve.
    :param returns: str base URL for services on the current service unit.
    """
    https = 'https' in configs.complete_contexts()
    scheme = 'https' if https else 'http'
    address = resolve_address(endpoint_type)
    if is_ipv6(address):
        # Bracket IPv6 literals so a port can be appended unambiguously.
        address = "[{}]".format(address)
    return '%s://%s' % (scheme, address)
67
68
def resolve_address(endpoint_type=PUBLIC):
    """Return unit address depending on net config.

    If unit is clustered with vip(s) and has net splits defined, return vip on
    correct network. If clustered with no nets defined, return primary vip.

    If not clustered, return unit address ensuring address is on configured net
    split if one is configured.

    :param endpoint_type: Network endpoint type (PUBLIC, INTERNAL or ADMIN).
    :raises ValueError: if no suitable address can be determined.
    """
    resolved_address = None
    vips = config('vip')
    if vips:
        vips = vips.split()

    net_type = ADDRESS_MAP[endpoint_type]['config']
    net_addr = config(net_type)
    net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
    clustered = is_clustered()
    if clustered:
        # Guard against a missing 'vip' option: previously this raised a
        # bare TypeError (None[0] / iterating None) instead of reaching
        # the diagnostic ValueError below.
        if not net_addr:
            # If no net-splits defined, we expect a single vip
            if vips:
                resolved_address = vips[0]
        elif vips:
            for vip in vips:
                if is_address_in_network(net_addr, vip):
                    resolved_address = vip
                    break
    else:
        if config('prefer-ipv6'):
            fallback_addr = get_ipv6_addr(exc_list=vips)[0]
        else:
            fallback_addr = unit_get(net_fallback)

        resolved_address = get_address_in_network(net_addr, fallback_addr)

    if resolved_address is None:
        raise ValueError("Unable to resolve a suitable IP address based on "
                         "charm state and configuration. (net_type=%s, "
                         "clustered=%s)" % (net_type, clustered))

    return resolved_address
112
113
def endpoint_url(configs, url_template, port, endpoint_type=PUBLIC,
                 override=None):
    """Returns the correct endpoint URL to advertise to Keystone.

    This method provides the correct endpoint URL which should be advertised
    to the keystone charm for endpoint creation. It allows the url to be
    overridden, forcing a keystone endpoint to have a specific URL for any
    of the defined scopes (admin, internal, public).

    :param configs: OSTemplateRenderer config templating object to inspect
                    for a complete https context.
    :param url_template: str format string for creating the url template.
                         Only two values will be passed - the scheme+hostname
                         returned by the canonical_url and the port.
    :param endpoint_type: str endpoint type to resolve.
    :param override: str the name of the config option which overrides the
                     endpoint URL defined by the charm itself. None will
                     disable any overrides (default).
    """
    if override:
        # Return any user-defined overrides for the keystone endpoint URL.
        user_value = config(override)
        if user_value:
            return user_value.strip()

    base = canonical_url(configs, endpoint_type)
    return url_template % (base, port)
140
141
# Convenience wrappers binding endpoint_url to each endpoint scope.
public_endpoint = partial(endpoint_url, endpoint_type=PUBLIC)
internal_endpoint = partial(endpoint_url, endpoint_type=INTERNAL)
admin_endpoint = partial(endpoint_url, endpoint_type=ADMIN)
1470
=== removed file 'hooks/charmhelpers/contrib/openstack/neutron.py'
--- hooks/charmhelpers/contrib/openstack/neutron.py 2015-04-16 20:07:38 +0000
+++ hooks/charmhelpers/contrib/openstack/neutron.py 1970-01-01 00:00:00 +0000
@@ -1,322 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17# Various utilies for dealing with Neutron and the renaming from Quantum.
18
19import six
20from subprocess import check_output
21
22from charmhelpers.core.hookenv import (
23 config,
24 log,
25 ERROR,
26)
27
28from charmhelpers.contrib.openstack.utils import os_release
29
30
def headers_package():
    """Ensures correct linux-headers for running kernel are installed,
    for building DKMS package"""
    kernel_release = check_output(['uname', '-r']).decode('UTF-8')
    return 'linux-headers-%s' % kernel_release.strip()
36
# Legacy (pre-Neutron-rename) configuration directory.
QUANTUM_CONF_DIR = '/etc/quantum'
38
39
40def kernel_version():
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches