Merge lp:~corey.bryant/charms/trusty/quantum-gateway/end-of-life into lp:~openstack-charmers/charms/trusty/quantum-gateway/next

Proposed by Corey Bryant
Status: Merged
Merged at revision: 114
Proposed branch: lp:~corey.bryant/charms/trusty/quantum-gateway/end-of-life
Merge into: lp:~openstack-charmers/charms/trusty/quantum-gateway/next
Diff against target: 15436 lines (+344/-14080)
138 files modified
.bzrignore (+0/-1)
.coveragerc (+0/-6)
.project (+0/-17)
.pydevproject (+0/-9)
Makefile (+1/-13)
README.md (+1/-207)
actions.yaml (+0/-2)
actions/git_reinstall.py (+0/-45)
charm-helpers-hooks.yaml (+0/-9)
charm-helpers-tests.yaml (+0/-5)
files/NeutronAgentMon (+0/-155)
files/neutron-ha-monitor.conf (+0/-4)
files/neutron-ha-monitor.py (+0/-436)
hooks/charmhelpers/contrib/__init__.py (+0/-15)
hooks/charmhelpers/contrib/charmsupport/__init__.py (+0/-15)
hooks/charmhelpers/contrib/charmsupport/nrpe.py (+0/-360)
hooks/charmhelpers/contrib/charmsupport/volumes.py (+0/-175)
hooks/charmhelpers/contrib/hahelpers/__init__.py (+0/-15)
hooks/charmhelpers/contrib/hahelpers/apache.py (+0/-82)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+0/-272)
hooks/charmhelpers/contrib/network/__init__.py (+0/-15)
hooks/charmhelpers/contrib/network/ip.py (+0/-450)
hooks/charmhelpers/contrib/network/ovs/__init__.py (+0/-96)
hooks/charmhelpers/contrib/network/ufw.py (+0/-276)
hooks/charmhelpers/contrib/openstack/__init__.py (+0/-15)
hooks/charmhelpers/contrib/openstack/alternatives.py (+0/-33)
hooks/charmhelpers/contrib/openstack/amulet/__init__.py (+0/-15)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+0/-146)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+0/-294)
hooks/charmhelpers/contrib/openstack/context.py (+0/-1328)
hooks/charmhelpers/contrib/openstack/files/__init__.py (+0/-18)
hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh (+0/-32)
hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh (+0/-30)
hooks/charmhelpers/contrib/openstack/ip.py (+0/-146)
hooks/charmhelpers/contrib/openstack/neutron.py (+0/-322)
hooks/charmhelpers/contrib/openstack/templates/__init__.py (+0/-18)
hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+0/-15)
hooks/charmhelpers/contrib/openstack/templates/git.upstart (+0/-17)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+0/-58)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend (+0/-24)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf (+0/-24)
hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken (+0/-9)
hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo (+0/-22)
hooks/charmhelpers/contrib/openstack/templates/section-zeromq (+0/-14)
hooks/charmhelpers/contrib/openstack/templating.py (+0/-295)
hooks/charmhelpers/contrib/openstack/utils.py (+0/-642)
hooks/charmhelpers/contrib/python/__init__.py (+0/-15)
hooks/charmhelpers/contrib/python/debug.py (+0/-56)
hooks/charmhelpers/contrib/python/packages.py (+0/-96)
hooks/charmhelpers/contrib/python/rpdb.py (+0/-58)
hooks/charmhelpers/contrib/python/version.py (+0/-34)
hooks/charmhelpers/contrib/storage/__init__.py (+0/-15)
hooks/charmhelpers/contrib/storage/linux/__init__.py (+0/-15)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+0/-444)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+0/-78)
hooks/charmhelpers/contrib/storage/linux/lvm.py (+0/-105)
hooks/charmhelpers/contrib/storage/linux/utils.py (+0/-70)
hooks/charmhelpers/core/hookenv.py (+232/-38)
hooks/charmhelpers/core/host.py (+56/-12)
hooks/charmhelpers/core/services/base.py (+43/-19)
hooks/charmhelpers/core/services/helpers.py (+2/-2)
hooks/charmhelpers/fetch/__init__.py (+0/-439)
hooks/charmhelpers/fetch/archiveurl.py (+0/-161)
hooks/charmhelpers/fetch/bzrurl.py (+0/-78)
hooks/charmhelpers/fetch/giturl.py (+0/-71)
hooks/charmhelpers/payload/__init__.py (+0/-17)
hooks/charmhelpers/payload/execd.py (+0/-66)
hooks/quantum_contexts.py (+0/-193)
hooks/quantum_hooks.py (+7/-327)
hooks/quantum_utils.py (+0/-1153)
metadata.yaml (+2/-0)
setup.cfg (+0/-5)
templates/ext-port.conf (+0/-16)
templates/folsom/dhcp_agent.ini (+0/-10)
templates/folsom/l3_agent.ini (+0/-8)
templates/folsom/metadata_agent.ini (+0/-12)
templates/folsom/nova.conf (+0/-26)
templates/folsom/ovs_quantum_plugin.ini (+0/-8)
templates/folsom/quantum.conf (+0/-15)
templates/git/cron.d/neutron-dhcp-agent-netns-cleanup (+0/-4)
templates/git/cron.d/neutron-l3-agent-netns-cleanup (+0/-4)
templates/git/cron.d/neutron-lbaas-agent-netns-cleanup (+0/-4)
templates/git/neutron_sudoers (+0/-4)
templates/git/upstart/neutron-agent.upstart (+0/-25)
templates/git/upstart/neutron-ovs-cleanup.upstart (+0/-13)
templates/git/upstart/neutron-server.upstart (+0/-22)
templates/grizzly/nova.conf (+0/-22)
templates/grizzly/quantum.conf (+0/-11)
templates/havana/dhcp_agent.ini (+0/-27)
templates/havana/dnsmasq.conf (+0/-3)
templates/havana/fwaas_driver.ini (+0/-7)
templates/havana/l3_agent.ini (+0/-24)
templates/havana/lbaas_agent.ini (+0/-8)
templates/havana/metadata_agent.ini (+0/-18)
templates/havana/metering_agent.ini (+0/-10)
templates/havana/neutron.conf (+0/-16)
templates/havana/nova.conf (+0/-27)
templates/havana/ovs_neutron_plugin.ini (+0/-14)
templates/havana/vpn_agent.ini (+0/-8)
templates/icehouse/metadata_agent.ini (+0/-19)
templates/icehouse/ml2_conf.ini (+0/-35)
templates/icehouse/neutron.conf (+0/-18)
templates/juno/l3_agent.ini (+0/-25)
templates/juno/ml2_conf.ini (+0/-36)
templates/kilo/fwaas_driver.ini (+0/-8)
templates/kilo/lbaas_agent.ini (+0/-13)
templates/kilo/neutron.conf (+0/-25)
templates/kilo/nova.conf (+0/-33)
templates/kilo/vpn_agent.ini (+0/-9)
templates/os-charm-phy-nic-mtu.conf (+0/-22)
templates/parts/database (+0/-1)
templates/parts/rabbitmq (+0/-21)
tests/00-setup (+0/-11)
tests/014-basic-precise-icehouse (+0/-11)
tests/015-basic-trusty-icehouse (+0/-9)
tests/016-basic-trusty-juno (+0/-11)
tests/017-basic-trusty-kilo (+0/-11)
tests/018-basic-utopic-juno (+0/-9)
tests/019-basic-vivid-kilo (+0/-9)
tests/050-basic-trusty-icehouse-git (+0/-9)
tests/051-basic-trusty-juno-git (+0/-12)
tests/README (+0/-53)
tests/basic_deployment.py (+0/-656)
tests/charmhelpers/__init__.py (+0/-38)
tests/charmhelpers/contrib/__init__.py (+0/-15)
tests/charmhelpers/contrib/amulet/__init__.py (+0/-15)
tests/charmhelpers/contrib/amulet/deployment.py (+0/-93)
tests/charmhelpers/contrib/amulet/utils.py (+0/-323)
tests/charmhelpers/contrib/openstack/__init__.py (+0/-15)
tests/charmhelpers/contrib/openstack/amulet/__init__.py (+0/-15)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+0/-146)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+0/-294)
unit_tests/__init__.py (+0/-4)
unit_tests/test_actions_git_reinstall.py (+0/-105)
unit_tests/test_quantum_contexts.py (+0/-274)
unit_tests/test_quantum_hooks.py (+0/-370)
unit_tests/test_quantum_utils.py (+0/-1077)
unit_tests/test_utils.py (+0/-100)
To merge this branch: bzr merge lp:~corey.bryant/charms/trusty/quantum-gateway/end-of-life
Reviewer Review Type Date Requested Status
James Page Approve
Review via email: mp+265035@code.launchpad.net
To post a comment you must log in.
Revision history for this message
Corey Bryant (corey.bryant) wrote :

We need to let users know they shouldn't use the quantum-gateway charm. This patch uses status-set to let users know it's EOL and they should use the neutron-gateway charm instead.

The two options I debated were:
 1) sys.exit(1) in install hook (doesn't allow you to set a status message; could set log message but user has to look in log)
 2) set status to 'blocked' with corresponding message* (doesn't fail the deployment but displays a clear message to 'juju status')

* status-set can only set the state to 'maintenance', 'blocked', 'waiting', or 'active'.

I went with option 2 in this patch, so deployment will look successful and 'juju status' will show the following:

'status-get'
valid_states = ['maintenance', 'blocked', 'waiting', 'active']
  neutron-gateway:
    charm: local:trusty/quantum-gateway-64
    exposed: false
    service-status:
      current: blocked
      message: Charm has reached end-of-life. Please use neutron-gateway charm.
      since: 16 Jul 2015 18:11:14Z
    relations:
      amqp:
      - rabbitmq-server
      cluster:
      - neutron-gateway
      neutron-plugin-api:
      - neutron-api
      quantum-network-service:
      - nova-cloud-controller
      shared-db:
      - mysql
    units:
      neutron-gateway/0:
        workload-status:
          current: blocked
          message: Charm has reached end-of-life. Please use neutron-gateway charm.
          since: 16 Jul 2015 18:11:14Z
        agent-status:
          current: idle
          since: 16 Jul 2015 18:17:59Z
          version: 1.24.2
        agent-state: started
        agent-version: 1.24.2
        machine: "12"
        public-address: 10.5.3.37

Revision history for this message
Corey Bryant (corey.bryant) wrote :

I also stripped out any functionality from the charm.

115. By Corey Bryant

quantum-gateway charm has reached end-of-life

Strip all functionality from charm and issue status message
reporting end-of-life and pointing users to neutron-gateway charm.

Revision history for this message
James Page (james-page) wrote :

Aside from the amendment to the README I think this is OK.

review: Approve
Revision history for this message
Corey Bryant (corey.bryant) wrote :

James, Good point. I responded below. I hit a bug testing the charm upgrade and proposed a fix to charm-helpers.

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file '.bzrignore'
2--- .bzrignore 2015-04-10 14:22:04 +0000
3+++ .bzrignore 2015-07-16 19:59:55 +0000
4@@ -1,3 +1,2 @@
5 bin
6-.coverage
7 tags
8
9=== removed file '.coveragerc'
10--- .coveragerc 2013-07-19 09:46:25 +0000
11+++ .coveragerc 1970-01-01 00:00:00 +0000
12@@ -1,6 +0,0 @@
13-[report]
14-# Regexes for lines to exclude from consideration
15-exclude_lines =
16- if __name__ == .__main__.:
17-include=
18- hooks/quantum_*
19
20=== removed file '.project'
21--- .project 2012-12-06 10:22:24 +0000
22+++ .project 1970-01-01 00:00:00 +0000
23@@ -1,17 +0,0 @@
24-<?xml version="1.0" encoding="UTF-8"?>
25-<projectDescription>
26- <name>quantum-gateway</name>
27- <comment></comment>
28- <projects>
29- </projects>
30- <buildSpec>
31- <buildCommand>
32- <name>org.python.pydev.PyDevBuilder</name>
33- <arguments>
34- </arguments>
35- </buildCommand>
36- </buildSpec>
37- <natures>
38- <nature>org.python.pydev.pythonNature</nature>
39- </natures>
40-</projectDescription>
41
42=== removed file '.pydevproject'
43--- .pydevproject 2013-07-19 09:46:25 +0000
44+++ .pydevproject 1970-01-01 00:00:00 +0000
45@@ -1,9 +0,0 @@
46-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
47-<?eclipse-pydev version="1.0"?><pydev_project>
48-<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
49-<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
50-<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
51-<path>/quantum-gateway/hooks</path>
52-<path>/quantum-gateway/unit_tests</path>
53-</pydev_pathproperty>
54-</pydev_project>
55
56=== modified file 'Makefile'
57--- Makefile 2015-04-16 21:32:07 +0000
58+++ Makefile 2015-07-16 19:59:55 +0000
59@@ -2,13 +2,9 @@
60 PYTHON := /usr/bin/env python
61
62 lint:
63- @flake8 --exclude hooks/charmhelpers actions hooks unit_tests tests
64+ @flake8 --exclude hooks/charmhelpers hooks
65 @charm proof
66
67-unit_test:
68- @echo Starting unit tests...
69- @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
70-
71 bin/charm_helpers_sync.py:
72 @mkdir -p bin
73 @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
74@@ -16,14 +12,6 @@
75
76 sync: bin/charm_helpers_sync.py
77 @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
78- @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
79-
80-test:
81- @echo Starting Amulet tests...
82- # coreycb note: The -v should only be temporary until Amulet sends
83- # raise_status() messages to stderr:
84- # https://bugs.launchpad.net/amulet/+bug/1320357
85- @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700
86
87 publish: lint unit_test
88 bzr push lp:charms/quantum-gateway
89
90=== modified file 'README.md'
91--- README.md 2015-05-01 10:33:20 +0000
92+++ README.md 2015-07-16 19:59:55 +0000
93@@ -1,210 +1,4 @@
94 Overview
95 --------
96
97-Neutron provides flexible software defined networking (SDN) for OpenStack.
98-
99-This charm is designed to be used in conjunction with the rest of the OpenStack
100-related charms in the charm store to virtualize the network that Nova Compute
101-instances plug into.
102-
103-It's designed as a replacement for nova-network; however it does not yet
104-support all of the features of nova-network (such as multihost) so may not
105-be suitable for all.
106-
107-Neutron supports a rich plugin/extension framework for proprietary networking
108-solutions and supports (in core) Nicira NVP, NEC, Cisco and others...
109-
110-The OpenStack charms currently only support the fully free Open vSwitch plugin
111-and implements the 'Provider Router with Private Networks' use case.
112-
113-See the upstream [Neutron documentation](http://docs.openstack.org/trunk/openstack-network/admin/content/use_cases_single_router.html)
114-for more details.
115-
116-
117-Usage
118------
119-
120-In order to use Neutron with Openstack, you will need to deploy the
121-nova-compute and nova-cloud-controller charms with the network-manager
122-configuration set to 'Neutron':
123-
124- nova-cloud-controller:
125- network-manager: Neutron
126-
127-This decision must be made prior to deploying Openstack with Juju as
128-Neutron is deployed baked into these charms from install onwards:
129-
130- juju deploy nova-compute
131- juju deploy --config config.yaml nova-cloud-controller
132- juju add-relation nova-compute nova-cloud-controller
133-
134-The Neutron Gateway can then be added to the deployment:
135-
136- juju deploy quantum-gateway
137- juju add-relation quantum-gateway mysql
138- juju add-relation quantum-gateway rabbitmq-server
139- juju add-relation quantum-gateway nova-cloud-controller
140-
141-The gateway provides two key services; L3 network routing and DHCP services.
142-
143-These are both required in a fully functional Neutron Openstack deployment.
144-
145-See upstream [Neutron multi extnet](http://docs.openstack.org/trunk/config-reference/content/adv_cfg_l3_agent_multi_extnet.html)
146-
147-Configuration Options
148----------------------
149-
150-External Port Configuration
151-===========================
152-
153-If the port to be used for external traffic is consistent across all physical
154-servers then it can be specified by simply setting ext-port to the nic id:
155-
156- quantum-gateway:
157- ext-port: eth2
158-
159-However, if it varies between hosts then the mac addresses of the external
160-nics for each host can be passed as a space-separated list:
161-
162- quantum-gateway:
163- ext-port: <MAC ext port host 1> <MAC ext port host 2> <MAC ext port host 3>
164-
165-
166-Multiple Floating Pools
167-=======================
168-
169-If multiple floating pools are needed then an L3 agent (which corresponds to
170-a quantum-gateway for the sake of this charm) is needed for each one. Each
171-gateway needs to be deployed as a separate service so that the external
172-network id can be set differently for each gateway e.g.
173-
174- juju deploy quantum-gateway quantum-gateway-extnet1
175- juju add-relation quantum-gateway-extnet1 mysql
176- juju add-relation quantum-gateway-extnet1 rabbitmq-server
177- juju add-relation quantum-gateway-extnet1 nova-cloud-controller
178- juju deploy quantum-gateway quantum-gateway-extnet2
179- juju add-relation quantum-gateway-extnet2 mysql
180- juju add-relation quantum-gateway-extnet2 rabbitmq-server
181- juju add-relation quantum-gateway-extnet2 nova-cloud-controller
182-
183- Create extnet1 and extnet2 via neutron client and take a note of their ids
184-
185- juju set quantum-gateway-extnet1 "run-internal-router=leader"
186- juju set quantum-gateway-extnet2 "run-internal-router=none"
187- juju set quantum-gateway-extnet1 "external-network-id=<extnet1 id>"
188- juju set quantum-gateway-extnet2 "external-network-id=<extnet2 id>"
189-
190-Instance MTU
191-============
192-
193-When using Open vSwitch plugin with GRE tunnels default MTU of 1500 can cause
194-packet fragmentation due to GRE overhead. One solution is to increase the MTU on
195-physical hosts and network equipment. When this is not possible or practical the
196-charm's instance-mtu option can be used to reduce instance MTU via DHCP.
197-
198- juju set quantum-gateway instance-mtu=1400
199-
200-OpenStack upstream documentation recommends an MTU value of 1400:
201-[Openstack documentation](http://docs.openstack.org/admin-guide-cloud/content/openvswitch_plugin.html)
202-
203-Note that this option was added in Havana and will be ignored in older releases.
204-
205-Deploying from source
206-=====================
207-
208-The minimum openstack-origin-git config required to deploy from source is:
209-
210- openstack-origin-git: include-file://neutron-juno.yaml
211-
212- neutron-juno.yaml
213- -----------------
214- repositories:
215- - {name: requirements,
216- repository: 'git://github.com/openstack/requirements',
217- branch: stable/juno}
218- - {name: neutron,
219- repository: 'git://github.com/openstack/neutron',
220- branch: stable/juno}
221-
222-Note that there are only two 'name' values the charm knows about: 'requirements'
223-and 'neutron'. These repositories must correspond to these 'name' values.
224-Additionally, the requirements repository must be specified first and the
225-neutron repository must be specified last. All other repositories are installed
226-in the order in which they are specified.
227-
228-The following is a full list of current tip repos (may not be up-to-date):
229-
230- openstack-origin-git: include-file://neutron-master.yaml
231-
232- neutron-master.yaml
233- -------------------
234- repositories:
235- - {name: requirements,
236- repository: 'git://github.com/openstack/requirements',
237- branch: master}
238- - {name: oslo-concurrency,
239- repository: 'git://github.com/openstack/oslo.concurrency',
240- branch: master}
241- - {name: oslo-config,
242- repository: 'git://github.com/openstack/oslo.config',
243- branch: master}
244- - {name: oslo-context,
245- repository: 'git://github.com/openstack/oslo.context',
246- branch: master}
247- - {name: oslo-db,
248- repository: 'git://github.com/openstack/oslo.db',
249- branch: master}
250- - {name: oslo-i18n,
251- repository: 'git://github.com/openstack/oslo.i18n',
252- branch: master}
253- - {name: oslo-messaging,
254- repository: 'git://github.com/openstack/oslo.messaging',
255- branch: master}
256- - {name: oslo-middleware,
257- repository': 'git://github.com/openstack/oslo.middleware',
258- branch: master}
259- - {name: oslo-rootwrap',
260- repository: 'git://github.com/openstack/oslo.rootwrap',
261- branch: master}
262- - {name: oslo-serialization,
263- repository: 'git://github.com/openstack/oslo.serialization',
264- branch: master}
265- - {name: oslo-utils,
266- repository: 'git://github.com/openstack/oslo.utils',
267- branch: master}
268- - {name: pbr,
269- repository: 'git://github.com/openstack-dev/pbr',
270- branch: master}
271- - {name: stevedore,
272- repository: 'git://github.com/openstack/stevedore',
273- branch: 'master'}
274- - {name: python-keystoneclient,
275- repository: 'git://github.com/openstack/python-keystoneclient',
276- branch: master}
277- - {name: python-neutronclient,
278- repository: 'git://github.com/openstack/python-neutronclient',
279- branch: master}
280- - {name: python-novaclient,
281- repository': 'git://github.com/openstack/python-novaclient',
282- branch: master}
283- - {name: keystonemiddleware,
284- repository: 'git://github.com/openstack/keystonemiddleware',
285- branch: master}
286- - {name: neutron-fwaas,
287- repository': 'git://github.com/openstack/neutron-fwaas',
288- branch: master}
289- - {name: neutron-lbaas,
290- repository: 'git://github.com/openstack/neutron-lbaas',
291- branch: master}
292- - {name: neutron-vpnaas,
293- repository: 'git://github.com/openstack/neutron-vpnaas',
294- branch: master}
295- - {name: neutron,
296- repository: 'git://github.com/openstack/neutron',
297- branch: master}
298-
299-TODO
300-----
301-
302- * Provide more network configuration use cases.
303- * Support VLAN in addition to GRE+OpenFlow for L2 separation.
304+This charm has reached end-of-life. Please use the neutron-gateway charm.
305
306=== removed directory 'actions'
307=== removed file 'actions.yaml'
308--- actions.yaml 2015-04-10 14:22:04 +0000
309+++ actions.yaml 1970-01-01 00:00:00 +0000
310@@ -1,2 +0,0 @@
311-git-reinstall:
312- description: Reinstall quantum-gateway from the openstack-origin-git repositories.
313
314=== removed symlink 'actions/git-reinstall'
315=== target was u'git_reinstall.py'
316=== removed file 'actions/git_reinstall.py'
317--- actions/git_reinstall.py 2015-04-15 16:46:22 +0000
318+++ actions/git_reinstall.py 1970-01-01 00:00:00 +0000
319@@ -1,45 +0,0 @@
320-#!/usr/bin/python
321-import sys
322-import traceback
323-
324-sys.path.append('hooks/')
325-
326-from charmhelpers.contrib.openstack.utils import (
327- git_install_requested,
328-)
329-
330-from charmhelpers.core.hookenv import (
331- action_set,
332- action_fail,
333- config,
334-)
335-
336-from quantum_utils import (
337- git_install,
338-)
339-
340-from quantum_hooks import (
341- config_changed,
342-)
343-
344-
345-def git_reinstall():
346- """Reinstall from source and restart services.
347-
348- If the openstack-origin-git config option was used to install openstack
349- from source git repositories, then this action can be used to reinstall
350- from updated git repositories, followed by a restart of services."""
351- if not git_install_requested():
352- action_fail('openstack-origin-git is not configured')
353- return
354-
355- try:
356- git_install(config('openstack-origin-git'))
357- config_changed()
358- except:
359- action_set({'traceback': traceback.format_exc()})
360- action_fail('git-reinstall resulted in an unexpected error')
361-
362-
363-if __name__ == '__main__':
364- git_reinstall()
365
366=== modified file 'charm-helpers-hooks.yaml'
367--- charm-helpers-hooks.yaml 2015-05-02 22:35:06 +0000
368+++ charm-helpers-hooks.yaml 2015-07-16 19:59:55 +0000
369@@ -2,12 +2,3 @@
370 destination: hooks/charmhelpers
371 include:
372 - core
373- - fetch
374- - contrib.openstack|inc=*
375- - contrib.hahelpers
376- - contrib.network
377- - contrib.python.packages
378- - contrib.storage.linux
379- - contrib.python
380- - payload.execd
381- - contrib.charmsupport
382
383=== removed file 'charm-helpers-tests.yaml'
384--- charm-helpers-tests.yaml 2015-05-02 22:35:06 +0000
385+++ charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000
386@@ -1,5 +0,0 @@
387-branch: lp:charm-helpers
388-destination: tests/charmhelpers
389-include:
390- - contrib.amulet
391- - contrib.openstack.amulet
392
393=== removed directory 'files'
394=== removed file 'files/NeutronAgentMon'
395--- files/NeutronAgentMon 2015-01-15 10:00:38 +0000
396+++ files/NeutronAgentMon 1970-01-01 00:00:00 +0000
397@@ -1,155 +0,0 @@
398-#!/bin/sh
399-#
400-#
401-# NeutronAgentMon OCF RA.
402-# Starts crm_mon in background which logs cluster status as
403-# html to the specified file.
404-#
405-# Copyright 2014 Canonical Ltd.
406-#
407-# Authors: Hui Xiang <hui.xiang@canonical.com>
408-# Edward Hope-Morley <edward.hope-morley@canonical.com>
409-#
410-# OCF instance parameters:
411-# OCF_RESKEY_file
412-
413-#######################################################################
414-# Initialization:
415-: ${OCF_FUNCTIONS=${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs}
416-. ${OCF_FUNCTIONS}
417-: ${__OCF_ACTION=$1}
418-
419-#######################################################################
420-
421-meta_data() {
422- cat <<END
423-<?xml version="1.0"?>
424-<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
425-<resource-agent name="NeutronAgentMon">
426-<version>1.0</version>
427-
428-<longdesc lang="en">
429-This is a NeutronAgentMon Resource Agent.
430-It monitors the 'neutron-ha-monitor daemon' status.
431-</longdesc>
432-<shortdesc lang="en">Monitor '/usr/local/bin/neutron-ha-monitor.py' in the background.</shortdesc>
433-
434-<parameters>
435-
436-<parameter name="file" unique="0">
437-<longdesc lang="en">
438-The file we want to run as a daemon.
439-</longdesc>
440-<shortdesc lang="en">The file we want to run as a daemon.</shortdesc>
441-<content type="string" default="/usr/local/bin/neutron-ha-monitor.py" />
442-</parameter>
443-
444-</parameters>
445-
446-<actions>
447-<action name="start" timeout="20" />
448-<action name="stop" timeout="20" />
449-<action name="monitor" depth="0" timeout="20" interval="60" />
450-<action name="meta-data" timeout="5" />
451-<action name="validate-all" timeout="30" />
452-</actions>
453-</resource-agent>
454-END
455-}
456-
457-#######################################################################
458-
459-NeutronAgentMon_usage() {
460- cat <<END
461-usage: $0 {start|stop|monitor|validate-all|meta-data}
462-
463-Expects to have a fully populated OCF RA-compliant environment set.
464-END
465-}
466-
467-NeutronAgentMon_exit() {
468- if [ $1 != 0 ]; then
469- exit $OCF_ERR_GENERIC
470- else
471- exit $OCF_SUCCESS
472- fi
473-}
474-
475-NeutronAgentMon_start() {
476- pid=`sudo ps -aux | grep neutron-ha-m\[o\]nitor.py | awk -F' ' '{print $2}'`
477- if [ -z $pid ]; then
478- ocf_log info "[NeutronAgentMon_start] Start Monitor daemon."
479- sudo mkdir -p /var/log/neutron-ha
480- sudo python /usr/local/bin/neutron-ha-monitor.py \
481- --config-file /var/lib/juju-neutron-ha/neutron-ha-monitor.conf \
482- --log-file /var/log/neutron-ha/monitor.log >> /dev/null 2>&1 & echo $!
483- sleep 5
484- else
485- ocf_log warn "[NeutronAgentMon_start] Monitor daemon already running."
486- fi
487- NeutronAgentMon_exit $?
488-}
489-
490-NeutronAgentMon_stop() {
491- pid=`sudo ps -aux | grep neutron-ha-m\[o\]nitor.py | awk -F' ' '{print $2}'`
492- if [ ! -z $pid ]; then
493- sudo kill -s 9 $pid
494- ocf_log info "[NeutronAgentMon_stop] Pid $pid is killed."
495- else
496- ocf_log warn "[NeutronAgentMon_stop] Monitor daemon already stopped."
497- fi
498- NeutronAgentMon_exit 0
499-}
500-
501-NeutronAgentMon_monitor() {
502- pid=`sudo ps -aux | grep neutron-ha-m\[o\]nitor.py | awk -F' ' '{print $2}'`
503- if [ ! -z $pid ]; then
504- ocf_log info "[NeutronAgentMon_monitor] success."
505- exit $OCF_SUCCESS
506- fi
507- exit $OCF_NOT_RUNNING
508-}
509-
510-NeutronAgentMon_validate() {
511-# Existence of the user
512- if [ -f $OCF_RESKEY_file ]; then
513- echo "Validate OK"
514- return $OCF_SUCCESS
515- else
516- ocf_log err "The file $OCF_RESKEY_file does not exist!"
517- exit $OCF_ERR_ARGS
518- fi
519-}
520-
521-if [ $# -ne 1 ]; then
522- NeutronAgentMon_usage
523- exit $OCF_ERR_ARGS
524-fi
525-
526-: ${OCF_RESKEY_update:="15000"}
527-: ${OCF_RESKEY_pidfile:="/tmp/NeutronAgentMon_${OCF_RESOURCE_INSTANCE}.pid"}
528-: ${OCF_RESKEY_htmlfile:="/tmp/NeutronAgentMon_${OCF_RESOURCE_INSTANCE}.html"}
529-
530-OCF_RESKEY_update=`expr $OCF_RESKEY_update / 1000`
531-
532-case $__OCF_ACTION in
533-meta-data) meta_data
534- exit $OCF_SUCCESS
535- ;;
536-start) NeutronAgentMon_start
537- ;;
538-stop) NeutronAgentMon_stop
539- ;;
540-monitor) NeutronAgentMon_monitor
541- ;;
542-validate-all) NeutronAgentMon_validate
543- ;;
544-usage|help) NeutronAgentMon_usage
545- exit $OCF_SUCCESS
546- ;;
547-*) NeutronAgentMon_usage
548- exit $OCF_ERR_UNIMPLEMENTED
549- ;;
550-esac
551-
552-exit $?
553
554=== removed file 'files/neutron-ha-monitor.conf'
555--- files/neutron-ha-monitor.conf 2015-01-15 10:00:38 +0000
556+++ files/neutron-ha-monitor.conf 1970-01-01 00:00:00 +0000
557@@ -1,4 +0,0 @@
558-[DEFAULT]
559-verbose=True
560-#debug=True
561-check_interval=8
562
563=== removed file 'files/neutron-ha-monitor.py'
564--- files/neutron-ha-monitor.py 2015-01-19 10:43:09 +0000
565+++ files/neutron-ha-monitor.py 1970-01-01 00:00:00 +0000
566@@ -1,436 +0,0 @@
567-# Copyright 2014 Canonical Ltd.
568-#
569-# Authors: Hui Xiang <hui.xiang@canonical.com>
570-# Joshua Zhang <joshua.zhang@canonical.com>
571-# Edward Hope-Morley <edward.hope-morley@canonical.com>
572-#
573-
574-"""
575-Helpers for monitoring Neutron agents, reschedule failed agents,
576-cleaned resources on failed nodes.
577-"""
578-
579-import os
580-import re
581-import sys
582-import signal
583-import socket
584-import subprocess
585-import time
586-
587-from oslo.config import cfg
588-from neutron.agent.linux import ovs_lib
589-from neutron.agent.linux import ip_lib
590-from neutron.common import exceptions
591-from neutron.openstack.common import log as logging
592-
593-LOG = logging.getLogger(__name__)
594-
595-
596-class Daemon(object):
597- """A generic daemon class.
598-
599- Usage: subclass the Daemon class and override the run() method
600- """
601- def __init__(self, stdin='/dev/null', stdout='/dev/null',
602- stderr='/dev/null', procname='python'):
603- self.stdin = stdin
604- self.stdout = stdout
605- self.stderr = stderr
606- self.procname = procname
607-
608- def _fork(self):
609- try:
610- pid = os.fork()
611- if pid > 0:
612- sys.exit(0)
613- except OSError:
614- LOG.exception('Fork failed')
615- sys.exit(1)
616-
617- def daemonize(self):
618- """Daemonize process by doing Stevens double fork."""
619- # fork first time
620- self._fork()
621-
622- # decouple from parent environment
623- os.chdir("/")
624- os.setsid()
625- os.umask(0)
626- # fork second time
627- self._fork()
628-
629- # redirect standard file descriptors
630- sys.stdout.flush()
631- sys.stderr.flush()
632- stdin = open(self.stdin, 'r')
633- stdout = open(self.stdout, 'a+')
634- stderr = open(self.stderr, 'a+', 0)
635- os.dup2(stdin.fileno(), sys.stdin.fileno())
636- os.dup2(stdout.fileno(), sys.stdout.fileno())
637- os.dup2(stderr.fileno(), sys.stderr.fileno())
638-
639- signal.signal(signal.SIGTERM, self.handle_sigterm)
640-
641- def handle_sigterm(self, signum, frame):
642- sys.exit(0)
643-
644- def start(self):
645- """Start the daemon."""
646- self.daemonize()
647- self.run()
648-
649- def run(self):
650- """Override this method when subclassing Daemon.
651-
652- start() will call this method after the process has daemonized.
653- """
654- pass
655-
656-
657-class MonitorNeutronAgentsDaemon(Daemon):
658- def __init__(self):
659- super(MonitorNeutronAgentsDaemon, self).__init__()
660- logging.setup('Neuron-HA-Monitor')
661- LOG.info('Monitor Neutron Agent Loop Init')
662- self.hostname = None
663- self.env = {}
664-
665- def get_env(self):
666- envrc_f = '/etc/legacy_ha_envrc'
667- envrc_f_m = False
668- if os.path.isfile(envrc_f):
669- ctime = time.ctime(os.stat(envrc_f).st_ctime)
670- mtime = time.ctime(os.stat(envrc_f).st_mtime)
671- if ctime != mtime:
672- envrc_f_m = True
673-
674- if not self.env or envrc_f_m:
675- with open(envrc_f, 'r') as f:
676- for line in f:
677- data = line.strip().split('=')
678- if data and data[0] and data[1]:
679- self.env[data[0]] = data[1]
680- else:
681- raise Exception("OpenStack env data uncomplete.")
682- return self.env
683-
684- def get_hostname(self):
685- if not self.hostname:
686- self.hostname = socket.gethostname()
687- return self.hostname
688-
689- def get_root_helper(self):
690- return 'sudo'
691-
692- def list_monitor_res(self):
693- # List crm resource 'cl_monitor' running node
694- nodes = []
695- cmd = ['crm', 'resource', 'show', 'cl_monitor']
696- output = subprocess.check_output(cmd)
697- pattern = re.compile('resource cl_monitor is running on: (.*) ')
698- nodes = pattern.findall(output)
699- return nodes
700-
701- def get_crm_res_lead_node(self):
702- nodes = self.list_monitor_res()
703- if nodes:
704- return nodes[0].strip()
705- else:
706- LOG.error('Failed to get crm resource.')
707- return None
708-
709- def unplug_device(self, device):
710- try:
711- device.link.delete()
712- except RuntimeError:
713- root_helper = self.get_root_helper()
714- # Maybe the device is OVS port, so try to delete
715- bridge_name = ovs_lib.get_bridge_for_iface(root_helper,
716- device.name)
717- if bridge_name:
718- bridge = ovs_lib.OVSBridge(bridge_name, root_helper)
719- bridge.delete_port(device.name)
720- else:
721- LOG.debug('Unable to find bridge for device: %s', device.name)
722-
723- def get_pattern(self, key, text):
724- if not key or not text:
725- LOG.debug('Invalid key(%s) or text(%s)' % (key, text))
726- return None
727-
728- pattern = re.compile('%s' % key)
729- result = pattern.findall(text)
730- return result
731-
732- def _cleanup(self, key1, key2):
733- namespaces = []
734- if key1:
735- for k in key1.iterkeys():
736- namespaces.append(key2 + '-' + k)
737- else:
738- try:
739- cmd = ['sudo', 'ip', 'netns']
740- ns = subprocess.check_output(cmd)
741- namespaces = self.get_pattern('(%s.*)' % key2, ns)
742- except RuntimeError as e:
743- LOG.error('Failed to list namespace, (%s)' % e)
744-
745- if namespaces:
746- LOG.info('Namespaces: %s is going to be deleted.' % namespaces)
747- self.destroy_namespaces(namespaces)
748-
749- def cleanup_dhcp(self, networks):
750- self._cleanup(networks, 'qdhcp')
751-
752- def cleanup_router(self, routers):
753- self._cleanup(routers, 'qrouter')
754-
755- def destroy_namespaces(self, namespaces):
756- try:
757- root_helper = self.get_root_helper()
758- for namespace in namespaces:
759- ip = ip_lib.IPWrapper(root_helper, namespace)
760- if ip.netns.exists(namespace):
761- for device in ip.get_devices(exclude_loopback=True):
762- self.unplug_device(device)
763-
764- ip.garbage_collect_namespace()
765- except Exception:
766- LOG.exception('Error unable to destroy namespace: %s', namespace)
767-
768- def is_same_host(self, host):
769- return str(host).strip() == self.get_hostname()
770-
771- def validate_reschedule(self):
772- crm_no_1_node = self.get_crm_res_lead_node()
773- if not crm_no_1_node:
774- LOG.error('No crm first node could be found.')
775- return False
776-
777- if not self.is_same_host(crm_no_1_node):
778- LOG.warn('Only the first crm node %s could reschedule. '
779- % crm_no_1_node)
780- return False
781- return True
782-
783- def l3_agents_reschedule(self, l3_agents, routers, quantum):
784- if not self.validate_reschedule():
785- return
786-
787- index = 0
788- for router_id in routers:
789- agent = index % len(l3_agents)
790- LOG.info('Moving router %s from %s to %s' %
791- (router_id, routers[router_id], l3_agents[agent]))
792- try:
793- quantum.remove_router_from_l3_agent(l3_agent=routers[router_id],
794- router_id=router_id)
795- except exceptions.NeutronException as e:
796- LOG.error('Remove router raised exception: %s' % e)
797- try:
798- quantum.add_router_to_l3_agent(l3_agent=l3_agents[agent],
799- body={'router_id': router_id})
800- except exceptions.NeutronException as e:
801- LOG.error('Add router raised exception: %s' % e)
802- index += 1
803-
804- def dhcp_agents_reschedule(self, dhcp_agents, networks, quantum):
805- if not self.validate_reschedule():
806- return
807-
808- index = 0
809- for network_id in networks:
810- agent = index % len(dhcp_agents)
811- LOG.info('Moving network %s from %s to %s' % (network_id,
812- networks[network_id], dhcp_agents[agent]))
813- try:
814- quantum.remove_network_from_dhcp_agent(
815- dhcp_agent=networks[network_id], network_id=network_id)
816- except exceptions.NeutronException as e:
817- LOG.error('Remove network raised exception: %s' % e)
818- try:
819- quantum.add_network_to_dhcp_agent(
820- dhcp_agent=dhcp_agents[agent],
821- body={'network_id': network_id})
822- except exceptions.NeutronException as e:
823- LOG.error('Add network raised exception: %s' % e)
824- index += 1
825-
826- def get_quantum_client(self):
827- env = self.get_env()
828- if not env:
829- LOG.info('Unable to re-assign resources at this time')
830- return None
831-
832- try:
833- from quantumclient.v2_0 import client
834- except ImportError:
835- # Try to import neutronclient instead for havana+
836- from neutronclient.v2_0 import client
837-
838- auth_url = '%(auth_protocol)s://%(keystone_host)s:%(auth_port)s/v2.0' \
839- % env
840- quantum = client.Client(username=env['service_username'],
841- password=env['service_password'],
842- tenant_name=env['service_tenant'],
843- auth_url=auth_url,
844- region_name=env['region'])
845- return quantum
846-
847- def reassign_agent_resources(self, quantum=None):
848- """Use agent scheduler API to detect down agents and re-schedule"""
849- if not quantum:
850- LOG.error('Failed to get quantum client.')
851- return
852-
853- try:
854- DHCP_AGENT = "DHCP Agent"
855- L3_AGENT = "L3 Agent"
856- agents = quantum.list_agents(agent_type=DHCP_AGENT)
857- except exceptions.NeutronException as e:
858- LOG.error('Failed to get quantum agents, %s' % e)
859- return
860-
861- dhcp_agents = []
862- l3_agents = []
863- networks = {}
864- for agent in agents['agents']:
865- hosted_networks = quantum.list_networks_on_dhcp_agent(
866- agent['id'])['networks']
867- if not agent['alive']:
868- LOG.info('DHCP Agent %s down' % agent['id'])
869- for network in hosted_networks:
870- networks[network['id']] = agent['id']
871- if self.is_same_host(agent['host']):
872- self.cleanup_dhcp(networks)
873- else:
874- dhcp_agents.append(agent['id'])
875- LOG.info('Active dhcp agents: %s' % agent['id'])
876- if not hosted_networks and self.is_same_host(agent['host']):
877- self.cleanup_dhcp(None)
878-
879- agents = quantum.list_agents(agent_type=L3_AGENT)
880- routers = {}
881- for agent in agents['agents']:
882- hosted_routers = quantum.list_routers_on_l3_agent(
883- agent['id'])['routers']
884- if not agent['alive']:
885- LOG.info('L3 Agent %s down' % agent['id'])
886- for router in hosted_routers:
887- routers[router['id']] = agent['id']
888- if self.is_same_host(agent['host']):
889- self.cleanup_router(routers)
890- else:
891- l3_agents.append(agent['id'])
892- LOG.info('Active l3 agents: %s' % agent['id'])
893- if not hosted_routers and self.is_same_host(agent['host']):
894- self.cleanup_router(None)
895-
896- if not networks and not routers:
897- LOG.info('No networks and routers hosted on failed agents.')
898- return
899-
900- if len(dhcp_agents) == 0 and len(l3_agents) == 0:
901- LOG.error('Unable to relocate resources, there are %s dhcp_agents '
902- 'and %s l3_agents in this cluster' % (len(dhcp_agents),
903- len(l3_agents)))
904- return
905-
906- if len(l3_agents) > 0:
907- self.l3_agents_reschedule(l3_agents, routers, quantum)
908- # new l3 node will not create a tunnel if don't restart ovs process
909-
910- if len(dhcp_agents) > 0:
911- self.dhcp_agents_reschedule(dhcp_agents, networks, quantum)
912-
913-
914- def check_ovs_tunnel(self, quantum=None):
915- '''
916- Work around for Bug #1411163
917- No fdb entries added when failover dhcp and l3 agent together.
918- '''
919- if not quantum:
920- LOG.error('Failed to get quantum client.')
921- return
922-
923- try:
924- OVS_AGENT = 'Open vSwitch agent'
925- agents = quantum.list_agents(agent_type=OVS_AGENT)
926- except exceptions.NeutronException as e:
927- LOG.error('No ovs agent found on localhost, error:%s.' % e)
928- return
929-
930- for agent in agents['agents']:
931- if self.is_same_host(agent['host']) and agent['alive']:
932- conf = agent['configurations']
933- if 'gre' in conf['tunnel_types'] and conf['l2_population'] \
934- and conf['devices']:
935- LOG.debug('local ovs agent:%s' % agent)
936- ovs_output = subprocess.check_output(['ovs-vsctl',
937- 'list-ports', 'br-tun'])
938- ports = ovs_output.strip().split('\n')
939- look_up_gre_port = False
940- for port in ports:
941- if port.startswith('gre-'):
942- look_up_gre_port = True
943- break
944- if not look_up_gre_port:
945- try:
946- LOG.error('Local agent has devices, but no ovs tunnel is created,'
947- 'restart ovs agent.')
948- cmd = ['sudo', 'service', 'neutron-plugin-openvswitch-agent',
949- 'restart']
950- subprocess.call(cmd)
951- except subprocess.CalledProcessError:
952- LOG.error('Failed to restart neutron-plugin-openvswitch-agent.')
953-
954- def check_local_agents(self):
955- services = ['openvswitch-switch', 'neutron-dhcp-agent',
956- 'neutron-metadata-agent', 'neutron-vpn-agent']
957- for s in services:
958- status = ['sudo', 'service', s, 'status']
959- restart = ['sudo', 'service', s, 'restart']
960- start = ['sudo', 'service', s, 'start']
961- stop = '%s stop/waiting' % s
962- try:
963- output = subprocess.check_output(status)
964- if output.strip() == stop:
965- subprocess.check_output(start)
966- LOG.error('Restart service: %s' % s)
967- if s == 'neutron-metadata-agent':
968- subprocess.check_output(['sudo', 'service',
969- 'neutron-vpn-agent',
970- 'restart'])
971- LOG.error('Restart neutron-vpn-agent')
972- except subprocess.CalledProcessError:
973- LOG.error('Restart service: %s' % s)
974- subprocess.check_output(restart)
975- if s == 'neutron-metadata-agent':
976- subprocess.check_output(['sudo', 'service',
977- 'neutron-vpn-agent',
978- 'restart'])
979-
980- def run(self):
981- while True:
982- LOG.info('Monitor Neutron HA Agent Loop Start')
983- quantum = self.get_quantum_client()
984- self.reassign_agent_resources(quantum=quantum)
985- self.check_ovs_tunnel(quantum=quantum)
986- self.check_local_agents()
987- LOG.info('sleep %s' % cfg.CONF.check_interval)
988- time.sleep(float(cfg.CONF.check_interval))
989-
990-
991-if __name__ == '__main__':
992- opts = [
993- cfg.StrOpt('check_interval',
994- default=8,
995- help='Check Neutron Agents interval.'),
996- ]
997-
998- cfg.CONF.register_cli_opts(opts)
999- cfg.CONF(project='monitor_neutron_agents', default_config_files=[])
1000- logging.setup('Neuron-HA-Monitor')
1001- monitor_daemon = MonitorNeutronAgentsDaemon()
1002- monitor_daemon.start()
1003
1004=== removed symlink 'hooks/amqp-nova-relation-changed'
1005=== target was u'quantum_hooks.py'
1006=== removed symlink 'hooks/amqp-nova-relation-departed'
1007=== target was u'quantum_hooks.py'
1008=== removed symlink 'hooks/amqp-nova-relation-joined'
1009=== target was u'quantum_hooks.py'
1010=== removed symlink 'hooks/amqp-relation-changed'
1011=== target was u'quantum_hooks.py'
1012=== removed symlink 'hooks/amqp-relation-departed'
1013=== target was u'quantum_hooks.py'
1014=== removed symlink 'hooks/amqp-relation-joined'
1015=== target was u'quantum_hooks.py'
1016=== removed directory 'hooks/charmhelpers/contrib'
1017=== removed file 'hooks/charmhelpers/contrib/__init__.py'
1018--- hooks/charmhelpers/contrib/__init__.py 2015-01-23 11:08:26 +0000
1019+++ hooks/charmhelpers/contrib/__init__.py 1970-01-01 00:00:00 +0000
1020@@ -1,15 +0,0 @@
1021-# Copyright 2014-2015 Canonical Limited.
1022-#
1023-# This file is part of charm-helpers.
1024-#
1025-# charm-helpers is free software: you can redistribute it and/or modify
1026-# it under the terms of the GNU Lesser General Public License version 3 as
1027-# published by the Free Software Foundation.
1028-#
1029-# charm-helpers is distributed in the hope that it will be useful,
1030-# but WITHOUT ANY WARRANTY; without even the implied warranty of
1031-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1032-# GNU Lesser General Public License for more details.
1033-#
1034-# You should have received a copy of the GNU Lesser General Public License
1035-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1036
1037=== removed directory 'hooks/charmhelpers/contrib/charmsupport'
1038=== removed file 'hooks/charmhelpers/contrib/charmsupport/__init__.py'
1039--- hooks/charmhelpers/contrib/charmsupport/__init__.py 2015-01-23 11:08:26 +0000
1040+++ hooks/charmhelpers/contrib/charmsupport/__init__.py 1970-01-01 00:00:00 +0000
1041@@ -1,15 +0,0 @@
1042-# Copyright 2014-2015 Canonical Limited.
1043-#
1044-# This file is part of charm-helpers.
1045-#
1046-# charm-helpers is free software: you can redistribute it and/or modify
1047-# it under the terms of the GNU Lesser General Public License version 3 as
1048-# published by the Free Software Foundation.
1049-#
1050-# charm-helpers is distributed in the hope that it will be useful,
1051-# but WITHOUT ANY WARRANTY; without even the implied warranty of
1052-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1053-# GNU Lesser General Public License for more details.
1054-#
1055-# You should have received a copy of the GNU Lesser General Public License
1056-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1057
1058=== removed file 'hooks/charmhelpers/contrib/charmsupport/nrpe.py'
1059--- hooks/charmhelpers/contrib/charmsupport/nrpe.py 2015-04-19 09:02:48 +0000
1060+++ hooks/charmhelpers/contrib/charmsupport/nrpe.py 1970-01-01 00:00:00 +0000
1061@@ -1,360 +0,0 @@
1062-# Copyright 2014-2015 Canonical Limited.
1063-#
1064-# This file is part of charm-helpers.
1065-#
1066-# charm-helpers is free software: you can redistribute it and/or modify
1067-# it under the terms of the GNU Lesser General Public License version 3 as
1068-# published by the Free Software Foundation.
1069-#
1070-# charm-helpers is distributed in the hope that it will be useful,
1071-# but WITHOUT ANY WARRANTY; without even the implied warranty of
1072-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1073-# GNU Lesser General Public License for more details.
1074-#
1075-# You should have received a copy of the GNU Lesser General Public License
1076-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1077-
1078-"""Compatibility with the nrpe-external-master charm"""
1079-# Copyright 2012 Canonical Ltd.
1080-#
1081-# Authors:
1082-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
1083-
1084-import subprocess
1085-import pwd
1086-import grp
1087-import os
1088-import glob
1089-import shutil
1090-import re
1091-import shlex
1092-import yaml
1093-
1094-from charmhelpers.core.hookenv import (
1095- config,
1096- local_unit,
1097- log,
1098- relation_ids,
1099- relation_set,
1100- relations_of_type,
1101-)
1102-
1103-from charmhelpers.core.host import service
1104-
1105-# This module adds compatibility with the nrpe-external-master and plain nrpe
1106-# subordinate charms. To use it in your charm:
1107-#
1108-# 1. Update metadata.yaml
1109-#
1110-# provides:
1111-# (...)
1112-# nrpe-external-master:
1113-# interface: nrpe-external-master
1114-# scope: container
1115-#
1116-# and/or
1117-#
1118-# provides:
1119-# (...)
1120-# local-monitors:
1121-# interface: local-monitors
1122-# scope: container
1123-
1124-#
1125-# 2. Add the following to config.yaml
1126-#
1127-# nagios_context:
1128-# default: "juju"
1129-# type: string
1130-# description: |
1131-# Used by the nrpe subordinate charms.
1132-# A string that will be prepended to instance name to set the host name
1133-# in nagios. So for instance the hostname would be something like:
1134-# juju-myservice-0
1135-# If you're running multiple environments with the same services in them
1136-# this allows you to differentiate between them.
1137-# nagios_servicegroups:
1138-# default: ""
1139-# type: string
1140-# description: |
1141-# A comma-separated list of nagios servicegroups.
1142-# If left empty, the nagios_context will be used as the servicegroup
1143-#
1144-# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
1145-#
1146-# 4. Update your hooks.py with something like this:
1147-#
1148-# from charmsupport.nrpe import NRPE
1149-# (...)
1150-# def update_nrpe_config():
1151-# nrpe_compat = NRPE()
1152-# nrpe_compat.add_check(
1153-# shortname = "myservice",
1154-# description = "Check MyService",
1155-# check_cmd = "check_http -w 2 -c 10 http://localhost"
1156-# )
1157-# nrpe_compat.add_check(
1158-# "myservice_other",
1159-# "Check for widget failures",
1160-# check_cmd = "/srv/myapp/scripts/widget_check"
1161-# )
1162-# nrpe_compat.write()
1163-#
1164-# def config_changed():
1165-# (...)
1166-# update_nrpe_config()
1167-#
1168-# def nrpe_external_master_relation_changed():
1169-# update_nrpe_config()
1170-#
1171-# def local_monitors_relation_changed():
1172-# update_nrpe_config()
1173-#
1174-# 5. ln -s hooks.py nrpe-external-master-relation-changed
1175-# ln -s hooks.py local-monitors-relation-changed
1176-
1177-
1178-class CheckException(Exception):
1179- pass
1180-
1181-
1182-class Check(object):
1183- shortname_re = '[A-Za-z0-9-_]+$'
1184- service_template = ("""
1185-#---------------------------------------------------
1186-# This file is Juju managed
1187-#---------------------------------------------------
1188-define service {{
1189- use active-service
1190- host_name {nagios_hostname}
1191- service_description {nagios_hostname}[{shortname}] """
1192- """{description}
1193- check_command check_nrpe!{command}
1194- servicegroups {nagios_servicegroup}
1195-}}
1196-""")
1197-
1198- def __init__(self, shortname, description, check_cmd):
1199- super(Check, self).__init__()
1200- # XXX: could be better to calculate this from the service name
1201- if not re.match(self.shortname_re, shortname):
1202- raise CheckException("shortname must match {}".format(
1203- Check.shortname_re))
1204- self.shortname = shortname
1205- self.command = "check_{}".format(shortname)
1206- # Note: a set of invalid characters is defined by the
1207- # Nagios server config
1208- # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
1209- self.description = description
1210- self.check_cmd = self._locate_cmd(check_cmd)
1211-
1212- def _locate_cmd(self, check_cmd):
1213- search_path = (
1214- '/usr/lib/nagios/plugins',
1215- '/usr/local/lib/nagios/plugins',
1216- )
1217- parts = shlex.split(check_cmd)
1218- for path in search_path:
1219- if os.path.exists(os.path.join(path, parts[0])):
1220- command = os.path.join(path, parts[0])
1221- if len(parts) > 1:
1222- command += " " + " ".join(parts[1:])
1223- return command
1224- log('Check command not found: {}'.format(parts[0]))
1225- return ''
1226-
1227- def write(self, nagios_context, hostname, nagios_servicegroups):
1228- nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
1229- self.command)
1230- with open(nrpe_check_file, 'w') as nrpe_check_config:
1231- nrpe_check_config.write("# check {}\n".format(self.shortname))
1232- nrpe_check_config.write("command[{}]={}\n".format(
1233- self.command, self.check_cmd))
1234-
1235- if not os.path.exists(NRPE.nagios_exportdir):
1236- log('Not writing service config as {} is not accessible'.format(
1237- NRPE.nagios_exportdir))
1238- else:
1239- self.write_service_config(nagios_context, hostname,
1240- nagios_servicegroups)
1241-
1242- def write_service_config(self, nagios_context, hostname,
1243- nagios_servicegroups):
1244- for f in os.listdir(NRPE.nagios_exportdir):
1245- if re.search('.*{}.cfg'.format(self.command), f):
1246- os.remove(os.path.join(NRPE.nagios_exportdir, f))
1247-
1248- templ_vars = {
1249- 'nagios_hostname': hostname,
1250- 'nagios_servicegroup': nagios_servicegroups,
1251- 'description': self.description,
1252- 'shortname': self.shortname,
1253- 'command': self.command,
1254- }
1255- nrpe_service_text = Check.service_template.format(**templ_vars)
1256- nrpe_service_file = '{}/service__{}_{}.cfg'.format(
1257- NRPE.nagios_exportdir, hostname, self.command)
1258- with open(nrpe_service_file, 'w') as nrpe_service_config:
1259- nrpe_service_config.write(str(nrpe_service_text))
1260-
1261- def run(self):
1262- subprocess.call(self.check_cmd)
1263-
1264-
1265-class NRPE(object):
1266- nagios_logdir = '/var/log/nagios'
1267- nagios_exportdir = '/var/lib/nagios/export'
1268- nrpe_confdir = '/etc/nagios/nrpe.d'
1269-
1270- def __init__(self, hostname=None):
1271- super(NRPE, self).__init__()
1272- self.config = config()
1273- self.nagios_context = self.config['nagios_context']
1274- if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
1275- self.nagios_servicegroups = self.config['nagios_servicegroups']
1276- else:
1277- self.nagios_servicegroups = self.nagios_context
1278- self.unit_name = local_unit().replace('/', '-')
1279- if hostname:
1280- self.hostname = hostname
1281- else:
1282- self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
1283- self.checks = []
1284-
1285- def add_check(self, *args, **kwargs):
1286- self.checks.append(Check(*args, **kwargs))
1287-
1288- def write(self):
1289- try:
1290- nagios_uid = pwd.getpwnam('nagios').pw_uid
1291- nagios_gid = grp.getgrnam('nagios').gr_gid
1292- except:
1293- log("Nagios user not set up, nrpe checks not updated")
1294- return
1295-
1296- if not os.path.exists(NRPE.nagios_logdir):
1297- os.mkdir(NRPE.nagios_logdir)
1298- os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
1299-
1300- nrpe_monitors = {}
1301- monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
1302- for nrpecheck in self.checks:
1303- nrpecheck.write(self.nagios_context, self.hostname,
1304- self.nagios_servicegroups)
1305- nrpe_monitors[nrpecheck.shortname] = {
1306- "command": nrpecheck.command,
1307- }
1308-
1309- service('restart', 'nagios-nrpe-server')
1310-
1311- monitor_ids = relation_ids("local-monitors") + \
1312- relation_ids("nrpe-external-master")
1313- for rid in monitor_ids:
1314- relation_set(relation_id=rid, monitors=yaml.dump(monitors))
1315-
1316-
1317-def get_nagios_hostcontext(relation_name='nrpe-external-master'):
1318- """
1319- Query relation with nrpe subordinate, return the nagios_host_context
1320-
1321- :param str relation_name: Name of relation nrpe sub joined to
1322- """
1323- for rel in relations_of_type(relation_name):
1324- if 'nagios_hostname' in rel:
1325- return rel['nagios_host_context']
1326-
1327-
1328-def get_nagios_hostname(relation_name='nrpe-external-master'):
1329- """
1330- Query relation with nrpe subordinate, return the nagios_hostname
1331-
1332- :param str relation_name: Name of relation nrpe sub joined to
1333- """
1334- for rel in relations_of_type(relation_name):
1335- if 'nagios_hostname' in rel:
1336- return rel['nagios_hostname']
1337-
1338-
1339-def get_nagios_unit_name(relation_name='nrpe-external-master'):
1340- """
1341- Return the nagios unit name prepended with host_context if needed
1342-
1343- :param str relation_name: Name of relation nrpe sub joined to
1344- """
1345- host_context = get_nagios_hostcontext(relation_name)
1346- if host_context:
1347- unit = "%s:%s" % (host_context, local_unit())
1348- else:
1349- unit = local_unit()
1350- return unit
1351-
1352-
1353-def add_init_service_checks(nrpe, services, unit_name):
1354- """
1355- Add checks for each service in list
1356-
1357- :param NRPE nrpe: NRPE object to add check to
1358- :param list services: List of services to check
1359- :param str unit_name: Unit name to use in check description
1360- """
1361- for svc in services:
1362- upstart_init = '/etc/init/%s.conf' % svc
1363- sysv_init = '/etc/init.d/%s' % svc
1364- if os.path.exists(upstart_init):
1365- nrpe.add_check(
1366- shortname=svc,
1367- description='process check {%s}' % unit_name,
1368- check_cmd='check_upstart_job %s' % svc
1369- )
1370- elif os.path.exists(sysv_init):
1371- cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
1372- cron_file = ('*/5 * * * * root '
1373- '/usr/local/lib/nagios/plugins/check_exit_status.pl '
1374- '-s /etc/init.d/%s status > '
1375- '/var/lib/nagios/service-check-%s.txt\n' % (svc,
1376- svc)
1377- )
1378- f = open(cronpath, 'w')
1379- f.write(cron_file)
1380- f.close()
1381- nrpe.add_check(
1382- shortname=svc,
1383- description='process check {%s}' % unit_name,
1384- check_cmd='check_status_file.py -f '
1385- '/var/lib/nagios/service-check-%s.txt' % svc,
1386- )
1387-
1388-
1389-def copy_nrpe_checks():
1390- """
1391- Copy the nrpe checks into place
1392-
1393- """
1394- NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
1395- nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
1396- 'charmhelpers', 'contrib', 'openstack',
1397- 'files')
1398-
1399- if not os.path.exists(NAGIOS_PLUGINS):
1400- os.makedirs(NAGIOS_PLUGINS)
1401- for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
1402- if os.path.isfile(fname):
1403- shutil.copy2(fname,
1404- os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
1405-
1406-
1407-def add_haproxy_checks(nrpe, unit_name):
1408- """
1409- Add checks for each service in list
1410-
1411- :param NRPE nrpe: NRPE object to add check to
1412- :param str unit_name: Unit name to use in check description
1413- """
1414- nrpe.add_check(
1415- shortname='haproxy_servers',
1416- description='Check HAProxy {%s}' % unit_name,
1417- check_cmd='check_haproxy.sh')
1418- nrpe.add_check(
1419- shortname='haproxy_queue',
1420- description='Check HAProxy queue depth {%s}' % unit_name,
1421- check_cmd='check_haproxy_queue_depth.sh')
1422
1423=== removed file 'hooks/charmhelpers/contrib/charmsupport/volumes.py'
1424--- hooks/charmhelpers/contrib/charmsupport/volumes.py 2015-01-23 11:08:26 +0000
1425+++ hooks/charmhelpers/contrib/charmsupport/volumes.py 1970-01-01 00:00:00 +0000
1426@@ -1,175 +0,0 @@
1427-# Copyright 2014-2015 Canonical Limited.
1428-#
1429-# This file is part of charm-helpers.
1430-#
1431-# charm-helpers is free software: you can redistribute it and/or modify
1432-# it under the terms of the GNU Lesser General Public License version 3 as
1433-# published by the Free Software Foundation.
1434-#
1435-# charm-helpers is distributed in the hope that it will be useful,
1436-# but WITHOUT ANY WARRANTY; without even the implied warranty of
1437-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1438-# GNU Lesser General Public License for more details.
1439-#
1440-# You should have received a copy of the GNU Lesser General Public License
1441-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1442-
1443-'''
1444-Functions for managing volumes in juju units. One volume is supported per unit.
1445-Subordinates may have their own storage, provided it is on its own partition.
1446-
1447-Configuration stanzas::
1448-
1449- volume-ephemeral:
1450- type: boolean
1451- default: true
1452- description: >
1453- If false, a volume is mounted as sepecified in "volume-map"
1454- If true, ephemeral storage will be used, meaning that log data
1455- will only exist as long as the machine. YOU HAVE BEEN WARNED.
1456- volume-map:
1457- type: string
1458- default: {}
1459- description: >
1460- YAML map of units to device names, e.g:
1461- "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
1462- Service units will raise a configure-error if volume-ephemeral
1463- is 'true' and no volume-map value is set. Use 'juju set' to set a
1464- value and 'juju resolved' to complete configuration.
1465-
1466-Usage::
1467-
1468- from charmsupport.volumes import configure_volume, VolumeConfigurationError
1469- from charmsupport.hookenv import log, ERROR
1470- def post_mount_hook():
1471- stop_service('myservice')
1472- def post_mount_hook():
1473- start_service('myservice')
1474-
1475- if __name__ == '__main__':
1476- try:
1477- configure_volume(before_change=pre_mount_hook,
1478- after_change=post_mount_hook)
1479- except VolumeConfigurationError:
1480- log('Storage could not be configured', ERROR)
1481-
1482-'''
1483-
1484-# XXX: Known limitations
1485-# - fstab is neither consulted nor updated
1486-
1487-import os
1488-from charmhelpers.core import hookenv
1489-from charmhelpers.core import host
1490-import yaml
1491-
1492-
1493-MOUNT_BASE = '/srv/juju/volumes'
1494-
1495-
1496-class VolumeConfigurationError(Exception):
1497- '''Volume configuration data is missing or invalid'''
1498- pass
1499-
1500-
1501-def get_config():
1502- '''Gather and sanity-check volume configuration data'''
1503- volume_config = {}
1504- config = hookenv.config()
1505-
1506- errors = False
1507-
1508- if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
1509- volume_config['ephemeral'] = True
1510- else:
1511- volume_config['ephemeral'] = False
1512-
1513- try:
1514- volume_map = yaml.safe_load(config.get('volume-map', '{}'))
1515- except yaml.YAMLError as e:
1516- hookenv.log("Error parsing YAML volume-map: {}".format(e),
1517- hookenv.ERROR)
1518- errors = True
1519- if volume_map is None:
1520- # probably an empty string
1521- volume_map = {}
1522- elif not isinstance(volume_map, dict):
1523- hookenv.log("Volume-map should be a dictionary, not {}".format(
1524- type(volume_map)))
1525- errors = True
1526-
1527- volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
1528- if volume_config['device'] and volume_config['ephemeral']:
1529- # asked for ephemeral storage but also defined a volume ID
1530- hookenv.log('A volume is defined for this unit, but ephemeral '
1531- 'storage was requested', hookenv.ERROR)
1532- errors = True
1533- elif not volume_config['device'] and not volume_config['ephemeral']:
1534- # asked for permanent storage but did not define volume ID
1535- hookenv.log('Ephemeral storage was requested, but there is no volume '
1536- 'defined for this unit.', hookenv.ERROR)
1537- errors = True
1538-
1539- unit_mount_name = hookenv.local_unit().replace('/', '-')
1540- volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
1541-
1542- if errors:
1543- return None
1544- return volume_config
1545-
1546-
1547-def mount_volume(config):
1548- if os.path.exists(config['mountpoint']):
1549- if not os.path.isdir(config['mountpoint']):
1550- hookenv.log('Not a directory: {}'.format(config['mountpoint']))
1551- raise VolumeConfigurationError()
1552- else:
1553- host.mkdir(config['mountpoint'])
1554- if os.path.ismount(config['mountpoint']):
1555- unmount_volume(config)
1556- if not host.mount(config['device'], config['mountpoint'], persist=True):
1557- raise VolumeConfigurationError()
1558-
1559-
1560-def unmount_volume(config):
1561- if os.path.ismount(config['mountpoint']):
1562- if not host.umount(config['mountpoint'], persist=True):
1563- raise VolumeConfigurationError()
1564-
1565-
1566-def managed_mounts():
1567- '''List of all mounted managed volumes'''
1568- return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
1569-
1570-
1571-def configure_volume(before_change=lambda: None, after_change=lambda: None):
1572- '''Set up storage (or don't) according to the charm's volume configuration.
1573- Returns the mount point or "ephemeral". before_change and after_change
1574- are optional functions to be called if the volume configuration changes.
1575- '''
1576-
1577- config = get_config()
1578- if not config:
1579- hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
1580- raise VolumeConfigurationError()
1581-
1582- if config['ephemeral']:
1583- if os.path.ismount(config['mountpoint']):
1584- before_change()
1585- unmount_volume(config)
1586- after_change()
1587- return 'ephemeral'
1588- else:
1589- # persistent storage
1590- if os.path.ismount(config['mountpoint']):
1591- mounts = dict(managed_mounts())
1592- if mounts.get(config['mountpoint']) != config['device']:
1593- before_change()
1594- unmount_volume(config)
1595- mount_volume(config)
1596- after_change()
1597- else:
1598- before_change()
1599- mount_volume(config)
1600- after_change()
1601- return config['mountpoint']
1602
1603=== removed directory 'hooks/charmhelpers/contrib/hahelpers'
1604=== removed file 'hooks/charmhelpers/contrib/hahelpers/__init__.py'
1605--- hooks/charmhelpers/contrib/hahelpers/__init__.py 2015-01-23 11:08:26 +0000
1606+++ hooks/charmhelpers/contrib/hahelpers/__init__.py 1970-01-01 00:00:00 +0000
1607@@ -1,15 +0,0 @@
1608-# Copyright 2014-2015 Canonical Limited.
1609-#
1610-# This file is part of charm-helpers.
1611-#
1612-# charm-helpers is free software: you can redistribute it and/or modify
1613-# it under the terms of the GNU Lesser General Public License version 3 as
1614-# published by the Free Software Foundation.
1615-#
1616-# charm-helpers is distributed in the hope that it will be useful,
1617-# but WITHOUT ANY WARRANTY; without even the implied warranty of
1618-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1619-# GNU Lesser General Public License for more details.
1620-#
1621-# You should have received a copy of the GNU Lesser General Public License
1622-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1623
1624=== removed file 'hooks/charmhelpers/contrib/hahelpers/apache.py'
1625--- hooks/charmhelpers/contrib/hahelpers/apache.py 2015-02-24 12:07:07 +0000
1626+++ hooks/charmhelpers/contrib/hahelpers/apache.py 1970-01-01 00:00:00 +0000
1627@@ -1,82 +0,0 @@
1628-# Copyright 2014-2015 Canonical Limited.
1629-#
1630-# This file is part of charm-helpers.
1631-#
1632-# charm-helpers is free software: you can redistribute it and/or modify
1633-# it under the terms of the GNU Lesser General Public License version 3 as
1634-# published by the Free Software Foundation.
1635-#
1636-# charm-helpers is distributed in the hope that it will be useful,
1637-# but WITHOUT ANY WARRANTY; without even the implied warranty of
1638-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1639-# GNU Lesser General Public License for more details.
1640-#
1641-# You should have received a copy of the GNU Lesser General Public License
1642-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1643-
1644-#
1645-# Copyright 2012 Canonical Ltd.
1646-#
1647-# This file is sourced from lp:openstack-charm-helpers
1648-#
1649-# Authors:
1650-# James Page <james.page@ubuntu.com>
1651-# Adam Gandelman <adamg@ubuntu.com>
1652-#
1653-
1654-import subprocess
1655-
1656-from charmhelpers.core.hookenv import (
1657- config as config_get,
1658- relation_get,
1659- relation_ids,
1660- related_units as relation_list,
1661- log,
1662- INFO,
1663-)
1664-
1665-
1666-def get_cert(cn=None):
1667- # TODO: deal with multiple https endpoints via charm config
1668- cert = config_get('ssl_cert')
1669- key = config_get('ssl_key')
1670- if not (cert and key):
1671- log("Inspecting identity-service relations for SSL certificate.",
1672- level=INFO)
1673- cert = key = None
1674- if cn:
1675- ssl_cert_attr = 'ssl_cert_{}'.format(cn)
1676- ssl_key_attr = 'ssl_key_{}'.format(cn)
1677- else:
1678- ssl_cert_attr = 'ssl_cert'
1679- ssl_key_attr = 'ssl_key'
1680- for r_id in relation_ids('identity-service'):
1681- for unit in relation_list(r_id):
1682- if not cert:
1683- cert = relation_get(ssl_cert_attr,
1684- rid=r_id, unit=unit)
1685- if not key:
1686- key = relation_get(ssl_key_attr,
1687- rid=r_id, unit=unit)
1688- return (cert, key)
1689-
1690-
1691-def get_ca_cert():
1692- ca_cert = config_get('ssl_ca')
1693- if ca_cert is None:
1694- log("Inspecting identity-service relations for CA SSL certificate.",
1695- level=INFO)
1696- for r_id in relation_ids('identity-service'):
1697- for unit in relation_list(r_id):
1698- if ca_cert is None:
1699- ca_cert = relation_get('ca_cert',
1700- rid=r_id, unit=unit)
1701- return ca_cert
1702-
1703-
1704-def install_ca_cert(ca_cert):
1705- if ca_cert:
1706- with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
1707- 'w') as crt:
1708- crt.write(ca_cert)
1709- subprocess.check_call(['update-ca-certificates', '--fresh'])
1710
1711=== removed file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
1712--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2015-03-31 15:13:53 +0000
1713+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000
1714@@ -1,272 +0,0 @@
1715-# Copyright 2014-2015 Canonical Limited.
1716-#
1717-# This file is part of charm-helpers.
1718-#
1719-# charm-helpers is free software: you can redistribute it and/or modify
1720-# it under the terms of the GNU Lesser General Public License version 3 as
1721-# published by the Free Software Foundation.
1722-#
1723-# charm-helpers is distributed in the hope that it will be useful,
1724-# but WITHOUT ANY WARRANTY; without even the implied warranty of
1725-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1726-# GNU Lesser General Public License for more details.
1727-#
1728-# You should have received a copy of the GNU Lesser General Public License
1729-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1730-
1731-#
1732-# Copyright 2012 Canonical Ltd.
1733-#
1734-# Authors:
1735-# James Page <james.page@ubuntu.com>
1736-# Adam Gandelman <adamg@ubuntu.com>
1737-#
1738-
1739-"""
1740-Helpers for clustering and determining "cluster leadership" and other
1741-clustering-related helpers.
1742-"""
1743-
1744-import subprocess
1745-import os
1746-
1747-from socket import gethostname as get_unit_hostname
1748-
1749-import six
1750-
1751-from charmhelpers.core.hookenv import (
1752- log,
1753- relation_ids,
1754- related_units as relation_list,
1755- relation_get,
1756- config as config_get,
1757- INFO,
1758- ERROR,
1759- WARNING,
1760- unit_get,
1761-)
1762-from charmhelpers.core.decorators import (
1763- retry_on_exception,
1764-)
1765-from charmhelpers.core.strutils import (
1766- bool_from_string,
1767-)
1768-
1769-
1770-class HAIncompleteConfig(Exception):
1771- pass
1772-
1773-
1774-class CRMResourceNotFound(Exception):
1775- pass
1776-
1777-
1778-def is_elected_leader(resource):
1779- """
1780- Returns True if the charm executing this is the elected cluster leader.
1781-
1782- It relies on two mechanisms to determine leadership:
1783- 1. If the charm is part of a corosync cluster, call corosync to
1784- determine leadership.
1785- 2. If the charm is not part of a corosync cluster, the leader is
1786- determined as being "the alive unit with the lowest unit numer". In
1787- other words, the oldest surviving unit.
1788- """
1789- if is_clustered():
1790- if not is_crm_leader(resource):
1791- log('Deferring action to CRM leader.', level=INFO)
1792- return False
1793- else:
1794- peers = peer_units()
1795- if peers and not oldest_peer(peers):
1796- log('Deferring action to oldest service unit.', level=INFO)
1797- return False
1798- return True
1799-
1800-
1801-def is_clustered():
1802- for r_id in (relation_ids('ha') or []):
1803- for unit in (relation_list(r_id) or []):
1804- clustered = relation_get('clustered',
1805- rid=r_id,
1806- unit=unit)
1807- if clustered:
1808- return True
1809- return False
1810-
1811-
1812-@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
1813-def is_crm_leader(resource, retry=False):
1814- """
1815- Returns True if the charm calling this is the elected corosync leader,
1816- as returned by calling the external "crm" command.
1817-
1818- We allow this operation to be retried to avoid the possibility of getting a
1819- false negative. See LP #1396246 for more info.
1820- """
1821- cmd = ['crm', 'resource', 'show', resource]
1822- try:
1823- status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
1824- if not isinstance(status, six.text_type):
1825- status = six.text_type(status, "utf-8")
1826- except subprocess.CalledProcessError:
1827- status = None
1828-
1829- if status and get_unit_hostname() in status:
1830- return True
1831-
1832- if status and "resource %s is NOT running" % (resource) in status:
1833- raise CRMResourceNotFound("CRM resource %s not found" % (resource))
1834-
1835- return False
1836-
1837-
1838-def is_leader(resource):
1839- log("is_leader is deprecated. Please consider using is_crm_leader "
1840- "instead.", level=WARNING)
1841- return is_crm_leader(resource)
1842-
1843-
1844-def peer_units(peer_relation="cluster"):
1845- peers = []
1846- for r_id in (relation_ids(peer_relation) or []):
1847- for unit in (relation_list(r_id) or []):
1848- peers.append(unit)
1849- return peers
1850-
1851-
1852-def peer_ips(peer_relation='cluster', addr_key='private-address'):
1853- '''Return a dict of peers and their private-address'''
1854- peers = {}
1855- for r_id in relation_ids(peer_relation):
1856- for unit in relation_list(r_id):
1857- peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
1858- return peers
1859-
1860-
1861-def oldest_peer(peers):
1862- """Determines who the oldest peer is by comparing unit numbers."""
1863- local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
1864- for peer in peers:
1865- remote_unit_no = int(peer.split('/')[1])
1866- if remote_unit_no < local_unit_no:
1867- return False
1868- return True
1869-
1870-
1871-def eligible_leader(resource):
1872- log("eligible_leader is deprecated. Please consider using "
1873- "is_elected_leader instead.", level=WARNING)
1874- return is_elected_leader(resource)
1875-
1876-
1877-def https():
1878- '''
1879- Determines whether enough data has been provided in configuration
1880- or relation data to configure HTTPS
1881- .
1882- returns: boolean
1883- '''
1884- use_https = config_get('use-https')
1885- if use_https and bool_from_string(use_https):
1886- return True
1887- if config_get('ssl_cert') and config_get('ssl_key'):
1888- return True
1889- for r_id in relation_ids('identity-service'):
1890- for unit in relation_list(r_id):
1891- # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
1892- rel_state = [
1893- relation_get('https_keystone', rid=r_id, unit=unit),
1894- relation_get('ca_cert', rid=r_id, unit=unit),
1895- ]
1896- # NOTE: works around (LP: #1203241)
1897- if (None not in rel_state) and ('' not in rel_state):
1898- return True
1899- return False
1900-
1901-
1902-def determine_api_port(public_port, singlenode_mode=False):
1903- '''
1904- Determine correct API server listening port based on
1905- existence of HTTPS reverse proxy and/or haproxy.
1906-
1907- public_port: int: standard public port for given service
1908-
1909- singlenode_mode: boolean: Shuffle ports when only a single unit is present
1910-
1911- returns: int: the correct listening port for the API service
1912- '''
1913- i = 0
1914- if singlenode_mode:
1915- i += 1
1916- elif len(peer_units()) > 0 or is_clustered():
1917- i += 1
1918- if https():
1919- i += 1
1920- return public_port - (i * 10)
1921-
1922-
1923-def determine_apache_port(public_port, singlenode_mode=False):
1924- '''
1925- Description: Determine correct apache listening port based on public IP +
1926- state of the cluster.
1927-
1928- public_port: int: standard public port for given service
1929-
1930- singlenode_mode: boolean: Shuffle ports when only a single unit is present
1931-
1932- returns: int: the correct listening port for the HAProxy service
1933- '''
1934- i = 0
1935- if singlenode_mode:
1936- i += 1
1937- elif len(peer_units()) > 0 or is_clustered():
1938- i += 1
1939- return public_port - (i * 10)
1940-
1941-
1942-def get_hacluster_config(exclude_keys=None):
1943- '''
1944- Obtains all relevant configuration from charm configuration required
1945- for initiating a relation to hacluster:
1946-
1947- ha-bindiface, ha-mcastport, vip
1948-
1949- param: exclude_keys: list of setting key(s) to be excluded.
1950- returns: dict: A dict containing settings keyed by setting name.
1951- raises: HAIncompleteConfig if settings are missing.
1952- '''
1953- settings = ['ha-bindiface', 'ha-mcastport', 'vip']
1954- conf = {}
1955- for setting in settings:
1956- if exclude_keys and setting in exclude_keys:
1957- continue
1958-
1959- conf[setting] = config_get(setting)
1960- missing = []
1961- [missing.append(s) for s, v in six.iteritems(conf) if v is None]
1962- if missing:
1963- log('Insufficient config data to configure hacluster.', level=ERROR)
1964- raise HAIncompleteConfig
1965- return conf
1966-
1967-
1968-def canonical_url(configs, vip_setting='vip'):
1969- '''
1970- Returns the correct HTTP URL to this host given the state of HTTPS
1971- configuration and hacluster.
1972-
1973- :configs : OSTemplateRenderer: A config tempating object to inspect for
1974- a complete https context.
1975-
1976- :vip_setting: str: Setting in charm config that specifies
1977- VIP address.
1978- '''
1979- scheme = 'http'
1980- if 'https' in configs.complete_contexts():
1981- scheme = 'https'
1982- if is_clustered():
1983- addr = config_get(vip_setting)
1984- else:
1985- addr = unit_get('private-address')
1986- return '%s://%s' % (scheme, addr)
1987
1988=== removed directory 'hooks/charmhelpers/contrib/network'
1989=== removed file 'hooks/charmhelpers/contrib/network/__init__.py'
1990--- hooks/charmhelpers/contrib/network/__init__.py 2015-01-23 11:08:26 +0000
1991+++ hooks/charmhelpers/contrib/network/__init__.py 1970-01-01 00:00:00 +0000
1992@@ -1,15 +0,0 @@
1993-# Copyright 2014-2015 Canonical Limited.
1994-#
1995-# This file is part of charm-helpers.
1996-#
1997-# charm-helpers is free software: you can redistribute it and/or modify
1998-# it under the terms of the GNU Lesser General Public License version 3 as
1999-# published by the Free Software Foundation.
2000-#
2001-# charm-helpers is distributed in the hope that it will be useful,
2002-# but WITHOUT ANY WARRANTY; without even the implied warranty of
2003-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2004-# GNU Lesser General Public License for more details.
2005-#
2006-# You should have received a copy of the GNU Lesser General Public License
2007-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
2008
2009=== removed file 'hooks/charmhelpers/contrib/network/ip.py'
2010--- hooks/charmhelpers/contrib/network/ip.py 2015-03-31 15:13:53 +0000
2011+++ hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000
2012@@ -1,450 +0,0 @@
2013-# Copyright 2014-2015 Canonical Limited.
2014-#
2015-# This file is part of charm-helpers.
2016-#
2017-# charm-helpers is free software: you can redistribute it and/or modify
2018-# it under the terms of the GNU Lesser General Public License version 3 as
2019-# published by the Free Software Foundation.
2020-#
2021-# charm-helpers is distributed in the hope that it will be useful,
2022-# but WITHOUT ANY WARRANTY; without even the implied warranty of
2023-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2024-# GNU Lesser General Public License for more details.
2025-#
2026-# You should have received a copy of the GNU Lesser General Public License
2027-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
2028-
2029-import glob
2030-import re
2031-import subprocess
2032-import six
2033-import socket
2034-
2035-from functools import partial
2036-
2037-from charmhelpers.core.hookenv import unit_get
2038-from charmhelpers.fetch import apt_install
2039-from charmhelpers.core.hookenv import (
2040- log,
2041- WARNING,
2042-)
2043-
2044-try:
2045- import netifaces
2046-except ImportError:
2047- apt_install('python-netifaces')
2048- import netifaces
2049-
2050-try:
2051- import netaddr
2052-except ImportError:
2053- apt_install('python-netaddr')
2054- import netaddr
2055-
2056-
2057-def _validate_cidr(network):
2058- try:
2059- netaddr.IPNetwork(network)
2060- except (netaddr.core.AddrFormatError, ValueError):
2061- raise ValueError("Network (%s) is not in CIDR presentation format" %
2062- network)
2063-
2064-
2065-def no_ip_found_error_out(network):
2066- errmsg = ("No IP address found in network: %s" % network)
2067- raise ValueError(errmsg)
2068-
2069-
2070-def get_address_in_network(network, fallback=None, fatal=False):
2071- """Get an IPv4 or IPv6 address within the network from the host.
2072-
2073- :param network (str): CIDR presentation format. For example,
2074- '192.168.1.0/24'.
2075- :param fallback (str): If no address is found, return fallback.
2076- :param fatal (boolean): If no address is found, fallback is not
2077- set and fatal is True then exit(1).
2078- """
2079- if network is None:
2080- if fallback is not None:
2081- return fallback
2082-
2083- if fatal:
2084- no_ip_found_error_out(network)
2085- else:
2086- return None
2087-
2088- _validate_cidr(network)
2089- network = netaddr.IPNetwork(network)
2090- for iface in netifaces.interfaces():
2091- addresses = netifaces.ifaddresses(iface)
2092- if network.version == 4 and netifaces.AF_INET in addresses:
2093- addr = addresses[netifaces.AF_INET][0]['addr']
2094- netmask = addresses[netifaces.AF_INET][0]['netmask']
2095- cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
2096- if cidr in network:
2097- return str(cidr.ip)
2098-
2099- if network.version == 6 and netifaces.AF_INET6 in addresses:
2100- for addr in addresses[netifaces.AF_INET6]:
2101- if not addr['addr'].startswith('fe80'):
2102- cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
2103- addr['netmask']))
2104- if cidr in network:
2105- return str(cidr.ip)
2106-
2107- if fallback is not None:
2108- return fallback
2109-
2110- if fatal:
2111- no_ip_found_error_out(network)
2112-
2113- return None
2114-
2115-
2116-def is_ipv6(address):
2117- """Determine whether provided address is IPv6 or not."""
2118- try:
2119- address = netaddr.IPAddress(address)
2120- except netaddr.AddrFormatError:
2121- # probably a hostname - so not an address at all!
2122- return False
2123-
2124- return address.version == 6
2125-
2126-
2127-def is_address_in_network(network, address):
2128- """
2129- Determine whether the provided address is within a network range.
2130-
2131- :param network (str): CIDR presentation format. For example,
2132- '192.168.1.0/24'.
2133- :param address: An individual IPv4 or IPv6 address without a net
2134- mask or subnet prefix. For example, '192.168.1.1'.
2135- :returns boolean: Flag indicating whether address is in network.
2136- """
2137- try:
2138- network = netaddr.IPNetwork(network)
2139- except (netaddr.core.AddrFormatError, ValueError):
2140- raise ValueError("Network (%s) is not in CIDR presentation format" %
2141- network)
2142-
2143- try:
2144- address = netaddr.IPAddress(address)
2145- except (netaddr.core.AddrFormatError, ValueError):
2146- raise ValueError("Address (%s) is not in correct presentation format" %
2147- address)
2148-
2149- if address in network:
2150- return True
2151- else:
2152- return False
2153-
2154-
2155-def _get_for_address(address, key):
2156- """Retrieve an attribute of or the physical interface that
2157- the IP address provided could be bound to.
2158-
2159- :param address (str): An individual IPv4 or IPv6 address without a net
2160- mask or subnet prefix. For example, '192.168.1.1'.
2161- :param key: 'iface' for the physical interface name or an attribute
2162- of the configured interface, for example 'netmask'.
2163- :returns str: Requested attribute or None if address is not bindable.
2164- """
2165- address = netaddr.IPAddress(address)
2166- for iface in netifaces.interfaces():
2167- addresses = netifaces.ifaddresses(iface)
2168- if address.version == 4 and netifaces.AF_INET in addresses:
2169- addr = addresses[netifaces.AF_INET][0]['addr']
2170- netmask = addresses[netifaces.AF_INET][0]['netmask']
2171- network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
2172- cidr = network.cidr
2173- if address in cidr:
2174- if key == 'iface':
2175- return iface
2176- else:
2177- return addresses[netifaces.AF_INET][0][key]
2178-
2179- if address.version == 6 and netifaces.AF_INET6 in addresses:
2180- for addr in addresses[netifaces.AF_INET6]:
2181- if not addr['addr'].startswith('fe80'):
2182- network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
2183- addr['netmask']))
2184- cidr = network.cidr
2185- if address in cidr:
2186- if key == 'iface':
2187- return iface
2188- elif key == 'netmask' and cidr:
2189- return str(cidr).split('/')[1]
2190- else:
2191- return addr[key]
2192-
2193- return None
2194-
2195-
2196-get_iface_for_address = partial(_get_for_address, key='iface')
2197-
2198-
2199-get_netmask_for_address = partial(_get_for_address, key='netmask')
2200-
2201-
2202-def format_ipv6_addr(address):
2203- """If address is IPv6, wrap it in '[]' otherwise return None.
2204-
2205- This is required by most configuration files when specifying IPv6
2206- addresses.
2207- """
2208- if is_ipv6(address):
2209- return "[%s]" % address
2210-
2211- return None
2212-
2213-
2214-def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
2215- fatal=True, exc_list=None):
2216- """Return the assigned IP address for a given interface, if any."""
2217- # Extract nic if passed /dev/ethX
2218- if '/' in iface:
2219- iface = iface.split('/')[-1]
2220-
2221- if not exc_list:
2222- exc_list = []
2223-
2224- try:
2225- inet_num = getattr(netifaces, inet_type)
2226- except AttributeError:
2227- raise Exception("Unknown inet type '%s'" % str(inet_type))
2228-
2229- interfaces = netifaces.interfaces()
2230- if inc_aliases:
2231- ifaces = []
2232- for _iface in interfaces:
2233- if iface == _iface or _iface.split(':')[0] == iface:
2234- ifaces.append(_iface)
2235-
2236- if fatal and not ifaces:
2237- raise Exception("Invalid interface '%s'" % iface)
2238-
2239- ifaces.sort()
2240- else:
2241- if iface not in interfaces:
2242- if fatal:
2243- raise Exception("Interface '%s' not found " % (iface))
2244- else:
2245- return []
2246-
2247- else:
2248- ifaces = [iface]
2249-
2250- addresses = []
2251- for netiface in ifaces:
2252- net_info = netifaces.ifaddresses(netiface)
2253- if inet_num in net_info:
2254- for entry in net_info[inet_num]:
2255- if 'addr' in entry and entry['addr'] not in exc_list:
2256- addresses.append(entry['addr'])
2257-
2258- if fatal and not addresses:
2259- raise Exception("Interface '%s' doesn't have any %s addresses." %
2260- (iface, inet_type))
2261-
2262- return sorted(addresses)
2263-
2264-
2265-get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
2266-
2267-
2268-def get_iface_from_addr(addr):
2269- """Work out on which interface the provided address is configured."""
2270- for iface in netifaces.interfaces():
2271- addresses = netifaces.ifaddresses(iface)
2272- for inet_type in addresses:
2273- for _addr in addresses[inet_type]:
2274- _addr = _addr['addr']
2275- # link local
2276- ll_key = re.compile("(.+)%.*")
2277- raw = re.match(ll_key, _addr)
2278- if raw:
2279- _addr = raw.group(1)
2280-
2281- if _addr == addr:
2282- log("Address '%s' is configured on iface '%s'" %
2283- (addr, iface))
2284- return iface
2285-
2286- msg = "Unable to infer net iface on which '%s' is configured" % (addr)
2287- raise Exception(msg)
2288-
2289-
2290-def sniff_iface(f):
2291- """Ensure decorated function is called with a value for iface.
2292-
2293- If no iface provided, inject net iface inferred from unit private address.
2294- """
2295- def iface_sniffer(*args, **kwargs):
2296- if not kwargs.get('iface', None):
2297- kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
2298-
2299- return f(*args, **kwargs)
2300-
2301- return iface_sniffer
2302-
2303-
2304-@sniff_iface
2305-def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
2306- dynamic_only=True):
2307- """Get assigned IPv6 address for a given interface.
2308-
2309- Returns list of addresses found. If no address found, returns empty list.
2310-
2311- If iface is None, we infer the current primary interface by doing a reverse
2312- lookup on the unit private-address.
2313-
2314- We currently only support scope global IPv6 addresses i.e. non-temporary
2315- addresses. If no global IPv6 address is found, return the first one found
2316- in the ipv6 address list.
2317- """
2318- addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
2319- inc_aliases=inc_aliases, fatal=fatal,
2320- exc_list=exc_list)
2321-
2322- if addresses:
2323- global_addrs = []
2324- for addr in addresses:
2325- key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
2326- m = re.match(key_scope_link_local, addr)
2327- if m:
2328- eui_64_mac = m.group(1)
2329- iface = m.group(2)
2330- else:
2331- global_addrs.append(addr)
2332-
2333- if global_addrs:
2334- # Make sure any found global addresses are not temporary
2335- cmd = ['ip', 'addr', 'show', iface]
2336- out = subprocess.check_output(cmd).decode('UTF-8')
2337- if dynamic_only:
2338- key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
2339- else:
2340- key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
2341-
2342- addrs = []
2343- for line in out.split('\n'):
2344- line = line.strip()
2345- m = re.match(key, line)
2346- if m and 'temporary' not in line:
2347- # Return the first valid address we find
2348- for addr in global_addrs:
2349- if m.group(1) == addr:
2350- if not dynamic_only or \
2351- m.group(1).endswith(eui_64_mac):
2352- addrs.append(addr)
2353-
2354- if addrs:
2355- return addrs
2356-
2357- if fatal:
2358- raise Exception("Interface '%s' does not have a scope global "
2359- "non-temporary ipv6 address." % iface)
2360-
2361- return []
2362-
2363-
2364-def get_bridges(vnic_dir='/sys/devices/virtual/net'):
2365- """Return a list of bridges on the system."""
2366- b_regex = "%s/*/bridge" % vnic_dir
2367- return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
2368-
2369-
2370-def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
2371- """Return a list of nics comprising a given bridge on the system."""
2372- brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
2373- return [x.split('/')[-1] for x in glob.glob(brif_regex)]
2374-
2375-
2376-def is_bridge_member(nic):
2377- """Check if a given nic is a member of a bridge."""
2378- for bridge in get_bridges():
2379- if nic in get_bridge_nics(bridge):
2380- return True
2381-
2382- return False
2383-
2384-
2385-def is_ip(address):
2386- """
2387- Returns True if address is a valid IP address.
2388- """
2389- try:
2390- # Test to see if already an IPv4 address
2391- socket.inet_aton(address)
2392- return True
2393- except socket.error:
2394- return False
2395-
2396-
2397-def ns_query(address):
2398- try:
2399- import dns.resolver
2400- except ImportError:
2401- apt_install('python-dnspython')
2402- import dns.resolver
2403-
2404- if isinstance(address, dns.name.Name):
2405- rtype = 'PTR'
2406- elif isinstance(address, six.string_types):
2407- rtype = 'A'
2408- else:
2409- return None
2410-
2411- answers = dns.resolver.query(address, rtype)
2412- if answers:
2413- return str(answers[0])
2414- return None
2415-
2416-
2417-def get_host_ip(hostname, fallback=None):
2418- """
2419- Resolves the IP for a given hostname, or returns
2420- the input if it is already an IP.
2421- """
2422- if is_ip(hostname):
2423- return hostname
2424-
2425- ip_addr = ns_query(hostname)
2426- if not ip_addr:
2427- try:
2428- ip_addr = socket.gethostbyname(hostname)
2429- except:
2430- log("Failed to resolve hostname '%s'" % (hostname),
2431- level=WARNING)
2432- return fallback
2433- return ip_addr
2434-
2435-
2436-def get_hostname(address, fqdn=True):
2437- """
2438- Resolves hostname for given IP, or returns the input
2439- if it is already a hostname.
2440- """
2441- if is_ip(address):
2442- try:
2443- import dns.reversename
2444- except ImportError:
2445- apt_install("python-dnspython")
2446- import dns.reversename
2447-
2448- rev = dns.reversename.from_address(address)
2449- result = ns_query(rev)
2450- if not result:
2451- return None
2452- else:
2453- result = address
2454-
2455- if fqdn:
2456- # strip trailing .
2457- if result.endswith('.'):
2458- return result[:-1]
2459- else:
2460- return result
2461- else:
2462- return result.split('.')[0]
2463
2464=== removed directory 'hooks/charmhelpers/contrib/network/ovs'
2465=== removed file 'hooks/charmhelpers/contrib/network/ovs/__init__.py'
2466--- hooks/charmhelpers/contrib/network/ovs/__init__.py 2015-01-23 11:08:26 +0000
2467+++ hooks/charmhelpers/contrib/network/ovs/__init__.py 1970-01-01 00:00:00 +0000
2468@@ -1,96 +0,0 @@
2469-# Copyright 2014-2015 Canonical Limited.
2470-#
2471-# This file is part of charm-helpers.
2472-#
2473-# charm-helpers is free software: you can redistribute it and/or modify
2474-# it under the terms of the GNU Lesser General Public License version 3 as
2475-# published by the Free Software Foundation.
2476-#
2477-# charm-helpers is distributed in the hope that it will be useful,
2478-# but WITHOUT ANY WARRANTY; without even the implied warranty of
2479-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2480-# GNU Lesser General Public License for more details.
2481-#
2482-# You should have received a copy of the GNU Lesser General Public License
2483-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
2484-
2485-''' Helpers for interacting with OpenvSwitch '''
2486-import subprocess
2487-import os
2488-from charmhelpers.core.hookenv import (
2489- log, WARNING
2490-)
2491-from charmhelpers.core.host import (
2492- service
2493-)
2494-
2495-
2496-def add_bridge(name):
2497- ''' Add the named bridge to openvswitch '''
2498- log('Creating bridge {}'.format(name))
2499- subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name])
2500-
2501-
2502-def del_bridge(name):
2503- ''' Delete the named bridge from openvswitch '''
2504- log('Deleting bridge {}'.format(name))
2505- subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])
2506-
2507-
2508-def add_bridge_port(name, port, promisc=False):
2509- ''' Add a port to the named openvswitch bridge '''
2510- log('Adding port {} to bridge {}'.format(port, name))
2511- subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
2512- name, port])
2513- subprocess.check_call(["ip", "link", "set", port, "up"])
2514- if promisc:
2515- subprocess.check_call(["ip", "link", "set", port, "promisc", "on"])
2516- else:
2517- subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
2518-
2519-
2520-def del_bridge_port(name, port):
2521- ''' Delete a port from the named openvswitch bridge '''
2522- log('Deleting port {} from bridge {}'.format(port, name))
2523- subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
2524- name, port])
2525- subprocess.check_call(["ip", "link", "set", port, "down"])
2526- subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
2527-
2528-
2529-def set_manager(manager):
2530- ''' Set the controller for the local openvswitch '''
2531- log('Setting manager for local ovs to {}'.format(manager))
2532- subprocess.check_call(['ovs-vsctl', 'set-manager',
2533- 'ssl:{}'.format(manager)])
2534-
2535-
2536-CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
2537-
2538-
2539-def get_certificate():
2540- ''' Read openvswitch certificate from disk '''
2541- if os.path.exists(CERT_PATH):
2542- log('Reading ovs certificate from {}'.format(CERT_PATH))
2543- with open(CERT_PATH, 'r') as cert:
2544- full_cert = cert.read()
2545- begin_marker = "-----BEGIN CERTIFICATE-----"
2546- end_marker = "-----END CERTIFICATE-----"
2547- begin_index = full_cert.find(begin_marker)
2548- end_index = full_cert.rfind(end_marker)
2549- if end_index == -1 or begin_index == -1:
2550- raise RuntimeError("Certificate does not contain valid begin"
2551- " and end markers.")
2552- full_cert = full_cert[begin_index:(end_index + len(end_marker))]
2553- return full_cert
2554- else:
2555- log('Certificate not found', level=WARNING)
2556- return None
2557-
2558-
2559-def full_restart():
2560- ''' Full restart and reload of openvswitch '''
2561- if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
2562- service('start', 'openvswitch-force-reload-kmod')
2563- else:
2564- service('force-reload-kmod', 'openvswitch-switch')
2565
2566=== removed file 'hooks/charmhelpers/contrib/network/ufw.py'
2567--- hooks/charmhelpers/contrib/network/ufw.py 2015-03-23 18:25:01 +0000
2568+++ hooks/charmhelpers/contrib/network/ufw.py 1970-01-01 00:00:00 +0000
2569@@ -1,276 +0,0 @@
2570-# Copyright 2014-2015 Canonical Limited.
2571-#
2572-# This file is part of charm-helpers.
2573-#
2574-# charm-helpers is free software: you can redistribute it and/or modify
2575-# it under the terms of the GNU Lesser General Public License version 3 as
2576-# published by the Free Software Foundation.
2577-#
2578-# charm-helpers is distributed in the hope that it will be useful,
2579-# but WITHOUT ANY WARRANTY; without even the implied warranty of
2580-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2581-# GNU Lesser General Public License for more details.
2582-#
2583-# You should have received a copy of the GNU Lesser General Public License
2584-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
2585-
2586-"""
2587-This module contains helpers to add and remove ufw rules.
2588-
2589-Examples:
2590-
2591-- open SSH port for subnet 10.0.3.0/24:
2592-
2593- >>> from charmhelpers.contrib.network import ufw
2594- >>> ufw.enable()
2595- >>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp')
2596-
2597-- open service by name as defined in /etc/services:
2598-
2599- >>> from charmhelpers.contrib.network import ufw
2600- >>> ufw.enable()
2601- >>> ufw.service('ssh', 'open')
2602-
2603-- close service by port number:
2604-
2605- >>> from charmhelpers.contrib.network import ufw
2606- >>> ufw.enable()
2607- >>> ufw.service('4949', 'close') # munin
2608-"""
2609-import re
2610-import os
2611-import subprocess
2612-from charmhelpers.core import hookenv
2613-
2614-__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
2615-
2616-
2617-class UFWError(Exception):
2618- pass
2619-
2620-
2621-class UFWIPv6Error(UFWError):
2622- pass
2623-
2624-
2625-def is_enabled():
2626- """
2627- Check if `ufw` is enabled
2628-
2629- :returns: True if ufw is enabled
2630- """
2631- output = subprocess.check_output(['ufw', 'status'],
2632- universal_newlines=True,
2633- env={'LANG': 'en_US',
2634- 'PATH': os.environ['PATH']})
2635-
2636- m = re.findall(r'^Status: active\n', output, re.M)
2637-
2638- return len(m) >= 1
2639-
2640-
2641-def is_ipv6_ok(soft_fail=False):
2642- """
2643- Check if IPv6 support is present and ip6tables functional
2644-
2645- :param soft_fail: If set to True and IPv6 support is broken, then reports
2646- that the host doesn't have IPv6 support, otherwise a
2647- UFWIPv6Error exception is raised.
2648- :returns: True if IPv6 is working, False otherwise
2649- """
2650-
2651- # do we have IPv6 in the machine?
2652- if os.path.isdir('/proc/sys/net/ipv6'):
2653- # is ip6tables kernel module loaded?
2654- lsmod = subprocess.check_output(['lsmod'], universal_newlines=True)
2655- matches = re.findall('^ip6_tables[ ]+', lsmod, re.M)
2656- if len(matches) == 0:
2657- # ip6tables support isn't complete, let's try to load it
2658- try:
2659- subprocess.check_output(['modprobe', 'ip6_tables'],
2660- universal_newlines=True)
2661- # great, we could load the module
2662- return True
2663- except subprocess.CalledProcessError as ex:
2664- hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
2665- level="WARN")
2666- # we are in a world where ip6tables isn't working
2667- if soft_fail:
2668- # so we inform that the machine doesn't have IPv6
2669- return False
2670- else:
2671- raise UFWIPv6Error("IPv6 firewall support broken")
2672- else:
2673- # the module is present :)
2674- return True
2675-
2676- else:
2677- # the system doesn't have IPv6
2678- return False
2679-
2680-
2681-def disable_ipv6():
2682- """
2683- Disable ufw IPv6 support in /etc/default/ufw
2684- """
2685- exit_code = subprocess.call(['sed', '-i', 's/IPV6=.*/IPV6=no/g',
2686- '/etc/default/ufw'])
2687- if exit_code == 0:
2688- hookenv.log('IPv6 support in ufw disabled', level='INFO')
2689- else:
2690- hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR")
2691- raise UFWError("Couldn't disable IPv6 support in ufw")
2692-
2693-
2694-def enable(soft_fail=False):
2695- """
2696- Enable ufw
2697-
2698- :param soft_fail: If set to True silently disables IPv6 support in ufw,
2699- otherwise a UFWIPv6Error exception is raised when IP6
2700- support is broken.
2701- :returns: True if ufw is successfully enabled
2702- """
2703- if is_enabled():
2704- return True
2705-
2706- if not is_ipv6_ok(soft_fail):
2707- disable_ipv6()
2708-
2709- output = subprocess.check_output(['ufw', 'enable'],
2710- universal_newlines=True,
2711- env={'LANG': 'en_US',
2712- 'PATH': os.environ['PATH']})
2713-
2714- m = re.findall('^Firewall is active and enabled on system startup\n',
2715- output, re.M)
2716- hookenv.log(output, level='DEBUG')
2717-
2718- if len(m) == 0:
2719- hookenv.log("ufw couldn't be enabled", level='WARN')
2720- return False
2721- else:
2722- hookenv.log("ufw enabled", level='INFO')
2723- return True
2724-
2725-
2726-def disable():
2727- """
2728- Disable ufw
2729-
2730- :returns: True if ufw is successfully disabled
2731- """
2732- if not is_enabled():
2733- return True
2734-
2735- output = subprocess.check_output(['ufw', 'disable'],
2736- universal_newlines=True,
2737- env={'LANG': 'en_US',
2738- 'PATH': os.environ['PATH']})
2739-
2740- m = re.findall(r'^Firewall stopped and disabled on system startup\n',
2741- output, re.M)
2742- hookenv.log(output, level='DEBUG')
2743-
2744- if len(m) == 0:
2745- hookenv.log("ufw couldn't be disabled", level='WARN')
2746- return False
2747- else:
2748- hookenv.log("ufw disabled", level='INFO')
2749- return True
2750-
2751-
2752-def modify_access(src, dst='any', port=None, proto=None, action='allow'):
2753- """
2754- Grant access to an address or subnet
2755-
2756- :param src: address (e.g. 192.168.1.234) or subnet
2757- (e.g. 192.168.1.0/24).
2758- :param dst: destiny of the connection, if the machine has multiple IPs and
2759- connections to only one of those have to accepted this is the
2760- field has to be set.
2761- :param port: destiny port
2762- :param proto: protocol (tcp or udp)
2763- :param action: `allow` or `delete`
2764- """
2765- if not is_enabled():
2766- hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
2767- return
2768-
2769- if action == 'delete':
2770- cmd = ['ufw', 'delete', 'allow']
2771- else:
2772- cmd = ['ufw', action]
2773-
2774- if src is not None:
2775- cmd += ['from', src]
2776-
2777- if dst is not None:
2778- cmd += ['to', dst]
2779-
2780- if port is not None:
2781- cmd += ['port', str(port)]
2782-
2783- if proto is not None:
2784- cmd += ['proto', proto]
2785-
2786- hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
2787- p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
2788- (stdout, stderr) = p.communicate()
2789-
2790- hookenv.log(stdout, level='INFO')
2791-
2792- if p.returncode != 0:
2793- hookenv.log(stderr, level='ERROR')
2794- hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
2795- p.returncode),
2796- level='ERROR')
2797-
2798-
2799-def grant_access(src, dst='any', port=None, proto=None):
2800- """
2801- Grant access to an address or subnet
2802-
2803- :param src: address (e.g. 192.168.1.234) or subnet
2804- (e.g. 192.168.1.0/24).
2805- :param dst: destiny of the connection, if the machine has multiple IPs and
2806- connections to only one of those have to accepted this is the
2807- field has to be set.
2808- :param port: destiny port
2809- :param proto: protocol (tcp or udp)
2810- """
2811- return modify_access(src, dst=dst, port=port, proto=proto, action='allow')
2812-
2813-
2814-def revoke_access(src, dst='any', port=None, proto=None):
2815- """
2816- Revoke access to an address or subnet
2817-
2818- :param src: address (e.g. 192.168.1.234) or subnet
2819- (e.g. 192.168.1.0/24).
2820- :param dst: destiny of the connection, if the machine has multiple IPs and
2821- connections to only one of those have to accepted this is the
2822- field has to be set.
2823- :param port: destiny port
2824- :param proto: protocol (tcp or udp)
2825- """
2826- return modify_access(src, dst=dst, port=port, proto=proto, action='delete')
2827-
2828-
2829-def service(name, action):
2830- """
2831- Open/close access to a service
2832-
2833- :param name: could be a service name defined in `/etc/services` or a port
2834- number.
2835- :param action: `open` or `close`
2836- """
2837- if action == 'open':
2838- subprocess.check_output(['ufw', 'allow', str(name)],
2839- universal_newlines=True)
2840- elif action == 'close':
2841- subprocess.check_output(['ufw', 'delete', 'allow', str(name)],
2842- universal_newlines=True)
2843- else:
2844- raise UFWError(("'{}' not supported, use 'allow' "
2845- "or 'delete'").format(action))
2846
2847=== removed directory 'hooks/charmhelpers/contrib/openstack'
2848=== removed file 'hooks/charmhelpers/contrib/openstack/__init__.py'
2849--- hooks/charmhelpers/contrib/openstack/__init__.py 2015-01-23 11:08:26 +0000
2850+++ hooks/charmhelpers/contrib/openstack/__init__.py 1970-01-01 00:00:00 +0000
2851@@ -1,15 +0,0 @@
2852-# Copyright 2014-2015 Canonical Limited.
2853-#
2854-# This file is part of charm-helpers.
2855-#
2856-# charm-helpers is free software: you can redistribute it and/or modify
2857-# it under the terms of the GNU Lesser General Public License version 3 as
2858-# published by the Free Software Foundation.
2859-#
2860-# charm-helpers is distributed in the hope that it will be useful,
2861-# but WITHOUT ANY WARRANTY; without even the implied warranty of
2862-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2863-# GNU Lesser General Public License for more details.
2864-#
2865-# You should have received a copy of the GNU Lesser General Public License
2866-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
2867
2868=== removed file 'hooks/charmhelpers/contrib/openstack/alternatives.py'
2869--- hooks/charmhelpers/contrib/openstack/alternatives.py 2015-01-23 11:08:26 +0000
2870+++ hooks/charmhelpers/contrib/openstack/alternatives.py 1970-01-01 00:00:00 +0000
2871@@ -1,33 +0,0 @@
2872-# Copyright 2014-2015 Canonical Limited.
2873-#
2874-# This file is part of charm-helpers.
2875-#
2876-# charm-helpers is free software: you can redistribute it and/or modify
2877-# it under the terms of the GNU Lesser General Public License version 3 as
2878-# published by the Free Software Foundation.
2879-#
2880-# charm-helpers is distributed in the hope that it will be useful,
2881-# but WITHOUT ANY WARRANTY; without even the implied warranty of
2882-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2883-# GNU Lesser General Public License for more details.
2884-#
2885-# You should have received a copy of the GNU Lesser General Public License
2886-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
2887-
2888-''' Helper for managing alternatives for file conflict resolution '''
2889-
2890-import subprocess
2891-import shutil
2892-import os
2893-
2894-
2895-def install_alternative(name, target, source, priority=50):
2896- ''' Install alternative configuration '''
2897- if (os.path.exists(target) and not os.path.islink(target)):
2898- # Move existing file/directory away before installing
2899- shutil.move(target, '{}.bak'.format(target))
2900- cmd = [
2901- 'update-alternatives', '--force', '--install',
2902- target, name, source, str(priority)
2903- ]
2904- subprocess.check_call(cmd)
2905
2906=== removed directory 'hooks/charmhelpers/contrib/openstack/amulet'
2907=== removed file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py'
2908--- hooks/charmhelpers/contrib/openstack/amulet/__init__.py 2015-01-23 11:08:26 +0000
2909+++ hooks/charmhelpers/contrib/openstack/amulet/__init__.py 1970-01-01 00:00:00 +0000
2910@@ -1,15 +0,0 @@
2911-# Copyright 2014-2015 Canonical Limited.
2912-#
2913-# This file is part of charm-helpers.
2914-#
2915-# charm-helpers is free software: you can redistribute it and/or modify
2916-# it under the terms of the GNU Lesser General Public License version 3 as
2917-# published by the Free Software Foundation.
2918-#
2919-# charm-helpers is distributed in the hope that it will be useful,
2920-# but WITHOUT ANY WARRANTY; without even the implied warranty of
2921-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2922-# GNU Lesser General Public License for more details.
2923-#
2924-# You should have received a copy of the GNU Lesser General Public License
2925-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
2926
2927=== removed file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
2928--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-04-23 14:54:24 +0000
2929+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
2930@@ -1,146 +0,0 @@
2931-# Copyright 2014-2015 Canonical Limited.
2932-#
2933-# This file is part of charm-helpers.
2934-#
2935-# charm-helpers is free software: you can redistribute it and/or modify
2936-# it under the terms of the GNU Lesser General Public License version 3 as
2937-# published by the Free Software Foundation.
2938-#
2939-# charm-helpers is distributed in the hope that it will be useful,
2940-# but WITHOUT ANY WARRANTY; without even the implied warranty of
2941-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2942-# GNU Lesser General Public License for more details.
2943-#
2944-# You should have received a copy of the GNU Lesser General Public License
2945-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
2946-
2947-import six
2948-from collections import OrderedDict
2949-from charmhelpers.contrib.amulet.deployment import (
2950- AmuletDeployment
2951-)
2952-
2953-
2954-class OpenStackAmuletDeployment(AmuletDeployment):
2955- """OpenStack amulet deployment.
2956-
2957- This class inherits from AmuletDeployment and has additional support
2958- that is specifically for use by OpenStack charms.
2959- """
2960-
2961- def __init__(self, series=None, openstack=None, source=None, stable=True):
2962- """Initialize the deployment environment."""
2963- super(OpenStackAmuletDeployment, self).__init__(series)
2964- self.openstack = openstack
2965- self.source = source
2966- self.stable = stable
2967- # Note(coreycb): this needs to be changed when new next branches come
2968- # out.
2969- self.current_next = "trusty"
2970-
2971- def _determine_branch_locations(self, other_services):
2972- """Determine the branch locations for the other services.
2973-
2974- Determine if the local branch being tested is derived from its
2975- stable or next (dev) branch, and based on this, use the corresonding
2976- stable or next branches for the other_services."""
2977- base_charms = ['mysql', 'mongodb']
2978-
2979- if self.series in ['precise', 'trusty']:
2980- base_series = self.series
2981- else:
2982- base_series = self.current_next
2983-
2984- if self.stable:
2985- for svc in other_services:
2986- temp = 'lp:charms/{}/{}'
2987- svc['location'] = temp.format(base_series,
2988- svc['name'])
2989- else:
2990- for svc in other_services:
2991- if svc['name'] in base_charms:
2992- temp = 'lp:charms/{}/{}'
2993- svc['location'] = temp.format(base_series,
2994- svc['name'])
2995- else:
2996- temp = 'lp:~openstack-charmers/charms/{}/{}/next'
2997- svc['location'] = temp.format(self.current_next,
2998- svc['name'])
2999- return other_services
3000-
3001- def _add_services(self, this_service, other_services):
3002- """Add services to the deployment and set openstack-origin/source."""
3003- other_services = self._determine_branch_locations(other_services)
3004-
3005- super(OpenStackAmuletDeployment, self)._add_services(this_service,
3006- other_services)
3007-
3008- services = other_services
3009- services.append(this_service)
3010- use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
3011- 'ceph-osd', 'ceph-radosgw']
3012- # Openstack subordinate charms do not expose an origin option as that
3013- # is controlled by the principle
3014- ignore = ['neutron-openvswitch']
3015-
3016- if self.openstack:
3017- for svc in services:
3018- if svc['name'] not in use_source + ignore:
3019- config = {'openstack-origin': self.openstack}
3020- self.d.configure(svc['name'], config)
3021-
3022- if self.source:
3023- for svc in services:
3024- if svc['name'] in use_source and svc['name'] not in ignore:
3025- config = {'source': self.source}
3026- self.d.configure(svc['name'], config)
3027-
3028- def _configure_services(self, configs):
3029- """Configure all of the services."""
3030- for service, config in six.iteritems(configs):
3031- self.d.configure(service, config)
3032-
3033- def _get_openstack_release(self):
3034- """Get openstack release.
3035-
3036- Return an integer representing the enum value of the openstack
3037- release.
3038- """
3039- # Must be ordered by OpenStack release (not by Ubuntu release):
3040- (self.precise_essex, self.precise_folsom, self.precise_grizzly,
3041- self.precise_havana, self.precise_icehouse,
3042- self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
3043- self.trusty_kilo, self.vivid_kilo) = range(10)
3044-
3045- releases = {
3046- ('precise', None): self.precise_essex,
3047- ('precise', 'cloud:precise-folsom'): self.precise_folsom,
3048- ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
3049- ('precise', 'cloud:precise-havana'): self.precise_havana,
3050- ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
3051- ('trusty', None): self.trusty_icehouse,
3052- ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
3053- ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
3054- ('utopic', None): self.utopic_juno,
3055- ('vivid', None): self.vivid_kilo}
3056- return releases[(self.series, self.openstack)]
3057-
3058- def _get_openstack_release_string(self):
3059- """Get openstack release string.
3060-
3061- Return a string representing the openstack release.
3062- """
3063- releases = OrderedDict([
3064- ('precise', 'essex'),
3065- ('quantal', 'folsom'),
3066- ('raring', 'grizzly'),
3067- ('saucy', 'havana'),
3068- ('trusty', 'icehouse'),
3069- ('utopic', 'juno'),
3070- ('vivid', 'kilo'),
3071- ])
3072- if self.openstack:
3073- os_origin = self.openstack.split(':')[1]
3074- return os_origin.split('%s-' % self.series)[1].split('/')[0]
3075- else:
3076- return releases[self.series]
3077
3078=== removed file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
3079--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-03-31 15:13:53 +0000
3080+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
3081@@ -1,294 +0,0 @@
3082-# Copyright 2014-2015 Canonical Limited.
3083-#
3084-# This file is part of charm-helpers.
3085-#
3086-# charm-helpers is free software: you can redistribute it and/or modify
3087-# it under the terms of the GNU Lesser General Public License version 3 as
3088-# published by the Free Software Foundation.
3089-#
3090-# charm-helpers is distributed in the hope that it will be useful,
3091-# but WITHOUT ANY WARRANTY; without even the implied warranty of
3092-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3093-# GNU Lesser General Public License for more details.
3094-#
3095-# You should have received a copy of the GNU Lesser General Public License
3096-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3097-
3098-import logging
3099-import os
3100-import time
3101-import urllib
3102-
3103-import glanceclient.v1.client as glance_client
3104-import keystoneclient.v2_0 as keystone_client
3105-import novaclient.v1_1.client as nova_client
3106-
3107-import six
3108-
3109-from charmhelpers.contrib.amulet.utils import (
3110- AmuletUtils
3111-)
3112-
3113-DEBUG = logging.DEBUG
3114-ERROR = logging.ERROR
3115-
3116-
3117-class OpenStackAmuletUtils(AmuletUtils):
3118- """OpenStack amulet utilities.
3119-
3120- This class inherits from AmuletUtils and has additional support
3121- that is specifically for use by OpenStack charms.
3122- """
3123-
3124- def __init__(self, log_level=ERROR):
3125- """Initialize the deployment environment."""
3126- super(OpenStackAmuletUtils, self).__init__(log_level)
3127-
3128- def validate_endpoint_data(self, endpoints, admin_port, internal_port,
3129- public_port, expected):
3130- """Validate endpoint data.
3131-
3132- Validate actual endpoint data vs expected endpoint data. The ports
3133- are used to find the matching endpoint.
3134- """
3135- found = False
3136- for ep in endpoints:
3137- self.log.debug('endpoint: {}'.format(repr(ep)))
3138- if (admin_port in ep.adminurl and
3139- internal_port in ep.internalurl and
3140- public_port in ep.publicurl):
3141- found = True
3142- actual = {'id': ep.id,
3143- 'region': ep.region,
3144- 'adminurl': ep.adminurl,
3145- 'internalurl': ep.internalurl,
3146- 'publicurl': ep.publicurl,
3147- 'service_id': ep.service_id}
3148- ret = self._validate_dict_data(expected, actual)
3149- if ret:
3150- return 'unexpected endpoint data - {}'.format(ret)
3151-
3152- if not found:
3153- return 'endpoint not found'
3154-
3155- def validate_svc_catalog_endpoint_data(self, expected, actual):
3156- """Validate service catalog endpoint data.
3157-
3158- Validate a list of actual service catalog endpoints vs a list of
3159- expected service catalog endpoints.
3160- """
3161- self.log.debug('actual: {}'.format(repr(actual)))
3162- for k, v in six.iteritems(expected):
3163- if k in actual:
3164- ret = self._validate_dict_data(expected[k][0], actual[k][0])
3165- if ret:
3166- return self.endpoint_error(k, ret)
3167- else:
3168- return "endpoint {} does not exist".format(k)
3169- return ret
3170-
3171- def validate_tenant_data(self, expected, actual):
3172- """Validate tenant data.
3173-
3174- Validate a list of actual tenant data vs list of expected tenant
3175- data.
3176- """
3177- self.log.debug('actual: {}'.format(repr(actual)))
3178- for e in expected:
3179- found = False
3180- for act in actual:
3181- a = {'enabled': act.enabled, 'description': act.description,
3182- 'name': act.name, 'id': act.id}
3183- if e['name'] == a['name']:
3184- found = True
3185- ret = self._validate_dict_data(e, a)
3186- if ret:
3187- return "unexpected tenant data - {}".format(ret)
3188- if not found:
3189- return "tenant {} does not exist".format(e['name'])
3190- return ret
3191-
3192- def validate_role_data(self, expected, actual):
3193- """Validate role data.
3194-
3195- Validate a list of actual role data vs a list of expected role
3196- data.
3197- """
3198- self.log.debug('actual: {}'.format(repr(actual)))
3199- for e in expected:
3200- found = False
3201- for act in actual:
3202- a = {'name': act.name, 'id': act.id}
3203- if e['name'] == a['name']:
3204- found = True
3205- ret = self._validate_dict_data(e, a)
3206- if ret:
3207- return "unexpected role data - {}".format(ret)
3208- if not found:
3209- return "role {} does not exist".format(e['name'])
3210- return ret
3211-
3212- def validate_user_data(self, expected, actual):
3213- """Validate user data.
3214-
3215- Validate a list of actual user data vs a list of expected user
3216- data.
3217- """
3218- self.log.debug('actual: {}'.format(repr(actual)))
3219- for e in expected:
3220- found = False
3221- for act in actual:
3222- a = {'enabled': act.enabled, 'name': act.name,
3223- 'email': act.email, 'tenantId': act.tenantId,
3224- 'id': act.id}
3225- if e['name'] == a['name']:
3226- found = True
3227- ret = self._validate_dict_data(e, a)
3228- if ret:
3229- return "unexpected user data - {}".format(ret)
3230- if not found:
3231- return "user {} does not exist".format(e['name'])
3232- return ret
3233-
3234- def validate_flavor_data(self, expected, actual):
3235- """Validate flavor data.
3236-
3237- Validate a list of actual flavors vs a list of expected flavors.
3238- """
3239- self.log.debug('actual: {}'.format(repr(actual)))
3240- act = [a.name for a in actual]
3241- return self._validate_list_data(expected, act)
3242-
3243- def tenant_exists(self, keystone, tenant):
3244- """Return True if tenant exists."""
3245- return tenant in [t.name for t in keystone.tenants.list()]
3246-
3247- def authenticate_keystone_admin(self, keystone_sentry, user, password,
3248- tenant):
3249- """Authenticates admin user with the keystone admin endpoint."""
3250- unit = keystone_sentry
3251- service_ip = unit.relation('shared-db',
3252- 'mysql:shared-db')['private-address']
3253- ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
3254- return keystone_client.Client(username=user, password=password,
3255- tenant_name=tenant, auth_url=ep)
3256-
3257- def authenticate_keystone_user(self, keystone, user, password, tenant):
3258- """Authenticates a regular user with the keystone public endpoint."""
3259- ep = keystone.service_catalog.url_for(service_type='identity',
3260- endpoint_type='publicURL')
3261- return keystone_client.Client(username=user, password=password,
3262- tenant_name=tenant, auth_url=ep)
3263-
3264- def authenticate_glance_admin(self, keystone):
3265- """Authenticates admin user with glance."""
3266- ep = keystone.service_catalog.url_for(service_type='image',
3267- endpoint_type='adminURL')
3268- return glance_client.Client(ep, token=keystone.auth_token)
3269-
3270- def authenticate_nova_user(self, keystone, user, password, tenant):
3271- """Authenticates a regular user with nova-api."""
3272- ep = keystone.service_catalog.url_for(service_type='identity',
3273- endpoint_type='publicURL')
3274- return nova_client.Client(username=user, api_key=password,
3275- project_id=tenant, auth_url=ep)
3276-
3277- def create_cirros_image(self, glance, image_name):
3278- """Download the latest cirros image and upload it to glance."""
3279- http_proxy = os.getenv('AMULET_HTTP_PROXY')
3280- self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
3281- if http_proxy:
3282- proxies = {'http': http_proxy}
3283- opener = urllib.FancyURLopener(proxies)
3284- else:
3285- opener = urllib.FancyURLopener()
3286-
3287- f = opener.open("http://download.cirros-cloud.net/version/released")
3288- version = f.read().strip()
3289- cirros_img = "cirros-{}-x86_64-disk.img".format(version)
3290- local_path = os.path.join('tests', cirros_img)
3291-
3292- if not os.path.exists(local_path):
3293- cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
3294- version, cirros_img)
3295- opener.retrieve(cirros_url, local_path)
3296- f.close()
3297-
3298- with open(local_path) as f:
3299- image = glance.images.create(name=image_name, is_public=True,
3300- disk_format='qcow2',
3301- container_format='bare', data=f)
3302- count = 1
3303- status = image.status
3304- while status != 'active' and count < 10:
3305- time.sleep(3)
3306- image = glance.images.get(image.id)
3307- status = image.status
3308- self.log.debug('image status: {}'.format(status))
3309- count += 1
3310-
3311- if status != 'active':
3312- self.log.error('image creation timed out')
3313- return None
3314-
3315- return image
3316-
3317- def delete_image(self, glance, image):
3318- """Delete the specified image."""
3319- num_before = len(list(glance.images.list()))
3320- glance.images.delete(image)
3321-
3322- count = 1
3323- num_after = len(list(glance.images.list()))
3324- while num_after != (num_before - 1) and count < 10:
3325- time.sleep(3)
3326- num_after = len(list(glance.images.list()))
3327- self.log.debug('number of images: {}'.format(num_after))
3328- count += 1
3329-
3330- if num_after != (num_before - 1):
3331- self.log.error('image deletion timed out')
3332- return False
3333-
3334- return True
3335-
3336- def create_instance(self, nova, image_name, instance_name, flavor):
3337- """Create the specified instance."""
3338- image = nova.images.find(name=image_name)
3339- flavor = nova.flavors.find(name=flavor)
3340- instance = nova.servers.create(name=instance_name, image=image,
3341- flavor=flavor)
3342-
3343- count = 1
3344- status = instance.status
3345- while status != 'ACTIVE' and count < 60:
3346- time.sleep(3)
3347- instance = nova.servers.get(instance.id)
3348- status = instance.status
3349- self.log.debug('instance status: {}'.format(status))
3350- count += 1
3351-
3352- if status != 'ACTIVE':
3353- self.log.error('instance creation timed out')
3354- return None
3355-
3356- return instance
3357-
3358- def delete_instance(self, nova, instance):
3359- """Delete the specified instance."""
3360- num_before = len(list(nova.servers.list()))
3361- nova.servers.delete(instance)
3362-
3363- count = 1
3364- num_after = len(list(nova.servers.list()))
3365- while num_after != (num_before - 1) and count < 10:
3366- time.sleep(3)
3367- num_after = len(list(nova.servers.list()))
3368- self.log.debug('number of instances: {}'.format(num_after))
3369- count += 1
3370-
3371- if num_after != (num_before - 1):
3372- self.log.error('instance deletion timed out')
3373- return False
3374-
3375- return True
3376
3377=== removed file 'hooks/charmhelpers/contrib/openstack/context.py'
3378--- hooks/charmhelpers/contrib/openstack/context.py 2015-04-16 21:35:13 +0000
3379+++ hooks/charmhelpers/contrib/openstack/context.py 1970-01-01 00:00:00 +0000
3380@@ -1,1328 +0,0 @@
3381-# Copyright 2014-2015 Canonical Limited.
3382-#
3383-# This file is part of charm-helpers.
3384-#
3385-# charm-helpers is free software: you can redistribute it and/or modify
3386-# it under the terms of the GNU Lesser General Public License version 3 as
3387-# published by the Free Software Foundation.
3388-#
3389-# charm-helpers is distributed in the hope that it will be useful,
3390-# but WITHOUT ANY WARRANTY; without even the implied warranty of
3391-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3392-# GNU Lesser General Public License for more details.
3393-#
3394-# You should have received a copy of the GNU Lesser General Public License
3395-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3396-
3397-import json
3398-import os
3399-import re
3400-import time
3401-from base64 import b64decode
3402-from subprocess import check_call
3403-
3404-import six
3405-import yaml
3406-
3407-from charmhelpers.fetch import (
3408- apt_install,
3409- filter_installed_packages,
3410-)
3411-from charmhelpers.core.hookenv import (
3412- config,
3413- is_relation_made,
3414- local_unit,
3415- log,
3416- relation_get,
3417- relation_ids,
3418- related_units,
3419- relation_set,
3420- unit_get,
3421- unit_private_ip,
3422- charm_name,
3423- DEBUG,
3424- INFO,
3425- WARNING,
3426- ERROR,
3427-)
3428-
3429-from charmhelpers.core.sysctl import create as sysctl_create
3430-from charmhelpers.core.strutils import bool_from_string
3431-
3432-from charmhelpers.core.host import (
3433- list_nics,
3434- get_nic_hwaddr,
3435- mkdir,
3436- write_file,
3437-)
3438-from charmhelpers.contrib.hahelpers.cluster import (
3439- determine_apache_port,
3440- determine_api_port,
3441- https,
3442- is_clustered,
3443-)
3444-from charmhelpers.contrib.hahelpers.apache import (
3445- get_cert,
3446- get_ca_cert,
3447- install_ca_cert,
3448-)
3449-from charmhelpers.contrib.openstack.neutron import (
3450- neutron_plugin_attribute,
3451- parse_data_port_mappings,
3452-)
3453-from charmhelpers.contrib.openstack.ip import (
3454- resolve_address,
3455- INTERNAL,
3456-)
3457-from charmhelpers.contrib.network.ip import (
3458- get_address_in_network,
3459- get_ipv4_addr,
3460- get_ipv6_addr,
3461- get_netmask_for_address,
3462- format_ipv6_addr,
3463- is_address_in_network,
3464- is_bridge_member,
3465-)
3466-from charmhelpers.contrib.openstack.utils import get_host_ip
3467-CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
3468-ADDRESS_TYPES = ['admin', 'internal', 'public']
3469-
3470-
3471-class OSContextError(Exception):
3472- pass
3473-
3474-
3475-def ensure_packages(packages):
3476- """Install but do not upgrade required plugin packages."""
3477- required = filter_installed_packages(packages)
3478- if required:
3479- apt_install(required, fatal=True)
3480-
3481-
3482-def context_complete(ctxt):
3483- _missing = []
3484- for k, v in six.iteritems(ctxt):
3485- if v is None or v == '':
3486- _missing.append(k)
3487-
3488- if _missing:
3489- log('Missing required data: %s' % ' '.join(_missing), level=INFO)
3490- return False
3491-
3492- return True
3493-
3494-
def config_flags_parser(config_flags):
    """Parses config flags string into dict.

    This parsing method supports a few different formats for the config
    flag values to be parsed:

      1. A string in the simple format of key=value pairs, with the
         possibility of specifying multiple key value pairs within the same
         string. For example, a string in the format of
         'key1=value1, key2=value2' will return a dict of:

             {'key1': 'value1', 'key2': 'value2'}.

      2. A string in the above format, but supporting a comma-delimited list
         of values for the same key. For example, a string in the format of
         'key1=value1, key2=value3,value4,value5' will return a dict of:

             {'key1': 'value1', 'key2': 'value3,value4,value5'}

      3. A string containing a colon character (:) prior to an equal
         character (=) will be treated as yaml and parsed as such. This can
         be used to specify more complex key value pairs. For example,
         a string in the format of 'key1: subkey1=value1, subkey2=value2'
         will return a dict of:

             {'key1': 'subkey1=value1, subkey2=value2'}

    The provided config_flags string may be a list of comma-separated values
    which themselves may be comma-separated list of values.

    :raises OSContextError: if the string contains '==' or an entry without
        an embedded key.
    """
    # If we find a colon before an equals sign then treat it as yaml.
    # Note: limit it to finding the colon first since this indicates assignment
    # for inline yaml.
    colon = config_flags.find(':')
    equals = config_flags.find('=')
    if colon > 0:
        if colon < equals or equals < 0:
            return yaml.safe_load(config_flags)

    if config_flags.find('==') >= 0:
        log("config_flags is not in expected format (key=value)", level=ERROR)
        raise OSContextError

    # strip the following from each value.
    post_strippers = ' ,'
    # we strip any leading/trailing '=' or ' ' from the string then
    # split on '='.
    split = config_flags.strip(' =').split('=')
    limit = len(split)
    flags = {}
    # Each split element (except the first and last) holds the tail of the
    # previous value and, after its last comma, the next key.
    for i in range(0, limit - 1):
        current = split[i]
        # NOTE: 'next' shadows the builtin here; left unchanged to keep the
        # code byte-identical.
        next = split[i + 1]
        vindex = next.rfind(',')
        if (i == limit - 2) or (vindex < 0):
            value = next
        else:
            value = next[:vindex]

        if i == 0:
            key = current
        else:
            # if this not the first entry, expect an embedded key.
            index = current.rfind(',')
            if index < 0:
                log("Invalid config value(s) at index %s" % (i), level=ERROR)
                raise OSContextError
            key = current[index + 1:]

        # Add to collection.
        flags[key.strip(post_strippers)] = value.rstrip(post_strippers)

    return flags
3567-
3568-
class OSContextGenerator(object):
    """Abstract base class for all template context generators.

    Subclasses set ``interfaces`` to the relations they inspect and
    implement ``__call__`` to return the context dict.
    """
    interfaces = []

    def __call__(self):
        raise NotImplementedError
3575-
3576-
class SharedDBContext(OSContextGenerator):
    # Builds MySQL connection settings from the 'shared-db' relation.
    interfaces = ['shared-db']

    def __init__(self,
                 database=None, user=None, relation_prefix=None, ssl_dir=None):
        """Allows inspecting relation for settings prefixed with
        relation_prefix. This is useful for parsing access for multiple
        databases returned via the shared-db interface (eg, nova_password,
        quantum_password)

        :param database: database name; falls back to the 'database' config
            option when omitted.
        :param user: database user; falls back to the 'database-user' config
            option when omitted.
        :param relation_prefix: optional prefix namespacing the hostname and
            password keys on the relation.
        :param ssl_dir: directory any relation-provided SSL material is
            written to (see db_ssl()).
        """
        self.relation_prefix = relation_prefix
        self.database = database
        self.user = user
        self.ssl_dir = ssl_dir

    def __call__(self):
        """Return the database context dict, {} if incomplete, or None when
        execution is deferred pending an access-network renegotiation.

        :raises OSContextError: when neither args nor charm config supply
            the database name and user.
        """
        self.database = self.database or config('database')
        self.user = self.user or config('database-user')
        if None in [self.database, self.user]:
            log("Could not generate shared_db context. Missing required charm "
                "config options. (database name and user)", level=ERROR)
            raise OSContextError

        ctxt = {}

        # NOTE(jamespage) if mysql charm provides a network upon which
        # access to the database should be made, reconfigure relation
        # with the service units local address and defer execution
        access_network = relation_get('access-network')
        if access_network is not None:
            if self.relation_prefix is not None:
                hostname_key = "{}_hostname".format(self.relation_prefix)
            else:
                hostname_key = "hostname"
            access_hostname = get_address_in_network(access_network,
                                                     unit_get('private-address'))
            set_hostname = relation_get(attribute=hostname_key,
                                        unit=local_unit())
            if set_hostname != access_hostname:
                relation_set(relation_settings={hostname_key: access_hostname})
                return None  # Defer any further hook execution for now....

        password_setting = 'password'
        if self.relation_prefix:
            password_setting = self.relation_prefix + '_password'

        # First related unit providing a complete context wins.
        for rid in relation_ids('shared-db'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                host = rdata.get('db_host')
                host = format_ipv6_addr(host) or host
                ctxt = {
                    'database_host': host,
                    'database': self.database,
                    'database_user': self.user,
                    'database_password': rdata.get(password_setting),
                    'database_type': 'mysql'
                }
                if context_complete(ctxt):
                    db_ssl(rdata, ctxt, self.ssl_dir)
                    return ctxt
        return {}
3639-
3640-
class PostgresqlDBContext(OSContextGenerator):
    """Builds PostgreSQL connection settings from the pgsql-db relation."""
    interfaces = ['pgsql-db']

    def __init__(self, database=None):
        self.database = database

    def __call__(self):
        self.database = self.database or config('database')
        if self.database is None:
            log('Could not generate postgresql_db context. Missing required '
                'charm config options. (database name)', level=ERROR)
            raise OSContextError

        # Return settings from the first related unit that provides a
        # complete set of values.
        for rid in relation_ids(self.interfaces[0]):
            for unit in related_units(rid):
                settings = {
                    'database_host': relation_get('host', rid=rid, unit=unit),
                    'database': self.database,
                    'database_user': relation_get('user', rid=rid, unit=unit),
                    'database_password': relation_get('password',
                                                      rid=rid, unit=unit),
                    'database_type': 'postgresql',
                }
                if context_complete(settings):
                    return settings

        return {}
3669-
3670-
def db_ssl(rdata, ctxt, ssl_dir):
    """Write any SSL material found on the db relation into *ssl_dir* and
    record the resulting file paths in *ctxt*.

    :param rdata: relation data dict; may carry base64-encoded 'ssl_ca',
        'ssl_cert' and 'ssl_key' values.
    :param ctxt: context dict to augment with database_ssl_* paths.
    :param ssl_dir: target directory; when falsy any provided CA is ignored
        (with an INFO log) and ctxt is returned unchanged.
    :returns: the (possibly augmented) ctxt.
    """
    if 'ssl_ca' in rdata and ssl_dir:
        ca_path = os.path.join(ssl_dir, 'db-client.ca')
        # b64decode() returns bytes, so the files must be opened in binary
        # mode - the previous text-mode open raised TypeError on Python 3.
        with open(ca_path, 'wb') as fh:
            fh.write(b64decode(rdata['ssl_ca']))

        ctxt['database_ssl_ca'] = ca_path
    elif 'ssl_ca' in rdata:
        log("Charm not setup for ssl support but ssl ca found", level=INFO)
        return ctxt

    if 'ssl_cert' in rdata:
        cert_path = os.path.join(ssl_dir, 'db-client.cert')
        if not os.path.exists(cert_path):
            # First time this cert is seen; give it time to become valid.
            log("Waiting 1m for ssl client cert validity", level=INFO)
            time.sleep(60)

        with open(cert_path, 'wb') as fh:
            fh.write(b64decode(rdata['ssl_cert']))

        ctxt['database_ssl_cert'] = cert_path
        # NOTE(review): a relation providing 'ssl_cert' without 'ssl_key'
        # raises KeyError here - presumably providers always send both.
        key_path = os.path.join(ssl_dir, 'db-client.key')
        with open(key_path, 'wb') as fh:
            fh.write(b64decode(rdata['ssl_key']))

        ctxt['database_ssl_key'] = key_path

    return ctxt
3700-
3701-
class IdentityServiceContext(OSContextGenerator):
    # Builds keystone auth settings from the identity-service relation.

    def __init__(self, service=None, service_user=None, rel_name='identity-service'):
        """
        :param service: service name; with service_user set, a signing cache
            dir /var/cache/<service> is created and exposed as signing_dir.
        :param service_user: system user owning the signing cache dir.
        :param rel_name: identity relation name to inspect.
        """
        self.service = service
        self.service_user = service_user
        self.rel_name = rel_name
        self.interfaces = [self.rel_name]

    def __call__(self):
        log('Generating template context for ' + self.rel_name, level=DEBUG)
        ctxt = {}

        if self.service and self.service_user:
            # This is required for pki token signing if we don't want /tmp to
            # be used.
            cachedir = '/var/cache/%s' % (self.service)
            if not os.path.isdir(cachedir):
                log("Creating service cache dir %s" % (cachedir), level=DEBUG)
                mkdir(path=cachedir, owner=self.service_user,
                      group=self.service_user, perms=0o700)

            ctxt['signing_dir'] = cachedir

        # First related unit providing a complete context wins.
        for rid in relation_ids(self.rel_name):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                serv_host = rdata.get('service_host')
                serv_host = format_ipv6_addr(serv_host) or serv_host
                auth_host = rdata.get('auth_host')
                auth_host = format_ipv6_addr(auth_host) or auth_host
                svc_protocol = rdata.get('service_protocol') or 'http'
                auth_protocol = rdata.get('auth_protocol') or 'http'
                ctxt.update({'service_port': rdata.get('service_port'),
                             'service_host': serv_host,
                             'auth_host': auth_host,
                             'auth_port': rdata.get('auth_port'),
                             'admin_tenant_name': rdata.get('service_tenant'),
                             'admin_user': rdata.get('service_username'),
                             'admin_password': rdata.get('service_password'),
                             'service_protocol': svc_protocol,
                             'auth_protocol': auth_protocol})

                if context_complete(ctxt):
                    # NOTE(jamespage) this is required for >= icehouse
                    # so a missing value just indicates keystone needs
                    # upgrading
                    ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
                    return ctxt

        return {}
3752-
3753-
class AMQPContext(OSContextGenerator):
    # Builds RabbitMQ connection settings from the amqp relation.

    def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
        """
        :param ssl_dir: directory to write a relation-provided SSL CA to.
        :param rel_name: amqp relation name to inspect.
        :param relation_prefix: optional prefix for the rabbit user/vhost
            charm config option names.
        """
        self.ssl_dir = ssl_dir
        self.rel_name = rel_name
        self.relation_prefix = relation_prefix
        self.interfaces = [rel_name]

    def __call__(self):
        log('Generating template context for amqp', level=DEBUG)
        conf = config()
        if self.relation_prefix:
            user_setting = '%s-rabbit-user' % (self.relation_prefix)
            vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
        else:
            user_setting = 'rabbit-user'
            vhost_setting = 'rabbit-vhost'

        try:
            username = conf[user_setting]
            vhost = conf[vhost_setting]
        except KeyError as e:
            # NOTE(review): message says "shared_db" but this is the amqp
            # context - looks like a copy/paste slip; left unchanged here.
            log('Could not generate shared_db context. Missing required charm '
                'config options: %s.' % e, level=ERROR)
            raise OSContextError

        ctxt = {}
        for rid in relation_ids(self.rel_name):
            ha_vip_only = False
            for unit in related_units(rid):
                # A 'clustered' flag means rabbit is fronted by a VIP.
                if relation_get('clustered', rid=rid, unit=unit):
                    ctxt['clustered'] = True
                    vip = relation_get('vip', rid=rid, unit=unit)
                    vip = format_ipv6_addr(vip) or vip
                    ctxt['rabbitmq_host'] = vip
                else:
                    host = relation_get('private-address', rid=rid, unit=unit)
                    host = format_ipv6_addr(host) or host
                    ctxt['rabbitmq_host'] = host

                ctxt.update({
                    'rabbitmq_user': username,
                    'rabbitmq_password': relation_get('password', rid=rid,
                                                      unit=unit),
                    'rabbitmq_virtual_host': vhost,
                })

                ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
                if ssl_port:
                    ctxt['rabbit_ssl_port'] = ssl_port

                ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
                if ssl_ca:
                    ctxt['rabbit_ssl_ca'] = ssl_ca

                if relation_get('ha_queues', rid=rid, unit=unit) is not None:
                    ctxt['rabbitmq_ha_queues'] = True

                ha_vip_only = relation_get('ha-vip-only',
                                           rid=rid, unit=unit) is not None

                if context_complete(ctxt):
                    if 'rabbit_ssl_ca' in ctxt:
                        if not self.ssl_dir:
                            log("Charm not setup for ssl support but ssl ca "
                                "found", level=INFO)
                            break

                        # NOTE(review): b64decode() yields bytes on Python 3
                        # but the file is opened in text mode - confirm this
                        # only runs under Python 2 or fix to 'wb'.
                        ca_path = os.path.join(
                            self.ssl_dir, 'rabbit-client-ca.pem')
                        with open(ca_path, 'w') as fh:
                            fh.write(b64decode(ctxt['rabbit_ssl_ca']))
                        ctxt['rabbit_ssl_ca'] = ca_path

                    # Sufficient information found = break out!
                    break

            # Used for active/active rabbitmq >= grizzly
            if (('clustered' not in ctxt or ha_vip_only) and
                    len(related_units(rid)) > 1):
                rabbitmq_hosts = []
                for unit in related_units(rid):
                    host = relation_get('private-address', rid=rid, unit=unit)
                    host = format_ipv6_addr(host) or host
                    rabbitmq_hosts.append(host)

                ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))

        oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
        if oslo_messaging_flags:
            ctxt['oslo_messaging_flags'] = config_flags_parser(
                oslo_messaging_flags)

        if not context_complete(ctxt):
            return {}

        return ctxt
3851-
3852-
class CephContext(OSContextGenerator):
    """Generates context for /etc/ceph/ceph.conf templates."""
    interfaces = ['ceph']

    def __call__(self):
        if not relation_ids('ceph'):
            return {}

        log('Generating template context for ceph', level=DEBUG)
        mon_hosts = []
        auth = None
        key = None
        use_syslog = str(config('use-syslog')).lower()
        for rid in relation_ids('ceph'):
            for unit in related_units(rid):
                # The last related unit visited wins for auth/key.
                auth = relation_get('auth', rid=rid, unit=unit)
                key = relation_get('key', rid=rid, unit=unit)
                ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
                                             unit=unit)
                unit_priv_addr = relation_get('private-address', rid=rid,
                                              unit=unit)
                # Prefer a dedicated ceph public address when provided.
                ceph_addr = ceph_pub_addr or unit_priv_addr
                ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
                mon_hosts.append(ceph_addr)

        ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
                'auth': auth,
                'key': key,
                'use_syslog': use_syslog}

        if not os.path.isdir('/etc/ceph'):
            os.mkdir('/etc/ceph')

        if not context_complete(ctxt):
            return {}

        # Only pull in client packages once the context is known usable.
        ensure_packages(['ceph-common'])
        return ctxt
3891-
3892-
class HAProxyContext(OSContextGenerator):
    """Provides half a context for the haproxy template, which describes
    all peers to be included in the cluster. Each charm needs to include
    its own context generator that describes the port mapping.
    """
    interfaces = ['cluster']

    def __init__(self, singlenode_mode=False):
        # When True, haproxy is enabled even without cluster peers.
        self.singlenode_mode = singlenode_mode

    def __call__(self):
        if not relation_ids('cluster') and not self.singlenode_mode:
            return {}

        if config('prefer-ipv6'):
            addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            addr = get_host_ip(unit_get('private-address'))

        l_unit = local_unit().replace('/', '-')
        cluster_hosts = {}

        # NOTE(jamespage): build out map of configured network endpoints
        # and associated backends
        for addr_type in ADDRESS_TYPES:
            cfg_opt = 'os-{}-network'.format(addr_type)
            laddr = get_address_in_network(config(cfg_opt))
            if laddr:
                netmask = get_netmask_for_address(laddr)
                cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
                                                                  netmask),
                                        'backends': {l_unit: laddr}}
                for rid in relation_ids('cluster'):
                    for unit in related_units(rid):
                        _laddr = relation_get('{}-address'.format(addr_type),
                                              rid=rid, unit=unit)
                        if _laddr:
                            _unit = unit.replace('/', '-')
                            cluster_hosts[laddr]['backends'][_unit] = _laddr

        # NOTE(jamespage) add backend based on private address - this
        # will either be the only backend or the fallback if no acls
        # match in the frontend
        cluster_hosts[addr] = {}
        netmask = get_netmask_for_address(addr)
        cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
                               'backends': {l_unit: addr}}
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _laddr = relation_get('private-address',
                                      rid=rid, unit=unit)
                if _laddr:
                    _unit = unit.replace('/', '-')
                    cluster_hosts[addr]['backends'][_unit] = _laddr

        ctxt = {
            'frontends': cluster_hosts,
            'default_backend': addr
        }

        if config('haproxy-server-timeout'):
            ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')

        if config('haproxy-client-timeout'):
            ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')

        if config('prefer-ipv6'):
            ctxt['ipv6'] = True
            ctxt['local_host'] = 'ip6-localhost'
            ctxt['haproxy_host'] = '::'
            ctxt['stat_port'] = ':::8888'
        else:
            ctxt['local_host'] = '127.0.0.1'
            ctxt['haproxy_host'] = '0.0.0.0'
            ctxt['stat_port'] = ':8888'

        # Only hand back a context (and enable haproxy) when at least one
        # frontend has multiple backends, or in single-node mode.
        for frontend in cluster_hosts:
            if (len(cluster_hosts[frontend]['backends']) > 1 or
                    self.singlenode_mode):
                # Enable haproxy when we have enough peers.
                log('Ensuring haproxy enabled in /etc/default/haproxy.',
                    level=DEBUG)
                with open('/etc/default/haproxy', 'w') as out:
                    out.write('ENABLED=1\n')

                return ctxt

        log('HAProxy context is incomplete, this unit has no peers.',
            level=INFO)
        return {}
3983-
3984-
class ImageServiceContext(OSContextGenerator):
    interfaces = ['image-service']

    def __call__(self):
        """Return the glance API server address from the image-service
        relation, or {} when no related unit provides one.
        """
        log('Generating template context for image-service.', level=DEBUG)
        rel_ids = relation_ids('image-service')
        if not rel_ids:
            return {}

        for rel_id in rel_ids:
            for member in related_units(rel_id):
                server = relation_get('glance-api-server',
                                      rid=rel_id, unit=member)
                if server:
                    return {'glance_api_servers': server}

        log("ImageService context is incomplete. Missing required relation "
            "data.", level=INFO)
        return {}
4007-
4008-
class ApacheSSLContext(OSContextGenerator):
    """Generates a context for an apache vhost configuration that configures
    HTTPS reverse proxying for one or many endpoints. Generated context
    looks something like::

        {
            'namespace': 'cinder',
            'private_address': 'iscsi.mycinderhost.com',
            'endpoints': [(8776, 8766), (8777, 8767)]
        }

    The endpoints list consists of a tuples mapping external ports
    to internal ports.
    """
    interfaces = ['https']

    # charms should inherit this context and set external ports
    # and service namespace accordingly.
    external_ports = []
    service_namespace = None

    def enable_modules(self):
        """Enable the apache modules needed for SSL reverse proxying."""
        cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
        check_call(cmd)

    def configure_cert(self, cn=None):
        """Write the cert/key pair for *cn* (or the default pair when no
        common name is given) under the service's apache ssl directory.
        """
        ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
        mkdir(path=ssl_dir)
        cert, key = get_cert(cn)
        if cn:
            cert_filename = 'cert_{}'.format(cn)
            key_filename = 'key_{}'.format(cn)
        else:
            cert_filename = 'cert'
            key_filename = 'key'

        write_file(path=os.path.join(ssl_dir, cert_filename),
                   content=b64decode(cert))
        write_file(path=os.path.join(ssl_dir, key_filename),
                   content=b64decode(key))

    def configure_ca(self):
        """Install the CA certificate if one is available."""
        ca_cert = get_ca_cert()
        if ca_cert:
            install_ca_cert(b64decode(ca_cert))

    def canonical_names(self):
        """Figure out which canonical names clients will access this service.
        """
        cns = []
        for r_id in relation_ids('identity-service'):
            for unit in related_units(r_id):
                rdata = relation_get(rid=r_id, unit=unit)
                for k in rdata:
                    if k.startswith('ssl_key_'):
                        # BUGFIX: slice off the prefix instead of using
                        # lstrip('ssl_key_') - lstrip() removes a *set of
                        # characters*, so any CN beginning with one of
                        # 's','l','_','k','e','y' was previously mangled.
                        cns.append(k[len('ssl_key_'):])

        return sorted(list(set(cns)))

    def get_network_addresses(self):
        """For each network configured, return corresponding address and vip
        (if available).

        Returns a list of tuples of the form:

            [(address_in_net_a, vip_in_net_a),
             (address_in_net_b, vip_in_net_b),
             ...]

        or, if no vip(s) available:

            [(address_in_net_a, address_in_net_a),
             (address_in_net_b, address_in_net_b),
             ...]
        """
        addresses = []
        if config('vip'):
            vips = config('vip').split()
        else:
            vips = []

        for net_type in ['os-internal-network', 'os-admin-network',
                         'os-public-network']:
            addr = get_address_in_network(config(net_type),
                                          unit_get('private-address'))
            if len(vips) > 1 and is_clustered():
                # Multiple vips: pick the vip that lives in this network.
                if not config(net_type):
                    log("Multiple networks configured but net_type "
                        "is None (%s)." % net_type, level=WARNING)
                    continue

                for vip in vips:
                    if is_address_in_network(config(net_type), vip):
                        addresses.append((addr, vip))
                        break

            elif is_clustered() and config('vip'):
                addresses.append((addr, config('vip')))
            else:
                addresses.append((addr, addr))

        return sorted(addresses)

    def __call__(self):
        if isinstance(self.external_ports, six.string_types):
            self.external_ports = [self.external_ports]

        if not self.external_ports or not https():
            return {}

        self.configure_ca()
        self.enable_modules()

        ctxt = {'namespace': self.service_namespace,
                'endpoints': [],
                'ext_ports': []}

        cns = self.canonical_names()
        if cns:
            for cn in cns:
                self.configure_cert(cn)
        else:
            # Expect cert/key provided in config (currently assumed that ca
            # uses ip for cn)
            cn = resolve_address(endpoint_type=INTERNAL)
            self.configure_cert(cn)

        addresses = self.get_network_addresses()
        for address, endpoint in sorted(set(addresses)):
            for api_port in self.external_ports:
                ext_port = determine_apache_port(api_port,
                                                 singlenode_mode=True)
                int_port = determine_api_port(api_port, singlenode_mode=True)
                portmap = (address, endpoint, int(ext_port), int(int_port))
                ctxt['endpoints'].append(portmap)
                ctxt['ext_ports'].append(int(ext_port))

        ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
        return ctxt
4148-
4149-
class NeutronContext(OSContextGenerator):
    """Base context for neutron/quantum plugin configuration.

    Subclasses supply the plugin name and network manager via the
    ``plugin`` and ``network_manager`` properties.
    """
    interfaces = []

    @property
    def plugin(self):
        """Plugin name (eg 'ovs', 'n1kv'); overridden by subclasses."""
        return None

    @property
    def network_manager(self):
        """'neutron' or 'quantum'; overridden by subclasses."""
        return None

    @property
    def packages(self):
        return neutron_plugin_attribute(self.plugin, 'packages',
                                        self.network_manager)

    @property
    def neutron_security_groups(self):
        return None

    def _ensure_packages(self):
        # 'packages' is a list of package lists.
        for pkgs in self.packages:
            ensure_packages(pkgs)

    def _save_flag_file(self):
        """Record the active plugin for nova to pick up."""
        if self.network_manager == 'quantum':
            _file = '/etc/nova/quantum_plugin.conf'
        else:
            _file = '/etc/nova/neutron_plugin.conf'

        # BUGFIX: open in text mode - writing a str to a binary-mode file
        # raises TypeError under Python 3.
        with open(_file, 'w') as out:
            out.write(self.plugin + '\n')

    def ovs_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        # Renamed local from 'config' to avoid shadowing the hookenv
        # config() helper used elsewhere in this module.
        plugin_config = neutron_plugin_attribute(self.plugin, 'config',
                                                 self.network_manager)
        ovs_ctxt = {'core_plugin': driver,
                    'neutron_plugin': 'ovs',
                    'neutron_security_groups': self.neutron_security_groups,
                    'local_ip': unit_private_ip(),
                    'config': plugin_config}

        return ovs_ctxt

    def nuage_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        plugin_config = neutron_plugin_attribute(self.plugin, 'config',
                                                 self.network_manager)
        nuage_ctxt = {'core_plugin': driver,
                      'neutron_plugin': 'vsp',
                      'neutron_security_groups': self.neutron_security_groups,
                      'local_ip': unit_private_ip(),
                      'config': plugin_config}

        return nuage_ctxt

    def nvp_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        plugin_config = neutron_plugin_attribute(self.plugin, 'config',
                                                 self.network_manager)
        nvp_ctxt = {'core_plugin': driver,
                    'neutron_plugin': 'nvp',
                    'neutron_security_groups': self.neutron_security_groups,
                    'local_ip': unit_private_ip(),
                    'config': plugin_config}

        return nvp_ctxt

    def n1kv_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
                                               self.network_manager)
        n1kv_user_config_flags = config('n1kv-config-flags')
        restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
        n1kv_ctxt = {'core_plugin': driver,
                     'neutron_plugin': 'n1kv',
                     'neutron_security_groups': self.neutron_security_groups,
                     'local_ip': unit_private_ip(),
                     'config': n1kv_config,
                     'vsm_ip': config('n1kv-vsm-ip'),
                     'vsm_username': config('n1kv-vsm-username'),
                     'vsm_password': config('n1kv-vsm-password'),
                     'restrict_policy_profiles': restrict_policy_profiles}

        if n1kv_user_config_flags:
            flags = config_flags_parser(n1kv_user_config_flags)
            n1kv_ctxt['user_config_flags'] = flags

        return n1kv_ctxt

    def calico_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        plugin_config = neutron_plugin_attribute(self.plugin, 'config',
                                                 self.network_manager)
        calico_ctxt = {'core_plugin': driver,
                       'neutron_plugin': 'Calico',
                       'neutron_security_groups': self.neutron_security_groups,
                       'local_ip': unit_private_ip(),
                       'config': plugin_config}

        return calico_ctxt

    def neutron_ctxt(self):
        """Common settings: network manager and the neutron API URL."""
        if https():
            proto = 'https'
        else:
            proto = 'http'

        if is_clustered():
            host = config('vip')
        else:
            host = unit_get('private-address')

        ctxt = {'network_manager': self.network_manager,
                'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
        return ctxt

    def __call__(self):
        self._ensure_packages()

        if self.network_manager not in ['quantum', 'neutron']:
            return {}

        if not self.plugin:
            return {}

        ctxt = self.neutron_ctxt()

        # Merge in the plugin-specific settings.
        if self.plugin == 'ovs':
            ctxt.update(self.ovs_ctxt())
        elif self.plugin in ['nvp', 'nsx']:
            ctxt.update(self.nvp_ctxt())
        elif self.plugin == 'n1kv':
            ctxt.update(self.n1kv_ctxt())
        elif self.plugin == 'Calico':
            ctxt.update(self.calico_ctxt())
        elif self.plugin == 'vsp':
            ctxt.update(self.nuage_ctxt())

        alchemy_flags = config('neutron-alchemy-flags')
        if alchemy_flags:
            flags = config_flags_parser(alchemy_flags)
            ctxt['neutron_alchemy_flags'] = flags

        self._save_flag_file()
        return ctxt
4302-
4303-
class NeutronPortContext(OSContextGenerator):
    # Interface name prefixes considered when mapping MAC addresses to NICs.
    NIC_PREFIXES = ['eth', 'bond']

    def resolve_ports(self, ports):
        """Resolve NICs not yet bound to bridge(s)

        If hwaddress provided then returns resolved hwaddress otherwise NIC.

        :param ports: list of NIC names and/or MAC addresses (or None).
        :returns: list of NIC names, or None when ports is empty/None.
        """
        if not ports:
            return None

        # Build MAC -> NIC and MAC -> [addresses] maps for the known NICs.
        hwaddr_to_nic = {}
        hwaddr_to_ip = {}
        for nic in list_nics(self.NIC_PREFIXES):
            hwaddr = get_nic_hwaddr(nic)
            hwaddr_to_nic[hwaddr] = nic
            addresses = get_ipv4_addr(nic, fatal=False)
            addresses += get_ipv6_addr(iface=nic, fatal=False)
            hwaddr_to_ip[hwaddr] = addresses

        resolved = []
        mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
        for entry in ports:
            if re.match(mac_regex, entry):
                # NIC is in known NICs and does NOT have an IP address
                if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
                    # If the nic is part of a bridge then don't use it
                    if is_bridge_member(hwaddr_to_nic[entry]):
                        continue

                    # Entry is a MAC address for a valid interface that doesn't
                    # have an IP address assigned yet.
                    resolved.append(hwaddr_to_nic[entry])
            else:
                # If the passed entry is not a MAC address, assume it's a valid
                # interface, and that the user put it there on purpose (we can
                # trust it to be the real external network).
                resolved.append(entry)

        return resolved
4344-
4345-
class OSConfigFlagContext(OSContextGenerator):
    """Provides support for user-defined config flags.

    Users can define a comma-separated list of key=value pairs in the
    charm configuration and have them applied at any point in any file
    by using a template flag.

    Sometimes users might want config flags inserted within a
    specific section so this class allows users to specify the
    template flag name, allowing for multiple template flags
    (sections) within the same context.

    NOTE: the value of config-flags may be a comma-separated list of
          key=value pairs and some Openstack config files support
          comma-separated lists as values.
    """

    def __init__(self, charm_flag='config-flags',
                 template_flag='user_config_flags'):
        """
        :param charm_flag: config flags in charm configuration.
        :param template_flag: insert point for user-defined flags in
                              template file.
        """
        super(OSConfigFlagContext, self).__init__()
        self._charm_flag = charm_flag
        self._template_flag = template_flag

    def __call__(self):
        user_flags = config(self._charm_flag)
        if not user_flags:
            return {}
        return {self._template_flag: config_flags_parser(user_flags)}
4381-
4382-
class SubordinateConfigContext(OSContextGenerator):

    """
    Responsible for inspecting relations to subordinates that
    may be exporting required config via a json blob.

    The subordinate interface allows subordinates to export their
    configuration requirements to the principal for multiple config
    files and multiple services. Ie, a subordinate that has interfaces
    to both glance and nova may export to following yaml blob as json::

        glance:
            /etc/glance/glance-api.conf:
                sections:
                    DEFAULT:
                        - [key1, value1]
            /etc/glance/glance-registry.conf:
                MYSECTION:
                    - [key2, value2]
        nova:
            /etc/nova/nova.conf:
                sections:
                    DEFAULT:
                        - [key3, value3]


    It is then up to the principal charms to subscribe this context to
    the service+config file it is interested in. Configuration data will
    be available in the template context, in glance's case, as::

        ctxt = {
            ... other context ...
            'subordinate_config': {
                'DEFAULT': {
                    'key1': 'value1',
                },
                'MYSECTION': {
                    'key2': 'value2',
                },
            }
        }
    """

    def __init__(self, service, config_file, interface):
        """
        :param service : Service name key to query in any subordinate
                         data found
        :param config_file : Service's config file to query sections
        :param interface : Subordinate interface to inspect
        """
        self.service = service
        self.config_file = config_file
        self.interface = interface

    def __call__(self):
        ctxt = {'sections': {}}
        for rid in relation_ids(self.interface):
            for unit in related_units(rid):
                sub_config = relation_get('subordinate_configuration',
                                          rid=rid, unit=unit)
                if sub_config and sub_config != '':
                    try:
                        sub_config = json.loads(sub_config)
                    except ValueError:
                        # BUGFIX: narrowed from a bare 'except:' -
                        # json.loads() signals malformed input with
                        # ValueError (JSONDecodeError on Python 3).
                        log('Could not parse JSON from subordinate_config '
                            'setting from %s' % rid, level=ERROR)
                        continue

                    if self.service not in sub_config:
                        # BUGFIX: added the missing space between the
                        # adjacent string literals of this log message
                        # (previously rendered "containednothing").
                        log('Found subordinate_config on %s but it contained '
                            'nothing for %s service' % (rid, self.service),
                            level=INFO)
                        continue

                    sub_config = sub_config[self.service]
                    if self.config_file not in sub_config:
                        log('Found subordinate_config on %s but it contained '
                            'nothing for %s' % (rid, self.config_file),
                            level=INFO)
                        continue

                    sub_config = sub_config[self.config_file]
                    for k, v in six.iteritems(sub_config):
                        if k == 'sections':
                            for section, config_dict in six.iteritems(v):
                                log("adding section '%s'" % (section),
                                    level=DEBUG)
                                ctxt[k][section] = config_dict
                        else:
                            ctxt[k] = v

        log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
        return ctxt
4476-
4477-
4478-class LogLevelContext(OSContextGenerator):
4479-
4480- def __call__(self):
4481- ctxt = {}
4482- ctxt['debug'] = \
4483- False if config('debug') is None else config('debug')
4484- ctxt['verbose'] = \
4485- False if config('verbose') is None else config('verbose')
4486-
4487- return ctxt
4488-
4489-
4490-class SyslogContext(OSContextGenerator):
4491-
4492- def __call__(self):
4493- ctxt = {'use_syslog': config('use-syslog')}
4494- return ctxt
4495-
4496-
4497-class BindHostContext(OSContextGenerator):
4498-
4499- def __call__(self):
4500- if config('prefer-ipv6'):
4501- return {'bind_host': '::'}
4502- else:
4503- return {'bind_host': '0.0.0.0'}
4504-
4505-
4506-class WorkerConfigContext(OSContextGenerator):
4507-
4508- @property
4509- def num_cpus(self):
4510- try:
4511- from psutil import NUM_CPUS
4512- except ImportError:
4513- apt_install('python-psutil', fatal=True)
4514- from psutil import NUM_CPUS
4515-
4516- return NUM_CPUS
4517-
4518- def __call__(self):
4519- multiplier = config('worker-multiplier') or 0
4520- ctxt = {"workers": self.num_cpus * multiplier}
4521- return ctxt
4522-
4523-
4524-class ZeroMQContext(OSContextGenerator):
4525- interfaces = ['zeromq-configuration']
4526-
4527- def __call__(self):
4528- ctxt = {}
4529- if is_relation_made('zeromq-configuration', 'host'):
4530- for rid in relation_ids('zeromq-configuration'):
4531- for unit in related_units(rid):
4532- ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
4533- ctxt['zmq_host'] = relation_get('host', unit, rid)
4534- ctxt['zmq_redis_address'] = relation_get(
4535- 'zmq_redis_address', unit, rid)
4536-
4537- return ctxt
4538-
4539-
4540-class NotificationDriverContext(OSContextGenerator):
4541-
4542- def __init__(self, zmq_relation='zeromq-configuration',
4543- amqp_relation='amqp'):
4544- """
4545- :param zmq_relation: Name of Zeromq relation to check
4546- """
4547- self.zmq_relation = zmq_relation
4548- self.amqp_relation = amqp_relation
4549-
4550- def __call__(self):
4551- ctxt = {'notifications': 'False'}
4552- if is_relation_made(self.amqp_relation):
4553- ctxt['notifications'] = "True"
4554-
4555- return ctxt
4556-
4557-
4558-class SysctlContext(OSContextGenerator):
4559- """This context check if the 'sysctl' option exists on configuration
4560- then creates a file with the loaded contents"""
4561- def __call__(self):
4562- sysctl_dict = config('sysctl')
4563- if sysctl_dict:
4564- sysctl_create(sysctl_dict,
4565- '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
4566- return {'sysctl': sysctl_dict}
4567-
4568-
4569-class NeutronAPIContext(OSContextGenerator):
4570- '''
4571- Inspects current neutron-plugin-api relation for neutron settings. Return
4572- defaults if it is not present.
4573- '''
4574- interfaces = ['neutron-plugin-api']
4575-
4576- def __call__(self):
4577- self.neutron_defaults = {
4578- 'l2_population': {
4579- 'rel_key': 'l2-population',
4580- 'default': False,
4581- },
4582- 'overlay_network_type': {
4583- 'rel_key': 'overlay-network-type',
4584- 'default': 'gre',
4585- },
4586- 'neutron_security_groups': {
4587- 'rel_key': 'neutron-security-groups',
4588- 'default': False,
4589- },
4590- 'network_device_mtu': {
4591- 'rel_key': 'network-device-mtu',
4592- 'default': None,
4593- },
4594- 'enable_dvr': {
4595- 'rel_key': 'enable-dvr',
4596- 'default': False,
4597- },
4598- 'enable_l3ha': {
4599- 'rel_key': 'enable-l3ha',
4600- 'default': False,
4601- },
4602- }
4603- ctxt = self.get_neutron_options({})
4604- for rid in relation_ids('neutron-plugin-api'):
4605- for unit in related_units(rid):
4606- rdata = relation_get(rid=rid, unit=unit)
4607- if 'l2-population' in rdata:
4608- ctxt.update(self.get_neutron_options(rdata))
4609-
4610- return ctxt
4611-
4612- def get_neutron_options(self, rdata):
4613- settings = {}
4614- for nkey in self.neutron_defaults.keys():
4615- defv = self.neutron_defaults[nkey]['default']
4616- rkey = self.neutron_defaults[nkey]['rel_key']
4617- if rkey in rdata.keys():
4618- if type(defv) is bool:
4619- settings[nkey] = bool_from_string(rdata[rkey])
4620- else:
4621- settings[nkey] = rdata[rkey]
4622- else:
4623- settings[nkey] = defv
4624- return settings
4625-
4626-
4627-class ExternalPortContext(NeutronPortContext):
4628-
4629- def __call__(self):
4630- ctxt = {}
4631- ports = config('ext-port')
4632- if ports:
4633- ports = [p.strip() for p in ports.split()]
4634- ports = self.resolve_ports(ports)
4635- if ports:
4636- ctxt = {"ext_port": ports[0]}
4637- napi_settings = NeutronAPIContext()()
4638- mtu = napi_settings.get('network_device_mtu')
4639- if mtu:
4640- ctxt['ext_port_mtu'] = mtu
4641-
4642- return ctxt
4643-
4644-
4645-class DataPortContext(NeutronPortContext):
4646-
4647- def __call__(self):
4648- ports = config('data-port')
4649- if ports:
4650- portmap = parse_data_port_mappings(ports)
4651- ports = portmap.values()
4652- resolved = self.resolve_ports(ports)
4653- normalized = {get_nic_hwaddr(port): port for port in resolved
4654- if port not in ports}
4655- normalized.update({port: port for port in resolved
4656- if port in ports})
4657- if resolved:
4658- return {bridge: normalized[port] for bridge, port in
4659- six.iteritems(portmap) if port in normalized.keys()}
4660-
4661- return None
4662-
4663-
4664-class PhyNICMTUContext(DataPortContext):
4665-
4666- def __call__(self):
4667- ctxt = {}
4668- mappings = super(PhyNICMTUContext, self).__call__()
4669- if mappings and mappings.values():
4670- ports = mappings.values()
4671- napi_settings = NeutronAPIContext()()
4672- mtu = napi_settings.get('network_device_mtu')
4673- if mtu:
4674- ctxt["devs"] = '\\n'.join(ports)
4675- ctxt['mtu'] = mtu
4676-
4677- return ctxt
4678-
4679-
4680-class NetworkServiceContext(OSContextGenerator):
4681-
4682- def __init__(self, rel_name='quantum-network-service'):
4683- self.rel_name = rel_name
4684- self.interfaces = [rel_name]
4685-
4686- def __call__(self):
4687- for rid in relation_ids(self.rel_name):
4688- for unit in related_units(rid):
4689- rdata = relation_get(rid=rid, unit=unit)
4690- ctxt = {
4691- 'keystone_host': rdata.get('keystone_host'),
4692- 'service_port': rdata.get('service_port'),
4693- 'auth_port': rdata.get('auth_port'),
4694- 'service_tenant': rdata.get('service_tenant'),
4695- 'service_username': rdata.get('service_username'),
4696- 'service_password': rdata.get('service_password'),
4697- 'quantum_host': rdata.get('quantum_host'),
4698- 'quantum_port': rdata.get('quantum_port'),
4699- 'quantum_url': rdata.get('quantum_url'),
4700- 'region': rdata.get('region'),
4701- 'service_protocol':
4702- rdata.get('service_protocol') or 'http',
4703- 'auth_protocol':
4704- rdata.get('auth_protocol') or 'http',
4705- }
4706- if context_complete(ctxt):
4707- return ctxt
4708- return {}
4709
4710=== removed directory 'hooks/charmhelpers/contrib/openstack/files'
4711=== removed file 'hooks/charmhelpers/contrib/openstack/files/__init__.py'
4712--- hooks/charmhelpers/contrib/openstack/files/__init__.py 2015-02-19 05:17:57 +0000
4713+++ hooks/charmhelpers/contrib/openstack/files/__init__.py 1970-01-01 00:00:00 +0000
4714@@ -1,18 +0,0 @@
4715-# Copyright 2014-2015 Canonical Limited.
4716-#
4717-# This file is part of charm-helpers.
4718-#
4719-# charm-helpers is free software: you can redistribute it and/or modify
4720-# it under the terms of the GNU Lesser General Public License version 3 as
4721-# published by the Free Software Foundation.
4722-#
4723-# charm-helpers is distributed in the hope that it will be useful,
4724-# but WITHOUT ANY WARRANTY; without even the implied warranty of
4725-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4726-# GNU Lesser General Public License for more details.
4727-#
4728-# You should have received a copy of the GNU Lesser General Public License
4729-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4730-
4731-# dummy __init__.py to fool syncer into thinking this is a syncable python
4732-# module
4733
4734=== removed file 'hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh'
4735--- hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh 2015-02-25 23:34:09 +0000
4736+++ hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh 1970-01-01 00:00:00 +0000
4737@@ -1,32 +0,0 @@
4738-#!/bin/bash
4739-#--------------------------------------------
4740-# This file is managed by Juju
4741-#--------------------------------------------
4742-#
4743-# Copyright 2009,2012 Canonical Ltd.
4744-# Author: Tom Haddon
4745-
4746-CRITICAL=0
4747-NOTACTIVE=''
4748-LOGFILE=/var/log/nagios/check_haproxy.log
4749-AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
4750-
4751-for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'});
4752-do
4753- output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK')
4754- if [ $? != 0 ]; then
4755- date >> $LOGFILE
4756- echo $output >> $LOGFILE
4757- /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1
4758- CRITICAL=1
4759- NOTACTIVE="${NOTACTIVE} $appserver"
4760- fi
4761-done
4762-
4763-if [ $CRITICAL = 1 ]; then
4764- echo "CRITICAL:${NOTACTIVE}"
4765- exit 2
4766-fi
4767-
4768-echo "OK: All haproxy instances looking good"
4769-exit 0
4770
4771=== removed file 'hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh'
4772--- hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh 2015-02-25 23:34:09 +0000
4773+++ hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh 1970-01-01 00:00:00 +0000
4774@@ -1,30 +0,0 @@
4775-#!/bin/bash
4776-#--------------------------------------------
4777-# This file is managed by Juju
4778-#--------------------------------------------
4779-#
4780-# Copyright 2009,2012 Canonical Ltd.
4781-# Author: Tom Haddon
4782-
4783-# These should be config options at some stage
4784-CURRQthrsh=0
4785-MAXQthrsh=100
4786-
4787-AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
4788-
4789-HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
4790-
4791-for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}')
4792-do
4793- CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3)
4794- MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4)
4795-
4796- if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then
4797- echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ"
4798- exit 2
4799- fi
4800-done
4801-
4802-echo "OK: All haproxy queue depths looking good"
4803-exit 0
4804-
4805
4806=== removed file 'hooks/charmhelpers/contrib/openstack/ip.py'
4807--- hooks/charmhelpers/contrib/openstack/ip.py 2015-03-31 15:13:53 +0000
4808+++ hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
4809@@ -1,146 +0,0 @@
4810-# Copyright 2014-2015 Canonical Limited.
4811-#
4812-# This file is part of charm-helpers.
4813-#
4814-# charm-helpers is free software: you can redistribute it and/or modify
4815-# it under the terms of the GNU Lesser General Public License version 3 as
4816-# published by the Free Software Foundation.
4817-#
4818-# charm-helpers is distributed in the hope that it will be useful,
4819-# but WITHOUT ANY WARRANTY; without even the implied warranty of
4820-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4821-# GNU Lesser General Public License for more details.
4822-#
4823-# You should have received a copy of the GNU Lesser General Public License
4824-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4825-
4826-from charmhelpers.core.hookenv import (
4827- config,
4828- unit_get,
4829-)
4830-from charmhelpers.contrib.network.ip import (
4831- get_address_in_network,
4832- is_address_in_network,
4833- is_ipv6,
4834- get_ipv6_addr,
4835-)
4836-from charmhelpers.contrib.hahelpers.cluster import is_clustered
4837-
4838-from functools import partial
4839-
4840-PUBLIC = 'public'
4841-INTERNAL = 'int'
4842-ADMIN = 'admin'
4843-
4844-ADDRESS_MAP = {
4845- PUBLIC: {
4846- 'config': 'os-public-network',
4847- 'fallback': 'public-address'
4848- },
4849- INTERNAL: {
4850- 'config': 'os-internal-network',
4851- 'fallback': 'private-address'
4852- },
4853- ADMIN: {
4854- 'config': 'os-admin-network',
4855- 'fallback': 'private-address'
4856- }
4857-}
4858-
4859-
4860-def canonical_url(configs, endpoint_type=PUBLIC):
4861- """Returns the correct HTTP URL to this host given the state of HTTPS
4862- configuration, hacluster and charm configuration.
4863-
4864- :param configs: OSTemplateRenderer config templating object to inspect
4865- for a complete https context.
4866- :param endpoint_type: str endpoint type to resolve.
4867- :param returns: str base URL for services on the current service unit.
4868- """
4869- scheme = 'http'
4870- if 'https' in configs.complete_contexts():
4871- scheme = 'https'
4872- address = resolve_address(endpoint_type)
4873- if is_ipv6(address):
4874- address = "[{}]".format(address)
4875- return '%s://%s' % (scheme, address)
4876-
4877-
4878-def resolve_address(endpoint_type=PUBLIC):
4879- """Return unit address depending on net config.
4880-
4881- If unit is clustered with vip(s) and has net splits defined, return vip on
4882- correct network. If clustered with no nets defined, return primary vip.
4883-
4884- If not clustered, return unit address ensuring address is on configured net
4885- split if one is configured.
4886-
4887- :param endpoint_type: Network endpoing type
4888- """
4889- resolved_address = None
4890- vips = config('vip')
4891- if vips:
4892- vips = vips.split()
4893-
4894- net_type = ADDRESS_MAP[endpoint_type]['config']
4895- net_addr = config(net_type)
4896- net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
4897- clustered = is_clustered()
4898- if clustered:
4899- if not net_addr:
4900- # If no net-splits defined, we expect a single vip
4901- resolved_address = vips[0]
4902- else:
4903- for vip in vips:
4904- if is_address_in_network(net_addr, vip):
4905- resolved_address = vip
4906- break
4907- else:
4908- if config('prefer-ipv6'):
4909- fallback_addr = get_ipv6_addr(exc_list=vips)[0]
4910- else:
4911- fallback_addr = unit_get(net_fallback)
4912-
4913- resolved_address = get_address_in_network(net_addr, fallback_addr)
4914-
4915- if resolved_address is None:
4916- raise ValueError("Unable to resolve a suitable IP address based on "
4917- "charm state and configuration. (net_type=%s, "
4918- "clustered=%s)" % (net_type, clustered))
4919-
4920- return resolved_address
4921-
4922-
4923-def endpoint_url(configs, url_template, port, endpoint_type=PUBLIC,
4924- override=None):
4925- """Returns the correct endpoint URL to advertise to Keystone.
4926-
4927- This method provides the correct endpoint URL which should be advertised to
4928- the keystone charm for endpoint creation. This method allows for the url to
4929- be overridden to force a keystone endpoint to have specific URL for any of
4930- the defined scopes (admin, internal, public).
4931-
4932- :param configs: OSTemplateRenderer config templating object to inspect
4933- for a complete https context.
4934- :param url_template: str format string for creating the url template. Only
4935- two values will be passed - the scheme+hostname
4936- returned by the canonical_url and the port.
4937- :param endpoint_type: str endpoint type to resolve.
4938- :param override: str the name of the config option which overrides the
4939- endpoint URL defined by the charm itself. None will
4940- disable any overrides (default).
4941- """
4942- if override:
4943- # Return any user-defined overrides for the keystone endpoint URL.
4944- user_value = config(override)
4945- if user_value:
4946- return user_value.strip()
4947-
4948- return url_template % (canonical_url(configs, endpoint_type), port)
4949-
4950-
4951-public_endpoint = partial(endpoint_url, endpoint_type=PUBLIC)
4952-
4953-internal_endpoint = partial(endpoint_url, endpoint_type=INTERNAL)
4954-
4955-admin_endpoint = partial(endpoint_url, endpoint_type=ADMIN)
4956
4957=== removed file 'hooks/charmhelpers/contrib/openstack/neutron.py'
4958--- hooks/charmhelpers/contrib/openstack/neutron.py 2015-04-16 20:07:38 +0000
4959+++ hooks/charmhelpers/contrib/openstack/neutron.py 1970-01-01 00:00:00 +0000
4960@@ -1,322 +0,0 @@
4961-# Copyright 2014-2015 Canonical Limited.
4962-#
4963-# This file is part of charm-helpers.
4964-#
4965-# charm-helpers is free software: you can redistribute it and/or modify
4966-# it under the terms of the GNU Lesser General Public License version 3 as
4967-# published by the Free Software Foundation.
4968-#
4969-# charm-helpers is distributed in the hope that it will be useful,
4970-# but WITHOUT ANY WARRANTY; without even the implied warranty of
4971-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4972-# GNU Lesser General Public License for more details.
4973-#
4974-# You should have received a copy of the GNU Lesser General Public License
4975-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4976-
4977-# Various utilies for dealing with Neutron and the renaming from Quantum.
4978-
4979-import six
4980-from subprocess import check_output
4981-
4982-from charmhelpers.core.hookenv import (
4983- config,
4984- log,
4985- ERROR,
4986-)
4987-
4988-from charmhelpers.contrib.openstack.utils import os_release
4989-
4990-
4991-def headers_package():
4992- """Ensures correct linux-headers for running kernel are installed,
4993- for building DKMS package"""
4994- kver = check_output(['uname', '-r']).decode('UTF-8').strip()
4995- return 'linux-headers-%s' % kver
4996-
4997-QUANTUM_CONF_DIR = '/etc/quantum'
4998-
4999-
5000-def kernel_version():
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches