Merge lp:~opnfv-team/charms/trusty/neutron-api-odl/packagefix into lp:~openstack-charmers-archive/charms/trusty/neutron-api-odl/next

Proposed by Narinder Gupta
Status: Merged
Merged at revision: 12
Proposed branch: lp:~opnfv-team/charms/trusty/neutron-api-odl/packagefix
Merge into: lp:~openstack-charmers-archive/charms/trusty/neutron-api-odl/next
Diff against target: 2291 lines (+996/-317)
20 files modified
hooks/charmhelpers/contrib/network/ip.py (+21/-19)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+10/-4)
hooks/charmhelpers/contrib/openstack/context.py (+48/-10)
hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh (+7/-5)
hooks/charmhelpers/contrib/openstack/neutron.py (+20/-8)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+19/-11)
hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken (+11/-0)
hooks/charmhelpers/contrib/openstack/utils.py (+145/-67)
hooks/charmhelpers/contrib/python/packages.py (+35/-11)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+441/-59)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+10/-0)
hooks/charmhelpers/core/hookenv.py (+41/-7)
hooks/charmhelpers/core/host.py (+103/-43)
hooks/charmhelpers/core/services/helpers.py (+11/-5)
hooks/charmhelpers/core/templating.py (+13/-7)
hooks/charmhelpers/fetch/__init__.py (+9/-1)
hooks/charmhelpers/fetch/archiveurl.py (+1/-1)
hooks/charmhelpers/fetch/bzrurl.py (+22/-32)
hooks/charmhelpers/fetch/giturl.py (+20/-23)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+9/-4)
To merge this branch: bzr merge lp:~opnfv-team/charms/trusty/neutron-api-odl/packagefix
Reviewer Review Type Date Requested Status
David Ames (community) Approve
James Page Pending
Review via email: mp+285933@code.launchpad.net

Description of the change

Sync charm helpers; without this, the python-networking-odl package does not install, which breaks API access.

To post a comment you must log in.
Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_lint_check #384 neutron-api-odl-next for narindergupta mp285933
    LINT OK: passed

Build: http://10.245.162.36:8080/job/charm_lint_check/384/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_unit_test #305 neutron-api-odl-next for narindergupta mp285933
    UNIT OK: passed

Build: http://10.245.162.36:8080/job/charm_unit_test/305/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_amulet_test #155 neutron-api-odl-next for narindergupta mp285933
    AMULET FAIL: amulet-test failed

AMULET Results (max last 2 lines):
make: *** [functional_test] Error 1
ERROR:root:Make target returned non-zero.

Full amulet test output: http://paste.ubuntu.com/15028593/
Build: http://10.245.162.36:8080/job/charm_amulet_test/155/

Revision history for this message
David Ames (thedac) wrote :

This is a simple charm-helpers sync to fix Liberty version-string handling, now that neutron-common is at version 7.0.1-0ubuntu1~cloud0:

      'neutron-common': OrderedDict([
- ('7.0.0', 'liberty'),
+ ('7.0', 'liberty'),
+ ('8.0', 'mitaka'),

Approved

review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
2--- hooks/charmhelpers/contrib/network/ip.py 2015-11-03 12:29:06 +0000
3+++ hooks/charmhelpers/contrib/network/ip.py 2016-02-12 19:36:45 +0000
4@@ -53,7 +53,7 @@
5
6
7 def no_ip_found_error_out(network):
8- errmsg = ("No IP address found in network: %s" % network)
9+ errmsg = ("No IP address found in network(s): %s" % network)
10 raise ValueError(errmsg)
11
12
13@@ -61,7 +61,7 @@
14 """Get an IPv4 or IPv6 address within the network from the host.
15
16 :param network (str): CIDR presentation format. For example,
17- '192.168.1.0/24'.
18+ '192.168.1.0/24'. Supports multiple networks as a space-delimited list.
19 :param fallback (str): If no address is found, return fallback.
20 :param fatal (boolean): If no address is found, fallback is not
21 set and fatal is True then exit(1).
22@@ -75,24 +75,26 @@
23 else:
24 return None
25
26- _validate_cidr(network)
27- network = netaddr.IPNetwork(network)
28- for iface in netifaces.interfaces():
29- addresses = netifaces.ifaddresses(iface)
30- if network.version == 4 and netifaces.AF_INET in addresses:
31- addr = addresses[netifaces.AF_INET][0]['addr']
32- netmask = addresses[netifaces.AF_INET][0]['netmask']
33- cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
34- if cidr in network:
35- return str(cidr.ip)
36+ networks = network.split() or [network]
37+ for network in networks:
38+ _validate_cidr(network)
39+ network = netaddr.IPNetwork(network)
40+ for iface in netifaces.interfaces():
41+ addresses = netifaces.ifaddresses(iface)
42+ if network.version == 4 and netifaces.AF_INET in addresses:
43+ addr = addresses[netifaces.AF_INET][0]['addr']
44+ netmask = addresses[netifaces.AF_INET][0]['netmask']
45+ cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
46+ if cidr in network:
47+ return str(cidr.ip)
48
49- if network.version == 6 and netifaces.AF_INET6 in addresses:
50- for addr in addresses[netifaces.AF_INET6]:
51- if not addr['addr'].startswith('fe80'):
52- cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
53- addr['netmask']))
54- if cidr in network:
55- return str(cidr.ip)
56+ if network.version == 6 and netifaces.AF_INET6 in addresses:
57+ for addr in addresses[netifaces.AF_INET6]:
58+ if not addr['addr'].startswith('fe80'):
59+ cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
60+ addr['netmask']))
61+ if cidr in network:
62+ return str(cidr.ip)
63
64 if fallback is not None:
65 return fallback
66
67=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
68--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-11-03 12:29:06 +0000
69+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2016-02-12 19:36:45 +0000
70@@ -121,10 +121,12 @@
71
72 # Charms which should use the source config option
73 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
74- 'ceph-osd', 'ceph-radosgw']
75+ 'ceph-osd', 'ceph-radosgw', 'ceph-mon']
76
77 # Charms which can not use openstack-origin, ie. many subordinates
78- no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
79+ no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
80+ 'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
81+ 'cinder-backup']
82
83 if self.openstack:
84 for svc in services:
85@@ -224,7 +226,8 @@
86 self.precise_havana, self.precise_icehouse,
87 self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
88 self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
89- self.wily_liberty) = range(12)
90+ self.wily_liberty, self.trusty_mitaka,
91+ self.xenial_mitaka) = range(14)
92
93 releases = {
94 ('precise', None): self.precise_essex,
95@@ -236,9 +239,11 @@
96 ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
97 ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
98 ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
99+ ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
100 ('utopic', None): self.utopic_juno,
101 ('vivid', None): self.vivid_kilo,
102- ('wily', None): self.wily_liberty}
103+ ('wily', None): self.wily_liberty,
104+ ('xenial', None): self.xenial_mitaka}
105 return releases[(self.series, self.openstack)]
106
107 def _get_openstack_release_string(self):
108@@ -255,6 +260,7 @@
109 ('utopic', 'juno'),
110 ('vivid', 'kilo'),
111 ('wily', 'liberty'),
112+ ('xenial', 'mitaka'),
113 ])
114 if self.openstack:
115 os_origin = self.openstack.split(':')[1]
116
117=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
118--- hooks/charmhelpers/contrib/openstack/context.py 2015-11-03 12:29:06 +0000
119+++ hooks/charmhelpers/contrib/openstack/context.py 2016-02-12 19:36:45 +0000
120@@ -57,6 +57,7 @@
121 get_nic_hwaddr,
122 mkdir,
123 write_file,
124+ pwgen,
125 )
126 from charmhelpers.contrib.hahelpers.cluster import (
127 determine_apache_port,
128@@ -87,6 +88,14 @@
129 is_bridge_member,
130 )
131 from charmhelpers.contrib.openstack.utils import get_host_ip
132+from charmhelpers.core.unitdata import kv
133+
134+try:
135+ import psutil
136+except ImportError:
137+ apt_install('python-psutil', fatal=True)
138+ import psutil
139+
140 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
141 ADDRESS_TYPES = ['admin', 'internal', 'public']
142
143@@ -401,6 +410,7 @@
144 auth_host = format_ipv6_addr(auth_host) or auth_host
145 svc_protocol = rdata.get('service_protocol') or 'http'
146 auth_protocol = rdata.get('auth_protocol') or 'http'
147+ api_version = rdata.get('api_version') or '2.0'
148 ctxt.update({'service_port': rdata.get('service_port'),
149 'service_host': serv_host,
150 'auth_host': auth_host,
151@@ -409,7 +419,8 @@
152 'admin_user': rdata.get('service_username'),
153 'admin_password': rdata.get('service_password'),
154 'service_protocol': svc_protocol,
155- 'auth_protocol': auth_protocol})
156+ 'auth_protocol': auth_protocol,
157+ 'api_version': api_version})
158
159 if self.context_complete(ctxt):
160 # NOTE(jamespage) this is required for >= icehouse
161@@ -626,15 +637,28 @@
162 if config('haproxy-client-timeout'):
163 ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
164
165+ if config('haproxy-queue-timeout'):
166+ ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout')
167+
168+ if config('haproxy-connect-timeout'):
169+ ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')
170+
171 if config('prefer-ipv6'):
172 ctxt['ipv6'] = True
173 ctxt['local_host'] = 'ip6-localhost'
174 ctxt['haproxy_host'] = '::'
175- ctxt['stat_port'] = ':::8888'
176 else:
177 ctxt['local_host'] = '127.0.0.1'
178 ctxt['haproxy_host'] = '0.0.0.0'
179- ctxt['stat_port'] = ':8888'
180+
181+ ctxt['stat_port'] = '8888'
182+
183+ db = kv()
184+ ctxt['stat_password'] = db.get('stat-password')
185+ if not ctxt['stat_password']:
186+ ctxt['stat_password'] = db.set('stat-password',
187+ pwgen(32))
188+ db.flush()
189
190 for frontend in cluster_hosts:
191 if (len(cluster_hosts[frontend]['backends']) > 1 or
192@@ -1088,6 +1112,20 @@
193 config_flags_parser(config_flags)}
194
195
196+class LibvirtConfigFlagsContext(OSContextGenerator):
197+ """
198+ This context provides support for extending
199+ the libvirt section through user-defined flags.
200+ """
201+ def __call__(self):
202+ ctxt = {}
203+ libvirt_flags = config('libvirt-flags')
204+ if libvirt_flags:
205+ ctxt['libvirt_flags'] = config_flags_parser(
206+ libvirt_flags)
207+ return ctxt
208+
209+
210 class SubordinateConfigContext(OSContextGenerator):
211
212 """
213@@ -1228,13 +1266,11 @@
214
215 @property
216 def num_cpus(self):
217- try:
218- from psutil import NUM_CPUS
219- except ImportError:
220- apt_install('python-psutil', fatal=True)
221- from psutil import NUM_CPUS
222-
223- return NUM_CPUS
224+ # NOTE: use cpu_count if present (16.04 support)
225+ if hasattr(psutil, 'cpu_count'):
226+ return psutil.cpu_count()
227+ else:
228+ return psutil.NUM_CPUS
229
230 def __call__(self):
231 multiplier = config('worker-multiplier') or 0
232@@ -1437,6 +1473,8 @@
233 rdata.get('service_protocol') or 'http',
234 'auth_protocol':
235 rdata.get('auth_protocol') or 'http',
236+ 'api_version':
237+ rdata.get('api_version') or '2.0',
238 }
239 if self.context_complete(ctxt):
240 return ctxt
241
242=== modified file 'hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh'
243--- hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh 2015-06-24 12:22:08 +0000
244+++ hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh 2016-02-12 19:36:45 +0000
245@@ -9,15 +9,17 @@
246 CRITICAL=0
247 NOTACTIVE=''
248 LOGFILE=/var/log/nagios/check_haproxy.log
249-AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
250+AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR=1{print $4}')
251
252-for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'});
253+typeset -i N_INSTANCES=0
254+for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)
255 do
256- output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK')
257+ N_INSTANCES=N_INSTANCES+1
258+ output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK')
259 if [ $? != 0 ]; then
260 date >> $LOGFILE
261 echo $output >> $LOGFILE
262- /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1
263+ /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1
264 CRITICAL=1
265 NOTACTIVE="${NOTACTIVE} $appserver"
266 fi
267@@ -28,5 +30,5 @@
268 exit 2
269 fi
270
271-echo "OK: All haproxy instances looking good"
272+echo "OK: All haproxy instances ($N_INSTANCES) looking good"
273 exit 0
274
275=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
276--- hooks/charmhelpers/contrib/openstack/neutron.py 2015-11-03 12:29:06 +0000
277+++ hooks/charmhelpers/contrib/openstack/neutron.py 2016-02-12 19:36:45 +0000
278@@ -50,7 +50,7 @@
279 if kernel_version() >= (3, 13):
280 return []
281 else:
282- return ['openvswitch-datapath-dkms']
283+ return [headers_package(), 'openvswitch-datapath-dkms']
284
285
286 # legacy
287@@ -70,7 +70,7 @@
288 relation_prefix='neutron',
289 ssl_dir=QUANTUM_CONF_DIR)],
290 'services': ['quantum-plugin-openvswitch-agent'],
291- 'packages': [[headers_package()] + determine_dkms_package(),
292+ 'packages': [determine_dkms_package(),
293 ['quantum-plugin-openvswitch-agent']],
294 'server_packages': ['quantum-server',
295 'quantum-plugin-openvswitch'],
296@@ -111,7 +111,7 @@
297 relation_prefix='neutron',
298 ssl_dir=NEUTRON_CONF_DIR)],
299 'services': ['neutron-plugin-openvswitch-agent'],
300- 'packages': [[headers_package()] + determine_dkms_package(),
301+ 'packages': [determine_dkms_package(),
302 ['neutron-plugin-openvswitch-agent']],
303 'server_packages': ['neutron-server',
304 'neutron-plugin-openvswitch'],
305@@ -155,7 +155,7 @@
306 relation_prefix='neutron',
307 ssl_dir=NEUTRON_CONF_DIR)],
308 'services': [],
309- 'packages': [[headers_package()] + determine_dkms_package(),
310+ 'packages': [determine_dkms_package(),
311 ['neutron-plugin-cisco']],
312 'server_packages': ['neutron-server',
313 'neutron-plugin-cisco'],
314@@ -174,7 +174,7 @@
315 'neutron-dhcp-agent',
316 'nova-api-metadata',
317 'etcd'],
318- 'packages': [[headers_package()] + determine_dkms_package(),
319+ 'packages': [determine_dkms_package(),
320 ['calico-compute',
321 'bird',
322 'neutron-dhcp-agent',
323@@ -204,8 +204,8 @@
324 database=config('database'),
325 ssl_dir=NEUTRON_CONF_DIR)],
326 'services': [],
327- 'packages': [['plumgrid-lxc'],
328- ['iovisor-dkms']],
329+ 'packages': ['plumgrid-lxc',
330+ 'iovisor-dkms'],
331 'server_packages': ['neutron-server',
332 'neutron-plugin-plumgrid'],
333 'server_services': ['neutron-server']
334@@ -219,7 +219,7 @@
335 relation_prefix='neutron',
336 ssl_dir=NEUTRON_CONF_DIR)],
337 'services': [],
338- 'packages': [[headers_package()] + determine_dkms_package()],
339+ 'packages': [determine_dkms_package()],
340 'server_packages': ['neutron-server',
341 'python-neutron-plugin-midonet'],
342 'server_services': ['neutron-server']
343@@ -233,6 +233,18 @@
344 'neutron-plugin-ml2']
345 # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
346 plugins['nvp'] = plugins['nsx']
347+ if release >= 'kilo':
348+ plugins['midonet']['driver'] = (
349+ 'neutron.plugins.midonet.plugin.MidonetPluginV2')
350+ if release >= 'liberty':
351+ midonet_origin = config('midonet-origin')
352+ if midonet_origin is not None and midonet_origin[4:5] == '1':
353+ plugins['midonet']['driver'] = (
354+ 'midonet.neutron.plugin_v1.MidonetPluginV2')
355+ plugins['midonet']['server_packages'].remove(
356+ 'python-neutron-plugin-midonet')
357+ plugins['midonet']['server_packages'].append(
358+ 'python-networking-midonet')
359 return plugins
360
361
362
363=== modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
364--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2015-06-24 12:22:08 +0000
365+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2016-02-12 19:36:45 +0000
366@@ -12,27 +12,35 @@
367 option tcplog
368 option dontlognull
369 retries 3
370- timeout queue 1000
371- timeout connect 1000
372-{% if haproxy_client_timeout -%}
373+{%- if haproxy_queue_timeout %}
374+ timeout queue {{ haproxy_queue_timeout }}
375+{%- else %}
376+ timeout queue 5000
377+{%- endif %}
378+{%- if haproxy_connect_timeout %}
379+ timeout connect {{ haproxy_connect_timeout }}
380+{%- else %}
381+ timeout connect 5000
382+{%- endif %}
383+{%- if haproxy_client_timeout %}
384 timeout client {{ haproxy_client_timeout }}
385-{% else -%}
386+{%- else %}
387 timeout client 30000
388-{% endif -%}
389-
390-{% if haproxy_server_timeout -%}
391+{%- endif %}
392+{%- if haproxy_server_timeout %}
393 timeout server {{ haproxy_server_timeout }}
394-{% else -%}
395+{%- else %}
396 timeout server 30000
397-{% endif -%}
398+{%- endif %}
399
400-listen stats {{ stat_port }}
401+listen stats
402+ bind {{ local_host }}:{{ stat_port }}
403 mode http
404 stats enable
405 stats hide-version
406 stats realm Haproxy\ Statistics
407 stats uri /
408- stats auth admin:password
409+ stats auth admin:{{ stat_password }}
410
411 {% if frontends -%}
412 {% for service, ports in service_ports.items() -%}
413
414=== modified file 'hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken'
415--- hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken 2015-06-24 12:22:08 +0000
416+++ hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken 2016-02-12 19:36:45 +0000
417@@ -1,4 +1,14 @@
418 {% if auth_host -%}
419+{% if api_version == '3' -%}
420+[keystone_authtoken]
421+auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
422+project_name = {{ admin_tenant_name }}
423+username = {{ admin_user }}
424+password = {{ admin_password }}
425+project_domain_name = default
426+user_domain_name = default
427+auth_plugin = password
428+{% else -%}
429 [keystone_authtoken]
430 identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }}
431 auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }}
432@@ -7,3 +17,4 @@
433 admin_password = {{ admin_password }}
434 signing_dir = {{ signing_dir }}
435 {% endif -%}
436+{% endif -%}
437
438=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
439--- hooks/charmhelpers/contrib/openstack/utils.py 2015-11-03 12:29:06 +0000
440+++ hooks/charmhelpers/contrib/openstack/utils.py 2016-02-12 19:36:45 +0000
441@@ -25,6 +25,7 @@
442 import re
443
444 import six
445+import tempfile
446 import traceback
447 import uuid
448 import yaml
449@@ -41,6 +42,7 @@
450 config,
451 log as juju_log,
452 charm_dir,
453+ DEBUG,
454 INFO,
455 related_units,
456 relation_ids,
457@@ -86,6 +88,7 @@
458 ('utopic', 'juno'),
459 ('vivid', 'kilo'),
460 ('wily', 'liberty'),
461+ ('xenial', 'mitaka'),
462 ])
463
464
465@@ -99,61 +102,70 @@
466 ('2014.2', 'juno'),
467 ('2015.1', 'kilo'),
468 ('2015.2', 'liberty'),
469+ ('2016.1', 'mitaka'),
470 ])
471
472-# The ugly duckling
473+# The ugly duckling - must list releases oldest to newest
474 SWIFT_CODENAMES = OrderedDict([
475- ('1.4.3', 'diablo'),
476- ('1.4.8', 'essex'),
477- ('1.7.4', 'folsom'),
478- ('1.8.0', 'grizzly'),
479- ('1.7.7', 'grizzly'),
480- ('1.7.6', 'grizzly'),
481- ('1.10.0', 'havana'),
482- ('1.9.1', 'havana'),
483- ('1.9.0', 'havana'),
484- ('1.13.1', 'icehouse'),
485- ('1.13.0', 'icehouse'),
486- ('1.12.0', 'icehouse'),
487- ('1.11.0', 'icehouse'),
488- ('2.0.0', 'juno'),
489- ('2.1.0', 'juno'),
490- ('2.2.0', 'juno'),
491- ('2.2.1', 'kilo'),
492- ('2.2.2', 'kilo'),
493- ('2.3.0', 'liberty'),
494- ('2.4.0', 'liberty'),
495- ('2.5.0', 'liberty'),
496+ ('diablo',
497+ ['1.4.3']),
498+ ('essex',
499+ ['1.4.8']),
500+ ('folsom',
501+ ['1.7.4']),
502+ ('grizzly',
503+ ['1.7.6', '1.7.7', '1.8.0']),
504+ ('havana',
505+ ['1.9.0', '1.9.1', '1.10.0']),
506+ ('icehouse',
507+ ['1.11.0', '1.12.0', '1.13.0', '1.13.1']),
508+ ('juno',
509+ ['2.0.0', '2.1.0', '2.2.0']),
510+ ('kilo',
511+ ['2.2.1', '2.2.2']),
512+ ('liberty',
513+ ['2.3.0', '2.4.0', '2.5.0']),
514+ ('mitaka',
515+ ['2.5.0']),
516 ])
517
518 # >= Liberty version->codename mapping
519 PACKAGE_CODENAMES = {
520 'nova-common': OrderedDict([
521- ('12.0.0', 'liberty'),
522+ ('12.0', 'liberty'),
523+ ('13.0', 'mitaka'),
524 ]),
525 'neutron-common': OrderedDict([
526- ('7.0.0', 'liberty'),
527+ ('7.0', 'liberty'),
528+ ('8.0', 'mitaka'),
529 ]),
530 'cinder-common': OrderedDict([
531- ('7.0.0', 'liberty'),
532+ ('7.0', 'liberty'),
533+ ('8.0', 'mitaka'),
534 ]),
535 'keystone': OrderedDict([
536- ('8.0.0', 'liberty'),
537+ ('8.0', 'liberty'),
538+ ('9.0', 'mitaka'),
539 ]),
540 'horizon-common': OrderedDict([
541- ('8.0.0', 'liberty'),
542+ ('8.0', 'liberty'),
543+ ('9.0', 'mitaka'),
544 ]),
545 'ceilometer-common': OrderedDict([
546- ('5.0.0', 'liberty'),
547+ ('5.0', 'liberty'),
548+ ('6.0', 'mitaka'),
549 ]),
550 'heat-common': OrderedDict([
551- ('5.0.0', 'liberty'),
552+ ('5.0', 'liberty'),
553+ ('6.0', 'mitaka'),
554 ]),
555 'glance-common': OrderedDict([
556- ('11.0.0', 'liberty'),
557+ ('11.0', 'liberty'),
558+ ('12.0', 'mitaka'),
559 ]),
560 'openstack-dashboard': OrderedDict([
561- ('8.0.0', 'liberty'),
562+ ('8.0', 'liberty'),
563+ ('9.0', 'mitaka'),
564 ]),
565 }
566
567@@ -216,6 +228,33 @@
568 error_out(e)
569
570
571+def get_os_version_codename_swift(codename):
572+ '''Determine OpenStack version number of swift from codename.'''
573+ for k, v in six.iteritems(SWIFT_CODENAMES):
574+ if k == codename:
575+ return v[-1]
576+ e = 'Could not derive swift version for '\
577+ 'codename: %s' % codename
578+ error_out(e)
579+
580+
581+def get_swift_codename(version):
582+ '''Determine OpenStack codename that corresponds to swift version.'''
583+ codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
584+ if len(codenames) > 1:
585+ # If more than one release codename contains this version we determine
586+ # the actual codename based on the highest available install source.
587+ for codename in reversed(codenames):
588+ releases = UBUNTU_OPENSTACK_RELEASE
589+ release = [k for k, v in six.iteritems(releases) if codename in v]
590+ ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
591+ if codename in ret or release[0] in ret:
592+ return codename
593+ elif len(codenames) == 1:
594+ return codenames[0]
595+ return None
596+
597+
598 def get_os_codename_package(package, fatal=True):
599 '''Derive OpenStack release codename from an installed package.'''
600 import apt_pkg as apt
601@@ -240,7 +279,14 @@
602 error_out(e)
603
604 vers = apt.upstream_version(pkg.current_ver.ver_str)
605- match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
606+ if 'swift' in pkg.name:
607+ # Fully x.y.z match for swift versions
608+ match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
609+ else:
610+ # x.y match only for 20XX.X
611+ # and ignore patch level for other packages
612+ match = re.match('^(\d+)\.(\d+)', vers)
613+
614 if match:
615 vers = match.group(0)
616
617@@ -252,13 +298,8 @@
618 # < Liberty co-ordinated project versions
619 try:
620 if 'swift' in pkg.name:
621- swift_vers = vers[:5]
622- if swift_vers not in SWIFT_CODENAMES:
623- # Deal with 1.10.0 upward
624- swift_vers = vers[:6]
625- return SWIFT_CODENAMES[swift_vers]
626+ return get_swift_codename(vers)
627 else:
628- vers = vers[:6]
629 return OPENSTACK_CODENAMES[vers]
630 except KeyError:
631 if not fatal:
632@@ -276,12 +317,14 @@
633
634 if 'swift' in pkg:
635 vers_map = SWIFT_CODENAMES
636+ for cname, version in six.iteritems(vers_map):
637+ if cname == codename:
638+ return version[-1]
639 else:
640 vers_map = OPENSTACK_CODENAMES
641-
642- for version, cname in six.iteritems(vers_map):
643- if cname == codename:
644- return version
645+ for version, cname in six.iteritems(vers_map):
646+ if cname == codename:
647+ return version
648 # e = "Could not determine OpenStack version for package: %s" % pkg
649 # error_out(e)
650
651@@ -306,12 +349,42 @@
652
653
654 def import_key(keyid):
655- cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
656- "--recv-keys %s" % keyid
657- try:
658- subprocess.check_call(cmd.split(' '))
659- except subprocess.CalledProcessError:
660- error_out("Error importing repo key %s" % keyid)
661+ key = keyid.strip()
662+ if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and
663+ key.endswith('-----END PGP PUBLIC KEY BLOCK-----')):
664+ juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
665+ juju_log("Importing ASCII Armor PGP key", level=DEBUG)
666+ with tempfile.NamedTemporaryFile() as keyfile:
667+ with open(keyfile.name, 'w') as fd:
668+ fd.write(key)
669+ fd.write("\n")
670+
671+ cmd = ['apt-key', 'add', keyfile.name]
672+ try:
673+ subprocess.check_call(cmd)
674+ except subprocess.CalledProcessError:
675+ error_out("Error importing PGP key '%s'" % key)
676+ else:
677+ juju_log("PGP key found (looks like Radix64 format)", level=DEBUG)
678+ juju_log("Importing PGP key from keyserver", level=DEBUG)
679+ cmd = ['apt-key', 'adv', '--keyserver',
680+ 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
681+ try:
682+ subprocess.check_call(cmd)
683+ except subprocess.CalledProcessError:
684+ error_out("Error importing PGP key '%s'" % key)
685+
686+
687+def get_source_and_pgp_key(input):
688+ """Look for a pgp key ID or ascii-armor key in the given input."""
689+ index = input.strip()
690+ index = input.rfind('|')
691+ if index < 0:
692+ return input, None
693+
694+ key = input[index + 1:].strip('|')
695+ source = input[:index]
696+ return source, key
697
698
699 def configure_installation_source(rel):
700@@ -323,16 +396,16 @@
701 with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
702 f.write(DISTRO_PROPOSED % ubuntu_rel)
703 elif rel[:4] == "ppa:":
704- src = rel
705+ src, key = get_source_and_pgp_key(rel)
706+ if key:
707+ import_key(key)
708+
709 subprocess.check_call(["add-apt-repository", "-y", src])
710 elif rel[:3] == "deb":
711- l = len(rel.split('|'))
712- if l == 2:
713- src, key = rel.split('|')
714- juju_log("Importing PPA key from keyserver for %s" % src)
715+ src, key = get_source_and_pgp_key(rel)
716+ if key:
717 import_key(key)
718- elif l == 1:
719- src = rel
720+
721 with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
722 f.write(src)
723 elif rel[:6] == 'cloud:':
724@@ -377,6 +450,9 @@
725 'liberty': 'trusty-updates/liberty',
726 'liberty/updates': 'trusty-updates/liberty',
727 'liberty/proposed': 'trusty-proposed/liberty',
728+ 'mitaka': 'trusty-updates/mitaka',
729+ 'mitaka/updates': 'trusty-updates/mitaka',
730+ 'mitaka/proposed': 'trusty-proposed/mitaka',
731 }
732
733 try:
734@@ -444,11 +520,16 @@
735 cur_vers = get_os_version_package(package)
736 if "swift" in package:
737 codename = get_os_codename_install_source(src)
738- available_vers = get_os_version_codename(codename, SWIFT_CODENAMES)
739+ avail_vers = get_os_version_codename_swift(codename)
740 else:
741- available_vers = get_os_version_install_source(src)
742+ avail_vers = get_os_version_install_source(src)
743 apt.init()
744- return apt.version_compare(available_vers, cur_vers) == 1
745+ if "swift" in package:
746+ major_cur_vers = cur_vers.split('.', 1)[0]
747+ major_avail_vers = avail_vers.split('.', 1)[0]
748+ major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
749+ return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
750+ return apt.version_compare(avail_vers, cur_vers) == 1
751
752
753 def ensure_block_device(block_device):
754@@ -577,7 +658,7 @@
755 return yaml.load(projects_yaml)
756
757
758-def git_clone_and_install(projects_yaml, core_project, depth=1):
759+def git_clone_and_install(projects_yaml, core_project):
760 """
761 Clone/install all specified OpenStack repositories.
762
763@@ -627,6 +708,9 @@
764 for p in projects['repositories']:
765 repo = p['repository']
766 branch = p['branch']
767+ depth = '1'
768+ if 'depth' in p.keys():
769+ depth = p['depth']
770 if p['name'] == 'requirements':
771 repo_dir = _git_clone_and_install_single(repo, branch, depth,
772 parent_dir, http_proxy,
773@@ -671,19 +755,13 @@
774 """
775 Clone and install a single git repository.
776 """
777- dest_dir = os.path.join(parent_dir, os.path.basename(repo))
778-
779 if not os.path.exists(parent_dir):
780 juju_log('Directory already exists at {}. '
781 'No need to create directory.'.format(parent_dir))
782 os.mkdir(parent_dir)
783
784- if not os.path.exists(dest_dir):
785- juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
786- repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
787- depth=depth)
788- else:
789- repo_dir = dest_dir
790+ juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
791+ repo_dir = install_remote(repo, dest=parent_dir, branch=branch, depth=depth)
792
793 venv = os.path.join(parent_dir, 'venv')
794
795
796=== modified file 'hooks/charmhelpers/contrib/python/packages.py'
797--- hooks/charmhelpers/contrib/python/packages.py 2015-11-03 12:29:06 +0000
798+++ hooks/charmhelpers/contrib/python/packages.py 2016-02-12 19:36:45 +0000
799@@ -19,20 +19,35 @@
800
801 import os
802 import subprocess
803+import sys
804
805 from charmhelpers.fetch import apt_install, apt_update
806 from charmhelpers.core.hookenv import charm_dir, log
807
808-try:
809- from pip import main as pip_execute
810-except ImportError:
811- apt_update()
812- apt_install('python-pip')
813- from pip import main as pip_execute
814-
815 __author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
816
817
818+def pip_execute(*args, **kwargs):
819+ """Overriden pip_execute() to stop sys.path being changed.
820+
821+ The act of importing main from the pip module seems to cause add wheels
822+ from the /usr/share/python-wheels which are installed by various tools.
823+ This function ensures that sys.path remains the same after the call is
824+ executed.
825+ """
826+ try:
827+ _path = sys.path
828+ try:
829+ from pip import main as _pip_execute
830+ except ImportError:
831+ apt_update()
832+ apt_install('python-pip')
833+ from pip import main as _pip_execute
834+ _pip_execute(*args, **kwargs)
835+ finally:
836+ sys.path = _path
837+
838+
839 def parse_options(given, available):
840 """Given a set of options, check if available"""
841 for key, value in sorted(given.items()):
842@@ -42,8 +57,12 @@
843 yield "--{0}={1}".format(key, value)
844
845
846-def pip_install_requirements(requirements, **options):
847- """Install a requirements file """
848+def pip_install_requirements(requirements, constraints=None, **options):
849+ """Install a requirements file.
850+
851+ :param constraints: Path to pip constraints file.
852+ http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
853+ """
854 command = ["install"]
855
856 available_options = ('proxy', 'src', 'log', )
857@@ -51,8 +70,13 @@
858 command.append(option)
859
860 command.append("-r {0}".format(requirements))
861- log("Installing from file: {} with options: {}".format(requirements,
862- command))
863+ if constraints:
864+ command.append("-c {0}".format(constraints))
865+ log("Installing from file: {} with constraints {} "
866+ "and options: {}".format(requirements, constraints, command))
867+ else:
868+ log("Installing from file: {} with options: {}".format(requirements,
869+ command))
870 pip_execute(command)
871
872
873
874=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
875--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-11-03 12:29:06 +0000
876+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2016-02-12 19:36:45 +0000
877@@ -23,6 +23,8 @@
878 # James Page <james.page@ubuntu.com>
879 # Adam Gandelman <adamg@ubuntu.com>
880 #
881+import bisect
882+import six
883
884 import os
885 import shutil
886@@ -72,6 +74,394 @@
887 err to syslog = {use_syslog}
888 clog to syslog = {use_syslog}
889 """
890+# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs)
891+powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608]
892+
893+
894+def validator(value, valid_type, valid_range=None):
895+ """
896+ Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
897+ Example input:
898+ validator(value=1,
899+ valid_type=int,
900+ valid_range=[0, 2])
901+ This says I'm testing value=1. It must be an int inclusive in [0,2]
902+
903+ :param value: The value to validate
904+ :param valid_type: The type that value should be.
905+ :param valid_range: A range of values that value can assume.
906+ :return:
907+ """
908+ assert isinstance(value, valid_type), "{} is not a {}".format(
909+ value,
910+ valid_type)
911+ if valid_range is not None:
912+ assert isinstance(valid_range, list), \
913+ "valid_range must be a list, was given {}".format(valid_range)
914+ # If we're dealing with strings
915+ if valid_type is six.string_types:
916+ assert value in valid_range, \
917+ "{} is not in the list {}".format(value, valid_range)
918+ # Integer, float should have a min and max
919+ else:
920+ if len(valid_range) != 2:
921+ raise ValueError(
922+ "Invalid valid_range list of {} for {}. "
923+ "List must be [min,max]".format(valid_range, value))
924+ assert value >= valid_range[0], \
925+ "{} is less than minimum allowed value of {}".format(
926+ value, valid_range[0])
927+ assert value <= valid_range[1], \
928+ "{} is greater than maximum allowed value of {}".format(
929+ value, valid_range[1])
930+
931+
932+class PoolCreationError(Exception):
933+ """
934+ A custom error to inform the caller that a pool creation failed. Provides an error message
935+ """
936+ def __init__(self, message):
937+ super(PoolCreationError, self).__init__(message)
938+
939+
940+class Pool(object):
941+ """
942+ An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool.
943+ Do not call create() on this base class as it will not do anything. Instantiate a child class and call create().
944+ """
945+ def __init__(self, service, name):
946+ self.service = service
947+ self.name = name
948+
949+ # Create the pool if it doesn't exist already
950+ # To be implemented by subclasses
951+ def create(self):
952+ pass
953+
954+ def add_cache_tier(self, cache_pool, mode):
955+ """
956+ Adds a new cache tier to an existing pool.
957+ :param cache_pool: six.string_types. The cache tier pool name to add.
958+ :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"]
959+ :return: None
960+ """
961+ # Check the input types and values
962+ validator(value=cache_pool, valid_type=six.string_types)
963+ validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"])
964+
965+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool])
966+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode])
967+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool])
968+ check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom'])
969+
970+ def remove_cache_tier(self, cache_pool):
971+ """
972+ Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete.
973+ :param cache_pool: six.string_types. The cache tier pool name to remove.
974+ :return: None
975+ """
976+ # read-only is easy, writeback is much harder
977+ mode = get_cache_mode(cache_pool)
978+ if mode == 'readonly':
979+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
980+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
981+
982+ elif mode == 'writeback':
983+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'])
984+ # Flush the cache and wait for it to return
985+ check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
986+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
987+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
988+
989+ def get_pgs(self, pool_size):
990+ """
991+ :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for
992+ erasure coded pools
993+ :return: int. The number of pgs to use.
994+ """
995+ validator(value=pool_size, valid_type=int)
996+ osds = get_osds(self.service)
997+ if not osds:
998+ # NOTE(james-page): Default to 200 for older ceph versions
999+ # which don't support OSD query from cli
1000+ return 200
1001+
1002+ # Calculate based on Ceph best practices
1003+ if osds < 5:
1004+ return 128
1005+ elif 5 < osds < 10:
1006+ return 512
1007+ elif 10 < osds < 50:
1008+ return 4096
1009+ else:
1010+ estimate = (osds * 100) / pool_size
1011+ # Return the next nearest power of 2
1012+ index = bisect.bisect_right(powers_of_two, estimate)
1013+ return powers_of_two[index]
1014+
1015+
1016+class ReplicatedPool(Pool):
1017+ def __init__(self, service, name, replicas=2):
1018+ super(ReplicatedPool, self).__init__(service=service, name=name)
1019+ self.replicas = replicas
1020+
1021+ def create(self):
1022+ if not pool_exists(self.service, self.name):
1023+ # Create it
1024+ pgs = self.get_pgs(self.replicas)
1025+ cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs)]
1026+ try:
1027+ check_call(cmd)
1028+ except CalledProcessError:
1029+ raise
1030+
1031+
1032+# Default jerasure erasure coded pool
1033+class ErasurePool(Pool):
1034+ def __init__(self, service, name, erasure_code_profile="default"):
1035+ super(ErasurePool, self).__init__(service=service, name=name)
1036+ self.erasure_code_profile = erasure_code_profile
1037+
1038+ def create(self):
1039+ if not pool_exists(self.service, self.name):
1040+ # Try to find the erasure profile information so we can properly size the pgs
1041+ erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile)
1042+
1043+ # Check for errors
1044+ if erasure_profile is None:
1045+ log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile),
1046+ level=ERROR)
1047+ raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile))
1048+ if 'k' not in erasure_profile or 'm' not in erasure_profile:
1049+ # Error
1050+ log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile),
1051+ level=ERROR)
1052+ raise PoolCreationError(
1053+ message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile))
1054+
1055+ pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m']))
1056+ # Create it
1057+ cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs),
1058+ 'erasure', self.erasure_code_profile]
1059+ try:
1060+ check_call(cmd)
1061+ except CalledProcessError:
1062+ raise
1063+
1064+ """Get an existing erasure code profile if it already exists.
1065+ Returns json formatted output"""
1066+
1067+
1068+def get_erasure_profile(service, name):
1069+ """
1070+ :param service: six.string_types. The Ceph user name to run the command under
1071+ :param name:
1072+ :return:
1073+ """
1074+ try:
1075+ out = check_output(['ceph', '--id', service,
1076+ 'osd', 'erasure-code-profile', 'get',
1077+ name, '--format=json'])
1078+ return json.loads(out)
1079+ except (CalledProcessError, OSError, ValueError):
1080+ return None
1081+
1082+
1083+def pool_set(service, pool_name, key, value):
1084+ """
1085+ Sets a value for a RADOS pool in ceph.
1086+ :param service: six.string_types. The Ceph user name to run the command under
1087+ :param pool_name: six.string_types
1088+ :param key: six.string_types
1089+ :param value:
1090+ :return: None. Can raise CalledProcessError
1091+ """
1092+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
1093+ try:
1094+ check_call(cmd)
1095+ except CalledProcessError:
1096+ raise
1097+
1098+
1099+def snapshot_pool(service, pool_name, snapshot_name):
1100+ """
1101+ Snapshots a RADOS pool in ceph.
1102+ :param service: six.string_types. The Ceph user name to run the command under
1103+ :param pool_name: six.string_types
1104+ :param snapshot_name: six.string_types
1105+ :return: None. Can raise CalledProcessError
1106+ """
1107+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name]
1108+ try:
1109+ check_call(cmd)
1110+ except CalledProcessError:
1111+ raise
1112+
1113+
1114+def remove_pool_snapshot(service, pool_name, snapshot_name):
1115+ """
1116+ Remove a snapshot from a RADOS pool in ceph.
1117+ :param service: six.string_types. The Ceph user name to run the command under
1118+ :param pool_name: six.string_types
1119+ :param snapshot_name: six.string_types
1120+ :return: None. Can raise CalledProcessError
1121+ """
1122+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
1123+ try:
1124+ check_call(cmd)
1125+ except CalledProcessError:
1126+ raise
1127+
1128+
1129+# max_bytes should be an int or long
1130+def set_pool_quota(service, pool_name, max_bytes):
1131+ """
1132+ :param service: six.string_types. The Ceph user name to run the command under
1133+ :param pool_name: six.string_types
1134+ :param max_bytes: int or long
1135+ :return: None. Can raise CalledProcessError
1136+ """
1137+ # Set a byte quota on a RADOS pool in ceph.
1138+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', max_bytes]
1139+ try:
1140+ check_call(cmd)
1141+ except CalledProcessError:
1142+ raise
1143+
1144+
1145+def remove_pool_quota(service, pool_name):
1146+ """
1147+ Set a byte quota on a RADOS pool in ceph.
1148+ :param service: six.string_types. The Ceph user name to run the command under
1149+ :param pool_name: six.string_types
1150+ :return: None. Can raise CalledProcessError
1151+ """
1152+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
1153+ try:
1154+ check_call(cmd)
1155+ except CalledProcessError:
1156+ raise
1157+
1158+
1159+def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host',
1160+ data_chunks=2, coding_chunks=1,
1161+ locality=None, durability_estimator=None):
1162+ """
1163+ Create a new erasure code profile if one does not already exist for it. Updates
1164+ the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
1165+ for more details
1166+ :param service: six.string_types. The Ceph user name to run the command under
1167+ :param profile_name: six.string_types
1168+ :param erasure_plugin_name: six.string_types
1169+ :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region',
1170+ 'room', 'root', 'row'])
1171+ :param data_chunks: int
1172+ :param coding_chunks: int
1173+ :param locality: int
1174+ :param durability_estimator: int
1175+ :return: None. Can raise CalledProcessError
1176+ """
1177+ # Ensure this failure_domain is allowed by Ceph
1178+ validator(failure_domain, six.string_types,
1179+ ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
1180+
1181+ cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
1182+ 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
1183+ 'ruleset_failure_domain=' + failure_domain]
1184+ if locality is not None and durability_estimator is not None:
1185+ raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
1186+
1187+ # Add plugin specific information
1188+ if locality is not None:
1189+ # For local erasure codes
1190+ cmd.append('l=' + str(locality))
1191+ if durability_estimator is not None:
1192+ # For Shec erasure codes
1193+ cmd.append('c=' + str(durability_estimator))
1194+
1195+ if erasure_profile_exists(service, profile_name):
1196+ cmd.append('--force')
1197+
1198+ try:
1199+ check_call(cmd)
1200+ except CalledProcessError:
1201+ raise
1202+
1203+
1204+def rename_pool(service, old_name, new_name):
1205+ """
1206+ Rename a Ceph pool from old_name to new_name
1207+ :param service: six.string_types. The Ceph user name to run the command under
1208+ :param old_name: six.string_types
1209+ :param new_name: six.string_types
1210+ :return: None
1211+ """
1212+ validator(value=old_name, valid_type=six.string_types)
1213+ validator(value=new_name, valid_type=six.string_types)
1214+
1215+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
1216+ check_call(cmd)
1217+
1218+
1219+def erasure_profile_exists(service, name):
1220+ """
1221+ Check to see if an Erasure code profile already exists.
1222+ :param service: six.string_types. The Ceph user name to run the command under
1223+ :param name: six.string_types
1224+ :return: int or None
1225+ """
1226+ validator(value=name, valid_type=six.string_types)
1227+ try:
1228+ check_call(['ceph', '--id', service,
1229+ 'osd', 'erasure-code-profile', 'get',
1230+ name])
1231+ return True
1232+ except CalledProcessError:
1233+ return False
1234+
1235+
1236+def get_cache_mode(service, pool_name):
1237+ """
1238+ Find the current caching mode of the pool_name given.
1239+ :param service: six.string_types. The Ceph user name to run the command under
1240+ :param pool_name: six.string_types
1241+ :return: int or None
1242+ """
1243+ validator(value=service, valid_type=six.string_types)
1244+ validator(value=pool_name, valid_type=six.string_types)
1245+ out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json'])
1246+ try:
1247+ osd_json = json.loads(out)
1248+ for pool in osd_json['pools']:
1249+ if pool['pool_name'] == pool_name:
1250+ return pool['cache_mode']
1251+ return None
1252+ except ValueError:
1253+ raise
1254+
1255+
1256+def pool_exists(service, name):
1257+ """Check to see if a RADOS pool already exists."""
1258+ try:
1259+ out = check_output(['rados', '--id', service,
1260+ 'lspools']).decode('UTF-8')
1261+ except CalledProcessError:
1262+ return False
1263+
1264+ return name in out
1265+
1266+
1267+def get_osds(service):
1268+ """Return a list of all Ceph Object Storage Daemons currently in the
1269+ cluster.
1270+ """
1271+ version = ceph_version()
1272+ if version and version >= '0.56':
1273+ return json.loads(check_output(['ceph', '--id', service,
1274+ 'osd', 'ls',
1275+ '--format=json']).decode('UTF-8'))
1276+
1277+ return None
1278
1279
1280 def install():
1281@@ -101,53 +491,37 @@
1282 check_call(cmd)
1283
1284
1285-def pool_exists(service, name):
1286- """Check to see if a RADOS pool already exists."""
1287- try:
1288- out = check_output(['rados', '--id', service,
1289- 'lspools']).decode('UTF-8')
1290- except CalledProcessError:
1291- return False
1292-
1293- return name in out
1294-
1295-
1296-def get_osds(service):
1297- """Return a list of all Ceph Object Storage Daemons currently in the
1298- cluster.
1299- """
1300- version = ceph_version()
1301- if version and version >= '0.56':
1302- return json.loads(check_output(['ceph', '--id', service,
1303- 'osd', 'ls',
1304- '--format=json']).decode('UTF-8'))
1305-
1306- return None
1307-
1308-
1309-def create_pool(service, name, replicas=3):
1310+def update_pool(client, pool, settings):
1311+ cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
1312+ for k, v in six.iteritems(settings):
1313+ cmd.append(k)
1314+ cmd.append(v)
1315+
1316+ check_call(cmd)
1317+
1318+
1319+def create_pool(service, name, replicas=3, pg_num=None):
1320 """Create a new RADOS pool."""
1321 if pool_exists(service, name):
1322 log("Ceph pool {} already exists, skipping creation".format(name),
1323 level=WARNING)
1324 return
1325
1326- # Calculate the number of placement groups based
1327- # on upstream recommended best practices.
1328- osds = get_osds(service)
1329- if osds:
1330- pgnum = (len(osds) * 100 // replicas)
1331- else:
1332- # NOTE(james-page): Default to 200 for older ceph versions
1333- # which don't support OSD query from cli
1334- pgnum = 200
1335-
1336- cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
1337- check_call(cmd)
1338-
1339- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
1340- str(replicas)]
1341- check_call(cmd)
1342+ if not pg_num:
1343+ # Calculate the number of placement groups based
1344+ # on upstream recommended best practices.
1345+ osds = get_osds(service)
1346+ if osds:
1347+ pg_num = (len(osds) * 100 // replicas)
1348+ else:
1349+ # NOTE(james-page): Default to 200 for older ceph versions
1350+ # which don't support OSD query from cli
1351+ pg_num = 200
1352+
1353+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)]
1354+ check_call(cmd)
1355+
1356+ update_pool(service, name, settings={'size': str(replicas)})
1357
1358
1359 def delete_pool(service, name):
1360@@ -202,10 +576,10 @@
1361 log('Created new keyfile at %s.' % keyfile, level=INFO)
1362
1363
1364-def get_ceph_nodes():
1365- """Query named relation 'ceph' to determine current nodes."""
1366+def get_ceph_nodes(relation='ceph'):
1367+ """Query named relation to determine current nodes."""
1368 hosts = []
1369- for r_id in relation_ids('ceph'):
1370+ for r_id in relation_ids(relation):
1371 for unit in related_units(r_id):
1372 hosts.append(relation_get('private-address', unit=unit, rid=r_id))
1373
1374@@ -357,14 +731,14 @@
1375 service_start(svc)
1376
1377
1378-def ensure_ceph_keyring(service, user=None, group=None):
1379+def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'):
1380 """Ensures a ceph keyring is created for a named service and optionally
1381 ensures user and group ownership.
1382
1383 Returns False if no ceph key is available in relation state.
1384 """
1385 key = None
1386- for rid in relation_ids('ceph'):
1387+ for rid in relation_ids(relation):
1388 for unit in related_units(rid):
1389 key = relation_get('key', rid=rid, unit=unit)
1390 if key:
1391@@ -405,6 +779,7 @@
1392
1393 The API is versioned and defaults to version 1.
1394 """
1395+
1396 def __init__(self, api_version=1, request_id=None):
1397 self.api_version = api_version
1398 if request_id:
1399@@ -413,9 +788,16 @@
1400 self.request_id = str(uuid.uuid1())
1401 self.ops = []
1402
1403- def add_op_create_pool(self, name, replica_count=3):
1404+ def add_op_create_pool(self, name, replica_count=3, pg_num=None):
1405+ """Adds an operation to create a pool.
1406+
1407+ @param pg_num setting: optional setting. If not provided, this value
1408+ will be calculated by the broker based on how many OSDs are in the
1409+ cluster at the time of creation. Note that, if provided, this value
1410+ will be capped at the current available maximum.
1411+ """
1412 self.ops.append({'op': 'create-pool', 'name': name,
1413- 'replicas': replica_count})
1414+ 'replicas': replica_count, 'pg_num': pg_num})
1415
1416 def set_ops(self, ops):
1417 """Set request ops to provided value.
1418@@ -433,8 +815,8 @@
1419 def _ops_equal(self, other):
1420 if len(self.ops) == len(other.ops):
1421 for req_no in range(0, len(self.ops)):
1422- for key in ['replicas', 'name', 'op']:
1423- if self.ops[req_no][key] != other.ops[req_no][key]:
1424+ for key in ['replicas', 'name', 'op', 'pg_num']:
1425+ if self.ops[req_no].get(key) != other.ops[req_no].get(key):
1426 return False
1427 else:
1428 return False
1429@@ -540,7 +922,7 @@
1430 return request
1431
1432
1433-def get_request_states(request):
1434+def get_request_states(request, relation='ceph'):
1435 """Return a dict of requests per relation id with their corresponding
1436 completion state.
1437
1438@@ -552,7 +934,7 @@
1439 """
1440 complete = []
1441 requests = {}
1442- for rid in relation_ids('ceph'):
1443+ for rid in relation_ids(relation):
1444 complete = False
1445 previous_request = get_previous_request(rid)
1446 if request == previous_request:
1447@@ -570,14 +952,14 @@
1448 return requests
1449
1450
1451-def is_request_sent(request):
1452+def is_request_sent(request, relation='ceph'):
1453 """Check to see if a functionally equivalent request has already been sent
1454
1455 Returns True if a similair request has been sent
1456
1457 @param request: A CephBrokerRq object
1458 """
1459- states = get_request_states(request)
1460+ states = get_request_states(request, relation=relation)
1461 for rid in states.keys():
1462 if not states[rid]['sent']:
1463 return False
1464@@ -585,7 +967,7 @@
1465 return True
1466
1467
1468-def is_request_complete(request):
1469+def is_request_complete(request, relation='ceph'):
1470 """Check to see if a functionally equivalent request has already been
1471 completed
1472
1473@@ -593,7 +975,7 @@
1474
1475 @param request: A CephBrokerRq object
1476 """
1477- states = get_request_states(request)
1478+ states = get_request_states(request, relation=relation)
1479 for rid in states.keys():
1480 if not states[rid]['complete']:
1481 return False
1482@@ -643,15 +1025,15 @@
1483 return 'broker-rsp-' + local_unit().replace('/', '-')
1484
1485
1486-def send_request_if_needed(request):
1487+def send_request_if_needed(request, relation='ceph'):
1488 """Send broker request if an equivalent request has not already been sent
1489
1490 @param request: A CephBrokerRq object
1491 """
1492- if is_request_sent(request):
1493+ if is_request_sent(request, relation=relation):
1494 log('Request already sent but not complete, not sending new request',
1495 level=DEBUG)
1496 else:
1497- for rid in relation_ids('ceph'):
1498+ for rid in relation_ids(relation):
1499 log('Sending request {}'.format(request.request_id), level=DEBUG)
1500 relation_set(relation_id=rid, broker_req=request.request)
1501
1502=== modified file 'hooks/charmhelpers/contrib/storage/linux/loopback.py'
1503--- hooks/charmhelpers/contrib/storage/linux/loopback.py 2015-06-24 12:22:08 +0000
1504+++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2016-02-12 19:36:45 +0000
1505@@ -76,3 +76,13 @@
1506 check_call(cmd)
1507
1508 return create_loopback(path)
1509+
1510+
1511+def is_mapped_loopback_device(device):
1512+ """
1513+ Checks if a given device name is an existing/mapped loopback device.
1514+ :param device: str: Full path to the device (eg, /dev/loop1).
1515+ :returns: str: Path to the backing file if is a loopback device
1516+ empty string otherwise
1517+ """
1518+ return loopback_devices().get(device, "")
1519
1520=== modified file 'hooks/charmhelpers/core/hookenv.py'
1521--- hooks/charmhelpers/core/hookenv.py 2015-11-03 12:29:06 +0000
1522+++ hooks/charmhelpers/core/hookenv.py 2016-02-12 19:36:45 +0000
1523@@ -492,7 +492,7 @@
1524
1525 @cached
1526 def peer_relation_id():
1527- '''Get a peer relation id if a peer relation has been joined, else None.'''
1528+ '''Get the peers relation id if a peers relation has been joined, else None.'''
1529 md = metadata()
1530 section = md.get('peers')
1531 if section:
1532@@ -517,12 +517,12 @@
1533 def relation_to_role_and_interface(relation_name):
1534 """
1535 Given the name of a relation, return the role and the name of the interface
1536- that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).
1537+ that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
1538
1539 :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
1540 """
1541 _metadata = metadata()
1542- for role in ('provides', 'requires', 'peer'):
1543+ for role in ('provides', 'requires', 'peers'):
1544 interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
1545 if interface:
1546 return role, interface
1547@@ -534,7 +534,7 @@
1548 """
1549 Given a role and interface name, return a list of relation names for the
1550 current charm that use that interface under that role (where role is one
1551- of ``provides``, ``requires``, or ``peer``).
1552+ of ``provides``, ``requires``, or ``peers``).
1553
1554 :returns: A list of relation names.
1555 """
1556@@ -555,7 +555,7 @@
1557 :returns: A list of relation names.
1558 """
1559 results = []
1560- for role in ('provides', 'requires', 'peer'):
1561+ for role in ('provides', 'requires', 'peers'):
1562 results.extend(role_and_interface_to_relations(role, interface_name))
1563 return results
1564
1565@@ -637,7 +637,7 @@
1566
1567
1568 @cached
1569-def storage_get(attribute="", storage_id=""):
1570+def storage_get(attribute=None, storage_id=None):
1571 """Get storage attributes"""
1572 _args = ['storage-get', '--format=json']
1573 if storage_id:
1574@@ -651,7 +651,7 @@
1575
1576
1577 @cached
1578-def storage_list(storage_name=""):
1579+def storage_list(storage_name=None):
1580 """List the storage IDs for the unit"""
1581 _args = ['storage-list', '--format=json']
1582 if storage_name:
1583@@ -878,6 +878,40 @@
1584 subprocess.check_call(cmd)
1585
1586
1587+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1588+def payload_register(ptype, klass, pid):
1589+ """ is used while a hook is running to let Juju know that a
1590+ payload has been started."""
1591+ cmd = ['payload-register']
1592+ for x in [ptype, klass, pid]:
1593+ cmd.append(x)
1594+ subprocess.check_call(cmd)
1595+
1596+
1597+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1598+def payload_unregister(klass, pid):
1599+ """ is used while a hook is running to let Juju know
1600+ that a payload has been manually stopped. The <class> and <id> provided
1601+ must match a payload that has been previously registered with juju using
1602+ payload-register."""
1603+ cmd = ['payload-unregister']
1604+ for x in [klass, pid]:
1605+ cmd.append(x)
1606+ subprocess.check_call(cmd)
1607+
1608+
1609+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1610+def payload_status_set(klass, pid, status):
1611+ """is used to update the current status of a registered payload.
1612+ The <class> and <id> provided must match a payload that has been previously
1613+ registered with juju using payload-register. The <status> must be one of the
1614+ follow: starting, started, stopping, stopped"""
1615+ cmd = ['payload-status-set']
1616+ for x in [klass, pid, status]:
1617+ cmd.append(x)
1618+ subprocess.check_call(cmd)
1619+
1620+
1621 @cached
1622 def juju_version():
1623 """Full version string (eg. '1.23.3.1-trusty-amd64')"""
1624
1625=== modified file 'hooks/charmhelpers/core/host.py'
1626--- hooks/charmhelpers/core/host.py 2015-11-03 12:29:06 +0000
1627+++ hooks/charmhelpers/core/host.py 2016-02-12 19:36:45 +0000
1628@@ -67,10 +67,14 @@
1629 """Pause a system service.
1630
1631 Stop it, and prevent it from starting again at boot."""
1632- stopped = service_stop(service_name)
1633+ stopped = True
1634+ if service_running(service_name):
1635+ stopped = service_stop(service_name)
1636 upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
1637 sysv_file = os.path.join(initd_dir, service_name)
1638- if os.path.exists(upstart_file):
1639+ if init_is_systemd():
1640+ service('disable', service_name)
1641+ elif os.path.exists(upstart_file):
1642 override_path = os.path.join(
1643 init_dir, '{}.override'.format(service_name))
1644 with open(override_path, 'w') as fh:
1645@@ -78,9 +82,9 @@
1646 elif os.path.exists(sysv_file):
1647 subprocess.check_call(["update-rc.d", service_name, "disable"])
1648 else:
1649- # XXX: Support SystemD too
1650 raise ValueError(
1651- "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
1652+ "Unable to detect {0} as SystemD, Upstart {1} or"
1653+ " SysV {2}".format(
1654 service_name, upstart_file, sysv_file))
1655 return stopped
1656
1657@@ -92,7 +96,9 @@
1658 Reenable starting again at boot. Start the service"""
1659 upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
1660 sysv_file = os.path.join(initd_dir, service_name)
1661- if os.path.exists(upstart_file):
1662+ if init_is_systemd():
1663+ service('enable', service_name)
1664+ elif os.path.exists(upstart_file):
1665 override_path = os.path.join(
1666 init_dir, '{}.override'.format(service_name))
1667 if os.path.exists(override_path):
1668@@ -100,34 +106,43 @@
1669 elif os.path.exists(sysv_file):
1670 subprocess.check_call(["update-rc.d", service_name, "enable"])
1671 else:
1672- # XXX: Support SystemD too
1673 raise ValueError(
1674- "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
1675+ "Unable to detect {0} as SystemD, Upstart {1} or"
1676+ " SysV {2}".format(
1677 service_name, upstart_file, sysv_file))
1678
1679- started = service_start(service_name)
1680+ started = service_running(service_name)
1681+ if not started:
1682+ started = service_start(service_name)
1683 return started
1684
1685
1686 def service(action, service_name):
1687 """Control a system service"""
1688- cmd = ['service', service_name, action]
1689+ if init_is_systemd():
1690+ cmd = ['systemctl', action, service_name]
1691+ else:
1692+ cmd = ['service', service_name, action]
1693 return subprocess.call(cmd) == 0
1694
1695
1696-def service_running(service):
1697+def service_running(service_name):
1698 """Determine whether a system service is running"""
1699- try:
1700- output = subprocess.check_output(
1701- ['service', service, 'status'],
1702- stderr=subprocess.STDOUT).decode('UTF-8')
1703- except subprocess.CalledProcessError:
1704- return False
1705+ if init_is_systemd():
1706+ return service('is-active', service_name)
1707 else:
1708- if ("start/running" in output or "is running" in output):
1709- return True
1710- else:
1711+ try:
1712+ output = subprocess.check_output(
1713+ ['service', service_name, 'status'],
1714+ stderr=subprocess.STDOUT).decode('UTF-8')
1715+ except subprocess.CalledProcessError:
1716 return False
1717+ else:
1718+ if ("start/running" in output or "is running" in output or
1719+ "up and running" in output):
1720+ return True
1721+ else:
1722+ return False
1723
1724
1725 def service_available(service_name):
1726@@ -142,8 +157,29 @@
1727 return True
1728
1729
1730-def adduser(username, password=None, shell='/bin/bash', system_user=False):
1731- """Add a user to the system"""
1732+SYSTEMD_SYSTEM = '/run/systemd/system'
1733+
1734+
1735+def init_is_systemd():
1736+ """Return True if the host system uses systemd, False otherwise."""
1737+ return os.path.isdir(SYSTEMD_SYSTEM)
1738+
1739+
1740+def adduser(username, password=None, shell='/bin/bash', system_user=False,
1741+ primary_group=None, secondary_groups=None):
1742+ """Add a user to the system.
1743+
1744+ Will log but otherwise succeed if the user already exists.
1745+
1746+ :param str username: Username to create
1747+ :param str password: Password for user; if ``None``, create a system user
1748+ :param str shell: The default shell for the user
1749+ :param bool system_user: Whether to create a login or system user
1750+ :param str primary_group: Primary group for user; defaults to username
1751+ :param list secondary_groups: Optional list of additional groups
1752+
1753+ :returns: The password database entry struct, as returned by `pwd.getpwnam`
1754+ """
1755 try:
1756 user_info = pwd.getpwnam(username)
1757 log('user {0} already exists!'.format(username))
1758@@ -158,6 +194,16 @@
1759 '--shell', shell,
1760 '--password', password,
1761 ])
1762+ if not primary_group:
1763+ try:
1764+ grp.getgrnam(username)
1765+ primary_group = username # avoid "group exists" error
1766+ except KeyError:
1767+ pass
1768+ if primary_group:
1769+ cmd.extend(['-g', primary_group])
1770+ if secondary_groups:
1771+ cmd.extend(['-G', ','.join(secondary_groups)])
1772 cmd.append(username)
1773 subprocess.check_call(cmd)
1774 user_info = pwd.getpwnam(username)
1775@@ -255,14 +301,12 @@
1776
1777
1778 def fstab_remove(mp):
1779- """Remove the given mountpoint entry from /etc/fstab
1780- """
1781+ """Remove the given mountpoint entry from /etc/fstab"""
1782 return Fstab.remove_by_mountpoint(mp)
1783
1784
1785 def fstab_add(dev, mp, fs, options=None):
1786- """Adds the given device entry to the /etc/fstab file
1787- """
1788+ """Adds the given device entry to the /etc/fstab file"""
1789 return Fstab.add(dev, mp, fs, options=options)
1790
1791
1792@@ -318,8 +362,7 @@
1793
1794
1795 def file_hash(path, hash_type='md5'):
1796- """
1797- Generate a hash checksum of the contents of 'path' or None if not found.
1798+ """Generate a hash checksum of the contents of 'path' or None if not found.
1799
1800 :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
1801 such as md5, sha1, sha256, sha512, etc.
1802@@ -334,10 +377,9 @@
1803
1804
1805 def path_hash(path):
1806- """
1807- Generate a hash checksum of all files matching 'path'. Standard wildcards
1808- like '*' and '?' are supported, see documentation for the 'glob' module for
1809- more information.
1810+ """Generate a hash checksum of all files matching 'path'. Standard
1811+ wildcards like '*' and '?' are supported, see documentation for the 'glob'
1812+ module for more information.
1813
1814 :return: dict: A { filename: hash } dictionary for all matched files.
1815 Empty if none found.
1816@@ -349,8 +391,7 @@
1817
1818
1819 def check_hash(path, checksum, hash_type='md5'):
1820- """
1821- Validate a file using a cryptographic checksum.
1822+ """Validate a file using a cryptographic checksum.
1823
1824 :param str checksum: Value of the checksum used to validate the file.
1825 :param str hash_type: Hash algorithm used to generate `checksum`.
1826@@ -365,6 +406,7 @@
1827
1828
1829 class ChecksumError(ValueError):
1830+ """A class derived from Value error to indicate the checksum failed."""
1831 pass
1832
1833
1834@@ -470,7 +512,7 @@
1835
1836
1837 def list_nics(nic_type=None):
1838- '''Return a list of nics of given type(s)'''
1839+ """Return a list of nics of given type(s)"""
1840 if isinstance(nic_type, six.string_types):
1841 int_types = [nic_type]
1842 else:
1843@@ -512,12 +554,13 @@
1844
1845
1846 def set_nic_mtu(nic, mtu):
1847- '''Set MTU on a network interface'''
1848+ """Set the Maximum Transmission Unit (MTU) on a network interface."""
1849 cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
1850 subprocess.check_call(cmd)
1851
1852
1853 def get_nic_mtu(nic):
1854+ """Return the Maximum Transmission Unit (MTU) for a network interface."""
1855 cmd = ['ip', 'addr', 'show', nic]
1856 ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
1857 mtu = ""
1858@@ -529,6 +572,7 @@
1859
1860
1861 def get_nic_hwaddr(nic):
1862+ """Return the Media Access Control (MAC) for a network interface."""
1863 cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
1864 ip_output = subprocess.check_output(cmd).decode('UTF-8')
1865 hwaddr = ""
1866@@ -539,7 +583,7 @@
1867
1868
1869 def cmp_pkgrevno(package, revno, pkgcache=None):
1870- '''Compare supplied revno with the revno of the installed package
1871+ """Compare supplied revno with the revno of the installed package
1872
1873 * 1 => Installed revno is greater than supplied arg
1874 * 0 => Installed revno is the same as supplied arg
1875@@ -548,7 +592,7 @@
1876 This function imports apt_cache function from charmhelpers.fetch if
1877 the pkgcache argument is None. Be sure to add charmhelpers.fetch if
1878 you call this function, or pass an apt_pkg.Cache() instance.
1879- '''
1880+ """
1881 import apt_pkg
1882 if not pkgcache:
1883 from charmhelpers.fetch import apt_cache
1884@@ -558,19 +602,27 @@
1885
1886
1887 @contextmanager
1888-def chdir(d):
1889+def chdir(directory):
1890+ """Change the current working directory to a different directory for a code
1891+ block and return the previous directory after the block exits. Useful to
1892+ run commands from a specified directory.
1893+
1894+ :param str directory: The directory path to change to for this context.
1895+ """
1896 cur = os.getcwd()
1897 try:
1898- yield os.chdir(d)
1899+ yield os.chdir(directory)
1900 finally:
1901 os.chdir(cur)
1902
1903
1904 def chownr(path, owner, group, follow_links=True, chowntopdir=False):
1905- """
1906- Recursively change user and group ownership of files and directories
1907+ """Recursively change user and group ownership of files and directories
1908 in given path. Doesn't chown path itself by default, only its children.
1909
1910+ :param str path: The string path to start changing ownership.
1911+ :param str owner: The owner string to use when looking up the uid.
1912+ :param str group: The group string to use when looking up the gid.
1913 :param bool follow_links: Also Chown links if True
1914 :param bool chowntopdir: Also chown path itself if True
1915 """
1916@@ -594,15 +646,23 @@
1917
1918
1919 def lchownr(path, owner, group):
1920+ """Recursively change user and group ownership of files and directories
1921+ in a given path, not following symbolic links. See the documentation for
1922+ 'os.lchown' for more information.
1923+
1924+ :param str path: The string path to start changing ownership.
1925+ :param str owner: The owner string to use when looking up the uid.
1926+ :param str group: The group string to use when looking up the gid.
1927+ """
1928 chownr(path, owner, group, follow_links=False)
1929
1930
1931 def get_total_ram():
1932- '''The total amount of system RAM in bytes.
1933+ """The total amount of system RAM in bytes.
1934
1935 This is what is reported by the OS, and may be overcommitted when
1936 there are multiple containers hosted on the same machine.
1937- '''
1938+ """
1939 with open('/proc/meminfo', 'r') as f:
1940 for line in f.readlines():
1941 if line:
1942
1943=== modified file 'hooks/charmhelpers/core/services/helpers.py'
1944--- hooks/charmhelpers/core/services/helpers.py 2015-11-03 12:29:06 +0000
1945+++ hooks/charmhelpers/core/services/helpers.py 2016-02-12 19:36:45 +0000
1946@@ -243,13 +243,15 @@
1947 :param str source: The template source file, relative to
1948 `$CHARM_DIR/templates`
1949
1950- :param str target: The target to write the rendered template to
1951+ :param str target: The target to write the rendered template to (or None)
1952 :param str owner: The owner of the rendered file
1953 :param str group: The group of the rendered file
1954 :param int perms: The permissions of the rendered file
1955 :param partial on_change_action: functools partial to be executed when
1956 rendered file changes
1957 :param jinja2 loader template_loader: A jinja2 template loader
1958+
1959+ :return str: The rendered template
1960 """
1961 def __init__(self, source, target,
1962 owner='root', group='root', perms=0o444,
1963@@ -267,12 +269,14 @@
1964 if self.on_change_action and os.path.isfile(self.target):
1965 pre_checksum = host.file_hash(self.target)
1966 service = manager.get_service(service_name)
1967- context = {}
1968+ context = {'ctx': {}}
1969 for ctx in service.get('required_data', []):
1970 context.update(ctx)
1971- templating.render(self.source, self.target, context,
1972- self.owner, self.group, self.perms,
1973- template_loader=self.template_loader)
1974+ context['ctx'].update(ctx)
1975+
1976+ result = templating.render(self.source, self.target, context,
1977+ self.owner, self.group, self.perms,
1978+ template_loader=self.template_loader)
1979 if self.on_change_action:
1980 if pre_checksum == host.file_hash(self.target):
1981 hookenv.log(
1982@@ -281,6 +285,8 @@
1983 else:
1984 self.on_change_action()
1985
1986+ return result
1987+
1988
1989 # Convenience aliases for templates
1990 render_template = template = TemplateCallback
1991
1992=== modified file 'hooks/charmhelpers/core/templating.py'
1993--- hooks/charmhelpers/core/templating.py 2015-11-03 12:29:06 +0000
1994+++ hooks/charmhelpers/core/templating.py 2016-02-12 19:36:45 +0000
1995@@ -27,7 +27,8 @@
1996
1997 The `source` path, if not absolute, is relative to the `templates_dir`.
1998
1999- The `target` path should be absolute.
2000+ The `target` path should be absolute. It can also be `None`, in which
2001+ case no file will be written.
2002
2003 The context should be a dict containing the values to be replaced in the
2004 template.
2005@@ -36,6 +37,9 @@
2006
2007 If omitted, `templates_dir` defaults to the `templates` folder in the charm.
2008
2009+ The rendered template will be written to the file as well as being returned
2010+ as a string.
2011+
2012 Note: Using this requires python-jinja2; if it is not installed, calling
2013 this will attempt to use charmhelpers.fetch.apt_install to install it.
2014 """
2015@@ -67,9 +71,11 @@
2016 level=hookenv.ERROR)
2017 raise e
2018 content = template.render(context)
2019- target_dir = os.path.dirname(target)
2020- if not os.path.exists(target_dir):
2021- # This is a terrible default directory permission, as the file
2022- # or its siblings will often contain secrets.
2023- host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
2024- host.write_file(target, content.encode(encoding), owner, group, perms)
2025+ if target is not None:
2026+ target_dir = os.path.dirname(target)
2027+ if not os.path.exists(target_dir):
2028+ # This is a terrible default directory permission, as the file
2029+ # or its siblings will often contain secrets.
2030+ host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
2031+ host.write_file(target, content.encode(encoding), owner, group, perms)
2032+ return content
2033
2034=== modified file 'hooks/charmhelpers/fetch/__init__.py'
2035--- hooks/charmhelpers/fetch/__init__.py 2015-11-03 12:29:06 +0000
2036+++ hooks/charmhelpers/fetch/__init__.py 2016-02-12 19:36:45 +0000
2037@@ -98,6 +98,14 @@
2038 'liberty/proposed': 'trusty-proposed/liberty',
2039 'trusty-liberty/proposed': 'trusty-proposed/liberty',
2040 'trusty-proposed/liberty': 'trusty-proposed/liberty',
2041+ # Mitaka
2042+ 'mitaka': 'trusty-updates/mitaka',
2043+ 'trusty-mitaka': 'trusty-updates/mitaka',
2044+ 'trusty-mitaka/updates': 'trusty-updates/mitaka',
2045+ 'trusty-updates/mitaka': 'trusty-updates/mitaka',
2046+ 'mitaka/proposed': 'trusty-proposed/mitaka',
2047+ 'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
2048+ 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
2049 }
2050
2051 # The order of this list is very important. Handlers should be listed in from
2052@@ -411,7 +419,7 @@
2053 importlib.import_module(package),
2054 classname)
2055 plugin_list.append(handler_class())
2056- except (ImportError, AttributeError):
2057+ except NotImplementedError:
2058 # Skip missing plugins so that they can be ommitted from
2059 # installation if desired
2060 log("FetchHandler {} not found, skipping plugin".format(
2061
2062=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
2063--- hooks/charmhelpers/fetch/archiveurl.py 2015-11-03 12:29:06 +0000
2064+++ hooks/charmhelpers/fetch/archiveurl.py 2016-02-12 19:36:45 +0000
2065@@ -108,7 +108,7 @@
2066 install_opener(opener)
2067 response = urlopen(source)
2068 try:
2069- with open(dest, 'w') as dest_file:
2070+ with open(dest, 'wb') as dest_file:
2071 dest_file.write(response.read())
2072 except Exception as e:
2073 if os.path.isfile(dest):
2074
2075=== modified file 'hooks/charmhelpers/fetch/bzrurl.py'
2076--- hooks/charmhelpers/fetch/bzrurl.py 2015-06-24 12:22:08 +0000
2077+++ hooks/charmhelpers/fetch/bzrurl.py 2016-02-12 19:36:45 +0000
2078@@ -15,60 +15,50 @@
2079 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
2080
2081 import os
2082+from subprocess import check_call
2083 from charmhelpers.fetch import (
2084 BaseFetchHandler,
2085- UnhandledSource
2086+ UnhandledSource,
2087+ filter_installed_packages,
2088+ apt_install,
2089 )
2090 from charmhelpers.core.host import mkdir
2091
2092-import six
2093-if six.PY3:
2094- raise ImportError('bzrlib does not support Python3')
2095
2096-try:
2097- from bzrlib.branch import Branch
2098- from bzrlib import bzrdir, workingtree, errors
2099-except ImportError:
2100- from charmhelpers.fetch import apt_install
2101- apt_install("python-bzrlib")
2102- from bzrlib.branch import Branch
2103- from bzrlib import bzrdir, workingtree, errors
2104+if filter_installed_packages(['bzr']) != []:
2105+ apt_install(['bzr'])
2106+ if filter_installed_packages(['bzr']) != []:
2107+ raise NotImplementedError('Unable to install bzr')
2108
2109
2110 class BzrUrlFetchHandler(BaseFetchHandler):
2111 """Handler for bazaar branches via generic and lp URLs"""
2112 def can_handle(self, source):
2113 url_parts = self.parse_url(source)
2114- if url_parts.scheme not in ('bzr+ssh', 'lp'):
2115+ if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
2116 return False
2117+ elif not url_parts.scheme:
2118+ return os.path.exists(os.path.join(source, '.bzr'))
2119 else:
2120 return True
2121
2122 def branch(self, source, dest):
2123- url_parts = self.parse_url(source)
2124- # If we use lp:branchname scheme we need to load plugins
2125 if not self.can_handle(source):
2126 raise UnhandledSource("Cannot handle {}".format(source))
2127- if url_parts.scheme == "lp":
2128- from bzrlib.plugin import load_plugins
2129- load_plugins()
2130- try:
2131- local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
2132- except errors.AlreadyControlDirError:
2133- local_branch = Branch.open(dest)
2134- try:
2135- remote_branch = Branch.open(source)
2136- remote_branch.push(local_branch)
2137- tree = workingtree.WorkingTree.open(dest)
2138- tree.update()
2139- except Exception as e:
2140- raise e
2141+ if os.path.exists(dest):
2142+ check_call(['bzr', 'pull', '--overwrite', '-d', dest, source])
2143+ else:
2144+ check_call(['bzr', 'branch', source, dest])
2145
2146- def install(self, source):
2147+ def install(self, source, dest=None):
2148 url_parts = self.parse_url(source)
2149 branch_name = url_parts.path.strip("/").split("/")[-1]
2150- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
2151- branch_name)
2152+ if dest:
2153+ dest_dir = os.path.join(dest, branch_name)
2154+ else:
2155+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
2156+ branch_name)
2157+
2158 if not os.path.exists(dest_dir):
2159 mkdir(dest_dir, perms=0o755)
2160 try:
2161
2162=== modified file 'hooks/charmhelpers/fetch/giturl.py'
2163--- hooks/charmhelpers/fetch/giturl.py 2015-11-03 12:29:06 +0000
2164+++ hooks/charmhelpers/fetch/giturl.py 2016-02-12 19:36:45 +0000
2165@@ -15,24 +15,18 @@
2166 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
2167
2168 import os
2169+from subprocess import check_call, CalledProcessError
2170 from charmhelpers.fetch import (
2171 BaseFetchHandler,
2172- UnhandledSource
2173+ UnhandledSource,
2174+ filter_installed_packages,
2175+ apt_install,
2176 )
2177-from charmhelpers.core.host import mkdir
2178-
2179-import six
2180-if six.PY3:
2181- raise ImportError('GitPython does not support Python 3')
2182-
2183-try:
2184- from git import Repo
2185-except ImportError:
2186- from charmhelpers.fetch import apt_install
2187- apt_install("python-git")
2188- from git import Repo
2189-
2190-from git.exc import GitCommandError # noqa E402
2191+
2192+if filter_installed_packages(['git']) != []:
2193+ apt_install(['git'])
2194+ if filter_installed_packages(['git']) != []:
2195+ raise NotImplementedError('Unable to install git')
2196
2197
2198 class GitUrlFetchHandler(BaseFetchHandler):
2199@@ -40,19 +34,24 @@
2200 def can_handle(self, source):
2201 url_parts = self.parse_url(source)
2202 # TODO (mattyw) no support for ssh git@ yet
2203- if url_parts.scheme not in ('http', 'https', 'git'):
2204+ if url_parts.scheme not in ('http', 'https', 'git', ''):
2205 return False
2206+ elif not url_parts.scheme:
2207+ return os.path.exists(os.path.join(source, '.git'))
2208 else:
2209 return True
2210
2211- def clone(self, source, dest, branch, depth=None):
2212+ def clone(self, source, dest, branch="master", depth=None):
2213 if not self.can_handle(source):
2214 raise UnhandledSource("Cannot handle {}".format(source))
2215
2216- if depth:
2217- Repo.clone_from(source, dest, branch=branch, depth=depth)
2218+ if os.path.exists(dest):
2219+ cmd = ['git', '-C', dest, 'pull', source, branch]
2220 else:
2221- Repo.clone_from(source, dest, branch=branch)
2222+ cmd = ['git', 'clone', source, dest, '--branch', branch]
2223+ if depth:
2224+ cmd.extend(['--depth', depth])
2225+ check_call(cmd)
2226
2227 def install(self, source, branch="master", dest=None, depth=None):
2228 url_parts = self.parse_url(source)
2229@@ -62,11 +61,9 @@
2230 else:
2231 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
2232 branch_name)
2233- if not os.path.exists(dest_dir):
2234- mkdir(dest_dir, perms=0o755)
2235 try:
2236 self.clone(source, dest_dir, branch, depth)
2237- except GitCommandError as e:
2238+ except CalledProcessError as e:
2239 raise UnhandledSource(e)
2240 except OSError as e:
2241 raise UnhandledSource(e.strerror)
2242
2243=== modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
2244--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-11-12 11:42:00 +0000
2245+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2016-02-12 19:36:45 +0000
2246@@ -121,11 +121,12 @@
2247
2248 # Charms which should use the source config option
2249 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
2250- 'ceph-osd', 'ceph-radosgw']
2251+ 'ceph-osd', 'ceph-radosgw', 'ceph-mon']
2252
2253 # Charms which can not use openstack-origin, ie. many subordinates
2254 no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
2255- 'openvswitch-odl', 'neutron-api-odl', 'odl-controller']
2256+ 'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
2257+ 'cinder-backup']
2258
2259 if self.openstack:
2260 for svc in services:
2261@@ -225,7 +226,8 @@
2262 self.precise_havana, self.precise_icehouse,
2263 self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
2264 self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
2265- self.wily_liberty) = range(12)
2266+ self.wily_liberty, self.trusty_mitaka,
2267+ self.xenial_mitaka) = range(14)
2268
2269 releases = {
2270 ('precise', None): self.precise_essex,
2271@@ -237,9 +239,11 @@
2272 ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
2273 ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
2274 ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
2275+ ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
2276 ('utopic', None): self.utopic_juno,
2277 ('vivid', None): self.vivid_kilo,
2278- ('wily', None): self.wily_liberty}
2279+ ('wily', None): self.wily_liberty,
2280+ ('xenial', None): self.xenial_mitaka}
2281 return releases[(self.series, self.openstack)]
2282
2283 def _get_openstack_release_string(self):
2284@@ -256,6 +260,7 @@
2285 ('utopic', 'juno'),
2286 ('vivid', 'kilo'),
2287 ('wily', 'liberty'),
2288+ ('xenial', 'mitaka'),
2289 ])
2290 if self.openstack:
2291 os_origin = self.openstack.split(':')[1]

Subscribers

People subscribed via source and target branches

to all changes: