Merge lp:~1chb1n/charms/trusty/ceph-radosgw/stable-ch-sync-and-enable-tests into lp:~openstack-charmers-archive/charms/trusty/ceph-radosgw/trunk

Proposed by Ryan Beisner
Status: Needs review
Proposed branch: lp:~1chb1n/charms/trusty/ceph-radosgw/stable-ch-sync-and-enable-tests
Merge into: lp:~openstack-charmers-archive/charms/trusty/ceph-radosgw/trunk
Diff against target: 1826 lines (+807/-242)
18 files modified
hooks/charmhelpers/contrib/network/ip.py (+21/-19)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+9/-4)
hooks/charmhelpers/contrib/openstack/context.py (+26/-2)
hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh (+7/-5)
hooks/charmhelpers/contrib/openstack/neutron.py (+14/-6)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+3/-2)
hooks/charmhelpers/contrib/openstack/utils.py (+100/-54)
hooks/charmhelpers/contrib/python/packages.py (+13/-4)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+391/-25)
hooks/charmhelpers/core/hookenv.py (+41/-7)
hooks/charmhelpers/core/host.py (+97/-41)
hooks/charmhelpers/core/services/helpers.py (+11/-5)
hooks/charmhelpers/core/templating.py (+13/-7)
hooks/charmhelpers/fetch/__init__.py (+9/-1)
hooks/charmhelpers/fetch/archiveurl.py (+1/-1)
hooks/charmhelpers/fetch/bzrurl.py (+22/-32)
hooks/charmhelpers/fetch/giturl.py (+20/-23)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+9/-4)
To merge this branch: bzr merge lp:~1chb1n/charms/trusty/ceph-radosgw/stable-ch-sync-and-enable-tests
Reviewer Review Type Date Requested Status
OpenStack Charmers Pending
Review via email: mp+284748@code.launchpad.net

Description of the change

Sync charm-helpers and enable the Liberty and Mitaka amulet tests.

To post a comment you must log in.
Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_lint_check #18506 ceph-radosgw for 1chb1n mp284748
    LINT OK: passed

Build: http://10.245.162.77:8080/job/charm_lint_check/18506/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_unit_test #17242 ceph-radosgw for 1chb1n mp284748
    UNIT OK: passed

Build: http://10.245.162.77:8080/job/charm_unit_test/17242/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_amulet_test #9150 ceph-radosgw for 1chb1n mp284748
    AMULET FAIL: amulet-test failed

AMULET Results (max last 2 lines):
make: *** [functional_test] Error 1
ERROR:root:Make target returned non-zero.

Full amulet test output: http://paste.ubuntu.com/14857573/
Build: http://10.245.162.77:8080/job/charm_amulet_test/9150/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_lint_check #129 ceph-radosgw for 1chb1n mp284748
    LINT OK: passed

Build: http://10.245.162.36:8080/job/charm_lint_check/129/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_unit_test #128 ceph-radosgw for 1chb1n mp284748
    UNIT OK: passed

Build: http://10.245.162.36:8080/job/charm_unit_test/128/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_amulet_test #24 ceph-radosgw for 1chb1n mp284748
    AMULET FAIL: amulet-test failed

AMULET Results (max last 2 lines):
make: *** [functional_test] Error 1
ERROR:root:Make target returned non-zero.

Full amulet test output: http://paste.ubuntu.com/15004101/
Build: http://10.245.162.36:8080/job/charm_amulet_test/24/

Unmerged revisions

48. By Ryan Beisner

enable mitaka, liberty amulet tests

47. By Ryan Beisner

sync charm-helpers for mitaka cloud archive awareness

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
2--- hooks/charmhelpers/contrib/network/ip.py 2015-10-22 13:18:44 +0000
3+++ hooks/charmhelpers/contrib/network/ip.py 2016-02-02 14:25:45 +0000
4@@ -53,7 +53,7 @@
5
6
7 def no_ip_found_error_out(network):
8- errmsg = ("No IP address found in network: %s" % network)
9+ errmsg = ("No IP address found in network(s): %s" % network)
10 raise ValueError(errmsg)
11
12
13@@ -61,7 +61,7 @@
14 """Get an IPv4 or IPv6 address within the network from the host.
15
16 :param network (str): CIDR presentation format. For example,
17- '192.168.1.0/24'.
18+ '192.168.1.0/24'. Supports multiple networks as a space-delimited list.
19 :param fallback (str): If no address is found, return fallback.
20 :param fatal (boolean): If no address is found, fallback is not
21 set and fatal is True then exit(1).
22@@ -75,24 +75,26 @@
23 else:
24 return None
25
26- _validate_cidr(network)
27- network = netaddr.IPNetwork(network)
28- for iface in netifaces.interfaces():
29- addresses = netifaces.ifaddresses(iface)
30- if network.version == 4 and netifaces.AF_INET in addresses:
31- addr = addresses[netifaces.AF_INET][0]['addr']
32- netmask = addresses[netifaces.AF_INET][0]['netmask']
33- cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
34- if cidr in network:
35- return str(cidr.ip)
36+ networks = network.split() or [network]
37+ for network in networks:
38+ _validate_cidr(network)
39+ network = netaddr.IPNetwork(network)
40+ for iface in netifaces.interfaces():
41+ addresses = netifaces.ifaddresses(iface)
42+ if network.version == 4 and netifaces.AF_INET in addresses:
43+ addr = addresses[netifaces.AF_INET][0]['addr']
44+ netmask = addresses[netifaces.AF_INET][0]['netmask']
45+ cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
46+ if cidr in network:
47+ return str(cidr.ip)
48
49- if network.version == 6 and netifaces.AF_INET6 in addresses:
50- for addr in addresses[netifaces.AF_INET6]:
51- if not addr['addr'].startswith('fe80'):
52- cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
53- addr['netmask']))
54- if cidr in network:
55- return str(cidr.ip)
56+ if network.version == 6 and netifaces.AF_INET6 in addresses:
57+ for addr in addresses[netifaces.AF_INET6]:
58+ if not addr['addr'].startswith('fe80'):
59+ cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
60+ addr['netmask']))
61+ if cidr in network:
62+ return str(cidr.ip)
63
64 if fallback is not None:
65 return fallback
66
67=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
68--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2016-01-28 09:03:35 +0000
69+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2016-02-02 14:25:45 +0000
70@@ -121,11 +121,12 @@
71
72 # Charms which should use the source config option
73 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
74- 'ceph-osd', 'ceph-radosgw']
75+ 'ceph-osd', 'ceph-radosgw', 'ceph-mon']
76
77 # Charms which can not use openstack-origin, ie. many subordinates
78 no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
79- 'openvswitch-odl', 'neutron-api-odl', 'odl-controller']
80+ 'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
81+ 'cinder-backup']
82
83 if self.openstack:
84 for svc in services:
85@@ -225,7 +226,8 @@
86 self.precise_havana, self.precise_icehouse,
87 self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
88 self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
89- self.wily_liberty) = range(12)
90+ self.wily_liberty, self.trusty_mitaka,
91+ self.xenial_mitaka) = range(14)
92
93 releases = {
94 ('precise', None): self.precise_essex,
95@@ -237,9 +239,11 @@
96 ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
97 ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
98 ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
99+ ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
100 ('utopic', None): self.utopic_juno,
101 ('vivid', None): self.vivid_kilo,
102- ('wily', None): self.wily_liberty}
103+ ('wily', None): self.wily_liberty,
104+ ('xenial', None): self.xenial_mitaka}
105 return releases[(self.series, self.openstack)]
106
107 def _get_openstack_release_string(self):
108@@ -256,6 +260,7 @@
109 ('utopic', 'juno'),
110 ('vivid', 'kilo'),
111 ('wily', 'liberty'),
112+ ('xenial', 'mitaka'),
113 ])
114 if self.openstack:
115 os_origin = self.openstack.split(':')[1]
116
117=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
118--- hooks/charmhelpers/contrib/openstack/context.py 2016-01-28 09:03:35 +0000
119+++ hooks/charmhelpers/contrib/openstack/context.py 2016-02-02 14:25:45 +0000
120@@ -57,6 +57,7 @@
121 get_nic_hwaddr,
122 mkdir,
123 write_file,
124+ pwgen,
125 )
126 from charmhelpers.contrib.hahelpers.cluster import (
127 determine_apache_port,
128@@ -87,6 +88,8 @@
129 is_bridge_member,
130 )
131 from charmhelpers.contrib.openstack.utils import get_host_ip
132+from charmhelpers.core.unitdata import kv
133+
134 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
135 ADDRESS_TYPES = ['admin', 'internal', 'public']
136
137@@ -636,11 +639,18 @@
138 ctxt['ipv6'] = True
139 ctxt['local_host'] = 'ip6-localhost'
140 ctxt['haproxy_host'] = '::'
141- ctxt['stat_port'] = ':::8888'
142 else:
143 ctxt['local_host'] = '127.0.0.1'
144 ctxt['haproxy_host'] = '0.0.0.0'
145- ctxt['stat_port'] = ':8888'
146+
147+ ctxt['stat_port'] = '8888'
148+
149+ db = kv()
150+ ctxt['stat_password'] = db.get('stat-password')
151+ if not ctxt['stat_password']:
152+ ctxt['stat_password'] = db.set('stat-password',
153+ pwgen(32))
154+ db.flush()
155
156 for frontend in cluster_hosts:
157 if (len(cluster_hosts[frontend]['backends']) > 1 or
158@@ -1094,6 +1104,20 @@
159 config_flags_parser(config_flags)}
160
161
162+class LibvirtConfigFlagsContext(OSContextGenerator):
163+ """
164+ This context provides support for extending
165+ the libvirt section through user-defined flags.
166+ """
167+ def __call__(self):
168+ ctxt = {}
169+ libvirt_flags = config('libvirt-flags')
170+ if libvirt_flags:
171+ ctxt['libvirt_flags'] = config_flags_parser(
172+ libvirt_flags)
173+ return ctxt
174+
175+
176 class SubordinateConfigContext(OSContextGenerator):
177
178 """
179
180=== modified file 'hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh'
181--- hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh 2015-02-24 11:02:02 +0000
182+++ hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh 2016-02-02 14:25:45 +0000
183@@ -9,15 +9,17 @@
184 CRITICAL=0
185 NOTACTIVE=''
186 LOGFILE=/var/log/nagios/check_haproxy.log
187-AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
188+AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR=1{print $4}')
189
190-for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'});
191+typeset -i N_INSTANCES=0
192+for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)
193 do
194- output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK')
195+ N_INSTANCES=N_INSTANCES+1
196+ output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK')
197 if [ $? != 0 ]; then
198 date >> $LOGFILE
199 echo $output >> $LOGFILE
200- /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1
201+ /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1
202 CRITICAL=1
203 NOTACTIVE="${NOTACTIVE} $appserver"
204 fi
205@@ -28,5 +30,5 @@
206 exit 2
207 fi
208
209-echo "OK: All haproxy instances looking good"
210+echo "OK: All haproxy instances ($N_INSTANCES) looking good"
211 exit 0
212
213=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
214--- hooks/charmhelpers/contrib/openstack/neutron.py 2016-01-28 09:03:35 +0000
215+++ hooks/charmhelpers/contrib/openstack/neutron.py 2016-02-02 14:25:45 +0000
216@@ -50,7 +50,7 @@
217 if kernel_version() >= (3, 13):
218 return []
219 else:
220- return ['openvswitch-datapath-dkms']
221+ return [headers_package(), 'openvswitch-datapath-dkms']
222
223
224 # legacy
225@@ -70,7 +70,7 @@
226 relation_prefix='neutron',
227 ssl_dir=QUANTUM_CONF_DIR)],
228 'services': ['quantum-plugin-openvswitch-agent'],
229- 'packages': [[headers_package()] + determine_dkms_package(),
230+ 'packages': [determine_dkms_package(),
231 ['quantum-plugin-openvswitch-agent']],
232 'server_packages': ['quantum-server',
233 'quantum-plugin-openvswitch'],
234@@ -111,7 +111,7 @@
235 relation_prefix='neutron',
236 ssl_dir=NEUTRON_CONF_DIR)],
237 'services': ['neutron-plugin-openvswitch-agent'],
238- 'packages': [[headers_package()] + determine_dkms_package(),
239+ 'packages': [determine_dkms_package(),
240 ['neutron-plugin-openvswitch-agent']],
241 'server_packages': ['neutron-server',
242 'neutron-plugin-openvswitch'],
243@@ -155,7 +155,7 @@
244 relation_prefix='neutron',
245 ssl_dir=NEUTRON_CONF_DIR)],
246 'services': [],
247- 'packages': [[headers_package()] + determine_dkms_package(),
248+ 'packages': [determine_dkms_package(),
249 ['neutron-plugin-cisco']],
250 'server_packages': ['neutron-server',
251 'neutron-plugin-cisco'],
252@@ -174,7 +174,7 @@
253 'neutron-dhcp-agent',
254 'nova-api-metadata',
255 'etcd'],
256- 'packages': [[headers_package()] + determine_dkms_package(),
257+ 'packages': [determine_dkms_package(),
258 ['calico-compute',
259 'bird',
260 'neutron-dhcp-agent',
261@@ -219,7 +219,7 @@
262 relation_prefix='neutron',
263 ssl_dir=NEUTRON_CONF_DIR)],
264 'services': [],
265- 'packages': [[headers_package()] + determine_dkms_package()],
266+ 'packages': [determine_dkms_package()],
267 'server_packages': ['neutron-server',
268 'python-neutron-plugin-midonet'],
269 'server_services': ['neutron-server']
270@@ -233,6 +233,14 @@
271 'neutron-plugin-ml2']
272 # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
273 plugins['nvp'] = plugins['nsx']
274+ if release >= 'kilo':
275+ plugins['midonet']['driver'] = (
276+ 'neutron.plugins.midonet.plugin.MidonetPluginV2')
277+ if release >= 'liberty':
278+ midonet_origin = config('midonet-origin')
279+ if midonet_origin is not None and midonet_origin[4:5] == '1':
280+ plugins['midonet']['driver'] = (
281+ 'midonet.neutron.plugin_v1.MidonetPluginV2')
282 return plugins
283
284
285
286=== modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
287--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2016-01-28 09:03:35 +0000
288+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2016-02-02 14:25:45 +0000
289@@ -33,13 +33,14 @@
290 timeout server 30000
291 {%- endif %}
292
293-listen stats {{ stat_port }}
294+listen stats
295+ bind {{ local_host }}:{{ stat_port }}
296 mode http
297 stats enable
298 stats hide-version
299 stats realm Haproxy\ Statistics
300 stats uri /
301- stats auth admin:password
302+ stats auth admin:{{ stat_password }}
303
304 {% if frontends -%}
305 {% for service, ports in service_ports.items() -%}
306
307=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
308--- hooks/charmhelpers/contrib/openstack/utils.py 2016-01-28 09:03:35 +0000
309+++ hooks/charmhelpers/contrib/openstack/utils.py 2016-02-02 14:25:45 +0000
310@@ -86,6 +86,7 @@
311 ('utopic', 'juno'),
312 ('vivid', 'kilo'),
313 ('wily', 'liberty'),
314+ ('xenial', 'mitaka'),
315 ])
316
317
318@@ -99,61 +100,70 @@
319 ('2014.2', 'juno'),
320 ('2015.1', 'kilo'),
321 ('2015.2', 'liberty'),
322+ ('2016.1', 'mitaka'),
323 ])
324
325-# The ugly duckling
326+# The ugly duckling - must list releases oldest to newest
327 SWIFT_CODENAMES = OrderedDict([
328- ('1.4.3', 'diablo'),
329- ('1.4.8', 'essex'),
330- ('1.7.4', 'folsom'),
331- ('1.8.0', 'grizzly'),
332- ('1.7.7', 'grizzly'),
333- ('1.7.6', 'grizzly'),
334- ('1.10.0', 'havana'),
335- ('1.9.1', 'havana'),
336- ('1.9.0', 'havana'),
337- ('1.13.1', 'icehouse'),
338- ('1.13.0', 'icehouse'),
339- ('1.12.0', 'icehouse'),
340- ('1.11.0', 'icehouse'),
341- ('2.0.0', 'juno'),
342- ('2.1.0', 'juno'),
343- ('2.2.0', 'juno'),
344- ('2.2.1', 'kilo'),
345- ('2.2.2', 'kilo'),
346- ('2.3.0', 'liberty'),
347- ('2.4.0', 'liberty'),
348- ('2.5.0', 'liberty'),
349+ ('diablo',
350+ ['1.4.3']),
351+ ('essex',
352+ ['1.4.8']),
353+ ('folsom',
354+ ['1.7.4']),
355+ ('grizzly',
356+ ['1.7.6', '1.7.7', '1.8.0']),
357+ ('havana',
358+ ['1.9.0', '1.9.1', '1.10.0']),
359+ ('icehouse',
360+ ['1.11.0', '1.12.0', '1.13.0', '1.13.1']),
361+ ('juno',
362+ ['2.0.0', '2.1.0', '2.2.0']),
363+ ('kilo',
364+ ['2.2.1', '2.2.2']),
365+ ('liberty',
366+ ['2.3.0', '2.4.0', '2.5.0']),
367+ ('mitaka',
368+ ['2.5.0']),
369 ])
370
371 # >= Liberty version->codename mapping
372 PACKAGE_CODENAMES = {
373 'nova-common': OrderedDict([
374- ('12.0.0', 'liberty'),
375+ ('12.0', 'liberty'),
376+ ('13.0', 'mitaka'),
377 ]),
378 'neutron-common': OrderedDict([
379- ('7.0.0', 'liberty'),
380+ ('7.0', 'liberty'),
381+ ('8.0', 'mitaka'),
382 ]),
383 'cinder-common': OrderedDict([
384- ('7.0.0', 'liberty'),
385+ ('7.0', 'liberty'),
386+ ('8.0', 'mitaka'),
387 ]),
388 'keystone': OrderedDict([
389- ('8.0.0', 'liberty'),
390+ ('8.0', 'liberty'),
391+ ('9.0', 'mitaka'),
392 ]),
393 'horizon-common': OrderedDict([
394- ('8.0.0', 'liberty'),
395+ ('8.0', 'liberty'),
396+ ('9.0', 'mitaka'),
397 ]),
398 'ceilometer-common': OrderedDict([
399- ('5.0.0', 'liberty'),
400+ ('5.0', 'liberty'),
401+ ('6.0', 'mitaka'),
402 ]),
403 'heat-common': OrderedDict([
404- ('5.0.0', 'liberty'),
405+ ('5.0', 'liberty'),
406+ ('6.0', 'mitaka'),
407 ]),
408 'glance-common': OrderedDict([
409- ('11.0.0', 'liberty'),
410+ ('11.0', 'liberty'),
411+ ('12.0', 'mitaka'),
412 ]),
413 'openstack-dashboard': OrderedDict([
414- ('8.0.0', 'liberty'),
415+ ('8.0', 'liberty'),
416+ ('9.0', 'mitaka'),
417 ]),
418 }
419
420@@ -216,6 +226,33 @@
421 error_out(e)
422
423
424+def get_os_version_codename_swift(codename):
425+ '''Determine OpenStack version number of swift from codename.'''
426+ for k, v in six.iteritems(SWIFT_CODENAMES):
427+ if k == codename:
428+ return v[-1]
429+ e = 'Could not derive swift version for '\
430+ 'codename: %s' % codename
431+ error_out(e)
432+
433+
434+def get_swift_codename(version):
435+ '''Determine OpenStack codename that corresponds to swift version.'''
436+ codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
437+ if len(codenames) > 1:
438+ # If more than one release codename contains this version we determine
439+ # the actual codename based on the highest available install source.
440+ for codename in reversed(codenames):
441+ releases = UBUNTU_OPENSTACK_RELEASE
442+ release = [k for k, v in six.iteritems(releases) if codename in v]
443+ ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
444+ if codename in ret or release[0] in ret:
445+ return codename
446+ elif len(codenames) == 1:
447+ return codenames[0]
448+ return None
449+
450+
451 def get_os_codename_package(package, fatal=True):
452 '''Derive OpenStack release codename from an installed package.'''
453 import apt_pkg as apt
454@@ -240,7 +277,14 @@
455 error_out(e)
456
457 vers = apt.upstream_version(pkg.current_ver.ver_str)
458- match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
459+ if 'swift' in pkg.name:
460+ # Fully x.y.z match for swift versions
461+ match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
462+ else:
463+ # x.y match only for 20XX.X
464+ # and ignore patch level for other packages
465+ match = re.match('^(\d+)\.(\d+)', vers)
466+
467 if match:
468 vers = match.group(0)
469
470@@ -252,13 +296,8 @@
471 # < Liberty co-ordinated project versions
472 try:
473 if 'swift' in pkg.name:
474- swift_vers = vers[:5]
475- if swift_vers not in SWIFT_CODENAMES:
476- # Deal with 1.10.0 upward
477- swift_vers = vers[:6]
478- return SWIFT_CODENAMES[swift_vers]
479+ return get_swift_codename(vers)
480 else:
481- vers = vers[:6]
482 return OPENSTACK_CODENAMES[vers]
483 except KeyError:
484 if not fatal:
485@@ -276,12 +315,14 @@
486
487 if 'swift' in pkg:
488 vers_map = SWIFT_CODENAMES
489+ for cname, version in six.iteritems(vers_map):
490+ if cname == codename:
491+ return version[-1]
492 else:
493 vers_map = OPENSTACK_CODENAMES
494-
495- for version, cname in six.iteritems(vers_map):
496- if cname == codename:
497- return version
498+ for version, cname in six.iteritems(vers_map):
499+ if cname == codename:
500+ return version
501 # e = "Could not determine OpenStack version for package: %s" % pkg
502 # error_out(e)
503
504@@ -377,6 +418,9 @@
505 'liberty': 'trusty-updates/liberty',
506 'liberty/updates': 'trusty-updates/liberty',
507 'liberty/proposed': 'trusty-proposed/liberty',
508+ 'mitaka': 'trusty-updates/mitaka',
509+ 'mitaka/updates': 'trusty-updates/mitaka',
510+ 'mitaka/proposed': 'trusty-proposed/mitaka',
511 }
512
513 try:
514@@ -444,11 +488,16 @@
515 cur_vers = get_os_version_package(package)
516 if "swift" in package:
517 codename = get_os_codename_install_source(src)
518- available_vers = get_os_version_codename(codename, SWIFT_CODENAMES)
519+ avail_vers = get_os_version_codename_swift(codename)
520 else:
521- available_vers = get_os_version_install_source(src)
522+ avail_vers = get_os_version_install_source(src)
523 apt.init()
524- return apt.version_compare(available_vers, cur_vers) == 1
525+ if "swift" in package:
526+ major_cur_vers = cur_vers.split('.', 1)[0]
527+ major_avail_vers = avail_vers.split('.', 1)[0]
528+ major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
529+ return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
530+ return apt.version_compare(avail_vers, cur_vers) == 1
531
532
533 def ensure_block_device(block_device):
534@@ -577,7 +626,7 @@
535 return yaml.load(projects_yaml)
536
537
538-def git_clone_and_install(projects_yaml, core_project, depth=1):
539+def git_clone_and_install(projects_yaml, core_project):
540 """
541 Clone/install all specified OpenStack repositories.
542
543@@ -627,6 +676,9 @@
544 for p in projects['repositories']:
545 repo = p['repository']
546 branch = p['branch']
547+ depth = '1'
548+ if 'depth' in p.keys():
549+ depth = p['depth']
550 if p['name'] == 'requirements':
551 repo_dir = _git_clone_and_install_single(repo, branch, depth,
552 parent_dir, http_proxy,
553@@ -671,19 +723,13 @@
554 """
555 Clone and install a single git repository.
556 """
557- dest_dir = os.path.join(parent_dir, os.path.basename(repo))
558-
559 if not os.path.exists(parent_dir):
560 juju_log('Directory already exists at {}. '
561 'No need to create directory.'.format(parent_dir))
562 os.mkdir(parent_dir)
563
564- if not os.path.exists(dest_dir):
565- juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
566- repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
567- depth=depth)
568- else:
569- repo_dir = dest_dir
570+ juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
571+ repo_dir = install_remote(repo, dest=parent_dir, branch=branch, depth=depth)
572
573 venv = os.path.join(parent_dir, 'venv')
574
575
576=== modified file 'hooks/charmhelpers/contrib/python/packages.py'
577--- hooks/charmhelpers/contrib/python/packages.py 2015-08-10 16:33:38 +0000
578+++ hooks/charmhelpers/contrib/python/packages.py 2016-02-02 14:25:45 +0000
579@@ -42,8 +42,12 @@
580 yield "--{0}={1}".format(key, value)
581
582
583-def pip_install_requirements(requirements, **options):
584- """Install a requirements file """
585+def pip_install_requirements(requirements, constraints=None, **options):
586+ """Install a requirements file.
587+
588+ :param constraints: Path to pip constraints file.
589+ http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
590+ """
591 command = ["install"]
592
593 available_options = ('proxy', 'src', 'log', )
594@@ -51,8 +55,13 @@
595 command.append(option)
596
597 command.append("-r {0}".format(requirements))
598- log("Installing from file: {} with options: {}".format(requirements,
599- command))
600+ if constraints:
601+ command.append("-c {0}".format(constraints))
602+ log("Installing from file: {} with constraints {} "
603+ "and options: {}".format(requirements, constraints, command))
604+ else:
605+ log("Installing from file: {} with options: {}".format(requirements,
606+ command))
607 pip_execute(command)
608
609
610
611=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
612--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2016-01-28 09:03:35 +0000
613+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2016-02-02 14:25:45 +0000
614@@ -23,10 +23,11 @@
615 # James Page <james.page@ubuntu.com>
616 # Adam Gandelman <adamg@ubuntu.com>
617 #
618+import bisect
619+import six
620
621 import os
622 import shutil
623-import six
624 import json
625 import time
626 import uuid
627@@ -73,6 +74,394 @@
628 err to syslog = {use_syslog}
629 clog to syslog = {use_syslog}
630 """
631+# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs)
632+powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608]
633+
634+
635+def validator(value, valid_type, valid_range=None):
636+ """
637+ Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
638+ Example input:
639+ validator(value=1,
640+ valid_type=int,
641+ valid_range=[0, 2])
642+ This says I'm testing value=1. It must be an int inclusive in [0,2]
643+
644+ :param value: The value to validate
645+ :param valid_type: The type that value should be.
646+ :param valid_range: A range of values that value can assume.
647+ :return:
648+ """
649+ assert isinstance(value, valid_type), "{} is not a {}".format(
650+ value,
651+ valid_type)
652+ if valid_range is not None:
653+ assert isinstance(valid_range, list), \
654+ "valid_range must be a list, was given {}".format(valid_range)
655+ # If we're dealing with strings
656+ if valid_type is six.string_types:
657+ assert value in valid_range, \
658+ "{} is not in the list {}".format(value, valid_range)
659+ # Integer, float should have a min and max
660+ else:
661+ if len(valid_range) != 2:
662+ raise ValueError(
663+ "Invalid valid_range list of {} for {}. "
664+ "List must be [min,max]".format(valid_range, value))
665+ assert value >= valid_range[0], \
666+ "{} is less than minimum allowed value of {}".format(
667+ value, valid_range[0])
668+ assert value <= valid_range[1], \
669+ "{} is greater than maximum allowed value of {}".format(
670+ value, valid_range[1])
671+
672+
673+class PoolCreationError(Exception):
674+ """
675+ A custom error to inform the caller that a pool creation failed. Provides an error message
676+ """
677+ def __init__(self, message):
678+ super(PoolCreationError, self).__init__(message)
679+
680+
681+class Pool(object):
682+ """
683+ An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool.
684+ Do not call create() on this base class as it will not do anything. Instantiate a child class and call create().
685+ """
686+ def __init__(self, service, name):
687+ self.service = service
688+ self.name = name
689+
690+ # Create the pool if it doesn't exist already
691+ # To be implemented by subclasses
692+ def create(self):
693+ pass
694+
695+ def add_cache_tier(self, cache_pool, mode):
696+ """
697+ Adds a new cache tier to an existing pool.
698+ :param cache_pool: six.string_types. The cache tier pool name to add.
699+ :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"]
700+ :return: None
701+ """
702+ # Check the input types and values
703+ validator(value=cache_pool, valid_type=six.string_types)
704+ validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"])
705+
706+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool])
707+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode])
708+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool])
709+ check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom'])
710+
711+ def remove_cache_tier(self, cache_pool):
712+ """
713+ Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete.
714+ :param cache_pool: six.string_types. The cache tier pool name to remove.
715+ :return: None
716+ """
717+ # read-only is easy, writeback is much harder
718+ mode = get_cache_mode(cache_pool)
719+ if mode == 'readonly':
720+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
721+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
722+
723+ elif mode == 'writeback':
724+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'])
725+ # Flush the cache and wait for it to return
726+ check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
727+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
728+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
729+
730+ def get_pgs(self, pool_size):
731+ """
732+ :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for
733+ erasure coded pools
734+ :return: int. The number of pgs to use.
735+ """
736+ validator(value=pool_size, valid_type=int)
737+ osds = get_osds(self.service)
738+ if not osds:
739+ # NOTE(james-page): Default to 200 for older ceph versions
740+ # which don't support OSD query from cli
741+ return 200
742+
743+ # Calculate based on Ceph best practices
744+ if osds < 5:
745+ return 128
746+ elif 5 < osds < 10:
747+ return 512
748+ elif 10 < osds < 50:
749+ return 4096
750+ else:
751+ estimate = (osds * 100) / pool_size
752+ # Return the next nearest power of 2
753+ index = bisect.bisect_right(powers_of_two, estimate)
754+ return powers_of_two[index]
755+
756+
757+class ReplicatedPool(Pool):
758+ def __init__(self, service, name, replicas=2):
759+ super(ReplicatedPool, self).__init__(service=service, name=name)
760+ self.replicas = replicas
761+
762+ def create(self):
763+ if not pool_exists(self.service, self.name):
764+ # Create it
765+ pgs = self.get_pgs(self.replicas)
766+ cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs)]
767+ try:
768+ check_call(cmd)
769+ except CalledProcessError:
770+ raise
771+
772+
773+# Default jerasure erasure coded pool
774+class ErasurePool(Pool):
775+ def __init__(self, service, name, erasure_code_profile="default"):
776+ super(ErasurePool, self).__init__(service=service, name=name)
777+ self.erasure_code_profile = erasure_code_profile
778+
779+ def create(self):
780+ if not pool_exists(self.service, self.name):
781+ # Try to find the erasure profile information so we can properly size the pgs
782+ erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile)
783+
784+ # Check for errors
785+ if erasure_profile is None:
786+ log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile),
787+ level=ERROR)
788+ raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile))
789+ if 'k' not in erasure_profile or 'm' not in erasure_profile:
790+ # Error
791+ log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile),
792+ level=ERROR)
793+ raise PoolCreationError(
794+ message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile))
795+
796+ pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m']))
797+ # Create it
798+ cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs),
799+ 'erasure', self.erasure_code_profile]
800+ try:
801+ check_call(cmd)
802+ except CalledProcessError:
803+ raise
804+
805+ """Get an existing erasure code profile if it already exists.
806+ Returns json formatted output"""
807+
808+
809+def get_erasure_profile(service, name):
810+ """
811+ :param service: six.string_types. The Ceph user name to run the command under
812+ :param name:
813+ :return:
814+ """
815+ try:
816+ out = check_output(['ceph', '--id', service,
817+ 'osd', 'erasure-code-profile', 'get',
818+ name, '--format=json'])
819+        return json.loads(out.decode('UTF-8'))
820+ except (CalledProcessError, OSError, ValueError):
821+ return None
822+
823+
824+def pool_set(service, pool_name, key, value):
825+ """
826+ Sets a value for a RADOS pool in ceph.
827+ :param service: six.string_types. The Ceph user name to run the command under
828+ :param pool_name: six.string_types
829+ :param key: six.string_types
830+ :param value:
831+ :return: None. Can raise CalledProcessError
832+ """
833+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
834+ try:
835+ check_call(cmd)
836+ except CalledProcessError:
837+ raise
838+
839+
840+def snapshot_pool(service, pool_name, snapshot_name):
841+ """
842+ Snapshots a RADOS pool in ceph.
843+ :param service: six.string_types. The Ceph user name to run the command under
844+ :param pool_name: six.string_types
845+ :param snapshot_name: six.string_types
846+ :return: None. Can raise CalledProcessError
847+ """
848+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name]
849+ try:
850+ check_call(cmd)
851+ except CalledProcessError:
852+ raise
853+
854+
855+def remove_pool_snapshot(service, pool_name, snapshot_name):
856+ """
857+ Remove a snapshot from a RADOS pool in ceph.
858+ :param service: six.string_types. The Ceph user name to run the command under
859+ :param pool_name: six.string_types
860+ :param snapshot_name: six.string_types
861+ :return: None. Can raise CalledProcessError
862+ """
863+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
864+ try:
865+ check_call(cmd)
866+ except CalledProcessError:
867+ raise
868+
869+
870+# max_bytes should be an int or long
871+def set_pool_quota(service, pool_name, max_bytes):
872+ """
873+ :param service: six.string_types. The Ceph user name to run the command under
874+ :param pool_name: six.string_types
875+ :param max_bytes: int or long
876+ :return: None. Can raise CalledProcessError
877+ """
878+ # Set a byte quota on a RADOS pool in ceph.
879+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', str(max_bytes)]
880+ try:
881+ check_call(cmd)
882+ except CalledProcessError:
883+ raise
884+
885+
886+def remove_pool_quota(service, pool_name):
887+ """
888+ Set a byte quota on a RADOS pool in ceph.
889+ :param service: six.string_types. The Ceph user name to run the command under
890+ :param pool_name: six.string_types
891+ :return: None. Can raise CalledProcessError
892+ """
893+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
894+ try:
895+ check_call(cmd)
896+ except CalledProcessError:
897+ raise
898+
899+
900+def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host',
901+ data_chunks=2, coding_chunks=1,
902+ locality=None, durability_estimator=None):
903+ """
904+ Create a new erasure code profile if one does not already exist for it. Updates
905+ the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
906+ for more details
907+ :param service: six.string_types. The Ceph user name to run the command under
908+ :param profile_name: six.string_types
909+ :param erasure_plugin_name: six.string_types
910+ :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region',
911+ 'room', 'root', 'row'])
912+ :param data_chunks: int
913+ :param coding_chunks: int
914+ :param locality: int
915+ :param durability_estimator: int
916+ :return: None. Can raise CalledProcessError
917+ """
918+ # Ensure this failure_domain is allowed by Ceph
919+ validator(failure_domain, six.string_types,
920+ ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
921+
922+ cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
923+ 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
924+ 'ruleset_failure_domain=' + failure_domain]
925+ if locality is not None and durability_estimator is not None:
926+ raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
927+
928+ # Add plugin specific information
929+ if locality is not None:
930+ # For local erasure codes
931+ cmd.append('l=' + str(locality))
932+ if durability_estimator is not None:
933+ # For Shec erasure codes
934+ cmd.append('c=' + str(durability_estimator))
935+
936+ if erasure_profile_exists(service, profile_name):
937+ cmd.append('--force')
938+
939+ try:
940+ check_call(cmd)
941+ except CalledProcessError:
942+ raise
943+
944+
945+def rename_pool(service, old_name, new_name):
946+ """
947+ Rename a Ceph pool from old_name to new_name
948+ :param service: six.string_types. The Ceph user name to run the command under
949+ :param old_name: six.string_types
950+ :param new_name: six.string_types
951+ :return: None
952+ """
953+ validator(value=old_name, valid_type=six.string_types)
954+ validator(value=new_name, valid_type=six.string_types)
955+
956+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
957+ check_call(cmd)
958+
959+
960+def erasure_profile_exists(service, name):
961+ """
962+ Check to see if an Erasure code profile already exists.
963+ :param service: six.string_types. The Ceph user name to run the command under
964+ :param name: six.string_types
965+ :return: int or None
966+ """
967+ validator(value=name, valid_type=six.string_types)
968+ try:
969+ check_call(['ceph', '--id', service,
970+ 'osd', 'erasure-code-profile', 'get',
971+ name])
972+ return True
973+ except CalledProcessError:
974+ return False
975+
976+
977+def get_cache_mode(service, pool_name):
978+ """
979+ Find the current caching mode of the pool_name given.
980+ :param service: six.string_types. The Ceph user name to run the command under
981+ :param pool_name: six.string_types
982+ :return: int or None
983+ """
984+ validator(value=service, valid_type=six.string_types)
985+ validator(value=pool_name, valid_type=six.string_types)
986+ out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json'])
987+ try:
988+        osd_json = json.loads(out.decode('UTF-8'))
989+ for pool in osd_json['pools']:
990+ if pool['pool_name'] == pool_name:
991+ return pool['cache_mode']
992+ return None
993+ except ValueError:
994+ raise
995+
996+
997+def pool_exists(service, name):
998+ """Check to see if a RADOS pool already exists."""
999+ try:
1000+ out = check_output(['rados', '--id', service,
1001+ 'lspools']).decode('UTF-8')
1002+ except CalledProcessError:
1003+ return False
1004+
1005+ return name in out
1006+
1007+
1008+def get_osds(service):
1009+ """Return a list of all Ceph Object Storage Daemons currently in the
1010+ cluster.
1011+ """
1012+ version = ceph_version()
1013+ if version and version >= '0.56':
1014+ return json.loads(check_output(['ceph', '--id', service,
1015+ 'osd', 'ls',
1016+ '--format=json']).decode('UTF-8'))
1017+
1018+ return None
1019
1020
1021 def install():
1022@@ -102,30 +491,6 @@
1023 check_call(cmd)
1024
1025
1026-def pool_exists(service, name):
1027- """Check to see if a RADOS pool already exists."""
1028- try:
1029- out = check_output(['rados', '--id', service,
1030- 'lspools']).decode('UTF-8')
1031- except CalledProcessError:
1032- return False
1033-
1034- return name in out
1035-
1036-
1037-def get_osds(service):
1038- """Return a list of all Ceph Object Storage Daemons currently in the
1039- cluster.
1040- """
1041- version = ceph_version()
1042- if version and version >= '0.56':
1043- return json.loads(check_output(['ceph', '--id', service,
1044- 'osd', 'ls',
1045- '--format=json']).decode('UTF-8'))
1046-
1047- return None
1048-
1049-
1050 def update_pool(client, pool, settings):
1051 cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
1052 for k, v in six.iteritems(settings):
1053@@ -414,6 +779,7 @@
1054
1055 The API is versioned and defaults to version 1.
1056 """
1057+
1058 def __init__(self, api_version=1, request_id=None):
1059 self.api_version = api_version
1060 if request_id:
1061
1062=== modified file 'hooks/charmhelpers/core/hookenv.py'
1063--- hooks/charmhelpers/core/hookenv.py 2016-01-28 09:03:35 +0000
1064+++ hooks/charmhelpers/core/hookenv.py 2016-02-02 14:25:45 +0000
1065@@ -492,7 +492,7 @@
1066
1067 @cached
1068 def peer_relation_id():
1069- '''Get a peer relation id if a peer relation has been joined, else None.'''
1070+ '''Get the peers relation id if a peers relation has been joined, else None.'''
1071 md = metadata()
1072 section = md.get('peers')
1073 if section:
1074@@ -517,12 +517,12 @@
1075 def relation_to_role_and_interface(relation_name):
1076 """
1077 Given the name of a relation, return the role and the name of the interface
1078- that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).
1079+ that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
1080
1081 :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
1082 """
1083 _metadata = metadata()
1084- for role in ('provides', 'requires', 'peer'):
1085+ for role in ('provides', 'requires', 'peers'):
1086 interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
1087 if interface:
1088 return role, interface
1089@@ -534,7 +534,7 @@
1090 """
1091 Given a role and interface name, return a list of relation names for the
1092 current charm that use that interface under that role (where role is one
1093- of ``provides``, ``requires``, or ``peer``).
1094+ of ``provides``, ``requires``, or ``peers``).
1095
1096 :returns: A list of relation names.
1097 """
1098@@ -555,7 +555,7 @@
1099 :returns: A list of relation names.
1100 """
1101 results = []
1102- for role in ('provides', 'requires', 'peer'):
1103+ for role in ('provides', 'requires', 'peers'):
1104 results.extend(role_and_interface_to_relations(role, interface_name))
1105 return results
1106
1107@@ -637,7 +637,7 @@
1108
1109
1110 @cached
1111-def storage_get(attribute="", storage_id=""):
1112+def storage_get(attribute=None, storage_id=None):
1113 """Get storage attributes"""
1114 _args = ['storage-get', '--format=json']
1115 if storage_id:
1116@@ -651,7 +651,7 @@
1117
1118
1119 @cached
1120-def storage_list(storage_name=""):
1121+def storage_list(storage_name=None):
1122 """List the storage IDs for the unit"""
1123 _args = ['storage-list', '--format=json']
1124 if storage_name:
1125@@ -878,6 +878,40 @@
1126 subprocess.check_call(cmd)
1127
1128
1129+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1130+def payload_register(ptype, klass, pid):
1131+ """ is used while a hook is running to let Juju know that a
1132+ payload has been started."""
1133+ cmd = ['payload-register']
1134+ for x in [ptype, klass, pid]:
1135+ cmd.append(x)
1136+ subprocess.check_call(cmd)
1137+
1138+
1139+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1140+def payload_unregister(klass, pid):
1141+ """ is used while a hook is running to let Juju know
1142+ that a payload has been manually stopped. The <class> and <id> provided
1143+ must match a payload that has been previously registered with juju using
1144+ payload-register."""
1145+ cmd = ['payload-unregister']
1146+ for x in [klass, pid]:
1147+ cmd.append(x)
1148+ subprocess.check_call(cmd)
1149+
1150+
1151+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1152+def payload_status_set(klass, pid, status):
1153+ """is used to update the current status of a registered payload.
1154+ The <class> and <id> provided must match a payload that has been previously
1155+ registered with juju using payload-register. The <status> must be one of the
1156+ follow: starting, started, stopping, stopped"""
1157+ cmd = ['payload-status-set']
1158+ for x in [klass, pid, status]:
1159+ cmd.append(x)
1160+ subprocess.check_call(cmd)
1161+
1162+
1163 @cached
1164 def juju_version():
1165 """Full version string (eg. '1.23.3.1-trusty-amd64')"""
1166
1167=== modified file 'hooks/charmhelpers/core/host.py'
1168--- hooks/charmhelpers/core/host.py 2016-01-28 09:03:35 +0000
1169+++ hooks/charmhelpers/core/host.py 2016-02-02 14:25:45 +0000
1170@@ -72,7 +72,9 @@
1171 stopped = service_stop(service_name)
1172 upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
1173 sysv_file = os.path.join(initd_dir, service_name)
1174- if os.path.exists(upstart_file):
1175+ if init_is_systemd():
1176+ service('disable', service_name)
1177+ elif os.path.exists(upstart_file):
1178 override_path = os.path.join(
1179 init_dir, '{}.override'.format(service_name))
1180 with open(override_path, 'w') as fh:
1181@@ -80,9 +82,9 @@
1182 elif os.path.exists(sysv_file):
1183 subprocess.check_call(["update-rc.d", service_name, "disable"])
1184 else:
1185- # XXX: Support SystemD too
1186 raise ValueError(
1187- "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
1188+ "Unable to detect {0} as SystemD, Upstart {1} or"
1189+ " SysV {2}".format(
1190 service_name, upstart_file, sysv_file))
1191 return stopped
1192
1193@@ -94,7 +96,9 @@
1194 Reenable starting again at boot. Start the service"""
1195 upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
1196 sysv_file = os.path.join(initd_dir, service_name)
1197- if os.path.exists(upstart_file):
1198+ if init_is_systemd():
1199+ service('enable', service_name)
1200+ elif os.path.exists(upstart_file):
1201 override_path = os.path.join(
1202 init_dir, '{}.override'.format(service_name))
1203 if os.path.exists(override_path):
1204@@ -102,9 +106,9 @@
1205 elif os.path.exists(sysv_file):
1206 subprocess.check_call(["update-rc.d", service_name, "enable"])
1207 else:
1208- # XXX: Support SystemD too
1209 raise ValueError(
1210- "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
1211+ "Unable to detect {0} as SystemD, Upstart {1} or"
1212+ " SysV {2}".format(
1213 service_name, upstart_file, sysv_file))
1214
1215 started = service_running(service_name)
1216@@ -115,23 +119,30 @@
1217
1218 def service(action, service_name):
1219 """Control a system service"""
1220- cmd = ['service', service_name, action]
1221+ if init_is_systemd():
1222+ cmd = ['systemctl', action, service_name]
1223+ else:
1224+ cmd = ['service', service_name, action]
1225 return subprocess.call(cmd) == 0
1226
1227
1228-def service_running(service):
1229+def service_running(service_name):
1230 """Determine whether a system service is running"""
1231- try:
1232- output = subprocess.check_output(
1233- ['service', service, 'status'],
1234- stderr=subprocess.STDOUT).decode('UTF-8')
1235- except subprocess.CalledProcessError:
1236- return False
1237+ if init_is_systemd():
1238+ return service('is-active', service_name)
1239 else:
1240- if ("start/running" in output or "is running" in output):
1241- return True
1242- else:
1243+ try:
1244+ output = subprocess.check_output(
1245+ ['service', service_name, 'status'],
1246+ stderr=subprocess.STDOUT).decode('UTF-8')
1247+ except subprocess.CalledProcessError:
1248 return False
1249+ else:
1250+ if ("start/running" in output or "is running" in output or
1251+ "up and running" in output):
1252+ return True
1253+ else:
1254+ return False
1255
1256
1257 def service_available(service_name):
1258@@ -146,8 +157,29 @@
1259 return True
1260
1261
1262-def adduser(username, password=None, shell='/bin/bash', system_user=False):
1263- """Add a user to the system"""
1264+SYSTEMD_SYSTEM = '/run/systemd/system'
1265+
1266+
1267+def init_is_systemd():
1268+ """Return True if the host system uses systemd, False otherwise."""
1269+ return os.path.isdir(SYSTEMD_SYSTEM)
1270+
1271+
1272+def adduser(username, password=None, shell='/bin/bash', system_user=False,
1273+ primary_group=None, secondary_groups=None):
1274+ """Add a user to the system.
1275+
1276+ Will log but otherwise succeed if the user already exists.
1277+
1278+ :param str username: Username to create
1279+ :param str password: Password for user; if ``None``, create a system user
1280+ :param str shell: The default shell for the user
1281+ :param bool system_user: Whether to create a login or system user
1282+ :param str primary_group: Primary group for user; defaults to username
1283+ :param list secondary_groups: Optional list of additional groups
1284+
1285+ :returns: The password database entry struct, as returned by `pwd.getpwnam`
1286+ """
1287 try:
1288 user_info = pwd.getpwnam(username)
1289 log('user {0} already exists!'.format(username))
1290@@ -162,6 +194,16 @@
1291 '--shell', shell,
1292 '--password', password,
1293 ])
1294+ if not primary_group:
1295+ try:
1296+ grp.getgrnam(username)
1297+ primary_group = username # avoid "group exists" error
1298+ except KeyError:
1299+ pass
1300+ if primary_group:
1301+ cmd.extend(['-g', primary_group])
1302+ if secondary_groups:
1303+ cmd.extend(['-G', ','.join(secondary_groups)])
1304 cmd.append(username)
1305 subprocess.check_call(cmd)
1306 user_info = pwd.getpwnam(username)
1307@@ -259,14 +301,12 @@
1308
1309
1310 def fstab_remove(mp):
1311- """Remove the given mountpoint entry from /etc/fstab
1312- """
1313+ """Remove the given mountpoint entry from /etc/fstab"""
1314 return Fstab.remove_by_mountpoint(mp)
1315
1316
1317 def fstab_add(dev, mp, fs, options=None):
1318- """Adds the given device entry to the /etc/fstab file
1319- """
1320+ """Adds the given device entry to the /etc/fstab file"""
1321 return Fstab.add(dev, mp, fs, options=options)
1322
1323
1324@@ -322,8 +362,7 @@
1325
1326
1327 def file_hash(path, hash_type='md5'):
1328- """
1329- Generate a hash checksum of the contents of 'path' or None if not found.
1330+ """Generate a hash checksum of the contents of 'path' or None if not found.
1331
1332 :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
1333 such as md5, sha1, sha256, sha512, etc.
1334@@ -338,10 +377,9 @@
1335
1336
1337 def path_hash(path):
1338- """
1339- Generate a hash checksum of all files matching 'path'. Standard wildcards
1340- like '*' and '?' are supported, see documentation for the 'glob' module for
1341- more information.
1342+ """Generate a hash checksum of all files matching 'path'. Standard
1343+ wildcards like '*' and '?' are supported, see documentation for the 'glob'
1344+ module for more information.
1345
1346 :return: dict: A { filename: hash } dictionary for all matched files.
1347 Empty if none found.
1348@@ -353,8 +391,7 @@
1349
1350
1351 def check_hash(path, checksum, hash_type='md5'):
1352- """
1353- Validate a file using a cryptographic checksum.
1354+ """Validate a file using a cryptographic checksum.
1355
1356 :param str checksum: Value of the checksum used to validate the file.
1357 :param str hash_type: Hash algorithm used to generate `checksum`.
1358@@ -369,6 +406,7 @@
1359
1360
1361 class ChecksumError(ValueError):
1362+ """A class derived from Value error to indicate the checksum failed."""
1363 pass
1364
1365
1366@@ -474,7 +512,7 @@
1367
1368
1369 def list_nics(nic_type=None):
1370- '''Return a list of nics of given type(s)'''
1371+ """Return a list of nics of given type(s)"""
1372 if isinstance(nic_type, six.string_types):
1373 int_types = [nic_type]
1374 else:
1375@@ -516,12 +554,13 @@
1376
1377
1378 def set_nic_mtu(nic, mtu):
1379- '''Set MTU on a network interface'''
1380+ """Set the Maximum Transmission Unit (MTU) on a network interface."""
1381 cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
1382 subprocess.check_call(cmd)
1383
1384
1385 def get_nic_mtu(nic):
1386+ """Return the Maximum Transmission Unit (MTU) for a network interface."""
1387 cmd = ['ip', 'addr', 'show', nic]
1388 ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
1389 mtu = ""
1390@@ -533,6 +572,7 @@
1391
1392
1393 def get_nic_hwaddr(nic):
1394+ """Return the Media Access Control (MAC) for a network interface."""
1395 cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
1396 ip_output = subprocess.check_output(cmd).decode('UTF-8')
1397 hwaddr = ""
1398@@ -543,7 +583,7 @@
1399
1400
1401 def cmp_pkgrevno(package, revno, pkgcache=None):
1402- '''Compare supplied revno with the revno of the installed package
1403+ """Compare supplied revno with the revno of the installed package
1404
1405 * 1 => Installed revno is greater than supplied arg
1406 * 0 => Installed revno is the same as supplied arg
1407@@ -552,7 +592,7 @@
1408 This function imports apt_cache function from charmhelpers.fetch if
1409 the pkgcache argument is None. Be sure to add charmhelpers.fetch if
1410 you call this function, or pass an apt_pkg.Cache() instance.
1411- '''
1412+ """
1413 import apt_pkg
1414 if not pkgcache:
1415 from charmhelpers.fetch import apt_cache
1416@@ -562,19 +602,27 @@
1417
1418
1419 @contextmanager
1420-def chdir(d):
1421+def chdir(directory):
1422+ """Change the current working directory to a different directory for a code
1423+ block and return the previous directory after the block exits. Useful to
1424+    run commands from a specified directory.
1425+
1426+ :param str directory: The directory path to change to for this context.
1427+ """
1428 cur = os.getcwd()
1429 try:
1430- yield os.chdir(d)
1431+ yield os.chdir(directory)
1432 finally:
1433 os.chdir(cur)
1434
1435
1436 def chownr(path, owner, group, follow_links=True, chowntopdir=False):
1437- """
1438- Recursively change user and group ownership of files and directories
1439+ """Recursively change user and group ownership of files and directories
1440 in given path. Doesn't chown path itself by default, only its children.
1441
1442+ :param str path: The string path to start changing ownership.
1443+ :param str owner: The owner string to use when looking up the uid.
1444+ :param str group: The group string to use when looking up the gid.
1445 :param bool follow_links: Also Chown links if True
1446 :param bool chowntopdir: Also chown path itself if True
1447 """
1448@@ -598,15 +646,23 @@
1449
1450
1451 def lchownr(path, owner, group):
1452+ """Recursively change user and group ownership of files and directories
1453+ in a given path, not following symbolic links. See the documentation for
1454+ 'os.lchown' for more information.
1455+
1456+ :param str path: The string path to start changing ownership.
1457+ :param str owner: The owner string to use when looking up the uid.
1458+ :param str group: The group string to use when looking up the gid.
1459+ """
1460 chownr(path, owner, group, follow_links=False)
1461
1462
1463 def get_total_ram():
1464- '''The total amount of system RAM in bytes.
1465+ """The total amount of system RAM in bytes.
1466
1467 This is what is reported by the OS, and may be overcommitted when
1468 there are multiple containers hosted on the same machine.
1469- '''
1470+ """
1471 with open('/proc/meminfo', 'r') as f:
1472 for line in f.readlines():
1473 if line:
1474
1475=== modified file 'hooks/charmhelpers/core/services/helpers.py'
1476--- hooks/charmhelpers/core/services/helpers.py 2016-01-28 09:03:35 +0000
1477+++ hooks/charmhelpers/core/services/helpers.py 2016-02-02 14:25:45 +0000
1478@@ -243,13 +243,15 @@
1479 :param str source: The template source file, relative to
1480 `$CHARM_DIR/templates`
1481
1482- :param str target: The target to write the rendered template to
1483+ :param str target: The target to write the rendered template to (or None)
1484 :param str owner: The owner of the rendered file
1485 :param str group: The group of the rendered file
1486 :param int perms: The permissions of the rendered file
1487 :param partial on_change_action: functools partial to be executed when
1488 rendered file changes
1489 :param jinja2 loader template_loader: A jinja2 template loader
1490+
1491+ :return str: The rendered template
1492 """
1493 def __init__(self, source, target,
1494 owner='root', group='root', perms=0o444,
1495@@ -267,12 +269,14 @@
1496 if self.on_change_action and os.path.isfile(self.target):
1497 pre_checksum = host.file_hash(self.target)
1498 service = manager.get_service(service_name)
1499- context = {}
1500+ context = {'ctx': {}}
1501 for ctx in service.get('required_data', []):
1502 context.update(ctx)
1503- templating.render(self.source, self.target, context,
1504- self.owner, self.group, self.perms,
1505- template_loader=self.template_loader)
1506+ context['ctx'].update(ctx)
1507+
1508+ result = templating.render(self.source, self.target, context,
1509+ self.owner, self.group, self.perms,
1510+ template_loader=self.template_loader)
1511 if self.on_change_action:
1512 if pre_checksum == host.file_hash(self.target):
1513 hookenv.log(
1514@@ -281,6 +285,8 @@
1515 else:
1516 self.on_change_action()
1517
1518+ return result
1519+
1520
1521 # Convenience aliases for templates
1522 render_template = template = TemplateCallback
1523
1524=== modified file 'hooks/charmhelpers/core/templating.py'
1525--- hooks/charmhelpers/core/templating.py 2016-01-28 09:03:35 +0000
1526+++ hooks/charmhelpers/core/templating.py 2016-02-02 14:25:45 +0000
1527@@ -27,7 +27,8 @@
1528
1529 The `source` path, if not absolute, is relative to the `templates_dir`.
1530
1531- The `target` path should be absolute.
1532+ The `target` path should be absolute. It can also be `None`, in which
1533+ case no file will be written.
1534
1535 The context should be a dict containing the values to be replaced in the
1536 template.
1537@@ -36,6 +37,9 @@
1538
1539 If omitted, `templates_dir` defaults to the `templates` folder in the charm.
1540
1541+ The rendered template will be written to the file as well as being returned
1542+ as a string.
1543+
1544 Note: Using this requires python-jinja2; if it is not installed, calling
1545 this will attempt to use charmhelpers.fetch.apt_install to install it.
1546 """
1547@@ -67,9 +71,11 @@
1548 level=hookenv.ERROR)
1549 raise e
1550 content = template.render(context)
1551- target_dir = os.path.dirname(target)
1552- if not os.path.exists(target_dir):
1553- # This is a terrible default directory permission, as the file
1554- # or its siblings will often contain secrets.
1555- host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
1556- host.write_file(target, content.encode(encoding), owner, group, perms)
1557+ if target is not None:
1558+ target_dir = os.path.dirname(target)
1559+ if not os.path.exists(target_dir):
1560+ # This is a terrible default directory permission, as the file
1561+ # or its siblings will often contain secrets.
1562+ host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
1563+ host.write_file(target, content.encode(encoding), owner, group, perms)
1564+ return content
1565
1566=== modified file 'hooks/charmhelpers/fetch/__init__.py'
1567--- hooks/charmhelpers/fetch/__init__.py 2016-01-28 09:03:35 +0000
1568+++ hooks/charmhelpers/fetch/__init__.py 2016-02-02 14:25:45 +0000
1569@@ -98,6 +98,14 @@
1570 'liberty/proposed': 'trusty-proposed/liberty',
1571 'trusty-liberty/proposed': 'trusty-proposed/liberty',
1572 'trusty-proposed/liberty': 'trusty-proposed/liberty',
1573+ # Mitaka
1574+ 'mitaka': 'trusty-updates/mitaka',
1575+ 'trusty-mitaka': 'trusty-updates/mitaka',
1576+ 'trusty-mitaka/updates': 'trusty-updates/mitaka',
1577+ 'trusty-updates/mitaka': 'trusty-updates/mitaka',
1578+ 'mitaka/proposed': 'trusty-proposed/mitaka',
1579+ 'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
1580+ 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
1581 }
1582
1583 # The order of this list is very important. Handlers should be listed in from
1584@@ -411,7 +419,7 @@
1585 importlib.import_module(package),
1586 classname)
1587 plugin_list.append(handler_class())
1588- except (ImportError, AttributeError):
1589+ except NotImplementedError:
1590 # Skip missing plugins so that they can be ommitted from
1591 # installation if desired
1592 log("FetchHandler {} not found, skipping plugin".format(
1593
1594=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
1595--- hooks/charmhelpers/fetch/archiveurl.py 2015-08-10 16:33:38 +0000
1596+++ hooks/charmhelpers/fetch/archiveurl.py 2016-02-02 14:25:45 +0000
1597@@ -108,7 +108,7 @@
1598 install_opener(opener)
1599 response = urlopen(source)
1600 try:
1601- with open(dest, 'w') as dest_file:
1602+ with open(dest, 'wb') as dest_file:
1603 dest_file.write(response.read())
1604 except Exception as e:
1605 if os.path.isfile(dest):
1606
1607=== modified file 'hooks/charmhelpers/fetch/bzrurl.py'
1608--- hooks/charmhelpers/fetch/bzrurl.py 2015-01-26 11:53:19 +0000
1609+++ hooks/charmhelpers/fetch/bzrurl.py 2016-02-02 14:25:45 +0000
1610@@ -15,60 +15,50 @@
1611 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1612
1613 import os
1614+from subprocess import check_call
1615 from charmhelpers.fetch import (
1616 BaseFetchHandler,
1617- UnhandledSource
1618+ UnhandledSource,
1619+ filter_installed_packages,
1620+ apt_install,
1621 )
1622 from charmhelpers.core.host import mkdir
1623
1624-import six
1625-if six.PY3:
1626- raise ImportError('bzrlib does not support Python3')
1627
1628-try:
1629- from bzrlib.branch import Branch
1630- from bzrlib import bzrdir, workingtree, errors
1631-except ImportError:
1632- from charmhelpers.fetch import apt_install
1633- apt_install("python-bzrlib")
1634- from bzrlib.branch import Branch
1635- from bzrlib import bzrdir, workingtree, errors
1636+if filter_installed_packages(['bzr']) != []:
1637+ apt_install(['bzr'])
1638+ if filter_installed_packages(['bzr']) != []:
1639+ raise NotImplementedError('Unable to install bzr')
1640
1641
1642 class BzrUrlFetchHandler(BaseFetchHandler):
1643 """Handler for bazaar branches via generic and lp URLs"""
1644 def can_handle(self, source):
1645 url_parts = self.parse_url(source)
1646- if url_parts.scheme not in ('bzr+ssh', 'lp'):
1647+ if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
1648 return False
1649+ elif not url_parts.scheme:
1650+ return os.path.exists(os.path.join(source, '.bzr'))
1651 else:
1652 return True
1653
1654 def branch(self, source, dest):
1655- url_parts = self.parse_url(source)
1656- # If we use lp:branchname scheme we need to load plugins
1657 if not self.can_handle(source):
1658 raise UnhandledSource("Cannot handle {}".format(source))
1659- if url_parts.scheme == "lp":
1660- from bzrlib.plugin import load_plugins
1661- load_plugins()
1662- try:
1663- local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
1664- except errors.AlreadyControlDirError:
1665- local_branch = Branch.open(dest)
1666- try:
1667- remote_branch = Branch.open(source)
1668- remote_branch.push(local_branch)
1669- tree = workingtree.WorkingTree.open(dest)
1670- tree.update()
1671- except Exception as e:
1672- raise e
1673+ if os.path.exists(dest):
1674+ check_call(['bzr', 'pull', '--overwrite', '-d', dest, source])
1675+ else:
1676+ check_call(['bzr', 'branch', source, dest])
1677
1678- def install(self, source):
1679+ def install(self, source, dest=None):
1680 url_parts = self.parse_url(source)
1681 branch_name = url_parts.path.strip("/").split("/")[-1]
1682- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
1683- branch_name)
1684+ if dest:
1685+ dest_dir = os.path.join(dest, branch_name)
1686+ else:
1687+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
1688+ branch_name)
1689+
1690 if not os.path.exists(dest_dir):
1691 mkdir(dest_dir, perms=0o755)
1692 try:
1693
1694=== modified file 'hooks/charmhelpers/fetch/giturl.py'
1695--- hooks/charmhelpers/fetch/giturl.py 2015-08-10 16:33:38 +0000
1696+++ hooks/charmhelpers/fetch/giturl.py 2016-02-02 14:25:45 +0000
1697@@ -15,24 +15,18 @@
1698 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1699
1700 import os
1701+from subprocess import check_call, CalledProcessError
1702 from charmhelpers.fetch import (
1703 BaseFetchHandler,
1704- UnhandledSource
1705+ UnhandledSource,
1706+ filter_installed_packages,
1707+ apt_install,
1708 )
1709-from charmhelpers.core.host import mkdir
1710-
1711-import six
1712-if six.PY3:
1713- raise ImportError('GitPython does not support Python 3')
1714-
1715-try:
1716- from git import Repo
1717-except ImportError:
1718- from charmhelpers.fetch import apt_install
1719- apt_install("python-git")
1720- from git import Repo
1721-
1722-from git.exc import GitCommandError # noqa E402
1723+
1724+if filter_installed_packages(['git']) != []:
1725+ apt_install(['git'])
1726+ if filter_installed_packages(['git']) != []:
1727+ raise NotImplementedError('Unable to install git')
1728
1729
1730 class GitUrlFetchHandler(BaseFetchHandler):
1731@@ -40,19 +34,24 @@
1732 def can_handle(self, source):
1733 url_parts = self.parse_url(source)
1734 # TODO (mattyw) no support for ssh git@ yet
1735- if url_parts.scheme not in ('http', 'https', 'git'):
1736+ if url_parts.scheme not in ('http', 'https', 'git', ''):
1737 return False
1738+ elif not url_parts.scheme:
1739+ return os.path.exists(os.path.join(source, '.git'))
1740 else:
1741 return True
1742
1743- def clone(self, source, dest, branch, depth=None):
1744+ def clone(self, source, dest, branch="master", depth=None):
1745 if not self.can_handle(source):
1746 raise UnhandledSource("Cannot handle {}".format(source))
1747
1748- if depth:
1749- Repo.clone_from(source, dest, branch=branch, depth=depth)
1750+ if os.path.exists(dest):
1751+ cmd = ['git', '-C', dest, 'pull', source, branch]
1752 else:
1753- Repo.clone_from(source, dest, branch=branch)
1754+ cmd = ['git', 'clone', source, dest, '--branch', branch]
1755+ if depth:
1756+ cmd.extend(['--depth', depth])
1757+ check_call(cmd)
1758
1759 def install(self, source, branch="master", dest=None, depth=None):
1760 url_parts = self.parse_url(source)
1761@@ -62,11 +61,9 @@
1762 else:
1763 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
1764 branch_name)
1765- if not os.path.exists(dest_dir):
1766- mkdir(dest_dir, perms=0o755)
1767 try:
1768 self.clone(source, dest_dir, branch, depth)
1769- except GitCommandError as e:
1770+ except CalledProcessError as e:
1771 raise UnhandledSource(e)
1772 except OSError as e:
1773 raise UnhandledSource(e.strerror)
1774
1775=== modified file 'tests/018-basic-trusty-liberty' (properties changed: -x to +x)
1776=== modified file 'tests/019-basic-trusty-mitaka' (properties changed: -x to +x)
1777=== modified file 'tests/020-basic-wily-liberty' (properties changed: -x to +x)
1778=== modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
1779--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2016-01-28 09:03:35 +0000
1780+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2016-02-02 14:25:45 +0000
1781@@ -121,11 +121,12 @@
1782
1783 # Charms which should use the source config option
1784 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
1785- 'ceph-osd', 'ceph-radosgw']
1786+ 'ceph-osd', 'ceph-radosgw', 'ceph-mon']
1787
1788 # Charms which can not use openstack-origin, ie. many subordinates
1789 no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
1790- 'openvswitch-odl', 'neutron-api-odl', 'odl-controller']
1791+ 'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
1792+ 'cinder-backup']
1793
1794 if self.openstack:
1795 for svc in services:
1796@@ -225,7 +226,8 @@
1797 self.precise_havana, self.precise_icehouse,
1798 self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
1799 self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
1800- self.wily_liberty) = range(12)
1801+ self.wily_liberty, self.trusty_mitaka,
1802+ self.xenial_mitaka) = range(14)
1803
1804 releases = {
1805 ('precise', None): self.precise_essex,
1806@@ -237,9 +239,11 @@
1807 ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
1808 ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
1809 ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
1810+ ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
1811 ('utopic', None): self.utopic_juno,
1812 ('vivid', None): self.vivid_kilo,
1813- ('wily', None): self.wily_liberty}
1814+ ('wily', None): self.wily_liberty,
1815+ ('xenial', None): self.xenial_mitaka}
1816 return releases[(self.series, self.openstack)]
1817
1818 def _get_openstack_release_string(self):
1819@@ -256,6 +260,7 @@
1820 ('utopic', 'juno'),
1821 ('vivid', 'kilo'),
1822 ('wily', 'liberty'),
1823+ ('xenial', 'mitaka'),
1824 ])
1825 if self.openstack:
1826 os_origin = self.openstack.split(':')[1]

Subscribers

People subscribed via source and target branches