Merge lp:~niedbalski/charms/trusty/quantum-gateway/lp-1396607 into lp:charms/trusty/quantum-gateway

Proposed by Jorge Niedbalski
Status: Superseded
Proposed branch: lp:~niedbalski/charms/trusty/quantum-gateway/lp-1396607
Merge into: lp:charms/trusty/quantum-gateway
Diff against target: 3645 lines (+1001/-548)
38 files modified
config.yaml (+19/-0)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+16/-7)
hooks/charmhelpers/contrib/network/ip.py (+52/-50)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+2/-1)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+3/-1)
hooks/charmhelpers/contrib/openstack/context.py (+319/-226)
hooks/charmhelpers/contrib/openstack/ip.py (+41/-27)
hooks/charmhelpers/contrib/openstack/neutron.py (+20/-4)
hooks/charmhelpers/contrib/openstack/templating.py (+5/-5)
hooks/charmhelpers/contrib/openstack/utils.py (+35/-12)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+89/-102)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+4/-4)
hooks/charmhelpers/contrib/storage/linux/lvm.py (+1/-0)
hooks/charmhelpers/contrib/storage/linux/utils.py (+3/-2)
hooks/charmhelpers/core/fstab.py (+10/-8)
hooks/charmhelpers/core/hookenv.py (+25/-11)
hooks/charmhelpers/core/host.py (+22/-18)
hooks/charmhelpers/core/services/__init__.py (+2/-2)
hooks/charmhelpers/core/services/helpers.py (+9/-5)
hooks/charmhelpers/core/templating.py (+2/-1)
hooks/charmhelpers/fetch/__init__.py (+18/-12)
hooks/charmhelpers/fetch/archiveurl.py (+53/-16)
hooks/charmhelpers/fetch/bzrurl.py (+5/-1)
hooks/charmhelpers/fetch/giturl.py (+48/-0)
hooks/quantum_contexts.py (+34/-7)
hooks/quantum_hooks.py (+18/-0)
hooks/quantum_utils.py (+45/-3)
templates/havana/dhcp_agent.ini (+11/-1)
templates/havana/l3_agent.ini (+8/-1)
templates/icehouse/ml2_conf.ini (+14/-2)
tests/basic_deployment.py (+2/-2)
tests/charmhelpers/contrib/amulet/deployment.py (+3/-3)
tests/charmhelpers/contrib/amulet/utils.py (+6/-4)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+2/-1)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+3/-1)
unit_tests/test_quantum_contexts.py (+14/-6)
unit_tests/test_quantum_hooks.py (+16/-1)
unit_tests/test_quantum_utils.py (+22/-1)
To merge this branch: bzr merge lp:~niedbalski/charms/trusty/quantum-gateway/lp-1396607
Reviewer: OpenStack Charmers
Review status: Pending
Review via email: mp+242925@code.launchpad.net

This proposal has been superseded by a proposal from 2014-11-26.

Description of the change

Fix for LP: #1396607

Unmerged revisions

79. By Jorge Niedbalski

[hooks] config_changed checks for "sysctl". fixes LP: #1366598

78. By Jorge Niedbalski

[all] make "sync"

77. By Jorge Niedbalski

[hooks] config_changed checks for "sysctl". fixes LP: #1366598
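
For context: the config.yaml hunk in the preview diff below introduces a new "sysctl" charm option (a YAML-formatted map of sysctl values), and the revision messages above note that the config_changed hook now checks for it. The following is a minimal, illustrative sketch of how a hook could consume such an option; the function name apply_sysctl_config and the 'sysctl -w' application step are assumptions for illustration, not the branch's actual implementation.

# Illustrative sketch only; assumes the 'sysctl' option added in config.yaml,
# e.g. set by an operator with:
#   juju set quantum-gateway sysctl="{ kernel.pid_max : 4194303 }"
import subprocess

import yaml

from charmhelpers.core.hookenv import config, log


def apply_sysctl_config():
    """Apply the YAML map from the 'sysctl' charm option, if set."""
    raw = config('sysctl')
    if not raw:
        return

    # e.g. {'kernel.pid_max': 4194303}
    settings = yaml.safe_load(raw)
    for key, value in settings.items():
        # A real hook might instead write these settings to a file under
        # /etc/sysctl.d/ and run 'sysctl -p'; 'sysctl -w' is used here only
        # to keep the sketch short.
        subprocess.check_call(['sysctl', '-w', '%s=%s' % (key, value)])
        log("Set sysctl %s=%s" % (key, value))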

Preview Diff

1=== modified file 'config.yaml'
2--- config.yaml 2014-09-30 14:12:10 +0000
3+++ config.yaml 2014-11-26 13:43:20 +0000
4@@ -8,6 +8,7 @@
5 .
6 ovs - OpenVSwitch
7 nvp|nsx - Nicira NVP/VMware NSX
8+ n1kv - Cisco N1kv
9 ext-port:
10 type: string
11 default:
12@@ -16,6 +17,12 @@
13 traffic to the external public network. Valid values are either MAC
14 addresses (in which case only MAC addresses for interfaces without an IP
15 address already assigned will be used), or interfaces (eth0)
16+ data-port:
17+ type: string
18+ default:
19+ description: |
20+ The data port will be added to br-data and will allow usage of flat or VLAN
21+ network types with Neutron.
22 openstack-origin:
23 type: string
24 default: distro
25@@ -83,6 +90,12 @@
26 within the cloud. This is useful in deployments where its not
27 possible to increase MTU on switches and physical servers to
28 accomodate the packet overhead of using GRE tunnels.
29+ enable-l3-agent:
30+ type: boolean
31+ default: True
32+ description: |
33+ Optional configuration to support use of linux router
34+ Note that this is used only for Cisco n1kv plugin.
35 database-user:
36 default: nova
37 type: string
38@@ -102,3 +115,9 @@
39 .
40 This network will be used for tenant network traffic in overlay
41 networks.
42+ sysctl:
43+ type: string
44+ default:
45+ description: |
46+ YAML formatted associative array of sysctl values, e.g.:
47+ '{ kernel.pid_max : 4194303 }'
48
49=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
50--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-10-07 21:03:47 +0000
51+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-11-26 13:43:20 +0000
52@@ -13,9 +13,10 @@
53
54 import subprocess
55 import os
56-
57 from socket import gethostname as get_unit_hostname
58
59+import six
60+
61 from charmhelpers.core.hookenv import (
62 log,
63 relation_ids,
64@@ -77,7 +78,7 @@
65 "show", resource
66 ]
67 try:
68- status = subprocess.check_output(cmd)
69+ status = subprocess.check_output(cmd).decode('UTF-8')
70 except subprocess.CalledProcessError:
71 return False
72 else:
73@@ -150,34 +151,42 @@
74 return False
75
76
77-def determine_api_port(public_port):
78+def determine_api_port(public_port, singlenode_mode=False):
79 '''
80 Determine correct API server listening port based on
81 existence of HTTPS reverse proxy and/or haproxy.
82
83 public_port: int: standard public port for given service
84
85+ singlenode_mode: boolean: Shuffle ports when only a single unit is present
86+
87 returns: int: the correct listening port for the API service
88 '''
89 i = 0
90- if len(peer_units()) > 0 or is_clustered():
91+ if singlenode_mode:
92+ i += 1
93+ elif len(peer_units()) > 0 or is_clustered():
94 i += 1
95 if https():
96 i += 1
97 return public_port - (i * 10)
98
99
100-def determine_apache_port(public_port):
101+def determine_apache_port(public_port, singlenode_mode=False):
102 '''
103 Description: Determine correct apache listening port based on public IP +
104 state of the cluster.
105
106 public_port: int: standard public port for given service
107
108+ singlenode_mode: boolean: Shuffle ports when only a single unit is present
109+
110 returns: int: the correct listening port for the HAProxy service
111 '''
112 i = 0
113- if len(peer_units()) > 0 or is_clustered():
114+ if singlenode_mode:
115+ i += 1
116+ elif len(peer_units()) > 0 or is_clustered():
117 i += 1
118 return public_port - (i * 10)
119
120@@ -197,7 +206,7 @@
121 for setting in settings:
122 conf[setting] = config_get(setting)
123 missing = []
124- [missing.append(s) for s, v in conf.iteritems() if v is None]
125+ [missing.append(s) for s, v in six.iteritems(conf) if v is None]
126 if missing:
127 log('Insufficient config data to configure hacluster.', level=ERROR)
128 raise HAIncompleteConfig
129
130=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
131--- hooks/charmhelpers/contrib/network/ip.py 2014-10-16 17:42:14 +0000
132+++ hooks/charmhelpers/contrib/network/ip.py 2014-11-26 13:43:20 +0000
133@@ -1,15 +1,12 @@
134 import glob
135 import re
136 import subprocess
137-import sys
138
139 from functools import partial
140
141 from charmhelpers.core.hookenv import unit_get
142 from charmhelpers.fetch import apt_install
143 from charmhelpers.core.hookenv import (
144- WARNING,
145- ERROR,
146 log
147 )
148
149@@ -34,31 +31,28 @@
150 network)
151
152
153+def no_ip_found_error_out(network):
154+ errmsg = ("No IP address found in network: %s" % network)
155+ raise ValueError(errmsg)
156+
157+
158 def get_address_in_network(network, fallback=None, fatal=False):
159- """
160- Get an IPv4 or IPv6 address within the network from the host.
161+ """Get an IPv4 or IPv6 address within the network from the host.
162
163 :param network (str): CIDR presentation format. For example,
164 '192.168.1.0/24'.
165 :param fallback (str): If no address is found, return fallback.
166 :param fatal (boolean): If no address is found, fallback is not
167 set and fatal is True then exit(1).
168-
169 """
170-
171- def not_found_error_out():
172- log("No IP address found in network: %s" % network,
173- level=ERROR)
174- sys.exit(1)
175-
176 if network is None:
177 if fallback is not None:
178 return fallback
179+
180+ if fatal:
181+ no_ip_found_error_out(network)
182 else:
183- if fatal:
184- not_found_error_out()
185- else:
186- return None
187+ return None
188
189 _validate_cidr(network)
190 network = netaddr.IPNetwork(network)
191@@ -70,6 +64,7 @@
192 cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
193 if cidr in network:
194 return str(cidr.ip)
195+
196 if network.version == 6 and netifaces.AF_INET6 in addresses:
197 for addr in addresses[netifaces.AF_INET6]:
198 if not addr['addr'].startswith('fe80'):
199@@ -82,20 +77,20 @@
200 return fallback
201
202 if fatal:
203- not_found_error_out()
204+ no_ip_found_error_out(network)
205
206 return None
207
208
209 def is_ipv6(address):
210- '''Determine whether provided address is IPv6 or not'''
211+ """Determine whether provided address is IPv6 or not."""
212 try:
213 address = netaddr.IPAddress(address)
214 except netaddr.AddrFormatError:
215 # probably a hostname - so not an address at all!
216 return False
217- else:
218- return address.version == 6
219+
220+ return address.version == 6
221
222
223 def is_address_in_network(network, address):
224@@ -113,11 +108,13 @@
225 except (netaddr.core.AddrFormatError, ValueError):
226 raise ValueError("Network (%s) is not in CIDR presentation format" %
227 network)
228+
229 try:
230 address = netaddr.IPAddress(address)
231 except (netaddr.core.AddrFormatError, ValueError):
232 raise ValueError("Address (%s) is not in correct presentation format" %
233 address)
234+
235 if address in network:
236 return True
237 else:
238@@ -147,6 +144,7 @@
239 return iface
240 else:
241 return addresses[netifaces.AF_INET][0][key]
242+
243 if address.version == 6 and netifaces.AF_INET6 in addresses:
244 for addr in addresses[netifaces.AF_INET6]:
245 if not addr['addr'].startswith('fe80'):
246@@ -160,41 +158,42 @@
247 return str(cidr).split('/')[1]
248 else:
249 return addr[key]
250+
251 return None
252
253
254 get_iface_for_address = partial(_get_for_address, key='iface')
255
256+
257 get_netmask_for_address = partial(_get_for_address, key='netmask')
258
259
260 def format_ipv6_addr(address):
261- """
262- IPv6 needs to be wrapped with [] in url link to parse correctly.
263+ """If address is IPv6, wrap it in '[]' otherwise return None.
264+
265+ This is required by most configuration files when specifying IPv6
266+ addresses.
267 """
268 if is_ipv6(address):
269- address = "[%s]" % address
270- else:
271- log("Not a valid ipv6 address: %s" % address, level=WARNING)
272- address = None
273+ return "[%s]" % address
274
275- return address
276+ return None
277
278
279 def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
280 fatal=True, exc_list=None):
281- """
282- Return the assigned IP address for a given interface, if any, or [].
283- """
284+ """Return the assigned IP address for a given interface, if any."""
285 # Extract nic if passed /dev/ethX
286 if '/' in iface:
287 iface = iface.split('/')[-1]
288+
289 if not exc_list:
290 exc_list = []
291+
292 try:
293 inet_num = getattr(netifaces, inet_type)
294 except AttributeError:
295- raise Exception('Unknown inet type ' + str(inet_type))
296+ raise Exception("Unknown inet type '%s'" % str(inet_type))
297
298 interfaces = netifaces.interfaces()
299 if inc_aliases:
300@@ -202,15 +201,18 @@
301 for _iface in interfaces:
302 if iface == _iface or _iface.split(':')[0] == iface:
303 ifaces.append(_iface)
304+
305 if fatal and not ifaces:
306 raise Exception("Invalid interface '%s'" % iface)
307+
308 ifaces.sort()
309 else:
310 if iface not in interfaces:
311 if fatal:
312- raise Exception("%s not found " % (iface))
313+ raise Exception("Interface '%s' not found " % (iface))
314 else:
315 return []
316+
317 else:
318 ifaces = [iface]
319
320@@ -221,10 +223,13 @@
321 for entry in net_info[inet_num]:
322 if 'addr' in entry and entry['addr'] not in exc_list:
323 addresses.append(entry['addr'])
324+
325 if fatal and not addresses:
326 raise Exception("Interface '%s' doesn't have any %s addresses." %
327 (iface, inet_type))
328- return addresses
329+
330+ return sorted(addresses)
331+
332
333 get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
334
335@@ -241,6 +246,7 @@
336 raw = re.match(ll_key, _addr)
337 if raw:
338 _addr = raw.group(1)
339+
340 if _addr == addr:
341 log("Address '%s' is configured on iface '%s'" %
342 (addr, iface))
343@@ -251,8 +257,9 @@
344
345
346 def sniff_iface(f):
347- """If no iface provided, inject net iface inferred from unit private
348- address.
349+ """Ensure decorated function is called with a value for iface.
350+
351+ If no iface provided, inject net iface inferred from unit private address.
352 """
353 def iface_sniffer(*args, **kwargs):
354 if not kwargs.get('iface', None):
355@@ -295,7 +302,7 @@
356 if global_addrs:
357 # Make sure any found global addresses are not temporary
358 cmd = ['ip', 'addr', 'show', iface]
359- out = subprocess.check_output(cmd)
360+ out = subprocess.check_output(cmd).decode('UTF-8')
361 if dynamic_only:
362 key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
363 else:
364@@ -317,33 +324,28 @@
365 return addrs
366
367 if fatal:
368- raise Exception("Interface '%s' doesn't have a scope global "
369+ raise Exception("Interface '%s' does not have a scope global "
370 "non-temporary ipv6 address." % iface)
371
372 return []
373
374
375 def get_bridges(vnic_dir='/sys/devices/virtual/net'):
376- """
377- Return a list of bridges on the system or []
378- """
379- b_rgex = vnic_dir + '/*/bridge'
380- return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)]
381+ """Return a list of bridges on the system."""
382+ b_regex = "%s/*/bridge" % vnic_dir
383+ return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
384
385
386 def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
387- """
388- Return a list of nics comprising a given bridge on the system or []
389- """
390- brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge)
391- return [x.split('/')[-1] for x in glob.glob(brif_rgex)]
392+ """Return a list of nics comprising a given bridge on the system."""
393+ brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
394+ return [x.split('/')[-1] for x in glob.glob(brif_regex)]
395
396
397 def is_bridge_member(nic):
398- """
399- Check if a given nic is a member of a bridge
400- """
401+ """Check if a given nic is a member of a bridge."""
402 for bridge in get_bridges():
403 if nic in get_bridge_nics(bridge):
404 return True
405+
406 return False
407
408=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
409--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-10-07 21:03:47 +0000
410+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-11-26 13:43:20 +0000
411@@ -1,3 +1,4 @@
412+import six
413 from charmhelpers.contrib.amulet.deployment import (
414 AmuletDeployment
415 )
416@@ -69,7 +70,7 @@
417
418 def _configure_services(self, configs):
419 """Configure all of the services."""
420- for service, config in configs.iteritems():
421+ for service, config in six.iteritems(configs):
422 self.d.configure(service, config)
423
424 def _get_openstack_release(self):
425
426=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
427--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-09-25 15:37:05 +0000
428+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-11-26 13:43:20 +0000
429@@ -7,6 +7,8 @@
430 import keystoneclient.v2_0 as keystone_client
431 import novaclient.v1_1.client as nova_client
432
433+import six
434+
435 from charmhelpers.contrib.amulet.utils import (
436 AmuletUtils
437 )
438@@ -60,7 +62,7 @@
439 expected service catalog endpoints.
440 """
441 self.log.debug('actual: {}'.format(repr(actual)))
442- for k, v in expected.iteritems():
443+ for k, v in six.iteritems(expected):
444 if k in actual:
445 ret = self._validate_dict_data(expected[k][0], actual[k][0])
446 if ret:
447
448=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
449--- hooks/charmhelpers/contrib/openstack/context.py 2014-10-07 21:03:47 +0000
450+++ hooks/charmhelpers/contrib/openstack/context.py 2014-11-26 13:43:20 +0000
451@@ -1,20 +1,18 @@
452 import json
453 import os
454 import time
455-
456 from base64 import b64decode
457+from subprocess import check_call
458
459-from subprocess import (
460- check_call
461-)
462+import six
463
464 from charmhelpers.fetch import (
465 apt_install,
466 filter_installed_packages,
467 )
468-
469 from charmhelpers.core.hookenv import (
470 config,
471+ is_relation_made,
472 local_unit,
473 log,
474 relation_get,
475@@ -23,43 +21,40 @@
476 relation_set,
477 unit_get,
478 unit_private_ip,
479+ DEBUG,
480+ INFO,
481+ WARNING,
482 ERROR,
483- INFO
484 )
485-
486 from charmhelpers.core.host import (
487 mkdir,
488- write_file
489+ write_file,
490 )
491-
492 from charmhelpers.contrib.hahelpers.cluster import (
493 determine_apache_port,
494 determine_api_port,
495 https,
496- is_clustered
497+ is_clustered,
498 )
499-
500 from charmhelpers.contrib.hahelpers.apache import (
501 get_cert,
502 get_ca_cert,
503 install_ca_cert,
504 )
505-
506 from charmhelpers.contrib.openstack.neutron import (
507 neutron_plugin_attribute,
508 )
509-
510 from charmhelpers.contrib.network.ip import (
511 get_address_in_network,
512 get_ipv6_addr,
513 get_netmask_for_address,
514 format_ipv6_addr,
515- is_address_in_network
516+ is_address_in_network,
517 )
518-
519 from charmhelpers.contrib.openstack.utils import get_host_ip
520
521 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
522+ADDRESS_TYPES = ['admin', 'internal', 'public']
523
524
525 class OSContextError(Exception):
526@@ -67,7 +62,7 @@
527
528
529 def ensure_packages(packages):
530- '''Install but do not upgrade required plugin packages'''
531+ """Install but do not upgrade required plugin packages."""
532 required = filter_installed_packages(packages)
533 if required:
534 apt_install(required, fatal=True)
535@@ -75,20 +70,27 @@
536
537 def context_complete(ctxt):
538 _missing = []
539- for k, v in ctxt.iteritems():
540+ for k, v in six.iteritems(ctxt):
541 if v is None or v == '':
542 _missing.append(k)
543+
544 if _missing:
545- log('Missing required data: %s' % ' '.join(_missing), level='INFO')
546+ log('Missing required data: %s' % ' '.join(_missing), level=INFO)
547 return False
548+
549 return True
550
551
552 def config_flags_parser(config_flags):
553+ """Parses config flags string into dict.
554+
555+ The provided config_flags string may be a list of comma-separated values
556+ which themselves may be comma-separated list of values.
557+ """
558 if config_flags.find('==') >= 0:
559- log("config_flags is not in expected format (key=value)",
560- level=ERROR)
561+ log("config_flags is not in expected format (key=value)", level=ERROR)
562 raise OSContextError
563+
564 # strip the following from each value.
565 post_strippers = ' ,'
566 # we strip any leading/trailing '=' or ' ' from the string then
567@@ -96,7 +98,7 @@
568 split = config_flags.strip(' =').split('=')
569 limit = len(split)
570 flags = {}
571- for i in xrange(0, limit - 1):
572+ for i in range(0, limit - 1):
573 current = split[i]
574 next = split[i + 1]
575 vindex = next.rfind(',')
576@@ -111,17 +113,18 @@
577 # if this not the first entry, expect an embedded key.
578 index = current.rfind(',')
579 if index < 0:
580- log("invalid config value(s) at index %s" % (i),
581- level=ERROR)
582+ log("Invalid config value(s) at index %s" % (i), level=ERROR)
583 raise OSContextError
584 key = current[index + 1:]
585
586 # Add to collection.
587 flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
588+
589 return flags
590
591
592 class OSContextGenerator(object):
593+ """Base class for all context generators."""
594 interfaces = []
595
596 def __call__(self):
597@@ -133,11 +136,11 @@
598
599 def __init__(self,
600 database=None, user=None, relation_prefix=None, ssl_dir=None):
601- '''
602- Allows inspecting relation for settings prefixed with relation_prefix.
603- This is useful for parsing access for multiple databases returned via
604- the shared-db interface (eg, nova_password, quantum_password)
605- '''
606+ """Allows inspecting relation for settings prefixed with
607+ relation_prefix. This is useful for parsing access for multiple
608+ databases returned via the shared-db interface (eg, nova_password,
609+ quantum_password)
610+ """
611 self.relation_prefix = relation_prefix
612 self.database = database
613 self.user = user
614@@ -147,9 +150,8 @@
615 self.database = self.database or config('database')
616 self.user = self.user or config('database-user')
617 if None in [self.database, self.user]:
618- log('Could not generate shared_db context. '
619- 'Missing required charm config options. '
620- '(database name and user)')
621+ log("Could not generate shared_db context. Missing required charm "
622+ "config options. (database name and user)", level=ERROR)
623 raise OSContextError
624
625 ctxt = {}
626@@ -202,23 +204,24 @@
627 def __call__(self):
628 self.database = self.database or config('database')
629 if self.database is None:
630- log('Could not generate postgresql_db context. '
631- 'Missing required charm config options. '
632- '(database name)')
633+ log('Could not generate postgresql_db context. Missing required '
634+ 'charm config options. (database name)', level=ERROR)
635 raise OSContextError
636+
637 ctxt = {}
638-
639 for rid in relation_ids(self.interfaces[0]):
640 for unit in related_units(rid):
641- ctxt = {
642- 'database_host': relation_get('host', rid=rid, unit=unit),
643- 'database': self.database,
644- 'database_user': relation_get('user', rid=rid, unit=unit),
645- 'database_password': relation_get('password', rid=rid, unit=unit),
646- 'database_type': 'postgresql',
647- }
648+ rel_host = relation_get('host', rid=rid, unit=unit)
649+ rel_user = relation_get('user', rid=rid, unit=unit)
650+ rel_passwd = relation_get('password', rid=rid, unit=unit)
651+ ctxt = {'database_host': rel_host,
652+ 'database': self.database,
653+ 'database_user': rel_user,
654+ 'database_password': rel_passwd,
655+ 'database_type': 'postgresql'}
656 if context_complete(ctxt):
657 return ctxt
658+
659 return {}
660
661
662@@ -227,23 +230,29 @@
663 ca_path = os.path.join(ssl_dir, 'db-client.ca')
664 with open(ca_path, 'w') as fh:
665 fh.write(b64decode(rdata['ssl_ca']))
666+
667 ctxt['database_ssl_ca'] = ca_path
668 elif 'ssl_ca' in rdata:
669- log("Charm not setup for ssl support but ssl ca found")
670+ log("Charm not setup for ssl support but ssl ca found", level=INFO)
671 return ctxt
672+
673 if 'ssl_cert' in rdata:
674 cert_path = os.path.join(
675 ssl_dir, 'db-client.cert')
676 if not os.path.exists(cert_path):
677- log("Waiting 1m for ssl client cert validity")
678+ log("Waiting 1m for ssl client cert validity", level=INFO)
679 time.sleep(60)
680+
681 with open(cert_path, 'w') as fh:
682 fh.write(b64decode(rdata['ssl_cert']))
683+
684 ctxt['database_ssl_cert'] = cert_path
685 key_path = os.path.join(ssl_dir, 'db-client.key')
686 with open(key_path, 'w') as fh:
687 fh.write(b64decode(rdata['ssl_key']))
688+
689 ctxt['database_ssl_key'] = key_path
690+
691 return ctxt
692
693
694@@ -251,9 +260,8 @@
695 interfaces = ['identity-service']
696
697 def __call__(self):
698- log('Generating template context for identity-service')
699+ log('Generating template context for identity-service', level=DEBUG)
700 ctxt = {}
701-
702 for rid in relation_ids('identity-service'):
703 for unit in related_units(rid):
704 rdata = relation_get(rid=rid, unit=unit)
705@@ -261,26 +269,24 @@
706 serv_host = format_ipv6_addr(serv_host) or serv_host
707 auth_host = rdata.get('auth_host')
708 auth_host = format_ipv6_addr(auth_host) or auth_host
709-
710- ctxt = {
711- 'service_port': rdata.get('service_port'),
712- 'service_host': serv_host,
713- 'auth_host': auth_host,
714- 'auth_port': rdata.get('auth_port'),
715- 'admin_tenant_name': rdata.get('service_tenant'),
716- 'admin_user': rdata.get('service_username'),
717- 'admin_password': rdata.get('service_password'),
718- 'service_protocol':
719- rdata.get('service_protocol') or 'http',
720- 'auth_protocol':
721- rdata.get('auth_protocol') or 'http',
722- }
723+ svc_protocol = rdata.get('service_protocol') or 'http'
724+ auth_protocol = rdata.get('auth_protocol') or 'http'
725+ ctxt = {'service_port': rdata.get('service_port'),
726+ 'service_host': serv_host,
727+ 'auth_host': auth_host,
728+ 'auth_port': rdata.get('auth_port'),
729+ 'admin_tenant_name': rdata.get('service_tenant'),
730+ 'admin_user': rdata.get('service_username'),
731+ 'admin_password': rdata.get('service_password'),
732+ 'service_protocol': svc_protocol,
733+ 'auth_protocol': auth_protocol}
734 if context_complete(ctxt):
735 # NOTE(jamespage) this is required for >= icehouse
736 # so a missing value just indicates keystone needs
737 # upgrading
738 ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
739 return ctxt
740+
741 return {}
742
743
744@@ -293,21 +299,23 @@
745 self.interfaces = [rel_name]
746
747 def __call__(self):
748- log('Generating template context for amqp')
749+ log('Generating template context for amqp', level=DEBUG)
750 conf = config()
751- user_setting = 'rabbit-user'
752- vhost_setting = 'rabbit-vhost'
753 if self.relation_prefix:
754- user_setting = self.relation_prefix + '-rabbit-user'
755- vhost_setting = self.relation_prefix + '-rabbit-vhost'
756+ user_setting = '%s-rabbit-user' % (self.relation_prefix)
757+ vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
758+ else:
759+ user_setting = 'rabbit-user'
760+ vhost_setting = 'rabbit-vhost'
761
762 try:
763 username = conf[user_setting]
764 vhost = conf[vhost_setting]
765 except KeyError as e:
766- log('Could not generate shared_db context. '
767- 'Missing required charm config options: %s.' % e)
768+ log('Could not generate shared_db context. Missing required charm '
769+ 'config options: %s.' % e, level=ERROR)
770 raise OSContextError
771+
772 ctxt = {}
773 for rid in relation_ids(self.rel_name):
774 ha_vip_only = False
775@@ -321,6 +329,7 @@
776 host = relation_get('private-address', rid=rid, unit=unit)
777 host = format_ipv6_addr(host) or host
778 ctxt['rabbitmq_host'] = host
779+
780 ctxt.update({
781 'rabbitmq_user': username,
782 'rabbitmq_password': relation_get('password', rid=rid,
783@@ -331,6 +340,7 @@
784 ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
785 if ssl_port:
786 ctxt['rabbit_ssl_port'] = ssl_port
787+
788 ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
789 if ssl_ca:
790 ctxt['rabbit_ssl_ca'] = ssl_ca
791@@ -344,41 +354,45 @@
792 if context_complete(ctxt):
793 if 'rabbit_ssl_ca' in ctxt:
794 if not self.ssl_dir:
795- log(("Charm not setup for ssl support "
796- "but ssl ca found"))
797+ log("Charm not setup for ssl support but ssl ca "
798+ "found", level=INFO)
799 break
800+
801 ca_path = os.path.join(
802 self.ssl_dir, 'rabbit-client-ca.pem')
803 with open(ca_path, 'w') as fh:
804 fh.write(b64decode(ctxt['rabbit_ssl_ca']))
805 ctxt['rabbit_ssl_ca'] = ca_path
806+
807 # Sufficient information found = break out!
808 break
809+
810 # Used for active/active rabbitmq >= grizzly
811- if ('clustered' not in ctxt or ha_vip_only) \
812- and len(related_units(rid)) > 1:
813+ if (('clustered' not in ctxt or ha_vip_only) and
814+ len(related_units(rid)) > 1):
815 rabbitmq_hosts = []
816 for unit in related_units(rid):
817 host = relation_get('private-address', rid=rid, unit=unit)
818 host = format_ipv6_addr(host) or host
819 rabbitmq_hosts.append(host)
820- ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
821+
822+ ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
823+
824 if not context_complete(ctxt):
825 return {}
826- else:
827- return ctxt
828+
829+ return ctxt
830
831
832 class CephContext(OSContextGenerator):
833+ """Generates context for /etc/ceph/ceph.conf templates."""
834 interfaces = ['ceph']
835
836 def __call__(self):
837- '''This generates context for /etc/ceph/ceph.conf templates'''
838 if not relation_ids('ceph'):
839 return {}
840
841- log('Generating template context for ceph')
842-
843+ log('Generating template context for ceph', level=DEBUG)
844 mon_hosts = []
845 auth = None
846 key = None
847@@ -387,18 +401,18 @@
848 for unit in related_units(rid):
849 auth = relation_get('auth', rid=rid, unit=unit)
850 key = relation_get('key', rid=rid, unit=unit)
851- ceph_addr = \
852- relation_get('ceph-public-address', rid=rid, unit=unit) or \
853- relation_get('private-address', rid=rid, unit=unit)
854+ ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
855+ unit=unit)
856+ unit_priv_addr = relation_get('private-address', rid=rid,
857+ unit=unit)
858+ ceph_addr = ceph_pub_addr or unit_priv_addr
859 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
860 mon_hosts.append(ceph_addr)
861
862- ctxt = {
863- 'mon_hosts': ' '.join(mon_hosts),
864- 'auth': auth,
865- 'key': key,
866- 'use_syslog': use_syslog
867- }
868+ ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
869+ 'auth': auth,
870+ 'key': key,
871+ 'use_syslog': use_syslog}
872
873 if not os.path.isdir('/etc/ceph'):
874 os.mkdir('/etc/ceph')
875@@ -407,79 +421,68 @@
876 return {}
877
878 ensure_packages(['ceph-common'])
879-
880 return ctxt
881
882
883-ADDRESS_TYPES = ['admin', 'internal', 'public']
884-
885-
886 class HAProxyContext(OSContextGenerator):
887+ """Provides half a context for the haproxy template, which describes
888+ all peers to be included in the cluster. Each charm needs to include
889+ its own context generator that describes the port mapping.
890+ """
891 interfaces = ['cluster']
892
893+ def __init__(self, singlenode_mode=False):
894+ self.singlenode_mode = singlenode_mode
895+
896 def __call__(self):
897- '''
898- Builds half a context for the haproxy template, which describes
899- all peers to be included in the cluster. Each charm needs to include
900- its own context generator that describes the port mapping.
901- '''
902- if not relation_ids('cluster'):
903+ if not relation_ids('cluster') and not self.singlenode_mode:
904 return {}
905
906- l_unit = local_unit().replace('/', '-')
907-
908 if config('prefer-ipv6'):
909 addr = get_ipv6_addr(exc_list=[config('vip')])[0]
910 else:
911 addr = get_host_ip(unit_get('private-address'))
912
913+ l_unit = local_unit().replace('/', '-')
914 cluster_hosts = {}
915
916 # NOTE(jamespage): build out map of configured network endpoints
917 # and associated backends
918 for addr_type in ADDRESS_TYPES:
919- laddr = get_address_in_network(
920- config('os-{}-network'.format(addr_type)))
921+ cfg_opt = 'os-{}-network'.format(addr_type)
922+ laddr = get_address_in_network(config(cfg_opt))
923 if laddr:
924- cluster_hosts[laddr] = {}
925- cluster_hosts[laddr]['network'] = "{}/{}".format(
926- laddr,
927- get_netmask_for_address(laddr)
928- )
929- cluster_hosts[laddr]['backends'] = {}
930- cluster_hosts[laddr]['backends'][l_unit] = laddr
931+ netmask = get_netmask_for_address(laddr)
932+ cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
933+ netmask),
934+ 'backends': {l_unit: laddr}}
935 for rid in relation_ids('cluster'):
936 for unit in related_units(rid):
937- _unit = unit.replace('/', '-')
938 _laddr = relation_get('{}-address'.format(addr_type),
939 rid=rid, unit=unit)
940 if _laddr:
941+ _unit = unit.replace('/', '-')
942 cluster_hosts[laddr]['backends'][_unit] = _laddr
943
944 # NOTE(jamespage) no split configurations found, just use
945 # private addresses
946 if not cluster_hosts:
947- cluster_hosts[addr] = {}
948- cluster_hosts[addr]['network'] = "{}/{}".format(
949- addr,
950- get_netmask_for_address(addr)
951- )
952- cluster_hosts[addr]['backends'] = {}
953- cluster_hosts[addr]['backends'][l_unit] = addr
954+ netmask = get_netmask_for_address(addr)
955+ cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
956+ 'backends': {l_unit: addr}}
957 for rid in relation_ids('cluster'):
958 for unit in related_units(rid):
959- _unit = unit.replace('/', '-')
960 _laddr = relation_get('private-address',
961 rid=rid, unit=unit)
962 if _laddr:
963+ _unit = unit.replace('/', '-')
964 cluster_hosts[addr]['backends'][_unit] = _laddr
965
966- ctxt = {
967- 'frontends': cluster_hosts,
968- }
969+ ctxt = {'frontends': cluster_hosts}
970
971 if config('haproxy-server-timeout'):
972 ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
973+
974 if config('haproxy-client-timeout'):
975 ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
976
977@@ -493,13 +496,18 @@
978 ctxt['stat_port'] = ':8888'
979
980 for frontend in cluster_hosts:
981- if len(cluster_hosts[frontend]['backends']) > 1:
982+ if (len(cluster_hosts[frontend]['backends']) > 1 or
983+ self.singlenode_mode):
984 # Enable haproxy when we have enough peers.
985- log('Ensuring haproxy enabled in /etc/default/haproxy.')
986+ log('Ensuring haproxy enabled in /etc/default/haproxy.',
987+ level=DEBUG)
988 with open('/etc/default/haproxy', 'w') as out:
989 out.write('ENABLED=1\n')
990+
991 return ctxt
992- log('HAProxy context is incomplete, this unit has no peers.')
993+
994+ log('HAProxy context is incomplete, this unit has no peers.',
995+ level=INFO)
996 return {}
997
998
999@@ -507,29 +515,28 @@
1000 interfaces = ['image-service']
1001
1002 def __call__(self):
1003- '''
1004- Obtains the glance API server from the image-service relation. Useful
1005- in nova and cinder (currently).
1006- '''
1007- log('Generating template context for image-service.')
1008+ """Obtains the glance API server from the image-service relation.
1009+ Useful in nova and cinder (currently).
1010+ """
1011+ log('Generating template context for image-service.', level=DEBUG)
1012 rids = relation_ids('image-service')
1013 if not rids:
1014 return {}
1015+
1016 for rid in rids:
1017 for unit in related_units(rid):
1018 api_server = relation_get('glance-api-server',
1019 rid=rid, unit=unit)
1020 if api_server:
1021 return {'glance_api_servers': api_server}
1022- log('ImageService context is incomplete. '
1023- 'Missing required relation data.')
1024+
1025+ log("ImageService context is incomplete. Missing required relation "
1026+ "data.", level=INFO)
1027 return {}
1028
1029
1030 class ApacheSSLContext(OSContextGenerator):
1031-
1032- """
1033- Generates a context for an apache vhost configuration that configures
1034+ """Generates a context for an apache vhost configuration that configures
1035 HTTPS reverse proxying for one or many endpoints. Generated context
1036 looks something like::
1037
1038@@ -563,6 +570,7 @@
1039 else:
1040 cert_filename = 'cert'
1041 key_filename = 'key'
1042+
1043 write_file(path=os.path.join(ssl_dir, cert_filename),
1044 content=b64decode(cert))
1045 write_file(path=os.path.join(ssl_dir, key_filename),
1046@@ -574,7 +582,8 @@
1047 install_ca_cert(b64decode(ca_cert))
1048
1049 def canonical_names(self):
1050- '''Figure out which canonical names clients will access this service'''
1051+ """Figure out which canonical names clients will access this service.
1052+ """
1053 cns = []
1054 for r_id in relation_ids('identity-service'):
1055 for unit in related_units(r_id):
1056@@ -582,55 +591,80 @@
1057 for k in rdata:
1058 if k.startswith('ssl_key_'):
1059 cns.append(k.lstrip('ssl_key_'))
1060- return list(set(cns))
1061+
1062+ return sorted(list(set(cns)))
1063+
1064+ def get_network_addresses(self):
1065+ """For each network configured, return corresponding address and vip
1066+ (if available).
1067+
1068+ Returns a list of tuples of the form:
1069+
1070+ [(address_in_net_a, vip_in_net_a),
1071+ (address_in_net_b, vip_in_net_b),
1072+ ...]
1073+
1074+ or, if no vip(s) available:
1075+
1076+ [(address_in_net_a, address_in_net_a),
1077+ (address_in_net_b, address_in_net_b),
1078+ ...]
1079+ """
1080+ addresses = []
1081+ if config('vip'):
1082+ vips = config('vip').split()
1083+ else:
1084+ vips = []
1085+
1086+ for net_type in ['os-internal-network', 'os-admin-network',
1087+ 'os-public-network']:
1088+ addr = get_address_in_network(config(net_type),
1089+ unit_get('private-address'))
1090+ if len(vips) > 1 and is_clustered():
1091+ if not config(net_type):
1092+ log("Multiple networks configured but net_type "
1093+ "is None (%s)." % net_type, level=WARNING)
1094+ continue
1095+
1096+ for vip in vips:
1097+ if is_address_in_network(config(net_type), vip):
1098+ addresses.append((addr, vip))
1099+ break
1100+
1101+ elif is_clustered() and config('vip'):
1102+ addresses.append((addr, config('vip')))
1103+ else:
1104+ addresses.append((addr, addr))
1105+
1106+ return sorted(addresses)
1107
1108 def __call__(self):
1109- if isinstance(self.external_ports, basestring):
1110+ if isinstance(self.external_ports, six.string_types):
1111 self.external_ports = [self.external_ports]
1112- if (not self.external_ports or not https()):
1113+
1114+ if not self.external_ports or not https():
1115 return {}
1116
1117 self.configure_ca()
1118 self.enable_modules()
1119
1120- ctxt = {
1121- 'namespace': self.service_namespace,
1122- 'endpoints': [],
1123- 'ext_ports': []
1124- }
1125+ ctxt = {'namespace': self.service_namespace,
1126+ 'endpoints': [],
1127+ 'ext_ports': []}
1128
1129 for cn in self.canonical_names():
1130 self.configure_cert(cn)
1131
1132- addresses = []
1133- vips = []
1134- if config('vip'):
1135- vips = config('vip').split()
1136-
1137- for network_type in ['os-internal-network',
1138- 'os-admin-network',
1139- 'os-public-network']:
1140- address = get_address_in_network(config(network_type),
1141- unit_get('private-address'))
1142- if len(vips) > 0 and is_clustered():
1143- for vip in vips:
1144- if is_address_in_network(config(network_type),
1145- vip):
1146- addresses.append((address, vip))
1147- break
1148- elif is_clustered():
1149- addresses.append((address, config('vip')))
1150- else:
1151- addresses.append((address, address))
1152-
1153- for address, endpoint in set(addresses):
1154+ addresses = self.get_network_addresses()
1155+ for address, endpoint in sorted(set(addresses)):
1156 for api_port in self.external_ports:
1157 ext_port = determine_apache_port(api_port)
1158 int_port = determine_api_port(api_port)
1159 portmap = (address, endpoint, int(ext_port), int(int_port))
1160 ctxt['endpoints'].append(portmap)
1161 ctxt['ext_ports'].append(int(ext_port))
1162- ctxt['ext_ports'] = list(set(ctxt['ext_ports']))
1163+
1164+ ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
1165 return ctxt
1166
1167
1168@@ -647,21 +681,23 @@
1169
1170 @property
1171 def packages(self):
1172- return neutron_plugin_attribute(
1173- self.plugin, 'packages', self.network_manager)
1174+ return neutron_plugin_attribute(self.plugin, 'packages',
1175+ self.network_manager)
1176
1177 @property
1178 def neutron_security_groups(self):
1179 return None
1180
1181 def _ensure_packages(self):
1182- [ensure_packages(pkgs) for pkgs in self.packages]
1183+ for pkgs in self.packages:
1184+ ensure_packages(pkgs)
1185
1186 def _save_flag_file(self):
1187 if self.network_manager == 'quantum':
1188 _file = '/etc/nova/quantum_plugin.conf'
1189 else:
1190 _file = '/etc/nova/neutron_plugin.conf'
1191+
1192 with open(_file, 'wb') as out:
1193 out.write(self.plugin + '\n')
1194
1195@@ -670,13 +706,11 @@
1196 self.network_manager)
1197 config = neutron_plugin_attribute(self.plugin, 'config',
1198 self.network_manager)
1199- ovs_ctxt = {
1200- 'core_plugin': driver,
1201- 'neutron_plugin': 'ovs',
1202- 'neutron_security_groups': self.neutron_security_groups,
1203- 'local_ip': unit_private_ip(),
1204- 'config': config
1205- }
1206+ ovs_ctxt = {'core_plugin': driver,
1207+ 'neutron_plugin': 'ovs',
1208+ 'neutron_security_groups': self.neutron_security_groups,
1209+ 'local_ip': unit_private_ip(),
1210+ 'config': config}
1211
1212 return ovs_ctxt
1213
1214@@ -685,13 +719,11 @@
1215 self.network_manager)
1216 config = neutron_plugin_attribute(self.plugin, 'config',
1217 self.network_manager)
1218- nvp_ctxt = {
1219- 'core_plugin': driver,
1220- 'neutron_plugin': 'nvp',
1221- 'neutron_security_groups': self.neutron_security_groups,
1222- 'local_ip': unit_private_ip(),
1223- 'config': config
1224- }
1225+ nvp_ctxt = {'core_plugin': driver,
1226+ 'neutron_plugin': 'nvp',
1227+ 'neutron_security_groups': self.neutron_security_groups,
1228+ 'local_ip': unit_private_ip(),
1229+ 'config': config}
1230
1231 return nvp_ctxt
1232
1233@@ -700,35 +732,50 @@
1234 self.network_manager)
1235 n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
1236 self.network_manager)
1237- n1kv_ctxt = {
1238- 'core_plugin': driver,
1239- 'neutron_plugin': 'n1kv',
1240- 'neutron_security_groups': self.neutron_security_groups,
1241- 'local_ip': unit_private_ip(),
1242- 'config': n1kv_config,
1243- 'vsm_ip': config('n1kv-vsm-ip'),
1244- 'vsm_username': config('n1kv-vsm-username'),
1245- 'vsm_password': config('n1kv-vsm-password'),
1246- 'restrict_policy_profiles': config(
1247- 'n1kv_restrict_policy_profiles'),
1248- }
1249+ n1kv_user_config_flags = config('n1kv-config-flags')
1250+ restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
1251+ n1kv_ctxt = {'core_plugin': driver,
1252+ 'neutron_plugin': 'n1kv',
1253+ 'neutron_security_groups': self.neutron_security_groups,
1254+ 'local_ip': unit_private_ip(),
1255+ 'config': n1kv_config,
1256+ 'vsm_ip': config('n1kv-vsm-ip'),
1257+ 'vsm_username': config('n1kv-vsm-username'),
1258+ 'vsm_password': config('n1kv-vsm-password'),
1259+ 'restrict_policy_profiles': restrict_policy_profiles}
1260+
1261+ if n1kv_user_config_flags:
1262+ flags = config_flags_parser(n1kv_user_config_flags)
1263+ n1kv_ctxt['user_config_flags'] = flags
1264
1265 return n1kv_ctxt
1266
1267+ def calico_ctxt(self):
1268+ driver = neutron_plugin_attribute(self.plugin, 'driver',
1269+ self.network_manager)
1270+ config = neutron_plugin_attribute(self.plugin, 'config',
1271+ self.network_manager)
1272+ calico_ctxt = {'core_plugin': driver,
1273+ 'neutron_plugin': 'Calico',
1274+ 'neutron_security_groups': self.neutron_security_groups,
1275+ 'local_ip': unit_private_ip(),
1276+ 'config': config}
1277+
1278+ return calico_ctxt
1279+
1280 def neutron_ctxt(self):
1281 if https():
1282 proto = 'https'
1283 else:
1284 proto = 'http'
1285+
1286 if is_clustered():
1287 host = config('vip')
1288 else:
1289 host = unit_get('private-address')
1290- url = '%s://%s:%s' % (proto, host, '9696')
1291- ctxt = {
1292- 'network_manager': self.network_manager,
1293- 'neutron_url': url,
1294- }
1295+
1296+ ctxt = {'network_manager': self.network_manager,
1297+ 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
1298 return ctxt
1299
1300 def __call__(self):
1301@@ -748,6 +795,8 @@
1302 ctxt.update(self.nvp_ctxt())
1303 elif self.plugin == 'n1kv':
1304 ctxt.update(self.n1kv_ctxt())
1305+ elif self.plugin == 'Calico':
1306+ ctxt.update(self.calico_ctxt())
1307
1308 alchemy_flags = config('neutron-alchemy-flags')
1309 if alchemy_flags:
1310@@ -759,23 +808,40 @@
1311
1312
1313 class OSConfigFlagContext(OSContextGenerator):
1314-
1315- """
1316- Responsible for adding user-defined config-flags in charm config to a
1317- template context.
1318+ """Provides support for user-defined config flags.
1319+
1320+ Users can define a comma-seperated list of key=value pairs
1321+ in the charm configuration and apply them at any point in
1322+ any file by using a template flag.
1323+
1324+ Sometimes users might want config flags inserted within a
1325+ specific section so this class allows users to specify the
1326+ template flag name, allowing for multiple template flags
1327+ (sections) within the same context.
1328
1329 NOTE: the value of config-flags may be a comma-separated list of
1330 key=value pairs and some Openstack config files support
1331 comma-separated lists as values.
1332 """
1333
1334+ def __init__(self, charm_flag='config-flags',
1335+ template_flag='user_config_flags'):
1336+ """
1337+ :param charm_flag: config flags in charm configuration.
1338+ :param template_flag: insert point for user-defined flags in template
1339+ file.
1340+ """
1341+ super(OSConfigFlagContext, self).__init__()
1342+ self._charm_flag = charm_flag
1343+ self._template_flag = template_flag
1344+
1345 def __call__(self):
1346- config_flags = config('config-flags')
1347+ config_flags = config(self._charm_flag)
1348 if not config_flags:
1349 return {}
1350
1351- flags = config_flags_parser(config_flags)
1352- return {'user_config_flags': flags}
1353+ return {self._template_flag:
1354+ config_flags_parser(config_flags)}
1355
1356
1357 class SubordinateConfigContext(OSContextGenerator):
1358@@ -819,7 +885,6 @@
1359 },
1360 }
1361 }
1362-
1363 """
1364
1365 def __init__(self, service, config_file, interface):
1366@@ -849,26 +914,28 @@
1367
1368 if self.service not in sub_config:
1369 log('Found subordinate_config on %s but it contained'
1370- 'nothing for %s service' % (rid, self.service))
1371+ 'nothing for %s service' % (rid, self.service),
1372+ level=INFO)
1373 continue
1374
1375 sub_config = sub_config[self.service]
1376 if self.config_file not in sub_config:
1377 log('Found subordinate_config on %s but it contained'
1378- 'nothing for %s' % (rid, self.config_file))
1379+ 'nothing for %s' % (rid, self.config_file),
1380+ level=INFO)
1381 continue
1382
1383 sub_config = sub_config[self.config_file]
1384- for k, v in sub_config.iteritems():
1385+ for k, v in six.iteritems(sub_config):
1386 if k == 'sections':
1387- for section, config_dict in v.iteritems():
1388- log("adding section '%s'" % (section))
1389+ for section, config_dict in six.iteritems(v):
1390+ log("adding section '%s'" % (section),
1391+ level=DEBUG)
1392 ctxt[k][section] = config_dict
1393 else:
1394 ctxt[k] = v
1395
1396- log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
1397-
1398+ log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
1399 return ctxt
1400
1401
1402@@ -880,15 +947,14 @@
1403 False if config('debug') is None else config('debug')
1404 ctxt['verbose'] = \
1405 False if config('verbose') is None else config('verbose')
1406+
1407 return ctxt
1408
1409
1410 class SyslogContext(OSContextGenerator):
1411
1412 def __call__(self):
1413- ctxt = {
1414- 'use_syslog': config('use-syslog')
1415- }
1416+ ctxt = {'use_syslog': config('use-syslog')}
1417 return ctxt
1418
1419
1420@@ -896,13 +962,9 @@
1421
1422 def __call__(self):
1423 if config('prefer-ipv6'):
1424- return {
1425- 'bind_host': '::'
1426- }
1427+ return {'bind_host': '::'}
1428 else:
1429- return {
1430- 'bind_host': '0.0.0.0'
1431- }
1432+ return {'bind_host': '0.0.0.0'}
1433
1434
1435 class WorkerConfigContext(OSContextGenerator):
1436@@ -914,11 +976,42 @@
1437 except ImportError:
1438 apt_install('python-psutil', fatal=True)
1439 from psutil import NUM_CPUS
1440+
1441 return NUM_CPUS
1442
1443 def __call__(self):
1444- multiplier = config('worker-multiplier') or 1
1445- ctxt = {
1446- "workers": self.num_cpus * multiplier
1447- }
1448+ multiplier = config('worker-multiplier') or 0
1449+ ctxt = {"workers": self.num_cpus * multiplier}
1450+ return ctxt
1451+
1452+
1453+class ZeroMQContext(OSContextGenerator):
1454+ interfaces = ['zeromq-configuration']
1455+
1456+ def __call__(self):
1457+ ctxt = {}
1458+ if is_relation_made('zeromq-configuration', 'host'):
1459+ for rid in relation_ids('zeromq-configuration'):
1460+ for unit in related_units(rid):
1461+ ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
1462+ ctxt['zmq_host'] = relation_get('host', unit, rid)
1463+
1464+ return ctxt
1465+
1466+
1467+class NotificationDriverContext(OSContextGenerator):
1468+
1469+ def __init__(self, zmq_relation='zeromq-configuration',
1470+ amqp_relation='amqp'):
1471+ """
1472+ :param zmq_relation: Name of Zeromq relation to check
1473+ """
1474+ self.zmq_relation = zmq_relation
1475+ self.amqp_relation = amqp_relation
1476+
1477+ def __call__(self):
1478+ ctxt = {'notifications': 'False'}
1479+ if is_relation_made(self.amqp_relation):
1480+ ctxt['notifications'] = "True"
1481+
1482 return ctxt
1483
1484=== modified file 'hooks/charmhelpers/contrib/openstack/ip.py'
1485--- hooks/charmhelpers/contrib/openstack/ip.py 2014-10-07 21:03:47 +0000
1486+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-11-26 13:43:20 +0000
1487@@ -2,21 +2,19 @@
1488 config,
1489 unit_get,
1490 )
1491-
1492 from charmhelpers.contrib.network.ip import (
1493 get_address_in_network,
1494 is_address_in_network,
1495 is_ipv6,
1496 get_ipv6_addr,
1497 )
1498-
1499 from charmhelpers.contrib.hahelpers.cluster import is_clustered
1500
1501 PUBLIC = 'public'
1502 INTERNAL = 'int'
1503 ADMIN = 'admin'
1504
1505-_address_map = {
1506+ADDRESS_MAP = {
1507 PUBLIC: {
1508 'config': 'os-public-network',
1509 'fallback': 'public-address'
1510@@ -33,16 +31,14 @@
1511
1512
1513 def canonical_url(configs, endpoint_type=PUBLIC):
1514- '''
1515- Returns the correct HTTP URL to this host given the state of HTTPS
1516+ """Returns the correct HTTP URL to this host given the state of HTTPS
1517 configuration, hacluster and charm configuration.
1518
1519- :configs OSTemplateRenderer: A config tempating object to inspect for
1520- a complete https context.
1521- :endpoint_type str: The endpoint type to resolve.
1522-
1523- :returns str: Base URL for services on the current service unit.
1524- '''
1525+ :param configs: OSTemplateRenderer config templating object to inspect
1526+ for a complete https context.
1527+ :param endpoint_type: str endpoint type to resolve.
1528+ :param returns: str base URL for services on the current service unit.
1529+ """
1530 scheme = 'http'
1531 if 'https' in configs.complete_contexts():
1532 scheme = 'https'
1533@@ -53,27 +49,45 @@
1534
1535
1536 def resolve_address(endpoint_type=PUBLIC):
1537+ """Return unit address depending on net config.
1538+
1539+ If unit is clustered with vip(s) and has net splits defined, return vip on
1540+ correct network. If clustered with no nets defined, return primary vip.
1541+
1542+ If not clustered, return unit address ensuring address is on configured net
1543+ split if one is configured.
1544+
1545+ :param endpoint_type: Network endpoing type
1546+ """
1547 resolved_address = None
1548- if is_clustered():
1549- if config(_address_map[endpoint_type]['config']) is None:
1550- # Assume vip is simple and pass back directly
1551- resolved_address = config('vip')
1552+ vips = config('vip')
1553+ if vips:
1554+ vips = vips.split()
1555+
1556+ net_type = ADDRESS_MAP[endpoint_type]['config']
1557+ net_addr = config(net_type)
1558+ net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
1559+ clustered = is_clustered()
1560+ if clustered:
1561+ if not net_addr:
1562+ # If no net-splits defined, we expect a single vip
1563+ resolved_address = vips[0]
1564 else:
1565- for vip in config('vip').split():
1566- if is_address_in_network(
1567- config(_address_map[endpoint_type]['config']),
1568- vip):
1569+ for vip in vips:
1570+ if is_address_in_network(net_addr, vip):
1571 resolved_address = vip
1572+ break
1573 else:
1574 if config('prefer-ipv6'):
1575- fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
1576+ fallback_addr = get_ipv6_addr(exc_list=vips)[0]
1577 else:
1578- fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
1579- resolved_address = get_address_in_network(
1580- config(_address_map[endpoint_type]['config']), fallback_addr)
1581+ fallback_addr = unit_get(net_fallback)
1582+
1583+ resolved_address = get_address_in_network(net_addr, fallback_addr)
1584
1585 if resolved_address is None:
1586- raise ValueError('Unable to resolve a suitable IP address'
1587- ' based on charm state and configuration')
1588- else:
1589- return resolved_address
1590+ raise ValueError("Unable to resolve a suitable IP address based on "
1591+ "charm state and configuration. (net_type=%s, "
1592+ "clustered=%s)" % (net_type, clustered))
1593+
1594+ return resolved_address
1595
1596=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
1597--- hooks/charmhelpers/contrib/openstack/neutron.py 2014-06-24 13:40:39 +0000
1598+++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-11-26 13:43:20 +0000
1599@@ -14,7 +14,7 @@
1600 def headers_package():
1601 """Ensures correct linux-headers for running kernel are installed,
1602 for building DKMS package"""
1603- kver = check_output(['uname', '-r']).strip()
1604+ kver = check_output(['uname', '-r']).decode('UTF-8').strip()
1605 return 'linux-headers-%s' % kver
1606
1607 QUANTUM_CONF_DIR = '/etc/quantum'
1608@@ -22,7 +22,7 @@
1609
1610 def kernel_version():
1611 """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
1612- kver = check_output(['uname', '-r']).strip()
1613+ kver = check_output(['uname', '-r']).decode('UTF-8').strip()
1614 kver = kver.split('.')
1615 return (int(kver[0]), int(kver[1]))
1616
1617@@ -138,10 +138,25 @@
1618 relation_prefix='neutron',
1619 ssl_dir=NEUTRON_CONF_DIR)],
1620 'services': [],
1621- 'packages': [['neutron-plugin-cisco']],
1622+ 'packages': [[headers_package()] + determine_dkms_package(),
1623+ ['neutron-plugin-cisco']],
1624 'server_packages': ['neutron-server',
1625 'neutron-plugin-cisco'],
1626 'server_services': ['neutron-server']
1627+ },
1628+ 'Calico': {
1629+ 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
1630+ 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
1631+ 'contexts': [
1632+ context.SharedDBContext(user=config('neutron-database-user'),
1633+ database=config('neutron-database'),
1634+ relation_prefix='neutron',
1635+ ssl_dir=NEUTRON_CONF_DIR)],
1636+ 'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'],
1637+ 'packages': [[headers_package()] + determine_dkms_package(),
1638+ ['calico-compute', 'bird', 'neutron-dhcp-agent']],
1639+ 'server_packages': ['neutron-server', 'calico-control'],
1640+ 'server_services': ['neutron-server']
1641 }
1642 }
1643 if release >= 'icehouse':
1644@@ -162,7 +177,8 @@
1645 elif manager == 'neutron':
1646 plugins = neutron_plugins()
1647 else:
1648- log('Error: Network manager does not support plugins.')
1649+ log("Network manager '%s' does not support plugins." % (manager),
1650+ level=ERROR)
1651 raise Exception
1652
1653 try:
1654
1655=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
1656--- hooks/charmhelpers/contrib/openstack/templating.py 2014-07-29 07:46:01 +0000
1657+++ hooks/charmhelpers/contrib/openstack/templating.py 2014-11-26 13:43:20 +0000
1658@@ -1,13 +1,13 @@
1659 import os
1660
1661+import six
1662+
1663 from charmhelpers.fetch import apt_install
1664-
1665 from charmhelpers.core.hookenv import (
1666 log,
1667 ERROR,
1668 INFO
1669 )
1670-
1671 from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
1672
1673 try:
1674@@ -43,7 +43,7 @@
1675 order by OpenStack release.
1676 """
1677 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
1678- for rel in OPENSTACK_CODENAMES.itervalues()]
1679+ for rel in six.itervalues(OPENSTACK_CODENAMES)]
1680
1681 if not os.path.isdir(templates_dir):
1682 log('Templates directory not found @ %s.' % templates_dir,
1683@@ -258,7 +258,7 @@
1684 """
1685 Write out all registered config files.
1686 """
1687- [self.write(k) for k in self.templates.iterkeys()]
1688+ [self.write(k) for k in six.iterkeys(self.templates)]
1689
1690 def set_release(self, openstack_release):
1691 """
1692@@ -275,5 +275,5 @@
1693 '''
1694 interfaces = []
1695 [interfaces.extend(i.complete_contexts())
1696- for i in self.templates.itervalues()]
1697+ for i in six.itervalues(self.templates)]
1698 return interfaces
1699
1700=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
1701--- hooks/charmhelpers/contrib/openstack/utils.py 2014-10-07 21:03:47 +0000
1702+++ hooks/charmhelpers/contrib/openstack/utils.py 2014-11-26 13:43:20 +0000
1703@@ -2,6 +2,7 @@
1704
1705 # Common python helper functions used for OpenStack charms.
1706 from collections import OrderedDict
1707+from functools import wraps
1708
1709 import subprocess
1710 import json
1711@@ -9,11 +10,12 @@
1712 import socket
1713 import sys
1714
1715+import six
1716+
1717 from charmhelpers.core.hookenv import (
1718 config,
1719 log as juju_log,
1720 charm_dir,
1721- ERROR,
1722 INFO,
1723 relation_ids,
1724 relation_set
1725@@ -112,7 +114,7 @@
1726
1727 # Best guess match based on deb string provided
1728 if src.startswith('deb') or src.startswith('ppa'):
1729- for k, v in OPENSTACK_CODENAMES.iteritems():
1730+ for k, v in six.iteritems(OPENSTACK_CODENAMES):
1731 if v in src:
1732 return v
1733
1734@@ -133,7 +135,7 @@
1735
1736 def get_os_version_codename(codename):
1737 '''Determine OpenStack version number from codename.'''
1738- for k, v in OPENSTACK_CODENAMES.iteritems():
1739+ for k, v in six.iteritems(OPENSTACK_CODENAMES):
1740 if v == codename:
1741 return k
1742 e = 'Could not derive OpenStack version for '\
1743@@ -193,7 +195,7 @@
1744 else:
1745 vers_map = OPENSTACK_CODENAMES
1746
1747- for version, cname in vers_map.iteritems():
1748+ for version, cname in six.iteritems(vers_map):
1749 if cname == codename:
1750 return version
1751 # e = "Could not determine OpenStack version for package: %s" % pkg
1752@@ -317,7 +319,7 @@
1753 rc_script.write(
1754 "#!/bin/bash\n")
1755 [rc_script.write('export %s=%s\n' % (u, p))
1756- for u, p in env_vars.iteritems() if u != "script_path"]
1757+ for u, p in six.iteritems(env_vars) if u != "script_path"]
1758
1759
1760 def openstack_upgrade_available(package):
1761@@ -350,8 +352,8 @@
1762 '''
1763 _none = ['None', 'none', None]
1764 if (block_device in _none):
1765- error_out('prepare_storage(): Missing required input: '
1766- 'block_device=%s.' % block_device, level=ERROR)
1767+ error_out('prepare_storage(): Missing required input: block_device=%s.'
1768+ % block_device)
1769
1770 if block_device.startswith('/dev/'):
1771 bdev = block_device
1772@@ -367,8 +369,7 @@
1773 bdev = '/dev/%s' % block_device
1774
1775 if not is_block_device(bdev):
1776- error_out('Failed to locate valid block device at %s' % bdev,
1777- level=ERROR)
1778+ error_out('Failed to locate valid block device at %s' % bdev)
1779
1780 return bdev
1781
1782@@ -417,7 +418,7 @@
1783
1784 if isinstance(address, dns.name.Name):
1785 rtype = 'PTR'
1786- elif isinstance(address, basestring):
1787+ elif isinstance(address, six.string_types):
1788 rtype = 'A'
1789 else:
1790 return None
1791@@ -468,6 +469,14 @@
1792 return result.split('.')[0]
1793
1794
1795+def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
1796+ mm_map = {}
1797+ if os.path.isfile(mm_file):
1798+ with open(mm_file, 'r') as f:
1799+ mm_map = json.load(f)
1800+ return mm_map
1801+
1802+
1803 def sync_db_with_multi_ipv6_addresses(database, database_user,
1804 relation_prefix=None):
1805 hosts = get_ipv6_addr(dynamic_only=False)
1806@@ -477,10 +486,24 @@
1807 'hostname': json.dumps(hosts)}
1808
1809 if relation_prefix:
1810- keys = kwargs.keys()
1811- for key in keys:
1812+ for key in list(kwargs.keys()):
1813 kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
1814 del kwargs[key]
1815
1816 for rid in relation_ids('shared-db'):
1817 relation_set(relation_id=rid, **kwargs)
1818+
1819+
1820+def os_requires_version(ostack_release, pkg):
1821+ """
1822+ Decorator for hook to specify minimum supported release
1823+ """
1824+ def wrap(f):
1825+ @wraps(f)
1826+ def wrapped_f(*args):
1827+ if os_release(pkg) < ostack_release:
1828+ raise Exception("This hook is not supported on releases"
1829+ " before %s" % ostack_release)
1830+ f(*args)
1831+ return wrapped_f
1832+ return wrap
1833
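Besides the six conversions, this hunk adds an os_requires_version decorator that guards a hook against running on too old an OpenStack release; the comparison works because the codenames sort alphabetically in release order (grizzly < havana < icehouse < juno). A sketch of how a charm hook might use it (the hook name is hypothetical, not part of the diff):

    from charmhelpers.contrib.openstack.utils import os_requires_version

    @os_requires_version('icehouse', 'neutron-common')
    def icehouse_only_hook():
        # The decorator raises an Exception, failing the hook, if
        # os_release('neutron-common') reports anything older than icehouse.
        pass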
1834=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
1835--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-07-29 07:46:01 +0000
1836+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-11-26 13:43:20 +0000
1837@@ -16,19 +16,18 @@
1838 from subprocess import (
1839 check_call,
1840 check_output,
1841- CalledProcessError
1842+ CalledProcessError,
1843 )
1844-
1845 from charmhelpers.core.hookenv import (
1846 relation_get,
1847 relation_ids,
1848 related_units,
1849 log,
1850+ DEBUG,
1851 INFO,
1852 WARNING,
1853- ERROR
1854+ ERROR,
1855 )
1856-
1857 from charmhelpers.core.host import (
1858 mount,
1859 mounts,
1860@@ -37,7 +36,6 @@
1861 service_running,
1862 umount,
1863 )
1864-
1865 from charmhelpers.fetch import (
1866 apt_install,
1867 )
1868@@ -56,99 +54,85 @@
1869
1870
1871 def install():
1872- ''' Basic Ceph client installation '''
1873+ """Basic Ceph client installation."""
1874 ceph_dir = "/etc/ceph"
1875 if not os.path.exists(ceph_dir):
1876 os.mkdir(ceph_dir)
1877+
1878 apt_install('ceph-common', fatal=True)
1879
1880
1881 def rbd_exists(service, pool, rbd_img):
1882- ''' Check to see if a RADOS block device exists '''
1883+ """Check to see if a RADOS block device exists."""
1884 try:
1885- out = check_output(['rbd', 'list', '--id', service,
1886- '--pool', pool])
1887+ out = check_output(['rbd', 'list', '--id',
1888+ service, '--pool', pool]).decode('UTF-8')
1889 except CalledProcessError:
1890 return False
1891- else:
1892- return rbd_img in out
1893+
1894+ return rbd_img in out
1895
1896
1897 def create_rbd_image(service, pool, image, sizemb):
1898- ''' Create a new RADOS block device '''
1899- cmd = [
1900- 'rbd',
1901- 'create',
1902- image,
1903- '--size',
1904- str(sizemb),
1905- '--id',
1906- service,
1907- '--pool',
1908- pool
1909- ]
1910+ """Create a new RADOS block device."""
1911+ cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
1912+ '--pool', pool]
1913 check_call(cmd)
1914
1915
1916 def pool_exists(service, name):
1917- ''' Check to see if a RADOS pool already exists '''
1918+ """Check to see if a RADOS pool already exists."""
1919 try:
1920- out = check_output(['rados', '--id', service, 'lspools'])
1921+ out = check_output(['rados', '--id', service,
1922+ 'lspools']).decode('UTF-8')
1923 except CalledProcessError:
1924 return False
1925- else:
1926- return name in out
1927+
1928+ return name in out
1929
1930
1931 def get_osds(service):
1932- '''
1933- Return a list of all Ceph Object Storage Daemons
1934- currently in the cluster
1935- '''
1936+ """Return a list of all Ceph Object Storage Daemons currently in the
1937+ cluster.
1938+ """
1939 version = ceph_version()
1940 if version and version >= '0.56':
1941 return json.loads(check_output(['ceph', '--id', service,
1942- 'osd', 'ls', '--format=json']))
1943- else:
1944- return None
1945-
1946-
1947-def create_pool(service, name, replicas=2):
1948- ''' Create a new RADOS pool '''
1949+ 'osd', 'ls',
1950+ '--format=json']).decode('UTF-8'))
1951+
1952+ return None
1953+
1954+
1955+def create_pool(service, name, replicas=3):
1956+ """Create a new RADOS pool."""
1957 if pool_exists(service, name):
1958 log("Ceph pool {} already exists, skipping creation".format(name),
1959 level=WARNING)
1960 return
1961+
1962 # Calculate the number of placement groups based
1963 # on upstream recommended best practices.
1964 osds = get_osds(service)
1965 if osds:
1966- pgnum = (len(osds) * 100 / replicas)
1967+ pgnum = (len(osds) * 100 // replicas)
1968 else:
1969 # NOTE(james-page): Default to 200 for older ceph versions
1970 # which don't support OSD query from cli
1971 pgnum = 200
1972- cmd = [
1973- 'ceph', '--id', service,
1974- 'osd', 'pool', 'create',
1975- name, str(pgnum)
1976- ]
1977+
1978+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
1979 check_call(cmd)
1980- cmd = [
1981- 'ceph', '--id', service,
1982- 'osd', 'pool', 'set', name,
1983- 'size', str(replicas)
1984- ]
1985+
1986+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
1987+ str(replicas)]
1988 check_call(cmd)
1989
1990
1991 def delete_pool(service, name):
1992- ''' Delete a RADOS pool from ceph '''
1993- cmd = [
1994- 'ceph', '--id', service,
1995- 'osd', 'pool', 'delete',
1996- name, '--yes-i-really-really-mean-it'
1997- ]
1998+ """Delete a RADOS pool from ceph."""
1999+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
2000+ '--yes-i-really-really-mean-it']
2001 check_call(cmd)
2002
2003
2004@@ -161,44 +145,43 @@
2005
2006
2007 def create_keyring(service, key):
2008- ''' Create a new Ceph keyring containing key'''
2009+ """Create a new Ceph keyring containing key."""
2010 keyring = _keyring_path(service)
2011 if os.path.exists(keyring):
2012- log('ceph: Keyring exists at %s.' % keyring, level=WARNING)
2013+ log('Ceph keyring exists at %s.' % keyring, level=WARNING)
2014 return
2015- cmd = [
2016- 'ceph-authtool',
2017- keyring,
2018- '--create-keyring',
2019- '--name=client.{}'.format(service),
2020- '--add-key={}'.format(key)
2021- ]
2022+
2023+ cmd = ['ceph-authtool', keyring, '--create-keyring',
2024+ '--name=client.{}'.format(service), '--add-key={}'.format(key)]
2025 check_call(cmd)
2026- log('ceph: Created new ring at %s.' % keyring, level=INFO)
2027+ log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
2028
2029
2030 def create_key_file(service, key):
2031- ''' Create a file containing key '''
2032+ """Create a file containing key."""
2033 keyfile = _keyfile_path(service)
2034 if os.path.exists(keyfile):
2035- log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING)
2036+ log('Keyfile exists at %s.' % keyfile, level=WARNING)
2037 return
2038+
2039 with open(keyfile, 'w') as fd:
2040 fd.write(key)
2041- log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
2042+
2043+ log('Created new keyfile at %s.' % keyfile, level=INFO)
2044
2045
2046 def get_ceph_nodes():
2047- ''' Query named relation 'ceph' to detemine current nodes '''
2048+ """Query named relation 'ceph' to determine current nodes."""
2049 hosts = []
2050 for r_id in relation_ids('ceph'):
2051 for unit in related_units(r_id):
2052 hosts.append(relation_get('private-address', unit=unit, rid=r_id))
2053+
2054 return hosts
2055
2056
2057 def configure(service, key, auth, use_syslog):
2058- ''' Perform basic configuration of Ceph '''
2059+ """Perform basic configuration of Ceph."""
2060 create_keyring(service, key)
2061 create_key_file(service, key)
2062 hosts = get_ceph_nodes()
2063@@ -211,17 +194,17 @@
2064
2065
2066 def image_mapped(name):
2067- ''' Determine whether a RADOS block device is mapped locally '''
2068+ """Determine whether a RADOS block device is mapped locally."""
2069 try:
2070- out = check_output(['rbd', 'showmapped'])
2071+ out = check_output(['rbd', 'showmapped']).decode('UTF-8')
2072 except CalledProcessError:
2073 return False
2074- else:
2075- return name in out
2076+
2077+ return name in out
2078
2079
2080 def map_block_storage(service, pool, image):
2081- ''' Map a RADOS block device for local use '''
2082+ """Map a RADOS block device for local use."""
2083 cmd = [
2084 'rbd',
2085 'map',
2086@@ -235,31 +218,32 @@
2087
2088
2089 def filesystem_mounted(fs):
2090- ''' Determine whether a filesytems is already mounted '''
2091+ """Determine whether a filesystem is already mounted."""
2092 return fs in [f for f, m in mounts()]
2093
2094
2095 def make_filesystem(blk_device, fstype='ext4', timeout=10):
2096- ''' Make a new filesystem on the specified block device '''
2097+ """Make a new filesystem on the specified block device."""
2098 count = 0
2099 e_noent = os.errno.ENOENT
2100 while not os.path.exists(blk_device):
2101 if count >= timeout:
2102- log('ceph: gave up waiting on block device %s' % blk_device,
2103+ log('Gave up waiting on block device %s' % blk_device,
2104 level=ERROR)
2105 raise IOError(e_noent, os.strerror(e_noent), blk_device)
2106- log('ceph: waiting for block device %s to appear' % blk_device,
2107- level=INFO)
2108+
2109+ log('Waiting for block device %s to appear' % blk_device,
2110+ level=DEBUG)
2111 count += 1
2112 time.sleep(1)
2113 else:
2114- log('ceph: Formatting block device %s as filesystem %s.' %
2115+ log('Formatting block device %s as filesystem %s.' %
2116 (blk_device, fstype), level=INFO)
2117 check_call(['mkfs', '-t', fstype, blk_device])
2118
2119
2120 def place_data_on_block_device(blk_device, data_src_dst):
2121- ''' Migrate data in data_src_dst to blk_device and then remount '''
2122+ """Migrate data in data_src_dst to blk_device and then remount."""
2123 # mount block device into /mnt
2124 mount(blk_device, '/mnt')
2125 # copy data to /mnt
2126@@ -279,8 +263,8 @@
2127
2128 # TODO: re-use
2129 def modprobe(module):
2130- ''' Load a kernel module and configure for auto-load on reboot '''
2131- log('ceph: Loading kernel module', level=INFO)
2132+ """Load a kernel module and configure for auto-load on reboot."""
2133+ log('Loading kernel module', level=INFO)
2134 cmd = ['modprobe', module]
2135 check_call(cmd)
2136 with open('/etc/modules', 'r+') as modules:
2137@@ -289,7 +273,7 @@
2138
2139
2140 def copy_files(src, dst, symlinks=False, ignore=None):
2141- ''' Copy files from src to dst '''
2142+ """Copy files from src to dst."""
2143 for item in os.listdir(src):
2144 s = os.path.join(src, item)
2145 d = os.path.join(dst, item)
2146@@ -300,9 +284,9 @@
2147
2148
2149 def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
2150- blk_device, fstype, system_services=[]):
2151- """
2152- NOTE: This function must only be called from a single service unit for
2153+ blk_device, fstype, system_services=[],
2154+ replicas=3):
2155+ """NOTE: This function must only be called from a single service unit for
2156 the same rbd_img otherwise data loss will occur.
2157
2158 Ensures given pool and RBD image exists, is mapped to a block device,
2159@@ -316,15 +300,16 @@
2160 """
2161 # Ensure pool, RBD image, RBD mappings are in place.
2162 if not pool_exists(service, pool):
2163- log('ceph: Creating new pool {}.'.format(pool))
2164- create_pool(service, pool)
2165+ log('Creating new pool {}.'.format(pool), level=INFO)
2166+ create_pool(service, pool, replicas=replicas)
2167
2168 if not rbd_exists(service, pool, rbd_img):
2169- log('ceph: Creating RBD image ({}).'.format(rbd_img))
2170+ log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
2171 create_rbd_image(service, pool, rbd_img, sizemb)
2172
2173 if not image_mapped(rbd_img):
2174- log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))
2175+ log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
2176+ level=INFO)
2177 map_block_storage(service, pool, rbd_img)
2178
2179 # make file system
2180@@ -339,45 +324,47 @@
2181
2182 for svc in system_services:
2183 if service_running(svc):
2184- log('ceph: Stopping services {} prior to migrating data.'
2185- .format(svc))
2186+ log('Stopping services {} prior to migrating data.'
2187+ .format(svc), level=DEBUG)
2188 service_stop(svc)
2189
2190 place_data_on_block_device(blk_device, mount_point)
2191
2192 for svc in system_services:
2193- log('ceph: Starting service {} after migrating data.'
2194- .format(svc))
2195+ log('Starting service {} after migrating data.'
2196+ .format(svc), level=DEBUG)
2197 service_start(svc)
2198
2199
2200 def ensure_ceph_keyring(service, user=None, group=None):
2201- '''
2202- Ensures a ceph keyring is created for a named service
2203- and optionally ensures user and group ownership.
2204+ """Ensures a ceph keyring is created for a named service and optionally
2205+ ensures user and group ownership.
2206
2207 Returns False if no ceph key is available in relation state.
2208- '''
2209+ """
2210 key = None
2211 for rid in relation_ids('ceph'):
2212 for unit in related_units(rid):
2213 key = relation_get('key', rid=rid, unit=unit)
2214 if key:
2215 break
2216+
2217 if not key:
2218 return False
2219+
2220 create_keyring(service=service, key=key)
2221 keyring = _keyring_path(service)
2222 if user and group:
2223 check_call(['chown', '%s.%s' % (user, group), keyring])
2224+
2225 return True
2226
2227
2228 def ceph_version():
2229- ''' Retrieve the local version of ceph '''
2230+ """Retrieve the local version of ceph."""
2231 if os.path.exists('/usr/bin/ceph'):
2232 cmd = ['ceph', '-v']
2233- output = check_output(cmd)
2234+ output = check_output(cmd).decode('US-ASCII')
2235 output = output.split()
2236 if len(output) > 3:
2237 return output[2]
2238
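Two behavioural details in the ceph helper are worth noting: create_pool now defaults to three replicas, and the placement-group count uses integer division so it stays an int on Python 3. A worked example of the sizing rule (illustrative numbers, not part of the diff):

    osds = ['0', '1', '2', '3', '4', '5']   # IDs as returned by 'ceph osd ls'
    replicas = 3                            # new default (previously 2)
    pgnum = len(osds) * 100 // replicas     # 6 * 100 // 3 == 200 placement groups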
2239=== modified file 'hooks/charmhelpers/contrib/storage/linux/loopback.py'
2240--- hooks/charmhelpers/contrib/storage/linux/loopback.py 2013-11-08 05:55:44 +0000
2241+++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2014-11-26 13:43:20 +0000
2242@@ -1,12 +1,12 @@
2243-
2244 import os
2245 import re
2246-
2247 from subprocess import (
2248 check_call,
2249 check_output,
2250 )
2251
2252+import six
2253+
2254
2255 ##################################################
2256 # loopback device helpers.
2257@@ -37,7 +37,7 @@
2258 '''
2259 file_path = os.path.abspath(file_path)
2260 check_call(['losetup', '--find', file_path])
2261- for d, f in loopback_devices().iteritems():
2262+ for d, f in six.iteritems(loopback_devices()):
2263 if f == file_path:
2264 return d
2265
2266@@ -51,7 +51,7 @@
2267
2268 :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
2269 '''
2270- for d, f in loopback_devices().iteritems():
2271+ for d, f in six.iteritems(loopback_devices()):
2272 if f == path:
2273 return d
2274
2275
2276=== modified file 'hooks/charmhelpers/contrib/storage/linux/lvm.py'
2277--- hooks/charmhelpers/contrib/storage/linux/lvm.py 2014-05-19 11:43:55 +0000
2278+++ hooks/charmhelpers/contrib/storage/linux/lvm.py 2014-11-26 13:43:20 +0000
2279@@ -61,6 +61,7 @@
2280 vg = None
2281 pvd = check_output(['pvdisplay', block_device]).splitlines()
2282 for l in pvd:
2283+ l = l.decode('UTF-8')
2284 if l.strip().startswith('VG Name'):
2285 vg = ' '.join(l.strip().split()[2:])
2286 return vg
2287
2288=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
2289--- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-08-13 13:12:47 +0000
2290+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-11-26 13:43:20 +0000
2291@@ -30,7 +30,8 @@
2292 # sometimes sgdisk exits non-zero; this is OK, dd will clean up
2293 call(['sgdisk', '--zap-all', '--mbrtogpt',
2294 '--clear', block_device])
2295- dev_end = check_output(['blockdev', '--getsz', block_device])
2296+ dev_end = check_output(['blockdev', '--getsz',
2297+ block_device]).decode('UTF-8')
2298 gpt_end = int(dev_end.split()[0]) - 100
2299 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
2300 'bs=1M', 'count=1'])
2301@@ -47,7 +48,7 @@
2302 it doesn't.
2303 '''
2304 is_partition = bool(re.search(r".*[0-9]+\b", device))
2305- out = check_output(['mount'])
2306+ out = check_output(['mount']).decode('UTF-8')
2307 if is_partition:
2308 return bool(re.search(device + r"\b", out))
2309 return bool(re.search(device + r"[0-9]+\b", out))
2310
2311=== modified file 'hooks/charmhelpers/core/fstab.py'
2312--- hooks/charmhelpers/core/fstab.py 2014-06-24 13:40:39 +0000
2313+++ hooks/charmhelpers/core/fstab.py 2014-11-26 13:43:20 +0000
2314@@ -3,10 +3,11 @@
2315
2316 __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
2317
2318+import io
2319 import os
2320
2321
2322-class Fstab(file):
2323+class Fstab(io.FileIO):
2324 """This class extends file in order to implement a file reader/writer
2325 for file `/etc/fstab`
2326 """
2327@@ -24,8 +25,8 @@
2328 options = "defaults"
2329
2330 self.options = options
2331- self.d = d
2332- self.p = p
2333+ self.d = int(d)
2334+ self.p = int(p)
2335
2336 def __eq__(self, o):
2337 return str(self) == str(o)
2338@@ -45,7 +46,7 @@
2339 self._path = path
2340 else:
2341 self._path = self.DEFAULT_PATH
2342- file.__init__(self, self._path, 'r+')
2343+ super(Fstab, self).__init__(self._path, 'rb+')
2344
2345 def _hydrate_entry(self, line):
2346 # NOTE: use split with no arguments to split on any
2347@@ -58,8 +59,9 @@
2348 def entries(self):
2349 self.seek(0)
2350 for line in self.readlines():
2351+ line = line.decode('us-ascii')
2352 try:
2353- if not line.startswith("#"):
2354+ if line.strip() and not line.startswith("#"):
2355 yield self._hydrate_entry(line)
2356 except ValueError:
2357 pass
2358@@ -75,14 +77,14 @@
2359 if self.get_entry_by_attr('device', entry.device):
2360 return False
2361
2362- self.write(str(entry) + '\n')
2363+ self.write((str(entry) + '\n').encode('us-ascii'))
2364 self.truncate()
2365 return entry
2366
2367 def remove_entry(self, entry):
2368 self.seek(0)
2369
2370- lines = self.readlines()
2371+ lines = [l.decode('us-ascii') for l in self.readlines()]
2372
2373 found = False
2374 for index, line in enumerate(lines):
2375@@ -97,7 +99,7 @@
2376 lines.remove(line)
2377
2378 self.seek(0)
2379- self.write(''.join(lines))
2380+ self.write(''.join(lines).encode('us-ascii'))
2381 self.truncate()
2382 return True
2383
2384
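Fstab now subclasses io.FileIO instead of the Python 2-only file type, so reads return bytes and writes must be encoded; the entries/add_entry/remove_entry changes above handle that us-ascii round trip. A rough standalone sketch of the underlying idea (temporary path chosen for illustration, not the charm code):

    import io

    path = '/tmp/example-fstab'
    with io.FileIO(path, 'w') as f:
        f.write('/dev/vdb /srv ext4 defaults 0 0\n'.encode('us-ascii'))

    with io.FileIO(path, 'r') as f:
        for raw in f.readlines():          # bytes on both Python 2 and 3
            print(raw.decode('us-ascii').split())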
2385=== modified file 'hooks/charmhelpers/core/hookenv.py'
2386--- hooks/charmhelpers/core/hookenv.py 2014-09-25 15:37:05 +0000
2387+++ hooks/charmhelpers/core/hookenv.py 2014-11-26 13:43:20 +0000
2388@@ -9,9 +9,14 @@
2389 import yaml
2390 import subprocess
2391 import sys
2392-import UserDict
2393 from subprocess import CalledProcessError
2394
2395+import six
2396+if not six.PY3:
2397+ from UserDict import UserDict
2398+else:
2399+ from collections import UserDict
2400+
2401 CRITICAL = "CRITICAL"
2402 ERROR = "ERROR"
2403 WARNING = "WARNING"
2404@@ -67,12 +72,12 @@
2405 subprocess.call(command)
2406
2407
2408-class Serializable(UserDict.IterableUserDict):
2409+class Serializable(UserDict):
2410 """Wrapper, an object that can be serialized to yaml or json"""
2411
2412 def __init__(self, obj):
2413 # wrap the object
2414- UserDict.IterableUserDict.__init__(self)
2415+ UserDict.__init__(self)
2416 self.data = obj
2417
2418 def __getattr__(self, attr):
2419@@ -214,6 +219,12 @@
2420 except KeyError:
2421 return (self._prev_dict or {})[key]
2422
2423+ def keys(self):
2424+ prev_keys = []
2425+ if self._prev_dict is not None:
2426+ prev_keys = self._prev_dict.keys()
2427+ return list(set(prev_keys + list(dict.keys(self))))
2428+
2429 def load_previous(self, path=None):
2430 """Load previous copy of config from disk.
2431
2432@@ -263,7 +274,7 @@
2433
2434 """
2435 if self._prev_dict:
2436- for k, v in self._prev_dict.iteritems():
2437+ for k, v in six.iteritems(self._prev_dict):
2438 if k not in self:
2439 self[k] = v
2440 with open(self.path, 'w') as f:
2441@@ -278,7 +289,8 @@
2442 config_cmd_line.append(scope)
2443 config_cmd_line.append('--format=json')
2444 try:
2445- config_data = json.loads(subprocess.check_output(config_cmd_line))
2446+ config_data = json.loads(
2447+ subprocess.check_output(config_cmd_line).decode('UTF-8'))
2448 if scope is not None:
2449 return config_data
2450 return Config(config_data)
2451@@ -297,10 +309,10 @@
2452 if unit:
2453 _args.append(unit)
2454 try:
2455- return json.loads(subprocess.check_output(_args))
2456+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
2457 except ValueError:
2458 return None
2459- except CalledProcessError, e:
2460+ except CalledProcessError as e:
2461 if e.returncode == 2:
2462 return None
2463 raise
2464@@ -312,7 +324,7 @@
2465 relation_cmd_line = ['relation-set']
2466 if relation_id is not None:
2467 relation_cmd_line.extend(('-r', relation_id))
2468- for k, v in (relation_settings.items() + kwargs.items()):
2469+ for k, v in (list(relation_settings.items()) + list(kwargs.items())):
2470 if v is None:
2471 relation_cmd_line.append('{}='.format(k))
2472 else:
2473@@ -329,7 +341,8 @@
2474 relid_cmd_line = ['relation-ids', '--format=json']
2475 if reltype is not None:
2476 relid_cmd_line.append(reltype)
2477- return json.loads(subprocess.check_output(relid_cmd_line)) or []
2478+ return json.loads(
2479+ subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
2480 return []
2481
2482
2483@@ -340,7 +353,8 @@
2484 units_cmd_line = ['relation-list', '--format=json']
2485 if relid is not None:
2486 units_cmd_line.extend(('-r', relid))
2487- return json.loads(subprocess.check_output(units_cmd_line)) or []
2488+ return json.loads(
2489+ subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
2490
2491
2492 @cached
2493@@ -449,7 +463,7 @@
2494 """Get the unit ID for the remote unit"""
2495 _args = ['unit-get', '--format=json', attribute]
2496 try:
2497- return json.loads(subprocess.check_output(_args))
2498+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
2499 except ValueError:
2500 return None
2501
2502
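In hookenv, subprocess output is now decoded to text before json.loads, and Config grows a keys() method that merges the previously saved config with the current one, so iteration also sees options that only existed in an earlier hook run. The merge is essentially (plain dicts standing in for the juju-provided data, not the charm code):

    prev = {'ext-port': 'eth2', 'plugin': 'ovs'}       # what _prev_dict holds
    current = {'plugin': 'ovs', 'data-port': 'eth3'}   # keys set in this run

    merged = list(set(list(prev.keys()) + list(current.keys())))
    print(sorted(merged))   # ['data-port', 'ext-port', 'plugin']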
2503=== modified file 'hooks/charmhelpers/core/host.py'
2504--- hooks/charmhelpers/core/host.py 2014-10-16 17:42:14 +0000
2505+++ hooks/charmhelpers/core/host.py 2014-11-26 13:43:20 +0000
2506@@ -13,13 +13,13 @@
2507 import string
2508 import subprocess
2509 import hashlib
2510-import shutil
2511 from contextlib import contextmanager
2512-
2513 from collections import OrderedDict
2514
2515-from hookenv import log
2516-from fstab import Fstab
2517+import six
2518+
2519+from .hookenv import log
2520+from .fstab import Fstab
2521
2522
2523 def service_start(service_name):
2524@@ -55,7 +55,9 @@
2525 def service_running(service):
2526 """Determine whether a system service is running"""
2527 try:
2528- output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
2529+ output = subprocess.check_output(
2530+ ['service', service, 'status'],
2531+ stderr=subprocess.STDOUT).decode('UTF-8')
2532 except subprocess.CalledProcessError:
2533 return False
2534 else:
2535@@ -68,7 +70,9 @@
2536 def service_available(service_name):
2537 """Determine whether a system service is available"""
2538 try:
2539- subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
2540+ subprocess.check_output(
2541+ ['service', service_name, 'status'],
2542+ stderr=subprocess.STDOUT).decode('UTF-8')
2543 except subprocess.CalledProcessError as e:
2544 return 'unrecognized service' not in e.output
2545 else:
2546@@ -116,7 +120,7 @@
2547 cmd.append(from_path)
2548 cmd.append(to_path)
2549 log(" ".join(cmd))
2550- return subprocess.check_output(cmd).strip()
2551+ return subprocess.check_output(cmd).decode('UTF-8').strip()
2552
2553
2554 def symlink(source, destination):
2555@@ -131,7 +135,7 @@
2556 subprocess.check_call(cmd)
2557
2558
2559-def mkdir(path, owner='root', group='root', perms=0555, force=False):
2560+def mkdir(path, owner='root', group='root', perms=0o555, force=False):
2561 """Create a directory"""
2562 log("Making dir {} {}:{} {:o}".format(path, owner, group,
2563 perms))
2564@@ -147,7 +151,7 @@
2565 os.chown(realpath, uid, gid)
2566
2567
2568-def write_file(path, content, owner='root', group='root', perms=0444):
2569+def write_file(path, content, owner='root', group='root', perms=0o444):
2570 """Create or overwrite a file with the contents of a string"""
2571 log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
2572 uid = pwd.getpwnam(owner).pw_uid
2573@@ -178,7 +182,7 @@
2574 cmd_args.extend([device, mountpoint])
2575 try:
2576 subprocess.check_output(cmd_args)
2577- except subprocess.CalledProcessError, e:
2578+ except subprocess.CalledProcessError as e:
2579 log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
2580 return False
2581
2582@@ -192,7 +196,7 @@
2583 cmd_args = ['umount', mountpoint]
2584 try:
2585 subprocess.check_output(cmd_args)
2586- except subprocess.CalledProcessError, e:
2587+ except subprocess.CalledProcessError as e:
2588 log('Error unmounting {}\n{}'.format(mountpoint, e.output))
2589 return False
2590
2591@@ -219,8 +223,8 @@
2592 """
2593 if os.path.exists(path):
2594 h = getattr(hashlib, hash_type)()
2595- with open(path, 'r') as source:
2596- h.update(source.read()) # IGNORE:E1101 - it does have update
2597+ with open(path, 'rb') as source:
2598+ h.update(source.read())
2599 return h.hexdigest()
2600 else:
2601 return None
2602@@ -298,7 +302,7 @@
2603 if length is None:
2604 length = random.choice(range(35, 45))
2605 alphanumeric_chars = [
2606- l for l in (string.letters + string.digits)
2607+ l for l in (string.ascii_letters + string.digits)
2608 if l not in 'l0QD1vAEIOUaeiou']
2609 random_chars = [
2610 random.choice(alphanumeric_chars) for _ in range(length)]
2611@@ -307,14 +311,14 @@
2612
2613 def list_nics(nic_type):
2614 '''Return a list of nics of given type(s)'''
2615- if isinstance(nic_type, basestring):
2616+ if isinstance(nic_type, six.string_types):
2617 int_types = [nic_type]
2618 else:
2619 int_types = nic_type
2620 interfaces = []
2621 for int_type in int_types:
2622 cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
2623- ip_output = subprocess.check_output(cmd).split('\n')
2624+ ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
2625 ip_output = (line for line in ip_output if line)
2626 for line in ip_output:
2627 if line.split()[1].startswith(int_type):
2628@@ -336,7 +340,7 @@
2629
2630 def get_nic_mtu(nic):
2631 cmd = ['ip', 'addr', 'show', nic]
2632- ip_output = subprocess.check_output(cmd).split('\n')
2633+ ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
2634 mtu = ""
2635 for line in ip_output:
2636 words = line.split()
2637@@ -347,7 +351,7 @@
2638
2639 def get_nic_hwaddr(nic):
2640 cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
2641- ip_output = subprocess.check_output(cmd)
2642+ ip_output = subprocess.check_output(cmd).decode('UTF-8')
2643 hwaddr = ""
2644 words = ip_output.split()
2645 if 'link/ether' in words:
2646
2647=== modified file 'hooks/charmhelpers/core/services/__init__.py'
2648--- hooks/charmhelpers/core/services/__init__.py 2014-08-13 13:12:47 +0000
2649+++ hooks/charmhelpers/core/services/__init__.py 2014-11-26 13:43:20 +0000
2650@@ -1,2 +1,2 @@
2651-from .base import *
2652-from .helpers import *
2653+from .base import * # NOQA
2654+from .helpers import * # NOQA
2655
2656=== modified file 'hooks/charmhelpers/core/services/helpers.py'
2657--- hooks/charmhelpers/core/services/helpers.py 2014-09-25 15:37:05 +0000
2658+++ hooks/charmhelpers/core/services/helpers.py 2014-11-26 13:43:20 +0000
2659@@ -196,7 +196,7 @@
2660 if not os.path.isabs(file_name):
2661 file_name = os.path.join(hookenv.charm_dir(), file_name)
2662 with open(file_name, 'w') as file_stream:
2663- os.fchmod(file_stream.fileno(), 0600)
2664+ os.fchmod(file_stream.fileno(), 0o600)
2665 yaml.dump(config_data, file_stream)
2666
2667 def read_context(self, file_name):
2668@@ -211,15 +211,19 @@
2669
2670 class TemplateCallback(ManagerCallback):
2671 """
2672- Callback class that will render a Jinja2 template, for use as a ready action.
2673-
2674- :param str source: The template source file, relative to `$CHARM_DIR/templates`
2675+ Callback class that will render a Jinja2 template, for use as a ready
2676+ action.
2677+
2678+ :param str source: The template source file, relative to
2679+ `$CHARM_DIR/templates`
2680+
2681 :param str target: The target to write the rendered template to
2682 :param str owner: The owner of the rendered file
2683 :param str group: The group of the rendered file
2684 :param int perms: The permissions of the rendered file
2685 """
2686- def __init__(self, source, target, owner='root', group='root', perms=0444):
2687+ def __init__(self, source, target,
2688+ owner='root', group='root', perms=0o444):
2689 self.source = source
2690 self.target = target
2691 self.owner = owner
2692
2693=== modified file 'hooks/charmhelpers/core/templating.py'
2694--- hooks/charmhelpers/core/templating.py 2014-08-13 13:12:47 +0000
2695+++ hooks/charmhelpers/core/templating.py 2014-11-26 13:43:20 +0000
2696@@ -4,7 +4,8 @@
2697 from charmhelpers.core import hookenv
2698
2699
2700-def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
2701+def render(source, target, context, owner='root', group='root',
2702+ perms=0o444, templates_dir=None):
2703 """
2704 Render a template.
2705
2706
2707=== modified file 'hooks/charmhelpers/fetch/__init__.py'
2708--- hooks/charmhelpers/fetch/__init__.py 2014-09-25 15:37:05 +0000
2709+++ hooks/charmhelpers/fetch/__init__.py 2014-11-26 13:43:20 +0000
2710@@ -5,10 +5,6 @@
2711 from charmhelpers.core.host import (
2712 lsb_release
2713 )
2714-from urlparse import (
2715- urlparse,
2716- urlunparse,
2717-)
2718 import subprocess
2719 from charmhelpers.core.hookenv import (
2720 config,
2721@@ -16,6 +12,12 @@
2722 )
2723 import os
2724
2725+import six
2726+if six.PY3:
2727+ from urllib.parse import urlparse, urlunparse
2728+else:
2729+ from urlparse import urlparse, urlunparse
2730+
2731
2732 CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
2733 deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
2734@@ -72,6 +74,7 @@
2735 FETCH_HANDLERS = (
2736 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
2737 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
2738+ 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
2739 )
2740
2741 APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
2742@@ -148,7 +151,7 @@
2743 cmd = ['apt-get', '--assume-yes']
2744 cmd.extend(options)
2745 cmd.append('install')
2746- if isinstance(packages, basestring):
2747+ if isinstance(packages, six.string_types):
2748 cmd.append(packages)
2749 else:
2750 cmd.extend(packages)
2751@@ -181,7 +184,7 @@
2752 def apt_purge(packages, fatal=False):
2753 """Purge one or more packages"""
2754 cmd = ['apt-get', '--assume-yes', 'purge']
2755- if isinstance(packages, basestring):
2756+ if isinstance(packages, six.string_types):
2757 cmd.append(packages)
2758 else:
2759 cmd.extend(packages)
2760@@ -192,7 +195,7 @@
2761 def apt_hold(packages, fatal=False):
2762 """Hold one or more packages"""
2763 cmd = ['apt-mark', 'hold']
2764- if isinstance(packages, basestring):
2765+ if isinstance(packages, six.string_types):
2766 cmd.append(packages)
2767 else:
2768 cmd.extend(packages)
2769@@ -218,6 +221,7 @@
2770 pocket for the release.
2771 'cloud:' may be used to activate official cloud archive pockets,
2772 such as 'cloud:icehouse'
2773+ 'distro' may be used as a noop
2774
2775 @param key: A key to be added to the system's APT keyring and used
2776 to verify the signatures on packages. Ideally, this should be an
2777@@ -251,12 +255,14 @@
2778 release = lsb_release()['DISTRIB_CODENAME']
2779 with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
2780 apt.write(PROPOSED_POCKET.format(release))
2781+ elif source == 'distro':
2782+ pass
2783 else:
2784- raise SourceConfigError("Unknown source: {!r}".format(source))
2785+ log("Unknown source: {!r}".format(source))
2786
2787 if key:
2788 if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
2789- with NamedTemporaryFile() as key_file:
2790+ with NamedTemporaryFile('w+') as key_file:
2791 key_file.write(key)
2792 key_file.flush()
2793 key_file.seek(0)
2794@@ -293,14 +299,14 @@
2795 sources = safe_load((config(sources_var) or '').strip()) or []
2796 keys = safe_load((config(keys_var) or '').strip()) or None
2797
2798- if isinstance(sources, basestring):
2799+ if isinstance(sources, six.string_types):
2800 sources = [sources]
2801
2802 if keys is None:
2803 for source in sources:
2804 add_source(source, None)
2805 else:
2806- if isinstance(keys, basestring):
2807+ if isinstance(keys, six.string_types):
2808 keys = [keys]
2809
2810 if len(sources) != len(keys):
2811@@ -397,7 +403,7 @@
2812 while result is None or result == APT_NO_LOCK:
2813 try:
2814 result = subprocess.check_call(cmd, env=env)
2815- except subprocess.CalledProcessError, e:
2816+ except subprocess.CalledProcessError as e:
2817 retry_count = retry_count + 1
2818 if retry_count > APT_NO_LOCK_RETRY_COUNT:
2819 raise
2820
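add_source now treats 'distro' as an explicit no-op and merely logs unknown source strings instead of raising SourceConfigError, so a charm configured with openstack-origin: distro needs no special casing. Roughly (a sketch run on a unit, not part of the diff):

    from charmhelpers.fetch import add_source

    add_source('distro')           # no-op: keep the stock Ubuntu archive
    add_source('cloud:icehouse')   # cloud archive pocket, per the docstring above
    add_source('no-such-source')   # previously raised SourceConfigError, now only logged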
2821=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
2822--- hooks/charmhelpers/fetch/archiveurl.py 2014-09-25 15:37:05 +0000
2823+++ hooks/charmhelpers/fetch/archiveurl.py 2014-11-26 13:43:20 +0000
2824@@ -1,8 +1,23 @@
2825 import os
2826-import urllib2
2827-from urllib import urlretrieve
2828-import urlparse
2829 import hashlib
2830+import re
2831+
2832+import six
2833+if six.PY3:
2834+ from urllib.request import (
2835+ build_opener, install_opener, urlopen, urlretrieve,
2836+ HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
2837+ )
2838+ from urllib.parse import urlparse, urlunparse, parse_qs
2839+ from urllib.error import URLError
2840+else:
2841+ from urllib import urlretrieve
2842+ from urllib2 import (
2843+ build_opener, install_opener, urlopen,
2844+ HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
2845+ URLError
2846+ )
2847+ from urlparse import urlparse, urlunparse, parse_qs
2848
2849 from charmhelpers.fetch import (
2850 BaseFetchHandler,
2851@@ -15,6 +30,24 @@
2852 from charmhelpers.core.host import mkdir, check_hash
2853
2854
2855+def splituser(host):
2856+ '''urllib.splituser(), but six's support of this seems broken'''
2857+ _userprog = re.compile('^(.*)@(.*)$')
2858+ match = _userprog.match(host)
2859+ if match:
2860+ return match.group(1, 2)
2861+ return None, host
2862+
2863+
2864+def splitpasswd(user):
2865+ '''urllib.splitpasswd(), but six's support of this is missing'''
2866+ _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
2867+ match = _passwdprog.match(user)
2868+ if match:
2869+ return match.group(1, 2)
2870+ return user, None
2871+
2872+
2873 class ArchiveUrlFetchHandler(BaseFetchHandler):
2874 """
2875 Handler to download archive files from arbitrary URLs.
2876@@ -42,20 +75,20 @@
2877 """
2878 # propogate all exceptions
2879 # URLError, OSError, etc
2880- proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
2881+ proto, netloc, path, params, query, fragment = urlparse(source)
2882 if proto in ('http', 'https'):
2883- auth, barehost = urllib2.splituser(netloc)
2884+ auth, barehost = splituser(netloc)
2885 if auth is not None:
2886- source = urlparse.urlunparse((proto, barehost, path, params, query, fragment))
2887- username, password = urllib2.splitpasswd(auth)
2888- passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
2889+ source = urlunparse((proto, barehost, path, params, query, fragment))
2890+ username, password = splitpasswd(auth)
2891+ passman = HTTPPasswordMgrWithDefaultRealm()
2892 # Realm is set to None in add_password to force the username and password
2893 # to be used whatever the realm
2894 passman.add_password(None, source, username, password)
2895- authhandler = urllib2.HTTPBasicAuthHandler(passman)
2896- opener = urllib2.build_opener(authhandler)
2897- urllib2.install_opener(opener)
2898- response = urllib2.urlopen(source)
2899+ authhandler = HTTPBasicAuthHandler(passman)
2900+ opener = build_opener(authhandler)
2901+ install_opener(opener)
2902+ response = urlopen(source)
2903 try:
2904 with open(dest, 'w') as dest_file:
2905 dest_file.write(response.read())
2906@@ -91,17 +124,21 @@
2907 url_parts = self.parse_url(source)
2908 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
2909 if not os.path.exists(dest_dir):
2910- mkdir(dest_dir, perms=0755)
2911+ mkdir(dest_dir, perms=0o755)
2912 dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
2913 try:
2914 self.download(source, dld_file)
2915- except urllib2.URLError as e:
2916+ except URLError as e:
2917 raise UnhandledSource(e.reason)
2918 except OSError as e:
2919 raise UnhandledSource(e.strerror)
2920- options = urlparse.parse_qs(url_parts.fragment)
2921+ options = parse_qs(url_parts.fragment)
2922 for key, value in options.items():
2923- if key in hashlib.algorithms:
2924+ if not six.PY3:
2925+ algorithms = hashlib.algorithms
2926+ else:
2927+ algorithms = hashlib.algorithms_available
2928+ if key in algorithms:
2929 check_hash(dld_file, value, key)
2930 if checksum:
2931 check_hash(dld_file, checksum, hash_type)
2932
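The archiveurl handler replaces urllib2.splituser/splitpasswd with local helpers because six does not expose them; they simply pull credentials out of 'user:password@host' URLs. For reference (example values, not part of the diff):

    from charmhelpers.fetch.archiveurl import splituser, splitpasswd

    auth, host = splituser('jorge:s3cret@example.com')   # ('jorge:s3cret', 'example.com')
    user, password = splitpasswd(auth)                   # ('jorge', 's3cret')
    print(user, password, host)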
2933=== modified file 'hooks/charmhelpers/fetch/bzrurl.py'
2934--- hooks/charmhelpers/fetch/bzrurl.py 2014-06-24 13:40:39 +0000
2935+++ hooks/charmhelpers/fetch/bzrurl.py 2014-11-26 13:43:20 +0000
2936@@ -5,6 +5,10 @@
2937 )
2938 from charmhelpers.core.host import mkdir
2939
2940+import six
2941+if six.PY3:
2942+ raise ImportError('bzrlib does not support Python3')
2943+
2944 try:
2945 from bzrlib.branch import Branch
2946 except ImportError:
2947@@ -42,7 +46,7 @@
2948 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
2949 branch_name)
2950 if not os.path.exists(dest_dir):
2951- mkdir(dest_dir, perms=0755)
2952+ mkdir(dest_dir, perms=0o755)
2953 try:
2954 self.branch(source, dest_dir)
2955 except OSError as e:
2956
2957=== added file 'hooks/charmhelpers/fetch/giturl.py'
2958--- hooks/charmhelpers/fetch/giturl.py 1970-01-01 00:00:00 +0000
2959+++ hooks/charmhelpers/fetch/giturl.py 2014-11-26 13:43:20 +0000
2960@@ -0,0 +1,48 @@
2961+import os
2962+from charmhelpers.fetch import (
2963+ BaseFetchHandler,
2964+ UnhandledSource
2965+)
2966+from charmhelpers.core.host import mkdir
2967+
2968+import six
2969+if six.PY3:
2970+ raise ImportError('GitPython does not support Python 3')
2971+
2972+try:
2973+ from git import Repo
2974+except ImportError:
2975+ from charmhelpers.fetch import apt_install
2976+ apt_install("python-git")
2977+ from git import Repo
2978+
2979+
2980+class GitUrlFetchHandler(BaseFetchHandler):
2981+ """Handler for git branches via generic and github URLs"""
2982+ def can_handle(self, source):
2983+ url_parts = self.parse_url(source)
2984+ # TODO (mattyw) no support for ssh git@ yet
2985+ if url_parts.scheme not in ('http', 'https', 'git'):
2986+ return False
2987+ else:
2988+ return True
2989+
2990+ def clone(self, source, dest, branch):
2991+ if not self.can_handle(source):
2992+ raise UnhandledSource("Cannot handle {}".format(source))
2993+
2994+ repo = Repo.clone_from(source, dest)
2995+ repo.git.checkout(branch)
2996+
2997+ def install(self, source, branch="master"):
2998+ url_parts = self.parse_url(source)
2999+ branch_name = url_parts.path.strip("/").split("/")[-1]
3000+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
3001+ branch_name)
3002+ if not os.path.exists(dest_dir):
3003+ mkdir(dest_dir, perms=0o755)
3004+ try:
3005+ self.clone(source, dest_dir, branch)
3006+ except OSError as e:
3007+ raise UnhandledSource(e.strerror)
3008+ return dest_dir
3009
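The new giturl handler clones a branch into $CHARM_DIR/fetched/<last path component> using GitPython (installed on demand as python-git) and refuses to import under Python 3. A sketch of how it might be driven (hypothetical repository URL; CHARM_DIR is normally set by juju):

    import os
    from charmhelpers.fetch.giturl import GitUrlFetchHandler

    os.environ.setdefault('CHARM_DIR', '/tmp/charm')

    handler = GitUrlFetchHandler()
    source = 'https://github.com/example/some-repo'
    if handler.can_handle(source):
        dest = handler.install(source, branch='master')
        print('cloned into', dest)     # .../fetched/some-repo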
3010=== modified file 'hooks/quantum_contexts.py'
3011--- hooks/quantum_contexts.py 2014-10-09 14:15:25 +0000
3012+++ hooks/quantum_contexts.py 2014-11-26 13:43:20 +0000
3013@@ -50,6 +50,8 @@
3014 "neutron.plugins.ml2.plugin.Ml2Plugin"
3015 NEUTRON_NVP_PLUGIN = \
3016 "neutron.plugins.nicira.nicira_nvp_plugin.NeutronPlugin.NvpPluginV2"
3017+NEUTRON_N1KV_PLUGIN = \
3018+ "neutron.plugins.cisco.n1kv.n1kv_neutron_plugin.N1kvNeutronPluginV2"
3019 NEUTRON_NSX_PLUGIN = "vmware"
3020
3021 NEUTRON = 'neutron'
3022@@ -65,16 +67,18 @@
3023
3024 OVS = 'ovs'
3025 NVP = 'nvp'
3026+N1KV = 'n1kv'
3027 NSX = 'nsx'
3028
3029 CORE_PLUGIN = {
3030 QUANTUM: {
3031 OVS: QUANTUM_OVS_PLUGIN,
3032- NVP: QUANTUM_NVP_PLUGIN
3033+ NVP: QUANTUM_NVP_PLUGIN,
3034 },
3035 NEUTRON: {
3036 OVS: NEUTRON_OVS_PLUGIN,
3037 NVP: NEUTRON_NVP_PLUGIN,
3038+ N1KV: NEUTRON_N1KV_PLUGIN,
3039 NSX: NEUTRON_NSX_PLUGIN
3040 },
3041 }
3042@@ -166,13 +170,16 @@
3043
3044 if config('external-network-id'):
3045 ctxt['ext_net_id'] = config('external-network-id')
3046+
3047+ if config('plugin'):
3048+ ctxt['plugin'] = config('plugin')
3049 return ctxt
3050
3051
3052-class ExternalPortContext(OSContextGenerator):
3053+class NeutronPortContext(OSContextGenerator):
3054
3055- def __call__(self):
3056- if not config('ext-port'):
3057+ def _resolve_port(self, config_key):
3058+ if not config(config_key):
3059 return None
3060 hwaddr_to_nic = {}
3061 hwaddr_to_ip = {}
3062@@ -183,7 +190,7 @@
3063 get_ipv6_addr(iface=nic, fatal=False)
3064 hwaddr_to_ip[hwaddr] = addresses
3065 mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
3066- for entry in config('ext-port').split():
3067+ for entry in config(config_key).split():
3068 entry = entry.strip()
3069 if re.match(mac_regex, entry):
3070 if entry in hwaddr_to_nic and len(hwaddr_to_ip[entry]) == 0:
3071@@ -192,15 +199,35 @@
3072 continue
3073 # Entry is a MAC address for a valid interface that doesn't
3074 # have an IP address assigned yet.
3075- return {"ext_port": hwaddr_to_nic[entry]}
3076+ return hwaddr_to_nic[entry]
3077 else:
3078 # If the passed entry is not a MAC address, assume it's a valid
3079 # interface, and that the user put it there on purpose (we can
3080 # trust it to be the real external network).
3081- return {"ext_port": entry}
3082+ return entry
3083 return None
3084
3085
3086+class ExternalPortContext(NeutronPortContext):
3087+
3088+ def __call__(self):
3089+ port = self._resolve_port('ext-port')
3090+ if port:
3091+ return {"ext_port": port}
3092+ else:
3093+ return None
3094+
3095+
3096+class DataPortContext(NeutronPortContext):
3097+
3098+ def __call__(self):
3099+ port = self._resolve_port('data-port')
3100+ if port:
3101+ return {"data_port": port}
3102+ else:
3103+ return None
3104+
3105+
3106 class QuantumGatewayContext(OSContextGenerator):
3107
3108 def __call__(self):
3109
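ExternalPortContext is refactored onto a shared NeutronPortContext base so the new data-port option resolves exactly like ext-port: the value may be a MAC address (matched against a NIC that has no IP address yet) or a plain interface name. Roughly what the contexts yield inside a hook (example values in the spirit of the unit tests further down, not live output):

    from quantum_contexts import ExternalPortContext, DataPortContext

    # With ext-port='eth2' and data-port set to the MAC of an unconfigured NIC:
    ExternalPortContext()()   # -> {'ext_port': 'eth2'}
    DataPortContext()()       # -> {'data_port': 'eth1'}, the NIC owning that MAC
    # Unset or unresolvable values yield None and the bridge port is skipped.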
3110=== modified file 'hooks/quantum_hooks.py'
3111--- hooks/quantum_hooks.py 2014-09-30 14:12:10 +0000
3112+++ hooks/quantum_hooks.py 2014-11-26 13:43:20 +0000
3113@@ -16,6 +16,7 @@
3114 apt_update,
3115 apt_install,
3116 filter_installed_packages,
3117+ apt_purge,
3118 )
3119 from charmhelpers.core.host import (
3120 restart_on_change,
3121@@ -32,6 +33,8 @@
3122 openstack_upgrade_available,
3123 )
3124 from charmhelpers.payload.execd import execd_preinstall
3125+from charmhelpers.core.sysctl import create as create_sysctl
3126+
3127
3128 import sys
3129 from quantum_utils import (
3130@@ -76,6 +79,11 @@
3131 global CONFIGS
3132 if openstack_upgrade_available(get_common_package()):
3133 CONFIGS = do_openstack_upgrade()
3134+
3135+ sysctl_dict = config('sysctl')
3136+ if sysctl_dict:
3137+ create_sysctl(sysctl_dict, '/etc/sysctl.d/50-quantum-gateway.conf')
3138+
3139 # Re-run joined hooks as config might have changed
3140 for r_id in relation_ids('shared-db'):
3141 db_joined(relation_id=r_id)
3142@@ -92,6 +100,12 @@
3143 log('Please provide a valid plugin config', level=ERROR)
3144 sys.exit(1)
3145
3146+ if config('plugin') == 'n1kv':
3147+ if config('enable-l3-agent'):
3148+ apt_install(filter_installed_packages('neutron-l3-agent'))
3149+ else:
3150+ apt_purge('neutron-l3-agent')
3151+
3152
3153 @hooks.hook('upgrade-charm')
3154 def upgrade_charm():
3155@@ -186,6 +200,10 @@
3156 ' failed nodes with nvp|nsx',
3157 level=WARNING)
3158 return
3159+ if config('plugin') == 'n1kv':
3160+ log('Unable to re-assign agent resources for failed nodes with n1kv',
3161+ level=WARNING)
3162+ return
3163 if eligible_leader(None):
3164 reassign_agent_resources()
3165 CONFIGS.write_all()
3166
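config-changed now also renders the new sysctl option (a YAML map of key/value pairs, as the unit test further down illustrates) into /etc/sysctl.d/50-quantum-gateway.conf. The flow, with an example value (the sysctl key shown is illustrative, not part of the diff):

    from charmhelpers.core.hookenv import config
    from charmhelpers.core.sysctl import create as create_sysctl

    # e.g. juju set quantum-gateway sysctl="{ net.ipv4.ip_forward: 1 }"
    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-quantum-gateway.conf')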
3167=== modified file 'hooks/quantum_utils.py'
3168--- hooks/quantum_utils.py 2014-09-19 09:18:01 +0000
3169+++ hooks/quantum_utils.py 2014-11-26 13:43:20 +0000
3170@@ -39,13 +39,14 @@
3171 import charmhelpers.contrib.openstack.templating as templating
3172 from charmhelpers.contrib.openstack.neutron import headers_package
3173 from quantum_contexts import (
3174- CORE_PLUGIN, OVS, NVP, NSX,
3175+ CORE_PLUGIN, OVS, NVP, NSX, N1KV,
3176 NEUTRON, QUANTUM,
3177 networking_name,
3178 QuantumGatewayContext,
3179 NetworkServiceContext,
3180 L3AgentContext,
3181 ExternalPortContext,
3182+ DataPortContext,
3183 remap_plugin
3184 )
3185
3186@@ -63,7 +64,7 @@
3187 "/etc/quantum/plugins/nicira/nvp.ini"
3188 QUANTUM_PLUGIN_CONF = {
3189 OVS: QUANTUM_OVS_PLUGIN_CONF,
3190- NVP: QUANTUM_NVP_PLUGIN_CONF
3191+ NVP: QUANTUM_NVP_PLUGIN_CONF,
3192 }
3193
3194 NEUTRON_CONF_DIR = '/etc/neutron'
3195@@ -120,6 +121,15 @@
3196 'python-psycopg2',
3197 'python-oslo.config', # Force upgrade
3198 "nova-api-metadata"
3199+ ],
3200+ N1KV: [
3201+ "neutron-plugin-cisco",
3202+ "neutron-dhcp-agent",
3203+ "python-mysqldb",
3204+ "python-psycopg2",
3205+ "nova-api-metadata",
3206+ "neutron-common",
3207+ "neutron-l3-agent"
3208 ]
3209 }
3210 NEUTRON_GATEWAY_PKGS[NSX] = NEUTRON_GATEWAY_PKGS[NVP]
3211@@ -129,6 +139,12 @@
3212 NEUTRON: NEUTRON_GATEWAY_PKGS,
3213 }
3214
3215+EARLY_PACKAGES = {
3216+ OVS: ['openvswitch-datapath-dkms'],
3217+ NVP: [],
3218+ N1KV: []
3219+}
3220+
3221
3222 def get_early_packages():
3223 '''Return a list of package for pre-install based on configured plugin'''
3224@@ -326,6 +342,24 @@
3225 }
3226 NEUTRON_NVP_CONFIG_FILES.update(NEUTRON_SHARED_CONFIG_FILES)
3227
3228+NEUTRON_N1KV_CONFIG_FILES = {
3229+ NEUTRON_CONF: {
3230+ 'hook_contexts': [context.AMQPContext(ssl_dir=NEUTRON_CONF_DIR),
3231+ QuantumGatewayContext(),
3232+ SyslogContext()],
3233+ 'services': ['neutron-l3-agent',
3234+ 'neutron-dhcp-agent',
3235+ 'neutron-metadata-agent']
3236+ },
3237+ NEUTRON_L3_AGENT_CONF: {
3238+ 'hook_contexts': [NetworkServiceContext(),
3239+ L3AgentContext(),
3240+ QuantumGatewayContext()],
3241+ 'services': ['neutron-l3-agent']
3242+ },
3243+}
3244+NEUTRON_N1KV_CONFIG_FILES.update(NEUTRON_SHARED_CONFIG_FILES)
3245+
3246 CONFIG_FILES = {
3247 QUANTUM: {
3248 NVP: QUANTUM_NVP_CONFIG_FILES,
3249@@ -335,6 +369,7 @@
3250 NSX: NEUTRON_NVP_CONFIG_FILES,
3251 NVP: NEUTRON_NVP_CONFIG_FILES,
3252 OVS: NEUTRON_OVS_CONFIG_FILES,
3253+ N1KV: NEUTRON_N1KV_CONFIG_FILES,
3254 },
3255 }
3256
3257@@ -404,6 +439,7 @@
3258
3259 INT_BRIDGE = "br-int"
3260 EXT_BRIDGE = "br-ex"
3261+DATA_BRIDGE = 'br-data'
3262
3263 DHCP_AGENT = "DHCP Agent"
3264 L3_AGENT = "L3 Agent"
3265@@ -533,5 +569,11 @@
3266 add_bridge(INT_BRIDGE)
3267 add_bridge(EXT_BRIDGE)
3268 ext_port_ctx = ExternalPortContext()()
3269- if ext_port_ctx is not None and ext_port_ctx['ext_port']:
3270+ if ext_port_ctx and ext_port_ctx['ext_port']:
3271 add_bridge_port(EXT_BRIDGE, ext_port_ctx['ext_port'])
3272+
3273+ add_bridge(DATA_BRIDGE)
3274+ data_port_ctx = DataPortContext()()
3275+ if data_port_ctx and data_port_ctx['data_port']:
3276+ add_bridge_port(DATA_BRIDGE, data_port_ctx['data_port'],
3277+ promisc=True)
3278
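configure_ovs now unconditionally creates a br-data bridge and, when data-port resolves, plugs that interface in with promiscuous mode enabled; the bridge_mappings = physnet1:br-data line in the icehouse ml2 template below is what ties the new flat/VLAN network types to it. The relevant steps, as a sketch runnable inside a hook (names as used in the diff):

    from quantum_utils import DATA_BRIDGE, add_bridge, add_bridge_port
    from quantum_contexts import DataPortContext

    add_bridge(DATA_BRIDGE)                  # br-data is created unconditionally
    ctx = DataPortContext()()                # resolves data-port, or None
    if ctx and ctx['data_port']:
        # promisc=True so the NIC accepts frames for guest MACs, not just its own
        add_bridge_port(DATA_BRIDGE, ctx['data_port'], promisc=True)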
3279=== modified file 'templates/havana/dhcp_agent.ini'
3280--- templates/havana/dhcp_agent.ini 2014-05-03 07:31:11 +0000
3281+++ templates/havana/dhcp_agent.ini 2014-11-26 13:43:20 +0000
3282@@ -7,7 +7,7 @@
3283 interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
3284 dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
3285 root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
3286-ovs_use_veth = True
3287+
3288 {% if instance_mtu -%}
3289 dnsmasq_config_file = /etc/neutron/dnsmasq.conf
3290 {% endif -%}
3291@@ -15,3 +15,13 @@
3292 enable_metadata_network = True
3293 enable_isolated_metadata = True
3294 {% endif -%}
3295+
3296+{% if plugin == 'n1kv' %}
3297+enable_metadata_network = True
3298+enable_isolated_metadata = True
3299+resync_interval = 30
3300+use_namespaces = True
3301+dhcp_lease_time=3600
3302+{% else %}
3303+ovs_use_veth = True
3304+{% endif %}
3305
3306=== modified file 'templates/havana/l3_agent.ini'
3307--- templates/havana/l3_agent.ini 2014-04-10 16:50:13 +0000
3308+++ templates/havana/l3_agent.ini 2014-11-26 13:43:20 +0000
3309@@ -10,8 +10,15 @@
3310 admin_user = {{ service_username }}
3311 admin_password = {{ service_password }}
3312 root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
3313+handle_internal_only_routers = {{ handle_internal_only_router }}
3314+{% if plugin == 'n1kv' %}
3315+l3_agent_manager = neutron.agent.l3_agent.L3NATAgentWithStateReport
3316+external_network_bridge = br-int
3317+ovs_use_veth = False
3318+use_namespaces = True
3319+{% else %}
3320 ovs_use_veth = True
3321-handle_internal_only_routers = {{ handle_internal_only_router }}
3322+{% endif %}
3323 {% if ext_net_id -%}
3324 gateway_external_network_id = {{ ext_net_id }}
3325 {% endif -%}
3326
3327=== modified file 'templates/icehouse/ml2_conf.ini'
3328--- templates/icehouse/ml2_conf.ini 2014-10-02 11:05:47 +0000
3329+++ templates/icehouse/ml2_conf.ini 2014-11-26 13:43:20 +0000
3330@@ -3,18 +3,30 @@
3331 # Configuration file maintained by Juju. Local changes may be overwritten.
3332 ###############################################################################
3333 [ml2]
3334-type_drivers = gre,vxlan
3335-tenant_network_types = gre,vxlan
3336+type_drivers = gre,vxlan,vlan,flat
3337+tenant_network_types = gre,vxlan,vlan,flat
3338 mechanism_drivers = openvswitch,l2population
3339+
3340 [ml2_type_gre]
3341 tunnel_id_ranges = 1:1000
3342+
3343 [ml2_type_vxlan]
3344 vni_ranges = 1001:2000
3345+
3346+[ml2_type_vlan]
3347+network_vlan_ranges = physnet1:1000:2000
3348+
3349+[ml2_type_flat]
3350+flat_networks = physnet1
3351+
3352 [ovs]
3353 enable_tunneling = True
3354 local_ip = {{ local_ip }}
3355+bridge_mappings = physnet1:br-data
3356+
3357 [agent]
3358 tunnel_types = {{ overlay_network_type }}
3359 l2_population = {{ l2_population }}
3360+
3361 [securitygroup]
3362 firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
3363
3364=== modified file 'tests/basic_deployment.py'
3365--- tests/basic_deployment.py 2014-10-23 18:04:44 +0000
3366+++ tests/basic_deployment.py 2014-11-26 13:43:20 +0000
3367@@ -358,8 +358,8 @@
3368 quantum_gateway_relation = unit.relation('shared-db', 'mysql:shared-db')
3369 expected = {
3370 'ml2': {
3371- 'type_drivers': 'gre,vxlan',
3372- 'tenant_network_types': 'gre,vxlan',
3373+ 'type_drivers': 'gre,vxlan,vlan,flat',
3374+ 'tenant_network_types': 'gre,vxlan,vlan,flat',
3375 'mechanism_drivers': 'openvswitch,l2population'
3376 },
3377 'ml2_type_gre': {
3378
3379=== modified file 'tests/charmhelpers/contrib/amulet/deployment.py'
3380--- tests/charmhelpers/contrib/amulet/deployment.py 2014-10-07 21:03:47 +0000
3381+++ tests/charmhelpers/contrib/amulet/deployment.py 2014-11-26 13:43:20 +0000
3382@@ -1,6 +1,6 @@
3383 import amulet
3384-
3385 import os
3386+import six
3387
3388
3389 class AmuletDeployment(object):
3390@@ -52,12 +52,12 @@
3391
3392 def _add_relations(self, relations):
3393 """Add all of the relations for the services."""
3394- for k, v in relations.iteritems():
3395+ for k, v in six.iteritems(relations):
3396 self.d.relate(k, v)
3397
3398 def _configure_services(self, configs):
3399 """Configure all of the services."""
3400- for service, config in configs.iteritems():
3401+ for service, config in six.iteritems(configs):
3402 self.d.configure(service, config)
3403
3404 def _deploy(self):
3405
3406=== modified file 'tests/charmhelpers/contrib/amulet/utils.py'
3407--- tests/charmhelpers/contrib/amulet/utils.py 2014-07-30 15:21:30 +0000
3408+++ tests/charmhelpers/contrib/amulet/utils.py 2014-11-26 13:43:20 +0000
3409@@ -5,6 +5,8 @@
3410 import sys
3411 import time
3412
3413+import six
3414+
3415
3416 class AmuletUtils(object):
3417 """Amulet utilities.
3418@@ -58,7 +60,7 @@
3419 Verify the specified services are running on the corresponding
3420 service units.
3421 """
3422- for k, v in commands.iteritems():
3423+ for k, v in six.iteritems(commands):
3424 for cmd in v:
3425 output, code = k.run(cmd)
3426 if code != 0:
3427@@ -100,11 +102,11 @@
3428 longs, or can be a function that evaluate a variable and returns a
3429 bool.
3430 """
3431- for k, v in expected.iteritems():
3432+ for k, v in six.iteritems(expected):
3433 if k in actual:
3434- if (isinstance(v, basestring) or
3435+ if (isinstance(v, six.string_types) or
3436 isinstance(v, bool) or
3437- isinstance(v, (int, long))):
3438+ isinstance(v, six.integer_types)):
3439 if v != actual[k]:
3440 return "{}:{}".format(k, actual[k])
3441 elif not v(actual[k]):
3442
3443=== modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
3444--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-10-07 21:03:47 +0000
3445+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-11-26 13:43:20 +0000
3446@@ -1,3 +1,4 @@
3447+import six
3448 from charmhelpers.contrib.amulet.deployment import (
3449 AmuletDeployment
3450 )
3451@@ -69,7 +70,7 @@
3452
3453 def _configure_services(self, configs):
3454 """Configure all of the services."""
3455- for service, config in configs.iteritems():
3456+ for service, config in six.iteritems(configs):
3457 self.d.configure(service, config)
3458
3459 def _get_openstack_release(self):
3460
3461=== modified file 'tests/charmhelpers/contrib/openstack/amulet/utils.py'
3462--- tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-09-25 15:37:05 +0000
3463+++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-11-26 13:43:20 +0000
3464@@ -7,6 +7,8 @@
3465 import keystoneclient.v2_0 as keystone_client
3466 import novaclient.v1_1.client as nova_client
3467
3468+import six
3469+
3470 from charmhelpers.contrib.amulet.utils import (
3471 AmuletUtils
3472 )
3473@@ -60,7 +62,7 @@
3474 expected service catalog endpoints.
3475 """
3476 self.log.debug('actual: {}'.format(repr(actual)))
3477- for k, v in expected.iteritems():
3478+ for k, v in six.iteritems(expected):
3479 if k in actual:
3480 ret = self._validate_dict_data(expected[k][0], actual[k][0])
3481 if ret:
3482
3483=== modified file 'unit_tests/test_quantum_contexts.py'
3484--- unit_tests/test_quantum_contexts.py 2014-10-09 14:15:25 +0000
3485+++ unit_tests/test_quantum_contexts.py 2014-11-26 13:43:20 +0000
3486@@ -117,11 +117,11 @@
3487 }
3488
3489
3490-class TestExternalPortContext(CharmTestCase):
3491+class TestNeutronPortContext(CharmTestCase):
3492
3493 def setUp(self):
3494- super(TestExternalPortContext, self).setUp(quantum_contexts,
3495- TO_PATCH)
3496+ super(TestNeutronPortContext, self).setUp(quantum_contexts,
3497+ TO_PATCH)
3498 self.machine_macs = {
3499 'eth0': 'fe:c5:ce:8e:2b:00',
3500 'eth1': 'fe:c5:ce:8e:2b:01',
3501@@ -174,6 +174,11 @@
3502 self.assertEquals(quantum_contexts.ExternalPortContext()(),
3503 {'ext_port': 'eth2'})
3504
3505+ def test_data_port_eth(self):
3506+ self.config.return_value = 'eth1010'
3507+ self.assertEquals(quantum_contexts.DataPortContext()(),
3508+ {'data_port': 'eth1010'})
3509+
3510
3511 class TestL3AgentContext(CharmTestCase):
3512
3513@@ -187,7 +192,8 @@
3514 self.test_config.set('external-network-id', '')
3515 self.eligible_leader.return_value = False
3516 self.assertEquals(quantum_contexts.L3AgentContext()(),
3517- {'handle_internal_only_router': False})
3518+ {'handle_internal_only_router': False,
3519+ 'plugin': 'ovs'})
3520
3521 def test_hior_leader(self):
3522 self.test_config.set('run-internal-router', 'leader')
3523@@ -195,7 +201,8 @@
3524 self.eligible_leader.return_value = True
3525 self.assertEquals(quantum_contexts.L3AgentContext()(),
3526 {'handle_internal_only_router': True,
3527- 'ext_net_id': 'netid'})
3528+ 'ext_net_id': 'netid',
3529+ 'plugin': 'ovs'})
3530
3531 def test_hior_all(self):
3532 self.test_config.set('run-internal-router', 'all')
3533@@ -203,7 +210,8 @@
3534 self.eligible_leader.return_value = True
3535 self.assertEquals(quantum_contexts.L3AgentContext()(),
3536 {'handle_internal_only_router': True,
3537- 'ext_net_id': 'netid'})
3538+ 'ext_net_id': 'netid',
3539+ 'plugin': 'ovs'})
3540
3541
3542 class TestQuantumGatewayContext(CharmTestCase):
3543
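The renamed TestNeutronPortContext case adds coverage for the new DataPortContext, and the L3AgentContext tests now expect a 'plugin' key alongside the existing values. The real context classes are defined in hooks/quantum_contexts.py earlier in this diff; purely as an assumption based on what these tests expect, a data-port context would have roughly this shape:

    from charmhelpers.core.hookenv import config


    class DataPortContext(object):
        """Hypothetical sketch only; see hooks/quantum_contexts.py for the
        actual implementation."""

        def __call__(self):
            port = config('data-port')
            if not port:
                # configure_ovs() treats a falsy context as "no data port"
                return None
            return {'data_port': port}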
3544=== modified file 'unit_tests/test_quantum_hooks.py'
3545--- unit_tests/test_quantum_hooks.py 2014-09-26 10:14:13 +0000
3546+++ unit_tests/test_quantum_hooks.py 2014-11-26 13:43:20 +0000
3547@@ -19,6 +19,7 @@
3548 'valid_plugin',
3549 'apt_update',
3550 'apt_install',
3551+ 'apt_purge',
3552 'filter_installed_packages',
3553 'get_early_packages',
3554 'get_packages',
3555@@ -39,7 +40,8 @@
3556 'lsb_release',
3557 'stop_services',
3558 'b64decode',
3559- 'is_relation_made'
3560+ 'is_relation_made',
3561+ 'create_sysctl',
3562 ]
3563
3564
3565@@ -97,6 +99,7 @@
3566 def test_config_changed(self):
3567 def mock_relids(rel):
3568 return ['relid']
3569+ self.test_config.set('sysctl', '{ kernel.max_pid: "1337"}')
3570 self.openstack_upgrade_available.return_value = True
3571 self.valid_plugin.return_value = True
3572 self.relation_ids.side_effect = mock_relids
3573@@ -105,6 +108,7 @@
3574 _amqp_joined = self.patch('amqp_joined')
3575 _amqp_nova_joined = self.patch('amqp_nova_joined')
3576 self._call_hook('config-changed')
3577+ self.assertTrue(self.create_sysctl.called)
3578 self.assertTrue(self.do_openstack_upgrade.called)
3579 self.assertTrue(self.configure_ovs.called)
3580 self.assertTrue(_db_joined.called)
3581@@ -119,6 +123,17 @@
3582 self.assertTrue(self.do_openstack_upgrade.called)
3583 self.assertTrue(self.configure_ovs.called)
3584
3585+ def test_config_changed_n1kv(self):
3586+ self.openstack_upgrade_available.return_value = False
3587+ self.valid_plugin.return_value = True
3588+ self.filter_installed_packages.side_effect = lambda p: p
3589+ self.test_config.set('plugin', 'n1kv')
3590+ self._call_hook('config-changed')
3591+ self.apt_install.assert_called_with('neutron-l3-agent')
3592+ self.test_config.set('enable-l3-agent', False)
3593+ self._call_hook('config-changed')
3594+ self.apt_purge.assert_called_with('neutron-l3-agent')
3595+
3596 @patch('sys.exit')
3597 def test_config_changed_invalid_plugin(self, _exit):
3598 self.valid_plugin.return_value = False
3599
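The new hook tests assume config-changed now writes the sysctl config option out via create_sysctl and installs or purges neutron-l3-agent when the n1kv plugin is selected. The actual hook code is in hooks/quantum_hooks.py earlier in this diff; the outline below is only an assumed reconstruction from the test expectations (in particular the sysctl.d file name is invented):

    from charmhelpers.core.hookenv import config
    from charmhelpers.core.sysctl import create as create_sysctl
    from charmhelpers.fetch import apt_install, apt_purge


    def config_changed():
        # 'sysctl' carries a YAML dict string, e.g. '{ kernel.max_pid: "1337" }'
        sysctl_settings = config('sysctl')
        if sysctl_settings:
            create_sysctl(sysctl_settings,
                          '/etc/sysctl.d/50-quantum-gateway.conf')

        # For the n1kv plugin the charm manages neutron-l3-agent explicitly,
        # driven by the enable-l3-agent option, as the test above asserts.
        if config('plugin') == 'n1kv':
            if config('enable-l3-agent'):
                apt_install('neutron-l3-agent')
            else:
                apt_purge('neutron-l3-agent')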
3600=== modified file 'unit_tests/test_quantum_utils.py'
3601--- unit_tests/test_quantum_utils.py 2014-09-19 09:18:01 +0000
3602+++ unit_tests/test_quantum_utils.py 2014-11-26 13:43:20 +0000
3603@@ -36,6 +36,7 @@
3604 'service_running',
3605 'NetworkServiceContext',
3606 'ExternalPortContext',
3607+ 'DataPortContext',
3608 'unit_private_ip',
3609 'relations_of_type',
3610 'service_stop',
3611@@ -148,13 +149,33 @@
3612 self.test_config.set('ext-port', 'eth0')
3613 self.ExternalPortContext.return_value = \
3614 DummyExternalPortContext(return_value={'ext_port': 'eth0'})
3615+ self.DataPortContext.return_value = \
3616+ DummyExternalPortContext(return_value=None)
3617 quantum_utils.configure_ovs()
3618 self.add_bridge.assert_has_calls([
3619 call('br-int'),
3620- call('br-ex')
3621+ call('br-ex'),
3622+ call('br-data')
3623 ])
3624 self.add_bridge_port.assert_called_with('br-ex', 'eth0')
3625
3626+ def test_configure_ovs_ovs_data_port(self):
3627+ self.config.side_effect = self.test_config.get
3628+ self.test_config.set('plugin', 'ovs')
3629+ self.test_config.set('data-port', 'eth0')
3630+ self.ExternalPortContext.return_value = \
3631+ DummyExternalPortContext(return_value=None)
3632+ self.DataPortContext.return_value = \
3633+ DummyExternalPortContext(return_value={'data_port': 'eth0'})
3634+ quantum_utils.configure_ovs()
3635+ self.add_bridge.assert_has_calls([
3636+ call('br-int'),
3637+ call('br-ex'),
3638+ call('br-data')
3639+ ])
3640+ self.add_bridge_port.assert_called_with('br-data', 'eth0',
3641+ promisc=True)
3642+
3643 def test_do_openstack_upgrade(self):
3644 self.config.side_effect = self.test_config.get
3645 self.is_relation_made.return_value = False
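The configure_ovs tests now expect a br-data bridge alongside br-int and br-ex, with the configured data-port attached in promiscuous mode. Again, the real implementation lives in hooks/quantum_utils.py earlier in this diff; the following is only a sketch of the call pattern the assertions above imply:

    from charmhelpers.contrib.network.ovs import add_bridge, add_bridge_port
    from quantum_contexts import DataPortContext, ExternalPortContext


    def configure_ovs():
        add_bridge('br-int')
        add_bridge('br-ex')
        add_bridge('br-data')

        ext_port = ExternalPortContext()()
        if ext_port and ext_port.get('ext_port'):
            add_bridge_port('br-ex', ext_port['ext_port'])

        data_port = DataPortContext()()
        if data_port and data_port.get('data_port'):
            # promisc=True so br-data forwards frames for any MAC, which the
            # flat/VLAN data-port use case described in config.yaml relies on
            add_bridge_port('br-data', data_port['data_port'], promisc=True)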
