Merge lp:~openstack-charmers/charms/precise/nova-compute/ods_merge into lp:~charmers/charms/precise/nova-compute/trunk

Proposed by Adam Gandelman
Status: Merged
Merged at revision: 50
Proposed branch: lp:~openstack-charmers/charms/precise/nova-compute/ods_merge
Merge into: lp:~charmers/charms/precise/nova-compute/trunk
Diff against target: 1306 lines (+444/-83)
22 files modified
.project (+1/-1)
.pydevproject (+2/-2)
hooks/charmhelpers/contrib/network/ovs/__init__.py (+4/-1)
hooks/charmhelpers/contrib/openstack/alternatives.py (+17/-0)
hooks/charmhelpers/contrib/openstack/context.py (+20/-1)
hooks/charmhelpers/contrib/openstack/neutron.py (+20/-0)
hooks/charmhelpers/contrib/openstack/utils.py (+81/-10)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+27/-3)
hooks/charmhelpers/core/hookenv.py (+78/-23)
hooks/charmhelpers/core/host.py (+15/-9)
hooks/charmhelpers/fetch/__init__.py (+53/-5)
hooks/charmhelpers/fetch/bzrurl.py (+1/-1)
hooks/nova_compute_context.py (+11/-0)
hooks/nova_compute_hooks.py (+11/-8)
hooks/nova_compute_utils.py (+35/-15)
templates/essex/nova.conf (+3/-0)
templates/folsom/nova.conf (+3/-0)
templates/grizzly/nova.conf (+5/-0)
templates/havana/nova.conf (+5/-0)
unit_tests/test_nova_compute_contexts.py (+14/-0)
unit_tests/test_nova_compute_hooks.py (+34/-4)
unit_tests/test_nova_compute_utils.py (+4/-0)
To merge this branch: bzr merge lp:~openstack-charmers/charms/precise/nova-compute/ods_merge
Reviewer Review Type Date Requested Status
Marco Ceppi (community) Abstain
OpenStack Charmers Pending
Review via email: mp+194063@code.launchpad.net

Description of the change

* Adds Neutron NVP support
* Adds support for deploying alongside ceph via hulk smash

To post a comment you must log in.
59. By James Page

Rebase on trunk

Revision history for this message
Marco Ceppi (marcoceppi) :
review: Abstain

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file '.project'
2--- .project 2013-09-20 16:51:57 +0000
3+++ .project 2013-11-08 05:49:21 +0000
4@@ -1,6 +1,6 @@
5 <?xml version="1.0" encoding="UTF-8"?>
6 <projectDescription>
7- <name>nova-compute</name>
8+ <name>nvp-nova-compute</name>
9 <comment></comment>
10 <projects>
11 </projects>
12
13=== modified file '.pydevproject'
14--- .pydevproject 2013-09-23 13:23:51 +0000
15+++ .pydevproject 2013-11-08 05:49:21 +0000
16@@ -3,7 +3,7 @@
17 <pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
18 <pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
19 <pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
20-<path>/nova-compute/hooks</path>
21-<path>/nova-compute/unit_tests</path>
22+<path>/nvp-nova-compute/hooks</path>
23+<path>/nvp-nova-compute/unit_tests</path>
24 </pydev_pathproperty>
25 </pydev_project>
26
27=== modified file 'hooks/charmhelpers/contrib/network/ovs/__init__.py'
28--- hooks/charmhelpers/contrib/network/ovs/__init__.py 2013-09-25 09:31:31 +0000
29+++ hooks/charmhelpers/contrib/network/ovs/__init__.py 2013-11-08 05:49:21 +0000
30@@ -69,4 +69,7 @@
31
32 def full_restart():
33 ''' Full restart and reload of openvswitch '''
34- service('force-reload-kmod', 'openvswitch-switch')
35+ if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
36+ service('start', 'openvswitch-force-reload-kmod')
37+ else:
38+ service('force-reload-kmod', 'openvswitch-switch')
39
40=== added file 'hooks/charmhelpers/contrib/openstack/alternatives.py'
41--- hooks/charmhelpers/contrib/openstack/alternatives.py 1970-01-01 00:00:00 +0000
42+++ hooks/charmhelpers/contrib/openstack/alternatives.py 2013-11-08 05:49:21 +0000
43@@ -0,0 +1,17 @@
44+''' Helper for managing alternatives for file conflict resolution '''
45+
46+import subprocess
47+import shutil
48+import os
49+
50+
51+def install_alternative(name, target, source, priority=50):
52+ ''' Install alternative configuration '''
53+ if (os.path.exists(target) and not os.path.islink(target)):
54+ # Move existing file/directory away before installing
55+ shutil.move(target, '{}.bak'.format(target))
56+ cmd = [
57+ 'update-alternatives', '--force', '--install',
58+ target, name, source, str(priority)
59+ ]
60+ subprocess.check_call(cmd)
61
62=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
63--- hooks/charmhelpers/contrib/openstack/context.py 2013-10-15 01:32:42 +0000
64+++ hooks/charmhelpers/contrib/openstack/context.py 2013-11-08 05:49:21 +0000
65@@ -385,16 +385,33 @@
66 def ovs_ctxt(self):
67 driver = neutron_plugin_attribute(self.plugin, 'driver',
68 self.network_manager)
69-
70+ config = neutron_plugin_attribute(self.plugin, 'config',
71+ self.network_manager)
72 ovs_ctxt = {
73 'core_plugin': driver,
74 'neutron_plugin': 'ovs',
75 'neutron_security_groups': self.neutron_security_groups,
76 'local_ip': unit_private_ip(),
77+ 'config': config
78 }
79
80 return ovs_ctxt
81
82+ def nvp_ctxt(self):
83+ driver = neutron_plugin_attribute(self.plugin, 'driver',
84+ self.network_manager)
85+ config = neutron_plugin_attribute(self.plugin, 'config',
86+ self.network_manager)
87+ nvp_ctxt = {
88+ 'core_plugin': driver,
89+ 'neutron_plugin': 'nvp',
90+ 'neutron_security_groups': self.neutron_security_groups,
91+ 'local_ip': unit_private_ip(),
92+ 'config': config
93+ }
94+
95+ return nvp_ctxt
96+
97 def __call__(self):
98 self._ensure_packages()
99
100@@ -408,6 +425,8 @@
101
102 if self.plugin == 'ovs':
103 ctxt.update(self.ovs_ctxt())
104+ elif self.plugin == 'nvp':
105+ ctxt.update(self.nvp_ctxt())
106
107 self._save_flag_file()
108 return ctxt
109
110=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
111--- hooks/charmhelpers/contrib/openstack/neutron.py 2013-10-15 01:32:42 +0000
112+++ hooks/charmhelpers/contrib/openstack/neutron.py 2013-11-08 05:49:21 +0000
113@@ -34,13 +34,23 @@
114 'services': ['quantum-plugin-openvswitch-agent'],
115 'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
116 ['quantum-plugin-openvswitch-agent']],
117+ 'server_packages': ['quantum-server',
118+ 'quantum-plugin-openvswitch'],
119+ 'server_services': ['quantum-server']
120 },
121 'nvp': {
122 'config': '/etc/quantum/plugins/nicira/nvp.ini',
123 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
124 'QuantumPlugin.NvpPluginV2',
125+ 'contexts': [
126+ context.SharedDBContext(user=config('neutron-database-user'),
127+ database=config('neutron-database'),
128+ relation_prefix='neutron')],
129 'services': [],
130 'packages': [],
131+ 'server_packages': ['quantum-server',
132+ 'quantum-plugin-nicira'],
133+ 'server_services': ['quantum-server']
134 }
135 }
136
137@@ -60,13 +70,23 @@
138 'services': ['neutron-plugin-openvswitch-agent'],
139 'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
140 ['quantum-plugin-openvswitch-agent']],
141+ 'server_packages': ['neutron-server',
142+ 'neutron-plugin-openvswitch'],
143+ 'server_services': ['neutron-server']
144 },
145 'nvp': {
146 'config': '/etc/neutron/plugins/nicira/nvp.ini',
147 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
148 'NeutronPlugin.NvpPluginV2',
149+ 'contexts': [
150+ context.SharedDBContext(user=config('neutron-database-user'),
151+ database=config('neutron-database'),
152+ relation_prefix='neutron')],
153 'services': [],
154 'packages': [],
155+ 'server_packages': ['neutron-server',
156+ 'neutron-plugin-nicira'],
157+ 'server_services': ['neutron-server']
158 }
159 }
160
161
162=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
163--- hooks/charmhelpers/contrib/openstack/utils.py 2013-10-13 22:51:26 +0000
164+++ hooks/charmhelpers/contrib/openstack/utils.py 2013-11-08 05:49:21 +0000
165@@ -13,19 +13,28 @@
166 config,
167 log as juju_log,
168 charm_dir,
169-)
170-
171-from charmhelpers.core.host import (
172- lsb_release,
173-)
174-
175-from charmhelpers.fetch import (
176- apt_install,
177-)
178+ ERROR,
179+ INFO
180+)
181+
182+from charmhelpers.contrib.storage.linux.lvm import (
183+ deactivate_lvm_volume_group,
184+ is_lvm_physical_volume,
185+ remove_lvm_physical_volume,
186+)
187+
188+from charmhelpers.core.host import lsb_release, mounts, umount
189+from charmhelpers.fetch import apt_install
190+from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
191+from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
192
193 CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
194 CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
195
196+DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
197+ 'restricted main multiverse universe')
198+
199+
200 UBUNTU_OPENSTACK_RELEASE = OrderedDict([
201 ('oneiric', 'diablo'),
202 ('precise', 'essex'),
203@@ -57,6 +66,8 @@
204 ('1.9.0', 'havana'),
205 ])
206
207+DEFAULT_LOOPBACK_SIZE = '5G'
208+
209
210 def error_out(msg):
211 juju_log("FATAL ERROR: %s" % msg, level='ERROR')
212@@ -67,7 +78,7 @@
213 '''Derive OpenStack release codename from a given installation source.'''
214 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
215 rel = ''
216- if src == 'distro':
217+ if src in ['distro', 'distro-proposed']:
218 try:
219 rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
220 except KeyError:
221@@ -202,6 +213,10 @@
222 '''Configure apt installation source.'''
223 if rel == 'distro':
224 return
225+ elif rel == 'distro-proposed':
226+ ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
227+ with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
228+ f.write(DISTRO_PROPOSED % ubuntu_rel)
229 elif rel[:4] == "ppa:":
230 src = rel
231 subprocess.check_call(["add-apt-repository", "-y", src])
232@@ -299,6 +314,62 @@
233 return apt.version_compare(available_vers, cur_vers) == 1
234
235
236+def ensure_block_device(block_device):
237+ '''
238+ Confirm block_device, create as loopback if necessary.
239+
240+ :param block_device: str: Full path of block device to ensure.
241+
242+ :returns: str: Full path of ensured block device.
243+ '''
244+ _none = ['None', 'none', None]
245+ if (block_device in _none):
246+ error_out('prepare_storage(): Missing required input: '
247+ 'block_device=%s.' % block_device, level=ERROR)
248+
249+ if block_device.startswith('/dev/'):
250+ bdev = block_device
251+ elif block_device.startswith('/'):
252+ _bd = block_device.split('|')
253+ if len(_bd) == 2:
254+ bdev, size = _bd
255+ else:
256+ bdev = block_device
257+ size = DEFAULT_LOOPBACK_SIZE
258+ bdev = ensure_loopback_device(bdev, size)
259+ else:
260+ bdev = '/dev/%s' % block_device
261+
262+ if not is_block_device(bdev):
263+ error_out('Failed to locate valid block device at %s' % bdev,
264+ level=ERROR)
265+
266+ return bdev
267+
268+
269+def clean_storage(block_device):
270+ '''
271+ Ensures a block device is clean. That is:
272+ - unmounted
273+ - any lvm volume groups are deactivated
274+ - any lvm physical device signatures removed
275+ - partition table wiped
276+
277+ :param block_device: str: Full path to block device to clean.
278+ '''
279+ for mp, d in mounts():
280+ if d == block_device:
281+ juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
282+ (d, mp), level=INFO)
283+ umount(mp, persist=True)
284+
285+ if is_lvm_physical_volume(block_device):
286+ deactivate_lvm_volume_group(block_device)
287+ remove_lvm_physical_volume(block_device)
288+ else:
289+ zap_disk(block_device)
290+
291+
292 def is_ip(address):
293 """
294 Returns True if address is a valid IP address.
295
296=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
297--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2013-09-25 09:31:31 +0000
298+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2013-11-08 05:49:21 +0000
299@@ -102,8 +102,12 @@
300 Return a list of all Ceph Object Storage Daemons
301 currently in the cluster
302 '''
303- return json.loads(check_output(['ceph', '--id', service,
304- 'osd', 'ls', '--format=json']))
305+ version = ceph_version()
306+ if version and version >= '0.56':
307+ return json.loads(check_output(['ceph', '--id', service,
308+ 'osd', 'ls', '--format=json']))
309+ else:
310+ return None
311
312
313 def create_pool(service, name, replicas=2):
314@@ -114,7 +118,13 @@
315 return
316 # Calculate the number of placement groups based
317 # on upstream recommended best practices.
318- pgnum = (len(get_osds(service)) * 100 / replicas)
319+ osds = get_osds(service)
320+ if osds:
321+ pgnum = (len(osds) * 100 / replicas)
322+ else:
323+ # NOTE(james-page): Default to 200 for older ceph versions
324+ # which don't support OSD query from cli
325+ pgnum = 200
326 cmd = [
327 'ceph', '--id', service,
328 'osd', 'pool', 'create',
329@@ -357,3 +367,17 @@
330 if user and group:
331 check_call(['chown', '%s.%s' % (user, group), keyring])
332 return True
333+
334+
335+def ceph_version():
336+ ''' Retrieve the local version of ceph '''
337+ if os.path.exists('/usr/bin/ceph'):
338+ cmd = ['ceph', '-v']
339+ output = check_output(cmd)
340+ output = output.split()
341+ if len(output) > 3:
342+ return output[2]
343+ else:
344+ return None
345+ else:
346+ return None
347
348=== modified file 'hooks/charmhelpers/core/hookenv.py'
349--- hooks/charmhelpers/core/hookenv.py 2013-07-19 02:37:30 +0000
350+++ hooks/charmhelpers/core/hookenv.py 2013-11-08 05:49:21 +0000
351@@ -9,6 +9,7 @@
352 import yaml
353 import subprocess
354 import UserDict
355+from subprocess import CalledProcessError
356
357 CRITICAL = "CRITICAL"
358 ERROR = "ERROR"
359@@ -21,7 +22,7 @@
360
361
362 def cached(func):
363- ''' Cache return values for multiple executions of func + args
364+ """Cache return values for multiple executions of func + args
365
366 For example:
367
368@@ -32,7 +33,7 @@
369 unit_get('test')
370
371 will cache the result of unit_get + 'test' for future calls.
372- '''
373+ """
374 def wrapper(*args, **kwargs):
375 global cache
376 key = str((func, args, kwargs))
377@@ -46,8 +47,8 @@
378
379
380 def flush(key):
381- ''' Flushes any entries from function cache where the
382- key is found in the function+args '''
383+ """Flushes any entries from function cache where the
384+ key is found in the function+args """
385 flush_list = []
386 for item in cache:
387 if key in item:
388@@ -57,7 +58,7 @@
389
390
391 def log(message, level=None):
392- "Write a message to the juju log"
393+ """Write a message to the juju log"""
394 command = ['juju-log']
395 if level:
396 command += ['-l', level]
397@@ -66,7 +67,7 @@
398
399
400 class Serializable(UserDict.IterableUserDict):
401- "Wrapper, an object that can be serialized to yaml or json"
402+ """Wrapper, an object that can be serialized to yaml or json"""
403
404 def __init__(self, obj):
405 # wrap the object
406@@ -96,11 +97,11 @@
407 self.data = state
408
409 def json(self):
410- "Serialize the object to json"
411+ """Serialize the object to json"""
412 return json.dumps(self.data)
413
414 def yaml(self):
415- "Serialize the object to yaml"
416+ """Serialize the object to yaml"""
417 return yaml.dump(self.data)
418
419
420@@ -119,38 +120,38 @@
421
422
423 def in_relation_hook():
424- "Determine whether we're running in a relation hook"
425+ """Determine whether we're running in a relation hook"""
426 return 'JUJU_RELATION' in os.environ
427
428
429 def relation_type():
430- "The scope for the current relation hook"
431+ """The scope for the current relation hook"""
432 return os.environ.get('JUJU_RELATION', None)
433
434
435 def relation_id():
436- "The relation ID for the current relation hook"
437+ """The relation ID for the current relation hook"""
438 return os.environ.get('JUJU_RELATION_ID', None)
439
440
441 def local_unit():
442- "Local unit ID"
443+ """Local unit ID"""
444 return os.environ['JUJU_UNIT_NAME']
445
446
447 def remote_unit():
448- "The remote unit for the current relation hook"
449+ """The remote unit for the current relation hook"""
450 return os.environ['JUJU_REMOTE_UNIT']
451
452
453 def service_name():
454- "The name service group this unit belongs to"
455+ """The name service group this unit belongs to"""
456 return local_unit().split('/')[0]
457
458
459 @cached
460 def config(scope=None):
461- "Juju charm configuration"
462+ """Juju charm configuration"""
463 config_cmd_line = ['config-get']
464 if scope is not None:
465 config_cmd_line.append(scope)
466@@ -163,6 +164,7 @@
467
468 @cached
469 def relation_get(attribute=None, unit=None, rid=None):
470+ """Get relation information"""
471 _args = ['relation-get', '--format=json']
472 if rid:
473 _args.append('-r')
474@@ -174,9 +176,14 @@
475 return json.loads(subprocess.check_output(_args))
476 except ValueError:
477 return None
478+ except CalledProcessError, e:
479+ if e.returncode == 2:
480+ return None
481+ raise
482
483
484 def relation_set(relation_id=None, relation_settings={}, **kwargs):
485+ """Set relation information for the current unit"""
486 relation_cmd_line = ['relation-set']
487 if relation_id is not None:
488 relation_cmd_line.extend(('-r', relation_id))
489@@ -192,7 +199,7 @@
490
491 @cached
492 def relation_ids(reltype=None):
493- "A list of relation_ids"
494+ """A list of relation_ids"""
495 reltype = reltype or relation_type()
496 relid_cmd_line = ['relation-ids', '--format=json']
497 if reltype is not None:
498@@ -203,7 +210,7 @@
499
500 @cached
501 def related_units(relid=None):
502- "A list of related units"
503+ """A list of related units"""
504 relid = relid or relation_id()
505 units_cmd_line = ['relation-list', '--format=json']
506 if relid is not None:
507@@ -213,7 +220,7 @@
508
509 @cached
510 def relation_for_unit(unit=None, rid=None):
511- "Get the json represenation of a unit's relation"
512+ """Get the json represenation of a unit's relation"""
513 unit = unit or remote_unit()
514 relation = relation_get(unit=unit, rid=rid)
515 for key in relation:
516@@ -225,7 +232,7 @@
517
518 @cached
519 def relations_for_id(relid=None):
520- "Get relations of a specific relation ID"
521+ """Get relations of a specific relation ID"""
522 relation_data = []
523 relid = relid or relation_ids()
524 for unit in related_units(relid):
525@@ -237,7 +244,7 @@
526
527 @cached
528 def relations_of_type(reltype=None):
529- "Get relations of a specific type"
530+ """Get relations of a specific type"""
531 relation_data = []
532 reltype = reltype or relation_type()
533 for relid in relation_ids(reltype):
534@@ -249,7 +256,7 @@
535
536 @cached
537 def relation_types():
538- "Get a list of relation types supported by this charm"
539+ """Get a list of relation types supported by this charm"""
540 charmdir = os.environ.get('CHARM_DIR', '')
541 mdf = open(os.path.join(charmdir, 'metadata.yaml'))
542 md = yaml.safe_load(mdf)
543@@ -264,6 +271,7 @@
544
545 @cached
546 def relations():
547+ """Get a nested dictionary of relation data for all related units"""
548 rels = {}
549 for reltype in relation_types():
550 relids = {}
551@@ -277,15 +285,35 @@
552 return rels
553
554
555+@cached
556+def is_relation_made(relation, keys='private-address'):
557+ '''
558+ Determine whether a relation is established by checking for
559+ presence of key(s). If a list of keys is provided, they
560+ must all be present for the relation to be identified as made
561+ '''
562+ if isinstance(keys, str):
563+ keys = [keys]
564+ for r_id in relation_ids(relation):
565+ for unit in related_units(r_id):
566+ context = {}
567+ for k in keys:
568+ context[k] = relation_get(k, rid=r_id,
569+ unit=unit)
570+ if None not in context.values():
571+ return True
572+ return False
573+
574+
575 def open_port(port, protocol="TCP"):
576- "Open a service network port"
577+ """Open a service network port"""
578 _args = ['open-port']
579 _args.append('{}/{}'.format(port, protocol))
580 subprocess.check_call(_args)
581
582
583 def close_port(port, protocol="TCP"):
584- "Close a service network port"
585+ """Close a service network port"""
586 _args = ['close-port']
587 _args.append('{}/{}'.format(port, protocol))
588 subprocess.check_call(_args)
589@@ -293,6 +321,7 @@
590
591 @cached
592 def unit_get(attribute):
593+ """Get the unit ID for the remote unit"""
594 _args = ['unit-get', '--format=json', attribute]
595 try:
596 return json.loads(subprocess.check_output(_args))
597@@ -301,22 +330,46 @@
598
599
600 def unit_private_ip():
601+ """Get this unit's private IP address"""
602 return unit_get('private-address')
603
604
605 class UnregisteredHookError(Exception):
606+ """Raised when an undefined hook is called"""
607 pass
608
609
610 class Hooks(object):
611+ """A convenient handler for hook functions.
612+
613+ Example:
614+ hooks = Hooks()
615+
616+ # register a hook, taking its name from the function name
617+ @hooks.hook()
618+ def install():
619+ ...
620+
621+ # register a hook, providing a custom hook name
622+ @hooks.hook("config-changed")
623+ def config_changed():
624+ ...
625+
626+ if __name__ == "__main__":
627+ # execute a hook based on the name the program is called by
628+ hooks.execute(sys.argv)
629+ """
630+
631 def __init__(self):
632 super(Hooks, self).__init__()
633 self._hooks = {}
634
635 def register(self, name, function):
636+ """Register a hook"""
637 self._hooks[name] = function
638
639 def execute(self, args):
640+ """Execute a registered hook based on args[0]"""
641 hook_name = os.path.basename(args[0])
642 if hook_name in self._hooks:
643 self._hooks[hook_name]()
644@@ -324,6 +377,7 @@
645 raise UnregisteredHookError(hook_name)
646
647 def hook(self, *hook_names):
648+ """Decorator, registering them as hooks"""
649 def wrapper(decorated):
650 for hook_name in hook_names:
651 self.register(hook_name, decorated)
652@@ -337,4 +391,5 @@
653
654
655 def charm_dir():
656+ """Return the root directory of the current charm"""
657 return os.environ.get('CHARM_DIR')
658
659=== modified file 'hooks/charmhelpers/core/host.py'
660--- hooks/charmhelpers/core/host.py 2013-09-20 16:40:54 +0000
661+++ hooks/charmhelpers/core/host.py 2013-11-08 05:49:21 +0000
662@@ -19,18 +19,22 @@
663
664
665 def service_start(service_name):
666+ """Start a system service"""
667 return service('start', service_name)
668
669
670 def service_stop(service_name):
671+ """Stop a system service"""
672 return service('stop', service_name)
673
674
675 def service_restart(service_name):
676+ """Restart a system service"""
677 return service('restart', service_name)
678
679
680 def service_reload(service_name, restart_on_failure=False):
681+ """Reload a system service, optionally falling back to restart if reload fails"""
682 service_result = service('reload', service_name)
683 if not service_result and restart_on_failure:
684 service_result = service('restart', service_name)
685@@ -38,11 +42,13 @@
686
687
688 def service(action, service_name):
689+ """Control a system service"""
690 cmd = ['service', service_name, action]
691 return subprocess.call(cmd) == 0
692
693
694 def service_running(service):
695+ """Determine whether a system service is running"""
696 try:
697 output = subprocess.check_output(['service', service, 'status'])
698 except subprocess.CalledProcessError:
699@@ -55,7 +61,7 @@
700
701
702 def adduser(username, password=None, shell='/bin/bash', system_user=False):
703- """Add a user"""
704+ """Add a user to the system"""
705 try:
706 user_info = pwd.getpwnam(username)
707 log('user {0} already exists!'.format(username))
708@@ -138,7 +144,7 @@
709
710
711 def mount(device, mountpoint, options=None, persist=False):
712- '''Mount a filesystem'''
713+ """Mount a filesystem at a particular mountpoint"""
714 cmd_args = ['mount']
715 if options is not None:
716 cmd_args.extend(['-o', options])
717@@ -155,7 +161,7 @@
718
719
720 def umount(mountpoint, persist=False):
721- '''Unmount a filesystem'''
722+ """Unmount a filesystem"""
723 cmd_args = ['umount', mountpoint]
724 try:
725 subprocess.check_output(cmd_args)
726@@ -169,7 +175,7 @@
727
728
729 def mounts():
730- '''List of all mounted volumes as [[mountpoint,device],[...]]'''
731+ """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
732 with open('/proc/mounts') as f:
733 # [['/mount/point','/dev/path'],[...]]
734 system_mounts = [m[1::-1] for m in [l.strip().split()
735@@ -178,7 +184,7 @@
736
737
738 def file_hash(path):
739- ''' Generate a md5 hash of the contents of 'path' or None if not found '''
740+ """Generate a md5 hash of the contents of 'path' or None if not found """
741 if os.path.exists(path):
742 h = hashlib.md5()
743 with open(path, 'r') as source:
744@@ -189,7 +195,7 @@
745
746
747 def restart_on_change(restart_map):
748- ''' Restart services based on configuration files changing
749+ """Restart services based on configuration files changing
750
751 This function is used a decorator, for example
752
753@@ -202,7 +208,7 @@
754 In this example, the cinder-api and cinder-volume services
755 would be restarted if /etc/ceph/ceph.conf is changed by the
756 ceph_client_changed function.
757- '''
758+ """
759 def wrap(f):
760 def wrapped_f(*args):
761 checksums = {}
762@@ -220,7 +226,7 @@
763
764
765 def lsb_release():
766- '''Return /etc/lsb-release in a dict'''
767+ """Return /etc/lsb-release in a dict"""
768 d = {}
769 with open('/etc/lsb-release', 'r') as lsb:
770 for l in lsb:
771@@ -230,7 +236,7 @@
772
773
774 def pwgen(length=None):
775- '''Generate a random pasword.'''
776+ """Generate a random pasword."""
777 if length is None:
778 length = random.choice(range(35, 45))
779 alphanumeric_chars = [
780
781=== modified file 'hooks/charmhelpers/fetch/__init__.py'
782--- hooks/charmhelpers/fetch/__init__.py 2013-09-23 13:23:51 +0000
783+++ hooks/charmhelpers/fetch/__init__.py 2013-11-08 05:49:21 +0000
784@@ -20,6 +20,32 @@
785 PROPOSED_POCKET = """# Proposed
786 deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
787 """
788+CLOUD_ARCHIVE_POCKETS = {
789+ # Folsom
790+ 'folsom': 'precise-updates/folsom',
791+ 'precise-folsom': 'precise-updates/folsom',
792+ 'precise-folsom/updates': 'precise-updates/folsom',
793+ 'precise-updates/folsom': 'precise-updates/folsom',
794+ 'folsom/proposed': 'precise-proposed/folsom',
795+ 'precise-folsom/proposed': 'precise-proposed/folsom',
796+ 'precise-proposed/folsom': 'precise-proposed/folsom',
797+ # Grizzly
798+ 'grizzly': 'precise-updates/grizzly',
799+ 'precise-grizzly': 'precise-updates/grizzly',
800+ 'precise-grizzly/updates': 'precise-updates/grizzly',
801+ 'precise-updates/grizzly': 'precise-updates/grizzly',
802+ 'grizzly/proposed': 'precise-proposed/grizzly',
803+ 'precise-grizzly/proposed': 'precise-proposed/grizzly',
804+ 'precise-proposed/grizzly': 'precise-proposed/grizzly',
805+ # Havana
806+ 'havana': 'precise-updates/havana',
807+ 'precise-havana': 'precise-updates/havana',
808+ 'precise-havana/updates': 'precise-updates/havana',
809+ 'precise-updates/havana': 'precise-updates/havana',
810+ 'havana/proposed': 'precise-proposed/havana',
811+ 'precies-havana/proposed': 'precise-proposed/havana',
812+ 'precise-proposed/havana': 'precise-proposed/havana',
813+}
814
815
816 def filter_installed_packages(packages):
817@@ -79,16 +105,35 @@
818 subprocess.call(cmd)
819
820
821+def apt_hold(packages, fatal=False):
822+ """Hold one or more packages"""
823+ cmd = ['apt-mark', 'hold']
824+ if isinstance(packages, basestring):
825+ cmd.append(packages)
826+ else:
827+ cmd.extend(packages)
828+ log("Holding {}".format(packages))
829+ if fatal:
830+ subprocess.check_call(cmd)
831+ else:
832+ subprocess.call(cmd)
833+
834+
835 def add_source(source, key=None):
836- if ((source.startswith('ppa:') or
837- source.startswith('http:'))):
838+ if (source.startswith('ppa:') or
839+ source.startswith('http:') or
840+ source.startswith('deb ') or
841+ source.startswith('cloud-archive:')):
842 subprocess.check_call(['add-apt-repository', '--yes', source])
843 elif source.startswith('cloud:'):
844 apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
845 fatal=True)
846 pocket = source.split(':')[-1]
847+ if pocket not in CLOUD_ARCHIVE_POCKETS:
848+ raise SourceConfigError('Unsupported cloud: source option %s' % pocket)
849+ actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
850 with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
851- apt.write(CLOUD_ARCHIVE.format(pocket))
852+ apt.write(CLOUD_ARCHIVE.format(actual_pocket))
853 elif source == 'proposed':
854 release = lsb_release()['DISTRIB_CODENAME']
855 with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
856@@ -118,8 +163,11 @@
857 Note that 'null' (a.k.a. None) should not be quoted.
858 """
859 sources = safe_load(config(sources_var))
860- keys = safe_load(config(keys_var))
861- if isinstance(sources, basestring) and isinstance(keys, basestring):
862+ keys = config(keys_var)
863+ if keys is not None:
864+ keys = safe_load(keys)
865+ if isinstance(sources, basestring) and (
866+ keys is None or isinstance(keys, basestring)):
867 add_source(sources, keys)
868 else:
869 if not len(sources) == len(keys):
870
871=== modified file 'hooks/charmhelpers/fetch/bzrurl.py'
872--- hooks/charmhelpers/fetch/bzrurl.py 2013-09-23 13:23:51 +0000
873+++ hooks/charmhelpers/fetch/bzrurl.py 2013-11-08 05:49:21 +0000
874@@ -12,6 +12,7 @@
875 apt_install("python-bzrlib")
876 from bzrlib.branch import Branch
877
878+
879 class BzrUrlFetchHandler(BaseFetchHandler):
880 """Handler for bazaar branches via generic and lp URLs"""
881 def can_handle(self, source):
882@@ -46,4 +47,3 @@
883 except OSError as e:
884 raise UnhandledSource(e.strerror)
885 return dest_dir
886-
887
888=== modified file 'hooks/nova_compute_context.py'
889--- hooks/nova_compute_context.py 2013-10-15 12:04:13 +0000
890+++ hooks/nova_compute_context.py 2013-11-08 05:49:21 +0000
891@@ -273,6 +273,14 @@
892 self.network_manager)
893 return ctxt
894
895+ def restart_trigger(self):
896+ rt = None
897+ for rid in relation_ids('cloud-compute'):
898+ for unit in related_units(rid):
899+ rt = relation_get('restart_trigger', rid=rid, unit=unit)
900+ if rt:
901+ return rt
902+
903 def __call__(self):
904 rids = relation_ids('cloud-compute')
905 if not rids:
906@@ -289,6 +297,9 @@
907 if vol_service:
908 ctxt['volume_service'] = vol_service
909
910+ if self.restart_trigger():
911+ ctxt['restart_trigger'] = self.restart_trigger()
912+
913 return ctxt
914
915
916
917=== modified file 'hooks/nova_compute_hooks.py'
918--- hooks/nova_compute_hooks.py 2013-10-17 19:27:44 +0000
919+++ hooks/nova_compute_hooks.py 2013-11-08 05:49:21 +0000
920@@ -48,7 +48,7 @@
921 register_configs,
922 NOVA_CONF,
923 QUANTUM_CONF, NEUTRON_CONF,
924- CEPH_CONF, CEPH_SECRET
925+ ceph_config_file, CEPH_SECRET
926 )
927
928 from nova_compute_context import CEPH_SECRET_UUID
929@@ -94,9 +94,10 @@
930 log('amqp relation incomplete. Peer not ready?')
931 return
932 CONFIGS.write(NOVA_CONF)
933- if network_manager() == 'quantum':
934+
935+ if network_manager() == 'quantum' and neutron_plugin() == 'ovs':
936 CONFIGS.write(QUANTUM_CONF)
937- if network_manager() == 'neutron':
938+ if network_manager() == 'neutron' and neutron_plugin() == 'ovs':
939 CONFIGS.write(NEUTRON_CONF)
940
941
942@@ -106,7 +107,8 @@
943 nova_database=config('database'),
944 nova_username=config('database-user'),
945 nova_hostname=unit_get('private-address'))
946- if network_manager() in ['quantum', 'neutron']:
947+ if (network_manager() in ['quantum', 'neutron']
948+ and neutron_plugin() == 'ovs'):
949 # XXX: Renaming relations from quantum_* to neutron_* here.
950 relation_set(relation_id=rid,
951 neutron_database=config('neutron-database'),
952@@ -122,8 +124,8 @@
953 return
954 CONFIGS.write(NOVA_CONF)
955 nm = network_manager()
956- if nm in ['quantum', 'neutron']:
957- plugin = neutron_plugin()
958+ plugin = neutron_plugin()
959+ if nm in ['quantum', 'neutron'] and plugin == 'ovs':
960 CONFIGS.write(neutron_plugin_attribute(plugin, 'config', nm))
961
962
963@@ -157,7 +159,8 @@
964 CONFIGS.write_all()
965 import_authorized_keys()
966 import_keystone_ca_cert()
967- if network_manager() in ['quantum', 'neutron']:
968+ if (network_manager() in ['quantum', 'neutron']
969+ and neutron_plugin() == 'ovs'):
970 # in case we already have a database relation, need to request
971 # access to the additional neutron database.
972 [db_joined(rid) for rid in relation_ids('shared-db')]
973@@ -179,7 +182,7 @@
974 if not ensure_ceph_keyring(service=svc):
975 log('Could not create ceph keyring: peer not ready?')
976 return
977- CONFIGS.write(CEPH_CONF)
978+ CONFIGS.write(ceph_config_file())
979 CONFIGS.write(CEPH_SECRET)
980 CONFIGS.write(NOVA_CONF)
981
982
983=== modified file 'hooks/nova_compute_utils.py'
984--- hooks/nova_compute_utils.py 2013-09-24 17:11:51 +0000
985+++ hooks/nova_compute_utils.py 2013-11-08 05:49:21 +0000
986@@ -6,7 +6,7 @@
987 from subprocess import check_call, check_output
988
989 from charmhelpers.fetch import apt_update, apt_install
990-
991+from charmhelpers.core.host import mkdir
992 from charmhelpers.core.hookenv import (
993 config,
994 log,
995@@ -14,10 +14,12 @@
996 relation_ids,
997 relation_get,
998 DEBUG,
999+ service_name
1000 )
1001
1002 from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute
1003 from charmhelpers.contrib.openstack import templating, context
1004+from charmhelpers.contrib.openstack.alternatives import install_alternative
1005
1006 from charmhelpers.contrib.openstack.utils import (
1007 configure_installation_source,
1008@@ -72,13 +74,10 @@
1009 }
1010
1011 CEPH_CONF = '/etc/ceph/ceph.conf'
1012+CHARM_CEPH_CONF = '/var/lib/charm/{}/ceph.conf'
1013 CEPH_SECRET = '/etc/ceph/secret.xml'
1014
1015 CEPH_RESOURCES = {
1016- CEPH_CONF: {
1017- 'contexts': [NovaComputeCephContext()],
1018- 'services': [],
1019- },
1020 CEPH_SECRET: {
1021 'contexts': [NovaComputeCephContext()],
1022 'services': [],
1023@@ -114,6 +113,10 @@
1024 }
1025
1026
1027+def ceph_config_file():
1028+ return CHARM_CEPH_CONF.format(service_name())
1029+
1030+
1031 def resource_map():
1032 '''
1033 Dynamically generate a map of resources that will be managed for a single
1034@@ -122,6 +125,7 @@
1035 # TODO: Cache this on first call?
1036 resource_map = deepcopy(BASE_RESOURCE_MAP)
1037 net_manager = network_manager()
1038+ plugin = neutron_plugin()
1039
1040 # Network manager gets set late by the cloud-compute interface.
1041 # FlatDHCPManager only requires some extra packages.
1042@@ -133,17 +137,15 @@
1043
1044 # Neutron/quantum requires additional contexts, as well as new resources
1045 # depending on the plugin used.
1046+ # NOTE(james-page): only required for ovs plugin right now
1047 if net_manager in ['neutron', 'quantum']:
1048- if net_manager == 'quantum':
1049- nm_rsc = QUANTUM_RESOURCES
1050- if net_manager == 'neutron':
1051- nm_rsc = NEUTRON_RESOURCES
1052- resource_map.update(nm_rsc)
1053-
1054- resource_map[NOVA_CONF]['contexts'].append(NeutronComputeContext())
1055-
1056- plugin = neutron_plugin()
1057- if plugin:
1058+ if plugin == 'ovs':
1059+ if net_manager == 'quantum':
1060+ nm_rsc = QUANTUM_RESOURCES
1061+ if net_manager == 'neutron':
1062+ nm_rsc = NEUTRON_RESOURCES
1063+ resource_map.update(nm_rsc)
1064+
1065 conf = neutron_plugin_attribute(plugin, 'config', net_manager)
1066 svcs = neutron_plugin_attribute(plugin, 'services', net_manager)
1067 ctxts = (neutron_plugin_attribute(plugin, 'contexts', net_manager)
1068@@ -156,7 +158,25 @@
1069 # associate the plugin agent with main network manager config(s)
1070 [resource_map[nmc]['services'].extend(svcs) for nmc in nm_rsc]
1071
1072+ resource_map[NOVA_CONF]['contexts'].append(NeutronComputeContext())
1073+
1074 if relation_ids('ceph'):
1075+ # Add charm ceph configuration to resources and
1076+ # ensure directory actually exists
1077+ mkdir(os.path.dirname(ceph_config_file()))
1078+ mkdir(os.path.dirname(CEPH_CONF))
1079+ # Install ceph config as an alternative for co-location with
1080+ # ceph and ceph-osd charms - nova-compute ceph.conf will be
1081+ # lower priority than both of these but that's OK
1082+ if not os.path.exists(ceph_config_file()):
1083+ # touch file for pre-templated generation
1084+ open(ceph_config_file(), 'w').close()
1085+ install_alternative(os.path.basename(CEPH_CONF),
1086+ CEPH_CONF, ceph_config_file())
1087+ CEPH_RESOURCES[ceph_config_file()] = {
1088+ 'contexts': [NovaComputeCephContext()],
1089+ 'services': [],
1090+ }
1091 resource_map.update(CEPH_RESOURCES)
1092
1093 return resource_map
1094
1095=== modified file 'templates/essex/nova.conf'
1096--- templates/essex/nova.conf 2013-08-01 23:21:58 +0000
1097+++ templates/essex/nova.conf 2013-11-08 05:49:21 +0000
1098@@ -1,6 +1,9 @@
1099 ###############################################################################
1100 # [ WARNING ]
1101 # Configuration file maintained by Juju. Local changes may be overwritten.
1102+{% if restart_trigger -%}
1103+# restart trigger: {{ restart_trigger }}
1104+{% endif -%}
1105 ###############################################################################
1106 --dhcpbridge_flagfile=/etc/nova/nova.conf
1107 --dhcpbridge=/usr/bin/nova-dhcpbridge
1108
1109=== modified file 'templates/folsom/nova.conf'
1110--- templates/folsom/nova.conf 2013-10-01 11:16:21 +0000
1111+++ templates/folsom/nova.conf 2013-11-08 05:49:21 +0000
1112@@ -2,6 +2,9 @@
1113 ###############################################################################
1114 # [ WARNING ]
1115 # Configuration file maintained by Juju. Local changes may be overwritten.
1116+{% if restart_trigger -%}
1117+# restart trigger: {{ restart_trigger }}
1118+{% endif -%}
1119 ###############################################################################
1120 [DEFAULT]
1121 dhcpbridge_flagfile=/etc/nova/nova.conf
1122
1123=== modified file 'templates/grizzly/nova.conf'
1124--- templates/grizzly/nova.conf 2013-10-01 11:16:21 +0000
1125+++ templates/grizzly/nova.conf 2013-11-08 05:49:21 +0000
1126@@ -2,6 +2,9 @@
1127 ###############################################################################
1128 # [ WARNING ]
1129 # Configuration file maintained by Juju. Local changes may be overwritten.
1130+{% if restart_trigger -%}
1131+# restart trigger: {{ restart_trigger }}
1132+{% endif -%}
1133 ###############################################################################
1134 [DEFAULT]
1135 dhcpbridge_flagfile=/etc/nova/nova.conf
1136@@ -52,6 +55,8 @@
1137
1138 {% if neutron_plugin and neutron_plugin == 'nvp' -%}
1139 libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtOpenVswitchVirtualPortDriver
1140+security_group_api = quantum
1141+firewall_driver = nova.virt.firewall.NoopFirewallDriver
1142 {% endif -%}
1143
1144 {% if network_manager_config -%}
1145
1146=== modified file 'templates/havana/nova.conf'
1147--- templates/havana/nova.conf 2013-10-01 11:16:21 +0000
1148+++ templates/havana/nova.conf 2013-11-08 05:49:21 +0000
1149@@ -2,6 +2,9 @@
1150 ###############################################################################
1151 # [ WARNING ]
1152 # Configuration file maintained by Juju. Local changes may be overwritten.
1153+{% if restart_trigger -%}
1154+# restart trigger: {{ restart_trigger }}
1155+{% endif -%}
1156 ###############################################################################
1157 [DEFAULT]
1158 dhcpbridge_flagfile=/etc/nova/nova.conf
1159@@ -52,6 +55,8 @@
1160
1161 {% if neutron_plugin and neutron_plugin == 'nvp' -%}
1162 libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtOpenVswitchVirtualPortDriver
1163+security_group_api = neutron
1164+firewall_driver = nova.virt.firewall.NoopFirewallDriver
1165 {% endif -%}
1166
1167 {% if network_manager_config -%}
1168
1169=== modified file 'unit_tests/test_nova_compute_contexts.py'
1170--- unit_tests/test_nova_compute_contexts.py 2013-09-25 09:01:42 +0000
1171+++ unit_tests/test_nova_compute_contexts.py 2013-11-08 05:49:21 +0000
1172@@ -67,6 +67,20 @@
1173 self.assertEquals({}, cloud_compute())
1174
1175 @patch.object(context, '_network_manager')
1176+ def test_cloud_compute_context_restart_trigger(self, nm):
1177+ nm.return_value = None
1178+ cloud_compute = context.CloudComputeContext()
1179+ with patch.object(cloud_compute, 'restart_trigger') as rt:
1180+ rt.return_value = 'footrigger'
1181+ ctxt = cloud_compute()
1182+ self.assertEquals(ctxt.get('restart_trigger'), 'footrigger')
1183+
1184+ with patch.object(cloud_compute, 'restart_trigger') as rt:
1185+ rt.return_value = None
1186+ ctxt = cloud_compute()
1187+ self.assertEquals(ctxt.get('restart_trigger'), None)
1188+
1189+ @patch.object(context, '_network_manager')
1190 def test_cloud_compute_volume_context_cinder(self, netman):
1191 netman.return_value = None
1192 self.relation_ids.return_value = 'cloud-compute:0'
1193
1194=== modified file 'unit_tests/test_nova_compute_hooks.py'
1195--- unit_tests/test_nova_compute_hooks.py 2013-10-17 19:25:15 +0000
1196+++ unit_tests/test_nova_compute_hooks.py 2013-11-08 05:49:21 +0000
1197@@ -123,6 +123,7 @@
1198 configs.write = MagicMock()
1199 if quantum:
1200 self.network_manager.return_value = 'quantum'
1201+ self.neutron_plugin.return_value = 'ovs'
1202 hooks.amqp_changed()
1203
1204 @patch.object(hooks, 'CONFIGS')
1205@@ -147,9 +148,10 @@
1206 nova_hostname='nova.foohost.com')
1207 self.unit_get.assert_called_with('private-address')
1208
1209- def test_db_joined_quantum(self):
1210+ def test_db_joined_quantum_ovs(self):
1211 self.unit_get.return_value = 'nova.foohost.com'
1212 self.network_manager.return_value = 'quantum'
1213+ self.neutron_plugin.return_value = 'ovs'
1214 hooks.db_joined(rid='shared-db:0')
1215 calls = [call(nova_database='nova',
1216 nova_username='nova',
1217@@ -163,6 +165,21 @@
1218 for c in calls]
1219 self.unit_get.assert_called_with('private-address')
1220
1221+ def test_db_joined_quantum_nvp(self):
1222+ self.unit_get.return_value = 'nova.foohost.com'
1223+ self.network_manager.return_value = 'quantum'
1224+ self.neutron_plugin.return_value = 'nvp'
1225+ hooks.db_joined(rid='shared-db:0')
1226+ calls = [call(nova_database='nova',
1227+ nova_username='nova',
1228+ nova_hostname='nova.foohost.com',
1229+ relation_id='shared-db:0')]
1230+ # NVP plugin requires no DB access - check it was not
1231+ # requested
1232+ [self.assertIn(c, self.relation_set.call_args_list)
1233+ for c in calls]
1234+ self.unit_get.assert_called_with('private-address')
1235+
1236 @patch.object(hooks, 'CONFIGS')
1237 def test_db_changed_missing_relation_data(self, configs):
1238 configs.complete_contexts = MagicMock()
1239@@ -187,13 +204,23 @@
1240 configs.write.call_args_list)
1241
1242 @patch.object(hooks, 'CONFIGS')
1243- def test_db_changed_with_data_and_quantum(self, configs):
1244+ def test_db_changed_with_data_and_quantum_ovs(self, configs):
1245 self.neutron_plugin_attribute.return_value = '/etc/quantum/plugin.conf'
1246+ self.neutron_plugin.return_value = 'ovs'
1247 self._shared_db_test(configs, quantum=True)
1248 ex = [call('/etc/nova/nova.conf'), call('/etc/quantum/plugin.conf')]
1249 self.assertEquals(ex, configs.write.call_args_list)
1250
1251 @patch.object(hooks, 'CONFIGS')
1252+ def test_db_changed_with_data_and_quantum_nvp(self, configs):
1253+ self.neutron_plugin_attribute.return_value = '/etc/quantum/plugin.conf'
1254+ self.neutron_plugin.return_value = 'nvp'
1255+ self._shared_db_test(configs, quantum=True)
1256+ ex = [call('/etc/nova/nova.conf')]
1257+ # NVP has no compute agent for neutron; check no config files generated
1258+ self.assertEquals(ex, configs.write.call_args_list)
1259+
1260+ @patch.object(hooks, 'CONFIGS')
1261 def test_image_service_missing_relation_data(self, configs):
1262 configs.complete_contexts = MagicMock()
1263 configs.complete_contexts.return_value = []
1264@@ -264,15 +291,18 @@
1265 'Could not create ceph keyring: peer not ready?'
1266 )
1267
1268+ @patch.object(utils, 'service_name')
1269 @patch.object(hooks, 'CONFIGS')
1270- def test_ceph_changed_with_key_and_relation_data(self, configs):
1271+ def test_ceph_changed_with_key_and_relation_data(self, configs,
1272+ service_name):
1273 configs.complete_contexts = MagicMock()
1274 configs.complete_contexts.return_value = ['ceph']
1275 configs.write = MagicMock()
1276+ service_name.return_value = 'nova-compute'
1277 self.ensure_ceph_keyring.return_value = True
1278 hooks.ceph_changed()
1279 ex = [
1280- call('/etc/ceph/ceph.conf'),
1281+ call('/var/lib/charm/nova-compute/ceph.conf'),
1282 call('/etc/ceph/secret.xml'),
1283 call('/etc/nova/nova.conf'),
1284 ]
1285
1286=== modified file 'unit_tests/test_nova_compute_utils.py'
1287--- unit_tests/test_nova_compute_utils.py 2013-09-25 09:01:42 +0000
1288+++ unit_tests/test_nova_compute_utils.py 2013-11-08 05:49:21 +0000
1289@@ -13,6 +13,9 @@
1290 'related_units',
1291 'relation_ids',
1292 'relation_get',
1293+ 'service_name',
1294+ 'mkdir',
1295+ 'install_alternative'
1296 ]
1297
1298 OVS_PKGS = [
1299@@ -25,6 +28,7 @@
1300 def setUp(self):
1301 super(NovaComputeUtilsTests, self).setUp(utils, TO_PATCH)
1302 self.config.side_effect = self.test_config.get
1303+ self.service_name.return_value = 'nova-compute'
1304
1305 @patch.object(utils, 'network_manager')
1306 def test_determine_packages_nova_network(self, net_man):

Subscribers

People subscribed via source and target branches