Merge lp:~hopem/nova/icehouse-sru-lp1459046 into lp:~ubuntu-server-dev/nova/icehouse

Proposed by Edward Hope-Morley
Status: Merged
Merged at revision: 705
Proposed branch: lp:~hopem/nova/icehouse-sru-lp1459046
Merge into: lp:~ubuntu-server-dev/nova/icehouse
Diff against target: 653 lines (+628/-0)
4 files modified
debian/changelog (+9/-0)
debian/patches/add-support-for-syslog-connect-retries.patch (+115/-0)
debian/patches/clean-shutdown.patch (+502/-0)
debian/patches/series (+2/-0)
To merge this branch: bzr merge lp:~hopem/nova/icehouse-sru-lp1459046
Reviewer: James Page
Status: Pending
Review via email: mp+264878@code.launchpad.net

This proposal supersedes a proposal from 2015-07-15.

Revision history for this message
James Page (james-page): Posted in a previous version of this proposal
review: Needs Fixing

Preview Diff

=== modified file 'debian/changelog'
--- debian/changelog 2015-07-15 13:52:24 +0000
+++ debian/changelog 2015-07-15 16:43:01 +0000
@@ -1,3 +1,12 @@
+nova (1:2014.1.5-0ubuntu1.2) UNRELEASED; urgency=medium
+
+ * Add rsyslog retry support (LP: #1459046)
+ - d/p/add-support-for-syslog-connect-retries.patch
+ * Add vm clean shutdown support (LP: #1196924)
+ - d/p/clean-shutdown.patch
+
+ -- Edward Hope-Morley <edward.hope-morley@canonical.com> Wed, 15 Jul 2015 18:33:42 +0200
+
 nova (1:2014.1.5-0ubuntu1.1) trusty; urgency=medium

 [ Edward Hope-Morley ]

=== added file 'debian/patches/add-support-for-syslog-connect-retries.patch'
--- debian/patches/add-support-for-syslog-connect-retries.patch 1970-01-01 00:00:00 +0000
+++ debian/patches/add-support-for-syslog-connect-retries.patch 2015-07-15 16:43:01 +0000
@@ -0,0 +1,115 @@
+From fa2a6c6b6aee59b1a98fa7b93f55405457449bf0 Mon Sep 17 00:00:00 2001
+From: Edward Hope-Morley <edward.hope-morley@canonical.com>
+Date: Thu, 18 Jun 2015 13:38:58 +0100
+Subject: [PATCH] Add support for syslog connect retries
+
+If we have requested logging to syslog and syslog is
+not yet ready, we should allow for retry attempts. This
+patch provides a new option, syslog-connect-retries, to
+allow retries with a five-second interval between
+attempts.
+
+Closes-Bug: 1459046
+Co-authored-by: Liang Chen <liang.chen@canonical.com>
+Conflicts:
+ nova/openstack/common/log.py
+
+Change-Id: I88269a75c56c68443230620217a469aebee523f8
+---
+ nova/openstack/common/log.py | 58 +++++++++++++++++++++++++++++++++++---------
+ 1 file changed, 46 insertions(+), 12 deletions(-)
+
+diff --git a/nova/openstack/common/log.py b/nova/openstack/common/log.py
+index cdc439a..71700b7 100644
+--- a/nova/openstack/common/log.py
++++ b/nova/openstack/common/log.py
+@@ -34,7 +34,9 @@ import logging.config
+ import logging.handlers
+ import os
+ import re
++import socket
+ import sys
++import time
+ import traceback
+
+ from oslo.config import cfg
+@@ -118,6 +120,10 @@ logging_cli_opts = [
+ help='Use syslog for logging. '
+ 'Existing syslog format is DEPRECATED during I, '
+ 'and then will be changed in J to honor RFC5424'),
++ cfg.IntOpt('syslog-connect-retries',
++ default=3,
++ help='Number of attempts with a five second interval to retry '
++ 'connecting to syslog. (if use-syslog=True)'),
+ cfg.BoolOpt('use-syslog-rfc-format',
+ # TODO(bogdando) remove or use True after existing
+ # syslog format deprecation in J
+@@ -490,18 +496,6 @@ def _setup_logging_from_conf():
+ for handler in log_root.handlers:
+ log_root.removeHandler(handler)
+
+- if CONF.use_syslog:
+- facility = _find_facility_from_conf()
+- # TODO(bogdando) use the format provided by RFCSysLogHandler
+- # after existing syslog format deprecation in J
+- if CONF.use_syslog_rfc_format:
+- syslog = RFCSysLogHandler(address='/dev/log',
+- facility=facility)
+- else:
+- syslog = logging.handlers.SysLogHandler(address='/dev/log',
+- facility=facility)
+- log_root.addHandler(syslog)
+-
+ logpath = _get_log_file_path()
+ if logpath:
+ filelog = logging.handlers.WatchedFileHandler(logpath)
+@@ -548,6 +542,46 @@ def _setup_logging_from_conf():
+ logger = logging.getLogger(mod)
+ logger.setLevel(level)
+
++ if CONF.use_syslog:
++ retries = CONF.syslog_connect_retries
++ syslog_ready = False
++ while True:
++ try:
++ facility = _find_facility_from_conf()
++ # TODO(bogdando) use the format provided by RFCSysLogHandler
++ # after existing syslog format deprecation in J
++ if CONF.use_syslog_rfc_format:
++ syslog = RFCSysLogHandler(address='/dev/log',
++ facility=facility)
++ else:
++ syslog = logging.handlers.SysLogHandler(address='/dev/log',
++ facility=facility)
++ log_root.addHandler(syslog)
++ syslog_ready = True
++ except socket.error:
++ if CONF.syslog_connect_retries <= 0:
++ log_root.error(_('Connection to syslog failed and no '
++ 'retry attempts requested'))
++ break
++
++ if retries:
++ log_root.info(_('Connection to syslog failed - '
++ 'retrying in 5 seconds'))
++ retries -= 1
++ else:
++ log_root.error(_('Connection to syslog failed and '
++ 'max retry attempts reached'))
++ break
++
++ time.sleep(5)
++ else:
++ break
++
++ if not syslog_ready:
++ log_root.error(_('Unable to add syslog handler. Verify that '
++ 'syslog is running.'))
++
++
+ _loggers = {}
+
+
+--
+1.9.1
+

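The retry behaviour this patch adds reduces to a small loop. Below is a minimal standalone sketch, not nova code itself: the helper name and its freestanding form are hypothetical, while the real change lives inside _setup_logging_from_conf() in nova/openstack/common/log.py. On a connection error it gives up immediately if no retries were requested, otherwise it retries every five seconds until the configured attempts run out.

    # Hypothetical standalone sketch of the retry pattern; not nova code.
    import logging
    import logging.handlers
    import socket
    import time

    def add_syslog_handler(log_root, retries=3, interval=5):
        """Attach a syslog handler to log_root, retrying on socket errors."""
        attempts_left = retries
        while True:
            try:
                handler = logging.handlers.SysLogHandler(address='/dev/log')
                log_root.addHandler(handler)
                return True
            except socket.error:
                if retries <= 0:
                    # Retries disabled outright: fail on the first error.
                    log_root.error('Connection to syslog failed and no '
                                   'retry attempts requested')
                    return False
                if attempts_left == 0:
                    log_root.error('Connection to syslog failed and max '
                                   'retry attempts reached')
                    return False
                log_root.info('Connection to syslog failed - retrying '
                              'in %d seconds', interval)
                attempts_left -= 1
                time.sleep(interval)

With the patch applied, the equivalent behaviour is driven from nova.conf by use_syslog=True together with the new syslog-connect-retries option (default 3).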
=== added file 'debian/patches/clean-shutdown.patch'
--- debian/patches/clean-shutdown.patch 1970-01-01 00:00:00 +0000
+++ debian/patches/clean-shutdown.patch 2015-07-15 16:43:01 +0000
@@ -0,0 +1,502 @@
+commit 879bbcf902c7a8ba0b3c58660b461f5b4918834e
+Author: Phil Day <philip.day@hp.com>
+Date: Fri Jan 24 15:43:20 2014 +0000
+
+ Power off commands should give guests a chance to shutdown
+
+ Currently, libvirt operations which power off an instance, such as stop,
+ shelve, rescue, and resize, simply destroy the underlying VM. Some
+ guest OSs do not react well to this type of power failure, so it
+ would be better if these operations followed the same approach as
+ soft_reboot and gave the guest a chance to shut down gracefully.
+
+ The shutdown behavior is defined by two values:
+
+ - shutdown_timeout defines the overall period a Guest is allowed to
+ complete its shutdown. The default value is set via nova.conf and can be
+ overridden on a per-image basis by image metadata, allowing different
+ types of guest OS to specify how long they need to shut down cleanly.
+
+ - shutdown_retry_interval defines how frequently within that period
+ the Guest will be signaled to shut down. This protects against
+ guests that may not be ready to process the shutdown signal when it
+ is first issued (e.g. still booting). This is defined as a constant.
+
+ This is one of a set of changes that will eventually expose, via the
+ API, the choice of whether to give the GuestOS a chance to shut down.
+
+ This change implements the libvirt changes to power_off() and adds
+ a clean shutdown to compute.manager.stop().
+
+ Subsequent patches will:
+ - Add clean shutdown to Shelve
+ - Add clean shutdown to Rescue
+ - Convert soft_reboot to use the same approach
+ - Expose clean shutdown via rpcapi
+ - Expose clean shutdown via API
+
+ Partially-Implements: blueprint user-defined-shutdown
+ Closes-Bug: #1196924
+ DocImpact
+
+ Conflicts:
+ nova/compute/manager.py
+ nova/tests/virt/test_ironic_api_contracts.py
+
+ Change-Id: I432b0b0c09db82797f28deb5617f02ee45a4278c
+ (cherry picked from commit c07ed15415c0ec3c5862f437f440632eff1e94df)
+
+diff --git a/nova/compute/manager.py b/nova/compute/manager.py
+index 990b92f..e27103f 100644
+--- a/nova/compute/manager.py
++++ b/nova/compute/manager.py
+@@ -183,6 +183,10 @@ timeout_opts = [
+ default=0,
+ help="Automatically confirm resizes after N seconds. "
+ "Set to 0 to disable."),
++ cfg.IntOpt("shutdown_timeout",
++ default=60,
++ help="Total amount of time to wait in seconds for an instance "
++ "to perform a clean shutdown."),
+ ]
+
+ running_deleted_opts = [
+@@ -575,6 +579,11 @@ class ComputeManager(manager.Manager):
+
+ target = messaging.Target(version='3.23')
+
++ # How long to wait in seconds before re-issuing a shutdown
++ # signal to an instance during power off. The overall
++ # time to wait is set by CONF.shutdown_timeout.
++ SHUTDOWN_RETRY_INTERVAL = 10
++
+ def __init__(self, compute_driver=None, *args, **kwargs):
+ """Load configuration options and connect to the hypervisor."""
+ self.virtapi = ComputeVirtAPI(self)
+@@ -2137,6 +2146,25 @@ class ComputeManager(manager.Manager):
+ instance=instance)
+ self._set_instance_error_state(context, instance['uuid'])
+
++ def _get_power_off_values(self, context, instance, clean_shutdown):
++ """Get the timing configuration for powering down this instance."""
++ if clean_shutdown:
++ timeout = compute_utils.get_value_from_system_metadata(instance,
++ key='image_os_shutdown_timeout', type=int,
++ default=CONF.shutdown_timeout)
++ retry_interval = self.SHUTDOWN_RETRY_INTERVAL
++ else:
++ timeout = 0
++ retry_interval = 0
++
++ return timeout, retry_interval
++
++ def _power_off_instance(self, context, instance, clean_shutdown=True):
++ """Power off an instance on this host."""
++ timeout, retry_interval = self._get_power_off_values(context,
++ instance, clean_shutdown)
++ self.driver.power_off(instance, timeout, retry_interval)
++
+ def _shutdown_instance(self, context, instance,
+ bdms, requested_networks=None, notify=True):
+ """Shutdown an instance on this host."""
+@@ -2308,16 +2336,23 @@ class ComputeManager(manager.Manager):
+ @reverts_task_state
+ @wrap_instance_event
+ @wrap_instance_fault
+- def stop_instance(self, context, instance):
++ def stop_instance(self, context, instance, clean_shutdown=True):
+ """Stopping an instance on this host."""
+- self._notify_about_instance_usage(context, instance, "power_off.start")
+- self.driver.power_off(instance)
+- current_power_state = self._get_power_state(context, instance)
+- instance.power_state = current_power_state
+- instance.vm_state = vm_states.STOPPED
+- instance.task_state = None
+- instance.save(expected_task_state=task_states.POWERING_OFF)
+- self._notify_about_instance_usage(context, instance, "power_off.end")
++
++ @utils.synchronized(instance.uuid)
++ def do_stop_instance():
++ self._notify_about_instance_usage(context, instance,
++ "power_off.start")
++ self._power_off_instance(context, instance, clean_shutdown)
++ current_power_state = self._get_power_state(context, instance)
++ instance.power_state = current_power_state
++ instance.vm_state = vm_states.STOPPED
++ instance.task_state = None
++ instance.save(expected_task_state=task_states.POWERING_OFF)
++ self._notify_about_instance_usage(context, instance,
++ "power_off.end")
++
++ do_stop_instance()
+
+ def _power_on(self, context, instance):
+ network_info = self._get_instance_nw_info(context, instance)
+diff --git a/nova/compute/utils.py b/nova/compute/utils.py
+index 119510c..ced00eb 100644
+--- a/nova/compute/utils.py
++++ b/nova/compute/utils.py
+@@ -267,6 +267,25 @@ def get_image_metadata(context, image_service, image_id, instance):
+ return utils.get_image_from_system_metadata(system_meta)
+
+
++def get_value_from_system_metadata(instance, key, type, default):
++ """Get a value of a specified type from image metadata.
++
++ @param instance: The instance object
++ @param key: The name of the property to get
++ @param type: The python type the value is to be returned as
++ @param default: The value to return if key is not set or not the right type
++ """
++ value = instance.system_metadata.get(key, default)
++ try:
++ return type(value)
++ except ValueError:
++ LOG.warning(_("Metadata value %(value)s for %(key)s is not of "
++ "type %(type)s. Using default value %(default)s."),
++ {'value': value, 'key': key, 'type': type,
++ 'default': default}, instance=instance)
++ return default
++
++
+ def notify_usage_exists(notifier, context, instance_ref, current_period=False,
+ ignore_missing_network_data=True,
+ system_metadata=None, extra_usage_info=None):
+diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
+index 00ea03e..9d037cf 100644
+--- a/nova/tests/api/ec2/test_cloud.py
++++ b/nova/tests/api/ec2/test_cloud.py
+@@ -2449,7 +2449,8 @@ class CloudTestCase(test.TestCase):
+
+ self.stubs.Set(fake_virt.FakeDriver, 'power_on', fake_power_on)
+
+- def fake_power_off(self, instance):
++ def fake_power_off(self, instance,
++ shutdown_timeout, shutdown_attempts):
+ virt_driver['powered_off'] = True
+
+ self.stubs.Set(fake_virt.FakeDriver, 'power_off', fake_power_off)
+diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
+index b126a52..cb680f3 100644
+--- a/nova/tests/compute/test_compute.py
++++ b/nova/tests/compute/test_compute.py
+@@ -2064,7 +2064,8 @@ class ComputeTestCase(BaseTestCase):
+
+ called = {'power_off': False}
+
+- def fake_driver_power_off(self, instance):
++ def fake_driver_power_off(self, instance,
++ shutdown_timeout, shutdown_attempts):
+ called['power_off'] = True
+
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'power_off',
+diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py
+index 2304e95..7415f46 100644
+--- a/nova/tests/compute/test_compute_utils.py
++++ b/nova/tests/compute/test_compute_utils.py
+@@ -711,6 +711,28 @@ class ComputeGetImageMetadataTestCase(test.TestCase):
+ self.assertThat(expected, matchers.DictMatches(image_meta))
+
+
++class ComputeUtilsGetValFromSysMetadata(test.TestCase):
++
++ def test_get_value_from_system_metadata(self):
++ instance = fake_instance.fake_instance_obj('fake-context')
++ system_meta = {'int_val': 1,
++ 'int_string': '2',
++ 'not_int': 'Nope'}
++ instance.system_metadata = system_meta
++
++ result = compute_utils.get_value_from_system_metadata(
++ instance, 'int_val', int, 0)
++ self.assertEqual(1, result)
++
++ result = compute_utils.get_value_from_system_metadata(
++ instance, 'int_string', int, 0)
++ self.assertEqual(2, result)
++
++ result = compute_utils.get_value_from_system_metadata(
++ instance, 'not_int', int, 0)
++ self.assertEqual(0, result)
++
++
+ class ComputeUtilsGetNWInfo(test.TestCase):
+ def test_instance_object_none_info_cache(self):
+ inst = fake_instance.fake_instance_obj('fake-context',
+diff --git a/nova/tests/virt/libvirt/test_libvirt.py b/nova/tests/virt/libvirt/test_libvirt.py
+index 2478e8e..ed1c8e8 100644
+--- a/nova/tests/virt/libvirt/test_libvirt.py
++++ b/nova/tests/virt/libvirt/test_libvirt.py
+@@ -5608,6 +5608,82 @@ class LibvirtConnTestCase(test.TestCase):
+ conn._hard_reboot(self.context, instance, network_info,
+ block_device_info)
+
++ def _test_clean_shutdown(self, seconds_to_shutdown,
++ timeout, retry_interval,
++ shutdown_attempts, succeeds):
++ info_tuple = ('fake', 'fake', 'fake', 'also_fake')
++ shutdown_count = []
++
++ def count_shutdowns():
++ shutdown_count.append("shutdown")
++
++ # Mock domain
++ mock_domain = self.mox.CreateMock(libvirt.virDomain)
++
++ mock_domain.info().AndReturn(
++ (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
++ mock_domain.shutdown().WithSideEffects(count_shutdowns)
++
++ retry_countdown = retry_interval
++ for x in xrange(min(seconds_to_shutdown, timeout)):
++ mock_domain.info().AndReturn(
++ (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
++ if retry_countdown == 0:
++ mock_domain.shutdown().WithSideEffects(count_shutdowns)
++ retry_countdown = retry_interval
++ else:
++ retry_countdown -= 1
++
++ if seconds_to_shutdown < timeout:
++ mock_domain.info().AndReturn(
++ (libvirt_driver.VIR_DOMAIN_SHUTDOWN,) + info_tuple)
++
++ self.mox.ReplayAll()
++
++ def fake_lookup_by_name(instance_name):
++ return mock_domain
++
++ def fake_create_domain(**kwargs):
++ self.reboot_create_called = True
++
++ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
++ instance = {"name": "instancename", "id": "instanceid",
++ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
++ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
++ self.stubs.Set(conn, '_create_domain', fake_create_domain)
++ result = conn._clean_shutdown(instance, timeout, retry_interval)
++
++ self.assertEqual(succeeds, result)
++ self.assertEqual(shutdown_attempts, len(shutdown_count))
++
++ def test_clean_shutdown_first_time(self):
++ self._test_clean_shutdown(seconds_to_shutdown=2,
++ timeout=5,
++ retry_interval=3,
++ shutdown_attempts=1,
++ succeeds=True)
++
++ def test_clean_shutdown_with_retry(self):
++ self._test_clean_shutdown(seconds_to_shutdown=4,
++ timeout=5,
++ retry_interval=3,
++ shutdown_attempts=2,
++ succeeds=True)
++
++ def test_clean_shutdown_failure(self):
++ self._test_clean_shutdown(seconds_to_shutdown=6,
++ timeout=5,
++ retry_interval=3,
++ shutdown_attempts=2,
++ succeeds=False)
++
++ def test_clean_shutdown_no_wait(self):
++ self._test_clean_shutdown(seconds_to_shutdown=6,
++ timeout=0,
++ retry_interval=3,
++ shutdown_attempts=1,
++ succeeds=False)
++
+ def test_resume(self):
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices>"
+diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
+index c1de148..b24e50a 100644
+--- a/nova/virt/baremetal/driver.py
++++ b/nova/virt/baremetal/driver.py
+@@ -399,8 +399,9 @@ class BareMetalDriver(driver.ComputeDriver):
+ """Cleanup after instance being destroyed."""
+ pass
+
+- def power_off(self, instance, node=None):
++ def power_off(self, instance, timeout=0, retry_interval=0, node=None):
+ """Power off the specified instance."""
++ # TODO(PhilDay): Add support for timeout (clean shutdown)
+ if not node:
+ node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
+ pm = get_power_manager(node=node, instance=instance)
+diff --git a/nova/virt/driver.py b/nova/virt/driver.py
+index 2fc95cc..2db2964 100644
+--- a/nova/virt/driver.py
++++ b/nova/virt/driver.py
+@@ -579,10 +579,13 @@ class ComputeDriver(object):
+ # TODO(Vek): Need to pass context in for access to auth_token
+ raise NotImplementedError()
+
+- def power_off(self, instance):
++ def power_off(self, instance, timeout=0, retry_interval=0):
+ """Power off the specified instance.
+
+ :param instance: nova.objects.instance.Instance
++ :param timeout: time to wait for GuestOS to shutdown
++ :param retry_interval: How often to signal guest while
++ waiting for it to shutdown
+ """
+ raise NotImplementedError()
+
+diff --git a/nova/virt/fake.py b/nova/virt/fake.py
+index ea175cb..19d81a8 100644
+--- a/nova/virt/fake.py
++++ b/nova/virt/fake.py
+@@ -179,7 +179,7 @@ class FakeDriver(driver.ComputeDriver):
+ block_device_info=None):
+ pass
+
+- def power_off(self, instance):
++ def power_off(self, instance, shutdown_timeout=0, shutdown_attempts=0):
+ pass
+
+ def power_on(self, context, instance, network_info, block_device_info):
+diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
+index 566a9a2..e975cf7 100644
+--- a/nova/virt/hyperv/driver.py
++++ b/nova/virt/hyperv/driver.py
+@@ -111,7 +111,8 @@ class HyperVDriver(driver.ComputeDriver):
+ def resume(self, context, instance, network_info, block_device_info=None):
+ self._vmops.resume(instance)
+
+- def power_off(self, instance):
++ def power_off(self, instance, timeout=0, retry_interval=0):
++ # TODO(PhilDay): Add support for timeout (clean shutdown)
+ self._vmops.power_off(instance)
+
+ def power_on(self, context, instance, network_info,
+diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
+index 43f4762..7cddad3 100644
+--- a/nova/virt/libvirt/driver.py
++++ b/nova/virt/libvirt/driver.py
+@@ -45,6 +45,7 @@ import glob
+ import mmap
+ import os
+ import shutil
++import six
+ import socket
+ import sys
+ import tempfile
+@@ -2157,8 +2158,85 @@ class LibvirtDriver(driver.ComputeDriver):
+ dom = self._lookup_by_name(instance['name'])
+ dom.resume()
+
+- def power_off(self, instance):
++ def _clean_shutdown(self, instance, timeout, retry_interval):
++ """Attempt to shutdown the instance gracefully.
++
++ :param instance: The instance to be shutdown
++ :param timeout: How long to wait in seconds for the instance to
++ shutdown
++ :param retry_interval: How often in seconds to signal the instance
++ to shutdown while waiting
++
++ :returns: True if the shutdown succeeded
++ """
++
++ # List of states that represent a shutdown instance
++ SHUTDOWN_STATES = [power_state.SHUTDOWN,
++ power_state.CRASHED]
++
++ try:
++ dom = self._lookup_by_name(instance["name"])
++ except exception.InstanceNotFound:
++ # If the instance has gone then we don't need to
++ # wait for it to shutdown
++ return True
++
++ (state, _max_mem, _mem, _cpus, _t) = dom.info()
++ state = LIBVIRT_POWER_STATE[state]
++ if state in SHUTDOWN_STATES:
++ LOG.info(_("Instance already shutdown."),
++ instance=instance)
++ return True
++
++ LOG.debug("Shutting down instance from state %s", state,
++ instance=instance)
++ dom.shutdown()
++ retry_countdown = retry_interval
++
++ for sec in six.moves.range(timeout):
++
++ dom = self._lookup_by_name(instance["name"])
++ (state, _max_mem, _mem, _cpus, _t) = dom.info()
++ state = LIBVIRT_POWER_STATE[state]
++
++ if state in SHUTDOWN_STATES:
++ LOG.info(_("Instance shutdown successfully after %d "
++ "seconds."), sec, instance=instance)
++ return True
++
++ # Note(PhilD): We can't assume that the Guest was able to process
++ # any previous shutdown signal (for example it may
++ # have still been starting up), so within the overall
++ # timeout we re-trigger the shutdown every
++ # retry_interval
++ if retry_countdown == 0:
++ retry_countdown = retry_interval
++ # Instance could shutdown at any time, in which case we
++ # will get an exception when we call shutdown
++ try:
++ LOG.debug("Instance in state %s after %d seconds - "
++ "resending shutdown", state, sec,
++ instance=instance)
++ dom.shutdown()
++ except libvirt.libvirtError:
++ # Assume this is because it's now shut down, so loop
++ # one more time to clean up.
++ LOG.debug("Ignoring libvirt exception from shutdown "
++ "request.", instance=instance)
++ continue
++ else:
++ retry_countdown -= 1
++
++ time.sleep(1)
++
++ LOG.info(_("Instance failed to shutdown in %d seconds."),
++ timeout, instance=instance)
++ return False
++
++ def power_off(self, instance, timeout=0, retry_interval=0):
+ """Power off the specified instance."""
++ if timeout:
++ self._clean_shutdown(instance, timeout, retry_interval)
+ self._destroy(instance)
+
+ def power_on(self, context, instance, network_info,
+diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
+index e514bbb..aedc5c3 100644
+--- a/nova/virt/vmwareapi/driver.py
++++ b/nova/virt/vmwareapi/driver.py
+@@ -704,8 +704,9 @@ class VMwareVCDriver(VMwareESXDriver):
+ _vmops = self._get_vmops_for_compute_node(instance['node'])
+ _vmops.unrescue(instance)
+
+- def power_off(self, instance):
++ def power_off(self, instance, timeout=0, retry_interval=0):
+ """Power off the specified instance."""
++ # TODO(PhilDay): Add support for timeout (clean shutdown)
+ _vmops = self._get_vmops_for_compute_node(instance['node'])
+ _vmops.power_off(instance)
+
+diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
+index e7a0d1c..ccbe765 100644
+--- a/nova/virt/xenapi/driver.py
++++ b/nova/virt/xenapi/driver.py
+@@ -325,8 +325,9 @@ class XenAPIDriver(driver.ComputeDriver):
+ """Unrescue the specified instance."""
+ self._vmops.unrescue(instance)
+
+- def power_off(self, instance):
++ def power_off(self, instance, timeout=0, retry_interval=0):
+ """Power off the specified instance."""
++ # TODO(PhilDay): Add support for timeout (clean shutdown)
+ self._vmops.power_off(instance)
+
+ def power_on(self, context, instance, network_info,

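The control flow of the new _clean_shutdown() is easier to follow with the libvirt plumbing stripped away. A minimal sketch, assuming hypothetical send_shutdown/is_shut_down callbacks in place of dom.shutdown() and the LIBVIRT_POWER_STATE checks:

    import time

    def clean_shutdown(send_shutdown, is_shut_down,
                       timeout=60, retry_interval=10):
        """Sketch of the shutdown loop; both callbacks are stand-ins."""
        send_shutdown()                # signal the guest once up front
        retry_countdown = retry_interval
        for sec in range(timeout):     # poll roughly once per second
            if is_shut_down():
                return True
            if retry_countdown == 0:
                # The guest may have missed earlier signals (e.g. it was
                # still booting), so re-trigger within the overall timeout.
                retry_countdown = retry_interval
                send_shutdown()
            else:
                retry_countdown -= 1
            time.sleep(1)
        return False  # power_off() then falls back to destroying the domain

With the defaults (shutdown_timeout=60 in nova.conf, SHUTDOWN_RETRY_INTERVAL=10), a slow guest is re-signalled roughly every ten seconds; a timeout of 0 skips the wait entirely, which is the test_clean_shutdown_no_wait case above.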
=== modified file 'debian/patches/series'
--- debian/patches/series 2015-06-22 16:00:27 +0000
+++ debian/patches/series 2015-07-15 16:43:01 +0000
@@ -4,3 +4,5 @@
 skip_ipv6_test.patch
 arm-console-patch.patch
 update-run-tests.patch
+add-support-for-syslog-connect-retries.patch
+clean-shutdown.patch
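One detail worth calling out: the per-image override mentioned in the commit message is resolved through the new compute_utils.get_value_from_system_metadata() helper. A hypothetical illustration, with a plain dict standing in for a real instance's system_metadata:

    def resolve_shutdown_timeout(system_metadata, conf_default=60):
        # Mirrors _get_power_off_values(): read the per-image key,
        # coerce it to int, and fall back to CONF.shutdown_timeout.
        value = system_metadata.get('image_os_shutdown_timeout', conf_default)
        try:
            return int(value)
        except ValueError:
            return conf_default

    print(resolve_shutdown_timeout({'image_os_shutdown_timeout': '120'}))  # 120
    print(resolve_shutdown_timeout({}))                                    # 60
    print(resolve_shutdown_timeout({'image_os_shutdown_timeout': 'no'}))   # 60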
