Merge lp:~anso/nova/rescue-mode into lp:~hudson-openstack/nova/trunk
- rescue-mode
- Merge into trunk
Proposed by
Vish Ishaya
Status: | Merged |
---|---|
Approved by: | Devin Carlen |
Approved revision: | 287 |
Merged at revision: | 383 |
Proposed branch: | lp:~anso/nova/rescue-mode |
Merge into: | lp:~hudson-openstack/nova/trunk |
Diff against target: |
563 lines (+289/-36) 9 files modified
nova/api/cloud.py (+20/-0) nova/api/ec2/cloud.py (+15/-2) nova/compute/manager.py (+43/-10) nova/tests/virt_unittest.py (+2/-2) nova/virt/fake.py (+18/-2) nova/virt/libvirt.rescue.qemu.xml.template (+37/-0) nova/virt/libvirt.rescue.uml.xml.template (+26/-0) nova/virt/libvirt.rescue.xen.xml.template (+34/-0) nova/virt/libvirt_conn.py (+94/-20) |
To merge this branch: | bzr merge lp:~anso/nova/rescue-mode |
Related bugs: | |
Related blueprints: |
Rescue Mode
(High)
|
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Devin Carlen (community) | Approve | ||
Jay Pipes (community) | Approve | ||
Review via email: mp+39239@code.launchpad.net |
Commit message
Adds rescue and unrescue commands.
Description of the change
Adds rescue and unrescue commands to the api, compute, and the libvirt driver. Rescue mode simply reboots the instance with a rescue image as the first drive and the regular disk image as a second attached drive. Unrescue reboots the instance back into normal mode.
A couple of minor fixes were also made to the handling of get_info so that it raises a standard exception (NotFound); this allows rescuing and rebooting of images that the hypervisor doesn't know about. Reboot was also fixed because it was passing the ec2_id instead of the internal_id to api/cloud.py.
To post a comment you must log in.
lp:~anso/nova/rescue-mode
updated
- 287. By Vish Ishaya
-
logging.warn not raise logging.Warn
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'nova/api/cloud.py' |
2 | --- nova/api/cloud.py 2010-10-14 07:30:42 +0000 |
3 | +++ nova/api/cloud.py 2010-10-25 05:59:40 +0000 |
4 | @@ -36,3 +36,23 @@ |
5 | db.queue_get_for(context, FLAGS.compute_topic, host), |
6 | {"method": "reboot_instance", |
7 | "args": {"instance_id": instance_ref['id']}}) |
8 | + |
9 | + |
10 | +def rescue(instance_id, context): |
11 | + """Rescue the given instance.""" |
12 | + instance_ref = db.instance_get_by_internal_id(context, instance_id) |
13 | + host = instance_ref['host'] |
14 | + rpc.cast(context, |
15 | + db.queue_get_for(context, FLAGS.compute_topic, host), |
16 | + {"method": "rescue_instance", |
17 | + "args": {"instance_id": instance_ref['id']}}) |
18 | + |
19 | + |
20 | +def unrescue(instance_id, context): |
21 | + """Unrescue the given instance.""" |
22 | + instance_ref = db.instance_get_by_internal_id(context, instance_id) |
23 | + host = instance_ref['host'] |
24 | + rpc.cast(context, |
25 | + db.queue_get_for(context, FLAGS.compute_topic, host), |
26 | + {"method": "unrescue_instance", |
27 | + "args": {"instance_id": instance_ref['id']}}) |
28 | |
29 | === modified file 'nova/api/ec2/cloud.py' |
30 | --- nova/api/ec2/cloud.py 2010-10-21 22:26:06 +0000 |
31 | +++ nova/api/ec2/cloud.py 2010-10-25 05:59:40 +0000 |
32 | @@ -932,8 +932,21 @@ |
33 | |
34 | def reboot_instances(self, context, instance_id, **kwargs): |
35 | """instance_id is a list of instance ids""" |
36 | - for id_str in instance_id: |
37 | - cloud.reboot(id_str, context=context) |
38 | + for ec2_id in instance_id: |
39 | + internal_id = ec2_id_to_internal_id(ec2_id) |
40 | + cloud.reboot(internal_id, context=context) |
41 | + return True |
42 | + |
43 | + def rescue_instance(self, context, instance_id, **kwargs): |
44 | + """This is an extension to the normal ec2_api""" |
45 | + internal_id = ec2_id_to_internal_id(instance_id) |
46 | + cloud.rescue(internal_id, context=context) |
47 | + return True |
48 | + |
49 | + def unrescue_instance(self, context, instance_id, **kwargs): |
50 | + """This is an extension to the normal ec2_api""" |
51 | + internal_id = ec2_id_to_internal_id(instance_id) |
52 | + cloud.unrescue(internal_id, context=context) |
53 | return True |
54 | |
55 | def update_instance(self, context, ec2_id, **kwargs): |
56 | |
57 | === modified file 'nova/compute/manager.py' |
58 | --- nova/compute/manager.py 2010-10-14 06:26:58 +0000 |
59 | +++ nova/compute/manager.py 2010-10-25 05:59:40 +0000 |
60 | @@ -20,10 +20,8 @@ |
61 | Handles all code relating to instances (guest vms) |
62 | """ |
63 | |
64 | -import base64 |
65 | import datetime |
66 | import logging |
67 | -import os |
68 | |
69 | from twisted.internet import defer |
70 | |
71 | @@ -59,7 +57,11 @@ |
72 | """Update the state of an instance from the driver info""" |
73 | # FIXME(ja): include other fields from state? |
74 | instance_ref = self.db.instance_get(context, instance_id) |
75 | - state = self.driver.get_info(instance_ref.name)['state'] |
76 | + try: |
77 | + info = self.driver.get_info(instance_ref['name']) |
78 | + state = info['state'] |
79 | + except exception.NotFound: |
80 | + state = power_state.NOSTATE |
81 | self.db.instance_set_state(context, instance_id, state) |
82 | |
83 | @defer.inlineCallbacks |
84 | @@ -126,16 +128,15 @@ |
85 | def reboot_instance(self, context, instance_id): |
86 | """Reboot an instance on this server.""" |
87 | context = context.elevated() |
88 | + instance_ref = self.db.instance_get(context, instance_id) |
89 | self._update_state(context, instance_id) |
90 | - instance_ref = self.db.instance_get(context, instance_id) |
91 | |
92 | if instance_ref['state'] != power_state.RUNNING: |
93 | - raise exception.Error( |
94 | - 'trying to reboot a non-running' |
95 | - 'instance: %s (state: %s excepted: %s)' % |
96 | - (instance_ref['internal_id'], |
97 | - instance_ref['state'], |
98 | - power_state.RUNNING)) |
99 | + logging.warn('trying to reboot a non-running ' |
100 | + 'instance: %s (state: %s excepted: %s)', |
101 | + instance_ref['internal_id'], |
102 | + instance_ref['state'], |
103 | + power_state.RUNNING) |
104 | |
105 | logging.debug('instance %s: rebooting', instance_ref['name']) |
106 | self.db.instance_set_state(context, |
107 | @@ -145,6 +146,38 @@ |
108 | yield self.driver.reboot(instance_ref) |
109 | self._update_state(context, instance_id) |
110 | |
111 | + @defer.inlineCallbacks |
112 | + @exception.wrap_exception |
113 | + def rescue_instance(self, context, instance_id): |
114 | + """Rescue an instance on this server.""" |
115 | + context = context.elevated() |
116 | + instance_ref = self.db.instance_get(context, instance_id) |
117 | + |
118 | + logging.debug('instance %s: rescuing', |
119 | + instance_ref['internal_id']) |
120 | + self.db.instance_set_state(context, |
121 | + instance_id, |
122 | + power_state.NOSTATE, |
123 | + 'rescuing') |
124 | + yield self.driver.rescue(instance_ref) |
125 | + self._update_state(context, instance_id) |
126 | + |
127 | + @defer.inlineCallbacks |
128 | + @exception.wrap_exception |
129 | + def unrescue_instance(self, context, instance_id): |
130 | + """Rescue an instance on this server.""" |
131 | + context = context.elevated() |
132 | + instance_ref = self.db.instance_get(context, instance_id) |
133 | + |
134 | + logging.debug('instance %s: unrescuing', |
135 | + instance_ref['internal_id']) |
136 | + self.db.instance_set_state(context, |
137 | + instance_id, |
138 | + power_state.NOSTATE, |
139 | + 'unrescuing') |
140 | + yield self.driver.unrescue(instance_ref) |
141 | + self._update_state(context, instance_id) |
142 | + |
143 | @exception.wrap_exception |
144 | def get_console_output(self, context, instance_id): |
145 | """Send the console output for an instance.""" |
146 | |
147 | === modified file 'nova/tests/virt_unittest.py' |
148 | --- nova/tests/virt_unittest.py 2010-10-22 07:48:27 +0000 |
149 | +++ nova/tests/virt_unittest.py 2010-10-25 05:59:40 +0000 |
150 | @@ -91,7 +91,7 @@ |
151 | FLAGS.libvirt_type = libvirt_type |
152 | conn = libvirt_conn.LibvirtConnection(True) |
153 | |
154 | - uri, template = conn.get_uri_and_template() |
155 | + uri, _template, _rescue = conn.get_uri_and_templates() |
156 | self.assertEquals(uri, expected_uri) |
157 | |
158 | xml = conn.to_xml(instance_ref) |
159 | @@ -114,7 +114,7 @@ |
160 | for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): |
161 | FLAGS.libvirt_type = libvirt_type |
162 | conn = libvirt_conn.LibvirtConnection(True) |
163 | - uri, template = conn.get_uri_and_template() |
164 | + uri, _template, _rescue = conn.get_uri_and_templates() |
165 | self.assertEquals(uri, testuri) |
166 | |
167 | def tearDown(self): |
168 | |
169 | === modified file 'nova/virt/fake.py' |
170 | --- nova/virt/fake.py 2010-10-22 00:15:21 +0000 |
171 | +++ nova/virt/fake.py 2010-10-25 05:59:40 +0000 |
172 | @@ -22,10 +22,9 @@ |
173 | This module also documents the semantics of real hypervisor connections. |
174 | """ |
175 | |
176 | -import logging |
177 | - |
178 | from twisted.internet import defer |
179 | |
180 | +from nova import exception |
181 | from nova.compute import power_state |
182 | |
183 | |
184 | @@ -119,6 +118,18 @@ |
185 | """ |
186 | return defer.succeed(None) |
187 | |
188 | + def rescue(self, instance): |
189 | + """ |
190 | + Rescue the specified instance. |
191 | + """ |
192 | + return defer.succeed(None) |
193 | + |
194 | + def unrescue(self, instance): |
195 | + """ |
196 | + Unrescue the specified instance. |
197 | + """ |
198 | + return defer.succeed(None) |
199 | + |
200 | def destroy(self, instance): |
201 | """ |
202 | Destroy (shutdown and delete) the specified instance. |
203 | @@ -148,7 +159,12 @@ |
204 | current memory the instance has, in KiB, 'num_cpu': The current number |
205 | of virtual CPUs the instance has, 'cpu_time': The total CPU time used |
206 | by the instance, in nanoseconds. |
207 | + |
208 | + This method should raise exception.NotFound if the hypervisor has no |
209 | + knowledge of the instance |
210 | """ |
211 | + if instance_name not in self.instances: |
212 | + raise exception.NotFound("Instance %s Not Found" % instance_name) |
213 | i = self.instances[instance_name] |
214 | return {'state': i._state, |
215 | 'max_mem': 0, |
216 | |
217 | === added file 'nova/virt/libvirt.rescue.qemu.xml.template' |
218 | --- nova/virt/libvirt.rescue.qemu.xml.template 1970-01-01 00:00:00 +0000 |
219 | +++ nova/virt/libvirt.rescue.qemu.xml.template 2010-10-25 05:59:40 +0000 |
220 | @@ -0,0 +1,37 @@ |
221 | +<domain type='%(type)s'> |
222 | + <name>%(name)s</name> |
223 | + <os> |
224 | + <type>hvm</type> |
225 | + <kernel>%(basepath)s/rescue-kernel</kernel> |
226 | + <initrd>%(basepath)s/rescue-ramdisk</initrd> |
227 | + <cmdline>root=/dev/vda1 console=ttyS0</cmdline> |
228 | + </os> |
229 | + <features> |
230 | + <acpi/> |
231 | + </features> |
232 | + <memory>%(memory_kb)s</memory> |
233 | + <vcpu>%(vcpus)s</vcpu> |
234 | + <devices> |
235 | + <disk type='file'> |
236 | + <source file='%(basepath)s/rescue-disk'/> |
237 | + <target dev='vda' bus='virtio'/> |
238 | + </disk> |
239 | + <disk type='file'> |
240 | + <source file='%(basepath)s/disk'/> |
241 | + <target dev='vdb' bus='virtio'/> |
242 | + </disk> |
243 | + <interface type='bridge'> |
244 | + <source bridge='%(bridge_name)s'/> |
245 | + <mac address='%(mac_address)s'/> |
246 | + <!-- <model type='virtio'/> CANT RUN virtio network right now --> |
247 | + <filterref filter="nova-instance-%(name)s"> |
248 | + <parameter name="IP" value="%(ip_address)s" /> |
249 | + <parameter name="DHCPSERVER" value="%(dhcp_server)s" /> |
250 | + </filterref> |
251 | + </interface> |
252 | + <serial type="file"> |
253 | + <source path='%(basepath)s/console.log'/> |
254 | + <target port='1'/> |
255 | + </serial> |
256 | + </devices> |
257 | +</domain> |
258 | |
259 | === added file 'nova/virt/libvirt.rescue.uml.xml.template' |
260 | --- nova/virt/libvirt.rescue.uml.xml.template 1970-01-01 00:00:00 +0000 |
261 | +++ nova/virt/libvirt.rescue.uml.xml.template 2010-10-25 05:59:40 +0000 |
262 | @@ -0,0 +1,26 @@ |
263 | +<domain type='%(type)s'> |
264 | + <name>%(name)s</name> |
265 | + <memory>%(memory_kb)s</memory> |
266 | + <os> |
267 | + <type>%(type)s</type> |
268 | + <kernel>/usr/bin/linux</kernel> |
269 | + <root>/dev/ubda1</root> |
270 | + </os> |
271 | + <devices> |
272 | + <disk type='file'> |
273 | + <source file='%(basepath)s/rescue-disk'/> |
274 | + <target dev='ubd0' bus='uml'/> |
275 | + </disk> |
276 | + <disk type='file'> |
277 | + <source file='%(basepath)s/disk'/> |
278 | + <target dev='ubd1' bus='uml'/> |
279 | + </disk> |
280 | + <interface type='bridge'> |
281 | + <source bridge='%(bridge_name)s'/> |
282 | + <mac address='%(mac_address)s'/> |
283 | + </interface> |
284 | + <console type="file"> |
285 | + <source path='%(basepath)s/console.log'/> |
286 | + </console> |
287 | + </devices> |
288 | +</domain> |
289 | |
290 | === added file 'nova/virt/libvirt.rescue.xen.xml.template' |
291 | --- nova/virt/libvirt.rescue.xen.xml.template 1970-01-01 00:00:00 +0000 |
292 | +++ nova/virt/libvirt.rescue.xen.xml.template 2010-10-25 05:59:40 +0000 |
293 | @@ -0,0 +1,34 @@ |
294 | +<domain type='%(type)s'> |
295 | + <name>%(name)s</name> |
296 | + <os> |
297 | + <type>linux</type> |
298 | + <kernel>%(basepath)s/kernel</kernel> |
299 | + <initrd>%(basepath)s/ramdisk</initrd> |
300 | + <root>/dev/xvda1</root> |
301 | + <cmdline>ro</cmdline> |
302 | + </os> |
303 | + <features> |
304 | + <acpi/> |
305 | + </features> |
306 | + <memory>%(memory_kb)s</memory> |
307 | + <vcpu>%(vcpus)s</vcpu> |
308 | + <devices> |
309 | + <disk type='file'> |
310 | + <source file='%(basepath)s/rescue-disk'/> |
311 | + <target dev='sda' /> |
312 | + </disk> |
313 | + <disk type='file'> |
314 | + <source file='%(basepath)s/disk'/> |
315 | + <target dev='sdb' /> |
316 | + </disk> |
317 | + <interface type='bridge'> |
318 | + <source bridge='%(bridge_name)s'/> |
319 | + <mac address='%(mac_address)s'/> |
320 | + </interface> |
321 | + <console type="file"> |
322 | + <source path='%(basepath)s/console.log'/> |
323 | + <target port='1'/> |
324 | + </console> |
325 | + </devices> |
326 | +</domain> |
327 | + |
328 | |
329 | === modified file 'nova/virt/libvirt_conn.py' |
330 | --- nova/virt/libvirt_conn.py 2010-10-22 00:15:21 +0000 |
331 | +++ nova/virt/libvirt_conn.py 2010-10-25 05:59:40 +0000 |
332 | @@ -48,6 +48,19 @@ |
333 | |
334 | |
335 | FLAGS = flags.FLAGS |
336 | +flags.DEFINE_string('libvirt_rescue_xml_template', |
337 | + utils.abspath('virt/libvirt.rescue.qemu.xml.template'), |
338 | + 'Libvirt RESCUE XML Template for QEmu/KVM') |
339 | +flags.DEFINE_string('libvirt_rescue_xen_xml_template', |
340 | + utils.abspath('virt/libvirt.rescue.xen.xml.template'), |
341 | + 'Libvirt RESCUE XML Template for xen') |
342 | +flags.DEFINE_string('libvirt_rescue_uml_xml_template', |
343 | + utils.abspath('virt/libvirt.rescue.uml.xml.template'), |
344 | + 'Libvirt RESCUE XML Template for user-mode-linux') |
345 | +# TODO(vish): These flags should probably go into a shared location |
346 | +flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image') |
347 | +flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image') |
348 | +flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image') |
349 | flags.DEFINE_string('libvirt_xml_template', |
350 | utils.abspath('virt/libvirt.qemu.xml.template'), |
351 | 'Libvirt XML Template for QEmu/KVM') |
352 | @@ -87,9 +100,12 @@ |
353 | |
354 | class LibvirtConnection(object): |
355 | def __init__(self, read_only): |
356 | - self.libvirt_uri, template_file = self.get_uri_and_template() |
357 | + (self.libvirt_uri, |
358 | + template_file, |
359 | + rescue_file) = self.get_uri_and_templates() |
360 | |
361 | self.libvirt_xml = open(template_file).read() |
362 | + self.rescue_xml = open(rescue_file).read() |
363 | self._wrapped_conn = None |
364 | self.read_only = read_only |
365 | |
366 | @@ -112,17 +128,20 @@ |
367 | return False |
368 | raise |
369 | |
370 | - def get_uri_and_template(self): |
371 | + def get_uri_and_templates(self): |
372 | if FLAGS.libvirt_type == 'uml': |
373 | uri = FLAGS.libvirt_uri or 'uml:///system' |
374 | template_file = FLAGS.libvirt_uml_xml_template |
375 | + rescue_file = FLAGS.libvirt_rescue_uml_xml_template |
376 | elif FLAGS.libvirt_type == 'xen': |
377 | uri = FLAGS.libvirt_uri or 'xen:///' |
378 | template_file = FLAGS.libvirt_xen_xml_template |
379 | + rescue_file = FLAGS.libvirt_rescue_xen_xml_template |
380 | else: |
381 | uri = FLAGS.libvirt_uri or 'qemu:///system' |
382 | template_file = FLAGS.libvirt_xml_template |
383 | - return uri, template_file |
384 | + rescue_file = FLAGS.libvirt_rescue_xml_template |
385 | + return uri, template_file, rescue_file |
386 | |
387 | def _connect(self, uri, read_only): |
388 | auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], |
389 | @@ -138,7 +157,7 @@ |
390 | return [self._conn.lookupByID(x).name() |
391 | for x in self._conn.listDomainsID()] |
392 | |
393 | - def destroy(self, instance): |
394 | + def destroy(self, instance, cleanup=True): |
395 | try: |
396 | virt_dom = self._conn.lookupByName(instance['name']) |
397 | virt_dom.destroy() |
398 | @@ -146,10 +165,11 @@ |
399 | pass |
400 | # If the instance is already terminated, we're still happy |
401 | d = defer.Deferred() |
402 | - d.addCallback(lambda _: self._cleanup(instance)) |
403 | + if cleanup: |
404 | + d.addCallback(lambda _: self._cleanup(instance)) |
405 | # FIXME: What does this comment mean? |
406 | # TODO(termie): short-circuit me for tests |
407 | - # WE'LL save this for when we do shutdown, |
408 | + # WE'LL save this for when we do shutdown, |
409 | # instead of destroy - but destroy returns immediately |
410 | timer = task.LoopingCall(f=None) |
411 | |
412 | @@ -199,8 +219,8 @@ |
413 | @defer.inlineCallbacks |
414 | @exception.wrap_exception |
415 | def reboot(self, instance): |
416 | + yield self.destroy(instance, False) |
417 | xml = self.to_xml(instance) |
418 | - yield self._conn.lookupByName(instance['name']).destroy() |
419 | yield self._conn.createXML(xml, 0) |
420 | |
421 | d = defer.Deferred() |
422 | @@ -229,6 +249,48 @@ |
423 | |
424 | @defer.inlineCallbacks |
425 | @exception.wrap_exception |
426 | + def rescue(self, instance): |
427 | + yield self.destroy(instance, False) |
428 | + |
429 | + xml = self.to_xml(instance, rescue=True) |
430 | + rescue_images = {'image_id': FLAGS.rescue_image_id, |
431 | + 'kernel_id': FLAGS.rescue_kernel_id, |
432 | + 'ramdisk_id': FLAGS.rescue_ramdisk_id} |
433 | + yield self._create_image(instance, xml, 'rescue-', rescue_images) |
434 | + yield self._conn.createXML(xml, 0) |
435 | + |
436 | + d = defer.Deferred() |
437 | + timer = task.LoopingCall(f=None) |
438 | + |
439 | + def _wait_for_rescue(): |
440 | + try: |
441 | + state = self.get_info(instance['name'])['state'] |
442 | + db.instance_set_state(None, instance['id'], state) |
443 | + if state == power_state.RUNNING: |
444 | + logging.debug('instance %s: rescued', instance['name']) |
445 | + timer.stop() |
446 | + d.callback(None) |
447 | + except Exception, exn: |
448 | + logging.error('_wait_for_rescue failed: %s', exn) |
449 | + db.instance_set_state(None, |
450 | + instance['id'], |
451 | + power_state.SHUTDOWN) |
452 | + timer.stop() |
453 | + d.callback(None) |
454 | + |
455 | + timer.f = _wait_for_rescue |
456 | + timer.start(interval=0.5, now=True) |
457 | + yield d |
458 | + |
459 | + @defer.inlineCallbacks |
460 | + @exception.wrap_exception |
461 | + def unrescue(self, instance): |
462 | + # NOTE(vish): Because reboot destroys and recreates an instance using |
463 | + # the normal xml file, we can just call reboot here |
464 | + yield self.reboot(instance) |
465 | + |
466 | + @defer.inlineCallbacks |
467 | + @exception.wrap_exception |
468 | def spawn(self, instance): |
469 | xml = self.to_xml(instance) |
470 | db.instance_set_state(context.get_admin_context(), |
471 | @@ -239,8 +301,6 @@ |
472 | setup_nwfilters_for_instance(instance) |
473 | yield self._create_image(instance, xml) |
474 | yield self._conn.createXML(xml, 0) |
475 | - # TODO(termie): this should actually register |
476 | - # a callback to check for successful boot |
477 | logging.debug("instance %s: is running", instance['name']) |
478 | |
479 | local_d = defer.Deferred() |
480 | @@ -311,15 +371,16 @@ |
481 | return d |
482 | |
483 | @defer.inlineCallbacks |
484 | - def _create_image(self, inst, libvirt_xml): |
485 | + def _create_image(self, inst, libvirt_xml, prefix='', disk_images=None): |
486 | # syntactic nicety |
487 | - basepath = lambda fname='': os.path.join(FLAGS.instances_path, |
488 | + basepath = lambda fname='', prefix=prefix: os.path.join( |
489 | + FLAGS.instances_path, |
490 | inst['name'], |
491 | - fname) |
492 | + prefix + fname) |
493 | |
494 | # ensure directories exist and are writable |
495 | - yield process.simple_execute('mkdir -p %s' % basepath()) |
496 | - yield process.simple_execute('chmod 0777 %s' % basepath()) |
497 | + yield process.simple_execute('mkdir -p %s' % basepath(prefix='')) |
498 | + yield process.simple_execute('chmod 0777 %s' % basepath(prefix='')) |
499 | |
500 | # TODO(termie): these are blocking calls, it would be great |
501 | # if they weren't. |
502 | @@ -328,12 +389,17 @@ |
503 | f.write(libvirt_xml) |
504 | f.close() |
505 | |
506 | - os.close(os.open(basepath('console.log'), os.O_CREAT | os.O_WRONLY, |
507 | - 0660)) |
508 | + # NOTE(vish): No need add the prefix to console.log |
509 | + os.close(os.open(basepath('console.log', ''), |
510 | + os.O_CREAT | os.O_WRONLY, 0660)) |
511 | |
512 | user = manager.AuthManager().get_user(inst['user_id']) |
513 | project = manager.AuthManager().get_project(inst['project_id']) |
514 | |
515 | + if not disk_images: |
516 | + disk_images = {'image_id': inst['image_id'], |
517 | + 'kernel_id': inst['kernel_id'], |
518 | + 'ramdisk_id': inst['ramdisk_id']} |
519 | if not os.path.exists(basepath('disk')): |
520 | yield images.fetch(inst.image_id, basepath('disk-raw'), user, |
521 | project) |
522 | @@ -379,7 +445,9 @@ |
523 | ['local_gb'] |
524 | * 1024 * 1024 * 1024) |
525 | |
526 | - resize = inst['instance_type'] != 'm1.tiny' |
527 | + resize = True |
528 | + if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-': |
529 | + resize = False |
530 | yield disk.partition(basepath('disk-raw'), basepath('disk'), |
531 | local_bytes, resize, execute=execute) |
532 | |
533 | @@ -387,7 +455,7 @@ |
534 | yield process.simple_execute('sudo chown root %s' % |
535 | basepath('disk')) |
536 | |
537 | - def to_xml(self, instance): |
538 | + def to_xml(self, instance, rescue=False): |
539 | # TODO(termie): cache? |
540 | logging.debug('instance %s: starting toXML method', instance['name']) |
541 | network = db.project_get_network(context.get_admin_context(), |
542 | @@ -409,13 +477,19 @@ |
543 | 'mac_address': instance['mac_address'], |
544 | 'ip_address': ip_address, |
545 | 'dhcp_server': dhcp_server} |
546 | - libvirt_xml = self.libvirt_xml % xml_info |
547 | + if rescue: |
548 | + libvirt_xml = self.rescue_xml % xml_info |
549 | + else: |
550 | + libvirt_xml = self.libvirt_xml % xml_info |
551 | logging.debug('instance %s: finished toXML method', instance['name']) |
552 | |
553 | return libvirt_xml |
554 | |
555 | def get_info(self, instance_name): |
556 | - virt_dom = self._conn.lookupByName(instance_name) |
557 | + try: |
558 | + virt_dom = self._conn.lookupByName(instance_name) |
559 | + except: |
560 | + raise exception.NotFound("Instance %s not found" % instance_name) |
561 | (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() |
562 | return {'state': state, |
563 | 'max_mem': max_mem, |
Very nice work, Vish. Been through it all and can't see any issues at all.