Merge lp:~cbehrens/nova/swapdisk into lp:~hudson-openstack/nova/trunk
- swapdisk
- Merge into trunk
Status: | Merged |
---|---|
Approved by: | Josh Kearney |
Approved revision: | 1113 |
Merged at revision: | 1115 |
Proposed branch: | lp:~cbehrens/nova/swapdisk |
Merge into: | lp:~hudson-openstack/nova/trunk |
Diff against target: |
484 lines (+202/-62) 6 files modified
nova/tests/test_xenapi.py (+23/-0) nova/tests/xenapi/stubs.py (+26/-6) nova/virt/xenapi/fake.py (+4/-1) nova/virt/xenapi/vm_utils.py (+40/-9) nova/virt/xenapi/vmops.py (+38/-21) plugins/xenserver/xenapi/etc/xapi.d/plugins/glance (+71/-25) |
To merge this branch: | bzr merge lp:~cbehrens/nova/swapdisk |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Ed Leafe (community) | Approve | ||
Josh Kearney (community) | Approve | ||
Review via email: mp+62549@code.launchpad.net |
Commit message
Description of the change
Essentially adds support for wiring up a swap disk when building.
Modifies the glance plugin to check for a swap.vhd. Glance's download_vhd will now return a list of dictionaries describing VHDs found in the image. All returns from _fetch_image calls in xenapi have been modified accordingly.
One can now build a .ova for glance that contains image.vhd and swap.vhd files.
When a VM is created, it'll iterate through the list and create VBDs for all of the VDIs found.
Added a test for this, too, which required a slight fix to xenapi's fake.py.
Vish Ishaya (vishvananda) wrote : | # |
Chris Behrens (cbehrens) wrote : | # |
I see I've accidentally removed an assert from glance plugin... fixing...
Ed Leafe (ed-leafe) wrote : | # |
Really Vish? Quoting the whole diff in your reply?
:-P
Josh Kearney (jk0) wrote : | # |
Looks great (and works great -- tested in our lab). The extra docs are very much appreciated.
Chris Behrens (cbehrens) wrote : | # |
Vish: Yeah, out of scope for what I need to do for RAX. This just builds on the xenserver glance plugin support.
Chris Behrens (cbehrens) wrote : | # |
Updating a comment..
Vish Ishaya (vishvananda) wrote : | # |
Ha, oops. That is what i get for just hitting reply in my mail client.
:)
On May 26, 2011, at 1:01 PM, Ed Leafe wrote:
> Really Vish? Quoting the whole diff in your reply?
>
> :-P
> --
> https://code.launchpad.net/~cbehrens/nova/swapdisk/+merge/62549
> You are subscribed to branch lp:nova.
- 1113. By Chris Behrens
-
add a comment when calling glance:download_vhd so it's clear what is returned
Preview Diff
1 | === modified file 'nova/tests/test_xenapi.py' | |||
2 | --- nova/tests/test_xenapi.py 2011-05-13 16:45:42 +0000 | |||
3 | +++ nova/tests/test_xenapi.py 2011-05-26 20:22:46 +0000 | |||
4 | @@ -395,6 +395,29 @@ | |||
5 | 395 | os_type="linux") | 395 | os_type="linux") |
6 | 396 | self.check_vm_params_for_linux() | 396 | self.check_vm_params_for_linux() |
7 | 397 | 397 | ||
8 | 398 | def test_spawn_vhd_glance_swapdisk(self): | ||
9 | 399 | # Change the default host_call_plugin to one that'll return | ||
10 | 400 | # a swap disk | ||
11 | 401 | orig_func = stubs.FakeSessionForVMTests.host_call_plugin | ||
12 | 402 | |||
13 | 403 | stubs.FakeSessionForVMTests.host_call_plugin = \ | ||
14 | 404 | stubs.FakeSessionForVMTests.host_call_plugin_swap | ||
15 | 405 | |||
16 | 406 | try: | ||
17 | 407 | # We'll steal the above glance linux test | ||
18 | 408 | self.test_spawn_vhd_glance_linux() | ||
19 | 409 | finally: | ||
20 | 410 | # Make sure to put this back | ||
21 | 411 | stubs.FakeSessionForVMTests.host_call_plugin = orig_func | ||
22 | 412 | |||
23 | 413 | # We should have 2 VBDs. | ||
24 | 414 | self.assertEqual(len(self.vm['VBDs']), 2) | ||
25 | 415 | # Now test that we have 1. | ||
26 | 416 | self.tearDown() | ||
27 | 417 | self.setUp() | ||
28 | 418 | self.test_spawn_vhd_glance_linux() | ||
29 | 419 | self.assertEqual(len(self.vm['VBDs']), 1) | ||
30 | 420 | |||
31 | 398 | def test_spawn_vhd_glance_windows(self): | 421 | def test_spawn_vhd_glance_windows(self): |
32 | 399 | FLAGS.xenapi_image_service = 'glance' | 422 | FLAGS.xenapi_image_service = 'glance' |
33 | 400 | self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None, | 423 | self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None, |
34 | 401 | 424 | ||
35 | === modified file 'nova/tests/xenapi/stubs.py' | |||
36 | --- nova/tests/xenapi/stubs.py 2011-05-13 16:47:18 +0000 | |||
37 | +++ nova/tests/xenapi/stubs.py 2011-05-26 20:22:46 +0000 | |||
38 | @@ -17,6 +17,7 @@ | |||
39 | 17 | """Stubouts, mocks and fixtures for the test suite""" | 17 | """Stubouts, mocks and fixtures for the test suite""" |
40 | 18 | 18 | ||
41 | 19 | import eventlet | 19 | import eventlet |
42 | 20 | import json | ||
43 | 20 | from nova.virt import xenapi_conn | 21 | from nova.virt import xenapi_conn |
44 | 21 | from nova.virt.xenapi import fake | 22 | from nova.virt.xenapi import fake |
45 | 22 | from nova.virt.xenapi import volume_utils | 23 | from nova.virt.xenapi import volume_utils |
46 | @@ -37,7 +38,7 @@ | |||
47 | 37 | sr_ref=sr_ref, sharable=False) | 38 | sr_ref=sr_ref, sharable=False) |
48 | 38 | vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) | 39 | vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) |
49 | 39 | vdi_uuid = vdi_rec['uuid'] | 40 | vdi_uuid = vdi_rec['uuid'] |
51 | 40 | return vdi_uuid | 41 | return [dict(vdi_type='os', vdi_uuid=vdi_uuid)] |
52 | 41 | 42 | ||
53 | 42 | stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) | 43 | stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) |
54 | 43 | 44 | ||
55 | @@ -132,11 +133,30 @@ | |||
56 | 132 | def __init__(self, uri): | 133 | def __init__(self, uri): |
57 | 133 | super(FakeSessionForVMTests, self).__init__(uri) | 134 | super(FakeSessionForVMTests, self).__init__(uri) |
58 | 134 | 135 | ||
64 | 135 | def host_call_plugin(self, _1, _2, _3, _4, _5): | 136 | def host_call_plugin(self, _1, _2, plugin, method, _5): |
65 | 136 | sr_ref = fake.get_all('SR')[0] | 137 | sr_ref = fake.get_all('SR')[0] |
66 | 137 | vdi_ref = fake.create_vdi('', False, sr_ref, False) | 138 | vdi_ref = fake.create_vdi('', False, sr_ref, False) |
67 | 138 | vdi_rec = fake.get_record('VDI', vdi_ref) | 139 | vdi_rec = fake.get_record('VDI', vdi_ref) |
68 | 139 | return '<string>%s</string>' % vdi_rec['uuid'] | 140 | if plugin == "glance" and method == "download_vhd": |
69 | 141 | ret_str = json.dumps([dict(vdi_type='os', | ||
70 | 142 | vdi_uuid=vdi_rec['uuid'])]) | ||
71 | 143 | else: | ||
72 | 144 | ret_str = vdi_rec['uuid'] | ||
73 | 145 | return '<string>%s</string>' % ret_str | ||
74 | 146 | |||
75 | 147 | def host_call_plugin_swap(self, _1, _2, plugin, method, _5): | ||
76 | 148 | sr_ref = fake.get_all('SR')[0] | ||
77 | 149 | vdi_ref = fake.create_vdi('', False, sr_ref, False) | ||
78 | 150 | vdi_rec = fake.get_record('VDI', vdi_ref) | ||
79 | 151 | if plugin == "glance" and method == "download_vhd": | ||
80 | 152 | swap_vdi_ref = fake.create_vdi('', False, sr_ref, False) | ||
81 | 153 | swap_vdi_rec = fake.get_record('VDI', swap_vdi_ref) | ||
82 | 154 | ret_str = json.dumps( | ||
83 | 155 | [dict(vdi_type='os', vdi_uuid=vdi_rec['uuid']), | ||
84 | 156 | dict(vdi_type='swap', vdi_uuid=swap_vdi_rec['uuid'])]) | ||
85 | 157 | else: | ||
86 | 158 | ret_str = vdi_rec['uuid'] | ||
87 | 159 | return '<string>%s</string>' % ret_str | ||
88 | 140 | 160 | ||
89 | 141 | def VM_start(self, _1, ref, _2, _3): | 161 | def VM_start(self, _1, ref, _2, _3): |
90 | 142 | vm = fake.get_record('VM', ref) | 162 | vm = fake.get_record('VM', ref) |
91 | 143 | 163 | ||
92 | === modified file 'nova/virt/xenapi/fake.py' | |||
93 | --- nova/virt/xenapi/fake.py 2011-04-18 22:00:39 +0000 | |||
94 | +++ nova/virt/xenapi/fake.py 2011-05-26 20:22:46 +0000 | |||
95 | @@ -159,7 +159,10 @@ | |||
96 | 159 | vbd_rec['device'] = '' | 159 | vbd_rec['device'] = '' |
97 | 160 | vm_ref = vbd_rec['VM'] | 160 | vm_ref = vbd_rec['VM'] |
98 | 161 | vm_rec = _db_content['VM'][vm_ref] | 161 | vm_rec = _db_content['VM'][vm_ref] |
100 | 162 | vm_rec['VBDs'] = [vbd_ref] | 162 | if vm_rec.get('VBDs', None): |
101 | 163 | vm_rec['VBDs'].append(vbd_ref) | ||
102 | 164 | else: | ||
103 | 165 | vm_rec['VBDs'] = [vbd_ref] | ||
104 | 163 | 166 | ||
105 | 164 | vm_name_label = _db_content['VM'][vm_ref]['name_label'] | 167 | vm_name_label = _db_content['VM'][vm_ref]['name_label'] |
106 | 165 | vbd_rec['vm_name_label'] = vm_name_label | 168 | vbd_rec['vm_name_label'] = vm_name_label |
107 | 166 | 169 | ||
108 | === modified file 'nova/virt/xenapi/vm_utils.py' | |||
109 | --- nova/virt/xenapi/vm_utils.py 2011-05-09 15:35:45 +0000 | |||
110 | +++ nova/virt/xenapi/vm_utils.py 2011-05-26 20:22:46 +0000 | |||
111 | @@ -19,6 +19,7 @@ | |||
112 | 19 | their attributes like VDIs, VIFs, as well as their lookup functions. | 19 | their attributes like VDIs, VIFs, as well as their lookup functions. |
113 | 20 | """ | 20 | """ |
114 | 21 | 21 | ||
115 | 22 | import json | ||
116 | 22 | import os | 23 | import os |
117 | 23 | import pickle | 24 | import pickle |
118 | 24 | import re | 25 | import re |
119 | @@ -376,6 +377,9 @@ | |||
120 | 376 | xenapi_image_service = ['glance', 'objectstore'] | 377 | xenapi_image_service = ['glance', 'objectstore'] |
121 | 377 | glance_address = 'address for glance services' | 378 | glance_address = 'address for glance services' |
122 | 378 | glance_port = 'port for glance services' | 379 | glance_port = 'port for glance services' |
123 | 380 | |||
124 | 381 | Returns: A single filename if image_type is KERNEL_RAMDISK | ||
125 | 382 | A list of dictionaries that describe VDIs, otherwise | ||
126 | 379 | """ | 383 | """ |
127 | 380 | access = AuthManager().get_access_key(user, project) | 384 | access = AuthManager().get_access_key(user, project) |
128 | 381 | 385 | ||
129 | @@ -390,6 +394,10 @@ | |||
130 | 390 | @classmethod | 394 | @classmethod |
131 | 391 | def _fetch_image_glance_vhd(cls, session, instance_id, image, access, | 395 | def _fetch_image_glance_vhd(cls, session, instance_id, image, access, |
132 | 392 | image_type): | 396 | image_type): |
133 | 397 | """Tell glance to download an image and put the VHDs into the SR | ||
134 | 398 | |||
135 | 399 | Returns: A list of dictionaries that describe VDIs | ||
136 | 400 | """ | ||
137 | 393 | LOG.debug(_("Asking xapi to fetch vhd image %(image)s") | 401 | LOG.debug(_("Asking xapi to fetch vhd image %(image)s") |
138 | 394 | % locals()) | 402 | % locals()) |
139 | 395 | 403 | ||
140 | @@ -408,18 +416,26 @@ | |||
141 | 408 | 416 | ||
142 | 409 | kwargs = {'params': pickle.dumps(params)} | 417 | kwargs = {'params': pickle.dumps(params)} |
143 | 410 | task = session.async_call_plugin('glance', 'download_vhd', kwargs) | 418 | task = session.async_call_plugin('glance', 'download_vhd', kwargs) |
145 | 411 | vdi_uuid = session.wait_for_task(task, instance_id) | 419 | result = session.wait_for_task(task, instance_id) |
146 | 420 | # 'download_vhd' will return a json encoded string containing | ||
147 | 421 | # a list of dictionaries describing VDIs. The dictionary will | ||
148 | 422 | # contain 'vdi_type' and 'vdi_uuid' keys. 'vdi_type' can be | ||
149 | 423 | # 'os' or 'swap' right now. | ||
150 | 424 | vdis = json.loads(result) | ||
151 | 425 | for vdi in vdis: | ||
152 | 426 | LOG.debug(_("xapi 'download_vhd' returned VDI of " | ||
153 | 427 | "type '%(vdi_type)s' with UUID '%(vdi_uuid)s'" % vdi)) | ||
154 | 412 | 428 | ||
155 | 413 | cls.scan_sr(session, instance_id, sr_ref) | 429 | cls.scan_sr(session, instance_id, sr_ref) |
156 | 414 | 430 | ||
157 | 431 | # Pull out the UUID of the first VDI | ||
158 | 432 | vdi_uuid = vdis[0]['vdi_uuid'] | ||
159 | 415 | # Set the name-label to ease debugging | 433 | # Set the name-label to ease debugging |
160 | 416 | vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) | 434 | vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) |
163 | 417 | name_label = get_name_label_for_image(image) | 435 | primary_name_label = get_name_label_for_image(image) |
164 | 418 | session.get_xenapi().VDI.set_name_label(vdi_ref, name_label) | 436 | session.get_xenapi().VDI.set_name_label(vdi_ref, primary_name_label) |
165 | 419 | 437 | ||
169 | 420 | LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(vdi_uuid)s") | 438 | return vdis |
167 | 421 | % locals()) | ||
168 | 422 | return vdi_uuid | ||
170 | 423 | 439 | ||
171 | 424 | @classmethod | 440 | @classmethod |
172 | 425 | def _fetch_image_glance_disk(cls, session, instance_id, image, access, | 441 | def _fetch_image_glance_disk(cls, session, instance_id, image, access, |
173 | @@ -431,6 +447,8 @@ | |||
174 | 431 | plugin; instead, it streams the disks through domU to the VDI | 447 | plugin; instead, it streams the disks through domU to the VDI |
175 | 432 | directly. | 448 | directly. |
176 | 433 | 449 | ||
177 | 450 | Returns: A single filename if image_type is KERNEL_RAMDISK | ||
178 | 451 | A list of dictionaries that describe VDIs, otherwise | ||
179 | 434 | """ | 452 | """ |
180 | 435 | # FIXME(sirp): Since the Glance plugin seems to be required for the | 453 | # FIXME(sirp): Since the Glance plugin seems to be required for the |
181 | 436 | # VHD disk, it may be worth using the plugin for both VHD and RAW and | 454 | # VHD disk, it may be worth using the plugin for both VHD and RAW and |
182 | @@ -476,7 +494,8 @@ | |||
183 | 476 | LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref) | 494 | LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref) |
184 | 477 | return filename | 495 | return filename |
185 | 478 | else: | 496 | else: |
187 | 479 | return session.get_xenapi().VDI.get_uuid(vdi_ref) | 497 | vdi_uuid = session.get_xenapi().VDI.get_uuid(vdi_ref) |
188 | 498 | return [dict(vdi_type='os', vdi_uuid=vdi_uuid)] | ||
189 | 480 | 499 | ||
190 | 481 | @classmethod | 500 | @classmethod |
191 | 482 | def determine_disk_image_type(cls, instance): | 501 | def determine_disk_image_type(cls, instance): |
192 | @@ -535,6 +554,11 @@ | |||
193 | 535 | @classmethod | 554 | @classmethod |
194 | 536 | def _fetch_image_glance(cls, session, instance_id, image, access, | 555 | def _fetch_image_glance(cls, session, instance_id, image, access, |
195 | 537 | image_type): | 556 | image_type): |
196 | 557 | """Fetch image from glance based on image type. | ||
197 | 558 | |||
198 | 559 | Returns: A single filename if image_type is KERNEL_RAMDISK | ||
199 | 560 | A list of dictionaries that describe VDIs, otherwise | ||
200 | 561 | """ | ||
201 | 538 | if image_type == ImageType.DISK_VHD: | 562 | if image_type == ImageType.DISK_VHD: |
202 | 539 | return cls._fetch_image_glance_vhd( | 563 | return cls._fetch_image_glance_vhd( |
203 | 540 | session, instance_id, image, access, image_type) | 564 | session, instance_id, image, access, image_type) |
204 | @@ -545,6 +569,11 @@ | |||
205 | 545 | @classmethod | 569 | @classmethod |
206 | 546 | def _fetch_image_objectstore(cls, session, instance_id, image, access, | 570 | def _fetch_image_objectstore(cls, session, instance_id, image, access, |
207 | 547 | secret, image_type): | 571 | secret, image_type): |
208 | 572 | """Fetch an image from objectstore. | ||
209 | 573 | |||
210 | 574 | Returns: A single filename if image_type is KERNEL_RAMDISK | ||
211 | 575 | A list of dictionaries that describe VDIs, otherwise | ||
212 | 576 | """ | ||
213 | 548 | url = images.image_url(image) | 577 | url = images.image_url(image) |
214 | 549 | LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals()) | 578 | LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals()) |
215 | 550 | if image_type == ImageType.KERNEL_RAMDISK: | 579 | if image_type == ImageType.KERNEL_RAMDISK: |
216 | @@ -562,8 +591,10 @@ | |||
217 | 562 | if image_type == ImageType.DISK_RAW: | 591 | if image_type == ImageType.DISK_RAW: |
218 | 563 | args['raw'] = 'true' | 592 | args['raw'] = 'true' |
219 | 564 | task = session.async_call_plugin('objectstore', fn, args) | 593 | task = session.async_call_plugin('objectstore', fn, args) |
222 | 565 | uuid = session.wait_for_task(task, instance_id) | 594 | uuid_or_fn = session.wait_for_task(task, instance_id) |
223 | 566 | return uuid | 595 | if image_type != ImageType.KERNEL_RAMDISK: |
224 | 596 | return [dict(vdi_type='os', vdi_uuid=uuid_or_fn)] | ||
225 | 597 | return uuid_or_fn | ||
226 | 567 | 598 | ||
227 | 568 | @classmethod | 599 | @classmethod |
228 | 569 | def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type, | 600 | def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type, |
229 | 570 | 601 | ||
230 | === modified file 'nova/virt/xenapi/vmops.py' | |||
231 | --- nova/virt/xenapi/vmops.py 2011-05-25 17:55:51 +0000 | |||
232 | +++ nova/virt/xenapi/vmops.py 2011-05-26 20:22:46 +0000 | |||
233 | @@ -91,7 +91,8 @@ | |||
234 | 91 | def finish_resize(self, instance, disk_info): | 91 | def finish_resize(self, instance, disk_info): |
235 | 92 | vdi_uuid = self.link_disks(instance, disk_info['base_copy'], | 92 | vdi_uuid = self.link_disks(instance, disk_info['base_copy'], |
236 | 93 | disk_info['cow']) | 93 | disk_info['cow']) |
238 | 94 | vm_ref = self._create_vm(instance, vdi_uuid) | 94 | vm_ref = self._create_vm(instance, |
239 | 95 | [dict(vdi_type='os', vdi_uuid=vdi_uuid)]) | ||
240 | 95 | self.resize_instance(instance, vdi_uuid) | 96 | self.resize_instance(instance, vdi_uuid) |
241 | 96 | self._spawn(instance, vm_ref) | 97 | self._spawn(instance, vm_ref) |
242 | 97 | 98 | ||
243 | @@ -105,24 +106,25 @@ | |||
244 | 105 | LOG.debug(_("Starting instance %s"), instance.name) | 106 | LOG.debug(_("Starting instance %s"), instance.name) |
245 | 106 | self._session.call_xenapi('VM.start', vm_ref, False, False) | 107 | self._session.call_xenapi('VM.start', vm_ref, False, False) |
246 | 107 | 108 | ||
248 | 108 | def _create_disk(self, instance): | 109 | def _create_disks(self, instance): |
249 | 109 | user = AuthManager().get_user(instance.user_id) | 110 | user = AuthManager().get_user(instance.user_id) |
250 | 110 | project = AuthManager().get_project(instance.project_id) | 111 | project = AuthManager().get_project(instance.project_id) |
251 | 111 | disk_image_type = VMHelper.determine_disk_image_type(instance) | 112 | disk_image_type = VMHelper.determine_disk_image_type(instance) |
255 | 112 | vdi_uuid = VMHelper.fetch_image(self._session, instance.id, | 113 | vdis = VMHelper.fetch_image(self._session, |
256 | 113 | instance.image_id, user, project, disk_image_type) | 114 | instance.id, instance.image_id, user, project, |
257 | 114 | return vdi_uuid | 115 | disk_image_type) |
258 | 116 | return vdis | ||
259 | 115 | 117 | ||
260 | 116 | def spawn(self, instance, network_info=None): | 118 | def spawn(self, instance, network_info=None): |
263 | 117 | vdi_uuid = self._create_disk(instance) | 119 | vdis = self._create_disks(instance) |
264 | 118 | vm_ref = self._create_vm(instance, vdi_uuid, network_info) | 120 | vm_ref = self._create_vm(instance, vdis, network_info) |
265 | 119 | self._spawn(instance, vm_ref) | 121 | self._spawn(instance, vm_ref) |
266 | 120 | 122 | ||
267 | 121 | def spawn_rescue(self, instance): | 123 | def spawn_rescue(self, instance): |
268 | 122 | """Spawn a rescue instance.""" | 124 | """Spawn a rescue instance.""" |
269 | 123 | self.spawn(instance) | 125 | self.spawn(instance) |
270 | 124 | 126 | ||
272 | 125 | def _create_vm(self, instance, vdi_uuid, network_info=None): | 127 | def _create_vm(self, instance, vdis, network_info=None): |
273 | 126 | """Create VM instance.""" | 128 | """Create VM instance.""" |
274 | 127 | instance_name = instance.name | 129 | instance_name = instance.name |
275 | 128 | vm_ref = VMHelper.lookup(self._session, instance_name) | 130 | vm_ref = VMHelper.lookup(self._session, instance_name) |
276 | @@ -141,28 +143,43 @@ | |||
277 | 141 | user = AuthManager().get_user(instance.user_id) | 143 | user = AuthManager().get_user(instance.user_id) |
278 | 142 | project = AuthManager().get_project(instance.project_id) | 144 | project = AuthManager().get_project(instance.project_id) |
279 | 143 | 145 | ||
280 | 144 | # Are we building from a pre-existing disk? | ||
281 | 145 | vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) | ||
282 | 146 | |||
283 | 147 | disk_image_type = VMHelper.determine_disk_image_type(instance) | 146 | disk_image_type = VMHelper.determine_disk_image_type(instance) |
284 | 148 | 147 | ||
285 | 149 | kernel = None | 148 | kernel = None |
286 | 150 | if instance.kernel_id: | 149 | if instance.kernel_id: |
287 | 151 | kernel = VMHelper.fetch_image(self._session, instance.id, | 150 | kernel = VMHelper.fetch_image(self._session, instance.id, |
289 | 152 | instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK) | 151 | instance.kernel_id, user, project, |
290 | 152 | ImageType.KERNEL_RAMDISK) | ||
291 | 153 | 153 | ||
292 | 154 | ramdisk = None | 154 | ramdisk = None |
293 | 155 | if instance.ramdisk_id: | 155 | if instance.ramdisk_id: |
294 | 156 | ramdisk = VMHelper.fetch_image(self._session, instance.id, | 156 | ramdisk = VMHelper.fetch_image(self._session, instance.id, |
302 | 157 | instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK) | 157 | instance.ramdisk_id, user, project, |
303 | 158 | 158 | ImageType.KERNEL_RAMDISK) | |
304 | 159 | use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id, | 159 | |
305 | 160 | vdi_ref, disk_image_type, instance.os_type) | 160 | # Create the VM ref and attach the first disk |
306 | 161 | vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk, | 161 | first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', |
307 | 162 | use_pv_kernel) | 162 | vdis[0]['vdi_uuid']) |
308 | 163 | 163 | use_pv_kernel = VMHelper.determine_is_pv(self._session, | |
309 | 164 | instance.id, first_vdi_ref, disk_image_type, | ||
310 | 165 | instance.os_type) | ||
311 | 166 | vm_ref = VMHelper.create_vm(self._session, instance, | ||
312 | 167 | kernel, ramdisk, use_pv_kernel) | ||
313 | 164 | VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, | 168 | VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, |
315 | 165 | vdi_ref=vdi_ref, userdevice=0, bootable=True) | 169 | vdi_ref=first_vdi_ref, userdevice=0, bootable=True) |
316 | 170 | |||
317 | 171 | # Attach any other disks | ||
318 | 172 | # userdevice 1 is reserved for rescue | ||
319 | 173 | userdevice = 2 | ||
320 | 174 | for vdi in vdis[1:]: | ||
321 | 175 | # vdi['vdi_type'] is either 'os' or 'swap', but we don't | ||
322 | 176 | # really care what it is right here. | ||
323 | 177 | vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', | ||
324 | 178 | vdi['vdi_uuid']) | ||
325 | 179 | VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, | ||
326 | 180 | vdi_ref=vdi_ref, userdevice=userdevice, | ||
327 | 181 | bootable=False) | ||
328 | 182 | userdevice += 1 | ||
329 | 166 | 183 | ||
330 | 167 | # TODO(tr3buchet) - check to make sure we have network info, otherwise | 184 | # TODO(tr3buchet) - check to make sure we have network info, otherwise |
331 | 168 | # create it now. This goes away once nova-multi-nic hits. | 185 | # create it now. This goes away once nova-multi-nic hits. |
332 | @@ -172,7 +189,7 @@ | |||
333 | 172 | # Alter the image before VM start for, e.g. network injection | 189 | # Alter the image before VM start for, e.g. network injection |
334 | 173 | if FLAGS.xenapi_inject_image: | 190 | if FLAGS.xenapi_inject_image: |
335 | 174 | VMHelper.preconfigure_instance(self._session, instance, | 191 | VMHelper.preconfigure_instance(self._session, instance, |
337 | 175 | vdi_ref, network_info) | 192 | first_vdi_ref, network_info) |
338 | 176 | 193 | ||
339 | 177 | self.create_vifs(vm_ref, network_info) | 194 | self.create_vifs(vm_ref, network_info) |
340 | 178 | self.inject_network_info(instance, network_info, vm_ref) | 195 | self.inject_network_info(instance, network_info, vm_ref) |
341 | 179 | 196 | ||
342 | === modified file 'plugins/xenserver/xenapi/etc/xapi.d/plugins/glance' | |||
343 | --- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance 2011-05-20 21:45:19 +0000 | |||
344 | +++ plugins/xenserver/xenapi/etc/xapi.d/plugins/glance 2011-05-26 20:22:46 +0000 | |||
345 | @@ -22,6 +22,10 @@ | |||
346 | 22 | # | 22 | # |
347 | 23 | 23 | ||
348 | 24 | import httplib | 24 | import httplib |
349 | 25 | try: | ||
350 | 26 | import json | ||
351 | 27 | except ImportError: | ||
352 | 28 | import simplejson as json | ||
353 | 25 | import os | 29 | import os |
354 | 26 | import os.path | 30 | import os.path |
355 | 27 | import pickle | 31 | import pickle |
356 | @@ -87,8 +91,8 @@ | |||
357 | 87 | conn.close() | 91 | conn.close() |
358 | 88 | 92 | ||
359 | 89 | 93 | ||
362 | 90 | def _fixup_vhds(sr_path, staging_path, uuid_stack): | 94 | def _import_vhds(sr_path, staging_path, uuid_stack): |
363 | 91 | """Fixup the downloaded VHDs before we move them into the SR. | 95 | """Import the VHDs found in the staging path. |
364 | 92 | 96 | ||
365 | 93 | We cannot extract VHDs directly into the SR since they don't yet have | 97 | We cannot extract VHDs directly into the SR since they don't yet have |
366 | 94 | UUIDs, aren't properly associated with each other, and would be subject to | 98 | UUIDs, aren't properly associated with each other, and would be subject to |
367 | @@ -98,16 +102,25 @@ | |||
368 | 98 | To avoid these we problems, we use a staging area to fixup the VHDs before | 102 | To avoid these we problems, we use a staging area to fixup the VHDs before |
369 | 99 | moving them into the SR. The steps involved are: | 103 | moving them into the SR. The steps involved are: |
370 | 100 | 104 | ||
372 | 101 | 1. Extracting tarball into staging area | 105 | 1. Extracting tarball into staging area (done prior to this call) |
373 | 102 | 106 | ||
374 | 103 | 2. Renaming VHDs to use UUIDs ('snap.vhd' -> 'ffff-aaaa-...vhd') | 107 | 2. Renaming VHDs to use UUIDs ('snap.vhd' -> 'ffff-aaaa-...vhd') |
375 | 104 | 108 | ||
377 | 105 | 3. Linking the two VHDs together | 109 | 3. Linking VHDs together if there's a snap.vhd |
378 | 106 | 110 | ||
379 | 107 | 4. Pseudo-atomically moving the images into the SR. (It's not really | 111 | 4. Pseudo-atomically moving the images into the SR. (It's not really |
382 | 108 | atomic because it takes place as two os.rename operations; however, | 112 | atomic because it takes place as multiple os.rename operations; |
383 | 109 | the chances of an SR.scan occuring between the two rename() | 113 | however, the chances of an SR.scan occuring between the rename()s |
384 | 110 | invocations is so small that we can safely ignore it) | 114 | invocations is so small that we can safely ignore it) |
385 | 115 | |||
386 | 116 | Returns: A list of VDIs. Each list element is a dictionary containing | ||
387 | 117 | information about the VHD. Dictionary keys are: | ||
388 | 118 | 1. "vdi_type" - The type of VDI. Currently they can be "os_disk" or | ||
389 | 119 | "swap" | ||
390 | 120 | 2. "vdi_uuid" - The UUID of the VDI | ||
391 | 121 | |||
392 | 122 | Example return: [{"vdi_type": "os_disk","vdi_uuid": "ffff-aaa..vhd"}, | ||
393 | 123 | {"vdi_type": "swap","vdi_uuid": "ffff-bbb..vhd"}] | ||
394 | 111 | """ | 124 | """ |
395 | 112 | def rename_with_uuid(orig_path): | 125 | def rename_with_uuid(orig_path): |
396 | 113 | """Rename VHD using UUID so that it will be recognized by SR on a | 126 | """Rename VHD using UUID so that it will be recognized by SR on a |
397 | @@ -158,27 +171,59 @@ | |||
398 | 158 | "VHD %(path)s is marked as hidden without child" % | 171 | "VHD %(path)s is marked as hidden without child" % |
399 | 159 | locals()) | 172 | locals()) |
400 | 160 | 173 | ||
403 | 161 | orig_base_copy_path = os.path.join(staging_path, 'image.vhd') | 174 | def prepare_if_exists(staging_path, vhd_name, parent_path=None): |
404 | 162 | if not os.path.exists(orig_base_copy_path): | 175 | """ |
405 | 176 | Check for existance of a particular VHD in the staging path and | ||
406 | 177 | preparing it for moving into the SR. | ||
407 | 178 | |||
408 | 179 | Returns: Tuple of (Path to move into the SR, VDI_UUID) | ||
409 | 180 | None, if the vhd_name doesn't exist in the staging path | ||
410 | 181 | |||
411 | 182 | If the VHD exists, we will do the following: | ||
412 | 183 | 1. Rename it with a UUID. | ||
413 | 184 | 2. If parent_path exists, we'll link up the VHDs. | ||
414 | 185 | """ | ||
415 | 186 | orig_path = os.path.join(staging_path, vhd_name) | ||
416 | 187 | if not os.path.exists(orig_path): | ||
417 | 188 | return None | ||
418 | 189 | new_path, vdi_uuid = rename_with_uuid(orig_path) | ||
419 | 190 | if parent_path: | ||
420 | 191 | # NOTE(sirp): this step is necessary so that an SR scan won't | ||
421 | 192 | # delete the base_copy out from under us (since it would be | ||
422 | 193 | # orphaned) | ||
423 | 194 | link_vhds(new_path, parent_path) | ||
424 | 195 | return (new_path, vdi_uuid) | ||
425 | 196 | |||
426 | 197 | vdi_return_list = [] | ||
427 | 198 | paths_to_move = [] | ||
428 | 199 | |||
429 | 200 | image_info = prepare_if_exists(staging_path, 'image.vhd') | ||
430 | 201 | if not image_info: | ||
431 | 163 | raise Exception("Invalid image: image.vhd not present") | 202 | raise Exception("Invalid image: image.vhd not present") |
432 | 164 | 203 | ||
434 | 165 | base_copy_path, base_copy_uuid = rename_with_uuid(orig_base_copy_path) | 204 | paths_to_move.append(image_info[0]) |
435 | 166 | 205 | ||
446 | 167 | vdi_uuid = base_copy_uuid | 206 | snap_info = prepare_if_exists(staging_path, 'snap.vhd', |
447 | 168 | orig_snap_path = os.path.join(staging_path, 'snap.vhd') | 207 | image_info[0]) |
448 | 169 | if os.path.exists(orig_snap_path): | 208 | if snap_info: |
449 | 170 | snap_path, snap_uuid = rename_with_uuid(orig_snap_path) | 209 | paths_to_move.append(snap_info[0]) |
450 | 171 | vdi_uuid = snap_uuid | 210 | # We return this snap as the VDI instead of image.vhd |
451 | 172 | # NOTE(sirp): this step is necessary so that an SR scan won't | 211 | vdi_return_list.append(dict(vdi_type="os", vdi_uuid=snap_info[1])) |
442 | 173 | # delete the base_copy out from under us (since it would be | ||
443 | 174 | # orphaned) | ||
444 | 175 | link_vhds(snap_path, base_copy_path) | ||
445 | 176 | move_into_sr(snap_path) | ||
452 | 177 | else: | 212 | else: |
457 | 178 | assert_vhd_not_hidden(base_copy_path) | 213 | assert_vhd_not_hidden(image_info[0]) |
458 | 179 | 214 | # If there's no snap, we return the image.vhd UUID | |
459 | 180 | move_into_sr(base_copy_path) | 215 | vdi_return_list.append(dict(vdi_type="os", vdi_uuid=image_info[1])) |
460 | 181 | return vdi_uuid | 216 | |
461 | 217 | swap_info = prepare_if_exists(staging_path, 'swap.vhd') | ||
462 | 218 | if swap_info: | ||
463 | 219 | assert_vhd_not_hidden(swap_info[0]) | ||
464 | 220 | paths_to_move.append(swap_info[0]) | ||
465 | 221 | vdi_return_list.append(dict(vdi_type="swap", vdi_uuid=swap_info[1])) | ||
466 | 222 | |||
467 | 223 | for path in paths_to_move: | ||
468 | 224 | move_into_sr(path) | ||
469 | 225 | |||
470 | 226 | return vdi_return_list | ||
471 | 182 | 227 | ||
472 | 183 | 228 | ||
473 | 184 | def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids): | 229 | def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids): |
474 | @@ -324,8 +369,9 @@ | |||
475 | 324 | try: | 369 | try: |
476 | 325 | _download_tarball(sr_path, staging_path, image_id, glance_host, | 370 | _download_tarball(sr_path, staging_path, image_id, glance_host, |
477 | 326 | glance_port) | 371 | glance_port) |
480 | 327 | vdi_uuid = _fixup_vhds(sr_path, staging_path, uuid_stack) | 372 | # Right now, it's easier to return a single string via XenAPI, |
481 | 328 | return vdi_uuid | 373 | # so we'll json encode the list of VHDs. |
482 | 374 | return json.dumps(_import_vhds(sr_path, staging_path, uuid_stack)) | ||
483 | 329 | finally: | 375 | finally: |
484 | 330 | _cleanup_staging_area(staging_path) | 376 | _cleanup_staging_area(staging_path) |
485 | 331 | 377 |
This looks cool. It would be nice to have similar functionality for libvirt/kvm.
Vish
On May 26, 2011, at 12:46 PM, Chris Behrens wrote:
> Chris Behrens has proposed merging lp:~cbehrens/nova/swapdisk into lp:nova. /code.launchpad .net/~cbehrens/ nova/swapdisk/ +merge/ 62549 /code.launchpad .net/~cbehrens/ nova/swapdisk/ +merge/ 62549 test_xenapi. py' test_xenapi. py 2011-05-13 16:45:42 +0000 test_xenapi. py 2011-05-26 19:46:27 +0000 vm_params_ for_linux( ) vhd_glance_ swapdisk( self): onForVMTests. host_call_ plugin onForVMTests. host_call_ plugin = \ onForVMTests. host_call_ plugin_ swap spawn_vhd_ glance_ linux() onForVMTests. host_call_ plugin = orig_func l(len(self. vm['VBDs' ]), 2) spawn_vhd_ glance_ linux() l(len(self. vm['VBDs' ]), 1) vhd_glance_ windows( self): image_service = 'glance' spawn(glance_ stubs.FakeGlanc e.IMAGE_ VHD, None, None, xenapi/ stubs.py' xenapi/ stubs.py 2011-05-13 16:47:18 +0000 xenapi/ stubs.py 2011-05-26 19:46:27 +0000 get_xenapi( ).VDI.get_ record( vdi_ref) type='os' , vdi_uuid=vdi_uuid)] vm_utils. VMHelper, 'fetch_image', fake_fetch_image)
>
> Requested reviews:
> Nova Core (nova-core)
>
> For more details, see:
> https://code.launchpad.net/~cbehrens/nova/swapdisk/+merge/62549
>
> Essentially adds support for wiring up a swap disk when building.
>
> Modifies the glance plugin to check for a swap.vhd. Glance's download_vhd will now return a list of dictionaries describing VHDs found in the image. All returns from _fetch_image calls in xenapi have been modified accordingly.
>
> One can now build a .ova for glance that contains an image.vhd and a swap.vhd files.
>
> When a VM is created, it'll iterate through the list and create VBDs for all of the VDIs found.
>
> Added a test for this, too, which required a slight fix to xenapi's fake.py.
> --
> https://code.launchpad.net/~cbehrens/nova/swapdisk/+merge/62549
> You are subscribed to branch lp:nova.
> === modified file 'nova/tests/
> --- nova/tests/
> +++ nova/tests/
> @@ -395,6 +395,29 @@
> os_type="linux")
> self.check_
>
> + def test_spawn_
> + # Change the default host_call_plugin to one that'll return
> + # a swap disk
> + orig_func = stubs.FakeSessi
> +
> + stubs.FakeSessi
> + stubs.FakeSessi
> +
> + try:
> + # We'll steal the above glance linux test
> + self.test_
> + finally:
> + # Make sure to put this back
> + stubs.FakeSessi
> +
> + # We should have 2 VBDs.
> + self.assertEqua
> + # Now test that we have 1.
> + self.tearDown()
> + self.setUp()
> + self.test_
> + self.assertEqua
> +
> def test_spawn_
> FLAGS.xenapi_
> self._test_
>
> === modified file 'nova/tests/
> --- nova/tests/
> +++ nova/tests/
> @@ -17,6 +17,7 @@
> """Stubouts, mocks and fixtures for the test suite"""
>
> import eventlet
> +import json
> from nova.virt import xenapi_conn
> from nova.virt.xenapi import fake
> from nova.virt.xenapi import volume_utils
> @@ -37,7 +38,7 @@
> sr_ref=sr_ref, sharable=False)
> vdi_rec = session.
> vdi_uuid = vdi_rec['uuid']
> - return vdi_uuid
> + return [dict(vdi_
>
> stubs.Set(
>
> @@ -132,11 +133,30 @@
> def __init__(self, uri):
> s...