Merge lp:~cbehrens/nova/swapdisk into lp:~hudson-openstack/nova/trunk
Status: Merged
Approved by: Josh Kearney
Approved revision: 1113
Merged at revision: 1115
Proposed branch: lp:~cbehrens/nova/swapdisk
Merge into: lp:~hudson-openstack/nova/trunk
Diff against target: 484 lines (+202/-62), 6 files modified:
  nova/tests/test_xenapi.py (+23/-0)
  nova/tests/xenapi/stubs.py (+26/-6)
  nova/virt/xenapi/fake.py (+4/-1)
  nova/virt/xenapi/vm_utils.py (+40/-9)
  nova/virt/xenapi/vmops.py (+38/-21)
  plugins/xenserver/xenapi/etc/xapi.d/plugins/glance (+71/-25)
To merge this branch: bzr merge lp:~cbehrens/nova/swapdisk
Related bugs: none
Reviews:
  Ed Leafe (community): Approve
  Josh Kearney (community): Approve
Review via email: mp+62549@code.launchpad.net
Commit message
Description of the change
Essentially, this adds support for wiring up a swap disk when building an instance.
Modifies the glance plugin to check for a swap.vhd. Glance's download_vhd will now return a list of dictionaries describing VHDs found in the image. All returns from _fetch_image calls in xenapi have been modified accordingly.
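For illustration, this is the shape of the json-encoded value download_vhd now returns (a minimal sketch; the UUIDs are made-up placeholders, not real values):

    import json

    # json string as returned by the glance plugin's download_vhd
    # (placeholder UUIDs; a real response carries actual VDI UUIDs)
    result = ('[{"vdi_type": "os", "vdi_uuid": "ffff-aaaa"}, '
              '{"vdi_type": "swap", "vdi_uuid": "ffff-bbbb"}]')
    vdis = json.loads(result)
    for vdi in vdis:
        print('VDI type %(vdi_type)s has UUID %(vdi_uuid)s' % vdi)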
One can now build a .ova for glance that contains image.vhd and swap.vhd files.
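Since the glance plugin unpacks the .ova as a tarball, such an image could be packaged along these lines (a hedged sketch; the filenames follow the description above and the output name 'instance.ova' is an arbitrary example):

    import tarfile

    # Package the two VHDs into an .ova tarball for upload to glance.
    with tarfile.open('instance.ova', 'w') as ova:
        ova.add('image.vhd')
        ova.add('swap.vhd')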
When a VM is created, the code iterates through the list and creates VBDs for all of the VDIs found, as sketched below.
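The wiring loop looks roughly like this (condensed from the vmops.py hunk in the diff below; 'session' and 'vm_ref' are assumed to come from the surrounding method, so treat this as illustrative rather than the exact merged code):

    # First VDI is the boot disk on userdevice 0
    first_vdi_ref = session.call_xenapi('VDI.get_by_uuid',
                                        vdis[0]['vdi_uuid'])
    VMHelper.create_vbd(session=session, vm_ref=vm_ref,
                        vdi_ref=first_vdi_ref, userdevice=0,
                        bootable=True)

    # userdevice 1 is reserved for rescue, so extra disks start at 2
    userdevice = 2
    for vdi in vdis[1:]:
        vdi_ref = session.call_xenapi('VDI.get_by_uuid',
                                      vdi['vdi_uuid'])
        VMHelper.create_vbd(session=session, vm_ref=vm_ref,
                            vdi_ref=vdi_ref, userdevice=userdevice,
                            bootable=False)
        userdevice += 1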
Added a test for this, too, which required a slight fix to xenapi's fake.py.
Vish Ishaya (vishvananda) wrote:
This looks cool. It would be nice to have similar functionality for libvirt/kvm.
Vish
Chris Behrens (cbehrens) wrote:
I see I've accidentally removed an assert from the glance plugin... fixing...
Ed Leafe (ed-leafe) wrote:
Really Vish? Quoting the whole diff in your reply?
:-P
Josh Kearney (jk0) wrote:
Looks great (and works great -- tested in our lab). The extra docs are very much appreciated.
Chris Behrens (cbehrens) wrote:
Vish: Yeah, out of scope for what I need to do for RAX. This just builds on the xenserver glance plugin support.
Chris Behrens (cbehrens) wrote:
Updating a comment...
Vish Ishaya (vishvananda) wrote:
Ha, oops. That is what I get for just hitting reply in my mail client.
:)
On May 26, 2011, at 1:01 PM, Ed Leafe wrote:
> Really Vish? Quoting the whole diff in your reply?
>
> :-P
1113. By Chris Behrens: add a comment when calling glance:download_vhd so it's clear what is returned
Preview Diff
1 | === modified file 'nova/tests/test_xenapi.py' |
2 | --- nova/tests/test_xenapi.py 2011-05-13 16:45:42 +0000 |
3 | +++ nova/tests/test_xenapi.py 2011-05-26 20:22:46 +0000 |
4 | @@ -395,6 +395,29 @@ |
5 | os_type="linux") |
6 | self.check_vm_params_for_linux() |
7 | |
8 | + def test_spawn_vhd_glance_swapdisk(self): |
9 | + # Change the default host_call_plugin to one that'll return |
10 | + # a swap disk |
11 | + orig_func = stubs.FakeSessionForVMTests.host_call_plugin |
12 | + |
13 | + stubs.FakeSessionForVMTests.host_call_plugin = \ |
14 | + stubs.FakeSessionForVMTests.host_call_plugin_swap |
15 | + |
16 | + try: |
17 | + # We'll steal the above glance linux test |
18 | + self.test_spawn_vhd_glance_linux() |
19 | + finally: |
20 | + # Make sure to put this back |
21 | + stubs.FakeSessionForVMTests.host_call_plugin = orig_func |
22 | + |
23 | + # We should have 2 VBDs. |
24 | + self.assertEqual(len(self.vm['VBDs']), 2) |
25 | + # Now test that we have 1. |
26 | + self.tearDown() |
27 | + self.setUp() |
28 | + self.test_spawn_vhd_glance_linux() |
29 | + self.assertEqual(len(self.vm['VBDs']), 1) |
30 | + |
31 | def test_spawn_vhd_glance_windows(self): |
32 | FLAGS.xenapi_image_service = 'glance' |
33 | self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None, |
34 | |
35 | === modified file 'nova/tests/xenapi/stubs.py' |
36 | --- nova/tests/xenapi/stubs.py 2011-05-13 16:47:18 +0000 |
37 | +++ nova/tests/xenapi/stubs.py 2011-05-26 20:22:46 +0000 |
38 | @@ -17,6 +17,7 @@ |
39 | """Stubouts, mocks and fixtures for the test suite""" |
40 | |
41 | import eventlet |
42 | +import json |
43 | from nova.virt import xenapi_conn |
44 | from nova.virt.xenapi import fake |
45 | from nova.virt.xenapi import volume_utils |
46 | @@ -37,7 +38,7 @@ |
47 | sr_ref=sr_ref, sharable=False) |
48 | vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) |
49 | vdi_uuid = vdi_rec['uuid'] |
50 | - return vdi_uuid |
51 | + return [dict(vdi_type='os', vdi_uuid=vdi_uuid)] |
52 | |
53 | stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) |
54 | |
55 | @@ -132,11 +133,30 @@ |
56 | def __init__(self, uri): |
57 | super(FakeSessionForVMTests, self).__init__(uri) |
58 | |
59 | - def host_call_plugin(self, _1, _2, _3, _4, _5): |
60 | - sr_ref = fake.get_all('SR')[0] |
61 | - vdi_ref = fake.create_vdi('', False, sr_ref, False) |
62 | - vdi_rec = fake.get_record('VDI', vdi_ref) |
63 | - return '<string>%s</string>' % vdi_rec['uuid'] |
64 | + def host_call_plugin(self, _1, _2, plugin, method, _5): |
65 | + sr_ref = fake.get_all('SR')[0] |
66 | + vdi_ref = fake.create_vdi('', False, sr_ref, False) |
67 | + vdi_rec = fake.get_record('VDI', vdi_ref) |
68 | + if plugin == "glance" and method == "download_vhd": |
69 | + ret_str = json.dumps([dict(vdi_type='os', |
70 | + vdi_uuid=vdi_rec['uuid'])]) |
71 | + else: |
72 | + ret_str = vdi_rec['uuid'] |
73 | + return '<string>%s</string>' % ret_str |
74 | + |
75 | + def host_call_plugin_swap(self, _1, _2, plugin, method, _5): |
76 | + sr_ref = fake.get_all('SR')[0] |
77 | + vdi_ref = fake.create_vdi('', False, sr_ref, False) |
78 | + vdi_rec = fake.get_record('VDI', vdi_ref) |
79 | + if plugin == "glance" and method == "download_vhd": |
80 | + swap_vdi_ref = fake.create_vdi('', False, sr_ref, False) |
81 | + swap_vdi_rec = fake.get_record('VDI', swap_vdi_ref) |
82 | + ret_str = json.dumps( |
83 | + [dict(vdi_type='os', vdi_uuid=vdi_rec['uuid']), |
84 | + dict(vdi_type='swap', vdi_uuid=swap_vdi_rec['uuid'])]) |
85 | + else: |
86 | + ret_str = vdi_rec['uuid'] |
87 | + return '<string>%s</string>' % ret_str |
88 | |
89 | def VM_start(self, _1, ref, _2, _3): |
90 | vm = fake.get_record('VM', ref) |
91 | |
92 | === modified file 'nova/virt/xenapi/fake.py' |
93 | --- nova/virt/xenapi/fake.py 2011-04-18 22:00:39 +0000 |
94 | +++ nova/virt/xenapi/fake.py 2011-05-26 20:22:46 +0000 |
95 | @@ -159,7 +159,10 @@ |
96 | vbd_rec['device'] = '' |
97 | vm_ref = vbd_rec['VM'] |
98 | vm_rec = _db_content['VM'][vm_ref] |
99 | - vm_rec['VBDs'] = [vbd_ref] |
100 | + if vm_rec.get('VBDs', None): |
101 | + vm_rec['VBDs'].append(vbd_ref) |
102 | + else: |
103 | + vm_rec['VBDs'] = [vbd_ref] |
104 | |
105 | vm_name_label = _db_content['VM'][vm_ref]['name_label'] |
106 | vbd_rec['vm_name_label'] = vm_name_label |
107 | |
108 | === modified file 'nova/virt/xenapi/vm_utils.py' |
109 | --- nova/virt/xenapi/vm_utils.py 2011-05-09 15:35:45 +0000 |
110 | +++ nova/virt/xenapi/vm_utils.py 2011-05-26 20:22:46 +0000 |
111 | @@ -19,6 +19,7 @@ |
112 | their attributes like VDIs, VIFs, as well as their lookup functions. |
113 | """ |
114 | |
115 | +import json |
116 | import os |
117 | import pickle |
118 | import re |
119 | @@ -376,6 +377,9 @@ |
120 | xenapi_image_service = ['glance', 'objectstore'] |
121 | glance_address = 'address for glance services' |
122 | glance_port = 'port for glance services' |
123 | + |
124 | + Returns: A single filename if image_type is KERNEL_RAMDISK |
125 | + A list of dictionaries that describe VDIs, otherwise |
126 | """ |
127 | access = AuthManager().get_access_key(user, project) |
128 | |
129 | @@ -390,6 +394,10 @@ |
130 | @classmethod |
131 | def _fetch_image_glance_vhd(cls, session, instance_id, image, access, |
132 | image_type): |
133 | + """Tell glance to download an image and put the VHDs into the SR |
134 | + |
135 | + Returns: A list of dictionaries that describe VDIs |
136 | + """ |
137 | LOG.debug(_("Asking xapi to fetch vhd image %(image)s") |
138 | % locals()) |
139 | |
140 | @@ -408,18 +416,26 @@ |
141 | |
142 | kwargs = {'params': pickle.dumps(params)} |
143 | task = session.async_call_plugin('glance', 'download_vhd', kwargs) |
144 | - vdi_uuid = session.wait_for_task(task, instance_id) |
145 | + result = session.wait_for_task(task, instance_id) |
146 | + # 'download_vhd' will return a json encoded string containing |
147 | + # a list of dictionaries describing VDIs. The dictionary will |
148 | + # contain 'vdi_type' and 'vdi_uuid' keys. 'vdi_type' can be |
149 | + # 'os' or 'swap' right now. |
150 | + vdis = json.loads(result) |
151 | + for vdi in vdis: |
152 | + LOG.debug(_("xapi 'download_vhd' returned VDI of " |
153 | + "type '%(vdi_type)s' with UUID '%(vdi_uuid)s'" % vdi)) |
154 | |
155 | cls.scan_sr(session, instance_id, sr_ref) |
156 | |
157 | + # Pull out the UUID of the first VDI |
158 | + vdi_uuid = vdis[0]['vdi_uuid'] |
159 | # Set the name-label to ease debugging |
160 | vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) |
161 | - name_label = get_name_label_for_image(image) |
162 | - session.get_xenapi().VDI.set_name_label(vdi_ref, name_label) |
163 | + primary_name_label = get_name_label_for_image(image) |
164 | + session.get_xenapi().VDI.set_name_label(vdi_ref, primary_name_label) |
165 | |
166 | - LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(vdi_uuid)s") |
167 | - % locals()) |
168 | - return vdi_uuid |
169 | + return vdis |
170 | |
171 | @classmethod |
172 | def _fetch_image_glance_disk(cls, session, instance_id, image, access, |
173 | @@ -431,6 +447,8 @@ |
174 | plugin; instead, it streams the disks through domU to the VDI |
175 | directly. |
176 | |
177 | + Returns: A single filename if image_type is KERNEL_RAMDISK |
178 | + A list of dictionaries that describe VDIs, otherwise |
179 | """ |
180 | # FIXME(sirp): Since the Glance plugin seems to be required for the |
181 | # VHD disk, it may be worth using the plugin for both VHD and RAW and |
182 | @@ -476,7 +494,8 @@ |
183 | LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref) |
184 | return filename |
185 | else: |
186 | - return session.get_xenapi().VDI.get_uuid(vdi_ref) |
187 | + vdi_uuid = session.get_xenapi().VDI.get_uuid(vdi_ref) |
188 | + return [dict(vdi_type='os', vdi_uuid=vdi_uuid)] |
189 | |
190 | @classmethod |
191 | def determine_disk_image_type(cls, instance): |
192 | @@ -535,6 +554,11 @@ |
193 | @classmethod |
194 | def _fetch_image_glance(cls, session, instance_id, image, access, |
195 | image_type): |
196 | + """Fetch image from glance based on image type. |
197 | + |
198 | + Returns: A single filename if image_type is KERNEL_RAMDISK |
199 | + A list of dictionaries that describe VDIs, otherwise |
200 | + """ |
201 | if image_type == ImageType.DISK_VHD: |
202 | return cls._fetch_image_glance_vhd( |
203 | session, instance_id, image, access, image_type) |
204 | @@ -545,6 +569,11 @@ |
205 | @classmethod |
206 | def _fetch_image_objectstore(cls, session, instance_id, image, access, |
207 | secret, image_type): |
208 | + """Fetch an image from objectstore. |
209 | + |
210 | + Returns: A single filename if image_type is KERNEL_RAMDISK |
211 | + A list of dictionaries that describe VDIs, otherwise |
212 | + """ |
213 | url = images.image_url(image) |
214 | LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals()) |
215 | if image_type == ImageType.KERNEL_RAMDISK: |
216 | @@ -562,8 +591,10 @@ |
217 | if image_type == ImageType.DISK_RAW: |
218 | args['raw'] = 'true' |
219 | task = session.async_call_plugin('objectstore', fn, args) |
220 | - uuid = session.wait_for_task(task, instance_id) |
221 | - return uuid |
222 | + uuid_or_fn = session.wait_for_task(task, instance_id) |
223 | + if image_type != ImageType.KERNEL_RAMDISK: |
224 | + return [dict(vdi_type='os', vdi_uuid=uuid_or_fn)] |
225 | + return uuid_or_fn |
226 | |
227 | @classmethod |
228 | def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type, |
229 | |
230 | === modified file 'nova/virt/xenapi/vmops.py' |
231 | --- nova/virt/xenapi/vmops.py 2011-05-25 17:55:51 +0000 |
232 | +++ nova/virt/xenapi/vmops.py 2011-05-26 20:22:46 +0000 |
233 | @@ -91,7 +91,8 @@ |
234 | def finish_resize(self, instance, disk_info): |
235 | vdi_uuid = self.link_disks(instance, disk_info['base_copy'], |
236 | disk_info['cow']) |
237 | - vm_ref = self._create_vm(instance, vdi_uuid) |
238 | + vm_ref = self._create_vm(instance, |
239 | + [dict(vdi_type='os', vdi_uuid=vdi_uuid)]) |
240 | self.resize_instance(instance, vdi_uuid) |
241 | self._spawn(instance, vm_ref) |
242 | |
243 | @@ -105,24 +106,25 @@ |
244 | LOG.debug(_("Starting instance %s"), instance.name) |
245 | self._session.call_xenapi('VM.start', vm_ref, False, False) |
246 | |
247 | - def _create_disk(self, instance): |
248 | + def _create_disks(self, instance): |
249 | user = AuthManager().get_user(instance.user_id) |
250 | project = AuthManager().get_project(instance.project_id) |
251 | disk_image_type = VMHelper.determine_disk_image_type(instance) |
252 | - vdi_uuid = VMHelper.fetch_image(self._session, instance.id, |
253 | - instance.image_id, user, project, disk_image_type) |
254 | - return vdi_uuid |
255 | + vdis = VMHelper.fetch_image(self._session, |
256 | + instance.id, instance.image_id, user, project, |
257 | + disk_image_type) |
258 | + return vdis |
259 | |
260 | def spawn(self, instance, network_info=None): |
261 | - vdi_uuid = self._create_disk(instance) |
262 | - vm_ref = self._create_vm(instance, vdi_uuid, network_info) |
263 | + vdis = self._create_disks(instance) |
264 | + vm_ref = self._create_vm(instance, vdis, network_info) |
265 | self._spawn(instance, vm_ref) |
266 | |
267 | def spawn_rescue(self, instance): |
268 | """Spawn a rescue instance.""" |
269 | self.spawn(instance) |
270 | |
271 | - def _create_vm(self, instance, vdi_uuid, network_info=None): |
272 | + def _create_vm(self, instance, vdis, network_info=None): |
273 | """Create VM instance.""" |
274 | instance_name = instance.name |
275 | vm_ref = VMHelper.lookup(self._session, instance_name) |
276 | @@ -141,28 +143,43 @@ |
277 | user = AuthManager().get_user(instance.user_id) |
278 | project = AuthManager().get_project(instance.project_id) |
279 | |
280 | - # Are we building from a pre-existing disk? |
281 | - vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) |
282 | - |
283 | disk_image_type = VMHelper.determine_disk_image_type(instance) |
284 | |
285 | kernel = None |
286 | if instance.kernel_id: |
287 | kernel = VMHelper.fetch_image(self._session, instance.id, |
288 | - instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK) |
289 | + instance.kernel_id, user, project, |
290 | + ImageType.KERNEL_RAMDISK) |
291 | |
292 | ramdisk = None |
293 | if instance.ramdisk_id: |
294 | ramdisk = VMHelper.fetch_image(self._session, instance.id, |
295 | - instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK) |
296 | - |
297 | - use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id, |
298 | - vdi_ref, disk_image_type, instance.os_type) |
299 | - vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk, |
300 | - use_pv_kernel) |
301 | - |
302 | + instance.ramdisk_id, user, project, |
303 | + ImageType.KERNEL_RAMDISK) |
304 | + |
305 | + # Create the VM ref and attach the first disk |
306 | + first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', |
307 | + vdis[0]['vdi_uuid']) |
308 | + use_pv_kernel = VMHelper.determine_is_pv(self._session, |
309 | + instance.id, first_vdi_ref, disk_image_type, |
310 | + instance.os_type) |
311 | + vm_ref = VMHelper.create_vm(self._session, instance, |
312 | + kernel, ramdisk, use_pv_kernel) |
313 | VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, |
314 | - vdi_ref=vdi_ref, userdevice=0, bootable=True) |
315 | + vdi_ref=first_vdi_ref, userdevice=0, bootable=True) |
316 | + |
317 | + # Attach any other disks |
318 | + # userdevice 1 is reserved for rescue |
319 | + userdevice = 2 |
320 | + for vdi in vdis[1:]: |
321 | + # vdi['vdi_type'] is either 'os' or 'swap', but we don't |
322 | + # really care what it is right here. |
323 | + vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', |
324 | + vdi['vdi_uuid']) |
325 | + VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, |
326 | + vdi_ref=vdi_ref, userdevice=userdevice, |
327 | + bootable=False) |
328 | + userdevice += 1 |
329 | |
330 | # TODO(tr3buchet) - check to make sure we have network info, otherwise |
331 | # create it now. This goes away once nova-multi-nic hits. |
332 | @@ -172,7 +189,7 @@ |
333 | # Alter the image before VM start for, e.g. network injection |
334 | if FLAGS.xenapi_inject_image: |
335 | VMHelper.preconfigure_instance(self._session, instance, |
336 | - vdi_ref, network_info) |
337 | + first_vdi_ref, network_info) |
338 | |
339 | self.create_vifs(vm_ref, network_info) |
340 | self.inject_network_info(instance, network_info, vm_ref) |
341 | |
342 | === modified file 'plugins/xenserver/xenapi/etc/xapi.d/plugins/glance' |
343 | --- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance 2011-05-20 21:45:19 +0000 |
344 | +++ plugins/xenserver/xenapi/etc/xapi.d/plugins/glance 2011-05-26 20:22:46 +0000 |
345 | @@ -22,6 +22,10 @@ |
346 | # |
347 | |
348 | import httplib |
349 | +try: |
350 | + import json |
351 | +except ImportError: |
352 | + import simplejson as json |
353 | import os |
354 | import os.path |
355 | import pickle |
356 | @@ -87,8 +91,8 @@ |
357 | conn.close() |
358 | |
359 | |
360 | -def _fixup_vhds(sr_path, staging_path, uuid_stack): |
361 | - """Fixup the downloaded VHDs before we move them into the SR. |
362 | +def _import_vhds(sr_path, staging_path, uuid_stack): |
363 | + """Import the VHDs found in the staging path. |
364 | |
365 | We cannot extract VHDs directly into the SR since they don't yet have |
366 | UUIDs, aren't properly associated with each other, and would be subject to |
367 | @@ -98,16 +102,25 @@ |
368 | To avoid these we problems, we use a staging area to fixup the VHDs before |
369 | moving them into the SR. The steps involved are: |
370 | |
371 | - 1. Extracting tarball into staging area |
372 | + 1. Extracting tarball into staging area (done prior to this call) |
373 | |
374 | 2. Renaming VHDs to use UUIDs ('snap.vhd' -> 'ffff-aaaa-...vhd') |
375 | |
376 | - 3. Linking the two VHDs together |
377 | + 3. Linking VHDs together if there's a snap.vhd |
378 | |
379 | 4. Pseudo-atomically moving the images into the SR. (It's not really |
380 | - atomic because it takes place as two os.rename operations; however, |
381 | - the chances of an SR.scan occuring between the two rename() |
382 | + atomic because it takes place as multiple os.rename operations; |
383 | + however, the chances of an SR.scan occuring between the rename()s |
384 | invocations is so small that we can safely ignore it) |
385 | + |
386 | + Returns: A list of VDIs. Each list element is a dictionary containing |
387 | + information about the VHD. Dictionary keys are: |
388 | + 1. "vdi_type" - The type of VDI. Currently they can be "os_disk" or |
389 | + "swap" |
390 | + 2. "vdi_uuid" - The UUID of the VDI |
391 | + |
392 | + Example return: [{"vdi_type": "os_disk","vdi_uuid": "ffff-aaa..vhd"}, |
393 | + {"vdi_type": "swap","vdi_uuid": "ffff-bbb..vhd"}] |
394 | """ |
395 | def rename_with_uuid(orig_path): |
396 | """Rename VHD using UUID so that it will be recognized by SR on a |
397 | @@ -158,27 +171,59 @@ |
398 | "VHD %(path)s is marked as hidden without child" % |
399 | locals()) |
400 | |
401 | - orig_base_copy_path = os.path.join(staging_path, 'image.vhd') |
402 | - if not os.path.exists(orig_base_copy_path): |
403 | + def prepare_if_exists(staging_path, vhd_name, parent_path=None): |
404 | + """ |
405 | + Check for existance of a particular VHD in the staging path and |
406 | + preparing it for moving into the SR. |
407 | + |
408 | + Returns: Tuple of (Path to move into the SR, VDI_UUID) |
409 | + None, if the vhd_name doesn't exist in the staging path |
410 | + |
411 | + If the VHD exists, we will do the following: |
412 | + 1. Rename it with a UUID. |
413 | + 2. If parent_path exists, we'll link up the VHDs. |
414 | + """ |
415 | + orig_path = os.path.join(staging_path, vhd_name) |
416 | + if not os.path.exists(orig_path): |
417 | + return None |
418 | + new_path, vdi_uuid = rename_with_uuid(orig_path) |
419 | + if parent_path: |
420 | + # NOTE(sirp): this step is necessary so that an SR scan won't |
421 | + # delete the base_copy out from under us (since it would be |
422 | + # orphaned) |
423 | + link_vhds(new_path, parent_path) |
424 | + return (new_path, vdi_uuid) |
425 | + |
426 | + vdi_return_list = [] |
427 | + paths_to_move = [] |
428 | + |
429 | + image_info = prepare_if_exists(staging_path, 'image.vhd') |
430 | + if not image_info: |
431 | raise Exception("Invalid image: image.vhd not present") |
432 | |
433 | - base_copy_path, base_copy_uuid = rename_with_uuid(orig_base_copy_path) |
434 | + paths_to_move.append(image_info[0]) |
435 | |
436 | - vdi_uuid = base_copy_uuid |
437 | - orig_snap_path = os.path.join(staging_path, 'snap.vhd') |
438 | - if os.path.exists(orig_snap_path): |
439 | - snap_path, snap_uuid = rename_with_uuid(orig_snap_path) |
440 | - vdi_uuid = snap_uuid |
441 | - # NOTE(sirp): this step is necessary so that an SR scan won't |
442 | - # delete the base_copy out from under us (since it would be |
443 | - # orphaned) |
444 | - link_vhds(snap_path, base_copy_path) |
445 | - move_into_sr(snap_path) |
446 | + snap_info = prepare_if_exists(staging_path, 'snap.vhd', |
447 | + image_info[0]) |
448 | + if snap_info: |
449 | + paths_to_move.append(snap_info[0]) |
450 | + # We return this snap as the VDI instead of image.vhd |
451 | + vdi_return_list.append(dict(vdi_type="os", vdi_uuid=snap_info[1])) |
452 | else: |
453 | - assert_vhd_not_hidden(base_copy_path) |
454 | - |
455 | - move_into_sr(base_copy_path) |
456 | - return vdi_uuid |
457 | + assert_vhd_not_hidden(image_info[0]) |
458 | + # If there's no snap, we return the image.vhd UUID |
459 | + vdi_return_list.append(dict(vdi_type="os", vdi_uuid=image_info[1])) |
460 | + |
461 | + swap_info = prepare_if_exists(staging_path, 'swap.vhd') |
462 | + if swap_info: |
463 | + assert_vhd_not_hidden(swap_info[0]) |
464 | + paths_to_move.append(swap_info[0]) |
465 | + vdi_return_list.append(dict(vdi_type="swap", vdi_uuid=swap_info[1])) |
466 | + |
467 | + for path in paths_to_move: |
468 | + move_into_sr(path) |
469 | + |
470 | + return vdi_return_list |
471 | |
472 | |
473 | def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids): |
474 | @@ -324,8 +369,9 @@ |
475 | try: |
476 | _download_tarball(sr_path, staging_path, image_id, glance_host, |
477 | glance_port) |
478 | - vdi_uuid = _fixup_vhds(sr_path, staging_path, uuid_stack) |
479 | - return vdi_uuid |
480 | + # Right now, it's easier to return a single string via XenAPI, |
481 | + # so we'll json encode the list of VHDs. |
482 | + return json.dumps(_import_vhds(sr_path, staging_path, uuid_stack)) |
483 | finally: |
484 | _cleanup_staging_area(staging_path) |
485 |