This looks cool. It would be nice to have similar functionality for libvirt/kvm.
Vish
On May 26, 2011, at 12:46 PM, Chris Behrens wrote:
> Chris Behrens has proposed merging lp:~cbehrens/nova/swapdisk into lp:nova.
>
> Requested reviews:
> Nova Core (nova-core)
>
> For more details, see:
> https://code.launchpad.net/~cbehrens/nova/swapdisk/+merge/62549
>
> Essentially adds support for wiring up a swap disk when building.
>
> Modifies the glance plugin to check for a swap.vhd. Glance's download_vhd now returns a list of dictionaries describing the VHDs found in the image, and all return values from _fetch_image calls in xenapi have been updated accordingly.
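>
> For example, download_vhd now hands back a JSON-encoded string that the xenapi side decodes into that list (a minimal sketch; the UUIDs below are made up):
>
>     import json
>
>     # What the glance plugin returns through wait_for_task()
>     result = json.dumps([
>         {'vdi_type': 'os', 'vdi_uuid': '11111111-aaaa-4bbb-8ccc-000000000001'},
>         {'vdi_type': 'swap', 'vdi_uuid': '22222222-bbbb-4ccc-8ddd-000000000002'},
>     ])
>
>     vdis = json.loads(result)
>     root_vdi_uuid = vdis[0]['vdi_uuid']  # the first entry is always the OS disk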
>
> One can now build a .ova for glance that contains an image.vhd and a swap.vhd file.
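>
> As a rough sketch of assembling such an .ova (assuming the two VHDs sit in the current directory and that a plain tarball is acceptable; adjust the mode to whatever the plugin's _download_tarball actually expects):
>
>     import tarfile
>
>     # Bundle the OS and swap VHDs into a single .ova for glance.
>     # Use 'w:gz' instead of 'w' if the plugin expects a gzipped tarball.
>     ova = tarfile.open('instance.ova', 'w')
>     try:
>         ova.add('image.vhd')
>         ova.add('swap.vhd')
>     finally:
>         ova.close()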
>
> When a VM is created, vmops iterates through the list and creates VBDs for all of the VDIs found.
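>
> Roughly, the attach loop in vmops looks like this (simplified from the diff below; userdevice 1 is skipped because it is reserved for rescue):
>
>     userdevice = 2
>     for vdi in vdis[1:]:
>         vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
>                                             vdi['vdi_uuid'])
>         VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
>                             vdi_ref=vdi_ref, userdevice=userdevice,
>                             bootable=False)
>         userdevice += 1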
>
> Added a test for this, too, which required a slight fix to xenapi's fake.py.
> --
> https://code.launchpad.net/~cbehrens/nova/swapdisk/+merge/62549
> You are subscribed to branch lp:nova.
> === modified file 'nova/tests/test_xenapi.py'
> --- nova/tests/test_xenapi.py 2011-05-13 16:45:42 +0000
> +++ nova/tests/test_xenapi.py 2011-05-26 19:46:27 +0000
> @@ -395,6 +395,29 @@
> os_type="linux")
> self.check_vm_params_for_linux()
>
> + def test_spawn_vhd_glance_swapdisk(self):
> + # Change the default host_call_plugin to one that'll return
> + # a swap disk
> + orig_func = stubs.FakeSessionForVMTests.host_call_plugin
> +
> + stubs.FakeSessionForVMTests.host_call_plugin = \
> + stubs.FakeSessionForVMTests.host_call_plugin_swap
> +
> + try:
> + # We'll steal the above glance linux test
> + self.test_spawn_vhd_glance_linux()
> + finally:
> + # Make sure to put this back
> + stubs.FakeSessionForVMTests.host_call_plugin = orig_func
> +
> + # We should have 2 VBDs.
> + self.assertEqual(len(self.vm['VBDs']), 2)
> + # Now test that we have 1.
> + self.tearDown()
> + self.setUp()
> + self.test_spawn_vhd_glance_linux()
> + self.assertEqual(len(self.vm['VBDs']), 1)
> +
> def test_spawn_vhd_glance_windows(self):
> FLAGS.xenapi_image_service = 'glance'
> self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
>
> === modified file 'nova/tests/xenapi/stubs.py'
> --- nova/tests/xenapi/stubs.py 2011-05-13 16:47:18 +0000
> +++ nova/tests/xenapi/stubs.py 2011-05-26 19:46:27 +0000
> @@ -17,6 +17,7 @@
> """Stubouts, mocks and fixtures for the test suite"""
>
> import eventlet
> +import json
> from nova.virt import xenapi_conn
> from nova.virt.xenapi import fake
> from nova.virt.xenapi import volume_utils
> @@ -37,7 +38,7 @@
> sr_ref=sr_ref, sharable=False)
> vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
> vdi_uuid = vdi_rec['uuid']
> - return vdi_uuid
> + return [dict(vdi_type='os', vdi_uuid=vdi_uuid)]
>
> stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
>
> @@ -132,11 +133,30 @@
> def __init__(self, uri):
> super(FakeSessionForVMTests, self).__init__(uri)
>
> - def host_call_plugin(self, _1, _2, _3, _4, _5):
> - sr_ref = fake.get_all('SR')[0]
> - vdi_ref = fake.create_vdi('', False, sr_ref, False)
> - vdi_rec = fake.get_record('VDI', vdi_ref)
> - return '%s' % vdi_rec['uuid']
> + def host_call_plugin(self, _1, _2, plugin, method, _5):
> + sr_ref = fake.get_all('SR')[0]
> + vdi_ref = fake.create_vdi('', False, sr_ref, False)
> + vdi_rec = fake.get_record('VDI', vdi_ref)
> + if plugin == "glance" and method == "download_vhd":
> + ret_str = json.dumps([dict(vdi_type='os',
> + vdi_uuid=vdi_rec['uuid'])])
> + else:
> + ret_str = vdi_rec['uuid']
> + return '%s' % ret_str
> +
> + def host_call_plugin_swap(self, _1, _2, plugin, method, _5):
> + sr_ref = fake.get_all('SR')[0]
> + vdi_ref = fake.create_vdi('', False, sr_ref, False)
> + vdi_rec = fake.get_record('VDI', vdi_ref)
> + if plugin == "glance" and method == "download_vhd":
> + swap_vdi_ref = fake.create_vdi('', False, sr_ref, False)
> + swap_vdi_rec = fake.get_record('VDI', swap_vdi_ref)
> + ret_str = json.dumps(
> + [dict(vdi_type='os', vdi_uuid=vdi_rec['uuid']),
> + dict(vdi_type='swap', vdi_uuid=swap_vdi_rec['uuid'])])
> + else:
> + ret_str = vdi_rec['uuid']
> + return '%s' % ret_str
>
> def VM_start(self, _1, ref, _2, _3):
> vm = fake.get_record('VM', ref)
>
> === modified file 'nova/virt/xenapi/fake.py'
> --- nova/virt/xenapi/fake.py 2011-04-18 22:00:39 +0000
> +++ nova/virt/xenapi/fake.py 2011-05-26 19:46:27 +0000
> @@ -159,7 +159,10 @@
> vbd_rec['device'] = ''
> vm_ref = vbd_rec['VM']
> vm_rec = _db_content['VM'][vm_ref]
> - vm_rec['VBDs'] = [vbd_ref]
> + if vm_rec.get('VBDs', None):
> + vm_rec['VBDs'].append(vbd_ref)
> + else:
> + vm_rec['VBDs'] = [vbd_ref]
>
> vm_name_label = _db_content['VM'][vm_ref]['name_label']
> vbd_rec['vm_name_label'] = vm_name_label
>
> === modified file 'nova/virt/xenapi/vm_utils.py'
> --- nova/virt/xenapi/vm_utils.py 2011-05-09 15:35:45 +0000
> +++ nova/virt/xenapi/vm_utils.py 2011-05-26 19:46:27 +0000
> @@ -19,6 +19,7 @@
> their attributes like VDIs, VIFs, as well as their lookup functions.
> """
>
> +import json
> import os
> import pickle
> import re
> @@ -376,6 +377,9 @@
> xenapi_image_service = ['glance', 'objectstore']
> glance_address = 'address for glance services'
> glance_port = 'port for glance services'
> +
> + Returns: A single filename if image_type is KERNEL_RAMDISK
> + A list of dictionaries that describe VDIs, otherwise
> """
> access = AuthManager().get_access_key(user, project)
>
> @@ -390,6 +394,10 @@
> @classmethod
> def _fetch_image_glance_vhd(cls, session, instance_id, image, access,
> image_type):
> + """Tell glance to download an image and put the VHDs into the SR
> +
> + Returns: A list of dictionaries that describe VDIs
> + """
> LOG.debug(_("Asking xapi to fetch vhd image %(image)s")
> % locals())
>
> @@ -408,18 +416,22 @@
>
> kwargs = {'params': pickle.dumps(params)}
> task = session.async_call_plugin('glance', 'download_vhd', kwargs)
> - vdi_uuid = session.wait_for_task(task, instance_id)
> + result = session.wait_for_task(task, instance_id)
> + vdis = json.loads(result)
> + for vdi in vdis:
> + LOG.debug(_("xapi 'download_vhd' returned VDI of "
> + "type '%(vdi_type)s' with UUID '%(vdi_uuid)s'") % vdi)
>
> cls.scan_sr(session, instance_id, sr_ref)
>
> + # Pull out the UUID of the first VDI
> + vdi_uuid = vdis[0]['vdi_uuid']
> # Set the name-label to ease debugging
> vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
> - name_label = get_name_label_for_image(image)
> - session.get_xenapi().VDI.set_name_label(vdi_ref, name_label)
> + primary_name_label = get_name_label_for_image(image)
> + session.get_xenapi().VDI.set_name_label(vdi_ref, primary_name_label)
>
> - LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(vdi_uuid)s")
> - % locals())
> - return vdi_uuid
> + return vdis
>
> @classmethod
> def _fetch_image_glance_disk(cls, session, instance_id, image, access,
> @@ -431,6 +443,8 @@
> plugin; instead, it streams the disks through domU to the VDI
> directly.
>
> + Returns: A single filename if image_type is KERNEL_RAMDISK
> + A list of dictionaries that describe VDIs, otherwise
> """
> # FIXME(sirp): Since the Glance plugin seems to be required for the
> # VHD disk, it may be worth using the plugin for both VHD and RAW and
> @@ -476,7 +490,8 @@
> LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref)
> return filename
> else:
> - return session.get_xenapi().VDI.get_uuid(vdi_ref)
> + vdi_uuid = session.get_xenapi().VDI.get_uuid(vdi_ref)
> + return [dict(vdi_type='os', vdi_uuid=vdi_uuid)]
>
> @classmethod
> def determine_disk_image_type(cls, instance):
> @@ -535,6 +550,11 @@
> @classmethod
> def _fetch_image_glance(cls, session, instance_id, image, access,
> image_type):
> + """Fetch image from glance based on image type.
> +
> + Returns: A single filename if image_type is KERNEL_RAMDISK
> + A list of dictionaries that describe VDIs, otherwise
> + """
> if image_type == ImageType.DISK_VHD:
> return cls._fetch_image_glance_vhd(
> session, instance_id, image, access, image_type)
> @@ -545,6 +565,11 @@
> @classmethod
> def _fetch_image_objectstore(cls, session, instance_id, image, access,
> secret, image_type):
> + """Fetch an image from objectstore.
> +
> + Returns: A single filename if image_type is KERNEL_RAMDISK
> + A list of dictionaries that describe VDIs, otherwise
> + """
> url = images.image_url(image)
> LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
> if image_type == ImageType.KERNEL_RAMDISK:
> @@ -562,8 +587,10 @@
> if image_type == ImageType.DISK_RAW:
> args['raw'] = 'true'
> task = session.async_call_plugin('objectstore', fn, args)
> - uuid = session.wait_for_task(task, instance_id)
> - return uuid
> + uuid_or_fn = session.wait_for_task(task, instance_id)
> + if image_type != ImageType.KERNEL_RAMDISK:
> + return [dict(vdi_type='os', vdi_uuid=uuid_or_fn)]
> + return uuid_or_fn
>
> @classmethod
> def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type,
>
> === modified file 'nova/virt/xenapi/vmops.py'
> --- nova/virt/xenapi/vmops.py 2011-05-25 17:55:51 +0000
> +++ nova/virt/xenapi/vmops.py 2011-05-26 19:46:27 +0000
> @@ -91,7 +91,8 @@
> def finish_resize(self, instance, disk_info):
> vdi_uuid = self.link_disks(instance, disk_info['base_copy'],
> disk_info['cow'])
> - vm_ref = self._create_vm(instance, vdi_uuid)
> + vm_ref = self._create_vm(instance,
> + [dict(vdi_type='os', vdi_uuid=vdi_uuid)])
> self.resize_instance(instance, vdi_uuid)
> self._spawn(instance, vm_ref)
>
> @@ -105,24 +106,25 @@
> LOG.debug(_("Starting instance %s"), instance.name)
> self._session.call_xenapi('VM.start', vm_ref, False, False)
>
> - def _create_disk(self, instance):
> + def _create_disks(self, instance):
> user = AuthManager().get_user(instance.user_id)
> project = AuthManager().get_project(instance.project_id)
> disk_image_type = VMHelper.determine_disk_image_type(instance)
> - vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
> - instance.image_id, user, project, disk_image_type)
> - return vdi_uuid
> + vdis = VMHelper.fetch_image(self._session,
> + instance.id, instance.image_id, user, project,
> + disk_image_type)
> + return vdis
>
> def spawn(self, instance, network_info=None):
> - vdi_uuid = self._create_disk(instance)
> - vm_ref = self._create_vm(instance, vdi_uuid, network_info)
> + vdis = self._create_disks(instance)
> + vm_ref = self._create_vm(instance, vdis, network_info)
> self._spawn(instance, vm_ref)
>
> def spawn_rescue(self, instance):
> """Spawn a rescue instance."""
> self.spawn(instance)
>
> - def _create_vm(self, instance, vdi_uuid, network_info=None):
> + def _create_vm(self, instance, vdis, network_info=None):
> """Create VM instance."""
> instance_name = instance.name
> vm_ref = VMHelper.lookup(self._session, instance_name)
> @@ -141,28 +143,43 @@
> user = AuthManager().get_user(instance.user_id)
> project = AuthManager().get_project(instance.project_id)
>
> - # Are we building from a pre-existing disk?
> - vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
> -
> disk_image_type = VMHelper.determine_disk_image_type(instance)
>
> kernel = None
> if instance.kernel_id:
> kernel = VMHelper.fetch_image(self._session, instance.id,
> - instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
> + instance.kernel_id, user, project,
> + ImageType.KERNEL_RAMDISK)
>
> ramdisk = None
> if instance.ramdisk_id:
> ramdisk = VMHelper.fetch_image(self._session, instance.id,
> - instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
> -
> - use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id,
> - vdi_ref, disk_image_type, instance.os_type)
> - vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk,
> - use_pv_kernel)
> -
> + instance.ramdisk_id, user, project,
> + ImageType.KERNEL_RAMDISK)
> +
> + # Create the VM ref and attach the first disk
> + first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
> + vdis[0]['vdi_uuid'])
> + use_pv_kernel = VMHelper.determine_is_pv(self._session,
> + instance.id, first_vdi_ref, disk_image_type,
> + instance.os_type)
> + vm_ref = VMHelper.create_vm(self._session, instance,
> + kernel, ramdisk, use_pv_kernel)
> VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
> - vdi_ref=vdi_ref, userdevice=0, bootable=True)
> + vdi_ref=first_vdi_ref, userdevice=0, bootable=True)
> +
> + # Attach any other disks
> + # userdevice 1 is reserved for rescue
> + userdevice = 2
> + for vdi in vdis[1:]:
> + # vdi['vdi_type'] is either 'os' or 'swap', but we don't
> + # really care what it is right here.
> + vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
> + vdi['vdi_uuid'])
> + VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
> + vdi_ref=vdi_ref, userdevice=userdevice,
> + bootable=False)
> + userdevice += 1
>
> # TODO(tr3buchet) - check to make sure we have network info, otherwise
> # create it now. This goes away once nova-multi-nic hits.
> @@ -172,7 +189,7 @@
> # Alter the image before VM start for, e.g. network injection
> if FLAGS.xenapi_inject_image:
> VMHelper.preconfigure_instance(self._session, instance,
> - vdi_ref, network_info)
> + first_vdi_ref, network_info)
>
> self.create_vifs(vm_ref, network_info)
> self.inject_network_info(instance, network_info, vm_ref)
>
> === modified file 'plugins/xenserver/xenapi/etc/xapi.d/plugins/glance'
> --- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance 2011-05-20 21:45:19 +0000
> +++ plugins/xenserver/xenapi/etc/xapi.d/plugins/glance 2011-05-26 19:46:27 +0000
> @@ -22,6 +22,10 @@
> #
>
> import httplib
> +try:
> + import json
> +except ImportError:
> + import simplejson as json
> import os
> import os.path
> import pickle
> @@ -87,8 +91,8 @@
> conn.close()
>
>
> -def _fixup_vhds(sr_path, staging_path, uuid_stack):
> - """Fixup the downloaded VHDs before we move them into the SR.
> +def _import_vhds(sr_path, staging_path, uuid_stack):
> + """Import the VHDs found in the staging path.
>
> We cannot extract VHDs directly into the SR since they don't yet have
> UUIDs, aren't properly associated with each other, and would be subject to
> @@ -98,16 +102,25 @@
> To avoid these problems, we use a staging area to fixup the VHDs before
> moving them into the SR. The steps involved are:
>
> - 1. Extracting tarball into staging area
> + 1. Extracting tarball into staging area (done prior to this call)
>
> 2. Renaming VHDs to use UUIDs ('snap.vhd' -> 'ffff-aaaa-...vhd')
>
> - 3. Linking the two VHDs together
> + 3. Linking VHDs together if there's a snap.vhd
>
> 4. Pseudo-atomically moving the images into the SR. (It's not really
> - atomic because it takes place as two os.rename operations; however,
> - the chances of an SR.scan occuring between the two rename()
> + atomic because it takes place as multiple os.rename operations;
> + however, the chances of an SR.scan occurring between the rename()
> invocations is so small that we can safely ignore it)
> +
> + Returns: A list of VDIs. Each list element is a dictionary containing
> + information about the VHD. Dictionary keys are:
> + 1. "vdi_type" - The type of VDI. Currently they can be "os_disk" or
> + "swap"
> + 2. "vdi_uuid" - The UUID of the VDI
> +
> + Example return: [{"vdi_type": "os", "vdi_uuid": "ffff-aaa..vhd"},
> + {"vdi_type": "swap", "vdi_uuid": "ffff-bbb..vhd"}]
> """
> def rename_with_uuid(orig_path):
> """Rename VHD using UUID so that it will be recognized by SR on a
> @@ -158,27 +171,57 @@
> "VHD %(path)s is marked as hidden without child" %
> locals())
>
> - orig_base_copy_path = os.path.join(staging_path, 'image.vhd')
> - if not os.path.exists(orig_base_copy_path):
> + def prepare_if_exists(staging_path, vhd_name, parent_path=None):
> + """
> + Check for the existence of a particular VHD in the staging path and
> + prepare it for moving into the SR.
> +
> + Returns: Tuple of (Path to move into the SR, VDI_UUID)
> + None, if the vhd_name doesn't exist in the staging path
> +
> + If the VHD exists, we will do the following:
> + 1. Rename it with a UUID.
> + 2. If parent_path exists, we'll link up the VHDs.
> + """
> + orig_path = os.path.join(staging_path, vhd_name)
> + if not os.path.exists(orig_path):
> + return None
> + new_path, vdi_uuid = rename_with_uuid(orig_path)
> + if parent_path:
> + # NOTE(sirp): this step is necessary so that an SR scan won't
> + # delete the base_copy out from under us (since it would be
> + # orphaned)
> + link_vhds(new_path, parent_path)
> + return (new_path, vdi_uuid)
> +
> + vdi_return_list = []
> + paths_to_move = []
> +
> + image_info = prepare_if_exists(staging_path, 'image.vhd')
> + if not image_info:
> raise Exception("Invalid image: image.vhd not present")
>
> - base_copy_path, base_copy_uuid = rename_with_uuid(orig_base_copy_path)
> + paths_to_move.append(image_info[0])
>
> - vdi_uuid = base_copy_uuid
> - orig_snap_path = os.path.join(staging_path, 'snap.vhd')
> - if os.path.exists(orig_snap_path):
> - snap_path, snap_uuid = rename_with_uuid(orig_snap_path)
> - vdi_uuid = snap_uuid
> - # NOTE(sirp): this step is necessary so that an SR scan won't
> - # delete the base_copy out from under us (since it would be
> - # orphaned)
> - link_vhds(snap_path, base_copy_path)
> - move_into_sr(snap_path)
> + snap_info = prepare_if_exists(staging_path, 'snap.vhd',
> + image_info[0])
> + if snap_info:
> + paths_to_move.append(snap_info[0])
> + # We return this snap as the VDI instead of image.vhd
> + vdi_return_list.append(dict(vdi_type="os", vdi_uuid=snap_info[1]))
> else:
> - assert_vhd_not_hidden(base_copy_path)
> -
> - move_into_sr(base_copy_path)
> - return vdi_uuid
> + # If there's no snap, we return the image.vhd UUID
> + vdi_return_list.append(dict(vdi_type="os", vdi_uuid=image_info[1]))
> +
> + swap_info = prepare_if_exists(staging_path, 'swap.vhd')
> + if swap_info:
> + paths_to_move.append(swap_info[0])
> + vdi_return_list.append(dict(vdi_type="swap", vdi_uuid=swap_info[1]))
> +
> + for path in paths_to_move:
> + move_into_sr(path)
> +
> + return vdi_return_list
>
>
> def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids):
> @@ -324,8 +367,9 @@
> try:
> _download_tarball(sr_path, staging_path, image_id, glance_host,
> glance_port)
> - vdi_uuid = _fixup_vhds(sr_path, staging_path, uuid_stack)
> - return vdi_uuid
> + # Right now, it's easier to return a single string via XenAPI,
> + # so we'll json encode the list of VHDs.
> + return json.dumps(_import_vhds(sr_path, staging_path, uuid_stack))
> finally:
> _cleanup_staging_area(staging_path)
>
>