Now here are the unit tests (with some fixes), so I think it's ready for merge now. The next steps are to support the following: describe instance attribute; get metadata for bundle volume; swap/ephemeral device support. On Wed, Jun 22, 2011 at 05:01:54AM -0000, Isaku Yamahata wrote: > Isaku Yamahata has proposed merging lp:~yamahata/nova/boot-from-volume-1 into lp:nova with lp:~yamahata/nova/boot-from-volume-0 as a prerequisite. > > Requested reviews: > Nova Core (nova-core) > > For more details, see: > https://code.launchpad.net/~yamahata/nova/boot-from-volume-1/+merge/64825 > > This is early review request before going further. > If this direction is okay, I'll add unit tests and then move on to the next step. > > This change adds the basic boot-from-volume support to the image service. > Specifically following API will supports --block-device-mapping with volume/snapshot and root device name > - register image > - describe image > - create image(newly support) > > At the moment swap and ephemeral aren't supported. Are these wanted? > > NOTE > - bundle volume is broken > > TODO > - unit tests > > Next step > - describe instance attribute with euca command > - get metadata for bundle volume > - swap/ephemeral device support(Is this wanted? or unnecessary?) > -- > https://code.launchpad.net/~yamahata/nova/boot-from-volume-1/+merge/64825 > You are the owner of lp:~yamahata/nova/boot-from-volume-1. 
> === modified file 'nova/api/ec2/__init__.py' > --- nova/api/ec2/__init__.py 2011-06-15 16:46:24 +0000 > +++ nova/api/ec2/__init__.py 2011-06-22 04:55:48 +0000 > @@ -262,6 +262,8 @@ > 'TerminateInstances': ['projectmanager', 'sysadmin'], > 'RebootInstances': ['projectmanager', 'sysadmin'], > 'UpdateInstance': ['projectmanager', 'sysadmin'], > + 'StartInstances': ['projectmanager', 'sysadmin'], > + 'StopInstances': ['projectmanager', 'sysadmin'], > 'DeleteVolume': ['projectmanager', 'sysadmin'], > 'DescribeImages': ['all'], > 'DeregisterImage': ['projectmanager', 'sysadmin'], > @@ -269,6 +271,7 @@ > 'DescribeImageAttribute': ['all'], > 'ModifyImageAttribute': ['projectmanager', 'sysadmin'], > 'UpdateImage': ['projectmanager', 'sysadmin'], > + 'CreateImage': ['projectmanager', 'sysadmin'], > }, > 'AdminController': { > # All actions have the same permission: ['none'] (the default) > @@ -325,13 +328,13 @@ > except exception.VolumeNotFound as ex: > LOG.info(_('VolumeNotFound raised: %s'), unicode(ex), > context=context) > - ec2_id = ec2utils.id_to_ec2_id(ex.volume_id, 'vol-%08x') > + ec2_id = ec2utils.id_to_ec2_vol_id(ex.volume_id) > message = _('Volume %s not found') % ec2_id > return self._error(req, context, type(ex).__name__, message) > except exception.SnapshotNotFound as ex: > LOG.info(_('SnapshotNotFound raised: %s'), unicode(ex), > context=context) > - ec2_id = ec2utils.id_to_ec2_id(ex.snapshot_id, 'snap-%08x') > + ec2_id = ec2utils.id_to_ec2_snap_id(ex.snapshot_id) > message = _('Snapshot %s not found') % ec2_id > return self._error(req, context, type(ex).__name__, message) > except exception.NotFound as ex: > > === modified file 'nova/api/ec2/cloud.py' > --- nova/api/ec2/cloud.py 2011-06-17 23:52:22 +0000 > +++ nova/api/ec2/cloud.py 2011-06-22 04:55:48 +0000 > @@ -27,6 +27,7 @@ > import os > import urllib > import tempfile > +import time > import shutil > > from nova import compute > @@ -75,6 +76,90 @@ > return {'private_key': private_key, 'fingerprint': 
fingerprint} > > > +# TODO(yamahata): hypervisor dependent default device name > +_DEFAULT_ROOT_DEVICE_NAME = '/dev/sda1' > + > + > +def _parse_block_device_mapping(bdm): > + """Parse BlockDeviceMappingItemType into flat hash > + BlockDevicedMapping..DeviceName > + BlockDevicedMapping..Ebs.SnapshotId > + BlockDevicedMapping..Ebs.VolumeSize > + BlockDevicedMapping..Ebs.DeleteOnTermination > + BlockDevicedMapping..Ebs.NoDevice > + BlockDevicedMapping..VirtualName > + => remove .Ebs and allow volume id in SnapshotId > + """ > + ebs = bdm.pop('ebs', None) > + if ebs: > + ec2_id = ebs.pop('snapshot_id') > + id = ec2utils.ec2_id_to_id(ec2_id) > + if ec2_id.startswith('snap-'): > + bdm['snapshot_id'] = id > + elif ec2_id.startswith('vol-'): > + bdm['volume_id'] = id > + ebs.setdefault('delete_on_termination', True) > + bdm.update(ebs) > + return bdm > + > + > +def _format_block_device_mapping(bdm): > + """Contruct BlockDeviceMappingItemType > + {'device_name': '...', 'Snapshot_Id': , ...} > + => BlockDeviceMappingItemType > + """ > + keys = (('deviceName', 'device_name'), > + ('virtualName', 'virtual_name')) > + item = {} > + for name, k in keys: > + if k in bdm: > + item[name] = bdm[k] > + if bdm.get('no_device'): > + item['noDevice'] = True > + if ('snapshot_id' in bdm) or ('volume_id' in bdm): > + ebs_keys = (('snapshotId', 'snapshot_id'), > + ('snapshotId', 'volume_id'), # snapshotId is abused > + ('volumeSize', 'volume_size'), > + ('deleteOnTermination', 'delete_on_termination')) > + ebs = {} > + for name, k in ebs_keys: > + if k in bdm: > + if k == 'snapshot_id': > + ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k]) > + elif k == 'volume_id': > + ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k]) > + else: > + ebs[name] = bdm[k] > + assert 'snapshotId' in ebs > + item['ebs'] = ebs > + return item > + > + > +def _format_mappings(properties, result): > + """Format multiple BlockDeviceMappingItemType""" > + mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']} > 
+ for m in properties.get('mappings', []) > + if (m['virtual'] == 'swap' or > + m['virtual'].startswith('ephemeral'))] > + > + block_device_mapping = [_format_block_device_mapping(bdm) for bdm in > + properties.get('block_device_mapping', [])] > + > + # NOTE(yamahata): overwrite mappings with block_device_mapping > + for bdm in block_device_mapping: > + for i in range(len(mappings)): > + if bdm['deviceName'] == mappings[i]['deviceName']: > + del mappings[i] > + break > + mappings.append(bdm) > + > + # NOTE(yamahata): trim ebs.no_device == true. Is this necessary? > + mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))] > + > + if mappings: > + result['blockDeviceMapping'] = mappings > + > + > class CloudController(object): > """ CloudController provides the critical dispatch between > inbound API calls through the endpoint and messages > @@ -177,7 +262,7 @@ > # TODO(vish): replace with real data > 'ami': 'sda1', > 'ephemeral0': 'sda2', > - 'root': '/dev/sda1', > + 'root': _DEFAULT_ROOT_DEVICE_NAME, > 'swap': 'sda3'}, > 'hostname': hostname, > 'instance-action': 'none', > @@ -305,9 +390,8 @@ > > def _format_snapshot(self, context, snapshot): > s = {} > - s['snapshotId'] = ec2utils.id_to_ec2_id(snapshot['id'], 'snap-%08x') > - s['volumeId'] = ec2utils.id_to_ec2_id(snapshot['volume_id'], > - 'vol-%08x') > + s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id']) > + s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id']) > s['status'] = snapshot['status'] > s['startTime'] = snapshot['created_at'] > s['progress'] = snapshot['progress'] > @@ -641,7 +725,7 @@ > instance_data = '%s[%s]' % (instance_ec2_id, > volume['instance']['host']) > v = {} > - v['volumeId'] = ec2utils.id_to_ec2_id(volume['id'], 'vol-%08x') > + v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id']) > v['status'] = volume['status'] > v['size'] = volume['size'] > v['availabilityZone'] = volume['availability_zone'] > @@ -663,8 +747,7 @@ > else: > v['attachmentSet'] = 
[{}] > if volume.get('snapshot_id') != None: > - v['snapshotId'] = ec2utils.id_to_ec2_id(volume['snapshot_id'], > - 'snap-%08x') > + v['snapshotId'] = ec2utils.id_to_ec2_snap_id(volume['snapshot_id']) > else: > v['snapshotId'] = None > > @@ -727,7 +810,7 @@ > 'instanceId': ec2utils.id_to_ec2_id(instance_id), > 'requestId': context.request_id, > 'status': volume['attach_status'], > - 'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')} > + 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} > > def detach_volume(self, context, volume_id, **kwargs): > volume_id = ec2utils.ec2_id_to_id(volume_id) > @@ -739,7 +822,7 @@ > 'instanceId': ec2utils.id_to_ec2_id(instance['id']), > 'requestId': context.request_id, > 'status': volume['attach_status'], > - 'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')} > + 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} > > def _convert_to_set(self, lst, label): > if lst is None or lst == []: > @@ -763,6 +846,37 @@ > assert len(i) == 1 > return i[0] > > + def _format_instance_bdm(self, context, instance_id, root_device_name, > + result): > + """Format InstanceBlockDeviceMappingResponseItemType""" > + root_device_type = 'instance-store' > + mapping = [] > + for bdm in db.block_device_mapping_get_all_by_instance(context, > + instance_id): > + volume_id = bdm['volume_id'] > + if (volume_id is None or bdm['no_device']): > + continue > + > + if (bdm['device_name'] == root_device_name and > + (bdm['snapshot_id'] or bdm['volume_id'])): > + assert not bdm['virtual_name'] > + root_device_type = 'ebs' > + > + vol = self.volume_api.get(context, volume_id=volume_id) > + LOG.debug(_("vol = %s\n"), vol) > + # TODO(yamahata): volume attach time > + ebs = {'volumeId': volume_id, > + 'deleteOnTermination': bdm['delete_on_termination'], > + 'attachTime': vol['attach_time'] or '-', > + 'status': vol['status'], } > + res = {'deviceName': bdm['device_name'], > + 'ebs': ebs, } > + mapping.append(res) > + > + if mapping: > + 
result['blockDeviceMapping'] = mapping > + result['rootDeviceType'] = root_device_type > + > def _format_instances(self, context, instance_id=None, **kwargs): > # TODO(termie): this method is poorly named as its name does not imply > # that it will be making a variety of database calls > @@ -824,6 +938,10 @@ > i['amiLaunchIndex'] = instance['launch_index'] > i['displayName'] = instance['display_name'] > i['displayDescription'] = instance['display_description'] > + i['rootDeviceName'] = (instance['root_device_name'] or > + _DEFAULT_ROOT_DEVICE_NAME) > + self._format_instance_bdm(context, instance_id, > + i['rootDeviceName'], i) > host = instance['host'] > zone = self._get_availability_zone_by_host(context, host) > i['placement'] = {'availabilityZone': zone} > @@ -910,23 +1028,7 @@ > ramdisk = self._get_image(context, kwargs['ramdisk_id']) > kwargs['ramdisk_id'] = ramdisk['id'] > for bdm in kwargs.get('block_device_mapping', []): > - # NOTE(yamahata) > - # BlockDevicedMapping..DeviceName > - # BlockDevicedMapping..Ebs.SnapshotId > - # BlockDevicedMapping..Ebs.VolumeSize > - # BlockDevicedMapping..Ebs.DeleteOnTermination > - # BlockDevicedMapping..VirtualName > - # => remove .Ebs and allow volume id in SnapshotId > - ebs = bdm.pop('ebs', None) > - if ebs: > - ec2_id = ebs.pop('snapshot_id') > - id = ec2utils.ec2_id_to_id(ec2_id) > - if ec2_id.startswith('snap-'): > - bdm['snapshot_id'] = id > - elif ec2_id.startswith('vol-'): > - bdm['volume_id'] = id > - ebs.setdefault('delete_on_termination', True) > - bdm.update(ebs) > + _parse_block_device_mapping(bdm) > > image = self._get_image(context, kwargs['image_id']) > > @@ -1081,6 +1183,20 @@ > i['imageType'] = display_mapping.get(image_type) > i['isPublic'] = image.get('is_public') == True > i['architecture'] = image['properties'].get('architecture') > + > + properties = image['properties'] > + root_device_name = ec2utils.properties_root_device_name(properties) > + root_device_type = 'instance-store' > + for bdm in 
properties.get('block_device_mapping', []): > + if (bdm.get('device_name') == root_device_name and > + ('snapshot_id' in bdm or 'volume_id' in bdm) and > + not bdm.get('no_device')): > + root_device_type = 'ebs' > + i['rootDeviceName'] = (root_device_name or _DEFAULT_ROOT_DEVICE_NAME) > + i['rootDeviceType'] = root_device_type > + > + _format_mappings(properties, i) > + > return i > > def describe_images(self, context, image_id=None, **kwargs): > @@ -1105,30 +1221,64 @@ > self.image_service.delete(context, internal_id) > return {'imageId': image_id} > > + def _register_image(self, context, metadata): > + image = self.image_service.create(context, metadata) > + image_type = self._image_type(image.get('container_format')) > + image_id = self.image_ec2_id(image['id'], image_type) > + return image_id > + > def register_image(self, context, image_location=None, **kwargs): > if image_location is None and 'name' in kwargs: > image_location = kwargs['name'] > metadata = {'properties': {'image_location': image_location}} > - image = self.image_service.create(context, metadata) > - image_type = self._image_type(image.get('container_format')) > - image_id = self.image_ec2_id(image['id'], > - image_type) > + > + if 'root_device_name' in kwargs: > + metadata['properties']['root_device_name'] = \ > + kwargs.get('root_device_name') > + > + mappings = [_parse_block_device_mapping(bdm) for bdm in > + kwargs.get('block_device_mapping', [])] > + if mappings: > + metadata['properties']['block_device_mapping'] = mappings > + > + image_id = self._register_image(context, metadata) > msg = _("Registered image %(image_location)s with" > " id %(image_id)s") % locals() > LOG.audit(msg, context=context) > return {'imageId': image_id} > > def describe_image_attribute(self, context, image_id, attribute, **kwargs): > - if attribute != 'launchPermission': > + def _block_device_mapping_attribute(image, result): > + _format_mappings(image['properties'], result) > + > + def 
_launch_permission_attribute(image, result): > + result['launchPermission'] = [] > + if image['is_public']: > + result['launchPermission'].append({'group': 'all'}) > + > + def _root_device_name_attribute(image, result): > + result['rootDeviceName'] = \ > + ec2utils.properties_root_device_name(image['properties']) > + if result['rootDeviceName'] is None: > + result['rootDeviceName'] = _DEFAULT_ROOT_DEVICE_NAME > + > + supported_attributes = { > + 'blockDeviceMapping': _block_device_mapping_attribute, > + 'launchPermission': _launch_permission_attribute, > + 'rootDeviceName': _root_device_name_attribute, > + } > + > + fn = supported_attributes.get(attribute) > + if fn is None: > raise exception.ApiError(_('attribute not supported: %s') > % attribute) > try: > image = self._get_image(context, image_id) > except exception.NotFound: > raise exception.ImageNotFound(image_id=image_id) > - result = {'imageId': image_id, 'launchPermission': []} > - if image['is_public']: > - result['launchPermission'].append({'group': 'all'}) > + > + result = {'imageId': image_id} > + fn(image, result) > return result > > def modify_image_attribute(self, context, image_id, attribute, > @@ -1159,3 +1309,104 @@ > internal_id = ec2utils.ec2_id_to_id(image_id) > result = self.image_service.update(context, internal_id, dict(kwargs)) > return result > + > + # TODO(yamahata): race condition > + # At the moment there is no way to prevent others from > + # manipulating instances/volumes/snapshots. > + # As other code doesn't take it into consideration, here we don't > + # care of it for now. 
Ostrich algorithm > + def create_image(self, context, instance_id, **kwargs): > + # NOTE(yamahata): name/description are ignored by register_image(), > + # do so here > + #description = kwargs.get('name') > + #description = kwargs.get('description') > + no_reboot = kwargs.get('no_reboot', False) > + > + ec2_instance_id = instance_id > + instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) > + instance = self.compute_api.get(context, instance_id) > + > + # stop the instance if necessary > + restart_instance = False > + if not no_reboot: > + state_description = instance['state_description'] > + > + # if the instance is in subtle state, refuse to proceed. > + if state_description not in ('running', 'stopping', 'stopped'): > + raise exception.InstanceNotRunning(instance_id=ec2_instance_id) > + > + if state_description == 'running': > + restart_instance = True > + self.compute_api.stop(context, instance_id=instance_id) > + > + # wait instance for really stopped > + while state_description != 'stopped': > + time.sleep(1) > + instance = self.compute_api.get(context, instance_id) > + state_description = instance['state_description'] > + # NOTE(yamahata): timeout and error? 
> + > + src_image = self._get_image(context, instance['image_ref']) > + properties = src_image['properties'] > + if instance['root_device_name']: > + properties['root_device_name'] = instance['root_device_name'] > + > + mapping = [] > + bdms = db.block_device_mapping_get_all_by_instance(context, > + instance_id) > + for bdm in bdms: > + if bdm.no_device: > + continue > + m = {} > + for attr in ('device_name', 'snapshot_id', 'volume_id', > + 'volume_size', 'delete_on_termination', 'no_device', > + 'virtual_name'): > + val = getattr(bdm, attr) > + if val is not None: > + m[attr] = val > + > + volume_id = m.get('volume_id') > + if m.get('snapshot_id') and volume_id: > + # create snapshot based on volume_id > + vol = self.volume_api.get(context, volume_id=volume_id) > + # NOTE(yamahata): Should we wait for snapshot creation? > + # Linux LVM snapshot creation completes in > + # short time, it doesn't matter for now. > + snapshot = self.volume_api.create_snapshot_force( > + context, volume_id=volume_id, name=vol['display_name'], > + description=vol['display_description']) > + m['snapshot_id'] = snapshot['id'] > + del m['volume_id'] > + > + if m: > + mapping.append(m) > + > + for m in properties.get('mappings', []): > + virtual_name = m['virtual'] > + if virtual_name in ('ami', 'root'): > + continue > + > + assert (virtual_name == 'swap' or > + virtual_name.startswith('ephemeral')) > + device_name = m['device'] > + if device_name in [b['device_name'] for b in mapping > + if not b.get('no_device', False)]: > + continue > + > + # NOTE(yamahata): swap and ephemeral devices are specified in > + # AMI, but disabled for this instance by user. > + # So disable those device by no_device. 
> + mapping.append({'device_name': device_name, 'no_device': True}) > + > + if mapping: > + properties['block_device_mapping'] = mapping > + > + for attr in ('status', 'location', 'id'): > + src_image.pop(attr, None) > + > + image_id = self._register_image(context, src_image) > + > + if restart_instance: > + self.compute_api.start(context, instance_id=instance_id) > + > + return {'imageId': image_id} > > === modified file 'nova/api/ec2/ec2utils.py' > --- nova/api/ec2/ec2utils.py 2011-06-15 06:08:23 +0000 > +++ nova/api/ec2/ec2utils.py 2011-06-22 04:55:48 +0000 > @@ -34,6 +34,17 @@ > return template % instance_id > > > +def id_to_ec2_snap_id(instance_id): > + """Convert an snapshot ID (int) to an ec2 snapshot ID > + (snap-[base 16 number])""" > + return id_to_ec2_id(instance_id, 'snap-%08x') > + > + > +def id_to_ec2_vol_id(instance_id): > + """Convert an volume ID (int) to an ec2 volume ID (vol-[base 16 number])""" > + return id_to_ec2_id(instance_id, 'vol-%08x') > + > + > _c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))') > > > @@ -124,3 +135,22 @@ > args[key] = value > > return args > + > + > +def properties_root_device_name(properties): > + """get root device name from image meta data. > + If it isn't specified, return None. 
> + """ > + root_device_name = None > + > + # NOTE(yamahata): see image_service.s3.s3create() > + for bdm in properties.get('mappings', []): > + if bdm['virtual'] == 'root': > + root_device_name = bdm['device'] > + > + # NOTE(yamahata): register_image's command line can override > + # .manifest.xml > + if 'root_device_name' in properties: > + root_device_name = properties['root_device_name'] > + > + return root_device_name > > === modified file 'nova/compute/api.py' > --- nova/compute/api.py 2011-06-20 20:55:16 +0000 > +++ nova/compute/api.py 2011-06-22 04:55:48 +0000 > @@ -32,6 +32,7 @@ > from nova import rpc > from nova import utils > from nova import volume > +from nova.api.ec2 import ec2utils > from nova.compute import instance_types > from nova.compute import power_state > from nova.compute.utils import terminate_volumes > @@ -223,6 +224,9 @@ > if reservation_id is None: > reservation_id = utils.generate_uid('r') > > + root_device_name = ec2utils.properties_root_device_name( > + image['properties']) > + > base_options = { > 'reservation_id': reservation_id, > 'image_ref': image_href, > @@ -247,11 +251,61 @@ > 'availability_zone': availability_zone, > 'os_type': os_type, > 'architecture': architecture, > - 'vm_mode': vm_mode} > - > - return (num_instances, base_options, security_groups) > - > - def create_db_entry_for_new_instance(self, context, base_options, > + 'vm_mode': vm_mode, > + 'root_device_name': root_device_name} > + > + return (num_instances, base_options, security_groups, image) > + > + def _update_image_block_device_mapping(self, elevated_context, instance_id, > + mappings): > + """tell vm driver to create ephemeral/swap device at boot time by > + updating BlockDeviceMapping > + """ > + for bdm in mappings: > + LOG.debug(_("bdm %s"), bdm) > + > + virtual_name = bdm['virtual'] > + if virtual_name == 'ami' or virtual_name == 'root': > + continue > + > + assert (virtual_name == 'swap' or > + virtual_name.startswith('ephemeral')) > + values = { > + 
'instance_id': instance_id, > + 'device_name': bdm['device'], > + 'virtual_name': virtual_name, } > + self.db.block_device_mapping_update_or_create(elevated_context, > + values) > + > + def _update_block_device_mapping(self, elevated_context, instance_id, > + block_device_mapping): > + """tell vm driver to attach volume at boot time by updating > + BlockDeviceMapping > + """ > + for bdm in block_device_mapping: > + LOG.debug(_('bdm %s'), bdm) > + assert 'device_name' in bdm > + > + values = {'instance_id': instance_id} > + for key in ('device_name', 'delete_on_termination', 'virtual_name', > + 'snapshot_id', 'volume_id', 'volume_size', > + 'no_device'): > + values[key] = bdm.get(key) > + > + # NOTE(yamahata): NoDevice eliminates devices defined in image > + # files by command line option. > + # (--block-device-mapping) > + if bdm.get('virtual_name') == 'NoDevice': > + values['no_device'] = True > + for k in ('delete_on_termination', 'volume_id', > + 'snapshot_id', 'volume_id', 'volume_size', > + 'virtual_name'): > + values[k] = None > + > + self.db.block_device_mapping_update_or_create(elevated_context, > + values) > + > + def create_db_entry_for_new_instance(self, context, image, base_options, > security_groups, block_device_mapping, num=1): > """Create an entry in the DB for this new instance, > including any related table updates (such as security > @@ -272,22 +326,14 @@ > instance_id, > security_group_id) > > - # NOTE(yamahata) > - # tell vm driver to attach volume at boot time by updating > - # BlockDeviceMapping > - for bdm in block_device_mapping: > - LOG.debug(_('bdm %s'), bdm) > - assert 'device_name' in bdm > - values = { > - 'instance_id': instance_id, > - 'device_name': bdm['device_name'], > - 'delete_on_termination': bdm.get('delete_on_termination'), > - 'virtual_name': bdm.get('virtual_name'), > - 'snapshot_id': bdm.get('snapshot_id'), > - 'volume_id': bdm.get('volume_id'), > - 'volume_size': bdm.get('volume_size'), > - 'no_device': 
bdm.get('no_device')} > - self.db.block_device_mapping_create(elevated, values) > + # BlockDeviceMapping table > + self._update_image_block_device_mapping(elevated, instance_id, > + image['properties'].get('mappings', [])) > + self._update_block_device_mapping(elevated, instance_id, > + image['properties'].get('block_device_mapping', [])) > + # override via command line option > + self._update_block_device_mapping(elevated, instance_id, > + block_device_mapping) > > # Set sane defaults if not specified > updates = dict(hostname=self.hostname_factory(instance_id)) > @@ -347,7 +393,7 @@ > """Provision the instances by passing the whole request to > the Scheduler for execution. Returns a Reservation ID > related to the creation of all of these instances.""" > - num_instances, base_options, security_groups = \ > + num_instances, base_options, security_groups, image = \ > self._check_create_parameters( > context, instance_type, > image_href, kernel_id, ramdisk_id, > @@ -383,7 +429,7 @@ > Returns a list of instance dicts. 
> """ > > - num_instances, base_options, security_groups = \ > + num_instances, base_options, security_groups, image = \ > self._check_create_parameters( > context, instance_type, > image_href, kernel_id, ramdisk_id, > @@ -398,7 +444,7 @@ > instances = [] > LOG.debug(_("Going to run %s instances..."), num_instances) > for num in range(num_instances): > - instance = self.create_db_entry_for_new_instance(context, > + instance = self.create_db_entry_for_new_instance(context, image, > base_options, security_groups, > block_device_mapping, num=num) > instances.append(instance) > > === modified file 'nova/compute/manager.py' > --- nova/compute/manager.py 2011-06-20 20:55:16 +0000 > +++ nova/compute/manager.py 2011-06-22 04:55:48 +0000 > @@ -227,6 +227,17 @@ > for bdm in self.db.block_device_mapping_get_all_by_instance( > context, instance_id): > LOG.debug(_("setting up bdm %s"), bdm) > + > + if bdm['no_device']: > + continue > + if bdm['virtual_name']: > + # TODO(yamahata): > + # block devices for swap and ephemeralN will be > + # created by virt driver locally in compute node. 
> + assert (bdm['virtual_name'] == 'swap' or > + bdm['virtual_name'].startswith('ephemeral')) > + continue > + > if ((bdm['snapshot_id'] is not None) and > (bdm['volume_id'] is None)): > # TODO(yamahata): default name and description > @@ -259,15 +270,6 @@ > block_device_mapping.append({'device_path': dev_path, > 'mount_device': > bdm['device_name']}) > - elif bdm['virtual_name'] is not None: > - # TODO(yamahata): ephemeral/swap device support > - LOG.debug(_('block_device_mapping: ' > - 'ephemeral device is not supported yet')) > - else: > - # TODO(yamahata): NoDevice support > - assert bdm['no_device'] > - LOG.debug(_('block_device_mapping: ' > - 'no device is not supported yet')) > > return block_device_mapping > > > === modified file 'nova/db/api.py' > --- nova/db/api.py 2011-06-20 20:55:16 +0000 > +++ nova/db/api.py 2011-06-22 04:55:48 +0000 > @@ -936,10 +936,16 @@ > > > def block_device_mapping_update(context, bdm_id, values): > - """Create an entry of block device mapping""" > + """Update an entry of block device mapping""" > return IMPL.block_device_mapping_update(context, bdm_id, values) > > > +def block_device_mapping_update_or_create(context, values): > + """Update an entry of block device mapping. 
> + If not existed, create a new entry""" > + return IMPL.block_device_mapping_update_or_create(context, values) > + > + > def block_device_mapping_get_all_by_instance(context, instance_id): > """Get all block device mapping belonging to a instance""" > return IMPL.block_device_mapping_get_all_by_instance(context, instance_id) > > === modified file 'nova/db/sqlalchemy/api.py' > --- nova/db/sqlalchemy/api.py 2011-06-20 20:55:16 +0000 > +++ nova/db/sqlalchemy/api.py 2011-06-22 04:55:48 +0000 > @@ -1933,6 +1933,23 @@ > > > @require_context > +def block_device_mapping_update_or_create(context, values): > + session = get_session() > + with session.begin(): > + result = session.query(models.BlockDeviceMapping).\ > + filter_by(instance_id=values['instance_id']).\ > + filter_by(device_name=values['device_name']).\ > + filter_by(deleted=False).\ > + first() > + if not result: > + bdm_ref = models.BlockDeviceMapping() > + bdm_ref.update(values) > + bdm_ref.save(session=session) > + else: > + result.update(values) > + > + > +@require_context > def block_device_mapping_get_all_by_instance(context, instance_id): > session = get_session() > result = session.query(models.BlockDeviceMapping).\ > > === added file 'nova/db/sqlalchemy/migrate_repo/versions/027_add_root_device_name.py' > --- nova/db/sqlalchemy/migrate_repo/versions/027_add_root_device_name.py 1970-01-01 00:00:00 +0000 > +++ nova/db/sqlalchemy/migrate_repo/versions/027_add_root_device_name.py 2011-06-22 04:55:48 +0000 > @@ -0,0 +1,47 @@ > +# Copyright 2011 OpenStack LLC. > +# Copyright 2011 Isaku Yamahata > +# > +# Licensed under the Apache License, Version 2.0 (the "License"); you may > +# not use this file except in compliance with the License. 
You may obtain > +# a copy of the License at > +# > +# http://www.apache.org/licenses/LICENSE-2.0 > +# > +# Unless required by applicable law or agreed to in writing, software > +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT > +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the > +# License for the specific language governing permissions and limitations > +# under the License. > + > +from sqlalchemy import Column, Integer, MetaData, Table, String > + > +meta = MetaData() > + > + > +# Just for the ForeignKey and column creation to succeed, these are not the > +# actual definitions of instances or services. > +instances = Table('instances', meta, > + Column('id', Integer(), primary_key=True, nullable=False), > + ) > + > +# > +# New Column > +# > +root_device_name = Column( > + 'root_device_name', > + String(length=255, convert_unicode=False, assert_unicode=None, > + unicode_error=None, _warn_on_bytestring=False), > + nullable=True) > + > + > +def upgrade(migrate_engine): > + # Upgrade operations go here. Don't create your own engine; > + # bind migrate_engine to your metadata > + meta.bind = migrate_engine > + instances.create_column(root_device_name) > + > + > +def downgrade(migrate_engine): > + # Operations to reverse the above upgrade go here. 
> + meta.bind = migrate_engine > + instances.drop_column('root_device_name') > > === modified file 'nova/db/sqlalchemy/models.py' > --- nova/db/sqlalchemy/models.py 2011-06-20 20:55:16 +0000 > +++ nova/db/sqlalchemy/models.py 2011-06-22 04:55:48 +0000 > @@ -236,6 +236,8 @@ > vm_mode = Column(String(255)) > uuid = Column(String(36)) > > + root_device_name = Column(String(255)) > + > # TODO(vish): see Ewan's email about state improvements, probably > # should be in a driver base class or some such > # vmstate_state = running, halted, suspended, paused > > === modified file 'nova/image/s3.py' > --- nova/image/s3.py 2011-06-01 03:16:22 +0000 > +++ nova/image/s3.py 2011-06-22 04:55:48 +0000 > @@ -102,18 +102,7 @@ > key.get_contents_to_filename(local_filename) > return local_filename > > - def _s3_create(self, context, metadata): > - """Gets a manifext from s3 and makes an image.""" > - > - image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir) > - > - image_location = metadata['properties']['image_location'] > - bucket_name = image_location.split('/')[0] > - manifest_path = image_location[len(bucket_name) + 1:] > - bucket = self._conn(context).get_bucket(bucket_name) > - key = bucket.get_key(manifest_path) > - manifest = key.get_contents_as_string() > - > + def _s3_parse_manifest(self, context, metadata, manifest): > manifest = ElementTree.fromstring(manifest) > image_format = 'ami' > image_type = 'machine' > @@ -141,6 +130,28 @@ > except Exception: > arch = 'x86_64' > > + # NOTE(yamahata): > + # EC2 ec2-budlne-image --block-device-mapping accepts > + # = where > + # virtual name = {ami, root, swap, ephemeral} > + # where N is no negative integer > + # device name = the device name seen by guest kernel. > + # They are converted into > + # block_device_mapping/mapping/{virtual, device} > + # > + # Do NOT confuse this with ec2-register's block device mapping > + # argument. 
> + mappings = [] > + try: > + block_device_mapping = manifest.findall('machine_configuration/' > + 'block_device_mapping/' > + 'mapping') > + for bdm in block_device_mapping: > + mappings.append({'virtual': bdm.find('virtual').text, > + 'device': bdm.find('device').text}) > + except Exception: > + mappings = [] > + > properties = metadata['properties'] > properties['project_id'] = context.project_id > properties['architecture'] = arch > @@ -151,6 +162,9 @@ > if ramdisk_id: > properties['ramdisk_id'] = ec2utils.ec2_id_to_id(ramdisk_id) > > + if mappings: > + properties['mappings'] = mappings > + > metadata.update({'disk_format': image_format, > 'container_format': image_format, > 'status': 'queued', > @@ -158,6 +172,21 @@ > 'properties': properties}) > metadata['properties']['image_state'] = 'pending' > image = self.service.create(context, metadata) > + return manifest, image > + > + def _s3_create(self, context, metadata): > + """Gets a manifext from s3 and makes an image.""" > + > + image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir) > + > + image_location = metadata['properties']['image_location'] > + bucket_name = image_location.split('/')[0] > + manifest_path = image_location[len(bucket_name) + 1:] > + bucket = self._conn(context).get_bucket(bucket_name) > + key = bucket.get_key(manifest_path) > + manifest = key.get_contents_as_string() > + > + manifest, image = self._s3_parse_manifest(metadata, context, manifest) > image_id = image['id'] > > def delayed_create(): > > === modified file 'nova/test.py' > --- nova/test.py 2011-06-03 15:11:01 +0000 > +++ nova/test.py 2011-06-22 04:55:48 +0000 > @@ -252,3 +252,15 @@ > for d1, d2 in zip(L1, L2): > self.assertDictMatch(d1, d2, approx_equal=approx_equal, > tolerance=tolerance) > + > + def assertSubDictMatch(self, sub_dict, super_dict): > + """Assert a sub_dict is subset of super_dict.""" > + self.assertTrue(set(sub_dict.keys()).issubset(set(super_dict.keys()))) > + for k, sub_value in sub_dict.items(): > + 
super_value = super_dict[k] > + if isinstance(sub_value, dict): > + self.assertSubDictMatch(sub_value, super_value) > + elif 'DONTCARE' in (sub_value, super_value): > + continue > + else: > + self.assertEqual(sub_value, super_value) > > === added file 'nova/tests/image/test_s3.py' > --- nova/tests/image/test_s3.py 1970-01-01 00:00:00 +0000 > +++ nova/tests/image/test_s3.py 2011-06-22 04:55:48 +0000 > @@ -0,0 +1,122 @@ > +# vim: tabstop=4 shiftwidth=4 softtabstop=4 > + > +# Copyright 2011 Isaku Yamahata > +# All Rights Reserved. > +# > +# Licensed under the Apache License, Version 2.0 (the "License"); you may > +# not use this file except in compliance with the License. You may obtain > +# a copy of the License at > +# > +# http://www.apache.org/licenses/LICENSE-2.0 > +# > +# Unless required by applicable law or agreed to in writing, software > +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT > +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the > +# License for the specific language governing permissions and limitations > +# under the License. 
> + > +from nova import context > +from nova import flags > +from nova import test > +from nova.image import s3 > + > +FLAGS = flags.FLAGS > + > + > +ami_manifest_xml = """ > + > + 2011-06-17 > + > + test-s3 > + 0 > + 0 > + > + > + x86_64 > + > + > + ami > + sda1 > + > + > + root > + /dev/sda1 > + > + > + ephemeral0 > + sda2 > + > + > + swap > + sda3 > + > + > + > + > +""" > + > + > +class TestS3ImageService(test.TestCase): > + def setUp(self): > + super(TestS3ImageService, self).setUp() > + self.orig_image_service = FLAGS.image_service > + FLAGS.image_service = 'nova.image.fake.FakeImageService' > + self.image_service = s3.S3ImageService() > + self.context = context.RequestContext(None, None) > + > + def tearDown(self): > + super(TestS3ImageService, self).tearDown() > + FLAGS.image_service = self.orig_image_service > + > + def _assertEqualList(self, list0, list1, keys): > + self.assertEqual(len(list0), len(list1)) > + key = keys[0] > + for x in list0: > + self.assertEqual(len(x), len(keys)) > + self.assertTrue(key in x) > + for y in list1: > + self.assertTrue(key in y) > + if x[key] == y[key]: > + for k in keys: > + self.assertEqual(x[k], y[k]) > + > + def test_s3_create(self): > + metadata = {'properties': { > + 'root_device_name': '/dev/sda1', > + 'block_device_mapping': [ > + {'device_name': '/dev/sda1', > + 'snapshot_id': 'snap-12345678', > + 'delete_on_termination': True}, > + {'device_name': '/dev/sda2', > + 'virutal_name': 'ephemeral0'}, > + {'device_name': '/dev/sdb0', > + 'no_device': True}]}} > + _manifest, image = self.image_service._s3_parse_manifest( > + self.context, metadata, ami_manifest_xml) > + image_id = image['id'] > + > + ret_image = self.image_service.show(self.context, image_id) > + self.assertTrue('properties' in ret_image) > + properties = ret_image['properties'] > + > + self.assertTrue('mappings' in properties) > + mappings = properties['mappings'] > + expected_mappings = [ > + {"device": "sda1", "virtual": "ami"}, > + {"device": 
"/dev/sda1", "virtual": "root"}, > + {"device": "sda2", "virtual": "ephemeral0"}, > + {"device": "sda3", "virtual": "swap"}] > + self._assertEqualList(mappings, expected_mappings, > + ['device', 'virtual']) > + > + self.assertTrue('block_device_mapping', properties) > + block_device_mapping = properties['block_device_mapping'] > + expected_bdm = [ > + {'device_name': '/dev/sda1', > + 'snapshot_id': 'snap-12345678', > + 'delete_on_termination': True}, > + {'device_name': '/dev/sda2', > + 'virutal_name': 'ephemeral0'}, > + {'device_name': '/dev/sdb0', > + 'no_device': True}] > + self.assertEqual(block_device_mapping, expected_bdm) > > === modified file 'nova/tests/test_api.py' > --- nova/tests/test_api.py 2011-06-15 05:41:29 +0000 > +++ nova/tests/test_api.py 2011-06-22 04:55:48 +0000 > @@ -92,7 +92,9 @@ > conv = ec2utils._try_convert > self.assertEqual(conv('None'), None) > self.assertEqual(conv('True'), True) > + self.assertEqual(conv('true'), True) > self.assertEqual(conv('False'), False) > + self.assertEqual(conv('false'), False) > self.assertEqual(conv('0'), 0) > self.assertEqual(conv('42'), 42) > self.assertEqual(conv('3.14'), 3.14) > @@ -107,6 +109,8 @@ > def test_ec2_id_to_id(self): > self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30) > self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29) > + self.assertEqual(ec2utils.ec2_id_to_id('snap-0000001c'), 28) > + self.assertEqual(ec2utils.ec2_id_to_id('vol-0000001b'), 27) > > def test_bad_ec2_id(self): > self.assertRaises(exception.InvalidEc2Id, > @@ -116,6 +120,38 @@ > def test_id_to_ec2_id(self): > self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e') > self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d') > + self.assertEqual(ec2utils.id_to_ec2_snap_id(28), 'snap-0000001c') > + self.assertEqual(ec2utils.id_to_ec2_vol_id(27), 'vol-0000001b') > + > + def test_dict_from_dotted_str(self): > + in_str = [('BlockDeviceMapping.1.DeviceName', '/dev/sda1'), > + 
('BlockDeviceMapping.1.Ebs.SnapshotId', 'snap-0000001c'), > + ('BlockDeviceMapping.1.Ebs.VolumeSize', '80'), > + ('BlockDeviceMapping.1.Ebs.DeleteOnTermination', 'false'), > + ('BlockDeviceMapping.2.DeviceName', '/dev/sdc'), > + ('BlockDeviceMapping.2.VirtualName', 'ephemeral0')] > + expected_dict = { > + 'block_device_mapping': { > + '1': {'device_name': '/dev/sda1', > + 'ebs': {'snapshot_id': 'snap-0000001c', > + 'volume_size': 80, > + 'delete_on_termination': False}}, > + '2': {'device_name': '/dev/sdc', > + 'virtual_name': 'ephemeral0'}}} > + out_dict = ec2utils.dict_from_dotted_str(in_str) > + > + self.assertDictMatch(out_dict, expected_dict) > + > + def test_properties_root_defice_name(self): > + mappings = [{"device": "/dev/sda1", "virtual": "root"}] > + properties0 = {'mappings': mappings} > + properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings} > + > + root_device_name = ec2utils.properties_root_device_name(properties0) > + self.assertEqual(root_device_name, '/dev/sda1') > + > + root_device_name = ec2utils.properties_root_device_name(properties1) > + self.assertEqual(root_device_name, '/dev/sdb') > > > class ApiEc2TestCase(test.TestCase): > > === modified file 'nova/tests/test_cloud.py' > --- nova/tests/test_cloud.py 2011-06-17 23:52:22 +0000 > +++ nova/tests/test_cloud.py 2011-06-22 04:55:48 +0000 > @@ -171,7 +171,7 @@ > vol2 = db.volume_create(self.context, {}) > result = self.cloud.describe_volumes(self.context) > self.assertEqual(len(result['volumeSet']), 2) > - volume_id = ec2utils.id_to_ec2_id(vol2['id'], 'vol-%08x') > + volume_id = ec2utils.id_to_ec2_vol_id(vol2['id']) > result = self.cloud.describe_volumes(self.context, > volume_id=[volume_id]) > self.assertEqual(len(result['volumeSet']), 1) > @@ -187,7 +187,7 @@ > snap = db.snapshot_create(self.context, {'volume_id': vol['id'], > 'volume_size': vol['size'], > 'status': "available"}) > - snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x') > + snapshot_id = 
ec2utils.id_to_ec2_snap_id(snap['id']) > > result = self.cloud.create_volume(self.context, > snapshot_id=snapshot_id) > @@ -224,7 +224,7 @@ > snap2 = db.snapshot_create(self.context, {'volume_id': vol['id']}) > result = self.cloud.describe_snapshots(self.context) > self.assertEqual(len(result['snapshotSet']), 2) > - snapshot_id = ec2utils.id_to_ec2_id(snap2['id'], 'snap-%08x') > + snapshot_id = ec2utils.id_to_ec2_snap_id(snap2['id']) > result = self.cloud.describe_snapshots(self.context, > snapshot_id=[snapshot_id]) > self.assertEqual(len(result['snapshotSet']), 1) > @@ -238,7 +238,7 @@ > def test_create_snapshot(self): > """Makes sure create_snapshot works.""" > vol = db.volume_create(self.context, {'status': "available"}) > - volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x') > + volume_id = ec2utils.id_to_ec2_vol_id(vol['id']) > > result = self.cloud.create_snapshot(self.context, > volume_id=volume_id) > @@ -255,7 +255,7 @@ > vol = db.volume_create(self.context, {'status': "available"}) > snap = db.snapshot_create(self.context, {'volume_id': vol['id'], > 'status': "available"}) > - snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x') > + snapshot_id = ec2utils.id_to_ec2_snap_id(snap['id']) > > result = self.cloud.delete_snapshot(self.context, > snapshot_id=snapshot_id) > @@ -294,6 +294,151 @@ > db.service_destroy(self.context, comp1['id']) > db.service_destroy(self.context, comp2['id']) > > + def _block_device_mapping_create(self, instance_id, mappings): > + volumes = [] > + for bdm in mappings: > + db.block_device_mapping_create(self.context, bdm) > + if 'volume_id' in bdm: > + values = {'id': bdm['volume_id']} > + for bdm_key, vol_key in [('snapshot_id', 'snapshot_id'), > + ('snapshot_size', 'volume_size'), > + ('delete_on_termination', > + 'delete_on_termination')]: > + if bdm_key in bdm: > + values[vol_key] = bdm[bdm_key] > + vol = db.volume_create(self.context, values) > + db.volume_attached(self.context, vol['id'], > + instance_id, 
bdm['device_name']) > + volumes.append(vol) > + return volumes > + > + def _assertInstance(self, instance_id): > + ec2_instance_id = ec2utils.id_to_ec2_id(instance_id) > + result = self.cloud.describe_instances(self.context, > + instance_id=[ec2_instance_id]) > + result = result['reservationSet'][0] > + self.assertEqual(len(result['instancesSet']), 1) > + result = result['instancesSet'][0] > + self.assertEqual(result['instanceId'], ec2_instance_id) > + return (ec2_instance_id, result) > + > + def _assertEqualBlockDeviceMapping(self, expected, result): > + self.assertEqual(len(expected), len(result)) > + for x in expected: > + found = False > + for y in result: > + if x['deviceName'] == y['deviceName']: > + self.assertSubDictMatch(x, y) > + found = True > + break > + self.assertTrue(found) > + > + def test_describe_instances_bdm(self): > + """Make sure describe_instances works with root_device_name and > + block device mappings > + """ > + inst1 = db.instance_create(self.context, > + {'image_ref': 1, > + 'root_device_name': '/dev/sdb1'}) > + inst2 = db.instance_create(self.context, > + {'image_ref': 2, > + 'root_device_name': '/dev/sdc1'}) > + > + instance_id = inst1['id'] > + mappings = [ > + {'instance_id': instance_id, > + 'device_name': '/dev/sdb1', > + 'snapshot_id': '1', > + 'volume_id': '2'}, > + {'instance_id': instance_id, > + 'device_name': '/dev/sdb2', > + 'volume_id': '3', > + 'volume_size': 1}, > + {'instance_id': instance_id, > + 'device_name': '/dev/sdb3', > + 'delete_on_termination': True, > + 'snapshot_id': '4', > + 'volume_id': '5'}, > + {'instance_id': instance_id, > + 'device_name': '/dev/sdb4', > + 'delete_on_termination': False, > + 'snapshot_id': '6', > + 'volume_id': '7'}, > + {'instance_id': instance_id, > + 'device_name': '/dev/sdb5', > + 'snapshot_id': '8', > + 'volume_id': '9', > + 'volume_size': 0}, > + {'instance_id': instance_id, > + 'device_name': '/dev/sdb6', > + 'snapshot_id': '10', > + 'volume_id': '11', > + 'volume_size': 1}, > + 
{'instance_id': instance_id, > + 'device_name': '/dev/sdb7', > + 'no_device': True}, > + {'instance_id': instance_id, > + 'device_name': '/dev/sdb8', > + 'virtual_name': 'swap'}, > + {'instance_id': instance_id, > + 'device_name': '/dev/sdb9', > + 'virtual_name': 'ephemeral3'}] > + > + volumes = self._block_device_mapping_create(instance_id, mappings) > + > + ec2_instance_id, result = self._assertInstance(instance_id) > + expected_result = {'instanceId': ec2_instance_id, > + 'rootDeviceName': '/dev/sdb1', > + 'rootDeviceType': 'ebs'} > + expected_block_device_mapping = [ > + {'deviceName': '/dev/sdb1', > + 'ebs': {'status': 'in-use', > + 'deleteOnTermination': False, > + 'volumeId': 2, > + }}, > + {'deviceName': '/dev/sdb2', > + 'ebs': {'status': 'in-use', > + 'deleteOnTermination': False, > + 'volumeId': 3, > + }}, > + {'deviceName': '/dev/sdb3', > + 'ebs': {'status': 'in-use', > + 'deleteOnTermination': True, > + 'volumeId': 5, > + }}, > + {'deviceName': '/dev/sdb4', > + 'ebs': {'status': 'in-use', > + 'deleteOnTermination': False, > + 'volumeId': 7, > + }}, > + {'deviceName': '/dev/sdb5', > + 'ebs': {'status': 'in-use', > + 'deleteOnTermination': False, > + 'volumeId': 9, > + }}, > + {'deviceName': '/dev/sdb6', > + 'ebs': {'status': 'in-use', > + 'deleteOnTermination': False, > + 'volumeId': 11, }}] > + # NOTE(yamahata): swap/ephemeral device case isn't supported yet. 
> + self.assertSubDictMatch(expected_result, result) > + self._assertEqualBlockDeviceMapping(expected_block_device_mapping, > + result['blockDeviceMapping']) > + > + ec2_instance_id, result = self._assertInstance(inst2['id']) > + expected_result = {'instanceId': ec2_instance_id, > + 'rootDeviceName': '/dev/sdc1', > + 'rootDeviceType': 'instance-store'} > + self.assertSubDictMatch(expected_result, result) > + > + for vol in volumes: > + db.volume_destroy(self.context, vol['id']) > + for bdm in db.block_device_mapping_get_all_by_instance(self.context, > + instance_id): > + db.block_device_mapping_destroy(self.context, bdm['id']) > + db.instance_destroy(self.context, inst2['id']) > + db.instance_destroy(self.context, inst1['id']) > + > def test_describe_images(self): > describe_images = self.cloud.describe_images > > @@ -323,6 +468,161 @@ > self.assertRaises(exception.ImageNotFound, describe_images, > self.context, ['ami-fake']) > > + def assertDictListUnorderedMatch(self, L1, L2, key): > + self.assertEqual(len(L1), len(L2)) > + for d1 in L1: > + self.assertTrue(key in d1) > + for d2 in L2: > + self.assertTrue(key in d2) > + if d1[key] == d2[key]: > + self.assertDictMatch(d1, d2) > + > + def _setUpImageSet(self, create_volumes_and_snapshots=False): > + mappings1 = [ > + {'device': '/dev/sda1', 'virtual': 'root'}, > + > + {'device': '/dev/sdb0', 'virtual': 'ephemeral0'}, > + {'device': '/dev/sdb1', 'virtual': 'ephemeral1'}, > + {'device': '/dev/sdb2', 'virtual': 'ephemeral2'}, > + {'device': '/dev/sdb3', 'virtual': 'ephemeral3'}, > + {'device': '/dev/sdb4', 'virtual': 'ephemeral4'}, > + > + {'device': '/dev/sdc0', 'virtual': 'swap'}, > + {'device': '/dev/sdc1', 'virtual': 'swap'}, > + {'device': '/dev/sdc2', 'virtual': 'swap'}, > + {'device': '/dev/sdc3', 'virtual': 'swap'}, > + {'device': '/dev/sdc4', 'virtual': 'swap'}] > + block_device_mapping1 = [ > + {'device_name': '/dev/sdb1', 'snapshot_id': 01234567}, > + {'device_name': '/dev/sdb2', 'volume_id': 01234567}, > + 
{'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'}, > + {'device_name': '/dev/sdb4', 'no_device': True}, > + > + {'device_name': '/dev/sdc1', 'snapshot_id': 12345678}, > + {'device_name': '/dev/sdc2', 'volume_id': 12345678}, > + {'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'}, > + {'device_name': '/dev/sdc4', 'no_device': True}] > + image1 = { > + 'id': 1, > + 'properties': { > + 'kernel_id': 1, > + 'type': 'machine', > + 'image_state': 'available', > + 'mappings': mappings1, > + 'block_device_mapping': block_device_mapping1, > + } > + } > + > + mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}] > + block_device_mapping2 = [{'device_name': '/dev/sdb1', > + 'snapshot_id': 01234567}] > + image2 = { > + 'id': 2, > + 'properties': { > + 'kernel_id': 2, > + 'type': 'machine', > + 'root_device_name': '/dev/sdb1', > + 'mappings': mappings2, > + 'block_device_mapping': block_device_mapping2}} > + > + def fake_show(meh, context, image_id): > + for i in [image1, image2]: > + if i['id'] == image_id: > + return i > + raise exception.ImageNotFound(image_id=image_id) > + > + def fake_detail(meh, context): > + return [image1, image2] > + > + self.stubs.Set(fake._FakeImageService, 'show', fake_show) > + self.stubs.Set(fake._FakeImageService, 'detail', fake_detail) > + > + volumes = [] > + snapshots = [] > + if create_volumes_and_snapshots: > + for bdm in block_device_mapping1: > + if 'volume_id' in bdm: > + vol = self._volume_create(bdm['volume_id']) > + volumes.append(vol['id']) > + if 'snapshot_id' in bdm: > + snap = db.snapshot_create(self.context, > + {'id': bdm['snapshot_id'], > + 'volume_id': 76543210, > + 'status': "available", > + 'volume_size': 1}) > + snapshots.append(snap['id']) > + return (volumes, snapshots) > + > + def _assertImageSet(self, result, root_device_type, root_device_name): > + self.assertEqual(1, len(result['imagesSet'])) > + result = result['imagesSet'][0] > + self.assertTrue('rootDeviceType' in result) > + 
self.assertEqual(result['rootDeviceType'], root_device_type) > + self.assertTrue('rootDeviceName' in result) > + self.assertEqual(result['rootDeviceName'], root_device_name) > + self.assertTrue('blockDeviceMapping' in result) > + > + return result > + > + _expected_root_device_name1 = '/dev/sda1' > + # NOTE(yamahata): noDevice doesn't make sense when returning mapping > + # It makes sense only when user overriding existing > + # mapping. > + _expected_bdms1 = [ > + {'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'}, > + {'deviceName': '/dev/sdb1', 'ebs': {'snapshotId': > + 'snap-00053977'}}, > + {'deviceName': '/dev/sdb2', 'ebs': {'snapshotId': > + 'vol-00053977'}}, > + {'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'}, > + # {'deviceName': '/dev/sdb4', 'noDevice': True}, > + > + {'deviceName': '/dev/sdc0', 'virtualName': 'swap'}, > + {'deviceName': '/dev/sdc1', 'ebs': {'snapshotId': > + 'snap-00bc614e'}}, > + {'deviceName': '/dev/sdc2', 'ebs': {'snapshotId': > + 'vol-00bc614e'}}, > + {'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'}, > + # {'deviceName': '/dev/sdc4', 'noDevice': True} > + ] > + > + _expected_root_device_name2 = '/dev/sdb1' > + _expected_bdms2 = [{'deviceName': '/dev/sdb1', > + 'ebs': {'snapshotId': 'snap-00053977'}}] > + > + # NOTE(yamahata): > + # InstanceBlockDeviceMappingItemType > + # rootDeviceType > + # rootDeviceName > + # blockDeviceMapping > + # deviceName > + # virtualName > + # ebs > + # snapshotId > + # volumeSize > + # deleteOnTermination > + # noDevice > + def test_describe_image_mapping(self): > + """test for rootDeviceName and blockDeiceMapping""" > + describe_images = self.cloud.describe_images > + self._setUpImageSet() > + > + result = describe_images(self.context, ['ami-00000001']) > + result = self._assertImageSet(result, 'instance-store', > + self._expected_root_device_name1) > + > + self.assertDictListUnorderedMatch(result['blockDeviceMapping'], > + self._expected_bdms1, 'deviceName') > + > + result = 
describe_images(self.context, ['ami-00000002']) > + result = self._assertImageSet(result, 'ebs', > + self._expected_root_device_name2) > + > + self.assertDictListUnorderedMatch(result['blockDeviceMapping'], > + self._expected_bdms2, 'deviceName') > + > + self.stubs.UnsetAll() > + > def test_describe_image_attribute(self): > describe_image_attribute = self.cloud.describe_image_attribute > > @@ -336,6 +636,32 @@ > 'launchPermission') > self.assertEqual([{'group': 'all'}], result['launchPermission']) > > + def test_describe_image_attribute_root_device_name(self): > + describe_image_attribute = self.cloud.describe_image_attribute > + self._setUpImageSet() > + > + result = describe_image_attribute(self.context, 'ami-00000001', > + 'rootDeviceName') > + self.assertEqual(result['rootDeviceName'], > + self._expected_root_device_name1) > + result = describe_image_attribute(self.context, 'ami-00000002', > + 'rootDeviceName') > + self.assertEqual(result['rootDeviceName'], > + self._expected_root_device_name2) > + > + def test_describe_image_attribute_block_device_mapping(self): > + describe_image_attribute = self.cloud.describe_image_attribute > + self._setUpImageSet() > + > + result = describe_image_attribute(self.context, 'ami-00000001', > + 'blockDeviceMapping') > + self.assertDictListUnorderedMatch(result['blockDeviceMapping'], > + self._expected_bdms1, 'deviceName') > + result = describe_image_attribute(self.context, 'ami-00000002', > + 'blockDeviceMapping') > + self.assertDictListUnorderedMatch(result['blockDeviceMapping'], > + self._expected_bdms2, 'deviceName') > + > def test_modify_image_attribute(self): > modify_image_attribute = self.cloud.modify_image_attribute > > @@ -561,7 +887,7 @@ > def test_update_of_volume_display_fields(self): > vol = db.volume_create(self.context, {}) > self.cloud.update_volume(self.context, > - ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'), > + ec2utils.id_to_ec2_vol_id(vol['id']), > display_name='c00l v0lum3') > vol = 
db.volume_get(self.context, vol['id']) > self.assertEqual('c00l v0lum3', vol['display_name']) > @@ -570,7 +896,7 @@ > def test_update_of_volume_wont_update_private_fields(self): > vol = db.volume_create(self.context, {}) > self.cloud.update_volume(self.context, > - ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'), > + ec2utils.id_to_ec2_vol_id(vol['id']), > mountpoint='/not/here') > vol = db.volume_get(self.context, vol['id']) > self.assertEqual(None, vol['mountpoint']) > @@ -647,11 +973,13 @@ > > self._restart_compute_service() > > - def _volume_create(self): > + def _volume_create(self, volume_id=None): > kwargs = {'status': 'available', > 'host': self.volume.host, > 'size': 1, > 'attach_status': 'detached', } > + if volume_id: > + kwargs['id'] = volume_id > return db.volume_create(self.context, kwargs) > > def _assert_volume_attached(self, vol, instance_id, mountpoint): > @@ -679,10 +1007,10 @@ > 'max_count': 1, > 'block_device_mapping': [{'device_name': '/dev/vdb', > 'volume_id': vol1['id'], > - 'delete_on_termination': False, }, > + 'delete_on_termination': False}, > {'device_name': '/dev/vdc', > 'volume_id': vol2['id'], > - 'delete_on_termination': True, }, > + 'delete_on_termination': True}, > ]} > ec2_instance_id = self._run_instance_wait(**kwargs) > instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) > @@ -812,7 +1140,7 @@ > def test_run_with_snapshot(self): > """Makes sure run/stop/start instance with snapshot works.""" > vol = self._volume_create() > - ec2_volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x') > + ec2_volume_id = ec2utils.id_to_ec2_vol_id(vol['id']) > > ec2_snapshot1_id = self._create_snapshot(ec2_volume_id) > snapshot1_id = ec2utils.ec2_id_to_id(ec2_snapshot1_id) > @@ -871,3 +1199,33 @@ > self.cloud.delete_snapshot(self.context, snapshot_id) > greenthread.sleep(0.3) > db.volume_destroy(self.context, vol['id']) > + > + def test_create_image(self): > + """Make sure that CreateImage works""" > + # enforce periodic tasks run in short time to 
avoid wait for 60s. > + self._restart_compute_service(periodic_interval=0.3) > + > + (volumes, snapshots) = self._setUpImageSet( > + create_volumes_and_snapshots=True) > + > + kwargs = {'image_id': 'ami-1', > + 'instance_type': FLAGS.default_instance_type, > + 'max_count': 1} > + ec2_instance_id = self._run_instance_wait(**kwargs) > + > + # TODO(yamahata): s3._s3_create() can't be tested easily by unit test > + # as there is no unit test for s3.create() > + ## result = self.cloud.create_image(self.context, ec2_instance_id, > + ## no_reboot=True) > + ## ec2_image_id = result['imageId'] > + ## created_image = self.cloud.describe_images(self.context, > + ## [ec2_image_id]) > + > + self.cloud.terminate_instances(self.context, [ec2_instance_id]) > + for vol in volumes: > + db.volume_destroy(self.context, vol) > + for snap in snapshots: > + db.snapshot_destroy(self.context, snap) > + # TODO(yamahata): clean up snapshot created by CreateImage. > + > + self._restart_compute_service() > > === modified file 'nova/volume/api.py' > --- nova/volume/api.py 2011-06-15 16:46:24 +0000 > +++ nova/volume/api.py 2011-06-22 04:55:48 +0000 > @@ -140,9 +140,10 @@ > {"method": "remove_volume", > "args": {'volume_id': volume_id}}) > > - def create_snapshot(self, context, volume_id, name, description): > + def _create_snapshot(self, context, volume_id, name, description, > + force=False): > volume = self.get(context, volume_id) > - if volume['status'] != "available": > + if ((not force) and (volume['status'] != "available")): > raise exception.ApiError(_("Volume status must be available")) > > options = { > @@ -164,6 +165,14 @@ > "snapshot_id": snapshot['id']}}) > return snapshot > > + def create_snapshot(self, context, volume_id, name, description): > + return self._create_snapshot(context, volume_id, name, description, > + False) > + > + def create_snapshot_force(self, context, volume_id, name, description): > + return self._create_snapshot(context, volume_id, name, description, > + True) 
> + > def delete_snapshot(self, context, snapshot_id): > snapshot = self.get_snapshot(context, snapshot_id) > if snapshot['status'] != "available": > -- yamahata