Merge lp:~zulcss/ubuntu/precise/nova/trunk into lp:~ubuntu-cloud-archive/ubuntu/precise/nova/trunk

Proposed by Chuck Short
Status: Merged
Approved by: James Page
Approved revision: 97
Merged at revision: 97
Proposed branch: lp:~zulcss/ubuntu/precise/nova/trunk
Merge into: lp:~ubuntu-cloud-archive/ubuntu/precise/nova/trunk
Diff against target: 298126 lines (+77032/-91301)
1002 files modified
.mailmap (+2/-2)
.pc/.quilt_patches (+0/-1)
.pc/.quilt_series (+0/-1)
.pc/.version (+0/-1)
.pc/applied-patches (+0/-9)
.pc/avoid_setuptools_git_dependency.patch/tools/pip-requires (+0/-23)
.pc/fix-docs-build-without-network.patch/doc/source/conf.py (+0/-279)
.pc/fix-ubuntu-tests.patch/nova/tests/test_api.py (+0/-616)
.pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost (+0/-445)
.pc/rbd-security.patch/nova/virt/libvirt/volume.py (+0/-206)
.pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova/db/sqlalchemy/api.py (+0/-5256)
.pc/ubuntu/fix-libvirt-firewall-slowdown.patch/nova/tests/test_libvirt.py (+0/-3919)
.pc/ubuntu/fix-libvirt-firewall-slowdown.patch/nova/virt/firewall.py (+0/-536)
.pc/ubuntu/ubuntu-fix-32-64-bit-iss.patch/nova/tests/test_nfs.py (+0/-629)
.pc/ubuntu/ubuntu-fix-32-64-bit-iss.patch/nova/volume/nfs.py (+0/-293)
.pc/ubuntu/ubuntu-fix-ec2-instance-id-mappings.patch/nova/db/sqlalchemy/api.py (+0/-5253)
AUTHORS (+34/-0)
ChangeLog (+15284/-618)
HACKING.rst (+1/-0)
PKG-INFO (+10/-2)
bin/nova-all (+8/-5)
bin/nova-api (+7/-3)
bin/nova-api-ec2 (+2/-2)
bin/nova-api-metadata (+2/-2)
bin/nova-api-os-compute (+2/-2)
bin/nova-api-os-volume (+0/-46)
bin/nova-cert (+7/-3)
bin/nova-clear-rabbit-queues (+6/-6)
bin/nova-compute (+16/-4)
bin/nova-conductor (+52/-0)
bin/nova-console (+8/-3)
bin/nova-consoleauth (+6/-4)
bin/nova-dhcpbridge (+18/-18)
bin/nova-manage (+56/-279)
bin/nova-network (+8/-3)
bin/nova-novncproxy (+22/-21)
bin/nova-objectstore (+2/-2)
bin/nova-rootwrap (+9/-0)
bin/nova-rpc-zmq-receiver (+10/-9)
bin/nova-scheduler (+8/-3)
bin/nova-volume (+0/-48)
bin/nova-volume-usage-audit (+0/-82)
bin/nova-xvpvncproxy (+2/-4)
debian/changelog (+37/-0)
debian/control (+32/-10)
debian/mans/nova-conductor.8 (+20/-0)
debian/nova-api-os-volume.init (+0/-76)
debian/nova-api-os-volume.install (+0/-1)
debian/nova-api-os-volume.logrotate (+0/-4)
debian/nova-api-os-volume.manpages (+0/-1)
debian/nova-api-os-volume.postrm (+0/-7)
debian/nova-api-os-volume.upstart.in (+0/-18)
debian/nova-conductor.init (+71/-0)
debian/nova-conductor.install (+1/-0)
debian/nova-conductor.logrotate (+7/-0)
debian/nova-conductor.manpages (+1/-0)
debian/nova-conductor.postrm (+7/-0)
debian/nova-conductor.upstart.in (+18/-0)
debian/nova-scheduler.install (+2/-0)
debian/nova-volume.default (+0/-4)
debian/nova-volume.dirs (+0/-1)
debian/nova-volume.init (+0/-111)
debian/nova-volume.install (+0/-3)
debian/nova-volume.logrotate (+0/-7)
debian/nova-volume.manpages (+0/-1)
debian/nova-volume.postinst (+0/-21)
debian/nova-volume.postrm (+0/-7)
debian/nova-volume.upstart.in (+0/-18)
debian/nova.conf (+1/-0)
debian/patches/avoid_setuptools_git_dependency.patch (+7/-6)
debian/patches/fix-libvirt-tests.patch (+48/-0)
debian/patches/fix-ubuntu-tests.patch (+3/-5)
debian/patches/rbd-security.patch (+0/-43)
debian/patches/series (+2/-6)
debian/patches/ubuntu-show-tests.patch (+15/-0)
debian/patches/ubuntu/fix-ec2-volume-id-mappings.patch (+0/-43)
debian/patches/ubuntu/fix-libvirt-firewall-slowdown.patch (+0/-106)
debian/patches/ubuntu/ubuntu-fix-32-64-bit-iss.patch (+0/-75)
debian/patches/ubuntu/ubuntu-fix-ec2-instance-id-mappings.patch (+0/-17)
debian/rules (+4/-8)
doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json (+8/-8)
doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml (+3/-3)
doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json (+8/-8)
doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml (+3/-3)
doc/api_samples/all_extensions/extensions-get-resp.json (+9/-9)
doc/api_samples/all_extensions/extensions-get-resp.xml (+4/-4)
doc/api_samples/all_extensions/server-action-changepassword.json (+5/-0)
doc/api_samples/all_extensions/server-action-changepassword.xml (+4/-0)
doc/api_samples/all_extensions/server-action-confirmresize.json (+3/-0)
doc/api_samples/all_extensions/server-action-confirmresize.xml (+3/-0)
doc/api_samples/all_extensions/server-action-createimage.json (+8/-0)
doc/api_samples/all_extensions/server-action-createimage.xml (+8/-0)
doc/api_samples/all_extensions/server-action-reboot.json (+5/-0)
doc/api_samples/all_extensions/server-action-reboot.xml (+4/-0)
doc/api_samples/all_extensions/server-action-rebuild-resp.json (+56/-0)
doc/api_samples/all_extensions/server-action-rebuild-resp.xml (+19/-0)
doc/api_samples/all_extensions/server-action-rebuild.json (+18/-0)
doc/api_samples/all_extensions/server-action-rebuild.xml (+25/-0)
doc/api_samples/all_extensions/server-action-resize.json (+5/-0)
doc/api_samples/all_extensions/server-action-resize.xml (+4/-0)
doc/api_samples/all_extensions/server-action-revertresize.json (+3/-0)
doc/api_samples/all_extensions/server-action-revertresize.xml (+3/-0)
doc/api_samples/all_extensions/server-get-resp.json (+8/-8)
doc/api_samples/all_extensions/server-get-resp.xml (+3/-3)
doc/api_samples/all_extensions/servers-details-resp.json (+9/-8)
doc/api_samples/all_extensions/servers-details-resp.xml (+3/-3)
doc/api_samples/limit-get-resp.json (+0/-2)
doc/api_samples/limit-get-resp.xml (+0/-2)
doc/api_samples/os-admin-actions/admin-actions-backup-server.json (+7/-0)
doc/api_samples/os-admin-actions/admin-actions-backup-server.xml (+6/-0)
doc/api_samples/os-admin-actions/admin-actions-inject-network-info.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-inject-network-info.xml (+2/-0)
doc/api_samples/os-admin-actions/admin-actions-live-migrate.json (+7/-0)
doc/api_samples/os-admin-actions/admin-actions-live-migrate.xml (+6/-0)
doc/api_samples/os-admin-actions/admin-actions-lock-server.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-lock-server.xml (+2/-0)
doc/api_samples/os-admin-actions/admin-actions-lock.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-migrate.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-migrate.xml (+2/-0)
doc/api_samples/os-admin-actions/admin-actions-pause.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-pause.xml (+2/-0)
doc/api_samples/os-admin-actions/admin-actions-reset-network.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-reset-network.xml (+2/-0)
doc/api_samples/os-admin-actions/admin-actions-reset-server-state.json (+5/-0)
doc/api_samples/os-admin-actions/admin-actions-reset-server-state.xml (+4/-0)
doc/api_samples/os-admin-actions/admin-actions-resume.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-resume.xml (+2/-0)
doc/api_samples/os-admin-actions/admin-actions-suspend.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-suspend.xml (+2/-0)
doc/api_samples/os-admin-actions/admin-actions-unlock-server.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-unlock-server.xml (+2/-0)
doc/api_samples/os-admin-actions/admin-actions-unlock.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-unpause.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-unpause.xml (+2/-0)
doc/api_samples/os-admin-actions/server-post-req.json (+16/-0)
doc/api_samples/os-admin-actions/server-post-req.xml (+19/-0)
doc/api_samples/os-admin-actions/server-post-resp.json (+16/-0)
doc/api_samples/os-admin-actions/server-post-resp.xml (+6/-0)
doc/api_samples/os-aggregates/aggregate-add-host-post-req.json (+6/-0)
doc/api_samples/os-aggregates/aggregate-add-host-post-req.xml (+2/-0)
doc/api_samples/os-aggregates/aggregate-metadata-post-req.json (+9/-0)
doc/api_samples/os-aggregates/aggregate-metadata-post-req.xml (+6/-0)
doc/api_samples/os-aggregates/aggregate-post-req.json (+7/-0)
doc/api_samples/os-aggregates/aggregate-post-req.xml (+2/-0)
doc/api_samples/os-aggregates/aggregate-post-resp.json (+11/-0)
doc/api_samples/os-aggregates/aggregate-post-resp.xml (+10/-0)
doc/api_samples/os-aggregates/aggregate-remove-host-post-req.json (+6/-0)
doc/api_samples/os-aggregates/aggregate-remove-host-post-req.xml (+2/-0)
doc/api_samples/os-aggregates/aggregate-update-post-req.json (+7/-0)
doc/api_samples/os-aggregates/aggregate-update-post-req.xml (+2/-0)
doc/api_samples/os-aggregates/aggregate-update-post-resp.json (+13/-0)
doc/api_samples/os-aggregates/aggregate-update-post-resp.xml (+12/-0)
doc/api_samples/os-aggregates/aggregates-add-host-post-resp.json (+15/-0)
doc/api_samples/os-aggregates/aggregates-add-host-post-resp.xml (+14/-0)
doc/api_samples/os-aggregates/aggregates-get-resp.json (+13/-0)
doc/api_samples/os-aggregates/aggregates-get-resp.xml (+12/-0)
doc/api_samples/os-aggregates/aggregates-list-get-resp.json (+15/-0)
doc/api_samples/os-aggregates/aggregates-list-get-resp.xml (+14/-0)
doc/api_samples/os-aggregates/aggregates-metadata-post-resp.json (+15/-0)
doc/api_samples/os-aggregates/aggregates-metadata-post-resp.xml (+14/-0)
doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.json (+13/-0)
doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml (+12/-0)
doc/api_samples/os-aggregates/server-post-req.json (+16/-0)
doc/api_samples/os-aggregates/server-post-resp.json (+16/-0)
doc/api_samples/os-availability-zone/availability-zone-post-req.json (+17/-0)
doc/api_samples/os-availability-zone/availability-zone-post-req.xml (+23/-0)
doc/api_samples/os-availability-zone/availability-zone-post-resp.json (+16/-0)
doc/api_samples/os-availability-zone/availability-zone-post-resp.xml (+6/-0)
doc/api_samples/os-certificates/certificate-create-resp.json (+6/-0)
doc/api_samples/os-certificates/certificate-create-resp.xml (+2/-0)
doc/api_samples/os-certificates/certificate-get-root-resp.json (+6/-0)
doc/api_samples/os-certificates/certificate-get-root-resp.xml (+2/-0)
doc/api_samples/os-cloudpipe/cloud-pipe-create-req.json (+5/-0)
doc/api_samples/os-cloudpipe/cloud-pipe-create-req.xml (+3/-0)
doc/api_samples/os-cloudpipe/cloud-pipe-create-resp.json (+5/-0)
doc/api_samples/os-cloudpipe/cloud-pipe-create-resp.xml (+4/-0)
doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json (+15/-0)
doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.xml (+12/-0)
doc/api_samples/os-consoles/get-vnc-console-post-req.json (+5/-0)
doc/api_samples/os-consoles/get-vnc-console-post-req.xml (+2/-0)
doc/api_samples/os-consoles/get-vnc-console-post-resp.json (+6/-0)
doc/api_samples/os-consoles/get-vnc-console-post-resp.xml (+5/-0)
doc/api_samples/os-consoles/server-post-req.json (+16/-0)
doc/api_samples/os-consoles/server-post-req.xml (+19/-0)
doc/api_samples/os-consoles/server-post-resp.json (+16/-0)
doc/api_samples/os-consoles/server-post-resp.xml (+6/-0)
doc/api_samples/os-hosts/host-get-resp.json (+31/-0)
doc/api_samples/os-hosts/host-get-resp.xml (+24/-0)
doc/api_samples/os-hosts/hosts-list-resp.json (+24/-0)
doc/api_samples/os-hosts/hosts-list-resp.xml (+7/-0)
doc/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json (+18/-0)
doc/api_samples/os-multiple-create/multiple-create-no-resv-post-req.xml (+24/-0)
doc/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json (+16/-0)
doc/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.xml (+6/-0)
doc/api_samples/os-multiple-create/multiple-create-post-req.json (+19/-0)
doc/api_samples/os-multiple-create/multiple-create-post-req.xml (+25/-0)
doc/api_samples/os-multiple-create/multiple-create-post-resp.json (+3/-0)
doc/api_samples/os-multiple-create/multiple-create-post-resp.xml (+4/-0)
doc/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json (+17/-0)
doc/api_samples/os-server-diagnostics/server-diagnostics-get-resp.xml (+18/-0)
doc/api_samples/os-server-diagnostics/server-post-req.json (+16/-0)
doc/api_samples/os-server-diagnostics/server-post-req.xml (+19/-0)
doc/api_samples/os-server-diagnostics/server-post-resp.json (+16/-0)
doc/api_samples/os-server-diagnostics/server-post-resp.xml (+6/-0)
doc/api_samples/os-server-start-stop/server-post-req.json (+16/-0)
doc/api_samples/os-server-start-stop/server-post-req.xml (+19/-0)
doc/api_samples/os-server-start-stop/server-post-resp.json (+16/-0)
doc/api_samples/os-server-start-stop/server-post-resp.xml (+6/-0)
doc/api_samples/os-server-start-stop/server_start_stop.xml (+1/-1)
doc/api_samples/os-simple-tenant-usage/server-post-req.json (+16/-0)
doc/api_samples/os-simple-tenant-usage/server-post-req.xml (+19/-0)
doc/api_samples/os-simple-tenant-usage/server-post-resp.json (+16/-0)
doc/api_samples/os-simple-tenant-usage/server-post-resp.xml (+6/-0)
doc/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json (+27/-0)
doc/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.xml (+26/-0)
doc/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json (+13/-0)
doc/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.xml (+13/-0)
doc/api_samples/os-used-limits/usedlimits-get-resp.json (+77/-0)
doc/api_samples/os-used-limits/usedlimits-get-resp.xml (+34/-0)
doc/api_samples/server-ips-network-resp.json (+8/-0)
doc/api_samples/server-ips-network-resp.xml (+4/-0)
doc/api_samples/server-ips-resp.json (+10/-0)
doc/api_samples/server-ips-resp.xml (+6/-0)
doc/source/conf.py (+4/-7)
doc/source/devref/architecture.rst (+3/-4)
doc/source/devref/development.environment.rst (+4/-0)
doc/source/devref/fakes.rst (+0/-10)
doc/source/devref/filter_scheduler.rst (+23/-14)
doc/source/devref/index.rst (+0/-2)
doc/source/devref/rpc.rst (+4/-6)
doc/source/devref/scheduler.rst (+0/-10)
doc/source/devref/threading.rst (+1/-1)
doc/source/devref/volume.rst (+0/-66)
doc/source/devref/xensmvolume.rst (+0/-88)
doc/source/man/nova-api-os-volume.rst (+0/-49)
doc/source/man/nova-conductor.rst (+45/-0)
doc/source/man/nova-rootwrap.rst (+2/-2)
doc/source/man/nova-volume-usage-audit.rst (+0/-61)
doc/source/man/nova-volume.rst (+0/-54)
etc/nova/api-paste.ini (+2/-2)
etc/nova/logging_sample.conf (+2/-2)
etc/nova/nova.conf.sample (+156/-298)
etc/nova/policy.json (+123/-118)
etc/nova/rootwrap.d/volume.filters (+0/-36)
nova.egg-info/PKG-INFO (+10/-2)
nova.egg-info/SOURCES.txt (+438/-174)
nova.egg-info/requires.txt (+4/-3)
nova/__init__.py (+0/-6)
nova/api/auth.py (+8/-7)
nova/api/ec2/__init__.py (+27/-16)
nova/api/ec2/apirequest.py (+0/-2)
nova/api/ec2/cloud.py (+57/-38)
nova/api/ec2/ec2utils.py (+39/-12)
nova/api/ec2/faults.py (+3/-3)
nova/api/manager.py (+4/-3)
nova/api/metadata/base.py (+16/-11)
nova/api/metadata/handler.py (+8/-6)
nova/api/openstack/__init__.py (+21/-6)
nova/api/openstack/auth.py (+4/-4)
nova/api/openstack/common.py (+11/-9)
nova/api/openstack/compute/__init__.py (+74/-63)
nova/api/openstack/compute/contrib/__init__.py (+4/-3)
nova/api/openstack/compute/contrib/admin_actions.py (+11/-9)
nova/api/openstack/compute/contrib/certificates.py (+0/-3)
nova/api/openstack/compute/contrib/cloudpipe.py (+16/-9)
nova/api/openstack/compute/contrib/config_drive.py (+1/-4)
nova/api/openstack/compute/contrib/deferred_delete.py (+4/-0)
nova/api/openstack/compute/contrib/extended_server_attributes.py (+0/-3)
nova/api/openstack/compute/contrib/extended_status.py (+0/-3)
nova/api/openstack/compute/contrib/fixed_ips.py (+98/-0)
nova/api/openstack/compute/contrib/flavormanage.py (+2/-1)
nova/api/openstack/compute/contrib/floating_ips.py (+42/-20)
nova/api/openstack/compute/contrib/fping.py (+161/-0)
nova/api/openstack/compute/contrib/hosts.py (+9/-11)
nova/api/openstack/compute/contrib/instance_usage_audit_log.py (+4/-3)
nova/api/openstack/compute/contrib/networks.py (+2/-5)
nova/api/openstack/compute/contrib/rescue.py (+4/-3)
nova/api/openstack/compute/contrib/security_groups.py (+2/-4)
nova/api/openstack/compute/contrib/services.py (+142/-0)
nova/api/openstack/compute/contrib/simple_tenant_usage.py (+0/-3)
nova/api/openstack/compute/contrib/volumes.py (+11/-21)
nova/api/openstack/compute/contrib/volumetypes.py (+0/-225)
nova/api/openstack/compute/extensions.py (+4/-3)
nova/api/openstack/compute/image_metadata.py (+0/-4)
nova/api/openstack/compute/images.py (+0/-2)
nova/api/openstack/compute/ips.py (+0/-3)
nova/api/openstack/compute/limits.py (+1/-0)
nova/api/openstack/compute/server_metadata.py (+3/-0)
nova/api/openstack/compute/servers.py (+68/-30)
nova/api/openstack/compute/views/addresses.py (+0/-3)
nova/api/openstack/compute/views/images.py (+4/-4)
nova/api/openstack/compute/views/limits.py (+0/-2)
nova/api/openstack/compute/views/versions.py (+6/-5)
nova/api/openstack/extensions.py (+0/-6)
nova/api/openstack/volume/__init__.py (+0/-64)
nova/api/openstack/volume/contrib/__init__.py (+0/-39)
nova/api/openstack/volume/contrib/admin_actions.py (+0/-129)
nova/api/openstack/volume/contrib/image_create.py (+0/-31)
nova/api/openstack/volume/contrib/types_extra_specs.py (+0/-149)
nova/api/openstack/volume/contrib/types_manage.py (+0/-91)
nova/api/openstack/volume/contrib/volume_actions.py (+0/-131)
nova/api/openstack/volume/extensions.py (+0/-34)
nova/api/openstack/volume/snapshots.py (+0/-180)
nova/api/openstack/volume/types.py (+0/-80)
nova/api/openstack/volume/versions.py (+0/-83)
nova/api/openstack/volume/views/__init__.py (+0/-16)
nova/api/openstack/volume/views/types.py (+0/-34)
nova/api/openstack/volume/views/versions.py (+0/-36)
nova/api/openstack/volume/volumes.py (+0/-359)
nova/api/openstack/wsgi.py (+26/-6)
nova/api/sizelimit.py (+4/-5)
nova/auth/__init__.py (+0/-28)
nova/auth/fakeldap.py (+0/-328)
nova/block_device.py (+9/-1)
nova/cert/manager.py (+0/-2)
nova/cert/rpcapi.py (+4/-4)
nova/cloudpipe/__init__.py (+0/-3)
nova/cloudpipe/pipelib.py (+22/-17)
nova/common/deprecated.py (+0/-55)
nova/common/eventlet_backdoor.py (+0/-80)
nova/compat/__init__.py (+0/-15)
nova/compat/flagfile.py (+0/-188)
nova/compute/__init__.py (+7/-3)
nova/compute/api.py (+350/-222)
nova/compute/claims.py (+220/-0)
nova/compute/instance_types.py (+16/-6)
nova/compute/manager.py (+750/-335)
nova/compute/resource_tracker.py (+280/-315)
nova/compute/rpcapi.py (+103/-41)
nova/compute/stats.py (+4/-0)
nova/compute/task_states.py (+10/-2)
nova/compute/utils.py (+14/-9)
nova/conductor/__init__.py (+25/-0)
nova/conductor/api.py (+60/-0)
nova/conductor/manager.py (+51/-0)
nova/conductor/rpcapi.py (+43/-0)
nova/config.py (+386/-0)
nova/console/api.py (+7/-7)
nova/console/manager.py (+6/-7)
nova/console/rpcapi.py (+4/-4)
nova/console/vmrc.py (+3/-4)
nova/console/vmrc_manager.py (+7/-16)
nova/console/xvp.py (+14/-13)
nova/consoleauth/__init__.py (+2/-3)
nova/consoleauth/manager.py (+6/-6)
nova/consoleauth/rpcapi.py (+3/-4)
nova/context.py (+2/-2)
nova/crypto.py (+18/-17)
nova/db/api.py (+35/-370)
nova/db/base.py (+3/-5)
nova/db/migration.py (+1/-1)
nova/db/sqlalchemy/api.py (+329/-1035)
nova/db/sqlalchemy/migrate_repo/versions/082_essex.py (+0/-996)
nova/db/sqlalchemy/migrate_repo/versions/083_quota_class.py (+0/-63)
nova/db/sqlalchemy/migrate_repo/versions/084_quotas_unlimited.py (+0/-42)
nova/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py (+0/-33)
nova/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py (+0/-44)
nova/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py (+0/-58)
nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py (+0/-80)
nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql (+0/-97)
nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql (+0/-97)
nova/db/sqlalchemy/migrate_repo/versions/089_add_volume_id_mappings.py (+0/-117)
nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py (+0/-237)
nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_downgrade.sql (+0/-226)
nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_upgrade.sql (+0/-226)
nova/db/sqlalchemy/migrate_repo/versions/091_convert_volume_ids_to_uuid.py (+0/-218)
nova/db/sqlalchemy/migrate_repo/versions/092_add_instance_system_metadata.py (+0/-73)
nova/db/sqlalchemy/migrate_repo/versions/093_drop_instance_actions_table.py (+0/-54)
nova/db/sqlalchemy/migrate_repo/versions/094_update_postgresql_sequence_names.py (+0/-54)
nova/db/sqlalchemy/migrate_repo/versions/095_change_fk_instance_id_to_uuid.py (+0/-94)
nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_downgrade.sql (+0/-133)
nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_upgrade.sql (+0/-132)
nova/db/sqlalchemy/migrate_repo/versions/096_recreate_dns_domains.py (+0/-145)
nova/db/sqlalchemy/migrate_repo/versions/097_quota_usages_reservations.py (+0/-106)
nova/db/sqlalchemy/migrate_repo/versions/098_update_volume_attach_time.py (+0/-72)
nova/db/sqlalchemy/migrate_repo/versions/099_add_disabled_instance_types.py (+0/-40)
nova/db/sqlalchemy/migrate_repo/versions/100_instance_metadata_uses_uuid.py (+0/-80)
nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_downgrade.sql (+0/-64)
nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_upgrade.sql (+0/-64)
nova/db/sqlalchemy/migrate_repo/versions/101_security_group_instance_association_uses_uuid.py (+0/-80)
nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_downgrade.sql (+0/-61)
nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_upgrade.sql (+0/-61)
nova/db/sqlalchemy/migrate_repo/versions/102_consoles_uses_uuid.py (+0/-80)
nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_downgrade.sql (+0/-72)
nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_upgrade.sql (+0/-72)
nova/db/sqlalchemy/migrate_repo/versions/103_instance_indexes.py (+0/-43)
nova/db/sqlalchemy/migrate_repo/versions/104_instance_indexes_2.py (+0/-43)
nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_downgrade.sql (+0/-1)
nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_upgrade.sql (+0/-1)
nova/db/sqlalchemy/migrate_repo/versions/105_instance_info_caches_uses_uuid.py (+0/-70)
nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_downgrade.sql (+0/-50)
nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_upgrade.sql (+0/-50)
nova/db/sqlalchemy/migrate_repo/versions/106_add_foreign_keys.py (+0/-67)
nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_downgrade.sql (+0/-1)
nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_upgrade.sql (+0/-1)
nova/db/sqlalchemy/migrate_repo/versions/107_add_instance_id_mappings.py (+0/-67)
nova/db/sqlalchemy/migrate_repo/versions/108_task_log.py (+0/-62)
nova/db/sqlalchemy/migrate_repo/versions/109_drop_dns_domains_project_id_fkey.py (+0/-63)
nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_downgrade.sql (+0/-53)
nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_upgrade.sql (+0/-52)
nova/db/sqlalchemy/migrate_repo/versions/110_drop_deprecated_auth.py (+0/-189)
nova/db/sqlalchemy/migrate_repo/versions/111_general_aggregates.py (+0/-72)
nova/db/sqlalchemy/migrate_repo/versions/112_update_deleted_instance_data.py (+0/-69)
nova/db/sqlalchemy/migrate_repo/versions/113_fixed_ips_uses_uuid.py (+0/-108)
nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_downgrade.sql (+0/-85)
nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_upgrade.sql (+0/-85)
nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_downgrade.sql (+0/-71)
nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_upgrade.sql (+0/-71)
nova/db/sqlalchemy/migrate_repo/versions/114_vifs_uses_uuid.py (+0/-108)
nova/db/sqlalchemy/migrate_repo/versions/115_make_user_quotas_key_and_value.py (+0/-94)
nova/db/sqlalchemy/migrate_repo/versions/116_drop_user_quotas_key_and_value.py (+0/-98)
nova/db/sqlalchemy/migrate_repo/versions/117_add_compute_node_stats.py (+0/-61)
nova/db/sqlalchemy/migrate_repo/versions/118_add_indexes_to_agent_builds.py (+0/-44)
nova/db/sqlalchemy/migrate_repo/versions/119_add_indexes_to_aggregate_metadata.py (+0/-42)
nova/db/sqlalchemy/migrate_repo/versions/120_add_indexes_to_block_device_mapping.py (+0/-71)
nova/db/sqlalchemy/migrate_repo/versions/121_add_indexes_to_bw_usage_cache.py (+0/-44)
nova/db/sqlalchemy/migrate_repo/versions/122_add_indexes_to_certificates.py (+0/-59)
nova/db/sqlalchemy/migrate_repo/versions/123_add_indexes_to_dns_domains.py (+0/-44)
nova/db/sqlalchemy/migrate_repo/versions/124_add_indexes_to_fixed_ips.py (+0/-76)
nova/db/sqlalchemy/migrate_repo/versions/125_add_indexes_to_floating_ips.py (+0/-68)
nova/db/sqlalchemy/migrate_repo/versions/126_add_indexes_to_instance_faults.py (+0/-44)
nova/db/sqlalchemy/migrate_repo/versions/127_add_indexes_to_instance_type_extra_specs.py (+0/-44)
nova/db/sqlalchemy/migrate_repo/versions/128_add_indexes_to_instances.py (+0/-96)
nova/db/sqlalchemy/migrate_repo/versions/129_add_indexes_to_iscsi_targets.py (+0/-57)
nova/db/sqlalchemy/migrate_repo/versions/130_add_indexes_to_key_pairs.py (+0/-44)
nova/db/sqlalchemy/migrate_repo/versions/131_add_indexes_to_networks.py (+0/-107)
nova/db/sqlalchemy/migrate_repo/versions/132_add_instance_type_projects.py (+0/-67)
nova/db/sqlalchemy/migrate_repo/versions/133_aggregate_delete_fix.py (+0/-48)
nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py (+1225/-0)
nova/db/sqlalchemy/migrate_repo/versions/134_add_counters_to_bw_usage_cache.py (+60/-0)
nova/db/sqlalchemy/migrate_repo/versions/135_add_node_to_instances.py (+55/-0)
nova/db/sqlalchemy/migrate_repo/versions/136_add_index_to_instances.py (+41/-0)
nova/db/sqlalchemy/migrate_repo/versions/137_add_indexes_to_migrations.py (+46/-0)
nova/db/sqlalchemy/migrate_repo/versions/138_drop_server_name_from_instances.py (+39/-0)
nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_downgrade.sql (+239/-0)
nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_upgrade.sql (+239/-0)
nova/db/sqlalchemy/migrate_repo/versions/139_add_indexes_to_fixed_ips.py (+46/-0)
nova/db/sqlalchemy/migrate_repo/versions/140_drop_unused_postgresql_volume_sequences.py (+61/-0)
nova/db/sqlalchemy/migrate_repo/versions/141_update_migrations_instance_uuid.py (+33/-0)
nova/db/sqlalchemy/migration.py (+0/-3)
nova/db/sqlalchemy/models.py (+23/-84)
nova/db/sqlalchemy/session.py (+244/-67)
nova/exception.py (+32/-127)
nova/filters.py (+53/-0)
nova/flags.py (+0/-424)
nova/image/glance.py (+15/-11)
nova/image/s3.py (+11/-10)
nova/ipv6/api.py (+2/-3)
nova/loadables.py (+116/-0)
nova/locale/bs/LC_MESSAGES/nova.po (+1447/-1344)
nova/locale/cs/LC_MESSAGES/nova.po (+1457/-1342)
nova/locale/da/LC_MESSAGES/nova.po (+1447/-1344)
nova/locale/de/LC_MESSAGES/nova.po (+1449/-1347)
nova/locale/en_AU/LC_MESSAGES/nova.po (+1454/-1347)
nova/locale/en_GB/LC_MESSAGES/nova.po (+1464/-1361)
nova/locale/en_US/LC_MESSAGES/nova.po (+1905/-1800)
nova/locale/es/LC_MESSAGES/nova.po (+1454/-1347)
nova/locale/fr/LC_MESSAGES/nova.po (+1455/-1349)
nova/locale/it/LC_MESSAGES/nova.po (+1450/-1347)
nova/locale/ja/LC_MESSAGES/nova.po (+1454/-1347)
nova/locale/ko/LC_MESSAGES/nova.po (+1450/-1346)
nova/locale/nb/LC_MESSAGES/nova.po (+1448/-1344)
nova/locale/nova.pot (+1949/-3656)
nova/locale/pt_BR/LC_MESSAGES/nova.po (+1456/-1347)
nova/locale/ru/LC_MESSAGES/nova.po (+1458/-1344)
nova/locale/tl/LC_MESSAGES/nova.po (+1447/-1344)
nova/locale/tr/LC_MESSAGES/nova.po (+1447/-1344)
nova/locale/uk/LC_MESSAGES/nova.po (+1447/-1344)
nova/locale/zh_CN/LC_MESSAGES/nova.po (+1453/-1338)
nova/locale/zh_TW/LC_MESSAGES/nova.po (+1450/-1346)
nova/manager.py (+44/-16)
nova/network/__init__.py (+8/-5)
nova/network/api.py (+117/-150)
nova/network/l3.py (+0/-3)
nova/network/ldapdns.py (+14/-27)
nova/network/linux_net.py (+96/-79)
nova/network/manager.py (+242/-128)
nova/network/minidns.py (+5/-3)
nova/network/quantum/nova_ipam_lib.py (+0/-4)
nova/network/quantumv2/__init__.py (+19/-20)
nova/network/quantumv2/api.py (+226/-22)
nova/network/rpcapi.py (+299/-0)
nova/notifications.py (+30/-10)
nova/objectstore/__init__.py (+0/-6)
nova/objectstore/s3server.py (+10/-9)
nova/openstack/common/cfg.py (+3/-3)
nova/openstack/common/eventlet_backdoor.py (+80/-0)
nova/openstack/common/fileutils.py (+35/-0)
nova/openstack/common/gettextutils.py (+1/-1)
nova/openstack/common/lockutils.py (+232/-0)
nova/openstack/common/log.py (+22/-5)
nova/openstack/common/network_utils.py (+68/-0)
nova/openstack/common/notifier/api.py (+3/-3)
nova/openstack/common/notifier/rabbit_notifier.py (+7/-24)
nova/openstack/common/notifier/rpc_notifier.py (+46/-0)
nova/openstack/common/policy.py (+712/-233)
nova/openstack/common/rpc/__init__.py (+10/-4)
nova/openstack/common/rpc/amqp.py (+8/-0)
nova/openstack/common/rpc/impl_kombu.py (+90/-48)
nova/openstack/common/rpc/impl_qpid.py (+47/-66)
nova/openstack/common/rpc/impl_zmq.py (+1/-1)
nova/openstack/common/rpc/service.py (+70/-0)
nova/openstack/common/setup.py (+24/-16)
nova/openstack/common/timeutils.py (+16/-5)
nova/openstack/common/uuidutils.py (+39/-0)
nova/policy.py (+47/-36)
nova/quota.py (+54/-23)
nova/rootwrap/filters.py (+8/-6)
nova/scheduler/baremetal_host_manager.py (+71/-0)
nova/scheduler/chance.py (+7/-14)
nova/scheduler/driver.py (+23/-46)
nova/scheduler/filter_scheduler.py (+54/-106)
nova/scheduler/filters/__init__.py (+30/-60)
nova/scheduler/filters/compute_capabilities_filter.py (+13/-4)
nova/scheduler/filters/compute_filter.py (+2/-3)
nova/scheduler/filters/core_filter.py (+4/-5)
nova/scheduler/filters/disk_filter.py (+3/-4)
nova/scheduler/filters/image_props_filter.py (+1/-1)
nova/scheduler/filters/io_ops_filter.py (+43/-0)
nova/scheduler/filters/isolated_hosts_filter.py (+6/-6)
nova/scheduler/filters/num_instances_filter.py (+41/-0)
nova/scheduler/filters/ram_filter.py (+3/-4)
nova/scheduler/filters/trusted_filter.py (+19/-14)
nova/scheduler/host_manager.py (+195/-94)
nova/scheduler/least_cost.py (+0/-117)
nova/scheduler/manager.py (+16/-16)
nova/scheduler/multi.py (+10/-14)
nova/scheduler/rpcapi.py (+5/-11)
nova/scheduler/scheduler_options.py (+3/-4)
nova/scheduler/simple.py (+0/-97)
nova/scheduler/weights/__init__.py (+61/-0)
nova/scheduler/weights/least_cost.py (+125/-0)
nova/scheduler/weights/ram.py (+45/-0)
nova/service.py (+44/-36)
nova/test.py (+27/-204)
nova/testing/README.rst (+25/-5)
nova/tests/__init__.py (+26/-29)
nova/tests/api/ec2/test_cinder_cloud.py (+35/-45)
nova/tests/api/ec2/test_cloud.py (+138/-588)
nova/tests/api/ec2/test_ec2_validate.py (+83/-3)
nova/tests/api/ec2/test_middleware.py (+10/-10)
nova/tests/api/openstack/compute/contrib/test_admin_actions.py (+63/-26)
nova/tests/api/openstack/compute/contrib/test_aggregates.py (+3/-1)
nova/tests/api/openstack/compute/contrib/test_cloudpipe.py (+11/-11)
nova/tests/api/openstack/compute/contrib/test_config_drive.py (+6/-2)
nova/tests/api/openstack/compute/contrib/test_console_output.py (+19/-12)
nova/tests/api/openstack/compute/contrib/test_consoles.py (+19/-14)
nova/tests/api/openstack/compute/contrib/test_createserverext.py (+77/-87)
nova/tests/api/openstack/compute/contrib/test_deferred_delete.py (+20/-19)
nova/tests/api/openstack/compute/contrib/test_disk_config.py (+5/-5)
nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py (+5/-6)
nova/tests/api/openstack/compute/contrib/test_extended_status.py (+5/-6)
nova/tests/api/openstack/compute/contrib/test_fixed_ips.py (+164/-0)
nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py (+0/-5)
nova/tests/api/openstack/compute/contrib/test_flavor_manage.py (+10/-4)
nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py (+0/-5)
nova/tests/api/openstack/compute/contrib/test_flavor_swap.py (+0/-5)
nova/tests/api/openstack/compute/contrib/test_flavorextradata.py (+2/-2)
nova/tests/api/openstack/compute/contrib/test_floating_ips.py (+131/-4)
nova/tests/api/openstack/compute/contrib/test_fping.py (+94/-0)
nova/tests/api/openstack/compute/contrib/test_hosts.py (+29/-17)
nova/tests/api/openstack/compute/contrib/test_keypairs.py (+22/-17)
nova/tests/api/openstack/compute/contrib/test_multinic_xs.py (+9/-4)
nova/tests/api/openstack/compute/contrib/test_networks.py (+4/-7)
nova/tests/api/openstack/compute/contrib/test_quota_classes.py (+7/-19)
nova/tests/api/openstack/compute/contrib/test_quotas.py (+5/-20)
nova/tests/api/openstack/compute/contrib/test_rescue.py (+14/-8)
nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py (+5/-1)
nova/tests/api/openstack/compute/contrib/test_security_groups.py (+9/-7)
nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py (+8/-6)
nova/tests/api/openstack/compute/contrib/test_server_start_stop.py (+7/-11)
nova/tests/api/openstack/compute/contrib/test_services.py (+198/-0)
nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py (+16/-10)
nova/tests/api/openstack/compute/contrib/test_snapshots.py (+22/-18)
nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py (+6/-1)
nova/tests/api/openstack/compute/contrib/test_volume_types.py (+0/-224)
nova/tests/api/openstack/compute/contrib/test_volume_types_extra_specs.py (+0/-198)
nova/tests/api/openstack/compute/contrib/test_volumes.py (+43/-32)
nova/tests/api/openstack/compute/test_consoles.py (+8/-9)
nova/tests/api/openstack/compute/test_extensions.py (+47/-17)
nova/tests/api/openstack/compute/test_flavors.py (+6/-8)
nova/tests/api/openstack/compute/test_image_metadata.py (+4/-5)
nova/tests/api/openstack/compute/test_images.py (+7/-10)
nova/tests/api/openstack/compute/test_limits.py (+3/-6)
nova/tests/api/openstack/compute/test_server_actions.py (+127/-46)
nova/tests/api/openstack/compute/test_server_metadata.py (+17/-16)
nova/tests/api/openstack/compute/test_servers.py (+340/-181)
nova/tests/api/openstack/compute/test_urlmap.py (+8/-8)
nova/tests/api/openstack/compute/test_versions.py (+8/-4)
nova/tests/api/openstack/fakes.py (+13/-12)
nova/tests/api/openstack/test_common.py (+4/-8)
nova/tests/api/openstack/volume/__init__.py (+0/-19)
nova/tests/api/openstack/volume/contrib/__init__.py (+0/-19)
nova/tests/api/openstack/volume/contrib/test_admin_actions.py (+0/-184)
nova/tests/api/openstack/volume/contrib/test_types_extra_specs.py (+0/-226)
nova/tests/api/openstack/volume/contrib/test_types_manage.py (+0/-122)
nova/tests/api/openstack/volume/contrib/test_volume_actions.py (+0/-162)
nova/tests/api/openstack/volume/extensions/__init__.py (+0/-15)
nova/tests/api/openstack/volume/extensions/foxinsocks.py (+0/-94)
nova/tests/api/openstack/volume/test_extensions.py (+0/-155)
nova/tests/api/openstack/volume/test_router.py (+0/-112)
nova/tests/api/openstack/volume/test_snapshots.py (+0/-285)
nova/tests/api/openstack/volume/test_types.py (+0/-194)
nova/tests/api/openstack/volume/test_volumes.py (+0/-602)
nova/tests/api/test_sizelimit.py (+3/-3)
nova/tests/baremetal/db/__init__.py (+14/-0)
nova/tests/baremetal/db/base.py (+51/-0)
nova/tests/baremetal/db/test_bm_interface.py (+47/-0)
nova/tests/baremetal/db/test_bm_node.py (+140/-0)
nova/tests/baremetal/db/test_bm_pxe_ip.py (+93/-0)
nova/tests/baremetal/db/utils.py (+81/-0)
nova/tests/baremetal/test_proxy_bare_metal.py (+0/-269)
nova/tests/baremetal/test_tilera.py (+0/-84)
nova/tests/cert/test_rpcapi.py (+4/-11)
nova/tests/compute/test_claims.py (+162/-0)
nova/tests/compute/test_compute.py (+1212/-514)
nova/tests/compute/test_compute_utils.py (+34/-17)
nova/tests/compute/test_multiple_nodes.py (+100/-0)
nova/tests/compute/test_resource_tracker.py (+480/-278)
nova/tests/compute/test_rpcapi.py (+49/-29)
nova/tests/conductor/test_conductor.py (+133/-0)
nova/tests/console/test_console.py (+5/-4)
nova/tests/console/test_rpcapi.py (+4/-11)
nova/tests/consoleauth/test_consoleauth.py (+0/-3)
nova/tests/consoleauth/test_rpcapi.py (+3/-11)
nova/tests/db/fakes.py (+0/-11)
nova/tests/declare_flags.py (+2/-3)
nova/tests/fake_crypto.py (+111/-0)
nova/tests/fake_flags.py (+10/-15)
nova/tests/fake_imagebackend.py (+8/-0)
nova/tests/fake_ldap.py (+328/-0)
nova/tests/fake_libvirt_utils.py (+13/-4)
nova/tests/fake_loadables/__init__.py (+27/-0)
nova/tests/fake_loadables/fake_loadable1.py (+44/-0)
nova/tests/fake_loadables/fake_loadable2.py (+39/-0)
nova/tests/fake_network.py (+17/-16)
nova/tests/fake_volume.py (+15/-7)
nova/tests/fakelibvirt.py (+14/-0)
nova/tests/hyperv/README.rst (+83/-0)
nova/tests/hyperv/basetestcase.py (+3/-1)
nova/tests/hyperv/db_fakes.py (+30/-12)
nova/tests/hyperv/hypervutils.py (+10/-3)
nova/tests/hyperv/mockproxy.py (+43/-4)
nova/tests/image/fake.py (+19/-20)
nova/tests/image/test_fake.py (+1/-1)
nova/tests/image/test_glance.py (+6/-5)
nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json.tpl (+1/-1)
nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml.tpl (+1/-1)
nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json.tpl (+1/-1)
nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml.tpl (+1/-1)
nova/tests/integrated/api_samples/README.rst (+1/-1)
nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl (+24/-8)
nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl (+9/-3)
nova/tests/integrated/api_samples/all_extensions/server-action-changepassword.json.tpl (+5/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-changepassword.xml.tpl (+4/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-confirmresize.json.tpl (+3/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-confirmresize.xml.tpl (+3/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-createimage.json.tpl (+9/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-createimage.xml.tpl (+8/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-reboot.json.tpl (+5/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-reboot.xml.tpl (+4/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-rebuild-resp.json.tpl (+56/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-rebuild-resp.xml.tpl (+39/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-rebuild.json.tpl (+18/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-rebuild.xml.tpl (+25/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-resize.json.tpl (+5/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-resize.xml.tpl (+4/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-revertresize.json.tpl (+3/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-revertresize.xml.tpl (+3/-0)
nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl (+1/-1)
nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl (+1/-1)
nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl (+1/-1)
nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl (+1/-1)
nova/tests/integrated/api_samples/limit-get-resp.json.tpl (+13/-2)
nova/tests/integrated/api_samples/limit-get-resp.xml.tpl (+3/-2)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-backup-server.json.tpl (+7/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-backup-server.xml.tpl (+6/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.json.tpl (+7/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.xml.tpl (+6/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-lock-server.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-lock-server.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-migrate.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-migrate.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-pause.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-pause.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-network.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl (+5/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.xml.tpl (+4/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl (+5/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-state.xml.tpl (+4/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-resume.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-resume.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-suspend.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-suspend.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unpause.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unpause.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-admin-actions/server-post-req.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-admin-actions/server-post-req.xml.tpl (+19/-0)
nova/tests/integrated/api_samples/os-admin-actions/server-post-resp.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-admin-actions/server-post-resp.xml.tpl (+6/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl (+6/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl (+9/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.xml.tpl (+6/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-post-req.json.tpl (+7/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-post-req.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-post-resp.json.tpl (+11/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-post-resp.xml.tpl (+10/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl (+6/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-req.json.tpl (+7/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-req.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl (+13/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl (+12/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl (+15/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl (+14/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl (+13/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl (+12/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl (+15/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl (+14/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl (+15/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl (+14/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl (+13/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl (+12/-0)
nova/tests/integrated/api_samples/os-aggregates/server-post-req.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-aggregates/server-post-req.xml.tpl (+19/-0)
nova/tests/integrated/api_samples/os-aggregates/server-post-resp.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-aggregates/server-post-resp.xml.tpl (+6/-0)
nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-req.json.tpl (+17/-0)
nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-req.xml.tpl (+23/-0)
nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-resp.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-resp.xml.tpl (+8/-0)
nova/tests/integrated/api_samples/os-certificates/certificate-create-resp.json.tpl (+6/-0)
nova/tests/integrated/api_samples/os-certificates/certificate-create-resp.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-certificates/certificate-get-root-resp.json.tpl (+6/-0)
nova/tests/integrated/api_samples/os-certificates/certificate-get-root-resp.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl (+5/-0)
nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.xml.tpl (+3/-0)
nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.xml.tpl (+3/-0)
nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl (+13/-0)
nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.xml.tpl (+12/-0)
nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl (+5/-0)
nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl (+6/-0)
nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl (+5/-0)
nova/tests/integrated/api_samples/os-consoles/server-post-req.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-consoles/server-post-req.xml.tpl (+19/-0)
nova/tests/integrated/api_samples/os-consoles/server-post-resp.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-consoles/server-post-resp.xml.tpl (+6/-0)
nova/tests/integrated/api_samples/os-hosts/host-get-resp.json.tpl (+31/-0)
nova/tests/integrated/api_samples/os-hosts/host-get-resp.xml.tpl (+24/-0)
nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl (+24/-0)
nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl (+7/-0)
nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl (+18/-0)
nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.xml.tpl (+24/-0)
nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.xml.tpl (+8/-0)
nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-req.json.tpl (+19/-0)
nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-req.xml.tpl (+25/-0)
nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-resp.xml.tpl (+4/-0)
nova/tests/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl (+17/-0)
nova/tests/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.xml.tpl (+18/-0)
nova/tests/integrated/api_samples/os-server-diagnostics/server-post-req.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-server-diagnostics/server-post-req.xml.tpl (+19/-0)
nova/tests/integrated/api_samples/os-server-diagnostics/server-post-resp.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-server-diagnostics/server-post-resp.xml.tpl (+6/-0)
nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-req.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-req.xml.tpl (+19/-0)
nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-resp.xml.tpl (+6/-0)
nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl (+27/-0)
nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.xml.tpl (+26/-0)
nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl (+13/-0)
nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.xml.tpl (+13/-0)
nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl (+90/-0)
nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl (+37/-0)
nova/tests/integrated/api_samples/server-action-rebuild-resp.xml.tpl (+2/-8)
nova/tests/integrated/api_samples/server-ips-network-resp.json.tpl (+8/-0)
nova/tests/integrated/api_samples/server-ips-network-resp.xml.tpl (+4/-0)
nova/tests/integrated/api_samples/server-ips-resp.json.tpl (+10/-0)
nova/tests/integrated/api_samples/server-ips-resp.xml.tpl (+6/-0)
nova/tests/integrated/integrated_helpers.py (+6/-6)
nova/tests/integrated/test_api_samples.py (+539/-40)
nova/tests/integrated/test_extensions.py (+4/-4)
nova/tests/integrated/test_multiprocess_api.py (+0/-2)
nova/tests/integrated/test_servers.py (+0/-4)
nova/tests/integrated/test_volumes.py (+0/-181)
nova/tests/matchers.py (+196/-0)
nova/tests/network/test_api.py (+133/-1)
nova/tests/network/test_linux_net.py (+23/-24)
nova/tests/network/test_manager.py (+178/-25)
nova/tests/network/test_quantumv2.py (+338/-28)
nova/tests/network/test_rpcapi.py (+311/-0)
nova/tests/policy.json (+202/-196)
nova/tests/runtime_flags.py (+2/-3)
nova/tests/scheduler/fakes.py (+21/-17)
nova/tests/scheduler/test_chance_scheduler.py (+4/-4)
nova/tests/scheduler/test_filter_scheduler.py (+84/-60)
nova/tests/scheduler/test_host_filters.py (+153/-118)
nova/tests/scheduler/test_host_manager.py (+248/-141)
nova/tests/scheduler/test_least_cost.py (+76/-37)
nova/tests/scheduler/test_multi_scheduler.py (+2/-19)
nova/tests/scheduler/test_rpcapi.py (+4/-17)
nova/tests/scheduler/test_scheduler.py (+5/-108)
nova/tests/scheduler/test_weights.py (+117/-0)
nova/tests/test_api.py (+9/-12)
nova/tests/test_bdm.py (+2/-1)
nova/tests/test_cinder.py (+0/-3)
nova/tests/test_compat_flagfile.py (+0/-175)
nova/tests/test_configdrive2.py (+3/-8)
nova/tests/test_crypto.py (+0/-3)
nova/tests/test_db_api.py (+228/-207)
nova/tests/test_deprecated.py (+0/-46)
nova/tests/test_exception.py (+0/-1)
nova/tests/test_filters.py (+125/-0)
nova/tests/test_flags.py (+36/-40)
nova/tests/test_hypervapi.py (+68/-14)
nova/tests/test_image_utils.py (+218/-0)
nova/tests/test_imagebackend.py (+12/-9)
nova/tests/test_imagecache.py (+146/-95)
nova/tests/test_instance_types.py (+25/-3)
nova/tests/test_iptables_network.py (+1/-1)
nova/tests/test_iscsi.py (+0/-121)
nova/tests/test_libvirt.py (+538/-256)
nova/tests/test_libvirt_config.py (+6/-0)
nova/tests/test_libvirt_utils.py (+38/-0)
nova/tests/test_libvirt_vif.py (+52/-9)
nova/tests/test_loadables.py (+113/-0)
nova/tests/test_matchers.py (+144/-0)
nova/tests/test_metadata.py (+5/-5)
nova/tests/test_migrations.py (+2/-201)
nova/tests/test_misc.py (+0/-94)
nova/tests/test_netapp.py (+0/-1380)
nova/tests/test_netapp_nfs.py (+0/-261)
nova/tests/test_nexenta.py (+0/-281)
nova/tests/test_nfs.py (+0/-630)
nova/tests/test_notifications.py (+31/-4)
nova/tests/test_nova_manage.py (+7/-2)
nova/tests/test_nova_rootwrap.py (+33/-26)
nova/tests/test_objectstore.py (+11/-10)
nova/tests/test_pipelib.py (+5/-5)
nova/tests/test_plugin_api_extensions.py (+0/-1)
nova/tests/test_policy.py (+62/-28)
nova/tests/test_powervm.py (+29/-5)
nova/tests/test_quota.py (+59/-104)
nova/tests/test_rbd.py (+0/-161)
nova/tests/test_service.py (+64/-51)
nova/tests/test_skip_examples.py (+0/-47)
nova/tests/test_solidfire.py (+0/-210)
nova/tests/test_storwize_svc.py (+0/-1239)
nova/tests/test_utils.py (+64/-64)
nova/tests/test_virt.py (+9/-8)
nova/tests/test_virt_drivers.py (+39/-54)
nova/tests/test_vmwareapi.py (+1/-5)
nova/tests/test_volume.py (+0/-929)
nova/tests/test_volume_types.py (+0/-167)
nova/tests/test_volume_types_extra_specs.py (+0/-130)
nova/tests/test_volume_utils.py (+0/-91)
nova/tests/test_xenapi.py (+398/-169)
nova/tests/test_xensm.py (+0/-143)
nova/tests/utils.py (+10/-3)
nova/tests/virt/xenapi/test_volumeops.py (+40/-0)
nova/tests/vmwareapi/db_fakes.py (+2/-1)
nova/tests/volume/test_HpSanISCSIDriver.py (+0/-212)
nova/tests/xenapi/stubs.py (+23/-58)
nova/tests/xenapi/test_vm_utils.py (+143/-3)
nova/utils.py (+95/-235)
nova/version.py (+2/-2)
nova/virt/baremetal/__init__.py (+0/-18)
nova/virt/baremetal/db/__init__.py (+16/-0)
nova/virt/baremetal/db/api.py (+174/-0)
nova/virt/baremetal/db/migration.py (+38/-0)
nova/virt/baremetal/db/sqlalchemy/__init__.py (+14/-0)
nova/virt/baremetal/db/sqlalchemy/api.py (+351/-0)
nova/virt/baremetal/db/sqlalchemy/migrate_repo/__init__.py (+14/-0)
nova/virt/baremetal/db/sqlalchemy/migrate_repo/migrate.cfg (+20/-0)
nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py (+124/-0)
nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/__init__.py (+14/-0)
nova/virt/baremetal/db/sqlalchemy/migration.py (+113/-0)
nova/virt/baremetal/db/sqlalchemy/models.py (+80/-0)
nova/virt/baremetal/db/sqlalchemy/session.py (+58/-0)
nova/virt/baremetal/doc/README.rst (+69/-0)
nova/virt/baremetal/dom.py (+0/-266)
nova/virt/baremetal/driver.py (+0/-744)
nova/virt/baremetal/fake.py (+0/-157)
nova/virt/baremetal/nodes.py (+0/-42)
nova/virt/baremetal/tilera.py (+0/-364)
nova/virt/configdrive.py (+29/-12)
nova/virt/connection.py (+0/-84)
nova/virt/disk/api.py (+12/-20)
nova/virt/disk/guestfs.py (+0/-121)
nova/virt/disk/loop.py (+0/-42)
nova/virt/disk/mount.py (+0/-162)
nova/virt/disk/mount/__init__.py (+19/-0)
nova/virt/disk/mount/api.py (+162/-0)
nova/virt/disk/mount/guestfs.py (+121/-0)
nova/virt/disk/mount/loop.py (+42/-0)
nova/virt/disk/mount/nbd.py (+110/-0)
nova/virt/disk/nbd.py (+0/-111)
nova/virt/disk/vfs/__init__.py (+19/-0)
nova/virt/disk/vfs/api.py (+107/-0)
nova/virt/driver.py (+86/-14)
nova/virt/fake.py (+140/-15)
nova/virt/firewall.py (+32/-30)
nova/virt/hyperv/constants.py (+3/-0)
nova/virt/hyperv/driver.py (+13/-16)
nova/virt/hyperv/hostops.py (+160/-0)
nova/virt/hyperv/livemigrationops.py (+5/-4)
nova/virt/hyperv/snapshotops.py (+3/-3)
nova/virt/hyperv/vmops.py (+134/-157)
nova/virt/hyperv/vmutils.py (+30/-13)
nova/virt/hyperv/volumeops.py (+6/-6)
nova/virt/hyperv/volumeutils.py (+5/-4)
nova/virt/images.py (+148/-22)
nova/virt/libvirt/config.py (+3/-1)
nova/virt/libvirt/driver.py (+280/-261)
nova/virt/libvirt/firewall.py (+37/-18)
nova/virt/libvirt/imagebackend.py (+80/-36)
nova/virt/libvirt/imagecache.py (+108/-77)
nova/virt/libvirt/snapshots.py (+89/-0)
nova/virt/libvirt/utils.py (+145/-71)
nova/virt/libvirt/vif.py (+37/-40)
nova/virt/libvirt/volume.py (+40/-25)
nova/virt/libvirt/volume_nfs.py (+4/-4)
nova/virt/netutils.py (+7/-8)
nova/virt/powervm/driver.py (+7/-24)
nova/virt/powervm/exception.py (+6/-1)
nova/virt/powervm/operator.py (+51/-30)
nova/virt/storage_users.py (+63/-0)
nova/virt/virtapi.py (+107/-0)
nova/virt/vmwareapi/driver.py (+14/-15)
nova/virt/vmwareapi/network_utils.py (+4/-4)
nova/virt/vmwareapi/read_write_util.py (+0/-3)
nova/virt/vmwareapi/vif.py (+4/-4)
nova/virt/vmwareapi/vim.py (+4/-5)
nova/virt/vmwareapi/vmops.py (+4/-5)
nova/virt/xenapi/agent.py (+183/-120)
nova/virt/xenapi/driver.py (+76/-60)
nova/virt/xenapi/fake.py (+13/-3)
nova/virt/xenapi/firewall.py (+4/-9)
nova/virt/xenapi/host.py (+45/-20)
nova/virt/xenapi/pool.py (+30/-23)
nova/virt/xenapi/pool_states.py (+1/-3)
nova/virt/xenapi/vif.py (+4/-5)
nova/virt/xenapi/vm_utils.py (+243/-168)
nova/virt/xenapi/vmops.py (+262/-201)
nova/virt/xenapi/volume_utils.py (+26/-21)
nova/virt/xenapi/volumeops.py (+39/-36)
nova/vnc/__init__.py (+2/-3)
nova/vnc/xvp_proxy.py (+4/-5)
nova/volume/__init__.py (+3/-2)
nova/volume/api.py (+0/-511)
nova/volume/cinder.py (+5/-6)
nova/volume/driver.py (+0/-953)
nova/volume/iscsi.py (+0/-233)
nova/volume/manager.py (+0/-452)
nova/volume/netapp.py (+0/-1291)
nova/volume/netapp_nfs.py (+0/-267)
nova/volume/nexenta/__init__.py (+0/-33)
nova/volume/nexenta/jsonrpc.py (+0/-84)
nova/volume/nexenta/volume.py (+0/-282)
nova/volume/nfs.py (+0/-293)
nova/volume/san.py (+0/-649)
nova/volume/solidfire.py (+0/-424)
nova/volume/storwize_svc.py (+0/-1249)
nova/volume/utils.py (+0/-84)
nova/volume/volume_types.py (+0/-125)
nova/volume/xensm.py (+0/-249)
nova/weights.py (+71/-0)
nova/wsgi.py (+17/-5)
openstack-common.conf (+1/-1)
plugins/xenserver/xenapi/contrib/rpmbuild/SPECS/openstack-xen-plugins.spec (+3/-0)
plugins/xenserver/xenapi/etc/xapi.d/plugins/_bittorrent_seeder (+121/-0)
plugins/xenserver/xenapi/etc/xapi.d/plugins/agent (+20/-14)
plugins/xenserver/xenapi/etc/xapi.d/plugins/bandwidth (+51/-0)
plugins/xenserver/xenapi/etc/xapi.d/plugins/bittorrent (+299/-0)
plugins/xenserver/xenapi/etc/xapi.d/plugins/utils.py (+13/-18)
plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost (+4/-2)
run_tests.sh (+14/-3)
setup.cfg (+0/-1)
setup.py (+11/-6)
smoketests/__init__.py (+0/-6)
smoketests/base.py (+1/-1)
smoketests/run_tests.py (+3/-3)
smoketests/test_sysadmin.py (+15/-3)
tools/conf/extract_opts.py (+1/-2)
tools/esx/guest_tool.py (+1/-1)
tools/hacking.py (+18/-10)
tools/install_venv.py (+2/-5)
tools/pip-requires (+5/-3)
tools/test-options (+0/-1)
tools/test-requires (+4/-2)
tools/xenserver/destroy_cached_images.py (+6/-6)
tools/xenserver/vm_vdi_cleaner.py (+2/-2)
tox.ini (+6/-9)
To merge this branch: bzr merge lp:~zulcss/ubuntu/precise/nova/trunk
Reviewer Review Type Date Requested Status
James Page Approve
Review via email: mp+136252@code.launchpad.net
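
For reviewers who want to exercise the merge locally rather than through the web UI, the usual bzr sequence is roughly the sketch below. The local target checkout path and the lp:<target-branch> URL are placeholders; only the proposed source branch is taken from this proposal.

  # Sketch of a local review workflow (target URL and directory are placeholders)
  bzr branch lp:<target-branch> nova-precise      # local copy of the branch being merged into
  cd nova-precise
  bzr merge lp:~zulcss/ubuntu/precise/nova/trunk  # pull in the proposed changes
  bzr diff                                        # inspect the resulting changes
  bzr commit -m "Merge lp:~zulcss/ubuntu/precise/nova/trunk"  # record the merge once satisfied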

Description of the change

nova g1


Revision history for this message
James Page (james-page) wrote :

Hi Chuck

LGTM; one thing I did notice is that on both precise and raring the test suite has errors, but the package still builds despite this - thoughts?

https://jenkins.qa.ubuntu.com/view/Openstack_Testing/view/Grizzly/job/raring_grizzly_nova_trunk/187/consoleFull
https://jenkins.qa.ubuntu.com/view/Openstack_Testing/view/Grizzly/job/precise_grizzly_nova_trunk/186/consoleFull

review: Approve
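
On the test-suite question above: whether failing tests abort the package build depends on how debian/rules invokes them. A minimal sketch of a non-fatal test step, assuming the packaging calls the in-tree run_tests.sh (the actual rules file is not shown in this excerpt, and the flag is illustrative):

  # Hypothetical build-time test step: run the suite but ignore a non-zero
  # exit code so dpkg-buildpackage continues past test failures.
  ./run_tests.sh -N || true

Dropping the "|| true" (or the equivalent construct actually used in the packaging) would make the Jenkins builds fail at the test step instead of only reporting errors in the log.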
Revision history for this message
James Page (james-page) wrote :

Uploaded to staging

Preview Diff

1=== modified file '.mailmap'
2--- .mailmap 2012-08-27 15:37:18 +0000
3+++ .mailmap 2012-11-26 19:55:29 +0000
4@@ -21,7 +21,7 @@
5 <devin.carlen@gmail.com> <devcamcar@illian.local>
6 <doug.hellmann@dreamhost.com> <doug.hellmann@gmail.com>
7 <dprince@redhat.com> <dan.prince@rackspace.com>
8-<edouard1.thuleau@orange.com> <thuleau@gmail.com>
9+<edouard.thuleau@orange.com> <thuleau@gmail.com>
10 <ewan.mellor@citrix.com> <emellor@silver>
11 <ghe@debian.org> <ghe.rivero@gmail.com>
12 <ilyaalekseyev@acm.org> <ialekseev@griddynamics.com>
13@@ -85,6 +85,7 @@
14 Dan Wendlandt <dan@nicira.com> danwent <danwent@dan-xs3-cs>
15 Dan Wendlandt <dan@nicira.com> danwent@gmail.com <>
16 Dan Wendlandt <dan@nicira.com> danwent@gmail.com <dan@nicira.com>
17+Édouard Thuleau <edouard.thuleau@orange.com> Thuleau Édouard <thuleau@gmail.com>
18 Jake Dahn <jake@ansolabs.com> jakedahn <jake@ansolabs.com>
19 Jason Koelker <jason@koelker.net> Jason Kölker <jason@koelker.net>
20 Jay Pipes <jaypipes@gmail.com> jaypipes@gmail.com <>
21@@ -116,4 +117,3 @@
22 Vishvananda Ishaya <vishvananda@gmail.com> <root@ubuntu>
23 Vivek YS <vivek.ys@gmail.com> Vivek YS vivek.ys@gmail.com <>
24 Zhongyue Luo <zhongyue.nah@intel.com> <lzyeval@gmail.com>
25-Édouard Thuleau <edouard1.thuleau@orange.com> Thuleau Édouard <thuleau@gmail.com>
26
27=== removed directory '.pc'
28=== removed file '.pc/.quilt_patches'
29--- .pc/.quilt_patches 2012-07-06 10:18:33 +0000
30+++ .pc/.quilt_patches 1970-01-01 00:00:00 +0000
31@@ -1,1 +0,0 @@
32-debian/patches
33
34=== removed file '.pc/.quilt_series'
35--- .pc/.quilt_series 2012-07-06 10:18:33 +0000
36+++ .pc/.quilt_series 1970-01-01 00:00:00 +0000
37@@ -1,1 +0,0 @@
38-series
39
40=== removed file '.pc/.version'
41--- .pc/.version 2010-10-08 23:16:58 +0000
42+++ .pc/.version 1970-01-01 00:00:00 +0000
43@@ -1,1 +0,0 @@
44-2
45
46=== removed file '.pc/applied-patches'
47--- .pc/applied-patches 2012-10-12 12:35:01 +0000
48+++ .pc/applied-patches 1970-01-01 00:00:00 +0000
49@@ -1,9 +0,0 @@
50-path-to-the-xenhost.conf-fixup.patch
51-fix-ubuntu-tests.patch
52-fix-docs-build-without-network.patch
53-avoid_setuptools_git_dependency.patch
54-rbd-security.patch
55-ubuntu/ubuntu-fix-32-64-bit-iss.patch
56-ubuntu/ubuntu-fix-ec2-instance-id-mappings.patch
57-ubuntu/fix-libvirt-firewall-slowdown.patch
58-ubuntu/fix-ec2-volume-id-mappings.patch
59
60=== removed directory '.pc/avoid_setuptools_git_dependency.patch'
61=== removed directory '.pc/avoid_setuptools_git_dependency.patch/tools'
62=== removed file '.pc/avoid_setuptools_git_dependency.patch/tools/pip-requires'
63--- .pc/avoid_setuptools_git_dependency.patch/tools/pip-requires 2012-10-12 12:35:01 +0000
64+++ .pc/avoid_setuptools_git_dependency.patch/tools/pip-requires 1970-01-01 00:00:00 +0000
65@@ -1,23 +0,0 @@
66-SQLAlchemy>=0.7.3
67-Cheetah==2.4.4
68-amqplib==0.6.1
69-anyjson==0.2.4
70-boto==2.1.1
71-eventlet>=0.9.17
72-kombu==1.0.4
73-lxml>=2.3,<=2.3.5
74-routes==1.12.3
75-WebOb==1.0.8
76-greenlet>=0.3.1
77-PasteDeploy==1.5.0
78-paste
79-sqlalchemy-migrate>=0.7.2
80-netaddr
81-suds==0.4
82-paramiko
83-Babel>=0.9.6
84-iso8601>=0.1.4
85-httplib2
86-setuptools_git>=0.4
87-python-quantumclient>=2.0
88-python-glanceclient>=0.5.0,<2
89
90=== removed directory '.pc/fix-docs-build-without-network.patch'
91=== removed directory '.pc/fix-docs-build-without-network.patch/doc'
92=== removed directory '.pc/fix-docs-build-without-network.patch/doc/source'
93=== removed file '.pc/fix-docs-build-without-network.patch/doc/source/conf.py'
94--- .pc/fix-docs-build-without-network.patch/doc/source/conf.py 2012-09-20 07:45:50 +0000
95+++ .pc/fix-docs-build-without-network.patch/doc/source/conf.py 1970-01-01 00:00:00 +0000
96@@ -1,279 +0,0 @@
97-# -*- coding: utf-8 -*-
98-#
99-# nova documentation build configuration file, created by
100-# sphinx-quickstart on Sat May 1 15:17:47 2010.
101-#
102-# This file is execfile()d with the current directory set to
103-# its containing dir.
104-#
105-# Note that not all possible configuration values are present in this
106-# autogenerated file.
107-#
108-# All configuration values have a default; values that are commented out
109-# serve to show the default.
110-
111-import sys
112-import os
113-
114-# If extensions (or modules to document with autodoc) are in another directory,
115-# add these directories to sys.path here. If the directory is relative to the
116-# documentation root, use os.path.abspath to make it absolute, like shown here.
117-sys.path.insert(0, os.path.abspath('../../'))
118-sys.path.insert(0, os.path.abspath('../'))
119-sys.path.insert(0, os.path.abspath('./'))
120-
121-# -- General configuration ----------------------------------------------------
122-
123-# Add any Sphinx extension module names here, as strings. They can be
124-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
125-
126-extensions = ['sphinx.ext.autodoc',
127- 'sphinx.ext.intersphinx',
128- 'ext.nova_todo',
129- 'sphinx.ext.coverage',
130- 'sphinx.ext.pngmath',
131- 'sphinx.ext.ifconfig',
132- 'sphinx.ext.graphviz']
133-
134-todo_include_todos = True
135-
136-# Add any paths that contain templates here, relative to this directory.
137-# Changing the path so that the Hudson build output contains GA code
138-# and the source docs do not contain the code so local, offline sphinx builds
139-# are "clean."
140-templates_path = []
141-if os.getenv('HUDSON_PUBLISH_DOCS'):
142- templates_path = ['_ga', '_templates']
143-else:
144- templates_path = ['_templates']
145-
146-# The suffix of source filenames.
147-source_suffix = '.rst'
148-
149-# The encoding of source files.
150-#source_encoding = 'utf-8'
151-
152-# The master toctree document.
153-master_doc = 'index'
154-
155-# General information about the project.
156-project = u'nova'
157-copyright = u'2010-present, OpenStack, LLC'
158-
159-# The version info for the project you're documenting, acts as replacement for
160-# |version| and |release|, also used in various other places throughout the
161-# built documents.
162-#
163-from nova import version as nova_version
164-#import nova.version
165-# The full version, including alpha/beta/rc tags.
166-release = nova_version.version_string()
167-# The short X.Y version.
168-version = nova_version.canonical_version_string()
169-
170-# The language for content autogenerated by Sphinx. Refer to documentation
171-# for a list of supported languages.
172-#language = None
173-
174-# There are two options for replacing |today|: either, you set today to some
175-# non-false value, then it is used:
176-#today = ''
177-# Else, today_fmt is used as the format for a strftime call.
178-#today_fmt = '%B %d, %Y'
179-
180-# List of documents that shouldn't be included in the build.
181-unused_docs = [
182- 'api_ext/rst_extension_template',
183- 'vmwareapi_readme',
184- 'installer',
185-]
186-
187-# List of directories, relative to source directory, that shouldn't be searched
188-# for source files.
189-exclude_trees = []
190-
191-# The reST default role (used for this markup: `text`) to use
192-# for all documents.
193-#default_role = None
194-
195-# If true, '()' will be appended to :func: etc. cross-reference text.
196-#add_function_parentheses = True
197-
198-# If true, the current module name will be prepended to all description
199-# unit titles (such as .. function::).
200-add_module_names = False
201-
202-# If true, sectionauthor and moduleauthor directives will be shown in the
203-# output. They are ignored by default.
204-show_authors = False
205-
206-# The name of the Pygments (syntax highlighting) style to use.
207-pygments_style = 'sphinx'
208-
209-# A list of ignored prefixes for module index sorting.
210-modindex_common_prefix = ['nova.']
211-
212-# -- Options for man page output ----------------------------------------------
213-
214-# Grouping the document tree for man pages.
215-# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
216-
217-man_pages = [
218- ('man/nova-all', 'nova-all', u'Cloud controller fabric',
219- [u'OpenStack'], 1),
220- ('man/nova-api-ec2', 'nova-api-ec2', u'Cloud controller fabric',
221- [u'OpenStack'], 1),
222- ('man/nova-api-metadata', 'nova-api-metadata', u'Cloud controller fabric',
223- [u'OpenStack'], 1),
224- ('man/nova-api-os-compute', 'nova-api-os-compute',
225- u'Cloud controller fabric', [u'OpenStack'], 1),
226- ('man/nova-api-os-volume', 'nova-api-os-volume',
227- u'Cloud controller fabric', [u'OpenStack'], 1),
228- ('man/nova-api', 'nova-api', u'Cloud controller fabric',
229- [u'OpenStack'], 1),
230- ('man/nova-cert', 'nova-cert', u'Cloud controller fabric',
231- [u'OpenStack'], 1),
232- ('man/nova-compute', 'nova-compute', u'Cloud controller fabric',
233- [u'OpenStack'], 1),
234- ('man/nova-console', 'nova-console', u'Cloud controller fabric',
235- [u'OpenStack'], 1),
236- ('man/nova-consoleauth', 'nova-consoleauth', u'Cloud controller fabric',
237- [u'OpenStack'], 1),
238- ('man/nova-dhcpbridge', 'nova-dhcpbridge', u'Cloud controller fabric',
239- [u'OpenStack'], 1),
240- ('man/nova-manage', 'nova-manage', u'Cloud controller fabric',
241- [u'OpenStack'], 1),
242- ('man/nova-network', 'nova-network', u'Cloud controller fabric',
243- [u'OpenStack'], 1),
244- ('man/nova-novncproxy', 'nova-novncproxy', u'Cloud controller fabric',
245- [u'OpenStack'], 1),
246- ('man/nova-objectstore', 'nova-objectstore', u'Cloud controller fabric',
247- [u'OpenStack'], 1),
248- ('man/nova-rootwrap', 'nova-rootwrap', u'Cloud controller fabric',
249- [u'OpenStack'], 1),
250- ('man/nova-rpc-zmq-receiver', 'nova-rpc-zmq-receiver', u'Cloud controller fabric',
251- [u'OpenStack'], 1),
252- ('man/nova-scheduler', 'nova-scheduler', u'Cloud controller fabric',
253- [u'OpenStack'], 1),
254- ('man/nova-volume-usage-audit', 'nova-volume-usage-audit', u'Cloud controller fabric',
255- [u'OpenStack'], 1),
256- ('man/nova-volume', 'nova-volume', u'Cloud controller fabric',
257- [u'OpenStack'], 1),
258- ('man/nova-xvpvncproxy', 'nova-xvpvncproxy', u'Cloud controller fabric',
259- [u'OpenStack'], 1)
260-]
261-
262-# -- Options for HTML output --------------------------------------------------
263-
264-# The theme to use for HTML and HTML Help pages. Major themes that come with
265-# Sphinx are currently 'default' and 'sphinxdoc'.
266-html_theme_path = ["."]
267-html_theme = '_theme'
268-
269-# Theme options are theme-specific and customize the look and feel of a theme
270-# further. For a list of options available for each theme, see the
271-# documentation.
272-#html_theme_options = {}
273-
274-# Add any paths that contain custom themes here, relative to this directory.
275-#html_theme_path = []
276-
277-# The name for this set of Sphinx documents. If None, it defaults to
278-# "<project> v<release> documentation".
279-#html_title = None
280-
281-# A shorter title for the navigation bar. Default is the same as html_title.
282-#html_short_title = None
283-
284-# The name of an image file (relative to this directory) to place at the top
285-# of the sidebar.
286-#html_logo = None
287-
288-# The name of an image file (within the static path) to use as favicon of the
289-# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
290-# pixels large.
291-#html_favicon = None
292-
293-# Add any paths that contain custom static files (such as style sheets) here,
294-# relative to this directory. They are copied after the builtin static files,
295-# so a file named "default.css" will overwrite the builtin "default.css".
296-html_static_path = ['_static']
297-
298-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
299-# using the given strftime format.
300-#html_last_updated_fmt = '%b %d, %Y'
301-git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
302-html_last_updated_fmt = os.popen(git_cmd).read()
303-
304-# If true, SmartyPants will be used to convert quotes and dashes to
305-# typographically correct entities.
306-#html_use_smartypants = True
307-
308-# Custom sidebar templates, maps document names to template names.
309-#html_sidebars = {}
310-
311-# Additional templates that should be rendered to pages, maps page names to
312-# template names.
313-#html_additional_pages = {}
314-
315-# If false, no module index is generated.
316-#html_use_modindex = True
317-
318-# If false, no index is generated.
319-#html_use_index = True
320-
321-# If true, the index is split into individual pages for each letter.
322-#html_split_index = False
323-
324-# If true, links to the reST sources are added to the pages.
325-#html_show_sourcelink = True
326-
327-# If true, an OpenSearch description file will be output, and all pages will
328-# contain a <link> tag referring to it. The value of this option must be the
329-# base URL from which the finished HTML is served.
330-#html_use_opensearch = ''
331-
332-# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
333-#html_file_suffix = ''
334-
335-# Output file base name for HTML help builder.
336-htmlhelp_basename = 'novadoc'
337-
338-
339-# -- Options for LaTeX output -------------------------------------------------
340-
341-# The paper size ('letter' or 'a4').
342-#latex_paper_size = 'letter'
343-
344-# The font size ('10pt', '11pt' or '12pt').
345-#latex_font_size = '10pt'
346-
347-# Grouping the document tree into LaTeX files. List of tuples
348-# (source start file, target name, title, author, documentclass
349-# [howto/manual]).
350-latex_documents = [
351- ('index', 'Nova.tex', u'Nova Documentation',
352- u'Anso Labs, LLC', 'manual'),
353-]
354-
355-# The name of an image file (relative to this directory) to place at the top of
356-# the title page.
357-#latex_logo = None
358-
359-# For "manual" documents, if this is true, then toplevel headings are parts,
360-# not chapters.
361-#latex_use_parts = False
362-
363-# Additional stuff for the LaTeX preamble.
364-#latex_preamble = ''
365-
366-# Documents to append as an appendix to all manuals.
367-#latex_appendices = []
368-
369-# If false, no module index is generated.
370-#latex_use_modindex = True
371-
372-
373-# Example configuration for intersphinx: refer to the Python standard library.
374-intersphinx_mapping = {'python': ('http://docs.python.org/', None),
375- 'swift': ('http://swift.openstack.org', None)}
376
377=== removed directory '.pc/fix-ubuntu-tests.patch'
378=== removed directory '.pc/fix-ubuntu-tests.patch/nova'
379=== removed directory '.pc/fix-ubuntu-tests.patch/nova/tests'
380=== removed file '.pc/fix-ubuntu-tests.patch/nova/tests/test_api.py'
381--- .pc/fix-ubuntu-tests.patch/nova/tests/test_api.py 2012-08-16 14:04:11 +0000
382+++ .pc/fix-ubuntu-tests.patch/nova/tests/test_api.py 1970-01-01 00:00:00 +0000
383@@ -1,616 +0,0 @@
384-# vim: tabstop=4 shiftwidth=4 softtabstop=4
385-
386-# Copyright 2010 United States Government as represented by the
387-# Administrator of the National Aeronautics and Space Administration.
388-# All Rights Reserved.
389-#
390-# Licensed under the Apache License, Version 2.0 (the "License"); you may
391-# not use this file except in compliance with the License. You may obtain
392-# a copy of the License at
393-#
394-# http://www.apache.org/licenses/LICENSE-2.0
395-#
396-# Unless required by applicable law or agreed to in writing, software
397-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
398-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
399-# License for the specific language governing permissions and limitations
400-# under the License.
401-
402-"""Unit tests for the API endpoint"""
403-
404-import random
405-import StringIO
406-
407-import boto
408-from boto.ec2 import regioninfo
409-from boto import exception as boto_exc
410-# newer versions of boto use their own wrapper on top of httplib.HTTPResponse
411-try:
412- from boto.connection import HTTPResponse
413-except ImportError:
414- from httplib import HTTPResponse
415-import webob
416-
417-from nova.api import auth
418-from nova.api import ec2
419-from nova.api.ec2 import apirequest
420-from nova.api.ec2 import ec2utils
421-from nova import block_device
422-from nova import context
423-from nova import exception
424-from nova import flags
425-from nova.openstack.common import timeutils
426-from nova import test
427-
428-
429-FLAGS = flags.FLAGS
430-
431-
432-class FakeHttplibSocket(object):
433- """a fake socket implementation for httplib.HTTPResponse, trivial"""
434- def __init__(self, response_string):
435- self.response_string = response_string
436- self._buffer = StringIO.StringIO(response_string)
437-
438- def makefile(self, _mode, _other):
439- """Returns the socket's internal buffer"""
440- return self._buffer
441-
442-
443-class FakeHttplibConnection(object):
444- """A fake httplib.HTTPConnection for boto to use
445-
446- requests made via this connection actually get translated and routed into
447- our WSGI app, we then wait for the response and turn it back into
448- the HTTPResponse that boto expects.
449- """
450- def __init__(self, app, host, is_secure=False):
451- self.app = app
452- self.host = host
453-
454- def request(self, method, path, data, headers):
455- req = webob.Request.blank(path)
456- req.method = method
457- req.body = data
458- req.headers = headers
459- req.headers['Accept'] = 'text/html'
460- req.host = self.host
461- # Call the WSGI app, get the HTTP response
462- resp = str(req.get_response(self.app))
463- # For some reason, the response doesn't have "HTTP/1.0 " prepended; I
464- # guess that's a function the web server usually provides.
465- resp = "HTTP/1.0 %s" % resp
466- self.sock = FakeHttplibSocket(resp)
467- self.http_response = HTTPResponse(self.sock)
468- # NOTE(vish): boto is accessing private variables for some reason
469- self._HTTPConnection__response = self.http_response
470- self.http_response.begin()
471-
472- def getresponse(self):
473- return self.http_response
474-
475- def getresponsebody(self):
476- return self.sock.response_string
477-
478- def close(self):
479- """Required for compatibility with boto/tornado"""
480- pass
481-
482-
483-class XmlConversionTestCase(test.TestCase):
484- """Unit test api xml conversion"""
485- def test_number_conversion(self):
486- conv = ec2utils._try_convert
487- self.assertEqual(conv('None'), None)
488- self.assertEqual(conv('True'), True)
489- self.assertEqual(conv('TRUE'), True)
490- self.assertEqual(conv('true'), True)
491- self.assertEqual(conv('False'), False)
492- self.assertEqual(conv('FALSE'), False)
493- self.assertEqual(conv('false'), False)
494- self.assertEqual(conv('0'), 0)
495- self.assertEqual(conv('42'), 42)
496- self.assertEqual(conv('3.14'), 3.14)
497- self.assertEqual(conv('-57.12'), -57.12)
498- self.assertEqual(conv('0x57'), 0x57)
499- self.assertEqual(conv('-0x57'), -0x57)
500- self.assertEqual(conv('-'), '-')
501- self.assertEqual(conv('-0'), 0)
502- self.assertEqual(conv('0.0'), 0.0)
503- self.assertEqual(conv('1e-8'), 0.0)
504- self.assertEqual(conv('-1e-8'), 0.0)
505- self.assertEqual(conv('0xDD8G'), '0xDD8G')
506- self.assertEqual(conv('0XDD8G'), '0XDD8G')
507- self.assertEqual(conv('-stringy'), '-stringy')
508- self.assertEqual(conv('stringy'), 'stringy')
509- self.assertEqual(conv('add'), 'add')
510- self.assertEqual(conv('remove'), 'remove')
511- self.assertEqual(conv(''), '')
512-
513-
514-class Ec2utilsTestCase(test.TestCase):
515- def test_ec2_id_to_id(self):
516- self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30)
517- self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29)
518- self.assertEqual(ec2utils.ec2_id_to_id('snap-0000001c'), 28)
519- self.assertEqual(ec2utils.ec2_id_to_id('vol-0000001b'), 27)
520-
521- def test_bad_ec2_id(self):
522- self.assertRaises(exception.InvalidEc2Id,
523- ec2utils.ec2_id_to_id,
524- 'badone')
525-
526- def test_id_to_ec2_id(self):
527- self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e')
528- self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d')
529- self.assertEqual(ec2utils.id_to_ec2_snap_id(28), 'snap-0000001c')
530- self.assertEqual(ec2utils.id_to_ec2_vol_id(27), 'vol-0000001b')
531-
532- def test_dict_from_dotted_str(self):
533- in_str = [('BlockDeviceMapping.1.DeviceName', '/dev/sda1'),
534- ('BlockDeviceMapping.1.Ebs.SnapshotId', 'snap-0000001c'),
535- ('BlockDeviceMapping.1.Ebs.VolumeSize', '80'),
536- ('BlockDeviceMapping.1.Ebs.DeleteOnTermination', 'false'),
537- ('BlockDeviceMapping.2.DeviceName', '/dev/sdc'),
538- ('BlockDeviceMapping.2.VirtualName', 'ephemeral0')]
539- expected_dict = {
540- 'block_device_mapping': {
541- '1': {'device_name': '/dev/sda1',
542- 'ebs': {'snapshot_id': 'snap-0000001c',
543- 'volume_size': 80,
544- 'delete_on_termination': False}},
545- '2': {'device_name': '/dev/sdc',
546- 'virtual_name': 'ephemeral0'}}}
547- out_dict = ec2utils.dict_from_dotted_str(in_str)
548-
549- self.assertDictMatch(out_dict, expected_dict)
550-
551- def test_properties_root_defice_name(self):
552- mappings = [{"device": "/dev/sda1", "virtual": "root"}]
553- properties0 = {'mappings': mappings}
554- properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings}
555-
556- root_device_name = block_device.properties_root_device_name(
557- properties0)
558- self.assertEqual(root_device_name, '/dev/sda1')
559-
560- root_device_name = block_device.properties_root_device_name(
561- properties1)
562- self.assertEqual(root_device_name, '/dev/sdb')
563-
564- def test_mapping_prepend_dev(self):
565- mappings = [
566- {'virtual': 'ami',
567- 'device': 'sda1'},
568- {'virtual': 'root',
569- 'device': '/dev/sda1'},
570-
571- {'virtual': 'swap',
572- 'device': 'sdb1'},
573- {'virtual': 'swap',
574- 'device': '/dev/sdb2'},
575-
576- {'virtual': 'ephemeral0',
577- 'device': 'sdc1'},
578- {'virtual': 'ephemeral1',
579- 'device': '/dev/sdc1'}]
580- expected_result = [
581- {'virtual': 'ami',
582- 'device': 'sda1'},
583- {'virtual': 'root',
584- 'device': '/dev/sda1'},
585-
586- {'virtual': 'swap',
587- 'device': '/dev/sdb1'},
588- {'virtual': 'swap',
589- 'device': '/dev/sdb2'},
590-
591- {'virtual': 'ephemeral0',
592- 'device': '/dev/sdc1'},
593- {'virtual': 'ephemeral1',
594- 'device': '/dev/sdc1'}]
595- self.assertDictListMatch(block_device.mappings_prepend_dev(mappings),
596- expected_result)
597-
598-
599-class ApiEc2TestCase(test.TestCase):
600- """Unit test for the cloud controller on an EC2 API"""
601- def setUp(self):
602- super(ApiEc2TestCase, self).setUp()
603- self.host = '127.0.0.1'
604- # NOTE(vish): skipping the Authorizer
605- roles = ['sysadmin', 'netadmin']
606- ctxt = context.RequestContext('fake', 'fake', roles=roles)
607- self.app = auth.InjectContext(ctxt, ec2.FaultWrapper(
608- ec2.RequestLogging(ec2.Requestify(ec2.Authorizer(ec2.Executor()
609- ), 'nova.api.ec2.cloud.CloudController'))))
610-
611- def expect_http(self, host=None, is_secure=False, api_version=None):
612- """Returns a new EC2 connection"""
613- self.ec2 = boto.connect_ec2(
614- aws_access_key_id='fake',
615- aws_secret_access_key='fake',
616- is_secure=False,
617- region=regioninfo.RegionInfo(None, 'test', self.host),
618- port=8773,
619- path='/services/Cloud')
620- if api_version:
621- self.ec2.APIVersion = api_version
622-
623- self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
624- self.http = FakeHttplibConnection(
625- self.app, '%s:8773' % (self.host), False)
626- # pylint: disable=E1103
627- if boto.Version >= '2':
628- self.ec2.new_http_connection(host or '%s:8773' % (self.host),
629- is_secure).AndReturn(self.http)
630- else:
631- self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
632- return self.http
633-
634- def test_return_valid_isoformat(self):
635- """
636- Ensure that the ec2 api returns datetime in xs:dateTime
637- (which apparently isn't datetime.isoformat())
638- NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297
639- """
640- conv = apirequest._database_to_isoformat
641- # sqlite database representation with microseconds
642- time_to_convert = timeutils.parse_strtime("2011-02-21 20:14:10.634276",
643- "%Y-%m-%d %H:%M:%S.%f")
644- self.assertEqual(conv(time_to_convert), '2011-02-21T20:14:10.634Z')
645- # mysqlite database representation
646- time_to_convert = timeutils.parse_strtime("2011-02-21 19:56:18",
647- "%Y-%m-%d %H:%M:%S")
648- self.assertEqual(conv(time_to_convert), '2011-02-21T19:56:18.000Z')
649-
650- def test_xmlns_version_matches_request_version(self):
651- self.expect_http(api_version='2010-10-30')
652- self.mox.ReplayAll()
653-
654- # Any request should be fine
655- self.ec2.get_all_instances()
656- self.assertTrue(self.ec2.APIVersion in self.http.getresponsebody(),
657- 'The version in the xmlns of the response does '
658- 'not match the API version given in the request.')
659-
660- def test_describe_instances(self):
661- """Test that, after creating a user and a project, the describe
662- instances call to the API works properly"""
663- self.expect_http()
664- self.mox.ReplayAll()
665- self.assertEqual(self.ec2.get_all_instances(), [])
666-
667- def test_terminate_invalid_instance(self):
668- """Attempt to terminate an invalid instance"""
669- self.expect_http()
670- self.mox.ReplayAll()
671- self.assertRaises(boto_exc.EC2ResponseError,
672- self.ec2.terminate_instances, "i-00000005")
673-
674- def test_get_all_key_pairs(self):
675- """Test that, after creating a user and project and generating
676- a key pair, that the API call to list key pairs works properly"""
677- keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
678- for x in range(random.randint(4, 8)))
679- self.expect_http()
680- self.mox.ReplayAll()
681- self.ec2.create_key_pair(keyname)
682- rv = self.ec2.get_all_key_pairs()
683- results = [k for k in rv if k.name == keyname]
684- self.assertEquals(len(results), 1)
685-
686- def test_create_duplicate_key_pair(self):
687- """Test that, after successfully generating a keypair,
688- requesting a second keypair with the same name fails sanely"""
689- self.expect_http()
690- self.mox.ReplayAll()
691- self.ec2.create_key_pair('test')
692-
693- try:
694- self.ec2.create_key_pair('test')
695- except boto_exc.EC2ResponseError, e:
696- if e.code == 'KeyPairExists':
697- pass
698- else:
699- self.fail("Unexpected EC2ResponseError: %s "
700- "(expected KeyPairExists)" % e.code)
701- else:
702- self.fail('Exception not raised.')
703-
704- def test_get_all_security_groups(self):
705- """Test that we can retrieve security groups"""
706- self.expect_http()
707- self.mox.ReplayAll()
708-
709- rv = self.ec2.get_all_security_groups()
710-
711- self.assertEquals(len(rv), 1)
712- self.assertEquals(rv[0].name, 'default')
713-
714- def test_create_delete_security_group(self):
715- """Test that we can create a security group"""
716- self.expect_http()
717- self.mox.ReplayAll()
718-
719- security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
720- for x in range(random.randint(4, 8)))
721-
722- self.ec2.create_security_group(security_group_name, 'test group')
723-
724- self.expect_http()
725- self.mox.ReplayAll()
726-
727- rv = self.ec2.get_all_security_groups()
728- self.assertEquals(len(rv), 2)
729- self.assertTrue(security_group_name in [group.name for group in rv])
730-
731- self.expect_http()
732- self.mox.ReplayAll()
733-
734- self.ec2.delete_security_group(security_group_name)
735-
736- def test_group_name_valid_chars_security_group(self):
737- """ Test that we sanely handle invalid security group names.
738- EC2 API Spec states we should only accept alphanumeric characters,
739- spaces, dashes, and underscores. Amazon implementation
740- accepts more characters - so, [:print:] is ok. """
741-
742- bad_strict_ec2 = "aa \t\x01\x02\x7f"
743- bad_amazon_ec2 = "aa #^% -=99"
744- test_raise = [
745- (True, bad_amazon_ec2, "test desc"),
746- (True, "test name", bad_amazon_ec2),
747- (False, bad_strict_ec2, "test desc"),
748- ]
749- for test in test_raise:
750- self.expect_http()
751- self.mox.ReplayAll()
752- FLAGS.ec2_strict_validation = test[0]
753- self.assertRaises(boto_exc.EC2ResponseError,
754- self.ec2.create_security_group,
755- test[1],
756- test[2])
757- test_accept = [
758- (False, bad_amazon_ec2, "test desc"),
759- (False, "test name", bad_amazon_ec2),
760- ]
761- for test in test_accept:
762- self.expect_http()
763- self.mox.ReplayAll()
764- FLAGS.ec2_strict_validation = test[0]
765- self.ec2.create_security_group(test[1], test[2])
766- self.expect_http()
767- self.mox.ReplayAll()
768- self.ec2.delete_security_group(test[1])
769-
770- def test_group_name_valid_length_security_group(self):
771- """Test that we sanely handle invalid security group names.
772- API Spec states that the length should not exceed 255 chars """
773- self.expect_http()
774- self.mox.ReplayAll()
775-
776- # Test block group_name > 255 chars
777- security_group_name = "".join(random.choice("poiuytrewqasdfghjklmnbvc")
778- for x in range(random.randint(256, 266)))
779-
780- self.assertRaises(boto_exc.EC2ResponseError,
781- self.ec2.create_security_group,
782- security_group_name,
783- 'test group')
784-
785- def test_authorize_revoke_security_group_cidr(self):
786- """
787- Test that we can add and remove CIDR based rules
788- to a security group
789- """
790- self.expect_http()
791- self.mox.ReplayAll()
792-
793- security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
794- for x in range(random.randint(4, 8)))
795-
796- group = self.ec2.create_security_group(security_group_name,
797- 'test group')
798-
799- self.expect_http()
800- self.mox.ReplayAll()
801- group.connection = self.ec2
802-
803- group.authorize('tcp', 80, 81, '0.0.0.0/0')
804- group.authorize('icmp', -1, -1, '0.0.0.0/0')
805- group.authorize('udp', 80, 81, '0.0.0.0/0')
806- group.authorize('tcp', 1, 65535, '0.0.0.0/0')
807- group.authorize('udp', 1, 65535, '0.0.0.0/0')
808- group.authorize('icmp', 1, 0, '0.0.0.0/0')
809- group.authorize('icmp', 0, 1, '0.0.0.0/0')
810- group.authorize('icmp', 0, 0, '0.0.0.0/0')
811-
812- def _assert(message, *args):
813- try:
814- group.authorize(*args)
815- except boto_exc.EC2ResponseError as e:
816- self.assertEqual(e.status, 400, 'Expected status to be 400')
817- self.assertIn(message, e.error_message, e.error_message)
818- else:
819- raise self.failureException, 'EC2ResponseError not raised'
820-
821- # Invalid CIDR address
822- _assert('Invalid CIDR', 'tcp', 80, 81, '0.0.0.0/0444')
823- # Missing ports
824- _assert('Not enough parameters', 'tcp', '0.0.0.0/0')
825- # from port cannot be greater than to port
826- _assert('Invalid port range', 'tcp', 100, 1, '0.0.0.0/0')
827- # For tcp, negative values are not allowed
828- _assert('Invalid port range', 'tcp', -1, 1, '0.0.0.0/0')
829- # For tcp, valid port range 1-65535
830- _assert('Invalid port range', 'tcp', 1, 65599, '0.0.0.0/0')
831- # Invalid Cidr for ICMP type
832- _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.444.0/4')
833- # Invalid protocol
834- _assert('An unknown error has occurred', 'xyz', 1, 14, '0.0.0.0/0')
835- # Invalid port
836- _assert('An unknown error has occurred', 'tcp', " ", "81", '0.0.0.0/0')
837- # Invalid icmp port
838- _assert('An unknown error has occurred', 'icmp', " ", "81",
839- '0.0.0.0/0')
840- # Invalid CIDR Address
841- _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0')
842- # Invalid CIDR Address
843- _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0/')
844- # Invalid Cidr ports
845- _assert('Invalid port range', 'icmp', 1, 256, '0.0.0.0/0')
846-
847- self.expect_http()
848- self.mox.ReplayAll()
849-
850- rv = self.ec2.get_all_security_groups()
851-
852- group = [grp for grp in rv if grp.name == security_group_name][0]
853-
854- self.assertEquals(len(group.rules), 8)
855- self.assertEquals(int(group.rules[0].from_port), 80)
856- self.assertEquals(int(group.rules[0].to_port), 81)
857- self.assertEquals(len(group.rules[0].grants), 1)
858- self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0')
859-
860- self.expect_http()
861- self.mox.ReplayAll()
862- group.connection = self.ec2
863-
864- group.revoke('tcp', 80, 81, '0.0.0.0/0')
865- group.revoke('icmp', -1, -1, '0.0.0.0/0')
866- group.revoke('udp', 80, 81, '0.0.0.0/0')
867- group.revoke('tcp', 1, 65535, '0.0.0.0/0')
868- group.revoke('udp', 1, 65535, '0.0.0.0/0')
869- group.revoke('icmp', 1, 0, '0.0.0.0/0')
870- group.revoke('icmp', 0, 1, '0.0.0.0/0')
871- group.revoke('icmp', 0, 0, '0.0.0.0/0')
872-
873- self.expect_http()
874- self.mox.ReplayAll()
875-
876- self.ec2.delete_security_group(security_group_name)
877-
878- self.expect_http()
879- self.mox.ReplayAll()
880- group.connection = self.ec2
881-
882- rv = self.ec2.get_all_security_groups()
883-
884- self.assertEqual(len(rv), 1)
885- self.assertEqual(rv[0].name, 'default')
886-
887- def test_authorize_revoke_security_group_cidr_v6(self):
888- """
889- Test that we can add and remove CIDR based rules
890- to a security group for IPv6
891- """
892- self.expect_http()
893- self.mox.ReplayAll()
894-
895- security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
896- for x in range(random.randint(4, 8)))
897-
898- group = self.ec2.create_security_group(security_group_name,
899- 'test group')
900-
901- self.expect_http()
902- self.mox.ReplayAll()
903- group.connection = self.ec2
904-
905- group.authorize('tcp', 80, 81, '::/0')
906-
907- self.expect_http()
908- self.mox.ReplayAll()
909-
910- rv = self.ec2.get_all_security_groups()
911-
912- group = [grp for grp in rv if grp.name == security_group_name][0]
913- self.assertEquals(len(group.rules), 1)
914- self.assertEquals(int(group.rules[0].from_port), 80)
915- self.assertEquals(int(group.rules[0].to_port), 81)
916- self.assertEquals(len(group.rules[0].grants), 1)
917- self.assertEquals(str(group.rules[0].grants[0]), '::/0')
918-
919- self.expect_http()
920- self.mox.ReplayAll()
921- group.connection = self.ec2
922-
923- group.revoke('tcp', 80, 81, '::/0')
924-
925- self.expect_http()
926- self.mox.ReplayAll()
927-
928- self.ec2.delete_security_group(security_group_name)
929-
930- self.expect_http()
931- self.mox.ReplayAll()
932- group.connection = self.ec2
933-
934- rv = self.ec2.get_all_security_groups()
935-
936- self.assertEqual(len(rv), 1)
937- self.assertEqual(rv[0].name, 'default')
938-
939- def test_authorize_revoke_security_group_foreign_group(self):
940- """
941- Test that we can grant and revoke another security group access
942- to a security group
943- """
944- self.expect_http()
945- self.mox.ReplayAll()
946-
947- rand_string = 'sdiuisudfsdcnpaqwertasd'
948- security_group_name = "".join(random.choice(rand_string)
949- for x in range(random.randint(4, 8)))
950- other_security_group_name = "".join(random.choice(rand_string)
951- for x in range(random.randint(4, 8)))
952-
953- group = self.ec2.create_security_group(security_group_name,
954- 'test group')
955-
956- self.expect_http()
957- self.mox.ReplayAll()
958-
959- other_group = self.ec2.create_security_group(other_security_group_name,
960- 'some other group')
961-
962- self.expect_http()
963- self.mox.ReplayAll()
964- group.connection = self.ec2
965-
966- group.authorize(src_group=other_group)
967-
968- self.expect_http()
969- self.mox.ReplayAll()
970-
971- rv = self.ec2.get_all_security_groups()
972-
973- # I don't bother checkng that we actually find it here,
974- # because the create/delete unit test further up should
975- # be good enough for that.
976- for group in rv:
977- if group.name == security_group_name:
978- self.assertEquals(len(group.rules), 3)
979- self.assertEquals(len(group.rules[0].grants), 1)
980- self.assertEquals(str(group.rules[0].grants[0]), '%s-%s' %
981- (other_security_group_name, 'fake'))
982-
983- self.expect_http()
984- self.mox.ReplayAll()
985-
986- rv = self.ec2.get_all_security_groups()
987-
988- for group in rv:
989- if group.name == security_group_name:
990- self.expect_http()
991- self.mox.ReplayAll()
992- group.connection = self.ec2
993- group.revoke(src_group=other_group)
994-
995- self.expect_http()
996- self.mox.ReplayAll()
997-
998- self.ec2.delete_security_group(security_group_name)
999- self.ec2.delete_security_group(other_security_group_name)
1000
1001=== removed directory '.pc/path-to-the-xenhost.conf-fixup.patch'
1002=== removed directory '.pc/path-to-the-xenhost.conf-fixup.patch/plugins'
1003=== removed directory '.pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver'
1004=== removed directory '.pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi'
1005=== removed directory '.pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi/etc'
1006=== removed directory '.pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi/etc/xapi.d'
1007=== removed directory '.pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi/etc/xapi.d/plugins'
1008=== removed file '.pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost'
1009--- .pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost 2012-08-16 14:04:11 +0000
1010+++ .pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost 1970-01-01 00:00:00 +0000
1011@@ -1,445 +0,0 @@
1012-#!/usr/bin/env python
1013-
1014-# Copyright 2011 OpenStack LLC.
1015-# Copyright 2011 United States Government as represented by the
1016-# Administrator of the National Aeronautics and Space Administration.
1017-# All Rights Reserved.
1018-#
1019-# Licensed under the Apache License, Version 2.0 (the "License"); you may
1020-# not use this file except in compliance with the License. You may obtain
1021-# a copy of the License at
1022-#
1023-# http://www.apache.org/licenses/LICENSE-2.0
1024-#
1025-# Unless required by applicable law or agreed to in writing, software
1026-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
1027-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
1028-# License for the specific language governing permissions and limitations
1029-# under the License.
1030-
1031-#
1032-# XenAPI plugin for host operations
1033-#
1034-
1035-try:
1036- import json
1037-except ImportError:
1038- import simplejson as json
1039-import logging
1040-import os
1041-import random
1042-import re
1043-import subprocess
1044-import tempfile
1045-import time
1046-
1047-import XenAPI
1048-import XenAPIPlugin
1049-import pluginlib_nova as pluginlib
1050-
1051-
1052-pluginlib.configure_logging("xenhost")
1053-
1054-host_data_pattern = re.compile(r"\s*(\S+) \([^\)]+\) *: ?(.*)")
1055-config_file_path = "/usr/etc/xenhost.conf"
1056-DEFAULT_TRIES = 23
1057-DEFAULT_SLEEP = 10
1058-
1059-
1060-def jsonify(fnc):
1061- def wrapper(*args, **kwargs):
1062- return json.dumps(fnc(*args, **kwargs))
1063- return wrapper
1064-
1065-
1066-class TimeoutError(StandardError):
1067- pass
1068-
1069-
1070-def _run_command(cmd):
1071- """Abstracts out the basics of issuing system commands. If the command
1072- returns anything in stderr, a PluginError is raised with that information.
1073- Otherwise, the output from stdout is returned.
1074- """
1075- pipe = subprocess.PIPE
1076- proc = subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe,
1077- stderr=pipe, close_fds=True)
1078- proc.wait()
1079- err = proc.stderr.read()
1080- if err:
1081- raise pluginlib.PluginError(err)
1082- return proc.stdout.read()
1083-
1084-
1085-# NOTE (salvatore-orlando):
1086-# Instead of updating run_command a new method has been implemented,
1087-# in order to avoid risking breaking existing functions calling _run_command
1088-def _run_command_with_input(cmd, process_input):
1089- """Abstracts out the basics of issuing system commands. If the command
1090- returns anything in stderr, a PluginError is raised with that information.
1091- Otherwise, the output from stdout is returned.
1092-
1093- process_input specificies a variable to use as the process' standard input.
1094- """
1095- pipe = subprocess.PIPE
1096- # cmd can be either a single string with command and arguments,
1097- # or a sequence of string
1098- if not hasattr(cmd, '__iter__'):
1099- cmd = [cmd] # make it iterable
1100-
1101- #Note(salvatore-orlando): the shell argument has been set to False
1102- proc = subprocess.Popen(cmd, shell=False, stdin=pipe, stdout=pipe,
1103- stderr=pipe, close_fds=True)
1104- if process_input is not None:
1105- (output, err) = proc.communicate(process_input)
1106- else:
1107- (output, err) = proc.communicate()
1108- if err:
1109- raise pluginlib.PluginError(err)
1110- # This is tantamount to proc.stdout.read()
1111- return output
1112-
1113-
1114-def _resume_compute(session, compute_ref, compute_uuid):
1115- """Resume compute node on slave host after pool join. This has to
1116- happen regardless of the success or failure of the join operation."""
1117- try:
1118- # session is valid if the join operation has failed
1119- session.xenapi.VM.start(compute_ref, False, True)
1120- except XenAPI.Failure, e:
1121- # if session is invalid, e.g. xapi has restarted, then the pool
1122- # join has been successful, wait for xapi to become alive again
1123- for c in xrange(0, DEFAULT_TRIES):
1124- try:
1125- _run_command("xe vm-start uuid=%s" % compute_uuid)
1126- return
1127- except pluginlib.PluginError, e:
1128- logging.exception('Waited %d seconds for the slave to '
1129- 'become available.' % (c * DEFAULT_SLEEP))
1130- time.sleep(DEFAULT_SLEEP)
1131- raise pluginlib.PluginError('Unrecoverable error: the host has '
1132- 'not come back for more than %d seconds'
1133- % (DEFAULT_SLEEP * (DEFAULT_TRIES + 1)))
1134-
1135-
1136-@jsonify
1137-def set_host_enabled(self, arg_dict):
1138- """Sets this host's ability to accept new instances.
1139- It will otherwise continue to operate normally.
1140- """
1141- enabled = arg_dict.get("enabled")
1142- if enabled is None:
1143- raise pluginlib.PluginError(
1144- _("Missing 'enabled' argument to set_host_enabled"))
1145-
1146- host_uuid = arg_dict['host_uuid']
1147- if enabled == "true":
1148- result = _run_command("xe host-enable uuid=%s" % host_uuid)
1149- elif enabled == "false":
1150- result = _run_command("xe host-disable uuid=%s" % host_uuid)
1151- else:
1152- raise pluginlib.PluginError(_("Illegal enabled status: %s") % enabled)
1153- # Should be empty string
1154- if result:
1155- raise pluginlib.PluginError(result)
1156- # Return the current enabled status
1157- cmd = "xe host-param-list uuid=%s | grep enabled" % host_uuid
1158- resp = _run_command(cmd)
1159- # Response should be in the format: "enabled ( RO): true"
1160- host_enabled = resp.strip().split()[-1]
1161- if host_enabled == "true":
1162- status = "enabled"
1163- else:
1164- status = "disabled"
1165- return {"status": status}
1166-
1167-
1168-def _write_config_dict(dct):
1169- conf_file = file(config_file_path, "w")
1170- json.dump(dct, conf_file)
1171- conf_file.close()
1172-
1173-
1174-def _get_config_dict():
1175- """Returns a dict containing the key/values in the config file.
1176- If the file doesn't exist, it is created, and an empty dict
1177- is returned.
1178- """
1179- try:
1180- conf_file = file(config_file_path)
1181- config_dct = json.load(conf_file)
1182- conf_file.close()
1183- except IOError:
1184- # File doesn't exist
1185- config_dct = {}
1186- # Create the file
1187- _write_config_dict(config_dct)
1188- return config_dct
1189-
1190-
1191-@jsonify
1192-def get_config(self, arg_dict):
1193- """Return the value stored for the specified key, or None if no match."""
1194- conf = _get_config_dict()
1195- params = arg_dict["params"]
1196- try:
1197- dct = json.loads(params)
1198- except Exception, e:
1199- dct = params
1200- key = dct["key"]
1201- ret = conf.get(key)
1202- if ret is None:
1203- # Can't jsonify None
1204- return "None"
1205- return ret
1206-
1207-
1208-@jsonify
1209-def set_config(self, arg_dict):
1210- """Write the specified key/value pair, overwriting any existing value."""
1211- conf = _get_config_dict()
1212- params = arg_dict["params"]
1213- try:
1214- dct = json.loads(params)
1215- except Exception, e:
1216- dct = params
1217- key = dct["key"]
1218- val = dct["value"]
1219- if val is None:
1220- # Delete the key, if present
1221- conf.pop(key, None)
1222- else:
1223- conf.update({key: val})
1224- _write_config_dict(conf)
1225-
1226-
1227-def iptables_config(session, args):
1228- # command should be either save or restore
1229- logging.debug("iptables_config:enter")
1230- logging.debug("iptables_config: args=%s", args)
1231- cmd_args = pluginlib.exists(args, 'cmd_args')
1232- logging.debug("iptables_config: cmd_args=%s", cmd_args)
1233- process_input = pluginlib.optional(args, 'process_input')
1234- logging.debug("iptables_config: process_input=%s", process_input)
1235- cmd = json.loads(cmd_args)
1236- cmd = map(str, cmd)
1237-
1238- # either execute iptable-save or iptables-restore
1239- # command must be only one of these two
1240- # process_input must be used only with iptables-restore
1241- if len(cmd) > 0 and cmd[0] in ('iptables-save',
1242- 'iptables-restore',
1243- 'ip6tables-save',
1244- 'ip6tables-restore'):
1245- result = _run_command_with_input(cmd, process_input)
1246- ret_str = json.dumps(dict(out=result,
1247- err=''))
1248- logging.debug("iptables_config:exit")
1249- return ret_str
1250- else:
1251- # else don't do anything and return an error
1252- raise pluginlib.PluginError(_("Invalid iptables command"))
1253-
1254-
1255-def _power_action(action, arg_dict):
1256- # Host must be disabled first
1257- host_uuid = arg_dict['host_uuid']
1258- result = _run_command("xe host-disable uuid=%s" % host_uuid)
1259- if result:
1260- raise pluginlib.PluginError(result)
1261- # All running VMs must be shutdown
1262- result = _run_command("xe vm-shutdown --multiple "
1263- "resident-on=%s" % host_uuid)
1264- if result:
1265- raise pluginlib.PluginError(result)
1266- cmds = {"reboot": "xe host-reboot uuid=%s",
1267- "startup": "xe host-power-on uuid=%s",
1268- "shutdown": "xe host-shutdown uuid=%s"}
1269- result = _run_command(cmds[action] % host_uuid)
1270- # Should be empty string
1271- if result:
1272- raise pluginlib.PluginError(result)
1273- return {"power_action": action}
1274-
1275-
1276-@jsonify
1277-def host_reboot(self, arg_dict):
1278- """Reboots the host."""
1279- return _power_action("reboot", arg_dict)
1280-
1281-
1282-@jsonify
1283-def host_shutdown(self, arg_dict):
1284- """Reboots the host."""
1285- return _power_action("shutdown", arg_dict)
1286-
1287-
1288-@jsonify
1289-def host_start(self, arg_dict):
1290- """Starts the host. Currently not feasible, since the host
1291- runs on the same machine as Xen.
1292- """
1293- return _power_action("startup", arg_dict)
1294-
1295-
1296-@jsonify
1297-def host_join(self, arg_dict):
1298- """Join a remote host into a pool whose master is the host
1299- where the plugin is called from. The following constraints apply:
1300-
1301- - The host must have no VMs running, except nova-compute, which will be
1302- shut down (and restarted upon pool-join) automatically,
1303- - The host must have no shared storage currently set up,
1304- - The host must have the same license of the master,
1305- - The host must have the same supplemental packs as the master."""
1306- session = XenAPI.Session(arg_dict.get("url"))
1307- session.login_with_password(arg_dict.get("user"),
1308- arg_dict.get("password"))
1309- compute_ref = session.xenapi.VM.get_by_uuid(arg_dict.get('compute_uuid'))
1310- session.xenapi.VM.clean_shutdown(compute_ref)
1311- try:
1312- if arg_dict.get("force"):
1313- session.xenapi.pool.join(arg_dict.get("master_addr"),
1314- arg_dict.get("master_user"),
1315- arg_dict.get("master_pass"))
1316- else:
1317- session.xenapi.pool.join_force(arg_dict.get("master_addr"),
1318- arg_dict.get("master_user"),
1319- arg_dict.get("master_pass"))
1320- finally:
1321- _resume_compute(session, compute_ref, arg_dict.get("compute_uuid"))
1322-
1323-
1324-@jsonify
1325-def host_data(self, arg_dict):
1326- """Runs the commands on the xenstore host to return the current status
1327- information.
1328- """
1329- host_uuid = arg_dict['host_uuid']
1330- resp = _run_command("xe host-param-list uuid=%s" % host_uuid)
1331- parsed_data = parse_response(resp)
1332- # We have the raw dict of values. Extract those that we need,
1333- # and convert the data types as needed.
1334- ret_dict = cleanup(parsed_data)
1335- # Add any config settings
1336- config = _get_config_dict()
1337- ret_dict.update(config)
1338- return ret_dict
1339-
1340-
1341-def parse_response(resp):
1342- data = {}
1343- for ln in resp.splitlines():
1344- if not ln:
1345- continue
1346- mtch = host_data_pattern.match(ln.strip())
1347- try:
1348- k, v = mtch.groups()
1349- data[k] = v
1350- except AttributeError:
1351- # Not a valid line; skip it
1352- continue
1353- return data
1354-
1355-
1356-@jsonify
1357-def host_uptime(self, arg_dict):
1358- """Returns the result of the uptime command on the xenhost."""
1359- return {"uptime": _run_command('uptime')}
1360-
1361-
1362-def cleanup(dct):
1363- """Take the raw KV pairs returned and translate them into the
1364- appropriate types, discarding any we don't need.
1365- """
1366- def safe_int(val):
1367- """Integer values will either be string versions of numbers,
1368- or empty strings. Convert the latter to nulls.
1369- """
1370- try:
1371- return int(val)
1372- except ValueError:
1373- return None
1374-
1375- def strip_kv(ln):
1376- return [val.strip() for val in ln.split(":", 1)]
1377-
1378- out = {}
1379-
1380-# sbs = dct.get("supported-bootloaders", "")
1381-# out["host_supported-bootloaders"] = sbs.split("; ")
1382-# out["host_suspend-image-sr-uuid"] = dct.get("suspend-image-sr-uuid", "")
1383-# out["host_crash-dump-sr-uuid"] = dct.get("crash-dump-sr-uuid", "")
1384-# out["host_local-cache-sr"] = dct.get("local-cache-sr", "")
1385- out["enabled"] = dct.get("enabled", "true") == "true"
1386- out["host_memory"] = omm = {}
1387- omm["total"] = safe_int(dct.get("memory-total", ""))
1388- omm["overhead"] = safe_int(dct.get("memory-overhead", ""))
1389- omm["free"] = safe_int(dct.get("memory-free", ""))
1390- omm["free-computed"] = safe_int(
1391- dct.get("memory-free-computed", ""))
1392-
1393-# out["host_API-version"] = avv = {}
1394-# avv["vendor"] = dct.get("API-version-vendor", "")
1395-# avv["major"] = safe_int(dct.get("API-version-major", ""))
1396-# avv["minor"] = safe_int(dct.get("API-version-minor", ""))
1397-
1398- out["enabled"] = dct.get("enabled", True)
1399- out["host_uuid"] = dct.get("uuid", None)
1400- out["host_name-label"] = dct.get("name-label", "")
1401- out["host_name-description"] = dct.get("name-description", "")
1402-# out["host_host-metrics-live"] = dct.get(
1403-# "host-metrics-live", "false") == "true"
1404- out["host_hostname"] = dct.get("hostname", "")
1405- out["host_ip_address"] = dct.get("address", "")
1406- oc = dct.get("other-config", "")
1407- out["host_other-config"] = ocd = {}
1408- if oc:
1409- for oc_fld in oc.split("; "):
1410- ock, ocv = strip_kv(oc_fld)
1411- ocd[ock] = ocv
1412-# out["host_capabilities"] = dct.get("capabilities", "").split("; ")
1413-# out["host_allowed-operations"] = dct.get(
1414-# "allowed-operations", "").split("; ")
1415-# lsrv = dct.get("license-server", "")
1416-# out["host_license-server"] = ols = {}
1417-# if lsrv:
1418-# for lspart in lsrv.split("; "):
1419-# lsk, lsv = lspart.split(": ")
1420-# if lsk == "port":
1421-# ols[lsk] = safe_int(lsv)
1422-# else:
1423-# ols[lsk] = lsv
1424-# sv = dct.get("software-version", "")
1425-# out["host_software-version"] = osv = {}
1426-# if sv:
1427-# for svln in sv.split("; "):
1428-# svk, svv = strip_kv(svln)
1429-# osv[svk] = svv
1430- cpuinf = dct.get("cpu_info", "")
1431- out["host_cpu_info"] = ocp = {}
1432- if cpuinf:
1433- for cpln in cpuinf.split("; "):
1434- cpk, cpv = strip_kv(cpln)
1435- if cpk in ("cpu_count", "family", "model", "stepping"):
1436- ocp[cpk] = safe_int(cpv)
1437- else:
1438- ocp[cpk] = cpv
1439-# out["host_edition"] = dct.get("edition", "")
1440-# out["host_external-auth-service-name"] = dct.get(
1441-# "external-auth-service-name", "")
1442- return out
1443-
1444-
1445-if __name__ == "__main__":
1446- XenAPIPlugin.dispatch(
1447- {"host_data": host_data,
1448- "set_host_enabled": set_host_enabled,
1449- "host_shutdown": host_shutdown,
1450- "host_reboot": host_reboot,
1451- "host_start": host_start,
1452- "host_join": host_join,
1453- "get_config": get_config,
1454- "set_config": set_config,
1455- "iptables_config": iptables_config,
1456- "host_uptime": host_uptime})
1457
1458=== removed directory '.pc/rbd-security.patch'
1459=== removed directory '.pc/rbd-security.patch/nova'
1460=== removed directory '.pc/rbd-security.patch/nova/virt'
1461=== removed directory '.pc/rbd-security.patch/nova/virt/libvirt'
1462=== removed file '.pc/rbd-security.patch/nova/virt/libvirt/volume.py'
1463--- .pc/rbd-security.patch/nova/virt/libvirt/volume.py 2012-10-12 12:35:01 +0000
1464+++ .pc/rbd-security.patch/nova/virt/libvirt/volume.py 1970-01-01 00:00:00 +0000
1465@@ -1,206 +0,0 @@
1466-# vim: tabstop=4 shiftwidth=4 softtabstop=4
1467-
1468-# Copyright 2011 OpenStack LLC.
1469-# All Rights Reserved.
1470-#
1471-# Licensed under the Apache License, Version 2.0 (the "License"); you may
1472-# not use this file except in compliance with the License. You may obtain
1473-# a copy of the License at
1474-#
1475-# http://www.apache.org/licenses/LICENSE-2.0
1476-#
1477-# Unless required by applicable law or agreed to in writing, software
1478-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
1479-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
1480-# License for the specific language governing permissions and limitations
1481-# under the License.
1482-
1483-"""Volume drivers for libvirt."""
1484-
1485-import os
1486-import time
1487-
1488-from nova import exception
1489-from nova import flags
1490-from nova.openstack.common import log as logging
1491-from nova import utils
1492-from nova.virt.libvirt import config
1493-from nova.virt.libvirt import utils as virtutils
1494-
1495-LOG = logging.getLogger(__name__)
1496-FLAGS = flags.FLAGS
1497-flags.DECLARE('num_iscsi_scan_tries', 'nova.volume.driver')
1498-
1499-
1500-class LibvirtVolumeDriver(object):
1501- """Base class for volume drivers."""
1502- def __init__(self, connection):
1503- self.connection = connection
1504-
1505- def connect_volume(self, connection_info, mount_device):
1506- """Connect the volume. Returns xml for libvirt."""
1507- conf = config.LibvirtConfigGuestDisk()
1508- conf.source_type = "block"
1509- conf.driver_name = virtutils.pick_disk_driver_name(is_block_dev=True)
1510- conf.driver_format = "raw"
1511- conf.driver_cache = "none"
1512- conf.source_path = connection_info['data']['device_path']
1513- conf.target_dev = mount_device
1514- conf.target_bus = "virtio"
1515- conf.serial = connection_info.get('serial')
1516- return conf
1517-
1518- def disconnect_volume(self, connection_info, mount_device):
1519- """Disconnect the volume"""
1520- pass
1521-
1522-
1523-class LibvirtFakeVolumeDriver(LibvirtVolumeDriver):
1524- """Driver to attach Network volumes to libvirt."""
1525-
1526- def connect_volume(self, connection_info, mount_device):
1527- conf = config.LibvirtConfigGuestDisk()
1528- conf.source_type = "network"
1529- conf.driver_name = "qemu"
1530- conf.driver_format = "raw"
1531- conf.driver_cache = "none"
1532- conf.source_protocol = "fake"
1533- conf.source_host = "fake"
1534- conf.target_dev = mount_device
1535- conf.target_bus = "virtio"
1536- conf.serial = connection_info.get('serial')
1537- return conf
1538-
1539-
1540-class LibvirtNetVolumeDriver(LibvirtVolumeDriver):
1541- """Driver to attach Network volumes to libvirt."""
1542-
1543- def connect_volume(self, connection_info, mount_device):
1544- conf = config.LibvirtConfigGuestDisk()
1545- conf.source_type = "network"
1546- conf.driver_name = virtutils.pick_disk_driver_name(is_block_dev=False)
1547- conf.driver_format = "raw"
1548- conf.driver_cache = "none"
1549- conf.source_protocol = connection_info['driver_volume_type']
1550- conf.source_host = connection_info['data']['name']
1551- conf.target_dev = mount_device
1552- conf.target_bus = "virtio"
1553- conf.serial = connection_info.get('serial')
1554- netdisk_properties = connection_info['data']
1555- if netdisk_properties.get('auth_enabled'):
1556- conf.auth_username = netdisk_properties['auth_username']
1557- conf.auth_secret_type = netdisk_properties['secret_type']
1558- conf.auth_secret_uuid = netdisk_properties['secret_uuid']
1559- return conf
1560-
1561-
1562-class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver):
1563- """Driver to attach Network volumes to libvirt."""
1564-
1565- def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
1566- check_exit_code = kwargs.pop('check_exit_code', 0)
1567- (out, err) = utils.execute('iscsiadm', '-m', 'node', '-T',
1568- iscsi_properties['target_iqn'],
1569- '-p', iscsi_properties['target_portal'],
1570- *iscsi_command, run_as_root=True,
1571- check_exit_code=check_exit_code)
1572- LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
1573- (iscsi_command, out, err))
1574- return (out, err)
1575-
1576- def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
1577- **kwargs):
1578- iscsi_command = ('--op', 'update', '-n', property_key,
1579- '-v', property_value)
1580- return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs)
1581-
1582- @utils.synchronized('connect_volume')
1583- def connect_volume(self, connection_info, mount_device):
1584- """Attach the volume to instance_name"""
1585- iscsi_properties = connection_info['data']
1586- # NOTE(vish): If we are on the same host as nova volume, the
1587- # discovery makes the target so we don't need to
1588- # run --op new. Therefore, we check to see if the
1589- # target exists, and if we get 255 (Not Found), then
1590- # we run --op new. This will also happen if another
1591- # volume is using the same target.
1592- try:
1593- self._run_iscsiadm(iscsi_properties, ())
1594- except exception.ProcessExecutionError as exc:
1595- # iscsiadm returns 21 for "No records found" after version 2.0-871
1596- if exc.exit_code in [21, 255]:
1597- self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
1598- else:
1599- raise
1600-
1601- if iscsi_properties.get('auth_method'):
1602- self._iscsiadm_update(iscsi_properties,
1603- "node.session.auth.authmethod",
1604- iscsi_properties['auth_method'])
1605- self._iscsiadm_update(iscsi_properties,
1606- "node.session.auth.username",
1607- iscsi_properties['auth_username'])
1608- self._iscsiadm_update(iscsi_properties,
1609- "node.session.auth.password",
1610- iscsi_properties['auth_password'])
1611-
1612- # NOTE(vish): If we have another lun on the same target, we may
1613- # have a duplicate login
1614- self._run_iscsiadm(iscsi_properties, ("--login",),
1615- check_exit_code=[0, 255])
1616-
1617- self._iscsiadm_update(iscsi_properties, "node.startup", "automatic")
1618-
1619- host_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-%s" %
1620- (iscsi_properties['target_portal'],
1621- iscsi_properties['target_iqn'],
1622- iscsi_properties.get('target_lun', 0)))
1623-
1624- # The /dev/disk/by-path/... node is not always present immediately
1625- # TODO(justinsb): This retry-with-delay is a pattern, move to utils?
1626- tries = 0
1627- while not os.path.exists(host_device):
1628- if tries >= FLAGS.num_iscsi_scan_tries:
1629- raise exception.NovaException(_("iSCSI device not found at %s")
1630- % (host_device))
1631-
1632- LOG.warn(_("ISCSI volume not yet found at: %(mount_device)s. "
1633- "Will rescan & retry. Try number: %(tries)s") %
1634- locals())
1635-
1636- # The rescan isn't documented as being necessary(?), but it helps
1637- self._run_iscsiadm(iscsi_properties, ("--rescan",))
1638-
1639- tries = tries + 1
1640- if not os.path.exists(host_device):
1641- time.sleep(tries ** 2)
1642-
1643- if tries != 0:
1644- LOG.debug(_("Found iSCSI node %(mount_device)s "
1645- "(after %(tries)s rescans)") %
1646- locals())
1647-
1648- connection_info['data']['device_path'] = host_device
1649- sup = super(LibvirtISCSIVolumeDriver, self)
1650- return sup.connect_volume(connection_info, mount_device)
1651-
1652- @utils.synchronized('connect_volume')
1653- def disconnect_volume(self, connection_info, mount_device):
1654- """Detach the volume from instance_name"""
1655- sup = super(LibvirtISCSIVolumeDriver, self)
1656- sup.disconnect_volume(connection_info, mount_device)
1657- iscsi_properties = connection_info['data']
1658- # NOTE(vish): Only disconnect from the target if no luns from the
1659- # target are in use.
1660- device_prefix = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-" %
1661- (iscsi_properties['target_portal'],
1662- iscsi_properties['target_iqn']))
1663- devices = self.connection.get_all_block_devices()
1664- devices = [dev for dev in devices if dev.startswith(device_prefix)]
1665- if not devices:
1666- self._iscsiadm_update(iscsi_properties, "node.startup", "manual",
1667- check_exit_code=[0, 255])
1668- self._run_iscsiadm(iscsi_properties, ("--logout",),
1669- check_exit_code=[0, 255])
1670- self._run_iscsiadm(iscsi_properties, ('--op', 'delete'),
1671- check_exit_code=[0, 21, 255])
1672
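Note on the LibvirtISCSIVolumeDriver hunk above: the connect_volume loop that waits for the /dev/disk/by-path/... node carries an inline TODO about extracting its retry-with-delay logic into a shared utility. The helper below is a hedged sketch of what such a utility could look like, assuming a hypothetical wait_for_path() name and a caller-supplied rescan callback; it is not an existing Nova function.

# Hypothetical retry-with-delay helper mirroring the loop in connect_volume
# above; the name, arguments, and defaults are assumptions for illustration.
import os
import time

def wait_for_path(path, max_tries=3, rescan=None):
    """Poll for a device path with quadratic backoff, rescanning between tries.

    Returns the number of tries used; raises IOError if the path never appears.
    """
    tries = 0
    while not os.path.exists(path):
        if tries >= max_tries:
            raise IOError('device not found at %s' % path)
        if rescan is not None:
            rescan()  # e.g. wrap the 'iscsiadm ... --rescan' invocation
        tries += 1
        if not os.path.exists(path):
            time.sleep(tries ** 2)  # 1s, 4s, 9s, ... as in the loop above
    return tries

In the removed driver, rescan would wrap the _run_iscsiadm(iscsi_properties, ('--rescan',)) call and max_tries would come from the num_iscsi_scan_tries flag.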
1673=== removed directory '.pc/ubuntu'
1674=== removed directory '.pc/ubuntu/fix-ec2-volume-id-mappings.patch'
1675=== removed directory '.pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova'
1676=== removed directory '.pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova/db'
1677=== removed directory '.pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova/db/sqlalchemy'
1678=== removed file '.pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova/db/sqlalchemy/api.py'
1679--- .pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova/db/sqlalchemy/api.py 2012-10-12 12:35:01 +0000
1680+++ .pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova/db/sqlalchemy/api.py 1970-01-01 00:00:00 +0000
1681@@ -1,5256 +0,0 @@
1682-# vim: tabstop=4 shiftwidth=4 softtabstop=4
1683-
1684-# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
1685-# Copyright 2010 United States Government as represented by the
1686-# Administrator of the National Aeronautics and Space Administration.
1687-# All Rights Reserved.
1688-#
1689-# Licensed under the Apache License, Version 2.0 (the "License"); you may
1690-# not use this file except in compliance with the License. You may obtain
1691-# a copy of the License at
1692-#
1693-# http://www.apache.org/licenses/LICENSE-2.0
1694-#
1695-# Unless required by applicable law or agreed to in writing, software
1696-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
1697-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
1698-# License for the specific language governing permissions and limitations
1699-# under the License.
1700-
1701-"""Implementation of SQLAlchemy backend."""
1702-
1703-import collections
1704-import copy
1705-import datetime
1706-import functools
1707-import warnings
1708-
1709-from nova import block_device
1710-from nova.common.sqlalchemyutils import paginate_query
1711-from nova.compute import vm_states
1712-from nova import db
1713-from nova.db.sqlalchemy import models
1714-from nova.db.sqlalchemy.session import get_session
1715-from nova import exception
1716-from nova import flags
1717-from nova.openstack.common import log as logging
1718-from nova.openstack.common import timeutils
1719-from nova import utils
1720-from sqlalchemy import and_
1721-from sqlalchemy.exc import IntegrityError
1722-from sqlalchemy import or_
1723-from sqlalchemy.orm import joinedload
1724-from sqlalchemy.orm import joinedload_all
1725-from sqlalchemy.sql.expression import asc
1726-from sqlalchemy.sql.expression import desc
1727-from sqlalchemy.sql.expression import literal_column
1728-from sqlalchemy.sql import func
1729-
1730-FLAGS = flags.FLAGS
1731-
1732-LOG = logging.getLogger(__name__)
1733-
1734-
1735-def is_admin_context(context):
1736- """Indicates if the request context is an administrator."""
1737- if not context:
1738- warnings.warn(_('Use of empty request context is deprecated'),
1739- DeprecationWarning)
1740- raise Exception('die')
1741- return context.is_admin
1742-
1743-
1744-def is_user_context(context):
1745- """Indicates if the request context is a normal user."""
1746- if not context:
1747- return False
1748- if context.is_admin:
1749- return False
1750- if not context.user_id or not context.project_id:
1751- return False
1752- return True
1753-
1754-
1755-def authorize_project_context(context, project_id):
1756- """Ensures a request has permission to access the given project."""
1757- if is_user_context(context):
1758- if not context.project_id:
1759- raise exception.NotAuthorized()
1760- elif context.project_id != project_id:
1761- raise exception.NotAuthorized()
1762-
1763-
1764-def authorize_user_context(context, user_id):
1765- """Ensures a request has permission to access the given user."""
1766- if is_user_context(context):
1767- if not context.user_id:
1768- raise exception.NotAuthorized()
1769- elif context.user_id != user_id:
1770- raise exception.NotAuthorized()
1771-
1772-
1773-def authorize_quota_class_context(context, class_name):
1774- """Ensures a request has permission to access the given quota class."""
1775- if is_user_context(context):
1776- if not context.quota_class:
1777- raise exception.NotAuthorized()
1778- elif context.quota_class != class_name:
1779- raise exception.NotAuthorized()
1780-
1781-
1782-def require_admin_context(f):
1783- """Decorator to require admin request context.
1784-
1785- The first argument to the wrapped function must be the context.
1786-
1787- """
1788-
1789- def wrapper(*args, **kwargs):
1790- if not is_admin_context(args[0]):
1791- raise exception.AdminRequired()
1792- return f(*args, **kwargs)
1793- return wrapper
1794-
1795-
1796-def require_context(f):
1797- """Decorator to require *any* user or admin context.
1798-
1799- This does no authorization for user or project access matching, see
1800- :py:func:`authorize_project_context` and
1801- :py:func:`authorize_user_context`.
1802-
1803- The first argument to the wrapped function must be the context.
1804-
1805- """
1806-
1807- def wrapper(*args, **kwargs):
1808- if not is_admin_context(args[0]) and not is_user_context(args[0]):
1809- raise exception.NotAuthorized()
1810- return f(*args, **kwargs)
1811- return wrapper
1812-
1813-
1814-def require_instance_exists(f):
1815- """Decorator to require the specified instance to exist.
1816-
1817- Requires the wrapped function to use context and instance_id as
1818- their first two arguments.
1819- """
1820- @functools.wraps(f)
1821- def wrapper(context, instance_id, *args, **kwargs):
1822- db.instance_get(context, instance_id)
1823- return f(context, instance_id, *args, **kwargs)
1824-
1825- return wrapper
1826-
1827-
1828-def require_instance_exists_using_uuid(f):
1829- """Decorator to require the specified instance to exist.
1830-
1831- Requires the wrapped function to use context and instance_uuid as
1832- their first two arguments.
1833- """
1834- @functools.wraps(f)
1835- def wrapper(context, instance_uuid, *args, **kwargs):
1836- db.instance_get_by_uuid(context, instance_uuid)
1837- return f(context, instance_uuid, *args, **kwargs)
1838-
1839- return wrapper
1840-
1841-
1842-def require_volume_exists(f):
1843- """Decorator to require the specified volume to exist.
1844-
1845- Requires the wrapped function to use context and volume_id as
1846- their first two arguments.
1847- """
1848-
1849- def wrapper(context, volume_id, *args, **kwargs):
1850- db.volume_get(context, volume_id)
1851- return f(context, volume_id, *args, **kwargs)
1852- wrapper.__name__ = f.__name__
1853- return wrapper
1854-
1855-
1856-def require_aggregate_exists(f):
1857- """Decorator to require the specified aggregate to exist.
1858-
1859- Requires the wrapped function to use context and aggregate_id as
1860- their first two arguments.
1861- """
1862-
1863- @functools.wraps(f)
1864- def wrapper(context, aggregate_id, *args, **kwargs):
1865- db.aggregate_get(context, aggregate_id)
1866- return f(context, aggregate_id, *args, **kwargs)
1867- return wrapper
1868-
1869-
1870-def model_query(context, model, *args, **kwargs):
1871- """Query helper that accounts for context's `read_deleted` field.
1872-
1873- :param context: context to query under
1874- :param session: if present, the session to use
1875- :param read_deleted: if present, overrides context's read_deleted field.
1876- :param project_only: if present and context is user-type, then restrict
1877- query to match the context's project_id. If set to 'allow_none',
1878- restriction includes project_id = None.
1879- """
1880- session = kwargs.get('session') or get_session()
1881- read_deleted = kwargs.get('read_deleted') or context.read_deleted
1882- project_only = kwargs.get('project_only', False)
1883-
1884- query = session.query(model, *args)
1885-
1886- if read_deleted == 'no':
1887- query = query.filter_by(deleted=False)
1888- elif read_deleted == 'yes':
1889- pass # omit the filter to include deleted and active
1890- elif read_deleted == 'only':
1891- query = query.filter_by(deleted=True)
1892- else:
1893- raise Exception(
1894- _("Unrecognized read_deleted value '%s'") % read_deleted)
1895-
1896- if is_user_context(context) and project_only:
1897- if project_only == 'allow_none':
1898- query = query.filter(or_(model.project_id == context.project_id,
1899- model.project_id == None))
1900- else:
1901- query = query.filter_by(project_id=context.project_id)
1902-
1903- return query
1904-
1905-
1906-def exact_filter(query, model, filters, legal_keys):
1907- """Applies exact match filtering to a query.
1908-
1909- Returns the updated query. Modifies filters argument to remove
1910- filters consumed.
1911-
1912- :param query: query to apply filters to
1913- :param model: model object the query applies to, for IN-style
1914- filtering
1915- :param filters: dictionary of filters; values that are lists,
1916- tuples, sets, or frozensets cause an 'IN' test to
1917- be performed, while exact matching ('==' operator)
1918- is used for other values
1919- :param legal_keys: list of keys to apply exact filtering to
1920- """
1921-
1922- filter_dict = {}
1923-
1924- # Walk through all the keys
1925- for key in legal_keys:
1926- # Skip ones we're not filtering on
1927- if key not in filters:
1928- continue
1929-
1930- # OK, filtering on this key; what value do we search for?
1931- value = filters.pop(key)
1932-
1933- if key == 'metadata':
1934- column_attr = getattr(model, key)
1935- if isinstance(value, list):
1936- for item in value:
1937- for k, v in item.iteritems():
1938- query = query.filter(column_attr.any(key=k))
1939- query = query.filter(column_attr.any(value=v))
1940-
1941- else:
1942- for k, v in value.iteritems():
1943- query = query.filter(column_attr.any(key=k))
1944- query = query.filter(column_attr.any(value=v))
1945- elif isinstance(value, (list, tuple, set, frozenset)):
1946- # Looking for values in a list; apply to query directly
1947- column_attr = getattr(model, key)
1948- query = query.filter(column_attr.in_(value))
1949- else:
1950- # OK, simple exact match; save for later
1951- filter_dict[key] = value
1952-
1953- # Apply simple exact matches
1954- if filter_dict:
1955- query = query.filter_by(**filter_dict)
1956-
1957- return query
1958-
1959-
1960-###################
1961-
1962-
1963-def constraint(**conditions):
1964- return Constraint(conditions)
1965-
1966-
1967-def equal_any(*values):
1968- return EqualityCondition(values)
1969-
1970-
1971-def not_equal(*values):
1972- return InequalityCondition(values)
1973-
1974-
1975-class Constraint(object):
1976-
1977- def __init__(self, conditions):
1978- self.conditions = conditions
1979-
1980- def apply(self, model, query):
1981- for key, condition in self.conditions.iteritems():
1982- for clause in condition.clauses(getattr(model, key)):
1983- query = query.filter(clause)
1984- return query
1985-
1986-
1987-class EqualityCondition(object):
1988-
1989- def __init__(self, values):
1990- self.values = values
1991-
1992- def clauses(self, field):
1993- return or_([field == value for value in self.values])
1994-
1995-
1996-class InequalityCondition(object):
1997-
1998- def __init__(self, values):
1999- self.values = values
2000-
2001- def clauses(self, field):
2002- return [field != value for value in self.values]
2003-
2004-
2005-###################
2006-
2007-
2008-@require_admin_context
2009-def service_destroy(context, service_id):
2010- session = get_session()
2011- with session.begin():
2012- service_ref = service_get(context, service_id, session=session)
2013- service_ref.delete(session=session)
2014-
2015- if service_ref.topic == 'compute' and service_ref.compute_node:
2016- for c in service_ref.compute_node:
2017- c.delete(session=session)
2018-
2019-
2020-@require_admin_context
2021-def service_get(context, service_id, session=None):
2022- result = model_query(context, models.Service, session=session).\
2023- options(joinedload('compute_node')).\
2024- filter_by(id=service_id).\
2025- first()
2026- if not result:
2027- raise exception.ServiceNotFound(service_id=service_id)
2028-
2029- return result
2030-
2031-
2032-@require_admin_context
2033-def service_get_all(context, disabled=None):
2034- query = model_query(context, models.Service)
2035-
2036- if disabled is not None:
2037- query = query.filter_by(disabled=disabled)
2038-
2039- return query.all()
2040-
2041-
2042-@require_admin_context
2043-def service_get_all_by_topic(context, topic):
2044- return model_query(context, models.Service, read_deleted="no").\
2045- filter_by(disabled=False).\
2046- filter_by(topic=topic).\
2047- all()
2048-
2049-
2050-@require_admin_context
2051-def service_get_by_host_and_topic(context, host, topic):
2052- return model_query(context, models.Service, read_deleted="no").\
2053- filter_by(disabled=False).\
2054- filter_by(host=host).\
2055- filter_by(topic=topic).\
2056- first()
2057-
2058-
2059-@require_admin_context
2060-def service_get_all_by_host(context, host):
2061- return model_query(context, models.Service, read_deleted="no").\
2062- filter_by(host=host).\
2063- all()
2064-
2065-
2066-@require_admin_context
2067-def service_get_all_compute_by_host(context, host):
2068- result = model_query(context, models.Service, read_deleted="no").\
2069- options(joinedload('compute_node')).\
2070- filter_by(host=host).\
2071- filter_by(topic="compute").\
2072- all()
2073-
2074- if not result:
2075- raise exception.ComputeHostNotFound(host=host)
2076-
2077- return result
2078-
2079-
2080-@require_admin_context
2081-def _service_get_all_topic_subquery(context, session, topic, subq, label):
2082- sort_value = getattr(subq.c, label)
2083- return model_query(context, models.Service,
2084- func.coalesce(sort_value, 0),
2085- session=session, read_deleted="no").\
2086- filter_by(topic=topic).\
2087- filter_by(disabled=False).\
2088- outerjoin((subq, models.Service.host == subq.c.host)).\
2089- order_by(sort_value).\
2090- all()
2091-
2092-
2093-@require_admin_context
2094-def service_get_all_compute_sorted(context):
2095- session = get_session()
2096- with session.begin():
2097- # NOTE(vish): The intended query is below
2098- # SELECT services.*, COALESCE(inst_cores.instance_cores,
2099- # 0)
2100- # FROM services LEFT OUTER JOIN
2101- # (SELECT host, SUM(instances.vcpus) AS instance_cores
2102- # FROM instances GROUP BY host) AS inst_cores
2103- # ON services.host = inst_cores.host
2104- topic = 'compute'
2105- label = 'instance_cores'
2106- subq = model_query(context, models.Instance.host,
2107- func.sum(models.Instance.vcpus).label(label),
2108- session=session, read_deleted="no").\
2109- group_by(models.Instance.host).\
2110- subquery()
2111- return _service_get_all_topic_subquery(context,
2112- session,
2113- topic,
2114- subq,
2115- label)
2116-
2117-
2118-@require_admin_context
2119-def service_get_all_volume_sorted(context):
2120- session = get_session()
2121- with session.begin():
2122- topic = 'volume'
2123- label = 'volume_gigabytes'
2124- subq = model_query(context, models.Volume.host,
2125- func.sum(models.Volume.size).label(label),
2126- session=session, read_deleted="no").\
2127- group_by(models.Volume.host).\
2128- subquery()
2129- return _service_get_all_topic_subquery(context,
2130- session,
2131- topic,
2132- subq,
2133- label)
2134-
2135-
2136-@require_admin_context
2137-def service_get_by_args(context, host, binary):
2138- result = model_query(context, models.Service).\
2139- filter_by(host=host).\
2140- filter_by(binary=binary).\
2141- first()
2142-
2143- if not result:
2144- raise exception.HostBinaryNotFound(host=host, binary=binary)
2145-
2146- return result
2147-
2148-
2149-@require_admin_context
2150-def service_create(context, values):
2151- service_ref = models.Service()
2152- service_ref.update(values)
2153- if not FLAGS.enable_new_services:
2154- service_ref.disabled = True
2155- service_ref.save()
2156- return service_ref
2157-
2158-
2159-@require_admin_context
2160-def service_update(context, service_id, values):
2161- session = get_session()
2162- with session.begin():
2163- service_ref = service_get(context, service_id, session=session)
2164- service_ref.update(values)
2165- service_ref.save(session=session)
2166-
2167-
2168-###################
2169-
2170-def compute_node_get(context, compute_id, session=None):
2171- result = model_query(context, models.ComputeNode, session=session).\
2172- filter_by(id=compute_id).\
2173- options(joinedload('service')).\
2174- options(joinedload('stats')).\
2175- first()
2176-
2177- if not result:
2178- raise exception.ComputeHostNotFound(host=compute_id)
2179-
2180- return result
2181-
2182-
2183-@require_admin_context
2184-def compute_node_get_all(context, session=None):
2185- return model_query(context, models.ComputeNode, session=session).\
2186- options(joinedload('service')).\
2187- options(joinedload('stats')).\
2188- all()
2189-
2190-
2191-@require_admin_context
2192-def compute_node_search_by_hypervisor(context, hypervisor_match):
2193- field = models.ComputeNode.hypervisor_hostname
2194- return model_query(context, models.ComputeNode).\
2195- options(joinedload('service')).\
2196- filter(field.like('%%%s%%' % hypervisor_match)).\
2197- all()
2198-
2199-
2200-def _prep_stats_dict(values):
2201- """Make list of ComputeNodeStats"""
2202- stats = []
2203- d = values.get('stats', {})
2204- for k, v in d.iteritems():
2205- stat = models.ComputeNodeStat()
2206- stat['key'] = k
2207- stat['value'] = v
2208- stats.append(stat)
2209- values['stats'] = stats
2210-
2211-
2212-@require_admin_context
2213-def compute_node_create(context, values, session=None):
2214- """Creates a new ComputeNode and populates the capacity fields
2215- with the most recent data."""
2216- _prep_stats_dict(values)
2217-
2218- if not session:
2219- session = get_session()
2220-
2221- with session.begin(subtransactions=True):
2222- compute_node_ref = models.ComputeNode()
2223- session.add(compute_node_ref)
2224- compute_node_ref.update(values)
2225- return compute_node_ref
2226-
2227-
2228-def _update_stats(context, new_stats, compute_id, session, prune_stats=False):
2229-
2230- existing = model_query(context, models.ComputeNodeStat, session=session,
2231- read_deleted="no").filter_by(compute_node_id=compute_id).all()
2232- statmap = {}
2233- for stat in existing:
2234- key = stat['key']
2235- statmap[key] = stat
2236-
2237- stats = []
2238- for k, v in new_stats.iteritems():
2239- old_stat = statmap.pop(k, None)
2240- if old_stat:
2241- # update existing value:
2242- old_stat.update({'value': v})
2243- stats.append(old_stat)
2244- else:
2245- # add new stat:
2246- stat = models.ComputeNodeStat()
2247- stat['compute_node_id'] = compute_id
2248- stat['key'] = k
2249- stat['value'] = v
2250- stats.append(stat)
2251-
2252- if prune_stats:
2253- # prune un-touched old stats:
2254- for stat in statmap.values():
2255- session.add(stat)
2256- stat.update({'deleted': True})
2257-
2258- # add new and updated stats
2259- for stat in stats:
2260- session.add(stat)
2261-
2262-
2263-@require_admin_context
2264-def compute_node_update(context, compute_id, values, prune_stats=False):
2265- """Updates the ComputeNode record with the most recent data"""
2266- stats = values.pop('stats', {})
2267-
2268- session = get_session()
2269- with session.begin(subtransactions=True):
2270- _update_stats(context, stats, compute_id, session, prune_stats)
2271- compute_ref = compute_node_get(context, compute_id, session=session)
2272- compute_ref.update(values)
2273- return compute_ref
2274-
2275-
2276-def compute_node_get_by_host(context, host):
2277- """Get all capacity entries for the given host."""
2278- session = get_session()
2279- with session.begin():
2280- node = session.query(models.ComputeNode).\
2281- join('service').\
2282- filter(models.Service.host == host).\
2283- filter_by(deleted=False)
2284- return node.first()
2285-
2286-
2287-def compute_node_statistics(context):
2288- """Compute statistics over all compute nodes."""
2289- result = model_query(context,
2290- func.count(models.ComputeNode.id),
2291- func.sum(models.ComputeNode.vcpus),
2292- func.sum(models.ComputeNode.memory_mb),
2293- func.sum(models.ComputeNode.local_gb),
2294- func.sum(models.ComputeNode.vcpus_used),
2295- func.sum(models.ComputeNode.memory_mb_used),
2296- func.sum(models.ComputeNode.local_gb_used),
2297- func.sum(models.ComputeNode.free_ram_mb),
2298- func.sum(models.ComputeNode.free_disk_gb),
2299- func.sum(models.ComputeNode.current_workload),
2300- func.sum(models.ComputeNode.running_vms),
2301- func.sum(models.ComputeNode.disk_available_least),
2302- read_deleted="no").first()
2303-
2304- # Build a dict of the info--making no assumptions about result
2305- fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
2306- 'memory_mb_used', 'local_gb_used', 'free_ram_mb', 'free_disk_gb',
2307- 'current_workload', 'running_vms', 'disk_available_least')
2308- return dict((field, int(result[idx] or 0))
2309- for idx, field in enumerate(fields))
2310-
2311-
2312-###################
2313-
2314-
2315-@require_admin_context
2316-def certificate_get(context, certificate_id, session=None):
2317- result = model_query(context, models.Certificate, session=session).\
2318- filter_by(id=certificate_id).\
2319- first()
2320-
2321- if not result:
2322- raise exception.CertificateNotFound(certificate_id=certificate_id)
2323-
2324- return result
2325-
2326-
2327-@require_admin_context
2328-def certificate_create(context, values):
2329- certificate_ref = models.Certificate()
2330- for (key, value) in values.iteritems():
2331- certificate_ref[key] = value
2332- certificate_ref.save()
2333- return certificate_ref
2334-
2335-
2336-@require_admin_context
2337-def certificate_get_all_by_project(context, project_id):
2338- return model_query(context, models.Certificate, read_deleted="no").\
2339- filter_by(project_id=project_id).\
2340- all()
2341-
2342-
2343-@require_admin_context
2344-def certificate_get_all_by_user(context, user_id):
2345- return model_query(context, models.Certificate, read_deleted="no").\
2346- filter_by(user_id=user_id).\
2347- all()
2348-
2349-
2350-@require_admin_context
2351-def certificate_get_all_by_user_and_project(context, user_id, project_id):
2352- return model_query(context, models.Certificate, read_deleted="no").\
2353- filter_by(user_id=user_id).\
2354- filter_by(project_id=project_id).\
2355- all()
2356-
2357-
2358-###################
2359-
2360-
2361-@require_context
2362-def floating_ip_get(context, id):
2363- result = model_query(context, models.FloatingIp, project_only=True).\
2364- filter_by(id=id).\
2365- first()
2366-
2367- if not result:
2368- raise exception.FloatingIpNotFound(id=id)
2369-
2370- return result
2371-
2372-
2373-@require_context
2374-def floating_ip_get_pools(context):
2375- pools = []
2376- for result in model_query(context, models.FloatingIp.pool).distinct():
2377- pools.append({'name': result[0]})
2378- return pools
2379-
2380-
2381-@require_context
2382-def floating_ip_allocate_address(context, project_id, pool):
2383- authorize_project_context(context, project_id)
2384- session = get_session()
2385- with session.begin():
2386- floating_ip_ref = model_query(context, models.FloatingIp,
2387- session=session, read_deleted="no").\
2388- filter_by(fixed_ip_id=None).\
2389- filter_by(project_id=None).\
2390- filter_by(pool=pool).\
2391- with_lockmode('update').\
2392- first()
2393- # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
2394- # then this has concurrency issues
2395- if not floating_ip_ref:
2396- raise exception.NoMoreFloatingIps()
2397- floating_ip_ref['project_id'] = project_id
2398- session.add(floating_ip_ref)
2399- return floating_ip_ref['address']
2400-
2401-
2402-@require_context
2403-def floating_ip_bulk_create(context, ips):
2404- existing_ips = {}
2405- for floating in _floating_ip_get_all(context).all():
2406- existing_ips[floating['address']] = floating
2407-
2408- session = get_session()
2409- with session.begin():
2410- for ip in ips:
2411- addr = ip['address']
2412- if (addr in existing_ips and
2413- ip.get('id') != existing_ips[addr]['id']):
2414- raise exception.FloatingIpExists(**dict(existing_ips[addr]))
2415-
2416- model = models.FloatingIp()
2417- model.update(ip)
2418- session.add(model)
2419-
2420-
2421-def _ip_range_splitter(ips, block_size=256):
2422- """Yields blocks of IPs no more than block_size elements long."""
2423- out = []
2424- count = 0
2425- for ip in ips:
2426- out.append(ip['address'])
2427- count += 1
2428-
2429- if count > block_size - 1:
2430- yield out
2431- out = []
2432- count = 0
2433-
2434- if out:
2435- yield out
2436-
2437-
2438-@require_context
2439-def floating_ip_bulk_destroy(context, ips):
2440- session = get_session()
2441- with session.begin():
2442- for ip_block in _ip_range_splitter(ips):
2443- model_query(context, models.FloatingIp).\
2444- filter(models.FloatingIp.address.in_(ip_block)).\
2445- update({'deleted': True,
2446- 'deleted_at': timeutils.utcnow()},
2447- synchronize_session='fetch')
2448-
2449-
2450-@require_context
2451-def floating_ip_create(context, values, session=None):
2452- if not session:
2453- session = get_session()
2454-
2455- floating_ip_ref = models.FloatingIp()
2456- floating_ip_ref.update(values)
2457-
2458- # check uniqueness for not deleted addresses
2459- if not floating_ip_ref.deleted:
2460- try:
2461- floating_ip = floating_ip_get_by_address(context,
2462- floating_ip_ref.address,
2463- session)
2464- except exception.FloatingIpNotFoundForAddress:
2465- pass
2466- else:
2467- if floating_ip.id != floating_ip_ref.id:
2468- raise exception.FloatingIpExists(**dict(floating_ip_ref))
2469-
2470- floating_ip_ref.save(session=session)
2471- return floating_ip_ref['address']
2472-
2473-
2474-@require_context
2475-def floating_ip_count_by_project(context, project_id, session=None):
2476- authorize_project_context(context, project_id)
2477- # TODO(tr3buchet): why leave auto_assigned floating IPs out?
2478- return model_query(context, models.FloatingIp, read_deleted="no",
2479- session=session).\
2480- filter_by(project_id=project_id).\
2481- filter_by(auto_assigned=False).\
2482- count()
2483-
2484-
2485-@require_context
2486-def floating_ip_fixed_ip_associate(context, floating_address,
2487- fixed_address, host):
2488- session = get_session()
2489- with session.begin():
2490- floating_ip_ref = floating_ip_get_by_address(context,
2491- floating_address,
2492- session=session)
2493- fixed_ip_ref = fixed_ip_get_by_address(context,
2494- fixed_address,
2495- session=session)
2496- floating_ip_ref.fixed_ip_id = fixed_ip_ref["id"]
2497- floating_ip_ref.host = host
2498- floating_ip_ref.save(session=session)
2499-
2500-
2501-@require_context
2502-def floating_ip_deallocate(context, address):
2503- session = get_session()
2504- with session.begin():
2505- floating_ip_ref = floating_ip_get_by_address(context,
2506- address,
2507- session=session)
2508- floating_ip_ref['project_id'] = None
2509- floating_ip_ref['host'] = None
2510- floating_ip_ref['auto_assigned'] = False
2511- floating_ip_ref.save(session=session)
2512-
2513-
2514-@require_context
2515-def floating_ip_destroy(context, address):
2516- session = get_session()
2517- with session.begin():
2518- floating_ip_ref = floating_ip_get_by_address(context,
2519- address,
2520- session=session)
2521- floating_ip_ref.delete(session=session)
2522-
2523-
2524-@require_context
2525-def floating_ip_disassociate(context, address):
2526- session = get_session()
2527- with session.begin():
2528- floating_ip_ref = floating_ip_get_by_address(context,
2529- address,
2530- session=session)
2531- fixed_ip_ref = fixed_ip_get(context,
2532- floating_ip_ref['fixed_ip_id'])
2533- if fixed_ip_ref:
2534- fixed_ip_address = fixed_ip_ref['address']
2535- else:
2536- fixed_ip_address = None
2537- floating_ip_ref.fixed_ip_id = None
2538- floating_ip_ref.host = None
2539- floating_ip_ref.save(session=session)
2540- return fixed_ip_address
2541-
2542-
2543-@require_context
2544-def floating_ip_set_auto_assigned(context, address):
2545- session = get_session()
2546- with session.begin():
2547- floating_ip_ref = floating_ip_get_by_address(context,
2548- address,
2549- session=session)
2550- floating_ip_ref.auto_assigned = True
2551- floating_ip_ref.save(session=session)
2552-
2553-
2554-def _floating_ip_get_all(context, session=None):
2555- return model_query(context, models.FloatingIp, read_deleted="no",
2556- session=session)
2557-
2558-
2559-@require_admin_context
2560-def floating_ip_get_all(context):
2561- floating_ip_refs = _floating_ip_get_all(context).all()
2562- if not floating_ip_refs:
2563- raise exception.NoFloatingIpsDefined()
2564- return floating_ip_refs
2565-
2566-
2567-@require_admin_context
2568-def floating_ip_get_all_by_host(context, host):
2569- floating_ip_refs = _floating_ip_get_all(context).\
2570- filter_by(host=host).\
2571- all()
2572- if not floating_ip_refs:
2573- raise exception.FloatingIpNotFoundForHost(host=host)
2574- return floating_ip_refs
2575-
2576-
2577-@require_context
2578-def floating_ip_get_all_by_project(context, project_id):
2579- authorize_project_context(context, project_id)
2580- # TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
2581- return _floating_ip_get_all(context).\
2582- filter_by(project_id=project_id).\
2583- filter_by(auto_assigned=False).\
2584- all()
2585-
2586-
2587-@require_context
2588-def floating_ip_get_by_address(context, address, session=None):
2589- result = model_query(context, models.FloatingIp, session=session).\
2590- filter_by(address=address).\
2591- first()
2592-
2593- if not result:
2594- raise exception.FloatingIpNotFoundForAddress(address=address)
2595-
2596- # If the floating IP has a project ID set, check to make sure
2597- # the non-admin user has access.
2598- if result.project_id and is_user_context(context):
2599- authorize_project_context(context, result.project_id)
2600-
2601- return result
2602-
2603-
2604-@require_context
2605-def floating_ip_get_by_fixed_address(context, fixed_address, session=None):
2606- if not session:
2607- session = get_session()
2608-
2609- fixed_ip = fixed_ip_get_by_address(context, fixed_address, session)
2610- fixed_ip_id = fixed_ip['id']
2611-
2612- return model_query(context, models.FloatingIp, session=session).\
2613- filter_by(fixed_ip_id=fixed_ip_id).\
2614- all()
2615-
2616- # NOTE(tr3buchet) please don't invent an exception here, empty list is fine
2617-
2618-
2619-@require_context
2620-def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id, session=None):
2621- if not session:
2622- session = get_session()
2623-
2624- return model_query(context, models.FloatingIp, session=session).\
2625- filter_by(fixed_ip_id=fixed_ip_id).\
2626- all()
2627-
2628-
2629-@require_context
2630-def floating_ip_update(context, address, values):
2631- session = get_session()
2632- with session.begin():
2633- floating_ip_ref = floating_ip_get_by_address(context, address, session)
2634- for (key, value) in values.iteritems():
2635- floating_ip_ref[key] = value
2636- floating_ip_ref.save(session=session)
2637-
2638-
2639-@require_context
2640-def _dnsdomain_get(context, session, fqdomain):
2641- return model_query(context, models.DNSDomain,
2642- session=session, read_deleted="no").\
2643- filter_by(domain=fqdomain).\
2644- with_lockmode('update').\
2645- first()
2646-
2647-
2648-@require_context
2649-def dnsdomain_get(context, fqdomain):
2650- session = get_session()
2651- with session.begin():
2652- return _dnsdomain_get(context, session, fqdomain)
2653-
2654-
2655-@require_admin_context
2656-def _dnsdomain_get_or_create(context, session, fqdomain):
2657- domain_ref = _dnsdomain_get(context, session, fqdomain)
2658- if not domain_ref:
2659- dns_ref = models.DNSDomain()
2660- dns_ref.update({'domain': fqdomain,
2661- 'availability_zone': None,
2662- 'project_id': None})
2663- return dns_ref
2664-
2665- return domain_ref
2666-
2667-
2668-@require_admin_context
2669-def dnsdomain_register_for_zone(context, fqdomain, zone):
2670- session = get_session()
2671- with session.begin():
2672- domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
2673- domain_ref.scope = 'private'
2674- domain_ref.availability_zone = zone
2675- domain_ref.save(session=session)
2676-
2677-
2678-@require_admin_context
2679-def dnsdomain_register_for_project(context, fqdomain, project):
2680- session = get_session()
2681- with session.begin():
2682- domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
2683- domain_ref.scope = 'public'
2684- domain_ref.project_id = project
2685- domain_ref.save(session=session)
2686-
2687-
2688-@require_admin_context
2689-def dnsdomain_unregister(context, fqdomain):
2690- session = get_session()
2691- with session.begin():
2692- session.query(models.DNSDomain).\
2693- filter_by(domain=fqdomain).\
2694- delete()
2695-
2696-
2697-@require_context
2698-def dnsdomain_list(context):
2699- session = get_session()
2700- records = model_query(context, models.DNSDomain,
2701- session=session, read_deleted="no").\
2702- all()
2703- domains = []
2704- for record in records:
2705- domains.append(record.domain)
2706-
2707- return domains
2708-
2709-
2710-###################
2711-
2712-
2713-@require_admin_context
2714-def fixed_ip_associate(context, address, instance_uuid, network_id=None,
2715- reserved=False):
2716- """Keyword arguments:
2717- reserved -- should be a boolean value(True or False), exact value will be
2718- used to filter on the fixed ip address
2719- """
2720- if not utils.is_uuid_like(instance_uuid):
2721- raise exception.InvalidUUID(uuid=instance_uuid)
2722-
2723- session = get_session()
2724- with session.begin():
2725- network_or_none = or_(models.FixedIp.network_id == network_id,
2726- models.FixedIp.network_id == None)
2727- fixed_ip_ref = model_query(context, models.FixedIp, session=session,
2728- read_deleted="no").\
2729- filter(network_or_none).\
2730- filter_by(reserved=reserved).\
2731- filter_by(address=address).\
2732- with_lockmode('update').\
2733- first()
2734- # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
2735- # then this has concurrency issues
2736- if fixed_ip_ref is None:
2737- raise exception.FixedIpNotFoundForNetwork(address=address,
2738- network_id=network_id)
2739- if fixed_ip_ref.instance_uuid:
2740- raise exception.FixedIpAlreadyInUse(address=address)
2741-
2742- if not fixed_ip_ref.network_id:
2743- fixed_ip_ref.network_id = network_id
2744- fixed_ip_ref.instance_uuid = instance_uuid
2745- session.add(fixed_ip_ref)
2746- return fixed_ip_ref['address']
2747-
2748-
2749-@require_admin_context
2750-def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
2751- host=None):
2752- if instance_uuid and not utils.is_uuid_like(instance_uuid):
2753- raise exception.InvalidUUID(uuid=instance_uuid)
2754-
2755- session = get_session()
2756- with session.begin():
2757- network_or_none = or_(models.FixedIp.network_id == network_id,
2758- models.FixedIp.network_id == None)
2759- fixed_ip_ref = model_query(context, models.FixedIp, session=session,
2760- read_deleted="no").\
2761- filter(network_or_none).\
2762- filter_by(reserved=False).\
2763- filter_by(instance_uuid=None).\
2764- filter_by(host=None).\
2765- with_lockmode('update').\
2766- first()
2767- # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
2768- # then this has concurrency issues
2769- if not fixed_ip_ref:
2770- raise exception.NoMoreFixedIps()
2771-
2772- if fixed_ip_ref['network_id'] is None:
2773- fixed_ip_ref['network'] = network_id
2774-
2775- if instance_uuid:
2776- fixed_ip_ref['instance_uuid'] = instance_uuid
2777-
2778- if host:
2779- fixed_ip_ref['host'] = host
2780- session.add(fixed_ip_ref)
2781- return fixed_ip_ref['address']
2782-
2783-
2784-@require_context
2785-def fixed_ip_create(context, values):
2786- fixed_ip_ref = models.FixedIp()
2787- fixed_ip_ref.update(values)
2788- fixed_ip_ref.save()
2789- return fixed_ip_ref['address']
2790-
2791-
2792-@require_context
2793-def fixed_ip_bulk_create(context, ips):
2794- session = get_session()
2795- with session.begin():
2796- for ip in ips:
2797- model = models.FixedIp()
2798- model.update(ip)
2799- session.add(model)
2800-
2801-
2802-@require_context
2803-def fixed_ip_disassociate(context, address):
2804- session = get_session()
2805- with session.begin():
2806- fixed_ip_ref = fixed_ip_get_by_address(context,
2807- address,
2808- session=session)
2809- fixed_ip_ref['instance_uuid'] = None
2810- fixed_ip_ref.save(session=session)
2811-
2812-
2813-@require_admin_context
2814-def fixed_ip_disassociate_all_by_timeout(context, host, time):
2815- session = get_session()
2816- # NOTE(vish): only update fixed ips that "belong" to this
2817- # host; i.e. the network host or the instance
2818- # host matches. Two queries necessary because
2819- # join with update doesn't work.
2820- host_filter = or_(and_(models.Instance.host == host,
2821- models.Network.multi_host == True),
2822- models.Network.host == host)
2823- result = session.query(models.FixedIp.id).\
2824- filter(models.FixedIp.deleted == False).\
2825- filter(models.FixedIp.allocated == False).\
2826- filter(models.FixedIp.updated_at < time).\
2827- join((models.Network,
2828- models.Network.id == models.FixedIp.network_id)).\
2829- join((models.Instance,
2830- models.Instance.uuid == \
2831- models.FixedIp.instance_uuid)).\
2832- filter(host_filter).\
2833- all()
2834- fixed_ip_ids = [fip[0] for fip in result]
2835- if not fixed_ip_ids:
2836- return 0
2837- result = model_query(context, models.FixedIp, session=session).\
2838- filter(models.FixedIp.id.in_(fixed_ip_ids)).\
2839- update({'instance_uuid': None,
2840- 'leased': False,
2841- 'updated_at': timeutils.utcnow()},
2842- synchronize_session='fetch')
2843- return result
2844-
2845-
2846-@require_context
2847-def fixed_ip_get(context, id, session=None):
2848- result = model_query(context, models.FixedIp, session=session).\
2849- filter_by(id=id).\
2850- first()
2851- if not result:
2852- raise exception.FixedIpNotFound(id=id)
2853-
2854- # FIXME(sirp): shouldn't we just use project_only here to restrict the
2855- # results?
2856- if is_user_context(context) and result['instance_uuid'] is not None:
2857- instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
2858- result['instance_uuid'],
2859- session)
2860- authorize_project_context(context, instance.project_id)
2861-
2862- return result
2863-
2864-
2865-@require_admin_context
2866-def fixed_ip_get_all(context, session=None):
2867- result = model_query(context, models.FixedIp, session=session,
2868- read_deleted="yes").\
2869- all()
2870- if not result:
2871- raise exception.NoFixedIpsDefined()
2872-
2873- return result
2874-
2875-
2876-@require_context
2877-def fixed_ip_get_by_address(context, address, session=None):
2878- result = model_query(context, models.FixedIp, session=session).\
2879- filter_by(address=address).\
2880- first()
2881- if not result:
2882- raise exception.FixedIpNotFoundForAddress(address=address)
2883-
2884- # NOTE(sirp): shouldn't we just use project_only here to restrict the
2885- # results?
2886- if is_user_context(context) and result['instance_uuid'] is not None:
2887- instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
2888- result['instance_uuid'],
2889- session)
2890- authorize_project_context(context, instance.project_id)
2891-
2892- return result
2893-
2894-
2895-@require_context
2896-def fixed_ip_get_by_instance(context, instance_uuid):
2897- if not utils.is_uuid_like(instance_uuid):
2898- raise exception.InvalidUUID(uuid=instance_uuid)
2899-
2900- result = model_query(context, models.FixedIp, read_deleted="no").\
2901- filter_by(instance_uuid=instance_uuid).\
2902- all()
2903-
2904- if not result:
2905- raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)
2906-
2907- return result
2908-
2909-
2910-@require_context
2911-def fixed_ip_get_by_network_host(context, network_id, host):
2912- result = model_query(context, models.FixedIp, read_deleted="no").\
2913- filter_by(network_id=network_id).\
2914- filter_by(host=host).\
2915- first()
2916-
2917- if not result:
2918- raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
2919- host=host)
2920- return result
2921-
2922-
2923-@require_context
2924-def fixed_ips_by_virtual_interface(context, vif_id):
2925- result = model_query(context, models.FixedIp, read_deleted="no").\
2926- filter_by(virtual_interface_id=vif_id).\
2927- all()
2928-
2929- return result
2930-
2931-
2932-@require_admin_context
2933-def fixed_ip_get_network(context, address):
2934- fixed_ip_ref = fixed_ip_get_by_address(context, address)
2935- return fixed_ip_ref.network
2936-
2937-
2938-@require_context
2939-def fixed_ip_update(context, address, values):
2940- session = get_session()
2941- with session.begin():
2942- fixed_ip_ref = fixed_ip_get_by_address(context,
2943- address,
2944- session=session)
2945- fixed_ip_ref.update(values)
2946- fixed_ip_ref.save(session=session)
2947-
2948-
2949-###################
2950-
2951-
2952-@require_context
2953-def virtual_interface_create(context, values):
2954- """Create a new virtual interface record in the database.
2955-
2956- :param values: = dict containing column values
2957- """
2958- try:
2959- vif_ref = models.VirtualInterface()
2960- vif_ref.update(values)
2961- vif_ref.save()
2962- except IntegrityError:
2963- raise exception.VirtualInterfaceCreateException()
2964-
2965- return vif_ref
2966-
2967-
2968-@require_context
2969-def _virtual_interface_query(context, session=None):
2970- return model_query(context, models.VirtualInterface, session=session,
2971- read_deleted="yes")
2972-
2973-
2974-@require_context
2975-def virtual_interface_get(context, vif_id, session=None):
2976- """Gets a virtual interface from the table.
2977-
2978- :param vif_id: = id of the virtual interface
2979- """
2980- vif_ref = _virtual_interface_query(context, session=session).\
2981- filter_by(id=vif_id).\
2982- first()
2983- return vif_ref
2984-
2985-
2986-@require_context
2987-def virtual_interface_get_by_address(context, address):
2988- """Gets a virtual interface from the table.
2989-
2990- :param address: = the address of the interface you're looking to get
2991- """
2992- vif_ref = _virtual_interface_query(context).\
2993- filter_by(address=address).\
2994- first()
2995- return vif_ref
2996-
2997-
2998-@require_context
2999-def virtual_interface_get_by_uuid(context, vif_uuid):
3000- """Gets a virtual interface from the table.
3001-
3002- :param vif_uuid: the uuid of the interface you're looking to get
3003- """
3004- vif_ref = _virtual_interface_query(context).\
3005- filter_by(uuid=vif_uuid).\
3006- first()
3007- return vif_ref
3008-
3009-
3010-@require_context
3011-@require_instance_exists_using_uuid
3012-def virtual_interface_get_by_instance(context, instance_uuid):
3013- """Gets all virtual interfaces for instance.
3014-
3015- :param instance_uuid: = uuid of the instance to retrieve vifs for
3016- """
3017- vif_refs = _virtual_interface_query(context).\
3018- filter_by(instance_uuid=instance_uuid).\
3019- all()
3020- return vif_refs
3021-
3022-
3023-@require_context
3024-def virtual_interface_get_by_instance_and_network(context, instance_uuid,
3025- network_id):
3026- """Gets virtual interface for instance that's associated with network."""
3027- vif_ref = _virtual_interface_query(context).\
3028- filter_by(instance_uuid=instance_uuid).\
3029- filter_by(network_id=network_id).\
3030- first()
3031- return vif_ref
3032-
3033-
3034-@require_context
3035-def virtual_interface_delete(context, vif_id):
3036- """Delete virtual interface record from the database.
3037-
3038- :param vif_id: = id of vif to delete
3039- """
3040- session = get_session()
3041- vif_ref = virtual_interface_get(context, vif_id, session)
3042- with session.begin():
3043- session.delete(vif_ref)
3044-
3045-
3046-@require_context
3047-def virtual_interface_delete_by_instance(context, instance_uuid):
3048- """Delete virtual interface records that are associated
3049- with the instance given by instance_id.
3050-
3051- :param instance_uuid: = uuid of instance
3052- """
3053- vif_refs = virtual_interface_get_by_instance(context, instance_uuid)
3054- for vif_ref in vif_refs:
3055- virtual_interface_delete(context, vif_ref['id'])
3056-
3057-
3058-@require_context
3059-def virtual_interface_get_all(context):
3060- """Get all vifs"""
3061- vif_refs = _virtual_interface_query(context).all()
3062- return vif_refs
3063-
3064-
3065-###################
3066-
3067-
3068-def _metadata_refs(metadata_dict, meta_class):
3069- metadata_refs = []
3070- if metadata_dict:
3071- for k, v in metadata_dict.iteritems():
3072- metadata_ref = meta_class()
3073- metadata_ref['key'] = k
3074- metadata_ref['value'] = v
3075- metadata_refs.append(metadata_ref)
3076- return metadata_refs
3077-
3078-
3079-@require_context
3080-def instance_create(context, values):
3081- """Create a new Instance record in the database.
3082-
3083- context - request context object
3084- values - dict containing column values.
3085- """
3086- values = values.copy()
3087- values['metadata'] = _metadata_refs(
3088- values.get('metadata'), models.InstanceMetadata)
3089-
3090- values['system_metadata'] = _metadata_refs(
3091- values.get('system_metadata'), models.InstanceSystemMetadata)
3092-
3093- instance_ref = models.Instance()
3094- if not values.get('uuid'):
3095- values['uuid'] = str(utils.gen_uuid())
3096- instance_ref['info_cache'] = models.InstanceInfoCache()
3097- info_cache = values.pop('info_cache', None)
3098- if info_cache is not None:
3099- instance_ref['info_cache'].update(info_cache)
3100- security_groups = values.pop('security_groups', [])
3101- instance_ref.update(values)
3102-
3103- def _get_sec_group_models(session, security_groups):
3104- models = []
3105- _existed, default_group = security_group_ensure_default(context,
3106- session=session)
3107- if 'default' in security_groups:
3108- models.append(default_group)
3109- # Generate a new list, so we don't modify the original
3110- security_groups = [x for x in security_groups if x != 'default']
3111- if security_groups:
3112- models.extend(_security_group_get_by_names(context,
3113- session, context.project_id, security_groups))
3114- return models
3115-
3116- session = get_session()
3117- with session.begin():
3118- instance_ref.security_groups = _get_sec_group_models(session,
3119- security_groups)
3120- instance_ref.save(session=session)
3121- # NOTE(comstud): This forces instance_type to be loaded so it
3122- # exists in the ref when we return. Fixes lazy loading issues.
3123- instance_ref.instance_type
3124-
3125- # create the instance uuid to ec2_id mapping entry for instance
3126- ec2_instance_create(context, instance_ref['uuid'])
3127-
3128- return instance_ref
3129-
3130-
3131-@require_admin_context
3132-def instance_data_get_for_project(context, project_id, session=None):
3133- result = model_query(context,
3134- func.count(models.Instance.id),
3135- func.sum(models.Instance.vcpus),
3136- func.sum(models.Instance.memory_mb),
3137- read_deleted="no",
3138- session=session).\
3139- filter_by(project_id=project_id).\
3140- first()
3141- # NOTE(vish): convert None to 0
3142- return (result[0] or 0, result[1] or 0, result[2] or 0)
3143-
3144-
3145-@require_context
3146-def instance_destroy(context, instance_uuid, constraint=None):
3147- session = get_session()
3148- with session.begin():
3149- if utils.is_uuid_like(instance_uuid):
3150- instance_ref = instance_get_by_uuid(context, instance_uuid,
3151- session=session)
3152- else:
3153- raise exception.InvalidUUID(instance_uuid)
3154-
3155- query = session.query(models.Instance).\
3156- filter_by(uuid=instance_ref['uuid'])
3157- if constraint is not None:
3158- query = constraint.apply(models.Instance, query)
3159- count = query.update({'deleted': True,
3160- 'deleted_at': timeutils.utcnow(),
3161- 'updated_at': literal_column('updated_at')})
3162- if count == 0:
3163- raise exception.ConstraintNotMet()
3164- session.query(models.SecurityGroupInstanceAssociation).\
3165- filter_by(instance_uuid=instance_ref['uuid']).\
3166- update({'deleted': True,
3167- 'deleted_at': timeutils.utcnow(),
3168- 'updated_at': literal_column('updated_at')})
3169-
3170- instance_info_cache_delete(context, instance_ref['uuid'],
3171- session=session)
3172- return instance_ref
3173-
3174-
3175-@require_context
3176-def instance_get_by_uuid(context, uuid, session=None):
3177- result = _build_instance_get(context, session=session).\
3178- filter_by(uuid=uuid).\
3179- first()
3180-
3181- if not result:
3182- raise exception.InstanceNotFound(instance_id=uuid)
3183-
3184- return result
3185-
3186-
3187-@require_context
3188-def instance_get(context, instance_id, session=None):
3189- result = _build_instance_get(context, session=session).\
3190- filter_by(id=instance_id).\
3191- first()
3192-
3193- if not result:
3194- raise exception.InstanceNotFound(instance_id=instance_id)
3195-
3196- return result
3197-
3198-
3199-@require_context
3200-def _build_instance_get(context, session=None):
3201- return model_query(context, models.Instance, session=session,
3202- project_only=True).\
3203- options(joinedload_all('security_groups.rules')).\
3204- options(joinedload('info_cache')).\
3205- options(joinedload('metadata')).\
3206- options(joinedload('instance_type'))
3207-
3208-
3209-@require_admin_context
3210-def instance_get_all(context, columns_to_join=None):
3211- if columns_to_join is None:
3212- columns_to_join = ['info_cache', 'security_groups',
3213- 'metadata', 'instance_type']
3214- query = model_query(context, models.Instance)
3215- for column in columns_to_join:
3216- query = query.options(joinedload(column))
3217- return query.all()
3218-
3219-
3220-@require_context
3221-def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
3222- limit=None, marker=None):
3223- """Return instances that match all filters. Deleted instances
3224- will be returned by default, unless there's a filter that says
3225- otherwise"""
3226-
3227- sort_fn = {'desc': desc, 'asc': asc}
3228-
3229- session = get_session()
3230- query_prefix = session.query(models.Instance).\
3231- options(joinedload('info_cache')).\
3232- options(joinedload('security_groups')).\
3233- options(joinedload('metadata')).\
3234- options(joinedload('instance_type')).\
3235- order_by(sort_fn[sort_dir](getattr(models.Instance, sort_key)))
3236-
3237- # Make a copy of the filters dictionary to use going forward, as we'll
3238- # be modifying it and we shouldn't affect the caller's use of it.
3239- filters = filters.copy()
3240-
3241- if 'changes-since' in filters:
3242- changes_since = timeutils.normalize_time(filters['changes-since'])
3243- query_prefix = query_prefix.\
3244- filter(models.Instance.updated_at > changes_since)
3245-
3246- if 'deleted' in filters:
3247- # Instances can be soft or hard deleted and the query needs to
3248- # include or exclude both
3249- if filters.pop('deleted'):
3250- deleted = or_(models.Instance.deleted == True,
3251- models.Instance.vm_state == vm_states.SOFT_DELETED)
3252- query_prefix = query_prefix.filter(deleted)
3253- else:
3254- query_prefix = query_prefix.\
3255- filter_by(deleted=False).\
3256- filter(models.Instance.vm_state != vm_states.SOFT_DELETED)
3257-
3258- if not context.is_admin:
3259-        # If we're not admin context, add the appropriate filter.
3260- if context.project_id:
3261- filters['project_id'] = context.project_id
3262- else:
3263- filters['user_id'] = context.user_id
3264-
3265- # Filters for exact matches that we can do along with the SQL query...
3266- # For other filters that don't match this, we will do regexp matching
3267- exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
3268- 'vm_state', 'instance_type_id', 'uuid',
3269- 'metadata']
3270-
3271- # Filter the query
3272- query_prefix = exact_filter(query_prefix, models.Instance,
3273- filters, exact_match_filter_names)
3274-
3275- query_prefix = regex_filter(query_prefix, models.Instance, filters)
3276-
3277- # paginate query
3278- if marker is not None:
3279- try:
3280- marker = instance_get_by_uuid(context, marker, session=session)
3281- except exception.InstanceNotFound as e:
3282- raise exception.MarkerNotFound(marker)
3283- query_prefix = paginate_query(query_prefix, models.Instance, limit,
3284- [sort_key, 'created_at', 'id'],
3285- marker=marker,
3286- sort_dir=sort_dir)
3287-
3288- instances = query_prefix.all()
3289- return instances
3290-
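A minimal, self-contained sketch (plain Python, no SQLAlchemy; split_filters is an illustrative helper name, not nova code) of the partitioning the comments above describe: keys listed in exact_match_filter_names are matched exactly in the SQL query, while the remaining keys fall through to regexp matching via regex_filter below.

exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
                            'vm_state', 'instance_type_id', 'uuid',
                            'metadata']

def split_filters(filters):
    # Keys with exact-match support become SQL equality filters; everything
    # else is handed to regexp-style matching.
    exact = dict((k, v) for k, v in filters.items()
                 if k in exact_match_filter_names)
    regexp = dict((k, v) for k, v in filters.items()
                  if k not in exact_match_filter_names)
    return exact, regexp

exact, regexp = split_filters({'project_id': 'demo', 'display_name': 'web-.*'})
assert exact == {'project_id': 'demo'}
assert regexp == {'display_name': 'web-.*'}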
3291-
3292-def regex_filter(query, model, filters):
3293- """Applies regular expression filtering to a query.
3294-
3295- Returns the updated query.
3296-
3297- :param query: query to apply filters to
3298- :param model: model object the query applies to
3299- :param filters: dictionary of filters with regex values
3300- """
3301-
3302- regexp_op_map = {
3303- 'postgresql': '~',
3304- 'mysql': 'REGEXP',
3305- 'oracle': 'REGEXP_LIKE',
3306- 'sqlite': 'REGEXP'
3307- }
3308- db_string = FLAGS.sql_connection.split(':')[0].split('+')[0]
3309- db_regexp_op = regexp_op_map.get(db_string, 'LIKE')
3310- for filter_name in filters.iterkeys():
3311- try:
3312- column_attr = getattr(model, filter_name)
3313- except AttributeError:
3314- continue
3315- if 'property' == type(column_attr).__name__:
3316- continue
3317- query = query.filter(column_attr.op(db_regexp_op)(
3318- str(filters[filter_name])))
3319- return query
3320-
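To make the operator table in regex_filter above concrete, a small standalone sketch; the connection URLs are made-up examples, and pick_regexp_op is an illustrative name rather than a function in this branch.

regexp_op_map = {
    'postgresql': '~',
    'mysql': 'REGEXP',
    'oracle': 'REGEXP_LIKE',
    'sqlite': 'REGEXP',
}

def pick_regexp_op(sql_connection):
    # 'mysql+mysqldb://user:pw@host/nova' -> 'mysql'; engines not in the map
    # fall back to LIKE, just as regex_filter does with FLAGS.sql_connection.
    db_string = sql_connection.split(':')[0].split('+')[0]
    return regexp_op_map.get(db_string, 'LIKE')

assert pick_regexp_op('mysql+mysqldb://user:pw@host/nova') == 'REGEXP'
assert pick_regexp_op('sqlite:///nova.sqlite') == 'REGEXP'
assert pick_regexp_op('ibm_db_sa://user:pw@host/nova') == 'LIKE'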
3321-
3322-@require_context
3323-def instance_get_active_by_window(context, begin, end=None,
3324- project_id=None, host=None):
3325- """Return instances that were active during window."""
3326- session = get_session()
3327- query = session.query(models.Instance)
3328-
3329- query = query.filter(or_(models.Instance.terminated_at == None,
3330- models.Instance.terminated_at > begin))
3331- if end:
3332- query = query.filter(models.Instance.launched_at < end)
3333- if project_id:
3334- query = query.filter_by(project_id=project_id)
3335- if host:
3336- query = query.filter_by(host=host)
3337-
3338- return query.all()
3339-
3340-
3341-@require_admin_context
3342-def instance_get_active_by_window_joined(context, begin, end=None,
3343- project_id=None, host=None):
3344- """Return instances and joins that were active during window."""
3345- session = get_session()
3346- query = session.query(models.Instance)
3347-
3348- query = query.options(joinedload('info_cache')).\
3349- options(joinedload('security_groups')).\
3350- options(joinedload('metadata')).\
3351- options(joinedload('instance_type')).\
3352- filter(or_(models.Instance.terminated_at == None,
3353- models.Instance.terminated_at > begin))
3354- if end:
3355- query = query.filter(models.Instance.launched_at < end)
3356- if project_id:
3357- query = query.filter_by(project_id=project_id)
3358- if host:
3359- query = query.filter_by(host=host)
3360-
3361- return query.all()
3362-
3363-
3364-@require_admin_context
3365-def _instance_get_all_query(context, project_only=False):
3366- return model_query(context, models.Instance, project_only=project_only).\
3367- options(joinedload('info_cache')).\
3368- options(joinedload('security_groups')).\
3369- options(joinedload('metadata')).\
3370- options(joinedload('instance_type'))
3371-
3372-
3373-@require_admin_context
3374-def instance_get_all_by_host(context, host):
3375- return _instance_get_all_query(context).filter_by(host=host).all()
3376-
3377-
3378-@require_admin_context
3379-def instance_get_all_by_host_and_not_type(context, host, type_id=None):
3380- return _instance_get_all_query(context).filter_by(host=host).\
3381- filter(models.Instance.instance_type_id != type_id).all()
3382-
3383-
3384-@require_context
3385-def instance_get_all_by_project(context, project_id):
3386- authorize_project_context(context, project_id)
3387- return _instance_get_all_query(context).\
3388- filter_by(project_id=project_id).\
3389- all()
3390-
3391-
3392-@require_context
3393-def instance_get_all_by_reservation(context, reservation_id):
3394- return _instance_get_all_query(context, project_only=True).\
3395- filter_by(reservation_id=reservation_id).\
3396- all()
3397-
3398-
3399-# NOTE(jkoelker) This is only being left here for compat with floating
3400-# ips. Currently the network_api doesn't return floaters
3401-#                in network_info. Once it starts returning them in the
3402-#                model, this function and its call in compute/manager.py
3403-#                on line 1829 can go away.
3404-@require_context
3405-def instance_get_floating_address(context, instance_id):
3406- instance = instance_get(context, instance_id)
3407- fixed_ips = fixed_ip_get_by_instance(context, instance['uuid'])
3408-
3409- if not fixed_ips:
3410- return None
3411-
3412- # NOTE(tr3buchet): this only gets the first fixed_ip
3413- # won't find floating ips associated with other fixed_ips
3414- floating_ips = floating_ip_get_by_fixed_address(context,
3415- fixed_ips[0]['address'])
3416- if not floating_ips:
3417- return None
3418- # NOTE(vish): this just returns the first floating ip
3419- return floating_ips[0]['address']
3420-
3421-
3422-@require_admin_context
3423-def instance_get_all_hung_in_rebooting(context, reboot_window, session=None):
3424- reboot_window = (timeutils.utcnow() -
3425- datetime.timedelta(seconds=reboot_window))
3426-
3427- if not session:
3428- session = get_session()
3429-
3430- results = session.query(models.Instance).\
3431- filter(models.Instance.updated_at <= reboot_window).\
3432- filter_by(task_state="rebooting").all()
3433-
3434- return results
3435-
3436-
3437-@require_context
3438-def instance_test_and_set(context, instance_uuid, attr, ok_states,
3439- new_state, session=None):
3440- """Atomically check if an instance is in a valid state, and if it is, set
3441- the instance into a new state.
3442- """
3443- if not session:
3444- session = get_session()
3445-
3446- with session.begin():
3447- query = model_query(context, models.Instance, session=session,
3448- project_only=True)
3449-
3450- if utils.is_uuid_like(instance_uuid):
3451- query = query.filter_by(uuid=instance_uuid)
3452- else:
3453- raise exception.InvalidUUID(instance_uuid)
3454-
3455- # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
3456- # then this has concurrency issues
3457- instance = query.with_lockmode('update').first()
3458-
3459- state = instance[attr]
3460- if state not in ok_states:
3461- raise exception.InstanceInvalidState(
3462- attr=attr,
3463- instance_uuid=instance['uuid'],
3464- state=state,
3465- method='instance_test_and_set')
3466-
3467- instance[attr] = new_state
3468- instance.save(session=session)
3469-
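The check-then-set contract above, reduced to an in-memory sketch: the SQLAlchemy session and the with_lockmode('update') row lock are elided, and InstanceInvalidState is a stand-in for the real exception class.

class InstanceInvalidState(Exception):
    pass

def test_and_set(instance, attr, ok_states, new_state):
    # Refuse the transition unless the current value of attr is one of the
    # allowed states; otherwise apply the new state.
    state = instance[attr]
    if state not in ok_states:
        raise InstanceInvalidState('%s=%s not in %s' % (attr, state, ok_states))
    instance[attr] = new_state

inst = {'task_state': None}
test_and_set(inst, 'task_state', (None,), 'powering-off')
assert inst['task_state'] == 'powering-off'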
3470-
3471-@require_context
3472-def instance_update(context, instance_uuid, values):
3473- instance_ref = _instance_update(context, instance_uuid, values)[1]
3474- return instance_ref
3475-
3476-
3477-@require_context
3478-def instance_update_and_get_original(context, instance_uuid, values):
3479- """Set the given properties on an instance and update it. Return
3480- a shallow copy of the original instance reference, as well as the
3481- updated one.
3482-
3483- :param context: = request context object
3484- :param instance_uuid: = instance uuid
3485- :param values: = dict containing column values
3486-
3487- If "expected_task_state" exists in values, the update can only happen
3488- when the task state before update matches expected_task_state. Otherwise
3489-    an UnexpectedTaskStateError is raised.
3490-
3491- :returns: a tuple of the form (old_instance_ref, new_instance_ref)
3492-
3493- Raises NotFound if instance does not exist.
3494- """
3495- return _instance_update(context, instance_uuid, values,
3496- copy_old_instance=True)
3497-
3498-
3499-def _instance_update(context, instance_uuid, values, copy_old_instance=False):
3500- session = get_session()
3501-
3502- if not utils.is_uuid_like(instance_uuid):
3503- raise exception.InvalidUUID(instance_uuid)
3504-
3505- with session.begin():
3506- instance_ref = instance_get_by_uuid(context, instance_uuid,
3507- session=session)
3508- if "expected_task_state" in values:
3509- # it is not a db column so always pop out
3510- expected = values.pop("expected_task_state")
3511- if not isinstance(expected, (tuple, list, set)):
3512- expected = (expected,)
3513- actual_state = instance_ref["task_state"]
3514- if actual_state not in expected:
3515- raise exception.UnexpectedTaskStateError(actual=actual_state,
3516- expected=expected)
3517-
3518- if copy_old_instance:
3519- old_instance_ref = copy.copy(instance_ref)
3520- else:
3521- old_instance_ref = None
3522-
3523- metadata = values.get('metadata')
3524- if metadata is not None:
3525- instance_metadata_update(context, instance_ref['uuid'],
3526- values.pop('metadata'), True,
3527- session=session)
3528-
3529- system_metadata = values.get('system_metadata')
3530- if system_metadata is not None:
3531- instance_system_metadata_update(
3532- context, instance_ref['uuid'], values.pop('system_metadata'),
3533- delete=True, session=session)
3534-
3535- instance_ref.update(values)
3536- instance_ref.save(session=session)
3537-
3538- return (old_instance_ref, instance_ref)
3539-
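A self-contained sketch of the expected_task_state handling above; UnexpectedTaskState is a stand-in name for exception.UnexpectedTaskStateError, and check_expected_task_state is an illustrative helper, not nova code.

class UnexpectedTaskState(Exception):
    pass

def check_expected_task_state(current_state, values):
    # expected_task_state is not a database column, so it is always popped
    # from the update dict; a bare value is normalised to a one-element tuple.
    if 'expected_task_state' not in values:
        return
    expected = values.pop('expected_task_state')
    if not isinstance(expected, (tuple, list, set)):
        expected = (expected,)
    if current_state not in expected:
        raise UnexpectedTaskState('actual=%s, expected=%s'
                                  % (current_state, expected))

values = {'task_state': 'resizing', 'expected_task_state': (None, 'active')}
check_expected_task_state('active', values)     # allowed transition
assert 'expected_task_state' not in values      # always popped
try:
    check_expected_task_state('deleting', {'expected_task_state': 'active'})
except UnexpectedTaskState:
    pass                                        # conflicting concurrent update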
3540-
3541-def instance_add_security_group(context, instance_uuid, security_group_id):
3542- """Associate the given security group with the given instance"""
3543- session = get_session()
3544- with session.begin():
3545- instance_ref = instance_get_by_uuid(context, instance_uuid,
3546- session=session)
3547- security_group_ref = security_group_get(context,
3548- security_group_id,
3549- session=session)
3550- instance_ref.security_groups += [security_group_ref]
3551- instance_ref.save(session=session)
3552-
3553-
3554-@require_context
3555-def instance_remove_security_group(context, instance_uuid, security_group_id):
3556- """Disassociate the given security group from the given instance"""
3557- session = get_session()
3558- instance_ref = instance_get_by_uuid(context, instance_uuid,
3559- session=session)
3560- session.query(models.SecurityGroupInstanceAssociation).\
3561- filter_by(instance_uuid=instance_ref['uuid']).\
3562- filter_by(security_group_id=security_group_id).\
3563- update({'deleted': True,
3564- 'deleted_at': timeutils.utcnow(),
3565- 'updated_at': literal_column('updated_at')})
3566-
3567-
3568-###################
3569-
3570-
3571-@require_context
3572-def instance_info_cache_create(context, values):
3573- """Create a new instance cache record in the table.
3574-
3575- :param context: = request context object
3576- :param values: = dict containing column values
3577- """
3578- info_cache = models.InstanceInfoCache()
3579- info_cache.update(values)
3580-
3581- session = get_session()
3582- with session.begin():
3583- info_cache.save(session=session)
3584- return info_cache
3585-
3586-
3587-@require_context
3588-def instance_info_cache_get(context, instance_uuid, session=None):
3589- """Gets an instance info cache from the table.
3590-
3591- :param instance_uuid: = uuid of the info cache's instance
3592- :param session: = optional session object
3593- """
3594- session = session or get_session()
3595-
3596- info_cache = session.query(models.InstanceInfoCache).\
3597- filter_by(instance_uuid=instance_uuid).\
3598- first()
3599- return info_cache
3600-
3601-
3602-@require_context
3603-def instance_info_cache_update(context, instance_uuid, values,
3604- session=None):
3605- """Update an instance info cache record in the table.
3606-
3607- :param instance_uuid: = uuid of info cache's instance
3608- :param values: = dict containing column values to update
3609- :param session: = optional session object
3610- """
3611- session = session or get_session()
3612- info_cache = instance_info_cache_get(context, instance_uuid,
3613- session=session)
3614- if info_cache:
3615- # NOTE(tr3buchet): let's leave it alone if it's already deleted
3616- if info_cache['deleted']:
3617- return info_cache
3618-
3619- info_cache.update(values)
3620- info_cache.save(session=session)
3621- else:
3622- # NOTE(tr3buchet): just in case someone blows away an instance's
3623- # cache entry
3624- values['instance_uuid'] = instance_uuid
3625- info_cache = instance_info_cache_create(context, values)
3626-
3627- return info_cache
3628-
3629-
3630-@require_context
3631-def instance_info_cache_delete(context, instance_uuid, session=None):
3632- """Deletes an existing instance_info_cache record
3633-
3634- :param instance_uuid: = uuid of the instance tied to the cache record
3635- :param session: = optional session object
3636- """
3637- values = {'deleted': True,
3638- 'deleted_at': timeutils.utcnow()}
3639- instance_info_cache_update(context, instance_uuid, values, session)
3640-
3641-
3642-###################
3643-
3644-
3645-@require_context
3646-def key_pair_create(context, values):
3647- key_pair_ref = models.KeyPair()
3648- key_pair_ref.update(values)
3649- key_pair_ref.save()
3650- return key_pair_ref
3651-
3652-
3653-@require_context
3654-def key_pair_destroy(context, user_id, name):
3655- authorize_user_context(context, user_id)
3656- session = get_session()
3657- with session.begin():
3658- key_pair_ref = key_pair_get(context, user_id, name, session=session)
3659- key_pair_ref.delete(session=session)
3660-
3661-
3662-@require_context
3663-def key_pair_destroy_all_by_user(context, user_id):
3664- authorize_user_context(context, user_id)
3665- session = get_session()
3666- with session.begin():
3667- session.query(models.KeyPair).\
3668- filter_by(user_id=user_id).\
3669- update({'deleted': True,
3670- 'deleted_at': timeutils.utcnow(),
3671- 'updated_at': literal_column('updated_at')})
3672-
3673-
3674-@require_context
3675-def key_pair_get(context, user_id, name, session=None):
3676- authorize_user_context(context, user_id)
3677- result = model_query(context, models.KeyPair, session=session).\
3678- filter_by(user_id=user_id).\
3679- filter_by(name=name).\
3680- first()
3681-
3682- if not result:
3683- raise exception.KeypairNotFound(user_id=user_id, name=name)
3684-
3685- return result
3686-
3687-
3688-@require_context
3689-def key_pair_get_all_by_user(context, user_id):
3690- authorize_user_context(context, user_id)
3691- return model_query(context, models.KeyPair, read_deleted="no").\
3692- filter_by(user_id=user_id).\
3693- all()
3694-
3695-
3696-def key_pair_count_by_user(context, user_id):
3697- authorize_user_context(context, user_id)
3698- return model_query(context, models.KeyPair, read_deleted="no").\
3699- filter_by(user_id=user_id).\
3700- count()
3701-
3702-
3703-###################
3704-
3705-
3706-@require_admin_context
3707-def network_associate(context, project_id, network_id=None, force=False):
3708- """Associate a project with a network.
3709-
3710- called by project_get_networks under certain conditions
3711- and network manager add_network_to_project()
3712-
3713- only associate if the project doesn't already have a network
3714- or if force is True
3715-
3716-    force solves a race condition where a fresh project has multiple
3717-    instance builds simultaneously picked up by multiple network hosts,
3718-    which all attempt to associate the project with multiple networks;
3719-    force should only be used as a direct consequence of a user request;
3720-    automated requests should not use force
3721- """
3722- session = get_session()
3723- with session.begin():
3724-
3725- def network_query(project_filter, id=None):
3726- filter_kwargs = {'project_id': project_filter}
3727- if id is not None:
3728- filter_kwargs['id'] = id
3729- return model_query(context, models.Network, session=session,
3730- read_deleted="no").\
3731- filter_by(**filter_kwargs).\
3732- with_lockmode('update').\
3733- first()
3734-
3735- if not force:
3736- # find out if project has a network
3737- network_ref = network_query(project_id)
3738-
3739- if force or not network_ref:
3740- # in force mode or project doesn't have a network so associate
3741- # with a new network
3742-
3743- # get new network
3744- network_ref = network_query(None, network_id)
3745- if not network_ref:
3746- raise db.NoMoreNetworks()
3747-
3748- # associate with network
3749- # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
3750- # then this has concurrency issues
3751- network_ref['project_id'] = project_id
3752- session.add(network_ref)
3753- return network_ref
3754-
3755-
3756-@require_admin_context
3757-def network_count(context):
3758- return model_query(context, models.Network).count()
3759-
3760-
3761-@require_admin_context
3762-def _network_ips_query(context, network_id):
3763- return model_query(context, models.FixedIp, read_deleted="no").\
3764- filter_by(network_id=network_id)
3765-
3766-
3767-@require_admin_context
3768-def network_count_reserved_ips(context, network_id):
3769- return _network_ips_query(context, network_id).\
3770- filter_by(reserved=True).\
3771- count()
3772-
3773-
3774-@require_admin_context
3775-def network_create_safe(context, values):
3776- if values.get('vlan'):
3777- if model_query(context, models.Network, read_deleted="no")\
3778- .filter_by(vlan=values['vlan'])\
3779- .first():
3780- raise exception.DuplicateVlan(vlan=values['vlan'])
3781-
3782- network_ref = models.Network()
3783- network_ref['uuid'] = str(utils.gen_uuid())
3784- network_ref.update(values)
3785-
3786- try:
3787- network_ref.save()
3788- return network_ref
3789- except IntegrityError:
3790- return None
3791-
3792-
3793-@require_admin_context
3794-def network_delete_safe(context, network_id):
3795- session = get_session()
3796- with session.begin():
3797- result = session.query(models.FixedIp).\
3798- filter_by(network_id=network_id).\
3799- filter_by(deleted=False).\
3800- filter_by(allocated=True).\
3801- all()
3802- if result:
3803- raise exception.NetworkInUse(network_id=network_id)
3804- network_ref = network_get(context, network_id=network_id,
3805- session=session)
3806- session.query(models.FixedIp).\
3807- filter_by(network_id=network_id).\
3808- filter_by(deleted=False).\
3809- update({'deleted': True,
3810- 'updated_at': literal_column('updated_at'),
3811- 'deleted_at': timeutils.utcnow()})
3812- session.delete(network_ref)
3813-
3814-
3815-@require_admin_context
3816-def network_disassociate(context, network_id):
3817- network_update(context, network_id, {'project_id': None,
3818- 'host': None})
3819-
3820-
3821-@require_context
3822-def network_get(context, network_id, session=None, project_only='allow_none'):
3823- result = model_query(context, models.Network, session=session,
3824- project_only=project_only).\
3825- filter_by(id=network_id).\
3826- first()
3827-
3828- if not result:
3829- raise exception.NetworkNotFound(network_id=network_id)
3830-
3831- return result
3832-
3833-
3834-@require_context
3835-def network_get_all(context):
3836- result = model_query(context, models.Network, read_deleted="no").all()
3837-
3838- if not result:
3839- raise exception.NoNetworksFound()
3840-
3841- return result
3842-
3843-
3844-@require_context
3845-def network_get_all_by_uuids(context, network_uuids,
3846- project_only="allow_none"):
3847- result = model_query(context, models.Network, read_deleted="no",
3848- project_only=project_only).\
3849- filter(models.Network.uuid.in_(network_uuids)).\
3850- all()
3851-
3852- if not result:
3853- raise exception.NoNetworksFound()
3854-
3855-    # check if the result contains all the networks
3856-    # we are looking for
3857- for network_uuid in network_uuids:
3858- found = False
3859- for network in result:
3860- if network['uuid'] == network_uuid:
3861- found = True
3862- break
3863- if not found:
3864- if project_only:
3865- raise exception.NetworkNotFoundForProject(
3866- network_uuid=network_uuid, project_id=context.project_id)
3867- raise exception.NetworkNotFound(network_id=network_uuid)
3868-
3869- return result
3870-
3871-# NOTE(vish): pylint complains because of the long method name, but
3872-# it fits with the names of the rest of the methods
3873-# pylint: disable=C0103
3874-
3875-
3876-@require_admin_context
3877-def network_get_associated_fixed_ips(context, network_id, host=None):
3878- # FIXME(sirp): since this returns fixed_ips, this would be better named
3879- # fixed_ip_get_all_by_network.
3880- # NOTE(vish): The ugly joins here are to solve a performance issue and
3881- # should be removed once we can add and remove leases
3882- # without regenerating the whole list
3883- vif_and = and_(models.VirtualInterface.id ==
3884- models.FixedIp.virtual_interface_id,
3885- models.VirtualInterface.deleted == False)
3886- inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid,
3887- models.Instance.deleted == False)
3888- session = get_session()
3889- query = session.query(models.FixedIp.address,
3890- models.FixedIp.instance_uuid,
3891- models.FixedIp.network_id,
3892- models.FixedIp.virtual_interface_id,
3893- models.VirtualInterface.address,
3894- models.Instance.hostname,
3895- models.Instance.updated_at,
3896- models.Instance.created_at).\
3897- filter(models.FixedIp.deleted == False).\
3898- filter(models.FixedIp.network_id == network_id).\
3899- filter(models.FixedIp.allocated == True).\
3900- join((models.VirtualInterface, vif_and)).\
3901- join((models.Instance, inst_and)).\
3902- filter(models.FixedIp.instance_uuid != None).\
3903- filter(models.FixedIp.virtual_interface_id != None)
3904- if host:
3905- query = query.filter(models.Instance.host == host)
3906- result = query.all()
3907- data = []
3908- for datum in result:
3909- cleaned = {}
3910- cleaned['address'] = datum[0]
3911- cleaned['instance_uuid'] = datum[1]
3912- cleaned['network_id'] = datum[2]
3913- cleaned['vif_id'] = datum[3]
3914- cleaned['vif_address'] = datum[4]
3915- cleaned['instance_hostname'] = datum[5]
3916- cleaned['instance_updated'] = datum[6]
3917- cleaned['instance_created'] = datum[7]
3918- data.append(cleaned)
3919- return data
3920-
3921-
3922-@require_admin_context
3923-def _network_get_query(context, session=None):
3924- return model_query(context, models.Network, session=session,
3925- read_deleted="no")
3926-
3927-
3928-@require_admin_context
3929-def network_get_by_bridge(context, bridge):
3930- result = _network_get_query(context).filter_by(bridge=bridge).first()
3931-
3932- if not result:
3933- raise exception.NetworkNotFoundForBridge(bridge=bridge)
3934-
3935- return result
3936-
3937-
3938-@require_admin_context
3939-def network_get_by_uuid(context, uuid):
3940- result = _network_get_query(context).filter_by(uuid=uuid).first()
3941-
3942- if not result:
3943- raise exception.NetworkNotFoundForUUID(uuid=uuid)
3944-
3945- return result
3946-
3947-
3948-@require_admin_context
3949-def network_get_by_cidr(context, cidr):
3950- result = _network_get_query(context).\
3951- filter(or_(models.Network.cidr == cidr,
3952- models.Network.cidr_v6 == cidr)).\
3953- first()
3954-
3955- if not result:
3956- raise exception.NetworkNotFoundForCidr(cidr=cidr)
3957-
3958- return result
3959-
3960-
3961-@require_admin_context
3962-def network_get_by_instance(context, instance_id):
3963- # note this uses fixed IP to get to instance
3964- # only works for networks the instance has an IP from
3965- result = _network_get_query(context).\
3966- filter_by(instance_id=instance_id).\
3967- first()
3968-
3969- if not result:
3970- raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
3971-
3972- return result
3973-
3974-
3975-@require_admin_context
3976-def network_get_all_by_instance(context, instance_id):
3977- result = _network_get_query(context).\
3978- filter_by(instance_id=instance_id).\
3979- all()
3980-
3981- if not result:
3982- raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
3983-
3984- return result
3985-
3986-
3987-@require_admin_context
3988-def network_get_all_by_host(context, host):
3989- session = get_session()
3990- fixed_ip_query = model_query(context, models.FixedIp.network_id,
3991- session=session).\
3992- filter(models.FixedIp.host == host)
3993- # NOTE(vish): return networks that have host set
3994- # or that have a fixed ip with host set
3995- host_filter = or_(models.Network.host == host,
3996- models.Network.id.in_(fixed_ip_query.subquery()))
3997- return _network_get_query(context, session=session).\
3998- filter(host_filter).\
3999- all()
4000-
4001-
4002-@require_admin_context
4003-def network_set_host(context, network_id, host_id):
4004- session = get_session()
4005- with session.begin():
4006- network_ref = _network_get_query(context, session=session).\
4007- filter_by(id=network_id).\
4008- with_lockmode('update').\
4009- first()
4010-
4011- if not network_ref:
4012- raise exception.NetworkNotFound(network_id=network_id)
4013-
4014- # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
4015- # then this has concurrency issues
4016- if not network_ref['host']:
4017- network_ref['host'] = host_id
4018- session.add(network_ref)
4019-
4020- return network_ref['host']
4021-
4022-
4023-@require_context
4024-def network_update(context, network_id, values):
4025- session = get_session()
4026- with session.begin():
4027- network_ref = network_get(context, network_id, session=session)
4028- network_ref.update(values)
4029- network_ref.save(session=session)
4030- return network_ref
4031-
4032-
4033-###################
4034-
4035-
4036-@require_admin_context
4037-def iscsi_target_count_by_host(context, host):
4038- return model_query(context, models.IscsiTarget).\
4039- filter_by(host=host).\
4040- count()
4041-
4042-
4043-@require_admin_context
4044-def iscsi_target_create_safe(context, values):
4045- iscsi_target_ref = models.IscsiTarget()
4046-
4047- for (key, value) in values.iteritems():
4048- iscsi_target_ref[key] = value
4049- try:
4050- iscsi_target_ref.save()
4051- return iscsi_target_ref
4052- except IntegrityError:
4053- return None
4054-
4055-
4056-###################
4057-
4058-
4059-@require_context
4060-def quota_get(context, project_id, resource, session=None):
4061- result = model_query(context, models.Quota, session=session,
4062- read_deleted="no").\
4063- filter_by(project_id=project_id).\
4064- filter_by(resource=resource).\
4065- first()
4066-
4067- if not result:
4068- raise exception.ProjectQuotaNotFound(project_id=project_id)
4069-
4070- return result
4071-
4072-
4073-@require_context
4074-def quota_get_all_by_project(context, project_id):
4075- authorize_project_context(context, project_id)
4076-
4077- rows = model_query(context, models.Quota, read_deleted="no").\
4078- filter_by(project_id=project_id).\
4079- all()
4080-
4081- result = {'project_id': project_id}
4082- for row in rows:
4083- result[row.resource] = row.hard_limit
4084-
4085- return result
4086-
4087-
4088-@require_admin_context
4089-def quota_create(context, project_id, resource, limit):
4090- quota_ref = models.Quota()
4091- quota_ref.project_id = project_id
4092- quota_ref.resource = resource
4093- quota_ref.hard_limit = limit
4094- quota_ref.save()
4095- return quota_ref
4096-
4097-
4098-@require_admin_context
4099-def quota_update(context, project_id, resource, limit):
4100- session = get_session()
4101- with session.begin():
4102- quota_ref = quota_get(context, project_id, resource, session=session)
4103- quota_ref.hard_limit = limit
4104- quota_ref.save(session=session)
4105-
4106-
4107-@require_admin_context
4108-def quota_destroy(context, project_id, resource):
4109- session = get_session()
4110- with session.begin():
4111- quota_ref = quota_get(context, project_id, resource, session=session)
4112- quota_ref.delete(session=session)
4113-
4114-
4115-###################
4116-
4117-
4118-@require_context
4119-def quota_class_get(context, class_name, resource, session=None):
4120- result = model_query(context, models.QuotaClass, session=session,
4121- read_deleted="no").\
4122- filter_by(class_name=class_name).\
4123- filter_by(resource=resource).\
4124- first()
4125-
4126- if not result:
4127- raise exception.QuotaClassNotFound(class_name=class_name)
4128-
4129- return result
4130-
4131-
4132-@require_context
4133-def quota_class_get_all_by_name(context, class_name):
4134- authorize_quota_class_context(context, class_name)
4135-
4136- rows = model_query(context, models.QuotaClass, read_deleted="no").\
4137- filter_by(class_name=class_name).\
4138- all()
4139-
4140- result = {'class_name': class_name}
4141- for row in rows:
4142- result[row.resource] = row.hard_limit
4143-
4144- return result
4145-
4146-
4147-@require_admin_context
4148-def quota_class_create(context, class_name, resource, limit):
4149- quota_class_ref = models.QuotaClass()
4150- quota_class_ref.class_name = class_name
4151- quota_class_ref.resource = resource
4152- quota_class_ref.hard_limit = limit
4153- quota_class_ref.save()
4154- return quota_class_ref
4155-
4156-
4157-@require_admin_context
4158-def quota_class_update(context, class_name, resource, limit):
4159- session = get_session()
4160- with session.begin():
4161- quota_class_ref = quota_class_get(context, class_name, resource,
4162- session=session)
4163- quota_class_ref.hard_limit = limit
4164- quota_class_ref.save(session=session)
4165-
4166-
4167-@require_admin_context
4168-def quota_class_destroy(context, class_name, resource):
4169- session = get_session()
4170- with session.begin():
4171- quota_class_ref = quota_class_get(context, class_name, resource,
4172- session=session)
4173- quota_class_ref.delete(session=session)
4174-
4175-
4176-@require_admin_context
4177-def quota_class_destroy_all_by_name(context, class_name):
4178- session = get_session()
4179- with session.begin():
4180- quota_classes = model_query(context, models.QuotaClass,
4181- session=session, read_deleted="no").\
4182- filter_by(class_name=class_name).\
4183- all()
4184-
4185- for quota_class_ref in quota_classes:
4186- quota_class_ref.delete(session=session)
4187-
4188-
4189-###################
4190-
4191-
4192-@require_context
4193-def quota_usage_get(context, project_id, resource, session=None):
4194- result = model_query(context, models.QuotaUsage, session=session,
4195- read_deleted="no").\
4196- filter_by(project_id=project_id).\
4197- filter_by(resource=resource).\
4198- first()
4199-
4200- if not result:
4201- raise exception.QuotaUsageNotFound(project_id=project_id)
4202-
4203- return result
4204-
4205-
4206-@require_context
4207-def quota_usage_get_all_by_project(context, project_id):
4208- authorize_project_context(context, project_id)
4209-
4210- rows = model_query(context, models.QuotaUsage, read_deleted="no").\
4211- filter_by(project_id=project_id).\
4212- all()
4213-
4214- result = {'project_id': project_id}
4215- for row in rows:
4216- result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved)
4217-
4218- return result
4219-
4220-
4221-@require_admin_context
4222-def quota_usage_create(context, project_id, resource, in_use, reserved,
4223- until_refresh, session=None):
4224- quota_usage_ref = models.QuotaUsage()
4225- quota_usage_ref.project_id = project_id
4226- quota_usage_ref.resource = resource
4227- quota_usage_ref.in_use = in_use
4228- quota_usage_ref.reserved = reserved
4229- quota_usage_ref.until_refresh = until_refresh
4230- quota_usage_ref.save(session=session)
4231-
4232- return quota_usage_ref
4233-
4234-
4235-@require_admin_context
4236-def quota_usage_update(context, project_id, resource, in_use, reserved,
4237- until_refresh, session=None):
4238- def do_update(session):
4239- quota_usage_ref = quota_usage_get(context, project_id, resource,
4240- session=session)
4241- quota_usage_ref.in_use = in_use
4242- quota_usage_ref.reserved = reserved
4243- quota_usage_ref.until_refresh = until_refresh
4244- quota_usage_ref.save(session=session)
4245-
4246- if session:
4247- # Assume caller started a transaction
4248- do_update(session)
4249- else:
4250- session = get_session()
4251- with session.begin():
4252- do_update(session)
4253-
4254-
4255-@require_admin_context
4256-def quota_usage_destroy(context, project_id, resource):
4257- session = get_session()
4258- with session.begin():
4259- quota_usage_ref = quota_usage_get(context, project_id, resource,
4260- session=session)
4261- quota_usage_ref.delete(session=session)
4262-
4263-
4264-###################
4265-
4266-
4267-@require_context
4268-def reservation_get(context, uuid, session=None):
4269- result = model_query(context, models.Reservation, session=session,
4270- read_deleted="no").\
4271- filter_by(uuid=uuid).\
4272- first()
4273-
4274- if not result:
4275- raise exception.ReservationNotFound(uuid=uuid)
4276-
4277- return result
4278-
4279-
4280-@require_context
4281-def reservation_get_all_by_project(context, project_id):
4282- authorize_project_context(context, project_id)
4283-
4284- rows = model_query(context, models.QuotaUsage, read_deleted="no").\
4285- filter_by(project_id=project_id).\
4286- all()
4287-
4288- result = {'project_id': project_id}
4289- for row in rows:
4290- result.setdefault(row.resource, {})
4291- result[row.resource][row.uuid] = row.delta
4292-
4293- return result
4294-
4295-
4296-@require_admin_context
4297-def reservation_create(context, uuid, usage, project_id, resource, delta,
4298- expire, session=None):
4299- reservation_ref = models.Reservation()
4300- reservation_ref.uuid = uuid
4301- reservation_ref.usage_id = usage['id']
4302- reservation_ref.project_id = project_id
4303- reservation_ref.resource = resource
4304- reservation_ref.delta = delta
4305- reservation_ref.expire = expire
4306- reservation_ref.save(session=session)
4307- return reservation_ref
4308-
4309-
4310-@require_admin_context
4311-def reservation_destroy(context, uuid):
4312- session = get_session()
4313- with session.begin():
4314- reservation_ref = reservation_get(context, uuid, session=session)
4315- reservation_ref.delete(session=session)
4316-
4317-
4318-###################
4319-
4320-
4321-# NOTE(johannes): The quota code uses SQL locking to ensure races don't
4322-# cause under or over counting of resources. To avoid deadlocks, this
4323-# code always acquires the lock on quota_usages before acquiring the lock
4324-# on reservations.
4325-
4326-def _get_quota_usages(context, session):
4327- # Broken out for testability
4328- rows = model_query(context, models.QuotaUsage,
4329- read_deleted="no",
4330- session=session).\
4331- filter_by(project_id=context.project_id).\
4332- with_lockmode('update').\
4333- all()
4334- return dict((row.resource, row) for row in rows)
4335-
4336-
4337-@require_context
4338-def quota_reserve(context, resources, quotas, deltas, expire,
4339- until_refresh, max_age):
4340- elevated = context.elevated()
4341- session = get_session()
4342- with session.begin():
4343- # Get the current usages
4344- usages = _get_quota_usages(context, session)
4345-
4346- # Handle usage refresh
4347- work = set(deltas.keys())
4348- while work:
4349- resource = work.pop()
4350-
4351- # Do we need to refresh the usage?
4352- refresh = False
4353- if resource not in usages:
4354- usages[resource] = quota_usage_create(elevated,
4355- context.project_id,
4356- resource,
4357- 0, 0,
4358- until_refresh or None,
4359- session=session)
4360- refresh = True
4361- elif usages[resource].in_use < 0:
4362- # Negative in_use count indicates a desync, so try to
4363- # heal from that...
4364- refresh = True
4365- elif usages[resource].until_refresh is not None:
4366- usages[resource].until_refresh -= 1
4367- if usages[resource].until_refresh <= 0:
4368- refresh = True
4369- elif max_age and (usages[resource].updated_at -
4370- timeutils.utcnow()).seconds >= max_age:
4371- refresh = True
4372-
4373- # OK, refresh the usage
4374- if refresh:
4375- # Grab the sync routine
4376- sync = resources[resource].sync
4377-
4378- updates = sync(elevated, context.project_id, session)
4379- for res, in_use in updates.items():
4380- # Make sure we have a destination for the usage!
4381- if res not in usages:
4382- usages[res] = quota_usage_create(elevated,
4383- context.project_id,
4384- res,
4385- 0, 0,
4386- until_refresh or None,
4387- session=session)
4388-
4389- # Update the usage
4390- usages[res].in_use = in_use
4391- usages[res].until_refresh = until_refresh or None
4392-
4393- # Because more than one resource may be refreshed
4394- # by the call to the sync routine, and we don't
4395- # want to double-sync, we make sure all refreshed
4396- # resources are dropped from the work set.
4397- work.discard(res)
4398-
4399- # NOTE(Vek): We make the assumption that the sync
4400- # routine actually refreshes the
4401- # resources that it is the sync routine
4402- # for. We don't check, because this is
4403- # a best-effort mechanism.
4404-
4405- # Check for deltas that would go negative
4406- unders = [resource for resource, delta in deltas.items()
4407- if delta < 0 and
4408- delta + usages[resource].in_use < 0]
4409-
4410- # Now, let's check the quotas
4411- # NOTE(Vek): We're only concerned about positive increments.
4412- # If a project has gone over quota, we want them to
4413- # be able to reduce their usage without any
4414- # problems.
4415- overs = [resource for resource, delta in deltas.items()
4416- if quotas[resource] >= 0 and delta >= 0 and
4417- quotas[resource] < delta + usages[resource].total]
4418-
4419- # NOTE(Vek): The quota check needs to be in the transaction,
4420- # but the transaction doesn't fail just because
4421- # we're over quota, so the OverQuota raise is
4422- # outside the transaction. If we did the raise
4423- # here, our usage updates would be discarded, but
4424- # they're not invalidated by being over-quota.
4425-
4426- # Create the reservations
4427- if not overs:
4428- reservations = []
4429- for resource, delta in deltas.items():
4430- reservation = reservation_create(elevated,
4431- str(utils.gen_uuid()),
4432- usages[resource],
4433- context.project_id,
4434- resource, delta, expire,
4435- session=session)
4436- reservations.append(reservation.uuid)
4437-
4438- # Also update the reserved quantity
4439- # NOTE(Vek): Again, we are only concerned here about
4440- # positive increments. Here, though, we're
4441- # worried about the following scenario:
4442- #
4443- # 1) User initiates resize down.
4444- # 2) User allocates a new instance.
4445- # 3) Resize down fails or is reverted.
4446- # 4) User is now over quota.
4447- #
4448- # To prevent this, we only update the
4449- # reserved value if the delta is positive.
4450- if delta > 0:
4451- usages[resource].reserved += delta
4452-
4453- # Apply updates to the usages table
4454- for usage_ref in usages.values():
4455- usage_ref.save(session=session)
4456-
4457- if unders:
4458- LOG.warning(_("Change will make usage less than 0 for the following "
4459- "resources: %(unders)s") % locals())
4460- if overs:
4461- usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved']))
4462- for k, v in usages.items())
4463- raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
4464- usages=usages)
4465-
4466- return reservations
4467-
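A worked example, with made-up numbers, of the over-quota test in quota_reserve above. It assumes usages[resource].total means in_use + reserved, and treats a negative hard limit as unlimited, which is what the quotas[resource] >= 0 guard implies; would_exceed is an illustrative helper, not nova code.

def would_exceed(hard_limit, in_use, reserved, delta):
    # Only positive deltas are checked; reductions are always allowed so a
    # project that is already over quota can still shrink its usage.
    total = in_use + reserved
    return hard_limit >= 0 and delta >= 0 and hard_limit < delta + total

assert would_exceed(hard_limit=10, in_use=8, reserved=1, delta=2)        # 9 + 2 > 10
assert not would_exceed(hard_limit=10, in_use=8, reserved=1, delta=1)    # 9 + 1 == 10
assert not would_exceed(hard_limit=-1, in_use=50, reserved=0, delta=5)   # unlimited
assert not would_exceed(hard_limit=10, in_use=12, reserved=0, delta=-2)  # shrinking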
4468-
4469-def _quota_reservations(session, context, reservations):
4470- """Return the relevant reservations."""
4471-
4472- # Get the listed reservations
4473- return model_query(context, models.Reservation,
4474- read_deleted="no",
4475- session=session).\
4476- filter(models.Reservation.uuid.in_(reservations)).\
4477- with_lockmode('update').\
4478- all()
4479-
4480-
4481-@require_context
4482-def reservation_commit(context, reservations):
4483- session = get_session()
4484- with session.begin():
4485- usages = _get_quota_usages(context, session)
4486-
4487- for reservation in _quota_reservations(session, context, reservations):
4488- usage = usages[reservation.resource]
4489- if reservation.delta >= 0:
4490- usage.reserved -= reservation.delta
4491- usage.in_use += reservation.delta
4492-
4493- reservation.delete(session=session)
4494-
4495- for usage in usages.values():
4496- usage.save(session=session)
4497-
4498-
4499-@require_context
4500-def reservation_rollback(context, reservations):
4501- session = get_session()
4502- with session.begin():
4503- usages = _get_quota_usages(context, session)
4504-
4505- for reservation in _quota_reservations(session, context, reservations):
4506- usage = usages[reservation.resource]
4507- if reservation.delta >= 0:
4508- usage.reserved -= reservation.delta
4509-
4510- reservation.delete(session=session)
4511-
4512- for usage in usages.values():
4513- usage.save(session=session)
4514-
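Worked numbers (made up) contrasting reservation_commit and reservation_rollback above for a single positive reservation of delta=2 against a usage row holding in_use=5 and reserved=2:

in_use, reserved, delta = 5, 2, 2

# reservation_commit: the reserved amount becomes real usage.
assert (in_use + delta, reserved - delta) == (7, 0)

# reservation_rollback: the reservation is simply released.
assert (in_use, reserved - delta) == (5, 0)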
4515-
4516-@require_admin_context
4517-def quota_destroy_all_by_project(context, project_id):
4518- session = get_session()
4519- with session.begin():
4520- quotas = model_query(context, models.Quota, session=session,
4521- read_deleted="no").\
4522- filter_by(project_id=project_id).\
4523- all()
4524-
4525- for quota_ref in quotas:
4526- quota_ref.delete(session=session)
4527-
4528- quota_usages = model_query(context, models.QuotaUsage,
4529- session=session, read_deleted="no").\
4530- filter_by(project_id=project_id).\
4531- all()
4532-
4533- for quota_usage_ref in quota_usages:
4534- quota_usage_ref.delete(session=session)
4535-
4536- reservations = model_query(context, models.Reservation,
4537- session=session, read_deleted="no").\
4538- filter_by(project_id=project_id).\
4539- all()
4540-
4541- for reservation_ref in reservations:
4542- reservation_ref.delete(session=session)
4543-
4544-
4545-@require_admin_context
4546-def reservation_expire(context):
4547- session = get_session()
4548- with session.begin():
4549- current_time = timeutils.utcnow()
4550- results = model_query(context, models.Reservation, session=session,
4551- read_deleted="no").\
4552- filter(models.Reservation.expire < current_time).\
4553- all()
4554-
4555- if results:
4556- for reservation in results:
4557- if reservation.delta >= 0:
4558- reservation.usage.reserved -= reservation.delta
4559- reservation.usage.save(session=session)
4560-
4561- reservation.delete(session=session)
4562-
4563-
4564-###################
4565-
4566-
4567-@require_admin_context
4568-def volume_allocate_iscsi_target(context, volume_id, host):
4569- session = get_session()
4570- with session.begin():
4571- iscsi_target_ref = model_query(context, models.IscsiTarget,
4572- session=session, read_deleted="no").\
4573- filter_by(volume=None).\
4574- filter_by(host=host).\
4575- with_lockmode('update').\
4576- first()
4577-
4578- # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
4579- # then this has concurrency issues
4580- if not iscsi_target_ref:
4581- raise db.NoMoreTargets()
4582-
4583- iscsi_target_ref.volume_id = volume_id
4584- session.add(iscsi_target_ref)
4585-
4586- return iscsi_target_ref.target_num
4587-
4588-
4589-@require_admin_context
4590-def volume_attached(context, volume_id, instance_uuid, mountpoint):
4591- if not utils.is_uuid_like(instance_uuid):
4592- raise exception.InvalidUUID(instance_uuid)
4593-
4594- session = get_session()
4595- with session.begin():
4596- volume_ref = volume_get(context, volume_id, session=session)
4597- volume_ref['status'] = 'in-use'
4598- volume_ref['mountpoint'] = mountpoint
4599- volume_ref['attach_status'] = 'attached'
4600- volume_ref['instance_uuid'] = instance_uuid
4601- volume_ref['attach_time'] = timeutils.utcnow()
4602- volume_ref.save(session=session)
4603-
4604-
4605-@require_context
4606-def volume_create(context, values):
4607- values['volume_metadata'] = _metadata_refs(values.get('metadata'),
4608- models.VolumeMetadata)
4609- volume_ref = models.Volume()
4610- if not values.get('id'):
4611- values['id'] = str(utils.gen_uuid())
4612- volume_ref.update(values)
4613-
4614- session = get_session()
4615- with session.begin():
4616- volume_ref.save(session=session)
4617-
4618- return volume_get(context, values['id'], session=session)
4619-
4620-
4621-@require_admin_context
4622-def volume_data_get_for_project(context, project_id, session=None):
4623- result = model_query(context,
4624- func.count(models.Volume.id),
4625- func.sum(models.Volume.size),
4626- read_deleted="no",
4627- session=session).\
4628- filter_by(project_id=project_id).\
4629- first()
4630-
4631- # NOTE(vish): convert None to 0
4632- return (result[0] or 0, result[1] or 0)
4633-
4634-
4635-@require_admin_context
4636-def volume_destroy(context, volume_id):
4637- session = get_session()
4638- with session.begin():
4639- volume_ref = volume_get(context, volume_id, session=session)
4640- session.query(models.Volume).\
4641- filter_by(id=volume_id).\
4642- update({'deleted': True,
4643- 'deleted_at': timeutils.utcnow(),
4644- 'updated_at': literal_column('updated_at')})
4645- session.query(models.IscsiTarget).\
4646- filter_by(volume_id=volume_id).\
4647- update({'volume_id': None})
4648- session.query(models.VolumeMetadata).\
4649- filter_by(volume_id=volume_id).\
4650- update({'deleted': True,
4651- 'deleted_at': timeutils.utcnow(),
4652- 'updated_at': literal_column('updated_at')})
4653- return volume_ref
4654-
4655-
4656-@require_admin_context
4657-def volume_detached(context, volume_id):
4658- session = get_session()
4659- with session.begin():
4660- volume_ref = volume_get(context, volume_id, session=session)
4661- volume_ref['status'] = 'available'
4662- volume_ref['mountpoint'] = None
4663- volume_ref['attach_status'] = 'detached'
4664- volume_ref['instance_uuid'] = None
4665- volume_ref.save(session=session)
4666-
4667-
4668-@require_context
4669-def _volume_get_query(context, session=None, project_only=False):
4670- return model_query(context, models.Volume, session=session,
4671- project_only=project_only).\
4672- options(joinedload('volume_metadata')).\
4673- options(joinedload('volume_type'))
4674-
4675-
4676-@require_context
4677-def _ec2_volume_get_query(context, session=None):
4678- return model_query(context, models.VolumeIdMapping, session=session)
4679-
4680-
4681-@require_context
4682-def _ec2_snapshot_get_query(context, session=None):
4683- return model_query(context, models.SnapshotIdMapping, session=session)
4684-
4685-
4686-@require_context
4687-def volume_get(context, volume_id, session=None):
4688- result = _volume_get_query(context, session=session, project_only=True).\
4689- filter_by(id=volume_id).\
4690- first()
4691-
4692- if not result:
4693- raise exception.VolumeNotFound(volume_id=volume_id)
4694-
4695- return result
4696-
4697-
4698-@require_admin_context
4699-def volume_get_all(context):
4700- return _volume_get_query(context).all()
4701-
4702-
4703-@require_admin_context
4704-def volume_get_all_by_host(context, host):
4705- return _volume_get_query(context).filter_by(host=host).all()
4706-
4707-
4708-@require_admin_context
4709-def volume_get_all_by_instance_uuid(context, instance_uuid):
4710- result = model_query(context, models.Volume, read_deleted="no").\
4711- options(joinedload('volume_metadata')).\
4712- options(joinedload('volume_type')).\
4713- filter_by(instance_uuid=instance_uuid).\
4714- all()
4715-
4716- if not result:
4717- return []
4718-
4719- return result
4720-
4721-
4722-@require_context
4723-def volume_get_all_by_project(context, project_id):
4724- authorize_project_context(context, project_id)
4725- return _volume_get_query(context).filter_by(project_id=project_id).all()
4726-
4727-
4728-@require_admin_context
4729-def volume_get_iscsi_target_num(context, volume_id):
4730- result = model_query(context, models.IscsiTarget, read_deleted="yes").\
4731- filter_by(volume_id=volume_id).\
4732- first()
4733-
4734- if not result:
4735- raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id)
4736-
4737- return result.target_num
4738-
4739-
4740-@require_context
4741-def volume_update(context, volume_id, values):
4742- session = get_session()
4743- volume_ref = volume_get(context, volume_id, session=session)
4744- metadata = values.get('metadata')
4745- if metadata is not None:
4746- volume_metadata_update(context,
4747- volume_id,
4748- values.pop('metadata'),
4749- delete=True)
4750- with session.begin():
4751- volume_ref.update(values)
4752- volume_ref.save(session=session)
4753-
4754- return volume_ref
4755-
4756-
4757-@require_context
4758-def ec2_volume_create(context, volume_uuid, id=None):
4759- """Create ec2 compatable volume by provided uuid"""
4760- ec2_volume_ref = models.VolumeIdMapping()
4761- ec2_volume_ref.update({'uuid': volume_uuid})
4762- if id is not None:
4763- ec2_volume_ref.update({'id': id})
4764-
4765- ec2_volume_ref.save()
4766-
4767- return ec2_volume_ref
4768-
4769-
4770-@require_context
4771-def get_ec2_volume_id_by_uuid(context, volume_id, session=None):
4772- result = _ec2_volume_get_query(context, session=session).\
4773- filter_by(uuid=volume_id).\
4774- first()
4775-
4776- if not result:
4777- raise exception.VolumeNotFound(volume_id=volume_id)
4778-
4779- return result['id']
4780-
4781-
4782-@require_context
4783-def get_volume_uuid_by_ec2_id(context, ec2_id, session=None):
4784- result = _ec2_volume_get_query(context, session=session).\
4785- filter_by(id=ec2_id).\
4786- first()
4787-
4788- if not result:
4789- raise exception.VolumeNotFound(volume_id=ec2_id)
4790-
4791- return result['uuid']
4792-
4793-
4794-@require_context
4795-def ec2_snapshot_create(context, snapshot_uuid, id=None):
4796- """Create ec2 compatable snapshot by provided uuid"""
4797- ec2_snapshot_ref = models.SnapshotIdMapping()
4798- ec2_snapshot_ref.update({'uuid': snapshot_uuid})
4799- if id is not None:
4800- ec2_snapshot_ref.update({'id': id})
4801-
4802- ec2_snapshot_ref.save()
4803-
4804- return ec2_snapshot_ref
4805-
4806-
4807-@require_context
4808-def get_ec2_snapshot_id_by_uuid(context, snapshot_id, session=None):
4809- result = _ec2_snapshot_get_query(context, session=session).\
4810- filter_by(uuid=snapshot_id).\
4811- first()
4812-
4813- if not result:
4814- raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
4815-
4816- return result['id']
4817-
4818-
4819-@require_context
4820-def get_snapshot_uuid_by_ec2_id(context, ec2_id, session=None):
4821- result = _ec2_snapshot_get_query(context, session=session).\
4822- filter_by(id=ec2_id).\
4823- first()
4824-
4825- if not result:
4826- raise exception.SnapshotNotFound(snapshot_id=ec2_id)
4827-
4828- return result['uuid']
4829-
4830-
4831-####################
4832-
4833-def _volume_metadata_get_query(context, volume_id, session=None):
4834- return model_query(context, models.VolumeMetadata,
4835- session=session, read_deleted="no").\
4836- filter_by(volume_id=volume_id)
4837-
4838-
4839-@require_context
4840-@require_volume_exists
4841-def volume_metadata_get(context, volume_id):
4842- rows = _volume_metadata_get_query(context, volume_id).all()
4843- result = {}
4844- for row in rows:
4845- result[row['key']] = row['value']
4846-
4847- return result
4848-
4849-
4850-@require_context
4851-@require_volume_exists
4852-def volume_metadata_delete(context, volume_id, key):
4853- _volume_metadata_get_query(context, volume_id).\
4854- filter_by(key=key).\
4855- update({'deleted': True,
4856- 'deleted_at': timeutils.utcnow(),
4857- 'updated_at': literal_column('updated_at')})
4858-
4859-
4860-@require_context
4861-@require_volume_exists
4862-def volume_metadata_get_item(context, volume_id, key, session=None):
4863- result = _volume_metadata_get_query(context, volume_id, session=session).\
4864- filter_by(key=key).\
4865- first()
4866-
4867- if not result:
4868- raise exception.VolumeMetadataNotFound(metadata_key=key,
4869- volume_id=volume_id)
4870- return result
4871-
4872-
4873-@require_context
4874-@require_volume_exists
4875-def volume_metadata_update(context, volume_id, metadata, delete):
4876- session = get_session()
4877-
4878- # Set existing metadata to deleted if delete argument is True
4879- if delete:
4880- original_metadata = volume_metadata_get(context, volume_id)
4881- for meta_key, meta_value in original_metadata.iteritems():
4882- if meta_key not in metadata:
4883- meta_ref = volume_metadata_get_item(context, volume_id,
4884- meta_key, session)
4885- meta_ref.update({'deleted': True})
4886- meta_ref.save(session=session)
4887-
4888- meta_ref = None
4889-
4890- # Now update all existing items with new values, or create new meta objects
4891- for meta_key, meta_value in metadata.iteritems():
4892-
4893- # update the value whether it exists or not
4894- item = {"value": meta_value}
4895-
4896- try:
4897- meta_ref = volume_metadata_get_item(context, volume_id,
4898- meta_key, session)
4899- except exception.VolumeMetadataNotFound, e:
4900- meta_ref = models.VolumeMetadata()
4901- item.update({"key": meta_key, "volume_id": volume_id})
4902-
4903- meta_ref.update(item)
4904- meta_ref.save(session=session)
4905-
4906- return metadata
4907-
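The delete flag above, restated on plain dicts: with delete=True, existing keys that are absent from the new metadata are soft-deleted, so only the new keys survive; with delete=False, the new keys are merged over the old ones. merge_metadata is an illustrative helper, not nova code.

def merge_metadata(original, new, delete):
    # Returns the key/value pairs that remain visible after the update.
    merged = {} if delete else dict(original)
    merged.update(new)
    return merged

orig = {'a': '1', 'b': '2'}
assert merge_metadata(orig, {'b': '3', 'c': '4'}, delete=True) == {'b': '3',
                                                                   'c': '4'}
assert merge_metadata(orig, {'b': '3', 'c': '4'}, delete=False) == {'a': '1',
                                                                    'b': '3',
                                                                    'c': '4'}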
4908-
4909-###################
4910-
4911-
4912-@require_context
4913-def snapshot_create(context, values):
4914- snapshot_ref = models.Snapshot()
4915- if not values.get('id'):
4916- values['id'] = str(utils.gen_uuid())
4917- snapshot_ref.update(values)
4918-
4919- session = get_session()
4920- with session.begin():
4921- snapshot_ref.save(session=session)
4922- return snapshot_ref
4923-
4924-
4925-@require_admin_context
4926-def snapshot_destroy(context, snapshot_id):
4927- session = get_session()
4928- with session.begin():
4929- session.query(models.Snapshot).\
4930- filter_by(id=snapshot_id).\
4931- update({'deleted': True,
4932- 'deleted_at': timeutils.utcnow(),
4933- 'updated_at': literal_column('updated_at')})
4934-
4935-
4936-@require_context
4937-def snapshot_get(context, snapshot_id, session=None):
4938- result = model_query(context, models.Snapshot, session=session,
4939- project_only=True).\
4940- filter_by(id=snapshot_id).\
4941- first()
4942-
4943- if not result:
4944- raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
4945-
4946- return result
4947-
4948-
4949-@require_admin_context
4950-def snapshot_get_all(context):
4951- return model_query(context, models.Snapshot).all()
4952-
4953-
4954-@require_context
4955-def snapshot_get_all_for_volume(context, volume_id):
4956- return model_query(context, models.Snapshot, read_deleted='no',
4957- project_only=True).\
4958- filter_by(volume_id=volume_id).all()
4959-
4960-
4961-@require_context
4962-def snapshot_get_all_by_project(context, project_id):
4963- authorize_project_context(context, project_id)
4964- return model_query(context, models.Snapshot).\
4965- filter_by(project_id=project_id).\
4966- all()
4967-
4968-
4969-@require_context
4970-def snapshot_update(context, snapshot_id, values):
4971- session = get_session()
4972- with session.begin():
4973- snapshot_ref = snapshot_get(context, snapshot_id, session=session)
4974- snapshot_ref.update(values)
4975- snapshot_ref.save(session=session)
4976-
4977-
4978-###################
4979-
4980-
4981-def _block_device_mapping_get_query(context, session=None):
4982- return model_query(context, models.BlockDeviceMapping, session=session)
4983-
4984-
4985-@require_context
4986-def block_device_mapping_create(context, values):
4987- bdm_ref = models.BlockDeviceMapping()
4988- bdm_ref.update(values)
4989-
4990- session = get_session()
4991- with session.begin():
4992- bdm_ref.save(session=session)
4993-
4994-
4995-@require_context
4996-def block_device_mapping_update(context, bdm_id, values):
4997- session = get_session()
4998- with session.begin():
4999- _block_device_mapping_get_query(context, session=session).\
5000- filter_by(id=bdm_id).\
The diff has been truncated for viewing.
