Merge lp:~zulcss/ubuntu/precise/nova/trunk into lp:~ubuntu-cloud-archive/ubuntu/precise/nova/trunk

Proposed by Chuck Short
Status: Merged
Approved by: James Page
Approved revision: 97
Merged at revision: 97
Proposed branch: lp:~zulcss/ubuntu/precise/nova/trunk
Merge into: lp:~ubuntu-cloud-archive/ubuntu/precise/nova/trunk
Diff against target: 298126 lines (+77032/-91301)
1002 files modified
.mailmap (+2/-2)
.pc/.quilt_patches (+0/-1)
.pc/.quilt_series (+0/-1)
.pc/.version (+0/-1)
.pc/applied-patches (+0/-9)
.pc/avoid_setuptools_git_dependency.patch/tools/pip-requires (+0/-23)
.pc/fix-docs-build-without-network.patch/doc/source/conf.py (+0/-279)
.pc/fix-ubuntu-tests.patch/nova/tests/test_api.py (+0/-616)
.pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost (+0/-445)
.pc/rbd-security.patch/nova/virt/libvirt/volume.py (+0/-206)
.pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova/db/sqlalchemy/api.py (+0/-5256)
.pc/ubuntu/fix-libvirt-firewall-slowdown.patch/nova/tests/test_libvirt.py (+0/-3919)
.pc/ubuntu/fix-libvirt-firewall-slowdown.patch/nova/virt/firewall.py (+0/-536)
.pc/ubuntu/ubuntu-fix-32-64-bit-iss.patch/nova/tests/test_nfs.py (+0/-629)
.pc/ubuntu/ubuntu-fix-32-64-bit-iss.patch/nova/volume/nfs.py (+0/-293)
.pc/ubuntu/ubuntu-fix-ec2-instance-id-mappings.patch/nova/db/sqlalchemy/api.py (+0/-5253)
AUTHORS (+34/-0)
ChangeLog (+15284/-618)
HACKING.rst (+1/-0)
PKG-INFO (+10/-2)
bin/nova-all (+8/-5)
bin/nova-api (+7/-3)
bin/nova-api-ec2 (+2/-2)
bin/nova-api-metadata (+2/-2)
bin/nova-api-os-compute (+2/-2)
bin/nova-api-os-volume (+0/-46)
bin/nova-cert (+7/-3)
bin/nova-clear-rabbit-queues (+6/-6)
bin/nova-compute (+16/-4)
bin/nova-conductor (+52/-0)
bin/nova-console (+8/-3)
bin/nova-consoleauth (+6/-4)
bin/nova-dhcpbridge (+18/-18)
bin/nova-manage (+56/-279)
bin/nova-network (+8/-3)
bin/nova-novncproxy (+22/-21)
bin/nova-objectstore (+2/-2)
bin/nova-rootwrap (+9/-0)
bin/nova-rpc-zmq-receiver (+10/-9)
bin/nova-scheduler (+8/-3)
bin/nova-volume (+0/-48)
bin/nova-volume-usage-audit (+0/-82)
bin/nova-xvpvncproxy (+2/-4)
debian/changelog (+37/-0)
debian/control (+32/-10)
debian/mans/nova-conductor.8 (+20/-0)
debian/nova-api-os-volume.init (+0/-76)
debian/nova-api-os-volume.install (+0/-1)
debian/nova-api-os-volume.logrotate (+0/-4)
debian/nova-api-os-volume.manpages (+0/-1)
debian/nova-api-os-volume.postrm (+0/-7)
debian/nova-api-os-volume.upstart.in (+0/-18)
debian/nova-conductor.init (+71/-0)
debian/nova-conductor.install (+1/-0)
debian/nova-conductor.logrotate (+7/-0)
debian/nova-conductor.manpages (+1/-0)
debian/nova-conductor.postrm (+7/-0)
debian/nova-conductor.upstart.in (+18/-0)
debian/nova-scheduler.install (+2/-0)
debian/nova-volume.default (+0/-4)
debian/nova-volume.dirs (+0/-1)
debian/nova-volume.init (+0/-111)
debian/nova-volume.install (+0/-3)
debian/nova-volume.logrotate (+0/-7)
debian/nova-volume.manpages (+0/-1)
debian/nova-volume.postinst (+0/-21)
debian/nova-volume.postrm (+0/-7)
debian/nova-volume.upstart.in (+0/-18)
debian/nova.conf (+1/-0)
debian/patches/avoid_setuptools_git_dependency.patch (+7/-6)
debian/patches/fix-libvirt-tests.patch (+48/-0)
debian/patches/fix-ubuntu-tests.patch (+3/-5)
debian/patches/rbd-security.patch (+0/-43)
debian/patches/series (+2/-6)
debian/patches/ubuntu-show-tests.patch (+15/-0)
debian/patches/ubuntu/fix-ec2-volume-id-mappings.patch (+0/-43)
debian/patches/ubuntu/fix-libvirt-firewall-slowdown.patch (+0/-106)
debian/patches/ubuntu/ubuntu-fix-32-64-bit-iss.patch (+0/-75)
debian/patches/ubuntu/ubuntu-fix-ec2-instance-id-mappings.patch (+0/-17)
debian/rules (+4/-8)
doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json (+8/-8)
doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml (+3/-3)
doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json (+8/-8)
doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml (+3/-3)
doc/api_samples/all_extensions/extensions-get-resp.json (+9/-9)
doc/api_samples/all_extensions/extensions-get-resp.xml (+4/-4)
doc/api_samples/all_extensions/server-action-changepassword.json (+5/-0)
doc/api_samples/all_extensions/server-action-changepassword.xml (+4/-0)
doc/api_samples/all_extensions/server-action-confirmresize.json (+3/-0)
doc/api_samples/all_extensions/server-action-confirmresize.xml (+3/-0)
doc/api_samples/all_extensions/server-action-createimage.json (+8/-0)
doc/api_samples/all_extensions/server-action-createimage.xml (+8/-0)
doc/api_samples/all_extensions/server-action-reboot.json (+5/-0)
doc/api_samples/all_extensions/server-action-reboot.xml (+4/-0)
doc/api_samples/all_extensions/server-action-rebuild-resp.json (+56/-0)
doc/api_samples/all_extensions/server-action-rebuild-resp.xml (+19/-0)
doc/api_samples/all_extensions/server-action-rebuild.json (+18/-0)
doc/api_samples/all_extensions/server-action-rebuild.xml (+25/-0)
doc/api_samples/all_extensions/server-action-resize.json (+5/-0)
doc/api_samples/all_extensions/server-action-resize.xml (+4/-0)
doc/api_samples/all_extensions/server-action-revertresize.json (+3/-0)
doc/api_samples/all_extensions/server-action-revertresize.xml (+3/-0)
doc/api_samples/all_extensions/server-get-resp.json (+8/-8)
doc/api_samples/all_extensions/server-get-resp.xml (+3/-3)
doc/api_samples/all_extensions/servers-details-resp.json (+9/-8)
doc/api_samples/all_extensions/servers-details-resp.xml (+3/-3)
doc/api_samples/limit-get-resp.json (+0/-2)
doc/api_samples/limit-get-resp.xml (+0/-2)
doc/api_samples/os-admin-actions/admin-actions-backup-server.json (+7/-0)
doc/api_samples/os-admin-actions/admin-actions-backup-server.xml (+6/-0)
doc/api_samples/os-admin-actions/admin-actions-inject-network-info.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-inject-network-info.xml (+2/-0)
doc/api_samples/os-admin-actions/admin-actions-live-migrate.json (+7/-0)
doc/api_samples/os-admin-actions/admin-actions-live-migrate.xml (+6/-0)
doc/api_samples/os-admin-actions/admin-actions-lock-server.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-lock-server.xml (+2/-0)
doc/api_samples/os-admin-actions/admin-actions-lock.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-migrate.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-migrate.xml (+2/-0)
doc/api_samples/os-admin-actions/admin-actions-pause.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-pause.xml (+2/-0)
doc/api_samples/os-admin-actions/admin-actions-reset-network.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-reset-network.xml (+2/-0)
doc/api_samples/os-admin-actions/admin-actions-reset-server-state.json (+5/-0)
doc/api_samples/os-admin-actions/admin-actions-reset-server-state.xml (+4/-0)
doc/api_samples/os-admin-actions/admin-actions-resume.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-resume.xml (+2/-0)
doc/api_samples/os-admin-actions/admin-actions-suspend.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-suspend.xml (+2/-0)
doc/api_samples/os-admin-actions/admin-actions-unlock-server.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-unlock-server.xml (+2/-0)
doc/api_samples/os-admin-actions/admin-actions-unlock.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-unpause.json (+3/-0)
doc/api_samples/os-admin-actions/admin-actions-unpause.xml (+2/-0)
doc/api_samples/os-admin-actions/server-post-req.json (+16/-0)
doc/api_samples/os-admin-actions/server-post-req.xml (+19/-0)
doc/api_samples/os-admin-actions/server-post-resp.json (+16/-0)
doc/api_samples/os-admin-actions/server-post-resp.xml (+6/-0)
doc/api_samples/os-aggregates/aggregate-add-host-post-req.json (+6/-0)
doc/api_samples/os-aggregates/aggregate-add-host-post-req.xml (+2/-0)
doc/api_samples/os-aggregates/aggregate-metadata-post-req.json (+9/-0)
doc/api_samples/os-aggregates/aggregate-metadata-post-req.xml (+6/-0)
doc/api_samples/os-aggregates/aggregate-post-req.json (+7/-0)
doc/api_samples/os-aggregates/aggregate-post-req.xml (+2/-0)
doc/api_samples/os-aggregates/aggregate-post-resp.json (+11/-0)
doc/api_samples/os-aggregates/aggregate-post-resp.xml (+10/-0)
doc/api_samples/os-aggregates/aggregate-remove-host-post-req.json (+6/-0)
doc/api_samples/os-aggregates/aggregate-remove-host-post-req.xml (+2/-0)
doc/api_samples/os-aggregates/aggregate-update-post-req.json (+7/-0)
doc/api_samples/os-aggregates/aggregate-update-post-req.xml (+2/-0)
doc/api_samples/os-aggregates/aggregate-update-post-resp.json (+13/-0)
doc/api_samples/os-aggregates/aggregate-update-post-resp.xml (+12/-0)
doc/api_samples/os-aggregates/aggregates-add-host-post-resp.json (+15/-0)
doc/api_samples/os-aggregates/aggregates-add-host-post-resp.xml (+14/-0)
doc/api_samples/os-aggregates/aggregates-get-resp.json (+13/-0)
doc/api_samples/os-aggregates/aggregates-get-resp.xml (+12/-0)
doc/api_samples/os-aggregates/aggregates-list-get-resp.json (+15/-0)
doc/api_samples/os-aggregates/aggregates-list-get-resp.xml (+14/-0)
doc/api_samples/os-aggregates/aggregates-metadata-post-resp.json (+15/-0)
doc/api_samples/os-aggregates/aggregates-metadata-post-resp.xml (+14/-0)
doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.json (+13/-0)
doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml (+12/-0)
doc/api_samples/os-aggregates/server-post-req.json (+16/-0)
doc/api_samples/os-aggregates/server-post-resp.json (+16/-0)
doc/api_samples/os-availability-zone/availability-zone-post-req.json (+17/-0)
doc/api_samples/os-availability-zone/availability-zone-post-req.xml (+23/-0)
doc/api_samples/os-availability-zone/availability-zone-post-resp.json (+16/-0)
doc/api_samples/os-availability-zone/availability-zone-post-resp.xml (+6/-0)
doc/api_samples/os-certificates/certificate-create-resp.json (+6/-0)
doc/api_samples/os-certificates/certificate-create-resp.xml (+2/-0)
doc/api_samples/os-certificates/certificate-get-root-resp.json (+6/-0)
doc/api_samples/os-certificates/certificate-get-root-resp.xml (+2/-0)
doc/api_samples/os-cloudpipe/cloud-pipe-create-req.json (+5/-0)
doc/api_samples/os-cloudpipe/cloud-pipe-create-req.xml (+3/-0)
doc/api_samples/os-cloudpipe/cloud-pipe-create-resp.json (+5/-0)
doc/api_samples/os-cloudpipe/cloud-pipe-create-resp.xml (+4/-0)
doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json (+15/-0)
doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.xml (+12/-0)
doc/api_samples/os-consoles/get-vnc-console-post-req.json (+5/-0)
doc/api_samples/os-consoles/get-vnc-console-post-req.xml (+2/-0)
doc/api_samples/os-consoles/get-vnc-console-post-resp.json (+6/-0)
doc/api_samples/os-consoles/get-vnc-console-post-resp.xml (+5/-0)
doc/api_samples/os-consoles/server-post-req.json (+16/-0)
doc/api_samples/os-consoles/server-post-req.xml (+19/-0)
doc/api_samples/os-consoles/server-post-resp.json (+16/-0)
doc/api_samples/os-consoles/server-post-resp.xml (+6/-0)
doc/api_samples/os-hosts/host-get-resp.json (+31/-0)
doc/api_samples/os-hosts/host-get-resp.xml (+24/-0)
doc/api_samples/os-hosts/hosts-list-resp.json (+24/-0)
doc/api_samples/os-hosts/hosts-list-resp.xml (+7/-0)
doc/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json (+18/-0)
doc/api_samples/os-multiple-create/multiple-create-no-resv-post-req.xml (+24/-0)
doc/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json (+16/-0)
doc/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.xml (+6/-0)
doc/api_samples/os-multiple-create/multiple-create-post-req.json (+19/-0)
doc/api_samples/os-multiple-create/multiple-create-post-req.xml (+25/-0)
doc/api_samples/os-multiple-create/multiple-create-post-resp.json (+3/-0)
doc/api_samples/os-multiple-create/multiple-create-post-resp.xml (+4/-0)
doc/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json (+17/-0)
doc/api_samples/os-server-diagnostics/server-diagnostics-get-resp.xml (+18/-0)
doc/api_samples/os-server-diagnostics/server-post-req.json (+16/-0)
doc/api_samples/os-server-diagnostics/server-post-req.xml (+19/-0)
doc/api_samples/os-server-diagnostics/server-post-resp.json (+16/-0)
doc/api_samples/os-server-diagnostics/server-post-resp.xml (+6/-0)
doc/api_samples/os-server-start-stop/server-post-req.json (+16/-0)
doc/api_samples/os-server-start-stop/server-post-req.xml (+19/-0)
doc/api_samples/os-server-start-stop/server-post-resp.json (+16/-0)
doc/api_samples/os-server-start-stop/server-post-resp.xml (+6/-0)
doc/api_samples/os-server-start-stop/server_start_stop.xml (+1/-1)
doc/api_samples/os-simple-tenant-usage/server-post-req.json (+16/-0)
doc/api_samples/os-simple-tenant-usage/server-post-req.xml (+19/-0)
doc/api_samples/os-simple-tenant-usage/server-post-resp.json (+16/-0)
doc/api_samples/os-simple-tenant-usage/server-post-resp.xml (+6/-0)
doc/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json (+27/-0)
doc/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.xml (+26/-0)
doc/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json (+13/-0)
doc/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.xml (+13/-0)
doc/api_samples/os-used-limits/usedlimits-get-resp.json (+77/-0)
doc/api_samples/os-used-limits/usedlimits-get-resp.xml (+34/-0)
doc/api_samples/server-ips-network-resp.json (+8/-0)
doc/api_samples/server-ips-network-resp.xml (+4/-0)
doc/api_samples/server-ips-resp.json (+10/-0)
doc/api_samples/server-ips-resp.xml (+6/-0)
doc/source/conf.py (+4/-7)
doc/source/devref/architecture.rst (+3/-4)
doc/source/devref/development.environment.rst (+4/-0)
doc/source/devref/fakes.rst (+0/-10)
doc/source/devref/filter_scheduler.rst (+23/-14)
doc/source/devref/index.rst (+0/-2)
doc/source/devref/rpc.rst (+4/-6)
doc/source/devref/scheduler.rst (+0/-10)
doc/source/devref/threading.rst (+1/-1)
doc/source/devref/volume.rst (+0/-66)
doc/source/devref/xensmvolume.rst (+0/-88)
doc/source/man/nova-api-os-volume.rst (+0/-49)
doc/source/man/nova-conductor.rst (+45/-0)
doc/source/man/nova-rootwrap.rst (+2/-2)
doc/source/man/nova-volume-usage-audit.rst (+0/-61)
doc/source/man/nova-volume.rst (+0/-54)
etc/nova/api-paste.ini (+2/-2)
etc/nova/logging_sample.conf (+2/-2)
etc/nova/nova.conf.sample (+156/-298)
etc/nova/policy.json (+123/-118)
etc/nova/rootwrap.d/volume.filters (+0/-36)
nova.egg-info/PKG-INFO (+10/-2)
nova.egg-info/SOURCES.txt (+438/-174)
nova.egg-info/requires.txt (+4/-3)
nova/__init__.py (+0/-6)
nova/api/auth.py (+8/-7)
nova/api/ec2/__init__.py (+27/-16)
nova/api/ec2/apirequest.py (+0/-2)
nova/api/ec2/cloud.py (+57/-38)
nova/api/ec2/ec2utils.py (+39/-12)
nova/api/ec2/faults.py (+3/-3)
nova/api/manager.py (+4/-3)
nova/api/metadata/base.py (+16/-11)
nova/api/metadata/handler.py (+8/-6)
nova/api/openstack/__init__.py (+21/-6)
nova/api/openstack/auth.py (+4/-4)
nova/api/openstack/common.py (+11/-9)
nova/api/openstack/compute/__init__.py (+74/-63)
nova/api/openstack/compute/contrib/__init__.py (+4/-3)
nova/api/openstack/compute/contrib/admin_actions.py (+11/-9)
nova/api/openstack/compute/contrib/certificates.py (+0/-3)
nova/api/openstack/compute/contrib/cloudpipe.py (+16/-9)
nova/api/openstack/compute/contrib/config_drive.py (+1/-4)
nova/api/openstack/compute/contrib/deferred_delete.py (+4/-0)
nova/api/openstack/compute/contrib/extended_server_attributes.py (+0/-3)
nova/api/openstack/compute/contrib/extended_status.py (+0/-3)
nova/api/openstack/compute/contrib/fixed_ips.py (+98/-0)
nova/api/openstack/compute/contrib/flavormanage.py (+2/-1)
nova/api/openstack/compute/contrib/floating_ips.py (+42/-20)
nova/api/openstack/compute/contrib/fping.py (+161/-0)
nova/api/openstack/compute/contrib/hosts.py (+9/-11)
nova/api/openstack/compute/contrib/instance_usage_audit_log.py (+4/-3)
nova/api/openstack/compute/contrib/networks.py (+2/-5)
nova/api/openstack/compute/contrib/rescue.py (+4/-3)
nova/api/openstack/compute/contrib/security_groups.py (+2/-4)
nova/api/openstack/compute/contrib/services.py (+142/-0)
nova/api/openstack/compute/contrib/simple_tenant_usage.py (+0/-3)
nova/api/openstack/compute/contrib/volumes.py (+11/-21)
nova/api/openstack/compute/contrib/volumetypes.py (+0/-225)
nova/api/openstack/compute/extensions.py (+4/-3)
nova/api/openstack/compute/image_metadata.py (+0/-4)
nova/api/openstack/compute/images.py (+0/-2)
nova/api/openstack/compute/ips.py (+0/-3)
nova/api/openstack/compute/limits.py (+1/-0)
nova/api/openstack/compute/server_metadata.py (+3/-0)
nova/api/openstack/compute/servers.py (+68/-30)
nova/api/openstack/compute/views/addresses.py (+0/-3)
nova/api/openstack/compute/views/images.py (+4/-4)
nova/api/openstack/compute/views/limits.py (+0/-2)
nova/api/openstack/compute/views/versions.py (+6/-5)
nova/api/openstack/extensions.py (+0/-6)
nova/api/openstack/volume/__init__.py (+0/-64)
nova/api/openstack/volume/contrib/__init__.py (+0/-39)
nova/api/openstack/volume/contrib/admin_actions.py (+0/-129)
nova/api/openstack/volume/contrib/image_create.py (+0/-31)
nova/api/openstack/volume/contrib/types_extra_specs.py (+0/-149)
nova/api/openstack/volume/contrib/types_manage.py (+0/-91)
nova/api/openstack/volume/contrib/volume_actions.py (+0/-131)
nova/api/openstack/volume/extensions.py (+0/-34)
nova/api/openstack/volume/snapshots.py (+0/-180)
nova/api/openstack/volume/types.py (+0/-80)
nova/api/openstack/volume/versions.py (+0/-83)
nova/api/openstack/volume/views/__init__.py (+0/-16)
nova/api/openstack/volume/views/types.py (+0/-34)
nova/api/openstack/volume/views/versions.py (+0/-36)
nova/api/openstack/volume/volumes.py (+0/-359)
nova/api/openstack/wsgi.py (+26/-6)
nova/api/sizelimit.py (+4/-5)
nova/auth/__init__.py (+0/-28)
nova/auth/fakeldap.py (+0/-328)
nova/block_device.py (+9/-1)
nova/cert/manager.py (+0/-2)
nova/cert/rpcapi.py (+4/-4)
nova/cloudpipe/__init__.py (+0/-3)
nova/cloudpipe/pipelib.py (+22/-17)
nova/common/deprecated.py (+0/-55)
nova/common/eventlet_backdoor.py (+0/-80)
nova/compat/__init__.py (+0/-15)
nova/compat/flagfile.py (+0/-188)
nova/compute/__init__.py (+7/-3)
nova/compute/api.py (+350/-222)
nova/compute/claims.py (+220/-0)
nova/compute/instance_types.py (+16/-6)
nova/compute/manager.py (+750/-335)
nova/compute/resource_tracker.py (+280/-315)
nova/compute/rpcapi.py (+103/-41)
nova/compute/stats.py (+4/-0)
nova/compute/task_states.py (+10/-2)
nova/compute/utils.py (+14/-9)
nova/conductor/__init__.py (+25/-0)
nova/conductor/api.py (+60/-0)
nova/conductor/manager.py (+51/-0)
nova/conductor/rpcapi.py (+43/-0)
nova/config.py (+386/-0)
nova/console/api.py (+7/-7)
nova/console/manager.py (+6/-7)
nova/console/rpcapi.py (+4/-4)
nova/console/vmrc.py (+3/-4)
nova/console/vmrc_manager.py (+7/-16)
nova/console/xvp.py (+14/-13)
nova/consoleauth/__init__.py (+2/-3)
nova/consoleauth/manager.py (+6/-6)
nova/consoleauth/rpcapi.py (+3/-4)
nova/context.py (+2/-2)
nova/crypto.py (+18/-17)
nova/db/api.py (+35/-370)
nova/db/base.py (+3/-5)
nova/db/migration.py (+1/-1)
nova/db/sqlalchemy/api.py (+329/-1035)
nova/db/sqlalchemy/migrate_repo/versions/082_essex.py (+0/-996)
nova/db/sqlalchemy/migrate_repo/versions/083_quota_class.py (+0/-63)
nova/db/sqlalchemy/migrate_repo/versions/084_quotas_unlimited.py (+0/-42)
nova/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py (+0/-33)
nova/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py (+0/-44)
nova/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py (+0/-58)
nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py (+0/-80)
nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql (+0/-97)
nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql (+0/-97)
nova/db/sqlalchemy/migrate_repo/versions/089_add_volume_id_mappings.py (+0/-117)
nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py (+0/-237)
nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_downgrade.sql (+0/-226)
nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_upgrade.sql (+0/-226)
nova/db/sqlalchemy/migrate_repo/versions/091_convert_volume_ids_to_uuid.py (+0/-218)
nova/db/sqlalchemy/migrate_repo/versions/092_add_instance_system_metadata.py (+0/-73)
nova/db/sqlalchemy/migrate_repo/versions/093_drop_instance_actions_table.py (+0/-54)
nova/db/sqlalchemy/migrate_repo/versions/094_update_postgresql_sequence_names.py (+0/-54)
nova/db/sqlalchemy/migrate_repo/versions/095_change_fk_instance_id_to_uuid.py (+0/-94)
nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_downgrade.sql (+0/-133)
nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_upgrade.sql (+0/-132)
nova/db/sqlalchemy/migrate_repo/versions/096_recreate_dns_domains.py (+0/-145)
nova/db/sqlalchemy/migrate_repo/versions/097_quota_usages_reservations.py (+0/-106)
nova/db/sqlalchemy/migrate_repo/versions/098_update_volume_attach_time.py (+0/-72)
nova/db/sqlalchemy/migrate_repo/versions/099_add_disabled_instance_types.py (+0/-40)
nova/db/sqlalchemy/migrate_repo/versions/100_instance_metadata_uses_uuid.py (+0/-80)
nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_downgrade.sql (+0/-64)
nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_upgrade.sql (+0/-64)
nova/db/sqlalchemy/migrate_repo/versions/101_security_group_instance_association_uses_uuid.py (+0/-80)
nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_downgrade.sql (+0/-61)
nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_upgrade.sql (+0/-61)
nova/db/sqlalchemy/migrate_repo/versions/102_consoles_uses_uuid.py (+0/-80)
nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_downgrade.sql (+0/-72)
nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_upgrade.sql (+0/-72)
nova/db/sqlalchemy/migrate_repo/versions/103_instance_indexes.py (+0/-43)
nova/db/sqlalchemy/migrate_repo/versions/104_instance_indexes_2.py (+0/-43)
nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_downgrade.sql (+0/-1)
nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_upgrade.sql (+0/-1)
nova/db/sqlalchemy/migrate_repo/versions/105_instance_info_caches_uses_uuid.py (+0/-70)
nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_downgrade.sql (+0/-50)
nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_upgrade.sql (+0/-50)
nova/db/sqlalchemy/migrate_repo/versions/106_add_foreign_keys.py (+0/-67)
nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_downgrade.sql (+0/-1)
nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_upgrade.sql (+0/-1)
nova/db/sqlalchemy/migrate_repo/versions/107_add_instance_id_mappings.py (+0/-67)
nova/db/sqlalchemy/migrate_repo/versions/108_task_log.py (+0/-62)
nova/db/sqlalchemy/migrate_repo/versions/109_drop_dns_domains_project_id_fkey.py (+0/-63)
nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_downgrade.sql (+0/-53)
nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_upgrade.sql (+0/-52)
nova/db/sqlalchemy/migrate_repo/versions/110_drop_deprecated_auth.py (+0/-189)
nova/db/sqlalchemy/migrate_repo/versions/111_general_aggregates.py (+0/-72)
nova/db/sqlalchemy/migrate_repo/versions/112_update_deleted_instance_data.py (+0/-69)
nova/db/sqlalchemy/migrate_repo/versions/113_fixed_ips_uses_uuid.py (+0/-108)
nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_downgrade.sql (+0/-85)
nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_upgrade.sql (+0/-85)
nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_downgrade.sql (+0/-71)
nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_upgrade.sql (+0/-71)
nova/db/sqlalchemy/migrate_repo/versions/114_vifs_uses_uuid.py (+0/-108)
nova/db/sqlalchemy/migrate_repo/versions/115_make_user_quotas_key_and_value.py (+0/-94)
nova/db/sqlalchemy/migrate_repo/versions/116_drop_user_quotas_key_and_value.py (+0/-98)
nova/db/sqlalchemy/migrate_repo/versions/117_add_compute_node_stats.py (+0/-61)
nova/db/sqlalchemy/migrate_repo/versions/118_add_indexes_to_agent_builds.py (+0/-44)
nova/db/sqlalchemy/migrate_repo/versions/119_add_indexes_to_aggregate_metadata.py (+0/-42)
nova/db/sqlalchemy/migrate_repo/versions/120_add_indexes_to_block_device_mapping.py (+0/-71)
nova/db/sqlalchemy/migrate_repo/versions/121_add_indexes_to_bw_usage_cache.py (+0/-44)
nova/db/sqlalchemy/migrate_repo/versions/122_add_indexes_to_certificates.py (+0/-59)
nova/db/sqlalchemy/migrate_repo/versions/123_add_indexes_to_dns_domains.py (+0/-44)
nova/db/sqlalchemy/migrate_repo/versions/124_add_indexes_to_fixed_ips.py (+0/-76)
nova/db/sqlalchemy/migrate_repo/versions/125_add_indexes_to_floating_ips.py (+0/-68)
nova/db/sqlalchemy/migrate_repo/versions/126_add_indexes_to_instance_faults.py (+0/-44)
nova/db/sqlalchemy/migrate_repo/versions/127_add_indexes_to_instance_type_extra_specs.py (+0/-44)
nova/db/sqlalchemy/migrate_repo/versions/128_add_indexes_to_instances.py (+0/-96)
nova/db/sqlalchemy/migrate_repo/versions/129_add_indexes_to_iscsi_targets.py (+0/-57)
nova/db/sqlalchemy/migrate_repo/versions/130_add_indexes_to_key_pairs.py (+0/-44)
nova/db/sqlalchemy/migrate_repo/versions/131_add_indexes_to_networks.py (+0/-107)
nova/db/sqlalchemy/migrate_repo/versions/132_add_instance_type_projects.py (+0/-67)
nova/db/sqlalchemy/migrate_repo/versions/133_aggregate_delete_fix.py (+0/-48)
nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py (+1225/-0)
nova/db/sqlalchemy/migrate_repo/versions/134_add_counters_to_bw_usage_cache.py (+60/-0)
nova/db/sqlalchemy/migrate_repo/versions/135_add_node_to_instances.py (+55/-0)
nova/db/sqlalchemy/migrate_repo/versions/136_add_index_to_instances.py (+41/-0)
nova/db/sqlalchemy/migrate_repo/versions/137_add_indexes_to_migrations.py (+46/-0)
nova/db/sqlalchemy/migrate_repo/versions/138_drop_server_name_from_instances.py (+39/-0)
nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_downgrade.sql (+239/-0)
nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_upgrade.sql (+239/-0)
nova/db/sqlalchemy/migrate_repo/versions/139_add_indexes_to_fixed_ips.py (+46/-0)
nova/db/sqlalchemy/migrate_repo/versions/140_drop_unused_postgresql_volume_sequences.py (+61/-0)
nova/db/sqlalchemy/migrate_repo/versions/141_update_migrations_instance_uuid.py (+33/-0)
nova/db/sqlalchemy/migration.py (+0/-3)
nova/db/sqlalchemy/models.py (+23/-84)
nova/db/sqlalchemy/session.py (+244/-67)
nova/exception.py (+32/-127)
nova/filters.py (+53/-0)
nova/flags.py (+0/-424)
nova/image/glance.py (+15/-11)
nova/image/s3.py (+11/-10)
nova/ipv6/api.py (+2/-3)
nova/loadables.py (+116/-0)
nova/locale/bs/LC_MESSAGES/nova.po (+1447/-1344)
nova/locale/cs/LC_MESSAGES/nova.po (+1457/-1342)
nova/locale/da/LC_MESSAGES/nova.po (+1447/-1344)
nova/locale/de/LC_MESSAGES/nova.po (+1449/-1347)
nova/locale/en_AU/LC_MESSAGES/nova.po (+1454/-1347)
nova/locale/en_GB/LC_MESSAGES/nova.po (+1464/-1361)
nova/locale/en_US/LC_MESSAGES/nova.po (+1905/-1800)
nova/locale/es/LC_MESSAGES/nova.po (+1454/-1347)
nova/locale/fr/LC_MESSAGES/nova.po (+1455/-1349)
nova/locale/it/LC_MESSAGES/nova.po (+1450/-1347)
nova/locale/ja/LC_MESSAGES/nova.po (+1454/-1347)
nova/locale/ko/LC_MESSAGES/nova.po (+1450/-1346)
nova/locale/nb/LC_MESSAGES/nova.po (+1448/-1344)
nova/locale/nova.pot (+1949/-3656)
nova/locale/pt_BR/LC_MESSAGES/nova.po (+1456/-1347)
nova/locale/ru/LC_MESSAGES/nova.po (+1458/-1344)
nova/locale/tl/LC_MESSAGES/nova.po (+1447/-1344)
nova/locale/tr/LC_MESSAGES/nova.po (+1447/-1344)
nova/locale/uk/LC_MESSAGES/nova.po (+1447/-1344)
nova/locale/zh_CN/LC_MESSAGES/nova.po (+1453/-1338)
nova/locale/zh_TW/LC_MESSAGES/nova.po (+1450/-1346)
nova/manager.py (+44/-16)
nova/network/__init__.py (+8/-5)
nova/network/api.py (+117/-150)
nova/network/l3.py (+0/-3)
nova/network/ldapdns.py (+14/-27)
nova/network/linux_net.py (+96/-79)
nova/network/manager.py (+242/-128)
nova/network/minidns.py (+5/-3)
nova/network/quantum/nova_ipam_lib.py (+0/-4)
nova/network/quantumv2/__init__.py (+19/-20)
nova/network/quantumv2/api.py (+226/-22)
nova/network/rpcapi.py (+299/-0)
nova/notifications.py (+30/-10)
nova/objectstore/__init__.py (+0/-6)
nova/objectstore/s3server.py (+10/-9)
nova/openstack/common/cfg.py (+3/-3)
nova/openstack/common/eventlet_backdoor.py (+80/-0)
nova/openstack/common/fileutils.py (+35/-0)
nova/openstack/common/gettextutils.py (+1/-1)
nova/openstack/common/lockutils.py (+232/-0)
nova/openstack/common/log.py (+22/-5)
nova/openstack/common/network_utils.py (+68/-0)
nova/openstack/common/notifier/api.py (+3/-3)
nova/openstack/common/notifier/rabbit_notifier.py (+7/-24)
nova/openstack/common/notifier/rpc_notifier.py (+46/-0)
nova/openstack/common/policy.py (+712/-233)
nova/openstack/common/rpc/__init__.py (+10/-4)
nova/openstack/common/rpc/amqp.py (+8/-0)
nova/openstack/common/rpc/impl_kombu.py (+90/-48)
nova/openstack/common/rpc/impl_qpid.py (+47/-66)
nova/openstack/common/rpc/impl_zmq.py (+1/-1)
nova/openstack/common/rpc/service.py (+70/-0)
nova/openstack/common/setup.py (+24/-16)
nova/openstack/common/timeutils.py (+16/-5)
nova/openstack/common/uuidutils.py (+39/-0)
nova/policy.py (+47/-36)
nova/quota.py (+54/-23)
nova/rootwrap/filters.py (+8/-6)
nova/scheduler/baremetal_host_manager.py (+71/-0)
nova/scheduler/chance.py (+7/-14)
nova/scheduler/driver.py (+23/-46)
nova/scheduler/filter_scheduler.py (+54/-106)
nova/scheduler/filters/__init__.py (+30/-60)
nova/scheduler/filters/compute_capabilities_filter.py (+13/-4)
nova/scheduler/filters/compute_filter.py (+2/-3)
nova/scheduler/filters/core_filter.py (+4/-5)
nova/scheduler/filters/disk_filter.py (+3/-4)
nova/scheduler/filters/image_props_filter.py (+1/-1)
nova/scheduler/filters/io_ops_filter.py (+43/-0)
nova/scheduler/filters/isolated_hosts_filter.py (+6/-6)
nova/scheduler/filters/num_instances_filter.py (+41/-0)
nova/scheduler/filters/ram_filter.py (+3/-4)
nova/scheduler/filters/trusted_filter.py (+19/-14)
nova/scheduler/host_manager.py (+195/-94)
nova/scheduler/least_cost.py (+0/-117)
nova/scheduler/manager.py (+16/-16)
nova/scheduler/multi.py (+10/-14)
nova/scheduler/rpcapi.py (+5/-11)
nova/scheduler/scheduler_options.py (+3/-4)
nova/scheduler/simple.py (+0/-97)
nova/scheduler/weights/__init__.py (+61/-0)
nova/scheduler/weights/least_cost.py (+125/-0)
nova/scheduler/weights/ram.py (+45/-0)
nova/service.py (+44/-36)
nova/test.py (+27/-204)
nova/testing/README.rst (+25/-5)
nova/tests/__init__.py (+26/-29)
nova/tests/api/ec2/test_cinder_cloud.py (+35/-45)
nova/tests/api/ec2/test_cloud.py (+138/-588)
nova/tests/api/ec2/test_ec2_validate.py (+83/-3)
nova/tests/api/ec2/test_middleware.py (+10/-10)
nova/tests/api/openstack/compute/contrib/test_admin_actions.py (+63/-26)
nova/tests/api/openstack/compute/contrib/test_aggregates.py (+3/-1)
nova/tests/api/openstack/compute/contrib/test_cloudpipe.py (+11/-11)
nova/tests/api/openstack/compute/contrib/test_config_drive.py (+6/-2)
nova/tests/api/openstack/compute/contrib/test_console_output.py (+19/-12)
nova/tests/api/openstack/compute/contrib/test_consoles.py (+19/-14)
nova/tests/api/openstack/compute/contrib/test_createserverext.py (+77/-87)
nova/tests/api/openstack/compute/contrib/test_deferred_delete.py (+20/-19)
nova/tests/api/openstack/compute/contrib/test_disk_config.py (+5/-5)
nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py (+5/-6)
nova/tests/api/openstack/compute/contrib/test_extended_status.py (+5/-6)
nova/tests/api/openstack/compute/contrib/test_fixed_ips.py (+164/-0)
nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py (+0/-5)
nova/tests/api/openstack/compute/contrib/test_flavor_manage.py (+10/-4)
nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py (+0/-5)
nova/tests/api/openstack/compute/contrib/test_flavor_swap.py (+0/-5)
nova/tests/api/openstack/compute/contrib/test_flavorextradata.py (+2/-2)
nova/tests/api/openstack/compute/contrib/test_floating_ips.py (+131/-4)
nova/tests/api/openstack/compute/contrib/test_fping.py (+94/-0)
nova/tests/api/openstack/compute/contrib/test_hosts.py (+29/-17)
nova/tests/api/openstack/compute/contrib/test_keypairs.py (+22/-17)
nova/tests/api/openstack/compute/contrib/test_multinic_xs.py (+9/-4)
nova/tests/api/openstack/compute/contrib/test_networks.py (+4/-7)
nova/tests/api/openstack/compute/contrib/test_quota_classes.py (+7/-19)
nova/tests/api/openstack/compute/contrib/test_quotas.py (+5/-20)
nova/tests/api/openstack/compute/contrib/test_rescue.py (+14/-8)
nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py (+5/-1)
nova/tests/api/openstack/compute/contrib/test_security_groups.py (+9/-7)
nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py (+8/-6)
nova/tests/api/openstack/compute/contrib/test_server_start_stop.py (+7/-11)
nova/tests/api/openstack/compute/contrib/test_services.py (+198/-0)
nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py (+16/-10)
nova/tests/api/openstack/compute/contrib/test_snapshots.py (+22/-18)
nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py (+6/-1)
nova/tests/api/openstack/compute/contrib/test_volume_types.py (+0/-224)
nova/tests/api/openstack/compute/contrib/test_volume_types_extra_specs.py (+0/-198)
nova/tests/api/openstack/compute/contrib/test_volumes.py (+43/-32)
nova/tests/api/openstack/compute/test_consoles.py (+8/-9)
nova/tests/api/openstack/compute/test_extensions.py (+47/-17)
nova/tests/api/openstack/compute/test_flavors.py (+6/-8)
nova/tests/api/openstack/compute/test_image_metadata.py (+4/-5)
nova/tests/api/openstack/compute/test_images.py (+7/-10)
nova/tests/api/openstack/compute/test_limits.py (+3/-6)
nova/tests/api/openstack/compute/test_server_actions.py (+127/-46)
nova/tests/api/openstack/compute/test_server_metadata.py (+17/-16)
nova/tests/api/openstack/compute/test_servers.py (+340/-181)
nova/tests/api/openstack/compute/test_urlmap.py (+8/-8)
nova/tests/api/openstack/compute/test_versions.py (+8/-4)
nova/tests/api/openstack/fakes.py (+13/-12)
nova/tests/api/openstack/test_common.py (+4/-8)
nova/tests/api/openstack/volume/__init__.py (+0/-19)
nova/tests/api/openstack/volume/contrib/__init__.py (+0/-19)
nova/tests/api/openstack/volume/contrib/test_admin_actions.py (+0/-184)
nova/tests/api/openstack/volume/contrib/test_types_extra_specs.py (+0/-226)
nova/tests/api/openstack/volume/contrib/test_types_manage.py (+0/-122)
nova/tests/api/openstack/volume/contrib/test_volume_actions.py (+0/-162)
nova/tests/api/openstack/volume/extensions/__init__.py (+0/-15)
nova/tests/api/openstack/volume/extensions/foxinsocks.py (+0/-94)
nova/tests/api/openstack/volume/test_extensions.py (+0/-155)
nova/tests/api/openstack/volume/test_router.py (+0/-112)
nova/tests/api/openstack/volume/test_snapshots.py (+0/-285)
nova/tests/api/openstack/volume/test_types.py (+0/-194)
nova/tests/api/openstack/volume/test_volumes.py (+0/-602)
nova/tests/api/test_sizelimit.py (+3/-3)
nova/tests/baremetal/db/__init__.py (+14/-0)
nova/tests/baremetal/db/base.py (+51/-0)
nova/tests/baremetal/db/test_bm_interface.py (+47/-0)
nova/tests/baremetal/db/test_bm_node.py (+140/-0)
nova/tests/baremetal/db/test_bm_pxe_ip.py (+93/-0)
nova/tests/baremetal/db/utils.py (+81/-0)
nova/tests/baremetal/test_proxy_bare_metal.py (+0/-269)
nova/tests/baremetal/test_tilera.py (+0/-84)
nova/tests/cert/test_rpcapi.py (+4/-11)
nova/tests/compute/test_claims.py (+162/-0)
nova/tests/compute/test_compute.py (+1212/-514)
nova/tests/compute/test_compute_utils.py (+34/-17)
nova/tests/compute/test_multiple_nodes.py (+100/-0)
nova/tests/compute/test_resource_tracker.py (+480/-278)
nova/tests/compute/test_rpcapi.py (+49/-29)
nova/tests/conductor/test_conductor.py (+133/-0)
nova/tests/console/test_console.py (+5/-4)
nova/tests/console/test_rpcapi.py (+4/-11)
nova/tests/consoleauth/test_consoleauth.py (+0/-3)
nova/tests/consoleauth/test_rpcapi.py (+3/-11)
nova/tests/db/fakes.py (+0/-11)
nova/tests/declare_flags.py (+2/-3)
nova/tests/fake_crypto.py (+111/-0)
nova/tests/fake_flags.py (+10/-15)
nova/tests/fake_imagebackend.py (+8/-0)
nova/tests/fake_ldap.py (+328/-0)
nova/tests/fake_libvirt_utils.py (+13/-4)
nova/tests/fake_loadables/__init__.py (+27/-0)
nova/tests/fake_loadables/fake_loadable1.py (+44/-0)
nova/tests/fake_loadables/fake_loadable2.py (+39/-0)
nova/tests/fake_network.py (+17/-16)
nova/tests/fake_volume.py (+15/-7)
nova/tests/fakelibvirt.py (+14/-0)
nova/tests/hyperv/README.rst (+83/-0)
nova/tests/hyperv/basetestcase.py (+3/-1)
nova/tests/hyperv/db_fakes.py (+30/-12)
nova/tests/hyperv/hypervutils.py (+10/-3)
nova/tests/hyperv/mockproxy.py (+43/-4)
nova/tests/image/fake.py (+19/-20)
nova/tests/image/test_fake.py (+1/-1)
nova/tests/image/test_glance.py (+6/-5)
nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json.tpl (+1/-1)
nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml.tpl (+1/-1)
nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json.tpl (+1/-1)
nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml.tpl (+1/-1)
nova/tests/integrated/api_samples/README.rst (+1/-1)
nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl (+24/-8)
nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl (+9/-3)
nova/tests/integrated/api_samples/all_extensions/server-action-changepassword.json.tpl (+5/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-changepassword.xml.tpl (+4/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-confirmresize.json.tpl (+3/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-confirmresize.xml.tpl (+3/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-createimage.json.tpl (+9/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-createimage.xml.tpl (+8/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-reboot.json.tpl (+5/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-reboot.xml.tpl (+4/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-rebuild-resp.json.tpl (+56/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-rebuild-resp.xml.tpl (+39/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-rebuild.json.tpl (+18/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-rebuild.xml.tpl (+25/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-resize.json.tpl (+5/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-resize.xml.tpl (+4/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-revertresize.json.tpl (+3/-0)
nova/tests/integrated/api_samples/all_extensions/server-action-revertresize.xml.tpl (+3/-0)
nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl (+1/-1)
nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl (+1/-1)
nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl (+1/-1)
nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl (+1/-1)
nova/tests/integrated/api_samples/limit-get-resp.json.tpl (+13/-2)
nova/tests/integrated/api_samples/limit-get-resp.xml.tpl (+3/-2)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-backup-server.json.tpl (+7/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-backup-server.xml.tpl (+6/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.json.tpl (+7/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.xml.tpl (+6/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-lock-server.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-lock-server.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-migrate.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-migrate.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-pause.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-pause.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-network.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl (+5/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.xml.tpl (+4/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl (+5/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-state.xml.tpl (+4/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-resume.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-resume.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-suspend.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-suspend.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unpause.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unpause.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-admin-actions/server-post-req.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-admin-actions/server-post-req.xml.tpl (+19/-0)
nova/tests/integrated/api_samples/os-admin-actions/server-post-resp.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-admin-actions/server-post-resp.xml.tpl (+6/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl (+6/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl (+9/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.xml.tpl (+6/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-post-req.json.tpl (+7/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-post-req.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-post-resp.json.tpl (+11/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-post-resp.xml.tpl (+10/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl (+6/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-req.json.tpl (+7/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-req.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl (+13/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl (+12/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl (+15/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl (+14/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl (+13/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl (+12/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl (+15/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl (+14/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl (+15/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl (+14/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl (+13/-0)
nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl (+12/-0)
nova/tests/integrated/api_samples/os-aggregates/server-post-req.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-aggregates/server-post-req.xml.tpl (+19/-0)
nova/tests/integrated/api_samples/os-aggregates/server-post-resp.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-aggregates/server-post-resp.xml.tpl (+6/-0)
nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-req.json.tpl (+17/-0)
nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-req.xml.tpl (+23/-0)
nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-resp.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-resp.xml.tpl (+8/-0)
nova/tests/integrated/api_samples/os-certificates/certificate-create-resp.json.tpl (+6/-0)
nova/tests/integrated/api_samples/os-certificates/certificate-create-resp.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-certificates/certificate-get-root-resp.json.tpl (+6/-0)
nova/tests/integrated/api_samples/os-certificates/certificate-get-root-resp.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl (+5/-0)
nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.xml.tpl (+3/-0)
nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.xml.tpl (+3/-0)
nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl (+13/-0)
nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.xml.tpl (+12/-0)
nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl (+5/-0)
nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl (+2/-0)
nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl (+6/-0)
nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl (+5/-0)
nova/tests/integrated/api_samples/os-consoles/server-post-req.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-consoles/server-post-req.xml.tpl (+19/-0)
nova/tests/integrated/api_samples/os-consoles/server-post-resp.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-consoles/server-post-resp.xml.tpl (+6/-0)
nova/tests/integrated/api_samples/os-hosts/host-get-resp.json.tpl (+31/-0)
nova/tests/integrated/api_samples/os-hosts/host-get-resp.xml.tpl (+24/-0)
nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl (+24/-0)
nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl (+7/-0)
nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl (+18/-0)
nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.xml.tpl (+24/-0)
nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.xml.tpl (+8/-0)
nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-req.json.tpl (+19/-0)
nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-req.xml.tpl (+25/-0)
nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl (+3/-0)
nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-resp.xml.tpl (+4/-0)
nova/tests/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl (+17/-0)
nova/tests/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.xml.tpl (+18/-0)
nova/tests/integrated/api_samples/os-server-diagnostics/server-post-req.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-server-diagnostics/server-post-req.xml.tpl (+19/-0)
nova/tests/integrated/api_samples/os-server-diagnostics/server-post-resp.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-server-diagnostics/server-post-resp.xml.tpl (+6/-0)
nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-req.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-req.xml.tpl (+19/-0)
nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl (+16/-0)
nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-resp.xml.tpl (+6/-0)
nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl (+27/-0)
nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.xml.tpl (+26/-0)
nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl (+13/-0)
nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.xml.tpl (+13/-0)
nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl (+90/-0)
nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl (+37/-0)
nova/tests/integrated/api_samples/server-action-rebuild-resp.xml.tpl (+2/-8)
nova/tests/integrated/api_samples/server-ips-network-resp.json.tpl (+8/-0)
nova/tests/integrated/api_samples/server-ips-network-resp.xml.tpl (+4/-0)
nova/tests/integrated/api_samples/server-ips-resp.json.tpl (+10/-0)
nova/tests/integrated/api_samples/server-ips-resp.xml.tpl (+6/-0)
nova/tests/integrated/integrated_helpers.py (+6/-6)
nova/tests/integrated/test_api_samples.py (+539/-40)
nova/tests/integrated/test_extensions.py (+4/-4)
nova/tests/integrated/test_multiprocess_api.py (+0/-2)
nova/tests/integrated/test_servers.py (+0/-4)
nova/tests/integrated/test_volumes.py (+0/-181)
nova/tests/matchers.py (+196/-0)
nova/tests/network/test_api.py (+133/-1)
nova/tests/network/test_linux_net.py (+23/-24)
nova/tests/network/test_manager.py (+178/-25)
nova/tests/network/test_quantumv2.py (+338/-28)
nova/tests/network/test_rpcapi.py (+311/-0)
nova/tests/policy.json (+202/-196)
nova/tests/runtime_flags.py (+2/-3)
nova/tests/scheduler/fakes.py (+21/-17)
nova/tests/scheduler/test_chance_scheduler.py (+4/-4)
nova/tests/scheduler/test_filter_scheduler.py (+84/-60)
nova/tests/scheduler/test_host_filters.py (+153/-118)
nova/tests/scheduler/test_host_manager.py (+248/-141)
nova/tests/scheduler/test_least_cost.py (+76/-37)
nova/tests/scheduler/test_multi_scheduler.py (+2/-19)
nova/tests/scheduler/test_rpcapi.py (+4/-17)
nova/tests/scheduler/test_scheduler.py (+5/-108)
nova/tests/scheduler/test_weights.py (+117/-0)
nova/tests/test_api.py (+9/-12)
nova/tests/test_bdm.py (+2/-1)
nova/tests/test_cinder.py (+0/-3)
nova/tests/test_compat_flagfile.py (+0/-175)
nova/tests/test_configdrive2.py (+3/-8)
nova/tests/test_crypto.py (+0/-3)
nova/tests/test_db_api.py (+228/-207)
nova/tests/test_deprecated.py (+0/-46)
nova/tests/test_exception.py (+0/-1)
nova/tests/test_filters.py (+125/-0)
nova/tests/test_flags.py (+36/-40)
nova/tests/test_hypervapi.py (+68/-14)
nova/tests/test_image_utils.py (+218/-0)
nova/tests/test_imagebackend.py (+12/-9)
nova/tests/test_imagecache.py (+146/-95)
nova/tests/test_instance_types.py (+25/-3)
nova/tests/test_iptables_network.py (+1/-1)
nova/tests/test_iscsi.py (+0/-121)
nova/tests/test_libvirt.py (+538/-256)
nova/tests/test_libvirt_config.py (+6/-0)
nova/tests/test_libvirt_utils.py (+38/-0)
nova/tests/test_libvirt_vif.py (+52/-9)
nova/tests/test_loadables.py (+113/-0)
nova/tests/test_matchers.py (+144/-0)
nova/tests/test_metadata.py (+5/-5)
nova/tests/test_migrations.py (+2/-201)
nova/tests/test_misc.py (+0/-94)
nova/tests/test_netapp.py (+0/-1380)
nova/tests/test_netapp_nfs.py (+0/-261)
nova/tests/test_nexenta.py (+0/-281)
nova/tests/test_nfs.py (+0/-630)
nova/tests/test_notifications.py (+31/-4)
nova/tests/test_nova_manage.py (+7/-2)
nova/tests/test_nova_rootwrap.py (+33/-26)
nova/tests/test_objectstore.py (+11/-10)
nova/tests/test_pipelib.py (+5/-5)
nova/tests/test_plugin_api_extensions.py (+0/-1)
nova/tests/test_policy.py (+62/-28)
nova/tests/test_powervm.py (+29/-5)
nova/tests/test_quota.py (+59/-104)
nova/tests/test_rbd.py (+0/-161)
nova/tests/test_service.py (+64/-51)
nova/tests/test_skip_examples.py (+0/-47)
nova/tests/test_solidfire.py (+0/-210)
nova/tests/test_storwize_svc.py (+0/-1239)
nova/tests/test_utils.py (+64/-64)
nova/tests/test_virt.py (+9/-8)
nova/tests/test_virt_drivers.py (+39/-54)
nova/tests/test_vmwareapi.py (+1/-5)
nova/tests/test_volume.py (+0/-929)
nova/tests/test_volume_types.py (+0/-167)
nova/tests/test_volume_types_extra_specs.py (+0/-130)
nova/tests/test_volume_utils.py (+0/-91)
nova/tests/test_xenapi.py (+398/-169)
nova/tests/test_xensm.py (+0/-143)
nova/tests/utils.py (+10/-3)
nova/tests/virt/xenapi/test_volumeops.py (+40/-0)
nova/tests/vmwareapi/db_fakes.py (+2/-1)
nova/tests/volume/test_HpSanISCSIDriver.py (+0/-212)
nova/tests/xenapi/stubs.py (+23/-58)
nova/tests/xenapi/test_vm_utils.py (+143/-3)
nova/utils.py (+95/-235)
nova/version.py (+2/-2)
nova/virt/baremetal/__init__.py (+0/-18)
nova/virt/baremetal/db/__init__.py (+16/-0)
nova/virt/baremetal/db/api.py (+174/-0)
nova/virt/baremetal/db/migration.py (+38/-0)
nova/virt/baremetal/db/sqlalchemy/__init__.py (+14/-0)
nova/virt/baremetal/db/sqlalchemy/api.py (+351/-0)
nova/virt/baremetal/db/sqlalchemy/migrate_repo/__init__.py (+14/-0)
nova/virt/baremetal/db/sqlalchemy/migrate_repo/migrate.cfg (+20/-0)
nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py (+124/-0)
nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/__init__.py (+14/-0)
nova/virt/baremetal/db/sqlalchemy/migration.py (+113/-0)
nova/virt/baremetal/db/sqlalchemy/models.py (+80/-0)
nova/virt/baremetal/db/sqlalchemy/session.py (+58/-0)
nova/virt/baremetal/doc/README.rst (+69/-0)
nova/virt/baremetal/dom.py (+0/-266)
nova/virt/baremetal/driver.py (+0/-744)
nova/virt/baremetal/fake.py (+0/-157)
nova/virt/baremetal/nodes.py (+0/-42)
nova/virt/baremetal/tilera.py (+0/-364)
nova/virt/configdrive.py (+29/-12)
nova/virt/connection.py (+0/-84)
nova/virt/disk/api.py (+12/-20)
nova/virt/disk/guestfs.py (+0/-121)
nova/virt/disk/loop.py (+0/-42)
nova/virt/disk/mount.py (+0/-162)
nova/virt/disk/mount/__init__.py (+19/-0)
nova/virt/disk/mount/api.py (+162/-0)
nova/virt/disk/mount/guestfs.py (+121/-0)
nova/virt/disk/mount/loop.py (+42/-0)
nova/virt/disk/mount/nbd.py (+110/-0)
nova/virt/disk/nbd.py (+0/-111)
nova/virt/disk/vfs/__init__.py (+19/-0)
nova/virt/disk/vfs/api.py (+107/-0)
nova/virt/driver.py (+86/-14)
nova/virt/fake.py (+140/-15)
nova/virt/firewall.py (+32/-30)
nova/virt/hyperv/constants.py (+3/-0)
nova/virt/hyperv/driver.py (+13/-16)
nova/virt/hyperv/hostops.py (+160/-0)
nova/virt/hyperv/livemigrationops.py (+5/-4)
nova/virt/hyperv/snapshotops.py (+3/-3)
nova/virt/hyperv/vmops.py (+134/-157)
nova/virt/hyperv/vmutils.py (+30/-13)
nova/virt/hyperv/volumeops.py (+6/-6)
nova/virt/hyperv/volumeutils.py (+5/-4)
nova/virt/images.py (+148/-22)
nova/virt/libvirt/config.py (+3/-1)
nova/virt/libvirt/driver.py (+280/-261)
nova/virt/libvirt/firewall.py (+37/-18)
nova/virt/libvirt/imagebackend.py (+80/-36)
nova/virt/libvirt/imagecache.py (+108/-77)
nova/virt/libvirt/snapshots.py (+89/-0)
nova/virt/libvirt/utils.py (+145/-71)
nova/virt/libvirt/vif.py (+37/-40)
nova/virt/libvirt/volume.py (+40/-25)
nova/virt/libvirt/volume_nfs.py (+4/-4)
nova/virt/netutils.py (+7/-8)
nova/virt/powervm/driver.py (+7/-24)
nova/virt/powervm/exception.py (+6/-1)
nova/virt/powervm/operator.py (+51/-30)
nova/virt/storage_users.py (+63/-0)
nova/virt/virtapi.py (+107/-0)
nova/virt/vmwareapi/driver.py (+14/-15)
nova/virt/vmwareapi/network_utils.py (+4/-4)
nova/virt/vmwareapi/read_write_util.py (+0/-3)
nova/virt/vmwareapi/vif.py (+4/-4)
nova/virt/vmwareapi/vim.py (+4/-5)
nova/virt/vmwareapi/vmops.py (+4/-5)
nova/virt/xenapi/agent.py (+183/-120)
nova/virt/xenapi/driver.py (+76/-60)
nova/virt/xenapi/fake.py (+13/-3)
nova/virt/xenapi/firewall.py (+4/-9)
nova/virt/xenapi/host.py (+45/-20)
nova/virt/xenapi/pool.py (+30/-23)
nova/virt/xenapi/pool_states.py (+1/-3)
nova/virt/xenapi/vif.py (+4/-5)
nova/virt/xenapi/vm_utils.py (+243/-168)
nova/virt/xenapi/vmops.py (+262/-201)
nova/virt/xenapi/volume_utils.py (+26/-21)
nova/virt/xenapi/volumeops.py (+39/-36)
nova/vnc/__init__.py (+2/-3)
nova/vnc/xvp_proxy.py (+4/-5)
nova/volume/__init__.py (+3/-2)
nova/volume/api.py (+0/-511)
nova/volume/cinder.py (+5/-6)
nova/volume/driver.py (+0/-953)
nova/volume/iscsi.py (+0/-233)
nova/volume/manager.py (+0/-452)
nova/volume/netapp.py (+0/-1291)
nova/volume/netapp_nfs.py (+0/-267)
nova/volume/nexenta/__init__.py (+0/-33)
nova/volume/nexenta/jsonrpc.py (+0/-84)
nova/volume/nexenta/volume.py (+0/-282)
nova/volume/nfs.py (+0/-293)
nova/volume/san.py (+0/-649)
nova/volume/solidfire.py (+0/-424)
nova/volume/storwize_svc.py (+0/-1249)
nova/volume/utils.py (+0/-84)
nova/volume/volume_types.py (+0/-125)
nova/volume/xensm.py (+0/-249)
nova/weights.py (+71/-0)
nova/wsgi.py (+17/-5)
openstack-common.conf (+1/-1)
plugins/xenserver/xenapi/contrib/rpmbuild/SPECS/openstack-xen-plugins.spec (+3/-0)
plugins/xenserver/xenapi/etc/xapi.d/plugins/_bittorrent_seeder (+121/-0)
plugins/xenserver/xenapi/etc/xapi.d/plugins/agent (+20/-14)
plugins/xenserver/xenapi/etc/xapi.d/plugins/bandwidth (+51/-0)
plugins/xenserver/xenapi/etc/xapi.d/plugins/bittorrent (+299/-0)
plugins/xenserver/xenapi/etc/xapi.d/plugins/utils.py (+13/-18)
plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost (+4/-2)
run_tests.sh (+14/-3)
setup.cfg (+0/-1)
setup.py (+11/-6)
smoketests/__init__.py (+0/-6)
smoketests/base.py (+1/-1)
smoketests/run_tests.py (+3/-3)
smoketests/test_sysadmin.py (+15/-3)
tools/conf/extract_opts.py (+1/-2)
tools/esx/guest_tool.py (+1/-1)
tools/hacking.py (+18/-10)
tools/install_venv.py (+2/-5)
tools/pip-requires (+5/-3)
tools/test-options (+0/-1)
tools/test-requires (+4/-2)
tools/xenserver/destroy_cached_images.py (+6/-6)
tools/xenserver/vm_vdi_cleaner.py (+2/-2)
tox.ini (+6/-9)
To merge this branch: bzr merge lp:~zulcss/ubuntu/precise/nova/trunk
Reviewer Review Type Date Requested Status
James Page Approve
Review via email: mp+136252@code.launchpad.net

Description of the change

nova g1

To post a comment you must log in.
Revision history for this message
James Page (james-page) wrote :

Hi Chuck

LGTM; one thing I did notice is that on both precise and raring the test suite has errors, but the package still builds despite this - thoughts?

https://jenkins.qa.ubuntu.com/view/Openstack_Testing/view/Grizzly/job/raring_grizzly_nova_trunk/187/consoleFull
https://jenkins.qa.ubuntu.com/view/Openstack_Testing/view/Grizzly/job/precise_grizzly_nova_trunk/186/consoleFull

review: Approve
Revision history for this message
James Page (james-page) wrote :

Uploaded to staging

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== modified file '.mailmap'
--- .mailmap 2012-08-27 15:37:18 +0000
+++ .mailmap 2012-11-26 19:55:29 +0000
@@ -21,7 +21,7 @@
21<devin.carlen@gmail.com> <devcamcar@illian.local>21<devin.carlen@gmail.com> <devcamcar@illian.local>
22<doug.hellmann@dreamhost.com> <doug.hellmann@gmail.com>22<doug.hellmann@dreamhost.com> <doug.hellmann@gmail.com>
23<dprince@redhat.com> <dan.prince@rackspace.com>23<dprince@redhat.com> <dan.prince@rackspace.com>
24<edouard1.thuleau@orange.com> <thuleau@gmail.com>24<edouard.thuleau@orange.com> <thuleau@gmail.com>
25<ewan.mellor@citrix.com> <emellor@silver>25<ewan.mellor@citrix.com> <emellor@silver>
26<ghe@debian.org> <ghe.rivero@gmail.com>26<ghe@debian.org> <ghe.rivero@gmail.com>
27<ilyaalekseyev@acm.org> <ialekseev@griddynamics.com>27<ilyaalekseyev@acm.org> <ialekseev@griddynamics.com>
@@ -85,6 +85,7 @@
85Dan Wendlandt <dan@nicira.com> danwent <danwent@dan-xs3-cs>85Dan Wendlandt <dan@nicira.com> danwent <danwent@dan-xs3-cs>
86Dan Wendlandt <dan@nicira.com> danwent@gmail.com <>86Dan Wendlandt <dan@nicira.com> danwent@gmail.com <>
87Dan Wendlandt <dan@nicira.com> danwent@gmail.com <dan@nicira.com>87Dan Wendlandt <dan@nicira.com> danwent@gmail.com <dan@nicira.com>
88Édouard Thuleau <edouard.thuleau@orange.com> Thuleau Édouard <thuleau@gmail.com>
88Jake Dahn <jake@ansolabs.com> jakedahn <jake@ansolabs.com>89Jake Dahn <jake@ansolabs.com> jakedahn <jake@ansolabs.com>
89Jason Koelker <jason@koelker.net> Jason Kölker <jason@koelker.net>90Jason Koelker <jason@koelker.net> Jason Kölker <jason@koelker.net>
90Jay Pipes <jaypipes@gmail.com> jaypipes@gmail.com <>91Jay Pipes <jaypipes@gmail.com> jaypipes@gmail.com <>
@@ -116,4 +117,3 @@
116Vishvananda Ishaya <vishvananda@gmail.com> <root@ubuntu>117Vishvananda Ishaya <vishvananda@gmail.com> <root@ubuntu>
117Vivek YS <vivek.ys@gmail.com> Vivek YS vivek.ys@gmail.com <>118Vivek YS <vivek.ys@gmail.com> Vivek YS vivek.ys@gmail.com <>
118Zhongyue Luo <zhongyue.nah@intel.com> <lzyeval@gmail.com>119Zhongyue Luo <zhongyue.nah@intel.com> <lzyeval@gmail.com>
119Édouard Thuleau <edouard1.thuleau@orange.com> Thuleau Édouard <thuleau@gmail.com>
120120
=== removed directory '.pc'
=== removed file '.pc/.quilt_patches'
--- .pc/.quilt_patches 2012-07-06 10:18:33 +0000
+++ .pc/.quilt_patches 1970-01-01 00:00:00 +0000
@@ -1,1 +0,0 @@
1debian/patches
20
=== removed file '.pc/.quilt_series'
--- .pc/.quilt_series 2012-07-06 10:18:33 +0000
+++ .pc/.quilt_series 1970-01-01 00:00:00 +0000
@@ -1,1 +0,0 @@
1series
20
=== removed file '.pc/.version'
--- .pc/.version 2010-10-08 23:16:58 +0000
+++ .pc/.version 1970-01-01 00:00:00 +0000
@@ -1,1 +0,0 @@
12
20
=== removed file '.pc/applied-patches'
--- .pc/applied-patches 2012-10-12 12:35:01 +0000
+++ .pc/applied-patches 1970-01-01 00:00:00 +0000
@@ -1,9 +0,0 @@
1path-to-the-xenhost.conf-fixup.patch
2fix-ubuntu-tests.patch
3fix-docs-build-without-network.patch
4avoid_setuptools_git_dependency.patch
5rbd-security.patch
6ubuntu/ubuntu-fix-32-64-bit-iss.patch
7ubuntu/ubuntu-fix-ec2-instance-id-mappings.patch
8ubuntu/fix-libvirt-firewall-slowdown.patch
9ubuntu/fix-ec2-volume-id-mappings.patch
100
=== removed directory '.pc/avoid_setuptools_git_dependency.patch'
=== removed directory '.pc/avoid_setuptools_git_dependency.patch/tools'
=== removed file '.pc/avoid_setuptools_git_dependency.patch/tools/pip-requires'
--- .pc/avoid_setuptools_git_dependency.patch/tools/pip-requires 2012-10-12 12:35:01 +0000
+++ .pc/avoid_setuptools_git_dependency.patch/tools/pip-requires 1970-01-01 00:00:00 +0000
@@ -1,23 +0,0 @@
1SQLAlchemy>=0.7.3
2Cheetah==2.4.4
3amqplib==0.6.1
4anyjson==0.2.4
5boto==2.1.1
6eventlet>=0.9.17
7kombu==1.0.4
8lxml>=2.3,<=2.3.5
9routes==1.12.3
10WebOb==1.0.8
11greenlet>=0.3.1
12PasteDeploy==1.5.0
13paste
14sqlalchemy-migrate>=0.7.2
15netaddr
16suds==0.4
17paramiko
18Babel>=0.9.6
19iso8601>=0.1.4
20httplib2
21setuptools_git>=0.4
22python-quantumclient>=2.0
23python-glanceclient>=0.5.0,<2
240
=== removed directory '.pc/fix-docs-build-without-network.patch'
=== removed directory '.pc/fix-docs-build-without-network.patch/doc'
=== removed directory '.pc/fix-docs-build-without-network.patch/doc/source'
=== removed file '.pc/fix-docs-build-without-network.patch/doc/source/conf.py'
--- .pc/fix-docs-build-without-network.patch/doc/source/conf.py 2012-09-20 07:45:50 +0000
+++ .pc/fix-docs-build-without-network.patch/doc/source/conf.py 1970-01-01 00:00:00 +0000
@@ -1,279 +0,0 @@
1# -*- coding: utf-8 -*-
2#
3# nova documentation build configuration file, created by
4# sphinx-quickstart on Sat May 1 15:17:47 2010.
5#
6# This file is execfile()d with the current directory set to
7# its containing dir.
8#
9# Note that not all possible configuration values are present in this
10# autogenerated file.
11#
12# All configuration values have a default; values that are commented out
13# serve to show the default.
14
15import sys
16import os
17
18# If extensions (or modules to document with autodoc) are in another directory,
19# add these directories to sys.path here. If the directory is relative to the
20# documentation root, use os.path.abspath to make it absolute, like shown here.
21sys.path.insert(0, os.path.abspath('../../'))
22sys.path.insert(0, os.path.abspath('../'))
23sys.path.insert(0, os.path.abspath('./'))
24
25# -- General configuration ----------------------------------------------------
26
27# Add any Sphinx extension module names here, as strings. They can be
28# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
29
30extensions = ['sphinx.ext.autodoc',
31 'sphinx.ext.intersphinx',
32 'ext.nova_todo',
33 'sphinx.ext.coverage',
34 'sphinx.ext.pngmath',
35 'sphinx.ext.ifconfig',
36 'sphinx.ext.graphviz']
37
38todo_include_todos = True
39
40# Add any paths that contain templates here, relative to this directory.
41# Changing the path so that the Hudson build output contains GA code
42# and the source docs do not contain the code so local, offline sphinx builds
43# are "clean."
44templates_path = []
45if os.getenv('HUDSON_PUBLISH_DOCS'):
46 templates_path = ['_ga', '_templates']
47else:
48 templates_path = ['_templates']
49
50# The suffix of source filenames.
51source_suffix = '.rst'
52
53# The encoding of source files.
54#source_encoding = 'utf-8'
55
56# The master toctree document.
57master_doc = 'index'
58
59# General information about the project.
60project = u'nova'
61copyright = u'2010-present, OpenStack, LLC'
62
63# The version info for the project you're documenting, acts as replacement for
64# |version| and |release|, also used in various other places throughout the
65# built documents.
66#
67from nova import version as nova_version
68#import nova.version
69# The full version, including alpha/beta/rc tags.
70release = nova_version.version_string()
71# The short X.Y version.
72version = nova_version.canonical_version_string()
73
74# The language for content autogenerated by Sphinx. Refer to documentation
75# for a list of supported languages.
76#language = None
77
78# There are two options for replacing |today|: either, you set today to some
79# non-false value, then it is used:
80#today = ''
81# Else, today_fmt is used as the format for a strftime call.
82#today_fmt = '%B %d, %Y'
83
84# List of documents that shouldn't be included in the build.
85unused_docs = [
86 'api_ext/rst_extension_template',
87 'vmwareapi_readme',
88 'installer',
89]
90
91# List of directories, relative to source directory, that shouldn't be searched
92# for source files.
93exclude_trees = []
94
95# The reST default role (used for this markup: `text`) to use
96# for all documents.
97#default_role = None
98
99# If true, '()' will be appended to :func: etc. cross-reference text.
100#add_function_parentheses = True
101
102# If true, the current module name will be prepended to all description
103# unit titles (such as .. function::).
104add_module_names = False
105
106# If true, sectionauthor and moduleauthor directives will be shown in the
107# output. They are ignored by default.
108show_authors = False
109
110# The name of the Pygments (syntax highlighting) style to use.
111pygments_style = 'sphinx'
112
113# A list of ignored prefixes for module index sorting.
114modindex_common_prefix = ['nova.']
115
116# -- Options for man page output ----------------------------------------------
117
118# Grouping the document tree for man pages.
119# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
120
121man_pages = [
122 ('man/nova-all', 'nova-all', u'Cloud controller fabric',
123 [u'OpenStack'], 1),
124 ('man/nova-api-ec2', 'nova-api-ec2', u'Cloud controller fabric',
125 [u'OpenStack'], 1),
126 ('man/nova-api-metadata', 'nova-api-metadata', u'Cloud controller fabric',
127 [u'OpenStack'], 1),
128 ('man/nova-api-os-compute', 'nova-api-os-compute',
129 u'Cloud controller fabric', [u'OpenStack'], 1),
130 ('man/nova-api-os-volume', 'nova-api-os-volume',
131 u'Cloud controller fabric', [u'OpenStack'], 1),
132 ('man/nova-api', 'nova-api', u'Cloud controller fabric',
133 [u'OpenStack'], 1),
134 ('man/nova-cert', 'nova-cert', u'Cloud controller fabric',
135 [u'OpenStack'], 1),
136 ('man/nova-compute', 'nova-compute', u'Cloud controller fabric',
137 [u'OpenStack'], 1),
138 ('man/nova-console', 'nova-console', u'Cloud controller fabric',
139 [u'OpenStack'], 1),
140 ('man/nova-consoleauth', 'nova-consoleauth', u'Cloud controller fabric',
141 [u'OpenStack'], 1),
142 ('man/nova-dhcpbridge', 'nova-dhcpbridge', u'Cloud controller fabric',
143 [u'OpenStack'], 1),
144 ('man/nova-manage', 'nova-manage', u'Cloud controller fabric',
145 [u'OpenStack'], 1),
146 ('man/nova-network', 'nova-network', u'Cloud controller fabric',
147 [u'OpenStack'], 1),
148 ('man/nova-novncproxy', 'nova-novncproxy', u'Cloud controller fabric',
149 [u'OpenStack'], 1),
150 ('man/nova-objectstore', 'nova-objectstore', u'Cloud controller fabric',
151 [u'OpenStack'], 1),
152 ('man/nova-rootwrap', 'nova-rootwrap', u'Cloud controller fabric',
153 [u'OpenStack'], 1),
154 ('man/nova-rpc-zmq-receiver', 'nova-rpc-zmq-receiver', u'Cloud controller fabric',
155 [u'OpenStack'], 1),
156 ('man/nova-scheduler', 'nova-scheduler', u'Cloud controller fabric',
157 [u'OpenStack'], 1),
158 ('man/nova-volume-usage-audit', 'nova-volume-usage-audit', u'Cloud controller fabric',
159 [u'OpenStack'], 1),
160 ('man/nova-volume', 'nova-volume', u'Cloud controller fabric',
161 [u'OpenStack'], 1),
162 ('man/nova-xvpvncproxy', 'nova-xvpvncproxy', u'Cloud controller fabric',
163 [u'OpenStack'], 1)
164]
165
166# -- Options for HTML output --------------------------------------------------
167
168# The theme to use for HTML and HTML Help pages. Major themes that come with
169# Sphinx are currently 'default' and 'sphinxdoc'.
170html_theme_path = ["."]
171html_theme = '_theme'
172
173# Theme options are theme-specific and customize the look and feel of a theme
174# further. For a list of options available for each theme, see the
175# documentation.
176#html_theme_options = {}
177
178# Add any paths that contain custom themes here, relative to this directory.
179#html_theme_path = []
180
181# The name for this set of Sphinx documents. If None, it defaults to
182# "<project> v<release> documentation".
183#html_title = None
184
185# A shorter title for the navigation bar. Default is the same as html_title.
186#html_short_title = None
187
188# The name of an image file (relative to this directory) to place at the top
189# of the sidebar.
190#html_logo = None
191
192# The name of an image file (within the static path) to use as favicon of the
193# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
194# pixels large.
195#html_favicon = None
196
197# Add any paths that contain custom static files (such as style sheets) here,
198# relative to this directory. They are copied after the builtin static files,
199# so a file named "default.css" will overwrite the builtin "default.css".
200html_static_path = ['_static']
201
202# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
203# using the given strftime format.
204#html_last_updated_fmt = '%b %d, %Y'
205git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
206html_last_updated_fmt = os.popen(git_cmd).read()
207
208# If true, SmartyPants will be used to convert quotes and dashes to
209# typographically correct entities.
210#html_use_smartypants = True
211
212# Custom sidebar templates, maps document names to template names.
213#html_sidebars = {}
214
215# Additional templates that should be rendered to pages, maps page names to
216# template names.
217#html_additional_pages = {}
218
219# If false, no module index is generated.
220#html_use_modindex = True
221
222# If false, no index is generated.
223#html_use_index = True
224
225# If true, the index is split into individual pages for each letter.
226#html_split_index = False
227
228# If true, links to the reST sources are added to the pages.
229#html_show_sourcelink = True
230
231# If true, an OpenSearch description file will be output, and all pages will
232# contain a <link> tag referring to it. The value of this option must be the
233# base URL from which the finished HTML is served.
234#html_use_opensearch = ''
235
236# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
237#html_file_suffix = ''
238
239# Output file base name for HTML help builder.
240htmlhelp_basename = 'novadoc'
241
242
243# -- Options for LaTeX output -------------------------------------------------
244
245# The paper size ('letter' or 'a4').
246#latex_paper_size = 'letter'
247
248# The font size ('10pt', '11pt' or '12pt').
249#latex_font_size = '10pt'
250
251# Grouping the document tree into LaTeX files. List of tuples
252# (source start file, target name, title, author, documentclass
253# [howto/manual]).
254latex_documents = [
255 ('index', 'Nova.tex', u'Nova Documentation',
256 u'Anso Labs, LLC', 'manual'),
257]
258
259# The name of an image file (relative to this directory) to place at the top of
260# the title page.
261#latex_logo = None
262
263# For "manual" documents, if this is true, then toplevel headings are parts,
264# not chapters.
265#latex_use_parts = False
266
267# Additional stuff for the LaTeX preamble.
268#latex_preamble = ''
269
270# Documents to append as an appendix to all manuals.
271#latex_appendices = []
272
273# If false, no module index is generated.
274#latex_use_modindex = True
275
276
277# Example configuration for intersphinx: refer to the Python standard library.
278intersphinx_mapping = {'python': ('http://docs.python.org/', None),
279 'swift': ('http://swift.openstack.org', None)}
2800
=== removed directory '.pc/fix-ubuntu-tests.patch'
=== removed directory '.pc/fix-ubuntu-tests.patch/nova'
=== removed directory '.pc/fix-ubuntu-tests.patch/nova/tests'
=== removed file '.pc/fix-ubuntu-tests.patch/nova/tests/test_api.py'
--- .pc/fix-ubuntu-tests.patch/nova/tests/test_api.py 2012-08-16 14:04:11 +0000
+++ .pc/fix-ubuntu-tests.patch/nova/tests/test_api.py 1970-01-01 00:00:00 +0000
@@ -1,616 +0,0 @@
1# vim: tabstop=4 shiftwidth=4 softtabstop=4
2
3# Copyright 2010 United States Government as represented by the
4# Administrator of the National Aeronautics and Space Administration.
5# All Rights Reserved.
6#
7# Licensed under the Apache License, Version 2.0 (the "License"); you may
8# not use this file except in compliance with the License. You may obtain
9# a copy of the License at
10#
11# http://www.apache.org/licenses/LICENSE-2.0
12#
13# Unless required by applicable law or agreed to in writing, software
14# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
15# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
16# License for the specific language governing permissions and limitations
17# under the License.
18
19"""Unit tests for the API endpoint"""
20
21import random
22import StringIO
23
24import boto
25from boto.ec2 import regioninfo
26from boto import exception as boto_exc
27# newer versions of boto use their own wrapper on top of httplib.HTTPResponse
28try:
29 from boto.connection import HTTPResponse
30except ImportError:
31 from httplib import HTTPResponse
32import webob
33
34from nova.api import auth
35from nova.api import ec2
36from nova.api.ec2 import apirequest
37from nova.api.ec2 import ec2utils
38from nova import block_device
39from nova import context
40from nova import exception
41from nova import flags
42from nova.openstack.common import timeutils
43from nova import test
44
45
46FLAGS = flags.FLAGS
47
48
49class FakeHttplibSocket(object):
50 """a fake socket implementation for httplib.HTTPResponse, trivial"""
51 def __init__(self, response_string):
52 self.response_string = response_string
53 self._buffer = StringIO.StringIO(response_string)
54
55 def makefile(self, _mode, _other):
56 """Returns the socket's internal buffer"""
57 return self._buffer
58
59
60class FakeHttplibConnection(object):
61 """A fake httplib.HTTPConnection for boto to use
62
63 requests made via this connection actually get translated and routed into
64 our WSGI app, we then wait for the response and turn it back into
65 the HTTPResponse that boto expects.
66 """
67 def __init__(self, app, host, is_secure=False):
68 self.app = app
69 self.host = host
70
71 def request(self, method, path, data, headers):
72 req = webob.Request.blank(path)
73 req.method = method
74 req.body = data
75 req.headers = headers
76 req.headers['Accept'] = 'text/html'
77 req.host = self.host
78 # Call the WSGI app, get the HTTP response
79 resp = str(req.get_response(self.app))
80 # For some reason, the response doesn't have "HTTP/1.0 " prepended; I
81 # guess that's a function the web server usually provides.
82 resp = "HTTP/1.0 %s" % resp
83 self.sock = FakeHttplibSocket(resp)
84 self.http_response = HTTPResponse(self.sock)
85 # NOTE(vish): boto is accessing private variables for some reason
86 self._HTTPConnection__response = self.http_response
87 self.http_response.begin()
88
89 def getresponse(self):
90 return self.http_response
91
92 def getresponsebody(self):
93 return self.sock.response_string
94
95 def close(self):
96 """Required for compatibility with boto/tornado"""
97 pass
98
99
100class XmlConversionTestCase(test.TestCase):
101 """Unit test api xml conversion"""
102 def test_number_conversion(self):
103 conv = ec2utils._try_convert
104 self.assertEqual(conv('None'), None)
105 self.assertEqual(conv('True'), True)
106 self.assertEqual(conv('TRUE'), True)
107 self.assertEqual(conv('true'), True)
108 self.assertEqual(conv('False'), False)
109 self.assertEqual(conv('FALSE'), False)
110 self.assertEqual(conv('false'), False)
111 self.assertEqual(conv('0'), 0)
112 self.assertEqual(conv('42'), 42)
113 self.assertEqual(conv('3.14'), 3.14)
114 self.assertEqual(conv('-57.12'), -57.12)
115 self.assertEqual(conv('0x57'), 0x57)
116 self.assertEqual(conv('-0x57'), -0x57)
117 self.assertEqual(conv('-'), '-')
118 self.assertEqual(conv('-0'), 0)
119 self.assertEqual(conv('0.0'), 0.0)
120 self.assertEqual(conv('1e-8'), 0.0)
121 self.assertEqual(conv('-1e-8'), 0.0)
122 self.assertEqual(conv('0xDD8G'), '0xDD8G')
123 self.assertEqual(conv('0XDD8G'), '0XDD8G')
124 self.assertEqual(conv('-stringy'), '-stringy')
125 self.assertEqual(conv('stringy'), 'stringy')
126 self.assertEqual(conv('add'), 'add')
127 self.assertEqual(conv('remove'), 'remove')
128 self.assertEqual(conv(''), '')
129
130
131class Ec2utilsTestCase(test.TestCase):
132 def test_ec2_id_to_id(self):
133 self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30)
134 self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29)
135 self.assertEqual(ec2utils.ec2_id_to_id('snap-0000001c'), 28)
136 self.assertEqual(ec2utils.ec2_id_to_id('vol-0000001b'), 27)
137
138 def test_bad_ec2_id(self):
139 self.assertRaises(exception.InvalidEc2Id,
140 ec2utils.ec2_id_to_id,
141 'badone')
142
143 def test_id_to_ec2_id(self):
144 self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e')
145 self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d')
146 self.assertEqual(ec2utils.id_to_ec2_snap_id(28), 'snap-0000001c')
147 self.assertEqual(ec2utils.id_to_ec2_vol_id(27), 'vol-0000001b')
148
149 def test_dict_from_dotted_str(self):
150 in_str = [('BlockDeviceMapping.1.DeviceName', '/dev/sda1'),
151 ('BlockDeviceMapping.1.Ebs.SnapshotId', 'snap-0000001c'),
152 ('BlockDeviceMapping.1.Ebs.VolumeSize', '80'),
153 ('BlockDeviceMapping.1.Ebs.DeleteOnTermination', 'false'),
154 ('BlockDeviceMapping.2.DeviceName', '/dev/sdc'),
155 ('BlockDeviceMapping.2.VirtualName', 'ephemeral0')]
156 expected_dict = {
157 'block_device_mapping': {
158 '1': {'device_name': '/dev/sda1',
159 'ebs': {'snapshot_id': 'snap-0000001c',
160 'volume_size': 80,
161 'delete_on_termination': False}},
162 '2': {'device_name': '/dev/sdc',
163 'virtual_name': 'ephemeral0'}}}
164 out_dict = ec2utils.dict_from_dotted_str(in_str)
165
166 self.assertDictMatch(out_dict, expected_dict)
167
168 def test_properties_root_defice_name(self):
169 mappings = [{"device": "/dev/sda1", "virtual": "root"}]
170 properties0 = {'mappings': mappings}
171 properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings}
172
173 root_device_name = block_device.properties_root_device_name(
174 properties0)
175 self.assertEqual(root_device_name, '/dev/sda1')
176
177 root_device_name = block_device.properties_root_device_name(
178 properties1)
179 self.assertEqual(root_device_name, '/dev/sdb')
180
181 def test_mapping_prepend_dev(self):
182 mappings = [
183 {'virtual': 'ami',
184 'device': 'sda1'},
185 {'virtual': 'root',
186 'device': '/dev/sda1'},
187
188 {'virtual': 'swap',
189 'device': 'sdb1'},
190 {'virtual': 'swap',
191 'device': '/dev/sdb2'},
192
193 {'virtual': 'ephemeral0',
194 'device': 'sdc1'},
195 {'virtual': 'ephemeral1',
196 'device': '/dev/sdc1'}]
197 expected_result = [
198 {'virtual': 'ami',
199 'device': 'sda1'},
200 {'virtual': 'root',
201 'device': '/dev/sda1'},
202
203 {'virtual': 'swap',
204 'device': '/dev/sdb1'},
205 {'virtual': 'swap',
206 'device': '/dev/sdb2'},
207
208 {'virtual': 'ephemeral0',
209 'device': '/dev/sdc1'},
210 {'virtual': 'ephemeral1',
211 'device': '/dev/sdc1'}]
212 self.assertDictListMatch(block_device.mappings_prepend_dev(mappings),
213 expected_result)
214
215
216class ApiEc2TestCase(test.TestCase):
217 """Unit test for the cloud controller on an EC2 API"""
218 def setUp(self):
219 super(ApiEc2TestCase, self).setUp()
220 self.host = '127.0.0.1'
221 # NOTE(vish): skipping the Authorizer
222 roles = ['sysadmin', 'netadmin']
223 ctxt = context.RequestContext('fake', 'fake', roles=roles)
224 self.app = auth.InjectContext(ctxt, ec2.FaultWrapper(
225 ec2.RequestLogging(ec2.Requestify(ec2.Authorizer(ec2.Executor()
226 ), 'nova.api.ec2.cloud.CloudController'))))
227
228 def expect_http(self, host=None, is_secure=False, api_version=None):
229 """Returns a new EC2 connection"""
230 self.ec2 = boto.connect_ec2(
231 aws_access_key_id='fake',
232 aws_secret_access_key='fake',
233 is_secure=False,
234 region=regioninfo.RegionInfo(None, 'test', self.host),
235 port=8773,
236 path='/services/Cloud')
237 if api_version:
238 self.ec2.APIVersion = api_version
239
240 self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
241 self.http = FakeHttplibConnection(
242 self.app, '%s:8773' % (self.host), False)
243 # pylint: disable=E1103
244 if boto.Version >= '2':
245 self.ec2.new_http_connection(host or '%s:8773' % (self.host),
246 is_secure).AndReturn(self.http)
247 else:
248 self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
249 return self.http
250
251 def test_return_valid_isoformat(self):
252 """
253 Ensure that the ec2 api returns datetime in xs:dateTime
254 (which apparently isn't datetime.isoformat())
255 NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297
256 """
257 conv = apirequest._database_to_isoformat
258 # sqlite database representation with microseconds
259 time_to_convert = timeutils.parse_strtime("2011-02-21 20:14:10.634276",
260 "%Y-%m-%d %H:%M:%S.%f")
261 self.assertEqual(conv(time_to_convert), '2011-02-21T20:14:10.634Z')
262 # mysqlite database representation
263 time_to_convert = timeutils.parse_strtime("2011-02-21 19:56:18",
264 "%Y-%m-%d %H:%M:%S")
265 self.assertEqual(conv(time_to_convert), '2011-02-21T19:56:18.000Z')
266
267 def test_xmlns_version_matches_request_version(self):
268 self.expect_http(api_version='2010-10-30')
269 self.mox.ReplayAll()
270
271 # Any request should be fine
272 self.ec2.get_all_instances()
273 self.assertTrue(self.ec2.APIVersion in self.http.getresponsebody(),
274 'The version in the xmlns of the response does '
275 'not match the API version given in the request.')
276
277 def test_describe_instances(self):
278 """Test that, after creating a user and a project, the describe
279 instances call to the API works properly"""
280 self.expect_http()
281 self.mox.ReplayAll()
282 self.assertEqual(self.ec2.get_all_instances(), [])
283
284 def test_terminate_invalid_instance(self):
285 """Attempt to terminate an invalid instance"""
286 self.expect_http()
287 self.mox.ReplayAll()
288 self.assertRaises(boto_exc.EC2ResponseError,
289 self.ec2.terminate_instances, "i-00000005")
290
291 def test_get_all_key_pairs(self):
292 """Test that, after creating a user and project and generating
293 a key pair, that the API call to list key pairs works properly"""
294 keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
295 for x in range(random.randint(4, 8)))
296 self.expect_http()
297 self.mox.ReplayAll()
298 self.ec2.create_key_pair(keyname)
299 rv = self.ec2.get_all_key_pairs()
300 results = [k for k in rv if k.name == keyname]
301 self.assertEquals(len(results), 1)
302
303 def test_create_duplicate_key_pair(self):
304 """Test that, after successfully generating a keypair,
305 requesting a second keypair with the same name fails sanely"""
306 self.expect_http()
307 self.mox.ReplayAll()
308 self.ec2.create_key_pair('test')
309
310 try:
311 self.ec2.create_key_pair('test')
312 except boto_exc.EC2ResponseError, e:
313 if e.code == 'KeyPairExists':
314 pass
315 else:
316 self.fail("Unexpected EC2ResponseError: %s "
317 "(expected KeyPairExists)" % e.code)
318 else:
319 self.fail('Exception not raised.')
320
321 def test_get_all_security_groups(self):
322 """Test that we can retrieve security groups"""
323 self.expect_http()
324 self.mox.ReplayAll()
325
326 rv = self.ec2.get_all_security_groups()
327
328 self.assertEquals(len(rv), 1)
329 self.assertEquals(rv[0].name, 'default')
330
331 def test_create_delete_security_group(self):
332 """Test that we can create a security group"""
333 self.expect_http()
334 self.mox.ReplayAll()
335
336 security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
337 for x in range(random.randint(4, 8)))
338
339 self.ec2.create_security_group(security_group_name, 'test group')
340
341 self.expect_http()
342 self.mox.ReplayAll()
343
344 rv = self.ec2.get_all_security_groups()
345 self.assertEquals(len(rv), 2)
346 self.assertTrue(security_group_name in [group.name for group in rv])
347
348 self.expect_http()
349 self.mox.ReplayAll()
350
351 self.ec2.delete_security_group(security_group_name)
352
353 def test_group_name_valid_chars_security_group(self):
354 """ Test that we sanely handle invalid security group names.
355 EC2 API Spec states we should only accept alphanumeric characters,
356 spaces, dashes, and underscores. Amazon implementation
357 accepts more characters - so, [:print:] is ok. """
358
359 bad_strict_ec2 = "aa \t\x01\x02\x7f"
360 bad_amazon_ec2 = "aa #^% -=99"
361 test_raise = [
362 (True, bad_amazon_ec2, "test desc"),
363 (True, "test name", bad_amazon_ec2),
364 (False, bad_strict_ec2, "test desc"),
365 ]
366 for test in test_raise:
367 self.expect_http()
368 self.mox.ReplayAll()
369 FLAGS.ec2_strict_validation = test[0]
370 self.assertRaises(boto_exc.EC2ResponseError,
371 self.ec2.create_security_group,
372 test[1],
373 test[2])
374 test_accept = [
375 (False, bad_amazon_ec2, "test desc"),
376 (False, "test name", bad_amazon_ec2),
377 ]
378 for test in test_accept:
379 self.expect_http()
380 self.mox.ReplayAll()
381 FLAGS.ec2_strict_validation = test[0]
382 self.ec2.create_security_group(test[1], test[2])
383 self.expect_http()
384 self.mox.ReplayAll()
385 self.ec2.delete_security_group(test[1])
386
387 def test_group_name_valid_length_security_group(self):
388 """Test that we sanely handle invalid security group names.
389 API Spec states that the length should not exceed 255 chars """
390 self.expect_http()
391 self.mox.ReplayAll()
392
393 # Test block group_name > 255 chars
394 security_group_name = "".join(random.choice("poiuytrewqasdfghjklmnbvc")
395 for x in range(random.randint(256, 266)))
396
397 self.assertRaises(boto_exc.EC2ResponseError,
398 self.ec2.create_security_group,
399 security_group_name,
400 'test group')
401
402 def test_authorize_revoke_security_group_cidr(self):
403 """
404 Test that we can add and remove CIDR based rules
405 to a security group
406 """
407 self.expect_http()
408 self.mox.ReplayAll()
409
410 security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
411 for x in range(random.randint(4, 8)))
412
413 group = self.ec2.create_security_group(security_group_name,
414 'test group')
415
416 self.expect_http()
417 self.mox.ReplayAll()
418 group.connection = self.ec2
419
420 group.authorize('tcp', 80, 81, '0.0.0.0/0')
421 group.authorize('icmp', -1, -1, '0.0.0.0/0')
422 group.authorize('udp', 80, 81, '0.0.0.0/0')
423 group.authorize('tcp', 1, 65535, '0.0.0.0/0')
424 group.authorize('udp', 1, 65535, '0.0.0.0/0')
425 group.authorize('icmp', 1, 0, '0.0.0.0/0')
426 group.authorize('icmp', 0, 1, '0.0.0.0/0')
427 group.authorize('icmp', 0, 0, '0.0.0.0/0')
428
429 def _assert(message, *args):
430 try:
431 group.authorize(*args)
432 except boto_exc.EC2ResponseError as e:
433 self.assertEqual(e.status, 400, 'Expected status to be 400')
434 self.assertIn(message, e.error_message, e.error_message)
435 else:
436 raise self.failureException, 'EC2ResponseError not raised'
437
438 # Invalid CIDR address
439 _assert('Invalid CIDR', 'tcp', 80, 81, '0.0.0.0/0444')
440 # Missing ports
441 _assert('Not enough parameters', 'tcp', '0.0.0.0/0')
442 # from port cannot be greater than to port
443 _assert('Invalid port range', 'tcp', 100, 1, '0.0.0.0/0')
444 # For tcp, negative values are not allowed
445 _assert('Invalid port range', 'tcp', -1, 1, '0.0.0.0/0')
446 # For tcp, valid port range 1-65535
447 _assert('Invalid port range', 'tcp', 1, 65599, '0.0.0.0/0')
448 # Invalid Cidr for ICMP type
449 _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.444.0/4')
450 # Invalid protocol
451 _assert('An unknown error has occurred', 'xyz', 1, 14, '0.0.0.0/0')
452 # Invalid port
453 _assert('An unknown error has occurred', 'tcp', " ", "81", '0.0.0.0/0')
454 # Invalid icmp port
455 _assert('An unknown error has occurred', 'icmp', " ", "81",
456 '0.0.0.0/0')
457 # Invalid CIDR Address
458 _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0')
459 # Invalid CIDR Address
460 _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0/')
461 # Invalid Cidr ports
462 _assert('Invalid port range', 'icmp', 1, 256, '0.0.0.0/0')
463
464 self.expect_http()
465 self.mox.ReplayAll()
466
467 rv = self.ec2.get_all_security_groups()
468
469 group = [grp for grp in rv if grp.name == security_group_name][0]
470
471 self.assertEquals(len(group.rules), 8)
472 self.assertEquals(int(group.rules[0].from_port), 80)
473 self.assertEquals(int(group.rules[0].to_port), 81)
474 self.assertEquals(len(group.rules[0].grants), 1)
475 self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0')
476
477 self.expect_http()
478 self.mox.ReplayAll()
479 group.connection = self.ec2
480
481 group.revoke('tcp', 80, 81, '0.0.0.0/0')
482 group.revoke('icmp', -1, -1, '0.0.0.0/0')
483 group.revoke('udp', 80, 81, '0.0.0.0/0')
484 group.revoke('tcp', 1, 65535, '0.0.0.0/0')
485 group.revoke('udp', 1, 65535, '0.0.0.0/0')
486 group.revoke('icmp', 1, 0, '0.0.0.0/0')
487 group.revoke('icmp', 0, 1, '0.0.0.0/0')
488 group.revoke('icmp', 0, 0, '0.0.0.0/0')
489
490 self.expect_http()
491 self.mox.ReplayAll()
492
493 self.ec2.delete_security_group(security_group_name)
494
495 self.expect_http()
496 self.mox.ReplayAll()
497 group.connection = self.ec2
498
499 rv = self.ec2.get_all_security_groups()
500
501 self.assertEqual(len(rv), 1)
502 self.assertEqual(rv[0].name, 'default')
503
504 def test_authorize_revoke_security_group_cidr_v6(self):
505 """
506 Test that we can add and remove CIDR based rules
507 to a security group for IPv6
508 """
509 self.expect_http()
510 self.mox.ReplayAll()
511
512 security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
513 for x in range(random.randint(4, 8)))
514
515 group = self.ec2.create_security_group(security_group_name,
516 'test group')
517
518 self.expect_http()
519 self.mox.ReplayAll()
520 group.connection = self.ec2
521
522 group.authorize('tcp', 80, 81, '::/0')
523
524 self.expect_http()
525 self.mox.ReplayAll()
526
527 rv = self.ec2.get_all_security_groups()
528
529 group = [grp for grp in rv if grp.name == security_group_name][0]
530 self.assertEquals(len(group.rules), 1)
531 self.assertEquals(int(group.rules[0].from_port), 80)
532 self.assertEquals(int(group.rules[0].to_port), 81)
533 self.assertEquals(len(group.rules[0].grants), 1)
534 self.assertEquals(str(group.rules[0].grants[0]), '::/0')
535
536 self.expect_http()
537 self.mox.ReplayAll()
538 group.connection = self.ec2
539
540 group.revoke('tcp', 80, 81, '::/0')
541
542 self.expect_http()
543 self.mox.ReplayAll()
544
545 self.ec2.delete_security_group(security_group_name)
546
547 self.expect_http()
548 self.mox.ReplayAll()
549 group.connection = self.ec2
550
551 rv = self.ec2.get_all_security_groups()
552
553 self.assertEqual(len(rv), 1)
554 self.assertEqual(rv[0].name, 'default')
555
556 def test_authorize_revoke_security_group_foreign_group(self):
557 """
558 Test that we can grant and revoke another security group access
559 to a security group
560 """
561 self.expect_http()
562 self.mox.ReplayAll()
563
564 rand_string = 'sdiuisudfsdcnpaqwertasd'
565 security_group_name = "".join(random.choice(rand_string)
566 for x in range(random.randint(4, 8)))
567 other_security_group_name = "".join(random.choice(rand_string)
568 for x in range(random.randint(4, 8)))
569
570 group = self.ec2.create_security_group(security_group_name,
571 'test group')
572
573 self.expect_http()
574 self.mox.ReplayAll()
575
576 other_group = self.ec2.create_security_group(other_security_group_name,
577 'some other group')
578
579 self.expect_http()
580 self.mox.ReplayAll()
581 group.connection = self.ec2
582
583 group.authorize(src_group=other_group)
584
585 self.expect_http()
586 self.mox.ReplayAll()
587
588 rv = self.ec2.get_all_security_groups()
589
590 # I don't bother checkng that we actually find it here,
591 # because the create/delete unit test further up should
592 # be good enough for that.
593 for group in rv:
594 if group.name == security_group_name:
595 self.assertEquals(len(group.rules), 3)
596 self.assertEquals(len(group.rules[0].grants), 1)
597 self.assertEquals(str(group.rules[0].grants[0]), '%s-%s' %
598 (other_security_group_name, 'fake'))
599
600 self.expect_http()
601 self.mox.ReplayAll()
602
603 rv = self.ec2.get_all_security_groups()
604
605 for group in rv:
606 if group.name == security_group_name:
607 self.expect_http()
608 self.mox.ReplayAll()
609 group.connection = self.ec2
610 group.revoke(src_group=other_group)
611
612 self.expect_http()
613 self.mox.ReplayAll()
614
615 self.ec2.delete_security_group(security_group_name)
616 self.ec2.delete_security_group(other_security_group_name)
6170
=== removed directory '.pc/path-to-the-xenhost.conf-fixup.patch'
=== removed directory '.pc/path-to-the-xenhost.conf-fixup.patch/plugins'
=== removed directory '.pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver'
=== removed directory '.pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi'
=== removed directory '.pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi/etc'
=== removed directory '.pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi/etc/xapi.d'
=== removed directory '.pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi/etc/xapi.d/plugins'
=== removed file '.pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost'
--- .pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost 2012-08-16 14:04:11 +0000
+++ .pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost 1970-01-01 00:00:00 +0000
@@ -1,445 +0,0 @@
1#!/usr/bin/env python
2
3# Copyright 2011 OpenStack LLC.
4# Copyright 2011 United States Government as represented by the
5# Administrator of the National Aeronautics and Space Administration.
6# All Rights Reserved.
7#
8# Licensed under the Apache License, Version 2.0 (the "License"); you may
9# not use this file except in compliance with the License. You may obtain
10# a copy of the License at
11#
12# http://www.apache.org/licenses/LICENSE-2.0
13#
14# Unless required by applicable law or agreed to in writing, software
15# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17# License for the specific language governing permissions and limitations
18# under the License.
19
20#
21# XenAPI plugin for host operations
22#
23
24try:
25 import json
26except ImportError:
27 import simplejson as json
28import logging
29import os
30import random
31import re
32import subprocess
33import tempfile
34import time
35
36import XenAPI
37import XenAPIPlugin
38import pluginlib_nova as pluginlib
39
40
41pluginlib.configure_logging("xenhost")
42
43host_data_pattern = re.compile(r"\s*(\S+) \([^\)]+\) *: ?(.*)")
44config_file_path = "/usr/etc/xenhost.conf"
45DEFAULT_TRIES = 23
46DEFAULT_SLEEP = 10
47
48
49def jsonify(fnc):
50 def wrapper(*args, **kwargs):
51 return json.dumps(fnc(*args, **kwargs))
52 return wrapper
53
54
55class TimeoutError(StandardError):
56 pass
57
58
59def _run_command(cmd):
60 """Abstracts out the basics of issuing system commands. If the command
61 returns anything in stderr, a PluginError is raised with that information.
62 Otherwise, the output from stdout is returned.
63 """
64 pipe = subprocess.PIPE
65 proc = subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe,
66 stderr=pipe, close_fds=True)
67 proc.wait()
68 err = proc.stderr.read()
69 if err:
70 raise pluginlib.PluginError(err)
71 return proc.stdout.read()
72
73
74# NOTE (salvatore-orlando):
75# Instead of updating run_command a new method has been implemented,
76# in order to avoid risking breaking existing functions calling _run_command
77def _run_command_with_input(cmd, process_input):
78 """Abstracts out the basics of issuing system commands. If the command
79 returns anything in stderr, a PluginError is raised with that information.
80 Otherwise, the output from stdout is returned.
81
82 process_input specificies a variable to use as the process' standard input.
83 """
84 pipe = subprocess.PIPE
85 # cmd can be either a single string with command and arguments,
86 # or a sequence of string
87 if not hasattr(cmd, '__iter__'):
88 cmd = [cmd] # make it iterable
89
90 #Note(salvatore-orlando): the shell argument has been set to False
91 proc = subprocess.Popen(cmd, shell=False, stdin=pipe, stdout=pipe,
92 stderr=pipe, close_fds=True)
93 if process_input is not None:
94 (output, err) = proc.communicate(process_input)
95 else:
96 (output, err) = proc.communicate()
97 if err:
98 raise pluginlib.PluginError(err)
99 # This is tantamount to proc.stdout.read()
100 return output
101
102
103def _resume_compute(session, compute_ref, compute_uuid):
104 """Resume compute node on slave host after pool join. This has to
105 happen regardless of the success or failure of the join operation."""
106 try:
107 # session is valid if the join operation has failed
108 session.xenapi.VM.start(compute_ref, False, True)
109 except XenAPI.Failure, e:
110 # if session is invalid, e.g. xapi has restarted, then the pool
111 # join has been successful, wait for xapi to become alive again
112 for c in xrange(0, DEFAULT_TRIES):
113 try:
114 _run_command("xe vm-start uuid=%s" % compute_uuid)
115 return
116 except pluginlib.PluginError, e:
117 logging.exception('Waited %d seconds for the slave to '
118 'become available.' % (c * DEFAULT_SLEEP))
119 time.sleep(DEFAULT_SLEEP)
120 raise pluginlib.PluginError('Unrecoverable error: the host has '
121 'not come back for more than %d seconds'
122 % (DEFAULT_SLEEP * (DEFAULT_TRIES + 1)))
123
124
125@jsonify
126def set_host_enabled(self, arg_dict):
127 """Sets this host's ability to accept new instances.
128 It will otherwise continue to operate normally.
129 """
130 enabled = arg_dict.get("enabled")
131 if enabled is None:
132 raise pluginlib.PluginError(
133 _("Missing 'enabled' argument to set_host_enabled"))
134
135 host_uuid = arg_dict['host_uuid']
136 if enabled == "true":
137 result = _run_command("xe host-enable uuid=%s" % host_uuid)
138 elif enabled == "false":
139 result = _run_command("xe host-disable uuid=%s" % host_uuid)
140 else:
141 raise pluginlib.PluginError(_("Illegal enabled status: %s") % enabled)
142 # Should be empty string
143 if result:
144 raise pluginlib.PluginError(result)
145 # Return the current enabled status
146 cmd = "xe host-param-list uuid=%s | grep enabled" % host_uuid
147 resp = _run_command(cmd)
148 # Response should be in the format: "enabled ( RO): true"
149 host_enabled = resp.strip().split()[-1]
150 if host_enabled == "true":
151 status = "enabled"
152 else:
153 status = "disabled"
154 return {"status": status}
155
156
157def _write_config_dict(dct):
158 conf_file = file(config_file_path, "w")
159 json.dump(dct, conf_file)
160 conf_file.close()
161
162
163def _get_config_dict():
164 """Returns a dict containing the key/values in the config file.
165 If the file doesn't exist, it is created, and an empty dict
166 is returned.
167 """
168 try:
169 conf_file = file(config_file_path)
170 config_dct = json.load(conf_file)
171 conf_file.close()
172 except IOError:
173 # File doesn't exist
174 config_dct = {}
175 # Create the file
176 _write_config_dict(config_dct)
177 return config_dct
178
179
180@jsonify
181def get_config(self, arg_dict):
182 """Return the value stored for the specified key, or None if no match."""
183 conf = _get_config_dict()
184 params = arg_dict["params"]
185 try:
186 dct = json.loads(params)
187 except Exception, e:
188 dct = params
189 key = dct["key"]
190 ret = conf.get(key)
191 if ret is None:
192 # Can't jsonify None
193 return "None"
194 return ret
195
196
197@jsonify
198def set_config(self, arg_dict):
199 """Write the specified key/value pair, overwriting any existing value."""
200 conf = _get_config_dict()
201 params = arg_dict["params"]
202 try:
203 dct = json.loads(params)
204 except Exception, e:
205 dct = params
206 key = dct["key"]
207 val = dct["value"]
208 if val is None:
209 # Delete the key, if present
210 conf.pop(key, None)
211 else:
212 conf.update({key: val})
213 _write_config_dict(conf)
214
215
216def iptables_config(session, args):
217 # command should be either save or restore
218 logging.debug("iptables_config:enter")
219 logging.debug("iptables_config: args=%s", args)
220 cmd_args = pluginlib.exists(args, 'cmd_args')
221 logging.debug("iptables_config: cmd_args=%s", cmd_args)
222 process_input = pluginlib.optional(args, 'process_input')
223 logging.debug("iptables_config: process_input=%s", process_input)
224 cmd = json.loads(cmd_args)
225 cmd = map(str, cmd)
226
227 # either execute iptable-save or iptables-restore
228 # command must be only one of these two
229 # process_input must be used only with iptables-restore
230 if len(cmd) > 0 and cmd[0] in ('iptables-save',
231 'iptables-restore',
232 'ip6tables-save',
233 'ip6tables-restore'):
234 result = _run_command_with_input(cmd, process_input)
235 ret_str = json.dumps(dict(out=result,
236 err=''))
237 logging.debug("iptables_config:exit")
238 return ret_str
239 else:
240 # else don't do anything and return an error
241 raise pluginlib.PluginError(_("Invalid iptables command"))
242
243
244def _power_action(action, arg_dict):
245 # Host must be disabled first
246 host_uuid = arg_dict['host_uuid']
247 result = _run_command("xe host-disable uuid=%s" % host_uuid)
248 if result:
249 raise pluginlib.PluginError(result)
250 # All running VMs must be shutdown
251 result = _run_command("xe vm-shutdown --multiple "
252 "resident-on=%s" % host_uuid)
253 if result:
254 raise pluginlib.PluginError(result)
255 cmds = {"reboot": "xe host-reboot uuid=%s",
256 "startup": "xe host-power-on uuid=%s",
257 "shutdown": "xe host-shutdown uuid=%s"}
258 result = _run_command(cmds[action] % host_uuid)
259 # Should be empty string
260 if result:
261 raise pluginlib.PluginError(result)
262 return {"power_action": action}
263
264
265@jsonify
266def host_reboot(self, arg_dict):
267 """Reboots the host."""
268 return _power_action("reboot", arg_dict)
269
270
271@jsonify
272def host_shutdown(self, arg_dict):
273 """Reboots the host."""
274 return _power_action("shutdown", arg_dict)
275
276
277@jsonify
278def host_start(self, arg_dict):
279 """Starts the host. Currently not feasible, since the host
280 runs on the same machine as Xen.
281 """
282 return _power_action("startup", arg_dict)
283
284
285@jsonify
286def host_join(self, arg_dict):
287 """Join a remote host into a pool whose master is the host
288 where the plugin is called from. The following constraints apply:
289
290 - The host must have no VMs running, except nova-compute, which will be
291 shut down (and restarted upon pool-join) automatically,
292 - The host must have no shared storage currently set up,
293 - The host must have the same license of the master,
294 - The host must have the same supplemental packs as the master."""
295 session = XenAPI.Session(arg_dict.get("url"))
296 session.login_with_password(arg_dict.get("user"),
297 arg_dict.get("password"))
298 compute_ref = session.xenapi.VM.get_by_uuid(arg_dict.get('compute_uuid'))
299 session.xenapi.VM.clean_shutdown(compute_ref)
300 try:
301 if arg_dict.get("force"):
302 session.xenapi.pool.join(arg_dict.get("master_addr"),
303 arg_dict.get("master_user"),
304 arg_dict.get("master_pass"))
305 else:
306 session.xenapi.pool.join_force(arg_dict.get("master_addr"),
307 arg_dict.get("master_user"),
308 arg_dict.get("master_pass"))
309 finally:
310 _resume_compute(session, compute_ref, arg_dict.get("compute_uuid"))
311
312
313@jsonify
314def host_data(self, arg_dict):
315 """Runs the commands on the xenstore host to return the current status
316 information.
317 """
318 host_uuid = arg_dict['host_uuid']
319 resp = _run_command("xe host-param-list uuid=%s" % host_uuid)
320 parsed_data = parse_response(resp)
321 # We have the raw dict of values. Extract those that we need,
322 # and convert the data types as needed.
323 ret_dict = cleanup(parsed_data)
324 # Add any config settings
325 config = _get_config_dict()
326 ret_dict.update(config)
327 return ret_dict
328
329
330def parse_response(resp):
331 data = {}
332 for ln in resp.splitlines():
333 if not ln:
334 continue
335 mtch = host_data_pattern.match(ln.strip())
336 try:
337 k, v = mtch.groups()
338 data[k] = v
339 except AttributeError:
340 # Not a valid line; skip it
341 continue
342 return data
343
344
345@jsonify
346def host_uptime(self, arg_dict):
347 """Returns the result of the uptime command on the xenhost."""
348 return {"uptime": _run_command('uptime')}
349
350
351def cleanup(dct):
352 """Take the raw KV pairs returned and translate them into the
353 appropriate types, discarding any we don't need.
354 """
355 def safe_int(val):
356 """Integer values will either be string versions of numbers,
357 or empty strings. Convert the latter to nulls.
358 """
359 try:
360 return int(val)
361 except ValueError:
362 return None
363
364 def strip_kv(ln):
365 return [val.strip() for val in ln.split(":", 1)]
366
367 out = {}
368
369# sbs = dct.get("supported-bootloaders", "")
370# out["host_supported-bootloaders"] = sbs.split("; ")
371# out["host_suspend-image-sr-uuid"] = dct.get("suspend-image-sr-uuid", "")
372# out["host_crash-dump-sr-uuid"] = dct.get("crash-dump-sr-uuid", "")
373# out["host_local-cache-sr"] = dct.get("local-cache-sr", "")
374 out["enabled"] = dct.get("enabled", "true") == "true"
375 out["host_memory"] = omm = {}
376 omm["total"] = safe_int(dct.get("memory-total", ""))
377 omm["overhead"] = safe_int(dct.get("memory-overhead", ""))
378 omm["free"] = safe_int(dct.get("memory-free", ""))
379 omm["free-computed"] = safe_int(
380 dct.get("memory-free-computed", ""))
381
382# out["host_API-version"] = avv = {}
383# avv["vendor"] = dct.get("API-version-vendor", "")
384# avv["major"] = safe_int(dct.get("API-version-major", ""))
385# avv["minor"] = safe_int(dct.get("API-version-minor", ""))
386
387 out["enabled"] = dct.get("enabled", True)
388 out["host_uuid"] = dct.get("uuid", None)
389 out["host_name-label"] = dct.get("name-label", "")
390 out["host_name-description"] = dct.get("name-description", "")
391# out["host_host-metrics-live"] = dct.get(
392# "host-metrics-live", "false") == "true"
393 out["host_hostname"] = dct.get("hostname", "")
394 out["host_ip_address"] = dct.get("address", "")
395 oc = dct.get("other-config", "")
396 out["host_other-config"] = ocd = {}
397 if oc:
398 for oc_fld in oc.split("; "):
399 ock, ocv = strip_kv(oc_fld)
400 ocd[ock] = ocv
401# out["host_capabilities"] = dct.get("capabilities", "").split("; ")
402# out["host_allowed-operations"] = dct.get(
403# "allowed-operations", "").split("; ")
404# lsrv = dct.get("license-server", "")
405# out["host_license-server"] = ols = {}
406# if lsrv:
407# for lspart in lsrv.split("; "):
408# lsk, lsv = lspart.split(": ")
409# if lsk == "port":
410# ols[lsk] = safe_int(lsv)
411# else:
412# ols[lsk] = lsv
413# sv = dct.get("software-version", "")
414# out["host_software-version"] = osv = {}
415# if sv:
416# for svln in sv.split("; "):
417# svk, svv = strip_kv(svln)
418# osv[svk] = svv
419 cpuinf = dct.get("cpu_info", "")
420 out["host_cpu_info"] = ocp = {}
421 if cpuinf:
422 for cpln in cpuinf.split("; "):
423 cpk, cpv = strip_kv(cpln)
424 if cpk in ("cpu_count", "family", "model", "stepping"):
425 ocp[cpk] = safe_int(cpv)
426 else:
427 ocp[cpk] = cpv
428# out["host_edition"] = dct.get("edition", "")
429# out["host_external-auth-service-name"] = dct.get(
430# "external-auth-service-name", "")
431 return out
432
433
434if __name__ == "__main__":
435 XenAPIPlugin.dispatch(
436 {"host_data": host_data,
437 "set_host_enabled": set_host_enabled,
438 "host_shutdown": host_shutdown,
439 "host_reboot": host_reboot,
440 "host_start": host_start,
441 "host_join": host_join,
442 "get_config": get_config,
443 "set_config": set_config,
444 "iptables_config": iptables_config,
445 "host_uptime": host_uptime})
4460
=== removed directory '.pc/rbd-security.patch'
=== removed directory '.pc/rbd-security.patch/nova'
=== removed directory '.pc/rbd-security.patch/nova/virt'
=== removed directory '.pc/rbd-security.patch/nova/virt/libvirt'
=== removed file '.pc/rbd-security.patch/nova/virt/libvirt/volume.py'
--- .pc/rbd-security.patch/nova/virt/libvirt/volume.py 2012-10-12 12:35:01 +0000
+++ .pc/rbd-security.patch/nova/virt/libvirt/volume.py 1970-01-01 00:00:00 +0000
@@ -1,206 +0,0 @@
1# vim: tabstop=4 shiftwidth=4 softtabstop=4
2
3# Copyright 2011 OpenStack LLC.
4# All Rights Reserved.
5#
6# Licensed under the Apache License, Version 2.0 (the "License"); you may
7# not use this file except in compliance with the License. You may obtain
8# a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15# License for the specific language governing permissions and limitations
16# under the License.
17
18"""Volume drivers for libvirt."""
19
20import os
21import time
22
23from nova import exception
24from nova import flags
25from nova.openstack.common import log as logging
26from nova import utils
27from nova.virt.libvirt import config
28from nova.virt.libvirt import utils as virtutils
29
30LOG = logging.getLogger(__name__)
31FLAGS = flags.FLAGS
32flags.DECLARE('num_iscsi_scan_tries', 'nova.volume.driver')
33
34
35class LibvirtVolumeDriver(object):
36 """Base class for volume drivers."""
37 def __init__(self, connection):
38 self.connection = connection
39
40 def connect_volume(self, connection_info, mount_device):
41 """Connect the volume. Returns xml for libvirt."""
42 conf = config.LibvirtConfigGuestDisk()
43 conf.source_type = "block"
44 conf.driver_name = virtutils.pick_disk_driver_name(is_block_dev=True)
45 conf.driver_format = "raw"
46 conf.driver_cache = "none"
47 conf.source_path = connection_info['data']['device_path']
48 conf.target_dev = mount_device
49 conf.target_bus = "virtio"
50 conf.serial = connection_info.get('serial')
51 return conf
52
53 def disconnect_volume(self, connection_info, mount_device):
54 """Disconnect the volume"""
55 pass
56
57
58class LibvirtFakeVolumeDriver(LibvirtVolumeDriver):
59 """Driver to attach Network volumes to libvirt."""
60
61 def connect_volume(self, connection_info, mount_device):
62 conf = config.LibvirtConfigGuestDisk()
63 conf.source_type = "network"
64 conf.driver_name = "qemu"
65 conf.driver_format = "raw"
66 conf.driver_cache = "none"
67 conf.source_protocol = "fake"
68 conf.source_host = "fake"
69 conf.target_dev = mount_device
70 conf.target_bus = "virtio"
71 conf.serial = connection_info.get('serial')
72 return conf
73
74
75class LibvirtNetVolumeDriver(LibvirtVolumeDriver):
76 """Driver to attach Network volumes to libvirt."""
77
78 def connect_volume(self, connection_info, mount_device):
79 conf = config.LibvirtConfigGuestDisk()
80 conf.source_type = "network"
81 conf.driver_name = virtutils.pick_disk_driver_name(is_block_dev=False)
82 conf.driver_format = "raw"
83 conf.driver_cache = "none"
84 conf.source_protocol = connection_info['driver_volume_type']
85 conf.source_host = connection_info['data']['name']
86 conf.target_dev = mount_device
87 conf.target_bus = "virtio"
88 conf.serial = connection_info.get('serial')
89 netdisk_properties = connection_info['data']
90 if netdisk_properties.get('auth_enabled'):
91 conf.auth_username = netdisk_properties['auth_username']
92 conf.auth_secret_type = netdisk_properties['secret_type']
93 conf.auth_secret_uuid = netdisk_properties['secret_uuid']
94 return conf
95
96
97class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver):
98 """Driver to attach Network volumes to libvirt."""
99
100 def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
101 check_exit_code = kwargs.pop('check_exit_code', 0)
102 (out, err) = utils.execute('iscsiadm', '-m', 'node', '-T',
103 iscsi_properties['target_iqn'],
104 '-p', iscsi_properties['target_portal'],
105 *iscsi_command, run_as_root=True,
106 check_exit_code=check_exit_code)
107 LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
108 (iscsi_command, out, err))
109 return (out, err)
110
111 def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
112 **kwargs):
113 iscsi_command = ('--op', 'update', '-n', property_key,
114 '-v', property_value)
115 return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs)
116
117 @utils.synchronized('connect_volume')
118 def connect_volume(self, connection_info, mount_device):
119 """Attach the volume to instance_name"""
120 iscsi_properties = connection_info['data']
121 # NOTE(vish): If we are on the same host as nova volume, the
122 # discovery makes the target so we don't need to
123 # run --op new. Therefore, we check to see if the
124 # target exists, and if we get 255 (Not Found), then
125 # we run --op new. This will also happen if another
126 # volume is using the same target.
127 try:
128 self._run_iscsiadm(iscsi_properties, ())
129 except exception.ProcessExecutionError as exc:
130 # iscsiadm returns 21 for "No records found" after version 2.0-871
131 if exc.exit_code in [21, 255]:
132 self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
133 else:
134 raise
135
136 if iscsi_properties.get('auth_method'):
137 self._iscsiadm_update(iscsi_properties,
138 "node.session.auth.authmethod",
139 iscsi_properties['auth_method'])
140 self._iscsiadm_update(iscsi_properties,
141 "node.session.auth.username",
142 iscsi_properties['auth_username'])
143 self._iscsiadm_update(iscsi_properties,
144 "node.session.auth.password",
145 iscsi_properties['auth_password'])
146
147 # NOTE(vish): If we have another lun on the same target, we may
148 # have a duplicate login
149 self._run_iscsiadm(iscsi_properties, ("--login",),
150 check_exit_code=[0, 255])
151
152 self._iscsiadm_update(iscsi_properties, "node.startup", "automatic")
153
154 host_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-%s" %
155 (iscsi_properties['target_portal'],
156 iscsi_properties['target_iqn'],
157 iscsi_properties.get('target_lun', 0)))
158
159 # The /dev/disk/by-path/... node is not always present immediately
160 # TODO(justinsb): This retry-with-delay is a pattern, move to utils?
161 tries = 0
162 while not os.path.exists(host_device):
163 if tries >= FLAGS.num_iscsi_scan_tries:
164 raise exception.NovaException(_("iSCSI device not found at %s")
165 % (host_device))
166
167 LOG.warn(_("ISCSI volume not yet found at: %(mount_device)s. "
168 "Will rescan & retry. Try number: %(tries)s") %
169 locals())
170
171 # The rescan isn't documented as being necessary(?), but it helps
172 self._run_iscsiadm(iscsi_properties, ("--rescan",))
173
174 tries = tries + 1
175 if not os.path.exists(host_device):
176 time.sleep(tries ** 2)
177
178 if tries != 0:
179 LOG.debug(_("Found iSCSI node %(mount_device)s "
180 "(after %(tries)s rescans)") %
181 locals())
182
183 connection_info['data']['device_path'] = host_device
184 sup = super(LibvirtISCSIVolumeDriver, self)
185 return sup.connect_volume(connection_info, mount_device)
186
187 @utils.synchronized('connect_volume')
188 def disconnect_volume(self, connection_info, mount_device):
189 """Detach the volume from instance_name"""
190 sup = super(LibvirtISCSIVolumeDriver, self)
191 sup.disconnect_volume(connection_info, mount_device)
192 iscsi_properties = connection_info['data']
193 # NOTE(vish): Only disconnect from the target if no luns from the
194 # target are in use.
195 device_prefix = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-" %
196 (iscsi_properties['target_portal'],
197 iscsi_properties['target_iqn']))
198 devices = self.connection.get_all_block_devices()
199 devices = [dev for dev in devices if dev.startswith(device_prefix)]
200 if not devices:
201 self._iscsiadm_update(iscsi_properties, "node.startup", "manual",
202 check_exit_code=[0, 255])
203 self._run_iscsiadm(iscsi_properties, ("--logout",),
204 check_exit_code=[0, 255])
205 self._run_iscsiadm(iscsi_properties, ('--op', 'delete'),
206 check_exit_code=[0, 21, 255])
2070
=== removed directory '.pc/ubuntu'
=== removed directory '.pc/ubuntu/fix-ec2-volume-id-mappings.patch'
=== removed directory '.pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova'
=== removed directory '.pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova/db'
=== removed directory '.pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova/db/sqlalchemy'
=== removed file '.pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova/db/sqlalchemy/api.py'
--- .pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova/db/sqlalchemy/api.py 2012-10-12 12:35:01 +0000
+++ .pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova/db/sqlalchemy/api.py 1970-01-01 00:00:00 +0000
@@ -1,5256 +0,0 @@
1# vim: tabstop=4 shiftwidth=4 softtabstop=4
2
3# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
4# Copyright 2010 United States Government as represented by the
5# Administrator of the National Aeronautics and Space Administration.
6# All Rights Reserved.
7#
8# Licensed under the Apache License, Version 2.0 (the "License"); you may
9# not use this file except in compliance with the License. You may obtain
10# a copy of the License at
11#
12# http://www.apache.org/licenses/LICENSE-2.0
13#
14# Unless required by applicable law or agreed to in writing, software
15# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17# License for the specific language governing permissions and limitations
18# under the License.
19
20"""Implementation of SQLAlchemy backend."""
21
22import collections
23import copy
24import datetime
25import functools
26import warnings
27
28from nova import block_device
29from nova.common.sqlalchemyutils import paginate_query
30from nova.compute import vm_states
31from nova import db
32from nova.db.sqlalchemy import models
33from nova.db.sqlalchemy.session import get_session
34from nova import exception
35from nova import flags
36from nova.openstack.common import log as logging
37from nova.openstack.common import timeutils
38from nova import utils
39from sqlalchemy import and_
40from sqlalchemy.exc import IntegrityError
41from sqlalchemy import or_
42from sqlalchemy.orm import joinedload
43from sqlalchemy.orm import joinedload_all
44from sqlalchemy.sql.expression import asc
45from sqlalchemy.sql.expression import desc
46from sqlalchemy.sql.expression import literal_column
47from sqlalchemy.sql import func
48
49FLAGS = flags.FLAGS
50
51LOG = logging.getLogger(__name__)
52
53
54def is_admin_context(context):
55 """Indicates if the request context is an administrator."""
56 if not context:
57 warnings.warn(_('Use of empty request context is deprecated'),
58 DeprecationWarning)
59 raise Exception('die')
60 return context.is_admin
61
62
63def is_user_context(context):
64 """Indicates if the request context is a normal user."""
65 if not context:
66 return False
67 if context.is_admin:
68 return False
69 if not context.user_id or not context.project_id:
70 return False
71 return True
72
73
74def authorize_project_context(context, project_id):
75 """Ensures a request has permission to access the given project."""
76 if is_user_context(context):
77 if not context.project_id:
78 raise exception.NotAuthorized()
79 elif context.project_id != project_id:
80 raise exception.NotAuthorized()
81
82
83def authorize_user_context(context, user_id):
84 """Ensures a request has permission to access the given user."""
85 if is_user_context(context):
86 if not context.user_id:
87 raise exception.NotAuthorized()
88 elif context.user_id != user_id:
89 raise exception.NotAuthorized()
90
91
92def authorize_quota_class_context(context, class_name):
93 """Ensures a request has permission to access the given quota class."""
94 if is_user_context(context):
95 if not context.quota_class:
96 raise exception.NotAuthorized()
97 elif context.quota_class != class_name:
98 raise exception.NotAuthorized()
99
100
101def require_admin_context(f):
102 """Decorator to require admin request context.
103
104 The first argument to the wrapped function must be the context.
105
106 """
107
108 def wrapper(*args, **kwargs):
109 if not is_admin_context(args[0]):
110 raise exception.AdminRequired()
111 return f(*args, **kwargs)
112 return wrapper
113
114
115def require_context(f):
116 """Decorator to require *any* user or admin context.
117
118 This does no authorization for user or project access matching, see
119 :py:func:`authorize_project_context` and
120 :py:func:`authorize_user_context`.
121
122 The first argument to the wrapped function must be the context.
123
124 """
125
126 def wrapper(*args, **kwargs):
127 if not is_admin_context(args[0]) and not is_user_context(args[0]):
128 raise exception.NotAuthorized()
129 return f(*args, **kwargs)
130 return wrapper
131
132
133def require_instance_exists(f):
134 """Decorator to require the specified instance to exist.
135
136 Requires the wrapped function to use context and instance_id as
137 their first two arguments.
138 """
139 @functools.wraps(f)
140 def wrapper(context, instance_id, *args, **kwargs):
141 db.instance_get(context, instance_id)
142 return f(context, instance_id, *args, **kwargs)
143
144 return wrapper
145
146
147def require_instance_exists_using_uuid(f):
148 """Decorator to require the specified instance to exist.
149
150 Requires the wrapped function to use context and instance_uuid as
151 their first two arguments.
152 """
153 @functools.wraps(f)
154 def wrapper(context, instance_uuid, *args, **kwargs):
155 db.instance_get_by_uuid(context, instance_uuid)
156 return f(context, instance_uuid, *args, **kwargs)
157
158 return wrapper
159
160
161def require_volume_exists(f):
162 """Decorator to require the specified volume to exist.
163
164 Requires the wrapped function to use context and volume_id as
165 their first two arguments.
166 """
167
168 def wrapper(context, volume_id, *args, **kwargs):
169 db.volume_get(context, volume_id)
170 return f(context, volume_id, *args, **kwargs)
171 wrapper.__name__ = f.__name__
172 return wrapper
173
174
175def require_aggregate_exists(f):
176 """Decorator to require the specified aggregate to exist.
177
178 Requires the wrapped function to use context and aggregate_id as
179 their first two arguments.
180 """
181
182 @functools.wraps(f)
183 def wrapper(context, aggregate_id, *args, **kwargs):
184 db.aggregate_get(context, aggregate_id)
185 return f(context, aggregate_id, *args, **kwargs)
186 return wrapper
187
188
189def model_query(context, model, *args, **kwargs):
190 """Query helper that accounts for context's `read_deleted` field.
191
192 :param context: context to query under
193 :param session: if present, the session to use
194 :param read_deleted: if present, overrides context's read_deleted field.
195 :param project_only: if present and context is user-type, then restrict
196 query to match the context's project_id. If set to 'allow_none',
197 restriction includes project_id = None.
198 """
199 session = kwargs.get('session') or get_session()
200 read_deleted = kwargs.get('read_deleted') or context.read_deleted
201 project_only = kwargs.get('project_only', False)
202
203 query = session.query(model, *args)
204
205 if read_deleted == 'no':
206 query = query.filter_by(deleted=False)
207 elif read_deleted == 'yes':
208 pass # omit the filter to include deleted and active
209 elif read_deleted == 'only':
210 query = query.filter_by(deleted=True)
211 else:
212 raise Exception(
213 _("Unrecognized read_deleted value '%s'") % read_deleted)
214
215 if is_user_context(context) and project_only:
216 if project_only == 'allow_none':
217 query = query.filter(or_(model.project_id == context.project_id,
218 model.project_id == None))
219 else:
220 query = query.filter_by(project_id=context.project_id)
221
222 return query
223
224
def exact_filter(query, model, filters, legal_keys):
    """Apply exact-match filtering to a query.

    Pops each key in legal_keys out of filters and turns it into a
    query constraint:

      * 'metadata' values (a dict, or a list of dicts) become ``any()``
        subquery filters on the metadata key/value pairs
      * list/tuple/set/frozenset values become an SQL ``IN`` test
      * anything else becomes a simple equality filter

    Returns the updated query.  filters is modified in place so the
    caller can see which filters were not consumed.
    """
    exact_matches = {}

    for key in legal_keys:
        # Keys we aren't filtering on are left in `filters`.
        if key not in filters:
            continue

        value = filters.pop(key)

        if key == 'metadata':
            column_attr = getattr(model, key)
            # Accept either a single dict or a list of dicts.
            items = value if isinstance(value, list) else [value]
            for item in items:
                for k, v in item.iteritems():
                    query = query.filter(column_attr.any(key=k))
                    query = query.filter(column_attr.any(value=v))
        elif isinstance(value, (list, tuple, set, frozenset)):
            # Collection of candidate values: emit an IN clause.
            column_attr = getattr(model, key)
            query = query.filter(column_attr.in_(value))
        else:
            # Plain value: batch up for one filter_by() call below.
            exact_matches[key] = value

    if exact_matches:
        query = query.filter_by(**exact_matches)

    return query
277
278
279###################
280
281
def constraint(**conditions):
    """Build a Constraint from keyword column/condition pairs."""
    constraint_ref = Constraint(conditions)
    return constraint_ref
284
285
def equal_any(*values):
    """Condition matching rows where the column equals any given value."""
    condition = EqualityCondition(values)
    return condition
288
289
def not_equal(*values):
    """Condition matching rows where the column differs from all values."""
    condition = InequalityCondition(values)
    return condition
292
293
class Constraint(object):
    """A set of per-column conditions applied to a query as filters."""

    def __init__(self, conditions):
        # Mapping of column name -> condition object exposing clauses().
        self.conditions = conditions

    def apply(self, model, query):
        """Filter `query` by every clause of every condition."""
        for column, condition in self.conditions.iteritems():
            for clause in condition.clauses(getattr(model, column)):
                query = query.filter(clause)
        return query
304
305
class EqualityCondition(object):
    """Condition matching a column equal to any one of a set of values."""

    def __init__(self, values):
        self.values = values

    def clauses(self, field):
        """Return a one-element list holding a single OR clause.

        BUG FIX: sqlalchemy's or_() takes *clauses; the previous code
        passed the whole list as one argument, so or_() received a
        Python list instead of column expressions.  We also wrap the
        result in a list because Constraint.apply() iterates the return
        value — iterating the OR clause itself would decompose it back
        into its sub-clauses and AND them together.
        """
        return [or_(*[field == value for value in self.values])]
313
314
class InequalityCondition(object):
    """Condition matching a column different from every given value."""

    def __init__(self, values):
        self.values = values

    def clauses(self, field):
        """Return one 'field != value' clause per excluded value."""
        return [field != excluded for excluded in self.values]
322
323
324###################
325
326
@require_admin_context
def service_destroy(context, service_id):
    """Soft-delete a service and any compute nodes attached to it."""
    session = get_session()
    with session.begin():
        service = service_get(context, service_id, session=session)
        service.delete(session=session)

        # Compute services carry compute_node rows that must go too.
        if service.topic == 'compute' and service.compute_node:
            for node in service.compute_node:
                node.delete(session=session)
337
338
@require_admin_context
def service_get(context, service_id, session=None):
    """Return the service with the given id, eagerly loading compute_node.

    Raises ServiceNotFound if no matching row exists.
    """
    query = model_query(context, models.Service, session=session)
    query = query.options(joinedload('compute_node'))
    result = query.filter_by(id=service_id).first()

    if not result:
        raise exception.ServiceNotFound(service_id=service_id)

    return result
349
350
@require_admin_context
def service_get_all(context, disabled=None):
    """Return all services, optionally filtered by their disabled flag."""
    query = model_query(context, models.Service)

    if disabled is None:
        return query.all()
    return query.filter_by(disabled=disabled).all()
359
360
@require_admin_context
def service_get_all_by_topic(context, topic):
    """Return all enabled, non-deleted services for the given topic."""
    query = model_query(context, models.Service, read_deleted="no")
    query = query.filter_by(disabled=False)
    return query.filter_by(topic=topic).all()
367
368
@require_admin_context
def service_get_by_host_and_topic(context, host, topic):
    """Return the first enabled service matching both host and topic."""
    query = model_query(context, models.Service, read_deleted="no")
    query = query.filter_by(disabled=False)
    query = query.filter_by(host=host)
    return query.filter_by(topic=topic).first()
376
377
@require_admin_context
def service_get_all_by_host(context, host):
    """Return every non-deleted service running on the given host."""
    query = model_query(context, models.Service, read_deleted="no")
    return query.filter_by(host=host).all()
383
384
@require_admin_context
def service_get_all_compute_by_host(context, host):
    """Return all compute services on a host, with compute_node loaded.

    Raises ComputeHostNotFound when the host has no compute service.
    """
    query = model_query(context, models.Service, read_deleted="no")
    query = query.options(joinedload('compute_node'))
    query = query.filter_by(host=host)
    result = query.filter_by(topic="compute").all()

    if not result:
        raise exception.ComputeHostNotFound(host=host)

    return result
397
398
@require_admin_context
def _service_get_all_topic_subquery(context, session, topic, subq, label):
    """Return enabled services for `topic`, sorted by the subquery column.

    Services with no matching row in `subq` sort as 0 via COALESCE.
    """
    sort_value = getattr(subq.c, label)
    query = model_query(context, models.Service,
                        func.coalesce(sort_value, 0),
                        session=session, read_deleted="no")
    query = query.filter_by(topic=topic)
    query = query.filter_by(disabled=False)
    query = query.outerjoin((subq, models.Service.host == subq.c.host))
    return query.order_by(sort_value).all()
410
411
@require_admin_context
def service_get_all_compute_sorted(context):
    """Return (service, instance_cores) rows sorted by cores used per host.

    Equivalent SQL:
        SELECT services.*, COALESCE(inst_cores.instance_cores, 0)
        FROM services LEFT OUTER JOIN
            (SELECT host, SUM(instances.vcpus) AS instance_cores
             FROM instances GROUP BY host) AS inst_cores
        ON services.host = inst_cores.host
    """
    session = get_session()
    with session.begin():
        topic = 'compute'
        label = 'instance_cores'
        subq = model_query(context, models.Instance.host,
                           func.sum(models.Instance.vcpus).label(label),
                           session=session, read_deleted="no")
        subq = subq.group_by(models.Instance.host).subquery()
        return _service_get_all_topic_subquery(context, session, topic,
                                               subq, label)
435
436
@require_admin_context
def service_get_all_volume_sorted(context):
    """Return (service, volume_gigabytes) rows sorted by GB used per host."""
    session = get_session()
    with session.begin():
        topic = 'volume'
        label = 'volume_gigabytes'
        subq = model_query(context, models.Volume.host,
                           func.sum(models.Volume.size).label(label),
                           session=session, read_deleted="no")
        subq = subq.group_by(models.Volume.host).subquery()
        return _service_get_all_topic_subquery(context, session, topic,
                                               subq, label)
453
454
@require_admin_context
def service_get_by_args(context, host, binary):
    """Return the service for a (host, binary) pair.

    Raises HostBinaryNotFound when no such service exists.
    """
    query = model_query(context, models.Service)
    query = query.filter_by(host=host)
    result = query.filter_by(binary=binary).first()

    if not result:
        raise exception.HostBinaryNotFound(host=host, binary=binary)

    return result
466
467
@require_admin_context
def service_create(context, values):
    """Create a new service record from a dict of column values.

    New services start disabled unless FLAGS.enable_new_services is set.
    """
    service = models.Service()
    service.update(values)
    # Optionally hold new services out of the pool until explicitly enabled.
    if not FLAGS.enable_new_services:
        service.disabled = True
    service.save()
    return service
476
477
@require_admin_context
def service_update(context, service_id, values):
    """Apply a dict of column updates to an existing service."""
    session = get_session()
    with session.begin():
        service = service_get(context, service_id, session=session)
        service.update(values)
        service.save(session=session)
485
486
487###################
488
def compute_node_get(context, compute_id, session=None):
    """Return a compute node by id, with service and stats eagerly loaded.

    Raises ComputeHostNotFound if the id does not exist.
    NOTE(review): unlike its siblings this is not @require_admin_context
    decorated — confirm whether that is intentional before changing it.
    """
    query = model_query(context, models.ComputeNode, session=session)
    query = query.filter_by(id=compute_id)
    query = query.options(joinedload('service'))
    result = query.options(joinedload('stats')).first()

    if not result:
        raise exception.ComputeHostNotFound(host=compute_id)

    return result
500
501
@require_admin_context
def compute_node_get_all(context, session=None):
    """Return every compute node, with service and stats eagerly loaded."""
    query = model_query(context, models.ComputeNode, session=session)
    query = query.options(joinedload('service'))
    return query.options(joinedload('stats')).all()
508
509
@require_admin_context
def compute_node_search_by_hypervisor(context, hypervisor_match):
    """Return compute nodes whose hypervisor hostname contains the match."""
    field = models.ComputeNode.hypervisor_hostname
    query = model_query(context, models.ComputeNode)
    query = query.options(joinedload('service'))
    return query.filter(field.like('%%%s%%' % hypervisor_match)).all()
517
518
def _prep_stats_dict(values):
    """Replace values['stats'] dict with a list of ComputeNodeStat rows."""
    stat_refs = []
    for key, value in values.get('stats', {}).iteritems():
        stat_ref = models.ComputeNodeStat()
        stat_ref['key'] = key
        stat_ref['value'] = value
        stat_refs.append(stat_ref)
    values['stats'] = stat_refs
529
530
@require_admin_context
def compute_node_create(context, values, session=None):
    """Create a ComputeNode from `values`; the 'stats' dict is converted
    to ComputeNodeStat rows before insertion."""
    _prep_stats_dict(values)

    if not session:
        session = get_session()

    with session.begin(subtransactions=True):
        compute_node = models.ComputeNode()
        session.add(compute_node)
        compute_node.update(values)
    return compute_node
545
546
def _update_stats(context, new_stats, compute_id, session, prune_stats=False):
    """Reconcile a compute node's ComputeNodeStat rows with `new_stats`.

    Existing keys are updated in place, unknown keys create new rows and,
    when prune_stats is set, keys absent from new_stats are soft-deleted.
    Runs inside the caller's session/transaction.
    """
    existing = model_query(context, models.ComputeNodeStat, session=session,
            read_deleted="no").filter_by(compute_node_id=compute_id).all()
    # Index the current rows by stat key for O(1) lookup below.
    statmap = {}
    for stat in existing:
        key = stat['key']
        statmap[key] = stat

    stats = []
    for k, v in new_stats.iteritems():
        # pop() so whatever remains in statmap afterwards is stale.
        old_stat = statmap.pop(k, None)
        if old_stat:
            # update existing value:
            old_stat.update({'value': v})
            stats.append(old_stat)
        else:
            # add new stat:
            stat = models.ComputeNodeStat()
            stat['compute_node_id'] = compute_id
            stat['key'] = k
            stat['value'] = v
            stats.append(stat)

    if prune_stats:
        # prune un-touched old stats:
        for stat in statmap.values():
            session.add(stat)
            stat.update({'deleted': True})

    # add new and updated stats
    for stat in stats:
        session.add(stat)
580
581
@require_admin_context
def compute_node_update(context, compute_id, values, prune_stats=False):
    """Update a compute node row; the 'stats' key is reconciled separately
    via _update_stats rather than written as a column."""
    stats = values.pop('stats', {})

    session = get_session()
    with session.begin(subtransactions=True):
        _update_stats(context, stats, compute_id, session, prune_stats)
        compute_ref = compute_node_get(context, compute_id, session=session)
        compute_ref.update(values)
    return compute_ref
593
594
def compute_node_get_by_host(context, host):
    """Return the first non-deleted compute node for the given host."""
    session = get_session()
    with session.begin():
        query = session.query(models.ComputeNode)
        query = query.join('service')
        query = query.filter(models.Service.host == host)
        query = query.filter_by(deleted=False)
        return query.first()
604
605
def compute_node_statistics(context):
    """Aggregate capacity/usage totals across all compute nodes."""
    fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
              'memory_mb_used', 'local_gb_used', 'free_ram_mb', 'free_disk_gb',
              'current_workload', 'running_vms', 'disk_available_least')
    # Aggregates are positionally matched with `fields` above.
    aggregates = (func.count(models.ComputeNode.id),
                  func.sum(models.ComputeNode.vcpus),
                  func.sum(models.ComputeNode.memory_mb),
                  func.sum(models.ComputeNode.local_gb),
                  func.sum(models.ComputeNode.vcpus_used),
                  func.sum(models.ComputeNode.memory_mb_used),
                  func.sum(models.ComputeNode.local_gb_used),
                  func.sum(models.ComputeNode.free_ram_mb),
                  func.sum(models.ComputeNode.free_disk_gb),
                  func.sum(models.ComputeNode.current_workload),
                  func.sum(models.ComputeNode.running_vms),
                  func.sum(models.ComputeNode.disk_available_least))
    result = model_query(context, *aggregates, read_deleted="no").first()

    # SUM() over zero rows yields NULL; coerce missing values to 0.
    return dict((field, int(result[idx] or 0))
                for idx, field in enumerate(fields))
629
630
631###################
632
633
@require_admin_context
def certificate_get(context, certificate_id, session=None):
    """Return a certificate by id or raise CertificateNotFound."""
    query = model_query(context, models.Certificate, session=session)
    result = query.filter_by(id=certificate_id).first()

    if not result:
        raise exception.CertificateNotFound(certificate_id=certificate_id)

    return result
644
645
@require_admin_context
def certificate_create(context, values):
    """Create and save a certificate from a dict of column values."""
    cert_ref = models.Certificate()
    for column, value in values.iteritems():
        cert_ref[column] = value
    cert_ref.save()
    return cert_ref
653
654
@require_admin_context
def certificate_get_all_by_project(context, project_id):
    """Return all non-deleted certificates for a project."""
    query = model_query(context, models.Certificate, read_deleted="no")
    return query.filter_by(project_id=project_id).all()
660
661
@require_admin_context
def certificate_get_all_by_user(context, user_id):
    """Return all non-deleted certificates for a user."""
    query = model_query(context, models.Certificate, read_deleted="no")
    return query.filter_by(user_id=user_id).all()
667
668
@require_admin_context
def certificate_get_all_by_user_and_project(context, user_id, project_id):
    """Return all non-deleted certificates for a (user, project) pair."""
    query = model_query(context, models.Certificate, read_deleted="no")
    query = query.filter_by(user_id=user_id)
    return query.filter_by(project_id=project_id).all()
675
676
677###################
678
679
@require_context
def floating_ip_get(context, id):
    """Return a floating IP by id, scoped to the caller's project."""
    query = model_query(context, models.FloatingIp, project_only=True)
    result = query.filter_by(id=id).first()

    if not result:
        raise exception.FloatingIpNotFound(id=id)

    return result
690
691
@require_context
def floating_ip_get_pools(context):
    """Return the distinct floating IP pool names as [{'name': ...}]."""
    pool_query = model_query(context, models.FloatingIp.pool).distinct()
    return [{'name': row[0]} for row in pool_query]
698
699
@require_context
def floating_ip_allocate_address(context, project_id, pool):
    """Claim a free floating IP from `pool` for `project_id`.

    Selects an unassociated, unowned address with a row lock, stamps it
    with the project id and returns the address string.  Raises
    NoMoreFloatingIps when the pool is exhausted.
    """
    authorize_project_context(context, project_id)
    session = get_session()
    with session.begin():
        # "Free" means no fixed-IP association and no owning project.
        floating_ip_ref = model_query(context, models.FloatingIp,
                                      session=session, read_deleted="no").\
                                  filter_by(fixed_ip_id=None).\
                                  filter_by(project_id=None).\
                                  filter_by(pool=pool).\
                                  with_lockmode('update').\
                                  first()
        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not floating_ip_ref:
            raise exception.NoMoreFloatingIps()
        floating_ip_ref['project_id'] = project_id
        session.add(floating_ip_ref)
    return floating_ip_ref['address']
719
720
@require_context
def floating_ip_bulk_create(context, ips):
    """Create many floating IP rows in one transaction.

    Raises FloatingIpExists if any address already belongs to a
    different (non-deleted) row.
    """
    existing_ips = {}
    for floating in _floating_ip_get_all(context).all():
        existing_ips[floating['address']] = floating

    session = get_session()
    with session.begin():
        for ip in ips:
            addr = ip['address']
            clashes = (addr in existing_ips and
                       ip.get('id') != existing_ips[addr]['id'])
            if clashes:
                raise exception.FloatingIpExists(**dict(existing_ips[addr]))

            new_ip = models.FloatingIp()
            new_ip.update(ip)
            session.add(new_ip)
738
739
740def _ip_range_splitter(ips, block_size=256):
741 """Yields blocks of IPs no more than block_size elements long."""
742 out = []
743 count = 0
744 for ip in ips:
745 out.append(ip['address'])
746 count += 1
747
748 if count > block_size - 1:
749 yield out
750 out = []
751 count = 0
752
753 if out:
754 yield out
755
756
@require_context
def floating_ip_bulk_destroy(context, ips):
    """Soft-delete floating IPs in batches to keep IN clauses bounded."""
    session = get_session()
    with session.begin():
        for ip_block in _ip_range_splitter(ips):
            query = model_query(context, models.FloatingIp)
            query = query.filter(models.FloatingIp.address.in_(ip_block))
            query.update({'deleted': True,
                          'deleted_at': timeutils.utcnow()},
                         synchronize_session='fetch')
767
768
@require_context
def floating_ip_create(context, values, session=None):
    """Create (or resurrect) a floating IP row and return its address.

    Before saving a live (non-deleted) row, verifies that no *other*
    live row already holds the same address; raises FloatingIpExists
    if one does.
    """
    if not session:
        session = get_session()

    floating_ip_ref = models.FloatingIp()
    floating_ip_ref.update(values)

    # check uniqueness for not deleted addresses
    if not floating_ip_ref.deleted:
        try:
            floating_ip = floating_ip_get_by_address(context,
                                                     floating_ip_ref.address,
                                                     session)
        except exception.FloatingIpNotFoundForAddress:
            # Address is free; nothing to check.
            pass
        else:
            # An existing row is only acceptable if it is this very row
            # (i.e. an update of the same id).
            if floating_ip.id != floating_ip_ref.id:
                raise exception.FloatingIpExists(**dict(floating_ip_ref))

    floating_ip_ref.save(session=session)
    return floating_ip_ref['address']
791
792
@require_context
def floating_ip_count_by_project(context, project_id, session=None):
    """Count a project's floating IPs, excluding auto-assigned ones."""
    authorize_project_context(context, project_id)
    # TODO(tr3buchet): why leave auto_assigned floating IPs out?
    query = model_query(context, models.FloatingIp, read_deleted="no",
                        session=session)
    query = query.filter_by(project_id=project_id)
    return query.filter_by(auto_assigned=False).count()
802
803
@require_context
def floating_ip_fixed_ip_associate(context, floating_address,
                                   fixed_address, host):
    """Point a floating IP at a fixed IP and record the hosting node."""
    session = get_session()
    with session.begin():
        floating = floating_ip_get_by_address(context, floating_address,
                                              session=session)
        fixed = fixed_ip_get_by_address(context, fixed_address,
                                        session=session)
        floating.fixed_ip_id = fixed["id"]
        floating.host = host
        floating.save(session=session)
818
819
@require_context
def floating_ip_deallocate(context, address):
    """Return a floating IP to the unowned pool."""
    session = get_session()
    with session.begin():
        floating = floating_ip_get_by_address(context, address,
                                              session=session)
        floating['project_id'] = None
        floating['host'] = None
        floating['auto_assigned'] = False
        floating.save(session=session)
831
832
@require_context
def floating_ip_destroy(context, address):
    """Soft-delete the floating IP with the given address."""
    session = get_session()
    with session.begin():
        floating = floating_ip_get_by_address(context, address,
                                              session=session)
        floating.delete(session=session)
841
842
@require_context
def floating_ip_disassociate(context, address):
    """Detach a floating IP from its fixed IP.

    Returns the fixed IP's address, or None if nothing was attached.
    """
    session = get_session()
    with session.begin():
        floating = floating_ip_get_by_address(context, address,
                                              session=session)
        fixed = fixed_ip_get(context, floating['fixed_ip_id'])
        fixed_ip_address = fixed['address'] if fixed else None
        floating.fixed_ip_id = None
        floating.host = None
        floating.save(session=session)
    return fixed_ip_address
860
861
@require_context
def floating_ip_set_auto_assigned(context, address):
    """Flag the floating IP at `address` as auto-assigned."""
    session = get_session()
    with session.begin():
        floating = floating_ip_get_by_address(context, address,
                                              session=session)
        floating.auto_assigned = True
        floating.save(session=session)
871
872
def _floating_ip_get_all(context, session=None):
    """Base query over all non-deleted floating IPs."""
    query = model_query(context, models.FloatingIp, read_deleted="no",
                        session=session)
    return query
876
877
@require_admin_context
def floating_ip_get_all(context):
    """Return every floating IP; raise NoFloatingIpsDefined if none."""
    floating_ips = _floating_ip_get_all(context).all()
    if not floating_ips:
        raise exception.NoFloatingIpsDefined()
    return floating_ips
884
885
@require_admin_context
def floating_ip_get_all_by_host(context, host):
    """Return the floating IPs hosted on `host`; raise if there are none."""
    floating_ips = _floating_ip_get_all(context).filter_by(host=host).all()
    if not floating_ips:
        raise exception.FloatingIpNotFoundForHost(host=host)
    return floating_ips
894
895
@require_context
def floating_ip_get_all_by_project(context, project_id):
    """Return a project's floating IPs, excluding auto-assigned ones."""
    authorize_project_context(context, project_id)
    # TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
    query = _floating_ip_get_all(context).filter_by(project_id=project_id)
    return query.filter_by(auto_assigned=False).all()
904
905
@require_context
def floating_ip_get_by_address(context, address, session=None):
    """Return the floating IP with the given address.

    Raises FloatingIpNotFoundForAddress when absent; non-admin callers
    must belong to the owning project.
    """
    query = model_query(context, models.FloatingIp, session=session)
    result = query.filter_by(address=address).first()

    if not result:
        raise exception.FloatingIpNotFoundForAddress(address=address)

    # If the floating IP has a project ID set, check to make sure
    # the non-admin user has access.
    if result.project_id and is_user_context(context):
        authorize_project_context(context, result.project_id)

    return result
921
922
@require_context
def floating_ip_get_by_fixed_address(context, fixed_address, session=None):
    """Return the floating IPs attached to a fixed IP address.

    NOTE(tr3buchet): an empty list is a valid result; don't invent an
    exception here.
    """
    if not session:
        session = get_session()

    fixed_ip = fixed_ip_get_by_address(context, fixed_address, session)

    query = model_query(context, models.FloatingIp, session=session)
    return query.filter_by(fixed_ip_id=fixed_ip['id']).all()
936
937
@require_context
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id, session=None):
    """Return the floating IPs associated with a fixed IP id."""
    if not session:
        session = get_session()

    query = model_query(context, models.FloatingIp, session=session)
    return query.filter_by(fixed_ip_id=fixed_ip_id).all()
946
947
@require_context
def floating_ip_update(context, address, values):
    """Apply column updates to the floating IP at `address`."""
    session = get_session()
    with session.begin():
        floating = floating_ip_get_by_address(context, address, session)
        for column, value in values.iteritems():
            floating[column] = value
        floating.save(session=session)
956
957
@require_context
def _dnsdomain_get(context, session, fqdomain):
    """Fetch a DNS domain row by name, locked for update."""
    query = model_query(context, models.DNSDomain,
                        session=session, read_deleted="no")
    query = query.filter_by(domain=fqdomain)
    return query.with_lockmode('update').first()
965
966
@require_context
def dnsdomain_get(context, fqdomain):
    """Return the DNS domain row for fqdomain (or None)."""
    session = get_session()
    with session.begin():
        return _dnsdomain_get(context, session, fqdomain)
972
973
@require_admin_context
def _dnsdomain_get_or_create(context, session, fqdomain):
    """Return the existing domain row, or a fresh unsaved one."""
    domain_ref = _dnsdomain_get(context, session, fqdomain)
    if domain_ref:
        return domain_ref

    # Not found: build a new row; the caller is responsible for saving it.
    dns_ref = models.DNSDomain()
    dns_ref.update({'domain': fqdomain,
                    'availability_zone': None,
                    'project_id': None})
    return dns_ref
985
986
@require_admin_context
def dnsdomain_register_for_zone(context, fqdomain, zone):
    """Mark fqdomain as a private domain tied to an availability zone."""
    session = get_session()
    with session.begin():
        domain = _dnsdomain_get_or_create(context, session, fqdomain)
        domain.scope = 'private'
        domain.availability_zone = zone
        domain.save(session=session)
995
996
@require_admin_context
def dnsdomain_register_for_project(context, fqdomain, project):
    """Mark fqdomain as a public domain owned by a project."""
    session = get_session()
    with session.begin():
        domain = _dnsdomain_get_or_create(context, session, fqdomain)
        domain.scope = 'public'
        domain.project_id = project
        domain.save(session=session)
1005
1006
@require_admin_context
def dnsdomain_unregister(context, fqdomain):
    """Hard-delete the DNS domain row for fqdomain."""
    session = get_session()
    with session.begin():
        query = session.query(models.DNSDomain)
        query.filter_by(domain=fqdomain).delete()
1014
1015
@require_context
def dnsdomain_list(context):
    """Return the names of all registered, non-deleted DNS domains."""
    session = get_session()
    records = model_query(context, models.DNSDomain,
                          session=session, read_deleted="no").all()
    return [record.domain for record in records]
1027
1028
1029###################
1030
1031
@require_admin_context
def fixed_ip_associate(context, address, instance_uuid, network_id=None,
                       reserved=False):
    """Associate a specific fixed IP address with an instance.

    Keyword arguments:
    reserved -- should be a boolean value(True or False), exact value will be
                used to filter on the fixed ip address

    Raises FixedIpNotFoundForNetwork if the address is not available on
    the network, and FixedIpAlreadyInUse if another instance holds it.
    """
    if not utils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(uuid=instance_uuid)

    session = get_session()
    with session.begin():
        # Match the address either on the requested network or on no
        # network at all (an unscoped address claimable for this network).
        network_or_none = or_(models.FixedIp.network_id == network_id,
                              models.FixedIp.network_id == None)
        fixed_ip_ref = model_query(context, models.FixedIp, session=session,
                                   read_deleted="no").\
                               filter(network_or_none).\
                               filter_by(reserved=reserved).\
                               filter_by(address=address).\
                               with_lockmode('update').\
                               first()
        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if fixed_ip_ref is None:
            raise exception.FixedIpNotFoundForNetwork(address=address,
                                                      network_id=network_id)
        if fixed_ip_ref.instance_uuid:
            raise exception.FixedIpAlreadyInUse(address=address)

        # Claim an unscoped address for this network.
        if not fixed_ip_ref.network_id:
            fixed_ip_ref.network_id = network_id
        fixed_ip_ref.instance_uuid = instance_uuid
        session.add(fixed_ip_ref)
    return fixed_ip_ref['address']
1066
1067
@require_admin_context
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
                            host=None):
    """Grab a free fixed IP from the network's pool and associate it.

    Picks an unreserved, unassigned address (row-locked), optionally
    binding it to an instance and/or host, and returns the address.
    Raises NoMoreFixedIps when the pool is exhausted.
    """
    if instance_uuid and not utils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(uuid=instance_uuid)

    session = get_session()
    with session.begin():
        # Consider addresses on this network or not yet on any network.
        network_or_none = or_(models.FixedIp.network_id == network_id,
                              models.FixedIp.network_id == None)
        fixed_ip_ref = model_query(context, models.FixedIp, session=session,
                                   read_deleted="no").\
                               filter(network_or_none).\
                               filter_by(reserved=False).\
                               filter_by(instance_uuid=None).\
                               filter_by(host=None).\
                               with_lockmode('update').\
                               first()
        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not fixed_ip_ref:
            raise exception.NoMoreFixedIps()

        if fixed_ip_ref['network_id'] is None:
            # BUG FIX: the column is 'network_id'; the old code assigned
            # to 'network' (the ORM relationship attribute), so the
            # claimed network id was never persisted for unscoped rows.
            fixed_ip_ref['network_id'] = network_id

        if instance_uuid:
            fixed_ip_ref['instance_uuid'] = instance_uuid

        if host:
            fixed_ip_ref['host'] = host
        session.add(fixed_ip_ref)
    return fixed_ip_ref['address']
1101
1102
@require_context
def fixed_ip_create(context, values):
    """Create a fixed IP row from `values` and return its address."""
    fixed_ip = models.FixedIp()
    fixed_ip.update(values)
    fixed_ip.save()
    return fixed_ip['address']
1109
1110
@require_context
def fixed_ip_bulk_create(context, ips):
    """Insert many fixed IP rows in one transaction."""
    session = get_session()
    with session.begin():
        for ip in ips:
            fixed_ip = models.FixedIp()
            fixed_ip.update(ip)
            session.add(fixed_ip)
1119
1120
@require_context
def fixed_ip_disassociate(context, address):
    """Clear the instance association on the fixed IP at `address`."""
    session = get_session()
    with session.begin():
        fixed_ip = fixed_ip_get_by_address(context, address,
                                           session=session)
        fixed_ip['instance_uuid'] = None
        fixed_ip.save(session=session)
1130
1131
@require_admin_context
def fixed_ip_disassociate_all_by_timeout(context, host, time):
    """Release unallocated leases on `host` not updated since `time`.

    Returns the number of fixed IP rows cleared.
    """
    session = get_session()
    # NOTE(vish): only update fixed ips that "belong" to this
    #             host; i.e. the network host or the instance
    #             host matches. Two queries necessary because
    #             join with update doesn't work.
    # (The `== True` / `== False` comparisons below are intentional:
    # SQLAlchemy needs column comparisons, not Python truthiness.)
    host_filter = or_(and_(models.Instance.host == host,
                           models.Network.multi_host == True),
                      models.Network.host == host)
    result = session.query(models.FixedIp.id).\
                     filter(models.FixedIp.deleted == False).\
                     filter(models.FixedIp.allocated == False).\
                     filter(models.FixedIp.updated_at < time).\
                     join((models.Network,
                           models.Network.id == models.FixedIp.network_id)).\
                     join((models.Instance,
                           models.Instance.uuid == \
                               models.FixedIp.instance_uuid)).\
                     filter(host_filter).\
                     all()
    fixed_ip_ids = [fip[0] for fip in result]
    if not fixed_ip_ids:
        return 0
    # Second query: bulk-clear the rows found above.
    result = model_query(context, models.FixedIp, session=session).\
                     filter(models.FixedIp.id.in_(fixed_ip_ids)).\
                     update({'instance_uuid': None,
                             'leased': False,
                             'updated_at': timeutils.utcnow()},
                            synchronize_session='fetch')
    return result
1163
1164
@require_context
def fixed_ip_get(context, id, session=None):
    """Return a fixed IP by id.

    Raises FixedIpNotFound when absent; non-admin callers must own the
    associated instance's project.
    """
    query = model_query(context, models.FixedIp, session=session)
    result = query.filter_by(id=id).first()
    if not result:
        raise exception.FixedIpNotFound(id=id)

    # FIXME(sirp): shouldn't we just use project_only here to restrict the
    # results?
    if is_user_context(context) and result['instance_uuid'] is not None:
        instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
                                        result['instance_uuid'],
                                        session)
        authorize_project_context(context, instance.project_id)

    return result
1182
1183
@require_admin_context
def fixed_ip_get_all(context, session=None):
    """Return every fixed IP (deleted included); raise if none exist."""
    result = model_query(context, models.FixedIp, session=session,
                         read_deleted="yes").all()
    if not result:
        raise exception.NoFixedIpsDefined()

    return result
1193
1194
@require_context
def fixed_ip_get_by_address(context, address, session=None):
    """Return the fixed IP with the given address.

    Raises FixedIpNotFoundForAddress when absent; non-admin callers must
    own the associated instance's project.
    """
    query = model_query(context, models.FixedIp, session=session)
    result = query.filter_by(address=address).first()
    if not result:
        raise exception.FixedIpNotFoundForAddress(address=address)

    # NOTE(sirp): shouldn't we just use project_only here to restrict the
    # results?
    if is_user_context(context) and result['instance_uuid'] is not None:
        instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
                                        result['instance_uuid'],
                                        session)
        authorize_project_context(context, instance.project_id)

    return result
1212
1213
@require_context
def fixed_ip_get_by_instance(context, instance_uuid):
    """Return all fixed IPs allocated to an instance; raise if none."""
    if not utils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(uuid=instance_uuid)

    query = model_query(context, models.FixedIp, read_deleted="no")
    result = query.filter_by(instance_uuid=instance_uuid).all()

    if not result:
        raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)

    return result
1227
1228
@require_context
def fixed_ip_get_by_network_host(context, network_id, host):
    """Return the first fixed IP on `network_id` assigned to `host`."""
    query = model_query(context, models.FixedIp, read_deleted="no")
    query = query.filter_by(network_id=network_id)
    result = query.filter_by(host=host).first()

    if not result:
        raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
                                                      host=host)
    return result
1240
1241
@require_context
def fixed_ips_by_virtual_interface(context, vif_id):
    """Return the fixed IPs bound to a virtual interface (may be empty)."""
    query = model_query(context, models.FixedIp, read_deleted="no")
    return query.filter_by(virtual_interface_id=vif_id).all()
1249
1250
@require_admin_context
def fixed_ip_get_network(context, address):
    """Return the network the fixed IP at `address` belongs to."""
    fixed_ip = fixed_ip_get_by_address(context, address)
    return fixed_ip.network
1255
1256
@require_context
def fixed_ip_update(context, address, values):
    """Apply column updates to the fixed IP at `address`."""
    session = get_session()
    with session.begin():
        fixed_ip = fixed_ip_get_by_address(context, address,
                                           session=session)
        fixed_ip.update(values)
        fixed_ip.save(session=session)
1266
1267
1268###################
1269
1270
@require_context
def virtual_interface_create(context, values):
    """Create a new virtual interface record.

    :param values: dict containing column values
    :raises: VirtualInterfaceCreateException on integrity errors
    """
    try:
        vif = models.VirtualInterface()
        vif.update(values)
        vif.save()
    except IntegrityError:
        raise exception.VirtualInterfaceCreateException()

    return vif
1285
1286
@require_context
def _virtual_interface_query(context, session=None):
    """Base query over all virtual interfaces, deleted rows included."""
    query = model_query(context, models.VirtualInterface, session=session,
                        read_deleted="yes")
    return query
1291
1292
@require_context
def virtual_interface_get(context, vif_id, session=None):
    """Return the virtual interface with the given id, or None."""
    query = _virtual_interface_query(context, session=session)
    return query.filter_by(id=vif_id).first()
1303
1304
@require_context
def virtual_interface_get_by_address(context, address):
    """Return the virtual interface with the given address, or None."""
    query = _virtual_interface_query(context)
    return query.filter_by(address=address).first()
1315
1316
@require_context
def virtual_interface_get_by_uuid(context, vif_uuid):
    """Return the virtual interface with the given uuid, or None."""
    query = _virtual_interface_query(context)
    return query.filter_by(uuid=vif_uuid).first()
1327
1328
@require_context
@require_instance_exists_using_uuid
def virtual_interface_get_by_instance(context, instance_uuid):
    """Return all virtual interfaces belonging to an instance.

    :param instance_uuid: uuid of the instance to retrieve vifs for
    """
    query = _virtual_interface_query(context)
    return query.filter_by(instance_uuid=instance_uuid).all()
1340
1341
@require_context
def virtual_interface_get_by_instance_and_network(context, instance_uuid,
                                                  network_id):
    """Return the instance's virtual interface on the given network."""
    query = _virtual_interface_query(context)
    query = query.filter_by(instance_uuid=instance_uuid)
    return query.filter_by(network_id=network_id).first()
1351
1352
@require_context
def virtual_interface_delete(context, vif_id):
    """Hard-delete the virtual interface row with the given id."""
    session = get_session()
    vif = virtual_interface_get(context, vif_id, session)
    with session.begin():
        session.delete(vif)
1363
1364
@require_context
def virtual_interface_delete_by_instance(context, instance_uuid):
    """Delete every virtual interface belonging to an instance."""
    for vif in virtual_interface_get_by_instance(context, instance_uuid):
        virtual_interface_delete(context, vif['id'])
1375
1376
@require_context
def virtual_interface_get_all(context):
    """Return every virtual interface record."""
    return _virtual_interface_query(context).all()
1382
1383
1384###################
1385
1386
1387def _metadata_refs(metadata_dict, meta_class):
1388 metadata_refs = []
1389 if metadata_dict:
1390 for k, v in metadata_dict.iteritems():
1391 metadata_ref = meta_class()
1392 metadata_ref['key'] = k
1393 metadata_ref['value'] = v
1394 metadata_refs.append(metadata_ref)
1395 return metadata_refs
1396
1397
@require_context
def instance_create(context, values):
    """Create a new Instance record in the database.

    context - request context object
    values - dict containing column values.
    """
    # Work on a copy so the pops/updates below don't mutate the caller's dict.
    values = values.copy()
    values['metadata'] = _metadata_refs(
        values.get('metadata'), models.InstanceMetadata)

    values['system_metadata'] = _metadata_refs(
        values.get('system_metadata'), models.InstanceSystemMetadata)

    instance_ref = models.Instance()
    # Generate a uuid here so it's available to callers immediately.
    if not values.get('uuid'):
        values['uuid'] = str(utils.gen_uuid())
    instance_ref['info_cache'] = models.InstanceInfoCache()
    info_cache = values.pop('info_cache', None)
    if info_cache is not None:
        instance_ref['info_cache'].update(info_cache)
    # Security groups are not a plain column; they are resolved to model
    # objects and attached inside the transaction below.
    security_groups = values.pop('security_groups', [])
    instance_ref.update(values)

    def _get_sec_group_models(session, security_groups):
        # Resolve requested group names to models, making sure the
        # project's 'default' group exists first.
        models = []
        _existed, default_group = security_group_ensure_default(context,
            session=session)
        if 'default' in security_groups:
            models.append(default_group)
        # Generate a new list, so we don't modify the original
        security_groups = [x for x in security_groups if x != 'default']
        if security_groups:
            models.extend(_security_group_get_by_names(context,
                session, context.project_id, security_groups))
        return models

    session = get_session()
    with session.begin():
        instance_ref.security_groups = _get_sec_group_models(session,
                security_groups)
        instance_ref.save(session=session)
        # NOTE(comstud): This forces instance_type to be loaded so it
        # exists in the ref when we return. Fixes lazy loading issues.
        instance_ref.instance_type

    # create the instance uuid to ec2_id mapping entry for instance
    ec2_instance_create(context, instance_ref['uuid'])

    return instance_ref
1448
1449
@require_admin_context
def instance_data_get_for_project(context, project_id, session=None):
    """Return (instance count, total vcpus, total memory) for a project."""
    result = model_query(context,
                         func.count(models.Instance.id),
                         func.sum(models.Instance.vcpus),
                         func.sum(models.Instance.memory_mb),
                         read_deleted="no",
                         session=session).\
                     filter_by(project_id=project_id).\
                     first()
    count, vcpus, mem = result
    # NOTE(vish): convert None to 0 (SUM over an empty set yields NULL)
    return (count or 0, vcpus or 0, mem or 0)
1462
1463
@require_context
def instance_destroy(context, instance_uuid, constraint=None):
    """Soft-delete an instance and its security-group associations.

    :param instance_uuid: uuid of the instance to delete
    :param constraint: optional constraint object; when given, the delete
                       only happens if the constraint still matches the row
    :raises: InvalidUUID when instance_uuid is not uuid-like
    :raises: ConstraintNotMet when the constrained update matched no rows
    :returns: the instance ref as loaded before the delete
    """
    session = get_session()
    with session.begin():
        if utils.is_uuid_like(instance_uuid):
            instance_ref = instance_get_by_uuid(context, instance_uuid,
                                                session=session)
        else:
            # NOTE(review): InvalidUUID is raised with a positional arg
            # here while other exceptions in this file use keyword args --
            # confirm the exception class accepts this form.
            raise exception.InvalidUUID(instance_uuid)

        query = session.query(models.Instance).\
                        filter_by(uuid=instance_ref['uuid'])
        if constraint is not None:
            query = constraint.apply(models.Instance, query)
        # Soft delete; 'updated_at' is reassigned to its own value so the
        # bulk UPDATE does not bump the timestamp.
        count = query.update({'deleted': True,
                              'deleted_at': timeutils.utcnow(),
                              'updated_at': literal_column('updated_at')})
        if count == 0:
            # Constraint filtered the row out: someone changed it under us.
            raise exception.ConstraintNotMet()
        session.query(models.SecurityGroupInstanceAssociation).\
                filter_by(instance_uuid=instance_ref['uuid']).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})

        instance_info_cache_delete(context, instance_ref['uuid'],
                                   session=session)
    return instance_ref
1492
1493
@require_context
def instance_get_by_uuid(context, uuid, session=None):
    """Fetch one instance by uuid or raise InstanceNotFound."""
    instance = _build_instance_get(context, session=session).\
            filter_by(uuid=uuid).first()
    if instance is None:
        raise exception.InstanceNotFound(instance_id=uuid)
    return instance
1504
1505
@require_context
def instance_get(context, instance_id, session=None):
    """Fetch one instance by integer id or raise InstanceNotFound."""
    instance = _build_instance_get(context, session=session).\
            filter_by(id=instance_id).first()
    if instance is None:
        raise exception.InstanceNotFound(instance_id=instance_id)
    return instance
1516
1517
@require_context
def _build_instance_get(context, session=None):
    """Base query for single-instance lookups with the standard joins."""
    query = model_query(context, models.Instance, session=session,
                        project_only=True)
    query = query.options(joinedload_all('security_groups.rules'))
    query = query.options(joinedload('info_cache'))
    query = query.options(joinedload('metadata'))
    return query.options(joinedload('instance_type'))
1526
1527
@require_admin_context
def instance_get_all(context, columns_to_join=None):
    """Return every instance, eagerly loading the requested relations."""
    joins = columns_to_join
    if joins is None:
        joins = ['info_cache', 'security_groups', 'metadata',
                 'instance_type']
    query = model_query(context, models.Instance)
    for relation in joins:
        query = query.options(joinedload(relation))
    return query.all()
1537
1538
@require_context
def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
                                limit=None, marker=None):
    """Return instances that match all filters. Deleted instances
    will be returned by default, unless there's a filter that says
    otherwise.

    :param filters: dict of filters; exact-match columns are applied in
                    SQL, everything else is applied as a regexp filter
    :param sort_key: instance column name to sort on
    :param sort_dir: 'asc' or 'desc'
    :param limit: optional maximum number of rows to return
    :param marker: optional instance uuid to page after
    :raises: MarkerNotFound when marker does not identify an instance
    """

    sort_fn = {'desc': desc, 'asc': asc}

    session = get_session()
    query_prefix = session.query(models.Instance).\
            options(joinedload('info_cache')).\
            options(joinedload('security_groups')).\
            options(joinedload('metadata')).\
            options(joinedload('instance_type')).\
            order_by(sort_fn[sort_dir](getattr(models.Instance, sort_key)))

    # Make a copy of the filters dictionary to use going forward, as we'll
    # be modifying it and we shouldn't affect the caller's use of it.
    filters = filters.copy()

    if 'changes-since' in filters:
        changes_since = timeutils.normalize_time(filters['changes-since'])
        query_prefix = query_prefix.\
                filter(models.Instance.updated_at > changes_since)

    if 'deleted' in filters:
        # Instances can be soft or hard deleted and the query needs to
        # include or exclude both
        if filters.pop('deleted'):
            deleted = or_(models.Instance.deleted == True,
                          models.Instance.vm_state == vm_states.SOFT_DELETED)
            query_prefix = query_prefix.filter(deleted)
        else:
            query_prefix = query_prefix.\
                    filter_by(deleted=False).\
                    filter(models.Instance.vm_state != vm_states.SOFT_DELETED)

    if not context.is_admin:
        # If we're not admin context, add appropriate filter..
        if context.project_id:
            filters['project_id'] = context.project_id
        else:
            filters['user_id'] = context.user_id

    # Filters for exact matches that we can do along with the SQL query...
    # For other filters that don't match this, we will do regexp matching
    exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
                                'vm_state', 'instance_type_id', 'uuid',
                                'metadata']

    # Filter the query
    query_prefix = exact_filter(query_prefix, models.Instance,
                                filters, exact_match_filter_names)

    query_prefix = regex_filter(query_prefix, models.Instance, filters)

    # paginate query
    if marker is not None:
        try:
            marker = instance_get_by_uuid(context, marker, session=session)
        except exception.InstanceNotFound:
            # Was 'except ... as e' with 'e' never used; translate to the
            # pagination-specific error.
            raise exception.MarkerNotFound(marker)
        query_prefix = paginate_query(query_prefix, models.Instance, limit,
                                      [sort_key, 'created_at', 'id'],
                                      marker=marker,
                                      sort_dir=sort_dir)

    instances = query_prefix.all()
    return instances
1609
1610
def regex_filter(query, model, filters):
    """Applies regular expression filtering to a query.

    Returns the updated query.

    :param query: query to apply filters to
    :param model: model object the query applies to
    :param filters: dictionary of filters with regex values
    """

    regexp_op_map = {
        'postgresql': '~',
        'mysql': 'REGEXP',
        'oracle': 'REGEXP_LIKE',
        'sqlite': 'REGEXP'
    }
    # The backend name is the part of the connection URL before ':' and
    # before any '+driver' suffix, e.g. 'mysql+mysqldb://...' -> 'mysql'.
    db_string = FLAGS.sql_connection.split(':')[0].split('+')[0]
    db_regexp_op = regexp_op_map.get(db_string, 'LIKE')
    for filter_name in filters:
        try:
            column_attr = getattr(model, filter_name)
        except AttributeError:
            continue
        # Plain python properties on the model cannot be filtered in SQL.
        if type(column_attr).__name__ == 'property':
            continue
        regex = str(filters[filter_name])
        query = query.filter(column_attr.op(db_regexp_op)(regex))
    return query
1639
1640
@require_context
def instance_get_active_by_window(context, begin, end=None,
                                  project_id=None, host=None):
    """Return instances that were active during window."""
    session = get_session()
    # Active == never terminated, or terminated after the window began.
    query = session.query(models.Instance).\
            filter(or_(models.Instance.terminated_at == None,
                       models.Instance.terminated_at > begin))
    if end:
        query = query.filter(models.Instance.launched_at < end)
    if project_id:
        query = query.filter_by(project_id=project_id)
    if host:
        query = query.filter_by(host=host)
    return query.all()
1658
1659
@require_admin_context
def instance_get_active_by_window_joined(context, begin, end=None,
                                         project_id=None, host=None):
    """Return instances and joins that were active during window."""
    session = get_session()
    query = session.query(models.Instance)
    for relation in ('info_cache', 'security_groups',
                     'metadata', 'instance_type'):
        query = query.options(joinedload(relation))
    # Active == never terminated, or terminated after the window began.
    query = query.filter(or_(models.Instance.terminated_at == None,
                             models.Instance.terminated_at > begin))
    if end:
        query = query.filter(models.Instance.launched_at < end)
    if project_id:
        query = query.filter_by(project_id=project_id)
    if host:
        query = query.filter_by(host=host)
    return query.all()
1681
1682
@require_admin_context
def _instance_get_all_query(context, project_only=False):
    """Base query for multi-instance lookups with the standard joins."""
    query = model_query(context, models.Instance, project_only=project_only)
    for relation in ('info_cache', 'security_groups',
                     'metadata', 'instance_type'):
        query = query.options(joinedload(relation))
    return query
1690
1691
@require_admin_context
def instance_get_all_by_host(context, host):
    """Return every instance scheduled to the given host."""
    query = _instance_get_all_query(context)
    return query.filter_by(host=host).all()
1695
1696
@require_admin_context
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
    """Return instances on `host` whose type differs from type_id."""
    query = _instance_get_all_query(context).filter_by(host=host)
    return query.filter(models.Instance.instance_type_id != type_id).all()
1701
1702
@require_context
def instance_get_all_by_project(context, project_id):
    """Return all instances belonging to a project."""
    authorize_project_context(context, project_id)
    query = _instance_get_all_query(context)
    return query.filter_by(project_id=project_id).all()
1709
1710
@require_context
def instance_get_all_by_reservation(context, reservation_id):
    """Return all instances launched under one reservation id."""
    query = _instance_get_all_query(context, project_only=True)
    return query.filter_by(reservation_id=reservation_id).all()
1716
1717
1718# NOTE(jkoelker) This is only being left here for compat with floating
1719# ips. Currently the network_api doesn't return floaters
1720# in network_info. Once it starts return the model. This
1721# function and its call in compute/manager.py on 1829 can
1722# go away
@require_context
def instance_get_floating_address(context, instance_id):
    """Return one floating ip address for an instance, or None."""
    instance = instance_get(context, instance_id)
    fixed_ips = fixed_ip_get_by_instance(context, instance['uuid'])
    if not fixed_ips:
        return None

    # NOTE(tr3buchet): this only gets the first fixed_ip
    # won't find floating ips associated with other fixed_ips
    first_fixed = fixed_ips[0]['address']
    floating_ips = floating_ip_get_by_fixed_address(context, first_fixed)
    if not floating_ips:
        return None

    # NOTE(vish): this just returns the first floating ip
    return floating_ips[0]['address']
1739
1740
@require_admin_context
def instance_get_all_hung_in_rebooting(context, reboot_window, session=None):
    """Return instances stuck in 'rebooting' longer than reboot_window
    seconds.
    """
    cutoff = (timeutils.utcnow() -
              datetime.timedelta(seconds=reboot_window))

    if not session:
        session = get_session()

    return session.query(models.Instance).\
            filter(models.Instance.updated_at <= cutoff).\
            filter_by(task_state="rebooting").all()
1754
1755
@require_context
def instance_test_and_set(context, instance_uuid, attr, ok_states,
                          new_state, session=None):
    """Atomically check if an instance is in a valid state, and if it is, set
    the instance into a new state.

    :param attr: name of the instance attribute to test and set
    :param ok_states: acceptable current values of `attr`
    :param new_state: value stored in `attr` when the check passes
    :raises: InvalidUUID, InstanceNotFound, InstanceInvalidState
    """
    if not session:
        session = get_session()

    with session.begin():
        query = model_query(context, models.Instance, session=session,
                            project_only=True)

        if utils.is_uuid_like(instance_uuid):
            query = query.filter_by(uuid=instance_uuid)
        else:
            raise exception.InvalidUUID(instance_uuid)

        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        instance = query.with_lockmode('update').first()
        if instance is None:
            # Previously a missing instance fell through to a TypeError
            # on the subscript below; raise the standard error instead.
            raise exception.InstanceNotFound(instance_id=instance_uuid)

        state = instance[attr]
        if state not in ok_states:
            raise exception.InstanceInvalidState(
                attr=attr,
                instance_uuid=instance['uuid'],
                state=state,
                method='instance_test_and_set')

        instance[attr] = new_state
        instance.save(session=session)
1788
1789
@require_context
def instance_update(context, instance_uuid, values):
    """Apply `values` to an instance and return the updated ref."""
    _old_ref, new_ref = _instance_update(context, instance_uuid, values)
    return new_ref
1794
1795
@require_context
def instance_update_and_get_original(context, instance_uuid, values):
    """Set the given properties on an instance and update it. Return
    a shallow copy of the original instance reference, as well as the
    updated one.

    :param context: = request context object
    :param instance_uuid: = instance uuid
    :param values: = dict containing column values

    If "expected_task_state" exists in values, the update can only happen
    when the task state before update matches expected_task_state. Otherwise
    an UnexpectedTaskStateError is thrown.

    :returns: a tuple of the form (old_instance_ref, new_instance_ref)

    Raises NotFound if instance does not exist.
    """
    return _instance_update(context, instance_uuid, values,
                            copy_old_instance=True)
1816
1817
def _instance_update(context, instance_uuid, values, copy_old_instance=False):
    """Update an instance inside a single transaction.

    :param values: column values to set; 'expected_task_state' (not a
                   column) may be present to guard the update, while
                   'metadata'/'system_metadata' are routed through the
                   metadata helpers instead of being set directly
    :param copy_old_instance: when True, also return a shallow copy of
                              the instance as it was before the update
    :returns: tuple (old_instance_ref or None, updated instance_ref)
    :raises: InvalidUUID, UnexpectedTaskStateError, or NotFound from the
             uuid lookup
    """
    session = get_session()

    if not utils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(instance_uuid)

    with session.begin():
        instance_ref = instance_get_by_uuid(context, instance_uuid,
                                            session=session)
        if "expected_task_state" in values:
            # it is not a db column so always pop out
            expected = values.pop("expected_task_state")
            if not isinstance(expected, (tuple, list, set)):
                expected = (expected,)
            actual_state = instance_ref["task_state"]
            if actual_state not in expected:
                raise exception.UnexpectedTaskStateError(actual=actual_state,
                                                         expected=expected)

        if copy_old_instance:
            # Shallow copy suffices for callers that only diff column values.
            old_instance_ref = copy.copy(instance_ref)
        else:
            old_instance_ref = None

        metadata = values.get('metadata')
        if metadata is not None:
            # Replace (delete=True) the instance metadata with the new dict.
            instance_metadata_update(context, instance_ref['uuid'],
                                     values.pop('metadata'), True,
                                     session=session)

        system_metadata = values.get('system_metadata')
        if system_metadata is not None:
            instance_system_metadata_update(
                context, instance_ref['uuid'], values.pop('system_metadata'),
                delete=True, session=session)

        instance_ref.update(values)
        instance_ref.save(session=session)

    return (old_instance_ref, instance_ref)
1858
1859
# NOTE(review): @require_context added for consistency with
# instance_remove_security_group; every caller already supplies a context.
@require_context
def instance_add_security_group(context, instance_uuid, security_group_id):
    """Associate the given security group with the given instance"""
    session = get_session()
    with session.begin():
        instance_ref = instance_get_by_uuid(context, instance_uuid,
                                            session=session)
        security_group_ref = security_group_get(context,
                                                security_group_id,
                                                session=session)
        instance_ref.security_groups += [security_group_ref]
        instance_ref.save(session=session)
1871
1872
@require_context
def instance_remove_security_group(context, instance_uuid, security_group_id):
    """Disassociate the given security group from the given instance"""
    session = get_session()
    instance_ref = instance_get_by_uuid(context, instance_uuid,
                                        session=session)
    assoc_query = session.query(models.SecurityGroupInstanceAssociation).\
            filter_by(instance_uuid=instance_ref['uuid']).\
            filter_by(security_group_id=security_group_id)
    # Soft delete; 'updated_at' is reassigned to its own value so the bulk
    # UPDATE does not bump the timestamp.
    assoc_query.update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
1885
1886
1887###################
1888
1889
@require_context
def instance_info_cache_create(context, values):
    """Create a new instance cache record in the table.

    :param context: = request context object
    :param values: = dict containing column values
    """
    cache_ref = models.InstanceInfoCache()
    cache_ref.update(values)

    session = get_session()
    with session.begin():
        cache_ref.save(session=session)
    return cache_ref
1904
1905
@require_context
def instance_info_cache_get(context, instance_uuid, session=None):
    """Return an instance's info cache row, or None.

    :param instance_uuid: = uuid of the info cache's instance
    :param session: = optional session object
    """
    if not session:
        session = get_session()

    query = session.query(models.InstanceInfoCache).\
            filter_by(instance_uuid=instance_uuid)
    return query.first()
1919
1920
@require_context
def instance_info_cache_update(context, instance_uuid, values,
                               session=None):
    """Update an instance info cache record in the table.

    :param instance_uuid: = uuid of info cache's instance
    :param values: = dict containing column values to update
    :param session: = optional session object
    """
    if not session:
        session = get_session()
    info_cache = instance_info_cache_get(context, instance_uuid,
                                         session=session)
    if not info_cache:
        # NOTE(tr3buchet): just in case someone blows away an instance's
        #                  cache entry, recreate it
        values['instance_uuid'] = instance_uuid
        return instance_info_cache_create(context, values)

    # NOTE(tr3buchet): let's leave it alone if it's already deleted
    if info_cache['deleted']:
        return info_cache

    info_cache.update(values)
    info_cache.save(session=session)
    return info_cache
1947
1948
@require_context
def instance_info_cache_delete(context, instance_uuid, session=None):
    """Soft-delete an existing instance_info_cache record.

    :param instance_uuid: = uuid of the instance tied to the cache record
    :param session: = optional session object
    """
    instance_info_cache_update(context, instance_uuid,
                               {'deleted': True,
                                'deleted_at': timeutils.utcnow()},
                               session)
1959
1960
1961###################
1962
1963
@require_context
def key_pair_create(context, values):
    """Create a key pair from the given column values and return it."""
    key_pair = models.KeyPair()
    key_pair.update(values)
    key_pair.save()
    return key_pair
1970
1971
@require_context
def key_pair_destroy(context, user_id, name):
    """Delete the named key pair belonging to a user."""
    authorize_user_context(context, user_id)
    session = get_session()
    with session.begin():
        target = key_pair_get(context, user_id, name, session=session)
        target.delete(session=session)
1979
1980
@require_context
def key_pair_destroy_all_by_user(context, user_id):
    """Soft-delete every key pair owned by a user."""
    authorize_user_context(context, user_id)
    session = get_session()
    with session.begin():
        # 'updated_at' is reassigned to its own value so the bulk UPDATE
        # does not bump the timestamp.
        session.query(models.KeyPair).\
                filter_by(user_id=user_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
1991
1992
@require_context
def key_pair_get(context, user_id, name, session=None):
    """Return the named key pair for a user or raise KeypairNotFound."""
    authorize_user_context(context, user_id)
    key_pair = model_query(context, models.KeyPair, session=session).\
            filter_by(user_id=user_id).\
            filter_by(name=name).\
            first()

    if key_pair is None:
        raise exception.KeypairNotFound(user_id=user_id, name=name)
    return key_pair
2005
2006
@require_context
def key_pair_get_all_by_user(context, user_id):
    """List every non-deleted key pair owned by a user."""
    authorize_user_context(context, user_id)
    query = model_query(context, models.KeyPair, read_deleted="no")
    return query.filter_by(user_id=user_id).all()
2013
2014
# NOTE(review): @require_context added for consistency with the other
# key_pair_* entry points above, which all carry it.
@require_context
def key_pair_count_by_user(context, user_id):
    """Count the non-deleted key pairs owned by a user.

    :param user_id: id of the owning user
    :returns: integer count
    """
    authorize_user_context(context, user_id)
    return model_query(context, models.KeyPair, read_deleted="no").\
                   filter_by(user_id=user_id).\
                   count()
2020
2021
2022###################
2023
2024
@require_admin_context
def network_associate(context, project_id, network_id=None, force=False):
    """Associate a project with a network.

    called by project_get_networks under certain conditions
    and network manager add_network_to_project()

    only associate if the project doesn't already have a network
    or if force is True

    force solves race condition where a fresh project has multiple instance
    builds simultaneously picked up by multiple network hosts which attempt
    to associate the project with multiple networks
    force should only be used as a direct consequence of user request
    all automated requests should not use force

    :raises: db.NoMoreNetworks when no unassociated network is available
    """
    session = get_session()
    with session.begin():

        def network_query(project_filter, id=None):
            # Row-locked lookup of one non-deleted network matching
            # project_filter (and optionally a specific network id).
            filter_kwargs = {'project_id': project_filter}
            if id is not None:
                filter_kwargs['id'] = id
            return model_query(context, models.Network, session=session,
                              read_deleted="no").\
                           filter_by(**filter_kwargs).\
                           with_lockmode('update').\
                           first()

        if not force:
            # find out if project has a network
            network_ref = network_query(project_id)

        if force or not network_ref:
            # in force mode or project doesn't have a network so associate
            # with a new network

            # get new network
            network_ref = network_query(None, network_id)
            if not network_ref:
                raise db.NoMoreNetworks()

            # associate with network
            # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
            #             then this has concurrency issues
            network_ref['project_id'] = project_id
            session.add(network_ref)
    return network_ref
2073
2074
@require_admin_context
def network_count(context):
    """Count all network rows visible to this context."""
    return model_query(context, models.Network).count()
2078
2079
@require_admin_context
def _network_ips_query(context, network_id):
    """Base query over the non-deleted fixed ips of one network."""
    query = model_query(context, models.FixedIp, read_deleted="no")
    return query.filter_by(network_id=network_id)
2084
2085
@require_admin_context
def network_count_reserved_ips(context, network_id):
    """Count the reserved fixed ips on one network."""
    query = _network_ips_query(context, network_id)
    return query.filter_by(reserved=True).count()
2091
2092
@require_admin_context
def network_create_safe(context, values):
    """Create a network, returning None when a racing insert wins.

    :raises: DuplicateVlan when an existing network already uses the vlan
    """
    vlan = values.get('vlan')
    if vlan:
        duplicate = model_query(context, models.Network, read_deleted="no").\
                filter_by(vlan=vlan).\
                first()
        if duplicate:
            raise exception.DuplicateVlan(vlan=vlan)

    network_ref = models.Network()
    network_ref['uuid'] = str(utils.gen_uuid())
    network_ref.update(values)

    try:
        network_ref.save()
    except IntegrityError:
        # A concurrent create inserted the same unique value first.
        return None
    return network_ref
2110
2111
@require_admin_context
def network_delete_safe(context, network_id):
    """Delete a network, refusing while any of its fixed ips are allocated.

    :raises: NetworkInUse when an allocated, non-deleted fixed ip exists
    """
    session = get_session()
    with session.begin():
        result = session.query(models.FixedIp).\
                filter_by(network_id=network_id).\
                filter_by(deleted=False).\
                filter_by(allocated=True).\
                all()
        if result:
            raise exception.NetworkInUse(network_id=network_id)
        network_ref = network_get(context, network_id=network_id,
                                  session=session)
        # Soft-delete the network's remaining fixed ips; 'updated_at' is
        # reassigned to itself so the bulk UPDATE does not bump it.
        session.query(models.FixedIp).\
                filter_by(network_id=network_id).\
                filter_by(deleted=False).\
                update({'deleted': True,
                        'updated_at': literal_column('updated_at'),
                        'deleted_at': timeutils.utcnow()})
        # The network row itself is hard-deleted.
        session.delete(network_ref)
2132
2133
@require_admin_context
def network_disassociate(context, network_id):
    """Detach a network from both its project and its host."""
    network_update(context, network_id, {'host': None,
                                         'project_id': None})
2138
2139
@require_context
def network_get(context, network_id, session=None, project_only='allow_none'):
    """Fetch one network by id or raise NetworkNotFound."""
    network = model_query(context, models.Network, session=session,
                          project_only=project_only).\
            filter_by(id=network_id).\
            first()

    if network is None:
        raise exception.NetworkNotFound(network_id=network_id)
    return network
2151
2152
@require_context
def network_get_all(context):
    """Return every non-deleted network or raise NoNetworksFound."""
    networks = model_query(context, models.Network, read_deleted="no").all()
    if not networks:
        raise exception.NoNetworksFound()
    return networks
2161
2162
@require_context
def network_get_all_by_uuids(context, network_uuids,
                             project_only="allow_none"):
    """Return the networks for the given uuids, requiring all to exist.

    :param network_uuids: list of network uuids to fetch
    :param project_only: passed through to model_query to scope the lookup
    :raises: NoNetworksFound when nothing matches at all;
             NetworkNotFoundForProject or NetworkNotFound when one of the
             requested uuids is missing from the result
    """
    result = model_query(context, models.Network, read_deleted="no",
                         project_only=project_only).\
            filter(models.Network.uuid.in_(network_uuids)).\
            all()

    if not result:
        raise exception.NoNetworksFound()

    #check if the result contains all the networks
    #we are looking for
    for network_uuid in network_uuids:
        found = False
        for network in result:
            if network['uuid'] == network_uuid:
                found = True
                break
        if not found:
            # NOTE(review): project_only defaults to the string
            # "allow_none", which is truthy, so this branch almost always
            # reports NetworkNotFoundForProject -- confirm that's intended.
            if project_only:
                raise exception.NetworkNotFoundForProject(
                    network_uuid=network_uuid, project_id=context.project_id)
            raise exception.NetworkNotFound(network_id=network_uuid)

    return result
2189
2190# NOTE(vish): pylint complains because of the long method name, but
2191# it fits with the names of the rest of the methods
2192# pylint: disable=C0103
2193
2194
@require_admin_context
def network_get_associated_fixed_ips(context, network_id, host=None):
    """Return dicts describing the allocated fixed ips of one network."""
    # FIXME(sirp): since this returns fixed_ips, this would be better named
    # fixed_ip_get_all_by_network.
    # NOTE(vish): The ugly joins here are to solve a performance issue and
    # should be removed once we can add and remove leases
    # without regenerating the whole list
    vif_and = and_(models.VirtualInterface.id ==
                   models.FixedIp.virtual_interface_id,
                   models.VirtualInterface.deleted == False)
    inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid,
                    models.Instance.deleted == False)
    session = get_session()
    query = session.query(models.FixedIp.address,
                          models.FixedIp.instance_uuid,
                          models.FixedIp.network_id,
                          models.FixedIp.virtual_interface_id,
                          models.VirtualInterface.address,
                          models.Instance.hostname,
                          models.Instance.updated_at,
                          models.Instance.created_at).\
            filter(models.FixedIp.deleted == False).\
            filter(models.FixedIp.network_id == network_id).\
            filter(models.FixedIp.allocated == True).\
            join((models.VirtualInterface, vif_and)).\
            join((models.Instance, inst_and)).\
            filter(models.FixedIp.instance_uuid != None).\
            filter(models.FixedIp.virtual_interface_id != None)
    if host:
        query = query.filter(models.Instance.host == host)
    # Flatten each row tuple into a plain dict keyed by friendly names,
    # matching the column order selected above.
    keys = ('address', 'instance_uuid', 'network_id', 'vif_id',
            'vif_address', 'instance_hostname', 'instance_updated',
            'instance_created')
    return [dict(zip(keys, datum)) for datum in query.all()]
2239
2240
@require_admin_context
def _network_get_query(context, session=None):
    """Base query over non-deleted networks."""
    return model_query(context, models.Network, session=session,
                       read_deleted="no")
2245
2246
@require_admin_context
def network_get_by_bridge(context, bridge):
    """Return the network using the given bridge, else raise."""
    network = _network_get_query(context).filter_by(bridge=bridge).first()
    if network is None:
        raise exception.NetworkNotFoundForBridge(bridge=bridge)
    return network
2255
2256
@require_admin_context
def network_get_by_uuid(context, uuid):
    """Return the network with the given uuid, else raise."""
    network = _network_get_query(context).filter_by(uuid=uuid).first()
    if network is None:
        raise exception.NetworkNotFoundForUUID(uuid=uuid)
    return network
2265
2266
@require_admin_context
def network_get_by_cidr(context, cidr):
    """Return the network whose v4 or v6 cidr matches, else raise."""
    cidr_match = or_(models.Network.cidr == cidr,
                     models.Network.cidr_v6 == cidr)
    network = _network_get_query(context).filter(cidr_match).first()
    if network is None:
        raise exception.NetworkNotFoundForCidr(cidr=cidr)
    return network
2278
2279
@require_admin_context
def network_get_by_instance(context, instance_id):
    # note this uses fixed IP to get to instance
    # only works for networks the instance has an IP from
    network = _network_get_query(context).\
            filter_by(instance_id=instance_id).first()
    if network is None:
        raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
    return network
2292
2293
@require_admin_context
def network_get_all_by_instance(context, instance_id):
    """Return all networks associated with an instance, else raise."""
    networks = _network_get_query(context).\
            filter_by(instance_id=instance_id).all()
    if not networks:
        raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
    return networks
2304
2305
@require_admin_context
def network_get_all_by_host(context, host):
    """Return the networks hosted by `host`, directly or via fixed ips."""
    session = get_session()
    fixed_ip_query = model_query(context, models.FixedIp.network_id,
                                 session=session).\
            filter(models.FixedIp.host == host)
    # NOTE(vish): return networks that have host set
    # or that have a fixed ip with host set
    host_filter = or_(models.Network.host == host,
                      models.Network.id.in_(fixed_ip_query.subquery()))
    return _network_get_query(context, session=session).\
            filter(host_filter).all()
2319
2320
@require_admin_context
def network_set_host(context, network_id, host_id):
    """Claim a network for host_id unless it is already hosted.

    :returns: the network's host (the existing one when already claimed)
    :raises: NetworkNotFound when the network does not exist
    """
    session = get_session()
    with session.begin():
        network_ref = _network_get_query(context, session=session).\
                filter_by(id=network_id).\
                with_lockmode('update').\
                first()

        if not network_ref:
            raise exception.NetworkNotFound(network_id=network_id)

        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        # then this has concurrency issues
        if not network_ref['host']:
            # Unclaimed: take it for this host.
            network_ref['host'] = host_id
            session.add(network_ref)

    return network_ref['host']
2340
2341
@require_context
def network_update(context, network_id, values):
    """Apply `values` to a network and return the updated ref."""
    session = get_session()
    with session.begin():
        network_ref = network_get(context, network_id, session=session)
        network_ref.update(values)
        network_ref.save(session=session)
        return network_ref
2350
2351
2352###################
2353
2354
@require_admin_context
def iscsi_target_count_by_host(context, host):
    """Return the number of iSCSI targets allocated on *host*."""
    query = model_query(context, models.IscsiTarget).filter_by(host=host)
    return query.count()
2360
2361
@require_admin_context
def iscsi_target_create_safe(context, values):
    """Create an iSCSI target row; return None on a uniqueness clash."""
    target = models.IscsiTarget()
    for key, value in values.iteritems():
        target[key] = value

    try:
        target.save()
    except IntegrityError:
        # Another worker created the same target concurrently.
        return None
    return target
2373
2374
2375###################
2376
2377
@require_context
def quota_get(context, project_id, resource, session=None):
    """Fetch the quota row for (project, resource), or raise."""
    quota = (model_query(context, models.Quota, session=session,
                         read_deleted="no").
             filter_by(project_id=project_id).
             filter_by(resource=resource).
             first())

    if not quota:
        raise exception.ProjectQuotaNotFound(project_id=project_id)

    return quota
2390
2391
@require_context
def quota_get_all_by_project(context, project_id):
    """Return {'project_id': ..., <resource>: <hard_limit>, ...}."""
    authorize_project_context(context, project_id)

    rows = (model_query(context, models.Quota, read_deleted="no").
            filter_by(project_id=project_id).
            all())

    quotas = {'project_id': project_id}
    quotas.update((row.resource, row.hard_limit) for row in rows)
    return quotas
2405
2406
@require_admin_context
def quota_create(context, project_id, resource, limit):
    """Create and persist a quota row; return it."""
    quota = models.Quota()
    quota.update({'project_id': project_id,
                  'resource': resource,
                  'hard_limit': limit})
    quota.save()
    return quota
2415
2416
@require_admin_context
def quota_update(context, project_id, resource, limit):
    """Replace the hard limit of an existing quota row."""
    session = get_session()
    with session.begin():
        quota = quota_get(context, project_id, resource, session=session)
        quota.hard_limit = limit
        quota.save(session=session)
2424
2425
@require_admin_context
def quota_destroy(context, project_id, resource):
    """Soft-delete the quota row for (project, resource)."""
    session = get_session()
    with session.begin():
        quota = quota_get(context, project_id, resource, session=session)
        quota.delete(session=session)
2432
2433
2434###################
2435
2436
@require_context
def quota_class_get(context, class_name, resource, session=None):
    """Fetch the quota-class row for (class, resource), or raise."""
    row = (model_query(context, models.QuotaClass, session=session,
                       read_deleted="no").
           filter_by(class_name=class_name).
           filter_by(resource=resource).
           first())

    if not row:
        raise exception.QuotaClassNotFound(class_name=class_name)

    return row
2449
2450
@require_context
def quota_class_get_all_by_name(context, class_name):
    """Return {'class_name': ..., <resource>: <hard_limit>, ...}."""
    authorize_quota_class_context(context, class_name)

    rows = (model_query(context, models.QuotaClass, read_deleted="no").
            filter_by(class_name=class_name).
            all())

    limits = {'class_name': class_name}
    limits.update((row.resource, row.hard_limit) for row in rows)
    return limits
2464
2465
@require_admin_context
def quota_class_create(context, class_name, resource, limit):
    """Create and persist a quota-class row; return it."""
    quota_class = models.QuotaClass()
    quota_class.update({'class_name': class_name,
                        'resource': resource,
                        'hard_limit': limit})
    quota_class.save()
    return quota_class
2474
2475
@require_admin_context
def quota_class_update(context, class_name, resource, limit):
    """Replace the hard limit of an existing quota-class row."""
    session = get_session()
    with session.begin():
        quota_class = quota_class_get(context, class_name, resource,
                                      session=session)
        quota_class.hard_limit = limit
        quota_class.save(session=session)
2484
2485
@require_admin_context
def quota_class_destroy(context, class_name, resource):
    """Soft-delete the quota-class row for (class, resource)."""
    session = get_session()
    with session.begin():
        quota_class = quota_class_get(context, class_name, resource,
                                      session=session)
        quota_class.delete(session=session)
2493
2494
@require_admin_context
def quota_class_destroy_all_by_name(context, class_name):
    """Soft-delete every quota-class row belonging to *class_name*."""
    session = get_session()
    with session.begin():
        rows = (model_query(context, models.QuotaClass,
                            session=session, read_deleted="no").
                filter_by(class_name=class_name).
                all())

        for row in rows:
            row.delete(session=session)
2506
2507
2508###################
2509
2510
@require_context
def quota_usage_get(context, project_id, resource, session=None):
    """Fetch the quota-usage row for (project, resource), or raise."""
    usage = (model_query(context, models.QuotaUsage, session=session,
                         read_deleted="no").
             filter_by(project_id=project_id).
             filter_by(resource=resource).
             first())

    if not usage:
        raise exception.QuotaUsageNotFound(project_id=project_id)

    return usage
2523
2524
@require_context
def quota_usage_get_all_by_project(context, project_id):
    """Return per-resource usage dicts keyed by resource name.

    Result shape: {'project_id': ...,
                   <resource>: {'in_use': n, 'reserved': m}, ...}
    """
    authorize_project_context(context, project_id)

    rows = (model_query(context, models.QuotaUsage, read_deleted="no").
            filter_by(project_id=project_id).
            all())

    usages = {'project_id': project_id}
    for usage in rows:
        usages[usage.resource] = {'in_use': usage.in_use,
                                  'reserved': usage.reserved}
    return usages
2538
2539
@require_admin_context
def quota_usage_create(context, project_id, resource, in_use, reserved,
                       until_refresh, session=None):
    """Create and persist a quota-usage row; return it."""
    usage = models.QuotaUsage()
    usage.update({'project_id': project_id,
                  'resource': resource,
                  'in_use': in_use,
                  'reserved': reserved,
                  'until_refresh': until_refresh})
    usage.save(session=session)
    return usage
2552
2553
@require_admin_context
def quota_usage_update(context, project_id, resource, in_use, reserved,
                       until_refresh, session=None):
    """Overwrite the usage counters for (project, resource).

    If *session* is supplied, the caller is assumed to already be in a
    transaction; otherwise a new one is opened here.
    """
    def _apply(sess):
        usage = quota_usage_get(context, project_id, resource, session=sess)
        usage.in_use = in_use
        usage.reserved = reserved
        usage.until_refresh = until_refresh
        usage.save(session=sess)

    if session:
        _apply(session)
    else:
        session = get_session()
        with session.begin():
            _apply(session)
2572
2573
@require_admin_context
def quota_usage_destroy(context, project_id, resource):
    """Soft-delete the quota-usage row for (project, resource)."""
    session = get_session()
    with session.begin():
        usage = quota_usage_get(context, project_id, resource,
                                session=session)
        usage.delete(session=session)
2581
2582
2583###################
2584
2585
@require_context
def reservation_get(context, uuid, session=None):
    """Fetch the reservation row identified by *uuid*, or raise."""
    reservation = (model_query(context, models.Reservation, session=session,
                               read_deleted="no").
                   filter_by(uuid=uuid).
                   first())

    if not reservation:
        raise exception.ReservationNotFound(uuid=uuid)

    return reservation
2597
2598
@require_context
def reservation_get_all_by_project(context, project_id):
    """Return the project's reservations keyed by resource then uuid.

    Result shape: {'project_id': ...,
                   <resource>: {<reservation uuid>: <delta>, ...}, ...}
    """
    authorize_project_context(context, project_id)

    # Query Reservation, not QuotaUsage: the rows below are read for
    # their .uuid and .delta attributes, which only exist on the
    # reservations table (the original QuotaUsage query was a
    # copy-paste error from quota_usage_get_all_by_project).
    rows = model_query(context, models.Reservation, read_deleted="no").\
                   filter_by(project_id=project_id).\
                   all()

    result = {'project_id': project_id}
    for row in rows:
        result.setdefault(row.resource, {})
        result[row.resource][row.uuid] = row.delta

    return result
2613
2614
@require_admin_context
def reservation_create(context, uuid, usage, project_id, resource, delta,
                       expire, session=None):
    """Persist a new reservation tied to the given usage row; return it."""
    reservation = models.Reservation()
    reservation.update({'uuid': uuid,
                        'usage_id': usage['id'],
                        'project_id': project_id,
                        'resource': resource,
                        'delta': delta,
                        'expire': expire})
    reservation.save(session=session)
    return reservation
2627
2628
@require_admin_context
def reservation_destroy(context, uuid):
    """Soft-delete the reservation identified by *uuid*."""
    session = get_session()
    with session.begin():
        reservation = reservation_get(context, uuid, session=session)
        reservation.delete(session=session)
2635
2636
2637###################
2638
2639
2640# NOTE(johannes): The quota code uses SQL locking to ensure races don't
2641# cause under or over counting of resources. To avoid deadlocks, this
2642# code always acquires the lock on quota_usages before acquiring the lock
2643# on reservations.
2644
def _get_quota_usages(context, session):
    """Load and row-lock all usage rows for the context's project.

    Broken out from quota_reserve for testability. Returns a dict
    keyed by resource name.
    """
    rows = (model_query(context, models.QuotaUsage,
                        read_deleted="no",
                        session=session).
            filter_by(project_id=context.project_id).
            with_lockmode('update').
            all())
    return dict((row.resource, row) for row in rows)
2654
2655
@require_context
def quota_reserve(context, resources, quotas, deltas, expire,
                  until_refresh, max_age):
    """Reserve quota for the requested resource deltas.

    Usage records are refreshed (via each resource's sync routine) when
    missing, desynced (negative in_use), counted down to refresh, or
    older than *max_age* seconds. Positive deltas are then checked
    against *quotas* and one reservation row is created per delta.

    :returns: list of reservation uuids
    :raises exception.OverQuota: if any positive delta would exceed its
        quota (refreshed usage updates are still committed first).
    """
    elevated = context.elevated()
    session = get_session()
    with session.begin():
        # Get the current usages
        usages = _get_quota_usages(context, session)

        # Handle usage refresh
        work = set(deltas.keys())
        while work:
            resource = work.pop()

            # Do we need to refresh the usage?
            refresh = False
            if resource not in usages:
                usages[resource] = quota_usage_create(elevated,
                                                      context.project_id,
                                                      resource,
                                                      0, 0,
                                                      until_refresh or None,
                                                      session=session)
                refresh = True
            elif usages[resource].in_use < 0:
                # Negative in_use count indicates a desync, so try to
                # heal from that...
                refresh = True
            elif usages[resource].until_refresh is not None:
                usages[resource].until_refresh -= 1
                if usages[resource].until_refresh <= 0:
                    refresh = True
            elif max_age and (timeutils.utcnow() -
                    usages[resource].updated_at).seconds >= max_age:
                # NOTE: the age must be computed as now - updated_at.
                # The reversed subtraction yields a negative timedelta,
                # which normalizes to (days=-1, seconds=86400-s), so
                # its .seconds attribute is a bogus large value that
                # made this branch misbehave.
                refresh = True

            # OK, refresh the usage
            if refresh:
                # Grab the sync routine
                sync = resources[resource].sync

                updates = sync(elevated, context.project_id, session)
                for res, in_use in updates.items():
                    # Make sure we have a destination for the usage!
                    if res not in usages:
                        usages[res] = quota_usage_create(elevated,
                                                         context.project_id,
                                                         res,
                                                         0, 0,
                                                         until_refresh or None,
                                                         session=session)

                    # Update the usage
                    usages[res].in_use = in_use
                    usages[res].until_refresh = until_refresh or None

                    # Because more than one resource may be refreshed
                    # by the call to the sync routine, and we don't
                    # want to double-sync, we make sure all refreshed
                    # resources are dropped from the work set.
                    work.discard(res)

                    # NOTE(Vek): We make the assumption that the sync
                    #            routine actually refreshes the
                    #            resources that it is the sync routine
                    #            for.  We don't check, because this is
                    #            a best-effort mechanism.

        # Check for deltas that would go negative
        unders = [resource for resource, delta in deltas.items()
                  if delta < 0 and
                  delta + usages[resource].in_use < 0]

        # Now, let's check the quotas
        # NOTE(Vek): We're only concerned about positive increments.
        #            If a project has gone over quota, we want them to
        #            be able to reduce their usage without any
        #            problems.
        overs = [resource for resource, delta in deltas.items()
                 if quotas[resource] >= 0 and delta >= 0 and
                 quotas[resource] < delta + usages[resource].total]

        # NOTE(Vek): The quota check needs to be in the transaction,
        #            but the transaction doesn't fail just because
        #            we're over quota, so the OverQuota raise is
        #            outside the transaction.  If we did the raise
        #            here, our usage updates would be discarded, but
        #            they're not invalidated by being over-quota.

        # Create the reservations
        if not overs:
            reservations = []
            for resource, delta in deltas.items():
                reservation = reservation_create(elevated,
                                                 str(utils.gen_uuid()),
                                                 usages[resource],
                                                 context.project_id,
                                                 resource, delta, expire,
                                                 session=session)
                reservations.append(reservation.uuid)

                # Also update the reserved quantity
                # NOTE(Vek): Again, we are only concerned here about
                #            positive increments.  Here, though, we're
                #            worried about the following scenario:
                #
                #            1) User initiates resize down.
                #            2) User allocates a new instance.
                #            3) Resize down fails or is reverted.
                #            4) User is now over quota.
                #
                #            To prevent this, we only update the
                #            reserved value if the delta is positive.
                if delta > 0:
                    usages[resource].reserved += delta

        # Apply updates to the usages table
        for usage_ref in usages.values():
            usage_ref.save(session=session)

    if unders:
        LOG.warning(_("Change will make usage less than 0 for the following "
                      "resources: %(unders)s") % locals())
    if overs:
        usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved']))
                      for k, v in usages.items())
        raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
                                  usages=usages)

    return reservations
2786
2787
def _quota_reservations(session, context, reservations):
    """Load and row-lock the reservation rows for the given uuids."""
    query = model_query(context, models.Reservation,
                        read_deleted="no",
                        session=session)
    return (query.
            filter(models.Reservation.uuid.in_(reservations)).
            with_lockmode('update').
            all())
2798
2799
@require_context
def reservation_commit(context, reservations):
    """Commit reservations: fold their deltas into in_use counts."""
    session = get_session()
    with session.begin():
        # Lock usages before reservations (deadlock-avoidance order).
        usages = _get_quota_usages(context, session)

        for reservation in _quota_reservations(session, context,
                                               reservations):
            usage = usages[reservation.resource]
            # Only positive deltas were added to 'reserved', so only
            # those are given back here; in_use always moves.
            if reservation.delta >= 0:
                usage.reserved -= reservation.delta
            usage.in_use += reservation.delta
            reservation.delete(session=session)

        for usage in usages.values():
            usage.save(session=session)
2816
2817
@require_context
def reservation_rollback(context, reservations):
    """Cancel reservations: release their reserved headroom."""
    session = get_session()
    with session.begin():
        # Lock usages before reservations (deadlock-avoidance order).
        usages = _get_quota_usages(context, session)

        for reservation in _quota_reservations(session, context,
                                               reservations):
            usage = usages[reservation.resource]
            # Only positive deltas were added to 'reserved'.
            if reservation.delta >= 0:
                usage.reserved -= reservation.delta
            reservation.delete(session=session)

        for usage in usages.values():
            usage.save(session=session)
2833
2834
@require_admin_context
def quota_destroy_all_by_project(context, project_id):
    """Soft-delete all quota, usage and reservation rows of a project."""
    session = get_session()
    with session.begin():
        # Same order as before: quotas, then usages, then reservations.
        for model in (models.Quota, models.QuotaUsage, models.Reservation):
            rows = (model_query(context, model, session=session,
                                read_deleted="no").
                    filter_by(project_id=project_id).
                    all())
            for row in rows:
                row.delete(session=session)
2862
2863
@require_admin_context
def reservation_expire(context):
    """Release every reservation whose expiry time has passed."""
    session = get_session()
    with session.begin():
        now = timeutils.utcnow()
        expired = (model_query(context, models.Reservation, session=session,
                               read_deleted="no").
                   filter(models.Reservation.expire < now).
                   all())

        for reservation in expired:
            # Give back the reserved headroom (only positive deltas
            # were ever added to it) before dropping the row.
            if reservation.delta >= 0:
                reservation.usage.reserved -= reservation.delta
                reservation.usage.save(session=session)

            reservation.delete(session=session)
2881
2882
2883###################
2884
2885
@require_admin_context
def volume_allocate_iscsi_target(context, volume_id, host):
    """Claim a free iSCSI target on *host*; return its target number."""
    session = get_session()
    with session.begin():
        target = (model_query(context, models.IscsiTarget, session=session,
                              read_deleted="no").
                  filter_by(volume=None).
                  filter_by(host=host).
                  with_lockmode('update').
                  first())

        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not target:
            raise db.NoMoreTargets()

        target.volume_id = volume_id
        session.add(target)

    return target.target_num
2906
2907
@require_admin_context
def volume_attached(context, volume_id, instance_uuid, mountpoint):
    """Mark a volume as attached to *instance_uuid* at *mountpoint*."""
    if not utils.is_uuid_like(instance_uuid):
        # Pass the offending value as the 'uuid' kwarg so the
        # exception's "%(uuid)s" message template formats correctly;
        # a positional argument would replace the whole message.
        raise exception.InvalidUUID(uuid=instance_uuid)

    session = get_session()
    with session.begin():
        volume_ref = volume_get(context, volume_id, session=session)
        volume_ref['status'] = 'in-use'
        volume_ref['mountpoint'] = mountpoint
        volume_ref['attach_status'] = 'attached'
        volume_ref['instance_uuid'] = instance_uuid
        volume_ref['attach_time'] = timeutils.utcnow()
        volume_ref.save(session=session)
2922
2923
@require_context
def volume_create(context, values):
    """Create a volume (and its metadata rows); return the fresh row."""
    values['volume_metadata'] = _metadata_refs(values.get('metadata'),
                                               models.VolumeMetadata)
    if not values.get('id'):
        values['id'] = str(utils.gen_uuid())

    volume_ref = models.Volume()
    volume_ref.update(values)

    session = get_session()
    with session.begin():
        volume_ref.save(session=session)

    # Re-read so the joined metadata/type relationships are loaded.
    return volume_get(context, values['id'], session=session)
2938
2939
@require_admin_context
def volume_data_get_for_project(context, project_id, session=None):
    """Return (volume count, total size) for *project_id*."""
    row = (model_query(context,
                       func.count(models.Volume.id),
                       func.sum(models.Volume.size),
                       read_deleted="no",
                       session=session).
           filter_by(project_id=project_id).
           first())

    # NOTE(vish): convert None to 0
    return (row[0] or 0, row[1] or 0)
2952
2953
@require_admin_context
def volume_destroy(context, volume_id):
    """Soft-delete a volume and its metadata; free its iSCSI target."""
    session = get_session()
    with session.begin():
        volume_ref = volume_get(context, volume_id, session=session)
        (session.query(models.Volume).
         filter_by(id=volume_id).
         update({'deleted': True,
                 'deleted_at': timeutils.utcnow(),
                 'updated_at': literal_column('updated_at')}))
        (session.query(models.IscsiTarget).
         filter_by(volume_id=volume_id).
         update({'volume_id': None}))
        (session.query(models.VolumeMetadata).
         filter_by(volume_id=volume_id).
         update({'deleted': True,
                 'deleted_at': timeutils.utcnow(),
                 'updated_at': literal_column('updated_at')}))
        return volume_ref
2973
2974
@require_admin_context
def volume_detached(context, volume_id):
    """Mark a volume as detached and available again."""
    session = get_session()
    with session.begin():
        volume_ref = volume_get(context, volume_id, session=session)
        volume_ref['status'] = 'available'
        volume_ref['mountpoint'] = None
        volume_ref['attach_status'] = 'detached'
        volume_ref['instance_uuid'] = None
        volume_ref.save(session=session)
2985
2986
@require_context
def _volume_get_query(context, session=None, project_only=False):
    """Base volume query with metadata and type eagerly joined."""
    query = model_query(context, models.Volume, session=session,
                        project_only=project_only)
    return (query.
            options(joinedload('volume_metadata')).
            options(joinedload('volume_type')))
2993
2994
@require_context
def _ec2_volume_get_query(context, session=None):
    """Base query over the EC2 volume id <-> uuid mapping table."""
    return model_query(context, models.VolumeIdMapping, session=session)
2998
2999
@require_context
def _ec2_snapshot_get_query(context, session=None):
    """Base query over the EC2 snapshot id <-> uuid mapping table."""
    return model_query(context, models.SnapshotIdMapping, session=session)
3003
3004
@require_context
def volume_get(context, volume_id, session=None):
    """Fetch a volume visible to the context's project, or raise."""
    volume = (_volume_get_query(context, session=session,
                                project_only=True).
              filter_by(id=volume_id).
              first())

    if not volume:
        raise exception.VolumeNotFound(volume_id=volume_id)

    return volume
3015
3016
@require_admin_context
def volume_get_all(context):
    """Return every volume visible to an admin."""
    return _volume_get_query(context).all()
3020
3021
@require_admin_context
def volume_get_all_by_host(context, host):
    """Return every volume hosted on *host*."""
    query = _volume_get_query(context)
    return query.filter_by(host=host).all()
3025
3026
@require_admin_context
def volume_get_all_by_instance_uuid(context, instance_uuid):
    """Return every volume attached to the instance (possibly empty)."""
    # .all() already yields [] when nothing matches.
    return (model_query(context, models.Volume, read_deleted="no").
            options(joinedload('volume_metadata')).
            options(joinedload('volume_type')).
            filter_by(instance_uuid=instance_uuid).
            all())
3039
3040
@require_context
def volume_get_all_by_project(context, project_id):
    """Return every volume owned by *project_id*."""
    authorize_project_context(context, project_id)
    query = _volume_get_query(context)
    return query.filter_by(project_id=project_id).all()
3045
3046
@require_admin_context
def volume_get_iscsi_target_num(context, volume_id):
    """Return the iSCSI target number assigned to *volume_id*, or raise."""
    target = (model_query(context, models.IscsiTarget, read_deleted="yes").
              filter_by(volume_id=volume_id).
              first())

    if not target:
        raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id)

    return target.target_num
3057
3058
@require_context
def volume_update(context, volume_id, values):
    """Update a volume row; a 'metadata' key replaces its metadata set."""
    session = get_session()
    volume_ref = volume_get(context, volume_id, session=session)

    if values.get('metadata') is not None:
        # delete=True: metadata keys absent from the new set are removed.
        volume_metadata_update(context,
                               volume_id,
                               values.pop('metadata'),
                               delete=True)

    with session.begin():
        volume_ref.update(values)
        volume_ref.save(session=session)

    return volume_ref
3074
3075
@require_context
def ec2_volume_create(context, volume_uuid, id=None):
    """Create an EC2-compatible volume id mapping for the given uuid."""
    mapping = models.VolumeIdMapping()
    mapping.update({'uuid': volume_uuid})
    if id is not None:
        mapping.update({'id': id})

    mapping.save()
    return mapping
3087
3088
@require_context
def get_ec2_volume_id_by_uuid(context, volume_id, session=None):
    """Map a volume uuid to its EC2 integer id, or raise."""
    mapping = (_ec2_volume_get_query(context, session=session).
               filter_by(uuid=volume_id).
               first())

    if not mapping:
        raise exception.VolumeNotFound(volume_id=volume_id)

    return mapping['id']
3099
3100
@require_context
def get_volume_uuid_by_ec2_id(context, ec2_id, session=None):
    """Map an EC2 integer volume id back to the volume uuid, or raise."""
    mapping = (_ec2_volume_get_query(context, session=session).
               filter_by(id=ec2_id).
               first())

    if not mapping:
        raise exception.VolumeNotFound(volume_id=ec2_id)

    return mapping['uuid']
3111
3112
@require_context
def ec2_snapshot_create(context, snapshot_uuid, id=None):
    """Create an EC2-compatible snapshot id mapping for the given uuid."""
    mapping = models.SnapshotIdMapping()
    mapping.update({'uuid': snapshot_uuid})
    if id is not None:
        mapping.update({'id': id})

    mapping.save()
    return mapping
3124
3125
@require_context
def get_ec2_snapshot_id_by_uuid(context, snapshot_id, session=None):
    """Map a snapshot uuid to its EC2 integer id, or raise."""
    mapping = (_ec2_snapshot_get_query(context, session=session).
               filter_by(uuid=snapshot_id).
               first())

    if not mapping:
        raise exception.SnapshotNotFound(snapshot_id=snapshot_id)

    return mapping['id']
3136
3137
@require_context
def get_snapshot_uuid_by_ec2_id(context, ec2_id, session=None):
    """Map an EC2 integer snapshot id back to the snapshot uuid, or raise."""
    mapping = (_ec2_snapshot_get_query(context, session=session).
               filter_by(id=ec2_id).
               first())

    if not mapping:
        raise exception.SnapshotNotFound(snapshot_id=ec2_id)

    return mapping['uuid']
3148
3149
3150####################
3151
def _volume_metadata_get_query(context, volume_id, session=None):
    """Base query over the (non-deleted) metadata rows of a volume."""
    return (model_query(context, models.VolumeMetadata,
                        session=session, read_deleted="no").
            filter_by(volume_id=volume_id))
3156
3157
@require_context
@require_volume_exists
def volume_metadata_get(context, volume_id):
    """Return the volume's metadata as a plain {key: value} dict."""
    rows = _volume_metadata_get_query(context, volume_id).all()
    return dict((row['key'], row['value']) for row in rows)
3167
3168
@require_context
@require_volume_exists
def volume_metadata_delete(context, volume_id, key):
    """Soft-delete one metadata item, preserving its updated_at stamp."""
    (_volume_metadata_get_query(context, volume_id).
     filter_by(key=key).
     update({'deleted': True,
             'deleted_at': timeutils.utcnow(),
             'updated_at': literal_column('updated_at')}))
3177
3178
@require_context
@require_volume_exists
def volume_metadata_get_item(context, volume_id, key, session=None):
    """Fetch one metadata row of a volume by *key*, or raise."""
    item = (_volume_metadata_get_query(context, volume_id,
                                       session=session).
            filter_by(key=key).
            first())

    if not item:
        raise exception.VolumeMetadataNotFound(metadata_key=key,
                                               volume_id=volume_id)
    return item
3190
3191
@require_context
@require_volume_exists
def volume_metadata_update(context, volume_id, metadata, delete):
    """Create or update the volume's metadata items from *metadata*.

    If *delete* is True, existing keys that are absent from *metadata*
    are soft-deleted first. Returns the *metadata* dict that was
    applied.
    """
    session = get_session()

    # Set existing metadata to deleted if delete argument is True
    if delete:
        original_metadata = volume_metadata_get(context, volume_id)
        for meta_key, meta_value in original_metadata.iteritems():
            if meta_key not in metadata:
                meta_ref = volume_metadata_get_item(context, volume_id,
                                                    meta_key, session)
                meta_ref.update({'deleted': True})
                meta_ref.save(session=session)

    # Now update all existing items with new values, or create new
    # meta objects
    for meta_key, meta_value in metadata.iteritems():

        # update the value whether it exists or not
        item = {"value": meta_value}

        try:
            meta_ref = volume_metadata_get_item(context, volume_id,
                                                meta_key, session)
        except exception.VolumeMetadataNotFound:
            # No such key yet: create a fresh row. (The old
            # "except X, e" form bound an unused name and is invalid
            # in Python 3.)
            meta_ref = models.VolumeMetadata()
            item.update({"key": meta_key, "volume_id": volume_id})

        meta_ref.update(item)
        meta_ref.save(session=session)

    return metadata
3226
3227
3228###################
3229
3230
@require_context
def snapshot_create(context, values):
    """Create a snapshot row, generating a uuid id when none is given."""
    if not values.get('id'):
        values['id'] = str(utils.gen_uuid())

    snapshot_ref = models.Snapshot()
    snapshot_ref.update(values)

    session = get_session()
    with session.begin():
        snapshot_ref.save(session=session)
        return snapshot_ref
3242
3243
@require_admin_context
def snapshot_destroy(context, snapshot_id):
    """Soft-delete a snapshot, preserving its updated_at stamp."""
    session = get_session()
    with session.begin():
        (session.query(models.Snapshot).
         filter_by(id=snapshot_id).
         update({'deleted': True,
                 'deleted_at': timeutils.utcnow(),
                 'updated_at': literal_column('updated_at')}))
3253
3254
@require_context
def snapshot_get(context, snapshot_id, session=None):
    """Fetch a snapshot visible to the context's project, or raise."""
    snapshot = (model_query(context, models.Snapshot, session=session,
                            project_only=True).
                filter_by(id=snapshot_id).
                first())

    if not snapshot:
        raise exception.SnapshotNotFound(snapshot_id=snapshot_id)

    return snapshot
3266
3267
@require_admin_context
def snapshot_get_all(context):
    """Return every snapshot visible to an admin."""
    return model_query(context, models.Snapshot).all()
3271
3272
@require_context
def snapshot_get_all_for_volume(context, volume_id):
    """Return the (non-deleted) snapshots taken of *volume_id*."""
    query = model_query(context, models.Snapshot, read_deleted='no',
                        project_only=True)
    return query.filter_by(volume_id=volume_id).all()
3278
3279
@require_context
def snapshot_get_all_by_project(context, project_id):
    """Return every snapshot owned by *project_id*."""
    authorize_project_context(context, project_id)
    query = model_query(context, models.Snapshot)
    return query.filter_by(project_id=project_id).all()
3286
3287
@require_context
def snapshot_update(context, snapshot_id, values):
    """Apply *values* to an existing snapshot row."""
    session = get_session()
    with session.begin():
        snapshot = snapshot_get(context, snapshot_id, session=session)
        snapshot.update(values)
        snapshot.save(session=session)
3295
3296
3297###################
3298
3299
def _block_device_mapping_get_query(context, session=None):
    """Base query over the block-device-mapping table."""
    return model_query(context, models.BlockDeviceMapping, session=session)
3302
3303
@require_context
def block_device_mapping_create(context, values):
    """Create and persist a block-device-mapping row."""
    bdm = models.BlockDeviceMapping()
    bdm.update(values)

    session = get_session()
    with session.begin():
        bdm.save(session=session)
3312
3313
3314@require_context
3315def block_device_mapping_update(context, bdm_id, values):
3316 session = get_session()
3317 with session.begin():
3318 _block_device_mapping_get_query(context, session=session).\
3319 filter_by(id=bdm_id).\
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches