Merge lp:~yolanda.robla/nova/precise-security into lp:ubuntu/precise-security/nova

Proposed by Yolanda Robla
Status: Merged
Approved by: James Page
Approved revision: 88
Merge reported by: James Page
Merged at revision: not available
Proposed branch: lp:~yolanda.robla/nova/precise-security
Merge into: lp:ubuntu/precise-security/nova
Diff against target: 173813 lines (+26521/-71314)
90 files modified
.gitignore (+0/-20)
.gitreview (+0/-5)
.mailmap (+0/-82)
.pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py (+0/-97)
.pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy/migrate_repo/versions/081_drop_instance_id_bw_cache.py (+0/-69)
.pc/CVE-2013-0208.patch/nova/compute/api.py (+0/-1842)
.pc/CVE-2013-0208.patch/nova/exception.py (+0/-1031)
.pc/CVE-2013-0335.patch/nova/compute/api.py (+0/-1858)
.pc/CVE-2013-0335.patch/nova/compute/manager.py (+0/-2585)
.pc/CVE-2013-0335.patch/nova/consoleauth/manager.py (+0/-81)
.pc/CVE-2013-0335.patch/nova/tests/test_compute.py (+0/-3691)
.pc/CVE-2013-0335.patch/nova/tests/test_consoleauth.py (+0/-54)
.pc/CVE-2013-0335_testsuite-fixes.patch/nova/consoleauth/manager.py (+0/-109)
.pc/CVE-2013-0335_testsuite-fixes.patch/nova/tests/policy.json (+0/-178)
.pc/CVE-2013-0335_testsuite-fixes.patch/nova/tests/test_compute.py (+0/-3732)
.pc/CVE-2013-0335_testsuite-fixes.patch/nova/tests/test_consoleauth.py (+0/-61)
.pc/CVE-2013-1664.patch/nova/api/openstack/common.py (+0/-490)
.pc/CVE-2013-1664.patch/nova/api/openstack/compute/contrib/hosts.py (+0/-305)
.pc/CVE-2013-1664.patch/nova/api/openstack/compute/contrib/security_groups.py (+0/-673)
.pc/CVE-2013-1664.patch/nova/api/openstack/compute/servers.py (+0/-1170)
.pc/CVE-2013-1664.patch/nova/api/openstack/wsgi.py (+0/-1124)
.pc/CVE-2013-1664.patch/nova/tests/test_utils.py (+0/-1188)
.pc/CVE-2013-1664.patch/nova/utils.py (+0/-1727)
.pc/CVE-2013-1838.patch/nova/api/openstack/compute/contrib/quotas.py (+0/-110)
.pc/CVE-2013-1838.patch/nova/db/api.py (+0/-1753)
.pc/CVE-2013-1838.patch/nova/db/sqlalchemy/api.py (+0/-4380)
.pc/CVE-2013-1838.patch/nova/network/manager.py (+0/-1899)
.pc/CVE-2013-1838.patch/nova/quota.py (+0/-229)
.pc/CVE-2013-1838.patch/nova/tests/api/openstack/compute/contrib/test_quotas.py (+0/-204)
.pc/CVE-2013-1838.patch/nova/tests/network/test_manager.py (+0/-1790)
.pc/applied-patches (+0/-12)
.pc/fix-docs-build-without-network.patch/doc/source/conf.py (+0/-234)
.pc/fix-pep8-errors.patch/nova/api/openstack/compute/contrib/hosts.py (+0/-305)
.pc/fix-pep8-errors.patch/nova/api/openstack/compute/contrib/networks.py (+0/-140)
.pc/fix-pep8-errors.patch/nova/tests/api/openstack/compute/contrib/test_admin_actions.py (+0/-278)
.pc/fix-pep8-errors.patch/nova/tests/api/openstack/compute/contrib/test_disk_config.py (+0/-253)
.pc/fix-pep8-errors.patch/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py (+0/-98)
.pc/fix-pep8-errors.patch/nova/tests/api/openstack/compute/contrib/test_security_groups.py (+0/-1185)
.pc/fix-pep8-errors.patch/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py (+0/-340)
.pc/fix-pep8-errors.patch/nova/tests/test_libvirt_vif.py (+0/-175)
.pc/fix-pep8-errors.patch/nova/virt/baremetal/nodes.py (+0/-42)
.pc/fix-pep8-errors.patch/nova/virt/baremetal/proxy.py (+0/-794)
.pc/fix-pep8-errors.patch/nova/virt/fake.py (+0/-343)
.pc/fix-pep8-errors.patch/nova/virt/libvirt/connection.py (+0/-2541)
.pc/fix-pep8-errors.patch/nova/virt/vmwareapi/vmops.py (+0/-825)
.pc/fix-pep8-errors.patch/nova/virt/xenapi/volume_utils.py (+0/-413)
.pc/fix-pep8-errors.patch/nova/virt/xenapi_conn.py (+0/-638)
.pc/fix-pep8-errors.patch/nova/vnc/xvp_proxy.py (+0/-187)
.pc/fix-pep8-errors.patch/tools/hacking.py (+0/-195)
.pc/fix-pep8-errors.patch/tools/xenserver/vdi_chain_cleanup.py (+0/-128)
.pc/fix-ubuntu-tests.patch/nova/tests/test_api.py (+0/-610)
.pc/kombu_tests_timeout.patch/nova/tests/rpc/test_kombu.py (+0/-295)
.pc/nova-manage_flagfile_location.patch/bin/nova-manage (+0/-1746)
.pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost (+0/-435)
Authors (+7/-1)
ChangeLog (+26276/-25487)
PKG-INFO (+1/-1)
debian/changelog (+49/-0)
debian/patches/CVE-2013-0208.patch (+0/-69)
debian/patches/CVE-2013-0335.patch (+0/-202)
debian/patches/CVE-2013-0335_testsuite-fixes.patch (+0/-188)
debian/patches/CVE-2013-1664.patch (+0/-309)
debian/patches/CVE-2013-1838.patch (+0/-232)
debian/patches/series (+0/-5)
nova.egg-info/PKG-INFO (+1/-1)
nova.egg-info/SOURCES.txt (+0/-3)
nova/api/openstack/compute/contrib/flavorextradata.py (+1/-1)
nova/api/openstack/compute/contrib/simple_tenant_usage.py (+7/-0)
nova/compute/api.py (+7/-3)
nova/consoleauth/manager.py (+5/-4)
nova/db/sqlalchemy/api.py (+1/-1)
nova/manager.py (+5/-0)
nova/network/l3.py (+4/-2)
nova/network/linux_net.py (+17/-12)
nova/network/manager.py (+1/-2)
nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py (+22/-6)
nova/tests/fakelibvirt.py (+2/-0)
nova/tests/network/test_manager.py (+35/-0)
nova/tests/test_compute.py (+5/-3)
nova/tests/test_db_api.py (+2/-0)
nova/tests/test_libvirt.py (+13/-1)
nova/tests/test_linux_net.py (+1/-1)
nova/utils.py (+11/-9)
nova/version.py (+1/-1)
nova/virt/disk/api.py (+1/-0)
nova/virt/firewall.py (+14/-6)
nova/virt/libvirt/connection.py (+24/-10)
nova/virt/libvirt/volume.py (+5/-3)
tools/pip-requires (+2/-2)
tools/test-requires (+1/-0)
To merge this branch: bzr merge lp:~yolanda.robla/nova/precise-security
Reviewer Review Type Date Requested Status
James Page Approve
Review via email: mp+160628@code.launchpad.net

Description of the change

Essex SRU

To post a comment you must log in.
Revision history for this message
James Page (james-page) :
review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== removed file '.gitignore'
2--- .gitignore 2012-08-27 14:50:40 +0000
3+++ .gitignore 1970-01-01 00:00:00 +0000
4@@ -1,20 +0,0 @@
5-*.pyc
6-*.DS_Store
7-local_settings.py
8-CA/
9-keeper
10-instances
11-keys
12-build/*
13-build-stamp
14-nova.egg-info
15-.nova-venv
16-.venv
17-.tox
18-*.sqlite
19-*.log
20-*.mo
21-tools/conf/nova.conf*
22-cover/*
23-dist/*
24-.coverage
25
26=== removed file '.gitreview'
27--- .gitreview 2012-08-27 14:50:40 +0000
28+++ .gitreview 1970-01-01 00:00:00 +0000
29@@ -1,5 +0,0 @@
30-[gerrit]
31-host=review.openstack.org
32-port=29418
33-project=openstack/nova.git
34-defaultbranch=stable/essex
35
36=== removed file '.mailmap'
37--- .mailmap 2012-08-24 02:09:33 +0000
38+++ .mailmap 1970-01-01 00:00:00 +0000
39@@ -1,82 +0,0 @@
40-# Format is:
41-# <preferred e-mail> <other e-mail 1>
42-# <preferred e-mail> <other e-mail 2>
43-<aaron.lee@rackspace.com> <wwkeyboard@gmail.com>
44-<anotherjesse@gmail.com> <jesse@dancelamb>
45-<anotherjesse@gmail.com> <jesse@gigantor.local>
46-<anotherjesse@gmail.com> <jesse@ubuntu>
47-<anotherjesse@gmail.com> <jesse@aire.local>
48-<ant@openstack.org> <amesserl@rackspace.com>
49-<Armando.Migliaccio@eu.citrix.com> <armando.migliaccio@citrix.com>
50-<Armando.Migliaccio@eu.citrix.com> <amigliaccio@internap.com>
51-<brian.lamar@rackspace.com> <brian.lamar@gmail.com>
52-<brian.waldon@rackspace.com> <bcwaldon@gmail.com>
53-<bschott@isi.edu> <bfschott@gmail.com>
54-<cbehrens@codestud.com> <chris.behrens@rackspace.com>
55-<chiradeep@cloud.com> <chiradeep@chiradeep-lt2>
56-<code@term.ie> <github@anarkystic.com>
57-<code@term.ie> <termie@preciousroy.local>
58-<corywright@gmail.com> <cory.wright@rackspace.com>
59-<dan@nicira.com> <danwent@dan-xs3-cs>
60-<dan@nicira.com> <danwent@gmail.com>
61-<Dave.Walker@canonical.com> <DaveWalker@ubuntu.com>
62-<derekh@redhat.com> <higginsd@gmail.com>
63-<devin.carlen@gmail.com> <devcamcar@illian.local>
64-<doug.hellmann@dreamhost.com> <doug.hellmann@gmail.com>
65-<dprince@redhat.com> <dan.prince@rackspace.com>
66-<edouard1.thuleau@orange.com> <thuleau@gmail.com>
67-<ewan.mellor@citrix.com> <emellor@silver>
68-<ghe@debian.org> <ghe.rivero@gmail.com>
69-<itoumsn@nttdata.co.jp> <itoumsn@shayol>
70-<jake@ansolabs.com> <jake@markupisart.com>
71-<jake@ansolabs.com> <admin@jakedahn.com>
72-<jaypipes@gmail.com> <jpipes@serialcoder>
73-<jeblair@hp.com> <james.blair@rackspace.com>
74-<jeblair@hp.com> <corvus@inaugust.com>
75-<jmckenty@gmail.com> <jmckenty@joshua-mckentys-macbook-pro.local>
76-<jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>
77-<jmckenty@gmail.com> <joshua.mckenty@nasa.gov>
78-<johannes.erdfelt@rackspace.com> <johannes@compute3.221.st>
79-<josh@jk0.org> <josh.kearney@rackspace.com>
80-<josh.durgin@dreamhost.com> <joshd@hq.newdream.net>
81-<justin@fathomdb.com> <justinsb@justinsb-desktop>
82-<justin@fathomdb.com> <superstack@superstack.org>
83-<lorin@nimbisservices.com> <lorin@isi.edu>
84-<masumotok@nttdata.co.jp> Masumoto<masumotok@nttdata.co.jp>
85-<masumotok@nttdata.co.jp> <root@openstack2-api>
86-<matt.dietz@rackspace.com> <matthewdietz@Matthew-Dietzs-MacBook-Pro.local>
87-<matt.dietz@rackspace.com> <mdietz@openstack>
88-<mikal@stillhq.com> <michael.still@canonical.com>
89-<mordred@inaugust.com> <mordred@hudson>
90-<naveedm9@gmail.com> <naveed.massjouni@rackspace.com>
91-<rnirmal@gmail.com> <nirmal.ranganathan@rackspace.com>
92-<rnirmal@gmail.com> <nirmal.ranganathan@rackspace.coom>
93-<paul@openstack.org> <paul.voccio@rackspace.com>
94-<paul@openstack.org> <pvoccio@castor.local>
95-<paul@openstack.org> <paul@substation9.com>
96-<rconradharris@gmail.com> <rick.harris@rackspace.com>
97-<rlane@wikimedia.org> <laner@controller>
98-<sandy.walsh@rackspace.com> <sandy@sandywalsh.com>
99-<sleepsonthefloor@gmail.com> <root@tonbuntu>
100-<soren.hansen@rackspace.com> <soren@linux2go.dk>
101-<soren@linux2go.dk> <sorhanse@cisco.com>
102-<throughnothing@gmail.com> <will.wolf@rackspace.com>
103-<tim.simpson@rackspace.com> <tim.simpson4@gmail.com>
104-<todd@ansolabs.com> <todd@lapex>
105-<todd@ansolabs.com> <todd@rubidine.com>
106-<todd@ansolabs.com> <xtoddx@gmail.com>
107-<trey.morris@rackspace.com> <treyemorris@gmail.com>
108-<tushar.vitthal.patil@gmail.com> <tpatil@vertex.co.in>
109-<ueno.nachi@lab.ntt.co.jp> <nati.ueno@gmail.com>
110-<ueno.nachi@lab.ntt.co.jp> <nova@u4>
111-<ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp>
112-<vishvananda@gmail.com> <root@mirror.nasanebula.net>
113-<vishvananda@gmail.com> <root@ubuntu>
114-<vishvananda@gmail.com> <vishvananda@yahoo.com>
115-<ilyaalekseyev@acm.org> <ialekseev@griddynamics.com>
116-<ilyaalekseyev@acm.org> <ilya@oscloud.ru>
117-<reldan@oscloud.ru> <enugaev@griddynamics.com>
118-<kshileev@gmail.com> <kshileev@griddynamics.com>
119-<nsokolov@griddynamics.com> <nsokolov@griddynamics.net>
120-<troy.toman@rackspace.com> <ttcl@mac.com>
121-<zulcss@ubuntu.com> <chuck.short@canonical.com>
122
123=== added directory '.pc/0001-fix-useexisting-deprecation-warnings.patch'
124=== removed directory '.pc/0001-fix-useexisting-deprecation-warnings.patch'
125=== added file '.pc/0001-fix-useexisting-deprecation-warnings.patch/.timestamp'
126=== added directory '.pc/0001-fix-useexisting-deprecation-warnings.patch/nova'
127=== removed directory '.pc/0001-fix-useexisting-deprecation-warnings.patch/nova'
128=== added directory '.pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db'
129=== removed directory '.pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db'
130=== added directory '.pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy'
131=== removed directory '.pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy'
132=== added directory '.pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy/migrate_repo'
133=== removed directory '.pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy/migrate_repo'
134=== added directory '.pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy/migrate_repo/versions'
135=== removed directory '.pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy/migrate_repo/versions'
136=== added file '.pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py'
137--- .pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py 1970-01-01 00:00:00 +0000
138+++ .pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py 2013-04-24 12:49:07 +0000
139@@ -0,0 +1,97 @@
140+# vim: tabstop=4 shiftwidth=4 softtabstop=4
141+
142+# Copyright 2011 OpenStack LLC.
143+# All Rights Reserved.
144+#
145+# Licensed under the Apache License, Version 2.0 (the "License"); you may
146+# not use this file except in compliance with the License. You may obtain
147+# a copy of the License at
148+#
149+# http://www.apache.org/licenses/LICENSE-2.0
150+#
151+# Unless required by applicable law or agreed to in writing, software
152+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
153+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
154+# License for the specific language governing permissions and limitations
155+# under the License.
156+
157+from sqlalchemy import and_, select
158+from sqlalchemy import BigInteger, Boolean, Column, DateTime
159+from sqlalchemy import Integer, MetaData, String
160+from sqlalchemy import Table
161+
162+from nova import utils
163+
164+
165+def upgrade(migrate_engine):
166+ meta = MetaData()
167+ meta.bind = migrate_engine
168+
169+ vifs = Table('virtual_interfaces', meta, autoload=True)
170+ networks = Table('networks', meta, autoload=True)
171+
172+ bw_usage_cache = Table('bw_usage_cache', meta,
173+ Column('created_at', DateTime(timezone=False)),
174+ Column('updated_at', DateTime(timezone=False)),
175+ Column('deleted_at', DateTime(timezone=False)),
176+ Column('deleted', Boolean(create_constraint=True, name=None)),
177+ Column('id', Integer(), primary_key=True, nullable=False),
178+ Column('instance_id', Integer(), nullable=False),
179+ Column('network_label',
180+ String(length=255, convert_unicode=False,
181+ assert_unicode=None,
182+ unicode_error=None, _warn_on_bytestring=False)),
183+ Column('start_period', DateTime(timezone=False),
184+ nullable=False),
185+ Column('last_refreshed', DateTime(timezone=False)),
186+ Column('bw_in', BigInteger()),
187+ Column('bw_out', BigInteger()),
188+ useexisting=True)
189+ mac_column = Column('mac', String(255))
190+ bw_usage_cache.create_column(mac_column)
191+
192+ bw_usage_cache.update()\
193+ .values(mac=select([vifs.c.address])\
194+ .where(and_(
195+ networks.c.label == bw_usage_cache.c.network_label,
196+ networks.c.id == vifs.c.network_id,
197+ bw_usage_cache.c.instance_id == vifs.c.instance_id))\
198+ .as_scalar()).execute()
199+
200+ bw_usage_cache.c.network_label.drop()
201+
202+
203+def downgrade(migrate_engine):
204+ meta = MetaData()
205+ meta.bind = migrate_engine
206+
207+ vifs = Table('virtual_interfaces', meta, autoload=True)
208+ network = Table('networks', meta, autoload=True)
209+
210+ bw_usage_cache = Table('bw_usage_cache', meta,
211+ Column('created_at', DateTime(timezone=False)),
212+ Column('updated_at', DateTime(timezone=False)),
213+ Column('deleted_at', DateTime(timezone=False)),
214+ Column('deleted', Boolean(create_constraint=True, name=None)),
215+ Column('id', Integer(), primary_key=True, nullable=False),
216+ Column('instance_id', Integer(), nullable=False),
217+ Column('mac', String(255)),
218+ Column('start_period', DateTime(timezone=False),
219+ nullable=False),
220+ Column('last_refreshed', DateTime(timezone=False)),
221+ Column('bw_in', BigInteger()),
222+ Column('bw_out', BigInteger()),
223+ useexisting=True)
224+
225+ network_label_column = Column('network_label', String(255))
226+ bw_usage_cache.create_column(network_label_column)
227+
228+ bw_usage_cache.update()\
229+ .values(network_label=select([network.c.label])\
230+ .where(and_(
231+ network.c.id == vifs.c.network_id,
232+ vifs.c.address == bw_usage_cache.c.mac,
233+ bw_usage_cache.c.instance_id == vifs.c.instance_id))\
234+ .as_scalar()).execute()
235+
236+ bw_usage_cache.c.mac.drop()
237
238=== removed file '.pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py'
239--- .pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py 2012-04-02 11:17:33 +0000
240+++ .pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py 1970-01-01 00:00:00 +0000
241@@ -1,97 +0,0 @@
242-# vim: tabstop=4 shiftwidth=4 softtabstop=4
243-
244-# Copyright 2011 OpenStack LLC.
245-# All Rights Reserved.
246-#
247-# Licensed under the Apache License, Version 2.0 (the "License"); you may
248-# not use this file except in compliance with the License. You may obtain
249-# a copy of the License at
250-#
251-# http://www.apache.org/licenses/LICENSE-2.0
252-#
253-# Unless required by applicable law or agreed to in writing, software
254-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
255-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
256-# License for the specific language governing permissions and limitations
257-# under the License.
258-
259-from sqlalchemy import and_, select
260-from sqlalchemy import BigInteger, Boolean, Column, DateTime
261-from sqlalchemy import Integer, MetaData, String
262-from sqlalchemy import Table
263-
264-from nova import utils
265-
266-
267-def upgrade(migrate_engine):
268- meta = MetaData()
269- meta.bind = migrate_engine
270-
271- vifs = Table('virtual_interfaces', meta, autoload=True)
272- networks = Table('networks', meta, autoload=True)
273-
274- bw_usage_cache = Table('bw_usage_cache', meta,
275- Column('created_at', DateTime(timezone=False)),
276- Column('updated_at', DateTime(timezone=False)),
277- Column('deleted_at', DateTime(timezone=False)),
278- Column('deleted', Boolean(create_constraint=True, name=None)),
279- Column('id', Integer(), primary_key=True, nullable=False),
280- Column('instance_id', Integer(), nullable=False),
281- Column('network_label',
282- String(length=255, convert_unicode=False,
283- assert_unicode=None,
284- unicode_error=None, _warn_on_bytestring=False)),
285- Column('start_period', DateTime(timezone=False),
286- nullable=False),
287- Column('last_refreshed', DateTime(timezone=False)),
288- Column('bw_in', BigInteger()),
289- Column('bw_out', BigInteger()),
290- useexisting=True)
291- mac_column = Column('mac', String(255))
292- bw_usage_cache.create_column(mac_column)
293-
294- bw_usage_cache.update()\
295- .values(mac=select([vifs.c.address])\
296- .where(and_(
297- networks.c.label == bw_usage_cache.c.network_label,
298- networks.c.id == vifs.c.network_id,
299- bw_usage_cache.c.instance_id == vifs.c.instance_id))\
300- .as_scalar()).execute()
301-
302- bw_usage_cache.c.network_label.drop()
303-
304-
305-def downgrade(migrate_engine):
306- meta = MetaData()
307- meta.bind = migrate_engine
308-
309- vifs = Table('virtual_interfaces', meta, autoload=True)
310- network = Table('networks', meta, autoload=True)
311-
312- bw_usage_cache = Table('bw_usage_cache', meta,
313- Column('created_at', DateTime(timezone=False)),
314- Column('updated_at', DateTime(timezone=False)),
315- Column('deleted_at', DateTime(timezone=False)),
316- Column('deleted', Boolean(create_constraint=True, name=None)),
317- Column('id', Integer(), primary_key=True, nullable=False),
318- Column('instance_id', Integer(), nullable=False),
319- Column('mac', String(255)),
320- Column('start_period', DateTime(timezone=False),
321- nullable=False),
322- Column('last_refreshed', DateTime(timezone=False)),
323- Column('bw_in', BigInteger()),
324- Column('bw_out', BigInteger()),
325- useexisting=True)
326-
327- network_label_column = Column('network_label', String(255))
328- bw_usage_cache.create_column(network_label_column)
329-
330- bw_usage_cache.update()\
331- .values(network_label=select([network.c.label])\
332- .where(and_(
333- network.c.id == vifs.c.network_id,
334- vifs.c.address == bw_usage_cache.c.mac,
335- bw_usage_cache.c.instance_id == vifs.c.instance_id))\
336- .as_scalar()).execute()
337-
338- bw_usage_cache.c.mac.drop()
339
340=== added file '.pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy/migrate_repo/versions/081_drop_instance_id_bw_cache.py'
341--- .pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy/migrate_repo/versions/081_drop_instance_id_bw_cache.py 1970-01-01 00:00:00 +0000
342+++ .pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy/migrate_repo/versions/081_drop_instance_id_bw_cache.py 2013-04-24 12:49:07 +0000
343@@ -0,0 +1,69 @@
344+# vim: tabstop=4 shiftwidth=4 softtabstop=4
345+
346+# Copyright (c) 2011 OpenStack, LLC.
347+# All Rights Reserved.
348+#
349+# Licensed under the Apache License, Version 2.0 (the "License"); you may
350+# not use this file except in compliance with the License. You may obtain
351+# a copy of the License at
352+#
353+# http://www.apache.org/licenses/LICENSE-2.0
354+#
355+# Unless required by applicable law or agreed to in writing, software
356+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
357+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
358+# License for the specific language governing permissions and limitations
359+# under the License.
360+import json
361+
362+from sqlalchemy import Column, Table, MetaData, Integer, Boolean, String
363+from sqlalchemy import DateTime, BigInteger
364+
365+
366+def upgrade(migrate_engine):
367+ meta = MetaData()
368+ meta.bind = migrate_engine
369+
370+ bw_usage_cache = Table('bw_usage_cache', meta,
371+ Column('created_at', DateTime(timezone=False)),
372+ Column('updated_at', DateTime(timezone=False)),
373+ Column('deleted_at', DateTime(timezone=False)),
374+ Column('deleted', Boolean(create_constraint=True, name=None)),
375+ Column('id', Integer(), primary_key=True, nullable=False),
376+ Column('instance_id', Integer(), nullable=False),
377+ Column('mac', String(255)),
378+ Column('start_period', DateTime(timezone=False),
379+ nullable=False),
380+ Column('last_refreshed', DateTime(timezone=False)),
381+ Column('bw_in', BigInteger()),
382+ Column('bw_out', BigInteger()),
383+ useexisting=True)
384+
385+ bw_usage_cache.drop_column('instance_id')
386+
387+
388+def downgrade(migrate_engine):
389+ meta = MetaData()
390+ meta.bind = migrate_engine
391+
392+ instance_info_caches = Table('instance_info_caches', meta, autoload=True)
393+ bw_usage_cache = Table('bw_usage_cache', meta, autoload=True)
394+
395+ instance_id = Column('instance_id', Integer)
396+ bw_usage_cache.create_column(instance_id)
397+
398+ cache = {}
399+ for row in migrate_engine.execute(instance_info_caches.select()):
400+ instance_id = row['instance']['id']
401+ if not row['network_info']:
402+ continue
403+
404+ nw_info = json.loads(row['network_info'])
405+ for vif in nw_info:
406+ cache[vif['address']] = instance_id
407+
408+ for row in migrate_engine.execute(bw_usage_cache.select()):
409+ instance_id = cache[row['mac']]
410+ migrate_engine.execute(bw_usage_cache.update()\
411+ .where(bw_usage_cache.c.id == row['id'])\
412+ .values(instance_id=instance_id))
413
414=== removed file '.pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy/migrate_repo/versions/081_drop_instance_id_bw_cache.py'
415--- .pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy/migrate_repo/versions/081_drop_instance_id_bw_cache.py 2012-04-02 11:17:33 +0000
416+++ .pc/0001-fix-useexisting-deprecation-warnings.patch/nova/db/sqlalchemy/migrate_repo/versions/081_drop_instance_id_bw_cache.py 1970-01-01 00:00:00 +0000
417@@ -1,69 +0,0 @@
418-# vim: tabstop=4 shiftwidth=4 softtabstop=4
419-
420-# Copyright (c) 2011 OpenStack, LLC.
421-# All Rights Reserved.
422-#
423-# Licensed under the Apache License, Version 2.0 (the "License"); you may
424-# not use this file except in compliance with the License. You may obtain
425-# a copy of the License at
426-#
427-# http://www.apache.org/licenses/LICENSE-2.0
428-#
429-# Unless required by applicable law or agreed to in writing, software
430-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
431-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
432-# License for the specific language governing permissions and limitations
433-# under the License.
434-import json
435-
436-from sqlalchemy import Column, Table, MetaData, Integer, Boolean, String
437-from sqlalchemy import DateTime, BigInteger
438-
439-
440-def upgrade(migrate_engine):
441- meta = MetaData()
442- meta.bind = migrate_engine
443-
444- bw_usage_cache = Table('bw_usage_cache', meta,
445- Column('created_at', DateTime(timezone=False)),
446- Column('updated_at', DateTime(timezone=False)),
447- Column('deleted_at', DateTime(timezone=False)),
448- Column('deleted', Boolean(create_constraint=True, name=None)),
449- Column('id', Integer(), primary_key=True, nullable=False),
450- Column('instance_id', Integer(), nullable=False),
451- Column('mac', String(255)),
452- Column('start_period', DateTime(timezone=False),
453- nullable=False),
454- Column('last_refreshed', DateTime(timezone=False)),
455- Column('bw_in', BigInteger()),
456- Column('bw_out', BigInteger()),
457- useexisting=True)
458-
459- bw_usage_cache.drop_column('instance_id')
460-
461-
462-def downgrade(migrate_engine):
463- meta = MetaData()
464- meta.bind = migrate_engine
465-
466- instance_info_caches = Table('instance_info_caches', meta, autoload=True)
467- bw_usage_cache = Table('bw_usage_cache', meta, autoload=True)
468-
469- instance_id = Column('instance_id', Integer)
470- bw_usage_cache.create_column(instance_id)
471-
472- cache = {}
473- for row in migrate_engine.execute(instance_info_caches.select()):
474- instance_id = row['instance']['id']
475- if not row['network_info']:
476- continue
477-
478- nw_info = json.loads(row['network_info'])
479- for vif in nw_info:
480- cache[vif['address']] = instance_id
481-
482- for row in migrate_engine.execute(bw_usage_cache.select()):
483- instance_id = cache[row['mac']]
484- migrate_engine.execute(bw_usage_cache.update()\
485- .where(bw_usage_cache.c.id == row['id'])\
486- .values(instance_id=instance_id))
487
488=== removed directory '.pc/CVE-2013-0208.patch'
489=== removed directory '.pc/CVE-2013-0208.patch/nova'
490=== removed directory '.pc/CVE-2013-0208.patch/nova/compute'
491=== removed file '.pc/CVE-2013-0208.patch/nova/compute/api.py'
492--- .pc/CVE-2013-0208.patch/nova/compute/api.py 2013-01-23 13:03:11 +0000
493+++ .pc/CVE-2013-0208.patch/nova/compute/api.py 1970-01-01 00:00:00 +0000
494@@ -1,1842 +0,0 @@
495-# vim: tabstop=4 shiftwidth=4 softtabstop=4
496-
497-# Copyright 2010 United States Government as represented by the
498-# Administrator of the National Aeronautics and Space Administration.
499-# Copyright 2011 Piston Cloud Computing, Inc.
500-# All Rights Reserved.
501-#
502-# Licensed under the Apache License, Version 2.0 (the "License"); you may
503-# not use this file except in compliance with the License. You may obtain
504-# a copy of the License at
505-#
506-# http://www.apache.org/licenses/LICENSE-2.0
507-#
508-# Unless required by applicable law or agreed to in writing, software
509-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
510-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
511-# License for the specific language governing permissions and limitations
512-# under the License.
513-
514-"""Handles all requests relating to compute resources (e.g. guest vms,
515-networking and storage of vms, and compute hosts on which they run)."""
516-
517-import functools
518-import re
519-import time
520-
521-import novaclient
522-import webob.exc
523-
524-from nova import block_device
525-from nova.compute import aggregate_states
526-from nova.compute import instance_types
527-from nova.compute import power_state
528-from nova.compute import task_states
529-from nova.compute import vm_states
530-from nova.db import base
531-from nova import exception
532-from nova import flags
533-import nova.image
534-from nova import log as logging
535-from nova import network
536-from nova.openstack.common import cfg
537-import nova.policy
538-from nova import quota
539-from nova import rpc
540-from nova.scheduler import api as scheduler_api
541-from nova import utils
542-from nova import volume
543-
544-
545-LOG = logging.getLogger(__name__)
546-
547-find_host_timeout_opt = cfg.StrOpt('find_host_timeout',
548- default=30,
549- help='Timeout after NN seconds when looking for a host.')
550-
551-FLAGS = flags.FLAGS
552-FLAGS.register_opt(find_host_timeout_opt)
553-flags.DECLARE('consoleauth_topic', 'nova.consoleauth')
554-
555-
556-def check_instance_state(vm_state=None, task_state=None):
557- """Decorator to check VM and/or task state before entry to API functions.
558-
559- If the instance is in the wrong state, the wrapper will raise an exception.
560- """
561-
562- if vm_state is not None and not isinstance(vm_state, set):
563- vm_state = set(vm_state)
564- if task_state is not None and not isinstance(task_state, set):
565- task_state = set(task_state)
566-
567- def outer(f):
568- @functools.wraps(f)
569- def inner(self, context, instance, *args, **kw):
570- if vm_state is not None and instance['vm_state'] not in vm_state:
571- raise exception.InstanceInvalidState(
572- attr='vm_state',
573- instance_uuid=instance['uuid'],
574- state=instance['vm_state'],
575- method=f.__name__)
576- if (task_state is not None and
577- instance['task_state'] not in task_state):
578- raise exception.InstanceInvalidState(
579- attr='task_state',
580- instance_uuid=instance['uuid'],
581- state=instance['task_state'],
582- method=f.__name__)
583-
584- return f(self, context, instance, *args, **kw)
585- return inner
586- return outer
587-
588-
589-def wrap_check_policy(func):
590- """Check corresponding policy prior of wrapped method to execution"""
591- @functools.wraps(func)
592- def wrapped(self, context, target, *args, **kwargs):
593- check_policy(context, func.__name__, target)
594- return func(self, context, target, *args, **kwargs)
595- return wrapped
596-
597-
598-def check_policy(context, action, target):
599- _action = 'compute:%s' % action
600- nova.policy.enforce(context, _action, target)
601-
602-
class BaseAPI(base.Base):
    """Common plumbing for compute-facing API classes (RPC helpers)."""

    def __init__(self, **kwargs):
        super(BaseAPI, self).__init__(**kwargs)

    def _cast_or_call_compute_message(self, rpc_method, compute_method,
                                      context, instance=None, host=None,
                                      params=None):
        """Generic handler for RPC casts and calls to compute.

        :param rpc_method: RPC method to use (rpc.call or rpc.cast)
        :param compute_method: Compute manager method to call
        :param context: RequestContext of caller
        :param instance: The instance object used to find the host to send
                         to; may be None to omit instance_uuid from args
        :param host: Optional host to send to instead of instance['host'];
                     must be specified if 'instance' is None
        :param params: Optional dictionary of arguments for the compute
                       worker

        :returns: whatever rpc_method returns (None for casts)
        """
        params = params or {}
        if not host:
            # Fall back to the host the instance lives on.
            if not instance:
                raise exception.Error(_("No compute host specified"))
            host = instance['host']
            if not host:
                raise exception.Error(_("Unable to find host for "
                                        "Instance %s") % instance['uuid'])
        queue = self.db.queue_get_for(context, FLAGS.compute_topic, host)
        if instance:
            params['instance_uuid'] = instance['uuid']
        return rpc_method(context, queue,
                          {'method': compute_method, 'args': params})

    def _cast_compute_message(self, *args, **kwargs):
        """Generic handler for RPC casts to compute."""
        self._cast_or_call_compute_message(rpc.cast, *args, **kwargs)

    def _call_compute_message(self, *args, **kwargs):
        """Generic handler for RPC calls to compute."""
        return self._cast_or_call_compute_message(rpc.call, *args, **kwargs)

    @staticmethod
    def _cast_scheduler_message(context, args):
        """Generic handler for RPC casts to the scheduler."""
        rpc.cast(context, FLAGS.scheduler_topic, args)
651-
652-
653-class API(BaseAPI):
654- """API for interacting with the compute manager."""
655-
656- def __init__(self, image_service=None, network_api=None, volume_api=None,
657- **kwargs):
658- self.image_service = (image_service or
659- nova.image.get_default_image_service())
660-
661- self.network_api = network_api or network.API()
662- self.volume_api = volume_api or volume.API()
663- super(API, self).__init__(**kwargs)
664-
665- def _check_injected_file_quota(self, context, injected_files):
666- """Enforce quota limits on injected files.
667-
668- Raises a QuotaError if any limit is exceeded.
669- """
670- if injected_files is None:
671- return
672- limit = quota.allowed_injected_files(context, len(injected_files))
673- if len(injected_files) > limit:
674- raise exception.QuotaError(code="OnsetFileLimitExceeded")
675- path_limit = quota.allowed_injected_file_path_bytes(context)
676- for path, content in injected_files:
677- if len(path) > path_limit:
678- raise exception.QuotaError(code="OnsetFilePathLimitExceeded")
679- content_limit = quota.allowed_injected_file_content_bytes(
680- context, len(content))
681- if len(content) > content_limit:
682- code = "OnsetFileContentLimitExceeded"
683- raise exception.QuotaError(code=code)
684-
685- def _check_metadata_properties_quota(self, context, metadata=None):
686- """Enforce quota limits on metadata properties."""
687- if not metadata:
688- metadata = {}
689- num_metadata = len(metadata)
690- quota_metadata = quota.allowed_metadata_items(context, num_metadata)
691- if quota_metadata < num_metadata:
692- pid = context.project_id
693- msg = _("Quota exceeded for %(pid)s, tried to set "
694- "%(num_metadata)s metadata properties") % locals()
695- LOG.warn(msg)
696- raise exception.QuotaError(code="MetadataLimitExceeded")
697-
698- # Because metadata is stored in the DB, we hard-code the size limits
699- # In future, we may support more variable length strings, so we act
700- # as if this is quota-controlled for forwards compatibility
701- for k, v in metadata.iteritems():
702- if len(k) > 255 or len(v) > 255:
703- pid = context.project_id
704- msg = _("Quota exceeded for %(pid)s, metadata property "
705- "key or value too long") % locals()
706- LOG.warn(msg)
707- raise exception.QuotaError(code="MetadataLimitExceeded")
708-
709- def _check_requested_networks(self, context, requested_networks):
710- """ Check if the networks requested belongs to the project
711- and the fixed IP address for each network provided is within
712- same the network block
713- """
714- if requested_networks is None:
715- return
716-
717- self.network_api.validate_networks(context, requested_networks)
718-
    def _create_instance(self, context, instance_type,
               image_href, kernel_id, ramdisk_id,
               min_count, max_count,
               display_name, display_description,
               key_name, key_data, security_group,
               availability_zone, user_data, metadata,
               injected_files, admin_password,
               access_ip_v4, access_ip_v6,
               requested_networks, config_drive,
               block_device_mapping, auto_disk_config,
               reservation_id=None, create_instance_here=False,
               scheduler_hints=None):
        """Verify all the input parameters regardless of the provisioning
        strategy being performed and schedule the instance(s) for
        creation.

        Validation covers quotas (instance count, metadata, injected
        files), requested networks, and the image's min_ram/min_disk
        requirements against the flavor.

        :param create_instance_here: when True the DB record is created
            locally (single-instance fast path) and the scheduler request
            is cast; otherwise the scheduler is called and creates the
            record(s).
        :returns: a (instances, reservation_id) tuple.
        """

        if not metadata:
            metadata = {}
        if not display_description:
            display_description = ''
        if not security_group:
            security_group = 'default'

        if not instance_type:
            instance_type = instance_types.get_default_instance_type()
        if not min_count:
            min_count = 1
        if not max_count:
            max_count = min_count
        # NOTE(review): metadata was already defaulted at the top of this
        # method; this second check is redundant (harmless).
        if not metadata:
            metadata = {}

        block_device_mapping = block_device_mapping or []

        # Quota: how many more instances of this flavor the project may run.
        num_instances = quota.allowed_instances(context, max_count,
                                                instance_type)
        if num_instances < min_count:
            pid = context.project_id
            if num_instances <= 0:
                msg = _("Cannot run any more instances of this type.")
            else:
                msg = (_("Can only run %s more instances of this type.") %
                       num_instances)
            LOG.warn(_("Quota exceeded for %(pid)s,"
                    " tried to run %(min_count)s instances. " + msg) % locals())
            raise exception.QuotaError(code="InstanceLimitExceeded")

        self._check_metadata_properties_quota(context, metadata)
        self._check_injected_file_quota(context, injected_files)
        self._check_requested_networks(context, requested_networks)

        (image_service, image_id) = nova.image.get_image_service(context,
                                                                 image_href)
        image = image_service.show(context, image_id)

        # The flavor must satisfy the image's minimum requirements.
        if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
            raise exception.InstanceTypeMemoryTooSmall()
        if instance_type['root_gb'] < int(image.get('min_disk') or 0):
            raise exception.InstanceTypeDiskTooSmall()

        config_drive_id = None
        if config_drive and config_drive is not True:
            # config_drive is volume id
            config_drive, config_drive_id = None, config_drive

        # Pull optional guest properties off the image, when present.
        os_type = None
        if 'properties' in image and 'os_type' in image['properties']:
            os_type = image['properties']['os_type']
        architecture = None
        if 'properties' in image and 'arch' in image['properties']:
            architecture = image['properties']['arch']
        vm_mode = None
        if 'properties' in image and 'vm_mode' in image['properties']:
            vm_mode = image['properties']['vm_mode']

        # If instance doesn't have auto_disk_config overridden by request, use
        # whatever the image indicates
        if auto_disk_config is None:
            if ('properties' in image and
                'auto_disk_config' in image['properties']):
                auto_disk_config = utils.bool_from_str(
                    image['properties']['auto_disk_config'])

        if kernel_id is None:
            kernel_id = image['properties'].get('kernel_id', None)
        if ramdisk_id is None:
            ramdisk_id = image['properties'].get('ramdisk_id', None)
        # FIXME(sirp): is there a way we can remove null_kernel?
        # No kernel and ramdisk for raw images
        if kernel_id == str(FLAGS.null_kernel):
            kernel_id = None
            ramdisk_id = None
            LOG.debug(_("Creating a raw instance"))
        # Make sure we have access to kernel and ramdisk (if not raw)
        LOG.debug(_("Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s")
                % locals())
        if kernel_id:
            image_service.show(context, kernel_id)
        if ramdisk_id:
            image_service.show(context, ramdisk_id)
        if config_drive_id:
            image_service.show(context, config_drive_id)

        self.ensure_default_security_group(context)

        # Resolve the keypair's public key when only a key name was given.
        if key_data is None and key_name:
            key_pair = self.db.key_pair_get(context, context.user_id, key_name)
            key_data = key_pair['public_key']

        if reservation_id is None:
            reservation_id = utils.generate_uid('r')

        root_device_name = block_device.properties_root_device_name(
            image['properties'])

        # NOTE(vish): We have a legacy hack to allow admins to specify hosts
        #             via az using az:host. It might be nice to expose an
        #             api to specify specific hosts to force onto, but for
        #             now it just supports this legacy hack.
        host = None
        if availability_zone:
            availability_zone, _x, host = availability_zone.partition(':')
        if not availability_zone:
            availability_zone = FLAGS.default_schedule_zone
        if context.is_admin and host:
            filter_properties = {'force_hosts': [host]}
        else:
            filter_properties = {}

        filter_properties['scheduler_hints'] = scheduler_hints

        base_options = {
            'reservation_id': reservation_id,
            'image_ref': image_href,
            'kernel_id': kernel_id or '',
            'ramdisk_id': ramdisk_id or '',
            'power_state': power_state.NOSTATE,
            'vm_state': vm_states.BUILDING,
            'config_drive_id': config_drive_id or '',
            'config_drive': config_drive or '',
            'user_id': context.user_id,
            'project_id': context.project_id,
            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
            'instance_type_id': instance_type['id'],
            'memory_mb': instance_type['memory_mb'],
            'vcpus': instance_type['vcpus'],
            'root_gb': instance_type['root_gb'],
            'ephemeral_gb': instance_type['ephemeral_gb'],
            'display_name': display_name,
            'display_description': display_description,
            'user_data': user_data or '',
            'key_name': key_name,
            'key_data': key_data,
            'locked': False,
            'metadata': metadata,
            'access_ip_v4': access_ip_v4,
            'access_ip_v6': access_ip_v6,
            'availability_zone': availability_zone,
            'os_type': os_type,
            'architecture': architecture,
            'vm_mode': vm_mode,
            'root_device_name': root_device_name,
            'progress': 0,
            'auto_disk_config': auto_disk_config}

        LOG.debug(_("Going to run %s instances...") % num_instances)

        if create_instance_here:
            instance = self.create_db_entry_for_new_instance(
                    context, instance_type, image, base_options,
                    security_group, block_device_mapping)
            # Tells scheduler we created the instance already.
            base_options['uuid'] = instance['uuid']
            rpc_method = rpc.cast
        else:
            # We need to wait for the scheduler to create the instance
            # DB entries, because the instance *could* be # created in
            # a child zone.
            rpc_method = rpc.call

        # TODO(comstud): We should use rpc.multicall when we can
        # retrieve the full instance dictionary from the scheduler.
        # Otherwise, we could exceed the AMQP max message size limit.
        # This would require the schedulers' schedule_run_instances
        # methods to return an iterator vs a list.
        instances = self._schedule_run_instance(
                rpc_method,
                context, base_options,
                instance_type,
                availability_zone, injected_files,
                admin_password, image,
                num_instances, requested_networks,
                block_device_mapping, security_group,
                filter_properties)

        if create_instance_here:
            return ([instance], reservation_id)
        return (instances, reservation_id)
917-
918- @staticmethod
919- def _volume_size(instance_type, virtual_name):
920- size = 0
921- if virtual_name == 'swap':
922- size = instance_type.get('swap', 0)
923- elif block_device.is_ephemeral(virtual_name):
924- num = block_device.ephemeral_num(virtual_name)
925-
926- # TODO(yamahata): ephemeralN where N > 0
927- # Only ephemeral0 is allowed for now because InstanceTypes
928- # table only allows single local disk, ephemeral_gb.
929- # In order to enhance it, we need to add a new columns to
930- # instance_types table.
931- if num > 0:
932- return 0
933-
934- size = instance_type.get('ephemeral_gb')
935-
936- return size
937-
938- def _update_image_block_device_mapping(self, elevated_context,
939- instance_type, instance_id,
940- mappings):
941- """tell vm driver to create ephemeral/swap device at boot time by
942- updating BlockDeviceMapping
943- """
944- instance_type = (instance_type or
945- instance_types.get_default_instance_type())
946-
947- for bdm in block_device.mappings_prepend_dev(mappings):
948- LOG.debug(_("bdm %s"), bdm)
949-
950- virtual_name = bdm['virtual']
951- if virtual_name == 'ami' or virtual_name == 'root':
952- continue
953-
954- if not block_device.is_swap_or_ephemeral(virtual_name):
955- continue
956-
957- size = self._volume_size(instance_type, virtual_name)
958- if size == 0:
959- continue
960-
961- values = {
962- 'instance_id': instance_id,
963- 'device_name': bdm['device'],
964- 'virtual_name': virtual_name,
965- 'volume_size': size}
966- self.db.block_device_mapping_update_or_create(elevated_context,
967- values)
968-
969- def _update_block_device_mapping(self, elevated_context,
970- instance_type, instance_id,
971- block_device_mapping):
972- """tell vm driver to attach volume at boot time by updating
973- BlockDeviceMapping
974- """
975- LOG.debug(_("block_device_mapping %s"), block_device_mapping)
976- for bdm in block_device_mapping:
977- assert 'device_name' in bdm
978-
979- values = {'instance_id': instance_id}
980- for key in ('device_name', 'delete_on_termination', 'virtual_name',
981- 'snapshot_id', 'volume_id', 'volume_size',
982- 'no_device'):
983- values[key] = bdm.get(key)
984-
985- virtual_name = bdm.get('virtual_name')
986- if (virtual_name is not None and
987- block_device.is_swap_or_ephemeral(virtual_name)):
988- size = self._volume_size(instance_type, virtual_name)
989- if size == 0:
990- continue
991- values['volume_size'] = size
992-
993- # NOTE(yamahata): NoDevice eliminates devices defined in image
994- # files by command line option.
995- # (--block-device-mapping)
996- if virtual_name == 'NoDevice':
997- values['no_device'] = True
998- for k in ('delete_on_termination', 'volume_id',
999- 'snapshot_id', 'volume_id', 'volume_size',
1000- 'virtual_name'):
1001- values[k] = None
1002-
1003- self.db.block_device_mapping_update_or_create(elevated_context,
1004- values)
1005-
    #NOTE(bcwaldon): No policy check since this is only used by scheduler and
    # the compute api. That should probably be cleaned up, though.
    def create_db_entry_for_new_instance(self, context, instance_type, image,
            base_options, security_group, block_device_mapping):
        """Create an entry in the DB for this new instance,
        including any related table updates (such as security group,
        etc).

        This is called by the scheduler after a location for the
        instance has been determined.

        :returns: the instance record as a dict, after the final update()
                  that applies display_name/hostname defaults and sets the
                  BUILDING/SCHEDULING states.
        """
        elevated = context.elevated()
        if security_group is None:
            security_group = ['default']
        if not isinstance(security_group, list):
            security_group = [security_group]

        # Resolve group names to ids up front so a missing group fails
        # before the instance row is created.
        security_groups = []
        for security_group_name in security_group:
            group = self.db.security_group_get_by_name(context,
                                                       context.project_id,
                                                       security_group_name)
            security_groups.append(group['id'])

        base_options.setdefault('launch_index', 0)
        instance = self.db.instance_create(context, base_options)
        instance_id = instance['id']
        instance_uuid = instance['uuid']

        for security_group_id in security_groups:
            self.db.instance_add_security_group(elevated,
                                                instance_uuid,
                                                security_group_id)

        # BlockDeviceMapping table: image-defined mappings first, then
        # image-level BDMs, then caller-supplied BDMs (which override).
        self._update_image_block_device_mapping(elevated, instance_type,
            instance_id, image['properties'].get('mappings', []))
        self._update_block_device_mapping(elevated, instance_type, instance_id,
            image['properties'].get('block_device_mapping', []))
        # override via command line option
        self._update_block_device_mapping(elevated, instance_type, instance_id,
                                          block_device_mapping)

        # Set sane defaults if not specified
        updates = {}

        display_name = instance.get('display_name')
        if display_name is None:
            display_name = self._default_display_name(instance_id)

        hostname = instance.get('hostname')
        if hostname is None:
            hostname = display_name

        updates['display_name'] = display_name
        updates['hostname'] = utils.sanitize_hostname(hostname)
        updates['vm_state'] = vm_states.BUILDING
        updates['task_state'] = task_states.SCHEDULING

        # Instances with any block device mapping must not be terminated
        # just because the guest shuts down.
        if (image['properties'].get('mappings', []) or
            image['properties'].get('block_device_mapping', []) or
            block_device_mapping):
            updates['shutdown_terminate'] = False

        instance = self.update(context, instance, **updates)
        return instance
1072-
1073- def _default_display_name(self, instance_id):
1074- return "Server %s" % instance_id
1075-
1076- def _schedule_run_instance(self,
1077- rpc_method,
1078- context, base_options,
1079- instance_type,
1080- availability_zone, injected_files,
1081- admin_password, image,
1082- num_instances,
1083- requested_networks,
1084- block_device_mapping,
1085- security_group,
1086- filter_properties):
1087- """Send a run_instance request to the schedulers for processing."""
1088-
1089- pid = context.project_id
1090- uid = context.user_id
1091-
1092- LOG.debug(_("Sending create to scheduler for %(pid)s/%(uid)s's") %
1093- locals())
1094-
1095- request_spec = {
1096- 'image': utils.to_primitive(image),
1097- 'instance_properties': base_options,
1098- 'instance_type': instance_type,
1099- 'num_instances': num_instances,
1100- 'block_device_mapping': block_device_mapping,
1101- 'security_group': security_group,
1102- }
1103-
1104- return rpc_method(context,
1105- FLAGS.scheduler_topic,
1106- {"method": "run_instance",
1107- "args": {"topic": FLAGS.compute_topic,
1108- "request_spec": request_spec,
1109- "admin_password": admin_password,
1110- "injected_files": injected_files,
1111- "requested_networks": requested_networks,
1112- "is_first_time": True,
1113- "filter_properties": filter_properties}})
1114-
1115- def _check_create_policies(self, context, availability_zone,
1116- requested_networks, block_device_mapping):
1117- """Check policies for create()."""
1118- target = {'project_id': context.project_id,
1119- 'user_id': context.user_id,
1120- 'availability_zone': availability_zone}
1121- check_policy(context, 'create', target)
1122-
1123- if requested_networks:
1124- check_policy(context, 'create:attach_network', target)
1125-
1126- if block_device_mapping:
1127- check_policy(context, 'create:attach_volume', target)
1128-
1129- def create(self, context, instance_type,
1130- image_href, kernel_id=None, ramdisk_id=None,
1131- min_count=None, max_count=None,
1132- display_name=None, display_description=None,
1133- key_name=None, key_data=None, security_group=None,
1134- availability_zone=None, user_data=None, metadata=None,
1135- injected_files=None, admin_password=None,
1136- block_device_mapping=None, access_ip_v4=None,
1137- access_ip_v6=None, requested_networks=None, config_drive=None,
1138- auto_disk_config=None, scheduler_hints=None):
1139- """
1140- Provision instances, sending instance information to the
1141- scheduler. The scheduler will determine where the instance(s)
1142- go and will handle creating the DB entries.
1143-
1144- Returns a tuple of (instances, reservation_id) where instances
1145- could be 'None' or a list of instance dicts depending on if
1146- we waited for information from the scheduler or not.
1147- """
1148-
1149- self._check_create_policies(context, availability_zone,
1150- requested_networks, block_device_mapping)
1151-
1152- # We can create the DB entry for the instance here if we're
1153- # only going to create 1 instance.
1154- # This speeds up API responses for builds
1155- # as we don't need to wait for the scheduler.
1156- create_instance_here = max_count == 1
1157-
1158- (instances, reservation_id) = self._create_instance(
1159- context, instance_type,
1160- image_href, kernel_id, ramdisk_id,
1161- min_count, max_count,
1162- display_name, display_description,
1163- key_name, key_data, security_group,
1164- availability_zone, user_data, metadata,
1165- injected_files, admin_password,
1166- access_ip_v4, access_ip_v6,
1167- requested_networks, config_drive,
1168- block_device_mapping, auto_disk_config,
1169- create_instance_here=create_instance_here,
1170- scheduler_hints=scheduler_hints)
1171-
1172- if create_instance_here or instances is None:
1173- return (instances, reservation_id)
1174-
1175- inst_ret_list = []
1176- for instance in instances:
1177- if instance.get('_is_precooked', False):
1178- inst_ret_list.append(instance)
1179- else:
1180- # Scheduler only gives us the 'id'. We need to pull
1181- # in the created instances from the DB
1182- instance = self.db.instance_get(context, instance['id'])
1183- inst_ret_list.append(dict(instance.iteritems()))
1184-
1185- return (inst_ret_list, reservation_id)
1186-
1187- def ensure_default_security_group(self, context):
1188- """Ensure that a context has a security group.
1189-
1190- Creates a security group for the security context if it does not
1191- already exist.
1192-
1193- :param context: the security context
1194- """
1195- try:
1196- self.db.security_group_get_by_name(context,
1197- context.project_id,
1198- 'default')
1199- except exception.NotFound:
1200- values = {'name': 'default',
1201- 'description': 'default',
1202- 'user_id': context.user_id,
1203- 'project_id': context.project_id}
1204- self.db.security_group_create(context, values)
1205-
1206- def trigger_security_group_rules_refresh(self, context, security_group_id):
1207- """Called when a rule is added to or removed from a security_group."""
1208-
1209- security_group = self.db.security_group_get(context, security_group_id)
1210-
1211- hosts = set()
1212- for instance in security_group['instances']:
1213- if instance['host'] is not None:
1214- hosts.add(instance['host'])
1215-
1216- for host in hosts:
1217- rpc.cast(context,
1218- self.db.queue_get_for(context, FLAGS.compute_topic, host),
1219- {"method": "refresh_security_group_rules",
1220- "args": {"security_group_id": security_group.id}})
1221-
    def trigger_security_group_members_refresh(self, context, group_ids):
        """Called when a security group gains a new or loses a member.

        Sends an update request to each compute node for whom this is
        relevant.
        """
        # First, we get the security group rules that reference these groups as
        # the grantee..
        security_group_rules = set()
        for group_id in group_ids:
            security_group_rules.update(
                self.db.security_group_rule_get_by_security_group_grantee(
                    context,
                    group_id))

        # ..then we distill the security groups to which they belong..
        security_groups = set()
        for rule in security_group_rules:
            security_group = self.db.security_group_get(
                context,
                rule['parent_group_id'])
            security_groups.add(security_group)

        # ..then we find the instances that are members of these groups..
        instances = set()
        for security_group in security_groups:
            for instance in security_group['instances']:
                instances.add(instance)

        # ...then we find the hosts where they live...
        hosts = set()
        for instance in instances:
            if instance['host']:
                hosts.add(instance['host'])

        # ...and finally we tell these nodes to refresh their view of this
        # particular security group.
        # NOTE(review): 'group_id' below is the leftover loop variable from
        # the first loop, so only the *last* id in group_ids gets refreshed.
        # Looks like a latent bug when len(group_ids) > 1 — confirm intended
        # behavior before changing the RPC payload.
        for host in hosts:
            rpc.cast(context,
                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
                 {"method": "refresh_security_group_members",
                  "args": {"security_group_id": group_id}})
1264-
1265- def trigger_provider_fw_rules_refresh(self, context):
1266- """Called when a rule is added/removed from a provider firewall"""
1267-
1268- hosts = [x['host'] for (x, idx)
1269- in self.db.service_get_all_compute_sorted(context)]
1270- for host in hosts:
1271- rpc.cast(context,
1272- self.db.queue_get_for(context, FLAGS.compute_topic, host),
1273- {'method': 'refresh_provider_fw_rules', 'args': {}})
1274-
1275- def _is_security_group_associated_with_server(self, security_group,
1276- instance_uuid):
1277- """Check if the security group is already associated
1278- with the instance. If Yes, return True.
1279- """
1280-
1281- if not security_group:
1282- return False
1283-
1284- instances = security_group.get('instances')
1285- if not instances:
1286- return False
1287-
1288- for inst in instances:
1289- if (instance_uuid == inst['uuid']):
1290- return True
1291-
1292- return False
1293-
1294- @wrap_check_policy
1295- def add_security_group(self, context, instance, security_group_name):
1296- """Add security group to the instance"""
1297- security_group = self.db.security_group_get_by_name(context,
1298- context.project_id,
1299- security_group_name)
1300-
1301- instance_uuid = instance['uuid']
1302-
1303- #check if the security group is associated with the server
1304- if self._is_security_group_associated_with_server(security_group,
1305- instance_uuid):
1306- raise exception.SecurityGroupExistsForInstance(
1307- security_group_id=security_group['id'],
1308- instance_id=instance_uuid)
1309-
1310- #check if the instance is in running state
1311- if instance['power_state'] != power_state.RUNNING:
1312- raise exception.InstanceNotRunning(instance_id=instance_uuid)
1313-
1314- self.db.instance_add_security_group(context.elevated(),
1315- instance_uuid,
1316- security_group['id'])
1317- params = {"security_group_id": security_group['id']}
1318- # NOTE(comstud): No instance_uuid argument to this compute manager
1319- # call
1320- self._cast_compute_message('refresh_security_group_rules',
1321- context, host=instance['host'], params=params)
1322-
1323- @wrap_check_policy
1324- def remove_security_group(self, context, instance, security_group_name):
1325- """Remove the security group associated with the instance"""
1326- security_group = self.db.security_group_get_by_name(context,
1327- context.project_id,
1328- security_group_name)
1329-
1330- instance_uuid = instance['uuid']
1331-
1332- #check if the security group is associated with the server
1333- if not self._is_security_group_associated_with_server(security_group,
1334- instance_uuid):
1335- raise exception.SecurityGroupNotExistsForInstance(
1336- security_group_id=security_group['id'],
1337- instance_id=instance_uuid)
1338-
1339- #check if the instance is in running state
1340- if instance['power_state'] != power_state.RUNNING:
1341- raise exception.InstanceNotRunning(instance_id=instance_uuid)
1342-
1343- self.db.instance_remove_security_group(context.elevated(),
1344- instance_uuid,
1345- security_group['id'])
1346- params = {"security_group_id": security_group['id']}
1347- # NOTE(comstud): No instance_uuid argument to this compute manager
1348- # call
1349- self._cast_compute_message('refresh_security_group_rules',
1350- context, host=instance['host'], params=params)
1351-
1352- @wrap_check_policy
1353- def update(self, context, instance, **kwargs):
1354- """Updates the instance in the datastore.
1355-
1356- :param context: The security context
1357- :param instance: The instance to update
1358- :param kwargs: All additional keyword args are treated
1359- as data fields of the instance to be
1360- updated
1361-
1362- :returns: None
1363- """
1364- rv = self.db.instance_update(context, instance["id"], kwargs)
1365- return dict(rv.iteritems())
1366-
1367- @wrap_check_policy
1368- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
1369- vm_states.ERROR])
1370- def soft_delete(self, context, instance):
1371- """Terminate an instance."""
1372- LOG.debug(_('Going to try to soft delete instance'),
1373- instance=instance)
1374-
1375- if instance['disable_terminate']:
1376- return
1377-
1378- # NOTE(jerdfelt): The compute daemon handles reclaiming instances
1379- # that are in soft delete. If there is no host assigned, there is
1380- # no daemon to reclaim, so delete it immediately.
1381- host = instance['host']
1382- if host:
1383- self.update(context,
1384- instance,
1385- vm_state=vm_states.SOFT_DELETE,
1386- task_state=task_states.POWERING_OFF,
1387- deleted_at=utils.utcnow())
1388-
1389- self._cast_compute_message('power_off_instance',
1390- context, instance)
1391- else:
1392- LOG.warning(_('No host for instance, deleting immediately'),
1393- instance=instance)
1394- try:
1395- self.db.instance_destroy(context, instance['id'])
1396- except exception.InstanceNotFound:
1397- # NOTE(comstud): Race condition. Instance already gone.
1398- pass
1399-
1400- def _delete(self, context, instance):
1401- host = instance['host']
1402- try:
1403- if host:
1404- self.update(context,
1405- instance,
1406- task_state=task_states.DELETING,
1407- progress=0)
1408-
1409- self._cast_compute_message('terminate_instance',
1410- context, instance)
1411- else:
1412- self.db.instance_destroy(context, instance['id'])
1413- except exception.InstanceNotFound:
1414- # NOTE(comstud): Race condition. Instance already gone.
1415- pass
1416-
1417- # NOTE(jerdfelt): The API implies that only ACTIVE and ERROR are
1418- # allowed but the EC2 API appears to allow from RESCUED and STOPPED
1419- # too
1420- @wrap_check_policy
1421- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.BUILDING,
1422- vm_states.ERROR, vm_states.RESCUED,
1423- vm_states.SHUTOFF, vm_states.STOPPED])
1424- def delete(self, context, instance):
1425- """Terminate an instance."""
1426- LOG.debug(_("Going to try to terminate instance"), instance=instance)
1427-
1428- if instance['disable_terminate']:
1429- return
1430-
1431- self._delete(context, instance)
1432-
1433- @wrap_check_policy
1434- @check_instance_state(vm_state=[vm_states.SOFT_DELETE])
1435- def restore(self, context, instance):
1436- """Restore a previously deleted (but not reclaimed) instance."""
1437- self.update(context,
1438- instance,
1439- vm_state=vm_states.ACTIVE,
1440- task_state=None,
1441- deleted_at=None)
1442-
1443- host = instance['host']
1444- if host:
1445- self.update(context,
1446- instance,
1447- task_state=task_states.POWERING_ON)
1448- self._cast_compute_message('power_on_instance',
1449- context, instance)
1450-
1451- @wrap_check_policy
1452- @check_instance_state(vm_state=[vm_states.SOFT_DELETE])
1453- def force_delete(self, context, instance):
1454- """Force delete a previously deleted (but not reclaimed) instance."""
1455- self._delete(context, instance)
1456-
1457- @wrap_check_policy
1458- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
1459- vm_states.RESCUED],
1460- task_state=[None, task_states.RESIZE_VERIFY])
1461- def stop(self, context, instance, do_cast=True):
1462- """Stop an instance."""
1463- instance_uuid = instance["uuid"]
1464- LOG.debug(_("Going to try to stop instance"), instance=instance)
1465-
1466- self.update(context,
1467- instance,
1468- vm_state=vm_states.ACTIVE,
1469- task_state=task_states.STOPPING,
1470- terminated_at=utils.utcnow(),
1471- progress=0)
1472-
1473- rpc_method = rpc.cast if do_cast else rpc.call
1474- self._cast_or_call_compute_message(rpc_method, 'stop_instance',
1475- context, instance)
1476-
1477- @wrap_check_policy
1478- @check_instance_state(vm_state=[vm_states.STOPPED, vm_states.SHUTOFF])
1479- def start(self, context, instance):
1480- """Start an instance."""
1481- vm_state = instance["vm_state"]
1482- instance_uuid = instance["uuid"]
1483- LOG.debug(_("Going to try to start instance"), instance=instance)
1484-
1485- if vm_state == vm_states.SHUTOFF:
1486- if instance['shutdown_terminate']:
1487- LOG.warning(_("Instance %(instance_uuid)s is not "
1488- "stopped. (%(vm_state)s") % locals())
1489- return
1490-
1491- # NOTE(yamahata): nova compute doesn't reap instances
1492- # which initiated shutdown itself. So reap it here.
1493- self.stop(context, instance, do_cast=False)
1494-
1495- self.update(context,
1496- instance,
1497- vm_state=vm_states.STOPPED,
1498- task_state=task_states.STARTING)
1499-
1500- # TODO(yamahata): injected_files isn't supported right now.
1501- # It is used only for osapi. not for ec2 api.
1502- # availability_zone isn't used by run_instance.
1503- self._cast_compute_message('start_instance', context, instance)
1504-
1505- #NOTE(bcwaldon): no policy check here since it should be rolled in to
1506- # search_opts in get_all
1507- def get_active_by_window(self, context, begin, end=None, project_id=None):
1508- """Get instances that were continuously active over a window."""
1509- return self.db.instance_get_active_by_window(context, begin, end,
1510- project_id)
1511-
1512- #NOTE(bcwaldon): this doesn't really belong in this class
1513- def get_instance_type(self, context, instance_type_id):
1514- """Get an instance type by instance type id."""
1515- return instance_types.get_instance_type(instance_type_id)
1516-
1517- def get(self, context, instance_id):
1518- """Get a single instance with the given instance_id."""
1519- # NOTE(ameade): we still need to support integer ids for ec2
1520- if utils.is_uuid_like(instance_id):
1521- instance = self.db.instance_get_by_uuid(context, instance_id)
1522- else:
1523- instance = self.db.instance_get(context, instance_id)
1524-
1525- check_policy(context, 'get', instance)
1526-
1527- inst = dict(instance.iteritems())
1528- # NOTE(comstud): Doesn't get returned with iteritems
1529- inst['name'] = instance['name']
1530- return inst
1531-
1532- def get_all(self, context, search_opts=None, sort_key='created_at',
1533- sort_dir='desc'):
1534- """Get all instances filtered by one of the given parameters.
1535-
1536- If there is no filter and the context is an admin, it will retrieve
1537- all instances in the system.
1538-
1539- Deleted instances will be returned by default, unless there is a
1540- search option that says otherwise.
1541-
1542- The results will be returned sorted in the order specified by the
1543- 'sort_dir' parameter using the key specified in the 'sort_key'
1544- parameter.
1545- """
1546-
1547- #TODO(bcwaldon): determine the best argument for target here
1548- target = {
1549- 'project_id': context.project_id,
1550- 'user_id': context.user_id,
1551- }
1552-
1553- check_policy(context, "get_all", target)
1554-
1555- if search_opts is None:
1556- search_opts = {}
1557-
1558- LOG.debug(_("Searching by: %s") % str(search_opts))
1559-
1560- # Fixups for the DB call
1561- filters = {}
1562-
1563- def _remap_flavor_filter(flavor_id):
1564- try:
1565- instance_type = instance_types.get_instance_type_by_flavor_id(
1566- flavor_id)
1567- except exception.FlavorNotFound:
1568- raise ValueError()
1569-
1570- filters['instance_type_id'] = instance_type['id']
1571-
1572- def _remap_fixed_ip_filter(fixed_ip):
1573- # Turn fixed_ip into a regexp match. Since '.' matches
1574- # any character, we need to use regexp escaping for it.
1575- filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')
1576-
1577- # search_option to filter_name mapping.
1578- filter_mapping = {
1579- 'image': 'image_ref',
1580- 'name': 'display_name',
1581- 'instance_name': 'name',
1582- 'tenant_id': 'project_id',
1583- 'flavor': _remap_flavor_filter,
1584- 'fixed_ip': _remap_fixed_ip_filter}
1585-
1586- # copy from search_opts, doing various remappings as necessary
1587- for opt, value in search_opts.iteritems():
1588- # Do remappings.
1589- # Values not in the filter_mapping table are copied as-is.
1590- # If remapping is None, option is not copied
1591- # If the remapping is a string, it is the filter_name to use
1592- try:
1593- remap_object = filter_mapping[opt]
1594- except KeyError:
1595- filters[opt] = value
1596- else:
1597- # Remaps are strings to translate to, or functions to call
1598- # to do the translating as defined by the table above.
1599- if isinstance(remap_object, basestring):
1600- filters[remap_object] = value
1601- else:
1602- try:
1603- remap_object(value)
1604-
1605- # We already know we can't match the filter, so
1606- # return an empty list
1607- except ValueError:
1608- return []
1609-
1610- inst_models = self._get_instances_by_filters(context, filters,
1611- sort_key, sort_dir)
1612-
1613- # Convert the models to dictionaries
1614- instances = []
1615- for inst_model in inst_models:
1616- instance = dict(inst_model.iteritems())
1617- # NOTE(comstud): Doesn't get returned by iteritems
1618- instance['name'] = inst_model['name']
1619- instances.append(instance)
1620-
1621- return instances
1622-
1623- def _get_instances_by_filters(self, context, filters, sort_key, sort_dir):
1624- if 'ip6' in filters or 'ip' in filters:
1625- res = self.network_api.get_instance_uuids_by_ip_filter(context,
1626- filters)
1627- # NOTE(jkoelker) It is possible that we will get the same
1628- # instance uuid twice (one for ipv4 and ipv6)
1629- uuids = set([r['instance_uuid'] for r in res])
1630- filters['uuid'] = uuids
1631-
1632- return self.db.instance_get_all_by_filters(context, filters, sort_key,
1633- sort_dir)
1634-
1635- @wrap_check_policy
1636- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF])
1637- def backup(self, context, instance, name, backup_type, rotation,
1638- extra_properties=None):
1639- """Backup the given instance
1640-
1641- :param instance: nova.db.sqlalchemy.models.Instance
1642- :param name: name of the backup or snapshot
1643- name = backup_type # daily backups are called 'daily'
1644- :param rotation: int representing how many backups to keep around;
1645- None if rotation shouldn't be used (as in the case of snapshots)
1646- :param extra_properties: dict of extra image properties to include
1647- """
1648- recv_meta = self._create_image(context, instance, name, 'backup',
1649- backup_type=backup_type, rotation=rotation,
1650- extra_properties=extra_properties)
1651- return recv_meta
1652-
1653- @wrap_check_policy
1654- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF])
1655- def snapshot(self, context, instance, name, extra_properties=None):
1656- """Snapshot the given instance.
1657-
1658- :param instance: nova.db.sqlalchemy.models.Instance
1659- :param name: name of the backup or snapshot
1660- :param extra_properties: dict of extra image properties to include
1661-
1662- :returns: A dict containing image metadata
1663- """
1664- return self._create_image(context, instance, name, 'snapshot',
1665- extra_properties=extra_properties)
1666-
1667- def _create_image(self, context, instance, name, image_type,
1668- backup_type=None, rotation=None, extra_properties=None):
1669- """Create snapshot or backup for an instance on this host.
1670-
1671- :param context: security context
1672- :param instance: nova.db.sqlalchemy.models.Instance
1673- :param name: string for name of the snapshot
1674- :param image_type: snapshot | backup
1675- :param backup_type: daily | weekly
1676- :param rotation: int representing how many backups to keep around;
1677- None if rotation shouldn't be used (as in the case of snapshots)
1678- :param extra_properties: dict of extra image properties to include
1679-
1680- """
1681- instance_uuid = instance['uuid']
1682-
1683- if image_type == "snapshot":
1684- task_state = task_states.IMAGE_SNAPSHOT
1685- elif image_type == "backup":
1686- task_state = task_states.IMAGE_BACKUP
1687- else:
1688- raise Exception(_('Image type not recognized %s') % image_type)
1689-
1690- self.db.instance_test_and_set(
1691- context, instance_uuid, 'task_state', [None], task_state)
1692-
1693- properties = {
1694- 'instance_uuid': instance_uuid,
1695- 'user_id': str(context.user_id),
1696- 'image_type': image_type,
1697- }
1698-
1699- sent_meta = {'name': name, 'is_public': False}
1700-
1701- if image_type == 'backup':
1702- properties['backup_type'] = backup_type
1703-
1704- elif image_type == 'snapshot':
1705- min_ram, min_disk = self._get_minram_mindisk_params(context,
1706- instance)
1707- if min_ram is not None:
1708- sent_meta['min_ram'] = min_ram
1709- if min_disk is not None:
1710- sent_meta['min_disk'] = min_disk
1711-
1712- properties.update(extra_properties or {})
1713- sent_meta['properties'] = properties
1714-
1715- recv_meta = self.image_service.create(context, sent_meta)
1716- params = {'image_id': recv_meta['id'], 'image_type': image_type,
1717- 'backup_type': backup_type, 'rotation': rotation}
1718- self._cast_compute_message('snapshot_instance', context, instance,
1719- params=params)
1720- return recv_meta
1721-
1722- def _get_minram_mindisk_params(self, context, instance):
1723- try:
1724- #try to get source image of the instance
1725- orig_image = self.image_service.show(context,
1726- instance['image_ref'])
1727- except exception.ImageNotFound:
1728- return None, None
1729-
1730- #disk format of vhd is non-shrinkable
1731- if orig_image.get('disk_format') == 'vhd':
1732- min_ram = instance['instance_type']['memory_mb']
1733- min_disk = instance['instance_type']['root_gb']
1734- else:
1735- #set new image values to the original image values
1736- min_ram = orig_image.get('min_ram')
1737- min_disk = orig_image.get('min_disk')
1738-
1739- return min_ram, min_disk
1740-
1741- @wrap_check_policy
1742- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
1743- vm_states.RESCUED],
1744- task_state=[None, task_states.RESIZE_VERIFY])
1745- def reboot(self, context, instance, reboot_type):
1746- """Reboot the given instance."""
1747- state = {'SOFT': task_states.REBOOTING,
1748- 'HARD': task_states.REBOOTING_HARD}[reboot_type]
1749- self.update(context,
1750- instance,
1751- vm_state=vm_states.ACTIVE,
1752- task_state=state)
1753- self._cast_compute_message('reboot_instance', context, instance,
1754- params={'reboot_type': reboot_type})
1755-
1756- def _validate_image_href(self, context, image_href):
1757- """Throws an ImageNotFound exception if image_href does not exist."""
1758- (image_service, image_id) = nova.image.get_image_service(context,
1759- image_href)
1760- image_service.show(context, image_id)
1761-
1762- @wrap_check_policy
1763- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
1764- task_state=[None, task_states.RESIZE_VERIFY])
1765- def rebuild(self, context, instance, image_href, admin_password, **kwargs):
1766- """Rebuild the given instance with the provided attributes."""
1767-
1768- self._validate_image_href(context, image_href)
1769-
1770- files_to_inject = kwargs.pop('files_to_inject', [])
1771- self._check_injected_file_quota(context, files_to_inject)
1772-
1773- metadata = kwargs.get('metadata', {})
1774- self._check_metadata_properties_quota(context, metadata)
1775-
1776- self.update(context,
1777- instance,
1778- image_ref=image_href,
1779- vm_state=vm_states.REBUILDING,
1780- task_state=None,
1781- progress=0,
1782- **kwargs)
1783-
1784- rebuild_params = {
1785- "new_pass": admin_password,
1786- "injected_files": files_to_inject,
1787- }
1788-
1789- self._cast_compute_message('rebuild_instance', context, instance,
1790- params=rebuild_params)
1791-
1792- @wrap_check_policy
1793- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
1794- task_state=[task_states.RESIZE_VERIFY])
1795- def revert_resize(self, context, instance):
1796- """Reverts a resize, deleting the 'new' instance in the process."""
1797- context = context.elevated()
1798- migration_ref = self.db.migration_get_by_instance_and_status(context,
1799- instance['uuid'], 'finished')
1800- if not migration_ref:
1801- raise exception.MigrationNotFoundByStatus(
1802- instance_id=instance['uuid'], status='finished')
1803-
1804- self.update(context,
1805- instance,
1806- vm_state=vm_states.RESIZING,
1807- task_state=task_states.RESIZE_REVERTING)
1808-
1809- params = {'migration_id': migration_ref['id']}
1810- self._cast_compute_message('revert_resize', context, instance,
1811- host=migration_ref['dest_compute'], params=params)
1812-
1813- self.db.migration_update(context, migration_ref['id'],
1814- {'status': 'reverted'})
1815-
1816- @wrap_check_policy
1817- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
1818- task_state=[task_states.RESIZE_VERIFY])
1819- def confirm_resize(self, context, instance):
1820- """Confirms a migration/resize and deletes the 'old' instance."""
1821- context = context.elevated()
1822- migration_ref = self.db.migration_get_by_instance_and_status(context,
1823- instance['uuid'], 'finished')
1824- if not migration_ref:
1825- raise exception.MigrationNotFoundByStatus(
1826- instance_id=instance['uuid'], status='finished')
1827-
1828- self.update(context,
1829- instance,
1830- vm_state=vm_states.ACTIVE,
1831- task_state=None)
1832-
1833- params = {'migration_id': migration_ref['id']}
1834- self._cast_compute_message('confirm_resize', context, instance,
1835- host=migration_ref['source_compute'], params=params)
1836-
1837- self.db.migration_update(context, migration_ref['id'],
1838- {'status': 'confirmed'})
1839- self.db.instance_update(context, instance['uuid'],
1840- {'host': migration_ref['dest_compute'], })
1841-
1842- @wrap_check_policy
1843- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
1844- task_state=[None])
1845- def resize(self, context, instance, flavor_id=None, **kwargs):
1846- """Resize (ie, migrate) a running instance.
1847-
1848- If flavor_id is None, the process is considered a migration, keeping
1849- the original flavor_id. If flavor_id is not None, the instance should
1850- be migrated to a new host and resized to the new flavor_id.
1851- """
1852- current_instance_type = instance['instance_type']
1853-
1854- # If flavor_id is not provided, only migrate the instance.
1855- if not flavor_id:
1856- LOG.debug(_("flavor_id is None. Assuming migration."))
1857- new_instance_type = current_instance_type
1858- else:
1859- new_instance_type = instance_types.get_instance_type_by_flavor_id(
1860- flavor_id)
1861-
1862- current_instance_type_name = current_instance_type['name']
1863- new_instance_type_name = new_instance_type['name']
1864- LOG.debug(_("Old instance type %(current_instance_type_name)s, "
1865- " new instance type %(new_instance_type_name)s") % locals())
1866- if not new_instance_type:
1867- raise exception.FlavorNotFound(flavor_id=flavor_id)
1868-
1869- # NOTE(markwash): look up the image early to avoid auth problems later
1870- image = self.image_service.show(context, instance['image_ref'])
1871-
1872- current_memory_mb = current_instance_type['memory_mb']
1873- new_memory_mb = new_instance_type['memory_mb']
1874-
1875- if (current_memory_mb == new_memory_mb) and flavor_id:
1876- raise exception.CannotResizeToSameSize()
1877-
1878- self.update(context,
1879- instance,
1880- vm_state=vm_states.RESIZING,
1881- task_state=task_states.RESIZE_PREP,
1882- progress=0,
1883- **kwargs)
1884-
1885- request_spec = {
1886- 'instance_type': new_instance_type,
1887- 'num_instances': 1,
1888- 'instance_properties': instance}
1889-
1890- filter_properties = {'ignore_hosts': []}
1891-
1892- if not FLAGS.allow_resize_to_same_host:
1893- filter_properties['ignore_hosts'].append(instance['host'])
1894-
1895- args = {
1896- "topic": FLAGS.compute_topic,
1897- "instance_uuid": instance['uuid'],
1898- "instance_type_id": new_instance_type['id'],
1899- "image": image,
1900- "update_db": False,
1901- "request_spec": utils.to_primitive(request_spec),
1902- "filter_properties": filter_properties,
1903- }
1904- self._cast_scheduler_message(context,
1905- {"method": "prep_resize",
1906- "args": args})
1907-
1908- @wrap_check_policy
1909- def add_fixed_ip(self, context, instance, network_id):
1910- """Add fixed_ip from specified network to given instance."""
1911- self._cast_compute_message('add_fixed_ip_to_instance', context,
1912- instance, params=dict(network_id=network_id))
1913-
1914- @wrap_check_policy
1915- def remove_fixed_ip(self, context, instance, address):
1916- """Remove fixed_ip from specified network to given instance."""
1917- self._cast_compute_message('remove_fixed_ip_from_instance',
1918- context, instance, params=dict(address=address))
1919-
1920- @wrap_check_policy
1921- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
1922- vm_states.RESCUED],
1923- task_state=[None, task_states.RESIZE_VERIFY])
1924- def pause(self, context, instance):
1925- """Pause the given instance."""
1926- self.update(context,
1927- instance,
1928- vm_state=vm_states.ACTIVE,
1929- task_state=task_states.PAUSING)
1930- self._cast_compute_message('pause_instance', context, instance)
1931-
1932- @wrap_check_policy
1933- @check_instance_state(vm_state=[vm_states.PAUSED])
1934- def unpause(self, context, instance):
1935- """Unpause the given instance."""
1936- self.update(context,
1937- instance,
1938- vm_state=vm_states.PAUSED,
1939- task_state=task_states.UNPAUSING)
1940- self._cast_compute_message('unpause_instance', context, instance)
1941-
1942- @wrap_check_policy
1943- def get_diagnostics(self, context, instance):
1944- """Retrieve diagnostics for the given instance."""
1945- return self._call_compute_message("get_diagnostics", context,
1946- instance)
1947-
1948- @wrap_check_policy
1949- def get_actions(self, context, instance):
1950- """Retrieve actions for the given instance."""
1951- return self.db.instance_get_actions(context, instance['uuid'])
1952-
1953- @wrap_check_policy
1954- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
1955- vm_states.RESCUED],
1956- task_state=[None, task_states.RESIZE_VERIFY])
1957- def suspend(self, context, instance):
1958- """Suspend the given instance."""
1959- self.update(context,
1960- instance,
1961- vm_state=vm_states.ACTIVE,
1962- task_state=task_states.SUSPENDING)
1963- self._cast_compute_message('suspend_instance', context, instance)
1964-
1965- @wrap_check_policy
1966- @check_instance_state(vm_state=[vm_states.SUSPENDED])
1967- def resume(self, context, instance):
1968- """Resume the given instance."""
1969- self.update(context,
1970- instance,
1971- vm_state=vm_states.SUSPENDED,
1972- task_state=task_states.RESUMING)
1973- self._cast_compute_message('resume_instance', context, instance)
1974-
1975- @wrap_check_policy
1976- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
1977- vm_states.STOPPED],
1978- task_state=[None, task_states.RESIZE_VERIFY])
1979- def rescue(self, context, instance, rescue_password=None):
1980- """Rescue the given instance."""
1981- self.update(context,
1982- instance,
1983- vm_state=vm_states.ACTIVE,
1984- task_state=task_states.RESCUING)
1985-
1986- rescue_params = {
1987- "rescue_password": rescue_password
1988- }
1989- self._cast_compute_message('rescue_instance', context, instance,
1990- params=rescue_params)
1991-
1992- @wrap_check_policy
1993- @check_instance_state(vm_state=[vm_states.RESCUED])
1994- def unrescue(self, context, instance):
1995- """Unrescue the given instance."""
1996- self.update(context,
1997- instance,
1998- vm_state=vm_states.RESCUED,
1999- task_state=task_states.UNRESCUING)
2000- self._cast_compute_message('unrescue_instance', context, instance)
2001-
2002- @wrap_check_policy
2003- @check_instance_state(vm_state=[vm_states.ACTIVE])
2004- def set_admin_password(self, context, instance, password=None):
2005- """Set the root/admin password for the given instance."""
2006- self.update(context,
2007- instance,
2008- task_state=task_states.UPDATING_PASSWORD)
2009-
2010- params = {"new_pass": password}
2011- self._cast_compute_message('set_admin_password', context, instance,
2012- params=params)
2013-
2014- @wrap_check_policy
2015- def inject_file(self, context, instance, path, file_contents):
2016- """Write a file to the given instance."""
2017- params = {'path': path, 'file_contents': file_contents}
2018- self._cast_compute_message('inject_file', context, instance,
2019- params=params)
2020-
2021- @wrap_check_policy
2022- def get_vnc_console(self, context, instance, console_type):
2023- """Get a url to an instance Console."""
2024- connect_info = self._call_compute_message('get_vnc_console',
2025- context, instance, params={"console_type": console_type})
2026-
2027- rpc.call(context, '%s' % FLAGS.consoleauth_topic,
2028- {'method': 'authorize_console',
2029- 'args': {'token': connect_info['token'],
2030- 'console_type': console_type,
2031- 'host': connect_info['host'],
2032- 'port': connect_info['port'],
2033- 'internal_access_path':
2034- connect_info['internal_access_path']}})
2035-
2036- return {'url': connect_info['access_url']}
2037-
2038- @wrap_check_policy
2039- def get_console_output(self, context, instance, tail_length=None):
2040- """Get console output for an an instance."""
2041- params = {'tail_length': tail_length}
2042- return self._call_compute_message('get_console_output', context,
2043- instance, params=params)
2044-
2045- @wrap_check_policy
2046- def lock(self, context, instance):
2047- """Lock the given instance."""
2048- self._cast_compute_message('lock_instance', context, instance)
2049-
2050- @wrap_check_policy
2051- def unlock(self, context, instance):
2052- """Unlock the given instance."""
2053- self._cast_compute_message('unlock_instance', context, instance)
2054-
2055- @wrap_check_policy
2056- def get_lock(self, context, instance):
2057- """Return the boolean state of given instance's lock."""
2058- return self.get(context, instance['uuid'])['locked']
2059-
2060- @wrap_check_policy
2061- def reset_network(self, context, instance):
2062- """Reset networking on the instance."""
2063- self._cast_compute_message('reset_network', context, instance)
2064-
2065- @wrap_check_policy
2066- def inject_network_info(self, context, instance):
2067- """Inject network info for the instance."""
2068- self._cast_compute_message('inject_network_info', context, instance)
2069-
2070- @wrap_check_policy
2071- def attach_volume(self, context, instance, volume_id, device):
2072- """Attach an existing volume to an existing instance."""
2073- if not re.match("^/dev/x{0,1}[a-z]d[a-z]+$", device):
2074- raise exception.InvalidDevicePath(path=device)
2075- volume = self.volume_api.get(context, volume_id)
2076- self.volume_api.check_attach(context, volume)
2077- self.volume_api.reserve_volume(context, volume)
2078- params = {"volume_id": volume_id,
2079- "mountpoint": device}
2080- self._cast_compute_message('attach_volume', context, instance,
2081- params=params)
2082-
2083- # FIXME(comstud): I wonder if API should pull in the instance from
2084- # the volume ID via volume API and pass it and the volume object here
2085- def detach_volume(self, context, volume_id):
2086- """Detach a volume from an instance."""
2087- instance = self.db.volume_get_instance(context.elevated(), volume_id)
2088- if not instance:
2089- raise exception.VolumeUnattached(volume_id=volume_id)
2090-
2091- check_policy(context, 'detach_volume', instance)
2092-
2093- volume = self.volume_api.get(context, volume_id)
2094- self.volume_api.check_detach(context, volume)
2095-
2096- params = {'volume_id': volume_id}
2097- self._cast_compute_message('detach_volume', context, instance,
2098- params=params)
2099- return instance
2100-
2101- @wrap_check_policy
2102- def associate_floating_ip(self, context, instance, address):
2103- """Makes calls to network_api to associate_floating_ip.
2104-
2105- :param address: is a string floating ip address
2106- """
2107- instance_uuid = instance['uuid']
2108-
2109- # TODO(tr3buchet): currently network_info doesn't contain floating IPs
2110- # in its info, if this changes, the next few lines will need to
2111- # accommodate the info containing floating as well as fixed ip
2112- # addresses
2113- nw_info = self.network_api.get_instance_nw_info(context.elevated(),
2114- instance)
2115-
2116- if not nw_info:
2117- raise exception.FixedIpNotFoundForInstance(
2118- instance_id=instance_uuid)
2119-
2120- ips = [ip for ip in nw_info[0].fixed_ips()]
2121-
2122- if not ips:
2123- raise exception.FixedIpNotFoundForInstance(
2124- instance_id=instance_uuid)
2125-
2126- # TODO(tr3buchet): this will associate the floating IP with the
2127- # first fixed_ip (lowest id) an instance has. This should be
2128- # changed to support specifying a particular fixed_ip if
2129- # multiple exist.
2130- if len(ips) > 1:
2131- msg = _('multiple fixedips exist, using the first: %s')
2132- LOG.warning(msg, ips[0]['address'])
2133-
2134- self.network_api.associate_floating_ip(context,
2135- floating_address=address, fixed_address=ips[0]['address'])
2136- self.network_api.invalidate_instance_cache(context.elevated(),
2137- instance)
2138-
2139- @wrap_check_policy
2140- def get_instance_metadata(self, context, instance):
2141- """Get all metadata associated with an instance."""
2142- rv = self.db.instance_metadata_get(context, instance['id'])
2143- return dict(rv.iteritems())
2144-
2145- @wrap_check_policy
2146- def delete_instance_metadata(self, context, instance, key):
2147- """Delete the given metadata item from an instance."""
2148- self.db.instance_metadata_delete(context, instance['id'], key)
2149-
2150- @wrap_check_policy
2151- def update_instance_metadata(self, context, instance,
2152- metadata, delete=False):
2153- """Updates or creates instance metadata.
2154-
2155- If delete is True, metadata items that are not specified in the
2156- `metadata` argument will be deleted.
2157-
2158- """
2159- if delete:
2160- _metadata = metadata
2161- else:
2162- _metadata = self.get_instance_metadata(context, instance)
2163- _metadata.update(metadata)
2164-
2165- self._check_metadata_properties_quota(context, _metadata)
2166- self.db.instance_metadata_update(context, instance['id'],
2167- _metadata, True)
2168- return _metadata
2169-
2170- def get_instance_faults(self, context, instances):
2171- """Get all faults for a list of instance uuids."""
2172-
2173- if not instances:
2174- return {}
2175-
2176- for instance in instances:
2177- check_policy(context, 'get_instance_faults', instance)
2178-
2179- uuids = [instance['uuid'] for instance in instances]
2180- return self.db.instance_fault_get_by_instance_uuids(context, uuids)
2181-
2182-
2183-class HostAPI(BaseAPI):
2184- """Sub-set of the Compute Manager API for managing host operations."""
2185- def set_host_enabled(self, context, host, enabled):
2186- """Sets the specified host's ability to accept new instances."""
2187- # NOTE(comstud): No instance_uuid argument to this compute manager
2188- # call
2189- return self._call_compute_message("set_host_enabled", context,
2190- host=host, params={"enabled": enabled})
2191-
2192- def host_power_action(self, context, host, action):
2193- """Reboots, shuts down or powers up the host."""
2194- # NOTE(comstud): No instance_uuid argument to this compute manager
2195- # call
2196- return self._call_compute_message("host_power_action", context,
2197- host=host, params={"action": action})
2198-
2199- def set_host_maintenance(self, context, host, mode):
2200- """Start/Stop host maintenance window. On start, it triggers
2201- guest VMs evacuation."""
2202- return self._call_compute_message("host_maintenance_mode", context,
2203- host=host, params={"host": host, "mode": mode})
2204-
2205-
2206-class AggregateAPI(base.Base):
2207- """Sub-set of the Compute Manager API for managing host aggregates."""
2208- def __init__(self, **kwargs):
2209- super(AggregateAPI, self).__init__(**kwargs)
2210-
2211- def create_aggregate(self, context, aggregate_name, availability_zone):
2212- """Creates the model for the aggregate."""
2213- zones = [s.availability_zone for s in
2214- self.db.service_get_all_by_topic(context,
2215- FLAGS.compute_topic)]
2216- if availability_zone in zones:
2217- values = {"name": aggregate_name,
2218- "availability_zone": availability_zone}
2219- aggregate = self.db.aggregate_create(context, values)
2220- return dict(aggregate.iteritems())
2221- else:
2222- raise exception.InvalidAggregateAction(action='create_aggregate',
2223- aggregate_id="'N/A'",
2224- reason='invalid zone')
2225-
2226- def get_aggregate(self, context, aggregate_id):
2227- """Get an aggregate by id."""
2228- aggregate = self.db.aggregate_get(context, aggregate_id)
2229- return self._get_aggregate_info(context, aggregate)
2230-
2231- def get_aggregate_list(self, context):
2232- """Get all the aggregates for this zone."""
2233- aggregates = self.db.aggregate_get_all(context, read_deleted="no")
2234- return [self._get_aggregate_info(context, a) for a in aggregates]
2235-
2236- def update_aggregate(self, context, aggregate_id, values):
2237- """Update the properties of an aggregate."""
2238- aggregate = self.db.aggregate_update(context, aggregate_id, values)
2239- return self._get_aggregate_info(context, aggregate)
2240-
2241- def update_aggregate_metadata(self, context, aggregate_id, metadata):
2242- """Updates the aggregate metadata.
2243-
2244- If a key is set to None, it gets removed from the aggregate metadata.
2245- """
2246- # As a first release of the host aggregates blueprint, this call is
2247- # pretty dumb, in the sense that interacts only with the model.
2248- # In later releasses, updating metadata may trigger virt actions like
2249- # the setup of shared storage, or more generally changes to the
2250- # underlying hypervisor pools.
2251- for key in metadata.keys():
2252- if not metadata[key]:
2253- try:
2254- self.db.aggregate_metadata_delete(context,
2255- aggregate_id, key)
2256- metadata.pop(key)
2257- except exception.AggregateMetadataNotFound, e:
2258- LOG.warn(e.message)
2259- self.db.aggregate_metadata_add(context, aggregate_id, metadata)
2260- return self.get_aggregate(context, aggregate_id)
2261-
2262- def delete_aggregate(self, context, aggregate_id):
2263- """Deletes the aggregate."""
2264- hosts = self.db.aggregate_host_get_all(context, aggregate_id,
2265- read_deleted="no")
2266- if len(hosts) > 0:
2267- raise exception.InvalidAggregateAction(action='delete',
2268- aggregate_id=aggregate_id,
2269- reason='not empty')
2270- self.db.aggregate_delete(context, aggregate_id)
2271-
2272- def add_host_to_aggregate(self, context, aggregate_id, host):
2273- """Adds the host to an aggregate."""
2274- # validates the host; ComputeHostNotFound is raised if invalid
2275- service = self.db.service_get_all_compute_by_host(context, host)[0]
2276- # add host, and reflects action in the aggregate operational state
2277- aggregate = self.db.aggregate_get(context, aggregate_id)
2278- if aggregate.operational_state in [aggregate_states.CREATED,
2279- aggregate_states.ACTIVE]:
2280- if service.availability_zone != aggregate.availability_zone:
2281- raise exception.InvalidAggregateAction(
2282- action='add host',
2283- aggregate_id=aggregate_id,
2284- reason='availibility zone mismatch')
2285- self.db.aggregate_host_add(context, aggregate_id, host)
2286- if aggregate.operational_state == aggregate_states.CREATED:
2287- values = {'operational_state': aggregate_states.CHANGING}
2288- self.db.aggregate_update(context, aggregate_id, values)
2289- queue = self.db.queue_get_for(context, service.topic, host)
2290- rpc.cast(context, queue, {"method": "add_aggregate_host",
2291- "args": {"aggregate_id": aggregate_id,
2292- "host": host}, })
2293- return self.get_aggregate(context, aggregate_id)
2294- else:
2295- invalid = {aggregate_states.CHANGING: 'setup in progress',
2296- aggregate_states.DISMISSED: 'aggregate deleted',
2297- aggregate_states.ERROR: 'aggregate in error', }
2298- if aggregate.operational_state in invalid.keys():
2299- raise exception.InvalidAggregateAction(
2300- action='add host',
2301- aggregate_id=aggregate_id,
2302- reason=invalid[aggregate.operational_state])
2303-
2304- def remove_host_from_aggregate(self, context, aggregate_id, host):
2305- """Removes host from the aggregate."""
2306- # validates the host; ComputeHostNotFound is raised if invalid
2307- service = self.db.service_get_all_compute_by_host(context, host)[0]
2308- aggregate = self.db.aggregate_get(context, aggregate_id)
2309- if aggregate.operational_state in [aggregate_states.ACTIVE,
2310- aggregate_states.ERROR]:
2311- self.db.aggregate_host_delete(context, aggregate_id, host)
2312- queue = self.db.queue_get_for(context, service.topic, host)
2313- rpc.cast(context, queue, {"method": "remove_aggregate_host",
2314- "args": {"aggregate_id": aggregate_id,
2315- "host": host}, })
2316- return self.get_aggregate(context, aggregate_id)
2317- else:
2318- invalid = {aggregate_states.CREATED: 'no hosts to remove',
2319- aggregate_states.CHANGING: 'setup in progress',
2320- aggregate_states.DISMISSED: 'aggregate deleted', }
2321- if aggregate.operational_state in invalid.keys():
2322- raise exception.InvalidAggregateAction(
2323- action='remove host',
2324- aggregate_id=aggregate_id,
2325- reason=invalid[aggregate.operational_state])
2326-
2327- def _get_aggregate_info(self, context, aggregate):
2328- """Builds a dictionary with aggregate props, metadata and hosts."""
2329- metadata = self.db.aggregate_metadata_get(context, aggregate.id)
2330- hosts = self.db.aggregate_host_get_all(context, aggregate.id,
2331- read_deleted="no")
2332-
2333- result = dict(aggregate.iteritems())
2334- result["metadata"] = metadata
2335- result["hosts"] = hosts
2336- return result
2337
2338=== removed file '.pc/CVE-2013-0208.patch/nova/exception.py'
2339--- .pc/CVE-2013-0208.patch/nova/exception.py 2013-01-23 13:03:11 +0000
2340+++ .pc/CVE-2013-0208.patch/nova/exception.py 1970-01-01 00:00:00 +0000
2341@@ -1,1031 +0,0 @@
2342-# vim: tabstop=4 shiftwidth=4 softtabstop=4
2343-
2344-# Copyright 2010 United States Government as represented by the
2345-# Administrator of the National Aeronautics and Space Administration.
2346-# All Rights Reserved.
2347-#
2348-# Licensed under the Apache License, Version 2.0 (the "License"); you may
2349-# not use this file except in compliance with the License. You may obtain
2350-# a copy of the License at
2351-#
2352-# http://www.apache.org/licenses/LICENSE-2.0
2353-#
2354-# Unless required by applicable law or agreed to in writing, software
2355-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
2356-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
2357-# License for the specific language governing permissions and limitations
2358-# under the License.
2359-
2360-"""Nova base exception handling.
2361-
2362-Includes decorator for re-raising Nova-type exceptions.
2363-
2364-SHOULD include dedicated exception logging.
2365-
2366-"""
2367-
2368-import functools
2369-import sys
2370-
2371-import novaclient.exceptions
2372-import webob.exc
2373-
2374-from nova import log as logging
2375-
2376-LOG = logging.getLogger(__name__)
2377-
2378-
2379-class ConvertedException(webob.exc.WSGIHTTPException):
2380- def __init__(self, code=0, title="", explanation=""):
2381- self.code = code
2382- self.title = title
2383- self.explanation = explanation
2384- super(ConvertedException, self).__init__()
2385-
2386-
2387-class ProcessExecutionError(IOError):
2388- def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
2389- description=None):
2390- self.exit_code = exit_code
2391- self.stderr = stderr
2392- self.stdout = stdout
2393- self.cmd = cmd
2394- self.description = description
2395-
2396- if description is None:
2397- description = _('Unexpected error while running command.')
2398- if exit_code is None:
2399- exit_code = '-'
2400- message = _('%(description)s\nCommand: %(cmd)s\n'
2401- 'Exit code: %(exit_code)s\nStdout: %(stdout)r\n'
2402- 'Stderr: %(stderr)r') % locals()
2403- IOError.__init__(self, message)
2404-
2405-
2406-class Error(Exception):
2407- pass
2408-
2409-
2410-class EC2APIError(Error):
2411- def __init__(self, message='Unknown', code=None):
2412- self.msg = message
2413- self.code = code
2414- if code:
2415- outstr = '%s: %s' % (code, message)
2416- else:
2417- outstr = '%s' % message
2418- super(EC2APIError, self).__init__(outstr)
2419-
2420-
2421-class DBError(Error):
2422- """Wraps an implementation specific exception."""
2423- def __init__(self, inner_exception=None):
2424- self.inner_exception = inner_exception
2425- super(DBError, self).__init__(str(inner_exception))
2426-
2427-
2428-def wrap_db_error(f):
2429- def _wrap(*args, **kwargs):
2430- try:
2431- return f(*args, **kwargs)
2432- except UnicodeEncodeError:
2433- raise InvalidUnicodeParameter()
2434- except Exception, e:
2435- LOG.exception(_('DB exception wrapped.'))
2436- raise DBError(e)
2437- _wrap.func_name = f.func_name
2438- return _wrap
2439-
2440-
2441-def wrap_exception(notifier=None, publisher_id=None, event_type=None,
2442- level=None):
2443- """This decorator wraps a method to catch any exceptions that may
2444- get thrown. It logs the exception as well as optionally sending
2445- it to the notification system.
2446- """
2447- # TODO(sandy): Find a way to import nova.notifier.api so we don't have
2448- # to pass it in as a parameter. Otherwise we get a cyclic import of
2449- # nova.notifier.api -> nova.utils -> nova.exception :(
2450- # TODO(johannes): Also, it would be nice to use
2451- # utils.save_and_reraise_exception() without an import loop
2452- def inner(f):
2453- def wrapped(*args, **kw):
2454- try:
2455- return f(*args, **kw)
2456- except Exception, e:
2457- # Save exception since it can be clobbered during processing
2458- # below before we can re-raise
2459- exc_info = sys.exc_info()
2460-
2461- if notifier:
2462- payload = dict(args=args, exception=e)
2463- payload.update(kw)
2464-
2465- # Use a temp vars so we don't shadow
2466- # our outer definitions.
2467- temp_level = level
2468- if not temp_level:
2469- temp_level = notifier.ERROR
2470-
2471- temp_type = event_type
2472- if not temp_type:
2473- # If f has multiple decorators, they must use
2474- # functools.wraps to ensure the name is
2475- # propagated.
2476- temp_type = f.__name__
2477-
2478- notifier.notify(publisher_id, temp_type, temp_level,
2479- payload)
2480-
2481- # re-raise original exception since it may have been clobbered
2482- raise exc_info[0], exc_info[1], exc_info[2]
2483-
2484- return functools.wraps(f)(wrapped)
2485- return inner
2486-
2487-
2488-class NovaException(Exception):
2489- """Base Nova Exception
2490-
2491- To correctly use this class, inherit from it and define
2492- a 'message' property. That message will get printf'd
2493- with the keyword arguments provided to the constructor.
2494-
2495- """
2496- message = _("An unknown exception occurred.")
2497- code = 500
2498- headers = {}
2499- safe = False
2500-
2501- def __init__(self, message=None, **kwargs):
2502- self.kwargs = kwargs
2503-
2504- if 'code' not in self.kwargs:
2505- try:
2506- self.kwargs['code'] = self.code
2507- except AttributeError:
2508- pass
2509-
2510- if not message:
2511- try:
2512- message = self.message % kwargs
2513-
2514- except Exception as e:
2515- # at least get the core message out if something happened
2516- message = self.message
2517-
2518- super(NovaException, self).__init__(message)
2519-
2520-
2521-class DecryptionFailure(NovaException):
2522- message = _("Failed to decrypt text")
2523-
2524-
2525-class ImagePaginationFailed(NovaException):
2526- message = _("Failed to paginate through images from image service")
2527-
2528-
2529-class VirtualInterfaceCreateException(NovaException):
2530- message = _("Virtual Interface creation failed")
2531-
2532-
2533-class VirtualInterfaceMacAddressException(NovaException):
2534- message = _("5 attempts to create virtual interface"
2535- "with unique mac address failed")
2536-
2537-
2538-class GlanceConnectionFailed(NovaException):
2539- message = _("Connection to glance failed") + ": %(reason)s"
2540-
2541-
2542-class MelangeConnectionFailed(NovaException):
2543- message = _("Connection to melange failed") + ": %(reason)s"
2544-
2545-
2546-class NotAuthorized(NovaException):
2547- message = _("Not authorized.")
2548- code = 403
2549-
2550-
2551-class AdminRequired(NotAuthorized):
2552- message = _("User does not have admin privileges")
2553-
2554-
2555-class PolicyNotAuthorized(NotAuthorized):
2556- message = _("Policy doesn't allow %(action)s to be performed.")
2557-
2558-
2559-class ImageNotAuthorized(NovaException):
2560- message = _("Not authorized for image %(image_id)s.")
2561-
2562-
2563-class Invalid(NovaException):
2564- message = _("Unacceptable parameters.")
2565- code = 400
2566-
2567-
2568-class InvalidSnapshot(Invalid):
2569- message = _("Invalid snapshot") + ": %(reason)s"
2570-
2571-
2572-class VolumeUnattached(Invalid):
2573- message = _("Volume %(volume_id)s is not attached to anything")
2574-
2575-
2576-class InvalidKeypair(Invalid):
2577- message = _("Keypair data is invalid")
2578-
2579-
2580-class SfJsonEncodeFailure(NovaException):
2581- message = _("Failed to load data into json format")
2582-
2583-
2584-class InvalidRequest(Invalid):
2585- message = _("The request is invalid.")
2586-
2587-
2588-class InvalidSignature(Invalid):
2589- message = _("Invalid signature %(signature)s for user %(user)s.")
2590-
2591-
2592-class InvalidInput(Invalid):
2593- message = _("Invalid input received") + ": %(reason)s"
2594-
2595-
2596-class InvalidInstanceType(Invalid):
2597- message = _("Invalid instance type %(instance_type)s.")
2598-
2599-
2600-class InvalidVolumeType(Invalid):
2601- message = _("Invalid volume type") + ": %(reason)s"
2602-
2603-
2604-class InvalidVolume(Invalid):
2605- message = _("Invalid volume") + ": %(reason)s"
2606-
2607-
2608-class InvalidPortRange(Invalid):
2609- message = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s")
2610-
2611-
2612-class InvalidIpProtocol(Invalid):
2613- message = _("Invalid IP protocol %(protocol)s.")
2614-
2615-
2616-class InvalidContentType(Invalid):
2617- message = _("Invalid content type %(content_type)s.")
2618-
2619-
2620-class InvalidCidr(Invalid):
2621- message = _("Invalid cidr %(cidr)s.")
2622-
2623-
2624-class InvalidRPCConnectionReuse(Invalid):
2625- message = _("Invalid reuse of an RPC connection.")
2626-
2627-
2628-class InvalidUnicodeParameter(Invalid):
2629- message = _("Invalid Parameter: "
2630- "Unicode is not supported by the current database.")
2631-
2632-
2633-# Cannot be templated as the error syntax varies.
2634-# msg needs to be constructed when raised.
2635-class InvalidParameterValue(Invalid):
2636- message = _("%(err)s")
2637-
2638-
2639-class InvalidAggregateAction(Invalid):
2640- message = _("Cannot perform action '%(action)s' on aggregate "
2641- "%(aggregate_id)s. Reason: %(reason)s.")
2642-
2643-
2644-class InvalidGroup(Invalid):
2645- message = _("Group not valid. Reason: %(reason)s")
2646-
2647-
2648-class InstanceInvalidState(Invalid):
2649- message = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
2650- "%(method)s while the instance is in this state.")
2651-
2652-
2653-class InstanceNotRunning(Invalid):
2654- message = _("Instance %(instance_id)s is not running.")
2655-
2656-
2657-class InstanceNotSuspended(Invalid):
2658- message = _("Instance %(instance_id)s is not suspended.")
2659-
2660-
2661-class InstanceNotInRescueMode(Invalid):
2662- message = _("Instance %(instance_id)s is not in rescue mode")
2663-
2664-
2665-class InstanceSuspendFailure(Invalid):
2666- message = _("Failed to suspend instance") + ": %(reason)s"
2667-
2668-
2669-class InstanceResumeFailure(Invalid):
2670- message = _("Failed to resume server") + ": %(reason)s."
2671-
2672-
2673-class InstanceRebootFailure(Invalid):
2674- message = _("Failed to reboot instance") + ": %(reason)s"
2675-
2676-
2677-class InstanceTerminationFailure(Invalid):
2678- message = _("Failed to terminate instance") + ": %(reason)s"
2679-
2680-
2681-class ServiceUnavailable(Invalid):
2682- message = _("Service is unavailable at this time.")
2683-
2684-
2685-class VolumeServiceUnavailable(ServiceUnavailable):
2686- message = _("Volume service is unavailable at this time.")
2687-
2688-
2689-class ComputeServiceUnavailable(ServiceUnavailable):
2690- message = _("Compute service is unavailable at this time.")
2691-
2692-
2693-class UnableToMigrateToSelf(Invalid):
2694- message = _("Unable to migrate instance (%(instance_id)s) "
2695- "to current host (%(host)s).")
2696-
2697-
2698-class DestinationHostUnavailable(Invalid):
2699- message = _("Destination compute host is unavailable at this time.")
2700-
2701-
2702-class SourceHostUnavailable(Invalid):
2703- message = _("Original compute host is unavailable at this time.")
2704-
2705-
2706-class InvalidHypervisorType(Invalid):
2707- message = _("The supplied hypervisor type of is invalid.")
2708-
2709-
2710-class DestinationHypervisorTooOld(Invalid):
2711- message = _("The instance requires a newer hypervisor version than "
2712- "has been provided.")
2713-
2714-
2715-class DestinationDiskExists(Invalid):
2716- message = _("The supplied disk path (%(path)s) already exists, "
2717- "it is expected not to exist.")
2718-
2719-
2720-class InvalidDevicePath(Invalid):
2721- message = _("The supplied device path (%(path)s) is invalid.")
2722-
2723-
2724-class DeviceIsBusy(Invalid):
2725- message = _("The supplied device (%(device)s) is busy.")
2726-
2727-
2728-class InvalidCPUInfo(Invalid):
2729- message = _("Unacceptable CPU info") + ": %(reason)s"
2730-
2731-
2732-class InvalidIpAddressError(Invalid):
2733- message = _("%(address)s is not a valid IP v4/6 address.")
2734-
2735-
2736-class InvalidVLANTag(Invalid):
2737- message = _("VLAN tag is not appropriate for the port group "
2738- "%(bridge)s. Expected VLAN tag is %(tag)s, "
2739- "but the one associated with the port group is %(pgroup)s.")
2740-
2741-
2742-class InvalidVLANPortGroup(Invalid):
2743- message = _("vSwitch which contains the port group %(bridge)s is "
2744- "not associated with the desired physical adapter. "
2745- "Expected vSwitch is %(expected)s, but the one associated "
2746- "is %(actual)s.")
2747-
2748-
2749-class InvalidDiskFormat(Invalid):
2750- message = _("Disk format %(disk_format)s is not acceptable")
2751-
2752-
2753-class ImageUnacceptable(Invalid):
2754- message = _("Image %(image_id)s is unacceptable: %(reason)s")
2755-
2756-
2757-class InstanceUnacceptable(Invalid):
2758- message = _("Instance %(instance_id)s is unacceptable: %(reason)s")
2759-
2760-
2761-class InvalidEc2Id(Invalid):
2762- message = _("Ec2 id %(ec2_id)s is unacceptable.")
2763-
2764-
2765-class NotFound(NovaException):
2766- message = _("Resource could not be found.")
2767- code = 404
2768-
2769-
2770-class FlagNotSet(NotFound):
2771- message = _("Required flag %(flag)s not set.")
2772-
2773-
2774-class VolumeNotFound(NotFound):
2775- message = _("Volume %(volume_id)s could not be found.")
2776-
2777-
2778-class SfAccountNotFound(NotFound):
2779- message = _("Unable to locate account %(account_name)s on "
2780- "Solidfire device")
2781-
2782-
2783-class VolumeNotFoundForInstance(VolumeNotFound):
2784- message = _("Volume not found for instance %(instance_id)s.")
2785-
2786-
2787-class VolumeMetadataNotFound(NotFound):
2788- message = _("Volume %(volume_id)s has no metadata with "
2789- "key %(metadata_key)s.")
2790-
2791-
2792-class NoVolumeTypesFound(NotFound):
2793- message = _("Zero volume types found.")
2794-
2795-
2796-class VolumeTypeNotFound(NotFound):
2797- message = _("Volume type %(volume_type_id)s could not be found.")
2798-
2799-
2800-class VolumeTypeNotFoundByName(VolumeTypeNotFound):
2801- message = _("Volume type with name %(volume_type_name)s "
2802- "could not be found.")
2803-
2804-
2805-class VolumeTypeExtraSpecsNotFound(NotFound):
2806- message = _("Volume Type %(volume_type_id)s has no extra specs with "
2807- "key %(extra_specs_key)s.")
2808-
2809-
2810-class SnapshotNotFound(NotFound):
2811- message = _("Snapshot %(snapshot_id)s could not be found.")
2812-
2813-
2814-class VolumeIsBusy(NovaException):
2815- message = _("deleting volume %(volume_name)s that has snapshot")
2816-
2817-
2818-class SnapshotIsBusy(NovaException):
2819- message = _("deleting snapshot %(snapshot_name)s that has "
2820- "dependent volumes")
2821-
2822-
2823-class ISCSITargetNotFoundForVolume(NotFound):
2824- message = _("No target id found for volume %(volume_id)s.")
2825-
2826-
2827-class DiskNotFound(NotFound):
2828- message = _("No disk at %(location)s")
2829-
2830-
2831-class VolumeDriverNotFound(NotFound):
2832- message = _("Could not find a handler for %(driver_type)s volume.")
2833-
2834-
2835-class InvalidImageRef(Invalid):
2836- message = _("Invalid image href %(image_href)s.")
2837-
2838-
2839-class ListingImageRefsNotSupported(Invalid):
2840- message = _("Some images have been stored via hrefs."
2841- + " This version of the api does not support displaying image hrefs.")
2842-
2843-
2844-class ImageNotFound(NotFound):
2845- message = _("Image %(image_id)s could not be found.")
2846-
2847-
2848-class KernelNotFoundForImage(ImageNotFound):
2849- message = _("Kernel not found for image %(image_id)s.")
2850-
2851-
2852-class UserNotFound(NotFound):
2853- message = _("User %(user_id)s could not be found.")
2854-
2855-
2856-class ProjectNotFound(NotFound):
2857- message = _("Project %(project_id)s could not be found.")
2858-
2859-
2860-class ProjectMembershipNotFound(NotFound):
2861- message = _("User %(user_id)s is not a member of project %(project_id)s.")
2862-
2863-
2864-class UserRoleNotFound(NotFound):
2865- message = _("Role %(role_id)s could not be found.")
2866-
2867-
2868-class StorageRepositoryNotFound(NotFound):
2869- message = _("Cannot find SR to read/write VDI.")
2870-
2871-
2872-class NetworkInUse(NovaException):
2873- message = _("Network %(network_id)s is still in use.")
2874-
2875-
2876-class NetworkNotCreated(NovaException):
2877- message = _("%(req)s is required to create a network.")
2878-
2879-
2880-class NetworkNotFound(NotFound):
2881- message = _("Network %(network_id)s could not be found.")
2882-
2883-
2884-class NetworkNotFoundForBridge(NetworkNotFound):
2885- message = _("Network could not be found for bridge %(bridge)s")
2886-
2887-
2888-class NetworkNotFoundForUUID(NetworkNotFound):
2889- message = _("Network could not be found for uuid %(uuid)s")
2890-
2891-
2892-class NetworkNotFoundForCidr(NetworkNotFound):
2893- message = _("Network could not be found with cidr %(cidr)s.")
2894-
2895-
2896-class NetworkNotFoundForInstance(NetworkNotFound):
2897- message = _("Network could not be found for instance %(instance_id)s.")
2898-
2899-
2900-class NoNetworksFound(NotFound):
2901- message = _("No networks defined.")
2902-
2903-
2904-class NetworkNotFoundForProject(NotFound):
2905- message = _("Either Network uuid %(network_uuid)s is not present or "
2906- "is not assigned to the project %(project_id)s.")
2907-
2908-
2909-class NetworkHostNotSet(NovaException):
2910- message = _("Host is not set to the network (%(network_id)s).")
2911-
2912-
2913-class NetworkBusy(NovaException):
2914- message = _("Network %(network)s has active ports, cannot delete.")
2915-
2916-
2917-class DatastoreNotFound(NotFound):
2918- message = _("Could not find the datastore reference(s) which the VM uses.")
2919-
2920-
2921-class FixedIpNotFound(NotFound):
2922- message = _("No fixed IP associated with id %(id)s.")
2923-
2924-
2925-class FixedIpNotFoundForAddress(FixedIpNotFound):
2926- message = _("Fixed ip not found for address %(address)s.")
2927-
2928-
2929-class FixedIpNotFoundForInstance(FixedIpNotFound):
2930- message = _("Instance %(instance_id)s has zero fixed ips.")
2931-
2932-
2933-class FixedIpNotFoundForNetworkHost(FixedIpNotFound):
2934- message = _("Network host %(host)s has zero fixed ips "
2935- "in network %(network_id)s.")
2936-
2937-
2938-class FixedIpNotFoundForSpecificInstance(FixedIpNotFound):
2939- message = _("Instance %(instance_id)s doesn't have fixed ip '%(ip)s'.")
2940-
2941-
2942-class FixedIpNotFoundForHost(FixedIpNotFound):
2943- message = _("Host %(host)s has zero fixed ips.")
2944-
2945-
2946-class FixedIpNotFoundForNetwork(FixedIpNotFound):
2947- message = _("Fixed IP address (%(address)s) does not exist in "
2948- "network (%(network_uuid)s).")
2949-
2950-
2951-class FixedIpAlreadyInUse(NovaException):
2952- message = _("Fixed IP address %(address)s is already in use.")
2953-
2954-
2955-class FixedIpInvalid(Invalid):
2956- message = _("Fixed IP address %(address)s is invalid.")
2957-
2958-
2959-class NoMoreFixedIps(NovaException):
2960- message = _("Zero fixed ips available.")
2961-
2962-
2963-class NoFixedIpsDefined(NotFound):
2964- message = _("Zero fixed ips could be found.")
2965-
2966-
2967-class FloatingIpNotFound(NotFound):
2968- message = _("Floating ip not found for id %(id)s.")
2969-
2970-
2971-class FloatingIpDNSExists(Invalid):
2972- message = _("The DNS entry %(name)s already exists in domain %(domain)s.")
2973-
2974-
2975-class FloatingIpNotFoundForAddress(FloatingIpNotFound):
2976- message = _("Floating ip not found for address %(address)s.")
2977-
2978-
2979-class FloatingIpNotFoundForHost(FloatingIpNotFound):
2980- message = _("Floating ip not found for host %(host)s.")
2981-
2982-
2983-class NoMoreFloatingIps(FloatingIpNotFound):
2984- message = _("Zero floating ips available.")
2985-
2986-
2987-class FloatingIpAssociated(NovaException):
2988- message = _("Floating ip %(address)s is associated.")
2989-
2990-
2991-class FloatingIpNotAssociated(NovaException):
2992- message = _("Floating ip %(address)s is not associated.")
2993-
2994-
2995-class NoFloatingIpsDefined(NotFound):
2996- message = _("Zero floating ips exist.")
2997-
2998-
2999-class NoFloatingIpInterface(NotFound):
3000- message = _("Interface %(interface)s not found.")
3001-
3002-
3003-class KeypairNotFound(NotFound):
3004- message = _("Keypair %(name)s not found for user %(user_id)s")
3005-
3006-
3007-class CertificateNotFound(NotFound):
3008- message = _("Certificate %(certificate_id)s not found.")
3009-
3010-
3011-class ServiceNotFound(NotFound):
3012- message = _("Service %(service_id)s could not be found.")
3013-
3014-
3015-class HostNotFound(NotFound):
3016- message = _("Host %(host)s could not be found.")
3017-
3018-
3019-class ComputeHostNotFound(HostNotFound):
3020- message = _("Compute host %(host)s could not be found.")
3021-
3022-
3023-class HostBinaryNotFound(NotFound):
3024- message = _("Could not find binary %(binary)s on host %(host)s.")
3025-
3026-
3027-class AuthTokenNotFound(NotFound):
3028- message = _("Auth token %(token)s could not be found.")
3029-
3030-
3031-class AccessKeyNotFound(NotFound):
3032- message = _("Access Key %(access_key)s could not be found.")
3033-
3034-
3035-class QuotaNotFound(NotFound):
3036- message = _("Quota could not be found")
3037-
3038-
3039-class ProjectQuotaNotFound(QuotaNotFound):
3040- message = _("Quota for project %(project_id)s could not be found.")
3041-
3042-
3043-class SecurityGroupNotFound(NotFound):
3044- message = _("Security group %(security_group_id)s not found.")
3045-
3046-
3047-class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
3048- message = _("Security group %(security_group_id)s not found "
3049- "for project %(project_id)s.")
3050-
3051-
3052-class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
3053- message = _("Security group with rule %(rule_id)s not found.")
3054-
3055-
3056-class SecurityGroupExistsForInstance(Invalid):
3057- message = _("Security group %(security_group_id)s is already associated"
3058- " with the instance %(instance_id)s")
3059-
3060-
3061-class SecurityGroupNotExistsForInstance(Invalid):
3062- message = _("Security group %(security_group_id)s is not associated with"
3063- " the instance %(instance_id)s")
3064-
3065-
3066-class MigrationNotFound(NotFound):
3067- message = _("Migration %(migration_id)s could not be found.")
3068-
3069-
3070-class MigrationNotFoundByStatus(MigrationNotFound):
3071- message = _("Migration not found for instance %(instance_id)s "
3072- "with status %(status)s.")
3073-
3074-
3075-class ConsolePoolNotFound(NotFound):
3076- message = _("Console pool %(pool_id)s could not be found.")
3077-
3078-
3079-class ConsolePoolNotFoundForHostType(NotFound):
3080- message = _("Console pool of type %(console_type)s "
3081- "for compute host %(compute_host)s "
3082- "on proxy host %(host)s not found.")
3083-
3084-
3085-class ConsoleNotFound(NotFound):
3086- message = _("Console %(console_id)s could not be found.")
3087-
3088-
3089-class ConsoleNotFoundForInstance(ConsoleNotFound):
3090- message = _("Console for instance %(instance_id)s could not be found.")
3091-
3092-
3093-class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
3094- message = _("Console for instance %(instance_id)s "
3095- "in pool %(pool_id)s could not be found.")
3096-
3097-
3098-class ConsoleTypeInvalid(Invalid):
3099- message = _("Invalid console type %(console_type)s ")
3100-
3101-
3102-class NoInstanceTypesFound(NotFound):
3103- message = _("Zero instance types found.")
3104-
3105-
3106-class InstanceTypeNotFound(NotFound):
3107- message = _("Instance type %(instance_type_id)s could not be found.")
3108-
3109-
3110-class InstanceTypeNotFoundByName(InstanceTypeNotFound):
3111- message = _("Instance type with name %(instance_type_name)s "
3112- "could not be found.")
3113-
3114-
3115-class FlavorNotFound(NotFound):
3116- message = _("Flavor %(flavor_id)s could not be found.")
3117-
3118-
3119-class CellNotFound(NotFound):
3120- message = _("Cell %(cell_id)s could not be found.")
3121-
3122-
3123-class SchedulerHostFilterNotFound(NotFound):
3124- message = _("Scheduler Host Filter %(filter_name)s could not be found.")
3125-
3126-
3127-class SchedulerCostFunctionNotFound(NotFound):
3128- message = _("Scheduler cost function %(cost_fn_str)s could"
3129- " not be found.")
3130-
3131-
3132-class SchedulerWeightFlagNotFound(NotFound):
3133- message = _("Scheduler weight flag not found: %(flag_name)s")
3134-
3135-
3136-class InstanceMetadataNotFound(NotFound):
3137- message = _("Instance %(instance_id)s has no metadata with "
3138- "key %(metadata_key)s.")
3139-
3140-
3141-class InstanceTypeExtraSpecsNotFound(NotFound):
3142- message = _("Instance Type %(instance_type_id)s has no extra specs with "
3143- "key %(extra_specs_key)s.")
3144-
3145-
3146-class LDAPObjectNotFound(NotFound):
3147- message = _("LDAP object could not be found")
3148-
3149-
3150-class LDAPUserNotFound(LDAPObjectNotFound):
3151- message = _("LDAP user %(user_id)s could not be found.")
3152-
3153-
3154-class LDAPGroupNotFound(LDAPObjectNotFound):
3155- message = _("LDAP group %(group_id)s could not be found.")
3156-
3157-
3158-class LDAPGroupMembershipNotFound(NotFound):
3159- message = _("LDAP user %(user_id)s is not a member of group %(group_id)s.")
3160-
3161-
3162-class FileNotFound(NotFound):
3163- message = _("File %(file_path)s could not be found.")
3164-
3165-
3166-class NoFilesFound(NotFound):
3167- message = _("Zero files could be found.")
3168-
3169-
3170-class SwitchNotFoundForNetworkAdapter(NotFound):
3171- message = _("Virtual switch associated with the "
3172- "network adapter %(adapter)s not found.")
3173-
3174-
3175-class NetworkAdapterNotFound(NotFound):
3176- message = _("Network adapter %(adapter)s could not be found.")
3177-
3178-
3179-class ClassNotFound(NotFound):
3180- message = _("Class %(class_name)s could not be found: %(exception)s")
3181-
3182-
3183-class NotAllowed(NovaException):
3184- message = _("Action not allowed.")
3185-
3186-
3187-class GlobalRoleNotAllowed(NotAllowed):
3188- message = _("Unable to use global role %(role_id)s")
3189-
3190-
3191-class ImageRotationNotAllowed(NovaException):
3192- message = _("Rotation is not allowed for snapshots")
3193-
3194-
3195-class RotationRequiredForBackup(NovaException):
3196- message = _("Rotation param is required for backup image_type")
3197-
3198-
3199-#TODO(bcwaldon): EOL this exception!
3200-class Duplicate(NovaException):
3201- pass
3202-
3203-
3204-class KeyPairExists(Duplicate):
3205- message = _("Key pair %(key_name)s already exists.")
3206-
3207-
3208-class UserExists(Duplicate):
3209- message = _("User %(user)s already exists.")
3210-
3211-
3212-class LDAPUserExists(UserExists):
3213- message = _("LDAP user %(user)s already exists.")
3214-
3215-
3216-class LDAPGroupExists(Duplicate):
3217- message = _("LDAP group %(group)s already exists.")
3218-
3219-
3220-class LDAPMembershipExists(Duplicate):
3221- message = _("User %(uid)s is already a member of "
3222- "the group %(group_dn)s")
3223-
3224-
3225-class ProjectExists(Duplicate):
3226- message = _("Project %(project)s already exists.")
3227-
3228-
3229-class InstanceExists(Duplicate):
3230- message = _("Instance %(name)s already exists.")
3231-
3232-
3233-class InstanceTypeExists(Duplicate):
3234- message = _("Instance Type %(name)s already exists.")
3235-
3236-
3237-class VolumeTypeExists(Duplicate):
3238- message = _("Volume Type %(name)s already exists.")
3239-
3240-
3241-class InvalidSharedStorage(NovaException):
3242- message = _("%(path)s is on shared storage: %(reason)s")
3243-
3244-
3245-class MigrationError(NovaException):
3246- message = _("Migration error") + ": %(reason)s"
3247-
3248-
3249-class MalformedRequestBody(NovaException):
3250- message = _("Malformed message body: %(reason)s")
3251-
3252-
3253-class ConfigNotFound(NotFound):
3254- message = _("Could not find config at %(path)s")
3255-
3256-
3257-class PasteAppNotFound(NotFound):
3258- message = _("Could not load paste app '%(name)s' from %(path)s")
3259-
3260-
3261-class CannotResizeToSameSize(NovaException):
3262- message = _("When resizing, instances must change size!")
3263-
3264-
3265-class ImageTooLarge(NovaException):
3266- message = _("Image is larger than instance type allows")
3267-
3268-
3269-class ZoneRequestError(NovaException):
3270- message = _("1 or more Zones could not complete the request")
3271-
3272-
3273-class InstanceTypeMemoryTooSmall(NovaException):
3274- message = _("Instance type's memory is too small for requested image.")
3275-
3276-
3277-class InstanceTypeDiskTooSmall(NovaException):
3278- message = _("Instance type's disk is too small for requested image.")
3279-
3280-
3281-class InsufficientFreeMemory(NovaException):
3282- message = _("Insufficient free memory on compute node to start %(uuid)s.")
3283-
3284-
3285-class CouldNotFetchMetrics(NovaException):
3286- message = _("Could not fetch bandwidth/cpu/disk metrics for this host.")
3287-
3288-
3289-class NoValidHost(NovaException):
3290- message = _("No valid host was found. %(reason)s")
3291-
3292-
3293-class WillNotSchedule(NovaException):
3294- message = _("Host %(host)s is not up or doesn't exist.")
3295-
3296-
3297-class QuotaError(NovaException):
3298- message = _("Quota exceeded") + ": code=%(code)s"
3299- code = 413
3300- headers = {'Retry-After': 0}
3301- safe = True
3302-
3303-
3304-class AggregateError(NovaException):
3305- message = _("Aggregate %(aggregate_id)s: action '%(action)s' "
3306- "caused an error: %(reason)s.")
3307-
3308-
3309-class AggregateNotFound(NotFound):
3310- message = _("Aggregate %(aggregate_id)s could not be found.")
3311-
3312-
3313-class AggregateNameExists(Duplicate):
3314- message = _("Aggregate %(aggregate_name)s already exists.")
3315-
3316-
3317-class AggregateHostNotFound(NotFound):
3318- message = _("Aggregate %(aggregate_id)s has no host %(host)s.")
3319-
3320-
3321-class AggregateMetadataNotFound(NotFound):
3322- message = _("Aggregate %(aggregate_id)s has no metadata with "
3323- "key %(metadata_key)s.")
3324-
3325-
3326-class AggregateHostConflict(Duplicate):
3327- message = _("Host %(host)s already member of another aggregate.")
3328-
3329-
3330-class AggregateHostExists(Duplicate):
3331- message = _("Aggregate %(aggregate_id)s already has host %(host)s.")
3332-
3333-
3334-class DuplicateSfVolumeNames(Duplicate):
3335- message = _("Detected more than one volume with name %(vol_name)s")
3336-
3337-
3338-class VolumeTypeCreateFailed(NovaException):
3339- message = _("Cannot create volume_type with "
3340- "name %(name)s and specs %(extra_specs)s")
3341-
3342-
3343-class InstanceTypeCreateFailed(NovaException):
3344- message = _("Unable to create instance type")
3345-
3346-
3347-class SolidFireAPIException(NovaException):
3348- message = _("Bad response from SolidFire API")
3349-
3350-
3351-class SolidFireAPIStatusException(SolidFireAPIException):
3352- message = _("Error in SolidFire API response: status=%(status)s")
3353-
3354-
3355-class SolidFireAPIDataException(SolidFireAPIException):
3356- message = _("Error in SolidFire API response: data=%(data)s")
3357-
3358-
3359-class DuplicateVlan(Duplicate):
3360- message = _("Detected existing vlan with id %(vlan)d")
3361-
3362-
3363-class InstanceNotFound(NotFound):
3364- message = _("Instance %(instance_id)s could not be found.")
3365-
3366-
3367-class InvalidInstanceIDMalformed(Invalid):
3368- message = _("Invalid id: %(val)s (expecting \"i-...\").")
3369-
3370-
3371-class CouldNotFetchImage(NovaException):
3372- message = _("Could not fetch image %(image)s")
3373
3374=== removed directory '.pc/CVE-2013-0335.patch'
3375=== removed directory '.pc/CVE-2013-0335.patch/nova'
3376=== removed directory '.pc/CVE-2013-0335.patch/nova/compute'
3377=== removed file '.pc/CVE-2013-0335.patch/nova/compute/api.py'
3378--- .pc/CVE-2013-0335.patch/nova/compute/api.py 2013-03-20 10:07:08 +0000
3379+++ .pc/CVE-2013-0335.patch/nova/compute/api.py 1970-01-01 00:00:00 +0000
3380@@ -1,1858 +0,0 @@
3381-# vim: tabstop=4 shiftwidth=4 softtabstop=4
3382-
3383-# Copyright 2010 United States Government as represented by the
3384-# Administrator of the National Aeronautics and Space Administration.
3385-# Copyright 2011 Piston Cloud Computing, Inc.
3386-# All Rights Reserved.
3387-#
3388-# Licensed under the Apache License, Version 2.0 (the "License"); you may
3389-# not use this file except in compliance with the License. You may obtain
3390-# a copy of the License at
3391-#
3392-# http://www.apache.org/licenses/LICENSE-2.0
3393-#
3394-# Unless required by applicable law or agreed to in writing, software
3395-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
3396-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
3397-# License for the specific language governing permissions and limitations
3398-# under the License.
3399-
3400-"""Handles all requests relating to compute resources (e.g. guest vms,
3401-networking and storage of vms, and compute hosts on which they run)."""
3402-
3403-import functools
3404-import re
3405-import time
3406-
3407-import novaclient
3408-import webob.exc
3409-
3410-from nova import block_device
3411-from nova.compute import aggregate_states
3412-from nova.compute import instance_types
3413-from nova.compute import power_state
3414-from nova.compute import task_states
3415-from nova.compute import vm_states
3416-from nova.db import base
3417-from nova import exception
3418-from nova import flags
3419-import nova.image
3420-from nova import log as logging
3421-from nova import network
3422-from nova.openstack.common import cfg
3423-import nova.policy
3424-from nova import quota
3425-from nova import rpc
3426-from nova.scheduler import api as scheduler_api
3427-from nova import utils
3428-from nova import volume
3429-
3430-
3431-LOG = logging.getLogger(__name__)
3432-
3433-find_host_timeout_opt = cfg.StrOpt('find_host_timeout',
3434- default=30,
3435- help='Timeout after NN seconds when looking for a host.')
3436-
3437-FLAGS = flags.FLAGS
3438-FLAGS.register_opt(find_host_timeout_opt)
3439-flags.DECLARE('consoleauth_topic', 'nova.consoleauth')
3440-
3441-
3442-def check_instance_state(vm_state=None, task_state=None):
3443- """Decorator to check VM and/or task state before entry to API functions.
3444-
3445- If the instance is in the wrong state, the wrapper will raise an exception.
3446- """
3447-
3448- if vm_state is not None and not isinstance(vm_state, set):
3449- vm_state = set(vm_state)
3450- if task_state is not None and not isinstance(task_state, set):
3451- task_state = set(task_state)
3452-
3453- def outer(f):
3454- @functools.wraps(f)
3455- def inner(self, context, instance, *args, **kw):
3456- if vm_state is not None and instance['vm_state'] not in vm_state:
3457- raise exception.InstanceInvalidState(
3458- attr='vm_state',
3459- instance_uuid=instance['uuid'],
3460- state=instance['vm_state'],
3461- method=f.__name__)
3462- if (task_state is not None and
3463- instance['task_state'] not in task_state):
3464- raise exception.InstanceInvalidState(
3465- attr='task_state',
3466- instance_uuid=instance['uuid'],
3467- state=instance['task_state'],
3468- method=f.__name__)
3469-
3470- return f(self, context, instance, *args, **kw)
3471- return inner
3472- return outer
3473-
3474-
3475-def wrap_check_policy(func):
3476- """Check corresponding policy prior of wrapped method to execution"""
3477- @functools.wraps(func)
3478- def wrapped(self, context, target, *args, **kwargs):
3479- check_policy(context, func.__name__, target)
3480- return func(self, context, target, *args, **kwargs)
3481- return wrapped
3482-
3483-
3484-def check_policy(context, action, target):
3485- _action = 'compute:%s' % action
3486- nova.policy.enforce(context, _action, target)
3487-
3488-
3489-class BaseAPI(base.Base):
3490- """Base API class."""
3491- def __init__(self, **kwargs):
3492- super(BaseAPI, self).__init__(**kwargs)
3493-
3494- def _cast_or_call_compute_message(self, rpc_method, compute_method,
3495- context, instance=None, host=None, params=None):
3496- """Generic handler for RPC casts and calls to compute.
3497-
3498- :param rpc_method: RPC method to use (rpc.call or rpc.cast)
3499- :param compute_method: Compute manager method to call
3500- :param context: RequestContext of caller
3501- :param instance: The instance object to use to find host to send to
3502- Can be None to not include instance_uuid in args
3503- :param host: Optional host to send to instead of instance['host']
3504- Must be specified if 'instance' is None
3505- :param params: Optional dictionary of arguments to be passed to the
3506- compute worker
3507-
3508- :returns: None
3509- """
3510- if not params:
3511- params = {}
3512- if not host:
3513- if not instance:
3514- raise exception.Error(_("No compute host specified"))
3515- host = instance['host']
3516- if not host:
3517- raise exception.Error(_("Unable to find host for "
3518- "Instance %s") % instance['uuid'])
3519- queue = self.db.queue_get_for(context, FLAGS.compute_topic, host)
3520- if instance:
3521- params['instance_uuid'] = instance['uuid']
3522- kwargs = {'method': compute_method, 'args': params}
3523- return rpc_method(context, queue, kwargs)
3524-
3525- def _cast_compute_message(self, *args, **kwargs):
3526- """Generic handler for RPC casts to compute."""
3527- self._cast_or_call_compute_message(rpc.cast, *args, **kwargs)
3528-
3529- def _call_compute_message(self, *args, **kwargs):
3530- """Generic handler for RPC calls to compute."""
3531- return self._cast_or_call_compute_message(rpc.call, *args, **kwargs)
3532-
3533- @staticmethod
3534- def _cast_scheduler_message(context, args):
3535- """Generic handler for RPC calls to the scheduler."""
3536- rpc.cast(context, FLAGS.scheduler_topic, args)
3537-
3538-
3539-class API(BaseAPI):
3540- """API for interacting with the compute manager."""
3541-
3542- def __init__(self, image_service=None, network_api=None, volume_api=None,
3543- **kwargs):
3544- self.image_service = (image_service or
3545- nova.image.get_default_image_service())
3546-
3547- self.network_api = network_api or network.API()
3548- self.volume_api = volume_api or volume.API()
3549- super(API, self).__init__(**kwargs)
3550-
3551- def _check_injected_file_quota(self, context, injected_files):
3552- """Enforce quota limits on injected files.
3553-
3554- Raises a QuotaError if any limit is exceeded.
3555- """
3556- if injected_files is None:
3557- return
3558- limit = quota.allowed_injected_files(context, len(injected_files))
3559- if len(injected_files) > limit:
3560- raise exception.QuotaError(code="OnsetFileLimitExceeded")
3561- path_limit = quota.allowed_injected_file_path_bytes(context)
3562- for path, content in injected_files:
3563- if len(path) > path_limit:
3564- raise exception.QuotaError(code="OnsetFilePathLimitExceeded")
3565- content_limit = quota.allowed_injected_file_content_bytes(
3566- context, len(content))
3567- if len(content) > content_limit:
3568- code = "OnsetFileContentLimitExceeded"
3569- raise exception.QuotaError(code=code)
3570-
3571- def _check_metadata_properties_quota(self, context, metadata=None):
3572- """Enforce quota limits on metadata properties."""
3573- if not metadata:
3574- metadata = {}
3575- num_metadata = len(metadata)
3576- quota_metadata = quota.allowed_metadata_items(context, num_metadata)
3577- if quota_metadata < num_metadata:
3578- pid = context.project_id
3579- msg = _("Quota exceeded for %(pid)s, tried to set "
3580- "%(num_metadata)s metadata properties") % locals()
3581- LOG.warn(msg)
3582- raise exception.QuotaError(code="MetadataLimitExceeded")
3583-
3584- # Because metadata is stored in the DB, we hard-code the size limits
3585- # In future, we may support more variable length strings, so we act
3586- # as if this is quota-controlled for forwards compatibility
3587- for k, v in metadata.iteritems():
3588- if len(k) > 255 or len(v) > 255:
3589- pid = context.project_id
3590- msg = _("Quota exceeded for %(pid)s, metadata property "
3591- "key or value too long") % locals()
3592- LOG.warn(msg)
3593- raise exception.QuotaError(code="MetadataLimitExceeded")
3594-
3595- def _check_requested_networks(self, context, requested_networks):
3596- """ Check if the networks requested belongs to the project
3597- and the fixed IP address for each network provided is within
3598- same the network block
3599- """
3600- if requested_networks is None:
3601- return
3602-
3603- self.network_api.validate_networks(context, requested_networks)
3604-
3605- def _create_instance(self, context, instance_type,
3606- image_href, kernel_id, ramdisk_id,
3607- min_count, max_count,
3608- display_name, display_description,
3609- key_name, key_data, security_group,
3610- availability_zone, user_data, metadata,
3611- injected_files, admin_password,
3612- access_ip_v4, access_ip_v6,
3613- requested_networks, config_drive,
3614- block_device_mapping, auto_disk_config,
3615- reservation_id=None, create_instance_here=False,
3616- scheduler_hints=None):
3617- """Verify all the input parameters regardless of the provisioning
3618- strategy being performed and schedule the instance(s) for
3619- creation."""
3620-
3621- if not metadata:
3622- metadata = {}
3623- if not display_description:
3624- display_description = ''
3625- if not security_group:
3626- security_group = 'default'
3627-
3628- if not instance_type:
3629- instance_type = instance_types.get_default_instance_type()
3630- if not min_count:
3631- min_count = 1
3632- if not max_count:
3633- max_count = min_count
3634- if not metadata:
3635- metadata = {}
3636-
3637- block_device_mapping = block_device_mapping or []
3638-
3639- num_instances = quota.allowed_instances(context, max_count,
3640- instance_type)
3641- if num_instances < min_count:
3642- pid = context.project_id
3643- if num_instances <= 0:
3644- msg = _("Cannot run any more instances of this type.")
3645- else:
3646- msg = (_("Can only run %s more instances of this type.") %
3647- num_instances)
3648- LOG.warn(_("Quota exceeded for %(pid)s,"
3649- " tried to run %(min_count)s instances. " + msg) % locals())
3650- raise exception.QuotaError(code="InstanceLimitExceeded")
3651-
3652- self._check_metadata_properties_quota(context, metadata)
3653- self._check_injected_file_quota(context, injected_files)
3654- self._check_requested_networks(context, requested_networks)
3655-
3656- (image_service, image_id) = nova.image.get_image_service(context,
3657- image_href)
3658- image = image_service.show(context, image_id)
3659-
3660- if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
3661- raise exception.InstanceTypeMemoryTooSmall()
3662- if instance_type['root_gb'] < int(image.get('min_disk') or 0):
3663- raise exception.InstanceTypeDiskTooSmall()
3664-
3665- config_drive_id = None
3666- if config_drive and config_drive is not True:
3667- # config_drive is volume id
3668- config_drive, config_drive_id = None, config_drive
3669-
3670- os_type = None
3671- if 'properties' in image and 'os_type' in image['properties']:
3672- os_type = image['properties']['os_type']
3673- architecture = None
3674- if 'properties' in image and 'arch' in image['properties']:
3675- architecture = image['properties']['arch']
3676- vm_mode = None
3677- if 'properties' in image and 'vm_mode' in image['properties']:
3678- vm_mode = image['properties']['vm_mode']
3679-
3680- # If instance doesn't have auto_disk_config overridden by request, use
3681- # whatever the image indicates
3682- if auto_disk_config is None:
3683- if ('properties' in image and
3684- 'auto_disk_config' in image['properties']):
3685- auto_disk_config = utils.bool_from_str(
3686- image['properties']['auto_disk_config'])
3687-
3688- if kernel_id is None:
3689- kernel_id = image['properties'].get('kernel_id', None)
3690- if ramdisk_id is None:
3691- ramdisk_id = image['properties'].get('ramdisk_id', None)
3692- # FIXME(sirp): is there a way we can remove null_kernel?
3693- # No kernel and ramdisk for raw images
3694- if kernel_id == str(FLAGS.null_kernel):
3695- kernel_id = None
3696- ramdisk_id = None
3697- LOG.debug(_("Creating a raw instance"))
3698- # Make sure we have access to kernel and ramdisk (if not raw)
3699- LOG.debug(_("Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s")
3700- % locals())
3701- if kernel_id:
3702- image_service.show(context, kernel_id)
3703- if ramdisk_id:
3704- image_service.show(context, ramdisk_id)
3705- if config_drive_id:
3706- image_service.show(context, config_drive_id)
3707-
3708- self.ensure_default_security_group(context)
3709-
3710- if key_data is None and key_name:
3711- key_pair = self.db.key_pair_get(context, context.user_id, key_name)
3712- key_data = key_pair['public_key']
3713-
3714- if reservation_id is None:
3715- reservation_id = utils.generate_uid('r')
3716-
3717- root_device_name = block_device.properties_root_device_name(
3718- image['properties'])
3719-
3720- # NOTE(vish): We have a legacy hack to allow admins to specify hosts
3721- # via az using az:host. It might be nice to expose an
3722- # api to specify specific hosts to force onto, but for
3723- # now it just supports this legacy hack.
3724- host = None
3725- if availability_zone:
3726- availability_zone, _x, host = availability_zone.partition(':')
3727- if not availability_zone:
3728- availability_zone = FLAGS.default_schedule_zone
3729- if context.is_admin and host:
3730- filter_properties = {'force_hosts': [host]}
3731- else:
3732- filter_properties = {}
3733-
3734- filter_properties['scheduler_hints'] = scheduler_hints
3735-
3736- base_options = {
3737- 'reservation_id': reservation_id,
3738- 'image_ref': image_href,
3739- 'kernel_id': kernel_id or '',
3740- 'ramdisk_id': ramdisk_id or '',
3741- 'power_state': power_state.NOSTATE,
3742- 'vm_state': vm_states.BUILDING,
3743- 'config_drive_id': config_drive_id or '',
3744- 'config_drive': config_drive or '',
3745- 'user_id': context.user_id,
3746- 'project_id': context.project_id,
3747- 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
3748- 'instance_type_id': instance_type['id'],
3749- 'memory_mb': instance_type['memory_mb'],
3750- 'vcpus': instance_type['vcpus'],
3751- 'root_gb': instance_type['root_gb'],
3752- 'ephemeral_gb': instance_type['ephemeral_gb'],
3753- 'display_name': display_name,
3754- 'display_description': display_description,
3755- 'user_data': user_data or '',
3756- 'key_name': key_name,
3757- 'key_data': key_data,
3758- 'locked': False,
3759- 'metadata': metadata,
3760- 'access_ip_v4': access_ip_v4,
3761- 'access_ip_v6': access_ip_v6,
3762- 'availability_zone': availability_zone,
3763- 'os_type': os_type,
3764- 'architecture': architecture,
3765- 'vm_mode': vm_mode,
3766- 'root_device_name': root_device_name,
3767- 'progress': 0,
3768- 'auto_disk_config': auto_disk_config}
3769-
3770- LOG.debug(_("Going to run %s instances...") % num_instances)
3771-
3772- # Validate the correct devices have been specified
3773- for bdm in block_device_mapping:
3774- # NOTE(vish): For now, just make sure the volumes are accessible.
3775- snapshot_id = bdm.get('snapshot_id')
3776- volume_id = bdm.get('volume_id')
3777- if volume_id is not None:
3778- try:
3779- self.volume_api.get(context, volume_id)
3780- except Exception:
3781- raise exception.InvalidBDMVolume(id=volume_id)
3782- elif snapshot_id is not None:
3783- try:
3784- self.volume_api.get_snapshot(context, snapshot_id)
3785- except Exception:
3786- raise exception.InvalidBDMSnapshot(id=snapshot_id)
3787-
3788- if create_instance_here:
3789- instance = self.create_db_entry_for_new_instance(
3790- context, instance_type, image, base_options,
3791- security_group, block_device_mapping)
3792- # Tells scheduler we created the instance already.
3793- base_options['uuid'] = instance['uuid']
3794- rpc_method = rpc.cast
3795- else:
3796- # We need to wait for the scheduler to create the instance
3797- # DB entries, because the instance *could* be # created in
3798- # a child zone.
3799- rpc_method = rpc.call
3800-
3801- # TODO(comstud): We should use rpc.multicall when we can
3802- # retrieve the full instance dictionary from the scheduler.
3803- # Otherwise, we could exceed the AMQP max message size limit.
3804- # This would require the schedulers' schedule_run_instances
3805- # methods to return an iterator vs a list.
3806- instances = self._schedule_run_instance(
3807- rpc_method,
3808- context, base_options,
3809- instance_type,
3810- availability_zone, injected_files,
3811- admin_password, image,
3812- num_instances, requested_networks,
3813- block_device_mapping, security_group,
3814- filter_properties)
3815-
3816- if create_instance_here:
3817- return ([instance], reservation_id)
3818- return (instances, reservation_id)
3819-
3820- @staticmethod
3821- def _volume_size(instance_type, virtual_name):
3822- size = 0
3823- if virtual_name == 'swap':
3824- size = instance_type.get('swap', 0)
3825- elif block_device.is_ephemeral(virtual_name):
3826- num = block_device.ephemeral_num(virtual_name)
3827-
3828- # TODO(yamahata): ephemeralN where N > 0
3829- # Only ephemeral0 is allowed for now because InstanceTypes
3830- # table only allows single local disk, ephemeral_gb.
3831- # In order to enhance it, we need to add a new columns to
3832- # instance_types table.
3833- if num > 0:
3834- return 0
3835-
3836- size = instance_type.get('ephemeral_gb')
3837-
3838- return size
3839-
3840- def _update_image_block_device_mapping(self, elevated_context,
3841- instance_type, instance_id,
3842- mappings):
3843- """tell vm driver to create ephemeral/swap device at boot time by
3844- updating BlockDeviceMapping
3845- """
3846- instance_type = (instance_type or
3847- instance_types.get_default_instance_type())
3848-
3849- for bdm in block_device.mappings_prepend_dev(mappings):
3850- LOG.debug(_("bdm %s"), bdm)
3851-
3852- virtual_name = bdm['virtual']
3853- if virtual_name == 'ami' or virtual_name == 'root':
3854- continue
3855-
3856- if not block_device.is_swap_or_ephemeral(virtual_name):
3857- continue
3858-
3859- size = self._volume_size(instance_type, virtual_name)
3860- if size == 0:
3861- continue
3862-
3863- values = {
3864- 'instance_id': instance_id,
3865- 'device_name': bdm['device'],
3866- 'virtual_name': virtual_name,
3867- 'volume_size': size}
3868- self.db.block_device_mapping_update_or_create(elevated_context,
3869- values)
3870-
3871- def _update_block_device_mapping(self, elevated_context,
3872- instance_type, instance_id,
3873- block_device_mapping):
3874- """tell vm driver to attach volume at boot time by updating
3875- BlockDeviceMapping
3876- """
3877- LOG.debug(_("block_device_mapping %s"), block_device_mapping)
3878- for bdm in block_device_mapping:
3879- assert 'device_name' in bdm
3880-
3881- values = {'instance_id': instance_id}
3882- for key in ('device_name', 'delete_on_termination', 'virtual_name',
3883- 'snapshot_id', 'volume_id', 'volume_size',
3884- 'no_device'):
3885- values[key] = bdm.get(key)
3886-
3887- virtual_name = bdm.get('virtual_name')
3888- if (virtual_name is not None and
3889- block_device.is_swap_or_ephemeral(virtual_name)):
3890- size = self._volume_size(instance_type, virtual_name)
3891- if size == 0:
3892- continue
3893- values['volume_size'] = size
3894-
3895- # NOTE(yamahata): NoDevice eliminates devices defined in image
3896- # files by command line option.
3897- # (--block-device-mapping)
3898- if virtual_name == 'NoDevice':
3899- values['no_device'] = True
3900- for k in ('delete_on_termination', 'volume_id',
3901- 'snapshot_id', 'volume_id', 'volume_size',
3902- 'virtual_name'):
3903- values[k] = None
3904-
3905- self.db.block_device_mapping_update_or_create(elevated_context,
3906- values)
3907-
3908- #NOTE(bcwaldon): No policy check since this is only used by scheduler and
3909- # the compute api. That should probably be cleaned up, though.
3910- def create_db_entry_for_new_instance(self, context, instance_type, image,
3911- base_options, security_group, block_device_mapping):
3912- """Create an entry in the DB for this new instance,
3913- including any related table updates (such as security group,
3914- etc).
3915-
3916- This is called by the scheduler after a location for the
3917- instance has been determined.
3918- """
3919- elevated = context.elevated()
3920- if security_group is None:
3921- security_group = ['default']
3922- if not isinstance(security_group, list):
3923- security_group = [security_group]
3924-
3925- security_groups = []
3926- for security_group_name in security_group:
3927- group = self.db.security_group_get_by_name(context,
3928- context.project_id,
3929- security_group_name)
3930- security_groups.append(group['id'])
3931-
3932- base_options.setdefault('launch_index', 0)
3933- instance = self.db.instance_create(context, base_options)
3934- instance_id = instance['id']
3935- instance_uuid = instance['uuid']
3936-
3937- for security_group_id in security_groups:
3938- self.db.instance_add_security_group(elevated,
3939- instance_uuid,
3940- security_group_id)
3941-
3942- # BlockDeviceMapping table
3943- self._update_image_block_device_mapping(elevated, instance_type,
3944- instance_id, image['properties'].get('mappings', []))
3945- self._update_block_device_mapping(elevated, instance_type, instance_id,
3946- image['properties'].get('block_device_mapping', []))
3947- # override via command line option
3948- self._update_block_device_mapping(elevated, instance_type, instance_id,
3949- block_device_mapping)
3950-
3951- # Set sane defaults if not specified
3952- updates = {}
3953-
3954- display_name = instance.get('display_name')
3955- if display_name is None:
3956- display_name = self._default_display_name(instance_id)
3957-
3958- hostname = instance.get('hostname')
3959- if hostname is None:
3960- hostname = display_name
3961-
3962- updates['display_name'] = display_name
3963- updates['hostname'] = utils.sanitize_hostname(hostname)
3964- updates['vm_state'] = vm_states.BUILDING
3965- updates['task_state'] = task_states.SCHEDULING
3966-
3967- if (image['properties'].get('mappings', []) or
3968- image['properties'].get('block_device_mapping', []) or
3969- block_device_mapping):
3970- updates['shutdown_terminate'] = False
3971-
3972- instance = self.update(context, instance, **updates)
3973- return instance
3974-
3975- def _default_display_name(self, instance_id):
3976- return "Server %s" % instance_id
3977-
3978- def _schedule_run_instance(self,
3979- rpc_method,
3980- context, base_options,
3981- instance_type,
3982- availability_zone, injected_files,
3983- admin_password, image,
3984- num_instances,
3985- requested_networks,
3986- block_device_mapping,
3987- security_group,
3988- filter_properties):
3989- """Send a run_instance request to the schedulers for processing."""
3990-
3991- pid = context.project_id
3992- uid = context.user_id
3993-
3994- LOG.debug(_("Sending create to scheduler for %(pid)s/%(uid)s's") %
3995- locals())
3996-
3997- request_spec = {
3998- 'image': utils.to_primitive(image),
3999- 'instance_properties': base_options,
4000- 'instance_type': instance_type,
4001- 'num_instances': num_instances,
4002- 'block_device_mapping': block_device_mapping,
4003- 'security_group': security_group,
4004- }
4005-
4006- return rpc_method(context,
4007- FLAGS.scheduler_topic,
4008- {"method": "run_instance",
4009- "args": {"topic": FLAGS.compute_topic,
4010- "request_spec": request_spec,
4011- "admin_password": admin_password,
4012- "injected_files": injected_files,
4013- "requested_networks": requested_networks,
4014- "is_first_time": True,
4015- "filter_properties": filter_properties}})
4016-
4017- def _check_create_policies(self, context, availability_zone,
4018- requested_networks, block_device_mapping):
4019- """Check policies for create()."""
4020- target = {'project_id': context.project_id,
4021- 'user_id': context.user_id,
4022- 'availability_zone': availability_zone}
4023- check_policy(context, 'create', target)
4024-
4025- if requested_networks:
4026- check_policy(context, 'create:attach_network', target)
4027-
4028- if block_device_mapping:
4029- check_policy(context, 'create:attach_volume', target)
4030-
4031- def create(self, context, instance_type,
4032- image_href, kernel_id=None, ramdisk_id=None,
4033- min_count=None, max_count=None,
4034- display_name=None, display_description=None,
4035- key_name=None, key_data=None, security_group=None,
4036- availability_zone=None, user_data=None, metadata=None,
4037- injected_files=None, admin_password=None,
4038- block_device_mapping=None, access_ip_v4=None,
4039- access_ip_v6=None, requested_networks=None, config_drive=None,
4040- auto_disk_config=None, scheduler_hints=None):
4041- """
4042- Provision instances, sending instance information to the
4043- scheduler. The scheduler will determine where the instance(s)
4044- go and will handle creating the DB entries.
4045-
4046- Returns a tuple of (instances, reservation_id) where instances
4047- could be 'None' or a list of instance dicts depending on if
4048- we waited for information from the scheduler or not.
4049- """
4050-
4051- self._check_create_policies(context, availability_zone,
4052- requested_networks, block_device_mapping)
4053-
4054- # We can create the DB entry for the instance here if we're
4055- # only going to create 1 instance.
4056- # This speeds up API responses for builds
4057- # as we don't need to wait for the scheduler.
4058- create_instance_here = max_count == 1
4059-
4060- (instances, reservation_id) = self._create_instance(
4061- context, instance_type,
4062- image_href, kernel_id, ramdisk_id,
4063- min_count, max_count,
4064- display_name, display_description,
4065- key_name, key_data, security_group,
4066- availability_zone, user_data, metadata,
4067- injected_files, admin_password,
4068- access_ip_v4, access_ip_v6,
4069- requested_networks, config_drive,
4070- block_device_mapping, auto_disk_config,
4071- create_instance_here=create_instance_here,
4072- scheduler_hints=scheduler_hints)
4073-
4074- if create_instance_here or instances is None:
4075- return (instances, reservation_id)
4076-
4077- inst_ret_list = []
4078- for instance in instances:
4079- if instance.get('_is_precooked', False):
4080- inst_ret_list.append(instance)
4081- else:
4082- # Scheduler only gives us the 'id'. We need to pull
4083- # in the created instances from the DB
4084- instance = self.db.instance_get(context, instance['id'])
4085- inst_ret_list.append(dict(instance.iteritems()))
4086-
4087- return (inst_ret_list, reservation_id)
4088-
4089- def ensure_default_security_group(self, context):
4090- """Ensure that a context has a security group.
4091-
4092- Creates a security group for the security context if it does not
4093- already exist.
4094-
4095- :param context: the security context
4096- """
4097- try:
4098- self.db.security_group_get_by_name(context,
4099- context.project_id,
4100- 'default')
4101- except exception.NotFound:
4102- values = {'name': 'default',
4103- 'description': 'default',
4104- 'user_id': context.user_id,
4105- 'project_id': context.project_id}
4106- self.db.security_group_create(context, values)
4107-
4108- def trigger_security_group_rules_refresh(self, context, security_group_id):
4109- """Called when a rule is added to or removed from a security_group."""
4110-
4111- security_group = self.db.security_group_get(context, security_group_id)
4112-
4113- hosts = set()
4114- for instance in security_group['instances']:
4115- if instance['host'] is not None:
4116- hosts.add(instance['host'])
4117-
4118- for host in hosts:
4119- rpc.cast(context,
4120- self.db.queue_get_for(context, FLAGS.compute_topic, host),
4121- {"method": "refresh_security_group_rules",
4122- "args": {"security_group_id": security_group.id}})
4123-
4124- def trigger_security_group_members_refresh(self, context, group_ids):
4125- """Called when a security group gains a new or loses a member.
4126-
4127- Sends an update request to each compute node for whom this is
4128- relevant.
4129- """
4130- # First, we get the security group rules that reference these groups as
4131- # the grantee..
4132- security_group_rules = set()
4133- for group_id in group_ids:
4134- security_group_rules.update(
4135- self.db.security_group_rule_get_by_security_group_grantee(
4136- context,
4137- group_id))
4138-
4139- # ..then we distill the security groups to which they belong..
4140- security_groups = set()
4141- for rule in security_group_rules:
4142- security_group = self.db.security_group_get(
4143- context,
4144- rule['parent_group_id'])
4145- security_groups.add(security_group)
4146-
4147- # ..then we find the instances that are members of these groups..
4148- instances = set()
4149- for security_group in security_groups:
4150- for instance in security_group['instances']:
4151- instances.add(instance)
4152-
4153- # ...then we find the hosts where they live...
4154- hosts = set()
4155- for instance in instances:
4156- if instance['host']:
4157- hosts.add(instance['host'])
4158-
4159- # ...and finally we tell these nodes to refresh their view of this
4160- # particular security group.
4161- for host in hosts:
4162- rpc.cast(context,
4163- self.db.queue_get_for(context, FLAGS.compute_topic, host),
4164- {"method": "refresh_security_group_members",
4165- "args": {"security_group_id": group_id}})
4166-
4167- def trigger_provider_fw_rules_refresh(self, context):
4168- """Called when a rule is added/removed from a provider firewall"""
4169-
4170- hosts = [x['host'] for (x, idx)
4171- in self.db.service_get_all_compute_sorted(context)]
4172- for host in hosts:
4173- rpc.cast(context,
4174- self.db.queue_get_for(context, FLAGS.compute_topic, host),
4175- {'method': 'refresh_provider_fw_rules', 'args': {}})
4176-
4177- def _is_security_group_associated_with_server(self, security_group,
4178- instance_uuid):
4179- """Check if the security group is already associated
4180- with the instance. If Yes, return True.
4181- """
4182-
4183- if not security_group:
4184- return False
4185-
4186- instances = security_group.get('instances')
4187- if not instances:
4188- return False
4189-
4190- for inst in instances:
4191- if (instance_uuid == inst['uuid']):
4192- return True
4193-
4194- return False
4195-
4196- @wrap_check_policy
4197- def add_security_group(self, context, instance, security_group_name):
4198- """Add security group to the instance"""
4199- security_group = self.db.security_group_get_by_name(context,
4200- context.project_id,
4201- security_group_name)
4202-
4203- instance_uuid = instance['uuid']
4204-
4205- #check if the security group is associated with the server
4206- if self._is_security_group_associated_with_server(security_group,
4207- instance_uuid):
4208- raise exception.SecurityGroupExistsForInstance(
4209- security_group_id=security_group['id'],
4210- instance_id=instance_uuid)
4211-
4212- #check if the instance is in running state
4213- if instance['power_state'] != power_state.RUNNING:
4214- raise exception.InstanceNotRunning(instance_id=instance_uuid)
4215-
4216- self.db.instance_add_security_group(context.elevated(),
4217- instance_uuid,
4218- security_group['id'])
4219- params = {"security_group_id": security_group['id']}
4220- # NOTE(comstud): No instance_uuid argument to this compute manager
4221- # call
4222- self._cast_compute_message('refresh_security_group_rules',
4223- context, host=instance['host'], params=params)
4224-
@wrap_check_policy
def remove_security_group(self, context, instance, security_group_name):
    """Remove the security group associated with the instance"""
    security_group = self.db.security_group_get_by_name(
        context, context.project_id, security_group_name)

    instance_uuid = instance['uuid']

    # The group must currently be attached to this server.
    if not self._is_security_group_associated_with_server(security_group,
                                                          instance_uuid):
        raise exception.SecurityGroupNotExistsForInstance(
            security_group_id=security_group['id'],
            instance_id=instance_uuid)

    # Groups may only be removed from running instances.
    if instance['power_state'] != power_state.RUNNING:
        raise exception.InstanceNotRunning(instance_id=instance_uuid)

    self.db.instance_remove_security_group(context.elevated(),
                                           instance_uuid,
                                           security_group['id'])
    # NOTE(comstud): No instance_uuid argument to this compute manager
    # call
    self._cast_compute_message(
        'refresh_security_group_rules', context, host=instance['host'],
        params={"security_group_id": security_group['id']})
4253-
@wrap_check_policy
def update(self, context, instance, **kwargs):
    """Update the instance record in the datastore.

    :param context: The security context
    :param instance: The instance to update
    :param kwargs: data fields of the instance to be updated

    :returns: a plain dict of the updated instance row
    """
    row = self.db.instance_update(context, instance["id"], kwargs)
    return dict(row.iteritems())
4268-
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
                                vm_states.ERROR])
def soft_delete(self, context, instance):
    """Terminate an instance."""
    LOG.debug(_('Going to try to soft delete instance'),
              instance=instance)

    if instance['disable_terminate']:
        return

    # NOTE(jerdfelt): Reclaiming soft-deleted instances is the compute
    # daemon's job. With no host assigned there is no daemon to reclaim,
    # so delete the record immediately instead.
    if instance['host']:
        self.update(context,
                    instance,
                    vm_state=vm_states.SOFT_DELETE,
                    task_state=task_states.POWERING_OFF,
                    deleted_at=utils.utcnow())
        self._cast_compute_message('power_off_instance',
                                   context, instance)
    else:
        LOG.warning(_('No host for instance, deleting immediately'),
                    instance=instance)
        try:
            self.db.instance_destroy(context, instance['id'])
        except exception.InstanceNotFound:
            # NOTE(comstud): Race condition. Instance already gone.
            pass
4301-
def _delete(self, context, instance):
    """Tear an instance down: cast 'terminate_instance' to its compute
    host when one is assigned, otherwise destroy the DB record directly.
    """
    try:
        if not instance['host']:
            self.db.instance_destroy(context, instance['id'])
        else:
            self.update(context,
                        instance,
                        task_state=task_states.DELETING,
                        progress=0)
            self._cast_compute_message('terminate_instance',
                                       context, instance)
    except exception.InstanceNotFound:
        # NOTE(comstud): Race condition. Instance already gone.
        pass
4318-
4319- # NOTE(jerdfelt): The API implies that only ACTIVE and ERROR are
4320- # allowed but the EC2 API appears to allow from RESCUED and STOPPED
4321- # too
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.BUILDING,
                                vm_states.ERROR, vm_states.RESCUED,
                                vm_states.SHUTOFF, vm_states.STOPPED])
def delete(self, context, instance):
    """Terminate an instance."""
    LOG.debug(_("Going to try to terminate instance"), instance=instance)
    # Instances flagged as non-terminable are silently skipped.
    if instance['disable_terminate']:
        return
    self._delete(context, instance)
4334-
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.SOFT_DELETE])
def restore(self, context, instance):
    """Restore a previously deleted (but not reclaimed) instance."""
    # Clear the soft-delete markers first.
    self.update(context,
                instance,
                vm_state=vm_states.ACTIVE,
                task_state=None,
                deleted_at=None)

    # Only power the instance back on when a host is assigned.
    if instance['host']:
        self.update(context,
                    instance,
                    task_state=task_states.POWERING_ON)
        self._cast_compute_message('power_on_instance',
                                   context, instance)
4352-
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.SOFT_DELETE])
def force_delete(self, context, instance):
    """Immediately delete a soft-deleted instance that has not yet
    been reclaimed."""
    self._delete(context, instance)
4358-
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
                                vm_states.RESCUED],
                      task_state=[None, task_states.RESIZE_VERIFY])
def stop(self, context, instance, do_cast=True):
    """Stop an instance.

    :param context: the security context
    :param instance: the instance to stop
    :param do_cast: when True the RPC is cast (asynchronous); when
        False it is called synchronously and waits for completion
    """
    # Fix: removed an unused local (instance_uuid) that was assigned
    # but never read.
    LOG.debug(_("Going to try to stop instance"), instance=instance)

    self.update(context,
                instance,
                vm_state=vm_states.ACTIVE,
                task_state=task_states.STOPPING,
                terminated_at=utils.utcnow(),
                progress=0)

    rpc_method = rpc.cast if do_cast else rpc.call
    self._cast_or_call_compute_message(rpc_method, 'stop_instance',
                                       context, instance)
4378-
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.STOPPED, vm_states.SHUTOFF])
def start(self, context, instance):
    """Start a stopped (or shut-off) instance.

    Shut-off instances that requested terminate-on-shutdown are not
    restarted; other shut-off instances are first reaped via a
    synchronous stop before being started again.
    """
    vm_state = instance["vm_state"]
    instance_uuid = instance["uuid"]
    LOG.debug(_("Going to try to start instance"), instance=instance)

    if vm_state == vm_states.SHUTOFF:
        if instance['shutdown_terminate']:
            # Fix: the message previously lacked the closing parenthesis
            # after %(vm_state)s.
            LOG.warning(_("Instance %(instance_uuid)s is not "
                          "stopped. (%(vm_state)s)") % locals())
            return

        # NOTE(yamahata): nova compute doesn't reap instances
        # which initiated shutdown itself. So reap it here.
        self.stop(context, instance, do_cast=False)

    self.update(context,
                instance,
                vm_state=vm_states.STOPPED,
                task_state=task_states.STARTING)

    # TODO(yamahata): injected_files isn't supported right now.
    # It is used only for osapi. not for ec2 api.
    # availability_zone isn't used by run_instance.
    self._cast_compute_message('start_instance', context, instance)
4406-
4407- #NOTE(bcwaldon): no policy check here since it should be rolled in to
4408- # search_opts in get_all
def get_active_by_window(self, context, begin, end=None, project_id=None):
    """Return the instances that stayed active across the given window."""
    return self.db.instance_get_active_by_window(
        context, begin, end, project_id)
4413-
4414- #NOTE(bcwaldon): this doesn't really belong in this class
def get_instance_type(self, context, instance_type_id):
    """Look up an instance type by its id."""
    return instance_types.get_instance_type(instance_type_id)
4418-
def get(self, context, instance_id):
    """Get a single instance with the given instance_id."""
    # NOTE(ameade): integer ids are still supported for ec2.
    if utils.is_uuid_like(instance_id):
        instance = self.db.instance_get_by_uuid(context, instance_id)
    else:
        instance = self.db.instance_get(context, instance_id)

    check_policy(context, 'get', instance)

    result = dict(instance.iteritems())
    # NOTE(comstud): 'name' is not included in iteritems, copy it over.
    result['name'] = instance['name']
    return result
4433-
def get_all(self, context, search_opts=None, sort_key='created_at',
            sort_dir='desc'):
    """Get all instances filtered by one of the given parameters.

    If there is no filter and the context is an admin, it will retrieve
    all instances in the system.

    Deleted instances will be returned by default, unless there is a
    search option that says otherwise.

    The results will be returned sorted in the order specified by the
    'sort_dir' parameter using the key specified in the 'sort_key'
    parameter.
    """

    #TODO(bcwaldon): determine the best argument for target here
    target = {
        'project_id': context.project_id,
        'user_id': context.user_id,
    }

    check_policy(context, "get_all", target)

    if search_opts is None:
        search_opts = {}

    LOG.debug(_("Searching by: %s") % str(search_opts))

    # Fixups for the DB call
    filters = {}

    # The two remap helpers below mutate 'filters' via closure; they
    # raise ValueError to signal that no instance can possibly match.
    def _remap_flavor_filter(flavor_id):
        try:
            instance_type = instance_types.get_instance_type_by_flavor_id(
                flavor_id)
        except exception.FlavorNotFound:
            raise ValueError()

        filters['instance_type_id'] = instance_type['id']

    def _remap_fixed_ip_filter(fixed_ip):
        # Turn fixed_ip into a regexp match. Since '.' matches
        # any character, we need to use regexp escaping for it.
        filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')

    # search_option to filter_name mapping.
    filter_mapping = {
        'image': 'image_ref',
        'name': 'display_name',
        'instance_name': 'name',
        'tenant_id': 'project_id',
        'flavor': _remap_flavor_filter,
        'fixed_ip': _remap_fixed_ip_filter}

    # copy from search_opts, doing various remappings as necessary
    for opt, value in search_opts.iteritems():
        # Do remappings.
        # Values not in the filter_mapping table are copied as-is.
        # If remapping is None, option is not copied
        # If the remapping is a string, it is the filter_name to use
        try:
            remap_object = filter_mapping[opt]
        except KeyError:
            filters[opt] = value
        else:
            # Remaps are strings to translate to, or functions to call
            # to do the translating as defined by the table above.
            if isinstance(remap_object, basestring):
                filters[remap_object] = value
            else:
                try:
                    remap_object(value)

                # We already know we can't match the filter, so
                # return an empty list
                except ValueError:
                    return []

    inst_models = self._get_instances_by_filters(context, filters,
                                                 sort_key, sort_dir)

    # Convert the models to dictionaries
    instances = []
    for inst_model in inst_models:
        instance = dict(inst_model.iteritems())
        # NOTE(comstud): Doesn't get returned by iteritems
        instance['name'] = inst_model['name']
        instances.append(instance)

    return instances
4524-
4525- def _get_instances_by_filters(self, context, filters, sort_key, sort_dir):
4526- if 'ip6' in filters or 'ip' in filters:
4527- res = self.network_api.get_instance_uuids_by_ip_filter(context,
4528- filters)
4529- # NOTE(jkoelker) It is possible that we will get the same
4530- # instance uuid twice (one for ipv4 and ipv6)
4531- uuids = set([r['instance_uuid'] for r in res])
4532- filters['uuid'] = uuids
4533-
4534- return self.db.instance_get_all_by_filters(context, filters, sort_key,
4535- sort_dir)
4536-
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF])
def backup(self, context, instance, name, backup_type, rotation,
           extra_properties=None):
    """Backup the given instance.

    :param instance: nova.db.sqlalchemy.models.Instance
    :param name: name of the backup image
    :param backup_type: backup category label, e.g. daily backups are
        called 'daily'
    :param rotation: int representing how many backups to keep around;
        None if rotation shouldn't be used (as in the case of snapshots)
    :param extra_properties: dict of extra image properties to include
    :returns: the image metadata dict of the newly created backup image
    """
    recv_meta = self._create_image(context, instance, name, 'backup',
            backup_type=backup_type, rotation=rotation,
            extra_properties=extra_properties)
    return recv_meta
4554-
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF])
def snapshot(self, context, instance, name, extra_properties=None):
    """Snapshot the given instance.

    :param instance: nova.db.sqlalchemy.models.Instance
    :param name: name of the snapshot
    :param extra_properties: dict of extra image properties to include
    :returns: a dict containing image metadata
    """
    return self._create_image(context, instance, name, 'snapshot',
                              extra_properties=extra_properties)
4568-
def _create_image(self, context, instance, name, image_type,
                  backup_type=None, rotation=None, extra_properties=None):
    """Create snapshot or backup for an instance on this host.

    :param context: security context
    :param instance: nova.db.sqlalchemy.models.Instance
    :param name: string for name of the snapshot
    :param image_type: snapshot | backup
    :param backup_type: daily | weekly
    :param rotation: int representing how many backups to keep around;
        None if rotation shouldn't be used (as in the case of snapshots)
    :param extra_properties: dict of extra image properties to include
    :returns: the image metadata dict returned by the image service
    :raises: Exception when image_type is neither 'snapshot' nor 'backup'
    """
    instance_uuid = instance['uuid']

    if image_type == "snapshot":
        task_state = task_states.IMAGE_SNAPSHOT
    elif image_type == "backup":
        task_state = task_states.IMAGE_BACKUP
    else:
        raise Exception(_('Image type not recognized %s') % image_type)

    # Test-and-set the task_state from None to the new value —
    # presumably to serialize concurrent snapshot/backup requests for
    # the same instance; confirm against the DB API semantics.
    self.db.instance_test_and_set(
        context, instance_uuid, 'task_state', [None], task_state)

    properties = {
        'instance_uuid': instance_uuid,
        'user_id': str(context.user_id),
        'image_type': image_type,
    }

    sent_meta = {'name': name, 'is_public': False}

    if image_type == 'backup':
        properties['backup_type'] = backup_type

    elif image_type == 'snapshot':
        min_ram, min_disk = self._get_minram_mindisk_params(context,
                                                            instance)
        if min_ram is not None:
            sent_meta['min_ram'] = min_ram
        if min_disk is not None:
            sent_meta['min_disk'] = min_disk

    # Caller-supplied properties override the defaults built above.
    properties.update(extra_properties or {})
    sent_meta['properties'] = properties

    # Register the image metadata first, then cast to the compute host
    # which performs the actual snapshot/upload.
    recv_meta = self.image_service.create(context, sent_meta)
    params = {'image_id': recv_meta['id'], 'image_type': image_type,
              'backup_type': backup_type, 'rotation': rotation}
    self._cast_compute_message('snapshot_instance', context, instance,
                               params=params)
    return recv_meta
4623-
4624- def _get_minram_mindisk_params(self, context, instance):
4625- try:
4626- #try to get source image of the instance
4627- orig_image = self.image_service.show(context,
4628- instance['image_ref'])
4629- except exception.ImageNotFound:
4630- return None, None
4631-
4632- #disk format of vhd is non-shrinkable
4633- if orig_image.get('disk_format') == 'vhd':
4634- min_ram = instance['instance_type']['memory_mb']
4635- min_disk = instance['instance_type']['root_gb']
4636- else:
4637- #set new image values to the original image values
4638- min_ram = orig_image.get('min_ram')
4639- min_disk = orig_image.get('min_disk')
4640-
4641- return min_ram, min_disk
4642-
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
                                vm_states.RESCUED],
                      task_state=[None, task_states.RESIZE_VERIFY])
def reboot(self, context, instance, reboot_type):
    """Reboot the given instance ('SOFT' or 'HARD')."""
    task_map = {'SOFT': task_states.REBOOTING,
                'HARD': task_states.REBOOTING_HARD}
    self.update(context,
                instance,
                vm_state=vm_states.ACTIVE,
                task_state=task_map[reboot_type])
    self._cast_compute_message('reboot_instance', context, instance,
                               params={'reboot_type': reboot_type})
4657-
def _validate_image_href(self, context, image_href):
    """Raise ImageNotFound when image_href does not resolve to an image."""
    image_service, image_id = nova.image.get_image_service(context,
                                                           image_href)
    # show() is called purely for its side effect of raising on a
    # missing image; the result is discarded.
    image_service.show(context, image_id)
4663-
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
                      task_state=[None, task_states.RESIZE_VERIFY])
def rebuild(self, context, instance, image_href, admin_password, **kwargs):
    """Rebuild the given instance with the provided attributes."""
    # Validate the image and quota limits before touching any state.
    self._validate_image_href(context, image_href)

    files_to_inject = kwargs.pop('files_to_inject', [])
    self._check_injected_file_quota(context, files_to_inject)
    self._check_metadata_properties_quota(context,
                                          kwargs.get('metadata', {}))

    self.update(context,
                instance,
                image_ref=image_href,
                vm_state=vm_states.REBUILDING,
                task_state=None,
                progress=0,
                **kwargs)

    self._cast_compute_message(
        'rebuild_instance', context, instance,
        params={"new_pass": admin_password,
                "injected_files": files_to_inject})
4693-
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
                      task_state=[task_states.RESIZE_VERIFY])
def revert_resize(self, context, instance):
    """Reverts a resize, deleting the 'new' instance in the process."""
    context = context.elevated()
    migration_ref = self.db.migration_get_by_instance_and_status(
        context, instance['uuid'], 'finished')
    if not migration_ref:
        raise exception.MigrationNotFoundByStatus(
            instance_id=instance['uuid'], status='finished')

    self.update(context,
                instance,
                vm_state=vm_states.RESIZING,
                task_state=task_states.RESIZE_REVERTING)

    # The revert is driven from the destination host of the resize.
    self._cast_compute_message(
        'revert_resize', context, instance,
        host=migration_ref['dest_compute'],
        params={'migration_id': migration_ref['id']})

    self.db.migration_update(context, migration_ref['id'],
                             {'status': 'reverted'})
4717-
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
                      task_state=[task_states.RESIZE_VERIFY])
def confirm_resize(self, context, instance):
    """Confirms a migration/resize and deletes the 'old' instance."""
    context = context.elevated()
    migration_ref = self.db.migration_get_by_instance_and_status(
        context, instance['uuid'], 'finished')
    if not migration_ref:
        raise exception.MigrationNotFoundByStatus(
            instance_id=instance['uuid'], status='finished')

    self.update(context,
                instance,
                vm_state=vm_states.ACTIVE,
                task_state=None)

    # The confirm is driven from the source host of the resize.
    self._cast_compute_message(
        'confirm_resize', context, instance,
        host=migration_ref['source_compute'],
        params={'migration_id': migration_ref['id']})

    self.db.migration_update(context, migration_ref['id'],
                             {'status': 'confirmed'})
    self.db.instance_update(context, instance['uuid'],
                            {'host': migration_ref['dest_compute'], })
4743-
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
                      task_state=[None])
def resize(self, context, instance, flavor_id=None, **kwargs):
    """Resize (ie, migrate) a running instance.

    If flavor_id is None, the process is considered a migration, keeping
    the original flavor_id. If flavor_id is not None, the instance should
    be migrated to a new host and resized to the new flavor_id.
    """
    current_instance_type = instance['instance_type']

    # If flavor_id is not provided, only migrate the instance.
    if not flavor_id:
        LOG.debug(_("flavor_id is None. Assuming migration."))
        new_instance_type = current_instance_type
    else:
        new_instance_type = instance_types.get_instance_type_by_flavor_id(
                flavor_id)

    # Fix: this check previously ran *after* new_instance_type['name']
    # was dereferenced, so a None lookup crashed with TypeError before
    # the intended FlavorNotFound could be raised.
    if not new_instance_type:
        raise exception.FlavorNotFound(flavor_id=flavor_id)

    current_instance_type_name = current_instance_type['name']
    new_instance_type_name = new_instance_type['name']
    LOG.debug(_("Old instance type %(current_instance_type_name)s, "
            " new instance type %(new_instance_type_name)s") % locals())

    # NOTE(markwash): look up the image early to avoid auth problems later
    image = self.image_service.show(context, instance['image_ref'])

    current_memory_mb = current_instance_type['memory_mb']
    new_memory_mb = new_instance_type['memory_mb']

    # An explicit flavor identical in memory to the current one is
    # rejected; a plain migration (no flavor_id) is allowed through.
    if (current_memory_mb == new_memory_mb) and flavor_id:
        raise exception.CannotResizeToSameSize()

    self.update(context,
                instance,
                vm_state=vm_states.RESIZING,
                task_state=task_states.RESIZE_PREP,
                progress=0,
                **kwargs)

    request_spec = {
            'instance_type': new_instance_type,
            'num_instances': 1,
            'instance_properties': instance}

    filter_properties = {'ignore_hosts': []}

    # Unless explicitly allowed, never resize onto the current host.
    if not FLAGS.allow_resize_to_same_host:
        filter_properties['ignore_hosts'].append(instance['host'])

    args = {
        "topic": FLAGS.compute_topic,
        "instance_uuid": instance['uuid'],
        "instance_type_id": new_instance_type['id'],
        "image": image,
        "update_db": False,
        "request_spec": utils.to_primitive(request_spec),
        "filter_properties": filter_properties,
    }
    self._cast_scheduler_message(context,
                                 {"method": "prep_resize",
                                  "args": args})
4809-
@wrap_check_policy
def add_fixed_ip(self, context, instance, network_id):
    """Allocate a fixed ip on the given network for the instance."""
    params = dict(network_id=network_id)
    self._cast_compute_message('add_fixed_ip_to_instance', context,
                               instance, params=params)
4815-
@wrap_check_policy
def remove_fixed_ip(self, context, instance, address):
    """Deallocate the given fixed ip address from the instance."""
    params = dict(address=address)
    self._cast_compute_message('remove_fixed_ip_from_instance',
                               context, instance, params=params)
4821-
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
                                vm_states.RESCUED],
                      task_state=[None, task_states.RESIZE_VERIFY])
def pause(self, context, instance):
    """Pause the given instance."""
    self.update(context, instance,
                vm_state=vm_states.ACTIVE,
                task_state=task_states.PAUSING)
    self._cast_compute_message('pause_instance', context, instance)
4833-
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.PAUSED])
def unpause(self, context, instance):
    """Unpause the given instance."""
    self.update(context, instance,
                vm_state=vm_states.PAUSED,
                task_state=task_states.UNPAUSING)
    self._cast_compute_message('unpause_instance', context, instance)
4843-
@wrap_check_policy
def get_diagnostics(self, context, instance):
    """Retrieve diagnostics for the given instance (synchronous call)."""
    return self._call_compute_message("get_diagnostics", context, instance)
4849-
@wrap_check_policy
def get_actions(self, context, instance):
    """Retrieve the recorded actions for the given instance."""
    return self.db.instance_get_actions(context, instance['uuid'])
4854-
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
                                vm_states.RESCUED],
                      task_state=[None, task_states.RESIZE_VERIFY])
def suspend(self, context, instance):
    """Suspend the given instance."""
    self.update(context, instance,
                vm_state=vm_states.ACTIVE,
                task_state=task_states.SUSPENDING)
    self._cast_compute_message('suspend_instance', context, instance)
4866-
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.SUSPENDED])
def resume(self, context, instance):
    """Resume the given instance."""
    self.update(context, instance,
                vm_state=vm_states.SUSPENDED,
                task_state=task_states.RESUMING)
    self._cast_compute_message('resume_instance', context, instance)
4876-
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
                                vm_states.STOPPED],
                      task_state=[None, task_states.RESIZE_VERIFY])
def rescue(self, context, instance, rescue_password=None):
    """Rescue the given instance."""
    self.update(context, instance,
                vm_state=vm_states.ACTIVE,
                task_state=task_states.RESCUING)
    self._cast_compute_message(
        'rescue_instance', context, instance,
        params={"rescue_password": rescue_password})
4893-
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.RESCUED])
def unrescue(self, context, instance):
    """Unrescue the given instance."""
    self.update(context, instance,
                vm_state=vm_states.RESCUED,
                task_state=task_states.UNRESCUING)
    self._cast_compute_message('unrescue_instance', context, instance)
4903-
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE])
def set_admin_password(self, context, instance, password=None):
    """Set the root/admin password for the given instance."""
    self.update(context, instance,
                task_state=task_states.UPDATING_PASSWORD)
    self._cast_compute_message('set_admin_password', context, instance,
                               params={"new_pass": password})
4915-
@wrap_check_policy
def inject_file(self, context, instance, path, file_contents):
    """Write a file to the given instance."""
    self._cast_compute_message(
        'inject_file', context, instance,
        params={'path': path, 'file_contents': file_contents})
4922-
@wrap_check_policy
def get_vnc_console(self, context, instance, console_type):
    """Get a url to an instance Console."""
    connect_info = self._call_compute_message(
        'get_vnc_console', context, instance,
        params={"console_type": console_type})

    # Register the connection token with the console auth service so
    # the proxy will accept the client's connection.
    rpc.call(context, '%s' % FLAGS.consoleauth_topic,
             {'method': 'authorize_console',
              'args': {'token': connect_info['token'],
                       'console_type': console_type,
                       'host': connect_info['host'],
                       'port': connect_info['port'],
                       'internal_access_path':
                           connect_info['internal_access_path']}})

    return {'url': connect_info['access_url']}
4939-
@wrap_check_policy
def get_console_output(self, context, instance, tail_length=None):
    """Fetch console output for an instance (synchronous call)."""
    return self._call_compute_message(
        'get_console_output', context, instance,
        params={'tail_length': tail_length})
4946-
@wrap_check_policy
def lock(self, context, instance):
    """Lock the given instance (asynchronous cast)."""
    self._cast_compute_message('lock_instance', context, instance)
4951-
@wrap_check_policy
def unlock(self, context, instance):
    """Unlock the given instance (asynchronous cast)."""
    self._cast_compute_message('unlock_instance', context, instance)
4956-
@wrap_check_policy
def get_lock(self, context, instance):
    """Return the boolean state of the given instance's lock."""
    # Re-fetch the instance to read the current 'locked' flag.
    return self.get(context, instance['uuid'])['locked']
4961-
@wrap_check_policy
def reset_network(self, context, instance):
    """Reset networking on the instance (asynchronous cast)."""
    self._cast_compute_message('reset_network', context, instance)
4966-
@wrap_check_policy
def inject_network_info(self, context, instance):
    """Inject network info for the instance (asynchronous cast)."""
    self._cast_compute_message('inject_network_info', context, instance)
4971-
@wrap_check_policy
def attach_volume(self, context, instance, volume_id, device):
    """Attach an existing volume to an existing instance."""
    # Device must look like a disk node path, e.g. /dev/sdb or /dev/xvdc.
    if not re.match("^/dev/x{0,1}[a-z]d[a-z]+$", device):
        raise exception.InvalidDevicePath(path=device)
    volume = self.volume_api.get(context, volume_id)
    self.volume_api.check_attach(context, volume)
    self.volume_api.reserve_volume(context, volume)
    self._cast_compute_message(
        'attach_volume', context, instance,
        params={"volume_id": volume_id, "mountpoint": device})
4984-
# FIXME(comstud): I wonder if API should pull in the instance from
# the volume ID via volume API and pass it and the volume object here
def detach_volume(self, context, volume_id):
    """Detach a volume from an instance.

    Looks up the instance currently using the volume, policy-checks the
    caller, verifies the volume can be detached, then casts the detach
    to the instance's compute host.

    :raises: exception.VolumeUnattached when no instance has the volume
    """
    instance = self.db.volume_get_instance(context.elevated(), volume_id)
    if not instance:
        raise exception.VolumeUnattached(volume_id=volume_id)

    check_policy(context, 'detach_volume', instance)

    volume = self.volume_api.get(context, volume_id)
    self.volume_api.check_detach(context, volume)

    params = {'volume_id': volume_id}
    self._cast_compute_message('detach_volume', context, instance,
                               params=params)
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches

to all changes: