Merge lp:~ttx/nova/diablo-rbp-merge into lp:~hudson-openstack/nova/milestone-proposed

Proposed by Thierry Carrez on 2011-09-09
Status: Merged
Approved by: Soren Hansen on 2011-09-09
Approved revision: 1154
Merged at revision: 1154
Proposed branch: lp:~ttx/nova/diablo-rbp-merge
Merge into: lp:~hudson-openstack/nova/milestone-proposed
Diff against target: 20939 lines (+13771/-2577)
161 files modified
.mailmap (+1/-0)
Authors (+5/-0)
bin/instance-usage-audit (+2/-3)
bin/nova-ajax-console-proxy (+6/-6)
bin/nova-api (+1/-0)
bin/nova-api-ec2 (+1/-0)
bin/nova-api-os (+1/-0)
bin/nova-compute (+1/-0)
bin/nova-manage (+544/-12)
bin/nova-network (+1/-0)
bin/nova-objectstore (+1/-0)
bin/nova-scheduler (+3/-0)
bin/nova-volume (+1/-0)
bin/nova-vsa (+49/-0)
contrib/nova.sh (+1/-1)
nova/api/ec2/__init__.py (+24/-15)
nova/api/ec2/admin.py (+2/-3)
nova/api/ec2/cloud.py (+41/-22)
nova/api/openstack/common.py (+54/-28)
nova/api/openstack/contrib/createserverext.py (+22/-6)
nova/api/openstack/contrib/floating_ips.py (+12/-7)
nova/api/openstack/contrib/simple_tenant_usage.py (+236/-0)
nova/api/openstack/contrib/virtual_storage_arrays.py (+606/-0)
nova/api/openstack/contrib/volumes.py (+34/-2)
nova/api/openstack/contrib/volumetypes.py (+197/-0)
nova/api/openstack/create_instance_helper.py (+30/-21)
nova/api/openstack/schemas/v1.1/server.rng (+2/-0)
nova/api/openstack/servers.py (+63/-27)
nova/api/openstack/views/addresses.py (+0/-1)
nova/api/openstack/views/servers.py (+7/-9)
nova/compute/api.py (+134/-63)
nova/compute/manager.py (+252/-235)
nova/compute/task_states.py (+59/-0)
nova/compute/vm_states.py (+39/-0)
nova/context.py (+1/-1)
nova/db/api.py (+135/-4)
nova/db/sqlalchemy/api.py (+486/-32)
nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py (+14/-4)
nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py (+10/-6)
nova/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py (+115/-0)
nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py (+75/-0)
nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py (+138/-0)
nova/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py (+44/-0)
nova/db/sqlalchemy/migration.py (+3/-1)
nova/db/sqlalchemy/models.py (+82/-13)
nova/exception.py (+56/-5)
nova/flags.py (+27/-2)
nova/image/glance.py (+29/-4)
nova/ipv6/account_identifier.py (+6/-1)
nova/ipv6/rfc2462.py (+3/-1)
nova/log.py (+5/-1)
nova/network/api.py (+6/-0)
nova/network/linux_net.py (+61/-4)
nova/network/manager.py (+44/-23)
nova/network/quantum/__init__.py (+16/-0)
nova/network/quantum/client.py (+307/-0)
nova/network/quantum/manager.py (+324/-0)
nova/network/quantum/melange_connection.py (+141/-0)
nova/network/quantum/melange_ipam_lib.py (+205/-0)
nova/network/quantum/nova_ipam_lib.py (+195/-0)
nova/network/quantum/quantum_connection.py (+118/-0)
nova/notifier/api.py (+29/-1)
nova/notifier/list_notifier.py (+68/-0)
nova/quota.py (+3/-2)
nova/rpc/__init__.py (+17/-26)
nova/rpc/common.py (+6/-0)
nova/rpc/impl_carrot.py (+81/-21)
nova/rpc/impl_kombu.py (+781/-0)
nova/scheduler/abstract_scheduler.py (+11/-7)
nova/scheduler/api.py (+35/-17)
nova/scheduler/base_scheduler.py (+14/-2)
nova/scheduler/driver.py (+4/-6)
nova/scheduler/host_filter.py (+6/-0)
nova/scheduler/manager.py (+6/-4)
nova/scheduler/vsa.py (+535/-0)
nova/service.py (+11/-21)
nova/test.py (+18/-0)
nova/tests/api/ec2/__init__.py (+19/-0)
nova/tests/api/ec2/test_middleware.py (+45/-0)
nova/tests/api/openstack/contrib/test_createserverext.py (+126/-17)
nova/tests/api/openstack/contrib/test_floating_ips.py (+213/-26)
nova/tests/api/openstack/contrib/test_security_groups.py (+36/-36)
nova/tests/api/openstack/contrib/test_simple_tenant_usage.py (+172/-0)
nova/tests/api/openstack/contrib/test_vsa.py (+450/-0)
nova/tests/api/openstack/fakes.py (+9/-2)
nova/tests/api/openstack/test_extensions.py (+3/-0)
nova/tests/api/openstack/test_server_actions.py (+22/-33)
nova/tests/api/openstack/test_servers.py (+205/-47)
nova/tests/api/openstack/test_volume_types.py (+171/-0)
nova/tests/api/openstack/test_volume_types_extra_specs.py (+181/-0)
nova/tests/image/test_glance.py (+47/-6)
nova/tests/integrated/test_servers.py (+25/-12)
nova/tests/integrated/test_volumes.py (+17/-0)
nova/tests/monkey_patch_example/__init__.py (+33/-0)
nova/tests/monkey_patch_example/example_a.py (+29/-0)
nova/tests/monkey_patch_example/example_b.py (+30/-0)
nova/tests/notifier/__init__.py (+16/-0)
nova/tests/notifier/test_list_notifier.py (+88/-0)
nova/tests/scheduler/test_abstract_scheduler.py (+55/-0)
nova/tests/scheduler/test_scheduler.py (+106/-9)
nova/tests/scheduler/test_vsa_scheduler.py (+641/-0)
nova/tests/test_adminapi.py (+0/-2)
nova/tests/test_cloud.py (+15/-16)
nova/tests/test_compute.py (+34/-17)
nova/tests/test_context.py (+33/-0)
nova/tests/test_db_api.py (+3/-1)
nova/tests/test_instance_types.py (+66/-2)
nova/tests/test_ipv6.py (+38/-0)
nova/tests/test_libvirt.py (+6/-4)
nova/tests/test_linux_net.py (+347/-0)
nova/tests/test_network.py (+78/-3)
nova/tests/test_notifier.py (+21/-0)
nova/tests/test_nova_manage.py (+154/-0)
nova/tests/test_quantum.py (+323/-0)
nova/tests/test_rpc.py (+6/-158)
nova/tests/test_rpc_amqp.py (+0/-88)
nova/tests/test_rpc_carrot.py (+45/-0)
nova/tests/test_rpc_common.py (+189/-0)
nova/tests/test_rpc_kombu.py (+110/-0)
nova/tests/test_test.py (+2/-3)
nova/tests/test_test_utils.py (+41/-0)
nova/tests/test_utils.py (+45/-0)
nova/tests/test_versions.py (+61/-0)
nova/tests/test_virt_drivers.py (+489/-0)
nova/tests/test_volume_types.py (+207/-0)
nova/tests/test_volume_types_extra_specs.py (+132/-0)
nova/tests/test_vsa.py (+182/-0)
nova/tests/test_vsa_volumes.py (+136/-0)
nova/tests/test_xenapi.py (+1/-38)
nova/tests/utils.py (+68/-0)
nova/tests/vmwareapi/db_fakes.py (+4/-1)
nova/tests/xenapi/stubs.py (+4/-0)
nova/utils.py (+50/-0)
nova/virt/driver.py (+7/-11)
nova/virt/fake.py (+11/-2)
nova/virt/libvirt.xml.template (+3/-1)
nova/virt/libvirt/connection.py (+5/-0)
nova/virt/libvirt/vif.py (+1/-1)
nova/virt/xenapi/fake.py (+3/-0)
nova/virt/xenapi/vmops.py (+10/-3)
nova/volume/api.py (+88/-6)
nova/volume/driver.py (+272/-0)
nova/volume/manager.py (+78/-0)
nova/volume/volume_types.py (+166/-0)
nova/vsa/__init__.py (+18/-0)
nova/vsa/api.py (+411/-0)
nova/vsa/connection.py (+25/-0)
nova/vsa/fake.py (+22/-0)
nova/vsa/manager.py (+179/-0)
nova/vsa/utils.py (+80/-0)
po/cs.po (+4/-22)
po/de.po (+4/-40)
po/es.po (+4/-480)
po/it.po (+1/-1)
po/ja.po (+4/-475)
po/pt_BR.po (+20/-167)
po/ru.po (+4/-122)
po/tl.po (+4/-9)
po/uk.po (+4/-38)
run_tests.sh (+10/-8)
tools/pip-requires (+1/-0)
To merge this branch: bzr merge lp:~ttx/nova/diablo-rbp-merge
Reviewer: Soren Hansen — Review type: community — Date requested: 2011-09-09 — Status: Approve (on 2011-09-09)
Review via email: mp+74739@code.launchpad.net

Commit message

Merge diablo-RBP development up to rev 1541

Description of the change

Merge diablo-RBP development up to rev 1541

This branch should be identical to lp:nova rev 1541.
You can verify this by running: bzr diff --old lp:nova -r 1541 --new lp:~ttx/nova/diablo-rbp-merge

To post a comment you must log in.
Soren Hansen (soren) wrote :

lgtm

review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file '.mailmap'
2--- .mailmap 2011-08-09 21:45:31 +0000
3+++ .mailmap 2011-09-09 09:29:27 +0000
4@@ -15,6 +15,7 @@
5 <code@term.ie> <termie@preciousroy.local>
6 <corywright@gmail.com> <cory.wright@rackspace.com>
7 <dan@nicira.com> <danwent@dan-xs3-cs>
8+<dan@nicira.com> danwent@gmail.com
9 <devin.carlen@gmail.com> <devcamcar@illian.local>
10 <ewan.mellor@citrix.com> <emellor@silver>
11 <itoumsn@nttdata.co.jp> <itoumsn@shayol>
12
13=== modified file 'Authors'
14--- Authors 2011-08-23 04:17:57 +0000
15+++ Authors 2011-09-09 09:29:27 +0000
16@@ -11,6 +11,7 @@
17 Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
18 Arvind Somya <asomya@cisco.com>
19 Bilal Akhtar <bilalakhtar@ubuntu.com>
20+Brad Hall <brad@nicira.com>
21 Brian Lamar <brian.lamar@rackspace.com>
22 Brian Schott <bschott@isi.edu>
23 Brian Waldon <brian.waldon@rackspace.com>
24@@ -30,6 +31,7 @@
25 Devin Carlen <devin.carlen@gmail.com>
26 Donal Lafferty <donal.lafferty@citrix.com>
27 Ed Leafe <ed@leafe.com>
28+Edouard Thuleau <thuleau@gmail.com>
29 Eldar Nugaev <reldan@oscloud.ru>
30 Eric Day <eday@oddments.org>
31 Eric Windisch <eric@cloudscaling.com>
32@@ -60,6 +62,7 @@
33 Justin Santa Barbara <justin@fathomdb.com>
34 Justin Shepherd <jshepher@rackspace.com>
35 Kei Masumoto <masumotok@nttdata.co.jp>
36+Keisuke Tagami <tagami.keisuke@lab.ntt.co.jp>
37 masumoto<masumotok@nttdata.co.jp>
38 Ken Pepple <ken.pepple@gmail.com>
39 Kevin Bringard <kbringard@attinteractive.com>
40@@ -69,6 +72,7 @@
41 Lorin Hochstein <lorin@isi.edu>
42 Lvov Maxim <usrleon@gmail.com>
43 Mandell Degerness <mdegerne@gmail.com>
44+Mark McLoughlin <markmc@redhat.com>
45 Mark Washenberger <mark.washenberger@rackspace.com>
46 Masanori Itoh <itoumsn@nttdata.co.jp>
47 Matt Dietz <matt.dietz@rackspace.com>
48@@ -100,6 +104,7 @@
49 Soren Hansen <soren.hansen@rackspace.com>
50 Stephanie Reese <reese.sm@gmail.com>
51 Thierry Carrez <thierry@openstack.org>
52+Tim Simpson <tim.simpson@rackspace.com>
53 Todd Willey <todd@ansolabs.com>
54 Trey Morris <trey.morris@rackspace.com>
55 Troy Toman <troy.toman@rackspace.com>
56
57=== modified file 'bin/instance-usage-audit'
58--- bin/instance-usage-audit 2011-06-28 20:37:05 +0000
59+++ bin/instance-usage-audit 2011-09-09 09:29:27 +0000
60@@ -102,9 +102,8 @@
61 logging.setup()
62 begin, end = time_period(FLAGS.instance_usage_audit_period)
63 print "Creating usages for %s until %s" % (str(begin), str(end))
64- instances = db.instance_get_active_by_window(context.get_admin_context(),
65- begin,
66- end)
67+ ctxt = context.get_admin_context()
68+ instances = db.instance_get_active_by_window_joined(ctxt, begin, end)
69 print "%s instances" % len(instances)
70 for instance_ref in instances:
71 usage_info = utils.usage_from_instance(instance_ref,
72
73=== modified file 'bin/nova-ajax-console-proxy'
74--- bin/nova-ajax-console-proxy 2011-08-18 17:55:39 +0000
75+++ bin/nova-ajax-console-proxy 2011-09-09 09:29:27 +0000
76@@ -113,11 +113,10 @@
77 AjaxConsoleProxy.tokens[kwargs['token']] = \
78 {'args': kwargs, 'last_activity': time.time()}
79
80- conn = rpc.create_connection(new=True)
81- consumer = rpc.create_consumer(
82- conn,
83- FLAGS.ajax_console_proxy_topic,
84- TopicProxy)
85+ self.conn = rpc.create_connection(new=True)
86+ self.conn.create_consumer(
87+ FLAGS.ajax_console_proxy_topic,
88+ TopicProxy)
89
90 def delete_expired_tokens():
91 now = time.time()
92@@ -129,7 +128,7 @@
93 for k in to_delete:
94 del AjaxConsoleProxy.tokens[k]
95
96- utils.LoopingCall(consumer.fetch, enable_callbacks=True).start(0.1)
97+ self.conn.consume_in_thread()
98 utils.LoopingCall(delete_expired_tokens).start(1)
99
100 if __name__ == '__main__':
101@@ -142,3 +141,4 @@
102 server = wsgi.Server("AJAX Console Proxy", acp, port=acp_port)
103 service.serve(server)
104 service.wait()
105+ self.conn.close()
106
107=== modified file 'bin/nova-api'
108--- bin/nova-api 2011-08-18 17:55:39 +0000
109+++ bin/nova-api 2011-09-09 09:29:27 +0000
110@@ -45,6 +45,7 @@
111 utils.default_flagfile()
112 flags.FLAGS(sys.argv)
113 logging.setup()
114+ utils.monkey_patch()
115 servers = []
116 for api in flags.FLAGS.enabled_apis:
117 servers.append(service.WSGIService(api))
118
119=== modified file 'bin/nova-api-ec2'
120--- bin/nova-api-ec2 2011-08-18 18:31:28 +0000
121+++ bin/nova-api-ec2 2011-09-09 09:29:27 +0000
122@@ -41,6 +41,7 @@
123 utils.default_flagfile()
124 flags.FLAGS(sys.argv)
125 logging.setup()
126+ utils.monkey_patch()
127 server = service.WSGIService('ec2')
128 service.serve(server)
129 service.wait()
130
131=== modified file 'bin/nova-api-os'
132--- bin/nova-api-os 2011-08-18 18:31:28 +0000
133+++ bin/nova-api-os 2011-09-09 09:29:27 +0000
134@@ -41,6 +41,7 @@
135 utils.default_flagfile()
136 flags.FLAGS(sys.argv)
137 logging.setup()
138+ utils.monkey_patch()
139 server = service.WSGIService('osapi')
140 service.serve(server)
141 service.wait()
142
143=== modified file 'bin/nova-compute'
144--- bin/nova-compute 2011-08-18 18:28:02 +0000
145+++ bin/nova-compute 2011-09-09 09:29:27 +0000
146@@ -43,6 +43,7 @@
147 utils.default_flagfile()
148 flags.FLAGS(sys.argv)
149 logging.setup()
150+ utils.monkey_patch()
151 server = service.Service.create(binary='nova-compute')
152 service.serve(server)
153 service.wait()
154
155=== modified file 'bin/nova-manage'
156--- bin/nova-manage 2011-08-23 12:23:07 +0000
157+++ bin/nova-manage 2011-09-09 09:29:27 +0000
158@@ -53,16 +53,17 @@
159 CLI interface for nova management.
160 """
161
162+import ast
163 import gettext
164 import glob
165 import json
166 import math
167 import netaddr
168+from optparse import OptionParser
169 import os
170 import sys
171 import time
172
173-from optparse import OptionParser
174
175 # If ../nova/__init__.py exists, add ../ to Python search path, so that
176 # it will override what happens to be installed in /usr/(local/)lib/python...
177@@ -85,11 +86,13 @@
178 from nova import rpc
179 from nova import utils
180 from nova import version
181+from nova import vsa
182 from nova.api.ec2 import ec2utils
183 from nova.auth import manager
184 from nova.cloudpipe import pipelib
185 from nova.compute import instance_types
186 from nova.db import migration
187+from nova.volume import volume_types
188
189 FLAGS = flags.FLAGS
190 flags.DECLARE('fixed_range', 'nova.network.manager')
191@@ -163,7 +166,7 @@
192 print address,
193 print vpn['host'],
194 print ec2utils.id_to_ec2_id(vpn['id']),
195- print vpn['state_description'],
196+ print vpn['vm_state'],
197 print state
198 else:
199 print None
200@@ -682,10 +685,17 @@
201 help='Multi host')
202 @args('--dns1', dest="dns1", metavar="<DNS Address>", help='First DNS')
203 @args('--dns2', dest="dns2", metavar="<DNS Address>", help='Second DNS')
204+ @args('--uuid', dest="net_uuid", metavar="<network uuid>",
205+ help='Network UUID')
206+ @args('--project_id', dest="project_id", metavar="<project id>",
207+ help='Project id')
208+ @args('--priority', dest="priority", metavar="<number>",
209+ help='Network interface priority')
210 def create(self, label=None, fixed_range_v4=None, num_networks=None,
211 network_size=None, multi_host=None, vlan_start=None,
212 vpn_start=None, fixed_range_v6=None, gateway_v6=None,
213- bridge=None, bridge_interface=None, dns1=None, dns2=None):
214+ bridge=None, bridge_interface=None, dns1=None, dns2=None,
215+ project_id=None, priority=None, uuid=None):
216 """Creates fixed ips for host by range"""
217
218 # check for certain required inputs
219@@ -762,7 +772,10 @@
220 bridge=bridge,
221 bridge_interface=bridge_interface,
222 dns1=dns1,
223- dns2=dns2)
224+ dns2=dns2,
225+ project_id=project_id,
226+ priority=priority,
227+ uuid=uuid)
228
229 def list(self):
230 """List all created networks"""
231@@ -787,16 +800,62 @@
232 network.project_id,
233 network.uuid)
234
235+ def quantum_list(self):
236+ """List all created networks with Quantum-relevant fields"""
237+ _fmt = "%-32s\t%-10s\t%-10s\t%s , %s"
238+ print _fmt % (_('uuid'),
239+ _('project'),
240+ _('priority'),
241+ _('cidr_v4'),
242+ _('cidr_v6'))
243+ for network in db.network_get_all(context.get_admin_context()):
244+ print _fmt % (network.uuid,
245+ network.project_id,
246+ network.priority,
247+ network.cidr,
248+ network.cidr_v6)
249+
250 @args('--network', dest="fixed_range", metavar='<x.x.x.x/yy>',
251 help='Network to delete')
252 def delete(self, fixed_range):
253 """Deletes a network"""
254- network = db.network_get_by_cidr(context.get_admin_context(), \
255- fixed_range)
256- if network.project_id is not None:
257- raise ValueError(_('Network must be disassociated from project %s'
258- ' before delete' % network.project_id))
259- db.network_delete_safe(context.get_admin_context(), network.id)
260+
261+ # delete the network
262+ net_manager = utils.import_object(FLAGS.network_manager)
263+ net_manager.delete_network(context.get_admin_context(), fixed_range)
264+
265+ @args('--network', dest="fixed_range", metavar='<x.x.x.x/yy>',
266+ help='Network to modify')
267+ @args('--project', dest="project", metavar='<project name>',
268+ help='Project name to associate')
269+ @args('--host', dest="host", metavar='<host>',
270+ help='Host to associate')
271+ @args('--disassociate-project', action="store_true", dest='dis_project',
272+ default=False, help='Disassociate Network from Project')
273+ @args('--disassociate-host', action="store_true", dest='dis_host',
274+ default=False, help='Disassociate Host from Project')
275+ def modify(self, fixed_range, project=None, host=None,
276+ dis_project=None, dis_host=None):
277+ """Associate/Disassociate Network with Project and/or Host
278+ arguments: network project host
279+ leave any field blank to ignore it
280+ """
281+ admin_context = context.get_admin_context()
282+ network = db.network_get_by_cidr(admin_context, fixed_range)
283+ net = {}
284+ #User can choose the following actions each for project and host.
285+ #1) Associate (set not None value given by project/host parameter)
286+ #2) Disassociate (set None by disassociate parameter)
287+ #3) Keep unchanged (project/host key is not added to 'net')
288+ if project:
289+ net['project_id'] = project
290+ elif dis_project:
291+ net['project_id'] = None
292+ if host:
293+ net['host'] = host
294+ elif dis_host:
295+ net['host'] = None
296+ db.network_update(admin_context, network['id'], net)
297
298
299 class VmCommands(object):
300@@ -833,7 +892,7 @@
301 instance['hostname'],
302 instance['host'],
303 instance['instance_type'].name,
304- instance['state_description'],
305+ instance['vm_state'],
306 instance['launched_at'],
307 instance['image_ref'],
308 instance['kernel_id'],
309@@ -1064,6 +1123,477 @@
310 self.list()
311
312
313+class VsaCommands(object):
314+ """Methods for dealing with VSAs"""
315+
316+ def __init__(self, *args, **kwargs):
317+ self.manager = manager.AuthManager()
318+ self.vsa_api = vsa.API()
319+ self.context = context.get_admin_context()
320+
321+ self._format_str_vsa = "%(id)-5s %(vsa_id)-15s %(name)-25s "\
322+ "%(type)-10s %(vcs)-6s %(drives)-9s %(stat)-10s "\
323+ "%(az)-10s %(time)-10s"
324+ self._format_str_volume = "\t%(id)-4s %(name)-15s %(size)-5s "\
325+ "%(stat)-10s %(att)-20s %(time)s"
326+ self._format_str_drive = "\t%(id)-4s %(name)-15s %(size)-5s "\
327+ "%(stat)-10s %(host)-20s %(type)-4s %(tname)-10s %(time)s"
328+ self._format_str_instance = "\t%(id)-4s %(name)-10s %(dname)-20s "\
329+ "%(image)-12s %(type)-10s %(fl_ip)-15s %(fx_ip)-15s "\
330+ "%(stat)-10s %(host)-15s %(time)s"
331+
332+ def _print_vsa_header(self):
333+ print self._format_str_vsa %\
334+ dict(id=_('ID'),
335+ vsa_id=_('vsa_id'),
336+ name=_('displayName'),
337+ type=_('vc_type'),
338+ vcs=_('vc_cnt'),
339+ drives=_('drive_cnt'),
340+ stat=_('status'),
341+ az=_('AZ'),
342+ time=_('createTime'))
343+
344+ def _print_vsa(self, vsa):
345+ print self._format_str_vsa %\
346+ dict(id=vsa['id'],
347+ vsa_id=vsa['name'],
348+ name=vsa['display_name'],
349+ type=vsa['vsa_instance_type'].get('name', None),
350+ vcs=vsa['vc_count'],
351+ drives=vsa['vol_count'],
352+ stat=vsa['status'],
353+ az=vsa['availability_zone'],
354+ time=str(vsa['created_at']))
355+
356+ def _print_volume_header(self):
357+ print _(' === Volumes ===')
358+ print self._format_str_volume %\
359+ dict(id=_('ID'),
360+ name=_('name'),
361+ size=_('size'),
362+ stat=_('status'),
363+ att=_('attachment'),
364+ time=_('createTime'))
365+
366+ def _print_volume(self, vol):
367+ print self._format_str_volume %\
368+ dict(id=vol['id'],
369+ name=vol['display_name'] or vol['name'],
370+ size=vol['size'],
371+ stat=vol['status'],
372+ att=vol['attach_status'],
373+ time=str(vol['created_at']))
374+
375+ def _print_drive_header(self):
376+ print _(' === Drives ===')
377+ print self._format_str_drive %\
378+ dict(id=_('ID'),
379+ name=_('name'),
380+ size=_('size'),
381+ stat=_('status'),
382+ host=_('host'),
383+ type=_('type'),
384+ tname=_('typeName'),
385+ time=_('createTime'))
386+
387+ def _print_drive(self, drive):
388+ if drive['volume_type_id'] is not None and drive.get('volume_type'):
389+ drive_type_name = drive['volume_type'].get('name')
390+ else:
391+ drive_type_name = ''
392+
393+ print self._format_str_drive %\
394+ dict(id=drive['id'],
395+ name=drive['display_name'],
396+ size=drive['size'],
397+ stat=drive['status'],
398+ host=drive['host'],
399+ type=drive['volume_type_id'],
400+ tname=drive_type_name,
401+ time=str(drive['created_at']))
402+
403+ def _print_instance_header(self):
404+ print _(' === Instances ===')
405+ print self._format_str_instance %\
406+ dict(id=_('ID'),
407+ name=_('name'),
408+ dname=_('disp_name'),
409+ image=_('image'),
410+ type=_('type'),
411+ fl_ip=_('floating_IP'),
412+ fx_ip=_('fixed_IP'),
413+ stat=_('status'),
414+ host=_('host'),
415+ time=_('createTime'))
416+
417+ def _print_instance(self, vc):
418+
419+ fixed_addr = None
420+ floating_addr = None
421+ if vc['fixed_ips']:
422+ fixed = vc['fixed_ips'][0]
423+ fixed_addr = fixed['address']
424+ if fixed['floating_ips']:
425+ floating_addr = fixed['floating_ips'][0]['address']
426+ floating_addr = floating_addr or fixed_addr
427+
428+ print self._format_str_instance %\
429+ dict(id=vc['id'],
430+ name=ec2utils.id_to_ec2_id(vc['id']),
431+ dname=vc['display_name'],
432+ image=('ami-%08x' % int(vc['image_ref'])),
433+ type=vc['instance_type']['name'],
434+ fl_ip=floating_addr,
435+ fx_ip=fixed_addr,
436+ stat=vc['vm_state'],
437+ host=vc['host'],
438+ time=str(vc['created_at']))
439+
440+ def _list(self, context, vsas, print_drives=False,
441+ print_volumes=False, print_instances=False):
442+ if vsas:
443+ self._print_vsa_header()
444+
445+ for vsa in vsas:
446+ self._print_vsa(vsa)
447+ vsa_id = vsa.get('id')
448+
449+ if print_instances:
450+ instances = self.vsa_api.get_all_vsa_instances(context, vsa_id)
451+ if instances:
452+ print
453+ self._print_instance_header()
454+ for instance in instances:
455+ self._print_instance(instance)
456+ print
457+
458+ if print_drives:
459+ drives = self.vsa_api.get_all_vsa_drives(context, vsa_id)
460+ if drives:
461+ self._print_drive_header()
462+ for drive in drives:
463+ self._print_drive(drive)
464+ print
465+
466+ if print_volumes:
467+ volumes = self.vsa_api.get_all_vsa_volumes(context, vsa_id)
468+ if volumes:
469+ self._print_volume_header()
470+ for volume in volumes:
471+ self._print_volume(volume)
472+ print
473+
474+ @args('--storage', dest='storage',
475+ metavar="[{'drive_name': 'type', 'num_drives': N, 'size': M},..]",
476+ help='Initial storage allocation for VSA')
477+ @args('--name', dest='name', metavar="<name>", help='VSA name')
478+ @args('--description', dest='description', metavar="<description>",
479+ help='VSA description')
480+ @args('--vc', dest='vc_count', metavar="<number>", help='Number of VCs')
481+ @args('--instance_type', dest='instance_type_name', metavar="<name>",
482+ help='Instance type name')
483+ @args('--image', dest='image_name', metavar="<name>", help='Image name')
484+ @args('--shared', dest='shared', action="store_true", default=False,
485+ help='Use shared drives')
486+ @args('--az', dest='az', metavar="<zone:host>", help='Availability zone')
487+ @args('--user', dest="user_id", metavar='<User name>',
488+ help='User name')
489+ @args('--project', dest="project_id", metavar='<Project name>',
490+ help='Project name')
491+ def create(self, storage='[]', name=None, description=None, vc_count=1,
492+ instance_type_name=None, image_name=None, shared=None,
493+ az=None, user_id=None, project_id=None):
494+ """Create a VSA."""
495+
496+ if project_id is None:
497+ try:
498+ project_id = os.getenv("EC2_ACCESS_KEY").split(':')[1]
499+ except Exception as exc:
500+ print _("Failed to retrieve project id: %(exc)s") % exc
501+ raise
502+
503+ if user_id is None:
504+ try:
505+ project = self.manager.get_project(project_id)
506+ user_id = project.project_manager_id
507+ except Exception as exc:
508+ print _("Failed to retrieve user info: %(exc)s") % exc
509+ raise
510+
511+ is_admin = self.manager.is_admin(user_id)
512+ ctxt = context.RequestContext(user_id, project_id, is_admin)
513+ if not is_admin and \
514+ not self.manager.is_project_member(user_id, project_id):
515+ msg = _("%(user_id)s must be an admin or a "
516+ "member of %(project_id)s")
517+ LOG.warn(msg % locals())
518+ raise ValueError(msg % locals())
519+
520+ # Sanity check for storage string
521+ storage_list = []
522+ if storage is not None:
523+ try:
524+ storage_list = ast.literal_eval(storage)
525+ except:
526+ print _("Invalid string format %s") % storage
527+ raise
528+
529+ for node in storage_list:
530+ if ('drive_name' not in node) or ('num_drives' not in node):
531+ print (_("Invalid string format for element %s. " \
532+ "Expecting keys 'drive_name' & 'num_drives'"),
533+ str(node))
534+ raise KeyError
535+
536+ if instance_type_name == '':
537+ instance_type_name = None
538+ instance_type = instance_types.get_instance_type_by_name(
539+ instance_type_name)
540+
541+ if image_name == '':
542+ image_name = None
543+
544+ if shared in [None, False, "--full_drives"]:
545+ shared = False
546+ elif shared in [True, "--shared"]:
547+ shared = True
548+ else:
549+ raise ValueError(_('Shared parameter should be set either to "\
550+ "--shared or --full_drives'))
551+
552+ values = {
553+ 'display_name': name,
554+ 'display_description': description,
555+ 'vc_count': int(vc_count),
556+ 'instance_type': instance_type,
557+ 'image_name': image_name,
558+ 'availability_zone': az,
559+ 'storage': storage_list,
560+ 'shared': shared,
561+ }
562+
563+ result = self.vsa_api.create(ctxt, **values)
564+ self._list(ctxt, [result])
565+
566+ @args('--id', dest='vsa_id', metavar="<vsa_id>", help='VSA ID')
567+ @args('--name', dest='name', metavar="<name>", help='VSA name')
568+ @args('--description', dest='description', metavar="<description>",
569+ help='VSA description')
570+ @args('--vc', dest='vc_count', metavar="<number>", help='Number of VCs')
571+ def update(self, vsa_id, name=None, description=None, vc_count=None):
572+ """Updates name/description of vsa and number of VCs."""
573+
574+ values = {}
575+ if name is not None:
576+ values['display_name'] = name
577+ if description is not None:
578+ values['display_description'] = description
579+ if vc_count is not None:
580+ values['vc_count'] = int(vc_count)
581+
582+ vsa_id = ec2utils.ec2_id_to_id(vsa_id)
583+ result = self.vsa_api.update(self.context, vsa_id=vsa_id, **values)
584+ self._list(self.context, [result])
585+
586+ @args('--id', dest='vsa_id', metavar="<vsa_id>", help='VSA ID')
587+ def delete(self, vsa_id):
588+ """Delete a VSA."""
589+ vsa_id = ec2utils.ec2_id_to_id(vsa_id)
590+ self.vsa_api.delete(self.context, vsa_id)
591+
592+ @args('--id', dest='vsa_id', metavar="<vsa_id>",
593+ help='VSA ID (optional)')
594+ @args('--all', dest='all', action="store_true", default=False,
595+ help='Show all available details')
596+ @args('--drives', dest='drives', action="store_true",
597+ help='Include drive-level details')
598+ @args('--volumes', dest='volumes', action="store_true",
599+ help='Include volume-level details')
600+ @args('--instances', dest='instances', action="store_true",
601+ help='Include instance-level details')
602+ def list(self, vsa_id=None, all=False,
603+ drives=False, volumes=False, instances=False):
604+ """Describe all available VSAs (or particular one)."""
605+
606+ vsas = []
607+ if vsa_id is not None:
608+ internal_id = ec2utils.ec2_id_to_id(vsa_id)
609+ vsa = self.vsa_api.get(self.context, internal_id)
610+ vsas.append(vsa)
611+ else:
612+ vsas = self.vsa_api.get_all(self.context)
613+
614+ if all:
615+ drives = volumes = instances = True
616+
617+ self._list(self.context, vsas, drives, volumes, instances)
618+
619+ def update_capabilities(self):
620+ """Forces updates capabilities on all nova-volume nodes."""
621+
622+ rpc.fanout_cast(context.get_admin_context(),
623+ FLAGS.volume_topic,
624+ {"method": "notification",
625+ "args": {"event": "startup"}})
626+
627+
628+class VsaDriveTypeCommands(object):
629+ """Methods for dealing with VSA drive types"""
630+
631+ def __init__(self, *args, **kwargs):
632+ super(VsaDriveTypeCommands, self).__init__(*args, **kwargs)
633+ self.context = context.get_admin_context()
634+ self._drive_type_template = '%s_%sGB_%sRPM'
635+
636+ def _list(self, drives):
637+ format_str = "%-5s %-30s %-10s %-10s %-10s %-20s %-10s %s"
638+ if len(drives):
639+ print format_str %\
640+ (_('ID'),
641+ _('name'),
642+ _('type'),
643+ _('size_gb'),
644+ _('rpm'),
645+ _('capabilities'),
646+ _('visible'),
647+ _('createTime'))
648+
649+ for name, vol_type in drives.iteritems():
650+ drive = vol_type.get('extra_specs')
651+ print format_str %\
652+ (str(vol_type['id']),
653+ drive['drive_name'],
654+ drive['drive_type'],
655+ drive['drive_size'],
656+ drive['drive_rpm'],
657+ drive.get('capabilities', ''),
658+ str(drive.get('visible', '')),
659+ str(vol_type['created_at']))
660+
661+ @args('--type', dest='type', metavar="<type>",
662+ help='Drive type (SATA, SAS, SSD, etc.)')
663+ @args('--size', dest='size_gb', metavar="<gb>", help='Drive size in GB')
664+ @args('--rpm', dest='rpm', metavar="<rpm>", help='RPM')
665+ @args('--capabilities', dest='capabilities', default=None,
666+ metavar="<string>", help='Different capabilities')
667+ @args('--hide', dest='hide', action="store_true", default=False,
668+ help='Show or hide drive')
669+ @args('--name', dest='name', metavar="<name>", help='Drive name')
670+ def create(self, type, size_gb, rpm, capabilities=None,
671+ hide=False, name=None):
672+ """Create drive type."""
673+
674+ hide = True if hide in [True, "True", "--hide", "hide"] else False
675+
676+ if name is None:
677+ name = self._drive_type_template % (type, size_gb, rpm)
678+
679+ extra_specs = {'type': 'vsa_drive',
680+ 'drive_name': name,
681+ 'drive_type': type,
682+ 'drive_size': size_gb,
683+ 'drive_rpm': rpm,
684+ 'visible': True,
685+ }
686+ if hide:
687+ extra_specs['visible'] = False
688+
689+ if capabilities is not None and capabilities != '':
690+ extra_specs['capabilities'] = capabilities
691+
692+ volume_types.create(self.context, name, extra_specs)
693+ result = volume_types.get_volume_type_by_name(self.context, name)
694+ self._list({name: result})
695+
696+ @args('--name', dest='name', metavar="<name>", help='Drive name')
697+ @args('--purge', action="store_true", dest='purge', default=False,
698+ help='purge record from database')
699+ def delete(self, name, purge):
700+ """Marks instance types / flavors as deleted"""
701+ try:
702+ if purge:
703+ volume_types.purge(self.context, name)
704+ verb = "purged"
705+ else:
706+ volume_types.destroy(self.context, name)
707+ verb = "deleted"
708+ except exception.ApiError:
709+ print "Valid volume type name is required"
710+ sys.exit(1)
711+ except exception.DBError, e:
712+ print "DB Error: %s" % e
713+ sys.exit(2)
714+ except:
715+ sys.exit(3)
716+ else:
717+ print "%s %s" % (name, verb)
718+
719+ @args('--all', dest='all', action="store_true", default=False,
720+ help='Show all drives (including invisible)')
721+ @args('--name', dest='name', metavar="<name>",
722+ help='Show only specified drive')
723+ def list(self, all=False, name=None):
724+ """Describe all available VSA drive types (or particular one)."""
725+
726+ all = False if all in ["--all", False, "False"] else True
727+
728+ search_opts = {'extra_specs': {'type': 'vsa_drive'}}
729+ if name is not None:
730+ search_opts['extra_specs']['name'] = name
731+
732+ if all == False:
733+ search_opts['extra_specs']['visible'] = '1'
734+
735+ drives = volume_types.get_all_types(self.context,
736+ search_opts=search_opts)
737+ self._list(drives)
738+
739+ @args('--name', dest='name', metavar="<name>", help='Drive name')
740+ @args('--type', dest='type', metavar="<type>",
741+ help='Drive type (SATA, SAS, SSD, etc.)')
742+ @args('--size', dest='size_gb', metavar="<gb>", help='Drive size in GB')
743+ @args('--rpm', dest='rpm', metavar="<rpm>", help='RPM')
744+ @args('--capabilities', dest='capabilities', default=None,
745+ metavar="<string>", help='Different capabilities')
746+ @args('--visible', dest='visible',
747+ metavar="<show|hide>", help='Show or hide drive')
748+ def update(self, name, type=None, size_gb=None, rpm=None,
749+ capabilities=None, visible=None):
750+ """Update drive type."""
751+
752+ volume_type = volume_types.get_volume_type_by_name(self.context, name)
753+
754+ extra_specs = {'type': 'vsa_drive'}
755+
756+ if type:
757+ extra_specs['drive_type'] = type
758+
759+ if size_gb:
760+ extra_specs['drive_size'] = size_gb
761+
762+ if rpm:
763+ extra_specs['drive_rpm'] = rpm
764+
765+ if capabilities:
766+ extra_specs['capabilities'] = capabilities
767+
768+ if visible is not None:
769+ if visible in ["show", True, "True"]:
770+ extra_specs['visible'] = True
771+ elif visible in ["hide", False, "False"]:
772+ extra_specs['visible'] = False
773+ else:
774+ raise ValueError(_('visible parameter should be set to '\
775+ 'show or hide'))
776+
777+ db.api.volume_type_extra_specs_update_or_create(self.context,
778+ volume_type['id'],
779+ extra_specs)
780+ result = volume_types.get_volume_type_by_name(self.context, name)
781+ self._list({name: result})
782+
783+
784 class VolumeCommands(object):
785 """Methods for dealing with a cloud in an odd state"""
786
787@@ -1450,6 +1980,7 @@
788 ('agent', AgentBuildCommands),
789 ('config', ConfigCommands),
790 ('db', DbCommands),
791+ ('drive', VsaDriveTypeCommands),
792 ('fixed', FixedIpCommands),
793 ('flavor', InstanceTypeCommands),
794 ('floating', FloatingIpCommands),
795@@ -1465,7 +1996,8 @@
796 ('version', VersionCommands),
797 ('vm', VmCommands),
798 ('volume', VolumeCommands),
799- ('vpn', VpnCommands)]
800+ ('vpn', VpnCommands),
801+ ('vsa', VsaCommands)]
802
803
804 def lazy_match(name, key_value_tuples):
805
806=== modified file 'bin/nova-network'
807--- bin/nova-network 2011-08-18 18:28:02 +0000
808+++ bin/nova-network 2011-09-09 09:29:27 +0000
809@@ -43,6 +43,7 @@
810 utils.default_flagfile()
811 flags.FLAGS(sys.argv)
812 logging.setup()
813+ utils.monkey_patch()
814 server = service.Service.create(binary='nova-network')
815 service.serve(server)
816 service.wait()
817
818=== modified file 'bin/nova-objectstore'
819--- bin/nova-objectstore 2011-08-18 17:55:39 +0000
820+++ bin/nova-objectstore 2011-09-09 09:29:27 +0000
821@@ -49,6 +49,7 @@
822 utils.default_flagfile()
823 FLAGS(sys.argv)
824 logging.setup()
825+ utils.monkey_patch()
826 router = s3server.S3Application(FLAGS.buckets_path)
827 server = wsgi.Server("S3 Objectstore",
828 router,
829
830=== modified file 'bin/nova-scheduler'
831--- bin/nova-scheduler 2011-08-18 18:28:43 +0000
832+++ bin/nova-scheduler 2011-09-09 09:29:27 +0000
833@@ -22,6 +22,7 @@
834 import eventlet
835 eventlet.monkey_patch()
836
837+import gettext
838 import os
839 import sys
840
841@@ -33,6 +34,7 @@
842 if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
843 sys.path.insert(0, possible_topdir)
844
845+gettext.install('nova', unicode=1)
846
847 from nova import flags
848 from nova import log as logging
849@@ -43,6 +45,7 @@
850 utils.default_flagfile()
851 flags.FLAGS(sys.argv)
852 logging.setup()
853+ utils.monkey_patch()
854 server = service.Service.create(binary='nova-scheduler')
855 service.serve(server)
856 service.wait()
857
858=== modified file 'bin/nova-volume'
859--- bin/nova-volume 2011-08-18 18:28:02 +0000
860+++ bin/nova-volume 2011-09-09 09:29:27 +0000
861@@ -43,6 +43,7 @@
862 utils.default_flagfile()
863 flags.FLAGS(sys.argv)
864 logging.setup()
865+ utils.monkey_patch()
866 server = service.Service.create(binary='nova-volume')
867 service.serve(server)
868 service.wait()
869
870=== added file 'bin/nova-vsa'
871--- bin/nova-vsa 1970-01-01 00:00:00 +0000
872+++ bin/nova-vsa 2011-09-09 09:29:27 +0000
873@@ -0,0 +1,49 @@
874+#!/usr/bin/env python
875+# vim: tabstop=4 shiftwidth=4 softtabstop=4
876+
877+# Copyright (c) 2011 Zadara Storage Inc.
878+# Copyright (c) 2011 OpenStack LLC.
879+#
880+#
881+# Licensed under the Apache License, Version 2.0 (the "License"); you may
882+# not use this file except in compliance with the License. You may obtain
883+# a copy of the License at
884+#
885+# http://www.apache.org/licenses/LICENSE-2.0
886+#
887+# Unless required by applicable law or agreed to in writing, software
888+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
889+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
890+# License for the specific language governing permissions and limitations
891+# under the License.
892+
893+"""Starter script for Nova VSA."""
894+
895+import eventlet
896+eventlet.monkey_patch()
897+
898+import os
899+import sys
900+
901+# If ../nova/__init__.py exists, add ../ to Python search path, so that
902+# it will override what happens to be installed in /usr/(local/)lib/python...
903+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
904+ os.pardir,
905+ os.pardir))
906+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
907+ sys.path.insert(0, possible_topdir)
908+
909+
910+from nova import flags
911+from nova import log as logging
912+from nova import service
913+from nova import utils
914+
915+if __name__ == '__main__':
916+ utils.default_flagfile()
917+ flags.FLAGS(sys.argv)
918+ logging.setup()
919+ utils.monkey_patch()
920+ server = service.Service.create(binary='nova-vsa')
921+ service.serve(server)
922+ service.wait()
923
924=== modified file 'contrib/nova.sh'
925--- contrib/nova.sh 2011-08-02 14:09:58 +0000
926+++ contrib/nova.sh 2011-09-09 09:29:27 +0000
927@@ -81,7 +81,7 @@
928 sudo apt-get install -y python-netaddr python-pastedeploy python-eventlet
929 sudo apt-get install -y python-novaclient python-glance python-cheetah
930 sudo apt-get install -y python-carrot python-tempita python-sqlalchemy
931- sudo apt-get install -y python-suds
932+ sudo apt-get install -y python-suds python-kombu
933
934
935 if [ "$USE_IPV6" == 1 ]; then
936
937=== modified file 'nova/api/ec2/__init__.py'
938--- nova/api/ec2/__init__.py 2011-08-22 21:24:59 +0000
939+++ nova/api/ec2/__init__.py 2011-09-09 09:29:27 +0000
940@@ -20,7 +20,10 @@
941
942 """
943
944-import httplib2
945+from urlparse import urlparse
946+
947+import eventlet
948+from eventlet.green import httplib
949 import webob
950 import webob.dec
951 import webob.exc
952@@ -35,7 +38,6 @@
953 from nova.api.ec2 import ec2utils
954 from nova.auth import manager
955
956-
957 FLAGS = flags.FLAGS
958 LOG = logging.getLogger("nova.api")
959 flags.DEFINE_integer('lockout_attempts', 5,
960@@ -158,7 +160,6 @@
961 auth_params.pop('Signature')
962
963 # Authenticate the request.
964- client = httplib2.Http()
965 creds = {'ec2Credentials': {'access': access,
966 'signature': signature,
967 'host': req.host,
968@@ -166,18 +167,24 @@
969 'path': req.path,
970 'params': auth_params,
971 }}
972- headers = {'Content-Type': 'application/json'},
973- resp, content = client.request(FLAGS.keystone_ec2_url,
974- 'POST',
975- headers=headers,
976- body=utils.dumps(creds))
977+ creds_json = utils.dumps(creds)
978+ headers = {'Content-Type': 'application/json'}
979+ o = urlparse(FLAGS.keystone_ec2_url)
980+ if o.scheme == "http":
981+ conn = httplib.HTTPConnection(o.netloc)
982+ else:
983+ conn = httplib.HTTPSConnection(o.netloc)
984+ conn.request('POST', o.path, body=creds_json, headers=headers)
985+ response = conn.getresponse().read()
986+ conn.close()
987+
988 # NOTE(vish): We could save a call to keystone by
989 # having keystone return token, tenant,
990 # user, and roles from this call.
991- result = utils.loads(content)
992+ result = utils.loads(response)
993 # TODO(vish): check for errors
994+
995 token_id = result['auth']['token']['id']
996-
997 # Authenticated!
998 req.headers['X-Auth-Token'] = token_id
999 return self.application
1000@@ -392,18 +399,20 @@
1001 except exception.InstanceNotFound as ex:
1002 LOG.info(_('InstanceNotFound raised: %s'), unicode(ex),
1003 context=context)
1004- return self._error(req, context, type(ex).__name__, ex.message)
1005+ ec2_id = ec2utils.id_to_ec2_id(ex.kwargs['instance_id'])
1006+ message = ex.message % {'instance_id': ec2_id}
1007+ return self._error(req, context, type(ex).__name__, message)
1008 except exception.VolumeNotFound as ex:
1009 LOG.info(_('VolumeNotFound raised: %s'), unicode(ex),
1010 context=context)
1011- ec2_id = ec2utils.id_to_ec2_vol_id(ex.volume_id)
1012- message = _('Volume %s not found') % ec2_id
1013+ ec2_id = ec2utils.id_to_ec2_vol_id(ex.kwargs['volume_id'])
1014+ message = ex.message % {'volume_id': ec2_id}
1015 return self._error(req, context, type(ex).__name__, message)
1016 except exception.SnapshotNotFound as ex:
1017 LOG.info(_('SnapshotNotFound raised: %s'), unicode(ex),
1018 context=context)
1019- ec2_id = ec2utils.id_to_ec2_snap_id(ex.snapshot_id)
1020- message = _('Snapshot %s not found') % ec2_id
1021+ ec2_id = ec2utils.id_to_ec2_snap_id(ex.kwargs['snapshot_id'])
1022+ message = ex.message % {'snapshot_id': ec2_id}
1023 return self._error(req, context, type(ex).__name__, message)
1024 except exception.NotFound as ex:
1025 LOG.info(_('NotFound raised: %s'), unicode(ex), context=context)
1026
1027=== modified file 'nova/api/ec2/admin.py'
1028--- nova/api/ec2/admin.py 2011-08-18 02:31:01 +0000
1029+++ nova/api/ec2/admin.py 2011-09-09 09:29:27 +0000
1030@@ -21,7 +21,6 @@
1031 """
1032
1033 import base64
1034-import datetime
1035 import netaddr
1036 import urllib
1037
1038@@ -33,6 +32,7 @@
1039 from nova import utils
1040 from nova.api.ec2 import ec2utils
1041 from nova.auth import manager
1042+from nova.compute import vm_states
1043
1044
1045 FLAGS = flags.FLAGS
1046@@ -273,8 +273,7 @@
1047 """Get the VPN instance for a project ID."""
1048 for instance in db.instance_get_all_by_project(context, project_id):
1049 if (instance['image_id'] == str(FLAGS.vpn_image_id)
1050- and not instance['state_description'] in
1051- ['shutting_down', 'shutdown']):
1052+ and not instance['vm_state'] in [vm_states.DELETED]):
1053 return instance
1054
1055 def start_vpn(self, context, project):
1056
1057=== modified file 'nova/api/ec2/cloud.py'
1058--- nova/api/ec2/cloud.py 2011-08-16 14:49:26 +0000
1059+++ nova/api/ec2/cloud.py 2011-09-09 09:29:27 +0000
1060@@ -47,6 +47,7 @@
1061 from nova import volume
1062 from nova.api.ec2 import ec2utils
1063 from nova.compute import instance_types
1064+from nova.compute import vm_states
1065 from nova.image import s3
1066
1067
1068@@ -78,6 +79,30 @@
1069 return {'private_key': private_key, 'fingerprint': fingerprint}
1070
1071
1072+# EC2 API can return the following values as documented in the EC2 API
1073+# http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/
1074+# ApiReference-ItemType-InstanceStateType.html
1075+# pending | running | shutting-down | terminated | stopping | stopped
1076+_STATE_DESCRIPTION_MAP = {
1077+ None: 'pending',
1078+ vm_states.ACTIVE: 'running',
1079+ vm_states.BUILDING: 'pending',
1080+ vm_states.REBUILDING: 'pending',
1081+ vm_states.DELETED: 'terminated',
1082+ vm_states.STOPPED: 'stopped',
1083+ vm_states.MIGRATING: 'migrate',
1084+ vm_states.RESIZING: 'resize',
1085+ vm_states.PAUSED: 'pause',
1086+ vm_states.SUSPENDED: 'suspend',
1087+ vm_states.RESCUED: 'rescue',
1088+}
1089+
1090+
1091+def state_description_from_vm_state(vm_state):
1092+ """Map the vm state to the server status string"""
1093+ return _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)
1094+
1095+
1096 # TODO(yamahata): hypervisor dependent default device name
1097 _DEFAULT_ROOT_DEVICE_NAME = '/dev/sda1'
1098 _DEFAULT_MAPPINGS = {'ami': 'sda1',
1099@@ -995,14 +1020,6 @@
1100 'status': volume['attach_status'],
1101 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}
1102
1103- @staticmethod
1104- def _convert_to_set(lst, label):
1105- if lst is None or lst == []:
1106- return None
1107- if not isinstance(lst, list):
1108- lst = [lst]
1109- return [{label: x} for x in lst]
1110-
1111 def _format_kernel_id(self, instance_ref, result, key):
1112 kernel_id = instance_ref['kernel_id']
1113 if kernel_id is None:
1114@@ -1039,11 +1056,12 @@
1115
1116 def _format_attr_instance_initiated_shutdown_behavior(instance,
1117 result):
1118- state_description = instance['state_description']
1119- state_to_value = {'stopping': 'stop',
1120- 'stopped': 'stop',
1121- 'terminating': 'terminate'}
1122- value = state_to_value.get(state_description)
1123+ vm_state = instance['vm_state']
1124+ state_to_value = {
1125+ vm_states.STOPPED: 'stopped',
1126+ vm_states.DELETED: 'terminated',
1127+ }
1128+ value = state_to_value.get(vm_state)
1129 if value:
1130 result['instanceInitiatedShutdownBehavior'] = value
1131
1132@@ -1160,7 +1178,7 @@
1133 if instance.get('security_groups'):
1134 for security_group in instance['security_groups']:
1135 security_group_names.append(security_group['name'])
1136- result['groupSet'] = CloudController._convert_to_set(
1137+ result['groupSet'] = utils.convert_to_list_dict(
1138 security_group_names, 'groupId')
1139
1140 def _format_instances(self, context, instance_id=None, use_v6=False,
1141@@ -1198,8 +1216,8 @@
1142 self._format_kernel_id(instance, i, 'kernelId')
1143 self._format_ramdisk_id(instance, i, 'ramdiskId')
1144 i['instanceState'] = {
1145- 'code': instance['state'],
1146- 'name': instance['state_description']}
1147+ 'code': instance['power_state'],
1148+ 'name': state_description_from_vm_state(instance['vm_state'])}
1149 fixed_addr = None
1150 floating_addr = None
1151 if instance['fixed_ips']:
1152@@ -1224,7 +1242,8 @@
1153 i['keyName'] = '%s (%s, %s)' % (i['keyName'],
1154 instance['project_id'],
1155 instance['host'])
1156- i['productCodesSet'] = self._convert_to_set([], 'product_codes')
1157+ i['productCodesSet'] = utils.convert_to_list_dict([],
1158+ 'product_codes')
1159 self._format_instance_type(instance, i)
1160 i['launchTime'] = instance['created_at']
1161 i['amiLaunchIndex'] = instance['launch_index']
1162@@ -1618,22 +1637,22 @@
1163 # stop the instance if necessary
1164 restart_instance = False
1165 if not no_reboot:
1166- state_description = instance['state_description']
1167+ vm_state = instance['vm_state']
1168
1169 # if the instance is in subtle state, refuse to proceed.
1170- if state_description not in ('running', 'stopping', 'stopped'):
1171+ if vm_state not in (vm_states.ACTIVE, vm_states.STOPPED):
1172 raise exception.InstanceNotRunning(instance_id=ec2_instance_id)
1173
1174- if state_description == 'running':
1175+ if vm_state == vm_states.ACTIVE:
1176 restart_instance = True
1177 self.compute_api.stop(context, instance_id=instance_id)
1178
1179 # wait instance for really stopped
1180 start_time = time.time()
1181- while state_description != 'stopped':
1182+ while vm_state != vm_states.STOPPED:
1183 time.sleep(1)
1184 instance = self.compute_api.get(context, instance_id)
1185- state_description = instance['state_description']
1186+ vm_state = instance['vm_state']
1187 # NOTE(yamahata): timeout and error. 1 hour for now for safety.
1188 # Is it too short/long?
1189 # Or is there any better way?
1190
1191=== modified file 'nova/api/openstack/common.py'
1192--- nova/api/openstack/common.py 2011-08-17 07:41:17 +0000
1193+++ nova/api/openstack/common.py 2011-09-09 09:29:27 +0000
1194@@ -27,7 +27,8 @@
1195 from nova import log as logging
1196 from nova import quota
1197 from nova.api.openstack import wsgi
1198-from nova.compute import power_state as compute_power_state
1199+from nova.compute import vm_states
1200+from nova.compute import task_states
1201
1202
1203 LOG = logging.getLogger('nova.api.openstack.common')
1204@@ -38,36 +39,61 @@
1205 XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
1206
1207
1208-_STATUS_MAP = {
1209- None: 'BUILD',
1210- compute_power_state.NOSTATE: 'BUILD',
1211- compute_power_state.RUNNING: 'ACTIVE',
1212- compute_power_state.BLOCKED: 'ACTIVE',
1213- compute_power_state.SUSPENDED: 'SUSPENDED',
1214- compute_power_state.PAUSED: 'PAUSED',
1215- compute_power_state.SHUTDOWN: 'SHUTDOWN',
1216- compute_power_state.SHUTOFF: 'SHUTOFF',
1217- compute_power_state.CRASHED: 'ERROR',
1218- compute_power_state.FAILED: 'ERROR',
1219- compute_power_state.BUILDING: 'BUILD',
1220+_STATE_MAP = {
1221+ vm_states.ACTIVE: {
1222+ 'default': 'ACTIVE',
1223+ task_states.REBOOTING: 'REBOOT',
1224+ task_states.UPDATING_PASSWORD: 'PASSWORD',
1225+ task_states.RESIZE_VERIFY: 'VERIFY_RESIZE',
1226+ },
1227+ vm_states.BUILDING: {
1228+ 'default': 'BUILD',
1229+ },
1230+ vm_states.REBUILDING: {
1231+ 'default': 'REBUILD',
1232+ },
1233+ vm_states.STOPPED: {
1234+ 'default': 'STOPPED',
1235+ },
1236+ vm_states.MIGRATING: {
1237+ 'default': 'MIGRATING',
1238+ },
1239+ vm_states.RESIZING: {
1240+ 'default': 'RESIZE',
1241+ },
1242+ vm_states.PAUSED: {
1243+ 'default': 'PAUSED',
1244+ },
1245+ vm_states.SUSPENDED: {
1246+ 'default': 'SUSPENDED',
1247+ },
1248+ vm_states.RESCUED: {
1249+ 'default': 'RESCUE',
1250+ },
1251+ vm_states.ERROR: {
1252+ 'default': 'ERROR',
1253+ },
1254+ vm_states.DELETED: {
1255+ 'default': 'DELETED',
1256+ },
1257 }
1258
1259
1260-def status_from_power_state(power_state):
1261- """Map the power state to the server status string"""
1262- return _STATUS_MAP[power_state]
1263-
1264-
1265-def power_states_from_status(status):
1266- """Map the server status string to a list of power states"""
1267- power_states = []
1268- for power_state, status_map in _STATUS_MAP.iteritems():
1269- # Skip the 'None' state
1270- if power_state is None:
1271- continue
1272- if status.lower() == status_map.lower():
1273- power_states.append(power_state)
1274- return power_states
1275+def status_from_state(vm_state, task_state='default'):
1276+ """Given vm_state and task_state, return a status string."""
1277+ task_map = _STATE_MAP.get(vm_state, dict(default='UNKNOWN_STATE'))
1278+ status = task_map.get(task_state, task_map['default'])
1279+ LOG.debug("Generated %(status)s from vm_state=%(vm_state)s "
1280+ "task_state=%(task_state)s." % locals())
1281+ return status
1282+
1283+
1284+def vm_state_from_status(status):
1285+ """Map the server status string to a vm state."""
1286+ for state, task_map in _STATE_MAP.iteritems():
1287+ status_string = task_map.get("default")
1288+ if status.lower() == status_string.lower():
1289+ return state
1290
1291
1292 def get_pagination_params(request):
1293
1294=== modified file 'nova/api/openstack/contrib/createserverext.py'
1295--- nova/api/openstack/contrib/createserverext.py 2011-08-19 22:04:25 +0000
1296+++ nova/api/openstack/contrib/createserverext.py 2011-09-09 09:29:27 +0000
1297@@ -14,18 +14,34 @@
1298 # License for the specific language governing permissions and limitations
1299 # under the License
1300
1301+from nova import utils
1302 from nova.api.openstack import create_instance_helper as helper
1303 from nova.api.openstack import extensions
1304 from nova.api.openstack import servers
1305 from nova.api.openstack import wsgi
1306
1307
1308+class CreateServerController(servers.ControllerV11):
1309+ def _build_view(self, req, instance, is_detail=False):
1310+ server = super(CreateServerController, self)._build_view(req,
1311+ instance,
1312+ is_detail)
1313+ if is_detail:
1314+ self._build_security_groups(server['server'], instance)
1315+ return server
1316+
1317+ def _build_security_groups(self, response, inst):
1318+ sg_names = []
1319+ sec_groups = inst.get('security_groups')
1320+ if sec_groups:
1321+ sg_names = [sec_group['name'] for sec_group in sec_groups]
1322+
1323+ response['security_groups'] = utils.convert_to_list_dict(sg_names,
1324+ 'name')
1325+
1326+
1327 class Createserverext(extensions.ExtensionDescriptor):
1328- """The servers create ext
1329-
1330- Exposes addFixedIp and removeFixedIp actions on servers.
1331-
1332- """
1333+ """The servers create ext"""
1334 def get_name(self):
1335 return "Createserverext"
1336
1337@@ -58,7 +74,7 @@
1338 deserializer = wsgi.RequestDeserializer(body_deserializers)
1339
1340 res = extensions.ResourceExtension('os-create-server-ext',
1341- controller=servers.ControllerV11(),
1342+ controller=CreateServerController(),
1343 deserializer=deserializer,
1344 serializer=serializer)
1345 resources.append(res)
1346
1347=== modified file 'nova/api/openstack/contrib/floating_ips.py'
1348--- nova/api/openstack/contrib/floating_ips.py 2011-08-22 12:28:12 +0000
1349+++ nova/api/openstack/contrib/floating_ips.py 2011-09-09 09:29:27 +0000
1350@@ -36,9 +36,9 @@
1351 result['fixed_ip'] = floating_ip['fixed_ip']['address']
1352 except (TypeError, KeyError):
1353 result['fixed_ip'] = None
1354- if 'instance' in floating_ip:
1355- result['instance_id'] = floating_ip['instance']['id']
1356- else:
1357+ try:
1358+ result['instance_id'] = floating_ip['fixed_ip']['instance_id']
1359+ except (TypeError, KeyError):
1360 result['instance_id'] = None
1361 return {'floating_ip': result}
1362
1363@@ -96,7 +96,8 @@
1364 except rpc.RemoteError as ex:
1365 # NOTE(tr3buchet) - why does this block exist?
1366 if ex.exc_type == 'NoMoreFloatingIps':
1367- raise exception.NoMoreFloatingIps()
1368+ msg = _("No more floating ips available.")
1369+ raise webob.exc.HTTPBadRequest(explanation=msg)
1370 else:
1371 raise
1372
1373@@ -106,7 +107,7 @@
1374 context = req.environ['nova.context']
1375 floating_ip = self.network_api.get_floating_ip(context, id)
1376
1377- if 'fixed_ip' in floating_ip:
1378+ if floating_ip.get('fixed_ip'):
1379 self.network_api.disassociate_floating_ip(context,
1380 floating_ip['address'])
1381
1382@@ -138,7 +139,11 @@
1383 msg = _("Address not specified")
1384 raise webob.exc.HTTPBadRequest(explanation=msg)
1385
1386- self.compute_api.associate_floating_ip(context, instance_id, address)
1387+ try:
1388+ self.compute_api.associate_floating_ip(context, instance_id,
1389+ address)
1390+ except exception.ApiError, e:
1391+ raise webob.exc.HTTPBadRequest(explanation=e.message)
1392
1393 return webob.Response(status_int=202)
1394
1395@@ -156,7 +161,7 @@
1396 raise webob.exc.HTTPBadRequest(explanation=msg)
1397
1398 floating_ip = self.network_api.get_floating_ip_by_ip(context, address)
1399- if 'fixed_ip' in floating_ip:
1400+ if floating_ip.get('fixed_ip'):
1401 self.network_api.disassociate_floating_ip(context, address)
1402
1403 return webob.Response(status_int=202)
1404
1405=== added file 'nova/api/openstack/contrib/simple_tenant_usage.py'
1406--- nova/api/openstack/contrib/simple_tenant_usage.py 1970-01-01 00:00:00 +0000
1407+++ nova/api/openstack/contrib/simple_tenant_usage.py 2011-09-09 09:29:27 +0000
1408@@ -0,0 +1,236 @@
1409+# vim: tabstop=4 shiftwidth=4 softtabstop=4
1410+
1411+# Copyright 2011 OpenStack LLC.
1412+# All Rights Reserved.
1413+#
1414+# Licensed under the Apache License, Version 2.0 (the "License"); you may
1415+# not use this file except in compliance with the License. You may obtain
1416+# a copy of the License at
1417+#
1418+# http://www.apache.org/licenses/LICENSE-2.0
1419+#
1420+# Unless required by applicable law or agreed to in writing, software
1421+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
1422+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
1423+# License for the specific language governing permissions and limitations
1424+# under the License.
1425+
1426+import urlparse
1427+import webob
1428+
1429+from datetime import datetime
1430+from nova import exception
1431+from nova import flags
1432+from nova.compute import api
1433+from nova.api.openstack import extensions
1434+from nova.api.openstack import views
1435+from nova.db.sqlalchemy.session import get_session
1436+from webob import exc
1437+
1438+
1439+FLAGS = flags.FLAGS
1440+
1441+
1442+class SimpleTenantUsageController(object):
1443+ def _hours_for(self, instance, period_start, period_stop):
1444+ launched_at = instance['launched_at']
1445+ terminated_at = instance['terminated_at']
1446+ if terminated_at is not None:
1447+ if not isinstance(terminated_at, datetime):
1448+ terminated_at = datetime.strptime(terminated_at,
1449+ "%Y-%m-%d %H:%M:%S.%f")
1450+
1451+ if launched_at is not None:
1452+ if not isinstance(launched_at, datetime):
1453+ launched_at = datetime.strptime(launched_at,
1454+ "%Y-%m-%d %H:%M:%S.%f")
1455+
1456+ if terminated_at and terminated_at < period_start:
1457+ return 0
1458+ # nothing if it started after the usage report ended
1459+ if launched_at and launched_at > period_stop:
1460+ return 0
1461+ if launched_at:
1462+ # if instance launched after period_started, don't charge for first
1463+ start = max(launched_at, period_start)
1464+ if terminated_at:
1465+ # if instance stopped before period_stop, don't charge after
1466+ stop = min(period_stop, terminated_at)
1467+ else:
1468+ # instance is still running, so charge them up to current time
1469+ stop = period_stop
1470+ dt = stop - start
1471+ seconds = dt.days * 3600 * 24 + dt.seconds\
1472+ + dt.microseconds / 100000.0
1473+
1474+ return seconds / 3600.0
1475+ else:
1476+ # instance hasn't launched, so no charge
1477+ return 0
1478+
1479+ def _tenant_usages_for_period(self, context, period_start,
1480+ period_stop, tenant_id=None, detailed=True):
1481+
1482+ compute_api = api.API()
1483+ instances = compute_api.get_active_by_window(context,
1484+ period_start,
1485+ period_stop,
1486+ tenant_id)
1487+ from nova import log as logging
1488+ logging.info(instances)
1489+ rval = {}
1490+ flavors = {}
1491+
1492+ for instance in instances:
1493+ info = {}
1494+ info['hours'] = self._hours_for(instance,
1495+ period_start,
1496+ period_stop)
1497+ flavor_type = instance['instance_type_id']
1498+
1499+ if not flavors.get(flavor_type):
1500+ try:
1501+ it_ref = compute_api.get_instance_type(context,
1502+ flavor_type)
1503+ flavors[flavor_type] = it_ref
1504+ except exception.InstanceTypeNotFound:
1505+ # can't bill if there is no instance type
1506+ continue
1507+
1508+ flavor = flavors[flavor_type]
1509+
1510+ info['name'] = instance['display_name']
1511+
1512+ info['memory_mb'] = flavor['memory_mb']
1513+ info['local_gb'] = flavor['local_gb']
1514+ info['vcpus'] = flavor['vcpus']
1515+
1516+ info['tenant_id'] = instance['project_id']
1517+
1518+ info['flavor'] = flavor['name']
1519+
1520+ info['started_at'] = instance['launched_at']
1521+
1522+ info['ended_at'] = instance['terminated_at']
1523+
1524+ if info['ended_at']:
1525+ info['state'] = 'terminated'
1526+ else:
1527+ info['state'] = instance['vm_state']
1528+
1529+ now = datetime.utcnow()
1530+
1531+ if info['state'] == 'terminated':
1532+ delta = info['ended_at'] - info['started_at']
1533+ else:
1534+ delta = now - info['started_at']
1535+
1536+ info['uptime'] = delta.days * 24 * 60 + delta.seconds
1537+
1538+ if not info['tenant_id'] in rval:
1539+ summary = {}
1540+ summary['tenant_id'] = info['tenant_id']
1541+ if detailed:
1542+ summary['server_usages'] = []
1543+ summary['total_local_gb_usage'] = 0
1544+ summary['total_vcpus_usage'] = 0
1545+ summary['total_memory_mb_usage'] = 0
1546+ summary['total_hours'] = 0
1547+ summary['start'] = period_start
1548+ summary['stop'] = period_stop
1549+ rval[info['tenant_id']] = summary
1550+
1551+ summary = rval[info['tenant_id']]
1552+ summary['total_local_gb_usage'] += info['local_gb'] * info['hours']
1553+ summary['total_vcpus_usage'] += info['vcpus'] * info['hours']
1554+ summary['total_memory_mb_usage'] += info['memory_mb']\
1555+ * info['hours']
1556+
1557+ summary['total_hours'] += info['hours']
1558+ if detailed:
1559+ summary['server_usages'].append(info)
1560+
1561+ return rval.values()
1562+
1563+ def _parse_datetime(self, dtstr):
1564+ if isinstance(dtstr, datetime):
1565+ return dtstr
1566+ try:
1567+ return datetime.strptime(dtstr, "%Y-%m-%dT%H:%M:%S")
1568+ except:
1569+ try:
1570+ return datetime.strptime(dtstr, "%Y-%m-%dT%H:%M:%S.%f")
1571+ except:
1572+ return datetime.strptime(dtstr, "%Y-%m-%d %H:%M:%S.%f")
1573+
1574+ def _get_datetime_range(self, req):
1575+ qs = req.environ.get('QUERY_STRING', '')
1576+ env = urlparse.parse_qs(qs)
1577+ period_start = self._parse_datetime(env.get('start',
1578+ [datetime.utcnow().isoformat()])[0])
1579+ period_stop = self._parse_datetime(env.get('end',
1580+ [datetime.utcnow().isoformat()])[0])
1581+
1582+ detailed = bool(env.get('detailed', False))
1583+ return (period_start, period_stop, detailed)
1584+
1585+ def index(self, req):
1586+ """Retrieve tenant_usage for all tenants"""
1587+ context = req.environ['nova.context']
1588+
1589+ if not context.is_admin and FLAGS.allow_admin_api:
1590+ return webob.Response(status_int=403)
1591+
1592+ (period_start, period_stop, detailed) = self._get_datetime_range(req)
1593+ usages = self._tenant_usages_for_period(context,
1594+ period_start,
1595+ period_stop,
1596+ detailed=detailed)
1597+ return {'tenant_usages': usages}
1598+
1599+ def show(self, req, id):
1600+ """Retrieve tenant_usage for a specified tenant"""
1601+ tenant_id = id
1602+ context = req.environ['nova.context']
1603+
1604+ if not context.is_admin and FLAGS.allow_admin_api:
1605+ if tenant_id != context.project_id:
1606+ return webob.Response(status_int=403)
1607+
1608+ (period_start, period_stop, ignore) = self._get_datetime_range(req)
1609+ usage = self._tenant_usages_for_period(context,
1610+ period_start,
1611+ period_stop,
1612+ tenant_id=tenant_id,
1613+ detailed=True)
1614+ if len(usage):
1615+ usage = usage[0]
1616+ else:
1617+ usage = {}
1618+ return {'tenant_usage': usage}
1619+
1620+
1621+class Simple_tenant_usage(extensions.ExtensionDescriptor):
1622+ def get_name(self):
1623+ return "SimpleTenantUsage"
1624+
1625+ def get_alias(self):
1626+ return "os-simple-tenant-usage"
1627+
1628+ def get_description(self):
1629+ return "Simple tenant usage extension"
1630+
1631+ def get_namespace(self):
1632+ return "http://docs.openstack.org/ext/os-simple-tenant-usage/api/v1.1"
1633+
1634+ def get_updated(self):
1635+ return "2011-08-19T00:00:00+00:00"
1636+
1637+ def get_resources(self):
1638+ resources = []
1639+
1640+ res = extensions.ResourceExtension('os-simple-tenant-usage',
1641+ SimpleTenantUsageController())
1642+ resources.append(res)
1643+
1644+ return resources
1645
1646=== added file 'nova/api/openstack/contrib/virtual_storage_arrays.py'
1647--- nova/api/openstack/contrib/virtual_storage_arrays.py 1970-01-01 00:00:00 +0000
1648+++ nova/api/openstack/contrib/virtual_storage_arrays.py 2011-09-09 09:29:27 +0000
1649@@ -0,0 +1,606 @@
1650+# vim: tabstop=4 shiftwidth=4 softtabstop=4
1651+
1652+# Copyright (c) 2011 Zadara Storage Inc.
1653+# Copyright (c) 2011 OpenStack LLC.
1654+#
1655+# Licensed under the Apache License, Version 2.0 (the "License"); you may
1656+# not use this file except in compliance with the License. You may obtain
1657+# a copy of the License at
1658+#
1659+# http://www.apache.org/licenses/LICENSE-2.0
1660+#
1661+# Unless required by applicable law or agreed to in writing, software
1662+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
1663+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
1664+# License for the specific language governing permissions and limitations
1665+# under the License.
1666+
1667+""" The virtual storage array extension"""
1668+
1669+
1670+from webob import exc
1671+
1672+from nova import vsa
1673+from nova import volume
1674+from nova import compute
1675+from nova import network
1676+from nova import db
1677+from nova import quota
1678+from nova import exception
1679+from nova import log as logging
1680+from nova.api.openstack import common
1681+from nova.api.openstack import extensions
1682+from nova.api.openstack import faults
1683+from nova.api.openstack import wsgi
1684+from nova.api.openstack import servers
1685+from nova.api.openstack.contrib import volumes
1686+from nova.compute import instance_types
1687+
1688+from nova import flags
1689+FLAGS = flags.FLAGS
1690+
1691+LOG = logging.getLogger("nova.api.vsa")
1692+
1693+
1694+def _vsa_view(context, vsa, details=False, instances=None):
1695+ """Map keys for vsa summary/detailed view."""
1696+ d = {}
1697+
1698+ d['id'] = vsa.get('id')
1699+ d['name'] = vsa.get('name')
1700+ d['displayName'] = vsa.get('display_name')
1701+ d['displayDescription'] = vsa.get('display_description')
1702+
1703+ d['createTime'] = vsa.get('created_at')
1704+ d['status'] = vsa.get('status')
1705+
1706+ if 'vsa_instance_type' in vsa:
1707+ d['vcType'] = vsa['vsa_instance_type'].get('name', None)
1708+ else:
1709+ d['vcType'] = vsa['instance_type_id']
1710+
1711+ d['vcCount'] = vsa.get('vc_count')
1712+ d['driveCount'] = vsa.get('vol_count')
1713+
1714+ d['ipAddress'] = None
1715+ for instance in instances:
1716+ fixed_addr = None
1717+ floating_addr = None
1718+ if instance['fixed_ips']:
1719+ fixed = instance['fixed_ips'][0]
1720+ fixed_addr = fixed['address']
1721+ if fixed['floating_ips']:
1722+ floating_addr = fixed['floating_ips'][0]['address']
1723+
1724+ if floating_addr:
1725+ d['ipAddress'] = floating_addr
1726+ break
1727+ else:
1728+ d['ipAddress'] = d['ipAddress'] or fixed_addr
1729+
1730+ return d
1731+
1732+
1733+class VsaController(object):
1734+ """The Virtual Storage Array API controller for the OpenStack API."""
1735+
1736+ _serialization_metadata = {
1737+ 'application/xml': {
1738+ "attributes": {
1739+ "vsa": [
1740+ "id",
1741+ "name",
1742+ "displayName",
1743+ "displayDescription",
1744+ "createTime",
1745+ "status",
1746+ "vcType",
1747+ "vcCount",
1748+ "driveCount",
1749+ "ipAddress",
1750+ ]}}}
1751+
1752+ def __init__(self):
1753+ self.vsa_api = vsa.API()
1754+ self.compute_api = compute.API()
1755+ self.network_api = network.API()
1756+ super(VsaController, self).__init__()
1757+
1758+ def _get_instances_by_vsa_id(self, context, id):
1759+ return self.compute_api.get_all(context,
1760+ search_opts={'metadata': dict(vsa_id=str(id))})
1761+
1762+ def _items(self, req, details):
1763+ """Return summary or detailed list of VSAs."""
1764+ context = req.environ['nova.context']
1765+ vsas = self.vsa_api.get_all(context)
1766+ limited_list = common.limited(vsas, req)
1767+
1768+ vsa_list = []
1769+ for vsa in limited_list:
1770+ instances = self._get_instances_by_vsa_id(context, vsa.get('id'))
1771+ vsa_list.append(_vsa_view(context, vsa, details, instances))
1772+ return {'vsaSet': vsa_list}
1773+
1774+ def index(self, req):
1775+ """Return a short list of VSAs."""
1776+ return self._items(req, details=False)
1777+
1778+ def detail(self, req):
1779+ """Return a detailed list of VSAs."""
1780+ return self._items(req, details=True)
1781+
1782+ def show(self, req, id):
1783+ """Return data about the given VSA."""
1784+ context = req.environ['nova.context']
1785+
1786+ try:
1787+ vsa = self.vsa_api.get(context, vsa_id=id)
1788+ except exception.NotFound:
1789+ return faults.Fault(exc.HTTPNotFound())
1790+
1791+ instances = self._get_instances_by_vsa_id(context, vsa.get('id'))
1792+ return {'vsa': _vsa_view(context, vsa, True, instances)}
1793+
1794+ def create(self, req, body):
1795+ """Create a new VSA."""
1796+ context = req.environ['nova.context']
1797+
1798+ if not body or 'vsa' not in body:
1799+ LOG.debug(_("No body provided"), context=context)
1800+ return faults.Fault(exc.HTTPUnprocessableEntity())
1801+
1802+ vsa = body['vsa']
1803+
1804+ display_name = vsa.get('displayName')
1805+ vc_type = vsa.get('vcType', FLAGS.default_vsa_instance_type)
1806+ try:
1807+ instance_type = instance_types.get_instance_type_by_name(vc_type)
1808+ except exception.NotFound:
1809+ return faults.Fault(exc.HTTPNotFound())
1810+
1811+ LOG.audit(_("Create VSA %(display_name)s of type %(vc_type)s"),
1812+ locals(), context=context)
1813+
1814+ args = dict(display_name=display_name,
1815+ display_description=vsa.get('displayDescription'),
1816+ instance_type=instance_type,
1817+ storage=vsa.get('storage'),
1818+ shared=vsa.get('shared'),
1819+ availability_zone=vsa.get('placement', {}).\
1820+ get('AvailabilityZone'))
1821+
1822+ vsa = self.vsa_api.create(context, **args)
1823+
1824+ instances = self._get_instances_by_vsa_id(context, vsa.get('id'))
1825+ return {'vsa': _vsa_view(context, vsa, True, instances)}
1826+
1827+ def delete(self, req, id):
1828+ """Delete a VSA."""
1829+ context = req.environ['nova.context']
1830+
1831+ LOG.audit(_("Delete VSA with id: %s"), id, context=context)
1832+
1833+ try:
1834+ self.vsa_api.delete(context, vsa_id=id)
1835+ except exception.NotFound:
1836+ return faults.Fault(exc.HTTPNotFound())
1837+
1838+ def associate_address(self, req, id, body):
1839+ """ /zadr-vsa/{vsa_id}/associate_address
1840+ auto or manually associate an IP to VSA
1841+ """
1842+ context = req.environ['nova.context']
1843+
1844+ if body is None:
1845+ ip = 'auto'
1846+ else:
1847+ ip = body.get('ipAddress', 'auto')
1848+
1849+ LOG.audit(_("Associate address %(ip)s to VSA %(id)s"),
1850+ locals(), context=context)
1851+
1852+ try:
1853+ instances = self._get_instances_by_vsa_id(context, id)
1854+ if instances is None or len(instances) == 0:
1855+ return faults.Fault(exc.HTTPNotFound())
1856+
1857+ for instance in instances:
1858+ self.network_api.allocate_for_instance(context, instance,
1859+ vpn=False)
1860+ # Placeholder
1861+ return
1862+
1863+ except exception.NotFound:
1864+ return faults.Fault(exc.HTTPNotFound())
1865+
1866+ def disassociate_address(self, req, id, body):
1867+ """ /zadr-vsa/{vsa_id}/disassociate_address
1868+ auto or manually disassociate an IP from VSA
1869+ """
1870+ context = req.environ['nova.context']
1871+
1872+ if body is None:
1873+ ip = 'auto'
1874+ else:
1875+ ip = body.get('ipAddress', 'auto')
1876+
1877+ LOG.audit(_("Disassociate address from VSA %(id)s"),
1878+ locals(), context=context)
1879+ # Placeholder
1880+
1881+
1882+class VsaVolumeDriveController(volumes.VolumeController):
1883+ """The base class for VSA volumes & drives.
1884+
1885+ A child resource of the VSA object. Allows operations with
1886+ volumes and drives created to/from particular VSA
1887+
1888+ """
1889+
1890+ _serialization_metadata = {
1891+ 'application/xml': {
1892+ "attributes": {
1893+ "volume": [
1894+ "id",
1895+ "name",
1896+ "status",
1897+ "size",
1898+ "availabilityZone",
1899+ "createdAt",
1900+ "displayName",
1901+ "displayDescription",
1902+ "vsaId",
1903+ ]}}}
1904+
1905+ def __init__(self):
1906+ self.volume_api = volume.API()
1907+ self.vsa_api = vsa.API()
1908+ super(VsaVolumeDriveController, self).__init__()
1909+
1910+ def _translation(self, context, vol, vsa_id, details):
1911+ if details:
1912+ translation = volumes._translate_volume_detail_view
1913+ else:
1914+ translation = volumes._translate_volume_summary_view
1915+
1916+ d = translation(context, vol)
1917+ d['vsaId'] = vsa_id
1918+ d['name'] = vol['name']
1919+ return d
1920+
1921+ def _check_volume_ownership(self, context, vsa_id, id):
1922+ obj = self.object
1923+ try:
1924+ volume_ref = self.volume_api.get(context, volume_id=id)
1925+ except exception.NotFound:
1926+ LOG.error(_("%(obj)s with ID %(id)s not found"), locals())
1927+ raise
1928+
1929+ own_vsa_id = self.volume_api.get_volume_metadata_value(volume_ref,
1930+ self.direction)
1931+ if own_vsa_id != vsa_id:
1932+ LOG.error(_("%(obj)s with ID %(id)s belongs to VSA %(own_vsa_id)s"\
1933+ " and not to VSA %(vsa_id)s."), locals())
1934+ raise exception.Invalid()
1935+
1936+ def _items(self, req, vsa_id, details):
1937+ """Return summary or detailed list of volumes for particular VSA."""
1938+ context = req.environ['nova.context']
1939+
1940+ vols = self.volume_api.get_all(context,
1941+ search_opts={'metadata': {self.direction: str(vsa_id)}})
1942+ limited_list = common.limited(vols, req)
1943+
1944+ res = [self._translation(context, vol, vsa_id, details) \
1945+ for vol in limited_list]
1946+
1947+ return {self.objects: res}
1948+
1949+ def index(self, req, vsa_id):
1950+ """Return a short list of volumes created from particular VSA."""
1951+ LOG.audit(_("Index. vsa_id=%(vsa_id)s"), locals())
1952+ return self._items(req, vsa_id, details=False)
1953+
1954+ def detail(self, req, vsa_id):
1955+ """Return a detailed list of volumes created from particular VSA."""
1956+ LOG.audit(_("Detail. vsa_id=%(vsa_id)s"), locals())
1957+ return self._items(req, vsa_id, details=True)
1958+
1959+ def create(self, req, vsa_id, body):
1960+ """Create a new volume from VSA."""
1961+ LOG.audit(_("Create. vsa_id=%(vsa_id)s, body=%(body)s"), locals())
1962+ context = req.environ['nova.context']
1963+
1964+ if not body:
1965+ return faults.Fault(exc.HTTPUnprocessableEntity())
1966+
1967+ vol = body[self.object]
1968+ size = vol['size']
1969+ LOG.audit(_("Create volume of %(size)s GB from VSA ID %(vsa_id)s"),
1970+ locals(), context=context)
1971+ try:
1972+ # create is supported for volumes only (drives created through VSA)
1973+ volume_type = self.vsa_api.get_vsa_volume_type(context)
1974+ except exception.NotFound:
1975+ return faults.Fault(exc.HTTPNotFound())
1976+
1977+ new_volume = self.volume_api.create(context,
1978+ size,
1979+ None,
1980+ vol.get('displayName'),
1981+ vol.get('displayDescription'),
1982+ volume_type=volume_type,
1983+ metadata=dict(from_vsa_id=str(vsa_id)))
1984+
1985+ return {self.object: self._translation(context, new_volume,
1986+ vsa_id, True)}
1987+
1988+ def update(self, req, vsa_id, id, body):
1989+ """Update a volume."""
1990+ context = req.environ['nova.context']
1991+
1992+ try:
1993+ self._check_volume_ownership(context, vsa_id, id)
1994+ except exception.NotFound:
1995+ return faults.Fault(exc.HTTPNotFound())
1996+ except exception.Invalid:
1997+ return faults.Fault(exc.HTTPBadRequest())
1998+
1999+ vol = body[self.object]
2000+ updatable_fields = [{'displayName': 'display_name'},
2001+ {'displayDescription': 'display_description'},
2002+ {'status': 'status'},
2003+ {'providerLocation': 'provider_location'},
2004+ {'providerAuth': 'provider_auth'}]
2005+ changes = {}
2006+ for field in updatable_fields:
2007+ key = field.keys()[0]
2008+ val = field[key]
2009+ if key in vol:
2010+ changes[val] = vol[key]
2011+
2012+ obj = self.object
2013+ LOG.audit(_("Update %(obj)s with id: %(id)s, changes: %(changes)s"),
2014+ locals(), context=context)
2015+
2016+ try:
2017+ self.volume_api.update(context, volume_id=id, fields=changes)
2018+ except exception.NotFound:
2019+ return faults.Fault(exc.HTTPNotFound())
2020+ return exc.HTTPAccepted()
2021+
2022+ def delete(self, req, vsa_id, id):
2023+ """Delete a volume."""
2024+ context = req.environ['nova.context']
2025+
2026+ LOG.audit(_("Delete. vsa_id=%(vsa_id)s, id=%(id)s"), locals())
2027+
2028+ try:
2029+ self._check_volume_ownership(context, vsa_id, id)
2030+ except exception.NotFound:
2031+ return faults.Fault(exc.HTTPNotFound())
2032+ except exception.Invalid:
2033+ return faults.Fault(exc.HTTPBadRequest())
2034+
2035+ return super(VsaVolumeDriveController, self).delete(req, id)
2036+
2037+ def show(self, req, vsa_id, id):
2038+ """Return data about the given volume."""
2039+ context = req.environ['nova.context']
2040+
2041+ LOG.audit(_("Show. vsa_id=%(vsa_id)s, id=%(id)s"), locals())
2042+
2043+ try:
2044+ self._check_volume_ownership(context, vsa_id, id)
2045+ except exception.NotFound:
2046+ return faults.Fault(exc.HTTPNotFound())
2047+ except exception.Invalid:
2048+ return faults.Fault(exc.HTTPBadRequest())
2049+
2050+ return super(VsaVolumeDriveController, self).show(req, id)
2051+
2052+
2053+class VsaVolumeController(VsaVolumeDriveController):
2054+ """The VSA volume API controller for the Openstack API.
2055+
2056+ A child resource of the VSA object. Allows operations with volumes created
2057+ by particular VSA
2058+
2059+ """
2060+
2061+ def __init__(self):
2062+ self.direction = 'from_vsa_id'
2063+ self.objects = 'volumes'
2064+ self.object = 'volume'
2065+ super(VsaVolumeController, self).__init__()
2066+
2067+
2068+class VsaDriveController(VsaVolumeDriveController):
2069+ """The VSA Drive API controller for the Openstack API.
2070+
2071+ A child resource of the VSA object. Allows operations with drives created
2072+ for particular VSA
2073+
2074+ """
2075+
2076+ def __init__(self):
2077+ self.direction = 'to_vsa_id'
2078+ self.objects = 'drives'
2079+ self.object = 'drive'
2080+ super(VsaDriveController, self).__init__()
2081+
2082+ def create(self, req, vsa_id, body):
2083+ """Create a new drive for VSA. Should be done through VSA APIs"""
2084+ return faults.Fault(exc.HTTPBadRequest())
2085+
2086+ def update(self, req, vsa_id, id, body):
2087+ """Update a drive. Should be done through VSA APIs"""
2088+ return faults.Fault(exc.HTTPBadRequest())
2089+
2090+ def delete(self, req, vsa_id, id):
2091+ """Delete a drive. Should be done through VSA APIs"""
2092+ return faults.Fault(exc.HTTPBadRequest())
2093+
2094+
2095+class VsaVPoolController(object):
2096+ """The vPool VSA API controller for the OpenStack API."""
2097+
2098+ _serialization_metadata = {
2099+ 'application/xml': {
2100+ "attributes": {
2101+ "vpool": [
2102+ "id",
2103+ "vsaId",
2104+ "name",
2105+ "displayName",
2106+ "displayDescription",
2107+ "driveCount",
2108+ "driveIds",
2109+ "protection",
2110+ "stripeSize",
2111+ "stripeWidth",
2112+ "createTime",
2113+ "status",
2114+ ]}}}
2115+
2116+ def __init__(self):
2117+ self.vsa_api = vsa.API()
2118+ super(VsaVPoolController, self).__init__()
2119+
2120+ def index(self, req, vsa_id):
2121+ """Return a short list of vpools created from particular VSA."""
2122+ return {'vpools': []}
2123+
2124+ def create(self, req, vsa_id, body):
2125+ """Create a new vPool for VSA."""
2126+ return faults.Fault(exc.HTTPBadRequest())
2127+
2128+ def update(self, req, vsa_id, id, body):
2129+ """Update vPool parameters."""
2130+ return faults.Fault(exc.HTTPBadRequest())
2131+
2132+ def delete(self, req, vsa_id, id):
2133+ """Delete a vPool."""
2134+ return faults.Fault(exc.HTTPBadRequest())
2135+
2136+ def show(self, req, vsa_id, id):
2137+ """Return data about the given vPool."""
2138+ return faults.Fault(exc.HTTPBadRequest())
2139+
2140+
2141+class VsaVCController(servers.ControllerV11):
2142+ """The VSA Virtual Controller API controller for the OpenStack API."""
2143+
2144+ def __init__(self):
2145+ self.vsa_api = vsa.API()
2146+ self.compute_api = compute.API()
2147+ self.vsa_id = None # VP-TODO: temporary ugly hack
2148+ super(VsaVCController, self).__init__()
2149+
2150+ def _get_servers(self, req, is_detail):
2151+ """Returns a list of servers, taking into account any search
2152+ options specified.
2153+ """
2154+
2155+ if self.vsa_id is None:
2156+ super(VsaVCController, self)._get_servers(req, is_detail)
2157+
2158+ context = req.environ['nova.context']
2159+
2160+ search_opts = {'metadata': dict(vsa_id=str(self.vsa_id))}
2161+ instance_list = self.compute_api.get_all(
2162+ context, search_opts=search_opts)
2163+
2164+ limited_list = self._limit_items(instance_list, req)
2165+ servers = [self._build_view(req, inst, is_detail)['server']
2166+ for inst in limited_list]
2167+ return dict(servers=servers)
2168+
2169+ def index(self, req, vsa_id):
2170+ """Return list of instances for particular VSA."""
2171+
2172+ LOG.audit(_("Index instances for VSA %s"), vsa_id)
2173+
2174+ self.vsa_id = vsa_id # VP-TODO: temporary ugly hack
2175+ result = super(VsaVCController, self).detail(req)
2176+ self.vsa_id = None
2177+ return result
2178+
2179+ def create(self, req, vsa_id, body):
2180+ """Create a new instance for VSA."""
2181+ return faults.Fault(exc.HTTPBadRequest())
2182+
2183+ def update(self, req, vsa_id, id, body):
2184+ """Update VSA instance."""
2185+ return faults.Fault(exc.HTTPBadRequest())
2186+
2187+ def delete(self, req, vsa_id, id):
2188+ """Delete VSA instance."""
2189+ return faults.Fault(exc.HTTPBadRequest())
2190+
2191+ def show(self, req, vsa_id, id):
2192+ """Return data about the given instance."""
2193+ return super(VsaVCController, self).show(req, id)
2194+
2195+
2196+class Virtual_storage_arrays(extensions.ExtensionDescriptor):
2197+
2198+ def get_name(self):
2199+ return "VSAs"
2200+
2201+ def get_alias(self):
2202+ return "zadr-vsa"
2203+
2204+ def get_description(self):
2205+ return "Virtual Storage Arrays support"
2206+
2207+ def get_namespace(self):
2208+ return "http://docs.openstack.org/ext/vsa/api/v1.1"
2209+
2210+ def get_updated(self):
2211+ return "2011-08-25T00:00:00+00:00"
2212+
2213+ def get_resources(self):
2214+ resources = []
2215+ res = extensions.ResourceExtension(
2216+ 'zadr-vsa',
2217+ VsaController(),
2218+ collection_actions={'detail': 'GET'},
2219+ member_actions={'add_capacity': 'POST',
2220+ 'remove_capacity': 'POST',
2221+ 'associate_address': 'POST',
2222+ 'disassociate_address': 'POST'})
2223+ resources.append(res)
2224+
2225+ res = extensions.ResourceExtension('volumes',
2226+ VsaVolumeController(),
2227+ collection_actions={'detail': 'GET'},
2228+ parent=dict(
2229+ member_name='vsa',
2230+ collection_name='zadr-vsa'))
2231+ resources.append(res)
2232+
2233+ res = extensions.ResourceExtension('drives',
2234+ VsaDriveController(),
2235+ collection_actions={'detail': 'GET'},
2236+ parent=dict(
2237+ member_name='vsa',
2238+ collection_name='zadr-vsa'))
2239+ resources.append(res)
2240+
2241+ res = extensions.ResourceExtension('vpools',
2242+ VsaVPoolController(),
2243+ parent=dict(
2244+ member_name='vsa',
2245+ collection_name='zadr-vsa'))
2246+ resources.append(res)
2247+
2248+ res = extensions.ResourceExtension('instances',
2249+ VsaVCController(),
2250+ parent=dict(
2251+ member_name='vsa',
2252+ collection_name='zadr-vsa'))
2253+ resources.append(res)
2254+
2255+ return resources
2256
2257=== modified file 'nova/api/openstack/contrib/volumes.py'
2258--- nova/api/openstack/contrib/volumes.py 2011-07-26 17:10:31 +0000
2259+++ nova/api/openstack/contrib/volumes.py 2011-09-09 09:29:27 +0000
2260@@ -24,6 +24,7 @@
2261 from nova import log as logging
2262 from nova import quota
2263 from nova import volume
2264+from nova.volume import volume_types
2265 from nova.api.openstack import common
2266 from nova.api.openstack import extensions
2267 from nova.api.openstack import faults
2268@@ -63,6 +64,22 @@
2269
2270 d['displayName'] = vol['display_name']
2271 d['displayDescription'] = vol['display_description']
2272+
2273+ if vol['volume_type_id'] and vol.get('volume_type'):
2274+ d['volumeType'] = vol['volume_type']['name']
2275+ else:
2276+ d['volumeType'] = vol['volume_type_id']
2277+
2278+ LOG.audit(_("vol=%s"), vol, context=context)
2279+
2280+ if vol.get('volume_metadata'):
2281+ meta_dict = {}
2282+ for i in vol['volume_metadata']:
2283+ meta_dict[i['key']] = i['value']
2284+ d['metadata'] = meta_dict
2285+ else:
2286+ d['metadata'] = {}
2287+
2288 return d
2289
2290
2291@@ -80,6 +97,8 @@
2292 "createdAt",
2293 "displayName",
2294 "displayDescription",
2295+ "volumeType",
2296+ "metadata",
2297 ]}}}
2298
2299 def __init__(self):
2300@@ -136,12 +155,25 @@
2301 vol = body['volume']
2302 size = vol['size']
2303 LOG.audit(_("Create volume of %s GB"), size, context=context)
2304+
2305+ vol_type = vol.get('volume_type', None)
2306+ if vol_type:
2307+ try:
2308+ vol_type = volume_types.get_volume_type_by_name(context,
2309+ vol_type)
2310+ except exception.NotFound:
2311+ return faults.Fault(exc.HTTPNotFound())
2312+
2313+ metadata = vol.get('metadata', None)
2314+
2315 new_volume = self.volume_api.create(context, size, None,
2316 vol.get('display_name'),
2317- vol.get('display_description'))
2318+ vol.get('display_description'),
2319+ volume_type=vol_type,
2320+ metadata=metadata)
2321
2322 # Work around problem that instance is lazy-loaded...
2323- new_volume['instance'] = None
2324+ new_volume = self.volume_api.get(context, new_volume['id'])
2325
2326 retval = _translate_volume_detail_view(context, new_volume)
2327
2328
2329=== added file 'nova/api/openstack/contrib/volumetypes.py'
2330--- nova/api/openstack/contrib/volumetypes.py 1970-01-01 00:00:00 +0000
2331+++ nova/api/openstack/contrib/volumetypes.py 2011-09-09 09:29:27 +0000
2332@@ -0,0 +1,197 @@
2333+# vim: tabstop=4 shiftwidth=4 softtabstop=4
2334+
2335+# Copyright (c) 2011 Zadara Storage Inc.
2336+# Copyright (c) 2011 OpenStack LLC.
2337+#
2338+# Licensed under the Apache License, Version 2.0 (the "License"); you may
2339+# not use this file except in compliance with the License. You may obtain
2340+# a copy of the License at
2341+#
2342+# http://www.apache.org/licenses/LICENSE-2.0
2343+#
2344+# Unless required by applicable law or agreed to in writing, software
2345+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
2346+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
2347+# License for the specific language governing permissions and limitations
2348+# under the License.
2349+
2350+""" The volume type & volume types extra specs extension"""
2351+
2352+from webob import exc
2353+
2354+from nova import db
2355+from nova import exception
2356+from nova import quota
2357+from nova.volume import volume_types
2358+from nova.api.openstack import extensions
2359+from nova.api.openstack import faults
2360+from nova.api.openstack import wsgi
2361+
2362+
2363+class VolumeTypesController(object):
2364+ """ The volume types API controller for the Openstack API """
2365+
2366+ def index(self, req):
2367+ """ Returns the list of volume types """
2368+ context = req.environ['nova.context']
2369+ return volume_types.get_all_types(context)
2370+
2371+ def create(self, req, body):
2372+ """Creates a new volume type."""
2373+ context = req.environ['nova.context']
2374+
2375+ if not body or body == "":
2376+ return faults.Fault(exc.HTTPUnprocessableEntity())
2377+
2378+ vol_type = body.get('volume_type', None)
2379+ if vol_type is None or vol_type == "":
2380+ return faults.Fault(exc.HTTPUnprocessableEntity())
2381+
2382+ name = vol_type.get('name', None)
2383+ specs = vol_type.get('extra_specs', {})
2384+
2385+ if name is None or name == "":
2386+ return faults.Fault(exc.HTTPUnprocessableEntity())
2387+
2388+ try:
2389+ volume_types.create(context, name, specs)
2390+ vol_type = volume_types.get_volume_type_by_name(context, name)
2391+ except quota.QuotaError as error:
2392+ self._handle_quota_error(error)
2393+ except exception.NotFound:
2394+ return faults.Fault(exc.HTTPNotFound())
2395+
2396+ return {'volume_type': vol_type}
2397+
2398+ def show(self, req, id):
2399+ """ Return a single volume type item """
2400+ context = req.environ['nova.context']
2401+
2402+ try:
2403+ vol_type = volume_types.get_volume_type(context, id)
2404+ except (exception.NotFound, exception.ApiError):
2405+ return faults.Fault(exc.HTTPNotFound())
2406+
2407+ return {'volume_type': vol_type}
2408+
2409+ def delete(self, req, id):
2410+ """ Deletes an existing volume type """
2411+ context = req.environ['nova.context']
2412+
2413+ try:
2414+ vol_type = volume_types.get_volume_type(context, id)
2415+ volume_types.destroy(context, vol_type['name'])
2416+ except exception.NotFound:
2417+ return faults.Fault(exc.HTTPNotFound())
2418+
2419+ def _handle_quota_error(self, error):
2420+ """Reraise quota errors as api-specific http exceptions."""
2421+ if error.code == "MetadataLimitExceeded":
2422+ raise exc.HTTPBadRequest(explanation=error.message)
2423+ raise error
2424+
2425+
2426+class VolumeTypeExtraSpecsController(object):
2427+ """ The volume type extra specs API controller for the Openstack API """
2428+
2429+ def _get_extra_specs(self, context, vol_type_id):
2430+ extra_specs = db.api.volume_type_extra_specs_get(context, vol_type_id)
2431+ specs_dict = {}
2432+ for key, value in extra_specs.iteritems():
2433+ specs_dict[key] = value
2434+ return dict(extra_specs=specs_dict)
2435+
2436+ def _check_body(self, body):
2437+ if body is None or body == "":
2438+ expl = _('No Request Body')
2439+ raise exc.HTTPBadRequest(explanation=expl)
2440+
2441+ def index(self, req, vol_type_id):
2442+ """ Returns the list of extra specs for a given volume type """
2443+ context = req.environ['nova.context']
2444+ return self._get_extra_specs(context, vol_type_id)
2445+
2446+ def create(self, req, vol_type_id, body):
2447+ self._check_body(body)
2448+ context = req.environ['nova.context']
2449+ specs = body.get('extra_specs')
2450+ try:
2451+ db.api.volume_type_extra_specs_update_or_create(context,
2452+ vol_type_id,
2453+ specs)
2454+ except quota.QuotaError as error:
2455+ self._handle_quota_error(error)
2456+ return body
2457+
2458+ def update(self, req, vol_type_id, id, body):
2459+ self._check_body(body)
2460+ context = req.environ['nova.context']
2461+ if id not in body:
2462+ expl = _('Request body and URI mismatch')
2463+ raise exc.HTTPBadRequest(explanation=expl)
2464+ if len(body) > 1:
2465+ expl = _('Request body contains too many items')
2466+ raise exc.HTTPBadRequest(explanation=expl)
2467+ try:
2468+ db.api.volume_type_extra_specs_update_or_create(context,
2469+ vol_type_id,
2470+ body)
2471+ except quota.QuotaError as error:
2472+ self._handle_quota_error(error)
2473+
2474+ return body
2475+
2476+ def show(self, req, vol_type_id, id):
2477+ """ Return a single extra spec item """
2478+ context = req.environ['nova.context']
2479+ specs = self._get_extra_specs(context, vol_type_id)
2480+ if id in specs['extra_specs']:
2481+ return {id: specs['extra_specs'][id]}
2482+ else:
2483+ return faults.Fault(exc.HTTPNotFound())
2484+
2485+ def delete(self, req, vol_type_id, id):
2486+ """ Deletes an existing extra spec """
2487+ context = req.environ['nova.context']
2488+ db.api.volume_type_extra_specs_delete(context, vol_type_id, id)
2489+
2490+ def _handle_quota_error(self, error):
2491+ """Reraise quota errors as api-specific http exceptions."""
2492+ if error.code == "MetadataLimitExceeded":
2493+ raise exc.HTTPBadRequest(explanation=error.message)
2494+ raise error
2495+
2496+
2497+class Volumetypes(extensions.ExtensionDescriptor):
2498+
2499+ def get_name(self):
2500+ return "VolumeTypes"
2501+
2502+ def get_alias(self):
2503+ return "os-volume-types"
2504+
2505+ def get_description(self):
2506+ return "Volume types support"
2507+
2508+ def get_namespace(self):
2509+ return \
2510+ "http://docs.openstack.org/ext/volume_types/api/v1.1"
2511+
2512+ def get_updated(self):
2513+ return "2011-08-24T00:00:00+00:00"
2514+
2515+ def get_resources(self):
2516+ resources = []
2517+ res = extensions.ResourceExtension(
2518+ 'os-volume-types',
2519+ VolumeTypesController())
2520+ resources.append(res)
2521+
2522+ res = extensions.ResourceExtension('extra_specs',
2523+ VolumeTypeExtraSpecsController(),
2524+ parent=dict(
2525+ member_name='vol_type',
2526+ collection_name='os-volume-types'))
2527+ resources.append(res)
2528+
2529+ return resources
2530
2531=== modified file 'nova/api/openstack/create_instance_helper.py'
2532--- nova/api/openstack/create_instance_helper.py 2011-08-23 04:17:57 +0000
2533+++ nova/api/openstack/create_instance_helper.py 2011-09-09 09:29:27 +0000
2534@@ -19,7 +19,6 @@
2535 from webob import exc
2536 from xml.dom import minidom
2537
2538-from nova import db
2539 from nova import exception
2540 from nova import flags
2541 from nova import log as logging
2542@@ -74,20 +73,17 @@
2543 if not 'server' in body:
2544 raise exc.HTTPUnprocessableEntity()
2545
2546+ context = req.environ['nova.context']
2547 server_dict = body['server']
2548- context = req.environ['nova.context']
2549 password = self.controller._get_server_admin_password(server_dict)
2550
2551- key_name = None
2552- key_data = None
2553- # TODO(vish): Key pair access should move into a common library
2554- # instead of being accessed directly from the db.
2555- key_pairs = db.key_pair_get_all_by_user(context.elevated(),
2556- context.user_id)
2557- if key_pairs:
2558- key_pair = key_pairs[0]
2559- key_name = key_pair['name']
2560- key_data = key_pair['public_key']
2561+ if not 'name' in server_dict:
2562+ msg = _("Server name is not defined")
2563+ raise exc.HTTPBadRequest(explanation=msg)
2564+
2565+ name = server_dict['name']
2566+ self._validate_server_name(name)
2567+ name = name.strip()
2568
2569 image_href = self.controller._image_ref_from_req_data(body)
2570 # If the image href was generated by nova api, strip image_href
2571@@ -98,7 +94,7 @@
2572 try:
2573 image_service, image_id = nova.image.get_image_service(image_href)
2574 kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
2575- req, image_id)
2576+ req, image_service, image_id)
2577 images = set([str(x['id']) for x in image_service.index(context)])
2578 assert str(image_id) in images
2579 except Exception, e:
2580@@ -133,12 +129,13 @@
2581 msg = _("Invalid flavorRef provided.")
2582 raise exc.HTTPBadRequest(explanation=msg)
2583
2584- if not 'name' in server_dict:
2585- msg = _("Server name is not defined")
2586- raise exc.HTTPBadRequest(explanation=msg)
2587-
2588 zone_blob = server_dict.get('blob')
2589+
2590+ # optional openstack extensions:
2591+ key_name = server_dict.get('key_name')
2592 user_data = server_dict.get('user_data')
2593+ self._validate_user_data(user_data)
2594+
2595 availability_zone = server_dict.get('availability_zone')
2596 name = server_dict['name']
2597 self._validate_server_name(name)
2598@@ -173,7 +170,6 @@
2599 display_name=name,
2600 display_description=name,
2601 key_name=key_name,
2602- key_data=key_data,
2603 metadata=server_dict.get('metadata', {}),
2604 access_ip_v4=server_dict.get('accessIPv4'),
2605 access_ip_v6=server_dict.get('accessIPv6'),
2606@@ -196,6 +192,9 @@
2607 except exception.FlavorNotFound as error:
2608 msg = _("Invalid flavorRef provided.")
2609 raise exc.HTTPBadRequest(explanation=msg)
2610+ except exception.KeypairNotFound as error:
2611+ msg = _("Invalid key_name provided.")
2612+ raise exc.HTTPBadRequest(explanation=msg)
2613 except exception.SecurityGroupNotFound as error:
2614 raise exc.HTTPBadRequest(explanation=unicode(error))
2615 except RemoteError as err:
2616@@ -248,12 +247,12 @@
2617 msg = _("Server name is an empty string")
2618 raise exc.HTTPBadRequest(explanation=msg)
2619
2620- def _get_kernel_ramdisk_from_image(self, req, image_id):
2621+ def _get_kernel_ramdisk_from_image(self, req, image_service, image_id):
2622 """Fetch an image from the ImageService, then if present, return the
2623 associated kernel and ramdisk image IDs.
2624 """
2625 context = req.environ['nova.context']
2626- image_meta = self._image_service.show(context, image_id)
2627+ image_meta = image_service.show(context, image_id)
2628 # NOTE(sirp): extracted to a separate method to aid unit-testing, the
2629 # new method doesn't need a request obj or an ImageService stub
2630 kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image(
2631@@ -283,7 +282,7 @@
2632 try:
2633 ramdisk_id = image_meta['properties']['ramdisk_id']
2634 except KeyError:
2635- raise exception.RamdiskNotFoundForImage(image_id=image_id)
2636+ ramdisk_id = None
2637
2638 return kernel_id, ramdisk_id
2639
2640@@ -370,6 +369,16 @@
2641
2642 return networks
2643
2644+ def _validate_user_data(self, user_data):
2645+ """Check if the user_data is encoded properly"""
2646+ if not user_data:
2647+ return
2648+ try:
2649+ user_data = base64.b64decode(user_data)
2650+ except TypeError:
2651+ expl = _('Userdata content cannot be decoded')
2652+ raise exc.HTTPBadRequest(explanation=expl)
2653+
2654
2655 class ServerXMLDeserializer(wsgi.XMLDeserializer):
2656 """
2657
2658=== modified file 'nova/api/openstack/schemas/v1.1/server.rng'
2659--- nova/api/openstack/schemas/v1.1/server.rng 2011-08-19 19:55:56 +0000
2660+++ nova/api/openstack/schemas/v1.1/server.rng 2011-09-09 09:29:27 +0000
2661@@ -1,6 +1,8 @@
2662 <element name="server" ns="http://docs.openstack.org/compute/api/v1.1"
2663 xmlns="http://relaxng.org/ns/structure/1.0">
2664 <attribute name="name"> <text/> </attribute>
2665+ <attribute name="userId"> <text/> </attribute>
2666+ <attribute name="tenantId"> <text/> </attribute>
2667 <attribute name="id"> <text/> </attribute>
2668 <attribute name="uuid"> <text/> </attribute>
2669 <attribute name="updated"> <text/> </attribute>
2670
2671=== modified file 'nova/api/openstack/servers.py'
2672--- nova/api/openstack/servers.py 2011-08-24 14:37:59 +0000
2673+++ nova/api/openstack/servers.py 2011-09-09 09:29:27 +0000
2674@@ -22,6 +22,7 @@
2675 import webob
2676
2677 from nova import compute
2678+from nova import db
2679 from nova import exception
2680 from nova import flags
2681 from nova import log as logging
2682@@ -95,17 +96,23 @@
2683 search_opts['recurse_zones'] = utils.bool_from_str(
2684 search_opts.get('recurse_zones', False))
2685
2686- # If search by 'status', we need to convert it to 'state'
2687- # If the status is unknown, bail.
2688- # Leave 'state' in search_opts so compute can pass it on to
2689- # child zones..
2690+ # If search by 'status', we need to convert it to 'vm_state'
2691+ # to pass on to child zones.
2692 if 'status' in search_opts:
2693 status = search_opts['status']
2694- search_opts['state'] = common.power_states_from_status(status)
2695- if len(search_opts['state']) == 0:
2696+ state = common.vm_state_from_status(status)
2697+ if state is None:
2698 reason = _('Invalid server status: %(status)s') % locals()
2699- LOG.error(reason)
2700 raise exception.InvalidInput(reason=reason)
2701+ search_opts['vm_state'] = state
2702+
2703+ if 'changes-since' in search_opts:
2704+ try:
2705+ parsed = utils.parse_isotime(search_opts['changes-since'])
2706+ except ValueError:
2707+ msg = _('Invalid changes-since value')
2708+ raise exc.HTTPBadRequest(explanation=msg)
2709+ search_opts['changes-since'] = parsed
2710
2711 # By default, compute's get_all() will return deleted instances.
2712 # If an admin hasn't specified a 'deleted' search option, we need
2713@@ -114,23 +121,17 @@
2714 # should return recently deleted images according to the API spec.
2715
2716 if 'deleted' not in search_opts:
2717- # Admin hasn't specified deleted filter
2718 if 'changes-since' not in search_opts:
2719- # No 'changes-since', so we need to find non-deleted servers
2720+ # No 'changes-since', so we only want non-deleted servers
2721 search_opts['deleted'] = False
2722- else:
2723- # This is the default, but just in case..
2724- search_opts['deleted'] = True
2725-
2726- instance_list = self.compute_api.get_all(
2727- context, search_opts=search_opts)
2728-
2729- # FIXME(comstud): 'changes-since' is not fully implemented. Where
2730- # should this be filtered?
2731+
2732+ instance_list = self.compute_api.get_all(context,
2733+ search_opts=search_opts)
2734
2735 limited_list = self._limit_items(instance_list, req)
2736 servers = [self._build_view(req, inst, is_detail)['server']
2737- for inst in limited_list]
2738+ for inst in limited_list]
2739+
2740 return dict(servers=servers)
2741
2742 @scheduler_api.redirect_handler
2743@@ -143,10 +144,16 @@
2744 except exception.NotFound:
2745 raise exc.HTTPNotFound()
2746
2747+ def _get_key_name(self, req, body):
2748+ """ Get default keypair if not set """
2749+ raise NotImplementedError()
2750+
2751 def create(self, req, body):
2752 """ Creates a new server for a given user """
2753+ if 'server' in body:
2754+ body['server']['key_name'] = self._get_key_name(req, body)
2755+
2756 extra_values = None
2757- result = None
2758 extra_values, instances = self.helper.create_instance(
2759 req, body, self.compute_api.create)
2760
2761@@ -564,6 +571,13 @@
2762 raise exc.HTTPNotFound()
2763 return webob.Response(status_int=202)
2764
2765+ def _get_key_name(self, req, body):
2766+ context = req.environ["nova.context"]
2767+ keypairs = db.key_pair_get_all_by_user(context,
2768+ context.user_id)
2769+ if keypairs:
2770+ return keypairs[0]['name']
2771+
2772 def _image_ref_from_req_data(self, data):
2773 return data['server']['imageId']
2774
2775@@ -608,9 +622,8 @@
2776
2777 try:
2778 self.compute_api.rebuild(context, instance_id, image_id, password)
2779- except exception.BuildInProgress:
2780- msg = _("Instance %s is currently being rebuilt.") % instance_id
2781- LOG.debug(msg)
2782+ except exception.RebuildRequiresActiveInstance:
2783+ msg = _("Instance %s must be active to rebuild.") % instance_id
2784 raise exc.HTTPConflict(explanation=msg)
2785
2786 return webob.Response(status_int=202)
2787@@ -635,6 +648,10 @@
2788 except exception.NotFound:
2789 raise exc.HTTPNotFound()
2790
2791+ def _get_key_name(self, req, body):
2792+ if 'server' in body:
2793+ return body['server'].get('key_name')
2794+
2795 def _image_ref_from_req_data(self, data):
2796 try:
2797 return data['server']['imageRef']
2798@@ -750,9 +767,8 @@
2799 self.compute_api.rebuild(context, instance_id, image_href,
2800 password, name=name, metadata=metadata,
2801 files_to_inject=personalities)
2802- except exception.BuildInProgress:
2803- msg = _("Instance %s is currently being rebuilt.") % instance_id
2804- LOG.debug(msg)
2805+ except exception.RebuildRequiresActiveInstance:
2806+ msg = _("Instance %s must be active to rebuild.") % instance_id
2807 raise exc.HTTPConflict(explanation=msg)
2808 except exception.InstanceNotFound:
2809 msg = _("Instance %s could not be found") % instance_id
2810@@ -857,6 +873,8 @@
2811
2812 def _add_server_attributes(self, node, server):
2813 node.setAttribute('id', str(server['id']))
2814+ node.setAttribute('userId', str(server['user_id']))
2815+ node.setAttribute('tenantId', str(server['tenant_id']))
2816 node.setAttribute('uuid', str(server['uuid']))
2817 node.setAttribute('hostId', str(server['hostId']))
2818 node.setAttribute('name', server['name'])
2819@@ -912,6 +930,11 @@
2820 server['addresses'])
2821 server_node.appendChild(addresses_node)
2822
2823+ if 'security_groups' in server:
2824+ security_groups_node = self._create_security_groups_node(xml_doc,
2825+ server['security_groups'])
2826+ server_node.appendChild(security_groups_node)
2827+
2828 return server_node
2829
2830 def _server_list_to_xml(self, xml_doc, servers, detailed):
2831@@ -964,6 +987,19 @@
2832 server_dict['server'])
2833 return self.to_xml_string(node, True)
2834
2835+ def _security_group_to_xml(self, doc, security_group):
2836+ node = doc.createElement('security_group')
2837+ node.setAttribute('name', str(security_group.get('name')))
2838+ return node
2839+
2840+ def _create_security_groups_node(self, xml_doc, security_groups):
2841+ security_groups_node = xml_doc.createElement('security_groups')
2842+ if security_groups:
2843+ for security_group in security_groups:
2844+ node = self._security_group_to_xml(xml_doc, security_group)
2845+ security_groups_node.appendChild(node)
2846+ return security_groups_node
2847+
2848
2849 def create_resource(version='1.0'):
2850 controller = {
2851@@ -975,7 +1011,7 @@
2852 "attributes": {
2853 "server": ["id", "imageId", "name", "flavorId", "hostId",
2854 "status", "progress", "adminPass", "flavorRef",
2855- "imageRef"],
2856+ "imageRef", "userId", "tenantId"],
2857 "link": ["rel", "type", "href"],
2858 },
2859 "dict_collections": {
2860
2861=== modified file 'nova/api/openstack/views/addresses.py'
2862--- nova/api/openstack/views/addresses.py 2011-08-23 03:30:12 +0000
2863+++ nova/api/openstack/views/addresses.py 2011-09-09 09:29:27 +0000
2864@@ -88,7 +88,6 @@
2865 try:
2866 return interface['network']['label']
2867 except (TypeError, KeyError) as exc:
2868- LOG.exception(exc)
2869 raise TypeError
2870
2871 def _extract_ipv4_addresses(self, interface):
2872
2873=== modified file 'nova/api/openstack/views/servers.py'
2874--- nova/api/openstack/views/servers.py 2011-08-23 04:17:57 +0000
2875+++ nova/api/openstack/views/servers.py 2011-09-09 09:29:27 +0000
2876@@ -21,13 +21,12 @@
2877 import os
2878
2879 from nova import exception
2880-import nova.compute
2881-import nova.context
2882 from nova.api.openstack import common
2883 from nova.api.openstack.views import addresses as addresses_view
2884 from nova.api.openstack.views import flavors as flavors_view
2885 from nova.api.openstack.views import images as images_view
2886 from nova import utils
2887+from nova.compute import vm_states
2888
2889
2890 class ViewBuilder(object):
2891@@ -61,17 +60,15 @@
2892
2893 def _build_detail(self, inst):
2894 """Returns a detailed model of a server."""
2895+ vm_state = inst.get('vm_state', vm_states.BUILDING)
2896+ task_state = inst.get('task_state')
2897
2898 inst_dict = {
2899 'id': inst['id'],
2900 'name': inst['display_name'],
2901- 'status': common.status_from_power_state(inst.get('state'))}
2902-
2903- ctxt = nova.context.get_admin_context()
2904- compute_api = nova.compute.API()
2905-
2906- if compute_api.has_finished_migration(ctxt, inst['uuid']):
2907- inst_dict['status'] = 'RESIZE-CONFIRM'
2908+ 'user_id': inst.get('user_id', ''),
2909+ 'tenant_id': inst.get('project_id', ''),
2910+ 'status': common.status_from_state(vm_state, task_state)}
2911
2912 # Return the metadata as a dictionary
2913 metadata = {}
2914@@ -188,6 +185,7 @@
2915 def _build_extra(self, response, inst):
2916 self._build_links(response, inst)
2917 response['uuid'] = inst['uuid']
2918+ response['key_name'] = inst.get('key_name', '')
2919 self._build_config_drive(response, inst)
2920
2921 def _build_links(self, response, inst):
2922
2923=== modified file 'nova/compute/api.py'
2924--- nova/compute/api.py 2011-08-25 08:26:41 +0000
2925+++ nova/compute/api.py 2011-09-09 09:29:27 +0000
2926@@ -19,13 +19,11 @@
2927
2928 """Handles all requests relating to instances (guest vms)."""
2929
2930-import eventlet
2931 import novaclient
2932 import re
2933 import time
2934
2935 from nova import block_device
2936-from nova import db
2937 from nova import exception
2938 from nova import flags
2939 import nova.image
2940@@ -37,6 +35,8 @@
2941 from nova import volume
2942 from nova.compute import instance_types
2943 from nova.compute import power_state
2944+from nova.compute import task_states
2945+from nova.compute import vm_states
2946 from nova.compute.utils import terminate_volumes
2947 from nova.scheduler import api as scheduler_api
2948 from nova.db import base
2949@@ -75,12 +75,18 @@
2950
2951
2952 def _is_able_to_shutdown(instance, instance_id):
2953- states = {'terminating': "Instance %s is already being terminated",
2954- 'migrating': "Instance %s is being migrated",
2955- 'stopping': "Instance %s is being stopped"}
2956- msg = states.get(instance['state_description'])
2957- if msg:
2958- LOG.warning(_(msg), instance_id)
2959+ vm_state = instance["vm_state"]
2960+ task_state = instance["task_state"]
2961+
2962+ valid_shutdown_states = [
2963+ vm_states.ACTIVE,
2964+ vm_states.REBUILDING,
2965+ vm_states.BUILDING,
2966+ ]
2967+
2968+ if vm_state not in valid_shutdown_states:
2969+ LOG.warn(_("Instance %(instance_id)s is not in an 'active' state. It "
2970+ "is currently %(vm_state)s. Shutdown aborted.") % locals())
2971 return False
2972
2973 return True
2974@@ -237,7 +243,7 @@
2975 self.ensure_default_security_group(context)
2976
2977 if key_data is None and key_name:
2978- key_pair = db.key_pair_get(context, context.user_id, key_name)
2979+ key_pair = self.db.key_pair_get(context, context.user_id, key_name)
2980 key_data = key_pair['public_key']
2981
2982 if reservation_id is None:
2983@@ -251,10 +257,10 @@
2984 'image_ref': image_href,
2985 'kernel_id': kernel_id or '',
2986 'ramdisk_id': ramdisk_id or '',
2987+ 'power_state': power_state.NOSTATE,
2988+ 'vm_state': vm_states.BUILDING,
2989 'config_drive_id': config_drive_id or '',
2990 'config_drive': config_drive or '',
2991- 'state': 0,
2992- 'state_description': 'scheduling',
2993 'user_id': context.user_id,
2994 'project_id': context.project_id,
2995 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
2996@@ -377,10 +383,6 @@
2997 If you are changing this method, be sure to update both
2998 call paths.
2999 """
3000- instance = dict(launch_index=num, **base_options)
3001- instance = self.db.instance_create(context, instance)
3002- instance_id = instance['id']
3003-
3004 elevated = context.elevated()
3005 if security_group is None:
3006 security_group = ['default']
3007@@ -389,11 +391,15 @@
3008
3009 security_groups = []
3010 for security_group_name in security_group:
3011- group = db.security_group_get_by_name(context,
3012- context.project_id,
3013- security_group_name)
3014+ group = self.db.security_group_get_by_name(context,
3015+ context.project_id,
3016+ security_group_name)
3017 security_groups.append(group['id'])
3018
3019+ instance = dict(launch_index=num, **base_options)
3020+ instance = self.db.instance_create(context, instance)
3021+ instance_id = instance['id']
3022+
3023 for security_group_id in security_groups:
3024 self.db.instance_add_security_group(elevated,
3025 instance_id,
3026@@ -415,6 +421,8 @@
3027 updates['display_name'] = "Server %s" % instance_id
3028 instance['display_name'] = updates['display_name']
3029 updates['hostname'] = self.hostname_factory(instance)
3030+ updates['vm_state'] = vm_states.BUILDING
3031+ updates['task_state'] = task_states.SCHEDULING
3032
3033 instance = self.update(context, instance_id, **updates)
3034 return instance
3035@@ -551,8 +559,9 @@
3036 def has_finished_migration(self, context, instance_uuid):
3037 """Returns true if an instance has a finished migration."""
3038 try:
3039- db.migration_get_by_instance_and_status(context, instance_uuid,
3040- 'finished')
3041+ self.db.migration_get_by_instance_and_status(context,
3042+ instance_uuid,
3043+ 'finished')
3044 return True
3045 except exception.NotFound:
3046 return False
3047@@ -566,14 +575,15 @@
3048 :param context: the security context
3049 """
3050 try:
3051- db.security_group_get_by_name(context, context.project_id,
3052- 'default')
3053+ self.db.security_group_get_by_name(context,
3054+ context.project_id,
3055+ 'default')
3056 except exception.NotFound:
3057 values = {'name': 'default',
3058 'description': 'default',
3059 'user_id': context.user_id,
3060 'project_id': context.project_id}
3061- db.security_group_create(context, values)
3062+ self.db.security_group_create(context, values)
3063
3064 def trigger_security_group_rules_refresh(self, context, security_group_id):
3065 """Called when a rule is added to or removed from a security_group."""
3066@@ -638,7 +648,7 @@
3067 """Called when a rule is added to or removed from a security_group"""
3068
3069 hosts = [x['host'] for (x, idx)
3070- in db.service_get_all_compute_sorted(context)]
3071+ in self.db.service_get_all_compute_sorted(context)]
3072 for host in hosts:
3073 rpc.cast(context,
3074 self.db.queue_get_for(context, FLAGS.compute_topic, host),
3075@@ -666,11 +676,11 @@
3076
3077 def add_security_group(self, context, instance_id, security_group_name):
3078 """Add security group to the instance"""
3079- security_group = db.security_group_get_by_name(context,
3080- context.project_id,
3081- security_group_name)
3082+ security_group = self.db.security_group_get_by_name(context,
3083+ context.project_id,
3084+ security_group_name)
3085 # check if the server exists
3086- inst = db.instance_get(context, instance_id)
3087+ inst = self.db.instance_get(context, instance_id)
3088 #check if the security group is associated with the server
3089 if self._is_security_group_associated_with_server(security_group,
3090 instance_id):
3091@@ -682,21 +692,21 @@
3092 if inst['state'] != power_state.RUNNING:
3093 raise exception.InstanceNotRunning(instance_id=instance_id)
3094
3095- db.instance_add_security_group(context.elevated(),
3096- instance_id,
3097- security_group['id'])
3098+ self.db.instance_add_security_group(context.elevated(),
3099+ instance_id,
3100+ security_group['id'])
3101 rpc.cast(context,
3102- db.queue_get_for(context, FLAGS.compute_topic, inst['host']),
3103+ self.db.queue_get_for(context, FLAGS.compute_topic, inst['host']),
3104 {"method": "refresh_security_group_rules",
3105 "args": {"security_group_id": security_group['id']}})
3106
3107 def remove_security_group(self, context, instance_id, security_group_name):
3108 """Remove the security group associated with the instance"""
3109- security_group = db.security_group_get_by_name(context,
3110- context.project_id,
3111- security_group_name)
3112+ security_group = self.db.security_group_get_by_name(context,
3113+ context.project_id,
3114+ security_group_name)
3115 # check if the server exists
3116- inst = db.instance_get(context, instance_id)
3117+ inst = self.db.instance_get(context, instance_id)
3118 #check if the security group is associated with the server
3119 if not self._is_security_group_associated_with_server(security_group,
3120 instance_id):
3121@@ -708,11 +718,11 @@
3122 if inst['state'] != power_state.RUNNING:
3123 raise exception.InstanceNotRunning(instance_id=instance_id)
3124
3125- db.instance_remove_security_group(context.elevated(),
3126- instance_id,
3127- security_group['id'])
3128+ self.db.instance_remove_security_group(context.elevated(),
3129+ instance_id,
3130+ security_group['id'])
3131 rpc.cast(context,
3132- db.queue_get_for(context, FLAGS.compute_topic, inst['host']),
3133+ self.db.queue_get_for(context, FLAGS.compute_topic, inst['host']),
3134 {"method": "refresh_security_group_rules",
3135 "args": {"security_group_id": security_group['id']}})
3136
3137@@ -750,10 +760,8 @@
3138 return
3139
3140 self.update(context,
3141- instance['id'],
3142- state_description='terminating',
3143- state=0,
3144- terminated_at=utils.utcnow())
3145+ instance_id,
3146+ task_state=task_states.DELETING)
3147
3148 host = instance['host']
3149 if host:
3150@@ -773,9 +781,9 @@
3151 return
3152
3153 self.update(context,
3154- instance['id'],
3155- state_description='stopping',
3156- state=power_state.NOSTATE,
3157+ instance_id,
3158+ vm_state=vm_states.ACTIVE,
3159+ task_state=task_states.STOPPING,
3160 terminated_at=utils.utcnow())
3161
3162 host = instance['host']
3163@@ -787,12 +795,18 @@
3164 """Start an instance."""
3165 LOG.debug(_("Going to try to start %s"), instance_id)
3166 instance = self._get_instance(context, instance_id, 'starting')
3167- if instance['state_description'] != 'stopped':
3168- _state_description = instance['state_description']
3169+ vm_state = instance["vm_state"]
3170+
3171+ if vm_state != vm_states.STOPPED:
3172 LOG.warning(_("Instance %(instance_id)s is not "
3173- "stopped(%(_state_description)s)") % locals())
3174+ "stopped. (%(vm_state)s)") % locals())
3175 return
3176
3177+ self.update(context,
3178+ instance_id,
3179+ vm_state=vm_states.STOPPED,
3180+ task_state=task_states.STARTING)
3181+
3182 # TODO(yamahata): injected_files isn't supported right now.
3183 # It is used only for osapi. not for ec2 api.
3184 # availability_zone isn't used by run_instance.
3185@@ -802,6 +816,15 @@
3186 "args": {"topic": FLAGS.compute_topic,
3187 "instance_id": instance_id}})
3188
3189+ def get_active_by_window(self, context, begin, end=None, project_id=None):
3190+ """Get instances that were continuously active over a window."""
3191+ return self.db.instance_get_active_by_window(context, begin, end,
3192+ project_id)
3193+
3194+ def get_instance_type(self, context, instance_type_id):
3195+ """Get an instance type by instance type id."""
3196+ return self.db.instance_type_get(context, instance_type_id)
3197+
3198 def get(self, context, instance_id):
3199 """Get a single instance with the given instance_id."""
3200 # NOTE(sirp): id used to be exclusively integer IDs; now we're
3201@@ -854,6 +877,7 @@
3202 'image': 'image_ref',
3203 'name': 'display_name',
3204 'instance_name': 'name',
3205+ 'tenant_id': 'project_id',
3206 'recurse_zones': None,
3207 'flavor': _remap_flavor_filter,
3208 'fixed_ip': _remap_fixed_ip_filter}
3209@@ -1001,7 +1025,7 @@
3210 :param extra_properties: dict of extra image properties to include
3211
3212 """
3213- instance = db.api.instance_get(context, instance_id)
3214+ instance = self.db.instance_get(context, instance_id)
3215 properties = {'instance_uuid': instance['uuid'],
3216 'user_id': str(context.user_id),
3217 'image_state': 'creating',
3218@@ -1020,32 +1044,39 @@
3219 @scheduler_api.reroute_compute("reboot")
3220 def reboot(self, context, instance_id):
3221 """Reboot the given instance."""
3222+ self.update(context,
3223+ instance_id,
3224+ vm_state=vm_states.ACTIVE,
3225+ task_state=task_states.REBOOTING)
3226 self._cast_compute_message('reboot_instance', context, instance_id)
3227
3228 @scheduler_api.reroute_compute("rebuild")
3229 def rebuild(self, context, instance_id, image_href, admin_password,
3230 name=None, metadata=None, files_to_inject=None):
3231 """Rebuild the given instance with the provided metadata."""
3232- instance = db.api.instance_get(context, instance_id)
3233+ instance = self.db.instance_get(context, instance_id)
3234+ name = name or instance["display_name"]
3235
3236- if instance["state"] == power_state.BUILDING:
3237- msg = _("Instance already building")
3238- raise exception.BuildInProgress(msg)
3239+ if instance["vm_state"] != vm_states.ACTIVE:
3240+ msg = _("Instance must be active to rebuild.")
3241+ raise exception.RebuildRequiresActiveInstance(msg)
3242
3243 files_to_inject = files_to_inject or []
3244+ metadata = metadata or {}
3245+
3246 self._check_injected_file_quota(context, files_to_inject)
3247+ self._check_metadata_properties_quota(context, metadata)
3248
3249- values = {}
3250- if metadata is not None:
3251- self._check_metadata_properties_quota(context, metadata)
3252- values['metadata'] = metadata
3253- if name is not None:
3254- values['display_name'] = name
3255- self.db.instance_update(context, instance_id, values)
3256+ self.update(context,
3257+ instance_id,
3258+ metadata=metadata,
3259+ display_name=name,
3260+ image_ref=image_href,
3261+ vm_state=vm_states.ACTIVE,
3262+ task_state=task_states.REBUILDING)
3263
3264 rebuild_params = {
3265 "new_pass": admin_password,
3266- "image_ref": image_href,
3267 "injected_files": files_to_inject,
3268 }
3269
3270@@ -1066,6 +1097,11 @@
3271 raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
3272 status='finished')
3273
3274+ self.update(context,
3275+ instance_id,
3276+ vm_state=vm_states.ACTIVE,
3277+ task_state=None)
3278+
3279 params = {'migration_id': migration_ref['id']}
3280 self._cast_compute_message('revert_resize', context,
3281 instance_ref['uuid'],
3282@@ -1086,6 +1122,12 @@
3283 if not migration_ref:
3284 raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
3285 status='finished')
3286+
3287+ self.update(context,
3288+ instance_id,
3289+ vm_state=vm_states.ACTIVE,
3290+ task_state=None)
3291+
3292 params = {'migration_id': migration_ref['id']}
3293 self._cast_compute_message('confirm_resize', context,
3294 instance_ref['uuid'],
3295@@ -1131,6 +1173,11 @@
3296 if (current_memory_mb == new_memory_mb) and flavor_id:
3297 raise exception.CannotResizeToSameSize()
3298
3299+ self.update(context,
3300+ instance_id,
3301+ vm_state=vm_states.RESIZING,
3302+ task_state=task_states.RESIZE_PREP)
3303+
3304 instance_ref = self._get_instance(context, instance_id, 'resize')
3305 self._cast_scheduler_message(context,
3306 {"method": "prep_resize",
3307@@ -1164,11 +1211,19 @@
3308 @scheduler_api.reroute_compute("pause")
3309 def pause(self, context, instance_id):
3310 """Pause the given instance."""
3311+ self.update(context,
3312+ instance_id,
3313+ vm_state=vm_states.ACTIVE,
3314+ task_state=task_states.PAUSING)
3315 self._cast_compute_message('pause_instance', context, instance_id)
3316
3317 @scheduler_api.reroute_compute("unpause")
3318 def unpause(self, context, instance_id):
3319 """Unpause the given instance."""
3320+ self.update(context,
3321+ instance_id,
3322+ vm_state=vm_states.PAUSED,
3323+ task_state=task_states.UNPAUSING)
3324 self._cast_compute_message('unpause_instance', context, instance_id)
3325
3326 def _call_compute_message_for_host(self, action, context, host, params):
3327@@ -1201,21 +1256,37 @@
3328 @scheduler_api.reroute_compute("suspend")
3329 def suspend(self, context, instance_id):
3330 """Suspend the given instance."""
3331+ self.update(context,
3332+ instance_id,
3333+ vm_state=vm_states.ACTIVE,
3334+ task_state=task_states.SUSPENDING)
3335 self._cast_compute_message('suspend_instance', context, instance_id)
3336
3337 @scheduler_api.reroute_compute("resume")
3338 def resume(self, context, instance_id):
3339 """Resume the given instance."""
3340+ self.update(context,
3341+ instance_id,
3342+ vm_state=vm_states.SUSPENDED,
3343+ task_state=task_states.RESUMING)
3344 self._cast_compute_message('resume_instance', context, instance_id)
3345
3346 @scheduler_api.reroute_compute("rescue")
3347 def rescue(self, context, instance_id):
3348 """Rescue the given instance."""
3349+ self.update(context,
3350+ instance_id,
3351+ vm_state=vm_states.ACTIVE,
3352+ task_state=task_states.RESCUING)
3353 self._cast_compute_message('rescue_instance', context, instance_id)
3354
3355 @scheduler_api.reroute_compute("unrescue")
3356 def unrescue(self, context, instance_id):
3357 """Unrescue the given instance."""
3358+ self.update(context,
3359+ instance_id,
3360+ vm_state=vm_states.RESCUED,
3361+ task_state=task_states.UNRESCUING)
3362 self._cast_compute_message('unrescue_instance', context, instance_id)
3363
3364 @scheduler_api.reroute_compute("set_admin_password")
3365
3366=== modified file 'nova/compute/manager.py'
3367--- nova/compute/manager.py 2011-08-24 14:45:53 +0000
3368+++ nova/compute/manager.py 2011-09-09 09:29:27 +0000
3369@@ -56,6 +56,8 @@
3370 from nova import utils
3371 from nova import volume
3372 from nova.compute import power_state
3373+from nova.compute import task_states
3374+from nova.compute import vm_states
3375 from nova.notifier import api as notifier
3376 from nova.compute.utils import terminate_volumes
3377 from nova.virt import driver
3378@@ -146,6 +148,10 @@
3379 super(ComputeManager, self).__init__(service_name="compute",
3380 *args, **kwargs)
3381
3382+ def _instance_update(self, context, instance_id, **kwargs):
3383+ """Update an instance in the database using kwargs as value."""
3384+ return self.db.instance_update(context, instance_id, kwargs)
3385+
3386 def init_host(self):
3387 """Initialization for a standalone compute service."""
3388 self.driver.init_host(host=self.host)
3389@@ -153,8 +159,8 @@
3390 instances = self.db.instance_get_all_by_host(context, self.host)
3391 for instance in instances:
3392 inst_name = instance['name']
3393- db_state = instance['state']
3394- drv_state = self._update_state(context, instance['id'])
3395+ db_state = instance['power_state']
3396+ drv_state = self._get_power_state(context, instance)
3397
3398 expect_running = db_state == power_state.RUNNING \
3399 and drv_state != db_state
3400@@ -177,34 +183,13 @@
3401 LOG.warning(_('Hypervisor driver does not '
3402 'support firewall rules'))
3403
3404- def _update_state(self, context, instance_id, state=None):
3405- """Update the state of an instance from the driver info."""
3406- instance_ref = self.db.instance_get(context, instance_id)
3407-
3408- if state is None:
3409- try:
3410- LOG.debug(_('Checking state of %s'), instance_ref['name'])
3411- info = self.driver.get_info(instance_ref['name'])
3412- except exception.NotFound:
3413- info = None
3414-
3415- if info is not None:
3416- state = info['state']
3417- else:
3418- state = power_state.FAILED
3419-
3420- self.db.instance_set_state(context, instance_id, state)
3421- return state
3422-
3423- def _update_launched_at(self, context, instance_id, launched_at=None):
3424- """Update the launched_at parameter of the given instance."""
3425- data = {'launched_at': launched_at or utils.utcnow()}
3426- self.db.instance_update(context, instance_id, data)
3427-
3428- def _update_image_ref(self, context, instance_id, image_ref):
3429- """Update the image_id for the given instance."""
3430- data = {'image_ref': image_ref}
3431- self.db.instance_update(context, instance_id, data)
3432+ def _get_power_state(self, context, instance):
3433+ """Retrieve the power state for the given instance."""
3434+ LOG.debug(_('Checking state of %s'), instance['name'])
3435+ try:
3436+ return self.driver.get_info(instance['name'])["state"]
3437+ except exception.NotFound:
3438+ return power_state.FAILED
3439
3440 def get_console_topic(self, context, **kwargs):
3441 """Retrieves the console host for a project on this host.
3442@@ -256,11 +241,6 @@
3443
3444 def _setup_block_device_mapping(self, context, instance_id):
3445 """setup volumes for block device mapping"""
3446- self.db.instance_set_state(context,
3447- instance_id,
3448- power_state.NOSTATE,
3449- 'block_device_mapping')
3450-
3451 volume_api = volume.API()
3452 block_device_mapping = []
3453 swap = None
3454@@ -394,17 +374,12 @@
3455 updates = {}
3456 updates['host'] = self.host
3457 updates['launched_on'] = self.host
3458- instance = self.db.instance_update(context,
3459- instance_id,
3460- updates)
3461+ updates['vm_state'] = vm_states.BUILDING
3462+ updates['task_state'] = task_states.NETWORKING
3463+ instance = self.db.instance_update(context, instance_id, updates)
3464 instance['injected_files'] = kwargs.get('injected_files', [])
3465 instance['admin_pass'] = kwargs.get('admin_password', None)
3466
3467- self.db.instance_set_state(context,
3468- instance_id,
3469- power_state.NOSTATE,
3470- 'networking')
3471-
3472 is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
3473 try:
3474 # NOTE(vish): This could be a cast because we don't do anything
3475@@ -423,6 +398,11 @@
3476 # all vif creation and network injection, maybe this is correct
3477 network_info = []
3478
3479+ self._instance_update(context,
3480+ instance_id,
3481+ vm_state=vm_states.BUILDING,
3482+ task_state=task_states.BLOCK_DEVICE_MAPPING)
3483+
3484 (swap, ephemerals,
3485 block_device_mapping) = self._setup_block_device_mapping(
3486 context, instance_id)
3487@@ -432,9 +412,12 @@
3488 'ephemerals': ephemerals,
3489 'block_device_mapping': block_device_mapping}
3490
3491+ self._instance_update(context,
3492+ instance_id,
3493+ vm_state=vm_states.BUILDING,
3494+ task_state=task_states.SPAWNING)
3495+
3496 # TODO(vish) check to make sure the availability zone matches
3497- self._update_state(context, instance_id, power_state.BUILDING)
3498-
3499 try:
3500 self.driver.spawn(context, instance,
3501 network_info, block_device_info)
3502@@ -443,13 +426,21 @@
3503 "virtualization enabled in the BIOS? Details: "
3504 "%(ex)s") % locals()
3505 LOG.exception(msg)
3506-
3507- self._update_launched_at(context, instance_id)
3508- self._update_state(context, instance_id)
3509+ return
3510+
3511+ current_power_state = self._get_power_state(context, instance)
3512+ self._instance_update(context,
3513+ instance_id,
3514+ power_state=current_power_state,
3515+ vm_state=vm_states.ACTIVE,
3516+ task_state=None,
3517+ launched_at=utils.utcnow())
3518+
3519 usage_info = utils.usage_from_instance(instance)
3520 notifier.notify('compute.%s' % self.host,
3521 'compute.instance.create',
3522 notifier.INFO, usage_info)
3523+
3524 except exception.InstanceNotFound:
3525 # FIXME(wwolf): We are just ignoring InstanceNotFound
3526 # exceptions here in case the instance was immediately
3527@@ -485,8 +476,7 @@
3528 for volume in volumes:
3529 self._detach_volume(context, instance_id, volume['id'], False)
3530
3531- if (instance['state'] == power_state.SHUTOFF and
3532- instance['state_description'] != 'stopped'):
3533+ if instance['power_state'] == power_state.SHUTOFF:
3534 self.db.instance_destroy(context, instance_id)
3535 raise exception.Error(_('trying to destroy already destroyed'
3536 ' instance: %s') % instance_id)
3537@@ -501,9 +491,14 @@
3538 """Terminate an instance on this host."""
3539 self._shutdown_instance(context, instance_id, 'Terminating')
3540 instance = self.db.instance_get(context.elevated(), instance_id)
3541+ self._instance_update(context,
3542+ instance_id,
3543+ vm_state=vm_states.DELETED,
3544+ task_state=None,
3545+ terminated_at=utils.utcnow())
3546
3547- # TODO(ja): should we keep it in a terminated state for a bit?
3548 self.db.instance_destroy(context, instance_id)
3549+
3550 usage_info = utils.usage_from_instance(instance)
3551 notifier.notify('compute.%s' % self.host,
3552 'compute.instance.delete',
3553@@ -514,7 +509,10 @@
3554 def stop_instance(self, context, instance_id):
3555 """Stopping an instance on this host."""
3556 self._shutdown_instance(context, instance_id, 'Stopping')
3557- # instance state will be updated to stopped by _poll_instance_states()
3558+ self._instance_update(context,
3559+ instance_id,
3560+ vm_state=vm_states.STOPPED,
3561+ task_state=None)
3562
3563 @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
3564 @checks_instance_lock
3565@@ -526,7 +524,7 @@
3566
3567 :param context: `nova.RequestContext` object
3568 :param instance_id: Instance identifier (integer)
3569- :param image_ref: Image identifier (href or integer)
3570+ :param injected_files: Files to inject
3571 :param new_pass: password to set on rebuilt instance
3572 """
3573 context = context.elevated()
3574@@ -534,29 +532,46 @@
3575 instance_ref = self.db.instance_get(context, instance_id)
3576 LOG.audit(_("Rebuilding instance %s"), instance_id, context=context)
3577
3578- self._update_state(context, instance_id, power_state.BUILDING)
3579+ current_power_state = self._get_power_state(context, instance_ref)
3580+ self._instance_update(context,
3581+ instance_id,
3582+ power_state=current_power_state,
3583+ vm_state=vm_states.REBUILDING,
3584+ task_state=None)
3585
3586 network_info = self._get_instance_nw_info(context, instance_ref)
3587-
3588 self.driver.destroy(instance_ref, network_info)
3589- image_ref = kwargs.get('image_ref')
3590- instance_ref.image_ref = image_ref
3591+
3592+ self._instance_update(context,
3593+ instance_id,
3594+ vm_state=vm_states.REBUILDING,
3595+ task_state=task_states.BLOCK_DEVICE_MAPPING)
3596+
3597 instance_ref.injected_files = kwargs.get('injected_files', [])
3598 network_info = self.network_api.get_instance_nw_info(context,
3599 instance_ref)
3600 bd_mapping = self._setup_block_device_mapping(context, instance_id)
3601
3602+ self._instance_update(context,
3603+ instance_id,
3604+ vm_state=vm_states.REBUILDING,
3605+ task_state=task_states.SPAWNING)
3606+
3607 # pull in new password here since the original password isn't in the db
3608 instance_ref.admin_pass = kwargs.get('new_pass',
3609 utils.generate_password(FLAGS.password_length))
3610
3611 self.driver.spawn(context, instance_ref, network_info, bd_mapping)
3612
3613- self._update_image_ref(context, instance_id, image_ref)
3614- self._update_launched_at(context, instance_id)
3615- self._update_state(context, instance_id)
3616- usage_info = utils.usage_from_instance(instance_ref,
3617- image_ref=image_ref)
3618+ current_power_state = self._get_power_state(context, instance_ref)
3619+ self._instance_update(context,
3620+ instance_id,
3621+ power_state=current_power_state,
3622+ vm_state=vm_states.ACTIVE,
3623+ task_state=None,
3624+ launched_at=utils.utcnow())
3625+
3626+ usage_info = utils.usage_from_instance(instance_ref)
3627 notifier.notify('compute.%s' % self.host,
3628 'compute.instance.rebuild',
3629 notifier.INFO,
3630@@ -566,26 +581,34 @@
3631 @checks_instance_lock
3632 def reboot_instance(self, context, instance_id):
3633 """Reboot an instance on this host."""
3634- context = context.elevated()
3635- self._update_state(context, instance_id)
3636- instance_ref = self.db.instance_get(context, instance_id)
3637 LOG.audit(_("Rebooting instance %s"), instance_id, context=context)
3638-
3639- if instance_ref['state'] != power_state.RUNNING:
3640- state = instance_ref['state']
3641+ context = context.elevated()
3642+ instance_ref = self.db.instance_get(context, instance_id)
3643+
3644+ current_power_state = self._get_power_state(context, instance_ref)
3645+ self._instance_update(context,
3646+ instance_id,
3647+ power_state=current_power_state,
3648+ vm_state=vm_states.ACTIVE,
3649+ task_state=task_states.REBOOTING)
3650+
3651+ if instance_ref['power_state'] != power_state.RUNNING:
3652+ state = instance_ref['power_state']
3653 running = power_state.RUNNING
3654 LOG.warn(_('trying to reboot a non-running '
3655 'instance: %(instance_id)s (state: %(state)s '
3656 'expected: %(running)s)') % locals(),
3657 context=context)
3658
3659- self.db.instance_set_state(context,
3660- instance_id,
3661- power_state.NOSTATE,
3662- 'rebooting')
3663 network_info = self._get_instance_nw_info(context, instance_ref)
3664 self.driver.reboot(instance_ref, network_info)
3665- self._update_state(context, instance_id)
3666+
3667+ current_power_state = self._get_power_state(context, instance_ref)
3668+ self._instance_update(context,
3669+ instance_id,
3670+ power_state=current_power_state,
3671+ vm_state=vm_states.ACTIVE,
3672+ task_state=None)
3673
3674 @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
3675 def snapshot_instance(self, context, instance_id, image_id,
3676@@ -601,37 +624,45 @@
3677 :param rotation: int representing how many backups to keep around;
3678 None if rotation shouldn't be used (as in the case of snapshots)
3679 """
3680+ if image_type == "snapshot":
3681+ task_state = task_states.IMAGE_SNAPSHOT
3682+ elif image_type == "backup":
3683+ task_state = task_states.IMAGE_BACKUP
3684+ else:
3685+ raise Exception(_('Image type not recognized %s') % image_type)
3686+
3687 context = context.elevated()
3688 instance_ref = self.db.instance_get(context, instance_id)
3689
3690- #NOTE(sirp): update_state currently only refreshes the state field
3691- # if we add is_snapshotting, we will need this refreshed too,
3692- # potentially?
3693- self._update_state(context, instance_id)
3694+ current_power_state = self._get_power_state(context, instance_ref)
3695+ self._instance_update(context,
3696+ instance_id,
3697+ power_state=current_power_state,
3698+ vm_state=vm_states.ACTIVE,
3699+ task_state=task_state)
3700
3701 LOG.audit(_('instance %s: snapshotting'), instance_id,
3702 context=context)
3703- if instance_ref['state'] != power_state.RUNNING:
3704- state = instance_ref['state']
3705+
3706+ if instance_ref['power_state'] != power_state.RUNNING:
3707+ state = instance_ref['power_state']
3708 running = power_state.RUNNING
3709 LOG.warn(_('trying to snapshot a non-running '
3710 'instance: %(instance_id)s (state: %(state)s '
3711 'expected: %(running)s)') % locals())
3712
3713 self.driver.snapshot(context, instance_ref, image_id)
3714-
3715- if image_type == 'snapshot':
3716- if rotation:
3717- raise exception.ImageRotationNotAllowed()
3718+ self._instance_update(context, instance_id, task_state=None)
3719+
3720+ if image_type == 'snapshot' and rotation:
3721+ raise exception.ImageRotationNotAllowed()
3722+
3723+ elif image_type == 'backup' and rotation:
3724+ instance_uuid = instance_ref['uuid']
3725+ self.rotate_backups(context, instance_uuid, backup_type, rotation)
3726+
3727 elif image_type == 'backup':
3728- if rotation:
3729- instance_uuid = instance_ref['uuid']
3730- self.rotate_backups(context, instance_uuid, backup_type,
3731- rotation)
3732- else:
3733- raise exception.RotationRequiredForBackup()
3734- else:
3735- raise Exception(_('Image type not recognized %s') % image_type)
3736+ raise exception.RotationRequiredForBackup()
3737
3738 def rotate_backups(self, context, instance_uuid, backup_type, rotation):
3739 """Delete excess backups associated to an instance.
3740@@ -699,7 +730,7 @@
3741 for i in xrange(max_tries):
3742 instance_ref = self.db.instance_get(context, instance_id)
3743 instance_id = instance_ref["id"]
3744- instance_state = instance_ref["state"]
3745+ instance_state = instance_ref["power_state"]
3746 expected_state = power_state.RUNNING
3747
3748 if instance_state != expected_state:
3749@@ -734,7 +765,7 @@
3750 context = context.elevated()
3751 instance_ref = self.db.instance_get(context, instance_id)
3752 instance_id = instance_ref['id']
3753- instance_state = instance_ref['state']
3754+ instance_state = instance_ref['power_state']
3755 expected_state = power_state.RUNNING
3756 if instance_state != expected_state:
3757 LOG.warn(_('trying to inject a file into a non-running '
3758@@ -752,7 +783,7 @@
3759 context = context.elevated()
3760 instance_ref = self.db.instance_get(context, instance_id)
3761 instance_id = instance_ref['id']
3762- instance_state = instance_ref['state']
3763+ instance_state = instance_ref['power_state']
3764 expected_state = power_state.RUNNING
3765 if instance_state != expected_state:
3766 LOG.warn(_('trying to update agent on a non-running '
3767@@ -767,40 +798,41 @@
3768 @checks_instance_lock
3769 def rescue_instance(self, context, instance_id):
3770 """Rescue an instance on this host."""
3771- context = context.elevated()
3772- instance_ref = self.db.instance_get(context, instance_id)
3773 LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
3774- self.db.instance_set_state(context,
3775- instance_id,
3776- power_state.NOSTATE,
3777- 'rescuing')
3778- _update_state = lambda result: self._update_state_callback(
3779- self, context, instance_id, result)
3780+ context = context.elevated()
3781+
3782+ instance_ref = self.db.instance_get(context, instance_id)
3783 network_info = self._get_instance_nw_info(context, instance_ref)
3784- self.driver.rescue(context, instance_ref, _update_state, network_info)
3785- self._update_state(context, instance_id)
3786+
3787+ # NOTE(blamar): None of the virt drivers use the 'callback' param
3788+ self.driver.rescue(context, instance_ref, None, network_info)
3789+
3790+ current_power_state = self._get_power_state(context, instance_ref)
3791+ self._instance_update(context,
3792+ instance_id,
3793+ vm_state=vm_states.RESCUED,
3794+ task_state=None,
3795+ power_state=current_power_state)
3796
3797 @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
3798 @checks_instance_lock
3799 def unrescue_instance(self, context, instance_id):
3800 """Rescue an instance on this host."""
3801- context = context.elevated()
3802- instance_ref = self.db.instance_get(context, instance_id)
3803 LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
3804- self.db.instance_set_state(context,
3805- instance_id,
3806- power_state.NOSTATE,
3807- 'unrescuing')
3808- _update_state = lambda result: self._update_state_callback(
3809- self, context, instance_id, result)
3810+ context = context.elevated()
3811+
3812+ instance_ref = self.db.instance_get(context, instance_id)
3813 network_info = self._get_instance_nw_info(context, instance_ref)
3814- self.driver.unrescue(instance_ref, _update_state, network_info)
3815- self._update_state(context, instance_id)
3816-
3817- @staticmethod
3818- def _update_state_callback(self, context, instance_id, result):
3819- """Update instance state when async task completes."""
3820- self._update_state(context, instance_id)
3821+
3822+ # NOTE(blamar): None of the virt drivers use the 'callback' param
3823+ self.driver.unrescue(instance_ref, None, network_info)
3824+
3825+ current_power_state = self._get_power_state(context, instance_ref)
3826+ self._instance_update(context,
3827+ instance_id,
3828+ vm_state=vm_states.ACTIVE,
3829+ task_state=None,
3830+ power_state=current_power_state)
3831
3832 @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
3833 @checks_instance_lock
3834@@ -859,11 +891,12 @@
3835
3836 # Just roll back the record. There's no need to resize down since
3837 # the 'old' VM already has the preferred attributes
3838- self.db.instance_update(context, instance_ref['uuid'],
3839- dict(memory_mb=instance_type['memory_mb'],
3840- vcpus=instance_type['vcpus'],
3841- local_gb=instance_type['local_gb'],
3842- instance_type_id=instance_type['id']))
3843+ self._instance_update(context,
3844+ instance_ref["uuid"],
3845+ memory_mb=instance_type['memory_mb'],
3846+ vcpus=instance_type['vcpus'],
3847+ local_gb=instance_type['local_gb'],
3848+ instance_type_id=instance_type['id'])
3849
3850 self.driver.revert_migration(instance_ref)
3851 self.db.migration_update(context, migration_id,
3852@@ -890,8 +923,11 @@
3853 instance_ref = self.db.instance_get_by_uuid(context, instance_id)
3854
3855 if instance_ref['host'] == FLAGS.host:
3856- raise exception.Error(_(
3857- 'Migration error: destination same as source!'))
3858+ self._instance_update(context,
3859+ instance_id,
3860+ vm_state=vm_states.ERROR)
3861+ msg = _('Migration error: destination same as source!')
3862+ raise exception.Error(msg)
3863
3864 old_instance_type = self.db.instance_type_get(context,
3865 instance_ref['instance_type_id'])
3866@@ -985,6 +1021,11 @@
3867 self.driver.finish_migration(context, instance_ref, disk_info,
3868 network_info, resize_instance)
3869
3870+ self._instance_update(context,
3871+ instance_id,
3872+ vm_state=vm_states.ACTIVE,
3873+ task_state=task_states.RESIZE_VERIFY)
3874+
3875 self.db.migration_update(context, migration_id,
3876 {'status': 'finished', })
3877
3878@@ -1016,35 +1057,35 @@
3879 @checks_instance_lock
3880 def pause_instance(self, context, instance_id):
3881 """Pause an instance on this host."""
3882- context = context.elevated()
3883- instance_ref = self.db.instance_get(context, instance_id)
3884 LOG.audit(_('instance %s: pausing'), instance_id, context=context)
3885- self.db.instance_set_state(context,
3886- instance_id,
3887- power_state.NOSTATE,
3888- 'pausing')
3889- self.driver.pause(instance_ref,
3890- lambda result: self._update_state_callback(self,
3891- context,
3892- instance_id,
3893- result))
3894+ context = context.elevated()
3895+
3896+ instance_ref = self.db.instance_get(context, instance_id)
3897+ self.driver.pause(instance_ref, lambda result: None)
3898+
3899+ current_power_state = self._get_power_state(context, instance_ref)
3900+ self._instance_update(context,
3901+ instance_id,
3902+ power_state=current_power_state,
3903+ vm_state=vm_states.PAUSED,
3904+ task_state=None)
3905
3906 @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
3907 @checks_instance_lock
3908 def unpause_instance(self, context, instance_id):
3909 """Unpause a paused instance on this host."""
3910- context = context.elevated()
3911- instance_ref = self.db.instance_get(context, instance_id)
3912 LOG.audit(_('instance %s: unpausing'), instance_id, context=context)
3913- self.db.instance_set_state(context,
3914- instance_id,
3915- power_state.NOSTATE,
3916- 'unpausing')
3917- self.driver.unpause(instance_ref,
3918- lambda result: self._update_state_callback(self,
3919- context,
3920- instance_id,
3921- result))
3922+ context = context.elevated()
3923+
3924+ instance_ref = self.db.instance_get(context, instance_id)
3925+ self.driver.unpause(instance_ref, lambda result: None)
3926+
3927+ current_power_state = self._get_power_state(context, instance_ref)
3928+ self._instance_update(context,
3929+ instance_id,
3930+ power_state=current_power_state,
3931+ vm_state=vm_states.ACTIVE,
3932+ task_state=None)
3933
3934 @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
3935 def host_power_action(self, context, host=None, action=None):
3936@@ -1060,7 +1101,7 @@
3937 def get_diagnostics(self, context, instance_id):
3938 """Retrieve diagnostics for an instance on this host."""
3939 instance_ref = self.db.instance_get(context, instance_id)
3940- if instance_ref["state"] == power_state.RUNNING:
3941+ if instance_ref["power_state"] == power_state.RUNNING:
3942 LOG.audit(_("instance %s: retrieving diagnostics"), instance_id,
3943 context=context)
3944 return self.driver.get_diagnostics(instance_ref)
3945@@ -1069,33 +1110,35 @@
3946 @checks_instance_lock
3947 def suspend_instance(self, context, instance_id):
3948 """Suspend the given instance."""
3949- context = context.elevated()
3950- instance_ref = self.db.instance_get(context, instance_id)
3951 LOG.audit(_('instance %s: suspending'), instance_id, context=context)
3952- self.db.instance_set_state(context, instance_id,
3953- power_state.NOSTATE,
3954- 'suspending')
3955- self.driver.suspend(instance_ref,
3956- lambda result: self._update_state_callback(self,
3957- context,
3958- instance_id,
3959- result))
3960+ context = context.elevated()
3961+
3962+ instance_ref = self.db.instance_get(context, instance_id)
3963+ self.driver.suspend(instance_ref, lambda result: None)
3964+
3965+ current_power_state = self._get_power_state(context, instance_ref)
3966+ self._instance_update(context,
3967+ instance_id,
3968+ power_state=current_power_state,
3969+ vm_state=vm_states.SUSPENDED,
3970+ task_state=None)
3971
3972 @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
3973 @checks_instance_lock
3974 def resume_instance(self, context, instance_id):
3975 """Resume the given suspended instance."""
3976- context = context.elevated()
3977- instance_ref = self.db.instance_get(context, instance_id)
3978 LOG.audit(_('instance %s: resuming'), instance_id, context=context)
3979- self.db.instance_set_state(context, instance_id,
3980- power_state.NOSTATE,
3981- 'resuming')
3982- self.driver.resume(instance_ref,
3983- lambda result: self._update_state_callback(self,
3984- context,
3985- instance_id,
3986- result))
3987+ context = context.elevated()
3988+
3989+ instance_ref = self.db.instance_get(context, instance_id)
3990+ self.driver.resume(instance_ref, lambda result: None)
3991+
3992+ current_power_state = self._get_power_state(context, instance_ref)
3993+ self._instance_update(context,
3994+ instance_id,
3995+ power_state=current_power_state,
3996+ vm_state=vm_states.ACTIVE,
3997+ task_state=None)
3998
3999 @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
4000 def lock_instance(self, context, instance_id):
4001@@ -1506,11 +1549,14 @@
4002 'block_migration': block_migration}})
4003
4004 # Restore instance state
4005- self.db.instance_update(ctxt,
4006- instance_ref['id'],
4007- {'state_description': 'running',
4008- 'state': power_state.RUNNING,
4009- 'host': dest})
4010+ current_power_state = self._get_power_state(ctxt, instance_ref)
4011+ self._instance_update(ctxt,
4012+ instance_ref["id"],
4013+ host=dest,
4014+ power_state=current_power_state,
4015+ vm_state=vm_states.ACTIVE,
4016+ task_state=None)
4017+
4018 # Restore volume state
4019 for volume_ref in instance_ref['volumes']:
4020 volume_id = volume_ref['id']
4021@@ -1556,11 +1602,11 @@
4022 This param specifies destination host.
4023 """
4024 host = instance_ref['host']
4025- self.db.instance_update(context,
4026- instance_ref['id'],
4027- {'state_description': 'running',
4028- 'state': power_state.RUNNING,
4029- 'host': host})
4030+ self._instance_update(context,
4031+ instance_ref['id'],
4032+ host=host,
4033+ vm_state=vm_states.ACTIVE,
4034+ task_state=None)
4035
4036 for volume_ref in instance_ref['volumes']:
4037 volume_id = volume_ref['id']
4038@@ -1608,10 +1654,9 @@
4039 error_list.append(ex)
4040
4041 try:
4042- self._poll_instance_states(context)
4043+ self._sync_power_states(context)
4044 except Exception as ex:
4045- LOG.warning(_("Error during instance poll: %s"),
4046- unicode(ex))
4047+ LOG.warning(_("Error during power_state sync: %s"), unicode(ex))
4048 error_list.append(ex)
4049
4050 return error_list
4051@@ -1626,68 +1671,40 @@
4052 self.update_service_capabilities(
4053 self.driver.get_host_stats(refresh=True))
4054
4055- def _poll_instance_states(self, context):
4056+ def _sync_power_states(self, context):
4057+ """Align power states between the database and the hypervisor.
4058+
4059+ The hypervisor is authoritative for the power_state data, so we
4060+ simply loop over all known instances for this host and update the
4061+ power_state according to the hypervisor. If the instance is not found
4062+ then it will be set to power_state.NOSTATE, because it doesn't exist
4063+ on the hypervisor.
4064+
4065+ """
4066 vm_instances = self.driver.list_instances_detail()
4067 vm_instances = dict((vm.name, vm) for vm in vm_instances)
4068-
4069- # Keep a list of VMs not in the DB, cross them off as we find them
4070- vms_not_found_in_db = list(vm_instances.keys())
4071-
4072 db_instances = self.db.instance_get_all_by_host(context, self.host)
4073
4074+ num_vm_instances = len(vm_instances)
4075+ num_db_instances = len(db_instances)
4076+
4077+ if num_vm_instances != num_db_instances:
4078+ LOG.info(_("Found %(num_db_instances)s in the database and "
4079+ "%(num_vm_instances)s on the hypervisor.") % locals())
4080+
4081 for db_instance in db_instances:
4082- name = db_instance['name']
4083- db_state = db_instance['state']
4084+ name = db_instance["name"]
4085+ db_power_state = db_instance['power_state']
4086 vm_instance = vm_instances.get(name)
4087
4088 if vm_instance is None:
4089- # NOTE(justinsb): We have to be very careful here, because a
4090- # concurrent operation could be in progress (e.g. a spawn)
4091- if db_state == power_state.BUILDING:
4092- # TODO(justinsb): This does mean that if we crash during a
4093- # spawn, the machine will never leave the spawning state,
4094- # but this is just the way nova is; this function isn't
4095- # trying to correct that problem.
4096- # We could have a separate task to correct this error.
4097- # TODO(justinsb): What happens during a live migration?
4098- LOG.info(_("Found instance '%(name)s' in DB but no VM. "
4099- "State=%(db_state)s, so assuming spawn is in "
4100- "progress.") % locals())
4101- vm_state = db_state
4102- else:
4103- LOG.info(_("Found instance '%(name)s' in DB but no VM. "
4104- "State=%(db_state)s, so setting state to "
4105- "shutoff.") % locals())
4106- vm_state = power_state.SHUTOFF
4107- if db_instance['state_description'] == 'stopping':
4108- self.db.instance_stop(context, db_instance['id'])
4109- continue
4110+ vm_power_state = power_state.NOSTATE
4111 else:
4112- vm_state = vm_instance.state
4113- vms_not_found_in_db.remove(name)
4114+ vm_power_state = vm_instance.state
4115
4116- if (db_instance['state_description'] in ['migrating', 'stopping']):
4117- # A situation which db record exists, but no instance"
4118- # sometimes occurs while live-migration at src compute,
4119- # this case should be ignored.
4120- LOG.debug(_("Ignoring %(name)s, as it's currently being "
4121- "migrated.") % locals())
4122+ if vm_power_state == db_power_state:
4123 continue
4124
4125- if vm_state != db_state:
4126- LOG.info(_("DB/VM state mismatch. Changing state from "
4127- "'%(db_state)s' to '%(vm_state)s'") % locals())
4128- self._update_state(context, db_instance['id'], vm_state)
4129-
4130- # NOTE(justinsb): We no longer auto-remove SHUTOFF instances
4131- # It's quite hard to get them back when we do.
4132-
4133- # Are there VMs not in the DB?
4134- for vm_not_found_in_db in vms_not_found_in_db:
4135- name = vm_not_found_in_db
4136-
4137- # We only care about instances that compute *should* know about
4138- if name.startswith("instance-"):
4139- # TODO(justinsb): What to do here? Adopt it? Shut it down?
4140- LOG.warning(_("Found VM not in DB: '%(name)s'. Ignoring")
4141- % locals())
4142+ self._instance_update(context,
4143+ db_instance["id"],
4144+ power_state=vm_power_state)
4145
4146=== added file 'nova/compute/task_states.py'
4147--- nova/compute/task_states.py 1970-01-01 00:00:00 +0000
4148+++ nova/compute/task_states.py 2011-09-09 09:29:27 +0000
4149@@ -0,0 +1,59 @@
4150+# vim: tabstop=4 shiftwidth=4 softtabstop=4
4151+
4152+# Copyright 2010 OpenStack LLC.
4153+# All Rights Reserved.
4154+#
4155+# Licensed under the Apache License, Version 2.0 (the "License"); you may
4156+# not use this file except in compliance with the License. You may obtain
4157+# a copy of the License at
4158+#
4159+# http://www.apache.org/licenses/LICENSE-2.0
4160+#
4161+# Unless required by applicable law or agreed to in writing, software
4162+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
4163+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
4164+# License for the specific language governing permissions and limitations
4165+# under the License.
4166+
4167+"""Possible task states for instances.
4168+
4169+Compute instance task states represent what is happening to the instance at the
4170+current moment. These tasks can be generic, such as 'spawning', or specific,
4171+such as 'block_device_mapping'. These task states allow for a better view into
4172+what an instance is doing and should be displayed to users/administrators as
4173+necessary.
4174+
4175+"""
4176+
4177+SCHEDULING = 'scheduling'
4178+BLOCK_DEVICE_MAPPING = 'block_device_mapping'
4179+NETWORKING = 'networking'
4180+SPAWNING = 'spawning'
4181+
4182+IMAGE_SNAPSHOT = 'image_snapshot'
4183+IMAGE_BACKUP = 'image_backup'
4184+
4185+UPDATING_PASSWORD = 'updating_password'
4186+
4187+RESIZE_PREP = 'resize_prep'
4188+RESIZE_MIGRATING = 'resize_migrating'
4189+RESIZE_MIGRATED = 'resize_migrated'
4190+RESIZE_FINISH = 'resize_finish'
4191+RESIZE_REVERTING = 'resize_reverting'
4192+RESIZE_CONFIRMING = 'resize_confirming'
4193+RESIZE_VERIFY = 'resize_verify'
4194+
4195+REBUILDING = 'rebuilding'
4196+
4197+REBOOTING = 'rebooting'
4198+PAUSING = 'pausing'
4199+UNPAUSING = 'unpausing'
4200+SUSPENDING = 'suspending'
4201+RESUMING = 'resuming'
4202+
4203+RESCUING = 'rescuing'
4204+UNRESCUING = 'unrescuing'
4205+
4206+DELETING = 'deleting'
4207+STOPPING = 'stopping'
4208+STARTING = 'starting'
4209
4210=== added file 'nova/compute/vm_states.py'
4211--- nova/compute/vm_states.py 1970-01-01 00:00:00 +0000
4212+++ nova/compute/vm_states.py 2011-09-09 09:29:27 +0000
4213@@ -0,0 +1,39 @@
4214+# vim: tabstop=4 shiftwidth=4 softtabstop=4
4215+
4216+# Copyright 2010 OpenStack LLC.
4217+# All Rights Reserved.
4218+#
4219+# Licensed under the Apache License, Version 2.0 (the "License"); you may
4220+# not use this file except in compliance with the License. You may obtain
4221+# a copy of the License at
4222+#
4223+# http://www.apache.org/licenses/LICENSE-2.0
4224+#
4225+# Unless required by applicable law or agreed to in writing, software
4226+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
4227+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
4228+# License for the specific language governing permissions and limitations
4229+# under the License.
4230+
4231+"""Possible vm states for instances.
4232+
4233+Compute instance vm states represent the state of an instance as it pertains to
4234+a user or administrator. When combined with task states (task_states.py), a
4235+better picture can be formed regarding the instance's health.
4236+
4237+"""
4238+
4239+ACTIVE = 'active'
4240+BUILDING = 'building'
4241+REBUILDING = 'rebuilding'
4242+
4243+PAUSED = 'paused'
4244+SUSPENDED = 'suspended'
4245+RESCUED = 'rescued'
4246+DELETED = 'deleted'
4247+STOPPED = 'stopped'
4248+
4249+MIGRATING = 'migrating'
4250+RESIZING = 'resizing'
4251+
4252+ERROR = 'error'
4253
4254=== modified file 'nova/context.py'
4255--- nova/context.py 2011-08-02 16:30:41 +0000
4256+++ nova/context.py 2011-09-09 09:29:27 +0000
4257@@ -38,7 +38,7 @@
4258 self.roles = roles or []
4259 self.is_admin = is_admin
4260 if self.is_admin is None:
4261- self.admin = 'admin' in self.roles
4262+ self.is_admin = 'admin' in [x.lower() for x in self.roles]
4263 self.read_deleted = read_deleted
4264 self.remote_address = remote_address
4265 if not timestamp:
4266
4267=== modified file 'nova/db/api.py'
4268--- nova/db/api.py 2011-08-22 23:35:09 +0000
4269+++ nova/db/api.py 2011-09-09 09:29:27 +0000
4270@@ -49,7 +49,8 @@
4271 'Template string to be used to generate instance names')
4272 flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x',
4273 'Template string to be used to generate snapshot names')
4274-
4275+flags.DEFINE_string('vsa_name_template', 'vsa-%08x',
4276+ 'Template string to be used to generate VSA names')
4277
4278 IMPL = utils.LazyPluggable(FLAGS['db_backend'],
4279 sqlalchemy='nova.db.sqlalchemy.api')
4280@@ -419,6 +420,11 @@
4281 return IMPL.virtual_interface_get_by_address(context, address)
4282
4283
4284+def virtual_interface_get_by_uuid(context, vif_uuid):
4285+ """Gets a virtual interface from the table filtering on vif uuid."""
4286+ return IMPL.virtual_interface_get_by_uuid(context, vif_uuid)
4287+
4288+
4289 def virtual_interface_get_by_fixed_ip(context, fixed_ip_id):
4290 """Gets the virtual interface fixed_ip is associated with."""
4291 return IMPL.virtual_interface_get_by_fixed_ip(context, fixed_ip_id)
4292@@ -495,9 +501,20 @@
4293 return IMPL.instance_get_all_by_filters(context, filters)
4294
4295
4296-def instance_get_active_by_window(context, begin, end=None):
4297- """Get instances active during a certain time window."""
4298- return IMPL.instance_get_active_by_window(context, begin, end)
4299+def instance_get_active_by_window(context, begin, end=None, project_id=None):
4300+ """Get instances active during a certain time window.
4301+
4302+ Specifying a project_id will filter for a certain project."""
4303+ return IMPL.instance_get_active_by_window(context, begin, end, project_id)
4304+
4305+
4306+def instance_get_active_by_window_joined(context, begin, end=None,
4307+ project_id=None):
4308+ """Get instances and joins active during a certain time window.
4309+
4310+ Specifying a project_id will filter for a certain project."""
4311+ return IMPL.instance_get_active_by_window_joined(context, begin, end,
4312+ project_id)
4313
4314
4315 def instance_get_all_by_user(context, user_id):
4316@@ -703,6 +720,11 @@
4317 return IMPL.network_get_by_bridge(context, bridge)
4318
4319
4320+def network_get_by_uuid(context, uuid):
4321+ """Get a network by uuid or raise if it does not exist."""
4322+ return IMPL.network_get_by_uuid(context, uuid)
4323+
4324+
4325 def network_get_by_cidr(context, cidr):
4326 """Get a network by cidr or raise if it does not exist"""
4327 return IMPL.network_get_by_cidr(context, cidr)
4328@@ -1436,3 +1458,112 @@
4329 key/value pairs specified in the extra specs dict argument"""
4330 IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id,
4331 extra_specs)
4332+
4333+
4334+##################
4335+
4336+
4337+def volume_metadata_get(context, volume_id):
4338+ """Get all metadata for a volume."""
4339+ return IMPL.volume_metadata_get(context, volume_id)
4340+
4341+
4342+def volume_metadata_delete(context, volume_id, key):
4343+ """Delete the given metadata item."""
4344+ IMPL.volume_metadata_delete(context, volume_id, key)
4345+
4346+
4347+def volume_metadata_update(context, volume_id, metadata, delete):
4348+ """Update metadata if it exists, otherwise create it."""
4349+ IMPL.volume_metadata_update(context, volume_id, metadata, delete)
4350+
4351+
4352+##################
4353+
4354+
4355+def volume_type_create(context, values):
4356+ """Create a new volume type."""
4357+ return IMPL.volume_type_create(context, values)
4358+
4359+
4360+def volume_type_get_all(context, inactive=False):
4361+ """Get all volume types."""
4362+ return IMPL.volume_type_get_all(context, inactive)
4363+
4364+
4365+def volume_type_get(context, id):
4366+ """Get volume type by id."""
4367+ return IMPL.volume_type_get(context, id)
4368+
4369+
4370+def volume_type_get_by_name(context, name):
4371+ """Get volume type by name."""
4372+ return IMPL.volume_type_get_by_name(context, name)
4373+
4374+
4375+def volume_type_destroy(context, name):
4376+ """Delete a volume type."""
4377+ return IMPL.volume_type_destroy(context, name)
4378+
4379+
4380+def volume_type_purge(context, name):
4381+ """Purges (removes) a volume type from DB.
4382+
4383+ Use volume_type_destroy for most cases
4384+
4385+ """
4386+ return IMPL.volume_type_purge(context, name)
4387+
4388+
4389+####################
4390+
4391+
4392+def volume_type_extra_specs_get(context, volume_type_id):
4393+ """Get all extra specs for a volume type."""
4394+ return IMPL.volume_type_extra_specs_get(context, volume_type_id)
4395+
4396+
4397+def volume_type_extra_specs_delete(context, volume_type_id, key):
4398+ """Delete the given extra specs item."""
4399+ IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
4400+
4401+
4402+def volume_type_extra_specs_update_or_create(context, volume_type_id,
4403+ extra_specs):
4404+ """Create or update volume type extra specs. This adds or modifies the
4405+ key/value pairs specified in the extra specs dict argument."""
4406+ IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id,
4407+ extra_specs)
4408+
4409+
4410+####################
4411+
4412+
4413+def vsa_create(context, values):
4414+ """Creates Virtual Storage Array record."""
4415+ return IMPL.vsa_create(context, values)
4416+
4417+
4418+def vsa_update(context, vsa_id, values):
4419+ """Updates Virtual Storage Array record."""
4420+ return IMPL.vsa_update(context, vsa_id, values)
4421+
4422+
4423+def vsa_destroy(context, vsa_id):
4424+ """Deletes Virtual Storage Array record."""
4425+ return IMPL.vsa_destroy(context, vsa_id)
4426+
4427+
4428+def vsa_get(context, vsa_id):
4429+ """Get Virtual Storage Array record by ID."""
4430+ return IMPL.vsa_get(context, vsa_id)
4431+
4432+
4433+def vsa_get_all(context):
4434+ """Get all Virtual Storage Array records."""
4435+ return IMPL.vsa_get_all(context)
4436+
4437+
4438+def vsa_get_all_by_project(context, project_id):
4439+ """Get all Virtual Storage Array records by project ID."""
4440+ return IMPL.vsa_get_all_by_project(context, project_id)
4441
4442=== modified file 'nova/db/sqlalchemy/api.py'
4443--- nova/db/sqlalchemy/api.py 2011-08-22 23:35:09 +0000
4444+++ nova/db/sqlalchemy/api.py 2011-09-09 09:29:27 +0000
4445@@ -28,6 +28,7 @@
4446 from nova import ipv6
4447 from nova import utils
4448 from nova import log as logging
4449+from nova.compute import vm_states
4450 from nova.db.sqlalchemy import models
4451 from nova.db.sqlalchemy.session import get_session
4452 from sqlalchemy import or_
4453@@ -35,6 +36,7 @@
4454 from sqlalchemy.orm import joinedload
4455 from sqlalchemy.orm import joinedload_all
4456 from sqlalchemy.sql import func
4457+from sqlalchemy.sql.expression import desc
4458 from sqlalchemy.sql.expression import literal_column
4459
4460 FLAGS = flags.FLAGS
4461@@ -132,6 +134,20 @@
4462 return wrapper
4463
4464
4465+def require_volume_exists(f):
4466+ """Decorator to require the specified volume to exist.
4467+
4468+ Requires the wrapped function to use context and volume_id as
4469+ its first two arguments.
4470+ """
4471+
4472+ def wrapper(context, volume_id, *args, **kwargs):
4473+ db.api.volume_get(context, volume_id)
4474+ return f(context, volume_id, *args, **kwargs)
4475+ wrapper.__name__ = f.__name__
4476+ return wrapper
4477+
4478+
4479 ###################
4480
4481
4482@@ -929,6 +945,22 @@
4483
4484
4485 @require_context
4486+def virtual_interface_get_by_uuid(context, vif_uuid):
4487+ """Gets a virtual interface from the table.
4488+
4489+ :param vif_uuid: the uuid of the interface you're looking to get
4490+ """
4491+ session = get_session()
4492+ vif_ref = session.query(models.VirtualInterface).\
4493+ filter_by(uuid=vif_uuid).\
4494+ options(joinedload('network')).\
4495+ options(joinedload('instance')).\
4496+ options(joinedload('fixed_ips')).\
4497+ first()
4498+ return vif_ref
4499+
4500+
4501+@require_context
4502 def virtual_interface_get_by_fixed_ip(context, fixed_ip_id):
4503 """Gets the virtual interface fixed_ip is associated with.
4504
4505@@ -1019,11 +1051,11 @@
4506 ###################
4507
4508
4509-def _metadata_refs(metadata_dict):
4510+def _metadata_refs(metadata_dict, meta_class):
4511 metadata_refs = []
4512 if metadata_dict:
4513 for k, v in metadata_dict.iteritems():
4514- metadata_ref = models.InstanceMetadata()
4515+ metadata_ref = meta_class()
4516 metadata_ref['key'] = k
4517 metadata_ref['value'] = v
4518 metadata_refs.append(metadata_ref)
4519@@ -1037,8 +1069,8 @@
4520 context - request context object
4521 values - dict containing column values.
4522 """
4523- values['metadata'] = _metadata_refs(values.get('metadata'))
4524-
4525+ values['metadata'] = _metadata_refs(values.get('metadata'),
4526+ models.InstanceMetadata)
4527 instance_ref = models.Instance()
4528 instance_ref['uuid'] = str(utils.gen_uuid())
4529
4530@@ -1088,12 +1120,11 @@
4531 def instance_stop(context, instance_id):
4532 session = get_session()
4533 with session.begin():
4534- from nova.compute import power_state
4535 session.query(models.Instance).\
4536 filter_by(id=instance_id).\
4537 update({'host': None,
4538- 'state': power_state.SHUTOFF,
4539- 'state_description': 'stopped',
4540+ 'vm_state': vm_states.STOPPED,
4541+ 'task_state': None,
4542 'updated_at': literal_column('updated_at')})
4543 session.query(models.SecurityGroupInstanceAssociation).\
4544 filter_by(instance_id=instance_id).\
4545@@ -1236,12 +1267,17 @@
4546 options(joinedload_all('fixed_ips.network')).\
4547 options(joinedload('metadata')).\
4548 options(joinedload('instance_type')).\
4549- filter_by(deleted=can_read_deleted(context))
4550+ order_by(desc(models.Instance.created_at))
4551
4552 # Make a copy of the filters dictionary to use going forward, as we'll
4553 # be modifying it and we shouldn't affect the caller's use of it.
4554 filters = filters.copy()
4555
4556+ if 'changes-since' in filters:
4557+ changes_since = filters['changes-since']
4558+ query_prefix = query_prefix.\
4559+ filter(models.Instance.updated_at > changes_since)
4560+
4561 if not context.is_admin:
4562 # If we're not admin context, add appropriate filter..
4563 if context.project_id:
4564@@ -1252,7 +1288,7 @@
4565 # Filters for exact matches that we can do along with the SQL query...
4566 # For other filters that don't match this, we will do regexp matching
4567 exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
4568- 'state', 'instance_type_id', 'deleted']
4569+ 'vm_state', 'instance_type_id', 'deleted']
4570
4571 query_filters = [key for key in filters.iterkeys()
4572 if key in exact_match_filter_names]
4573@@ -1263,9 +1299,7 @@
4574 query_prefix = _exact_match_filter(query_prefix, filter_name,
4575 filters.pop(filter_name))
4576
4577- instances = query_prefix.\
4578- filter_by(deleted=can_read_deleted(context)).\
4579- all()
4580+ instances = query_prefix.all()
4581
4582 if not instances:
4583 return []
4584@@ -1292,21 +1326,40 @@
4585 return instances
4586
4587
4588+@require_context
4589+def instance_get_active_by_window(context, begin, end=None, project_id=None):
4590+ """Return instances that were continuously active over window."""
4591+ session = get_session()
4592+ query = session.query(models.Instance).\
4593+ filter(models.Instance.launched_at < begin)
4594+ if end:
4595+ query = query.filter(or_(models.Instance.terminated_at == None,
4596+ models.Instance.terminated_at > end))
4597+ else:
4598+ query = query.filter(models.Instance.terminated_at == None)
4599+ if project_id:
4600+ query = query.filter_by(project_id=project_id)
4601+ return query.all()
4602+
4603+
4604 @require_admin_context
4605-def instance_get_active_by_window(context, begin, end=None):
4606- """Return instances that were continuously active over the given window"""
4607+def instance_get_active_by_window_joined(context, begin, end=None,
4608+ project_id=None):
4609+ """Return instances and joins that were continuously active over window."""
4610 session = get_session()
4611 query = session.query(models.Instance).\
4612- options(joinedload_all('fixed_ips.floating_ips')).\
4613- options(joinedload('security_groups')).\
4614- options(joinedload_all('fixed_ips.network')).\
4615- options(joinedload('instance_type')).\
4616- filter(models.Instance.launched_at < begin)
4617+ options(joinedload_all('fixed_ips.floating_ips')).\
4618+ options(joinedload('security_groups')).\
4619+ options(joinedload_all('fixed_ips.network')).\
4620+ options(joinedload('instance_type')).\
4621+ filter(models.Instance.launched_at < begin)
4622 if end:
4623 query = query.filter(or_(models.Instance.terminated_at == None,
4624 models.Instance.terminated_at > end))
4625 else:
4626 query = query.filter(models.Instance.terminated_at == None)
4627+ if project_id:
4628+ query = query.filter_by(project_id=project_id)
4629 return query.all()
4630
4631
4632@@ -1470,18 +1523,6 @@
4633 return fixed_ip_refs[0].floating_ips[0]['address']
4634
4635
4636-@require_admin_context
4637-def instance_set_state(context, instance_id, state, description=None):
4638- # TODO(devcamcar): Move this out of models and into driver
4639- from nova.compute import power_state
4640- if not description:
4641- description = power_state.name(state)
4642- db.instance_update(context,
4643- instance_id,
4644- {'state': state,
4645- 'state_description': description})
4646-
4647-
4648 @require_context
4649 def instance_update(context, instance_id, values):
4650 session = get_session()
4651@@ -1833,6 +1874,19 @@
4652
4653
4654 @require_admin_context
4655+def network_get_by_uuid(context, uuid):
4656+ session = get_session()
4657+ result = session.query(models.Network).\
4658+ filter_by(uuid=uuid).\
4659+ filter_by(deleted=False).\
4660+ first()
4661+
4662+ if not result:
4663+ raise exception.NetworkNotFoundForUUID(uuid=uuid)
4664+ return result
4665+
4666+
4667+@require_admin_context
4668 def network_get_by_cidr(context, cidr):
4669 session = get_session()
4670 result = session.query(models.Network).\
4671@@ -2144,6 +2198,8 @@
4672
4673 @require_context
4674 def volume_create(context, values):
4675+ values['volume_metadata'] = _metadata_refs(values.get('metadata'),
4676+ models.VolumeMetadata)
4677 volume_ref = models.Volume()
4678 volume_ref.update(values)
4679
4680@@ -2180,6 +2236,11 @@
4681 session.query(models.IscsiTarget).\
4682 filter_by(volume_id=volume_id).\
4683 update({'volume_id': None})
4684+ session.query(models.VolumeMetadata).\
4685+ filter_by(volume_id=volume_id).\
4686+ update({'deleted': True,
4687+ 'deleted_at': utils.utcnow(),
4688+ 'updated_at': literal_column('updated_at')})
4689
4690
4691 @require_admin_context
4692@@ -2203,12 +2264,16 @@
4693 if is_admin_context(context):
4694 result = session.query(models.Volume).\
4695 options(joinedload('instance')).\
4696+ options(joinedload('volume_metadata')).\
4697+ options(joinedload('volume_type')).\
4698 filter_by(id=volume_id).\
4699 filter_by(deleted=can_read_deleted(context)).\
4700 first()
4701 elif is_user_context(context):
4702 result = session.query(models.Volume).\
4703 options(joinedload('instance')).\
4704+ options(joinedload('volume_metadata')).\
4705+ options(joinedload('volume_type')).\
4706 filter_by(project_id=context.project_id).\
4707 filter_by(id=volume_id).\
4708 filter_by(deleted=False).\
4709@@ -2224,6 +2289,8 @@
4710 session = get_session()
4711 return session.query(models.Volume).\
4712 options(joinedload('instance')).\
4713+ options(joinedload('volume_metadata')).\
4714+ options(joinedload('volume_type')).\
4715 filter_by(deleted=can_read_deleted(context)).\
4716 all()
4717
4718@@ -2233,6 +2300,8 @@
4719 session = get_session()
4720 return session.query(models.Volume).\
4721 options(joinedload('instance')).\
4722+ options(joinedload('volume_metadata')).\
4723+ options(joinedload('volume_type')).\
4724 filter_by(host=host).\
4725 filter_by(deleted=can_read_deleted(context)).\
4726 all()
4727@@ -2242,6 +2311,8 @@
4728 def volume_get_all_by_instance(context, instance_id):
4729 session = get_session()
4730 result = session.query(models.Volume).\
4731+ options(joinedload('volume_metadata')).\
4732+ options(joinedload('volume_type')).\
4733 filter_by(instance_id=instance_id).\
4734 filter_by(deleted=False).\
4735 all()
4736@@ -2257,6 +2328,8 @@
4737 session = get_session()
4738 return session.query(models.Volume).\
4739 options(joinedload('instance')).\
4740+ options(joinedload('volume_metadata')).\
4741+ options(joinedload('volume_type')).\
4742 filter_by(project_id=project_id).\
4743 filter_by(deleted=can_read_deleted(context)).\
4744 all()
4745@@ -2269,6 +2342,8 @@
4746 filter_by(id=volume_id).\
4747 filter_by(deleted=can_read_deleted(context)).\
4748 options(joinedload('instance')).\
4749+ options(joinedload('volume_metadata')).\
4750+ options(joinedload('volume_type')).\
4751 first()
4752 if not result:
4753 raise exception.VolumeNotFound(volume_id=volume_id)
4754@@ -2303,12 +2378,116 @@
4755 @require_context
4756 def volume_update(context, volume_id, values):
4757 session = get_session()
4758+ metadata = values.get('metadata')
4759+ if metadata is not None:
4760+ volume_metadata_update(context,
4761+ volume_id,
4762+ values.pop('metadata'),
4763+ delete=True)
4764 with session.begin():
4765 volume_ref = volume_get(context, volume_id, session=session)
4766 volume_ref.update(values)
4767 volume_ref.save(session=session)
4768
4769
4770+####################
4771+
4772+
4773+@require_context
4774+@require_volume_exists
4775+def volume_metadata_get(context, volume_id):
4776+ session = get_session()
4777+
4778+ meta_results = session.query(models.VolumeMetadata).\
4779+ filter_by(volume_id=volume_id).\
4780+ filter_by(deleted=False).\
4781+ all()
4782+
4783+ meta_dict = {}
4784+ for i in meta_results:
4785+ meta_dict[i['key']] = i['value']
4786+ return meta_dict
4787+
4788+
4789+@require_context
4790+@require_volume_exists
4791+def volume_metadata_delete(context, volume_id, key):
4792+ session = get_session()
4793+ session.query(models.VolumeMetadata).\
4794+ filter_by(volume_id=volume_id).\
4795+ filter_by(key=key).\
4796+ filter_by(deleted=False).\
4797+ update({'deleted': True,
4798+ 'deleted_at': utils.utcnow(),
4799+ 'updated_at': literal_column('updated_at')})
4800+
4801+
4802+@require_context
4803+@require_volume_exists
4804+def volume_metadata_delete_all(context, volume_id):
4805+ session = get_session()
4806+ session.query(models.VolumeMetadata).\
4807+ filter_by(volume_id=volume_id).\
4808+ filter_by(deleted=False).\
4809+ update({'deleted': True,
4810+ 'deleted_at': utils.utcnow(),
4811+ 'updated_at': literal_column('updated_at')})
4812+
4813+
4814+@require_context
4815+@require_volume_exists
4816+def volume_metadata_get_item(context, volume_id, key, session=None):
4817+ if not session:
4818+ session = get_session()
4819+
4820+ meta_result = session.query(models.VolumeMetadata).\
4821+ filter_by(volume_id=volume_id).\
4822+ filter_by(key=key).\
4823+ filter_by(deleted=False).\
4824+ first()
4825+
4826+ if not meta_result:
4827+ raise exception.VolumeMetadataNotFound(metadata_key=key,
4828+ volume_id=volume_id)
4829+ return meta_result
4830+
4831+
4832+@require_context
4833+@require_volume_exists
4834+def volume_metadata_update(context, volume_id, metadata, delete):
4835+ session = get_session()
4836+
4837+ # Set existing metadata to deleted if delete argument is True
4838+ if delete:
4839+ original_metadata = volume_metadata_get(context, volume_id)
4840+ for meta_key, meta_value in original_metadata.iteritems():
4841+ if meta_key not in metadata:
4842+ meta_ref = volume_metadata_get_item(context, volume_id,
4843+ meta_key, session)
4844+ meta_ref.update({'deleted': True})
4845+ meta_ref.save(session=session)
4846+
4847+ meta_ref = None
4848+
4849+ # Now update all existing items with new values, or create new meta objects
4850+ for meta_key, meta_value in metadata.iteritems():
4851+
4852+ # update the value whether it exists or not
4853+ item = {"value": meta_value}
4854+
4855+ try:
4856+ meta_ref = volume_metadata_get_item(context, volume_id,
4857+ meta_key, session)
4858+ except exception.VolumeMetadataNotFound, e:
4859+ meta_ref = models.VolumeMetadata()
4860+ item.update({"key": meta_key, "volume_id": volume_id})
4861+
4862+ meta_ref.update(item)
4863+ meta_ref.save(session=session)
4864+
4865+ return metadata
4866+
4867+
4868 ###################
4869
4870
4871@@ -3143,7 +3322,7 @@
4872
4873
4874 def _dict_with_extra_specs(inst_type_query):
4875- """Takes an instance type query returned by sqlalchemy
4876+ """Takes an instance OR volume type query returned by sqlalchemy
4877 and returns it as a dictionary, converting the extra_specs
4878 entry from a list of dicts:
4879
4880@@ -3525,3 +3704,278 @@
4881 "deleted": 0})
4882 spec_ref.save(session=session)
4883 return specs
4884+
4885+
4886+##################
4887+
4888+
4889+@require_admin_context
4890+def volume_type_create(_context, values):
4891+ """Create a new volume type. In order to pass in extra specs,
4892+ the values dict should contain a 'extra_specs' key/value pair:
4893+
4894+ {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
4895+
4896+ """
4897+ try:
4898+ specs = values.get('extra_specs')
4899+
4900+ values['extra_specs'] = _metadata_refs(values.get('extra_specs'),
4901+ models.VolumeTypeExtraSpecs)
4902+ volume_type_ref = models.VolumeTypes()
4903+ volume_type_ref.update(values)
4904+ volume_type_ref.save()
4905+ except Exception, e:
4906+ raise exception.DBError(e)
4907+ return volume_type_ref
4908+
4909+
4910+@require_context
4911+def volume_type_get_all(context, inactive=False, filters={}):
4912+ """
4913+ Returns a dict describing all volume_types with name as key.
4914+ """
4915+ session = get_session()
4916+ if inactive:
4917+ vol_types = session.query(models.VolumeTypes).\
4918+ options(joinedload('extra_specs')).\
4919+ order_by("name").\
4920+ all()
4921+ else:
4922+ vol_types = session.query(models.VolumeTypes).\
4923+ options(joinedload('extra_specs')).\
4924+ filter_by(deleted=False).\
4925+ order_by("name").\
4926+ all()
4927+ vol_dict = {}
4928+ if vol_types:
4929+ for i in vol_types:
4930+ vol_dict[i['name']] = _dict_with_extra_specs(i)
4931+ return vol_dict
4932+
4933+
4934+@require_context
4935+def volume_type_get(context, id):
4936+ """Returns a dict describing specific volume_type"""
4937+ session = get_session()
4938+ vol_type = session.query(models.VolumeTypes).\
4939+ options(joinedload('extra_specs')).\
4940+ filter_by(id=id).\
4941+ first()
4942+
4943+ if not vol_type:
4944+ raise exception.VolumeTypeNotFound(volume_type=id)
4945+ else:
4946+ return _dict_with_extra_specs(vol_type)
4947+
4948+
4949+@require_context
4950+def volume_type_get_by_name(context, name):
4951+ """Returns a dict describing specific volume_type"""
4952+ session = get_session()
4953+ vol_type = session.query(models.VolumeTypes).\
4954+ options(joinedload('extra_specs')).\
4955+ filter_by(name=name).\
4956+ first()
4957+ if not vol_type:
4958+ raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
4959+ else:
4960+ return _dict_with_extra_specs(vol_type)
4961+
4962+
4963+@require_admin_context
4964+def volume_type_destroy(context, name):
4965+ """ Marks specific volume_type as deleted"""
4966+ session = get_session()
4967+ volume_type_ref = session.query(models.VolumeTypes).\
4968+ filter_by(name=name)
4969+ records = volume_type_ref.update(dict(deleted=True))
4970+ if records == 0:
4971+ raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
4972+ else:
4973+ return volume_type_ref
4974+
4975+
4976+@require_admin_context
4977+def volume_type_purge(context, name):
4978+ """ Removes specific volume_type from DB
4979+ Usually volume_type_destroy should be used
4980+ """
4981+ session = get_session()
4982+ volume_type_ref = session.query(models.VolumeTypes).\
4983+ filter_by(name=name)
4984+ records = volume_type_ref.delete()
4985+ if records == 0:
4986+ raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
4987+ else:
4988+ return volume_type_ref
4989+
4990+
4991+####################
4992+
4993+
4994+@require_context
4995+def volume_type_extra_specs_get(context, volume_type_id):
4996+ session = get_session()
4997+
4998+ spec_results = session.query(models.VolumeTypeExtraSpecs).\
4999+ filter_by(volume_type_id=volume_type_id).\
5000+ filter_by(deleted=False).\
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches