Merge lp:~rackspace-titan/nova/instance-uuids into lp:~hudson-openstack/nova/trunk
- instance-uuids
- Merge into trunk
Status: | Work in progress | ||||
---|---|---|---|---|---|
Proposed branch: | lp:~rackspace-titan/nova/instance-uuids | ||||
Merge into: | lp:~hudson-openstack/nova/trunk | ||||
Diff against target: |
10619 lines (+2448/-1783) 58 files modified
bin/nova-manage (+1/-1) nova/api/ec2/__init__.py (+2/-2) nova/api/ec2/cloud.py (+45/-23) nova/api/ec2/ec2utils.py (+18/-1) nova/api/openstack/contrib/volumes.py (+8/-8) nova/api/openstack/images.py (+4/-1) nova/api/openstack/schemas/v1.1/server.rng (+0/-1) nova/api/openstack/servers.py (+107/-67) nova/api/openstack/views/servers.py (+18/-8) nova/compute/api.py (+234/-215) nova/compute/manager.py (+203/-220) nova/console/manager.py (+4/-4) nova/console/vmrc.py (+2/-2) nova/db/api.py (+91/-93) nova/db/sqlalchemy/api.py (+121/-142) nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py (+0/-1) nova/db/sqlalchemy/migrate_repo/versions/050_renames_for_instance_uuids.py (+117/-0) nova/db/sqlalchemy/migrate_repo/versions/050_sqlite_downgrade.sql (+266/-0) nova/db/sqlalchemy/migrate_repo/versions/050_sqlite_upgrade.sql (+265/-0) nova/db/sqlalchemy/migration.py (+5/-2) nova/db/sqlalchemy/models.py (+38/-28) nova/exception.py (+6/-9) nova/network/api.py (+9/-9) nova/network/manager.py (+64/-69) nova/network/quantum/manager.py (+11/-11) nova/network/quantum/nova_ipam_lib.py (+1/-1) nova/notifier/api.py (+1/-1) nova/scheduler/api.py (+22/-42) nova/scheduler/driver.py (+6/-6) nova/scheduler/zone.py (+2/-2) nova/tests/api/ec2/test_cloud.py (+66/-49) nova/tests/api/ec2/test_middleware.py (+1/-1) nova/tests/api/openstack/contrib/test_volumes.py (+0/-1) nova/tests/api/openstack/test_images.py (+8/-0) nova/tests/api/openstack/test_server_actions.py (+24/-19) nova/tests/api/openstack/test_servers.py (+185/-223) nova/tests/api/openstack/test_zones.py (+6/-6) nova/tests/fake_network.py (+5/-5) nova/tests/integrated/test_volumes.py (+1/-1) nova/tests/scheduler/test_scheduler.py (+28/-41) nova/tests/test_compute.py (+274/-299) nova/tests/test_console.py (+14/-14) nova/tests/test_db_api.py (+9/-9) nova/tests/test_libvirt.py (+34/-31) nova/tests/test_metadata.py (+1/-0) nova/tests/test_network.py (+20/-20) nova/tests/test_quantum.py (+4/-4) 
nova/tests/test_test_utils.py (+1/-1) nova/tests/test_volume.py (+18/-18) nova/tests/test_xenapi.py (+22/-15) nova/tests/xenapi/stubs.py (+1/-1) nova/utils.py (+1/-1) nova/virt/fake.py (+1/-1) nova/virt/libvirt/connection.py (+4/-4) nova/virt/libvirt/firewall.py (+9/-9) nova/virt/vmwareapi_conn.py (+0/-1) nova/virt/xenapi/vmops.py (+39/-39) nova/virt/xenapi_conn.py (+1/-1) |
||||
To merge this branch: | bzr merge lp:~rackspace-titan/nova/instance-uuids | ||||
Related bugs: |
|
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Nova Core security contacts | Pending | ||
Review via email: mp+75430@code.launchpad.net |
Commit message
Description of the change
This needs to be updated. This is a work in progress.
- 1568. By Brian Lamar
-
Merged trunk.
- 1569. By Brian Lamar
-
Merged trunk.
- 1570. By Brian Lamar
-
SQLite migrations.
- 1571. By Brian Waldon
-
merging trunk
- 1572. By Brian Waldon
-
fixing typo
- 1573. By Brian Waldon
-
removing useless stub
- 1574. By Brian Lamar
-
Compute API fix.
- 1575. By Brian Waldon
-
tests passing!
- 1576. By Brian Waldon
-
merging Lamar
- 1577. By Brian Waldon
-
fixing delete test
- 1578. By Brian Waldon
-
most compute tests work
- 1579. By Brian Waldon
-
merging lamar
- 1580. By Brian Lamar
-
Fixed notifier
- 1581. By Brian Lamar
-
instance_id -> instance_uuid change batch
- 1582. By Brian Lamar
-
More ID -> UUID updates.
- 1583. By Brian Lamar
-
Merged with Waldon.
- 1584. By Brian Lamar
-
Added instance_actions table to all migrations.
- 1585. By Brian Waldon
-
fixing tests
- 1586. By Brian Waldon
-
merging lamar
- 1587. By Brian Waldon
-
merging trunk
- 1588. By Brian Waldon
-
merging lamar
- 1589. By Brian Lamar
-
Updated instance_id -> instance_uuid in new 'soft delete' functionality.
- 1590. By Brian Lamar
-
fixes?
- 1591. By Brian Lamar
-
Merged with Waldon.
- 1592. By Brian Lamar
-
Arg, last two XenAPI tests working.
- 1593. By Brian Lamar
-
Small compute layer fix
- 1594. By Brian Lamar
-
Merged trunk.
- 1595. By Brian Lamar
-
Moved migrations
- 1596. By Brian Lamar
-
Making test_get_instance_mapping not as hacky.
- 1597. By Brian Lamar
-
Merged trunk and resolved conflicts.
- 1598. By Brian Lamar
-
Updated migration numbers.
- 1599. By Brian Lamar
-
Bad merge.
- 1600. By Brian Lamar
-
Fixed xen_api tests.
- 1601. By Brian Lamar
-
Libvirt tests running
- 1602. By Brian Lamar
-
Fix for test_ip_association_and_allocation_of_other_project
- 1603. By Brian Lamar
-
ID -> UUID updates.
- 1604. By Brian Lamar
-
Merged Trunk.
- 1605. By Brian Lamar
-
Can't have chain name use the UUID, it will be too long.
- 1606. By Brian Lamar
-
Update xenapi_conn.py to use UUIDs where needed.
Unmerged revisions
- 1606. By Brian Lamar
-
Update xenapi_conn.py to use UUIDs where needed.
- 1605. By Brian Lamar
-
Can't have chain name use the UUID, it will be too long.
- 1604. By Brian Lamar
-
Merged Trunk.
- 1603. By Brian Lamar
-
ID -> UUID updates.
- 1602. By Brian Lamar
-
Fix for test_ip_association_and_allocation_of_other_project
- 1601. By Brian Lamar
-
Libvirt tests running
- 1600. By Brian Lamar
-
Fixed xen_api tests.
- 1599. By Brian Lamar
-
Bad merge.
- 1598. By Brian Lamar
-
Updated migration numbers.
- 1597. By Brian Lamar
-
Merged trunk and resolved conflicts.
Preview Diff
1 | === modified file 'bin/nova-manage' |
2 | --- bin/nova-manage 2011-09-20 09:55:56 +0000 |
3 | +++ bin/nova-manage 2011-09-22 19:29:25 +0000 |
4 | @@ -669,7 +669,7 @@ |
5 | try: |
6 | fixed_ip = db.fixed_ip_get_by_address(ctxt, address) |
7 | if fixed_ip is None: |
8 | - raise exception.NotFound('Could not find address') |
9 | + raise exception.NotFound() |
10 | db.fixed_ip_update(ctxt, fixed_ip['address'], |
11 | {'reserved': reserved}) |
12 | except exception.NotFound as ex: |
13 | |
14 | === modified file 'nova/api/ec2/__init__.py' |
15 | --- nova/api/ec2/__init__.py 2011-09-21 09:00:18 +0000 |
16 | +++ nova/api/ec2/__init__.py 2011-09-22 19:29:25 +0000 |
17 | @@ -348,8 +348,8 @@ |
18 | except exception.InstanceNotFound as ex: |
19 | LOG.info(_('InstanceNotFound raised: %s'), unicode(ex), |
20 | context=context) |
21 | - ec2_id = ec2utils.id_to_ec2_id(ex.kwargs['instance_id']) |
22 | - message = ex.message % {'instance_id': ec2_id} |
23 | + ec2_id = ec2utils.id_to_ec2_id(ex.kwargs['instance_uuid']) |
24 | + message = ex.message % {'instance_uuid': ec2_id} |
25 | return self._error(req, context, type(ex).__name__, message) |
26 | except exception.VolumeNotFound as ex: |
27 | LOG.info(_('VolumeNotFound raised: %s'), unicode(ex), |
28 | |
29 | === modified file 'nova/api/ec2/cloud.py' |
30 | --- nova/api/ec2/cloud.py 2011-09-21 15:54:30 +0000 |
31 | +++ nova/api/ec2/cloud.py 2011-09-22 19:29:25 +0000 |
32 | @@ -283,7 +283,7 @@ |
33 | |
34 | # 'ephemeralN', 'swap' and ebs |
35 | for bdm in db.block_device_mapping_get_all_by_instance( |
36 | - ctxt, instance_ref['id']): |
37 | + ctxt, instance_ref['uuid']): |
38 | if bdm['no_device']: |
39 | continue |
40 | |
41 | @@ -324,18 +324,18 @@ |
42 | |
43 | # This ensures that all attributes of the instance |
44 | # are populated. |
45 | - instance_ref = db.instance_get(ctxt, instance_ref[0]['id']) |
46 | + instance_ref = db.instance_get(ctxt, instance_ref[0]['uuid']) |
47 | |
48 | mpi = self._get_mpi_data(ctxt, instance_ref['project_id']) |
49 | hostname = instance_ref['hostname'] |
50 | host = instance_ref['host'] |
51 | availability_zone = self._get_availability_zone_by_host(ctxt, host) |
52 | floating_ip = db.instance_get_floating_address(ctxt, |
53 | - instance_ref['id']) |
54 | + instance_ref['uuid']) |
55 | ec2_id = ec2utils.id_to_ec2_id(instance_ref['id']) |
56 | image_ec2_id = self.image_ec2_id(instance_ref['image_ref']) |
57 | security_groups = db.security_group_get_by_instance(ctxt, |
58 | - instance_ref['id']) |
59 | + instance_ref['uuid']) |
60 | security_groups = [x['name'] for x in security_groups] |
61 | mappings = self._format_instance_mapping(ctxt, instance_ref) |
62 | data = { |
63 | @@ -916,8 +916,10 @@ |
64 | else: |
65 | ec2_id = instance_id |
66 | instance_id = ec2utils.ec2_id_to_id(ec2_id) |
67 | + instance_uuid = self.compute_api.get_instance_uuid(context, |
68 | + instance_id) |
69 | output = self.compute_api.get_console_output( |
70 | - context, instance_id=instance_id) |
71 | + context, instance_uuid=instance_uuid) |
72 | now = utils.utcnow() |
73 | return {"InstanceId": ec2_id, |
74 | "Timestamp": now, |
75 | @@ -926,15 +928,19 @@ |
76 | def get_ajax_console(self, context, instance_id, **kwargs): |
77 | ec2_id = instance_id[0] |
78 | instance_id = ec2utils.ec2_id_to_id(ec2_id) |
79 | + instance_uuid = self.compute_api.get_instance_uuid(context, |
80 | + instance_id) |
81 | return self.compute_api.get_ajax_console(context, |
82 | - instance_id=instance_id) |
83 | + instance_uuid=instance_uuid) |
84 | |
85 | def get_vnc_console(self, context, instance_id, **kwargs): |
86 | """Returns vnc browser url. Used by OS dashboard.""" |
87 | ec2_id = instance_id |
88 | instance_id = ec2utils.ec2_id_to_id(ec2_id) |
89 | + instance_uuid = self.compute_api.get_instance_uuid(context, |
90 | + instance_id) |
91 | return self.compute_api.get_vnc_console(context, |
92 | - instance_id=instance_id) |
93 | + instance_uuid=instance_uuid) |
94 | |
95 | def describe_volumes(self, context, volume_id=None, **kwargs): |
96 | if volume_id: |
97 | @@ -1029,11 +1035,13 @@ |
98 | def attach_volume(self, context, volume_id, instance_id, device, **kwargs): |
99 | volume_id = ec2utils.ec2_id_to_id(volume_id) |
100 | instance_id = ec2utils.ec2_id_to_id(instance_id) |
101 | - msg = _("Attach volume %(volume_id)s to instance %(instance_id)s" |
102 | + instance_uuid = self.compute_api.get_instance_uuid(context, |
103 | + instance_id) |
104 | + msg = _("Attach volume %(volume_id)s to instance %(instance_uuid)s" |
105 | " at %(device)s") % locals() |
106 | LOG.audit(msg, context=context) |
107 | self.compute_api.attach_volume(context, |
108 | - instance_id=instance_id, |
109 | + instance_uuid=instance_uuid, |
110 | volume_id=volume_id, |
111 | device=device) |
112 | volume = self.volume_api.get(context, volume_id=volume_id) |
113 | @@ -1140,7 +1148,9 @@ |
114 | |
115 | ec2_instance_id = instance_id |
116 | instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) |
117 | - instance = self.compute_api.get(context, instance_id) |
118 | + instance_uuid = self.compute_api.get_instance_uuid(context, |
119 | + instance_id) |
120 | + instance = self.compute_api.get(context, instance_uuid) |
121 | result = {'instance_id': ec2_instance_id} |
122 | fn(instance, result) |
123 | return result |
124 | @@ -1169,9 +1179,11 @@ |
125 | result): |
126 | """Format InstanceBlockDeviceMappingResponseItemType""" |
127 | root_device_type = 'instance-store' |
128 | + instance_uuid = self.compute_api.get_instance_uuid(context, |
129 | + instance_id) |
130 | mapping = [] |
131 | for bdm in db.block_device_mapping_get_all_by_instance(context, |
132 | - instance_id): |
133 | + instance_uuid): |
134 | volume_id = bdm['volume_id'] |
135 | if (volume_id is None or bdm['no_device']): |
136 | continue |
137 | @@ -1228,9 +1240,11 @@ |
138 | if instance_id: |
139 | instances = [] |
140 | for ec2_id in instance_id: |
141 | - internal_id = ec2utils.ec2_id_to_id(ec2_id) |
142 | + instance_id = ec2utils.ec2_id_to_id(ec2_id) |
143 | + instance_uuid = self.compute_api.get_instance_uuid(context, |
144 | + instance_id) |
145 | try: |
146 | - instance = self.compute_api.get(context, internal_id) |
147 | + instance = self.compute_api.get(context, instance_uuid) |
148 | except exception.NotFound: |
149 | continue |
150 | instances.append(instance) |
151 | @@ -1285,7 +1299,7 @@ |
152 | self._format_instance_type(instance, i) |
153 | i['launchTime'] = instance['created_at'] |
154 | i['amiLaunchIndex'] = instance['launch_index'] |
155 | - i['displayName'] = instance['display_name'] |
156 | + i['displayName'] = "Server %s" % instance_id |
157 | i['displayDescription'] = instance['display_description'] |
158 | self._format_instance_root_device_name(instance, i) |
159 | self._format_instance_bdm(context, instance_id, |
160 | @@ -1353,8 +1367,10 @@ |
161 | LOG.audit(_("Associate address %(public_ip)s to" |
162 | " instance %(instance_id)s") % locals(), context=context) |
163 | instance_id = ec2utils.ec2_id_to_id(instance_id) |
164 | + instance_uuid = self.compute_api.get_instance_uuid(context, |
165 | + instance_id) |
166 | self.compute_api.associate_floating_ip(context, |
167 | - instance_id=instance_id, |
168 | + instance_uuid=instance_uuid, |
169 | address=public_ip) |
170 | return {'associateResponse': ["Address associated."]} |
171 | |
172 | @@ -1405,10 +1421,12 @@ |
173 | |
174 | def _do_instance(self, action, context, ec2_id): |
175 | instance_id = ec2utils.ec2_id_to_id(ec2_id) |
176 | - action(context, instance_id=instance_id) |
177 | + instance_uuid = self.compute_api.get_instance_uuid(context, |
178 | + instance_id) |
179 | + action(context, instance_uuid=instance_uuid) |
180 | |
181 | - def _do_instances(self, action, context, instance_id): |
182 | - for ec2_id in instance_id: |
183 | + def _do_instances(self, action, context, ec2_id_list): |
184 | + for ec2_id in ec2_id_list: |
185 | self._do_instance(action, context, ec2_id) |
186 | |
187 | def terminate_instances(self, context, instance_id, **kwargs): |
188 | @@ -1456,7 +1474,9 @@ |
189 | changes[field] = kwargs[field] |
190 | if changes: |
191 | instance_id = ec2utils.ec2_id_to_id(instance_id) |
192 | - self.compute_api.update(context, instance_id=instance_id, |
193 | + instance_uuid = self.compute_api.get_instance_uuid(context, |
194 | + instance_id) |
195 | + self.compute_api.update(context, instance_uuid=instance_uuid, |
196 | **changes) |
197 | return True |
198 | |
199 | @@ -1670,7 +1690,9 @@ |
200 | |
201 | ec2_instance_id = instance_id |
202 | instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) |
203 | - instance = self.compute_api.get(context, instance_id) |
204 | + instance_uuid = self.compute_api.get_instance_uuid(context, |
205 | + instance_id) |
206 | + instance = self.compute_api.get(context, instance_uuid) |
207 | |
208 | # stop the instance if necessary |
209 | restart_instance = False |
210 | @@ -1683,7 +1705,7 @@ |
211 | |
212 | if vm_state == vm_states.ACTIVE: |
213 | restart_instance = True |
214 | - self.compute_api.stop(context, instance_id=instance_id) |
215 | + self.compute_api.stop(context, instance_uuid=instance_uuid) |
216 | |
217 | # wait instance for really stopped |
218 | start_time = time.time() |
219 | @@ -1706,7 +1728,7 @@ |
220 | |
221 | mapping = [] |
222 | bdms = db.block_device_mapping_get_all_by_instance(context, |
223 | - instance_id) |
224 | + instance_uuid) |
225 | for bdm in bdms: |
226 | if bdm.no_device: |
227 | continue |
228 | @@ -1759,6 +1781,6 @@ |
229 | image_id = self._register_image(context, src_image) |
230 | |
231 | if restart_instance: |
232 | - self.compute_api.start(context, instance_id=instance_id) |
233 | + self.compute_api.start(context, instance_uuid=instance_uuid) |
234 | |
235 | return {'imageId': image_id} |
236 | |
237 | === modified file 'nova/api/ec2/ec2utils.py' |
238 | --- nova/api/ec2/ec2utils.py 2011-09-21 16:34:59 +0000 |
239 | +++ nova/api/ec2/ec2utils.py 2011-09-22 19:29:25 +0000 |
240 | @@ -18,11 +18,28 @@ |
241 | |
242 | import re |
243 | |
244 | +from nova import context |
245 | +from nova import db |
246 | from nova import exception |
247 | +import nova.compute.api |
248 | + |
249 | + |
250 | +def ec2_id_to_uuid(_context, ec2_id): |
251 | + """convert an ec2 id (i-[base 16 number]) to an instance uuid (string)""" |
252 | + ctx = context.get_admin_context() |
253 | + try: |
254 | + instance_id = int(ec2_id.split('-')[-1], 16) |
255 | + except valueerror: |
256 | + raise exception.InvalidEc2Id(ec2_id=ec2_id) |
257 | + |
258 | + for instance in db.instance_get_all(ctx): |
259 | + raise Exception(instance.__dict__) |
260 | + |
261 | + return compute_api.get_instance_uuid(ctx, instance_id) |
262 | |
263 | |
264 | def ec2_id_to_id(ec2_id): |
265 | - """Convert an ec2 ID (i-[base 16 number]) to an instance id (int)""" |
266 | + """convert an ec2 id (i-[base 16 number]) to an instance id (int)""" |
267 | try: |
268 | return int(ec2_id.split('-')[-1], 16) |
269 | except ValueError: |
270 | |
271 | === modified file 'nova/api/openstack/contrib/volumes.py' |
272 | --- nova/api/openstack/contrib/volumes.py 2011-09-14 19:33:51 +0000 |
273 | +++ nova/api/openstack/contrib/volumes.py 2011-09-22 19:29:25 +0000 |
274 | @@ -200,8 +200,8 @@ |
275 | d['id'] = volume_id |
276 | |
277 | d['volumeId'] = volume_id |
278 | - if vol.get('instance_id'): |
279 | - d['serverId'] = vol['instance_id'] |
280 | + if vol.get('instance_uuid'): |
281 | + d['serverId'] = vol['instance_uuid'] |
282 | if vol.get('mountpoint'): |
283 | d['device'] = vol['mountpoint'] |
284 | |
285 | @@ -245,8 +245,8 @@ |
286 | LOG.debug("volume_id not found") |
287 | return faults.Fault(exc.HTTPNotFound()) |
288 | |
289 | - if str(vol['instance_id']) != server_id: |
290 | - LOG.debug("instance_id != server_id") |
291 | + if str(vol['instance_uuid']) != server_id: |
292 | + LOG.debug("instance_uuid != server_id") |
293 | return faults.Fault(exc.HTTPNotFound()) |
294 | |
295 | return {'volumeAttachment': _translate_attachment_detail_view(context, |
296 | @@ -259,7 +259,7 @@ |
297 | if not body: |
298 | return faults.Fault(exc.HTTPUnprocessableEntity()) |
299 | |
300 | - instance_id = server_id |
301 | + instance_uuid = server_id |
302 | volume_id = body['volumeAttachment']['volumeId'] |
303 | device = body['volumeAttachment']['device'] |
304 | |
305 | @@ -269,7 +269,7 @@ |
306 | |
307 | try: |
308 | self.compute_api.attach_volume(context, |
309 | - instance_id=instance_id, |
310 | + instance_uuid=instance_uuid, |
311 | volume_id=volume_id, |
312 | device=device) |
313 | except exception.NotFound: |
314 | @@ -307,8 +307,8 @@ |
315 | except exception.NotFound: |
316 | return faults.Fault(exc.HTTPNotFound()) |
317 | |
318 | - if str(vol['instance_id']) != server_id: |
319 | - LOG.debug("instance_id != server_id") |
320 | + if str(vol['instance_uuid']) != server_id: |
321 | + LOG.debug("instance_uuid != server_id") |
322 | return faults.Fault(exc.HTTPNotFound()) |
323 | |
324 | self.compute_api.detach_volume(context, |
325 | |
326 | === modified file 'nova/api/openstack/images.py' |
327 | --- nova/api/openstack/images.py 2011-09-20 21:11:49 +0000 |
328 | +++ nova/api/openstack/images.py 2011-09-22 19:29:25 +0000 |
329 | @@ -124,9 +124,12 @@ |
330 | raise webob.exc.HTTPBadRequest(explanation=msg) |
331 | |
332 | context = req.environ["nova.context"] |
333 | + instance_uuid = self._compute_service.get_instance_uuid(context, |
334 | + instance_id) |
335 | + #NOTE(bcwaldon): purposefully use instance id here instead of uuid |
336 | props = {'instance_id': instance_id} |
337 | image = self._compute_service.snapshot(context, |
338 | - instance_id, |
339 | + instance_uuid, |
340 | image_name, |
341 | extra_properties=props) |
342 | |
343 | |
344 | === modified file 'nova/api/openstack/schemas/v1.1/server.rng' |
345 | --- nova/api/openstack/schemas/v1.1/server.rng 2011-09-08 17:44:37 +0000 |
346 | +++ nova/api/openstack/schemas/v1.1/server.rng 2011-09-22 19:29:25 +0000 |
347 | @@ -4,7 +4,6 @@ |
348 | <attribute name="userId"> <text/> </attribute> |
349 | <attribute name="tenantId"> <text/> </attribute> |
350 | <attribute name="id"> <text/> </attribute> |
351 | - <attribute name="uuid"> <text/> </attribute> |
352 | <attribute name="updated"> <text/> </attribute> |
353 | <attribute name="created"> <text/> </attribute> |
354 | <attribute name="hostId"> <text/> </attribute> |
355 | |
356 | === modified file 'nova/api/openstack/servers.py' |
357 | --- nova/api/openstack/servers.py 2011-09-22 15:41:34 +0000 |
358 | +++ nova/api/openstack/servers.py 2011-09-22 19:29:25 +0000 |
359 | @@ -74,6 +74,9 @@ |
360 | self.compute_api = compute.API() |
361 | self.helper = helper.CreateInstanceHelper(self) |
362 | |
363 | + def _convert_id(self, _context, instance_uuid): |
364 | + return instance_uuid |
365 | + |
366 | def index(self, req): |
367 | """ Returns a list of server names and ids for a given user """ |
368 | try: |
369 | @@ -103,7 +106,7 @@ |
370 | def _limit_items(self, items, req): |
371 | raise NotImplementedError() |
372 | |
373 | - def _action_rebuild(self, info, request, instance_id): |
374 | + def _action_rebuild(self, info, request, instance_uuid): |
375 | raise NotImplementedError() |
376 | |
377 | def _get_servers(self, req, is_detail): |
378 | @@ -161,9 +164,10 @@ |
379 | @scheduler_api.redirect_handler |
380 | def show(self, req, id): |
381 | """ Returns server details by server id """ |
382 | + context = req.environ['nova.context'] |
383 | + uuid = self._convert_id(context, id) |
384 | try: |
385 | - instance = self.compute_api.routing_get( |
386 | - req.environ['nova.context'], id) |
387 | + instance = self.compute_api.routing_get(context, uuid) |
388 | return self._build_view(req, instance, is_detail=True) |
389 | except exception.NotFound: |
390 | raise exc.HTTPNotFound() |
391 | @@ -208,6 +212,8 @@ |
392 | raise exc.HTTPUnprocessableEntity() |
393 | |
394 | ctxt = req.environ['nova.context'] |
395 | + uuid = self._convert_id(ctxt, id) |
396 | + |
397 | update_dict = {} |
398 | |
399 | if 'name' in body['server']: |
400 | @@ -224,13 +230,13 @@ |
401 | update_dict['access_ip_v6'] = access_ipv6.strip() |
402 | |
403 | try: |
404 | - self.compute_api.update(ctxt, id, **update_dict) |
405 | + self.compute_api.update(ctxt, uuid, **update_dict) |
406 | except exception.NotFound: |
407 | raise exc.HTTPNotFound() |
408 | |
409 | - return self._update(ctxt, req, id, body) |
410 | + return self._update(ctxt, req, uuid, body) |
411 | |
412 | - def _update(self, context, req, id, inst_dict): |
413 | + def _update(self, context, req, uuid, inst_dict): |
414 | return exc.HTTPNotImplemented() |
415 | |
416 | @novaclient_exception_converter |
417 | @@ -264,7 +270,7 @@ |
418 | msg = _("Invalid request body") |
419 | raise exc.HTTPBadRequest(explanation=msg) |
420 | |
421 | - def _action_create_backup(self, input_dict, req, instance_id): |
422 | + def _action_create_backup(self, input_dict, req, instance_uuid): |
423 | """Backup a server instance. |
424 | |
425 | Images now have an `image_type` associated with them, which can be |
426 | @@ -277,6 +283,9 @@ |
427 | """ |
428 | entity = input_dict["createBackup"] |
429 | |
430 | + context = req.environ["nova.context"] |
431 | + uuid = self._convert_id(context, instance_uuid) |
432 | + |
433 | try: |
434 | image_name = entity["name"] |
435 | backup_type = entity["backup_type"] |
436 | @@ -297,13 +306,10 @@ |
437 | raise webob.exc.HTTPBadRequest(explanation=msg) |
438 | |
439 | # preserve link to server in image properties |
440 | - server_ref = os.path.join(req.application_url, |
441 | - 'servers', |
442 | - str(instance_id)) |
443 | + server_ref = os.path.join(req.application_url, 'servers', uuid) |
444 | props = {'instance_ref': server_ref} |
445 | |
446 | metadata = entity.get('metadata', {}) |
447 | - context = req.environ["nova.context"] |
448 | common.check_img_metadata_quota_limit(context, metadata) |
449 | try: |
450 | props.update(metadata) |
451 | @@ -312,7 +318,7 @@ |
452 | raise webob.exc.HTTPBadRequest(explanation=msg) |
453 | |
454 | image = self.compute_api.backup(context, |
455 | - instance_id, |
456 | + uuid, |
457 | image_name, |
458 | backup_type, |
459 | rotation, |
460 | @@ -327,32 +333,36 @@ |
461 | return resp |
462 | |
463 | @common.check_snapshots_enabled |
464 | - def _action_create_image(self, input_dict, req, id): |
465 | - return exc.HTTPNotImplemented() |
466 | - |
467 | - def _action_change_password(self, input_dict, req, id): |
468 | - return exc.HTTPNotImplemented() |
469 | - |
470 | - def _action_confirm_resize(self, input_dict, req, id): |
471 | + def _action_create_image(self, input_dict, req, instance_uuid): |
472 | + return exc.HTTPNotImplemented() |
473 | + |
474 | + def _action_change_password(self, input_dict, req, instance_uuid): |
475 | + return exc.HTTPNotImplemented() |
476 | + |
477 | + def _action_confirm_resize(self, input_dict, req, instance_uuid): |
478 | + context = req.environ['nova.context'] |
479 | + uuid = self._convert_id(context, instance_uuid) |
480 | try: |
481 | - self.compute_api.confirm_resize(req.environ['nova.context'], id) |
482 | + self.compute_api.confirm_resize(context, uuid) |
483 | except Exception, e: |
484 | LOG.exception(_("Error in confirm-resize %s"), e) |
485 | raise exc.HTTPBadRequest() |
486 | return exc.HTTPNoContent() |
487 | |
488 | - def _action_revert_resize(self, input_dict, req, id): |
489 | + def _action_revert_resize(self, input_dict, req, instance_uuid): |
490 | + context = req.environ['nova.context'] |
491 | + uuid = self._convert_id(context, instance_uuid) |
492 | try: |
493 | - self.compute_api.revert_resize(req.environ['nova.context'], id) |
494 | + self.compute_api.revert_resize(context, uuid) |
495 | except Exception, e: |
496 | LOG.exception(_("Error in revert-resize %s"), e) |
497 | raise exc.HTTPBadRequest() |
498 | return webob.Response(status_int=202) |
499 | |
500 | - def _action_resize(self, input_dict, req, id): |
501 | + def _action_resize(self, input_dict, req, instance_uuid): |
502 | return exc.HTTPNotImplemented() |
503 | |
504 | - def _action_reboot(self, input_dict, req, id): |
505 | + def _action_reboot(self, input_dict, req, instance_uuid): |
506 | if 'reboot' in input_dict and 'type' in input_dict['reboot']: |
507 | valid_reboot_types = ['HARD', 'SOFT'] |
508 | reboot_type = input_dict['reboot']['type'].upper() |
509 | @@ -364,9 +374,12 @@ |
510 | msg = _("Missing argument 'type' for reboot") |
511 | LOG.exception(msg) |
512 | raise exc.HTTPBadRequest(explanation=msg) |
513 | + |
514 | + context = req.environ['nova.context'] |
515 | + uuid = self._convert_id(context, instance_uuid) |
516 | + |
517 | try: |
518 | - self.compute_api.reboot(req.environ['nova.context'], id, |
519 | - reboot_type) |
520 | + self.compute_api.reboot(context, uuid, reboot_type) |
521 | except Exception, e: |
522 | LOG.exception(_("Error in reboot %s"), e) |
523 | raise exc.HTTPUnprocessableEntity() |
524 | @@ -381,8 +394,9 @@ |
525 | |
526 | """ |
527 | context = req.environ['nova.context'] |
528 | + uuid = self._convert_id(context, id) |
529 | try: |
530 | - self.compute_api.lock(context, id) |
531 | + self.compute_api.lock(context, uuid) |
532 | except Exception: |
533 | readable = traceback.format_exc() |
534 | LOG.exception(_("Compute.api::lock %s"), readable) |
535 | @@ -398,8 +412,9 @@ |
536 | |
537 | """ |
538 | context = req.environ['nova.context'] |
539 | + uuid = self._convert_id(context, id) |
540 | try: |
541 | - self.compute_api.unlock(context, id) |
542 | + self.compute_api.unlock(context, uuid) |
543 | except Exception: |
544 | readable = traceback.format_exc() |
545 | LOG.exception(_("Compute.api::unlock %s"), readable) |
546 | @@ -414,8 +429,9 @@ |
547 | |
548 | """ |
549 | context = req.environ['nova.context'] |
550 | + uuid = self._convert_id(context, id) |
551 | try: |
552 | - self.compute_api.get_lock(context, id) |
553 | + self.compute_api.get_lock(context, uuid) |
554 | except Exception: |
555 | readable = traceback.format_exc() |
556 | LOG.exception(_("Compute.api::get_lock %s"), readable) |
557 | @@ -430,8 +446,9 @@ |
558 | |
559 | """ |
560 | context = req.environ['nova.context'] |
561 | + uuid = self._convert_id(context, id) |
562 | try: |
563 | - self.compute_api.reset_network(context, id) |
564 | + self.compute_api.reset_network(context, uuid) |
565 | except Exception: |
566 | readable = traceback.format_exc() |
567 | LOG.exception(_("Compute.api::reset_network %s"), readable) |
568 | @@ -446,8 +463,9 @@ |
569 | |
570 | """ |
571 | context = req.environ['nova.context'] |
572 | + uuid = self._convert_id(context, id) |
573 | try: |
574 | - self.compute_api.inject_network_info(context, id) |
575 | + self.compute_api.inject_network_info(context, uuid) |
576 | except Exception: |
577 | readable = traceback.format_exc() |
578 | LOG.exception(_("Compute.api::inject_network_info %s"), readable) |
579 | @@ -459,8 +477,9 @@ |
580 | def pause(self, req, id): |
581 | """ Permit Admins to Pause the server. """ |
582 | ctxt = req.environ['nova.context'] |
583 | + uuid = self._convert_id(ctxt, id) |
584 | try: |
585 | - self.compute_api.pause(ctxt, id) |
586 | + self.compute_api.pause(ctxt, uuid) |
587 | except Exception: |
588 | readable = traceback.format_exc() |
589 | LOG.exception(_("Compute.api::pause %s"), readable) |
590 | @@ -472,8 +491,9 @@ |
591 | def unpause(self, req, id): |
592 | """ Permit Admins to Unpause the server. """ |
593 | ctxt = req.environ['nova.context'] |
594 | + uuid = self._convert_id(ctxt, id) |
595 | try: |
596 | - self.compute_api.unpause(ctxt, id) |
597 | + self.compute_api.unpause(ctxt, uuid) |
598 | except Exception: |
599 | readable = traceback.format_exc() |
600 | LOG.exception(_("Compute.api::unpause %s"), readable) |
601 | @@ -485,8 +505,9 @@ |
602 | def suspend(self, req, id): |
603 | """permit admins to suspend the server""" |
604 | context = req.environ['nova.context'] |
605 | + uuid = self._convert_id(context, id) |
606 | try: |
607 | - self.compute_api.suspend(context, id) |
608 | + self.compute_api.suspend(context, uuid) |
609 | except Exception: |
610 | readable = traceback.format_exc() |
611 | LOG.exception(_("compute.api::suspend %s"), readable) |
612 | @@ -498,8 +519,9 @@ |
613 | def resume(self, req, id): |
614 | """permit admins to resume the server from suspend""" |
615 | context = req.environ['nova.context'] |
616 | + uuid = self._convert_id(context, id) |
617 | try: |
618 | - self.compute_api.resume(context, id) |
619 | + self.compute_api.resume(context, uuid) |
620 | except Exception: |
621 | readable = traceback.format_exc() |
622 | LOG.exception(_("compute.api::resume %s"), readable) |
623 | @@ -509,8 +531,10 @@ |
624 | @novaclient_exception_converter |
625 | @scheduler_api.redirect_handler |
626 | def migrate(self, req, id): |
627 | + context = req.environ['nova.context'] |
628 | + uuid = self._convert_id(context, id) |
629 | try: |
630 | - self.compute_api.resize(req.environ['nova.context'], id) |
631 | + self.compute_api.resize(context, id) |
632 | except Exception, e: |
633 | LOG.exception(_("Error in migrate %s"), e) |
634 | raise exc.HTTPBadRequest() |
635 | @@ -521,13 +545,14 @@ |
636 | def rescue(self, req, id, body={}): |
637 | """Permit users to rescue the server.""" |
638 | context = req.environ["nova.context"] |
639 | + uuid = self._convert_id(context, id) |
640 | try: |
641 | if 'rescue' in body and body['rescue'] and \ |
642 | 'adminPass' in body['rescue']: |
643 | password = body['rescue']['adminPass'] |
644 | else: |
645 | password = utils.generate_password(FLAGS.password_length) |
646 | - self.compute_api.rescue(context, id, rescue_password=password) |
647 | + self.compute_api.rescue(context, uuid, rescue_password=password) |
648 | except Exception: |
649 | readable = traceback.format_exc() |
650 | LOG.exception(_("compute.api::rescue %s"), readable) |
651 | @@ -540,6 +565,7 @@ |
652 | def unrescue(self, req, id): |
653 | """Permit users to unrescue the server.""" |
654 | context = req.environ["nova.context"] |
655 | + uuid = self._convert_id(context, id) |
656 | try: |
657 | self.compute_api.unrescue(context, id) |
658 | except Exception: |
659 | @@ -552,9 +578,10 @@ |
660 | @scheduler_api.redirect_handler |
661 | def get_ajax_console(self, req, id): |
662 | """Returns a url to an instance's ajaxterm console.""" |
663 | + context = req.environ['nova.context'] |
664 | + uuid = self._convert_id(context, id) |
665 | try: |
666 | - self.compute_api.get_ajax_console(req.environ['nova.context'], |
667 | - int(id)) |
668 | + self.compute_api.get_ajax_console(context, uuid) |
669 | except exception.NotFound: |
670 | raise exc.HTTPNotFound() |
671 | return webob.Response(status_int=202) |
672 | @@ -563,9 +590,10 @@ |
673 | @scheduler_api.redirect_handler |
674 | def get_vnc_console(self, req, id): |
675 | """Returns a url to an instance's ajaxterm console.""" |
676 | + context = req.environ['nova.context'] |
677 | + uuid = self._convert_id(context, id) |
678 | try: |
679 | - self.compute_api.get_vnc_console(req.environ['nova.context'], |
680 | - int(id)) |
681 | + self.compute_api.get_vnc_console(context, uuid) |
682 | except exception.NotFound: |
683 | raise exc.HTTPNotFound() |
684 | return webob.Response(status_int=202) |
685 | @@ -575,12 +603,14 @@ |
686 | def diagnostics(self, req, id): |
687 | """Permit Admins to retrieve server diagnostics.""" |
688 | ctxt = req.environ["nova.context"] |
689 | - return self.compute_api.get_diagnostics(ctxt, id) |
690 | + uuid = self._convert_id(ctxt, id) |
691 | + return self.compute_api.get_diagnostics(ctxt, uuid) |
692 | |
693 | def actions(self, req, id): |
694 | """Permit Admins to retrieve server actions.""" |
695 | ctxt = req.environ["nova.context"] |
696 | - items = self.compute_api.get_actions(ctxt, id) |
697 | + uuid = self._convert_id(ctxt, id) |
698 | + items = self.compute_api.get_actions(ctxt, uuid) |
699 | actions = [] |
700 | # TODO(jk0): Do not do pre-serialization here once the default |
701 | # serializer is updated |
702 | @@ -591,12 +621,12 @@ |
703 | error=item.error)) |
704 | return dict(actions=actions) |
705 | |
706 | - def resize(self, req, instance_id, flavor_id): |
707 | + def resize(self, req, instance_uuid, flavor_id): |
708 | """Begin the resize process with given instance/flavor.""" |
709 | context = req.environ["nova.context"] |
710 | |
711 | try: |
712 | - self.compute_api.resize(context, instance_id, flavor_id) |
713 | + self.compute_api.resize(context, instance_uuid, flavor_id) |
714 | except exception.FlavorNotFound: |
715 | msg = _("Unable to locate requested flavor.") |
716 | raise exc.HTTPBadRequest(explanation=msg) |
717 | @@ -613,12 +643,20 @@ |
718 | class ControllerV10(Controller): |
719 | """v1.0 OpenStack API controller""" |
720 | |
721 | + def _convert_id(self, context, instance_id): |
722 | + try: |
723 | + return self.compute_api.get_instance_uuid(context, instance_id) |
724 | + except exception.NotFound: |
725 | + raise exc.HTTPNotFound() |
726 | + |
727 | @novaclient_exception_converter |
728 | @scheduler_api.redirect_handler |
729 | def delete(self, req, id): |
730 | """ Destroys a server """ |
731 | + context = req.environ['nova.context'] |
732 | + uuid = self._convert_id(context, id) |
733 | try: |
734 | - self._delete(req.environ['nova.context'], id) |
735 | + self._delete(context, uuid) |
736 | except exception.NotFound: |
737 | raise exc.HTTPNotFound() |
738 | return webob.Response(status_int=202) |
739 | @@ -649,25 +687,27 @@ |
740 | def _limit_items(self, items, req): |
741 | return common.limited(items, req) |
742 | |
743 | - def _update(self, context, req, id, inst_dict): |
744 | + def _update(self, context, req, uuid, inst_dict): |
745 | if 'adminPass' in inst_dict['server']: |
746 | - self.compute_api.set_admin_password(context, id, |
747 | - inst_dict['server']['adminPass']) |
748 | + admin_pass = inst_dict['server']['adminPass'] |
749 | + self.compute_api.set_admin_password(context, uuid, admin_pass) |
750 | return exc.HTTPNoContent() |
751 | |
752 | - def _action_resize(self, input_dict, req, id): |
753 | + def _action_resize(self, input_dict, req, instance_id): |
754 | """ Resizes a given instance to the flavor size requested """ |
755 | + context = req.environ['nova.context'] |
756 | + uuid = self._convert_id(context, instance_id) |
757 | try: |
758 | flavor_id = input_dict["resize"]["flavorId"] |
759 | except (KeyError, TypeError): |
760 | msg = _("Resize requests require 'flavorId' attribute.") |
761 | raise exc.HTTPBadRequest(explanation=msg) |
762 | |
763 | - return self.resize(req, id, flavor_id) |
764 | + return self.resize(req, uuid, flavor_id) |
765 | |
766 | def _action_rebuild(self, info, request, instance_id): |
767 | context = request.environ['nova.context'] |
768 | - |
769 | + uuid = self._convert_id(context, instance_id) |
770 | try: |
771 | image_id = info["rebuild"]["imageId"] |
772 | except (KeyError, TypeError): |
773 | @@ -678,9 +718,9 @@ |
774 | password = utils.generate_password(FLAGS.password_length) |
775 | |
776 | try: |
777 | - self.compute_api.rebuild(context, instance_id, image_id, password) |
778 | + self.compute_api.rebuild(context, uuid, image_id, password) |
779 | except exception.RebuildRequiresActiveInstance: |
780 | - msg = _("Instance %s must be active to rebuild.") % instance_id |
781 | + msg = _("Instance %s must be active to rebuild.") % uuid |
782 | raise exc.HTTPConflict(explanation=msg) |
783 | |
784 | return webob.Response(status_int=202) |
785 | @@ -806,11 +846,11 @@ |
786 | LOG.info(msg) |
787 | raise exc.HTTPBadRequest(explanation=msg) |
788 | |
789 | - def _update(self, context, req, id, inst_dict): |
790 | - instance = self.compute_api.routing_get(context, id) |
791 | + def _update(self, context, req, uuid, inst_dict): |
792 | + instance = self.compute_api.routing_get(context, uuid) |
793 | return self._build_view(req, instance, is_detail=True) |
794 | |
795 | - def _action_resize(self, input_dict, req, id): |
796 | + def _action_resize(self, input_dict, req, instance_uuid): |
797 | """ Resizes a given instance to the flavor size requested """ |
798 | try: |
799 | flavor_ref = input_dict["resize"]["flavorRef"] |
800 | @@ -821,9 +861,9 @@ |
801 | msg = _("Resize requests require 'flavorRef' attribute.") |
802 | raise exc.HTTPBadRequest(explanation=msg) |
803 | |
804 | - return self.resize(req, id, flavor_ref) |
805 | + return self.resize(req, instance_uuid, flavor_ref) |
806 | |
807 | - def _action_rebuild(self, info, request, instance_id): |
808 | + def _action_rebuild(self, info, request, instance_uuid): |
809 | context = request.environ['nova.context'] |
810 | |
811 | try: |
812 | @@ -847,24 +887,24 @@ |
813 | password = utils.generate_password(FLAGS.password_length) |
814 | |
815 | try: |
816 | - self.compute_api.rebuild(context, instance_id, image_href, |
817 | + self.compute_api.rebuild(context, instance_uuid, image_href, |
818 | password, name=name, metadata=metadata, |
819 | files_to_inject=personalities) |
820 | except exception.RebuildRequiresActiveInstance: |
821 | - msg = _("Instance %s must be active to rebuild.") % instance_id |
822 | + msg = _("Instance %s must be active to rebuild.") % instance_uuid |
823 | raise exc.HTTPConflict(explanation=msg) |
824 | except exception.InstanceNotFound: |
825 | - msg = _("Instance %s could not be found") % instance_id |
826 | + msg = _("Instance %s could not be found") % instance_uuid |
827 | raise exc.HTTPNotFound(explanation=msg) |
828 | |
829 | - instance = self.compute_api.routing_get(context, instance_id) |
830 | + instance = self.compute_api.routing_get(context, instance_uuid) |
831 | view = self._build_view(request, instance, is_detail=True) |
832 | view['server']['adminPass'] = password |
833 | |
834 | return view |
835 | |
836 | @common.check_snapshots_enabled |
837 | - def _action_create_image(self, input_dict, req, instance_id): |
838 | + def _action_create_image(self, input_dict, req, instance_uuid): |
839 | """Snapshot a server instance.""" |
840 | entity = input_dict.get("createImage", {}) |
841 | |
842 | @@ -882,7 +922,7 @@ |
843 | # preserve link to server in image properties |
844 | server_ref = os.path.join(req.application_url, |
845 | 'servers', |
846 | - str(instance_id)) |
847 | + instance_uuid) |
848 | props = {'instance_ref': server_ref} |
849 | |
850 | metadata = entity.get('metadata', {}) |
851 | @@ -895,7 +935,7 @@ |
852 | raise webob.exc.HTTPBadRequest(explanation=msg) |
853 | |
854 | image = self.compute_api.snapshot(context, |
855 | - instance_id, |
856 | + instance_uuid, |
857 | image_name, |
858 | extra_properties=props) |
859 | |
860 | @@ -977,8 +1017,8 @@ |
861 | |
862 | server_elem.set('name', server_dict['name']) |
863 | server_elem.set('id', str(server_dict['id'])) |
864 | + |
865 | if detailed: |
866 | - server_elem.set('uuid', str(server_dict['uuid'])) |
867 | server_elem.set('userId', str(server_dict['user_id'])) |
868 | server_elem.set('tenantId', str(server_dict['tenant_id'])) |
869 | server_elem.set('updated', str(server_dict['updated'])) |
870 | |
871 | === modified file 'nova/api/openstack/views/servers.py' |
872 | --- nova/api/openstack/views/servers.py 2011-09-22 15:41:34 +0000 |
873 | +++ nova/api/openstack/views/servers.py 2011-09-22 19:29:25 +0000 |
874 | @@ -66,7 +66,12 @@ |
875 | |
876 | def _build_simple(self, inst): |
877 | """Return a simple model of a server.""" |
878 | - return dict(server=dict(id=inst['id'], name=inst['display_name'])) |
879 | + return { |
880 | + 'server': { |
881 | + 'id': self._get_instance_id(inst), |
882 | + 'name': inst['display_name'], |
883 | + }, |
884 | + } |
885 | |
886 | def _build_detail(self, inst): |
887 | """Returns a detailed model of a server.""" |
888 | @@ -74,7 +79,7 @@ |
889 | task_state = inst.get('task_state') |
890 | |
891 | inst_dict = { |
892 | - 'id': inst['id'], |
893 | + 'id': self._get_instance_id(inst), |
894 | 'name': inst['display_name'], |
895 | 'user_id': inst.get('user_id', ''), |
896 | 'tenant_id': inst.get('project_id', ''), |
897 | @@ -111,13 +116,13 @@ |
898 | def _build_extra(self, response, inst): |
899 | pass |
900 | |
901 | + def _get_instance_id(self, inst): |
902 | + raise NotImplementedError() |
903 | + |
904 | |
905 | class ViewBuilderV10(ViewBuilder): |
906 | """Model an Openstack API V1.0 server response.""" |
907 | |
908 | - def _build_extra(self, response, inst): |
909 | - response['uuid'] = inst['uuid'] |
910 | - |
911 | def _build_image(self, response, inst): |
912 | if inst.get('image_ref', None): |
913 | image_ref = inst['image_ref'] |
914 | @@ -132,6 +137,9 @@ |
915 | def _build_addresses(self, response, inst): |
916 | response['addresses'] = self.addresses_builder.build(inst) |
917 | |
918 | + def _get_instance_id(self, inst): |
919 | + return inst.get('id') |
920 | + |
921 | |
922 | class ViewBuilderV11(ViewBuilder): |
923 | """Model an Openstack API V1.0 server response.""" |
924 | @@ -196,11 +204,10 @@ |
925 | |
926 | def _build_extra(self, response, inst): |
927 | self._build_links(response, inst) |
928 | - response['uuid'] = inst['uuid'] |
929 | |
930 | def _build_links(self, response, inst): |
931 | - href = self.generate_href(inst["id"]) |
932 | - bookmark = self.generate_bookmark(inst["id"]) |
933 | + href = self.generate_href(inst["uuid"]) |
934 | + bookmark = self.generate_bookmark(inst["uuid"]) |
935 | |
936 | links = [ |
937 | { |
938 | @@ -249,3 +256,6 @@ |
939 | """Create an url that refers to a specific flavor id.""" |
940 | return os.path.join(common.remove_version_from_href(self.base_url), |
941 | self.project_id, "servers", str(server_id)) |
942 | + |
943 | + def _get_instance_id(self, inst): |
944 | + return inst.get('uuid') |
945 | |
946 | === modified file 'nova/compute/api.py' |
947 | --- nova/compute/api.py 2011-09-21 21:00:53 +0000 |
948 | +++ nova/compute/api.py 2011-09-22 19:29:25 +0000 |
949 | @@ -74,7 +74,7 @@ |
950 | return display_name.translate(table, deletions) |
951 | |
952 | |
953 | -def _is_able_to_shutdown(instance, instance_id): |
954 | +def _is_able_to_shutdown(instance, instance_uuid): |
955 | vm_state = instance["vm_state"] |
956 | task_state = instance["task_state"] |
957 | |
958 | @@ -85,19 +85,20 @@ |
959 | ] |
960 | |
961 | if vm_state not in valid_shutdown_states: |
962 | - LOG.warn(_("Instance %(instance_id)s is not in an 'active' state. It " |
963 | - "is currently %(vm_state)s. Shutdown aborted.") % locals()) |
964 | + LOG.warn(_("Instance %(instance_uuid)s is not in an 'active' " |
965 | + "state. It is currently %(vm_state)s. Shutdown " |
966 | + "aborted.") % locals()) |
967 | return False |
968 | |
969 | return True |
970 | |
971 | |
972 | -def _is_queued_delete(instance, instance_id): |
973 | +def _is_queued_delete(instance, instance_uuid): |
974 | vm_state = instance["vm_state"] |
975 | task_state = instance["task_state"] |
976 | |
977 | if vm_state != vm_states.SOFT_DELETE: |
978 | - LOG.warn(_("Instance %(instance_id)s is not in a 'soft delete' " |
979 | + LOG.warn(_("Instance %(instance_uuid)s is not in a 'soft delete' " |
980 | "state. It is currently %(vm_state)s. Action aborted.") % |
981 | locals()) |
982 | return False |
983 | @@ -320,7 +321,7 @@ |
984 | return size |
985 | |
986 | def _update_image_block_device_mapping(self, elevated_context, |
987 | - instance_type, instance_id, |
988 | + instance_type, instance_uuid, |
989 | mappings): |
990 | """tell vm driver to create ephemeral/swap device at boot time by |
991 | updating BlockDeviceMapping |
992 | @@ -343,7 +344,7 @@ |
993 | continue |
994 | |
995 | values = { |
996 | - 'instance_id': instance_id, |
997 | + 'instance_uuid': instance_uuid, |
998 | 'device_name': bdm['device'], |
999 | 'virtual_name': virtual_name, |
1000 | 'volume_size': size} |
1001 | @@ -351,7 +352,7 @@ |
1002 | values) |
1003 | |
1004 | def _update_block_device_mapping(self, elevated_context, |
1005 | - instance_type, instance_id, |
1006 | + instance_type, instance_uuid, |
1007 | block_device_mapping): |
1008 | """tell vm driver to attach volume at boot time by updating |
1009 | BlockDeviceMapping |
1010 | @@ -360,7 +361,7 @@ |
1011 | for bdm in block_device_mapping: |
1012 | assert 'device_name' in bdm |
1013 | |
1014 | - values = {'instance_id': instance_id} |
1015 | + values = {'instance_uuid': instance_uuid} |
1016 | for key in ('device_name', 'delete_on_termination', 'virtual_name', |
1017 | 'snapshot_id', 'volume_id', 'volume_size', |
1018 | 'no_device'): |
1019 | @@ -413,47 +414,55 @@ |
1020 | |
1021 | instance = dict(launch_index=num, **base_options) |
1022 | instance = self.db.instance_create(context, instance) |
1023 | - instance_id = instance['id'] |
1024 | + instance_uuid = instance['uuid'] |
1025 | |
1026 | for security_group_id in security_groups: |
1027 | self.db.instance_add_security_group(elevated, |
1028 | - instance_id, |
1029 | + instance_uuid, |
1030 | security_group_id) |
1031 | |
1032 | - # BlockDeviceMapping table |
1033 | - self._update_image_block_device_mapping(elevated, instance_type, |
1034 | - instance_id, image['properties'].get('mappings', [])) |
1035 | - self._update_block_device_mapping(elevated, instance_type, instance_id, |
1036 | - image['properties'].get('block_device_mapping', [])) |
1037 | - # override via command line option |
1038 | - self._update_block_device_mapping(elevated, instance_type, instance_id, |
1039 | + mappings = image['properties'].get('mappings', []) |
1040 | + self._update_image_block_device_mapping(elevated, |
1041 | + instance_type, |
1042 | + instance_uuid, |
1043 | + mappings) |
1044 | + |
1045 | + device_mapping = image['properties'].get('block_device_mapping', []) |
1046 | + self._update_block_device_mapping(elevated, |
1047 | + instance_type, |
1048 | + instance_uuid, |
1049 | + device_mapping) |
1050 | + |
1051 | + self._update_block_device_mapping(elevated, |
1052 | + instance_type, |
1053 | + instance_uuid, |
1054 | block_device_mapping) |
1055 | |
1056 | # Set sane defaults if not specified |
1057 | updates = {} |
1058 | if (not hasattr(instance, 'display_name') or |
1059 | instance.display_name is None): |
1060 | - updates['display_name'] = "Server %s" % instance_id |
1061 | + updates['display_name'] = "Server %s" % instance_uuid |
1062 | instance['display_name'] = updates['display_name'] |
1063 | updates['hostname'] = self.hostname_factory(instance) |
1064 | updates['vm_state'] = vm_states.BUILDING |
1065 | updates['task_state'] = task_states.SCHEDULING |
1066 | |
1067 | - instance = self.update(context, instance_id, **updates) |
1068 | + instance = self.update(context, instance_uuid, **updates) |
1069 | return instance |
1070 | |
1071 | def _ask_scheduler_to_create_instance(self, context, base_options, |
1072 | instance_type, zone_blob, |
1073 | availability_zone, injected_files, |
1074 | admin_password, image, |
1075 | - instance_id=None, num_instances=1, |
1076 | + instance_uuid=None, num_instances=1, |
1077 | requested_networks=None): |
1078 | """Send the run_instance request to the schedulers for processing.""" |
1079 | pid = context.project_id |
1080 | uid = context.user_id |
1081 | - if instance_id: |
1082 | + if instance_uuid: |
1083 | LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's" |
1084 | - " instance %(instance_id)s (single-shot)") % locals()) |
1085 | + " instance %(instance_uuid)s (single-shot)") % locals()) |
1086 | else: |
1087 | LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's" |
1088 | " (all-at-once)") % locals()) |
1089 | @@ -471,7 +480,7 @@ |
1090 | FLAGS.scheduler_topic, |
1091 | {"method": "run_instance", |
1092 | "args": {"topic": FLAGS.compute_topic, |
1093 | - "instance_id": instance_id, |
1094 | + "instance_uuid": instance_uuid, |
1095 | "request_spec": request_spec, |
1096 | "availability_zone": availability_zone, |
1097 | "admin_password": admin_password, |
1098 | @@ -560,13 +569,13 @@ |
1099 | base_options, security_group, |
1100 | block_device_mapping, num=num) |
1101 | instances.append(instance) |
1102 | - instance_id = instance['id'] |
1103 | + instance_uuid = instance['uuid'] |
1104 | |
1105 | self._ask_scheduler_to_create_instance(context, base_options, |
1106 | instance_type, zone_blob, |
1107 | availability_zone, injected_files, |
1108 | admin_password, image, |
1109 | - instance_id=instance_id, |
1110 | + instance_uuid=instance_uuid, |
1111 | requested_networks=requested_networks) |
1112 | |
1113 | return [dict(x.iteritems()) for x in instances] |
1114 | @@ -670,7 +679,7 @@ |
1115 | {'method': 'refresh_provider_fw_rules', 'args': {}}) |
1116 | |
1117 | def _is_security_group_associated_with_server(self, security_group, |
1118 | - instance_id): |
1119 | + instance_uuid): |
1120 | """Check if the security group is already associated |
1121 | with the instance. If Yes, return True. |
1122 | """ |
1123 | @@ -683,58 +692,59 @@ |
1124 | return False |
1125 | |
1126 | - inst_id = None |
1127 | - for inst_id in (instance['id'] for instance in instances \ |
1128 | - if instance_id == instance['id']): |
1129 | + for inst_uuid in (instance['uuid'] for instance in instances \ |
1130 | + if instance_uuid == instance['uuid']): |
1131 | return True |
1132 | |
1133 | return False |
1134 | |
1135 | - def add_security_group(self, context, instance_id, security_group_name): |
1136 | + def add_security_group(self, context, instance_uuid, security_group_name): |
1137 | """Add security group to the instance""" |
1138 | security_group = self.db.security_group_get_by_name(context, |
1139 | context.project_id, |
1140 | security_group_name) |
1141 | # check if the server exists |
1142 | - inst = self.db.instance_get(context, instance_id) |
1143 | + inst = self.db.instance_get(context, instance_uuid) |
1144 | #check if the security group is associated with the server |
1145 | if self._is_security_group_associated_with_server(security_group, |
1146 | - instance_id): |
1147 | + instance_uuid): |
1148 | raise exception.SecurityGroupExistsForInstance( |
1149 | security_group_id=security_group['id'], |
1150 | - instance_id=instance_id) |
1151 | + instance_uuid=instance_uuid) |
1152 | |
1153 | #check if the instance is in running state |
1154 | if inst['state'] != power_state.RUNNING: |
1155 | - raise exception.InstanceNotRunning(instance_id=instance_id) |
1156 | + raise exception.InstanceNotRunning(instance_uuid=instance_uuid) |
1157 | |
1158 | self.db.instance_add_security_group(context.elevated(), |
1159 | - instance_id, |
1160 | + instance_uuid, |
1161 | security_group['id']) |
1162 | rpc.cast(context, |
1163 | self.db.queue_get_for(context, FLAGS.compute_topic, inst['host']), |
1164 | {"method": "refresh_security_group_rules", |
1165 | "args": {"security_group_id": security_group['id']}}) |
1166 | |
1167 | - def remove_security_group(self, context, instance_id, security_group_name): |
1168 | + def remove_security_group(self, context, instance_uuid, |
1169 | + security_group_name): |
1170 | """Remove the security group associated with the instance""" |
1171 | security_group = self.db.security_group_get_by_name(context, |
1172 | context.project_id, |
1173 | security_group_name) |
1174 | # check if the server exists |
1175 | - inst = self.db.instance_get(context, instance_id) |
1176 | + inst = self.db.instance_get(context, instance_uuid) |
1177 | #check if the security group is associated with the server |
1178 | if not self._is_security_group_associated_with_server(security_group, |
1179 | - instance_id): |
1180 | + instance_uuid): |
1181 | raise exception.SecurityGroupNotExistsForInstance( |
1182 | security_group_id=security_group['id'], |
1183 | - instance_id=instance_id) |
1184 | + instance_uuid=instance_uuid) |
1185 | |
1186 | #check if the instance is in running state |
1187 | if inst['state'] != power_state.RUNNING: |
1188 | - raise exception.InstanceNotRunning(instance_id=instance_id) |
1189 | + raise exception.InstanceNotRunning(instance_uuid=instance_uuid) |
1190 | |
1191 | self.db.instance_remove_security_group(context.elevated(), |
1192 | - instance_id, |
1193 | + instance_uuid, |
1194 | security_group['id']) |
1195 | rpc.cast(context, |
1196 | self.db.queue_get_for(context, FLAGS.compute_topic, inst['host']), |
1197 | @@ -742,36 +752,35 @@ |
1198 | "args": {"security_group_id": security_group['id']}}) |
1199 | |
1200 | @scheduler_api.reroute_compute("update") |
1201 | - def update(self, context, instance_id, **kwargs): |
1202 | + def update(self, context, instance_uuid, **kwargs): |
1203 | """Updates the instance in the datastore. |
1204 | |
1205 | :param context: The security context |
1206 | - :param instance_id: ID of the instance to update |
1207 | + :param instance_uuid: UUID of the instance to update |
1208 | :param kwargs: All additional keyword args are treated |
1209 | as data fields of the instance to be |
1210 | updated |
1211 | |
1212 | :returns: None |
1213 | """ |
1214 | - rv = self.db.instance_update(context, instance_id, kwargs) |
1215 | + rv = self.db.instance_update(context, instance_uuid, kwargs) |
1216 | return dict(rv.iteritems()) |
1217 | |
1218 | - def _get_instance(self, context, instance_id, action_str): |
1219 | + def _get_instance(self, context, instance_uuid, action_str): |
1220 | try: |
1221 | - return self.get(context, instance_id) |
1222 | + return self.get(context, instance_uuid) |
1223 | except exception.NotFound: |
1224 | - LOG.warning(_("Instance %(instance_id)s was not found during " |
1225 | - "%(action_str)s") % |
1226 | - {'instance_id': instance_id, 'action_str': action_str}) |
1227 | + LOG.warning(_("Instance %(instance_uuid)s was not found during " |
1228 | + "%(action_str)s") % locals()) |
1229 | raise |
1230 | |
1231 | @scheduler_api.reroute_compute("soft_delete") |
1232 | - def soft_delete(self, context, instance_id): |
1233 | + def soft_delete(self, context, instance_uuid): |
1234 | """Terminate an instance.""" |
1235 | - LOG.debug(_("Going to try to soft delete %s"), instance_id) |
1236 | - instance = self._get_instance(context, instance_id, 'soft delete') |
1237 | + LOG.debug(_("Going to try to soft delete %s"), instance_uuid) |
1238 | + instance = self._get_instance(context, instance_uuid, 'soft delete') |
1239 | |
1240 | - if not _is_able_to_shutdown(instance, instance_id): |
1241 | + if not _is_able_to_shutdown(instance, instance_uuid): |
1242 | return |
1243 | |
1244 | # NOTE(jerdfelt): The compute daemon handles reclaiming instances |
1245 | @@ -780,50 +789,50 @@ |
1246 | host = instance['host'] |
1247 | if host: |
1248 | self.update(context, |
1249 | - instance_id, |
1250 | + instance_uuid, |
1251 | vm_state=vm_states.SOFT_DELETE, |
1252 | task_state=task_states.POWERING_OFF, |
1253 | deleted_at=utils.utcnow()) |
1254 | |
1255 | self._cast_compute_message('power_off_instance', context, |
1256 | - instance_id, host) |
1257 | + instance_uuid, host) |
1258 | else: |
1259 | LOG.warning(_("No host for instance %s, deleting immediately"), |
1260 | - instance_id) |
1261 | - terminate_volumes(self.db, context, instance_id) |
1262 | - self.db.instance_destroy(context, instance_id) |
1263 | + instance_uuid) |
1264 | + terminate_volumes(self.db, context, instance_uuid) |
1265 | + self.db.instance_destroy(context, instance_uuid) |
1266 | |
1267 | @scheduler_api.reroute_compute("delete") |
1268 | - def delete(self, context, instance_id): |
1269 | + def delete(self, context, instance_uuid): |
1270 | """Terminate an instance.""" |
1271 | - LOG.debug(_("Going to try to terminate %s"), instance_id) |
1272 | - instance = self._get_instance(context, instance_id, 'delete') |
1273 | + LOG.debug(_("Going to try to terminate %s"), instance_uuid) |
1274 | + instance = self._get_instance(context, instance_uuid, 'delete') |
1275 | |
1276 | - if not _is_able_to_shutdown(instance, instance_id): |
1277 | + if not _is_able_to_shutdown(instance, instance_uuid): |
1278 | return |
1279 | |
1280 | host = instance['host'] |
1281 | if host: |
1282 | self.update(context, |
1283 | - instance_id, |
1284 | + instance_uuid, |
1285 | task_state=task_states.DELETING) |
1286 | |
1287 | self._cast_compute_message('terminate_instance', context, |
1288 | - instance_id, host) |
1289 | + instance_uuid, host) |
1290 | else: |
1291 | - terminate_volumes(self.db, context, instance_id) |
1292 | - self.db.instance_destroy(context, instance_id) |
1293 | + terminate_volumes(self.db, context, instance_uuid) |
1294 | + self.db.instance_destroy(context, instance_uuid) |
1295 | |
1296 | @scheduler_api.reroute_compute("restore") |
1297 | - def restore(self, context, instance_id): |
1298 | + def restore(self, context, instance_uuid): |
1299 | """Restore a previously deleted (but not reclaimed) instance.""" |
1300 | - instance = self._get_instance(context, instance_id, 'restore') |
1301 | + instance = self._get_instance(context, instance_uuid, 'restore') |
1302 | |
1303 | - if not _is_queued_delete(instance, instance_id): |
1304 | + if not _is_queued_delete(instance, instance_uuid): |
1305 | return |
1306 | |
1307 | self.update(context, |
1308 | - instance_id, |
1309 | + instance_uuid, |
1310 | vm_state=vm_states.ACTIVE, |
1311 | task_state=None, |
1312 | deleted_at=None) |
1313 | @@ -831,43 +840,43 @@ |
1314 | host = instance['host'] |
1315 | if host: |
1316 | self.update(context, |
1317 | - instance_id, |
1318 | + instance_uuid, |
1319 | task_state=task_states.POWERING_ON) |
1320 | self._cast_compute_message('power_on_instance', context, |
1321 | - instance_id, host) |
1322 | + instance_uuid, host) |
1323 | |
1324 | @scheduler_api.reroute_compute("force_delete") |
1325 | - def force_delete(self, context, instance_id): |
1326 | + def force_delete(self, context, instance_uuid): |
1327 | """Force delete a previously deleted (but not reclaimed) instance.""" |
1328 | - instance = self._get_instance(context, instance_id, 'force delete') |
1329 | + instance = self._get_instance(context, instance_uuid, 'force delete') |
1330 | |
1331 | - if not _is_queued_delete(instance, instance_id): |
1332 | + if not _is_queued_delete(instance, instance_uuid): |
1333 | return |
1334 | |
1335 | self.update(context, |
1336 | - instance_id, |
1337 | + instance_uuid, |
1338 | task_state=task_states.DELETING, |
1339 | progress=0) |
1340 | |
1341 | host = instance['host'] |
1342 | if host: |
1343 | self._cast_compute_message('terminate_instance', context, |
1344 | - instance_id, host) |
1345 | + instance_uuid, host) |
1346 | else: |
1347 | - terminate_volumes(self.db, context, instance_id) |
1348 | - self.db.instance_destroy(context, instance_id) |
1349 | + terminate_volumes(self.db, context, instance_uuid) |
1350 | + self.db.instance_destroy(context, instance_uuid) |
1351 | |
1352 | @scheduler_api.reroute_compute("stop") |
1353 | - def stop(self, context, instance_id): |
1354 | + def stop(self, context, instance_uuid): |
1355 | """Stop an instance.""" |
1356 | - LOG.debug(_("Going to try to stop %s"), instance_id) |
1357 | + LOG.debug(_("Going to try to stop %s"), instance_uuid) |
1358 | |
1359 | - instance = self._get_instance(context, instance_id, 'stopping') |
1360 | - if not _is_able_to_shutdown(instance, instance_id): |
1361 | + instance = self._get_instance(context, instance_uuid, 'stopping') |
1362 | + if not _is_able_to_shutdown(instance, instance_uuid): |
1363 | return |
1364 | |
1365 | self.update(context, |
1366 | - instance_id, |
1367 | + instance_uuid, |
1368 | vm_state=vm_states.ACTIVE, |
1369 | task_state=task_states.STOPPING, |
1370 | terminated_at=utils.utcnow(), |
1371 | @@ -876,21 +885,21 @@ |
1372 | host = instance['host'] |
1373 | if host: |
1374 | self._cast_compute_message('stop_instance', context, |
1375 | - instance_id, host) |
1376 | + instance_uuid, host) |
1377 | |
1378 | - def start(self, context, instance_id): |
1379 | + def start(self, context, instance_uuid): |
1380 | """Start an instance.""" |
1381 | - LOG.debug(_("Going to try to start %s"), instance_id) |
1382 | - instance = self._get_instance(context, instance_id, 'starting') |
1383 | + LOG.debug(_("Going to try to start %s"), instance_uuid) |
1384 | + instance = self._get_instance(context, instance_uuid, 'starting') |
1385 | vm_state = instance["vm_state"] |
1386 | |
1387 | if vm_state != vm_states.STOPPED: |
1388 | - LOG.warning(_("Instance %(instance_id)s is not " |
1389 | + LOG.warning(_("Instance %(instance_uuid)s is not " |
1390 | "stopped. (%(vm_state)s)") % locals()) |
1391 | return |
1392 | |
1393 | self.update(context, |
1394 | - instance_id, |
1395 | + instance_uuid, |
1396 | vm_state=vm_states.STOPPED, |
1397 | task_state=task_states.STARTING) |
1398 | |
1399 | @@ -901,7 +910,7 @@ |
1400 | FLAGS.scheduler_topic, |
1401 | {"method": "start_instance", |
1402 | "args": {"topic": FLAGS.compute_topic, |
1403 | - "instance_id": instance_id}}) |
1404 | + "instance_uuid": instance_uuid}}) |
1405 | |
1406 | def get_active_by_window(self, context, begin, end=None, project_id=None): |
1407 | """Get instances that were continuously active over a window.""" |
1408 | @@ -912,27 +921,20 @@ |
1409 | """Get an instance type by instance type id.""" |
1410 | return self.db.instance_type_get(context, instance_type_id) |
1411 | |
1412 | - def get(self, context, instance_id): |
1413 | - """Get a single instance with the given instance_id.""" |
1414 | - # NOTE(sirp): id used to be exclusively integer IDs; now we're |
1415 | - # accepting both UUIDs and integer IDs. The handling of this |
1416 | - # is done in db/sqlalchemy/api/instance_get |
1417 | - if utils.is_uuid_like(instance_id): |
1418 | - uuid = instance_id |
1419 | - instance = self.db.instance_get_by_uuid(context, uuid) |
1420 | - else: |
1421 | - instance = self.db.instance_get(context, instance_id) |
1422 | + def get(self, context, instance_uuid): |
1423 | + """Get a single instance with the given instance_uuid.""" |
1424 | + instance = self.db.instance_get(context, instance_uuid) |
1425 | return dict(instance.iteritems()) |
1426 | |
1427 | @scheduler_api.reroute_compute("get") |
1428 | - def routing_get(self, context, instance_id): |
1429 | + def routing_get(self, context, instance_uuid): |
1430 | """A version of get with special routing characteristics. |
1431 | |
1432 | Use this method instead of get() if this is the only operation you |
1433 | intend to to. It will route to novaclient.get if the instance is not |
1434 | found. |
1435 | """ |
1436 | - return self.get(context, instance_id) |
1437 | + return self.get(context, instance_uuid) |
1438 | |
1439 | def get_all(self, context, search_opts=None): |
1440 | """Get all instances filtered by one of the given parameters. |
1441 | @@ -1025,7 +1027,7 @@ |
1442 | |
1443 | return self.db.instance_get_all_by_filters(context, filters) |
1444 | |
1445 | - def _cast_compute_message(self, method, context, instance_id, host=None, |
1446 | + def _cast_compute_message(self, method, context, instance_uuid, host=None, |
1447 | params=None): |
1448 | """Generic handler for RPC casts to compute. |
1449 | |
1450 | @@ -1037,14 +1039,14 @@ |
1451 | if not params: |
1452 | params = {} |
1453 | if not host: |
1454 | - instance = self.get(context, instance_id) |
1455 | + instance = self.get(context, instance_uuid) |
1456 | host = instance['host'] |
1457 | queue = self.db.queue_get_for(context, FLAGS.compute_topic, host) |
1458 | - params['instance_id'] = instance_id |
1459 | + params['instance_uuid'] = instance_uuid |
1460 | kwargs = {'method': method, 'args': params} |
1461 | rpc.cast(context, queue, kwargs) |
1462 | |
1463 | - def _call_compute_message(self, method, context, instance_id, host=None, |
1464 | + def _call_compute_message(self, method, context, instance_uuid, host=None, |
1465 | params=None): |
1466 | """Generic handler for RPC calls to compute. |
1467 | |
1468 | @@ -1056,10 +1058,10 @@ |
1469 | if not params: |
1470 | params = {} |
1471 | if not host: |
1472 | - instance = self.get(context, instance_id) |
1473 | + instance = self.get(context, instance_uuid) |
1474 | host = instance['host'] |
1475 | queue = self.db.queue_get_for(context, FLAGS.compute_topic, host) |
1476 | - params['instance_id'] = instance_id |
1477 | + params['instance_uuid'] = instance_uuid |
1478 | kwargs = {'method': method, 'args': params} |
1479 | return rpc.call(context, queue, kwargs) |
1480 | |
1481 | @@ -1067,53 +1069,53 @@ |
1482 | """Generic handler for RPC calls to the scheduler.""" |
1483 | rpc.cast(context, FLAGS.scheduler_topic, args) |
1484 | |
1485 | - def _find_host(self, context, instance_id): |
1486 | + def _find_host(self, context, instance_uuid): |
1487 | """Find the host associated with an instance.""" |
1488 | for attempts in xrange(FLAGS.find_host_timeout): |
1489 | - instance = self.get(context, instance_id) |
1490 | + instance = self.get(context, instance_uuid) |
1491 | host = instance["host"] |
1492 | if host: |
1493 | return host |
1494 | time.sleep(1) |
1495 | raise exception.Error(_("Unable to find host for Instance %s") |
1496 | - % instance_id) |
1497 | + % instance_uuid) |
1498 | |
1499 | @scheduler_api.reroute_compute("backup") |
1500 | - def backup(self, context, instance_id, name, backup_type, rotation, |
1501 | + def backup(self, context, instance_uuid, name, backup_type, rotation, |
1502 | extra_properties=None): |
1503 | """Backup the given instance |
1504 | |
1505 | - :param instance_id: nova.db.sqlalchemy.models.Instance.Id |
1506 | + :param instance_uuid: nova.db.sqlalchemy.models.Instance.uuid |
1507 | :param name: name of the backup or snapshot |
1508 | name = backup_type # daily backups are called 'daily' |
1509 | :param rotation: int representing how many backups to keep around; |
1510 | None if rotation shouldn't be used (as in the case of snapshots) |
1511 | :param extra_properties: dict of extra image properties to include |
1512 | """ |
1513 | - recv_meta = self._create_image(context, instance_id, name, 'backup', |
1514 | + recv_meta = self._create_image(context, instance_uuid, name, 'backup', |
1515 | backup_type=backup_type, rotation=rotation, |
1516 | extra_properties=extra_properties) |
1517 | return recv_meta |
1518 | |
1519 | @scheduler_api.reroute_compute("snapshot") |
1520 | - def snapshot(self, context, instance_id, name, extra_properties=None): |
1521 | + def snapshot(self, context, instance_uuid, name, extra_properties=None): |
1522 | """Snapshot the given instance. |
1523 | |
1524 | - :param instance_id: nova.db.sqlalchemy.models.Instance.Id |
1525 | + :param instance_uuid: nova.db.sqlalchemy.models.Instance.uuid |
1526 | :param name: name of the backup or snapshot |
1527 | :param extra_properties: dict of extra image properties to include |
1528 | |
1529 | :returns: A dict containing image metadata |
1530 | """ |
1531 | - return self._create_image(context, instance_id, name, 'snapshot', |
1532 | + return self._create_image(context, instance_uuid, name, 'snapshot', |
1533 | extra_properties=extra_properties) |
1534 | |
1535 | - def _create_image(self, context, instance_id, name, image_type, |
1536 | + def _create_image(self, context, instance_uuid, name, image_type, |
1537 | backup_type=None, rotation=None, extra_properties=None): |
1538 | """Create snapshot or backup for an instance on this host. |
1539 | |
1540 | :param context: security context |
1541 | - :param instance_id: nova.db.sqlalchemy.models.Instance.Id |
1542 | + :param instance_uuid: nova.db.sqlalchemy.models.Instance.uuid |
1543 | :param name: string for name of the snapshot |
1544 | :param image_type: snapshot | backup |
1545 | :param backup_type: daily | weekly |
1546 | @@ -1122,7 +1124,7 @@ |
1547 | :param extra_properties: dict of extra image properties to include |
1548 | |
1549 | """ |
1550 | - instance = self.db.instance_get(context, instance_id) |
1551 | + instance = self.db.instance_get(context, instance_uuid) |
1552 | properties = {'instance_uuid': instance['uuid'], |
1553 | 'user_id': str(context.user_id), |
1554 | 'image_state': 'creating', |
1555 | @@ -1134,25 +1136,25 @@ |
1556 | recv_meta = self.image_service.create(context, sent_meta) |
1557 | params = {'image_id': recv_meta['id'], 'image_type': image_type, |
1558 | 'backup_type': backup_type, 'rotation': rotation} |
1559 | - self._cast_compute_message('snapshot_instance', context, instance_id, |
1560 | + self._cast_compute_message('snapshot_instance', context, instance_uuid, |
1561 | params=params) |
1562 | return recv_meta |
1563 | |
1564 | @scheduler_api.reroute_compute("reboot") |
1565 | - def reboot(self, context, instance_id, reboot_type): |
1566 | + def reboot(self, context, instance_uuid, reboot_type): |
1567 | """Reboot the given instance.""" |
1568 | self.update(context, |
1569 | - instance_id, |
1570 | + instance_uuid, |
1571 | vm_state=vm_states.ACTIVE, |
1572 | task_state=task_states.REBOOTING) |
1573 | - self._cast_compute_message('reboot_instance', context, instance_id, |
1574 | + self._cast_compute_message('reboot_instance', context, instance_uuid, |
1575 | params={'reboot_type': reboot_type}) |
1576 | |
1577 | @scheduler_api.reroute_compute("rebuild") |
1578 | - def rebuild(self, context, instance_id, image_href, admin_password, |
1579 | + def rebuild(self, context, instance_uuid, image_href, admin_password, |
1580 | name=None, metadata=None, files_to_inject=None): |
1581 | """Rebuild the given instance with the provided metadata.""" |
1582 | - instance = self.db.instance_get(context, instance_id) |
1583 | + instance = self.db.instance_get(context, instance_uuid) |
1584 | name = name or instance["display_name"] |
1585 | |
1586 | if instance["vm_state"] != vm_states.ACTIVE: |
1587 | @@ -1166,7 +1168,7 @@ |
1588 | self._check_metadata_properties_quota(context, metadata) |
1589 | |
1590 | self.update(context, |
1591 | - instance_id, |
1592 | + instance_uuid, |
1593 | metadata=metadata, |
1594 | display_name=name, |
1595 | image_ref=image_href, |
1596 | @@ -1181,23 +1183,24 @@ |
1597 | |
1598 | self._cast_compute_message('rebuild_instance', |
1599 | context, |
1600 | - instance_id, |
1601 | + instance_uuid, |
1602 | params=rebuild_params) |
1603 | |
1604 | @scheduler_api.reroute_compute("revert_resize") |
1605 | - def revert_resize(self, context, instance_id): |
1606 | + def revert_resize(self, context, instance_uuid): |
1607 | """Reverts a resize, deleting the 'new' instance in the process.""" |
1608 | context = context.elevated() |
1609 | - instance_ref = self._get_instance(context, instance_id, |
1610 | + instance_ref = self._get_instance(context, instance_uuid, |
1611 | 'revert_resize') |
1612 | migration_ref = self.db.migration_get_by_instance_and_status(context, |
1613 | instance_ref['uuid'], 'finished') |
1614 | if not migration_ref: |
1615 | - raise exception.MigrationNotFoundByStatus(instance_id=instance_id, |
1616 | - status='finished') |
1617 | + raise exception.MigrationNotFoundByStatus( |
1618 | + instance_uuid=instance_uuid, |
1619 | + status='finished') |
1620 | |
1621 | self.update(context, |
1622 | - instance_id, |
1623 | + instance_uuid, |
1624 | vm_state=vm_states.ACTIVE, |
1625 | task_state=None) |
1626 | |
1627 | @@ -1211,19 +1214,20 @@ |
1628 | {'status': 'reverted'}) |
1629 | |
1630 | @scheduler_api.reroute_compute("confirm_resize") |
1631 | - def confirm_resize(self, context, instance_id): |
1632 | + def confirm_resize(self, context, instance_uuid): |
1633 | """Confirms a migration/resize and deletes the 'old' instance.""" |
1634 | context = context.elevated() |
1635 | - instance_ref = self._get_instance(context, instance_id, |
1636 | + instance_ref = self._get_instance(context, instance_uuid, |
1637 | 'confirm_resize') |
1638 | migration_ref = self.db.migration_get_by_instance_and_status(context, |
1639 | instance_ref['uuid'], 'finished') |
1640 | if not migration_ref: |
1641 | - raise exception.MigrationNotFoundByStatus(instance_id=instance_id, |
1642 | - status='finished') |
1643 | + raise exception.MigrationNotFoundByStatus( |
1644 | + instance_uuid=instance_uuid, |
1645 | + status='finished') |
1646 | |
1647 | self.update(context, |
1648 | - instance_id, |
1649 | + instance_uuid, |
1650 | vm_state=vm_states.ACTIVE, |
1651 | task_state=None) |
1652 | |
1653 | @@ -1235,18 +1239,18 @@ |
1654 | |
1655 | self.db.migration_update(context, migration_ref['id'], |
1656 | {'status': 'confirmed'}) |
1657 | - self.db.instance_update(context, instance_id, |
1658 | + self.db.instance_update(context, instance_uuid, |
1659 | {'host': migration_ref['dest_compute'], }) |
1660 | |
1661 | @scheduler_api.reroute_compute("resize") |
1662 | - def resize(self, context, instance_id, flavor_id=None): |
1663 | + def resize(self, context, instance_uuid, flavor_id=None): |
1664 | """Resize (ie, migrate) a running instance. |
1665 | |
1666 | If flavor_id is None, the process is considered a migration, keeping |
1667 | the original flavor_id. If flavor_id is not None, the instance should |
1668 | be migrated to a new host and resized to the new flavor_id. |
1669 | """ |
1670 | - instance_ref = self._get_instance(context, instance_id, 'resize') |
1671 | + instance_ref = self._get_instance(context, instance_uuid, 'resize') |
1672 | current_instance_type = instance_ref['instance_type'] |
1673 | |
1674 | # If flavor_id is not provided, only migrate the instance. |
1675 | @@ -1273,29 +1277,29 @@ |
1676 | raise exception.CannotResizeToSameSize() |
1677 | |
1678 | self.update(context, |
1679 | - instance_id, |
1680 | + instance_uuid, |
1681 | vm_state=vm_states.RESIZING, |
1682 | task_state=task_states.RESIZE_PREP) |
1683 | |
1684 | - instance_ref = self._get_instance(context, instance_id, 'resize') |
1685 | + instance_ref = self._get_instance(context, instance_uuid, 'resize') |
1686 | self._cast_scheduler_message(context, |
1687 | {"method": "prep_resize", |
1688 | "args": {"topic": FLAGS.compute_topic, |
1689 | - "instance_id": instance_ref['uuid'], |
1690 | + "instance_uuid": instance_ref['uuid'], |
1691 | "instance_type_id": new_instance_type['id']}}) |
1692 | |
1693 | @scheduler_api.reroute_compute("add_fixed_ip") |
1694 | - def add_fixed_ip(self, context, instance_id, network_id): |
1695 | + def add_fixed_ip(self, context, instance_uuid, network_id): |
1696 | """Add fixed_ip from specified network to given instance.""" |
1697 | self._cast_compute_message('add_fixed_ip_to_instance', context, |
1698 | - instance_id, |
1699 | + instance_uuid, |
1700 | params=dict(network_id=network_id)) |
1701 | |
1702 | @scheduler_api.reroute_compute("remove_fixed_ip") |
1703 | - def remove_fixed_ip(self, context, instance_id, address): |
1704 | + def remove_fixed_ip(self, context, instance_uuid, address): |
1705 | """Remove fixed_ip from specified network to given instance.""" |
1706 | self._cast_compute_message('remove_fixed_ip_from_instance', context, |
1707 | - instance_id, params=dict(address=address)) |
1708 | + instance_uuid, params=dict(address=address)) |
1709 | |
1710 | #TODO(tr3buchet): how to run this in the correct zone? |
1711 | def add_network_to_project(self, context, project_id): |
1712 | @@ -1308,22 +1312,22 @@ |
1713 | self.network_api.add_network_to_project(context, project_id) |
1714 | |
1715 | @scheduler_api.reroute_compute("pause") |
1716 | - def pause(self, context, instance_id): |
1717 | + def pause(self, context, instance_uuid): |
1718 | """Pause the given instance.""" |
1719 | self.update(context, |
1720 | - instance_id, |
1721 | + instance_uuid, |
1722 | vm_state=vm_states.ACTIVE, |
1723 | task_state=task_states.PAUSING) |
1724 | - self._cast_compute_message('pause_instance', context, instance_id) |
1725 | + self._cast_compute_message('pause_instance', context, instance_uuid) |
1726 | |
1727 | @scheduler_api.reroute_compute("unpause") |
1728 | - def unpause(self, context, instance_id): |
1729 | + def unpause(self, context, instance_uuid): |
1730 | """Unpause the given instance.""" |
1731 | self.update(context, |
1732 | - instance_id, |
1733 | + instance_uuid, |
1734 | vm_state=vm_states.PAUSED, |
1735 | task_state=task_states.UNPAUSING) |
1736 | - self._cast_compute_message('unpause_instance', context, instance_id) |
1737 | + self._cast_compute_message('unpause_instance', context, instance_uuid) |
1738 | |
1739 | def _call_compute_message_for_host(self, action, context, host, params): |
1740 | """Call method deliberately designed to make host/service only calls""" |
1741 | @@ -1342,76 +1346,79 @@ |
1742 | context, host=host, params={"action": action}) |
1743 | |
1744 | @scheduler_api.reroute_compute("diagnostics") |
1745 | - def get_diagnostics(self, context, instance_id): |
1746 | + def get_diagnostics(self, context, instance_uuid): |
1747 | """Retrieve diagnostics for the given instance.""" |
1748 | return self._call_compute_message("get_diagnostics", |
1749 | context, |
1750 | - instance_id) |
1751 | + instance_uuid) |
1752 | |
1753 | - def get_actions(self, context, instance_id): |
1754 | + def get_actions(self, context, instance_uuid): |
1755 | """Retrieve actions for the given instance.""" |
1756 | - return self.db.instance_get_actions(context, instance_id) |
1757 | + return self.db.instance_get_actions(context, instance_uuid) |
1758 | |
1759 | @scheduler_api.reroute_compute("suspend") |
1760 | - def suspend(self, context, instance_id): |
1761 | + def suspend(self, context, instance_uuid): |
1762 | """Suspend the given instance.""" |
1763 | self.update(context, |
1764 | - instance_id, |
1765 | + instance_uuid, |
1766 | vm_state=vm_states.ACTIVE, |
1767 | task_state=task_states.SUSPENDING) |
1768 | - self._cast_compute_message('suspend_instance', context, instance_id) |
1769 | + self._cast_compute_message('suspend_instance', context, instance_uuid) |
1770 | |
1771 | @scheduler_api.reroute_compute("resume") |
1772 | - def resume(self, context, instance_id): |
1773 | + def resume(self, context, instance_uuid): |
1774 | """Resume the given instance.""" |
1775 | self.update(context, |
1776 | - instance_id, |
1777 | + instance_uuid, |
1778 | vm_state=vm_states.SUSPENDED, |
1779 | task_state=task_states.RESUMING) |
1780 | - self._cast_compute_message('resume_instance', context, instance_id) |
1781 | + self._cast_compute_message('resume_instance', context, instance_uuid) |
1782 | |
1783 | @scheduler_api.reroute_compute("rescue") |
1784 | - def rescue(self, context, instance_id, rescue_password=None): |
1785 | + def rescue(self, context, instance_uuid, rescue_password=None): |
1786 | """Rescue the given instance.""" |
1787 | self.update(context, |
1788 | - instance_id, |
1789 | + instance_uuid, |
1790 | vm_state=vm_states.ACTIVE, |
1791 | task_state=task_states.RESCUING) |
1792 | - |
1793 | rescue_params = { |
1794 | "rescue_password": rescue_password |
1795 | } |
1796 | - self._cast_compute_message('rescue_instance', context, instance_id, |
1797 | + self._cast_compute_message('rescue_instance', context, instance_uuid, |
1798 | params=rescue_params) |
1799 | |
1800 | @scheduler_api.reroute_compute("unrescue") |
1801 | - def unrescue(self, context, instance_id): |
1802 | + def unrescue(self, context, instance_uuid): |
1803 | """Unrescue the given instance.""" |
1804 | self.update(context, |
1805 | - instance_id, |
1806 | + instance_uuid, |
1807 | vm_state=vm_states.RESCUED, |
1808 | task_state=task_states.UNRESCUING) |
1809 | - self._cast_compute_message('unrescue_instance', context, instance_id) |
1810 | + self._cast_compute_message('unrescue_instance', context, instance_uuid) |
1811 | |
1812 | @scheduler_api.reroute_compute("set_admin_password") |
1813 | - def set_admin_password(self, context, instance_id, password=None): |
1814 | + def set_admin_password(self, context, instance_uuid, password=None): |
1815 | """Set the root/admin password for the given instance.""" |
1816 | - host = self._find_host(context, instance_id) |
1817 | - |
1818 | - rpc.cast(context, |
1819 | - self.db.queue_get_for(context, FLAGS.compute_topic, host), |
1820 | - {"method": "set_admin_password", |
1821 | - "args": {"instance_id": instance_id, "new_pass": password}}) |
1822 | - |
1823 | - def inject_file(self, context, instance_id): |
1824 | + host = self._find_host(context, instance_uuid) |
1825 | + queue = self.db.queue_get_for(context, FLAGS.compute_topic, host) |
1826 | + message = { |
1827 | + "method": "set_admin_password", |
1828 | + "args": { |
1829 | + "instance_uuid": instance_uuid, |
1830 | + "new_pass": password, |
1831 | + }, |
1832 | + } |
1833 | + rpc.cast(context, queue, message) |
1834 | + |
1835 | + def inject_file(self, context, instance_uuid): |
1836 | """Write a file to the given instance.""" |
1837 | - self._cast_compute_message('inject_file', context, instance_id) |
1838 | + self._cast_compute_message('inject_file', context, instance_uuid) |
1839 | |
1840 | - def get_ajax_console(self, context, instance_id): |
1841 | + def get_ajax_console(self, context, instance_uuid): |
1842 | """Get a url to an AJAX Console.""" |
1843 | output = self._call_compute_message('get_ajax_console', |
1844 | context, |
1845 | - instance_id) |
1846 | + instance_uuid) |
1847 | rpc.cast(context, '%s' % FLAGS.ajax_console_proxy_topic, |
1848 | {'method': 'authorize_ajax_console', |
1849 | 'args': {'token': output['token'], 'host': output['host'], |
1850 | @@ -1419,12 +1426,12 @@ |
1851 | return {'url': '%s/?token=%s' % (FLAGS.ajax_console_proxy_url, |
1852 | output['token'])} |
1853 | |
1854 | - def get_vnc_console(self, context, instance_id): |
1855 | + def get_vnc_console(self, context, instance_uuid): |
1856 | """Get a url to a VNC Console.""" |
1857 | - instance = self.get(context, instance_id) |
1858 | + instance = self.get(context, instance_uuid) |
1859 | output = self._call_compute_message('get_vnc_console', |
1860 | context, |
1861 | - instance_id) |
1862 | + instance_uuid) |
1863 | rpc.call(context, '%s' % FLAGS.vncproxy_topic, |
1864 | {'method': 'authorize_vnc_console', |
1865 | 'args': {'token': output['token'], |
1866 | @@ -1438,46 +1445,48 @@ |
1867 | 'hostignore', |
1868 | 'portignore')} |
1869 | |
1870 | - def get_console_output(self, context, instance_id): |
1871 | + def get_console_output(self, context, instance_uuid): |
1872 | """Get console output for an an instance.""" |
1873 | return self._call_compute_message('get_console_output', |
1874 | context, |
1875 | - instance_id) |
1876 | + instance_uuid) |
1877 | |
1878 | - def lock(self, context, instance_id): |
1879 | + def lock(self, context, instance_uuid): |
1880 | """Lock the given instance.""" |
1881 | - self._cast_compute_message('lock_instance', context, instance_id) |
1882 | + self._cast_compute_message('lock_instance', context, instance_uuid) |
1883 | |
1884 | - def unlock(self, context, instance_id): |
1885 | + def unlock(self, context, instance_uuid): |
1886 | """Unlock the given instance.""" |
1887 | - self._cast_compute_message('unlock_instance', context, instance_id) |
1888 | + self._cast_compute_message('unlock_instance', context, instance_uuid) |
1889 | |
1890 | - def get_lock(self, context, instance_id): |
1891 | + def get_lock(self, context, instance_uuid): |
1892 | """Return the boolean state of given instance's lock.""" |
1893 | - instance = self.get(context, instance_id) |
1894 | + instance = self.get(context, instance_uuid) |
1895 | return instance['locked'] |
1896 | |
1897 | - def reset_network(self, context, instance_id): |
1898 | + def reset_network(self, context, instance_uuid): |
1899 | """Reset networking on the instance.""" |
1900 | - self._cast_compute_message('reset_network', context, instance_id) |
1901 | + self._cast_compute_message('reset_network', context, instance_uuid) |
1902 | |
1903 | - def inject_network_info(self, context, instance_id): |
1904 | + def inject_network_info(self, context, instance_uuid): |
1905 | """Inject network info for the instance.""" |
1906 | - self._cast_compute_message('inject_network_info', context, instance_id) |
1907 | + self._cast_compute_message('inject_network_info', |
1908 | + context, |
1909 | + instance_uuid) |
1910 | |
1911 | - def attach_volume(self, context, instance_id, volume_id, device): |
1912 | + def attach_volume(self, context, instance_uuid, volume_id, device): |
1913 | """Attach an existing volume to an existing instance.""" |
1914 | if not re.match("^/dev/[a-z]d[a-z]+$", device): |
1915 | raise exception.ApiError(_("Invalid device specified: %s. " |
1916 | "Example device: /dev/vdb") % device) |
1917 | self.volume_api.check_attach(context, volume_id=volume_id) |
1918 | - instance = self.get(context, instance_id) |
1919 | + instance = self.get(context, instance_uuid) |
1920 | host = instance['host'] |
1921 | rpc.cast(context, |
1922 | self.db.queue_get_for(context, FLAGS.compute_topic, host), |
1923 | {"method": "attach_volume", |
1924 | "args": {"volume_id": volume_id, |
1925 | - "instance_id": instance_id, |
1926 | + "instance_uuid": instance_uuid, |
1927 | "mountpoint": device}}) |
1928 | |
1929 | def detach_volume(self, context, volume_id): |
1930 | @@ -1490,23 +1499,22 @@ |
1931 | rpc.cast(context, |
1932 | self.db.queue_get_for(context, FLAGS.compute_topic, host), |
1933 | {"method": "detach_volume", |
1934 | - "args": {"instance_id": instance['id'], |
1935 | + "args": {"instance_uuid": instance['uuid'], |
1936 | "volume_id": volume_id}}) |
1937 | return instance |
1938 | |
1939 | - def associate_floating_ip(self, context, instance_id, address): |
1940 | + def associate_floating_ip(self, context, instance_uuid, address): |
1941 | """Makes calls to network_api to associate_floating_ip. |
1942 | |
1943 | :param address: is a string floating ip address |
1944 | """ |
1945 | - instance = self.get(context, instance_id) |
1946 | + instance = self.get(context, instance_uuid) |
1947 | |
1948 | # TODO(tr3buchet): currently network_info doesn't contain floating IPs |
1949 | # in its info, if this changes, the next few lines will need to |
1950 | # accommodate the info containing floating as well as fixed ip addresses |
1951 | fixed_ip_addrs = [] |
1952 | - for info in self.network_api.get_instance_nw_info(context, |
1953 | - instance): |
1954 | + for info in self.network_api.get_instance_nw_info(context, instance): |
1955 | ips = info[1]['ips'] |
1956 | fixed_ip_addrs.extend([ip_dict['ip'] for ip_dict in ips]) |
1957 | |
1958 | @@ -1515,7 +1523,7 @@ |
1959 | # support specifying a particular fixed_ip if multiple exist. |
1960 | if not fixed_ip_addrs: |
1961 | msg = _("instance |%s| has no fixed_ips. " |
1962 | - "unable to associate floating ip") % instance_id |
1963 | + "unable to associate floating ip") % instance_uuid |
1964 | raise exception.ApiError(msg) |
1965 | if len(fixed_ip_addrs) > 1: |
1966 | LOG.warning(_("multiple fixed_ips exist, using the first: %s"), |
1967 | @@ -1524,16 +1532,16 @@ |
1968 | floating_ip=address, |
1969 | fixed_ip=fixed_ip_addrs[0]) |
1970 | |
1971 | - def get_instance_metadata(self, context, instance_id): |
1972 | + def get_instance_metadata(self, context, instance_uuid): |
1973 | """Get all metadata associated with an instance.""" |
1974 | - rv = self.db.instance_metadata_get(context, instance_id) |
1975 | + rv = self.db.instance_metadata_get(context, instance_uuid) |
1976 | return dict(rv.iteritems()) |
1977 | |
1978 | - def delete_instance_metadata(self, context, instance_id, key): |
1979 | + def delete_instance_metadata(self, context, instance_uuid, key): |
1980 | """Delete the given metadata item from an instance.""" |
1981 | - self.db.instance_metadata_delete(context, instance_id, key) |
1982 | + self.db.instance_metadata_delete(context, instance_uuid, key) |
1983 | |
1984 | - def update_instance_metadata(self, context, instance_id, |
1985 | + def update_instance_metadata(self, context, instance_uuid, |
1986 | metadata, delete=False): |
1987 | """Updates or creates instance metadata. |
1988 | |
1989 | @@ -1544,9 +1552,20 @@ |
1990 | if delete: |
1991 | _metadata = metadata |
1992 | else: |
1993 | - _metadata = self.get_instance_metadata(context, instance_id) |
1994 | + _metadata = self.get_instance_metadata(context, instance_uuid) |
1995 | _metadata.update(metadata) |
1996 | |
1997 | self._check_metadata_properties_quota(context, _metadata) |
1998 | - self.db.instance_metadata_update(context, instance_id, _metadata, True) |
1999 | + self.db.instance_metadata_update(context, |
2000 | + instance_uuid, |
2001 | + _metadata, |
2002 | + True) |
2003 | return _metadata |
2004 | + |
2005 | + def get_instance_uuid(self, context, instance_id): |
2006 | + """Return instance uuid corresponding to the provided id""" |
2007 | + uuids = self.db.instance_get_id_to_uuid_mapping(context, [instance_id]) |
2008 | + try: |
2009 | + return uuids[instance_id] |
2010 | + except KeyError: |
2011 | + raise exception.InstanceNotFound(instance_uuid=instance_id) |
2012 | |
2013 | === modified file 'nova/compute/manager.py' |
2014 | --- nova/compute/manager.py 2011-09-21 20:59:40 +0000 |
2015 | +++ nova/compute/manager.py 2011-09-22 19:29:25 +0000 |
2016 | @@ -98,16 +98,12 @@ |
2017 | def checks_instance_lock(function): |
2018 | """Decorator to prevent action against locked instances for non-admins.""" |
2019 | @functools.wraps(function) |
2020 | - def decorated_function(self, context, instance_id, *args, **kwargs): |
2021 | - #TODO(anyone): this being called instance_id is forcing a slightly |
2022 | - # confusing convention of pushing instance_uuids |
2023 | - # through an "instance_id" key in the queue args dict when |
2024 | - # casting through the compute API |
2025 | + def decorated_function(self, context, instance_uuid, *args, **kwargs): |
2026 | LOG.info(_("check_instance_lock: decorating: |%s|"), function, |
2027 | context=context) |
2028 | LOG.info(_("check_instance_lock: arguments: |%(self)s| |%(context)s|" |
2029 | - " |%(instance_id)s|") % locals(), context=context) |
2030 | - locked = self.get_lock(context, instance_id) |
2031 | + " |%(instance_uuid)s|") % locals(), context=context) |
2032 | + locked = self.get_lock(context, instance_uuid) |
2033 | admin = context.is_admin |
2034 | LOG.info(_("check_instance_lock: locked: |%s|"), locked, |
2035 | context=context) |
2036 | @@ -118,7 +114,7 @@ |
2037 | if admin or not locked: |
2038 | LOG.info(_("check_instance_lock: executing: |%s|"), function, |
2039 | context=context) |
2040 | - function(self, context, instance_id, *args, **kwargs) |
2041 | + function(self, context, instance_uuid, *args, **kwargs) |
2042 | else: |
2043 | LOG.error(_("check_instance_lock: not executing |%s|"), |
2044 | function, context=context) |
2045 | @@ -152,9 +148,9 @@ |
2046 | super(ComputeManager, self).__init__(service_name="compute", |
2047 | *args, **kwargs) |
2048 | |
2049 | - def _instance_update(self, context, instance_id, **kwargs): |
2050 | + def _instance_update(self, context, instance_uuid, **kwargs): |
2051 | """Update an instance in the database using kwargs as value.""" |
2052 | - return self.db.instance_update(context, instance_id, kwargs) |
2053 | + return self.db.instance_update(context, instance_uuid, kwargs) |
2054 | |
2055 | def init_host(self): |
2056 | """Initialization for a standalone compute service.""" |
2057 | @@ -176,7 +172,7 @@ |
2058 | or FLAGS.start_guests_on_host_boot: |
2059 | LOG.info(_('Rebooting instance %(inst_name)s after ' |
2060 | 'nova-compute restart.'), locals()) |
2061 | - self.reboot_instance(context, instance['id']) |
2062 | + self.reboot_instance(context, instance['uuid']) |
2063 | elif drv_state == power_state.RUNNING: |
2064 | # Hyper-V and VMWareAPI drivers will raise an exception |
2065 | try: |
2066 | @@ -243,14 +239,14 @@ |
2067 | instance) |
2068 | return network_info |
2069 | |
2070 | - def _setup_block_device_mapping(self, context, instance_id): |
2071 | + def _setup_block_device_mapping(self, context, instance_uuid): |
2072 | """setup volumes for block device mapping""" |
2073 | volume_api = volume.API() |
2074 | block_device_mapping = [] |
2075 | swap = None |
2076 | ephemerals = [] |
2077 | for bdm in self.db.block_device_mapping_get_all_by_instance( |
2078 | - context, instance_id): |
2079 | + context, instance_uuid): |
2080 | LOG.debug(_("setting up bdm %s"), bdm) |
2081 | |
2082 | if bdm['no_device']: |
2083 | @@ -296,7 +292,7 @@ |
2084 | if bdm['volume_id'] is not None: |
2085 | volume_api.check_attach(context, |
2086 | volume_id=bdm['volume_id']) |
2087 | - dev_path = self._attach_volume_boot(context, instance_id, |
2088 | + dev_path = self._attach_volume_boot(context, instance_uuid, |
2089 | bdm['volume_id'], |
2090 | bdm['device_name']) |
2091 | block_device_mapping.append({'device_path': dev_path, |
2092 | @@ -305,7 +301,7 @@ |
2093 | |
2094 | return (swap, ephemerals, block_device_mapping) |
2095 | |
2096 | - def _run_instance(self, context, instance_id, **kwargs): |
2097 | + def _run_instance(self, context, instance_uuid, **kwargs): |
2098 | """Launch a new instance with specified options.""" |
2099 | def _check_image_size(): |
2100 | """Ensure image is smaller than the maximum size allowed by the |
2101 | @@ -365,7 +361,7 @@ |
2102 | raise exception.ImageTooLarge() |
2103 | |
2104 | context = context.elevated() |
2105 | - instance = self.db.instance_get(context, instance_id) |
2106 | + instance = self.db.instance_get(context, instance_uuid) |
2107 | |
2108 | requested_networks = kwargs.get('requested_networks', None) |
2109 | |
2110 | @@ -374,14 +370,14 @@ |
2111 | |
2112 | _check_image_size() |
2113 | |
2114 | - LOG.audit(_("instance %s: starting..."), instance_id, |
2115 | + LOG.audit(_("instance %s: starting..."), instance_uuid, |
2116 | context=context) |
2117 | updates = {} |
2118 | updates['host'] = self.host |
2119 | updates['launched_on'] = self.host |
2120 | updates['vm_state'] = vm_states.BUILDING |
2121 | updates['task_state'] = task_states.NETWORKING |
2122 | - instance = self.db.instance_update(context, instance_id, updates) |
2123 | + instance = self.db.instance_update(context, instance_uuid, updates) |
2124 | instance['injected_files'] = kwargs.get('injected_files', []) |
2125 | instance['admin_pass'] = kwargs.get('admin_password', None) |
2126 | |
2127 | @@ -404,13 +400,13 @@ |
2128 | network_info = [] |
2129 | |
2130 | self._instance_update(context, |
2131 | - instance_id, |
2132 | + instance_uuid, |
2133 | vm_state=vm_states.BUILDING, |
2134 | task_state=task_states.BLOCK_DEVICE_MAPPING) |
2135 | |
2136 | (swap, ephemerals, |
2137 | block_device_mapping) = self._setup_block_device_mapping( |
2138 | - context, instance_id) |
2139 | + context, instance_uuid) |
2140 | block_device_info = { |
2141 | 'root_device_name': instance['root_device_name'], |
2142 | 'swap': swap, |
2143 | @@ -418,7 +414,7 @@ |
2144 | 'block_device_mapping': block_device_mapping} |
2145 | |
2146 | self._instance_update(context, |
2147 | - instance_id, |
2148 | + instance_uuid, |
2149 | vm_state=vm_states.BUILDING, |
2150 | task_state=task_states.SPAWNING) |
2151 | |
2152 | @@ -427,7 +423,7 @@ |
2153 | self.driver.spawn(context, instance, |
2154 | network_info, block_device_info) |
2155 | except Exception as ex: # pylint: disable=W0702 |
2156 | - msg = _("Instance '%(instance_id)s' failed to spawn. Is " |
2157 | + msg = _("Instance '%(instance_uuid)s' failed to spawn. Is " |
2158 | "virtualization enabled in the BIOS? Details: " |
2159 | "%(ex)s") % locals() |
2160 | LOG.exception(msg) |
2161 | @@ -435,7 +431,7 @@ |
2162 | |
2163 | current_power_state = self._get_power_state(context, instance) |
2164 | self._instance_update(context, |
2165 | - instance_id, |
2166 | + instance_uuid, |
2167 | power_state=current_power_state, |
2168 | vm_state=vm_states.ACTIVE, |
2169 | task_state=None, |
2170 | @@ -454,23 +450,23 @@ |
2171 | pass |
2172 | |
2173 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2174 | - def run_instance(self, context, instance_id, **kwargs): |
2175 | - self._run_instance(context, instance_id, **kwargs) |
2176 | + def run_instance(self, context, instance_uuid, **kwargs): |
2177 | + self._run_instance(context, instance_uuid, **kwargs) |
2178 | |
2179 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2180 | @checks_instance_lock |
2181 | - def start_instance(self, context, instance_id): |
2182 | + def start_instance(self, context, instance_uuid): |
2183 | """Starting an instance on this host.""" |
2184 | # TODO(yamahata): injected_files isn't supported. |
2185 | # Anyway OSAPI doesn't support stop/start yet |
2186 | - self._run_instance(context, instance_id) |
2187 | + self._run_instance(context, instance_uuid) |
2188 | |
2189 | - def _shutdown_instance(self, context, instance_id, action_str): |
2190 | + def _shutdown_instance(self, context, instance_uuid, action_str): |
2191 | """Shutdown an instance on this host.""" |
2192 | context = context.elevated() |
2193 | - instance = self.db.instance_get(context, instance_id) |
2194 | - LOG.audit(_("%(action_str)s instance %(instance_id)s") % |
2195 | - {'action_str': action_str, 'instance_id': instance_id}, |
2196 | + instance = self.db.instance_get(context, instance_uuid) |
2197 | + LOG.audit(_("%(action_str)s instance %(instance_uuid)s") % |
2198 | + {'action_str': action_str, 'instance_uuid': instance_uuid}, |
2199 | context=context) |
2200 | |
2201 | network_info = self._get_instance_nw_info(context, instance) |
2202 | @@ -479,28 +475,28 @@ |
2203 | |
2204 | volumes = instance.get('volumes') or [] |
2205 | for volume in volumes: |
2206 | - self._detach_volume(context, instance_id, volume['id'], False) |
2207 | + self._detach_volume(context, instance_uuid, volume['id'], False) |
2208 | |
2209 | if instance['power_state'] == power_state.SHUTOFF: |
2210 | - self.db.instance_destroy(context, instance_id) |
2211 | + self.db.instance_destroy(context, instance_uuid) |
2212 | raise exception.Error(_('trying to destroy already destroyed' |
2213 | - ' instance: %s') % instance_id) |
2214 | + ' instance: %s') % instance_uuid) |
2215 | self.driver.destroy(instance, network_info) |
2216 | |
2217 | if action_str == 'Terminating': |
2218 | - terminate_volumes(self.db, context, instance_id) |
2219 | + terminate_volumes(self.db, context, instance_uuid) |
2220 | |
2221 | - def _delete_instance(self, context, instance_id): |
2222 | + def _delete_instance(self, context, instance_uuid): |
2223 | """Delete an instance on this host.""" |
2224 | - self._shutdown_instance(context, instance_id, 'Terminating') |
2225 | - instance = self.db.instance_get(context.elevated(), instance_id) |
2226 | + self._shutdown_instance(context, instance_uuid, 'Terminating') |
2227 | + instance = self.db.instance_get(context.elevated(), instance_uuid) |
2228 | self._instance_update(context, |
2229 | - instance_id, |
2230 | + instance_uuid, |
2231 | vm_state=vm_states.DELETED, |
2232 | task_state=None, |
2233 | terminated_at=utils.utcnow()) |
2234 | |
2235 | - self.db.instance_destroy(context, instance_id) |
2236 | + self.db.instance_destroy(context, instance_uuid) |
2237 | |
2238 | usage_info = utils.usage_from_instance(instance) |
2239 | notifier.notify('compute.%s' % self.host, |
2240 | @@ -509,65 +505,65 @@ |
2241 | |
2242 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2243 | @checks_instance_lock |
2244 | - def terminate_instance(self, context, instance_id): |
2245 | + def terminate_instance(self, context, instance_uuid): |
2246 | """Terminate an instance on this host.""" |
2247 | - self._delete_instance(context, instance_id) |
2248 | + self._delete_instance(context, instance_uuid) |
2249 | |
2250 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2251 | @checks_instance_lock |
2252 | - def stop_instance(self, context, instance_id): |
2253 | + def stop_instance(self, context, instance_uuid): |
2254 | """Stopping an instance on this host.""" |
2255 | - self._shutdown_instance(context, instance_id, 'Stopping') |
2256 | + self._shutdown_instance(context, instance_uuid, 'Stopping') |
2257 | self._instance_update(context, |
2258 | - instance_id, |
2259 | + instance_uuid, |
2260 | vm_state=vm_states.STOPPED, |
2261 | task_state=None) |
2262 | |
2263 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2264 | @checks_instance_lock |
2265 | - def power_off_instance(self, context, instance_id): |
2266 | + def power_off_instance(self, context, instance_uuid): |
2267 | """Power off an instance on this host.""" |
2268 | - instance = self.db.instance_get(context, instance_id) |
2269 | + instance = self.db.instance_get(context, instance_uuid) |
2270 | self.driver.power_off(instance) |
2271 | current_power_state = self._get_power_state(context, instance) |
2272 | self._instance_update(context, |
2273 | - instance_id, |
2274 | + instance_uuid, |
2275 | power_state=current_power_state, |
2276 | task_state=None) |
2277 | |
2278 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2279 | @checks_instance_lock |
2280 | - def power_on_instance(self, context, instance_id): |
2281 | + def power_on_instance(self, context, instance_uuid): |
2282 | """Power on an instance on this host.""" |
2283 | - instance = self.db.instance_get(context, instance_id) |
2284 | + instance = self.db.instance_get(context, instance_uuid) |
2285 | self.driver.power_on(instance) |
2286 | current_power_state = self._get_power_state(context, instance) |
2287 | self._instance_update(context, |
2288 | - instance_id, |
2289 | + instance_uuid, |
2290 | power_state=current_power_state, |
2291 | task_state=None) |
2292 | |
2293 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2294 | @checks_instance_lock |
2295 | - def rebuild_instance(self, context, instance_id, **kwargs): |
2296 | + def rebuild_instance(self, context, instance_uuid, **kwargs): |
2297 | """Destroy and re-make this instance. |
2298 | |
2299 | A 'rebuild' effectively purges all existing data from the system and |
2300 | remakes the VM with given 'metadata' and 'personalities'. |
2301 | |
2302 | :param context: `nova.RequestContext` object |
2303 | - :param instance_id: Instance identifier (integer) |
2304 | + :param instance_uuid: Instance identifier (uuid) |
2305 | :param injected_files: Files to inject |
2306 | :param new_pass: password to set on rebuilt instance |
2307 | """ |
2308 | context = context.elevated() |
2309 | |
2310 | - instance_ref = self.db.instance_get(context, instance_id) |
2311 | - LOG.audit(_("Rebuilding instance %s"), instance_id, context=context) |
2312 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2313 | + LOG.audit(_("Rebuilding instance %s"), instance_uuid, context=context) |
2314 | |
2315 | current_power_state = self._get_power_state(context, instance_ref) |
2316 | self._instance_update(context, |
2317 | - instance_id, |
2318 | + instance_uuid, |
2319 | power_state=current_power_state, |
2320 | vm_state=vm_states.REBUILDING, |
2321 | task_state=None) |
2322 | @@ -576,17 +572,17 @@ |
2323 | self.driver.destroy(instance_ref, network_info) |
2324 | |
2325 | self._instance_update(context, |
2326 | - instance_id, |
2327 | + instance_uuid, |
2328 | vm_state=vm_states.REBUILDING, |
2329 | task_state=task_states.BLOCK_DEVICE_MAPPING) |
2330 | |
2331 | instance_ref.injected_files = kwargs.get('injected_files', []) |
2332 | network_info = self.network_api.get_instance_nw_info(context, |
2333 | instance_ref) |
2334 | - bd_mapping = self._setup_block_device_mapping(context, instance_id) |
2335 | + bd_mapping = self._setup_block_device_mapping(context, instance_uuid) |
2336 | |
2337 | self._instance_update(context, |
2338 | - instance_id, |
2339 | + instance_uuid, |
2340 | vm_state=vm_states.REBUILDING, |
2341 | task_state=task_states.SPAWNING) |
2342 | |
2343 | @@ -598,7 +594,7 @@ |
2344 | |
2345 | current_power_state = self._get_power_state(context, instance_ref) |
2346 | self._instance_update(context, |
2347 | - instance_id, |
2348 | + instance_uuid, |
2349 | power_state=current_power_state, |
2350 | vm_state=vm_states.ACTIVE, |
2351 | task_state=None, |
2352 | @@ -612,15 +608,15 @@ |
2353 | |
2354 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2355 | @checks_instance_lock |
2356 | - def reboot_instance(self, context, instance_id, reboot_type="SOFT"): |
2357 | + def reboot_instance(self, context, instance_uuid, reboot_type="SOFT"): |
2358 | """Reboot an instance on this host.""" |
2359 | - LOG.audit(_("Rebooting instance %s"), instance_id, context=context) |
2360 | + LOG.audit(_("Rebooting instance %s"), instance_uuid, context=context) |
2361 | context = context.elevated() |
2362 | - instance_ref = self.db.instance_get(context, instance_id) |
2363 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2364 | |
2365 | current_power_state = self._get_power_state(context, instance_ref) |
2366 | self._instance_update(context, |
2367 | - instance_id, |
2368 | + instance_uuid, |
2369 | power_state=current_power_state, |
2370 | vm_state=vm_states.ACTIVE, |
2371 | task_state=task_states.REBOOTING) |
2372 | @@ -629,7 +625,7 @@ |
2373 | state = instance_ref['power_state'] |
2374 | running = power_state.RUNNING |
2375 | LOG.warn(_('trying to reboot a non-running ' |
2376 | - 'instance: %(instance_id)s (state: %(state)s ' |
2377 | + 'instance: %(instance_uuid)s (state: %(state)s ' |
2378 | 'expected: %(running)s)') % locals(), |
2379 | context=context) |
2380 | |
2381 | @@ -638,19 +634,19 @@ |
2382 | |
2383 | current_power_state = self._get_power_state(context, instance_ref) |
2384 | self._instance_update(context, |
2385 | - instance_id, |
2386 | + instance_uuid, |
2387 | power_state=current_power_state, |
2388 | vm_state=vm_states.ACTIVE, |
2389 | task_state=None) |
2390 | |
2391 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2392 | - def snapshot_instance(self, context, instance_id, image_id, |
2393 | + def snapshot_instance(self, context, instance_uuid, image_id, |
2394 | image_type='snapshot', backup_type=None, |
2395 | rotation=None): |
2396 | """Snapshot an instance on this host. |
2397 | |
2398 | :param context: security context |
2399 | - :param instance_id: nova.db.sqlalchemy.models.Instance.Id |
2400 | + :param instance_uuid: nova.db.sqlalchemy.models.Instance.Uuid |
2401 | :param image_id: glance.db.sqlalchemy.models.Image.Id |
2402 | :param image_type: snapshot | backup |
2403 | :param backup_type: daily | weekly |
2404 | @@ -665,27 +661,27 @@ |
2405 | raise Exception(_('Image type not recognized %s') % image_type) |
2406 | |
2407 | context = context.elevated() |
2408 | - instance_ref = self.db.instance_get(context, instance_id) |
2409 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2410 | |
2411 | current_power_state = self._get_power_state(context, instance_ref) |
2412 | self._instance_update(context, |
2413 | - instance_id, |
2414 | + instance_uuid, |
2415 | power_state=current_power_state, |
2416 | vm_state=vm_states.ACTIVE, |
2417 | task_state=task_state) |
2418 | |
2419 | - LOG.audit(_('instance %s: snapshotting'), instance_id, |
2420 | + LOG.audit(_('instance %s: snapshotting'), instance_uuid, |
2421 | context=context) |
2422 | |
2423 | if instance_ref['power_state'] != power_state.RUNNING: |
2424 | state = instance_ref['power_state'] |
2425 | running = power_state.RUNNING |
2426 | LOG.warn(_('trying to snapshot a non-running ' |
2427 | - 'instance: %(instance_id)s (state: %(state)s ' |
2428 | + 'instance: %(instance_uuid)s (state: %(state)s ' |
2429 | 'expected: %(running)s)') % locals()) |
2430 | |
2431 | self.driver.snapshot(context, instance_ref, image_id) |
2432 | - self._instance_update(context, instance_id, task_state=None) |
2433 | + self._instance_update(context, instance_uuid, task_state=None) |
2434 | |
2435 | if image_type == 'snapshot' and rotation: |
2436 | raise exception.ImageRotationNotAllowed() |
2437 | @@ -745,7 +741,7 @@ |
2438 | |
2439 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2440 | @checks_instance_lock |
2441 | - def set_admin_password(self, context, instance_id, new_pass=None): |
2442 | + def set_admin_password(self, context, instance_uuid, new_pass=None): |
2443 | """Set the root/admin password for an instance on this host. |
2444 | |
2445 | This is generally only called by API password resets after an |
2446 | @@ -761,8 +757,8 @@ |
2447 | max_tries = 10 |
2448 | |
2449 | for i in xrange(max_tries): |
2450 | - instance_ref = self.db.instance_get(context, instance_id) |
2451 | - instance_id = instance_ref["id"] |
2452 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2453 | + instance_uuid = instance_ref["uuid"] |
2454 | instance_state = instance_ref["power_state"] |
2455 | expected_state = power_state.RUNNING |
2456 | |
2457 | @@ -793,16 +789,16 @@ |
2458 | |
2459 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2460 | @checks_instance_lock |
2461 | - def inject_file(self, context, instance_id, path, file_contents): |
2462 | + def inject_file(self, context, instance_uuid, path, file_contents): |
2463 | """Write a file to the specified path in an instance on this host.""" |
2464 | context = context.elevated() |
2465 | - instance_ref = self.db.instance_get(context, instance_id) |
2466 | - instance_id = instance_ref['id'] |
2467 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2468 | + instance_uuid = instance_ref['uuid'] |
2469 | instance_state = instance_ref['power_state'] |
2470 | expected_state = power_state.RUNNING |
2471 | if instance_state != expected_state: |
2472 | LOG.warn(_('trying to inject a file into a non-running ' |
2473 | - 'instance: %(instance_id)s (state: %(instance_state)s ' |
2474 | + 'instance: %(instance_uuid)s (state: %(instance_state)s ' |
2475 | 'expected: %(expected_state)s)') % locals()) |
2476 | nm = instance_ref['name'] |
2477 | msg = _('instance %(nm)s: injecting file to %(path)s') % locals() |
2478 | @@ -811,16 +807,16 @@ |
2479 | |
2480 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2481 | @checks_instance_lock |
2482 | - def agent_update(self, context, instance_id, url, md5hash): |
2483 | + def agent_update(self, context, instance_uuid, url, md5hash): |
2484 | """Update agent running on an instance on this host.""" |
2485 | context = context.elevated() |
2486 | - instance_ref = self.db.instance_get(context, instance_id) |
2487 | - instance_id = instance_ref['id'] |
2488 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2489 | + instance_uuid = instance_ref['uuid'] |
2490 | instance_state = instance_ref['power_state'] |
2491 | expected_state = power_state.RUNNING |
2492 | if instance_state != expected_state: |
2493 | LOG.warn(_('trying to update agent on a non-running ' |
2494 | - 'instance: %(instance_id)s (state: %(instance_state)s ' |
2495 | + 'instance: %(instance_uuid)s (state: %(instance_state)s ' |
2496 | 'expected: %(expected_state)s)') % locals()) |
2497 | nm = instance_ref['name'] |
2498 | msg = _('instance %(nm)s: updating agent to %(url)s') % locals() |
2499 | @@ -829,16 +825,16 @@ |
2500 | |
2501 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2502 | @checks_instance_lock |
2503 | - def rescue_instance(self, context, instance_id, **kwargs): |
2504 | + def rescue_instance(self, context, instance_uuid, **kwargs): |
2505 | """ |
2506 | Rescue an instance on this host. |
2507 | :param rescue_password: password to set on rescue instance |
2508 | """ |
2509 | |
2510 | - LOG.audit(_('instance %s: rescuing'), instance_id, context=context) |
2511 | + LOG.audit(_('instance %s: rescuing'), instance_uuid, context=context) |
2512 | context = context.elevated() |
2513 | |
2514 | - instance_ref = self.db.instance_get(context, instance_id) |
2515 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2516 | instance_ref.admin_pass = kwargs.get('rescue_password', |
2517 | utils.generate_password(FLAGS.password_length)) |
2518 | network_info = self._get_instance_nw_info(context, instance_ref) |
2519 | @@ -848,19 +844,19 @@ |
2520 | |
2521 | current_power_state = self._get_power_state(context, instance_ref) |
2522 | self._instance_update(context, |
2523 | - instance_id, |
2524 | + instance_uuid, |
2525 | vm_state=vm_states.RESCUED, |
2526 | task_state=None, |
2527 | power_state=current_power_state) |
2528 | |
2529 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2530 | @checks_instance_lock |
2531 | - def unrescue_instance(self, context, instance_id): |
2532 | + def unrescue_instance(self, context, instance_uuid): |
2533 | """Unrescue an instance on this host.""" |
2534 | - LOG.audit(_('instance %s: unrescuing'), instance_id, context=context) |
2535 | + LOG.audit(_('instance %s: unrescuing'), instance_uuid, context=context) |
2536 | context = context.elevated() |
2537 | |
2538 | - instance_ref = self.db.instance_get(context, instance_id) |
2539 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2540 | network_info = self._get_instance_nw_info(context, instance_ref) |
2541 | |
2542 | # NOTE(blamar): None of the virt drivers use the 'callback' param |
2543 | @@ -868,18 +864,17 @@ |
2544 | |
2545 | current_power_state = self._get_power_state(context, instance_ref) |
2546 | self._instance_update(context, |
2547 | - instance_id, |
2548 | + instance_uuid, |
2549 | vm_state=vm_states.ACTIVE, |
2550 | task_state=None, |
2551 | power_state=current_power_state) |
2552 | |
2553 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2554 | @checks_instance_lock |
2555 | - def confirm_resize(self, context, instance_id, migration_id): |
2556 | + def confirm_resize(self, context, instance_uuid, migration_id): |
2557 | """Destroys the source instance.""" |
2558 | migration_ref = self.db.migration_get(context, migration_id) |
2559 | - instance_ref = self.db.instance_get_by_uuid(context, |
2560 | - migration_ref.instance_uuid) |
2561 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2562 | |
2563 | network_info = self._get_instance_nw_info(context, instance_ref) |
2564 | self.driver.confirm_migration( |
2565 | @@ -893,7 +888,7 @@ |
2566 | |
2567 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2568 | @checks_instance_lock |
2569 | - def revert_resize(self, context, instance_id, migration_id): |
2570 | + def revert_resize(self, context, instance_uuid, migration_id): |
2571 | """Destroys the new instance on the destination machine. |
2572 | |
2573 | Reverts the model changes, and powers on the old instance on the |
2574 | @@ -901,8 +896,7 @@ |
2575 | |
2576 | """ |
2577 | migration_ref = self.db.migration_get(context, migration_id) |
2578 | - instance_ref = self.db.instance_get_by_uuid(context, |
2579 | - migration_ref.instance_uuid) |
2580 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2581 | |
2582 | network_info = self._get_instance_nw_info(context, instance_ref) |
2583 | self.driver.destroy(instance_ref, network_info) |
2584 | @@ -910,13 +904,13 @@ |
2585 | instance_ref['host']) |
2586 | rpc.cast(context, topic, |
2587 | {'method': 'finish_revert_resize', |
2588 | - 'args': {'instance_id': instance_ref['uuid'], |
2589 | + 'args': {'instance_uuid': instance_ref['uuid'], |
2590 | 'migration_id': migration_ref['id']}, |
2591 | }) |
2592 | |
2593 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2594 | @checks_instance_lock |
2595 | - def finish_revert_resize(self, context, instance_id, migration_id): |
2596 | + def finish_revert_resize(self, context, instance_uuid, migration_id): |
2597 | """Finishes the second half of reverting a resize. |
2598 | |
2599 | Power back on the source instance and revert the resized attributes |
2600 | @@ -924,8 +918,7 @@ |
2601 | |
2602 | """ |
2603 | migration_ref = self.db.migration_get(context, migration_id) |
2604 | - instance_ref = self.db.instance_get_by_uuid(context, |
2605 | - migration_ref.instance_uuid) |
2606 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2607 | |
2608 | instance_type = self.db.instance_type_get(context, |
2609 | migration_ref['old_instance_type_id']) |
2610 | @@ -950,23 +943,19 @@ |
2611 | |
2612 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2613 | @checks_instance_lock |
2614 | - def prep_resize(self, context, instance_id, instance_type_id): |
2615 | + def prep_resize(self, context, instance_uuid, instance_type_id): |
2616 | """Initiates the process of moving a running instance to another host. |
2617 | |
2618 | Possibly changes the RAM and disk size in the process. |
2619 | |
2620 | """ |
2621 | context = context.elevated() |
2622 | - |
2623 | - # Because of checks_instance_lock, this must currently be called |
2624 | - # instance_id. However, the compute API is always passing the UUID |
2625 | - # of the instance down |
2626 | - instance_ref = self.db.instance_get_by_uuid(context, instance_id) |
2627 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2628 | |
2629 | same_host = instance_ref['host'] == FLAGS.host |
2630 | if same_host and not FLAGS.allow_resize_to_same_host: |
2631 | self._instance_update(context, |
2632 | - instance_id, |
2633 | + instance_uuid, |
2634 | vm_state=vm_states.ERROR) |
2635 | msg = _('Migration error: destination same as source!') |
2636 | raise exception.Error(msg) |
2637 | @@ -991,7 +980,7 @@ |
2638 | instance_ref['host']) |
2639 | rpc.cast(context, topic, |
2640 | {'method': 'resize_instance', |
2641 | - 'args': {'instance_id': instance_ref['uuid'], |
2642 | + 'args': {'instance_uuid': instance_ref['uuid'], |
2643 | 'migration_id': migration_ref['id']}}) |
2644 | |
2645 | usage_info = utils.usage_from_instance(instance_ref, |
2646 | @@ -1004,11 +993,10 @@ |
2647 | |
2648 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2649 | @checks_instance_lock |
2650 | - def resize_instance(self, context, instance_id, migration_id): |
2651 | + def resize_instance(self, context, instance_uuid, migration_id): |
2652 | """Starts the migration of a running instance to another host.""" |
2653 | migration_ref = self.db.migration_get(context, migration_id) |
2654 | - instance_ref = self.db.instance_get_by_uuid(context, |
2655 | - migration_ref.instance_uuid) |
2656 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2657 | |
2658 | self.db.migration_update(context, |
2659 | migration_id, |
2660 | @@ -1027,13 +1015,13 @@ |
2661 | migration_ref['dest_compute']) |
2662 | params = {'migration_id': migration_id, |
2663 | 'disk_info': disk_info, |
2664 | - 'instance_id': instance_ref['uuid']} |
2665 | + 'instance_uuid': instance_ref['uuid']} |
2666 | rpc.cast(context, topic, {'method': 'finish_resize', |
2667 | 'args': params}) |
2668 | |
2669 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2670 | @checks_instance_lock |
2671 | - def finish_resize(self, context, instance_id, migration_id, disk_info): |
2672 | + def finish_resize(self, context, instance_uuid, migration_id, disk_info): |
2673 | """Completes the migration process. |
2674 | |
2675 | Sets up the newly transferred disk and turns on the instance at its |
2676 | @@ -1043,8 +1031,7 @@ |
2677 | migration_ref = self.db.migration_get(context, migration_id) |
2678 | |
2679 | resize_instance = False |
2680 | - instance_ref = self.db.instance_get_by_uuid(context, |
2681 | - migration_ref.instance_uuid) |
2682 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2683 | if migration_ref['old_instance_type_id'] != \ |
2684 | migration_ref['new_instance_type_id']: |
2685 | instance_type = self.db.instance_type_get(context, |
2686 | @@ -1056,15 +1043,13 @@ |
2687 | local_gb=instance_type['local_gb'])) |
2688 | resize_instance = True |
2689 | |
2690 | - instance_ref = self.db.instance_get_by_uuid(context, |
2691 | - instance_ref.uuid) |
2692 | - |
2693 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2694 | network_info = self._get_instance_nw_info(context, instance_ref) |
2695 | self.driver.finish_migration(context, migration_ref, instance_ref, |
2696 | disk_info, network_info, resize_instance) |
2697 | |
2698 | self._instance_update(context, |
2699 | - instance_id, |
2700 | + instance_uuid, |
2701 | vm_state=vm_states.ACTIVE, |
2702 | task_state=task_states.RESIZE_VERIFY) |
2703 | |
2704 | @@ -1073,58 +1058,58 @@ |
2705 | |
2706 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2707 | @checks_instance_lock |
2708 | - def add_fixed_ip_to_instance(self, context, instance_id, network_id): |
2709 | + def add_fixed_ip_to_instance(self, context, instance_uuid, network_id): |
2710 | """Calls network_api to add new fixed_ip to instance |
2711 | then injects the new network info and resets instance networking. |
2712 | |
2713 | """ |
2714 | - self.network_api.add_fixed_ip_to_instance(context, instance_id, |
2715 | + self.network_api.add_fixed_ip_to_instance(context, instance_uuid, |
2716 | self.host, network_id) |
2717 | - self.inject_network_info(context, instance_id) |
2718 | - self.reset_network(context, instance_id) |
2719 | + self.inject_network_info(context, instance_uuid) |
2720 | + self.reset_network(context, instance_uuid) |
2721 | |
2722 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2723 | @checks_instance_lock |
2724 | - def remove_fixed_ip_from_instance(self, context, instance_id, address): |
2725 | + def remove_fixed_ip_from_instance(self, context, instance_uuid, address): |
2726 | """Calls network_api to remove existing fixed_ip from instance |
2727 | by injecting the altered network info and resetting |
2728 | instance networking. |
2729 | """ |
2730 | - self.network_api.remove_fixed_ip_from_instance(context, instance_id, |
2731 | + self.network_api.remove_fixed_ip_from_instance(context, instance_uuid, |
2732 | address) |
2733 | - self.inject_network_info(context, instance_id) |
2734 | - self.reset_network(context, instance_id) |
2735 | + self.inject_network_info(context, instance_uuid) |
2736 | + self.reset_network(context, instance_uuid) |
2737 | |
2738 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2739 | @checks_instance_lock |
2740 | - def pause_instance(self, context, instance_id): |
2741 | + def pause_instance(self, context, instance_uuid): |
2742 | """Pause an instance on this host.""" |
2743 | - LOG.audit(_('instance %s: pausing'), instance_id, context=context) |
2744 | + LOG.audit(_('instance %s: pausing'), instance_uuid, context=context) |
2745 | context = context.elevated() |
2746 | |
2747 | - instance_ref = self.db.instance_get(context, instance_id) |
2748 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2749 | self.driver.pause(instance_ref, lambda result: None) |
2750 | |
2751 | current_power_state = self._get_power_state(context, instance_ref) |
2752 | self._instance_update(context, |
2753 | - instance_id, |
2754 | + instance_uuid, |
2755 | power_state=current_power_state, |
2756 | vm_state=vm_states.PAUSED, |
2757 | task_state=None) |
2758 | |
2759 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2760 | @checks_instance_lock |
2761 | - def unpause_instance(self, context, instance_id): |
2762 | + def unpause_instance(self, context, instance_uuid): |
2763 | """Unpause a paused instance on this host.""" |
2764 | - LOG.audit(_('instance %s: unpausing'), instance_id, context=context) |
2765 | + LOG.audit(_('instance %s: unpausing'), instance_uuid, context=context) |
2766 | context = context.elevated() |
2767 | |
2768 | - instance_ref = self.db.instance_get(context, instance_id) |
2769 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2770 | self.driver.unpause(instance_ref, lambda result: None) |
2771 | |
2772 | current_power_state = self._get_power_state(context, instance_ref) |
2773 | self._instance_update(context, |
2774 | - instance_id, |
2775 | + instance_uuid, |
2776 | power_state=current_power_state, |
2777 | vm_state=vm_states.ACTIVE, |
2778 | task_state=None) |
2779 | @@ -1140,123 +1125,120 @@ |
2780 | return self.driver.set_host_enabled(host, enabled) |
2781 | |
2782 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2783 | - def get_diagnostics(self, context, instance_id): |
2784 | + def get_diagnostics(self, context, instance_uuid): |
2785 | """Retrieve diagnostics for an instance on this host.""" |
2786 | - instance_ref = self.db.instance_get(context, instance_id) |
2787 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2788 | if instance_ref["power_state"] == power_state.RUNNING: |
2789 | - LOG.audit(_("instance %s: retrieving diagnostics"), instance_id, |
2790 | + LOG.audit(_("instance %s: retrieving diagnostics"), instance_uuid, |
2791 | context=context) |
2792 | return self.driver.get_diagnostics(instance_ref) |
2793 | |
2794 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2795 | @checks_instance_lock |
2796 | - def suspend_instance(self, context, instance_id): |
2797 | + def suspend_instance(self, context, instance_uuid): |
2798 | """Suspend the given instance.""" |
2799 | - LOG.audit(_('instance %s: suspending'), instance_id, context=context) |
2800 | + LOG.audit(_('instance %s: suspending'), instance_uuid, context=context) |
2801 | context = context.elevated() |
2802 | |
2803 | - instance_ref = self.db.instance_get(context, instance_id) |
2804 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2805 | self.driver.suspend(instance_ref, lambda result: None) |
2806 | |
2807 | current_power_state = self._get_power_state(context, instance_ref) |
2808 | self._instance_update(context, |
2809 | - instance_id, |
2810 | + instance_uuid, |
2811 | power_state=current_power_state, |
2812 | vm_state=vm_states.SUSPENDED, |
2813 | task_state=None) |
2814 | |
2815 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2816 | @checks_instance_lock |
2817 | - def resume_instance(self, context, instance_id): |
2818 | + def resume_instance(self, context, instance_uuid): |
2819 | """Resume the given suspended instance.""" |
2820 | - LOG.audit(_('instance %s: resuming'), instance_id, context=context) |
2821 | + LOG.audit(_('instance %s: resuming'), instance_uuid, context=context) |
2822 | context = context.elevated() |
2823 | |
2824 | - instance_ref = self.db.instance_get(context, instance_id) |
2825 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2826 | self.driver.resume(instance_ref, lambda result: None) |
2827 | |
2828 | current_power_state = self._get_power_state(context, instance_ref) |
2829 | self._instance_update(context, |
2830 | - instance_id, |
2831 | + instance_uuid, |
2832 | power_state=current_power_state, |
2833 | vm_state=vm_states.ACTIVE, |
2834 | task_state=None) |
2835 | |
2836 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2837 | - def lock_instance(self, context, instance_id): |
2838 | + def lock_instance(self, context, instance_uuid): |
2839 | """Lock the given instance.""" |
2840 | context = context.elevated() |
2841 | |
2842 | - LOG.debug(_('instance %s: locking'), instance_id, context=context) |
2843 | - self.db.instance_update(context, instance_id, {'locked': True}) |
2844 | + LOG.debug(_('instance %s: locking'), instance_uuid, context=context) |
2845 | + self.db.instance_update(context, instance_uuid, {'locked': True}) |
2846 | |
2847 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2848 | - def unlock_instance(self, context, instance_id): |
2849 | + def unlock_instance(self, context, instance_uuid): |
2850 | """Unlock the given instance.""" |
2851 | context = context.elevated() |
2852 | |
2853 | - LOG.debug(_('instance %s: unlocking'), instance_id, context=context) |
2854 | - self.db.instance_update(context, instance_id, {'locked': False}) |
2855 | + LOG.debug(_('instance %s: unlocking'), instance_uuid, context=context) |
2856 | + self.db.instance_update(context, instance_uuid, {'locked': False}) |
2857 | |
2858 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2859 | - def get_lock(self, context, instance_id): |
2860 | + def get_lock(self, context, instance_uuid): |
2861 | """Return the boolean state of the given instance's lock.""" |
2862 | context = context.elevated() |
2863 | - LOG.debug(_('instance %s: getting locked state'), instance_id, |
2864 | + LOG.debug(_('instance %s: getting locked state'), instance_uuid, |
2865 | context=context) |
2866 | - if utils.is_uuid_like(instance_id): |
2867 | - uuid = instance_id |
2868 | - instance_ref = self.db.instance_get_by_uuid(context, uuid) |
2869 | - else: |
2870 | - instance_ref = self.db.instance_get(context, instance_id) |
2871 | - return instance_ref['locked'] |
2872 | + instance = self.db.instance_get(context, instance_uuid) |
2873 | + return instance['locked'] |
2874 | |
2875 | @checks_instance_lock |
2876 | - def reset_network(self, context, instance_id): |
2877 | + def reset_network(self, context, instance_uuid): |
2878 | """Reset networking on the given instance.""" |
2879 | - instance = self.db.instance_get(context, instance_id) |
2880 | - LOG.debug(_('instance %s: reset network'), instance_id, |
2881 | + instance = self.db.instance_get(context, instance_uuid) |
2882 | + LOG.debug(_('instance %s: reset network'), instance_uuid, |
2883 | context=context) |
2884 | self.driver.reset_network(instance) |
2885 | |
2886 | @checks_instance_lock |
2887 | - def inject_network_info(self, context, instance_id): |
2888 | + def inject_network_info(self, context, instance_uuid): |
2889 | """Inject network info for the given instance.""" |
2890 | - LOG.debug(_('instance %s: inject network info'), instance_id, |
2891 | + LOG.debug(_('instance %s: inject network info'), instance_uuid, |
2892 | context=context) |
2893 | - instance = self.db.instance_get(context, instance_id) |
2894 | + instance = self.db.instance_get(context, instance_uuid) |
2895 | network_info = self._get_instance_nw_info(context, instance) |
2896 | LOG.debug(_("network_info to inject: |%s|"), network_info) |
2897 | |
2898 | self.driver.inject_network_info(instance, network_info) |
2899 | |
2900 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2901 | - def get_console_output(self, context, instance_id): |
2902 | + def get_console_output(self, context, instance_uuid): |
2903 | """Send the console output for the given instance.""" |
2904 | context = context.elevated() |
2905 | - instance_ref = self.db.instance_get(context, instance_id) |
2906 | - LOG.audit(_("Get console output for instance %s"), instance_id, |
2907 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2908 | + LOG.audit(_("Get console output for instance %s"), instance_uuid, |
2909 | context=context) |
2910 | output = self.driver.get_console_output(instance_ref) |
2911 | return output.decode('utf-8', 'replace').encode('ascii', 'replace') |
2912 | |
2913 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2914 | - def get_ajax_console(self, context, instance_id): |
2915 | + def get_ajax_console(self, context, instance_uuid): |
2916 | """Return connection information for an ajax console.""" |
2917 | context = context.elevated() |
2918 | - LOG.debug(_("instance %s: getting ajax console"), instance_id) |
2919 | - instance_ref = self.db.instance_get(context, instance_id) |
2920 | + LOG.debug(_("instance %s: getting ajax console"), instance_uuid) |
2921 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2922 | return self.driver.get_ajax_console(instance_ref) |
2923 | |
2924 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2925 | - def get_vnc_console(self, context, instance_id): |
2926 | + def get_vnc_console(self, context, instance_uuid): |
2927 | """Return connection information for a vnc console.""" |
2928 | context = context.elevated() |
2929 | - LOG.debug(_("instance %s: getting vnc console"), instance_id) |
2930 | - instance_ref = self.db.instance_get(context, instance_id) |
2931 | + LOG.debug(_("instance %s: getting vnc console"), instance_uuid) |
2932 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2933 | return self.driver.get_vnc_console(instance_ref) |
2934 | |
2935 | - def _attach_volume_boot(self, context, instance_id, volume_id, mountpoint): |
2936 | + def _attach_volume_boot(self, context, instance_uuid, volume_id, |
2937 | + mountpoint): |
2938 | """Attach a volume to an instance at boot time. So actual attach |
2939 | is done by instance creation""" |
2940 | |
2941 | @@ -1265,20 +1247,21 @@ |
2942 | volume.API().check_attach(context, volume_id) |
2943 | |
2944 | context = context.elevated() |
2945 | - LOG.audit(_("instance %(instance_id)s: booting with " |
2946 | + LOG.audit(_("instance %(instance_uuid)s: booting with " |
2947 | "volume %(volume_id)s at %(mountpoint)s") % |
2948 | locals(), context=context) |
2949 | dev_path = self.volume_manager.setup_compute_volume(context, volume_id) |
2950 | - self.db.volume_attached(context, volume_id, instance_id, mountpoint) |
2951 | + self.db.volume_attached(context, volume_id, instance_uuid, mountpoint) |
2952 | return dev_path |
2953 | |
2954 | @checks_instance_lock |
2955 | - def attach_volume(self, context, instance_id, volume_id, mountpoint): |
2956 | + def attach_volume(self, context, instance_uuid, volume_id, mountpoint): |
2957 | """Attach a volume to an instance.""" |
2958 | context = context.elevated() |
2959 | - instance_ref = self.db.instance_get(context, instance_id) |
2960 | - LOG.audit(_("instance %(instance_id)s: attaching volume %(volume_id)s" |
2961 | - " to %(mountpoint)s") % locals(), context=context) |
2962 | + instance_ref = self.db.instance_get(context, instance_uuid) |
2963 | + LOG.audit(_("instance %(instance_uuid)s: attaching volume " |
2964 | + "%(volume_id)s to %(mountpoint)s") % locals(), |
2965 | + context=context) |
2966 | dev_path = self.volume_manager.setup_compute_volume(context, |
2967 | volume_id) |
2968 | try: |
2969 | @@ -1287,10 +1270,10 @@ |
2970 | mountpoint) |
2971 | self.db.volume_attached(context, |
2972 | volume_id, |
2973 | - instance_id, |
2974 | + instance_uuid, |
2975 | mountpoint) |
2976 | values = { |
2977 | - 'instance_id': instance_id, |
2978 | + 'instance_uuid': instance_uuid, |
2979 | 'device_name': mountpoint, |
2980 | 'delete_on_termination': False, |
2981 | 'virtual_name': None, |
2982 | @@ -1303,7 +1286,7 @@ |
2983 | # NOTE(vish): The inline callback eats the exception info so we |
2984 | # log the traceback here and reraise the same |
2985 | # exception below. |
2986 | - LOG.exception(_("instance %(instance_id)s: attach failed" |
2987 | + LOG.exception(_("instance %(instance_uuid)s: attach failed" |
2988 | " %(mountpoint)s, removing") % locals(), context=context) |
2989 | self.volume_manager.remove_compute_volume(context, |
2990 | volume_id) |
2991 | @@ -1313,17 +1296,17 @@ |
2992 | |
2993 | @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) |
2994 | @checks_instance_lock |
2995 | - def _detach_volume(self, context, instance_id, volume_id, destroy_bdm): |
2996 | + def _detach_volume(self, context, instance_uuid, volume_id, destroy_bdm): |
2997 | """Detach a volume from an instance.""" |
2998 | context = context.elevated() |
2999 | - instance_ref = self.db.instance_get(context, instance_id) |
3000 | + instance_ref = self.db.instance_get(context, instance_uuid) |
3001 | volume_ref = self.db.volume_get(context, volume_id) |
3002 | mp = volume_ref['mountpoint'] |
3003 | LOG.audit(_("Detach volume %(volume_id)s from mountpoint %(mp)s" |
3004 | - " on instance %(instance_id)s") % locals(), context=context) |
3005 | + " on instance %(instance_uuid)s") % locals(), context=context) |
3006 | if instance_ref['name'] not in self.driver.list_instances(): |
3007 | LOG.warn(_("Detaching volume from unknown instance %s"), |
3008 | - instance_id, context=context) |
3009 | + instance_uuid, context=context) |
3010 | else: |
3011 | self.driver.detach_volume(instance_ref['name'], |
3012 | volume_ref['mountpoint']) |
3013 | @@ -1331,12 +1314,12 @@ |
3014 | self.db.volume_detached(context, volume_id) |
3015 | if destroy_bdm: |
3016 | self.db.block_device_mapping_destroy_by_instance_and_volume( |
3017 | - context, instance_id, volume_id) |
3018 | + context, instance_uuid, volume_id) |
3019 | return True |
3020 | |
3021 | - def detach_volume(self, context, instance_id, volume_id): |
3022 | + def detach_volume(self, context, instance_uuid, volume_id): |
3023 | """Detach a volume from an instance.""" |
3024 | - return self._detach_volume(context, instance_id, volume_id, True) |
3025 | + return self._detach_volume(context, instance_uuid, volume_id, True) |
3026 | |
3027 | def remove_volume(self, context, volume_id): |
3028 | """Remove volume on compute host. |
3029 | @@ -1413,12 +1396,12 @@ |
3030 | """ |
3031 | return self.driver.update_available_resource(context, self.host) |
3032 | |
3033 | - def pre_live_migration(self, context, instance_id, time=None, |
3034 | + def pre_live_migration(self, context, instance_uuid, time=None, |
3035 | block_migration=False, disk=None): |
3036 | """Preparations for live migration at dest host. |
3037 | |
3038 | :param context: security context |
3039 | - :param instance_id: nova.db.sqlalchemy.models.Instance.Id |
3040 | + :param instance_uuid: nova.db.sqlalchemy.models.Instance.Uuid |
3041 | :param block_migration: if true, prepare for block migration |
3042 | |
3043 | """ |
3044 | @@ -1426,7 +1409,7 @@ |
3045 | time = greenthread |
3046 | |
3047 | # Getting instance info |
3048 | - instance_ref = self.db.instance_get(context, instance_id) |
3049 | + instance_ref = self.db.instance_get(context, instance_uuid) |
3050 | hostname = instance_ref['hostname'] |
3051 | |
3052 | # If any volume is mounted, prepare here. |
3053 | @@ -1447,7 +1430,7 @@ |
3054 | |
3055 | fixed_ips = [nw_info[1]['ips'] for nw_info in network_info] |
3056 | if not fixed_ips: |
3057 | - raise exception.FixedIpNotFoundForInstance(instance_id=instance_id) |
3058 | + raise exception.FixedIpNotFoundForInstance(instance_id=instance_uuid) |
3059 | |
3060 | max_retry = FLAGS.live_migration_retry_count |
3061 | for cnt in range(max_retry): |
3062 | @@ -1478,18 +1461,18 @@ |
3063 | instance_ref, |
3064 | disk) |
3065 | |
3066 | - def live_migration(self, context, instance_id, |
3067 | + def live_migration(self, context, instance_uuid, |
3068 | dest, block_migration=False): |
3069 | """Executing live migration. |
3070 | |
3071 | :param context: security context |
3072 | - :param instance_id: nova.db.sqlalchemy.models.Instance.Id |
3073 | + :param instance_uuid: nova.db.sqlalchemy.models.Instance.Uuid |
3074 | :param dest: destination host |
3075 | :param block_migration: if true, do block migration |
3076 | |
3077 | """ |
3078 | # Get instance for error handling. |
3079 | - instance_ref = self.db.instance_get(context, instance_id) |
3080 | + instance_ref = self.db.instance_get(context, instance_uuid) |
3081 | |
3082 | try: |
3083 | # Checking volume node is working correctly when any volumes |
3084 | @@ -1498,7 +1481,7 @@ |
3085 | rpc.call(context, |
3086 | FLAGS.volume_topic, |
3087 | {"method": "check_for_export", |
3088 | - "args": {'instance_id': instance_id}}) |
3089 | + "args": {'instance_uuid': instance_uuid}}) |
3090 | |
3091 | if block_migration: |
3092 | disk = self.driver.get_instance_disk_info(context, |
3093 | @@ -1509,7 +1492,7 @@ |
3094 | rpc.call(context, |
3095 | self.db.queue_get_for(context, FLAGS.compute_topic, dest), |
3096 | {"method": "pre_live_migration", |
3097 | - "args": {'instance_id': instance_id, |
3098 | + "args": {'instance_uuid': instance_uuid, |
3099 | 'block_migration': block_migration, |
3100 | 'disk': disk}}) |
3101 | |
3102 | @@ -1537,18 +1520,18 @@ |
3103 | and mainly updating database record. |
3104 | |
3105 | :param ctxt: security context |
3106 | - :param instance_id: nova.db.sqlalchemy.models.Instance.Id |
3107 | + :param instance_uuid: nova.db.sqlalchemy.models.Instance.Uuid |
3108 | :param dest: destination host |
3109 | :param block_migration: if true, do block migration |
3110 | |
3111 | """ |
3112 | |
3113 | LOG.info(_('post_live_migration() is started..')) |
3114 | - instance_id = instance_ref['id'] |
3115 | + instance_uuid = instance_ref['uuid'] |
3116 | |
3117 | # Detaching volumes. |
3118 | try: |
3119 | - for vol in self.db.volume_get_all_by_instance(ctxt, instance_id): |
3120 | + for vol in self.db.volume_get_all_by_instance(ctxt, instance_uuid): |
3121 | self.volume_manager.remove_compute_volume(ctxt, vol['id']) |
3122 | except exception.NotFound: |
3123 | pass |
3124 | @@ -1566,7 +1549,7 @@ |
3125 | # Not return if floating_ip is not found, otherwise, |
3126 | # instance never be accessible.. |
3127 | floating_ip = self.db.instance_get_floating_address(ctxt, |
3128 | - instance_id) |
3129 | + instance_uuid) |
3130 | if not floating_ip: |
3131 | LOG.info(_('No floating_ip is found for %s.'), i_name) |
3132 | else: |
3133 | @@ -1587,13 +1570,13 @@ |
3134 | rpc.call(ctxt, |
3135 | self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest), |
3136 | {"method": "post_live_migration_at_destination", |
3137 | - "args": {'instance_id': instance_ref.id, |
3138 | + "args": {'instance_uuid': instance_ref.uuid, |
3139 | 'block_migration': block_migration}}) |
3140 | |
3141 | # Restore instance state |
3142 | current_power_state = self._get_power_state(ctxt, instance_ref) |
3143 | self._instance_update(ctxt, |
3144 | - instance_ref["id"], |
3145 | + instance_ref["uuid"], |
3146 | host=dest, |
3147 | power_state=current_power_state, |
3148 | vm_state=vm_states.ACTIVE, |
3149 | @@ -1616,15 +1599,15 @@ |
3150 | "This error can be safely ignored.")) |
3151 | |
3152 | def post_live_migration_at_destination(self, context, |
3153 | - instance_id, block_migration=False): |
3154 | + instance_uuid, block_migration=False): |
3155 | """Post operations for live migration . |
3156 | |
3157 | :param context: security context |
3158 | - :param instance_id: nova.db.sqlalchemy.models.Instance.Id |
3159 | + :param instance_uuid: nova.db.sqlalchemy.models.Instance.Uuid |
3160 | :param block_migration: block_migration |
3161 | |
3162 | """ |
3163 | - instance_ref = self.db.instance_get(context, instance_id) |
3164 | + instance_ref = self.db.instance_get(context, instance_uuid) |
3165 | LOG.info(_('Post operation of migraton started for %s .') |
3166 | % instance_ref.name) |
3167 | network_info = self._get_instance_nw_info(context, instance_ref) |
3168 | @@ -1638,14 +1621,14 @@ |
3169 | """Recovers Instance/volume state from migrating -> running. |
3170 | |
3171 | :param context: security context |
3172 | - :param instance_id: nova.db.sqlalchemy.models.Instance.Id |
3173 | + :param instance_uuid: nova.db.sqlalchemy.models.Instance.Uuid |
3174 | :param dest: |
3175 | This method is called from live migration src host. |
3176 | This param specifies destination host. |
3177 | """ |
3178 | host = instance_ref['host'] |
3179 | self._instance_update(context, |
3180 | - instance_ref['id'], |
3181 | + instance_ref['uuid'], |
3182 | host=host, |
3183 | vm_state=vm_states.ACTIVE, |
3184 | task_state=None) |
3185 | @@ -1662,15 +1645,15 @@ |
3186 | rpc.cast(context, |
3187 | self.db.queue_get_for(context, FLAGS.compute_topic, dest), |
3188 | {"method": "rollback_live_migration_at_destination", |
3189 | - "args": {'instance_id': instance_ref['id']}}) |
3190 | + "args": {'instance_uuid': instance_ref['uuid']}}) |
3191 | |
3192 | - def rollback_live_migration_at_destination(self, context, instance_id): |
3193 | + def rollback_live_migration_at_destination(self, context, instance_uuid): |
3194 | """ Cleaning up image directory that is created pre_live_migration. |
3195 | |
3196 | :param context: security context |
3197 | - :param instance_id: nova.db.sqlalchemy.models.Instance.Id |
3198 | + :param instance_uuid: nova.db.sqlalchemy.models.Instance.Uuid |
3199 | """ |
3200 | - instances_ref = self.db.instance_get(context, instance_id) |
3201 | + instances_ref = self.db.instance_get(context, instance_uuid) |
3202 | network_info = self._get_instance_nw_info(context, instances_ref) |
3203 | self.driver.destroy(instances_ref, network_info) |
3204 | |
3205 | @@ -1764,7 +1747,7 @@ |
3206 | continue |
3207 | |
3208 | self._instance_update(context, |
3209 | - db_instance["id"], |
3210 | + db_instance["uuid"], |
3211 | power_state=vm_power_state) |
3212 | |
3213 | def _reclaim_queued_deletes(self, context): |
3214 | @@ -1779,4 +1762,4 @@ |
3215 | if instance['vm_state'] == vm_states.SOFT_DELETE and \ |
3216 | (curtime - instance['deleted_at']) >= queue_time: |
3217 | LOG.info('Deleting %s' % instance['name']) |
3218 | - self._delete_instance(context, instance['id']) |
3219 | + self._delete_instance(context, instance['uuid']) |
3220 | |
3221 | === modified file 'nova/console/manager.py' |
3222 | --- nova/console/manager.py 2011-06-30 01:30:15 +0000 |
3223 | +++ nova/console/manager.py 2011-09-22 19:29:25 +0000 |
3224 | @@ -57,16 +57,16 @@ |
3225 | self.driver.init_host() |
3226 | |
3227 | @exception.wrap_exception() |
3228 | - def add_console(self, context, instance_id, password=None, |
3229 | + def add_console(self, context, instance_uuid, password=None, |
3230 | port=None, **kwargs): |
3231 | - instance = self.db.instance_get(context, instance_id) |
3232 | + instance = self.db.instance_get(context, instance_uuid) |
3233 | host = instance['host'] |
3234 | name = instance['name'] |
3235 | pool = self.get_pool_for_instance_host(context, host) |
3236 | try: |
3237 | console = self.db.console_get_by_pool_instance(context, |
3238 | pool['id'], |
3239 | - instance_id) |
3240 | + instance_uuid) |
3241 | except exception.NotFound: |
3242 | logging.debug(_('Adding console')) |
3243 | if not password: |
3244 | @@ -74,7 +74,7 @@ |
3245 | if not port: |
3246 | port = self.driver.get_port(context) |
3247 | console_data = {'instance_name': name, |
3248 | - 'instance_id': instance_id, |
3249 | + 'instance_uuid': instance_uuid, |
3250 | 'password': password, |
3251 | 'pool_id': pool['id']} |
3252 | if port: |
3253 | |
3254 | === modified file 'nova/console/vmrc.py' |
3255 | --- nova/console/vmrc.py 2011-06-07 17:32:53 +0000 |
3256 | +++ nova/console/vmrc.py 2011-09-22 19:29:25 +0000 |
3257 | @@ -90,7 +90,7 @@ |
3258 | vm_ds_path_name = ds_path_name |
3259 | break |
3260 | if vm_ref is None: |
3261 | - raise exception.InstanceNotFound(instance_id=instance_name) |
3262 | + raise exception.InstanceNotFound(instance_uuid=instance_name) |
3263 | json_data = json.dumps({'vm_id': vm_ds_path_name, |
3264 | 'username': username, |
3265 | 'password': password}) |
3266 | @@ -124,7 +124,7 @@ |
3267 | if vm.propSet[0].val == instance_name: |
3268 | vm_ref = vm.obj |
3269 | if vm_ref is None: |
3270 | - raise exception.InstanceNotFound(instance_id=instance_name) |
3271 | + raise exception.InstanceNotFound(instance_uuid=instance_name) |
3272 | virtual_machine_ticket = \ |
3273 | vim_session._call_method( |
3274 | vim_session._get_vim(), |
3275 | |
3276 | === modified file 'nova/db/api.py' |
3277 | --- nova/db/api.py 2011-09-21 16:29:36 +0000 |
3278 | +++ nova/db/api.py 2011-09-22 19:29:25 +0000 |
3279 | @@ -43,7 +43,7 @@ |
3280 | 'The backend to use for db') |
3281 | flags.DEFINE_boolean('enable_new_services', True, |
3282 | 'Services to be added to the available pool on create') |
3283 | -flags.DEFINE_string('instance_name_template', 'instance-%08x', |
3284 | +flags.DEFINE_string('instance_name_template', 'instance-%s', |
3285 | 'Template string to be used to generate instance names') |
3286 | flags.DEFINE_string('volume_name_template', 'volume-%08x', |
3287 | 'Template string to be used to generate instance names') |
3288 | @@ -74,9 +74,9 @@ |
3289 | ################### |
3290 | |
3291 | |
3292 | -def service_destroy(context, instance_id): |
3293 | +def service_destroy(context, instance_uuid): |
3294 | """Destroy the service or raise if it does not exist.""" |
3295 | - return IMPL.service_destroy(context, instance_id) |
3296 | + return IMPL.service_destroy(context, instance_uuid) |
3297 | |
3298 | |
3299 | def service_get(context, service_id): |
3300 | @@ -331,25 +331,25 @@ |
3301 | #################### |
3302 | |
3303 | |
3304 | -def fixed_ip_associate(context, address, instance_id, network_id=None, |
3305 | +def fixed_ip_associate(context, address, instance_uuid, network_id=None, |
3306 | reserved=False): |
3307 | """Associate fixed ip to instance. |
3308 | |
3309 | Raises if fixed ip is not available. |
3310 | |
3311 | """ |
3312 | - return IMPL.fixed_ip_associate(context, address, instance_id, network_id, |
3313 | + return IMPL.fixed_ip_associate(context, address, instance_uuid, network_id, |
3314 | reserved) |
3315 | |
3316 | |
3317 | -def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None): |
3318 | +def fixed_ip_associate_pool(context, network_id, instance_uuid=None, host=None): |
3319 | """Find free ip in network and associate it to instance or host. |
3320 | |
3321 | Raises if one is not available. |
3322 | |
3323 | """ |
3324 | return IMPL.fixed_ip_associate_pool(context, network_id, |
3325 | - instance_id, host) |
3326 | + instance_uuid, host) |
3327 | |
3328 | |
3329 | def fixed_ip_create(context, values): |
3330 | @@ -382,9 +382,9 @@ |
3331 | return IMPL.fixed_ip_get_by_address(context, address) |
3332 | |
3333 | |
3334 | -def fixed_ip_get_by_instance(context, instance_id): |
3335 | +def fixed_ip_get_by_instance(context, instance_uuid): |
3336 | """Get fixed ips by instance or raise if none exist.""" |
3337 | - return IMPL.fixed_ip_get_by_instance(context, instance_id) |
3338 | + return IMPL.fixed_ip_get_by_instance(context, instance_uuid) |
3339 | |
3340 | |
3341 | def fixed_ip_get_by_network_host(context, network_id, host): |
3342 | @@ -439,16 +439,16 @@ |
3343 | return IMPL.virtual_interface_get_by_fixed_ip(context, fixed_ip_id) |
3344 | |
3345 | |
3346 | -def virtual_interface_get_by_instance(context, instance_id): |
3347 | +def virtual_interface_get_by_instance(context, instance_uuid): |
3348 | """Gets all virtual_interfaces for instance.""" |
3349 | - return IMPL.virtual_interface_get_by_instance(context, instance_id) |
3350 | - |
3351 | - |
3352 | -def virtual_interface_get_by_instance_and_network(context, instance_id, |
3353 | + return IMPL.virtual_interface_get_by_instance(context, instance_uuid) |
3354 | + |
3355 | + |
3356 | +def virtual_interface_get_by_instance_and_network(context, instance_uuid, |
3357 | network_id): |
3358 | """Gets all virtual interfaces for instance.""" |
3359 | return IMPL.virtual_interface_get_by_instance_and_network(context, |
3360 | - instance_id, |
3361 | + instance_uuid, |
3362 | network_id) |
3363 | |
3364 | |
3365 | @@ -462,9 +462,9 @@ |
3366 | return IMPL.virtual_interface_delete(context, vif_id) |
3367 | |
3368 | |
3369 | -def virtual_interface_delete_by_instance(context, instance_id): |
3370 | +def virtual_interface_delete_by_instance(context, instance_uuid): |
3371 | """Delete virtual interface records associated with instance.""" |
3372 | - return IMPL.virtual_interface_delete_by_instance(context, instance_id) |
3373 | + return IMPL.virtual_interface_delete_by_instance(context, instance_uuid) |
3374 | |
3375 | |
3376 | def virtual_interface_get_all(context): |
3377 | @@ -485,24 +485,19 @@ |
3378 | return IMPL.instance_data_get_for_project(context, project_id) |
3379 | |
3380 | |
3381 | -def instance_destroy(context, instance_id): |
3382 | +def instance_destroy(context, instance_uuid): |
3383 | """Destroy the instance or raise if it does not exist.""" |
3384 | - return IMPL.instance_destroy(context, instance_id) |
3385 | - |
3386 | - |
3387 | -def instance_stop(context, instance_id): |
3388 | + return IMPL.instance_destroy(context, instance_uuid) |
3389 | + |
3390 | + |
3391 | +def instance_stop(context, instance_uuid): |
3392 | """Stop the instance or raise if it does not exist.""" |
3393 | - return IMPL.instance_stop(context, instance_id) |
3394 | - |
3395 | - |
3396 | -def instance_get_by_uuid(context, uuid): |
3397 | - """Get an instance or raise if it does not exist.""" |
3398 | - return IMPL.instance_get_by_uuid(context, uuid) |
3399 | - |
3400 | - |
3401 | -def instance_get(context, instance_id): |
3402 | - """Get an instance or raise if it does not exist.""" |
3403 | - return IMPL.instance_get(context, instance_id) |
3404 | + return IMPL.instance_stop(context, instance_uuid) |
3405 | + |
3406 | + |
3407 | +def instance_get(context, instance_uuid): |
3408 | + """Get an instance or raise if it does not exist.""" |
3409 | + return IMPL.instance_get(context, instance_uuid) |
3410 | |
3411 | |
3412 | def instance_get_all(context): |
3413 | @@ -561,48 +556,51 @@ |
3414 | return IMPL.instance_get_by_fixed_ipv6(context, address) |
3415 | |
3416 | |
3417 | -def instance_get_fixed_addresses(context, instance_id): |
3418 | +def instance_get_fixed_addresses(context, instance_uuid): |
3419 | """Get the fixed ip address of an instance.""" |
3420 | - return IMPL.instance_get_fixed_addresses(context, instance_id) |
3421 | - |
3422 | - |
3423 | -def instance_get_fixed_addresses_v6(context, instance_id): |
3424 | - return IMPL.instance_get_fixed_addresses_v6(context, instance_id) |
3425 | - |
3426 | - |
3427 | -def instance_get_floating_address(context, instance_id): |
3428 | + return IMPL.instance_get_fixed_addresses(context, instance_uuid) |
3429 | + |
3430 | + |
3431 | +def instance_get_fixed_addresses_v6(context, instance_uuid): |
3432 | + return IMPL.instance_get_fixed_addresses_v6(context, instance_uuid) |
3433 | + |
3434 | + |
3435 | +def instance_get_floating_address(context, instance_uuid): |
3436 | """Get the first floating ip address of an instance.""" |
3437 | - return IMPL.instance_get_floating_address(context, instance_id) |
3438 | + return IMPL.instance_get_floating_address(context, instance_uuid) |
3439 | |
3440 | |
3441 | def instance_get_project_vpn(context, project_id): |
3442 | """Get a vpn instance by project or return None.""" |
3443 | return IMPL.instance_get_project_vpn(context, project_id) |
3444 | |
3445 | +def instance_get_id_by_uuid(context, instance_uuid): |
3446 | + """Get corresponding instance id or raise if it does not exist.""" |
3447 | + return IMPL.instance_get_id_by_uuid(context, instance_uuid) |
3448 | |
3449 | -def instance_set_state(context, instance_id, state, description=None): |
3450 | +def instance_set_state(context, instance_uuid, state, description=None): |
3451 | """Set the state of an instance.""" |
3452 | - return IMPL.instance_set_state(context, instance_id, state, description) |
3453 | - |
3454 | - |
3455 | -def instance_update(context, instance_id, values): |
3456 | + return IMPL.instance_set_state(context, instance_uuid, state, description) |
3457 | + |
3458 | + |
3459 | +def instance_update(context, instance_uuid, values): |
3460 | """Set the given properties on an instance and update it. |
3461 | |
3462 | Raises NotFound if instance does not exist. |
3463 | |
3464 | """ |
3465 | - return IMPL.instance_update(context, instance_id, values) |
3466 | - |
3467 | - |
3468 | -def instance_add_security_group(context, instance_id, security_group_id): |
3469 | + return IMPL.instance_update(context, instance_uuid, values) |
3470 | + |
3471 | + |
3472 | +def instance_add_security_group(context, instance_uuid, security_group_id): |
3473 | """Associate the given security group with the given instance.""" |
3474 | - return IMPL.instance_add_security_group(context, instance_id, |
3475 | + return IMPL.instance_add_security_group(context, instance_uuid, |
3476 | security_group_id) |
3477 | |
3478 | |
3479 | -def instance_remove_security_group(context, instance_id, security_group_id): |
3480 | +def instance_remove_security_group(context, instance_uuid, security_group_id): |
3481 | """Disassociate the given security group from the given instance.""" |
3482 | - return IMPL.instance_remove_security_group(context, instance_id, |
3483 | + return IMPL.instance_remove_security_group(context, instance_uuid, |
3484 | security_group_id) |
3485 | |
3486 | |
3487 | @@ -611,9 +609,9 @@ |
3488 | return IMPL.instance_action_create(context, values) |
3489 | |
3490 | |
3491 | -def instance_get_actions(context, instance_id): |
3492 | +def instance_get_actions(context, instance_uuid): |
3493 | """Get instance actions by instance id.""" |
3494 | - return IMPL.instance_get_actions(context, instance_id) |
3495 | + return IMPL.instance_get_actions(context, instance_uuid) |
3496 | |
3497 | |
3498 | def instance_get_id_to_uuid_mapping(context, ids): |
3499 | @@ -749,14 +747,14 @@ |
3500 | return IMPL.network_get_by_cidr(context, cidr) |
3501 | |
3502 | |
3503 | -def network_get_by_instance(context, instance_id): |
3504 | +def network_get_by_instance(context, instance_uuid): |
3505 | """Get a network by instance id or raise if it does not exist.""" |
3506 | - return IMPL.network_get_by_instance(context, instance_id) |
3507 | - |
3508 | - |
3509 | -def network_get_all_by_instance(context, instance_id): |
3510 | + return IMPL.network_get_by_instance(context, instance_uuid) |
3511 | + |
3512 | + |
3513 | +def network_get_all_by_instance(context, instance_uuid): |
3514 | """Get all networks by instance id or raise if none exist.""" |
3515 | - return IMPL.network_get_all_by_instance(context, instance_id) |
3516 | + return IMPL.network_get_all_by_instance(context, instance_uuid) |
3517 | |
3518 | |
3519 | def network_get_all_by_host(context, host): |
3520 | @@ -908,9 +906,9 @@ |
3521 | return IMPL.volume_allocate_iscsi_target(context, volume_id, host) |
3522 | |
3523 | |
3524 | -def volume_attached(context, volume_id, instance_id, mountpoint): |
3525 | +def volume_attached(context, volume_id, instance_uuid, mountpoint): |
3526 | """Ensure that a volume is set as attached.""" |
3527 | - return IMPL.volume_attached(context, volume_id, instance_id, mountpoint) |
3528 | + return IMPL.volume_attached(context, volume_id, instance_uuid, mountpoint) |
3529 | |
3530 | |
3531 | def volume_create(context, values): |
3532 | @@ -948,9 +946,9 @@ |
3533 | return IMPL.volume_get_all_by_host(context, host) |
3534 | |
3535 | |
3536 | -def volume_get_all_by_instance(context, instance_id): |
3537 | +def volume_get_all_by_instance(context, instance_uuid): |
3538 | """Get all volumes belonging to a instance.""" |
3539 | - return IMPL.volume_get_all_by_instance(context, instance_id) |
3540 | + return IMPL.volume_get_all_by_instance(context, instance_uuid) |
3541 | |
3542 | |
3543 | def volume_get_all_by_project(context, project_id): |
3544 | @@ -1043,9 +1041,9 @@ |
3545 | return IMPL.block_device_mapping_update_or_create(context, values) |
3546 | |
3547 | |
3548 | -def block_device_mapping_get_all_by_instance(context, instance_id): |
3549 | +def block_device_mapping_get_all_by_instance(context, instance_uuid): |
3550 | """Get all block device mapping belonging to a instance""" |
3551 | - return IMPL.block_device_mapping_get_all_by_instance(context, instance_id) |
3552 | + return IMPL.block_device_mapping_get_all_by_instance(context, instance_uuid) |
3553 | |
3554 | |
3555 | def block_device_mapping_destroy(context, bdm_id): |
3556 | @@ -1053,11 +1051,11 @@ |
3557 | return IMPL.block_device_mapping_destroy(context, bdm_id) |
3558 | |
3559 | |
3560 | -def block_device_mapping_destroy_by_instance_and_volume(context, instance_id, |
3561 | +def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid, |
3562 | volume_id): |
3563 | """Destroy the block device mapping or raise if it does not exist.""" |
3564 | return IMPL.block_device_mapping_destroy_by_instance_and_volume( |
3565 | - context, instance_id, volume_id) |
3566 | + context, instance_uuid, volume_id) |
3567 | |
3568 | |
3569 | #################### |
3570 | @@ -1083,9 +1081,9 @@ |
3571 | return IMPL.security_group_get_by_project(context, project_id) |
3572 | |
3573 | |
3574 | -def security_group_get_by_instance(context, instance_id): |
3575 | +def security_group_get_by_instance(context, instance_uuid): |
3576 | """Get security groups to which the instance is assigned.""" |
3577 | - return IMPL.security_group_get_by_instance(context, instance_id) |
3578 | + return IMPL.security_group_get_by_instance(context, instance_uuid) |
3579 | |
3580 | |
3581 | def security_group_exists(context, project_id, group_name): |
3582 | @@ -1326,19 +1324,19 @@ |
3583 | return IMPL.console_delete(context, console_id) |
3584 | |
3585 | |
3586 | -def console_get_by_pool_instance(context, pool_id, instance_id): |
3587 | +def console_get_by_pool_instance(context, pool_id, instance_uuid): |
3588 | """Get console entry for a given instance and pool.""" |
3589 | - return IMPL.console_get_by_pool_instance(context, pool_id, instance_id) |
3590 | - |
3591 | - |
3592 | -def console_get_all_by_instance(context, instance_id): |
3593 | + return IMPL.console_get_by_pool_instance(context, pool_id, instance_uuid) |
3594 | + |
3595 | + |
3596 | +def console_get_all_by_instance(context, instance_uuid): |
3597 | """Get consoles for a given instance.""" |
3598 | - return IMPL.console_get_all_by_instance(context, instance_id) |
3599 | - |
3600 | - |
3601 | -def console_get(context, console_id, instance_id=None): |
3602 | + return IMPL.console_get_all_by_instance(context, instance_uuid) |
3603 | + |
3604 | + |
3605 | +def console_get(context, console_id, instance_uuid=None): |
3606 | """Get a specific console (possibly on a given instance).""" |
3607 | - return IMPL.console_get(context, console_id, instance_id) |
3608 | + return IMPL.console_get(context, console_id, instance_uuid) |
3609 | |
3610 | |
3611 | ################## |
3612 | @@ -1415,19 +1413,19 @@ |
3613 | #################### |
3614 | |
3615 | |
3616 | -def instance_metadata_get(context, instance_id): |
3617 | +def instance_metadata_get(context, instance_uuid): |
3618 | """Get all metadata for an instance.""" |
3619 | - return IMPL.instance_metadata_get(context, instance_id) |
3620 | - |
3621 | - |
3622 | -def instance_metadata_delete(context, instance_id, key): |
3623 | + return IMPL.instance_metadata_get(context, instance_uuid) |
3624 | + |
3625 | + |
3626 | +def instance_metadata_delete(context, instance_uuid, key): |
3627 | """Delete the given metadata item.""" |
3628 | - IMPL.instance_metadata_delete(context, instance_id, key) |
3629 | - |
3630 | - |
3631 | -def instance_metadata_update(context, instance_id, metadata, delete): |
3632 | + IMPL.instance_metadata_delete(context, instance_uuid, key) |
3633 | + |
3634 | + |
3635 | +def instance_metadata_update(context, instance_uuid, metadata, delete): |
3636 | """Update metadata if it exists, otherwise create it.""" |
3637 | - IMPL.instance_metadata_update(context, instance_id, metadata, delete) |
3638 | + IMPL.instance_metadata_update(context, instance_uuid, metadata, delete) |
3639 | |
3640 | |
3641 | #################### |
3642 | |
3643 | === modified file 'nova/db/sqlalchemy/api.py' |
3644 | --- nova/db/sqlalchemy/api.py 2011-09-21 22:49:35 +0000 |
3645 | +++ nova/db/sqlalchemy/api.py 2011-09-22 19:29:25 +0000 |
3646 | @@ -124,13 +124,13 @@ |
3647 | def require_instance_exists(f): |
3648 | """Decorator to require the specified instance to exist. |
3649 | |
3650 | - Requres the wrapped function to use context and instance_id as |
3651 | + Requires the wrapped function to use context and instance_uuid as |
3652 | their first two arguments. |
3653 | """ |
3654 | |
3655 | - def wrapper(context, instance_id, *args, **kwargs): |
3656 | - db.api.instance_get(context, instance_id) |
3657 | - return f(context, instance_id, *args, **kwargs) |
3658 | + def wrapper(context, instance_uuid, *args, **kwargs): |
3659 | + db.api.instance_get(context, instance_uuid) |
3660 | + return f(context, instance_uuid, *args, **kwargs) |
3661 | wrapper.__name__ = f.__name__ |
3662 | return wrapper |
3663 | |
3664 | @@ -674,7 +674,7 @@ |
3665 | |
3666 | |
3667 | @require_admin_context |
3668 | -def fixed_ip_associate(context, address, instance_id, network_id=None, |
3669 | +def fixed_ip_associate(context, address, instance_uuid, network_id=None, |
3670 | reserved=False): |
3671 | """Keyword arguments: |
3672 | reserved -- should be a boolean value(True or False), exact value will be |
3673 | @@ -704,14 +704,14 @@ |
3674 | network_id, |
3675 | session=session) |
3676 | fixed_ip_ref.instance = instance_get(context, |
3677 | - instance_id, |
3678 | + instance_uuid, |
3679 | session=session) |
3680 | session.add(fixed_ip_ref) |
3681 | return fixed_ip_ref['address'] |
3682 | |
3683 | |
3684 | @require_admin_context |
3685 | -def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None): |
3686 | +def fixed_ip_associate_pool(context, network_id, instance_uuid=None, host=None): |
3687 | session = get_session() |
3688 | with session.begin(): |
3689 | network_or_none = or_(models.FixedIp.network_id == network_id, |
3690 | @@ -732,9 +732,9 @@ |
3691 | fixed_ip_ref.network = network_get(context, |
3692 | network_id, |
3693 | session=session) |
3694 | - if instance_id: |
3695 | + if instance_uuid: |
3696 | fixed_ip_ref.instance = instance_get(context, |
3697 | - instance_id, |
3698 | + instance_uuid, |
3699 | session=session) |
3700 | if host: |
3701 | fixed_ip_ref.host = host |
3702 | @@ -770,9 +770,9 @@ |
3703 | result = session.query(models.FixedIp).\ |
3704 | filter(models.FixedIp.network_id.in_(inner_q)).\ |
3705 | filter(models.FixedIp.updated_at < time).\ |
3706 | - filter(models.FixedIp.instance_id != None).\ |
3707 | + filter(models.FixedIp.instance_uuid != None).\ |
3708 | filter_by(allocated=False).\ |
3709 | - update({'instance_id': None, |
3710 | + update({'instance_uuid': None, |
3711 | 'leased': False, |
3712 | 'updated_at': utils.utcnow()}, |
3713 | synchronize_session='fetch') |
3714 | @@ -830,15 +830,15 @@ |
3715 | |
3716 | |
3717 | @require_context |
3718 | -def fixed_ip_get_by_instance(context, instance_id): |
3719 | +def fixed_ip_get_by_instance(context, instance_uuid): |
3720 | session = get_session() |
3721 | rv = session.query(models.FixedIp).\ |
3722 | options(joinedload('floating_ips')).\ |
3723 | - filter_by(instance_id=instance_id).\ |
3724 | + filter_by(instance_uuid=instance_uuid).\ |
3725 | filter_by(deleted=False).\ |
3726 | all() |
3727 | if not rv: |
3728 | - raise exception.FixedIpNotFoundForInstance(instance_id=instance_id) |
3729 | + raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid) |
3730 | return rv |
3731 | |
3732 | |
3733 | @@ -984,14 +984,14 @@ |
3734 | |
3735 | @require_context |
3736 | @require_instance_exists |
3737 | -def virtual_interface_get_by_instance(context, instance_id): |
3738 | +def virtual_interface_get_by_instance(context, instance_uuid): |
3739 | """Gets all virtual interfaces for instance. |
3740 | |
3741 | - :param instance_id: = id of the instance to retreive vifs for |
3742 | + :param instance_uuid: = uuid of the instance to retrieve vifs for |
3743 | """ |
3744 | session = get_session() |
3745 | vif_refs = session.query(models.VirtualInterface).\ |
3746 | - filter_by(instance_id=instance_id).\ |
3747 | + filter_by(instance_uuid=instance_uuid).\ |
3748 | options(joinedload('network')).\ |
3749 | options(joinedload('fixed_ips')).\ |
3750 | all() |
3751 | @@ -999,12 +999,13 @@ |
3752 | |
3753 | |
3754 | @require_context |
3755 | -def virtual_interface_get_by_instance_and_network(context, instance_id, |
3756 | - network_id): |
3757 | +def virtual_interface_get_by_instance_and_network(context, |
3758 | + instance_uuid, |
3759 | + network_id): |
3760 | """Gets virtual interface for instance that's associated with network.""" |
3761 | session = get_session() |
3762 | vif_ref = session.query(models.VirtualInterface).\ |
3763 | - filter_by(instance_id=instance_id).\ |
3764 | + filter_by(instance_uuid=instance_uuid).\ |
3765 | filter_by(network_id=network_id).\ |
3766 | options(joinedload('network')).\ |
3767 | options(joinedload('fixed_ips')).\ |
3768 | @@ -1040,13 +1041,13 @@ |
3769 | |
3770 | |
3771 | @require_context |
3772 | -def virtual_interface_delete_by_instance(context, instance_id): |
3773 | +def virtual_interface_delete_by_instance(context, instance_uuid): |
3774 | """Delete virtual interface records that are associated |
3775 | - with the instance given by instance_id. |
3776 | + with the instance given by instance_uuid. |
3777 | |
3778 | - :param instance_id: = id of instance |
3779 | + :param instance_uuid: = uuid of instance |
3780 | """ |
3781 | - vif_refs = virtual_interface_get_by_instance(context, instance_id) |
3782 | + vif_refs = virtual_interface_get_by_instance(context, instance_uuid) |
3783 | for vif_ref in vif_refs: |
3784 | virtual_interface_delete(context, vif_ref['id']) |
3785 | |
3786 | @@ -1110,85 +1111,70 @@ |
3787 | |
3788 | |
3789 | @require_context |
3790 | -def instance_destroy(context, instance_id): |
3791 | +def instance_destroy(context, instance_uuid): |
3792 | session = get_session() |
3793 | with session.begin(): |
3794 | session.query(models.Instance).\ |
3795 | - filter_by(id=instance_id).\ |
3796 | + filter_by(uuid=instance_uuid).\ |
3797 | update({'deleted': True, |
3798 | 'deleted_at': utils.utcnow(), |
3799 | 'updated_at': literal_column('updated_at')}) |
3800 | session.query(models.SecurityGroupInstanceAssociation).\ |
3801 | - filter_by(instance_id=instance_id).\ |
3802 | + filter_by(instance_uuid=instance_uuid).\ |
3803 | update({'deleted': True, |
3804 | 'deleted_at': utils.utcnow(), |
3805 | 'updated_at': literal_column('updated_at')}) |
3806 | session.query(models.InstanceMetadata).\ |
3807 | - filter_by(instance_id=instance_id).\ |
3808 | + filter_by(instance_uuid=instance_uuid).\ |
3809 | update({'deleted': True, |
3810 | 'deleted_at': utils.utcnow(), |
3811 | 'updated_at': literal_column('updated_at')}) |
3812 | |
3813 | |
3814 | @require_context |
3815 | -def instance_stop(context, instance_id): |
3816 | +def instance_stop(context, instance_uuid): |
3817 | session = get_session() |
3818 | with session.begin(): |
3819 | session.query(models.Instance).\ |
3820 | - filter_by(id=instance_id).\ |
3821 | + filter_by(uuid=instance_uuid).\ |
3822 | update({'host': None, |
3823 | 'vm_state': vm_states.STOPPED, |
3824 | 'task_state': None, |
3825 | 'updated_at': literal_column('updated_at')}) |
3826 | session.query(models.SecurityGroupInstanceAssociation).\ |
3827 | - filter_by(instance_id=instance_id).\ |
3828 | + filter_by(instance_uuid=instance_uuid).\ |
3829 | update({'updated_at': literal_column('updated_at')}) |
3830 | session.query(models.InstanceMetadata).\ |
3831 | - filter_by(instance_id=instance_id).\ |
3832 | + filter_by(instance_uuid=instance_uuid).\ |
3833 | update({'updated_at': literal_column('updated_at')}) |
3834 | |
3835 | |
3836 | @require_context |
3837 | -def instance_get_by_uuid(context, uuid, session=None): |
3838 | - partial = _build_instance_get(context, session=session) |
3839 | - result = partial.filter_by(uuid=uuid) |
3840 | - result = result.first() |
3841 | - if not result: |
3842 | - # FIXME(sirp): it would be nice if InstanceNotFound would accept a |
3843 | - # uuid parameter as well |
3844 | - raise exception.InstanceNotFound(instance_id=uuid) |
3845 | - return result |
3846 | - |
3847 | - |
3848 | -@require_context |
3849 | -def instance_get(context, instance_id, session=None): |
3850 | - partial = _build_instance_get(context, session=session) |
3851 | - result = partial.filter_by(id=instance_id) |
3852 | - result = result.first() |
3853 | - if not result: |
3854 | - raise exception.InstanceNotFound(instance_id=instance_id) |
3855 | - return result |
3856 | - |
3857 | - |
3858 | -@require_context |
3859 | -def _build_instance_get(context, session=None): |
3860 | +def instance_get(context, instance_uuid, session=None): |
3861 | if not session: |
3862 | session = get_session() |
3863 | |
3864 | partial = session.query(models.Instance).\ |
3865 | - options(joinedload_all('fixed_ips.floating_ips')).\ |
3866 | - options(joinedload_all('fixed_ips.network')).\ |
3867 | - options(joinedload_all('security_groups.rules')).\ |
3868 | - options(joinedload('volumes')).\ |
3869 | - options(joinedload('metadata')).\ |
3870 | - options(joinedload('instance_type')) |
3871 | + options(joinedload_all('fixed_ips.floating_ips')).\ |
3872 | + options(joinedload_all('fixed_ips.network')).\ |
3873 | + options(joinedload_all('security_groups.rules')).\ |
3874 | + options(joinedload('volumes')).\ |
3875 | + options(joinedload('metadata')).\ |
3876 | + options(joinedload('instance_type')).\ |
3877 | + filter_by(uuid=instance_uuid) |
3878 | |
3879 | if is_admin_context(context): |
3880 | partial = partial.filter_by(deleted=can_read_deleted(context)) |
3881 | elif is_user_context(context): |
3882 | partial = partial.filter_by(project_id=context.project_id).\ |
3883 | - filter_by(deleted=False) |
3884 | - return partial |
3885 | + filter_by(deleted=False) |
3886 | + |
3887 | + result = partial.first() |
3888 | + |
3889 | + if not result: |
3890 | + raise exception.InstanceNotFound(instance_uuid=instance_uuid) |
3891 | + |
3892 | + return result |
3893 | |
3894 | |
3895 | @require_admin_context |
3896 | @@ -1427,26 +1413,26 @@ |
3897 | |
3898 | |
3899 | @require_context |
3900 | -def instance_get_fixed_addresses(context, instance_id): |
3901 | +def instance_get_fixed_addresses(context, instance_uuid): |
3902 | session = get_session() |
3903 | with session.begin(): |
3904 | - instance_ref = instance_get(context, instance_id, session=session) |
3905 | + instance_ref = instance_get(context, instance_uuid, session=session) |
3906 | try: |
3907 | - fixed_ips = fixed_ip_get_by_instance(context, instance_id) |
3908 | + fixed_ips = fixed_ip_get_by_instance(context, instance_uuid) |
3909 | except exception.NotFound: |
3910 | return [] |
3911 | return [fixed_ip.address for fixed_ip in fixed_ips] |
3912 | |
3913 | |
3914 | @require_context |
3915 | -def instance_get_fixed_addresses_v6(context, instance_id): |
3916 | +def instance_get_fixed_addresses_v6(context, instance_uuid): |
3917 | session = get_session() |
3918 | with session.begin(): |
3919 | # get instance |
3920 | - instance_ref = instance_get(context, instance_id, session=session) |
3921 | + instance_ref = instance_get(context, instance_uuid, session=session) |
3922 | # assume instance has 1 mac for each network associated with it |
3923 | # get networks associated with instance |
3924 | - network_refs = network_get_all_by_instance(context, instance_id) |
3925 | + network_refs = network_get_all_by_instance(context, instance_uuid) |
3926 | # compile a list of cidr_v6 prefixes sorted by network id |
3927 | prefixes = [ref.cidr_v6 for ref in |
3928 | sorted(network_refs, key=lambda ref: ref.id)] |
3929 | @@ -1464,8 +1450,8 @@ |
3930 | |
3931 | |
3932 | @require_context |
3933 | -def instance_get_floating_address(context, instance_id): |
3934 | - fixed_ip_refs = fixed_ip_get_by_instance(context, instance_id) |
3935 | +def instance_get_floating_address(context, instance_uuid): |
3936 | + fixed_ip_refs = fixed_ip_get_by_instance(context, instance_uuid) |
3937 | if not fixed_ip_refs: |
3938 | return None |
3939 | # NOTE(tr3buchet): this only gets the first fixed_ip |
3940 | @@ -1477,30 +1463,26 @@ |
3941 | |
3942 | |
3943 | @require_context |
3944 | -def instance_update(context, instance_id, values): |
3945 | +def instance_update(context, instance_uuid, values): |
3946 | session = get_session() |
3947 | metadata = values.get('metadata') |
3948 | if metadata is not None: |
3949 | instance_metadata_update(context, |
3950 | - instance_id, |
3951 | + instance_uuid, |
3952 | values.pop('metadata'), |
3953 | delete=True) |
3954 | with session.begin(): |
3955 | - if utils.is_uuid_like(instance_id): |
3956 | - instance_ref = instance_get_by_uuid(context, instance_id, |
3957 | - session=session) |
3958 | - else: |
3959 | - instance_ref = instance_get(context, instance_id, session=session) |
3960 | + instance_ref = instance_get(context, instance_uuid, session=session) |
3961 | instance_ref.update(values) |
3962 | instance_ref.save(session=session) |
3963 | return instance_ref |
3964 | |
3965 | |
3966 | -def instance_add_security_group(context, instance_id, security_group_id): |
3967 | +def instance_add_security_group(context, instance_uuid, security_group_id): |
3968 | """Associate the given security group with the given instance""" |
3969 | session = get_session() |
3970 | with session.begin(): |
3971 | - instance_ref = instance_get(context, instance_id, session=session) |
3972 | + instance_ref = instance_get(context, instance_uuid, session=session) |
3973 | security_group_ref = security_group_get(context, |
3974 | security_group_id, |
3975 | session=session) |
3976 | @@ -1509,12 +1491,12 @@ |
3977 | |
3978 | |
3979 | @require_context |
3980 | -def instance_remove_security_group(context, instance_id, security_group_id): |
3981 | +def instance_remove_security_group(context, instance_uuid, security_group_id): |
3982 | """Disassociate the given security group from the given instance""" |
3983 | session = get_session() |
3984 | |
3985 | session.query(models.SecurityGroupInstanceAssociation).\ |
3986 | - filter_by(instance_id=instance_id).\ |
3987 | + filter_by(instance_uuid=instance_uuid).\ |
3988 | filter_by(security_group_id=security_group_id).\ |
3989 | update({'deleted': True, |
3990 | 'deleted_at': utils.utcnow(), |
3991 | @@ -1534,17 +1516,13 @@ |
3992 | |
3993 | |
3994 | @require_admin_context |
3995 | -def instance_get_actions(context, instance_id): |
3996 | - """Return the actions associated to the given instance id""" |
3997 | +def instance_get_actions(context, instance_uuid): |
3998 | + """Return the actions associated to the given instance uuid""" |
3999 | session = get_session() |
4000 | |
4001 | - if utils.is_uuid_like(instance_id): |
4002 | - instance = instance_get_by_uuid(context, instance_id, session) |
4003 | - instance_id = instance.id |
4004 | - |
4005 | return session.query(models.InstanceActions).\ |
4006 | - filter_by(instance_id=instance_id).\ |
4007 | - all() |
4008 | + filter_by(instance_uuid=instance_uuid).\ |
4009 | + all() |
4010 | |
4011 | |
4012 | @require_context |
4013 | @@ -1819,7 +1797,7 @@ |
4014 | return session.query(models.FixedIp).\ |
4015 | options(joinedload_all('instance')).\ |
4016 | filter_by(network_id=network_id).\ |
4017 | - filter(models.FixedIp.instance_id != None).\ |
4018 | + filter(models.FixedIp.instance_uuid != None).\ |
4019 | filter(models.FixedIp.virtual_interface_id != None).\ |
4020 | filter_by(deleted=False).\ |
4021 | all() |
4022 | @@ -1866,32 +1844,32 @@ |
4023 | |
4024 | |
4025 | @require_admin_context |
4026 | -def network_get_by_instance(_context, instance_id): |
4027 | +def network_get_by_instance(_context, instance_uuid): |
4028 | # note this uses fixed IP to get to instance |
4029 | # only works for networks the instance has an IP from |
4030 | session = get_session() |
4031 | rv = session.query(models.Network).\ |
4032 | filter_by(deleted=False).\ |
4033 | join(models.Network.fixed_ips).\ |
4034 | - filter_by(instance_id=instance_id).\ |
4035 | + filter_by(instance_uuid=instance_uuid).\ |
4036 | filter_by(deleted=False).\ |
4037 | first() |
4038 | if not rv: |
4039 | - raise exception.NetworkNotFoundForInstance(instance_id=instance_id) |
4040 | + raise exception.NetworkNotFoundForInstance(instance_uuid=instance_uuid) |
4041 | return rv |
4042 | |
4043 | |
4044 | @require_admin_context |
4045 | -def network_get_all_by_instance(_context, instance_id): |
4046 | +def network_get_all_by_instance(_context, instance_uuid): |
4047 | session = get_session() |
4048 | rv = session.query(models.Network).\ |
4049 | filter_by(deleted=False).\ |
4050 | join(models.Network.fixed_ips).\ |
4051 | - filter_by(instance_id=instance_id).\ |
4052 | + filter_by(instance_uuid=instance_uuid).\ |
4053 | filter_by(deleted=False).\ |
4054 | all() |
4055 | if not rv: |
4056 | - raise exception.NetworkNotFoundForInstance(instance_id=instance_id) |
4057 | + raise exception.NetworkNotFoundForInstance(instance_uuid=instance_uuid) |
4058 | return rv |
4059 | |
4060 | |
4061 | @@ -2149,15 +2127,14 @@ |
4062 | |
4063 | |
4064 | @require_admin_context |
4065 | -def volume_attached(context, volume_id, instance_id, mountpoint): |
4066 | +def volume_attached(context, volume_id, instance_uuid, mountpoint): |
4067 | session = get_session() |
4068 | with session.begin(): |
4069 | volume_ref = volume_get(context, volume_id, session=session) |
4070 | volume_ref['status'] = 'in-use' |
4071 | volume_ref['mountpoint'] = mountpoint |
4072 | volume_ref['attach_status'] = 'attached' |
4073 | - volume_ref.instance = instance_get(context, instance_id, |
4074 | - session=session) |
4075 | + volume_ref.instance = instance_get(context, instance_uuid, session) |
4076 | volume_ref.save(session=session) |
4077 | |
4078 | |
4079 | @@ -2273,16 +2250,16 @@ |
4080 | |
4081 | |
4082 | @require_admin_context |
4083 | -def volume_get_all_by_instance(context, instance_id): |
4084 | +def volume_get_all_by_instance(context, instance_uuid): |
4085 | session = get_session() |
4086 | result = session.query(models.Volume).\ |
4087 | options(joinedload('volume_metadata')).\ |
4088 | options(joinedload('volume_type')).\ |
4089 | - filter_by(instance_id=instance_id).\ |
4090 | + filter_by(instance_uuid=instance_uuid).\ |
4091 | filter_by(deleted=False).\ |
4092 | all() |
4093 | if not result: |
4094 | - raise exception.VolumeNotFoundForInstance(instance_id=instance_id) |
4095 | + raise exception.VolumeNotFoundForInstance(instance_uuid=instance_uuid) |
4096 | return result |
4097 | |
4098 | |
4099 | @@ -2557,7 +2534,7 @@ |
4100 | session = get_session() |
4101 | with session.begin(): |
4102 | result = session.query(models.BlockDeviceMapping).\ |
4103 | - filter_by(instance_id=values['instance_id']).\ |
4104 | + filter_by(instance_uuid=values['instance_uuid']).\ |
4105 | filter_by(device_name=values['device_name']).\ |
4106 | filter_by(deleted=False).\ |
4107 | first() |
4108 | @@ -2574,7 +2551,7 @@ |
4109 | if (virtual_name is not None and |
4110 | block_device.is_swap_or_ephemeral(virtual_name)): |
4111 | session.query(models.BlockDeviceMapping).\ |
4112 | - filter_by(instance_id=values['instance_id']).\ |
4113 | + filter_by(instance_uuid=values['instance_uuid']).\ |
4114 | filter_by(virtual_name=virtual_name).\ |
4115 | filter(models.BlockDeviceMapping.device_name != |
4116 | values['device_name']).\ |
4117 | @@ -2584,10 +2561,10 @@ |
4118 | |
4119 | |
4120 | @require_context |
4121 | -def block_device_mapping_get_all_by_instance(context, instance_id): |
4122 | +def block_device_mapping_get_all_by_instance(context, instance_uuid): |
4123 | session = get_session() |
4124 | result = session.query(models.BlockDeviceMapping).\ |
4125 | - filter_by(instance_id=instance_id).\ |
4126 | + filter_by(instance_uuid=instance_uuid).\ |
4127 | filter_by(deleted=False).\ |
4128 | all() |
4129 | if not result: |
4130 | @@ -2607,12 +2584,12 @@ |
4131 | |
4132 | |
4133 | @require_context |
4134 | -def block_device_mapping_destroy_by_instance_and_volume(context, instance_id, |
4135 | +def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid, |
4136 | volume_id): |
4137 | session = get_session() |
4138 | with session.begin(): |
4139 | session.query(models.BlockDeviceMapping).\ |
4140 | - filter_by(instance_id=instance_id).\ |
4141 | + filter_by(instance_uuid=instance_uuid).\ |
4142 | filter_by(volume_id=volume_id).\ |
4143 | filter_by(deleted=False).\ |
4144 | update({'deleted': True, |
4145 | @@ -2684,13 +2661,13 @@ |
4146 | |
4147 | |
4148 | @require_context |
4149 | -def security_group_get_by_instance(context, instance_id): |
4150 | +def security_group_get_by_instance(context, instance_uuid): |
4151 | session = get_session() |
4152 | return session.query(models.SecurityGroup).\ |
4153 | filter_by(deleted=False).\ |
4154 | options(joinedload_all('rules')).\ |
4155 | join(models.SecurityGroup.instances).\ |
4156 | - filter_by(id=instance_id).\ |
4157 | + filter_by(uuid=instance_uuid).\ |
4158 | filter_by(deleted=False).\ |
4159 | all() |
4160 | |
4161 | @@ -3151,7 +3128,7 @@ |
4162 | filter_by(instance_uuid=instance_uuid).\ |
4163 | filter_by(status=status).first() |
4164 | if not result: |
4165 | - raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid, |
4166 | + raise exception.MigrationNotFoundByStatus(instance_uuid=instance_uuid, |
4167 | status=status) |
4168 | return result |
4169 | |
4170 | @@ -3236,41 +3213,43 @@ |
4171 | delete() |
4172 | |
4173 | |
4174 | -def console_get_by_pool_instance(context, pool_id, instance_id): |
4175 | +def console_get_by_pool_instance(context, pool_id, instance_uuid): |
4176 | session = get_session() |
4177 | result = session.query(models.Console).\ |
4178 | filter_by(pool_id=pool_id).\ |
4179 | - filter_by(instance_id=instance_id).\ |
4180 | + filter_by(instance_uuid=instance_uuid).\ |
4181 | options(joinedload('pool')).\ |
4182 | first() |
4183 | if not result: |
4184 | - raise exception.ConsoleNotFoundInPoolForInstance(pool_id=pool_id, |
4185 | - instance_id=instance_id) |
4186 | + params = {'pool_id': pool_id, 'instance_uuid': instance_uuid} |
4187 | + raise exception.ConsoleNotFoundInPoolForInstance(**params) |
4188 | return result |
4189 | |
4190 | |
4191 | -def console_get_all_by_instance(context, instance_id): |
4192 | +def console_get_all_by_instance(context, instance_uuid): |
4193 | session = get_session() |
4194 | results = session.query(models.Console).\ |
4195 | - filter_by(instance_id=instance_id).\ |
4196 | - options(joinedload('pool')).\ |
4197 | - all() |
4198 | + filter_by(instance_uuid=instance_uuid).\ |
4199 | + options(joinedload('pool')).\ |
4200 | + all() |
4201 | return results |
4202 | |
4203 | |
4204 | -def console_get(context, console_id, instance_id=None): |
4205 | +def console_get(context, console_id, instance_uuid=None): |
4206 | session = get_session() |
4207 | query = session.query(models.Console).\ |
4208 | filter_by(id=console_id) |
4209 | - if instance_id: |
4210 | - query = query.filter_by(instance_id=instance_id) |
4211 | + if instance_uuid: |
4212 | + query = query.filter_by(instance_uuid=instance_uuid) |
4213 | result = query.options(joinedload('pool')).first() |
4214 | if not result: |
4215 | - if instance_id: |
4216 | - raise exception.ConsoleNotFoundForInstance(console_id=console_id, |
4217 | - instance_id=instance_id) |
4218 | - else: |
4219 | - raise exception.ConsoleNotFound(console_id=console_id) |
4220 | + params = {'console_id': console_id} |
4221 | + if not instance_uuid: |
4222 | + raise exception.ConsoleNotFound(**params) |
4223 | + |
4224 | + params['instance_uuid'] = instance_uuid |
4225 | + raise exception.ConsoleNotFoundForInstance(**params) |
4226 | + |
4227 | return result |
4228 | |
4229 | |
4230 | @@ -3473,11 +3452,11 @@ |
4231 | |
4232 | @require_context |
4233 | @require_instance_exists |
4234 | -def instance_metadata_get(context, instance_id): |
4235 | +def instance_metadata_get(context, instance_uuid): |
4236 | session = get_session() |
4237 | |
4238 | meta_results = session.query(models.InstanceMetadata).\ |
4239 | - filter_by(instance_id=instance_id).\ |
4240 | + filter_by(instance_uuid=instance_uuid).\ |
4241 | filter_by(deleted=False).\ |
4242 | all() |
4243 | |
4244 | @@ -3489,10 +3468,10 @@ |
4245 | |
4246 | @require_context |
4247 | @require_instance_exists |
4248 | -def instance_metadata_delete(context, instance_id, key): |
4249 | +def instance_metadata_delete(context, instance_uuid, key): |
4250 | session = get_session() |
4251 | session.query(models.InstanceMetadata).\ |
4252 | - filter_by(instance_id=instance_id).\ |
4253 | + filter_by(instance_uuid=instance_uuid).\ |
4254 | filter_by(key=key).\ |
4255 | filter_by(deleted=False).\ |
4256 | update({'deleted': True, |
4257 | @@ -3502,10 +3481,10 @@ |
4258 | |
4259 | @require_context |
4260 | @require_instance_exists |
4261 | -def instance_metadata_delete_all(context, instance_id): |
4262 | +def instance_metadata_delete_all(context, instance_uuid): |
4263 | session = get_session() |
4264 | session.query(models.InstanceMetadata).\ |
4265 | - filter_by(instance_id=instance_id).\ |
4266 | + filter_by(instance_uuid=instance_uuid).\ |
4267 | filter_by(deleted=False).\ |
4268 | update({'deleted': True, |
4269 | 'deleted_at': utils.utcnow(), |
4270 | @@ -3514,33 +3493,33 @@ |
4271 | |
4272 | @require_context |
4273 | @require_instance_exists |
4274 | -def instance_metadata_get_item(context, instance_id, key, session=None): |
4275 | +def instance_metadata_get_item(context, instance_uuid, key, session=None): |
4276 | if not session: |
4277 | session = get_session() |
4278 | |
4279 | meta_result = session.query(models.InstanceMetadata).\ |
4280 | - filter_by(instance_id=instance_id).\ |
4281 | + filter_by(instance_uuid=instance_uuid).\ |
4282 | filter_by(key=key).\ |
4283 | filter_by(deleted=False).\ |
4284 | first() |
4285 | |
4286 | if not meta_result: |
4287 | raise exception.InstanceMetadataNotFound(metadata_key=key, |
4288 | - instance_id=instance_id) |
4289 | + instance_uuid=instance_uuid) |
4290 | return meta_result |
4291 | |
4292 | |
4293 | @require_context |
4294 | @require_instance_exists |
4295 | -def instance_metadata_update(context, instance_id, metadata, delete): |
4296 | +def instance_metadata_update(context, instance_uuid, metadata, delete): |
4297 | session = get_session() |
4298 | |
4299 | # Set existing metadata to deleted if delete argument is True |
4300 | if delete: |
4301 | - original_metadata = instance_metadata_get(context, instance_id) |
4302 | + original_metadata = instance_metadata_get(context, instance_uuid) |
4303 | for meta_key, meta_value in original_metadata.iteritems(): |
4304 | if meta_key not in metadata: |
4305 | - meta_ref = instance_metadata_get_item(context, instance_id, |
4306 | + meta_ref = instance_metadata_get_item(context, instance_uuid, |
4307 | meta_key, session) |
4308 | meta_ref.update({'deleted': True}) |
4309 | meta_ref.save(session=session) |
4310 | @@ -3554,11 +3533,11 @@ |
4311 | item = {"value": meta_value} |
4312 | |
4313 | try: |
4314 | - meta_ref = instance_metadata_get_item(context, instance_id, |
4315 | + meta_ref = instance_metadata_get_item(context, instance_uuid, |
4316 | meta_key, session) |
4317 | except exception.InstanceMetadataNotFound, e: |
4318 | meta_ref = models.InstanceMetadata() |
4319 | - item.update({"key": meta_key, "instance_id": instance_id}) |
4320 | + item.update({"key": meta_key, "instance_uuid": instance_uuid}) |
4321 | |
4322 | meta_ref.update(item) |
4323 | meta_ref.save(session=session) |
4324 | |
4325 | === modified file 'nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py' |
4326 | --- nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py 2011-06-30 19:20:59 +0000 |
4327 | +++ nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py 2011-09-22 19:29:25 +0000 |
4328 | @@ -23,7 +23,6 @@ |
4329 | |
4330 | meta = MetaData() |
4331 | |
4332 | - |
4333 | def upgrade(migrate_engine): |
4334 | meta.bind = migrate_engine |
4335 | dialect = migrate_engine.url.get_dialect().name |
4336 | |
4337 | === added file 'nova/db/sqlalchemy/migrate_repo/versions/050_renames_for_instance_uuids.py' |
4338 | --- nova/db/sqlalchemy/migrate_repo/versions/050_renames_for_instance_uuids.py 1970-01-01 00:00:00 +0000 |
4339 | +++ nova/db/sqlalchemy/migrate_repo/versions/050_renames_for_instance_uuids.py 2011-09-22 19:29:25 +0000 |
4340 | @@ -0,0 +1,117 @@ |
4341 | +# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
4342 | + |
4343 | +# Copyright 2011 OpenStack LLC. |
4344 | +# |
4345 | +# Licensed under the Apache License, Version 2.0 (the "License"); you may |
4346 | +# not use this file except in compliance with the License. You may obtain |
4347 | +# a copy of the License at |
4348 | +# |
4349 | +# http://www.apache.org/licenses/LICENSE-2.0 |
4350 | +# |
4351 | +# Unless required by applicable law or agreed to in writing, software |
4352 | +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
4353 | +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
4354 | +# License for the specific language governing permissions and limitations |
4355 | +# under the License. |
4356 | + |
4357 | +import migrate |
4358 | +import sqlalchemy |
4359 | + |
4360 | +from nova import utils |
4361 | + |
4362 | + |
4363 | +meta = sqlalchemy.MetaData() |
4364 | + |
4365 | + |
4366 | +table_names = [ |
4367 | + 'consoles', |
4368 | + 'instance_actions', |
4369 | + 'block_device_mapping', |
4370 | + 'fixed_ips', |
4371 | + 'security_group_instance_association', |
4372 | + 'volumes', |
4373 | + 'instance_metadata', |
4374 | + 'virtual_interfaces', |
4375 | +] |
4376 | + |
4377 | + |
4378 | +skip_fk_tables = [ |
4379 | + 'virtual_interfaces', |
4380 | + 'consoles', |
4381 | +] |
4382 | + |
4383 | + |
4384 | +def upgrade(migrate_engine): |
4385 | + """Upgrade database schema.""" |
4386 | + meta.bind = migrate_engine |
4387 | + |
4388 | + instances = sqlalchemy.Table('instances', meta, autoload=True) |
4389 | + |
4390 | + sqlalchemy.Index('uuid_idx', instances.c.uuid).create(bind=migrate_engine) |
4391 | + |
4392 | + mapping = {} |
4393 | + for instance in migrate_engine.execute(instances.select()): |
4394 | + mapping[instance.id] = instance.uuid or str(utils.gen_uuid()) |
4395 | + |
4396 | + for table_name in table_names: |
4397 | + # Load table definition |
4398 | + table = sqlalchemy.Table(table_name, meta, autoload=True) |
4399 | + instance_uuid_column = sqlalchemy.Column('instance_uuid', |
4400 | + sqlalchemy.String(36)) |
4401 | + |
4402 | + # Add a new instance_uuid column |
4403 | + table.create_column(instance_uuid_column) |
4404 | + |
4405 | + if table_name not in skip_fk_tables: |
4406 | + migrate.ForeignKeyConstraint([table.c.instance_uuid], |
4407 | + [instances.c.uuid]).create() |
4408 | + |
4409 | + # Insert correct uuid data |
4410 | + for instance_id, instance_uuid in mapping.iteritems(): |
4411 | + query = table.update().\ |
4412 | + where(table.c.instance_id == instance_id).\ |
4413 | + values(instance_uuid=instance_uuid) |
4414 | + migrate_engine.execute(query) |
4415 | + |
4416 | + # Drop the old instance_id column |
4417 | + table.c.instance_id.drop() |
4418 | + |
4419 | + |
4420 | +def downgrade(migrate_engine): |
4421 | + """Downgrade database schema.""" |
4422 | + meta.bind = migrate_engine |
4423 | + |
4424 | + instances = sqlalchemy.Table('instances', meta, autoload=True) |
4425 | + |
4426 | + mapping = {} |
4427 | + for instance in migrate_engine.execute(instances.select()): |
4428 | + mapping[instance.uuid] = instance.id |
4429 | + |
4430 | + for table_name in table_names: |
4431 | + # Load table definition |
4432 | + table = sqlalchemy.Table(table_name, meta, autoload=True) |
4433 | + instance_id_column = sqlalchemy.Column('instance_id', |
4434 | + sqlalchemy.Integer()) |
4435 | + |
4436 | + # Add a new instance_id column |
4437 | + table.create_column(instance_id_column) |
4438 | + |
4439 | + # Insert correct uuid data |
4440 | + for instance_uuid, instance_id in mapping.iteritems(): |
4441 | + query = table.update().\ |
4442 | + where(table.c.instance_uuid == instance_uuid).\ |
4443 | + values(instance_id=instance_id) |
4444 | + migrate_engine.execute(query) |
4445 | + |
4446 | + if table_name not in skip_fk_tables: |
4447 | + migrate.ForeignKeyConstraint([table.c.instance_uuid], |
4448 | + [instances.c.uuid]).drop() |
4449 | + |
4450 | + # Drop the old instance_uuid column |
4451 | + table.c.instance_uuid.drop() |
4452 | + |
4453 | + if table_name not in skip_fk_tables: |
4454 | + migrate.ForeignKeyConstraint([table.c.instance_id], |
4455 | + [instances.c.id]).create() |
4456 | + |
4457 | + sqlalchemy.Index('uuid_idx', instances.c.uuid).drop(bind=migrate_engine) |
4458 | |
4459 | === added file 'nova/db/sqlalchemy/migrate_repo/versions/050_sqlite_downgrade.sql' |
4460 | --- nova/db/sqlalchemy/migrate_repo/versions/050_sqlite_downgrade.sql 1970-01-01 00:00:00 +0000 |
4461 | +++ nova/db/sqlalchemy/migrate_repo/versions/050_sqlite_downgrade.sql 2011-09-22 19:29:25 +0000 |
4462 | @@ -0,0 +1,266 @@ |
4463 | +BEGIN TRANSACTION; |
4464 | + |
4465 | +-- START consoles |
4466 | +ALTER TABLE consoles RENAME TO consoles_backup; |
4467 | + |
4468 | +CREATE TABLE consoles ( |
4469 | + created_at DATETIME, |
4470 | + updated_at DATETIME, |
4471 | + deleted_at DATETIME, |
4472 | + deleted BOOLEAN, |
4473 | + id INTEGER NOT NULL, |
4474 | + instance_name VARCHAR(255), |
4475 | + instance_id INTEGER, |
4476 | + password VARCHAR(255), |
4477 | + port INTEGER, |
4478 | + pool_id INTEGER, |
4479 | + PRIMARY KEY (id), |
4480 | + FOREIGN KEY(pool_id) REFERENCES console_pools (id), |
4481 | + CHECK (deleted IN (0, 1)) |
4482 | +); |
4483 | + |
4484 | +INSERT INTO consoles |
4485 | + SELECT * FROM consoles_backup; |
4486 | + |
4487 | +UPDATE consoles |
4488 | + SET instance_id = ( |
4489 | + SELECT id FROM instances WHERE uuid = consoles.instance_id |
4490 | + ); |
4491 | + |
4492 | +DROP TABLE consoles_backup; |
4493 | +-- END consoles |
4494 | + |
4495 | + |
4496 | +-- START instance_actions |
4497 | +ALTER TABLE instance_actions RENAME TO instance_actions_backup; |
4498 | + |
4499 | +CREATE TABLE instance_actions ( |
4500 | + created_at DATETIME, |
4501 | + updated_at DATETIME, |
4502 | + deleted_at DATETIME, |
4503 | + deleted BOOLEAN, |
4504 | + id INTEGER NOT NULL, |
4505 | + instance_id INTEGER, |
4506 | + action VARCHAR(255), |
4507 | + error TEXT, |
4508 | + PRIMARY KEY (id), |
4509 | + FOREIGN KEY(instance_id) REFERENCES instances (id), |
4510 | + CHECK (deleted IN (0, 1)) |
4511 | +); |
4512 | + |
4513 | +INSERT INTO instance_actions |
4514 | + SELECT * FROM instance_actions_backup; |
4515 | + |
4516 | +UPDATE instance_actions |
4517 | + SET instance_id = ( |
4518 | + SELECT id FROM instances WHERE uuid = instance_actions.instance_id |
4519 | + ); |
4520 | + |
4521 | +DROP TABLE instance_actions_backup; |
4522 | +-- END instance_actions |
4523 | + |
4524 | + |
4525 | +-- START fixed_ips |
4526 | +ALTER TABLE fixed_ips RENAME TO fixed_ips_backup; |
4527 | + |
4528 | +CREATE TABLE fixed_ips ( |
4529 | + id INTEGER NOT NULL, |
4530 | + address VARCHAR(255), |
4531 | + virtual_interface_id INTEGER, |
4532 | + network_id INTEGER, |
4533 | + instance_id INTEGER, |
4534 | + allocated BOOLEAN default FALSE, |
4535 | + leased BOOLEAN default FALSE, |
4536 | + reserved BOOLEAN default FALSE, |
4537 | + created_at DATETIME NOT NULL, |
4538 | + updated_at DATETIME, |
4539 | + deleted_at DATETIME, |
4540 | + deleted BOOLEAN NOT NULL, host VARCHAR(255), |
4541 | + PRIMARY KEY (id), |
4542 | + FOREIGN KEY(instance_id) REFERENCES instances (id), |
4543 | + FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id), |
4544 | + CHECK (deleted IN (0, 1)) |
4545 | +); |
4546 | + |
4547 | +INSERT INTO fixed_ips |
4548 | + SELECT * FROM fixed_ips_backup; |
4549 | + |
4550 | +UPDATE fixed_ips |
4551 | + SET instance_id = ( |
4552 | + SELECT id FROM instances WHERE uuid = fixed_ips.instance_id |
4553 | + ); |
4554 | + |
4555 | +DROP TABLE fixed_ips_backup; |
4556 | +-- END fixed_ips |
4557 | + |
4558 | + |
4559 | +-- START security_group_instance_association |
4560 | +ALTER TABLE security_group_instance_association |
4561 | + RENAME TO security_group_instance_association_backup; |
4562 | + |
4563 | +CREATE TABLE security_group_instance_association ( |
4564 | + id INTEGER NOT NULL, |
4565 | + security_group_id INTEGER, |
4566 | + instance_id INTEGER, |
4567 | + created_at DATETIME, |
4568 | + updated_at DATETIME, |
4569 | + deleted_at DATETIME, |
4570 | + deleted BOOLEAN, |
4571 | + PRIMARY KEY (id), |
4572 | + FOREIGN KEY(instance_id) REFERENCES instances (id), |
4573 | + FOREIGN KEY(security_group_id) REFERENCES security_groups (id), |
4574 | + CHECK (deleted IN (0, 1)) |
4575 | +); |
4576 | + |
4577 | +INSERT INTO security_group_instance_association |
4578 | + SELECT * FROM security_group_instance_association_backup; |
4579 | + |
4580 | +UPDATE security_group_instance_association |
4581 | + SET instance_id = ( |
4582 | + SELECT id FROM instances WHERE uuid = security_group_instance_association.instance_id |
4583 | + ); |
4584 | + |
4585 | +DROP TABLE security_group_instance_association_backup; |
4586 | +-- END security_group_instance_association |
4587 | + |
4588 | + |
4589 | +-- START volumes |
4590 | +ALTER TABLE volumes RENAME TO volumes_backup; |
4591 | + |
4592 | +CREATE TABLE volumes ( |
4593 | + created_at DATETIME, |
4594 | + updated_at DATETIME, |
4595 | + deleted_at DATETIME, |
4596 | + deleted BOOLEAN, |
4597 | + id INTEGER NOT NULL, |
4598 | + ec2_id VARCHAR(255), |
4599 | + user_id VARCHAR(255), |
4600 | + project_id VARCHAR(255), |
4601 | + host VARCHAR(255), |
4602 | + size INTEGER, |
4603 | + availability_zone VARCHAR(255), |
4604 | + instance_id INTEGER, |
4605 | + mountpoint VARCHAR(255), |
4606 | + attach_time VARCHAR(255), |
4607 | + status VARCHAR(255), |
4608 | + attach_status VARCHAR(255), |
4609 | + scheduled_at DATETIME, |
4610 | + launched_at DATETIME, |
4611 | + terminated_at DATETIME, |
4612 | + display_name VARCHAR(255), |
4613 | + display_description VARCHAR(255), |
4614 | + provider_location VARCHAR(256), |
4615 | + provider_auth VARCHAR(256), |
4616 | + snapshot_id INTEGER, |
4617 | + volume_type_id INTEGER, |
4618 | + PRIMARY KEY (id), |
4619 | + FOREIGN KEY(instance_id) REFERENCES instances (id), |
4620 | + CHECK (deleted IN (0, 1)) |
4621 | +); |
4622 | + |
4623 | +INSERT INTO volumes SELECT * FROM volumes_backup; |
4624 | + |
4625 | +UPDATE volumes |
4626 | + SET instance_id = ( |
4627 | + SELECT id FROM instances WHERE uuid = volumes.instance_id |
4628 | + ); |
4629 | + |
4630 | +DROP TABLE volumes_backup; |
4631 | +-- END volumes |
4632 | + |
4633 | + |
4634 | +-- START block_device_mapping |
4635 | +ALTER TABLE block_device_mapping RENAME TO block_device_mapping_backup; |
4636 | + |
4637 | +CREATE TABLE block_device_mapping ( |
4638 | + created_at DATETIME, |
4639 | + updated_at DATETIME, |
4640 | + deleted_at DATETIME, |
4641 | + deleted BOOLEAN, |
4642 | + id INTEGER NOT NULL, |
4643 | + instance_id INTEGER NOT NULL, |
4644 | + device_name VARCHAR(255) NOT NULL, |
4645 | + delete_on_termination BOOLEAN, |
4646 | + virtual_name VARCHAR(255), |
4647 | + snapshot_id INTEGER, |
4648 | + volume_id INTEGER, |
4649 | + volume_size INTEGER, |
4650 | + no_device BOOLEAN, |
4651 | + PRIMARY KEY (id), |
4652 | + FOREIGN KEY(snapshot_id) REFERENCES snapshots (id), |
4653 | + FOREIGN KEY(volume_id) REFERENCES volumes (id), |
4654 | + FOREIGN KEY(instance_id) REFERENCES instances (id), |
4655 | + CHECK (delete_on_termination IN (0, 1)), |
4656 | + CHECK (deleted IN (0, 1)), |
4657 | + CHECK (no_device IN (0, 1)) |
4658 | +); |
4659 | + |
4660 | +INSERT INTO block_device_mapping SELECT * FROM block_device_mapping_backup; |
4661 | + |
4662 | +UPDATE block_device_mapping |
4663 | + SET instance_id = ( |
4664 | + SELECT id FROM instances WHERE uuid = block_device_mapping.instance_id |
4665 | + ); |
4666 | + |
4667 | +DROP TABLE block_device_mapping_backup; |
4668 | +-- END block_device_mapping |
4669 | + |
4670 | + |
4671 | +-- START virtual_interfaces |
4672 | +ALTER TABLE virtual_interfaces RENAME TO virtual_interfaces_backup; |
4673 | + |
4674 | +CREATE TABLE virtual_interfaces ( |
4675 | + created_at DATETIME, |
4676 | + updated_at DATETIME, |
4677 | + deleted_at DATETIME, |
4678 | + deleted BOOLEAN, |
4679 | + id INTEGER NOT NULL, |
4680 | + address VARCHAR(255), |
4681 | + network_id INTEGER, |
4682 | + instance_id INTEGER, |
4683 | + uuid VARCHAR(36), |
4684 | + PRIMARY KEY (id), |
4685 | + FOREIGN KEY(network_id) REFERENCES networks (id), |
4686 | + UNIQUE (address), |
4687 | + CHECK (deleted IN (0, 1)) |
4688 | +); |
4689 | + |
4690 | +INSERT INTO virtual_interfaces SELECT * FROM virtual_interfaces_backup; |
4691 | + |
4692 | +UPDATE virtual_interfaces |
4693 | + SET instance_id = ( |
4694 | + SELECT id FROM instances WHERE uuid = virtual_interfaces.instance_id |
4695 | + ); |
4696 | + |
4697 | +DROP TABLE virtual_interfaces_backup; |
4698 | +-- END virtual_interfaces |
4699 | + |
4700 | + |
4701 | +-- START instance_metadata |
4702 | +ALTER TABLE instance_metadata RENAME TO instance_metadata_backup; |
4703 | + |
4704 | +CREATE TABLE instance_metadata ( |
4705 | + created_at DATETIME, |
4706 | + updated_at DATETIME, |
4707 | + deleted_at DATETIME, |
4708 | + deleted BOOLEAN, |
4709 | + id INTEGER NOT NULL, |
4710 | + instance_id INTEGER, |
4711 | + key VARCHAR(255), |
4712 | + value VARCHAR(255), |
4713 | + PRIMARY KEY (id), |
4714 | + FOREIGN KEY(instance_id) REFERENCES instances (id), |
4715 | + CHECK (deleted IN (0, 1)) |
4716 | +); |
4717 | + |
4718 | +INSERT INTO instance_metadata SELECT * FROM instance_metadata_backup; |
4719 | + |
4720 | +UPDATE instance_metadata |
4721 | + SET instance_id = ( |
4722 | + SELECT id FROM instances WHERE uuid = instance_metadata.instance_id |
4723 | + ); |
4724 | + |
4725 | +DROP TABLE instance_metadata_backup; |
4726 | +-- END instance_metadata |
4727 | + |
4728 | +COMMIT; |
4729 | |
4730 | === added file 'nova/db/sqlalchemy/migrate_repo/versions/050_sqlite_upgrade.sql' |
4731 | --- nova/db/sqlalchemy/migrate_repo/versions/050_sqlite_upgrade.sql 1970-01-01 00:00:00 +0000 |
4732 | +++ nova/db/sqlalchemy/migrate_repo/versions/050_sqlite_upgrade.sql 2011-09-22 19:29:25 +0000 |
4733 | @@ -0,0 +1,265 @@ |
4734 | +BEGIN TRANSACTION; |
4735 | + |
4736 | +-- START consoles |
4737 | +ALTER TABLE consoles RENAME TO consoles_backup; |
4738 | + |
4739 | +CREATE TABLE consoles ( |
4740 | + created_at DATETIME, |
4741 | + updated_at DATETIME, |
4742 | + deleted_at DATETIME, |
4743 | + deleted BOOLEAN, |
4744 | + id INTEGER NOT NULL, |
4745 | + instance_name VARCHAR(255), |
4746 | + instance_uuid VARCHAR(36), |
4747 | + password VARCHAR(255), |
4748 | + port INTEGER, |
4749 | + pool_id INTEGER, |
4750 | + PRIMARY KEY (id), |
4751 | + FOREIGN KEY(pool_id) REFERENCES console_pools (id), |
4752 | + CHECK (deleted IN (0, 1)) |
4753 | +); |
4754 | + |
4755 | +INSERT INTO consoles |
4756 | + SELECT * FROM consoles_backup; |
4757 | + |
4758 | +UPDATE consoles |
4759 | + SET instance_uuid = ( |
4760 | + SELECT uuid FROM instances WHERE id = consoles.instance_uuid |
4761 | + ); |
4762 | + |
4763 | +DROP TABLE consoles_backup; |
4764 | +-- END consoles |
4765 | + |
4766 | +-- START instance_actions |
4767 | +ALTER TABLE instance_actions RENAME TO instance_actions_backup; |
4768 | + |
4769 | +CREATE TABLE instance_actions ( |
4770 | + created_at DATETIME, |
4771 | + updated_at DATETIME, |
4772 | + deleted_at DATETIME, |
4773 | + deleted BOOLEAN, |
4774 | + id INTEGER NOT NULL, |
4775 | + instance_uuid VARCHAR(36), |
4776 | + action VARCHAR(255), |
4777 | + error TEXT, |
4778 | + PRIMARY KEY (id), |
4779 | + FOREIGN KEY(instance_uuid) REFERENCES instances (uuid), |
4780 | + CHECK (deleted IN (0, 1)) |
4781 | +); |
4782 | + |
4783 | +INSERT INTO instance_actions |
4784 | + SELECT * FROM instance_actions_backup; |
4785 | + |
4786 | +UPDATE instance_actions |
4787 | + SET instance_uuid = ( |
4788 | + SELECT uuid FROM instances WHERE id = instance_actions.instance_uuid |
4789 | + ); |
4790 | + |
4791 | +DROP TABLE instance_actions_backup; |
4792 | +-- END instance_actions |
4793 | + |
4794 | + |
4795 | +-- START fixed_ips |
4796 | +ALTER TABLE fixed_ips RENAME TO fixed_ips_backup; |
4797 | + |
4798 | +CREATE TABLE fixed_ips ( |
4799 | + id INTEGER NOT NULL, |
4800 | + address VARCHAR(255), |
4801 | + virtual_interface_id INTEGER, |
4802 | + network_id INTEGER, |
4803 | + instance_uuid VARCHAR(36), |
4804 | + allocated BOOLEAN default FALSE, |
4805 | + leased BOOLEAN default FALSE, |
4806 | + reserved BOOLEAN default FALSE, |
4807 | + created_at DATETIME NOT NULL, |
4808 | + updated_at DATETIME, |
4809 | + deleted_at DATETIME, |
4810 | + deleted BOOLEAN NOT NULL, host VARCHAR(255), |
4811 | + PRIMARY KEY (id), |
4812 | + FOREIGN KEY(instance_uuid) REFERENCES instances (uuid), |
4813 | + FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id), |
4814 | + CHECK (deleted IN (0, 1)) |
4815 | +); |
4816 | + |
4817 | +INSERT INTO fixed_ips |
4818 | + SELECT * FROM fixed_ips_backup; |
4819 | + |
4820 | +UPDATE fixed_ips |
4821 | + SET instance_uuid = ( |
4822 | + SELECT uuid FROM instances WHERE id = fixed_ips.instance_uuid |
4823 | + ); |
4824 | + |
4825 | +DROP TABLE fixed_ips_backup; |
4826 | +-- END fixed_ips |
4827 | + |
4828 | + |
4829 | +-- START security_group_instance_association |
4830 | +ALTER TABLE security_group_instance_association |
4831 | + RENAME TO security_group_instance_association_backup; |
4832 | + |
4833 | +CREATE TABLE security_group_instance_association ( |
4834 | + id INTEGER NOT NULL, |
4835 | + security_group_id INTEGER, |
4836 | + instance_uuid VARCHAR(36), |
4837 | + created_at DATETIME, |
4838 | + updated_at DATETIME, |
4839 | + deleted_at DATETIME, |
4840 | + deleted BOOLEAN, |
4841 | + PRIMARY KEY (id), |
4842 | + FOREIGN KEY(instance_uuid) REFERENCES instances (uuid), |
4843 | + FOREIGN KEY(security_group_id) REFERENCES security_groups (id), |
4844 | + CHECK (deleted IN (0, 1)) |
4845 | +); |
4846 | + |
4847 | +INSERT INTO security_group_instance_association |
4848 | + SELECT * FROM security_group_instance_association_backup; |
4849 | + |
4850 | +UPDATE security_group_instance_association |
4851 | + SET instance_uuid = ( |
4852 | + SELECT uuid FROM instances WHERE id = security_group_instance_association.instance_uuid |
4853 | + ); |
4854 | + |
4855 | +DROP TABLE security_group_instance_association_backup; |
4856 | +-- END security_group_instance_association |
4857 | + |
4858 | + |
4859 | +-- START volumes |
4860 | +ALTER TABLE volumes RENAME TO volumes_backup; |
4861 | + |
4862 | +CREATE TABLE volumes ( |
4863 | + created_at DATETIME, |
4864 | + updated_at DATETIME, |
4865 | + deleted_at DATETIME, |
4866 | + deleted BOOLEAN, |
4867 | + id INTEGER NOT NULL, |
4868 | + ec2_id VARCHAR(255), |
4869 | + user_id VARCHAR(255), |
4870 | + project_id VARCHAR(255), |
4871 | + host VARCHAR(255), |
4872 | + size INTEGER, |
4873 | + availability_zone VARCHAR(255), |
4874 | + instance_uuid VARCHAR(36), |
4875 | + mountpoint VARCHAR(255), |
4876 | + attach_time VARCHAR(255), |
4877 | + status VARCHAR(255), |
4878 | + attach_status VARCHAR(255), |
4879 | + scheduled_at DATETIME, |
4880 | + launched_at DATETIME, |
4881 | + terminated_at DATETIME, |
4882 | + display_name VARCHAR(255), |
4883 | + display_description VARCHAR(255), |
4884 | + provider_location VARCHAR(256), |
4885 | + provider_auth VARCHAR(256), |
4886 | + snapshot_id INTEGER, |
4887 | + volume_type_id INTEGER, |
4888 | + PRIMARY KEY (id), |
4889 | + FOREIGN KEY(instance_uuid) REFERENCES instances (uuid), |
4890 | + CHECK (deleted IN (0, 1)) |
4891 | +); |
4892 | + |
4893 | +INSERT INTO volumes SELECT * FROM volumes_backup; |
4894 | + |
4895 | +UPDATE volumes |
4896 | + SET instance_uuid = ( |
4897 | + SELECT uuid FROM instances WHERE id = volumes.instance_uuid |
4898 | + ); |
4899 | + |
4900 | +DROP TABLE volumes_backup; |
4901 | +-- END volumes |
4902 | + |
4903 | + |
4904 | +-- START block_device_mapping |
4905 | +ALTER TABLE block_device_mapping RENAME TO block_device_mapping_backup; |
4906 | + |
4907 | +CREATE TABLE block_device_mapping ( |
4908 | + created_at DATETIME, |
4909 | + updated_at DATETIME, |
4910 | + deleted_at DATETIME, |
4911 | + deleted BOOLEAN, |
4912 | + id INTEGER NOT NULL, |
4913 | + instance_uuid VARCHAR(36) NOT NULL, |
4914 | + device_name VARCHAR(255) NOT NULL, |
4915 | + delete_on_termination BOOLEAN, |
4916 | + virtual_name VARCHAR(255), |
4917 | + snapshot_id INTEGER, |
4918 | + volume_id INTEGER, |
4919 | + volume_size INTEGER, |
4920 | + no_device BOOLEAN, |
4921 | + PRIMARY KEY (id), |
4922 | + FOREIGN KEY(snapshot_id) REFERENCES snapshots (id), |
4923 | + FOREIGN KEY(volume_id) REFERENCES volumes (id), |
4924 | + FOREIGN KEY(instance_uuid) REFERENCES instances (uuid), |
4925 | + CHECK (delete_on_termination IN (0, 1)), |
4926 | + CHECK (deleted IN (0, 1)), |
4927 | + CHECK (no_device IN (0, 1)) |
4928 | +); |
4929 | + |
4930 | +INSERT INTO block_device_mapping SELECT * FROM block_device_mapping_backup; |
4931 | + |
4932 | +UPDATE block_device_mapping |
4933 | + SET instance_uuid = ( |
4934 | + SELECT uuid FROM instances WHERE id = block_device_mapping.instance_uuid |
4935 | + ); |
4936 | + |
4937 | +DROP TABLE block_device_mapping_backup; |
4938 | +-- END block_device_mapping |
4939 | + |
4940 | + |
4941 | +-- START virtual_interfaces |
4942 | +ALTER TABLE virtual_interfaces RENAME TO virtual_interfaces_backup; |
4943 | + |
4944 | +CREATE TABLE virtual_interfaces ( |
4945 | + created_at DATETIME, |
4946 | + updated_at DATETIME, |
4947 | + deleted_at DATETIME, |
4948 | + deleted BOOLEAN, |
4949 | + id INTEGER NOT NULL, |
4950 | + address VARCHAR(255), |
4951 | + network_id INTEGER, |
4952 | + instance_uuid VARCHAR(36), |
4953 | + uuid VARCHAR(36), |
4954 | + PRIMARY KEY (id), |
4955 | + FOREIGN KEY(network_id) REFERENCES networks (id), |
4956 | + UNIQUE (address), |
4957 | + CHECK (deleted IN (0, 1)) |
4958 | +); |
4959 | + |
4960 | +INSERT INTO virtual_interfaces SELECT * FROM virtual_interfaces_backup; |
4961 | + |
4962 | +UPDATE virtual_interfaces |
4963 | + SET instance_uuid = ( |
4964 | + SELECT uuid FROM instances WHERE id = virtual_interfaces.instance_uuid |
4965 | + ); |
4966 | + |
4967 | +DROP TABLE virtual_interfaces_backup; |
4968 | +-- END virtual_interfaces |
4969 | + |
4970 | + |
4971 | +-- START instance_metadata |
4972 | +ALTER TABLE instance_metadata RENAME TO instance_metadata_backup; |
4973 | + |
4974 | +CREATE TABLE instance_metadata ( |
4975 | + created_at DATETIME, |
4976 | + updated_at DATETIME, |
4977 | + deleted_at DATETIME, |
4978 | + deleted BOOLEAN, |
4979 | + id INTEGER NOT NULL, |
4980 | + instance_uuid VARCHAR(36), |
4981 | + key VARCHAR(255), |
4982 | + value VARCHAR(255), |
4983 | + PRIMARY KEY (id), |
4984 | + FOREIGN KEY(instance_uuid) REFERENCES instances (uuid), |
4985 | + CHECK (deleted IN (0, 1)) |
4986 | +); |
4987 | + |
4988 | +INSERT INTO instance_metadata SELECT * FROM instance_metadata_backup; |
4989 | + |
4990 | +UPDATE instance_metadata |
4991 | + SET instance_uuid = ( |
4992 | + SELECT uuid FROM instances WHERE id = instance_metadata.instance_uuid |
4993 | + ); |
4994 | + |
4995 | +DROP TABLE instance_metadata_backup; |
4996 | +-- END instance_metadata |
4997 | + |
4998 | +COMMIT; |
4999 | |
5000 | === modified file 'nova/db/sqlalchemy/migration.py' |