Merge lp:~gandelman-a/nova/libvirt_patch_refresh into lp:~ubuntu-server-dev/nova/essex
- libvirt_patch_refresh
- Merge into essex
Proposed by
Adam Gandelman
Status: | Merged |
---|---|
Merge reported by: | Chuck Short |
Merged at revision: | not available |
Proposed branch: | lp:~gandelman-a/nova/libvirt_patch_refresh |
Merge into: | lp:~ubuntu-server-dev/nova/essex |
Diff against target: | 320169 lines |
To merge this branch: | bzr merge lp:~gandelman-a/nova/libvirt_patch_refresh |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Ubuntu Server Developers | Pending | ||
Review via email: mp+95024@code.launchpad.net |
Commit message
Description of the change
This refreshes the libvirt patch correctly and passes all tests. In the future, we really need to gate changes like this on passing CI testing before they land in this repository. We shouldn't be committing changes to both places at once; it defeats the purpose.
To post a comment you must log in.
- 327. By Chuck Short
-
Update libvirt-console patch again.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'debian/patches/libvirt-use-console-pipe.patch' |
2 | --- debian/patches/libvirt-use-console-pipe.patch 2012-02-28 14:11:36 +0000 |
3 | +++ debian/patches/libvirt-use-console-pipe.patch 2012-02-28 19:17:20 +0000 |
4 | @@ -1,7 +1,8 @@ |
5 | -diff -Naurp nova.orig/Authors nova/Authors |
6 | ---- nova.orig/Authors 2012-02-28 09:08:10.785887173 -0500 |
7 | -+++ nova/Authors 2012-02-28 09:08:58.677887983 -0500 |
8 | -@@ -147,6 +147,7 @@ Ricardo Carrillo Cruz <emaildericky@gmai |
9 | +Index: nova-src/Authors |
10 | +=================================================================== |
11 | +--- nova-src.orig/Authors 2012-02-28 10:30:58.000000000 -0800 |
12 | ++++ nova-src/Authors 2012-02-28 10:32:36.086640111 -0800 |
13 | +@@ -147,6 +147,7 @@ |
14 | Rick Clark <rick@openstack.org> |
15 | Rick Harris <rconradharris@gmail.com> |
16 | Rob Kost <kost@isi.edu> |
17 | @@ -9,10 +10,11 @@ |
18 | Russell Bryant <rbryant@redhat.com> |
19 | Russell Sim <russell.sim@gmail.com> |
20 | Ryan Lane <rlane@wikimedia.org> |
21 | -diff -Naurp nova.orig/nova/tests/test_libvirt.py nova/nova/tests/test_libvirt.py |
22 | ---- nova.orig/nova/tests/test_libvirt.py 2012-02-28 09:08:10.873887173 -0500 |
23 | -+++ nova/nova/tests/test_libvirt.py 2012-02-28 09:08:58.681887793 -0500 |
24 | -@@ -872,7 +872,7 @@ class LibvirtConnTestCase(test.TestCase) |
25 | +Index: nova-src/nova/tests/test_libvirt.py |
26 | +=================================================================== |
27 | +--- nova-src.orig/nova/tests/test_libvirt.py 2012-02-28 10:30:58.000000000 -0800 |
28 | ++++ nova-src/nova/tests/test_libvirt.py 2012-02-28 10:32:36.086640111 -0800 |
29 | +@@ -872,7 +872,7 @@ |
30 | (lambda t: _ipv4_like(t.findall(parameter)[1].get('value'), |
31 | '192.168.*.1'), True), |
32 | (lambda t: t.find('./devices/serial/source').get( |
33 | @@ -21,7 +23,7 @@ |
34 | (lambda t: t.find('./memory').text, '2097152')] |
35 | if rescue: |
36 | common_checks += [ |
37 | -@@ -2308,3 +2308,53 @@ class LibvirtConnectionTestCase(test.Tes |
38 | +@@ -2308,3 +2308,53 @@ |
39 | |
40 | ref = self.libvirtconnection.finish_revert_migration(ins_ref, None) |
41 | self.assertTrue(isinstance(ref, eventlet.event.Event)) |
42 | @@ -75,10 +77,89 @@ |
43 | + os.unlink(self.ringbuffer_path) |
44 | + os.unlink(self.fifo_path) |
45 | + os.rmdir(self.directory_path) |
46 | -diff -Naurp nova.orig/nova/utils.py nova/nova/utils.py |
47 | ---- nova.orig/nova/utils.py 2012-02-28 09:08:10.877887173 -0500 |
48 | -+++ nova/nova/utils.py 2012-02-28 09:08:58.685887622 -0500 |
49 | -@@ -33,6 +33,7 @@ import re |
50 | +Index: nova-src/nova/tests/test_utils.py |
51 | +=================================================================== |
52 | +--- nova-src.orig/nova/tests/test_utils.py 2012-02-28 10:00:07.000000000 -0800 |
53 | ++++ nova-src/nova/tests/test_utils.py 2012-02-28 10:32:36.086640111 -0800 |
54 | +@@ -17,6 +17,7 @@ |
55 | + import __builtin__ |
56 | + import datetime |
57 | + import hashlib |
58 | ++import itertools |
59 | + import os |
60 | + import os.path |
61 | + import socket |
62 | +@@ -25,6 +26,7 @@ |
63 | + |
64 | + import iso8601 |
65 | + import mox |
66 | ++import nose |
67 | + |
68 | + import nova |
69 | + from nova import exception |
70 | +@@ -813,6 +815,57 @@ |
71 | + west = utils.parse_isotime(str) |
72 | + normed = utils.normalize_time(west) |
73 | + self._instaneous(normed, 2012, 2, 13, 23, 53, 07, 0) |
74 | ++ |
75 | ++class RingBufferTestCase(test.TestCase): |
76 | ++ """Unit test for utils.RingBuffer().""" |
77 | ++ def setUp(self): |
78 | ++ super(RingBufferTestCase, self).setUp() |
79 | ++ self.f = tempfile.NamedTemporaryFile() |
80 | ++ self.r = utils.RingBuffer(self.f.name, max_size=4) |
81 | ++ |
82 | ++ def tearDown(self): |
83 | ++ super(RingBufferTestCase, self).tearDown() |
84 | ++ self.r.close() |
85 | ++ self.f.close() |
86 | ++ |
87 | ++ def testEmpty(self): |
88 | ++ self.assertEquals(self.r.peek(), '') |
89 | ++ |
90 | ++ def testReOpen(self): |
91 | ++ self.r.write('1') |
92 | ++ self.r.close() |
93 | ++ self.r = utils.RingBuffer(self.f.name, max_size=4) |
94 | ++ self.assertEquals(self.r.peek(), '1') |
95 | ++ |
96 | ++ |
97 | ++def testPermutations(): |
98 | ++ """Test various permutations of writing to a RingBuffer. |
99 | ++ |
100 | ++ Try all permutations of writing [0,5) bytes three times to a RingBuffer |
101 | ++ of size 4. This makes use of nose's test generator capability so cannot |
102 | ++ be a subclass of test.TestCase. |
103 | ++ |
104 | ++ """ |
105 | ++ def check_buffer(r, expected): |
106 | ++ nose.tools.eq_(r.peek(), expected) |
107 | ++ |
108 | ++ SIZE = 4 |
109 | ++ for sequence in itertools.product(range(5), range(5), range(5)): |
110 | ++ f = tempfile.NamedTemporaryFile() |
111 | ++ r = utils.RingBuffer(f.name, max_size=SIZE) |
112 | ++ source = itertools.count() |
113 | ++ expected = '' |
114 | ++ |
115 | ++ def next_n(n): |
116 | ++ return ''.join(str(next(source)) for x in range(n)) |
117 | ++ for entry in sequence: |
118 | ++ to_insert = next_n(entry) |
119 | ++ expected += to_insert |
120 | ++ expected = expected[max(0, len(expected) - SIZE):] |
121 | ++ r.write(to_insert) |
122 | ++ yield check_buffer, r, expected |
123 | ++ r.close() |
124 | ++ f.close() |
125 | + |
126 | + |
127 | + class TestLockCleanup(test.TestCase): |
128 | +Index: nova-src/nova/utils.py |
129 | +=================================================================== |
130 | +--- nova-src.orig/nova/utils.py 2012-02-28 10:30:58.000000000 -0800 |
131 | ++++ nova-src/nova/utils.py 2012-02-28 10:32:36.090640111 -0800 |
132 | +@@ -33,6 +33,7 @@ |
133 | import shlex |
134 | import shutil |
135 | import socket |
136 | @@ -86,7 +167,7 @@ |
137 | import struct |
138 | import sys |
139 | import tempfile |
140 | -@@ -56,6 +57,7 @@ from nova import log as logging |
141 | +@@ -56,6 +57,7 @@ |
142 | from nova.openstack.common import cfg |
143 | |
144 | |
145 | @@ -94,10 +175,147 @@ |
146 | LOG = logging.getLogger(__name__) |
147 | ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S" |
148 | PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f" |
149 | -diff -Naurp nova.orig/nova/virt/libvirt/connection.py nova/nova/virt/libvirt/connection.py |
150 | ---- nova.orig/nova/virt/libvirt/connection.py 2012-02-28 09:08:10.877887173 -0500 |
151 | -+++ nova/nova/virt/libvirt/connection.py 2012-02-28 09:09:56.549887233 -0500 |
152 | -@@ -39,15 +39,19 @@ Supports KVM, LXC, QEMU, UML, and XEN. |
153 | +@@ -1525,6 +1527,135 @@ |
154 | + return out |
155 | + except exception.ProcessExecutionError: |
156 | + raise exception.FileNotFound(file_path=file_path) |
157 | ++ |
158 | ++class RingBuffer(object): |
159 | ++ """Generic userspace on-disk ringbuffer implementation.""" |
160 | ++ _header_max_int = (2 ** (struct.calcsize('I') * BITS_PER_BYTE)) - 1 |
161 | ++ _header_format = 'II' |
162 | ++ _header_size = struct.calcsize(_header_format) |
163 | ++ |
164 | ++ def __init__(self, backing_file, max_size=65536): |
165 | ++ # We need one extra byte as the buffer is kept with |
166 | ++ # one byte free to avoid the head==tail full/empty |
167 | ++ # problem |
168 | ++ max_size += 1 |
169 | ++ |
170 | ++ if not 0 < max_size <= RingBuffer._header_max_int: |
171 | ++ raise ValueError(_('RingBuffer size out of range')) |
172 | ++ had_already_existed = os.path.exists(backing_file) |
173 | ++ self.f = self._open(backing_file) |
174 | ++ if had_already_existed: |
175 | ++ file_size = os.fstat(self.f.fileno()).st_size |
176 | ++ if file_size: |
177 | ++ current_size = file_size - self._header_size |
178 | ++ if not 0 < current_size <= RingBuffer._header_max_int: |
179 | ++ self.f.close() |
180 | ++ raise ValueError(_('Disk RingBuffer size out of range')) |
181 | ++ self.max_size = current_size |
182 | ++ |
183 | ++ # If the file doesn't contain a header, assume it is corrupt |
184 | ++ # and recreate |
185 | ++ if file_size < self._header_size: |
186 | ++ self._write_header(0, 0) # initialise to empty |
187 | ++ |
188 | ++ # If head or tail point beyond the file then bomb out |
189 | ++ head, tail = self._read_header() |
190 | ++ if head >= current_size or tail >= current_size: |
191 | ++ self.f.close() |
192 | ++ raise ValueError(_('RingBuffer %s is corrupt') % |
193 | ++ backing_file) |
194 | ++ else: |
195 | ++ # File is zero bytes: treat as new file |
196 | ++ self.max_size = max_size |
197 | ++ self._initialise_empty_file() |
198 | ++ else: |
199 | ++ self.max_size = max_size |
200 | ++ self._initialise_empty_file() |
201 | ++ |
202 | ++ def _initialise_empty_file(self): |
203 | ++ os.ftruncate(self.f.fileno(), self.max_size + self._header_size) |
204 | ++ self._write_header(0, 0) # head == tail means no data |
205 | ++ |
206 | ++ @staticmethod |
207 | ++ def _open(filename): |
208 | ++ """Open a file without truncating it for both reading and writing in |
209 | ++ binary mode.""" |
210 | ++ # Built-in open() cannot open in read/write mode without truncating. |
211 | ++ fd = os.open(filename, os.O_RDWR | os.O_CREAT, 0666) |
212 | ++ return os.fdopen(fd, 'w+') |
213 | ++ |
214 | ++ def _read_header(self): |
215 | ++ self.f.seek(0) |
216 | ++ return struct.unpack(self._header_format, |
217 | ++ self.f.read(self._header_size)) |
218 | ++ |
219 | ++ def _write_header(self, head, tail): |
220 | ++ self.f.seek(0) |
221 | ++ self.f.write(struct.pack(self._header_format, head, tail)) |
222 | ++ |
223 | ++ def _seek(self, pos): |
224 | ++ """Seek to pos in data (ignoring header).""" |
225 | ++ self.f.seek(self._header_size + pos) |
226 | ++ |
227 | ++ def _read_slice(self, start, end): |
228 | ++ if start == end: |
229 | ++ return '' |
230 | ++ |
231 | ++ self._seek(start) |
232 | ++ return self.f.read(end - start) |
233 | ++ |
234 | ++ def _write_slice(self, pos, data): |
235 | ++ self._seek(pos) |
236 | ++ self.f.write(data) |
237 | ++ |
238 | ++ def peek(self): |
239 | ++ """Read the entire ringbuffer without consuming it.""" |
240 | ++ head, tail = self._read_header() |
241 | ++ if head < tail: |
242 | ++ # Wraps around |
243 | ++ before_wrap = self._read_slice(tail, self.max_size) |
244 | ++ after_wrap = self._read_slice(0, head) |
245 | ++ return before_wrap + after_wrap |
246 | ++ else: |
247 | ++ # Just from here to head |
248 | ++ return self._read_slice(tail, head) |
249 | ++ |
250 | ++ def write(self, data): |
251 | ++ """Write some amount of data to the ringbuffer, discarding the oldest |
252 | ++ data as max_size is exceeded.""" |
253 | ++ head, tail = self._read_header() |
254 | ++ while data: |
255 | ++ # Amount of data to be written on this pass |
256 | ++ len_to_write = min(len(data), self.max_size - head) |
257 | ++ |
258 | ++ # Where head will be after this write |
259 | ++ new_head = head + len_to_write |
260 | ++ |
261 | ++ # In the next comparison, new_head may be self.max_size which is |
262 | ++ # logically the same point as tail == 0 and must still be within |
263 | ++ # the range tested. |
264 | ++ unwrapped_tail = tail if tail else self.max_size |
265 | ++ |
266 | ++ if head < unwrapped_tail <= new_head: |
267 | ++ # Write will go past tail so tail needs to be pushed back |
268 | ++ tail = new_head + 1 # one past head to indicate full |
269 | ++ tail %= self.max_size |
270 | ++ self._write_header(head, tail) |
271 | ++ |
272 | ++ # Write the data |
273 | ++ self._write_slice(head, data[:len_to_write]) |
274 | ++ data = data[len_to_write:] # data now left |
275 | ++ |
276 | ++ # Push head back |
277 | ++ head = new_head |
278 | ++ head %= self.max_size |
279 | ++ self._write_header(head, tail) |
280 | ++ |
281 | ++ def flush(self): |
282 | ++ self.f.flush() |
283 | ++ |
284 | ++ def close(self): |
285 | ++ self.f.close() |
286 | + |
287 | + |
288 | + @contextlib.contextmanager |
289 | +Index: nova-src/nova/virt/libvirt/connection.py |
290 | +=================================================================== |
291 | +--- nova-src.orig/nova/virt/libvirt/connection.py 2012-02-28 10:30:58.000000000 -0800 |
292 | ++++ nova-src/nova/virt/libvirt/connection.py 2012-02-28 10:32:36.090640111 -0800 |
293 | +@@ -39,15 +39,19 @@ |
294 | |
295 | """ |
296 | |
297 | @@ -109,15 +327,15 @@ |
298 | import os |
299 | +import select |
300 | import shutil |
301 | ++import stat |
302 | import sys |
303 | -+import stat |
304 | import uuid |
305 | |
306 | +import eventlet |
307 | from eventlet import greenthread |
308 | from xml.dom import minidom |
309 | from xml.etree import ElementTree |
310 | -@@ -151,6 +155,9 @@ libvirt_opts = [ |
311 | +@@ -151,6 +155,9 @@ |
312 | help='Override the default disk prefix for the devices attached' |
313 | ' to a server, which is dependent on libvirt_type. ' |
314 | '(valid options are: sd, xvd, uvd, vd)'), |
315 | @@ -127,7 +345,7 @@ |
316 | ] |
317 | |
318 | FLAGS = flags.FLAGS |
319 | -@@ -184,6 +191,57 @@ def _get_eph_disk(ephemeral): |
320 | +@@ -184,6 +191,57 @@ |
321 | return 'disk.eph' + str(ephemeral['num']) |
322 | |
323 | |
324 | @@ -185,7 +403,7 @@ |
325 | class LibvirtConnection(driver.ComputeDriver): |
326 | |
327 | def __init__(self, read_only): |
328 | -@@ -217,6 +275,8 @@ class LibvirtConnection(driver.ComputeDr |
329 | +@@ -217,6 +275,8 @@ |
330 | |
331 | self.image_cache_manager = imagecache.ImageCacheManager() |
332 | |
333 | @@ -194,7 +412,7 @@ |
334 | @property |
335 | def host_state(self): |
336 | if not self._host_state: |
337 | -@@ -225,7 +285,11 @@ class LibvirtConnection(driver.ComputeDr |
338 | +@@ -225,7 +285,11 @@ |
339 | |
340 | def init_host(self, host): |
341 | # NOTE(nsokolov): moved instance restarting to ComputeManager |
342 | @@ -207,7 +425,7 @@ |
343 | |
344 | @property |
345 | def libvirt_xml(self): |
346 | -@@ -302,6 +366,15 @@ class LibvirtConnection(driver.ComputeDr |
347 | +@@ -302,6 +366,15 @@ |
348 | return [self._conn.lookupByID(x).name() |
349 | for x in self._conn.listDomainsID() |
350 | if x != 0] # We skip domains with ID 0 (hypervisors). |
351 | @@ -223,7 +441,7 @@ |
352 | |
353 | @staticmethod |
354 | def _map_to_instance_info(domain): |
355 | -@@ -427,6 +500,7 @@ class LibvirtConnection(driver.ComputeDr |
356 | +@@ -427,6 +500,7 @@ |
357 | def _cleanup(self, instance): |
358 | target = os.path.join(FLAGS.instances_path, instance['name']) |
359 | instance_name = instance['name'] |
360 | @@ -231,7 +449,7 @@ |
361 | LOG.info(_('Deleting instance files %(target)s') % locals(), |
362 | instance=instance) |
363 | if FLAGS.libvirt_type == 'lxc': |
364 | -@@ -856,10 +930,10 @@ class LibvirtConnection(driver.ComputeDr |
365 | +@@ -867,10 +941,10 @@ |
366 | |
367 | @exception.wrap_exception() |
368 | def get_console_output(self, instance): |
369 | @@ -245,7 +463,7 @@ |
370 | |
371 | if FLAGS.libvirt_type == 'xen': |
372 | # Xen is special |
373 | -@@ -867,14 +941,12 @@ class LibvirtConnection(driver.ComputeDr |
374 | +@@ -878,14 +952,12 @@ |
375 | 'ttyconsole', |
376 | instance['name']) |
377 | data = self._flush_xen_console(virsh_output) |
378 | @@ -262,7 +480,7 @@ |
379 | |
380 | @staticmethod |
381 | def get_host_ip_addr(): |
382 | -@@ -1000,8 +1072,25 @@ class LibvirtConnection(driver.ComputeDr |
383 | +@@ -1011,8 +1083,25 @@ |
384 | container_dir = '%s/rootfs' % basepath(suffix='') |
385 | libvirt_utils.ensure_tree(container_dir) |
386 | |
387 | @@ -290,9 +508,10 @@ |
388 | |
389 | if not disk_images: |
390 | disk_images = {'image_id': instance['image_ref'], |
391 | -diff -Naurp nova.orig/nova/virt/libvirt.xml.template nova/nova/virt/libvirt.xml.template |
392 | ---- nova.orig/nova/virt/libvirt.xml.template 2012-02-28 09:08:10.877887173 -0500 |
393 | -+++ nova/nova/virt/libvirt.xml.template 2012-02-28 09:08:58.685887622 -0500 |
394 | +Index: nova-src/nova/virt/libvirt.xml.template |
395 | +=================================================================== |
396 | +--- nova-src.orig/nova/virt/libvirt.xml.template 2012-02-28 10:30:58.000000000 -0800 |
397 | ++++ nova-src/nova/virt/libvirt.xml.template 2012-02-28 10:32:36.094640111 -0800 |
398 | @@ -160,8 +160,8 @@ |
399 | |
400 | #end for |
401 | @@ -304,319768 +523,3 @@ |
402 | <target port='1'/> |
403 | </serial> |
404 | |
405 | -diff -Naurp nova.orig/patch nova/patch |
406 | ---- nova.orig/patch 1969-12-31 19:00:00.000000000 -0500 |
407 | -+++ nova/patch 2012-02-28 09:10:28.545887239 -0500 |
408 | -@@ -0,0 +1,319761 @@ |
409 | -+diff -Naurp nova.orig/api/auth.py nova/api/auth.py |
410 | -+--- nova.orig/api/auth.py 1969-12-31 19:00:00.000000000 -0500 |
411 | -++++ nova/api/auth.py 2012-02-28 09:08:10.821887173 -0500 |
412 | -+@@ -0,0 +1,89 @@ |
413 | -++# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
414 | -++ |
415 | -++# Copyright (c) 2011 OpenStack, LLC |
416 | -++# |
417 | -++# Licensed under the Apache License, Version 2.0 (the "License"); you may |
418 | -++# not use this file except in compliance with the License. You may obtain |
419 | -++# a copy of the License at |
420 | -++# |
421 | -++# http://www.apache.org/licenses/LICENSE-2.0 |
422 | -++# |
423 | -++# Unless required by applicable law or agreed to in writing, software |
424 | -++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
425 | -++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
426 | -++# License for the specific language governing permissions and limitations |
427 | -++# under the License. |
428 | -++""" |
429 | -++Common Auth Middleware. |
430 | -++ |
431 | -++""" |
432 | -++ |
433 | -++import webob.dec |
434 | -++import webob.exc |
435 | -++ |
436 | -++from nova import context |
437 | -++from nova import flags |
438 | -++from nova import log as logging |
439 | -++from nova.openstack.common import cfg |
440 | -++from nova import wsgi |
441 | -++ |
442 | -++ |
443 | -++use_forwarded_for_opt = cfg.BoolOpt('use_forwarded_for', |
444 | -++ default=False, |
445 | -++ help='Treat X-Forwarded-For as the canonical remote address. ' |
446 | -++ 'Only enable this if you have a sanitizing proxy.') |
447 | -++ |
448 | -++FLAGS = flags.FLAGS |
449 | -++FLAGS.register_opt(use_forwarded_for_opt) |
450 | -++LOG = logging.getLogger(__name__) |
451 | -++ |
452 | -++ |
453 | -++class InjectContext(wsgi.Middleware): |
454 | -++ """Add a 'nova.context' to WSGI environ.""" |
455 | -++ |
456 | -++ def __init__(self, context, *args, **kwargs): |
457 | -++ self.context = context |
458 | -++ super(InjectContext, self).__init__(*args, **kwargs) |
459 | -++ |
460 | -++ @webob.dec.wsgify(RequestClass=wsgi.Request) |
461 | -++ def __call__(self, req): |
462 | -++ req.environ['nova.context'] = self.context |
463 | -++ return self.application |
464 | -++ |
465 | -++ |
466 | -++class NovaKeystoneContext(wsgi.Middleware): |
467 | -++ """Make a request context from keystone headers""" |
468 | -++ |
469 | -++ @webob.dec.wsgify(RequestClass=wsgi.Request) |
470 | -++ def __call__(self, req): |
471 | -++ user_id = req.headers.get('X_USER') |
472 | -++ user_id = req.headers.get('X_USER_ID', user_id) |
473 | -++ if user_id is None: |
474 | -++ LOG.debug("Neither X_USER_ID nor X_USER found in request") |
475 | -++ return webob.exc.HTTPUnauthorized() |
476 | -++ # get the roles |
477 | -++ roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')] |
478 | -++ if 'X_TENANT_ID' in req.headers: |
479 | -++ # This is the new header since Keystone went to ID/Name |
480 | -++ project_id = req.headers['X_TENANT_ID'] |
481 | -++ else: |
482 | -++ # This is for legacy compatibility |
483 | -++ project_id = req.headers['X_TENANT'] |
484 | -++ |
485 | -++ # Get the auth token |
486 | -++ auth_token = req.headers.get('X_AUTH_TOKEN', |
487 | -++ req.headers.get('X_STORAGE_TOKEN')) |
488 | -++ |
489 | -++ # Build a context, including the auth_token... |
490 | -++ remote_address = req.remote_addr |
491 | -++ if FLAGS.use_forwarded_for: |
492 | -++ remote_address = req.headers.get('X-Forwarded-For', remote_address) |
493 | -++ ctx = context.RequestContext(user_id, |
494 | -++ project_id, |
495 | -++ roles=roles, |
496 | -++ auth_token=auth_token, |
497 | -++ strategy='keystone', |
498 | -++ remote_address=remote_address) |
499 | -++ |
500 | -++ req.environ['nova.context'] = ctx |
501 | -++ return self.application |
502 | -+diff -Naurp nova.orig/api/direct.py nova/api/direct.py |
503 | -+--- nova.orig/api/direct.py 1969-12-31 19:00:00.000000000 -0500 |
504 | -++++ nova/api/direct.py 2012-02-28 09:08:10.821887173 -0500 |
505 | -+@@ -0,0 +1,378 @@ |
506 | -++# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
507 | -++ |
508 | -++# Copyright 2010 United States Government as represented by the |
509 | -++# Administrator of the National Aeronautics and Space Administration. |
510 | -++# All Rights Reserved. |
511 | -++# |
512 | -++# Licensed under the Apache License, Version 2.0 (the "License"); you may |
513 | -++# not use this file except in compliance with the License. You may obtain |
514 | -++# a copy of the License at |
515 | -++# |
516 | -++# http://www.apache.org/licenses/LICENSE-2.0 |
517 | -++# |
518 | -++# Unless required by applicable law or agreed to in writing, software |
519 | -++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
520 | -++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
521 | -++# License for the specific language governing permissions and limitations |
522 | -++# under the License. |
523 | -++ |
524 | -++"""Public HTTP interface that allows services to self-register. |
525 | -++ |
526 | -++The general flow of a request is: |
527 | -++ - Request is parsed into WSGI bits. |
528 | -++ - Some middleware checks authentication. |
529 | -++ - Routing takes place based on the URL to find a controller. |
530 | -++ (/controller/method) |
531 | -++ - Parameters are parsed from the request and passed to a method on the |
532 | -++ controller as keyword arguments. |
533 | -++ - Optionally 'json' is decoded to provide all the parameters. |
534 | -++ - Actual work is done and a result is returned. |
535 | -++ - That result is turned into json and returned. |
536 | -++ |
537 | -++""" |
538 | -++ |
539 | -++import inspect |
540 | -++import urllib |
541 | -++ |
542 | -++import routes |
543 | -++import webob |
544 | -++ |
545 | -++import nova.api.openstack.wsgi |
546 | -++from nova import context |
547 | -++from nova import exception |
548 | -++from nova import utils |
549 | -++from nova import wsgi |
550 | -++ |
551 | -++ |
552 | -++# Global storage for registering modules. |
553 | -++ROUTES = {} |
554 | -++ |
555 | -++ |
556 | -++def register_service(path, handle): |
557 | -++ """Register a service handle at a given path. |
558 | -++ |
559 | -++ Services registered in this way will be made available to any instances of |
560 | -++ nova.api.direct.Router. |
561 | -++ |
562 | -++ :param path: `routes` path, can be a basic string like "/path" |
563 | -++ :param handle: an object whose methods will be made available via the api |
564 | -++ |
565 | -++ """ |
566 | -++ ROUTES[path] = handle |
567 | -++ |
568 | -++ |
569 | -++class Router(wsgi.Router): |
570 | -++ """A simple WSGI router configured via `register_service`. |
571 | -++ |
572 | -++ This is a quick way to attach multiple services to a given endpoint. |
573 | -++ It will automatically load the routes registered in the `ROUTES` global. |
574 | -++ |
575 | -++ TODO(termie): provide a paste-deploy version of this. |
576 | -++ |
577 | -++ """ |
578 | -++ |
579 | -++ def __init__(self, mapper=None): |
580 | -++ if mapper is None: |
581 | -++ mapper = routes.Mapper() |
582 | -++ |
583 | -++ self._load_registered_routes(mapper) |
584 | -++ super(Router, self).__init__(mapper=mapper) |
585 | -++ |
586 | -++ def _load_registered_routes(self, mapper): |
587 | -++ for route in ROUTES: |
588 | -++ mapper.connect('/%s/{action}' % route, |
589 | -++ controller=ServiceWrapper(ROUTES[route])) |
590 | -++ |
591 | -++ |
592 | -++class DelegatedAuthMiddleware(wsgi.Middleware): |
593 | -++ """A simple and naive authentication middleware. |
594 | -++ |
595 | -++ Designed mostly to provide basic support for alternative authentication |
596 | -++ schemes, this middleware only desires the identity of the user and will |
597 | -++ generate the appropriate nova.context.RequestContext for the rest of the |
598 | -++ application. This allows any middleware above it in the stack to |
599 | -++ authenticate however it would like while only needing to conform to a |
600 | -++ minimal interface. |
601 | -++ |
602 | -++ Expects two headers to determine identity: |
603 | -++ - X-OpenStack-User |
604 | -++ - X-OpenStack-Project |
605 | -++ |
606 | -++ This middleware is tied to identity management and will need to be kept |
607 | -++ in sync with any changes to the way identity is dealt with internally. |
608 | -++ |
609 | -++ """ |
610 | -++ |
611 | -++ def process_request(self, request): |
612 | -++ os_user = request.headers['X-OpenStack-User'] |
613 | -++ os_project = request.headers['X-OpenStack-Project'] |
614 | -++ context_ref = context.RequestContext(user_id=os_user, |
615 | -++ project_id=os_project) |
616 | -++ request.environ['openstack.context'] = context_ref |
617 | -++ |
618 | -++ |
619 | -++class JsonParamsMiddleware(wsgi.Middleware): |
620 | -++ """Middleware to allow method arguments to be passed as serialized JSON. |
621 | -++ |
622 | -++ Accepting arguments as JSON is useful for accepting data that may be more |
623 | -++ complex than simple primitives. |
624 | -++ |
625 | -++ In this case we accept it as urlencoded data under the key 'json' as in |
626 | -++ json=<urlencoded_json> but this could be extended to accept raw JSON |
627 | -++ in the POST body. |
628 | -++ |
629 | -++ Filters out the parameters `self`, `context` and anything beginning with |
630 | -++ an underscore. |
631 | -++ |
632 | -++ """ |
633 | -++ |
634 | -++ def process_request(self, request): |
635 | -++ if 'json' not in request.params: |
636 | -++ return |
637 | -++ |
638 | -++ params_json = request.params['json'] |
639 | -++ params_parsed = utils.loads(params_json) |
640 | -++ params = {} |
641 | -++ for k, v in params_parsed.iteritems(): |
642 | -++ if k in ('self', 'context'): |
643 | -++ continue |
644 | -++ if k.startswith('_'): |
645 | -++ continue |
646 | -++ params[k] = v |
647 | -++ |
648 | -++ request.environ['openstack.params'] = params |
649 | -++ |
650 | -++ |
651 | -++class PostParamsMiddleware(wsgi.Middleware): |
652 | -++ """Middleware to allow method arguments to be passed as POST parameters. |
653 | -++ |
654 | -++ Filters out the parameters `self`, `context` and anything beginning with |
655 | -++ an underscore. |
656 | -++ |
657 | -++ """ |
658 | -++ |
659 | -++ def process_request(self, request): |
660 | -++ params_parsed = request.params |
661 | -++ params = {} |
662 | -++ for k, v in params_parsed.iteritems(): |
663 | -++ if k in ('self', 'context'): |
664 | -++ continue |
665 | -++ if k.startswith('_'): |
666 | -++ continue |
667 | -++ params[k] = v |
668 | -++ |
669 | -++ request.environ['openstack.params'] = params |
670 | -++ |
671 | -++ |
672 | -++class Reflection(object): |
673 | -++ """Reflection methods to list available methods. |
674 | -++ |
675 | -++ This is an object that expects to be registered via register_service. |
676 | -++ These methods allow the endpoint to be self-describing. They introspect |
677 | -++ the exposed methods and provide call signatures and documentation for |
678 | -++ them allowing quick experimentation. |
679 | -++ |
680 | -++ """ |
681 | -++ |
682 | -++ def __init__(self): |
683 | -++ self._methods = {} |
684 | -++ self._controllers = {} |
685 | -++ |
686 | -++ def _gather_methods(self): |
687 | -++ """Introspect available methods and generate documentation for them.""" |
688 | -++ methods = {} |
689 | -++ controllers = {} |
690 | -++ for route, handler in ROUTES.iteritems(): |
691 | -++ controllers[route] = handler.__doc__.split('\n')[0] |
692 | -++ for k in dir(handler): |
693 | -++ if k.startswith('_'): |
694 | -++ continue |
695 | -++ f = getattr(handler, k) |
696 | -++ if not callable(f): |
697 | -++ continue |
698 | -++ |
699 | -++ # bunch of ugly formatting stuff |
700 | -++ argspec = inspect.getargspec(f) |
701 | -++ args = [x for x in argspec[0] |
702 | -++ if x != 'self' and x != 'context'] |
703 | -++ defaults = argspec[3] and argspec[3] or [] |
704 | -++ args_r = list(reversed(args)) |
705 | -++ defaults_r = list(reversed(defaults)) |
706 | -++ |
707 | -++ args_out = [] |
708 | -++ while args_r: |
709 | -++ if defaults_r: |
710 | -++ args_out.append((args_r.pop(0), |
711 | -++ repr(defaults_r.pop(0)))) |
712 | -++ else: |
713 | -++ args_out.append((str(args_r.pop(0)),)) |
714 | -++ |
715 | -++ # if the method accepts keywords |
716 | -++ if argspec[2]: |
717 | -++ args_out.insert(0, ('**%s' % argspec[2],)) |
718 | -++ |
719 | -++ if f.__doc__: |
720 | -++ short_doc = f.__doc__.split('\n')[0] |
721 | -++ doc = f.__doc__ |
722 | -++ else: |
723 | -++ short_doc = doc = _('not available') |
724 | -++ |
725 | -++ methods['/%s/%s' % (route, k)] = { |
726 | -++ 'short_doc': short_doc, |
727 | -++ 'doc': doc, |
728 | -++ 'name': k, |
729 | -++ 'args': list(reversed(args_out))} |
730 | -++ |
731 | -++ self._methods = methods |
732 | -++ self._controllers = controllers |
733 | -++ |
734 | -++ def get_controllers(self, context): |
735 | -++ """List available controllers.""" |
736 | -++ if not self._controllers: |
737 | -++ self._gather_methods() |
738 | -++ |
739 | -++ return self._controllers |
740 | -++ |
741 | -++ def get_methods(self, context): |
742 | -++ """List available methods.""" |
743 | -++ if not self._methods: |
744 | -++ self._gather_methods() |
745 | -++ |
746 | -++ method_list = self._methods.keys() |
747 | -++ method_list.sort() |
748 | -++ methods = {} |
749 | -++ for k in method_list: |
750 | -++ methods[k] = self._methods[k]['short_doc'] |
751 | -++ return methods |
752 | -++ |
753 | -++ def get_method_info(self, context, method): |
754 | -++ """Get detailed information about a method.""" |
755 | -++ if not self._methods: |
756 | -++ self._gather_methods() |
757 | -++ return self._methods[method] |
758 | -++ |
759 | -++ |
760 | -++class ServiceWrapper(object): |
761 | -++ """Wrapper to dynamically provide a WSGI controller for arbitrary objects. |
762 | -++ |
763 | -++ With lightweight introspection allows public methods on the object to |
764 | -++ be accessed via simple WSGI routing and parameters and serializes the |
765 | -++ return values. |
766 | -++ |
767 | -++ Automatically used be nova.api.direct.Router to wrap registered instances. |
768 | -++ |
769 | -++ """ |
770 | -++ |
771 | -++ def __init__(self, service_handle): |
772 | -++ self.service_handle = service_handle |
773 | -++ |
774 | -++ @webob.dec.wsgify(RequestClass=nova.api.openstack.wsgi.Request) |
775 | -++ def __call__(self, req): |
776 | -++ arg_dict = req.environ['wsgiorg.routing_args'][1] |
777 | -++ action = arg_dict['action'] |
778 | -++ del arg_dict['action'] |
779 | -++ |
780 | -++ context = req.environ['openstack.context'] |
781 | -++ # allow middleware up the stack to override the params |
782 | -++ params = {} |
783 | -++ if 'openstack.params' in req.environ: |
784 | -++ params = req.environ['openstack.params'] |
785 | -++ |
786 | -++ # TODO(termie): do some basic normalization on methods |
787 | -++ method = getattr(self.service_handle, action) |
788 | -++ |
789 | -++ # NOTE(vish): make sure we have no unicode keys for py2.6. |
790 | -++ params = dict([(str(k), v) for (k, v) in params.iteritems()]) |
791 | -++ result = method(context, **params) |
792 | -++ |
793 | -++ if result is None or isinstance(result, basestring): |
794 | -++ return result |
795 | -++ |
796 | -++ try: |
797 | -++ content_type = req.best_match_content_type() |
798 | -++ serializer = { |
799 | -++ 'application/xml': nova.api.openstack.wsgi.XMLDictSerializer(), |
800 | -++ 'application/json': nova.api.openstack.wsgi.JSONDictSerializer(), |
801 | -++ }[content_type] |
802 | -++ return serializer.serialize(result) |
803 | -++ except Exception, e: |
804 | -++ raise exception.Error(_("Returned non-serializeable type: %s") |
805 | -++ % result) |
806 | -++ |
807 | -++ |
808 | -++class Limited(object): |
809 | -++ __notdoc = """Limit the available methods on a given object. |
810 | -++ |
811 | -++ (Not a docstring so that the docstring can be conditionally overridden.) |
812 | -++ |
813 | -++ Useful when defining a public API that only exposes a subset of an |
814 | -++ internal API. |
815 | -++ |
816 | -++ Expected usage of this class is to define a subclass that lists the allowed |
817 | -++ methods in the 'allowed' variable. |
818 | -++ |
819 | -++ Additionally where appropriate methods can be added or overwritten, for |
820 | -++ example to provide backwards compatibility. |
821 | -++ |
822 | -++ The wrapping approach has been chosen so that the wrapped API can maintain |
823 | -++ its own internal consistency, for example if it calls "self.create" it |
824 | -++ should get its own create method rather than anything we do here. |
825 | -++ |
826 | -++ """ |
827 | -++ |
828 | -++ _allowed = None |
829 | -++ |
830 | -++ def __init__(self, proxy): |
831 | -++ self._proxy = proxy |
832 | -++ if not self.__doc__: # pylint: disable=E0203 |
833 | -++ self.__doc__ = proxy.__doc__ |
834 | -++ if not self._allowed: |
835 | -++ self._allowed = [] |
836 | -++ |
837 | -++ def __getattr__(self, key): |
838 | -++ """Only return methods that are named in self._allowed.""" |
839 | -++ if key not in self._allowed: |
840 | -++ raise AttributeError() |
841 | -++ return getattr(self._proxy, key) |
842 | -++ |
843 | -++ def __dir__(self): |
844 | -++ """Only return methods that are named in self._allowed.""" |
845 | -++ return [x for x in dir(self._proxy) if x in self._allowed] |
846 | -++ |
847 | -++ |
848 | -++class Proxy(object): |
849 | -++ """Pretend a Direct API endpoint is an object. |
850 | -++ |
851 | -++ This is mostly useful in testing at the moment though it should be easily |
852 | -++ extendable to provide a basic API library functionality. |
853 | -++ |
854 | -++ In testing we use this to stub out internal objects to verify that results |
855 | -++ from the API are serializable. |
856 | -++ |
857 | -++ """ |
858 | -++ |
859 | -++ def __init__(self, app, prefix=None): |
860 | -++ self.app = app |
861 | -++ self.prefix = prefix |
862 | -++ |
863 | -++ def __do_request(self, path, context, **kwargs): |
864 | -++ req = wsgi.Request.blank(path) |
865 | -++ req.method = 'POST' |
866 | -++ req.body = urllib.urlencode({'json': utils.dumps(kwargs)}) |
867 | -++ req.environ['openstack.context'] = context |
868 | -++ resp = req.get_response(self.app) |
869 | -++ try: |
870 | -++ return utils.loads(resp.body) |
871 | -++ except Exception: |
872 | -++ return resp.body |
873 | -++ |
874 | -++ def __getattr__(self, key): |
875 | -++ if self.prefix is None: |
876 | -++ return self.__class__(self.app, prefix=key) |
877 | -++ |
878 | -++ def _wrapper(context, **kwargs): |
879 | -++ return self.__do_request('/%s/%s' % (self.prefix, key), |
880 | -++ context, |
881 | -++ **kwargs) |
882 | -++ _wrapper.func_name = key |
883 | -++ return _wrapper |
884 | -+diff -Naurp nova.orig/api/ec2/apirequest.py nova/api/ec2/apirequest.py |
885 | -+--- nova.orig/api/ec2/apirequest.py 1969-12-31 19:00:00.000000000 -0500 |
886 | -++++ nova/api/ec2/apirequest.py 2012-02-28 09:08:10.821887173 -0500 |
887 | -+@@ -0,0 +1,139 @@ |
888 | -++# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
889 | -++ |
890 | -++# Copyright 2010 United States Government as represented by the |
891 | -++# Administrator of the National Aeronautics and Space Administration. |
892 | -++# All Rights Reserved. |
893 | -++# |
894 | -++# Licensed under the Apache License, Version 2.0 (the "License"); you may |
895 | -++# not use this file except in compliance with the License. You may obtain |
896 | -++# a copy of the License at |
897 | -++# |
898 | -++# http://www.apache.org/licenses/LICENSE-2.0 |
899 | -++# |
900 | -++# Unless required by applicable law or agreed to in writing, software |
901 | -++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
902 | -++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
903 | -++# License for the specific language governing permissions and limitations |
904 | -++# under the License. |
905 | -++ |
906 | -++""" |
907 | -++APIRequest class |
908 | -++""" |
909 | -++ |
910 | -++import datetime |
911 | -++# TODO(termie): replace minidom with etree |
912 | -++from xml.dom import minidom |
913 | -++ |
914 | -++from nova.api.ec2 import ec2utils |
915 | -++from nova import exception |
916 | -++from nova import flags |
917 | -++from nova import log as logging |
918 | -++ |
919 | -++LOG = logging.getLogger(__name__) |
920 | -++FLAGS = flags.FLAGS |
921 | -++ |
922 | -++ |
923 | -++def _underscore_to_camelcase(str): |
924 | -++ return ''.join([x[:1].upper() + x[1:] for x in str.split('_')]) |
925 | -++ |
926 | -++ |
927 | -++def _underscore_to_xmlcase(str): |
928 | -++ res = _underscore_to_camelcase(str) |
929 | -++ return res[:1].lower() + res[1:] |
930 | -++ |
931 | -++ |
932 | -++def _database_to_isoformat(datetimeobj): |
933 | -++ """Return a xs:dateTime parsable string from datatime""" |
934 | -++ return datetimeobj.strftime("%Y-%m-%dT%H:%M:%SZ") |
935 | -++ |
936 | -++ |
937 | -++class APIRequest(object): |
938 | -++ def __init__(self, controller, action, version, args): |
939 | -++ self.controller = controller |
940 | -++ self.action = action |
941 | -++ self.version = version |
942 | -++ self.args = args |
943 | -++ |
944 | -++ def invoke(self, context): |
945 | -++ try: |
946 | -++ method = getattr(self.controller, |
947 | -++ ec2utils.camelcase_to_underscore(self.action)) |
948 | -++ except AttributeError: |
949 | -++ controller = self.controller |
950 | -++ action = self.action |
951 | -++ _error = _('Unsupported API request: controller = %(controller)s,' |
952 | -++ ' action = %(action)s') % locals() |
953 | -++ LOG.exception(_error) |
954 | -++ # TODO: Raise custom exception, trap in apiserver, |
955 | -++ # and reraise as 400 error. |
956 | -++ raise exception.InvalidRequest() |
957 | -++ |
958 | -++ args = ec2utils.dict_from_dotted_str(self.args.items()) |
959 | -++ |
960 | -++ for key in args.keys(): |
961 | -++ # NOTE(vish): Turn numeric dict keys into lists |
962 | -++ if isinstance(args[key], dict): |
963 | -++ if args[key] != {} and args[key].keys()[0].isdigit(): |
964 | -++ s = args[key].items() |
965 | -++ s.sort() |
966 | -++ args[key] = [v for k, v in s] |
967 | -++ |
968 | -++ result = method(context, **args) |
969 | -++ return self._render_response(result, context.request_id) |
970 | -++ |
971 | -++ def _render_response(self, response_data, request_id): |
972 | -++ xml = minidom.Document() |
973 | -++ |
974 | -++ response_el = xml.createElement(self.action + 'Response') |
975 | -++ response_el.setAttribute('xmlns', |
976 | -++ 'http://ec2.amazonaws.com/doc/%s/' % self.version) |
977 | -++ request_id_el = xml.createElement('requestId') |
978 | -++ request_id_el.appendChild(xml.createTextNode(request_id)) |
979 | -++ response_el.appendChild(request_id_el) |
980 | -++ if response_data is True: |
981 | -++ self._render_dict(xml, response_el, {'return': 'true'}) |
982 | -++ else: |
983 | -++ self._render_dict(xml, response_el, response_data) |
984 | -++ |
985 | -++ xml.appendChild(response_el) |
986 | -++ |
987 | -++ response = xml.toxml() |
988 | -++ xml.unlink() |
989 | -++ |
990 | -++ # Don't write private key to log |
991 | -++ if self.action != "CreateKeyPair": |
992 | -++ LOG.debug(response) |
993 | -++ else: |
994 | -++ LOG.debug("CreateKeyPair: Return Private Key") |
995 | -++ |
996 | -++ return response |
997 | -++ |
998 | -++ def _render_dict(self, xml, el, data): |
999 | -++ try: |
1000 | -++ for key in data.keys(): |
1001 | -++ val = data[key] |
1002 | -++ el.appendChild(self._render_data(xml, key, val)) |
1003 | -++ except Exception: |
1004 | -++ LOG.debug(data) |
1005 | -++ raise |
1006 | -++ |
1007 | -++ def _render_data(self, xml, el_name, data): |
1008 | -++ el_name = _underscore_to_xmlcase(el_name) |
1009 | -++ data_el = xml.createElement(el_name) |
1010 | -++ |
1011 | -++ if isinstance(data, list): |
1012 | -++ for item in data: |
1013 | -++ data_el.appendChild(self._render_data(xml, 'item', item)) |
1014 | -++ elif isinstance(data, dict): |
1015 | -++ self._render_dict(xml, data_el, data) |
1016 | -++ elif hasattr(data, '__dict__'): |
1017 | -++ self._render_dict(xml, data_el, data.__dict__) |
1018 | -++ elif isinstance(data, bool): |
1019 | -++ data_el.appendChild(xml.createTextNode(str(data).lower())) |
1020 | -++ elif isinstance(data, datetime.datetime): |
1021 | -++ data_el.appendChild( |
1022 | -++ xml.createTextNode(_database_to_isoformat(data))) |
1023 | -++ elif data is not None: |
1024 | -++ data_el.appendChild(xml.createTextNode(str(data))) |
1025 | -++ |
1026 | -++ return data_el |
1027 | -+diff -Naurp nova.orig/api/ec2/cloud.py nova/api/ec2/cloud.py |
1028 | -+--- nova.orig/api/ec2/cloud.py 1969-12-31 19:00:00.000000000 -0500 |
1029 | -++++ nova/api/ec2/cloud.py 2012-02-28 09:08:10.821887173 -0500 |
1030 | -+@@ -0,0 +1,1612 @@ |
1031 | -++# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
1032 | -++ |
1033 | -++# Copyright 2010 United States Government as represented by the |
1034 | -++# Administrator of the National Aeronautics and Space Administration. |
1035 | -++# All Rights Reserved. |
1036 | -++# |
1037 | -++# Licensed under the Apache License, Version 2.0 (the "License"); you may |
1038 | -++# not use this file except in compliance with the License. You may obtain |
1039 | -++# a copy of the License at |
1040 | -++# |
1041 | -++# http://www.apache.org/licenses/LICENSE-2.0 |
1042 | -++# |
1043 | -++# Unless required by applicable law or agreed to in writing, software |
1044 | -++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
1045 | -++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
1046 | -++# License for the specific language governing permissions and limitations |
1047 | -++# under the License. |
1048 | -++ |
1049 | -++""" |
1050 | -++Cloud Controller: Implementation of EC2 REST API calls, which are |
1051 | -++dispatched to other nodes via AMQP RPC. State is via distributed |
1052 | -++datastore. |
1053 | -++""" |
1054 | -++ |
1055 | -++import base64 |
1056 | -++import re |
1057 | -++import time |
1058 | -++import urllib |
1059 | -++ |
1060 | -++from nova.api.ec2 import ec2utils |
1061 | -++from nova.compute import instance_types |
1062 | -++from nova.api.ec2 import inst_state |
1063 | -++from nova import block_device |
1064 | -++from nova import compute |
1065 | -++from nova.compute import vm_states |
1066 | -++from nova import crypto |
1067 | -++from nova import db |
1068 | -++from nova import exception |
1069 | -++from nova import flags |
1070 | -++from nova.image import s3 |
1071 | -++from nova import log as logging |
1072 | -++from nova import network |
1073 | -++from nova import rpc |
1074 | -++from nova import utils |
1075 | -++from nova import volume |
1076 | -++from nova.api import validator |
1077 | -++ |
1078 | -++ |
1079 | -++FLAGS = flags.FLAGS |
1080 | -++flags.DECLARE('dhcp_domain', 'nova.network.manager') |
1081 | -++ |
1082 | -++LOG = logging.getLogger(__name__) |
1083 | -++ |
1084 | -++ |
1085 | -++def validate_ec2_id(val): |
1086 | -++ if not validator.validate_str()(val): |
1087 | -++ raise exception.InvalidInstanceIDMalformed(val) |
1088 | -++ try: |
1089 | -++ ec2utils.ec2_id_to_id(val) |
1090 | -++ except exception.InvalidEc2Id: |
1091 | -++ raise exception.InvalidInstanceIDMalformed(val) |
1092 | -++ |
1093 | -++ |
1094 | -++def _gen_key(context, user_id, key_name): |
1095 | -++ """Generate a key |
1096 | -++ |
1097 | -++ This is a module level method because it is slow and we need to defer |
1098 | -++ it into a process pool.""" |
1099 | -++ # NOTE(vish): generating key pair is slow so check for legal |
1100 | -++ # creation before creating key_pair |
1101 | -++ try: |
1102 | -++ db.key_pair_get(context, user_id, key_name) |
1103 | -++ raise exception.KeyPairExists(key_name=key_name) |
1104 | -++ except exception.NotFound: |
1105 | -++ pass |
1106 | -++ private_key, public_key, fingerprint = crypto.generate_key_pair() |
1107 | -++ key = {} |
1108 | -++ key['user_id'] = user_id |
1109 | -++ key['name'] = key_name |
1110 | -++ key['public_key'] = public_key |
1111 | -++ key['fingerprint'] = fingerprint |
1112 | -++ db.key_pair_create(context, key) |
1113 | -++ return {'private_key': private_key, 'fingerprint': fingerprint} |
1114 | -++ |
1115 | -++ |
1116 | -++# EC2 API can return the following values as documented in the EC2 API |
1117 | -++# http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ |
1118 | -++# ApiReference-ItemType-InstanceStateType.html |
1119 | -++# pending 0 | running 16 | shutting-down 32 | terminated 48 | stopping 64 | |
1120 | -++# stopped 80 |
1121 | -++_STATE_DESCRIPTION_MAP = { |
1122 | -++ None: inst_state.PENDING, |
1123 | -++ vm_states.ACTIVE: inst_state.RUNNING, |
1124 | -++ vm_states.BUILDING: inst_state.PENDING, |
1125 | -++ vm_states.REBUILDING: inst_state.PENDING, |
1126 | -++ vm_states.DELETED: inst_state.TERMINATED, |
1127 | -++ vm_states.SOFT_DELETE: inst_state.TERMINATED, |
1128 | -++ vm_states.STOPPED: inst_state.STOPPED, |
1129 | -++ vm_states.SHUTOFF: inst_state.SHUTOFF, |
1130 | -++ vm_states.MIGRATING: inst_state.MIGRATE, |
1131 | -++ vm_states.RESIZING: inst_state.RESIZE, |
1132 | -++ vm_states.PAUSED: inst_state.PAUSE, |
1133 | -++ vm_states.SUSPENDED: inst_state.SUSPEND, |
1134 | -++ vm_states.RESCUED: inst_state.RESCUE, |
1135 | -++} |
1136 | -++ |
1137 | -++ |
1138 | -++def _state_description(vm_state, shutdown_terminate): |
1139 | -++ """Map the vm state to the server status string""" |
1140 | -++ if (vm_state == vm_states.SHUTOFF and |
1141 | -++ not shutdown_terminate): |
1142 | -++ name = inst_state.STOPPED |
1143 | -++ else: |
1144 | -++ name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state) |
1145 | -++ |
1146 | -++ return {'code': inst_state.name_to_code(name), |
1147 | -++ 'name': name} |
1148 | -++ |
1149 | -++ |
1150 | -++def _parse_block_device_mapping(bdm): |
1151 | -++ """Parse BlockDeviceMappingItemType into flat hash |
1152 | -++ BlockDevicedMapping.<N>.DeviceName |
1153 | -++ BlockDevicedMapping.<N>.Ebs.SnapshotId |
1154 | -++ BlockDevicedMapping.<N>.Ebs.VolumeSize |
1155 | -++ BlockDevicedMapping.<N>.Ebs.DeleteOnTermination |
1156 | -++ BlockDevicedMapping.<N>.Ebs.NoDevice |
1157 | -++ BlockDevicedMapping.<N>.VirtualName |
1158 | -++ => remove .Ebs and allow volume id in SnapshotId |
1159 | -++ """ |
1160 | -++ ebs = bdm.pop('ebs', None) |
1161 | -++ if ebs: |
1162 | -++ ec2_id = ebs.pop('snapshot_id', None) |
1163 | -++ if ec2_id: |
1164 | -++ id = ec2utils.ec2_id_to_id(ec2_id) |
1165 | -++ if ec2_id.startswith('snap-'): |
1166 | -++ bdm['snapshot_id'] = id |
1167 | -++ elif ec2_id.startswith('vol-'): |
1168 | -++ bdm['volume_id'] = id |
1169 | -++ ebs.setdefault('delete_on_termination', True) |
1170 | -++ bdm.update(ebs) |
1171 | -++ return bdm |
1172 | -++ |
1173 | -++ |
1174 | -++def _properties_get_mappings(properties): |
1175 | -++ return block_device.mappings_prepend_dev(properties.get('mappings', [])) |
1176 | -++ |
1177 | -++ |
1178 | -++def _format_block_device_mapping(bdm): |
1179 | -++ """Contruct BlockDeviceMappingItemType |
1180 | -++ {'device_name': '...', 'snapshot_id': , ...} |
1181 | -++ => BlockDeviceMappingItemType |
1182 | -++ """ |
1183 | -++ keys = (('deviceName', 'device_name'), |
1184 | -++ ('virtualName', 'virtual_name')) |
1185 | -++ item = {} |
1186 | -++ for name, k in keys: |
1187 | -++ if k in bdm: |
1188 | -++ item[name] = bdm[k] |
1189 | -++ if bdm.get('no_device'): |
1190 | -++ item['noDevice'] = True |
1191 | -++ if ('snapshot_id' in bdm) or ('volume_id' in bdm): |
1192 | -++ ebs_keys = (('snapshotId', 'snapshot_id'), |
1193 | -++ ('snapshotId', 'volume_id'), # snapshotId is abused |
1194 | -++ ('volumeSize', 'volume_size'), |
1195 | -++ ('deleteOnTermination', 'delete_on_termination')) |
1196 | -++ ebs = {} |
1197 | -++ for name, k in ebs_keys: |
1198 | -++ if k in bdm: |
1199 | -++ if k == 'snapshot_id': |
1200 | -++ ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k]) |
1201 | -++ elif k == 'volume_id': |
1202 | -++ ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k]) |
1203 | -++ else: |
1204 | -++ ebs[name] = bdm[k] |
1205 | -++ assert 'snapshotId' in ebs |
1206 | -++ item['ebs'] = ebs |
1207 | -++ return item |
1208 | -++ |
1209 | -++ |
1210 | -++def _format_mappings(properties, result): |
1211 | -++ """Format multiple BlockDeviceMappingItemType""" |
1212 | -++ mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']} |
1213 | -++ for m in _properties_get_mappings(properties) |
1214 | -++ if block_device.is_swap_or_ephemeral(m['virtual'])] |
1215 | -++ |
1216 | -++ block_device_mapping = [_format_block_device_mapping(bdm) for bdm in |
1217 | -++ properties.get('block_device_mapping', [])] |
1218 | -++ |
1219 | -++ # NOTE(yamahata): overwrite mappings with block_device_mapping |
1220 | -++ for bdm in block_device_mapping: |
1221 | -++ for i in range(len(mappings)): |
1222 | -++ if bdm['deviceName'] == mappings[i]['deviceName']: |
1223 | -++ del mappings[i] |
1224 | -++ break |
1225 | -++ mappings.append(bdm) |
1226 | -++ |
1227 | -++ # NOTE(yamahata): trim ebs.no_device == true. Is this necessary? |
1228 | -++ mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))] |
1229 | -++ |
1230 | -++ if mappings: |
1231 | -++ result['blockDeviceMapping'] = mappings |
1232 | -++ |
1233 | -++ |
1234 | -++class CloudController(object): |
1235 | -++ """ CloudController provides the critical dispatch between |
1236 | -++ inbound API calls through the endpoint and messages |
1237 | -++ sent to the other nodes. |
1238 | -++""" |
1239 | -++ def __init__(self): |
1240 | -++ self.image_service = s3.S3ImageService() |
1241 | -++ self.network_api = network.API() |
1242 | -++ self.volume_api = volume.API() |
1243 | -++ self.compute_api = compute.API(network_api=self.network_api, |
1244 | -++ volume_api=self.volume_api) |
1245 | -++ self.sgh = utils.import_object(FLAGS.security_group_handler) |
1246 | -++ |
1247 | -++ def __str__(self): |
1248 | -++ return 'CloudController' |
1249 | -++ |
1250 | -++ def _get_image_state(self, image): |
1251 | -++ # NOTE(vish): fallback status if image_state isn't set |
1252 | -++ state = image.get('status') |
1253 | -++ if state == 'active': |
1254 | -++ state = 'available' |
1255 | -++ return image['properties'].get('image_state', state) |
1256 | -++ |
1257 | -++ def describe_availability_zones(self, context, **kwargs): |
1258 | -++ if ('zone_name' in kwargs and |
1259 | -++ 'verbose' in kwargs['zone_name'] and |
1260 | -++ context.is_admin): |
1261 | -++ return self._describe_availability_zones_verbose(context, |
1262 | -++ **kwargs) |
1263 | -++ else: |
1264 | -++ return self._describe_availability_zones(context, **kwargs) |
1265 | -++ |
1266 | -++ def _describe_availability_zones(self, context, **kwargs): |
1267 | -++ ctxt = context.elevated() |
1268 | -++ enabled_services = db.service_get_all(ctxt, False) |
1269 | -++ disabled_services = db.service_get_all(ctxt, True) |
1270 | -++ available_zones = [] |
1271 | -++ for zone in [service.availability_zone for service |
1272 | -++ in enabled_services]: |
1273 | -++ if not zone in available_zones: |
1274 | -++ available_zones.append(zone) |
1275 | -++ not_available_zones = [] |
1276 | -++ for zone in [service.availability_zone for service in disabled_services |
1277 | -++ if not service['availability_zone'] in available_zones]: |
1278 | -++ if not zone in not_available_zones: |
1279 | -++ not_available_zones.append(zone) |
1280 | -++ result = [] |
1281 | -++ for zone in available_zones: |
1282 | -++ result.append({'zoneName': zone, |
1283 | -++ 'zoneState': "available"}) |
1284 | -++ for zone in not_available_zones: |
1285 | -++ result.append({'zoneName': zone, |
1286 | -++ 'zoneState': "not available"}) |
1287 | -++ return {'availabilityZoneInfo': result} |
1288 | -++ |
1289 | -++ def _describe_availability_zones_verbose(self, context, **kwargs): |
1290 | -++ rv = {'availabilityZoneInfo': [{'zoneName': 'nova', |
1291 | -++ 'zoneState': 'available'}]} |
1292 | -++ |
1293 | -++ services = db.service_get_all(context, False) |
1294 | -++ hosts = [] |
1295 | -++ for host in [service['host'] for service in services]: |
1296 | -++ if not host in hosts: |
1297 | -++ hosts.append(host) |
1298 | -++ for host in hosts: |
1299 | -++ rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host, |
1300 | -++ 'zoneState': ''}) |
1301 | -++ hsvcs = [service for service in services |
1302 | -++ if service['host'] == host] |
1303 | -++ for svc in hsvcs: |
1304 | -++ alive = utils.service_is_up(svc) |
1305 | -++ art = (alive and ":-)") or "XXX" |
1306 | -++ active = 'enabled' |
1307 | -++ if svc['disabled']: |
1308 | -++ active = 'disabled' |
1309 | -++ rv['availabilityZoneInfo'].append({ |
1310 | -++ 'zoneName': '| |- %s' % svc['binary'], |
1311 | -++ 'zoneState': '%s %s %s' % (active, art, |
1312 | -++ svc['updated_at'])}) |
1313 | -++ return rv |
1314 | -++ |
1315 | -++ def describe_regions(self, context, region_name=None, **kwargs): |
1316 | -++ if FLAGS.region_list: |
1317 | -++ regions = [] |
1318 | -++ for region in FLAGS.region_list: |
1319 | -++ name, _sep, host = region.partition('=') |
1320 | -++ endpoint = '%s://%s:%s%s' % (FLAGS.ec2_scheme, |
1321 | -++ host, |
1322 | -++ FLAGS.ec2_port, |
1323 | -++ FLAGS.ec2_path) |
1324 | -++ regions.append({'regionName': name, |
1325 | -++ 'regionEndpoint': endpoint}) |
1326 | -++ else: |
1327 | -++ regions = [{'regionName': 'nova', |
1328 | -++ 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_scheme, |
1329 | -++ FLAGS.ec2_host, |
1330 | -++ FLAGS.ec2_port, |
1331 | -++ FLAGS.ec2_path)}] |
1332 | -++ return {'regionInfo': regions} |
1333 | -++ |
1334 | -++ def describe_snapshots(self, |
1335 | -++ context, |
1336 | -++ snapshot_id=None, |
1337 | -++ owner=None, |
1338 | -++ restorable_by=None, |
1339 | -++ **kwargs): |
1340 | -++ if snapshot_id: |
1341 | -++ snapshots = [] |
1342 | -++ for ec2_id in snapshot_id: |
1343 | -++ internal_id = ec2utils.ec2_id_to_id(ec2_id) |
1344 | -++ snapshot = self.volume_api.get_snapshot( |
1345 | -++ context, |
1346 | -++ snapshot_id=internal_id) |
1347 | -++ snapshots.append(snapshot) |
1348 | -++ else: |
1349 | -++ snapshots = self.volume_api.get_all_snapshots(context) |
1350 | -++ snapshots = [self._format_snapshot(context, s) for s in snapshots] |
1351 | -++ return {'snapshotSet': snapshots} |
1352 | -++ |
1353 | -++ def _format_snapshot(self, context, snapshot): |
1354 | -++ s = {} |
1355 | -++ s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id']) |
1356 | -++ s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id']) |
1357 | -++ s['status'] = snapshot['status'] |
1358 | -++ s['startTime'] = snapshot['created_at'] |
1359 | -++ s['progress'] = snapshot['progress'] |
1360 | -++ s['ownerId'] = snapshot['project_id'] |
1361 | -++ s['volumeSize'] = snapshot['volume_size'] |
1362 | -++ s['description'] = snapshot['display_description'] |
1363 | -++ return s |
1364 | -++ |
1365 | -++ def create_snapshot(self, context, volume_id, **kwargs): |
1366 | -++ validate_ec2_id(volume_id) |
1367 | -++ LOG.audit(_("Create snapshot of volume %s"), volume_id, |
1368 | -++ context=context) |
1369 | -++ volume_id = ec2utils.ec2_id_to_id(volume_id) |
1370 | -++ volume = self.volume_api.get(context, volume_id) |
1371 | -++ snapshot = self.volume_api.create_snapshot( |
1372 | -++ context, |
1373 | -++ volume, |
1374 | -++ None, |
1375 | -++ kwargs.get('description')) |
1376 | -++ return self._format_snapshot(context, snapshot) |
1377 | -++ |
1378 | -++ def delete_snapshot(self, context, snapshot_id, **kwargs): |
1379 | -++ snapshot_id = ec2utils.ec2_id_to_id(snapshot_id) |
1380 | -++ snapshot = self.volume_api.get_snapshot(context, snapshot_id) |
1381 | -++ self.volume_api.delete_snapshot(context, snapshot) |
1382 | -++ return True |
1383 | -++ |
1384 | -++ def describe_key_pairs(self, context, key_name=None, **kwargs): |
1385 | -++ key_pairs = db.key_pair_get_all_by_user(context, context.user_id) |
1386 | -++ if not key_name is None: |
1387 | -++ key_pairs = [x for x in key_pairs if x['name'] in key_name] |
1388 | -++ |
1389 | -++ result = [] |
1390 | -++ for key_pair in key_pairs: |
1391 | -++ # filter out the vpn keys |
1392 | -++ suffix = FLAGS.vpn_key_suffix |
1393 | -++ if context.is_admin or not key_pair['name'].endswith(suffix): |
1394 | -++ result.append({ |
1395 | -++ 'keyName': key_pair['name'], |
1396 | -++ 'keyFingerprint': key_pair['fingerprint'], |
1397 | -++ }) |
1398 | -++ |
1399 | -++ return {'keySet': result} |
1400 | -++ |
1401 | -++ def create_key_pair(self, context, key_name, **kwargs): |
1402 | -++ LOG.audit(_("Create key pair %s"), key_name, context=context) |
1403 | -++ data = _gen_key(context, context.user_id, key_name) |
1404 | -++ return {'keyName': key_name, |
1405 | -++ 'keyFingerprint': data['fingerprint'], |
1406 | -++ 'keyMaterial': data['private_key']} |
1407 | -++ # TODO(vish): when context is no longer an object, pass it here |
1408 | -++ |
1409 | -++ def import_key_pair(self, context, key_name, public_key_material, |
1410 | -++ **kwargs): |
1411 | -++ LOG.audit(_("Import key %s"), key_name, context=context) |
1412 | -++ try: |
1413 | -++ db.key_pair_get(context, context.user_id, key_name) |
1414 | -++ raise exception.KeyPairExists(key_name=key_name) |
1415 | -++ except exception.NotFound: |
1416 | -++ pass |
1417 | -++ public_key = base64.b64decode(public_key_material) |
1418 | -++ fingerprint = crypto.generate_fingerprint(public_key) |
1419 | -++ key = {} |
1420 | -++ key['user_id'] = context.user_id |
1421 | -++ key['name'] = key_name |
1422 | -++ key['public_key'] = public_key |
1423 | -++ key['fingerprint'] = fingerprint |
1424 | -++ db.key_pair_create(context, key) |
1425 | -++ return {'keyName': key_name, |
1426 | -++ 'keyFingerprint': fingerprint} |
1427 | -++ |
1428 | -++ def delete_key_pair(self, context, key_name, **kwargs): |
1429 | -++ LOG.audit(_("Delete key pair %s"), key_name, context=context) |
1430 | -++ try: |
1431 | -++ db.key_pair_destroy(context, context.user_id, key_name) |
1432 | -++ except exception.NotFound: |
1433 | -++ # aws returns true even if the key doesn't exist |
1434 | -++ pass |
1435 | -++ return True |
1436 | -++ |
1437 | -++ def describe_security_groups(self, context, group_name=None, group_id=None, |
1438 | -++ **kwargs): |
1439 | -++ self.compute_api.ensure_default_security_group(context) |
1440 | -++ if group_name or group_id: |
1441 | -++ groups = [] |
1442 | -++ if group_name: |
1443 | -++ for name in group_name: |
1444 | -++ group = db.security_group_get_by_name(context, |
1445 | -++ context.project_id, |
1446 | -++ name) |
1447 | -++ groups.append(group) |
1448 | -++ if group_id: |
1449 | -++ for gid in group_id: |
1450 | -++ group = db.security_group_get(context, gid) |
1451 | -++ groups.append(group) |
1452 | -++ elif context.is_admin: |
1453 | -++ groups = db.security_group_get_all(context) |
1454 | -++ else: |
1455 | -++ groups = db.security_group_get_by_project(context, |
1456 | -++ context.project_id) |
1457 | -++ groups = [self._format_security_group(context, g) for g in groups] |
1458 | -++ |
1459 | -++ return {'securityGroupInfo': |
1460 | -++ list(sorted(groups, |
1461 | -++ key=lambda k: (k['ownerId'], k['groupName'])))} |
1462 | -++ |
1463 | -++ def _format_security_group(self, context, group): |
1464 | -++ g = {} |
1465 | -++ g['groupDescription'] = group.description |
1466 | -++ g['groupName'] = group.name |
1467 | -++ g['ownerId'] = group.project_id |
1468 | -++ g['ipPermissions'] = [] |
1469 | -++ for rule in group.rules: |
1470 | -++ r = {} |
1471 | -++ r['groups'] = [] |
1472 | -++ r['ipRanges'] = [] |
1473 | -++ if rule.group_id: |
1474 | -++ source_group = db.security_group_get(context, rule.group_id) |
1475 | -++ r['groups'] += [{'groupName': source_group.name, |
1476 | -++ 'userId': source_group.project_id}] |
1477 | -++ if rule.protocol: |
1478 | -++ r['ipProtocol'] = rule.protocol |
1479 | -++ r['fromPort'] = rule.from_port |
1480 | -++ r['toPort'] = rule.to_port |
1481 | -++ g['ipPermissions'] += [dict(r)] |
1482 | -++ else: |
1483 | -++ for protocol, min_port, max_port in (('icmp', -1, -1), |
1484 | -++ ('tcp', 1, 65535), |
1485 | -++ ('udp', 1, 65536)): |
1486 | -++ r['ipProtocol'] = protocol |
1487 | -++ r['fromPort'] = min_port |
1488 | -++ r['toPort'] = max_port |
1489 | -++ g['ipPermissions'] += [dict(r)] |
1490 | -++ else: |
1491 | -++ r['ipProtocol'] = rule.protocol |
1492 | -++ r['fromPort'] = rule.from_port |
1493 | -++ r['toPort'] = rule.to_port |
1494 | -++ r['ipRanges'] += [{'cidrIp': rule.cidr}] |
1495 | -++ g['ipPermissions'] += [r] |
1496 | -++ return g |
1497 | -++ |
1498 | -++ def _rule_args_to_dict(self, context, kwargs): |
1499 | -++ rules = [] |
1500 | -++ if not 'groups' in kwargs and not 'ip_ranges' in kwargs: |
1501 | -++ rule = self._rule_dict_last_step(context, **kwargs) |
1502 | -++ if rule: |
1503 | -++ rules.append(rule) |
1504 | -++ return rules |
1505 | -++ if 'ip_ranges' in kwargs: |
1506 | -++ rules = self._cidr_args_split(kwargs) |
1507 | -++ else: |
1508 | -++ rules = [kwargs] |
1509 | -++ finalset = [] |
1510 | -++ for rule in rules: |
1511 | -++ if 'groups' in rule: |
1512 | -++ groups_values = self._groups_args_split(rule) |
1513 | -++ for groups_value in groups_values: |
1514 | -++ final = self._rule_dict_last_step(context, **groups_value) |
1515 | -++ finalset.append(final) |
1516 | -++ else: |
1517 | -++ final = self._rule_dict_last_step(context, **rule) |
1518 | -++ finalset.append(final) |
1519 | -++ return finalset |
1520 | -++ |
1521 | -++ def _cidr_args_split(self, kwargs): |
1522 | -++ cidr_args_split = [] |
1523 | -++ cidrs = kwargs['ip_ranges'] |
1524 | -++ for key, cidr in cidrs.iteritems(): |
1525 | -++ mykwargs = kwargs.copy() |
1526 | -++ del mykwargs['ip_ranges'] |
1527 | -++ mykwargs['cidr_ip'] = cidr['cidr_ip'] |
1528 | -++ cidr_args_split.append(mykwargs) |
1529 | -++ return cidr_args_split |
1530 | -++ |
1531 | -++ def _groups_args_split(self, kwargs): |
1532 | -++ groups_args_split = [] |
1533 | -++ groups = kwargs['groups'] |
1534 | -++ for key, group in groups.iteritems(): |
1535 | -++ mykwargs = kwargs.copy() |
1536 | -++ del mykwargs['groups'] |
1537 | -++ if 'group_name' in group: |
1538 | -++ mykwargs['source_security_group_name'] = group['group_name'] |
1539 | -++ if 'user_id' in group: |
1540 | -++ mykwargs['source_security_group_owner_id'] = group['user_id'] |
1541 | -++ if 'group_id' in group: |
1542 | -++ mykwargs['source_security_group_id'] = group['group_id'] |
1543 | -++ groups_args_split.append(mykwargs) |
1544 | -++ return groups_args_split |
1545 | -++ |
1546 | -++ def _rule_dict_last_step(self, context, to_port=None, from_port=None, |
1547 | -++ ip_protocol=None, cidr_ip=None, user_id=None, |
1548 | -++ source_security_group_name=None, |
1549 | -++ source_security_group_owner_id=None): |
1550 | -++ |
1551 | -++ values = {} |
1552 | -++ |
1553 | -++ if source_security_group_name: |
1554 | -++ source_project_id = self._get_source_project_id(context, |
1555 | -++ source_security_group_owner_id) |
1556 | -++ |
1557 | -++ source_security_group = db.security_group_get_by_name( |
1558 | -++ context.elevated(), |
1559 | -++ source_project_id, |
1560 | -++ source_security_group_name) |
1561 | -++ notfound = exception.SecurityGroupNotFound |
1562 | -++ if not source_security_group: |
1563 | -++ raise notfound(security_group_id=source_security_group_name) |
1564 | -++ values['group_id'] = source_security_group['id'] |
1565 | -++ elif cidr_ip: |
1566 | -++ # If this fails, it throws an exception. This is what we want. |
1567 | -++ cidr_ip = urllib.unquote(cidr_ip).decode() |
1568 | -++ |
1569 | -++ if not utils.is_valid_cidr(cidr_ip): |
1570 | -++ # Raise exception for non-valid address |
1571 | -++ raise exception.InvalidCidr(cidr=cidr_ip) |
1572 | -++ |
1573 | -++ values['cidr'] = cidr_ip |
1574 | -++ else: |
1575 | -++ values['cidr'] = '0.0.0.0/0' |
1576 | -++ |
1577 | -++ if ip_protocol and from_port and to_port: |
1578 | -++ |
1579 | -++ ip_protocol = str(ip_protocol) |
1580 | -++ try: |
1581 | -++ # Verify integer conversions |
1582 | -++ from_port = int(from_port) |
1583 | -++ to_port = int(to_port) |
1584 | -++ except ValueError: |
1585 | -++ if ip_protocol.upper() == 'ICMP': |
1586 | -++ raise exception.InvalidInput(reason="Type and" |
1587 | -++ " Code must be integers for ICMP protocol type") |
1588 | -++ else: |
1589 | -++ raise exception.InvalidInput(reason="To and From ports " |
1590 | -++ "must be integers") |
1591 | -++ |
1592 | -++ if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: |
1593 | -++ raise exception.InvalidIpProtocol(protocol=ip_protocol) |
1594 | -++ |
1595 | -++ # Verify that from_port must always be less than |
1596 | -++ # or equal to to_port |
1597 | -++ if from_port > to_port: |
1598 | -++ raise exception.InvalidPortRange(from_port=from_port, |
1599 | -++ to_port=to_port, msg="Former value cannot" |
1600 | -++ " be greater than the later") |
1601 | -++ |
1602 | -++ # Verify valid TCP, UDP port ranges |
1603 | -++ if (ip_protocol.upper() in ['TCP', 'UDP'] and |
1604 | -++ (from_port < 1 or to_port > 65535)): |
1605 | -++ raise exception.InvalidPortRange(from_port=from_port, |
1606 | -++ to_port=to_port, msg="Valid TCP ports should" |
1607 | -++ " be between 1-65535") |
1608 | -++ |
1609 | -++ # Verify ICMP type and code |
1610 | -++ if (ip_protocol.upper() == "ICMP" and |
1611 | -++ (from_port < -1 or to_port > 255)): |
1612 | -++ raise exception.InvalidPortRange(from_port=from_port, |
1613 | -++ to_port=to_port, msg="For ICMP, the" |
1614 | -++ " type:code must be valid") |
1615 | -++ |
1616 | -++ values['protocol'] = ip_protocol |
1617 | -++ values['from_port'] = from_port |
1618 | -++ values['to_port'] = to_port |
1619 | -++ else: |
1620 | -++ # If cidr based filtering, protocol and ports are mandatory |
1621 | -++ if 'cidr' in values: |
1622 | -++ return None |
1623 | -++ |
1624 | -++ return values |
1625 | -++ |
1626 | -++ def _security_group_rule_exists(self, security_group, values): |
1627 | -++ """Indicates whether the specified rule values are already |
1628 | -++ defined in the given security group. |
1629 | -++ """ |
1630 | -++ for rule in security_group.rules: |
1631 | -++ is_duplicate = True |
1632 | -++ keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol') |
1633 | -++ for key in keys: |
1634 | -++ if rule.get(key) != values.get(key): |
1635 | -++ is_duplicate = False |
1636 | -++ break |
1637 | -++ if is_duplicate: |
1638 | -++ return rule['id'] |
1639 | -++ return False |
1640 | -++ |
1641 | -++ def revoke_security_group_ingress(self, context, group_name=None, |
1642 | -++ group_id=None, **kwargs): |
1643 | -++ if not group_name and not group_id: |
1644 | -++ err = "Not enough parameters, need group_name or group_id" |
1645 | -++ raise exception.EC2APIError(_(err)) |
1646 | -++ self.compute_api.ensure_default_security_group(context) |
1647 | -++ notfound = exception.SecurityGroupNotFound |
1648 | -++ if group_name: |
1649 | -++ security_group = db.security_group_get_by_name(context, |
1650 | -++ context.project_id, |
1651 | -++ group_name) |
1652 | -++ if not security_group: |
1653 | -++ raise notfound(security_group_id=group_name) |
1654 | -++ if group_id: |
1655 | -++ security_group = db.security_group_get(context, group_id) |
1656 | -++ if not security_group: |
1657 | -++ raise notfound(security_group_id=group_id) |
1658 | -++ |
1659 | -++ msg = "Revoke security group ingress %s" |
1660 | -++ LOG.audit(_(msg), security_group['name'], context=context) |
1661 | -++ prevalues = [] |
1662 | -++ try: |
1663 | -++ prevalues = kwargs['ip_permissions'] |
1664 | -++ except KeyError: |
1665 | -++ prevalues.append(kwargs) |
1666 | -++ rule_id = None |
1667 | -++ rule_ids = [] |
1668 | -++ for values in prevalues: |
1669 | -++ rulesvalues = self._rule_args_to_dict(context, values) |
1670 | -++ if not rulesvalues: |
1671 | -++ err = "%s Not enough parameters to build a valid rule" |
1672 | -++ raise exception.EC2APIError(_(err % rulesvalues)) |
1673 | -++ |
1674 | -++ for values_for_rule in rulesvalues: |
1675 | -++ values_for_rule['parent_group_id'] = security_group.id |
1676 | -++ rule_id = self._security_group_rule_exists(security_group, |
1677 | -++ values_for_rule) |
1678 | -++ if rule_id: |
1679 | -++ db.security_group_rule_destroy(context, rule_id) |
1680 | -++ rule_ids.append(rule_id) |
1681 | -++ if rule_id: |
1682 | -++ # NOTE(vish): we removed a rule, so refresh |
1683 | -++ self.compute_api.trigger_security_group_rules_refresh( |
1684 | -++ context, |
1685 | -++ security_group_id=security_group['id']) |
1686 | -++ self.sgh.trigger_security_group_rule_destroy_refresh( |
1687 | -++ context, rule_ids) |
1688 | -++ return True |
1689 | -++ raise exception.EC2APIError(_("No rule for the specified parameters.")) |
1690 | -++ |
1691 | -++ # TODO(soren): This has only been tested with Boto as the client. |
1692 | -++ # Unfortunately, it seems Boto is using an old API |
1693 | -++ # for these operations, so support for newer API versions |
1694 | -++ # is sketchy. |
1695 | -++ def authorize_security_group_ingress(self, context, group_name=None, |
1696 | -++ group_id=None, **kwargs): |
1697 | -++ if not group_name and not group_id: |
1698 | -++ err = "Not enough parameters, need group_name or group_id" |
1699 | -++ raise exception.EC2APIError(_(err)) |
1700 | -++ self.compute_api.ensure_default_security_group(context) |
1701 | -++ notfound = exception.SecurityGroupNotFound |
1702 | -++ if group_name: |
1703 | -++ security_group = db.security_group_get_by_name(context, |
1704 | -++ context.project_id, |
1705 | -++ group_name) |
1706 | -++ if not security_group: |
1707 | -++ raise notfound(security_group_id=group_name) |
1708 | -++ if group_id: |
1709 | -++ security_group = db.security_group_get(context, group_id) |
1710 | -++ if not security_group: |
1711 | -++ raise notfound(security_group_id=group_id) |
1712 | -++ |
1713 | -++ msg = "Authorize security group ingress %s" |
1714 | -++ LOG.audit(_(msg), security_group['name'], context=context) |
1715 | -++ prevalues = [] |
1716 | -++ try: |
1717 | -++ prevalues = kwargs['ip_permissions'] |
1718 | -++ except KeyError: |
1719 | -++ prevalues.append(kwargs) |
1720 | -++ postvalues = [] |
1721 | -++ for values in prevalues: |
1722 | -++ rulesvalues = self._rule_args_to_dict(context, values) |
1723 | -++ if not rulesvalues: |
1724 | -++ err = "%s Not enough parameters to build a valid rule" |
1725 | -++ raise exception.EC2APIError(_(err % rulesvalues)) |
1726 | -++ for values_for_rule in rulesvalues: |
1727 | -++ values_for_rule['parent_group_id'] = security_group.id |
1728 | -++ if self._security_group_rule_exists(security_group, |
1729 | -++ values_for_rule): |
1730 | -++ err = '%s - This rule already exists in group' |
1731 | -++ raise exception.EC2APIError(_(err) % values_for_rule) |
1732 | -++ postvalues.append(values_for_rule) |
1733 | -++ |
1734 | -++ rule_ids = [] |
1735 | -++ for values_for_rule in postvalues: |
1736 | -++ security_group_rule = db.security_group_rule_create( |
1737 | -++ context, |
1738 | -++ values_for_rule) |
1739 | -++ rule_ids.append(security_group_rule['id']) |
1740 | -++ |
1741 | -++ if postvalues: |
1742 | -++ self.compute_api.trigger_security_group_rules_refresh( |
1743 | -++ context, |
1744 | -++ security_group_id=security_group['id']) |
1745 | -++ self.sgh.trigger_security_group_rule_create_refresh( |
1746 | -++ context, rule_ids) |
1747 | -++ return True |
1748 | -++ |
1749 | -++ raise exception.EC2APIError(_("No rule for the specified parameters.")) |
1750 | -++ |
1751 | -++ def _get_source_project_id(self, context, source_security_group_owner_id): |
1752 | -++ if source_security_group_owner_id: |
1753 | -++ # Parse user:project for source group. |
1754 | -++ source_parts = source_security_group_owner_id.split(':') |
1755 | -++ |
1756 | -++ # If no project name specified, assume it's same as user name. |
1757 | -++ # Since we're looking up by project name, the user name is not |
1758 | -++ # used here. It's only read for EC2 API compatibility. |
1759 | -++ if len(source_parts) == 2: |
1760 | -++ source_project_id = source_parts[1] |
1761 | -++ else: |
1762 | -++ source_project_id = source_parts[0] |
1763 | -++ else: |
1764 | -++ source_project_id = context.project_id |
1765 | -++ |
1766 | -++ return source_project_id |
1767 | -++ |
1768 | -++ def create_security_group(self, context, group_name, group_description): |
1769 | -++ if not re.match('^[a-zA-Z0-9_\- ]+$', str(group_name)): |
1770 | -++ # Some validation to ensure that values match API spec. |
1771 | -++ # - Alphanumeric characters, spaces, dashes, and underscores. |
1772 | -++ # TODO(Daviey): LP: #813685 extend beyond group_name checking, and |
1773 | -++ # probably create a param validator that can be used elsewhere. |
1774 | -++ err = _("Value (%s) for parameter GroupName is invalid." |
1775 | -++ " Content limited to Alphanumeric characters, " |
1776 | -++ "spaces, dashes, and underscores.") % group_name |
1777 | -++ # err not that of master ec2 implementation, as they fail to raise. |
1778 | -++ raise exception.InvalidParameterValue(err=err) |
1779 | -++ |
1780 | -++ if len(str(group_name)) > 255: |
1781 | -++ err = _("Value (%s) for parameter GroupName is invalid." |
1782 | -++ " Length exceeds maximum of 255.") % group_name |
1783 | -++ raise exception.InvalidParameterValue(err=err) |
1784 | -++ |
1785 | -++ LOG.audit(_("Create Security Group %s"), group_name, context=context) |
1786 | -++ self.compute_api.ensure_default_security_group(context) |
1787 | -++ if db.security_group_exists(context, context.project_id, group_name): |
1788 | -++ msg = _('group %s already exists') |
1789 | -++ raise exception.EC2APIError(msg % group_name) |
1790 | -++ |
1791 | -++ group = {'user_id': context.user_id, |
1792 | -++ 'project_id': context.project_id, |
1793 | -++ 'name': group_name, |
1794 | -++ 'description': group_description} |
1795 | -++ group_ref = db.security_group_create(context, group) |
1796 | -++ |
1797 | -++ self.sgh.trigger_security_group_create_refresh(context, group) |
1798 | -++ |
1799 | -++ return {'securityGroupSet': [self._format_security_group(context, |
1800 | -++ group_ref)]} |
1801 | -++ |
1802 | -++ def delete_security_group(self, context, group_name=None, group_id=None, |
1803 | -++ **kwargs): |
1804 | -++ if not group_name and not group_id: |
1805 | -++ err = "Not enough parameters, need group_name or group_id" |
1806 | -++ raise exception.EC2APIError(_(err)) |
1807 | -++ notfound = exception.SecurityGroupNotFound |
1808 | -++ if group_name: |
1809 | -++ security_group = db.security_group_get_by_name(context, |
1810 | -++ context.project_id, |
1811 | -++ group_name) |
1812 | -++ if not security_group: |
1813 | -++ raise notfound(security_group_id=group_name) |
1814 | -++ elif group_id: |
1815 | -++ security_group = db.security_group_get(context, group_id) |
1816 | -++ if not security_group: |
1817 | -++ raise notfound(security_group_id=group_id) |
1818 | -++ if db.security_group_in_use(context, security_group.id): |
1819 | -++ raise exception.InvalidGroup(reason="In Use") |
1820 | -++ LOG.audit(_("Delete security group %s"), group_name, context=context) |
1821 | -++ db.security_group_destroy(context, security_group.id) |
1822 | -++ |
1823 | -++ self.sgh.trigger_security_group_destroy_refresh(context, |
1824 | -++ security_group.id) |
1825 | -++ return True |
1826 | -++ |
1827 | -++ def get_console_output(self, context, instance_id, **kwargs): |
1828 | -++ LOG.audit(_("Get console output for instance %s"), instance_id, |
1829 | -++ context=context) |
1830 | -++ # instance_id may be passed in as a list of instances |
1831 | -++ if isinstance(instance_id, list): |
1832 | -++ ec2_id = instance_id[0] |
1833 | -++ else: |
1834 | -++ ec2_id = instance_id |
1835 | -++ validate_ec2_id(ec2_id) |
1836 | -++ instance_id = ec2utils.ec2_id_to_id(ec2_id) |
1837 | -++ instance = self.compute_api.get(context, instance_id) |
1838 | -++ output = self.compute_api.get_console_output(context, instance) |
1839 | -++ now = utils.utcnow() |
1840 | -++ return {"InstanceId": ec2_id, |
1841 | -++ "Timestamp": now, |
1842 | -++ "output": base64.b64encode(output)} |
1843 | -++ |
1844 | -++ def describe_volumes(self, context, volume_id=None, **kwargs): |
1845 | -++ if volume_id: |
1846 | -++ volumes = [] |
1847 | -++ for ec2_id in volume_id: |
1848 | -++ validate_ec2_id(ec2_id) |
1849 | -++ internal_id = ec2utils.ec2_id_to_id(ec2_id) |
1850 | -++ volume = self.volume_api.get(context, internal_id) |
1851 | -++ volumes.append(volume) |
1852 | -++ else: |
1853 | -++ volumes = self.volume_api.get_all(context) |
1854 | -++ volumes = [self._format_volume(context, v) for v in volumes] |
1855 | -++ return {'volumeSet': volumes} |
1856 | -++ |
1857 | -++ def _format_volume(self, context, volume): |
1858 | -++ instance_ec2_id = None |
1859 | -++ instance_data = None |
1860 | -++ if volume.get('instance', None): |
1861 | -++ instance_id = volume['instance']['id'] |
1862 | -++ instance_ec2_id = ec2utils.id_to_ec2_id(instance_id) |
1863 | -++ instance_data = '%s[%s]' % (instance_ec2_id, |
1864 | -++ volume['instance']['host']) |
1865 | -++ v = {} |
1866 | -++ v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id']) |
1867 | -++ v['status'] = volume['status'] |
1868 | -++ v['size'] = volume['size'] |
1869 | -++ v['availabilityZone'] = volume['availability_zone'] |
1870 | -++ v['createTime'] = volume['created_at'] |
1871 | -++ if context.is_admin: |
1872 | -++ v['status'] = '%s (%s, %s, %s, %s)' % ( |
1873 | -++ volume['status'], |
1874 | -++ volume['project_id'], |
1875 | -++ volume['host'], |
1876 | -++ instance_data, |
1877 | -++ volume['mountpoint']) |
1878 | -++ if volume['attach_status'] == 'attached': |
1879 | -++ v['attachmentSet'] = [{'attachTime': volume['attach_time'], |
1880 | -++ 'deleteOnTermination': False, |
1881 | -++ 'device': volume['mountpoint'], |
1882 | -++ 'instanceId': instance_ec2_id, |
1883 | -++ 'status': 'attached', |
1884 | -++ 'volumeId': v['volumeId']}] |
1885 | -++ else: |
1886 | -++ v['attachmentSet'] = [{}] |
1887 | -++ if volume.get('snapshot_id') is not None: |
1888 | -++ v['snapshotId'] = ec2utils.id_to_ec2_snap_id(volume['snapshot_id']) |
1889 | -++ else: |
1890 | -++ v['snapshotId'] = None |
1891 | -++ |
1892 | -++ return v |
1893 | -++ |
1894 | -++ def create_volume(self, context, **kwargs): |
1895 | -++ size = kwargs.get('size') |
1896 | -++ if kwargs.get('snapshot_id') is not None: |
1897 | -++ snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id']) |
1898 | -++ snapshot = self.volume_api.get_snapshot(context, snapshot_id) |
1899 | -++ LOG.audit(_("Create volume from snapshot %s"), snapshot_id, |
1900 | -++ context=context) |
1901 | -++ else: |
1902 | -++ snapshot = None |
1903 | -++ LOG.audit(_("Create volume of %s GB"), size, context=context) |
1904 | -++ |
1905 | -++ availability_zone = kwargs.get('availability_zone', None) |
1906 | -++ |
1907 | -++ volume = self.volume_api.create(context, |
1908 | -++ size, |
1909 | -++ None, |
1910 | -++ None, |
1911 | -++ snapshot, |
1912 | -++ availability_zone=availability_zone) |
1913 | -++ # TODO(vish): Instance should be None at db layer instead of |
1914 | -++ # trying to lazy load, but for now we turn it into |
1915 | -++ # a dict to avoid an error. |
1916 | -++ return self._format_volume(context, dict(volume)) |
1917 | -++ |
1918 | -++ def delete_volume(self, context, volume_id, **kwargs): |
1919 | -++ validate_ec2_id(volume_id) |
1920 | -++ volume_id = ec2utils.ec2_id_to_id(volume_id) |
1921 | -++ volume = self.volume_api.get(context, volume_id) |
1922 | -++ self.volume_api.delete(context, volume) |
1923 | -++ return True |
1924 | -++ |
1925 | -++ def attach_volume(self, context, volume_id, instance_id, device, **kwargs): |
1926 | -++ validate_ec2_id(instance_id) |
1927 | -++ validate_ec2_id(volume_id) |
1928 | -++ volume_id = ec2utils.ec2_id_to_id(volume_id) |
1929 | -++ instance_id = ec2utils.ec2_id_to_id(instance_id) |
1930 | -++ instance = self.compute_api.get(context, instance_id) |
1931 | -++ msg = _("Attach volume %(volume_id)s to instance %(instance_id)s" |
1932 | -++ " at %(device)s") % locals() |
1933 | -++ LOG.audit(msg, context=context) |
1934 | -++ self.compute_api.attach_volume(context, instance, volume_id, device) |
1935 | -++ volume = self.volume_api.get(context, volume_id) |
1936 | -++ return {'attachTime': volume['attach_time'], |
1937 | -++ 'device': volume['mountpoint'], |
1938 | -++ 'instanceId': ec2utils.id_to_ec2_id(instance_id), |
1939 | -++ 'requestId': context.request_id, |
1940 | -++ 'status': volume['attach_status'], |
1941 | -++ 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} |
1942 | -++ |
1943 | -++ def detach_volume(self, context, volume_id, **kwargs): |
1944 | -++ validate_ec2_id(volume_id) |
1945 | -++ volume_id = ec2utils.ec2_id_to_id(volume_id) |
1946 | -++ LOG.audit(_("Detach volume %s"), volume_id, context=context) |
1947 | -++ volume = self.volume_api.get(context, volume_id) |
1948 | -++ instance = self.compute_api.detach_volume(context, volume_id=volume_id) |
1949 | -++ return {'attachTime': volume['attach_time'], |
1950 | -++ 'device': volume['mountpoint'], |
1951 | -++ 'instanceId': ec2utils.id_to_ec2_id(instance['id']), |
1952 | -++ 'requestId': context.request_id, |
1953 | -++ 'status': volume['attach_status'], |
1954 | -++ 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} |
1955 | -++ |
1956 | -++ def _format_kernel_id(self, context, instance_ref, result, key): |
1957 | -++ kernel_uuid = instance_ref['kernel_id'] |
1958 | -++ if kernel_uuid is None or kernel_uuid == '': |
1959 | -++ return |
1960 | -++ kernel_id = self._get_image_id(context, kernel_uuid) |
1961 | -++ result[key] = ec2utils.image_ec2_id(kernel_id, 'aki') |
1962 | -++ |
1963 | -++ def _format_ramdisk_id(self, context, instance_ref, result, key): |
1964 | -++ ramdisk_uuid = instance_ref['ramdisk_id'] |
1965 | -++ if ramdisk_uuid is None or ramdisk_uuid == '': |
1966 | -++ return |
1967 | -++ ramdisk_id = self._get_image_id(context, ramdisk_uuid) |
1968 | -++ result[key] = ec2utils.image_ec2_id(ramdisk_id, 'ari') |
1969 | -++ |
1970 | -++ def describe_instance_attribute(self, context, instance_id, attribute, |
1971 | -++ **kwargs): |
1972 | -++ def _unsupported_attribute(instance, result): |
1973 | -++ raise exception.EC2APIError(_('attribute not supported: %s') % |
1974 | -++ attribute) |
1975 | -++ |
1976 | -++ def _format_attr_block_device_mapping(instance, result): |
1977 | -++ tmp = {} |
1978 | -++ self._format_instance_root_device_name(instance, tmp) |
1979 | -++ self._format_instance_bdm(context, instance_id, |
1980 | -++ tmp['rootDeviceName'], result) |
1981 | -++ |
1982 | -++ def _format_attr_disable_api_termination(instance, result): |
1983 | -++ result['disableApiTermination'] = instance['disable_terminate'] |
1984 | -++ |
1985 | -++ def _format_attr_group_set(instance, result): |
1986 | -++ CloudController._format_group_set(instance, result) |
1987 | -++ |
1988 | -++ def _format_attr_instance_initiated_shutdown_behavior(instance, |
1989 | -++ result): |
1990 | -++ if instance['shutdown_terminate']: |
1991 | -++ result['instanceInitiatedShutdownBehavior'] = 'terminate' |
1992 | -++ else: |
1993 | -++ result['instanceInitiatedShutdownBehavior'] = 'stop' |
1994 | -++ |
1995 | -++ def _format_attr_instance_type(instance, result): |
1996 | -++ self._format_instance_type(instance, result) |
1997 | -++ |
1998 | -++ def _format_attr_kernel(instance, result): |
1999 | -++ self._format_kernel_id(context, instance, result, 'kernel') |
2000 | -++ |
2001 | -++ def _format_attr_ramdisk(instance, result): |
2002 | -++ self._format_ramdisk_id(context, instance, result, 'ramdisk') |
2003 | -++ |
2004 | -++ def _format_attr_root_device_name(instance, result): |
2005 | -++ self._format_instance_root_device_name(instance, result) |
2006 | -++ |
2007 | -++ def _format_attr_source_dest_check(instance, result): |
2008 | -++ _unsupported_attribute(instance, result) |
2009 | -++ |
2010 | -++ def _format_attr_user_data(instance, result): |
2011 | -++ result['userData'] = base64.b64decode(instance['user_data']) |
2012 | -++ |
2013 | -++ attribute_formatter = { |
2014 | -++ 'blockDeviceMapping': _format_attr_block_device_mapping, |
2015 | -++ 'disableApiTermination': _format_attr_disable_api_termination, |
2016 | -++ 'groupSet': _format_attr_group_set, |
2017 | -++ 'instanceInitiatedShutdownBehavior': |
2018 | -++ _format_attr_instance_initiated_shutdown_behavior, |
2019 | -++ 'instanceType': _format_attr_instance_type, |
2020 | -++ 'kernel': _format_attr_kernel, |
2021 | -++ 'ramdisk': _format_attr_ramdisk, |
2022 | -++ 'rootDeviceName': _format_attr_root_device_name, |
2023 | -++ 'sourceDestCheck': _format_attr_source_dest_check, |
2024 | -++ 'userData': _format_attr_user_data, |
2025 | -++ } |
2026 | -++ |
2027 | -++ fn = attribute_formatter.get(attribute) |
2028 | -++ if fn is None: |
2029 | -++ raise exception.EC2APIError( |
2030 | -++ _('attribute not supported: %s') % attribute) |
2031 | -++ |
2032 | -++ ec2_instance_id = instance_id |
2033 | -++ validate_ec2_id(instance_id) |
2034 | -++ instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) |
2035 | -++ instance = self.compute_api.get(context, instance_id) |
2036 | -++ result = {'instance_id': ec2_instance_id} |
2037 | -++ fn(instance, result) |
2038 | -++ return result |
2039 | -++ |
2040 | -++ def describe_instances(self, context, **kwargs): |
2041 | -++ # Optional DescribeInstances argument |
2042 | -++ instance_id = kwargs.get('instance_id', None) |
2043 | -++ return self._format_describe_instances(context, |
2044 | -++ instance_id=instance_id) |
2045 | -++ |
2046 | -++ def describe_instances_v6(self, context, **kwargs): |
2047 | -++ # Optional DescribeInstancesV6 argument |
2048 | -++ instance_id = kwargs.get('instance_id', None) |
2049 | -++ return self._format_describe_instances(context, |
2050 | -++ instance_id=instance_id, use_v6=True) |
2051 | -++ |
2052 | -++ def _format_describe_instances(self, context, **kwargs): |
2053 | -++ return {'reservationSet': self._format_instances(context, **kwargs)} |
2054 | -++ |
2055 | -++ def _format_run_instances(self, context, reservation_id): |
2056 | -++ i = self._format_instances(context, reservation_id=reservation_id) |
2057 | -++ assert len(i) == 1 |
2058 | -++ return i[0] |
2059 | -++ |
2060 | -++ def _format_terminate_instances(self, context, instance_id, |
2061 | -++ previous_states): |
2062 | -++ instances_set = [] |
2063 | -++ for (ec2_id, previous_state) in zip(instance_id, previous_states): |
2064 | -++ i = {} |
2065 | -++ i['instanceId'] = ec2_id |
2066 | -++ i['previousState'] = _state_description(previous_state['vm_state'], |
2067 | -++ previous_state['shutdown_terminate']) |
2068 | -++ try: |
2069 | -++ internal_id = ec2utils.ec2_id_to_id(ec2_id) |
2070 | -++ instance = self.compute_api.get(context, internal_id) |
2071 | -++ i['shutdownState'] = _state_description(instance['vm_state'], |
2072 | -++ instance['shutdown_terminate']) |
2073 | -++ except exception.NotFound: |
2074 | -++ i['shutdownState'] = _state_description(vm_states.DELETED, |
2075 | -++ True) |
2076 | -++ instances_set.append(i) |
2077 | -++ return {'instancesSet': instances_set} |
2078 | -++ |
2079 | -++ def _format_instance_bdm(self, context, instance_id, root_device_name, |
2080 | -++ result): |
2081 | -++ """Format InstanceBlockDeviceMappingResponseItemType""" |
2082 | -++ root_device_type = 'instance-store' |
2083 | -++ mapping = [] |
2084 | -++ for bdm in db.block_device_mapping_get_all_by_instance(context, |
2085 | -++ instance_id): |
2086 | -++ volume_id = bdm['volume_id'] |
2087 | -++ if (volume_id is None or bdm['no_device']): |
2088 | -++ continue |
2089 | -++ |
2090 | -++ if (bdm['device_name'] == root_device_name and |
2091 | -++ (bdm['snapshot_id'] or bdm['volume_id'])): |
2092 | -++ assert not bdm['virtual_name'] |
2093 | -++ root_device_type = 'ebs' |
2094 | -++ |
2095 | -++ vol = self.volume_api.get(context, volume_id) |
2096 | -++ LOG.debug(_("vol = %s\n"), vol) |
2097 | -++ # TODO(yamahata): volume attach time |
2098 | -++ ebs = {'volumeId': volume_id, |
2099 | -++ 'deleteOnTermination': bdm['delete_on_termination'], |
2100 | -++ 'attachTime': vol['attach_time'] or '-', |
2101 | -++ 'status': vol['status'], } |
2102 | -++ res = {'deviceName': bdm['device_name'], |
2103 | -++ 'ebs': ebs, } |
2104 | -++ mapping.append(res) |
2105 | -++ |
2106 | -++ if mapping: |
2107 | -++ result['blockDeviceMapping'] = mapping |
2108 | -++ result['rootDeviceType'] = root_device_type |
2109 | -++ |
2110 | -++ @staticmethod |
2111 | -++ def _format_instance_root_device_name(instance, result): |
2112 | -++ result['rootDeviceName'] = (instance.get('root_device_name') or |
2113 | -++ block_device.DEFAULT_ROOT_DEV_NAME) |
2114 | -++ |
2115 | -++ @staticmethod |
2116 | -++ def _format_instance_type(instance, result): |
2117 | -++ if instance['instance_type']: |
2118 | -++ result['instanceType'] = instance['instance_type'].get('name') |
2119 | -++ else: |
2120 | -++ result['instanceType'] = None |
2121 | -++ |
2122 | -++ @staticmethod |
2123 | -++ def _format_group_set(instance, result): |
2124 | -++ security_group_names = [] |
2125 | -++ if instance.get('security_groups'): |
2126 | -++ for security_group in instance['security_groups']: |
2127 | -++ security_group_names.append(security_group['name']) |
2128 | -++ result['groupSet'] = utils.convert_to_list_dict( |
2129 | -++ security_group_names, 'groupId') |
2130 | -++ |
2131 | -++ def _format_instances(self, context, instance_id=None, use_v6=False, |
2132 | -++ **search_opts): |
2133 | -++ # TODO(termie): this method is poorly named as its name does not imply |
2134 | -++ # that it will be making a variety of database calls |
2135 | -++ # rather than simply formatting a bunch of instances that |
2136 | -++ # were handed to it |
2137 | -++ reservations = {} |
2138 | -++ # NOTE(vish): instance_id is an optional list of ids to filter by |
2139 | -++ if instance_id: |
2140 | -++ instances = [] |
2141 | -++ for ec2_id in instance_id: |
2142 | -++ internal_id = ec2utils.ec2_id_to_id(ec2_id) |
2143 | -++ try: |
2144 | -++ instance = self.compute_api.get(context, internal_id) |
2145 | -++ except exception.NotFound: |
2146 | -++ continue |
2147 | -++ instances.append(instance) |
2148 | -++ else: |
2149 | -++ try: |
2150 | -++ # always filter out deleted instances |
2151 | -++ search_opts['deleted'] = False |
2152 | -++ instances = self.compute_api.get_all(context, |
2153 | -++ search_opts=search_opts) |
2154 | -++ except exception.NotFound: |
2155 | -++ instances = [] |
2156 | -++ for instance in instances: |
2157 | -++ if not context.is_admin: |
2158 | -++ if instance['image_ref'] == str(FLAGS.vpn_image_id): |
2159 | -++ continue |
2160 | -++ i = {} |
2161 | -++ instance_id = instance['id'] |
2162 | -++ ec2_id = ec2utils.id_to_ec2_id(instance_id) |
2163 | -++ i['instanceId'] = ec2_id |
2164 | -++ image_uuid = instance['image_ref'] |
2165 | -++ image_id = self._get_image_id(context, image_uuid) |
2166 | -++ i['imageId'] = ec2utils.image_ec2_id(image_id) |
2167 | -++ self._format_kernel_id(context, instance, i, 'kernelId') |
2168 | -++ self._format_ramdisk_id(context, instance, i, 'ramdiskId') |
2169 | -++ i['instanceState'] = _state_description( |
2170 | -++ instance['vm_state'], instance['shutdown_terminate']) |
2171 | -++ |
2172 | -++ fixed_ip = None |
2173 | -++ floating_ip = None |
2174 | -++ ip_info = ec2utils.get_ip_info_for_instance(context, instance) |
2175 | -++ if ip_info['fixed_ips']: |
2176 | -++ fixed_ip = ip_info['fixed_ips'][0] |
2177 | -++ if ip_info['floating_ips']: |
2178 | -++ floating_ip = ip_info['floating_ips'][0] |
2179 | -++ if ip_info['fixed_ip6s']: |
2180 | -++ i['dnsNameV6'] = ip_info['fixed_ip6s'][0] |
2181 | -++ i['privateDnsName'] = instance['hostname'] |
2182 | -++ i['privateIpAddress'] = fixed_ip |
2183 | -++ i['publicDnsName'] = floating_ip |
2184 | -++ i['ipAddress'] = floating_ip or fixed_ip |
2185 | -++ i['dnsName'] = i['publicDnsName'] or i['privateDnsName'] |
2186 | -++ i['keyName'] = instance['key_name'] |
2187 | -++ |
2188 | -++ if context.is_admin: |
2189 | -++ i['keyName'] = '%s (%s, %s)' % (i['keyName'], |
2190 | -++ instance['project_id'], |
2191 | -++ instance['host']) |
2192 | -++ i['productCodesSet'] = utils.convert_to_list_dict([], |
2193 | -++ 'product_codes') |
2194 | -++ self._format_instance_type(instance, i) |
2195 | -++ i['launchTime'] = instance['created_at'] |
2196 | -++ i['amiLaunchIndex'] = instance['launch_index'] |
2197 | -++ self._format_instance_root_device_name(instance, i) |
2198 | -++ self._format_instance_bdm(context, instance_id, |
2199 | -++ i['rootDeviceName'], i) |
2200 | -++ host = instance['host'] |
2201 | -++ services = db.service_get_all_by_host(context.elevated(), host) |
2202 | -++ zone = ec2utils.get_availability_zone_by_host(services, host) |
2203 | -++ i['placement'] = {'availabilityZone': zone} |
2204 | -++ if instance['reservation_id'] not in reservations: |
2205 | -++ r = {} |
2206 | -++ r['reservationId'] = instance['reservation_id'] |
2207 | -++ r['ownerId'] = instance['project_id'] |
2208 | -++ self._format_group_set(instance, r) |
2209 | -++ r['instancesSet'] = [] |
2210 | -++ reservations[instance['reservation_id']] = r |
2211 | -++ reservations[instance['reservation_id']]['instancesSet'].append(i) |
2212 | -++ |
2213 | -++ return list(reservations.values()) |
2214 | -++ |
2215 | -++ def describe_addresses(self, context, **kwargs): |
2216 | -++ return self.format_addresses(context) |
2217 | -++ |
2218 | -++ def format_addresses(self, context): |
2219 | -++ addresses = [] |
2220 | -++ floaters = self.network_api.get_floating_ips_by_project(context) |
2221 | -++ for floating_ip_ref in floaters: |
2222 | -++ if floating_ip_ref['project_id'] is None: |
2223 | -++ continue |
2224 | -++ address = floating_ip_ref['address'] |
2225 | -++ ec2_id = None |
2226 | -++ if floating_ip_ref['fixed_ip_id']: |
2227 | -++ fixed_id = floating_ip_ref['fixed_ip_id'] |
2228 | -++ fixed = self.network_api.get_fixed_ip(context, fixed_id) |
2229 | -++ if fixed['instance_id'] is not None: |
2230 | -++ ec2_id = ec2utils.id_to_ec2_id(fixed['instance_id']) |
2231 | -++ address_rv = {'public_ip': address, |
2232 | -++ 'instance_id': ec2_id} |
2233 | -++ if context.is_admin: |
2234 | -++ details = "%s (%s)" % (address_rv['instance_id'], |
2235 | -++ floating_ip_ref['project_id']) |
2236 | -++ address_rv['instance_id'] = details |
2237 | -++ addresses.append(address_rv) |
2238 | -++ return {'addressesSet': addresses} |
2239 | -++ |
2240 | -++ def allocate_address(self, context, **kwargs): |
2241 | -++ LOG.audit(_("Allocate address"), context=context) |
2242 | -++ try: |
2243 | -++ public_ip = self.network_api.allocate_floating_ip(context) |
2244 | -++ return {'publicIp': public_ip} |
2245 | -++ except rpc.RemoteError as ex: |
2246 | -++ # NOTE(tr3buchet) - why does this block exist? |
2247 | -++ if ex.exc_type == 'NoMoreFloatingIps': |
2248 | -++ raise exception.NoMoreFloatingIps() |
2249 | -++ else: |
2250 | -++ raise |
2251 | -++ |
2252 | -++ def release_address(self, context, public_ip, **kwargs): |
2253 | -++ LOG.audit(_("Release address %s"), public_ip, context=context) |
2254 | -++ self.network_api.release_floating_ip(context, address=public_ip) |
2255 | -++ return {'return': "true"} |
2256 | -++ |
2257 | -++ def associate_address(self, context, instance_id, public_ip, **kwargs): |
2258 | -++ LOG.audit(_("Associate address %(public_ip)s to" |
2259 | -++ " instance %(instance_id)s") % locals(), context=context) |
2260 | -++ instance_id = ec2utils.ec2_id_to_id(instance_id) |
2261 | -++ instance = self.compute_api.get(context, instance_id) |
2262 | -++ self.compute_api.associate_floating_ip(context, |
2263 | -++ instance, |
2264 | -++ address=public_ip) |
2265 | -++ return {'return': "true"} |
2266 | -++ |
2267 | -++ def disassociate_address(self, context, public_ip, **kwargs): |
2268 | -++ LOG.audit(_("Disassociate address %s"), public_ip, context=context) |
2269 | -++ self.network_api.disassociate_floating_ip(context, address=public_ip) |
2270 | -++ return {'return': "true"} |
2271 | -++ |
2272 | -++ def run_instances(self, context, **kwargs): |
2273 | -++ max_count = int(kwargs.get('max_count', 1)) |
2274 | -++ if kwargs.get('kernel_id'): |
2275 | -++ kernel = self._get_image(context, kwargs['kernel_id']) |
2276 | -++ kwargs['kernel_id'] = self._get_image_uuid(context, kernel['id']) |
2277 | -++ if kwargs.get('ramdisk_id'): |
2278 | -++ ramdisk = self._get_image(context, kwargs['ramdisk_id']) |
2279 | -++ kwargs['ramdisk_id'] = self._get_image_uuid(context, |
2280 | -++ ramdisk['id']) |
2281 | -++ for bdm in kwargs.get('block_device_mapping', []): |
2282 | -++ _parse_block_device_mapping(bdm) |
2283 | -++ |
2284 | -++ image = self._get_image(context, kwargs['image_id']) |
2285 | -++ image_uuid = self._get_image_uuid(context, image['id']) |
2286 | -++ |
2287 | -++ if image: |
2288 | -++ image_state = self._get_image_state(image) |
2289 | -++ else: |
2290 | -++ raise exception.ImageNotFound(image_id=kwargs['image_id']) |
2291 | -++ |
2292 | -++ if image_state != 'available': |
2293 | -++ raise exception.EC2APIError(_('Image must be available')) |
2294 | -++ |
2295 | -++ (instances, resv_id) = self.compute_api.create(context, |
2296 | -++ instance_type=instance_types.get_instance_type_by_name( |
2297 | -++ kwargs.get('instance_type', None)), |
2298 | -++ image_href=image_uuid, |
2299 | -++ min_count=int(kwargs.get('min_count', max_count)), |
2300 | -++ max_count=max_count, |
2301 | -++ kernel_id=kwargs.get('kernel_id'), |
2302 | -++ ramdisk_id=kwargs.get('ramdisk_id'), |
2303 | -++ key_name=kwargs.get('key_name'), |
2304 | -++ user_data=kwargs.get('user_data'), |
2305 | -++ security_group=kwargs.get('security_group'), |
2306 | -++ availability_zone=kwargs.get('placement', {}).get( |
2307 | -++ 'availability_zone'), |
2308 | -++ block_device_mapping=kwargs.get('block_device_mapping', {})) |
2309 | -++ return self._format_run_instances(context, resv_id) |
2310 | -++ |
2311 | -++ def terminate_instances(self, context, instance_id, **kwargs): |
2312 | -++ """Terminate each instance in instance_id, which is a list of ec2 ids. |
2313 | -++ instance_id is a kwarg so its name cannot be modified.""" |
2314 | -++ LOG.debug(_("Going to start terminating instances")) |
2315 | -++ previous_states = [] |
2316 | -++ for ec2_id in instance_id: |
2317 | -++ validate_ec2_id(ec2_id) |
2318 | -++ _instance_id = ec2utils.ec2_id_to_id(ec2_id) |
2319 | -++ instance = self.compute_api.get(context, _instance_id) |
2320 | -++ previous_states.append(instance) |
2321 | -++ self.compute_api.delete(context, instance) |
2322 | -++ return self._format_terminate_instances(context, |
2323 | -++ instance_id, |
2324 | -++ previous_states) |
2325 | -++ |
2326 | -++ def reboot_instances(self, context, instance_id, **kwargs): |
2327 | -++ """instance_id is a list of instance ids""" |
2328 | -++ LOG.audit(_("Reboot instance %r"), instance_id, context=context) |
2329 | -++ for ec2_id in instance_id: |
2330 | -++ validate_ec2_id(ec2_id) |
2331 | -++ _instance_id = ec2utils.ec2_id_to_id(ec2_id) |
2332 | -++ instance = self.compute_api.get(context, _instance_id) |
2333 | -++ self.compute_api.reboot(context, instance, 'HARD') |
2334 | -++ return True |
2335 | -++ |
2336 | -++ def stop_instances(self, context, instance_id, **kwargs): |
2337 | -++ """Stop each instances in instance_id. |
2338 | -++ Here instance_id is a list of instance ids""" |
2339 | -++ LOG.debug(_("Going to stop instances")) |
2340 | -++ for ec2_id in instance_id: |
2341 | -++ validate_ec2_id(ec2_id) |
2342 | -++ _instance_id = ec2utils.ec2_id_to_id(ec2_id) |
2343 | -++ instance = self.compute_api.get(context, _instance_id) |
2344 | -++ self.compute_api.stop(context, instance) |
2345 | -++ return True |
2346 | -++ |
2347 | -++ def start_instances(self, context, instance_id, **kwargs): |
2348 | -++ """Start each instances in instance_id. |
2349 | -++ Here instance_id is a list of instance ids""" |
2350 | -++ LOG.debug(_("Going to start instances")) |
2351 | -++ for ec2_id in instance_id: |
2352 | -++ validate_ec2_id(ec2_id) |
2353 | -++ _instance_id = ec2utils.ec2_id_to_id(ec2_id) |
2354 | -++ instance = self.compute_api.get(context, _instance_id) |
2355 | -++ self.compute_api.start(context, instance) |
2356 | -++ return True |
2357 | -++ |
2358 | -++ def _get_image(self, context, ec2_id): |
2359 | -++ try: |
2360 | -++ internal_id = ec2utils.ec2_id_to_id(ec2_id) |
2361 | -++ image = self.image_service.show(context, internal_id) |
2362 | -++ except (exception.InvalidEc2Id, exception.ImageNotFound): |
2363 | -++ try: |
2364 | -++ return self.image_service.show_by_name(context, ec2_id) |
2365 | -++ except exception.NotFound: |
2366 | -++ raise exception.ImageNotFound(image_id=ec2_id) |
2367 | -++ image_type = ec2_id.split('-')[0] |
2368 | -++ if ec2utils.image_type(image.get('container_format')) != image_type: |
2369 | -++ raise exception.ImageNotFound(image_id=ec2_id) |
2370 | -++ return image |
2371 | -++ |
2372 | -++ # NOTE(bcwaldon): We need access to the image uuid since we directly |
2373 | -++ # call the compute api from this class |
2374 | -++ def _get_image_uuid(self, context, internal_id): |
2375 | -++ return self.image_service.get_image_uuid(context, internal_id) |
2376 | -++ |
2377 | -++ # NOTE(bcwaldon): We also need to be able to map image uuids to integers |
2378 | -++ def _get_image_id(self, context, image_uuid): |
2379 | -++ return self.image_service.get_image_id(context, image_uuid) |
2380 | -++ |
2381 | -++ def _format_image(self, image): |
2382 | -++ """Convert from format defined by GlanceImageService to S3 format.""" |
2383 | -++ i = {} |
2384 | -++ image_type = ec2utils.image_type(image.get('container_format')) |
2385 | -++ ec2_id = ec2utils.image_ec2_id(image.get('id'), image_type) |
2386 | -++ name = image.get('name') |
2387 | -++ i['imageId'] = ec2_id |
2388 | -++ kernel_id = image['properties'].get('kernel_id') |
2389 | -++ if kernel_id: |
2390 | -++ i['kernelId'] = ec2utils.image_ec2_id(kernel_id, 'aki') |
2391 | -++ ramdisk_id = image['properties'].get('ramdisk_id') |
2392 | -++ if ramdisk_id: |
2393 | -++ i['ramdiskId'] = ec2utils.image_ec2_id(ramdisk_id, 'ari') |
2394 | -++ i['imageOwnerId'] = image['properties'].get('owner_id') |
2395 | -++ if name: |
2396 | -++ i['imageLocation'] = "%s (%s)" % (image['properties']. |
2397 | -++ get('image_location'), name) |
2398 | -++ else: |
2399 | -++ i['imageLocation'] = image['properties'].get('image_location') |
2400 | -++ |
2401 | -++ i['imageState'] = self._get_image_state(image) |
2402 | -++ i['description'] = image.get('description') |
2403 | -++ display_mapping = {'aki': 'kernel', |
2404 | -++ 'ari': 'ramdisk', |
2405 | -++ 'ami': 'machine'} |
2406 | -++ i['imageType'] = display_mapping.get(image_type) |
2407 | -++ i['isPublic'] = not not image.get('is_public') |
2408 | -++ i['architecture'] = image['properties'].get('architecture') |
2409 | -++ |
2410 | -++ properties = image['properties'] |
2411 | -++ root_device_name = block_device.properties_root_device_name(properties) |
2412 | -++ root_device_type = 'instance-store' |
2413 | -++ for bdm in properties.get('block_device_mapping', []): |
2414 | -++ if (bdm.get('device_name') == root_device_name and |
2415 | -++ ('snapshot_id' in bdm or 'volume_id' in bdm) and |
2416 | -++ not bdm.get('no_device')): |
2417 | -++ root_device_type = 'ebs' |
2418 | -++ i['rootDeviceName'] = (root_device_name or |
2419 | -++ block_device.DEFAULT_ROOT_DEV_NAME) |
2420 | -++ i['rootDeviceType'] = root_device_type |
2421 | -++ |
2422 | -++ _format_mappings(properties, i) |
2423 | -++ |
2424 | -++ return i |
2425 | -++ |
2426 | -++ def describe_images(self, context, image_id=None, **kwargs): |
2427 | -++ # NOTE: image_id is a list! |
2428 | -++ if image_id: |
2429 | -++ images = [] |
2430 | -++ for ec2_id in image_id: |
2431 | -++ try: |
2432 | -++ image = self._get_image(context, ec2_id) |
2433 | -++ except exception.NotFound: |
2434 | -++ raise exception.ImageNotFound(image_id=ec2_id) |
2435 | -++ images.append(image) |
2436 | -++ else: |
2437 | -++ images = self.image_service.detail(context) |
2438 | -++ images = [self._format_image(i) for i in images] |
2439 | -++ return {'imagesSet': images} |
2440 | -++ |
2441 | -++ def deregister_image(self, context, image_id, **kwargs): |
2442 | -++ LOG.audit(_("De-registering image %s"), image_id, context=context) |
2443 | -++ image = self._get_image(context, image_id) |
2444 | -++ internal_id = image['id'] |
2445 | -++ self.image_service.delete(context, internal_id) |
2446 | -++ return {'imageId': image_id} |
2447 | -++ |
2448 | -++ def _register_image(self, context, metadata): |
2449 | -++ image = self.image_service.create(context, metadata) |
2450 | -++ image_type = ec2utils.image_type(image.get('container_format')) |
2451 | -++ image_id = ec2utils.image_ec2_id(image['id'], image_type) |
2452 | -++ return image_id |
2453 | -++ |
2454 | -++ def register_image(self, context, image_location=None, **kwargs): |
2455 | -++ if image_location is None and 'name' in kwargs: |
2456 | -++ image_location = kwargs['name'] |
2457 | -++ metadata = {'properties': {'image_location': image_location}} |
2458 | -++ |
2459 | -++ if 'root_device_name' in kwargs: |
2460 | -++ metadata['properties']['root_device_name'] = kwargs.get( |
2461 | -++ 'root_device_name') |
2462 | -++ |
2463 | -++ mappings = [_parse_block_device_mapping(bdm) for bdm in |
2464 | -++ kwargs.get('block_device_mapping', [])] |
2465 | -++ if mappings: |
2466 | -++ metadata['properties']['block_device_mapping'] = mappings |
2467 | -++ |
2468 | -++ image_id = self._register_image(context, metadata) |
2469 | -++ msg = _("Registered image %(image_location)s with" |
2470 | -++ " id %(image_id)s") % locals() |
2471 | -++ LOG.audit(msg, context=context) |
2472 | -++ return {'imageId': image_id} |
2473 | -++ |
2474 | -++ def describe_image_attribute(self, context, image_id, attribute, **kwargs): |
2475 | -++ def _block_device_mapping_attribute(image, result): |
2476 | -++ _format_mappings(image['properties'], result) |
2477 | -++ |
2478 | -++ def _launch_permission_attribute(image, result): |
2479 | -++ result['launchPermission'] = [] |
2480 | -++ if image['is_public']: |
2481 | -++ result['launchPermission'].append({'group': 'all'}) |
2482 | -++ |
2483 | -++ def _root_device_name_attribute(image, result): |
2484 | -++ _prop_root_dev_name = block_device.properties_root_device_name |
2485 | -++ result['rootDeviceName'] = _prop_root_dev_name(image['properties']) |
2486 | -++ if result['rootDeviceName'] is None: |
2487 | -++ result['rootDeviceName'] = block_device.DEFAULT_ROOT_DEV_NAME |
2488 | -++ |
2489 | -++ supported_attributes = { |
2490 | -++ 'blockDeviceMapping': _block_device_mapping_attribute, |
2491 | -++ 'launchPermission': _launch_permission_attribute, |
2492 | -++ 'rootDeviceName': _root_device_name_attribute, |
2493 | -++ } |
2494 | -++ |
2495 | -++ fn = supported_attributes.get(attribute) |
2496 | -++ if fn is None: |
2497 | -++ raise exception.EC2APIError(_('attribute not supported: %s') |
2498 | -++ % attribute) |
2499 | -++ try: |
2500 | -++ image = self._get_image(context, image_id) |
2501 | -++ except exception.NotFound: |
2502 | -++ raise exception.ImageNotFound(image_id=image_id) |
2503 | -++ |
2504 | -++ result = {'imageId': image_id} |
2505 | -++ fn(image, result) |
2506 | -++ return result |
2507 | -++ |
2508 | -++ def modify_image_attribute(self, context, image_id, attribute, |
2509 | -++ operation_type, **kwargs): |
2510 | -++ # TODO(devcamcar): Support users and groups other than 'all'. |
2511 | -++ if attribute != 'launchPermission': |
2512 | -++ raise exception.EC2APIError(_('attribute not supported: %s') |
2513 | -++ % attribute) |
2514 | -++ if not 'user_group' in kwargs: |
2515 | -++ raise exception.EC2APIError(_('user or group not specified')) |
2516 | -++ if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': |
2517 | -++ raise exception.EC2APIError(_('only group "all" is supported')) |
2518 | -++ if not operation_type in ['add', 'remove']: |
2519 | -++ msg = _('operation_type must be add or remove') |
2520 | -++ raise exception.EC2APIError(msg) |
2521 | -++ LOG.audit(_("Updating image %s publicity"), image_id, context=context) |
2522 | -++ |
2523 | -++ try: |
2524 | -++ image = self._get_image(context, image_id) |
2525 | -++ except exception.NotFound: |
2526 | -++ raise exception.ImageNotFound(image_id=image_id) |
2527 | -++ internal_id = image['id'] |
2528 | -++ del(image['id']) |
2529 | -++ |
2530 | -++ image['is_public'] = (operation_type == 'add') |
2531 | -++ return self.image_service.update(context, internal_id, image) |
2532 | -++ |
2533 | -++ def update_image(self, context, image_id, **kwargs): |
2534 | -++ internal_id = ec2utils.ec2_id_to_id(image_id) |
2535 | -++ result = self.image_service.update(context, internal_id, dict(kwargs)) |
2536 | -++ return result |
2537 | -++ |
2538 | -++ # TODO(yamahata): race condition |
2539 | -++ # At the moment there is no way to prevent others from |
2540 | -++ # manipulating instances/volumes/snapshots. |
2541 | -++ # As other code doesn't take it into consideration, here we don't |
2542 | -++ # care of it for now. Ostrich algorithm |
2543 | -++ def create_image(self, context, instance_id, **kwargs): |
2544 | -++ # NOTE(yamahata): name/description are ignored by register_image(), |
2545 | -++ # do so here |
2546 | -++ no_reboot = kwargs.get('no_reboot', False) |
2547 | -++ validate_ec2_id(instance_id) |
2548 | -++ ec2_instance_id = instance_id |
2549 | -++ instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) |
2550 | -++ instance = self.compute_api.get(context, instance_id) |
2551 | -++ |
2552 | -++ # stop the instance if necessary |
2553 | -++ restart_instance = False |
2554 | -++ if not no_reboot: |
2555 | -++ vm_state = instance['vm_state'] |
2556 | -++ |
2557 | -++ # if the instance is in subtle state, refuse to proceed. |
2558 | -++ if vm_state not in (vm_states.ACTIVE, vm_states.SHUTOFF, |
2559 | -++ vm_states.STOPPED): |
2560 | -++ raise exception.InstanceNotRunning(instance_id=ec2_instance_id) |
2561 | -++ |
2562 | -++ if vm_state in (vm_states.ACTIVE, vm_states.SHUTOFF): |
2563 | -++ restart_instance = True |
2564 | -++ self.compute_api.stop(context, instance) |
2565 | -++ |
2566 | -++ # wait instance for really stopped |
2567 | -++ start_time = time.time() |
2568 | -++ while vm_state != vm_states.STOPPED: |
2569 | -++ time.sleep(1) |
2570 | -++ instance = self.compute_api.get(context, instance_id) |
2571 | -++ vm_state = instance['vm_state'] |
2572 | -++ # NOTE(yamahata): timeout and error. 1 hour for now for safety. |
2573 | -++ # Is it too short/long? |
2574 | -++ # Or is there any better way? |
2575 | -++ timeout = 1 * 60 * 60 * 60 |
2576 | -++ if time.time() > start_time + timeout: |
2577 | -++ raise exception.EC2APIError( |
2578 | -++ _('Couldn\'t stop instance with in %d sec') % timeout) |
2579 | -++ |
2580 | -++ src_image = self._get_image(context, instance['image_ref']) |
2581 | -++ properties = src_image['properties'] |
2582 | -++ if instance['root_device_name']: |
2583 | -++ properties['root_device_name'] = instance['root_device_name'] |
2584 | -++ |
2585 | -++ mapping = [] |
2586 | -++ bdms = db.block_device_mapping_get_all_by_instance(context, |
2587 | -++ instance_id) |
2588 | -++ for bdm in bdms: |
2589 | -++ if bdm.no_device: |
2590 | -++ continue |
2591 | -++ m = {} |
2592 | -++ for attr in ('device_name', 'snapshot_id', 'volume_id', |
2593 | -++ 'volume_size', 'delete_on_termination', 'no_device', |
2594 | -++ 'virtual_name'): |
2595 | -++ val = getattr(bdm, attr) |
2596 | -++ if val is not None: |
2597 | -++ m[attr] = val |
2598 | -++ |
2599 | -++ volume_id = m.get('volume_id') |
2600 | -++ if m.get('snapshot_id') and volume_id: |
2601 | -++ # create snapshot based on volume_id |
2602 | -++ volume = self.volume_api.get(context, volume_id) |
2603 | -++ # NOTE(yamahata): Should we wait for snapshot creation? |
2604 | -++ # Linux LVM snapshot creation completes in |
2605 | -++ # short time, it doesn't matter for now. |
2606 | -++ snapshot = self.volume_api.create_snapshot_force( |
2607 | -++ context, volume, volume['display_name'], |
2608 | -++ volume['display_description']) |
2609 | -++ m['snapshot_id'] = snapshot['id'] |
2610 | -++ del m['volume_id'] |
2611 | -++ |
2612 | -++ if m: |
2613 | -++ mapping.append(m) |
2614 | -++ |
2615 | -++ for m in _properties_get_mappings(properties): |
2616 | -++ virtual_name = m['virtual'] |
2617 | -++ if virtual_name in ('ami', 'root'): |
2618 | -++ continue |
2619 | -++ |
2620 | -++ assert block_device.is_swap_or_ephemeral(virtual_name) |
2621 | -++ device_name = m['device'] |
2622 | -++ if device_name in [b['device_name'] for b in mapping |
2623 | -++ if not b.get('no_device', False)]: |
2624 | -++ continue |
2625 | -++ |
2626 | -++ # NOTE(yamahata): swap and ephemeral devices are specified in |
2627 | -++ # AMI, but disabled for this instance by user. |
2628 | -++ # So disable those device by no_device. |
2629 | -++ mapping.append({'device_name': device_name, 'no_device': True}) |
2630 | -++ |
2631 | -++ if mapping: |
2632 | -++ properties['block_device_mapping'] = mapping |
2633 | -++ |
2634 | -++ for attr in ('status', 'location', 'id'): |
2635 | -++ src_image.pop(attr, None) |
2636 | -++ |
2637 | -++ image_id = self._register_image(context, src_image) |
2638 | -++ |
2639 | -++ if restart_instance: |
2640 | -++ self.compute_api.start(context, instance_id=instance_id) |
2641 | -++ |
2642 | -++ return {'imageId': image_id} |
2643 | -+diff -Naurp nova.orig/api/ec2/ec2utils.py nova/api/ec2/ec2utils.py |
2644 | -+--- nova.orig/api/ec2/ec2utils.py 1969-12-31 19:00:00.000000000 -0500 |
2645 | -++++ nova/api/ec2/ec2utils.py 2012-02-28 09:08:10.821887173 -0500 |
2646 | -+@@ -0,0 +1,208 @@ |
2647 | -++# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
2648 | -++ |
2649 | -++# Copyright 2010 United States Government as represented by the |
2650 | -++# Administrator of the National Aeronautics and Space Administration. |
2651 | -++# All Rights Reserved. |
2652 | -++# |
2653 | -++# Licensed under the Apache License, Version 2.0 (the "License"); you may |
2654 | -++# not use this file except in compliance with the License. You may obtain |
2655 | -++# a copy of the License at |
2656 | -++# |
2657 | -++# http://www.apache.org/licenses/LICENSE-2.0 |
2658 | -++# |
2659 | -++# Unless required by applicable law or agreed to in writing, software |
2660 | -++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
2661 | -++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
2662 | -++# License for the specific language governing permissions and limitations |
2663 | -++# under the License. |
2664 | -++ |
2665 | -++import re |
2666 | -++ |
2667 | -++from nova import exception |
2668 | -++from nova import flags |
2669 | -++from nova import log as logging |
2670 | -++from nova import network |
2671 | -++from nova.network import model as network_model |
2672 | -++ |
2673 | -++ |
2674 | -++FLAGS = flags.FLAGS |
2675 | -++LOG = logging.getLogger(__name__) |
2676 | -++ |
2677 | -++ |
2678 | -++def image_type(image_type): |
2679 | -++ """Converts to a three letter image type. |
2680 | -++ |
2681 | -++ aki, kernel => aki |
2682 | -++ ari, ramdisk => ari |
2683 | -++ anything else => ami |
2684 | -++ |
2685 | -++ """ |
2686 | -++ if image_type == 'kernel': |
2687 | -++ return 'aki' |
2688 | -++ if image_type == 'ramdisk': |
2689 | -++ return 'ari' |
2690 | -++ if image_type not in ['aki', 'ari']: |
2691 | -++ return 'ami' |
2692 | -++ return image_type |
2693 | -++ |
2694 | -++ |
2695 | -++def ec2_id_to_id(ec2_id): |
2696 | -++ """Convert an ec2 ID (i-[base 16 number]) to an instance id (int)""" |
2697 | -++ try: |
2698 | -++ return int(ec2_id.split('-')[-1], 16) |
2699 | -++ except ValueError: |
2700 | -++ raise exception.InvalidEc2Id(ec2_id=ec2_id) |
2701 | -++ |
2702 | -++ |
2703 | -++def image_ec2_id(image_id, image_type='ami'): |
2704 | -++ """Returns image ec2_id using id and three letter type.""" |
2705 | -++ template = image_type + '-%08x' |
2706 | -++ try: |
2707 | -++ return id_to_ec2_id(image_id, template=template) |
2708 | -++ except ValueError: |
2709 | -++ #TODO(wwolf): once we have ec2_id -> glance_id mapping |
2710 | -++ # in place, this wont be necessary |
2711 | -++ return "ami-00000000" |
2712 | -++ |
2713 | -++ |
2714 | -++def get_ip_info_for_instance_from_nw_info(nw_info): |
2715 | -++ ip_info = dict(fixed_ips=[], fixed_ip6s=[], floating_ips=[]) |
2716 | -++ for vif in nw_info: |
2717 | -++ vif_fixed_ips = vif.fixed_ips() |
2718 | -++ |
2719 | -++ fixed_ips = [ip['address'] |
2720 | -++ for ip in vif_fixed_ips if ip['version'] == 4] |
2721 | -++ fixed_ip6s = [ip['address'] |
2722 | -++ for ip in vif_fixed_ips if ip['version'] == 6] |
2723 | -++ floating_ips = [ip['address'] |
2724 | -++ for ip in vif.floating_ips()] |
2725 | -++ ip_info['fixed_ips'].extend(fixed_ips) |
2726 | -++ ip_info['fixed_ip6s'].extend(fixed_ip6s) |
2727 | -++ ip_info['floating_ips'].extend(floating_ips) |
2728 | -++ |
2729 | -++ return ip_info |
2730 | -++ |
2731 | -++ |
2732 | -++def get_ip_info_for_instance(context, instance): |
2733 | -++ """Return a dictionary of IP information for an instance""" |
2734 | -++ |
2735 | -++ cached_nwinfo = instance['info_cache']['network_info'] |
2736 | -++ # Make sure empty response is turned into [] |
2737 | -++ if not cached_nwinfo: |
2738 | -++ cached_nwinfo = [] |
2739 | -++ nw_info = network_model.NetworkInfo.hydrate(cached_nwinfo) |
2740 | -++ return get_ip_info_for_instance_from_nw_info(nw_info) |
2741 | -++ |
2742 | -++ |
2743 | -++def get_availability_zone_by_host(services, host): |
2744 | -++ if len(services) > 0: |
2745 | -++ return services[0]['availability_zone'] |
2746 | -++ return 'unknown zone' |
2747 | -++ |
2748 | -++ |
2749 | -++def id_to_ec2_id(instance_id, template='i-%08x'): |
2750 | -++ """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])""" |
2751 | -++ return template % int(instance_id) |
2752 | -++ |
2753 | -++ |
2754 | -++def id_to_ec2_snap_id(instance_id): |
2755 | -++ """Convert an snapshot ID (int) to an ec2 snapshot ID |
2756 | -++ (snap-[base 16 number])""" |
2757 | -++ return id_to_ec2_id(instance_id, 'snap-%08x') |
2758 | -++ |
2759 | -++ |
2760 | -++def id_to_ec2_vol_id(instance_id): |
2761 | -++ """Convert an volume ID (int) to an ec2 volume ID (vol-[base 16 number])""" |
2762 | -++ return id_to_ec2_id(instance_id, 'vol-%08x') |
2763 | -++ |
2764 | -++ |
2765 | -++_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))') |
2766 | -++ |
2767 | -++ |
2768 | -++def camelcase_to_underscore(str): |
2769 | -++ return _c2u.sub(r'_\1', str).lower().strip('_') |
2770 | -++ |
2771 | -++ |
2772 | -++def _try_convert(value): |
2773 | -++ """Return a non-string from a string or unicode, if possible. |
2774 | -++ |
2775 | -++ ============= ===================================================== |
2776 | -++ When value is returns |
2777 | -++ ============= ===================================================== |
2778 | -++ zero-length '' |
2779 | -++ 'None' None |
2780 | -++ 'True' True case insensitive |
2781 | -++ 'False' False case insensitive |
2782 | -++ '0', '-0' 0 |
2783 | -++ 0xN, -0xN int from hex (positive) (N is any number) |
2784 | -++ 0bN, -0bN int from binary (positive) (N is any number) |
2785 | -++ * try conversion to int, float, complex, fallback value |
2786 | -++ |
2787 | -++ """ |
2788 | -++ if len(value) == 0: |
2789 | -++ return '' |
2790 | -++ if value == 'None': |
2791 | -++ return None |
2792 | -++ lowered_value = value.lower() |
2793 | -++ if lowered_value == 'true': |
2794 | -++ return True |
2795 | -++ if lowered_value == 'false': |
2796 | -++ return False |
2797 | -++ valueneg = value[1:] if value[0] == '-' else value |
2798 | -++ if valueneg == '0': |
2799 | -++ return 0 |
2800 | -++ if valueneg == '': |
2801 | -++ return value |
2802 | -++ if valueneg[0] == '0': |
2803 | -++ if valueneg[1] in 'xX': |
2804 | -++ return int(value, 16) |
2805 | -++ elif valueneg[1] in 'bB': |
2806 | -++ return int(value, 2) |
2807 | -++ else: |
2808 | -++ try: |
2809 | -++ return int(value, 8) |
2810 | -++ except ValueError: |
2811 | -++ pass |
2812 | -++ try: |
2813 | -++ return int(value) |
2814 | -++ except ValueError: |
2815 | -++ pass |
2816 | -++ try: |
2817 | -++ return float(value) |
2818 | -++ except ValueError: |
2819 | -++ pass |
2820 | -++ try: |
2821 | -++ return complex(value) |
2822 | -++ except ValueError: |
2823 | -++ return value |
2824 | -++ |
2825 | -++ |
2826 | -++def dict_from_dotted_str(items): |
2827 | -++ """parse multi dot-separated argument into dict. |
2828 | -++ EBS boot uses multi dot-separated arguments like |
2829 | -++ BlockDeviceMapping.1.DeviceName=snap-id |
2830 | -++ Convert the above into |
2831 | -++ {'block_device_mapping': {'1': {'device_name': snap-id}}} |
2832 | -++ """ |
2833 | -++ args = {} |
2834 | -++ for key, value in items: |
2835 | -++ parts = key.split(".") |
2836 | -++ key = str(camelcase_to_underscore(parts[0])) |
2837 | -++ if isinstance(value, str) or isinstance(value, unicode): |
2838 | -++ # NOTE(vish): Automatically convert strings back |
2839 | -++ # into their respective values |
2840 | -++ value = _try_convert(value) |
2841 | -++ |
2842 | -++ if len(parts) > 1: |
2843 | -++ d = args.get(key, {}) |
2844 | -++ args[key] = d |
2845 | -++ for k in parts[1:-1]: |
2846 | -++ k = camelcase_to_underscore(k) |
2847 | -++ v = d.get(k, {}) |
2848 | -++ d[k] = v |
2849 | -++ d = v |
2850 | -++ d[camelcase_to_underscore(parts[-1])] = value |
2851 | -++ else: |
2852 | -++ args[key] = value |
2853 | -++ |
2854 | -++ return args |
2855 | -+diff -Naurp nova.orig/api/ec2/faults.py nova/api/ec2/faults.py |
2856 | -+--- nova.orig/api/ec2/faults.py 1969-12-31 19:00:00.000000000 -0500 |
2857 | -++++ nova/api/ec2/faults.py 2012-02-28 09:08:10.821887173 -0500 |
2858 | -+@@ -0,0 +1,64 @@ |
2859 | -++# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
2860 | -++ |
2861 | -++# Licensed under the Apache License, Version 2.0 (the "License"); you may |
2862 | -++# not use this file except in compliance with the License. You may obtain |
2863 | -++# a copy of the License at |
2864 | -++# |
2865 | -++# http://www.apache.org/licenses/LICENSE-2.0 |
2866 | -++# |
2867 | -++# Unless required by applicable law or agreed to in writing, software |
2868 | -++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
2869 | -++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
2870 | -++# License for the specific language governing permissions and limitations |
2871 | -++# under the License. |
2872 | -++ |
2873 | -++import webob.dec |
2874 | -++import webob.exc |
2875 | -++ |
2876 | -++from nova import context |
2877 | -++from nova import flags |
2878 | -++from nova import utils |
2879 | -++ |
2880 | -++FLAGS = flags.FLAGS |
2881 | -++ |
2882 | -++ |
2883 | -++class Fault(webob.exc.HTTPException): |
2884 | -++ """Captures exception and return REST Response.""" |
2885 | -++ |
2886 | -++ def __init__(self, exception): |
2887 | -++ """Create a response for the given webob.exc.exception.""" |
2888 | -++ self.wrapped_exc = exception |
2889 | -++ |
2890 | -++ @webob.dec.wsgify |
2891 | -++ def __call__(self, req): |
2892 | -++ """Generate a WSGI response based on the exception passed to ctor.""" |
2893 | -++ code = self.wrapped_exc.status_int |
2894 | -++ message = self.wrapped_exc.explanation |
2895 | -++ |
2896 | -++ if code == 501: |
2897 | -++ message = "The requested function is not supported" |
2898 | -++ code = str(code) |
2899 | -++ |
2900 | -++ if 'AWSAccessKeyId' not in req.params: |
2901 | -++ raise webob.exc.HTTPBadRequest() |
2902 | -++ user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':') |
2903 | -++ project_id = project_id or user_id |
2904 | -++ remote_address = getattr(req, 'remote_address', '127.0.0.1') |
2905 | -++ if FLAGS.use_forwarded_for: |
2906 | -++ remote_address = req.headers.get('X-Forwarded-For', remote_address) |
2907 | -++ |
2908 | -++ ctxt = context.RequestContext(user_id, |
2909 | -++ project_id, |
2910 | -++ remote_address=remote_address) |
2911 | -++ |
2912 | -++ resp = webob.Response() |
2913 | -++ resp.status = self.wrapped_exc.status_int |
2914 | -++ resp.headers['Content-Type'] = 'text/xml' |
2915 | -++ resp.body = str('<?xml version="1.0"?>\n' |
2916 | -++ '<Response><Errors><Error><Code>%s</Code>' |
2917 | -++ '<Message>%s</Message></Error></Errors>' |
2918 | -++ '<RequestID>%s</RequestID></Response>' % |
2919 | -++ (utils.utf8(code), utils.utf8(message), |
2920 | -++ utils.utf8(ctxt.request_id))) |
2921 | -++ |
2922 | -++ return resp |
2923 | -+diff -Naurp nova.orig/api/ec2/__init__.py nova/api/ec2/__init__.py |
2924 | -+--- nova.orig/api/ec2/__init__.py 1969-12-31 19:00:00.000000000 -0500 |
2925 | -++++ nova/api/ec2/__init__.py 2012-02-28 09:08:10.821887173 -0500 |
2926 | -+@@ -0,0 +1,651 @@ |
2927 | -++# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
2928 | -++ |
2929 | -++# Copyright 2010 United States Government as represented by the |
2930 | -++# Administrator of the National Aeronautics and Space Administration. |
2931 | -++# All Rights Reserved. |
2932 | -++# |
2933 | -++# Licensed under the Apache License, Version 2.0 (the "License"); you may |
2934 | -++# not use this file except in compliance with the License. You may obtain |
2935 | -++# a copy of the License at |
2936 | -++# |
2937 | -++# http://www.apache.org/licenses/LICENSE-2.0 |
2938 | -++# |
2939 | -++# Unless required by applicable law or agreed to in writing, software |
2940 | -++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
2941 | -++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
2942 | -++# License for the specific language governing permissions and limitations |
2943 | -++# under the License. |
2944 | -++""" |
2945 | -++Starting point for routing EC2 requests. |
2946 | -++ |
2947 | -++""" |
2948 | -++ |
2949 | -++import urlparse |
2950 | -++ |
2951 | -++from eventlet.green import httplib |
2952 | -++import webob |
2953 | -++import webob.dec |
2954 | -++import webob.exc |
2955 | -++ |
2956 | -++from nova.api.ec2 import apirequest |
2957 | -++from nova.api.ec2 import ec2utils |
2958 | -++from nova.api.ec2 import faults |
2959 | -++from nova.api import validator |
2960 | -++from nova.auth import manager |
2961 | -++from nova import context |
2962 | -++from nova import exception |
2963 | -++from nova import flags |
2964 | -++from nova import log as logging |
2965 | -++from nova.openstack.common import cfg |
2966 | -++from nova import utils |
2967 | -++from nova import wsgi |
2968 | -++ |
2969 | -++ |
2970 | -++LOG = logging.getLogger(__name__) |
2971 | -++ |
2972 | -++ec2_opts = [ |
2973 | -++ cfg.IntOpt('lockout_attempts', |
2974 | -++ default=5, |
2975 | -++ help='Number of failed auths before lockout.'), |
2976 | -++ cfg.IntOpt('lockout_minutes', |
2977 | -++ default=15, |
2978 | -++ help='Number of minutes to lockout if triggered.'), |
2979 | -++ cfg.IntOpt('lockout_window', |
2980 | -++ default=15, |
2981 | -++ help='Number of minutes for lockout window.'), |
2982 | -++ cfg.StrOpt('keystone_ec2_url', |
2983 | -++ default='http://localhost:5000/v2.0/ec2tokens', |
2984 | -++ help='URL to get token from ec2 request.'), |
2985 | -++ ] |
2986 | -++ |
2987 | -++FLAGS = flags.FLAGS |
2988 | -++FLAGS.register_opts(ec2_opts) |
2989 | -++ |
2990 | -++flags.DECLARE('use_forwarded_for', 'nova.api.auth') |
2991 | -++ |
2992 | -++ |
2993 | -++def ec2_error(req, request_id, code, message): |
2994 | -++ """Helper to send an ec2_compatible error""" |
2995 | -++ LOG.error(_('%(code)s: %(message)s') % locals()) |
2996 | -++ resp = webob.Response() |
2997 | -++ resp.status = 400 |
2998 | -++ resp.headers['Content-Type'] = 'text/xml' |
2999 | -++ resp.body = str('<?xml version="1.0"?>\n' |
3000 | -++ '<Response><Errors><Error><Code>%s</Code>' |
3001 | -++ '<Message>%s</Message></Error></Errors>' |
3002 | -++ '<RequestID>%s</RequestID></Response>' % |
3003 | -++ (utils.utf8(code), utils.utf8(message), |
3004 | -++ utils.utf8(request_id))) |
3005 | -++ return resp |
3006 | -++ |
3007 | -++ |
3008 | -++## Fault Wrapper around all EC2 requests ## |
3009 | -++class FaultWrapper(wsgi.Middleware): |
3010 | -++ """Calls the middleware stack, captures any exceptions into faults.""" |
3011 | -++ |
3012 | -++ @webob.dec.wsgify(RequestClass=wsgi.Request) |
3013 | -++ def __call__(self, req): |
3014 | -++ try: |
3015 | -++ return req.get_response(self.application) |
3016 | -++ except Exception as ex: |
3017 | -++ LOG.exception(_("FaultWrapper: %s"), unicode(ex)) |
3018 | -++ return faults.Fault(webob.exc.HTTPInternalServerError()) |
3019 | -++ |
3020 | -++ |
3021 | -++class RequestLogging(wsgi.Middleware): |
3022 | -++ """Access-Log akin logging for all EC2 API requests.""" |
3023 | -++ |
3024 | -++ @webob.dec.wsgify(RequestClass=wsgi.Request) |
3025 | -++ def __call__(self, req): |
3026 | -++ start = utils.utcnow() |
3027 | -++ rv = req.get_response(self.application) |
3028 | -++ self.log_request_completion(rv, req, start) |
3029 | -++ return rv |
3030 | -++ |
3031 | -++ def log_request_completion(self, response, request, start): |
3032 | -++ apireq = request.environ.get('ec2.request', None) |
3033 | -++ if apireq: |
3034 | -++ controller = apireq.controller |
3035 | -++ action = apireq.action |
3036 | -++ else: |
3037 | -++ controller = None |
3038 | -++ action = None |
3039 | -++ ctxt = request.environ.get('nova.context', None) |
3040 | -++ delta = utils.utcnow() - start |
3041 | -++ seconds = delta.seconds |
3042 | -++ microseconds = delta.microseconds |
3043 | -++ LOG.info( |
3044 | -++ "%s.%ss %s %s %s %s:%s %s [%s] %s %s", |
3045 | -++ seconds, |
3046 | -++ microseconds, |
3047 | -++ request.remote_addr, |
3048 | -++ request.method, |
3049 | -++ "%s%s" % (request.script_name, request.path_info), |
3050 | -++ controller, |
3051 | -++ action, |
3052 | -++ response.status_int, |
3053 | -++ request.user_agent, |
3054 | -++ request.content_type, |
3055 | -++ response.content_type, |
3056 | -++ context=ctxt) |
3057 | -++ |
3058 | -++ |
3059 | -++class Lockout(wsgi.Middleware): |
3060 | -++ """Lockout for x minutes on y failed auths in a z minute period. |
3061 | -++ |
3062 | -++ x = lockout_timeout flag |
3063 | -++ y = lockout_window flag |
3064 | -++ z = lockout_attempts flag |
3065 | -++ |
3066 | -++ Uses memcached if lockout_memcached_servers flag is set, otherwise it |
3067 | -++ uses a very simple in-process cache. Due to the simplicity of |
3068 | -++ the implementation, the timeout window is started with the first |
3069 | -++ failed request, so it will block if there are x failed logins within |
3070 | -++ that period. |
3071 | -++ |
3072 | -++ There is a possible race condition where simultaneous requests could |
3073 | -++ sneak in before the lockout hits, but this is extremely rare and would |
3074 | -++ only result in a couple of extra failed attempts.""" |
3075 | -++ |
3076 | -++ def __init__(self, application): |
3077 | -++ """middleware can use fake for testing.""" |
3078 | -++ if FLAGS.memcached_servers: |
3079 | -++ import memcache |
3080 | -++ else: |
3081 | -++ from nova.testing.fake import memcache |
3082 | -++ self.mc = memcache.Client(FLAGS.memcached_servers, |
3083 | -++ debug=0) |
3084 | -++ super(Lockout, self).__init__(application) |
3085 | -++ |
3086 | -++ @webob.dec.wsgify(RequestClass=wsgi.Request) |
3087 | -++ def __call__(self, req): |
3088 | -++ access_key = str(req.params['AWSAccessKeyId']) |
3089 | -++ failures_key = "authfailures-%s" % access_key |
3090 | -++ failures = int(self.mc.get(failures_key) or 0) |
3091 | -++ if failures >= FLAGS.lockout_attempts: |
3092 | -++ detail = _("Too many failed authentications.") |
3093 | -++ raise webob.exc.HTTPForbidden(detail=detail) |
3094 | -++ res = req.get_response(self.application) |
3095 | -++ if res.status_int == 403: |
3096 | -++ failures = self.mc.incr(failures_key) |
3097 | -++ if failures is None: |
3098 | -++ # NOTE(vish): To use incr, failures has to be a string. |
3099 | -++ self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60) |
3100 | -++ elif failures >= FLAGS.lockout_attempts: |
3101 | -++ lock_mins = FLAGS.lockout_minutes |
3102 | -++ msg = _('Access key %(access_key)s has had %(failures)d' |
3103 | -++ ' failed authentications and will be locked out' |
3104 | -++ ' for %(lock_mins)d minutes.') % locals() |
3105 | -++ LOG.warn(msg) |
3106 | -++ self.mc.set(failures_key, str(failures), |
3107 | -++ time=FLAGS.lockout_minutes * 60) |
3108 | -++ return res |
3109 | -++ |
3110 | -++ |
3111 | -++class EC2Token(wsgi.Middleware): |
3112 | -++ """Deprecated, only here to make merging easier.""" |
3113 | -++ |
3114 | -++ @webob.dec.wsgify(RequestClass=wsgi.Request) |
3115 | -++ def __call__(self, req): |
3116 | -++ # Read request signature and access id. |
3117 | -++ try: |
3118 | -++ signature = req.params['Signature'] |
3119 | -++ access = req.params['AWSAccessKeyId'] |
3120 | -++ except KeyError, e: |
3121 | -++ LOG.exception(e) |
3122 | -++ raise webob.exc.HTTPBadRequest() |
3123 | -++ |
3124 | -++ # Make a copy of args for authentication and signature verification. |
3125 | -++ auth_params = dict(req.params) |
3126 | -++ # Not part of authentication args |
3127 | -++ auth_params.pop('Signature') |
3128 | -++ |
3129 | -++ if "ec2" in FLAGS.keystone_ec2_url: |
3130 | -++ LOG.warning("Configuration setting for keystone_ec2_url needs " |
3131 | -++ "to be updated to /tokens only. The /ec2 prefix is " |
3132 | -++ "being deprecated") |
3133 | -++ # Authenticate the request. |
3134 | -++ creds = {'ec2Credentials': {'access': access, |
3135 | -++ 'signature': signature, |
3136 | -++ 'host': req.host, |
3137 | -++ 'verb': req.method, |
3138 | -++ 'path': req.path, |
3139 | -++ 'params': auth_params, |
3140 | -++ }} |
3141 | -++ else: |
3142 | -++ # Authenticate the request. |
3143 | -++ creds = {'auth': {'OS-KSEC2:ec2Credentials': {'access': access, |
3144 | -++ 'signature': signature, |
3145 | -++ 'host': req.host, |
3146 | -++ 'verb': req.method, |
3147 | -++ 'path': req.path, |
3148 | -++ 'params': auth_params, |
3149 | -++ }}} |
3150 | -++ creds_json = utils.dumps(creds) |
3151 | -++ headers = {'Content-Type': 'application/json'} |
3152 | -++ |
3153 | -++ # Disable "has no x member" pylint error |
3154 | -++ # for httplib and urlparse |
3155 | -++ # pylint: disable-msg=E1101 |
3156 | -++ o = urlparse.urlparse(FLAGS.keystone_ec2_url) |
3157 | -++ if o.scheme == "http": |
3158 | -++ conn = httplib.HTTPConnection(o.netloc) |
3159 | -++ else: |
3160 | -++ conn = httplib.HTTPSConnection(o.netloc) |
3161 | -++ conn.request('POST', o.path, body=creds_json, headers=headers) |
3162 | -++ response = conn.getresponse().read() |
3163 | -++ conn.close() |
3164 | -++ |
3165 | -++ # NOTE(vish): We could save a call to keystone by |
3166 | -++ # having keystone return token, tenant, |
3167 | -++ # user, and roles from this call. |
3168 | -++ |
3169 | -++ result = utils.loads(response) |
3170 | -++ try: |
3171 | -++ token_id = result['access']['token']['id'] |
3172 | -++ except (AttributeError, KeyError), e: |
3173 | -++ LOG.exception(e) |
3174 | -++ raise webob.exc.HTTPBadRequest() |
3175 | -++ |
3176 | -++ # Authenticated! |
3177 | -++ req.headers['X-Auth-Token'] = token_id |
3178 | -++ return self.application |
3179 | -++ |
3180 | -++ |
3181 | -++class EC2KeystoneAuth(wsgi.Middleware): |
3182 | -++ """Authenticate an EC2 request with keystone and convert to context.""" |
3183 | -++ |
3184 | -++ @webob.dec.wsgify(RequestClass=wsgi.Request) |
3185 | -++ def __call__(self, req): |
3186 | -++ request_id = context.generate_request_id() |
3187 | -++ signature = req.params.get('Signature') |
3188 | -++ if not signature: |
3189 | -++ msg = _("Signature not provided") |
3190 | -++ return ec2_error(req, request_id, "Unauthorized", msg) |
3191 | -++ access = req.params.get('AWSAccessKeyId') |
3192 | -++ if not access: |
3193 | -++ msg = _("Access key not provided") |
3194 | -++ return ec2_error(req, request_id, "Unauthorized", msg) |
3195 | -++ |
3196 | -++ # Make a copy of args for authentication and signature verification. |
3197 | -++ auth_params = dict(req.params) |
3198 | -++ # Not part of authentication args |
3199 | -++ auth_params.pop('Signature') |
3200 | -++ |
3201 | -++ cred_dict = { |
3202 | -++ 'access': access, |
3203 | -++ 'signature': signature, |
3204 | -++ 'host': req.host, |
3205 | -++ 'verb': req.method, |
3206 | -++ 'path': req.path, |
3207 | -++ 'params': auth_params, |
3208 | -++ } |
3209 | -++ if "ec2" in FLAGS.keystone_ec2_url: |
3210 | -++ creds = {'ec2Credentials': cred_dict} |
3211 | -++ else: |
3212 | -++ creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}} |
3213 | -++ creds_json = utils.dumps(creds) |
3214 | -++ headers = {'Content-Type': 'application/json'} |
3215 | -++ |
3216 | -++ o = urlparse.urlparse(FLAGS.keystone_ec2_url) |
3217 | -++ if o.scheme == "http": |
3218 | -++ conn = httplib.HTTPConnection(o.netloc) |
3219 | -++ else: |
3220 | -++ conn = httplib.HTTPSConnection(o.netloc) |
3221 | -++ conn.request('POST', o.path, body=creds_json, headers=headers) |
3222 | -++ response = conn.getresponse() |
3223 | -++ data = response.read() |
3224 | -++ if response.status != 200: |
3225 | -++ if response.status == 401: |
3226 | -++ msg = response.reason |
3227 | -++ else: |
3228 | -++ msg = _("Failure communicating with keystone") |
3229 | -++ return ec2_error(req, request_id, "Unauthorized", msg) |
3230 | -++ result = utils.loads(data) |
3231 | -++ conn.close() |
3232 | -++ |
3233 | -++ try: |
3234 | -++ token_id = result['access']['token']['id'] |
3235 | -++ user_id = result['access']['user']['id'] |
3236 | -++ project_id = result['access']['token']['tenant']['id'] |
3237 | -++ roles = [role['name'] for role |
3238 | -++ in result['access']['user']['roles']] |
3239 | -++ except (AttributeError, KeyError), e: |
3240 | -++ LOG.exception("Keystone failure: %s" % e) |
3241 | -++ msg = _("Failure communicating with keystone") |
3242 | -++ return ec2_error(req, request_id, "Unauthorized", msg) |
3243 | -++ |
3244 | -++ remote_address = req.remote_addr |
3245 | -++ if FLAGS.use_forwarded_for: |
3246 | -++ remote_address = req.headers.get('X-Forwarded-For', |
3247 | -++ remote_address) |
3248 | -++ ctxt = context.RequestContext(user_id, |
3249 | -++ project_id, |
3250 | -++ roles=roles, |
3251 | -++ auth_token=token_id, |
3252 | -++ strategy='keystone', |
3253 | -++ remote_address=remote_address) |
3254 | -++ |
3255 | -++ req.environ['nova.context'] = ctxt |
3256 | -++ |
3257 | -++ return self.application |
3258 | -++ |
3259 | -++ |
3260 | -++class NoAuth(wsgi.Middleware): |
3261 | -++ """Add user:project as 'nova.context' to WSGI environ.""" |
3262 | -++ |
3263 | -++ @webob.dec.wsgify(RequestClass=wsgi.Request) |
3264 | -++ def __call__(self, req): |
3265 | -++ if 'AWSAccessKeyId' not in req.params: |
3266 | -++ raise webob.exc.HTTPBadRequest() |
3267 | -++ user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':') |
3268 | -++ project_id = project_id or user_id |
3269 | -++ remote_address = req.remote_addr |
3270 | -++ if FLAGS.use_forwarded_for: |
3271 | -++ remote_address = req.headers.get('X-Forwarded-For', remote_address) |
3272 | -++ ctx = context.RequestContext(user_id, |
3273 | -++ project_id, |
3274 | -++ is_admin=True, |
3275 | -++ remote_address=remote_address) |
3276 | -++ |
3277 | -++ req.environ['nova.context'] = ctx |
3278 | -++ return self.application |
3279 | -++ |
3280 | -++ |
3281 | -++class Authenticate(wsgi.Middleware): |
3282 | -++ """Authenticate an EC2 request and add 'nova.context' to WSGI environ.""" |
3283 | -++ |
3284 | -++ @webob.dec.wsgify(RequestClass=wsgi.Request) |
3285 | -++ def __call__(self, req): |
3286 | -++ # Read request signature and access id. |
3287 | -++ try: |
3288 | -++ signature = req.params['Signature'] |
3289 | -++ access = req.params['AWSAccessKeyId'] |
3290 | -++ except KeyError: |
3291 | -++ raise webob.exc.HTTPBadRequest() |
3292 | -++ |
3293 | -++ # Make a copy of args for authentication and signature verification. |
3294 | -++ auth_params = dict(req.params) |
3295 | -++ # Not part of authentication args |
3296 | -++ auth_params.pop('Signature') |
3297 | -++ |
3298 | -++ # Authenticate the request. |
3299 | -++ authman = manager.AuthManager() |
3300 | -++ try: |
3301 | -++ (user, project) = authman.authenticate( |
3302 | -++ access, |
3303 | -++ signature, |
3304 | -++ auth_params, |
3305 | -++ req.method, |
3306 | -++ req.host, |
3307 | -++ req.path) |
3308 | -++ # Be explicit for what exceptions are 403, the rest bubble as 500 |
3309 | -++ except (exception.NotFound, exception.NotAuthorized, |
3310 | -++ exception.InvalidSignature) as ex: |
3311 | -++ LOG.audit(_("Authentication Failure: %s"), unicode(ex)) |
3312 | -++ raise webob.exc.HTTPForbidden() |
3313 | -++ |
3314 | -++ # Authenticated! |
3315 | -++ remote_address = req.remote_addr |
3316 | -++ if FLAGS.use_forwarded_for: |
3317 | -++ remote_address = req.headers.get('X-Forwarded-For', remote_address) |
3318 | -++ roles = authman.get_active_roles(user, project) |
3319 | -++ ctxt = context.RequestContext(user_id=user.id, |
3320 | -++ project_id=project.id, |
3321 | -++ is_admin=user.is_admin(), |
3322 | -++ roles=roles, |
3323 | -++ remote_address=remote_address) |
3324 | -++ req.environ['nova.context'] = ctxt |
3325 | -++ uname = user.name |
3326 | -++ pname = project.name |
3327 | -++ msg = _('Authenticated Request For %(uname)s:%(pname)s)') % locals() |
3328 | -++ LOG.audit(msg, context=req.environ['nova.context']) |
3329 | -++ return self.application |
3330 | -++ |
3331 | -++ |
3332 | -++class Requestify(wsgi.Middleware): |
3333 | -++ |
3334 | -++ def __init__(self, app, controller): |
3335 | -++ super(Requestify, self).__init__(app) |
3336 | -++ self.controller = utils.import_class(controller)() |
3337 | -++ |
3338 | -++ @webob.dec.wsgify(RequestClass=wsgi.Request) |
3339 | -++ def __call__(self, req): |
3340 | -++ non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod', |
3341 | -++ 'SignatureVersion', 'Version', 'Timestamp'] |
3342 | -++ args = dict(req.params) |
3343 | -++ try: |
3344 | -++ # Raise KeyError if omitted |
3345 | -++ action = req.params['Action'] |
3346 | -++ # Fix bug lp:720157 for older (version 1) clients |
3347 | -++ version = req.params['SignatureVersion'] |
3348 | -++ if int(version) == 1: |
3349 | -++ non_args.remove('SignatureMethod') |
3350 | -++ if 'SignatureMethod' in args: |
3351 | -++ args.pop('SignatureMethod') |
3352 | -++ for non_arg in non_args: |
3353 | -++ # Remove, but raise KeyError if omitted |
3354 | -++ args.pop(non_arg) |
3355 | -++ except KeyError, e: |
3356 | -++ raise webob.exc.HTTPBadRequest() |
3357 | -++ |
3358 | -++ LOG.debug(_('action: %s'), action) |
3359 | -++ for key, value in args.items(): |
3360 | -++ LOG.debug(_('arg: %(key)s\t\tval: %(value)s') % locals()) |
3361 | -++ |
3362 | -++ # Success! |
3363 | -++ api_request = apirequest.APIRequest(self.controller, action, |
3364 | -++ req.params['Version'], args) |
3365 | -++ req.environ['ec2.request'] = api_request |
3366 | -++ return self.application |
3367 | -++ |
3368 | -++ |
3369 | -++class Authorizer(wsgi.Middleware): |
3370 | -++ |
3371 | -++ """Authorize an EC2 API request. |
3372 | -++ |
3373 | -++ Return a 401 if ec2.controller and ec2.action in WSGI environ may not be |
3374 | -++ executed in nova.context. |
3375 | -++ """ |
3376 | -++ |
3377 | -++ def __init__(self, application): |
3378 | -++ super(Authorizer, self).__init__(application) |
3379 | -++ self.action_roles = { |
3380 | -++ 'CloudController': { |
3381 | -++ 'DescribeAvailabilityZones': ['all'], |
3382 | -++ 'DescribeRegions': ['all'], |
3383 | -++ 'DescribeSnapshots': ['all'], |
3384 | -++ 'DescribeKeyPairs': ['all'], |
3385 | -++ 'CreateKeyPair': ['all'], |
3386 | -++ 'DeleteKeyPair': ['all'], |
3387 | -++ 'DescribeSecurityGroups': ['all'], |
3388 | -++ 'ImportKeyPair': ['all'], |
3389 | -++ 'AuthorizeSecurityGroupIngress': ['netadmin'], |
3390 | -++ 'RevokeSecurityGroupIngress': ['netadmin'], |
3391 | -++ 'CreateSecurityGroup': ['netadmin'], |
3392 | -++ 'DeleteSecurityGroup': ['netadmin'], |
3393 | -++ 'GetConsoleOutput': ['projectmanager', 'sysadmin'], |
3394 | -++ 'DescribeVolumes': ['projectmanager', 'sysadmin'], |
3395 | -++ 'CreateVolume': ['projectmanager', 'sysadmin'], |
3396 | -++ 'AttachVolume': ['projectmanager', 'sysadmin'], |
3397 | -++ 'DetachVolume': ['projectmanager', 'sysadmin'], |
3398 | -++ 'DescribeInstances': ['all'], |
3399 | -++ 'DescribeAddresses': ['all'], |
3400 | -++ 'AllocateAddress': ['netadmin'], |
3401 | -++ 'ReleaseAddress': ['netadmin'], |
3402 | -++ 'AssociateAddress': ['netadmin'], |
3403 | -++ 'DisassociateAddress': ['netadmin'], |
3404 | -++ 'RunInstances': ['projectmanager', 'sysadmin'], |
3405 | -++ 'TerminateInstances': ['projectmanager', 'sysadmin'], |
3406 | -++ 'RebootInstances': ['projectmanager', 'sysadmin'], |
3407 | -++ 'UpdateInstance': ['projectmanager', 'sysadmin'], |
3408 | -++ 'StartInstances': ['projectmanager', 'sysadmin'], |
3409 | -++ 'StopInstances': ['projectmanager', 'sysadmin'], |
3410 | -++ 'DeleteVolume': ['projectmanager', 'sysadmin'], |
3411 | -++ 'DescribeImages': ['all'], |
3412 | -++ 'DeregisterImage': ['projectmanager', 'sysadmin'], |
3413 | -++ 'RegisterImage': ['projectmanager', 'sysadmin'], |
3414 | -++ 'DescribeImageAttribute': ['all'], |
3415 | -++ 'ModifyImageAttribute': ['projectmanager', 'sysadmin'], |
3416 | -++ 'UpdateImage': ['projectmanager', 'sysadmin'], |
3417 | -++ 'CreateImage': ['projectmanager', 'sysadmin'], |
3418 | -++ }, |
3419 | -++ 'AdminController': { |
3420 | -++ # All actions have the same permission: ['none'] (the default) |
3421 | -++ # superusers will be allowed to run them |
3422 | -++ # all others will get HTTPUnauthorized. |
3423 | -++ }, |
3424 | -++ } |
3425 | -++ |
3426 | -++ @webob.dec.wsgify(RequestClass=wsgi.Request) |
3427 | -++ def __call__(self, req): |
3428 | -++ context = req.environ['nova.context'] |
3429 | -++ controller = req.environ['ec2.request'].controller.__class__.__name__ |
3430 | -++ action = req.environ['ec2.request'].action |
3431 | -++ allowed_roles = self.action_roles[controller].get(action, ['none']) |
3432 | -++ if self._matches_any_role(context, allowed_roles): |
3433 | -++ return self.application |
3434 | -++ else: |
3435 | -++ LOG.audit(_('Unauthorized request for controller=%(controller)s ' |
3436 | -++ 'and action=%(action)s') % locals(), context=context) |
3437 | -++ raise webob.exc.HTTPUnauthorized() |
3438 | -++ |
3439 | -++ def _matches_any_role(self, context, roles): |
3440 | -++ """Return True if any role in roles is allowed in context.""" |
3441 | -++ if context.is_admin: |
3442 | -++ return True |
3443 | -++ if 'all' in roles: |
3444 | -++ return True |
3445 | -++ if 'none' in roles: |
3446 | -++ return False |
3447 | -++ return any(role in context.roles for role in roles) |
3448 | -++ |
3449 | -++ |
3450 | -++class Validator(wsgi.Middleware): |
3451 | -++ |
3452 | -++ def validate_ec2_id(val): |
3453 | -++ if not validator.validate_str()(val): |
3454 | -++ return False |
3455 | -++ try: |
3456 | -++ ec2utils.ec2_id_to_id(val) |
3457 | -++ except exception.InvalidEc2Id: |
3458 | -++ return False |
3459 | -++ return True |
3460 | -++ |
3461 | -++ validator.validate_ec2_id = validate_ec2_id |
3462 | -++ |
3463 | -++ validator.DEFAULT_VALIDATOR = { |
3464 | -++ 'instance_id': validator.validate_ec2_id, |
3465 | -++ 'volume_id': validator.validate_ec2_id, |
3466 | -++ 'image_id': validator.validate_ec2_id, |
3467 | -++ 'attribute': validator.validate_str(), |
3468 | -++ 'image_location': validator.validate_image_path, |
3469 | -++ 'public_ip': validator.validate_ipv4, |
3470 | -++ 'region_name': validator.validate_str(), |
3471 | -++ 'group_name': validator.validate_str(max_length=255), |
3472 | -++ 'group_description': validator.validate_str(max_length=255), |
3473 | -++ 'size': validator.validate_int(), |
3474 | -++ 'user_data': validator.validate_user_data |
3475 | -++ } |
3476 | -++ |
3477 | -++ def __init__(self, application): |
3478 | -++ super(Validator, self).__init__(application) |
3479 | -++ |
3480 | -++ @webob.dec.wsgify(RequestClass=wsgi.Request) |
3481 | -++ def __call__(self, req): |
3482 | -++ if validator.validate(req.environ['ec2.request'].args, |
3483 | -++ validator.DEFAULT_VALIDATOR): |
3484 | -++ return self.application |
3485 | -++ else: |
3486 | -++ raise webob.exc.HTTPBadRequest() |
3487 | -++ |
3488 | -++ |
3489 | -++class Executor(wsgi.Application): |
3490 | -++ |
3491 | -++ """Execute an EC2 API request. |
3492 | -++ |
3493 | -++ Executes 'ec2.action' upon 'ec2.controller', passing 'nova.context' and |
3494 | -++ 'ec2.action_args' (all variables in WSGI environ.) Returns an XML |
3495 | -++ response, or a 400 upon failure. |
3496 | -++ """ |
3497 | -++ |
3498 | -++ @webob.dec.wsgify(RequestClass=wsgi.Request) |
3499 | -++ def __call__(self, req): |
3500 | -++ context = req.environ['nova.context'] |
3501 | -++ request_id = context.request_id |
3502 | -++ api_request = req.environ['ec2.request'] |
3503 | -++ result = None |
3504 | -++ try: |
3505 | -++ result = api_request.invoke(context) |
3506 | -++ except exception.InstanceNotFound as ex: |
3507 | -++ LOG.info(_('InstanceNotFound raised: %s'), unicode(ex), |
3508 | -++ context=context) |
3509 | -++ ec2_id = ec2utils.id_to_ec2_id(ex.kwargs['instance_id']) |
3510 | -++ message = ex.message % {'instance_id': ec2_id} |
3511 | -++ return ec2_error(req, request_id, type(ex).__name__, message) |
3512 | -++ except exception.VolumeNotFound as ex: |
3513 | -++ LOG.info(_('VolumeNotFound raised: %s'), unicode(ex), |
3514 | -++ context=context) |
3515 | -++ ec2_id = ec2utils.id_to_ec2_vol_id(ex.kwargs['volume_id']) |
3516 | -++ message = ex.message % {'volume_id': ec2_id} |
3517 | -++ return ec2_error(req, request_id, type(ex).__name__, message) |
3518 | -++ except exception.SnapshotNotFound as ex: |
3519 | -++ LOG.info(_('SnapshotNotFound raised: %s'), unicode(ex), |
3520 | -++ context=context) |
3521 | -++ ec2_id = ec2utils.id_to_ec2_snap_id(ex.kwargs['snapshot_id']) |
3522 | -++ message = ex.message % {'snapshot_id': ec2_id} |
3523 | -++ return ec2_error(req, request_id, type(ex).__name__, message) |
3524 | -++ except exception.NotFound as ex: |
3525 | -++ LOG.info(_('NotFound raised: %s'), unicode(ex), context=context) |
3526 | -++ return ec2_error(req, request_id, type(ex).__name__, unicode(ex)) |
3527 | -++ except exception.EC2APIError as ex: |
3528 | -++ LOG.exception(_('EC2APIError raised: %s'), unicode(ex), |
3529 | -++ context=context) |
3530 | -++ if ex.code: |
3531 | -++ return ec2_error(req, request_id, ex.code, unicode(ex)) |
3532 | -++ else: |
3533 | -++ return ec2_error(req, request_id, type(ex).__name__, |
3534 | -++ unicode(ex)) |
3535 | -++ except exception.KeyPairExists as ex: |
3536 | -++ LOG.debug(_('KeyPairExists raised: %s'), unicode(ex), |
3537 | -++ context=context) |
3538 | -++ return ec2_error(req, request_id, type(ex).__name__, unicode(ex)) |
3539 | -++ except exception.InvalidParameterValue as ex: |
3540 | -++ LOG.debug(_('InvalidParameterValue raised: %s'), unicode(ex), |
3541 | -++ context=context) |
3542 | -++ return ec2_error(req, request_id, type(ex).__name__, unicode(ex)) |
3543 | -++ except exception.InvalidPortRange as ex: |
3544 | -++ LOG.debug(_('InvalidPortRange raised: %s'), unicode(ex), |
3545 | -++ context=context) |
3546 | -++ return ec2_error(req, request_id, type(ex).__name__, unicode(ex)) |
3547 | -++ except exception.NotAuthorized as ex: |
3548 | -++ LOG.info(_('NotAuthorized raised: %s'), unicode(ex), |
3549 | -++ context=context) |
3550 | -++ return ec2_error(req, request_id, type(ex).__name__, unicode(ex)) |
3551 | -++ except exception.InvalidRequest as ex: |
3552 | -++ LOG.debug(_('InvalidRequest raised: %s'), unicode(ex), |
3553 | -++ context=context) |
3554 | -++ return ec2_error(req, request_id, type(ex).__name__, unicode(ex)) |
3555 | -++ except exception.InvalidInstanceIDMalformed as ex: |
3556 | -++ LOG.debug(_('ValidatorError raised: %s'), unicode(ex), |
3557 | -++ context=context) |
3558 | -++ #EC2 Compatibility |
3559 | -++ return self._error(req, context, "InvalidInstanceID.Malformed", |
3560 | -++ unicode(ex)) |
3561 | -++ except Exception as ex: |
3562 | -++ env = req.environ.copy() |
3563 | -++ for k in env.keys(): |
3564 | -++ if not isinstance(env[k], basestring): |
3565 | -++ env.pop(k) |
3566 | -++ |
3567 | -++ LOG.exception(_('Unexpected error raised: %s'), unicode(ex)) |
3568 | -++ LOG.error(_('Environment: %s') % utils.dumps(env)) |
3569 | -++ return ec2_error(req, request_id, 'UnknownError', |
3570 | -++ _('An unknown error has occurred. ' |
3571 | -++ 'Please try your request again.')) |
3572 | -++ else: |
3573 | -++ resp = webob.Response() |
3574 | -++ resp.status = 200 |
3575 | -++ resp.headers['Content-Type'] = 'text/xml' |
3576 | -++ resp.body = str(result) |
3577 | -++ return resp |
3578 | -+diff -Naurp nova.orig/api/ec2/inst_state.py nova/api/ec2/inst_state.py |
3579 | -+--- nova.orig/api/ec2/inst_state.py 1969-12-31 19:00:00.000000000 -0500 |
3580 | -++++ nova/api/ec2/inst_state.py 2012-02-28 09:08:10.821887173 -0500 |
3581 | -+@@ -0,0 +1,60 @@ |
3582 | -++# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
3583 | -++ |
3584 | -++# Copyright 2011 Isaku Yamahata <yamahata at valinux co jp> |
3585 | -++# All Rights Reserved. |
3586 | -++# |
3587 | -++# Licensed under the Apache License, Version 2.0 (the "License"); you may |
3588 | -++# not use this file except in compliance with the License. You may obtain |
3589 | -++# a copy of the License at |
3590 | -++# |
3591 | -++# http://www.apache.org/licenses/LICENSE-2.0 |
3592 | -++# |
3593 | -++# Unless required by applicable law or agreed to in writing, software |
3594 | -++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
3595 | -++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
3596 | -++# License for the specific language governing permissions and limitations |
3597 | -++# under the License. |
3598 | -++ |
3599 | -++PENDING_CODE = 0 |
3600 | -++RUNNING_CODE = 16 |
3601 | -++SHUTTING_DOWN_CODE = 32 |
3602 | -++TERMINATED_CODE = 48 |
3603 | -++STOPPING_CODE = 64 |
3604 | -++STOPPED_CODE = 80 |
3605 | -++ |
3606 | -++PENDING = 'pending' |
3607 | -++RUNNING = 'running' |
3608 | -++SHUTTING_DOWN = 'shutting-down' |
3609 | -++TERMINATED = 'terminated' |
3610 | -++STOPPING = 'stopping' |
3611 | -++STOPPED = 'stopped' |
3612 | -++ |
3613 | -++# non-ec2 value |
3614 | -++SHUTOFF = 'shutoff' |
3615 | -++MIGRATE = 'migrate' |
3616 | -++RESIZE = 'resize' |
3617 | -++PAUSE = 'pause' |
3618 | -++SUSPEND = 'suspend' |
3619 | -++RESCUE = 'rescue' |
3620 | -++ |
3621 | -++# EC2 API instance status code |
3622 | -++_NAME_TO_CODE = { |
3623 | -++ PENDING: PENDING_CODE, |
3624 | -++ RUNNING: RUNNING_CODE, |
3625 | -++ SHUTTING_DOWN: SHUTTING_DOWN_CODE, |
3626 | -++ TERMINATED: TERMINATED_CODE, |
3627 | -++ STOPPING: STOPPING_CODE, |
3628 | -++ STOPPED: STOPPED_CODE, |
3629 | -++ |
3630 | -++ # approximation |
3631 | -++ SHUTOFF: TERMINATED_CODE, |
3632 | -++ MIGRATE: RUNNING_CODE, |
3633 | -++ RESIZE: RUNNING_CODE, |
3634 | -++ PAUSE: STOPPED_CODE, |
3635 | -++ SUSPEND: STOPPED_CODE, |
3636 | -++ RESCUE: RUNNING_CODE, |
3637 | -++} |
3638 | -++ |
3639 | -++ |
3640 | -++def name_to_code(name): |
3641 | -++ return _NAME_TO_CODE.get(name, PENDING_CODE) |
3642 | -+diff -Naurp nova.orig/api/__init__.py nova/api/__init__.py |
3643 | -+--- nova.orig/api/__init__.py 1969-12-31 19:00:00.000000000 -0500 |
3644 | -++++ nova/api/__init__.py 2012-02-28 09:08:10.817887173 -0500 |
3645 | -+@@ -0,0 +1,17 @@ |
3646 | -++# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
3647 | -++ |
3648 | -++# Copyright 2010 United States Government as represented by the |
3649 | -++# Administrator of the National Aeronautics and Space Administration. |
3650 | -++# All Rights Reserved. |
3651 | -++# |
3652 | -++# Licensed under the Apache License, Version 2.0 (the "License"); you may |
3653 | -++# not use this file except in compliance with the License. You may obtain |
3654 | -++# a copy of the License at |
3655 | -++# |
3656 | -++# http://www.apache.org/licenses/LICENSE-2.0 |
3657 | -++# |
3658 | -++# Unless required by applicable law or agreed to in writing, software |
3659 | -++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
3660 | -++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
3661 | -++# License for the specific language governing permissions and limitations |
3662 | -++# under the License. |
3663 | -+diff -Naurp nova.orig/api/manager.py nova/api/manager.py |
3664 | -+--- nova.orig/api/manager.py 1969-12-31 19:00:00.000000000 -0500 |
3665 | -++++ nova/api/manager.py 2012-02-28 09:08:10.821887173 -0500 |
3666 | -+@@ -0,0 +1,42 @@ |
3667 | -++# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
3668 | -++ |
3669 | -++# Copyright 2010 United States Government as represented by the |
3670 | -++# Administrator of the National Aeronautics and Space Administration. |
3671 | -++# All Rights Reserved. |
3672 | -++# |
3673 | -++# Licensed under the Apache License, Version 2.0 (the "License"); you may |
3674 | -++# not use this file except in compliance with the License. You may obtain |
3675 | -++# a copy of the License at |
3676 | -++# |
3677 | -++# http://www.apache.org/licenses/LICENSE-2.0 |
3678 | -++# |
3679 | -++# Unless required by applicable law or agreed to in writing, software |
3680 | -++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
3681 | -++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
3682 | -++# License for the specific language governing permissions and limitations |
3683 | -++# under the License. |
3684 | -++ |
3685 | -++from nova import flags |
3686 | -++from nova import manager |
3687 | -++from nova import utils |
3688 | -++ |
3689 | -++FLAGS = flags.FLAGS |
3690 | -++ |
3691 | -++ |
3692 | -++class MetadataManager(manager.Manager): |
3693 | -++ """Metadata Manager. |
3694 | -++ |
3695 | -++ This class manages the Metadata API service initialization. Currently, it |
3696 | -++ just adds an iptables filter rule for the metadata service. |
3697 | -++ """ |
3698 | -++ def __init__(self, *args, **kwargs): |
3699 | -++ super(MetadataManager, self).__init__(*args, **kwargs) |
3700 | -++ self.network_driver = utils.import_object(FLAGS.network_driver) |
3701 | -++ |
3702 | -++ def init_host(self): |
3703 | -++ """Perform any initialization. |
3704 | -++ |
3705 | -++ Currently, we only add an iptables filter rule for the metadata |
3706 | -++ service. |
3707 | -++ """ |
3708 | -++ self.network_driver.metadata_accept() |
3709 | -+diff -Naurp nova.orig/api/metadata/handler.py nova/api/metadata/handler.py |
3710 | -+--- nova.orig/api/metadata/handler.py 1969-12-31 19:00:00.000000000 -0500 |
3711 | -++++ nova/api/metadata/handler.py 2012-02-28 09:08:10.821887173 -0500 |
3712 | -+@@ -0,0 +1,260 @@ |
3713 | -++# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
3714 | -++ |
3715 | -++# Copyright 2010 United States Government as represented by the |
3716 | -++# Administrator of the National Aeronautics and Space Administration. |
3717 | -++# All Rights Reserved. |
3718 | -++# |
3719 | -++# Licensed under the Apache License, Version 2.0 (the "License"); you may |
3720 | -++# not use this file except in compliance with the License. You may obtain |
3721 | -++# a copy of the License at |
3722 | -++# |
3723 | -++# http://www.apache.org/licenses/LICENSE-2.0 |
3724 | -++# |
3725 | -++# Unless required by applicable law or agreed to in writing, software |
3726 | -++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
3727 | -++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
3728 | -++# License for the specific language governing permissions and limitations |
3729 | -++# under the License. |
3730 | -++ |
3731 | -++"""Metadata request handler.""" |
3732 | -++ |
3733 | -++import base64 |
3734 | -++ |
3735 | -++import webob.dec |
3736 | -++import webob.exc |
3737 | -++ |
3738 | -++from nova.api.ec2 import ec2utils |
3739 | -++from nova import block_device |
3740 | -++from nova import compute |
3741 | -++from nova import context |
3742 | -++from nova import db |
3743 | -++from nova import exception |
3744 | -++from nova import flags |
3745 | -++from nova import log as logging |
3746 | -++from nova import network |
3747 | -++from nova import volume |
3748 | -++from nova import wsgi |
3749 | -++ |
3750 | -++ |
3751 | -++LOG = logging.getLogger(__name__) |
3752 | -++FLAGS = flags.FLAGS |
3753 | -++flags.DECLARE('use_forwarded_for', 'nova.api.auth') |
3754 | -++flags.DECLARE('dhcp_domain', 'nova.network.manager') |
3755 | -++ |
3756 | -++_DEFAULT_MAPPINGS = {'ami': 'sda1', |
3757 | -++ 'ephemeral0': 'sda2', |
3758 | -++ 'root': block_device.DEFAULT_ROOT_DEV_NAME, |
3759 | -++ 'swap': 'sda3'} |
3760 | -++ |
3761 | -++ |
3762 | -++class Versions(wsgi.Application): |
3763 | -++ |
3764 | -++ @webob.dec.wsgify(RequestClass=wsgi.Request) |
3765 | -++ def __call__(self, req): |
3766 | -++ """Respond to a request for all versions.""" |
3767 | -++ # available api versions |
3768 | -++ versions = [ |
3769 | -++ '1.0', |
3770 | -++ '2007-01-19', |
3771 | -++ '2007-03-01', |
3772 | -++ '2007-08-29', |
3773 | -++ '2007-10-10', |
3774 | -++ '2007-12-15', |
3775 | -++ '2008-02-01', |
3776 | -++ '2008-09-01', |
3777 | -++ '2009-04-04', |
3778 | -++ ] |
3779 | -++ return ''.join('%s\n' % v for v in versions) |
3780 | -++ |
3781 | -++ |
3782 | -++class MetadataRequestHandler(wsgi.Application): |
3783 | -++ """Serve metadata.""" |
3784 | -++ |
3785 | -++ def __init__(self): |
3786 | -++ self.network_api = network.API() |
3787 | -++ self.compute_api = compute.API( |
3788 | -++ network_api=self.network_api, |
3789 | -++ volume_api=volume.API()) |
3790 | -++ |
3791 | -++ def _get_mpi_data(self, context, project_id): |
3792 | -++ result = {} |
3793 | -++ search_opts = {'project_id': project_id, 'deleted': False} |
3794 | -++ for instance in self.compute_api.get_all(context, |
3795 | -++ search_opts=search_opts): |
3796 | -++ ip_info = ec2utils.get_ip_info_for_instance(context, instance) |
3797 | -++ # only look at ipv4 addresses |
3798 | -++ fixed_ips = ip_info['fixed_ips'] |
3799 | -++ if fixed_ips: |
3800 | -++ line = '%s slots=%d' % (fixed_ips[0], instance['vcpus']) |
3801 | -++ key = str(instance['key_name']) |
3802 | -++ if key in result: |
3803 | -++ result[key].append(line) |
3804 | -++ else: |
3805 | -++ result[key] = [line] |
3806 | -++ return result |
3807 | -++ |
3808 | -++ def _format_instance_mapping(self, ctxt, instance_ref): |
3809 | -++ root_device_name = instance_ref['root_device_name'] |
3810 | -++ if root_device_name is None: |
3811 | -++ return _DEFAULT_MAPPINGS |
3812 | -++ |
3813 | -++ mappings = {} |
3814 | -++ mappings['ami'] = block_device.strip_dev(root_device_name) |
3815 | -++ mappings['root'] = root_device_name |
3816 | -++ default_ephemeral_device = instance_ref.get('default_ephemeral_device') |
3817 | -++ if default_ephemeral_device: |
3818 | -++ mappings['ephemeral0'] = default_ephemeral_device |
3819 | -++ default_swap_device = instance_ref.get('default_swap_device') |
3820 | -++ if default_swap_device: |
3821 | -++ mappings['swap'] = default_swap_device |
3822 | -++ ebs_devices = [] |
3823 | -++ |
3824 | -++ # 'ephemeralN', 'swap' and ebs |
3825 | -++ for bdm in db.block_device_mapping_get_all_by_instance( |
3826 | -++ ctxt, instance_ref['id']): |
3827 | -++ if bdm['no_device']: |
3828 | -++ continue |
3829 | -++ |
3830 | -++ # ebs volume case |
3831 | -++ if (bdm['volume_id'] or bdm['snapshot_id']): |
3832 | -++ ebs_devices.append(bdm['device_name']) |
3833 | -++ continue |
3834 | -++ |
3835 | -++ virtual_name = bdm['virtual_name'] |
3836 | -++ if not virtual_name: |
3837 | -++ continue |
3838 | -++ |
3839 | -++ if block_device.is_swap_or_ephemeral(virtual_name): |
3840 | -++ mappings[virtual_name] = bdm['device_name'] |
3841 | -++ |
3842 | -++ # NOTE(yamahata): I'm not sure how ebs device should be numbered. |
3843 | -++ # Right now sort by device name for deterministic |
3844 | -++ # result. |
3845 | -++ if ebs_devices: |
3846 | -++ nebs = 0 |
3847 | -++ ebs_devices.sort() |
3848 | -++ for ebs in ebs_devices: |
3849 | -++ mappings['ebs%d' % nebs] = ebs |
3850 | -++ nebs += 1 |
3851 | -++ |
3852 | -++ return mappings |
3853 | -++ |
3854 | -++ def get_metadata(self, address): |
3855 | -++ if not address: |
3856 | -++ raise exception.FixedIpNotFoundForAddress(address=address) |
3857 | -++ |
3858 | -++ ctxt = context.get_admin_context() |
3859 | -++ try: |
3860 | -++ fixed_ip = self.network_api.get_fixed_ip_by_address(ctxt, address) |
3861 | -++ instance_ref = db.instance_get(ctxt, fixed_ip['instance_id']) |
3862 | -++ except exception.NotFound: |
3863 | -++ return None |
3864 | -++ |
3865 | -++ mpi = self._get_mpi_data(ctxt, instance_ref['project_id']) |
3866 | -++ hostname = "%s.%s" % (instance_ref['hostname'], FLAGS.dhcp_domain) |
3867 | -++ host = instance_ref['host'] |
3868 | -++ services = db.service_get_all_by_host(ctxt.elevated(), host) |
3869 | -++ availability_zone = ec2utils.get_availability_zone_by_host(services, |
3870 | -++ host) |
3871 | -++ |
3872 | -++ ip_info = ec2utils.get_ip_info_for_instance(ctxt, instance_ref) |
3873 | -++ floating_ips = ip_info['floating_ips'] |
3874 | -++ floating_ip = floating_ips and floating_ips[0] or '' |
3875 | -++ |
3876 | -++ ec2_id = ec2utils.id_to_ec2_id(instance_ref['id']) |
3877 | -++ image_ec2_id = ec2utils.image_ec2_id(instance_ref['image_ref']) |
3878 | -++ security_groups = db.security_group_get_by_instance(ctxt, |
3879 | -++ instance_ref['id']) |
3880 | -++ security_groups = [x['name'] for x in security_groups] |
3881 | -++ mappings = self._format_instance_mapping(ctxt, instance_ref) |
3882 | -++ data = { |
3883 | -++ 'user-data': base64.b64decode(instance_ref['user_data']), |
3884 | -++ 'meta-data': { |
3885 | -++ 'ami-id': image_ec2_id, |
3886 | -++ 'ami-launch-index': instance_ref['launch_index'], |
3887 | -++ 'ami-manifest-path': 'FIXME', |
3888 | -++ 'block-device-mapping': mappings, |
3889 | -++ 'hostname': hostname, |
3890 | -++ 'instance-action': 'none', |
3891 | -++ 'instance-id': ec2_id, |
3892 | -++ 'instance-type': instance_ref['instance_type']['name'], |
3893 | -++ 'local-hostname': hostname, |
3894 | -++ 'local-ipv4': address, |
3895 | -++ 'placement': {'availability-zone': availability_zone}, |
3896 | -++ 'public-hostname': hostname, |
3897 | -++ 'public-ipv4': floating_ip, |
3898 | -++ 'reservation-id': instance_ref['reservation_id'], |
3899 | -++ 'security-groups': security_groups, |
3900 | -++ 'mpi': mpi}} |
3901 | -++ |
3902 | -++ # public-keys should be in meta-data only if user specified one |
3903 | -++ if instance_ref['key_name']: |
3904 | -++ data['meta-data']['public-keys'] = { |
3905 | -++ '0': {'_name': instance_ref['key_name'], |
3906 | -++ 'openssh-key': instance_ref['key_data']}} |
3907 | -++ |
3908 | -++ for image_type in ['kernel', 'ramdisk']: |
3909 | -++ if instance_ref.get('%s_id' % image_type): |
3910 | -++ ec2_id = ec2utils.image_ec2_id( |
3911 | -++ instance_ref['%s_id' % image_type], |
3912 | -++ ec2utils.image_type(image_type)) |
3913 | -++ data['meta-data']['%s-id' % image_type] = ec2_id |
3914 | -++ |
3915 | -++ if False: # TODO(vish): store ancestor ids |
3916 | -++ data['ancestor-ami-ids'] = [] |
3917 | -++ if False: # TODO(vish): store product codes |
3918 | -++ data['product-codes'] = [] |
3919 | -++ return data |
3920 | -++ |
3921 | -++ def print_data(self, data): |
3922 | -++ if isinstance(data, dict): |
3923 | -++ output = '' |
3924 | -++ for key in data: |
3925 | -++ if key == '_name': |
3926 | -++ continue |
3927 | -++ output += key |
3928 | -++ if isinstance(data[key], dict): |
3929 | -++ if '_name' in data[key]: |
3930 | -++ output += '=' + str(data[key]['_name']) |
3931 | -++ else: |
3932 | -++ output += '/' |
3933 | -++ output += '\n' |
3934 | -++ # Cut off last \n |
3935 | -++ return output[:-1] |
3936 | -++ elif isinstance(data, list): |
3937 | -++ return '\n'.join(data) |
3938 | -++ else: |
3939 | -++ return str(data) |
3940 | -++ |
3941 | -++ def lookup(self, path, data): |
3942 | -++ items = path.split('/') |
3943 | -++ for item in items: |
3944 | -++ if item: |
3945 | -++ if not isinstance(data, dict): |
3946 | -++ return data |
3947 | -++ if not item in data: |
3948 | -++ return None |
3949 | -++ data = data[item] |
3950 | -++ return data |
3951 | -++ |
3952 | -++ @webob.dec.wsgify(RequestClass=wsgi.Request) |
3953 | -++ def __call__(self, req): |
3954 | -++ remote_address = req.remote_addr |
3955 | -++ if FLAGS.use_forwarded_for: |
3956 | -++ remote_address = req.headers.get('X-Forwarded-For', remote_address) |
3957 | -++ try: |
3958 | -++ meta_data = self.get_metadata(remote_address) |
3959 | -++ except Exception: |
3960 | -++ LOG.exception(_('Failed to get metadata for ip: %s'), |
3961 | -++ remote_address) |
3962 | -++ msg = _('An unknown error has occurred. ' |
3963 | -++ 'Please try your request again.') |
3964 | -++ exc = webob.exc.HTTPInternalServerError(explanation=unicode(msg)) |
3965 | -++ return exc |
3966 | -++ if meta_data is None: |
3967 | -++ LOG.error(_('Failed to get metadata for ip: %s'), remote_address) |
3968 | -++ raise webob.exc.HTTPNotFound() |
3969 | -++ data = self.lookup(req.path_info, meta_data) |
3970 | -++ if data is None: |
3971 | -++ raise webob.exc.HTTPNotFound() |
3972 | -++ return self.print_data(data) |
3973 | -+diff -Naurp nova.orig/api/metadata/__init__.py nova/api/metadata/__init__.py |
3974 | -+--- nova.orig/api/metadata/__init__.py 1969-12-31 19:00:00.000000000 -0500 |
3975 | -++++ nova/api/metadata/__init__.py 2012-02-28 09:08:10.821887173 -0500 |
3976 | -+@@ -0,0 +1,25 @@ |
3977 | -++# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
3978 | -++ |
3979 | -++# Copyright 2011 Openstack, LLC. |
3980 | -++# |
3981 | -++# Licensed under the Apache License, Version 2.0 (the "License"); you may |
3982 | -++# not use this file except in compliance with the License. You may obtain |
3983 | -++# a copy of the License at |
3984 | -++# |
3985 | -++# http://www.apache.org/licenses/LICENSE-2.0 |
3986 | -++# |
3987 | -++# Unless required by applicable law or agreed to in writing, software |
3988 | -++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
3989 | -++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
3990 | -++# License for the specific language governing permissions and limitations |
3991 | -++# under the License. |
3992 | -++ |
3993 | -++""" |
3994 | -++:mod:`nova.api.metadata` -- Nova Metadata Server |
3995 | -++================================================ |
3996 | -++ |
3997 | -++.. automodule:: nova.api.metadata |
3998 | -++ :platform: Unix |
3999 | -++ :synopsis: Metadata Server for Nova |
4000 | -++.. moduleauthor:: Vishvananda Ishaya <vishvananda@gmail.com> |
4001 | -++""" |
4002 | -+diff -Naurp nova.orig/api/openstack/auth.py nova/api/openstack/auth.py |
4003 | -+--- nova.orig/api/openstack/auth.py 1969-12-31 19:00:00.000000000 -0500 |
4004 | -++++ nova/api/openstack/auth.py 2012-02-28 09:08:10.821887173 -0500 |
4005 | -+@@ -0,0 +1,263 @@ |
4006 | -++# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
4007 | -++ |
4008 | -++# Copyright 2010 OpenStack LLC. |
4009 | -++# All Rights Reserved. |
4010 | -++# |
4011 | -++# Licensed under the Apache License, Version 2.0 (the "License"); you may |
4012 | -++# not use this file except in compliance with the License. You may obtain |
4013 | -++# a copy of the License at |
4014 | -++# |
4015 | -++# http://www.apache.org/licenses/LICENSE-2.0 |
4016 | -++# |
4017 | -++# Unless required by applicable law or agreed to in writing, software |
4018 | -++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
4019 | -++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
4020 | -++# License for the specific language governing permissions and limitations |
4021 | -++# under the License. |
4022 | -++ |
4023 | -++import hashlib |
4024 | -++import os |
4025 | -++import time |
4026 | -++ |
4027 | -++import webob.dec |
4028 | -++import webob.exc |
4029 | -++ |
4030 | -++from nova.api.openstack import common |
4031 | -++from nova.api.openstack import wsgi |
4032 | -++from nova.auth import manager |
4033 | -++from nova import context |
4034 | -++from nova import exception |
4035 | -++from nova import flags |
4036 | -++from nova import log as logging |
4037 | -++from nova import utils |
4038 | -++from nova import wsgi as base_wsgi |
4039 | -++ |
4040 | -++LOG = logging.getLogger(__name__) |
4041 | -++FLAGS = flags.FLAGS |
4042 | -++flags.DECLARE('use_forwarded_for', 'nova.api.auth') |
4043 | -++ |
4044 | -++ |
4045 | -++class NoAuthMiddleware(base_wsgi.Middleware): |
4046 | -++ """Return a fake token if one isn't specified.""" |
4047 | -++ |
4048 | -++ @webob.dec.wsgify(RequestClass=wsgi.Request) |
4049 | -++ def __call__(self, req): |
4050 | -++ if 'X-Auth-Token' not in req.headers: |
4051 | -++ user_id = req.headers.get('X-Auth-User', 'admin') |
4052 | -++ project_id = req.headers.get('X-Auth-Project-Id', 'admin') |
4053 | -++ os_url = os.path.join(req.url, project_id) |
4054 | -++ res = webob.Response() |
4055 | -++ # NOTE(vish): This is expecting and returning Auth(1.1), whereas |
4056 | -++ # keystone uses 2.0 auth. We should probably allow |
4057 | -++ # 2.0 auth here as well. |
4058 | -++ res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id) |
4059 | -++ res.headers['X-Server-Management-Url'] = os_url |
4060 | -++ res.content_type = 'text/plain' |
4061 | -++ res.status = '204' |
4062 | -++ return res |
4063 | -++ |
4064 | -++ token = req.headers['X-Auth-Token'] |
4065 | -++ user_id, _sep, project_id = token.partition(':') |
4066 | -++ project_id = project_id or user_id |
4067 | -++ remote_address = getattr(req, 'remote_address', '127.0.0.1') |
4068 | -++ if FLAGS.use_forwarded_for: |
4069 | -++ remote_address = req.headers.get('X-Forwarded-For', remote_address) |
4070 | -++ ctx = context.RequestContext(user_id, |
4071 | -++ project_id, |
4072 | -++ is_admin=True, |
4073 | -++ remote_address=remote_address) |
4074 | -++ |
4075 | -++ req.environ['nova.context'] = ctx |
4076 | -++ return self.application |
4077 | -++ |
4078 | -++ |
4079 | -++class AuthMiddleware(base_wsgi.Middleware): |
4080 | -++ """Authorize the openstack API request or return an HTTP Forbidden.""" |
4081 | -++ |
4082 | -++ def __init__(self, application, db_driver=None): |
4083 | -++ if not db_driver: |
4084 | -++ db_driver = FLAGS.db_driver |
4085 | -++ self.db = utils.import_object(db_driver) |
4086 | -++ self.auth = manager.AuthManager() |
4087 | -++ super(AuthMiddleware, self).__init__(application) |
4088 | -++ |
4089 | -++ @webob.dec.wsgify(RequestClass=wsgi.Request) |
4090 | -++ def __call__(self, req): |
4091 | -++ if not self.has_authentication(req): |
4092 | -++ return self.authenticate(req) |
4093 | -++ user_id = self.get_user_by_authentication(req) |
4094 | -++ if not user_id: |
4095 | -++ token = req.headers["X-Auth-Token"] |
4096 | -++ msg = _("%(user_id)s could not be found with token '%(token)s'") |
4097 | -++ LOG.warn(msg % locals()) |
4098 | -++ return wsgi.Fault(webob.exc.HTTPUnauthorized()) |
4099 | -++ |
4100 | -++ # Get all valid projects for the user |
4101 | -++ projects = self.auth.get_projects(user_id) |
4102 | -++ if not projects: |
4103 | -++ return wsgi.Fault(webob.exc.HTTPUnauthorized()) |
4104 | -++ |
4105 | -++ project_id = "" |
4106 | -++ path_parts = req.path.split('/') |
4107 | -++ # TODO(wwolf): this v1.1 check will be temporary as |
4108 | -++ # keystone should be taking this over at some point |
4109 | -++ if len(path_parts) > 1 and path_parts[1] in ('v1.1', 'v2'): |
4110 | -++ project_id = path_parts[2] |
4111 | -++ # Check that the project for project_id exists, and that user |
4112 | -++ # is authorized to use it |
4113 | -++ try: |
4114 | -++ self.auth.get_project(project_id) |
4115 | -++ except exception.ProjectNotFound: |
4116 | -++ return wsgi.Fault(webob.exc.HTTPUnauthorized()) |
4117 | -++ if project_id not in [p.id for p in projects]: |
4118 | -++ return wsgi.Fault(webob.exc.HTTPUnauthorized()) |
4119 | -++ else: |
4120 | -++ # As a fallback, set project_id from the headers, which is the v1.0 |
4121 | -++ # behavior. As a last resort, be forgiving to the user and set |
4122 | -++ # project_id based on a valid project of theirs. |
4123 | -++ try: |
4124 | -++ project_id = req.headers["X-Auth-Project-Id"] |
4125 | -++ except KeyError: |
4126 | -++ project_id = projects[0].id |
4127 | -++ |
4128 | -++ is_admin = self.auth.is_admin(user_id) |
4129 | -++ remote_address = getattr(req, 'remote_address', '127.0.0.1') |
4130 | -++ if FLAGS.use_forwarded_for: |
4131 | -++ remote_address = req.headers.get('X-Forwarded-For', remote_address) |
4132 | -++ ctx = context.RequestContext(user_id, |
4133 | -++ project_id, |
4134 | -++ is_admin=is_admin, |
4135 | -++ remote_address=remote_address) |
4136 | -++ req.environ['nova.context'] = ctx |
4137 | -++ |
4138 | -++ if not is_admin and not self.auth.is_project_member(user_id, |
4139 | -++ project_id): |
4140 | -++ msg = _("%(user_id)s must be an admin or a " |
4141 | -++ "member of %(project_id)s") |
4142 | -++ LOG.warn(msg % locals()) |
4143 | -++ return wsgi.Fault(webob.exc.HTTPUnauthorized()) |
4144 | -++ |
4145 | -++ return self.application |
4146 | -++ |
4147 | -++ def has_authentication(self, req): |
4148 | -++ return 'X-Auth-Token' in req.headers |
4149 | -++ |
4150 | -++ def get_user_by_authentication(self, req): |
4151 | -++ return self.authorize_token(req.headers["X-Auth-Token"]) |
4152 | -++ |
4153 | -++ def authenticate(self, req): |
4154 | -++ # Unless the request is explicitly made against /<version>/ don't |
4155 | -++ # honor it |
4156 | -++ path_info = req.path_info |
4157 | -++ if len(path_info) > 1: |
4158 | -++ msg = _("Authentication requests must be made against a version " |
4159 | -++ "root (e.g. /v2).") |
4160 | -++ LOG.warn(msg) |
4161 | -++ return wsgi.Fault(webob.exc.HTTPUnauthorized(explanation=msg)) |
4162 | -++ |
4163 | -++ def _get_auth_header(key): |
4164 | -++ """Ensures that the KeyError returned is meaningful.""" |
4165 | -++ try: |
4166 | -++ return req.headers[key] |
4167 | -++ except KeyError as ex: |
4168 | -++ raise KeyError(key) |
4169 | -++ try: |
4170 | -++ username = _get_auth_header('X-Auth-User') |
4171 | -++ key = _get_auth_header('X-Auth-Key') |
4172 | -++ except KeyError as ex: |
4173 | -++ msg = _("Could not find %s in request.") % ex |
4174 | -++ LOG.warn(msg) |
4175 | -++ return wsgi.Fault(webob.exc.HTTPUnauthorized(explanation=msg)) |
4176 | -++ |
4177 | -++ token, user = self._authorize_user(username, key, req) |
4178 | -++ if user and token: |
4179 | -++ res = webob.Response() |
4180 | -++ res.headers['X-Auth-Token'] = token['token_hash'] |
4181 | -++ _x_server_url = 'X-Server-Management-Url' |
4182 | -++ _server_url = 'server_management_url' |
4183 | -++ res.headers[_x_server_url] = token[_server_url] |
4184 | -++ |
4185 | -++ if token['storage_url']: |
4186 | -++ _x_storage_url = 'X-Storage-Url' |
4187 | -++ _storage_url = 'storage_url' |
4188 | -++ res.headers[_x_storage_url] = token[_storage_url] |
4189 | -++ |
4190 | -++ if token['cdn_management_url']: |
4191 | -++ _x_cdn_url = 'X-CDN-Management-Url' |
4192 | -++ _cdn_url = 'cdn_management_url' |
4193 | -++ res.headers[_x_cdn_url] = token[_cdn_url] |
4194 | -++ |
4195 | -++ res.content_type = 'text/plain' |
4196 | -++ res.status = '204' |
4197 | -++ LOG.debug(_("Successfully authenticated '%s'") % username) |
4198 | -++ return res |
4199 | -++ else: |
4200 | -++ return wsgi.Fault(webob.exc.HTTPUnauthorized()) |
4201 | -++ |
4202 | -++ def authorize_token(self, token_hash): |
4203 | -++ """ retrieves user information from the datastore given a token |
4204 | -++ |
4205 | -++ If the token has expired, returns None |
4206 | -++ If the token is not found, returns None |
4207 | -++ Otherwise returns dict(id=(the authorized user's id)) |
4208 | -++ |
4209 | -++ This method will also remove the token if the timestamp is older than |
4210 | -++ 2 days ago. |
4211 | -++ """ |
4212 | -++ ctxt = context.get_admin_context() |
4213 | -++ try: |
4214 | -++ token = self.db.auth_token_get(ctxt, token_hash) |
4215 | -++ except exception.NotFound: |
4216 | -++ return None |
4217 | -++ if token: |
4218 | -++ delta = utils.utcnow() - token['created_at'] |
4219 | -++ if delta.days >= 2: |
4220 | -++ self.db.auth_token_destroy(ctxt, token['token_hash']) |
4221 | -++ else: |
4222 | -++ return token['user_id'] |
4223 | -++ return None |
4224 | -++ |
4225 | -++ def _authorize_user(self, username, key, req): |
4226 | -++ """Generates a new token and assigns it to a user. |
4227 | -++ |
4228 | -++ username - string |
4229 | -++ key - string API key |
4230 | -++ req - wsgi.Request object |
4231 | -++ """ |
4232 | -++ ctxt = context.get_admin_context() |
4233 | -++ |
4234 | -++ project_id = req.headers.get('X-Auth-Project-Id') |
4235 | -++ if project_id is None: |
4236 | -++ # If the project_id is not provided in the headers, be forgiving to |
4237 | -++ # the user and set project_id based on a valid project of theirs. |
4238 | -++ user = self.auth.get_user_from_access_key(key) |
4239 | -++ projects = self.auth.get_projects(user.id) |
4240 | -++ if not projects: |
4241 | -++ raise webob.exc.HTTPUnauthorized() |
4242 | -++ project_id = projects[0].id |
4243 | -++ |
4244 | -++ try: |
4245 | -++ user = self.auth.get_user_from_access_key(key) |
4246 | -++ except exception.NotFound: |
4247 | -++ LOG.warn(_("User not found with provided API key.")) |
4248 | -++ user = None |
4249 | -++ |
4250 | -++ if user and user.name == username: |
4251 | -++ token_hash = hashlib.sha1('%s%s%f' % (username, key, |
4252 | -++ time.time())).hexdigest() |
4253 | -++ token_dict = {} |
4254 | -++ token_dict['token_hash'] = token_hash |
4255 | -++ token_dict['cdn_management_url'] = '' |
4256 | -++ os_url = req.url.strip('/') |
4257 | -++ os_url += '/' + project_id |
4258 | -++ token_dict['server_management_url'] = os_url |
4259 | -++ token_dict['storage_url'] = '' |
4260 | -++ token_dict['user_id'] = user.id |
4261 | -++ token = self.db.auth_token_create(ctxt, token_dict) |
4262 | -++ return token, user |
4263 | -++ elif user and user.name != username: |
4264 | -++ msg = _("Provided API key is valid, but not for user " |
4265 | -++ "'%(username)s'") % locals() |
4266 | -++ LOG.warn(msg) |
4267 | -++ |
4268 | -++ return None, None |
4269 | -+diff -Naurp nova.orig/api/openstack/common.py nova/api/openstack/common.py |
4270 | -+--- nova.orig/api/openstack/common.py 1969-12-31 19:00:00.000000000 -0500 |
4271 | -++++ nova/api/openstack/common.py 2012-02-28 09:08:10.821887173 -0500 |
4272 | -+@@ -0,0 +1,493 @@ |
4273 | -++# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
4274 | -++ |
4275 | -++# Copyright 2010 OpenStack LLC. |
4276 | -++# All Rights Reserved. |
4277 | -++# |
4278 | -++# Licensed under the Apache License, Version 2.0 (the "License"); you may |
4279 | -++# not use this file except in compliance with the License. You may obtain |
4280 | -++# a copy of the License at |
4281 | -++# |
4282 | -++# http://www.apache.org/licenses/LICENSE-2.0 |
4283 | -++# |
4284 | -++# Unless required by applicable law or agreed to in writing, software |
4285 | -++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
4286 | -++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
4287 | -++# License for the specific language governing permissions and limitations |
4288 | -++# under the License. |
4289 | -++ |
4290 | -++import functools |
4291 | -++import os |
4292 | -++import re |
4293 | -++import urlparse |
4294 | -++ |
4295 | -++import webob |
4296 | -++from xml.dom import minidom |
4297 | -++ |
4298 | -++from nova.api.openstack import wsgi |
4299 | -++from nova.api.openstack import xmlutil |
4300 | -++from nova.compute import vm_states |
4301 | -++from nova.compute import task_states |
4302 | -++from nova import exception |
4303 | -++from nova import flags |
4304 | -++from nova import log as logging |
4305 | -++from nova import network |
4306 | -++from nova.network import model as network_model |
4307 | -++from nova import quota |
4308 | -++ |
4309 | -++ |
4310 | -++LOG = logging.getLogger(__name__) |
4311 | -++FLAGS = flags.FLAGS |
4312 | -++ |
4313 | -++ |
4314 | -++XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1' |
4315 | -++ |
4316 | -++ |
4317 | -++_STATE_MAP = { |
4318 | -++ vm_states.ACTIVE: { |
4319 | -++ 'default': 'ACTIVE', |
4320 | -++ task_states.REBOOTING: 'REBOOT', |
4321 | -++ task_states.REBOOTING_HARD: 'HARD_REBOOT', |
4322 | -++ task_states.UPDATING_PASSWORD: 'PASSWORD', |
4323 | -++ task_states.RESIZE_VERIFY: 'VERIFY_RESIZE', |
4324 | -++ }, |
4325 | -++ vm_states.BUILDING: { |
4326 | -++ 'default': 'BUILD', |
4327 | -++ }, |
4328 | -++ vm_states.REBUILDING: { |
4329 | -++ 'default': 'REBUILD', |
4330 | -++ }, |
4331 | -++ vm_states.STOPPED: { |
4332 | -++ 'default': 'STOPPED', |
4333 | -++ }, |
4334 | -++ vm_states.SHUTOFF: { |
4335 | -++ 'default': 'SHUTOFF', |
4336 | -++ }, |
4337 | -++ vm_states.MIGRATING: { |
4338 | -++ 'default': 'MIGRATING', |
4339 | -++ }, |
4340 | -++ vm_states.RESIZING: { |
4341 | -++ 'default': 'RESIZE', |
4342 | -++ task_states.RESIZE_REVERTING: 'REVERT_RESIZE', |
4343 | -++ }, |
4344 | -++ vm_states.PAUSED: { |
4345 | -++ 'default': 'PAUSED', |
4346 | -++ }, |
4347 | -++ vm_states.SUSPENDED: { |
4348 | -++ 'default': 'SUSPENDED', |
4349 | -++ }, |
4350 | -++ vm_states.RESCUED: { |
4351 | -++ 'default': 'RESCUE', |
4352 | -++ }, |
4353 | -++ vm_states.ERROR: { |
4354 | -++ 'default': 'ERROR', |
4355 | -++ }, |
4356 | -++ vm_states.DELETED: { |
4357 | -++ 'default': 'DELETED', |
4358 | -++ }, |
4359 | -++ vm_states.SOFT_DELETE: { |
4360 | -++ 'default': 'DELETED', |
4361 | -++ }, |
4362 | -++} |
4363 | -++ |
4364 | -++ |
4365 | -++def status_from_state(vm_state, task_state='default'): |
4366 | -++ """Given vm_state and task_state, return a status string.""" |
4367 | -++ task_map = _STATE_MAP.get(vm_state, dict(default='UNKNOWN_STATE')) |
4368 | -++ status = task_map.get(task_state, task_map['default']) |
4369 | -++ LOG.debug("Generated %(status)s from vm_state=%(vm_state)s " |
4370 | -++ "task_state=%(task_state)s." % locals()) |
4371 | -++ return status |
4372 | -++ |
4373 | -++ |
4374 | -++def vm_state_from_status(status): |
4375 | -++ """Map the server status string to a vm state.""" |
4376 | -++ for state, task_map in _STATE_MAP.iteritems(): |
4377 | -++ status_string = task_map.get("default") |
4378 | -++ if status.lower() == status_string.lower(): |
4379 | -++ return state |
4380 | -++ |
4381 | -++ |
4382 | -++def get_pagination_params(request): |
4383 | -++ """Return marker, limit tuple from request. |
4384 | -++ |
4385 | -++ :param request: `wsgi.Request` possibly containing 'marker' and 'limit' |
4386 | -++ GET variables. 'marker' is the id of the last element |
4387 | -++ the client has seen, and 'limit' is the maximum number |
4388 | -++ of items to return. If 'limit' is not specified, 0, or |
4389 | -++ > max_limit, we default to max_limit. Negative values |
4390 | -++ for either marker or limit will cause |
4391 | -++ exc.HTTPBadRequest() exceptions to be raised. |
4392 | -++ |
4393 | -++ """ |
4394 | -++ params = {} |
4395 | -++ if 'limit' in request.GET: |
4396 | -++ params['limit'] = _get_limit_param(request) |
4397 | -++ if 'marker' in request.GET: |
4398 | -++ params['marker'] = _get_marker_param(request) |
4399 | -++ return params |
4400 | -++ |
4401 | -++ |
4402 | -++def _get_limit_param(request): |
4403 | -++ """Extract integer limit from request or fail""" |
4404 | -++ try: |
4405 | -++ limit = int(request.GET['limit']) |
4406 | -++ except ValueError: |
4407 | -++ msg = _('limit param must be an integer') |
4408 | -++ raise webob.exc.HTTPBadRequest(explanation=msg) |
4409 | -++ if limit < 0: |
4410 | -++ msg = _('limit param must be positive') |
4411 | -++ raise webob.exc.HTTPBadRequest(explanation=msg) |
4412 | -++ return limit |
4413 | -++ |
4414 | -++ |
4415 | -++def _get_marker_param(request): |
4416 | -++ """Extract marker id from request or fail""" |
4417 | -++ return request.GET['marker'] |
4418 | -++ |
4419 | -++ |
4420 | -++def limited(items, request, max_limit=FLAGS.osapi_max_limit): |
4421 | -++ """ |
4422 | -++ Return a slice of items according to requested offset and limit. |
4423 | -++ |
4424 | -++ @param items: A sliceable entity |
4425 | -++ @param request: `wsgi.Request` possibly containing 'offset' and 'limit' |
4426 | -++ GET variables. 'offset' is where to start in the list, |
4427 | -++ and 'limit' is the maximum number of items to return. If |
4428 | -++ 'limit' is not specified, 0, or > max_limit, we default |
4429 | -++ to max_limit. Negative values for either offset or limit |
4430 | -++ will cause exc.HTTPBadRequest() exceptions to be raised. |
4431 | -++ @kwarg max_limit: The maximum number of items to return from 'items' |
4432 | -++ """ |
4433 | -++ try: |
4434 | -++ offset = int(request.GET.get('offset', 0)) |
4435 | -++ except ValueError: |
4436 | -++ msg = _('offset param must be an integer') |
4437 | -++ raise webob.exc.HTTPBadRequest(explanation=msg) |
4438 | -++ |
4439 | -++ try: |
4440 | -++ limit = int(request.GET.get('limit', max_limit)) |
4441 | -++ except ValueError: |
4442 | -++ msg = _('limit param must be an integer') |
4443 | -++ raise webob.exc.HTTPBadRequest(explanation=msg) |
4444 | -++ |
4445 | -++ if limit < 0: |
4446 | -++ msg = _('limit param must be positive') |
4447 | -++ raise webob.exc.HTTPBadRequest(explanation=msg) |
4448 | -++ |
4449 | -++ if offset < 0: |
4450 | -++ msg = _('offset param must be positive') |
4451 | -++ raise webob.exc.HTTPBadRequest(explanation=msg) |
4452 | -++ |
4453 | -++ limit = min(max_limit, limit or max_limit) |
4454 | -++ range_end = offset + limit |
4455 | -++ return items[offset:range_end] |
4456 | -++ |
4457 | -++ |
4458 | -++def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit): |
4459 | -++ """Return a slice of items according to the requested marker and limit.""" |
4460 | -++ params = get_pagination_params(request) |
4461 | -++ |
4462 | -++ limit = params.get('limit', max_limit) |
4463 | -++ marker = params.get('marker') |
4464 | -++ |
4465 | -++ limit = min(max_limit, limit) |
4466 | -++ start_index = 0 |
4467 | -++ if marker: |
4468 | -++ start_index = -1 |
4469 | -++ for i, item in enumerate(items): |
4470 | -++ if item['id'] == marker or item.get('uuid') == marker: |
4471 | -++ start_index = i + 1 |
4472 | -++ break |
4473 | -++ if start_index < 0: |
4474 | -++ msg = _('marker [%s] not found') % marker |
4475 | -++ raise webob.exc.HTTPBadRequest(explanation=msg) |
4476 | -++ range_end = start_index + limit |
4477 | -++ return items[start_index:range_end] |
4478 | -++ |
4479 | -++ |
4480 | -++def get_id_from_href(href): |
4481 | -++ """Return the id or uuid portion of a url. |
4482 | -++ |
4483 | -++ Given: 'http://www.foo.com/bar/123?q=4' |
4484 | -++ Returns: '123' |
4485 | -++ |
4486 | -++ Given: 'http://www.foo.com/bar/abc123?q=4' |
4487 | -++ Returns: 'abc123' |
4488 | -++ |
4489 | -++ """ |
4490 | -++ return urlparse.urlsplit("%s" % href).path.split('/')[-1] |
4491 | -++ |
4492 | -++ |
4493 | -++def remove_version_from_href(href): |
4494 | -++ """Removes the first api version from the href. |
4495 | -++ |
4496 | -++ Given: 'http://www.nova.com/v1.1/123' |
4497 | -++ Returns: 'http://www.nova.com/123' |
4498 | -++ |
4499 | -++ Given: 'http://www.nova.com/v1.1' |
4500 | -++ Returns: 'http://www.nova.com' |
4501 | -++ |
4502 | -++ """ |
4503 | -++ parsed_url = urlparse.urlsplit(href) |
4504 | -++ url_parts = parsed_url.path.split('/', 2) |
4505 | -++ |
4506 | -++ # NOTE: this should match vX.X or vX |
4507 | -++ expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)') |
4508 | -++ if expression.match(url_parts[1]): |
4509 | -++ del url_parts[1] |
4510 | -++ |
4511 | -++ new_path = '/'.join(url_parts) |
4512 | -++ |
4513 | -++ if new_path == parsed_url.path: |
4514 | -++ msg = _('href %s does not contain version') % href |
4515 | -++ LOG.debug(msg) |
4516 | -++ raise ValueError(msg) |
4517 | -++ |
4518 | -++ parsed_url = list(parsed_url) |
4519 | -++ parsed_url[2] = new_path |
4520 | -++ return urlparse.urlunsplit(parsed_url) |
4521 | -++ |
4522 | -++ |
4523 | -++def get_version_from_href(href): |
4524 | -++ """Returns the api version in the href. |
4525 | -++ |
4526 | -++ Returns the api version in the href. |
4527 | -++ If no version is found, '2' is returned |
4528 | -++ |
4529 | -++ Given: 'http://www.nova.com/123' |
4530 | -++ Returns: '2' |
4531 | -++ |
4532 | -++ Given: 'http://www.nova.com/v1.1' |
4533 | -++ Returns: '1.1' |
4534 | -++ |
4535 | -++ """ |
4536 | -++ try: |
4537 | -++ expression = r'/v([0-9]+|[0-9]+\.[0-9]+)(/|$)' |
4538 | -++ return re.findall(expression, href)[0][0] |
4539 | -++ except IndexError: |
4540 | -++ return '2' |
4541 | -++ |
4542 | -++ |
4543 | -++def check_img_metadata_quota_limit(context, metadata): |
4544 | -++ if metadata is None: |
4545 | -++ return |
4546 | -++ num_metadata = len(metadata) |
4547 | -++ quota_metadata = quota.allowed_metadata_items(context, num_metadata) |
4548 | -++ if quota_metadata < num_metadata: |
4549 | -++ expl = _("Image metadata limit exceeded") |
4550 | -++ raise webob.exc.HTTPRequestEntityTooLarge(explanation=expl, |
4551 | -++ headers={'Retry-After': 0}) |
4552 | -++ |
4553 | -++ |
4554 | -++def dict_to_query_str(params): |
4555 | -++ # TODO: we should just use urllib.urlencode instead of this |
4556 | -++ # But currently we don't work with urlencoded url's |
4557 | -++ param_str = "" |
4558 | -++ for key, val in params.iteritems(): |
4559 | -++ param_str = param_str + '='.join([str(key), str(val)]) + '&' |
4560 | -++ |
4561 | -++ return param_str.rstrip('&') |
4562 | -++ |
4563 | -++ |
4564 | -++def get_networks_for_instance_from_nw_info(nw_info): |
4565 | -++ networks = {} |
4566 | -++ |
4567 | -++ for vif in nw_info: |
4568 | -++ ips = vif.fixed_ips() |
4569 | -++ floaters = vif.floating_ips() |
4570 | -++ label = vif['network']['label'] |
4571 | -++ if label not in networks: |
4572 | -++ networks[label] = {'ips': [], 'floating_ips': []} |
4573 | -++ |
4574 | -++ networks[label]['ips'].extend(ips) |
4575 | -++ networks[label]['floating_ips'].extend(floaters) |
4576 | -++ return networks |
4577 | -++ |
4578 | -++ |
4579 | -++def get_nw_info_for_instance(context, instance): |
4580 | -++ cached_nwinfo = instance['info_cache'].get('network_info') or [] |
4581 | -++ return network_model.NetworkInfo.hydrate(cached_nwinfo) |
4582 | -++ |
4583 | -++ |
4584 | -++def get_networks_for_instance(context, instance): |
4585 | -++ """Returns a prepared nw_info list for passing into the view |
4586 | -++ builders |
4587 | -++ |
4588 | -++ We end up with a data structure like: |
4589 | -++ {'public': {'ips': [{'addr': '10.0.0.1', 'version': 4}, |
4590 | -++ {'addr': '2001::1', 'version': 6}], |
4591 | -++ 'floating_ips': [{'addr': '172.16.0.1', 'version': 4}, |
4592 | -++ {'addr': '172.16.2.1', 'version': 4}]}, |
4593 | -++ ...} |
4594 | -++ """ |
4595 | -++ nw_info = get_nw_info_for_instance(context, instance) |
4596 | -++ return get_networks_for_instance_from_nw_info(nw_info) |
4597 | -++ |
4598 | -++ |
4599 | -++def raise_http_conflict_for_instance_invalid_state(exc, action): |
4600 | -++ """Return a webob.exc.HTTPConflict instance containing a message |
4601 | -++ appropriate to return via the API based on the original |
4602 | -++ InstanceInvalidState exception. |
4603 | -++ """ |
4604 | -++ attr = exc.kwargs.get('attr') |
4605 | -++ state = exc.kwargs.get('state') |
4606 | -++ if attr and state: |
4607 | -++ msg = _("Cannot '%(action)s' while instance is in %(attr)s %(state)s") |
4608 | -++ else: |
4609 | -++ # At least give some meaningful message |
4610 | -++ msg = _("Instance is in an invalid state for '%(action)s'") |
4611 | -++ raise webob.exc.HTTPConflict(explanation=msg % locals()) |
4612 | -++ |
4613 | -++ |
4614 | -++class MetadataDeserializer(wsgi.MetadataXMLDeserializer): |
4615 | -++ def deserialize(self, text): |
4616 | -++ dom = minidom.parseString(text) |
4617 | -++ metadata_node = self.find_first_child_named(dom, "metadata") |
4618 | -++ metadata = self.extract_metadata(metadata_node) |
4619 | -++ return {'body': {'metadata': metadata}} |
4620 | -++ |
4621 | -++ |
4622 | -++class MetaItemDeserializer(wsgi.MetadataXMLDeserializer): |
4623 | -++ def deserialize(self, text): |
4624 | -++ dom = minidom.parseString(text) |
4625 | -++ metadata_item = self.extract_metadata(dom) |
4626 | -++ return {'body': {'meta': metadata_item}} |
4627 | -++ |
4628 | -++ |
4629 | -++class MetadataXMLDeserializer(wsgi.XMLDeserializer): |
4630 | -++ |
4631 | -++ def extract_metadata(self, metadata_node): |
4632 | -++ """Marshal the metadata attribute of a parsed request""" |
4633 | -++ if metadata_node is None: |
4634 | -++ return {} |
4635 | -++ metadata = {} |
4636 | -++ for meta_node in self.find_children_named(metadata_node, "meta"): |
4637 | -++ key = meta_node.getAttribute("key") |
4638 | -++ metadata[key] = self.extract_text(meta_node) |
4639 | -++ return metadata |
4640 | -++ |
4641 | -++ def _extract_metadata_container(self, datastring): |
4642 | -++ dom = minidom.parseString(datastring) |
4643 | -++ metadata_node = self.find_first_child_named(dom, "metadata") |
4644 | -++ metadata = self.extract_metadata(metadata_node) |
4645 | -++ return {'body': {'metadata': metadata}} |
4646 | -++ |
4647 | -++ def create(self, datastring): |
4648 | -++ return self._extract_metadata_container(datastring) |
4649 | -++ |
4650 | -++ def update_all(self, datastring): |
4651 | -++ return self._extract_metadata_container(datastring) |
4652 | -++ |
4653 | -++ def update(self, datastring): |
4654 | -++ dom = minidom.parseString(datastring) |
4655 | -++ metadata_item = self.extract_metadata(dom) |
4656 | -++ return {'body': {'meta': metadata_item}} |
4657 | -++ |
4658 | -++ |
4659 | -++metadata_nsmap = {None: xmlutil.XMLNS_V11} |
4660 | -++ |
4661 | -++ |
4662 | -++class MetaItemTemplate(xmlutil.TemplateBuilder): |
4663 | -++ def construct(self): |
4664 | -++ sel = xmlutil.Selector('meta', xmlutil.get_items, 0) |
4665 | -++ root = xmlutil.TemplateElement('meta', selector=sel) |
4666 | -++ root.set('key', 0) |
4667 | -++ root.text = 1 |
4668 | -++ return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap) |
4669 | -++ |
4670 | -++ |
4671 | -++class MetadataTemplateElement(xmlutil.TemplateElement): |
4672 | -++ def will_render(self, datum): |
4673 | -++ return True |
4674 | -++ |
4675 | -++ |
4676 | -++class MetadataTemplate(xmlutil.TemplateBuilder): |
4677 | -++ def construct(self): |
4678 | -++ root = MetadataTemplateElement('metadata', selector='metadata') |
4679 | -++ elem = xmlutil.SubTemplateElement(root, 'meta', |
4680 | -++ selector=xmlutil.get_items) |
4681 | -++ elem.set('key', 0) |
4682 | -++ elem.text = 1 |
4683 | -++ return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap) |
4684 | -++ |
4685 | -++ |
4686 | -++def check_snapshots_enabled(f): |
4687 | -++ @functools.wraps(f) |
4688 | -++ def inner(*args, **kwargs): |
4689 | -++ if not FLAGS.allow_instance_snapshots: |
4690 | -++ LOG.warn(_('Rejecting snapshot request, snapshots currently' |
4691 | -++ ' disabled')) |
4692 | -++ msg = _("Instance snapshots are not permitted at this time.") |
4693 | -++ raise webob.exc.HTTPBadRequest(explanation=msg) |
4694 | -++ return f(*args, **kwargs) |
4695 | -++ return inner |
4696 | -++ |
4697 | -++ |
4698 | -++class ViewBuilder(object): |
4699 | -++ """Model API responses as dictionaries.""" |
4700 | -++ |
4701 | -++ _collection_name = None |
4702 | -++ |
4703 | -++ def _get_links(self, request, identifier): |
4704 | -++ return [{ |
4705 | -++ "rel": "self", |
4706 | -++ "href": self._get_href_link(request, identifier), |
4707 | -++ }, |
4708 | -++ { |
4709 | -++ "rel": "bookmark", |
4710 | -++ "href": self._get_bookmark_link(request, identifier), |
4711 | -++ }] |
4712 | -++ |
4713 | -++ def _get_next_link(self, request, identifier): |
4714 | -++ """Return href string with proper limit and marker params.""" |
4715 | -++ params = request.params.copy() |
4716 | -++ params["marker"] = identifier |
4717 | -++ prefix = self._update_link_prefix(request.application_url, |
4718 | -++ FLAGS.osapi_compute_link_prefix) |
4719 | -++ url = os.path.join(prefix, |
4720 | -++ request.environ["nova.context"].project_id, |
4721 | -++ self._collection_name) |
4722 | -++ return "%s?%s" % (url, dict_to_query_str(params)) |
4723 | -++ |
4724 | -++ def _get_href_link(self, request, identifier): |
4725 | -++ """Return an href string pointing to this object.""" |
4726 | -++ prefix = self._update_link_prefix(request.application_url, |
4727 | -++ FLAGS.osapi_compute_link_prefix) |
4728 | -++ return os.path.join(prefix, |
4729 | -++ request.environ["nova.context"].project_id, |
4730 | -++ self._collection_name, |
4731 | -++ str(identifier)) |
4732 | -++ |
4733 | -++ def _get_bookmark_link(self, request, identifier): |
4734 | -++ """Create a URL that refers to a specific resource.""" |
4735 | -++ base_url = remove_version_from_href(request.application_url) |
4736 | -++ base_url = self._update_link_prefix(base_url, |
4737 | -++ FLAGS.osapi_compute_link_prefix) |
4738 | -++ return os.path.join(base_url, |
4739 | -++ request.environ["nova.context"].project_id, |
4740 | -++ self._collection_name, |
4741 | -++ str(identifier)) |
4742 | -++ |
4743 | -++ def _get_collection_links(self, request, items, id_key="uuid"): |
4744 | -++ """Retrieve 'next' link, if applicable.""" |
4745 | -++ links = [] |
4746 | -++ limit = int(request.params.get("limit", 0)) |
4747 | -++ if limit and limit == len(items): |
4748 | -++ last_item = items[-1] |
4749 | -++ if id_key in last_item: |
4750 | -++ last_item_id = last_item[id_key] |
4751 | -++ else: |
4752 | -++ last_item_id = last_item["id"] |
4753 | -++ links.append({ |
4754 | -++ "rel": "next", |
4755 | -++ "href": self._get_next_link(request, last_item_id), |
4756 | -++ }) |
4757 | -++ return links |
4758 | -++ |
4759 | -++ def _update_link_prefix(self, orig_url, prefix): |
4760 | -++ if not prefix: |
4761 | -++ return orig_url |
4762 | -++ url_parts = list(urlparse.urlsplit(orig_url)) |
4763 | -++ prefix_parts = list(urlparse.urlsplit(prefix)) |
4764 | -++ url_parts[0:2] = prefix_parts[0:2] |
4765 | -++ return urlparse.urlunsplit(url_parts) |
4766 | -+diff -Naurp nova.orig/api/openstack/compute/consoles.py nova/api/openstack/compute/consoles.py |
4767 | -+--- nova.orig/api/openstack/compute/consoles.py 1969-12-31 19:00:00.000000000 -0500 |
4768 | -++++ nova/api/openstack/compute/consoles.py 2012-02-28 09:08:10.821887173 -0500 |
4769 | -+@@ -0,0 +1,131 @@ |
4770 | -++# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
4771 | -++ |
4772 | -++# Copyright 2010 OpenStack LLC. |
4773 | -++# All Rights Reserved. |
4774 | -++# |
4775 | -++# Licensed under the Apache License, Version 2.0 (the "License"); you may |
4776 | -++# not use this file except in compliance with the License. You may obtain |
4777 | -++# a copy of the License at |
4778 | -++# |
4779 | -++# http://www.apache.org/licenses/LICENSE-2.0 |
4780 | -++# |
4781 | -++# Unless required by applicable law or agreed to in writing, software |
4782 | -++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
4783 | -++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
4784 | -++# License for the specific language governing permissions and limitations |
4785 | -++# under the License. |
4786 | -++ |
4787 | -++import webob |
4788 | -++from webob import exc |
4789 | -++ |
4790 | -++from nova.api.openstack import wsgi |
4791 | -++from nova.api.openstack import xmlutil |
4792 | -++from nova import console |
4793 | -++from nova import exception |
4794 | -++ |
4795 | -++ |
4796 | -++def _translate_keys(cons): |
4797 | -++ """Coerces a console instance into proper dictionary format """ |
4798 | -++ pool = cons['pool'] |
4799 | -++ info = {'id': cons['id'], |
4800 | -++ 'console_type': pool['console_type']} |
4801 | -++ return dict(console=info) |
4802 | -++ |
4803 | -++ |
4804 | -++def _translate_detail_keys(cons): |
4805 | -++ """Coerces a console instance into proper dictionary format with |
4806 | -++ correctly mapped attributes """ |
4807 | -++ pool = cons['pool'] |
4808 | -++ info = {'id': cons['id'], |
4809 | -++ 'console_type': pool['console_type'], |
4810 | -++ 'password': cons['password'], |
4811 | -++ 'instance_name': cons['instance_name'], |
4812 | -++ 'port': cons['port'], |
4813 | -++ 'host': pool['public_hostname']} |
4814 | -++ return dict(console=info) |
4815 | -++ |
4816 | -++ |
4817 | -++class ConsoleTemplate(xmlutil.TemplateBuilder): |
4818 | -++ def construct(self): |
4819 | -++ root = xmlutil.TemplateElement('console', selector='console') |
4820 | -++ |
4821 | -++ id_elem = xmlutil.SubTemplateElement(root, 'id', selector='id') |
4822 | -++ id_elem.text = xmlutil.Selector() |
4823 | -++ |
4824 | -++ port_elem = xmlutil.SubTemplateElement(root, 'port', selector='port') |
4825 | -++ port_elem.text = xmlutil.Selector() |
4826 | -++ |
4827 | -++ host_elem = xmlutil.SubTemplateElement(root, 'host', selector='host') |
4828 | -++ host_elem.text = xmlutil.Selector() |
4829 | -++ |
4830 | -++ passwd_elem = xmlutil.SubTemplateElement(root, 'password', |
4831 | -++ selector='password') |
4832 | -++ passwd_elem.text = xmlutil.Selector() |
4833 | -++ |
4834 | -++ constype_elem = xmlutil.SubTemplateElement(root, 'console_type', |
4835 | -++ selector='console_type') |
4836 | -++ constype_elem.text = xmlutil.Selector() |
4837 | -++ |
4838 | -++ return xmlutil.MasterTemplate(root, 1) |
4839 | -++ |
4840 | -++ |
4841 | -++class ConsolesTemplate(xmlutil.TemplateBuilder): |
4842 | -++ def construct(self): |
4843 | -++ root = xmlutil.TemplateElement('consoles') |
4844 | -++ console = xmlutil.SubTemplateElement(root, 'console', |
4845 | -++ selector='consoles') |
4846 | -++ console.append(ConsoleTemplate()) |
4847 | -++ |
4848 | -++ return xmlutil.MasterTemplate(root, 1) |
4849 | -++ |
4850 | -++ |
4851 | -++class Controller(object): |
4852 | -++ """The Consoles controller for the Openstack API""" |
4853 | -++ |
4854 | -++ def __init__(self): |
4855 | -++ self.console_api = console.API() |
4856 | -++ |
4857 | -++ @wsgi.serializers(xml=ConsolesTemplate) |
4858 | -++ def index(self, req, server_id): |
4859 | -++ """Returns a list of consoles for this instance""" |
4860 | -++ consoles = self.console_api.get_consoles( |
4861 | -++ req.environ['nova.context'], |
4862 | -++ server_id) |
4863 | -++ return dict(consoles=[_translate_keys(console) |
4864 | -++ for console in consoles]) |
4865 | -++ |
4866 | -++ def create(self, req, server_id): |
4867 | -++ """Creates a new console""" |
4868 | -++ self.console_api.create_console( |
4869 | -++ req.environ['nova.context'], |
4870 | -++ server_id) |
4871 | -++ |
4872 | -++ @wsgi.serializers(xml=ConsoleTemplate) |
4873 | -++ def show(self, req, server_id, id): |
4874 | -++ """Shows in-depth information on a specific console""" |
4875 | -++ try: |
4876 | -++ console = self.console_api.get_console( |
4877 | -++ req.environ['nova.context'], |
4878 | -++ server_id, |
4879 | -++ int(id)) |
4880 | -++ except exception.NotFound: |
4881 | -++ raise exc.HTTPNotFound() |
4882 | -++ return _translate_detail_keys(console) |
4883 | -++ |
4884 | -++ def update(self, req, server_id, id): |
4885 | -++ """You can't update a console""" |
4886 | -++ raise exc.HTTPNotImplemented() |
4887 | -++ |
4888 | -++ def delete(self, req, server_id, id): |
4889 | -++ """Deletes a console""" |
4890 | -++ try: |
4891 | -++ self.console_api.delete_console(req.environ['nova.context'], |
4892 | -++ server_id, |
4893 | -++ int(id)) |
4894 | -++ except exception.NotFound: |
4895 | -++ raise exc.HTTPNotFound() |
4896 | -++ return webob.Response(status_int=202) |
4897 | -++ |
4898 | -++ |
4899 | -++def create_resource(): |
4900 | -++ return wsgi.Resource(Controller()) |
4901 | -+diff -Naurp nova.orig/api/openstack/compute/contrib/accounts.py nova/api/openstack/compute/contrib/accounts.py |
4902 | -+--- nova.orig/api/openstack/compute/contrib/accounts.py 1969-12-31 19:00:00.000000000 -0500 |
4903 | -++++ nova/api/openstack/compute/contrib/accounts.py 2012-02-28 09:08:10.821887173 -0500 |
4904 | -+@@ -0,0 +1,100 @@ |
4905 | -++# Copyright 2011 OpenStack LLC. |
4906 | -++# All Rights Reserved. |
4907 | -++# |
4908 | -++# Licensed under the Apache License, Version 2.0 (the "License"); you may |
4909 | -++# not use this file except in compliance with the License. You may obtain |
4910 | -++# a copy of the License at |
4911 | -++# |
4912 | -++# http://www.apache.org/licenses/LICENSE-2.0 |
4913 | -++# |
4914 | -++# Unless required by applicable law or agreed to in writing, software |
4915 | -++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
4916 | -++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
4917 | -++# License for the specific language governing permissions and limitations |
4918 | -++# under the License. |
4919 | -++ |
4920 | -++import webob.exc |
4921 | -++ |
4922 | -++from nova.api.openstack import extensions |
4923 | -++from nova.api.openstack import wsgi |
4924 | -++from nova.api.openstack import xmlutil |
4925 | -++from nova.auth import manager |
4926 | -++from nova import exception |
4927 | -++from nova import flags |
4928 | -++from nova import log as logging |
4929 | -++ |
4930 | -++ |
4931 | -++FLAGS = flags.FLAGS |
4932 | -++LOG = logging.getLogger(__name__) |
4933 | -++authorize = extensions.extension_authorizer('compute', 'accounts') |
4934 | -++ |
4935 | -++ |
4936 | -++class AccountTemplate(xmlutil.TemplateBuilder): |
4937 | -++ def construct(self): |
4938 | -++ root = xmlutil.TemplateElement('account', selector='account') |
4939 | -++ root.set('id', 'id') |
4940 | -++ root.set('name', 'name') |
4941 | -++ root.set('description', 'description') |
4942 | -++ root.set('manager', 'manager') |
4943 | -++ |
4944 | -++ return xmlutil.MasterTemplate(root, 1) |
4945 | -++ |
4946 | -++ |
4947 | -++def _translate_keys(account): |
4948 | -++ return dict(id=account.id, |
4949 | -++ name=account.name, |
4950 | -++ description=account.description, |
4951 | -++ manager=account.project_manager_id) |
4952 | -++ |
4953 | -++ |
4954 | -++class Controller(object): |
4955 | -++ |
4956 | -++ def __init__(self): |
4957 | -++ self.manager = manager.AuthManager() |
4958 | -++ |
4959 | -++ def index(self, req): |
4960 | -++ raise webob.exc.HTTPNotImplemented() |
4961 | -++ |
4962 | -++ @wsgi.serializers(xml=AccountTemplate) |
4963 | -++ def show(self, req, id): |
4964 | -++ """Return data about the given account id""" |
4965 | -++ authorize(req.environ['nova.context']) |
4966 | -++ account = self.manager.get_project(id) |
4967 | -++ return dict(account=_translate_keys(account)) |
4968 | -++ |
4969 | -++ def delete(self, req, id): |
4970 | -++ authorize(req.environ['nova.context']) |
4971 | -++ self.manager.delete_project(id) |
4972 | -++ return {} |
4973 | -++ |
4974 | -++ def create(self, req, body): |
4975 | -++ """We use update with create-or-update semantics |
4976 | -++ because the id comes from an external source""" |
4977 | -++ raise webob.exc.HTTPNotImplemented() |
4978 | -++ |
4979 | -++ @wsgi.serializers(xml=AccountTemplate) |
4980 | -++ def update(self, req, id, body): |
4981 | -++ """This is really create or update.""" |
4982 | -++ authorize(req.environ['nova.context']) |
4983 | -++ description = body['account'].get('description') |
4984 | -++ manager = body['account'].get('manager') |
4985 | -++ try: |
4986 | -++ account = self.manager.get_project(id) |
4987 | -++ self.manager.modify_project(id, manager, description) |
4988 | -++ except exception.NotFound: |
4989 | -++ account = self.manager.create_project(id, manager, description) |
4990 | -++ return dict(account=_translate_keys(account)) |
4991 | -++ |
4992 | -++ |
4993 | -++class Accounts(extensions.ExtensionDescriptor): |
4994 | -++ """Admin-only access to accounts""" |
4995 | -++ |
4996 | -++ name = "Accounts" |
4997 | -++ alias = "os-accounts" |
4998 | -++ namespace = "http://docs.openstack.org/compute/ext/accounts/api/v1.1" |
4999 | -++ updated = "2011-12-23T00:00:00+00:00" |
5000 | -++ |
The diff has been truncated for viewing.