Merge lp:~gandelman-a/ubuntu/precise/nova/UCA_2012.2.1 into lp:~ubuntu-cloud-archive/ubuntu/precise/nova/folsom

Proposed by Adam Gandelman
Status: Merged
Approved by: Chuck Short
Approved revision: 97
Merged at revision: 96
Proposed branch: lp:~gandelman-a/ubuntu/precise/nova/UCA_2012.2.1
Merge into: lp:~ubuntu-cloud-archive/ubuntu/precise/nova/folsom
Diff against target: 30561 lines (+6582/-18252)
124 files modified
.pc/applied-patches (+0/-4)
.pc/avoid_setuptools_git_dependency.patch/tools/pip-requires (+2/-2)
.pc/fix-ubuntu-tests.patch/nova/tests/test_api.py (+1/-1)
.pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost (+3/-1)
.pc/rbd-security.patch/nova/virt/libvirt/volume.py (+2/-2)
.pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova/db/sqlalchemy/api.py (+0/-5256)
.pc/ubuntu/fix-libvirt-firewall-slowdown.patch/nova/tests/test_libvirt.py (+0/-3919)
.pc/ubuntu/fix-libvirt-firewall-slowdown.patch/nova/virt/firewall.py (+0/-536)
.pc/ubuntu/ubuntu-fix-32-64-bit-iss.patch/nova/tests/test_nfs.py (+0/-629)
.pc/ubuntu/ubuntu-fix-32-64-bit-iss.patch/nova/volume/nfs.py (+0/-293)
.pc/ubuntu/ubuntu-fix-ec2-instance-id-mappings.patch/nova/db/sqlalchemy/api.py (+0/-5253)
AUTHORS (+51/-38)
ChangeLog (+3668/-1329)
PKG-INFO (+1/-1)
bin/nova-dhcpbridge (+1/-1)
bin/nova-manage (+5/-1)
bin/nova-rootwrap (+9/-0)
debian/changelog (+200/-68)
debian/control (+2/-2)
debian/patches/avoid_setuptools_git_dependency.patch (+4/-4)
debian/patches/series (+0/-5)
debian/patches/ubuntu/fix-ec2-volume-id-mappings.patch (+0/-43)
debian/patches/ubuntu/fix-libvirt-firewall-slowdown.patch (+0/-106)
debian/patches/ubuntu/ubuntu-fix-32-64-bit-iss.patch (+0/-75)
debian/patches/ubuntu/ubuntu-fix-ec2-instance-id-mappings.patch (+0/-17)
etc/nova/api-paste.ini (+1/-1)
etc/nova/nova.conf.sample (+1/-1)
etc/nova/rootwrap.d/volume.filters (+1/-0)
nova.egg-info/PKG-INFO (+1/-1)
nova.egg-info/requires.txt (+2/-2)
nova/api/ec2/cloud.py (+14/-2)
nova/api/ec2/ec2utils.py (+3/-1)
nova/api/metadata/base.py (+8/-3)
nova/api/openstack/compute/contrib/admin_actions.py (+11/-5)
nova/api/openstack/compute/contrib/cloudpipe.py (+10/-4)
nova/api/openstack/compute/contrib/floating_ips.py (+20/-12)
nova/api/openstack/compute/contrib/networks.py (+2/-2)
nova/api/openstack/compute/server_metadata.py (+3/-0)
nova/api/openstack/compute/servers.py (+36/-11)
nova/api/openstack/wsgi.py (+14/-4)
nova/block_device.py (+8/-1)
nova/cloudpipe/pipelib.py (+1/-0)
nova/compute/api.py (+180/-118)
nova/compute/instance_types.py (+4/-1)
nova/compute/manager.py (+109/-30)
nova/compute/resource_tracker.py (+25/-2)
nova/consoleauth/manager.py (+2/-2)
nova/db/sqlalchemy/api.py (+9/-2)
nova/exception.py (+18/-4)
nova/flags.py (+9/-0)
nova/image/glance.py (+2/-2)
nova/network/__init__.py (+6/-4)
nova/network/api.py (+1/-1)
nova/network/linux_net.py (+1/-0)
nova/network/manager.py (+68/-45)
nova/network/quantumv2/api.py (+218/-11)
nova/openstack/common/cfg.py (+1/-1)
nova/openstack/common/log.py (+4/-5)
nova/openstack/common/notifier/api.py (+3/-3)
nova/openstack/common/policy.py (+3/-2)
nova/openstack/common/rpc/__init__.py (+9/-3)
nova/openstack/common/rpc/amqp.py (+8/-0)
nova/openstack/common/rpc/impl_kombu.py (+18/-12)
nova/openstack/common/rpc/impl_qpid.py (+35/-53)
nova/openstack/common/setup.py (+11/-5)
nova/openstack/common/timeutils.py (+7/-3)
nova/scheduler/chance.py (+1/-1)
nova/scheduler/driver.py (+4/-4)
nova/scheduler/filter_scheduler.py (+1/-2)
nova/scheduler/host_manager.py (+29/-16)
nova/tests/api/ec2/test_cloud.py (+56/-4)
nova/tests/api/ec2/test_middleware.py (+9/-0)
nova/tests/api/openstack/compute/contrib/test_admin_actions.py (+35/-2)
nova/tests/api/openstack/compute/contrib/test_cloudpipe.py (+5/-6)
nova/tests/api/openstack/compute/contrib/test_floating_ips.py (+25/-0)
nova/tests/api/openstack/compute/test_server_actions.py (+69/-0)
nova/tests/api/openstack/compute/test_server_metadata.py (+6/-6)
nova/tests/api/openstack/compute/test_servers.py (+131/-4)
nova/tests/api/openstack/fakes.py (+3/-2)
nova/tests/compute/test_compute.py (+158/-43)
nova/tests/compute/test_compute_utils.py (+30/-13)
nova/tests/compute/test_resource_tracker.py (+22/-18)
nova/tests/fake_libvirt_utils.py (+4/-1)
nova/tests/fake_network.py (+2/-2)
nova/tests/fakelibvirt.py (+14/-0)
nova/tests/network/test_linux_net.py (+1/-1)
nova/tests/network/test_manager.py (+48/-0)
nova/tests/network/test_quantumv2.py (+325/-9)
nova/tests/scheduler/fakes.py (+4/-4)
nova/tests/scheduler/test_chance_scheduler.py (+4/-4)
nova/tests/scheduler/test_filter_scheduler.py (+1/-1)
nova/tests/scheduler/test_scheduler.py (+1/-1)
nova/tests/test_api.py (+1/-1)
nova/tests/test_exception.py (+0/-1)
nova/tests/test_imagecache.py (+24/-0)
nova/tests/test_libvirt.py (+222/-14)
nova/tests/test_metadata.py (+2/-2)
nova/tests/test_notifications.py (+2/-2)
nova/tests/test_nova_manage.py (+5/-0)
nova/tests/test_utils.py (+54/-6)
nova/tests/test_virt_drivers.py (+17/-0)
nova/tests/test_volume.py (+2/-0)
nova/tests/test_xenapi.py (+59/-6)
nova/tests/xenapi/stubs.py (+13/-22)
nova/utils.py (+29/-0)
nova/version.py (+2/-2)
nova/virt/firewall.py (+3/-3)
nova/virt/hyperv/driver.py (+2/-2)
nova/virt/hyperv/livemigrationops.py (+1/-1)
nova/virt/libvirt/driver.py (+86/-43)
nova/virt/libvirt/imagecache.py (+4/-1)
nova/virt/libvirt/utils.py (+45/-1)
nova/virt/libvirt/volume.py (+2/-2)
nova/virt/xenapi/driver.py (+25/-2)
nova/virt/xenapi/fake.py (+8/-2)
nova/virt/xenapi/host.py (+19/-0)
nova/virt/xenapi/vm_utils.py (+26/-37)
nova/virt/xenapi/vmops.py (+18/-2)
nova/virt/xenapi/volumeops.py (+3/-1)
nova/volume/driver.py (+63/-5)
nova/volume/iscsi.py (+9/-1)
nova/volume/manager.py (+7/-1)
plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost (+3/-1)
tools/pip-requires (+2/-2)
To merge this branch: bzr merge lp:~gandelman-a/ubuntu/precise/nova/UCA_2012.2.1
Reviewer Review Type Date Requested Status
Chuck Short Pending
Review via email: mp+142218@code.launchpad.net

Description of the change

Syncs the lp:~ubuntu-cloud-archive branch to the current state of the package in the U.C.A., and imports the current SRU pending in quantal-proposed.

To post a comment you must log in.

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file '.pc/applied-patches'
2--- .pc/applied-patches 2012-10-12 12:35:01 +0000
3+++ .pc/applied-patches 2013-01-08 00:32:22 +0000
4@@ -3,7 +3,3 @@
5 fix-docs-build-without-network.patch
6 avoid_setuptools_git_dependency.patch
7 rbd-security.patch
8-ubuntu/ubuntu-fix-32-64-bit-iss.patch
9-ubuntu/ubuntu-fix-ec2-instance-id-mappings.patch
10-ubuntu/fix-libvirt-firewall-slowdown.patch
11-ubuntu/fix-ec2-volume-id-mappings.patch
12
13=== modified file '.pc/avoid_setuptools_git_dependency.patch/tools/pip-requires'
14--- .pc/avoid_setuptools_git_dependency.patch/tools/pip-requires 2012-10-12 12:35:01 +0000
15+++ .pc/avoid_setuptools_git_dependency.patch/tools/pip-requires 2013-01-08 00:32:22 +0000
16@@ -1,4 +1,4 @@
17-SQLAlchemy>=0.7.3
18+SQLAlchemy>=0.7.8,<=0.7.9
19 Cheetah==2.4.4
20 amqplib==0.6.1
21 anyjson==0.2.4
22@@ -19,5 +19,5 @@
23 iso8601>=0.1.4
24 httplib2
25 setuptools_git>=0.4
26-python-quantumclient>=2.0
27+python-quantumclient>=2.1
28 python-glanceclient>=0.5.0,<2
29
30=== modified file '.pc/fix-ubuntu-tests.patch/nova/tests/test_api.py'
31--- .pc/fix-ubuntu-tests.patch/nova/tests/test_api.py 2012-08-16 14:04:11 +0000
32+++ .pc/fix-ubuntu-tests.patch/nova/tests/test_api.py 2013-01-08 00:32:22 +0000
33@@ -448,7 +448,7 @@
34 # Invalid Cidr for ICMP type
35 _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.444.0/4')
36 # Invalid protocol
37- _assert('An unknown error has occurred', 'xyz', 1, 14, '0.0.0.0/0')
38+ _assert('Invalid IP protocol', 'xyz', 1, 14, '0.0.0.0/0')
39 # Invalid port
40 _assert('An unknown error has occurred', 'tcp', " ", "81", '0.0.0.0/0')
41 # Invalid icmp port
42
43=== modified file '.pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost'
44--- .pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost 2012-08-16 14:04:11 +0000
45+++ .pc/path-to-the-xenhost.conf-fixup.patch/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost 2013-01-08 00:32:22 +0000
46@@ -398,7 +398,9 @@
47 for oc_fld in oc.split("; "):
48 ock, ocv = strip_kv(oc_fld)
49 ocd[ock] = ocv
50-# out["host_capabilities"] = dct.get("capabilities", "").split("; ")
51+
52+ capabilities = dct.get("capabilities", "")
53+ out["host_capabilities"] = capabilities.replace(";", "").split()
54 # out["host_allowed-operations"] = dct.get(
55 # "allowed-operations", "").split("; ")
56 # lsrv = dct.get("license-server", "")
57
58=== modified file '.pc/rbd-security.patch/nova/virt/libvirt/volume.py'
59--- .pc/rbd-security.patch/nova/virt/libvirt/volume.py 2012-10-12 12:35:01 +0000
60+++ .pc/rbd-security.patch/nova/virt/libvirt/volume.py 2013-01-08 00:32:22 +0000
61@@ -199,8 +199,8 @@
62 devices = [dev for dev in devices if dev.startswith(device_prefix)]
63 if not devices:
64 self._iscsiadm_update(iscsi_properties, "node.startup", "manual",
65- check_exit_code=[0, 255])
66+ check_exit_code=[0, 21, 255])
67 self._run_iscsiadm(iscsi_properties, ("--logout",),
68- check_exit_code=[0, 255])
69+ check_exit_code=[0, 21, 255])
70 self._run_iscsiadm(iscsi_properties, ('--op', 'delete'),
71 check_exit_code=[0, 21, 255])
72
73=== removed directory '.pc/ubuntu'
74=== removed directory '.pc/ubuntu/fix-ec2-volume-id-mappings.patch'
75=== removed directory '.pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova'
76=== removed directory '.pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova/db'
77=== removed directory '.pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova/db/sqlalchemy'
78=== removed file '.pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova/db/sqlalchemy/api.py'
79--- .pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova/db/sqlalchemy/api.py 2012-10-12 12:35:01 +0000
80+++ .pc/ubuntu/fix-ec2-volume-id-mappings.patch/nova/db/sqlalchemy/api.py 1970-01-01 00:00:00 +0000
81@@ -1,5256 +0,0 @@
82-# vim: tabstop=4 shiftwidth=4 softtabstop=4
83-
84-# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
85-# Copyright 2010 United States Government as represented by the
86-# Administrator of the National Aeronautics and Space Administration.
87-# All Rights Reserved.
88-#
89-# Licensed under the Apache License, Version 2.0 (the "License"); you may
90-# not use this file except in compliance with the License. You may obtain
91-# a copy of the License at
92-#
93-# http://www.apache.org/licenses/LICENSE-2.0
94-#
95-# Unless required by applicable law or agreed to in writing, software
96-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
97-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
98-# License for the specific language governing permissions and limitations
99-# under the License.
100-
101-"""Implementation of SQLAlchemy backend."""
102-
103-import collections
104-import copy
105-import datetime
106-import functools
107-import warnings
108-
109-from nova import block_device
110-from nova.common.sqlalchemyutils import paginate_query
111-from nova.compute import vm_states
112-from nova import db
113-from nova.db.sqlalchemy import models
114-from nova.db.sqlalchemy.session import get_session
115-from nova import exception
116-from nova import flags
117-from nova.openstack.common import log as logging
118-from nova.openstack.common import timeutils
119-from nova import utils
120-from sqlalchemy import and_
121-from sqlalchemy.exc import IntegrityError
122-from sqlalchemy import or_
123-from sqlalchemy.orm import joinedload
124-from sqlalchemy.orm import joinedload_all
125-from sqlalchemy.sql.expression import asc
126-from sqlalchemy.sql.expression import desc
127-from sqlalchemy.sql.expression import literal_column
128-from sqlalchemy.sql import func
129-
130-FLAGS = flags.FLAGS
131-
132-LOG = logging.getLogger(__name__)
133-
134-
135-def is_admin_context(context):
136- """Indicates if the request context is an administrator."""
137- if not context:
138- warnings.warn(_('Use of empty request context is deprecated'),
139- DeprecationWarning)
140- raise Exception('die')
141- return context.is_admin
142-
143-
144-def is_user_context(context):
145- """Indicates if the request context is a normal user."""
146- if not context:
147- return False
148- if context.is_admin:
149- return False
150- if not context.user_id or not context.project_id:
151- return False
152- return True
153-
154-
155-def authorize_project_context(context, project_id):
156- """Ensures a request has permission to access the given project."""
157- if is_user_context(context):
158- if not context.project_id:
159- raise exception.NotAuthorized()
160- elif context.project_id != project_id:
161- raise exception.NotAuthorized()
162-
163-
164-def authorize_user_context(context, user_id):
165- """Ensures a request has permission to access the given user."""
166- if is_user_context(context):
167- if not context.user_id:
168- raise exception.NotAuthorized()
169- elif context.user_id != user_id:
170- raise exception.NotAuthorized()
171-
172-
173-def authorize_quota_class_context(context, class_name):
174- """Ensures a request has permission to access the given quota class."""
175- if is_user_context(context):
176- if not context.quota_class:
177- raise exception.NotAuthorized()
178- elif context.quota_class != class_name:
179- raise exception.NotAuthorized()
180-
181-
182-def require_admin_context(f):
183- """Decorator to require admin request context.
184-
185- The first argument to the wrapped function must be the context.
186-
187- """
188-
189- def wrapper(*args, **kwargs):
190- if not is_admin_context(args[0]):
191- raise exception.AdminRequired()
192- return f(*args, **kwargs)
193- return wrapper
194-
195-
196-def require_context(f):
197- """Decorator to require *any* user or admin context.
198-
199- This does no authorization for user or project access matching, see
200- :py:func:`authorize_project_context` and
201- :py:func:`authorize_user_context`.
202-
203- The first argument to the wrapped function must be the context.
204-
205- """
206-
207- def wrapper(*args, **kwargs):
208- if not is_admin_context(args[0]) and not is_user_context(args[0]):
209- raise exception.NotAuthorized()
210- return f(*args, **kwargs)
211- return wrapper
212-
213-
214-def require_instance_exists(f):
215- """Decorator to require the specified instance to exist.
216-
217- Requires the wrapped function to use context and instance_id as
218- their first two arguments.
219- """
220- @functools.wraps(f)
221- def wrapper(context, instance_id, *args, **kwargs):
222- db.instance_get(context, instance_id)
223- return f(context, instance_id, *args, **kwargs)
224-
225- return wrapper
226-
227-
228-def require_instance_exists_using_uuid(f):
229- """Decorator to require the specified instance to exist.
230-
231- Requires the wrapped function to use context and instance_uuid as
232- their first two arguments.
233- """
234- @functools.wraps(f)
235- def wrapper(context, instance_uuid, *args, **kwargs):
236- db.instance_get_by_uuid(context, instance_uuid)
237- return f(context, instance_uuid, *args, **kwargs)
238-
239- return wrapper
240-
241-
242-def require_volume_exists(f):
243- """Decorator to require the specified volume to exist.
244-
245- Requires the wrapped function to use context and volume_id as
246- their first two arguments.
247- """
248-
249- def wrapper(context, volume_id, *args, **kwargs):
250- db.volume_get(context, volume_id)
251- return f(context, volume_id, *args, **kwargs)
252- wrapper.__name__ = f.__name__
253- return wrapper
254-
255-
256-def require_aggregate_exists(f):
257- """Decorator to require the specified aggregate to exist.
258-
259- Requires the wrapped function to use context and aggregate_id as
260- their first two arguments.
261- """
262-
263- @functools.wraps(f)
264- def wrapper(context, aggregate_id, *args, **kwargs):
265- db.aggregate_get(context, aggregate_id)
266- return f(context, aggregate_id, *args, **kwargs)
267- return wrapper
268-
269-
270-def model_query(context, model, *args, **kwargs):
271- """Query helper that accounts for context's `read_deleted` field.
272-
273- :param context: context to query under
274- :param session: if present, the session to use
275- :param read_deleted: if present, overrides context's read_deleted field.
276- :param project_only: if present and context is user-type, then restrict
277- query to match the context's project_id. If set to 'allow_none',
278- restriction includes project_id = None.
279- """
280- session = kwargs.get('session') or get_session()
281- read_deleted = kwargs.get('read_deleted') or context.read_deleted
282- project_only = kwargs.get('project_only', False)
283-
284- query = session.query(model, *args)
285-
286- if read_deleted == 'no':
287- query = query.filter_by(deleted=False)
288- elif read_deleted == 'yes':
289- pass # omit the filter to include deleted and active
290- elif read_deleted == 'only':
291- query = query.filter_by(deleted=True)
292- else:
293- raise Exception(
294- _("Unrecognized read_deleted value '%s'") % read_deleted)
295-
296- if is_user_context(context) and project_only:
297- if project_only == 'allow_none':
298- query = query.filter(or_(model.project_id == context.project_id,
299- model.project_id == None))
300- else:
301- query = query.filter_by(project_id=context.project_id)
302-
303- return query
304-
305-
306-def exact_filter(query, model, filters, legal_keys):
307- """Applies exact match filtering to a query.
308-
309- Returns the updated query. Modifies filters argument to remove
310- filters consumed.
311-
312- :param query: query to apply filters to
313- :param model: model object the query applies to, for IN-style
314- filtering
315- :param filters: dictionary of filters; values that are lists,
316- tuples, sets, or frozensets cause an 'IN' test to
317- be performed, while exact matching ('==' operator)
318- is used for other values
319- :param legal_keys: list of keys to apply exact filtering to
320- """
321-
322- filter_dict = {}
323-
324- # Walk through all the keys
325- for key in legal_keys:
326- # Skip ones we're not filtering on
327- if key not in filters:
328- continue
329-
330- # OK, filtering on this key; what value do we search for?
331- value = filters.pop(key)
332-
333- if key == 'metadata':
334- column_attr = getattr(model, key)
335- if isinstance(value, list):
336- for item in value:
337- for k, v in item.iteritems():
338- query = query.filter(column_attr.any(key=k))
339- query = query.filter(column_attr.any(value=v))
340-
341- else:
342- for k, v in value.iteritems():
343- query = query.filter(column_attr.any(key=k))
344- query = query.filter(column_attr.any(value=v))
345- elif isinstance(value, (list, tuple, set, frozenset)):
346- # Looking for values in a list; apply to query directly
347- column_attr = getattr(model, key)
348- query = query.filter(column_attr.in_(value))
349- else:
350- # OK, simple exact match; save for later
351- filter_dict[key] = value
352-
353- # Apply simple exact matches
354- if filter_dict:
355- query = query.filter_by(**filter_dict)
356-
357- return query
358-
359-
360-###################
361-
362-
363-def constraint(**conditions):
364- return Constraint(conditions)
365-
366-
367-def equal_any(*values):
368- return EqualityCondition(values)
369-
370-
371-def not_equal(*values):
372- return InequalityCondition(values)
373-
374-
375-class Constraint(object):
376-
377- def __init__(self, conditions):
378- self.conditions = conditions
379-
380- def apply(self, model, query):
381- for key, condition in self.conditions.iteritems():
382- for clause in condition.clauses(getattr(model, key)):
383- query = query.filter(clause)
384- return query
385-
386-
387-class EqualityCondition(object):
388-
389- def __init__(self, values):
390- self.values = values
391-
392- def clauses(self, field):
393- return or_([field == value for value in self.values])
394-
395-
396-class InequalityCondition(object):
397-
398- def __init__(self, values):
399- self.values = values
400-
401- def clauses(self, field):
402- return [field != value for value in self.values]
403-
404-
405-###################
406-
407-
408-@require_admin_context
409-def service_destroy(context, service_id):
410- session = get_session()
411- with session.begin():
412- service_ref = service_get(context, service_id, session=session)
413- service_ref.delete(session=session)
414-
415- if service_ref.topic == 'compute' and service_ref.compute_node:
416- for c in service_ref.compute_node:
417- c.delete(session=session)
418-
419-
420-@require_admin_context
421-def service_get(context, service_id, session=None):
422- result = model_query(context, models.Service, session=session).\
423- options(joinedload('compute_node')).\
424- filter_by(id=service_id).\
425- first()
426- if not result:
427- raise exception.ServiceNotFound(service_id=service_id)
428-
429- return result
430-
431-
432-@require_admin_context
433-def service_get_all(context, disabled=None):
434- query = model_query(context, models.Service)
435-
436- if disabled is not None:
437- query = query.filter_by(disabled=disabled)
438-
439- return query.all()
440-
441-
442-@require_admin_context
443-def service_get_all_by_topic(context, topic):
444- return model_query(context, models.Service, read_deleted="no").\
445- filter_by(disabled=False).\
446- filter_by(topic=topic).\
447- all()
448-
449-
450-@require_admin_context
451-def service_get_by_host_and_topic(context, host, topic):
452- return model_query(context, models.Service, read_deleted="no").\
453- filter_by(disabled=False).\
454- filter_by(host=host).\
455- filter_by(topic=topic).\
456- first()
457-
458-
459-@require_admin_context
460-def service_get_all_by_host(context, host):
461- return model_query(context, models.Service, read_deleted="no").\
462- filter_by(host=host).\
463- all()
464-
465-
466-@require_admin_context
467-def service_get_all_compute_by_host(context, host):
468- result = model_query(context, models.Service, read_deleted="no").\
469- options(joinedload('compute_node')).\
470- filter_by(host=host).\
471- filter_by(topic="compute").\
472- all()
473-
474- if not result:
475- raise exception.ComputeHostNotFound(host=host)
476-
477- return result
478-
479-
480-@require_admin_context
481-def _service_get_all_topic_subquery(context, session, topic, subq, label):
482- sort_value = getattr(subq.c, label)
483- return model_query(context, models.Service,
484- func.coalesce(sort_value, 0),
485- session=session, read_deleted="no").\
486- filter_by(topic=topic).\
487- filter_by(disabled=False).\
488- outerjoin((subq, models.Service.host == subq.c.host)).\
489- order_by(sort_value).\
490- all()
491-
492-
493-@require_admin_context
494-def service_get_all_compute_sorted(context):
495- session = get_session()
496- with session.begin():
497- # NOTE(vish): The intended query is below
498- # SELECT services.*, COALESCE(inst_cores.instance_cores,
499- # 0)
500- # FROM services LEFT OUTER JOIN
501- # (SELECT host, SUM(instances.vcpus) AS instance_cores
502- # FROM instances GROUP BY host) AS inst_cores
503- # ON services.host = inst_cores.host
504- topic = 'compute'
505- label = 'instance_cores'
506- subq = model_query(context, models.Instance.host,
507- func.sum(models.Instance.vcpus).label(label),
508- session=session, read_deleted="no").\
509- group_by(models.Instance.host).\
510- subquery()
511- return _service_get_all_topic_subquery(context,
512- session,
513- topic,
514- subq,
515- label)
516-
517-
518-@require_admin_context
519-def service_get_all_volume_sorted(context):
520- session = get_session()
521- with session.begin():
522- topic = 'volume'
523- label = 'volume_gigabytes'
524- subq = model_query(context, models.Volume.host,
525- func.sum(models.Volume.size).label(label),
526- session=session, read_deleted="no").\
527- group_by(models.Volume.host).\
528- subquery()
529- return _service_get_all_topic_subquery(context,
530- session,
531- topic,
532- subq,
533- label)
534-
535-
536-@require_admin_context
537-def service_get_by_args(context, host, binary):
538- result = model_query(context, models.Service).\
539- filter_by(host=host).\
540- filter_by(binary=binary).\
541- first()
542-
543- if not result:
544- raise exception.HostBinaryNotFound(host=host, binary=binary)
545-
546- return result
547-
548-
549-@require_admin_context
550-def service_create(context, values):
551- service_ref = models.Service()
552- service_ref.update(values)
553- if not FLAGS.enable_new_services:
554- service_ref.disabled = True
555- service_ref.save()
556- return service_ref
557-
558-
559-@require_admin_context
560-def service_update(context, service_id, values):
561- session = get_session()
562- with session.begin():
563- service_ref = service_get(context, service_id, session=session)
564- service_ref.update(values)
565- service_ref.save(session=session)
566-
567-
568-###################
569-
570-def compute_node_get(context, compute_id, session=None):
571- result = model_query(context, models.ComputeNode, session=session).\
572- filter_by(id=compute_id).\
573- options(joinedload('service')).\
574- options(joinedload('stats')).\
575- first()
576-
577- if not result:
578- raise exception.ComputeHostNotFound(host=compute_id)
579-
580- return result
581-
582-
583-@require_admin_context
584-def compute_node_get_all(context, session=None):
585- return model_query(context, models.ComputeNode, session=session).\
586- options(joinedload('service')).\
587- options(joinedload('stats')).\
588- all()
589-
590-
591-@require_admin_context
592-def compute_node_search_by_hypervisor(context, hypervisor_match):
593- field = models.ComputeNode.hypervisor_hostname
594- return model_query(context, models.ComputeNode).\
595- options(joinedload('service')).\
596- filter(field.like('%%%s%%' % hypervisor_match)).\
597- all()
598-
599-
600-def _prep_stats_dict(values):
601- """Make list of ComputeNodeStats"""
602- stats = []
603- d = values.get('stats', {})
604- for k, v in d.iteritems():
605- stat = models.ComputeNodeStat()
606- stat['key'] = k
607- stat['value'] = v
608- stats.append(stat)
609- values['stats'] = stats
610-
611-
612-@require_admin_context
613-def compute_node_create(context, values, session=None):
614- """Creates a new ComputeNode and populates the capacity fields
615- with the most recent data."""
616- _prep_stats_dict(values)
617-
618- if not session:
619- session = get_session()
620-
621- with session.begin(subtransactions=True):
622- compute_node_ref = models.ComputeNode()
623- session.add(compute_node_ref)
624- compute_node_ref.update(values)
625- return compute_node_ref
626-
627-
628-def _update_stats(context, new_stats, compute_id, session, prune_stats=False):
629-
630- existing = model_query(context, models.ComputeNodeStat, session=session,
631- read_deleted="no").filter_by(compute_node_id=compute_id).all()
632- statmap = {}
633- for stat in existing:
634- key = stat['key']
635- statmap[key] = stat
636-
637- stats = []
638- for k, v in new_stats.iteritems():
639- old_stat = statmap.pop(k, None)
640- if old_stat:
641- # update existing value:
642- old_stat.update({'value': v})
643- stats.append(old_stat)
644- else:
645- # add new stat:
646- stat = models.ComputeNodeStat()
647- stat['compute_node_id'] = compute_id
648- stat['key'] = k
649- stat['value'] = v
650- stats.append(stat)
651-
652- if prune_stats:
653- # prune un-touched old stats:
654- for stat in statmap.values():
655- session.add(stat)
656- stat.update({'deleted': True})
657-
658- # add new and updated stats
659- for stat in stats:
660- session.add(stat)
661-
662-
663-@require_admin_context
664-def compute_node_update(context, compute_id, values, prune_stats=False):
665- """Updates the ComputeNode record with the most recent data"""
666- stats = values.pop('stats', {})
667-
668- session = get_session()
669- with session.begin(subtransactions=True):
670- _update_stats(context, stats, compute_id, session, prune_stats)
671- compute_ref = compute_node_get(context, compute_id, session=session)
672- compute_ref.update(values)
673- return compute_ref
674-
675-
676-def compute_node_get_by_host(context, host):
677- """Get all capacity entries for the given host."""
678- session = get_session()
679- with session.begin():
680- node = session.query(models.ComputeNode).\
681- join('service').\
682- filter(models.Service.host == host).\
683- filter_by(deleted=False)
684- return node.first()
685-
686-
687-def compute_node_statistics(context):
688- """Compute statistics over all compute nodes."""
689- result = model_query(context,
690- func.count(models.ComputeNode.id),
691- func.sum(models.ComputeNode.vcpus),
692- func.sum(models.ComputeNode.memory_mb),
693- func.sum(models.ComputeNode.local_gb),
694- func.sum(models.ComputeNode.vcpus_used),
695- func.sum(models.ComputeNode.memory_mb_used),
696- func.sum(models.ComputeNode.local_gb_used),
697- func.sum(models.ComputeNode.free_ram_mb),
698- func.sum(models.ComputeNode.free_disk_gb),
699- func.sum(models.ComputeNode.current_workload),
700- func.sum(models.ComputeNode.running_vms),
701- func.sum(models.ComputeNode.disk_available_least),
702- read_deleted="no").first()
703-
704- # Build a dict of the info--making no assumptions about result
705- fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
706- 'memory_mb_used', 'local_gb_used', 'free_ram_mb', 'free_disk_gb',
707- 'current_workload', 'running_vms', 'disk_available_least')
708- return dict((field, int(result[idx] or 0))
709- for idx, field in enumerate(fields))
710-
711-
712-###################
713-
714-
715-@require_admin_context
716-def certificate_get(context, certificate_id, session=None):
717- result = model_query(context, models.Certificate, session=session).\
718- filter_by(id=certificate_id).\
719- first()
720-
721- if not result:
722- raise exception.CertificateNotFound(certificate_id=certificate_id)
723-
724- return result
725-
726-
727-@require_admin_context
728-def certificate_create(context, values):
729- certificate_ref = models.Certificate()
730- for (key, value) in values.iteritems():
731- certificate_ref[key] = value
732- certificate_ref.save()
733- return certificate_ref
734-
735-
736-@require_admin_context
737-def certificate_get_all_by_project(context, project_id):
738- return model_query(context, models.Certificate, read_deleted="no").\
739- filter_by(project_id=project_id).\
740- all()
741-
742-
743-@require_admin_context
744-def certificate_get_all_by_user(context, user_id):
745- return model_query(context, models.Certificate, read_deleted="no").\
746- filter_by(user_id=user_id).\
747- all()
748-
749-
750-@require_admin_context
751-def certificate_get_all_by_user_and_project(context, user_id, project_id):
752- return model_query(context, models.Certificate, read_deleted="no").\
753- filter_by(user_id=user_id).\
754- filter_by(project_id=project_id).\
755- all()
756-
757-
758-###################
759-
760-
761-@require_context
762-def floating_ip_get(context, id):
763- result = model_query(context, models.FloatingIp, project_only=True).\
764- filter_by(id=id).\
765- first()
766-
767- if not result:
768- raise exception.FloatingIpNotFound(id=id)
769-
770- return result
771-
772-
773-@require_context
774-def floating_ip_get_pools(context):
775- pools = []
776- for result in model_query(context, models.FloatingIp.pool).distinct():
777- pools.append({'name': result[0]})
778- return pools
779-
780-
@require_context
def floating_ip_allocate_address(context, project_id, pool):
    """Reserve a free floating IP from *pool* for *project_id*.

    Selects an address that has no fixed IP and no owner yet, locking
    the row FOR UPDATE so concurrent allocators cannot grab the same
    one. Returns the allocated address.

    :raises: NoMoreFloatingIps when the pool is exhausted
    """
    authorize_project_context(context, project_id)
    session = get_session()
    with session.begin():
        floating_ip_ref = model_query(context, models.FloatingIp,
                                      session=session, read_deleted="no").\
            filter_by(fixed_ip_id=None).\
            filter_by(project_id=None).\
            filter_by(pool=pool).\
            with_lockmode('update').\
            first()
        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not floating_ip_ref:
            raise exception.NoMoreFloatingIps()
        floating_ip_ref['project_id'] = project_id
        session.add(floating_ip_ref)
    return floating_ip_ref['address']
800-
801-
@require_context
def floating_ip_bulk_create(context, ips):
    """Insert many floating IPs in one transaction.

    Raises FloatingIpExists if an address in *ips* already belongs to a
    different live row.
    """
    existing_ips = dict((floating['address'], floating)
                        for floating in _floating_ip_get_all(context).all())

    session = get_session()
    with session.begin():
        for ip in ips:
            duplicate = existing_ips.get(ip['address'])
            if duplicate is not None and ip.get('id') != duplicate['id']:
                raise exception.FloatingIpExists(**dict(duplicate))

            new_row = models.FloatingIp()
            new_row.update(ip)
            session.add(new_row)
819-
820-
821-def _ip_range_splitter(ips, block_size=256):
822- """Yields blocks of IPs no more than block_size elements long."""
823- out = []
824- count = 0
825- for ip in ips:
826- out.append(ip['address'])
827- count += 1
828-
829- if count > block_size - 1:
830- yield out
831- out = []
832- count = 0
833-
834- if out:
835- yield out
836-
837-
@require_context
def floating_ip_bulk_destroy(context, ips):
    """Soft-delete every floating IP whose address appears in *ips*."""
    session = get_session()
    with session.begin():
        # Delete in bounded chunks to keep the IN clause a sane size.
        for address_block in _ip_range_splitter(ips):
            query = model_query(context, models.FloatingIp)
            query.filter(models.FloatingIp.address.in_(address_block)).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow()},
                       synchronize_session='fetch')
848-
849-
@require_context
def floating_ip_create(context, values, session=None):
    """Create a floating IP from *values* and return its address.

    :raises: FloatingIpExists when another live row already holds the
        same address
    """
    if not session:
        session = get_session()

    floating_ip_ref = models.FloatingIp()
    floating_ip_ref.update(values)

    # check uniqueness for not deleted addresses
    if not floating_ip_ref.deleted:
        try:
            floating_ip = floating_ip_get_by_address(context,
                                                     floating_ip_ref.address,
                                                     session)
        except exception.FloatingIpNotFoundForAddress:
            # Address is free; nothing to check.
            pass
        else:
            # Saving over the very same row (same id) is allowed.
            if floating_ip.id != floating_ip_ref.id:
                raise exception.FloatingIpExists(**dict(floating_ip_ref))

    floating_ip_ref.save(session=session)
    return floating_ip_ref['address']
872-
873-
@require_context
def floating_ip_count_by_project(context, project_id, session=None):
    """Count the project's live, manually-assigned floating IPs."""
    authorize_project_context(context, project_id)
    # TODO(tr3buchet): why leave auto_assigned floating IPs out?
    query = model_query(context, models.FloatingIp, read_deleted="no",
                        session=session)
    return query.filter_by(project_id=project_id,
                           auto_assigned=False).count()
883-
884-
@require_context
def floating_ip_fixed_ip_associate(context, floating_address,
                                   fixed_address, host):
    """Point the floating IP at the fixed IP and record the hosting node."""
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context,
                                                     floating_address,
                                                     session=session)
        fixed_ip_ref = fixed_ip_get_by_address(context, fixed_address,
                                               session=session)
        floating_ip_ref.update({'fixed_ip_id': fixed_ip_ref['id'],
                                'host': host})
        floating_ip_ref.save(session=session)
899-
900-
@require_context
def floating_ip_deallocate(context, address):
    """Return the floating IP at *address* to the unallocated pool."""
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context, address,
                                                     session=session)
        floating_ip_ref.update({'project_id': None,
                                'host': None,
                                'auto_assigned': False})
        floating_ip_ref.save(session=session)
912-
913-
@require_context
def floating_ip_destroy(context, address):
    """Delete the floating IP registered at *address*."""
    session = get_session()
    with session.begin():
        ref = floating_ip_get_by_address(context, address, session=session)
        ref.delete(session=session)
922-
923-
@require_context
def floating_ip_disassociate(context, address):
    """Detach the floating IP at *address* from its fixed IP.

    Returns the address of the fixed IP it was attached to (or None).
    """
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context, address,
                                                     session=session)
        fixed_ip_ref = fixed_ip_get(context, floating_ip_ref['fixed_ip_id'])
        fixed_ip_address = fixed_ip_ref['address'] if fixed_ip_ref else None
        floating_ip_ref.update({'fixed_ip_id': None, 'host': None})
        floating_ip_ref.save(session=session)
    return fixed_ip_address
941-
942-
@require_context
def floating_ip_set_auto_assigned(context, address):
    """Flag the floating IP at *address* as auto-assigned."""
    session = get_session()
    with session.begin():
        ref = floating_ip_get_by_address(context, address, session=session)
        ref.auto_assigned = True
        ref.save(session=session)
952-
953-
def _floating_ip_get_all(context, session=None):
    """Base query over all non-deleted floating IPs."""
    return model_query(context, models.FloatingIp,
                       read_deleted="no", session=session)
957-
958-
@require_admin_context
def floating_ip_get_all(context):
    """Return every live floating IP; raise if none are defined."""
    floating_ips = _floating_ip_get_all(context).all()
    if not floating_ips:
        raise exception.NoFloatingIpsDefined()
    return floating_ips
965-
966-
@require_admin_context
def floating_ip_get_all_by_host(context, host):
    """Return the live floating IPs hosted on *host*; raise if none."""
    floating_ips = _floating_ip_get_all(context).\
        filter_by(host=host).all()
    if not floating_ips:
        raise exception.FloatingIpNotFoundForHost(host=host)
    return floating_ips
975-
976-
@require_context
def floating_ip_get_all_by_project(context, project_id):
    """List the project's live, manually-assigned floating IPs."""
    authorize_project_context(context, project_id)
    # TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
    return _floating_ip_get_all(context).\
        filter_by(project_id=project_id, auto_assigned=False).all()
985-
986-
@require_context
def floating_ip_get_by_address(context, address, session=None):
    """Fetch one floating IP by address, enforcing project access."""
    query = model_query(context, models.FloatingIp, session=session)
    result = query.filter_by(address=address).first()
    if result is None:
        raise exception.FloatingIpNotFoundForAddress(address=address)

    # If the floating IP has a project ID set, check to make sure
    # the non-admin user has access.
    if result.project_id and is_user_context(context):
        authorize_project_context(context, result.project_id)

    return result
1002-
1003-
1004-@require_context
1005-def floating_ip_get_by_fixed_address(context, fixed_address, session=None):
1006- if not session:
1007- session = get_session()
1008-
1009- fixed_ip = fixed_ip_get_by_address(context, fixed_address, session)
1010- fixed_ip_id = fixed_ip['id']
1011-
1012- return model_query(context, models.FloatingIp, session=session).\
1013- filter_by(fixed_ip_id=fixed_ip_id).\
1014- all()
1015-
1016- # NOTE(tr3buchet) please don't invent an exception here, empty list is fine
1017-
1018-
@require_context
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id, session=None):
    """Return the floating IPs attached to the given fixed-IP id."""
    if session is None:
        session = get_session()
    query = model_query(context, models.FloatingIp, session=session)
    return query.filter_by(fixed_ip_id=fixed_ip_id).all()
1027-
1028-
@require_context
def floating_ip_update(context, address, values):
    """Apply *values* to the floating IP registered at *address*."""
    session = get_session()
    with session.begin():
        ref = floating_ip_get_by_address(context, address, session)
        ref.update(values)
        ref.save(session=session)
1037-
1038-
@require_context
def _dnsdomain_get(context, session, fqdomain):
    """Fetch the live DNS-domain row for *fqdomain*, locked FOR UPDATE."""
    query = model_query(context, models.DNSDomain,
                        session=session, read_deleted="no")
    return query.filter_by(domain=fqdomain).\
        with_lockmode('update').first()
1046-
1047-
@require_context
def dnsdomain_get(context, fqdomain):
    """Return the DNS-domain row for *fqdomain*, or None if unknown."""
    session = get_session()
    with session.begin():
        return _dnsdomain_get(context, session, fqdomain)
1053-
1054-
@require_admin_context
def _dnsdomain_get_or_create(context, session, fqdomain):
    """Return the row for *fqdomain*, building an unsaved blank one if missing."""
    domain_ref = _dnsdomain_get(context, session, fqdomain)
    if domain_ref is not None:
        return domain_ref

    dns_ref = models.DNSDomain()
    dns_ref.update({'domain': fqdomain,
                    'availability_zone': None,
                    'project_id': None})
    return dns_ref
1066-
1067-
@require_admin_context
def dnsdomain_register_for_zone(context, fqdomain, zone):
    """Mark *fqdomain* as a private domain tied to availability zone *zone*."""
    session = get_session()
    with session.begin():
        domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
        domain_ref.update({'scope': 'private', 'availability_zone': zone})
        domain_ref.save(session=session)
1076-
1077-
@require_admin_context
def dnsdomain_register_for_project(context, fqdomain, project):
    """Mark *fqdomain* as a public domain owned by *project*."""
    session = get_session()
    with session.begin():
        domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
        domain_ref.update({'scope': 'public', 'project_id': project})
        domain_ref.save(session=session)
1086-
1087-
@require_admin_context
def dnsdomain_unregister(context, fqdomain):
    """Hard-delete any DNS-domain row matching *fqdomain*."""
    session = get_session()
    with session.begin():
        session.query(models.DNSDomain).\
            filter_by(domain=fqdomain).delete()
1095-
1096-
@require_context
def dnsdomain_list(context):
    """Return the names of all live DNS domains."""
    session = get_session()
    records = model_query(context, models.DNSDomain,
                          session=session, read_deleted="no").all()
    return [record.domain for record in records]
1108-
1109-
1110-###################
1111-
1112-
1113-@require_admin_context
1114-def fixed_ip_associate(context, address, instance_uuid, network_id=None,
1115- reserved=False):
1116- """Keyword arguments:
1117- reserved -- should be a boolean value(True or False), exact value will be
1118- used to filter on the fixed ip address
1119- """
1120- if not utils.is_uuid_like(instance_uuid):
1121- raise exception.InvalidUUID(uuid=instance_uuid)
1122-
1123- session = get_session()
1124- with session.begin():
1125- network_or_none = or_(models.FixedIp.network_id == network_id,
1126- models.FixedIp.network_id == None)
1127- fixed_ip_ref = model_query(context, models.FixedIp, session=session,
1128- read_deleted="no").\
1129- filter(network_or_none).\
1130- filter_by(reserved=reserved).\
1131- filter_by(address=address).\
1132- with_lockmode('update').\
1133- first()
1134- # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
1135- # then this has concurrency issues
1136- if fixed_ip_ref is None:
1137- raise exception.FixedIpNotFoundForNetwork(address=address,
1138- network_id=network_id)
1139- if fixed_ip_ref.instance_uuid:
1140- raise exception.FixedIpAlreadyInUse(address=address)
1141-
1142- if not fixed_ip_ref.network_id:
1143- fixed_ip_ref.network_id = network_id
1144- fixed_ip_ref.instance_uuid = instance_uuid
1145- session.add(fixed_ip_ref)
1146- return fixed_ip_ref['address']
1147-
1148-
@require_admin_context
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
                            host=None):
    """Grab a free fixed IP from the pool and optionally assign it.

    Picks an unreserved, unassigned fixed IP on *network_id* (or not
    yet on any network), locking the row FOR UPDATE, then stamps it
    with the given instance and/or host. Returns the chosen address.

    :raises: InvalidUUID if instance_uuid is malformed,
        NoMoreFixedIps when the pool is exhausted
    """
    if instance_uuid and not utils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(uuid=instance_uuid)

    session = get_session()
    with session.begin():
        network_or_none = or_(models.FixedIp.network_id == network_id,
                              models.FixedIp.network_id == None)
        fixed_ip_ref = model_query(context, models.FixedIp, session=session,
                                   read_deleted="no").\
            filter(network_or_none).\
            filter_by(reserved=False).\
            filter_by(instance_uuid=None).\
            filter_by(host=None).\
            with_lockmode('update').\
            first()
        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not fixed_ip_ref:
            raise exception.NoMoreFixedIps()

        if fixed_ip_ref['network_id'] is None:
            # Bug fix: previously this wrote fixed_ip_ref['network'],
            # assigning an integer id to the 'network' relationship
            # attribute instead of setting the network_id foreign key,
            # so the adoption onto the network was never persisted.
            fixed_ip_ref['network_id'] = network_id

        if instance_uuid:
            fixed_ip_ref['instance_uuid'] = instance_uuid

        if host:
            fixed_ip_ref['host'] = host
        session.add(fixed_ip_ref)
    return fixed_ip_ref['address']
1182-
1183-
@require_context
def fixed_ip_create(context, values):
    """Insert a fixed IP row from *values*; return its address."""
    ref = models.FixedIp()
    ref.update(values)
    ref.save()
    return ref['address']
1190-
1191-
@require_context
def fixed_ip_bulk_create(context, ips):
    """Insert many fixed IP rows inside a single transaction."""
    session = get_session()
    with session.begin():
        for ip in ips:
            fixed_ip = models.FixedIp()
            fixed_ip.update(ip)
            session.add(fixed_ip)
1200-
1201-
@require_context
def fixed_ip_disassociate(context, address):
    """Clear the instance association of the fixed IP at *address*."""
    session = get_session()
    with session.begin():
        ref = fixed_ip_get_by_address(context, address, session=session)
        ref['instance_uuid'] = None
        ref.save(session=session)
1211-
1212-
@require_admin_context
def fixed_ip_disassociate_all_by_timeout(context, host, time):
    """Release stale fixed-IP leases tied to *host*.

    Clears instance_uuid and leased on unallocated fixed IPs that have
    not been updated since *time*. Returns the number of rows updated.
    """
    session = get_session()
    # NOTE(vish): only update fixed ips that "belong" to this
    #             host; i.e. the network host or the instance
    #             host matches. Two queries necessary because
    #             join with update doesn't work.
    host_filter = or_(and_(models.Instance.host == host,
                           models.Network.multi_host == True),
                      models.Network.host == host)
    # First query: collect the candidate primary keys via joins.
    result = session.query(models.FixedIp.id).\
        filter(models.FixedIp.deleted == False).\
        filter(models.FixedIp.allocated == False).\
        filter(models.FixedIp.updated_at < time).\
        join((models.Network,
              models.Network.id == models.FixedIp.network_id)).\
        join((models.Instance,
              models.Instance.uuid == models.FixedIp.instance_uuid)).\
        filter(host_filter).\
        all()
    fixed_ip_ids = [fip[0] for fip in result]
    if not fixed_ip_ids:
        return 0
    # Second query: bulk-update the matched rows by primary key.
    result = model_query(context, models.FixedIp, session=session).\
        filter(models.FixedIp.id.in_(fixed_ip_ids)).\
        update({'instance_uuid': None,
                'leased': False,
                'updated_at': timeutils.utcnow()},
               synchronize_session='fetch')
    return result
1244-
1245-
1246-@require_context
1247-def fixed_ip_get(context, id, session=None):
1248- result = model_query(context, models.FixedIp, session=session).\
1249- filter_by(id=id).\
1250- first()
1251- if not result:
1252- raise exception.FixedIpNotFound(id=id)
1253-
1254- # FIXME(sirp): shouldn't we just use project_only here to restrict the
1255- # results?
1256- if is_user_context(context) and result['instance_uuid'] is not None:
1257- instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
1258- result['instance_uuid'],
1259- session)
1260- authorize_project_context(context, instance.project_id)
1261-
1262- return result
1263-
1264-
1265-@require_admin_context
1266-def fixed_ip_get_all(context, session=None):
1267- result = model_query(context, models.FixedIp, session=session,
1268- read_deleted="yes").\
1269- all()
1270- if not result:
1271- raise exception.NoFixedIpsDefined()
1272-
1273- return result
1274-
1275-
1276-@require_context
1277-def fixed_ip_get_by_address(context, address, session=None):
1278- result = model_query(context, models.FixedIp, session=session).\
1279- filter_by(address=address).\
1280- first()
1281- if not result:
1282- raise exception.FixedIpNotFoundForAddress(address=address)
1283-
1284- # NOTE(sirp): shouldn't we just use project_only here to restrict the
1285- # results?
1286- if is_user_context(context) and result['instance_uuid'] is not None:
1287- instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
1288- result['instance_uuid'],
1289- session)
1290- authorize_project_context(context, instance.project_id)
1291-
1292- return result
1293-
1294-
1295-@require_context
1296-def fixed_ip_get_by_instance(context, instance_uuid):
1297- if not utils.is_uuid_like(instance_uuid):
1298- raise exception.InvalidUUID(uuid=instance_uuid)
1299-
1300- result = model_query(context, models.FixedIp, read_deleted="no").\
1301- filter_by(instance_uuid=instance_uuid).\
1302- all()
1303-
1304- if not result:
1305- raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)
1306-
1307- return result
1308-
1309-
1310-@require_context
1311-def fixed_ip_get_by_network_host(context, network_id, host):
1312- result = model_query(context, models.FixedIp, read_deleted="no").\
1313- filter_by(network_id=network_id).\
1314- filter_by(host=host).\
1315- first()
1316-
1317- if not result:
1318- raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
1319- host=host)
1320- return result
1321-
1322-
1323-@require_context
1324-def fixed_ips_by_virtual_interface(context, vif_id):
1325- result = model_query(context, models.FixedIp, read_deleted="no").\
1326- filter_by(virtual_interface_id=vif_id).\
1327- all()
1328-
1329- return result
1330-
1331-
1332-@require_admin_context
1333-def fixed_ip_get_network(context, address):
1334- fixed_ip_ref = fixed_ip_get_by_address(context, address)
1335- return fixed_ip_ref.network
1336-
1337-
1338-@require_context
1339-def fixed_ip_update(context, address, values):
1340- session = get_session()
1341- with session.begin():
1342- fixed_ip_ref = fixed_ip_get_by_address(context,
1343- address,
1344- session=session)
1345- fixed_ip_ref.update(values)
1346- fixed_ip_ref.save(session=session)
1347-
1348-
1349-###################
1350-
1351-
1352-@require_context
1353-def virtual_interface_create(context, values):
1354- """Create a new virtual interface record in the database.
1355-
1356- :param values: = dict containing column values
1357- """
1358- try:
1359- vif_ref = models.VirtualInterface()
1360- vif_ref.update(values)
1361- vif_ref.save()
1362- except IntegrityError:
1363- raise exception.VirtualInterfaceCreateException()
1364-
1365- return vif_ref
1366-
1367-
1368-@require_context
1369-def _virtual_interface_query(context, session=None):
1370- return model_query(context, models.VirtualInterface, session=session,
1371- read_deleted="yes")
1372-
1373-
1374-@require_context
1375-def virtual_interface_get(context, vif_id, session=None):
1376- """Gets a virtual interface from the table.
1377-
1378- :param vif_id: = id of the virtual interface
1379- """
1380- vif_ref = _virtual_interface_query(context, session=session).\
1381- filter_by(id=vif_id).\
1382- first()
1383- return vif_ref
1384-
1385-
1386-@require_context
1387-def virtual_interface_get_by_address(context, address):
1388- """Gets a virtual interface from the table.
1389-
1390- :param address: = the address of the interface you're looking to get
1391- """
1392- vif_ref = _virtual_interface_query(context).\
1393- filter_by(address=address).\
1394- first()
1395- return vif_ref
1396-
1397-
1398-@require_context
1399-def virtual_interface_get_by_uuid(context, vif_uuid):
1400- """Gets a virtual interface from the table.
1401-
1402- :param vif_uuid: the uuid of the interface you're looking to get
1403- """
1404- vif_ref = _virtual_interface_query(context).\
1405- filter_by(uuid=vif_uuid).\
1406- first()
1407- return vif_ref
1408-
1409-
1410-@require_context
1411-@require_instance_exists_using_uuid
1412-def virtual_interface_get_by_instance(context, instance_uuid):
1413- """Gets all virtual interfaces for instance.
1414-
1415- :param instance_uuid: = uuid of the instance to retrieve vifs for
1416- """
1417- vif_refs = _virtual_interface_query(context).\
1418- filter_by(instance_uuid=instance_uuid).\
1419- all()
1420- return vif_refs
1421-
1422-
1423-@require_context
1424-def virtual_interface_get_by_instance_and_network(context, instance_uuid,
1425- network_id):
1426- """Gets virtual interface for instance that's associated with network."""
1427- vif_ref = _virtual_interface_query(context).\
1428- filter_by(instance_uuid=instance_uuid).\
1429- filter_by(network_id=network_id).\
1430- first()
1431- return vif_ref
1432-
1433-
1434-@require_context
1435-def virtual_interface_delete(context, vif_id):
1436- """Delete virtual interface record from the database.
1437-
1438- :param vif_id: = id of vif to delete
1439- """
1440- session = get_session()
1441- vif_ref = virtual_interface_get(context, vif_id, session)
1442- with session.begin():
1443- session.delete(vif_ref)
1444-
1445-
1446-@require_context
1447-def virtual_interface_delete_by_instance(context, instance_uuid):
1448- """Delete virtual interface records that are associated
1449- with the instance given by instance_id.
1450-
1451- :param instance_uuid: = uuid of instance
1452- """
1453- vif_refs = virtual_interface_get_by_instance(context, instance_uuid)
1454- for vif_ref in vif_refs:
1455- virtual_interface_delete(context, vif_ref['id'])
1456-
1457-
1458-@require_context
1459-def virtual_interface_get_all(context):
1460- """Get all vifs"""
1461- vif_refs = _virtual_interface_query(context).all()
1462- return vif_refs
1463-
1464-
1465-###################
1466-
1467-
1468-def _metadata_refs(metadata_dict, meta_class):
1469- metadata_refs = []
1470- if metadata_dict:
1471- for k, v in metadata_dict.iteritems():
1472- metadata_ref = meta_class()
1473- metadata_ref['key'] = k
1474- metadata_ref['value'] = v
1475- metadata_refs.append(metadata_ref)
1476- return metadata_refs
1477-
1478-
1479-@require_context
1480-def instance_create(context, values):
1481- """Create a new Instance record in the database.
1482-
1483- context - request context object
1484- values - dict containing column values.
1485- """
1486- values = values.copy()
1487- values['metadata'] = _metadata_refs(
1488- values.get('metadata'), models.InstanceMetadata)
1489-
1490- values['system_metadata'] = _metadata_refs(
1491- values.get('system_metadata'), models.InstanceSystemMetadata)
1492-
1493- instance_ref = models.Instance()
1494- if not values.get('uuid'):
1495- values['uuid'] = str(utils.gen_uuid())
1496- instance_ref['info_cache'] = models.InstanceInfoCache()
1497- info_cache = values.pop('info_cache', None)
1498- if info_cache is not None:
1499- instance_ref['info_cache'].update(info_cache)
1500- security_groups = values.pop('security_groups', [])
1501- instance_ref.update(values)
1502-
1503- def _get_sec_group_models(session, security_groups):
1504- models = []
1505- _existed, default_group = security_group_ensure_default(context,
1506- session=session)
1507- if 'default' in security_groups:
1508- models.append(default_group)
1509- # Generate a new list, so we don't modify the original
1510- security_groups = [x for x in security_groups if x != 'default']
1511- if security_groups:
1512- models.extend(_security_group_get_by_names(context,
1513- session, context.project_id, security_groups))
1514- return models
1515-
1516- session = get_session()
1517- with session.begin():
1518- instance_ref.security_groups = _get_sec_group_models(session,
1519- security_groups)
1520- instance_ref.save(session=session)
1521- # NOTE(comstud): This forces instance_type to be loaded so it
1522- # exists in the ref when we return. Fixes lazy loading issues.
1523- instance_ref.instance_type
1524-
1525- # create the instance uuid to ec2_id mapping entry for instance
1526- ec2_instance_create(context, instance_ref['uuid'])
1527-
1528- return instance_ref
1529-
1530-
1531-@require_admin_context
1532-def instance_data_get_for_project(context, project_id, session=None):
1533- result = model_query(context,
1534- func.count(models.Instance.id),
1535- func.sum(models.Instance.vcpus),
1536- func.sum(models.Instance.memory_mb),
1537- read_deleted="no",
1538- session=session).\
1539- filter_by(project_id=project_id).\
1540- first()
1541- # NOTE(vish): convert None to 0
1542- return (result[0] or 0, result[1] or 0, result[2] or 0)
1543-
1544-
1545-@require_context
1546-def instance_destroy(context, instance_uuid, constraint=None):
1547- session = get_session()
1548- with session.begin():
1549- if utils.is_uuid_like(instance_uuid):
1550- instance_ref = instance_get_by_uuid(context, instance_uuid,
1551- session=session)
1552- else:
1553- raise exception.InvalidUUID(instance_uuid)
1554-
1555- query = session.query(models.Instance).\
1556- filter_by(uuid=instance_ref['uuid'])
1557- if constraint is not None:
1558- query = constraint.apply(models.Instance, query)
1559- count = query.update({'deleted': True,
1560- 'deleted_at': timeutils.utcnow(),
1561- 'updated_at': literal_column('updated_at')})
1562- if count == 0:
1563- raise exception.ConstraintNotMet()
1564- session.query(models.SecurityGroupInstanceAssociation).\
1565- filter_by(instance_uuid=instance_ref['uuid']).\
1566- update({'deleted': True,
1567- 'deleted_at': timeutils.utcnow(),
1568- 'updated_at': literal_column('updated_at')})
1569-
1570- instance_info_cache_delete(context, instance_ref['uuid'],
1571- session=session)
1572- return instance_ref
1573-
1574-
1575-@require_context
1576-def instance_get_by_uuid(context, uuid, session=None):
1577- result = _build_instance_get(context, session=session).\
1578- filter_by(uuid=uuid).\
1579- first()
1580-
1581- if not result:
1582- raise exception.InstanceNotFound(instance_id=uuid)
1583-
1584- return result
1585-
1586-
1587-@require_context
1588-def instance_get(context, instance_id, session=None):
1589- result = _build_instance_get(context, session=session).\
1590- filter_by(id=instance_id).\
1591- first()
1592-
1593- if not result:
1594- raise exception.InstanceNotFound(instance_id=instance_id)
1595-
1596- return result
1597-
1598-
1599-@require_context
1600-def _build_instance_get(context, session=None):
1601- return model_query(context, models.Instance, session=session,
1602- project_only=True).\
1603- options(joinedload_all('security_groups.rules')).\
1604- options(joinedload('info_cache')).\
1605- options(joinedload('metadata')).\
1606- options(joinedload('instance_type'))
1607-
1608-
1609-@require_admin_context
1610-def instance_get_all(context, columns_to_join=None):
1611- if columns_to_join is None:
1612- columns_to_join = ['info_cache', 'security_groups',
1613- 'metadata', 'instance_type']
1614- query = model_query(context, models.Instance)
1615- for column in columns_to_join:
1616- query = query.options(joinedload(column))
1617- return query.all()
1618-
1619-
1620-@require_context
1621-def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
1622- limit=None, marker=None):
1623- """Return instances that match all filters. Deleted instances
1624- will be returned by default, unless there's a filter that says
1625- otherwise"""
1626-
1627- sort_fn = {'desc': desc, 'asc': asc}
1628-
1629- session = get_session()
1630- query_prefix = session.query(models.Instance).\
1631- options(joinedload('info_cache')).\
1632- options(joinedload('security_groups')).\
1633- options(joinedload('metadata')).\
1634- options(joinedload('instance_type')).\
1635- order_by(sort_fn[sort_dir](getattr(models.Instance, sort_key)))
1636-
1637- # Make a copy of the filters dictionary to use going forward, as we'll
1638- # be modifying it and we shouldn't affect the caller's use of it.
1639- filters = filters.copy()
1640-
1641- if 'changes-since' in filters:
1642- changes_since = timeutils.normalize_time(filters['changes-since'])
1643- query_prefix = query_prefix.\
1644- filter(models.Instance.updated_at > changes_since)
1645-
1646- if 'deleted' in filters:
1647- # Instances can be soft or hard deleted and the query needs to
1648- # include or exclude both
1649- if filters.pop('deleted'):
1650- deleted = or_(models.Instance.deleted == True,
1651- models.Instance.vm_state == vm_states.SOFT_DELETED)
1652- query_prefix = query_prefix.filter(deleted)
1653- else:
1654- query_prefix = query_prefix.\
1655- filter_by(deleted=False).\
1656- filter(models.Instance.vm_state != vm_states.SOFT_DELETED)
1657-
1658- if not context.is_admin:
1659- # If we're not admin context, add appropriate filter..
1660- if context.project_id:
1661- filters['project_id'] = context.project_id
1662- else:
1663- filters['user_id'] = context.user_id
1664-
1665- # Filters for exact matches that we can do along with the SQL query...
1666- # For other filters that don't match this, we will do regexp matching
1667- exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
1668- 'vm_state', 'instance_type_id', 'uuid',
1669- 'metadata']
1670-
1671- # Filter the query
1672- query_prefix = exact_filter(query_prefix, models.Instance,
1673- filters, exact_match_filter_names)
1674-
1675- query_prefix = regex_filter(query_prefix, models.Instance, filters)
1676-
1677- # paginate query
1678- if marker is not None:
1679- try:
1680- marker = instance_get_by_uuid(context, marker, session=session)
1681- except exception.InstanceNotFound as e:
1682- raise exception.MarkerNotFound(marker)
1683- query_prefix = paginate_query(query_prefix, models.Instance, limit,
1684- [sort_key, 'created_at', 'id'],
1685- marker=marker,
1686- sort_dir=sort_dir)
1687-
1688- instances = query_prefix.all()
1689- return instances
1690-
1691-
def regex_filter(query, model, filters):
    """Applies regular expression filtering to a query.

    Returns the updated query.

    :param query: query to apply filters to
    :param model: model object the query applies to
    :param filters: dictionary of filters with regex values
    """
    regexp_ops = {
        'postgresql': '~',
        'mysql': 'REGEXP',
        'oracle': 'REGEXP_LIKE',
        'sqlite': 'REGEXP'
    }
    # The dialect name is the part of sql_connection before ':' / '+'.
    dialect = FLAGS.sql_connection.split(':')[0].split('+')[0]
    regexp_op = regexp_ops.get(dialect, 'LIKE')
    for name, value in filters.iteritems():
        column_attr = getattr(model, name, None)
        if column_attr is None:
            # Not a column on this model; skip.
            continue
        if 'property' == type(column_attr).__name__:
            continue
        query = query.filter(column_attr.op(regexp_op)(str(value)))
    return query
1720-
1721-
1722-@require_context
1723-def instance_get_active_by_window(context, begin, end=None,
1724- project_id=None, host=None):
1725- """Return instances that were active during window."""
1726- session = get_session()
1727- query = session.query(models.Instance)
1728-
1729- query = query.filter(or_(models.Instance.terminated_at == None,
1730- models.Instance.terminated_at > begin))
1731- if end:
1732- query = query.filter(models.Instance.launched_at < end)
1733- if project_id:
1734- query = query.filter_by(project_id=project_id)
1735- if host:
1736- query = query.filter_by(host=host)
1737-
1738- return query.all()
1739-
1740-
1741-@require_admin_context
1742-def instance_get_active_by_window_joined(context, begin, end=None,
1743- project_id=None, host=None):
1744- """Return instances and joins that were active during window."""
1745- session = get_session()
1746- query = session.query(models.Instance)
1747-
1748- query = query.options(joinedload('info_cache')).\
1749- options(joinedload('security_groups')).\
1750- options(joinedload('metadata')).\
1751- options(joinedload('instance_type')).\
1752- filter(or_(models.Instance.terminated_at == None,
1753- models.Instance.terminated_at > begin))
1754- if end:
1755- query = query.filter(models.Instance.launched_at < end)
1756- if project_id:
1757- query = query.filter_by(project_id=project_id)
1758- if host:
1759- query = query.filter_by(host=host)
1760-
1761- return query.all()
1762-
1763-
1764-@require_admin_context
1765-def _instance_get_all_query(context, project_only=False):
1766- return model_query(context, models.Instance, project_only=project_only).\
1767- options(joinedload('info_cache')).\
1768- options(joinedload('security_groups')).\
1769- options(joinedload('metadata')).\
1770- options(joinedload('instance_type'))
1771-
1772-
1773-@require_admin_context
1774-def instance_get_all_by_host(context, host):
1775- return _instance_get_all_query(context).filter_by(host=host).all()
1776-
1777-
1778-@require_admin_context
1779-def instance_get_all_by_host_and_not_type(context, host, type_id=None):
1780- return _instance_get_all_query(context).filter_by(host=host).\
1781- filter(models.Instance.instance_type_id != type_id).all()
1782-
1783-
@require_context
def instance_get_all_by_project(context, project_id):
    """Return every instance owned by *project_id*."""
    authorize_project_context(context, project_id)
    query = _instance_get_all_query(context)
    return query.filter_by(project_id=project_id).all()
1790-
1791-
@require_context
def instance_get_all_by_reservation(context, reservation_id):
    """Return the instances launched under the given reservation id."""
    query = _instance_get_all_query(context, project_only=True)
    return query.filter_by(reservation_id=reservation_id).all()
1797-
1798-
# NOTE(jkoelker) This is only being left here for compat with floating
#                ips. Currently the network_api doesn't return floaters
#                in network_info. Once it starts return the model. This
#                function and its call in compute/manager.py on 1829 can
#                go away
@require_context
def instance_get_floating_address(context, instance_id):
    """Return one floating IP address for *instance_id*, or None.

    Only the instance's first fixed IP, and the first floating IP
    attached to it, are considered.
    """
    instance = instance_get(context, instance_id)
    fixed = fixed_ip_get_by_instance(context, instance['uuid'])
    if not fixed:
        return None

    # NOTE(tr3buchet): only the first fixed_ip is examined, so floating
    #                  ips attached to the other fixed_ips are not found
    floaters = floating_ip_get_by_fixed_address(context,
                                                fixed[0]['address'])
    if not floaters:
        return None
    # NOTE(vish): this just returns the first floating ip
    return floaters[0]['address']
1820-
1821-
@require_admin_context
def instance_get_all_hung_in_rebooting(context, reboot_window, session=None):
    """Return instances stuck in 'rebooting' for over *reboot_window* secs."""
    cutoff = timeutils.utcnow() - datetime.timedelta(seconds=reboot_window)
    session = session or get_session()

    return session.query(models.Instance).\
                   filter(models.Instance.updated_at <= cutoff).\
                   filter_by(task_state="rebooting").\
                   all()
1835-
1836-
@require_context
def instance_test_and_set(context, instance_uuid, attr, ok_states,
                          new_state, session=None):
    """Atomically check if an instance is in a valid state, and if it is, set
    the instance into a new state.
    """
    if not session:
        session = get_session()

    with session.begin():
        query = model_query(context, models.Instance, session=session,
                            project_only=True)

        if utils.is_uuid_like(instance_uuid):
            query = query.filter_by(uuid=instance_uuid)
        else:
            raise exception.InvalidUUID(instance_uuid)

        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        instance = query.with_lockmode('update').first()

        # NOTE(review): if no row matched, `instance` is None and the
        # subscript below raises TypeError rather than a NotFound —
        # confirm callers always pass an existing uuid.
        state = instance[attr]
        if state not in ok_states:
            raise exception.InstanceInvalidState(
                attr=attr,
                instance_uuid=instance['uuid'],
                state=state,
                method='instance_test_and_set')

        # Still under the row lock, so the check-and-set is atomic here.
        instance[attr] = new_state
        instance.save(session=session)
1869-
1870-
@require_context
def instance_update(context, instance_uuid, values):
    """Apply *values* to an instance and return the updated reference."""
    _, updated_ref = _instance_update(context, instance_uuid, values)
    return updated_ref
1875-
1876-
@require_context
def instance_update_and_get_original(context, instance_uuid, values):
    """Update an instance, returning both the old and new references.

    :param context: request context object
    :param instance_uuid: uuid of the instance to update
    :param values: dict of column values to apply

    A special "expected_task_state" entry in *values* makes the update
    conditional: it proceeds only when the instance's current task state
    matches, otherwise UnexpectedTaskStateError is raised.

    :returns: (old_instance_ref, new_instance_ref) tuple, where the old
              reference is a shallow copy taken before the update

    Raises NotFound if instance does not exist.
    """
    return _instance_update(context, instance_uuid, values,
                            copy_old_instance=True)
1897-
1898-
def _instance_update(context, instance_uuid, values, copy_old_instance=False):
    """Apply *values* to an instance inside a single transaction.

    Returns (old_instance_ref, new_instance_ref); the old reference is a
    shallow copy only when *copy_old_instance* is True, otherwise None.
    """
    session = get_session()

    if not utils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(instance_uuid)

    with session.begin():
        instance_ref = instance_get_by_uuid(context, instance_uuid,
                                            session=session)
        if "expected_task_state" in values:
            # it is not a db column so always pop out
            expected = values.pop("expected_task_state")
            if not isinstance(expected, (tuple, list, set)):
                expected = (expected,)
            actual_state = instance_ref["task_state"]
            if actual_state not in expected:
                raise exception.UnexpectedTaskStateError(actual=actual_state,
                                                         expected=expected)

        if copy_old_instance:
            # Shallow copy so callers see the pre-update column values.
            old_instance_ref = copy.copy(instance_ref)
        else:
            old_instance_ref = None

        # metadata/system_metadata live in side tables, so they are popped
        # out of *values* and written through their dedicated helpers.
        metadata = values.get('metadata')
        if metadata is not None:
            instance_metadata_update(context, instance_ref['uuid'],
                                     values.pop('metadata'), True,
                                     session=session)

        system_metadata = values.get('system_metadata')
        if system_metadata is not None:
            instance_system_metadata_update(
                context, instance_ref['uuid'], values.pop('system_metadata'),
                delete=True, session=session)

        instance_ref.update(values)
        instance_ref.save(session=session)

    return (old_instance_ref, instance_ref)
1939-
1940-
def instance_add_security_group(context, instance_uuid, security_group_id):
    """Associate the given security group with the given instance"""
    session = get_session()
    with session.begin():
        instance = instance_get_by_uuid(context, instance_uuid,
                                        session=session)
        secgroup = security_group_get(context, security_group_id,
                                      session=session)
        instance.security_groups.append(secgroup)
        instance.save(session=session)
1952-
1953-
@require_context
def instance_remove_security_group(context, instance_uuid, security_group_id):
    """Disassociate the given security group from the given instance"""
    session = get_session()
    instance_ref = instance_get_by_uuid(context, instance_uuid,
                                        session=session)
    # Soft-delete the association row; literal_column('updated_at') writes
    # the column back onto itself so the timestamp is left unchanged.
    session.query(models.SecurityGroupInstanceAssociation).\
            filter_by(instance_uuid=instance_ref['uuid']).\
            filter_by(security_group_id=security_group_id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})
1966-
1967-
1968-###################
1969-
1970-
@require_context
def instance_info_cache_create(context, values):
    """Create and return a new instance info-cache row.

    :param context: request context object
    :param values: dict of column values for the new row
    """
    cache_ref = models.InstanceInfoCache()
    cache_ref.update(values)

    session = get_session()
    with session.begin():
        cache_ref.save(session=session)
    return cache_ref
1985-
1986-
@require_context
def instance_info_cache_get(context, instance_uuid, session=None):
    """Return the info-cache row for an instance, or None.

    :param instance_uuid: uuid of the info cache's instance
    :param session: optional session object
    """
    if session is None:
        session = get_session()

    return session.query(models.InstanceInfoCache).\
                   filter_by(instance_uuid=instance_uuid).\
                   first()
2000-
2001-
@require_context
def instance_info_cache_update(context, instance_uuid, values,
                               session=None):
    """Update (or recreate) an instance's info-cache row.

    :param instance_uuid: uuid of info cache's instance
    :param values: dict containing column values to update
    :param session: optional session object
    """
    session = session or get_session()
    cache = instance_info_cache_get(context, instance_uuid,
                                    session=session)
    if not cache:
        # NOTE(tr3buchet): just in case someone blows away an instance's
        #                  cache entry, recreate it
        values['instance_uuid'] = instance_uuid
        return instance_info_cache_create(context, values)

    # NOTE(tr3buchet): let's leave it alone if it's already deleted
    if not cache['deleted']:
        cache.update(values)
        cache.save(session=session)
    return cache
2028-
2029-
@require_context
def instance_info_cache_delete(context, instance_uuid, session=None):
    """Soft-delete the info-cache row tied to *instance_uuid*.

    :param instance_uuid: uuid of the instance tied to the cache record
    :param session: optional session object
    """
    instance_info_cache_update(context, instance_uuid,
                               {'deleted': True,
                                'deleted_at': timeutils.utcnow()},
                               session)
2040-
2041-
2042-###################
2043-
2044-
@require_context
def key_pair_create(context, values):
    """Create a keypair row from *values* and return it."""
    keypair = models.KeyPair()
    keypair.update(values)
    keypair.save()
    return keypair
2051-
2052-
@require_context
def key_pair_destroy(context, user_id, name):
    """Delete the keypair *name* belonging to *user_id*."""
    authorize_user_context(context, user_id)
    session = get_session()
    with session.begin():
        keypair = key_pair_get(context, user_id, name, session=session)
        keypair.delete(session=session)
2060-
2061-
@require_context
def key_pair_destroy_all_by_user(context, user_id):
    """Soft-delete every keypair owned by *user_id*."""
    authorize_user_context(context, user_id)
    session = get_session()
    with session.begin():
        # Bulk soft-delete; literal_column('updated_at') rewrites the
        # column onto itself so the timestamp stays unchanged.
        session.query(models.KeyPair).\
                filter_by(user_id=user_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
2072-
2073-
@require_context
def key_pair_get(context, user_id, name, session=None):
    """Return the keypair *name* for *user_id*; raise KeypairNotFound."""
    authorize_user_context(context, user_id)
    keypair = model_query(context, models.KeyPair, session=session).\
                      filter_by(user_id=user_id).\
                      filter_by(name=name).\
                      first()
    if keypair is None:
        raise exception.KeypairNotFound(user_id=user_id, name=name)
    return keypair
2086-
2087-
@require_context
def key_pair_get_all_by_user(context, user_id):
    """Return every non-deleted keypair owned by *user_id*."""
    authorize_user_context(context, user_id)
    query = model_query(context, models.KeyPair, read_deleted="no")
    return query.filter_by(user_id=user_id).all()
2094-
2095-
def key_pair_count_by_user(context, user_id):
    """Count the non-deleted keypairs owned by *user_id*."""
    authorize_user_context(context, user_id)
    query = model_query(context, models.KeyPair, read_deleted="no")
    return query.filter_by(user_id=user_id).count()
2101-
2102-
2103-###################
2104-
2105-
@require_admin_context
def network_associate(context, project_id, network_id=None, force=False):
    """Associate a project with a network.

    called by project_get_networks under certain conditions
    and network manager add_network_to_project()

    only associate if the project doesn't already have a network
    or if force is True

    force solves race condition where a fresh project has multiple instance
    builds simultaneously picked up by multiple network hosts which attempt
    to associate the project with multiple networks
    force should only be used as a direct consequence of user request
    all automated requests should not use force
    """
    session = get_session()
    with session.begin():

        def network_query(project_filter, id=None):
            # Row-locked lookup of one network matching project_filter
            # (and, optionally, a specific network id).
            filter_kwargs = {'project_id': project_filter}
            if id is not None:
                filter_kwargs['id'] = id
            return model_query(context, models.Network, session=session,
                               read_deleted="no").\
                           filter_by(**filter_kwargs).\
                           with_lockmode('update').\
                           first()

        if not force:
            # find out if project has a network
            network_ref = network_query(project_id)

        if force or not network_ref:
            # in force mode or project doesn't have a network so associate
            # with a new network

            # get new network
            network_ref = network_query(None, network_id)
            if not network_ref:
                raise db.NoMoreNetworks()

        # associate with network
        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        network_ref['project_id'] = project_id
        session.add(network_ref)
    return network_ref
2154-
2155-
@require_admin_context
def network_count(context):
    """Return the total number of network rows."""
    query = model_query(context, models.Network)
    return query.count()
2159-
2160-
@require_admin_context
def _network_ips_query(context, network_id):
    """Base query over the non-deleted fixed IPs of one network."""
    query = model_query(context, models.FixedIp, read_deleted="no")
    return query.filter_by(network_id=network_id)
2165-
2166-
@require_admin_context
def network_count_reserved_ips(context, network_id):
    """Count the reserved fixed IPs in the given network."""
    query = _network_ips_query(context, network_id)
    return query.filter_by(reserved=True).count()
2172-
2173-
@require_admin_context
def network_create_safe(context, values):
    """Create a network row, returning None on a uniqueness conflict.

    Raises DuplicateVlan when *values* carries a vlan already used by a
    live network.
    """
    vlan = values.get('vlan')
    if vlan:
        dupe = model_query(context, models.Network, read_deleted="no").\
                       filter_by(vlan=vlan).\
                       first()
        if dupe:
            raise exception.DuplicateVlan(vlan=vlan)

    network = models.Network()
    network['uuid'] = str(utils.gen_uuid())
    network.update(values)

    try:
        network.save()
    except IntegrityError:
        return None
    return network
2191-
2192-
@require_admin_context
def network_delete_safe(context, network_id):
    """Delete a network, refusing when any of its IPs is still allocated."""
    session = get_session()
    with session.begin():
        # Guard first: any live, allocated fixed ip means the network is
        # still in use and must not be deleted.
        result = session.query(models.FixedIp).\
                filter_by(network_id=network_id).\
                filter_by(deleted=False).\
                filter_by(allocated=True).\
                all()
        if result:
            raise exception.NetworkInUse(network_id=network_id)
        network_ref = network_get(context, network_id=network_id,
                                  session=session)
        # Soft-delete the remaining (unallocated) fixed ips of this
        # network; literal_column keeps updated_at unchanged.
        session.query(models.FixedIp).\
                filter_by(network_id=network_id).\
                filter_by(deleted=False).\
                update({'deleted': True,
                        'updated_at': literal_column('updated_at'),
                        'deleted_at': timeutils.utcnow()})
        session.delete(network_ref)
2213-
2214-
@require_admin_context
def network_disassociate(context, network_id):
    """Clear both the project and host associations of a network."""
    updates = {'project_id': None, 'host': None}
    network_update(context, network_id, updates)
2219-
2220-
@require_context
def network_get(context, network_id, session=None, project_only='allow_none'):
    """Return the network with *network_id* or raise NetworkNotFound."""
    network = model_query(context, models.Network, session=session,
                          project_only=project_only).\
                      filter_by(id=network_id).\
                      first()
    if network is None:
        raise exception.NetworkNotFound(network_id=network_id)
    return network
2232-
2233-
@require_context
def network_get_all(context):
    """Return all non-deleted networks; raise NoNetworksFound if none."""
    networks = model_query(context, models.Network, read_deleted="no").all()
    if not networks:
        raise exception.NoNetworksFound()
    return networks
2242-
2243-
@require_context
def network_get_all_by_uuids(context, network_uuids,
                             project_only="allow_none"):
    """Return the networks for *network_uuids*, raising if any is missing."""
    result = model_query(context, models.Network, read_deleted="no",
                         project_only=project_only).\
                filter(models.Network.uuid.in_(network_uuids)).\
                all()

    if not result:
        raise exception.NoNetworksFound()

    #check if the result contains all the networks
    #we are looking for
    for network_uuid in network_uuids:
        found = False
        for network in result:
            if network['uuid'] == network_uuid:
                found = True
                break
        if not found:
            # NOTE(review): the default "allow_none" is a truthy string, so
            # this branch almost always raises NetworkNotFoundForProject,
            # even for lookups not scoped to a project — confirm intended.
            if project_only:
                raise exception.NetworkNotFoundForProject(
                    network_uuid=network_uuid, project_id=context.project_id)
            raise exception.NetworkNotFound(network_id=network_uuid)

    return result
2270-
# NOTE(vish): pylint complains because of the long method name, but
#             it fits with the names of the rest of the methods
# pylint: disable=C0103


@require_admin_context
def network_get_associated_fixed_ips(context, network_id, host=None):
    """Return dicts describing the allocated fixed IPs of a network."""
    # FIXME(sirp): since this returns fixed_ips, this would be better named
    # fixed_ip_get_all_by_network.
    # NOTE(vish): The ugly joins here are to solve a performance issue and
    #             should be removed once we can add and remove leases
    #             without regenerating the whole list
    vif_and = and_(models.VirtualInterface.id ==
                   models.FixedIp.virtual_interface_id,
                   models.VirtualInterface.deleted == False)
    inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid,
                    models.Instance.deleted == False)
    session = get_session()
    # Only the needed columns are selected; the positional indexes in the
    # repackaging loop below follow this exact column order.
    query = session.query(models.FixedIp.address,
                          models.FixedIp.instance_uuid,
                          models.FixedIp.network_id,
                          models.FixedIp.virtual_interface_id,
                          models.VirtualInterface.address,
                          models.Instance.hostname,
                          models.Instance.updated_at,
                          models.Instance.created_at).\
                          filter(models.FixedIp.deleted == False).\
                          filter(models.FixedIp.network_id == network_id).\
                          filter(models.FixedIp.allocated == True).\
                          join((models.VirtualInterface, vif_and)).\
                          join((models.Instance, inst_and)).\
                          filter(models.FixedIp.instance_uuid != None).\
                          filter(models.FixedIp.virtual_interface_id != None)
    if host:
        query = query.filter(models.Instance.host == host)
    result = query.all()
    data = []
    for datum in result:
        # Repackage each positional row tuple as a plain dict.
        cleaned = {}
        cleaned['address'] = datum[0]
        cleaned['instance_uuid'] = datum[1]
        cleaned['network_id'] = datum[2]
        cleaned['vif_id'] = datum[3]
        cleaned['vif_address'] = datum[4]
        cleaned['instance_hostname'] = datum[5]
        cleaned['instance_updated'] = datum[6]
        cleaned['instance_created'] = datum[7]
        data.append(cleaned)
    return data
2320-
2321-
@require_admin_context
def _network_get_query(context, session=None):
    """Base query over the non-deleted networks."""
    return model_query(context, models.Network, read_deleted="no",
                       session=session)
2326-
2327-
@require_admin_context
def network_get_by_bridge(context, bridge):
    """Return the network using *bridge* or raise NetworkNotFoundForBridge."""
    network = _network_get_query(context).filter_by(bridge=bridge).first()
    if network is None:
        raise exception.NetworkNotFoundForBridge(bridge=bridge)
    return network
2336-
2337-
@require_admin_context
def network_get_by_uuid(context, uuid):
    """Return the network with *uuid* or raise NetworkNotFoundForUUID."""
    network = _network_get_query(context).filter_by(uuid=uuid).first()
    if network is None:
        raise exception.NetworkNotFoundForUUID(uuid=uuid)
    return network
2346-
2347-
@require_admin_context
def network_get_by_cidr(context, cidr):
    """Return the network whose IPv4 or IPv6 cidr matches *cidr*."""
    cidr_match = or_(models.Network.cidr == cidr,
                     models.Network.cidr_v6 == cidr)
    network = _network_get_query(context).filter(cidr_match).first()
    if network is None:
        raise exception.NetworkNotFoundForCidr(cidr=cidr)
    return network
2359-
2360-
@require_admin_context
def network_get_by_instance(context, instance_id):
    """Return one network the instance has a fixed IP on.

    Note: the lookup goes through the fixed-IP relation, so it only
    works for networks the instance actually has an address from.
    """
    network = _network_get_query(context).\
                      filter_by(instance_id=instance_id).\
                      first()
    if network is None:
        raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
    return network
2373-
2374-
@require_admin_context
def network_get_all_by_instance(context, instance_id):
    """Return every network the instance has a fixed IP on."""
    networks = _network_get_query(context).\
                       filter_by(instance_id=instance_id).\
                       all()
    if not networks:
        raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
    return networks
2385-
2386-
@require_admin_context
def network_get_all_by_host(context, host):
    """Return networks tied to *host*, directly or through a fixed IP."""
    session = get_session()
    fixed_ip_networks = model_query(context, models.FixedIp.network_id,
                                    session=session).\
            filter(models.FixedIp.host == host)
    # NOTE(vish): match networks whose own host is set, plus networks
    #             that merely contain a fixed ip hosted there
    predicate = or_(models.Network.host == host,
                    models.Network.id.in_(fixed_ip_networks.subquery()))
    return _network_get_query(context, session=session).\
            filter(predicate).\
            all()
2400-
2401-
@require_admin_context
def network_set_host(context, network_id, host_id):
    """Claim *network_id* for *host_id* unless it is already hosted.

    Returns whichever host ends up owning the network.
    """
    session = get_session()
    with session.begin():
        network_ref = _network_get_query(context, session=session).\
                              filter_by(id=network_id).\
                              with_lockmode('update').\
                              first()

        if not network_ref:
            raise exception.NetworkNotFound(network_id=network_id)

        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not network_ref['host']:
            network_ref['host'] = host_id
            session.add(network_ref)

    return network_ref['host']
2421-
2422-
@require_context
def network_update(context, network_id, values):
    """Apply *values* to a network inside a transaction; return the row."""
    session = get_session()
    with session.begin():
        network = network_get(context, network_id, session=session)
        network.update(values)
        network.save(session=session)
    return network
2431-
2432-
2433-###################
2434-
2435-
@require_admin_context
def iscsi_target_count_by_host(context, host):
    """Count the iSCSI targets allocated to *host*."""
    query = model_query(context, models.IscsiTarget)
    return query.filter_by(host=host).count()
2441-
2442-
@require_admin_context
def iscsi_target_create_safe(context, values):
    """Create an iscsi_targets row, returning None instead of raising on
    a uniqueness conflict.
    """
    target = models.IscsiTarget()
    for key, value in values.iteritems():
        target[key] = value
    try:
        target.save()
    except IntegrityError:
        return None
    return target
2454-
2455-
2456-###################
2457-
2458-
@require_context
def quota_get(context, project_id, resource, session=None):
    """Return the quota row for (project, resource) or raise."""
    quota = model_query(context, models.Quota, session=session,
                        read_deleted="no").\
                    filter_by(project_id=project_id).\
                    filter_by(resource=resource).\
                    first()
    if quota is None:
        raise exception.ProjectQuotaNotFound(project_id=project_id)
    return quota
2471-
2472-
@require_context
def quota_get_all_by_project(context, project_id):
    """Return {'project_id': ..., <resource>: <hard_limit>, ...}."""
    authorize_project_context(context, project_id)

    result = {'project_id': project_id}
    rows = model_query(context, models.Quota, read_deleted="no").\
                   filter_by(project_id=project_id).\
                   all()
    for quota in rows:
        result[quota.resource] = quota.hard_limit
    return result
2486-
2487-
@require_admin_context
def quota_create(context, project_id, resource, limit):
    """Create and return a quota row limiting *resource* to *limit*."""
    quota = models.Quota()
    quota.update({'project_id': project_id,
                  'resource': resource,
                  'hard_limit': limit})
    quota.save()
    return quota
2496-
2497-
@require_admin_context
def quota_update(context, project_id, resource, limit):
    """Set the hard limit of an existing quota row to *limit*."""
    session = get_session()
    with session.begin():
        quota = quota_get(context, project_id, resource, session=session)
        quota.update({'hard_limit': limit})
        quota.save(session=session)
2505-
2506-
@require_admin_context
def quota_destroy(context, project_id, resource):
    """Delete the quota row for (project, resource)."""
    session = get_session()
    with session.begin():
        quota = quota_get(context, project_id, resource, session=session)
        quota.delete(session=session)
2513-
2514-
2515-###################
2516-
2517-
@require_context
def quota_class_get(context, class_name, resource, session=None):
    """Return the quota-class row for (class_name, resource) or raise."""
    quota_class = model_query(context, models.QuotaClass, session=session,
                              read_deleted="no").\
                    filter_by(class_name=class_name).\
                    filter_by(resource=resource).\
                    first()
    if quota_class is None:
        raise exception.QuotaClassNotFound(class_name=class_name)
    return quota_class
2530-
2531-
@require_context
def quota_class_get_all_by_name(context, class_name):
    """Return {'class_name': ..., <resource>: <hard_limit>, ...}."""
    authorize_quota_class_context(context, class_name)

    result = {'class_name': class_name}
    rows = model_query(context, models.QuotaClass, read_deleted="no").\
                   filter_by(class_name=class_name).\
                   all()
    for quota_class in rows:
        result[quota_class.resource] = quota_class.hard_limit
    return result
2545-
2546-
@require_admin_context
def quota_class_create(context, class_name, resource, limit):
    """Create and return a quota-class row."""
    quota_class = models.QuotaClass()
    quota_class.update({'class_name': class_name,
                        'resource': resource,
                        'hard_limit': limit})
    quota_class.save()
    return quota_class
2555-
2556-
@require_admin_context
def quota_class_update(context, class_name, resource, limit):
    """Set the hard limit of an existing quota-class row to *limit*."""
    session = get_session()
    with session.begin():
        quota_class = quota_class_get(context, class_name, resource,
                                      session=session)
        quota_class.update({'hard_limit': limit})
        quota_class.save(session=session)
2565-
2566-
@require_admin_context
def quota_class_destroy(context, class_name, resource):
    """Delete one quota-class row."""
    session = get_session()
    with session.begin():
        quota_class = quota_class_get(context, class_name, resource,
                                      session=session)
        quota_class.delete(session=session)
2574-
2575-
@require_admin_context
def quota_class_destroy_all_by_name(context, class_name):
    """Delete every quota-class row belonging to *class_name*."""
    session = get_session()
    with session.begin():
        rows = model_query(context, models.QuotaClass,
                           session=session, read_deleted="no").\
                       filter_by(class_name=class_name).\
                       all()
        for quota_class in rows:
            quota_class.delete(session=session)
2587-
2588-
2589-###################
2590-
2591-
@require_context
def quota_usage_get(context, project_id, resource, session=None):
    """Return the usage row for (project, resource) or raise."""
    usage = model_query(context, models.QuotaUsage, session=session,
                        read_deleted="no").\
                    filter_by(project_id=project_id).\
                    filter_by(resource=resource).\
                    first()
    if usage is None:
        raise exception.QuotaUsageNotFound(project_id=project_id)
    return usage
2604-
2605-
@require_context
def quota_usage_get_all_by_project(context, project_id):
    """Return per-resource usage dicts keyed by resource name."""
    authorize_project_context(context, project_id)

    result = {'project_id': project_id}
    rows = model_query(context, models.QuotaUsage, read_deleted="no").\
                   filter_by(project_id=project_id).\
                   all()
    for usage in rows:
        result[usage.resource] = dict(in_use=usage.in_use,
                                      reserved=usage.reserved)
    return result
2619-
2620-
@require_admin_context
def quota_usage_create(context, project_id, resource, in_use, reserved,
                       until_refresh, session=None):
    """Create and return a quota_usages row with the given counters."""
    usage = models.QuotaUsage()
    usage.update({'project_id': project_id,
                  'resource': resource,
                  'in_use': in_use,
                  'reserved': reserved,
                  'until_refresh': until_refresh})
    usage.save(session=session)

    return usage
2633-
2634-
@require_admin_context
def quota_usage_update(context, project_id, resource, in_use, reserved,
                       until_refresh, session=None):
    """Overwrite the usage counters for (project, resource).

    When *session* is supplied the caller is assumed to hold an open
    transaction; otherwise a fresh one is started here.
    """
    def _apply(session):
        usage = quota_usage_get(context, project_id, resource,
                                session=session)
        usage.in_use = in_use
        usage.reserved = reserved
        usage.until_refresh = until_refresh
        usage.save(session=session)

    if session:
        # Assume caller started a transaction
        _apply(session)
    else:
        session = get_session()
        with session.begin():
            _apply(session)
2653-
2654-
@require_admin_context
def quota_usage_destroy(context, project_id, resource):
    """Delete the usage row for (project, resource)."""
    session = get_session()
    with session.begin():
        usage = quota_usage_get(context, project_id, resource,
                                session=session)
        usage.delete(session=session)
2662-
2663-
2664-###################
2665-
2666-
@require_context
def reservation_get(context, uuid, session=None):
    """Return the reservation with *uuid* or raise ReservationNotFound."""
    reservation = model_query(context, models.Reservation, session=session,
                              read_deleted="no").\
                    filter_by(uuid=uuid).\
                    first()
    if reservation is None:
        raise exception.ReservationNotFound(uuid=uuid)
    return reservation
2678-
2679-
@require_context
def reservation_get_all_by_project(context, project_id):
    """Return a project's pending reservations, grouped by resource.

    :returns: {'project_id': ...,
               <resource>: {<reservation uuid>: <delta>, ...}, ...}
    """
    authorize_project_context(context, project_id)

    # Query the reservations table, not quota_usages: the rows must carry
    # the per-reservation `uuid` and `delta` consumed below (Reservation
    # columns — see reservation_create), which QuotaUsage rows do not
    # have, so the old QuotaUsage query broke on attribute access.
    rows = model_query(context, models.Reservation, read_deleted="no").\
                   filter_by(project_id=project_id).\
                   all()

    result = {'project_id': project_id}
    for row in rows:
        result.setdefault(row.resource, {})
        result[row.resource][row.uuid] = row.delta

    return result
2694-
2695-
@require_admin_context
def reservation_create(context, uuid, usage, project_id, resource, delta,
                       expire, session=None):
    """Create and return a reservation row tied to the given usage row."""
    reservation = models.Reservation()
    reservation.update({'uuid': uuid,
                        'usage_id': usage['id'],
                        'project_id': project_id,
                        'resource': resource,
                        'delta': delta,
                        'expire': expire})
    reservation.save(session=session)
    return reservation
2708-
2709-
@require_admin_context
def reservation_destroy(context, uuid):
    """Delete the reservation identified by *uuid*."""
    session = get_session()
    with session.begin():
        reservation = reservation_get(context, uuid, session=session)
        reservation.delete(session=session)
2716-
2717-
2718-###################
2719-
2720-
2721-# NOTE(johannes): The quota code uses SQL locking to ensure races don't
2722-# cause under or over counting of resources. To avoid deadlocks, this
2723-# code always acquires the lock on quota_usages before acquiring the lock
2724-# on reservations.
2725-
def _get_quota_usages(context, session):
    """Return this project's usage rows keyed by resource, row-locked.

    Broken out of quota_reserve for testability.
    """
    usages = model_query(context, models.QuotaUsage,
                         read_deleted="no",
                         session=session).\
                     filter_by(project_id=context.project_id).\
                     with_lockmode('update').\
                     all()
    result = {}
    for usage in usages:
        result[usage.resource] = usage
    return result
2735-
2736-
@require_context
def quota_reserve(context, resources, quotas, deltas, expire,
                  until_refresh, max_age):
    """Create reservations for the requested resource deltas.

    Refreshes stale/desynced usage records, checks each positive delta
    against the project's quota and, when nothing would go over quota,
    records one reservation per resource and bumps the reserved
    counters.

    :param resources: dict of resource name to resource object, each
                      providing a ``sync`` routine.
    :param quotas: dict of resource name to limit (< 0 means unlimited).
    :param deltas: dict of resource name to requested change.
    :param expire: expiration timestamp stored on each reservation.
    :param until_refresh: countdown seed for count-based usage refresh,
                          or falsy to disable it.
    :param max_age: maximum age in seconds of a usage row before it is
                    refreshed, or falsy to disable age-based refresh.
    :returns: list of reservation uuids.
    :raises exception.OverQuota: when any positive delta would exceed
                                 its quota.
    """
    elevated = context.elevated()
    session = get_session()
    with session.begin():
        # Get the current usages
        usages = _get_quota_usages(context, session)

        # Handle usage refresh
        work = set(deltas.keys())
        while work:
            resource = work.pop()

            # Do we need to refresh the usage?
            refresh = False
            if resource not in usages:
                usages[resource] = quota_usage_create(elevated,
                                                      context.project_id,
                                                      resource,
                                                      0, 0,
                                                      until_refresh or None,
                                                      session=session)
                refresh = True
            elif usages[resource].in_use < 0:
                # Negative in_use count indicates a desync, so try to
                # heal from that...
                refresh = True
            elif usages[resource].until_refresh is not None:
                usages[resource].until_refresh -= 1
                if usages[resource].until_refresh <= 0:
                    refresh = True
            elif max_age and (timeutils.utcnow() -
                    usages[resource].updated_at).seconds >= max_age:
                # NOTE: elapsed age must be "now - updated_at".  The
                # reversed subtraction produces a negative timedelta
                # whose .seconds attribute (0..86399 after
                # normalization) is meaningless for this comparison.
                refresh = True

            # OK, refresh the usage
            if refresh:
                # Grab the sync routine
                sync = resources[resource].sync

                updates = sync(elevated, context.project_id, session)
                for res, in_use in updates.items():
                    # Make sure we have a destination for the usage!
                    if res not in usages:
                        usages[res] = quota_usage_create(elevated,
                                                         context.project_id,
                                                         res,
                                                         0, 0,
                                                         until_refresh or None,
                                                         session=session)

                    # Update the usage
                    usages[res].in_use = in_use
                    usages[res].until_refresh = until_refresh or None

                    # Because more than one resource may be refreshed
                    # by the call to the sync routine, and we don't
                    # want to double-sync, we make sure all refreshed
                    # resources are dropped from the work set.
                    work.discard(res)

                    # NOTE(Vek): We make the assumption that the sync
                    #            routine actually refreshes the
                    #            resources that it is the sync routine
                    #            for.  We don't check, because this is
                    #            a best-effort mechanism.

        # Check for deltas that would go negative
        unders = [resource for resource, delta in deltas.items()
                  if delta < 0 and
                  delta + usages[resource].in_use < 0]

        # Now, let's check the quotas
        # NOTE(Vek): We're only concerned about positive increments.
        #            If a project has gone over quota, we want them to
        #            be able to reduce their usage without any
        #            problems.
        overs = [resource for resource, delta in deltas.items()
                 if quotas[resource] >= 0 and delta >= 0 and
                 quotas[resource] < delta + usages[resource].total]

        # NOTE(Vek): The quota check needs to be in the transaction,
        #            but the transaction doesn't fail just because
        #            we're over quota, so the OverQuota raise is
        #            outside the transaction.  If we did the raise
        #            here, our usage updates would be discarded, but
        #            they're not invalidated by being over-quota.

        # Create the reservations
        if not overs:
            reservations = []
            for resource, delta in deltas.items():
                reservation = reservation_create(elevated,
                                                 str(utils.gen_uuid()),
                                                 usages[resource],
                                                 context.project_id,
                                                 resource, delta, expire,
                                                 session=session)
                reservations.append(reservation.uuid)

                # Also update the reserved quantity
                # NOTE(Vek): Again, we are only concerned here about
                #            positive increments.  Here, though, we're
                #            worried about the following scenario:
                #
                #            1) User initiates resize down.
                #            2) User allocates a new instance.
                #            3) Resize down fails or is reverted.
                #            4) User is now over quota.
                #
                #            To prevent this, we only update the
                #            reserved value if the delta is positive.
                if delta > 0:
                    usages[resource].reserved += delta

        # Apply updates to the usages table
        for usage_ref in usages.values():
            usage_ref.save(session=session)

    if unders:
        LOG.warning(_("Change will make usage less than 0 for the following "
                      "resources: %(unders)s") % locals())
    if overs:
        usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved']))
                      for k, v in usages.items())
        raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
                                  usages=usages)

    return reservations
2867-
2868-
def _quota_reservations(session, context, reservations):
    """Fetch and row-lock the reservation rows for the given uuids."""
    # Locked so concurrent commit/rollback of the same reservations
    # serializes; quota_usages must already be locked (see the lock
    # ordering note at the top of the quota section).
    query = model_query(context, models.Reservation,
                        read_deleted="no", session=session)
    query = query.filter(models.Reservation.uuid.in_(reservations))
    return query.with_lockmode('update').all()
2879-
2880-
@require_context
def reservation_commit(context, reservations):
    """Apply the listed reservations to the quota usage records.

    For each reservation: the delta is moved out of ``reserved`` (only
    when non-negative, since negative deltas were never reserved) and
    into ``in_use``, then the reservation row is deleted.  Usages are
    locked before reservations per the section's lock-ordering rule.
    """
    session = get_session()
    with session.begin():
        usages = _get_quota_usages(context, session)

        for reservation in _quota_reservations(session, context, reservations):
            usage = usages[reservation.resource]
            if reservation.delta >= 0:
                usage.reserved -= reservation.delta
            usage.in_use += reservation.delta

            reservation.delete(session=session)

        for usage in usages.values():
            usage.save(session=session)
2897-
2898-
@require_context
def reservation_rollback(context, reservations):
    """Discard the listed reservations without touching ``in_use``.

    Releases the ``reserved`` amount held by each non-negative-delta
    reservation and deletes the reservation rows.  Usages are locked
    before reservations per the section's lock-ordering rule.
    """
    session = get_session()
    with session.begin():
        usages = _get_quota_usages(context, session)

        for reservation in _quota_reservations(session, context, reservations):
            usage = usages[reservation.resource]
            if reservation.delta >= 0:
                usage.reserved -= reservation.delta

            reservation.delete(session=session)

        for usage in usages.values():
            usage.save(session=session)
2914-
2915-
@require_admin_context
def quota_destroy_all_by_project(context, project_id):
    """Delete every quota, quota-usage and reservation row of a project.

    All three tables are purged inside a single transaction.
    """
    session = get_session()
    with session.begin():
        for model in (models.Quota, models.QuotaUsage, models.Reservation):
            rows = (model_query(context, model, session=session,
                                read_deleted="no").
                    filter_by(project_id=project_id).
                    all())
            for row in rows:
                row.delete(session=session)
2943-
2944-
@require_admin_context
def reservation_expire(context):
    """Delete reservations past their expiry, releasing reserved quota.

    For each expired reservation with a non-negative delta, the
    matching usage's ``reserved`` counter is decremented before the
    reservation row is removed.
    """
    session = get_session()
    with session.begin():
        current_time = timeutils.utcnow()
        results = model_query(context, models.Reservation, session=session,
                              read_deleted="no").\
                          filter(models.Reservation.expire < current_time).\
                          all()

        # Iterating an empty result list is a no-op, so no emptiness
        # guard is needed.
        for reservation in results:
            if reservation.delta >= 0:
                reservation.usage.reserved -= reservation.delta
                reservation.usage.save(session=session)

            reservation.delete(session=session)
2962-
2963-
2964-###################
2965-
2966-
@require_admin_context
def volume_allocate_iscsi_target(context, volume_id, host):
    """Claim a free iSCSI target on *host* for the given volume.

    Row-locks the first unassigned target on the host, binds it to the
    volume, and returns its target number.

    :raises db.NoMoreTargets: when the host has no free targets.
    """
    session = get_session()
    with session.begin():
        iscsi_target_ref = model_query(context, models.IscsiTarget,
                                       session=session, read_deleted="no").\
                                filter_by(volume=None).\
                                filter_by(host=host).\
                                with_lockmode('update').\
                                first()

        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not iscsi_target_ref:
            raise db.NoMoreTargets()

        iscsi_target_ref.volume_id = volume_id
        session.add(iscsi_target_ref)

    return iscsi_target_ref.target_num
2987-
2988-
@require_admin_context
def volume_attached(context, volume_id, instance_uuid, mountpoint):
    """Mark a volume as attached to an instance at the given mountpoint."""
    if not utils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(instance_uuid)

    session = get_session()
    with session.begin():
        volume_ref = volume_get(context, volume_id, session=session)
        attach_values = {'status': 'in-use',
                         'mountpoint': mountpoint,
                         'attach_status': 'attached',
                         'instance_uuid': instance_uuid,
                         'attach_time': timeutils.utcnow()}
        for field, value in attach_values.items():
            volume_ref[field] = value
        volume_ref.save(session=session)
3003-
3004-
@require_context
def volume_create(context, values):
    """Create a volume from the given values.

    A uuid id is generated when the caller supplies none, and any
    'metadata' dict in values is expanded into VolumeMetadata refs
    before saving.  Returns the freshly re-fetched volume so joined
    relations are loaded.
    """
    values['volume_metadata'] = _metadata_refs(values.get('metadata'),
                                               models.VolumeMetadata)
    volume_ref = models.Volume()
    if not values.get('id'):
        values['id'] = str(utils.gen_uuid())
    volume_ref.update(values)

    session = get_session()
    with session.begin():
        volume_ref.save(session=session)

    # Re-read through the same session so metadata/volume_type come back
    # joined.
    return volume_get(context, values['id'], session=session)
3019-
3020-
@require_admin_context
def volume_data_get_for_project(context, project_id, session=None):
    """Return a (volume count, total volume size) tuple for a project."""
    row = (model_query(context,
                       func.count(models.Volume.id),
                       func.sum(models.Volume.size),
                       read_deleted="no",
                       session=session).
           filter_by(project_id=project_id).
           first())

    # NOTE(vish): convert None to 0
    return (row[0] or 0, row[1] or 0)
3033-
3034-
@require_admin_context
def volume_destroy(context, volume_id):
    """Soft-delete a volume and its metadata and free its iSCSI target.

    ``literal_column('updated_at')`` keeps the bulk updates from
    touching the updated_at timestamp.  Returns the pre-delete volume
    ref.
    """
    session = get_session()
    with session.begin():
        volume_ref = volume_get(context, volume_id, session=session)
        session.query(models.Volume).\
                filter_by(id=volume_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.IscsiTarget).\
                filter_by(volume_id=volume_id).\
                update({'volume_id': None})
        session.query(models.VolumeMetadata).\
                filter_by(volume_id=volume_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
    return volume_ref
3054-
3055-
@require_admin_context
def volume_detached(context, volume_id):
    """Mark a volume as detached and clear its attachment fields."""
    session = get_session()
    with session.begin():
        volume_ref = volume_get(context, volume_id, session=session)
        for field, value in (('status', 'available'),
                             ('mountpoint', None),
                             ('attach_status', 'detached'),
                             ('instance_uuid', None)):
            volume_ref[field] = value
        volume_ref.save(session=session)
3066-
3067-
@require_context
def _volume_get_query(context, session=None, project_only=False):
    """Base volume query with metadata and volume_type eagerly joined."""
    query = model_query(context, models.Volume, session=session,
                        project_only=project_only)
    query = query.options(joinedload('volume_metadata'))
    return query.options(joinedload('volume_type'))
3074-
3075-
@require_context
def _ec2_volume_get_query(context, session=None):
    """Base query over the EC2 id <-> uuid volume mapping table."""
    return model_query(context, models.VolumeIdMapping, session=session)
3079-
3080-
@require_context
def _ec2_snapshot_get_query(context, session=None):
    """Base query over the EC2 id <-> uuid snapshot mapping table."""
    return model_query(context, models.SnapshotIdMapping, session=session)
3084-
3085-
@require_context
def volume_get(context, volume_id, session=None):
    """Return a volume by id, scoped to the caller's project.

    :raises exception.VolumeNotFound: when no matching volume exists.
    """
    result = (_volume_get_query(context, session=session,
                                project_only=True).
              filter_by(id=volume_id).
              first())

    if result is None:
        raise exception.VolumeNotFound(volume_id=volume_id)

    return result
3096-
3097-
@require_admin_context
def volume_get_all(context):
    """Return every volume, with metadata and type joined."""
    query = _volume_get_query(context)
    return query.all()
3101-
3102-
@require_admin_context
def volume_get_all_by_host(context, host):
    """Return every volume hosted on the given host."""
    query = _volume_get_query(context).filter_by(host=host)
    return query.all()
3106-
3107-
@require_admin_context
def volume_get_all_by_instance_uuid(context, instance_uuid):
    """Return all non-deleted volumes attached to the given instance."""
    # NOTE: .all() already yields [] when nothing matches, so the old
    # explicit empty-result check was redundant and has been dropped.
    return model_query(context, models.Volume, read_deleted="no").\
        options(joinedload('volume_metadata')).\
        options(joinedload('volume_type')).\
        filter_by(instance_uuid=instance_uuid).\
        all()
3120-
3121-
@require_context
def volume_get_all_by_project(context, project_id):
    """Return every volume owned by the given project."""
    authorize_project_context(context, project_id)
    query = _volume_get_query(context).filter_by(project_id=project_id)
    return query.all()
3126-
3127-
@require_admin_context
def volume_get_iscsi_target_num(context, volume_id):
    """Return the iSCSI target number bound to a volume.

    :raises exception.ISCSITargetNotFoundForVolume: if no target
        (including soft-deleted ones) references the volume.
    """
    target = (model_query(context, models.IscsiTarget,
                          read_deleted="yes").
              filter_by(volume_id=volume_id).
              first())

    if target is None:
        raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id)

    return target.target_num
3138-
3139-
@require_context
def volume_update(context, volume_id, values):
    """Update a volume with the given values.

    Any 'metadata' key is popped out of values and applied through
    volume_metadata_update (replacing the existing metadata) before the
    remaining columns are saved.  Returns the updated volume ref.
    """
    session = get_session()
    volume_ref = volume_get(context, volume_id, session=session)
    metadata = values.get('metadata')
    if metadata is not None:
        # delete=True replaces the volume's metadata wholesale.
        volume_metadata_update(context,
                               volume_id,
                               values.pop('metadata'),
                               delete=True)
    with session.begin():
        volume_ref.update(values)
        volume_ref.save(session=session)

    return volume_ref
3155-
3156-
@require_context
def ec2_volume_create(context, volume_uuid, id=None):
    """Create an EC2-compatible id mapping for the given volume uuid."""
    values = {'uuid': volume_uuid}
    if id is not None:
        values['id'] = id

    ec2_volume_ref = models.VolumeIdMapping()
    ec2_volume_ref.update(values)
    ec2_volume_ref.save()

    return ec2_volume_ref
3168-
3169-
@require_context
def get_ec2_volume_id_by_uuid(context, volume_id, session=None):
    """Map a volume uuid to its EC2 integer id.

    :raises exception.VolumeNotFound: when no mapping exists.
    """
    mapping = (_ec2_volume_get_query(context, session=session).
               filter_by(uuid=volume_id).
               first())

    if mapping is None:
        raise exception.VolumeNotFound(volume_id=volume_id)

    return mapping['id']
3180-
3181-
@require_context
def get_volume_uuid_by_ec2_id(context, ec2_id, session=None):
    """Map an EC2 integer volume id back to its uuid.

    :raises exception.VolumeNotFound: when no mapping exists.
    """
    mapping = (_ec2_volume_get_query(context, session=session).
               filter_by(id=ec2_id).
               first())

    if mapping is None:
        raise exception.VolumeNotFound(volume_id=ec2_id)

    return mapping['uuid']
3192-
3193-
@require_context
def ec2_snapshot_create(context, snapshot_uuid, id=None):
    """Create an EC2-compatible id mapping for the given snapshot uuid."""
    values = {'uuid': snapshot_uuid}
    if id is not None:
        values['id'] = id

    ec2_snapshot_ref = models.SnapshotIdMapping()
    ec2_snapshot_ref.update(values)
    ec2_snapshot_ref.save()

    return ec2_snapshot_ref
3205-
3206-
@require_context
def get_ec2_snapshot_id_by_uuid(context, snapshot_id, session=None):
    """Map a snapshot uuid to its EC2 integer id.

    :raises exception.SnapshotNotFound: when no mapping exists.
    """
    mapping = (_ec2_snapshot_get_query(context, session=session).
               filter_by(uuid=snapshot_id).
               first())

    if mapping is None:
        raise exception.SnapshotNotFound(snapshot_id=snapshot_id)

    return mapping['id']
3217-
3218-
@require_context
def get_snapshot_uuid_by_ec2_id(context, ec2_id, session=None):
    """Map an EC2 integer snapshot id back to its uuid.

    :raises exception.SnapshotNotFound: when no mapping exists.
    """
    mapping = (_ec2_snapshot_get_query(context, session=session).
               filter_by(id=ec2_id).
               first())

    if mapping is None:
        raise exception.SnapshotNotFound(snapshot_id=ec2_id)

    return mapping['uuid']
3229-
3230-
3231-####################
3232-
def _volume_metadata_get_query(context, volume_id, session=None):
    """Base query over a volume's non-deleted metadata rows."""
    query = model_query(context, models.VolumeMetadata,
                        session=session, read_deleted="no")
    return query.filter_by(volume_id=volume_id)
3237-
3238-
@require_context
@require_volume_exists
def volume_metadata_get(context, volume_id):
    """Return a volume's metadata as a plain {key: value} dict."""
    rows = _volume_metadata_get_query(context, volume_id).all()
    return dict((row['key'], row['value']) for row in rows)
3248-
3249-
@require_context
@require_volume_exists
def volume_metadata_delete(context, volume_id, key):
    """Soft-delete one metadata item of a volume without touching
    updated_at."""
    query = _volume_metadata_get_query(context, volume_id).filter_by(key=key)
    query.update({'deleted': True,
                  'deleted_at': timeutils.utcnow(),
                  'updated_at': literal_column('updated_at')})
3258-
3259-
@require_context
@require_volume_exists
def volume_metadata_get_item(context, volume_id, key, session=None):
    """Return the metadata row for (volume_id, key).

    :raises exception.VolumeMetadataNotFound: when the key is absent.
    """
    meta_row = (_volume_metadata_get_query(context, volume_id,
                                           session=session).
                filter_by(key=key).
                first())

    if meta_row is None:
        raise exception.VolumeMetadataNotFound(metadata_key=key,
                                               volume_id=volume_id)
    return meta_row
3271-
3272-
@require_context
@require_volume_exists
def volume_metadata_update(context, volume_id, metadata, delete):
    """Update a volume's metadata from the given dict.

    Existing keys are updated in place; missing keys are created.  When
    *delete* is True, keys not present in *metadata* are soft-deleted,
    making the dict a full replacement.  Returns *metadata* unchanged.
    """
    session = get_session()

    # Set existing metadata to deleted if delete argument is True
    if delete:
        original_metadata = volume_metadata_get(context, volume_id)
        for meta_key, meta_value in original_metadata.iteritems():
            if meta_key not in metadata:
                meta_ref = volume_metadata_get_item(context, volume_id,
                                                    meta_key, session)
                meta_ref.update({'deleted': True})
                meta_ref.save(session=session)

    # Now update all existing items with new values, or create new meta
    # objects
    for meta_key, meta_value in metadata.iteritems():

        # update the value whether it exists or not
        item = {"value": meta_value}

        try:
            meta_ref = volume_metadata_get_item(context, volume_id,
                                                meta_key, session)
        except exception.VolumeMetadataNotFound:
            # NOTE: the exception instance was previously bound to an
            # unused variable with Py2-only "except E, e" syntax; the
            # binding has been dropped.
            meta_ref = models.VolumeMetadata()
            item.update({"key": meta_key, "volume_id": volume_id})

        meta_ref.update(item)
        meta_ref.save(session=session)

    return metadata
3307-
3308-
3309-###################
3310-
3311-
@require_context
def snapshot_create(context, values):
    """Create a snapshot row, generating a uuid id if none is given."""
    if not values.get('id'):
        values['id'] = str(utils.gen_uuid())

    snapshot_ref = models.Snapshot()
    snapshot_ref.update(values)

    session = get_session()
    with session.begin():
        snapshot_ref.save(session=session)
    return snapshot_ref
3323-
3324-
@require_admin_context
def snapshot_destroy(context, snapshot_id):
    """Soft-delete a snapshot without touching its updated_at."""
    session = get_session()
    with session.begin():
        query = session.query(models.Snapshot).filter_by(id=snapshot_id)
        query.update({'deleted': True,
                      'deleted_at': timeutils.utcnow(),
                      'updated_at': literal_column('updated_at')})
3334-
3335-
@require_context
def snapshot_get(context, snapshot_id, session=None):
    """Return a snapshot by id, scoped to the caller's project.

    :raises exception.SnapshotNotFound: when no match exists.
    """
    snapshot = (model_query(context, models.Snapshot, session=session,
                            project_only=True).
                filter_by(id=snapshot_id).
                first())

    if snapshot is None:
        raise exception.SnapshotNotFound(snapshot_id=snapshot_id)

    return snapshot
3347-
3348-
@require_admin_context
def snapshot_get_all(context):
    """Return every snapshot record."""
    query = model_query(context, models.Snapshot)
    return query.all()
3352-
3353-
@require_context
def snapshot_get_all_for_volume(context, volume_id):
    """Return all non-deleted snapshots of one volume (project-scoped)."""
    query = model_query(context, models.Snapshot, read_deleted='no',
                        project_only=True)
    return query.filter_by(volume_id=volume_id).all()
3359-
3360-
@require_context
def snapshot_get_all_by_project(context, project_id):
    """Return every snapshot owned by the given project."""
    authorize_project_context(context, project_id)
    query = model_query(context, models.Snapshot)
    return query.filter_by(project_id=project_id).all()
3367-
3368-
@require_context
def snapshot_update(context, snapshot_id, values):
    """Apply values to an existing snapshot inside one transaction."""
    session = get_session()
    with session.begin():
        snapshot = snapshot_get(context, snapshot_id, session=session)
        snapshot.update(values)
        snapshot.save(session=session)
3376-
3377-
3378-###################
3379-
3380-
def _block_device_mapping_get_query(context, session=None):
    """Base query over the block device mapping table."""
    return model_query(context, models.BlockDeviceMapping, session=session)
3383-
3384-
@require_context
def block_device_mapping_create(context, values):
    """Persist a new block device mapping row from the given values."""
    bdm_ref = models.BlockDeviceMapping()
    bdm_ref.update(values)

    session = get_session()
    with session.begin():
        bdm_ref.save(session=session)
3393-
3394-
@require_context
def block_device_mapping_update(context, bdm_id, values):
    """Bulk-update the block device mapping with the given id."""
    session = get_session()
    with session.begin():
        query = _block_device_mapping_get_query(context, session=session)
        query.filter_by(id=bdm_id).update(values)
3402-
3403-
@require_context
def block_device_mapping_update_or_create(context, values):
    """Update the BDM matching (instance_uuid, device_name), or create it.

    Afterwards, any other mapping on the same instance that uses the
    same swap/ephemeral virtual_name is soft-deleted, so a virtual
    device name stays bound to a single device.
    """
    session = get_session()
    with session.begin():
        result = _block_device_mapping_get_query(context, session=session).\
                 filter_by(instance_uuid=values['instance_uuid']).\
                 filter_by(device_name=values['device_name']).\
                 first()
        if not result:
            bdm_ref = models.BlockDeviceMapping()
            bdm_ref.update(values)
            bdm_ref.save(session=session)
        else:
            result.update(values)

        # NOTE(yamahata): same virtual device name can be specified multiple
        #                 times. So delete the existing ones.
        virtual_name = values['virtual_name']
        if (virtual_name is not None and
            block_device.is_swap_or_ephemeral(virtual_name)):
            session.query(models.BlockDeviceMapping).\
                filter_by(instance_uuid=values['instance_uuid']).\
                filter_by(virtual_name=virtual_name).\
                filter(models.BlockDeviceMapping.device_name !=
                       values['device_name']).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
3432-
3433-
@require_context
def block_device_mapping_get_all_by_instance(context, instance_uuid):
    """Return every block device mapping of the given instance."""
    query = _block_device_mapping_get_query(context)
    return query.filter_by(instance_uuid=instance_uuid).all()
3439-
3440-
@require_context
def block_device_mapping_destroy(context, bdm_id):
    """Soft-delete a block device mapping without touching updated_at."""
    session = get_session()
    with session.begin():
        query = session.query(models.BlockDeviceMapping).filter_by(id=bdm_id)
        query.update({'deleted': True,
                      'deleted_at': timeutils.utcnow(),
                      'updated_at': literal_column('updated_at')})
3450-
3451-
@require_context
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
                                                        volume_id):
    """Soft-delete the BDM linking the given instance and volume."""
    session = get_session()
    with session.begin():
        query = (_block_device_mapping_get_query(context, session=session).
                 filter_by(instance_uuid=instance_uuid).
                 filter_by(volume_id=volume_id))
        query.update({'deleted': True,
                      'deleted_at': timeutils.utcnow(),
                      'updated_at': literal_column('updated_at')})
3463-
3464-
@require_context
def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
                                                        device_name):
    """Soft-delete the BDM linking the given instance and device name."""
    session = get_session()
    with session.begin():
        query = (_block_device_mapping_get_query(context, session=session).
                 filter_by(instance_uuid=instance_uuid).
                 filter_by(device_name=device_name))
        query.update({'deleted': True,
                      'deleted_at': timeutils.utcnow(),
                      'updated_at': literal_column('updated_at')})
3476-
3477-
3478-###################
3479-
def _security_group_get_query(context, session=None, read_deleted=None,
                              project_only=False, join_rules=True):
    """Base security group query, optionally eager-loading its rules."""
    query = model_query(context, models.SecurityGroup, session=session,
                        read_deleted=read_deleted, project_only=project_only)
    if not join_rules:
        return query
    return query.options(joinedload_all('rules'))
3487-
3488-
def _security_group_get_by_names(context, session, project_id, group_names):
    """Return the security group models for *group_names* in a project.

    :raises exception.SecurityGroupNotFoundForProject: for the first
        requested name that has no matching group.
    """
    query = (_security_group_get_query(context, session=session,
                                       read_deleted="no",
                                       join_rules=False).
             filter_by(project_id=project_id).
             filter(models.SecurityGroup.name.in_(group_names)))
    sg_models = query.all()
    if len(sg_models) == len(group_names):
        return sg_models

    # Some requested name matched nothing -- report the first one missing.
    found_names = set(sg.name for sg in sg_models)
    for group_name in group_names:
        if group_name not in found_names:
            raise exception.SecurityGroupNotFoundForProject(
                project_id=project_id, security_group_id=group_name)
    # Not Reached
3508-
3509-
@require_context
def security_group_get_all(context):
    """Return every security group, rules joined."""
    query = _security_group_get_query(context)
    return query.all()
3513-
3514-
@require_context
def security_group_get(context, security_group_id, session=None):
    """Return a security group by id with rules and instances joined.

    :raises exception.SecurityGroupNotFound: when no match exists.
    """
    group = (_security_group_get_query(context, session=session,
                                       project_only=True).
             filter_by(id=security_group_id).
             options(joinedload_all('instances')).
             first())

    if group is None:
        raise exception.SecurityGroupNotFound(
            security_group_id=security_group_id)

    return group
3528-
3529-
@require_context
def security_group_get_by_name(context, project_id, group_name,
                               columns_to_join=None, session=None):
    """Return the named security group in a project.

    By default both 'instances' and 'rules' are eagerly joined; pass
    *columns_to_join* to override.

    :raises exception.SecurityGroupNotFoundForProject: when absent.
    """
    if session is None:
        session = get_session()

    query = (_security_group_get_query(context, session=session,
                                       read_deleted="no",
                                       join_rules=False).
             filter_by(project_id=project_id).
             filter_by(name=group_name))

    if columns_to_join is None:
        columns_to_join = ['instances', 'rules']

    for column in columns_to_join:
        query = query.options(joinedload_all(column))

    group = query.first()
    if group is None:
        raise exception.SecurityGroupNotFoundForProject(
            project_id=project_id, security_group_id=group_name)

    return group
3553-
3554-
@require_context
def security_group_get_by_project(context, project_id):
    """Return every non-deleted security group of a project."""
    query = _security_group_get_query(context, read_deleted="no")
    return query.filter_by(project_id=project_id).all()
3560-
3561-
@require_context
def security_group_get_by_instance(context, instance_id):
    """Return every security group associated with an instance."""
    query = _security_group_get_query(context, read_deleted="no")
    return (query.join(models.SecurityGroup.instances).
            filter_by(id=instance_id).
            all())
3568-
3569-
@require_context
def security_group_exists(context, project_id, group_name):
    """Return True when the named group exists in the project."""
    try:
        # Raises NotFound when the group is absent; a returned group is
        # always truthy.
        security_group_get_by_name(context, project_id, group_name)
    except exception.NotFound:
        return False
    return True
3577-
3578-
@require_context
def security_group_in_use(context, group_id):
    """Return True if a non-deleted instance still belongs to the group."""
    session = get_session()
    with session.begin():
        # Are there any instances that haven't been deleted
        # that include this group?
        associations = (session.query(
                            models.SecurityGroupInstanceAssociation).
                        filter_by(security_group_id=group_id).
                        filter_by(deleted=False).
                        all())
        for association in associations:
            live_instances = (session.query(models.Instance).
                              filter_by(deleted=False).
                              filter_by(uuid=association.instance_uuid).
                              count())
            if live_instances:
                return True

    return False
3598-
3599-
@require_context
def security_group_create(context, values, session=None):
    """Create a security group from values and return the saved ref."""
    security_group_ref = models.SecurityGroup()
    # FIXME(devcamcar): Unless I do this, rules fails with lazy load exception
    # once save() is called. This will get cleaned up in next orm pass.
    security_group_ref.rules
    security_group_ref.update(values)
    if session is None:
        session = get_session()
    security_group_ref.save(session=session)
    return security_group_ref
3611-
3612-
def security_group_ensure_default(context, session=None):
    """Ensure default security group exists for a project_id.

    Returns a tuple with the first element being a bool indicating
    if the default security group previously existed. Second
    element is the dict used to create the default security group.
    """
    try:
        # Join no extra columns: only existence matters here.
        default_group = security_group_get_by_name(context,
                        context.project_id, 'default',
                        columns_to_join=[], session=session)
        return (True, default_group)
    except exception.NotFound:
        values = {'name': 'default',
                  'description': 'default',
                  'user_id': context.user_id,
                  'project_id': context.project_id}
        default_group = security_group_create(context, values,
                session=session)
        return (False, default_group)
3633-
3634-
@require_context
def security_group_destroy(context, security_group_id):
    """Soft-delete a security group, its instance links and its rules."""
    session = get_session()
    with session.begin():
        soft_delete = {'deleted': True,
                       'deleted_at': timeutils.utcnow(),
                       'updated_at': literal_column('updated_at')}
        # Each related table references the group through a different
        # column name.
        targets = ((models.SecurityGroup, 'id'),
                   (models.SecurityGroupInstanceAssociation,
                    'security_group_id'),
                   (models.SecurityGroupIngressRule, 'group_id'))
        for model, column in targets:
            session.query(model).\
                    filter_by(**{column: security_group_id}).\
                    update(soft_delete)
3654-
3655-
@require_context
def security_group_count_by_project(context, project_id, session=None):
    """Count the project's non-deleted security groups."""
    authorize_project_context(context, project_id)
    query = model_query(context, models.SecurityGroup, read_deleted="no",
                        session=session)
    return query.filter_by(project_id=project_id).count()
3663-
3664-###################
3665-
3666-
def _security_group_rule_get_query(context, session=None):
    """Base query over the security group ingress rule table."""
    return model_query(context, models.SecurityGroupIngressRule,
                       session=session)
3670-
3671-
@require_context
def security_group_rule_get(context, security_group_rule_id, session=None):
    """Return a security group rule by id.

    :raises exception.SecurityGroupNotFoundForRule: when absent.
    """
    rule = (_security_group_rule_get_query(context, session=session).
            filter_by(id=security_group_rule_id).
            first())

    if rule is None:
        raise exception.SecurityGroupNotFoundForRule(
            rule_id=security_group_rule_id)

    return rule
3683-
3684-
@require_context
def security_group_rule_get_by_security_group(context, security_group_id,
                                              session=None):
    """Return all rules whose parent is the given security group."""
    query = _security_group_rule_get_query(context, session=session)
    return (query.filter_by(parent_group_id=security_group_id).
            options(joinedload_all('grantee_group.instances.instance_type')).
            all())
3692-
3693-
@require_context
def security_group_rule_get_by_security_group_grantee(context,
                                                      security_group_id,
                                                      session=None):
    """Return all rules that grant access TO the given security group."""
    query = _security_group_rule_get_query(context, session=session)
    return query.filter_by(group_id=security_group_id).all()
3702-
3703-
@require_context
def security_group_rule_create(context, values):
    """Persist a new ingress rule built from the given values."""
    rule_ref = models.SecurityGroupIngressRule()
    rule_ref.update(values)
    rule_ref.save()
    return rule_ref
3710-
3711-
@require_context
def security_group_rule_destroy(context, security_group_rule_id):
    """Delete a security group rule inside one transaction."""
    session = get_session()
    with session.begin():
        rule = security_group_rule_get(context,
                                       security_group_rule_id,
                                       session=session)
        rule.delete(session=session)
3720-
3721-
@require_context
def security_group_rule_count_by_group(context, security_group_id):
    """Count the group's non-deleted ingress rules."""
    query = model_query(context, models.SecurityGroupIngressRule,
                        read_deleted="no")
    return query.filter_by(parent_group_id=security_group_id).count()
3728-
3729-#
3730-###################
3731-
3732-
@require_admin_context
def provider_fw_rule_create(context, rule):
    """Persist a new provider-level firewall rule."""
    fw_rule_ref = models.ProviderFirewallRule()
    fw_rule_ref.update(rule)
    fw_rule_ref.save()
    return fw_rule_ref
3739-
3740-
@require_admin_context
def provider_fw_rule_get_all(context):
    """Return every provider-level firewall rule."""
    query = model_query(context, models.ProviderFirewallRule)
    return query.all()
3744-
3745-
@require_admin_context
def provider_fw_rule_destroy(context, rule_id):
    """Soft-delete a provider firewall rule without touching updated_at."""
    session = get_session()
    with session.begin():
        query = session.query(models.ProviderFirewallRule).\
                filter_by(id=rule_id)
        query.update({'deleted': True,
                      'deleted_at': timeutils.utcnow(),
                      'updated_at': literal_column('updated_at')})
3755-
3756-
3757-###################
3758-
3759-
@require_context
def project_get_networks(context, project_id, associate=True):
    """Return the project's networks, optionally associating one.

    When the project has no networks and *associate* is true, a network
    is associated on the fly and returned as a one-element list.
    """
    # NOTE(tr3buchet): as before this function will associate
    # a project with a network if it doesn't have one and
    # associate is true
    networks = (model_query(context, models.Network, read_deleted="no").
                filter_by(project_id=project_id).
                all())

    if networks:
        return networks
    if not associate:
        return []
    return [network_associate(context, project_id)]
3776-
3777-
3778-###################
3779-
3780-
@require_admin_context
def migration_create(context, values):
    """Create and return a migration record built from ``values``."""
    migration_ref = models.Migration()
    migration_ref.update(values)
    migration_ref.save()
    return migration_ref
3787-
3788-
@require_admin_context
def migration_update(context, id, values):
    """Apply ``values`` to an existing migration and return it."""
    session = get_session()
    with session.begin():
        migration_ref = migration_get(context, id, session=session)
        migration_ref.update(values)
        migration_ref.save(session=session)
    return migration_ref
3797-
3798-
@require_admin_context
def migration_get(context, id, session=None):
    """Return the migration with the given id, deleted rows included."""
    migration = (model_query(context, models.Migration, session=session,
                             read_deleted="yes").
                 filter_by(id=id).
                 first())
    if migration is None:
        raise exception.MigrationNotFound(migration_id=id)
    return migration
3810-
3811-
@require_admin_context
def migration_get_by_instance_and_status(context, instance_uuid, status):
    """Return the migration of an instance that has the given status."""
    migration = (model_query(context, models.Migration, read_deleted="yes").
                 filter_by(instance_uuid=instance_uuid).
                 filter_by(status=status).
                 first())
    if migration is None:
        raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
                                                  status=status)
    return migration
3824-
3825-
@require_admin_context
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
                                              dest_compute, session=None):
    """List finished migrations to ``dest_compute`` older than the window."""
    # Anything not updated since this cutoff is considered unconfirmed.
    cutoff = (timeutils.utcnow() -
              datetime.timedelta(seconds=confirm_window))
    query = model_query(context, models.Migration, session=session,
                        read_deleted="yes")
    return (query.filter(models.Migration.updated_at <= cutoff).
            filter_by(status="finished").
            filter_by(dest_compute=dest_compute).
            all())
3838-
3839-
3840-##################
3841-
3842-
def console_pool_create(context, values):
    """Persist and return a new console pool record."""
    pool_ref = models.ConsolePool()
    pool_ref.update(values)
    pool_ref.save()
    return pool_ref
3848-
3849-
def console_pool_get_by_host_type(context, compute_host, host,
                                  console_type):
    """Return the live console pool matching host, type and compute host."""
    pool = (model_query(context, models.ConsolePool, read_deleted="no").
            filter_by(host=host).
            filter_by(console_type=console_type).
            filter_by(compute_host=compute_host).
            options(joinedload('consoles')).
            first())
    if pool is None:
        raise exception.ConsolePoolNotFoundForHostType(
                host=host, console_type=console_type,
                compute_host=compute_host)
    return pool
3866-
3867-
def console_pool_get_all_by_host_type(context, host, console_type):
    """Return every live console pool of ``console_type`` on ``host``."""
    query = model_query(context, models.ConsolePool, read_deleted="no")
    return (query.filter_by(host=host).
            filter_by(console_type=console_type).
            options(joinedload('consoles')).
            all())
3874-
3875-
def console_create(context, values):
    """Persist and return a new console record."""
    console_ref = models.Console()
    console_ref.update(values)
    console_ref.save()
    return console_ref
3881-
3882-
def console_delete(context, console_id):
    """Hard-delete a console record.

    NOTE(mdragon): consoles are meant to be transient, so this is a real
    row deletion rather than a soft delete.
    """
    session = get_session()
    with session.begin():
        (session.query(models.Console).
         filter_by(id=console_id).
         delete())
3890-
3891-
def console_get_by_pool_instance(context, pool_id, instance_uuid):
    """Return the console of an instance within a pool, or raise."""
    console = (model_query(context, models.Console, read_deleted="yes").
               filter_by(pool_id=pool_id).
               filter_by(instance_uuid=instance_uuid).
               options(joinedload('pool')).
               first())
    if console is None:
        raise exception.ConsoleNotFoundInPoolForInstance(
                pool_id=pool_id, instance_uuid=instance_uuid)
    return console
3904-
3905-
def console_get_all_by_instance(context, instance_uuid):
    """Return every console (including deleted) for an instance."""
    query = model_query(context, models.Console, read_deleted="yes")
    return query.filter_by(instance_uuid=instance_uuid).all()
3910-
3911-
def console_get(context, console_id, instance_uuid=None):
    """Return a console by id, optionally scoped to one instance."""
    query = (model_query(context, models.Console, read_deleted="yes").
             filter_by(id=console_id).
             options(joinedload('pool')))
    if instance_uuid is not None:
        query = query.filter_by(instance_uuid=instance_uuid)
    console = query.first()
    if not console:
        # Raise the more specific error when the lookup was instance-scoped.
        if instance_uuid:
            raise exception.ConsoleNotFoundForInstance(
                    console_id=console_id, instance_uuid=instance_uuid)
        raise exception.ConsoleNotFound(console_id=console_id)
    return console
3930-
3931-
3932-##################
3933-
3934-
@require_admin_context
def instance_type_create(context, values):
    """Create a new instance type. In order to pass in extra specs,
    the values dict should contain a 'extra_specs' key/value pair:

    {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}

    """
    session = get_session()
    with session.begin():
        # Uniqueness check on the name: the lookup raising
        # InstanceTypeNotFoundByName is the success path here.
        try:
            instance_type_get_by_name(context, values['name'], session)
            raise exception.InstanceTypeExists(name=values['name'])
        except exception.InstanceTypeNotFoundByName:
            pass
        # Same uniqueness check for the flavorid.
        try:
            instance_type_get_by_flavor_id(context, values['flavorid'],
                                           session)
            raise exception.InstanceTypeExists(name=values['name'])
        except exception.FlavorNotFound:
            pass
        try:
            # Convert the {'k': 'v'} extra_specs mapping into a list of
            # InstanceTypeExtraSpecs rows.  Note this overwrites the
            # 'extra_specs' entry of the caller-supplied values dict.
            specs = values.get('extra_specs')
            specs_refs = []
            if specs:
                for k, v in specs.iteritems():
                    specs_ref = models.InstanceTypeExtraSpecs()
                    specs_ref['key'] = k
                    specs_ref['value'] = v
                    specs_refs.append(specs_ref)
            values['extra_specs'] = specs_refs
            instance_type_ref = models.InstanceTypes()
            instance_type_ref.update(values)
            instance_type_ref.save(session=session)
        except Exception, e:
            # Any conversion/save failure is surfaced as a DBError.
            raise exception.DBError(e)
    return _dict_with_extra_specs(instance_type_ref)
3972-
3973-
3974-def _dict_with_extra_specs(inst_type_query):
3975- """Takes an instance, volume, or instance type query returned
3976- by sqlalchemy and returns it as a dictionary, converting the
3977- extra_specs entry from a list of dicts:
3978-
3979- 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
3980-
3981- to a single dict:
3982-
3983- 'extra_specs' : {'k1': 'v1'}
3984-
3985- """
3986- inst_type_dict = dict(inst_type_query)
3987- extra_specs = dict([(x['key'], x['value'])
3988- for x in inst_type_query['extra_specs']])
3989- inst_type_dict['extra_specs'] = extra_specs
3990- return inst_type_dict
3991-
3992-
def _instance_type_get_query(context, session=None, read_deleted=None):
    """Base InstanceTypes query with extra_specs eagerly loaded."""
    query = model_query(context, models.InstanceTypes, session=session,
                        read_deleted=read_deleted)
    return query.options(joinedload('extra_specs'))
3997-
3998-
@require_context
def instance_type_get_all(context, inactive=False, filters=None):
    """
    Returns all instance types.
    """
    filters = filters or {}

    # FIXME(sirp): now that we have the `disabled` field for instance-types, we
    # should probably remove the use of `deleted` to mark inactive. `deleted`
    # should mean truly deleted, e.g. we can safely purge the record out of the
    # database.
    read_deleted = "yes" if inactive else "no"

    query = _instance_type_get_query(context, read_deleted=read_deleted)

    if 'min_memory_mb' in filters:
        query = query.filter(
                models.InstanceTypes.memory_mb >= filters['min_memory_mb'])

    if 'min_root_gb' in filters:
        query = query.filter(
                models.InstanceTypes.root_gb >= filters['min_root_gb'])

    if 'disabled' in filters:
        query = query.filter(
                models.InstanceTypes.disabled == filters['disabled'])

    if 'is_public' in filters and filters['is_public'] is not None:
        # Public flavors match directly; for a project context, private
        # flavors the project was granted access to are included as well.
        the_filter = [models.InstanceTypes.is_public == filters['is_public']]
        if filters['is_public'] and context.project_id is not None:
            the_filter.extend([
                models.InstanceTypes.projects.any(
                    project_id=context.project_id, deleted=False)
            ])
        if len(the_filter) > 1:
            query = query.filter(or_(*the_filter))
        else:
            query = query.filter(the_filter[0])
        # NOTE(review): this removes 'is_public' from the caller-supplied
        # filters dict — confirm no caller relies on reusing that dict.
        del filters['is_public']

    inst_types = query.order_by("name").all()

    return [_dict_with_extra_specs(i) for i in inst_types]
4042-
4043-
@require_context
def instance_type_get(context, id, session=None):
    """Return a dict describing the instance type with the given id."""
    inst_type = (_instance_type_get_query(context, session=session).
                 filter_by(id=id).
                 first())
    if inst_type is None:
        raise exception.InstanceTypeNotFound(instance_type_id=id)
    return _dict_with_extra_specs(inst_type)
4055-
4056-
@require_context
def instance_type_get_by_name(context, name, session=None):
    """Return a dict describing the named instance type."""
    inst_type = (_instance_type_get_query(context, session=session).
                 filter_by(name=name).
                 first())
    if inst_type is None:
        raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
    return _dict_with_extra_specs(inst_type)
4068-
4069-
@require_context
def instance_type_get_by_flavor_id(context, flavor_id, session=None):
    """Return a dict describing the instance type with this flavor id."""
    inst_type = (_instance_type_get_query(context, session=session).
                 filter_by(flavorid=flavor_id).
                 first())
    if inst_type is None:
        raise exception.FlavorNotFound(flavor_id=flavor_id)
    return _dict_with_extra_specs(inst_type)
4081-
4082-
@require_admin_context
def instance_type_destroy(context, name):
    """Soft-delete the named instance type and its extra specs."""
    session = get_session()
    with session.begin():
        instance_type_ref = instance_type_get_by_name(context, name,
                                                      session=session)
        type_id = instance_type_ref['id']
        # Soft-delete the type row and its spec rows; updated_at is left
        # untouched so only the deletion fields change.
        (session.query(models.InstanceTypes).
         filter_by(id=type_id).
         update({'deleted': True,
                 'deleted_at': timeutils.utcnow(),
                 'updated_at': literal_column('updated_at')}))
        (session.query(models.InstanceTypeExtraSpecs).
         filter_by(instance_type_id=type_id).
         update({'deleted': True,
                 'deleted_at': timeutils.utcnow(),
                 'updated_at': literal_column('updated_at')}))
4101-
4102-
@require_context
def _instance_type_access_query(context, session=None):
    """Base query over flavor-access rows, deleted rows included."""
    return model_query(context, models.InstanceTypeProjects,
                       session=session, read_deleted="yes")
4107-
4108-
@require_admin_context
def instance_type_access_get_by_flavor_id(context, flavor_id):
    """Get flavor access list by flavor id.

    :raises: FlavorNotFound if no instance type has the given flavor id.
    """
    instance_type_ref = _instance_type_get_query(context).\
                            filter_by(flavorid=flavor_id).\
                            first()

    # Previously an unknown flavor_id left instance_type_ref as None and
    # produced an AttributeError below; raise the standard error instead.
    if not instance_type_ref:
        raise exception.FlavorNotFound(flavor_id=flavor_id)

    return [r for r in instance_type_ref.projects]
4117-
4118-
@require_admin_context
def instance_type_access_add(context, flavor_id, project_id):
    """Add given tenant to the flavor access list"""
    session = get_session()
    with session.begin():
        instance_type_ref = instance_type_get_by_flavor_id(context, flavor_id,
                                                           session=session)
        instance_type_id = instance_type_ref['id']
        # Query includes soft-deleted rows so a revoked grant can be
        # resurrected rather than inserting a duplicate row.
        access_ref = _instance_type_access_query(context, session=session).\
                        filter_by(instance_type_id=instance_type_id).\
                        filter_by(project_id=project_id).first()

        if not access_ref:
            # No row at all: create a fresh access grant.
            access_ref = models.InstanceTypeProjects()
            access_ref.instance_type_id = instance_type_id
            access_ref.project_id = project_id
            access_ref.save(session=session)
        elif access_ref.deleted:
            # Soft-deleted grant: undelete it.
            access_ref.update({'deleted': False,
                               'deleted_at': None})
            access_ref.save(session=session)
        else:
            # Live grant already exists.
            raise exception.FlavorAccessExists(flavor_id=flavor_id,
                                               project_id=project_id)

        return access_ref
4145-
4146-
@require_admin_context
def instance_type_access_remove(context, flavor_id, project_id):
    """Drop ``project_id`` from the access list of the given flavor."""
    session = get_session()
    with session.begin():
        instance_type_ref = instance_type_get_by_flavor_id(context, flavor_id,
                                                           session=session)
        access_ref = (_instance_type_access_query(context, session=session).
                      filter_by(instance_type_id=instance_type_ref['id']).
                      filter_by(project_id=project_id).
                      first())
        if access_ref is None:
            raise exception.FlavorAccessNotFound(flavor_id=flavor_id,
                                                 project_id=project_id)
        # Soft-delete the grant; updated_at is intentionally left as-is.
        access_ref.update({'deleted': True,
                           'deleted_at': timeutils.utcnow(),
                           'updated_at': literal_column('updated_at')})
4166-
4167-
4168-########################
4169-# User-provided metadata
4170-
def _instance_metadata_get_query(context, instance_uuid, session=None):
    """Base query over an instance's live (non-deleted) metadata rows."""
    query = model_query(context, models.InstanceMetadata, session=session,
                        read_deleted="no")
    return query.filter_by(instance_uuid=instance_uuid)
4175-
4176-
@require_context
def instance_metadata_get(context, instance_uuid, session=None):
    """Return an instance's metadata as a plain {key: value} dict."""
    rows = _instance_metadata_get_query(context, instance_uuid,
                                        session=session).all()
    return dict((row['key'], row['value']) for row in rows)
4187-
4188-
@require_context
def instance_metadata_delete(context, instance_uuid, key):
    """Soft-delete a single metadata item of an instance."""
    (_instance_metadata_get_query(context, instance_uuid).
     filter_by(key=key).
     update({'deleted': True,
             'deleted_at': timeutils.utcnow(),
             'updated_at': literal_column('updated_at')}))
4196-
4197-
@require_context
def instance_metadata_get_item(context, instance_uuid, key, session=None):
    """Return a single metadata row of an instance, or raise."""
    row = (_instance_metadata_get_query(
                context, instance_uuid, session=session).
           filter_by(key=key).
           first())
    if row is None:
        raise exception.InstanceMetadataNotFound(metadata_key=key,
                                                 instance_uuid=instance_uuid)
    return row
4210-
4211-
@require_context
def instance_metadata_update(context, instance_uuid, metadata, delete,
                             session=None):
    """Merge ``metadata`` into an instance's metadata.

    When ``delete`` is true, existing keys missing from ``metadata`` are
    soft-deleted first; otherwise they are left alone.  Returns the
    ``metadata`` dict that was passed in.
    """
    if session is None:
        session = get_session()
    # Set existing metadata to deleted if delete argument is True
    if delete:
        original_metadata = instance_metadata_get(context, instance_uuid,
                                                  session=session)
        for meta_key, meta_value in original_metadata.iteritems():
            if meta_key not in metadata:
                meta_ref = instance_metadata_get_item(context, instance_uuid,
                                                      meta_key, session)
                meta_ref.update({'deleted': True})
                meta_ref.save(session=session)

    meta_ref = None

    # Now update all existing items with new values, or create new meta objects
    for meta_key, meta_value in metadata.iteritems():

        # update the value whether it exists or not
        item = {"value": meta_value}

        try:
            meta_ref = instance_metadata_get_item(context, instance_uuid,
                                                  meta_key, session)
        except exception.InstanceMetadataNotFound, e:
            # No existing row: create one, carrying the key and owner.
            meta_ref = models.InstanceMetadata()
            item.update({"key": meta_key, "instance_uuid": instance_uuid})

        meta_ref.update(item)
        meta_ref.save(session=session)

    return metadata
4247-
4248-
4249-#######################
4250-# System-owned metadata
4251-
def _instance_system_metadata_get_query(context, instance_uuid, session=None):
    """Base query over an instance's system metadata rows."""
    query = model_query(context, models.InstanceSystemMetadata,
                        session=session)
    return query.filter_by(instance_uuid=instance_uuid)
4256-
4257-
@require_context
def instance_system_metadata_get(context, instance_uuid, session=None):
    """Return an instance's system metadata as a {key: value} dict."""
    rows = _instance_system_metadata_get_query(context, instance_uuid,
                                               session=session).all()
    return dict((row['key'], row['value']) for row in rows)
4268-
4269-
@require_context
def instance_system_metadata_delete(context, instance_uuid, key):
    """Soft-delete a single system metadata item of an instance."""
    (_instance_system_metadata_get_query(context, instance_uuid).
     filter_by(key=key).
     update({'deleted': True,
             'deleted_at': timeutils.utcnow(),
             'updated_at': literal_column('updated_at')}))
4277-
4278-
def _instance_system_metadata_get_item(context, instance_uuid, key,
                                       session=None):
    """Return a single system metadata row of an instance, or raise."""
    row = (_instance_system_metadata_get_query(
                context, instance_uuid, session=session).
           filter_by(key=key).
           first())
    if row is None:
        raise exception.InstanceSystemMetadataNotFound(
                metadata_key=key, instance_uuid=instance_uuid)
    return row
4291-
4292-
@require_context
def instance_system_metadata_update(context, instance_uuid, metadata, delete,
                                    session=None):
    """Merge ``metadata`` into an instance's system metadata.

    When ``delete`` is true, existing keys missing from ``metadata`` are
    soft-deleted first.  Returns the ``metadata`` dict that was passed in.
    """
    if session is None:
        session = get_session()

    # Set existing metadata to deleted if delete argument is True
    if delete:
        original_metadata = instance_system_metadata_get(
            context, instance_uuid, session=session)
        for meta_key, meta_value in original_metadata.iteritems():
            if meta_key not in metadata:
                meta_ref = _instance_system_metadata_get_item(
                    context, instance_uuid, meta_key, session)
                meta_ref.update({'deleted': True})
                meta_ref.save(session=session)

    meta_ref = None

    # Now update all existing items with new values, or create new meta objects
    for meta_key, meta_value in metadata.iteritems():

        # update the value whether it exists or not
        item = {"value": meta_value}

        try:
            meta_ref = _instance_system_metadata_get_item(
                context, instance_uuid, meta_key, session)
        except exception.InstanceSystemMetadataNotFound, e:
            # No existing row: create one, carrying the key and owner.
            meta_ref = models.InstanceSystemMetadata()
            item.update({"key": meta_key, "instance_uuid": instance_uuid})

        meta_ref.update(item)
        meta_ref.save(session=session)

    return metadata
4329-
4330-
4331-####################
4332-
4333-
@require_admin_context
def agent_build_create(context, values):
    """Persist and return a new agent build record."""
    build = models.AgentBuild()
    build.update(values)
    build.save()
    return build
4340-
4341-
@require_admin_context
def agent_build_get_by_triple(context, hypervisor, os, architecture,
                              session=None):
    """Return the agent build for (hypervisor, os, architecture)."""
    query = model_query(context, models.AgentBuild, session=session,
                        read_deleted="no")
    return (query.filter_by(hypervisor=hypervisor).
            filter_by(os=os).
            filter_by(architecture=architecture).
            first())
4351-
4352-
@require_admin_context
def agent_build_get_all(context):
    """Return every live agent build."""
    return model_query(context, models.AgentBuild, read_deleted="no").all()
4357-
4358-
@require_admin_context
def agent_build_destroy(context, agent_build_id):
    """Soft-delete the agent build with the given id."""
    session = get_session()
    with session.begin():
        (model_query(context, models.AgentBuild, session=session,
                     read_deleted="yes").
         filter_by(id=agent_build_id).
         update({'deleted': True,
                 'deleted_at': timeutils.utcnow(),
                 # keep updated_at untouched
                 'updated_at': literal_column('updated_at')}))
4369-
4370-
@require_admin_context
def agent_build_update(context, agent_build_id, values):
    """Apply ``values`` to an existing agent build record."""
    session = get_session()
    with session.begin():
        # NOTE(review): an unknown id yields None here and an AttributeError
        # below — confirm callers always pass a valid id.
        build = (model_query(context, models.AgentBuild,
                             session=session, read_deleted="yes").
                 filter_by(id=agent_build_id).
                 first())
        build.update(values)
        build.save(session=session)
4382-
4383-
4384-####################
4385-
@require_context
def bw_usage_get_by_uuids(context, uuids, start_period):
    """Return bandwidth-usage rows for the given uuids in one period."""
    query = model_query(context, models.BandwidthUsage, read_deleted="yes")
    return (query.filter(models.BandwidthUsage.uuid.in_(uuids)).
            filter_by(start_period=start_period).
            all())
4392-
4393-
@require_context
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
                    last_refreshed=None, session=None):
    """Record bandwidth usage for (uuid, mac) in the given period.

    Updates the existing row in place when one exists; otherwise a new
    BandwidthUsage row is created.
    """
    if not session:
        session = get_session()

    if last_refreshed is None:
        last_refreshed = timeutils.utcnow()

    # NOTE(comstud): More often than not, we'll be updating records vs
    # creating records. Optimize accordingly, trying to update existing
    # records. Fall back to creation when no rows are updated.
    with session.begin():
        values = {'last_refreshed': last_refreshed,
                  'bw_in': bw_in,
                  'bw_out': bw_out}
        rows = model_query(context, models.BandwidthUsage,
                           session=session, read_deleted="yes").\
                filter_by(start_period=start_period).\
                filter_by(uuid=uuid).\
                filter_by(mac=mac).\
                update(values, synchronize_session=False)
        if rows:
            # An existing row was updated in place; nothing to insert.
            return

        bwusage = models.BandwidthUsage()
        bwusage.start_period = start_period
        bwusage.uuid = uuid
        bwusage.mac = mac
        bwusage.last_refreshed = last_refreshed
        bwusage.bw_in = bw_in
        bwusage.bw_out = bw_out
        bwusage.save(session=session)
4427-
4428-
4429-####################
4430-
4431-
def _instance_type_extra_specs_get_query(context, flavor_id,
                                         session=None):
    """Query a flavor's extra specs via an id subquery.

    Two queries are necessary because a join combined with update()
    does not work.
    """
    type_ids = (model_query(context, models.InstanceTypes.id,
                            session=session, read_deleted="no").
                filter(models.InstanceTypes.flavorid == flavor_id).
                subquery())
    return (model_query(context, models.InstanceTypeExtraSpecs,
                        session=session, read_deleted="no").
            filter(models.InstanceTypeExtraSpecs.
                   instance_type_id.in_(type_ids)))
4443-
4444-
@require_context
def instance_type_extra_specs_get(context, flavor_id):
    """Return a flavor's extra specs as a plain {key: value} dict."""
    rows = _instance_type_extra_specs_get_query(context, flavor_id).all()
    return dict((row['key'], row['value']) for row in rows)
4456-
4457-
@require_context
def instance_type_extra_specs_delete(context, flavor_id, key):
    """Soft-delete one extra-spec entry of a flavor."""
    # No need to synchronize the session: the query result is not reused.
    (_instance_type_extra_specs_get_query(context, flavor_id).
     filter(models.InstanceTypeExtraSpecs.key == key).
     update({'deleted': True,
             'deleted_at': timeutils.utcnow(),
             'updated_at': literal_column('updated_at')},
            synchronize_session=False))
4468-
4469-
@require_context
def instance_type_extra_specs_get_item(context, flavor_id, key,
                                       session=None):
    """Return a single extra-spec row of a flavor, or raise."""
    row = (_instance_type_extra_specs_get_query(
                context, flavor_id, session=session).
           filter(models.InstanceTypeExtraSpecs.key == key).
           first())
    if row is None:
        raise exception.InstanceTypeExtraSpecsNotFound(
                extra_specs_key=key, instance_type_id=flavor_id)
    return row
4482-
4483-
@require_context
def instance_type_extra_specs_update_or_create(context, flavor_id,
                                               specs):
    """Create or overwrite extra specs for a flavor; returns ``specs``."""
    session = get_session()
    spec_ref = None
    instance_type = instance_type_get_by_flavor_id(context, flavor_id)
    for key, value in specs.iteritems():
        try:
            spec_ref = instance_type_extra_specs_get_item(
                context, flavor_id, key, session)
        except exception.InstanceTypeExtraSpecsNotFound, e:
            # No existing row for this key; start a fresh one.
            spec_ref = models.InstanceTypeExtraSpecs()
        spec_ref.update({"key": key, "value": value,
                         "instance_type_id": instance_type["id"],
                         "deleted": False})
        spec_ref.save(session=session)
    return specs
4501-
4502-
4503-##################
4504-
4505-
4506-@require_admin_context
4507-def volume_type_create(context, values):
4508- """Create a new instance type. In order to pass in extra specs,
4509- the values dict should contain a 'extra_specs' key/value pair:
4510-
4511- {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
4512-
4513- """
4514- session = get_session()
4515- with session.begin():
4516- try:
4517- volume_type_get_by_name(context, values['name'], session)
4518- raise exception.VolumeTypeExists(name=values['name'])
4519- except exception.VolumeTypeNotFoundByName:
4520- pass
4521- try:
4522- values['extra_specs'] = _metadata_refs(values.get('extra_specs'),
4523- models.VolumeTypeExtraSpecs)
4524- volume_type_ref = models.VolumeTypes()
4525- volume_type_ref.update(values)
4526- volume_type_ref.save()
4527- except Exception, e:
4528- raise exception.DBError(e)
4529- return volume_type_ref
4530-
4531-
@require_context
def volume_type_get_all(context, inactive=False, filters=None):
    """Return a dict describing all volume types, keyed by name."""
    filters = filters or {}

    read_deleted = "yes" if inactive else "no"
    rows = (model_query(context, models.VolumeTypes,
                        read_deleted=read_deleted).
            options(joinedload('extra_specs')).
            order_by("name").
            all())

    # TODO(sirp): this pattern of converting rows to a result with
    # extra_specs is repeated quite a bit, might be worth creating a
    # method for it
    return dict((row['name'], _dict_with_extra_specs(row)) for row in rows)
4553-
4554-
@require_context
def volume_type_get(context, id, session=None):
    """Return a dict describing the volume type with the given id."""
    vol_type = (model_query(context, models.VolumeTypes, session=session).
                options(joinedload('extra_specs')).
                filter_by(id=id).
                first())
    if vol_type is None:
        raise exception.VolumeTypeNotFound(volume_type_id=id)
    return _dict_with_extra_specs(vol_type)
4567-
4568-
@require_context
def volume_type_get_by_name(context, name, session=None):
    """Return a dict describing the named volume type."""
    vol_type = (model_query(context, models.VolumeTypes, session=session).
                options(joinedload('extra_specs')).
                filter_by(name=name).
                first())
    if vol_type is None:
        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
    return _dict_with_extra_specs(vol_type)
4581-
4582-
@require_admin_context
def volume_type_destroy(context, name):
    """Soft-delete the named volume type and its extra specs."""
    session = get_session()
    with session.begin():
        volume_type_ref = volume_type_get_by_name(context, name,
                                                  session=session)
        type_id = volume_type_ref['id']
        # Soft-delete the type row and its spec rows; updated_at is left
        # untouched so only the deletion fields change.
        (session.query(models.VolumeTypes).
         filter_by(id=type_id).
         update({'deleted': True,
                 'deleted_at': timeutils.utcnow(),
                 'updated_at': literal_column('updated_at')}))
        (session.query(models.VolumeTypeExtraSpecs).
         filter_by(volume_type_id=type_id).
         update({'deleted': True,
                 'deleted_at': timeutils.utcnow(),
                 'updated_at': literal_column('updated_at')}))
4600-
4601-
@require_context
def volume_get_active_by_window(context, begin, end=None,
                                project_id=None):
    """Return volumes that were active during window."""
    session = get_session()
    # '== None' is deliberate: SQLAlchemy renders it as IS NULL.
    not_deleted_in_window = or_(models.Volume.deleted_at == None,
                                models.Volume.deleted_at > begin)
    query = session.query(models.Volume).filter(not_deleted_in_window)
    if end:
        query = query.filter(models.Volume.created_at < end)
    if project_id:
        query = query.filter_by(project_id=project_id)
    return query.all()
4617-
4618-
4619-####################
4620-
4621-
def _volume_type_extra_specs_query(context, volume_type_id, session=None):
    """Base query over a volume type's live extra-spec rows."""
    query = model_query(context, models.VolumeTypeExtraSpecs,
                        session=session, read_deleted="no")
    return query.filter_by(volume_type_id=volume_type_id)
4626-
4627-
@require_context
def volume_type_extra_specs_get(context, volume_type_id):
    """Return a volume type's extra specs as a {key: value} dict."""
    rows = _volume_type_extra_specs_query(context, volume_type_id).all()
    return dict((row['key'], row['value']) for row in rows)
4638-
4639-
@require_context
def volume_type_extra_specs_delete(context, volume_type_id, key):
    """Soft-delete one extra-spec entry of a volume type."""
    (_volume_type_extra_specs_query(context, volume_type_id).
     filter_by(key=key).
     update({'deleted': True,
             'deleted_at': timeutils.utcnow(),
             'updated_at': literal_column('updated_at')}))
4647-
4648-
@require_context
def volume_type_extra_specs_get_item(context, volume_type_id, key,
                                     session=None):
    """Return a single extra-spec row of a volume type, or raise."""
    row = (_volume_type_extra_specs_query(
                context, volume_type_id, session=session).
           filter_by(key=key).
           first())
    if row is None:
        raise exception.VolumeTypeExtraSpecsNotFound(
                extra_specs_key=key, volume_type_id=volume_type_id)
    return row
4662-
4663-
@require_context
def volume_type_extra_specs_update_or_create(context, volume_type_id,
                                             specs):
    """Create or overwrite extra specs for a volume type; returns ``specs``."""
    session = get_session()
    spec_ref = None
    for key, value in specs.iteritems():
        try:
            spec_ref = volume_type_extra_specs_get_item(
                context, volume_type_id, key, session)
        except exception.VolumeTypeExtraSpecsNotFound, e:
            # No existing row for this key; start a fresh one.
            spec_ref = models.VolumeTypeExtraSpecs()
        spec_ref.update({"key": key, "value": value,
                         "volume_type_id": volume_type_id,
                         "deleted": False})
        spec_ref.save(session=session)
    return specs
4680-
4681-
4682-####################
4683-
4684-
def s3_image_get(context, image_id):
    """Find local s3 image represented by the provided id"""
    image = (model_query(context, models.S3Image, read_deleted="yes").
             filter_by(id=image_id).
             first())
    if image is None:
        raise exception.ImageNotFound(image_id=image_id)
    return image
4695-
4696-
def s3_image_get_by_uuid(context, image_uuid):
    """Find local s3 image represented by the provided uuid"""
    image = (model_query(context, models.S3Image, read_deleted="yes").
             filter_by(uuid=image_uuid).
             first())
    if image is None:
        raise exception.ImageNotFound(image_id=image_uuid)
    return image
4707-
4708-
def s3_image_create(context, image_uuid):
    """Create local s3 image represented by provided uuid"""
    try:
        s3_image_ref = models.S3Image()
        s3_image_ref.update({'uuid': image_uuid})
        s3_image_ref.save()
    except Exception, e:
        # Any persistence failure is surfaced as a DBError.
        raise exception.DBError(e)

    return s3_image_ref
4719-
4720-
4721-####################
4722-
4723-
@require_admin_context
def sm_backend_conf_create(context, values):
    """Create an SM backend config; duplicate config_params are refused."""
    session = get_session()
    with session.begin():
        existing = (model_query(context, models.SMBackendConf,
                                session=session, read_deleted="yes").
                    filter_by(config_params=values['config_params']).
                    first())
        if existing:
            raise exception.Duplicate(_('Backend exists'))
        backend_conf = models.SMBackendConf()
        backend_conf.update(values)
        backend_conf.save(session=session)
        return backend_conf
4742-
4743-
@require_admin_context
def sm_backend_conf_update(context, sm_backend_id, values):
    """Apply ``values`` to an existing SM backend config and return it."""
    session = get_session()
    with session.begin():
        backend_conf = (model_query(context, models.SMBackendConf,
                                    session=session, read_deleted="yes").
                        filter_by(id=sm_backend_id).
                        first())
        if backend_conf is None:
            raise exception.NotFound(
                _("No backend config with id %(sm_backend_id)s") % locals())
        backend_conf.update(values)
        backend_conf.save(session=session)
    return backend_conf
4761-
4762-
@require_admin_context
def sm_backend_conf_delete(context, sm_backend_id):
    """Hard-delete an SM backend config row."""
    # FIXME(sirp): for consistency, shouldn't this just mark as deleted with
    # `purge` actually deleting the record?
    session = get_session()
    with session.begin():
        (model_query(context, models.SMBackendConf, session=session,
                     read_deleted="yes").
         filter_by(id=sm_backend_id).
         delete())
4773-
4774-
@require_admin_context
def sm_backend_conf_get(context, sm_backend_id):
    """Return the backend config with the given id; raises NotFound."""
    result = model_query(
        context, models.SMBackendConf,
        read_deleted="yes").filter_by(id=sm_backend_id).first()
    if result is None:
        raise exception.NotFound(
            _("No backend config with id %(sm_backend_id)s") % locals())
    return result
4786-
4787-
@require_admin_context
def sm_backend_conf_get_by_sr(context, sr_uuid):
    """Look up a backend config by its storage repository uuid.

    Raises NotFound when no matching row exists.
    """
    query = model_query(context, models.SMBackendConf, read_deleted="yes")
    result = query.filter_by(sr_uuid=sr_uuid).first()
    if not result:
        raise exception.NotFound(
            _("No backend config with sr uuid %(sr_uuid)s") % locals())
    return result
4797-
4798-
@require_admin_context
def sm_backend_conf_get_all(context):
    """Return every backend config row, including soft-deleted ones."""
    query = model_query(context, models.SMBackendConf, read_deleted="yes")
    return query.all()
4803-
4804-
4805-####################
4806-
4807-
def _sm_flavor_get_query(context, sm_flavor_id, session=None):
    # Base query for one SM flavor by primary key; soft-deleted rows are
    # included so callers can operate on them too.
    query = model_query(context, models.SMFlavors, session=session,
                        read_deleted="yes")
    return query.filter_by(id=sm_flavor_id)
4812-
4813-
@require_admin_context
def sm_flavor_create(context, values):
    """Create an SM flavor from ``values``.

    Raises Duplicate when a row (including soft-deleted) already uses
    the requested label.
    """
    session = get_session()
    with session.begin():
        existing = model_query(context, models.SMFlavors,
                               session=session, read_deleted="yes")
        existing = existing.filter_by(label=values['label']).first()
        # Guard clause: reject duplicate labels up front.
        if existing:
            raise exception.Duplicate(_('Flavor exists'))
        sm_flavor = models.SMFlavors()
        sm_flavor.update(values)
        sm_flavor.save(session=session)
    return sm_flavor
4830-
4831-
@require_admin_context
def sm_flavor_update(context, sm_flavor_id, values):
    """Apply ``values`` to an existing SM flavor and persist it.

    :raises: exception.NotFound when no row has the given id
    """
    session = get_session()
    with session.begin():
        sm_flavor = model_query(context, models.SMFlavors,
                                session=session,
                                read_deleted="yes").\
            filter_by(id=sm_flavor_id).\
            first()
        if not sm_flavor:
            # Bug fix: the original format string was
            # '%(sm_flavor_id) flavor not found' -- missing the 's'
            # conversion, so ' f' was parsed as a printf flag + float
            # conversion and the message rendered garbled.
            raise exception.NotFound(
                _('%(sm_flavor_id)s flavor not found') % locals())
        sm_flavor.update(values)
        sm_flavor.save(session=session)
    return sm_flavor
4847-
4848-
@require_admin_context
def sm_flavor_delete(context, sm_flavor_id):
    """Hard-delete the SM flavor with the given id."""
    session = get_session()
    with session.begin():
        # Bug fix: pass the transaction's session so the DELETE executes
        # inside this `with session.begin()` block (matching
        # sm_volume_delete); the original queried on a separate default
        # session, bypassing the transaction opened here.
        _sm_flavor_get_query(context, sm_flavor_id, session=session).delete()
4854-
4855-
@require_admin_context
def sm_flavor_get(context, sm_flavor_id):
    """Return the SM flavor with the given id; raises NotFound."""
    sm_flavor = _sm_flavor_get_query(context, sm_flavor_id).first()
    if sm_flavor is None:
        raise exception.NotFound(
            _("No sm_flavor called %(sm_flavor_id)s") % locals())
    return sm_flavor
4865-
4866-
@require_admin_context
def sm_flavor_get_all(context):
    """Return all SM flavors, including soft-deleted rows."""
    query = model_query(context, models.SMFlavors, read_deleted="yes")
    return query.all()
4870-
4871-
@require_admin_context
def sm_flavor_get_by_label(context, sm_flavor_label):
    """Look up an SM flavor by its label; raises NotFound if absent."""
    query = model_query(context, models.SMFlavors, read_deleted="yes")
    result = query.filter_by(label=sm_flavor_label).first()
    if not result:
        raise exception.NotFound(
            _("No sm_flavor called %(sm_flavor_label)s") % locals())
    return result
4881-
4882-
4883-###############################
4884-
4885-
def _sm_volume_get_query(context, volume_id, session=None):
    # Base query for one SM volume by id; soft-deleted rows included.
    query = model_query(context, models.SMVolume, session=session,
                        read_deleted="yes")
    return query.filter_by(id=volume_id)
4890-
4891-
def sm_volume_create(context, values):
    """Create and persist an SM volume built from ``values``."""
    volume_ref = models.SMVolume()
    volume_ref.update(values)
    volume_ref.save()
    return volume_ref
4897-
4898-
def sm_volume_update(context, volume_id, values):
    """Apply ``values`` to an existing SM volume and persist it.

    NotFound propagates from sm_volume_get when the id is unknown.
    """
    volume_ref = sm_volume_get(context, volume_id)
    volume_ref.update(values)
    volume_ref.save()
    return volume_ref
4904-
4905-
def sm_volume_delete(context, volume_id):
    """Hard-delete the SM volume with the given id."""
    session = get_session()
    with session.begin():
        query = _sm_volume_get_query(context, volume_id, session=session)
        query.delete()
4910-
4911-
def sm_volume_get(context, volume_id):
    """Return the SM volume with the given id; raises NotFound."""
    volume_ref = _sm_volume_get_query(context, volume_id).first()
    if volume_ref is None:
        raise exception.NotFound(
            _("No sm_volume with id %(volume_id)s") % locals())
    return volume_ref
4920-
4921-
def sm_volume_get_all(context):
    """Return all SM volumes, including soft-deleted rows."""
    query = model_query(context, models.SMVolume, read_deleted="yes")
    return query.all()
4924-
4925-
4926-################
4927-
4928-
def _aggregate_get_query(context, model_class, id_field, id,
                         session=None, read_deleted=None):
    # Generic single-column lookup for the aggregate tables: filters
    # model_class rows where the given column (id_field) equals id.
    query = model_query(context, model_class, session=session,
                        read_deleted=read_deleted)
    return query.filter(id_field == id)
4933-
4934-
@require_admin_context
def aggregate_create(context, values, metadata=None):
    """Create a host aggregate, optionally seeding it with metadata.

    Raises AggregateNameExists when a non-deleted aggregate already
    uses the requested name.
    """
    session = get_session()
    existing = _aggregate_get_query(context,
                                    models.Aggregate,
                                    models.Aggregate.name,
                                    values['name'],
                                    session=session,
                                    read_deleted='no').first()
    # Guard clause: name must be unique among live aggregates.
    if existing:
        raise exception.AggregateNameExists(aggregate_name=values['name'])
    aggregate = models.Aggregate()
    aggregate.update(values)
    aggregate.save(session=session)
    if metadata:
        aggregate_metadata_add(context, aggregate.id, metadata)
    return aggregate
4953-
4954-
@require_admin_context
def aggregate_get(context, aggregate_id):
    """Return the aggregate with the given id; raises AggregateNotFound."""
    aggregate = _aggregate_get_query(context,
                                     models.Aggregate,
                                     models.Aggregate.id,
                                     aggregate_id).first()
    if aggregate is None:
        raise exception.AggregateNotFound(aggregate_id=aggregate_id)
    return aggregate
4966-
4967-
@require_admin_context
def aggregate_get_by_host(context, host, key=None):
    """Return all aggregates containing the given host.

    When ``key`` is given, restrict to aggregates carrying that
    metadata key.
    """
    query = model_query(context, models.Aggregate)
    query = query.join("_hosts").filter(models.AggregateHost.host == host)
    if key:
        query = query.join("_metadata").filter(
            models.AggregateMetadata.key == key)
    return query.all()
4977-
4978-
@require_admin_context
def aggregate_metadata_get_by_host(context, host, key=None):
    """Collect metadata across all aggregates containing the given host.

    Returns a dict mapping metadata key -> set of values seen in those
    aggregates; when ``key`` is given only that key is queried.
    """
    query = model_query(context, models.Aggregate).join(
        "_hosts").filter(models.AggregateHost.host == host).join(
        "_metadata")
    if key:
        query = query.filter(models.AggregateMetadata.key == key)

    metadata = collections.defaultdict(set)
    for aggregate in query.all():
        for item in aggregate._metadata:
            metadata[item['key']].add(item['value'])
    return metadata
4993-
4994-
4995-@require_admin_context
4996-def aggregate_update(context, aggregate_id, values):
4997- session = get_session()
4998- aggregate = _aggregate_get_query(context,
4999- models.Aggregate,
5000- models.Aggregate.id,
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches