Merge lp:~midokura/nova/network-service into lp:~ntt-pf-lab/nova/network-service
- network-service
- Merge into network-service
Proposed by
Ryu Ishimoto
Status: | Merged |
---|---|
Merged at revision: | 775 |
Proposed branch: | lp:~midokura/nova/network-service |
Merge into: | lp:~ntt-pf-lab/nova/network-service |
Diff against target: |
4572 lines (+2361/-1171) (has conflicts) 19 files modified
bin/nova-network-manage (+289/-0) nova/api/ec2/cloud.py (+99/-105) nova/auth/manager.py (+1/-1) nova/compute/api.py (+35/-34) nova/network/flat_vlan/api/openstack/ethernet_cards.py (+67/-0) nova/network/flat_vlan/api/openstack/virtual_nics.py (+0/-68) nova/network/flat_vlan/api/service.py (+282/-27) nova/network/flat_vlan/compute.py (+86/-3) nova/network/flat_vlan/db/api.py (+110/-18) nova/network/flat_vlan/db/migration.py (+36/-0) nova/network/flat_vlan/db/sqlalchemy/migrate_repo/versions/001_diablo.py (+65/-4) nova/network/flat_vlan/db/sqlalchemy/migration.py (+81/-0) nova/network/flat_vlan/db/sqlalchemy/models.py (+294/-32) nova/network/flat_vlan/firewall.py (+581/-0) nova/network/flat_vlan/flags.py (+3/-0) nova/network/flat_vlan/manager.py (+11/-11) nova/network/flat_vlan/network.py (+17/-15) nova/network/service.py (+232/-14) nova/virt/libvirt_conn.py (+72/-839) Contents conflict in bin/nova-net-flat-vlan-manage |
To merge this branch: | bzr merge lp:~midokura/nova/network-service |
Related bugs: | |
Related blueprints: |
Refactor Networking
(Essential)
|
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
NTT PF Lab. | Pending | ||
Review via email: mp+58061@code.launchpad.net |
Commit message
Moved security group/firewall logic out of compute into the plugin. Still needs a lot more refactoring and testing.
Description of the change
Moved security group/firewall logic out of compute into the plugin. Still needs a lot more refactoring and testing.
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === renamed file 'bin/nova-net-flat-vlan-manage' => 'bin/nova-net-flat-vlan-manage.THIS' |
2 | === added file 'bin/nova-network-manage' |
3 | --- bin/nova-network-manage 1970-01-01 00:00:00 +0000 |
4 | +++ bin/nova-network-manage 2011-04-18 00:30:54 +0000 |
5 | @@ -0,0 +1,289 @@ |
6 | +#!/usr/bin/env python |
7 | +# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
8 | + |
9 | +# Copyright 2011 Midokura KK |
10 | +# All Rights Reserved. |
11 | +# |
12 | +# Licensed under the Apache License, Version 2.0 (the "License"); you may |
13 | +# not use this file except in compliance with the License. You may obtain |
14 | +# a copy of the License at |
15 | +# |
16 | +# http://www.apache.org/licenses/LICENSE-2.0 |
17 | +# |
18 | +# Unless required by applicable law or agreed to in writing, software |
19 | +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
20 | +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
21 | +# License for the specific language governing permissions and limitations |
22 | +# under the License. |
23 | + |
24 | +# Interactive shell based on Django: |
25 | +# |
26 | +# Copyright (c) 2005, the Lawrence Journal-World |
27 | +# All rights reserved. |
28 | +# |
29 | +# Redistribution and use in source and binary forms, with or without |
30 | +# modification, are permitted provided that the following conditions are met: |
31 | +# |
32 | +# 1. Redistributions of source code must retain the above copyright notice, |
33 | +# this list of conditions and the following disclaimer. |
34 | +# |
35 | +# 2. Redistributions in binary form must reproduce the above copyright |
36 | +# notice, this list of conditions and the following disclaimer in the |
37 | +# documentation and/or other materials provided with the distribution. |
38 | +# |
39 | +# 3. Neither the name of Django nor the names of its contributors may be |
40 | +# used to endorse or promote products derived from this software without |
41 | +# specific prior written permission. |
42 | +# |
43 | +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
44 | +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
45 | +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
46 | +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
47 | +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
48 | +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
49 | +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
50 | +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
51 | +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
52 | +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
53 | +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
54 | + |
55 | + |
56 | +""" |
57 | + CLI interface for nova flat vlan network management. |
58 | +""" |
59 | + |
60 | +import gettext |
61 | +import os |
62 | +import sys |
63 | + |
64 | +import IPy |
65 | + |
66 | +import novaclient |
67 | +from novaclient import base |
68 | + |
69 | +# If ../nova/__init__.py exists, add ../ to Python search path, so that |
70 | +# it will override what happens to be installed in /usr/(local/)lib/python... |
71 | +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), |
72 | + os.pardir, |
73 | + os.pardir)) |
74 | +if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): |
75 | + sys.path.insert(0, possible_topdir) |
76 | + |
77 | +gettext.install('nova', unicode=1) |
78 | + |
79 | +from nova import context |
80 | +from nova import db as nova_db |
81 | +from nova import exception |
82 | +from nova import flags as nova_flags |
83 | +from nova import log as logging |
84 | +from nova import utils |
85 | + |
86 | +from nova.network.flat_vlan import db |
87 | + |
88 | +from nova.network.flat_vlan import client as net_client |
89 | +from nova.network.flat_vlan.db import migration |
90 | + |
91 | +NOVA_FLAGS = nova_flags.FLAGS |
92 | + |
93 | +## OS_CLIENT = net_client.NetOpenStack(os.environ['NOVA_USERNAME'], |
94 | +## os.environ['NOVA_API_KEY'], |
95 | +## os.environ['NOVA_URL']) |
96 | +OS_CLIENT = net_client.NetOpenStack('ryu', |
97 | + 'b3573cfc-9c21-4345-a601-8f17a04d43a1', |
98 | + 'http://localhost:8774/v1.0') |
99 | + |
100 | +class NetworkCommands(object): |
101 | + """Class for managing network.""" |
102 | + |
103 | + def _print_networks(self, networks): |
104 | + """Prints network info to terminal. |
105 | + |
106 | + Args: |
107 | + networks: a list of networks. |
108 | + """ |
109 | + print "%-18s\t%-15s\t%-15s\t%-15s" % (_('network'), |
110 | + _('netmask'), |
111 | + _('start address'), |
112 | + 'DNS') |
113 | + for network in networks: |
114 | + print "%-18s\t%-15s\t%-15s\t%-15s" % (network.cidr, |
115 | + network.netmask, |
116 | + network.dhcp_start, |
117 | + network.dns) |
118 | + |
119 | + def create(self, cidr, num_networks, network_size, vlan_start, vpn_start, |
120 | + cidr_v6, label='public'): |
121 | + """Creates networks. |
122 | + |
123 | + Args: |
124 | + cidr: ipv4 ip range |
125 | + num_networks: number of networks to create |
126 | + network_size: number of IPs to create for each network. |
127 | + vlan_start: VLAN starting ID number. |
128 | + vpn_start: VPN starting port number. |
129 | + cidr_v6: ipv6 IP range. |
130 | + label: network label. |
131 | + """ |
132 | + if not cidr: |
133 | + raise TypeError(_('Fixed range in the form of 10.0.0.0/8 is ' |
134 | + 'required to create networks.')) |
135 | + OS_CLIENT.networks.create_with_fixed_ips(cidr, num_networks, |
136 | + network_size, vlan_start, |
137 | + vpn_start, cidr_v6, label) |
138 | + def list(self): |
139 | + """List all created networks |
140 | + """ |
141 | + networks = OS_CLIENT.networks.list() |
142 | + self._print_networks(networks) |
143 | + |
144 | + def delete(self, fixed_range): |
145 | + """Deletes the network with fixed_range. |
146 | + |
147 | + Args: |
148 | + fixed_range: CIDR of the network to delete. |
149 | + """ |
150 | + OS_CLIENT.networks.delete_by_cidr(fixed_range) |
151 | + |
152 | +class FixedIpCommands(object): |
153 | + """Management commands for Fixed IPs.""" |
154 | + |
155 | + def _print_fixed_ips(self, ips): |
156 | + """Prints IPs. |
157 | + |
158 | + Args: |
159 | + ips: IP address list. |
160 | + """ |
161 | + #TODO need to get host information from instances. |
162 | + print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % (_('network'), |
163 | + _('IP address'), |
164 | + _('MAC address'), |
165 | + _('hostname'), |
166 | + _('host')) |
167 | + for ip in ips: |
168 | + print ip.address |
169 | + print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % ( |
170 | + ip.network['cidr'], |
171 | + ip.address, |
172 | + ip.ethernet_card['mac_address'], |
173 | + "", "") |
174 | + |
175 | + def list(self): |
176 | + """List all created fixed IPs. |
177 | + """ |
178 | + ips = OS_CLIENT.fixed_ips.list() |
179 | + self._print_fixed_ips(ips) |
180 | + |
181 | +class FloatingIpCommands(object): |
182 | + """Class for managing floating ip.""" |
183 | + |
184 | + def create(self, host, cidr): |
185 | + """Creates floating ips for host by range |
186 | + arguments: host ip_range""" |
187 | + ips = OS_CLIENT.floating_ips.create_by_cidr(host, cidr) |
188 | + |
189 | + |
190 | + def delete(self, cidr): |
191 | + """Deletes floating ips by range |
192 | + arguments: range""" |
193 | + ips = OS_CLIENT.floating_ips.delete_by_cidr(cidr) |
194 | + |
195 | + def list(self, host=None): |
196 | + """Lists all floating ips (optionally by host) |
197 | + arguments: [host]""" |
198 | + floating_ips = OS_CLIENT.floating_ips.list() |
199 | + for floating_ip in floating_ips: |
200 | + if floating_ip.fixed_ip and floating_ip.fixed_ip.ethernet_card: |
201 | + |
202 | + print "%s\t%s\t%s" % (floating_ip.host, |
203 | + floating_ip.address, |
204 | + "") |
205 | + |
206 | +class DbCommands(object): |
207 | + """Class for managing the database.""" |
208 | + |
209 | + def __init__(self): |
210 | + pass |
211 | + |
212 | + def sync(self, version=None): |
213 | + """Sync the database up to the most recent version.""" |
214 | + return migration.db_sync(version) |
215 | + |
216 | + def version(self): |
217 | + """Print the current database version.""" |
218 | + print migration.db_version() |
219 | + |
220 | +CATEGORIES = [ |
221 | + ('network', NetworkCommands), |
222 | + ('fixed', FixedIpCommands), |
223 | + ('floating', FloatingIpCommands), |
224 | + ('db', DbCommands)] |
225 | + |
226 | +def lazy_match(name, key_value_tuples): |
227 | + """Finds all objects that have a key that case insensitively contains |
228 | + [name] key_value_tuples is a list of tuples of the form (key, value) |
229 | + returns a list of tuples of the form (key, value)""" |
230 | + result = [] |
231 | + for (k, v) in key_value_tuples: |
232 | + if k.lower().find(name.lower()) == 0: |
233 | + result.append((k, v)) |
234 | + if len(result) == 0: |
235 | + print "%s does not match any options:" % name |
236 | + for k, _v in key_value_tuples: |
237 | + print "\t%s" % k |
238 | + sys.exit(2) |
239 | + if len(result) > 1: |
240 | + print "%s matched multiple options:" % name |
241 | + for k, _v in result: |
242 | + print "\t%s" % k |
243 | + sys.exit(2) |
244 | + return result |
245 | + |
246 | +def methods_of(obj): |
247 | + """Get all callable methods of an object that don't start with underscore |
248 | + returns a list of tuples of the form (method_name, method)""" |
249 | + result = [] |
250 | + for i in dir(obj): |
251 | + if callable(getattr(obj, i)) and not i.startswith('_'): |
252 | + result.append((i, getattr(obj, i))) |
253 | + return result |
254 | + |
255 | + |
256 | +def main(): |
257 | + """Parse options and call the appropriate class/method.""" |
258 | + utils.default_flagfile() |
259 | + argv = NOVA_FLAGS(sys.argv) |
260 | + logging.setup() |
261 | + |
262 | + script_name = argv.pop(0) |
263 | + if len(argv) < 1: |
264 | + print script_name + " category action [<args>]" |
265 | + print "Available categories:" |
266 | + for k, _ in CATEGORIES: |
267 | + print "\t%s" % k |
268 | + sys.exit(2) |
269 | + category = argv.pop(0) |
270 | + matches = lazy_match(category, CATEGORIES) |
271 | + # instantiate the command group object |
272 | + category, fn = matches[0] |
273 | + command_object = fn() |
274 | + actions = methods_of(command_object) |
275 | + if len(argv) < 1: |
276 | + print script_name + " category action [<args>]" |
277 | + print "Available actions for %s category:" % category |
278 | + for k, _v in actions: |
279 | + print "\t%s" % k |
280 | + sys.exit(2) |
281 | + action = argv.pop(0) |
282 | + matches = lazy_match(action, actions) |
283 | + action, fn = matches[0] |
284 | + # call the action with the remaining arguments |
285 | + try: |
286 | + fn(*argv) |
287 | + sys.exit(0) |
288 | + except TypeError: |
289 | + print "Possible wrong number of arguments supplied" |
290 | + print "%s %s: %s" % (category, action, fn.__doc__) |
291 | + raise |
292 | + |
293 | +if __name__ == '__main__': |
294 | + main() |
295 | |
296 | === modified file 'nova/api/ec2/cloud.py' |
297 | --- nova/api/ec2/cloud.py 2011-04-12 10:57:56 +0000 |
298 | +++ nova/api/ec2/cloud.py 2011-04-18 00:30:54 +0000 |
299 | @@ -75,6 +75,11 @@ |
300 | db.key_pair_create(context, key) |
301 | return {'private_key': private_key, 'fingerprint': fingerprint} |
302 | |
303 | +def _get_net_api_service(context, project_id): |
304 | + """Gets network API service for a given project. |
305 | + """ |
306 | + net_factory = net_service.get_service_factory(context, project_id) |
307 | + return net_factory.get_api_service() |
308 | |
309 | class CloudController(object): |
310 | """ CloudController provides the critical dispatch between |
311 | @@ -116,7 +121,7 @@ |
312 | # TODO(vish): Do this with M2Crypto instead |
313 | utils.runthis(_("Generating root CA: %s"), "sh", genrootca_sh_path) |
314 | os.chdir(start) |
315 | - |
316 | + |
317 | def _get_mpi_data(self, context, project_id): |
318 | result = {} |
319 | for instance in self.compute_api.get_all(context, |
320 | @@ -161,9 +166,7 @@ |
321 | vnic_ids = db.virtual_nics_get_by_instance(ctxt, instance_ref['id']) |
322 | floating_ip = None |
323 | if vnic_ids: |
324 | - net_factory = net_service.get_service_factory(ctxt, |
325 | - instance_ref['project_id']) |
326 | - net_api = net_factory.get_api_service() |
327 | + net_api = _get_net_api_service(ctxt, instance_ref['project_id']) |
328 | net = net_api.get_vnic_network_info(ctxt, vnic_ids[0]) |
329 | if net and net['IPs'] and net['IPs'][0]['floating_ips']: |
330 | floating_ip = net['IPs'][0]['floating_ips'][0] |
331 | @@ -339,49 +342,57 @@ |
332 | |
333 | def describe_security_groups(self, context, group_name=None, **kwargs): |
334 | self.compute_api.ensure_default_security_group(context) |
335 | + net_api = _get_net_api_service(context, context.project_id) |
336 | + |
337 | if group_name: |
338 | groups = [] |
339 | for name in group_name: |
340 | - group = db.security_group_get_by_name(context, |
341 | - context.project_id, |
342 | - name) |
343 | + group = net_api.get_security_group_by_name(context, name, |
344 | + context.project_id) |
345 | groups.append(group) |
346 | elif context.is_admin: |
347 | - groups = db.security_group_get_all(context) |
348 | + groups = net_api.get_security_groups(context) |
349 | else: |
350 | - groups = db.security_group_get_by_project(context, |
351 | - context.project_id) |
352 | - groups = [self._format_security_group(context, g) for g in groups] |
353 | + groups = net_api.get_security_groups(context, |
354 | + tenant_id=context.project_id) |
355 | + groups = [self._format_security_group(context, g, net_api) for g in |
356 | + groups] |
357 | |
358 | return {'securityGroupInfo': |
359 | list(sorted(groups, |
360 | key=lambda k: (k['ownerId'], k['groupName'])))} |
361 | |
362 | - def _format_security_group(self, context, group): |
363 | + def _format_security_group_rule(self, context, rule, net_api): |
364 | + r = {} |
365 | + r['ipProtocol'] = rule['protocol'] |
366 | + r['fromPort'] = rule['from_port'] |
367 | + r['toPort'] = rule['to_port'] |
368 | + r['groups'] = [] |
369 | + r['ipRanges'] = [] |
370 | + if rule['group_id']: |
371 | + source_group = net_api.get_security_group(context, rule['group_id'], |
372 | + context.project_id) |
373 | + r['groups'] += [{'groupName': source_group['name'], |
374 | + 'userId': source_group['tenant_id']}] |
375 | + else: |
376 | + r['ipRanges'] += [{'cidrIp': rule['cidr']}] |
377 | + return r |
378 | + |
379 | + def _format_security_group(self, context, group, net_api): |
380 | g = {} |
381 | - g['groupDescription'] = group.description |
382 | - g['groupName'] = group.name |
383 | - g['ownerId'] = group.project_id |
384 | + g['groupDescription'] = group['description'] |
385 | + g['groupName'] = group['name'] |
386 | + g['ownerId'] = group['tenant_id'] |
387 | g['ipPermissions'] = [] |
388 | - for rule in group.rules: |
389 | - r = {} |
390 | - r['ipProtocol'] = rule.protocol |
391 | - r['fromPort'] = rule.from_port |
392 | - r['toPort'] = rule.to_port |
393 | - r['groups'] = [] |
394 | - r['ipRanges'] = [] |
395 | - if rule.group_id: |
396 | - source_group = db.security_group_get(context, rule.group_id) |
397 | - r['groups'] += [{'groupName': source_group.name, |
398 | - 'userId': source_group.project_id}] |
399 | - else: |
400 | - r['ipRanges'] += [{'cidrIp': rule.cidr}] |
401 | - g['ipPermissions'] += [r] |
402 | + if 'rules' in group: |
403 | + for rule in group['rules']: |
404 | + g['ipPermissions'] += [self._format_security_group_rule( |
405 | + context, rule, net_api)] |
406 | return g |
407 | |
408 | - def _revoke_rule_args_to_dict(self, context, to_port=None, from_port=None, |
409 | - ip_protocol=None, cidr_ip=None, user_id=None, |
410 | - source_security_group_name=None, |
411 | + def _revoke_rule_args_to_dict(self, context, net_api, to_port=None, |
412 | + from_port=None, ip_protocol=None, cidr_ip=None, |
413 | + user_id=None, source_security_group_name=None, |
414 | source_security_group_owner_id=None): |
415 | |
416 | values = {} |
417 | @@ -390,10 +401,9 @@ |
418 | source_project_id = self._get_source_project_id(context, |
419 | source_security_group_owner_id) |
420 | |
421 | - source_security_group = \ |
422 | - db.security_group_get_by_name(context.elevated(), |
423 | - source_project_id, |
424 | - source_security_group_name) |
425 | + source_security_group = net_api.get_security_group_by_name( |
426 | + context.elevated(), source_security_group_name, |
427 | + source_project_id) |
428 | values['group_id'] = source_security_group['id'] |
429 | elif cidr_ip: |
430 | # If this fails, it throws an exception. This is what we want. |
431 | @@ -425,11 +435,11 @@ |
432 | |
433 | return values |
434 | |
435 | - def _security_group_rule_exists(self, security_group, values): |
436 | + def _security_group_rule_exists(self, rules, values): |
437 | """Indicates whether the specified rule values are already |
438 | defined in the given security group. |
439 | """ |
440 | - for rule in security_group.rules: |
441 | + for rule in rules: |
442 | if 'group_id' in values: |
443 | if rule['group_id'] == values['group_id']: |
444 | return True |
445 | @@ -447,22 +457,21 @@ |
446 | LOG.audit(_("Revoke security group ingress %s"), group_name, |
447 | context=context) |
448 | self.compute_api.ensure_default_security_group(context) |
449 | - security_group = db.security_group_get_by_name(context, |
450 | - context.project_id, |
451 | - group_name) |
452 | - |
453 | - criteria = self._revoke_rule_args_to_dict(context, **kwargs) |
454 | + net_api = _get_net_api_service(context, context.project_id) |
455 | + criteria = self._revoke_rule_args_to_dict(context, net_api, **kwargs) |
456 | if criteria == None: |
457 | raise exception.ApiError(_("Not enough parameters to build a " |
458 | "valid rule.")) |
459 | - |
460 | - for rule in security_group.rules: |
461 | + |
462 | + security_group = net_api.get_security_group_by_name(context, group_name, |
463 | + context.project_id) |
464 | + for rule in security_group['rules']: |
465 | match = True |
466 | for (k, v) in criteria.iteritems(): |
467 | - if getattr(rule, k, False) != v: |
468 | + if (not k in rule) or rule[k] != v: |
469 | match = False |
470 | if match: |
471 | - db.security_group_rule_destroy(context, rule['id']) |
472 | + net_api.delete_security_group_rule(context, rule['id']) |
473 | self.compute_api.trigger_security_group_rules_refresh(context, |
474 | security_group['id']) |
475 | return True |
476 | @@ -476,25 +485,26 @@ |
477 | LOG.audit(_("Authorize security group ingress %s"), group_name, |
478 | context=context) |
479 | self.compute_api.ensure_default_security_group(context) |
480 | - security_group = db.security_group_get_by_name(context, |
481 | - context.project_id, |
482 | - group_name) |
483 | - |
484 | - values = self._revoke_rule_args_to_dict(context, **kwargs) |
485 | + net_api = _get_net_api_service(context, context.project_id) |
486 | + values = self._revoke_rule_args_to_dict(context, net_api, **kwargs) |
487 | if values is None: |
488 | raise exception.ApiError(_("Not enough parameters to build a " |
489 | "valid rule.")) |
490 | - values['parent_group_id'] = security_group.id |
491 | - |
492 | - if self._security_group_rule_exists(security_group, values): |
493 | + |
494 | + security_group = net_api.get_security_group_by_name(context, group_name, |
495 | + context.project_id) |
496 | + |
497 | + if self._security_group_rule_exists(security_group['rules'], values): |
498 | raise exception.ApiError(_('This rule already exists in group %s') |
499 | % group_name) |
500 | |
501 | - security_group_rule = db.security_group_rule_create(context, values) |
502 | + rule = net_api.create_security_group_rule(context, security_group['id'], |
503 | + **values) |
504 | + LOG.audit(_("Created Security Group Rule: %s for group %s"), |
505 | + rule['id'], security_group['id'], context=context) |
506 | |
507 | self.compute_api.trigger_security_group_rules_refresh(context, |
508 | security_group['id']) |
509 | - |
510 | return True |
511 | |
512 | def _get_source_project_id(self, context, source_security_group_owner_id): |
513 | @@ -517,24 +527,22 @@ |
514 | def create_security_group(self, context, group_name, group_description): |
515 | LOG.audit(_("Create Security Group %s"), group_name, context=context) |
516 | self.compute_api.ensure_default_security_group(context) |
517 | - if db.security_group_exists(context, context.project_id, group_name): |
518 | - raise exception.ApiError(_('group %s already exists') % group_name) |
519 | - |
520 | - group = {'user_id': context.user_id, |
521 | - 'project_id': context.project_id, |
522 | - 'name': group_name, |
523 | - 'description': group_description} |
524 | - group_ref = db.security_group_create(context, group) |
525 | - |
526 | + net_api = _get_net_api_service(context, context.project_id) |
527 | + group = net_api.create_security_group(context, context.user_id, |
528 | + context.project_id, |
529 | + group_name, |
530 | + group_description) |
531 | return {'securityGroupSet': [self._format_security_group(context, |
532 | - group_ref)]} |
533 | + group, |
534 | + net_api)]} |
535 | |
536 | def delete_security_group(self, context, group_name, **kwargs): |
537 | LOG.audit(_("Delete security group %s"), group_name, context=context) |
538 | - security_group = db.security_group_get_by_name(context, |
539 | - context.project_id, |
540 | - group_name) |
541 | - db.security_group_destroy(context, security_group.id) |
542 | + net_api = _get_net_api_service(context, context.project_id) |
543 | + security_group = net_api.get_security_group_by_name(context, |
544 | + group_name, |
545 | + context.project_id) |
546 | + net_api.delete_security_group(context, security_group['id']) |
547 | return True |
548 | |
549 | def get_console_output(self, context, instance_id, **kwargs): |
550 | @@ -727,13 +735,11 @@ |
551 | fixed_addr = None |
552 | floating_addr = None |
553 | vnic_ids = db.virtual_nics_get_by_instance(context, instance['id']) |
554 | + net_api = _get_net_api_service(context, instance['project_id']) |
555 | + net = None |
556 | if vnic_ids: |
557 | - net_factory = net_service.get_service_factory(context, |
558 | - context.project_id) |
559 | - net_api_service = net_factory.get_api_service() |
560 | # For now just get one VNIC info |
561 | - net = net_api_service.get_vnic_network_info(context, |
562 | - vnic_ids[0]) |
563 | + net = net_api.get_vnic_network_info(context, vnic_ids[0]) |
564 | # Assume only one IP for now. |
565 | if net['IPs']: |
566 | ip_info = net['IPs'][0] |
567 | @@ -769,12 +775,13 @@ |
568 | r = {} |
569 | r['reservationId'] = instance['reservation_id'] |
570 | r['ownerId'] = instance['project_id'] |
571 | - security_group_names = [] |
572 | - if instance.get('security_groups'): |
573 | - for security_group in instance['security_groups']: |
574 | + |
575 | + if net and net['security_groups']: |
576 | + security_group_names = [] |
577 | + for security_group in net['security_groups']: |
578 | security_group_names.append(security_group['name']) |
579 | - r['groupSet'] = self._convert_to_set(security_group_names, |
580 | - 'groupId') |
581 | + r['groupSet'] = self._convert_to_set(security_group_names, |
582 | + 'groupId') |
583 | r['instancesSet'] = [] |
584 | reservations[instance['reservation_id']] = r |
585 | reservations[instance['reservation_id']]['instancesSet'].append(i) |
586 | @@ -786,10 +793,7 @@ |
587 | |
588 | def format_addresses(self, context): |
589 | addresses = [] |
590 | - |
591 | - net_factory = net_service.get_service_factory(context, |
592 | - context.project_id) |
593 | - net_api = net_factory.get_api_service() |
594 | + net_api = _get_net_api_service(context, context.project_id) |
595 | project_id = context.project_id if not context.is_admin else None |
596 | ip_info_list = net_api.get_addresses(context, project_id) |
597 | |
598 | @@ -815,15 +819,12 @@ |
599 | |
600 | def allocate_address(self, context, **kwargs): |
601 | LOG.audit(_("Allocate address"), context=context) |
602 | - |
603 | - net_factory = net_service.get_service_factory(context, |
604 | - context.project_id) |
605 | - net_api_service = net_factory.get_api_service() |
606 | + net_api = _get_net_api_service(context, context.project_id) |
607 | ip_quota = quota.get_quota(context, context.project_id)['floating_ips'] |
608 | try: |
609 | - public_ip = net_api_service.allocate_address(context, |
610 | - context.project_id, |
611 | - ip_quota) |
612 | + public_ip = net_api.allocate_address(context, |
613 | + context.project_id, |
614 | + ip_quota) |
615 | except quota.QuotaError, ex: |
616 | LOG.warn(_("Quota exceeded for %s, tried to allocate " |
617 | "address"), |
618 | @@ -834,11 +835,8 @@ |
619 | |
620 | def release_address(self, context, public_ip, **kwargs): |
621 | LOG.audit(_("Release address %s"), public_ip, context=context) |
622 | - net_factory = net_service.get_service_factory(context, |
623 | - context.project_id) |
624 | - net_api_service = net_factory.get_api_service() |
625 | - |
626 | - net_api_service.deallocate_address(context, public_ip) |
627 | + net_api = _get_net_api_service(context, context.project_id) |
628 | + net_api.deallocate_address(context, public_ip) |
629 | return {'releaseResponse': ["Address released."]} |
630 | |
631 | def associate_address(self, context, instance_id, public_ip, **kwargs): |
632 | @@ -846,18 +844,14 @@ |
633 | " instance %(instance_id)s") % locals(), context=context) |
634 | instance_id = ec2utils.ec2_id_to_id(instance_id) |
635 | vnic_ids = db.virtual_nics_get_by_instance(context, instance_id) |
636 | - net_factory = net_service.get_service_factory(context, |
637 | - context.project_id) |
638 | - net_api_service = net_factory.get_api_service() |
639 | - net_api_service.associate_address(context, vnic_ids[0], public_ip) |
640 | + net_api = _get_net_api_service(context, context.project_id) |
641 | + net_api.associate_address(context, vnic_ids[0], public_ip) |
642 | return {'associateResponse': ["Address associated."]} |
643 | |
644 | def disassociate_address(self, context, public_ip, **kwargs): |
645 | LOG.audit(_("Disassociate address %s"), public_ip, context=context) |
646 | - net_factory = net_service.get_service_factory(context, |
647 | - context.project_id) |
648 | - net_api_service = net_factory.get_api_service() |
649 | - net_api_service.disassociate_address(context, public_ip) |
650 | + net_api = _get_net_api_service(context, context.project_id) |
651 | + net_api.disassociate_address(context, public_ip) |
652 | return {'disassociateResponse': ["Address disassociated."]} |
653 | |
654 | def run_instances(self, context, **kwargs): |
655 | |
656 | === modified file 'nova/auth/manager.py' |
657 | --- nova/auth/manager.py 2011-04-07 06:34:15 +0000 |
658 | +++ nova/auth/manager.py 2011-04-18 00:30:54 +0000 |
659 | @@ -590,7 +590,7 @@ |
660 | project_id = Project.safe_id(project) |
661 | net_factory = net_service.get_service_factory(ctxt, project_id) |
662 | net_api = net_factory.get_api_service() |
663 | - net = net_api.get_project_network_info(ctxt, project_id) |
664 | + net = net_api.get_tenant_network_info(ctxt, project_id) |
665 | if net: |
666 | return (net['vpn_public_address'], net['vpn_public_port']) |
667 | else: |
668 | |
669 | === modified file 'nova/compute/api.py' |
670 | --- nova/compute/api.py 2011-04-12 10:57:56 +0000 |
671 | +++ nova/compute/api.py 2011-04-18 00:30:54 +0000 |
672 | @@ -28,7 +28,6 @@ |
673 | from nova import exception |
674 | from nova import flags |
675 | from nova import log as logging |
676 | -from nova import network |
677 | from nova import quota |
678 | from nova import rpc |
679 | from nova import utils |
680 | @@ -45,10 +44,18 @@ |
681 | FLAGS = flags.FLAGS |
682 | flags.DECLARE('vncproxy_topic', 'nova.vnc') |
683 | |
684 | +_DEFAULT_SECURITY_GROUP_NAME = 'default' |
685 | +_DEFAULT_SECURITY_GROUP_DESCRIPTION = 'default' |
686 | + |
687 | def generate_default_hostname(instance_id): |
688 | """Default function to generate a hostname given an instance reference.""" |
689 | return str(instance_id) |
690 | |
691 | +def _get_net_api_service(context, project_id): |
692 | + """Gets network API service for a given project. |
693 | + """ |
694 | + net_factory = net_service.get_service_factory(context, project_id) |
695 | + return net_factory.get_api_service() |
696 | |
697 | class API(base.Base): |
698 | """API for interacting with the compute manager.""" |
699 | @@ -174,6 +181,8 @@ |
700 | if ramdisk_id: |
701 | self.image_service.show(context, ramdisk_id) |
702 | |
703 | + net_api = _get_net_api_service(context, context.project_id) |
704 | + |
705 | if security_group is None: |
706 | security_group = ['default'] |
707 | if not type(security_group) is list: |
708 | @@ -182,9 +191,9 @@ |
709 | security_groups = [] |
710 | self.ensure_default_security_group(context) |
711 | for security_group_name in security_group: |
712 | - group = db.security_group_get_by_name(context, |
713 | - context.project_id, |
714 | - security_group_name) |
715 | + group = net_api.get_security_group_by_name(context, |
716 | + security_group_name, |
717 | + context.project_id) |
718 | security_groups.append(group['id']) |
719 | |
720 | if key_data is None and key_name: |
721 | @@ -228,22 +237,16 @@ |
722 | if vnic_list: |
723 | vnics = vnic_list.pop(0) |
724 | else: |
725 | - net_factory = net_service.get_service_factory(context, |
726 | - context.project_id) |
727 | - net_api_service = net_factory.get_api_service() |
728 | - vnics = [net_api_service.create_vnic(context)] |
729 | + vnics = [net_api.create_vnic(context)] |
730 | + |
731 | # Associate these vnics to the instance. |
732 | for vnic_id in vnics: |
733 | self.db.instance_add_virtual_nic(context, instance_id, |
734 | vnic_id) |
735 | - |
736 | - elevated = context.elevated() |
737 | - if not security_groups: |
738 | - security_groups = [] |
739 | - for security_group_id in security_groups: |
740 | - self.db.instance_add_security_group(elevated, |
741 | - instance_id, |
742 | - security_group_id) |
743 | + for security_group_id in security_groups: |
744 | + net_api.associate_vnic_and_security_group( |
745 | + context.elevated(), vnic_id, security_group_id, |
746 | + context.project_id) |
747 | |
748 | # Set sane defaults if not specified |
749 | updates = dict(hostname=self.hostname_factory(instance_id)) |
750 | @@ -288,31 +291,31 @@ |
751 | :param context: the security context |
752 | |
753 | """ |
754 | - try: |
755 | - db.security_group_get_by_name(context, context.project_id, |
756 | - 'default') |
757 | - except exception.NotFound: |
758 | - values = {'name': 'default', |
759 | - 'description': 'default', |
760 | - 'user_id': context.user_id, |
761 | - 'project_id': context.project_id} |
762 | - db.security_group_create(context, values) |
763 | + net_api = _get_net_api_service(context, context.project_id) |
764 | + group = net_api.get_security_group_by_name( |
765 | + context, _DEFAULT_SECURITY_GROUP_NAME, context.project_id) |
766 | + if not group: |
767 | + net_api.create_security_group( |
768 | + context, context.user_id, context.project_id, |
769 | + _DEFAULT_SECURITY_GROUP_NAME, |
770 | + _DEFAULT_SECURITY_GROUP_DESCRIPTION) |
771 | |
772 | def trigger_security_group_rules_refresh(self, context, security_group_id): |
773 | """Called when a rule is added to or removed from a security_group""" |
774 | |
775 | - security_group = self.db.security_group_get(context, security_group_id) |
776 | - |
777 | + net_api = _get_net_api_service(context, context.project_id) |
778 | + security_group = net_api.get_security_group(context, security_group_id) |
779 | hosts = set() |
780 | - for instance in security_group['instances']: |
781 | - if instance['host'] is not None: |
782 | + for vnic_id in security_group['vnic_ids']: |
783 | + instance = self.db.instance_get_by_virtual_nic(context, vnic_id) |
784 | + if instance and instance['host']: |
785 | hosts.add(instance['host']) |
786 | |
787 | for host in hosts: |
788 | rpc.cast(context, |
789 | self.db.queue_get_for(context, FLAGS.compute_topic, host), |
790 | {"method": "refresh_security_group_rules", |
791 | - "args": {"security_group_id": security_group.id}}) |
792 | + "args": {"security_group_id": security_group_id}}) |
793 | |
794 | def trigger_security_group_members_refresh(self, context, group_id): |
795 | """Called when a security group gains a new or loses a member |
796 | @@ -425,10 +428,8 @@ |
797 | context, reservation_id) |
798 | |
799 | if fixed_ip is not None: |
800 | - net_factory = net_service.get_service_factory(context, |
801 | - project_id) |
802 | - net_api_service = net_factory.get_api_service() |
803 | - vnic_id = net_api_service.get_vnic_by_ip(context, fixed_ip) |
804 | + net_api = _get_net_api_service(context, project_id) |
805 | + vnic_id = net_api.get_vnic_by_ip(context, fixed_ip) |
806 | return self.db.instance_get_by_virtual_nic(context, vnic_id) |
807 | |
808 | if project_id or not context.is_admin: |
809 | |
810 | === added file 'nova/network/flat_vlan/api/openstack/ethernet_cards.py' |
811 | --- nova/network/flat_vlan/api/openstack/ethernet_cards.py 1970-01-01 00:00:00 +0000 |
812 | +++ nova/network/flat_vlan/api/openstack/ethernet_cards.py 2011-04-18 00:30:54 +0000 |
813 | @@ -0,0 +1,67 @@ |
814 | +# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
815 | + |
816 | +# Copyright (c) 2011 NTT. |
817 | +# All Rights Reserved. |
818 | +# |
819 | +# Licensed under the Apache License, Version 2.0 (the "License"); you may |
820 | +# not use this file except in compliance with the License. You may obtain |
821 | +# a copy of the License at |
822 | +# |
823 | +# http://www.apache.org/licenses/LICENSE-2.0 |
824 | +# |
825 | +# Unless required by applicable law or agreed to in writing, software |
826 | +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
827 | +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
828 | +# License for the specific language governing permissions and limitations |
829 | +# under the License. |
830 | +from webob import exc |
831 | + |
832 | +from nova.api.openstack import common as nova_common |
833 | +from nova import exception |
834 | +from nova import wsgi |
835 | +from nova.api.openstack import faults |
836 | +from nova.network.flat_vlan import common |
837 | +from nova.network.flat_vlan import db |
838 | +from nova.network.flat_vlan import manager |
839 | + |
840 | +class Controller(wsgi.Controller): |
841 | + """ The Ethernet Cards API controller for the OpenStack API """ |
842 | + |
843 | + def __init__(self): |
844 | + super(Controller, self).__init__() |
845 | + |
846 | + def index(self, req): |
847 | + """Return all ethernet cards in brief""" |
848 | + items = db.api.ethernet_card_get_all(req.environ['nova.context']) |
849 | + items = nova_common.limited(items, req) |
850 | + items = [common.filter_keys(item, ('id', 'mac_address')) |
851 | + for item in items] |
852 | + return dict(ethernet_cards=items) |
853 | + |
854 | + def create(self, req): |
855 | + """ Creates a new ethernet card. """ |
856 | + ethernet_card = manager.ethernet_card_create_with_random_mac( |
857 | + req.environ['nova.context']) |
858 | + return dict(id=ethernet_card.id) |
859 | + |
860 | + def update(self, req, id): |
861 | + """Update an ethernet card""" |
862 | + return exc.HTTPNoContent() |
863 | + |
864 | + def show(self, req, id): |
865 | + """ Gets the ethernet card with ID given """ |
866 | + try: |
867 | + ethernet_card = db.api.ethernet_card_get( |
868 | + req.environ['nova.context'], id) |
869 | + return dict(id=ethernet_card.id, |
870 | + mac_address=ethernet_card.mac_address) |
871 | + except exception.NotFound: |
872 | + return faults.Fault(exc.HTTPNotFound()) |
873 | + |
874 | + def delete(self, req, id): |
875 | + """ Destroys an ethernet card """ |
876 | + try: |
877 | + db.api.ethernet_card_delete(req.environ['nova.context'], id) |
878 | + except exception.NotFound: |
879 | + return faults.Fault(exc.HTTPNotFound()) |
880 | + return exc.HTTPAccepted() |
881 | |
882 | === removed file 'nova/network/flat_vlan/api/openstack/virtual_nics.py' |
883 | --- nova/network/flat_vlan/api/openstack/virtual_nics.py 2011-03-31 07:15:23 +0000 |
884 | +++ nova/network/flat_vlan/api/openstack/virtual_nics.py 1970-01-01 00:00:00 +0000 |
885 | @@ -1,68 +0,0 @@ |
886 | -# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
887 | - |
888 | -# Copyright (c) 2011 NTT. |
889 | -# All Rights Reserved. |
890 | -# |
891 | -# Licensed under the Apache License, Version 2.0 (the "License"); you may |
892 | -# not use this file except in compliance with the License. You may obtain |
893 | -# a copy of the License at |
894 | -# |
895 | -# http://www.apache.org/licenses/LICENSE-2.0 |
896 | -# |
897 | -# Unless required by applicable law or agreed to in writing, software |
898 | -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
899 | -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
900 | -# License for the specific language governing permissions and limitations |
901 | -# under the License. |
902 | -from webob import exc |
903 | - |
904 | -from nova.api.openstack import common as nova_common |
905 | -from nova import exception |
906 | -from nova import wsgi |
907 | -from nova.api.openstack import faults |
908 | -from nova.db import base |
909 | -from nova.network.flat_vlan import common |
910 | -from nova.network.flat_vlan import db |
911 | -from nova.network.flat_vlan import manager |
912 | - |
913 | -class Controller(wsgi.Controller): |
914 | - """ The Virtual NICs API controller for the OpenStack API """ |
915 | - |
916 | - def __init__(self): |
917 | - super(Controller, self).__init__() |
918 | - |
919 | - def index(self, req): |
920 | - """Return all ethernet cards in brief""" |
921 | - items = db.api.ethernet_card_get_all(req.environ['nova.context']) |
922 | - items = nova_common.limited(items, req) |
923 | - items = [common.filter_keys(item, ('id', 'mac_address')) |
924 | - for item in items] |
925 | - return dict(ethernet_cards=items) |
926 | - |
927 | - def create(self, req): |
928 | - """ Creates a new ethernet card. """ |
929 | - ethernet_card = manager.ethernet_card_create_with_random_mac( |
930 | - req.environ['nova.context']) |
931 | - return dict(id=ethernet_card.id) |
932 | - |
933 | - def update(self, req, id): |
934 | - """Update an ethernet card""" |
935 | - return exc.HTTPNoContent() |
936 | - |
937 | - def show(self, req, id): |
938 | - """ Gets the ethernet card with ID given """ |
939 | - try: |
940 | - ethernet_card = db.api.ethernet_card_get( |
941 | - req.environ['nova.context'], id) |
942 | - return dict(id=ethernet_card.id, |
943 | - mac_address=ethernet_card.mac_address) |
944 | - except exception.NotFound: |
945 | - return faults.Fault(exc.HTTPNotFound()) |
946 | - |
947 | - def delete(self, req, id): |
948 | - """ Destroys an ethernet card """ |
949 | - try: |
950 | - db.api.ethernet_card_delete(req.environ['nova.context'], id) |
951 | - except exception.NotFound: |
952 | - return faults.Fault(exc.HTTPNotFound()) |
953 | - return exc.HTTPAccepted() |
954 | |
955 | === modified file 'nova/network/flat_vlan/api/service.py' |
956 | --- nova/network/flat_vlan/api/service.py 2011-04-12 06:44:35 +0000 |
957 | +++ nova/network/flat_vlan/api/service.py 2011-04-18 00:30:54 +0000 |
958 | @@ -25,6 +25,7 @@ |
959 | from nova.network.flat_vlan import flags |
960 | from nova.network.flat_vlan import manager |
961 | from nova.network.flat_vlan import db |
962 | +from nova.network.flat_vlan.api.openstack import ethernet_cards |
963 | from nova.network.flat_vlan.api.openstack import fixed_ips |
964 | from nova.network.flat_vlan.api.openstack import floating_ips |
965 | from nova.network.flat_vlan.api.openstack import networks |
966 | @@ -32,7 +33,9 @@ |
967 | FLAGS = flags.FlagAccessor() |
968 | |
969 | class NetworkApiService(object): |
970 | - """Network API Service for this plugin.""" |
971 | + """Network API Service for this plugin. This should be eventually |
972 | + refactored into separate API services, such as OpenStack API routes, |
973 | + Security Group, Network IP management, etc.""" |
974 | |
975 | interface.implements(service.INetworkApiService) |
976 | |
977 | @@ -43,6 +46,10 @@ |
978 | route_map: NetworkServiceRouteMap object to add the routes to. |
979 | """ |
980 | # Set up fixed IPs |
981 | + route_map.resource("ethernet_cards", "/ethernet_cards", |
982 | + controller=ethernet_cards.Controller()) |
983 | + |
984 | + # Set up fixed IPs |
985 | route_map.resource("fixed_ips", "/fixed_ips", |
986 | controller=fixed_ips.Controller()) |
987 | |
988 | @@ -133,6 +140,18 @@ |
989 | fixed_ip: Fixed IPv4 IP address |
990 | floating_ips: A list of floating IPv4 IP addresses |
991 | ip_v6: IPv6 address |
992 | + security_groups: A list of security group dictionary. |
993 | + id: unique identifier of the security group. |
994 | + name: Name of the security group. |
995 | + description: Security group description |
996 | + tenant_id: The tenant to which the security group belongs. |
997 | + rules: A list of ingress rules for this security group. |
998 | + parent_group_id: Parent security group ID. |
999 | + protocol: Protocol for the rule. |
1000 | + from_port: The source port for the rule. |
1001 | + to_port: The destination port for the rule. |
1002 | + cidr: CIDR for the rule. |
1003 | + security_group_id: The security group ID. |
1004 | """ |
1005 | ethernet_card = db.ethernet_card_get(context, vnic_id) |
1006 | if not ethernet_card: |
1007 | @@ -140,18 +159,16 @@ |
1008 | |
1009 | net = dict(ipv6_enabled=FLAGS.net_flat_vlan_use_ipv6, |
1010 | mac_address=ethernet_card.mac_address, |
1011 | - IPs=[]) |
1012 | - |
1013 | - if not ethernet_card.fixed_ips: |
1014 | - return net |
1015 | + IPs=[], security_groups=[]) |
1016 | |
1017 | for fixed_ip in ethernet_card.fixed_ips: |
1018 | ip_info = dict() |
1019 | ip_info['fixed_ip'] = fixed_ip.address |
1020 | |
1021 | self._fill_dict_with_network_info(ip_info, fixed_ip.network) |
1022 | - ip_info['ip_v6'] = utils.to_global_ipv6(fixed_ip.network.cidr_v6, |
1023 | - ethernet_card.mac_address) |
1024 | + if FLAGS.net_flat_vlan_use_ipv6 and fixed_ip.network.cidr_v6: |
1025 | + ip_info['ip_v6'] = utils.to_global_ipv6( |
1026 | + fixed_ip.network.cidr_v6, ethernet_card.mac_address) |
1027 | |
1028 | ip_info['floating_ips'] = [] |
1029 | for floating_ip in fixed_ip.floating_ips: |
1030 | @@ -159,14 +176,22 @@ |
1031 | |
1032 | net['IPs'].append(ip_info) |
1033 | |
1034 | + for security_group in ethernet_card.security_groups: |
1035 | + group_dict = self._convert_to_security_group_dict(security_group) |
1036 | + group_dict['rules'] = [] |
1037 | + for rule in security_group.rules: |
1038 | + group_dict['rules'].append( |
1039 | + self._convert_to_security_group_rule_dict(rule)) |
1040 | + net['security_groups'].append(group_dict) |
1041 | + |
1042 | return net |
1043 | |
1044 | - def get_project_network_info(self, context, project_id): |
1045 | - """Gets network related information for the project. |
1046 | + def get_tenant_network_info(self, context, tenant_id): |
1047 | + """Gets network related information for the tenant. |
1048 | |
1049 | Args: |
1050 | context: Nova context object. |
1051 | - project_id: project ID to get the data for. |
1052 | + tenant_id: tenant ID to get the data for. |
1053 | |
1054 | Returns: |
1055 | A dictionary with the following keys: |
1056 | @@ -184,7 +209,7 @@ |
1057 | netmask_v6: network mask for IPv6 if applicable |
1058 | gateway_v6: network gateway for IPv6 if applicable |
1059 | """ |
1060 | - network = db.network_get_by_project(context, project_id) |
1061 | + network = db.network_get_by_tenant(context, tenant_id) |
1062 | if not network: |
1063 | return None |
1064 | |
1065 | @@ -192,32 +217,32 @@ |
1066 | self._fill_dict_with_network_info(net, network) |
1067 | return net |
1068 | |
1069 | - def get_addresses(self, context, project_id=None): |
1070 | + def get_addresses(self, context, tenant_id=None): |
1071 | """Gets all the floating IPs, their mapped fixed IPs and |
1072 | ethernet card ID. |
1073 | |
1074 | Args: |
1075 | context: Nova context object. |
1076 | - project_id: Project ID to filter the IPs by.(optional) |
1077 | + tenant_id: Tenant ID to filter the IPs by. (optional) |
1078 | |
1079 | Returns: |
1080 | A list of dictionary with keys: |
1081 | floating_ip: Public IP |
1082 | fixed_ip: Private IP |
1083 | - project_id: Project ID for the floating IP. |
1084 | + tenant_id: Tenant ID for the floating IP. |
1085 | vnic_id: VNIC ID |
1086 | mac_address: MAC address |
1087 | """ |
1088 | - if project_id: |
1089 | - floating_ips = db.floating_ip_get_all_by_project(context, |
1090 | - project_id) |
1091 | + if tenant_id: |
1092 | + floating_ips = db.floating_ip_get_all_by_tenant(context, |
1093 | + tenant_id) |
1094 | else: |
1095 | floating_ips = db.floating_ip_get_all(context) |
1096 | |
1097 | addresses = [] |
1098 | for floating_ip in floating_ips: |
1099 | ip_info = dict(floating_ip=floating_ip.address, |
1100 | - project_id=floating_ip.project_id, |
1101 | + tenant_id=floating_ip.tenant_id, |
1102 | fixed_ip=None, |
1103 | vnic_id=None, |
1104 | mac_address=None) |
1105 | @@ -231,12 +256,12 @@ |
1106 | addresses.append(ip_info) |
1107 | return addresses |
1108 | |
1109 | - def allocate_address(self, context, project_id, ip_quota): |
1110 | - """Gets the number of floating IPs associated with a project. |
1111 | + def allocate_address(self, context, tenant_id, ip_quota): |
1112 | + """Allocates a free floating IP address for a tenant. |
1113 | |
1114 | Args: |
1115 | context: Nova context object needed to access the DB. |
1116 | - project_id: Project to allocate the address from. |
1117 | + tenant_id: Tenant to allocate the address from. |
1118 | ip_quota: Quota for IP addresses. |
1119 | |
1120 | Returns: |
1121 | @@ -245,12 +270,12 @@ |
1122 | Raises: |
1123 | quota.QuotaError: Over the quota limit. |
1124 | """ |
1125 | - used_floating_ips = db.floating_ip_count_by_project(context, |
1126 | - project_id) |
1127 | + used_floating_ips = db.floating_ip_count_by_tenant(context, |
1128 | + tenant_id) |
1129 | allowed_floating_ips = min(1, ip_quota - used_floating_ips) |
1130 | if allowed_floating_ips < 1: |
1131 | - raise quota.QuotaError(_("Quota exceeeded for %s, tried to allocate" |
1132 | - " address"), project_id) |
1133 | + raise quota.QuotaError("Quota exceeded for %s, tried to allocate" |
1134 | + " address", tenant_id) |
1135 | |
1136 | # Let the network service handle this because the DB update requires the |
1137 | # network service host name. |
1138 | @@ -258,12 +283,12 @@ |
1139 | FLAGS.net_flat_vlan_network_topic, |
1140 | {"method": "get_host"}) |
1141 | |
1142 | - ip = db.floating_ip_allocate_address(context, host, project_id) |
1143 | + ip = db.floating_ip_allocate_address(context, host, tenant_id) |
1144 | return ip.address |
1145 | |
1146 | def deallocate_address(self, context, floating_address): |
1147 | """Deallocates the public IP address by removing it from any |
1148 | - project. |
1149 | + tenant. |
1150 | |
1151 | Args: |
1152 | context: nova context object needed to access the DB. |
1153 | @@ -310,3 +335,233 @@ |
1154 | {"method": "deactivate_public_ip", |
1155 | "args": {"floating_address": floating_ip['address'], |
1156 | "fixed_address": fixed_ip['address']}}) |
1157 | + |
1158 | + def _convert_to_security_group_dict(self, security_group): |
1159 | + """Creates a dictionary from security group model. |
1160 | + |
1161 | + Args: |
1162 | + security_group: SecurityGroup model. |
1163 | + |
1164 | + Returns: |
1165 | + A dictionary of keys that map to SecurityGroup attributes. |
1166 | + """ |
1167 | + return dict(id=security_group.id, name=security_group.name, |
1168 | + description=security_group.description, |
1169 | + user_id=security_group.user_id, |
1170 | + tenant_id=security_group.tenant_id) |
1171 | + |
1172 | + def _convert_to_security_group_rule_dict(self, security_group_rule): |
1173 | + """Creates a dictionary from security group rule model. |
1174 | + |
1175 | + Args: |
1176 | + security_group_rule: SecurityGroupIngressRule model. |
1177 | + |
1178 | + Returns: |
1179 | + A dictionary of keys that map to SecurityGroupIngressRule |
1180 | + attributes. |
1181 | + """ |
1182 | + return dict(id=security_group_rule.id, |
1183 | + parent_group_id=security_group_rule.parent_group_id, |
1184 | + protocol=security_group_rule.protocol, |
1185 | + from_port=security_group_rule.from_port, |
1186 | + to_port=security_group_rule.to_port, |
1187 | + cidr=security_group_rule.cidr, |
1188 | + group_id=security_group_rule.group_id) |
1189 | + |
1190 | + def _convert_to_security_group_dict_detailed(self, group): |
1191 | + """Converts a SecurityGroup model into a dictionary, including |
1192 | + EthernetCards IDs and rules. |
1193 | + |
1194 | + Args: |
1195 | + group: SecurityGroup object. |
1196 | + |
1197 | + Returns: |
1199 | + A dictionary of security group, rules and ethernet card info. |
1199 | + """ |
1200 | + group_dict = self._convert_to_security_group_dict(group) |
1201 | + group_dict['rules'] = [self._convert_to_security_group_rule_dict(rule) |
1202 | + for rule in group.rules] |
1203 | + group_dict['vnic_ids'] = [str(ethernet_card.id) for ethernet_card in |
1204 | + group.ethernet_cards] |
1205 | + return group_dict |
1206 | + |
1207 | + def create_security_group(self, context, user_id, tenant_id, name, |
1208 | + description): |
1209 | + """Creates a new security group record if it does not exist. |
1210 | + |
1211 | + Args: |
1212 | + context: Nova context |
1213 | + user_id: The user ID to associate the group with. |
1214 | + tenant_id: The tenant in which security group associates. |
1215 | + name: Name for the security group. |
1216 | + description: Description of the security group. |
1217 | + |
1218 | + Returns: |
1219 | + A dictionary of security group information with keys: |
1220 | + id: unique identifier of the security group. |
1221 | + name: Name of the security group. |
1222 | + description: Security group description |
1223 | + tenant_id: The tenant to which the security group belongs. |
1224 | + |
1225 | + Raises: |
1226 | + nova.exception.ApiError: Group already exists. |
1227 | + """ |
1228 | + if db.security_group_exists(context, tenant_id, name): |
1229 | + raise exception.ApiError('group %s already exists' % name) |
1230 | + |
1231 | + values = {'user_id': user_id, |
1232 | + 'tenant_id': tenant_id, |
1233 | + 'name': name, |
1234 | + 'description': description} |
1235 | + group = db.security_group_create(context, values) |
1236 | + return self._convert_to_security_group_dict(group) |
1237 | + |
1238 | + def get_security_group(self, context, id, tenant_id=None): |
1239 | + """Gets a security group with a unique identifier of the group. |
1240 | + |
1241 | + Args: |
1242 | + context: Nova context object. |
1243 | + id: Security group ID. |
1244 | + tenant_id: Tenant to get the security group for. |
1245 | + |
1246 | + Returns: |
1247 | + security group info in dictionary with the following keys: |
1248 | + id: unique identifier of the security group. |
1249 | + name: Name of the security group. |
1250 | + description: Security group description |
1251 | + tenant_id: The tenant to which the security group belongs. |
1252 | + rules: A list of dictionary of security group rules: |
1253 | + parent_group_id: Parent security group ID. |
1254 | + protocol: Protocol for the rule. |
1255 | + from_port: The source port for the rule. |
1256 | + to_port: The destination port for the rule. |
1258 | + cidr: CIDR of the rule. |
1258 | + security_group_id: The security group ID. |
1259 | + vnic_ids: A list of virtual NIC IDs. |
1260 | + """ |
1261 | + group = db.security_group_get(context, id, tenant_id) |
1262 | + return self._convert_to_security_group_dict_detailed( |
1263 | + group) if group else None |
1264 | + |
1265 | + def get_security_group_by_name(self, context, name, tenant_id): |
1266 | + """Gets a security group with a given name of the tenant. |
1267 | + This is expected to return something all the time, since for |
1268 | + EC2 compatibility, VMs cannot be created without one. |
1269 | + |
1270 | + Args: |
1271 | + context: Nova context object. |
1272 | + name: The name of the security group to search. |
1273 | + tenant_id: Tenant to get the security group of. |
1274 | + Returns: |
1275 | + security group info in dictionary with the following keys: |
1276 | + |
1277 | + id: unique identifier of the security group. |
1278 | + name: Name of the security group. |
1279 | + description: Security group description |
1280 | + tenant_id: The tenant to which the security group belongs. |
1281 | + rules: A list of dictionary of security group rules: |
1282 | + parent_group_id: Parent security group ID. |
1283 | + protocol: Protocol for the rule. |
1284 | + from_port: The source port for the rule. |
1285 | + to_port: The destination port for the rule. |
1287 | + cidr: CIDR of the rule. |
1287 | + security_group_id: The security group ID. |
1288 | + vnic_ids: A list of virtual NIC IDs. |
1289 | + None if it does not exist. |
1290 | + """ |
1291 | + group = db.security_group_get_by_name(context, tenant_id, name) |
1292 | + return self._convert_to_security_group_dict_detailed( |
1293 | + group) if group else None |
1294 | + |
1295 | + def get_security_groups(self, context, tenant_id=None): |
1296 | + """Gets a list of security groups, filtered by tenant_id if specified. |
1297 | + |
1298 | + Args: |
1299 | + context: Nova context object. |
1300 | + tenant_id: Tenant to get the security group for. |
1301 | + |
1302 | + Returns: |
1303 | + A list of security groups in dictionary with the following keys: |
1304 | + |
1305 | + id: unique identifier of the security group. |
1306 | + name: Name of the security group. |
1307 | + description: Security group description |
1308 | + tenant_id: The tenant to which the security group belongs. |
1309 | + """ |
1310 | + if tenant_id: |
1311 | + groups = db.security_group_get_by_tenant(context, tenant_id) |
1312 | + else: |
1313 | + groups = db.security_group_get_all(context) |
1314 | + |
1315 | + group_list = [] |
1316 | + for group in groups: |
1317 | + group_list.append(self._convert_to_security_group_dict(group)) |
1318 | + return group_list |
1319 | + |
1320 | + def delete_security_group(self, context, id): |
1321 | + """Deletes a security group. |
1322 | + |
1323 | + Args: |
1324 | + context: Nova context object. |
1325 | + id: ID of the security group to delete. |
1326 | + """ |
1327 | + db.security_group_destroy(context, id) |
1328 | + |
1329 | + def create_security_group_rule(self, context, parent_group_id, cidr=None, |
1330 | + from_port=None, to_port=None, protocol=None, |
1331 | + group_id=None): |
1332 | + """Creates a new security group ingress rule. |
1333 | + |
1334 | + Args: |
1335 | + context: Nova context object. |
1336 | + parent_group_id: Security group this rule belongs to. |
1337 | + cidr: CIDR of the rule. |
1338 | + from_port: The source port. |
1339 | + to_port: The destination port. |
1340 | + protocol: The network protocol for the rule. |
1341 | + group_id: The target security group ID. |
1342 | + |
1343 | + Returns: |
1344 | + The new security group rule info as a dictionary: |
1345 | + Keys: |
1346 | + parent_group_id: Parent security group ID. |
1347 | + protocol: Protocol for the rule. |
1348 | + from_port: The source port for the rule. |
1349 | + to_port: The destination port for the rule. |
1350 | + cidr: CIDR for the rule. |
1351 | + security_group_id: The security group ID. |
1352 | + """ |
1353 | + values = {'parent_group_id': parent_group_id, |
1354 | + 'cidr': cidr, |
1355 | + 'from_port': from_port, |
1356 | + 'to_port': to_port, |
1357 | + 'protocol': protocol, |
1358 | + 'group_id': group_id} |
1359 | + rule = db.security_group_rule_create(context, values) |
1360 | + return self._convert_to_security_group_rule_dict(rule) |
1361 | + |
1362 | + def delete_security_group_rule(self, context, id): |
1363 | + """Deletes a security group rule with the given ID. |
1364 | + |
1365 | + Args: |
1366 | + context: Nova context object. |
1367 | + id: ID of the security group rule to delete. |
1368 | + """ |
1369 | + db.security_group_rule_destroy(context, id) |
1370 | + |
1371 | + def associate_vnic_and_security_group(self, context, vnic_id, |
1372 | + security_group_id, tenant_id): |
1373 | + """Associates a VNIC to a security group. |
1374 | + |
1375 | + Args: |
1376 | + context: Nova context object. |
1377 | + vnic_id: Virtual NIC ID. |
1378 | + security_group_id: ID of the security group to associate. |
1379 | + tenant_id: The tenant that security group should belong to. |
1380 | + """ |
1381 | + groups = db.security_group_get_by_ethernet_card(context, vnic_id) |
1382 | + for group in groups: |
1383 | + if group.id == security_group_id: |
1384 | + return |
1385 | + db.ethernet_card_add_security_group(context, vnic_id, |
1386 | + security_group_id, tenant_id) |
1387 | |
1388 | === modified file 'nova/network/flat_vlan/compute.py' |
1389 | --- nova/network/flat_vlan/compute.py 2011-04-12 06:44:35 +0000 |
1390 | +++ nova/network/flat_vlan/compute.py 2011-04-18 00:30:54 +0000 |
1391 | @@ -37,6 +37,7 @@ |
1392 | _vnics_to_taps = {} |
1393 | _net_driver = utils.import_object(FLAGS.net_flat_vlan_network_driver) |
1394 | _filter_driver = utils.import_object(FLAGS.net_flat_vlan_filter_driver) |
1395 | + _firewall_driver = utils.import_object(FLAGS.net_flat_vlan_firewall_driver) |
1396 | _network_topic = FLAGS.net_flat_vlan_network_topic |
1397 | _use_vlan = FLAGS.net_flat_vlan_use_vlan |
1398 | _use_dhcp = FLAGS.net_flat_vlan_use_dhcp |
1399 | @@ -62,8 +63,8 @@ |
1400 | The host of the network service. |
1401 | """ |
1402 | if self._use_vlan: |
1403 | - network_ref = db.network_get_by_project(context, |
1404 | - context.project_id) |
1405 | + network_ref = db.network_get_by_tenant(context, |
1406 | + context.project_id) |
1407 | else: |
1408 | network_ref = db.network_get_by_bridge(context, |
1409 | self._network_bridge) |
1410 | @@ -93,7 +94,7 @@ |
1411 | return self._host |
1412 | |
1413 | def _get_network_topic(self, context, **kwargs): |
1414 | - """Retrieves the network host for a project on this host |
1415 | + """Retrieves the network host for a tenant on this host |
1416 | |
1417 | Args: |
1418 | context: Nova context |
1419 | @@ -218,3 +219,85 @@ |
1420 | net['tap'] = tap |
1421 | |
1422 | return dict(net) |
1423 | + |
1424 | + def prepare_vnic_filter(self, context, vnic_id, is_vpn, allow_traffic, |
1425 | + conn): |
1426 | + """Prepares filters for VNIC. |
1427 | + |
1428 | + Args: |
1430 | + context: Nova context object |
1430 | + vnic_id: VNIC ID |
1431 | + is_vpn: True if setting VPN. |
1432 | + allow_traffic: True if traffic can flow. |
1433 | + conn: connection to hypervisor. |
1434 | + """ |
1435 | + net = self._api.get_vnic_network_info(context, vnic_id) |
1436 | + self._firewall_driver.prepare_vnic_filter(vnic_id, net, is_vpn, |
1437 | + allow_traffic, conn) |
1438 | + |
1439 | + def unfilter_vnic(self, vnic_id): |
1440 | + """Removes filters for vnic. |
1441 | + |
1442 | + Args: |
1443 | + vnic_id: VNIC ID. |
1444 | + """ |
1445 | + self._firewall_driver.unfilter_vnic(vnic_id, |
1446 | + FLAGS.net_flat_vlan_use_ipv6) |
1447 | + |
1448 | + def apply_vnic_filter(self, vnic_id): |
1449 | + """Applies the filter for VNIC. Currently unused. |
1450 | + |
1451 | + Args: |
1452 | + vnic_id: VNIC ID |
1453 | + """ |
1454 | + self._firewall_driver.apply_vnic_filter(vnic_id) |
1455 | + |
1456 | + def refresh_security_group_rules(self, context, security_group_id, |
1457 | + tenant_id, allow_traffic, conn): |
1458 | + """Refreshes the security group rules. |
1459 | + |
1460 | + Args: |
1461 | + context: Nova context object. |
1462 | + security_group_id: Security group ID. |
1463 | + tenant_id: ID of the tenant. |
1464 | + allow_traffic: True if ok to allow traffic. |
1466 | + conn: hypervisor connection. |
1466 | + """ |
1467 | + group = self._api.get_security_group(context, security_group_id, |
1468 | + tenant_id) |
1469 | + self._firewall_driver.refresh_security_group_rules( |
1470 | + security_group_id, group['rules'], |
1471 | + FLAGS.net_flat_vlan_use_ipv6, allow_traffic, conn) |
1472 | + |
1473 | + def refresh_security_group_members(self, security_group_id): |
1474 | + """Refreshes security group members. |
1475 | + |
1476 | + Args: |
1477 | + security_group_id: Security group ID. |
1478 | + """ |
1479 | + self._firewall_driver.refresh_security_group_members( |
1480 | + security_group_id) |
1481 | + |
1482 | + def setup_basic_filtering(self, context, vnic_id, allow_traffic, conn): |
1483 | + """Sets up basic filtering for VNIC. |
1484 | + |
1485 | + Args: |
1486 | + context: Nova context object. |
1487 | + vnic_id: VNIC to set the filters for. |
1488 | + allow_traffic: True if ok to allow traffic. |
1490 | + conn: hypervisor connection. |
1490 | + """ |
1491 | + net = self._api.get_vnic_network_info(context, vnic_id) |
1492 | + self._firewall_driver.setup_basic_filtering(vnic_id, net, |
1493 | + allow_traffic, conn) |
1494 | + |
1495 | + def vnic_filter_exists(self, vnic_id, conn): |
1496 | + """Checks whether filters for VNIC exists. |
1497 | + |
1498 | + Args: |
1500 | + vnic_id: VNIC ID to check. |
1501 | + conn: hypervisor connection. |
1501 | + """ |
1502 | + return self._firewall_driver.vnic_filter_exists(vnic_id, conn) |
1503 | + |
1504 | + |
1505 | |
1506 | === modified file 'nova/network/flat_vlan/db/api.py' |
1507 | --- nova/network/flat_vlan/db/api.py 2011-04-07 11:20:06 +0000 |
1508 | +++ nova/network/flat_vlan/db/api.py 2011-04-18 00:30:54 +0000 |
1509 | @@ -109,8 +109,8 @@ |
1510 | return IMPL.dao_factory.get_dao().\ |
1511 | network_get_all(read_deleted=can_read_deleted) |
1512 | |
1513 | -def network_get_by_project(context, project_id, associate=True): |
1514 | - """Return the network associated with the project. |
1515 | +def network_get_by_tenant(context, tenant_id, associate=True): |
1516 | + """Return the network associated with the tenant. |
1517 | |
1518 | If associate is true, it will attempt to associate a new |
1519 | network if one is not found, otherwise it returns None. |
1520 | @@ -119,7 +119,7 @@ |
1521 | if associate and not nova_api.is_admin_context(context): |
1522 | raise exception.NotAuthorized() |
1523 | return IMPL.dao_factory.get_dao().\ |
1524 | - network_get_by_project(project_id, associate) |
1525 | + network_get_by_tenant(tenant_id, associate) |
1526 | |
1527 | def network_update(context, network_id, values): |
1528 | """Update network.""" |
1529 | @@ -137,31 +137,31 @@ |
1530 | network_delete_safe(network_id) |
1531 | |
1532 | @nova_api.require_admin_context |
1533 | -def network_associate(context, project_id): |
1534 | - """Associate a free network to a project.""" |
1535 | +def network_associate(context, tenant_id): |
1536 | + """Associate a free network to a tenant.""" |
1537 | return IMPL.dao_factory.get_dao().\ |
1538 | - network_associate(project_id) |
1539 | + network_associate(tenant_id) |
1540 | |
1541 | -def floating_ip_allocate_address(context, host, project_id): |
1542 | +def floating_ip_allocate_address(context, host, tenant_id): |
1543 | """Allocate free floating ip and return the address. |
1544 | |
1545 | Raises if one is not available. |
1546 | |
1547 | """ |
1548 | - nova_api.authorize_project_context(context, project_id) |
1549 | + nova_api.authorize_project_context(context, tenant_id) |
1550 | return IMPL.dao_factory.get_dao().\ |
1551 | - floating_ip_allocate_address(host, project_id) |
1552 | + floating_ip_allocate_address(host, tenant_id) |
1553 | |
1554 | def floating_ip_create(context, values): |
1555 | """Create a floating ip from the values dictionary.""" |
1556 | return IMPL.dao_factory.get_dao().\ |
1557 | floating_ip_create(values) |
1558 | |
1559 | -def floating_ip_count_by_project(context, project_id): |
1560 | - """Count floating ips used by project.""" |
1561 | - nova_api.authorize_project_context(context, project_id) |
1562 | +def floating_ip_count_by_tenant(context, tenant_id): |
1563 | + """Count floating ips used by tenant.""" |
1564 | + nova_api.authorize_project_context(context, tenant_id) |
1565 | return IMPL.dao_factory.get_dao().\ |
1566 | - floating_ip_count_by_project(project_id) |
1567 | + floating_ip_count_by_tenant(tenant_id) |
1568 | |
1569 | def floating_ip_deallocate(context, address): |
1570 | """Deallocate an floating ip by address""" |
1571 | @@ -211,11 +211,11 @@ |
1572 | return IMPL.dao_factory.get_dao().\ |
1573 | floating_ip_get_all_by_host(host) |
1574 | |
1575 | -def floating_ip_get_all_by_project(context, project_id): |
1576 | - """Get all floating ips by project.""" |
1577 | - nova_api.authorize_project_context(context, project_id) |
1578 | +def floating_ip_get_all_by_tenant(context, tenant_id): |
1579 | + """Get all floating ips by tenant.""" |
1580 | + nova_api.authorize_project_context(context, tenant_id) |
1581 | return IMPL.dao_factory.get_dao().\ |
1582 | - floating_ip_get_all_by_project(project_id) |
1583 | + floating_ip_get_all_by_tenant(tenant_id) |
1584 | |
1585 | def floating_ip_get_by_address(context, address): |
1586 | """Get a floating ip by address or raise if it doesn't exist.""" |
1587 | @@ -272,7 +272,7 @@ |
1588 | def fixed_ip_disassociate(context, fixed_ip_id): |
1589 | """Removes reference to ethernet card from IP.""" |
1590 | return IMPL.dao_factory.get_dao().\ |
1591 | - fixed_ip_disassociate(fixed_id_ip) |
1592 | + fixed_ip_disassociate(fixed_ip_id) |
1593 | |
1594 | def fixed_ip_associate(context, fixed_ip_id, ethernet_card_id): |
1595 | """Associates an IP address to an ethernet card.""" |
1596 | @@ -290,3 +290,95 @@ |
1597 | """Disassociates fixed IP by timeout.""" |
1598 | return IMPL.dao_factory.get_dao().\ |
1599 | fixed_ip_disassociate_all_by_timeout(host, time) |
1600 | + |
1601 | +def security_group_get_all(context): |
1602 | + """Get all security groups.""" |
1603 | + can_read_deleted = nova_api.can_read_deleted(context) |
1604 | + return IMPL.dao_factory.get_dao().\ |
1605 | + security_group_get_all(can_read_deleted) |
1606 | + |
1607 | +def security_group_get(context, security_group_id, tenant_id): |
1608 | + """Get security group by its id.""" |
1609 | + is_admin = nova_api.is_admin_context(context) |
1610 | + can_read_deleted = nova_api.can_read_deleted(context) |
1611 | + if not is_admin and not tenant_id: |
1612 | + raise exception.NotAuthorized() |
1613 | + |
1614 | + return IMPL.dao_factory.get_dao().\ |
1615 | + security_group_get(security_group_id, tenant_id, |
1616 | + is_admin=is_admin, |
1617 | + read_deleted=can_read_deleted) |
1618 | + |
1619 | +def security_group_get_by_name(context, tenant_id, group_name): |
1620 | + """Returns a security group with the specified name of a tenant.""" |
1621 | + return IMPL.dao_factory.get_dao().\ |
1622 | + security_group_get_by_name(tenant_id, group_name) |
1623 | + |
1624 | +def security_group_get_by_tenant(context, tenant_id): |
1625 | + """Get all security groups belonging to a tenant.""" |
1626 | + return IMPL.dao_factory.get_dao().\ |
1627 | + security_group_get_by_tenant(tenant_id) |
1628 | + |
1629 | +def security_group_get_by_ethernet_card(context, ethernet_card_id): |
1630 | + """Get security groups to which the ethernet_card is assigned.""" |
1631 | + return IMPL.dao_factory.get_dao().\ |
1632 | + security_group_get_by_ethernet_card(ethernet_card_id) |
1633 | + |
1634 | +def security_group_exists(context, tenant_id, group_name): |
1635 | + """Indicates if a group name exists for a tenant.""" |
1636 | + return IMPL.dao_factory.get_dao().\ |
1637 | + security_group_exists(tenant_id, group_name) |
1638 | + |
1639 | +def security_group_create(context, values): |
1640 | + """Create a new security group.""" |
1641 | + return IMPL.dao_factory.get_dao().\ |
1642 | + security_group_create(values) |
1643 | + |
1644 | +def security_group_rule_create(context, values): |
1645 | + """Create a new security group rule.""" |
1646 | + return IMPL.dao_factory.get_dao().\ |
1647 | + security_group_rule_create(values) |
1648 | + |
1649 | +def security_group_rule_get_by_security_group(context, security_group_id): |
1650 | + """Get all rules for a given security group.""" |
1651 | + is_admin = nova_api.is_admin_context(context) |
1652 | + can_read_deleted = nova_api.can_read_deleted(context) |
1653 | + return IMPL.dao_factory.get_dao().\ |
1654 | + security_group_rule_get_by_security_group( |
1655 | + security_group_id, is_admin=is_admin, |
1656 | + read_deleted=can_read_deleted) |
1657 | + |
1658 | +def security_group_rule_get_by_security_group_grantee(context, |
1659 | + security_group_id): |
1660 | + """Get all rules that grant access to the given security group.""" |
1661 | + is_admin = nova_api.is_admin_context(context) |
1662 | + can_read_deleted = nova_api.can_read_deleted(context) |
1663 | + return IMPL.dao_factory.get_dao().\ |
1664 | + security_group_rule_get_by_security_group_grantee( |
1665 | + security_group_id, is_admin=is_admin, |
1666 | + read_deleted=can_read_deleted) |
1667 | + |
1668 | +def security_group_rule_destroy(context, security_group_rule_id): |
1669 | + """Deletes a security group rule.""" |
1670 | + return IMPL.dao_factory.get_dao().\ |
1671 | + security_group_rule_destroy(security_group_rule_id) |
1672 | + |
1673 | +def ethernet_card_add_security_group(context, ethernet_card_id, |
1674 | + security_group_id, tenant_id): |
1675 | + """Associate the given security group with the given ethernet card.""" |
1676 | + is_admin = nova_api.is_admin_context(context) |
1677 | + return IMPL.dao_factory.get_dao().\ |
1678 | + ethernet_card_add_security_group(ethernet_card_id, |
1679 | + security_group_id, |
1680 | + tenant_id, |
1681 | + is_admin=is_admin) |
1682 | + |
1683 | +def security_group_destroy(context, security_group_id): |
1684 | + """Deletes a security group.""" |
1685 | + return IMPL.dao_factory.get_dao().\ |
1686 | + security_group_destroy(security_group_id) |
1687 | + |
1688 | +def security_group_destroy_all(context): |
1689 | + """Deletes all security groups.""" |
1690 | + return IMPL.dao_factory.get_dao().\ |
1691 | + security_group_destroy_all() |
1692 | |
1693 | === added file 'nova/network/flat_vlan/db/migration.py' |
1694 | --- nova/network/flat_vlan/db/migration.py 1970-01-01 00:00:00 +0000 |
1695 | +++ nova/network/flat_vlan/db/migration.py 2011-04-18 00:30:54 +0000 |
1696 | @@ -0,0 +1,36 @@ |
1697 | +# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
1698 | + |
1699 | +# Copyright 2011 Midokura KK |
1700 | +# All Rights Reserved. |
1701 | +# |
1702 | +# Licensed under the Apache License, Version 2.0 (the "License"); you may |
1703 | +# not use this file except in compliance with the License. You may obtain |
1704 | +# a copy of the License at |
1705 | +# |
1706 | +# http://www.apache.org/licenses/LICENSE-2.0 |
1707 | +# |
1708 | +# Unless required by applicable law or agreed to in writing, software |
1709 | +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
1710 | +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
1711 | +# License for the specific language governing permissions and limitations |
1712 | +# under the License. |
1713 | +"""Database setup and migration commands.""" |
1714 | + |
1715 | +from nova import utils |
1716 | +from nova.network.flat_vlan import flags |
1717 | + |
1718 | +FLAGS = flags.FlagAccessor() |
1719 | + |
1720 | +IMPL = utils.LazyPluggable( |
1721 | + FLAGS.get('net_flat_vlan_db_backend'), |
1722 | + sqlalchemy='nova.network.flat_vlan.db.sqlalchemy.migration') |
1723 | + |
1724 | + |
1725 | +def db_sync(version=None): |
1726 | + """Migrate the database to `version` or the most recent version.""" |
1727 | + return IMPL.db_sync(version=version) |
1728 | + |
1729 | + |
1730 | +def db_version(): |
1731 | + """Display the current database version.""" |
1732 | + return IMPL.db_version() |
1733 | |
1734 | === modified file 'nova/network/flat_vlan/db/sqlalchemy/migrate_repo/versions/001_diablo.py' |
1735 | --- nova/network/flat_vlan/db/sqlalchemy/migrate_repo/versions/001_diablo.py 2011-04-04 09:52:18 +0000 |
1736 | +++ nova/network/flat_vlan/db/sqlalchemy/migrate_repo/versions/001_diablo.py 2011-04-18 00:30:54 +0000 |
1737 | @@ -56,7 +56,7 @@ |
1738 | Integer(), |
1739 | ForeignKey('fixed_ips.id'), |
1740 | nullable=True), |
1741 | - Column('project_id', |
1742 | + Column('tenant_id', |
1743 | String(length=255, convert_unicode=False, assert_unicode=None, |
1744 | unicode_error=None, _warn_on_bytestring=False)), |
1745 | Column('host', |
1746 | @@ -112,16 +112,75 @@ |
1747 | Column('label', |
1748 | String(length=255, convert_unicode=False, assert_unicode=None, |
1749 | unicode_error=None, _warn_on_bytestring=False)), |
1750 | - Column('project_id', |
1751 | + Column('tenant_id', |
1752 | String(length=255, convert_unicode=False, assert_unicode=None, |
1753 | unicode_error=None, _warn_on_bytestring=False)), |
1754 | Column('host', String(255)), |
1755 | ) |
1756 | |
1757 | +security_groups = Table('security_groups', meta, |
1758 | + Column('created_at', DateTime(timezone=False)), |
1759 | + Column('updated_at', DateTime(timezone=False)), |
1760 | + Column('deleted_at', DateTime(timezone=False)), |
1761 | + Column('deleted', Boolean(create_constraint=True, name=None)), |
1762 | + Column('id', Integer(), primary_key=True, nullable=False), |
1763 | + Column('name', |
1764 | + String(length=255, convert_unicode=False, assert_unicode=None, |
1765 | + unicode_error=None, _warn_on_bytestring=False)), |
1766 | + Column('description', |
1767 | + String(length=255, convert_unicode=False, assert_unicode=None, |
1768 | + unicode_error=None, _warn_on_bytestring=False)), |
1769 | + |
1770 | + Column('user_id', |
1771 | + String(length=255, convert_unicode=False, assert_unicode=None, |
1772 | + unicode_error=None, _warn_on_bytestring=False)), |
1773 | + Column('tenant_id', |
1774 | + String(length=255, convert_unicode=False, assert_unicode=None, |
1775 | + unicode_error=None, _warn_on_bytestring=False)), |
1776 | + ) |
1777 | + |
1778 | +security_group_ethernet_card_assoc = Table( |
1779 | + 'security_group_ethernet_card_association', meta, |
1780 | + Column('created_at', DateTime(timezone=False)), |
1781 | + Column('updated_at', DateTime(timezone=False)), |
1782 | + Column('deleted_at', DateTime(timezone=False)), |
1783 | + Column('deleted', Boolean(create_constraint=True, name=None)), |
1784 | + Column('id', Integer(), primary_key=True, nullable=False), |
1785 | + Column('security_group_id', |
1786 | + Integer(), |
1787 | + ForeignKey('security_groups.id')), |
1788 | + Column('ethernet_card_id', Integer(), ForeignKey('ethernet_cards.id')), |
1789 | + ) |
1790 | + |
1791 | + |
1792 | +security_group_rules = Table('security_group_rules', meta, |
1793 | + Column('created_at', DateTime(timezone=False)), |
1794 | + Column('updated_at', DateTime(timezone=False)), |
1795 | + Column('deleted_at', DateTime(timezone=False)), |
1796 | + Column('deleted', Boolean(create_constraint=True, name=None)), |
1797 | + Column('id', Integer(), primary_key=True, nullable=False), |
1798 | + Column('parent_group_id', |
1799 | + Integer(), |
1800 | + ForeignKey('security_groups.id')), |
1801 | + Column('protocol', |
1802 | + String(length=255, convert_unicode=False, assert_unicode=None, |
1803 | + unicode_error=None, _warn_on_bytestring=False)), |
1804 | + Column('from_port', Integer()), |
1805 | + Column('to_port', Integer()), |
1806 | + Column('cidr', |
1807 | + String(length=255, convert_unicode=False, assert_unicode=None, |
1808 | + unicode_error=None, _warn_on_bytestring=False)), |
1809 | + Column('group_id', |
1810 | + Integer(), |
1811 | + ForeignKey('security_groups.id')), |
1812 | + ) |
1813 | + |
1814 | def upgrade(migrate_engine): |
1815 | meta.bind = migrate_engine |
1816 | |
1817 | - for table in (ethernet_cards, networks, fixed_ips, floating_ips): |
1818 | + for table in (ethernet_cards, networks, fixed_ips, floating_ips, |
1819 | + security_groups, security_group_rules, |
1820 | + security_group_ethernet_card_assoc): |
1821 | try: |
1822 | table.create() |
1823 | except Exception: |
1824 | @@ -132,7 +191,9 @@ |
1825 | def downgrade(migrate_engine): |
1826 | meta.bind = migrate_engine |
1827 | |
1828 | - for table in (floating_ips, fixed_ips, networks, ethernet_cards): |
1829 | + for table in (security_group_ethernet_card_assoc, |
1830 | + security_group_rules, security_groups, |
1831 | + floating_ips, fixed_ips, networks, ethernet_cards): |
1832 | try: |
1833 | table.drop() |
1834 | except Exception: |
1835 | |
1836 | === added file 'nova/network/flat_vlan/db/sqlalchemy/migration.py' |
1837 | --- nova/network/flat_vlan/db/sqlalchemy/migration.py 1970-01-01 00:00:00 +0000 |
1838 | +++ nova/network/flat_vlan/db/sqlalchemy/migration.py 2011-04-18 00:30:54 +0000 |
1839 | @@ -0,0 +1,81 @@ |
1840 | +# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
1841 | + |
1842 | +# Copyright 2011 Midokura KK |
1843 | +# All Rights Reserved. |
1844 | +# |
1845 | +# Licensed under the Apache License, Version 2.0 (the "License"); you may |
1846 | +# not use this file except in compliance with the License. You may obtain |
1847 | +# a copy of the License at |
1848 | +# |
1849 | +# http://www.apache.org/licenses/LICENSE-2.0 |
1850 | +# |
1851 | +# Unless required by applicable law or agreed to in writing, software |
1852 | +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
1853 | +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
1854 | +# License for the specific language governing permissions and limitations |
1855 | +# under the License. |
1856 | + |
1857 | +import os |
1858 | +import sys |
1859 | + |
1860 | +from nova.network.flat_vlan import flags |
1861 | + |
1862 | +import sqlalchemy |
1863 | +from migrate.versioning import api as versioning_api |
1864 | + |
1865 | +try: |
1866 | + from migrate.versioning import exceptions as versioning_exceptions |
1867 | +except ImportError: |
1868 | + try: |
1869 | + # python-migration changed location of exceptions after 1.6.3 |
1870 | + # See LP Bug #717467 |
1871 | + from migrate import exceptions as versioning_exceptions |
1872 | + except ImportError: |
1873 | + sys.exit(_("python-migrate is not installed. Exiting.")) |
1874 | + |
1875 | +FLAGS = flags.FlagAccessor() |
1876 | + |
1877 | + |
1878 | +def db_sync(version=None): |
1879 | + db_version() |
1880 | + repo_path = _find_migrate_repo() |
1881 | + return versioning_api.upgrade(FLAGS.net_flat_vlan_sql_connection, |
1882 | + repo_path, version) |
1883 | + |
1884 | + |
1885 | +def db_version(): |
1886 | + repo_path = _find_migrate_repo() |
1887 | + try: |
1888 | + return versioning_api.db_version(FLAGS.net_flat_vlan_sql_connection, |
1889 | + repo_path) |
1890 | + except versioning_exceptions.DatabaseNotControlledError: |
1891 | + # If we aren't version controlled we may already have the database |
1892 | + # in the state from before we started version control, check for that |
1893 | + # and set up version_control appropriately |
1894 | + meta = sqlalchemy.MetaData() |
1895 | + engine = sqlalchemy.create_engine(FLAGS.net_flat_vlan_sql_connection, |
1896 | + echo=False) |
1897 | + meta.reflect(bind=engine) |
1898 | + try: |
1899 | + for table in ('ethernet_cards', 'fixed_ips', 'floating_ips', |
1900 | + 'networks'): |
1901 | + assert table in meta.tables |
1902 | + return db_version_control(1) |
1903 | + except AssertionError: |
1904 | + return db_version_control(0) |
1905 | + |
1906 | + |
1907 | +def db_version_control(version=None): |
1908 | + repo_path = _find_migrate_repo() |
1909 | + versioning_api.version_control(FLAGS.net_flat_vlan_sql_connection, |
1910 | + repo_path, version) |
1911 | + return version |
1912 | + |
1913 | + |
1914 | +def _find_migrate_repo(): |
1915 | + """Get the path for the migrate repository.""" |
1916 | + path = os.path.join(os.path.abspath(os.path.dirname(__file__)), |
1917 | + 'migrate_repo') |
1918 | + assert os.path.exists(path) |
1919 | + return path |
1920 | + |
1921 | |
1922 | === modified file 'nova/network/flat_vlan/db/sqlalchemy/models.py' |
1923 | --- nova/network/flat_vlan/db/sqlalchemy/models.py 2011-04-07 11:20:06 +0000 |
1924 | +++ nova/network/flat_vlan/db/sqlalchemy/models.py 2011-04-18 00:30:54 +0000 |
1925 | @@ -17,6 +17,8 @@ |
1926 | """ |
1927 | SQLAlchemy models for flat vlan network service data. |
1928 | """ |
1929 | +import datetime |
1930 | + |
1931 | from sqlalchemy import or_ |
1932 | from sqlalchemy.orm import relationship, backref |
1933 | from sqlalchemy.orm import joinedload, joinedload_all |
1934 | @@ -24,6 +26,7 @@ |
1935 | from sqlalchemy import ForeignKey |
1936 | from sqlalchemy.exc import IntegrityError |
1937 | from sqlalchemy.ext.declarative import declarative_base |
1938 | +from sqlalchemy.sql.expression import literal_column |
1939 | |
1940 | from nova import exception |
1941 | from nova.db import api as nova_db |
1942 | @@ -61,7 +64,7 @@ |
1943 | gateway_v6 = Column(String(255)) |
1944 | netmask_v6 = Column(String(255)) |
1945 | label = Column(String(255)) |
1946 | - project_id = Column(String(255)) |
1947 | + tenant_id = Column(String(255)) |
1948 | host = Column(String(255)) # , ForeignKey('hosts.id')) |
1949 | |
1950 | |
1951 | @@ -91,10 +94,63 @@ |
1952 | primaryjoin='and_(' |
1953 | 'FloatingIp.fixed_ip_id == FixedIp.id,' |
1954 | 'FloatingIp.deleted == False)') |
1955 | - project_id = Column(String(255)) |
1956 | + tenant_id = Column(String(255)) |
1957 | host = Column(String(255)) # , ForeignKey('hosts.id')) |
1958 | |
1959 | |
1960 | +class SecurityGroup(BASE, NovaBase): |
1961 | + """Represents a security group.""" |
1962 | + __tablename__ = 'security_groups' |
1963 | + id = Column(Integer, primary_key=True) |
1964 | + |
1965 | + name = Column(String(255)) |
1966 | + description = Column(String(255)) |
1967 | + user_id = Column(String(255)) |
1968 | + tenant_id = Column(String(255)) |
1969 | + |
1970 | + ethernet_cards = relationship( |
1971 | + EthernetCard, secondary="security_group_ethernet_card_association", |
1972 | + primaryjoin='and_(' |
1973 | + 'SecurityGroup.id == ' |
1974 | + 'SecurityGroupEthernetCardAssociation.security_group_id,' |
1975 | + 'SecurityGroupEthernetCardAssociation.deleted == False,' |
1976 | + 'SecurityGroup.deleted == False)', |
1977 | + secondaryjoin='and_(' |
1978 | + 'SecurityGroupEthernetCardAssociation.ethernet_card_id ' |
1979 | + '== EthernetCard.id,' |
1980 | + # (anthony) the condition below shouldn't be necessary now that the |
1981 | + # association is being marked as deleted. However, removing this |
1982 | + # may cause existing deployments to choke, so I'm leaving it |
1983 | + 'EthernetCard.deleted == False)', |
1984 | + backref='security_groups') |
1985 | + |
1986 | +class SecurityGroupEthernetCardAssociation(BASE, NovaBase): |
1987 | + __tablename__ = 'security_group_ethernet_card_association' |
1988 | + id = Column(Integer, primary_key=True) |
1989 | + security_group_id = Column(Integer, ForeignKey('security_groups.id')) |
1990 | + ethernet_card_id = Column(Integer, ForeignKey('ethernet_cards.id')) |
1991 | + |
1992 | +class SecurityGroupIngressRule(BASE, NovaBase): |
1993 | + """Represents a rule in a security group.""" |
1994 | + __tablename__ = 'security_group_rules' |
1995 | + id = Column(Integer, primary_key=True) |
1996 | + |
1997 | + parent_group_id = Column(Integer, ForeignKey('security_groups.id')) |
1998 | + parent_group = relationship("SecurityGroup", backref="rules", |
1999 | + foreign_keys=parent_group_id, |
2000 | + primaryjoin='and_(' |
2001 | + 'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,' |
2002 | + 'SecurityGroupIngressRule.deleted == False)') |
2003 | + |
2004 | + protocol = Column(String(5)) # "tcp", "udp", or "icmp" |
2005 | + from_port = Column(Integer) |
2006 | + to_port = Column(Integer) |
2007 | + cidr = Column(String(255)) |
2008 | + |
2009 | + # Note: This is not the parent SecurityGroup. It's SecurityGroup we're |
2010 | + # granting access for. |
2011 | + group_id = Column(Integer, ForeignKey('security_groups.id')) |
2012 | + |
2013 | class DataAccess(object): |
2014 | """The base class to implement Data Access Objects. |
2015 | """ |
2016 | @@ -121,7 +177,7 @@ |
2017 | first() |
2018 | |
2019 | if not result: |
2020 | - raise exception.NotFound(_("No ethernet_card with id %s") % id) |
2021 | + raise exception.NotFound("No ethernet_card with id %s" % id) |
2022 | |
2023 | return result |
2024 | |
2025 | @@ -161,7 +217,7 @@ |
2026 | first() |
2027 | |
2028 | if not result: |
2029 | - raise exception.NotFound(_("No network with id %s") % id) |
2030 | + raise exception.NotFound("No network with id %s" % id) |
2031 | |
2032 | return result |
2033 | |
2034 | @@ -172,7 +228,7 @@ |
2035 | first() |
2036 | |
2037 | if not result: |
2038 | - raise exception.NotFound(_('No network for bridge %s') % bridge) |
2039 | + raise exception.NotFound('No network for bridge %s' % bridge) |
2040 | |
2041 | return result |
2042 | |
2043 | @@ -184,7 +240,7 @@ |
2044 | filter_by(deleted=False).\ |
2045 | first() |
2046 | if not rv: |
2047 | - raise exception.NotFound(_('No network for ethernet card %s') % |
2048 | + raise exception.NotFound('No network for ethernet card %s' % |
2049 | ethernet_card_id) |
2050 | return rv |
2051 | |
2052 | @@ -254,14 +310,14 @@ |
2053 | first() |
2054 | |
2055 | if not result: |
2056 | - raise exception.NotFound(_("No Fixed IP with id %s") % id) |
2057 | + raise exception.NotFound("No Fixed IP with id %s" % id) |
2058 | |
2059 | return result |
2060 | |
2061 | def fixed_ip_get_all(self): |
2062 | result = self._session.query(self.fixed_ip_dto_class).all() |
2063 | if not result: |
2064 | - raise exception.NotFound(_('No fixed ips defined')) |
2065 | + raise exception.NotFound('No fixed ips defined') |
2066 | |
2067 | return result |
2068 | |
2069 | @@ -283,7 +339,7 @@ |
2070 | options(joinedload('ethernet_card')).\ |
2071 | first() |
2072 | if not result: |
2073 | - raise exception.NotFound(_('No fixed ip for address %s') % address) |
2074 | + raise exception.NotFound('No fixed ip for address %s' % address) |
2075 | |
2076 | return result |
2077 | |
2078 | @@ -321,18 +377,18 @@ |
2079 | |
2080 | floating_ip_dto_class = FloatingIp |
2081 | |
2082 | - def floating_ip_count_by_project(self, project_id): |
2083 | + def floating_ip_count_by_tenant(self, tenant_id): |
2084 | return self._session.query(self.floating_ip_dto_class).\ |
2085 | - filter_by(project_id=project_id).\ |
2086 | + filter_by(tenant_id=tenant_id).\ |
2087 | filter_by(deleted=False).\ |
2088 | count() |
2089 | |
2090 | - def floating_ip_allocate_address(self, host, project_id): |
2091 | + def floating_ip_allocate_address(self, host, tenant_id): |
2092 | with self._session.begin(): |
2093 | floating_ip_ref = self._session.query(self.floating_ip_dto_class).\ |
2094 | filter_by(host=host).\ |
2095 | filter_by(fixed_ip_id=None).\ |
2096 | - filter_by(project_id=None).\ |
2097 | + filter_by(tenant_id=None).\ |
2098 | filter_by(deleted=False).\ |
2099 | with_lockmode('update').\ |
2100 | first() |
2101 | @@ -340,7 +396,7 @@ |
2102 | # then this has concurrency issues |
2103 | if not floating_ip_ref: |
2104 | raise nova_db.NoMoreAddresses() |
2105 | - floating_ip_ref['project_id'] = project_id |
2106 | + floating_ip_ref['tenant_id'] = tenant_id |
2107 | self._session.add(floating_ip_ref) |
2108 | return floating_ip_ref |
2109 | |
2110 | @@ -348,7 +404,7 @@ |
2111 | with self._session.begin(): |
2112 | floating_ip_ref = self.floating_ip_get_by_address( |
2113 | address, read_deleted=False) |
2114 | - floating_ip_ref['project_id'] = None |
2115 | + floating_ip_ref['tenant_id'] = None |
2116 | floating_ip_ref.save(session=self._session) |
2117 | |
2118 | def floating_ip_destroy(self, address): |
2119 | @@ -376,10 +432,10 @@ |
2120 | filter_by(deleted=False).\ |
2121 | all() |
2122 | |
2123 | - def floating_ip_get_all_by_project(self, project_id): |
2124 | + def floating_ip_get_all_by_tenant(self, tenant_id): |
2125 | return self._session.query(self.floating_ip_dto_class).\ |
2126 | options(joinedload_all('fixed_ip.ethernet_card')).\ |
2127 | - filter_by(project_id=project_id).\ |
2128 | + filter_by(tenant_id=tenant_id).\ |
2129 | filter_by(deleted=False).\ |
2130 | all() |
2131 | |
2132 | @@ -412,10 +468,6 @@ |
2133 | floating_ip_ref = self.floating_ip_get_by_address( |
2134 | address, read_deleted=False) |
2135 | fixed_ip_ref = floating_ip_ref.fixed_ip |
2136 | - if fixed_ip_ref: |
2137 | - fixed_ip_address = fixed_ip_ref['address'] |
2138 | - else: |
2139 | - fixed_ip_address = None |
2140 | floating_ip_ref.fixed_ip = None |
2141 | floating_ip_ref.save(session=self._session) |
2142 | return fixed_ip_ref |
2143 | @@ -427,14 +479,187 @@ |
2144 | filter_by(deleted=False).\ |
2145 | all() |
2146 | |
2147 | +class SecurityGroupDataAccess(DataAccess): |
2148 | + """A Data Access object to access security groups. |
2149 | + """ |
2150 | + |
2151 | + security_group_dto_class = SecurityGroup |
2152 | + |
2153 | + def security_group_get_all(self, read_deleted=False): |
2154 | + return self._session.query(self.security_group_dto_class).\ |
2155 | + filter_by(deleted=read_deleted).\ |
2156 | + options(joinedload_all('rules')).\ |
2157 | + all() |
2158 | + |
2159 | + def security_group_get(self, security_group_id, tenant_id, is_admin=False, |
2160 | + read_deleted=False): |
2161 | + if is_admin: |
2162 | + result = self._session.query(self.security_group_dto_class).\ |
2163 | + filter_by(deleted=read_deleted,).\ |
2164 | + filter_by(id=security_group_id).\ |
2165 | + options(joinedload_all('rules')).\ |
2166 | + first() |
2167 | + else: |
2168 | + result = self._session.query(self.security_group_dto_class).\ |
2169 | + filter_by(deleted=False).\ |
2170 | + filter_by(id=security_group_id).\ |
2171 | + filter_by(tenant_id=tenant_id).\ |
2172 | + options(joinedload_all('rules')).\ |
2173 | + first() |
2174 | + if not result: |
2175 | + raise exception.NotFound("No security group with id %s" % |
2176 | + security_group_id) |
2177 | + return result |
2178 | + |
2179 | + def security_group_get_by_name(self, tenant_id, group_name): |
2180 | + result = self._session.query(self.security_group_dto_class).\ |
2181 | + filter_by(tenant_id=tenant_id).\ |
2182 | + filter_by(name=group_name).\ |
2183 | + filter_by(deleted=False).\ |
2184 | + options(joinedload_all('rules')).\ |
2185 | + options(joinedload_all('ethernet_cards')).\ |
2186 | + first() |
2187 | + if not result: |
2188 | + raise exception.NotFound( |
2189 | + 'No security group named %(group_name)s' |
2190 | + ' for tenant: %(tenant_id)s' % locals()) |
2191 | + return result |
2192 | + |
2193 | + def security_group_get_by_tenant(self, tenant_id): |
2194 | + return self._session.query(self.security_group_dto_class).\ |
2195 | + filter_by(tenant_id=tenant_id).\ |
2196 | + filter_by(deleted=False).\ |
2197 | + options(joinedload_all('rules')).\ |
2198 | + all() |
2199 | + |
2200 | + def security_group_get_by_ethernet_card(self, ethernet_card_id): |
2201 | + return self._session.query(self.security_group_dto_class).\ |
2202 | + filter_by(deleted=False).\ |
2203 | + options(joinedload_all('rules')).\ |
2204 | + join(self.security_group_dto_class.ethernet_cards).\ |
2205 | + filter_by(id=ethernet_card_id).\ |
2206 | + filter_by(deleted=False).\ |
2207 | + all() |
2208 | + |
2209 | + def security_group_exists(self, tenant_id, group_name): |
2210 | + try: |
2211 | + group = self.security_group_get_by_name(tenant_id, group_name) |
2212 | + return group != None |
2213 | + except exception.NotFound: |
2214 | + return False |
2215 | + |
2216 | + def security_group_create(self, values): |
2217 | + security_group_ref = self.security_group_dto_class() |
2218 | + # FIXME(devcamcar): Unless I do this, rules fails with lazy load |
2219 | + # exception once save() is called. This will get cleaned up in next orm |
2220 | + # pass. |
2221 | + security_group_ref.rules |
2222 | + security_group_ref.update(values) |
2223 | + security_group_ref.save(session=self._session) |
2224 | + return security_group_ref |
2225 | + |
2226 | +class SecurityGroupEthernetCardAssociationDataAccess( |
2227 | + EthernetCardDataAccess, SecurityGroupDataAccess): |
2228 | + """A Data Access object to access security group ethernet card association. |
2229 | + """ |
2230 | + security_group_dto_class = SecurityGroup |
2231 | + ethernet_card_dto_class = EthernetCard |
2232 | + |
2233 | + def ethernet_card_add_security_group(self, ethernet_card_id, |
2234 | + security_group_id, tenant_id, |
2235 | + is_admin=False): |
2236 | + """Associate the given security group with the given ethernet card""" |
2237 | + with self._session.begin(): |
2238 | + ethernet_card_ref = self.ethernet_card_get(ethernet_card_id) |
2239 | + security_group_ref = self.security_group_get(security_group_id, |
2240 | + tenant_id, |
2241 | + is_admin=is_admin) |
2242 | + ethernet_card_ref.security_groups += [security_group_ref] |
2243 | + ethernet_card_ref.save(session=self._session) |
2244 | + |
2245 | + |
2246 | +class SecurityGroupIngressRuleDataAccess(DataAccess): |
2247 | + """A Data Access object to access security groups. |
2248 | + """ |
2249 | + |
2250 | + security_group_ingress_rule_dto_class = SecurityGroupIngressRule |
2251 | + |
2252 | + def security_group_rule_get(self, security_group_rule_id, is_admin=False, |
2253 | + read_deleted=False): |
2254 | + if is_admin: |
2255 | + result = self._session.query( |
2256 | + self.security_group_ingress_rule_dto_class).\ |
2257 | + filter_by(deleted=read_deleted).\ |
2258 | + filter_by(id=security_group_rule_id).\ |
2259 | + first() |
2260 | + else: |
2261 | + # TODO(vish): Join to group and check for tenant_id |
2262 | + result = self._session.query( |
2263 | + self.security_group_ingress_rule_dto_class).\ |
2264 | + filter_by(deleted=False).\ |
2265 | + filter_by(id=security_group_rule_id).\ |
2266 | + first() |
2267 | + if not result: |
2268 | + raise exception.NotFound("No security group rule with id %s" % |
2269 | + security_group_rule_id) |
2270 | + return result |
2271 | + |
2272 | + def security_group_rule_get_by_security_group(self, security_group_id, |
2273 | + is_admin=False, |
2274 | + read_deleted=False): |
2275 | + if is_admin: |
2276 | + result = self._session.query( |
2277 | + self.security_group_ingress_rule_dto_class).\ |
2278 | + filter_by(deleted=read_deleted).\ |
2279 | + filter_by(parent_group_id=security_group_id).\ |
2280 | + all() |
2281 | + else: |
2282 | + # TODO(vish): Join to group and check for tenant_id |
2283 | + result = self._session.query( |
2284 | + self.security_group_ingress_rule_dto_class).\ |
2285 | + filter_by(deleted=False).\ |
2286 | + filter_by(parent_group_id=security_group_id).\ |
2287 | + all() |
2288 | + return result |
2289 | + |
2290 | + def security_group_rule_get_by_security_group_grantee(self, |
2291 | + security_group_id, |
2292 | + is_admin=False, |
2293 | + read_deleted=False): |
2294 | + if is_admin: |
2295 | + result = self._session.query( |
2296 | + self.security_group_ingress_rule_dto_class).\ |
2297 | + filter_by(deleted=read_deleted).\ |
2298 | + filter_by(group_id=security_group_id).\ |
2299 | + all() |
2300 | + else: |
2301 | + result = self._session.query( |
2302 | + self.security_group_ingress_rule_dto_class ).\ |
2303 | + filter_by(deleted=False).\ |
2304 | + filter_by(group_id=security_group_id).\ |
2305 | + all() |
2306 | + return result |
2307 | + |
2308 | + def security_group_rule_create(self, values): |
2309 | + security_group_rule_ref = self.security_group_ingress_rule_dto_class() |
2310 | + security_group_rule_ref.update(values) |
2311 | + security_group_rule_ref.save(session=self._session) |
2312 | + return security_group_rule_ref |
2313 | + |
2314 | + def security_group_rule_destroy(self, security_group_rule_id): |
2315 | + with self._session.begin(): |
2316 | + security_group_rule = self.security_group_rule_get( |
2317 | + security_group_rule_id) |
2318 | + security_group_rule.delete(session=self._session) |
2319 | |
2320 | # ---------------------- |
2321 | # Data Access Objects. |
2322 | # ---------------------- |
2323 | |
2324 | class FlatVlanNetworkDataAccess( |
2325 | - EthernetCardDataAccess, NetworkDataAccess, FixedIpDataAccess, |
2326 | - FloatingIpDataAccess): |
2327 | + NetworkDataAccess, FixedIpDataAccess, FloatingIpDataAccess, |
2328 | + SecurityGroupEthernetCardAssociationDataAccess, |
2329 | + SecurityGroupIngressRuleDataAccess): |
2330 | """A Data Access Object to access all data for the Flat VLAN network |
2331 | service. |
2332 | |
2333 | @@ -449,22 +674,29 @@ |
2334 | |
2335 | floating_ip_dto_class = FloatingIp |
2336 | |
2337 | - def network_get_by_project(self, project_id, associate=True): |
2338 | + security_group_dto_class = SecurityGroup |
2339 | + |
2340 | + security_group_ingress_rule_dto_class = SecurityGroupIngressRule |
2341 | + |
2342 | + security_group_ethernet_card_association_dto_class = \ |
2343 | + SecurityGroupEthernetCardAssociation |
2344 | + |
2345 | + def network_get_by_tenant(self, tenant_id, associate=True): |
2346 | result = self._session.query(self.network_dto_class).\ |
2347 | - filter_by(project_id=project_id).\ |
2348 | + filter_by(tenant_id=tenant_id).\ |
2349 | filter_by(deleted=False).\ |
2350 | first() |
2351 | if not result: |
2352 | if not associate: |
2353 | return None |
2354 | try: |
2355 | - return self.network_associate(project_id) |
2356 | + return self.network_associate(tenant_id) |
2357 | except IntegrityError: |
2358 | # NOTE(vish): We hit this if there is a race and two |
2359 | # processes are attempting to allocate the |
2360 | # network at the same time |
2361 | result = self._session.query(self.network_dto_class).\ |
2362 | - filter_by(project_id=project_id).\ |
2363 | + filter_by(tenant_id=tenant_id).\ |
2364 | filter_by(deleted=False).\ |
2365 | first() |
2366 | return result |
2367 | @@ -474,22 +706,22 @@ |
2368 | filter_by(cidr=cidr).first() |
2369 | |
2370 | if not result: |
2371 | - raise exception.NotFound(_('Network with cidr %s does not exist') % |
2372 | + raise exception.NotFound('Network with cidr %s does not exist' % |
2373 | cidr) |
2374 | return result |
2375 | |
2376 | - def network_associate(self, project_id): |
2377 | + def network_associate(self, tenant_id): |
2378 | with self._session.begin(): |
2379 | network_ref = self._session.query(self.network_dto_class).\ |
2380 | filter_by(deleted=False).\ |
2381 | - filter_by(project_id=None).\ |
2382 | + filter_by(tenant_id=None).\ |
2383 | with_lockmode('update').\ |
2384 | first() |
2385 | # NOTE(vish): if with_lockmode isn't supported, as in sqlite, |
2386 | # then this has concurrency issues |
2387 | if not network_ref: |
2388 | - raise nova_api.NoMoreNetworks() |
2389 | - network_ref['project_id'] = project_id |
2390 | + raise nova_db.NoMoreNetworks() |
2391 | + network_ref['tenant_id'] = tenant_id |
2392 | self._session.add(network_ref) |
2393 | return network_ref |
2394 | |
2395 | @@ -542,3 +774,33 @@ |
2396 | ip_ref.ethernet_card = self.ethernet_card_get(ethernet_card_id) |
2397 | self._session.add(ip_ref) |
2398 | return ip_ref |
2399 | + |
2400 | + def security_group_destroy(self, security_group_id): |
2401 | + with self._session.begin(): |
2402 | + self._session.query(self.security_group_dto_class).\ |
2403 | + filter_by(id=security_group_id).\ |
2404 | + update({'deleted': 1, |
2405 | + 'deleted_at': datetime.datetime.utcnow(), |
2406 | + 'updated_at': literal_column('updated_at')}) |
2407 | + self._session.query( |
2408 | + self.security_group_ethernet_card_association_dto_class).\ |
2409 | + filter_by(security_group_id=security_group_id).\ |
2410 | + update({'deleted': 1, |
2411 | + 'deleted_at': datetime.datetime.utcnow(), |
2412 | + 'updated_at': literal_column('updated_at')}) |
2413 | + self._session.query(self.security_group_ingress_rule_dto_class).\ |
2414 | + filter_by(group_id=security_group_id).\ |
2415 | + update({'deleted': 1, |
2416 | + 'deleted_at': datetime.datetime.utcnow(), |
2417 | + 'updated_at': literal_column('updated_at')}) |
2418 | + |
2419 | + def security_group_destroy_all(self): |
2420 | + with self._session.begin(): |
2421 | + self._session.query(self.security_group_dto_class).\ |
2422 | + update({'deleted': 1, |
2423 | + 'deleted_at': datetime.datetime.utcnow(), |
2424 | + 'updated_at': literal_column('updated_at')}) |
2425 | + self._session.query(self.security_group_ingress_rule_dto_class).\ |
2426 | + update({'deleted': 1, |
2427 | + 'deleted_at': datetime.datetime.utcnow(), |
2428 | + 'updated_at': literal_column('updated_at')}) |
2429 | |
2430 | === added file 'nova/network/flat_vlan/firewall.py' |
2431 | --- nova/network/flat_vlan/firewall.py 1970-01-01 00:00:00 +0000 |
2432 | +++ nova/network/flat_vlan/firewall.py 2011-04-18 00:30:54 +0000 |
2433 | @@ -0,0 +1,581 @@ |
2434 | +# vim: tabstop=4 shiftwidth=4 softtabstop=4 |
2435 | + |
2436 | +# Copyright 2011 Midokura KK |
2437 | +# All Rights Reserved. |
2438 | +# |
2439 | +# Licensed under the Apache License, Version 2.0 (the "License"); you may |
2440 | +# not use this file except in compliance with the License. You may obtain |
2441 | +# a copy of the License at |
2442 | +# |
2443 | +# http://www.apache.org/licenses/LICENSE-2.0 |
2444 | +# |
2445 | +# Unless required by applicable law or agreed to in writing, software |
2446 | +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
2447 | +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
2448 | +# License for the specific language governing permissions and limitations |
2449 | +# under the License |
2450 | +from eventlet import tpool |
2451 | +import IPy |
2452 | +import libvirt |
2453 | + |
2454 | +from nova import log as logging |
2455 | +from nova import utils |
2456 | + |
2457 | +LOG = logging.getLogger('nova.network.flat_vlan.firewall') |
2458 | + |
2459 | +def _get_ip_version(cidr): |
2460 | + net = IPy.IP(cidr) |
2461 | + return int(net.version()) |
2462 | + |
2463 | +def _get_net_and_mask(cidr): |
2464 | + net = IPy.IP(cidr) |
2465 | + return str(net.net()), str(net.netmask()) |
2466 | + |
2467 | +def _get_net_and_prefixlen(cidr): |
2468 | + net = IPy.IP(cidr) |
2469 | + return str(net.net()), str(net.prefixlen()) |
2470 | + |
2471 | +class FirewallDriver(object): |
2472 | + |
2473 | + def prepare_vnic_filter(self, vnic_id, net_info, is_vpn, allow_traffic, |
2474 | + conn): |
2475 | + """Prepare filters for the vnic. |
2476 | + |
2477 | + At this point, the instance of the vnic isn't running yet.""" |
2478 | + raise NotImplementedError() |
2479 | + |
2480 | + def unfilter_vnic(self, vnic_id, ipv6_enabled): |
2481 | + """Stop filtering vnic""" |
2482 | + raise NotImplementedError() |
2483 | + |
2484 | + def apply_vnic_filter(self, vnic_id): |
2485 | + """Apply vnic filter. |
2486 | + |
2487 | + Once this method returns, the vnic should be firewalled |
2488 | + appropriately. This method should as far as possible be a |
2489 | + no-op. It's vastly preferred to get everything set up in |
2490 | + prepare_vnic_filter. |
2491 | + """ |
2492 | + raise NotImplementedError() |
2493 | + |
2494 | + def refresh_security_group_rules(self, security_group_id, rules, |
2495 | + ipv6_enabled, allow_traffic, |
2496 | + conn): |
2497 | + """Refresh security group rules from data store |
2498 | + |
2499 | + Gets called when a rule has been added to or removed from |
2500 | + the security group.""" |
2501 | + raise NotImplementedError() |
2502 | + |
2503 | + def refresh_security_group_members(self, security_group_id): |
2504 | + """Refresh security group members from data store |
2505 | + |
2506 | + Gets called when an instance gets added to or removed from |
2507 | + the security group.""" |
2508 | + raise NotImplementedError() |
2509 | + |
2510 | + def setup_basic_filtering(self, vnic_id, net_info, allow_traffic, |
2511 | + conn): |
2512 | + """Create rules to block spoofing and allow dhcp. |
2513 | + |
2514 | + This gets called when spawning an instance, before |
2515 | + :method:`prepare_vnic_filter`. |
2516 | + |
2517 | + """ |
2518 | + raise NotImplementedError() |
2519 | + |
2520 | + def vnic_filter_exists(self, vnic_id, conn): |
2521 | + """Check nova-vnic-vnic-xxx exists""" |
2522 | + raise NotImplementedError() |
2523 | + |
2524 | +class NWFilterFirewall(FirewallDriver): |
2525 | + """ |
2526 | + This class implements a network filtering mechanism versatile |
2527 | + enough for EC2 style Security Group filtering by leveraging |
2528 | + libvirt's nwfilter. |
2529 | + |
2530 | + First, all vnics get a filter ("nova-base-filter") applied. |
2531 | + This filter provides some basic security such as protection against |
2532 | + MAC spoofing, IP spoofing, and ARP spoofing. |
2533 | + |
2534 | + This filter drops all incoming ipv4 and ipv6 connections. |
2535 | + Outgoing connections are never blocked. |
2536 | + |
2537 | + Second, every security group maps to a nwfilter filter(*). |
2538 | + NWFilters can be updated at runtime and changes are applied |
2539 | + immediately, so changes to security groups can be applied at |
2540 | + runtime (as mandated by the spec). |
2541 | + |
2542 | + Security group rules are named "nova-secgroup-<id>" where <id> |
2543 | + is the internal id of the security group. They're applied only on |
2544 | + hosts that have instances in the security group in question. |
2545 | + |
2546 | + Updates to security groups are done by updating the data model |
2547 | + (in response to API calls) followed by a request sent to all |
2548 | + the nodes with instances in the security group to refresh the |
2549 | + security group. |
2550 | + |
2551 | + Each vnic has its own NWFilter, which references the above |
2552 | + mentioned security group NWFilters. This was done because |
2553 | + interfaces can only reference one filter while filters can |
2554 | + reference multiple other filters. This has the added benefit of |
2555 | + actually being able to add and remove security groups from a |
2556 | + vnic at run time. This functionality is not exposed anywhere, |
2557 | + though. |
2558 | + |
2559 | + Outstanding questions: |
2560 | + |
2561 | + The name is unique, so would there be any good reason to sync |
2562 | + the uuid across the nodes (by assigning it from the datamodel)? |
2563 | + |
2564 | + |
2565 | + (*) This sentence brought to you by the redundancy department of |
2566 | + redundancy. |
2567 | + |
2568 | + """ |
2569 | + |
2570 | + def __init__(self, **kwargs): |
2571 | + self.static_filters_configured = False |
2572 | + self.handle_security_groups = False |
2573 | + |
2574 | + def apply_vnic_filter(self, _vnic_id): |
2575 | + """No-op. Everything is done in prepare_vnic_filter""" |
2576 | + pass |
2577 | + |
2578 | + def nova_dhcp_filter(self): |
2579 | + """The standard allow-dhcp-server filter is an <ip> one, so it uses |
2580 | + ebtables to allow traffic through. Without a corresponding rule in |
2581 | + iptables, it'll get blocked anyway.""" |
2582 | + |
2583 | + return '''<filter name='nova-allow-dhcp-server' chain='ipv4'> |
2584 | + <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid> |
2585 | + <rule action='accept' direction='out' |
2586 | + priority='100'> |
2587 | + <udp srcipaddr='0.0.0.0' |
2588 | + dstipaddr='255.255.255.255' |
2589 | + srcportstart='68' |
2590 | + dstportstart='67'/> |
2591 | + </rule> |
2592 | + <rule action='accept' direction='in' |
2593 | + priority='100'> |
2594 | + <udp srcipaddr='$DHCPSERVER' |
2595 | + srcportstart='67' |
2596 | + dstportstart='68'/> |
2597 | + </rule> |
2598 | + </filter>''' |
2599 | + |
2600 | + def nova_ra_filter(self): |
2601 | + return '''<filter name='nova-allow-ra-server' chain='root'> |
2602 | + <uuid>d707fa71-4fb5-4b27-9ab7-ba5ca19c8804</uuid> |
2603 | + <rule action='accept' direction='inout' |
2604 | + priority='100'> |
2605 | + <icmpv6 srcipaddr='$RASERVER'/> |
2606 | + </rule> |
2607 | + </filter>''' |
2608 | + |
2609 | + def setup_basic_filtering(self, vnic_id, net_info, allow_traffic, |
2610 | + conn): |
2611 | + """Set up basic filtering (MAC, IP, and ARP spoofing protection)""" |
2612 | + logging.info('called setup_basic_filtering in nwfilter') |
2613 | + |
2614 | + if self.handle_security_groups: |
2615 | + # No point in setting up a filter set that we'll be overriding |
2616 | + # anyway. |
2617 | + return |
2618 | + |
2619 | + logging.info('ensuring static filters') |
2620 | + self._ensure_static_filters(vnic_id, net_info, allow_traffic, conn) |
2621 | + |
2622 | + vnic_filter_name = self._vnic_filter_name(vnic_id) |
2623 | + self._define_filter(self._filter_container(vnic_filter_name, |
2624 | + ['nova-base']), |
2625 | + conn) |
2626 | + |
2627 | + def _ensure_static_filters(self, vnic_id, net_info, allow_traffic, |
2628 | + conn): |
2629 | + if self.static_filters_configured: |
2630 | + return |
2631 | + |
2632 | + self._define_filter(self._filter_container('nova-base', |
2633 | + ['no-mac-spoofing', |
2634 | + 'no-ip-spoofing', |
2635 | + 'no-arp-spoofing', |
2636 | + 'allow-dhcp-server']), |
2637 | + conn) |
2638 | + self._define_filter(self.nova_base_ipv4_filter, conn) |
2639 | + self._define_filter(self.nova_base_ipv6_filter, conn) |
2640 | + self._define_filter(self.nova_dhcp_filter, conn) |
2641 | + self._define_filter(self.nova_ra_filter, conn) |
2642 | + self._define_filter(self.nova_vpn_filter, conn) |
2643 | + if allow_traffic: |
2644 | + self._define_filter(self.nova_project_filter, conn) |
2645 | + if net_info['ipv6_enabled']: |
2646 | + self._define_filter(self.nova_project_filter_v6, conn) |
2647 | + |
2648 | + self.static_filters_configured = True |
2649 | + |
2650 | + def _filter_container(self, name, filters): |
2651 | + xml = '''<filter name='%s' chain='root'>%s</filter>''' % ( |
2652 | + name, |
2653 | + ''.join(["<filterref filter='%s'/>" % (f,) for f in filters])) |
2654 | + return xml |
2655 | + |
2656 | + nova_vpn_filter = '''<filter name='nova-vpn' chain='root'> |
2657 | + <uuid>2086015e-cf03-11df-8c5d-080027c27973</uuid> |
2658 | + <filterref filter='allow-dhcp-server'/> |
2659 | + <filterref filter='nova-allow-dhcp-server'/> |
2660 | + <filterref filter='nova-base-ipv4'/> |
2661 | + <filterref filter='nova-base-ipv6'/> |
2662 | + </filter>''' |
2663 | + |
2664 | + def nova_base_ipv4_filter(self): |
2665 | + retval = "<filter name='nova-base-ipv4' chain='ipv4'>" |
2666 | + for protocol in ['tcp', 'udp', 'icmp']: |
2667 | + for direction, action, priority in [('out', 'accept', 399), |
2668 | + ('in', 'drop', 400)]: |
2669 | + retval += """<rule action='%s' direction='%s' priority='%d'> |
2670 | + <%s /> |
2671 | + </rule>""" % (action, direction, |
2672 | + priority, protocol) |
2673 | + retval += '</filter>' |
2674 | + return retval |
2675 | + |
2676 | + def nova_base_ipv6_filter(self): |
2677 | + retval = "<filter name='nova-base-ipv6' chain='ipv6'>" |
2678 | + for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']: |
2679 | + for direction, action, priority in [('out', 'accept', 399), |
2680 | + ('in', 'drop', 400)]: |
2681 | + retval += """<rule action='%s' direction='%s' priority='%d'> |
2682 | + <%s /> |
2683 | + </rule>""" % (action, direction, |
2684 | + priority, protocol) |
2685 | + retval += '</filter>' |
2686 | + return retval |
2687 | + |
2688 | + def nova_project_filter(self): |
2689 | + retval = "<filter name='nova-project' chain='ipv4'>" |
2690 | + for protocol in ['tcp', 'udp', 'icmp']: |
2691 | + retval += """<rule action='accept' direction='in' priority='200'> |
2692 | + <%s srcipaddr='$PROJNET' srcipmask='$PROJMASK' /> |
2693 | + </rule>""" % protocol |
2694 | + retval += '</filter>' |
2695 | + return retval |
2696 | + |
2697 | + def nova_project_filter_v6(self): |
2698 | + retval = "<filter name='nova-project-v6' chain='ipv6'>" |
2699 | + for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']: |
2700 | + retval += """<rule action='accept' direction='inout' |
2701 | + priority='200'> |
2702 | + <%s srcipaddr='$PROJNETV6' |
2703 | + srcipmask='$PROJMASKV6' /> |
2704 | + </rule>""" % (protocol) |
2705 | + retval += '</filter>' |
2706 | + return retval |
2707 | + |
2708 | + def _define_filter(self, xml, conn): |
2709 | + if callable(xml): |
2710 | + xml = xml() |
2711 | + # execute in a native thread and block current greenthread until done |
2712 | + tpool.execute(conn().nwfilterDefineXML, xml) |
2713 | + |
2714 | + def unfilter_vnic(self, _vnic_id, _ipv6_enabled): |
2715 | + # Nothing to do |
2716 | + pass |
2717 | + |
2718 | + def prepare_vnic_filter(self, vnic_id, net_info, is_vpn, allow_traffic, |
2719 | + conn): |
2720 | + """ |
2721 | + Creates an NWFilter for the given instance. In the process, |
2722 | + it makes sure the filters for the security groups as well as |
2723 | + the base filter are all in place. |
2724 | + """ |
2725 | + if is_vpn: |
2726 | + base_filter = 'nova-vpn' |
2727 | + else: |
2728 | + base_filter = 'nova-base' |
2729 | + |
2730 | + vnic_secgroup_filter_name = \ |
2731 | + '%s-secgroup' % (self._vnic_filter_name(vnic_id)) |
2732 | + |
2733 | + vnic_secgroup_filter_children = ['nova-base-ipv4', |
2734 | + 'nova-base-ipv6', |
2735 | + 'nova-allow-dhcp-server'] |
2736 | + for security_group in net_info['security_groups']: |
2737 | + |
2738 | + self.refresh_security_group_rules(security_group['id'], |
2739 | + security_group['rules'], |
2740 | + net_info['ipv6_enabled'], |
2741 | + allow_traffic, |
2742 | + conn) |
2743 | + |
2744 | + vnic_secgroup_filter_children += [('nova-secgroup-%s' % |
2745 | + security_group['id'])] |
2746 | + |
2747 | + self._define_filter( |
2748 | + self._filter_container(vnic_secgroup_filter_name, |
2749 | + vnic_secgroup_filter_children), |
2750 | + conn) |
2751 | + |
2752 | + vnic_filter_name = self._vnic_filter_name(vnic_id) |
2753 | + vnic_filter_children = [base_filter, vnic_secgroup_filter_name] |
2754 | + if net_info['ipv6_enabled'] and net_info['gateway_v6']: |
2755 | + vnic_secgroup_filter_children += ['nova-allow-ra-server'] |
2756 | + |
2757 | + if allow_traffic: |
2758 | + vnic_filter_children += ['nova-project'] |
2759 | + if net_info['ipv6_enabled']: |
2760 | + vnic_filter_children += ['nova-project-v6'] |
2761 | + |
2762 | + self._define_filter(self._filter_container(vnic_filter_name, |
2763 | + vnic_filter_children), |
2764 | + conn) |
2765 | + return |
2766 | + |
2767 | + def refresh_security_group_rules(self, security_group_id, rules, |
2768 | + ipv6_enabled, _allow_traffic, |
2769 | + conn): |
2770 | + return self._define_filter( |
2771 | + self.security_group_to_nwfilter_xml(security_group_id, rules, |
2772 | + ipv6_enabled), |
2773 | + conn) |
2774 | + |
2775 | + def security_group_to_nwfilter_xml(self, security_group_id, rules, |
2776 | + ipv6_enabled): |
2777 | + rule_xml = "" |
2778 | + v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'} |
2779 | + for rule in rules: |
2780 | + rule_xml += "<rule action='accept' direction='in' priority='300'>" |
2781 | + if rule['cidr']: |
2782 | + version = _get_ip_version(rule['cidr']) |
2783 | + if(ipv6_enabled and version == 6): |
2784 | + net, prefixlen = _get_net_and_prefixlen(rule['cidr']) |
2785 | + rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ |
2786 | + (v6protocol[rule['protocol']], net, prefixlen) |
2787 | + else: |
2788 | + net, mask = _get_net_and_mask(rule['cidr']) |
2789 | + rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ |
2790 | + (rule['protocol'], net, mask) |
2791 | + if rule['protocol'] in ['tcp', 'udp']: |
2792 | + rule_xml += "dstportstart='%s' dstportend='%s' " % \ |
2793 | + (rule['from_port'], rule['to_port']) |
2794 | + elif rule['protocol'] == 'icmp': |
2795 | + LOG.info('rule.protocol: %r, rule.from_port: %r, ' |
2796 | + 'rule.to_port: %r', rule['protocol'], |
2797 | + rule['from_port'], rule['to_port']) |
2798 | + if rule['from_port'] != -1: |
2799 | + rule_xml += "type='%s' " % rule['from_port'] |
2800 | + if rule['to_port'] != -1: |
2801 | + rule_xml += "code='%s' " % rule['to_port'] |
2802 | + |
2803 | + rule_xml += '/>\n' |
2804 | + rule_xml += "</rule>\n" |
2805 | + xml = "<filter name='nova-secgroup-%s' " % security_group_id |
2806 | + if(ipv6_enabled): |
2807 | + xml += "chain='root'>%s</filter>" % rule_xml |
2808 | + else: |
2809 | + xml += "chain='ipv4'>%s</filter>" % rule_xml |
2810 | + return xml |
2811 | + |
2812 | + def _vnic_filter_name(self, vnic_id): |
2813 | + return 'nova-vnic-%s' % vnic_id |
2814 | + |
2815 | + def vnic_filter_exists(self, vnic_id, conn): |
2816 | + vnic_filter_name = self._vnic_filter_name(vnic_id) |
2817 | + try: |
2818 | + conn().nwfilterLookupByName(vnic_filter_name) |
2819 | + except libvirt.libvirtError: |
2820 | + name = vnic_id |
2821 | + LOG.debug(_('The nwfilter(%(vnic_filter_name)s) for ' |
2822 | + '%(name)s is not found.') % locals()) |
2823 | + return False |
2824 | + return True |
2825 | + |
2826 | +class IptablesFirewallDriver(FirewallDriver): |
2827 | + def __init__(self, execute=None, **kwargs): |
2828 | + from nova.network.flat_vlan.driver.linux import filter |
2829 | + self.iptables = filter.iptables_manager |
2830 | + self.vnics = {} |
2831 | + self.nwfilter = NWFilterFirewall() |
2832 | + |
2833 | + self.iptables.ipv4['filter'].add_chain('sg-fallback') |
2834 | + self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP') |
2835 | + self.iptables.ipv6['filter'].add_chain('sg-fallback') |
2836 | + self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP') |
2837 | + |
2838 | + def setup_basic_filtering(self, vnic_id, net_info, allow_traffic, |
2839 | + conn): |
2840 | + """Use NWFilter from libvirt for this.""" |
2841 | + return self.nwfilter.setup_basic_filtering(vnic_id, net_info, |
2842 | + allow_traffic, conn) |
2843 | + |
2844 | + def apply_vnic_filter(self, _vnic_id): |
2845 | + """No-op. Everything is done in prepare_vnic_filter""" |
2846 | + pass |
2847 | + |
2848 | + def unfilter_vnic(self, vnic_id, ipv6_enabled): |
2849 | + if self.vnics.pop(vnic_id, None): |
2850 | + self._remove_filters_for_vnic(vnic_id, ipv6_enabled) |
2851 | + self.iptables.apply() |
2852 | + else: |
2853 | + LOG.info('Attempted to unfilter vnic %s which is not ' |
2854 | + 'filtered', vnic_id) |
2855 | + |
2856 | + def prepare_vnic_filter(self, vnic_id, net_info, _is_vpn, allow_traffic, |
2857 | + _conn): |
2858 | + self.vnics[vnic_id] = net_info |
2859 | + self._add_filters_for_vnic(vnic_id, net_info, allow_traffic) |
2860 | + self.iptables.apply() |
2861 | + |
2862 | + def _add_filters_for_vnic(self, vnic_id, net_info, allow_traffic): |
2863 | + chain_name = self._vnic_chain_name(vnic_id) |
2864 | + |
2865 | + self.iptables.ipv4['filter'].add_chain(chain_name) |
2866 | + ipv4_address = net_info['IPs'][0]['fixed_ip'] |
2867 | + self.iptables.ipv4['filter'].add_rule('local', |
2868 | + '-d %s -j $%s' % |
2869 | + (ipv4_address, chain_name)) |
2870 | + |
2871 | + if net_info['ipv6_enabled']: |
2872 | + self.iptables.ipv6['filter'].add_chain(chain_name) |
2873 | + ipv6_address = net_info['IPs'][0]['ip_v6'] |
2874 | + self.iptables.ipv6['filter'].add_rule('local', |
2875 | + '-d %s -j $%s' % |
2876 | + (ipv6_address, |
2877 | + chain_name)) |
2878 | + |
2879 | + ipv4_rules, ipv6_rules = self._vnic_rules(net_info, allow_traffic) |
2880 | + |
2881 | + for rule in ipv4_rules: |
2882 | + self.iptables.ipv4['filter'].add_rule(chain_name, rule) |
2883 | + |
2884 | + if net_info['ipv6_enabled']: |
2885 | + for rule in ipv6_rules: |
2886 | + self.iptables.ipv6['filter'].add_rule(chain_name, rule) |
2887 | + |
2888 | + def _remove_filters_for_vnic(self, vnic_id, ipv6_enabled): |
2889 | + |
2890 | + chain_name = self._vnic_chain_name(vnic_id) |
2891 | + |
2892 | + self.iptables.ipv4['filter'].remove_chain(chain_name) |
2893 | + if ipv6_enabled: |
2894 | + self.iptables.ipv6['filter'].remove_chain(chain_name) |
2895 | + |
2896 | + def _vnic_rules(self, net_info, allow_traffic): |
2897 | + |
2898 | + ipv4_rules = [] |
2899 | + ipv6_rules = [] |
2900 | + |
2901 | + # Always drop invalid packets |
2902 | + ipv4_rules += ['-m state --state ' 'INVALID -j DROP'] |
2903 | + ipv6_rules += ['-m state --state ' 'INVALID -j DROP'] |
2904 | + |
2905 | + # Allow established connections |
2906 | + ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT'] |
2907 | + ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT'] |
2908 | + |
2909 | + if net_info['IPs'][0]['gateway']: |
2910 | + ipv4_rules += ['-s %s -p udp --sport 67 --dport 68 ' |
2911 | + '-j ACCEPT' % (net_info['IPs'][0]['gateway'],)] |
2912 | + |
2913 | + #Allow project network traffic |
2914 | + if allow_traffic: |
2915 | + if net_info['IPs'][0]['cidr']: |
2916 | + ipv4_rules += ['-s %s -j ACCEPT' % |
2917 | + (net_info['IPs'][0]['cidr'],)] |
2918 | + |
2919 | + if net_info['ipv6_enabled']: |
2920 | + # Allow RA responses |
2921 | + if net_info['IPs'][0]['gateway_v6']: |
2922 | + ipv6_rules += ['-s %s/128 -p icmpv6 -j ACCEPT' % |
2923 | + (net_info['IPs'][0]['gateway_v6'],)] |
2924 | + |
2925 | + #Allow project network traffic |
2926 | + if allow_traffic: |
2927 | + if net_info['IPs'][0]['cidr_v6']: |
2928 | + ipv6_rules += ['-s %s -j ACCEPT' % |
2929 | + (net_info['IPs'][0]['cidr_v6'],)] |
2930 | + |
2931 | + security_groups = net_info['security_groups'] |
2932 | + |
2933 | + # then, security group chains and rules |
2934 | + for security_group in security_groups: |
2935 | + |
2936 | + for rule in security_group['rules']: |
2937 | + logging.info('%r', rule) |
2938 | + |
2939 | + if not rule.cidr: |
2940 | + # Eventually, a mechanism to grant access for security |
2941 | + # groups will turn up here. It'll use ipsets. |
2942 | + continue |
2943 | + |
2944 | + version = _get_ip_version(rule.cidr) |
2945 | + if version == 4: |
2946 | + rules = ipv4_rules |
2947 | + else: |
2948 | + rules = ipv6_rules |
2949 | + |
2950 | + protocol = rule.protocol |
2951 | + if version == 6 and rule.protocol == 'icmp': |
2952 | + protocol = 'icmpv6' |
2953 | + |
2954 | + args = ['-p', protocol, '-s', rule.cidr] |
2955 | + |
2956 | + if rule.protocol in ['udp', 'tcp']: |
2957 | + if rule.from_port == rule.to_port: |
2958 | + args += ['--dport', '%s' % (rule.from_port,)] |
2959 | + else: |
2960 | + args += ['-m', 'multiport', |
2961 | + '--dports', '%s:%s' % (rule.from_port, |
2962 | + rule.to_port)] |
2963 | + elif rule.protocol == 'icmp': |
2964 | + icmp_type = rule.from_port |
2965 | + icmp_code = rule.to_port |
2966 | + |
2967 | + if icmp_type == -1: |
2968 | + icmp_type_arg = None |
2969 | + else: |
2970 | + icmp_type_arg = '%s' % icmp_type |
2971 | + if not icmp_code == -1: |
2972 | + icmp_type_arg += '/%s' % icmp_code |
2973 | + |
2974 | + if icmp_type_arg: |
2975 | + if version == 4: |
2976 | + args += ['-m', 'icmp', '--icmp-type', |
2977 | + icmp_type_arg] |
2978 | + elif version == 6: |
2979 | + args += ['-m', 'icmp6', '--icmpv6-type', |
2980 | + icmp_type_arg] |
2981 | + |
2982 | + args += ['-j ACCEPT'] |
2983 | + rules += [' '.join(args)] |
2984 | + |
2985 | + ipv4_rules += ['-j $sg-fallback'] |
2986 | + ipv6_rules += ['-j $sg-fallback'] |
2987 | + |
2988 | + return ipv4_rules, ipv6_rules |
2989 | + |
2990 | + def vnic_filter_exists(self, vnic_id, conn): |
2991 | + """Check nova-vnic_id exists""" |
2992 | + return self.nwfilter.vnic_filter_exists(vnic_id, conn) |
2993 | + |
2994 | + def refresh_security_group_members(self, _security_group): |
2995 | + pass |
2996 | + |
2997 | + def refresh_security_group_rules(self, _security_group_id, _rules, |
2998 | + _ipv6_enabled, allow_traffic, |
2999 | + _conn): |
3000 | + self._do_refresh_security_group_rules(allow_traffic) |
3001 | + self.iptables.apply() |
3002 | + |
3003 | + @utils.synchronized('iptables', external=True) |
3004 | + def _do_refresh_security_group_rules(self, allow_traffic): |
3005 | + for vnic_id, net_info in self.vnics.items(): |
3006 | + self._remove_filters_for_vnic(vnic_id, net_info['ipv6_enabled']) |
3007 | + self._add_filters_for_vnic(vnic_id, net_info, allow_traffic) |
3008 | + |
3009 | + def _security_group_chain_name(self, security_group_id): |
3010 | + return 'nova-sg-%s' % (security_group_id,) |
3011 | + |
3012 | + def _vnic_chain_name(self, vnic_id): |
3013 | + return 'vnic-%s' % (vnic_id,) |
3014 | + |
3015 | |
3016 | === modified file 'nova/network/flat_vlan/flags.py' |
3017 | --- nova/network/flat_vlan/flags.py 2011-04-04 09:52:18 +0000 |
3018 | +++ nova/network/flat_vlan/flags.py 2011-04-18 00:30:54 +0000 |
3019 | @@ -100,6 +100,9 @@ |
3020 | 'Fixed IP address block') |
3021 | flags.DEFINE_string('net_flat_vlan_fixed_range_v6', 'fd00::/48', |
3022 | 'Fixed IPv6 address block') |
3023 | +flags.DEFINE_string('net_flat_vlan_firewall_driver', |
3024 | + 'nova.network.flat_vlan.firewall.IptablesFirewallDriver', |
3025 | + 'Firewall driver (defaults to iptables)') |
3026 | |
3027 | # Sanity check. VLAN must use DHCP. |
3028 | if (flags.FLAGS.net_flat_vlan_use_vlan and |
3029 | |
3030 | === modified file 'nova/network/flat_vlan/manager.py' |
3031 | --- nova/network/flat_vlan/manager.py 2011-04-04 09:52:18 +0000 |
3032 | +++ nova/network/flat_vlan/manager.py 2011-04-18 00:30:54 +0000 |
3033 | @@ -52,15 +52,15 @@ |
3034 | # Check that num_networks + vlan_start is not > 4094, fixes lp708025 |
3035 | if vlan_start: |
3036 | if num_networks + vlan_start > 4094: |
3037 | - raise ValueError(_('The sum between the number of networks and' |
3038 | - ' the vlan start cannot be greater' |
3039 | - ' than 4094')) |
3040 | + raise ValueError('The sum between the number of networks and' |
3041 | + ' the vlan start cannot be greater' |
3042 | + ' than 4094') |
3043 | |
3044 | fixed_net = IPy.IP(cidr) |
3045 | if fixed_net.len() < num_networks * network_size: |
3046 | - raise ValueError(_('The network range is not big enough to fit ' |
3047 | + raise ValueError('The network range is not big enough to fit ' |
3048 | '%(num_networks)s. Network size is %(network_size)s' % |
3049 | - locals())) |
3050 | + locals()) |
3051 | |
3052 | fixed_net_v6 = IPy.IP(cidr_v6) |
3053 | network_size_v6 = 1 << 64 |
3054 | @@ -112,8 +112,8 @@ |
3055 | pass |
3056 | |
3057 | if network_ref is not None: |
3058 | - raise ValueError(_('Network with cidr %s already exists' % |
3059 | - cidr)) |
3060 | + raise ValueError('Network with cidr %s already exists' % |
3061 | + cidr) |
3062 | |
3063 | network_ref = api.network_create_safe(context, net) |
3064 | if network_ref: |
3065 | @@ -122,10 +122,10 @@ |
3066 | |
3067 | return networks |
3068 | |
3069 | -def allocate_ip_by_project(context, ethernet_card_id, is_vpn): |
3070 | +def allocate_ip_by_tenant(context, ethernet_card_id, is_vpn): |
3071 | """Gets an IP from the pool.""" |
3072 | ctxt = context.elevated() |
3073 | - network_ref = api.network_get_by_project(ctxt, context.project_id) |
3074 | + network_ref = api.network_get_by_tenant(ctxt, context.project_id) |
3075 | |
3076 | if is_vpn: |
3077 | ip_ref = api.fixed_ip_get_by_address(ctxt, |
3078 | @@ -139,7 +139,7 @@ |
3079 | api.fixed_ip_update(context, ip_ref['id'], {'allocated': True}) |
3080 | return ip_ref |
3081 | |
3082 | -def bind_ip_to_ethernet_card_by_project(context, ethernet_card_id, is_vpn): |
3083 | +def bind_ip_to_ethernet_card_by_tenant(context, ethernet_card_id, is_vpn): |
3084 | """Assigns IP to ethernet card.""" |
3085 | |
3086 | # Get the ethernet card |
3087 | @@ -152,7 +152,7 @@ |
3088 | ethernet_card_id) |
3089 | # Check to see if this ethernet card does not have IP allocated. |
3090 | if not ip_address: |
3091 | - ip_address = allocate_ip_by_project(context, ethernet_card_id, is_vpn) |
3092 | + ip_address = allocate_ip_by_tenant(context, ethernet_card_id, is_vpn) |
3093 | |
3094 | return ip_address |
3095 | |
3096 | |
3097 | === modified file 'nova/network/flat_vlan/network.py' |
3098 | --- nova/network/flat_vlan/network.py 2011-04-10 16:27:55 +0000 |
3099 | +++ nova/network/flat_vlan/network.py 2011-04-18 00:30:54 +0000 |
3100 | @@ -17,9 +17,9 @@ |
3101 | |
3102 | import collections |
3103 | import datetime |
3104 | -import os |
3105 | |
3106 | from nova import context |
3107 | +from nova import exception |
3108 | from nova import log as logging |
3109 | from nova import manager as nova_manager |
3110 | from nova import rpc |
3111 | @@ -76,7 +76,7 @@ |
3112 | |
3113 | if FLAGS.net_flat_vlan_use_vlan or FLAGS.net_flat_vlan_use_dhcp: |
3114 | self.filter_driver.init_host() |
3115 | - # Set up networking for the projects for which we're already |
3116 | + # Set up networking for the tenants for which we're already |
3117 | # the designated network host. |
3118 | floating_ips = self.db.floating_ip_get_all_by_host(ctxt, |
3119 | self.host) |
3120 | @@ -106,7 +106,7 @@ |
3121 | self.host, |
3122 | time) |
3123 | if num: |
3124 | - LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num) |
3125 | + LOG.debug("Dissassociated %s stale fixed ip(s)", num) |
3126 | |
3127 | def _set_vlan_host(self, context, network_id): |
3128 | """Sets up VLAN networking service host. |
3129 | @@ -236,7 +236,7 @@ |
3130 | """ |
3131 | for vnic_id in vnic_ids: |
3132 | if FLAGS.net_flat_vlan_use_vlan: |
3133 | - ip_address = manager.bind_ip_to_ethernet_card_by_project( |
3134 | + ip_address = manager.bind_ip_to_ethernet_card_by_tenant( |
3135 | context, |
3136 | vnic_id, |
3137 | is_vpn) |
3138 | @@ -265,9 +265,11 @@ |
3139 | mac: MAC address to lease the IP address against. |
3140 | address: IP address to lease. |
3141 | """ |
3142 | - LOG.debug(_("Leasing IP %s"), address, context=context) |
3143 | + LOG.debug("Leasing IP %s", address, context=context) |
3144 | fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) |
3145 | ethernet_card = fixed_ip_ref['ethernet_card'] |
3146 | + |
3147 | + # TODO: Figure out a way to not use instance |
3148 | instance = nova_db_api.instance_get_by_virtual_nic(context, |
3149 | ethernet_card['id']) |
3150 | if not instance: |
3151 | @@ -326,13 +328,13 @@ |
3152 | mac: MAC address to release the IP against. |
3153 | address: IP address to release |
3154 | """ |
3155 | - LOG.debug(_("Releasing IP %s"), address, context=context) |
3156 | + LOG.debug("Releasing IP %s", address, context=context) |
3157 | fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) |
3158 | ethernet_card = fixed_ip_ref['ethernet_card'] |
3159 | instance = nova_db_api.instance_get_by_virtual_nic(context, |
3160 | ethernet_card['id']) |
3161 | if not instance: |
3162 | - raise exception.Error(_("IP %s released that isn't associated") % |
3163 | + raise exception.Error("IP %s released that isn't associated" % |
3164 | address) |
3165 | |
3166 | if nova_db_api.is_user_context(context): |
3167 | @@ -340,11 +342,11 @@ |
3168 | nova_db_api.authorize_project_context(context, instance.project_id) |
3169 | |
3170 | if ethernet_card['mac_address'] != mac: |
3171 | - raise exception.Error(_("IP %(address)s leased to bad" |
3172 | - " mac %s vs %s") % (ethernet_card['mac_address'], mac)) |
3173 | + raise exception.Error("IP %(address)s leased to bad" |
3174 | + " mac %s vs %s" % (ethernet_card['mac_address'], mac)) |
3175 | |
3176 | if not fixed_ip_ref['leased']: |
3177 | - LOG.warn(_("IP %s released that was not leased"), address, |
3178 | + LOG.warn("IP %s released that was not leased", address, |
3179 | context=context) |
3180 | self.db.fixed_ip_update(context, |
3181 | fixed_ip_ref['address'], |
3182 | @@ -355,7 +357,7 @@ |
3183 | # means there will stale entries in the conf file |
3184 | # the code below will update the file if necessary |
3185 | if FLAGS.net_flat_vlan_update_dhcp_on_disassociate: |
3186 | - network_ref = fixed_ip['network'] |
3187 | + network_ref = fixed_ip_ref['network'] |
3188 | hosts = self._get_dhcp_hosts(context, network_ref['id']) |
3189 | self.dhcp_driver.update_dhcp(network_ref['bridge'], |
3190 | network_ref['gateway'], |
3191 | @@ -405,7 +407,7 @@ |
3192 | """Get the list of hosts for an interface.""" |
3193 | ctxt = context.get_admin_context() |
3194 | if FLAGS.fake_rabbit: |
3195 | - LOG_DHCP.debug(_("initializing leases")) |
3196 | + LOG_DHCP.debug("initializing leases") |
3197 | return NetworkService().get_dhcp_host_leases(ctxt, interface) |
3198 | else: |
3199 | return rpc.cast(ctxt, |
3200 | @@ -419,7 +421,7 @@ |
3201 | """Set the IP that was assigned by the DHCP server.""" |
3202 | ctxt = context.get_admin_context() |
3203 | if FLAGS.fake_rabbit: |
3204 | - LOG_DHCP.debug(_("leasing ip")) |
3205 | + LOG_DHCP.debug("leasing ip") |
3206 | return NetworkService().lease_fixed_ip(ctxt, mac, ip_address) |
3207 | else: |
3208 | rpc.cast(ctxt, |
3209 | @@ -432,7 +434,7 @@ |
3210 | @classmethod |
3211 | def old_lease(cls, mac, ip_address, hostname, interface): |
3212 | """Update just as add lease.""" |
3213 | - LOG_DHCP.debug(_("Adopted old lease or got a change of mac/hostname")) |
3214 | + LOG_DHCP.debug("Adopted old lease or got a change of mac/hostname") |
3215 | cls.add_lease(mac, ip_address, hostname, interface) |
3216 | |
3217 | @classmethod |
3218 | @@ -440,7 +442,7 @@ |
3219 | """Called when a lease expires.""" |
3220 | ctxt = context.get_admin_context() |
3221 | if FLAGS.fake_rabbit: |
3222 | - LOG_DHCP.debug(_("releasing ip")) |
3223 | + LOG_DHCP.debug("releasing ip") |
3224 | NetworkService().release_fixed_ip(ctxt, mac, ip_address) |
3225 | else: |
3226 | rpc.cast(ctxt, |
3227 | |
3228 | === modified file 'nova/network/service.py' |
3229 | --- nova/network/service.py 2011-04-08 03:00:41 +0000 |
3230 | +++ nova/network/service.py 2011-04-18 00:30:54 +0000 |
3231 | @@ -50,7 +50,9 @@ |
3232 | """ |
3233 | |
3234 | class INetworkApiService(interface.Interface): |
3235 | - """A Network API service object.""" |
3236 | + """A Network API service object. This should be eventually |
3237 | + refactored into separate API services, such as OpenStack API routes, |
3238 | + Security Group, VNIC management, Network IP management, etc.""" |
3239 | |
3240 | def set_os_routes(net_mapper): |
3241 | """Sets up OpenStack API routes. |
3242 | @@ -104,14 +106,26 @@ |
3243 | fixed_ip: Fixed IPv4 IP address |
3244 | floating_ips: A list of Floating IPv4 IP addresses |
3245 | ip_v6: IPv6 address |
3246 | + security_groups: A list of security group dictionaries. |
3247 | + id: unique identifier of the security group. |
3248 | + name: Name of the security group. |
3249 | + description: Security group description |
3250 | + tenant_id: The tenant to which the security group belongs. |
3251 | + rules: A list of ingress rules for this security group. |
3252 | + parent_group_id: Parent security group ID. |
3253 | + protocol: Protocol for the rule. |
3254 | + from_port: The source port for the rule. |
3255 | + to_port: The destination port for the rule. |
3256 | + cidr: CIDR for the rule. |
3257 | + security_group_id: The security group ID. |
3258 | """ |
3259 | |
3260 | - def get_project_network_info(context, project_id): |
3261 | - """Gets network related information for the project. |
3262 | + def get_tenant_network_info(context, tenant_id): |
3263 | + """Gets network related information for the tenant. |
3264 | |
3265 | Args: |
3266 | context: Nova context object. |
3267 | - project_id: project ID to get the data for. |
3268 | + tenant_id: tenant ID to get the data for. |
3269 | |
3270 | Returns: |
3271 | A dictionary with the following keys: |
3272 | @@ -130,29 +144,29 @@ |
3273 | gateway_v6: network gateway for IPv6 if applicable |
3274 | """ |
3275 | |
3276 | - def get_addresses(context, project_id=None): |
3277 | + def get_addresses(context, tenant_id=None): |
3278 | """Gets all the floating IPs, their mapped fixed IPs and |
3279 | ethernet card ID. |
3280 | |
3281 | Args: |
3282 | context: Nova context object. |
3283 | - project_id: Project ID to filter the IPs by.(optional) |
3284 | + tenant_id: Tenant ID to filter the IPs by.(optional) |
3285 | |
3286 | Returns: |
3287 | A list of dictionary with keys: |
3288 | floating_ip: Public IP |
3289 | fixed_ip: Private IP |
3290 | - project_id: Project ID of the floating IP. |
3291 | + tenant_id: Tenant ID of the floating IP. |
3292 | vnic_id: VNIC ID |
3293 | mac_address: MAC address |
3294 | """ |
3295 | |
3296 | - def allocate_address(context, project_id, ip_quota): |
3297 | - """Gets the number of floating IPs associated with a project. |
3298 | + def allocate_address(context, tenant_id, ip_quota): |
3299 | + """Gets the number of floating IPs associated with a tenant. |
3300 | |
3301 | Args: |
3302 | context: Nova context object needed to access the DB. |
3303 | - project_id: Project to allocate the address from. |
3304 | + tenant_id: Tenant to allocate the address from. |
3305 | ip_quota: Quota for IP addresses. |
3306 | |
3307 | Returns: |
3308 | @@ -162,15 +176,15 @@ |
3309 | quota.QuotaError: Over the quota limit. |
3310 | """ |
3311 | |
3312 | - def deallocate_address(self, context, floating_address): |
3313 | + def deallocate_address(context, floating_address): |
3314 | """Deallocates the public IP address by removing it from any |
3315 | - project. |
3316 | + tenant. |
3317 | |
3318 | Args: |
3319 | context: nova context object needed to access the DB. |
3320 | address: Public IP address to deallocate. |
3321 | """ |
3322 | - def associate_address(self, context, vnic_id, floating_address): |
3323 | + def associate_address(context, vnic_id, floating_address): |
3324 | """Associates a floating address to the fixed IP address of the vnic. |
3325 | |
3326 | Args: |
3327 | @@ -179,7 +193,7 @@ |
3328 | floating_address: public IP address to assign to the VNIC. |
3329 | """ |
3330 | |
3331 | - def disassociate_address(self, context, floating_address): |
3332 | + def disassociate_address(context, floating_address): |
3333 | """Diassociates public IP. |
3334 | |
3335 | Args: |
3336 | @@ -187,6 +201,149 @@ |
3337 | floating_address: public IP address |
3338 | """ |
3339 | |
3340 | + def create_security_group(context, user_id, tenant_id, name, description): |
3341 | + """Creates a new security group record if it does not exist. |
3342 | + |
3343 | + Args: |
3344 | + context: Nova context |
3345 | + user_id: The user ID to associate the group with. |
3346 | + tenant_id: The tenant in which security group associates. |
3347 | + name: Name for the security group. |
3348 | + description: Description of the security group. |
3349 | + |
3350 | + Returns: |
3351 | + A dictionary of security group information with keys: |
3352 | + id: unique identifier of the security group. |
3353 | + name: Name of the security group. |
3354 | + description: Security group description |
3355 | + tenant_id: The tenant to which the security group belongs. |
3356 | + |
3357 | + Raises: |
3358 | + nova.exception.ApiError: Group already exists. |
3359 | + """ |
3360 | + |
3361 | + def get_security_group(context, id, tenant_id=None): |
3362 | + """Gets a security group with a unique identifier of the group. |
3363 | + |
3364 | + Args: |
3365 | + context: Nova context object. |
3366 | + id: Security group ID. |
3367 | + tenant_id: Tenant to get the security group for. |
3368 | + |
3369 | + Returns: |
3370 | + security group info in dictionary with the following keys: |
3371 | + |
3372 | + id: unique identifier of the security group. |
3373 | + name: Name of the security group. |
3374 | + description: Security group description |
3375 | + tenant_id: The tenant to which the security group belongs. |
3376 | + rules: A list of dictionary of security group rules: |
3377 | + parent_group_id: Parent security group ID. |
3378 | + protocol: Protocol for the rule. |
3379 | + from_port: The source port for the rule. |
3380 | + to_port: The destination port for the rule. |
3381 | + cidr: CIDR of the rule. |
3382 | + security_group_id: The security group ID. |
3383 | + vnic_ids: A list of virtual NIC IDs. |
3384 | + """ |
3385 | + |
3386 | + def get_security_groups(context, tenant_id=None): |
3387 | + """Gets a list of security groups, filtered by tenant_id if specified. |
3388 | + |
3389 | + Args: |
3390 | + context: Nova context object. |
3391 | + tenant_id: Tenant to get the security group for. |
3392 | + |
3393 | + Returns: |
3394 | + A list of security groups in dictionary with the following keys: |
3395 | + |
3396 | + id: unique identifier of the security group. |
3397 | + name: Name of the security group. |
3398 | + description: Security group description |
3399 | + tenant_id: The tenant to which the security group belongs. |
3400 | + """ |
3401 | + |
3402 | + def get_security_group_by_name(context, name, tenant_id): |
3403 | + """Gets a security group with a given name of the tenant. |
3404 | + This is expected to return something all the time, since for |
3405 | + EC2 compatibility, VMs cannot be created without one. |
3406 | + |
3407 | + Args: |
3408 | + context: Nova context object. |
3409 | + name: The name of the security group to search. |
3410 | + tenant_id: Tenant to get the security group of. |
3411 | + |
3412 | + Returns: |
3413 | + security group info in dictionary with the following keys: |
3414 | + |
3415 | + id: unique identifier of the security group. |
3416 | + name: Name of the security group. |
3417 | + description: Security group description |
3418 | + tenant_id: The tenant to which the security group belongs. |
3419 | + rules: A list of dictionary of security group rules: |
3420 | + parent_group_id: Parent security group ID. |
3421 | + protocol: Protocol for the rule. |
3422 | + from_port: The source port for the rule. |
3423 | + to_port: The destination port for the rule. |
3424 | + cidr: CIDR of the rule. |
3425 | + security_group_id: The security group ID. |
3426 | + vnic_ids: A list of virtual NIC IDs. |
3427 | + None if it does not exist. |
3428 | + """ |
3429 | + |
3430 | + def delete_security_group(context, id): |
3431 | + """Deletes a security group. |
3432 | + |
3433 | + Args: |
3434 | + context: Nova context object. |
3435 | + id: ID of the security group to delete. |
3436 | + """ |
3437 | + |
3438 | + def create_security_group_rule(context, parent_group_id, cidr=None, |
3439 | + from_port=None, to_port=None, protocol=None, |
3440 | + group_id=None): |
3441 | + """Creates a new security group ingress rule. |
3442 | + |
3443 | + Args: |
3444 | + context: Nova context object. |
3445 | + parent_group_id: Security group this rule belongs to. |
3446 | + cidr: CIDR of the rule. |
3447 | + from_port: The source port. |
3448 | + to_port: The destination port. |
3449 | + protocol: The network protocol for the rule. |
3450 | + group_id: The target security group ID. |
3451 | + |
3452 | + Returns: |
3453 | + The new security group rule info as a dictionary: |
3454 | + Keys: |
3455 | + parent_group_id: Parent security group ID. |
3456 | + protocol: Protocol for the rule. |
3457 | + from_port: The source port for the rule. |
3458 | + to_port: The destination port for the rule. |
3459 | + cidr: CIDR for the rule. |
3460 | + security_group_id: The security group ID. |
3461 | + """ |
3462 | + |
3463 | + def delete_security_group_rule(context, id): |
3464 | + """Deletes a security group rule with the given ID. |
3465 | + |
3466 | + Args: |
3467 | + context: Nova context object. |
3468 | + id: ID of the security group rule to delete. |
3469 | + """ |
3470 | + |
3471 | + def associate_vnic_and_security_group(context, vnic_id, |
3472 | + security_group_id, |
3473 | + tenant_id): |
3474 | + """Associates a VNIC to a security group. |
3475 | + |
3476 | + Args: |
3477 | + context: Nova context object. |
3478 | + vnic_id: Virtual NIC ID. |
3479 | + security_group_id: ID of the security group to associate. |
3480 | + tenant_id: Tenant that security group should belong to. |
3481 | + """ |
3482 | + |
3483 | class INetworkAgentService(interface.Interface): |
3484 | """An OpenStack network agent service object.""" |
3485 | |
3486 | @@ -237,6 +394,67 @@ |
3487 | ip_address, bridge, gateway_v6, and netmast_v6 |
3488 | """ |
3489 | |
3490 | + def prepare_vnic_filter(context, vnic_id, is_vpn, allow_traffic, conn): |
3491 | + """Prepares filters for VNIC. |
3492 | + |
3493 | + Args: |
3494 | + context: Nova context object |
3495 | + vnic_id: VNIC ID |
3496 | + is_vpn: True if setting VPN. |
3497 | + allow_traffic: True if traffic can flow. |
3498 | + conn: connection to hypervisor. |
3499 | + """ |
3500 | + def unfilter_vnic(vnic_id): |
3501 | + """Removes filters for vnic. |
3502 | + |
3503 | + Args: |
3504 | + vnic_id: VNIC ID. |
3505 | + """ |
3506 | + |
3507 | + def apply_vnic_filter(vnic_id): |
3508 | + """Applies the filter for VNIC. Currently unused. |
3509 | + |
3510 | + Args: |
3511 | + vnic_id: VNIC ID |
3512 | + """ |
3513 | + |
3514 | + def refresh_security_group_rules(context, security_group_id, |
3515 | + tenant_id, allow_traffic, conn): |
3516 | + """Refreshes the security group rules. |
3517 | + |
3518 | + Args: |
3519 | + context: Nova context object. |
3520 | + security_group_id: Security group ID. |
3521 | + tenant_id: ID of the tenant. |
3522 | + allow_traffic: True if ok to allow traffic. |
3523 | + conn: hypervisor connection. |
3524 | + """ |
3525 | + |
3526 | + def refresh_security_group_members(security_group_id): |
3527 | + """Refreshes security group members. |
3528 | + |
3529 | + Args: |
3530 | + security_group_id: Security group ID. |
3531 | + """ |
3532 | + |
3533 | + def setup_basic_filtering(context, vnic_id, allow_traffic, conn): |
3534 | + """Sets up basic filtering for VNIC. |
3535 | + |
3536 | + Args: |
3537 | + context: Nova context object. |
3538 | + vnic_id: VNIC to set the filters for. |
3539 | + allow_traffic: True if ok to allow traffic. |
3540 | + conn: hypervisor connection. |
3541 | + """ |
3542 | + |
3543 | + def vnic_filter_exists(vnic_id, conn): |
3544 | + """Checks whether filters for VNIC exists. |
3545 | + |
3546 | + Args: |
3547 | + vnic_id: VNIC ID to check. |
3548 | + conn: hypervisor connection. |
3549 | + """ |
3550 | + |
3551 | class NetworkServiceRouteMap(object): |
3552 | """Wrapper class for route mapper.""" |
3553 | |
3554 | |
3555 | === modified file 'nova/virt/libvirt_conn.py' |
3556 | --- nova/virt/libvirt_conn.py 2011-04-12 10:57:56 +0000 |
3557 | +++ nova/virt/libvirt_conn.py 2011-04-18 00:30:54 +0000 |
3558 | @@ -49,7 +49,6 @@ |
3559 | from xml.etree import ElementTree |
3560 | |
3561 | from eventlet import greenthread |
3562 | -from eventlet import tpool |
3563 | |
3564 | import IPy |
3565 | |
3566 | @@ -102,9 +101,6 @@ |
3567 | flags.DEFINE_string('ajaxterm_portrange', |
3568 | '10000-12000', |
3569 | 'Range of ports that ajaxterm should randomly try to bind') |
3570 | -flags.DEFINE_string('firewall_driver', |
3571 | - 'nova.virt.libvirt_conn.IptablesFirewallDriver', |
3572 | - 'Firewall driver (defaults to iptables)') |
3573 | flags.DEFINE_string('cpuinfo_xml_template', |
3574 | utils.abspath('virt/cpuinfo.xml.template'), |
3575 | 'CpuInfo XML Template (Used only live migration now)') |
3576 | @@ -217,9 +213,6 @@ |
3577 | self._wrapped_conn = None |
3578 | self.read_only = read_only |
3579 | |
3580 | - fw_class = utils.import_class(FLAGS.firewall_driver) |
3581 | - self.firewall_driver = fw_class(get_connection=self._get_connection) |
3582 | - |
3583 | def init_host(self, host): |
3584 | # Adopt existing VM's running here |
3585 | ctxt = context.get_admin_context() |
3586 | @@ -241,8 +234,13 @@ |
3587 | continue |
3588 | |
3589 | vnics = db.virtual_nics_get_by_instance(ctxt, instance['id']) |
3590 | + is_vpn = (instance['image_id'] == FLAGS.vpn_image_id) |
3591 | + net_agent = self._get_net_agent(ctxt, instance['project_id']) |
3592 | for vnic_id in vnics: |
3593 | - self.firewall_driver.prepare_vnic_filter(vnic_id, instance) |
3594 | + net_agent.prepare_vnic_filter( |
3595 | + context.get_admin_context(), vnic_id, is_vpn, |
3596 | + FLAGS.allow_project_net_traffic, |
3597 | + self._get_connection) |
3598 | |
3599 | def _get_connection(self): |
3600 | if not self._wrapped_conn or not self._test_connection(): |
3601 | @@ -384,8 +382,14 @@ |
3602 | power_state.SHUTOFF) |
3603 | break |
3604 | |
3605 | - self.firewall_driver.unfilter_instance(instance) |
3606 | - |
3607 | + vnics = db.virtual_nics_get_by_instance(context.get_admin_context(), |
3608 | + instance['id']) |
3609 | + net_agent = self._get_net_agent(context, instance['project_id']) |
3610 | + for vnic_id in vnics: |
3611 | + net_agent.prepare_vnic_filter(context.get_admin_context(), |
3612 | + vnic_id, |
3613 | + self._get_connection) |
3614 | + |
3615 | if cleanup: |
3616 | self._cleanup(instance) |
3617 | |
3618 | @@ -534,25 +538,36 @@ |
3619 | def reboot(self, instance): |
3620 | self.destroy(instance, False) |
3621 | xml = self.to_xml(instance) |
3622 | - self.firewall_driver.setup_basic_filtering(instance) |
3623 | - self.firewall_driver.prepare_instance_filter(instance) |
3624 | + |
3625 | + ctxt = context.get_admin_context() |
3626 | + vnics = db.virtual_nics_get_by_instance(ctxt, instance['id']) |
3627 | + net_agent = self._get_net_agent(ctxt, instance['project_id']) |
3628 | + is_vpn = (instance['image_id'] == FLAGS.vpn_image_id) |
3629 | + for vnic_id in vnics: |
3630 | + net_agent.setup_basic_filtering( |
3631 | + ctxt, vnic_id, FLAGS.allow_project_net_traffic, |
3632 | + self._get_connection) |
3633 | + net_agent.prepare_vnic_filter(ctxt, vnic_id, is_vpn, |
3634 | + FLAGS.allow_project_net_traffic, |
3635 | + self._get_connection) |
3636 | + |
3637 | self._create_new_domain(xml) |
3638 | - self.firewall_driver.apply_instance_filter(instance) |
3639 | + |
3640 | + for vnic_id in vnics: |
3641 | + net_agent.apply_vnic_filter(vnic_id) |
3642 | |
3643 | timer = utils.LoopingCall(f=None) |
3644 | |
3645 | def _wait_for_reboot(): |
3646 | try: |
3647 | state = self.get_info(instance['name'])['state'] |
3648 | - db.instance_set_state(context.get_admin_context(), |
3649 | - instance['id'], state) |
3650 | + db.instance_set_state(ctxt, instance['id'], state) |
3651 | if state == power_state.RUNNING: |
3652 | LOG.debug(_('instance %s: rebooted'), instance['name']) |
3653 | timer.stop() |
3654 | except Exception, exn: |
3655 | LOG.exception(_('_wait_for_reboot failed: %s'), exn) |
3656 | - db.instance_set_state(context.get_admin_context(), |
3657 | - instance['id'], |
3658 | + db.instance_set_state(ctxt, instance['id'], |
3659 | power_state.SHUTDOWN) |
3660 | timer.stop() |
3661 | |
3662 | @@ -620,24 +635,29 @@ |
3663 | @exception.wrap_exception |
3664 | def spawn(self, instance, network_info=None): |
3665 | xml = self.to_xml(instance, network_info) |
3666 | - db.instance_set_state(context.get_admin_context(), |
3667 | + ctxt = context.get_admin_context() |
3668 | + db.instance_set_state(ctxt, |
3669 | instance['id'], |
3670 | power_state.NOSTATE, |
3671 | 'launching') |
3672 | |
3673 | - vnics = db.api.virtual_nics_get_by_instance( |
3674 | - context.get_admin_context(), |
3675 | - instance['id']) |
3676 | + vnics = db.api.virtual_nics_get_by_instance(ctxt, |
3677 | + instance['id']) |
3678 | + net_agent = self._get_net_agent(ctxt, instance['project_id']) |
3679 | + is_vpn = (instance['image_id'] == FLAGS.vpn_image_id) |
3680 | for vnic_id in vnics: |
3681 | - self.firewall_driver.setup_basic_filtering_for_vnic(vnic_id, |
3682 | - instance) |
3683 | - self.firewall_driver.prepare_vnic_filter(vnic_id, instance) |
3684 | + net_agent.setup_basic_filtering( |
3685 | + ctxt, vnic_id, FLAGS.allow_project_net_traffic, |
3686 | + self._get_connection) |
3687 | + net_agent.prepare_vnic_filter(ctxt, vnic_id, is_vpn, |
3688 | + FLAGS.allow_project_net_traffic, |
3689 | + self._get_connection) |
3690 | |
3691 | self._create_image(instance, xml, network_info) |
3692 | domain = self._create_new_domain(xml) |
3693 | LOG.debug(_("instance %s: is running"), instance['name']) |
3694 | for vnic_id in vnics: |
3695 | - self.firewall_driver.apply_vnic_filter(vnic_id) |
3696 | + net_agent.apply_vnic_filter(vnic_id) |
3697 | |
3698 | if FLAGS.start_guests_on_host_boot: |
3699 | LOG.debug(_("instance %s: setting autostart ON") % |
3700 | @@ -1383,10 +1403,17 @@ |
3701 | 'password': 'fakepassword'} |
3702 | |
3703 | def refresh_security_group_rules(self, security_group_id): |
3704 | - self.firewall_driver.refresh_security_group_rules(security_group_id) |
3705 | + ctxt = context.get_admin_context() |
3706 | + net_agent = self._get_net_agent(ctxt, ctxt.project_id) |
3707 | + net_agent.refresh_security_group_rules(ctxt, security_group_id, |
3708 | + ctxt.project_id, |
3709 | + FLAGS.allow_project_net_traffic, |
3710 | + self.get_connection) |
3711 | |
3712 | def refresh_security_group_members(self, security_group_id): |
3713 | - self.firewall_driver.refresh_security_group_members(security_group_id) |
3714 | + ctxt = context.get_admin_context() |
3715 | + net_agent = self._get_net_agent(ctxt, ctxt.project_id) |
3716 | + net_agent.refresh_security_group_members(security_group_id) |
3717 | |
3718 | def update_available_resource(self, ctxt, host): |
3719 | """Updates compute manager resource info on ComputeNode table. |
3720 | @@ -1472,7 +1499,7 @@ |
3721 | |
3722 | Concretely, the below method must be called. |
3723 | - setup_basic_filtering (for nova-basic, etc.) |
3724 | - - prepare_instance_filter(for nova-instance-instance-xxx, etc.) |
3725 | + - prepare_vnic_filter(for nova-instance-instance-xxx, etc.) |
3726 | |
3727 | to_xml may have to be called since it defines PROJNET, PROJMASK. |
3728 | but libvirt migrates those value through migrateToURI(), |
3729 | @@ -1489,16 +1516,23 @@ |
3730 | if not time: |
3731 | time = greenthread |
3732 | |
3733 | - # If any instances never launch at destination host, |
3734 | - # basic-filtering must be set here. |
3735 | - self.firewall_driver.setup_basic_filtering(instance_ref) |
3736 | - # setting up n)ova-instance-instance-xx mainly. |
3737 | - self.firewall_driver.prepare_instance_filter(instance_ref) |
3738 | + ctxt = context.get_admin_context() |
3739 | + vnics = db.api.virtual_nics_get_by_instance(ctxt, instance_ref['id']) |
3740 | + net_agent = self._get_net_agent(ctxt, instance_ref['project_id']) |
3741 | + is_vpn = (instance_ref['image_id'] == FLAGS.vpn_image_id) |
3742 | + for vnic_id in vnics: |
3743 | + net_agent.setup_basic_filtering(ctxt, vnic_id, |
3744 | + FLAGS.allow_project_net_traffic, |
3745 | + self._get_connection) |
3746 | + net_agent.prepare_vnic_filter(ctxt, vnic_id, is_vpn, |
3747 | + FLAGS.allow_project_net_traffic, |
3748 | + self._get_connection) |
3749 | |
3750 | # wait for completion |
3751 | timeout_count = range(FLAGS.live_migration_retry_count) |
3752 | while timeout_count: |
3753 | - if self.firewall_driver.instance_filter_exists(instance_ref): |
3754 | + if net_agent.vnic_filter_exists(vnic_id, |
3755 | + self._get_connection): |
3756 | break |
3757 | timeout_count.pop() |
3758 | if len(timeout_count) == 0: |
3759 | @@ -1586,809 +1620,8 @@ |
3760 | |
3761 | def unfilter_instance(self, instance_ref): |
3762 | """See comments of same method in firewall_driver.""" |
3763 | - self.firewall_driver.unfilter_instance(instance_ref) |
3764 | - |
3765 | - |
3766 | -class FirewallDriver(object): |
3767 | - def prepare_instance_filter(self, instance, network_info=None): |
3768 | - """Prepare filters for the instance. |
3769 | - |
3770 | - At this point, the instance isn't running yet.""" |
3771 | - raise NotImplementedError() |
3772 | - |
3773 | - def unfilter_instance(self, instance): |
3774 | - """Stop filtering instance""" |
3775 | - raise NotImplementedError() |
3776 | - |
3777 | - def apply_instance_filter(self, instance): |
3778 | - """Apply instance filter. |
3779 | - |
3780 | - Once this method returns, the instance should be firewalled |
3781 | - appropriately. This method should as far as possible be a |
3782 | - no-op. It's vastly preferred to get everything set up in |
3783 | - prepare_instance_filter. |
3784 | - """ |
3785 | - raise NotImplementedError() |
3786 | - |
3787 | - def refresh_security_group_rules(self, security_group_id): |
3788 | - """Refresh security group rules from data store |
3789 | - |
3790 | - Gets called when a rule has been added to or removed from |
3791 | - the security group.""" |
3792 | - raise NotImplementedError() |
3793 | - |
3794 | - def refresh_security_group_members(self, security_group_id): |
3795 | - """Refresh security group members from data store |
3796 | - |
3797 | - Gets called when an instance gets added to or removed from |
3798 | - the security group.""" |
3799 | - raise NotImplementedError() |
3800 | - |
3801 | - def setup_basic_filtering(self, instance, network_info=None): |
3802 | - """Create rules to block spoofing and allow dhcp. |
3803 | - |
3804 | - This gets called when spawning an instance, before |
3805 | - :method:`prepare_instance_filter`. |
3806 | - |
3807 | - """ |
3808 | - raise NotImplementedError() |
3809 | - |
3810 | - def instance_filter_exists(self, instance): |
3811 | - """Check nova-instance-instance-xxx exists""" |
3812 | - raise NotImplementedError() |
3813 | - |
3814 | - def vnic_filter_exists(self, vnic_id): |
3815 | - """Check nova-instance-vnic_id exists""" |
3816 | - raise NotImplementedError() |
3817 | - |
3818 | -class NWFilterFirewall(FirewallDriver): |
3819 | - """ |
3820 | - This class implements a network filtering mechanism versatile |
3821 | - enough for EC2 style Security Group filtering by leveraging |
3822 | - libvirt's nwfilter. |
3823 | - |
3824 | - First, all instances get a filter ("nova-base-filter") applied. |
3825 | - This filter provides some basic security such as protection against |
3826 | - MAC spoofing, IP spoofing, and ARP spoofing. |
3827 | - |
3828 | - This filter drops all incoming ipv4 and ipv6 connections. |
3829 | - Outgoing connections are never blocked. |
3830 | - |
3831 | - Second, every security group maps to a nwfilter filter(*). |
3832 | - NWFilters can be updated at runtime and changes are applied |
3833 | - immediately, so changes to security groups can be applied at |
3834 | - runtime (as mandated by the spec). |
3835 | - |
3836 | - Security group rules are named "nova-secgroup-<id>" where <id> |
3837 | - is the internal id of the security group. They're applied only on |
3838 | - hosts that have instances in the security group in question. |
3839 | - |
3840 | - Updates to security groups are done by updating the data model |
3841 | - (in response to API calls) followed by a request sent to all |
3842 | - the nodes with instances in the security group to refresh the |
3843 | - security group. |
3844 | - |
3845 | - Each instance has its own NWFilter, which references the above |
3846 | - mentioned security group NWFilters. This was done because |
3847 | - interfaces can only reference one filter while filters can |
3848 | - reference multiple other filters. This has the added benefit of |
3849 | - actually being able to add and remove security groups from an |
3850 | - instance at run time. This functionality is not exposed anywhere, |
3851 | - though. |
3852 | - |
3853 | - Outstanding questions: |
3854 | - |
3855 | - The name is unique, so would there be any good reason to sync |
3856 | - the uuid across the nodes (by assigning it from the datamodel)? |
3857 | - |
3858 | - |
3859 | - (*) This sentence brought to you by the redundancy department of |
3860 | - redundancy. |
3861 | - |
3862 | - """ |
3863 | - |
3864 | - def __init__(self, get_connection, **kwargs): |
3865 | - self._libvirt_get_connection = get_connection |
3866 | - self.static_filters_configured = False |
3867 | - self.handle_security_groups = False |
3868 | - |
3869 | - def apply_instance_filter(self, instance): |
3870 | - """No-op. Everything is done in prepare_instance_filter""" |
3871 | - pass |
3872 | - |
3873 | - def apply_vnic_filter(self, vnic_id): |
3874 | - """No-op. Everything is done in prepare_vnic_filter""" |
3875 | - pass |
3876 | - |
3877 | - def _get_connection(self): |
3878 | - return self._libvirt_get_connection() |
3879 | - _conn = property(_get_connection) |
3880 | - |
3881 | - def nova_dhcp_filter(self): |
3882 | - """The standard allow-dhcp-server filter is an <ip> one, so it uses |
3883 | - ebtables to allow traffic through. Without a corresponding rule in |
3884 | - iptables, it'll get blocked anyway.""" |
3885 | - |
3886 | - return '''<filter name='nova-allow-dhcp-server' chain='ipv4'> |
3887 | - <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid> |
3888 | - <rule action='accept' direction='out' |
3889 | - priority='100'> |
3890 | - <udp srcipaddr='0.0.0.0' |
3891 | - dstipaddr='255.255.255.255' |
3892 | - srcportstart='68' |
3893 | - dstportstart='67'/> |
3894 | - </rule> |
3895 | - <rule action='accept' direction='in' |
3896 | - priority='100'> |
3897 | - <udp srcipaddr='$DHCPSERVER' |
3898 | - srcportstart='67' |
3899 | - dstportstart='68'/> |
3900 | - </rule> |
3901 | - </filter>''' |
3902 | - |
3903 | - def nova_ra_filter(self): |
3904 | - return '''<filter name='nova-allow-ra-server' chain='root'> |
3905 | - <uuid>d707fa71-4fb5-4b27-9ab7-ba5ca19c8804</uuid> |
3906 | - <rule action='accept' direction='inout' |
3907 | - priority='100'> |
3908 | - <icmpv6 srcipaddr='$RASERVER'/> |
3909 | - </rule> |
3910 | - </filter>''' |
3911 | - |
3912 | - def setup_basic_filtering(self, instance, network_info=None): |
3913 | - """Set up basic filtering (MAC, IP, and ARP spoofing protection)""" |
3914 | - logging.info('called setup_basic_filtering in nwfilter') |
3915 | - |
3916 | - if self.handle_security_groups: |
3917 | - # No point in setting up a filter set that we'll be overriding |
3918 | - # anyway. |
3919 | - return |
3920 | - |
3921 | - logging.info('ensuring static filters') |
3922 | - self._ensure_static_filters() |
3923 | - |
3924 | - for (network, mapping) in network_info: |
3925 | - nic_id = mapping['mac'].replace(':', '') |
3926 | - instance_filter_name = self._instance_filter_name(instance, nic_id) |
3927 | - self._define_filter(self._filter_container(instance_filter_name, |
3928 | - ['nova-base'])) |
3929 | - |
3930 | - def setup_basic_filtering_for_vnic(self, vnic_id, instance): |
3931 | - """Set up basic filtering (MAC, IP, and ARP spoofing protection)""" |
3932 | - logging.info('called setup_basic_filtering_for_vnic in nwfilter') |
3933 | - |
3934 | - if self.handle_security_groups: |
3935 | - # No point in setting up a filter set that we'll be overriding |
3936 | - # anyway. |
3937 | - return |
3938 | - |
3939 | - net_agent = self._net_agent_for_project(instance['project_id']) |
3940 | - ctxt = context.get_admin_context() |
3941 | - net_info = net_agent.get_network_info(ctxt, vnic_id) |
3942 | - |
3943 | - logging.info('ensuring static filters') |
3944 | - self._ensure_static_filters_for_vnic(net_info) |
3945 | - |
3946 | - vnic_filter_name = self._vnic_filter_name(vnic_id) |
3947 | - self._define_filter(self._filter_container(vnic_filter_name, |
3948 | - ['nova-base'])) |
3949 | - def _net_agent_for_project(self, project_id): |
3950 | - ctxt = context.get_admin_context() |
3951 | - net_factory = net_service.get_service_factory(ctxt, project_id) |
3952 | - return net_factory.get_net_agent() |
3953 | - |
3954 | - def _ensure_static_filters_for_vnic(self, net_info): |
3955 | - if self.static_filters_configured: |
3956 | - return |
3957 | - |
3958 | - self._define_filter(self._filter_container('nova-base', |
3959 | - ['no-mac-spoofing', |
3960 | - 'no-ip-spoofing', |
3961 | - 'no-arp-spoofing', |
3962 | - 'allow-dhcp-server'])) |
3963 | - self._define_filter(self.nova_base_ipv4_filter) |
3964 | - self._define_filter(self.nova_base_ipv6_filter) |
3965 | - self._define_filter(self.nova_dhcp_filter) |
3966 | - self._define_filter(self.nova_ra_filter) |
3967 | - self._define_filter(self.nova_vpn_filter) |
3968 | - if FLAGS.allow_project_net_traffic: |
3969 | - self._define_filter(self.nova_project_filter) |
3970 | - if net_info['ipv6_enabled']: |
3971 | - self._define_filter(self.nova_project_filter_v6) |
3972 | - |
3973 | - self.static_filters_configured = True |
3974 | - |
3975 | - |
3976 | - def _ensure_static_filters(self): |
3977 | - if self.static_filters_configured: |
3978 | - return |
3979 | - |
3980 | - self._define_filter(self._filter_container('nova-base', |
3981 | - ['no-mac-spoofing', |
3982 | - 'no-ip-spoofing', |
3983 | - 'no-arp-spoofing', |
3984 | - 'allow-dhcp-server'])) |
3985 | - self._define_filter(self.nova_base_ipv4_filter) |
3986 | - self._define_filter(self.nova_base_ipv6_filter) |
3987 | - self._define_filter(self.nova_dhcp_filter) |
3988 | - self._define_filter(self.nova_ra_filter) |
3989 | - self._define_filter(self.nova_vpn_filter) |
3990 | - if FLAGS.allow_project_net_traffic: |
3991 | - self._define_filter(self.nova_project_filter) |
3992 | - if FLAGS.use_ipv6: |
3993 | - self._define_filter(self.nova_project_filter_v6) |
3994 | - |
3995 | - self.static_filters_configured = True |
3996 | - |
3997 | - def _filter_container(self, name, filters): |
3998 | - xml = '''<filter name='%s' chain='root'>%s</filter>''' % ( |
3999 | - name, |
4000 | - ''.join(["<filterref filter='%s'/>" % (f,) for f in filters])) |
4001 | - return xml |
4002 | - |
4003 | - nova_vpn_filter = '''<filter name='nova-vpn' chain='root'> |
4004 | - <uuid>2086015e-cf03-11df-8c5d-080027c27973</uuid> |
4005 | - <filterref filter='allow-dhcp-server'/> |
4006 | - <filterref filter='nova-allow-dhcp-server'/> |
4007 | - <filterref filter='nova-base-ipv4'/> |
4008 | - <filterref filter='nova-base-ipv6'/> |
4009 | - </filter>''' |
4010 | - |
4011 | - def nova_base_ipv4_filter(self): |
4012 | - retval = "<filter name='nova-base-ipv4' chain='ipv4'>" |
4013 | - for protocol in ['tcp', 'udp', 'icmp']: |
4014 | - for direction, action, priority in [('out', 'accept', 399), |
4015 | - ('in', 'drop', 400)]: |
4016 | - retval += """<rule action='%s' direction='%s' priority='%d'> |
4017 | - <%s /> |
4018 | - </rule>""" % (action, direction, |
4019 | - priority, protocol) |
4020 | - retval += '</filter>' |
4021 | - return retval |
4022 | - |
4023 | - def nova_base_ipv6_filter(self): |
4024 | - retval = "<filter name='nova-base-ipv6' chain='ipv6'>" |
4025 | - for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']: |
4026 | - for direction, action, priority in [('out', 'accept', 399), |
4027 | - ('in', 'drop', 400)]: |
4028 | - retval += """<rule action='%s' direction='%s' priority='%d'> |
4029 | - <%s /> |
4030 | - </rule>""" % (action, direction, |
4031 | - priority, protocol) |
4032 | - retval += '</filter>' |
4033 | - return retval |
4034 | - |
4035 | - def nova_project_filter(self): |
4036 | - retval = "<filter name='nova-project' chain='ipv4'>" |
4037 | - for protocol in ['tcp', 'udp', 'icmp']: |
4038 | - retval += """<rule action='accept' direction='in' priority='200'> |
4039 | - <%s srcipaddr='$PROJNET' srcipmask='$PROJMASK' /> |
4040 | - </rule>""" % protocol |
4041 | - retval += '</filter>' |
4042 | - return retval |
4043 | - |
4044 | - def nova_project_filter_v6(self): |
4045 | - retval = "<filter name='nova-project-v6' chain='ipv6'>" |
4046 | - for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']: |
4047 | - retval += """<rule action='accept' direction='inout' |
4048 | - priority='200'> |
4049 | - <%s srcipaddr='$PROJNETV6' |
4050 | - srcipmask='$PROJMASKV6' /> |
4051 | - </rule>""" % (protocol) |
4052 | - retval += '</filter>' |
4053 | - return retval |
4054 | - |
4055 | - def _define_filter(self, xml): |
4056 | - if callable(xml): |
4057 | - xml = xml() |
4058 | - # execute in a native thread and block current greenthread until done |
4059 | - tpool.execute(self._conn.nwfilterDefineXML, xml) |
4060 | - |
4061 | - def unfilter_instance(self, instance): |
4062 | - # Nothing to do |
4063 | - pass |
4064 | - |
4065 | - def prepare_instance_filter(self, instance, network_info=None): |
4066 | - """ |
4067 | - Creates an NWFilter for the given instance. In the process, |
4068 | - it makes sure the filters for the security groups as well as |
4069 | - the base filter are all in place. |
4070 | - """ |
4071 | - if instance['image_id'] == FLAGS.vpn_image_id: |
4072 | - base_filter = 'nova-vpn' |
4073 | - else: |
4074 | - base_filter = 'nova-base' |
4075 | - |
4076 | - ctxt = context.get_admin_context() |
4077 | - |
4078 | - instance_secgroup_filter_name = \ |
4079 | - '%s-secgroup' % (self._instance_filter_name(instance)) |
4080 | - #% (instance_filter_name,) |
4081 | - |
4082 | - instance_secgroup_filter_children = ['nova-base-ipv4', |
4083 | - 'nova-base-ipv6', |
4084 | - 'nova-allow-dhcp-server'] |
4085 | - |
4086 | - for security_group in \ |
4087 | - db.security_group_get_by_instance(ctxt, instance['id']): |
4088 | - |
4089 | - self.refresh_security_group_rules(security_group['id']) |
4090 | - |
4091 | - instance_secgroup_filter_children += [('nova-secgroup-%s' % |
4092 | - security_group['id'])] |
4093 | - |
4094 | - self._define_filter( |
4095 | - self._filter_container(instance_secgroup_filter_name, |
4096 | - instance_secgroup_filter_children)) |
4097 | - |
4098 | - for (network, mapping) in network_info: |
4099 | - nic_id = mapping['mac'].replace(':', '') |
4100 | - instance_filter_name = self._instance_filter_name(instance, nic_id) |
4101 | - instance_filter_children = \ |
4102 | - [base_filter, instance_secgroup_filter_name] |
4103 | - |
4104 | - if FLAGS.use_ipv6: |
4105 | - gateway_v6 = network['gateway_v6'] |
4106 | - |
4107 | - if gateway_v6: |
4108 | - instance_secgroup_filter_children += \ |
4109 | - ['nova-allow-ra-server'] |
4110 | - |
4111 | - if FLAGS.allow_project_net_traffic: |
4112 | - instance_filter_children += ['nova-project'] |
4113 | - if FLAGS.use_ipv6: |
4114 | - instance_filter_children += ['nova-project-v6'] |
4115 | - |
4116 | - self._define_filter( |
4117 | - self._filter_container(instance_filter_name, |
4118 | - instance_filter_children)) |
4119 | - |
4120 | - return |
4121 | - |
4122 | - def refresh_security_group_rules(self, security_group_id): |
4123 | - return self._define_filter( |
4124 | - self.security_group_to_nwfilter_xml(security_group_id)) |
4125 | - |
4126 | - def security_group_to_nwfilter_xml(self, security_group_id): |
4127 | - security_group = db.security_group_get(context.get_admin_context(), |
4128 | - security_group_id) |
4129 | - rule_xml = "" |
4130 | - v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'} |
4131 | - for rule in security_group.rules: |
4132 | - rule_xml += "<rule action='accept' direction='in' priority='300'>" |
4133 | - if rule.cidr: |
4134 | - version = _get_ip_version(rule.cidr) |
4135 | - if(FLAGS.use_ipv6 and version == 6): |
4136 | - net, prefixlen = _get_net_and_prefixlen(rule.cidr) |
4137 | - rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ |
4138 | - (v6protocol[rule.protocol], net, prefixlen) |
4139 | - else: |
4140 | - net, mask = _get_net_and_mask(rule.cidr) |
4141 | - rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ |
4142 | - (rule.protocol, net, mask) |
4143 | - if rule.protocol in ['tcp', 'udp']: |
4144 | - rule_xml += "dstportstart='%s' dstportend='%s' " % \ |
4145 | - (rule.from_port, rule.to_port) |
4146 | - elif rule.protocol == 'icmp': |
4147 | - LOG.info('rule.protocol: %r, rule.from_port: %r, ' |
4148 | - 'rule.to_port: %r', rule.protocol, |
4149 | - rule.from_port, rule.to_port) |
4150 | - if rule.from_port != -1: |
4151 | - rule_xml += "type='%s' " % rule.from_port |
4152 | - if rule.to_port != -1: |
4153 | - rule_xml += "code='%s' " % rule.to_port |
4154 | - |
4155 | - rule_xml += '/>\n' |
4156 | - rule_xml += "</rule>\n" |
4157 | - xml = "<filter name='nova-secgroup-%s' " % security_group_id |
4158 | - if(FLAGS.use_ipv6): |
4159 | - xml += "chain='root'>%s</filter>" % rule_xml |
4160 | - else: |
4161 | - xml += "chain='ipv4'>%s</filter>" % rule_xml |
4162 | - return xml |
4163 | - |
4164 | - def _instance_filter_name(self, instance, nic_id=None): |
4165 | - if not nic_id: |
4166 | - return 'nova-instance-%s' % (instance['name']) |
4167 | - return 'nova-instance-%s-%s' % (instance['name'], nic_id) |
4168 | - |
4169 | - def _vnic_filter_name(self, vnic_id): |
4170 | - return 'nova-vnic-%s' % vnic_id |
4171 | - |
4172 | - def vnic_filter_exists(self, vnic_id): |
4173 | - vnic_filter_name = self._vnic_filter_name(vnic_id) |
4174 | - try: |
4175 | - self._conn.nwfilterLookupByName(vnic_filter_name) |
4176 | - except libvirt.libvirtError: |
4177 | - name = vnic_id |
4178 | - LOG.debug(_('The nwfilter(%(instance_filter_name)s) for' |
4179 | - '%(name)s is not found.') % locals()) |
4180 | - return False |
4181 | - return True |
4182 | - |
4183 | - def instance_filter_exists(self, instance): |
4184 | - """Check nova-instance-instance-xxx exists""" |
4185 | - network_info = _get_network_info(instance) |
4186 | - for (network, mapping) in network_info: |
4187 | - nic_id = mapping['mac'].replace(':', '') |
4188 | - instance_filter_name = self._instance_filter_name(instance, nic_id) |
4189 | - try: |
4190 | - self._conn.nwfilterLookupByName(instance_filter_name) |
4191 | - except libvirt.libvirtError: |
4192 | - name = instance.name |
4193 | - LOG.debug(_('The nwfilter(%(instance_filter_name)s) for' |
4194 | - '%(name)s is not found.') % locals()) |
4195 | - return False |
4196 | - return True |
4197 | - |
4198 | - |
4199 | -class IptablesFirewallDriver(FirewallDriver): |
4200 | - def __init__(self, execute=None, **kwargs): |
4201 | - from nova.network.flat_vlan.driver.linux import filter |
4202 | - self.iptables = filter.iptables_manager |
4203 | - self.instances = {} |
4204 | - self.vnics = {} |
4205 | - self.nwfilter = NWFilterFirewall(kwargs['get_connection']) |
4206 | - |
4207 | - self.iptables.ipv4['filter'].add_chain('sg-fallback') |
4208 | - self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP') |
4209 | - self.iptables.ipv6['filter'].add_chain('sg-fallback') |
4210 | - self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP') |
4211 | - |
4212 | - def setup_basic_filtering(self, instance, network_info=None): |
4213 | - """Use NWFilter from libvirt for this.""" |
4214 | - return self.nwfilter.setup_basic_filtering(instance, network_info) |
4215 | - |
4216 | - def setup_basic_filtering_for_vnic(self, vnic_id, instance): |
4217 | - """Use NWFilter from libvirt for this.""" |
4218 | - return self.nwfilter.setup_basic_filtering_for_vnic(vnic_id, instance) |
4219 | - |
4220 | - def apply_instance_filter(self, instance): |
4221 | - """No-op. Everything is done in prepare_instance_filter""" |
4222 | - pass |
4223 | - |
4224 | - def apply_vnic_filter(self, vnic_id): |
4225 | - """No-op. Everything is done in prepare_vnic_filter""" |
4226 | - pass |
4227 | - |
4228 | - def unfilter_instance(self, instance): |
4229 | - if self.instances.pop(instance['id'], None): |
4230 | - self.remove_filters_for_instance(instance) |
4231 | - self.iptables.apply() |
4232 | - else: |
4233 | - LOG.info(_('Attempted to unfilter instance %s which is not ' |
4234 | - 'filtered'), instance['id']) |
4235 | - |
4236 | - def prepare_instance_filter(self, instance, network_info=None): |
4237 | - self.instances[instance['id']] = instance |
4238 | - self.add_filters_for_instance(instance, network_info) |
4239 | - self.iptables.apply() |
4240 | - |
4241 | - def prepare_vnic_filter(self, vnic_id, instance): |
4242 | - self.vnics[vnic_id] = vnic_id |
4243 | - self.add_filters_for_vnic(vnic_id, instance) |
4244 | - self.iptables.apply() |
4245 | - |
4246 | - def add_filters_for_vnic(self, vnic_id, instance): |
4247 | - ctxt = context.get_admin_context() |
4248 | - net_agent = self._net_agent_for_project(instance['project_id']) |
4249 | - net_info = net_agent.get_network_info(ctxt, vnic_id) |
4250 | - chain_name = self._vnic_chain_name(vnic_id) |
4251 | - |
4252 | - self.iptables.ipv4['filter'].add_chain(chain_name) |
4253 | - ipv4_address = net_info['IPs'][0]['fixed_ip'] |
4254 | - self.iptables.ipv4['filter'].add_rule('local', |
4255 | - '-d %s -j $%s' % |
4256 | - (ipv4_address, chain_name)) |
4257 | - |
4258 | - if net_info['ipv6_enabled']: |
4259 | - self.iptables.ipv6['filter'].add_chain(chain_name) |
4260 | - ipv6_address = net_info['IPs'][0]['ip_v6'] |
4261 | - self.iptables.ipv6['filter'].add_rule('local', |
4262 | - '-d %s -j $%s' % |
4263 | - (ipv6_address, |
4264 | - chain_name)) |
4265 | - |
4266 | - ipv4_rules, ipv6_rules = self.vnic_rules(vnic_id, net_info, |
4267 | - instance) |
4268 | - |
4269 | - for rule in ipv4_rules: |
4270 | - self.iptables.ipv4['filter'].add_rule(chain_name, rule) |
4271 | - |
4272 | - if net_info['ipv6_enabled']: |
4273 | - for rule in ipv6_rules: |
4274 | - self.iptables.ipv6['filter'].add_rule(chain_name, rule) |
4275 | - |
4276 | - def add_filters_for_instance(self, instance, network_info=None): |
4277 | - |
4278 | - chain_name = self._instance_chain_name(instance) |
4279 | - |
4280 | - self.iptables.ipv4['filter'].add_chain(chain_name) |
4281 | - |
4282 | - ips_v4 = [ip['ip'] for (_, mapping) in network_info |
4283 | - for ip in mapping['ips']] |
4284 | - |
4285 | - for ipv4_address in ips_v4: |
4286 | - self.iptables.ipv4['filter'].add_rule('local', |
4287 | - '-d %s -j $%s' % |
4288 | - (ipv4_address, chain_name)) |
4289 | - |
4290 | - if FLAGS.use_ipv6: |
4291 | - self.iptables.ipv6['filter'].add_chain(chain_name) |
4292 | - ips_v6 = [ip['ip'] for (_, mapping) in network_info |
4293 | - for ip in mapping['ip6s']] |
4294 | - |
4295 | - for ipv6_address in ips_v6: |
4296 | - self.iptables.ipv6['filter'].add_rule('local', |
4297 | - '-d %s -j $%s' % |
4298 | - (ipv6_address, |
4299 | - chain_name)) |
4300 | - |
4301 | - ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info) |
4302 | - |
4303 | - for rule in ipv4_rules: |
4304 | - self.iptables.ipv4['filter'].add_rule(chain_name, rule) |
4305 | - |
4306 | - if FLAGS.use_ipv6: |
4307 | - for rule in ipv6_rules: |
4308 | - self.iptables.ipv6['filter'].add_rule(chain_name, rule) |
4309 | - |
4310 | - def remove_filters_for_instance(self, instance): |
4311 | - ctxt = context.get_admin_context() |
4312 | - net_agent = self._net_agent_for_project(instance['project_id']) |
4313 | - vnics = self._vnics_for_instance(instance) |
4314 | + ctxt = context.get_admin_context() |
4315 | + vnics = db.api.virtual_nics_get_by_instance(ctxt, instance_ref['id']) |
4316 | + net_agent = self._get_net_agent(ctxt, instance_ref['project_id']) |
4317 | for vnic_id in vnics: |
4318 | - |
4319 | - net_info = net_agent.get_network_info(ctxt, vnic_id) |
4320 | - chain_name = self._vnic_chain_name(vnic_id) |
4321 | - |
4322 | - self.iptables.ipv4['filter'].remove_chain(chain_name) |
4323 | - if net_info['ipv6_enabled']: |
4324 | - self.iptables.ipv6['filter'].remove_chain(chain_name) |
4325 | - |
4326 | - def instance_rules(self, instance, network_info=None): |
4327 | - |
4328 | - ctxt = context.get_admin_context() |
4329 | - |
4330 | - ipv4_rules = [] |
4331 | - ipv6_rules = [] |
4332 | - |
4333 | - # Always drop invalid packets |
4334 | - ipv4_rules += ['-m state --state ' 'INVALID -j DROP'] |
4335 | - ipv6_rules += ['-m state --state ' 'INVALID -j DROP'] |
4336 | - |
4337 | - # Allow established connections |
4338 | - ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT'] |
4339 | - ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT'] |
4340 | - |
4341 | - dhcp_servers = [network['gateway'] for (network, _m) in network_info] |
4342 | - |
4343 | - for dhcp_server in dhcp_servers: |
4344 | - ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 ' |
4345 | - '-j ACCEPT' % (dhcp_server,)) |
4346 | - |
4347 | - #Allow project network traffic |
4348 | - if FLAGS.allow_project_net_traffic: |
4349 | - cidrs = [network['cidr'] for (network, _m) in network_info] |
4350 | - for cidr in cidrs: |
4351 | - ipv4_rules.append('-s %s -j ACCEPT' % (cidr,)) |
4352 | - |
4353 | - # We wrap these in FLAGS.use_ipv6 because they might cause |
4354 | - # a DB lookup. The other ones are just list operations, so |
4355 | - # they're not worth the clutter. |
4356 | - if FLAGS.use_ipv6: |
4357 | - # Allow RA responses |
4358 | - gateways_v6 = [network['gateway_v6'] for (network, _) in |
4359 | - network_info] |
4360 | - for gateway_v6 in gateways_v6: |
4361 | - ipv6_rules.append( |
4362 | - '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,)) |
4363 | - |
4364 | - #Allow project network traffic |
4365 | - if FLAGS.allow_project_net_traffic: |
4366 | - cidrv6s = [network['cidr_v6'] for (network, _m) |
4367 | - in network_info] |
4368 | - |
4369 | - for cidrv6 in cidrv6s: |
4370 | - ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,)) |
4371 | - |
4372 | - security_groups = db.security_group_get_by_instance(ctxt, |
4373 | - instance['id']) |
4374 | - |
4375 | - # then, security group chains and rules |
4376 | - for security_group in security_groups: |
4377 | - rules = db.security_group_rule_get_by_security_group(ctxt, |
4378 | - security_group['id']) |
4379 | - |
4380 | - for rule in rules: |
4381 | - logging.info('%r', rule) |
4382 | - |
4383 | - if not rule.cidr: |
4384 | - # Eventually, a mechanism to grant access for security |
4385 | - # groups will turn up here. It'll use ipsets. |
4386 | - continue |
4387 | - |
4388 | - version = _get_ip_version(rule.cidr) |
4389 | - if version == 4: |
4390 | - rules = ipv4_rules |
4391 | - else: |
4392 | - rules = ipv6_rules |
4393 | - |
4394 | - protocol = rule.protocol |
4395 | - if version == 6 and rule.protocol == 'icmp': |
4396 | - protocol = 'icmpv6' |
4397 | - |
4398 | - args = ['-p', protocol, '-s', rule.cidr] |
4399 | - |
4400 | - if rule.protocol in ['udp', 'tcp']: |
4401 | - if rule.from_port == rule.to_port: |
4402 | - args += ['--dport', '%s' % (rule.from_port,)] |
4403 | - else: |
4404 | - args += ['-m', 'multiport', |
4405 | - '--dports', '%s:%s' % (rule.from_port, |
4406 | - rule.to_port)] |
4407 | - elif rule.protocol == 'icmp': |
4408 | - icmp_type = rule.from_port |
4409 | - icmp_code = rule.to_port |
4410 | - |
4411 | - if icmp_type == -1: |
4412 | - icmp_type_arg = None |
4413 | - else: |
4414 | - icmp_type_arg = '%s' % icmp_type |
4415 | - if not icmp_code == -1: |
4416 | - icmp_type_arg += '/%s' % icmp_code |
4417 | - |
4418 | - if icmp_type_arg: |
4419 | - if version == 4: |
4420 | - args += ['-m', 'icmp', '--icmp-type', |
4421 | - icmp_type_arg] |
4422 | - elif version == 6: |
4423 | - args += ['-m', 'icmp6', '--icmpv6-type', |
4424 | - icmp_type_arg] |
4425 | - |
4426 | - args += ['-j ACCEPT'] |
4427 | - rules += [' '.join(args)] |
4428 | - |
4429 | - ipv4_rules += ['-j $sg-fallback'] |
4430 | - ipv6_rules += ['-j $sg-fallback'] |
4431 | - |
4432 | - def vnic_rules(self, vnic_id, net_info, instance): |
4433 | - ctxt = context.get_admin_context() |
4434 | - |
4435 | - ipv4_rules = [] |
4436 | - ipv6_rules = [] |
4437 | - |
4438 | - # Always drop invalid packets |
4439 | - ipv4_rules += ['-m state --state ' 'INVALID -j DROP'] |
4440 | - ipv6_rules += ['-m state --state ' 'INVALID -j DROP'] |
4441 | - |
4442 | - # Allow established connections |
4443 | - ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT'] |
4444 | - ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT'] |
4445 | - |
4446 | - if net_info['IPs'][0]['gateway']: |
4447 | - ipv4_rules += ['-s %s -p udp --sport 67 --dport 68 ' |
4448 | - '-j ACCEPT' % (net_info['IPs'][0]['gateway'],)] |
4449 | - |
4450 | - #Allow project network traffic |
4451 | - if FLAGS.allow_project_net_traffic: |
4452 | - if net_info['IPs'][0]['cidr']: |
4453 | - ipv4_rules += ['-s %s -j ACCEPT' % |
4454 | - (net_info['IPs'][0]['cidr'],)] |
4455 | - |
4456 | - # We wrap these in FLAGS.use_ipv6 because they might cause |
4457 | - # a DB lookup. The other ones are just list operations, so |
4458 | - # they're not worth the clutter. |
4459 | - if net_info['ipv6_enabled']: |
4460 | - # Allow RA responses |
4461 | - if net_info['IPs'][0]['gateway_v6']: |
4462 | - ipv6_rules += ['-s %s/128 -p icmpv6 -j ACCEPT' % |
4463 | - (net_info['IPs'][0]['gateway_v6'],)] |
4464 | - |
4465 | - #Allow project network traffic |
4466 | - if FLAGS.allow_project_net_traffic: |
4467 | - if net_info['IPs'][0]['cidr_v6']: |
4468 | - ipv6_rules += ['-s %s -j ACCEPT' % |
4469 | - (net_info['IPs'][0]['cidr_v6'],)] |
4470 | - |
4471 | - security_groups = db.security_group_get_by_instance(ctxt, |
4472 | - instance['id']) |
4473 | - |
4474 | - # then, security group chains and rules |
4475 | - for security_group in security_groups: |
4476 | - rules = db.security_group_rule_get_by_security_group(ctxt, |
4477 | - security_group['id']) |
4478 | - |
4479 | - for rule in rules: |
4480 | - logging.info('%r', rule) |
4481 | - |
4482 | - if not rule.cidr: |
4483 | - # Eventually, a mechanism to grant access for security |
4484 | - # groups will turn up here. It'll use ipsets. |
4485 | - continue |
4486 | - |
4487 | - version = _get_ip_version(rule.cidr) |
4488 | - if version == 4: |
4489 | - rules = ipv4_rules |
4490 | - else: |
4491 | - rules = ipv6_rules |
4492 | - |
4493 | - protocol = rule.protocol |
4494 | - if version == 6 and rule.protocol == 'icmp': |
4495 | - protocol = 'icmpv6' |
4496 | - |
4497 | - args = ['-p', protocol, '-s', rule.cidr] |
4498 | - |
4499 | - if rule.protocol in ['udp', 'tcp']: |
4500 | - if rule.from_port == rule.to_port: |
4501 | - args += ['--dport', '%s' % (rule.from_port,)] |
4502 | - else: |
4503 | - args += ['-m', 'multiport', |
4504 | - '--dports', '%s:%s' % (rule.from_port, |
4505 | - rule.to_port)] |
4506 | - elif rule.protocol == 'icmp': |
4507 | - icmp_type = rule.from_port |
4508 | - icmp_code = rule.to_port |
4509 | - |
4510 | - if icmp_type == -1: |
4511 | - icmp_type_arg = None |
4512 | - else: |
4513 | - icmp_type_arg = '%s' % icmp_type |
4514 | - if not icmp_code == -1: |
4515 | - icmp_type_arg += '/%s' % icmp_code |
4516 | - |
4517 | - if icmp_type_arg: |
4518 | - if version == 4: |
4519 | - args += ['-m', 'icmp', '--icmp-type', |
4520 | - icmp_type_arg] |
4521 | - elif version == 6: |
4522 | - args += ['-m', 'icmp6', '--icmpv6-type', |
4523 | - icmp_type_arg] |
4524 | - |
4525 | - args += ['-j ACCEPT'] |
4526 | - rules += [' '.join(args)] |
4527 | - |
4528 | - ipv4_rules += ['-j $sg-fallback'] |
4529 | - ipv6_rules += ['-j $sg-fallback'] |
4530 | - |
4531 | - return ipv4_rules, ipv6_rules |
4532 | - |
4533 | - def vnic_filter_exists(self, vnic_id): |
4534 | - """Check nova-vnic_id exists""" |
4535 | - return self.nwfilter.vnic_filter_exists(vnic_id) |
4536 | - |
4537 | - def instance_filter_exists(self, instance): |
4538 | - """Check nova-instance-instance-xxx exists""" |
4539 | - return self.nwfilter.instance_filter_exists(instance) |
4540 | - |
4541 | - def refresh_security_group_members(self, security_group): |
4542 | - pass |
4543 | - |
4544 | - def refresh_security_group_rules(self, security_group): |
4545 | - self.do_refresh_security_group_rules(security_group) |
4546 | - self.iptables.apply() |
4547 | - |
4548 | - def _vnics_for_instance(self, instance): |
4549 | - return db.virtual_nics_get_by_instance(context.get_admin_context(), |
4550 | - instance['id']) |
4551 | - |
4552 | - def _net_agent_for_project(self, project_id): |
4553 | - ctxt = context.get_admin_context() |
4554 | - net_factory = net_service.get_service_factory(ctxt, project_id) |
4555 | - return net_factory.get_net_agent() |
4556 | - |
4557 | - @utils.synchronized('iptables', external=True) |
4558 | - def do_refresh_security_group_rules(self, security_group): |
4559 | - for instance in self.instances.values(): |
4560 | - self.remove_filters_for_instance(instance) |
4561 | - self.add_filters_for_instance(instance) |
4562 | - |
4563 | - def _security_group_chain_name(self, security_group_id): |
4564 | - return 'nova-sg-%s' % (security_group_id,) |
4565 | - |
4566 | - def _instance_chain_name(self, instance): |
4567 | - return 'inst-%s' % (instance['id'],) |
4568 | - |
4569 | - def _vnic_chain_name(self, vnic_id): |
4570 | - return 'vnic-%s' % (vnic_id,) |
4571 | - |
4572 | + net_agent.unfilter_vnic(vnic_id) |