Merge lp:~openstack-gd/nova/libvirt-multinic-nova into lp:~hudson-openstack/nova/trunk
- libvirt-multinic-nova
- Merge into trunk
Proposed by
Ilya Alekseyev
Status: | Superseded |
---|---|
Proposed branch: | lp:~openstack-gd/nova/libvirt-multinic-nova |
Merge into: | lp:~hudson-openstack/nova/trunk |
Diff against target: |
499 lines (+206/-100) 3 files modified
nova/virt/interfaces.template (+13/-12) nova/virt/libvirt.xml.template (+12/-9) nova/virt/libvirt_conn.py (+181/-79) |
To merge this branch: | bzr merge lp:~openstack-gd/nova/libvirt-multinic-nova |
Related bugs: | |
Related blueprints: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Sandy Walsh (community) | Needs Fixing | ||
Joshua McKenty | Pending | ||
Trey Morris | Pending | ||
Review via email: mp+53871@code.launchpad.net |
This proposal has been superseded by a proposal from 2011-03-23.
Commit message
Description of the change
libvirt driver multi_nic support. In this phase the libvirt driver can work both with and without multi_nic support, following the approach taken for multi_nic support in xenapi: https:// [link truncated in original]
To post a comment you must log in.
- 813. By Eldar Nugaev
-
merge with trunk
- 814. By Eldar Nugaev
-
merge with trunk
- 815. By Eldar Nugaev
-
merge with trunk
- 816. By Ilya Alekseyev
-
review comments fixed
- 817. By Ilya Alekseyev
-
trunk merged
- 818. By Ilya Alekseyev
-
pep8 fixed
- 819. By Ilya Alekseyev
-
xml template fixed
- 820. By Ilya Alekseyev
-
one more minor fix
Revision history for this message
Ilya Alekseyev (ilyaalekseyev) wrote : | # |
Sandy, thank you very much for the good review. Please look at my only comment below.
> The 'eth##' will be incremented each time ... even if injected, is that what
> you wanted?
>
yes
- 821. By Eldar Nugaev
-
migration gateway_v6 to network_info
- 822. By Eldar Nugaev
-
small fix
- 823. By Ilya Alekseyev
-
couple of bugs fixed
- 824. By Eldar Nugaev
-
pep8 clearing
- 825. By Ilya Alekseyev
-
trunk merged. conflicts resolved
- 826. By Ilya Alekseyev
-
style and spacing fixed
Unmerged revisions
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'nova/virt/interfaces.template' |
2 | --- nova/virt/interfaces.template 2011-03-02 01:12:47 +0000 |
3 | +++ nova/virt/interfaces.template 2011-03-23 19:11:11 +0000 |
4 | @@ -5,19 +5,20 @@ |
5 | auto lo |
6 | iface lo inet loopback |
7 | |
8 | -# The primary network interface |
9 | -auto eth0 |
10 | -iface eth0 inet static |
11 | - address ${address} |
12 | - netmask ${netmask} |
13 | - broadcast ${broadcast} |
14 | - gateway ${gateway} |
15 | - dns-nameservers ${dns} |
16 | +#for $ifc in $interfaces |
17 | +auto ${ifc.name} |
18 | +iface ${ifc.name} inet static |
19 | + address ${ifc.address} |
20 | + netmask ${ifc.netmask} |
21 | + broadcast ${ifc.broadcast} |
22 | + gateway ${ifc.gateway} |
23 | + dns-nameservers ${ifc.dns} |
24 | |
25 | #if $use_ipv6 |
26 | -iface eth0 inet6 static |
27 | - address ${address_v6} |
28 | - netmask ${netmask_v6} |
29 | - gateway ${gateway_v6} |
30 | +iface ${ifc.name} inet6 static |
31 | + address ${ifc.address_v6} |
32 | + netmask ${ifc.netmask_v6} |
33 | + gateway ${ifc.gateway_v6} |
34 | #end if |
35 | |
36 | +#end for |
37 | |
38 | === modified file 'nova/virt/libvirt.xml.template' |
39 | --- nova/virt/libvirt.xml.template 2011-03-02 01:12:47 +0000 |
40 | +++ nova/virt/libvirt.xml.template 2011-03-23 19:11:11 +0000 |
41 | @@ -69,21 +69,24 @@ |
42 | </disk> |
43 | #end if |
44 | #end if |
45 | + |
46 | +#for $nic in $nics |
47 | <interface type='bridge'> |
48 | - <source bridge='${bridge_name}'/> |
49 | - <mac address='${mac_address}'/> |
50 | + <source bridge='${nic.bridge_name}'/> |
51 | + <mac address='${nic.mac_address}'/> |
52 | <!-- <model type='virtio'/> CANT RUN virtio network right now --> |
53 | - <filterref filter="nova-instance-${name}"> |
54 | - <parameter name="IP" value="${ip_address}" /> |
55 | - <parameter name="DHCPSERVER" value="${dhcp_server}" /> |
56 | -#if $getVar('extra_params', False) |
57 | - ${extra_params} |
58 | + <filterref filter="nova-instance-${name}-${nic.id}"> |
59 | + <parameter name="IP" value="${nic.ip_address}" /> |
60 | + <parameter name="DHCPSERVER" value="${nic.dhcp_server}" /> |
61 | +#if $getVar('nic.extra_params', False) |
62 | + ${nic.extra_params} |
63 | #end if |
64 | -#if $getVar('gateway_v6', False) |
65 | - <parameter name="RASERVER" value="${gateway_v6}" /> |
66 | +#if $getVar('nic.gateway_v6', False) |
67 | + <parameter name="RASERVER" value="${nic.gateway_v6}" /> |
68 | #end if |
69 | </filterref> |
70 | </interface> |
71 | +#end for |
72 | |
73 | <!-- The order is significant here. File must be defined first --> |
74 | <serial type="file"> |
75 | |
76 | === modified file 'nova/virt/libvirt_conn.py' |
77 | --- nova/virt/libvirt_conn.py 2011-03-23 05:29:32 +0000 |
78 | +++ nova/virt/libvirt_conn.py 2011-03-23 19:11:11 +0000 |
79 | @@ -153,6 +153,49 @@ |
80 | return int(net.version()) |
81 | |
82 | |
83 | +def _get_network_info(instance): |
84 | + #TODO(ilyaalekseyev) If we will keep this function |
85 | + # we should cache network_info |
86 | + admin_context = context.get_admin_context() |
87 | + |
88 | + ip_addresses = db.fixed_ip_get_all_by_instance(admin_context, |
89 | + instance['id']) |
90 | + |
91 | + networks = db.network_get_all_by_instance(admin_context, |
92 | + instance['id']) |
93 | + network_info = [] |
94 | + |
95 | + def ip_dict(ip): |
96 | + return { |
97 | + "ip": ip.address, |
98 | + "netmask": network["netmask"], |
99 | + "enabled": "1"} |
100 | + |
101 | + def ip6_dict(ip6): |
102 | + prefix = ip6.network.cidr_v6 |
103 | + mac = instance.mac_address |
104 | + return { |
105 | + "ip": utils.to_global_ipv6(prefix, mac), |
106 | + "netmask": ip6.network.netmask_v6, |
107 | + "gateway": ip6.network.gateway_v6, |
108 | + "enabled": "1"} |
109 | + |
110 | + for network in networks: |
111 | + network_ips = [ip for ip in ip_addresses |
112 | + if ip.network_id == network.id] |
113 | + |
114 | + mapping = { |
115 | + 'label': network['label'], |
116 | + 'gateway': network['gateway'], |
117 | + 'mac': instance.mac_address, |
118 | + 'dns': [network['dns']], |
119 | + 'ips': [ip_dict(ip) for ip in network_ips], |
120 | + 'ip6s': [ip6_dict(ip) for ip in network_ips]} |
121 | + |
122 | + network_info.append((network, mapping)) |
123 | + return network_info |
124 | + |
125 | + |
126 | class LibvirtConnection(object): |
127 | |
128 | def __init__(self, read_only): |
129 | @@ -416,16 +459,18 @@ |
130 | # the normal xml file, we can just call reboot here |
131 | self.reboot(instance) |
132 | |
133 | + # NOTE(ilyaalekseyev): Implementation like in multinics |
134 | + # for xenapi(tr3buchet) |
135 | @exception.wrap_exception |
136 | - def spawn(self, instance): |
137 | - xml = self.to_xml(instance) |
138 | + def spawn(self, instance, network_info=None): |
139 | + xml = self.to_xml(instance, network_info) |
140 | db.instance_set_state(context.get_admin_context(), |
141 | instance['id'], |
142 | power_state.NOSTATE, |
143 | 'launching') |
144 | - self.firewall_driver.setup_basic_filtering(instance) |
145 | - self.firewall_driver.prepare_instance_filter(instance) |
146 | - self._create_image(instance, xml) |
147 | + self.firewall_driver.setup_basic_filtering(instance, network_info) |
148 | + self.firewall_driver.prepare_instance_filter(instance, network_info) |
149 | + self._create_image(instance, xml, network_info) |
150 | self._conn.createXML(xml, 0) |
151 | LOG.debug(_("instance %s: is running"), instance['name']) |
152 | self.firewall_driver.apply_instance_filter(instance) |
153 | @@ -582,7 +627,11 @@ |
154 | utils.execute('truncate', target, '-s', "%dG" % local_gb) |
155 | # TODO(vish): should we format disk by default? |
156 | |
157 | - def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None): |
158 | + def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None, |
159 | + network_info=None): |
160 | + if not network_info: |
161 | + network_info = _get_network_info(inst) |
162 | + |
163 | # syntactic nicety |
164 | def basepath(fname='', suffix=suffix): |
165 | return os.path.join(FLAGS.instances_path, |
166 | @@ -658,28 +707,35 @@ |
167 | |
168 | key = str(inst['key_data']) |
169 | net = None |
170 | - network_ref = db.network_get_by_instance(context.get_admin_context(), |
171 | - inst['id']) |
172 | - if network_ref['injected']: |
173 | - admin_context = context.get_admin_context() |
174 | - address = db.instance_get_fixed_address(admin_context, inst['id']) |
175 | + |
176 | + nets = [] |
177 | + ifc_template = open(FLAGS.injected_network_template).read() |
178 | + ifc_num = -1 |
179 | + admin_context = context.get_admin_context() |
180 | + for (network_ref, mapping) in network_info: |
181 | + ifc_num += 1 |
182 | + |
183 | + if not 'injected' in network_ref: |
184 | + continue |
185 | + |
186 | + address = mapping['ips'][0]['ip'] |
187 | address_v6 = None |
188 | if FLAGS.use_ipv6: |
189 | - address_v6 = db.instance_get_fixed_address_v6(admin_context, |
190 | - inst['id']) |
191 | - |
192 | - interfaces_info = {'address': address, |
193 | - 'netmask': network_ref['netmask'], |
194 | - 'gateway': network_ref['gateway'], |
195 | - 'broadcast': network_ref['broadcast'], |
196 | - 'dns': network_ref['dns'], |
197 | - 'address_v6': address_v6, |
198 | - 'gateway_v6': network_ref['gateway_v6'], |
199 | - 'netmask_v6': network_ref['netmask_v6'], |
200 | - 'use_ipv6': FLAGS.use_ipv6} |
201 | - |
202 | - net = str(Template(self.interfaces_xml, |
203 | - searchList=[interfaces_info])) |
204 | + address_v6 = mapping['ip6s'][0]['ip'] |
205 | + net_info = {'name': 'eth%d' % ifc_num, |
206 | + 'address': address, |
207 | + 'netmask': network_ref['netmask'], |
208 | + 'gateway': network_ref['gateway'], |
209 | + 'broadcast': network_ref['broadcast'], |
210 | + 'dns': network_ref['dns'], |
211 | + 'address_v6': address_v6, |
212 | + 'gateway_v6': network_ref['gateway_v6'], |
213 | + 'netmask_v6': network_ref['netmask_v6'], |
214 | + 'use_ipv6': FLAGS.use_ipv6} |
215 | + nets.append(net_info) |
216 | + |
217 | + net = str(Template(ifc_template, searchList=[{'interfaces': nets}])) |
218 | + |
219 | if key or net: |
220 | inst_name = inst['name'] |
221 | img_id = inst.image_id |
222 | @@ -701,20 +757,11 @@ |
223 | if FLAGS.libvirt_type == 'uml': |
224 | utils.execute('sudo', 'chown', 'root', basepath('disk')) |
225 | |
226 | - def to_xml(self, instance, rescue=False): |
227 | - # TODO(termie): cache? |
228 | - LOG.debug(_('instance %s: starting toXML method'), instance['name']) |
229 | - network = db.network_get_by_instance(context.get_admin_context(), |
230 | - instance['id']) |
231 | - # FIXME(vish): stick this in db |
232 | - instance_type = instance['instance_type'] |
233 | - # instance_type = test.INSTANCE_TYPES[instance_type] |
234 | - instance_type = instance_types.get_instance_type(instance_type) |
235 | - ip_address = db.instance_get_fixed_address(context.get_admin_context(), |
236 | - instance['id']) |
237 | + def _get_nic_for_xml(self, instance_id, network, mapping): |
238 | # Assume that the gateway also acts as the dhcp server. |
239 | dhcp_server = network['gateway'] |
240 | gateway_v6 = network['gateway_v6'] |
241 | + mac_id = mapping['mac'].replace(':', '') |
242 | |
243 | if FLAGS.allow_project_net_traffic: |
244 | if FLAGS.use_ipv6: |
245 | @@ -739,6 +786,41 @@ |
246 | (net, mask) |
247 | else: |
248 | extra_params = "\n" |
249 | + |
250 | + result = { |
251 | + 'id': mac_id, |
252 | + 'bridge_name': network['bridge'], |
253 | + 'mac_address': mapping['mac'], |
254 | + 'ip_address': mapping['ips'][0]['ip'], |
255 | + 'dhcp_server': dhcp_server, |
256 | + 'extra_params': extra_params, |
257 | + } |
258 | + |
259 | + if gateway_v6: |
260 | + result['gateway_v6'] = gateway_v6 + "/128" |
261 | + |
262 | + return result |
263 | + |
264 | + def to_xml(self, instance, rescue=False, network_info=None): |
265 | + admin_context = context.get_admin_context() |
266 | + |
267 | + # TODO(termie): cache? |
268 | + LOG.debug(_('instance %s: starting toXML method'), instance['name']) |
269 | + |
270 | + #TODO(ilyaalekseyev) remove network_info creation code |
271 | + # when multinics will be completed |
272 | + if not network_info: |
273 | + network_info = _get_network_info(instance) |
274 | + |
275 | + nics = [] |
276 | + for (network, mapping) in network_info: |
277 | + nics.append(self._get_nic_for_xml(instance['id'], |
278 | + network, |
279 | + mapping)) |
280 | + # FIXME(vish): stick this in db |
281 | + instance_type_name = instance['instance_type'] |
282 | + instance_type = instance_types.get_instance_type(instance_type_name) |
283 | + |
284 | if FLAGS.use_cow_images: |
285 | driver_type = 'qcow2' |
286 | else: |
287 | @@ -750,17 +832,11 @@ |
288 | instance['name']), |
289 | 'memory_kb': instance_type['memory_mb'] * 1024, |
290 | 'vcpus': instance_type['vcpus'], |
291 | - 'bridge_name': network['bridge'], |
292 | - 'mac_address': instance['mac_address'], |
293 | - 'ip_address': ip_address, |
294 | - 'dhcp_server': dhcp_server, |
295 | - 'extra_params': extra_params, |
296 | 'rescue': rescue, |
297 | 'local': instance_type['local_gb'], |
298 | - 'driver_type': driver_type} |
299 | + 'driver_type': driver_type, |
300 | + 'nics': nics} |
301 | |
302 | - if gateway_v6: |
303 | - xml_info['gateway_v6'] = gateway_v6 + "/128" |
304 | if not rescue: |
305 | if instance['kernel_id']: |
306 | xml_info['kernel'] = xml_info['basepath'] + "/kernel" |
307 | @@ -773,7 +849,6 @@ |
308 | xml = str(Template(self.libvirt_xml, searchList=[xml_info])) |
309 | LOG.debug(_('instance %s: finished toXML method'), |
310 | instance['name']) |
311 | - |
312 | return xml |
313 | |
314 | def get_info(self, instance_name): |
315 | @@ -1274,7 +1349,7 @@ |
316 | |
317 | |
318 | class FirewallDriver(object): |
319 | - def prepare_instance_filter(self, instance): |
320 | + def prepare_instance_filter(self, instance, network_info=None): |
321 | """Prepare filters for the instance. |
322 | |
323 | At this point, the instance isn't running yet.""" |
324 | @@ -1308,7 +1383,7 @@ |
325 | the security group.""" |
326 | raise NotImplementedError() |
327 | |
328 | - def setup_basic_filtering(self, instance): |
329 | + def setup_basic_filtering(self, instance, network_info=None): |
330 | """Create rules to block spoofing and allow dhcp. |
331 | |
332 | This gets called when spawning an instance, before |
333 | @@ -1322,6 +1397,11 @@ |
334 | instance['id']) |
335 | return network['gateway_v6'] |
336 | |
337 | + def _all_gateway_v6_for_instance(self, instance): |
338 | + networks = db.network_get_all_by_instance(context.get_admin_context(), |
339 | + instance['id']) |
340 | + return [network['gateway_v6'] for network in networks] |
341 | + |
342 | |
343 | class NWFilterFirewall(FirewallDriver): |
344 | """ |
345 | @@ -1413,7 +1493,7 @@ |
346 | </rule> |
347 | </filter>''' |
348 | |
349 | - def setup_basic_filtering(self, instance): |
350 | + def setup_basic_filtering(self, instance, network_info=None): |
351 | """Set up basic filtering (MAC, IP, and ARP spoofing protection)""" |
352 | logging.info('called setup_basic_filtering in nwfilter') |
353 | |
354 | @@ -1518,7 +1598,7 @@ |
355 | # Nothing to do |
356 | pass |
357 | |
358 | - def prepare_instance_filter(self, instance): |
359 | + def prepare_instance_filter(self, instance, network_info=None): |
360 | """ |
361 | Creates an NWFilter for the given instance. In the process, |
362 | it makes sure the filters for the security groups as well as |
363 | @@ -1536,8 +1616,8 @@ |
364 | 'nova-base-ipv6', |
365 | 'nova-allow-dhcp-server'] |
366 | if FLAGS.use_ipv6: |
367 | - gateway_v6 = self._gateway_v6_for_instance(instance) |
368 | - if gateway_v6: |
369 | + gateways_v6 = self._all_gateway_v6_for_instance(instance) |
370 | + if gateways_v6: |
371 | instance_secgroup_filter_children += ['nova-allow-ra-server'] |
372 | |
373 | ctxt = context.get_admin_context() |
374 | @@ -1623,9 +1703,11 @@ |
375 | self.iptables.ipv6['filter'].add_chain('sg-fallback') |
376 | self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP') |
377 | |
378 | - def setup_basic_filtering(self, instance): |
379 | + def setup_basic_filtering(self, instance, network_info=None): |
380 | """Use NWFilter from libvirt for this.""" |
381 | - return self.nwfilter.setup_basic_filtering(instance) |
382 | + if not network_info: |
383 | + network_info = _get_network_info(instance) |
384 | + return self.nwfilter.setup_basic_filtering(instance, network_info) |
385 | |
386 | def apply_instance_filter(self, instance): |
387 | """No-op. Everything is done in prepare_instance_filter""" |
388 | @@ -1639,29 +1721,40 @@ |
389 | LOG.info(_('Attempted to unfilter instance %s which is not ' |
390 | 'filtered'), instance['id']) |
391 | |
392 | - def prepare_instance_filter(self, instance): |
393 | + def prepare_instance_filter(self, instance, network_info=None): |
394 | + if not network_info: |
395 | + network_info = _get_network_info(instance) |
396 | self.instances[instance['id']] = instance |
397 | - self.add_filters_for_instance(instance) |
398 | + self.add_filters_for_instance(instance, network_info) |
399 | self.iptables.apply() |
400 | |
401 | - def add_filters_for_instance(self, instance): |
402 | + def add_filters_for_instance(self, instance, network_info=None): |
403 | + if not network_info: |
404 | + network_info = _get_network_info(instance) |
405 | chain_name = self._instance_chain_name(instance) |
406 | |
407 | self.iptables.ipv4['filter'].add_chain(chain_name) |
408 | - ipv4_address = self._ip_for_instance(instance) |
409 | - self.iptables.ipv4['filter'].add_rule('local', |
410 | - '-d %s -j $%s' % |
411 | - (ipv4_address, chain_name)) |
412 | + |
413 | + ips_v4 = [ip['ip'] for (_, mapping) in network_info |
414 | + for ip in mapping['ips']] |
415 | + |
416 | + for ipv4_address in ips_v4: |
417 | + self.iptables.ipv4['filter'].add_rule('local', |
418 | + '-d %s -j $%s' % |
419 | + (ipv4_address, chain_name)) |
420 | |
421 | if FLAGS.use_ipv6: |
422 | self.iptables.ipv6['filter'].add_chain(chain_name) |
423 | - ipv6_address = self._ip_for_instance_v6(instance) |
424 | - self.iptables.ipv6['filter'].add_rule('local', |
425 | - '-d %s -j $%s' % |
426 | - (ipv6_address, |
427 | - chain_name)) |
428 | - |
429 | - ipv4_rules, ipv6_rules = self.instance_rules(instance) |
430 | + ips_v6 = [ip['ip'] for (_, mapping) in network_info |
431 | + for ip in mapping['ip6s']] |
432 | + |
433 | + for ipv6_address in ips_v6: |
434 | + self.iptables.ipv6['filter'].add_rule('local', |
435 | + '-d %s -j $%s' % |
436 | + (ipv6_address, |
437 | + chain_name)) |
438 | + |
439 | + ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info) |
440 | |
441 | for rule in ipv4_rules: |
442 | self.iptables.ipv4['filter'].add_rule(chain_name, rule) |
443 | @@ -1677,7 +1770,9 @@ |
444 | if FLAGS.use_ipv6: |
445 | self.iptables.ipv6['filter'].remove_chain(chain_name) |
446 | |
447 | - def instance_rules(self, instance): |
448 | + def instance_rules(self, instance, network_info=None): |
449 | + if not network_info: |
450 | + network_info = _get_network_info(instance) |
451 | ctxt = context.get_admin_context() |
452 | |
453 | ipv4_rules = [] |
454 | @@ -1691,28 +1786,35 @@ |
455 | ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT'] |
456 | ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT'] |
457 | |
458 | - dhcp_server = self._dhcp_server_for_instance(instance) |
459 | - ipv4_rules += ['-s %s -p udp --sport 67 --dport 68 ' |
460 | - '-j ACCEPT' % (dhcp_server,)] |
461 | + dhcp_servers = [network['gateway'] for (network, _m) in network_info] |
462 | + |
463 | + for dhcp_server in dhcp_servers: |
464 | + ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 ' |
465 | + '-j ACCEPT' % (dhcp_server,)) |
466 | |
467 | #Allow project network traffic |
468 | if FLAGS.allow_project_net_traffic: |
469 | - cidr = self._project_cidr_for_instance(instance) |
470 | - ipv4_rules += ['-s %s -j ACCEPT' % (cidr,)] |
471 | + cidrs = [network['cidr'] for (network, _m) in network_info] |
472 | + for cidr in cidrs: |
473 | + ipv4_rules.append('-s %s -j ACCEPT' % (cidr,)) |
474 | |
475 | # We wrap these in FLAGS.use_ipv6 because they might cause |
476 | # a DB lookup. The other ones are just list operations, so |
477 | # they're not worth the clutter. |
478 | if FLAGS.use_ipv6: |
479 | # Allow RA responses |
480 | - gateway_v6 = self._gateway_v6_for_instance(instance) |
481 | - if gateway_v6: |
482 | - ipv6_rules += ['-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,)] |
483 | + gateways_v6 = self._all_gateway_v6_for_instance(instance) |
484 | + for gateway_v6 in gateways_v6: |
485 | + ipv6_rules.append( |
486 | + '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,)) |
487 | |
488 | #Allow project network traffic |
489 | if FLAGS.allow_project_net_traffic: |
490 | - cidrv6 = self._project_cidrv6_for_instance(instance) |
491 | - ipv6_rules += ['-s %s -j ACCEPT' % (cidrv6,)] |
492 | + cidrv6s = [network['cidr_v6'] for (network, _m) |
493 | + in network_info] |
494 | + |
495 | + for cidrv6 in cidrv6s: |
496 | + ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,)) |
497 | |
498 | security_groups = db.security_group_get_by_instance(ctxt, |
499 | instance['id']) |
149 if not network_info:
180 if _m is unused, you can just use _
182 if network_ref['injected']:
if 'injected' in network_ref: might be safer (no KeyError if missing)
or better:
if not 'injected' in network_ref:
    continue
and then dedent the rest of the for-loop body accordingly
183 do we need to make the context inside the loop? can this be done outside?
186-188 can be replaced with ra_server = network_ref.get('ra_server', "fd00::")
The 'eth##' will be incremented each time ... even if injected, is that what you wanted?
253 if not ...
263 commented out code?
264 seems to overwrite 262?
351 353 commented out code?
354 if ra_servers:
398 should be like 416
450, 459, 482 use append()?