Merge lp:~julian-edwards/maas/allocate-ip-on-start-2 into lp:~maas-committers/maas/trunk
- allocate-ip-on-start-2
- Merge into trunk
Proposed by
Julian Edwards
Status: Superseded
Proposed branch: lp:~julian-edwards/maas/allocate-ip-on-start-2
Merge into: lp:~maas-committers/maas/trunk
Diff against target: 575 lines (+300/-45), 7 files modified:
- src/maasserver/models/__init__.py (+1/-1)
- src/maasserver/models/macaddress.py (+4/-1)
- src/maasserver/models/node.py (+105/-11)
- src/maasserver/models/staticipaddress.py (+1/-2)
- src/maasserver/models/tests/test_macaddress.py (+3/-15)
- src/maasserver/models/tests/test_node.py (+165/-13)
- src/maasserver/testing/factory.py (+21/-2)
To merge this branch: bzr merge lp:~julian-edwards/maas/allocate-ip-on-start-2
Related bugs: none
Reviewer | Review Type | Date Requested | Status
---|---|---|---
MAAS Maintainers | Pending | |
Review via email:
|
This proposal has been superseded by a proposal from 2014-06-11.
Commit message
Description of the change
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'src/maasserver/models/__init__.py' |
2 | --- src/maasserver/models/__init__.py 2014-06-10 14:45:14 +0000 |
3 | +++ src/maasserver/models/__init__.py 2014-06-11 05:52:49 +0000 |
4 | @@ -51,7 +51,6 @@ |
5 | from maasserver.models.dhcplease import DHCPLease |
6 | from maasserver.models.downloadprogress import DownloadProgress |
7 | from maasserver.models.filestorage import FileStorage |
8 | -from maasserver.models.staticipaddress import StaticIPAddress |
9 | from maasserver.models.macaddress import MACAddress |
10 | from maasserver.models.macipaddresslink import MACStaticIPAddressLink |
11 | from maasserver.models.network import Network |
12 | @@ -59,6 +58,7 @@ |
13 | from maasserver.models.nodegroup import NodeGroup |
14 | from maasserver.models.nodegroupinterface import NodeGroupInterface |
15 | from maasserver.models.sshkey import SSHKey |
16 | +from maasserver.models.staticipaddress import StaticIPAddress |
17 | from maasserver.models.tag import Tag |
18 | from maasserver.models.user import create_user |
19 | from maasserver.models.userprofile import UserProfile |
20 | |
21 | === modified file 'src/maasserver/models/macaddress.py' |
22 | --- src/maasserver/models/macaddress.py 2014-06-10 14:45:14 +0000 |
23 | +++ src/maasserver/models/macaddress.py 2014-06-11 05:52:49 +0000 |
24 | @@ -90,7 +90,10 @@ |
25 | def claim_static_ip(self, alloc_type=IPADDRESS_TYPE.AUTO): |
26 | """Assign a static IP to this MAC. |
27 | |
28 | - TODO: Also set a host DHCP entry. |
29 | + It is the caller's responsibility to create a celery Task that will |
30 | + write the dhcp host. It is not done here because celery doesn't |
31 | + guarantee job ordering, and if the host entry is written after |
32 | + the host boots it is too late. |
33 | |
34 | :param alloc_type: See :class:`StaticIPAddress`.alloc_type. |
35 | :return: A :class:`StaticIPAddress` object. Returns None if |
36 | |
37 | === modified file 'src/maasserver/models/node.py' |
38 | --- src/maasserver/models/node.py 2014-06-10 15:26:47 +0000 |
39 | +++ src/maasserver/models/node.py 2014-06-11 05:52:49 +0000 |
40 | @@ -28,6 +28,7 @@ |
41 | from string import whitespace |
42 | from uuid import uuid1 |
43 | |
44 | +import celery |
45 | from django.contrib.auth.models import User |
46 | from django.core.exceptions import ( |
47 | PermissionDenied, |
48 | @@ -54,16 +55,20 @@ |
49 | NODE_STATUS, |
50 | NODE_STATUS_CHOICES, |
51 | NODE_STATUS_CHOICES_DICT, |
52 | + NODEGROUPINTERFACE_MANAGEMENT, |
53 | ) |
54 | from maasserver.exceptions import NodeStateViolation |
55 | from maasserver.fields import ( |
56 | JSONObjectField, |
57 | MAC, |
58 | ) |
59 | -from maasserver.models import StaticIPAddress |
60 | from maasserver.models.cleansave import CleanSave |
61 | from maasserver.models.config import Config |
62 | from maasserver.models.dhcplease import DHCPLease |
63 | +from maasserver.models.staticipaddress import ( |
64 | + StaticIPAddress, |
65 | + StaticIPAddressExhaustion, |
66 | + ) |
67 | from maasserver.models.tag import Tag |
68 | from maasserver.models.timestampedmodel import TimestampedModel |
69 | from maasserver.models.zone import Zone |
70 | @@ -74,6 +79,7 @@ |
71 | from piston.models import Token |
72 | from provisioningserver.drivers.osystem import OperatingSystemRegistry |
73 | from provisioningserver.tasks import ( |
74 | + add_new_dhcp_host_map, |
75 | power_off, |
76 | power_on, |
77 | remove_dhcp_host_map, |
78 | @@ -400,9 +406,24 @@ |
79 | else: |
80 | do_start = True |
81 | if do_start: |
82 | - power_on.apply_async( |
83 | - queue=node.work_queue, args=[node_power_type], |
84 | - kwargs=power_params) |
85 | + try: |
86 | + tasks = node.claim_static_ips() |
87 | + except StaticIPAddressExhaustion: |
88 | + # TODO: send error back to user, or fall back to a |
89 | + # dynamic IP? |
90 | + logger.error( |
91 | + "Node %s: Unable to allocate static IP due to address" |
92 | + " exhaustion." % node.system_id) |
93 | + continue |
94 | + |
95 | + task = power_on.si(node_power_type, **power_params) |
96 | + task.set(queue=node.work_queue) |
97 | + tasks.append(task) |
98 | + chained_tasks = celery.chain(tasks) |
99 | + chained_tasks.apply_async() |
100 | + # TODO: if any of this fails it needs to release the |
101 | + # static IPs back to the pool. As part of the robustness |
102 | + # work coming up, it also needs to inform the user. |
103 | processed_nodes.append(node) |
104 | return processed_nodes |
105 | |
106 | @@ -566,6 +587,49 @@ |
107 | else: |
108 | return self.hostname |
109 | |
110 | + def claim_static_ips(self): |
111 | + """Assign static IPs for our MACs and return an array of Celery tasks |
112 | + that need executing. If nothing needs executing, the empty array |
113 | + is returned. |
114 | + |
115 | + Each MAC on the node that is connected to a managed cluster |
116 | + interface will get an IP. |
117 | + |
118 | + This operation is atomic: if claiming an IP on a particular MAC fails |
119 | + then none of the MACs will get an IP and StaticIPAddressExhaustion |
120 | + is raised. |
121 | + """ |
122 | + # TODO: Release claimed MACs inside loop if fail to claim single |
123 | + # one (ie make this atomic). |
124 | + tasks = [] |
125 | + # Get a new AUTO static IP for each MAC on a managed interface. |
126 | + macs = self.mac_addresses_on_managed_interfaces() |
127 | + for mac in macs: |
128 | + sip = mac.claim_static_ip() |
129 | + # This is creating an array of celery 'Signatures' which will be |
130 | + # chained together later. We make the Signatures immutable |
131 | + # otherwise the result of the previous in the chain is passed to |
132 | + # the next, this is done with the "si()" call. |
133 | + # See docs.celeryproject.org/en/latest/userguide/canvas.html |
134 | + |
135 | + # Note that this may be None if the static range is not yet |
136 | + # defined, which will be the case when migrating from older |
137 | + # versions of the code. |
138 | + if sip is not None: |
139 | + # Delete any existing dynamic maps first. |
140 | + del_existing = self._build_dynamic_host_map_deletion_task() |
141 | + if del_existing is not None: |
142 | + # del_existing is a chain so does not need an explicit |
143 | + # queue to be set as each subtask will have one. |
144 | + tasks.append(del_existing) |
145 | + dhcp_key = self.nodegroup.dhcp_key |
146 | + mapping = {sip.ip: mac.mac_address} |
147 | + dhcp_task = add_new_dhcp_host_map.si( |
148 | + mapping, '127.0.0.1', dhcp_key) |
149 | + dhcp_task.set(queue=self.work_queue) |
150 | + tasks.append(dhcp_task) |
151 | + return tasks |
152 | + |
153 | def ip_addresses(self): |
154 | """IP addresses allocated to this node. |
155 | |
156 | @@ -632,6 +696,16 @@ |
157 | query = dhcpleases_qs.filter(mac__in=macs) |
158 | return query.values_list('ip', flat=True) |
159 | |
160 | + def mac_addresses_on_managed_interfaces(self): |
161 | + """Return MACAddresses for this node that have a managed cluster |
162 | + interface.""" |
163 | + # Avoid circular imports |
164 | + from maasserver.models import MACAddress |
165 | + unmanaged = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED |
166 | + return MACAddress.objects.filter( |
167 | + node=self, cluster_interface__isnull=False).exclude( |
168 | + cluster_interface__management=unmanaged) |
169 | + |
170 | def tag_names(self): |
171 | # We don't use self.tags.values_list here because this does not |
172 | # take advantage of the cache. |
173 | @@ -761,6 +835,21 @@ |
174 | raise NodeStateViolation( |
175 | "Cannot delete node %s: node is in state %s." |
176 | % (self.system_id, NODE_STATUS_CHOICES_DICT[self.status])) |
177 | + # Delete any dynamic host maps in the DHCP server. |
178 | + self._delete_dynamic_host_maps() |
179 | + # Delete the related mac addresses. |
180 | + # The DHCPLease objects corresponding to these MACs will be deleted |
181 | + # as well. See maasserver/models/dhcplease:delete_lease(). |
182 | + self.macaddress_set.all().delete() |
183 | + |
184 | + super(Node, self).delete() |
185 | + |
186 | + def _build_dynamic_host_map_deletion_task(self): |
187 | + """Create a chained celery task that will delete dhcp host maps. |
188 | + |
189 | + Return None if there is nothing to delete. |
190 | + """ |
191 | + tasks = [] |
192 | nodegroup = self.nodegroup |
193 | if len(nodegroup.get_managed_interfaces()) > 0: |
194 | # Delete the host map(s) in the DHCP server. |
195 | @@ -772,14 +861,19 @@ |
196 | ip_address=lease.ip, |
197 | server_address="127.0.0.1", |
198 | omapi_key=nodegroup.dhcp_key) |
199 | - remove_dhcp_host_map.apply_async( |
200 | - queue=nodegroup.uuid, kwargs=task_kwargs) |
201 | - # Delete the related mac addresses. |
202 | - # The DHCPLease objects corresponding to these MACs will be deleted |
203 | - # as well. See maasserver/models/dhcplease:delete_lease(). |
204 | - self.macaddress_set.all().delete() |
205 | + task = remove_dhcp_host_map.si(**task_kwargs) |
206 | + task.set(queue=self.work_queue) |
207 | + tasks.append(task) |
208 | + if len(tasks) > 0: |
209 | + return celery.chain(tasks) |
210 | + return None |
211 | |
212 | - super(Node, self).delete() |
213 | + def _delete_dynamic_host_maps(self): |
214 | + """If any DHCPLeases exist for this node, remove any associated |
215 | + host maps.""" |
216 | + chain = self._build_dynamic_host_map_deletion_task() |
217 | + if chain is not None: |
218 | + chain.apply_async() |
219 | |
220 | def set_random_hostname(self): |
221 | """Set 5 character `hostname` using non-ambiguous characters. |
222 | |
223 | === modified file 'src/maasserver/models/staticipaddress.py' |
224 | --- src/maasserver/models/staticipaddress.py 2014-06-05 01:28:35 +0000 |
225 | +++ src/maasserver/models/staticipaddress.py 2014-06-11 05:52:49 +0000 |
226 | @@ -120,7 +120,7 @@ |
227 | # __iter__ does not work here for some reason, so using |
228 | # iteritems(). |
229 | # XXX: convert this into a reverse_map_enum in maasserver.utils. |
230 | - for k,v in IPADDRESS_TYPE.__dict__.iteritems(): |
231 | + for k, v in IPADDRESS_TYPE.__dict__.iteritems(): |
232 | if v == self.alloc_type: |
233 | strtype = k |
234 | break |
235 | @@ -135,4 +135,3 @@ |
236 | After return, this object is no longer valid. |
237 | """ |
238 | self.delete() |
239 | - |
240 | |
241 | === modified file 'src/maasserver/models/tests/test_macaddress.py' |
242 | --- src/maasserver/models/tests/test_macaddress.py 2014-06-10 14:45:14 +0000 |
243 | +++ src/maasserver/models/tests/test_macaddress.py 2014-06-11 05:52:49 +0000 |
244 | @@ -84,25 +84,13 @@ |
245 | |
246 | class TestMACAddressForStaticIPClaiming(MAASServerTestCase): |
247 | |
248 | - def make_node_with_mac_attached_to_nodegroupinterface(self): |
249 | - nodegroup = factory.make_node_group() |
250 | - node = factory.make_node(mac=True, nodegroup=nodegroup) |
251 | - low_ip, high_ip = factory.make_ip_range() |
252 | - ngi = factory.make_node_group_interface( |
253 | - nodegroup, static_ip_range_low=low_ip.ipv4().format(), |
254 | - static_ip_range_high=high_ip.ipv4().format()) |
255 | - mac = node.get_primary_mac() |
256 | - mac.cluster_interface = ngi |
257 | - mac.save() |
258 | - return node |
259 | - |
260 | def test_claim_static_ip_returns_none_if_no_cluster_interface(self): |
261 | # If mac.cluster_interface is None, we can't allocate any IP. |
262 | mac = factory.make_mac_address() |
263 | self.assertIsNone(mac.claim_static_ip()) |
264 | |
265 | def test_claim_static_ip_reserves_an_ip_address(self): |
266 | - node = self.make_node_with_mac_attached_to_nodegroupinterface() |
267 | + node = factory.make_node_with_mac_attached_to_nodegroupinterface() |
268 | mac = node.get_primary_mac() |
269 | claimed_ip = mac.claim_static_ip() |
270 | self.assertIsInstance(claimed_ip, StaticIPAddress) |
271 | @@ -111,13 +99,13 @@ |
272 | IPADDRESS_TYPE.AUTO, StaticIPAddress.objects.all()[0].alloc_type) |
273 | |
274 | def test_claim_static_ip_sets_type_as_required(self): |
275 | - node = self.make_node_with_mac_attached_to_nodegroupinterface() |
276 | + node = factory.make_node_with_mac_attached_to_nodegroupinterface() |
277 | mac = node.get_primary_mac() |
278 | claimed_ip = mac.claim_static_ip(alloc_type=IPADDRESS_TYPE.STICKY) |
279 | self.assertEqual(IPADDRESS_TYPE.STICKY, claimed_ip.alloc_type) |
280 | |
281 | def test_claim_static_ip_returns_none_if_no_static_range_defined(self): |
282 | - node = self.make_node_with_mac_attached_to_nodegroupinterface() |
283 | + node = factory.make_node_with_mac_attached_to_nodegroupinterface() |
284 | mac = node.get_primary_mac() |
285 | mac.cluster_interface.static_ip_range_low = None |
286 | mac.cluster_interface.static_ip_range_high = None |
287 | |
288 | === modified file 'src/maasserver/models/tests/test_node.py' |
289 | --- src/maasserver/models/tests/test_node.py 2014-06-10 14:45:14 +0000 |
290 | +++ src/maasserver/models/tests/test_node.py 2014-06-11 05:52:49 +0000 |
291 | @@ -17,6 +17,7 @@ |
292 | from datetime import timedelta |
293 | import random |
294 | |
295 | +import celery |
296 | from django.core.exceptions import ValidationError |
297 | from maasserver.clusterrpc.power_parameters import get_power_types |
298 | from maasserver.enum import ( |
299 | @@ -55,12 +56,12 @@ |
300 | NodeUserData, |
301 | ) |
302 | from provisioningserver.power.poweraction import PowerAction |
303 | +from provisioningserver.tasks import Omshell |
304 | from testtools.matchers import ( |
305 | AllMatch, |
306 | Contains, |
307 | Equals, |
308 | MatchesAll, |
309 | - MatchesListwise, |
310 | Not, |
311 | ) |
312 | |
313 | @@ -294,19 +295,26 @@ |
314 | lease = factory.make_dhcp_lease() |
315 | node = factory.make_node(nodegroup=lease.nodegroup) |
316 | node.add_mac_address(lease.mac) |
317 | - mocked_task = self.patch(node_module, "remove_dhcp_host_map") |
318 | - mocked_apply_async = self.patch(mocked_task, "apply_async") |
319 | + self.patch(Omshell, 'remove') |
320 | node.delete() |
321 | - args, kwargs = mocked_apply_async.call_args |
322 | - expected = ( |
323 | - Equals(kwargs['queue']), |
324 | + self.assertThat( |
325 | + self.celery.tasks[0]['kwargs'], |
326 | Equals({ |
327 | 'ip_address': lease.ip, |
328 | 'server_address': "127.0.0.1", |
329 | 'omapi_key': lease.nodegroup.dhcp_key, |
330 | })) |
331 | - observed = node.work_queue, kwargs['kwargs'] |
332 | - self.assertThat(observed, MatchesListwise(expected)) |
333 | + |
334 | + def test_delete_dynamic_host_maps_sends_to_correct_queue(self): |
335 | + lease = factory.make_dhcp_lease() |
336 | + node = factory.make_node(nodegroup=lease.nodegroup) |
337 | + node.add_mac_address(lease.mac) |
338 | + self.patch(Omshell, 'remove') |
339 | + option_call = self.patch(celery.canvas.Signature, 'set') |
340 | + work_queue = node.work_queue |
341 | + node.delete() |
342 | + args, kwargs = option_call.call_args |
343 | + self.assertEqual(work_queue, kwargs['queue']) |
344 | |
345 | def test_delete_node_removes_multiple_host_maps(self): |
346 | lease1 = factory.make_dhcp_lease() |
347 | @@ -314,10 +322,9 @@ |
348 | node = factory.make_node(nodegroup=lease1.nodegroup) |
349 | node.add_mac_address(lease1.mac) |
350 | node.add_mac_address(lease2.mac) |
351 | - mocked_task = self.patch(node_module, "remove_dhcp_host_map") |
352 | - mocked_apply_async = self.patch(mocked_task, "apply_async") |
353 | + self.patch(Omshell, 'remove') |
354 | node.delete() |
355 | - self.assertEqual(2, mocked_apply_async.call_count) |
356 | + self.assertEqual(2, len(self.celery.tasks)) |
357 | |
358 | def test_set_random_hostname_set_hostname(self): |
359 | # Blank out enlistment_domain. |
360 | @@ -986,6 +993,31 @@ |
361 | node = factory.make_node(architecture=full_arch) |
362 | self.assertEqual((main_arch, sub_arch), node.split_arch()) |
363 | |
364 | + def test_mac_addresses_on_managed_interfaces_returns_only_managed(self): |
365 | + node = factory.make_node_with_mac_attached_to_nodegroupinterface() |
366 | + primary_cluster_interface = node.get_primary_mac().cluster_interface |
367 | + primary_cluster_interface.management = ( |
368 | + NODEGROUPINTERFACE_MANAGEMENT.DHCP) |
369 | + primary_cluster_interface.save() |
370 | + |
371 | + mac_with_no_interface = factory.make_mac_address(node=node) |
372 | + mac_with_no_interface = mac_with_no_interface # STFU linter |
373 | + unmanaged_interface = factory.make_node_group_interface( |
374 | + nodegroup=node.nodegroup, |
375 | + management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) |
376 | + |
377 | + mac_with_unmanaged_interface = factory.make_mac_address( |
378 | + node=node, cluster_interface=unmanaged_interface) |
379 | + mac_with_unmanaged_interface = mac_with_unmanaged_interface # linter |
380 | + |
381 | + observed = node.mac_addresses_on_managed_interfaces() |
382 | + self.assertItemsEqual([node.get_primary_mac()], observed) |
383 | + |
384 | + def test_mac_addresses_on_managed_interfaces_returns_empty_if_none(self): |
385 | + node = factory.make_node(mac=True) |
386 | + observed = node.mac_addresses_on_managed_interfaces() |
387 | + self.assertItemsEqual([], observed) |
388 | + |
389 | |
390 | class NodeRoutersTest(MAASServerTestCase): |
391 | |
392 | @@ -1274,13 +1306,69 @@ |
393 | self.celery.tasks[0]['kwargs']['mac_address'], |
394 | )) |
395 | |
396 | + def test_start_nodes_issues_dhcp_host_task(self): |
397 | + user = factory.make_user() |
398 | + node = factory.make_node_with_mac_attached_to_nodegroupinterface( |
399 | + owner=user, power_type='ether_wake') |
400 | + omshell_create = self.patch(Omshell, 'create') |
401 | + output = Node.objects.start_nodes([node.system_id], user) |
402 | + |
403 | + # Check that the single node was started, and that the tasks |
404 | + # issued are all there and in the right order. |
405 | + self.assertItemsEqual([node], output) |
406 | + self.assertEqual( |
407 | + [ |
408 | + 'provisioningserver.tasks.add_new_dhcp_host_map', |
409 | + 'provisioningserver.tasks.power_on', |
410 | + ], |
411 | + [ |
412 | + task['task'].name for task in self.celery.tasks |
413 | + ]) |
414 | + |
415 | + # Also check that Omshell.create() was called with the right |
416 | + # parameters. |
417 | + mac = node.get_primary_mac() |
418 | + [ip] = mac.ip_addresses.all() |
419 | + expected_ip = ip.ip |
420 | + expected_mac = mac.mac_address |
421 | + args, kwargs = omshell_create.call_args |
422 | + self.assertEqual((expected_ip, expected_mac), args) |
423 | + |
424 | + def test_start_nodes_clears_existing_dynamic_maps(self): |
425 | + user = factory.make_user() |
426 | + node = factory.make_node_with_mac_attached_to_nodegroupinterface( |
427 | + owner=user, power_type='ether_wake') |
428 | + factory.make_dhcp_lease( |
429 | + nodegroup=node.nodegroup, mac=node.get_primary_mac().mac_address) |
430 | + self.patch(Omshell, 'create') |
431 | + self.patch(Omshell, 'remove') |
432 | + output = Node.objects.start_nodes([node.system_id], user) |
433 | + |
434 | + # Check that the single node was started, and that the tasks |
435 | + # issued are all there and in the right order. |
436 | + self.assertItemsEqual([node], output) |
437 | + self.assertEqual( |
438 | + [ |
439 | + 'provisioningserver.tasks.remove_dhcp_host_map', |
440 | + 'provisioningserver.tasks.add_new_dhcp_host_map', |
441 | + 'provisioningserver.tasks.power_on', |
442 | + ], |
443 | + [ |
444 | + task['task'].name for task in self.celery.tasks |
445 | + ]) |
446 | + |
447 | def test_start_nodes_task_routed_to_nodegroup_worker(self): |
448 | + # Startup jobs are chained, so the normal way of inspecting a |
449 | + # task directly for routing options doesn't work here, because |
450 | + # in EAGER mode that we use in the test suite, the options are |
451 | + # not passed all the way down to the tasks. Instead, we patch |
452 | + # some celery code to inspect the options that were passed. |
453 | user = factory.make_user() |
454 | node, mac = self.make_node_with_mac( |
455 | user, power_type='ether_wake') |
456 | - task = self.patch(node_module, 'power_on') |
457 | + option_call = self.patch(celery.canvas.Signature, 'set') |
458 | Node.objects.start_nodes([node.system_id], user) |
459 | - args, kwargs = task.apply_async.call_args |
460 | + args, kwargs = option_call.call_args |
461 | self.assertEqual(node.work_queue, kwargs['queue']) |
462 | |
463 | def test_start_nodes_does_not_attempt_power_task_if_no_power_type(self): |
464 | @@ -1401,3 +1489,67 @@ |
465 | node = factory.make_node(netboot=True) |
466 | node.set_netboot(False) |
467 | self.assertFalse(node.netboot) |
468 | + |
469 | + def test_claim_static_ips_ignores_unmanaged_macs(self): |
470 | + node = factory.make_node() |
471 | + for _ in range(0, 10): |
472 | + factory.make_mac_address(node=node) |
473 | + observed = node.claim_static_ips() |
474 | + self.assertItemsEqual([], observed) |
475 | + |
476 | + def test_claim_static_ips_creates_task_for_each_managed_mac(self): |
477 | + nodegroup = factory.make_node_group() |
478 | + node = factory.make_node(nodegroup=nodegroup) |
479 | + |
480 | + # Add a bunch of MACs attached to managed interfaces. |
481 | + for _ in range(0, 10): |
482 | + low_ip, high_ip = factory.make_ip_range() |
483 | + ngi = factory.make_node_group_interface( |
484 | + nodegroup, static_ip_range_low=low_ip.ipv4().format(), |
485 | + static_ip_range_high=high_ip.ipv4().format(), |
486 | + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) |
487 | + mac = factory.make_mac_address(node=node) |
488 | + mac.cluster_interface = ngi |
489 | + mac.save() |
490 | + |
491 | + observed = node.claim_static_ips() |
492 | + expected = ['provisioningserver.tasks.add_new_dhcp_host_map'] * 10 |
493 | + |
494 | + self.assertEqual( |
495 | + expected, |
496 | + [task.task for task in observed] |
497 | + ) |
498 | + |
499 | + def test_claim_static_ips_creates_deletion_task(self): |
500 | + # If dhcp leases exist before creating a static IP, the code |
501 | + # should attempt to remove their host maps. |
502 | + node = factory.make_node_with_mac_attached_to_nodegroupinterface() |
503 | + factory.make_dhcp_lease( |
504 | + nodegroup=node.nodegroup, mac=node.get_primary_mac().mac_address) |
505 | + |
506 | + observed = node.claim_static_ips() |
507 | + |
508 | + self.assertEqual( |
509 | + [ |
510 | + 'celery.chain', |
511 | + 'provisioningserver.tasks.add_new_dhcp_host_map', |
512 | + ], |
513 | + [ |
514 | + task.task for task in observed |
515 | + ]) |
516 | + |
517 | + # Probe the chain to make sure it has the deletion task. |
518 | + self.assertEqual( |
519 | + 'provisioningserver.tasks.remove_dhcp_host_map', |
520 | + observed[0].tasks[0].task, |
521 | + ) |
522 | + |
523 | + def test_claim_static_ips_ignores_interface_with_no_static_range(self): |
524 | + node = factory.make_node_with_mac_attached_to_nodegroupinterface() |
525 | + ngi = node.get_primary_mac().cluster_interface |
526 | + ngi.static_ip_range_low = None |
527 | + ngi.static_ip_range_high = None |
528 | + ngi.save() |
529 | + |
530 | + observed = node.claim_static_ips() |
531 | + self.assertItemsEqual([], observed) |
532 | |
533 | === modified file 'src/maasserver/testing/factory.py' |
534 | --- src/maasserver/testing/factory.py 2014-06-11 04:38:41 +0000 |
535 | +++ src/maasserver/testing/factory.py 2014-06-11 05:52:49 +0000 |
536 | @@ -354,18 +354,37 @@ |
537 | """Generate a random MAC address, in the form of a MAC object.""" |
538 | return MAC(self.getRandomMACAddress()) |
539 | |
540 | - def make_mac_address(self, address=None, node=None, networks=None): |
541 | + def make_mac_address(self, address=None, node=None, networks=None, |
542 | + **kwargs): |
543 | """Create a MACAddress model object.""" |
544 | if node is None: |
545 | node = self.make_node() |
546 | if address is None: |
547 | address = self.getRandomMACAddress() |
548 | - mac = MACAddress(mac_address=MAC(address), node=node) |
549 | + mac = MACAddress(mac_address=MAC(address), node=node, **kwargs) |
550 | mac.save() |
551 | if networks is not None: |
552 | mac.networks.add(*networks) |
553 | return mac |
554 | |
555 | + def make_node_with_mac_attached_to_nodegroupinterface(self, **kwargs): |
556 | + """Create a Node that has a MACAddress which has a |
557 | + NodeGroupInterface. |
558 | + |
559 | + :param **kwargs: Additional parameters to pass to make_node. |
560 | + """ |
561 | + nodegroup = self.make_node_group() |
562 | + node = self.make_node(mac=True, nodegroup=nodegroup, **kwargs) |
563 | + low_ip, high_ip = factory.make_ip_range() |
564 | + ngi = self.make_node_group_interface( |
565 | + nodegroup, static_ip_range_low=low_ip.ipv4().format(), |
566 | + static_ip_range_high=high_ip.ipv4().format(), |
567 | + management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) |
568 | + mac = node.get_primary_mac() |
569 | + mac.cluster_interface = ngi |
570 | + mac.save() |
571 | + return node |
572 | + |
573 | def make_staticipaddress(self, ip=None, alloc_type=IPADDRESS_TYPE.AUTO, |
574 | mac=None): |
575 | """Create and return a StaticIPAddress model object. |