Merge lp:~gmb/maas/1.7-backport-bug-1330765 into lp:maas/1.7
Proposed by: Graham Binns
Status: Rejected
Rejected by: Graham Binns
Proposed branch: lp:~gmb/maas/1.7-backport-bug-1330765
Merge into: lp:maas/1.7
Diff against target: 2061 lines (+708/-804), 6 files modified
  src/maasserver/api/nodes.py (+12/-14)
  src/maasserver/api/tests/test_node.py (+12/-17)
  src/maasserver/models/node.py (+132/-147)
  src/maasserver/models/tests/test_node.py (+516/-576)
  src/maasserver/node_action.py (+3/-6)
  src/maasserver/tests/test_node_action.py (+33/-44)
To merge this branch: bzr merge lp:~gmb/maas/1.7-backport-bug-1330765
Related bugs:
Reviewers:
  Christian Reis (community): Disapprove
  Gavin Panella (community): Approve
Review via email: mp+239175@code.launchpad.net
Commit message
Backport of all the fixes for bug 1330765 to 1.7. Also backport the fix for bug 1382108.
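The heart of the change is a move from batch operations on the Node manager to per-node methods. A minimal sketch of the calling convention before and after, using only names that appear in the diff below:

    # Before: manager-level batch calls that return the nodes acted upon.
    nodes_stopped = Node.objects.stop_nodes(
        [node.system_id], user, stop_mode='soft')
    if len(nodes_stopped) == 0:
        pass  # no power command was sent

    # After: per-node methods. stop() returns True when a power-off command
    # is sent, False when the node cannot be stopped, and None when the user
    # lacks edit permission; start() raises (e.g. MultipleFailures) instead.
    power_action_sent = node.stop(user, stop_mode='soft')
    node.start(user, user_data=user_data)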
Description of the change
Christian Reis (kiko) wrote:
This looks like too much for 1.7.
review: Disapprove
Graham Binns (gmb) wrote:
On 22 October 2014 11:59, Christian Reis <email address hidden> wrote:
> This looks like too much for 1.7.
Patch size doesn't equate to risk. There's a huge chunk of removals
and mechanical changes here. Moreover, this fixes some fairly
significant bugs. Is it really worth the risk of *not* landing it?
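The mechanical changes are mostly one-line call-site rewrites of this shape (taken verbatim from the diff below):

    # Before
    Node.objects.stop_nodes([self.system_id], user)
    # After
    self.stop(user)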
Christian Reis (kiko) wrote:
Yes, because it is too big.
Christian Reis (kiko) wrote:
Waitin' for a simpler fix for bug 1382108, as we discussed today.
review: Disapprove
Preview Diff
1 | === modified file 'src/maasserver/api/nodes.py' |
2 | --- src/maasserver/api/nodes.py 2014-10-20 22:01:33 +0000 |
3 | +++ src/maasserver/api/nodes.py 2014-10-22 10:46:23 +0000 |
4 | @@ -259,12 +259,11 @@ |
5 | node = Node.objects.get_node_or_404( |
6 | system_id=system_id, user=request.user, |
7 | perm=NODE_PERMISSION.EDIT) |
8 | - nodes_stopped = Node.objects.stop_nodes( |
9 | - [node.system_id], request.user, stop_mode=stop_mode) |
10 | - if len(nodes_stopped) == 0: |
11 | + power_action_sent = node.stop(request.user, stop_mode=stop_mode) |
12 | + if power_action_sent: |
13 | + return node |
14 | + else: |
15 | return None |
16 | - else: |
17 | - return node |
18 | |
19 | @operation(idempotent=False) |
20 | def start(self, request, system_id): |
21 | @@ -285,12 +284,14 @@ |
22 | user_data = request.POST.get('user_data', None) |
23 | series = request.POST.get('distro_series', None) |
24 | license_key = request.POST.get('license_key', None) |
25 | + |
26 | + node = Node.objects.get_node_or_404( |
27 | + system_id=system_id, user=request.user, |
28 | + perm=NODE_PERMISSION.EDIT) |
29 | + |
30 | if user_data is not None: |
31 | user_data = b64decode(user_data) |
32 | if series is not None or license_key is not None: |
33 | - node = Node.objects.get_node_or_404( |
34 | - system_id=system_id, user=request.user, |
35 | - perm=NODE_PERMISSION.EDIT) |
36 | Form = get_node_edit_form(request.user) |
37 | form = Form(instance=node) |
38 | if series is not None: |
39 | @@ -301,19 +302,16 @@ |
40 | form.save() |
41 | else: |
42 | raise ValidationError(form.errors) |
43 | + |
44 | try: |
45 | - nodes = Node.objects.start_nodes( |
46 | - [system_id], request.user, user_data=user_data) |
47 | + node.start(request.user, user_data=user_data) |
48 | except StaticIPAddressExhaustion: |
49 | # The API response should contain error text with the |
50 | # system_id in it, as that is the primary API key to a node. |
51 | raise StaticIPAddressExhaustion( |
52 | "%s: Unable to allocate static IP due to address" |
53 | " exhaustion." % system_id) |
54 | - if len(nodes) == 0: |
55 | - raise PermissionDenied( |
56 | - "You are not allowed to start up this node.") |
57 | - return nodes[0] |
58 | + return node |
59 | |
60 | @operation(idempotent=False) |
61 | def release(self, request, system_id): |
62 | |
63 | === modified file 'src/maasserver/api/tests/test_node.py' |
64 | --- src/maasserver/api/tests/test_node.py 2014-09-29 10:26:31 +0000 |
65 | +++ src/maasserver/api/tests/test_node.py 2014-10-22 10:46:23 +0000 |
66 | @@ -215,28 +215,27 @@ |
67 | |
68 | def test_POST_stop_checks_permission(self): |
69 | node = factory.make_Node() |
70 | - stop_nodes = self.patch_autospec(Node.objects, "stop_nodes") |
71 | + node_stop = self.patch(node, 'stop') |
72 | response = self.client.post(self.get_node_uri(node), {'op': 'stop'}) |
73 | self.assertEqual(httplib.FORBIDDEN, response.status_code) |
74 | - self.assertThat(stop_nodes, MockNotCalled()) |
75 | + self.assertThat(node_stop, MockNotCalled()) |
76 | |
77 | def test_POST_stop_returns_nothing_if_node_was_not_stopped(self): |
78 | # The node may not be stopped by stop_nodes because, for example, its |
79 | # power type does not support it. In this case the node is not |
80 | # returned to the caller. |
81 | node = factory.make_Node(owner=self.logged_in_user) |
82 | - stop_nodes = self.patch_autospec(Node.objects, "stop_nodes") |
83 | - stop_nodes.return_value = [] |
84 | + node_stop = self.patch(node_module.Node, 'stop') |
85 | + node_stop.return_value = False |
86 | response = self.client.post(self.get_node_uri(node), {'op': 'stop'}) |
87 | self.assertEqual(httplib.OK, response.status_code) |
88 | self.assertIsNone(json.loads(response.content)) |
89 | - self.assertThat(stop_nodes, MockCalledOnceWith( |
90 | - [node.system_id], ANY, stop_mode=ANY)) |
91 | + self.assertThat(node_stop, MockCalledOnceWith( |
92 | + ANY, stop_mode=ANY)) |
93 | |
94 | def test_POST_stop_returns_node(self): |
95 | node = factory.make_Node(owner=self.logged_in_user) |
96 | - stop_nodes = self.patch_autospec(Node.objects, "stop_nodes") |
97 | - stop_nodes.return_value = [node] |
98 | + self.patch(node_module.Node, 'stop').return_value = True |
99 | response = self.client.post(self.get_node_uri(node), {'op': 'stop'}) |
100 | self.assertEqual(httplib.OK, response.status_code) |
101 | self.assertEqual( |
102 | @@ -246,23 +245,20 @@ |
103 | node = factory.make_Node( |
104 | owner=self.logged_in_user, mac=True, |
105 | power_type='ether_wake') |
106 | - stop_nodes = self.patch_autospec(Node.objects, "stop_nodes") |
107 | - stop_nodes.return_value = [node] |
108 | + self.patch(node, 'stop') |
109 | self.client.post(self.get_node_uri(node), {'op': 'stop'}) |
110 | response = self.client.post(self.get_node_uri(node), {'op': 'stop'}) |
111 | self.assertEqual(httplib.OK, response.status_code) |
112 | |
113 | def test_POST_stop_stops_nodes(self): |
114 | node = factory.make_Node(owner=self.logged_in_user) |
115 | - stop_nodes = self.patch_autospec(Node.objects, "stop_nodes") |
116 | - stop_nodes.return_value = [node] |
117 | + node_stop = self.patch(node_module.Node, 'stop') |
118 | stop_mode = factory.make_name('stop_mode') |
119 | self.client.post( |
120 | self.get_node_uri(node), {'op': 'stop', 'stop_mode': stop_mode}) |
121 | self.assertThat( |
122 | - stop_nodes, |
123 | - MockCalledOnceWith( |
124 | - [node.system_id], self.logged_in_user, stop_mode=stop_mode)) |
125 | + node_stop, |
126 | + MockCalledOnceWith(self.logged_in_user, stop_mode=stop_mode)) |
127 | |
128 | def test_POST_start_checks_permission(self): |
129 | node = factory.make_Node() |
130 | @@ -1278,8 +1274,7 @@ |
131 | def test_abort_operation_changes_state(self): |
132 | node = factory.make_Node( |
133 | status=NODE_STATUS.DISK_ERASING, owner=self.logged_in_user) |
134 | - stop_nodes = self.patch_autospec(Node.objects, "stop_nodes") |
135 | - stop_nodes.return_value = [node] |
136 | + self.patch_autospec(node_module.Node, "stop") |
137 | response = self.client.post( |
138 | self.get_node_uri(node), {'op': 'abort_operation'}) |
139 | self.assertEqual(httplib.OK, response.status_code) |
140 | |
141 | === modified file 'src/maasserver/models/node.py' |
142 | --- src/maasserver/models/node.py 2014-10-15 20:00:42 +0000 |
143 | +++ src/maasserver/models/node.py 2014-10-22 10:46:23 +0000 |
144 | @@ -325,140 +325,6 @@ |
145 | available_nodes = self.get_nodes(for_user, NODE_PERMISSION.VIEW) |
146 | return available_nodes.filter(status=NODE_STATUS.READY) |
147 | |
148 | - def stop_nodes(self, ids, by_user, stop_mode='hard'): |
149 | - """Request on given user's behalf that the given nodes be shut down. |
150 | - |
151 | - Shutdown is only requested for nodes that the user has ownership |
152 | - privileges for; any other nodes in the request are ignored. |
153 | - |
154 | - :param ids: The `system_id` values for nodes to be shut down. |
155 | - :type ids: Sequence |
156 | - :param by_user: Requesting user. |
157 | - :type by_user: User_ |
158 | - :param stop_mode: Power off mode - usually 'soft' or 'hard'. |
159 | - :type stop_mode: unicode |
160 | - :return: Those Nodes for which shutdown was actually requested. |
161 | - :rtype: list |
162 | - """ |
163 | - # Obtain node model objects for each node specified. |
164 | - nodes = self.get_nodes(by_user, NODE_PERMISSION.EDIT, ids=ids) |
165 | - |
166 | - # Helper function to whittle the list of nodes down to those that we |
167 | - # can actually stop, and keep hold of their power control info. |
168 | - def gen_power_info(nodes): |
169 | - for node in nodes: |
170 | - power_info = node.get_effective_power_info() |
171 | - if power_info.can_be_stopped: |
172 | - # Smuggle in a hint about how to power-off the node. |
173 | - power_info.power_parameters['power_off_mode'] = stop_mode |
174 | - yield node, power_info |
175 | - |
176 | - # Create info that we can pass into the reactor (no model objects). |
177 | - nodes_stop_info = list( |
178 | - (node.system_id, node.hostname, node.nodegroup.uuid, power_info) |
179 | - for node, power_info in gen_power_info(nodes)) |
180 | - powered_systems = [ |
181 | - system_id for system_id, _, _, _ in nodes_stop_info] |
182 | - |
183 | - # Request that these nodes be powered off and wait for the |
184 | - # commands to return or fail. |
185 | - deferreds = power_off_nodes(nodes_stop_info).viewvalues() |
186 | - wait_for_power_commands(deferreds) |
187 | - |
188 | - # Return a list of those nodes that we've sent power commands for. |
189 | - return list( |
190 | - node for node in nodes if node.system_id in powered_systems) |
191 | - |
192 | - def start_nodes(self, ids, by_user, user_data=None): |
193 | - """Request on given user's behalf that the given nodes be started up. |
194 | - |
195 | - Power-on is only requested for nodes that the user has ownership |
196 | - privileges for; any other nodes in the request are ignored. |
197 | - |
198 | - Nodes are also ignored if they don't have a valid power type |
199 | - configured. |
200 | - |
201 | - :param ids: The `system_id` values for nodes to be started. |
202 | - :type ids: Sequence |
203 | - :param by_user: Requesting user. |
204 | - :type by_user: User_ |
205 | - :param user_data: Optional blob of user-data to be made available to |
206 | - the nodes through the metadata service. If not given, any |
207 | - previous user data is used. |
208 | - :type user_data: unicode |
209 | - :return: Those Nodes for which power-on was actually requested. |
210 | - :rtype: list |
211 | - |
212 | - :raises MultipleFailures: When there are failures originating from a |
213 | - remote process. There could be one or more failures -- it's not |
214 | - strictly *multiple* -- but they do all originate from comms with |
215 | - remote processes. |
216 | - :raises: `StaticIPAddressExhaustion` if there are not enough IP |
217 | - addresses left in the static range.. |
218 | - """ |
219 | - # Avoid circular imports. |
220 | - from metadataserver.models import NodeUserData |
221 | - |
222 | - # Obtain node model objects for each node specified. |
223 | - nodes = self.get_nodes(by_user, NODE_PERMISSION.EDIT, ids=ids) |
224 | - |
225 | - # Record the same user data for all nodes we've been *requested* to |
226 | - # start, regardless of whether or not we actually can; the user may |
227 | - # choose to manually start them. |
228 | - NodeUserData.objects.bulk_set_user_data(nodes, user_data) |
229 | - |
230 | - # Claim static IP addresses for all nodes we've been *requested* to |
231 | - # start, such that they're recorded in the database. This results in a |
232 | - # mapping of nodegroups to (ips, macs). |
233 | - static_mappings = defaultdict(dict) |
234 | - for node in nodes: |
235 | - if node.status == NODE_STATUS.ALLOCATED: |
236 | - claims = node.claim_static_ip_addresses() |
237 | - static_mappings[node.nodegroup].update(claims) |
238 | - node.start_deployment() |
239 | - |
240 | - # XXX 2014-06-17 bigjools bug=1330765 |
241 | - # If the above fails it needs to release the static IPs back to the |
242 | - # pool. An enclosing transaction or savepoint from the caller may take |
243 | - # care of this, given that a serious problem above will result in an |
244 | - # exception. If we're being belt-n-braces though it ought to clear up |
245 | - # before returning too. As part of the robustness work coming up, it |
246 | - # also needs to inform the user. |
247 | - |
248 | - # Update host maps and wait for them so that we can report failures |
249 | - # directly to the caller. |
250 | - update_host_maps_failures = list(update_host_maps(static_mappings)) |
251 | - if len(update_host_maps_failures) != 0: |
252 | - raise MultipleFailures(*update_host_maps_failures) |
253 | - |
254 | - # Update the DNS zone with the new static IP info as necessary. |
255 | - from maasserver.dns.config import change_dns_zones |
256 | - change_dns_zones({node.nodegroup for node in nodes}) |
257 | - |
258 | - # Helper function to whittle the list of nodes down to those that we |
259 | - # can actually start, and keep hold of their power control info. |
260 | - def gen_power_info(nodes): |
261 | - for node in nodes: |
262 | - power_info = node.get_effective_power_info() |
263 | - if power_info.can_be_started: |
264 | - yield node, power_info |
265 | - |
266 | - # Create info that we can pass into the reactor (no model objects). |
267 | - nodes_start_info = list( |
268 | - (node.system_id, node.hostname, node.nodegroup.uuid, power_info) |
269 | - for node, power_info in gen_power_info(nodes)) |
270 | - powered_systems = [ |
271 | - system_id for system_id, _, _, _ in nodes_start_info] |
272 | - |
273 | - # Request that these nodes be powered off and wait for the |
274 | - # commands to return or fail. |
275 | - deferreds = power_on_nodes(nodes_start_info).viewvalues() |
276 | - wait_for_power_commands(deferreds) |
277 | - |
278 | - # Return a list of those nodes that we've sent power commands for. |
279 | - return list( |
280 | - node for node in nodes if node.system_id in powered_systems) |
281 | - |
282 | |
283 | def patch_pgarray_types(): |
284 | """Monkey-patch incompatibility with recent versions of `djorm_pgarray`. |
285 | @@ -969,11 +835,7 @@ |
286 | self.save() |
287 | transaction.commit() |
288 | try: |
289 | - # We don't check for which nodes we've started here, because |
290 | - # it's possible we can't start the node - its power type may not |
291 | - # allow us to do that. |
292 | - Node.objects.start_nodes( |
293 | - [self.system_id], user, user_data=commissioning_user_data) |
294 | + self.start(user, user_data=commissioning_user_data) |
295 | except Exception as ex: |
296 | maaslog.error( |
297 | "%s: Unable to start node: %s", |
298 | @@ -997,10 +859,7 @@ |
299 | maaslog.info( |
300 | "%s: Aborting commissioning", self.hostname) |
301 | try: |
302 | - # We don't check for which nodes we've stopped here, because |
303 | - # it's possible we can't stop the node - its power type may |
304 | - # not allow us to do that. |
305 | - Node.objects.stop_nodes([self.system_id], user) |
306 | + self.stop(user) |
307 | except Exception as ex: |
308 | maaslog.error( |
309 | "%s: Unable to shut node down: %s", |
310 | @@ -1287,8 +1146,7 @@ |
311 | self.save() |
312 | transaction.commit() |
313 | try: |
314 | - Node.objects.start_nodes( |
315 | - [self.system_id], user, user_data=disk_erase_user_data) |
316 | + self.start(user, user_data=disk_erase_user_data) |
317 | except Exception as ex: |
318 | maaslog.error( |
319 | "%s: Unable to start node: %s", |
320 | @@ -1318,7 +1176,7 @@ |
321 | maaslog.info( |
322 | "%s: Aborting disk erasing", self.hostname) |
323 | try: |
324 | - Node.objects.stop_nodes([self.system_id], user) |
325 | + self.stop(user) |
326 | except Exception as ex: |
327 | maaslog.error( |
328 | "%s: Unable to shut node down: %s", |
329 | @@ -1348,7 +1206,7 @@ |
330 | """ |
331 | maaslog.info("%s: Releasing node", self.hostname) |
332 | try: |
333 | - Node.objects.stop_nodes([self.system_id], self.owner) |
334 | + self.stop(self.owner) |
335 | except Exception as ex: |
336 | maaslog.error( |
337 | "%s: Unable to shut node down: %s", self.hostname, |
338 | @@ -1550,3 +1408,130 @@ |
339 | return self.pxe_mac |
340 | |
341 | return self.macaddress_set.first() |
342 | + |
343 | + def is_pxe_mac_on_managed_interface(self): |
344 | + pxe_mac = self.get_pxe_mac() |
345 | + if pxe_mac is not None: |
346 | + cluster_interface = pxe_mac.cluster_interface |
347 | + if cluster_interface is not None: |
348 | + return cluster_interface.is_managed |
349 | + return False |
350 | + |
351 | + def start(self, by_user, user_data=None): |
352 | + """Request on given user's behalf that the node be started up. |
353 | + |
354 | + :param by_user: Requesting user. |
355 | + :type by_user: User_ |
356 | + :param user_data: Optional blob of user-data to be made available to |
357 | + the node through the metadata service. If not given, any |
358 | + previous user data is used. |
359 | + :type user_data: unicode |
360 | + |
361 | + :raises MultipleFailures: When there are failures originating from a |
362 | + remote process. There could be one or more failures -- it's not |
363 | + strictly *multiple* -- but they do all originate from comms with |
364 | + remote processes. |
365 | + :raises: `StaticIPAddressExhaustion` if there are not enough IP |
366 | + addresses left in the static range for this node to get all |
367 | + the addresses it needs. |
368 | + """ |
369 | + # Avoid circular imports. |
370 | + from metadataserver.models import NodeUserData |
371 | + from maasserver.dns.config import change_dns_zones |
372 | + |
373 | + if not by_user.has_perm(NODE_PERMISSION.EDIT, self): |
374 | + # You can't start a node you don't own unless you're an |
375 | + # admin, so we return early. This is consistent with the |
376 | + # behaviour of NodeManager.start_nodes(); it may be better to |
377 | + # raise an error here. |
378 | + return |
379 | + |
380 | + # Record the user data for the node. Note that we do this |
381 | + # whether or not we can actually send power commands to the |
382 | + # node; the user may choose to start it manually. |
383 | + NodeUserData.objects.set_user_data(self, user_data) |
384 | + |
385 | + # Claim static IP addresses for the node if it's ALLOCATED. |
386 | + if self.status == NODE_STATUS.ALLOCATED: |
387 | + static_mappings = defaultdict(dict) |
388 | + claims = self.claim_static_ip_addresses() |
389 | + # If the PXE mac is on a managed interface then we can ask |
390 | + # the cluster to generate the DHCP host map(s). |
391 | + if self.is_pxe_mac_on_managed_interface(): |
392 | + static_mappings[self.nodegroup].update(claims) |
393 | + update_host_maps_failures = list( |
394 | + update_host_maps(static_mappings)) |
395 | + if len(update_host_maps_failures) != 0: |
396 | + # We've hit errors, so release any IPs we've claimed |
397 | + # and then raise the errors for the call site to |
398 | + # handle. |
399 | + StaticIPAddress.objects.deallocate_by_node(self) |
400 | + raise MultipleFailures(*update_host_maps_failures) |
401 | + |
402 | + if self.status == NODE_STATUS.ALLOCATED: |
403 | + self.start_deployment() |
404 | + |
405 | + # Update the DNS zone with the new static IP info as necessary. |
406 | + change_dns_zones(self.nodegroup) |
407 | + |
408 | + power_info = self.get_effective_power_info() |
409 | + if not power_info.can_be_started: |
410 | + # The node can't be powered on by MAAS, so return early. |
411 | + # Everything we've done up to this point is still valid; |
412 | + # this is not an error state. |
413 | + return |
414 | + |
415 | + # We need to convert the node into something that we can |
416 | + # pass into the reactor. |
417 | + start_info = ( |
418 | + self.system_id, self.hostname, self.nodegroup.uuid, power_info,) |
419 | + |
420 | + try: |
421 | + # Send the power on command to the node and wait for it to |
422 | + # return. |
423 | + deferreds = power_on_nodes([start_info]).viewvalues() |
424 | + wait_for_power_commands(deferreds) |
425 | + except: |
426 | + # If we encounter any failure here, we deallocate the static |
427 | + # IPs we claimed earlier. We don't try to handle the error; |
428 | + # that's the job of the call site. |
429 | + StaticIPAddress.objects.deallocate_by_node(self) |
430 | + raise |
431 | + |
432 | + def stop(self, by_user, stop_mode='hard'): |
433 | + """Request that the node be powered down. |
434 | + |
435 | + :param by_user: Requesting user. |
436 | + :type by_user: User_ |
437 | + :param stop_mode: Power off mode - usually 'soft' or 'hard'. |
438 | + :type stop_mode: unicode |
439 | + :raises MultipleFailures: When there are failures originating |
440 | + from the RPC power action. |
441 | + :return: True if the power action was sent to the node; False if |
442 | + it wasn't sent. If the user doesn't have permission to stop |
443 | + the node, return None. |
444 | + """ |
445 | + if not by_user.has_perm(NODE_PERMISSION.EDIT, self): |
446 | + # You can't stop a node you don't own unless you're an |
447 | + # admin, so we return early. This is consistent with the |
448 | + # behaviour of NodeManager.stop_nodes(); it may be better to |
449 | + # raise an error here. |
450 | + return |
451 | + |
452 | + power_info = self.get_effective_power_info() |
453 | + if not power_info.can_be_stopped: |
454 | + # We can't stop this node, so just return; trying to stop a |
455 | + # node we don't know how to stop isn't an error state, but |
456 | + # it's a no-op. |
457 | + return False |
458 | + |
459 | + # Smuggle in a hint about how to power-off the node. |
460 | + power_info.power_parameters['power_off_mode'] = stop_mode |
461 | + stop_info = ( |
462 | + self.system_id, self.hostname, self.nodegroup.uuid, power_info) |
463 | + |
464 | + # Request that the node be powered off and wait for the command |
465 | + # to return or fail. |
466 | + deferreds = power_off_nodes([stop_info]).viewvalues() |
467 | + wait_for_power_commands(deferreds) |
468 | + return True |
469 | |
470 | === modified file 'src/maasserver/models/tests/test_node.py' |
471 | --- src/maasserver/models/tests/test_node.py 2014-10-15 22:29:41 +0000 |
472 | +++ src/maasserver/models/tests/test_node.py 2014-10-22 10:46:23 +0000 |
473 | @@ -18,7 +18,6 @@ |
474 | datetime, |
475 | timedelta, |
476 | ) |
477 | -from itertools import izip |
478 | import random |
479 | |
480 | import crochet |
481 | @@ -84,7 +83,6 @@ |
482 | from maastesting.matchers import ( |
483 | MockAnyCall, |
484 | MockCalledOnceWith, |
485 | - MockCallsMatch, |
486 | MockNotCalled, |
487 | ) |
488 | from metadataserver.enum import RESULT_TYPE |
489 | @@ -99,7 +97,6 @@ |
490 | ) |
491 | from mock import ( |
492 | ANY, |
493 | - call, |
494 | Mock, |
495 | sentinel, |
496 | ) |
497 | @@ -112,14 +109,10 @@ |
498 | NoConnectionsAvailable, |
499 | ) |
500 | from provisioningserver.rpc.power import QUERY_POWER_TYPES |
501 | -from provisioningserver.rpc.testing import ( |
502 | - always_succeed_with, |
503 | - TwistedLoggerFixture, |
504 | - ) |
505 | +from provisioningserver.rpc.testing import always_succeed_with |
506 | from provisioningserver.utils.enum import map_enum |
507 | from testtools.matchers import ( |
508 | Equals, |
509 | - HasLength, |
510 | Is, |
511 | IsInstance, |
512 | MatchesStructure, |
513 | @@ -740,13 +733,13 @@ |
514 | owner = factory.make_User() |
515 | node = factory.make_Node( |
516 | status=NODE_STATUS.ALLOCATED, owner=owner, agent_name=agent_name) |
517 | - start_nodes = self.patch(Node.objects, "start_nodes") |
518 | + node_start = self.patch(node, 'start') |
519 | node.start_disk_erasing(owner) |
520 | - self.assertEqual( |
521 | - (owner, NODE_STATUS.DISK_ERASING, agent_name), |
522 | - (node.owner, node.status, node.agent_name)) |
523 | - self.assertThat(start_nodes, MockCalledOnceWith( |
524 | - [node.system_id], owner, user_data=ANY)) |
525 | + self.expectThat(node.owner, Equals(owner)) |
526 | + self.expectThat(node.status, Equals(NODE_STATUS.DISK_ERASING)) |
527 | + self.expectThat(node.agent_name, Equals(agent_name)) |
528 | + self.assertThat( |
529 | + node_start, MockCalledOnceWith(owner, user_data=ANY)) |
530 | |
531 | def test_abort_disk_erasing_changes_state_and_stops_node(self): |
532 | agent_name = factory.make_name('agent-name') |
533 | @@ -754,14 +747,12 @@ |
534 | node = factory.make_Node( |
535 | status=NODE_STATUS.DISK_ERASING, owner=owner, |
536 | agent_name=agent_name) |
537 | - stop_nodes = self.patch(Node.objects, "stop_nodes") |
538 | - stop_nodes.return_value = [node] |
539 | + node_stop = self.patch(node, 'stop') |
540 | node.abort_disk_erasing(owner) |
541 | self.assertEqual( |
542 | (owner, NODE_STATUS.FAILED_DISK_ERASING, agent_name), |
543 | (node.owner, node.status, node.agent_name)) |
544 | - self.assertThat(stop_nodes, MockCalledOnceWith( |
545 | - [node.system_id], owner)) |
546 | + self.assertThat(node_stop, MockCalledOnceWith(owner)) |
547 | |
548 | def test_start_disk_erasing_reverts_to_sane_state_on_error(self): |
549 | # If start_disk_erasing encounters an error when calling |
550 | @@ -769,53 +760,35 @@ |
551 | # Failures encountered in one call to start_disk_erasing() won't |
552 | # affect subsequent calls. |
553 | admin = factory.make_admin() |
554 | - nodes = [ |
555 | - factory.make_Node( |
556 | - status=NODE_STATUS.ALLOCATED, power_type="virsh") |
557 | - for _ in range(3) |
558 | - ] |
559 | + node = factory.make_Node(status=NODE_STATUS.ALLOCATED) |
560 | generate_user_data = self.patch(disk_erasing, 'generate_user_data') |
561 | - start_nodes = self.patch(Node.objects, 'start_nodes') |
562 | - start_nodes.side_effect = [ |
563 | - None, |
564 | - MultipleFailures( |
565 | - Failure(NoConnectionsAvailable())), |
566 | - None, |
567 | - ] |
568 | + node_start = self.patch(node, 'start') |
569 | + node_start.side_effect = MultipleFailures( |
570 | + Failure(NoConnectionsAvailable())) |
571 | |
572 | with transaction.atomic(): |
573 | - for node in nodes: |
574 | - try: |
575 | - node.start_disk_erasing(admin) |
576 | - except RPC_EXCEPTIONS: |
577 | - # Suppress all the expected errors coming out of |
578 | - # start_disk_erasing() because they're tested |
579 | - # eleswhere. |
580 | - pass |
581 | + try: |
582 | + node.start_disk_erasing(admin) |
583 | + except RPC_EXCEPTIONS: |
584 | + # Suppress all the expected errors coming out of |
585 | + # start_disk_erasing() because they're tested |
586 | + # elsewhere. |
587 | + pass |
588 | |
589 | - expected_calls = ( |
590 | - call( |
591 | - [node.system_id], admin, |
592 | - user_data=generate_user_data.return_value) |
593 | - for node in nodes) |
594 | self.assertThat( |
595 | - start_nodes, MockCallsMatch(*expected_calls)) |
596 | - self.assertEqual( |
597 | - [ |
598 | - NODE_STATUS.DISK_ERASING, |
599 | - NODE_STATUS.FAILED_DISK_ERASING, |
600 | - NODE_STATUS.DISK_ERASING, |
601 | - ], |
602 | - [node.status for node in nodes]) |
603 | + node_start, MockCalledOnceWith( |
604 | + admin, user_data=generate_user_data.return_value)) |
605 | + self.assertEqual(NODE_STATUS.FAILED_DISK_ERASING, node.status) |
606 | |
607 | def test_start_disk_erasing_logs_and_raises_errors_in_starting(self): |
608 | admin = factory.make_admin() |
609 | node = factory.make_Node(status=NODE_STATUS.ALLOCATED) |
610 | maaslog = self.patch(node_module, 'maaslog') |
611 | - exception = NoConnectionsAvailable(factory.make_name()) |
612 | - self.patch(Node.objects, 'start_nodes').side_effect = exception |
613 | + exception_type = factory.make_exception_type() |
614 | + exception = exception_type(factory.make_name()) |
615 | + self.patch(node, 'start').side_effect = exception |
616 | self.assertRaises( |
617 | - NoConnectionsAvailable, node.start_disk_erasing, admin) |
618 | + exception_type, node.start_disk_erasing, admin) |
619 | self.assertEqual(NODE_STATUS.FAILED_DISK_ERASING, node.status) |
620 | self.assertThat( |
621 | maaslog.error, MockCalledOnceWith( |
622 | @@ -846,51 +819,33 @@ |
623 | # Failures encountered in one call to start_disk_erasing() won't |
624 | # affect subsequent calls. |
625 | admin = factory.make_admin() |
626 | - nodes = [ |
627 | - factory.make_Node( |
628 | - status=NODE_STATUS.DISK_ERASING, power_type="virsh") |
629 | - for _ in range(3) |
630 | - ] |
631 | - stop_nodes = self.patch(Node.objects, 'stop_nodes') |
632 | - stop_nodes.return_value = [ |
633 | - [node] for node in nodes |
634 | - ] |
635 | - stop_nodes.side_effect = [ |
636 | - None, |
637 | - MultipleFailures( |
638 | - Failure(NoConnectionsAvailable())), |
639 | - None, |
640 | - ] |
641 | + node = factory.make_Node( |
642 | + status=NODE_STATUS.DISK_ERASING, power_type="virsh") |
643 | + node_stop = self.patch(node, 'stop') |
644 | + node_stop.side_effect = MultipleFailures( |
645 | + Failure(NoConnectionsAvailable())) |
646 | |
647 | with transaction.atomic(): |
648 | - for node in nodes: |
649 | - try: |
650 | - node.abort_disk_erasing(admin) |
651 | - except RPC_EXCEPTIONS: |
652 | - # Suppress all the expected errors coming out of |
653 | - # abort_disk_erasing() because they're tested |
654 | - # eleswhere. |
655 | - pass |
656 | + try: |
657 | + node.abort_disk_erasing(admin) |
658 | + except RPC_EXCEPTIONS: |
659 | + # Suppress all the expected errors coming out of |
660 | + # abort_disk_erasing() because they're tested |
661 | + # elsewhere. |
662 | + pass |
663 | |
664 | - self.assertThat( |
665 | - stop_nodes, MockCallsMatch( |
666 | - *(call([node.system_id], admin) for node in nodes))) |
667 | - self.assertEqual( |
668 | - [ |
669 | - NODE_STATUS.FAILED_DISK_ERASING, |
670 | - NODE_STATUS.DISK_ERASING, |
671 | - NODE_STATUS.FAILED_DISK_ERASING, |
672 | - ], |
673 | - [node.status for node in nodes]) |
674 | + self.assertThat(node_stop, MockCalledOnceWith(admin)) |
675 | + self.assertEqual(NODE_STATUS.DISK_ERASING, node.status) |
676 | |
677 | def test_abort_disk_erasing_logs_and_raises_errors_in_stopping(self): |
678 | admin = factory.make_admin() |
679 | node = factory.make_Node(status=NODE_STATUS.DISK_ERASING) |
680 | maaslog = self.patch(node_module, 'maaslog') |
681 | - exception = NoConnectionsAvailable(factory.make_name()) |
682 | - self.patch(Node.objects, 'stop_nodes').side_effect = exception |
683 | + exception_class = factory.make_exception_type() |
684 | + exception = exception_class(factory.make_name()) |
685 | + self.patch(node, 'stop').side_effect = exception |
686 | self.assertRaises( |
687 | - NoConnectionsAvailable, node.abort_disk_erasing, admin) |
688 | + exception_class, node.abort_disk_erasing, admin) |
689 | self.assertEqual(NODE_STATUS.DISK_ERASING, node.status) |
690 | self.assertThat( |
691 | maaslog.error, MockCalledOnceWith( |
692 | @@ -1188,13 +1143,13 @@ |
693 | self.assertEqual("", node.distro_series) |
694 | |
695 | def test_release_powers_off_node(self): |
696 | - stop_nodes = self.patch_autospec(Node.objects, "stop_nodes") |
697 | user = factory.make_User() |
698 | node = factory.make_Node( |
699 | status=NODE_STATUS.ALLOCATED, owner=user, power_type='virsh') |
700 | + node_stop = self.patch(node, 'stop') |
701 | node.release() |
702 | self.assertThat( |
703 | - stop_nodes, MockCalledOnceWith([node.system_id], user)) |
704 | + node_stop, MockCalledOnceWith(user)) |
705 | |
706 | def test_release_deallocates_static_ips(self): |
707 | deallocate = self.patch(StaticIPAddressManager, 'deallocate_by_node') |
708 | @@ -1206,6 +1161,7 @@ |
709 | self.assertThat(deallocate, MockCalledOnceWith(node)) |
710 | |
711 | def test_release_updates_dns(self): |
712 | + self.patch(node_module, 'wait_for_power_commands') |
713 | change_dns_zones = self.patch(dns_config, 'change_dns_zones') |
714 | nodegroup = factory.make_NodeGroup( |
715 | management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, |
716 | @@ -1219,9 +1175,10 @@ |
717 | def test_release_logs_and_raises_errors_in_stopping(self): |
718 | node = factory.make_Node(status=NODE_STATUS.DEPLOYED) |
719 | maaslog = self.patch(node_module, 'maaslog') |
720 | - exception = NoConnectionsAvailable(factory.make_name()) |
721 | - self.patch(Node.objects, 'stop_nodes').side_effect = exception |
722 | - self.assertRaises(NoConnectionsAvailable, node.release) |
723 | + exception_class = factory.make_exception_type() |
724 | + exception = exception_class(factory.make_name()) |
725 | + self.patch(node, 'stop').side_effect = exception |
726 | + self.assertRaises(exception_class, node.release) |
727 | self.assertEqual(NODE_STATUS.DEPLOYED, node.status) |
728 | self.assertThat( |
729 | maaslog.error, MockCalledOnceWith( |
730 | @@ -1231,45 +1188,27 @@ |
731 | def test_release_reverts_to_sane_state_on_error(self): |
732 | # If release() encounters an error when stopping the node, it |
733 | # will leave the node in its previous state (i.e. DEPLOYED). |
734 | - nodes = [ |
735 | - factory.make_Node( |
736 | - status=NODE_STATUS.DEPLOYED, power_type="virsh") |
737 | - for _ in range(3) |
738 | - ] |
739 | - stop_nodes = self.patch(Node.objects, 'stop_nodes') |
740 | - stop_nodes.return_value = [ |
741 | - [node] for node in nodes |
742 | - ] |
743 | - stop_nodes.side_effect = [ |
744 | - None, |
745 | - MultipleFailures( |
746 | - Failure(NoConnectionsAvailable())), |
747 | - None, |
748 | - ] |
749 | + node = factory.make_Node( |
750 | + status=NODE_STATUS.DEPLOYED, power_type="virsh", |
751 | + owner=factory.make_User()) |
752 | + node_stop = self.patch(node, 'stop') |
753 | + node_stop.side_effect = MultipleFailures( |
754 | + Failure(NoConnectionsAvailable())) |
755 | |
756 | with transaction.atomic(): |
757 | - for node in nodes: |
758 | - try: |
759 | - node.release() |
760 | - except RPC_EXCEPTIONS: |
761 | - # Suppress all expected errors; we test for them |
762 | - # elsewhere. |
763 | - pass |
764 | + try: |
765 | + node.release() |
766 | + except RPC_EXCEPTIONS: |
767 | + # Suppress all expected errors; we test for them |
768 | + # elsewhere. |
769 | + pass |
770 | |
771 | - self.assertThat( |
772 | - stop_nodes, MockCallsMatch( |
773 | - *(call([node.system_id], None) for node in nodes))) |
774 | - self.assertEqual( |
775 | - [ |
776 | - NODE_STATUS.RELEASING, |
777 | - NODE_STATUS.DEPLOYED, |
778 | - NODE_STATUS.RELEASING, |
779 | - ], |
780 | - [node.status for node in nodes]) |
781 | + self.assertThat(node_stop, MockCalledOnceWith(node.owner)) |
782 | + self.assertEqual(NODE_STATUS.DEPLOYED, node.status) |
783 | |
784 | def test_release_commits_after_status_change(self): |
785 | node = factory.make_Node(status=NODE_STATUS.DEPLOYED) |
786 | - self.patch(Node.objects, 'stop_nodes') |
787 | + self.patch(node, 'stop') |
788 | commit = self.patch(transaction, 'commit') |
789 | node.release() |
790 | self.assertThat(commit, MockCalledOnceWith()) |
791 | @@ -1337,8 +1276,7 @@ |
792 | def test_start_commissioning_changes_status_and_starts_node(self): |
793 | node = factory.make_Node( |
794 | status=NODE_STATUS.NEW, power_type='ether_wake') |
795 | - start_nodes = self.patch(Node.objects, "start_nodes") |
796 | - start_nodes.return_value = [node] |
797 | + node_start = self.patch(node, 'start') |
798 | factory.make_MACAddress(node=node) |
799 | admin = factory.make_admin() |
800 | node.start_commissioning(admin) |
801 | @@ -1347,21 +1285,20 @@ |
802 | 'status': NODE_STATUS.COMMISSIONING, |
803 | } |
804 | self.assertAttributes(node, expected_attrs) |
805 | - self.assertThat(start_nodes, MockCalledOnceWith( |
806 | - [node.system_id], admin, user_data=ANY)) |
807 | + self.assertThat(node_start, MockCalledOnceWith( |
808 | + admin, user_data=ANY)) |
809 | |
810 | def test_start_commissioning_sets_user_data(self): |
811 | - start_nodes = self.patch(Node.objects, "start_nodes") |
812 | - |
813 | node = factory.make_Node(status=NODE_STATUS.NEW) |
814 | + node_start = self.patch(node, 'start') |
815 | user_data = factory.make_string().encode('ascii') |
816 | generate_user_data = self.patch( |
817 | commissioning, 'generate_user_data') |
818 | generate_user_data.return_value = user_data |
819 | admin = factory.make_admin() |
820 | node.start_commissioning(admin) |
821 | - self.assertThat(start_nodes, MockCalledOnceWith( |
822 | - [node.system_id], admin, user_data=user_data)) |
823 | + self.assertThat(node_start, MockCalledOnceWith( |
824 | + admin, user_data=user_data)) |
825 | |
826 | def test_start_commissioning_clears_node_commissioning_results(self): |
827 | node = factory.make_Node(status=NODE_STATUS.NEW) |
828 | @@ -1391,49 +1328,32 @@ |
829 | # start the node, it will revert the node to its previous |
830 | # status. |
831 | admin = factory.make_admin() |
832 | - nodes = [ |
833 | - factory.make_Node(status=NODE_STATUS.NEW, power_type="ether_wake") |
834 | - for _ in range(3) |
835 | - ] |
836 | + node = factory.make_Node(status=NODE_STATUS.NEW) |
837 | generate_user_data = self.patch(commissioning, 'generate_user_data') |
838 | - start_nodes = self.patch(Node.objects, 'start_nodes') |
839 | - start_nodes.side_effect = [ |
840 | - None, |
841 | - MultipleFailures( |
842 | - Failure(NoConnectionsAvailable())), |
843 | - None, |
844 | - ] |
845 | + node_start = self.patch(node, 'start') |
846 | + node_start.side_effect = MultipleFailures( |
847 | + Failure(NoConnectionsAvailable())) |
848 | |
849 | with transaction.atomic(): |
850 | - for node in nodes: |
851 | - try: |
852 | - node.start_commissioning(admin) |
853 | - except RPC_EXCEPTIONS: |
854 | - # Suppress all expected errors; we test for them |
855 | - # elsewhere. |
856 | - pass |
857 | + try: |
858 | + node.start_commissioning(admin) |
859 | + except RPC_EXCEPTIONS: |
860 | + # Suppress all expected errors; we test for them |
861 | + # elsewhere. |
862 | + pass |
863 | |
864 | - expected_calls = ( |
865 | - call( |
866 | - [node.system_id], admin, |
867 | - user_data=generate_user_data.return_value) |
868 | - for node in nodes) |
869 | self.assertThat( |
870 | - start_nodes, MockCallsMatch(*expected_calls)) |
871 | - self.assertEqual( |
872 | - [ |
873 | - NODE_STATUS.COMMISSIONING, |
874 | - NODE_STATUS.NEW, |
875 | - NODE_STATUS.COMMISSIONING |
876 | - ], |
877 | - [node.status for node in nodes]) |
878 | + node_start, |
879 | + MockCalledOnceWith( |
880 | + admin, user_data=generate_user_data.return_value)) |
881 | + self.assertEqual(NODE_STATUS.NEW, node.status) |
882 | |
883 | def test_start_commissioning_logs_and_raises_errors_in_starting(self): |
884 | admin = factory.make_admin() |
885 | node = factory.make_Node(status=NODE_STATUS.NEW) |
886 | maaslog = self.patch(node_module, 'maaslog') |
887 | exception = NoConnectionsAvailable(factory.make_name()) |
888 | - self.patch(Node.objects, 'start_nodes').side_effect = exception |
889 | + self.patch(node, 'start').side_effect = exception |
890 | self.assertRaises( |
891 | NoConnectionsAvailable, node.start_commissioning, admin) |
892 | self.assertEqual(NODE_STATUS.NEW, node.status) |
893 | @@ -1447,50 +1367,32 @@ |
894 | # node, it will revert the node to the state it was in before |
895 | # abort_commissioning() was called. |
896 | admin = factory.make_admin() |
897 | - nodes = [ |
898 | - factory.make_Node( |
899 | - status=NODE_STATUS.COMMISSIONING, power_type="virsh") |
900 | - for _ in range(3) |
901 | - ] |
902 | - stop_nodes = self.patch(Node.objects, 'stop_nodes') |
903 | - stop_nodes.return_value = [ |
904 | - [node] for node in nodes |
905 | - ] |
906 | - stop_nodes.side_effect = [ |
907 | - None, |
908 | - MultipleFailures( |
909 | - Failure(NoConnectionsAvailable())), |
910 | - None, |
911 | - ] |
912 | + node = factory.make_Node( |
913 | + status=NODE_STATUS.COMMISSIONING, power_type="virsh") |
914 | + node_stop = self.patch(node, 'stop') |
915 | + node_stop.side_effect = MultipleFailures( |
916 | + Failure(NoConnectionsAvailable())) |
917 | |
918 | with transaction.atomic(): |
919 | - for node in nodes: |
920 | - try: |
921 | - node.abort_commissioning(admin) |
922 | - except RPC_EXCEPTIONS: |
923 | - # Suppress all expected errors; we test for them |
924 | - # elsewhere. |
925 | - pass |
926 | + try: |
927 | + node.abort_commissioning(admin) |
928 | + except RPC_EXCEPTIONS: |
929 | + # Suppress all expected errors; we test for them |
930 | + # elsewhere. |
931 | + pass |
932 | |
933 | - self.assertThat( |
934 | - stop_nodes, MockCallsMatch( |
935 | - *(call([node.system_id], admin) for node in nodes))) |
936 | - self.assertEqual( |
937 | - [ |
938 | - NODE_STATUS.NEW, |
939 | - NODE_STATUS.COMMISSIONING, |
940 | - NODE_STATUS.NEW, |
941 | - ], |
942 | - [node.status for node in nodes]) |
943 | + self.assertThat(node_stop, MockCalledOnceWith(admin)) |
944 | + self.assertEqual(NODE_STATUS.COMMISSIONING, node.status) |
945 | |
946 | def test_abort_commissioning_logs_and_raises_errors_in_stopping(self): |
947 | admin = factory.make_admin() |
948 | node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) |
949 | maaslog = self.patch(node_module, 'maaslog') |
950 | - exception = NoConnectionsAvailable(factory.make_name()) |
951 | - self.patch(Node.objects, 'stop_nodes').side_effect = exception |
952 | + exception_class = factory.make_exception_type() |
953 | + exception = exception_class(factory.make_name()) |
954 | + self.patch(node, 'stop').side_effect = exception |
955 | self.assertRaises( |
956 | - NoConnectionsAvailable, node.abort_commissioning, admin) |
957 | + exception_class, node.abort_commissioning, admin) |
958 | self.assertEqual(NODE_STATUS.COMMISSIONING, node.status) |
959 | self.assertThat( |
960 | maaslog.error, MockCalledOnceWith( |
961 | @@ -1502,16 +1404,14 @@ |
962 | status=NODE_STATUS.COMMISSIONING, power_type='virsh') |
963 | admin = factory.make_admin() |
964 | |
965 | - stop_nodes = self.patch_autospec(Node.objects, "stop_nodes") |
966 | - stop_nodes.return_value = [node] |
967 | + node_stop = self.patch(node, 'stop') |
968 | |
969 | node.abort_commissioning(admin) |
970 | expected_attrs = { |
971 | 'status': NODE_STATUS.NEW, |
972 | } |
973 | self.assertAttributes(node, expected_attrs) |
974 | - self.assertThat( |
975 | - stop_nodes, MockCalledOnceWith([node.system_id], admin)) |
976 | + self.assertThat(node_stop, MockCalledOnceWith(admin)) |
977 | |
978 | def test_abort_commisssioning_errors_if_node_is_not_commissioning(self): |
979 | unaccepted_statuses = set(map_enum(NODE_STATUS).values()) |
980 | @@ -1834,6 +1734,28 @@ |
981 | self.assertEqual(node.macaddress_set.first(), node.get_pxe_mac()) |
982 | |
983 | |
984 | +class TestNode_pxe_mac_on_managed_interface(MAASServerTestCase): |
985 | + |
986 | + def test__returns_true_if_managed(self): |
987 | + node = factory.make_node_with_mac_attached_to_nodegroupinterface() |
988 | + self.assertTrue(node.is_pxe_mac_on_managed_interface()) |
989 | + |
990 | + def test__returns_false_if_no_pxe_mac(self): |
991 | + node = factory.make_Node() |
992 | + self.assertFalse(node.is_pxe_mac_on_managed_interface()) |
993 | + |
994 | + def test__returns_false_if_no_attached_cluster_interface(self): |
995 | + node = factory.make_Node() |
996 | + node.pxe_mac = factory.make_MACAddress(node=node) |
997 | + node.save() |
998 | + self.assertFalse(node.is_pxe_mac_on_managed_interface()) |
999 | + |
1000 | + def test__returns_false_if_cluster_interface_unmanaged(self): |
1001 | + node = factory.make_node_with_mac_attached_to_nodegroupinterface( |
1002 | + management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) |
1003 | + self.assertFalse(node.is_pxe_mac_on_managed_interface()) |
1004 | + |
1005 | + |
1006 | class NodeRoutersTest(MAASServerTestCase): |
1007 | |
1008 | def test_routers_stores_mac_address(self): |
1009 | @@ -2101,375 +2023,6 @@ |
1010 | self.assertThat(erase_mock, MockNotCalled()) |
1011 | |
1012 | |
1013 | -class NodeManagerTest_StartNodes(MAASServerTestCase): |
1014 | - |
1015 | - def setUp(self): |
1016 | - super(NodeManagerTest_StartNodes, self).setUp() |
1017 | - self.useFixture(RegionEventLoopFixture("rpc")) |
1018 | - self.useFixture(RunningEventLoopFixture()) |
1019 | - self.rpc_fixture = self.useFixture(MockLiveRegionToClusterRPCFixture()) |
1020 | - |
1021 | - def prepare_rpc_to_cluster(self, nodegroup): |
1022 | - protocol = self.rpc_fixture.makeCluster( |
1023 | - nodegroup, cluster_module.CreateHostMaps, cluster_module.PowerOn, |
1024 | - cluster_module.StartMonitors) |
1025 | - protocol.CreateHostMaps.side_effect = always_succeed_with({}) |
1026 | - protocol.StartMonitors.side_effect = always_succeed_with({}) |
1027 | - protocol.PowerOn.side_effect = always_succeed_with({}) |
1028 | - return protocol |
1029 | - |
1030 | - def make_acquired_nodes_with_macs(self, user, nodegroup=None, count=3): |
1031 | - nodes = [] |
1032 | - for _ in xrange(count): |
1033 | - node = factory.make_node_with_mac_attached_to_nodegroupinterface( |
1034 | - nodegroup=nodegroup, status=NODE_STATUS.READY) |
1035 | - self.prepare_rpc_to_cluster(node.nodegroup) |
1036 | - node.acquire(user) |
1037 | - nodes.append(node) |
1038 | - return nodes |
1039 | - |
1040 | - def test__sets_user_data(self): |
1041 | - user = factory.make_User() |
1042 | - nodegroup = factory.make_NodeGroup() |
1043 | - self.prepare_rpc_to_cluster(nodegroup) |
1044 | - nodes = self.make_acquired_nodes_with_macs(user, nodegroup) |
1045 | - user_data = factory.make_bytes() |
1046 | - |
1047 | - with TwistedLoggerFixture() as twisted_log: |
1048 | - Node.objects.start_nodes( |
1049 | - list(node.system_id for node in nodes), |
1050 | - user, user_data=user_data) |
1051 | - |
1052 | - # All three nodes have been given the same user data. |
1053 | - nuds = NodeUserData.objects.filter( |
1054 | - node_id__in=(node.id for node in nodes)) |
1055 | - self.assertEqual({user_data}, {nud.data for nud in nuds}) |
1056 | - # No complaints are made to the Twisted log. |
1057 | - self.assertFalse(twisted_log.containsError(), twisted_log.output) |
1058 | - |
1059 | - def test__resets_user_data(self): |
1060 | - user = factory.make_User() |
1061 | - nodegroup = factory.make_NodeGroup() |
1062 | - self.prepare_rpc_to_cluster(nodegroup) |
1063 | - nodes = self.make_acquired_nodes_with_macs(user, nodegroup) |
1064 | - |
1065 | - with TwistedLoggerFixture() as twisted_log: |
1066 | - Node.objects.start_nodes( |
1067 | - list(node.system_id for node in nodes), |
1068 | - user, user_data=None) |
1069 | - |
1070 | - # All three nodes have been given the same user data. |
1071 | - nuds = NodeUserData.objects.filter( |
1072 | - node_id__in=(node.id for node in nodes)) |
1073 | - self.assertThat(list(nuds), HasLength(0)) |
1074 | - # No complaints are made to the Twisted log. |
1075 | - self.assertFalse(twisted_log.containsError(), twisted_log.output) |
1076 | - |
1077 | - def test__claims_static_ip_addresses(self): |
1078 | - user = factory.make_User() |
1079 | - nodegroup = factory.make_NodeGroup() |
1080 | - self.prepare_rpc_to_cluster(nodegroup) |
1081 | - nodes = self.make_acquired_nodes_with_macs(user, nodegroup) |
1082 | - |
1083 | - claim_static_ip_addresses = self.patch_autospec( |
1084 | - Node, "claim_static_ip_addresses", spec_set=False) |
1085 | - claim_static_ip_addresses.return_value = {} |
1086 | - |
1087 | - with TwistedLoggerFixture() as twisted_log: |
1088 | - Node.objects.start_nodes( |
1089 | - list(node.system_id for node in nodes), user) |
1090 | - |
1091 | - for node in nodes: |
1092 | - self.expectThat(claim_static_ip_addresses, MockAnyCall(node)) |
1093 | - # No complaints are made to the Twisted log. |
1094 | - self.assertFalse(twisted_log.containsError(), twisted_log.output) |
1095 | - |
1096 | - def test__claims_static_ip_addresses_for_allocated_nodes_only(self): |
1097 | - user = factory.make_User() |
1098 | - nodegroup = factory.make_NodeGroup() |
1099 | - self.prepare_rpc_to_cluster(nodegroup) |
1100 | - nodes = self.make_acquired_nodes_with_macs(user, nodegroup, count=2) |
1101 | - |
1102 | - # Change the status of the first node to something other than |
1103 | - # allocated. |
1104 | - broken_node, allocated_node = nodes |
1105 | - broken_node.status = NODE_STATUS.BROKEN |
1106 | - broken_node.save() |
1107 | - |
1108 | - claim_static_ip_addresses = self.patch_autospec( |
1109 | - Node, "claim_static_ip_addresses", spec_set=False) |
1110 | - claim_static_ip_addresses.return_value = {} |
1111 | - |
1112 | - with TwistedLoggerFixture() as twisted_log: |
1113 | - Node.objects.start_nodes( |
1114 | - list(node.system_id for node in nodes), user) |
1115 | - |
1116 | - # Only one call is made to claim_static_ip_addresses(), for the |
1117 | - # still-allocated node. |
1118 | - self.assertThat( |
1119 | - claim_static_ip_addresses, |
1120 | - MockCalledOnceWith(allocated_node)) |
1121 | - # No complaints are made to the Twisted log. |
1122 | - self.assertFalse(twisted_log.containsError(), twisted_log.output) |
1123 | - |
1124 | - def test__updates_host_maps(self): |
1125 | - user = factory.make_User() |
1126 | - nodes = self.make_acquired_nodes_with_macs(user) |
1127 | - |
1128 | - update_host_maps = self.patch(node_module, "update_host_maps") |
1129 | - update_host_maps.return_value = [] # No failures. |
1130 | - |
1131 | - with TwistedLoggerFixture() as twisted_log: |
1132 | - Node.objects.start_nodes( |
1133 | - list(node.system_id for node in nodes), user) |
1134 | - |
1135 | - # Host maps are updated. |
1136 | - self.assertThat( |
1137 | - update_host_maps, MockCalledOnceWith({ |
1138 | - node.nodegroup: { |
1139 | - ip_address.ip: mac.mac_address |
1140 | - for ip_address in mac.ip_addresses.all() |
1141 | - } |
1142 | - for node in nodes |
1143 | - for mac in node.mac_addresses_on_managed_interfaces() |
1144 | - })) |
1145 | - # No complaints are made to the Twisted log. |
1146 | - self.assertFalse(twisted_log.containsError(), twisted_log.output) |
1147 | - |
1148 | - def test__propagates_errors_when_updating_host_maps(self): |
1149 | - user = factory.make_User() |
1150 | - nodes = self.make_acquired_nodes_with_macs(user) |
1151 | - |
1152 | - update_host_maps = self.patch(node_module, "update_host_maps") |
1153 | - update_host_maps.return_value = [ |
1154 | - Failure(AssertionError("That is so not true")), |
1155 | - Failure(ZeroDivisionError("I cannot defy mathematics")), |
1156 | - ] |
1157 | - |
1158 | - with TwistedLoggerFixture() as twisted_log: |
1159 | - error = self.assertRaises( |
1160 | - MultipleFailures, Node.objects.start_nodes, |
1161 | - list(node.system_id for node in nodes), user) |
1162 | - |
1163 | - self.assertSequenceEqual( |
1164 | - update_host_maps.return_value, error.args) |
1165 | - |
1166 | - # No complaints are made to the Twisted log. |
1167 | - self.assertFalse(twisted_log.containsError(), twisted_log.output) |
1168 | - |
1169 | - def test__updates_dns(self): |
1170 | - user = factory.make_User() |
1171 | - nodes = self.make_acquired_nodes_with_macs(user) |
1172 | - |
1173 | - change_dns_zones = self.patch(dns_config, "change_dns_zones") |
1174 | - |
1175 | - with TwistedLoggerFixture() as twisted_log: |
1176 | - Node.objects.start_nodes( |
1177 | - list(node.system_id for node in nodes), user) |
1178 | - |
1179 | - self.assertThat( |
1180 | - change_dns_zones, MockCalledOnceWith( |
1181 | - {node.nodegroup for node in nodes})) |
1182 | - |
1183 | - # No complaints are made to the Twisted log. |
1184 | - self.assertFalse(twisted_log.containsError(), twisted_log.output) |
1185 | - |
1186 | - def test__starts_nodes(self): |
1187 | - user = factory.make_User() |
1188 | - nodes = self.make_acquired_nodes_with_macs(user) |
1189 | - power_infos = list( |
1190 | - node.get_effective_power_info() |
1191 | - for node in nodes) |
1192 | - |
1193 | - power_on_nodes = self.patch(node_module, "power_on_nodes") |
1194 | - power_on_nodes.return_value = {} |
1195 | - |
1196 | - with TwistedLoggerFixture() as twisted_log: |
1197 | - Node.objects.start_nodes( |
1198 | - list(node.system_id for node in nodes), user) |
1199 | - |
1200 | - self.assertThat(power_on_nodes, MockCalledOnceWith(ANY)) |
1201 | - |
1202 | - nodes_start_info_observed = power_on_nodes.call_args[0][0] |
1203 | - nodes_start_info_expected = [ |
1204 | - (node.system_id, node.hostname, node.nodegroup.uuid, power_info) |
1205 | - for node, power_info in izip(nodes, power_infos) |
1206 | - ] |
1207 | - |
1208 | - # If the following fails the diff is big, but it's useful. |
1209 | - self.maxDiff = None |
1210 | - |
1211 | - self.assertItemsEqual( |
1212 | - nodes_start_info_expected, |
1213 | - nodes_start_info_observed) |
1214 | - |
1215 | - # No complaints are made to the Twisted log. |
1216 | - self.assertFalse(twisted_log.containsError(), twisted_log.output) |
1217 | - |
1218 | - def test__raises_failures_for_nodes_that_cannot_be_started(self): |
1219 | - power_on_nodes = self.patch(node_module, "power_on_nodes") |
1220 | - power_on_nodes.return_value = { |
1221 | - factory.make_name("system_id"): defer.fail( |
1222 | - ZeroDivisionError("Defiance is futile")), |
1223 | - factory.make_name("system_id"): defer.succeed({}), |
1224 | - } |
1225 | - |
1226 | - failures = self.assertRaises( |
1227 | - MultipleFailures, Node.objects.start_nodes, [], |
1228 | - factory.make_User()) |
1229 | - [failure] = failures.args |
1230 | - self.assertThat(failure.value, IsInstance(ZeroDivisionError)) |
1231 | - |
1232 | - def test__marks_allocated_node_as_deploying(self): |
1233 | - user = factory.make_User() |
1234 | - [node] = self.make_acquired_nodes_with_macs(user, count=1) |
1235 | - nodes_started = Node.objects.start_nodes([node.system_id], user) |
1236 | - self.assertItemsEqual([node], nodes_started) |
1237 | - self.assertEqual( |
1238 | - NODE_STATUS.DEPLOYING, reload_object(node).status) |
1239 | - |
1240 | - def test__does_not_change_state_of_deployed_node(self): |
1241 | - user = factory.make_User() |
1242 | - node = factory.make_Node( |
1243 | - power_type='ether_wake', status=NODE_STATUS.DEPLOYED, |
1244 | - owner=user) |
1245 | - factory.make_MACAddress(node=node) |
1246 | - power_on_nodes = self.patch(node_module, "power_on_nodes") |
1247 | - power_on_nodes.return_value = { |
1248 | - node.system_id: defer.succeed({}), |
1249 | - } |
1250 | - nodes_started = Node.objects.start_nodes([node.system_id], user) |
1251 | - self.assertItemsEqual([node], nodes_started) |
1252 | - self.assertEqual( |
1253 | - NODE_STATUS.DEPLOYED, reload_object(node).status) |
1254 | - |
1255 | - def test__only_returns_nodes_for_which_power_commands_have_been_sent(self): |
1256 | - user = factory.make_User() |
1257 | - node1, node2 = self.make_acquired_nodes_with_macs(user, count=2) |
1258 | - node1.power_type = 'ether_wake' # Can be started. |
1259 | - node1.save() |
1260 | - node2.power_type = '' # Undefined power type, cannot be started. |
1261 | - node2.save() |
1262 | - nodes_started = Node.objects.start_nodes( |
1263 | - [node1.system_id, node2.system_id], user) |
1264 | - self.assertItemsEqual([node1], nodes_started) |
1265 | - |
1266 | - def test__does_not_try_to_start_nodes_not_allocated_to_user(self): |
1267 | - user1 = factory.make_User() |
1268 | - [node1] = self.make_acquired_nodes_with_macs(user1, count=1) |
1269 | - node1.power_type = 'ether_wake' # can be started. |
1270 | - node1.save() |
1271 | - user2 = factory.make_User() |
1272 | - [node2] = self.make_acquired_nodes_with_macs(user2, count=1) |
1273 | - node2.power_type = 'ether_wake' # can be started. |
1274 | - node2.save() |
1275 | - |
1276 | - self.patch(node_module, 'power_on_nodes') |
1277 | - self.patch(node_module, 'wait_for_power_commands') |
1278 | - nodes_started = Node.objects.start_nodes( |
1279 | - [node1.system_id, node2.system_id], user1) |
1280 | - |
1281 | - # Since no power commands were sent to the node, it isn't |
1282 | - # returned by start_nodes(). |
1283 | - # test__only_returns_nodes_for_which_power_commands_have_been_sent() |
1284 | - # demonstrates this behaviour. |
1285 | - self.assertItemsEqual([node1], nodes_started) |
1286 | - |
1287 | - |
1288 | -class NodeManagerTest_StopNodes(MAASServerTestCase): |
1289 | - |
1290 | - def make_nodes_with_macs(self, user, nodegroup=None, count=3): |
1291 | - nodes = [] |
1292 | - for _ in xrange(count): |
1293 | - node = factory.make_node_with_mac_attached_to_nodegroupinterface( |
1294 | - nodegroup=nodegroup, status=NODE_STATUS.READY, |
1295 | - power_type='virsh') |
1296 | - node.acquire(user) |
1297 | - nodes.append(node) |
1298 | - return nodes |
1299 | - |
1300 | - def test_stop_nodes_stops_nodes(self): |
1301 | - wait_for_power_commands = self.patch_autospec( |
1302 | - node_module, 'wait_for_power_commands') |
1303 | - power_off_nodes = self.patch_autospec(node_module, "power_off_nodes") |
1304 | - power_off_nodes.side_effect = lambda nodes: { |
1305 | - system_id: Deferred() for system_id, _, _, _ in nodes} |
1306 | - |
1307 | - user = factory.make_User() |
1308 | - nodes = self.make_nodes_with_macs(user) |
1309 | - power_infos = list(node.get_effective_power_info() for node in nodes) |
1310 | - |
1311 | - stop_mode = factory.make_name('stop-mode') |
1312 | - nodes_stopped = Node.objects.stop_nodes( |
1313 | - list(node.system_id for node in nodes), user, stop_mode) |
1314 | - |
1315 | - self.assertItemsEqual(nodes, nodes_stopped) |
1316 | - self.assertThat(power_off_nodes, MockCalledOnceWith(ANY)) |
1317 | - self.assertThat(wait_for_power_commands, MockCalledOnceWith(ANY)) |
1318 | - |
1319 | - nodes_stop_info_observed = power_off_nodes.call_args[0][0] |
1320 | - nodes_stop_info_expected = [ |
1321 | - (node.system_id, node.hostname, node.nodegroup.uuid, power_info) |
1322 | - for node, power_info in izip(nodes, power_infos) |
1323 | - ] |
1324 | - |
1325 | - # The stop mode is added into the power info that's passed. |
1326 | - for _, _, _, power_info in nodes_stop_info_expected: |
1327 | - power_info.power_parameters['power_off_mode'] = stop_mode |
1328 | - |
1329 | - # If the following fails the diff is big, but it's useful. |
1330 | - self.maxDiff = None |
1331 | - |
1332 | - self.assertItemsEqual( |
1333 | - nodes_stop_info_expected, |
1334 | - nodes_stop_info_observed) |
1335 | - |
1336 | - def test_stop_nodes_ignores_uneditable_nodes(self): |
1337 | - owner = factory.make_User() |
1338 | - nodes = self.make_nodes_with_macs(owner) |
1339 | - |
1340 | - user = factory.make_User() |
1341 | - nodes_stopped = Node.objects.stop_nodes( |
1342 | - list(node.system_id for node in nodes), user) |
1343 | - |
1344 | - self.assertItemsEqual([], nodes_stopped) |
1345 | - |
1346 | - def test_stop_nodes_does_not_attempt_power_off_if_no_power_type(self): |
1347 | - # If the node has a power_type set to UNKNOWN_POWER_TYPE, stop_nodes() |
1348 | - # won't attempt to power it off. |
1349 | - user = factory.make_User() |
1350 | - [node] = self.make_nodes_with_macs(user, count=1) |
1351 | - node.power_type = "" |
1352 | - node.save() |
1353 | - |
1354 | - nodes_stopped = Node.objects.stop_nodes([node.system_id], user) |
1355 | - self.assertItemsEqual([], nodes_stopped) |
1356 | - |
1357 | - def test_stop_nodes_does_not_attempt_power_off_if_cannot_be_stopped(self): |
1358 | - # If the node has a power_type that MAAS knows stopping does not work, |
1359 | - # stop_nodes() won't attempt to power it off. |
1360 | - user = factory.make_User() |
1361 | - [node] = self.make_nodes_with_macs(user, count=1) |
1362 | - node.power_type = "ether_wake" |
1363 | - node.save() |
1364 | - |
1365 | - nodes_stopped = Node.objects.stop_nodes([node.system_id], user) |
1366 | - self.assertItemsEqual([], nodes_stopped) |
1367 | - |
1368 | - def test__raises_failures_for_nodes_that_cannot_be_stopped(self): |
1369 | - power_off_nodes = self.patch(node_module, "power_off_nodes") |
1370 | - power_off_nodes.return_value = { |
1371 | - factory.make_name("system_id"): defer.fail( |
1372 | - ZeroDivisionError("Ee by gum lad, that's a rum 'un.")), |
1373 | - factory.make_name("system_id"): defer.succeed({}), |
1374 | - } |
1375 | - |
1376 | - failures = self.assertRaises( |
1377 | - MultipleFailures, Node.objects.stop_nodes, [], factory.make_User()) |
1378 | - [failure] = failures.args |
1379 | - self.assertThat(failure.value, IsInstance(ZeroDivisionError)) |
1380 | - |
1381 | - |
1382 | class TestNodeTransitionMonitors(MAASServerTestCase): |
1383 | |
1384 | def prepare_rpc(self): |
1385 | @@ -2584,3 +2137,390 @@ |
1386 | ) |
1387 | node = factory.make_Node(status=status) |
1388 | self.assertEqual("Not in deployment", node.get_deployment_status()) |
1389 | + |
1390 | + |
1391 | +class TestNode_Start(MAASServerTestCase): |
1392 | + """Tests for Node.start().""" |
1393 | + |
1394 | + def setUp(self): |
1395 | + super(TestNode_Start, self).setUp() |
1396 | + self.useFixture(RegionEventLoopFixture("rpc")) |
1397 | + self.useFixture(RunningEventLoopFixture()) |
1398 | + self.rpc_fixture = self.useFixture(MockLiveRegionToClusterRPCFixture()) |
1399 | + |
1400 | + def prepare_rpc_to_cluster(self, nodegroup): |
1401 | + protocol = self.rpc_fixture.makeCluster( |
1402 | + nodegroup, cluster_module.CreateHostMaps, cluster_module.PowerOn, |
1403 | + cluster_module.StartMonitors) |
1404 | + protocol.CreateHostMaps.side_effect = always_succeed_with({}) |
1405 | + protocol.StartMonitors.side_effect = always_succeed_with({}) |
1406 | + protocol.PowerOn.side_effect = always_succeed_with({}) |
1407 | + return protocol |
1408 | + |
1409 | + def make_acquired_node_with_mac(self, user, nodegroup=None): |
1410 | + node = factory.make_node_with_mac_attached_to_nodegroupinterface( |
1411 | + nodegroup=nodegroup, status=NODE_STATUS.READY) |
1412 | + self.prepare_rpc_to_cluster(node.nodegroup) |
1413 | + node.acquire(user) |
1414 | + return node |
1415 | + |
1416 | + def test__sets_user_data(self): |
1417 | + user = factory.make_User() |
1418 | + nodegroup = factory.make_NodeGroup() |
1419 | + self.prepare_rpc_to_cluster(nodegroup) |
1420 | + node = self.make_acquired_node_with_mac(user, nodegroup) |
1421 | + user_data = factory.make_bytes() |
1422 | + |
1423 | + node.start(user, user_data=user_data) |
1424 | + |
1425 | + nud = NodeUserData.objects.get(node=node) |
1426 | + self.assertEqual(user_data, nud.data) |
1427 | + |
1428 | + def test__resets_user_data(self): |
1429 | + user = factory.make_User() |
1430 | + nodegroup = factory.make_NodeGroup() |
1431 | + self.prepare_rpc_to_cluster(nodegroup) |
1432 | + node = self.make_acquired_node_with_mac(user, nodegroup) |
1433 | + user_data = factory.make_bytes() |
1434 | + NodeUserData.objects.set_user_data(node, user_data) |
1435 | + |
1436 | + node.start(user, user_data=None) |
1437 | + |
1438 | + self.assertFalse(NodeUserData.objects.filter(node=node).exists()) |
1439 | + |
1440 | + def test__claims_static_ip_addresses(self): |
1441 | + user = factory.make_User() |
1442 | + nodegroup = factory.make_NodeGroup() |
1443 | + self.prepare_rpc_to_cluster(nodegroup) |
1444 | + node = self.make_acquired_node_with_mac(user, nodegroup) |
1445 | + |
1446 | + claim_static_ip_addresses = self.patch_autospec( |
1447 | + node, "claim_static_ip_addresses", spec_set=False) |
1448 | + claim_static_ip_addresses.return_value = {} |
1449 | + |
1450 | + node.start(user) |
1451 | + |
1452 | + self.expectThat(node.claim_static_ip_addresses, MockAnyCall()) |
1453 | + |
1454 | + def test__only_claims_static_addresses_when_allocated(self): |
1455 | + user = factory.make_User() |
1456 | + nodegroup = factory.make_NodeGroup() |
1457 | + self.prepare_rpc_to_cluster(nodegroup) |
1458 | + node = self.make_acquired_node_with_mac(user, nodegroup) |
1459 | + node.status = NODE_STATUS.BROKEN |
1460 | + node.save() |
1461 | + |
1462 | + claim_static_ip_addresses = self.patch_autospec( |
1463 | + node, "claim_static_ip_addresses", spec_set=False) |
1464 | + claim_static_ip_addresses.return_value = {} |
1465 | + |
1466 | + node.start(user) |
1467 | + |
1468 | + # No calls are made to claim_static_ip_addresses, since the node |
1469 | + # isn't ALLOCATED. |
1470 | + self.assertThat(claim_static_ip_addresses, MockNotCalled()) |
1471 | + |
1472 | + def test__does_not_generate_host_maps_if_not_on_managed_interface(self): |
1473 | + user = factory.make_User() |
1474 | + node = self.make_acquired_node_with_mac(user) |
1475 | + self.patch( |
1476 | + node, 'is_pxe_mac_on_managed_interface').return_value = False |
1477 | + update_host_maps = self.patch(node_module, "update_host_maps") |
1478 | + node.start(user) |
1479 | + self.assertThat(update_host_maps, MockNotCalled()) |
1480 | + |
1481 | + def test__updates_host_maps(self): |
1482 | + user = factory.make_User() |
1483 | + node = self.make_acquired_node_with_mac(user) |
1484 | + |
1485 | + update_host_maps = self.patch(node_module, "update_host_maps") |
1486 | + update_host_maps.return_value = [] # No failures. |
1487 | + |
1488 | + node.start(user) |
1489 | + |
1490 | + # Host maps are updated. |
1491 | + self.assertThat( |
1492 | + update_host_maps, MockCalledOnceWith({ |
1493 | + node.nodegroup: { |
1494 | + ip_address.ip: mac.mac_address |
1495 | + for ip_address in mac.ip_addresses.all() |
1496 | + } |
1497 | + for mac in node.mac_addresses_on_managed_interfaces() |
1498 | + })) |
1499 | + |
1500 | + def test__propagates_errors_when_updating_host_maps(self): |
1501 | + user = factory.make_User() |
1502 | + node = self.make_acquired_node_with_mac(user) |
1503 | + |
1504 | + update_host_maps = self.patch(node_module, "update_host_maps") |
1505 | + update_host_maps.return_value = [ |
1506 | + Failure(AssertionError("Please, don't do that.")), |
1507 | + ] |
1508 | + |
1509 | + error = self.assertRaises( |
1510 | + MultipleFailures, node.start, user) |
1511 | + |
1512 | + self.assertSequenceEqual( |
1513 | + update_host_maps.return_value, error.args) |
1514 | + |
1515 | + def test__updates_dns(self): |
1516 | + user = factory.make_User() |
1517 | + node = self.make_acquired_node_with_mac(user) |
1518 | + |
1519 | + change_dns_zones = self.patch(dns_config, "change_dns_zones") |
1520 | + |
1521 | + node.start(user) |
1522 | + |
1523 | + self.assertThat( |
1524 | + change_dns_zones, MockCalledOnceWith(node.nodegroup)) |
1525 | + |
1526 | + def test__starts_nodes(self): |
1527 | + user = factory.make_User() |
1528 | + node = self.make_acquired_node_with_mac(user) |
1529 | + power_info = node.get_effective_power_info() |
1530 | + |
1531 | + power_on_nodes = self.patch(node_module, "power_on_nodes") |
1532 | + power_on_nodes.return_value = {} |
1533 | + |
1534 | + node.start(user) |
1535 | + |
1536 | + self.assertThat(power_on_nodes, MockCalledOnceWith(ANY)) |
1537 | + |
1538 | + nodes_start_info_observed = power_on_nodes.call_args[0][0] |
1539 | + nodes_start_info_expected = [ |
1540 | + (node.system_id, node.hostname, node.nodegroup.uuid, power_info) |
1541 | + ] |
1542 | + |
1543 | + # If the following fails the diff is big, but it's useful. |
1544 | + self.maxDiff = None |
1545 | + |
1546 | + self.assertItemsEqual( |
1547 | + nodes_start_info_expected, |
1548 | + nodes_start_info_observed) |
1549 | + |
1550 | + def test__raises_failures_when_power_action_fails(self): |
1551 | + class PraiseBeToJTVException(Exception): |
1552 | + """A nonsense exception for this test. |
1553 | + |
1554 | + (Though jtv is praiseworthy, and that's worth noting). |
1555 | + """ |
1556 | + |
1557 | + power_on_nodes = self.patch(node_module, "power_on_nodes") |
1558 | + power_on_nodes.return_value = { |
1559 | + factory.make_name("system_id"): defer.fail( |
1560 | + PraiseBeToJTVException("Defiance is futile")), |
1561 | + } |
1562 | + |
1563 | + user = factory.make_User() |
1564 | + node = self.make_acquired_node_with_mac(user) |
1565 | + failures = self.assertRaises(MultipleFailures, node.start, user) |
1566 | + [failure] = failures.args |
1567 | + self.assertThat(failure.value, IsInstance(PraiseBeToJTVException)) |
1568 | + |
1569 | + def test__marks_allocated_node_as_deploying(self): |
1570 | + user = factory.make_User() |
1571 | + node = self.make_acquired_node_with_mac(user) |
1572 | + node.start(user) |
1573 | + self.assertEqual( |
1574 | + NODE_STATUS.DEPLOYING, reload_object(node).status) |
1575 | + |
1576 | + def test__does_not_change_state_of_deployed_node(self): |
1577 | + user = factory.make_User() |
1578 | + # Create a node that we can execute power actions on, so that we |
1579 | + # exercise the whole of start(). |
1580 | + node = factory.make_Node( |
1581 | + power_type='ether_wake', status=NODE_STATUS.DEPLOYED, |
1582 | + owner=user) |
1583 | + factory.make_MACAddress(node=node) |
1584 | + power_on_nodes = self.patch(node_module, "power_on_nodes") |
1585 | + power_on_nodes.return_value = { |
1586 | + node.system_id: defer.succeed({}), |
1587 | + } |
1588 | + node.start(user) |
1589 | + self.assertEqual( |
1590 | + NODE_STATUS.DEPLOYED, reload_object(node).status) |
1591 | + |
1592 | + def test__does_not_try_to_start_nodes_that_cant_be_started_by_MAAS(self): |
1593 | + user = factory.make_User() |
1594 | + node = self.make_acquired_node_with_mac(user) |
1595 | + power_info = PowerInfo( |
1596 | + can_be_started=False, |
1597 | + can_be_stopped=True, |
1598 | + can_be_queried=True, |
1599 | + power_type=node.get_effective_power_type(), |
1600 | + power_parameters=node.get_effective_power_parameters(), |
1601 | + ) |
1602 | + self.patch(node, 'get_effective_power_info').return_value = power_info |
1603 | + power_on_nodes = self.patch(node_module, "power_on_nodes") |
1604 | + node.start(user) |
1605 | + self.assertThat(power_on_nodes, MockNotCalled()) |
1606 | + |
1607 | + def test__does_not_start_nodes_the_user_cannot_edit(self): |
1608 | + power_on_nodes = self.patch_autospec(node_module, "power_on_nodes") |
1609 | + owner = factory.make_User() |
1610 | + node = self.make_acquired_node_with_mac(owner) |
1611 | + |
1612 | + user = factory.make_User() |
1613 | + node.start(user) |
1614 | + self.assertThat(power_on_nodes, MockNotCalled()) |
1615 | + |
1616 | + def test__allows_admin_to_start_any_node(self): |
1617 | + wait_for_power_commands = self.patch_autospec( |
1618 | + node_module, 'wait_for_power_commands') |
1619 | + power_on_nodes = self.patch_autospec(node_module, "power_on_nodes") |
1620 | + owner = factory.make_User() |
1621 | + node = self.make_acquired_node_with_mac(owner) |
1622 | + |
1623 | + admin = factory.make_admin() |
1624 | + node.start(admin) |
1625 | + |
1626 | + self.expectThat(power_on_nodes, MockCalledOnceWith(ANY)) |
1627 | + self.expectThat(wait_for_power_commands, MockCalledOnceWith(ANY)) |
1628 | + |
1629 | + def test__releases_static_ips_when_power_action_fails(self): |
1630 | + power_on_nodes = self.patch(node_module, "power_on_nodes") |
1631 | + power_on_nodes.return_value = { |
1632 | + factory.make_name("system_id"): defer.fail( |
1633 | + factory.make_exception("He's fallen in the water!")) |
1634 | + } |
1635 | + deallocate_ips = self.patch( |
1636 | + node_module.StaticIPAddress.objects, 'deallocate_by_node') |
1637 | + |
1638 | + user = factory.make_User() |
1639 | + node = self.make_acquired_node_with_mac(user) |
1640 | + |
1641 | + self.assertRaises(MultipleFailures, node.start, user) |
1642 | + self.assertThat(deallocate_ips, MockCalledOnceWith(node)) |
1643 | + |
1644 | + def test__releases_static_ips_when_update_host_maps_fails(self): |
1645 | + update_host_maps = self.patch(node_module, "update_host_maps") |
1646 | + update_host_maps.return_value = [ |
1647 | + Failure(factory.make_exception("You steaming nit, you!")) |
1648 | + ] |
1649 | + deallocate_ips = self.patch( |
1650 | + node_module.StaticIPAddress.objects, 'deallocate_by_node') |
1651 | + |
1652 | + user = factory.make_User() |
1653 | + node = self.make_acquired_node_with_mac(user) |
1654 | + |
1655 | + self.assertRaises(MultipleFailures, node.start, user) |
1656 | + self.assertThat(deallocate_ips, MockCalledOnceWith(node)) |
1657 | + |
1658 | + |
1659 | +class TestNode_Stop(MAASServerTestCase): |
1660 | + """Tests for Node.stop().""" |
1661 | + |
1662 | + def make_node_with_mac(self, user, nodegroup=None, power_type="virsh"): |
1663 | + node = factory.make_node_with_mac_attached_to_nodegroupinterface( |
1664 | + nodegroup=nodegroup, status=NODE_STATUS.READY, |
1665 | + power_type=power_type) |
1666 | + node.acquire(user) |
1667 | + return node |
1668 | + |
1669 | + def test__stops_nodes(self): |
1670 | + wait_for_power_commands = self.patch_autospec( |
1671 | + node_module, 'wait_for_power_commands') |
1672 | + power_off_nodes = self.patch_autospec(node_module, "power_off_nodes") |
1673 | + power_off_nodes.side_effect = lambda nodes: { |
1674 | + system_id: Deferred() for system_id, _, _, _ in nodes} |
1675 | + |
1676 | + user = factory.make_User() |
1677 | + node = self.make_node_with_mac(user) |
1678 | + power_info = node.get_effective_power_info() |
1679 | + |
1680 | + stop_mode = factory.make_name('stop-mode') |
1681 | + node.stop(user, stop_mode) |
1682 | + |
1683 | + self.assertThat(power_off_nodes, MockCalledOnceWith(ANY)) |
1684 | + self.assertThat(wait_for_power_commands, MockCalledOnceWith(ANY)) |
1685 | + |
1686 | + nodes_stop_info_observed = power_off_nodes.call_args[0][0] |
1687 | + nodes_stop_info_expected = [ |
1688 | + (node.system_id, node.hostname, node.nodegroup.uuid, power_info) |
1689 | + ] |
1690 | + |
1691 | + # The stop mode is added into the power info that's passed. |
1692 | + for _, _, _, power_info in nodes_stop_info_expected: |
1693 | + power_info.power_parameters['power_off_mode'] = stop_mode |
1694 | + |
1695 | + # If the following fails the diff is big, but it's useful. |
1696 | + self.maxDiff = None |
1697 | + |
1698 | + self.assertItemsEqual( |
1699 | + nodes_stop_info_expected, |
1700 | + nodes_stop_info_observed) |
1701 | + |
1702 | + def test__does_not_stop_nodes_the_user_cannot_edit(self): |
1703 | + power_off_nodes = self.patch_autospec(node_module, "power_off_nodes") |
1704 | + owner = factory.make_User() |
1705 | + node = self.make_node_with_mac(owner) |
1706 | + |
1707 | + user = factory.make_User() |
1708 | + node.stop(user) |
1709 | + self.assertThat(power_off_nodes, MockNotCalled()) |
1710 | + |
1711 | + def test__allows_admin_to_stop_any_node(self): |
1712 | + wait_for_power_commands = self.patch_autospec( |
1713 | + node_module, 'wait_for_power_commands') |
1714 | + power_off_nodes = self.patch_autospec(node_module, "power_off_nodes") |
1715 | + owner = factory.make_User() |
1716 | + node = self.make_node_with_mac(owner) |
1717 | + |
1718 | + admin = factory.make_admin() |
1719 | + node.stop(admin) |
1720 | + |
1721 | + self.assertThat(power_off_nodes, MockCalledOnceWith(ANY)) |
1722 | + self.assertThat(wait_for_power_commands, MockCalledOnceWith(ANY)) |
1723 | + |
1724 | + def test__does_not_attempt_power_off_if_no_power_type(self): |
1725 | + # If the node has a power_type set to UNKNOWN_POWER_TYPE, stop() |
1726 | + # won't attempt to power it off. |
1727 | + user = factory.make_User() |
1728 | + node = self.make_node_with_mac(user) |
1729 | + node.power_type = "" |
1730 | + node.save() |
1731 | + |
1732 | + power_off_nodes = self.patch_autospec(node_module, "power_off_nodes") |
1733 | + node.stop(user) |
1734 | + self.assertThat(power_off_nodes, MockNotCalled()) |
1735 | + |
1736 | + def test__does_not_attempt_power_off_if_cannot_be_stopped(self): |
1737 | + # If the node has a power_type that doesn't allow MAAS to power |
1738 | + # the node off, stop() won't attempt to send the power command. |
1739 | + user = factory.make_User() |
1740 | + node = self.make_node_with_mac(user, power_type="ether_wake") |
1741 | + node.save() |
1742 | + |
1743 | + power_off_nodes = self.patch_autospec(node_module, "power_off_nodes") |
1744 | + node.stop(user) |
1745 | + self.assertThat(power_off_nodes, MockNotCalled()) |
1746 | + |
1747 | + def test__propagates_failures_when_power_action_fails(self): |
1748 | + fake_exception_type = factory.make_exception_type() |
1749 | + |
1750 | + power_off_nodes = self.patch(node_module, "power_off_nodes") |
1751 | + power_off_nodes.return_value = { |
1752 | + factory.make_name("system_id"): defer.fail( |
1753 | + fake_exception_type("Soon be the weekend!")) |
1754 | + } |
1755 | + |
1756 | + user = factory.make_User() |
1757 | + node = self.make_node_with_mac(user) |
1758 | + |
1759 | + error = self.assertRaises(MultipleFailures, node.stop, user) |
1760 | + [failure] = error.args |
1761 | + self.assertThat(failure.value, IsInstance(fake_exception_type)) |
1762 | + |
1763 | + def test__returns_false_if_power_action_not_sent(self): |
1764 | + user = factory.make_User() |
1765 | + node = self.make_node_with_mac(user, power_type="") |
1766 | + |
1767 | + self.patch_autospec(node_module, "power_off_nodes") |
1768 | + self.assertIs(False, node.stop(user)) |
1769 | + |
1770 | + def test__returns_true_if_power_action_sent(self): |
1771 | + user = factory.make_User() |
1772 | + node = self.make_node_with_mac(user, power_type="virsh") |
1773 | + |
1774 | + self.patch_autospec(node_module, "power_off_nodes") |
1775 | + self.assertIs(True, node.stop(user)) |
1776 | |
1777 | === modified file 'src/maasserver/node_action.py' |
1778 | --- src/maasserver/node_action.py 2014-10-20 22:01:33 +0000 |
1779 | +++ src/maasserver/node_action.py 2014-10-22 10:46:23 +0000 |
1780 | @@ -47,10 +47,7 @@ |
1781 | Redirect, |
1782 | StaticIPAddressExhaustion, |
1783 | ) |
1784 | -from maasserver.models import ( |
1785 | - Node, |
1786 | - SSHKey, |
1787 | - ) |
1788 | +from maasserver.models import SSHKey |
1789 | from maasserver.node_status import is_failed_status |
1790 | from provisioningserver.rpc.exceptions import ( |
1791 | MultipleFailures, |
1792 | @@ -345,7 +342,7 @@ |
1793 | self.node.acquire(self.user, token=None) |
1794 | |
1795 | try: |
1796 | - Node.objects.start_nodes([self.node.system_id], self.user) |
1797 | + self.node.start(self.user) |
1798 | except StaticIPAddressExhaustion: |
1799 | raise NodeActionError( |
1800 | "%s: Failed to start, static IP addresses are exhausted." |
1801 | @@ -378,7 +375,7 @@ |
1802 | def execute(self, allow_redirect=True): |
1803 | """See `NodeAction.execute`.""" |
1804 | try: |
1805 | - Node.objects.stop_nodes([self.node.system_id], self.user) |
1806 | + self.node.stop(self.user) |
1807 | except RPC_EXCEPTIONS as exception: |
1808 | raise NodeActionError(exception) |
1809 | else: |
1810 | |
1811 | === modified file 'src/maasserver/tests/test_node_action.py' |
1812 | --- src/maasserver/tests/test_node_action.py 2014-10-20 22:01:33 +0000 |
1813 | +++ src/maasserver/tests/test_node_action.py 2014-10-22 10:46:23 +0000 |
1814 | @@ -33,7 +33,6 @@ |
1815 | Redirect, |
1816 | ) |
1817 | from maasserver.models import StaticIPAddress |
1818 | -from maasserver.models.node import Node |
1819 | from maasserver.node_action import ( |
1820 | AbortCommissioning, |
1821 | AbortOperation, |
1822 | @@ -230,17 +229,16 @@ |
1823 | ) |
1824 | |
1825 | def test_Commission_starts_commissioning(self): |
1826 | - start_nodes = self.patch(Node.objects, "start_nodes") |
1827 | node = factory.make_Node( |
1828 | mac=True, status=self.status, |
1829 | power_type='ether_wake') |
1830 | + node_start = self.patch(node, 'start') |
1831 | admin = factory.make_admin() |
1832 | action = Commission(node, admin) |
1833 | action.execute() |
1834 | self.assertEqual(NODE_STATUS.COMMISSIONING, node.status) |
1835 | self.assertThat( |
1836 | - start_nodes, MockCalledOnceWith( |
1837 | - [node.system_id], admin, user_data=ANY)) |
1838 | + node_start, MockCalledOnceWith(admin, user_data=ANY)) |
1839 | |
1840 | |
1841 | class TestAbortCommissioningNodeAction(MAASServerTestCase): |
1842 | @@ -249,14 +247,12 @@ |
1843 | node = factory.make_Node( |
1844 | mac=True, status=NODE_STATUS.COMMISSIONING, |
1845 | power_type='virsh') |
1846 | - stop_nodes = self.patch(Node.objects, "stop_nodes") |
1847 | - stop_nodes.return_value = [node] |
1848 | + node_stop = self.patch_autospec(node, 'stop') |
1849 | admin = factory.make_admin() |
1850 | |
1851 | AbortCommissioning(node, admin).execute() |
1852 | self.assertEqual(NODE_STATUS.NEW, node.status) |
1853 | - self.assertThat( |
1854 | - stop_nodes, MockCalledOnceWith([node.system_id], admin)) |
1855 | + self.assertThat(node_stop, MockCalledOnceWith(admin)) |
1856 | |
1857 | |
1858 | class TestAbortOperationNodeAction(MAASServerTestCase): |
1859 | @@ -265,14 +261,12 @@ |
1860 | owner = factory.make_User() |
1861 | node = factory.make_Node( |
1862 | status=NODE_STATUS.DISK_ERASING, owner=owner) |
1863 | - stop_nodes = self.patch_autospec(Node.objects, "stop_nodes") |
1864 | - stop_nodes.return_value = [node] |
1865 | + node_stop = self.patch_autospec(node, 'stop') |
1866 | |
1867 | AbortOperation(node, owner).execute() |
1868 | |
1869 | self.assertEqual(NODE_STATUS.FAILED_DISK_ERASING, node.status) |
1870 | - self.assertThat( |
1871 | - stop_nodes, MockCalledOnceWith([node.system_id], owner)) |
1872 | + self.assertThat(node_stop, MockCalledOnceWith(owner)) |
1873 | |
1874 | |
1875 | class TestAcquireNodeNodeAction(MAASServerTestCase): |
1876 | @@ -314,14 +308,14 @@ |
1877 | self.assertIn("SSH key", inhibition) |
1878 | |
1879 | def test_StartNode_starts_node(self): |
1880 | - start_nodes = self.patch(Node.objects, "start_nodes") |
1881 | user = factory.make_User() |
1882 | node = factory.make_Node( |
1883 | mac=True, status=NODE_STATUS.ALLOCATED, |
1884 | power_type='ether_wake', owner=user) |
1885 | + node_start = self.patch(node, 'start') |
1886 | StartNode(node, user).execute() |
1887 | self.assertThat( |
1888 | - start_nodes, MockCalledOnceWith([node.system_id], user)) |
1889 | + node_start, MockCalledOnceWith(user)) |
1890 | |
1891 | def test_StartNode_returns_error_when_no_more_static_IPs(self): |
1892 | user = factory.make_User() |
1893 | @@ -351,9 +345,9 @@ |
1894 | self.assertFalse(StartNode(node, user).is_permitted()) |
1895 | |
1896 | def test_StartNode_allocates_node_if_node_not_already_allocated(self): |
1897 | - self.patch(Node.objects, "start_nodes") |
1898 | user = factory.make_User() |
1899 | node = factory.make_Node(status=NODE_STATUS.READY) |
1900 | + self.patch(node, 'start') |
1901 | action = StartNode(node, user) |
1902 | action.execute() |
1903 | |
1904 | @@ -361,16 +355,16 @@ |
1905 | self.assertEqual(NODE_STATUS.ALLOCATED, node.status) |
1906 | |
1907 | def test_StartNode_label_shows_allocate_if_unallocated(self): |
1908 | - self.patch(Node.objects, "start_nodes") |
1909 | user = factory.make_User() |
1910 | node = factory.make_Node(status=NODE_STATUS.READY) |
1911 | + self.patch(node, 'start') |
1912 | action = StartNode(node, user) |
1913 | self.assertEqual("Acquire and start node", action.display) |
1914 | |
1915 | def test_StartNode_label_hides_allocate_if_allocated(self): |
1916 | - self.patch(Node.objects, "start_nodes") |
1917 | user = factory.make_User() |
1918 | node = factory.make_Node(status=NODE_STATUS.READY) |
1919 | + self.patch(node, 'start') |
1920 | node.acquire(user) |
1921 | action = StartNode(node, user) |
1922 | self.assertEqual("Start node", action.display) |
1923 | @@ -384,17 +378,17 @@ |
1924 | self.assertEqual("Start node", action.display) |
1925 | |
1926 | def test_StartNode_does_not_reallocate_when_run_by_non_owner(self): |
1927 | - self.patch(Node.objects, "start_nodes") |
1928 | user = factory.make_User() |
1929 | admin = factory.make_admin() |
1930 | node = factory.make_Node(status=NODE_STATUS.READY) |
1931 | + self.patch(node, 'start') |
1932 | node.acquire(user) |
1933 | action = StartNode(node, admin) |
1934 | |
1935 | # This action.execute() will not fail because the non-owner is |
1936 | # an admin, so they can start the node. Even if they weren't an |
1937 | - # admin, the node still wouldn't start; |
1938 | - # NodeManager.start_nodes() would ignore it. |
1939 | + # admin, the node still wouldn't start; Node.start() would |
1940 | + # ignore it. |
1941 | action.execute() |
1942 | self.assertEqual(user, node.owner) |
1943 | self.assertEqual(NODE_STATUS.ALLOCATED, node.status) |
1944 | @@ -412,13 +406,11 @@ |
1945 | mac=True, status=NODE_STATUS.DEPLOYED, |
1946 | power_type='ipmi', |
1947 | owner=user, power_parameters=params) |
1948 | - stop_nodes = self.patch_autospec(Node.objects, "stop_nodes") |
1949 | - stop_nodes.return_value = [node] |
1950 | + node_stop = self.patch_autospec(node, 'stop') |
1951 | |
1952 | StopNode(node, user).execute() |
1953 | |
1954 | - self.assertThat( |
1955 | - stop_nodes, MockCalledOnceWith([node.system_id], user)) |
1956 | + self.assertThat(node_stop, MockCalledOnceWith(user)) |
1957 | |
1958 | def test_StopNode_actionnable_for_failed_states(self): |
1959 | status = random.choice(FAILED_STATUSES) |
1960 | @@ -452,14 +444,13 @@ |
1961 | mac=True, status=self.actionable_status, |
1962 | power_type='ipmi', |
1963 | owner=user, power_parameters=params) |
1964 | - stop_nodes = self.patch_autospec(Node.objects, "stop_nodes") |
1965 | - stop_nodes.return_value = [node] |
1966 | + node_stop = self.patch_autospec(node, 'stop') |
1967 | |
1968 | ReleaseNode(node, user).execute() |
1969 | |
1970 | self.expectThat(node.status, Equals(NODE_STATUS.RELEASING)) |
1971 | self.assertThat( |
1972 | - stop_nodes, MockCalledOnceWith([node.system_id], user)) |
1973 | + node_stop, MockCalledOnceWith(user)) |
1974 | |
1975 | |
1976 | class TestUseCurtinNodeAction(MAASServerTestCase): |
1977 | @@ -571,12 +562,10 @@ |
1978 | exception = self.exception_class(factory.make_name("exception")) |
1979 | return exception |
1980 | |
1981 | - def patch_rpc_methods(self): |
1982 | + def patch_rpc_methods(self, node): |
1983 | exception = self.make_exception() |
1984 | - self.patch(Node.objects, "start_nodes").side_effect = ( |
1985 | - exception) |
1986 | - self.patch(Node.objects, "stop_nodes").side_effect = ( |
1987 | - exception) |
1988 | + self.patch(node, 'start').side_effect = exception |
1989 | + self.patch(node, 'stop').side_effect = exception |
1990 | |
1991 | def make_action(self, action_class, node_status): |
1992 | node = factory.make_Node( |
1993 | @@ -586,56 +575,56 @@ |
1994 | |
1995 | def test_Commission_handles_rpc_errors(self): |
1996 | action = self.make_action(Commission, NODE_STATUS.READY) |
1997 | - self.patch_rpc_methods() |
1998 | + self.patch_rpc_methods(action.node) |
1999 | exception = self.assertRaises(NodeActionError, action.execute) |
2000 | self.assertEqual( |
2001 | get_error_message_for_exception( |
2002 | - Node.objects.start_nodes.side_effect), |
2003 | + action.node.start.side_effect), |
2004 | unicode(exception)) |
2005 | |
2006 | def test_AbortCommissioning_handles_rpc_errors(self): |
2007 | action = self.make_action( |
2008 | AbortCommissioning, NODE_STATUS.COMMISSIONING) |
2009 | - self.patch_rpc_methods() |
2010 | + self.patch_rpc_methods(action.node) |
2011 | exception = self.assertRaises(NodeActionError, action.execute) |
2012 | self.assertEqual( |
2013 | get_error_message_for_exception( |
2014 | - Node.objects.stop_nodes.side_effect), |
2015 | + action.node.stop.side_effect), |
2016 | unicode(exception)) |
2017 | |
2018 | def test_AbortOperation_handles_rpc_errors(self): |
2019 | action = self.make_action( |
2020 | AbortOperation, NODE_STATUS.DISK_ERASING) |
2021 | - self.patch_rpc_methods() |
2022 | + self.patch_rpc_methods(action.node) |
2023 | exception = self.assertRaises(NodeActionError, action.execute) |
2024 | self.assertEqual( |
2025 | get_error_message_for_exception( |
2026 | - Node.objects.stop_nodes.side_effect), |
2027 | + action.node.stop.side_effect), |
2028 | unicode(exception)) |
2029 | |
2030 | def test_StartNode_handles_rpc_errors(self): |
2031 | action = self.make_action(StartNode, NODE_STATUS.READY) |
2032 | - self.patch_rpc_methods() |
2033 | + self.patch_rpc_methods(action.node) |
2034 | exception = self.assertRaises(NodeActionError, action.execute) |
2035 | self.assertEqual( |
2036 | get_error_message_for_exception( |
2037 | - Node.objects.start_nodes.side_effect), |
2038 | + action.node.start.side_effect), |
2039 | unicode(exception)) |
2040 | |
2041 | def test_StopNode_handles_rpc_errors(self): |
2042 | action = self.make_action(StopNode, NODE_STATUS.DEPLOYED) |
2043 | - self.patch_rpc_methods() |
2044 | + self.patch_rpc_methods(action.node) |
2045 | exception = self.assertRaises(NodeActionError, action.execute) |
2046 | self.assertEqual( |
2047 | get_error_message_for_exception( |
2048 | - Node.objects.stop_nodes.side_effect), |
2049 | + action.node.stop.side_effect), |
2050 | unicode(exception)) |
2051 | |
2052 | def test_ReleaseNode_handles_rpc_errors(self): |
2053 | action = self.make_action(ReleaseNode, NODE_STATUS.ALLOCATED) |
2054 | - self.patch_rpc_methods() |
2055 | + self.patch_rpc_methods(action.node) |
2056 | exception = self.assertRaises(NodeActionError, action.execute) |
2057 | self.assertEqual( |
2058 | get_error_message_for_exception( |
2059 | - Node.objects.stop_nodes.side_effect), |
2060 | + action.node.stop.side_effect), |
2061 | unicode(exception)) |