Merge ~troyanov/maas:node-release-scriptset into maas:master
- Git
- lp:~troyanov/maas
- node-release-scriptset
- Merge into master
Proposed by
Anton Troyanov
Status: | Merged |
---|---|
Approved by: | Anton Troyanov |
Approved revision: | 5672c8464f22aa4de1ee90d7bcb88fcf8cf6e02d |
Merge reported by: | MAAS Lander |
Merged at revision: | not available |
Proposed branch: | ~troyanov/maas:node-release-scriptset |
Merge into: | maas:master |
Diff against target: |
540 lines (+244/-83) 9 files modified
src/maasserver/api/machines.py (+21/-7) src/maasserver/api/tests/test_machine.py (+25/-24) src/maasserver/models/node.py (+144/-26) src/maasserver/models/tests/test_node.py (+15/-18) src/maasserver/node_action.py (+16/-5) src/maasserver/node_status.py (+1/-0) src/maasserver/tests/test_node_action.py (+7/-3) src/metadataserver/api.py (+9/-0) src/metadataserver/api_twisted.py (+6/-0) |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Christian Grabowski | Approve | ||
MAAS Lander | Approve | ||
Review via email: mp+461347@code.launchpad.net |
Commit message
feat: machine release scripts
Description of the change
To post a comment you must log in.
Revision history for this message
Christian Grabowski (cgrabowski) wrote : | # |
One minor nit inline, otherwise looks good, +1
review:
Approve
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/src/maasserver/api/machines.py b/src/maasserver/api/machines.py |
2 | index f8cddf0..2c0a116 100644 |
3 | --- a/src/maasserver/api/machines.py |
4 | +++ b/src/maasserver/api/machines.py |
5 | @@ -973,12 +973,23 @@ class MachineHandler(NodeHandler, WorkloadAnnotationsMixin, PowerMixin): |
6 | # postcondition is achieved, so call this success. |
7 | pass |
8 | elif machine.status in RELEASABLE_STATUSES: |
9 | - machine.release_or_erase( |
10 | - request.user, |
11 | - form.cleaned_data["comment"], |
12 | - erase=form.cleaned_data["erase"], |
13 | - secure_erase=form.cleaned_data["secure_erase"], |
14 | - quick_erase=form.cleaned_data["quick_erase"], |
15 | + scripts = form.cleaned_data["scripts"] |
16 | + |
17 | + params = {} |
18 | + if form.cleaned_data["erase"]: |
19 | + params["wipe-disks"] = {} |
20 | + params["wipe-disks"]["secure_erase"] = form.cleaned_data[ |
21 | + "secure_erase" |
22 | + ] |
23 | + params["wipe-disks"]["quick_erase"] = form.cleaned_data[ |
24 | + "quick_erase" |
25 | + ] |
26 | + params = params | form.get_script_param_dict(scripts) |
27 | + machine.start_releasing( |
28 | + user=request.user, |
29 | + comment=form.cleaned_data["comment"], |
30 | + scripts=scripts, |
31 | + script_input=params, |
32 | force=form.cleaned_data["force"], |
33 | ) |
34 | else: |
35 | @@ -2225,7 +2236,10 @@ class MachinesHandler(NodesHandler, PowersMixin): |
36 | # Nothing to do. |
37 | pass |
38 | elif machine.status in RELEASABLE_STATUSES: |
39 | - machine.release_or_erase(request.user, comment) |
40 | + machine.start_releasing( |
41 | + user=request.user, |
42 | + comment=comment, |
43 | + ) |
44 | released_ids.append(machine.system_id) |
45 | else: |
46 | failed.append( |
47 | diff --git a/src/maasserver/api/tests/test_machine.py b/src/maasserver/api/tests/test_machine.py |
48 | index f5a2b33..42b3511 100644 |
49 | --- a/src/maasserver/api/tests/test_machine.py |
50 | +++ b/src/maasserver/api/tests/test_machine.py |
51 | @@ -1683,17 +1683,16 @@ class TestMachineAPI(APITestCase.ForUser): |
52 | ) |
53 | self.become_admin() |
54 | comment = factory.make_name("comment") |
55 | - machine_release = self.patch(node_module.Machine, "release_or_erase") |
56 | + machine_release = self.patch(node_module.Machine, "start_releasing") |
57 | self.client.post( |
58 | self.get_machine_uri(machine), |
59 | {"op": "release", "comment": comment}, |
60 | ) |
61 | machine_release.assert_called_once_with( |
62 | - self.user, |
63 | - comment, |
64 | - erase=False, |
65 | - quick_erase=False, |
66 | - secure_erase=False, |
67 | + user=self.user, |
68 | + comment=comment, |
69 | + scripts=[], |
70 | + script_input={}, |
71 | force=False, |
72 | ) |
73 | |
74 | @@ -1707,7 +1706,7 @@ class TestMachineAPI(APITestCase.ForUser): |
75 | self.become_admin() |
76 | secure_erase = factory.pick_bool() |
77 | quick_erase = factory.pick_bool() |
78 | - machine_release = self.patch(node_module.Machine, "release_or_erase") |
79 | + machine_release = self.patch(node_module.Machine, "start_releasing") |
80 | self.client.post( |
81 | self.get_machine_uri(machine), |
82 | { |
83 | @@ -1718,11 +1717,15 @@ class TestMachineAPI(APITestCase.ForUser): |
84 | }, |
85 | ) |
86 | machine_release.assert_called_once_with( |
87 | - self.user, |
88 | - "", |
89 | - erase=True, |
90 | - quick_erase=quick_erase, |
91 | - secure_erase=secure_erase, |
92 | + user=self.user, |
93 | + comment="", |
94 | + scripts=["wipe-disks"], |
95 | + script_input={ |
96 | + "wipe-disks": { |
97 | + "secure_erase": secure_erase, |
98 | + "quick_erase": quick_erase, |
99 | + } |
100 | + }, |
101 | force=False, |
102 | ) |
103 | |
104 | @@ -1735,16 +1738,15 @@ class TestMachineAPI(APITestCase.ForUser): |
105 | ) |
106 | self.become_admin() |
107 | force = factory.pick_bool() |
108 | - machine_release = self.patch(node_module.Machine, "release_or_erase") |
109 | + machine_release = self.patch(node_module.Machine, "start_releasing") |
110 | self.client.post( |
111 | self.get_machine_uri(machine), {"op": "release", "force": force} |
112 | ) |
113 | machine_release.assert_called_once_with( |
114 | - self.user, |
115 | - "", |
116 | - erase=False, |
117 | - quick_erase=False, |
118 | - secure_erase=False, |
119 | + user=self.user, |
120 | + comment="", |
121 | + scripts=[], |
122 | + script_input={}, |
123 | force=force, |
124 | ) |
125 | |
126 | @@ -1756,14 +1758,13 @@ class TestMachineAPI(APITestCase.ForUser): |
127 | power_state=POWER_STATE.OFF, |
128 | ) |
129 | self.become_admin() |
130 | - machine_release = self.patch(node_module.Machine, "release_or_erase") |
131 | + machine_release = self.patch(node_module.Machine, "start_releasing") |
132 | self.client.post(self.get_machine_uri(machine), {"op": "release"}) |
133 | machine_release.assert_called_once_with( |
134 | - self.user, |
135 | - "", |
136 | - erase=False, |
137 | - quick_erase=False, |
138 | - secure_erase=False, |
139 | + user=self.user, |
140 | + comment="", |
141 | + scripts=[], |
142 | + script_input={}, |
143 | force=False, |
144 | ) |
145 | |
146 | diff --git a/src/maasserver/models/node.py b/src/maasserver/models/node.py |
147 | index 0e66b44..e34f5d4 100644 |
148 | --- a/src/maasserver/models/node.py |
149 | +++ b/src/maasserver/models/node.py |
150 | @@ -19,6 +19,7 @@ from itertools import chain, count |
151 | import json |
152 | import logging |
153 | from operator import attrgetter |
154 | +import os |
155 | import random |
156 | import re |
157 | import socket |
158 | @@ -174,7 +175,11 @@ from metadataserver.enum import ( |
159 | SCRIPT_STATUS_FAILED, |
160 | SCRIPT_STATUS_RUNNING_OR_PENDING, |
161 | ) |
162 | -from metadataserver.user_data import generate_user_data_for_status |
163 | +from metadataserver.user_data import ( |
164 | + generate_user_data, |
165 | + generate_user_data_for_status, |
166 | +) |
167 | +from metadataserver.user_data.snippets import get_userdata_template_dir |
168 | from provisioningserver.drivers.osystem import BOOT_IMAGE_PURPOSE |
169 | from provisioningserver.drivers.pod import Capabilities |
170 | from provisioningserver.drivers.power.ipmi import IPMI_BOOT_TYPE |
171 | @@ -3623,6 +3628,144 @@ class Node(CleanSave, TimestampedModel): |
172 | % (self.system_id, NODE_STATUS_CHOICES_DICT[self.status]) |
173 | ) |
174 | |
175 | + def start_releasing( |
176 | + self, user, comment=None, scripts=None, script_input=None, force=False |
177 | + ): |
178 | + """Start the release process for the machine.""" |
179 | + from maasserver.models import ScriptSet |
180 | + |
181 | + if scripts is None: |
182 | + scripts = [] |
183 | + |
184 | + self.maybe_delete_pods(not force) |
185 | + |
186 | + config = Config.objects.get_configs( |
187 | + [ |
188 | + "commissioning_osystem", |
189 | + "commissioning_distro_series", |
190 | + "default_osystem", |
191 | + "default_distro_series", |
192 | + "disk_erase_with_secure_erase", |
193 | + "disk_erase_with_quick_erase", |
194 | + "enable_disk_erasing_on_release", |
195 | + ] |
196 | + ) |
197 | + |
198 | + if config.get("enable_disk_erasing_on_release"): |
199 | + scripts.append("wipe-disks") |
200 | + |
201 | + if not scripts: |
202 | + self.release(user, comment) |
203 | + return |
204 | + |
205 | + self.current_release_script_set = ( |
206 | + ScriptSet.objects.create_release_script_set( |
207 | + self, scripts=scripts, script_input=script_input |
208 | + ) |
209 | + ) |
210 | + self.save() |
211 | + |
212 | + template_file = os.path.join( |
213 | + get_userdata_template_dir(), "script_runner.template" |
214 | + ) |
215 | + user_data = generate_user_data(self, template_file) |
216 | + |
217 | + failed_status = NODE_STATUS.FAILED_RELEASING |
218 | + # Before machine release scripts were introduced there was Erase Disk |
219 | + # functionality. We want to maintain backward compatible statuses |
220 | + # to be reported when only "wipe-disks" script is executed. |
221 | + if len(scripts) == 1 and "wipe-disks" in scripts: |
222 | + self._register_request_event( |
223 | + user, |
224 | + EVENT_TYPES.REQUEST_NODE_ERASE_DISK, |
225 | + action="start disk erasing", |
226 | + comment=comment, |
227 | + ) |
228 | + old_status = self.update_status(NODE_STATUS.DISK_ERASING) |
229 | + failed_status = NODE_STATUS.FAILED_DISK_ERASING |
230 | + else: |
231 | + self._register_request_event( |
232 | + user, |
233 | + EVENT_TYPES.REQUEST_NODE_RELEASE, |
234 | + action="start node releasing", |
235 | + comment=comment, |
236 | + ) |
237 | + |
238 | + old_status = self.update_status(NODE_STATUS.RELEASING) |
239 | + |
240 | + self.save() |
241 | + |
242 | + try: |
243 | + # Node.start() has synchronous and asynchronous parts, so catch |
244 | + # exceptions arising synchronously, and chain callbacks to the |
245 | + # Deferred it returns for the asynchronous (post-commit) bits. |
246 | + starting = self._start( |
247 | + user, |
248 | + user_data, |
249 | + old_status, |
250 | + allow_power_cycle=True, |
251 | + config=config, |
252 | + ) |
253 | + except Exception as error: |
254 | + # We always mark the node as failed here, although we could |
255 | + # potentially move it back to the state it was in previously. For |
256 | + # now, though, this is safer, since it marks the node as needing |
257 | + # attention. |
258 | + self.update_status(failed_status) |
259 | + self.save() |
260 | + maaslog.error( |
261 | + f"{self.hostname}: Could not start node for release: {error}" |
262 | + ) |
263 | + # Let the exception bubble up, since the UI or API will have to |
264 | + # deal with it. |
265 | + raise |
266 | + else: |
267 | + # Don't permit naive mocking of start(); it causes too much |
268 | + # confusion when testing. Return a Deferred from side_effect. |
269 | + assert isinstance(starting, Deferred) or starting is None |
270 | + |
271 | + if starting is None: |
272 | + starting = post_commit() |
273 | + # MAAS cannot start the node itself. |
274 | + is_starting = False |
275 | + else: |
276 | + # MAAS can direct the node to start. |
277 | + is_starting = True |
278 | + |
279 | + @asynchronous |
280 | + def async_start(is_starting, hostname): |
281 | + if is_starting: |
282 | + maaslog.info(f"{hostname}: Release started") |
283 | + else: |
284 | + maaslog.warning( |
285 | + f"{hostname}: Could not start node for release; it " |
286 | + "must be started manually", |
287 | + ) |
288 | + |
289 | + starting.addCallback( |
290 | + callOut, |
291 | + async_start, |
292 | + is_starting, |
293 | + self.hostname, |
294 | + ) |
295 | + |
296 | + # If there's an error, reset the node's status. |
297 | + starting.addErrback( |
298 | + callOutToDatabase, |
299 | + Node._set_status, |
300 | + self.system_id, |
301 | + status=failed_status, |
302 | + ) |
303 | + |
304 | + def eb_start(failure, hostname): |
305 | + maaslog.error( |
306 | + f"{hostname}: Could not start node for " |
307 | + f"releasing: {failure.getErrorMessage()}", |
308 | + ) |
309 | + return failure # Propagate. |
310 | + |
311 | + return starting.addErrback(eb_start, self.hostname) |
312 | + |
313 | def release(self, user=None, comment=None): |
314 | self._register_request_event( |
315 | user, |
316 | @@ -3758,31 +3901,6 @@ class Node(CleanSave, TimestampedModel): |
317 | # Remove all set owner data. |
318 | OwnerData.objects.filter(node=self).delete() |
319 | |
320 | - def release_or_erase( |
321 | - self, |
322 | - user, |
323 | - comment=None, |
324 | - erase=False, |
325 | - secure_erase=None, |
326 | - quick_erase=None, |
327 | - force=False, |
328 | - ): |
329 | - """Either release the node or erase the node then release it, depending |
330 | - on settings and parameters.""" |
331 | - self.maybe_delete_pods(not force) |
332 | - erase_on_release = Config.objects.get_config( |
333 | - "enable_disk_erasing_on_release" |
334 | - ) |
335 | - if erase or erase_on_release: |
336 | - self.start_disk_erasing( |
337 | - user, |
338 | - comment, |
339 | - secure_erase=secure_erase, |
340 | - quick_erase=quick_erase, |
341 | - ) |
342 | - else: |
343 | - self.release(user, comment) |
344 | - |
345 | def maybe_delete_pods(self, dry_run: bool): |
346 | """Check if any pods are associated with this Node. |
347 | |
348 | diff --git a/src/maasserver/models/tests/test_node.py b/src/maasserver/models/tests/test_node.py |
349 | index 7706930..6195c28 100644 |
350 | --- a/src/maasserver/models/tests/test_node.py |
351 | +++ b/src/maasserver/models/tests/test_node.py |
352 | @@ -22,6 +22,7 @@ from fixtures import LoggerFixture |
353 | from netaddr import IPAddress, IPNetwork |
354 | from testscenarios import multiply_scenarios |
355 | from twisted.internet import defer |
356 | +from twisted.internet.defer import succeed |
357 | from twisted.internet.error import ConnectionDone |
358 | import yaml |
359 | |
360 | @@ -7273,44 +7274,40 @@ class TestNodeManagerGetNodesRBAC(MAASServerTestCase): |
361 | |
362 | |
363 | class TestNodeErase(MAASServerTestCase): |
364 | - def test_release_or_erase_erases_when_enabled(self): |
365 | + def test_start_releasing_erases_when_enabled(self): |
366 | owner = factory.make_User() |
367 | node = factory.make_Node(status=NODE_STATUS.ALLOCATED, owner=owner) |
368 | Config.objects.set_config("enable_disk_erasing_on_release", True) |
369 | - erase_mock = self.patch_autospec(node, "start_disk_erasing") |
370 | release_mock = self.patch_autospec(node, "release") |
371 | - node.release_or_erase(owner) |
372 | - erase_mock.assert_called_once_with( |
373 | - owner, None, secure_erase=None, quick_erase=None |
374 | - ) |
375 | + self.patch(node, "_start").return_value = succeed(None) |
376 | + node.start_releasing(owner) |
377 | release_mock.assert_not_called() |
378 | |
379 | - def test_release_or_erase_erases_when_disabled_and_erase_param(self): |
380 | + def test_start_releasing_erases_when_disabled_and_erase_param(self): |
381 | owner = factory.make_User() |
382 | node = factory.make_Node(status=NODE_STATUS.ALLOCATED, owner=owner) |
383 | Config.objects.set_config("enable_disk_erasing_on_release", False) |
384 | - erase_mock = self.patch_autospec(node, "start_disk_erasing") |
385 | release_mock = self.patch_autospec(node, "release") |
386 | + self.patch(node, "_start").return_value = succeed(None) |
387 | secure_erase = factory.pick_bool() |
388 | quick_erase = factory.pick_bool() |
389 | - node.release_or_erase( |
390 | - owner, |
391 | - erase=True, |
392 | - secure_erase=secure_erase, |
393 | - quick_erase=quick_erase, |
394 | - ) |
395 | - erase_mock.assert_called_once_with( |
396 | - owner, None, secure_erase=secure_erase, quick_erase=quick_erase |
397 | + node.start_releasing( |
398 | + user=owner, |
399 | + scripts=["wipe-disks"], |
400 | + script_input={ |
401 | + "secure_erase": secure_erase, |
402 | + "quick_erase": quick_erase, |
403 | + }, |
404 | ) |
405 | release_mock.assert_not_called() |
406 | |
407 | - def test_release_or_erase_releases_when_disabled(self): |
408 | + def test_start_releasing_releases_when_disabled(self): |
409 | owner = factory.make_User() |
410 | node = factory.make_Node(status=NODE_STATUS.ALLOCATED, owner=owner) |
411 | Config.objects.set_config("enable_disk_erasing_on_release", False) |
412 | erase_mock = self.patch_autospec(node, "start_disk_erasing") |
413 | release_mock = self.patch_autospec(node, "release") |
414 | - node.release_or_erase(owner) |
415 | + node.start_releasing(owner) |
416 | release_mock.assert_called_once_with(owner, None) |
417 | erase_mock.assert_not_called() |
418 | |
419 | diff --git a/src/maasserver/node_action.py b/src/maasserver/node_action.py |
420 | index 0c19d9e..ec7a87d 100644 |
421 | --- a/src/maasserver/node_action.py |
422 | +++ b/src/maasserver/node_action.py |
423 | @@ -728,11 +728,22 @@ class Release(NodeAction): |
424 | if not form.is_valid(): |
425 | raise NodeActionError(form.errors) |
426 | try: |
427 | - self.node.release_or_erase( |
428 | - self.user, |
429 | - erase=form.cleaned_data["erase"], |
430 | - secure_erase=form.cleaned_data["secure_erase"], |
431 | - quick_erase=form.cleaned_data["quick_erase"], |
432 | + scripts = [] |
433 | + params = {} |
434 | + if form.cleaned_data["erase"]: |
435 | + scripts.append("wipe-disks") |
436 | + params["wipe-disks"] = {} |
437 | + params["wipe-disks"]["secure_erase"] = form.cleaned_data[ |
438 | + "secure_erase" |
439 | + ] |
440 | + params["wipe-disks"]["quick_erase"] = form.cleaned_data[ |
441 | + "quick_erase" |
442 | + ] |
443 | + params = params | form.get_script_param_dict(scripts) |
444 | + self.node.start_releasing( |
445 | + user=self.user, |
446 | + scripts=scripts, |
447 | + script_input=params, |
448 | ) |
449 | except RPC_EXCEPTIONS + (ExternalProcessError,) as exception: |
450 | raise NodeActionError(exception) |
451 | diff --git a/src/maasserver/node_status.py b/src/maasserver/node_status.py |
452 | index 3a344ef..4ea19f1 100644 |
453 | --- a/src/maasserver/node_status.py |
454 | +++ b/src/maasserver/node_status.py |
455 | @@ -312,6 +312,7 @@ COMMISSIONING_LIKE_STATUSES = [ |
456 | NODE_STATUS.COMMISSIONING, |
457 | NODE_STATUS.DISK_ERASING, |
458 | NODE_STATUS.ENTERING_RESCUE_MODE, |
459 | + NODE_STATUS.RELEASING, |
460 | NODE_STATUS.RESCUE_MODE, |
461 | NODE_STATUS.TESTING, |
462 | ] |
463 | diff --git a/src/maasserver/tests/test_node_action.py b/src/maasserver/tests/test_node_action.py |
464 | index 41e0a04..b04105e 100644 |
465 | --- a/src/maasserver/tests/test_node_action.py |
466 | +++ b/src/maasserver/tests/test_node_action.py |
467 | @@ -1555,15 +1555,19 @@ class TestReleaseAction(MAASServerTestCase): |
468 | owner=user, |
469 | power_parameters=params, |
470 | ) |
471 | - node_release_or_erase = self.patch_autospec(node, "release_or_erase") |
472 | + node_start_releasing = self.patch_autospec(node, "start_releasing") |
473 | |
474 | with post_commit_hooks: |
475 | Release(node, user, request).execute( |
476 | erase=True, secure_erase=True, quick_erase=True |
477 | ) |
478 | |
479 | - node_release_or_erase.assert_called_once_with( |
480 | - user, erase=True, secure_erase=True, quick_erase=True |
481 | + node_start_releasing.assert_called_once_with( |
482 | + user=user, |
483 | + scripts=["wipe-disks"], |
484 | + script_input={ |
485 | + "wipe-disks": {"secure_erase": True, "quick_erase": True} |
486 | + }, |
487 | ) |
488 | |
489 | |
490 | diff --git a/src/metadataserver/api.py b/src/metadataserver/api.py |
491 | index d2394c2..35f9a9e 100644 |
492 | --- a/src/metadataserver/api.py |
493 | +++ b/src/metadataserver/api.py |
494 | @@ -519,6 +519,7 @@ class VersionIndexHandler(MetadataViewHandler): |
495 | NODE_STATUS.FAILED_DEPLOYMENT, |
496 | NODE_STATUS.READY, |
497 | NODE_STATUS.DISK_ERASING, |
498 | + NODE_STATUS.RELEASING, |
499 | NODE_STATUS.ENTERING_RESCUE_MODE, |
500 | NODE_STATUS.RESCUE_MODE, |
501 | NODE_STATUS.FAILED_ENTERING_RESCUE_MODE, |
502 | @@ -531,6 +532,7 @@ class VersionIndexHandler(MetadataViewHandler): |
503 | NODE_STATUS.COMMISSIONING, |
504 | NODE_STATUS.DEPLOYING, |
505 | NODE_STATUS.DISK_ERASING, |
506 | + NODE_STATUS.RELEASING, |
507 | NODE_STATUS.ENTERING_RESCUE_MODE, |
508 | NODE_STATUS.RESCUE_MODE, |
509 | NODE_STATUS.TESTING, |
510 | @@ -789,6 +791,13 @@ class VersionIndexHandler(MetadataViewHandler): |
511 | self._store_results( |
512 | node, node.current_release_script_set, request, status |
513 | ) |
514 | + |
515 | + if status == SIGNAL_STATUS.OK: |
516 | + node.release() |
517 | + |
518 | + if status == SIGNAL_STATUS.FAILED: |
519 | + node.mark_failed(comment="Failed to release machine.") |
520 | + |
521 | return None |
522 | |
523 | @operation(idempotent=False) |
524 | diff --git a/src/metadataserver/api_twisted.py b/src/metadataserver/api_twisted.py |
525 | index ca432fa..a5c4f9e 100644 |
526 | --- a/src/metadataserver/api_twisted.py |
527 | +++ b/src/metadataserver/api_twisted.py |
528 | @@ -503,6 +503,12 @@ class StatusWorkerService(TimerService): |
529 | comment="Failed to erase disks.", commit=False |
530 | ) |
531 | save_node = True |
532 | + elif node.status == NODE_STATUS.RELEASING: |
533 | + if failed: |
534 | + node.mark_failed( |
535 | + comment="Failed to release machine.", commit=False |
536 | + ) |
537 | + save_node = True |
538 | # Deallocate the node if we enter any terminal state. |
539 | if node.node_type == NODE_TYPE.MACHINE and node.status in [ |
540 | NODE_STATUS.READY, |
UNIT TESTS
-b node-release-scriptset lp:~troyanov/maas/+git/maas into -b master lp:~maas-committers/maas
STATUS: SUCCESS
COMMIT: 5672c8464f22aa4de1ee90d7bcb88fcf8cf6e02d