Merge lp:~newell-jensen/maas/fix-1685835 into lp:~maas-committers/maas/trunk
- fix-1685835
- Merge into trunk
Proposed by
Newell Jensen
Status: | Merged |
---|---|
Approved by: | Blake Rouse |
Approved revision: | no longer in the source branch. |
Merged at revision: | 6018 |
Proposed branch: | lp:~newell-jensen/maas/fix-1685835 |
Merge into: | lp:~maas-committers/maas/trunk |
Diff against target: |
306 lines (+104/-33) 2 files modified
src/provisioningserver/drivers/pod/rsd.py (+55/-18) src/provisioningserver/drivers/pod/tests/test_rsd.py (+49/-15) |
To merge this branch: | bzr merge lp:~newell-jensen/maas/fix-1685835 |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Blake Rouse (community) | Approve | ||
Review via email: mp+323188@code.launchpad.net |
Commit message
Ignore remote storage if the InitiatorIQN is non-empty in pre-composed nodes. This information is taken into account for calculating the total, available, and used iscsi storage on the RSD pod.
Description of the change
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'src/provisioningserver/drivers/pod/rsd.py' | |||
2 | --- src/provisioningserver/drivers/pod/rsd.py 2017-04-24 20:10:24 +0000 | |||
3 | +++ src/provisioningserver/drivers/pod/rsd.py 2017-04-25 23:06:27 +0000 | |||
4 | @@ -519,17 +519,36 @@ | |||
5 | 519 | discovered_machine_block_device) | 519 | discovered_machine_block_device) |
6 | 520 | 520 | ||
7 | 521 | def get_pod_machine_remote_storages( | 521 | def get_pod_machine_remote_storages( |
9 | 522 | self, node_data, url, headers, logical_drives, | 522 | self, node_data, url, headers, remote_drives, logical_drives, |
10 | 523 | targets, discovered_machine, request=None): | 523 | targets, discovered_machine, request=None): |
11 | 524 | """Get pod machine remote storages.""" | 524 | """Get pod machine remote storages.""" |
14 | 525 | remote_drives = node_data.get('Links', {}).get('RemoteDrives', []) | 525 | node_remote_drives = node_data.get('Links', {}).get('RemoteDrives', []) |
15 | 526 | for remote_drive in remote_drives: | 526 | remote_drives_to_delete = [] |
16 | 527 | logical_drives_to_delete = [] | ||
17 | 528 | for node_remote_drive in node_remote_drives: | ||
18 | 529 | target_data = targets[ | ||
19 | 530 | node_remote_drive['@odata.id'].lstrip('/').encode('utf-8')] | ||
20 | 531 | initiator = target_data.get('Initiator')[0] | ||
21 | 532 | initiator_iqn = initiator.get('iSCSI', {}).get('InitiatorIQN') | ||
22 | 533 | if initiator_iqn: | ||
23 | 534 | # Since InitiatorIQN is not an empty string we will not be | ||
24 | 535 | # including this storage into MAAS. | ||
25 | 536 | # Remove this remote drive, its target, and the logical | ||
26 | 537 | # drives associated with this target. | ||
27 | 538 | for lv, lv_data in logical_drives.items(): | ||
28 | 539 | lv_targets = lv_data.get('Links', {}).get('Targets', []) | ||
29 | 540 | for lv_target in lv_targets: | ||
30 | 541 | if (node_remote_drive['@odata.id'] == | ||
31 | 542 | lv_target['@odata.id']): | ||
32 | 543 | remote_drives_to_delete.append( | ||
33 | 544 | node_remote_drive['@odata.id']) | ||
34 | 545 | logical_drives_to_delete.append(lv) | ||
35 | 546 | continue | ||
36 | 547 | |||
37 | 527 | discovered_machine_block_device = ( | 548 | discovered_machine_block_device = ( |
38 | 528 | DiscoveredMachineBlockDevice( | 549 | DiscoveredMachineBlockDevice( |
39 | 529 | model=None, serial=None, size=0, | 550 | model=None, serial=None, size=0, |
40 | 530 | type=BlockDeviceType.ISCSI)) | 551 | type=BlockDeviceType.ISCSI)) |
41 | 531 | target_data = targets[ | ||
42 | 532 | remote_drive['@odata.id'].lstrip('/').encode('utf-8')] | ||
43 | 533 | addresses = target_data.get('Addresses')[0] | 552 | addresses = target_data.get('Addresses')[0] |
44 | 534 | host = addresses.get('iSCSI', {}).get('TargetPortalIP') | 553 | host = addresses.get('iSCSI', {}).get('TargetPortalIP') |
45 | 535 | proto = '6' # curtin currently only supports TCP. | 554 | proto = '6' # curtin currently only supports TCP. |
46 | @@ -547,9 +566,11 @@ | |||
47 | 547 | # find which one contains this remote drive. | 566 | # find which one contains this remote drive. |
48 | 548 | for lv, lv_data in logical_drives.items(): | 567 | for lv, lv_data in logical_drives.items(): |
49 | 549 | lv_targets = lv_data.get('Links', {}).get('Targets', []) | 568 | lv_targets = lv_data.get('Links', {}).get('Targets', []) |
53 | 550 | if remote_drive in lv_targets: | 569 | for lv_target in lv_targets: |
54 | 551 | discovered_machine_block_device.size = float( | 570 | if (node_remote_drive['@odata.id'] == |
55 | 552 | lv_data['CapacityGiB']) * (1024 ** 3) | 571 | lv_target['@odata.id']): |
56 | 572 | discovered_machine_block_device.size = float( | ||
57 | 573 | lv_data['CapacityGiB']) * (1024 ** 3) | ||
58 | 553 | 574 | ||
59 | 554 | # Map the tags from the request block devices to the discovered | 575 | # Map the tags from the request block devices to the discovered |
60 | 555 | # block devices. This ensures that the composed machine has the | 576 | # block devices. This ensures that the composed machine has the |
61 | @@ -570,6 +591,16 @@ | |||
62 | 570 | discovered_machine.block_devices.append( | 591 | discovered_machine.block_devices.append( |
63 | 571 | discovered_machine_block_device) | 592 | discovered_machine_block_device) |
64 | 572 | 593 | ||
65 | 594 | # Remove the remote drives, targets, and logical drives that | ||
66 | 595 | # are no longer needed. These will be used in later calculations | ||
67 | 596 | # for the total usable iscsi remote storage. | ||
68 | 597 | for remote_drive in set(remote_drives_to_delete): | ||
69 | 598 | del targets[ | ||
70 | 599 | remote_drive.lstrip('/').encode('utf-8')] | ||
71 | 600 | remote_drives.remove(remote_drive) | ||
72 | 601 | for logical_drive in set(logical_drives_to_delete): | ||
73 | 602 | del logical_drives[logical_drive] | ||
74 | 603 | |||
75 | 573 | @inlineCallbacks | 604 | @inlineCallbacks |
76 | 574 | def get_pod_machine_interfaces( | 605 | def get_pod_machine_interfaces( |
77 | 575 | self, node_data, url, headers, discovered_machine): | 606 | self, node_data, url, headers, discovered_machine): |
78 | @@ -627,7 +658,8 @@ | |||
79 | 627 | 658 | ||
80 | 628 | @inlineCallbacks | 659 | @inlineCallbacks |
81 | 629 | def get_pod_machine( | 660 | def get_pod_machine( |
83 | 630 | self, node, url, headers, logical_drives, targets, request=None): | 661 | self, node, url, headers, remote_drives, |
84 | 662 | logical_drives, targets, request=None): | ||
85 | 631 | """Get pod composed machine. | 663 | """Get pod composed machine. |
86 | 632 | 664 | ||
87 | 633 | If required resources cannot be found, this | 665 | If required resources cannot be found, this |
88 | @@ -661,7 +693,7 @@ | |||
89 | 661 | node_data, url, headers, discovered_machine, request) | 693 | node_data, url, headers, discovered_machine, request) |
90 | 662 | # Get remote storages. | 694 | # Get remote storages. |
91 | 663 | self.get_pod_machine_remote_storages( | 695 | self.get_pod_machine_remote_storages( |
93 | 664 | node_data, url, headers, logical_drives, | 696 | node_data, url, headers, remote_drives, logical_drives, |
94 | 665 | targets, discovered_machine, request) | 697 | targets, discovered_machine, request) |
95 | 666 | # Get interfaces. | 698 | # Get interfaces. |
96 | 667 | yield self.get_pod_machine_interfaces( | 699 | yield self.get_pod_machine_interfaces( |
97 | @@ -674,7 +706,8 @@ | |||
98 | 674 | 706 | ||
99 | 675 | @inlineCallbacks | 707 | @inlineCallbacks |
100 | 676 | def get_pod_machines( | 708 | def get_pod_machines( |
102 | 677 | self, url, headers, logical_drives, targets, request=None): | 709 | self, url, headers, remote_drives, |
103 | 710 | logical_drives, targets, request=None): | ||
104 | 678 | """Get pod composed machines. | 711 | """Get pod composed machines. |
105 | 679 | 712 | ||
106 | 680 | If required resources cannot be found, these | 713 | If required resources cannot be found, these |
107 | @@ -688,7 +721,8 @@ | |||
108 | 688 | # Iterate over all composed nodes in the pod. | 721 | # Iterate over all composed nodes in the pod. |
109 | 689 | for node in nodes: | 722 | for node in nodes: |
110 | 690 | discovered_machine = yield self.get_pod_machine( | 723 | discovered_machine = yield self.get_pod_machine( |
112 | 691 | node, url, headers, logical_drives, targets, request) | 724 | node, url, headers, remote_drives, |
113 | 725 | logical_drives, targets, request) | ||
114 | 692 | discovered_machines.append(discovered_machine) | 726 | discovered_machines.append(discovered_machine) |
115 | 693 | return discovered_machines | 727 | return discovered_machines |
116 | 694 | 728 | ||
117 | @@ -729,21 +763,24 @@ | |||
118 | 729 | logical_drives, targets = yield self.scrape_logical_drives_and_targets( | 763 | logical_drives, targets = yield self.scrape_logical_drives_and_targets( |
119 | 730 | url, headers) | 764 | url, headers) |
120 | 731 | remote_drives = yield self.scrape_remote_drives(url, headers) | 765 | remote_drives = yield self.scrape_remote_drives(url, headers) |
121 | 766 | |||
122 | 767 | # Discover composed machines. | ||
123 | 768 | pod_machines = yield self.get_pod_machines( | ||
124 | 769 | url, headers, remote_drives, logical_drives, targets) | ||
125 | 770 | |||
126 | 771 | # Discover pod resources. | ||
127 | 732 | pod_remote_storage, pod_hints_remote_storage = ( | 772 | pod_remote_storage, pod_hints_remote_storage = ( |
128 | 733 | self.calculate_pod_remote_storage( | 773 | self.calculate_pod_remote_storage( |
129 | 734 | remote_drives, logical_drives, targets)) | 774 | remote_drives, logical_drives, targets)) |
130 | 735 | |||
131 | 736 | # Discover pod resources. | ||
132 | 737 | discovered_pod = yield self.get_pod_resources(url, headers) | 775 | discovered_pod = yield self.get_pod_resources(url, headers) |
133 | 738 | 776 | ||
134 | 777 | # Add machines to pod. | ||
135 | 778 | discovered_pod.machines = pod_machines | ||
136 | 779 | |||
137 | 739 | # Discover pod remote storage resources. | 780 | # Discover pod remote storage resources. |
138 | 740 | discovered_pod.capabilities.append(Capabilities.ISCSI_STORAGE) | 781 | discovered_pod.capabilities.append(Capabilities.ISCSI_STORAGE) |
139 | 741 | discovered_pod.iscsi_storage = pod_remote_storage | 782 | discovered_pod.iscsi_storage = pod_remote_storage |
140 | 742 | 783 | ||
141 | 743 | # Discover composed machines. | ||
142 | 744 | discovered_pod.machines = yield self.get_pod_machines( | ||
143 | 745 | url, headers, logical_drives, targets) | ||
144 | 746 | |||
145 | 747 | # Discover pod hints. | 784 | # Discover pod hints. |
146 | 748 | discovered_pod.hints = self.get_pod_hints(discovered_pod) | 785 | discovered_pod.hints = self.get_pod_hints(discovered_pod) |
147 | 749 | discovered_pod.hints.iscsi_storage = pod_hints_remote_storage | 786 | discovered_pod.hints.iscsi_storage = pod_hints_remote_storage |
148 | 750 | 787 | ||
149 | === modified file 'src/provisioningserver/drivers/pod/tests/test_rsd.py' | |||
150 | --- src/provisioningserver/drivers/pod/tests/test_rsd.py 2017-04-24 20:10:24 +0000 | |||
151 | +++ src/provisioningserver/drivers/pod/tests/test_rsd.py 2017-04-25 23:06:27 +0000 | |||
152 | @@ -1013,7 +1013,7 @@ | |||
153 | 1013 | b"redfish/v1/Services/1/Targets/3": SAMPLE_JSON_TARGET, | 1013 | b"redfish/v1/Services/1/Targets/3": SAMPLE_JSON_TARGET, |
154 | 1014 | } | 1014 | } |
155 | 1015 | remote_drives = set( | 1015 | remote_drives = set( |
157 | 1016 | "redfish/v1/Services/1/Targets/1") | 1016 | b"/redfish/v1/Services/1/Targets/1") |
158 | 1017 | 1017 | ||
159 | 1018 | remote_storage = driver.calculate_remote_storage( | 1018 | remote_storage = driver.calculate_remote_storage( |
160 | 1019 | remote_drives, logical_drives, targets) | 1019 | remote_drives, logical_drives, targets) |
161 | @@ -1313,23 +1313,51 @@ | |||
162 | 1313 | context = make_context() | 1313 | context = make_context() |
163 | 1314 | url = driver.get_url(context) | 1314 | url = driver.get_url(context) |
164 | 1315 | headers = driver.make_auth_headers(**context) | 1315 | headers = driver.make_auth_headers(**context) |
165 | 1316 | node_data = SAMPLE_JSON_NODE | ||
166 | 1317 | discovered_machine = make_discovered_machine(block_devices=[]) | 1316 | discovered_machine = make_discovered_machine(block_devices=[]) |
169 | 1318 | TARGET_CHANGED = deepcopy(SAMPLE_JSON_TARGET) | 1317 | node_data = deepcopy(SAMPLE_JSON_NODE) |
170 | 1319 | TARGET_CHANGED['Addresses'][0]['iSCSI']['TargetLUN'].append({'LUN': 3}) | 1318 | node_data['Links']['RemoteDrives'].append({ |
171 | 1319 | "@odata.id": "/redfish/v1/Services/1/Targets/3"}) | ||
172 | 1320 | LV_CHANGED = deepcopy(SAMPLE_JSON_LV) | ||
173 | 1321 | LV_CHANGED['Links']['Targets'].append({ | ||
174 | 1322 | "@odata.id": "/redfish/v1/Services/1/Targets/3"}) | ||
175 | 1323 | TARGET_CHANGED_1 = deepcopy(SAMPLE_JSON_TARGET) | ||
176 | 1324 | TARGET_CHANGED_2 = deepcopy(SAMPLE_JSON_TARGET) | ||
177 | 1325 | TARGET_CHANGED_1['Addresses'][0]['iSCSI']['TargetLUN'].append( | ||
178 | 1326 | {'LUN': 3}) | ||
179 | 1327 | TARGET_CHANGED_2['Initiator'][0]['iSCSI']['InitiatorIQN'] = "ALL" | ||
180 | 1328 | remote_drives = set([ | ||
181 | 1329 | "/redfish/v1/Services/1/Targets/1", | ||
182 | 1330 | "/redfish/v1/Services/1/Targets/2", | ||
183 | 1331 | "/redfish/v1/Services/1/Targets/3", | ||
184 | 1332 | ]) | ||
185 | 1320 | logical_drives = { | 1333 | logical_drives = { |
186 | 1321 | b"redfish/v1/Services/1/LogicalDrives/1": SAMPLE_JSON_LV, | 1334 | b"redfish/v1/Services/1/LogicalDrives/1": SAMPLE_JSON_LV, |
187 | 1322 | b"redfish/v1/Services/1/LogicalDrives/2": SAMPLE_JSON_LVG, | 1335 | b"redfish/v1/Services/1/LogicalDrives/2": SAMPLE_JSON_LVG, |
189 | 1323 | b"redfish/v1/Services/1/LogicalDrives/3": SAMPLE_JSON_LV | 1336 | b"redfish/v1/Services/1/LogicalDrives/3": LV_CHANGED, |
190 | 1324 | } | 1337 | } |
191 | 1325 | targets = { | 1338 | targets = { |
192 | 1326 | b"redfish/v1/Services/1/Targets/1": SAMPLE_JSON_TARGET, | 1339 | b"redfish/v1/Services/1/Targets/1": SAMPLE_JSON_TARGET, |
194 | 1327 | b"redfish/v1/Services/1/Targets/2": TARGET_CHANGED, | 1340 | b"redfish/v1/Services/1/Targets/2": TARGET_CHANGED_1, |
195 | 1341 | b"redfish/v1/Services/1/Targets/3": TARGET_CHANGED_2, | ||
196 | 1328 | } | 1342 | } |
197 | 1329 | 1343 | ||
198 | 1330 | driver.get_pod_machine_remote_storages( | 1344 | driver.get_pod_machine_remote_storages( |
200 | 1331 | node_data, url, headers, logical_drives, | 1345 | node_data, url, headers, remote_drives, logical_drives, |
201 | 1332 | targets, discovered_machine) | 1346 | targets, discovered_machine) |
202 | 1347 | self.assertEquals( | ||
203 | 1348 | set([ | ||
204 | 1349 | "/redfish/v1/Services/1/Targets/1", | ||
205 | 1350 | "/redfish/v1/Services/1/Targets/2"]), remote_drives) | ||
206 | 1351 | self.assertEquals( | ||
207 | 1352 | { | ||
208 | 1353 | b"redfish/v1/Services/1/LogicalDrives/1": SAMPLE_JSON_LV, | ||
209 | 1354 | b"redfish/v1/Services/1/LogicalDrives/2": SAMPLE_JSON_LVG | ||
210 | 1355 | }, logical_drives) | ||
211 | 1356 | self.assertEquals( | ||
212 | 1357 | { | ||
213 | 1358 | b"redfish/v1/Services/1/Targets/1": SAMPLE_JSON_TARGET, | ||
214 | 1359 | b"redfish/v1/Services/1/Targets/2": TARGET_CHANGED_1 | ||
215 | 1360 | }, targets) | ||
216 | 1333 | self.assertThat( | 1361 | self.assertThat( |
217 | 1334 | discovered_machine.block_devices, MatchesListwise([ | 1362 | discovered_machine.block_devices, MatchesListwise([ |
218 | 1335 | MatchesStructure( | 1363 | MatchesStructure( |
219 | @@ -1368,6 +1396,8 @@ | |||
220 | 1368 | discovered_machine = make_discovered_machine(block_devices=[]) | 1396 | discovered_machine = make_discovered_machine(block_devices=[]) |
221 | 1369 | TARGET_CHANGED = deepcopy(SAMPLE_JSON_TARGET) | 1397 | TARGET_CHANGED = deepcopy(SAMPLE_JSON_TARGET) |
222 | 1370 | TARGET_CHANGED['Addresses'][0]['iSCSI']['TargetLUN'].append({'LUN': 3}) | 1398 | TARGET_CHANGED['Addresses'][0]['iSCSI']['TargetLUN'].append({'LUN': 3}) |
223 | 1399 | remote_drives = set( | ||
224 | 1400 | b"/redfish/v1/Services/1/Targets/1") | ||
225 | 1371 | logical_drives = { | 1401 | logical_drives = { |
226 | 1372 | b"redfish/v1/Services/1/LogicalDrives/1": SAMPLE_JSON_LV, | 1402 | b"redfish/v1/Services/1/LogicalDrives/1": SAMPLE_JSON_LV, |
227 | 1373 | b"redfish/v1/Services/1/LogicalDrives/2": SAMPLE_JSON_LVG, | 1403 | b"redfish/v1/Services/1/LogicalDrives/2": SAMPLE_JSON_LVG, |
228 | @@ -1379,7 +1409,7 @@ | |||
229 | 1379 | } | 1409 | } |
230 | 1380 | 1410 | ||
231 | 1381 | driver.get_pod_machine_remote_storages( | 1411 | driver.get_pod_machine_remote_storages( |
233 | 1382 | node_data, url, headers, logical_drives, | 1412 | node_data, url, headers, remote_drives, logical_drives, |
234 | 1383 | targets, discovered_machine, request) | 1413 | targets, discovered_machine, request) |
235 | 1384 | self.assertThat( | 1414 | self.assertThat( |
236 | 1385 | discovered_machine.block_devices, MatchesListwise([ | 1415 | discovered_machine.block_devices, MatchesListwise([ |
237 | @@ -1481,6 +1511,8 @@ | |||
238 | 1481 | node_data = SAMPLE_JSON_NODE | 1511 | node_data = SAMPLE_JSON_NODE |
239 | 1482 | TARGET_CHANGED = deepcopy(SAMPLE_JSON_TARGET) | 1512 | TARGET_CHANGED = deepcopy(SAMPLE_JSON_TARGET) |
240 | 1483 | TARGET_CHANGED['Addresses'][0]['iSCSI']['TargetLUN'].append({'LUN': 3}) | 1513 | TARGET_CHANGED['Addresses'][0]['iSCSI']['TargetLUN'].append({'LUN': 3}) |
241 | 1514 | remote_drives = set( | ||
242 | 1515 | b"/redfish/v1/Services/1/Targets/1") | ||
243 | 1484 | logical_drives = { | 1516 | logical_drives = { |
244 | 1485 | b"redfish/v1/Services/1/LogicalDrives/1": SAMPLE_JSON_LV, | 1517 | b"redfish/v1/Services/1/LogicalDrives/1": SAMPLE_JSON_LV, |
245 | 1486 | b"redfish/v1/Services/1/LogicalDrives/2": SAMPLE_JSON_LVG, | 1518 | b"redfish/v1/Services/1/LogicalDrives/2": SAMPLE_JSON_LVG, |
246 | @@ -1506,7 +1538,7 @@ | |||
247 | 1506 | driver, 'get_pod_machine_interfaces') | 1538 | driver, 'get_pod_machine_interfaces') |
248 | 1507 | 1539 | ||
249 | 1508 | machine = yield driver.get_pod_machine( | 1540 | machine = yield driver.get_pod_machine( |
251 | 1509 | b"redfish/v1/Nodes/1", url, headers, | 1541 | b"redfish/v1/Nodes/1", url, headers, remote_drives, |
252 | 1510 | logical_drives, targets, request) | 1542 | logical_drives, targets, request) |
253 | 1511 | self.assertEquals(node_data['Name'], machine.hostname) | 1543 | self.assertEquals(node_data['Name'], machine.hostname) |
254 | 1512 | self.assertEquals( | 1544 | self.assertEquals( |
255 | @@ -1523,8 +1555,8 @@ | |||
256 | 1523 | MockCalledOnceWith(node_data, url, headers, machine, request)) | 1555 | MockCalledOnceWith(node_data, url, headers, machine, request)) |
257 | 1524 | self.assertThat( | 1556 | self.assertThat( |
258 | 1525 | mock_get_pod_machine_remote_storages, | 1557 | mock_get_pod_machine_remote_storages, |
261 | 1526 | MockCalledOnceWith(node_data, url, headers, logical_drives, | 1558 | MockCalledOnceWith(node_data, url, headers, remote_drives, |
262 | 1527 | targets, machine, request)) | 1559 | logical_drives, targets, machine, request)) |
263 | 1528 | self.assertThat( | 1560 | self.assertThat( |
264 | 1529 | mock_get_pod_machine_interfaces, | 1561 | mock_get_pod_machine_interfaces, |
265 | 1530 | MockCalledOnceWith(node_data, url, headers, machine)) | 1562 | MockCalledOnceWith(node_data, url, headers, machine)) |
266 | @@ -1535,6 +1567,8 @@ | |||
267 | 1535 | context = make_context() | 1567 | context = make_context() |
268 | 1536 | url = driver.get_url(context) | 1568 | url = driver.get_url(context) |
269 | 1537 | headers = driver.make_auth_headers(**context) | 1569 | headers = driver.make_auth_headers(**context) |
270 | 1570 | remote_drives = set( | ||
271 | 1571 | b"redfish/v1/Services/1/Targets/1") | ||
272 | 1538 | logical_drives = { | 1572 | logical_drives = { |
273 | 1539 | factory.make_name('lv_path'): factory.make_name('lv_data') | 1573 | factory.make_name('lv_path'): factory.make_name('lv_data') |
274 | 1540 | for _ in range(3) | 1574 | for _ in range(3) |
275 | @@ -1557,11 +1591,11 @@ | |||
276 | 1557 | mock_get_pod_machine.return_value = expected_machines | 1591 | mock_get_pod_machine.return_value = expected_machines |
277 | 1558 | 1592 | ||
278 | 1559 | discovered_machines = yield driver.get_pod_machines( | 1593 | discovered_machines = yield driver.get_pod_machines( |
280 | 1560 | url, headers, logical_drives, targets) | 1594 | url, headers, remote_drives, logical_drives, targets) |
281 | 1561 | self.assertEquals(1, len(discovered_machines)) | 1595 | self.assertEquals(1, len(discovered_machines)) |
282 | 1562 | self.assertThat(mock_get_pod_machine, MockCalledOnceWith( | 1596 | self.assertThat(mock_get_pod_machine, MockCalledOnceWith( |
283 | 1563 | b"redfish/v1/Nodes/1", url, headers, | 1597 | b"redfish/v1/Nodes/1", url, headers, |
285 | 1564 | logical_drives, targets, None)) | 1598 | remote_drives, logical_drives, targets, None)) |
286 | 1565 | 1599 | ||
287 | 1566 | def test__get_pod_hints(self): | 1600 | def test__get_pod_hints(self): |
288 | 1567 | driver = RSDPodDriver() | 1601 | driver = RSDPodDriver() |
289 | @@ -1638,7 +1672,7 @@ | |||
290 | 1638 | mock_get_pod_resources, MockCalledOnceWith(url, headers)) | 1672 | mock_get_pod_resources, MockCalledOnceWith(url, headers)) |
291 | 1639 | self.assertThat( | 1673 | self.assertThat( |
292 | 1640 | mock_get_pod_machines, MockCalledOnceWith( | 1674 | mock_get_pod_machines, MockCalledOnceWith( |
294 | 1641 | url, headers, logical_drives, targets)) | 1675 | url, headers, remote_drives, logical_drives, targets)) |
295 | 1642 | self.assertThat(mock_get_pod_hints, MockCalledOnceWith( | 1676 | self.assertThat(mock_get_pod_hints, MockCalledOnceWith( |
296 | 1643 | mock_get_pod_resources.return_value)) | 1677 | mock_get_pod_resources.return_value)) |
297 | 1644 | 1678 | ||
298 | @@ -1815,7 +1849,7 @@ | |||
299 | 1815 | for _ in range(3) | 1849 | for _ in range(3) |
300 | 1816 | } | 1850 | } |
301 | 1817 | remote_drives = set([ | 1851 | remote_drives = set([ |
303 | 1818 | factory.make_name('target_path') | 1852 | factory.make_name('target_path').encode('utf-8') |
304 | 1819 | for _ in range(3) | 1853 | for _ in range(3) |
305 | 1820 | ]) | 1854 | ]) |
306 | 1821 | mock_scrape_logical_drives_and_targets = self.patch( | 1855 | mock_scrape_logical_drives_and_targets = self.patch( |
Looks good.