Merge lp:~newell-jensen/maas/fix-1685835 into lp:~maas-committers/maas/trunk

Proposed by Newell Jensen
Status: Merged
Approved by: Blake Rouse
Approved revision: no longer in the source branch.
Merged at revision: 6018
Proposed branch: lp:~newell-jensen/maas/fix-1685835
Merge into: lp:~maas-committers/maas/trunk
Diff against target: 306 lines (+104/-33)
2 files modified
src/provisioningserver/drivers/pod/rsd.py (+55/-18)
src/provisioningserver/drivers/pod/tests/test_rsd.py (+49/-15)
To merge this branch: bzr merge lp:~newell-jensen/maas/fix-1685835
Reviewer: Blake Rouse (community)
Status: Approve
Review via email: mp+323188@code.launchpad.net

Commit message

Ignore remote storage when the InitiatorIQN is non-empty in pre-composed nodes. This is taken into account when calculating the total, available, and used iSCSI storage on the RSD pod.
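
For context, a minimal sketch of the filtering rule this branch adds. The data shapes and the helper name are simplified illustrations, not the driver's actual API (the real code keys targets by byte-string paths and mutates shared collections):

    def usable_remote_drives(node_remote_drives, targets):
        """Return the remote drive paths that count towards MAAS storage."""
        usable = []
        for drive in node_remote_drives:
            target = targets[drive['@odata.id']]
            initiator = target.get('Initiator', [{}])[0]
            iqn = initiator.get('iSCSI', {}).get('InitiatorIQN', '')
            if iqn:
                # A non-empty InitiatorIQN means the storage is already
                # claimed by a pre-composed node, so it is ignored.
                continue
            usable.append(drive['@odata.id'])
        return usable

    targets = {
        '/redfish/v1/Services/1/Targets/1': {
            'Initiator': [{'iSCSI': {'InitiatorIQN': ''}}]},
        '/redfish/v1/Services/1/Targets/2': {
            'Initiator': [{'iSCSI': {'InitiatorIQN': 'ALL'}}]},
    }
    drives = [{'@odata.id': path} for path in targets]
    print(usable_remote_drives(drives, targets))
    # -> ['/redfish/v1/Services/1/Targets/1']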

Revision history for this message
Blake Rouse (blake-rouse) wrote:

Looks good.

review: Approve

Preview Diff

1=== modified file 'src/provisioningserver/drivers/pod/rsd.py'
2--- src/provisioningserver/drivers/pod/rsd.py 2017-04-24 20:10:24 +0000
3+++ src/provisioningserver/drivers/pod/rsd.py 2017-04-25 23:06:27 +0000
4@@ -519,17 +519,36 @@
5 discovered_machine_block_device)
6
7 def get_pod_machine_remote_storages(
8- self, node_data, url, headers, logical_drives,
9+ self, node_data, url, headers, remote_drives, logical_drives,
10 targets, discovered_machine, request=None):
11 """Get pod machine remote storages."""
12- remote_drives = node_data.get('Links', {}).get('RemoteDrives', [])
13- for remote_drive in remote_drives:
14+ node_remote_drives = node_data.get('Links', {}).get('RemoteDrives', [])
15+ remote_drives_to_delete = []
16+ logical_drives_to_delete = []
17+ for node_remote_drive in node_remote_drives:
18+ target_data = targets[
19+ node_remote_drive['@odata.id'].lstrip('/').encode('utf-8')]
20+ initiator = target_data.get('Initiator')[0]
21+ initiator_iqn = initiator.get('iSCSI', {}).get('InitiatorIQN')
22+ if initiator_iqn:
23+                # Since InitiatorIQN is not an empty string, this storage
24+                # will not be included in MAAS. Remove this remote drive,
25+                # its target, and the logical drives associated with this
26+                # target.
27+ for lv, lv_data in logical_drives.items():
28+ lv_targets = lv_data.get('Links', {}).get('Targets', [])
29+ for lv_target in lv_targets:
30+ if (node_remote_drive['@odata.id'] ==
31+ lv_target['@odata.id']):
32+ remote_drives_to_delete.append(
33+ node_remote_drive['@odata.id'])
34+ logical_drives_to_delete.append(lv)
35+ continue
36+
37 discovered_machine_block_device = (
38 DiscoveredMachineBlockDevice(
39 model=None, serial=None, size=0,
40 type=BlockDeviceType.ISCSI))
41- target_data = targets[
42- remote_drive['@odata.id'].lstrip('/').encode('utf-8')]
43 addresses = target_data.get('Addresses')[0]
44 host = addresses.get('iSCSI', {}).get('TargetPortalIP')
45 proto = '6' # curtin currently only supports TCP.
46@@ -547,9 +566,11 @@
47 # find which one contains this remote drive.
48 for lv, lv_data in logical_drives.items():
49 lv_targets = lv_data.get('Links', {}).get('Targets', [])
50- if remote_drive in lv_targets:
51- discovered_machine_block_device.size = float(
52- lv_data['CapacityGiB']) * (1024 ** 3)
53+ for lv_target in lv_targets:
54+ if (node_remote_drive['@odata.id'] ==
55+ lv_target['@odata.id']):
56+ discovered_machine_block_device.size = float(
57+ lv_data['CapacityGiB']) * (1024 ** 3)
58
59 # Map the tags from the request block devices to the discovered
60 # block devices. This ensures that the composed machine has the
61@@ -570,6 +591,16 @@
62 discovered_machine.block_devices.append(
63 discovered_machine_block_device)
64
65+        # Remove the remote drives, targets, and logical drives that are
66+        # no longer needed. The pruned collections are used later when
67+        # calculating the total usable iSCSI remote storage.
68+ for remote_drive in set(remote_drives_to_delete):
69+ del targets[
70+ remote_drive.lstrip('/').encode('utf-8')]
71+ remote_drives.remove(remote_drive)
72+ for logical_drive in set(logical_drives_to_delete):
73+ del logical_drives[logical_drive]
74+
75 @inlineCallbacks
76 def get_pod_machine_interfaces(
77 self, node_data, url, headers, discovered_machine):
78@@ -627,7 +658,8 @@
79
80 @inlineCallbacks
81 def get_pod_machine(
82- self, node, url, headers, logical_drives, targets, request=None):
83+ self, node, url, headers, remote_drives,
84+ logical_drives, targets, request=None):
85 """Get pod composed machine.
86
87 If required resources cannot be found, this
88@@ -661,7 +693,7 @@
89 node_data, url, headers, discovered_machine, request)
90 # Get remote storages.
91 self.get_pod_machine_remote_storages(
92- node_data, url, headers, logical_drives,
93+ node_data, url, headers, remote_drives, logical_drives,
94 targets, discovered_machine, request)
95 # Get interfaces.
96 yield self.get_pod_machine_interfaces(
97@@ -674,7 +706,8 @@
98
99 @inlineCallbacks
100 def get_pod_machines(
101- self, url, headers, logical_drives, targets, request=None):
102+ self, url, headers, remote_drives,
103+ logical_drives, targets, request=None):
104 """Get pod composed machines.
105
106 If required resources cannot be found, these
107@@ -688,7 +721,8 @@
108 # Iterate over all composed nodes in the pod.
109 for node in nodes:
110 discovered_machine = yield self.get_pod_machine(
111- node, url, headers, logical_drives, targets, request)
112+ node, url, headers, remote_drives,
113+ logical_drives, targets, request)
114 discovered_machines.append(discovered_machine)
115 return discovered_machines
116
117@@ -729,21 +763,24 @@
118 logical_drives, targets = yield self.scrape_logical_drives_and_targets(
119 url, headers)
120 remote_drives = yield self.scrape_remote_drives(url, headers)
121+
122+ # Discover composed machines.
123+ pod_machines = yield self.get_pod_machines(
124+ url, headers, remote_drives, logical_drives, targets)
125+
126+ # Discover pod resources.
127 pod_remote_storage, pod_hints_remote_storage = (
128 self.calculate_pod_remote_storage(
129 remote_drives, logical_drives, targets))
130-
131- # Discover pod resources.
132 discovered_pod = yield self.get_pod_resources(url, headers)
133
134+ # Add machines to pod.
135+ discovered_pod.machines = pod_machines
136+
137 # Discover pod remote storage resources.
138 discovered_pod.capabilities.append(Capabilities.ISCSI_STORAGE)
139 discovered_pod.iscsi_storage = pod_remote_storage
140
141- # Discover composed machines.
142- discovered_pod.machines = yield self.get_pod_machines(
143- url, headers, logical_drives, targets)
144-
145 # Discover pod hints.
146 discovered_pod.hints = self.get_pod_hints(discovered_pod)
147 discovered_pod.hints.iscsi_storage = pod_hints_remote_storage
148
149=== modified file 'src/provisioningserver/drivers/pod/tests/test_rsd.py'
150--- src/provisioningserver/drivers/pod/tests/test_rsd.py 2017-04-24 20:10:24 +0000
151+++ src/provisioningserver/drivers/pod/tests/test_rsd.py 2017-04-25 23:06:27 +0000
152@@ -1013,7 +1013,7 @@
153 b"redfish/v1/Services/1/Targets/3": SAMPLE_JSON_TARGET,
154 }
155 remote_drives = set(
156- "redfish/v1/Services/1/Targets/1")
157+ b"/redfish/v1/Services/1/Targets/1")
158
159 remote_storage = driver.calculate_remote_storage(
160 remote_drives, logical_drives, targets)
161@@ -1313,23 +1313,51 @@
162 context = make_context()
163 url = driver.get_url(context)
164 headers = driver.make_auth_headers(**context)
165- node_data = SAMPLE_JSON_NODE
166 discovered_machine = make_discovered_machine(block_devices=[])
167- TARGET_CHANGED = deepcopy(SAMPLE_JSON_TARGET)
168- TARGET_CHANGED['Addresses'][0]['iSCSI']['TargetLUN'].append({'LUN': 3})
169+ node_data = deepcopy(SAMPLE_JSON_NODE)
170+ node_data['Links']['RemoteDrives'].append({
171+ "@odata.id": "/redfish/v1/Services/1/Targets/3"})
172+ LV_CHANGED = deepcopy(SAMPLE_JSON_LV)
173+ LV_CHANGED['Links']['Targets'].append({
174+ "@odata.id": "/redfish/v1/Services/1/Targets/3"})
175+ TARGET_CHANGED_1 = deepcopy(SAMPLE_JSON_TARGET)
176+ TARGET_CHANGED_2 = deepcopy(SAMPLE_JSON_TARGET)
177+ TARGET_CHANGED_1['Addresses'][0]['iSCSI']['TargetLUN'].append(
178+ {'LUN': 3})
179+ TARGET_CHANGED_2['Initiator'][0]['iSCSI']['InitiatorIQN'] = "ALL"
180+ remote_drives = set([
181+ "/redfish/v1/Services/1/Targets/1",
182+ "/redfish/v1/Services/1/Targets/2",
183+ "/redfish/v1/Services/1/Targets/3",
184+ ])
185 logical_drives = {
186 b"redfish/v1/Services/1/LogicalDrives/1": SAMPLE_JSON_LV,
187 b"redfish/v1/Services/1/LogicalDrives/2": SAMPLE_JSON_LVG,
188- b"redfish/v1/Services/1/LogicalDrives/3": SAMPLE_JSON_LV
189+ b"redfish/v1/Services/1/LogicalDrives/3": LV_CHANGED,
190 }
191 targets = {
192 b"redfish/v1/Services/1/Targets/1": SAMPLE_JSON_TARGET,
193- b"redfish/v1/Services/1/Targets/2": TARGET_CHANGED,
194+ b"redfish/v1/Services/1/Targets/2": TARGET_CHANGED_1,
195+ b"redfish/v1/Services/1/Targets/3": TARGET_CHANGED_2,
196 }
197
198 driver.get_pod_machine_remote_storages(
199- node_data, url, headers, logical_drives,
200+ node_data, url, headers, remote_drives, logical_drives,
201 targets, discovered_machine)
202+ self.assertEquals(
203+ set([
204+ "/redfish/v1/Services/1/Targets/1",
205+ "/redfish/v1/Services/1/Targets/2"]), remote_drives)
206+ self.assertEquals(
207+ {
208+ b"redfish/v1/Services/1/LogicalDrives/1": SAMPLE_JSON_LV,
209+ b"redfish/v1/Services/1/LogicalDrives/2": SAMPLE_JSON_LVG
210+ }, logical_drives)
211+ self.assertEquals(
212+ {
213+ b"redfish/v1/Services/1/Targets/1": SAMPLE_JSON_TARGET,
214+ b"redfish/v1/Services/1/Targets/2": TARGET_CHANGED_1
215+ }, targets)
216 self.assertThat(
217 discovered_machine.block_devices, MatchesListwise([
218 MatchesStructure(
219@@ -1368,6 +1396,8 @@
220 discovered_machine = make_discovered_machine(block_devices=[])
221 TARGET_CHANGED = deepcopy(SAMPLE_JSON_TARGET)
222 TARGET_CHANGED['Addresses'][0]['iSCSI']['TargetLUN'].append({'LUN': 3})
223+ remote_drives = set(
224+ b"/redfish/v1/Services/1/Targets/1")
225 logical_drives = {
226 b"redfish/v1/Services/1/LogicalDrives/1": SAMPLE_JSON_LV,
227 b"redfish/v1/Services/1/LogicalDrives/2": SAMPLE_JSON_LVG,
228@@ -1379,7 +1409,7 @@
229 }
230
231 driver.get_pod_machine_remote_storages(
232- node_data, url, headers, logical_drives,
233+ node_data, url, headers, remote_drives, logical_drives,
234 targets, discovered_machine, request)
235 self.assertThat(
236 discovered_machine.block_devices, MatchesListwise([
237@@ -1481,6 +1511,8 @@
238 node_data = SAMPLE_JSON_NODE
239 TARGET_CHANGED = deepcopy(SAMPLE_JSON_TARGET)
240 TARGET_CHANGED['Addresses'][0]['iSCSI']['TargetLUN'].append({'LUN': 3})
241+ remote_drives = set(
242+ b"/redfish/v1/Services/1/Targets/1")
243 logical_drives = {
244 b"redfish/v1/Services/1/LogicalDrives/1": SAMPLE_JSON_LV,
245 b"redfish/v1/Services/1/LogicalDrives/2": SAMPLE_JSON_LVG,
246@@ -1506,7 +1538,7 @@
247 driver, 'get_pod_machine_interfaces')
248
249 machine = yield driver.get_pod_machine(
250- b"redfish/v1/Nodes/1", url, headers,
251+ b"redfish/v1/Nodes/1", url, headers, remote_drives,
252 logical_drives, targets, request)
253 self.assertEquals(node_data['Name'], machine.hostname)
254 self.assertEquals(
255@@ -1523,8 +1555,8 @@
256 MockCalledOnceWith(node_data, url, headers, machine, request))
257 self.assertThat(
258 mock_get_pod_machine_remote_storages,
259- MockCalledOnceWith(node_data, url, headers, logical_drives,
260- targets, machine, request))
261+ MockCalledOnceWith(node_data, url, headers, remote_drives,
262+ logical_drives, targets, machine, request))
263 self.assertThat(
264 mock_get_pod_machine_interfaces,
265 MockCalledOnceWith(node_data, url, headers, machine))
266@@ -1535,6 +1567,8 @@
267 context = make_context()
268 url = driver.get_url(context)
269 headers = driver.make_auth_headers(**context)
270+ remote_drives = set(
271+ b"redfish/v1/Services/1/Targets/1")
272 logical_drives = {
273 factory.make_name('lv_path'): factory.make_name('lv_data')
274 for _ in range(3)
275@@ -1557,11 +1591,11 @@
276 mock_get_pod_machine.return_value = expected_machines
277
278 discovered_machines = yield driver.get_pod_machines(
279- url, headers, logical_drives, targets)
280+ url, headers, remote_drives, logical_drives, targets)
281 self.assertEquals(1, len(discovered_machines))
282 self.assertThat(mock_get_pod_machine, MockCalledOnceWith(
283 b"redfish/v1/Nodes/1", url, headers,
284- logical_drives, targets, None))
285+ remote_drives, logical_drives, targets, None))
286
287 def test__get_pod_hints(self):
288 driver = RSDPodDriver()
289@@ -1638,7 +1672,7 @@
290 mock_get_pod_resources, MockCalledOnceWith(url, headers))
291 self.assertThat(
292 mock_get_pod_machines, MockCalledOnceWith(
293- url, headers, logical_drives, targets))
294+ url, headers, remote_drives, logical_drives, targets))
295 self.assertThat(mock_get_pod_hints, MockCalledOnceWith(
296 mock_get_pod_resources.return_value))
297
298@@ -1815,7 +1849,7 @@
299 for _ in range(3)
300 }
301 remote_drives = set([
302- factory.make_name('target_path')
303+ factory.make_name('target_path').encode('utf-8')
304 for _ in range(3)
305 ])
306 mock_scrape_logical_drives_and_targets = self.patch(