Merge lp:~blake-rouse/maas/rsd-compose-storage into lp:~maas-committers/maas/trunk

Proposed by Blake Rouse
Status: Rejected
Rejected by: MAAS Lander
Proposed branch: lp:~blake-rouse/maas/rsd-compose-storage
Merge into: lp:~maas-committers/maas/trunk
Diff against target: 951 lines (+442/-140)
2 files modified
src/provisioningserver/drivers/pod/rsd.py (+226/-74)
src/provisioningserver/drivers/pod/tests/test_rsd.py (+216/-66)
To merge this branch: bzr merge lp:~blake-rouse/maas/rsd-compose-storage
Reviewer Review Type Date Requested Status
MAAS Maintainers Pending
Review via email: mp+322203@code.launchpad.net
To post a comment you must log in.
5936. By Blake Rouse

Merge pod-storage discovery.

Revision history for this message
MAAS Lander (maas-lander) wrote :

Transitioned to Git.

lp:maas has now moved from Bzr to Git.
Please propose your branches with Launchpad using Git.

git clone https://git.launchpad.net/maas

Unmerged revisions

5936. By Blake Rouse

Merge pod-storage discovery.

5935. By Blake Rouse

Improve the logic of generating the JSON payload to allocation.

5934. By Blake Rouse

Merge trunk.

5933. By Blake Rouse

Use newell's branch to set the master to clone from.

5932. By Blake Rouse

Merge trunk.

5931. By Blake Rouse

Merge rsd scrape storage.

5930. By Blake Rouse

Add remote storage to compose.

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== modified file 'src/provisioningserver/drivers/pod/rsd.py'
--- src/provisioningserver/drivers/pod/rsd.py 2017-04-06 19:59:20 +0000
+++ src/provisioningserver/drivers/pod/rsd.py 2017-04-10 13:49:22 +0000
@@ -19,6 +19,7 @@
19 SETTING_SCOPE,19 SETTING_SCOPE,
20)20)
21from provisioningserver.drivers.pod import (21from provisioningserver.drivers.pod import (
22 BlockDeviceType,
22 Capabilities,23 Capabilities,
23 DiscoveredMachine,24 DiscoveredMachine,
24 DiscoveredMachineBlockDevice,25 DiscoveredMachineBlockDevice,
@@ -31,7 +32,10 @@
31)32)
32from provisioningserver.logger import get_maas_logger33from provisioningserver.logger import get_maas_logger
33from provisioningserver.rpc.exceptions import PodInvalidResources34from provisioningserver.rpc.exceptions import PodInvalidResources
34from provisioningserver.utils.twisted import asynchronous35from provisioningserver.utils.twisted import (
36 asynchronous,
37 pause,
38)
35from twisted.internet import reactor39from twisted.internet import reactor
36from twisted.internet._sslverify import (40from twisted.internet._sslverify import (
37 ClientTLSOptions,41 ClientTLSOptions,
@@ -229,12 +233,7 @@
229 targets.append(remote_drive['@odata.id'])233 targets.append(remote_drive['@odata.id'])
230 return set(targets)234 return set(targets)
231235
232 @inlineCallbacks236 def calculate_remote_storage(self, remote_drives, logical_drives, targets):
233 def calculate_remote_storage(self, url, headers):
234 logical_drives, target_links = (
235 yield self.scrape_logical_drives_and_targets(url, headers))
236 remote_drives = yield self.scrape_remote_drives(url, headers)
237
238 # Find LVGs and LVs out of all logical drives.237 # Find LVGs and LVs out of all logical drives.
239 lvgs = {}238 lvgs = {}
240 lvs = {}239 lvs = {}
@@ -251,7 +250,8 @@
251 total = 0250 total = 0
252 available = 0251 available = 0
253 master_id = 0252 master_id = 0
254 master = None253 master_size = 0
254 master_path = None
255255
256 # Find size of LVG and get LVs for this LVG.256 # Find size of LVG and get LVs for this LVG.
257 lvg_capacity = lvg_data['CapacityGiB']257 lvg_capacity = lvg_data['CapacityGiB']
@@ -282,8 +282,9 @@
282 if not (lv_target_links & remote_drives):282 if not (lv_target_links & remote_drives):
283 lvs_capacity_unused += lv_capacity283 lvs_capacity_unused += lv_capacity
284 new_master_id = int(lv_info['Id'])284 new_master_id = int(lv_info['Id'])
285 if (master is None or master_id > new_master_id):285 if (master_path is None or master_id > new_master_id):
286 master = lv_link286 master_path = b"/" + lv_link
287 master_size = lv_capacity
287 master_id = new_master_id288 master_id = new_master_id
288289
289 total = (290 total = (
@@ -293,10 +294,34 @@
293 remote_storage[lvg] = {294 remote_storage[lvg] = {
294 'total': total,295 'total': total,
295 'available': available,296 'available': available,
296 'master': master297 'master': {
298 'path': master_path,
299 'size': master_size
300 }
297 }301 }
298 return remote_storage302 return remote_storage
299303
304 def calculate_pod_remote_storage(
305 self, remote_drives, logical_drives, targets):
306 """Calculate the total sum of LVG capacities in the pod and retrieve the
307 largest LV for the hints.
308 """
309 remote_storage = self.calculate_remote_storage(
310 remote_drives, logical_drives, targets)
311
312 total_capacity = 0
313 for lvg, rs_data in remote_storage.items():
314 total_capacity += rs_data['total']
315
316 hint_capacity = 0
317 for lv, lv_data in logical_drives.items():
318 if lv_data['Mode'] == "LV":
319 if lv_data['CapacityGiB'] > hint_capacity:
320 hint_capacity = lv_data['CapacityGiB']
321 hint_capacity *= 1024 ** 3
322
323 return total_capacity, hint_capacity
324
300 @inlineCallbacks325 @inlineCallbacks
301 def get_pod_memory_resources(self, url, headers, system):326 def get_pod_memory_resources(self, url, headers, system):
302 """Get all the memory resources for the given system."""327 """Get all the memory resources for the given system."""
@@ -408,7 +433,7 @@
408 return discovered_pod433 return discovered_pod
409434
410 @inlineCallbacks435 @inlineCallbacks
411 def get_pod_machine(self, node, url, headers):436 def get_pod_machine(self, node, url, headers, logical_drives, targets):
412 """Get pod composed machine.437 """Get pod composed machine.
413438
414 If required resources cannot be found, this439 If required resources cannot be found, this
@@ -425,44 +450,37 @@
425 node_data, _ = yield self.redfish_request(450 node_data, _ = yield self.redfish_request(
426 b"GET", join(url, node), headers)451 b"GET", join(url, node), headers)
427 # Get hostname.452 # Get hostname.
428 hostname = node_data.get('Name')453 discovered_machine.hostname = node_data['Name']
429 if hostname is not None:
430 discovered_machine.hostname = hostname
431 # Get power state.454 # Get power state.
432 power_state = node_data.get('PowerState')455 power_state = node_data['PowerState']
433 if power_state is not None:456 discovered_machine.power_state = RSD_SYSTEM_POWER_STATE.get(
434 discovered_machine.power_state = RSD_SYSTEM_POWER_STATE.get(457 power_state)
435 power_state)
436 # Get memories.458 # Get memories.
437 memories = node_data.get('Links', {}).get('Memory')459 memories = node_data.get('Links', {}).get('Memory', [])
438 for memory in memories:460 for memory in memories:
439 memory_data, _ = yield self.redfish_request(461 memory_data, _ = yield self.redfish_request(
440 b"GET", join(url, memory[462 b"GET", join(url, memory[
441 '@odata.id'].lstrip('/').encode('utf-8')), headers)463 '@odata.id'].lstrip('/').encode('utf-8')), headers)
442 mem = memory_data.get('CapacityMiB')464 discovered_machine.memory += memory_data['CapacityMiB']
443 if mem is not None:
444 discovered_machine.memory += mem
445 # Get processors.465 # Get processors.
446 processors = node_data.get('Links', {}).get('Processors')466 processors = node_data.get('Links', {}).get('Processors', [])
447 for processor in processors:467 for processor in processors:
448 processor_data, _ = yield self.redfish_request(468 processor_data, _ = yield self.redfish_request(
449 b"GET", join(url, processor[469 b"GET", join(url, processor[
450 '@odata.id'].lstrip('/').encode('utf-8')), headers)470 '@odata.id'].lstrip('/').encode('utf-8')), headers)
451 # Using 'TotalThreads' instead of 'TotalCores'471 # Using 'TotalThreads' instead of 'TotalCores'
452 # as this is what MAAS finds when commissioning.472 # as this is what MAAS finds when commissioning.
453 total_threads = processor_data.get('TotalThreads')473 discovered_machine.cores += processor_data['TotalThreads']
454 if total_threads is not None:
455 discovered_machine.cores += total_threads
456 discovered_machine.cpu_speeds.append(474 discovered_machine.cpu_speeds.append(
457 processor_data.get('MaxSpeedMHz'))475 processor_data['MaxSpeedMHz'])
458 # Set architecture to first processor476 # Set architecture to first processor
459 # architecture type found.477 # architecture type found.
460 if not discovered_machine.architecture:478 if not discovered_machine.architecture:
461 arch = processor_data.get('InstructionSet')479 arch = processor_data['InstructionSet']
462 discovered_machine.architecture = (480 discovered_machine.architecture = (
463 RSD_ARCH.get(arch, arch))481 RSD_ARCH.get(arch, arch))
464 # Get local storages.482 # Get local storages.
465 local_drives = node_data.get('Links', {}).get('LocalDrives')483 local_drives = node_data.get('Links', {}).get('LocalDrives', [])
466 for local_drive in local_drives:484 for local_drive in local_drives:
467 discovered_machine_block_device = (485 discovered_machine_block_device = (
468 DiscoveredMachineBlockDevice(486 DiscoveredMachineBlockDevice(
@@ -470,38 +488,61 @@
470 drive_data, _ = yield self.redfish_request(488 drive_data, _ = yield self.redfish_request(
471 b"GET", join(url, local_drive[489 b"GET", join(url, local_drive[
472 '@odata.id'].lstrip('/').encode('utf-8')), headers)490 '@odata.id'].lstrip('/').encode('utf-8')), headers)
473 model = drive_data.get('Model')491 discovered_machine_block_device.model = drive_data['Model']
474 if model is not None:492 discovered_machine_block_device.serial = drive_data['SerialNumber']
475 discovered_machine_block_device.model = model493 discovered_machine_block_device.size = float(
476 serial_number = drive_data.get('SerialNumber')494 drive_data['CapacityGiB']) * (1024 ** 3)
477 if serial_number is not None:495 if drive_data['Type'] == 'SSD':
478 discovered_machine_block_device.serial = (
479 serial_number)
480 capacity = drive_data.get('CapacityGiB')
481 if capacity is not None:
482 # GiB to Bytes.
483 discovered_machine_block_device.size = float(
484 capacity) * 1073741824
485 if drive_data.get('Type') == 'SSD':
486 discovered_machine_block_device.tags = ['ssd']496 discovered_machine_block_device.tags = ['ssd']
487 discovered_machine.block_devices.append(497 discovered_machine.block_devices.append(
488 discovered_machine_block_device)498 discovered_machine_block_device)
499 # Get remote storages.
500 remote_drives = node_data.get('Links', {}).get('RemoteDrives', [])
501 for remote_drive in remote_drives:
502 discovered_machine_block_device = (
503 DiscoveredMachineBlockDevice(
504 model=None, serial=None, size=0,
505 type=BlockDeviceType.ISCSI))
506 target_data = targets[
507 remote_drive['@odata.id'].lstrip('/').encode('utf-8')]
508 addresses = target_data.get('Addresses')[0]
509 host = addresses.get('iSCSI', {}).get('TargetPortalIP')
510 proto = '6' # curtin currently only supports TCP.
511 port = str(addresses.get('iSCSI', {}).get('TargetPortalPort'))
512 luns = addresses.get('iSCSI', {}).get('TargetLUN', [])
513 if luns:
514 lun = str(luns[0]['LUN'])
515 else:
516 # Set LUN to 0 if not available.
517 lun = '0'
518 target_name = addresses.get('iSCSI', {}).get('TargetIQN')
519 discovered_machine_block_device.iscsi_target = ':'.join(
520 [host, proto, port, lun, target_name])
521 discovered_machine_block_device.tags = ['iscsi']
522 # Loop through all the logical drives till we
523 # find which one contains this remote drive.
524 for lv, lv_data in logical_drives.items():
525 lv_targets = lv_data.get('Links', {}).get('Targets', [])
526 if remote_drive in lv_targets:
527 discovered_machine_block_device.size = float(
528 lv_data['CapacityGiB']) * (1024 ** 3)
529 discovered_machine.block_devices.append(
530 discovered_machine_block_device)
489 # Get interfaces.531 # Get interfaces.
490 interfaces = node_data.get('Links', {}).get('EthernetInterfaces')532 interfaces = node_data.get('Links', {}).get('EthernetInterfaces', [])
491 for interface in interfaces:533 for interface in interfaces:
492 discovered_machine_interface = DiscoveredMachineInterface(534 discovered_machine_interface = DiscoveredMachineInterface(
493 mac_address='')535 mac_address='')
494 interface_data, _ = yield self.redfish_request(536 interface_data, _ = yield self.redfish_request(
495 b"GET", join(url, interface[537 b"GET", join(url, interface[
496 '@odata.id'].lstrip('/').encode('utf-8')), headers)538 '@odata.id'].lstrip('/').encode('utf-8')), headers)
497 mac_address = interface_data.get('MACAddress')539 discovered_machine_interface.mac_address = (
498 if mac_address is not None:540 interface_data['MACAddress'])
499 discovered_machine_interface.mac_address = mac_address541 nic_speed = interface_data['SpeedMbps']
500 nic_speed = interface_data.get('SpeedMbps')
501 if nic_speed is not None:542 if nic_speed is not None:
502 if nic_speed < 1000:543 if nic_speed < 1000:
503 discovered_machine_interface.tags = ["e%s" % nic_speed]544 discovered_machine_interface.tags = ["e%s" % nic_speed]
504 elif nic_speed == "1000":545 elif nic_speed == 1000:
505 discovered_machine_interface.tags = ["1g", "e1000"]546 discovered_machine_interface.tags = ["1g", "e1000"]
506 else:547 else:
507 # We know that the Mbps > 1000548 # We know that the Mbps > 1000
@@ -522,9 +563,8 @@
522 vlan = vlan.lstrip('/').encode('utf-8')563 vlan = vlan.lstrip('/').encode('utf-8')
523 vlan_data, _ = yield self.redfish_request(564 vlan_data, _ = yield self.redfish_request(
524 b"GET", join(url, vlan), headers)565 b"GET", join(url, vlan), headers)
525 vlan_id = vlan_data.get('VLANId')566 discovered_machine_interface.vid = (
526 if vlan_id is not None:567 vlan_data['VLANId'])
527 discovered_machine_interface.vid = vlan_id
528 else:568 else:
529 # If no NeighborPort, this interface is on569 # If no NeighborPort, this interface is on
530 # the management network.570 # the management network.
@@ -547,7 +587,7 @@
547 return discovered_machine587 return discovered_machine
548588
549 @inlineCallbacks589 @inlineCallbacks
550 def get_pod_machines(self, url, headers):590 def get_pod_machines(self, url, headers, logical_drives, targets):
551 """Get pod composed machines.591 """Get pod composed machines.
552592
553 If required resources cannot be found, these593 If required resources cannot be found, these
@@ -560,7 +600,8 @@
560 nodes = yield self.list_resources(nodes_uri, headers)600 nodes = yield self.list_resources(nodes_uri, headers)
561 # Iterate over all composed nodes in the pod.601 # Iterate over all composed nodes in the pod.
562 for node in nodes:602 for node in nodes:
563 discovered_machine = yield self.get_pod_machine(node, url, headers)603 discovered_machine = yield self.get_pod_machine(
604 node, url, headers, logical_drives, targets)
564 discovered_machines.append(discovered_machine)605 discovered_machines.append(discovered_machine)
565 return discovered_machines606 return discovered_machines
566607
@@ -573,8 +614,6 @@
573 for cpu_speed in machine.cpu_speeds:614 for cpu_speed in machine.cpu_speeds:
574 if cpu_speed in discovered_pod.cpu_speeds:615 if cpu_speed in discovered_pod.cpu_speeds:
575 discovered_pod.cpu_speeds.remove(cpu_speed)616 discovered_pod.cpu_speeds.remove(cpu_speed)
576 # Delete cpu_speeds place holder.
577 del machine.cpu_speeds
578 used_cores += machine.cores617 used_cores += machine.cores
579 used_memory += machine.memory618 used_memory += machine.memory
580 for blk_dev in machine.block_devices:619 for blk_dev in machine.block_devices:
@@ -600,22 +639,50 @@
600 """639 """
601 url = self.get_url(context)640 url = self.get_url(context)
602 headers = self.make_auth_headers(**context)641 headers = self.make_auth_headers(**context)
642 logical_drives, targets = yield self.scrape_logical_drives_and_targets(
643 url, headers)
644 remote_drives = yield self.scrape_remote_drives(url, headers)
645 pod_remote_storage, pod_hints_remote_storage = (
646 self.calculate_pod_remote_storage(
647 remote_drives, logical_drives, targets))
603648
604 # Discover pod resources.649 # Discover pod resources.
605 discovered_pod = yield self.get_pod_resources(url, headers)650 discovered_pod = yield self.get_pod_resources(url, headers)
606651
652 # Discover pod remote storage resources.
653 discovered_pod.capabilities.append(Capabilities.ISCSI_STORAGE)
654 discovered_pod.iscsi_storage = pod_remote_storage
655
607 # Discover composed machines.656 # Discover composed machines.
608 discovered_pod.machines = yield self.get_pod_machines(657 discovered_pod.machines = yield self.get_pod_machines(
609 url, headers)658 url, headers)
610659
611 # Discover pod hints.660 # Discover pod hints.
612 discovered_pod.hints = self.get_pod_hints(discovered_pod)661 discovered_pod.hints = self.get_pod_hints(discovered_pod)
662 discovered_pod.hints.iscsi_storage = pod_hints_remote_storage
613663
614 # Delete cpu_speeds place holder.
615 del discovered_pod.cpu_speeds
616 return discovered_pod664 return discovered_pod
617665
618 def convert_request_to_json_payload(self, processors, cores, request):666 def select_remote_master(self, remote_storage, size):
667 """Select the remote master drive that has enough space."""
668 for lvg, data in remote_storage.items():
669 if data['master'] and data['available'] >= size:
670 data['available'] -= size
671 return data['master']
672
673 def set_drive_type(self, drive, block_device):
674 """Set type of drive requested on `drive` based on the tags on
675 `block_device`."""
676 if 'ssd' in block_device.tags:
677 drive['Type'] = 'SSD'
678 elif 'nvme' in block_device.tags:
679 drive['Type'] = 'NVMe'
680 elif 'hdd' in block_device.tags:
681 drive['Type'] = 'HDD'
682
683 def convert_request_to_json_payload(
684 self, processors, cores, request,
685 remote_drives, logical_drives, targets):
619 """Convert the RequestedMachine object to JSON."""686 """Convert the RequestedMachine object to JSON."""
620 # The below fields are for RSD allocation.687 # The below fields are for RSD allocation.
621 # Most of these fields are nullable and could be used at688 # Most of these fields are nullable and could be used at
@@ -643,6 +710,14 @@
643 "SerialNumber": None,710 "SerialNumber": None,
644 "Interface": None,711 "Interface": None,
645 }712 }
713 remote_drive = {
714 "CapacityGiB": None,
715 "iSCSIAddress": None,
716 "Master": {
717 "Type": "Clone",
718 "Resource": None,
719 },
720 }
646 interface = {721 interface = {
647 "SpeedMbps": None,722 "SpeedMbps": None,
648 "PrimaryVLAN": None,723 "PrimaryVLAN": None,
@@ -652,41 +727,108 @@
652 "Processors": [],727 "Processors": [],
653 "Memory": [],728 "Memory": [],
654 "LocalDrives": [],729 "LocalDrives": [],
730 "RemoteDrives": [],
655 "EthernetInterfaces": [],731 "EthernetInterfaces": [],
656 }732 }
657 request = request.asdict()
658733
659 # Processors.734 # Processors.
660 for _ in range(processors):735 for _ in range(processors):
661 proc = processor.copy()736 proc = processor.copy()
662 proc['TotalCores'] = cores737 proc['TotalCores'] = cores
663 arch = request.get('architecture')738 arch = request.architecture
664 for key, val in RSD_ARCH.items():739 for key, val in RSD_ARCH.items():
665 if val == arch:740 if val == arch:
666 proc['InstructionSet'] = key741 proc['InstructionSet'] = key
667 # cpu_speed is only optional field in request.742 # cpu_speed is only optional field in request.
668 cpu_speed = request.get('cpu_speed')743 cpu_speed = request.cpu_speed
669 if cpu_speed is not None:744 if cpu_speed is not None:
670 proc['AchievableSpeedMHz'] = cpu_speed745 proc['AchievableSpeedMHz'] = cpu_speed
671 data['Processors'].append(proc)746 data['Processors'].append(proc)
672747
748 # Determine remote storage information if more than one drive is
749 # requested.
750 remote_storage = None
751 if len(request.block_devices) > 1:
752 remote_storage = self.calculate_remote_storage(
753 remote_drives, logical_drives, targets)
754
673 # Block Devices.755 # Block Devices.
674 block_devices = request.get('block_devices')756 #
675 for block_device in block_devices:757 # Tags are matched on the block devices to create different types
676 drive = local_drive.copy()758 # of requested storage for a block device.
677 # Convert from bytes to GiB.759 # local: Locally attached disk (aka. LocalDrive).
678 drive['CapacityGiB'] = block_device['size'] / 1073741824760 # ssd: Locally attached SSD (aka. LocalDrive).
679 data['LocalDrives'].append(drive)761 # hdd: Locally attached HDD (aka. LocalDrive).
762 # nvme: Locally attached NVMe (aka. LocalDrive).
763 # iscsi: Remotely attached disk over ISCSI (aka. RemoteTarget)
764 # (none): Remotely attached disk will be picked unless its the
765 # first disk. First disk is locally attached.
766 block_devices = request.block_devices
767 boot_disk = True
768 for idx, block_device in enumerate(block_devices):
769 if boot_disk:
770 if 'iscsi' in block_device.tags:
771 raise PodActionError(
772 'iSCSI is not supported as being a boot disk.')
773 else:
774 # Force 'local' into the tags if not present.
775 if 'local' not in block_device.tags:
776 block_device.tags.append('local')
777 drive = local_drive.copy()
778 # Convert from bytes to GiB.
779 drive['CapacityGiB'] = block_device.size / (1024 ** 3)
780 self.set_drive_type(drive, block_device.tags)
781 data['LocalDrives'].append(drive)
782 boot_disk = False
783 else:
784 is_local = max(
785 tag in block_device.tags
786 for tag in ['local', 'ssd', 'nvme', 'hdd']
787 )
788 if is_local:
789 # Force the local tag if it wasn't provided.
790 if 'local' not in block_device.tags:
791 block_device.tags.append('local')
792 drive = local_drive.copy()
793 # Convert from bytes to GiB.
794 drive['CapacityGiB'] = block_device.size / (1024 ** 3)
795 self.set_drive_type(drive, block_device.tags)
796 data['LocalDrives'].append(drive)
797 else:
798 # Force 'iscsi' into the tags if not present.
799 if 'iscsi' not in block_device.tags:
800 block_device.tags.append('iscsi')
801 size = block_device.size / (1024 ** 3)
802 # Determine the remote master that can be used.
803 remote_master = self.select_remote_master(
804 remote_storage, size)
805 if remote_master is None:
806 raise PodActionError(
807 'iSCSI remote drive cannot be created because '
808 'not enough space is available.')
809 drive = remote_drive.copy()
810 # Convert from bytes to GiB.
811 drive['CapacityGiB'] = size
812 drive['iSCSIAddress'] = 'iqn.2010-08.io.maas:%s-%s' % (
813 request.hostname, idx)
814 drive['Master']['Resource'] = {
815 '@odata.id': remote_master.decode('utf-8'),
816 }
817 data['RemoteDrives'].append(drive)
818 # Save the iSCSIAddress on the RequestBlockDevice. This is
819 # used to map the DiscoveredMachineBlockDevice tags to
820 # the same tags used during the request.
821 block_device.iscsi_target = drive['iSCSIAddress']
680822
681 # Interfaces.823 # Interfaces.
682 interfaces = request.get('interfaces')824 interfaces = request.interfaces
683 for iface in interfaces:825 for iface in interfaces:
684 nic = interface.copy()826 nic = interface.copy()
685 data['EthernetInterfaces'].append(nic)827 data['EthernetInterfaces'].append(nic)
686828
687 # Memory.829 # Memory.
688 mem = memory.copy()830 mem = memory.copy()
689 mem['CapacityMiB'] = request.get('memory')831 mem['CapacityMiB'] = request.memory
690 data['Memory'].append(mem)832 data['Memory'].append(mem)
691833
692 return json.dumps(data).encode('utf-8')834 return json.dumps(data).encode('utf-8')
@@ -697,6 +839,9 @@
697 url = self.get_url(context)839 url = self.get_url(context)
698 headers = self.make_auth_headers(**context)840 headers = self.make_auth_headers(**context)
699 endpoint = b"redfish/v1/Nodes/Actions/Allocate"841 endpoint = b"redfish/v1/Nodes/Actions/Allocate"
842 logical_drives, targets = (
843 yield self.scrape_logical_drives_and_targets(url, headers))
844 remote_drives = yield self.scrape_remote_drives(url, headers)
700 # Create allocate payload.845 # Create allocate payload.
701 requested_cores = request.cores846 requested_cores = request.cores
702 if requested_cores % 2 != 0:847 if requested_cores % 2 != 0:
@@ -709,9 +854,11 @@
709 # Find the correct processors and cores combination from RSD POD.854
710 processors = 1855 processors = 1
711 cores = requested_cores856 cores = requested_cores
857 response_headers = None
712 while True:858 while True:
713 payload = self.convert_request_to_json_payload(859 payload = self.convert_request_to_json_payload(
714 processors, cores, request)860 processors, cores, request, remote_drives,
861 logical_drives, targets)
715 try:862 try:
716 _, response_headers = yield self.redfish_request(863 _, response_headers = yield self.redfish_request(
717 b"POST", join(url, endpoint), headers,864 b"POST", join(url, endpoint), headers,
@@ -732,14 +879,16 @@
732 node_id = location[0].rsplit('/', 1)[-1]879 node_id = location[0].rsplit('/', 1)[-1]
733 node_path = location[0].split('/', 3)[-1]880 node_path = location[0].split('/', 3)[-1]
734881
735 # Retrieve new node.
736 discovered_machine = yield self.get_pod_machine(
737 node_path.encode('utf-8'), url, headers)
738 # Assemble the node.882 # Assemble the node.
739 yield self.assemble_node(url, node_id.encode('utf-8'), headers)883 yield self.assemble_node(url, node_id.encode('utf-8'), headers)
740 # Set to PXE boot.884 # Set to PXE boot.
741 yield self.set_pxe_boot(url, node_id.encode('utf-8'), headers)885 yield self.set_pxe_boot(url, node_id.encode('utf-8'), headers)
742886
887 # Retrieve new node.
888 discovered_machine = yield self.get_pod_machine(
889 node_path.encode('utf-8'), url,
890 headers, logical_drives, targets)
891
743 # Retrieve pod resources.892 # Retrieve pod resources.
744 discovered_pod = yield self.get_pod_resources(url, headers)893 discovered_pod = yield self.get_pod_resources(url, headers)
744 # Retrieve pod hints.894
@@ -830,8 +979,11 @@
830 node_state = yield self.get_composed_node_state(979 node_state = yield self.get_composed_node_state(
831 url, node_id, headers)980 url, node_id, headers)
832 while node_state == 'Assembling':981 while node_state == 'Assembling':
982 # Wait 2 seconds before getting updated state.
983 yield pause(2)
833 node_state = yield self.get_composed_node_state(984 node_state = yield self.get_composed_node_state(
834 url, node_id, headers)985 url, node_id, headers)
986
835 # Check one last time if the state has became `Failed`.987 # Check one last time if the state has became `Failed`.
836 if node_state == 'Failed':988 if node_state == 'Failed':
837 # Broken system.989 # Broken system.
838990
=== modified file 'src/provisioningserver/drivers/pod/tests/test_rsd.py'
--- src/provisioningserver/drivers/pod/tests/test_rsd.py 2017-04-06 19:26:13 +0000
+++ src/provisioningserver/drivers/pod/tests/test_rsd.py 2017-04-10 13:49:22 +0000
@@ -28,6 +28,7 @@
28 MAASTwistedRunTest,28 MAASTwistedRunTest,
29)29)
30from provisioningserver.drivers.pod import (30from provisioningserver.drivers.pod import (
31 BlockDeviceType,
31 Capabilities,32 Capabilities,
32 DiscoveredMachine,33 DiscoveredMachine,
33 DiscoveredMachineBlockDevice,34 DiscoveredMachineBlockDevice,
@@ -50,6 +51,7 @@
50from testtools import ExpectedException51from testtools import ExpectedException
51from testtools.matchers import (52from testtools.matchers import (
52 Equals,53 Equals,
54 Is,
53 MatchesDict,55 MatchesDict,
54 MatchesListwise,56 MatchesListwise,
55 MatchesStructure,57 MatchesStructure,
@@ -279,6 +281,8 @@
279 }],281 }],
280 "RemoteDrives": [{282 "RemoteDrives": [{
281 "@odata.id": "/redfish/v1/Services/1/Targets/1"283 "@odata.id": "/redfish/v1/Services/1/Targets/1"
284 }, {
285 "@odata.id": "/redfish/v1/Services/1/Targets/2"
282 }],286 }],
283 "ManagedBy": [{287 "ManagedBy": [{
284 "@odata.id": "/redfish/v1/Managers/1"288 "@odata.id": "/redfish/v1/Managers/1"
@@ -600,6 +604,8 @@
600 }],604 }],
601 "Targets": [{605 "Targets": [{
602 "@odata.id": "/redfish/v1/Services/1/Targets/1"606 "@odata.id": "/redfish/v1/Services/1/Targets/1"
607 }, {
608 "@odata.id": "/redfish/v1/Services/1/Targets/2"
603 }]609 }]
604 }610 }
605}611}
@@ -958,14 +964,12 @@
958 mock_redfish_request.return_value = (SAMPLE_JSON_NODE, None)964 mock_redfish_request.return_value = (SAMPLE_JSON_NODE, None)
959965
960 remote_drives = yield driver.scrape_remote_drives(url, headers)966 remote_drives = yield driver.scrape_remote_drives(url, headers)
961 self.assertEquals({'/redfish/v1/Services/1/Targets/1'}, remote_drives)967 self.assertEquals({
968 '/redfish/v1/Services/1/Targets/1',
969 '/redfish/v1/Services/1/Targets/2'}, remote_drives)
962970
963 @inlineCallbacks
964 def test__calculate_remote_storage(self):971 def test__calculate_remote_storage(self):
965 driver = RSDPodDriver()972 driver = RSDPodDriver()
966 context = make_context()
967 url = driver.get_url(context)
968 headers = driver.make_auth_headers(**context)
969 LV_NO_TARGETS = deepcopy(SAMPLE_JSON_LV)973 LV_NO_TARGETS = deepcopy(SAMPLE_JSON_LV)
970 LV_NO_TARGETS['Links']['Targets'] = []974 LV_NO_TARGETS['Links']['Targets'] = []
971 logical_drives = {975 logical_drives = {
@@ -975,31 +979,64 @@
975 b"redfish/v1/Services/1/LogicalDrives/4": SAMPLE_JSON_PV,979 b"redfish/v1/Services/1/LogicalDrives/4": SAMPLE_JSON_PV,
976 b"redfish/v1/Services/1/LogicalDrives/5": SAMPLE_JSON_PV,980 b"redfish/v1/Services/1/LogicalDrives/5": SAMPLE_JSON_PV,
977 }981 }
978 target_links = {982 targets = {
979 b"redfish/v1/Services/1/Targets/1": SAMPLE_JSON_TARGET,983 b"redfish/v1/Services/1/Targets/1": SAMPLE_JSON_TARGET,
980 b"redfish/v1/Services/1/Targets/2": SAMPLE_JSON_TARGET,984 b"redfish/v1/Services/1/Targets/2": SAMPLE_JSON_TARGET,
981 b"redfish/v1/Services/1/Targets/3": SAMPLE_JSON_TARGET,985 b"redfish/v1/Services/1/Targets/3": SAMPLE_JSON_TARGET,
982 }986 }
983 remote_drives = set(987 remote_drives = set(
984 "redfish/v1/Services/1/Targets/1")988 "redfish/v1/Services/1/Targets/1")
985 mock_scrape_logical_drives_and_targets = self.patch(
986 driver, 'scrape_logical_drives_and_targets')
987 mock_scrape_logical_drives_and_targets.return_value = (
988 logical_drives, target_links)
989 mock_scrape_remote_drives = self.patch(driver, 'scrape_remote_drives')
990 mock_scrape_remote_drives.return_value = remote_drives
991989
992 remote_storage = yield driver.calculate_remote_storage(url, headers)990 remote_storage = driver.calculate_remote_storage(
991 remote_drives, logical_drives, targets)
993 self.assertDictEqual(992 self.assertDictEqual(
994 remote_storage,993 remote_storage,
995 {994 {
996 b'redfish/v1/Services/1/LogicalDrives/2': {995 b'redfish/v1/Services/1/LogicalDrives/2': {
997 'total': 11830638411776.0,996 'total': 11830638411776.0,
998 'available': 11830638411776.0,997 'available': 11830638411776.0,
999 'master': b'redfish/v1/Services/1/LogicalDrives/1'998 'master': {
999 'path': b'/redfish/v1/Services/1/LogicalDrives/1',
1000 'size': 80
1001 }
1000 }1002 }
1001 })1003 })
10021004
1005 def test__calculate_pod_remote_storage(self):
1006 driver = RSDPodDriver()
1007 logical_drives = {
1008 b"redfish/v1/Services/1/LogicalDrives/1": SAMPLE_JSON_LV,
1009 b"redfish/v1/Services/1/LogicalDrives/2": SAMPLE_JSON_LVG,
1010 b"redfish/v1/Services/1/LogicalDrives/3": SAMPLE_JSON_LV
1011 }
1012 remote_storage = {
1013 b"redfish/v1/Services/1/LogicalDrives/1": {
1014 'total': 80 * (1024 ** 3),
1015 'available': None,
1016 'master': {
1017 'path': None,
1018 'size': None
1019 }
1020 },
1021 b"redfish/v1/Services/1/LogicalDrives/2": {
1022 'total': 12 * (1024 ** 3),
1023 'available': None,
1024 'master': {
1025 'path': None,
1026 'size': None
1027 }
1028 }
1029 }
1030 mock_calculate_remote_storage = self.patch(
1031 driver, 'calculate_remote_storage')
1032 mock_calculate_remote_storage.return_value = remote_storage
1033
1034 pod_capacity, pod_hints_capacity = driver.calculate_pod_remote_storage(
1035 factory.make_name('remote_drives'),
1036 logical_drives, factory.make_name('targets'))
1037 self.assertEquals(92 * (1024 ** 3), pod_capacity)
1038 self.assertEquals(80 * (1024 ** 3), pod_hints_capacity)
1039
1003 @inlineCallbacks1040 @inlineCallbacks
1004 def test__get_pod_memory_resources(self):1041 def test__get_pod_memory_resources(self):
1005 driver = RSDPodDriver()1042 driver = RSDPodDriver()
@@ -1147,10 +1184,6 @@
1147 url = driver.get_url(context)1184 url = driver.get_url(context)
1148 headers = driver.make_auth_headers(**context)1185 headers = driver.make_auth_headers(**context)
1149 mock_redfish_request = self.patch(driver, 'redfish_request')1186 mock_redfish_request = self.patch(driver, 'redfish_request')
1150 NO_MEMORY = deepcopy(SAMPLE_JSON_MEMORY)
1151 NO_MEMORY['CapacityMiB'] = None
1152 NO_THREADS = deepcopy(SAMPLE_JSON_PROCESSOR)
1153 NO_THREADS['TotalThreads'] = None
1154 NIC1_DATA = deepcopy(SAMPLE_JSON_INTERFACE)1187 NIC1_DATA = deepcopy(SAMPLE_JSON_INTERFACE)
1155 NIC1_DATA['SpeedMbps'] = 9001188 NIC1_DATA['SpeedMbps'] = 900
1156 NIC2_DATA = deepcopy(SAMPLE_JSON_INTERFACE)1189 NIC2_DATA = deepcopy(SAMPLE_JSON_INTERFACE)
@@ -1162,10 +1195,10 @@
1162 mock_redfish_request.side_effect = [1195 mock_redfish_request.side_effect = [
1163 (SAMPLE_JSON_NODE, None),1196 (SAMPLE_JSON_NODE, None),
1164 (SAMPLE_JSON_MEMORY, None),1197 (SAMPLE_JSON_MEMORY, None),
1165 (NO_MEMORY, None),1198 (SAMPLE_JSON_MEMORY, None),
1166 (SAMPLE_JSON_MEMORY, None),1199 (SAMPLE_JSON_MEMORY, None),
1167 (SAMPLE_JSON_MEMORY, None),1200 (SAMPLE_JSON_MEMORY, None),
1168 (NO_THREADS, None),1201 (SAMPLE_JSON_PROCESSOR, None),
1169 (SAMPLE_JSON_PROCESSOR, None),1202 (SAMPLE_JSON_PROCESSOR, None),
1170 (SAMPLE_JSON_DEVICE, None),1203 (SAMPLE_JSON_DEVICE, None),
1171 (SAMPLE_JSON_DEVICE, None),1204 (SAMPLE_JSON_DEVICE, None),
@@ -1182,46 +1215,98 @@
1182 (SAMPLE_JSON_PORT, None),1215 (SAMPLE_JSON_PORT, None),
1183 (SAMPLE_JSON_VLAN, None),1216 (SAMPLE_JSON_VLAN, None),
1184 ]1217 ]
1218 LV_NO_TARGETS = deepcopy(SAMPLE_JSON_LV)
1219 LV_NO_TARGETS['Links']['Targets'] = []
1220 TARGET_LUN = deepcopy(SAMPLE_JSON_TARGET)
1221 TARGET_LUN['Addresses'][0]['iSCSI']['TargetLUN'].append({'LUN': 3})
1222 logical_drives = {
1223 b"redfish/v1/Services/1/LogicalDrives/1": SAMPLE_JSON_LV,
1224 b"redfish/v1/Services/1/LogicalDrives/2": SAMPLE_JSON_LVG,
1225 b"redfish/v1/Services/1/LogicalDrives/3": LV_NO_TARGETS,
1226 b"redfish/v1/Services/1/LogicalDrives/4": SAMPLE_JSON_PV,
1227 b"redfish/v1/Services/1/LogicalDrives/5": SAMPLE_JSON_PV,
1228 }
1229 targets = {
1230 b"redfish/v1/Services/1/Targets/1": SAMPLE_JSON_TARGET,
1231 b"redfish/v1/Services/1/Targets/2": TARGET_LUN,
1232 b"redfish/v1/Services/1/Targets/3": SAMPLE_JSON_TARGET,
1233 b"redfish/v1/Services/1/Targets/4": SAMPLE_JSON_TARGET,
1234 }
11851235
1186 machine = yield driver.get_pod_machine(1236 machine = yield driver.get_pod_machine(
1187 b"redfish/v1/Nodes/1", url, headers)1237 b"redfish/v1/Nodes/1", url, headers, logical_drives, targets)
1188 self.assertEquals("amd64/generic", machine.architecture)1238 self.assertThat(machine, MatchesStructure(
1189 self.assertEquals(28, machine.cores)1239 architecture=Equals("amd64/generic"),
1190 self.assertEquals(2300, machine.cpu_speed)1240 cores=Equals(56),
1191 self.assertEquals(23436, machine.memory)1241 cpu_speed=Equals(2300),
1192 self.assertEquals(1242 memory=Equals(31248),
1193 "INTEL_SSDMCEAC120B3", machine.block_devices[0].model)1243 power_state=Equals("off"),
1194 self.assertEquals("CVLI310601PY120E", machine.block_devices[0].serial)1244 power_parameters=MatchesDict({'node_id': Equals('1')}),
1195 self.assertEquals(119999999999.99997, machine.block_devices[0].size)1245 interfaces=MatchesListwise([
1196 self.assertEquals(['ssd'], machine.block_devices[0].tags)1246 MatchesStructure(
1197 self.assertEquals("off", machine.power_state)1247 mac_address=Equals('54:ab:3a:36:af:45'),
1198 self.assertEquals({'node_id': '1'}, machine.power_parameters)1248 vid=Equals(4088),
1199 self.assertThat(machine.interfaces, MatchesListwise([1249 tags=Equals(['e900']),
1200 MatchesStructure(1250 boot=Equals(False),
1201 mac_address=Equals('54:ab:3a:36:af:45'),1251 ),
1202 vid=Equals(4088),1252 MatchesStructure(
1203 tags=Equals(['e900']),1253 mac_address=Equals('54:ab:3a:36:af:45'),
1204 boot=Equals(False),1254 vid=Equals(4088),
1205 ),1255 tags=Equals(['1g', 'e1000']),
1206 MatchesStructure(1256 boot=Equals(False),
1207 mac_address=Equals('54:ab:3a:36:af:45'),1257 ),
1208 vid=Equals(4088),1258 MatchesStructure(
1209 tags=Equals(['1.0']),1259 mac_address=Equals('54:ab:3a:36:af:45'),
1210 boot=Equals(False),1260 vid=Equals(4088),
1211 ),1261 tags=Equals(['2.0']),
1212 MatchesStructure(1262 boot=Equals(False),
1213 mac_address=Equals('54:ab:3a:36:af:45'),1263 ),
1214 vid=Equals(4088),1264 MatchesStructure(
1215 tags=Equals(['2.0']),1265 mac_address=Equals('54:ab:3a:36:af:45'),
1216 boot=Equals(False),1266 vid=Equals(-1),
1217 ),1267 tags=Equals([]),
1218 MatchesStructure(1268 boot=Equals(True),
1219 mac_address=Equals('54:ab:3a:36:af:45'),1269 ),
1220 vid=Equals(-1),1270 ]),
1221 tags=Equals([]),1271 block_devices=MatchesListwise([
1222 boot=Equals(True),1272 MatchesStructure(
1223 ),1273 model=Equals('INTEL_SSDMCEAC120B3'),
1224 ]))1274 serial=Equals('CVLI310601PY120E'),
1275 size=Equals(119999999999.99997),
1276 block_size=Equals(512),
1277 tags=Equals(['ssd']),
1278 type=Equals(BlockDeviceType.PHYSICAL),
1279 ),
1280 MatchesStructure(
1281 model=Equals('INTEL_SSDMCEAC120B3'),
1282 serial=Equals('CVLI310601PY120E'),
1283 size=Equals(119999999999.99997),
1284 block_size=Equals(512),
1285 tags=Equals(['ssd']),
1286 type=Equals(BlockDeviceType.PHYSICAL),
1287 ),
1288 MatchesStructure(
1289 model=Is(None),
1290 serial=Is(None),
1291 size=Equals(85899345920.0),
1292 block_size=Equals(512),
1293 tags=Equals(['iscsi']),
1294 type=Equals(BlockDeviceType.ISCSI),
1295 iscsi_target=Equals(
1296 '10.1.0.100:6:3260:0:iqn.maas.io:test'),
1297 ),
1298 MatchesStructure(
1299 model=Is(None),
1300 serial=Is(None),
1301 size=Equals(85899345920.0),
1302 block_size=Equals(512),
1303 tags=Equals(['iscsi']),
1304 type=Equals(BlockDeviceType.ISCSI),
1305 iscsi_target=Equals(
1306 '10.1.0.100:6:3260:3:iqn.maas.io:test'),
1307 )
1308 ])
1309 ))
12251310
1226 @inlineCallbacks1311 @inlineCallbacks
1227 def test__get_pod_machines(self):1312 def test__get_pod_machines(self):
@@ -1229,6 +1314,14 @@
1229 context = make_context()1314 context = make_context()
1230 url = driver.get_url(context)1315 url = driver.get_url(context)
1231 headers = driver.make_auth_headers(**context)1316 headers = driver.make_auth_headers(**context)
1317 logical_drives = {
1318 factory.make_name('lv_path'): factory.make_name('lv_data')
1319 for _ in range(3)
1320 }
1321 targets = {
1322 factory.make_name('target_path'): factory.make_name('target_data')
1323 for _ in range(3)
1324 }
1232 mock_list_resources = self.patch(driver, 'list_resources')1325 mock_list_resources = self.patch(driver, 'list_resources')
1233 mock_list_resources.side_effect = [1326 mock_list_resources.side_effect = [
1234 [b"redfish/v1/Nodes/1"],1327 [b"redfish/v1/Nodes/1"],
@@ -1242,10 +1335,11 @@
1242 mock_get_pod_machine = self.patch(driver, 'get_pod_machine')1335 mock_get_pod_machine = self.patch(driver, 'get_pod_machine')
1243 mock_get_pod_machine.return_value = expected_machines1336 mock_get_pod_machine.return_value = expected_machines
12441337
1245 discovered_machines = yield driver.get_pod_machines(url, headers)1338 discovered_machines = yield driver.get_pod_machines(
1339 url, headers, logical_drives, targets)
1246 self.assertEquals(1, len(discovered_machines))1340 self.assertEquals(1, len(discovered_machines))
1247 self.assertThat(mock_get_pod_machine, MockCalledOnceWith(1341 self.assertThat(mock_get_pod_machine, MockCalledOnceWith(
1248 b"redfish/v1/Nodes/1", url, headers))1342 b"redfish/v1/Nodes/1", url, headers, logical_drives, targets))
12491343
1250 def test__get_pod_hints(self):1344 def test__get_pod_hints(self):
1251 driver = RSDPodDriver()1345 driver = RSDPodDriver()
@@ -1283,12 +1377,41 @@
1283 context = make_context()1377 context = make_context()
1284 headers = driver.make_auth_headers(**context)1378 headers = driver.make_auth_headers(**context)
1285 url = driver.get_url(context)1379 url = driver.get_url(context)
1380 remote_drives = factory.make_name('remote_drive')
1381 logical_drives = factory.make_name('logical_drives')
1382 targets = factory.make_name('targets')
1383 pod_iscsi_capacity = random.randint(
1384 10 * 1024 ** 3, 20 * 1024 ** 3)
1385 pod_hints_iscsi_capacity = random.randint(
1386 10 * 1024 ** 3, 20 * 1024 ** 3)
1387 mock_scrape_logical_drives_and_targets = self.patch(
1388 driver, 'scrape_logical_drives_and_targets')
1389 mock_scrape_logical_drives_and_targets.return_value = (
1390 logical_drives, targets)
1391 mock_scrape_remote_drives = self.patch(driver, 'scrape_remote_drives')
1392 mock_scrape_remote_drives.return_value = remote_drives
1393 mock_calculate_pod_remote_storage = self.patch(
1394 driver, 'calculate_pod_remote_storage')
1395 mock_calculate_pod_remote_storage.return_value = (
1396 pod_iscsi_capacity, pod_hints_iscsi_capacity)
1286 mock_get_pod_resources = self.patch(1397 mock_get_pod_resources = self.patch(
1287 driver, 'get_pod_resources')1398 driver, 'get_pod_resources')
1288 mock_get_pod_machines = self.patch(driver, 'get_pod_machines')1399 mock_get_pod_machines = self.patch(driver, 'get_pod_machines')
1289 mock_get_pod_hints = self.patch(driver, 'get_pod_hints')1400 mock_get_pod_hints = self.patch(driver, 'get_pod_hints')
12901401
1291 yield driver.discover(factory.make_name('system_id'), context)1402 discovered_pod = yield driver.discover(
1403 factory.make_name('system_id'), context)
1404 self.assertEquals(pod_iscsi_capacity, discovered_pod.iscsi_storage)
1405 self.assertEquals(
1406 pod_hints_iscsi_capacity, discovered_pod.hints.iscsi_storage)
1407 self.assertThat(
1408 mock_scrape_logical_drives_and_targets, MockCalledOnceWith(
1409 url, headers))
1410 self.assertThat(
1411 mock_scrape_remote_drives, MockCalledOnceWith(url, headers))
1412 self.assertThat(
1413 mock_calculate_pod_remote_storage, MockCalledOnceWith(
1414 remote_drives, logical_drives, targets))
1292 self.assertThat(1415 self.assertThat(
1293 mock_get_pod_resources, MockCalledOnceWith(url, headers))1416 mock_get_pod_resources, MockCalledOnceWith(url, headers))
1294 self.assertThat(1417 self.assertThat(
@@ -1302,7 +1425,7 @@
1302 processors = 21425 processors = 2
1303 cores = request.cores / 21426 cores = request.cores / 2
1304 payload = driver.convert_request_to_json_payload(1427 payload = driver.convert_request_to_json_payload(
1305 processors, cores, request)1428 processors, cores, request, None, None, None)
1306 self.assertThat(1429 self.assertThat(
1307 json.loads(payload.decode('utf-8')),1430 json.loads(payload.decode('utf-8')),
1308 MatchesDict({1431 MatchesDict({
@@ -1383,6 +1506,25 @@
1383 request = make_requested_machine(cores=64)1506 request = make_requested_machine(cores=64)
1384 discovered_pod = make_discovered_pod()1507 discovered_pod = make_discovered_pod()
1385 new_machine = make_discovered_machine()1508 new_machine = make_discovered_machine()
1509 logical_drives = {
1510 factory.make_name('lv_path'): factory.make_name('lv_data')
1511 for _ in range(3)
1512 }
1513 targets = {
1514 factory.make_name('target_path'): factory.make_name('target_data')
1515 for _ in range(3)
1516 }
1517 remote_drives = set([
1518 factory.make_name('target_path')
1519 for _ in range(3)
1520 ])
1521 mock_scrape_logical_drives_and_targets = self.patch(
1522 driver, 'scrape_logical_drives_and_targets')
1523 mock_scrape_logical_drives_and_targets.return_value = (
1524 logical_drives, targets)
1525 mock_scrape_remote_drives = self.patch(
1526 driver, 'scrape_remote_drives')
1527 mock_scrape_remote_drives.return_value = remote_drives
1386 mock_get_pod_machine = self.patch(driver, 'get_pod_machine')1528 mock_get_pod_machine = self.patch(driver, 'get_pod_machine')
1387 mock_get_pod_machine.return_value = new_machine1529 mock_get_pod_machine.return_value = new_machine
1388 mock_convert_request_to_json_payload = self.patch(1530 mock_convert_request_to_json_payload = self.patch(
@@ -1408,8 +1550,10 @@
1408 factory.make_name('system_id'), context, request)1550 factory.make_name('system_id'), context, request)
1409 self.assertThat(1551 self.assertThat(
1410 mock_convert_request_to_json_payload, MockCallsMatch(1552 mock_convert_request_to_json_payload, MockCallsMatch(
1411 call(1, 32, request), call(2, 16, request),1553 call(1, 32, request, remote_drives, logical_drives, targets),
1412 call(4, 8, request), call(8, 4, request)))1554 call(2, 16, request, remote_drives, logical_drives, targets),
1555 call(4, 8, request, remote_drives, logical_drives, targets),
1556 call(8, 4, request, remote_drives, logical_drives, targets)))
1413 self.assertThat(mock_assemble_node, MockCalledOnceWith(1557 self.assertThat(mock_assemble_node, MockCalledOnceWith(
1414 url, new_machine.power_parameters.get(1558 url, new_machine.power_parameters.get(
1415 'node_id').encode('utf-8'), headers))1559 'node_id').encode('utf-8'), headers))
@@ -1427,6 +1571,12 @@
1427 discovered_pod = make_discovered_pod()1571 discovered_pod = make_discovered_pod()
1428 new_machines = deepcopy(discovered_pod.machines)1572 new_machines = deepcopy(discovered_pod.machines)
1429 machines = deepcopy(new_machines)1573 machines = deepcopy(new_machines)
1574 mock_scrape_logical_drives_and_targets = self.patch(
1575 driver, 'scrape_logical_drives_and_targets')
1576 mock_scrape_logical_drives_and_targets.return_value = (None, None)
1577 mock_scrape_remote_drives = self.patch(
1578 driver, 'scrape_remote_drives')
1579 mock_scrape_remote_drives.return_value = None
1430 mock_get_pod_machines = self.patch(driver, 'get_pod_machines')1580 mock_get_pod_machines = self.patch(driver, 'get_pod_machines')
1431 mock_get_pod_machines.side_effect = [1581 mock_get_pod_machines.side_effect = [
1432 machines, new_machines]1582 machines, new_machines]