Merge ~mwhudson/curtin:action-objects-more into curtin:master
- Git
- lp:~mwhudson/curtin
- action-objects-more
- Merge into master
Proposed by
Michael Hudson-Doyle
Status: Needs review
Proposed branch: ~mwhudson/curtin:action-objects-more
Merge into: curtin:master
Diff against target:
870 lines (+224/-183), 5 files modified:
curtin/block/mkfs.py (+3/-6), curtin/commands/block_meta.py (+152/-113), curtin/storage_actions.py (+4/-0), tests/unittests/test_block_mkfs.py (+42/-42), tests/unittests/test_commands_block_meta.py (+23/-22)
Related bugs: none
Reviewer: Server Team CI bot — review type: continuous-integration — status: Needs Fixing
Reviewer: curtin developers — status: Pending
Review via email: mp+448442@code.launchpad.net
Commit message
block_meta: continue to turn dictionaries into objects
dasd, disk and partition handlers.
Description of the change
To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote:
review:
Needs Fixing
(continuous-integration)
Revision history for this message
Dan Bungert (dbungert) wrote:
Do we have the ability to test the Dasd parts of this? I think it looks safe enough but if it's wrong then a future change is doomed.
Unmerged commits
- b8ecacf... by Michael Hudson-Doyle
-
bla
- cb80880... by Michael Hudson-Doyle
-
sigh, need to stick to attr for now
- 436a777... by Michael Hudson-Doyle
-
fix a lot of things
- 3d590ab... by Michael Hudson-Doyle
-
keep going
- b36a310... by Michael Hudson-Doyle
-
more typing
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/curtin/block/mkfs.py b/curtin/block/mkfs.py |
2 | index 20ee7b8..4e716e7 100644 |
3 | --- a/curtin/block/mkfs.py |
4 | +++ b/curtin/block/mkfs.py |
5 | @@ -230,15 +230,12 @@ def mkfs(path, fstype, strict=False, label=None, uuid=None, force=False, |
6 | return uuid |
7 | |
8 | |
9 | -def mkfs_from_config(path, info, strict=False): |
10 | +def mkfs_from_action(path, format, strict=False): |
11 | """Make filesystem on block device with given path according to storage |
12 | config given""" |
13 | - fstype = info.get('fstype') |
14 | - if fstype is None: |
15 | - raise ValueError("fstype must be specified") |
16 | # NOTE: Since old metadata on partitions that have not been wiped can cause |
17 | # some mkfs commands to refuse to work, it's best to use force=True |
18 | - mkfs(path, fstype, strict=strict, force=True, uuid=info.get('uuid'), |
19 | - label=info.get('label'), extra_options=info.get('extra_options')) |
20 | + mkfs(path, format.fstype, strict=strict, force=True, uuid=format.uuid, |
21 | + label=format.label, extra_options=format.extra_options) |
22 | |
23 | # vi: ts=4 expandtab syntax=python |
24 | diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py |
25 | index 11972b9..fc61e21 100644 |
26 | --- a/curtin/commands/block_meta.py |
27 | +++ b/curtin/commands/block_meta.py |
28 | @@ -3,8 +3,9 @@ |
29 | from collections import OrderedDict, namedtuple |
30 | from curtin import (block, compat, config, paths, storage_actions, util) |
31 | from curtin.block import schemas |
32 | -from curtin.block import (bcache, clear_holders, dasd, iscsi, lvm, mdadm, mkfs, |
33 | +from curtin.block import (bcache, clear_holders, iscsi, lvm, mdadm, mkfs, |
34 | multipath, zfs) |
35 | +from curtin.block.dasd import DasdDevice, DasdPartitionTable |
36 | from curtin import distro |
37 | from curtin.log import LOG, logged_time |
38 | from curtin.reporter import events |
39 | @@ -23,6 +24,8 @@ from curtin.udev import ( |
40 | udevadm_trigger, |
41 | ) |
42 | |
43 | +import attr |
44 | + |
45 | import glob |
46 | import json |
47 | import os |
48 | @@ -31,6 +34,7 @@ import string |
49 | import sys |
50 | import tempfile |
51 | import time |
52 | +from typing import List, Optional |
53 | |
54 | |
55 | FstabData = namedtuple( |
56 | @@ -471,7 +475,7 @@ def v1_get_path_to_disk(vol): |
57 | volume_path = '/dev/mapper/' + ( |
58 | multipath.get_mpath_id_from_device(volume_path)) |
59 | elif disk_key == 'device_id': |
60 | - dasd_device = dasd.DasdDevice(vol_value) |
61 | + dasd_device = DasdDevice(vol_value) |
62 | volume_path = dasd_device.devname |
63 | except ValueError: |
64 | continue |
65 | @@ -533,7 +537,7 @@ def v2_get_path_to_disk(vol): |
66 | *disk_by_keys( |
67 | vol['serial'], 'DM_SERIAL', 'ID_SERIAL', 'ID_SERIAL_SHORT')) |
68 | if 'device_id' in vol: |
69 | - dasd_device = dasd.DasdDevice(vol['device_id']) |
70 | + dasd_device = DasdDevice(vol['device_id']) |
71 | cands.append(set([dasd_device.devname])) |
72 | volume_path = dasd_device.devname |
73 | # verify path exists otherwise try the next key |
74 | @@ -667,11 +671,6 @@ class Image: |
75 | sector_size: int = storage_actions.size(default=512) |
76 | |
77 | |
78 | -@storage_actions.define("device") |
79 | -class Device: |
80 | - path: str |
81 | - |
82 | - |
83 | def image_handler(info, storage_config, context): |
84 | image: Image = storage_actions.asobject(info) |
85 | path = image.path |
86 | @@ -709,12 +708,28 @@ def image_handler(info, storage_config, context): |
87 | context.handlers['disk'](info, storage_config, context) |
88 | |
89 | |
90 | +@storage_actions.define("device") |
91 | +class Device: |
92 | + path: str |
93 | + |
94 | + |
95 | def device_handler(info, storage_config, context): |
96 | device: Device = storage_actions.asobject(info) |
97 | context.id_to_device[device.id] = device.path |
98 | context.handlers['disk'](info, storage_config, context) |
99 | |
100 | |
101 | +@storage_actions.define("dasd") |
102 | +class Dasd: |
103 | + device_id: str |
104 | + blocksize: int |
105 | + label: Optional[str] = None |
106 | + mode: str |
107 | + disk_layout: str |
108 | + wipe: bool = False |
109 | + preserve: bool = False |
110 | + |
111 | + |
112 | def dasd_handler(info, storage_config, context): |
113 | """ Prepare the specified dasd device per configuration |
114 | |
115 | @@ -733,92 +748,103 @@ def dasd_handler(info, storage_config, context): |
116 | 'disk_layout': 'cdl', |
117 | } |
118 | """ |
119 | - device_id = info.get('device_id') |
120 | - blocksize = info.get('blocksize') |
121 | - disk_layout = info.get('disk_layout') |
122 | - label = info.get('label') |
123 | - mode = info.get('mode') |
124 | - force_format = config.value_as_boolean(info.get('wipe')) |
125 | - |
126 | - dasd_device = dasd.DasdDevice(device_id) |
127 | - if (force_format or dasd_device.needs_formatting(blocksize, |
128 | - disk_layout, label)): |
129 | - if config.value_as_boolean(info.get('preserve')): |
130 | + dasd: Dasd = storage_actions.asobject(info) |
131 | + force_format = config.value_as_boolean(dasd.wipe) |
132 | + |
133 | + dasd_device = DasdDevice(dasd.device_id) |
134 | + if (force_format or dasd_device.needs_formatting(dasd.blocksize, |
135 | + dasd.disk_layout, |
136 | + dasd.label)): |
137 | + if config.value_as_boolean(dasd.preserve): |
138 | raise ValueError( |
139 | "dasd '%s' does not match configured properties and" |
140 | "preserve is set to true. The dasd needs formatting" |
141 | - "with the specified parameters to continue." % info.get('id')) |
142 | + "with the specified parameters to continue." % dasd.id) |
143 | |
144 | LOG.debug('Formatting dasd id=%s device_id=%s devname=%s', |
145 | - info.get('id'), device_id, dasd_device.devname) |
146 | - dasd_device.format(blksize=blocksize, layout=disk_layout, |
147 | - set_label=label, mode=mode) |
148 | + dasd.id, dasd.device_id, dasd_device.devname) |
149 | + dasd_device.format(blksize=dasd.blocksize, layout=dasd.disk_layout, |
150 | + set_label=dasd.label, mode=dasd.mode) |
151 | |
152 | # check post-format to ensure values match |
153 | - if dasd_device.needs_formatting(blocksize, disk_layout, label): |
154 | + if dasd_device.needs_formatting( |
155 | + dasd.blocksize, dasd.disk_layout, dasd.label): |
156 | raise RuntimeError( |
157 | "Dasd %s failed to format" % dasd_device.devname) |
158 | |
159 | |
160 | +@attr.s(auto_attribs=True) |
161 | +class _Partitionable: |
162 | + ptable: Optional[str] = None |
163 | + preserve: bool = storage_actions.boolean(default=False) |
164 | + wipe: Optional[str] = None |
165 | + name: Optional[str] = None |
166 | + |
167 | + |
168 | +@storage_actions.define("disk") |
169 | +class Disk(_Partitionable): |
170 | + pass |
171 | + |
172 | + |
173 | def disk_handler(info, storage_config, context): |
174 | + disk: _Partitionable = storage_actions.asobject(info) |
175 | _dos_names = ['dos', 'msdos'] |
176 | - ptable = info.get('ptable') |
177 | - if ptable and ptable not in PTABLES_VALID: |
178 | + if disk.ptable and disk.ptable not in PTABLES_VALID: |
179 | raise ValueError( |
180 | - 'Invalid partition table type: %s in %s' % (ptable, info)) |
181 | + 'Invalid partition table type: %s in %s' % (disk.ptable, disk)) |
182 | |
183 | - disk = get_path_to_storage_volume(info.get('id'), storage_config) |
184 | - context.id_to_device[info['id']] = disk |
185 | + disk_path = get_path_to_storage_volume(disk.id, storage_config) |
186 | + context.id_to_device[disk.id] = disk_path |
187 | # For disks, 'preserve' is what indicates whether the partition |
188 | # table should be reused or recreated but for compound devices |
189 | # such as raids, it indicates if the raid should be created or |
190 | # assumed to already exist. So to allow a pre-existing raid to get |
191 | # a new partition table, we use presence of 'wipe' field to |
192 | # indicate if the disk should be reformatted or not. |
193 | - if info['type'] == 'disk': |
194 | - preserve_ptable = config.value_as_boolean(info.get('preserve')) |
195 | + if disk.type == 'disk': |
196 | + preserve_ptable = disk.preserve |
197 | else: |
198 | - preserve_ptable = config.value_as_boolean(info.get('preserve')) \ |
199 | - and not config.value_as_boolean(info.get('wipe')) |
200 | + preserve_ptable = disk.preserve and not disk.wipe |
201 | if preserve_ptable: |
202 | # Handle preserve flag, verifying if ptable specified in config |
203 | - if ptable and ptable != PTABLE_UNSUPPORTED: |
204 | - current_ptable = block.get_part_table_type(disk) |
205 | + if disk.ptable and disk.ptable != PTABLE_UNSUPPORTED: |
206 | + current_ptable = block.get_part_table_type(disk_path) |
207 | LOG.debug('disk: current ptable type: %s', current_ptable) |
208 | if current_ptable not in PTABLES_SUPPORTED: |
209 | raise ValueError( |
210 | "disk '%s' does not have correct partition table or " |
211 | "cannot be read, but preserve is set to true (or wipe is " |
212 | "not set). cannot continue installation." % |
213 | - info.get('id')) |
214 | + disk.id) |
215 | LOG.info("disk '%s' marked to be preserved, so keeping partition " |
216 | - "table" % disk) |
217 | + "table" % disk.id) |
218 | else: |
219 | # wipe the disk and create the partition table if instructed to do so |
220 | - if config.value_as_boolean(info.get('wipe')): |
221 | - block.wipe_volume(disk, mode=info.get('wipe')) |
222 | - if config.value_as_boolean(ptable): |
223 | - LOG.info("labeling device: '%s' with '%s' partition table", disk, |
224 | - ptable) |
225 | - if ptable == "gpt": |
226 | + if config.value_as_boolean(disk.wipe): |
227 | + block.wipe_volume(disk_path, mode=disk.wipe) |
228 | + if disk.ptable is not None: |
229 | + LOG.info( |
230 | + "labeling device: '%s' with '%s' partition table", disk_path, |
231 | + disk.ptable) |
232 | + if disk.ptable == "gpt": |
233 | # Wipe both MBR and GPT that may be present on the disk. |
234 | # N.B.: wipe_volume wipes 1M at front and end of the disk. |
235 | # This could destroy disk data in filesystems that lived |
236 | # there. |
237 | - block.wipe_volume(disk, mode='superblock') |
238 | - elif ptable in _dos_names: |
239 | - util.subp(["parted", disk, "--script", "mklabel", "msdos"]) |
240 | - elif ptable == "vtoc": |
241 | - util.subp(["fdasd", "-c", "/dev/null", disk]) |
242 | - holders = clear_holders.get_holders(disk) |
243 | + block.wipe_volume(disk_path, mode='superblock') |
244 | + elif disk.ptable in _dos_names: |
245 | + util.subp(["parted", disk_path, "--script", "mklabel", "msdos"]) |
246 | + elif disk.ptable == "vtoc": |
247 | + util.subp(["fdasd", "-c", "/dev/null", disk_path]) |
248 | + holders = clear_holders.get_holders(disk_path) |
249 | if len(holders) > 0: |
250 | LOG.info('Detected block holders on disk %s: %s', disk, holders) |
251 | - clear_holders.clear_holders(disk) |
252 | - clear_holders.assert_clear(disk) |
253 | + clear_holders.clear_holders(disk_path) |
254 | + clear_holders.assert_clear(disk_path) |
255 | |
256 | # Make the name if needed |
257 | - if info.get('name'): |
258 | - make_dname(info.get('id'), storage_config) |
259 | + if disk.name: |
260 | + make_dname(disk.id, storage_config) |
261 | |
262 | |
263 | def getnumberoflogicaldisks(device, storage_config): |
264 | @@ -969,27 +995,26 @@ def verify_ptable_flag(devpath, expected_flag, label, part_info): |
265 | raise RuntimeError(msg) |
266 | |
267 | |
268 | -def partition_verify_sfdisk(part_action, label, sfdisk_part_info): |
269 | +def partition_verify_sfdisk(partition: "Partition", label, sfdisk_part_info): |
270 | devpath = os.path.realpath(sfdisk_part_info['node']) |
271 | verify_size( |
272 | - devpath, int(util.human2bytes(part_action['size'])), sfdisk_part_info) |
273 | - expected_flag = part_action.get('flag') |
274 | - if expected_flag: |
275 | - verify_ptable_flag(devpath, expected_flag, label, sfdisk_part_info) |
276 | + devpath, partition.size, sfdisk_part_info) |
277 | + if partition.flag: |
278 | + verify_ptable_flag(devpath, partition.flag, label, sfdisk_part_info) |
279 | |
280 | |
281 | -def partition_verify_fdasd(disk_path, partnumber, info): |
282 | +def partition_verify_fdasd(disk_path, partnumber, partition: "Partition"): |
283 | verify_exists(disk_path) |
284 | - pt = dasd.DasdPartitionTable.from_fdasd(disk_path) |
285 | + pt = DasdPartitionTable.from_fdasd(disk_path) |
286 | pt_entry = pt.partitions[partnumber-1] |
287 | - expected_tracks = pt.tracks_needed(util.human2bytes(info['size'])) |
288 | + expected_tracks = pt.tracks_needed(partition.size) |
289 | msg = ( |
290 | 'Verifying %s part %s size, expecting %s tracks, found %s tracks' % ( |
291 | disk_path, partnumber, expected_tracks, pt_entry.length)) |
292 | LOG.debug(msg) |
293 | if expected_tracks != pt_entry.length: |
294 | raise RuntimeError(msg) |
295 | - if info.get('flag', 'linux') != 'linux': |
296 | + if partition.flag not in [None, 'linux']: |
297 | raise RuntimeError("dasd partitions do not support flags") |
298 | |
299 | |
300 | @@ -1002,23 +1027,28 @@ def check_passed_path(info, actual_path): |
301 | info["type"], info, info['path'], actual_path)) |
302 | |
303 | |
304 | +@storage_actions.define("partition") |
305 | +class Partition: |
306 | + device: str |
307 | + size: int = storage_actions.size() |
308 | + flag: Optional[str] = None |
309 | + path: Optional[str] = None |
310 | + preserve: bool = storage_actions.boolean(default=False) |
311 | + wipe: Optional[str] = None |
312 | + |
313 | + |
314 | def partition_handler(info, storage_config, context): |
315 | - device = info.get('device') |
316 | - size = info.get('size') |
317 | - flag = info.get('flag') |
318 | - disk_ptable = storage_config.get(device).get('ptable') |
319 | + partition: Partition = storage_actions.asobject(info) |
320 | + disk_ptable = storage_config.get(partition.device).get('ptable') |
321 | partition_type = None |
322 | - if not device: |
323 | - raise ValueError("device must be set for partition to be created") |
324 | - if not size: |
325 | - raise ValueError("size must be specified for partition to be created") |
326 | |
327 | - disk = get_path_to_storage_volume(device, storage_config) |
328 | - partnumber = determine_partition_number(info.get('id'), storage_config) |
329 | + disk = get_path_to_storage_volume(partition.device, storage_config) |
330 | + partnumber = determine_partition_number(partition.id, storage_config) |
331 | disk_kname = block.path_to_kname(disk) |
332 | part_path = block.dev_path(block.partition_kname(disk_kname, partnumber)) |
333 | - check_passed_path(info, part_path) |
334 | - context.id_to_device[info['id']] = part_path |
335 | + if partition.path is not None: |
336 | + check_passed_path({'path': partition.path}, part_path) |
337 | + context.id_to_device[partition.id] = part_path |
338 | |
339 | # consider the disks logical sector size when calculating sectors |
340 | try: |
341 | @@ -1032,16 +1062,18 @@ def partition_handler(info, storage_config, context): |
342 | if partnumber > 1: |
343 | pnum = None |
344 | if partnumber == 5 and disk_ptable == "msdos": |
345 | - extended_part_id = find_extended_partition(device, storage_config) |
346 | + extended_part_id = find_extended_partition( |
347 | + partition.device, storage_config) |
348 | if not extended_part_id: |
349 | msg = ("Logical partition id=%s requires an extended partition" |
350 | " and no extended partition '(type: partition, flag: " |
351 | "extended)' was found in the storage config.") |
352 | - LOG.error(msg, info['id']) |
353 | - raise RuntimeError(msg, info['id']) |
354 | + LOG.error(msg, partition.id) |
355 | + raise RuntimeError(msg, partition.id) |
356 | pnum = determine_partition_number(extended_part_id, storage_config) |
357 | else: |
358 | - pnum = find_previous_partition(device, info['id'], storage_config) |
359 | + pnum = find_previous_partition( |
360 | + partition.device, partition.id, storage_config) |
361 | |
362 | # In case we fail to find previous partition let's error out now |
363 | if pnum is None: |
364 | @@ -1049,7 +1081,7 @@ def partition_handler(info, storage_config, context): |
365 | 'Cannot find previous partition on disk %s' % disk) |
366 | |
367 | LOG.debug("previous partition number for '%s' found to be '%s'", |
368 | - info.get('id'), pnum) |
369 | + partition.id, pnum) |
370 | partition_kname = block.partition_kname(disk_kname, pnum) |
371 | LOG.debug('partition_kname=%s', partition_kname) |
372 | (previous_start_sectors, previous_size_sectors) = ( |
373 | @@ -1062,12 +1094,12 @@ def partition_handler(info, storage_config, context): |
374 | offset_sectors = alignment_offset |
375 | else: |
376 | # further partitions |
377 | - if disk_ptable == "gpt" or flag != "logical": |
378 | + if disk_ptable == "gpt" or partition.flag != "logical": |
379 | # msdos primary and any gpt part start after former partition end |
380 | offset_sectors = previous_start_sectors + previous_size_sectors |
381 | else: |
382 | # msdos extended/logical partitions |
383 | - if flag == "logical": |
384 | + if partition.flag == "logical": |
385 | if partnumber == 5: |
386 | # First logical partition |
387 | # start at extended partition start + alignment_offset |
388 | @@ -1080,26 +1112,25 @@ def partition_handler(info, storage_config, context): |
389 | previous_size_sectors + |
390 | alignment_offset) |
391 | |
392 | - length_bytes = util.human2bytes(size) |
393 | # start sector is part of the sectors that define the partitions size |
394 | # so length has to be "size in sectors - 1" |
395 | - length_sectors = int(length_bytes / logical_block_size_bytes) - 1 |
396 | + length_sectors = int(partition.size / logical_block_size_bytes) - 1 |
397 | # logical partitions can't share their start sector with the extended |
398 | # partition and logical partitions can't go head-to-head, so we have to |
399 | # realign and for that increase size as required |
400 | - if info.get('flag') == "extended": |
401 | - logdisks = getnumberoflogicaldisks(device, storage_config) |
402 | + if partition.flag == "extended": |
403 | + logdisks = getnumberoflogicaldisks(partition.device, storage_config) |
404 | length_sectors = length_sectors + (logdisks * alignment_offset) |
405 | |
406 | # Handle preserve flag |
407 | create_partition = True |
408 | - if config.value_as_boolean(info.get('preserve')): |
409 | + if partition.preserve: |
410 | if disk_ptable == 'vtoc': |
411 | - partition_verify_fdasd(disk, partnumber, info) |
412 | + partition_verify_fdasd(disk, partnumber, partition) |
413 | else: |
414 | sfdisk_info = block.sfdisk_info(disk) |
415 | part_info = block.get_partition_sfdisk_info(part_path, sfdisk_info) |
416 | - partition_verify_sfdisk(info, sfdisk_info['label'], part_info) |
417 | + partition_verify_sfdisk(partition, sfdisk_info['label'], part_info) |
418 | LOG.debug( |
419 | '%s partition %s already present, skipping create', |
420 | disk, partnumber) |
421 | @@ -1109,17 +1140,17 @@ def partition_handler(info, storage_config, context): |
422 | # Set flag |
423 | # 'sgdisk --list-types' |
424 | LOG.info("adding partition '%s' to disk '%s' (ptable: '%s')", |
425 | - info.get('id'), device, disk_ptable) |
426 | + partition.id, partition.device, disk_ptable) |
427 | LOG.debug("partnum: %s offset_sectors: %s length_sectors: %s", |
428 | partnumber, offset_sectors, length_sectors) |
429 | |
430 | # Pre-Wipe the partition if told to do so, do not wipe dos extended |
431 | # partitions as this may damage the extended partition table |
432 | - if config.value_as_boolean(info.get('wipe')): |
433 | + if config.value_as_boolean(partition.wipe): |
434 | LOG.info("Preparing partition location on disk %s", disk) |
435 | - if info.get('flag') == "extended": |
436 | + if partition.flag == "extended": |
437 | LOG.warn("extended partitions do not need wiping, " |
438 | - "so skipping: '%s'" % info.get('id')) |
439 | + "so skipping: '%s'" % partition.id) |
440 | else: |
441 | # wipe the start of the new partition first by zeroing 1M at |
442 | # the length of the previous partition |
443 | @@ -1132,26 +1163,26 @@ def partition_handler(info, storage_config, context): |
444 | exclusive=False) |
445 | |
446 | if disk_ptable == "msdos": |
447 | - if flag and flag == 'prep': |
448 | + if partition.flag == 'prep': |
449 | raise ValueError( |
450 | 'PReP partitions require a GPT partition table') |
451 | |
452 | - if flag in ["extended", "logical", "primary"]: |
453 | - partition_type = flag |
454 | + if partition.flag in ["extended", "logical", "primary"]: |
455 | + partition_type = partition.flag |
456 | else: |
457 | partition_type = "primary" |
458 | cmd = ["parted", disk, "--script", "mkpart", partition_type] |
459 | - if flag == 'swap': |
460 | + if partition.flag == 'swap': |
461 | cmd.append("linux-swap") |
462 | cmd.append("%ss" % offset_sectors) |
463 | cmd.append("%ss" % (offset_sectors + length_sectors)) |
464 | - if flag == 'boot': |
465 | + if partition.flag == 'boot': |
466 | cmd.extend(['set', str(partnumber), 'boot', 'on']) |
467 | |
468 | util.subp(cmd, capture=True) |
469 | elif disk_ptable == "gpt": |
470 | - if flag and flag in SGDISK_FLAGS: |
471 | - typecode = SGDISK_FLAGS[flag] |
472 | + if partition.flag in SGDISK_FLAGS: |
473 | + typecode = SGDISK_FLAGS[partition.flag] |
474 | else: |
475 | typecode = SGDISK_FLAGS['linux'] |
476 | cmd = ["sgdisk", "--new", "%s:%s:%s" % (partnumber, offset_sectors, |
477 | @@ -1159,8 +1190,8 @@ def partition_handler(info, storage_config, context): |
478 | "--typecode=%s:%s" % (partnumber, typecode), disk] |
479 | util.subp(cmd, capture=True) |
480 | elif disk_ptable == "vtoc": |
481 | - dasd_pt = dasd.DasdPartitionTable.from_fdasd(disk) |
482 | - dasd_pt.add_partition(partnumber, length_bytes) |
483 | + dasd_pt = DasdPartitionTable.from_fdasd(disk) |
484 | + dasd_pt.add_partition(partnumber, partition.size) |
485 | else: |
486 | raise ValueError("parent partition has invalid partition table") |
487 | |
488 | @@ -1180,7 +1211,7 @@ def partition_handler(info, storage_config, context): |
489 | block.rescan_block_devices([disk]) |
490 | udevadm_settle(exists=part_path) |
491 | |
492 | - wipe_mode = info.get('wipe') |
493 | + wipe_mode = partition.wipe |
494 | if wipe_mode: |
495 | if wipe_mode == 'superblock' and create_partition: |
496 | # partition creation pre-wipes partition superblock locations |
497 | @@ -1190,29 +1221,37 @@ def partition_handler(info, storage_config, context): |
498 | block.wipe_volume(part_path, mode=wipe_mode, exclusive=False) |
499 | |
500 | # Make the name if needed |
501 | - if storage_config.get(device).get('name') and partition_type != 'extended': |
502 | - make_dname(info.get('id'), storage_config) |
503 | + if storage_config.get(partition.device).get('name') and \ |
504 | + partition_type != 'extended': |
505 | + make_dname(partition.id, storage_config) |
506 | + |
507 | + |
508 | +@storage_actions.define("format") |
509 | +class Format: |
510 | + volume: str |
511 | + preserve: bool = storage_actions.boolean(default=False) |
512 | + fstype: str |
513 | + label: Optional[str] = None |
514 | + uuid: Optional[str] = None |
515 | + extra_options: Optional[List[str]] = None |
516 | |
517 | |
518 | def format_handler(info, storage_config, context): |
519 | - volume = info.get('volume') |
520 | - if not volume: |
521 | - raise ValueError("volume must be specified for partition '%s'" % |
522 | - info.get('id')) |
523 | + format: Format = storage_actions.asobject(info) |
524 | |
525 | # Get path to volume |
526 | - volume_path = get_path_to_storage_volume(volume, storage_config) |
527 | + volume_path = get_path_to_storage_volume(format.volume, storage_config) |
528 | |
529 | # Handle preserve flag |
530 | - if config.value_as_boolean(info.get('preserve')): |
531 | + if format.preserve: |
532 | # Volume marked to be preserved, not formatting |
533 | return |
534 | |
535 | # Make filesystem using block library |
536 | - LOG.debug("mkfs %s info: %s", volume_path, info) |
537 | - mkfs.mkfs_from_config(volume_path, info) |
538 | + LOG.debug("mkfs %s info: %s", volume_path, format) |
539 | + mkfs.mkfs_from_action(volume_path, format) |
540 | |
541 | - device_type = storage_config.get(volume).get('type') |
542 | + device_type = storage_config.get(format.volume)['type'] |
543 | LOG.debug('Formated device type: %s', device_type) |
544 | if device_type == 'bcache': |
545 | # other devs have a udev watch on them. Not bcache (LP: #1680597). |
546 | diff --git a/curtin/storage_actions.py b/curtin/storage_actions.py |
547 | index b2a6fd4..d112645 100644 |
548 | --- a/curtin/storage_actions.py |
549 | +++ b/curtin/storage_actions.py |
550 | @@ -29,5 +29,9 @@ def asobject(obj): |
551 | return config.fromdict(cls, obj) |
552 | |
553 | |
554 | +def boolean(*, default=attr.NOTHING): |
555 | + return attr.ib(converter=config.value_as_boolean, default=default) |
556 | + |
557 | + |
558 | def size(*, default=attr.NOTHING): |
559 | return attr.ib(converter=_convert_size, default=default) |
560 | diff --git a/tests/unittests/test_block_mkfs.py b/tests/unittests/test_block_mkfs.py |
561 | index 163cee6..eb0ac9f 100644 |
562 | --- a/tests/unittests/test_block_mkfs.py |
563 | +++ b/tests/unittests/test_block_mkfs.py |
564 | @@ -1,18 +1,27 @@ |
565 | # This file is part of curtin. See LICENSE file for copyright and license info. |
566 | |
567 | from curtin.block import mkfs |
568 | +from curtin.storage_actions import asobject |
569 | |
570 | from .helpers import CiTestCase |
571 | from unittest import mock |
572 | |
573 | |
574 | +__import__("curtin.commands.block_meta") |
575 | + |
576 | + |
577 | class TestBlockMkfs(CiTestCase): |
578 | test_uuid = "fb26cc6c-ae73-11e5-9e38-2fb63f0c3155" |
579 | |
580 | - def _get_config(self, fstype): |
581 | - return {"fstype": fstype, "type": "format", "id": "testfmt", |
582 | - "volume": "null", "label": "format1", |
583 | - "uuid": self.test_uuid} |
584 | + def _get_action(self, fstype): |
585 | + return asobject({ |
586 | + "fstype": fstype, |
587 | + "type": "format", |
588 | + "id": "testfmt", |
589 | + "volume": "null", |
590 | + "label": "format1", |
591 | + "uuid": self.test_uuid, |
592 | + }) |
593 | |
594 | def _assert_same_flags(self, call, expected): |
595 | print("call:\n{}".format(call)) |
596 | @@ -38,7 +47,7 @@ class TestBlockMkfs(CiTestCase): |
597 | @mock.patch("curtin.block.mkfs.os") |
598 | @mock.patch("curtin.block.mkfs.util") |
599 | @mock.patch("curtin.block.mkfs.distro.lsb_release") |
600 | - def _run_mkfs_with_config(self, config, expected_cmd, expected_flags, |
601 | + def _run_mkfs_with_action(self, action, expected_cmd, expected_flags, |
602 | mock_lsb_release, mock_util, mock_os, mock_block, |
603 | release="wily", strict=False): |
604 | # Pretend we are on wily as there are no known edge cases for it |
605 | @@ -46,7 +55,7 @@ class TestBlockMkfs(CiTestCase): |
606 | mock_os.path.exists.return_value = True |
607 | mock_block.get_blockdev_sector_size.return_value = (512, 512) |
608 | |
609 | - mkfs.mkfs_from_config("/dev/null", config, strict=strict) |
610 | + mkfs.mkfs_from_action("/dev/null", action, strict=strict) |
611 | self.assertTrue(mock_util.subp.called) |
612 | calls = mock_util.subp.call_args_list |
613 | self.assertEquals(len(calls), 1) |
614 | @@ -58,92 +67,83 @@ class TestBlockMkfs(CiTestCase): |
615 | self._assert_same_flags(call, expected_flags) |
616 | |
617 | def test_mkfs_ext(self): |
618 | - conf = self._get_config("ext4") |
619 | + fmt = self._get_action("ext4") |
620 | expected_flags = [["-L", "format1"], "-F", |
621 | ["-U", self.test_uuid]] |
622 | - self._run_mkfs_with_config(conf, "mkfs.ext4", expected_flags) |
623 | + self._run_mkfs_with_action(fmt, "mkfs.ext4", expected_flags) |
624 | |
625 | def test_mkfs_ext_with_extra_options(self): |
626 | - conf = self._get_config("ext4") |
627 | + fmt = self._get_action("ext4") |
628 | extra_options = ["-D", "-e", "continue" |
629 | "-E", "offset=1024,nodiscard,resize=10"] |
630 | - conf['extra_options'] = extra_options |
631 | + fmt.extra_options = extra_options |
632 | expected_flags = [["-L", "format1"], "-F", |
633 | ["-U", self.test_uuid]] + extra_options |
634 | - self._run_mkfs_with_config(conf, "mkfs.ext4", expected_flags) |
635 | + self._run_mkfs_with_action(fmt, "mkfs.ext4", expected_flags) |
636 | |
637 | def test_mkfs_f2fs(self): |
638 | - conf = self._get_config("f2fs") |
639 | + fmt = self._get_action("f2fs") |
640 | expected_flags = [["-l", "format1"], "-f", |
641 | ["-U", self.test_uuid]] |
642 | - self._run_mkfs_with_config(conf, "mkfs.f2fs", expected_flags) |
643 | + self._run_mkfs_with_action(fmt, "mkfs.f2fs", expected_flags) |
644 | |
645 | def test_mkfs_btrfs(self): |
646 | - conf = self._get_config("btrfs") |
647 | + fmt = self._get_action("btrfs") |
648 | expected_flags = [["--label", "format1"], "--force", |
649 | ["--uuid", self.test_uuid]] |
650 | - self._run_mkfs_with_config(conf, "mkfs.btrfs", expected_flags) |
651 | + self._run_mkfs_with_action(fmt, "mkfs.btrfs", expected_flags) |
652 | |
653 | def test_mkfs_xfs(self): |
654 | """ mkfs.xfs passes uuid parameter """ |
655 | - conf = self._get_config("xfs") |
656 | + fmt = self._get_action("xfs") |
657 | expected_flags = ['-f', ['-L', 'format1'], |
658 | ['-m', 'uuid=%s' % self.test_uuid]] |
659 | - self._run_mkfs_with_config(conf, "mkfs.xfs", expected_flags) |
660 | + self._run_mkfs_with_action(fmt, "mkfs.xfs", expected_flags) |
661 | |
662 | def test_mkfs_btrfs_on_precise(self): |
663 | # Test precise+btrfs where there is no force or uuid |
664 | - conf = self._get_config("btrfs") |
665 | + fmt = self._get_action("btrfs") |
666 | expected_flags = [["--label", "format1"]] |
667 | - self._run_mkfs_with_config(conf, "mkfs.btrfs", expected_flags, |
668 | + self._run_mkfs_with_action(fmt, "mkfs.btrfs", expected_flags, |
669 | release="precise") |
670 | |
671 | def test_mkfs_btrfs_on_trusty(self): |
672 | # Test trusty btrfs where there is no uuid |
673 | - conf = self._get_config("btrfs") |
674 | + fmt = self._get_action("btrfs") |
675 | expected_flags = [["--label", "format1"], "--force"] |
676 | - self._run_mkfs_with_config(conf, "mkfs.btrfs", expected_flags, |
677 | + self._run_mkfs_with_action(fmt, "mkfs.btrfs", expected_flags, |
678 | release="trusty") |
679 | |
680 | def test_mkfs_fat(self): |
681 | - conf = self._get_config("fat32") |
682 | + fmt = self._get_action("fat32") |
683 | expected_flags = ["-I", ["-n", "format1"], ["-F", "32"]] |
684 | - self._run_mkfs_with_config(conf, "mkfs.vfat", expected_flags) |
685 | + self._run_mkfs_with_action(fmt, "mkfs.vfat", expected_flags) |
686 | |
687 | def test_mkfs_vfat(self): |
688 | """Ensure we can use vfat without fatsize""" |
689 | - conf = self._get_config("vfat") |
690 | + fmt = self._get_action("vfat") |
691 | expected_flags = ["-I", ["-n", "format1"], ] |
692 | - self._run_mkfs_with_config(conf, "mkfs.vfat", expected_flags) |
693 | - |
694 | - def test_mkfs_invalid_fstype(self): |
695 | - """Do not proceed if fstype is None or invalid""" |
696 | - with self.assertRaises(ValueError): |
697 | - conf = self._get_config(None) |
698 | - self._run_mkfs_with_config(conf, "mkfs.ext4", []) |
699 | - with self.assertRaises(ValueError): |
700 | - conf = self._get_config("fakefilesystemtype") |
701 | - self._run_mkfs_with_config(conf, "mkfs.ext3", []) |
702 | + self._run_mkfs_with_action(fmt, "mkfs.vfat", expected_flags) |
703 | |
704 | def test_mkfs_invalid_label(self): |
705 | """Do not proceed if filesystem label is too long""" |
706 | with self.assertRaises(ValueError): |
707 | - conf = self._get_config("ext4") |
708 | - conf['label'] = "thislabelislongerthan16chars" |
709 | - self._run_mkfs_with_config(conf, "mkfs.ext4", [], strict=True) |
710 | + fmt = self._get_action("ext4") |
711 | + fmt.label = "thislabelislongerthan16chars" |
712 | + self._run_mkfs_with_action(fmt, "mkfs.ext4", [], strict=True) |
713 | |
714 | - conf = self._get_config("swap") |
715 | + fmt = self._get_action("swap") |
716 | expected_flags = ["--force", ["--label", "abcdefghijklmno"], |
717 | - ["--uuid", conf['uuid']]] |
718 | - conf['label'] = "abcdefghijklmnop" # 16 chars, 15 is max |
719 | + ["--uuid", fmt.uuid]] |
720 | + fmt.label = "abcdefghijklmnop" # 16 chars, 15 is max |
721 | |
722 | # Raise error, do not truncate with strict = True |
723 | with self.assertRaises(ValueError): |
724 | - self._run_mkfs_with_config(conf, "mkswap", expected_flags, |
725 | + self._run_mkfs_with_action(fmt, "mkswap", expected_flags, |
726 | strict=True) |
727 | |
728 | # Do not raise with strict = False |
729 | - self._run_mkfs_with_config(conf, "mkswap", expected_flags) |
730 | + self._run_mkfs_with_action(fmt, "mkswap", expected_flags) |
731 | |
732 | @mock.patch("curtin.block.mkfs.block") |
733 | @mock.patch("curtin.block.mkfs.util") |
734 | diff --git a/tests/unittests/test_commands_block_meta.py b/tests/unittests/test_commands_block_meta.py |
735 | index 6388e0b..85ffcc2 100644 |
736 | --- a/tests/unittests/test_commands_block_meta.py |
737 | +++ b/tests/unittests/test_commands_block_meta.py |
738 | @@ -14,7 +14,7 @@ import uuid |
739 | |
740 | from curtin.block import dasd |
741 | from curtin.commands import block_meta, block_meta_v2 |
742 | -from curtin import paths, util |
743 | +from curtin import paths, storage_actions, util |
744 | from .helpers import CiTestCase |
745 | |
746 | |
747 | @@ -1623,9 +1623,9 @@ class TestFstabVolumeSpec(CiTestCase): |
748 | |
749 | class TestDasdHandler(CiTestCase): |
750 | |
751 | - @patch('curtin.commands.block_meta.dasd.DasdDevice.devname') |
752 | - @patch('curtin.commands.block_meta.dasd.DasdDevice.format') |
753 | - @patch('curtin.commands.block_meta.dasd.DasdDevice.needs_formatting') |
754 | + @patch('curtin.commands.block_meta.DasdDevice.devname') |
755 | + @patch('curtin.commands.block_meta.DasdDevice.format') |
756 | + @patch('curtin.commands.block_meta.DasdDevice.needs_formatting') |
757 | @patch('curtin.commands.block_meta.block') |
758 | @patch('curtin.commands.block_meta.util') |
759 | @patch('curtin.commands.block_meta.get_path_to_storage_volume') |
760 | @@ -1647,8 +1647,8 @@ class TestDasdHandler(CiTestCase): |
761 | set_label='cloudimg-rootfs', |
762 | mode='quick') |
763 | |
764 | - @patch('curtin.commands.block_meta.dasd.DasdDevice.format') |
765 | - @patch('curtin.commands.block_meta.dasd.DasdDevice.needs_formatting') |
766 | + @patch('curtin.commands.block_meta.DasdDevice.format') |
767 | + @patch('curtin.commands.block_meta.DasdDevice.needs_formatting') |
768 | @patch('curtin.commands.block_meta.block') |
769 | @patch('curtin.commands.block_meta.util') |
770 | @patch('curtin.commands.block_meta.get_path_to_storage_volume') |
771 | @@ -1667,8 +1667,8 @@ class TestDasdHandler(CiTestCase): |
772 | block_meta.dasd_handler(info, storage_config, empty_context) |
773 | self.assertEqual(0, m_dasd_format.call_count) |
774 | |
775 | - @patch('curtin.commands.block_meta.dasd.DasdDevice.format') |
776 | - @patch('curtin.commands.block_meta.dasd.DasdDevice.needs_formatting') |
777 | + @patch('curtin.commands.block_meta.DasdDevice.format') |
778 | + @patch('curtin.commands.block_meta.DasdDevice.needs_formatting') |
779 | @patch('curtin.commands.block_meta.block') |
780 | @patch('curtin.commands.block_meta.util') |
781 | @patch('curtin.commands.block_meta.get_path_to_storage_volume') |
782 | @@ -1688,8 +1688,8 @@ class TestDasdHandler(CiTestCase): |
783 | self.assertEqual(1, m_dasd_needf.call_count) |
784 | self.assertEqual(0, m_dasd_format.call_count) |
785 | |
786 | - @patch('curtin.commands.block_meta.dasd.DasdDevice.format') |
787 | - @patch('curtin.commands.block_meta.dasd.DasdDevice.needs_formatting') |
788 | + @patch('curtin.commands.block_meta.DasdDevice.format') |
789 | + @patch('curtin.commands.block_meta.DasdDevice.needs_formatting') |
790 | @patch('curtin.commands.block_meta.block') |
791 | @patch('curtin.commands.block_meta.util') |
792 | @patch('curtin.commands.block_meta.get_path_to_storage_volume') |
793 | @@ -2749,7 +2749,8 @@ class TestPartitionHandler(CiTestCase): |
794 | |
795 | block_meta.partition_handler(sconfig[1], oconfig, empty_context) |
796 | |
797 | - m_verify_fdasd.assert_has_calls([call(devpath, 1, sconfig[1])]) |
798 | + m_verify_fdasd.assert_has_calls( |
799 | + [call(devpath, 1, storage_actions.asobject(sconfig[1]))]) |
800 | |
801 | |
802 | class TestMultipathPartitionHandler(CiTestCase): |
803 | @@ -2973,15 +2974,14 @@ class TestPartitionVerifySfdisk(CiTestCase): |
804 | self.add_patch(base + 'verify_ptable_flag', 'm_verify_ptable_flag') |
805 | self.add_patch(base + 'os.path.realpath', 'm_realpath') |
806 | self.m_realpath.side_effect = lambda x: x |
807 | - self.info = { |
808 | + self.part_action = storage_actions.asobject({ |
809 | 'id': 'disk-sda-part-2', |
810 | 'type': 'partition', |
811 | 'device': 'sda', |
812 | 'number': 2, |
813 | 'size': '5GB', |
814 | 'flag': 'boot', |
815 | - } |
816 | - self.part_size = int(util.human2bytes(self.info['size'])) |
817 | + }) |
818 | self.devpath = self.random_string() |
819 | |
820 | def test_partition_verify_sfdisk(self): |
821 | @@ -2990,24 +2990,24 @@ class TestPartitionVerifySfdisk(CiTestCase): |
822 | 'node': devpath, |
823 | } |
824 | label = self.random_string() |
825 | - block_meta.partition_verify_sfdisk(self.info, label, sfdisk_part_info) |
826 | + block_meta.partition_verify_sfdisk(self.part_action, label, sfdisk_part_info) |
827 | self.assertEqual( |
828 | - [call(devpath, self.part_size, sfdisk_part_info)], |
829 | + [call(devpath, self.part_action.size, sfdisk_part_info)], |
830 | self.m_verify_size.call_args_list) |
831 | self.assertEqual( |
832 | - [call(devpath, self.info['flag'], label, sfdisk_part_info)], |
833 | + [call(devpath, self.part_action.flag, label, sfdisk_part_info)], |
834 | self.m_verify_ptable_flag.call_args_list) |
835 | |
836 | def test_partition_verify_skips_ptable_no_flag(self): |
837 | - del self.info['flag'] |
838 | + self.part_action.flag = None |
839 | devpath = self.random_string() |
840 | sfdisk_part_info = { |
841 | 'node': devpath, |
842 | } |
843 | label = self.random_string() |
844 | - block_meta.partition_verify_sfdisk(self.info, label, sfdisk_part_info) |
845 | + block_meta.partition_verify_sfdisk(self.part_action, label, sfdisk_part_info) |
846 | self.assertEqual( |
847 | - [call(devpath, self.part_size, sfdisk_part_info)], |
848 | + [call(devpath, self.part_action.size, sfdisk_part_info)], |
849 | self.m_verify_size.call_args_list) |
850 | self.assertEqual([], self.m_verify_ptable_flag.call_args_list) |
851 | |
852 | @@ -3479,7 +3479,7 @@ class TestPartitionVerifyFdasd(CiTestCase): |
853 | base = 'curtin.commands.block_meta.' |
854 | self.add_patch(base + 'verify_exists', 'm_verify_exists') |
855 | self.add_patch( |
856 | - base + 'dasd.DasdPartitionTable.from_fdasd', 'm_from_fdasd', |
857 | + base + 'DasdPartitionTable.from_fdasd', 'm_from_fdasd', |
858 | autospec=False, spec=dasd.DasdPartitionTable.from_fdasd) |
859 | self.add_patch(base + 'verify_size', 'm_verify_size') |
860 | self.info = { |
861 | @@ -3504,7 +3504,8 @@ class TestPartitionVerifyFdasd(CiTestCase): |
862 | dasd.DasdPartition('', 0, 0, tracks_needed, '1', 'Linux'), |
863 | ] |
864 | self.m_from_fdasd.side_effect = iter([fake_pt]) |
865 | - block_meta.partition_verify_fdasd(self.devpath, 1, self.info) |
866 | + block_meta.partition_verify_fdasd( |
867 | + self.devpath, 1, storage_actions.asobject(self.info)) |
868 | self.assertEqual( |
869 | [call(self.devpath)], |
870 | self.m_verify_exists.call_args_list) |
FAILED: Continuous integration, rev:b8ecacf91adb57d892b9a90e330629a28d054153
https://jenkins.canonical.com/server-team/job/curtin-ci/169/
Executed test runs:
FAILURE: https://jenkins.canonical.com/server-team/job/curtin-ci/nodes=metal-amd64/169/
FAILURE: https://jenkins.canonical.com/server-team/job/curtin-ci/nodes=metal-arm64/169/
FAILURE: https://jenkins.canonical.com/server-team/job/curtin-ci/nodes=metal-ppc64el/169/
FAILURE: https://jenkins.canonical.com/server-team/job/curtin-ci/nodes=metal-s390x/169/
Click here to trigger a rebuild:
https://jenkins.canonical.com/server-team/job/curtin-ci/169/rebuild