Merge ~mwhudson/curtin:pass-handlers-dict-to-handlers into curtin:master

Proposed by Michael Hudson-Doyle
Status: Merged
Approved by: Michael Hudson-Doyle
Approved revision: 90ab45edf802fa27c33184d073b7fbca327f86df
Merge reported by: Server Team CI bot
Merged at revision: not available
Proposed branch: ~mwhudson/curtin:pass-handlers-dict-to-handlers
Merge into: curtin:master
Diff against target: 595 lines (+66/-66)
2 files modified
curtin/commands/block_meta.py (+17/-17)
tests/unittests/test_commands_block_meta.py (+49/-49)
Reviewer Review Type Date Requested Status
Server Team CI bot continuous-integration Approve
Dan Bungert Approve
Review via email: mp+412496@code.launchpad.net

Commit message

block_meta: pass handlers dict to all handlers

When we have different handlers for v2 and v1, we will need the
invocation of e.g. disk_handler from raid_handler to go to the right
version.

To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote :
review: Needs Fixing (continuous-integration)
Revision history for this message
Dan Bungert (dbungert) :
review: Approve
90ab45e... by Michael Hudson-Doyle

lint

Revision history for this message
Server Team CI bot (server-team-bot) wrote :
review: Approve (continuous-integration)

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py
2index 5c3226d..c5ac137 100644
3--- a/curtin/commands/block_meta.py
4+++ b/curtin/commands/block_meta.py
5@@ -550,7 +550,7 @@ def get_path_to_storage_volume(volume, storage_config):
6 DEVS = set()
7
8
9-def image_handler(info, storage_config):
10+def image_handler(info, storage_config, handlers):
11 path = info['path']
12 if os.path.exists(path):
13 os.unlink(path)
14@@ -566,10 +566,10 @@ def image_handler(info, storage_config):
15 raise
16 info['dev'] = dev
17 DEVS.add(dev)
18- disk_handler(info, storage_config)
19+ handlers['disk'](info, storage_config, handlers)
20
21
22-def dasd_handler(info, storage_config):
23+def dasd_handler(info, storage_config, handlers):
24 """ Prepare the specified dasd device per configuration
25
26 params: info: dictionary of configuration, required keys are:
27@@ -614,7 +614,7 @@ def dasd_handler(info, storage_config):
28 "Dasd %s failed to format" % dasd_device.devname)
29
30
31-def disk_handler(info, storage_config):
32+def disk_handler(info, storage_config, handlers):
33 _dos_names = ['dos', 'msdos']
34 ptable = info.get('ptable')
35 if ptable and ptable not in PTABLES_VALID:
36@@ -843,7 +843,7 @@ def partition_verify_fdasd(disk_path, partnumber, info):
37 raise RuntimeError("dasd partitions do not support flags")
38
39
40-def partition_handler(info, storage_config):
41+def partition_handler(info, storage_config, handlers):
42 device = info.get('device')
43 size = info.get('size')
44 flag = info.get('flag')
45@@ -1030,7 +1030,7 @@ def partition_handler(info, storage_config):
46 make_dname(info.get('id'), storage_config)
47
48
49-def format_handler(info, storage_config):
50+def format_handler(info, storage_config, handlers):
51 volume = info.get('volume')
52 if not volume:
53 raise ValueError("volume must be specified for partition '%s'" %
54@@ -1280,7 +1280,7 @@ def mount_apply(fdata, target=None, fstab=None):
55 LOG.info("fstab not in environment, so not writing")
56
57
58-def mount_handler(info, storage_config):
59+def mount_handler(info, storage_config, handlers):
60 """ Handle storage config type: mount
61
62 info = {
63@@ -1316,7 +1316,7 @@ def lvm_volgroup_verify(vg_name, device_paths):
64 verify_volgroup_members(vg_name, device_paths)
65
66
67-def lvm_volgroup_handler(info, storage_config):
68+def lvm_volgroup_handler(info, storage_config, handlers):
69 devices = info.get('devices')
70 device_paths = []
71 name = info.get('name')
72@@ -1377,7 +1377,7 @@ def lvm_partition_verify(lv_name, vg_name, info):
73 verify_lv_size(lv_name, info['size'])
74
75
76-def lvm_partition_handler(info, storage_config):
77+def lvm_partition_handler(info, storage_config, handlers):
78 volgroup = storage_config[info['volgroup']]['name']
79 name = info['name']
80 if not volgroup:
81@@ -1439,7 +1439,7 @@ def dm_crypt_verify(dmcrypt_dev, volume_path):
82 verify_blkdev_used(dmcrypt_dev, volume_path)
83
84
85-def dm_crypt_handler(info, storage_config):
86+def dm_crypt_handler(info, storage_config, handlers):
87 state = util.load_command_environment(strict=True)
88 volume = info.get('volume')
89 keysize = info.get('keysize')
90@@ -1581,7 +1581,7 @@ def raid_verify(md_devname, raidlevel, device_paths, spare_paths, container):
91 md_devname, raidlevel, device_paths, spare_paths, container)
92
93
94-def raid_handler(info, storage_config):
95+def raid_handler(info, storage_config, handlers):
96 state = util.load_command_environment(strict=True)
97 devices = info.get('devices')
98 raidlevel = info.get('raidlevel')
99@@ -1663,7 +1663,7 @@ def raid_handler(info, storage_config):
100 # If ptable is specified, call disk_handler on this mdadm device to create
101 # the table
102 if info.get('ptable'):
103- disk_handler(info, storage_config)
104+ handlers['disk'](info, storage_config, handlers)
105
106
107 def verify_bcache_cachedev(cachedev):
108@@ -1730,7 +1730,7 @@ def bcache_verify(cachedev, backingdev, cache_mode):
109 return True
110
111
112-def bcache_handler(info, storage_config):
113+def bcache_handler(info, storage_config, handlers):
114 backing_device = get_path_to_storage_volume(info.get('backing_device'),
115 storage_config)
116 cache_device = get_path_to_storage_volume(info.get('cache_device'),
117@@ -1778,13 +1778,13 @@ def bcache_handler(info, storage_config):
118 make_dname(info.get('id'), storage_config)
119
120 if info.get('ptable'):
121- disk_handler(info, storage_config)
122+ handlers['disk'](info, storage_config, handlers)
123
124 LOG.debug('Finished bcache creation for backing %s or caching %s',
125 backing_device, cache_device)
126
127
128-def zpool_handler(info, storage_config):
129+def zpool_handler(info, storage_config, handlers):
130 """
131 Create a zpool based in storage_configuration
132 """
133@@ -1823,7 +1823,7 @@ def zpool_handler(info, storage_config):
134 zfs_properties=fs_properties)
135
136
137-def zfs_handler(info, storage_config):
138+def zfs_handler(info, storage_config, handlers):
139 """
140 Create a zfs filesystem
141 """
142@@ -2029,7 +2029,7 @@ def meta_custom(args):
143 description="configuring %s: %s" % (command['type'],
144 command['id'])):
145 try:
146- handler(command, storage_config_dict)
147+ handler(command, storage_config_dict, command_handlers)
148 except Exception as error:
149 LOG.error("An error occured handling '%s': %s - %s" %
150 (item_id, type(error).__name__, error))
151diff --git a/tests/unittests/test_commands_block_meta.py b/tests/unittests/test_commands_block_meta.py
152index c3f8d14..b1e1f20 100644
153--- a/tests/unittests/test_commands_block_meta.py
154+++ b/tests/unittests/test_commands_block_meta.py
155@@ -371,7 +371,7 @@ class TestBlockMeta(CiTestCase):
156 holders = ['md1']
157 self.mock_get_holders.return_value = holders
158
159- block_meta.disk_handler(info, self.storage_config)
160+ block_meta.disk_handler(info, self.storage_config, {})
161
162 print("clear_holders: %s" % self.mock_clear_holders.call_args_list)
163 print("assert_clear: %s" % self.mock_assert_clear.call_args_list)
164@@ -394,7 +394,7 @@ class TestBlockMeta(CiTestCase):
165 self.mock_block_sys_block_path.return_value = '/sys/class/block/xxx'
166 self.mock_block_sector_size.return_value = (512, 512)
167
168- block_meta.partition_handler(part_info, self.storage_config)
169+ block_meta.partition_handler(part_info, self.storage_config, {})
170 part_offset = 2048 * 512
171 self.mock_block_zero_file.assert_called_with(disk_kname, [part_offset],
172 exclusive=False)
173@@ -421,7 +421,7 @@ class TestBlockMeta(CiTestCase):
174 }
175 self.mock_get_volume_type.return_value = 'part'
176
177- block_meta.mount_handler(mount_info, self.storage_config)
178+ block_meta.mount_handler(mount_info, self.storage_config, {})
179 options = 'defaults'
180 comment = "# / was on /wark/xxx during curtin installation"
181 expected = "%s\n%s %s %s %s 0 1\n" % (comment,
182@@ -449,7 +449,7 @@ class TestBlockMeta(CiTestCase):
183 }
184 self.mock_get_volume_type.return_value = 'part'
185
186- block_meta.mount_handler(mount_info, self.storage_config)
187+ block_meta.mount_handler(mount_info, self.storage_config, {})
188 options = 'ro'
189 comment = "# /readonly was on /wark/xxx during curtin installation"
190 expected = "%s\n%s %s %s %s 0 1\n" % (comment,
191@@ -478,7 +478,7 @@ class TestBlockMeta(CiTestCase):
192 }
193 self.mock_get_volume_type.return_value = 'part'
194
195- block_meta.mount_handler(mount_info, self.storage_config)
196+ block_meta.mount_handler(mount_info, self.storage_config, {})
197 options = 'defaults'
198 comment = "# /readonly was on /wark/xxx during curtin installation"
199 expected = "%s\n%s %s %s %s 0 1\n" % (comment,
200@@ -509,7 +509,7 @@ class TestBlockMeta(CiTestCase):
201 }
202 self.mock_get_volume_type.return_value = 'part'
203
204- block_meta.mount_handler(mount_info, self.storage_config)
205+ block_meta.mount_handler(mount_info, self.storage_config, {})
206 options = 'defaults'
207 comment = "# /readonly was on /wark/xxx during curtin installation"
208 expected = "#curtin-test\n%s\n%s %s %s %s 0 1\n" % (comment,
209@@ -542,7 +542,7 @@ class TestZpoolHandler(CiTestCase):
210 m_getpath.return_value = disk_path
211 m_block.disk_to_byid_path.return_value = None
212 m_util.load_command_environment.return_value = {'target': 'mytarget'}
213- block_meta.zpool_handler(info, storage_config)
214+ block_meta.zpool_handler(info, storage_config, {})
215 m_zfs.zpool_create.assert_called_with(
216 info['pool'], [disk_path],
217 mountpoint="/",
218@@ -1256,7 +1256,7 @@ class TestDasdHandler(CiTestCase):
219 m_dasd_devname.return_value = disk_path
220 m_getpath.return_value = disk_path
221 m_dasd_needf.side_effect = [True, False]
222- block_meta.dasd_handler(info, storage_config)
223+ block_meta.dasd_handler(info, storage_config, {})
224 m_dasd_format.assert_called_with(blksize=4096, layout='cdl',
225 set_label='cloudimg-rootfs',
226 mode='quick')
227@@ -1278,7 +1278,7 @@ class TestDasdHandler(CiTestCase):
228 disk_path = "/wark/dasda"
229 m_getpath.return_value = disk_path
230 m_dasd_needf.side_effect = [False, False]
231- block_meta.dasd_handler(info, storage_config)
232+ block_meta.dasd_handler(info, storage_config, {})
233 self.assertEqual(0, m_dasd_format.call_count)
234
235 @patch('curtin.commands.block_meta.dasd.DasdDevice.format')
236@@ -1298,7 +1298,7 @@ class TestDasdHandler(CiTestCase):
237 disk_path = "/wark/dasda"
238 m_getpath.return_value = disk_path
239 m_dasd_needf.side_effect = [False, False]
240- block_meta.dasd_handler(info, storage_config)
241+ block_meta.dasd_handler(info, storage_config, {})
242 self.assertEqual(1, m_dasd_needf.call_count)
243 self.assertEqual(0, m_dasd_format.call_count)
244
245@@ -1321,7 +1321,7 @@ class TestDasdHandler(CiTestCase):
246 m_getpath.return_value = disk_path
247 m_dasd_needf.side_effect = [True, False]
248 with self.assertRaises(ValueError):
249- block_meta.dasd_handler(info, storage_config)
250+ block_meta.dasd_handler(info, storage_config, {})
251 self.assertEqual(1, m_dasd_needf.call_count)
252 self.assertEqual(0, m_dasd_format.call_count)
253
254@@ -1344,7 +1344,7 @@ class TestDiskHandler(CiTestCase):
255 m_getpath.return_value = disk_path
256 m_block.get_part_table_type.return_value = 'vtoc'
257 m_getpath.return_value = disk_path
258- block_meta.disk_handler(info, storage_config)
259+ block_meta.disk_handler(info, storage_config, {})
260 m_getpath.assert_called_with(info['id'], storage_config)
261 m_block.get_part_table_type.assert_called_with(disk_path)
262
263@@ -1360,7 +1360,7 @@ class TestDiskHandler(CiTestCase):
264 m_getpath.return_value = disk_path
265 m_block.get_part_table_type.return_value = self.random_string()
266 m_getpath.return_value = disk_path
267- block_meta.disk_handler(info, storage_config)
268+ block_meta.disk_handler(info, storage_config, {})
269 m_getpath.assert_called_with(info['id'], storage_config)
270 self.assertEqual(0, m_block.get_part_table_type.call_count)
271
272@@ -1376,7 +1376,7 @@ class TestDiskHandler(CiTestCase):
273 m_getpath.return_value = disk_path
274 m_block.get_part_table_type.return_value = 'gpt'
275 m_getpath.return_value = disk_path
276- block_meta.disk_handler(info, storage_config)
277+ block_meta.disk_handler(info, storage_config, {})
278 m_getpath.assert_called_with(info['id'], storage_config)
279 self.assertEqual(0, m_block.get_part_table_type.call_count)
280
281@@ -1394,7 +1394,7 @@ class TestDiskHandler(CiTestCase):
282 m_block.get_part_table_type.return_value = None
283 m_getpath.return_value = disk_path
284 with self.assertRaises(ValueError):
285- block_meta.disk_handler(info, storage_config)
286+ block_meta.disk_handler(info, storage_config, {})
287 m_getpath.assert_called_with(info['id'], storage_config)
288 m_block.get_part_table_type.assert_called_with(disk_path)
289
290@@ -1406,7 +1406,7 @@ class TestDiskHandler(CiTestCase):
291 info = {'ptable': 'vtoc', 'type': 'disk', 'id': 'disk-foobar'}
292 path = m_getpath.return_value = self.random_string()
293 m_get_holders.return_value = []
294- block_meta.disk_handler(info, OrderedDict())
295+ block_meta.disk_handler(info, OrderedDict(), {})
296 m_subp.assert_called_once_with(['fdasd', '-c', '/dev/null', path])
297
298
299@@ -1453,7 +1453,7 @@ class TestLvmVolgroupHandler(CiTestCase):
300 self.m_getpath.side_effect = iter(devices)
301
302 block_meta.lvm_volgroup_handler(self.storage_config['lvm-volgroup1'],
303- self.storage_config)
304+ self.storage_config, {})
305
306 self.assertEqual([call(['vgcreate', '--force', '--zero=y', '--yes',
307 'vg1'] + devices, capture=True)],
308@@ -1469,7 +1469,7 @@ class TestLvmVolgroupHandler(CiTestCase):
309
310 self.storage_config['lvm-volgroup1']['preserve'] = True
311 block_meta.lvm_volgroup_handler(self.storage_config['lvm-volgroup1'],
312- self.storage_config)
313+ self.storage_config, {})
314
315 self.assertEqual(0, self.m_subp.call_count)
316 self.assertEqual(1, self.m_lvm.lvm_scan.call_count)
317@@ -1482,7 +1482,7 @@ class TestLvmVolgroupHandler(CiTestCase):
318 self.storage_config['lvm-volgroup1']['preserve'] = True
319
320 block_meta.lvm_volgroup_handler(self.storage_config['lvm-volgroup1'],
321- self.storage_config)
322+ self.storage_config, {})
323
324 self.assertEqual(1, self.m_lvm.activate_volgroups.call_count)
325 self.assertEqual([call('vg1')],
326@@ -1499,7 +1499,7 @@ class TestLvmVolgroupHandler(CiTestCase):
327
328 with self.assertRaises(RuntimeError):
329 block_meta.lvm_volgroup_handler(
330- self.storage_config['lvm-volgroup1'], self.storage_config)
331+ self.storage_config['lvm-volgroup1'], self.storage_config, {})
332
333 self.assertEqual(1, self.m_lvm.activate_volgroups.call_count)
334 self.assertEqual([call('vg1')],
335@@ -1550,7 +1550,7 @@ class TestLvmPartitionHandler(CiTestCase):
336 expected_size_str = "%sB" % util.human2bytes(lv_size)
337
338 block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
339- self.storage_config)
340+ self.storage_config, {})
341
342 call_name, call_args, call_kwargs = self.m_subp.mock_calls[0]
343 # call_args is an n-tuple of arg list
344@@ -1564,7 +1564,7 @@ class TestLvmPartitionHandler(CiTestCase):
345 self.m_getpath.return_value = devpath
346
347 block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
348- self.storage_config)
349+ self.storage_config, {})
350 self.m_wipe.assert_called_with(devpath, mode='superblock',
351 exclusive=False)
352
353@@ -1578,7 +1578,7 @@ class TestLvmPartitionHandler(CiTestCase):
354 wipe_mode = 'zero'
355 self.storage_config['lvm-part1']['wipe'] = wipe_mode
356 block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
357- self.storage_config)
358+ self.storage_config, {})
359 self.m_wipe.assert_called_with(devpath, mode=wipe_mode,
360 exclusive=False)
361
362@@ -1587,7 +1587,7 @@ class TestLvmPartitionHandler(CiTestCase):
363 m_verify.return_value = True
364 self.storage_config['lvm-part1']['preserve'] = True
365 block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
366- self.storage_config)
367+ self.storage_config, {})
368 self.assertEqual(0, self.m_distro.lsb_release.call_count)
369 self.assertEqual(0, self.m_subp.call_count)
370
371@@ -1597,7 +1597,7 @@ class TestLvmPartitionHandler(CiTestCase):
372 self.m_lvm.get_lv_size_bytes.return_value = 1073741824.0
373
374 block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
375- self.storage_config)
376+ self.storage_config, {})
377 self.assertEqual([call('vg1')],
378 self.m_lvm.get_lvols_in_volgroup.call_args_list)
379 self.assertEqual([call('lv1')],
380@@ -1611,7 +1611,7 @@ class TestLvmPartitionHandler(CiTestCase):
381
382 with self.assertRaises(RuntimeError):
383 block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
384- self.storage_config)
385+ self.storage_config, {})
386
387 self.assertEqual([call('vg1')],
388 self.m_lvm.get_lvols_in_volgroup.call_args_list)
389@@ -1626,7 +1626,7 @@ class TestLvmPartitionHandler(CiTestCase):
390
391 with self.assertRaises(RuntimeError):
392 block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
393- self.storage_config)
394+ self.storage_config, {})
395 self.assertEqual([call('vg1')],
396 self.m_lvm.get_lvols_in_volgroup.call_args_list)
397 self.assertEqual([call('lv1')],
398@@ -1694,7 +1694,7 @@ class TestDmCryptHandler(CiTestCase):
399 self.m_getpath.return_value = volume_path
400
401 info = self.storage_config['dmcrypt0']
402- block_meta.dm_crypt_handler(info, self.storage_config)
403+ block_meta.dm_crypt_handler(info, self.storage_config, {})
404 expected_calls = [
405 call(['cryptsetup', '--cipher', self.cipher,
406 '--key-size', self.keysize,
407@@ -1712,7 +1712,7 @@ class TestDmCryptHandler(CiTestCase):
408 info = self.storage_config['dmcrypt0']
409 del info['dm_name']
410
411- block_meta.dm_crypt_handler(info, self.storage_config)
412+ block_meta.dm_crypt_handler(info, self.storage_config, {})
413 expected_calls = [
414 call(['cryptsetup', '--cipher', self.cipher,
415 '--key-size', self.keysize,
416@@ -1736,7 +1736,7 @@ class TestDmCryptHandler(CiTestCase):
417
418 info = self.storage_config['dmcrypt0']
419 volume_name = "%s:%s" % (volume_byid, info['dm_name'])
420- block_meta.dm_crypt_handler(info, self.storage_config)
421+ block_meta.dm_crypt_handler(info, self.storage_config, {})
422 expected_calls = [
423 call(['zkey', 'generate', '--xts', '--volume-type', 'luks2',
424 '--sector-size', '4096', '--name', info['dm_name'],
425@@ -1771,7 +1771,7 @@ class TestDmCryptHandler(CiTestCase):
426
427 info = self.storage_config['dmcrypt0']
428 volume_name = "%s:%s" % (volume_byid, info['dm_name'])
429- block_meta.dm_crypt_handler(info, self.storage_config)
430+ block_meta.dm_crypt_handler(info, self.storage_config, {})
431 expected_calls = [
432 call(['zkey', 'generate', '--xts', '--volume-type', 'luks2',
433 '--sector-size', '4096', '--name', info['dm_name'],
434@@ -1808,7 +1808,7 @@ class TestDmCryptHandler(CiTestCase):
435
436 info = self.storage_config['dmcrypt0']
437 volume_name = "%s:%s" % (volume_byid, info['dm_name'])
438- block_meta.dm_crypt_handler(info, self.storage_config)
439+ block_meta.dm_crypt_handler(info, self.storage_config, {})
440 expected_calls = [
441 call(['zkey', 'generate', '--xts', '--volume-type', 'luks2',
442 '--sector-size', '4096', '--name', info['dm_name'],
443@@ -1835,7 +1835,7 @@ class TestDmCryptHandler(CiTestCase):
444
445 info = self.storage_config['dmcrypt0']
446 info['preserve'] = True
447- block_meta.dm_crypt_handler(info, self.storage_config)
448+ block_meta.dm_crypt_handler(info, self.storage_config, {})
449
450 self.assertEqual(0, self.m_subp.call_count)
451 self.assertEqual(len(util.load_file(self.crypttab).splitlines()), 1)
452@@ -1856,7 +1856,7 @@ class TestDmCryptHandler(CiTestCase):
453
454 info = self.storage_config['dmcrypt0']
455 info['preserve'] = True
456- block_meta.dm_crypt_handler(info, self.storage_config)
457+ block_meta.dm_crypt_handler(info, self.storage_config, {})
458 self.assertEqual(len(util.load_file(self.crypttab).splitlines()), 1)
459
460 @patch('curtin.commands.block_meta.os.path.exists')
461@@ -1868,7 +1868,7 @@ class TestDmCryptHandler(CiTestCase):
462 info = self.storage_config['dmcrypt0']
463 info['preserve'] = True
464 with self.assertRaises(RuntimeError):
465- block_meta.dm_crypt_handler(info, self.storage_config)
466+ block_meta.dm_crypt_handler(info, self.storage_config, {})
467
468 @patch('curtin.commands.block_meta.os.path.exists')
469 def test_dm_crypt_preserve_raises_exception_if_wrong_dev_used(self, m_ex):
470@@ -1886,7 +1886,7 @@ class TestDmCryptHandler(CiTestCase):
471 info = self.storage_config['dmcrypt0']
472 info['preserve'] = True
473 with self.assertRaises(RuntimeError):
474- block_meta.dm_crypt_handler(info, self.storage_config)
475+ block_meta.dm_crypt_handler(info, self.storage_config, {})
476
477
478 class TestRaidHandler(CiTestCase):
479@@ -1984,7 +1984,7 @@ class TestRaidHandler(CiTestCase):
480 self.storage_config['mddevice']['name'] = param
481 try:
482 block_meta.raid_handler(self.storage_config['mddevice'],
483- self.storage_config)
484+ self.storage_config, {})
485 except ValueError:
486 if param in ['bad/path']:
487 continue
488@@ -2006,7 +2006,7 @@ class TestRaidHandler(CiTestCase):
489 md_devname = '/dev/' + self.storage_config['mddevice']['name']
490 self.m_getpath.side_effect = iter(devices)
491 block_meta.raid_handler(self.storage_config['mddevice'],
492- self.storage_config)
493+ self.storage_config, {})
494 self.assertEqual([call(md_devname, 5, devices, [], None, '', None)],
495 self.m_mdadm.mdadm_create.call_args_list)
496
497@@ -2020,7 +2020,7 @@ class TestRaidHandler(CiTestCase):
498 self.m_getpath.side_effect = iter(devices)
499 self.storage_config['mddevice']['preserve'] = True
500 block_meta.raid_handler(self.storage_config['mddevice'],
501- self.storage_config)
502+ self.storage_config, {})
503 self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
504 self.assertEqual(
505 [call(md_devname, 5, devices, [], None)],
506@@ -2037,7 +2037,7 @@ class TestRaidHandler(CiTestCase):
507 del self.storage_config['mddevice']['devices']
508 self.storage_config['mddevice']['container'] = self.random_string()
509 block_meta.raid_handler(self.storage_config['mddevice'],
510- self.storage_config)
511+ self.storage_config, {})
512 self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
513 self.assertEqual(
514 [call(md_devname, 5, [], [], devices[0])],
515@@ -2053,7 +2053,7 @@ class TestRaidHandler(CiTestCase):
516 self.m_mdadm.md_check.return_value = True
517 self.storage_config['mddevice']['preserve'] = True
518 block_meta.raid_handler(self.storage_config['mddevice'],
519- self.storage_config)
520+ self.storage_config, {})
521 self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
522 self.assertEqual([call(md_devname, 5, devices, [], None)],
523 self.m_mdadm.md_check.call_args_list)
524@@ -2068,7 +2068,7 @@ class TestRaidHandler(CiTestCase):
525 self.m_mdadm.md_check.side_effect = iter([ValueError(), None])
526 self.storage_config['mddevice']['preserve'] = True
527 block_meta.raid_handler(self.storage_config['mddevice'],
528- self.storage_config)
529+ self.storage_config, {})
530 self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
531 self.assertEqual([call(md_devname, 5, devices, [], None)] * 2,
532 self.m_mdadm.md_check.call_args_list)
533@@ -2086,7 +2086,7 @@ class TestRaidHandler(CiTestCase):
534 self.storage_config['mddevice']['preserve'] = True
535 with self.assertRaises(ValueError):
536 block_meta.raid_handler(self.storage_config['mddevice'],
537- self.storage_config)
538+ self.storage_config, {})
539 self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
540 self.assertEqual([call(md_devname, 5, devices, [], None)] * 2,
541 self.m_mdadm.md_check.call_args_list)
542@@ -2179,7 +2179,7 @@ class TestBcacheHandler(CiTestCase):
543 self.m_bcache.create_cache_device.return_value = cset_uuid
544
545 block_meta.bcache_handler(self.storage_config['id_bcache0'],
546- self.storage_config)
547+ self.storage_config, {})
548 self.assertEqual([call(caching_device)],
549 self.m_bcache.create_cache_device.call_args_list)
550 self.assertEqual([
551@@ -2302,7 +2302,7 @@ class TestPartitionHandler(CiTestCase):
552 self.m_block.sys_block_path.return_value = 'sys/class/block/sda'
553 self.m_block.get_blockdev_sector_size.return_value = (512, 512)
554 m_ex_part.return_value = 'disk-sda-part-2'
555- block_meta.partition_handler(logical_part, self.storage_config)
556+ block_meta.partition_handler(logical_part, self.storage_config, {})
557 m_ex_part.assert_called_with('sda', self.storage_config)
558
559 def test_part_handler_raise_exception_missing_extended_part(self):
560@@ -2322,7 +2322,7 @@ class TestPartitionHandler(CiTestCase):
561 self.m_block.sys_block_path.return_value = 'sys/class/block/sda'
562 self.m_block.get_blockdev_sector_size.return_value = (512, 512)
563 with self.assertRaises(RuntimeError):
564- block_meta.partition_handler(logical_part, self.storage_config)
565+ block_meta.partition_handler(logical_part, self.storage_config, {})
566
567 @patch('curtin.commands.block_meta.partition_verify_fdasd')
568 def test_part_hander_reuse_vtoc(self, m_verify_fdasd):
569@@ -2349,7 +2349,7 @@ class TestPartitionHandler(CiTestCase):
570 m_verify_fdasd.return_value = True
571 devpath = self.m_getpath.return_value = self.random_string()
572
573- block_meta.partition_handler(sconfig[1], oconfig)
574+ block_meta.partition_handler(sconfig[1], oconfig, {})
575
576 m_verify_fdasd.assert_has_calls([call(devpath, 1, sconfig[1])])
577
578@@ -2412,7 +2412,7 @@ class TestMultipathPartitionHandler(CiTestCase):
579 m_part_info.return_value = (2048, 2048)
580
581 part2 = self.storage_config['disk-sda-part-2']
582- block_meta.partition_handler(part2, self.storage_config)
583+ block_meta.partition_handler(part2, self.storage_config, {})
584
585 expected_calls = [
586 call(['sgdisk', '--new', '2:4096:4096', '--typecode=2:8300',
587@@ -2441,7 +2441,7 @@ class TestMultipathPartitionHandler(CiTestCase):
588 m_part_info.return_value = (2048, 2048)
589
590 part2 = self.storage_config['disk-sda-part-2']
591- block_meta.partition_handler(part2, self.storage_config)
592+ block_meta.partition_handler(part2, self.storage_config, {})
593
594 expected_calls = [
595 call(['sgdisk', '--new', '2:4096:4096', '--typecode=2:8300',

Subscribers

People subscribed via source and target branches