Merge ~mwhudson/curtin:mdadm-check-container into curtin:master
Status: Merged
Approved by: Michael Hudson-Doyle
Approved revision: 12f5d09dd1352af0606473decbb02727e3c30525
Merge reported by: Server Team CI bot
Merged at revision: not available
Proposed branch: ~mwhudson/curtin:mdadm-check-container
Merge into: curtin:master
Diff against target: 643 lines (+299/-108), 5 files modified
- curtin/block/mdadm.py (+42/-63)
- curtin/block/schemas.py (+5/-1)
- curtin/commands/block_meta.py (+27/-11)
- tests/unittests/test_block_mdadm.py (+198/-26)
- tests/unittests/test_commands_block_meta.py (+27/-7)
Related bugs: none
Reviewer | Review Type | Date Requested | Status
---|---|---|---
Server Team CI bot | continuous-integration | | Approve
curtin developers | | | Pending
Review via email: mp+402384@code.launchpad.net
Commit message
block_meta: make preserve: true on a raid in a container work
Pass any supplied container name to raid_verify and on to md_check, and
check it against the container of the existing device.
Also tidy up a few other things in the raid verification code path: make
checking functions consistently raise ValueError on failure rather than
returning True / False and have the verification of raid level actually
check the level of the existing array.
This also fixes preserve: true on a raid0 array while we are there --
raid0 arrays do not have degraded or sync_action attributes.
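A minimal sketch of the patterns this describes, with names and sysfs paths simplified (md_check_container is a hypothetical helper; the real md_check in the diff below inlines the same comparison):

```python
import os


def md_sysfs_attr(md_devname, attrname, default=''):
    # raid0 arrays have no 'degraded' or 'sync_action' files under
    # /sys/.../md/, so a missing attribute falls back to the default.
    # Assumes a bare device name such as 'md0'.
    path = '/sys/class/block/%s/md/%s' % (md_devname, attrname)
    if not os.path.isfile(path):
        return default
    with open(path) as fp:
        return fp.read().strip()


def md_check_array_state(md_devname):
    # Raise ValueError on failure instead of returning False, so the
    # caller can log why verification failed.
    degraded = md_sysfs_attr(md_devname, 'degraded', None)
    sync_action = md_sysfs_attr(md_devname, 'sync_action', None)
    if degraded is not None and degraded != '0':
        raise ValueError('Array in degraded state: ' + md_devname)
    if sync_action is not None and sync_action != 'idle':
        raise ValueError('Array syncing, not idle state: ' + md_devname)


def md_check_container(md_devname, detail, container):
    # 'detail' is parsed `mdadm --detail --export` output; compare the
    # array's container against the expected container device path.
    if 'MD_CONTAINER' not in detail:
        raise ValueError('%s is not in a container' % md_devname)
    actual = os.path.realpath(detail['MD_CONTAINER'])
    if actual != container:
        raise ValueError('%s is in container %r, not %r' % (
            md_devname, actual, container))
```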
Description of the change
Server Team CI bot (server-team-bot) wrote:
FAILED: Continuous integration, rev:8604c3e71adaf6257824d376bb4263a484f28f9b
https://jenkins.ubuntu.com/server/job/curtin-ci/119/
Executed test runs:
FAILURE: https://jenkins.ubuntu.com/server/job/curtin-ci/nodes=metal-amd64/119/
SUCCESS: https://jenkins.ubuntu.com/server/job/curtin-ci/nodes=metal-arm64/119/
SUCCESS: https://jenkins.ubuntu.com/server/job/curtin-ci/nodes=metal-ppc64el/119/
SUCCESS: https://jenkins.ubuntu.com/server/job/curtin-ci/nodes=metal-s390x/119/
Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/curtin-ci/119/rebuild
Ryan Harper (raharper) wrote:
I like your initial commit message; can you update the commit box with that one, as that's what the autolander will take when it squash-merges?
One suggestion on the md_check refactor below.
Dan Bungert (dbungert) wrote:
One minor suggestion below.
- 12f5d09... by Michael Hudson-Doyle: log failures a bit more clearly as per review
Michael Hudson-Doyle (mwhudson) wrote:
Updated commit message and tweaked how failures are presented as suggested. Take another look?
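The tweak in question is visible in block_meta.py in the diff below: try the check, assemble and retry on failure, and log each collected error before raising. A simplified sketch, passing the check and assemble operations in as callables to keep it self-contained:

```python
import logging

LOG = logging.getLogger(__name__)


def verify_raid(md_devname, md_check, mdadm_assemble):
    """Simplified verify_md_components: check, assemble and recheck on
    failure, and log every collected error before raising."""
    errors = []
    for attempt in ('initial', 'after-assemble'):
        try:
            md_check(md_devname)
            LOG.debug('Verified %s raid composition, raid is OK',
                      md_devname)
            return
        except ValueError as err:
            errors.append(err)
            if attempt == 'initial':
                LOG.info('assembling preserved raid for %s', md_devname)
                mdadm_assemble(md_devname)
    for err in errors:
        LOG.error('Error checking raid %s: %s', md_devname, err)
    raise ValueError(
        'Verified %s raid composition, raid is not OK' % md_devname)
```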
Server Team CI bot (server-team-bot) wrote:
PASSED: Continuous integration, rev:12f5d09dd13
https:/
Executed test runs:
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
Click here to trigger a rebuild:
https:/
Ryan Harper (raharper) wrote:
Was there an LP bug number for this? If so, please add LP: #<number> to the commit message.
Looks good.
Michael Hudson-Doyle (mwhudson) wrote:
No, I just found this by fooling around. Thanks for the reviews!
Server Team CI bot (server-team-bot) wrote:
Autolanding: FAILED
More details in the following jenkins job:
https:/
Executed test runs:
FAILURE: https:/
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
Server Team CI bot (server-team-bot) wrote:
Autolanding: FAILED
More details in the following jenkins job:
https:/
Executed test runs:
FAILURE: https:/
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
Server Team CI bot (server-team-bot):
Preview Diff
1 | diff --git a/curtin/block/mdadm.py b/curtin/block/mdadm.py |
2 | index a6ac970..a5dfc9f 100644 |
3 | --- a/curtin/block/mdadm.py |
4 | +++ b/curtin/block/mdadm.py |
5 | @@ -505,9 +505,9 @@ def md_sysfs_attr_path(md_devname, attrname): |
6 | return os.path.join(sysmd, attrname) |
7 | |
8 | |
9 | -def md_sysfs_attr(md_devname, attrname): |
10 | +def md_sysfs_attr(md_devname, attrname, default=''): |
11 | """ Return the attribute str of an md device found under the 'md' dir """ |
12 | - attrdata = '' |
13 | + attrdata = default |
14 | if not valid_mdname(md_devname): |
15 | raise ValueError('Invalid md devicename: [{}]'.format(md_devname)) |
16 | |
17 | @@ -645,45 +645,6 @@ def md_device_key_dev(devname): |
18 | return 'MD_DEVICE_' + dev_short(devname) + '_DEV' |
19 | |
20 | |
21 | -def __upgrade_detail_dict(detail): |
22 | - ''' This method attempts to convert mdadm --detail output into |
23 | - a KEY=VALUE output the same as mdadm --detail --export from mdadm v3.3 |
24 | - ''' |
25 | - # if the input already has MD_UUID, it's already been converted |
26 | - if 'MD_UUID' in detail: |
27 | - return detail |
28 | - |
29 | - md_detail = { |
30 | - 'MD_LEVEL': detail['raid_level'], |
31 | - 'MD_DEVICES': detail['raid_devices'], |
32 | - 'MD_METADATA': detail['version'], |
33 | - 'MD_NAME': detail['name'].split()[0], |
34 | - } |
35 | - |
36 | - # exmaine has ARRAY UUID |
37 | - if 'array_uuid' in detail: |
38 | - md_detail.update({'MD_UUID': detail['array_uuid']}) |
39 | - # query,detail has UUID |
40 | - elif 'uuid' in detail: |
41 | - md_detail.update({'MD_UUID': detail['uuid']}) |
42 | - |
43 | - device = detail['device'] |
44 | - |
45 | - # MD_DEVICE_vdc1_DEV=/dev/vdc1 |
46 | - md_detail.update({md_device_key_dev(device): device}) |
47 | - |
48 | - if 'device_role' in detail: |
49 | - role = detail['device_role'] |
50 | - if role != 'spare': |
51 | - # device_role = Active device 1 |
52 | - role = role.split()[-1] |
53 | - |
54 | - # MD_DEVICE_vdc1_ROLE=spare |
55 | - md_detail.update({md_device_key_role(device): role}) |
56 | - |
57 | - return md_detail |
58 | - |
59 | - |
60 | def md_read_run_mdadm_map(): |
61 | ''' |
62 | md1 1.2 59beb40f:4c202f67:088e702b:efdf577a /dev/md1 |
63 | @@ -719,8 +680,6 @@ def md_check_array_uuid(md_devname, md_uuid): |
64 | '%s -> %s != %s' % (mduuid_path, mdlink_devname, md_devname)) |
65 | raise ValueError(err) |
66 | |
67 | - return True |
68 | - |
69 | |
70 | def md_get_uuid(md_devname): |
71 | valid_mdname(md_devname) |
72 | @@ -741,13 +700,24 @@ def _compare_devlist(expected, found): |
73 | " Missing: {} Extra: {}".format(missing, extra)) |
74 | |
75 | |
76 | -def md_check_raidlevel(raidlevel): |
77 | +def md_check_raidlevel(md_devname, detail, raidlevel): |
78 | # Validate raidlevel against what curtin supports configuring |
79 | if raidlevel not in VALID_RAID_LEVELS: |
80 | err = ('Invalid raidlevel: ' + raidlevel + |
81 | ' Must be one of: ' + str(VALID_RAID_LEVELS)) |
82 | raise ValueError(err) |
83 | - return True |
84 | + # normalize raidlevel to the values mdadm prints. |
85 | + if isinstance(raidlevel, int) or len(raidlevel) <= 2: |
86 | + raidlevel = 'raid' + str(raidlevel) |
87 | + elif raidlevel == 'stripe': |
88 | + raidlevel = 'raid0' |
89 | + elif raidlevel == 'mirror': |
90 | + raidlevel = 'raid1' |
91 | + actual_level = detail.get("MD_LEVEL") |
92 | + if actual_level != raidlevel: |
93 | + raise ValueError( |
94 | + "raid device %s should have level %r but has level %r" % ( |
95 | + md_devname, raidlevel, actual_level)) |
96 | |
97 | |
98 | def md_block_until_in_sync(md_devname): |
99 | @@ -770,24 +740,24 @@ def md_check_array_state(md_devname): |
100 | # check array state |
101 | |
102 | writable = md_check_array_state_rw(md_devname) |
103 | - degraded = md_sysfs_attr(md_devname, 'degraded') |
104 | - sync_action = md_sysfs_attr(md_devname, 'sync_action') |
105 | + # Raid 0 arrays do not have degraded or sync_action sysfs |
106 | + # attributes. |
107 | + degraded = md_sysfs_attr(md_devname, 'degraded', None) |
108 | + sync_action = md_sysfs_attr(md_devname, 'sync_action', None) |
109 | |
110 | if not writable: |
111 | raise ValueError('Array not in writable state: ' + md_devname) |
112 | - if degraded != "0": |
113 | + if degraded is not None and degraded != "0": |
114 | raise ValueError('Array in degraded state: ' + md_devname) |
115 | - if sync_action != "idle": |
116 | + if degraded is not None and sync_action != "idle": |
117 | raise ValueError('Array syncing, not idle state: ' + md_devname) |
118 | |
119 | - return True |
120 | - |
121 | |
122 | def md_check_uuid(md_devname): |
123 | md_uuid = md_get_uuid(md_devname) |
124 | if not md_uuid: |
125 | raise ValueError('Failed to get md UUID from device: ' + md_devname) |
126 | - return md_check_array_uuid(md_devname, md_uuid) |
127 | + md_check_array_uuid(md_devname, md_uuid) |
128 | |
129 | |
130 | def md_check_devices(md_devname, devices): |
131 | @@ -833,26 +803,35 @@ def md_check_array_membership(md_devname, devices): |
132 | raise ValueError(err) |
133 | |
134 | |
135 | -def md_check(md_devname, raidlevel, devices=[], spares=[]): |
136 | +def md_check(md_devname, raidlevel, devices, spares, container): |
137 | ''' Check passed in variables from storage configuration against |
138 | the system we're running upon. |
139 | ''' |
140 | LOG.debug('RAID validation: ' + |
141 | - 'name={} raidlevel={} devices={} spares={}'.format(md_devname, |
142 | - raidlevel, |
143 | - devices, |
144 | - spares)) |
145 | + 'name={} raidlevel={} devices={} spares={} container={}'.format( |
146 | + md_devname, raidlevel, devices, spares, container)) |
147 | assert_valid_devpath(md_devname) |
148 | |
149 | - md_check_array_state(md_devname) |
150 | - md_check_raidlevel(raidlevel) |
151 | + detail = mdadm_query_detail(md_devname) |
152 | + |
153 | + if raidlevel != "container": |
154 | + md_check_array_state(md_devname) |
155 | + md_check_raidlevel(md_devname, detail, raidlevel) |
156 | md_check_uuid(md_devname) |
157 | - md_check_devices(md_devname, devices) |
158 | - md_check_spares(md_devname, spares) |
159 | - md_check_array_membership(md_devname, devices + spares) |
160 | + if container is None: |
161 | + md_check_devices(md_devname, devices) |
162 | + md_check_spares(md_devname, spares) |
163 | + md_check_array_membership(md_devname, devices + spares) |
164 | + else: |
165 | + if 'MD_CONTAINER' not in detail: |
166 | + raise ValueError("%s is not in a container" % ( |
167 | + md_devname)) |
168 | + actual_container = os.path.realpath(detail['MD_CONTAINER']) |
169 | + if actual_container != container: |
170 | + raise ValueError("%s is in container %r, not %r" % ( |
171 | + md_devname, actual_container, container)) |
172 | |
173 | LOG.debug('RAID array OK: ' + md_devname) |
174 | - return True |
175 | |
176 | |
177 | def md_is_in_container(md_devname): |
178 | diff --git a/curtin/block/schemas.py b/curtin/block/schemas.py |
179 | index 3923321..d846505 100644 |
180 | --- a/curtin/block/schemas.py |
181 | +++ b/curtin/block/schemas.py |
182 | @@ -308,9 +308,13 @@ RAID = { |
183 | 'title': 'curtin storage configuration for a RAID.', |
184 | 'description': ('Declarative syntax for specifying RAID.'), |
185 | 'definitions': definitions, |
186 | - 'required': ['id', 'type', 'name', 'raidlevel', 'devices'], |
187 | + 'required': ['id', 'type', 'name', 'raidlevel'], |
188 | 'type': 'object', |
189 | 'additionalProperties': False, |
190 | + 'oneOf': [ |
191 | + {'required': ['devices']}, |
192 | + {'required': ['container']}, |
193 | + ], |
194 | 'properties': { |
195 | 'id': {'$ref': '#/definitions/id'}, |
196 | 'devices': {'$ref': '#/definitions/devices'}, |
197 | diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py |
198 | index 5a087a3..8cb2784 100644 |
199 | --- a/curtin/commands/block_meta.py |
200 | +++ b/curtin/commands/block_meta.py |
201 | @@ -1496,24 +1496,38 @@ def dm_crypt_handler(info, storage_config): |
202 | so not writing crypttab") |
203 | |
204 | |
205 | -def verify_md_components(md_devname, raidlevel, device_paths, spare_paths): |
206 | +def verify_md_components(md_devname, raidlevel, device_paths, spare_paths, |
207 | + container): |
208 | # check if the array is already up, if not try to assemble |
209 | - check_ok = mdadm.md_check(md_devname, raidlevel, device_paths, |
210 | - spare_paths) |
211 | - if not check_ok: |
212 | + errors = [] |
213 | + check_ok = False |
214 | + try: |
215 | + mdadm.md_check(md_devname, raidlevel, device_paths, |
216 | + spare_paths, container) |
217 | + check_ok = True |
218 | + except ValueError as err1: |
219 | + errors.append(err1) |
220 | LOG.info("assembling preserved raid for %s", md_devname) |
221 | mdadm.mdadm_assemble(md_devname, device_paths, spare_paths) |
222 | - check_ok = mdadm.md_check(md_devname, raidlevel, device_paths, |
223 | - spare_paths) |
224 | - msg = ('Verifying %s raid composition, found raid is %s' |
225 | + try: |
226 | + mdadm.md_check(md_devname, raidlevel, device_paths, |
227 | + spare_paths, container) |
228 | + check_ok = True |
229 | + except ValueError as err2: |
230 | + errors.append(err2) |
231 | + |
232 | + msg = ('Verified %s raid composition, raid is %s' |
233 | % (md_devname, 'OK' if check_ok else 'not OK')) |
234 | LOG.debug(msg) |
235 | if not check_ok: |
236 | - raise RuntimeError(msg) |
237 | + for err in errors: |
238 | + LOG.error("Error checking raid %s: %s", md_devname, err) |
239 | + raise ValueError(msg) |
240 | |
241 | |
242 | -def raid_verify(md_devname, raidlevel, device_paths, spare_paths): |
243 | - verify_md_components(md_devname, raidlevel, device_paths, spare_paths) |
244 | +def raid_verify(md_devname, raidlevel, device_paths, spare_paths, container): |
245 | + verify_md_components( |
246 | + md_devname, raidlevel, device_paths, spare_paths, container) |
247 | |
248 | |
249 | def raid_handler(info, storage_config): |
250 | @@ -1556,7 +1570,9 @@ def raid_handler(info, storage_config): |
251 | |
252 | create_raid = True |
253 | if preserve: |
254 | - raid_verify(md_devname, raidlevel, device_paths, spare_device_paths) |
255 | + raid_verify( |
256 | + md_devname, raidlevel, device_paths, spare_device_paths, |
257 | + container_dev) |
258 | LOG.debug('raid %s already present, skipping create', md_devname) |
259 | create_raid = False |
260 | |
261 | diff --git a/tests/unittests/test_block_mdadm.py b/tests/unittests/test_block_mdadm.py |
262 | index b04cf82..74396d8 100644 |
263 | --- a/tests/unittests/test_block_mdadm.py |
264 | +++ b/tests/unittests/test_block_mdadm.py |
265 | @@ -942,8 +942,8 @@ class TestBlockMdadmMdHelpers(CiTestCase): |
266 | devname = '/dev/md0' |
267 | md_uuid = '93a73e10:427f280b:b7076c02:204b8f7a' |
268 | mock_os.path.realpath.return_value = devname |
269 | - rv = mdadm.md_check_array_uuid(devname, md_uuid) |
270 | - self.assertTrue(rv) |
271 | + # "assertNotRaises" |
272 | + mdadm.md_check_array_uuid(devname, md_uuid) |
273 | |
274 | @patch('curtin.block.mdadm.os') |
275 | def test_md_check_array_uuid_mismatch(self, mock_os): |
276 | @@ -970,43 +970,87 @@ class TestBlockMdadmMdHelpers(CiTestCase): |
277 | |
278 | def test_md_check_raid_level(self): |
279 | for rl in mdadm.VALID_RAID_LEVELS: |
280 | - self.assertTrue(mdadm.md_check_raidlevel(rl)) |
281 | + if isinstance(rl, int) or len(rl) <= 2: |
282 | + el = 'raid%s' % (rl,) |
283 | + elif rl == 'stripe': |
284 | + el = 'raid0' |
285 | + elif rl == 'mirror': |
286 | + el = 'raid1' |
287 | + else: |
288 | + el = rl |
289 | + # "assertNotRaises" |
290 | + mdadm.md_check_raidlevel('md0', {'MD_LEVEL': el}, rl) |
291 | |
292 | def test_md_check_raid_level_bad(self): |
293 | bogus = '27' |
294 | self.assertTrue(bogus not in mdadm.VALID_RAID_LEVELS) |
295 | with self.assertRaises(ValueError): |
296 | - mdadm.md_check_raidlevel(bogus) |
297 | + mdadm.md_check_raidlevel('md0', {}, bogus) |
298 | |
299 | @patch('curtin.block.mdadm.md_sysfs_attr') |
300 | def test_md_check_array_state(self, mock_attr): |
301 | mdname = '/dev/md0' |
302 | - mock_attr.side_effect = [ |
303 | - 'clean', # array_state |
304 | - '0', # degraded |
305 | - 'idle', # sync_action |
306 | - ] |
307 | - self.assertTrue(mdadm.md_check_array_state(mdname)) |
308 | + |
309 | + def mock_attr_impl(md_devname, attrname, default=''): |
310 | + if attrname == 'array_state': |
311 | + return 'clean' |
312 | + elif attrname == 'degraded': |
313 | + return '0' |
314 | + elif attrname == 'sync_action': |
315 | + return 'idle' |
316 | + |
317 | + mock_attr.side_effect = mock_attr_impl |
318 | + # "assertNotRaises" |
319 | + mdadm.md_check_array_state(mdname) |
320 | + |
321 | + @patch('curtin.block.mdadm.md_sysfs_attr') |
322 | + def test_md_check_array_state_raid0(self, mock_attr): |
323 | + # Raid 0 arrays do not have a degraded or sync_action sysfs |
324 | + # attribute. |
325 | + mdname = '/dev/md0' |
326 | + |
327 | + def mock_attr_impl(md_devname, attrname, default=''): |
328 | + if attrname == 'array_state': |
329 | + return 'clean' |
330 | + elif attrname == 'degraded': |
331 | + return default |
332 | + elif attrname == 'sync_action': |
333 | + return default |
334 | + |
335 | + mock_attr.side_effect = mock_attr_impl |
336 | + # "assertNotRaises" |
337 | + mdadm.md_check_array_state(mdname) |
338 | |
339 | @patch('curtin.block.mdadm.md_sysfs_attr') |
340 | def test_md_check_array_state_norw(self, mock_attr): |
341 | mdname = '/dev/md0' |
342 | - mock_attr.side_effect = [ |
343 | - 'suspended', # array_state |
344 | - '0', # degraded |
345 | - 'idle', # sync_action |
346 | - ] |
347 | + |
348 | + def mock_attr_impl(md_devname, attrname, default=''): |
349 | + if attrname == 'array_state': |
350 | + return 'suspended' |
351 | + elif attrname == 'degraded': |
352 | + return '0' |
353 | + elif attrname == 'sync_action': |
354 | + return 'idle' |
355 | + |
356 | + mock_attr.side_effect = mock_attr_impl |
357 | with self.assertRaises(ValueError): |
358 | mdadm.md_check_array_state(mdname) |
359 | |
360 | @patch('curtin.block.mdadm.md_sysfs_attr') |
361 | def test_md_check_array_state_degraded(self, mock_attr): |
362 | mdname = '/dev/md0' |
363 | - mock_attr.side_effect = [ |
364 | - 'clean', # array_state |
365 | - '1', # degraded |
366 | - 'idle', # sync_action |
367 | - ] |
368 | + |
369 | + def mock_attr_impl(md_devname, attrname, default=''): |
370 | + if attrname == 'array_state': |
371 | + return 'clean' |
372 | + elif attrname == 'degraded': |
373 | + return '1' |
374 | + elif attrname == 'sync_action': |
375 | + return 'idle' |
376 | + |
377 | + mock_attr.side_effect = mock_attr_impl |
378 | + |
379 | with self.assertRaises(ValueError): |
380 | mdadm.md_check_array_state(mdname) |
381 | |
382 | @@ -1039,8 +1083,8 @@ class TestBlockMdadmMdHelpers(CiTestCase): |
383 | mock_guuid.return_value = '93a73e10:427f280b:b7076c02:204b8f7a' |
384 | mock_ckuuid.return_value = True |
385 | |
386 | - rv = mdadm.md_check_uuid(mdname) |
387 | - self.assertTrue(rv) |
388 | + # "assertNotRaises" |
389 | + mdadm.md_check_uuid(mdname) |
390 | |
391 | @patch('curtin.block.mdadm.md_check_array_uuid') |
392 | @patch('curtin.block.mdadm.md_get_uuid') |
393 | @@ -1152,6 +1196,7 @@ class TestBlockMdadmMdHelpers(CiTestCase): |
394 | with self.assertRaises(ValueError): |
395 | mdadm.md_check_array_membership(mdname, devices) |
396 | |
397 | + @patch('curtin.block.mdadm.mdadm_query_detail') |
398 | @patch('curtin.block.mdadm.md_check_array_membership') |
399 | @patch('curtin.block.mdadm.md_check_spares') |
400 | @patch('curtin.block.mdadm.md_check_devices') |
401 | @@ -1159,7 +1204,7 @@ class TestBlockMdadmMdHelpers(CiTestCase): |
402 | @patch('curtin.block.mdadm.md_check_raidlevel') |
403 | @patch('curtin.block.mdadm.md_check_array_state') |
404 | def test_md_check_all_good(self, mock_array, mock_raid, mock_uuid, |
405 | - mock_dev, mock_spare, mock_member): |
406 | + mock_dev, mock_spare, mock_member, mock_detail): |
407 | md_devname = '/dev/md0' |
408 | raidlevel = 1 |
409 | devices = ['/dev/vda', '/dev/vdb'] |
410 | @@ -1171,16 +1216,143 @@ class TestBlockMdadmMdHelpers(CiTestCase): |
411 | mock_dev.return_value = None |
412 | mock_spare.return_value = None |
413 | mock_member.return_value = None |
414 | + detail = {'MD_NAME': 'foo'} |
415 | + mock_detail.return_value = detail |
416 | |
417 | - mdadm.md_check(md_devname, raidlevel, devices=devices, spares=spares) |
418 | + mdadm.md_check( |
419 | + md_devname, raidlevel, devices=devices, spares=spares, |
420 | + container=None) |
421 | |
422 | mock_array.assert_has_calls([call(md_devname)]) |
423 | - mock_raid.assert_has_calls([call(raidlevel)]) |
424 | + mock_raid.assert_has_calls([call(md_devname, detail, raidlevel)]) |
425 | mock_uuid.assert_has_calls([call(md_devname)]) |
426 | mock_dev.assert_has_calls([call(md_devname, devices)]) |
427 | mock_spare.assert_has_calls([call(md_devname, spares)]) |
428 | mock_member.assert_has_calls([call(md_devname, devices + spares)]) |
429 | |
430 | + @patch('curtin.block.mdadm.os.path.realpath') |
431 | + @patch('curtin.block.mdadm.mdadm_query_detail') |
432 | + @patch('curtin.block.mdadm.md_check_array_membership') |
433 | + @patch('curtin.block.mdadm.md_check_spares') |
434 | + @patch('curtin.block.mdadm.md_check_devices') |
435 | + @patch('curtin.block.mdadm.md_check_uuid') |
436 | + @patch('curtin.block.mdadm.md_check_raidlevel') |
437 | + @patch('curtin.block.mdadm.md_check_array_state') |
438 | + def test_md_check_all_good_container(self, mock_array, mock_raid, |
439 | + mock_uuid, mock_dev, mock_spare, |
440 | + mock_member, mock_detail, |
441 | + mock_realpath): |
442 | + md_devname = '/dev/md0' |
443 | + raidlevel = 1 |
444 | + devices = ['/dev/vda', '/dev/vdb'] |
445 | + spares = ['/dev/vdc'] |
446 | + |
447 | + mock_array.return_value = None |
448 | + mock_raid.return_value = None |
449 | + mock_uuid.return_value = None |
450 | + mock_dev.return_value = None |
451 | + mock_spare.return_value = None |
452 | + mock_member.return_value = None |
453 | + container_name = self.random_string() |
454 | + container_dev = self.random_string() |
455 | + detail = {'MD_CONTAINER': container_name} |
456 | + mock_detail.return_value = detail |
457 | + |
458 | + def realpath_impl(path): |
459 | + if path == container_name: |
460 | + return container_dev |
461 | + else: |
462 | + self.fail("unexpected realpath arg %r" % (path,)) |
463 | + |
464 | + mock_realpath.side_effect = realpath_impl |
465 | + |
466 | + mdadm.md_check( |
467 | + md_devname, raidlevel, devices=devices, spares=spares, |
468 | + container=container_dev) |
469 | + |
470 | + mock_array.assert_has_calls([call(md_devname)]) |
471 | + mock_raid.assert_has_calls([call(md_devname, detail, raidlevel)]) |
472 | + mock_uuid.assert_has_calls([call(md_devname)]) |
473 | + mock_dev.assert_has_calls([]) |
474 | + mock_spare.assert_has_calls([]) |
475 | + mock_member.assert_has_calls([]) |
476 | + |
477 | + @patch('curtin.block.mdadm.mdadm_query_detail') |
478 | + @patch('curtin.block.mdadm.md_check_array_membership') |
479 | + @patch('curtin.block.mdadm.md_check_spares') |
480 | + @patch('curtin.block.mdadm.md_check_devices') |
481 | + @patch('curtin.block.mdadm.md_check_uuid') |
482 | + @patch('curtin.block.mdadm.md_check_raidlevel') |
483 | + @patch('curtin.block.mdadm.md_check_array_state') |
484 | + def test_md_check_all_no_container(self, mock_array, mock_raid, |
485 | + mock_uuid, mock_dev, mock_spare, |
486 | + mock_member, mock_detail): |
487 | + md_devname = '/dev/md0' |
488 | + raidlevel = 1 |
489 | + devices = ['/dev/vda', '/dev/vdb'] |
490 | + spares = ['/dev/vdc'] |
491 | + |
492 | + mock_array.return_value = None |
493 | + mock_raid.return_value = None |
494 | + mock_uuid.return_value = None |
495 | + mock_dev.return_value = None |
496 | + mock_spare.return_value = None |
497 | + mock_member.return_value = None |
498 | + container_name = self.random_string() |
499 | + detail = {} |
500 | + |
501 | + mock_detail.return_value = detail |
502 | + |
503 | + with self.assertRaises(ValueError): |
504 | + mdadm.md_check( |
505 | + md_devname, raidlevel, devices=devices, spares=spares, |
506 | + container=container_name) |
507 | + |
508 | + mock_array.assert_has_calls([call(md_devname)]) |
509 | + mock_raid.assert_has_calls([call(md_devname, detail, raidlevel)]) |
510 | + mock_uuid.assert_has_calls([call(md_devname)]) |
511 | + mock_dev.assert_has_calls([]) |
512 | + mock_spare.assert_has_calls([]) |
513 | + mock_member.assert_has_calls([]) |
514 | + |
515 | + @patch('curtin.block.mdadm.mdadm_query_detail') |
516 | + @patch('curtin.block.mdadm.md_check_array_membership') |
517 | + @patch('curtin.block.mdadm.md_check_spares') |
518 | + @patch('curtin.block.mdadm.md_check_devices') |
519 | + @patch('curtin.block.mdadm.md_check_uuid') |
520 | + @patch('curtin.block.mdadm.md_check_raidlevel') |
521 | + @patch('curtin.block.mdadm.md_check_array_state') |
522 | + def test_md_check_all_wrong_container(self, mock_array, mock_raid, |
523 | + mock_uuid, mock_dev, mock_spare, |
524 | + mock_member, mock_detail): |
525 | + md_devname = '/dev/md0' |
526 | + raidlevel = 1 |
527 | + devices = ['/dev/vda', '/dev/vdb'] |
528 | + spares = ['/dev/vdc'] |
529 | + |
530 | + mock_array.return_value = None |
531 | + mock_raid.return_value = None |
532 | + mock_uuid.return_value = None |
533 | + mock_dev.return_value = None |
534 | + mock_spare.return_value = None |
535 | + mock_member.return_value = None |
536 | + container_name = self.random_string() |
537 | + detail = {'MD_CONTAINER': container_name + '1'} |
538 | + |
539 | + mock_detail.return_value = detail |
540 | + |
541 | + with self.assertRaises(ValueError): |
542 | + mdadm.md_check( |
543 | + md_devname, raidlevel, devices=devices, spares=spares, |
544 | + container=container_name) |
545 | + |
546 | + mock_array.assert_has_calls([call(md_devname)]) |
547 | + mock_raid.assert_has_calls([call(md_devname, detail, raidlevel)]) |
548 | + mock_uuid.assert_has_calls([call(md_devname)]) |
549 | + mock_dev.assert_has_calls([]) |
550 | + mock_spare.assert_has_calls([]) |
551 | + mock_member.assert_has_calls([]) |
552 | + |
553 | def test_md_check_all_good_devshort(self): |
554 | md_devname = 'md0' |
555 | raidlevel = 1 |
556 | @@ -1189,7 +1361,7 @@ class TestBlockMdadmMdHelpers(CiTestCase): |
557 | |
558 | with self.assertRaises(ValueError): |
559 | mdadm.md_check(md_devname, raidlevel, devices=devices, |
560 | - spares=spares) |
561 | + spares=spares, container=None) |
562 | |
563 | def test_md_present(self): |
564 | mdname = 'md0' |
565 | diff --git a/tests/unittests/test_commands_block_meta.py b/tests/unittests/test_commands_block_meta.py |
566 | index cd396f2..48a3217 100644 |
567 | --- a/tests/unittests/test_commands_block_meta.py |
568 | +++ b/tests/unittests/test_commands_block_meta.py |
569 | @@ -2008,12 +2008,32 @@ class TestRaidHandler(CiTestCase): |
570 | |
571 | devices = [self.random_string(), self.random_string(), |
572 | self.random_string()] |
573 | + md_devname = '/dev/' + self.storage_config['mddevice']['name'] |
574 | + self.m_getpath.side_effect = iter(devices) |
575 | + self.storage_config['mddevice']['preserve'] = True |
576 | + block_meta.raid_handler(self.storage_config['mddevice'], |
577 | + self.storage_config) |
578 | + self.assertEqual(0, self.m_mdadm.mdadm_create.call_count) |
579 | + self.assertEqual( |
580 | + [call(md_devname, 5, devices, [], None)], |
581 | + m_verify.call_args_list) |
582 | + |
583 | + @patch('curtin.commands.block_meta.raid_verify') |
584 | + def test_raid_handler_preserves_existing_device_container(self, m_verify): |
585 | + """ raid_handler preserves existing device. """ |
586 | + |
587 | + devices = [self.random_string()] |
588 | + md_devname = '/dev/' + self.storage_config['mddevice']['name'] |
589 | self.m_getpath.side_effect = iter(devices) |
590 | - m_verify.return_value = True |
591 | self.storage_config['mddevice']['preserve'] = True |
592 | + del self.storage_config['mddevice']['devices'] |
593 | + self.storage_config['mddevice']['container'] = self.random_string() |
594 | block_meta.raid_handler(self.storage_config['mddevice'], |
595 | self.storage_config) |
596 | self.assertEqual(0, self.m_mdadm.mdadm_create.call_count) |
597 | + self.assertEqual( |
598 | + [call(md_devname, 5, [], [], devices[0])], |
599 | + m_verify.call_args_list) |
600 | |
601 | def test_raid_handler_preserve_verifies_md_device(self): |
602 | """ raid_handler preserve verifies existing raid device. """ |
603 | @@ -2027,7 +2047,7 @@ class TestRaidHandler(CiTestCase): |
604 | block_meta.raid_handler(self.storage_config['mddevice'], |
605 | self.storage_config) |
606 | self.assertEqual(0, self.m_mdadm.mdadm_create.call_count) |
607 | - self.assertEqual([call(md_devname, 5, devices, [])], |
608 | + self.assertEqual([call(md_devname, 5, devices, [], None)], |
609 | self.m_mdadm.md_check.call_args_list) |
610 | |
611 | def test_raid_handler_preserve_verifies_md_device_after_assemble(self): |
612 | @@ -2037,12 +2057,12 @@ class TestRaidHandler(CiTestCase): |
613 | self.random_string()] |
614 | md_devname = '/dev/' + self.storage_config['mddevice']['name'] |
615 | self.m_getpath.side_effect = iter(devices) |
616 | - self.m_mdadm.md_check.side_effect = iter([False, True]) |
617 | + self.m_mdadm.md_check.side_effect = iter([ValueError(), None]) |
618 | self.storage_config['mddevice']['preserve'] = True |
619 | block_meta.raid_handler(self.storage_config['mddevice'], |
620 | self.storage_config) |
621 | self.assertEqual(0, self.m_mdadm.mdadm_create.call_count) |
622 | - self.assertEqual([call(md_devname, 5, devices, [])] * 2, |
623 | + self.assertEqual([call(md_devname, 5, devices, [], None)] * 2, |
624 | self.m_mdadm.md_check.call_args_list) |
625 | self.assertEqual([call(md_devname, devices, [])], |
626 | self.m_mdadm.mdadm_assemble.call_args_list) |
627 | @@ -2054,13 +2074,13 @@ class TestRaidHandler(CiTestCase): |
628 | self.random_string()] |
629 | md_devname = '/dev/' + self.storage_config['mddevice']['name'] |
630 | self.m_getpath.side_effect = iter(devices) |
631 | - self.m_mdadm.md_check.side_effect = iter([False, False]) |
632 | + self.m_mdadm.md_check.side_effect = iter([ValueError(), ValueError()]) |
633 | self.storage_config['mddevice']['preserve'] = True |
634 | - with self.assertRaises(RuntimeError): |
635 | + with self.assertRaises(ValueError): |
636 | block_meta.raid_handler(self.storage_config['mddevice'], |
637 | self.storage_config) |
638 | self.assertEqual(0, self.m_mdadm.mdadm_create.call_count) |
639 | - self.assertEqual([call(md_devname, 5, devices, [])] * 2, |
640 | + self.assertEqual([call(md_devname, 5, devices, [], None)] * 2, |
641 | self.m_mdadm.md_check.call_args_list) |
642 | self.assertEqual([call(md_devname, devices, [])], |
643 | self.m_mdadm.mdadm_assemble.call_args_list) |
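The schemas.py change above makes `devices` and `container` mutually exclusive requirements for a raid entry. A hypothetical storage configuration exercising the preserved-container path, written as the plain dicts the unit tests use (all ids, names, and devices invented):

```python
# Hypothetical config: an IMSM-style container plus a preserved raid0
# volume inside it. Because the volume supplies 'container' rather than
# 'devices', md_check compares the array's MD_CONTAINER against the
# container's device path instead of checking members and spares.
storage_config = {
    'md_container': {
        'type': 'raid',
        'id': 'md_container',
        'name': 'imsm0',
        'raidlevel': 'container',
        'devices': ['disk_sda', 'disk_sdb'],
        'preserve': True,
    },
    'md_volume': {
        'type': 'raid',
        'id': 'md_volume',
        'name': 'md126',
        'raidlevel': 'raid0',
        'container': 'md_container',
        'preserve': True,
    },
}
```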