Merge lp:~raharper/curtin/trunk.fix-lp1718699 into lp:~curtin-dev/curtin/trunk

Proposed by Ryan Harper
Status: Merged
Approved by: Scott Moser
Approved revision: 541
Merged at revision: 537
Proposed branch: lp:~raharper/curtin/trunk.fix-lp1718699
Merge into: lp:~curtin-dev/curtin/trunk
Diff against target: 272 lines (+143/-35)
7 files modified
curtin/block/clear_holders.py (+13/-0)
curtin/commands/block_meta.py (+15/-19)
curtin/commands/block_wipe.py (+4/-0)
curtin/commands/clear_holders.py (+1/-1)
examples/tests/bcache-wipe-xfs.yaml (+74/-0)
tests/unittests/test_commands_block_meta.py (+11/-15)
tests/vmtests/test_bcache_bug1718699.py (+25/-0)
To merge this branch: bzr merge lp:~raharper/curtin/trunk.fix-lp1718699
Reviewer Review Type Date Requested Status
Server Team CI bot continuous-integration Approve
Scott Moser Approve
Review via email: mp+332660@code.launchpad.net

Description of the change

block: handle wiping bcache parts

In some cases block devices may have bcache parts which are owned by the
block layer but are not assembled into a bcache device. The result is that
curtin fails to get exclusive opens when attempting to wipe the device.
Resolve this by testing if a volume has a 'bcache' directory under sysfs
and issuing the appropriate stop commands before wiping.

It's also possible for old "buried" metadata from bcache, lvm, or raid
to exist at particular partition offsets. Once curtin starts partitioning the
device the bcache kernel layer "finds" the parts and claims device
ownership and prevents wiping of the partition. We resolve this issue
by having curtin wipe 1M at the location of the partition before we
actually create it.

Fix 'curtin clear-holders' subcommand.

LP: #1718699

To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote :
review: Needs Fixing (continuous-integration)
539. By Ryan Harper

merge from trunk

Revision history for this message
Server Team CI bot (server-team-bot) wrote :
review: Needs Fixing (continuous-integration)
540. By Ryan Harper

Fix style

Revision history for this message
Server Team CI bot (server-team-bot) wrote :
review: Approve (continuous-integration)
Revision history for this message
Scott Moser (smoser) wrote :

Please mention the fix in clear_holders_main. Suggested:

   Also, fix a bug found in testing 'curtin clear-holders'.

Some tiny things, please fix.

Other than that, approve.

review: Approve
Revision history for this message
Ryan Harper (raharper) wrote :

Thanks for the review, will fix up, retest and push to trunk

541. By Ryan Harper

Reword vmtest test input

Revision history for this message
Server Team CI bot (server-team-bot) wrote :
review: Approve (continuous-integration)

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file 'curtin/block/clear_holders.py'
2--- curtin/block/clear_holders.py 2017-10-13 17:48:29 +0000
3+++ curtin/block/clear_holders.py 2017-10-25 20:32:23 +0000
4@@ -258,6 +258,19 @@
5 LOG.info("extended partitions do not need wiping, so skipping: '%s'",
6 blockdev)
7 else:
8+ # some volumes will be claimed by the bcache layer but do not surface
9+ # an actual /dev/bcacheN device which owns the parts (backing, cache)
10+ # The result is that some volumes cannot be wiped while bcache claims
11+ # the device. Resolve this by stopping bcache layer on those volumes
12+ # if present.
13+ for bcache_path in ['bcache', 'bcache/set']:
14+ stop_path = os.path.join(device, bcache_path)
15+ if os.path.exists(stop_path):
16+ LOG.debug('Attempting to release bcache layer from device: %s',
17+ device)
18+ maybe_stop_bcache_device(stop_path)
19+ continue
20+
21 retries = [1, 3, 5, 7]
22 LOG.info('wiping superblock on %s', blockdev)
23 for attempt, wait in enumerate(retries):
24
25=== modified file 'curtin/commands/block_meta.py'
26--- curtin/commands/block_meta.py 2017-10-11 16:38:32 +0000
27+++ curtin/commands/block_meta.py 2017-10-25 20:32:23 +0000
28@@ -548,6 +548,21 @@
29 info.get('id'), device, disk_ptable)
30 LOG.debug("partnum: %s offset_sectors: %s length_sectors: %s",
31 partnumber, offset_sectors, length_sectors)
32+
33+ # Wipe the partition if told to do so, do not wipe dos extended partitions
34+ # as this may damage the extended partition table
35+ if config.value_as_boolean(info.get('wipe')):
36+ LOG.info("Preparing partition location on disk %s", disk)
37+ if info.get('flag') == "extended":
38+ LOG.warn("extended partitions do not need wiping, so skipping: "
39+ "'%s'" % info.get('id'))
40+ else:
41+ # wipe the start of the new partition first by zeroing 1M at the
42+ # length of the previous partition
43+ wipe_offset = int(offset_sectors * logical_block_size_bytes)
44+ LOG.debug('Wiping 1M on %s at offset %s', disk, wipe_offset)
45+ block.zero_file_at_offsets(disk, [wipe_offset])
46+
47 if disk_ptable == "msdos":
48 if flag in ["extended", "logical", "primary"]:
49 partition_type = flag
50@@ -569,25 +584,6 @@
51 else:
52 raise ValueError("parent partition has invalid partition table")
53
54- # check if we've triggered hidden metadata like md, lvm or bcache
55- part_kname = get_path_to_storage_volume(info.get('id'), storage_config)
56- holders = clear_holders.get_holders(part_kname)
57- if len(holders) > 0:
58- LOG.debug('Detected block holders on partition %s: %s', part_kname,
59- holders)
60- clear_holders.clear_holders(part_kname)
61- clear_holders.assert_clear(part_kname)
62-
63- # Wipe the partition if told to do so, do not wipe dos extended partitions
64- # as this may damage the extended partition table
65- if config.value_as_boolean(info.get('wipe')):
66- if info.get('flag') == "extended":
67- LOG.warn("extended partitions do not need wiping, so skipping: "
68- "'%s'" % info.get('id'))
69- else:
70- block.wipe_volume(
71- get_path_to_storage_volume(info.get('id'), storage_config),
72- mode=info.get('wipe'))
73 # Make the name if needed
74 if storage_config.get(device).get('name') and partition_type != 'extended':
75 make_dname(info.get('id'), storage_config)
76
77=== modified file 'curtin/commands/block_wipe.py'
78--- curtin/commands/block_wipe.py 2016-08-10 20:18:07 +0000
79+++ curtin/commands/block_wipe.py 2017-10-25 20:32:23 +0000
80@@ -18,11 +18,15 @@
81 import sys
82 import curtin.block as block
83 from . import populate_one_subcmd
84+from .. import log
85+
86+LOG = log.LOG
87
88
89 def wipe_main(args):
90 for blockdev in args.devices:
91 try:
92+ LOG.debug('Wiping volume %s with mode=%s', blockdev, args.mode)
93 block.wipe_volume(blockdev, mode=args.mode)
94 except Exception as e:
95 sys.stderr.write(
96
97=== modified file 'curtin/commands/clear_holders.py'
98--- curtin/commands/clear_holders.py 2016-08-11 17:21:40 +0000
99+++ curtin/commands/clear_holders.py 2017-10-25 20:32:23 +0000
100@@ -28,7 +28,7 @@
101 raise ValueError('invalid devices specified')
102 block.clear_holders.start_clear_holders_deps()
103 block.clear_holders.clear_holders(args.devices, try_preserve=args.preserve)
104- if args.try_preserve:
105+ if args.preserve:
106 print('ran clear_holders attempting to preserve data. however, '
107 'hotplug support for some devices may cause holders to restart ')
108 block.clear_holders.assert_clear(args.devices)
109
110=== added file 'examples/tests/bcache-wipe-xfs.yaml'
111--- examples/tests/bcache-wipe-xfs.yaml 1970-01-01 00:00:00 +0000
112+++ examples/tests/bcache-wipe-xfs.yaml 2017-10-25 20:32:23 +0000
113@@ -0,0 +1,74 @@
114+showtrace: true
115+
116+early_commands:
117+ # Create a partitioned disk with bcache metadata in one of the partitions.
118+ # Then, wipe the partition table. This leaves "buried" bcache data that
119+ # would be seen as soon as the disk was partitioned and cause problems
120+ # for curtin's use of the disk.
121+ # This config recreates issue LP: #1718699
122+ 00_blockmeta: [env, -u, OUTPUT_FSTAB, TARGET_MOUNT_POINT=/tmp/my.bdir/target,
123+ WORKING_DIR=/tmp/my.bdir/work.d, curtin, --showtrace, -v,
124+ block-meta, --umount, custom]
125+ 01_clear_holders: [curtin, clear-holders, --preserve, /dev/vdb]
126+ 02_quick_erase: [curtin, block-wipe, --mode, superblock, /dev/vdb]
127+
128+storage:
129+ config:
130+ - grub_device: true
131+ id: vdb
132+ name: vdb
133+ path: /dev/vdb
134+ ptable: msdos
135+ type: disk
136+ wipe: superblock
137+ - id: vdc
138+ name: vdc
139+ path: /dev/vdc
140+ type: disk
141+ wipe: superblock
142+ - device: vdb
143+ id: vdb-part1
144+ name: vdb-part1
145+ number: 1
146+ offset: 4194304B
147+ size: 3997171712B
148+ type: partition
149+ uuid: 1d112703-1ff7-49fb-9655-741016e216bf
150+ wipe: superblock
151+ - device: vdb
152+ id: vdb-part2
153+ name: vdb-part2
154+ number: 2
155+ size: 3997171712B
156+ type: partition
157+ uuid: ec219a2e-c4a5-4623-b66a-965da2c6c1f1
158+ wipe: superblock
159+ - backing_device: vdc
160+ cache_device: vdb-part2
161+ cache_mode: writeback
162+ id: bcache0
163+ name: bcache0
164+ type: bcache
165+ - fstype: ext4
166+ id: vdb-part1_format
167+ label: ''
168+ type: format
169+ uuid: 0687dc8f-c089-4f30-8603-0ddf646a5dd7
170+ volume: vdb-part1
171+ - fstype: xfs
172+ id: bcache0_format
173+ label: ''
174+ type: format
175+ uuid: c40a45b2-1f12-454e-a0ec-784eb4ded4e6
176+ volume: bcache0
177+ - device: vdb-part1_format
178+ id: vdb-part1_mount
179+ options: ''
180+ path: /
181+ type: mount
182+ - device: bcache0_format
183+ id: bcache0_mount
184+ options: ''
185+ path: /var/lib/ceph-bcache/test-disk
186+ type: mount
187+ version: 1
188
189=== modified file 'tests/unittests/test_commands_block_meta.py'
190--- tests/unittests/test_commands_block_meta.py 2017-08-03 18:14:51 +0000
191+++ tests/unittests/test_commands_block_meta.py 2017-10-25 20:32:23 +0000
192@@ -130,6 +130,8 @@
193 'mock_clear_holders')
194 self.add_patch('curtin.block.clear_holders.assert_clear',
195 'mock_assert_clear')
196+ self.add_patch('curtin.block.zero_file_at_offsets',
197+ 'mock_block_zero_file_at_offsets')
198
199 self.target = "my_target"
200 self.config = {
201@@ -177,32 +179,26 @@
202 self.mock_clear_holders.assert_called_with(disk)
203 self.mock_assert_clear.assert_called_with(disk)
204
205- def test_partition_handler_calls_clear_holder(self):
206+ def test_partition_handler_wipes_at_partition_offset(self):
207+ """ Test wiping partition at offset prior to creating partition"""
208 disk_info = self.storage_config.get('sda')
209 part_info = self.storage_config.get('sda-part1')
210 disk_kname = disk_info.get('path')
211 part_kname = disk_kname + '1'
212 self.mock_getpath.side_effect = iter([
213- disk_info.get('id'),
214- part_kname,
215- part_kname,
216+ disk_kname,
217 part_kname,
218 ])
219-
220 self.mock_block_get_part_table_type.return_value = 'dos'
221 kname = 'xxx'
222 self.mock_block_path_to_kname.return_value = kname
223 self.mock_block_sys_block_path.return_value = '/sys/class/block/xxx'
224- self.mock_subp.side_effect = iter([
225- ("", 0), # parted mkpart
226- ("", 0), # ??
227- ])
228- holders = ['md1']
229- self.mock_get_holders.return_value = holders
230
231 block_meta.partition_handler(part_info, self.storage_config)
232
233- print("clear_holders: %s" % self.mock_clear_holders.call_args_list)
234- print("assert_clear: %s" % self.mock_assert_clear.call_args_list)
235- self.mock_clear_holders.assert_called_with(part_kname)
236- self.mock_assert_clear.assert_called_with(part_kname)
237+ part_offset = 2048 * 512
238+ self.mock_block_zero_file_at_offsets.assert_called_with(disk_kname,
239+ [part_offset])
240+ self.mock_subp.assert_called_with(['parted', disk_kname, '--script',
241+ 'mkpart', 'primary', '2048s',
242+ '1001471s'], capture=True)
243
244=== added file 'tests/vmtests/test_bcache_bug1718699.py'
245--- tests/vmtests/test_bcache_bug1718699.py 1970-01-01 00:00:00 +0000
246+++ tests/vmtests/test_bcache_bug1718699.py 2017-10-25 20:32:23 +0000
247@@ -0,0 +1,25 @@
248+from .releases import base_vm_classes as relbase
249+from .test_bcache_basic import TestBcacheBasic
250+
251+
252+class TestBcacheBug1718699(TestBcacheBasic):
253+ conf_file = "examples/tests/bcache-wipe-xfs.yaml"
254+ dirty_disks = False
255+ nr_cpus = 2
256+ extra_disks = ['10G']
257+
258+
259+class PreciseTestBcacheBug1718699(relbase.precise_hwe_t, TestBcacheBug1718699):
260+ __test__ = True
261+
262+
263+class XenialTestBcacheBug1718699(relbase.xenial, TestBcacheBug1718699):
264+ __test__ = True
265+
266+
267+class ZestyTestBcacheBug1718699(relbase.zesty, TestBcacheBug1718699):
268+ __test__ = True
269+
270+
271+class ArtfulTestBcacheBug1718699(relbase.artful, TestBcacheBug1718699):
272+ __test__ = True

Subscribers

People subscribed via source and target branches