Merge ~raharper/curtin:ubuntu/artful/sru-20180518 into curtin:ubuntu/artful
- Git
- lp:~raharper/curtin
- ubuntu/artful/sru-20180518
- Merge into ubuntu/artful
Status: | Merged |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Merged at revision: | bb317840c0e4ea17160c27bfe245a1e87a13d0fc | ||||||||||||||||||||||||
Proposed branch: | ~raharper/curtin:ubuntu/artful/sru-20180518 | ||||||||||||||||||||||||
Merge into: | curtin:ubuntu/artful | ||||||||||||||||||||||||
Diff against target: |
3718 lines (+1736/-596) 48 files modified
curtin/block/__init__.py (+18/-14) curtin/block/bcache.py (+87/-0) curtin/block/clear_holders.py (+90/-52) curtin/block/iscsi.py (+7/-8) curtin/block/mdadm.py (+68/-4) curtin/block/zfs.py (+26/-1) curtin/commands/apt_config.py (+5/-0) curtin/commands/block_meta.py (+173/-76) curtin/commands/curthooks.py (+3/-3) curtin/commands/install.py (+22/-23) curtin/util.py (+35/-25) debian/changelog (+22/-0) dev/null (+0/-128) doc/topics/integration-testing.rst (+16/-0) doc/topics/storage.rst (+61/-4) examples/tests/dirty_disks_config.yaml (+6/-0) examples/tests/filesystem_battery.yaml (+23/-0) examples/tests/lvm.yaml (+21/-0) tests/unittests/helpers.py (+3/-1) tests/unittests/test_block_zfs.py (+96/-0) tests/unittests/test_clear_holders.py (+87/-38) tests/unittests/test_commands_block_meta.py (+425/-25) tests/unittests/test_commands_install.py (+28/-0) tests/unittests/test_make_dname.py (+28/-1) tests/unittests/test_util.py (+47/-0) tests/vmtests/__init__.py (+146/-19) tests/vmtests/helpers.py (+49/-32) tests/vmtests/test_basic.py (+12/-9) tests/vmtests/test_centos_basic.py (+0/-2) tests/vmtests/test_fs_battery.py (+49/-0) tests/vmtests/test_lvm.py (+6/-10) tests/vmtests/test_lvm_iscsi.py (+8/-2) tests/vmtests/test_mdadm_bcache.py (+7/-73) tests/vmtests/test_network.py (+0/-1) tests/vmtests/test_network_alias.py (+0/-1) tests/vmtests/test_network_bonding.py (+0/-1) tests/vmtests/test_network_bridging.py (+0/-1) tests/vmtests/test_network_ipv6.py (+0/-1) tests/vmtests/test_network_mtu.py (+0/-1) tests/vmtests/test_network_static.py (+0/-1) tests/vmtests/test_network_vlan.py (+6/-1) tests/vmtests/test_nvme.py (+0/-18) tests/vmtests/test_pollinate_useragent.py (+2/-2) tests/vmtests/test_raid5_bcache.py (+0/-4) tests/vmtests/test_uefi_basic.py (+0/-9) tests/vmtests/test_zfsroot.py (+40/-1) tools/jenkins-runner (+12/-0) tools/vmtest-sync-images (+2/-4) |
||||||||||||||||||||||||
Related bugs: |
|
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Server Team CI bot | continuous-integration | Approve | |
curtin developers | Pending | ||
Review via email: mp+345952@code.launchpad.net |
Commit message
curtin (18.1-17-
* New upstream snapshot. (LP: #1772044)
- tests: replace usage of mock.assert_called
- tools: jenkins-runner show curtin version in output.
- zfs: implement a supported check to handle i386
- Support mount entries not tied to a device, including bind and tmpfs.
- block/clear_
- clear_holders: only export zpools that have been imported
- vmtests: allow env control of apt, system_upgrade, package upgrade
- util.get_
- vmtests: adjust lvm_iscsi dnames to match configuration
- vmtest: always boot with BOOTIF and ip=:::::BOOTIF:dhcp
- make_dname for bcache should use backing device uuid
- zfsroot: add additional checks, do not require disk 'serial' attribute
- clear-holders: fix lvm name use when shutting down
- install: prevent unmount: disabled from swallowing installation failures
- vmtest: bionic images no longer use the vlan package
- pycodestyle: Fix invalid escape sequences in string literals.
Description of the change
Server Team CI bot (server-team-bot) wrote:
Preview Diff
1 | diff --git a/curtin/block/__init__.py b/curtin/block/__init__.py | |||
2 | index 50e953e..a8ee8a6 100644 | |||
3 | --- a/curtin/block/__init__.py | |||
4 | +++ b/curtin/block/__init__.py | |||
5 | @@ -378,7 +378,7 @@ def stop_all_unused_multipath_devices(): | |||
6 | 378 | LOG.warn("Failed to stop multipath devices: %s", e) | 378 | LOG.warn("Failed to stop multipath devices: %s", e) |
7 | 379 | 379 | ||
8 | 380 | 380 | ||
10 | 381 | def rescan_block_devices(): | 381 | def rescan_block_devices(warn_on_fail=True): |
11 | 382 | """ | 382 | """ |
12 | 383 | run 'blockdev --rereadpt' for all block devices not currently mounted | 383 | run 'blockdev --rereadpt' for all block devices not currently mounted |
13 | 384 | """ | 384 | """ |
14 | @@ -399,13 +399,15 @@ def rescan_block_devices(): | |||
15 | 399 | try: | 399 | try: |
16 | 400 | util.subp(cmd, capture=True) | 400 | util.subp(cmd, capture=True) |
17 | 401 | except util.ProcessExecutionError as e: | 401 | except util.ProcessExecutionError as e: |
25 | 402 | # FIXME: its less than ideal to swallow this error, but until | 402 | if warn_on_fail: |
26 | 403 | # we fix LP: #1489521 we kind of need to. | 403 | # FIXME: its less than ideal to swallow this error, but until |
27 | 404 | LOG.warn("Error rescanning devices, possibly known issue LP: #1489521") | 404 | # we fix LP: #1489521 we kind of need to. |
28 | 405 | # Reformatting the exception output so as to not trigger | 405 | LOG.warn( |
29 | 406 | # vmtest scanning for Unexepected errors in install logfile | 406 | "Error rescanning devices, possibly known issue LP: #1489521") |
30 | 407 | LOG.warn("cmd: %s\nstdout:%s\nstderr:%s\nexit_code:%s", e.cmd, | 407 | # Reformatting the exception output so as to not trigger |
31 | 408 | e.stdout, e.stderr, e.exit_code) | 408 | # vmtest scanning for Unexepected errors in install logfile |
32 | 409 | LOG.warn("cmd: %s\nstdout:%s\nstderr:%s\nexit_code:%s", e.cmd, | ||
33 | 410 | e.stdout, e.stderr, e.exit_code) | ||
34 | 409 | 411 | ||
35 | 410 | udevadm_settle() | 412 | udevadm_settle() |
36 | 411 | 413 | ||
37 | @@ -753,8 +755,9 @@ def check_dos_signature(device): | |||
38 | 753 | # the underlying disk uses a larger logical block size, so the start of | 755 | # the underlying disk uses a larger logical block size, so the start of |
39 | 754 | # this signature must be at 0x1fe | 756 | # this signature must be at 0x1fe |
40 | 755 | # https://en.wikipedia.org/wiki/Master_boot_record#Sector_layout | 757 | # https://en.wikipedia.org/wiki/Master_boot_record#Sector_layout |
43 | 756 | return (is_block_device(device) and util.file_size(device) >= 0x200 and | 758 | devname = dev_path(path_to_kname(device)) |
44 | 757 | (util.load_file(device, decode=False, read_len=2, offset=0x1fe) == | 759 | return (is_block_device(devname) and util.file_size(devname) >= 0x200 and |
45 | 760 | (util.load_file(devname, decode=False, read_len=2, offset=0x1fe) == | ||
46 | 758 | b'\x55\xAA')) | 761 | b'\x55\xAA')) |
47 | 759 | 762 | ||
48 | 760 | 763 | ||
49 | @@ -769,10 +772,11 @@ def check_efi_signature(device): | |||
50 | 769 | # the start of the gpt partition table header shoult have the signaure | 772 | # the start of the gpt partition table header shoult have the signaure |
51 | 770 | # 'EFI PART'. | 773 | # 'EFI PART'. |
52 | 771 | # https://en.wikipedia.org/wiki/GUID_Partition_Table | 774 | # https://en.wikipedia.org/wiki/GUID_Partition_Table |
57 | 772 | sector_size = get_blockdev_sector_size(device)[0] | 775 | devname = dev_path(path_to_kname(device)) |
58 | 773 | return (is_block_device(device) and | 776 | sector_size = get_blockdev_sector_size(devname)[0] |
59 | 774 | util.file_size(device) >= 2 * sector_size and | 777 | return (is_block_device(devname) and |
60 | 775 | (util.load_file(device, decode=False, read_len=8, | 778 | util.file_size(devname) >= 2 * sector_size and |
61 | 779 | (util.load_file(devname, decode=False, read_len=8, | ||
62 | 776 | offset=sector_size) == b'EFI PART')) | 780 | offset=sector_size) == b'EFI PART')) |
63 | 777 | 781 | ||
64 | 778 | 782 | ||
65 | diff --git a/curtin/block/bcache.py b/curtin/block/bcache.py | |||
66 | 779 | new file mode 100644 | 783 | new file mode 100644 |
67 | index 0000000..852cef2 | |||
68 | --- /dev/null | |||
69 | +++ b/curtin/block/bcache.py | |||
70 | @@ -0,0 +1,87 @@ | |||
71 | 1 | # This file is part of curtin. See LICENSE file for copyright and license info. | ||
72 | 2 | |||
73 | 3 | import os | ||
74 | 4 | |||
75 | 5 | from curtin import util | ||
76 | 6 | from curtin.log import LOG | ||
77 | 7 | from . import sys_block_path | ||
78 | 8 | |||
79 | 9 | |||
80 | 10 | def superblock_asdict(device=None, data=None): | ||
81 | 11 | """ Convert output from bcache-super-show into a dictionary""" | ||
82 | 12 | |||
83 | 13 | if not device and not data: | ||
84 | 14 | raise ValueError('Supply a device name, or data to parse') | ||
85 | 15 | |||
86 | 16 | if not data: | ||
87 | 17 | data, _err = util.subp(['bcache-super-show', device], capture=True) | ||
88 | 18 | bcache_super = {} | ||
89 | 19 | for line in data.splitlines(): | ||
90 | 20 | if not line: | ||
91 | 21 | continue | ||
92 | 22 | values = [val for val in line.split('\t') if val] | ||
93 | 23 | bcache_super.update({values[0]: values[1]}) | ||
94 | 24 | |||
95 | 25 | return bcache_super | ||
96 | 26 | |||
97 | 27 | |||
98 | 28 | def parse_sb_version(sb_version): | ||
99 | 29 | """ Convert sb_version string to integer if possible""" | ||
100 | 30 | try: | ||
101 | 31 | # 'sb.version': '1 [backing device]' | ||
102 | 32 | # 'sb.version': '3 [caching device]' | ||
103 | 33 | version = int(sb_version.split()[0]) | ||
104 | 34 | except (AttributeError, ValueError): | ||
105 | 35 | LOG.warning("Failed to parse bcache 'sb.version' field" | ||
106 | 36 | " as integer: %s", sb_version) | ||
107 | 37 | return None | ||
108 | 38 | |||
109 | 39 | return version | ||
110 | 40 | |||
111 | 41 | |||
112 | 42 | def is_backing(device, superblock=False): | ||
113 | 43 | """ Test if device is a bcache backing device | ||
114 | 44 | |||
115 | 45 | A runtime check for an active bcache backing device is to | ||
116 | 46 | examine /sys/class/block/<kname>/bcache/label | ||
117 | 47 | |||
118 | 48 | However if a device is not active then read the superblock | ||
119 | 49 | of the device and check that sb.version == 1""" | ||
120 | 50 | |||
121 | 51 | if not superblock: | ||
122 | 52 | sys_block = sys_block_path(device) | ||
123 | 53 | bcache_sys_attr = os.path.join(sys_block, 'bcache', 'label') | ||
124 | 54 | return os.path.exists(bcache_sys_attr) | ||
125 | 55 | else: | ||
126 | 56 | bcache_super = superblock_asdict(device=device) | ||
127 | 57 | sb_version = parse_sb_version(bcache_super['sb.version']) | ||
128 | 58 | return bcache_super and sb_version == 1 | ||
129 | 59 | |||
130 | 60 | |||
131 | 61 | def is_caching(device, superblock=False): | ||
132 | 62 | """ Test if device is a bcache caching device | ||
133 | 63 | |||
134 | 64 | A runtime check for an active bcache backing device is to | ||
135 | 65 | examine /sys/class/block/<kname>/bcache/cache_replacement_policy | ||
136 | 66 | |||
137 | 67 | However if a device is not active then read the superblock | ||
138 | 68 | of the device and check that sb.version == 3""" | ||
139 | 69 | |||
140 | 70 | if not superblock: | ||
141 | 71 | sys_block = sys_block_path(device) | ||
142 | 72 | bcache_sysattr = os.path.join(sys_block, 'bcache', | ||
143 | 73 | 'cache_replacement_policy') | ||
144 | 74 | return os.path.exists(bcache_sysattr) | ||
145 | 75 | else: | ||
146 | 76 | bcache_super = superblock_asdict(device=device) | ||
147 | 77 | sb_version = parse_sb_version(bcache_super['sb.version']) | ||
148 | 78 | return bcache_super and sb_version == 3 | ||
149 | 79 | |||
150 | 80 | |||
151 | 81 | def write_label(label, device): | ||
152 | 82 | """ write label to bcache device """ | ||
153 | 83 | sys_block = sys_block_path(device) | ||
154 | 84 | bcache_sys_attr = os.path.join(sys_block, 'bcache', 'label') | ||
155 | 85 | util.write_file(bcache_sys_attr, content=label) | ||
156 | 86 | |||
157 | 87 | # vi: ts=4 expandtab syntax=python | ||
158 | diff --git a/curtin/block/clear_holders.py b/curtin/block/clear_holders.py | |||
159 | index 4b3feeb..20c572b 100644 | |||
160 | --- a/curtin/block/clear_holders.py | |||
161 | +++ b/curtin/block/clear_holders.py | |||
162 | @@ -110,6 +110,9 @@ def shutdown_bcache(device): | |||
163 | 110 | 'Device path must start with /sys/class/block/', | 110 | 'Device path must start with /sys/class/block/', |
164 | 111 | device) | 111 | device) |
165 | 112 | 112 | ||
166 | 113 | LOG.info('Wiping superblock on bcache device: %s', device) | ||
167 | 114 | _wipe_superblock(block.sysfs_to_devpath(device), exclusive=False) | ||
168 | 115 | |||
169 | 113 | # bcache device removal should be fast but in an extreme | 116 | # bcache device removal should be fast but in an extreme |
170 | 114 | # case, might require the cache device to flush large | 117 | # case, might require the cache device to flush large |
171 | 115 | # amounts of data to a backing device. The strategy here | 118 | # amounts of data to a backing device. The strategy here |
172 | @@ -187,15 +190,29 @@ def shutdown_lvm(device): | |||
173 | 187 | # lvm devices have a dm directory that containes a file 'name' containing | 190 | # lvm devices have a dm directory that containes a file 'name' containing |
174 | 188 | # '{volume group}-{logical volume}'. The volume can be freed using lvremove | 191 | # '{volume group}-{logical volume}'. The volume can be freed using lvremove |
175 | 189 | name_file = os.path.join(device, 'dm', 'name') | 192 | name_file = os.path.join(device, 'dm', 'name') |
177 | 190 | (vg_name, lv_name) = lvm.split_lvm_name(util.load_file(name_file)) | 193 | lvm_name = util.load_file(name_file).strip() |
178 | 194 | (vg_name, lv_name) = lvm.split_lvm_name(lvm_name) | ||
179 | 195 | vg_lv_name = "%s/%s" % (vg_name, lv_name) | ||
180 | 196 | devname = "/dev/" + vg_lv_name | ||
181 | 197 | |||
182 | 198 | # wipe contents of the logical volume first | ||
183 | 199 | LOG.info('Wiping lvm logical volume: %s', devname) | ||
184 | 200 | block.quick_zero(devname, partitions=False) | ||
185 | 191 | 201 | ||
189 | 192 | # use dmsetup as lvm commands require valid /etc/lvm/* metadata | 202 | # remove the logical volume |
190 | 193 | LOG.debug('using "dmsetup remove" on %s-%s', vg_name, lv_name) | 203 | LOG.debug('using "lvremove" on %s', vg_lv_name) |
191 | 194 | util.subp(['dmsetup', 'remove', '{}-{}'.format(vg_name, lv_name)]) | 204 | util.subp(['lvremove', '--force', '--force', vg_lv_name]) |
192 | 195 | 205 | ||
193 | 196 | # if that was the last lvol in the volgroup, get rid of volgroup | 206 | # if that was the last lvol in the volgroup, get rid of volgroup |
194 | 197 | if len(lvm.get_lvols_in_volgroup(vg_name)) == 0: | 207 | if len(lvm.get_lvols_in_volgroup(vg_name)) == 0: |
195 | 208 | pvols = lvm.get_pvols_in_volgroup(vg_name) | ||
196 | 198 | util.subp(['vgremove', '--force', '--force', vg_name], rcs=[0, 5]) | 209 | util.subp(['vgremove', '--force', '--force', vg_name], rcs=[0, 5]) |
197 | 210 | |||
198 | 211 | # wipe the underlying physical volumes | ||
199 | 212 | for pv in pvols: | ||
200 | 213 | LOG.info('Wiping lvm physical volume: %s', pv) | ||
201 | 214 | block.quick_zero(pv, partitions=False) | ||
202 | 215 | |||
203 | 199 | # refresh lvmetad | 216 | # refresh lvmetad |
204 | 200 | lvm.lvm_scan() | 217 | lvm.lvm_scan() |
205 | 201 | 218 | ||
206 | @@ -212,10 +229,31 @@ def shutdown_mdadm(device): | |||
207 | 212 | """ | 229 | """ |
208 | 213 | Shutdown specified mdadm device. | 230 | Shutdown specified mdadm device. |
209 | 214 | """ | 231 | """ |
210 | 232 | |||
211 | 215 | blockdev = block.sysfs_to_devpath(device) | 233 | blockdev = block.sysfs_to_devpath(device) |
212 | 234 | |||
213 | 235 | LOG.info('Wiping superblock on raid device: %s', device) | ||
214 | 236 | _wipe_superblock(blockdev, exclusive=False) | ||
215 | 237 | |||
216 | 238 | md_devs = ( | ||
217 | 239 | mdadm.md_get_devices_list(blockdev) + | ||
218 | 240 | mdadm.md_get_spares_list(blockdev)) | ||
219 | 241 | mdadm.set_sync_action(blockdev, action="idle") | ||
220 | 242 | mdadm.set_sync_action(blockdev, action="frozen") | ||
221 | 243 | for mddev in md_devs: | ||
222 | 244 | try: | ||
223 | 245 | mdadm.fail_device(blockdev, mddev) | ||
224 | 246 | mdadm.remove_device(blockdev, mddev) | ||
225 | 247 | except util.ProcessExecutionError as e: | ||
226 | 248 | LOG.debug('Non-fatal error clearing raid array: %s', e.stderr) | ||
227 | 249 | pass | ||
228 | 250 | |||
229 | 216 | LOG.debug('using mdadm.mdadm_stop on dev: %s', blockdev) | 251 | LOG.debug('using mdadm.mdadm_stop on dev: %s', blockdev) |
230 | 217 | mdadm.mdadm_stop(blockdev) | 252 | mdadm.mdadm_stop(blockdev) |
231 | 218 | 253 | ||
232 | 254 | for mddev in md_devs: | ||
233 | 255 | mdadm.zero_device(mddev) | ||
234 | 256 | |||
235 | 219 | # mdadm stop operation is asynchronous so we must wait for the kernel to | 257 | # mdadm stop operation is asynchronous so we must wait for the kernel to |
236 | 220 | # release resources. For more details see LP: #1682456 | 258 | # release resources. For more details see LP: #1682456 |
237 | 221 | try: | 259 | try: |
238 | @@ -243,32 +281,49 @@ def wipe_superblock(device): | |||
239 | 243 | blockdev = block.sysfs_to_devpath(device) | 281 | blockdev = block.sysfs_to_devpath(device) |
240 | 244 | # when operating on a disk that used to have a dos part table with an | 282 | # when operating on a disk that used to have a dos part table with an |
241 | 245 | # extended partition, attempting to wipe the extended partition will fail | 283 | # extended partition, attempting to wipe the extended partition will fail |
249 | 246 | if block.is_extended_partition(blockdev): | 284 | try: |
250 | 247 | LOG.info("extended partitions do not need wiping, so skipping: '%s'", | 285 | if block.is_extended_partition(blockdev): |
251 | 248 | blockdev) | 286 | LOG.info("extended partitions do not need wiping, so skipping:" |
252 | 249 | else: | 287 | " '%s'", blockdev) |
253 | 250 | # release zfs member by exporting the pool | 288 | return |
254 | 251 | if block.is_zfs_member(blockdev): | 289 | except OSError as e: |
255 | 252 | poolname = zfs.device_to_poolname(blockdev) | 290 | if util.is_file_not_found_exc(e): |
256 | 291 | LOG.debug('Device to wipe disappeared: %s', e) | ||
257 | 292 | LOG.debug('/proc/partitions says: %s', | ||
258 | 293 | util.load_file('/proc/partitions')) | ||
259 | 294 | |||
260 | 295 | (parent, partnum) = block.get_blockdev_for_partition(blockdev) | ||
261 | 296 | out, _e = util.subp(['sfdisk', '-d', parent], | ||
262 | 297 | capture=True, combine_capture=True) | ||
263 | 298 | LOG.debug('Disk partition info:\n%s', out) | ||
264 | 299 | return | ||
265 | 300 | else: | ||
266 | 301 | raise e | ||
267 | 302 | |||
268 | 303 | # release zfs member by exporting the pool | ||
269 | 304 | if block.is_zfs_member(blockdev): | ||
270 | 305 | poolname = zfs.device_to_poolname(blockdev) | ||
271 | 306 | # only export pools that have been imported | ||
272 | 307 | if poolname in zfs.zpool_list(): | ||
273 | 253 | zfs.zpool_export(poolname) | 308 | zfs.zpool_export(poolname) |
274 | 254 | 309 | ||
290 | 255 | if is_swap_device(blockdev): | 310 | if is_swap_device(blockdev): |
291 | 256 | shutdown_swap(blockdev) | 311 | shutdown_swap(blockdev) |
292 | 257 | 312 | ||
293 | 258 | # some volumes will be claimed by the bcache layer but do not surface | 313 | # some volumes will be claimed by the bcache layer but do not surface |
294 | 259 | # an actual /dev/bcacheN device which owns the parts (backing, cache) | 314 | # an actual /dev/bcacheN device which owns the parts (backing, cache) |
295 | 260 | # The result is that some volumes cannot be wiped while bcache claims | 315 | # The result is that some volumes cannot be wiped while bcache claims |
296 | 261 | # the device. Resolve this by stopping bcache layer on those volumes | 316 | # the device. Resolve this by stopping bcache layer on those volumes |
297 | 262 | # if present. | 317 | # if present. |
298 | 263 | for bcache_path in ['bcache', 'bcache/set']: | 318 | for bcache_path in ['bcache', 'bcache/set']: |
299 | 264 | stop_path = os.path.join(device, bcache_path) | 319 | stop_path = os.path.join(device, bcache_path) |
300 | 265 | if os.path.exists(stop_path): | 320 | if os.path.exists(stop_path): |
301 | 266 | LOG.debug('Attempting to release bcache layer from device: %s', | 321 | LOG.debug('Attempting to release bcache layer from device: %s', |
302 | 267 | device) | 322 | device) |
303 | 268 | maybe_stop_bcache_device(stop_path) | 323 | maybe_stop_bcache_device(stop_path) |
304 | 269 | continue | 324 | continue |
305 | 270 | 325 | ||
307 | 271 | _wipe_superblock(blockdev) | 326 | _wipe_superblock(blockdev) |
308 | 272 | 327 | ||
309 | 273 | 328 | ||
310 | 274 | def _wipe_superblock(blockdev, exclusive=True): | 329 | def _wipe_superblock(blockdev, exclusive=True): |
311 | @@ -509,28 +564,7 @@ def clear_holders(base_paths, try_preserve=False): | |||
312 | 509 | LOG.info('Current device storage tree:\n%s', | 564 | LOG.info('Current device storage tree:\n%s', |
313 | 510 | '\n'.join(format_holders_tree(tree) for tree in holder_trees)) | 565 | '\n'.join(format_holders_tree(tree) for tree in holder_trees)) |
314 | 511 | ordered_devs = plan_shutdown_holder_trees(holder_trees) | 566 | ordered_devs = plan_shutdown_holder_trees(holder_trees) |
337 | 512 | 567 | LOG.info('Shutdown Plan:\n%s', "\n".join(map(str, ordered_devs))) | |
316 | 513 | # run wipe-superblock on layered devices | ||
317 | 514 | for dev_info in ordered_devs: | ||
318 | 515 | dev_type = DEV_TYPES.get(dev_info['dev_type']) | ||
319 | 516 | shutdown_function = dev_type.get('shutdown') | ||
320 | 517 | if not shutdown_function: | ||
321 | 518 | continue | ||
322 | 519 | |||
323 | 520 | if try_preserve and shutdown_function in DATA_DESTROYING_HANDLERS: | ||
324 | 521 | LOG.info('shutdown function for holder type: %s is destructive. ' | ||
325 | 522 | 'attempting to preserve data, so skipping' % | ||
326 | 523 | dev_info['dev_type']) | ||
327 | 524 | continue | ||
328 | 525 | |||
329 | 526 | # for layered block devices, wipe first, then shutdown | ||
330 | 527 | if dev_info['dev_type'] in ['bcache', 'raid']: | ||
331 | 528 | LOG.info("Wiping superblock on layered device type: " | ||
332 | 529 | "'%s' syspath: '%s'", dev_info['dev_type'], | ||
333 | 530 | dev_info['device']) | ||
334 | 531 | # we just want to wipe data, we don't care about exclusive | ||
335 | 532 | _wipe_superblock(block.sysfs_to_devpath(dev_info['device']), | ||
336 | 533 | exclusive=False) | ||
338 | 534 | 568 | ||
339 | 535 | # run shutdown functions | 569 | # run shutdown functions |
340 | 536 | for dev_info in ordered_devs: | 570 | for dev_info in ordered_devs: |
341 | @@ -545,11 +579,12 @@ def clear_holders(base_paths, try_preserve=False): | |||
342 | 545 | dev_info['dev_type']) | 579 | dev_info['dev_type']) |
343 | 546 | continue | 580 | continue |
344 | 547 | 581 | ||
345 | 582 | # scan before we check | ||
346 | 583 | block.rescan_block_devices(warn_on_fail=False) | ||
347 | 548 | if os.path.exists(dev_info['device']): | 584 | if os.path.exists(dev_info['device']): |
348 | 549 | LOG.info("shutdown running on holder type: '%s' syspath: '%s'", | 585 | LOG.info("shutdown running on holder type: '%s' syspath: '%s'", |
349 | 550 | dev_info['dev_type'], dev_info['device']) | 586 | dev_info['dev_type'], dev_info['device']) |
350 | 551 | shutdown_function(dev_info['device']) | 587 | shutdown_function(dev_info['device']) |
351 | 552 | udev.udevadm_settle() | ||
352 | 553 | 588 | ||
353 | 554 | 589 | ||
354 | 555 | def start_clear_holders_deps(): | 590 | def start_clear_holders_deps(): |
355 | @@ -575,8 +610,11 @@ def start_clear_holders_deps(): | |||
356 | 575 | util.load_kernel_module('bcache') | 610 | util.load_kernel_module('bcache') |
357 | 576 | # the zfs module is needed to find and export devices which may be in-use | 611 | # the zfs module is needed to find and export devices which may be in-use |
358 | 577 | # and need to be cleared, only on xenial+. | 612 | # and need to be cleared, only on xenial+. |
361 | 578 | if not util.lsb_release()['codename'] in ['precise', 'trusty']: | 613 | try: |
362 | 579 | util.load_kernel_module('zfs') | 614 | if zfs.zfs_supported(): |
363 | 615 | util.load_kernel_module('zfs') | ||
364 | 616 | except RuntimeError as e: | ||
365 | 617 | LOG.warning('Failed to load zfs kernel module: %s', e) | ||
366 | 580 | 618 | ||
367 | 581 | 619 | ||
368 | 582 | # anything that is not identified can assumed to be a 'disk' or similar | 620 | # anything that is not identified can assumed to be a 'disk' or similar |
369 | diff --git a/curtin/block/iscsi.py b/curtin/block/iscsi.py | |||
370 | index 461f615..0c666b6 100644 | |||
371 | --- a/curtin/block/iscsi.py | |||
372 | +++ b/curtin/block/iscsi.py | |||
373 | @@ -416,18 +416,17 @@ class IscsiDisk(object): | |||
374 | 416 | self.portal, self.target, self.lun) | 416 | self.portal, self.target, self.lun) |
375 | 417 | 417 | ||
376 | 418 | def connect(self): | 418 | def connect(self): |
381 | 419 | if self.target in iscsiadm_sessions(): | 419 | if self.target not in iscsiadm_sessions(): |
382 | 420 | return | 420 | iscsiadm_discovery(self.portal) |
379 | 421 | |||
380 | 422 | iscsiadm_discovery(self.portal) | ||
383 | 423 | 421 | ||
386 | 424 | iscsiadm_authenticate(self.target, self.portal, self.user, | 422 | iscsiadm_authenticate(self.target, self.portal, self.user, |
387 | 425 | self.password, self.iuser, self.ipassword) | 423 | self.password, self.iuser, self.ipassword) |
388 | 426 | 424 | ||
390 | 427 | iscsiadm_login(self.target, self.portal) | 425 | iscsiadm_login(self.target, self.portal) |
391 | 428 | 426 | ||
393 | 429 | udev.udevadm_settle(self.devdisk_path) | 427 | udev.udevadm_settle(self.devdisk_path) |
394 | 430 | 428 | ||
395 | 429 | # always set automatic mode | ||
396 | 431 | iscsiadm_set_automatic(self.target, self.portal) | 430 | iscsiadm_set_automatic(self.target, self.portal) |
397 | 432 | 431 | ||
398 | 433 | def disconnect(self): | 432 | def disconnect(self): |
399 | diff --git a/curtin/block/mdadm.py b/curtin/block/mdadm.py | |||
400 | index b0f5591..e0fe0d3 100644 | |||
401 | --- a/curtin/block/mdadm.py | |||
402 | +++ b/curtin/block/mdadm.py | |||
403 | @@ -237,6 +237,44 @@ def mdadm_examine(devpath, export=MDADM_USE_EXPORT): | |||
404 | 237 | return data | 237 | return data |
405 | 238 | 238 | ||
406 | 239 | 239 | ||
407 | 240 | def set_sync_action(devpath, action=None, retries=None): | ||
408 | 241 | assert_valid_devpath(devpath) | ||
409 | 242 | if not action: | ||
410 | 243 | return | ||
411 | 244 | |||
412 | 245 | if not retries: | ||
413 | 246 | retries = [0.2] * 60 | ||
414 | 247 | |||
415 | 248 | sync_action = md_sysfs_attr_path(devpath, 'sync_action') | ||
416 | 249 | if not os.path.exists(sync_action): | ||
417 | 250 | # arrays without sync_action can't set values | ||
418 | 251 | return | ||
419 | 252 | |||
420 | 253 | LOG.info("mdadm set sync_action=%s on array %s", action, devpath) | ||
421 | 254 | for (attempt, wait) in enumerate(retries): | ||
422 | 255 | try: | ||
423 | 256 | LOG.debug('mdadm: set sync_action %s attempt %s', | ||
424 | 257 | devpath, attempt) | ||
425 | 258 | val = md_sysfs_attr(devpath, 'sync_action').strip() | ||
426 | 259 | LOG.debug('sync_action = "%s" ? "%s"', val, action) | ||
427 | 260 | if val != action: | ||
428 | 261 | LOG.debug("mdadm: setting array sync_action=%s", action) | ||
429 | 262 | try: | ||
430 | 263 | util.write_file(sync_action, content=action) | ||
431 | 264 | except (IOError, OSError) as e: | ||
432 | 265 | LOG.debug("mdadm: (non-fatal) write to %s failed %s", | ||
433 | 266 | sync_action, e) | ||
434 | 267 | else: | ||
435 | 268 | LOG.debug("mdadm: set array sync_action=%s SUCCESS", action) | ||
436 | 269 | return | ||
437 | 270 | |||
438 | 271 | except util.ProcessExecutionError: | ||
439 | 272 | LOG.debug( | ||
440 | 273 | "mdadm: set sync_action failed, retrying in %s seconds", wait) | ||
441 | 274 | time.sleep(wait) | ||
442 | 275 | pass | ||
443 | 276 | |||
444 | 277 | |||
445 | 240 | def mdadm_stop(devpath, retries=None): | 278 | def mdadm_stop(devpath, retries=None): |
446 | 241 | assert_valid_devpath(devpath) | 279 | assert_valid_devpath(devpath) |
447 | 242 | if not retries: | 280 | if not retries: |
448 | @@ -305,6 +343,33 @@ def mdadm_remove(devpath): | |||
449 | 305 | LOG.debug("mdadm remove:\n%s\n%s", out, err) | 343 | LOG.debug("mdadm remove:\n%s\n%s", out, err) |
450 | 306 | 344 | ||
451 | 307 | 345 | ||
452 | 346 | def fail_device(mddev, arraydev): | ||
453 | 347 | assert_valid_devpath(mddev) | ||
454 | 348 | |||
455 | 349 | LOG.info("mdadm mark faulty: %s in array %s", arraydev, mddev) | ||
456 | 350 | out, err = util.subp(["mdadm", "--fail", mddev, arraydev], | ||
457 | 351 | rcs=[0], capture=True) | ||
458 | 352 | LOG.debug("mdadm mark faulty:\n%s\n%s", out, err) | ||
459 | 353 | |||
460 | 354 | |||
461 | 355 | def remove_device(mddev, arraydev): | ||
462 | 356 | assert_valid_devpath(mddev) | ||
463 | 357 | |||
464 | 358 | LOG.info("mdadm remove %s from array %s", arraydev, mddev) | ||
465 | 359 | out, err = util.subp(["mdadm", "--remove", mddev, arraydev], | ||
466 | 360 | rcs=[0], capture=True) | ||
467 | 361 | LOG.debug("mdadm remove:\n%s\n%s", out, err) | ||
468 | 362 | |||
469 | 363 | |||
470 | 364 | def zero_device(devpath): | ||
471 | 365 | assert_valid_devpath(devpath) | ||
472 | 366 | |||
473 | 367 | LOG.info("mdadm zero superblock on %s", devpath) | ||
474 | 368 | out, err = util.subp(["mdadm", "--zero-superblock", devpath], | ||
475 | 369 | rcs=[0], capture=True) | ||
476 | 370 | LOG.debug("mdadm zero superblock:\n%s\n%s", out, err) | ||
477 | 371 | |||
478 | 372 | |||
479 | 308 | def mdadm_query_detail(md_devname, export=MDADM_USE_EXPORT): | 373 | def mdadm_query_detail(md_devname, export=MDADM_USE_EXPORT): |
480 | 309 | valid_mdname(md_devname) | 374 | valid_mdname(md_devname) |
481 | 310 | 375 | ||
482 | @@ -483,7 +548,7 @@ def __mdadm_detail_to_dict(input): | |||
483 | 483 | ''' | 548 | ''' |
484 | 484 | data = {} | 549 | data = {} |
485 | 485 | 550 | ||
487 | 486 | device = re.findall('^(\/dev\/[a-zA-Z0-9-\._]+)', input) | 551 | device = re.findall(r'^(\/dev\/[a-zA-Z0-9-\._]+)', input) |
488 | 487 | if len(device) == 1: | 552 | if len(device) == 1: |
489 | 488 | data.update({'device': device[0]}) | 553 | data.update({'device': device[0]}) |
490 | 489 | else: | 554 | else: |
491 | @@ -491,9 +556,8 @@ def __mdadm_detail_to_dict(input): | |||
492 | 491 | 556 | ||
493 | 492 | # FIXME: probably could do a better regex to match the LHS which | 557 | # FIXME: probably could do a better regex to match the LHS which |
494 | 493 | # has one, two or three words | 558 | # has one, two or three words |
498 | 494 | for f in re.findall('(\w+|\w+\ \w+|\w+\ \w+\ \w+)' + | 559 | rem = r'(\w+|\w+\ \w+|\w+\ \w+\ \w+)\ \:\ ([a-zA-Z0-9\-\.,: \(\)=\']+)' |
499 | 495 | '\ \:\ ([a-zA-Z0-9\-\.,: \(\)=\']+)', | 560 | for f in re.findall(rem, input, re.MULTILINE): |
497 | 496 | input, re.MULTILINE): | ||
500 | 497 | key = f[0].replace(' ', '_').lower() | 561 | key = f[0].replace(' ', '_').lower() |
501 | 498 | val = f[1] | 562 | val = f[1] |
502 | 499 | if key in data: | 563 | if key in data: |
503 | diff --git a/curtin/block/zfs.py b/curtin/block/zfs.py | |||
504 | index 7670af4..cfb07a9 100644 | |||
505 | --- a/curtin/block/zfs.py | |||
506 | +++ b/curtin/block/zfs.py | |||
507 | @@ -21,6 +21,9 @@ ZFS_DEFAULT_PROPERTIES = { | |||
508 | 21 | 'normalization': 'formD', | 21 | 'normalization': 'formD', |
509 | 22 | } | 22 | } |
510 | 23 | 23 | ||
511 | 24 | ZFS_UNSUPPORTED_ARCHES = ['i386'] | ||
512 | 25 | ZFS_UNSUPPORTED_RELEASES = ['precise', 'trusty'] | ||
513 | 26 | |||
514 | 24 | 27 | ||
515 | 25 | def _join_flags(optflag, params): | 28 | def _join_flags(optflag, params): |
516 | 26 | """ | 29 | """ |
517 | @@ -69,6 +72,28 @@ def _join_pool_volume(poolname, volume): | |||
518 | 69 | return os.path.normpath("%s/%s" % (poolname, volume)) | 72 | return os.path.normpath("%s/%s" % (poolname, volume)) |
519 | 70 | 73 | ||
520 | 71 | 74 | ||
521 | 75 | def zfs_supported(): | ||
522 | 76 | """ Determine if the runtime system supports zfs. | ||
523 | 77 | returns: True if system supports zfs | ||
524 | 78 | raises: RuntimeError: if system does not support zfs | ||
525 | 79 | """ | ||
526 | 80 | arch = util.get_platform_arch() | ||
527 | 81 | if arch in ZFS_UNSUPPORTED_ARCHES: | ||
528 | 82 | raise RuntimeError("zfs is not supported on architecture: %s" % arch) | ||
529 | 83 | |||
530 | 84 | release = util.lsb_release()['codename'] | ||
531 | 85 | if release in ZFS_UNSUPPORTED_RELEASES: | ||
532 | 86 | raise RuntimeError("zfs is not supported on release: %s" % release) | ||
533 | 87 | |||
534 | 88 | try: | ||
535 | 89 | util.subp(['modinfo', 'zfs'], capture=True) | ||
536 | 90 | except util.ProcessExecutionError as err: | ||
537 | 91 | if err.stderr.startswith("modinfo: ERROR: Module zfs not found."): | ||
538 | 92 | raise RuntimeError("zfs kernel module is not available: %s" % err) | ||
539 | 93 | |||
540 | 94 | return True | ||
541 | 95 | |||
542 | 96 | |||
543 | 72 | def zpool_create(poolname, vdevs, mountpoint=None, altroot=None, | 97 | def zpool_create(poolname, vdevs, mountpoint=None, altroot=None, |
544 | 73 | pool_properties=None, zfs_properties=None): | 98 | pool_properties=None, zfs_properties=None): |
545 | 74 | """ | 99 | """ |
546 | @@ -184,7 +209,7 @@ def zfs_mount(poolname, volume): | |||
547 | 184 | 209 | ||
548 | 185 | def zpool_list(): | 210 | def zpool_list(): |
549 | 186 | """ | 211 | """ |
551 | 187 | Return a list of zfs pool names | 212 | Return a list of zfs pool names which have been imported |
552 | 188 | 213 | ||
553 | 189 | :returns: List of strings | 214 | :returns: List of strings |
554 | 190 | """ | 215 | """ |
555 | diff --git a/curtin/commands/apt_config.py b/curtin/commands/apt_config.py | |||
556 | index 971f78f..41c329e 100644 | |||
557 | --- a/curtin/commands/apt_config.py | |||
558 | +++ b/curtin/commands/apt_config.py | |||
559 | @@ -38,6 +38,9 @@ PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports", | |||
560 | 38 | PRIMARY_ARCHES = ['amd64', 'i386'] | 38 | PRIMARY_ARCHES = ['amd64', 'i386'] |
561 | 39 | PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el'] | 39 | PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el'] |
562 | 40 | 40 | ||
563 | 41 | APT_SOURCES_PROPOSED = ( | ||
564 | 42 | "deb $MIRROR $RELEASE-proposed main restricted universe multiverse") | ||
565 | 43 | |||
566 | 41 | 44 | ||
567 | 42 | def get_default_mirrors(arch=None): | 45 | def get_default_mirrors(arch=None): |
568 | 43 | """returns the default mirrors for the target. These depend on the | 46 | """returns the default mirrors for the target. These depend on the |
569 | @@ -385,6 +388,8 @@ def add_apt_sources(srcdict, target=None, template_params=None, | |||
570 | 385 | if 'source' not in ent: | 388 | if 'source' not in ent: |
571 | 386 | continue | 389 | continue |
572 | 387 | source = ent['source'] | 390 | source = ent['source'] |
573 | 391 | if source == 'proposed': | ||
574 | 392 | source = APT_SOURCES_PROPOSED | ||
575 | 388 | source = util.render_string(source, template_params) | 393 | source = util.render_string(source, template_params) |
576 | 389 | 394 | ||
577 | 390 | if not ent['filename'].startswith("/"): | 395 | if not ent['filename'].startswith("/"): |
578 | diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py | |||
579 | index 504a16b..f5b82cf 100644 | |||
580 | --- a/curtin/commands/block_meta.py | |||
581 | +++ b/curtin/commands/block_meta.py | |||
582 | @@ -1,8 +1,8 @@ | |||
583 | 1 | # This file is part of curtin. See LICENSE file for copyright and license info. | 1 | # This file is part of curtin. See LICENSE file for copyright and license info. |
584 | 2 | 2 | ||
586 | 3 | from collections import OrderedDict | 3 | from collections import OrderedDict, namedtuple |
587 | 4 | from curtin import (block, config, util) | 4 | from curtin import (block, config, util) |
589 | 5 | from curtin.block import (mdadm, mkfs, clear_holders, lvm, iscsi, zfs) | 5 | from curtin.block import (bcache, mdadm, mkfs, clear_holders, lvm, iscsi, zfs) |
590 | 6 | from curtin.log import LOG | 6 | from curtin.log import LOG |
591 | 7 | from curtin.reporter import events | 7 | from curtin.reporter import events |
592 | 8 | 8 | ||
593 | @@ -17,6 +17,12 @@ import sys | |||
594 | 17 | import tempfile | 17 | import tempfile |
595 | 18 | import time | 18 | import time |
596 | 19 | 19 | ||
597 | 20 | FstabData = namedtuple( | ||
598 | 21 | "FstabData", ('spec', 'path', 'fstype', 'options', 'freq', 'passno', | ||
599 | 22 | 'device')) | ||
600 | 23 | FstabData.__new__.__defaults__ = (None, None, None, "", "0", "0", None) | ||
601 | 24 | |||
602 | 25 | |||
603 | 20 | SIMPLE = 'simple' | 26 | SIMPLE = 'simple' |
604 | 21 | SIMPLE_BOOT = 'simple-boot' | 27 | SIMPLE_BOOT = 'simple-boot' |
605 | 22 | CUSTOM = 'custom' | 28 | CUSTOM = 'custom' |
606 | @@ -224,7 +230,15 @@ def make_dname(volume, storage_config): | |||
607 | 224 | md_uuid = md_data.get('MD_UUID') | 230 | md_uuid = md_data.get('MD_UUID') |
608 | 225 | rule.append(compose_udev_equality("ENV{MD_UUID}", md_uuid)) | 231 | rule.append(compose_udev_equality("ENV{MD_UUID}", md_uuid)) |
609 | 226 | elif vol.get('type') == "bcache": | 232 | elif vol.get('type') == "bcache": |
611 | 227 | rule.append(compose_udev_equality("ENV{DEVNAME}", path)) | 233 | # bind dname to bcache backing device's dev.uuid as the bcache minor |
612 | 234 | # device numbers are not stable across reboots. | ||
613 | 235 | backing_dev = get_path_to_storage_volume(vol.get('backing_device'), | ||
614 | 236 | storage_config) | ||
615 | 237 | bcache_super = bcache.superblock_asdict(device=backing_dev) | ||
616 | 238 | if bcache_super and bcache_super['sb.version'].startswith('1'): | ||
617 | 239 | bdev_uuid = bcache_super['dev.uuid'] | ||
618 | 240 | rule.append(compose_udev_equality("ENV{CACHED_UUID}", bdev_uuid)) | ||
619 | 241 | bcache.write_label(sanitize_dname(dname), backing_dev) | ||
620 | 228 | elif vol.get('type') == "lvm_partition": | 242 | elif vol.get('type') == "lvm_partition": |
621 | 229 | volgroup_name = storage_config.get(vol.get('volgroup')).get('name') | 243 | volgroup_name = storage_config.get(vol.get('volgroup')).get('name') |
622 | 230 | dname = "%s-%s" % (volgroup_name, dname) | 244 | dname = "%s-%s" % (volgroup_name, dname) |
623 | @@ -241,8 +255,7 @@ def make_dname(volume, storage_config): | |||
624 | 241 | LOG.warning( | 255 | LOG.warning( |
625 | 242 | "dname modified to remove invalid chars. old: '{}' new: '{}'" | 256 | "dname modified to remove invalid chars. old: '{}' new: '{}'" |
626 | 243 | .format(dname, sanitized)) | 257 | .format(dname, sanitized)) |
629 | 244 | 258 | rule.append("SYMLINK+=\"disk/by-dname/%s\"\n" % sanitized) | |
628 | 245 | rule.append("SYMLINK+=\"disk/by-dname/%s\"" % sanitized) | ||
630 | 246 | LOG.debug("Writing dname udev rule '{}'".format(str(rule))) | 259 | LOG.debug("Writing dname udev rule '{}'".format(str(rule))) |
631 | 247 | util.ensure_dir(rules_dir) | 260 | util.ensure_dir(rules_dir) |
632 | 248 | rule_file = os.path.join(rules_dir, '{}.rules'.format(sanitized)) | 261 | rule_file = os.path.join(rules_dir, '{}.rules'.format(sanitized)) |
633 | @@ -621,6 +634,142 @@ def format_handler(info, storage_config): | |||
634 | 621 | udevadm_trigger([volume_path]) | 634 | udevadm_trigger([volume_path]) |
635 | 622 | 635 | ||
636 | 623 | 636 | ||
637 | 637 | def mount_data(info, storage_config): | ||
638 | 638 | """Return information necessary for a mount or fstab entry. | ||
639 | 639 | |||
640 | 640 | :param info: a 'mount' type from storage config. | ||
641 | 641 | :param storage_config: related storage_config ordered dict by id. | ||
642 | 642 | |||
643 | 643 | :return FstabData type.""" | ||
644 | 644 | if info.get('type') != "mount": | ||
645 | 645 | raise ValueError("entry is not type 'mount' (%s)" % info) | ||
646 | 646 | |||
647 | 647 | spec = info.get('spec') | ||
648 | 648 | fstype = info.get('fstype') | ||
649 | 649 | path = info.get('path') | ||
650 | 650 | freq = str(info.get('freq', 0)) | ||
651 | 651 | passno = str(info.get('passno', 0)) | ||
652 | 652 | |||
653 | 653 | # turn empty options into "defaults", which works in fstab and mount -o. | ||
654 | 654 | if not info.get('options'): | ||
655 | 655 | options = ["defaults"] | ||
656 | 656 | else: | ||
657 | 657 | options = info.get('options').split(",") | ||
658 | 658 | |||
659 | 659 | volume_path = None | ||
660 | 660 | |||
661 | 661 | if 'device' not in info: | ||
662 | 662 | missing = [m for m in ('spec', 'fstype') if not info.get(m)] | ||
663 | 663 | if not (fstype and spec): | ||
664 | 664 | raise ValueError( | ||
665 | 665 | "mount entry without 'device' missing: %s. (%s)" % | ||
666 | 666 | (missing, info)) | ||
667 | 667 | |||
668 | 668 | else: | ||
669 | 669 | if info['device'] not in storage_config: | ||
670 | 670 | raise ValueError( | ||
671 | 671 | "mount entry refers to non-existent device %s: (%s)" % | ||
672 | 672 | (info['device'], info)) | ||
673 | 673 | if not (fstype and spec): | ||
674 | 674 | format_info = storage_config.get(info['device']) | ||
675 | 675 | if not fstype: | ||
676 | 676 | fstype = format_info['fstype'] | ||
677 | 677 | if not spec: | ||
678 | 678 | if format_info.get('volume') not in storage_config: | ||
679 | 679 | raise ValueError( | ||
680 | 680 | "format type refers to non-existent id %s: (%s)" % | ||
681 | 681 | (format_info.get('volume'), format_info)) | ||
682 | 682 | volume_path = get_path_to_storage_volume( | ||
683 | 683 | format_info['volume'], storage_config) | ||
684 | 684 | if "_netdev" not in options: | ||
685 | 685 | if iscsi.volpath_is_iscsi(volume_path): | ||
686 | 686 | options.append("_netdev") | ||
687 | 687 | |||
688 | 688 | if fstype in ("fat", "fat12", "fat16", "fat32", "fat64"): | ||
689 | 689 | fstype = "vfat" | ||
690 | 690 | |||
691 | 691 | return FstabData( | ||
692 | 692 | spec, path, fstype, ",".join(options), freq, passno, volume_path) | ||
693 | 693 | |||
694 | 694 | |||
695 | 695 | def fstab_line_for_data(fdata): | ||
696 | 696 | """Return a string representing fdata in /etc/fstab format. | ||
697 | 697 | |||
698 | 698 | :param fdata: a FstabData type | ||
699 | 699 | :return a newline terminated string for /etc/fstab.""" | ||
700 | 700 | path = fdata.path | ||
701 | 701 | if not path: | ||
702 | 702 | if fdata.fstype == "swap": | ||
703 | 703 | path = "none" | ||
704 | 704 | else: | ||
705 | 705 | raise ValueError("empty path in %s." % str(fdata)) | ||
706 | 706 | |||
707 | 707 | if fdata.spec is None: | ||
708 | 708 | if not fdata.device: | ||
709 | 709 | raise ValueError("FstabData missing both spec and device.") | ||
710 | 710 | uuid = block.get_volume_uuid(fdata.device) | ||
711 | 711 | spec = ("UUID=%s" % uuid) if uuid else fdata.device | ||
712 | 712 | else: | ||
713 | 713 | spec = fdata.spec | ||
714 | 714 | |||
715 | 715 | if fdata.options in (None, "", "defaults"): | ||
716 | 716 | if fdata.fstype == "swap": | ||
717 | 717 | options = "sw" | ||
718 | 718 | else: | ||
719 | 719 | options = "defaults" | ||
720 | 720 | else: | ||
721 | 721 | options = fdata.options | ||
722 | 722 | |||
723 | 723 | return ' '.join((spec, path, fdata.fstype, options, | ||
724 | 724 | fdata.freq, fdata.passno)) + "\n" | ||
725 | 725 | |||
726 | 726 | |||
727 | 727 | def mount_fstab_data(fdata, target=None): | ||
728 | 728 | """mount the FstabData fdata with root at target. | ||
729 | 729 | |||
730 | 730 | :param fdata: a FstabData type | ||
731 | 731 | :return None.""" | ||
732 | 732 | mp = util.target_path(target, fdata.path) | ||
733 | 733 | if fdata.device: | ||
734 | 734 | device = fdata.device | ||
735 | 735 | else: | ||
736 | 736 | if fdata.spec.startswith("/") and not fdata.spec.startswith("/dev/"): | ||
737 | 737 | device = util.target_path(target, fdata.spec) | ||
738 | 738 | else: | ||
739 | 739 | device = fdata.spec | ||
740 | 740 | |||
741 | 741 | options = fdata.options if fdata.options else "defaults" | ||
742 | 742 | |||
743 | 743 | mcmd = ['mount'] | ||
744 | 744 | if fdata.fstype not in ("bind", None, "none"): | ||
745 | 745 | mcmd.extend(['-t', fdata.fstype]) | ||
746 | 746 | mcmd.extend(['-o', options, device, mp]) | ||
747 | 747 | |||
748 | 748 | if fdata.fstype == "bind" or "bind" in options.split(","): | ||
749 | 749 | # for bind mounts, create the 'src' dir (mount -o bind src target) | ||
750 | 750 | util.ensure_dir(device) | ||
751 | 751 | util.ensure_dir(mp) | ||
752 | 752 | |||
753 | 753 | try: | ||
754 | 754 | util.subp(mcmd, capture=True) | ||
755 | 755 | except util.ProcessExecutionError as e: | ||
756 | 756 | LOG.exception(e) | ||
757 | 757 | msg = 'Mount failed: %s @ %s with options %s' % (device, mp, options) | ||
758 | 758 | LOG.error(msg) | ||
759 | 759 | raise RuntimeError(msg) | ||
760 | 760 | |||
761 | 761 | |||
762 | 762 | def mount_apply(fdata, target=None, fstab=None): | ||
763 | 763 | if fdata.fstype != "swap": | ||
764 | 764 | mount_fstab_data(fdata, target=target) | ||
765 | 765 | |||
766 | 766 | # Add volume to fstab | ||
767 | 767 | if fstab: | ||
768 | 768 | util.write_file(fstab, fstab_line_for_data(fdata), omode="a") | ||
769 | 769 | else: | ||
770 | 770 | LOG.info("fstab not in environment, so not writing") | ||
771 | 771 | |||
772 | 772 | |||
773 | 624 | def mount_handler(info, storage_config): | 773 | def mount_handler(info, storage_config): |
774 | 625 | """ Handle storage config type: mount | 774 | """ Handle storage config type: mount |
775 | 626 | 775 | ||
776 | @@ -636,74 +785,8 @@ def mount_handler(info, storage_config): | |||
777 | 636 | fstab entry. | 785 | fstab entry. |
778 | 637 | """ | 786 | """ |
779 | 638 | state = util.load_command_environment() | 787 | state = util.load_command_environment() |
848 | 639 | path = info.get('path') | 788 | mount_apply(mount_data(info, storage_config), |
849 | 640 | filesystem = storage_config.get(info.get('device')) | 789 | target=state.get('target'), fstab=state.get('fstab')) |
782 | 641 | mount_options = info.get('options') | ||
783 | 642 | # handle unset, or empty('') strings | ||
784 | 643 | if not mount_options: | ||
785 | 644 | mount_options = 'defaults' | ||
786 | 645 | |||
787 | 646 | if not path and filesystem.get('fstype') != "swap": | ||
788 | 647 | raise ValueError("path to mountpoint must be specified") | ||
789 | 648 | volume = storage_config.get(filesystem.get('volume')) | ||
790 | 649 | |||
791 | 650 | # Get path to volume | ||
792 | 651 | volume_path = get_path_to_storage_volume(filesystem.get('volume'), | ||
793 | 652 | storage_config) | ||
794 | 653 | |||
795 | 654 | if filesystem.get('fstype') != "swap": | ||
796 | 655 | # Figure out what point should be | ||
797 | 656 | while len(path) > 0 and path[0] == "/": | ||
798 | 657 | path = path[1:] | ||
799 | 658 | mount_point = os.path.sep.join([state['target'], path]) | ||
800 | 659 | mount_point = os.path.normpath(mount_point) | ||
801 | 660 | |||
802 | 661 | options = mount_options.split(",") | ||
803 | 662 | # If the volume_path's kname is backed by iSCSI or (in the case of | ||
804 | 663 | # LVM/DM) if any of its slaves are backed by iSCSI, then we need to | ||
805 | 664 | # append _netdev to the fstab line | ||
806 | 665 | if iscsi.volpath_is_iscsi(volume_path): | ||
807 | 666 | LOG.debug("Marking volume_path:%s as '_netdev'", volume_path) | ||
808 | 667 | options.append("_netdev") | ||
809 | 668 | |||
810 | 669 | # Create mount point if does not exist | ||
811 | 670 | util.ensure_dir(mount_point) | ||
812 | 671 | |||
813 | 672 | # Mount volume, with options | ||
814 | 673 | try: | ||
815 | 674 | opts = ['-o', ','.join(options)] | ||
816 | 675 | util.subp(['mount', volume_path, mount_point] + opts, capture=True) | ||
817 | 676 | except util.ProcessExecutionError as e: | ||
818 | 677 | LOG.exception(e) | ||
819 | 678 | msg = ('Mount failed: %s @ %s with options %s' % (volume_path, | ||
820 | 679 | mount_point, | ||
821 | 680 | ",".join(opts))) | ||
822 | 681 | LOG.error(msg) | ||
823 | 682 | raise RuntimeError(msg) | ||
824 | 683 | |||
825 | 684 | # set path | ||
826 | 685 | path = "/%s" % path | ||
827 | 686 | |||
828 | 687 | else: | ||
829 | 688 | path = "none" | ||
830 | 689 | options = ["sw"] | ||
831 | 690 | |||
832 | 691 | # Add volume to fstab | ||
833 | 692 | if state['fstab']: | ||
834 | 693 | uuid = block.get_volume_uuid(volume_path) | ||
835 | 694 | location = ("UUID=%s" % uuid) if uuid else ( | ||
836 | 695 | get_path_to_storage_volume(volume.get('id'), | ||
837 | 696 | storage_config)) | ||
838 | 697 | |||
839 | 698 | fstype = filesystem.get('fstype') | ||
840 | 699 | if fstype in ["fat", "fat12", "fat16", "fat32", "fat64"]: | ||
841 | 700 | fstype = "vfat" | ||
842 | 701 | |||
843 | 702 | fstab_entry = "%s %s %s %s 0 0\n" % (location, path, fstype, | ||
844 | 703 | ",".join(options)) | ||
845 | 704 | util.write_file(state['fstab'], fstab_entry, omode='a') | ||
846 | 705 | else: | ||
847 | 706 | LOG.info("fstab not in environment, so not writing") | ||
850 | 707 | 790 | ||
851 | 708 | 791 | ||
852 | 709 | def lvm_volgroup_handler(info, storage_config): | 792 | def lvm_volgroup_handler(info, storage_config): |
853 | @@ -1180,6 +1263,8 @@ def zpool_handler(info, storage_config): | |||
854 | 1180 | """ | 1263 | """ |
855 | 1181 | Create a zpool based in storage_configuration | 1264 | Create a zpool based in storage_configuration |
856 | 1182 | """ | 1265 | """ |
857 | 1266 | zfs.zfs_supported() | ||
858 | 1267 | |||
859 | 1183 | state = util.load_command_environment() | 1268 | state = util.load_command_environment() |
860 | 1184 | 1269 | ||
861 | 1185 | # extract /dev/disk/by-id paths for each volume used | 1270 | # extract /dev/disk/by-id paths for each volume used |
862 | @@ -1197,9 +1282,11 @@ def zpool_handler(info, storage_config): | |||
863 | 1197 | for vdev in vdevs: | 1282 | for vdev in vdevs: |
864 | 1198 | byid = block.disk_to_byid_path(vdev) | 1283 | byid = block.disk_to_byid_path(vdev) |
865 | 1199 | if not byid: | 1284 | if not byid: |
869 | 1200 | msg = 'Cannot find by-id path to zpool device "%s"' % vdev | 1285 | msg = ('Cannot find by-id path to zpool device "%s". ' |
870 | 1201 | LOG.error(msg) | 1286 | 'The zpool may fail to import if path names change.' % vdev)
871 | 1202 | raise RuntimeError(msg) | 1287 | LOG.warning(msg) |
872 | 1288 | byid = vdev | ||
873 | 1289 | |||
874 | 1203 | vdevs_byid.append(byid) | 1290 | vdevs_byid.append(byid) |
875 | 1204 | 1291 | ||
876 | 1205 | LOG.info('Creating zpool %s with vdevs %s', poolname, vdevs_byid) | 1292 | LOG.info('Creating zpool %s with vdevs %s', poolname, vdevs_byid) |
877 | @@ -1211,6 +1298,7 @@ def zfs_handler(info, storage_config): | |||
878 | 1211 | """ | 1298 | """ |
879 | 1212 | Create a zfs filesystem | 1299 | Create a zfs filesystem |
880 | 1213 | """ | 1300 | """ |
881 | 1301 | zfs.zfs_supported() | ||
882 | 1214 | state = util.load_command_environment() | 1302 | state = util.load_command_environment() |
883 | 1215 | poolname = get_poolname(info, storage_config) | 1303 | poolname = get_poolname(info, storage_config) |
884 | 1216 | volume = info.get('volume') | 1304 | volume = info.get('volume') |
885 | @@ -1279,6 +1367,15 @@ def zfsroot_update_storage_config(storage_config): | |||
886 | 1279 | "zfsroot Mountpoint entry for / has device=%s, expected '%s'" % | 1367 | "zfsroot Mountpoint entry for / has device=%s, expected '%s'" % |
887 | 1280 | (mount.get("device"), root['id'])) | 1368 | (mount.get("device"), root['id'])) |
888 | 1281 | 1369 | ||
889 | 1370 | # validate that the boot disk is GPT partitioned | ||
890 | 1371 | bootdevs = [d for i, d in storage_config.items() if d.get('grub_device')] | ||
891 | 1372 | bootdev = bootdevs[0] | ||
892 | 1373 | if bootdev.get('ptable') != 'gpt': | ||
893 | 1374 | raise ValueError( | ||
894 | 1375 | 'zfsroot requires bootdisk with GPT partition table' | ||
895 | 1376 | ' found "%s" on disk id="%s"' % | ||
896 | 1377 | (bootdev.get('ptable'), bootdev.get('id'))) | ||
897 | 1378 | |||
898 | 1282 | LOG.info('Enabling experimental zfsroot!') | 1379 | LOG.info('Enabling experimental zfsroot!') |
899 | 1283 | 1380 | ||
900 | 1284 | ret = OrderedDict() | 1381 | ret = OrderedDict() |
901 | diff --git a/curtin/commands/curthooks.py b/curtin/commands/curthooks.py | |||
902 | index 9e51a65..d45c3a8 100644 | |||
903 | --- a/curtin/commands/curthooks.py | |||
904 | +++ b/curtin/commands/curthooks.py | |||
905 | @@ -336,7 +336,7 @@ def setup_grub(cfg, target): | |||
906 | 336 | export LANG=C; | 336 | export LANG=C; |
907 | 337 | for d in "$@"; do | 337 | for d in "$@"; do |
908 | 338 | sgdisk "$d" --print | | 338 | sgdisk "$d" --print | |
910 | 339 | awk "\$6 == prep { print d \$1 }" "d=$d" prep=4100 | 339 | awk '$6 == prep { print d $1 }' "d=$d" prep=4100 |
911 | 340 | done | 340 | done |
912 | 341 | """) | 341 | """) |
913 | 342 | try: | 342 | try: |
914 | @@ -486,9 +486,9 @@ def copy_dname_rules(rules_d, target): | |||
915 | 486 | if not rules_d: | 486 | if not rules_d: |
916 | 487 | LOG.warn("no udev rules directory to copy") | 487 | LOG.warn("no udev rules directory to copy") |
917 | 488 | return | 488 | return |
918 | 489 | target_rules_dir = util.target_path(target, "etc/udev/rules.d") | ||
919 | 489 | for rule in os.listdir(rules_d): | 490 | for rule in os.listdir(rules_d): |
922 | 490 | target_file = os.path.join( | 491 | target_file = os.path.join(target_rules_dir, rule) |
921 | 491 | target, "etc/udev/rules.d", "%s.rules" % rule) | ||
923 | 492 | shutil.copy(os.path.join(rules_d, rule), target_file) | 492 | shutil.copy(os.path.join(rules_d, rule), target_file) |
924 | 493 | 493 | ||
925 | 494 | 494 | ||
926 | diff --git a/curtin/commands/install.py b/curtin/commands/install.py | |||
927 | index bfa3930..a8c4cf9 100644 | |||
928 | --- a/curtin/commands/install.py | |||
929 | +++ b/curtin/commands/install.py | |||
930 | @@ -474,29 +474,28 @@ def cmd_install(args): | |||
931 | 474 | 474 | ||
932 | 475 | if instcfg.get('unmount', "") == "disabled": | 475 | if instcfg.get('unmount', "") == "disabled": |
933 | 476 | LOG.info('Skipping unmount: config disabled target unmounting') | 476 | LOG.info('Skipping unmount: config disabled target unmounting') |
957 | 477 | return | 477 | else: |
958 | 478 | 478 | # unmount everything (including iscsi disks) | |
959 | 479 | # unmount everything (including iscsi disks) | 479 | util.do_umount(workingd.target, recursive=True) |
960 | 480 | util.do_umount(workingd.target, recursive=True) | 480 | |
961 | 481 | 481 | # The open-iscsi service in the ephemeral environment handles | |
962 | 482 | # The open-iscsi service in the ephemeral environment handles | 482 | # disconnecting active sessions. On Artful release the systemd |
963 | 483 | # disconnecting active sessions. On Artful release the systemd | 483 | # unit file has conditionals that are not met at boot time and |
964 | 484 | # unit file has conditionals that are not met at boot time and | 484 | # results in open-iscsi service not being started; This breaks |
965 | 485 | # results in open-iscsi service not being started; This breaks | 485 | # shutdown on Artful releases. |
966 | 486 | # shutdown on Artful releases. | 486 | # Additionally, in release < Artful, if the storage configuration |
967 | 487 | # Additionally, in release < Artful, if the storage configuration | 487 | # is layered, like RAID over iscsi volumes, then disconnecting |
968 | 488 | # is layered, like RAID over iscsi volumes, then disconnecting iscsi | 488 | # iscsi sessions before stopping the raid device hangs. |
969 | 489 | # sessions before stopping the raid device hangs. | 489 | # As it turns out, letting the open-iscsi service take down the |
970 | 490 | # As it turns out, letting the open-iscsi service take down the | 490 | # session last is the cleanest way to handle all releases |
971 | 491 | # session last is the cleanest way to handle all releases regardless | 491 | # regardless of what may be layered on top of the iscsi disks. |
972 | 492 | # of what may be layered on top of the iscsi disks. | 492 | # |
973 | 493 | # | 493 | # Check if storage configuration has iscsi volumes and if so ensure |
974 | 494 | # Check if storage configuration has iscsi volumes and if so ensure | 494 | # iscsi service is active before exiting install |
975 | 495 | # iscsi service is active before exiting install | 495 | if iscsi.get_iscsi_disks_from_config(cfg): |
976 | 496 | if iscsi.get_iscsi_disks_from_config(cfg): | 496 | iscsi.restart_iscsi_service() |
977 | 497 | iscsi.restart_iscsi_service() | 497 | |
978 | 498 | 498 | shutil.rmtree(workingd.top) | |
956 | 499 | shutil.rmtree(workingd.top) | ||
979 | 500 | 499 | ||
980 | 501 | apply_power_state(cfg.get('power_state')) | 500 | apply_power_state(cfg.get('power_state')) |
981 | 502 | 501 | ||
982 | diff --git a/curtin/util.py b/curtin/util.py | |||
983 | index 12a5446..de0eb88 100644 | |||
984 | --- a/curtin/util.py | |||
985 | +++ b/curtin/util.py | |||
986 | @@ -1009,6 +1009,40 @@ def is_uefi_bootable(): | |||
987 | 1009 | return os.path.exists('/sys/firmware/efi') is True | 1009 | return os.path.exists('/sys/firmware/efi') is True |
988 | 1010 | 1010 | ||
989 | 1011 | 1011 | ||
990 | 1012 | def parse_efibootmgr(content): | ||
991 | 1013 | efikey_to_dict_key = { | ||
992 | 1014 | 'BootCurrent': 'current', | ||
993 | 1015 | 'Timeout': 'timeout', | ||
994 | 1016 | 'BootOrder': 'order', | ||
995 | 1017 | } | ||
996 | 1018 | |||
997 | 1019 | output = {} | ||
998 | 1020 | for line in content.splitlines(): | ||
999 | 1021 | split = line.split(':') | ||
1000 | 1022 | if len(split) == 2: | ||
1001 | 1023 | key = split[0].strip() | ||
1002 | 1024 | output_key = efikey_to_dict_key.get(key, None) | ||
1003 | 1025 | if output_key: | ||
1004 | 1026 | output[output_key] = split[1].strip() | ||
1005 | 1027 | if output_key == 'order': | ||
1006 | 1028 | output[output_key] = output[output_key].split(',') | ||
1007 | 1029 | output['entries'] = { | ||
1008 | 1030 | entry: { | ||
1009 | 1031 | 'name': name.strip(), | ||
1010 | 1032 | 'path': path.strip(), | ||
1011 | 1033 | } | ||
1012 | 1034 | for entry, name, path in re.findall( | ||
1013 | 1035 | r"^Boot(?P<entry>[0-9a-fA-F]{4})\*?\s(?P<name>.+)\t" | ||
1014 | 1036 | r"(?P<path>.*)$", | ||
1015 | 1037 | content, re.MULTILINE) | ||
1016 | 1038 | } | ||
1017 | 1039 | if 'order' in output: | ||
1018 | 1040 | new_order = [item for item in output['order'] | ||
1019 | 1041 | if item in output['entries']] | ||
1020 | 1042 | output['order'] = new_order | ||
1021 | 1043 | return output | ||
1022 | 1044 | |||
1023 | 1045 | |||
1024 | 1012 | def get_efibootmgr(target): | 1046 | def get_efibootmgr(target): |
1025 | 1013 | """Return mapping of EFI information. | 1047 | """Return mapping of EFI information. |
1026 | 1014 | 1048 | ||
1027 | @@ -1032,33 +1066,9 @@ def get_efibootmgr(target): | |||
1028 | 1032 | } | 1066 | } |
1029 | 1033 | } | 1067 | } |
1030 | 1034 | """ | 1068 | """ |
1031 | 1035 | efikey_to_dict_key = { | ||
1032 | 1036 | 'BootCurrent': 'current', | ||
1033 | 1037 | 'Timeout': 'timeout', | ||
1034 | 1038 | 'BootOrder': 'order', | ||
1035 | 1039 | } | ||
1036 | 1040 | with ChrootableTarget(target) as in_chroot: | 1069 | with ChrootableTarget(target) as in_chroot: |
1037 | 1041 | stdout, _ = in_chroot.subp(['efibootmgr', '-v'], capture=True) | 1070 | stdout, _ = in_chroot.subp(['efibootmgr', '-v'], capture=True) |
1058 | 1042 | output = {} | 1071 | output = parse_efibootmgr(stdout) |
1039 | 1043 | for line in stdout.splitlines(): | ||
1040 | 1044 | split = line.split(':') | ||
1041 | 1045 | if len(split) == 2: | ||
1042 | 1046 | key = split[0].strip() | ||
1043 | 1047 | output_key = efikey_to_dict_key.get(key, None) | ||
1044 | 1048 | if output_key: | ||
1045 | 1049 | output[output_key] = split[1].strip() | ||
1046 | 1050 | if output_key == 'order': | ||
1047 | 1051 | output[output_key] = output[output_key].split(',') | ||
1048 | 1052 | output['entries'] = { | ||
1049 | 1053 | entry: { | ||
1050 | 1054 | 'name': name.strip(), | ||
1051 | 1055 | 'path': path.strip(), | ||
1052 | 1056 | } | ||
1053 | 1057 | for entry, name, path in re.findall( | ||
1054 | 1058 | r"^Boot(?P<entry>[0-9a-fA-F]{4})\*?\s(?P<name>.+)\t" | ||
1055 | 1059 | r"(?P<path>.*)$", | ||
1056 | 1060 | stdout, re.MULTILINE) | ||
1057 | 1061 | } | ||
1059 | 1062 | return output | 1072 | return output |
1060 | 1063 | 1073 | ||
1061 | 1064 | 1074 | ||
1062 | diff --git a/debian/changelog b/debian/changelog | |||
1063 | index fed9042..4f4e78e 100644 | |||
1064 | --- a/debian/changelog | |||
1065 | +++ b/debian/changelog | |||
1066 | @@ -1,3 +1,25 @@ | |||
1067 | 1 | curtin (18.1-17-gae48e86f-0ubuntu1~17.10.1) artful; urgency=medium | ||
1068 | 2 | |||
1069 | 3 | * New upstream snapshot. (LP: #1772044) | ||
1070 | 4 | - tests: replace usage of mock.assert_called | ||
1071 | 5 | - tools: jenkins-runner show curtin version in output. | ||
1072 | 6 | - zfs: implement a supported check to handle i386 | ||
1073 | 7 | - Support mount entries not tied to a device, including bind and tmpfs. | ||
1074 | 8 | - block/clear_holders/mdadm: refactor handling of layered device wiping | ||
1075 | 9 | - clear_holders: only export zpools that have been imported | ||
1076 | 10 | - vmtests: allow env control of apt, system_upgrade, package upgrade | ||
1077 | 11 | - util.get_efibootmgr: filter bootorder by found entries | ||
1078 | 12 | - vmtests: adjust lvm_iscsi dnames to match configuration | ||
1079 | 13 | - vmtest: always boot with BOOTIF and ip=:::::BOOTIF:dhcp | ||
1080 | 14 | - make_dname for bcache should use backing device uuid | ||
1081 | 15 | - zfsroot: add additional checks, do not require disk 'serial' attribute | ||
1082 | 16 | - clear-holders: fix lvm name use when shutting down | ||
1083 | 17 | - install: prevent unmount: disabled from swallowing installation failures | ||
1084 | 18 | - vmtest: bionic images no longer use the vlan package | ||
1085 | 19 | - pycodestyle: Fix invalid escape sequences in string literals. | ||
1086 | 20 | |||
1087 | 21 | -- Ryan Harper <ryan.harper@canonical.com> Fri, 18 May 2018 14:01:58 -0500 | ||
1088 | 22 | |||
1089 | 1 | curtin (18.1-1-g45564eef-0ubuntu1~17.10.1) artful; urgency=medium | 23 | curtin (18.1-1-g45564eef-0ubuntu1~17.10.1) artful; urgency=medium |
1090 | 2 | 24 | ||
1091 | 3 | * New upstream snapshot. (LP: #1759664) | 25 | * New upstream snapshot. (LP: #1759664) |
1092 | diff --git a/doc/topics/integration-testing.rst b/doc/topics/integration-testing.rst | |||
1093 | index d1a849f..7753068 100644 | |||
1094 | --- a/doc/topics/integration-testing.rst | |||
1095 | +++ b/doc/topics/integration-testing.rst | |||
1096 | @@ -307,6 +307,22 @@ Some environment variables affect the running of vmtest | |||
1097 | 307 | This allows us to avoid failures when running curtin from an Ubuntu | 307 | This allows us to avoid failures when running curtin from an Ubuntu |
1098 | 308 | package or from some other "stale" source. | 308 | package or from some other "stale" source. |
1099 | 309 | 309 | ||
1100 | 310 | - ``CURTIN_VMTEST_ADD_REPOS``: default '' | ||
1101 | 311 | This is a comma delimited list of apt repositories that will be | ||
1102 | 312 | added to the target environment. If there are repositories | ||
1103 | 313 | provided here, and CURTIN_VMTEST_SYSTEM_UPGRADE is at its default | ||
1104 | 314 | setting (auto), then an upgrade will be done to make sure to include | ||
1105 | 315 | any new packages. | ||
1106 | 316 | |||
1107 | 317 | - ``CURTIN_VMTEST_SYSTEM_UPGRADE``: default 'auto' | ||
1108 | 318 | The default setting of 'auto' means to do a system upgrade if | ||
1109 | 319 | there are additional repos added. To enable this explicitly, set | ||
1110 | 320 | to any non "0" value. | ||
1111 | 321 | |||
1112 | 322 | - ``CURTIN_VMTEST_UPGRADE_PACKAGES``: default '' | ||
1113 | 323 | This is a comma delimited string listing packages that should have | ||
1114 | 324 | an 'apt-get install' done to them in curtin late commands. | ||
1115 | 325 | |||
1116 | 310 | 326 | ||
1117 | 311 | Environment 'boolean' values | 327 | Environment 'boolean' values |
1118 | 312 | ============================ | 328 | ============================ |
1119 | diff --git a/doc/topics/storage.rst b/doc/topics/storage.rst | |||
1120 | index 403a20b..ca6253c 100644 | |||
1121 | --- a/doc/topics/storage.rst | |||
1122 | +++ b/doc/topics/storage.rst | |||
1123 | @@ -277,6 +277,8 @@ exists and will not modify the partition. | |||
1124 | 277 | device: disk0 | 277 | device: disk0 |
1125 | 278 | flag: boot | 278 | flag: boot |
1126 | 279 | 279 | ||
1127 | 280 | .. _format: | ||
1128 | 281 | |||
1129 | 280 | Format Command | 282 | Format Command |
1130 | 281 | ~~~~~~~~~~~~~~ | 283 | ~~~~~~~~~~~~~~ |
1131 | 282 | The format command makes filesystems on a volume. The filesystem type and | 284 | The format command makes filesystems on a volume. The filesystem type and |
1132 | @@ -290,7 +292,10 @@ target volume can be specified, as well as a few other options. | |||
1133 | 290 | Utilizing the ``fstype: zfsroot`` will indicate to curtin | 292 | Utilizing the ``fstype: zfsroot`` will indicate to curtin |
1134 | 291 | that it should automatically inject the appropriate ``type: zpool`` | 293 | that it should automatically inject the appropriate ``type: zpool`` |
1135 | 292 | and ``type: zfs`` command structures based on which target ``volume`` | 294 | and ``type: zfs`` command structures based on which target ``volume`` |
1137 | 293 | is specified in the ``format`` command. | 295 | is specified in the ``format`` command. There may be only *one* |
1138 | 296 | zfsroot entry. The disk that contains the zfsroot must be partitioned | ||
1139 | 297 | with a GPT partition table. Curtin will fail to install if these | ||
1140 | 298 | requirements are not met. | ||
1141 | 294 | 299 | ||
1142 | 295 | The ``fstype`` key specifies what type of filesystem format curtin should use | 300 | The ``fstype`` key specifies what type of filesystem format curtin should use |
1143 | 296 | for this volume. Curtin knows about common Linux filesystems such as ext4/3 and | 301 | for this volume. Curtin knows about common Linux filesystems such as ext4/3 and |
1144 | @@ -366,9 +371,8 @@ in ``/dev``. | |||
1145 | 366 | 371 | ||
1146 | 367 | **device**: *<device id>* | 372 | **device**: *<device id>* |
1147 | 368 | 373 | ||
1151 | 369 | The ``device`` key refers to the ``id`` of the target device in the storage | 374 | The ``device`` key refers to the ``id`` of a :ref:`Format <format>` entry. |
1152 | 370 | config. The target device must already contain a valid filesystem and be | 375 | One of ``device`` or ``spec`` must be present. |
1150 | 371 | accessible. | ||
1153 | 372 | 376 | ||
1154 | 373 | .. note:: | 377 | .. note:: |
1155 | 374 | 378 | ||
1156 | @@ -376,6 +380,12 @@ accessible. | |||
1157 | 376 | fstab entry will contain ``_netdev`` to indicate networking is | 380 | fstab entry will contain ``_netdev`` to indicate networking is |
1158 | 377 | required to mount this filesystem. | 381 | required to mount this filesystem. |
1159 | 378 | 382 | ||
1160 | 383 | **fstype**: *<filesystem type>* | ||
1161 | 384 | |||
1162 | 385 | ``fstype`` is only required if ``device`` is not present. It indicates | ||
1163 | 386 | the filesystem type and will be used for mount operations and written | ||
1164 | 387 | to ``/etc/fstab`` | ||
1165 | 388 | |||
1166 | 379 | **options**: *<mount(8) comma-separated options string>* | 389 | **options**: *<mount(8) comma-separated options string>* |
1167 | 380 | 390 | ||
1168 | 381 | The ``options`` key will replace the default options value of ``defaults``. | 391 | The ``options`` key will replace the default options value of ``defaults``. |
1169 | @@ -393,6 +403,14 @@ The ``options`` key will replace the default options value of ``defaults``. | |||
1170 | 393 | If either of the environments (install or target) do not have support for | 403 | If either of the environments (install or target) do not have support for |
1171 | 394 | the provided options, the behavior is undefined. | 404 | the provided options, the behavior is undefined. |
1172 | 395 | 405 | ||
1173 | 406 | **spec**: *<fs_spec>* | ||
1174 | 407 | |||
1175 | 408 | The ``spec`` attribute defines the fsspec as defined in fstab(5). | ||
1176 | 409 | If ``spec`` is present with ``device``, then mounts will be done | ||
1177 | 410 | according to ``spec`` rather than determined via inspection of ``device``. | ||
1178 | 411 | If ``spec`` is present without ``device`` then ``fstype`` must be present. | ||
1179 | 412 | |||
1180 | 413 | |||
1181 | 396 | **Config Example**:: | 414 | **Config Example**:: |
1182 | 397 | 415 | ||
1183 | 398 | - id: disk0-part1-fs1-mount0 | 416 | - id: disk0-part1-fs1-mount0 |
1184 | @@ -401,6 +419,41 @@ The ``options`` key will replace the default options value of ``defaults``. | |||
1185 | 401 | device: disk0-part1-fs1 | 419 | device: disk0-part1-fs1 |
1186 | 402 | options: 'noatime,errors=remount-ro' | 420 | options: 'noatime,errors=remount-ro' |
1187 | 403 | 421 | ||
1188 | 422 | **Bind Mount** | ||
1189 | 423 | |||
1190 | 424 | Below is an example of configuring a bind mount. | ||
1191 | 425 | |||
1192 | 426 | .. code-block:: yaml | ||
1193 | 427 | |||
1194 | 428 | - id: bind1 | ||
1195 | 429 | fstype: "none" | ||
1196 | 430 | options: "bind" | ||
1197 | 431 | path: "/var/lib" | ||
1198 | 432 | spec: "/my/bind-over-var-lib" | ||
1199 | 433 | type: mount | ||
1200 | 434 | |||
1201 | 435 | That would result in a fstab entry like:: | ||
1202 | 436 | |||
1203 | 437 | /my/bind-over-var-lib /var/lib none bind 0 0 | ||
1204 | 438 | |||
1205 | 439 | **Tmpfs Mount** | ||
1206 | 440 | |||
1207 | 441 | Below is an example of configuring a tmpfs mount. | ||
1208 | 442 | |||
1209 | 443 | .. code-block:: yaml | ||
1210 | 444 | |||
1211 | 445 | - id: tmpfs1 | ||
1212 | 446 | type: mount | ||
1213 | 447 | spec: "none" | ||
1214 | 448 | path: "/my/tmpfs" | ||
1215 | 449 | options: size=4194304 | ||
1216 | 450 | fstype: "tmpfs" | ||
1217 | 451 | |||
1218 | 452 | That would result in a fstab entry like:: | ||
1219 | 453 | |||
1220 | 454 | none /my/tmpfs tmpfs size=4194304 0 0 | ||
1221 | 455 | |||
1222 | 456 | |||
1223 | 404 | Lvm Volgroup Command | 457 | Lvm Volgroup Command |
1224 | 405 | ~~~~~~~~~~~~~~~~~~~~ | 458 | ~~~~~~~~~~~~~~~~~~~~ |
1225 | 406 | The lvm_volgroup command creates LVM Physical Volumes (PV) and connects them in | 459 | The lvm_volgroup command creates LVM Physical Volumes (PV) and connects them in |
1226 | @@ -651,6 +704,10 @@ when constructing ZFS datasets. | |||
1227 | 651 | 704 | ||
1228 | 652 | The ``vdevs`` key specifies a list of items in the storage configuration to use | 705 | The ``vdevs`` key specifies a list of items in the storage configuration to use |
1229 | 653 | in building a ZFS storage pool. This can be a partition or a whole disk. | 706 | in building a ZFS storage pool. This can be a partition or a whole disk. |
1230 | 707 | It is recommended that vdevs are ``disks`` which have a 'serial' attribute | ||
1231 | 708 | which allows Curtin to build a /dev/disk/by-id path which is a persistent | ||
1232 | 709 | path, however, if not available Curtin will accept 'path' attributes but | ||
1233 | 710 | warn that the zpool may be unstable due to missing by-id device path. | ||
1234 | 654 | 711 | ||
1235 | 655 | **mountpoint**: *<mountpoint>* | 712 | **mountpoint**: *<mountpoint>* |
1236 | 656 | 713 | ||
1237 | diff --git a/examples/tests/dirty_disks_config.yaml b/examples/tests/dirty_disks_config.yaml | |||
1238 | index 18d331d..75d44c3 100644 | |||
1239 | --- a/examples/tests/dirty_disks_config.yaml | |||
1240 | +++ b/examples/tests/dirty_disks_config.yaml | |||
1241 | @@ -22,6 +22,11 @@ bucket: | |||
1242 | 22 | done | 22 | done |
1243 | 23 | swapon --show | 23 | swapon --show |
1244 | 24 | exit 0 | 24 | exit 0 |
1245 | 25 | - &zpool_export | | ||
1246 | 26 | #!/bin/sh | ||
1247 | 27 | # disable any rpools to trigger disks with zfs_member label but inactive | ||
1248 | 28 | # pools | ||
1249 | 29 | zpool export rpool ||: | ||
1250 | 25 | 30 | ||
1251 | 26 | early_commands: | 31 | early_commands: |
1252 | 27 | # running block-meta custom from the install environment | 32 | # running block-meta custom from the install environment |
1253 | @@ -34,3 +39,4 @@ early_commands: | |||
1254 | 34 | WORKING_DIR=/tmp/my.bdir/work.d, | 39 | WORKING_DIR=/tmp/my.bdir/work.d, |
1255 | 35 | curtin, --showtrace, -v, block-meta, --umount, custom] | 40 | curtin, --showtrace, -v, block-meta, --umount, custom] |
1256 | 36 | enable_swaps: [sh, -c, *swapon] | 41 | enable_swaps: [sh, -c, *swapon] |
1257 | 42 | disable_rpool: [sh, -c, *zpool_export] | ||
1258 | diff --git a/examples/tests/filesystem_battery.yaml b/examples/tests/filesystem_battery.yaml | |||
1259 | index ba4fcac..3b1edbf 100644 | |||
1260 | --- a/examples/tests/filesystem_battery.yaml | |||
1261 | +++ b/examples/tests/filesystem_battery.yaml | |||
1262 | @@ -99,3 +99,26 @@ storage: | |||
1263 | 99 | label: myxfs | 99 | label: myxfs |
1264 | 100 | volume: d2p10 | 100 | volume: d2p10 |
1265 | 101 | uuid: 9c537621-f2f4-4e24-a071-e05012a1a997 | 101 | uuid: 9c537621-f2f4-4e24-a071-e05012a1a997 |
1266 | 102 | - id: tmpfs1 | ||
1267 | 103 | type: mount | ||
1268 | 104 | spec: "none" | ||
1269 | 105 | path: "/my/tmpfs" | ||
1270 | 106 | options: size=4194304 | ||
1271 | 107 | fstype: "tmpfs" | ||
1272 | 108 | - id: ramfs1 | ||
1273 | 109 | type: mount | ||
1274 | 110 | spec: "none" | ||
1275 | 111 | path: "/my/ramfs" | ||
1276 | 112 | fstype: "ramfs" | ||
1277 | 113 | - id: bind1 | ||
1278 | 114 | fstype: "none" | ||
1279 | 115 | options: "bind" | ||
1280 | 116 | path: "/var/lib" | ||
1281 | 117 | spec: "/my/bind-over-var-lib" | ||
1282 | 118 | type: mount | ||
1283 | 119 | - id: bind2 | ||
1284 | 120 | fstype: "none" | ||
1285 | 121 | options: "bind,ro" | ||
1286 | 122 | path: "/my/bind-ro-etc" | ||
1287 | 123 | spec: "/etc" | ||
1288 | 124 | type: mount | ||
1289 | diff --git a/examples/tests/lvm.yaml b/examples/tests/lvm.yaml | |||
1290 | index 796dd1c..8eab6b0 100644 | |||
1291 | --- a/examples/tests/lvm.yaml | |||
1292 | +++ b/examples/tests/lvm.yaml | |||
1293 | @@ -9,6 +9,13 @@ storage: | |||
1294 | 9 | model: QEMU HARDDISK | 9 | model: QEMU HARDDISK |
1295 | 10 | serial: disk-a | 10 | serial: disk-a |
1296 | 11 | name: main_disk | 11 | name: main_disk |
1297 | 12 | - id: sdb | ||
1298 | 13 | type: disk | ||
1299 | 14 | wipe: superblock | ||
1300 | 15 | ptable: msdos | ||
1301 | 16 | model: QEMU HARDDISK | ||
1302 | 17 | serial: disk-b | ||
1303 | 18 | name: extra_disk | ||
1304 | 12 | - id: sda1 | 19 | - id: sda1 |
1305 | 13 | type: partition | 20 | type: partition |
1306 | 14 | size: 3GB | 21 | size: 3GB |
1307 | @@ -29,6 +36,10 @@ storage: | |||
1308 | 29 | size: 3G | 36 | size: 3G |
1309 | 30 | flag: logical | 37 | flag: logical |
1310 | 31 | device: sda | 38 | device: sda |
1311 | 39 | - id: sdb1 | ||
1312 | 40 | type: partition | ||
1313 | 41 | size: 4GB | ||
1314 | 42 | device: sdb | ||
1315 | 32 | - id: volgroup1 | 43 | - id: volgroup1 |
1316 | 33 | name: vg1 | 44 | name: vg1 |
1317 | 34 | type: lvm_volgroup | 45 | type: lvm_volgroup |
1318 | @@ -44,6 +55,16 @@ storage: | |||
1319 | 44 | name: lv2 | 55 | name: lv2 |
1320 | 45 | type: lvm_partition | 56 | type: lvm_partition |
1321 | 46 | volgroup: volgroup1 | 57 | volgroup: volgroup1 |
1322 | 58 | - id: volgroup2 | ||
1323 | 59 | name: ubuntu-vg | ||
1324 | 60 | type: lvm_volgroup | ||
1325 | 61 | devices: | ||
1326 | 62 | - sdb1 | ||
1327 | 63 | - id: ubuntulv1 | ||
1328 | 64 | name: my-storage | ||
1329 | 65 | size: 1G | ||
1330 | 66 | type: lvm_partition | ||
1331 | 67 | volgroup: volgroup2 | ||
1332 | 47 | - id: sda1_root | 68 | - id: sda1_root |
1333 | 48 | type: format | 69 | type: format |
1334 | 49 | fstype: ext4 | 70 | fstype: ext4 |
1335 | diff --git a/examples/tests/mdadm_bcache_complex.yaml b/examples/tests/mdadm_bcache_complex.yaml | |||
1336 | 50 | deleted file mode 100644 | 71 | deleted file mode 100644 |
1337 | index c9c2f05..0000000 | |||
1338 | --- a/examples/tests/mdadm_bcache_complex.yaml | |||
1339 | +++ /dev/null | |||
1340 | @@ -1,128 +0,0 @@ | |||
1341 | 1 | storage: | ||
1342 | 2 | version: 1 | ||
1343 | 3 | config: | ||
1344 | 4 | - grub_device: true | ||
1345 | 5 | id: sda | ||
1346 | 6 | type: disk | ||
1347 | 7 | wipe: superblock | ||
1348 | 8 | ptable: gpt | ||
1349 | 9 | model: QEMU HARDDISK | ||
1350 | 10 | serial: disk-a | ||
1351 | 11 | name: main_disk | ||
1352 | 12 | - id: bios_boot_partition | ||
1353 | 13 | type: partition | ||
1354 | 14 | size: 1MB | ||
1355 | 15 | device: sda | ||
1356 | 16 | flag: bios_grub | ||
1357 | 17 | - id: sda1 | ||
1358 | 18 | type: partition | ||
1359 | 19 | size: 2GB | ||
1360 | 20 | device: sda | ||
1361 | 21 | - id: sda2 | ||
1362 | 22 | type: partition | ||
1363 | 23 | size: 1GB | ||
1364 | 24 | device: sda | ||
1365 | 25 | - id: sda3 | ||
1366 | 26 | type: partition | ||
1367 | 27 | size: 1GB | ||
1368 | 28 | device: sda | ||
1369 | 29 | - id: sda4 | ||
1370 | 30 | type: partition | ||
1371 | 31 | size: 1GB | ||
1372 | 32 | device: sda | ||
1373 | 33 | - id: sda5 | ||
1374 | 34 | type: partition | ||
1375 | 35 | size: 1GB | ||
1376 | 36 | device: sda | ||
1377 | 37 | - id: sda6 | ||
1378 | 38 | type: partition | ||
1379 | 39 | size: 1GB | ||
1380 | 40 | device: sda | ||
1381 | 41 | - id: sda7 | ||
1382 | 42 | type: partition | ||
1383 | 43 | size: 1GB | ||
1384 | 44 | device: sda | ||
1385 | 45 | - id: sdb | ||
1386 | 46 | type: disk | ||
1387 | 47 | wipe: superblock | ||
1388 | 48 | model: QEMU HARDDISK | ||
1389 | 49 | serial: disk-b | ||
1390 | 50 | name: second_disk | ||
1391 | 51 | - id: sdc | ||
1392 | 52 | type: disk | ||
1393 | 53 | wipe: superblock | ||
1394 | 54 | ptable: gpt | ||
1395 | 55 | model: QEMU HARDDISK | ||
1396 | 56 | serial: disk-c | ||
1397 | 57 | name: third_disk | ||
1398 | 58 | - id: sdc1 | ||
1399 | 59 | type: partition | ||
1400 | 60 | size: 3GB | ||
1401 | 61 | device: sdc | ||
1402 | 62 | - id: mddevice | ||
1403 | 63 | name: md0 | ||
1404 | 64 | type: raid | ||
1405 | 65 | raidlevel: 1 | ||
1406 | 66 | devices: | ||
1407 | 67 | - sda2 | ||
1408 | 68 | - sda3 | ||
1409 | 69 | spare_devices: | ||
1410 | 70 | - sda4 | ||
1411 | 71 | - id: bcache1_raid | ||
1412 | 72 | type: bcache | ||
1413 | 73 | name: cached_array | ||
1414 | 74 | backing_device: mddevice | ||
1415 | 75 | cache_device: sda5 | ||
1416 | 76 | cache_mode: writeback | ||
1417 | 77 | - id: bcache_normal | ||
1418 | 78 | type: bcache | ||
1419 | 79 | name: cached_array_2 | ||
1420 | 80 | backing_device: sda6 | ||
1421 | 81 | cache_device: sda5 | ||
1422 | 82 | cache_mode: writethrough | ||
1423 | 83 | - id: bcachefoo | ||
1424 | 84 | type: bcache | ||
1425 | 85 | name: cached_array_3 | ||
1426 | 86 | backing_device: sdc1 | ||
1427 | 87 | cache_device: sdb | ||
1428 | 88 | cache_mode: writearound | ||
1429 | 89 | - id: sda1_extradisk | ||
1430 | 90 | type: format | ||
1431 | 91 | fstype: ext4 | ||
1432 | 92 | volume: sda1 | ||
1433 | 93 | - id: sda7_boot | ||
1434 | 94 | type: format | ||
1435 | 95 | fstype: ext4 | ||
1436 | 96 | volume: sda7 | ||
1437 | 97 | - id: bcache_raid_storage | ||
1438 | 98 | type: format | ||
1439 | 99 | fstype: ext4 | ||
1440 | 100 | volume: bcache1_raid | ||
1441 | 101 | - id: bcache_normal_storage | ||
1442 | 102 | type: format | ||
1443 | 103 | fstype: ext4 | ||
1444 | 104 | volume: bcache_normal | ||
1445 | 105 | - id: bcachefoo_fulldiskascache_storage | ||
1446 | 106 | type: format | ||
1447 | 107 | fstype: ext4 | ||
1448 | 108 | volume: bcachefoo | ||
1449 | 109 | - id: bcache_root | ||
1450 | 110 | type: mount | ||
1451 | 111 | path: / | ||
1452 | 112 | device: bcachefoo_fulldiskascache_storage | ||
1453 | 113 | - id: bcache1_raid_mount | ||
1454 | 114 | type: mount | ||
1455 | 115 | path: /media/data | ||
1456 | 116 | device: bcache_raid_storage | ||
1457 | 117 | - id: bcache0_mount | ||
1458 | 118 | type: mount | ||
1459 | 119 | path: /media/bcache_normal | ||
1460 | 120 | device: bcache_normal_storage | ||
1461 | 121 | - id: sda1_non_root_mount | ||
1462 | 122 | type: mount | ||
1463 | 123 | path: /media/sda1 | ||
1464 | 124 | device: sda1_extradisk | ||
1465 | 125 | - id: sda7_boot_mount | ||
1466 | 126 | type: mount | ||
1467 | 127 | path: /boot | ||
1468 | 128 | device: sda7_boot | ||
1469 | diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py | |||
1470 | index bd07708..58e068b 100644 | |||
1471 | --- a/tests/unittests/helpers.py | |||
1472 | +++ b/tests/unittests/helpers.py | |||
1473 | @@ -63,7 +63,9 @@ class CiTestCase(TestCase): | |||
1474 | 63 | # the file is not created or modified. | 63 | # the file is not created or modified. |
1475 | 64 | if _dir is None: | 64 | if _dir is None: |
1476 | 65 | _dir = self.tmp_dir() | 65 | _dir = self.tmp_dir() |
1478 | 66 | return os.path.normpath(os.path.abspath(os.path.join(_dir, path))) | 66 | |
1479 | 67 | return os.path.normpath( | ||
1480 | 68 | os.path.abspath(os.path.sep.join((_dir, path)))) | ||
1481 | 67 | 69 | ||
1482 | 68 | 70 | ||
1483 | 69 | def dir2dict(startdir, prefix=None): | 71 | def dir2dict(startdir, prefix=None): |
1484 | diff --git a/tests/unittests/test_block_zfs.py b/tests/unittests/test_block_zfs.py | |||
1485 | index 883f727..c61a6da 100644 | |||
1486 | --- a/tests/unittests/test_block_zfs.py | |||
1487 | +++ b/tests/unittests/test_block_zfs.py | |||
1488 | @@ -1,5 +1,8 @@ | |||
1489 | 1 | import mock | ||
1490 | 2 | |||
1491 | 1 | from curtin.config import merge_config | 3 | from curtin.config import merge_config |
1492 | 2 | from curtin.block import zfs | 4 | from curtin.block import zfs |
1493 | 5 | from curtin.util import ProcessExecutionError | ||
1494 | 3 | from .helpers import CiTestCase | 6 | from .helpers import CiTestCase |
1495 | 4 | 7 | ||
1496 | 5 | 8 | ||
1497 | @@ -375,4 +378,97 @@ class TestBlockZfsDeviceToPoolname(CiTestCase): | |||
1498 | 375 | self.mock_blkid.assert_called_with(devs=[devname]) | 378 | self.mock_blkid.assert_called_with(devs=[devname]) |
1499 | 376 | 379 | ||
1500 | 377 | 380 | ||
1501 | 381 | class TestBlockZfsZfsSupported(CiTestCase): | ||
1502 | 382 | |||
1503 | 383 | def setUp(self): | ||
1504 | 384 | super(TestBlockZfsZfsSupported, self).setUp() | ||
1505 | 385 | self.add_patch('curtin.block.zfs.util.subp', 'mock_subp') | ||
1506 | 386 | self.add_patch('curtin.block.zfs.util.get_platform_arch', 'mock_arch') | ||
1507 | 387 | self.add_patch('curtin.block.zfs.util.lsb_release', 'mock_release') | ||
1508 | 388 | self.mock_release.return_value = {'codename': 'xenial'} | ||
1509 | 389 | self.mock_arch.return_value = 'x86_64' | ||
1510 | 390 | |||
1511 | 391 | def test_supported_arch(self): | ||
1512 | 392 | self.assertTrue(zfs.zfs_supported()) | ||
1513 | 393 | |||
1514 | 394 | def test_unsupported_arch(self): | ||
1515 | 395 | self.mock_arch.return_value = 'i386' | ||
1516 | 396 | with self.assertRaises(RuntimeError): | ||
1517 | 397 | zfs.zfs_supported() | ||
1518 | 398 | |||
1519 | 399 | def test_unsupported_releases(self): | ||
1520 | 400 | for rel in ['precise', 'trusty']: | ||
1521 | 401 | self.mock_release.return_value = {'codename': rel} | ||
1522 | 402 | with self.assertRaises(RuntimeError): | ||
1523 | 403 | zfs.zfs_supported() | ||
1524 | 404 | |||
1525 | 405 | def test_missing_module(self): | ||
1526 | 406 | missing = 'modinfo: ERROR: Module zfs not found.\n ' | ||
1527 | 407 | self.mock_subp.side_effect = ProcessExecutionError(stdout='', | ||
1528 | 408 | stderr=missing, | ||
1529 | 409 | exit_code='1') | ||
1530 | 410 | with self.assertRaises(RuntimeError): | ||
1531 | 411 | zfs.zfs_supported() | ||
1532 | 412 | |||
1533 | 413 | |||
1534 | 414 | class TestZfsSupported(CiTestCase): | ||
1535 | 415 | |||
1536 | 416 | def setUp(self): | ||
1537 | 417 | super(TestZfsSupported, self).setUp() | ||
1538 | 418 | |||
1539 | 419 | @mock.patch('curtin.block.zfs.util') | ||
1540 | 420 | def test_zfs_supported_returns_true(self, mock_util): | ||
1541 | 421 | """zfs_supported returns True on supported platforms""" | ||
1542 | 422 | mock_util.get_platform_arch.return_value = 'amd64' | ||
1543 | 423 | mock_util.lsb_release.return_value = {'codename': 'bionic'} | ||
1544 | 424 | mock_util.subp.return_value = ("", "") | ||
1545 | 425 | |||
1546 | 426 | self.assertNotIn(mock_util.get_platform_arch.return_value, | ||
1547 | 427 | zfs.ZFS_UNSUPPORTED_ARCHES) | ||
1548 | 428 | self.assertNotIn(mock_util.lsb_release.return_value['codename'], | ||
1549 | 429 | zfs.ZFS_UNSUPPORTED_RELEASES) | ||
1550 | 430 | self.assertTrue(zfs.zfs_supported()) | ||
1551 | 431 | |||
1552 | 432 | @mock.patch('curtin.block.zfs.util') | ||
1553 | 433 | def test_zfs_supported_raises_exception_on_bad_arch(self, mock_util): | ||
1554 | 434 | """zfs_supported raises RuntimeError on unsupported arches""" | ||
1555 | 435 | mock_util.lsb_release.return_value = {'codename': 'bionic'} | ||
1556 | 436 | mock_util.subp.return_value = ("", "") | ||
1557 | 437 | for arch in zfs.ZFS_UNSUPPORTED_ARCHES: | ||
1558 | 438 | mock_util.get_platform_arch.return_value = arch | ||
1559 | 439 | with self.assertRaises(RuntimeError): | ||
1560 | 440 | zfs.zfs_supported() | ||
1561 | 441 | |||
1562 | 442 | @mock.patch('curtin.block.zfs.util') | ||
1563 | 443 | def test_zfs_supported_raises_execption_on_bad_releases(self, mock_util): | ||
1564 | 444 | """zfs_supported raises RuntimeError on unsupported releases""" | ||
1565 | 445 | mock_util.get_platform_arch.return_value = 'amd64' | ||
1566 | 446 | mock_util.subp.return_value = ("", "") | ||
1567 | 447 | for release in zfs.ZFS_UNSUPPORTED_RELEASES: | ||
1568 | 448 | mock_util.lsb_release.return_value = {'codename': release} | ||
1569 | 449 | with self.assertRaises(RuntimeError): | ||
1570 | 450 | zfs.zfs_supported() | ||
1571 | 451 | |||
1572 | 452 | @mock.patch('curtin.block.zfs.util.subprocess.Popen') | ||
1573 | 453 | @mock.patch('curtin.block.zfs.util.lsb_release') | ||
1574 | 454 | @mock.patch('curtin.block.zfs.util.get_platform_arch') | ||
1575 | 455 | def test_zfs_supported_raises_exception_on_missing_module(self, | ||
1576 | 456 | m_arch, | ||
1577 | 457 | m_release, | ||
1578 | 458 | m_popen): | ||
1579 | 459 | """zfs_supported raises RuntimeError on missing zfs module""" | ||
1580 | 460 | |||
1581 | 461 | m_arch.return_value = 'amd64' | ||
1582 | 462 | m_release.return_value = {'codename': 'bionic'} | ||
1583 | 463 | process_mock = mock.Mock() | ||
1584 | 464 | attrs = { | ||
1585 | 465 | 'returncode': 1, | ||
1586 | 466 | 'communicate.return_value': | ||
1587 | 467 | ('output', "modinfo: ERROR: Module zfs not found."), | ||
1588 | 468 | } | ||
1589 | 469 | process_mock.configure_mock(**attrs) | ||
1590 | 470 | m_popen.return_value = process_mock | ||
1591 | 471 | with self.assertRaises(RuntimeError): | ||
1592 | 472 | zfs.zfs_supported() | ||
1593 | 473 | |||
1594 | 378 | # vi: ts=4 expandtab syntax=python | 474 | # vi: ts=4 expandtab syntax=python |
1595 | diff --git a/tests/unittests/test_clear_holders.py b/tests/unittests/test_clear_holders.py | |||
1596 | index 4c07a9c..ceb5615 100644 | |||
1597 | --- a/tests/unittests/test_clear_holders.py | |||
1598 | +++ b/tests/unittests/test_clear_holders.py | |||
1599 | @@ -132,6 +132,7 @@ class TestClearHolders(CiTestCase): | |||
1600 | 132 | mock_block.path_to_kname.assert_called_with(self.test_syspath) | 132 | mock_block.path_to_kname.assert_called_with(self.test_syspath) |
1601 | 133 | mock_get_dmsetup_uuid.assert_called_with(self.test_syspath) | 133 | mock_get_dmsetup_uuid.assert_called_with(self.test_syspath) |
1602 | 134 | 134 | ||
1603 | 135 | @mock.patch('curtin.block.clear_holders.block') | ||
1604 | 135 | @mock.patch('curtin.block.clear_holders.udev.udevadm_settle') | 136 | @mock.patch('curtin.block.clear_holders.udev.udevadm_settle') |
1605 | 136 | @mock.patch('curtin.block.clear_holders.get_bcache_sys_path') | 137 | @mock.patch('curtin.block.clear_holders.get_bcache_sys_path') |
1606 | 137 | @mock.patch('curtin.block.clear_holders.util') | 138 | @mock.patch('curtin.block.clear_holders.util') |
1607 | @@ -140,7 +141,7 @@ class TestClearHolders(CiTestCase): | |||
1608 | 140 | @mock.patch('curtin.block.clear_holders.get_bcache_using_dev') | 141 | @mock.patch('curtin.block.clear_holders.get_bcache_using_dev') |
1609 | 141 | def test_shutdown_bcache(self, mock_get_bcache, mock_log, mock_os, | 142 | def test_shutdown_bcache(self, mock_get_bcache, mock_log, mock_os, |
1610 | 142 | mock_util, mock_get_bcache_block, | 143 | mock_util, mock_get_bcache_block, |
1612 | 143 | mock_udevadm_settle): | 144 | mock_udevadm_settle, mock_block): |
1613 | 144 | """test clear_holders.shutdown_bcache""" | 145 | """test clear_holders.shutdown_bcache""" |
1614 | 145 | # | 146 | # |
1615 | 146 | # pass in a sysfs path to a bcache block device, | 147 | # pass in a sysfs path to a bcache block device, |
1616 | @@ -152,6 +153,7 @@ class TestClearHolders(CiTestCase): | |||
1617 | 152 | # | 153 | # |
1618 | 153 | 154 | ||
1619 | 154 | device = self.test_syspath | 155 | device = self.test_syspath |
1620 | 156 | mock_block.sys_block_path.return_value = '/dev/null' | ||
1621 | 155 | bcache_cset_uuid = 'c08ae789-a964-46fb-a66e-650f0ae78f94' | 157 | bcache_cset_uuid = 'c08ae789-a964-46fb-a66e-650f0ae78f94' |
1622 | 156 | 158 | ||
1623 | 157 | mock_os.path.exists.return_value = True | 159 | mock_os.path.exists.return_value = True |
1624 | @@ -197,6 +199,7 @@ class TestClearHolders(CiTestCase): | |||
1625 | 197 | self.assertEqual(0, len(mock_util.call_args_list)) | 199 | self.assertEqual(0, len(mock_util.call_args_list)) |
1626 | 198 | self.assertEqual(0, len(mock_get_bcache_block.call_args_list)) | 200 | self.assertEqual(0, len(mock_get_bcache_block.call_args_list)) |
1627 | 199 | 201 | ||
1628 | 202 | @mock.patch('curtin.block.clear_holders.block') | ||
1629 | 200 | @mock.patch('curtin.block.clear_holders.get_bcache_sys_path') | 203 | @mock.patch('curtin.block.clear_holders.get_bcache_sys_path') |
1630 | 201 | @mock.patch('curtin.block.clear_holders.util') | 204 | @mock.patch('curtin.block.clear_holders.util') |
1631 | 202 | @mock.patch('curtin.block.clear_holders.os') | 205 | @mock.patch('curtin.block.clear_holders.os') |
1632 | @@ -204,18 +207,20 @@ class TestClearHolders(CiTestCase): | |||
1633 | 204 | @mock.patch('curtin.block.clear_holders.get_bcache_using_dev') | 207 | @mock.patch('curtin.block.clear_holders.get_bcache_using_dev') |
1634 | 205 | def test_shutdown_bcache_no_device(self, mock_get_bcache, mock_log, | 208 | def test_shutdown_bcache_no_device(self, mock_get_bcache, mock_log, |
1635 | 206 | mock_os, mock_util, | 209 | mock_os, mock_util, |
1637 | 207 | mock_get_bcache_block): | 210 | mock_get_bcache_block, mock_block): |
1638 | 208 | device = "/sys/class/block/null" | 211 | device = "/sys/class/block/null" |
1639 | 212 | mock_block.sysfs_to_devpath.return_value = '/dev/null' | ||
1640 | 209 | mock_os.path.exists.return_value = False | 213 | mock_os.path.exists.return_value = False |
1641 | 210 | 214 | ||
1642 | 211 | clear_holders.shutdown_bcache(device) | 215 | clear_holders.shutdown_bcache(device) |
1643 | 212 | 216 | ||
1645 | 213 | self.assertEqual(1, len(mock_log.info.call_args_list)) | 217 | self.assertEqual(3, len(mock_log.info.call_args_list)) |
1646 | 214 | self.assertEqual(1, len(mock_os.path.exists.call_args_list)) | 218 | self.assertEqual(1, len(mock_os.path.exists.call_args_list)) |
1647 | 215 | self.assertEqual(0, len(mock_get_bcache.call_args_list)) | 219 | self.assertEqual(0, len(mock_get_bcache.call_args_list)) |
1648 | 216 | self.assertEqual(0, len(mock_util.call_args_list)) | 220 | self.assertEqual(0, len(mock_util.call_args_list)) |
1649 | 217 | self.assertEqual(0, len(mock_get_bcache_block.call_args_list)) | 221 | self.assertEqual(0, len(mock_get_bcache_block.call_args_list)) |
1650 | 218 | 222 | ||
1651 | 223 | @mock.patch('curtin.block.clear_holders.block') | ||
1652 | 219 | @mock.patch('curtin.block.clear_holders.get_bcache_sys_path') | 224 | @mock.patch('curtin.block.clear_holders.get_bcache_sys_path') |
1653 | 220 | @mock.patch('curtin.block.clear_holders.util') | 225 | @mock.patch('curtin.block.clear_holders.util') |
1654 | 221 | @mock.patch('curtin.block.clear_holders.os') | 226 | @mock.patch('curtin.block.clear_holders.os') |
1655 | @@ -223,8 +228,9 @@ class TestClearHolders(CiTestCase): | |||
1656 | 223 | @mock.patch('curtin.block.clear_holders.get_bcache_using_dev') | 228 | @mock.patch('curtin.block.clear_holders.get_bcache_using_dev') |
1657 | 224 | def test_shutdown_bcache_no_cset(self, mock_get_bcache, mock_log, | 229 | def test_shutdown_bcache_no_cset(self, mock_get_bcache, mock_log, |
1658 | 225 | mock_os, mock_util, | 230 | mock_os, mock_util, |
1660 | 226 | mock_get_bcache_block): | 231 | mock_get_bcache_block, mock_block): |
1661 | 227 | device = "/sys/class/block/null" | 232 | device = "/sys/class/block/null" |
1662 | 233 | mock_block.sysfs_to_devpath.return_value = '/dev/null' | ||
1663 | 228 | mock_os.path.exists.side_effect = iter([ | 234 | mock_os.path.exists.side_effect = iter([ |
1664 | 229 | True, # backing device exists | 235 | True, # backing device exists |
1665 | 230 | False, # cset device not present (already removed) | 236 | False, # cset device not present (already removed) |
1666 | @@ -236,7 +242,7 @@ class TestClearHolders(CiTestCase): | |||
1667 | 236 | 242 | ||
1668 | 237 | clear_holders.shutdown_bcache(device) | 243 | clear_holders.shutdown_bcache(device) |
1669 | 238 | 244 | ||
1671 | 239 | self.assertEqual(2, len(mock_log.info.call_args_list)) | 245 | self.assertEqual(4, len(mock_log.info.call_args_list)) |
1672 | 240 | self.assertEqual(3, len(mock_os.path.exists.call_args_list)) | 246 | self.assertEqual(3, len(mock_os.path.exists.call_args_list)) |
1673 | 241 | self.assertEqual(1, len(mock_get_bcache.call_args_list)) | 247 | self.assertEqual(1, len(mock_get_bcache.call_args_list)) |
1674 | 242 | self.assertEqual(1, len(mock_get_bcache_block.call_args_list)) | 248 | self.assertEqual(1, len(mock_get_bcache_block.call_args_list)) |
1675 | @@ -252,6 +258,7 @@ class TestClearHolders(CiTestCase): | |||
1676 | 252 | mock.call(device, retries=retries), | 258 | mock.call(device, retries=retries), |
1677 | 253 | mock.call(device + '/bcache', retries=retries)]) | 259 | mock.call(device + '/bcache', retries=retries)]) |
1678 | 254 | 260 | ||
1679 | 261 | @mock.patch('curtin.block.clear_holders.block') | ||
1680 | 255 | @mock.patch('curtin.block.clear_holders.udev.udevadm_settle') | 262 | @mock.patch('curtin.block.clear_holders.udev.udevadm_settle') |
1681 | 256 | @mock.patch('curtin.block.clear_holders.get_bcache_sys_path') | 263 | @mock.patch('curtin.block.clear_holders.get_bcache_sys_path') |
1682 | 257 | @mock.patch('curtin.block.clear_holders.util') | 264 | @mock.patch('curtin.block.clear_holders.util') |
1683 | @@ -262,8 +269,10 @@ class TestClearHolders(CiTestCase): | |||
1684 | 262 | mock_log, mock_os, | 269 | mock_log, mock_os, |
1685 | 263 | mock_util, | 270 | mock_util, |
1686 | 264 | mock_get_bcache_block, | 271 | mock_get_bcache_block, |
1688 | 265 | mock_udevadm_settle): | 272 | mock_udevadm_settle, |
1689 | 273 | mock_block): | ||
1690 | 266 | device = "/sys/class/block/null" | 274 | device = "/sys/class/block/null" |
1691 | 275 | mock_block.sysfs_to_devpath.return_value = '/dev/null' | ||
1692 | 267 | mock_os.path.exists.side_effect = iter([ | 276 | mock_os.path.exists.side_effect = iter([ |
1693 | 268 | True, # backing device exists | 277 | True, # backing device exists |
1694 | 269 | True, # cset device not present (already removed) | 278 | True, # cset device not present (already removed) |
1695 | @@ -276,7 +285,7 @@ class TestClearHolders(CiTestCase): | |||
1696 | 276 | 285 | ||
1697 | 277 | clear_holders.shutdown_bcache(device) | 286 | clear_holders.shutdown_bcache(device) |
1698 | 278 | 287 | ||
1700 | 279 | self.assertEqual(2, len(mock_log.info.call_args_list)) | 288 | self.assertEqual(4, len(mock_log.info.call_args_list)) |
1701 | 280 | self.assertEqual(3, len(mock_os.path.exists.call_args_list)) | 289 | self.assertEqual(3, len(mock_os.path.exists.call_args_list)) |
1702 | 281 | self.assertEqual(1, len(mock_get_bcache.call_args_list)) | 290 | self.assertEqual(1, len(mock_get_bcache.call_args_list)) |
1703 | 282 | self.assertEqual(1, len(mock_get_bcache_block.call_args_list)) | 291 | self.assertEqual(1, len(mock_get_bcache_block.call_args_list)) |
1704 | @@ -293,6 +302,7 @@ class TestClearHolders(CiTestCase): | |||
1705 | 293 | mock.call(device, retries=self.remove_retries) | 302 | mock.call(device, retries=self.remove_retries) |
1706 | 294 | ]) | 303 | ]) |
1707 | 295 | 304 | ||
1708 | 305 | @mock.patch('curtin.block.clear_holders.block') | ||
1709 | 296 | @mock.patch('curtin.block.clear_holders.udev.udevadm_settle') | 306 | @mock.patch('curtin.block.clear_holders.udev.udevadm_settle') |
1710 | 297 | @mock.patch('curtin.block.clear_holders.get_bcache_sys_path') | 307 | @mock.patch('curtin.block.clear_holders.get_bcache_sys_path') |
1711 | 298 | @mock.patch('curtin.block.clear_holders.util') | 308 | @mock.patch('curtin.block.clear_holders.util') |
1712 | @@ -303,8 +313,10 @@ class TestClearHolders(CiTestCase): | |||
1713 | 303 | mock_log, mock_os, | 313 | mock_log, mock_os, |
1714 | 304 | mock_util, | 314 | mock_util, |
1715 | 305 | mock_get_bcache_block, | 315 | mock_get_bcache_block, |
1717 | 306 | mock_udevadm_settle): | 316 | mock_udevadm_settle, |
1718 | 317 | mock_block): | ||
1719 | 307 | device = "/sys/class/block/null" | 318 | device = "/sys/class/block/null" |
1720 | 319 | mock_block.sysfs_to_devpath.return_value = '/dev/null' | ||
1721 | 308 | mock_os.path.exists.side_effect = iter([ | 320 | mock_os.path.exists.side_effect = iter([ |
1722 | 309 | True, # backing device exists | 321 | True, # backing device exists |
1723 | 310 | True, # cset device not present (already removed) | 322 | True, # cset device not present (already removed) |
1724 | @@ -317,7 +329,7 @@ class TestClearHolders(CiTestCase): | |||
1725 | 317 | 329 | ||
1726 | 318 | clear_holders.shutdown_bcache(device) | 330 | clear_holders.shutdown_bcache(device) |
1727 | 319 | 331 | ||
1729 | 320 | self.assertEqual(2, len(mock_log.info.call_args_list)) | 332 | self.assertEqual(4, len(mock_log.info.call_args_list)) |
1730 | 321 | self.assertEqual(3, len(mock_os.path.exists.call_args_list)) | 333 | self.assertEqual(3, len(mock_os.path.exists.call_args_list)) |
1731 | 322 | self.assertEqual(1, len(mock_get_bcache.call_args_list)) | 334 | self.assertEqual(1, len(mock_get_bcache.call_args_list)) |
1732 | 323 | self.assertEqual(1, len(mock_get_bcache_block.call_args_list)) | 335 | self.assertEqual(1, len(mock_get_bcache_block.call_args_list)) |
1733 | @@ -333,6 +345,8 @@ class TestClearHolders(CiTestCase): | |||
1734 | 333 | ]) | 345 | ]) |
1735 | 334 | 346 | ||
1736 | 335 | # test bcache shutdown with 'stop' sysfs write failure | 347 | # test bcache shutdown with 'stop' sysfs write failure |
1737 | 348 | @mock.patch('curtin.block.clear_holders.block') | ||
1738 | 349 | @mock.patch('curtin.block.wipe_volume') | ||
1739 | 336 | @mock.patch('curtin.block.clear_holders.udev.udevadm_settle') | 350 | @mock.patch('curtin.block.clear_holders.udev.udevadm_settle') |
1740 | 337 | @mock.patch('curtin.block.clear_holders.get_bcache_sys_path') | 351 | @mock.patch('curtin.block.clear_holders.get_bcache_sys_path') |
1741 | 338 | @mock.patch('curtin.block.clear_holders.util') | 352 | @mock.patch('curtin.block.clear_holders.util') |
1742 | @@ -343,9 +357,12 @@ class TestClearHolders(CiTestCase): | |||
1743 | 343 | mock_log, mock_os, | 357 | mock_log, mock_os, |
1744 | 344 | mock_util, | 358 | mock_util, |
1745 | 345 | mock_get_bcache_block, | 359 | mock_get_bcache_block, |
1747 | 346 | mock_udevadm_settle): | 360 | mock_udevadm_settle, |
1748 | 361 | mock_wipe, | ||
1749 | 362 | mock_block): | ||
1750 | 347 | """Test writes sysfs write failures pass if file not present""" | 363 | """Test writes sysfs write failures pass if file not present""" |
1751 | 348 | device = "/sys/class/block/null" | 364 | device = "/sys/class/block/null" |
1752 | 365 | mock_block.sysfs_to_devpath.return_value = '/dev/null' | ||
1753 | 349 | mock_os.path.exists.side_effect = iter([ | 366 | mock_os.path.exists.side_effect = iter([ |
1754 | 350 | True, # backing device exists | 367 | True, # backing device exists |
1755 | 351 | True, # cset device not present (already removed) | 368 | True, # cset device not present (already removed) |
1756 | @@ -363,7 +380,7 @@ class TestClearHolders(CiTestCase): | |||
1757 | 363 | 380 | ||
1758 | 364 | clear_holders.shutdown_bcache(device) | 381 | clear_holders.shutdown_bcache(device) |
1759 | 365 | 382 | ||
1761 | 366 | self.assertEqual(2, len(mock_log.info.call_args_list)) | 383 | self.assertEqual(4, len(mock_log.info.call_args_list)) |
1762 | 367 | self.assertEqual(3, len(mock_os.path.exists.call_args_list)) | 384 | self.assertEqual(3, len(mock_os.path.exists.call_args_list)) |
1763 | 368 | self.assertEqual(1, len(mock_get_bcache.call_args_list)) | 385 | self.assertEqual(1, len(mock_get_bcache.call_args_list)) |
1764 | 369 | self.assertEqual(1, len(mock_get_bcache_block.call_args_list)) | 386 | self.assertEqual(1, len(mock_get_bcache_block.call_args_list)) |
1765 | @@ -378,34 +395,43 @@ class TestClearHolders(CiTestCase): | |||
1766 | 378 | mock.call(cset, retries=self.remove_retries) | 395 | mock.call(cset, retries=self.remove_retries) |
1767 | 379 | ]) | 396 | ]) |
1768 | 380 | 397 | ||
1769 | 398 | @mock.patch('curtin.block.quick_zero') | ||
1770 | 381 | @mock.patch('curtin.block.clear_holders.LOG') | 399 | @mock.patch('curtin.block.clear_holders.LOG') |
1771 | 382 | @mock.patch('curtin.block.clear_holders.block.sys_block_path') | 400 | @mock.patch('curtin.block.clear_holders.block.sys_block_path') |
1772 | 383 | @mock.patch('curtin.block.clear_holders.lvm') | 401 | @mock.patch('curtin.block.clear_holders.lvm') |
1773 | 384 | @mock.patch('curtin.block.clear_holders.util') | 402 | @mock.patch('curtin.block.clear_holders.util') |
1775 | 385 | def test_shutdown_lvm(self, mock_util, mock_lvm, mock_syspath, mock_log): | 403 | def test_shutdown_lvm(self, mock_util, mock_lvm, mock_syspath, mock_log, |
1776 | 404 | mock_zero): | ||
1777 | 386 | """test clear_holders.shutdown_lvm""" | 405 | """test clear_holders.shutdown_lvm""" |
1780 | 387 | vg_name = 'volgroup1' | 406 | lvm_name = b'ubuntu--vg-swap\n' |
1781 | 388 | lv_name = 'lvol1' | 407 | vg_name = 'ubuntu-vg' |
1782 | 408 | lv_name = 'swap' | ||
1783 | 409 | vg_lv_name = "%s/%s" % (vg_name, lv_name) | ||
1784 | 410 | devname = "/dev/" + vg_lv_name | ||
1785 | 411 | pvols = ['/dev/wda1', '/dev/wda2'] | ||
1786 | 389 | mock_syspath.return_value = self.test_blockdev | 412 | mock_syspath.return_value = self.test_blockdev |
1788 | 390 | mock_util.load_file.return_value = '-'.join((vg_name, lv_name)) | 413 | mock_util.load_file.return_value = lvm_name |
1789 | 391 | mock_lvm.split_lvm_name.return_value = (vg_name, lv_name) | 414 | mock_lvm.split_lvm_name.return_value = (vg_name, lv_name) |
1790 | 392 | mock_lvm.get_lvols_in_volgroup.return_value = ['lvol2'] | 415 | mock_lvm.get_lvols_in_volgroup.return_value = ['lvol2'] |
1791 | 393 | clear_holders.shutdown_lvm(self.test_blockdev) | 416 | clear_holders.shutdown_lvm(self.test_blockdev) |
1792 | 394 | mock_syspath.assert_called_with(self.test_blockdev) | 417 | mock_syspath.assert_called_with(self.test_blockdev) |
1793 | 395 | mock_util.load_file.assert_called_with(self.test_blockdev + '/dm/name') | 418 | mock_util.load_file.assert_called_with(self.test_blockdev + '/dm/name') |
1796 | 396 | mock_lvm.split_lvm_name.assert_called_with( | 419 | mock_zero.assert_called_with(devname, partitions=False) |
1797 | 397 | '-'.join((vg_name, lv_name))) | 420 | mock_lvm.split_lvm_name.assert_called_with(lvm_name.strip()) |
1798 | 398 | self.assertTrue(mock_log.debug.called) | 421 | self.assertTrue(mock_log.debug.called) |
1799 | 399 | mock_util.subp.assert_called_with( | 422 | mock_util.subp.assert_called_with( |
1802 | 400 | ['dmsetup', 'remove', '-'.join((vg_name, lv_name))]) | 423 | ['lvremove', '--force', '--force', vg_lv_name]) |
1801 | 401 | |||
1803 | 402 | mock_lvm.get_lvols_in_volgroup.assert_called_with(vg_name) | 424 | mock_lvm.get_lvols_in_volgroup.assert_called_with(vg_name) |
1804 | 403 | self.assertEqual(len(mock_util.subp.call_args_list), 1) | 425 | self.assertEqual(len(mock_util.subp.call_args_list), 1) |
1805 | 404 | self.assertTrue(mock_lvm.lvm_scan.called) | ||
1806 | 405 | mock_lvm.get_lvols_in_volgroup.return_value = [] | 426 | mock_lvm.get_lvols_in_volgroup.return_value = [] |
1807 | 427 | self.assertTrue(mock_lvm.lvm_scan.called) | ||
1808 | 428 | mock_lvm.get_pvols_in_volgroup.return_value = pvols | ||
1809 | 406 | clear_holders.shutdown_lvm(self.test_blockdev) | 429 | clear_holders.shutdown_lvm(self.test_blockdev) |
1810 | 407 | mock_util.subp.assert_called_with( | 430 | mock_util.subp.assert_called_with( |
1811 | 408 | ['vgremove', '--force', '--force', vg_name], rcs=[0, 5]) | 431 | ['vgremove', '--force', '--force', vg_name], rcs=[0, 5]) |
1812 | 432 | for pv in pvols: | ||
1813 | 433 | mock_zero.assert_any_call(pv, partitions=False) | ||
1814 | 434 | self.assertTrue(mock_lvm.lvm_scan.called) | ||
1815 | 409 | 435 | ||
1816 | 410 | @mock.patch('curtin.block.clear_holders.block') | 436 | @mock.patch('curtin.block.clear_holders.block') |
1817 | 411 | @mock.patch('curtin.block.clear_holders.util') | 437 | @mock.patch('curtin.block.clear_holders.util') |
1818 | @@ -417,18 +443,38 @@ class TestClearHolders(CiTestCase): | |||
1819 | 417 | mock_util.subp.assert_called_with( | 443 | mock_util.subp.assert_called_with( |
1820 | 418 | ['cryptsetup', 'remove', self.test_blockdev], capture=True) | 444 | ['cryptsetup', 'remove', self.test_blockdev], capture=True) |
1821 | 419 | 445 | ||
1822 | 446 | @mock.patch('curtin.block.wipe_volume') | ||
1823 | 447 | @mock.patch('curtin.block.path_to_kname') | ||
1824 | 448 | @mock.patch('curtin.block.sysfs_to_devpath') | ||
1825 | 420 | @mock.patch('curtin.block.clear_holders.time') | 449 | @mock.patch('curtin.block.clear_holders.time') |
1826 | 421 | @mock.patch('curtin.block.clear_holders.util') | 450 | @mock.patch('curtin.block.clear_holders.util') |
1827 | 422 | @mock.patch('curtin.block.clear_holders.LOG') | 451 | @mock.patch('curtin.block.clear_holders.LOG') |
1828 | 423 | @mock.patch('curtin.block.clear_holders.mdadm') | 452 | @mock.patch('curtin.block.clear_holders.mdadm') |
1832 | 424 | @mock.patch('curtin.block.clear_holders.block') | 453 | def test_shutdown_mdadm(self, mock_mdadm, mock_log, mock_util, |
1833 | 425 | def test_shutdown_mdadm(self, mock_block, mock_mdadm, mock_log, mock_util, | 454 | mock_time, mock_sysdev, mock_path, mock_wipe): |
1831 | 426 | mock_time): | ||
1834 | 427 | """test clear_holders.shutdown_mdadm""" | 455 | """test clear_holders.shutdown_mdadm""" |
1837 | 428 | mock_block.sysfs_to_devpath.return_value = self.test_blockdev | 456 | devices = ['/dev/wda1', '/dev/wda2'] |
1838 | 429 | mock_block.path_to_kname.return_value = self.test_blockdev | 457 | spares = ['/dev/wdb1'] |
1839 | 458 | md_devs = (devices + spares) | ||
1840 | 459 | mock_sysdev.return_value = self.test_blockdev | ||
1841 | 460 | mock_path.return_value = self.test_blockdev | ||
1842 | 430 | mock_mdadm.md_present.return_value = False | 461 | mock_mdadm.md_present.return_value = False |
1843 | 462 | mock_mdadm.md_get_devices_list.return_value = devices | ||
1844 | 463 | mock_mdadm.md_get_spares_list.return_value = spares | ||
1845 | 464 | |||
1846 | 431 | clear_holders.shutdown_mdadm(self.test_syspath) | 465 | clear_holders.shutdown_mdadm(self.test_syspath) |
1847 | 466 | |||
1848 | 467 | mock_wipe.assert_called_with( | ||
1849 | 468 | self.test_blockdev, exclusive=False, mode='superblock') | ||
1850 | 469 | mock_mdadm.set_sync_action.assert_has_calls([ | ||
1851 | 470 | mock.call(self.test_blockdev, action="idle"), | ||
1852 | 471 | mock.call(self.test_blockdev, action="frozen")]) | ||
1853 | 472 | mock_mdadm.fail_device.assert_has_calls( | ||
1854 | 473 | [mock.call(self.test_blockdev, dev) for dev in md_devs]) | ||
1855 | 474 | mock_mdadm.remove_device.assert_has_calls( | ||
1856 | 475 | [mock.call(self.test_blockdev, dev) for dev in md_devs]) | ||
1857 | 476 | mock_mdadm.zero_device.assert_has_calls( | ||
1858 | 477 | [mock.call(dev) for dev in md_devs]) | ||
1859 | 432 | mock_mdadm.mdadm_stop.assert_called_with(self.test_blockdev) | 478 | mock_mdadm.mdadm_stop.assert_called_with(self.test_blockdev) |
1860 | 433 | mock_mdadm.md_present.assert_called_with(self.test_blockdev) | 479 | mock_mdadm.md_present.assert_called_with(self.test_blockdev) |
1861 | 434 | self.assertTrue(mock_log.debug.called) | 480 | self.assertTrue(mock_log.debug.called) |
1862 | @@ -510,6 +556,7 @@ class TestClearHolders(CiTestCase): | |||
1863 | 510 | mock_block.is_extended_partition.return_value = False | 556 | mock_block.is_extended_partition.return_value = False |
1864 | 511 | mock_block.is_zfs_member.return_value = True | 557 | mock_block.is_zfs_member.return_value = True |
1865 | 512 | mock_zfs.device_to_poolname.return_value = 'fake_pool' | 558 | mock_zfs.device_to_poolname.return_value = 'fake_pool' |
1866 | 559 | mock_zfs.zpool_list.return_value = ['fake_pool'] | ||
1867 | 513 | clear_holders.wipe_superblock(self.test_syspath) | 560 | clear_holders.wipe_superblock(self.test_syspath) |
1868 | 514 | mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath) | 561 | mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath) |
1869 | 515 | mock_zfs.zpool_export.assert_called_with('fake_pool') | 562 | mock_zfs.zpool_export.assert_called_with('fake_pool') |
1870 | @@ -676,29 +723,31 @@ class TestClearHolders(CiTestCase): | |||
1871 | 676 | mock_gen_holders_tree.return_value = self.example_holders_trees[1][1] | 723 | mock_gen_holders_tree.return_value = self.example_holders_trees[1][1] |
1872 | 677 | clear_holders.assert_clear(device) | 724 | clear_holders.assert_clear(device) |
1873 | 678 | 725 | ||
1874 | 726 | @mock.patch('curtin.block.clear_holders.zfs') | ||
1875 | 679 | @mock.patch('curtin.block.clear_holders.mdadm') | 727 | @mock.patch('curtin.block.clear_holders.mdadm') |
1876 | 680 | @mock.patch('curtin.block.clear_holders.util') | 728 | @mock.patch('curtin.block.clear_holders.util') |
1879 | 681 | def test_start_clear_holders_deps(self, mock_util, mock_mdadm): | 729 | def test_start_clear_holders_deps(self, mock_util, mock_mdadm, mock_zfs): |
1880 | 682 | mock_util.lsb_release.return_value = {'codename': 'xenial'} | 730 | mock_zfs.zfs_supported.return_value = True |
1881 | 683 | clear_holders.start_clear_holders_deps() | 731 | clear_holders.start_clear_holders_deps() |
1882 | 684 | mock_mdadm.mdadm_assemble.assert_called_with( | 732 | mock_mdadm.mdadm_assemble.assert_called_with( |
1883 | 685 | scan=True, ignore_errors=True) | 733 | scan=True, ignore_errors=True) |
1884 | 686 | mock_util.load_kernel_module.assert_has_calls([ | 734 | mock_util.load_kernel_module.assert_has_calls([ |
1885 | 687 | mock.call('bcache'), mock.call('zfs')]) | 735 | mock.call('bcache'), mock.call('zfs')]) |
1886 | 688 | 736 | ||
1887 | 737 | @mock.patch('curtin.block.clear_holders.zfs') | ||
1888 | 689 | @mock.patch('curtin.block.clear_holders.mdadm') | 738 | @mock.patch('curtin.block.clear_holders.mdadm') |
1889 | 690 | @mock.patch('curtin.block.clear_holders.util') | 739 | @mock.patch('curtin.block.clear_holders.util') |
1901 | 691 | def test_start_clear_holders_deps_nozfs(self, mock_util, mock_mdadm): | 740 | def test_start_clear_holders_deps_nozfs(self, mock_util, mock_mdadm, |
1902 | 692 | """ test that we skip zfs modprobe on precise, trusty """ | 741 | mock_zfs): |
1903 | 693 | for codename in ['precise', 'trusty']: | 742 | """test that we skip zfs modprobe on unsupported platforms""" |
1904 | 694 | mock_util.lsb_release.return_value = {'codename': codename} | 743 | mock_zfs.zfs_supported.return_value = False |
1905 | 695 | clear_holders.start_clear_holders_deps() | 744 | clear_holders.start_clear_holders_deps() |
1906 | 696 | mock_mdadm.mdadm_assemble.assert_called_with( | 745 | mock_mdadm.mdadm_assemble.assert_called_with( |
1907 | 697 | scan=True, ignore_errors=True) | 746 | scan=True, ignore_errors=True) |
1908 | 698 | mock_util.load_kernel_module.assert_has_calls( | 747 | mock_util.load_kernel_module.assert_has_calls( |
1909 | 699 | [mock.call('bcache')]) | 748 | [mock.call('bcache')]) |
1910 | 700 | self.assertNotIn(mock.call('zfs'), | 749 | self.assertNotIn(mock.call('zfs'), |
1911 | 701 | mock_util.load_kernel_module.call_args_list) | 750 | mock_util.load_kernel_module.call_args_list) |
1912 | 702 | 751 | ||
1913 | 703 | @mock.patch('curtin.block.clear_holders.util') | 752 | @mock.patch('curtin.block.clear_holders.util') |
1914 | 704 | def test_shutdown_swap_calls_swapoff(self, mock_util): | 753 | def test_shutdown_swap_calls_swapoff(self, mock_util): |
1915 | diff --git a/tests/unittests/test_commands_block_meta.py b/tests/unittests/test_commands_block_meta.py | |||
1916 | index 4937ec0..a6a0b13 100644 | |||
1917 | --- a/tests/unittests/test_commands_block_meta.py | |||
1918 | +++ b/tests/unittests/test_commands_block_meta.py | |||
1919 | @@ -2,7 +2,9 @@ | |||
1920 | 2 | 2 | ||
1921 | 3 | from argparse import Namespace | 3 | from argparse import Namespace |
1922 | 4 | from collections import OrderedDict | 4 | from collections import OrderedDict |
1923 | 5 | import copy | ||
1924 | 5 | from mock import patch, call | 6 | from mock import patch, call |
1925 | 7 | import os | ||
1926 | 6 | 8 | ||
1927 | 7 | from curtin.commands import block_meta | 9 | from curtin.commands import block_meta |
1928 | 8 | from curtin import util | 10 | from curtin import util |
1929 | @@ -321,49 +323,447 @@ class TestBlockMeta(CiTestCase): | |||
1930 | 321 | rendered_fstab = fh.read() | 323 | rendered_fstab = fh.read() |
1931 | 322 | 324 | ||
1932 | 323 | print(rendered_fstab) | 325 | print(rendered_fstab) |
1934 | 324 | self.assertEqual(rendered_fstab, expected) | 326 | self.assertEqual(expected, rendered_fstab) |
1935 | 327 | |||
1936 | 328 | |||
1937 | 329 | class TestZpoolHandler(CiTestCase): | ||
1938 | 330 | @patch('curtin.commands.block_meta.zfs') | ||
1939 | 331 | @patch('curtin.commands.block_meta.block') | ||
1940 | 332 | @patch('curtin.commands.block_meta.util') | ||
1941 | 333 | @patch('curtin.commands.block_meta.get_path_to_storage_volume') | ||
1942 | 334 | def test_zpool_handler_falls_back_to_path_when_no_byid(self, m_getpath, | ||
1943 | 335 | m_util, m_block, | ||
1944 | 336 | m_zfs): | ||
1945 | 337 | storage_config = OrderedDict() | ||
1946 | 338 | info = {'type': 'zpool', 'id': 'myrootfs_zfsroot_pool', | ||
1947 | 339 | 'pool': 'rpool', 'vdevs': ['disk1p1'], 'mountpoint': '/'} | ||
1948 | 340 | disk_path = "/wark/mydev" | ||
1949 | 341 | m_getpath.return_value = disk_path | ||
1950 | 342 | m_block.disk_to_byid_path.return_value = None | ||
1951 | 343 | m_util.load_command_environment.return_value = {'target': 'mytarget'} | ||
1952 | 344 | block_meta.zpool_handler(info, storage_config) | ||
1953 | 345 | m_zfs.zpool_create.assert_called_with(info['pool'], [disk_path], | ||
1954 | 346 | mountpoint="/", | ||
1955 | 347 | altroot="mytarget") | ||
1956 | 325 | 348 | ||
1957 | 326 | 349 | ||
1958 | 327 | class TestZFSRootUpdates(CiTestCase): | 350 | class TestZFSRootUpdates(CiTestCase): |
1978 | 328 | def test_basic_zfsroot_update_storage_config(self): | 351 | zfsroot_id = 'myrootfs' |
1979 | 329 | zfsroot_id = 'myrootfs' | 352 | base = [ |
1980 | 330 | base = [ | 353 | {'id': 'disk1', 'type': 'disk', 'ptable': 'gpt', |
1981 | 331 | {'id': 'disk1', 'type': 'disk', 'ptable': 'gpt', | 354 | 'serial': 'dev_vda', 'name': 'main_disk', 'wipe': 'superblock', |
1982 | 332 | 'serial': 'dev_vda', 'name': 'main_disk', 'wipe': 'superblock', | 355 | 'grub_device': True}, |
1983 | 333 | 'grub_device': True}, | 356 | {'id': 'disk1p1', 'type': 'partition', 'number': '1', |
1984 | 334 | {'id': 'disk1p1', 'type': 'partition', 'number': '1', | 357 | 'size': '9G', 'device': 'disk1'}, |
1985 | 335 | 'size': '9G', 'device': 'disk1'}, | 358 | {'id': 'bios_boot', 'type': 'partition', 'size': '1M', |
1986 | 336 | {'id': 'bios_boot', 'type': 'partition', 'size': '1M', | 359 | 'number': '2', 'device': 'disk1', 'flag': 'bios_grub'}] |
1987 | 337 | 'number': '2', 'device': 'disk1', 'flag': 'bios_grub'}] | 360 | zfsroots = [ |
1988 | 338 | zfsroots = [ | 361 | {'id': zfsroot_id, 'type': 'format', 'fstype': 'zfsroot', |
1989 | 339 | {'id': zfsroot_id, 'type': 'format', 'fstype': 'zfsroot', | 362 | 'volume': 'disk1p1', 'label': 'cloudimg-rootfs'}, |
1990 | 340 | 'volume': 'disk1p1', 'label': 'cloudimg-rootfs'}, | 363 | {'id': 'disk1p1_mount', 'type': 'mount', 'path': '/', |
1991 | 341 | {'id': 'disk1p1_mount', 'type': 'mount', 'path': '/', | 364 | 'device': zfsroot_id}] |
1992 | 342 | 'device': zfsroot_id}] | 365 | extra = [ |
1993 | 343 | extra = [ | 366 | {'id': 'extra', 'type': 'disk', 'ptable': 'gpt', |
1994 | 344 | {'id': 'extra', 'type': 'disk', 'ptable': 'gpt', | 367 | 'wipe': 'superblock'} |
1995 | 345 | 'wipe': 'superblock'} | 368 | ] |
1977 | 346 | ] | ||
1996 | 347 | 369 | ||
1997 | 370 | def test_basic_zfsroot_update_storage_config(self): | ||
1998 | 348 | zfsroot_volname = "/ROOT/zfsroot" | 371 | zfsroot_volname = "/ROOT/zfsroot" |
2000 | 349 | pool_id = zfsroot_id + '_zfsroot_pool' | 372 | pool_id = self.zfsroot_id + '_zfsroot_pool' |
2001 | 350 | newents = [ | 373 | newents = [ |
2002 | 351 | {'type': 'zpool', 'id': pool_id, | 374 | {'type': 'zpool', 'id': pool_id, |
2003 | 352 | 'pool': 'rpool', 'vdevs': ['disk1p1'], 'mountpoint': '/'}, | 375 | 'pool': 'rpool', 'vdevs': ['disk1p1'], 'mountpoint': '/'}, |
2005 | 353 | {'type': 'zfs', 'id': zfsroot_id + '_zfsroot_container', | 376 | {'type': 'zfs', 'id': self.zfsroot_id + '_zfsroot_container', |
2006 | 354 | 'pool': pool_id, 'volume': '/ROOT', | 377 | 'pool': pool_id, 'volume': '/ROOT', |
2007 | 355 | 'properties': {'canmount': 'off', 'mountpoint': 'none'}}, | 378 | 'properties': {'canmount': 'off', 'mountpoint': 'none'}}, |
2009 | 356 | {'type': 'zfs', 'id': zfsroot_id + '_zfsroot_fs', | 379 | {'type': 'zfs', 'id': self.zfsroot_id + '_zfsroot_fs', |
2010 | 357 | 'pool': pool_id, 'volume': zfsroot_volname, | 380 | 'pool': pool_id, 'volume': zfsroot_volname, |
2011 | 358 | 'properties': {'canmount': 'noauto', 'mountpoint': '/'}}, | 381 | 'properties': {'canmount': 'noauto', 'mountpoint': '/'}}, |
2012 | 359 | ] | 382 | ] |
2013 | 360 | expected = OrderedDict( | 383 | expected = OrderedDict( |
2015 | 361 | [(i['id'], i) for i in base + newents + extra]) | 384 | [(i['id'], i) for i in self.base + newents + self.extra]) |
2016 | 362 | 385 | ||
2017 | 363 | scfg = block_meta.extract_storage_ordered_dict( | 386 | scfg = block_meta.extract_storage_ordered_dict( |
2019 | 364 | {'storage': {'version': 1, 'config': base + zfsroots + extra}}) | 387 | {'storage': {'version': 1, |
2020 | 388 | 'config': self.base + self.zfsroots + self.extra}}) | ||
2021 | 365 | found = block_meta.zfsroot_update_storage_config(scfg) | 389 | found = block_meta.zfsroot_update_storage_config(scfg) |
2022 | 366 | print(util.json_dumps([(k, v) for k, v in found.items()])) | 390 | print(util.json_dumps([(k, v) for k, v in found.items()])) |
2023 | 367 | self.assertEqual(expected, found) | 391 | self.assertEqual(expected, found) |
2024 | 368 | 392 | ||
2025 | 393 | def test_basic_zfsroot_raise_valueerror_no_gpt(self): | ||
2026 | 394 | msdos_base = copy.deepcopy(self.base) | ||
2027 | 395 | msdos_base[0]['ptable'] = 'msdos' | ||
2028 | 396 | scfg = block_meta.extract_storage_ordered_dict( | ||
2029 | 397 | {'storage': {'version': 1, | ||
2030 | 398 | 'config': msdos_base + self.zfsroots + self.extra}}) | ||
2031 | 399 | with self.assertRaises(ValueError): | ||
2032 | 400 | block_meta.zfsroot_update_storage_config(scfg) | ||
2033 | 401 | |||
2034 | 402 | def test_basic_zfsroot_raise_valueerror_multi_zfsroot(self): | ||
2035 | 403 | extra_disk = [ | ||
2036 | 404 | {'id': 'disk2', 'type': 'disk', 'ptable': 'gpt', | ||
2037 | 405 | 'serial': 'dev_vdb', 'name': 'extra_disk', 'wipe': 'superblock'}] | ||
2038 | 406 | second_zfs = [ | ||
2039 | 407 | {'id': 'zfsroot2', 'type': 'format', 'fstype': 'zfsroot', | ||
2040 | 408 | 'volume': 'disk2', 'label': ''}] | ||
2041 | 409 | scfg = block_meta.extract_storage_ordered_dict( | ||
2042 | 410 | {'storage': {'version': 1, | ||
2043 | 411 | 'config': (self.base + extra_disk + | ||
2044 | 412 | self.zfsroots + second_zfs)}}) | ||
2045 | 413 | with self.assertRaises(ValueError): | ||
2046 | 414 | block_meta.zfsroot_update_storage_config(scfg) | ||
2047 | 415 | |||
2048 | 416 | |||
2049 | 417 | class TestFstabData(CiTestCase): | ||
2050 | 418 | mnt = {'id': 'm1', 'type': 'mount', 'device': 'fs1', 'path': '/', | ||
2051 | 419 | 'options': 'noatime'} | ||
2052 | 420 | base_cfg = [ | ||
2053 | 421 | {'id': 'xda', 'type': 'disk', 'ptable': 'msdos'}, | ||
2054 | 422 | {'id': 'xda1', 'type': 'partition', 'size': '3GB', | ||
2055 | 423 | 'device': 'xda'}, | ||
2056 | 424 | {'id': 'fs1', 'type': 'format', 'fstype': 'ext4', | ||
2057 | 425 | 'volume': 'xda1', 'label': 'rfs'}, | ||
2058 | 426 | ] | ||
2059 | 427 | |||
2060 | 428 | def _my_gptsv(self, d_id, _scfg): | ||
2061 | 429 | """local test replacement for get_path_to_storage_volume.""" | ||
2062 | 430 | if d_id in ("xda", "xda1"): | ||
2063 | 431 | return "/dev/" + d_id | ||
2064 | 432 | raise RuntimeError("Unexpected call to gptsv with %s" % d_id) | ||
2065 | 433 | |||
2066 | 434 | def test_mount_data_raises_valueerror_if_not_mount(self): | ||
2067 | 435 | """mount_data on non-mount type raises ValueError.""" | ||
2068 | 436 | mnt = self.mnt.copy() | ||
2069 | 437 | mnt['type'] = "not-mount" | ||
2070 | 438 | with self.assertRaisesRegexp(ValueError, r".*not type 'mount'"): | ||
2071 | 439 | block_meta.mount_data(mnt, {mnt['id']: mnt}) | ||
2072 | 440 | |||
2073 | 441 | def test_mount_data_no_device_or_spec_raises_valueerror(self): | ||
2074 | 442 | """test_mount_data raises ValueError if no device or spec.""" | ||
2075 | 443 | mnt = self.mnt.copy() | ||
2076 | 444 | del mnt['device'] | ||
2077 | 445 | with self.assertRaisesRegexp(ValueError, r".*mount.*missing.*"): | ||
2078 | 446 | block_meta.mount_data(mnt, {mnt['id']: mnt}) | ||
2079 | 447 | |||
2080 | 448 | def test_mount_data_invalid_device_ref_raises_valueerror(self): | ||
2081 | 449 | """test_mount_data raises ValueError if device is invalid ref.""" | ||
2082 | 450 | mnt = self.mnt.copy() | ||
2083 | 451 | mnt['device'] = 'myinvalid' | ||
2084 | 452 | scfg = OrderedDict([(i['id'], i) for i in self.base_cfg + [mnt]]) | ||
2085 | 453 | with self.assertRaisesRegexp(ValueError, r".*refers.*myinvalid"): | ||
2086 | 454 | block_meta.mount_data(mnt, scfg) | ||
2087 | 455 | |||
2088 | 456 | def test_mount_data_invalid_format_ref_raises_valueerror(self): | ||
2089 | 457 | """test_mount_data raises ValueError if format.volume is invalid.""" | ||
2090 | 458 | mycfg = copy.deepcopy(self.base_cfg) + [self.mnt.copy()] | ||
2091 | 459 | scfg = OrderedDict([(i['id'], i) for i in mycfg]) | ||
2092 | 460 | # change the 'volume' entry for the 'format' type. | ||
2093 | 461 | scfg['fs1']['volume'] = 'myinvalidvol' | ||
2094 | 462 | with self.assertRaisesRegexp(ValueError, r".*refers.*myinvalidvol"): | ||
2095 | 463 | block_meta.mount_data(scfg['m1'], scfg) | ||
2096 | 464 | |||
2097 | 465 | def test_non_device_mount_with_spec(self): | ||
2098 | 466 | """mount_info with a spec does not need device.""" | ||
2099 | 467 | info = {'id': 'xm1', 'spec': 'none', 'type': 'mount', | ||
2100 | 468 | 'fstype': 'tmpfs', 'path': '/tmpfs'} | ||
2101 | 469 | self.assertEqual( | ||
2102 | 470 | block_meta.FstabData( | ||
2103 | 471 | spec="none", fstype="tmpfs", path="/tmpfs", | ||
2104 | 472 | options="defaults", freq="0", passno="0", device=None), | ||
2105 | 473 | block_meta.mount_data(info, {'xm1': info})) | ||
2106 | 474 | |||
2107 | 475 | @patch('curtin.block.iscsi.volpath_is_iscsi') | ||
2108 | 476 | @patch('curtin.commands.block_meta.get_path_to_storage_volume') | ||
2109 | 477 | def test_device_mount_basic(self, m_gptsv, m_is_iscsi): | ||
2110 | 478 | """Test mount_data for FstabData with a device.""" | ||
2111 | 479 | m_gptsv.side_effect = self._my_gptsv | ||
2112 | 480 | m_is_iscsi.return_value = False | ||
2113 | 481 | |||
2114 | 482 | scfg = OrderedDict( | ||
2115 | 483 | [(i['id'], i) for i in self.base_cfg + [self.mnt]]) | ||
2116 | 484 | self.assertEqual( | ||
2117 | 485 | block_meta.FstabData( | ||
2118 | 486 | spec=None, fstype="ext4", path="/", | ||
2119 | 487 | options="noatime", freq="0", passno="0", device="/dev/xda1"), | ||
2120 | 488 | block_meta.mount_data(scfg['m1'], scfg)) | ||
2121 | 489 | |||
2122 | 490 | @patch('curtin.block.iscsi.volpath_is_iscsi', return_value=False) | ||
2123 | 491 | @patch('curtin.commands.block_meta.get_path_to_storage_volume') | ||
2124 | 492 | def test_device_mount_boot_efi(self, m_gptsv, m_is_iscsi): | ||
2125 | 493 | """Test mount_data fat fs gets converted to vfat.""" | ||
2126 | 494 | bcfg = copy.deepcopy(self.base_cfg) | ||
2127 | 495 | bcfg[2]['fstype'] = 'fat32' | ||
2128 | 496 | mnt = {'id': 'm1', 'type': 'mount', 'device': 'fs1', | ||
2129 | 497 | 'path': '/boot/efi'} | ||
2130 | 498 | m_gptsv.side_effect = self._my_gptsv | ||
2131 | 499 | |||
2132 | 500 | scfg = OrderedDict( | ||
2133 | 501 | [(i['id'], i) for i in bcfg + [mnt]]) | ||
2134 | 502 | self.assertEqual( | ||
2135 | 503 | block_meta.FstabData( | ||
2136 | 504 | spec=None, fstype="vfat", path="/boot/efi", | ||
2137 | 505 | options="defaults", freq="0", passno="0", device="/dev/xda1"), | ||
2138 | 506 | block_meta.mount_data(scfg['m1'], scfg)) | ||
2139 | 507 | |||
2140 | 508 | @patch('curtin.block.iscsi.volpath_is_iscsi') | ||
2141 | 509 | @patch('curtin.commands.block_meta.get_path_to_storage_volume') | ||
2142 | 510 | def test_device_mount_iscsi(self, m_gptsv, m_is_iscsi): | ||
2143 | 511 | """mount_data for a iscsi device should have _netdev in opts.""" | ||
2144 | 512 | m_gptsv.side_effect = self._my_gptsv | ||
2145 | 513 | m_is_iscsi.return_value = True | ||
2146 | 514 | |||
2147 | 515 | scfg = OrderedDict([(i['id'], i) for i in self.base_cfg + [self.mnt]]) | ||
2148 | 516 | self.assertEqual( | ||
2149 | 517 | block_meta.FstabData( | ||
2150 | 518 | spec=None, fstype="ext4", path="/", | ||
2151 | 519 | options="noatime,_netdev", freq="0", passno="0", | ||
2152 | 520 | device="/dev/xda1"), | ||
2153 | 521 | block_meta.mount_data(scfg['m1'], scfg)) | ||
2154 | 522 | |||
2155 | 523 | @patch('curtin.block.iscsi.volpath_is_iscsi') | ||
2156 | 524 | @patch('curtin.commands.block_meta.get_path_to_storage_volume') | ||
2157 | 525 | def test_spec_fstype_override_inline(self, m_gptsv, m_is_iscsi): | ||
2158 | 526 | """spec and fstype are preferred over lookups from 'device' ref. | ||
2159 | 527 | |||
2160 | 528 | If a mount entry has 'fstype' and 'spec', those are prefered over | ||
2161 | 529 | values looked up via the 'device' reference present in the entry. | ||
2162 | 530 | The test here enforces that the device reference present in | ||
2163 | 531 | the mount entry is not looked up, that isn't strictly necessary. | ||
2164 | 532 | """ | ||
2165 | 533 | m_gptsv.side_effect = Exception( | ||
2166 | 534 | "Unexpected Call to get_path_to_storage_volume") | ||
2167 | 535 | m_is_iscsi.return_value = Exception( | ||
2168 | 536 | "Unexpected Call to volpath_is_iscsi") | ||
2169 | 537 | |||
2170 | 538 | myspec = '/dev/disk/by-label/LABEL=rfs' | ||
2171 | 539 | mnt = {'id': 'm1', 'type': 'mount', 'device': 'fs1', 'path': '/', | ||
2172 | 540 | 'options': 'noatime', 'spec': myspec, 'fstype': 'ext3'} | ||
2173 | 541 | scfg = OrderedDict([(i['id'], i) for i in self.base_cfg + [mnt]]) | ||
2174 | 542 | self.assertEqual( | ||
2175 | 543 | block_meta.FstabData( | ||
2176 | 544 | spec=myspec, fstype="ext3", path="/", | ||
2177 | 545 | options="noatime", freq="0", passno="0", | ||
2178 | 546 | device=None), | ||
2179 | 547 | block_meta.mount_data(mnt, scfg)) | ||
2180 | 548 | |||
2181 | 549 | @patch('curtin.commands.block_meta.mount_fstab_data') | ||
2182 | 550 | def test_mount_apply_skips_mounting_swap(self, m_mount_fstab_data): | ||
2183 | 551 | """mount_apply does not mount swap fs, but should write fstab.""" | ||
2184 | 552 | fdata = block_meta.FstabData( | ||
2185 | 553 | spec="/dev/xxxx1", path="none", fstype='swap') | ||
2186 | 554 | fstab = self.tmp_path("fstab") | ||
2187 | 555 | block_meta.mount_apply(fdata, fstab=fstab) | ||
2188 | 556 | contents = util.load_file(fstab) | ||
2189 | 557 | self.assertEqual(0, m_mount_fstab_data.call_count) | ||
2190 | 558 | self.assertIn("/dev/xxxx1", contents) | ||
2191 | 559 | self.assertIn("swap", contents) | ||
2192 | 560 | |||
2193 | 561 | @patch('curtin.commands.block_meta.mount_fstab_data') | ||
2194 | 562 | def test_mount_apply_calls_mount_fstab_data(self, m_mount_fstab_data): | ||
2195 | 563 | """mount_apply should call mount_fstab_data to mount.""" | ||
2196 | 564 | fdata = block_meta.FstabData( | ||
2197 | 565 | spec="/dev/xxxx1", path="none", fstype='ext3') | ||
2198 | 566 | target = self.tmp_dir() | ||
2199 | 567 | block_meta.mount_apply(fdata, target=target, fstab=None) | ||
2200 | 568 | self.assertEqual([call(fdata, target=target)], | ||
2201 | 569 | m_mount_fstab_data.call_args_list) | ||
2202 | 570 | |||
2203 | 571 | @patch('curtin.commands.block_meta.mount_fstab_data') | ||
2204 | 572 | def test_mount_apply_appends_to_fstab(self, m_mount_fstab_data): | ||
2205 | 573 | """mount_apply should append to fstab.""" | ||
2206 | 574 | fdslash = block_meta.FstabData( | ||
2207 | 575 | spec="/dev/disk2", path="/", fstype='ext4') | ||
2208 | 576 | fdboot = block_meta.FstabData( | ||
2209 | 577 | spec="/dev/disk1", path="/boot", fstype='ext3') | ||
2210 | 578 | fstab = self.tmp_path("fstab") | ||
2211 | 579 | existing_line = "# this is my line" | ||
2212 | 580 | util.write_file(fstab, existing_line + "\n") | ||
2213 | 581 | block_meta.mount_apply(fdslash, fstab=fstab) | ||
2214 | 582 | block_meta.mount_apply(fdboot, fstab=fstab) | ||
2215 | 583 | |||
2216 | 584 | self.assertEqual(2, m_mount_fstab_data.call_count) | ||
2217 | 585 | lines = util.load_file(fstab).splitlines() | ||
2218 | 586 | self.assertEqual(existing_line, lines[0]) | ||
2219 | 587 | self.assertIn("/dev/disk2", lines[1]) | ||
2220 | 588 | self.assertIn("/dev/disk1", lines[2]) | ||
2221 | 589 | |||
2222 | 590 | def test_fstab_line_for_data_swap(self): | ||
2223 | 591 | """fstab_line_for_data return value for swap fstab line.""" | ||
2224 | 592 | fdata = block_meta.FstabData( | ||
2225 | 593 | spec="/dev/disk2", path="none", fstype='swap') | ||
2226 | 594 | self.assertEqual( | ||
2227 | 595 | ["/dev/disk2", "none", "swap", "sw", "0", "0"], | ||
2228 | 596 | block_meta.fstab_line_for_data(fdata).split()) | ||
2229 | 597 | |||
2230 | 598 | def test_fstab_line_for_data_swap_no_path(self): | ||
2231 | 599 | """fstab_line_for_data return value for swap with path=None.""" | ||
2232 | 600 | fdata = block_meta.FstabData( | ||
2233 | 601 | spec="/dev/disk2", path=None, fstype='swap') | ||
2234 | 602 | self.assertEqual( | ||
2235 | 603 | ["/dev/disk2", "none", "swap", "sw", "0", "0"], | ||
2236 | 604 | block_meta.fstab_line_for_data(fdata).split()) | ||
2237 | 605 | |||
2238 | 606 | def test_fstab_line_for_data_not_swap_and_no_path(self): | ||
2239 | 607 | """fstab_line_for_data raises ValueError if no path and not swap.""" | ||
2240 | 608 | fdata = block_meta.FstabData( | ||
2241 | 609 | spec="/dev/disk2", device=None, path="", fstype='ext3') | ||
2242 | 610 | with self.assertRaisesRegexp(ValueError, r".*empty.*path"): | ||
2243 | 611 | block_meta.fstab_line_for_data(fdata) | ||
2244 | 612 | |||
2245 | 613 | def test_fstab_line_for_data_with_options(self): | ||
2246 | 614 | """fstab_line_for_data return value with options.""" | ||
2247 | 615 | fdata = block_meta.FstabData( | ||
2248 | 616 | spec="/dev/disk2", path="/mnt", fstype='btrfs', options='noatime') | ||
2249 | 617 | self.assertEqual( | ||
2250 | 618 | ["/dev/disk2", "/mnt", "btrfs", "noatime", "0", "0"], | ||
2251 | 619 | block_meta.fstab_line_for_data(fdata).split()) | ||
2252 | 620 | |||
2253 | 621 | def test_fstab_line_for_data_with_passno_and_freq(self): | ||
2254 | 622 | """fstab_line_for_data should respect passno and freq.""" | ||
2255 | 623 | fdata = block_meta.FstabData( | ||
2256 | 624 | spec="/dev/d1", path="/mnt", fstype='ext4', freq="1", passno="2") | ||
2257 | 625 | self.assertEqual( | ||
2258 | 626 | ["1", "2"], block_meta.fstab_line_for_data(fdata).split()[4:6]) | ||
2259 | 627 | |||
2260 | 628 | def test_fstab_line_for_data_raises_error_without_spec_or_device(self): | ||
2261 | 629 | """fstab_line_for_data should raise ValueError if no spec or device.""" | ||
2262 | 630 | fdata = block_meta.FstabData( | ||
2263 | 631 | spec=None, device=None, path="/", fstype='ext3') | ||
2264 | 632 | match = r".*missing.*spec.*device" | ||
2265 | 633 | with self.assertRaisesRegexp(ValueError, match): | ||
2266 | 634 | block_meta.fstab_line_for_data(fdata) | ||
2267 | 635 | |||
2268 | 636 | @patch('curtin.block.get_volume_uuid') | ||
2269 | 637 | def test_fstab_line_for_data_uses_uuid(self, m_get_uuid): | ||
2270 | 638 | """fstab_line_for_data with a device mounts by uuid.""" | ||
2271 | 639 | fdata = block_meta.FstabData( | ||
2272 | 640 | device="/dev/disk2", path="/mnt", fstype='ext4') | ||
2273 | 641 | uuid = 'b30d2389-5152-4fbc-8f18-0385ef3046c5' | ||
2274 | 642 | m_get_uuid.side_effect = lambda d: uuid if d == "/dev/disk2" else None | ||
2275 | 643 | self.assertEqual( | ||
2276 | 644 | ["UUID=%s" % uuid, "/mnt", "ext4", "defaults", "0", "0"], | ||
2277 | 645 | block_meta.fstab_line_for_data(fdata).split()) | ||
2278 | 646 | self.assertEqual(1, m_get_uuid.call_count) | ||
2279 | 647 | |||
2280 | 648 | @patch('curtin.block.get_volume_uuid') | ||
2281 | 649 | def test_fstab_line_for_data_uses_device_if_no_uuid(self, m_get_uuid): | ||
2282 | 650 | """fstab_line_for_data with a device and no uuid uses device.""" | ||
2283 | 651 | fdata = block_meta.FstabData( | ||
2284 | 652 | device="/dev/disk2", path="/mnt", fstype='ext4') | ||
2285 | 653 | m_get_uuid.return_value = None | ||
2286 | 654 | self.assertEqual( | ||
2287 | 655 | ["/dev/disk2", "/mnt", "ext4", "defaults", "0", "0"], | ||
2288 | 656 | block_meta.fstab_line_for_data(fdata).split()) | ||
2289 | 657 | self.assertEqual(1, m_get_uuid.call_count) | ||
2290 | 658 | |||
2291 | 659 | @patch('curtin.block.get_volume_uuid') | ||
2292 | 660 | def test_fstab_line_for_data__spec_and_dev_prefers_spec(self, m_get_uuid): | ||
2293 | 661 | """fstab_line_for_data should prefer spec over device.""" | ||
2294 | 662 | spec = "/dev/xvda1" | ||
2295 | 663 | fdata = block_meta.FstabData( | ||
2296 | 664 | spec=spec, device="/dev/disk/by-uuid/7AC9-DEFF", | ||
2297 | 665 | path="/mnt", fstype='ext4') | ||
2298 | 666 | m_get_uuid.return_value = None | ||
2299 | 667 | self.assertEqual( | ||
2300 | 668 | ["/dev/xvda1", "/mnt", "ext4", "defaults", "0", "0"], | ||
2301 | 669 | block_meta.fstab_line_for_data(fdata).split()) | ||
2302 | 670 | self.assertEqual(0, m_get_uuid.call_count) | ||
2303 | 671 | |||
2304 | 672 | @patch('curtin.util.ensure_dir') | ||
2305 | 673 | @patch('curtin.util.subp') | ||
2306 | 674 | def test_mount_fstab_data_without_target(self, m_subp, m_ensure_dir): | ||
2307 | 675 | """mount_fstab_data with no target param does the right thing.""" | ||
2308 | 676 | fdata = block_meta.FstabData( | ||
2309 | 677 | device="/dev/disk1", path="/mnt", fstype='ext4') | ||
2310 | 678 | block_meta.mount_fstab_data(fdata) | ||
2311 | 679 | self.assertEqual( | ||
2312 | 680 | call(['mount', "-t", "ext4", "-o", "defaults", | ||
2313 | 681 | "/dev/disk1", "/mnt"], capture=True), | ||
2314 | 682 | m_subp.call_args) | ||
2315 | 683 | self.assertTrue(m_ensure_dir.called) | ||
2316 | 684 | |||
2317 | 685 | def _check_mount_fstab_subp(self, fdata, expected, target=None): | ||
2318 | 686 | # expected currently is like: mount <device> <mp> | ||
2319 | 687 | # and thus mp will always be target + fdata.path | ||
2320 | 688 | if target is None: | ||
2321 | 689 | target = self.tmp_dir() | ||
2322 | 690 | |||
2323 | 691 | expected = [a if a != "_T_MP" else util.target_path(target, fdata.path) | ||
2324 | 692 | for a in expected] | ||
2325 | 693 | with patch("curtin.util.subp") as m_subp: | ||
2326 | 694 | block_meta.mount_fstab_data(fdata, target=target) | ||
2327 | 695 | |||
2328 | 696 | self.assertEqual(call(expected, capture=True), m_subp.call_args) | ||
2329 | 697 | self.assertTrue(os.path.isdir(self.tmp_path(fdata.path, target))) | ||
2330 | 698 | |||
2331 | 699 | def test_mount_fstab_data_with_spec_and_device(self): | ||
2332 | 700 | """mount_fstab_data with spec and device should use device.""" | ||
2333 | 701 | self._check_mount_fstab_subp( | ||
2334 | 702 | block_meta.FstabData( | ||
2335 | 703 | spec="LABEL=foo", device="/dev/disk1", path="/mnt", | ||
2336 | 704 | fstype='ext4'), | ||
2337 | 705 | ['mount', "-t", "ext4", "-o", "defaults", "/dev/disk1", "_T_MP"]) | ||
2338 | 706 | |||
2339 | 707 | def test_mount_fstab_data_with_spec_that_is_path(self): | ||
2340 | 708 | """If spec is a path outside of /dev, then prefix target.""" | ||
2341 | 709 | target = self.tmp_dir() | ||
2342 | 710 | spec = "/mydata" | ||
2343 | 711 | self._check_mount_fstab_subp( | ||
2344 | 712 | block_meta.FstabData( | ||
2345 | 713 | spec=spec, path="/var/lib", fstype="none", options="bind"), | ||
2346 | 714 | ['mount', "-o", "bind", self.tmp_path(spec, target), "_T_MP"], | ||
2347 | 715 | target) | ||
2348 | 716 | |||
2349 | 717 | def test_mount_fstab_data_bind_type_creates_src(self): | ||
2350 | 718 | """Bind mounts should have both src and target dir created.""" | ||
2351 | 719 | target = self.tmp_dir() | ||
2352 | 720 | spec = "/mydata" | ||
2353 | 721 | self._check_mount_fstab_subp( | ||
2354 | 722 | block_meta.FstabData( | ||
2355 | 723 | spec=spec, path="/var/lib", fstype="none", options="bind"), | ||
2356 | 724 | ['mount', "-o", "bind", self.tmp_path(spec, target), "_T_MP"], | ||
2357 | 725 | target) | ||
2358 | 726 | self.assertTrue(os.path.isdir(self.tmp_path(spec, target))) | ||
2359 | 727 | |||
2360 | 728 | def test_mount_fstab_data_with_spec_that_is_device(self): | ||
2361 | 729 | """If spec looks like a path to a device, then use it.""" | ||
2362 | 730 | spec = "/dev/xxda1" | ||
2363 | 731 | self._check_mount_fstab_subp( | ||
2364 | 732 | block_meta.FstabData(spec=spec, path="/var/", fstype="ext3"), | ||
2365 | 733 | ['mount', "-t", "ext3", "-o", "defaults", spec, "_T_MP"]) | ||
2366 | 734 | |||
2367 | 735 | def test_mount_fstab_data_with_device_no_spec(self): | ||
2368 | 736 | """mount_fstab_data mounts by spec if present, not require device.""" | ||
2369 | 737 | spec = "/dev/xxda1" | ||
2370 | 738 | self._check_mount_fstab_subp( | ||
2371 | 739 | block_meta.FstabData(spec=spec, path="/home", fstype="ext3"), | ||
2372 | 740 | ['mount', "-t", "ext3", "-o", "defaults", spec, "_T_MP"]) | ||
2373 | 741 | |||
2374 | 742 | def test_mount_fstab_data_with_uses_options(self): | ||
2375 | 743 | """mount_fstab_data mounts with -o options.""" | ||
2376 | 744 | device = "/dev/xxda1" | ||
2377 | 745 | opts = "option1,option2,x=4" | ||
2378 | 746 | self._check_mount_fstab_subp( | ||
2379 | 747 | block_meta.FstabData( | ||
2380 | 748 | device=device, path="/var", fstype="ext3", options=opts), | ||
2381 | 749 | ['mount', "-t", "ext3", "-o", opts, device, "_T_MP"]) | ||
2382 | 750 | |||
2383 | 751 | @patch('curtin.util.subp') | ||
2384 | 752 | def test_mount_fstab_data_does_not_swallow_subp_exception(self, m_subp): | ||
2385 | 753 | """verify that subp exception gets raised. | ||
2386 | 754 | |||
2387 | 755 | The implementation there could/should change to raise the | ||
2388 | 756 | ProcessExecutionError directly. Currently raises a RuntimeError.""" | ||
2389 | 757 | my_error = util.ProcessExecutionError( | ||
2390 | 758 | stdout="", stderr="BOOM", exit_code=4) | ||
2391 | 759 | m_subp.side_effect = my_error | ||
2392 | 760 | |||
2393 | 761 | mp = self.tmp_path("my-mountpoint") | ||
2394 | 762 | with self.assertRaisesRegexp(RuntimeError, r"Mount failed.*"): | ||
2395 | 763 | block_meta.mount_fstab_data( | ||
2396 | 764 | block_meta.FstabData(device="/dev/disk1", path="/var"), | ||
2397 | 765 | target=mp) | ||
2398 | 766 | # dir should be created before call to subp failed. | ||
2399 | 767 | self.assertTrue(os.path.isdir(mp)) | ||
2400 | 768 | |||
2401 | 369 | # vi: ts=4 expandtab syntax=python | 769 | # vi: ts=4 expandtab syntax=python |
2402 | diff --git a/tests/unittests/test_commands_install.py b/tests/unittests/test_commands_install.py | |||
2403 | index ebc44db..47f4497 100644 | |||
2404 | --- a/tests/unittests/test_commands_install.py | |||
2405 | +++ b/tests/unittests/test_commands_install.py | |||
2406 | @@ -66,6 +66,34 @@ class TestCmdInstall(CiTestCase): | |||
2407 | 66 | "'proxy' in config is not a dictionary: junk", | 66 | "'proxy' in config is not a dictionary: junk", |
2408 | 67 | str(context_manager.exception)) | 67 | str(context_manager.exception)) |
2409 | 68 | 68 | ||
2410 | 69 | def test_curtin_error_unmount_doesnt_lose_exception(self): | ||
2411 | 70 | """Confirm unmount:disable skips unmounting, keeps exception""" | ||
2412 | 71 | working_dir = self.tmp_path('working', _dir=self.new_root) | ||
2413 | 72 | ensure_dir(working_dir) | ||
2414 | 73 | write_file(self.logfile, 'old log') | ||
2415 | 74 | |||
2416 | 75 | # Providing two dd images raises an error, set unmount: disabled | ||
2417 | 76 | myargs = FakeArgs( | ||
2418 | 77 | config={'install': | ||
2419 | 78 | {'log_file': self.logfile, 'unmount': 'disabled'}}, | ||
2420 | 79 | source=['dd-raw:https://localhost/raw_images/centos-6-3.img', | ||
2421 | 80 | 'dd-raw:https://localhost/cant/provide/two/images.img'], | ||
2422 | 81 | reportstack=FakeReportStack()) | ||
2423 | 82 | self.add_patch( | ||
2424 | 83 | 'curtin.commands.collect_logs.create_log_tarfile', 'm_tar') | ||
2425 | 84 | self.add_patch( | ||
2426 | 85 | 'curtin.commands.install.copy_install_log', 'm_copy_log') | ||
2427 | 86 | self.add_patch('curtin.util.do_umount', 'm_umount') | ||
2428 | 87 | |||
2429 | 88 | rv = 42 | ||
2430 | 89 | with self.assertRaises(Exception): | ||
2431 | 90 | rv = install.cmd_install(myargs) | ||
2432 | 91 | |||
2433 | 92 | # make sure install.cmd_install does not return a value, but Exception | ||
2434 | 93 | self.assertEqual(42, rv) | ||
2435 | 94 | self.assertEqual(0, self.m_umount.call_count) | ||
2436 | 95 | self.assertEqual(1, self.m_copy_log.call_count) | ||
2437 | 96 | |||
2438 | 69 | def test_curtin_error_copies_config_and_error_tarfile_defaults(self): | 97 | def test_curtin_error_copies_config_and_error_tarfile_defaults(self): |
2439 | 70 | """On curtin error, install error_tarfile is created with all logs. | 98 | """On curtin error, install error_tarfile is created with all logs. |
2440 | 71 | 99 | ||
2441 | diff --git a/tests/unittests/test_make_dname.py b/tests/unittests/test_make_dname.py | |||
2442 | index 87fa754..2b92a88 100644 | |||
2443 | --- a/tests/unittests/test_make_dname.py | |||
2444 | +++ b/tests/unittests/test_make_dname.py | |||
2445 | @@ -26,6 +26,12 @@ class TestMakeDname(CiTestCase): | |||
2446 | 26 | 'name': 'lpartition1', 'volgroup': 'lvol_id'}, | 26 | 'name': 'lpartition1', 'volgroup': 'lvol_id'}, |
2447 | 27 | 'lpart2_id': {'type': 'lvm_partition', 'id': 'lpart2_id', | 27 | 'lpart2_id': {'type': 'lvm_partition', 'id': 'lpart2_id', |
2448 | 28 | 'name': 'lvm part/2', 'volgroup': 'lvol_id'}, | 28 | 'name': 'lvm part/2', 'volgroup': 'lvol_id'}, |
2449 | 29 | 'bcache1_id': {'type': 'bcache', 'id': 'bcache1_id', | ||
2450 | 30 | 'name': 'my-cached-data'} | ||
2451 | 31 | } | ||
2452 | 32 | bcache_super_show = { | ||
2453 | 33 | 'sb.version': '1 [backing device]', | ||
2454 | 34 | 'dev.uuid': 'f36394c0-3cc0-4423-8d6f-ffac130f171a', | ||
2455 | 29 | } | 35 | } |
2456 | 30 | disk_blkid = textwrap.dedent(""" | 36 | disk_blkid = textwrap.dedent(""" |
2457 | 31 | DEVNAME=/dev/sda | 37 | DEVNAME=/dev/sda |
2458 | @@ -48,7 +54,7 @@ class TestMakeDname(CiTestCase): | |||
2459 | 48 | def _formatted_rule(self, identifiers, target): | 54 | def _formatted_rule(self, identifiers, target): |
2460 | 49 | rule = ['SUBSYSTEM=="block"', 'ACTION=="add|change"'] | 55 | rule = ['SUBSYSTEM=="block"', 'ACTION=="add|change"'] |
2461 | 50 | rule.extend(['ENV{%s}=="%s"' % ident for ident in identifiers]) | 56 | rule.extend(['ENV{%s}=="%s"' % ident for ident in identifiers]) |
2463 | 51 | rule.append('SYMLINK+="disk/by-dname/{}"'.format(target)) | 57 | rule.append('SYMLINK+="disk/by-dname/{}"\n'.format(target)) |
2464 | 52 | return ', '.join(rule) | 58 | return ', '.join(rule) |
2465 | 53 | 59 | ||
2466 | 54 | @mock.patch('curtin.commands.block_meta.LOG') | 60 | @mock.patch('curtin.commands.block_meta.LOG') |
2467 | @@ -188,6 +194,27 @@ class TestMakeDname(CiTestCase): | |||
2468 | 188 | self.rule_file.format(res_dname), | 194 | self.rule_file.format(res_dname), |
2469 | 189 | self._formatted_rule(rule_identifiers, res_dname)) | 195 | self._formatted_rule(rule_identifiers, res_dname)) |
2470 | 190 | 196 | ||
2471 | 197 | @mock.patch('curtin.commands.block_meta.LOG') | ||
2472 | 198 | @mock.patch('curtin.commands.block_meta.bcache') | ||
2473 | 199 | @mock.patch('curtin.commands.block_meta.get_path_to_storage_volume') | ||
2474 | 200 | @mock.patch('curtin.commands.block_meta.util') | ||
2475 | 201 | def test_make_dname_bcache(self, mock_util, mock_get_path, mock_bcache, | ||
2476 | 202 | mock_log): | ||
2477 | 203 | """ check bcache dname uses backing device uuid to link dname """ | ||
2478 | 204 | mock_get_path.return_value = '/my/dev/huge-storage' | ||
2479 | 205 | mock_bcache.superblock_asdict.return_value = self.bcache_super_show | ||
2480 | 206 | mock_util.load_command_environment.return_value = self.state | ||
2481 | 207 | |||
2482 | 208 | res_dname = 'my-cached-data' | ||
2483 | 209 | backing_uuid = 'f36394c0-3cc0-4423-8d6f-ffac130f171a' | ||
2484 | 210 | rule_identifiers = [('CACHED_UUID', backing_uuid)] | ||
2485 | 211 | block_meta.make_dname('bcache1_id', self.storage_config) | ||
2486 | 212 | self.assertTrue(mock_log.debug.called) | ||
2487 | 213 | self.assertFalse(mock_log.warning.called) | ||
2488 | 214 | mock_util.write_file.assert_called_with( | ||
2489 | 215 | self.rule_file.format(res_dname), | ||
2490 | 216 | self._formatted_rule(rule_identifiers, res_dname)) | ||
2491 | 217 | |||
2492 | 191 | def test_sanitize_dname(self): | 218 | def test_sanitize_dname(self): |
2493 | 192 | unsanitized_to_sanitized = [ | 219 | unsanitized_to_sanitized = [ |
2494 | 193 | ('main_disk', 'main_disk'), | 220 | ('main_disk', 'main_disk'), |
2495 | diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py | |||
2496 | index eb431b0..65175c5 100644 | |||
2497 | --- a/tests/unittests/test_util.py | |||
2498 | +++ b/tests/unittests/test_util.py | |||
2499 | @@ -860,6 +860,53 @@ class TestGetEFIBootMGR(CiTestCase): | |||
2500 | 860 | } | 860 | } |
2501 | 861 | }, observed) | 861 | }, observed) |
2502 | 862 | 862 | ||
2503 | 863 | def test_parses_output_filter_missing(self): | ||
2504 | 864 | """ensure parsing ignores items in order that don't have entries""" | ||
2505 | 865 | self.in_chroot_subp_output.append((dedent( | ||
2506 | 866 | """\ | ||
2507 | 867 | BootCurrent: 0000 | ||
2508 | 868 | Timeout: 1 seconds | ||
2509 | 869 | BootOrder: 0000,0002,0001,0003,0004,0005,0006,0007 | ||
2510 | 870 | Boot0000* ubuntu HD(1,GPT)/File(\\EFI\\ubuntu\\shimx64.efi) | ||
2511 | 871 | Boot0001* CD/DVD Drive BBS(CDROM,,0x0) | ||
2512 | 872 | Boot0002* Hard Drive BBS(HD,,0x0) | ||
2513 | 873 | Boot0003* UEFI:CD/DVD Drive BBS(129,,0x0) | ||
2514 | 874 | Boot0004* UEFI:Removable Device BBS(130,,0x0) | ||
2515 | 875 | Boot0005* UEFI:Network Device BBS(131,,0x0) | ||
2516 | 876 | """), '')) | ||
2517 | 877 | observed = util.get_efibootmgr('target') | ||
2518 | 878 | self.assertEquals({ | ||
2519 | 879 | 'current': '0000', | ||
2520 | 880 | 'timeout': '1 seconds', | ||
2521 | 881 | 'order': ['0000', '0002', '0001', '0003', '0004', '0005'], | ||
2522 | 882 | 'entries': { | ||
2523 | 883 | '0000': { | ||
2524 | 884 | 'name': 'ubuntu', | ||
2525 | 885 | 'path': 'HD(1,GPT)/File(\\EFI\\ubuntu\\shimx64.efi)', | ||
2526 | 886 | }, | ||
2527 | 887 | '0001': { | ||
2528 | 888 | 'name': 'CD/DVD Drive', | ||
2529 | 889 | 'path': 'BBS(CDROM,,0x0)', | ||
2530 | 890 | }, | ||
2531 | 891 | '0002': { | ||
2532 | 892 | 'name': 'Hard Drive', | ||
2533 | 893 | 'path': 'BBS(HD,,0x0)', | ||
2534 | 894 | }, | ||
2535 | 895 | '0003': { | ||
2536 | 896 | 'name': 'UEFI:CD/DVD Drive', | ||
2537 | 897 | 'path': 'BBS(129,,0x0)', | ||
2538 | 898 | }, | ||
2539 | 899 | '0004': { | ||
2540 | 900 | 'name': 'UEFI:Removable Device', | ||
2541 | 901 | 'path': 'BBS(130,,0x0)', | ||
2542 | 902 | }, | ||
2543 | 903 | '0005': { | ||
2544 | 904 | 'name': 'UEFI:Network Device', | ||
2545 | 905 | 'path': 'BBS(131,,0x0)', | ||
2546 | 906 | }, | ||
2547 | 907 | } | ||
2548 | 908 | }, observed) | ||
2549 | 909 | |||
2550 | 863 | 910 | ||
2551 | 864 | class TestUsesSystemd(CiTestCase): | 911 | class TestUsesSystemd(CiTestCase): |
2552 | 865 | 912 | ||
2553 | diff --git a/tests/vmtests/__init__.py b/tests/vmtests/__init__.py | |||
2554 | index 64fc867..5c30a83 100644 | |||
2555 | --- a/tests/vmtests/__init__.py | |||
2556 | +++ b/tests/vmtests/__init__.py | |||
2557 | @@ -49,6 +49,10 @@ OUTPUT_DISK_NAME = 'output_disk.img' | |||
2558 | 49 | BOOT_TIMEOUT = int(os.environ.get("CURTIN_VMTEST_BOOT_TIMEOUT", 300)) | 49 | BOOT_TIMEOUT = int(os.environ.get("CURTIN_VMTEST_BOOT_TIMEOUT", 300)) |
2559 | 50 | INSTALL_TIMEOUT = int(os.environ.get("CURTIN_VMTEST_INSTALL_TIMEOUT", 3000)) | 50 | INSTALL_TIMEOUT = int(os.environ.get("CURTIN_VMTEST_INSTALL_TIMEOUT", 3000)) |
2560 | 51 | REUSE_TOPDIR = bool(int(os.environ.get("CURTIN_VMTEST_REUSE_TOPDIR", 0))) | 51 | REUSE_TOPDIR = bool(int(os.environ.get("CURTIN_VMTEST_REUSE_TOPDIR", 0))) |
2561 | 52 | ADD_REPOS = os.environ.get("CURTIN_VMTEST_ADD_REPOS", "") | ||
2562 | 53 | UPGRADE_PACKAGES = os.environ.get("CURTIN_VMTEST_UPGRADE_PACKAGES", "") | ||
2563 | 54 | SYSTEM_UPGRADE = os.environ.get("CURTIN_VMTEST_SYSTEM_UPGRADE", "auto") | ||
2564 | 55 | |||
2565 | 52 | 56 | ||
2566 | 53 | _UNSUPPORTED_UBUNTU = None | 57 | _UNSUPPORTED_UBUNTU = None |
2567 | 54 | 58 | ||
2568 | @@ -346,8 +350,23 @@ class TempDir(object): | |||
2569 | 346 | stdout=DEVNULL, stderr=subprocess.STDOUT) | 350 | stdout=DEVNULL, stderr=subprocess.STDOUT) |
2570 | 347 | 351 | ||
2571 | 348 | 352 | ||
2572 | 353 | def skip_if_flag(flag): | ||
2573 | 354 | def decorator(func): | ||
2574 | 355 | """the name test_wrapper below has to start with test, or nose's | ||
2575 | 356 | filter will not run it.""" | ||
2576 | 357 | def test_wrapper(self, *args, **kwargs): | ||
2577 | 358 | val = getattr(self, flag, None) | ||
2578 | 359 | if val: | ||
2579 | 360 | self.skipTest("skip due to %s=%s" % (flag, val)) | ||
2580 | 361 | else: | ||
2581 | 362 | return func(self, *args, **kwargs) | ||
2582 | 363 | return test_wrapper | ||
2583 | 364 | return decorator | ||
2584 | 365 | |||
2585 | 366 | |||
2586 | 349 | class VMBaseClass(TestCase): | 367 | class VMBaseClass(TestCase): |
2587 | 350 | __test__ = False | 368 | __test__ = False |
2588 | 369 | expected_failure = False | ||
2589 | 351 | arch_skip = [] | 370 | arch_skip = [] |
2590 | 352 | boot_timeout = BOOT_TIMEOUT | 371 | boot_timeout = BOOT_TIMEOUT |
2591 | 353 | collect_scripts = [textwrap.dedent(""" | 372 | collect_scripts = [textwrap.dedent(""" |
2592 | @@ -708,8 +727,8 @@ class VMBaseClass(TestCase): | |||
2593 | 708 | cmd.extend([ | 727 | cmd.extend([ |
2594 | 709 | "--root-arg=root=%s" % root_url, | 728 | "--root-arg=root=%s" % root_url, |
2595 | 710 | "--append=overlayroot=tmpfs", | 729 | "--append=overlayroot=tmpfs", |
2596 | 711 | "--append=ip=dhcp", # enable networking | ||
2597 | 712 | ]) | 730 | ]) |
2598 | 731 | |||
2599 | 713 | # getting resolvconf configured is only fixed in bionic | 732 | # getting resolvconf configured is only fixed in bionic |
2600 | 714 | # the iscsi_auto handles resolvconf setup via call to | 733 | # the iscsi_auto handles resolvconf setup via call to |
2601 | 715 | # configure_networking in initramfs | 734 | # configure_networking in initramfs |
2602 | @@ -733,7 +752,7 @@ class VMBaseClass(TestCase): | |||
2603 | 733 | cls.network_state = curtin_net.parse_net_config(cls.conf_file) | 752 | cls.network_state = curtin_net.parse_net_config(cls.conf_file) |
2604 | 734 | logger.debug("Network state: {}".format(cls.network_state)) | 753 | logger.debug("Network state: {}".format(cls.network_state)) |
2605 | 735 | 754 | ||
2607 | 736 | # build -n arg list with macaddrs from net_config physical config | 755 | # build --netdev=arg list with 'physical' nics from net_config |
2608 | 737 | macs = [] | 756 | macs = [] |
2609 | 738 | interfaces = {} | 757 | interfaces = {} |
2610 | 739 | if cls.network_state: | 758 | if cls.network_state: |
2611 | @@ -744,16 +763,14 @@ class VMBaseClass(TestCase): | |||
2612 | 744 | hwaddr = iface.get('mac_address') | 763 | hwaddr = iface.get('mac_address') |
2613 | 745 | if iface['type'] == 'physical' and hwaddr: | 764 | if iface['type'] == 'physical' and hwaddr: |
2614 | 746 | macs.append(hwaddr) | 765 | macs.append(hwaddr) |
2625 | 747 | netdevs = [] | 766 | |
2626 | 748 | if len(macs) > 0: | 767 | if len(macs) == 0: |
2627 | 749 | # take first mac and mark it as the boot interface to prevent DHCP | 768 | macs = ["52:54:00:12:34:01"] |
2628 | 750 | # on multiple interfaces which can hang the install. | 769 | |
2629 | 751 | cmd.extend(["--append=BOOTIF=01-%s" % macs[0].replace(":", "-")]) | 770 | netdevs = ["--netdev=%s,mac=%s" % (DEFAULT_BRIDGE, m) for m in macs] |
2630 | 752 | for mac in macs: | 771 | |
2631 | 753 | netdevs.extend(["--netdev=" + DEFAULT_BRIDGE + | 772 | # Add kernel parameters to simulate network boot from first nic. |
2632 | 754 | ",mac={}".format(mac)]) | 773 | cmd.extend(kernel_boot_cmdline_for_mac(macs[0])) |
2623 | 755 | else: | ||
2624 | 756 | netdevs.extend(["--netdev=" + DEFAULT_BRIDGE]) | ||
2633 | 757 | 774 | ||
2634 | 758 | # build disk arguments | 775 | # build disk arguments |
2635 | 759 | disks = [] | 776 | disks = [] |
2636 | @@ -843,6 +860,38 @@ class VMBaseClass(TestCase): | |||
2637 | 843 | logger.info('Detected centos, adding default config %s', | 860 | logger.info('Detected centos, adding default config %s', |
2638 | 844 | centos_default) | 861 | centos_default) |
2639 | 845 | 862 | ||
2640 | 863 | add_repos = ADD_REPOS | ||
2641 | 864 | system_upgrade = SYSTEM_UPGRADE | ||
2642 | 865 | upgrade_packages = UPGRADE_PACKAGES | ||
2643 | 866 | if add_repos: | ||
2644 | 867 | # enable if user has set a value here | ||
2645 | 868 | if system_upgrade == "auto": | ||
2646 | 869 | system_upgrade = True | ||
2647 | 870 | logger.info('Adding apt repositories: %s', add_repos) | ||
2648 | 871 | repo_cfg = os.path.join(cls.td.install, 'add_repos.cfg') | ||
2649 | 872 | util.write_file(repo_cfg, | ||
2650 | 873 | generate_repo_config(add_repos.split(","))) | ||
2651 | 874 | configs.append(repo_cfg) | ||
2652 | 875 | elif system_upgrade == "auto": | ||
2653 | 876 | system_upgrade = False | ||
2654 | 877 | |||
2655 | 878 | if system_upgrade: | ||
2656 | 879 | logger.info('Enabling system_upgrade') | ||
2657 | 880 | system_upgrade_cfg = os.path.join(cls.td.install, | ||
2658 | 881 | 'system_upgrade.cfg') | ||
2659 | 882 | util.write_file(system_upgrade_cfg, | ||
2660 | 883 | "system_upgrade: {enabled: true}\n") | ||
2661 | 884 | configs.append(system_upgrade_cfg) | ||
2662 | 885 | |||
2663 | 886 | if upgrade_packages: | ||
2664 | 887 | logger.info('Adding late-commands to install packages: %s', | ||
2665 | 888 | upgrade_packages) | ||
2666 | 889 | upgrade_pkg_cfg = os.path.join(cls.td.install, 'upgrade_pkg.cfg') | ||
2667 | 890 | util.write_file( | ||
2668 | 891 | upgrade_pkg_cfg, | ||
2669 | 892 | generate_upgrade_config(upgrade_packages.split(","))) | ||
2670 | 893 | configs.append(upgrade_pkg_cfg) | ||
2671 | 894 | |||
2672 | 846 | # set reporting logger | 895 | # set reporting logger |
2673 | 847 | cls.reporting_log = os.path.join(cls.td.logs, 'webhooks-events.json') | 896 | cls.reporting_log = os.path.join(cls.td.logs, 'webhooks-events.json') |
2674 | 848 | reporting_logger = CaptureReporting(cls.reporting_log) | 897 | reporting_logger = CaptureReporting(cls.reporting_log) |
2675 | @@ -925,6 +974,10 @@ class VMBaseClass(TestCase): | |||
2676 | 925 | else: | 974 | else: |
2677 | 926 | logger.warn("Boot for install did not produce a console log.") | 975 | logger.warn("Boot for install did not produce a console log.") |
2678 | 927 | 976 | ||
2679 | 977 | if cls.expected_failure: | ||
2680 | 978 | logger.debug('Expected Failure: skipping boot stage') | ||
2681 | 979 | return | ||
2682 | 980 | |||
2683 | 928 | logger.debug('') | 981 | logger.debug('') |
2684 | 929 | try: | 982 | try: |
2685 | 930 | if os.path.exists(cls.install_log): | 983 | if os.path.exists(cls.install_log): |
2686 | @@ -1268,6 +1321,7 @@ class VMBaseClass(TestCase): | |||
2687 | 1268 | ret[val[0]] = val[1] | 1321 | ret[val[0]] = val[1] |
2688 | 1269 | return ret | 1322 | return ret |
2689 | 1270 | 1323 | ||
2690 | 1324 | @skip_if_flag('expected_failure') | ||
2691 | 1271 | def test_fstab(self): | 1325 | def test_fstab(self): |
2692 | 1272 | if self.fstab_expected is None: | 1326 | if self.fstab_expected is None: |
2693 | 1273 | return | 1327 | return |
2694 | @@ -1283,13 +1337,21 @@ class VMBaseClass(TestCase): | |||
2695 | 1283 | self.assertEqual(fstab_entry.split(' ')[1], | 1337 | self.assertEqual(fstab_entry.split(' ')[1], |
2696 | 1284 | mntpoint) | 1338 | mntpoint) |
2697 | 1285 | 1339 | ||
2698 | 1340 | @skip_if_flag('expected_failure') | ||
2699 | 1286 | def test_dname(self, disk_to_check=None): | 1341 | def test_dname(self, disk_to_check=None): |
2700 | 1342 | if "trusty" in [self.release, self.target_release]: | ||
2701 | 1343 | raise SkipTest( | ||
2702 | 1344 | "(LP: #1523037): dname does not work on trusty kernels") | ||
2703 | 1345 | |||
2704 | 1287 | if not disk_to_check: | 1346 | if not disk_to_check: |
2705 | 1288 | disk_to_check = self.disk_to_check | 1347 | disk_to_check = self.disk_to_check |
2706 | 1289 | if disk_to_check is None: | 1348 | if disk_to_check is None: |
2707 | 1349 | logger.debug('test_dname: no disks to check') | ||
2708 | 1290 | return | 1350 | return |
2709 | 1351 | logger.debug('test_dname: checking disks: %s', disk_to_check) | ||
2710 | 1291 | path = self.collect_path("ls_dname") | 1352 | path = self.collect_path("ls_dname") |
2711 | 1292 | if not os.path.exists(path): | 1353 | if not os.path.exists(path): |
2712 | 1354 | logger.debug('test_dname: no "ls_dname" file: %s', path) | ||
2713 | 1293 | return | 1355 | return |
2714 | 1294 | contents = util.load_file(path) | 1356 | contents = util.load_file(path) |
2715 | 1295 | for diskname, part in self.disk_to_check: | 1357 | for diskname, part in self.disk_to_check: |
2716 | @@ -1298,6 +1360,7 @@ class VMBaseClass(TestCase): | |||
2717 | 1298 | self.assertIn(link, contents) | 1360 | self.assertIn(link, contents) |
2718 | 1299 | self.assertIn(diskname, contents) | 1361 | self.assertIn(diskname, contents) |
2719 | 1300 | 1362 | ||
2720 | 1363 | @skip_if_flag('expected_failure') | ||
2721 | 1301 | def test_reporting_data(self): | 1364 | def test_reporting_data(self): |
2722 | 1302 | with open(self.reporting_log, 'r') as fp: | 1365 | with open(self.reporting_log, 'r') as fp: |
2723 | 1303 | data = json.load(fp) | 1366 | data = json.load(fp) |
2724 | @@ -1317,6 +1380,7 @@ class VMBaseClass(TestCase): | |||
2725 | 1317 | self.assertIn('path', files) | 1380 | self.assertIn('path', files) |
2726 | 1318 | self.assertEqual('/tmp/install.log', files.get('path', '')) | 1381 | self.assertEqual('/tmp/install.log', files.get('path', '')) |
2727 | 1319 | 1382 | ||
2728 | 1383 | @skip_if_flag('expected_failure') | ||
2729 | 1320 | def test_interfacesd_eth0_removed(self): | 1384 | def test_interfacesd_eth0_removed(self): |
2730 | 1321 | """ Check that curtin has removed /etc/network/interfaces.d/eth0.cfg | 1385 | """ Check that curtin has removed /etc/network/interfaces.d/eth0.cfg |
2731 | 1322 | by examining the output of a find /etc/network > find_interfaces.d | 1386 | by examining the output of a find /etc/network > find_interfaces.d |
2732 | @@ -1325,9 +1389,9 @@ class VMBaseClass(TestCase): | |||
2733 | 1325 | self.assertNotIn("/etc/network/interfaces.d/eth0.cfg", | 1389 | self.assertNotIn("/etc/network/interfaces.d/eth0.cfg", |
2734 | 1326 | interfacesd.split("\n")) | 1390 | interfacesd.split("\n")) |
2735 | 1327 | 1391 | ||
2736 | 1392 | @skip_if_flag('expected_failure') | ||
2737 | 1328 | def test_installed_correct_kernel_package(self): | 1393 | def test_installed_correct_kernel_package(self): |
2738 | 1329 | """ Test curtin installs the correct kernel package. """ | 1394 | """ Test curtin installs the correct kernel package. """ |
2739 | 1330 | |||
2740 | 1331 | # target_distro is set for non-ubuntu targets | 1395 | # target_distro is set for non-ubuntu targets |
2741 | 1332 | if self.target_distro is not None: | 1396 | if self.target_distro is not None: |
2742 | 1333 | raise SkipTest("Can't check non-ubuntu kernel packages") | 1397 | raise SkipTest("Can't check non-ubuntu kernel packages") |
2743 | @@ -1374,6 +1438,7 @@ class VMBaseClass(TestCase): | |||
2744 | 1374 | self._debian_packages = pkgs | 1438 | self._debian_packages = pkgs |
2745 | 1375 | return self._debian_packages | 1439 | return self._debian_packages |
2746 | 1376 | 1440 | ||
2747 | 1441 | @skip_if_flag('expected_failure') | ||
2748 | 1377 | def test_swaps_used(self): | 1442 | def test_swaps_used(self): |
2749 | 1378 | cfg = yaml.load(self.load_collect_file("root/curtin-install-cfg.yaml")) | 1443 | cfg = yaml.load(self.load_collect_file("root/curtin-install-cfg.yaml")) |
2750 | 1379 | stgcfg = cfg.get("storage", {}).get("config", []) | 1444 | stgcfg = cfg.get("storage", {}).get("config", []) |
2751 | @@ -1476,7 +1541,7 @@ class PsuedoVMBaseClass(VMBaseClass): | |||
2752 | 1476 | def test_fstab(self): | 1541 | def test_fstab(self): |
2753 | 1477 | pass | 1542 | pass |
2754 | 1478 | 1543 | ||
2756 | 1479 | def test_dname(self): | 1544 | def test_dname(self, disk_to_check=None): |
2757 | 1480 | pass | 1545 | pass |
2758 | 1481 | 1546 | ||
2759 | 1482 | def test_interfacesd_eth0_removed(self): | 1547 | def test_interfacesd_eth0_removed(self): |
2760 | @@ -1512,14 +1577,19 @@ def get_rfc4173(ip, port, target, user=None, pword=None, | |||
2761 | 1512 | 1577 | ||
2762 | 1513 | 1578 | ||
2763 | 1514 | def find_error_context(err_match, contents, nrchars=200): | 1579 | def find_error_context(err_match, contents, nrchars=200): |
2764 | 1580 | traceback_end = re.compile(r'Error:.*') | ||
2765 | 1581 | end_match = traceback_end.search(contents, err_match.start()) | ||
2766 | 1515 | context_start = err_match.start() - nrchars | 1582 | context_start = err_match.start() - nrchars |
2768 | 1516 | context_end = err_match.end() + nrchars | 1583 | if end_match: |
2769 | 1584 | context_end = end_match.end() | ||
2770 | 1585 | else: | ||
2771 | 1586 | context_end = err_match.end() + nrchars | ||
2772 | 1517 | # extract contents, split into lines, drop the first and last partials | 1587 | # extract contents, split into lines, drop the first and last partials |
2773 | 1518 | # recombine and return | 1588 | # recombine and return |
2774 | 1519 | return "\n".join(contents[context_start:context_end].splitlines()[1:-1]) | 1589 | return "\n".join(contents[context_start:context_end].splitlines()[1:-1]) |
2775 | 1520 | 1590 | ||
2776 | 1521 | 1591 | ||
2778 | 1522 | def check_install_log(install_log): | 1592 | def check_install_log(install_log, nrchars=200): |
2779 | 1523 | # look if install is OK via curtin 'Installation ok" | 1593 | # look if install is OK via curtin 'Installation ok" |
2780 | 1524 | # if we dont find that, scan for known error messages and report | 1594 | # if we dont find that, scan for known error messages and report |
2781 | 1525 | # if we don't see any errors, fail with general error | 1595 | # if we don't see any errors, fail with general error |
2782 | @@ -1529,11 +1599,11 @@ def check_install_log(install_log): | |||
2783 | 1529 | # regexps expected in curtin output | 1599 | # regexps expected in curtin output |
2784 | 1530 | install_pass = INSTALL_PASS_MSG | 1600 | install_pass = INSTALL_PASS_MSG |
2785 | 1531 | install_fail = "({})".format("|".join([ | 1601 | install_fail = "({})".format("|".join([ |
2787 | 1532 | 'Installation\ failed', | 1602 | 'Installation failed', |
2788 | 1533 | 'ImportError: No module named.*', | 1603 | 'ImportError: No module named.*', |
2789 | 1534 | 'Unexpected error while running command', | 1604 | 'Unexpected error while running command', |
2790 | 1535 | 'E: Unable to locate package.*', | 1605 | 'E: Unable to locate package.*', |
2792 | 1536 | 'Traceback.*most recent call last.*:'])) | 1606 | 'cloud-init.*: Traceback.*'])) |
2793 | 1537 | 1607 | ||
2794 | 1538 | install_is_ok = re.findall(install_pass, install_log) | 1608 | install_is_ok = re.findall(install_pass, install_log) |
2795 | 1539 | # always scan for errors | 1609 | # always scan for errors |
2796 | @@ -1542,7 +1612,7 @@ def check_install_log(install_log): | |||
2797 | 1542 | errmsg = ('Failed to verify Installation is OK') | 1612 | errmsg = ('Failed to verify Installation is OK') |
2798 | 1543 | 1613 | ||
2799 | 1544 | for e in found_errors: | 1614 | for e in found_errors: |
2801 | 1545 | errors.append(find_error_context(e, install_log)) | 1615 | errors.append(find_error_context(e, install_log, nrchars=nrchars)) |
2802 | 1546 | errmsg = ('Errors during curtin installer') | 1616 | errmsg = ('Errors during curtin installer') |
2803 | 1547 | 1617 | ||
2804 | 1548 | return errmsg, errors | 1618 | return errmsg, errors |
2805 | @@ -1737,6 +1807,27 @@ def get_lan_ip(): | |||
2806 | 1737 | return addr | 1807 | return addr |
2807 | 1738 | 1808 | ||
2808 | 1739 | 1809 | ||
2809 | 1810 | def kernel_boot_cmdline_for_mac(mac): | ||
2810 | 1811 | """Return kernel command line arguments for initramfs dhcp on mac. | ||
2811 | 1812 | |||
2812 | 1813 | Ubuntu initramfs respect klibc's ip= format for network config in | ||
2813 | 1814 | initramfs. That format is: | ||
2814 | 1815 | ip=addr:server:gateway:netmask:interface:proto | ||
2815 | 1816 | see /usr/share/doc/libklibc/README.ipconfig.gz for more info. | ||
2816 | 1817 | |||
2817 | 1818 | If no 'interface' field is provided, dhcp will be tried on all. To allow | ||
2818 | 1819 | specifying the interface in ip= parameter without knowing the name of the | ||
2819 | 1820 | device that the kernel will choose, cloud-initramfs-dyn-netconf replaces | ||
2820 | 1821 | 'BOOTIF' in the ip= parameter with the name found in BOOTIF. | ||
2821 | 1822 | |||
2822 | 1823 | Network bootloaders append to kernel command line | ||
2823 | 1824 | BOOTIF=01-<mac-address> to indicate which mac they booted from. | ||
2824 | 1825 | |||
2825 | 1826 | Paired with BOOTIF replacement this ends up being: ip=::::eth0:dhcp.""" | ||
2826 | 1827 | return ["--append=ip=:::::BOOTIF:dhcp", | ||
2827 | 1828 | "--append=BOOTIF=01-%s" % mac.replace(":", "-")] | ||
2828 | 1829 | |||
2829 | 1830 | |||
2830 | 1740 | def is_unsupported_ubuntu(release): | 1831 | def is_unsupported_ubuntu(release): |
2831 | 1741 | global _UNSUPPORTED_UBUNTU | 1832 | global _UNSUPPORTED_UBUNTU |
2832 | 1742 | udi = 'ubuntu-distro-info' | 1833 | udi = 'ubuntu-distro-info' |
2833 | @@ -1758,6 +1849,42 @@ def is_unsupported_ubuntu(release): | |||
2834 | 1758 | return release in _UNSUPPORTED_UBUNTU | 1849 | return release in _UNSUPPORTED_UBUNTU |
2835 | 1759 | 1850 | ||
2836 | 1760 | 1851 | ||
2837 | 1852 | def generate_repo_config(repos): | ||
2838 | 1853 | """Generate apt yaml configuration to add specified repositories. | ||
2839 | 1854 | |||
2840 | 1855 | @param repos: A list of add-apt-repository strings. | ||
2841 | 1856 | 'proposed' is a special case to enable the proposed | ||
2842 | 1857 | pocket of a particular release. | ||
2843 | 1858 | @returns: string: A yaml string | ||
2844 | 1859 | """ | ||
2845 | 1860 | sources = {"add_repos_%02d" % idx: {'source': v} | ||
2846 | 1861 | for idx, v in enumerate(repos)} | ||
2847 | 1862 | return yaml.dump({'apt': {'sources': sources}}) | ||
2848 | 1863 | |||
2849 | 1864 | |||
2850 | 1865 | def generate_upgrade_config(packages, singlecmd=True): | ||
2851 | 1866 | """Generate late_command yaml to install packages with apt. | ||
2852 | 1867 | |||
2853 | 1868 | @param packages: list of package names. | ||
2854 | 1869 | @param singlecmd: Boolean, defaults to True which combines | ||
2855 | 1870 | package installs into a single apt command | ||
2856 | 1871 | If False, a separate command is issued for | ||
2857 | 1872 | each package. | ||
2858 | 1873 | @returns: String of yaml | ||
2859 | 1874 | """ | ||
2860 | 1875 | if not packages: | ||
2861 | 1876 | return "" | ||
2862 | 1877 | cmds = {} | ||
2863 | 1878 | base_cmd = ['curtin', 'in-target', '--', 'apt-get', '-y', 'install'] | ||
2864 | 1879 | if singlecmd: | ||
2865 | 1880 | cmds["install_pkg_00"] = base_cmd + packages | ||
2866 | 1881 | else: | ||
2867 | 1882 | for idx, package in enumerate(packages): | ||
2868 | 1883 | cmds["install_pkg_%02d" % idx] = base_cmd + package | ||
2869 | 1884 | |||
2870 | 1885 | return yaml.dump({'late_commands': cmds}) | ||
2871 | 1886 | |||
2872 | 1887 | |||
2873 | 1761 | apply_keep_settings() | 1888 | apply_keep_settings() |
2874 | 1762 | logger = _initialize_logging() | 1889 | logger = _initialize_logging() |
2875 | 1763 | 1890 | ||
2876 | diff --git a/tests/vmtests/helpers.py b/tests/vmtests/helpers.py | |||
2877 | index 7fc92e1..10e20b3 100644 | |||
2878 | --- a/tests/vmtests/helpers.py | |||
2879 | +++ b/tests/vmtests/helpers.py | |||
2880 | @@ -86,18 +86,7 @@ def check_call(cmd, signal=signal.SIGTERM, **kwargs): | |||
2881 | 86 | return Command(cmd, signal).run(**kwargs) | 86 | return Command(cmd, signal).run(**kwargs) |
2882 | 87 | 87 | ||
2883 | 88 | 88 | ||
2896 | 89 | def find_releases_by_distro(): | 89 | def find_testcases(): |
2885 | 90 | """ | ||
2886 | 91 | Returns a dictionary of distros and the distro releases that will be tested | ||
2887 | 92 | |||
2888 | 93 | distros: | ||
2889 | 94 | ubuntu: | ||
2890 | 95 | releases: [] | ||
2891 | 96 | krels: [] | ||
2892 | 97 | centos: | ||
2893 | 98 | releases: [] | ||
2894 | 99 | krels: [] | ||
2895 | 100 | """ | ||
2897 | 101 | # Use the TestLoder to load all test cases defined within tests/vmtests/ | 90 | # Use the TestLoder to load all test cases defined within tests/vmtests/ |
2898 | 102 | # and figure out what distros and releases they are testing. Any tests | 91 | # and figure out what distros and releases they are testing. Any tests |
2899 | 103 | # which are disabled will be excluded. | 92 | # which are disabled will be excluded. |
2900 | @@ -108,32 +97,60 @@ def find_releases_by_distro(): | |||
2901 | 108 | root_dir = os.path.split(os.path.split(tests_dir)[0])[0] | 97 | root_dir = os.path.split(os.path.split(tests_dir)[0])[0] |
2902 | 109 | # Find all test modules defined in curtin/tests/vmtests/ | 98 | # Find all test modules defined in curtin/tests/vmtests/ |
2903 | 110 | module_test_suites = loader.discover(tests_dir, top_level_dir=root_dir) | 99 | module_test_suites = loader.discover(tests_dir, top_level_dir=root_dir) |
2904 | 111 | # find all distros and releases tested for each distro | ||
2905 | 112 | releases = [] | ||
2906 | 113 | krels = [] | ||
2907 | 114 | rel_by_dist = {} | ||
2908 | 115 | for mts in module_test_suites: | 100 | for mts in module_test_suites: |
2909 | 116 | for class_test_suite in mts: | 101 | for class_test_suite in mts: |
2910 | 117 | for test_case in class_test_suite: | 102 | for test_case in class_test_suite: |
2911 | 118 | # skip disabled tests | 103 | # skip disabled tests |
2912 | 119 | if not getattr(test_case, '__test__', False): | 104 | if not getattr(test_case, '__test__', False): |
2913 | 120 | continue | 105 | continue |
2930 | 121 | for (dist, rel, krel) in ( | 106 | yield test_case |
2931 | 122 | (getattr(test_case, a, None) for a in attrs) | 107 | |
2932 | 123 | for attrs in (('distro', 'release', 'krel'), | 108 | |
2933 | 124 | ('target_distro', 'target_release', | 109 | def find_arches(): |
2934 | 125 | 'krel'))): | 110 | """ |
2935 | 126 | 111 | Return a list of uniq arch values from test cases | |
2936 | 127 | if dist and rel: | 112 | """ |
2937 | 128 | distro = rel_by_dist.get(dist, {'releases': [], | 113 | arches = [] |
2938 | 129 | 'krels': []}) | 114 | for test_case in find_testcases(): |
2939 | 130 | releases = distro.get('releases') | 115 | arch = getattr(test_case, 'arch', None) |
2940 | 131 | krels = distro.get('krels') | 116 | if arch and arch not in arches: |
2941 | 132 | if rel not in releases: | 117 | arches.append(arch) |
2942 | 133 | releases.append(rel) | 118 | return arches |
2943 | 134 | if krel and krel not in krels: | 119 | |
2944 | 135 | krels.append(krel) | 120 | |
2945 | 136 | rel_by_dist.update({dist: distro}) | 121 | def find_releases_by_distro(): |
2946 | 122 | """ | ||
2947 | 123 | Returns a dictionary of distros and the distro releases that will be tested | ||
2948 | 124 | |||
2949 | 125 | distros: | ||
2950 | 126 | ubuntu: | ||
2951 | 127 | releases: [] | ||
2952 | 128 | krels: [] | ||
2953 | 129 | centos: | ||
2954 | 130 | releases: [] | ||
2955 | 131 | krels: [] | ||
2956 | 132 | """ | ||
2957 | 133 | # find all distros and releases tested for each distro | ||
2958 | 134 | releases = [] | ||
2959 | 135 | krels = [] | ||
2960 | 136 | rel_by_dist = {} | ||
2961 | 137 | for test_case in find_testcases(): | ||
2962 | 138 | for (dist, rel, krel) in ( | ||
2963 | 139 | (getattr(test_case, a, None) for a in attrs) | ||
2964 | 140 | for attrs in (('distro', 'release', 'krel'), | ||
2965 | 141 | ('target_distro', 'target_release', | ||
2966 | 142 | 'krel'))): | ||
2967 | 143 | |||
2968 | 144 | if dist and rel: | ||
2969 | 145 | distro = rel_by_dist.get(dist, {'releases': [], | ||
2970 | 146 | 'krels': []}) | ||
2971 | 147 | releases = distro.get('releases') | ||
2972 | 148 | krels = distro.get('krels') | ||
2973 | 149 | if rel not in releases: | ||
2974 | 150 | releases.append(rel) | ||
2975 | 151 | if krel and krel not in krels: | ||
2976 | 152 | krels.append(krel) | ||
2977 | 153 | rel_by_dist.update({dist: distro}) | ||
2978 | 137 | 154 | ||
2979 | 138 | return rel_by_dist | 155 | return rel_by_dist |
2980 | 139 | 156 | ||
2981 | diff --git a/tests/vmtests/test_basic.py b/tests/vmtests/test_basic.py | |||
2982 | index 2d98514..2e47cb6 100644 | |||
2983 | --- a/tests/vmtests/test_basic.py | |||
2984 | +++ b/tests/vmtests/test_basic.py | |||
2985 | @@ -6,6 +6,7 @@ from . import ( | |||
2986 | 6 | from .releases import base_vm_classes as relbase | 6 | from .releases import base_vm_classes as relbase |
2987 | 7 | 7 | ||
2988 | 8 | import textwrap | 8 | import textwrap |
2989 | 9 | from unittest import SkipTest | ||
2990 | 9 | 10 | ||
2991 | 10 | 11 | ||
2992 | 11 | class TestBasicAbs(VMBaseClass): | 12 | class TestBasicAbs(VMBaseClass): |
2993 | @@ -58,7 +59,10 @@ class TestBasicAbs(VMBaseClass): | |||
2994 | 58 | "proc_partitions", | 59 | "proc_partitions", |
2995 | 59 | "root/curtin-install.log", "root/curtin-install-cfg.yaml"]) | 60 | "root/curtin-install.log", "root/curtin-install-cfg.yaml"]) |
2996 | 60 | 61 | ||
2998 | 61 | def test_ptable(self): | 62 | def test_ptable(self, disk_to_check=None): |
2999 | 63 | if "trusty" in [self.release, self.target_release]: | ||
3000 | 64 | raise SkipTest("No PTTYPE blkid output on trusty") | ||
3001 | 65 | |||
3002 | 62 | blkid_info = self.get_blkid_data("blkid_output_vda") | 66 | blkid_info = self.get_blkid_data("blkid_output_vda") |
3003 | 63 | self.assertEquals(blkid_info["PTTYPE"], "dos") | 67 | self.assertEquals(blkid_info["PTTYPE"], "dos") |
3004 | 64 | 68 | ||
3005 | @@ -143,18 +147,14 @@ class TestBasicAbs(VMBaseClass): | |||
3006 | 143 | class TrustyTestBasic(relbase.trusty, TestBasicAbs): | 147 | class TrustyTestBasic(relbase.trusty, TestBasicAbs): |
3007 | 144 | __test__ = True | 148 | __test__ = True |
3008 | 145 | 149 | ||
3009 | 146 | # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect | ||
3010 | 147 | # sda-part2 to exist in /dev/disk/by-dname as we can on other releases | ||
3011 | 148 | # when dname works on trusty, then we need to re-enable by removing line. | ||
3012 | 149 | def test_dname(self): | ||
3013 | 150 | print("test_dname does not work for Trusty") | ||
3014 | 151 | 150 | ||
3017 | 152 | def test_ptable(self): | 151 | class TrustyHWEXTestBasic(relbase.trusty_hwe_x, TrustyTestBasic): |
3018 | 153 | print("test_ptable does not work for Trusty") | 152 | __test__ = True |
3019 | 154 | 153 | ||
3020 | 155 | 154 | ||
3022 | 156 | class TrustyHWEXTestBasic(relbase.trusty_hwe_x, TrustyTestBasic): | 155 | class XenialGAi386TestBasic(relbase.xenial_ga, TestBasicAbs): |
3023 | 157 | __test__ = True | 156 | __test__ = True |
3024 | 157 | arch = 'i386' | ||
3025 | 158 | 158 | ||
3026 | 159 | 159 | ||
3027 | 160 | class XenialGATestBasic(relbase.xenial_ga, TestBasicAbs): | 160 | class XenialGATestBasic(relbase.xenial_ga, TestBasicAbs): |
3028 | @@ -210,6 +210,9 @@ class TestBasicScsiAbs(TestBasicAbs): | |||
3029 | 210 | "ls_disk_id", "proc_partitions"]) | 210 | "ls_disk_id", "proc_partitions"]) |
3030 | 211 | 211 | ||
3031 | 212 | def test_ptable(self): | 212 | def test_ptable(self): |
3032 | 213 | if "trusty" in [self.release, self.target_release]: | ||
3033 | 214 | raise SkipTest("No PTTYPE blkid output on trusty") | ||
3034 | 215 | |||
3035 | 213 | blkid_info = self.get_blkid_data("blkid_output_sda") | 216 | blkid_info = self.get_blkid_data("blkid_output_sda") |
3036 | 214 | self.assertEquals(blkid_info["PTTYPE"], "dos") | 217 | self.assertEquals(blkid_info["PTTYPE"], "dos") |
3037 | 215 | 218 | ||
3038 | diff --git a/tests/vmtests/test_centos_basic.py b/tests/vmtests/test_centos_basic.py | |||
3039 | index b576279..7857e74 100644 | |||
3040 | --- a/tests/vmtests/test_centos_basic.py | |||
3041 | +++ b/tests/vmtests/test_centos_basic.py | |||
3042 | @@ -11,7 +11,6 @@ import textwrap | |||
3043 | 11 | class CentosTestBasicAbs(VMBaseClass): | 11 | class CentosTestBasicAbs(VMBaseClass): |
3044 | 12 | __test__ = False | 12 | __test__ = False |
3045 | 13 | conf_file = "examples/tests/centos_basic.yaml" | 13 | conf_file = "examples/tests/centos_basic.yaml" |
3046 | 14 | extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00" | ||
3047 | 15 | # XXX: command | tee output is required for Centos under SELinux | 14 | # XXX: command | tee output is required for Centos under SELinux |
3048 | 16 | # http://danwalsh.livejournal.com/22860.html | 15 | # http://danwalsh.livejournal.com/22860.html |
3049 | 17 | collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent( | 16 | collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent( |
3050 | @@ -74,7 +73,6 @@ class Centos66FromXenialTestBasic(relbase.centos66fromxenial, | |||
3051 | 74 | 73 | ||
3052 | 75 | class CentosTestBasicNetworkAbs(TestNetworkBaseTestsAbs): | 74 | class CentosTestBasicNetworkAbs(TestNetworkBaseTestsAbs): |
3053 | 76 | conf_file = "examples/tests/centos_basic.yaml" | 75 | conf_file = "examples/tests/centos_basic.yaml" |
3054 | 77 | extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00" | ||
3055 | 78 | collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [ | 76 | collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [ |
3056 | 79 | textwrap.dedent(""" | 77 | textwrap.dedent(""" |
3057 | 80 | cd OUTPUT_COLLECT_D | 78 | cd OUTPUT_COLLECT_D |
3058 | diff --git a/tests/vmtests/test_fs_battery.py b/tests/vmtests/test_fs_battery.py | |||
3059 | index 5798d48..423cc1e 100644 | |||
3060 | --- a/tests/vmtests/test_fs_battery.py | |||
3061 | +++ b/tests/vmtests/test_fs_battery.py | |||
3062 | @@ -52,6 +52,12 @@ class TestFsBattery(VMBaseClass): | |||
3063 | 52 | cat /proc/partitions > proc_partitions | 52 | cat /proc/partitions > proc_partitions |
3064 | 53 | find /etc/network/interfaces.d > find_interfacesd | 53 | find /etc/network/interfaces.d > find_interfacesd |
3065 | 54 | cat /proc/cmdline > cmdline | 54 | cat /proc/cmdline > cmdline |
3066 | 55 | cat /etc/fstab > fstab | ||
3067 | 56 | cat /proc/1/mountinfo > mountinfo | ||
3068 | 57 | |||
3069 | 58 | for p in /my/bind-over-var-lib/apt /my/bind-ro-etc/passwd; do | ||
3070 | 59 | [ -e "$p" ] && echo "$p: present" || echo "$p: missing" | ||
3071 | 60 | done > my-path-checks | ||
3072 | 55 | 61 | ||
3073 | 56 | set +x | 62 | set +x |
3074 | 57 | serial="fsbattery" | 63 | serial="fsbattery" |
3075 | @@ -151,6 +157,49 @@ class TestFsBattery(VMBaseClass): | |||
3076 | 151 | ["%s umount: PASS" % k for k in entries]) | 157 | ["%s umount: PASS" % k for k in entries]) |
3077 | 152 | self.assertEqual(sorted(expected), sorted(results)) | 158 | self.assertEqual(sorted(expected), sorted(results)) |
3078 | 153 | 159 | ||
3079 | 160 | def test_fstab_has_mounts(self): | ||
3080 | 161 | """Verify each of the expected "my" mounts got into fstab.""" | ||
3081 | 162 | expected = [ | ||
3082 | 163 | "none /my/tmpfs tmpfs size=4194304 0 0".split(), | ||
3083 | 164 | "none /my/ramfs ramfs defaults 0 0".split(), | ||
3084 | 165 | "/my/bind-over-var-lib /var/lib none bind 0 0".split(), | ||
3085 | 166 | "/etc /my/bind-ro-etc none bind,ro 0 0".split(), | ||
3086 | 167 | ] | ||
3087 | 168 | fstab_found = [ | ||
3088 | 169 | l.split() for l in self.load_collect_file("fstab").splitlines()] | ||
3089 | 170 | self.assertEqual(expected, [e for e in expected if e in fstab_found]) | ||
3090 | 171 | |||
3091 | 172 | def test_mountinfo_has_mounts(self): | ||
3092 | 173 | """Verify the my mounts got into mountinfo. | ||
3093 | 174 | |||
3094 | 175 | This is a light check that things got mounted. We do not check | ||
3095 | 176 | options as to not break on different kernel behavior. | ||
3096 | 177 | Maybe it could/should.""" | ||
3097 | 178 | # mountinfo has src and path as 4th and 5th field. | ||
3098 | 179 | data = self.load_collect_file("mountinfo").splitlines() | ||
3099 | 180 | dest_src = {} | ||
3100 | 181 | for line in data: | ||
3101 | 182 | toks = line.split() | ||
3102 | 183 | if not (toks[3].startswith("/my/") or toks[4].startswith("/my/")): | ||
3103 | 184 | continue | ||
3104 | 185 | dest_src[toks[4]] = toks[3] | ||
3105 | 186 | self.assertTrue("/my/ramfs" in dest_src) | ||
3106 | 187 | self.assertTrue("/my/tmpfs" in dest_src) | ||
3107 | 188 | self.assertEqual(dest_src.get("/var/lib"), "/my/bind-over-var-lib") | ||
3108 | 189 | self.assertEqual(dest_src.get("/my/bind-ro-etc"), "/etc") | ||
3109 | 190 | |||
3110 | 191 | def test_expected_files_from_bind_mounts(self): | ||
3111 | 192 | data = self.load_collect_file("my-path-checks") | ||
3112 | 193 | # this file is <path>: (present|missing) | ||
3113 | 194 | paths = {} | ||
3114 | 195 | for line in data.splitlines(): | ||
3115 | 196 | path, _, val = line.partition(":") | ||
3116 | 197 | paths[path] = val.strip() | ||
3117 | 198 | |||
3118 | 199 | self.assertEqual( | ||
3119 | 200 | {'/my/bind-over-var-lib/apt': 'present', | ||
3120 | 201 | '/my/bind-ro-etc/passwd': 'present'}, paths) | ||
3121 | 202 | |||
3122 | 154 | 203 | ||
3123 | 155 | class TrustyTestFsBattery(relbase.trusty, TestFsBattery): | 204 | class TrustyTestFsBattery(relbase.trusty, TestFsBattery): |
3124 | 156 | __test__ = True | 205 | __test__ = True |
3125 | diff --git a/tests/vmtests/test_lvm.py b/tests/vmtests/test_lvm.py | |||
3126 | index 224fe64..ed708fd 100644 | |||
3127 | --- a/tests/vmtests/test_lvm.py | |||
3128 | +++ b/tests/vmtests/test_lvm.py | |||
3129 | @@ -2,7 +2,6 @@ | |||
3130 | 2 | 2 | ||
3131 | 3 | from . import VMBaseClass | 3 | from . import VMBaseClass |
3132 | 4 | from .releases import base_vm_classes as relbase | 4 | from .releases import base_vm_classes as relbase |
3133 | 5 | from unittest import SkipTest | ||
3134 | 6 | 5 | ||
3135 | 7 | import textwrap | 6 | import textwrap |
3136 | 8 | 7 | ||
3137 | @@ -10,11 +9,16 @@ import textwrap | |||
3138 | 10 | class TestLvmAbs(VMBaseClass): | 9 | class TestLvmAbs(VMBaseClass): |
3139 | 11 | conf_file = "examples/tests/lvm.yaml" | 10 | conf_file = "examples/tests/lvm.yaml" |
3140 | 12 | interactive = False | 11 | interactive = False |
3142 | 13 | extra_disks = [] | 12 | extra_disks = ['10G'] |
3143 | 13 | dirty_disks = True | ||
3144 | 14 | collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" | 14 | collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent(""" |
3145 | 15 | cd OUTPUT_COLLECT_D | 15 | cd OUTPUT_COLLECT_D |
3146 | 16 | cat /etc/fstab > fstab | 16 | cat /etc/fstab > fstab |
3147 | 17 | ls /dev/disk/by-dname > ls_dname | 17 | ls /dev/disk/by-dname > ls_dname |
3148 | 18 | ls -al /dev/disk/by-dname > lsal_dname | ||
3149 | 19 | ls -al /dev/disk/by-id/ > ls_byid | ||
3150 | 20 | ls -al /dev/disk/by-uuid/ > ls_byuuid | ||
3151 | 21 | cat /proc/partitions > proc_partitions | ||
3152 | 18 | find /etc/network/interfaces.d > find_interfacesd | 22 | find /etc/network/interfaces.d > find_interfacesd |
3153 | 19 | pvdisplay -C --separator = -o vg_name,pv_name --noheadings > pvs | 23 | pvdisplay -C --separator = -o vg_name,pv_name --noheadings > pvs |
3154 | 20 | lvdisplay -C --separator = -o lv_name,vg_name --noheadings > lvs | 24 | lvdisplay -C --separator = -o lv_name,vg_name --noheadings > lvs |
3155 | @@ -41,14 +45,6 @@ class TestLvmAbs(VMBaseClass): | |||
3156 | 41 | self.output_files_exist( | 45 | self.output_files_exist( |
3157 | 42 | ["fstab", "ls_dname"]) | 46 | ["fstab", "ls_dname"]) |
3158 | 43 | 47 | ||
3159 | 44 | # FIXME(LP: #1523037): dname does not work on precise|trusty, so we cannot | ||
3160 | 45 | # expect sda-part2 to exist in /dev/disk/by-dname as we can on other | ||
3161 | 46 | # releases when dname works on trusty, then we need to re-enable by | ||
3162 | 47 | # removing line. | ||
3163 | 48 | def test_dname(self): | ||
3164 | 49 | if self.release in ['precise', 'trusty']: | ||
3165 | 50 | raise SkipTest("test_dname does not work for %s" % self.release) | ||
3166 | 51 | |||
3167 | 52 | 48 | ||
3168 | 53 | class TrustyTestLvm(relbase.trusty, TestLvmAbs): | 49 | class TrustyTestLvm(relbase.trusty, TestLvmAbs): |
3169 | 54 | __test__ = True | 50 | __test__ = True |
3170 | diff --git a/tests/vmtests/test_lvm_iscsi.py b/tests/vmtests/test_lvm_iscsi.py | |||
3171 | index 6b247c5..2a11d6e 100644 | |||
3172 | --- a/tests/vmtests/test_lvm_iscsi.py | |||
3173 | +++ b/tests/vmtests/test_lvm_iscsi.py | |||
3174 | @@ -9,6 +9,7 @@ import textwrap | |||
3175 | 9 | 9 | ||
3176 | 10 | class TestLvmIscsiAbs(TestLvmAbs, TestBasicIscsiAbs): | 10 | class TestLvmIscsiAbs(TestLvmAbs, TestBasicIscsiAbs): |
3177 | 11 | interactive = False | 11 | interactive = False |
3178 | 12 | dirty_disks = True | ||
3179 | 12 | iscsi_disks = [ | 13 | iscsi_disks = [ |
3180 | 13 | {'size': '6G'}, | 14 | {'size': '6G'}, |
3181 | 14 | {'size': '5G', 'auth': 'user:passw0rd', 'iauth': 'iuser:ipassw0rd'}] | 15 | {'size': '5G', 'auth': 'user:passw0rd', 'iauth': 'iuser:ipassw0rd'}] |
3182 | @@ -20,6 +21,8 @@ class TestLvmIscsiAbs(TestLvmAbs, TestBasicIscsiAbs): | |||
3183 | 20 | """ | 21 | """ |
3184 | 21 | cd OUTPUT_COLLECT_D | 22 | cd OUTPUT_COLLECT_D |
3185 | 22 | ls -al /sys/class/block/dm*/slaves/ > dm_slaves | 23 | ls -al /sys/class/block/dm*/slaves/ > dm_slaves |
3186 | 24 | cp -a /etc/udev/rules.d udev_rules_d | ||
3187 | 25 | cp -a /etc/iscsi etc_iscsi | ||
3188 | 23 | """)] | 26 | """)] |
3189 | 24 | 27 | ||
3190 | 25 | fstab_expected = { | 28 | fstab_expected = { |
3191 | @@ -29,8 +32,11 @@ class TestLvmIscsiAbs(TestLvmAbs, TestBasicIscsiAbs): | |||
3192 | 29 | 'UUID=a98f706b-b064-4682-8eb2-6c2c1284060c': '/mnt/iscsi4', | 32 | 'UUID=a98f706b-b064-4682-8eb2-6c2c1284060c': '/mnt/iscsi4', |
3193 | 30 | } | 33 | } |
3194 | 31 | disk_to_check = [('main_disk', 1), | 34 | disk_to_check = [('main_disk', 1), |
3197 | 32 | ('main_disk', 5), | 35 | ('main_disk', 2), |
3198 | 33 | ('main_disk', 6), | 36 | ('iscsi_disk1', 5), |
3199 | 37 | ('iscsi_disk1', 6), | ||
3200 | 38 | ('iscsi_disk2', 5), | ||
3201 | 39 | ('iscsi_disk2', 6), | ||
3202 | 34 | ('vg1-lv1', 0), | 40 | ('vg1-lv1', 0), |
3203 | 35 | ('vg1-lv2', 0), | 41 | ('vg1-lv2', 0), |
3204 | 36 | ('vg2-lv3', 0), | 42 | ('vg2-lv3', 0), |
3205 | diff --git a/tests/vmtests/test_mdadm_bcache.py b/tests/vmtests/test_mdadm_bcache.py | |||
3206 | index b0e8c8c..49d4782 100644 | |||
3207 | --- a/tests/vmtests/test_mdadm_bcache.py | |||
3208 | +++ b/tests/vmtests/test_mdadm_bcache.py | |||
3209 | @@ -17,11 +17,17 @@ class TestMdadmAbs(VMBaseClass): | |||
3210 | 17 | mdadm --detail --scan | grep -c ubuntu > mdadm_active1 | 17 | mdadm --detail --scan | grep -c ubuntu > mdadm_active1 |
3211 | 18 | grep -c active /proc/mdstat > mdadm_active2 | 18 | grep -c active /proc/mdstat > mdadm_active2 |
3212 | 19 | ls /dev/disk/by-dname > ls_dname | 19 | ls /dev/disk/by-dname > ls_dname |
3213 | 20 | ls -al /dev/disk/by-dname > lsal_dname | ||
3214 | 21 | ls -al /dev/disk/by-uuid > lsal_uuid | ||
3215 | 20 | find /etc/network/interfaces.d > find_interfacesd | 22 | find /etc/network/interfaces.d > find_interfacesd |
3216 | 21 | cat /proc/mdstat | tee mdstat | 23 | cat /proc/mdstat | tee mdstat |
3217 | 22 | cat /proc/partitions | tee procpartitions | 24 | cat /proc/partitions | tee procpartitions |
3218 | 23 | ls -1 /sys/class/block | tee sys_class_block | 25 | ls -1 /sys/class/block | tee sys_class_block |
3219 | 24 | ls -1 /dev/md* | tee dev_md | 26 | ls -1 /dev/md* | tee dev_md |
3220 | 27 | ls -al /sys/fs/bcache/* > lsal_sys_fs_bcache_star | ||
3221 | 28 | ls -al /dev/bcache* > lsal_dev_bcache_star | ||
3222 | 29 | ls -al /dev/bcache/by_uuid/* > lsal_dev_bcache_byuuid_star | ||
3223 | 30 | cp -a /var/log/syslog . | ||
3224 | 25 | """)] | 31 | """)] |
3225 | 26 | 32 | ||
3226 | 27 | def test_mdadm_output_files_exist(self): | 33 | def test_mdadm_output_files_exist(self): |
3227 | @@ -63,6 +69,7 @@ class TestMdadmBcacheAbs(TestMdadmAbs): | |||
3228 | 63 | cat /sys/block/bcache2/bcache/cache_mode >> bcache_cache_mode | 69 | cat /sys/block/bcache2/bcache/cache_mode >> bcache_cache_mode |
3229 | 64 | cat /proc/mounts > proc_mounts | 70 | cat /proc/mounts > proc_mounts |
3230 | 65 | find /etc/network/interfaces.d > find_interfacesd | 71 | find /etc/network/interfaces.d > find_interfacesd |
3231 | 72 | cp -a /etc/udev/rules.d etc_udev_rules.d | ||
3232 | 66 | """)] | 73 | """)] |
3233 | 67 | fstab_expected = { | 74 | fstab_expected = { |
3234 | 68 | '/dev/vda1': '/media/sda1', | 75 | '/dev/vda1': '/media/sda1', |
3235 | @@ -119,7 +126,6 @@ class TestMdadmBcacheAbs(TestMdadmAbs): | |||
3236 | 119 | self.check_file_regex("bcache_cache_mode", r"\[writearound\]") | 126 | self.check_file_regex("bcache_cache_mode", r"\[writearound\]") |
3237 | 120 | 127 | ||
3238 | 121 | def test_bcache_dnames(self): | 128 | def test_bcache_dnames(self): |
3239 | 122 | self.skip_by_date("1728742", fixby="2018-04-26") | ||
3240 | 123 | self.test_dname(disk_to_check=self.bcache_dnames) | 129 | self.test_dname(disk_to_check=self.bcache_dnames) |
3241 | 124 | 130 | ||
3242 | 125 | 131 | ||
3243 | @@ -131,26 +137,10 @@ class TrustyTestMdadmBcache(relbase.trusty, TestMdadmBcacheAbs): | |||
3244 | 131 | cls.skip_by_date("1754581", fixby="2018-06-22") | 137 | cls.skip_by_date("1754581", fixby="2018-06-22") |
3245 | 132 | super().setUpClass() | 138 | super().setUpClass() |
3246 | 133 | 139 | ||
3247 | 134 | # FIXME(LP: #1523037): dname does not work on trusty | ||
3248 | 135 | # when dname works on trusty, then we need to re-enable by removing line. | ||
3249 | 136 | def test_dname(self): | ||
3250 | 137 | print("test_dname does not work for Trusty") | ||
3251 | 138 | |||
3252 | 139 | def test_ptable(self): | ||
3253 | 140 | print("test_ptable does not work for Trusty") | ||
3254 | 141 | |||
3255 | 142 | 140 | ||
3256 | 143 | class TrustyHWEXTestMdadmBcache(relbase.trusty_hwe_x, TestMdadmBcacheAbs): | 141 | class TrustyHWEXTestMdadmBcache(relbase.trusty_hwe_x, TestMdadmBcacheAbs): |
3257 | 144 | __test__ = True | 142 | __test__ = True |
3258 | 145 | 143 | ||
3259 | 146 | # FIXME(LP: #1523037): dname does not work on trusty | ||
3260 | 147 | # when dname works on trusty, then we need to re-enable by removing line. | ||
3261 | 148 | def test_dname(self): | ||
3262 | 149 | print("test_dname does not work for Trusty") | ||
3263 | 150 | |||
3264 | 151 | def test_ptable(self): | ||
3265 | 152 | print("test_ptable does not work for Trusty") | ||
3266 | 153 | |||
3267 | 154 | 144 | ||
3268 | 155 | class XenialGATestMdadmBcache(relbase.xenial_ga, TestMdadmBcacheAbs): | 145 | class XenialGATestMdadmBcache(relbase.xenial_ga, TestMdadmBcacheAbs): |
3269 | 156 | __test__ = True | 146 | __test__ = True |
3270 | @@ -186,14 +176,6 @@ class TestMirrorbootAbs(TestMdadmAbs): | |||
3271 | 186 | class TrustyTestMirrorboot(relbase.trusty, TestMirrorbootAbs): | 176 | class TrustyTestMirrorboot(relbase.trusty, TestMirrorbootAbs): |
3272 | 187 | __test__ = True | 177 | __test__ = True |
3273 | 188 | 178 | ||
3274 | 189 | # FIXME(LP: #1523037): dname does not work on trusty | ||
3275 | 190 | # when dname works on trusty, then we need to re-enable by removing line. | ||
3276 | 191 | def test_dname(self): | ||
3277 | 192 | print("test_dname does not work for Trusty") | ||
3278 | 193 | |||
3279 | 194 | def test_ptable(self): | ||
3280 | 195 | print("test_ptable does not work for Trusty") | ||
3281 | 196 | |||
3282 | 197 | 179 | ||
3283 | 198 | class TrustyHWEXTestMirrorboot(relbase.trusty_hwe_x, TrustyTestMirrorboot): | 180 | class TrustyHWEXTestMirrorboot(relbase.trusty_hwe_x, TrustyTestMirrorboot): |
3284 | 199 | # This tests kernel upgrade in target | 181 | # This tests kernel upgrade in target |
3285 | @@ -234,14 +216,6 @@ class TrustyTestMirrorbootPartitions(relbase.trusty, | |||
3286 | 234 | TestMirrorbootPartitionsAbs): | 216 | TestMirrorbootPartitionsAbs): |
3287 | 235 | __test__ = True | 217 | __test__ = True |
3288 | 236 | 218 | ||
3289 | 237 | # FIXME(LP: #1523037): dname does not work on trusty | ||
3290 | 238 | # when dname works on trusty, then we need to re-enable by removing line. | ||
3291 | 239 | def test_dname(self): | ||
3292 | 240 | print("test_dname does not work for Trusty") | ||
3293 | 241 | |||
3294 | 242 | def test_ptable(self): | ||
3295 | 243 | print("test_ptable does not work for Trusty") | ||
3296 | 244 | |||
3297 | 245 | 219 | ||
3298 | 246 | class TrustyHWEXTestMirrorbootPartitions(relbase.trusty_hwe_x, | 220 | class TrustyHWEXTestMirrorbootPartitions(relbase.trusty_hwe_x, |
3299 | 247 | TrustyTestMirrorbootPartitions): | 221 | TrustyTestMirrorbootPartitions): |
3300 | @@ -293,14 +267,6 @@ class TrustyTestMirrorbootPartitionsUEFI(relbase.trusty, | |||
3301 | 293 | TestMirrorbootPartitionsUEFIAbs): | 267 | TestMirrorbootPartitionsUEFIAbs): |
3302 | 294 | __test__ = True | 268 | __test__ = True |
3303 | 295 | 269 | ||
3304 | 296 | # FIXME(LP: #1523037): dname does not work on trusty | ||
3305 | 297 | # when dname works on trusty, then we need to re-enable by removing line. | ||
3306 | 298 | def test_dname(self): | ||
3307 | 299 | print("test_dname does not work for Trusty") | ||
3308 | 300 | |||
3309 | 301 | def test_ptable(self): | ||
3310 | 302 | print("test_ptable does not work for Trusty") | ||
3311 | 303 | |||
3312 | 304 | 270 | ||
3313 | 305 | class XenialGATestMirrorbootPartitionsUEFI(relbase.xenial_ga, | 271 | class XenialGATestMirrorbootPartitionsUEFI(relbase.xenial_ga, |
3314 | 306 | TestMirrorbootPartitionsUEFIAbs): | 272 | TestMirrorbootPartitionsUEFIAbs): |
3315 | @@ -342,14 +308,6 @@ class TestRaid5bootAbs(TestMdadmAbs): | |||
3316 | 342 | class TrustyTestRaid5Boot(relbase.trusty, TestRaid5bootAbs): | 308 | class TrustyTestRaid5Boot(relbase.trusty, TestRaid5bootAbs): |
3317 | 343 | __test__ = True | 309 | __test__ = True |
3318 | 344 | 310 | ||
3319 | 345 | # FIXME(LP: #1523037): dname does not work on trusty | ||
3320 | 346 | # when dname works on trusty, then we need to re-enable by removing line. | ||
3321 | 347 | def test_dname(self): | ||
3322 | 348 | print("test_dname does not work for Trusty") | ||
3323 | 349 | |||
3324 | 350 | def test_ptable(self): | ||
3325 | 351 | print("test_ptable does not work for Trusty") | ||
3326 | 352 | |||
3327 | 353 | 311 | ||
3328 | 354 | class TrustyHWEXTestRaid5Boot(relbase.trusty_hwe_x, TrustyTestRaid5Boot): | 312 | class TrustyHWEXTestRaid5Boot(relbase.trusty_hwe_x, TrustyTestRaid5Boot): |
3329 | 355 | # This tests kernel upgrade in target | 313 | # This tests kernel upgrade in target |
3330 | @@ -404,14 +362,6 @@ class TestRaid6bootAbs(TestMdadmAbs): | |||
3331 | 404 | class TrustyTestRaid6boot(relbase.trusty, TestRaid6bootAbs): | 362 | class TrustyTestRaid6boot(relbase.trusty, TestRaid6bootAbs): |
3332 | 405 | __test__ = True | 363 | __test__ = True |
3333 | 406 | 364 | ||
3334 | 407 | # FIXME(LP: #1523037): dname does not work on trusty | ||
3335 | 408 | # when dname works on trusty, then we need to re-enable by removing line. | ||
3336 | 409 | def test_dname(self): | ||
3337 | 410 | print("test_dname does not work for Trusty") | ||
3338 | 411 | |||
3339 | 412 | def test_ptable(self): | ||
3340 | 413 | print("test_ptable does not work for Trusty") | ||
3341 | 414 | |||
3342 | 415 | 365 | ||
3343 | 416 | class TrustyHWEXTestRaid6boot(relbase.trusty_hwe_x, TrustyTestRaid6boot): | 366 | class TrustyHWEXTestRaid6boot(relbase.trusty_hwe_x, TrustyTestRaid6boot): |
3344 | 417 | __test__ = True | 367 | __test__ = True |
3345 | @@ -453,14 +403,6 @@ class TestRaid10bootAbs(TestMdadmAbs): | |||
3346 | 453 | class TrustyTestRaid10boot(relbase.trusty, TestRaid10bootAbs): | 403 | class TrustyTestRaid10boot(relbase.trusty, TestRaid10bootAbs): |
3347 | 454 | __test__ = True | 404 | __test__ = True |
3348 | 455 | 405 | ||
3349 | 456 | # FIXME(LP: #1523037): dname does not work on trusty | ||
3350 | 457 | # when dname works on trusty, then we need to re-enable by removing line. | ||
3351 | 458 | def test_dname(self): | ||
3352 | 459 | print("test_dname does not work for Trusty") | ||
3353 | 460 | |||
3354 | 461 | def test_ptable(self): | ||
3355 | 462 | print("test_ptable does not work for Trusty") | ||
3356 | 463 | |||
3357 | 464 | 406 | ||
3358 | 465 | class TrustyHWEXTestRaid10boot(relbase.trusty_hwe_x, TrustyTestRaid10boot): | 407 | class TrustyHWEXTestRaid10boot(relbase.trusty_hwe_x, TrustyTestRaid10boot): |
3359 | 466 | __test__ = True | 408 | __test__ = True |
3360 | @@ -562,14 +504,6 @@ class TestAllindataAbs(TestMdadmAbs): | |||
3361 | 562 | class TrustyTestAllindata(relbase.trusty, TestAllindataAbs): | 504 | class TrustyTestAllindata(relbase.trusty, TestAllindataAbs): |
3362 | 563 | __test__ = False # luks=no does not disable mounting of device | 505 | __test__ = False # luks=no does not disable mounting of device |
3363 | 564 | 506 | ||
3364 | 565 | # FIXME(LP: #1523037): dname does not work on trusty | ||
3365 | 566 | # when dname works on trusty, then we need to re-enable by removing line. | ||
3366 | 567 | def test_dname(self): | ||
3367 | 568 | print("test_dname does not work for Trusty") | ||
3368 | 569 | |||
3369 | 570 | def test_ptable(self): | ||
3370 | 571 | print("test_ptable does not work for Trusty") | ||
3371 | 572 | |||
3372 | 573 | 507 | ||
3373 | 574 | class TrustyHWEXTestAllindata(relbase.trusty_hwe_x, TrustyTestAllindata): | 508 | class TrustyHWEXTestAllindata(relbase.trusty_hwe_x, TrustyTestAllindata): |
3374 | 575 | __test__ = False # lukes=no does not disable mounting of device | 509 | __test__ = False # lukes=no does not disable mounting of device |
3375 | diff --git a/tests/vmtests/test_network.py b/tests/vmtests/test_network.py | |||
3376 | index 6ce4262..59a25fe 100644 | |||
3377 | --- a/tests/vmtests/test_network.py | |||
3378 | +++ b/tests/vmtests/test_network.py | |||
3379 | @@ -437,7 +437,6 @@ class TestNetworkBasicAbs(TestNetworkBaseTestsAbs): | |||
3380 | 437 | 437 | ||
3381 | 438 | class CentosTestNetworkBasicAbs(TestNetworkBaseTestsAbs): | 438 | class CentosTestNetworkBasicAbs(TestNetworkBaseTestsAbs): |
3382 | 439 | conf_file = "examples/tests/centos_basic.yaml" | 439 | conf_file = "examples/tests/centos_basic.yaml" |
3383 | 440 | extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00" | ||
3384 | 441 | collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [ | 440 | collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [ |
3385 | 442 | textwrap.dedent(""" | 441 | textwrap.dedent(""" |
3386 | 443 | cd OUTPUT_COLLECT_D | 442 | cd OUTPUT_COLLECT_D |
3387 | diff --git a/tests/vmtests/test_network_alias.py b/tests/vmtests/test_network_alias.py | |||
3388 | index 258554f..903b395 100644 | |||
3389 | --- a/tests/vmtests/test_network_alias.py | |||
3390 | +++ b/tests/vmtests/test_network_alias.py | |||
3391 | @@ -19,7 +19,6 @@ class TestNetworkAliasAbs(TestNetworkBaseTestsAbs): | |||
3392 | 19 | 19 | ||
3393 | 20 | 20 | ||
3394 | 21 | class CentosTestNetworkAliasAbs(TestNetworkAliasAbs): | 21 | class CentosTestNetworkAliasAbs(TestNetworkAliasAbs): |
3395 | 22 | extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00" | ||
3396 | 23 | collect_scripts = TestNetworkAliasAbs.collect_scripts + [ | 22 | collect_scripts = TestNetworkAliasAbs.collect_scripts + [ |
3397 | 24 | textwrap.dedent(""" | 23 | textwrap.dedent(""" |
3398 | 25 | cd OUTPUT_COLLECT_D | 24 | cd OUTPUT_COLLECT_D |
3399 | diff --git a/tests/vmtests/test_network_bonding.py b/tests/vmtests/test_network_bonding.py | |||
3400 | index 24cf60f..7d07413 100644 | |||
3401 | --- a/tests/vmtests/test_network_bonding.py | |||
3402 | +++ b/tests/vmtests/test_network_bonding.py | |||
3403 | @@ -16,7 +16,6 @@ class TestNetworkBondingAbs(TestNetworkBaseTestsAbs): | |||
3404 | 16 | 16 | ||
3405 | 17 | 17 | ||
3406 | 18 | class CentosTestNetworkBondingAbs(TestNetworkBondingAbs): | 18 | class CentosTestNetworkBondingAbs(TestNetworkBondingAbs): |
3407 | 19 | extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00" | ||
3408 | 20 | collect_scripts = TestNetworkBondingAbs.collect_scripts + [ | 19 | collect_scripts = TestNetworkBondingAbs.collect_scripts + [ |
3409 | 21 | textwrap.dedent(""" | 20 | textwrap.dedent(""" |
3410 | 22 | cd OUTPUT_COLLECT_D | 21 | cd OUTPUT_COLLECT_D |
3411 | diff --git a/tests/vmtests/test_network_bridging.py b/tests/vmtests/test_network_bridging.py | |||
3412 | index 5691b00..ca8964e 100644 | |||
3413 | --- a/tests/vmtests/test_network_bridging.py | |||
3414 | +++ b/tests/vmtests/test_network_bridging.py | |||
3415 | @@ -184,7 +184,6 @@ class TestBridgeNetworkAbs(TestNetworkBaseTestsAbs): | |||
3416 | 184 | 184 | ||
3417 | 185 | 185 | ||
3418 | 186 | class CentosTestBridgeNetworkAbs(TestBridgeNetworkAbs): | 186 | class CentosTestBridgeNetworkAbs(TestBridgeNetworkAbs): |
3419 | 187 | extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00" | ||
3420 | 188 | collect_scripts = TestBridgeNetworkAbs.collect_scripts + [ | 187 | collect_scripts = TestBridgeNetworkAbs.collect_scripts + [ |
3421 | 189 | textwrap.dedent(""" | 188 | textwrap.dedent(""" |
3422 | 190 | cd OUTPUT_COLLECT_D | 189 | cd OUTPUT_COLLECT_D |
3423 | diff --git a/tests/vmtests/test_network_ipv6.py b/tests/vmtests/test_network_ipv6.py | |||
3424 | index 9bbfc1e..6d87dcf 100644 | |||
3425 | --- a/tests/vmtests/test_network_ipv6.py | |||
3426 | +++ b/tests/vmtests/test_network_ipv6.py | |||
3427 | @@ -25,7 +25,6 @@ class TestNetworkIPV6Abs(TestNetworkBaseTestsAbs): | |||
3428 | 25 | 25 | ||
3429 | 26 | 26 | ||
3430 | 27 | class CentosTestNetworkIPV6Abs(TestNetworkIPV6Abs): | 27 | class CentosTestNetworkIPV6Abs(TestNetworkIPV6Abs): |
3431 | 28 | extra_kern_args = "BOOTIF=eth0-bc:76:4e:06:96:b3" | ||
3432 | 29 | collect_scripts = TestNetworkIPV6Abs.collect_scripts + [ | 28 | collect_scripts = TestNetworkIPV6Abs.collect_scripts + [ |
3433 | 30 | textwrap.dedent(""" | 29 | textwrap.dedent(""" |
3434 | 31 | cd OUTPUT_COLLECT_D | 30 | cd OUTPUT_COLLECT_D |
3435 | diff --git a/tests/vmtests/test_network_mtu.py b/tests/vmtests/test_network_mtu.py | |||
3436 | index 86f4e48..41b1383 100644 | |||
3437 | --- a/tests/vmtests/test_network_mtu.py | |||
3438 | +++ b/tests/vmtests/test_network_mtu.py | |||
3439 | @@ -120,7 +120,6 @@ class TestNetworkMtuAbs(TestNetworkIPV6Abs): | |||
3440 | 120 | 120 | ||
3441 | 121 | class CentosTestNetworkMtuAbs(TestNetworkMtuAbs): | 121 | class CentosTestNetworkMtuAbs(TestNetworkMtuAbs): |
3442 | 122 | conf_file = "examples/tests/network_mtu.yaml" | 122 | conf_file = "examples/tests/network_mtu.yaml" |
3443 | 123 | extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00" | ||
3444 | 124 | collect_scripts = TestNetworkMtuAbs.collect_scripts + [ | 123 | collect_scripts = TestNetworkMtuAbs.collect_scripts + [ |
3445 | 125 | textwrap.dedent(""" | 124 | textwrap.dedent(""" |
3446 | 126 | cd OUTPUT_COLLECT_D | 125 | cd OUTPUT_COLLECT_D |
3447 | diff --git a/tests/vmtests/test_network_static.py b/tests/vmtests/test_network_static.py | |||
3448 | index 2d226c0..d96d3eb 100644 | |||
3449 | --- a/tests/vmtests/test_network_static.py | |||
3450 | +++ b/tests/vmtests/test_network_static.py | |||
3451 | @@ -13,7 +13,6 @@ class TestNetworkStaticAbs(TestNetworkBaseTestsAbs): | |||
3452 | 13 | 13 | ||
3453 | 14 | 14 | ||
3454 | 15 | class CentosTestNetworkStaticAbs(TestNetworkStaticAbs): | 15 | class CentosTestNetworkStaticAbs(TestNetworkStaticAbs): |
3455 | 16 | extra_kern_args = "BOOTIF=eth0-52:54:00:12:34:00" | ||
3456 | 17 | collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [ | 16 | collect_scripts = TestNetworkBaseTestsAbs.collect_scripts + [ |
3457 | 18 | textwrap.dedent(""" | 17 | textwrap.dedent(""" |
3458 | 19 | cd OUTPUT_COLLECT_D | 18 | cd OUTPUT_COLLECT_D |
3459 | diff --git a/tests/vmtests/test_network_vlan.py b/tests/vmtests/test_network_vlan.py | |||
3460 | index 24a01ec..3cb6eae 100644 | |||
3461 | --- a/tests/vmtests/test_network_vlan.py | |||
3462 | +++ b/tests/vmtests/test_network_vlan.py | |||
3463 | @@ -3,6 +3,7 @@ | |||
3464 | 3 | from .releases import base_vm_classes as relbase | 3 | from .releases import base_vm_classes as relbase |
3465 | 4 | from .releases import centos_base_vm_classes as centos_relbase | 4 | from .releases import centos_base_vm_classes as centos_relbase |
3466 | 5 | from .test_network import TestNetworkBaseTestsAbs | 5 | from .test_network import TestNetworkBaseTestsAbs |
3467 | 6 | from unittest import SkipTest | ||
3468 | 6 | 7 | ||
3469 | 7 | import textwrap | 8 | import textwrap |
3470 | 8 | import yaml | 9 | import yaml |
3471 | @@ -34,6 +35,11 @@ class TestNetworkVlanAbs(TestNetworkBaseTestsAbs): | |||
3472 | 34 | self.output_files_exist(link_files) | 35 | self.output_files_exist(link_files) |
3473 | 35 | 36 | ||
3474 | 36 | def test_vlan_installed(self): | 37 | def test_vlan_installed(self): |
3475 | 38 | release = self.target_release if self.target_release else self.release | ||
3476 | 39 | if release not in ('precise', 'trusty', 'xenial', 'artful'): | ||
3477 | 40 | raise SkipTest("release '%s' does not need the vlan package" % | ||
3478 | 41 | release) | ||
3479 | 42 | |||
3480 | 37 | self.assertIn("vlan", self.debian_packages, "vlan deb not installed") | 43 | self.assertIn("vlan", self.debian_packages, "vlan deb not installed") |
3481 | 38 | 44 | ||
3482 | 39 | def test_vlan_enabled(self): | 45 | def test_vlan_enabled(self): |
3483 | @@ -48,7 +54,6 @@ class TestNetworkVlanAbs(TestNetworkBaseTestsAbs): | |||
3484 | 48 | 54 | ||
3485 | 49 | 55 | ||
3486 | 50 | class CentosTestNetworkVlanAbs(TestNetworkVlanAbs): | 56 | class CentosTestNetworkVlanAbs(TestNetworkVlanAbs): |
3487 | 51 | extra_kern_args = "BOOTIF=eth0-d4:be:d9:a8:49:13" | ||
3488 | 52 | collect_scripts = TestNetworkVlanAbs.collect_scripts + [ | 57 | collect_scripts = TestNetworkVlanAbs.collect_scripts + [ |
3489 | 53 | textwrap.dedent(""" | 58 | textwrap.dedent(""" |
3490 | 54 | cd OUTPUT_COLLECT_D | 59 | cd OUTPUT_COLLECT_D |
3491 | diff --git a/tests/vmtests/test_nvme.py b/tests/vmtests/test_nvme.py | |||
3492 | index 1ba3d3d..a9e3bc3 100644 | |||
3493 | --- a/tests/vmtests/test_nvme.py | |||
3494 | +++ b/tests/vmtests/test_nvme.py | |||
3495 | @@ -58,28 +58,10 @@ class TestNvmeAbs(VMBaseClass): | |||
3496 | 58 | class TrustyTestNvme(relbase.trusty, TestNvmeAbs): | 58 | class TrustyTestNvme(relbase.trusty, TestNvmeAbs): |
3497 | 59 | __test__ = True | 59 | __test__ = True |
3498 | 60 | 60 | ||
3499 | 61 | # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect | ||
3500 | 62 | # sda-part2 to exist in /dev/disk/by-dname as we can on other releases | ||
3501 | 63 | # when dname works on trusty, then we need to re-enable by removing line. | ||
3502 | 64 | def test_dname(self): | ||
3503 | 65 | print("test_dname does not work for Trusty") | ||
3504 | 66 | |||
3505 | 67 | def test_ptable(self): | ||
3506 | 68 | print("test_ptable does not work for Trusty") | ||
3507 | 69 | |||
3508 | 70 | 61 | ||
3509 | 71 | class TrustyHWEXTestNvme(relbase.trusty_hwe_x, TestNvmeAbs): | 62 | class TrustyHWEXTestNvme(relbase.trusty_hwe_x, TestNvmeAbs): |
3510 | 72 | __test__ = True | 63 | __test__ = True |
3511 | 73 | 64 | ||
3512 | 74 | # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect | ||
3513 | 75 | # sda-part2 to exist in /dev/disk/by-dname as we can on other releases | ||
3514 | 76 | # when dname works on trusty, then we need to re-enable by removing line. | ||
3515 | 77 | def test_dname(self): | ||
3516 | 78 | print("test_dname does not work for Trusty") | ||
3517 | 79 | |||
3518 | 80 | def test_ptable(self): | ||
3519 | 81 | print("test_ptable does not work for Trusty") | ||
3520 | 82 | |||
3521 | 83 | 65 | ||
3522 | 84 | class XenialGATestNvme(relbase.xenial_ga, TestNvmeAbs): | 66 | class XenialGATestNvme(relbase.xenial_ga, TestNvmeAbs): |
3523 | 85 | __test__ = True | 67 | __test__ = True |
3524 | diff --git a/tests/vmtests/test_pollinate_useragent.py b/tests/vmtests/test_pollinate_useragent.py | |||
3525 | index c076fbc..abd6daf 100644 | |||
3526 | --- a/tests/vmtests/test_pollinate_useragent.py | |||
3527 | +++ b/tests/vmtests/test_pollinate_useragent.py | |||
3528 | @@ -24,7 +24,7 @@ class TestPollinateUserAgent(VMBaseClass): | |||
3529 | 24 | self.output_files_exist(["pollinate_print_user_agent"]) | 24 | self.output_files_exist(["pollinate_print_user_agent"]) |
3530 | 25 | agent_values = self.load_collect_file("pollinate_print_user_agent") | 25 | agent_values = self.load_collect_file("pollinate_print_user_agent") |
3531 | 26 | if len(agent_values) == 0: | 26 | if len(agent_values) == 0: |
3533 | 27 | pollver = re.search('pollinate\s(?P<version>\S+)', | 27 | pollver = re.search(r'pollinate\s(?P<version>\S+)', |
3534 | 28 | self.load_collect_file("debian-packages.txt")) | 28 | self.load_collect_file("debian-packages.txt")) |
3535 | 29 | msg = ("pollinate client '%s' does not support " | 29 | msg = ("pollinate client '%s' does not support " |
3536 | 30 | "--print-user-agent'" % pollver.groupdict()['version']) | 30 | "--print-user-agent'" % pollver.groupdict()['version']) |
3537 | @@ -45,7 +45,7 @@ class TestPollinateUserAgent(VMBaseClass): | |||
3538 | 45 | """ | 45 | """ |
3539 | 46 | ua_val = line.split()[0] | 46 | ua_val = line.split()[0] |
3540 | 47 | # escape + and . that are likely in maas/curtin version strings | 47 | # escape + and . that are likely in maas/curtin version strings |
3542 | 48 | regex = r'%s' % ua_val.replace('+', '\+').replace('.', '\.') | 48 | regex = '%s' % ua_val.replace('+', r'\+').replace('.', r'\.') |
3543 | 49 | hit = re.search(regex, agent_values) | 49 | hit = re.search(regex, agent_values) |
3544 | 50 | self.assertIsNotNone(hit) | 50 | self.assertIsNotNone(hit) |
3545 | 51 | self.assertEqual(ua_val, hit.group()) | 51 | self.assertEqual(ua_val, hit.group()) |
3546 | diff --git a/tests/vmtests/test_raid5_bcache.py b/tests/vmtests/test_raid5_bcache.py | |||
3547 | index 8a47e94..aa2bebf 100644 | |||
3548 | --- a/tests/vmtests/test_raid5_bcache.py | |||
3549 | +++ b/tests/vmtests/test_raid5_bcache.py | |||
3550 | @@ -69,10 +69,6 @@ class TestMdadmBcacheAbs(TestMdadmAbs): | |||
3551 | 69 | 69 | ||
3552 | 70 | class TrustyTestRaid5Bcache(relbase.trusty, TestMdadmBcacheAbs): | 70 | class TrustyTestRaid5Bcache(relbase.trusty, TestMdadmBcacheAbs): |
3553 | 71 | __test__ = True | 71 | __test__ = True |
3554 | 72 | # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect | ||
3555 | 73 | # sda-part2 to exist in /dev/disk/by-dname as we can on other releases | ||
3556 | 74 | # when dname works on trusty, then we need to re-enable by removing line. | ||
3557 | 75 | disk_to_check = [('md0', 0)] | ||
3558 | 76 | 72 | ||
3559 | 77 | 73 | ||
3560 | 78 | class TrustyHWEUTestRaid5Bcache(relbase.trusty_hwe_u, TrustyTestRaid5Bcache): | 74 | class TrustyHWEUTestRaid5Bcache(relbase.trusty_hwe_u, TrustyTestRaid5Bcache): |
3561 | diff --git a/tests/vmtests/test_uefi_basic.py b/tests/vmtests/test_uefi_basic.py | |||
3562 | index d6a58eb..517554f 100644 | |||
3563 | --- a/tests/vmtests/test_uefi_basic.py | |||
3564 | +++ b/tests/vmtests/test_uefi_basic.py | |||
3565 | @@ -95,15 +95,6 @@ class PreciseHWETUefiTestBasic(relbase.precise_hwe_t, PreciseUefiTestBasic): | |||
3566 | 95 | class TrustyUefiTestBasic(relbase.trusty, TestBasicAbs): | 95 | class TrustyUefiTestBasic(relbase.trusty, TestBasicAbs): |
3567 | 96 | __test__ = True | 96 | __test__ = True |
3568 | 97 | 97 | ||
3569 | 98 | # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect | ||
3570 | 99 | # sda-part2 to exist in /dev/disk/by-dname as we can on other releases | ||
3571 | 100 | # when dname works on trusty, then we need to re-enable by removing line. | ||
3572 | 101 | def test_dname(self): | ||
3573 | 102 | print("test_dname does not work for Trusty") | ||
3574 | 103 | |||
3575 | 104 | def test_ptable(self): | ||
3576 | 105 | print("test_ptable does not work for Trusty") | ||
3577 | 106 | |||
3578 | 107 | 98 | ||
3579 | 108 | class TrustyHWEXUefiTestBasic(relbase.trusty_hwe_x, TrustyUefiTestBasic): | 99 | class TrustyHWEXUefiTestBasic(relbase.trusty_hwe_x, TrustyUefiTestBasic): |
3580 | 109 | __test__ = True | 100 | __test__ = True |
3581 | diff --git a/tests/vmtests/test_zfsroot.py b/tests/vmtests/test_zfsroot.py | |||
3582 | index 4487185..1ebc616 100644 | |||
3583 | --- a/tests/vmtests/test_zfsroot.py | |||
3584 | +++ b/tests/vmtests/test_zfsroot.py | |||
3585 | @@ -1,4 +1,4 @@ | |||
3587 | 1 | from . import VMBaseClass | 1 | from . import VMBaseClass, check_install_log, skip_if_flag |
3588 | 2 | from .releases import base_vm_classes as relbase | 2 | from .releases import base_vm_classes as relbase |
3589 | 3 | 3 | ||
3590 | 4 | import textwrap | 4 | import textwrap |
3591 | @@ -33,6 +33,7 @@ class TestZfsRootAbs(VMBaseClass): | |||
3592 | 33 | echo "$v" > apt-proxy | 33 | echo "$v" > apt-proxy |
3593 | 34 | """)] | 34 | """)] |
3594 | 35 | 35 | ||
3595 | 36 | @skip_if_flag('expected_failure') | ||
3596 | 36 | def test_output_files_exist(self): | 37 | def test_output_files_exist(self): |
3597 | 37 | self.output_files_exist( | 38 | self.output_files_exist( |
3598 | 38 | ["blkid_output_vda", "blkid_output_vda1", "blkid_output_vda2", | 39 | ["blkid_output_vda", "blkid_output_vda1", "blkid_output_vda2", |
3599 | @@ -40,21 +41,49 @@ class TestZfsRootAbs(VMBaseClass): | |||
3600 | 40 | "proc_partitions", | 41 | "proc_partitions", |
3601 | 41 | "root/curtin-install.log", "root/curtin-install-cfg.yaml"]) | 42 | "root/curtin-install.log", "root/curtin-install-cfg.yaml"]) |
3602 | 42 | 43 | ||
3603 | 44 | @skip_if_flag('expected_failure') | ||
3604 | 43 | def test_ptable(self): | 45 | def test_ptable(self): |
3605 | 44 | blkid_info = self.get_blkid_data("blkid_output_vda") | 46 | blkid_info = self.get_blkid_data("blkid_output_vda") |
3606 | 45 | self.assertEquals(blkid_info["PTTYPE"], "gpt") | 47 | self.assertEquals(blkid_info["PTTYPE"], "gpt") |
3607 | 46 | 48 | ||
3608 | 49 | @skip_if_flag('expected_failure') | ||
3609 | 47 | def test_zfs_list(self): | 50 | def test_zfs_list(self): |
3610 | 48 | """Check rpoot/ROOT/zfsroot is mounted at slash""" | 51 | """Check rpoot/ROOT/zfsroot is mounted at slash""" |
3611 | 49 | self.output_files_exist(['zfs_list']) | 52 | self.output_files_exist(['zfs_list']) |
3612 | 50 | self.check_file_regex('zfs_list', r"rpool/ROOT/zfsroot.*/\n") | 53 | self.check_file_regex('zfs_list', r"rpool/ROOT/zfsroot.*/\n") |
3613 | 51 | 54 | ||
3614 | 55 | @skip_if_flag('expected_failure') | ||
3615 | 52 | def test_proc_cmdline_has_root_zfs(self): | 56 | def test_proc_cmdline_has_root_zfs(self): |
3616 | 53 | """Check /proc/cmdline has root=ZFS=<pool>""" | 57 | """Check /proc/cmdline has root=ZFS=<pool>""" |
3617 | 54 | self.output_files_exist(['proc_cmdline']) | 58 | self.output_files_exist(['proc_cmdline']) |
3618 | 55 | self.check_file_regex('proc_cmdline', r"root=ZFS=rpool/ROOT/zfsroot") | 59 | self.check_file_regex('proc_cmdline', r"root=ZFS=rpool/ROOT/zfsroot") |
3619 | 56 | 60 | ||
3620 | 57 | 61 | ||
3621 | 62 | class UnsupportedZfs(VMBaseClass): | ||
3622 | 63 | expected_failure = True | ||
3623 | 64 | collect_scripts = [] | ||
3624 | 65 | interactive = False | ||
3625 | 66 | |||
3626 | 67 | def test_install_log_finds_zfs_runtime_error(self): | ||
3627 | 68 | with open(self.install_log, 'rb') as lfh: | ||
3628 | 69 | install_log = lfh.read().decode('utf-8', errors='replace') | ||
3629 | 70 | errmsg, errors = check_install_log(install_log) | ||
3630 | 71 | found_zfs = False | ||
3631 | 72 | print("errors: %s" % (len(errors))) | ||
3632 | 73 | for idx, err in enumerate(errors): | ||
3633 | 74 | print("%s:\n%s" % (idx, err)) | ||
3634 | 75 | if 'RuntimeError' in err: | ||
3635 | 76 | found_zfs = True | ||
3636 | 77 | break | ||
3637 | 78 | self.assertTrue(found_zfs) | ||
3638 | 79 | |||
3639 | 80 | |||
3640 | 81 | class XenialGAi386TestZfsRoot(relbase.xenial_ga, TestZfsRootAbs, | ||
3641 | 82 | UnsupportedZfs): | ||
3642 | 83 | __test__ = True | ||
3643 | 84 | arch = 'i386' | ||
3644 | 85 | |||
3645 | 86 | |||
3646 | 58 | class XenialGATestZfsRoot(relbase.xenial_ga, TestZfsRootAbs): | 87 | class XenialGATestZfsRoot(relbase.xenial_ga, TestZfsRootAbs): |
3647 | 59 | __test__ = True | 88 | __test__ = True |
3648 | 60 | 89 | ||
3649 | @@ -81,3 +110,13 @@ class TestZfsRootFsTypeAbs(TestZfsRootAbs): | |||
3650 | 81 | 110 | ||
3651 | 82 | class XenialGATestZfsRootFsType(relbase.xenial_ga, TestZfsRootFsTypeAbs): | 111 | class XenialGATestZfsRootFsType(relbase.xenial_ga, TestZfsRootFsTypeAbs): |
3652 | 83 | __test__ = True | 112 | __test__ = True |
3653 | 113 | |||
3654 | 114 | |||
3655 | 115 | class XenialGAi386TestZfsRootFsType(relbase.xenial_ga, TestZfsRootFsTypeAbs, | ||
3656 | 116 | UnsupportedZfs): | ||
3657 | 117 | __test__ = True | ||
3658 | 118 | arch = 'i386' | ||
3659 | 119 | |||
3660 | 120 | |||
3661 | 121 | class BionicTestZfsRootFsType(relbase.bionic, TestZfsRootFsTypeAbs): | ||
3662 | 122 | __test__ = True | ||
3663 | diff --git a/tools/jenkins-runner b/tools/jenkins-runner | |||
3664 | index 1d0ac73..85c6234 100755 | |||
3665 | --- a/tools/jenkins-runner | |||
3666 | +++ b/tools/jenkins-runner | |||
3667 | @@ -54,6 +54,8 @@ parallel=${CURTIN_VMTEST_PARALLEL} | |||
3668 | 54 | ntargs=( ) | 54 | ntargs=( ) |
3669 | 55 | while [ $# -ne 0 ]; do | 55 | while [ $# -ne 0 ]; do |
3670 | 56 | case "$1" in | 56 | case "$1" in |
3671 | 57 | # allow setting these environment variables on cmdline. | ||
3672 | 58 | CURTIN_VMTEST_*=*) export "$1";; | ||
3673 | 57 | -p|--parallel) parallel="$2"; shift;; | 59 | -p|--parallel) parallel="$2"; shift;; |
3674 | 58 | --parallel=*) parallel=${1#*=};; | 60 | --parallel=*) parallel=${1#*=};; |
3675 | 59 | -p[0-9]|-p-1|-p[0-9][0-9]) parallel=${1#-p};; | 61 | -p[0-9]|-p-1|-p[0-9][0-9]) parallel=${1#-p};; |
3676 | @@ -81,6 +83,16 @@ if [ -n "$parallel" -a "$parallel" != "0" -a "$parallel" != "1" ]; then | |||
3677 | 81 | pargs=( --process-timeout=86400 "--processes=$parallel" ) | 83 | pargs=( --process-timeout=86400 "--processes=$parallel" ) |
3678 | 82 | fi | 84 | fi |
3679 | 83 | 85 | ||
3680 | 86 | curtexe="${CURTIN_VMTEST_CURTIN_EXE:-./bin/curtin}" | ||
3681 | 87 | CURTIN_VMTEST_CURTIN_EXE_VERSION=$($curtexe version) || | ||
3682 | 88 | fail "failed to get version from '$curtexe version'" | ||
3683 | 89 | if [ "$curtexe" = "./bin/curtin" ]; then | ||
3684 | 90 | CURTIN_VMTEST_CURTIN_VERSION="$CURTIN_VMTEST_CURTIN_EXE_VERSION" | ||
3685 | 91 | else | ||
3686 | 92 | CURTIN_VMTEST_CURTIN_VERSION="$(./bin/curtin version)" || | ||
3687 | 93 | fail "failed to get version from ./bin/curtin version" | ||
3688 | 94 | fi | ||
3689 | 95 | |||
3690 | 84 | if [ -n "$TGT_IPC_SOCKET" ]; then | 96 | if [ -n "$TGT_IPC_SOCKET" ]; then |
3691 | 85 | error "existing TGT_IPC_SOCKET=${TGT_IPC_SOCKET}" | 97 | error "existing TGT_IPC_SOCKET=${TGT_IPC_SOCKET}" |
3692 | 86 | elif command -v tgtd >/dev/null 2>&1; then | 98 | elif command -v tgtd >/dev/null 2>&1; then |
3693 | diff --git a/tools/vmtest-sync-images b/tools/vmtest-sync-images | |||
3694 | index 26a1962..3d82b62 100755 | |||
3695 | --- a/tools/vmtest-sync-images | |||
3696 | +++ b/tools/vmtest-sync-images | |||
3697 | @@ -17,11 +17,9 @@ sys.path.insert(1, os.path.realpath(os.path.join( | |||
3698 | 17 | from tests.vmtests import ( | 17 | from tests.vmtests import ( |
3699 | 18 | IMAGE_DIR, IMAGE_SRC_URL, sync_images) | 18 | IMAGE_DIR, IMAGE_SRC_URL, sync_images) |
3700 | 19 | from tests.vmtests.image_sync import ITEM_NAME_FILTERS | 19 | from tests.vmtests.image_sync import ITEM_NAME_FILTERS |
3702 | 20 | from tests.vmtests.helpers import find_releases_by_distro | 20 | from tests.vmtests.helpers import (find_arches, find_releases_by_distro) |
3703 | 21 | from curtin.util import get_platform_arch | 21 | from curtin.util import get_platform_arch |
3704 | 22 | 22 | ||
3705 | 23 | DEFAULT_ARCH = get_platform_arch() | ||
3706 | 24 | |||
3707 | 25 | 23 | ||
3708 | 26 | def _fmt_list_filter(filter_name, matches): | 24 | def _fmt_list_filter(filter_name, matches): |
3709 | 27 | return '~'.join((filter_name, '|'.join(matches))) | 25 | return '~'.join((filter_name, '|'.join(matches))) |
3710 | @@ -53,7 +51,7 @@ if __name__ == '__main__': | |||
3711 | 53 | os.unlink(fpath) | 51 | os.unlink(fpath) |
3712 | 54 | 52 | ||
3713 | 55 | arg_releases = [r for r in sys.argv[1:] if r != "--clean"] | 53 | arg_releases = [r for r in sys.argv[1:] if r != "--clean"] |
3715 | 56 | arch_filters = ['arch={}'.format(DEFAULT_ARCH)] | 54 | arch_filters = [_fmt_list_filter('arch', find_arches())] |
3716 | 57 | filter_sets = [] | 55 | filter_sets = [] |
3717 | 58 | if len(arg_releases): | 56 | if len(arg_releases): |
3718 | 59 | filter_sets.append([_fmt_list_filter('release', arg_releases), | 57 | filter_sets.append([_fmt_list_filter('release', arg_releases), |
PASSED: Continuous integration, rev:bb317840c0e 4ea17160c27bfe2 45a1e87a13d0fc /jenkins. ubuntu. com/server/ job/curtin- ci/944/ /jenkins. ubuntu. com/server/ job/curtin- ci/nodes= metal-amd64/ 944 /jenkins. ubuntu. com/server/ job/curtin- ci/nodes= metal-arm64/ 944 /jenkins. ubuntu. com/server/ job/curtin- ci/nodes= metal-ppc64el/ 944 /jenkins. ubuntu. com/server/ job/curtin- ci/nodes= metal-s390x/ 944
https:/
Executed test runs:
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
Click here to trigger a rebuild: /jenkins. ubuntu. com/server/ job/curtin- ci/944/ rebuild
https:/