Merge ~smoser/cloud-init:bug/1686514-azure-reformat-large into cloud-init:master
| Status | Merged |
|---|---|
| Merged at revision | 31b6f173280fcc8e9be2732ae2e9b6f6c89679d4 |
| Proposed branch | ~smoser/cloud-init:bug/1686514-azure-reformat-large |
| Merge into | cloud-init:master |
| Diff against target | 588 lines (+307/-77), 4 files modified |
| Related bugs | LP: #1686514 |

Files modified:
- cloudinit/config/cc_disk_setup.py (+14/-5)
- cloudinit/sources/DataSourceAzure.py (+49/-35)
- tests/unittests/test_datasource/test_azure.py (+228/-37)
- tests/unittests/test_handler/test_handler_disk_setup.py (+16/-0)
| Reviewer | Review Type | Date Requested | Status |
|---|---|---|---|
| Scott Moser | | | Approve |
| Server Team CI bot | continuous-integration | | Approve |
| Ryan Harper | | | Needs Fixing |
Review via email: mp+323420@code.launchpad.net
Commit message
Azure: fix reformatting of ephemeral disks on resize to large types.
Large instance types have a different disk format on the newly
partitioned ephemeral drive, so the logic in the Azure datasource
is adjusted to recognize that a disk with 2 partitions and an
empty ntfs filesystem on the second one is acceptable.
This also adjusts the datasource's builtin fs_setup config to remove
the 'replace_fs' entry, which was previously ignored and confusing.
The documentation on that has been clarified as well.
LP: #1686514
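
For orientation, the acceptance rule described above (implemented in
can_dev_be_reformatted() in the diff below) reduces to a predicate over the
partition list. This is a condensed sketch, not the cloud-init code; the
(number, fstype, file_count) tuple layout is assumed for illustration only:

    def looks_newly_formatted(partitions):
        # partitions: list of (number, fstype, file_count) tuples, sorted
        # by partition number -- an assumed layout for this sketch.
        if not partitions:
            return False             # device was not partitioned
        if len(partitions) > 2:
            return False             # 3 or more partitions: do not touch
        # On the large instance types from LP: #1686514 the >2TB gpt disk
        # carries a Microsoft reserved partition first, so the candidate
        # is the last partition.
        _num, fstype, file_count = partitions[-1]
        # Only an (effectively) empty ntfs filesystem is safe to reformat;
        # the real code mounts the partition and ignores
        # 'dataloss_warning_readme.txt' when counting files.
        return fstype == 'ntfs' and file_count == 0

The real implementation additionally resolves udev symlinks such as
/dev/disk/cloud/azure_resource to their kernel device before checking.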
Description of the change
Server Team CI bot (server-team-bot) wrote:
Ryan Harper (raharper):
Server Team CI bot (server-team-bot) wrote:
PASSED: Continuous integration, rev:225163c43eb
https:/
Executed test runs:
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
Click here to trigger a rebuild:
https:/
Scott Moser (smoser):
Server Team CI bot (server-team-bot) wrote:
PASSED: Continuous integration, rev:f3efa89cce9
https:/
Executed test runs:
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
Click here to trigger a rebuild:
https:/
Server Team CI bot (server-team-bot) wrote:
PASSED: Continuous integration, rev:d6cfcd4f0ec
https:/
Executed test runs:
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
Click here to trigger a rebuild:
https:/
Ryan Harper (raharper) wrote:
Please look at the discussion of realpath; I believe the code works as you have it, but I'd prefer to be explicit about where we use realpath.
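
The point of the realpath discussion: Azure publishes the ephemeral disk
behind a stable udev symlink, and the branch probes partitions on the
symlink path before resolving them. A small illustration of the behavior
relied on (device names here are examples, not guaranteed):

    import os.path

    # Partitions of a symlinked disk are probed as '<symlink>-part<N>'
    # (also 'p<N>' and plain '<N>') and each hit is resolved, so later
    # checks (blkid TYPE=ntfs, mount) compare real kernel device paths
    # rather than symlinks.
    devpath = '/dev/disk/cloud/azure_resource'   # symlink, e.g. -> /dev/sdb
    ppath = devpath + '-part1'                   # e.g. -> /dev/sdb1
    if os.path.exists(ppath):
        print(os.path.realpath(ppath))           # e.g. '/dev/sdb1'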
Scott Moser (smoser):
Ryan Harper (raharper):
Server Team CI bot (server-team-bot) wrote:
PASSED: Continuous integration, rev:e4d051d05a6
https:/
Executed test runs:
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
Click here to trigger a rebuild:
https:/
Scott Moser (smoser):
Server Team CI bot (server-team-bot) wrote:
PASSED: Continuous integration, rev:b5722bd1358
https:/
Executed test runs:
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
SUCCESS: https:/
Click here to trigger a rebuild:
https:/
Scott Moser (smoser) wrote:
Going to merge this based on the following IRC exchange:
05/17/17 10:09:05 <smoser> rharper, thanks. can you read my comments on my azure branch? want to get that landed today.
05/17/17 10:09:12 <smoser> if you have something you want me to review, please point.
05/17/17 10:09:58 <rharper> smoser: ack; I read them; I still don't like the side-effects but I won't object any more
Scott Moser (smoser):
Preview Diff
1 | diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py |
2 | index 6f827dd..29eb5dd 100644 |
3 | --- a/cloudinit/config/cc_disk_setup.py |
4 | +++ b/cloudinit/config/cc_disk_setup.py |
5 | @@ -68,6 +68,9 @@ specified using ``filesystem``. |
6 | Using ``overwrite: true`` for filesystems is dangerous and can lead to data |
7 | loss, so double check the entry in ``fs_setup``. |
8 | |
9 | +.. note:: |
10 | + ``replace_fs`` is ignored unless ``partition`` is ``auto`` or ``any``. |
11 | + |
12 | **Internal name:** ``cc_disk_setup`` |
13 | |
14 | **Module frequency:** per instance |
15 | @@ -127,7 +130,7 @@ def handle(_name, cfg, cloud, log, _args): |
16 | log.debug("Partitioning disks: %s", str(disk_setup)) |
17 | for disk, definition in disk_setup.items(): |
18 | if not isinstance(definition, dict): |
19 | - log.warn("Invalid disk definition for %s" % disk) |
20 | + log.warning("Invalid disk definition for %s" % disk) |
21 | continue |
22 | |
23 | try: |
24 | @@ -144,7 +147,7 @@ def handle(_name, cfg, cloud, log, _args): |
25 | update_fs_setup_devices(fs_setup, cloud.device_name_to_device) |
26 | for definition in fs_setup: |
27 | if not isinstance(definition, dict): |
28 | - log.warn("Invalid file system definition: %s" % definition) |
29 | + log.warning("Invalid file system definition: %s" % definition) |
30 | continue |
31 | |
32 | try: |
33 | @@ -199,8 +202,13 @@ def update_fs_setup_devices(disk_setup, tformer): |
34 | definition['_origname'] = origname |
35 | definition['device'] = tformed |
36 | |
37 | - if part and 'partition' in definition: |
38 | - definition['_partition'] = definition['partition'] |
39 | + if part: |
40 | + # In origname with <dev>.N, N overrides 'partition' key. |
41 | + if 'partition' in definition: |
42 | + LOG.warning("Partition '%s' from dotted device name '%s' " |
43 | + "overrides 'partition' key in %s", part, origname, |
44 | + definition) |
45 | + definition['_partition'] = definition['partition'] |
46 | definition['partition'] = part |
47 | |
48 | |
49 | @@ -849,7 +857,8 @@ def mkfs(fs_cfg): |
50 | # Check to see if the fs already exists |
51 | LOG.debug("Checking device %s", device) |
52 | check_label, check_fstype, _ = check_fs(device) |
53 | - LOG.debug("Device %s has %s %s", device, check_label, check_fstype) |
54 | + LOG.debug("Device '%s' has check_label='%s' check_fstype=%s", |
55 | + device, check_label, check_fstype) |
56 | |
57 | if check_label == label and check_fstype == fs_type: |
58 | LOG.debug("Existing file system found at %s", device) |
59 | diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py |
60 | index 5254e18..44857c0 100644 |
61 | --- a/cloudinit/sources/DataSourceAzure.py |
62 | +++ b/cloudinit/sources/DataSourceAzure.py |
63 | @@ -196,8 +196,7 @@ BUILTIN_CLOUD_CONFIG = { |
64 | 'overwrite': True}, |
65 | }, |
66 | 'fs_setup': [{'filesystem': DEFAULT_FS, |
67 | - 'device': 'ephemeral0.1', |
68 | - 'replace_fs': 'ntfs'}], |
69 | + 'device': 'ephemeral0.1'}], |
70 | } |
71 | |
72 | DS_CFG_PATH = ['datasource', DS_NAME] |
73 | @@ -413,56 +412,71 @@ class DataSourceAzureNet(sources.DataSource): |
74 | return |
75 | |
76 | |
77 | +def _partitions_on_device(devpath, maxnum=16): |
78 | + # return a list of tuples (ptnum, path) for each part on devpath |
79 | + for suff in ("-part", "p", ""): |
80 | + found = [] |
81 | + for pnum in range(1, maxnum): |
82 | + ppath = devpath + suff + str(pnum) |
83 | + if os.path.exists(ppath): |
84 | + found.append((pnum, os.path.realpath(ppath))) |
85 | + if found: |
86 | + return found |
87 | + return [] |
88 | + |
89 | + |
90 | +def _has_ntfs_filesystem(devpath): |
91 | + ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) |
92 | + LOG.debug('ntfs_devices found = %s', ntfs_devices) |
93 | + return os.path.realpath(devpath) in ntfs_devices |
94 | + |
95 | + |
96 | def can_dev_be_reformatted(devpath): |
97 | - # determine if the ephemeral block device path devpath |
98 | - # is newly formatted after a resize. |
99 | + """Determine if block device devpath is newly formatted ephemeral. |
100 | + |
101 | + A newly formatted disk will: |
102 | + a.) have a partition table (dos or gpt) |
103 | + b.) have 1 partition that is ntfs formatted, or |
104 | + have 2 partitions with the second partition ntfs formatted. |
105 | + (larger instances with >2TB ephemeral disk have gpt, and will |
106 | + have a microsoft reserved partition as part 1. LP: #1686514) |
107 | + c.) the ntfs partition will have no files other than possibly |
108 | + 'dataloss_warning_readme.txt'""" |
109 | if not os.path.exists(devpath): |
110 | return False, 'device %s does not exist' % devpath |
111 | |
112 | - realpath = os.path.realpath(devpath) |
113 | - LOG.debug('Resolving realpath of %s -> %s', devpath, realpath) |
114 | - |
115 | - # it is possible that the block device might exist, but the kernel |
116 | - # have not yet read the partition table and sent events. we udevadm settle |
117 | - # to hope to resolve that. Better here would probably be to test and see, |
118 | - # and then settle if we didn't find anything and try again. |
119 | - if util.which("udevadm"): |
120 | - util.subp(["udevadm", "settle"]) |
121 | + LOG.debug('Resolving realpath of %s -> %s', devpath, |
122 | + os.path.realpath(devpath)) |
123 | |
124 | # devpath of /dev/sd[a-z] or /dev/disk/cloud/azure_resource |
125 | # where partitions are "<devpath>1" or "<devpath>-part1" or "<devpath>p1" |
126 | - part1path = None |
127 | - for suff in ("-part", "p", ""): |
128 | - cand = devpath + suff + "1" |
129 | - if os.path.exists(cand): |
130 | - if os.path.exists(devpath + suff + "2"): |
131 | - msg = ('device %s had more than 1 partition: %s, %s' % |
132 | - devpath, cand, devpath + suff + "2") |
133 | - return False, msg |
134 | - part1path = cand |
135 | - break |
136 | - |
137 | - if part1path is None: |
138 | + partitions = _partitions_on_device(devpath) |
139 | + if len(partitions) == 0: |
140 | return False, 'device %s was not partitioned' % devpath |
141 | + elif len(partitions) > 2: |
142 | + msg = ('device %s had 3 or more partitions: %s' % |
143 | + (devpath, ' '.join([p[1] for p in partitions]))) |
144 | + return False, msg |
145 | + elif len(partitions) == 2: |
146 | + cand_part, cand_path = partitions[1] |
147 | + else: |
148 | + cand_part, cand_path = partitions[0] |
149 | |
150 | - real_part1path = os.path.realpath(part1path) |
151 | - ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) |
152 | - LOG.debug('ntfs_devices found = %s', ntfs_devices) |
153 | - if real_part1path not in ntfs_devices: |
154 | - msg = ('partition 1 (%s -> %s) on device %s was not ntfs formatted' % |
155 | - (part1path, real_part1path, devpath)) |
156 | + if not _has_ntfs_filesystem(cand_path): |
157 | + msg = ('partition %s (%s) on device %s was not ntfs formatted' % |
158 | + (cand_part, cand_path, devpath)) |
159 | return False, msg |
160 | |
161 | def count_files(mp): |
162 | ignored = set(['dataloss_warning_readme.txt']) |
163 | return len([f for f in os.listdir(mp) if f.lower() not in ignored]) |
164 | |
165 | - bmsg = ('partition 1 (%s -> %s) on device %s was ntfs formatted' % |
166 | - (part1path, real_part1path, devpath)) |
167 | + bmsg = ('partition %s (%s) on device %s was ntfs formatted' % |
168 | + (cand_part, cand_path, devpath)) |
169 | try: |
170 | - file_count = util.mount_cb(part1path, count_files) |
171 | + file_count = util.mount_cb(cand_path, count_files) |
172 | except util.MountFailedError as e: |
173 | - return False, bmsg + ' but mount of %s failed: %s' % (part1path, e) |
174 | + return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) |
175 | |
176 | if file_count != 0: |
177 | return False, bmsg + ' but had %d files on it.' % file_count |
178 | diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py |
179 | index e6b0dcb..67cddeb 100644 |
180 | --- a/tests/unittests/test_datasource/test_azure.py |
181 | +++ b/tests/unittests/test_datasource/test_azure.py |
182 | @@ -1,12 +1,13 @@ |
183 | # This file is part of cloud-init. See LICENSE file for license information. |
184 | |
185 | from cloudinit import helpers |
186 | -from cloudinit.util import b64e, decode_binary, load_file |
187 | -from cloudinit.sources import DataSourceAzure |
188 | +from cloudinit.util import b64e, decode_binary, load_file, write_file |
189 | +from cloudinit.sources import DataSourceAzure as dsaz |
190 | from cloudinit.util import find_freebsd_part |
191 | from cloudinit.util import get_path_dev_freebsd |
192 | |
193 | -from ..helpers import TestCase, populate_dir, mock, ExitStack, PY26, SkipTest |
194 | +from ..helpers import (CiTestCase, TestCase, populate_dir, mock, |
195 | + ExitStack, PY26, SkipTest) |
196 | |
197 | import crypt |
198 | import os |
199 | @@ -98,7 +99,6 @@ class TestAzureDataSource(TestCase): |
200 | self.patches.enter_context(mock.patch.object(module, name, new)) |
201 | |
202 | def _get_mockds(self): |
203 | - mod = DataSourceAzure |
204 | sysctl_out = "dev.storvsc.3.%pnpinfo: "\ |
205 | "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ |
206 | "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n" |
207 | @@ -123,14 +123,14 @@ scbus-1 on xpt0 bus 0 |
208 | <Msft Virtual Disk 1.0> at scbus3 target 1 lun 0 (da1,pass2) |
209 | """ |
210 | self.apply_patches([ |
211 | - (mod, 'get_dev_storvsc_sysctl', mock.MagicMock( |
212 | + (dsaz, 'get_dev_storvsc_sysctl', mock.MagicMock( |
213 | return_value=sysctl_out)), |
214 | - (mod, 'get_camcontrol_dev_bus', mock.MagicMock( |
215 | + (dsaz, 'get_camcontrol_dev_bus', mock.MagicMock( |
216 | return_value=camctl_devbus)), |
217 | - (mod, 'get_camcontrol_dev', mock.MagicMock( |
218 | + (dsaz, 'get_camcontrol_dev', mock.MagicMock( |
219 | return_value=camctl_dev)) |
220 | ]) |
221 | - return mod |
222 | + return dsaz |
223 | |
224 | def _get_ds(self, data, agent_command=None): |
225 | |
226 | @@ -152,8 +152,7 @@ scbus-1 on xpt0 bus 0 |
227 | populate_dir(os.path.join(self.paths.seed_dir, "azure"), |
228 | {'ovf-env.xml': data['ovfcontent']}) |
229 | |
230 | - mod = DataSourceAzure |
231 | - mod.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d |
232 | + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d |
233 | |
234 | self.get_metadata_from_fabric = mock.MagicMock(return_value={ |
235 | 'public-keys': [], |
236 | @@ -162,19 +161,19 @@ scbus-1 on xpt0 bus 0 |
237 | self.instance_id = 'test-instance-id' |
238 | |
239 | self.apply_patches([ |
240 | - (mod, 'list_possible_azure_ds_devs', dsdevs), |
241 | - (mod, 'invoke_agent', _invoke_agent), |
242 | - (mod, 'wait_for_files', _wait_for_files), |
243 | - (mod, 'pubkeys_from_crt_files', _pubkeys_from_crt_files), |
244 | - (mod, 'perform_hostname_bounce', mock.MagicMock()), |
245 | - (mod, 'get_hostname', mock.MagicMock()), |
246 | - (mod, 'set_hostname', mock.MagicMock()), |
247 | - (mod, 'get_metadata_from_fabric', self.get_metadata_from_fabric), |
248 | - (mod.util, 'read_dmi_data', mock.MagicMock( |
249 | + (dsaz, 'list_possible_azure_ds_devs', dsdevs), |
250 | + (dsaz, 'invoke_agent', _invoke_agent), |
251 | + (dsaz, 'wait_for_files', _wait_for_files), |
252 | + (dsaz, 'pubkeys_from_crt_files', _pubkeys_from_crt_files), |
253 | + (dsaz, 'perform_hostname_bounce', mock.MagicMock()), |
254 | + (dsaz, 'get_hostname', mock.MagicMock()), |
255 | + (dsaz, 'set_hostname', mock.MagicMock()), |
256 | + (dsaz, 'get_metadata_from_fabric', self.get_metadata_from_fabric), |
257 | + (dsaz.util, 'read_dmi_data', mock.MagicMock( |
258 | return_value=self.instance_id)), |
259 | ]) |
260 | |
261 | - dsrc = mod.DataSourceAzureNet( |
262 | + dsrc = dsaz.DataSourceAzureNet( |
263 | data.get('sys_cfg', {}), distro=None, paths=self.paths) |
264 | if agent_command is not None: |
265 | dsrc.ds_cfg['agent_command'] = agent_command |
266 | @@ -418,7 +417,7 @@ fdescfs /dev/fd fdescfs rw 0 0 |
267 | cfg = dsrc.get_config_obj() |
268 | |
269 | self.assertEqual(dsrc.device_name_to_device("ephemeral0"), |
270 | - DataSourceAzure.RESOURCE_DISK_PATH) |
271 | + dsaz.RESOURCE_DISK_PATH) |
272 | assert 'disk_setup' in cfg |
273 | assert 'fs_setup' in cfg |
274 | self.assertIsInstance(cfg['disk_setup'], dict) |
275 | @@ -468,14 +467,13 @@ fdescfs /dev/fd fdescfs rw 0 0 |
276 | |
277 | # Make sure that the redacted password on disk is not used by CI |
278 | self.assertNotEqual(dsrc.cfg.get('password'), |
279 | - DataSourceAzure.DEF_PASSWD_REDACTION) |
280 | + dsaz.DEF_PASSWD_REDACTION) |
281 | |
282 | # Make sure that the password was really encrypted |
283 | et = ET.fromstring(on_disk_ovf) |
284 | for elem in et.iter(): |
285 | if 'UserPassword' in elem.tag: |
286 | - self.assertEqual(DataSourceAzure.DEF_PASSWD_REDACTION, |
287 | - elem.text) |
288 | + self.assertEqual(dsaz.DEF_PASSWD_REDACTION, elem.text) |
289 | |
290 | def test_ovf_env_arrives_in_waagent_dir(self): |
291 | xml = construct_valid_ovf_env(data={}, userdata="FOODATA") |
292 | @@ -524,17 +522,17 @@ class TestAzureBounce(TestCase): |
293 | |
294 | def mock_out_azure_moving_parts(self): |
295 | self.patches.enter_context( |
296 | - mock.patch.object(DataSourceAzure, 'invoke_agent')) |
297 | + mock.patch.object(dsaz, 'invoke_agent')) |
298 | self.patches.enter_context( |
299 | - mock.patch.object(DataSourceAzure, 'wait_for_files')) |
300 | + mock.patch.object(dsaz, 'wait_for_files')) |
301 | self.patches.enter_context( |
302 | - mock.patch.object(DataSourceAzure, 'list_possible_azure_ds_devs', |
303 | + mock.patch.object(dsaz, 'list_possible_azure_ds_devs', |
304 | mock.MagicMock(return_value=[]))) |
305 | self.patches.enter_context( |
306 | - mock.patch.object(DataSourceAzure, 'get_metadata_from_fabric', |
307 | + mock.patch.object(dsaz, 'get_metadata_from_fabric', |
308 | mock.MagicMock(return_value={}))) |
309 | self.patches.enter_context( |
310 | - mock.patch.object(DataSourceAzure.util, 'read_dmi_data', |
311 | + mock.patch.object(dsaz.util, 'read_dmi_data', |
312 | mock.MagicMock(return_value='test-instance-id'))) |
313 | |
314 | def setUp(self): |
315 | @@ -543,13 +541,13 @@ class TestAzureBounce(TestCase): |
316 | self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') |
317 | self.paths = helpers.Paths({'cloud_dir': self.tmp}) |
318 | self.addCleanup(shutil.rmtree, self.tmp) |
319 | - DataSourceAzure.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d |
320 | + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d |
321 | self.patches = ExitStack() |
322 | self.mock_out_azure_moving_parts() |
323 | self.get_hostname = self.patches.enter_context( |
324 | - mock.patch.object(DataSourceAzure, 'get_hostname')) |
325 | + mock.patch.object(dsaz, 'get_hostname')) |
326 | self.set_hostname = self.patches.enter_context( |
327 | - mock.patch.object(DataSourceAzure, 'set_hostname')) |
328 | + mock.patch.object(dsaz, 'set_hostname')) |
329 | self.subp = self.patches.enter_context( |
330 | mock.patch('cloudinit.sources.DataSourceAzure.util.subp')) |
331 | |
332 | @@ -560,7 +558,7 @@ class TestAzureBounce(TestCase): |
333 | if ovfcontent is not None: |
334 | populate_dir(os.path.join(self.paths.seed_dir, "azure"), |
335 | {'ovf-env.xml': ovfcontent}) |
336 | - dsrc = DataSourceAzure.DataSourceAzureNet( |
337 | + dsrc = dsaz.DataSourceAzureNet( |
338 | {}, distro=None, paths=self.paths) |
339 | if agent_command is not None: |
340 | dsrc.ds_cfg['agent_command'] = agent_command |
341 | @@ -673,7 +671,7 @@ class TestAzureBounce(TestCase): |
342 | |
343 | def test_default_bounce_command_used_by_default(self): |
344 | cmd = 'default-bounce-command' |
345 | - DataSourceAzure.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd |
346 | + dsaz.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd |
347 | cfg = {'hostname_bounce': {'policy': 'force'}} |
348 | data = self.get_ovf_env_with_dscfg('some-hostname', cfg) |
349 | self._get_ds(data, agent_command=['not', '__builtin__']).get_data() |
350 | @@ -701,15 +699,208 @@ class TestAzureBounce(TestCase): |
351 | class TestReadAzureOvf(TestCase): |
352 | def test_invalid_xml_raises_non_azure_ds(self): |
353 | invalid_xml = "<foo>" + construct_valid_ovf_env(data={}) |
354 | - self.assertRaises(DataSourceAzure.BrokenAzureDataSource, |
355 | - DataSourceAzure.read_azure_ovf, invalid_xml) |
356 | + self.assertRaises(dsaz.BrokenAzureDataSource, |
357 | + dsaz.read_azure_ovf, invalid_xml) |
358 | |
359 | def test_load_with_pubkeys(self): |
360 | mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}] |
361 | pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] |
362 | content = construct_valid_ovf_env(pubkeys=pubkeys) |
363 | - (_md, _ud, cfg) = DataSourceAzure.read_azure_ovf(content) |
364 | + (_md, _ud, cfg) = dsaz.read_azure_ovf(content) |
365 | for mypk in mypklist: |
366 | self.assertIn(mypk, cfg['_pubkeys']) |
367 | |
368 | + |
369 | +class TestCanDevBeReformatted(CiTestCase): |
370 | + warning_file = 'dataloss_warning_readme.txt' |
371 | + |
372 | + def _domock(self, mockpath, sattr=None): |
373 | + patcher = mock.patch(mockpath) |
374 | + setattr(self, sattr, patcher.start()) |
375 | + self.addCleanup(patcher.stop) |
376 | + |
377 | + def setUp(self): |
378 | + super(TestCanDevBeReformatted, self).setUp() |
379 | + |
380 | + def patchup(self, devs): |
381 | + bypath = {} |
382 | + for path, data in devs.items(): |
383 | + bypath[path] = data |
384 | + if 'realpath' in data: |
385 | + bypath[data['realpath']] = data |
386 | + for ppath, pdata in data.get('partitions', {}).items(): |
387 | + bypath[ppath] = pdata |
388 | + if 'realpath' in data: |
389 | + bypath[pdata['realpath']] = pdata |
390 | + |
391 | + def realpath(d): |
392 | + return bypath[d].get('realpath', d) |
393 | + |
394 | + def partitions_on_device(devpath): |
395 | + parts = bypath.get(devpath, {}).get('partitions', {}) |
396 | + ret = [] |
397 | + for path, data in parts.items(): |
398 | + ret.append((data.get('num'), realpath(path))) |
399 | + # return sorted by partition number |
400 | + return sorted(ret, key=lambda d: d[0]) |
401 | + |
402 | + def mount_cb(device, callback): |
403 | + p = self.tmp_dir() |
404 | + for f in bypath.get(device).get('files', []): |
405 | + write_file(os.path.join(p, f), content=f) |
406 | + return callback(p) |
407 | + |
408 | + def has_ntfs_fs(device): |
409 | + return bypath.get(device, {}).get('fs') == 'ntfs' |
410 | + |
411 | + p = 'cloudinit.sources.DataSourceAzure' |
412 | + self._domock(p + "._partitions_on_device", 'm_partitions_on_device') |
413 | + self._domock(p + "._has_ntfs_filesystem", 'm_has_ntfs_filesystem') |
414 | + self._domock(p + ".util.mount_cb", 'm_mount_cb') |
415 | + self._domock(p + ".os.path.realpath", 'm_realpath') |
416 | + self._domock(p + ".os.path.exists", 'm_exists') |
417 | + |
418 | + self.m_exists.side_effect = lambda p: p in bypath |
419 | + self.m_realpath.side_effect = realpath |
420 | + self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs |
421 | + self.m_mount_cb.side_effect = mount_cb |
422 | + self.m_partitions_on_device.side_effect = partitions_on_device |
423 | + |
424 | + def test_three_partitions_is_false(self): |
425 | + """A disk with 3 partitions can not be formatted.""" |
426 | + self.patchup({ |
427 | + '/dev/sda': { |
428 | + 'partitions': { |
429 | + '/dev/sda1': {'num': 1}, |
430 | + '/dev/sda2': {'num': 2}, |
431 | + '/dev/sda3': {'num': 3}, |
432 | + }}}) |
433 | + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") |
434 | + self.assertEqual(False, value) |
435 | + self.assertIn("3 or more", msg.lower()) |
436 | + |
437 | + def test_no_partitions_is_false(self): |
438 | + """A disk with no partitions can not be formatted.""" |
439 | + self.patchup({'/dev/sda': {}}) |
440 | + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") |
441 | + self.assertEqual(False, value) |
442 | + self.assertIn("not partitioned", msg.lower()) |
443 | + |
444 | + def test_two_partitions_not_ntfs_false(self): |
445 | + """2 partitions and 2nd not ntfs can not be formatted.""" |
446 | + self.patchup({ |
447 | + '/dev/sda': { |
448 | + 'partitions': { |
449 | + '/dev/sda1': {'num': 1}, |
450 | + '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []}, |
451 | + }}}) |
452 | + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") |
453 | + self.assertEqual(False, value) |
454 | + self.assertIn("not ntfs", msg.lower()) |
455 | + |
456 | + def test_two_partitions_ntfs_populated_false(self): |
457 | + """2 partitions and populated ntfs fs on 2nd can not be formatted.""" |
458 | + self.patchup({ |
459 | + '/dev/sda': { |
460 | + 'partitions': { |
461 | + '/dev/sda1': {'num': 1}, |
462 | + '/dev/sda2': {'num': 2, 'fs': 'ntfs', |
463 | + 'files': ['secret.txt']}, |
464 | + }}}) |
465 | + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") |
466 | + self.assertEqual(False, value) |
467 | + self.assertIn("files on it", msg.lower()) |
468 | + |
469 | + def test_two_partitions_ntfs_empty_is_true(self): |
470 | + """2 partitions and empty ntfs fs on 2nd can be formatted.""" |
471 | + self.patchup({ |
472 | + '/dev/sda': { |
473 | + 'partitions': { |
474 | + '/dev/sda1': {'num': 1}, |
475 | + '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []}, |
476 | + }}}) |
477 | + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") |
478 | + self.assertEqual(True, value) |
479 | + self.assertIn("safe for", msg.lower()) |
480 | + |
481 | + def test_one_partition_not_ntfs_false(self): |
482 | + """1 partition with fs other than ntfs can not be formatted.""" |
483 | + self.patchup({ |
484 | + '/dev/sda': { |
485 | + 'partitions': { |
486 | + '/dev/sda1': {'num': 1, 'fs': 'zfs'}, |
487 | + }}}) |
488 | + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") |
489 | + self.assertEqual(False, value) |
490 | + self.assertIn("not ntfs", msg.lower()) |
491 | + |
492 | + def test_one_partition_ntfs_populated_false(self): |
493 | + """1 mountable ntfs partition with many files can not be formatted.""" |
494 | + self.patchup({ |
495 | + '/dev/sda': { |
496 | + 'partitions': { |
497 | + '/dev/sda1': {'num': 1, 'fs': 'ntfs', |
498 | + 'files': ['file1.txt', 'file2.exe']}, |
499 | + }}}) |
500 | + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") |
501 | + self.assertEqual(False, value) |
502 | + self.assertIn("files on it", msg.lower()) |
503 | + |
504 | + def test_one_partition_ntfs_empty_is_true(self): |
505 | + """1 mountable ntfs partition and no files can be formatted.""" |
506 | + self.patchup({ |
507 | + '/dev/sda': { |
508 | + 'partitions': { |
509 | + '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} |
510 | + }}}) |
511 | + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") |
512 | + self.assertEqual(True, value) |
513 | + self.assertIn("safe for", msg.lower()) |
514 | + |
515 | + def test_one_partition_ntfs_empty_with_dataloss_file_is_true(self): |
516 | + """1 mountable ntfs partition and only warn file can be formatted.""" |
517 | + self.patchup({ |
518 | + '/dev/sda': { |
519 | + 'partitions': { |
520 | + '/dev/sda1': {'num': 1, 'fs': 'ntfs', |
521 | + 'files': ['dataloss_warning_readme.txt']} |
522 | + }}}) |
523 | + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") |
524 | + self.assertEqual(True, value) |
525 | + self.assertIn("safe for", msg.lower()) |
526 | + |
527 | + def test_one_partition_through_realpath_is_true(self): |
528 | + """A symlink to a device with 1 ntfs partition can be formatted.""" |
529 | + epath = '/dev/disk/cloud/azure_resource' |
530 | + self.patchup({ |
531 | + epath: { |
532 | + 'realpath': '/dev/sdb', |
533 | + 'partitions': { |
534 | + epath + '-part1': { |
535 | + 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], |
536 | + 'realpath': '/dev/sdb1'} |
537 | + }}}) |
538 | + value, msg = dsaz.can_dev_be_reformatted(epath) |
539 | + self.assertEqual(True, value) |
540 | + self.assertIn("safe for", msg.lower()) |
541 | + |
542 | + def test_three_partition_through_realpath_is_false(self): |
543 | + """A symlink to a device with 3 partitions can not be formatted.""" |
544 | + epath = '/dev/disk/cloud/azure_resource' |
545 | + self.patchup({ |
546 | + epath: { |
547 | + 'realpath': '/dev/sdb', |
548 | + 'partitions': { |
549 | + epath + '-part1': { |
550 | + 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], |
551 | + 'realpath': '/dev/sdb1'}, |
552 | + epath + '-part2': {'num': 2, 'fs': 'ext3', |
553 | + 'realpath': '/dev/sdb2'}, |
554 | + epath + '-part3': {'num': 3, 'fs': 'ext', |
555 | + 'realpath': '/dev/sdb3'} |
556 | + }}}) |
557 | + value, msg = dsaz.can_dev_be_reformatted(epath) |
558 | + self.assertEqual(False, value) |
559 | + self.assertIn("3 or more", msg.lower()) |
560 | + |
561 | # vi: ts=4 expandtab |
562 | diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py |
563 | index 9f00d46..68fc6aa 100644 |
564 | --- a/tests/unittests/test_handler/test_handler_disk_setup.py |
565 | +++ b/tests/unittests/test_handler/test_handler_disk_setup.py |
566 | @@ -151,6 +151,22 @@ class TestUpdateFsSetupDevices(TestCase): |
567 | 'filesystem': 'xfs' |
568 | }, fs_setup) |
569 | |
570 | + def test_dotted_devname_populates_partition(self): |
571 | + fs_setup = { |
572 | + 'device': 'ephemeral0.1', |
573 | + 'label': 'test2', |
574 | + 'filesystem': 'xfs' |
575 | + } |
576 | + cc_disk_setup.update_fs_setup_devices([fs_setup], |
577 | + lambda device: device) |
578 | + self.assertEqual({ |
579 | + '_origname': 'ephemeral0.1', |
580 | + 'device': 'ephemeral0', |
581 | + 'partition': '1', |
582 | + 'label': 'test2', |
583 | + 'filesystem': 'xfs' |
584 | + }, fs_setup) |
585 | + |
586 | |
587 | @mock.patch('cloudinit.config.cc_disk_setup.find_device_node', |
588 | return_value=('/dev/xdb1', False)) |
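
Aside from the Azure changes, the new test above pins down the
'<device>.<partition>' convention handled by update_fs_setup_devices().
A standalone sketch of just that naming rule (the helper name
split_dotted_devname is hypothetical, not cloud-init API):

    def split_dotted_devname(name):
        # 'ephemeral0.1' -> ('ephemeral0', '1'); no dot -> (name, None).
        base, _, part = name.partition('.')
        return base, part or None

    assert split_dotted_devname('ephemeral0.1') == ('ephemeral0', '1')
    assert split_dotted_devname('ephemeral0') == ('ephemeral0', None)

As the diff notes, when both a dotted name and an explicit 'partition' key
are present, the dotted name wins and a warning is logged.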
Server Team CI bot (server-team-bot) wrote:
PASSED: Continuous integration, rev:66073f870cc2a7a3794c59293ca9bf715dddf1f6
https://jenkins.ubuntu.com/server/job/cloud-init-ci/325/
Executed test runs:
SUCCESS: https://jenkins.ubuntu.com/server/job/cloud-init-ci/nodes=metal-amd64/325
SUCCESS: https://jenkins.ubuntu.com/server/job/cloud-init-ci/nodes=metal-arm64/325
SUCCESS: https://jenkins.ubuntu.com/server/job/cloud-init-ci/nodes=metal-ppc64el/325
SUCCESS: https://jenkins.ubuntu.com/server/job/cloud-init-ci/nodes=vm-i386/325
Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/325/rebuild