Merge ~smoser/cloud-init:bug/1686514-azure-reformat-large into cloud-init:master

Proposed by Scott Moser
Status: Merged
Merged at revision: 31b6f173280fcc8e9be2732ae2e9b6f6c89679d4
Proposed branch: ~smoser/cloud-init:bug/1686514-azure-reformat-large
Merge into: cloud-init:master
Diff against target: 588 lines (+307/-77)
4 files modified
cloudinit/config/cc_disk_setup.py (+14/-5)
cloudinit/sources/DataSourceAzure.py (+49/-35)
tests/unittests/test_datasource/test_azure.py (+228/-37)
tests/unittests/test_handler/test_handler_disk_setup.py (+16/-0)
Reviewer Review Type Date Requested Status
Scott Moser Approve
Server Team CI bot continuous-integration Approve
Ryan Harper Needs Fixing
Review via email: mp+323420@code.launchpad.net

Commit message

Azure: fix reformatting of ephemeral disks on resize to large types.

Large instance types have a different disk format on the newly
partitioned ephemeral drive. So we have to adjust the logic in the
Azure datasource to recognize that a disk with 2 partitions and
an empty ntfs filesystem on the second one is acceptable.

This also adjusts the datasource's builtin fs_setup config to remove
the 'replace_fs' entry. This entry was previously ignored, and confusing.
I've clarified the doc on that also.

LP: #1686514

To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote :
review: Approve (continuous-integration)
Revision history for this message
Ryan Harper (raharper) :
Revision history for this message
Server Team CI bot (server-team-bot) wrote :
review: Approve (continuous-integration)
Revision history for this message
Scott Moser (smoser) :
Revision history for this message
Server Team CI bot (server-team-bot) wrote :
review: Approve (continuous-integration)
Revision history for this message
Server Team CI bot (server-team-bot) wrote :
review: Approve (continuous-integration)
Revision history for this message
Ryan Harper (raharper) wrote :

Please look at the discussion of realpath; I believe the code works as you have it but I'd prefer to be explicit in where we use realpath.

review: Needs Fixing
Revision history for this message
Scott Moser (smoser) :
Revision history for this message
Ryan Harper (raharper) :
Revision history for this message
Server Team CI bot (server-team-bot) wrote :
review: Approve (continuous-integration)
Revision history for this message
Scott Moser (smoser) :
Revision history for this message
Server Team CI bot (server-team-bot) wrote :
review: Approve (continuous-integration)
Revision history for this message
Scott Moser (smoser) wrote :

Going to merge this based on

05/17/17 10:09:05 <smoser> rharper, thanks. can you read my comments on my azure branch ? want to get that landed today.
05/17/17 10:09:12 <smoser> if you have somethign you want me to review, please po int.
05/17/17 10:09:58 <rharper> smoser: ack; I read them; I still don't like the side-effects but I won't object any more;

Revision history for this message
Scott Moser (smoser) :
review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 6f827dd..29eb5dd 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -68,6 +68,9 @@ specified using ``filesystem``.
68 Using ``overwrite: true`` for filesystems is dangerous and can lead to data68 Using ``overwrite: true`` for filesystems is dangerous and can lead to data
69 loss, so double check the entry in ``fs_setup``.69 loss, so double check the entry in ``fs_setup``.
7070
71.. note::
72 ``replace_fs`` is ignored unless ``partition`` is ``auto`` or ``any``.
73
71**Internal name:** ``cc_disk_setup``74**Internal name:** ``cc_disk_setup``
7275
73**Module frequency:** per instance76**Module frequency:** per instance
@@ -127,7 +130,7 @@ def handle(_name, cfg, cloud, log, _args):
127 log.debug("Partitioning disks: %s", str(disk_setup))130 log.debug("Partitioning disks: %s", str(disk_setup))
128 for disk, definition in disk_setup.items():131 for disk, definition in disk_setup.items():
129 if not isinstance(definition, dict):132 if not isinstance(definition, dict):
130 log.warn("Invalid disk definition for %s" % disk)133 log.warning("Invalid disk definition for %s" % disk)
131 continue134 continue
132135
133 try:136 try:
@@ -144,7 +147,7 @@ def handle(_name, cfg, cloud, log, _args):
144 update_fs_setup_devices(fs_setup, cloud.device_name_to_device)147 update_fs_setup_devices(fs_setup, cloud.device_name_to_device)
145 for definition in fs_setup:148 for definition in fs_setup:
146 if not isinstance(definition, dict):149 if not isinstance(definition, dict):
147 log.warn("Invalid file system definition: %s" % definition)150 log.warning("Invalid file system definition: %s" % definition)
148 continue151 continue
149152
150 try:153 try:
@@ -199,8 +202,13 @@ def update_fs_setup_devices(disk_setup, tformer):
199 definition['_origname'] = origname202 definition['_origname'] = origname
200 definition['device'] = tformed203 definition['device'] = tformed
201204
202 if part and 'partition' in definition:205 if part:
203 definition['_partition'] = definition['partition']206 # In origname with <dev>.N, N overrides 'partition' key.
207 if 'partition' in definition:
208 LOG.warning("Partition '%s' from dotted device name '%s' "
209 "overrides 'partition' key in %s", part, origname,
210 definition)
211 definition['_partition'] = definition['partition']
204 definition['partition'] = part212 definition['partition'] = part
205213
206214
@@ -849,7 +857,8 @@ def mkfs(fs_cfg):
849 # Check to see if the fs already exists857 # Check to see if the fs already exists
850 LOG.debug("Checking device %s", device)858 LOG.debug("Checking device %s", device)
851 check_label, check_fstype, _ = check_fs(device)859 check_label, check_fstype, _ = check_fs(device)
852 LOG.debug("Device %s has %s %s", device, check_label, check_fstype)860 LOG.debug("Device '%s' has check_label='%s' check_fstype=%s",
861 device, check_label, check_fstype)
853862
854 if check_label == label and check_fstype == fs_type:863 if check_label == label and check_fstype == fs_type:
855 LOG.debug("Existing file system found at %s", device)864 LOG.debug("Existing file system found at %s", device)
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 5254e18..44857c0 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -196,8 +196,7 @@ BUILTIN_CLOUD_CONFIG = {
196 'overwrite': True},196 'overwrite': True},
197 },197 },
198 'fs_setup': [{'filesystem': DEFAULT_FS,198 'fs_setup': [{'filesystem': DEFAULT_FS,
199 'device': 'ephemeral0.1',199 'device': 'ephemeral0.1'}],
200 'replace_fs': 'ntfs'}],
201}200}
202201
203DS_CFG_PATH = ['datasource', DS_NAME]202DS_CFG_PATH = ['datasource', DS_NAME]
@@ -413,56 +412,71 @@ class DataSourceAzureNet(sources.DataSource):
413 return412 return
414413
415414
415def _partitions_on_device(devpath, maxnum=16):
416 # return a list of tuples (ptnum, path) for each part on devpath
417 for suff in ("-part", "p", ""):
418 found = []
419 for pnum in range(1, maxnum):
420 ppath = devpath + suff + str(pnum)
421 if os.path.exists(ppath):
422 found.append((pnum, os.path.realpath(ppath)))
423 if found:
424 return found
425 return []
426
427
428def _has_ntfs_filesystem(devpath):
429 ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True)
430 LOG.debug('ntfs_devices found = %s', ntfs_devices)
431 return os.path.realpath(devpath) in ntfs_devices
432
433
416def can_dev_be_reformatted(devpath):434def can_dev_be_reformatted(devpath):
417 # determine if the ephemeral block device path devpath435 """Determine if block device devpath is newly formatted ephemeral.
418 # is newly formatted after a resize.436
437 A newly formatted disk will:
438 a.) have a partition table (dos or gpt)
439 b.) have 1 partition that is ntfs formatted, or
440 have 2 partitions with the second partition ntfs formatted.
441 (larger instances with >2TB ephemeral disk have gpt, and will
442 have a microsoft reserved partition as part 1. LP: #1686514)
443 c.) the ntfs partition will have no files other than possibly
444 'dataloss_warning_readme.txt'"""
419 if not os.path.exists(devpath):445 if not os.path.exists(devpath):
420 return False, 'device %s does not exist' % devpath446 return False, 'device %s does not exist' % devpath
421447
422 realpath = os.path.realpath(devpath)448 LOG.debug('Resolving realpath of %s -> %s', devpath,
423 LOG.debug('Resolving realpath of %s -> %s', devpath, realpath)449 os.path.realpath(devpath))
424
425 # it is possible that the block device might exist, but the kernel
426 # have not yet read the partition table and sent events. we udevadm settle
427 # to hope to resolve that. Better here would probably be to test and see,
428 # and then settle if we didn't find anything and try again.
429 if util.which("udevadm"):
430 util.subp(["udevadm", "settle"])
431450
432 # devpath of /dev/sd[a-z] or /dev/disk/cloud/azure_resource451 # devpath of /dev/sd[a-z] or /dev/disk/cloud/azure_resource
433 # where partitions are "<devpath>1" or "<devpath>-part1" or "<devpath>p1"452 # where partitions are "<devpath>1" or "<devpath>-part1" or "<devpath>p1"
434 part1path = None453 partitions = _partitions_on_device(devpath)
435 for suff in ("-part", "p", ""):454 if len(partitions) == 0:
436 cand = devpath + suff + "1"
437 if os.path.exists(cand):
438 if os.path.exists(devpath + suff + "2"):
439 msg = ('device %s had more than 1 partition: %s, %s' %
440 devpath, cand, devpath + suff + "2")
441 return False, msg
442 part1path = cand
443 break
444
445 if part1path is None:
446 return False, 'device %s was not partitioned' % devpath455 return False, 'device %s was not partitioned' % devpath
456 elif len(partitions) > 2:
457 msg = ('device %s had 3 or more partitions: %s' %
458 (devpath, ' '.join([p[1] for p in partitions])))
459 return False, msg
460 elif len(partitions) == 2:
461 cand_part, cand_path = partitions[1]
462 else:
463 cand_part, cand_path = partitions[0]
447464
448 real_part1path = os.path.realpath(part1path)465 if not _has_ntfs_filesystem(cand_path):
449 ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True)466 msg = ('partition %s (%s) on device %s was not ntfs formatted' %
450 LOG.debug('ntfs_devices found = %s', ntfs_devices)467 (cand_part, cand_path, devpath))
451 if real_part1path not in ntfs_devices:
452 msg = ('partition 1 (%s -> %s) on device %s was not ntfs formatted' %
453 (part1path, real_part1path, devpath))
454 return False, msg468 return False, msg
455469
456 def count_files(mp):470 def count_files(mp):
457 ignored = set(['dataloss_warning_readme.txt'])471 ignored = set(['dataloss_warning_readme.txt'])
458 return len([f for f in os.listdir(mp) if f.lower() not in ignored])472 return len([f for f in os.listdir(mp) if f.lower() not in ignored])
459473
460 bmsg = ('partition 1 (%s -> %s) on device %s was ntfs formatted' %474 bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
461 (part1path, real_part1path, devpath))475 (cand_part, cand_path, devpath))
462 try:476 try:
463 file_count = util.mount_cb(part1path, count_files)477 file_count = util.mount_cb(cand_path, count_files)
464 except util.MountFailedError as e:478 except util.MountFailedError as e:
465 return False, bmsg + ' but mount of %s failed: %s' % (part1path, e)479 return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)
466480
467 if file_count != 0:481 if file_count != 0:
468 return False, bmsg + ' but had %d files on it.' % file_count482 return False, bmsg + ' but had %d files on it.' % file_count
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index e6b0dcb..67cddeb 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -1,12 +1,13 @@
1# This file is part of cloud-init. See LICENSE file for license information.1# This file is part of cloud-init. See LICENSE file for license information.
22
3from cloudinit import helpers3from cloudinit import helpers
4from cloudinit.util import b64e, decode_binary, load_file4from cloudinit.util import b64e, decode_binary, load_file, write_file
5from cloudinit.sources import DataSourceAzure5from cloudinit.sources import DataSourceAzure as dsaz
6from cloudinit.util import find_freebsd_part6from cloudinit.util import find_freebsd_part
7from cloudinit.util import get_path_dev_freebsd7from cloudinit.util import get_path_dev_freebsd
88
9from ..helpers import TestCase, populate_dir, mock, ExitStack, PY26, SkipTest9from ..helpers import (CiTestCase, TestCase, populate_dir, mock,
10 ExitStack, PY26, SkipTest)
1011
11import crypt12import crypt
12import os13import os
@@ -98,7 +99,6 @@ class TestAzureDataSource(TestCase):
98 self.patches.enter_context(mock.patch.object(module, name, new))99 self.patches.enter_context(mock.patch.object(module, name, new))
99100
100 def _get_mockds(self):101 def _get_mockds(self):
101 mod = DataSourceAzure
102 sysctl_out = "dev.storvsc.3.%pnpinfo: "\102 sysctl_out = "dev.storvsc.3.%pnpinfo: "\
103 "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\103 "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\
104 "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n"104 "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n"
@@ -123,14 +123,14 @@ scbus-1 on xpt0 bus 0
123<Msft Virtual Disk 1.0> at scbus3 target 1 lun 0 (da1,pass2)123<Msft Virtual Disk 1.0> at scbus3 target 1 lun 0 (da1,pass2)
124 """124 """
125 self.apply_patches([125 self.apply_patches([
126 (mod, 'get_dev_storvsc_sysctl', mock.MagicMock(126 (dsaz, 'get_dev_storvsc_sysctl', mock.MagicMock(
127 return_value=sysctl_out)),127 return_value=sysctl_out)),
128 (mod, 'get_camcontrol_dev_bus', mock.MagicMock(128 (dsaz, 'get_camcontrol_dev_bus', mock.MagicMock(
129 return_value=camctl_devbus)),129 return_value=camctl_devbus)),
130 (mod, 'get_camcontrol_dev', mock.MagicMock(130 (dsaz, 'get_camcontrol_dev', mock.MagicMock(
131 return_value=camctl_dev))131 return_value=camctl_dev))
132 ])132 ])
133 return mod133 return dsaz
134134
135 def _get_ds(self, data, agent_command=None):135 def _get_ds(self, data, agent_command=None):
136136
@@ -152,8 +152,7 @@ scbus-1 on xpt0 bus 0
152 populate_dir(os.path.join(self.paths.seed_dir, "azure"),152 populate_dir(os.path.join(self.paths.seed_dir, "azure"),
153 {'ovf-env.xml': data['ovfcontent']})153 {'ovf-env.xml': data['ovfcontent']})
154154
155 mod = DataSourceAzure155 dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
156 mod.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
157156
158 self.get_metadata_from_fabric = mock.MagicMock(return_value={157 self.get_metadata_from_fabric = mock.MagicMock(return_value={
159 'public-keys': [],158 'public-keys': [],
@@ -162,19 +161,19 @@ scbus-1 on xpt0 bus 0
162 self.instance_id = 'test-instance-id'161 self.instance_id = 'test-instance-id'
163162
164 self.apply_patches([163 self.apply_patches([
165 (mod, 'list_possible_azure_ds_devs', dsdevs),164 (dsaz, 'list_possible_azure_ds_devs', dsdevs),
166 (mod, 'invoke_agent', _invoke_agent),165 (dsaz, 'invoke_agent', _invoke_agent),
167 (mod, 'wait_for_files', _wait_for_files),166 (dsaz, 'wait_for_files', _wait_for_files),
168 (mod, 'pubkeys_from_crt_files', _pubkeys_from_crt_files),167 (dsaz, 'pubkeys_from_crt_files', _pubkeys_from_crt_files),
169 (mod, 'perform_hostname_bounce', mock.MagicMock()),168 (dsaz, 'perform_hostname_bounce', mock.MagicMock()),
170 (mod, 'get_hostname', mock.MagicMock()),169 (dsaz, 'get_hostname', mock.MagicMock()),
171 (mod, 'set_hostname', mock.MagicMock()),170 (dsaz, 'set_hostname', mock.MagicMock()),
172 (mod, 'get_metadata_from_fabric', self.get_metadata_from_fabric),171 (dsaz, 'get_metadata_from_fabric', self.get_metadata_from_fabric),
173 (mod.util, 'read_dmi_data', mock.MagicMock(172 (dsaz.util, 'read_dmi_data', mock.MagicMock(
174 return_value=self.instance_id)),173 return_value=self.instance_id)),
175 ])174 ])
176175
177 dsrc = mod.DataSourceAzureNet(176 dsrc = dsaz.DataSourceAzureNet(
178 data.get('sys_cfg', {}), distro=None, paths=self.paths)177 data.get('sys_cfg', {}), distro=None, paths=self.paths)
179 if agent_command is not None:178 if agent_command is not None:
180 dsrc.ds_cfg['agent_command'] = agent_command179 dsrc.ds_cfg['agent_command'] = agent_command
@@ -418,7 +417,7 @@ fdescfs /dev/fd fdescfs rw 0 0
418 cfg = dsrc.get_config_obj()417 cfg = dsrc.get_config_obj()
419418
420 self.assertEqual(dsrc.device_name_to_device("ephemeral0"),419 self.assertEqual(dsrc.device_name_to_device("ephemeral0"),
421 DataSourceAzure.RESOURCE_DISK_PATH)420 dsaz.RESOURCE_DISK_PATH)
422 assert 'disk_setup' in cfg421 assert 'disk_setup' in cfg
423 assert 'fs_setup' in cfg422 assert 'fs_setup' in cfg
424 self.assertIsInstance(cfg['disk_setup'], dict)423 self.assertIsInstance(cfg['disk_setup'], dict)
@@ -468,14 +467,13 @@ fdescfs /dev/fd fdescfs rw 0 0
468467
469 # Make sure that the redacted password on disk is not used by CI468 # Make sure that the redacted password on disk is not used by CI
470 self.assertNotEqual(dsrc.cfg.get('password'),469 self.assertNotEqual(dsrc.cfg.get('password'),
471 DataSourceAzure.DEF_PASSWD_REDACTION)470 dsaz.DEF_PASSWD_REDACTION)
472471
473 # Make sure that the password was really encrypted472 # Make sure that the password was really encrypted
474 et = ET.fromstring(on_disk_ovf)473 et = ET.fromstring(on_disk_ovf)
475 for elem in et.iter():474 for elem in et.iter():
476 if 'UserPassword' in elem.tag:475 if 'UserPassword' in elem.tag:
477 self.assertEqual(DataSourceAzure.DEF_PASSWD_REDACTION,476 self.assertEqual(dsaz.DEF_PASSWD_REDACTION, elem.text)
478 elem.text)
479477
480 def test_ovf_env_arrives_in_waagent_dir(self):478 def test_ovf_env_arrives_in_waagent_dir(self):
481 xml = construct_valid_ovf_env(data={}, userdata="FOODATA")479 xml = construct_valid_ovf_env(data={}, userdata="FOODATA")
@@ -524,17 +522,17 @@ class TestAzureBounce(TestCase):
524522
525 def mock_out_azure_moving_parts(self):523 def mock_out_azure_moving_parts(self):
526 self.patches.enter_context(524 self.patches.enter_context(
527 mock.patch.object(DataSourceAzure, 'invoke_agent'))525 mock.patch.object(dsaz, 'invoke_agent'))
528 self.patches.enter_context(526 self.patches.enter_context(
529 mock.patch.object(DataSourceAzure, 'wait_for_files'))527 mock.patch.object(dsaz, 'wait_for_files'))
530 self.patches.enter_context(528 self.patches.enter_context(
531 mock.patch.object(DataSourceAzure, 'list_possible_azure_ds_devs',529 mock.patch.object(dsaz, 'list_possible_azure_ds_devs',
532 mock.MagicMock(return_value=[])))530 mock.MagicMock(return_value=[])))
533 self.patches.enter_context(531 self.patches.enter_context(
534 mock.patch.object(DataSourceAzure, 'get_metadata_from_fabric',532 mock.patch.object(dsaz, 'get_metadata_from_fabric',
535 mock.MagicMock(return_value={})))533 mock.MagicMock(return_value={})))
536 self.patches.enter_context(534 self.patches.enter_context(
537 mock.patch.object(DataSourceAzure.util, 'read_dmi_data',535 mock.patch.object(dsaz.util, 'read_dmi_data',
538 mock.MagicMock(return_value='test-instance-id')))536 mock.MagicMock(return_value='test-instance-id')))
539537
540 def setUp(self):538 def setUp(self):
@@ -543,13 +541,13 @@ class TestAzureBounce(TestCase):
543 self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent')541 self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent')
544 self.paths = helpers.Paths({'cloud_dir': self.tmp})542 self.paths = helpers.Paths({'cloud_dir': self.tmp})
545 self.addCleanup(shutil.rmtree, self.tmp)543 self.addCleanup(shutil.rmtree, self.tmp)
546 DataSourceAzure.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d544 dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
547 self.patches = ExitStack()545 self.patches = ExitStack()
548 self.mock_out_azure_moving_parts()546 self.mock_out_azure_moving_parts()
549 self.get_hostname = self.patches.enter_context(547 self.get_hostname = self.patches.enter_context(
550 mock.patch.object(DataSourceAzure, 'get_hostname'))548 mock.patch.object(dsaz, 'get_hostname'))
551 self.set_hostname = self.patches.enter_context(549 self.set_hostname = self.patches.enter_context(
552 mock.patch.object(DataSourceAzure, 'set_hostname'))550 mock.patch.object(dsaz, 'set_hostname'))
553 self.subp = self.patches.enter_context(551 self.subp = self.patches.enter_context(
554 mock.patch('cloudinit.sources.DataSourceAzure.util.subp'))552 mock.patch('cloudinit.sources.DataSourceAzure.util.subp'))
555553
@@ -560,7 +558,7 @@ class TestAzureBounce(TestCase):
560 if ovfcontent is not None:558 if ovfcontent is not None:
561 populate_dir(os.path.join(self.paths.seed_dir, "azure"),559 populate_dir(os.path.join(self.paths.seed_dir, "azure"),
562 {'ovf-env.xml': ovfcontent})560 {'ovf-env.xml': ovfcontent})
563 dsrc = DataSourceAzure.DataSourceAzureNet(561 dsrc = dsaz.DataSourceAzureNet(
564 {}, distro=None, paths=self.paths)562 {}, distro=None, paths=self.paths)
565 if agent_command is not None:563 if agent_command is not None:
566 dsrc.ds_cfg['agent_command'] = agent_command564 dsrc.ds_cfg['agent_command'] = agent_command
@@ -673,7 +671,7 @@ class TestAzureBounce(TestCase):
673671
674 def test_default_bounce_command_used_by_default(self):672 def test_default_bounce_command_used_by_default(self):
675 cmd = 'default-bounce-command'673 cmd = 'default-bounce-command'
676 DataSourceAzure.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd674 dsaz.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd
677 cfg = {'hostname_bounce': {'policy': 'force'}}675 cfg = {'hostname_bounce': {'policy': 'force'}}
678 data = self.get_ovf_env_with_dscfg('some-hostname', cfg)676 data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
679 self._get_ds(data, agent_command=['not', '__builtin__']).get_data()677 self._get_ds(data, agent_command=['not', '__builtin__']).get_data()
@@ -701,15 +699,208 @@ class TestAzureBounce(TestCase):
701class TestReadAzureOvf(TestCase):699class TestReadAzureOvf(TestCase):
702 def test_invalid_xml_raises_non_azure_ds(self):700 def test_invalid_xml_raises_non_azure_ds(self):
703 invalid_xml = "<foo>" + construct_valid_ovf_env(data={})701 invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
704 self.assertRaises(DataSourceAzure.BrokenAzureDataSource,702 self.assertRaises(dsaz.BrokenAzureDataSource,
705 DataSourceAzure.read_azure_ovf, invalid_xml)703 dsaz.read_azure_ovf, invalid_xml)
706704
707 def test_load_with_pubkeys(self):705 def test_load_with_pubkeys(self):
708 mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]706 mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
709 pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]707 pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
710 content = construct_valid_ovf_env(pubkeys=pubkeys)708 content = construct_valid_ovf_env(pubkeys=pubkeys)
711 (_md, _ud, cfg) = DataSourceAzure.read_azure_ovf(content)709 (_md, _ud, cfg) = dsaz.read_azure_ovf(content)
712 for mypk in mypklist:710 for mypk in mypklist:
713 self.assertIn(mypk, cfg['_pubkeys'])711 self.assertIn(mypk, cfg['_pubkeys'])
714712
713
714class TestCanDevBeReformatted(CiTestCase):
715 warning_file = 'dataloss_warning_readme.txt'
716
717 def _domock(self, mockpath, sattr=None):
718 patcher = mock.patch(mockpath)
719 setattr(self, sattr, patcher.start())
720 self.addCleanup(patcher.stop)
721
722 def setUp(self):
723 super(TestCanDevBeReformatted, self).setUp()
724
725 def patchup(self, devs):
726 bypath = {}
727 for path, data in devs.items():
728 bypath[path] = data
729 if 'realpath' in data:
730 bypath[data['realpath']] = data
731 for ppath, pdata in data.get('partitions', {}).items():
732 bypath[ppath] = pdata
733 if 'realpath' in data:
734 bypath[pdata['realpath']] = pdata
735
736 def realpath(d):
737 return bypath[d].get('realpath', d)
738
739 def partitions_on_device(devpath):
740 parts = bypath.get(devpath, {}).get('partitions', {})
741 ret = []
742 for path, data in parts.items():
743 ret.append((data.get('num'), realpath(path)))
744 # return sorted by partition number
745 return sorted(ret, key=lambda d: d[0])
746
747 def mount_cb(device, callback):
748 p = self.tmp_dir()
749 for f in bypath.get(device).get('files', []):
750 write_file(os.path.join(p, f), content=f)
751 return callback(p)
752
753 def has_ntfs_fs(device):
754 return bypath.get(device, {}).get('fs') == 'ntfs'
755
756 p = 'cloudinit.sources.DataSourceAzure'
757 self._domock(p + "._partitions_on_device", 'm_partitions_on_device')
758 self._domock(p + "._has_ntfs_filesystem", 'm_has_ntfs_filesystem')
759 self._domock(p + ".util.mount_cb", 'm_mount_cb')
760 self._domock(p + ".os.path.realpath", 'm_realpath')
761 self._domock(p + ".os.path.exists", 'm_exists')
762
763 self.m_exists.side_effect = lambda p: p in bypath
764 self.m_realpath.side_effect = realpath
765 self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs
766 self.m_mount_cb.side_effect = mount_cb
767 self.m_partitions_on_device.side_effect = partitions_on_device
768
769 def test_three_partitions_is_false(self):
770 """A disk with 3 partitions can not be formatted."""
771 self.patchup({
772 '/dev/sda': {
773 'partitions': {
774 '/dev/sda1': {'num': 1},
775 '/dev/sda2': {'num': 2},
776 '/dev/sda3': {'num': 3},
777 }}})
778 value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
779 self.assertFalse(False, value)
780 self.assertIn("3 or more", msg.lower())
781
782 def test_no_partitions_is_false(self):
783 """A disk with no partitions can not be formatted."""
784 self.patchup({'/dev/sda': {}})
785 value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
786 self.assertEqual(False, value)
787 self.assertIn("not partitioned", msg.lower())
788
789 def test_two_partitions_not_ntfs_false(self):
790 """2 partitions and 2nd not ntfs can not be formatted."""
791 self.patchup({
792 '/dev/sda': {
793 'partitions': {
794 '/dev/sda1': {'num': 1},
795 '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []},
796 }}})
797 value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
798 self.assertFalse(False, value)
799 self.assertIn("not ntfs", msg.lower())
800
801 def test_two_partitions_ntfs_populated_false(self):
802 """2 partitions and populated ntfs fs on 2nd can not be formatted."""
803 self.patchup({
804 '/dev/sda': {
805 'partitions': {
806 '/dev/sda1': {'num': 1},
807 '/dev/sda2': {'num': 2, 'fs': 'ntfs',
808 'files': ['secret.txt']},
809 }}})
810 value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
811 self.assertFalse(False, value)
812 self.assertIn("files on it", msg.lower())
813
814 def test_two_partitions_ntfs_empty_is_true(self):
815 """2 partitions and empty ntfs fs on 2nd can be formatted."""
816 self.patchup({
817 '/dev/sda': {
818 'partitions': {
819 '/dev/sda1': {'num': 1},
820 '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []},
821 }}})
822 value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
823 self.assertEqual(True, value)
824 self.assertIn("safe for", msg.lower())
825
826 def test_one_partition_not_ntfs_false(self):
827 """1 partition witih fs other than ntfs can not be formatted."""
828 self.patchup({
829 '/dev/sda': {
830 'partitions': {
831 '/dev/sda1': {'num': 1, 'fs': 'zfs'},
832 }}})
833 value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
834 self.assertEqual(False, value)
835 self.assertIn("not ntfs", msg.lower())
836
837 def test_one_partition_ntfs_populated_false(self):
838 """1 mountable ntfs partition with many files can not be formatted."""
839 self.patchup({
840 '/dev/sda': {
841 'partitions': {
842 '/dev/sda1': {'num': 1, 'fs': 'ntfs',
843 'files': ['file1.txt', 'file2.exe']},
844 }}})
845 value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
846 self.assertEqual(False, value)
847 self.assertIn("files on it", msg.lower())
848
849 def test_one_partition_ntfs_empty_is_true(self):
850 """1 mountable ntfs partition and no files can be formatted."""
851 self.patchup({
852 '/dev/sda': {
853 'partitions': {
854 '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []}
855 }}})
856 value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
857 self.assertEqual(True, value)
858 self.assertIn("safe for", msg.lower())
859
860 def test_one_partition_ntfs_empty_with_dataloss_file_is_true(self):
861 """1 mountable ntfs partition and only warn file can be formatted."""
862 self.patchup({
863 '/dev/sda': {
864 'partitions': {
865 '/dev/sda1': {'num': 1, 'fs': 'ntfs',
866 'files': ['dataloss_warning_readme.txt']}
867 }}})
868 value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
869 self.assertEqual(True, value)
870 self.assertIn("safe for", msg.lower())
871
872 def test_one_partition_through_realpath_is_true(self):
873 """A symlink to a device with 1 ntfs partition can be formatted."""
874 epath = '/dev/disk/cloud/azure_resource'
875 self.patchup({
876 epath: {
877 'realpath': '/dev/sdb',
878 'partitions': {
879 epath + '-part1': {
880 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file],
881 'realpath': '/dev/sdb1'}
882 }}})
883 value, msg = dsaz.can_dev_be_reformatted(epath)
884 self.assertEqual(True, value)
885 self.assertIn("safe for", msg.lower())
886
887 def test_three_partition_through_realpath_is_false(self):
888 """A symlink to a device with 3 partitions can not be formatted."""
889 epath = '/dev/disk/cloud/azure_resource'
890 self.patchup({
891 epath: {
892 'realpath': '/dev/sdb',
893 'partitions': {
894 epath + '-part1': {
895 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file],
896 'realpath': '/dev/sdb1'},
897 epath + '-part2': {'num': 2, 'fs': 'ext3',
898 'realpath': '/dev/sdb2'},
899 epath + '-part3': {'num': 3, 'fs': 'ext',
900 'realpath': '/dev/sdb3'}
901 }}})
902 value, msg = dsaz.can_dev_be_reformatted(epath)
903 self.assertEqual(False, value)
904 self.assertIn("3 or more", msg.lower())
905
715# vi: ts=4 expandtab906# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py
index 9f00d46..68fc6aa 100644
--- a/tests/unittests/test_handler/test_handler_disk_setup.py
+++ b/tests/unittests/test_handler/test_handler_disk_setup.py
@@ -151,6 +151,22 @@ class TestUpdateFsSetupDevices(TestCase):
151 'filesystem': 'xfs'151 'filesystem': 'xfs'
152 }, fs_setup)152 }, fs_setup)
153153
154 def test_dotted_devname_populates_partition(self):
155 fs_setup = {
156 'device': 'ephemeral0.1',
157 'label': 'test2',
158 'filesystem': 'xfs'
159 }
160 cc_disk_setup.update_fs_setup_devices([fs_setup],
161 lambda device: device)
162 self.assertEqual({
163 '_origname': 'ephemeral0.1',
164 'device': 'ephemeral0',
165 'partition': '1',
166 'label': 'test2',
167 'filesystem': 'xfs'
168 }, fs_setup)
169
154170
155@mock.patch('cloudinit.config.cc_disk_setup.find_device_node',171@mock.patch('cloudinit.config.cc_disk_setup.find_device_node',
156 return_value=('/dev/xdb1', False))172 return_value=('/dev/xdb1', False))

Subscribers

People subscribed via source and target branches