Merge ~smoser/cloud-init:feature/ds-init into cloud-init:master
- Git
- lp:~smoser/cloud-init
- feature/ds-init
- Merge into master
Proposed by
Scott Moser
Status: | Merged |
---|---|
Merged at revision: | 9e904bbc3336b96475bfd00fb3bf1262ae4de49f |
Proposed branch: | ~smoser/cloud-init:feature/ds-init |
Merge into: | cloud-init:master |
Diff against target: |
381 lines (+136/-107) 6 files modified
cloudinit/cmd/main.py (+3/-0) cloudinit/config/cc_mounts.py (+9/-3) cloudinit/sources/DataSourceAzure.py (+104/-95) cloudinit/sources/__init__.py (+12/-0) cloudinit/stages.py (+7/-0) tests/unittests/test_datasource/test_azure.py (+1/-9) |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
cloud-init Committers | Pending ||
Review via email: mp+311205@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
Revision history for this message
Ryan Harper (raharper) : | # |
There was an error fetching revisions from git servers. Please try again in a few minutes. If the problem persists, contact Launchpad support.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py | |||
2 | index 83eb02c..fe37075 100644 | |||
3 | --- a/cloudinit/cmd/main.py | |||
4 | +++ b/cloudinit/cmd/main.py | |||
5 | @@ -326,6 +326,9 @@ def main_init(name, args): | |||
6 | 326 | util.logexc(LOG, "Failed to re-adjust output redirection!") | 326 | util.logexc(LOG, "Failed to re-adjust output redirection!") |
7 | 327 | logging.setupLogging(mods.cfg) | 327 | logging.setupLogging(mods.cfg) |
8 | 328 | 328 | ||
9 | 329 | # give the activated datasource a chance to adjust | ||
10 | 330 | init.activate_datasource() | ||
11 | 331 | |||
12 | 329 | # Stage 10 | 332 | # Stage 10 |
13 | 330 | return (init.datasource, run_module_section(mods, name, name)) | 333 | return (init.datasource, run_module_section(mods, name, name)) |
14 | 331 | 334 | ||
15 | diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py | |||
16 | index dfc4b59..452c9e8 100644 | |||
17 | --- a/cloudinit/config/cc_mounts.py | |||
18 | +++ b/cloudinit/config/cc_mounts.py | |||
19 | @@ -312,7 +312,8 @@ def handle_swapcfg(swapcfg): | |||
20 | 312 | def handle(_name, cfg, cloud, log, _args): | 312 | def handle(_name, cfg, cloud, log, _args): |
21 | 313 | # fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno | 313 | # fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno |
22 | 314 | def_mnt_opts = "defaults,nobootwait" | 314 | def_mnt_opts = "defaults,nobootwait" |
24 | 315 | if cloud.distro.uses_systemd(): | 315 | uses_systemd = cloud.distro.uses_systemd() |
25 | 316 | if uses_systemd: | ||
26 | 316 | def_mnt_opts = "defaults,nofail,x-systemd.requires=cloud-init.service" | 317 | def_mnt_opts = "defaults,nofail,x-systemd.requires=cloud-init.service" |
27 | 317 | 318 | ||
28 | 318 | defvals = [None, None, "auto", def_mnt_opts, "0", "2"] | 319 | defvals = [None, None, "auto", def_mnt_opts, "0", "2"] |
29 | @@ -447,7 +448,12 @@ def handle(_name, cfg, cloud, log, _args): | |||
30 | 447 | except Exception: | 448 | except Exception: |
31 | 448 | util.logexc(log, "Failed to make '%s' config-mount", d) | 449 | util.logexc(log, "Failed to make '%s' config-mount", d) |
32 | 449 | 450 | ||
33 | 451 | activate_cmd = ["mount", "-a"] | ||
34 | 452 | if uses_systemd: | ||
35 | 453 | activate_cmd = ["systemctl", "daemon-reload"] | ||
36 | 454 | fmt = "Activate mounts: %s:" + ' '.join(activate_cmd) | ||
37 | 450 | try: | 455 | try: |
39 | 451 | util.subp(("mount", "-a")) | 456 | util.subp(activate_cmd) |
40 | 457 | LOG.debug(fmt, "PASS") | ||
41 | 452 | except util.ProcessExecutionError: | 458 | except util.ProcessExecutionError: |
43 | 453 | util.logexc(log, "Activating mounts via 'mount -a' failed") | 459 | util.logexc(log, fmt, "FAIL") |
44 | diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py | |||
45 | index b802b03..22f9004 100644 | |||
46 | --- a/cloudinit/sources/DataSourceAzure.py | |||
47 | +++ b/cloudinit/sources/DataSourceAzure.py | |||
48 | @@ -19,7 +19,6 @@ | |||
49 | 19 | import base64 | 19 | import base64 |
50 | 20 | import contextlib | 20 | import contextlib |
51 | 21 | import crypt | 21 | import crypt |
52 | 22 | import fnmatch | ||
53 | 23 | from functools import partial | 22 | from functools import partial |
54 | 24 | import os | 23 | import os |
55 | 25 | import os.path | 24 | import os.path |
56 | @@ -28,7 +27,6 @@ from xml.dom import minidom | |||
57 | 28 | import xml.etree.ElementTree as ET | 27 | import xml.etree.ElementTree as ET |
58 | 29 | 28 | ||
59 | 30 | from cloudinit import log as logging | 29 | from cloudinit import log as logging |
60 | 31 | from cloudinit.settings import PER_ALWAYS | ||
61 | 32 | from cloudinit import sources | 30 | from cloudinit import sources |
62 | 33 | from cloudinit.sources.helpers.azure import get_metadata_from_fabric | 31 | from cloudinit.sources.helpers.azure import get_metadata_from_fabric |
63 | 34 | from cloudinit import util | 32 | from cloudinit import util |
64 | @@ -42,6 +40,9 @@ BOUNCE_COMMAND = [ | |||
65 | 42 | 'sh', '-xc', | 40 | 'sh', '-xc', |
66 | 43 | "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x" | 41 | "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x" |
67 | 44 | ] | 42 | ] |
68 | 43 | # azure systems will always have a resource disk, and 66-azure-ephemeral.rules | ||
69 | 44 | # ensures that it gets linked to this path. | ||
70 | 45 | RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource' | ||
71 | 45 | 46 | ||
72 | 46 | BUILTIN_DS_CONFIG = { | 47 | BUILTIN_DS_CONFIG = { |
73 | 47 | 'agent_command': AGENT_START, | 48 | 'agent_command': AGENT_START, |
74 | @@ -53,7 +54,7 @@ BUILTIN_DS_CONFIG = { | |||
75 | 53 | 'command': BOUNCE_COMMAND, | 54 | 'command': BOUNCE_COMMAND, |
76 | 54 | 'hostname_command': 'hostname', | 55 | 'hostname_command': 'hostname', |
77 | 55 | }, | 56 | }, |
79 | 56 | 'disk_aliases': {'ephemeral0': '/dev/sdb'}, | 57 | 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, |
80 | 57 | 'dhclient_lease_file': '/var/lib/dhcp/dhclient.eth0.leases', | 58 | 'dhclient_lease_file': '/var/lib/dhcp/dhclient.eth0.leases', |
81 | 58 | } | 59 | } |
82 | 59 | 60 | ||
83 | @@ -245,15 +246,6 @@ class DataSourceAzureNet(sources.DataSource): | |||
84 | 245 | self.metadata['instance-id'] = util.read_dmi_data('system-uuid') | 246 | self.metadata['instance-id'] = util.read_dmi_data('system-uuid') |
85 | 246 | self.metadata.update(fabric_data) | 247 | self.metadata.update(fabric_data) |
86 | 247 | 248 | ||
87 | 248 | found_ephemeral = find_fabric_formatted_ephemeral_disk() | ||
88 | 249 | if found_ephemeral: | ||
89 | 250 | self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral | ||
90 | 251 | LOG.debug("using detected ephemeral0 of %s", found_ephemeral) | ||
91 | 252 | |||
92 | 253 | cc_modules_override = support_new_ephemeral(self.sys_cfg) | ||
93 | 254 | if cc_modules_override: | ||
94 | 255 | self.cfg['cloud_init_modules'] = cc_modules_override | ||
95 | 256 | |||
96 | 257 | return True | 249 | return True |
97 | 258 | 250 | ||
98 | 259 | def device_name_to_device(self, name): | 251 | def device_name_to_device(self, name): |
99 | @@ -266,97 +258,104 @@ class DataSourceAzureNet(sources.DataSource): | |||
100 | 266 | # quickly (local check only) if self.instance_id is still valid | 258 | # quickly (local check only) if self.instance_id is still valid |
101 | 267 | return sources.instance_id_matches_system_uuid(self.get_instance_id()) | 259 | return sources.instance_id_matches_system_uuid(self.get_instance_id()) |
102 | 268 | 260 | ||
106 | 269 | 261 | def activate(self, cfg, is_new_instance): | |
107 | 270 | def count_files(mp): | 262 | address_ephemeral_resize(is_new_instance=is_new_instance) |
108 | 271 | return len(fnmatch.filter(os.listdir(mp), '*[!cdrom]*')) | 263 | return |
109 | 272 | 264 | ||
110 | 273 | 265 | ||
121 | 274 | def find_fabric_formatted_ephemeral_part(): | 266 | def can_dev_be_reformatted(devpath): |
122 | 275 | """ | 267 | # determine if the ephemeral block device path devpath |
123 | 276 | Locate the first fabric formatted ephemeral device. | 268 | # is newly formatted after a resize. |
124 | 277 | """ | 269 | if not os.path.exists(devpath): |
125 | 278 | potential_locations = ['/dev/disk/cloud/azure_resource-part1', | 270 | return False, 'device %s does not exist' % devpath |
126 | 279 | '/dev/disk/azure/resource-part1'] | 271 | |
127 | 280 | device_location = None | 272 | realpath = os.path.realpath(devpath) |
128 | 281 | for potential_location in potential_locations: | 273 | LOG.debug('Resolving realpath of %s -> %s', devpath, realpath) |
129 | 282 | if os.path.exists(potential_location): | 274 | |
130 | 283 | device_location = potential_location | 275 | # it is possible that the block device might exist, but the kernel |
131 | 276 | # have not yet read the partition table and sent events. we udevadm settle | ||
132 | 277 | # to hope to resolve that. Better here would probably be to test and see, | ||
133 | 278 | # and then settle if we didn't find anything and try again. | ||
134 | 279 | if util.which("udevadm"): | ||
135 | 280 | util.subp(["udevadm", "settle"]) | ||
136 | 281 | |||
137 | 282 | # devpath of /dev/sd[a-z] or /dev/disk/cloud/azure_resource | ||
138 | 283 | # where partitions are "<devpath>1" or "<devpath>-part1" or "<devpath>p1" | ||
139 | 284 | part1path = None | ||
140 | 285 | for suff in ("-part", "p", ""): | ||
141 | 286 | cand = devpath + suff + "1" | ||
142 | 287 | if os.path.exists(cand): | ||
143 | 288 | if os.path.exists(devpath + suff + "2"): | ||
144 | 289 | msg = ('device %s had more than 1 partition: %s, %s' % | ||
145 | 290 | devpath, cand, devpath + suff + "2") | ||
146 | 291 | return False, msg | ||
147 | 292 | part1path = cand | ||
148 | 284 | break | 293 | break |
149 | 285 | if device_location is None: | ||
150 | 286 | LOG.debug("no azure resource disk partition path found") | ||
151 | 287 | return None | ||
152 | 288 | ntfs_devices = util.find_devs_with("TYPE=ntfs") | ||
153 | 289 | real_device = os.path.realpath(device_location) | ||
154 | 290 | if real_device in ntfs_devices: | ||
155 | 291 | return device_location | ||
156 | 292 | LOG.debug("'%s' existed (%s) but was not ntfs formated", | ||
157 | 293 | device_location, real_device) | ||
158 | 294 | return None | ||
159 | 295 | |||
160 | 296 | |||
161 | 297 | def find_fabric_formatted_ephemeral_disk(): | ||
162 | 298 | """ | ||
163 | 299 | Get the ephemeral disk. | ||
164 | 300 | """ | ||
165 | 301 | part_dev = find_fabric_formatted_ephemeral_part() | ||
166 | 302 | if part_dev: | ||
167 | 303 | return part_dev.split('-')[0] | ||
168 | 304 | return None | ||
169 | 305 | 294 | ||
170 | 295 | if part1path is None: | ||
171 | 296 | return False, 'device %s was not partitioned' % devpath | ||
172 | 306 | 297 | ||
177 | 307 | def support_new_ephemeral(cfg): | 298 | real_part1path = os.path.realpath(part1path) |
178 | 308 | """ | 299 | ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) |
179 | 309 | Windows Azure makes ephemeral devices ephemeral to boot; a ephemeral device | 300 | LOG.debug('ntfs_devices found = %s', ntfs_devices) |
180 | 310 | may be presented as a fresh device, or not. | 301 | if real_part1path not in ntfs_devices: |
181 | 302 | msg = ('partition 1 (%s -> %s) on device %s was not ntfs formatted' % | ||
182 | 303 | (part1path, real_part1path, devpath)) | ||
183 | 304 | return False, msg | ||
184 | 311 | 305 | ||
195 | 312 | Since the knowledge of when a disk is supposed to be plowed under is | 306 | def count_files(mp): |
196 | 313 | specific to Windows Azure, the logic resides here in the datasource. When a | 307 | ignored = {'dataloss_warning_readme.txt'} |
197 | 314 | new ephemeral device is detected, cloud-init overrides the default | 308 | return len([f for f in os.listdir(mp) if f.lower() not in ignored]) |
188 | 315 | frequency for both disk-setup and mounts for the current boot only. | ||
189 | 316 | """ | ||
190 | 317 | device = find_fabric_formatted_ephemeral_part() | ||
191 | 318 | if not device: | ||
192 | 319 | LOG.debug("no default fabric formated ephemeral0.1 found") | ||
193 | 320 | return None | ||
194 | 321 | LOG.debug("fabric formated ephemeral0.1 device at %s", device) | ||
198 | 322 | 309 | ||
200 | 323 | file_count = 0 | 310 | bmsg = ('partition 1 (%s -> %s) on device %s was ntfs formatted' % |
201 | 311 | (part1path, real_part1path, devpath)) | ||
202 | 324 | try: | 312 | try: |
211 | 325 | file_count = util.mount_cb(device, count_files) | 313 | file_count = util.mount_cb(part1path, count_files) |
212 | 326 | except Exception: | 314 | except util.MountFailedError as e: |
213 | 327 | return None | 315 | return False, bmsg + ' but mount of %s failed: %s' % (part1path, e) |
214 | 328 | LOG.debug("fabric prepared ephmeral0.1 has %s files on it", file_count) | 316 | |
215 | 329 | 317 | if file_count != 0: | |
216 | 330 | if file_count >= 1: | 318 | return False, bmsg + ' but had %d files on it.' % file_count |
217 | 331 | LOG.debug("fabric prepared ephemeral0.1 will be preserved") | 319 | |
218 | 332 | return None | 320 | return True, bmsg + ' and had no important files. Safe for reformatting.' |
219 | 321 | |||
220 | 322 | |||
221 | 323 | def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, | ||
222 | 324 | is_new_instance=False): | ||
223 | 325 | # wait for ephemeral disk to come up | ||
224 | 326 | naplen = .2 | ||
225 | 327 | missing = wait_for_files([devpath], maxwait=maxwait, naplen=naplen, | ||
226 | 328 | log_pre="Azure ephemeral disk: ") | ||
227 | 329 | |||
228 | 330 | if missing: | ||
229 | 331 | LOG.warn("ephemeral device '%s' did not appear after %d seconds.", | ||
230 | 332 | devpath, maxwait) | ||
231 | 333 | return | ||
232 | 334 | |||
233 | 335 | result = False | ||
234 | 336 | msg = None | ||
235 | 337 | if is_new_instance: | ||
236 | 338 | result, msg = (True, "First instance boot.") | ||
237 | 333 | else: | 339 | else: |
261 | 334 | # if device was already mounted, then we need to unmount it | 340 | result, msg = can_dev_be_reformatted(devpath) |
262 | 335 | # race conditions could allow for a check-then-unmount | 341 | |
263 | 336 | # to have a false positive. so just unmount and then check. | 342 | LOG.debug("reformattable=%s: %s" % (result, msg)) |
264 | 337 | try: | 343 | if not result: |
265 | 338 | util.subp(['umount', device]) | 344 | return |
266 | 339 | except util.ProcessExecutionError as e: | 345 | |
267 | 340 | if device in util.mounts(): | 346 | for mod in ['disk_setup', 'mounts']: |
268 | 341 | LOG.warn("Failed to unmount %s, will not reformat.", device) | 347 | sempath = '/var/lib/cloud/instance/sem/config_' + mod |
269 | 342 | LOG.debug("Failed umount: %s", e) | 348 | bmsg = 'Marker "%s" for module "%s"' % (sempath, mod) |
270 | 343 | return None | 349 | if os.path.exists(sempath): |
271 | 344 | 350 | try: | |
272 | 345 | LOG.debug("cloud-init will format ephemeral0.1 this boot.") | 351 | os.unlink(sempath) |
273 | 346 | LOG.debug("setting disk_setup and mounts modules 'always' for this boot") | 352 | LOG.debug(bmsg + " removed.") |
274 | 347 | 353 | except Exception as e: | |
275 | 348 | cc_modules = cfg.get('cloud_init_modules') | 354 | # python3 throws FileNotFoundError, python2 throws OSError |
276 | 349 | if not cc_modules: | 355 | LOG.warn(bmsg + ": remove failed! (%s)" % e) |
254 | 350 | return None | ||
255 | 351 | |||
256 | 352 | mod_list = [] | ||
257 | 353 | for mod in cc_modules: | ||
258 | 354 | if mod in ("disk_setup", "mounts"): | ||
259 | 355 | mod_list.append([mod, PER_ALWAYS]) | ||
260 | 356 | LOG.debug("set module '%s' to 'always' for this boot", mod) | ||
277 | 357 | else: | 356 | else: |
280 | 358 | mod_list.append(mod) | 357 | LOG.debug(bmsg + " did not exist.") |
281 | 359 | return mod_list | 358 | return |
282 | 360 | 359 | ||
283 | 361 | 360 | ||
284 | 362 | def perform_hostname_bounce(hostname, cfg, prev_hostname): | 361 | def perform_hostname_bounce(hostname, cfg, prev_hostname): |
285 | @@ -408,15 +407,25 @@ def pubkeys_from_crt_files(flist): | |||
286 | 408 | return pubkeys | 407 | return pubkeys |
287 | 409 | 408 | ||
288 | 410 | 409 | ||
290 | 411 | def wait_for_files(flist, maxwait=60, naplen=.5): | 410 | def wait_for_files(flist, maxwait=60, naplen=.5, log_pre=""): |
291 | 412 | need = set(flist) | 411 | need = set(flist) |
292 | 413 | waited = 0 | 412 | waited = 0 |
294 | 414 | while waited < maxwait: | 413 | while True: |
295 | 415 | need -= set([f for f in need if os.path.exists(f)]) | 414 | need -= set([f for f in need if os.path.exists(f)]) |
296 | 416 | if len(need) == 0: | 415 | if len(need) == 0: |
297 | 416 | LOG.debug("%sAll files appeared after %s seconds: %s", | ||
298 | 417 | log_pre, waited, flist) | ||
299 | 417 | return [] | 418 | return [] |
300 | 419 | if waited == 0: | ||
301 | 420 | LOG.info("%sWaiting up to %s seconds for the following files: %s", | ||
302 | 421 | log_pre, maxwait, flist) | ||
303 | 422 | if waited + naplen > maxwait: | ||
304 | 423 | break | ||
305 | 418 | time.sleep(naplen) | 424 | time.sleep(naplen) |
306 | 419 | waited += naplen | 425 | waited += naplen |
307 | 426 | |||
308 | 427 | LOG.warn("%sStill missing files after %s seconds: %s", | ||
309 | 428 | log_pre, maxwait, need) | ||
310 | 420 | return need | 429 | return need |
311 | 421 | 430 | ||
312 | 422 | 431 | ||
313 | diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py | |||
314 | index d139527..13fb7c6 100644 | |||
315 | --- a/cloudinit/sources/__init__.py | |||
316 | +++ b/cloudinit/sources/__init__.py | |||
317 | @@ -261,6 +261,18 @@ class DataSource(object): | |||
318 | 261 | def first_instance_boot(self): | 261 | def first_instance_boot(self): |
319 | 262 | return | 262 | return |
320 | 263 | 263 | ||
321 | 264 | def activate(self, cfg, is_new_instance): | ||
322 | 265 | """activate(cfg, is_new_instance) | ||
323 | 266 | |||
324 | 267 | This is called before the init_modules will be called. | ||
325 | 268 | The cfg is fully up to date config, it contains a merged view of | ||
326 | 269 | system config, datasource config, user config, vendor config. | ||
327 | 270 | It should be used rather than the sys_cfg passed to __init__. | ||
328 | 271 | |||
329 | 272 | is_new_instance is a boolean indicating if this is a new instance. | ||
330 | 273 | """ | ||
331 | 274 | return | ||
332 | 275 | |||
333 | 264 | 276 | ||
334 | 265 | def normalize_pubkey_data(pubkey_data): | 277 | def normalize_pubkey_data(pubkey_data): |
335 | 266 | keys = [] | 278 | keys = [] |
336 | diff --git a/cloudinit/stages.py b/cloudinit/stages.py | |||
337 | index 47deac6..86a1378 100644 | |||
338 | --- a/cloudinit/stages.py | |||
339 | +++ b/cloudinit/stages.py | |||
340 | @@ -371,6 +371,13 @@ class Init(object): | |||
341 | 371 | self._store_userdata() | 371 | self._store_userdata() |
342 | 372 | self._store_vendordata() | 372 | self._store_vendordata() |
343 | 373 | 373 | ||
344 | 374 | def activate_datasource(self): | ||
345 | 375 | if self.datasource is None: | ||
346 | 376 | raise RuntimeError("Datasource is None, cannot activate.") | ||
347 | 377 | self.datasource.activate(cfg=self.cfg, | ||
348 | 378 | is_new_instance=self.is_new_instance()) | ||
349 | 379 | self._write_to_cache() | ||
350 | 380 | |||
351 | 374 | def _store_userdata(self): | 381 | def _store_userdata(self): |
352 | 375 | raw_ud = self.datasource.get_userdata_raw() | 382 | raw_ud = self.datasource.get_userdata_raw() |
353 | 376 | if raw_ud is None: | 383 | if raw_ud is None: |
354 | diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py | |||
355 | index e90e903..0712700 100644 | |||
356 | --- a/tests/unittests/test_datasource/test_azure.py | |||
357 | +++ b/tests/unittests/test_datasource/test_azure.py | |||
358 | @@ -349,7 +349,7 @@ class TestAzureDataSource(TestCase): | |||
359 | 349 | cfg = dsrc.get_config_obj() | 349 | cfg = dsrc.get_config_obj() |
360 | 350 | 350 | ||
361 | 351 | self.assertEqual(dsrc.device_name_to_device("ephemeral0"), | 351 | self.assertEqual(dsrc.device_name_to_device("ephemeral0"), |
363 | 352 | "/dev/sdb") | 352 | DataSourceAzure.RESOURCE_DISK_PATH) |
364 | 353 | assert 'disk_setup' in cfg | 353 | assert 'disk_setup' in cfg |
365 | 354 | assert 'fs_setup' in cfg | 354 | assert 'fs_setup' in cfg |
366 | 355 | self.assertIsInstance(cfg['disk_setup'], dict) | 355 | self.assertIsInstance(cfg['disk_setup'], dict) |
367 | @@ -462,14 +462,6 @@ class TestAzureBounce(TestCase): | |||
368 | 462 | mock.patch.object(DataSourceAzure, 'list_possible_azure_ds_devs', | 462 | mock.patch.object(DataSourceAzure, 'list_possible_azure_ds_devs', |
369 | 463 | mock.MagicMock(return_value=[]))) | 463 | mock.MagicMock(return_value=[]))) |
370 | 464 | self.patches.enter_context( | 464 | self.patches.enter_context( |
371 | 465 | mock.patch.object(DataSourceAzure, | ||
372 | 466 | 'find_fabric_formatted_ephemeral_disk', | ||
373 | 467 | mock.MagicMock(return_value=None))) | ||
374 | 468 | self.patches.enter_context( | ||
375 | 469 | mock.patch.object(DataSourceAzure, | ||
376 | 470 | 'find_fabric_formatted_ephemeral_part', | ||
377 | 471 | mock.MagicMock(return_value=None))) | ||
378 | 472 | self.patches.enter_context( | ||
379 | 473 | mock.patch.object(DataSourceAzure, 'get_metadata_from_fabric', | 465 | mock.patch.object(DataSourceAzure, 'get_metadata_from_fabric', |
380 | 474 | mock.MagicMock(return_value={}))) | 466 | mock.MagicMock(return_value={}))) |
381 | 475 | self.patches.enter_context( | 467 | self.patches.enter_context( |