diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/atomic_helper.py cloud-init-0.7.9-153-g16a7302f/cloudinit/atomic_helper.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/atomic_helper.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/atomic_helper.py 2017-05-26 18:36:38.000000000 +0000
@@ -2,13 +2,23 @@
 
 import json
 import os
+import stat
 import tempfile
 
 _DEF_PERMS = 0o644
 
 
-def write_file(filename, content, mode=_DEF_PERMS, omode="wb"):
+def write_file(filename, content, mode=_DEF_PERMS,
+               omode="wb", copy_mode=False):
     # open filename in mode 'omode', write content, set permissions to 'mode'
+
+    if copy_mode:
+        try:
+            file_stat = os.stat(filename)
+            mode = stat.S_IMODE(file_stat.st_mode)
+        except OSError:
+            pass
+
     tf = None
     try:
         tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename),
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/cloud.py cloud-init-0.7.9-153-g16a7302f/cloudinit/cloud.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/cloud.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/cloud.py 2017-05-26 18:36:38.000000000 +0000
@@ -56,7 +56,8 @@
     def get_template_filename(self, name):
         fn = self.paths.template_tpl % (name)
         if not os.path.isfile(fn):
-            LOG.warn("No template found at %s for template named %s", fn, name)
+            LOG.warning("No template found at %s for template named %s",
+                        fn, name)
             return None
         return fn
 
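[Illustrative note, not part of the diff.] The new ``copy_mode`` flag on ``atomic_helper.write_file`` above preserves an existing file's permission bits instead of forcing the ``_DEF_PERMS`` default. A minimal usage sketch (path and content are hypothetical):

    from cloudinit.atomic_helper import write_file

    # Rewrite a file atomically while keeping whatever mode it already had
    # (e.g. 0o600).  If the file does not exist yet, os.stat() raises
    # OSError and the 'mode' argument is used unchanged.
    write_file("/etc/example.conf", b"key: value\n", copy_mode=True)
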
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/cmd/main.py cloud-init-0.7.9-153-g16a7302f/cloudinit/cmd/main.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/cmd/main.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/cmd/main.py 2017-05-26 18:36:38.000000000 +0000
@@ -405,7 +405,8 @@
             errfmt_orig = errfmt
             (outfmt, errfmt) = util.get_output_cfg(mods.cfg, name)
             if outfmt_orig != outfmt or errfmt_orig != errfmt:
-                LOG.warn("Stdout, stderr changing to (%s, %s)", outfmt, errfmt)
+                LOG.warning("Stdout, stderr changing to (%s, %s)",
+                            outfmt, errfmt)
                 (outfmt, errfmt) = util.fixup_output(mods.cfg, name)
         except Exception:
             util.logexc(LOG, "Failed to re-adjust output redirection!")
@@ -427,15 +428,15 @@
 
     dicfg = cfg.get('di_report', {})
     if not isinstance(dicfg, dict):
-        LOG.warn("di_report config not a dictionary: %s", dicfg)
+        LOG.warning("di_report config not a dictionary: %s", dicfg)
         return
 
     dslist = dicfg.get('datasource_list')
    if dslist is None:
-        LOG.warn("no 'datasource_list' found in di_report.")
+        LOG.warning("no 'datasource_list' found in di_report.")
         return
     elif not isinstance(dslist, list):
-        LOG.warn("di_report/datasource_list not a list: %s", dslist)
+        LOG.warning("di_report/datasource_list not a list: %s", dslist)
         return
 
     # ds.__module__ is like cloudinit.sources.DataSourceName
@@ -444,8 +445,8 @@
     if modname.startswith(sources.DS_PREFIX):
         modname = modname[len(sources.DS_PREFIX):]
     else:
-        LOG.warn("Datasource '%s' came from unexpected module '%s'.",
-                 datasource, modname)
+        LOG.warning("Datasource '%s' came from unexpected module '%s'.",
+                    datasource, modname)
 
     if modname in dslist:
         LOG.debug("used datasource '%s' from '%s' was in di_report's list: %s",
@@ -571,10 +572,10 @@
                                        mod_args,
                                        mod_freq)
         if failures:
-            LOG.warn("Ran %s but it failed!", mod_name)
+            LOG.warning("Ran %s but it failed!", mod_name)
             return 1
         elif not which_ran:
-            LOG.warn("Did not run %s, does it exist?", mod_name)
+            LOG.warning("Did not run %s, does it exist?", mod_name)
             return 1
         else:
             # Guess it worked
@@ -680,6 +681,10 @@
     return len(v1[mode]['errors'])
 
 
+def main_features(name, args):
+    sys.stdout.write('\n'.join(sorted(version.FEATURES)) + '\n')
+
+
 def main(sysv_args=None):
     if sysv_args is not None:
         parser = argparse.ArgumentParser(prog=sysv_args[0])
@@ -770,6 +775,10 @@
                                          ' upon'))
     parser_dhclient.set_defaults(action=('dhclient_hook', dhclient_hook))
 
+    parser_features = subparsers.add_parser('features',
+                                            help=('list defined features'))
+    parser_features.set_defaults(action=('features', main_features))
+
     args = parser.parse_args(args=sysv_args)
 
     try:
@@ -788,6 +797,7 @@
     if name in ("modules", "init"):
         functor = status_wrapper
 
+    rname = None
     report_on = True
     if name == "init":
         if args.local:
@@ -802,10 +812,10 @@
         rname, rdesc = ("single/%s" % args.name,
                         "running single module %s" % args.name)
         report_on = args.report
-
-    elif name == 'dhclient_hook':
-        rname, rdesc = ("dhclient-hook",
-                        "running dhclient-hook module")
+    else:
+        rname = name
+        rdesc = "running 'cloud-init %s'" % name
+        report_on = False
 
     args.reporter = events.ReportEventStack(
         rname, rdesc, reporting_enabled=report_on)
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_apt_configure.py cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_apt_configure.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_apt_configure.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_apt_configure.py 2017-05-26 18:36:38.000000000 +0000
@@ -65,12 +65,12 @@
 basis. Each config is a dictionary which must have an entry for ``arches``,
 specifying which architectures that config entry is for. The keyword
 ``default`` applies to any architecture not explicitly listed. The mirror url
-can be specified with the ``url`` key, or a list of mirrors to check can be
+can be specified with the ``uri`` key, or a list of mirrors to check can be
 provided in order, with the first mirror that can be resolved being selected.
 This allows the same configuration to be used in different environment, with
-different hosts used for a local apt mirror. If no mirror is provided by uri or
-search, ``search_dns`` may be used to search for dns names in the format
-``<distro>-mirror`` in each of the following:
+different hosts used for a local apt mirror. If no mirror is provided by
+``uri`` or ``search``, ``search_dns`` may be used to search for dns names in
+the format ``<distro>-mirror`` in each of the following:
 
     - fqdn of this host per cloud metadata
     - localdomain
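[Illustrative note, not part of the diff.] To make the mirror-selection text above concrete, a sketch of the 'v3' ``apt`` dictionary shape it describes, expressed as a Python dict (hostnames and the arch list are hypothetical):

    apt_cfg = {
        'apt': {
            'primary': [{
                'arches': ['amd64', 'default'],
                # 'uri' is used if given; otherwise 'search' entries are
                # tried in order; 'search_dns' applies only when neither is
                # present, probing <distro>-mirror DNS names.
                'uri': 'http://mirror.internal.example/ubuntu/',
            }],
        },
    }
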
@@ -278,15 +278,34 @@
         raise ValueError("Expected dictionary for 'apt' config, found %s",
                          type(cfg))
 
-    LOG.debug("handling apt (module %s) with apt config '%s'", name, cfg)
+    apply_debconf_selections(cfg, target)
+    apply_apt(cfg, cloud, target)
+
+
+def _should_configure_on_empty_apt():
+    # if no config was provided, should apt configuration be done?
+    if util.system_is_snappy():
+        return False, "system is snappy."
+    if not (util.which('apt-get') or util.which('apt')):
+        return False, "no apt commands."
+    return True, "Apt is available."
+
+
+def apply_apt(cfg, cloud, target):
+    # cfg is the 'apt' top level dictionary already in 'v3' format.
+    if not cfg:
+        should_config, msg = _should_configure_on_empty_apt()
+        if not should_config:
+            LOG.debug("Nothing to do: No apt config and %s", msg)
+            return
+
+    LOG.debug("handling apt config: %s", cfg)
 
     release = util.lsb_release(target=target)['codename']
     arch = util.get_architecture(target)
     mirrors = find_apt_mirror_info(cfg, cloud, arch=arch)
     LOG.debug("Apt Mirror info: %s", mirrors)
 
-    apply_debconf_selections(cfg, target)
-
     if util.is_false(cfg.get('preserve_sources_list', False)):
         generate_sources_list(cfg, release, mirrors, cloud)
         rename_apt_lists(mirrors, target)
@@ -333,8 +352,8 @@
                 unhandled.append(pkg)
 
     if len(unhandled):
-        LOG.warn("The following packages were installed and preseeded, "
-                 "but cannot be unconfigured: %s", unhandled)
+        LOG.warning("The following packages were installed and preseeded, "
+                    "but cannot be unconfigured: %s", unhandled)
 
     if len(to_config):
         util.subp(['dpkg-reconfigure', '--frontend=noninteractive'] +
@@ -427,7 +446,7 @@
             os.rename(filename, newname)
         except OSError:
             # since this is a best effort task, warn with but don't fail
-            LOG.warn("Failed to rename apt list:", exc_info=True)
+            LOG.warning("Failed to rename apt list:", exc_info=True)
 
 
 def mirror_to_placeholder(tmpl, mirror, placeholder):
@@ -435,7 +454,7 @@
     replace the specified mirror in a template with a placeholder string
     Checks for existance of the expected mirror and warns if not found"""
     if mirror not in tmpl:
-        LOG.warn("Expected mirror '%s' not found in: %s", mirror, tmpl)
+        LOG.warning("Expected mirror '%s' not found in: %s", mirror, tmpl)
     return tmpl.replace(mirror, placeholder)
 
 
@@ -511,7 +530,8 @@
     if not template_fn:
         template_fn = cloud.get_template_filename('sources.list')
         if not template_fn:
-            LOG.warn("No template found, not rendering /etc/apt/sources.list")
+            LOG.warning("No template found, "
+                        "not rendering /etc/apt/sources.list")
             return
 
     tmpl = util.load_file(template_fn)
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_chef.py cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_chef.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_chef.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_chef.py 2017-05-26 18:36:38.000000000 +0000
@@ -302,7 +302,7 @@
     retries = max(0, util.get_cfg_option_int(chef_cfg,
                                              "omnibus_url_retries",
                                              default=OMNIBUS_URL_RETRIES))
-    content = url_helper.readurl(url=url, retries=retries)
+    content = url_helper.readurl(url=url, retries=retries).contents
     with util.tempdir() as tmpd:
         # Use tmpdir over tmpfile to avoid 'text file busy' on execute
         tmpf = "%s/chef-omnibus-install" % tmpd
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_disk_setup.py cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_disk_setup.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_disk_setup.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_disk_setup.py 2017-05-26 18:36:38.000000000 +0000
@@ -68,6 +68,9 @@
 Using ``overwrite: true`` for filesystems is dangerous and can lead to data
 loss, so double check the entry in ``fs_setup``.
 
+.. note::
+    ``replace_fs`` is ignored unless ``partition`` is ``auto`` or ``any``.
+
 **Internal name:** ``cc_disk_setup``
 
 **Module frequency:** per instance
@@ -127,7 +130,7 @@
     log.debug("Partitioning disks: %s", str(disk_setup))
     for disk, definition in disk_setup.items():
         if not isinstance(definition, dict):
-            log.warn("Invalid disk definition for %s" % disk)
+            log.warning("Invalid disk definition for %s" % disk)
             continue
 
         try:
@@ -144,7 +147,7 @@
         update_fs_setup_devices(fs_setup, cloud.device_name_to_device)
         for definition in fs_setup:
             if not isinstance(definition, dict):
-                log.warn("Invalid file system definition: %s" % definition)
+                log.warning("Invalid file system definition: %s" % definition)
                 continue
 
             try:
@@ -181,7 +184,7 @@
     # update it with the response from 'tformer'
     for definition in disk_setup:
         if not isinstance(definition, dict):
-            LOG.warn("entry in disk_setup not a dict: %s", definition)
+            LOG.warning("entry in disk_setup not a dict: %s", definition)
             continue
 
         origname = definition.get('device')
@@ -199,9 +202,14 @@
             definition['_origname'] = origname
             definition['device'] = tformed
 
-        if part and 'partition' in definition:
-            definition['_partition'] = definition['partition']
-            definition['partition'] = part
+        if part:
+            # In origname with .N, N overrides 'partition' key.
+            if 'partition' in definition:
+                LOG.warning("Partition '%s' from dotted device name '%s' "
+                            "overrides 'partition' key in %s", part, origname,
+                            definition)
+                definition['_partition'] = definition['partition']
+            definition['partition'] = part
 
 
 def value_splitter(values, start=None):
@@ -279,7 +287,7 @@
     try:
         d_type = device_type(name)
     except Exception:
-        LOG.warn("Query against device %s failed" % name)
+        LOG.warning("Query against device %s failed", name)
         return False
 
     if partition and d_type == 'part':
@@ -372,7 +380,7 @@
     if not raw_device_used:
         return (device, False)
 
-    LOG.warn("Failed to find device during available device search.")
+    LOG.warning("Failed to find device during available device search.")
     return (None, False)
 
 
@@ -423,7 +431,7 @@
         raise Exception("No such function %s to call!" % func_name)
 
 
-def get_mbr_hdd_size(device):
+def get_hdd_size(device):
     try:
         size_in_bytes, _ = util.subp([BLKDEV_CMD, '--getsize64', device])
         sector_size, _ = util.subp([BLKDEV_CMD, '--getss', device])
@@ -433,22 +441,6 @@
     return int(size_in_bytes) / int(sector_size)
 
 
-def get_gpt_hdd_size(device):
-    out, _ = util.subp([SGDISK_CMD, '-p', device], update_env=LANG_C_ENV)
-    for line in out.splitlines():
-        if line.startswith("Disk"):
-            return line.split()[2]
-    raise Exception("Failed to get %s size from sgdisk" % (device))
-
-
-def get_hdd_size(table_type, device):
-    """
-    Returns the hard disk size.
-    This works with any disk type, including GPT.
-    """
-    return get_dyn_func("get_%s_hdd_size", table_type, device)
-
-
 def check_partition_mbr_layout(device, layout):
     """
     Returns true if the partition layout matches the one on the disk
@@ -496,12 +488,35 @@
                             device, e))
 
     out_lines = iter(out.splitlines())
-    # Skip header
+    # Skip header.  Output looks like:
+    # ***************************************************************
+    # Found invalid GPT and valid MBR; converting MBR to GPT format
+    # in memory.
+ # *************************************************************** + # + # Disk /dev/vdb: 83886080 sectors, 40.0 GiB + # Logical sector size: 512 bytes + # Disk identifier (GUID): 8A7F11AD-3953-491B-8051-077E01C8E9A7 + # Partition table holds up to 128 entries + # First usable sector is 34, last usable sector is 83886046 + # Partitions will be aligned on 2048-sector boundaries + # Total free space is 83476413 sectors (39.8 GiB) + # + # Number Start (sector) End (sector) Size Code Name + # 1 2048 206847 100.0 MiB 0700 Microsoft basic data for line in out_lines: if line.strip().startswith('Number'): break - return [line.strip().split()[-1] for line in out_lines] + codes = [line.strip().split()[5] for line in out_lines] + cleaned = [] + + # user would expect a code '83' to be Linux, but sgdisk outputs 8300. + for code in codes: + if len(code) == 4 and code.endswith("00"): + code = code[0:2] + cleaned.append(code) + return cleaned def check_partition_layout(table_type, device, layout): @@ -515,6 +530,8 @@ found_layout = get_dyn_func( "check_partition_%s_layout", table_type, device, layout) + LOG.debug("called check_partition_%s_layout(%s, %s), returned: %s", + table_type, device, layout, found_layout) if isinstance(layout, bool): # if we are using auto partitioning, or "True" be happy # if a single partition exists. @@ -522,18 +539,17 @@ return True return False - else: - if len(found_layout) != len(layout): - return False - else: - # This just makes sure that the number of requested - # partitions and the type labels are right - for x in range(1, len(layout) + 1): - if isinstance(layout[x - 1], tuple): - _, part_type = layout[x] - if int(found_layout[x]) != int(part_type): - return False - return True + elif len(found_layout) == len(layout): + # This just makes sure that the number of requested + # partitions and the type labels are right + layout_types = [str(x[1]) if isinstance(x, (tuple, list)) else None + for x in layout] + LOG.debug("Layout types=%s. Found types=%s", + layout_types, found_layout) + for itype, ftype in zip(layout_types, found_layout): + if itype is not None and str(ftype) != str(itype): + return False + return True return False @@ -638,7 +654,7 @@ if d['type'] not in ["disk", "crypt"]: wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']] try: - LOG.info("Purging filesystem on /dev/%s" % d['name']) + LOG.info("Purging filesystem on /dev/%s", d['name']) util.subp(wipefs_cmd) except Exception: raise Exception("Failed FS purge of /dev/%s" % d['name']) @@ -664,14 +680,14 @@ reliable way to probe the partition table. """ blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device] - udev_cmd = [UDEVADM_CMD, 'settle'] + udevadm_settle() try: - util.subp(udev_cmd) util.subp(blkdev_cmd) - util.subp(udev_cmd) except Exception as e: util.logexc(LOG, "Failed reading the partition table %s" % e) + udevadm_settle() + def exec_mkpart_mbr(device, layout): """ @@ -696,11 +712,13 @@ util.subp([SGDISK_CMD, '-n', '{}:{}:{}'.format(index, start, end), device]) if partition_type is not None: + # convert to a 4 char (or more) string right padded with 0 + # 82 -> 8200. 
@@ -515,6 +530,8 @@
     found_layout = get_dyn_func(
         "check_partition_%s_layout", table_type, device, layout)
 
+    LOG.debug("called check_partition_%s_layout(%s, %s), returned: %s",
+              table_type, device, layout, found_layout)
     if isinstance(layout, bool):
         # if we are using auto partitioning, or "True" be happy
         # if a single partition exists.
@@ -522,18 +539,17 @@
             return True
         return False
 
-    else:
-        if len(found_layout) != len(layout):
-            return False
-        else:
-            # This just makes sure that the number of requested
-            # partitions and the type labels are right
-            for x in range(1, len(layout) + 1):
-                if isinstance(layout[x - 1], tuple):
-                    _, part_type = layout[x]
-                    if int(found_layout[x]) != int(part_type):
-                        return False
-            return True
+    elif len(found_layout) == len(layout):
+        # This just makes sure that the number of requested
+        # partitions and the type labels are right
+        layout_types = [str(x[1]) if isinstance(x, (tuple, list)) else None
+                        for x in layout]
+        LOG.debug("Layout types=%s. Found types=%s",
+                  layout_types, found_layout)
+        for itype, ftype in zip(layout_types, found_layout):
+            if itype is not None and str(ftype) != str(itype):
+                return False
+        return True
 
     return False
@@ -638,7 +654,7 @@
         if d['type'] not in ["disk", "crypt"]:
             wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']]
             try:
-                LOG.info("Purging filesystem on /dev/%s" % d['name'])
+                LOG.info("Purging filesystem on /dev/%s", d['name'])
                 util.subp(wipefs_cmd)
             except Exception:
                 raise Exception("Failed FS purge of /dev/%s" % d['name'])
@@ -664,14 +680,14 @@
     reliable way to probe the partition table.
     """
     blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
-    udev_cmd = [UDEVADM_CMD, 'settle']
+    udevadm_settle()
     try:
-        util.subp(udev_cmd)
         util.subp(blkdev_cmd)
-        util.subp(udev_cmd)
     except Exception as e:
         util.logexc(LOG, "Failed reading the partition table %s" % e)
 
+    udevadm_settle()
+
 
 def exec_mkpart_mbr(device, layout):
     """
@@ -696,11 +712,13 @@
             util.subp([SGDISK_CMD,
                        '-n', '{}:{}:{}'.format(index, start, end), device])
             if partition_type is not None:
+                # convert to a 4 char (or more) string right padded with 0
+                # 82 -> 8200.  'Linux' -> 'Linux'
+                pinput = str(partition_type).ljust(4, "0")
                 util.subp(
-                    [SGDISK_CMD,
-                     '-t', '{}:{}'.format(index, partition_type), device])
+                    [SGDISK_CMD, '-t', '{}:{}'.format(index, pinput), device])
     except Exception:
-        LOG.warn("Failed to partition device %s" % device)
+        LOG.warning("Failed to partition device %s", device)
         raise
 
     read_parttbl(device)
@@ -719,6 +737,24 @@
     return get_dyn_func("exec_mkpart_%s", table_type, device, layout)
 
 
+def udevadm_settle():
+    util.subp(['udevadm', 'settle'])
+
+
+def assert_and_settle_device(device):
+    """Assert that device exists and settle so it is fully recognized."""
+    if not os.path.exists(device):
+        udevadm_settle()
+        if not os.path.exists(device):
+            raise RuntimeError("Device %s did not exist and was not created "
+                               "with a udevamd settle." % device)
+
+    # Whether or not the device existed above, it is possible that udev
+    # events that would populate udev database (for reading by lsdname) have
+    # not yet finished. So settle again.
+    udevadm_settle()
+
+
 def mkpart(device, definition):
     """
     Creates the partition table.
@@ -734,9 +770,10 @@
             device: the device to work on.
     """
     # ensure that we get a real device rather than a symbolic link
+    assert_and_settle_device(device)
     device = os.path.realpath(device)
 
-    LOG.debug("Checking values for %s definition" % device)
+    LOG.debug("Checking values for %s definition", device)
     overwrite = definition.get('overwrite', False)
     layout = definition.get('layout', False)
     table_type = definition.get('table_type', 'mbr')
@@ -766,15 +803,15 @@
 
     LOG.debug("Checking if device is safe to partition")
     if not overwrite and (is_disk_used(device) or is_filesystem(device)):
-        LOG.debug("Skipping partitioning on configured device %s" % device)
+        LOG.debug("Skipping partitioning on configured device %s", device)
        return
 
-    LOG.debug("Checking for device size")
-    device_size = get_hdd_size(table_type, device)
+    LOG.debug("Checking for device size of %s", device)
+    device_size = get_hdd_size(device)
 
     LOG.debug("Calculating partition layout")
     part_definition = get_partition_layout(table_type, device_size, layout)
-    LOG.debug("   Layout is: %s" % part_definition)
+    LOG.debug("   Layout is: %s", part_definition)
 
     LOG.debug("Creating partition table on %s", device)
     exec_mkpart(table_type, device, part_definition)
@@ -799,7 +836,7 @@
     if fs.lower() in flags:
         return flags[fs]
 
-    LOG.warn("Force flag for %s is unknown." % fs)
+    LOG.warning("Force flag for %s is unknown.", fs)
     return ''
 
 
@@ -834,6 +871,7 @@
     overwrite = fs_cfg.get('overwrite', False)
 
     # ensure that we get a real device rather than a symbolic link
+    assert_and_settle_device(device)
     device = os.path.realpath(device)
 
     # This allows you to define the default ephemeral or swap
@@ -849,7 +887,8 @@
         # Check to see if the fs already exists
         LOG.debug("Checking device %s", device)
         check_label, check_fstype, _ = check_fs(device)
-        LOG.debug("Device %s has %s %s", device, check_label, check_fstype)
+        LOG.debug("Device '%s' has check_label='%s' check_fstype=%s",
+                  device, check_label, check_fstype)
 
         if check_label == label and check_fstype == fs_type:
             LOG.debug("Existing file system found at %s", device)
@@ -858,7 +897,7 @@
                 LOG.debug("Device %s has required file system", device)
                 return
             else:
-                LOG.warn("Destroying filesystem on %s", device)
+                LOG.warning("Destroying filesystem on %s", device)
 
         else:
             LOG.debug("Device %s is cleared for formating", device)
@@ -883,14 +922,14 @@
             return
 
     if not reuse and fs_replace and device:
-        LOG.debug("Replacing file system on %s as instructed." % device)
+        LOG.debug("Replacing file system on %s as instructed.", device)
 
     if not device:
         LOG.debug("No device aviable that matches request. "
                   "Skipping fs creation for %s", fs_cfg)
         return
     elif not partition or str(partition).lower() == 'none':
-        LOG.debug("Using the raw device to place filesystem %s on" % label)
+        LOG.debug("Using the raw device to place filesystem %s on", label)
 
     else:
         LOG.debug("Error in device identification handling.")
@@ -901,7 +940,7 @@
 
     # Make sure the device is defined
     if not device:
-        LOG.warn("Device is not known: %s", device)
+        LOG.warning("Device is not known: %s", device)
         return
 
     # Check that we can create the FS
@@ -910,12 +949,23 @@
                   "must be set.", label)
 
     # Create the commands
+    shell = False
     if fs_cmd:
         fs_cmd = fs_cfg['cmd'] % {
             'label': label,
             'filesystem': fs_type,
             'device': device,
         }
+        shell = True
+
+        if overwrite:
+            LOG.warning(
+                "fs_setup:overwrite ignored because cmd was specified: %s",
+                fs_cmd)
+        if fs_opts:
+            LOG.warning(
+                "fs_setup:extra_opts ignored because cmd was specified: %s",
+                fs_cmd)
     else:
         # Find the mkfs command
         mkfs_cmd = util.which("mkfs.%s" % fs_type)
@@ -923,8 +973,8 @@
             mkfs_cmd = util.which("mk%s" % fs_type)
 
         if not mkfs_cmd:
-            LOG.warn("Cannot create fstype '%s'. No mkfs.%s command", fs_type,
-                     fs_type)
+            LOG.warning("Cannot create fstype '%s'. No mkfs.%s command",
+                        fs_type, fs_type)
             return
 
         fs_cmd = [mkfs_cmd, device]
@@ -936,14 +986,14 @@
         if overwrite or device_type(device) == "disk":
             fs_cmd.append(lookup_force_flag(fs_type))
 
-        # Add the extends FS options
-        if fs_opts:
-            fs_cmd.extend(fs_opts)
+    # Add the extends FS options
+    if fs_opts:
+        fs_cmd.extend(fs_opts)
 
     LOG.debug("Creating file system %s on %s", label, device)
-    LOG.debug("     Using cmd: %s", " ".join(fs_cmd))
+    LOG.debug("     Using cmd: %s", str(fs_cmd))
     try:
-        util.subp(fs_cmd)
+        util.subp(fs_cmd, shell=shell)
     except Exception as e:
        raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e))
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_fan.py cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_fan.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_fan.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_fan.py 2017-05-26 18:36:38.000000000 +0000
@@ -64,7 +64,7 @@
         try:
             return util.subp(cmd, capture=True)
         except util.ProcessExecutionError as e:
-            LOG.warn("failed: %s (%s): %s", service, cmd, e)
+            LOG.warning("failed: %s (%s): %s", service, cmd, e)
             return False
 
     stop_failed = not run(cmds['stop'], msg='stop %s' % service)
@@ -74,7 +74,7 @@
 
     ret = run(cmds['start'], msg='start %s' % service)
     if ret and stop_failed:
-        LOG.warn("success: %s started", service)
+        LOG.warning("success: %s started", service)
 
     if 'enable' in cmds:
         ret = run(cmds['enable'], msg='enable %s' % service)
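[Illustrative note, not part of the diff.] One behavioral change in cc_disk_setup above deserves a note: a user-supplied ``fs_setup`` ``cmd`` is now %-rendered into a single string and run through a shell, while the built mkfs path stays an argv list. A minimal sketch of the rendering step (template and values hypothetical):

    # fs_setup entry keys: 'cmd', 'label', 'filesystem', 'device'
    cmd_tmpl = "mkfs -t %(filesystem)s -L %(label)s %(device)s"
    fs_cmd = cmd_tmpl % {'label': 'data', 'filesystem': 'ext4',
                         'device': '/dev/vdb1'}
    # util.subp(fs_cmd, shell=True)  # a string, so shell=True is required;
    # overwrite/extra_opts are ignored (with a warning) on this path.
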
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_growpart.py cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_growpart.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_growpart.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_growpart.py 2017-05-26 18:36:38.000000000 +0000
@@ -247,7 +247,20 @@
         result = util.get_mount_info(devent)
         if not result:
             raise ValueError("Could not determine device of '%s' % dev_ent")
-    return result[0]
+        dev = result[0]
+
+    container = util.is_container()
+
+    # Ensure the path is a block device.
+    if (dev == "/dev/root" and not container):
+        dev = util.rootdev_from_cmdline(util.get_cmdline())
+        if dev is None:
+            if os.path.exists(dev):
+                # if /dev/root exists, but we failed to convert
+                # that to a "real" /dev/ path device, then return it.
+                return dev
+            raise ValueError("Unable to find device '/dev/root'")
+    return dev
 
 
 def resize_devices(resizer, devices):
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_mounts.py cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_mounts.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_mounts.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_mounts.py 2017-05-26 18:36:38.000000000 +0000
@@ -216,8 +216,9 @@
         else:
             pinfo[k] = v
 
-    LOG.debug("suggest %(size)s swap for %(mem)s memory with '%(avail)s'"
-              " disk given max=%(max_in)s [max=%(max)s]'" % pinfo)
+    LOG.debug("suggest %s swap for %s memory with '%s'"
+              " disk given max=%s [max=%s]'", pinfo['size'], pinfo['mem'],
+              pinfo['avail'], pinfo['max_in'], pinfo['max'])
     return size
 
 
@@ -266,7 +267,7 @@
         return None or (filename, size)
     """
     if not isinstance(swapcfg, dict):
-        LOG.warn("input for swap config was not a dict.")
+        LOG.warning("input for swap config was not a dict.")
         return None
 
     fname = swapcfg.get('filename', '/swap.img')
@@ -289,7 +290,8 @@
                 return fname
             LOG.debug("swap file %s existed, but not in /proc/swaps", fname)
         except Exception:
-            LOG.warn("swap file %s existed. Error reading /proc/swaps", fname)
+            LOG.warning("swap file %s existed. Error reading /proc/swaps",
+                        fname)
             return fname
 
     try:
@@ -300,7 +302,7 @@
 
         return setup_swapfile(fname=fname, size=size, maxsize=maxsize)
     except Exception as e:
-        LOG.warn("failed to setup swap: %s", e)
+        LOG.warning("failed to setup swap: %s", e)
 
     return None
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_ntp.py cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_ntp.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_ntp.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_ntp.py 2017-05-26 18:36:38.000000000 +0000
@@ -53,14 +53,12 @@
 
 
 def handle(name, cfg, cloud, log, _args):
-    """
-    Enable and configure ntp
+    """Enable and configure ntp."""
 
-    ntp:
-       pools: ['0.{{distro}}.pool.ntp.org', '1.{{distro}}.pool.ntp.org']
-       servers: ['192.168.2.1']
-
-    """
+    if 'ntp' not in cfg:
+        LOG.debug(
+            "Skipping module named %s, not present or disabled by cfg", name)
+        return
 
     ntp_cfg = cfg.get('ntp', {})
 
@@ -69,15 +67,18 @@
                             " but not a dictionary type,"
                             " is a %s %instead"), type_utils.obj_name(ntp_cfg))
 
-    if 'ntp' not in cfg:
-        LOG.debug("Skipping module named %s,"
-                  "not present or disabled by cfg", name)
-        return True
-
-    install_ntp(cloud.distro.install_packages, packages=['ntp'],
-                check_exe="ntpd")
     rename_ntp_conf()
+    # ensure when ntp is installed it has a configuration file
+    # to use instead of starting up with packaged defaults
     write_ntp_config_template(ntp_cfg, cloud)
+    install_ntp(cloud.distro.install_packages, packages=['ntp'],
+                check_exe="ntpd")
+
+    # if ntp was already installed, it may not have started
+    try:
+        reload_ntp(systemd=cloud.distro.uses_systemd())
+    except util.ProcessExecutionError as e:
+        LOG.exception("Failed to reload/start ntp service: %s", e)
+        raise
 
 
 def install_ntp(install_func, packages=None, check_exe="ntpd"):
@@ -89,7 +90,10 @@
     install_func(packages)
 
 
-def rename_ntp_conf(config=NTP_CONF):
+def rename_ntp_conf(config=None):
+    """Rename any existing ntp.conf file and render from template"""
+    if config is None:  # For testing
+        config = NTP_CONF
     if os.path.exists(config):
         util.rename(config, config + ".dist")
 
@@ -107,8 +111,9 @@
     pools = cfg.get('pools', [])
 
     if len(servers) == 0 and len(pools) == 0:
-        LOG.debug('Adding distro default ntp pool servers')
         pools = generate_server_names(cloud.distro.name)
+        LOG.debug(
+            'Adding distro default ntp pool servers: %s', ','.join(pools))
 
     params = {
         'servers': servers,
@@ -125,4 +130,14 @@
 
     templater.render_to_file(template_fn, NTP_CONF, params)
 
+
+def reload_ntp(systemd=False):
+    service = 'ntp'
+    if systemd:
+        cmd = ['systemctl', 'reload-or-restart', service]
+    else:
+        cmd = ['service', service, 'restart']
+    util.subp(cmd, capture=True)
+
+
 # vi: ts=4 expandtab
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_resizefs.py cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_resizefs.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_resizefs.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_resizefs.py 2017-05-26 18:36:38.000000000 +0000
@@ -33,7 +33,10 @@
 """
 
 import errno
+import getopt
 import os
+import re
+import shlex
 import stat
 
 from cloudinit.settings import PER_ALWAYS
@@ -58,6 +61,62 @@
     return ('growfs', devpth)
 
 
+def _get_dumpfs_output(mount_point):
+    dumpfs_res, err = util.subp(['dumpfs', '-m', mount_point])
+    return dumpfs_res
+
+
+def _get_gpart_output(part):
+    gpart_res, err = util.subp(['gpart', 'show', part])
+    return gpart_res
+
+
+def _can_skip_resize_ufs(mount_point, devpth):
+    # extract the current fs sector size
+    """
+    # dumpfs -m /
+    # newfs command for / (/dev/label/rootfs)
+      newfs -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 -f 4096 -g 16384
+            -h 64 -i 8192 -j -k 6408 -m 8 -o time -s 58719232 /dev/label/rootf
+    """
+    cur_fs_sz = None
+    frag_sz = None
+    dumpfs_res = _get_dumpfs_output(mount_point)
+    for line in dumpfs_res.splitlines():
+        if not line.startswith('#'):
+            newfs_cmd = shlex.split(line)
+            opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:'
+            optlist, args = getopt.getopt(newfs_cmd[1:], opt_value)
+            for o, a in optlist:
+                if o == "-s":
+                    cur_fs_sz = int(a)
+                if o == "-f":
+                    frag_sz = int(a)
+    # check the current partition size
+    """
+    # gpart show /dev/da0
+=>      40  62914480  da0  GPT  (30G)
+        40      1024    1  freebsd-boot  (512K)
+      1064  58719232    2  freebsd-ufs  (28G)
+  58720296   3145728    3  freebsd-swap  (1.5G)
+  61866024   1048496       - free -  (512M)
+    """
+    expect_sz = None
+    m = re.search('^(/dev/.+)p([0-9])$', devpth)
+    gpart_res = _get_gpart_output(m.group(1))
+    for line in gpart_res.splitlines():
+        if re.search(r"freebsd-ufs", line):
+            fields = line.split()
+            expect_sz = int(fields[1])
+    # Normalize the gpart sector size,
+    # because the size is not exactly the same as fs size.
+    normal_expect_sz = (expect_sz - expect_sz % (frag_sz / 512))
+    if normal_expect_sz == cur_fs_sz:
+        return True
+    else:
+        return False
+
+
 # Do not use a dictionary as these commands should be able to be used
 # for multiple filesystem types if possible, e.g. one command for
 # ext2, ext3 and ext4.
@@ -68,6 +127,10 @@
     ('ufs', _resize_ufs),
 ]
 
+RESIZE_FS_PRECHECK_CMDS = {
+    'ufs': _can_skip_resize_ufs
+}
+
 NOBLOCK = "noblock"
 
 
@@ -90,6 +153,14 @@
     return "/dev/" + found
 
 
+def can_skip_resize(fs_type, resize_what, devpth):
+    fstype_lc = fs_type.lower()
+    for i, func in RESIZE_FS_PRECHECK_CMDS.items():
+        if fstype_lc.startswith(i):
+            return func(resize_what, devpth)
+    return False
+
+
 def handle(name, cfg, _cloud, log, args):
     if len(args) != 0:
         resize_root = args[0]
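[Illustrative note, not part of the diff.] A worked instance of the UFS precheck above, using the numbers from its own docstrings: dumpfs reports ``-s 58719232`` (filesystem size) and ``-f 4096`` (fragment size), and gpart reports a 58719232-sector freebsd-ufs partition, so the resize can be skipped:

    frag_sz = 4096
    cur_fs_sz = 58719232           # from 'newfs ... -s 58719232'
    expect_sz = 58719232           # freebsd-ufs size column from gpart
    # round the partition size down to a whole number of fragments
    normal_expect_sz = expect_sz - expect_sz % (frag_sz / 512)
    assert normal_expect_sz == cur_fs_sz   # sizes agree -> skip growfs
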
@@ -121,7 +192,7 @@
     # Ensure the path is a block device.
     if (devpth == "/dev/root" and not os.path.exists(devpth) and
             not container):
-        devpth = rootdev_from_cmdline(util.get_cmdline())
+        devpth = util.rootdev_from_cmdline(util.get_cmdline())
         if devpth is None:
             log.warn("Unable to find device '/dev/root'")
             return
@@ -158,6 +229,11 @@
         return
 
     resizer = None
+    if can_skip_resize(fs_type, resize_what, devpth):
+        log.debug("Skip resize filesystem type %s for %s",
+                  fs_type, resize_what)
+        return
+
     fstype_lc = fs_type.lower()
     for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
         if fstype_lc.startswith(pfix):
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_resolv_conf.py cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_resolv_conf.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_resolv_conf.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_resolv_conf.py 2017-05-26 18:36:38.000000000 +0000
@@ -77,7 +77,7 @@
         params['options'] = {}
 
     params['flags'] = flags
-    LOG.debug("Writing resolv.conf from template %s" % template_fn)
+    LOG.debug("Writing resolv.conf from template %s", template_fn)
     templater.render_to_file(template_fn, target_fname, params)
 
 
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_rsyslog.py cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_rsyslog.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_rsyslog.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_rsyslog.py 2017-05-26 18:36:38.000000000 +0000
@@ -252,7 +252,8 @@
     for cur_pos, ent in enumerate(configs):
         if isinstance(ent, dict):
             if "content" not in ent:
-                LOG.warn("No 'content' entry in config entry %s", cur_pos + 1)
+                LOG.warning("No 'content' entry in config entry %s",
+                            cur_pos + 1)
                 continue
             content = ent['content']
             filename = ent.get("filename", def_fname)
@@ -262,7 +263,7 @@
 
         filename = filename.strip()
         if not filename:
-            LOG.warn("Entry %s has an empty filename", cur_pos + 1)
+            LOG.warning("Entry %s has an empty filename", cur_pos + 1)
             continue
 
         filename = os.path.join(cfg_dir, filename)
@@ -389,7 +390,7 @@
         try:
             lines.append(str(parse_remotes_line(line, name=name)))
         except ValueError as e:
-            LOG.warn("failed loading remote %s: %s [%s]", name, line, e)
+            LOG.warning("failed loading remote %s: %s [%s]", name, line, e)
     if footer is not None:
         lines.append(footer)
     return '\n'.join(lines) + "\n"
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_set_passwords.py cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_set_passwords.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_set_passwords.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_set_passwords.py 2017-05-26 18:36:38.000000000 +0000
@@ -23,7 +23,8 @@
 ``username:password`` pairs can be specified. The usernames specified
 must already exist on the system, or have been created using the
 ``cc_users_groups`` module. A password can be randomly generated using
-``username:RANDOM`` or ``username:R``. Password ssh authentication can be
+``username:RANDOM`` or ``username:R``. A hashed password can be specified
+using ``username:$6$salt$hash``. Password ssh authentication can be
 enabled, disabled, or left to system defaults using ``ssh_pwauth``.
 
 .. note::
@@ -45,13 +46,25 @@
     expire: <true/false>
 
     chpasswd:
+        list: |
+            user1:password1
+            user2:RANDOM
+            user3:password3
+            user4:R
+
+    ##
+    # or as yaml list
+    ##
+    chpasswd:
         list:
             - user1:password1
-            - user2:Random
+            - user2:RANDOM
             - user3:password3
             - user4:R
+            - user4:$6$rL..$ej...
""" +import re import sys from cloudinit.distros import ug_util @@ -79,38 +92,66 @@ if 'chpasswd' in cfg: chfg = cfg['chpasswd'] - plist = util.get_cfg_option_str(chfg, 'list', plist) + if 'list' in chfg and chfg['list']: + if isinstance(chfg['list'], list): + log.debug("Handling input for chpasswd as list.") + plist = util.get_cfg_option_list(chfg, 'list', plist) + else: + log.debug("Handling input for chpasswd as multiline string.") + plist = util.get_cfg_option_str(chfg, 'list', plist) + if plist: + plist = plist.splitlines() + expire = util.get_cfg_option_bool(chfg, 'expire', expire) if not plist and password: (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) (user, _user_config) = ug_util.extract_default(users) if user: - plist = "%s:%s" % (user, password) + plist = ["%s:%s" % (user, password)] else: log.warn("No default or defined user to change password for.") errors = [] if plist: plist_in = [] + hashed_plist_in = [] + hashed_users = [] randlist = [] users = [] - for line in plist.splitlines(): + prog = re.compile(r'\$[1,2a,2y,5,6](\$.+){2}') + for line in plist: u, p = line.split(':', 1) - if p == "R" or p == "RANDOM": - p = rand_user_password() - randlist.append("%s:%s" % (u, p)) - plist_in.append("%s:%s" % (u, p)) - users.append(u) + if prog.match(p) is not None and ":" not in p: + hashed_plist_in.append("%s:%s" % (u, p)) + hashed_users.append(u) + else: + if p == "R" or p == "RANDOM": + p = rand_user_password() + randlist.append("%s:%s" % (u, p)) + plist_in.append("%s:%s" % (u, p)) + users.append(u) ch_in = '\n'.join(plist_in) + '\n' - try: - log.debug("Changing password for %s:", users) - util.subp(['chpasswd'], ch_in) - except Exception as e: - errors.append(e) - util.logexc(log, "Failed to set passwords with chpasswd for %s", - users) + if users: + try: + log.debug("Changing password for %s:", users) + util.subp(['chpasswd'], ch_in) + except Exception as e: + errors.append(e) + util.logexc( + log, "Failed to set passwords with chpasswd for %s", users) + + hashed_ch_in = '\n'.join(hashed_plist_in) + '\n' + if hashed_users: + try: + log.debug("Setting hashed password for %s:", hashed_users) + util.subp(['chpasswd', '-e'], hashed_ch_in) + except Exception as e: + errors.append(e) + util.logexc( + log, "Failed to set hashed passwords with chpasswd for %s", + hashed_users) if len(randlist): blurb = ("Set the following 'random' passwords\n", @@ -174,7 +215,8 @@ pw_auth)) lines = [str(l) for l in new_lines] - util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines)) + util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines), + copy_mode=True) try: cmd = cloud.distro.init_cmd # Default service diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_snap_config.py cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_snap_config.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_snap_config.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_snap_config.py 2017-05-26 18:36:38.000000000 +0000 @@ -5,8 +5,8 @@ # This file is part of cloud-init. See LICENSE file for license information. """ -Snappy ------- +Snap Config +----------- **Summary:** snap_config modules allows configuration of snapd. 
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_snap_config.py cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_snap_config.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_snap_config.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_snap_config.py 2017-05-26 18:36:38.000000000 +0000
@@ -5,8 +5,8 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
 """
-Snappy
-------
+Snap Config
+-----------
 
 **Summary:** snap_config modules allows configuration of snapd.
 
 This module uses the same ``snappy`` namespace for configuration but
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_snappy.py cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_snappy.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_snappy.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_snappy.py 2017-05-26 18:36:38.000000000 +0000
@@ -283,8 +283,8 @@
             render_snap_op(**pkg_op)
         except Exception as e:
             fails.append((pkg_op, e,))
-            LOG.warn("'%s' failed for '%s': %s",
-                     pkg_op['op'], pkg_op['name'], e)
+            LOG.warning("'%s' failed for '%s': %s",
+                        pkg_op['op'], pkg_op['name'], e)
 
     # Default to disabling SSH
     ssh_enabled = mycfg.get('ssh_enabled', "auto")
@@ -303,7 +303,7 @@
             LOG.debug("Enabling SSH, password authentication requested")
             ssh_enabled = True
     elif ssh_enabled not in (True, False):
-        LOG.warn("Unknown value '%s' in ssh_enabled", ssh_enabled)
+        LOG.warning("Unknown value '%s' in ssh_enabled", ssh_enabled)
 
     disable_enable_ssh(ssh_enabled)
 
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_users_groups.py cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_users_groups.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_users_groups.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_users_groups.py 2017-05-26 18:36:38.000000000 +0000
@@ -25,28 +25,39 @@
 config keys for an entry in ``users`` are as follows:
 
     - ``name``: The user's login name
-    - ``homedir``: Optional. Home dir for user. Default is ``/home/<username>``
-    - ``primary-group``: Optional. Primary group for user. Default to new group
-      named after user.
+    - ``expiredate``: Optional. Date on which the user's login will be
+      disabled. Default: none
+    - ``gecos``: Optional. Comment about the user, usually a comma-separated
+      string of real name and contact information. Default: none
     - ``groups``: Optional. Additional groups to add the user to. Default: none
-    - ``selinux-user``: Optional. SELinux user for user's login. Default to
-      default SELinux user.
-    - ``lock_passwd``: Optional. Disable password login. Default: true
+    - ``homedir``: Optional. Home dir for user. Default is ``/home/<username>``
     - ``inactive``: Optional. Mark user inactive. Default: false
-    - ``passwd``: Hash of user password
+    - ``lock_passwd``: Optional. Disable password login. Default: true
     - ``no-create-home``: Optional. Do not create home directory. Default:
       false
-    - ``no-user-group``: Optional. Do not create group named after user.
-      Default: false
     - ``no-log-init``: Optional. Do not initialize lastlog and faillog for
       user. Default: false
-    - ``ssh-import-id``: Optional. SSH id to import for user. Default: none
-    - ``ssh-autorized-keys``: Optional. List of ssh keys to add to user's
+    - ``no-user-group``: Optional. Do not create group named after user.
+      Default: false
+    - ``passwd``: Hash of user password
+    - ``primary-group``: Optional. Primary group for user. Default to new group
+      named after user.
+    - ``selinux-user``: Optional. SELinux user for user's login. Default to
+      default SELinux user.
+    - ``shell``: Optional. The user's login shell. The default is to set no
+      shell, which results in a system-specific default being used.
+    - ``snapuser``: Optional. Specify an email address to create the user as
+      a Snappy user through ``snap create-user``. If an Ubuntu SSO account is
+      associated with the address, username and SSH keys will be requested from
+      there. Default: none
+    - ``ssh-authorized-keys``: Optional. List of ssh keys to add to user's
       authkeys file. Default: none
+    - ``ssh-import-id``: Optional. SSH id to import for user. Default: none
     - ``sudo``: Optional. Sudo rule to use, or list of sudo rules to use.
       Default: none.
     - ``system``: Optional. Create user as system user with no home directory.
       Default: false
+    - ``uid``: Optional. The user's ID. Default: The next available value.
 
 .. note::
     Specifying a hash of a user's password with ``passwd`` is a security risk
@@ -65,23 +76,33 @@
 **Config keys**::
 
     groups:
-        - ubuntu: [foo, bar]
-        - cloud-users
+        - <group>: [<user>, <user>]
+        - <group>
 
     users:
         - default
         - name: <username>
-          gecos: <real name>
-          primary-group: <primary group>
-          groups: <additional groups>
-          selinux-user: <selinux username>
           expiredate: <date>
-          ssh-import-id: <none/id>
+          gecos: <comment>
+          groups: <additional groups>
+          homedir: <home directory>
+          inactive: <true/false>
           lock_passwd: <true/false>
+          no-create-home: <true/false>
+          no-log-init: <true/false>
+          no-user-group: <true/false>
           passwd: <password>
+          primary-group: <primary group>
+          selinux-user: <selinux username>
+          shell: <shell path>
+          snapuser: <email>
+          ssh-authorized-keys:
+              - <key>
+              - <key>
+          ssh-import-id: <id>
           sudo: <sudo config>
-          inactive: <true/false>
           system: <true/false>
+          uid: <user id>
 """
 
 # Ensure this is aliased to a name not 'distros'
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_yum_add_repo.py cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_yum_add_repo.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/config/cc_yum_add_repo.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/config/cc_yum_add_repo.py 2017-05-26 18:36:38.000000000 +0000
@@ -32,7 +32,10 @@
 
 import os
 
-import configobj
+try:
+    from configparser import ConfigParser
+except ImportError:
+    from ConfigParser import ConfigParser
 import six
 
 from cloudinit import util
@@ -52,8 +55,8 @@
         return str(int(val))
     if isinstance(val, (list, tuple)):
         # Can handle 'lists' in certain cases
-        # See: http://bit.ly/Qqrf1t
-        return "\n    ".join([_format_repo_value(v) for v in val])
+        # See: https://linux.die.net/man/5/yum.conf
+        return "\n".join([_format_repo_value(v) for v in val])
     if not isinstance(val, six.string_types):
         return str(val)
     return val
@@ -62,16 +65,19 @@
 # TODO(harlowja): move to distro?
 # See man yum.conf
 def _format_repository_config(repo_id, repo_config):
-    to_be = configobj.ConfigObj()
-    to_be[repo_id] = {}
+    to_be = ConfigParser()
+    to_be.add_section(repo_id)
     # Do basic translation of the items -> values
     for (k, v) in repo_config.items():
         # For now assume that people using this know
         # the format of yum and don't verify keys/values further
-        to_be[repo_id][k] = _format_repo_value(v)
-    lines = to_be.write()
-    lines.insert(0, "# Created by cloud-init on %s" % (util.time_rfc2822()))
-    return "\n".join(lines)
+        to_be.set(repo_id, k, _format_repo_value(v))
+    to_be_stream = six.StringIO()
+    to_be.write(to_be_stream)
+    to_be_stream.seek(0)
+    lines = to_be_stream.readlines()
+    lines.insert(0, "# Created by cloud-init on %s\n" % (util.time_rfc2822()))
+    return "".join(lines)
 
 
 def handle(name, cfg, _cloud, log, _args):
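[Illustrative note, not part of the diff.] A quick illustration of what the ConfigParser-based rendering above emits for a repo entry (repo id and values hypothetical):

    try:
        from configparser import ConfigParser
    except ImportError:
        from ConfigParser import ConfigParser
    import io

    to_be = ConfigParser()
    to_be.add_section('epel-testing')
    to_be.set('epel-testing', 'baseurl', 'http://repo.example/epel-testing')
    to_be.set('epel-testing', 'enabled', '1')
    buf = io.StringIO()
    to_be.write(buf)
    print(buf.getvalue())
    # [epel-testing]
    # baseurl = http://repo.example/epel-testing
    # enabled = 1
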
{"netplan_path": network_conf_fn["netplan"], + "netplan_header": ENI_HEADER, + "postcmds": True} + } def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) @@ -51,12 +61,6 @@ # should only happen say once per instance...) self._runner = helpers.Runners(paths) self.osfamily = 'debian' - self._net_renderer = eni.Renderer({ - 'eni_path': self.network_conf_fn, - 'eni_header': ENI_HEADER, - 'links_path_prefix': None, - 'netrules_path': None, - }) def apply_locale(self, locale, out_fn=None): if not out_fn: @@ -76,14 +80,13 @@ self.package_command('install', pkgs=pkglist) def _write_network(self, settings): - util.write_file(self.network_conf_fn, settings) + # this is a legacy method, it will always write eni + util.write_file(self.network_conf_fn["eni"], settings) return ['all'] def _write_network_config(self, netconfig): - ns = parse_net_config_data(netconfig) - self._net_renderer.render_network_state("/", ns) _maybe_remove_legacy_eth0() - return [] + return self._supported_write_network_config(netconfig) def _bring_up_interfaces(self, device_names): use_all = False @@ -140,8 +143,7 @@ pkgs = [] e = os.environ.copy() - # See: http://tiny.cc/kg91fw - # Or: http://tiny.cc/mh91fw + # See: http://manpages.ubuntu.com/manpages/xenial/man7/debconf.7.html e['DEBIAN_FRONTEND'] = 'noninteractive' wcfg = self.get_option("apt_get_wrapper", APT_GET_WRAPPER) @@ -221,6 +223,6 @@ except Exception: msg = bmsg + " %s exists, but could not be read." % path - LOG.warn(msg) + LOG.warning(msg) # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/distros/freebsd.py cloud-init-0.7.9-153-g16a7302f/cloudinit/distros/freebsd.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/distros/freebsd.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/distros/freebsd.py 2017-05-26 18:36:38.000000000 +0000 @@ -30,6 +30,7 @@ login_conf_fn_bak = '/etc/login.conf.orig' resolv_conf_fn = '/etc/resolv.conf' ci_sudoers_fn = '/usr/local/etc/sudoers.d/90-cloud-init-users' + default_primary_nic = 'hn0' def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) @@ -38,6 +39,8 @@ # should only happen say once per instance...) self._runner = helpers.Runners(paths) self.osfamily = 'freebsd' + self.ipv4_pat = re.compile(r"\s+inet\s+\d+[.]\d+[.]\d+[.]\d+") + cfg['ssh_svcname'] = 'sshd' # Updates a key in /etc/rc.conf. 
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/distros/freebsd.py cloud-init-0.7.9-153-g16a7302f/cloudinit/distros/freebsd.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/distros/freebsd.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/distros/freebsd.py 2017-05-26 18:36:38.000000000 +0000
@@ -30,6 +30,7 @@
     login_conf_fn_bak = '/etc/login.conf.orig'
     resolv_conf_fn = '/etc/resolv.conf'
     ci_sudoers_fn = '/usr/local/etc/sudoers.d/90-cloud-init-users'
+    default_primary_nic = 'hn0'
 
     def __init__(self, name, cfg, paths):
         distros.Distro.__init__(self, name, cfg, paths)
@@ -38,6 +39,8 @@
         # should only happen say once per instance...)
         self._runner = helpers.Runners(paths)
         self.osfamily = 'freebsd'
+        self.ipv4_pat = re.compile(r"\s+inet\s+\d+[.]\d+[.]\d+[.]\d+")
+        cfg['ssh_svcname'] = 'sshd'
 
     # Updates a key in /etc/rc.conf.
     def updatercconf(self, key, value):
@@ -148,7 +151,7 @@
     def create_group(self, name, members):
         group_add_cmd = ['pw', '-n', name]
         if util.is_group(name):
-            LOG.warn("Skipping creation of existing group '%s'", name)
+            LOG.warning("Skipping creation of existing group '%s'", name)
         else:
             try:
                 util.subp(group_add_cmd)
@@ -160,8 +163,8 @@
         if len(members) > 0:
             for member in members:
                 if not util.is_user(member):
-                    LOG.warn("Unable to add group member '%s' to group '%s'"
-                             "; user does not exist.", member, name)
+                    LOG.warning("Unable to add group member '%s' to group '%s'"
+                                "; user does not exist.", member, name)
                     continue
                 try:
                     util.subp(['pw', 'usermod', '-n', name, '-G', member])
@@ -183,7 +186,6 @@
             "gecos": '-c',
             "primary_group": '-g',
             "groups": '-G',
-            "passwd": '-h',
             "shell": '-s',
             "inactive": '-E',
         }
@@ -193,19 +195,11 @@
             "no_log_init": '--no-log-init',
         }
 
-        redact_opts = ['passwd']
-
         for key, val in kwargs.items():
             if (key in adduser_opts and val and
                     isinstance(val, six.string_types)):
                 adduser_cmd.extend([adduser_opts[key], val])
 
-                # Redact certain fields from the logs
-                if key in redact_opts:
-                    log_adduser_cmd.extend([adduser_opts[key], 'REDACTED'])
-                else:
-                    log_adduser_cmd.extend([adduser_opts[key], val])
-
             elif key in adduser_flags and val:
                 adduser_cmd.append(adduser_flags[key])
                 log_adduser_cmd.append(adduser_flags[key])
@@ -226,19 +220,21 @@
         except Exception as e:
             util.logexc(LOG, "Failed to create user %s", name)
             raise e
+        # Set the password if it is provided
+        # For security consideration, only hashed passwd is assumed
+        passwd_val = kwargs.get('passwd', None)
+        if passwd_val is not None:
+            self.set_passwd(name, passwd_val, hashed=True)
 
     def set_passwd(self, user, passwd, hashed=False):
-        cmd = ['pw', 'usermod', user]
-
         if hashed:
-            cmd.append('-H')
+            hash_opt = "-H"
         else:
-            cmd.append('-h')
-
-        cmd.append('0')
+            hash_opt = "-h"
 
         try:
-            util.subp(cmd, passwd, logstring="chpasswd for %s" % user)
+            util.subp(['pw', 'usermod', user, hash_opt, '0'],
+                      data=passwd, logstring="chpasswd for %s" % user)
         except Exception as e:
             util.logexc(LOG, "Failed to set password for %s", user)
             raise e
@@ -271,6 +267,255 @@
         keys = set(kwargs['ssh_authorized_keys']) or []
         ssh_util.setup_user_keys(keys, name, options=None)
 
+    @staticmethod
+    def get_ifconfig_list():
+        cmd = ['ifconfig', '-l']
+        (nics, err) = util.subp(cmd, rcs=[0, 1])
+        if len(err):
+            LOG.warning("Error running %s: %s", cmd, err)
+            return None
+        return nics
+
+    @staticmethod
+    def get_ifconfig_ifname_out(ifname):
+        cmd = ['ifconfig', ifname]
+        (if_result, err) = util.subp(cmd, rcs=[0, 1])
+        if len(err):
+            LOG.warning("Error running %s: %s", cmd, err)
+            return None
+        return if_result
+
+    @staticmethod
+    def get_ifconfig_ether():
+        cmd = ['ifconfig', '-l', 'ether']
+        (nics, err) = util.subp(cmd, rcs=[0, 1])
+        if len(err):
+            LOG.warning("Error running %s: %s", cmd, err)
+            return None
+        return nics
+
+    @staticmethod
+    def get_interface_mac(ifname):
+        if_result = Distro.get_ifconfig_ifname_out(ifname)
+        for item in if_result.splitlines():
+            if item.find('ether ') != -1:
+                mac = str(item.split()[1])
+                if mac:
+                    return mac
+
+    @staticmethod
+    def get_devicelist():
+        nics = Distro.get_ifconfig_list()
+        return nics.split()
+
+    @staticmethod
+    def get_ipv6():
+        ipv6 = []
+        nics = Distro.get_devicelist()
+        for nic in nics:
+            if_result = Distro.get_ifconfig_ifname_out(nic)
+            for item in if_result.splitlines():
+                if item.find("inet6 ") != -1 and item.find("scopeid") == -1:
+                    ipv6.append(nic)
+        return ipv6
+
+    def get_ipv4(self):
+        ipv4 = []
+        nics = Distro.get_devicelist()
+        for nic in nics:
+            if_result = Distro.get_ifconfig_ifname_out(nic)
+            for item in if_result.splitlines():
+                print(item)
+                if self.ipv4_pat.match(item):
+                    ipv4.append(nic)
+        return ipv4
+
+    def is_up(self, ifname):
+        if_result = Distro.get_ifconfig_ifname_out(ifname)
+        pat = "^" + ifname
+        for item in if_result.splitlines():
+            if re.match(pat, item):
+                flags = item.split('<')[1].split('>')[0]
+                if flags.find("UP") != -1:
+                    return True
+
+    def _get_current_rename_info(self, check_downable=True):
+        """Collect information necessary for rename_interfaces."""
+        names = Distro.get_devicelist()
+        bymac = {}
+        for n in names:
+            bymac[Distro.get_interface_mac(n)] = {
+                'name': n, 'up': self.is_up(n), 'downable': None}
+
+        if check_downable:
+            nics_with_addresses = set()
+            ipv6 = self.get_ipv6()
+            ipv4 = self.get_ipv4()
+            for bytes_out in (ipv6, ipv4):
+                for i in ipv6:
+                    nics_with_addresses.update(i)
+                for i in ipv4:
+                    nics_with_addresses.update(i)
+
+            for d in bymac.values():
+                d['downable'] = (d['up'] is False or
+                                 d['name'] not in nics_with_addresses)
+
+        return bymac
+
+    def _rename_interfaces(self, renames):
+        if not len(renames):
+            LOG.debug("no interfaces to rename")
+            return
+
+        current_info = self._get_current_rename_info()
+
+        cur_bymac = {}
+        for mac, data in current_info.items():
+            cur = data.copy()
+            cur['mac'] = mac
+            cur_bymac[mac] = cur
+
+        def update_byname(bymac):
+            return dict((data['name'], data)
+                        for data in bymac.values())
+
+        def rename(cur, new):
+            util.subp(["ifconfig", cur, "name", new], capture=True)
+
+        def down(name):
+            util.subp(["ifconfig", name, "down"], capture=True)
+
+        def up(name):
+            util.subp(["ifconfig", name, "up"], capture=True)
+
+        ops = []
+        errors = []
+        ups = []
+        cur_byname = update_byname(cur_bymac)
+        tmpname_fmt = "cirename%d"
+        tmpi = -1
+
+        for mac, new_name in renames:
+            cur = cur_bymac.get(mac, {})
+            cur_name = cur.get('name')
+            cur_ops = []
+            if cur_name == new_name:
+                # nothing to do
+                continue
+
+            if not cur_name:
+                errors.append("[nic not present] Cannot rename mac=%s to %s"
+                              ", not available." % (mac, new_name))
+                continue
+
+            if cur['up']:
+                msg = "[busy] Error renaming mac=%s from %s to %s"
+                if not cur['downable']:
+                    errors.append(msg % (mac, cur_name, new_name))
+                    continue
+                cur['up'] = False
+                cur_ops.append(("down", mac, new_name, (cur_name,)))
+                ups.append(("up", mac, new_name, (new_name,)))
+
+            if new_name in cur_byname:
+                target = cur_byname[new_name]
+                if target['up']:
+                    msg = "[busy-target] Error renaming mac=%s from %s to %s."
+                    if not target['downable']:
+                        errors.append(msg % (mac, cur_name, new_name))
+                        continue
+                    else:
+                        cur_ops.append(("down", mac, new_name, (new_name,)))
+
+                tmp_name = None
+                while tmp_name is None or tmp_name in cur_byname:
+                    tmpi += 1
+                    tmp_name = tmpname_fmt % tmpi
+
+                cur_ops.append(("rename", mac, new_name, (new_name, tmp_name)))
+                target['name'] = tmp_name
+                cur_byname = update_byname(cur_bymac)
+                if target['up']:
+                    ups.append(("up", mac, new_name, (tmp_name,)))
+
+            cur_ops.append(("rename", mac, new_name, (cur['name'], new_name)))
+            cur['name'] = new_name
+            cur_byname = update_byname(cur_bymac)
+            ops += cur_ops
+
+        opmap = {'rename': rename, 'down': down, 'up': up}
+        if len(ops) + len(ups) == 0:
+            if len(errors):
+                LOG.debug("unable to do any work for renaming of %s", renames)
+            else:
+                LOG.debug("no work necessary for renaming of %s", renames)
+        else:
+            LOG.debug("achieving renaming of %s with ops %s",
+                      renames, ops + ups)
+
+            for op, mac, new_name, params in ops + ups:
+                try:
+                    opmap.get(op)(*params)
+                except Exception as e:
+                    errors.append(
+                        "[unknown] Error performing %s%s for %s, %s: %s" %
+                        (op, params, mac, new_name, e))
+
+        if len(errors):
+            raise Exception('\n'.join(errors))
+
+    def apply_network_config_names(self, netcfg):
+        renames = []
+        for ent in netcfg.get('config', {}):
+            if ent.get('type') != 'physical':
+                continue
+            mac = ent.get('mac_address')
+            name = ent.get('name')
+            if not mac:
+                continue
+            renames.append([mac, name])
+        return self._rename_interfaces(renames)
+
+    @classmethod
+    def generate_fallback_config(self):
+        nics = Distro.get_ifconfig_ether()
+        if nics is None:
+            LOG.debug("Fail to get network interfaces")
+            return None
+        potential_interfaces = nics.split()
+        connected = []
+        for nic in potential_interfaces:
+            pat = "^" + nic
+            if_result = Distro.get_ifconfig_ifname_out(nic)
+            for item in if_result.split("\n"):
+                if re.match(pat, item):
+                    flags = item.split('<')[1].split('>')[0]
+                    if flags.find("RUNNING") != -1:
+                        connected.append(nic)
+        if connected:
+            potential_interfaces = connected
+        names = list(sorted(potential_interfaces))
+        default_pri_nic = Distro.default_primary_nic
+        if default_pri_nic in names:
+            names.remove(default_pri_nic)
+            names.insert(0, default_pri_nic)
+        target_name = None
+        target_mac = None
+        for name in names:
+            mac = Distro.get_interface_mac(name)
+            if mac:
+                target_name = name
+                target_mac = mac
+                break
+        if target_mac and target_name:
+            nconf = {'config': [], 'version': 1}
+            nconf['config'].append(
+                {'type': 'physical', 'name': target_name,
+                 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]})
+            return nconf
+        else:
+            return None
+
     def _write_network(self, settings):
         entries = net_util.translate_network(settings)
         nameservers = []
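[Illustrative note, not part of the diff.] For reference, the fallback generator above yields a version-1 network config shaped like the following (interface name per ``default_primary_nic``; the MAC is hypothetical):

    fallback = {'version': 1,
                'config': [{'type': 'physical',
                            'name': 'hn0',
                            'mac_address': '00:15:5d:00:00:01',
                            'subnets': [{'type': 'dhcp'}]}]}
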
(_out, err) = util.subp(cmd, rcs=[0, 1]) if len(err): - LOG.warn("Error running %s: %s", cmd, err) + LOG.warning("Error running %s: %s", cmd, err) def install_packages(self, pkglist): self.update_package_sources() diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/distros/gentoo.py cloud-init-0.7.9-153-g16a7302f/cloudinit/distros/gentoo.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/distros/gentoo.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/distros/gentoo.py 2017-05-26 18:36:38.000000000 +0000 @@ -96,8 +96,8 @@ try: (_out, err) = util.subp(cmd) if len(err): - LOG.warn("Running %s resulted in stderr output: %s", - cmd, err) + LOG.warning("Running %s resulted in stderr output: %s", + cmd, err) except util.ProcessExecutionError: util.logexc(LOG, "Running interface command %s failed", cmd) @@ -121,7 +121,8 @@ try: (_out, err) = util.subp(cmd) if len(err): - LOG.warn("Running %s resulted in stderr output: %s", cmd, err) + LOG.warning("Running %s resulted in stderr output: %s", + cmd, err) return True except util.ProcessExecutionError: util.logexc(LOG, "Running interface command %s failed", cmd) @@ -138,8 +139,8 @@ try: (_out, err) = util.subp(cmd) if len(err): - LOG.warn("Running %s resulted in stderr output: %s", cmd, - err) + LOG.warning("Running %s resulted in stderr output: %s", + cmd, err) except util.ProcessExecutionError: util.logexc(LOG, "Running interface command %s failed", cmd) return False diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/distros/__init__.py cloud-init-0.7.9-153-g16a7302f/cloudinit/distros/__init__.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/distros/__init__.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/distros/__init__.py 2017-05-26 18:36:38.000000000 +0000 @@ -22,6 +22,7 @@ from cloudinit import net from cloudinit.net import eni from cloudinit.net import network_state +from cloudinit.net import renderers from cloudinit import ssh_util from cloudinit import type_utils from cloudinit import util @@ -50,6 +51,7 @@ hostname_conf_fn = "/etc/hostname" tz_zone_dir = "/usr/share/zoneinfo" init_cmd = ['service'] # systemctl, service etc + renderer_configs = {} def __init__(self, name, cfg, paths): self._paths = paths @@ -69,6 +71,17 @@ def _write_network_config(self, settings): raise NotImplementedError() + def _supported_write_network_config(self, network_config): + priority = util.get_cfg_by_path( + self._cfg, ('network', 'renderers'), None) + + name, render_cls = renderers.select(priority=priority) + LOG.debug("Selected renderer '%s' from priority list: %s", + name, priority) + renderer = render_cls(config=self.renderer_configs.get(name)) + renderer.render_network_config(network_config=network_config) + return [] + def _find_tz_file(self, tz): tz_file = os.path.join(self.tz_zone_dir, str(tz)) if not os.path.isfile(tz_file): @@ -130,9 +143,9 @@ def _apply_network_from_network_config(self, netconfig, bring_up=True): distro = self.__class__ - LOG.warn("apply_network_config is not currently implemented " - "for distribution '%s'. Attempting to use apply_network", - distro) + LOG.warning("apply_network_config is not currently implemented " + "for distribution '%s'. Attempting to use apply_network", + distro) header = '\n'.join([ "# Converted from network_config for distro %s" % distro, "# Implmentation of _write_network_config is needed." 
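The _supported_write_network_config() hunk above is the shared path by which distros pick a renderer; the cloudinit.net.renderers module it relies on appears later in this patch. A minimal sketch of that flow, assuming the ('network', 'renderers') config key added to settings.py; the priority list and target directory here are example values, not defaults:

    # Sketch of the selection flow wired up in
    # Distro._supported_write_network_config(); priority=None would fall
    # back to renderers.DEFAULT_PRIORITY.
    from cloudinit.net import renderers

    name, render_cls = renderers.select(priority=['netplan', 'eni'])
    renderer = render_cls(config=None)  # distros pass renderer_configs.get(name)
    renderer.render_network_config(
        network_config={'version': 1,
                        'config': [{'type': 'physical', 'name': 'eth0',
                                    'subnets': [{'type': 'dhcp'}]}]},
        target='/tmp/example-root')  # hypothetical scratch root

If none of the named renderers is usable on the system, select() raises the RendererNotFoundError that this patch also introduces.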
@@ -142,6 +155,9 @@ ns, header=header, render_hwaddress=True) return self.apply_network(contents, bring_up=bring_up) + def generate_fallback_config(self): + return net.generate_fallback_config() + def apply_network_config(self, netconfig, bring_up=False): # apply network config netconfig # This method is preferred to apply_network which only takes @@ -322,7 +338,8 @@ try: (_out, err) = util.subp(cmd) if len(err): - LOG.warn("Running %s resulted in stderr output: %s", cmd, err) + LOG.warning("Running %s resulted in stderr output: %s", + cmd, err) return True except util.ProcessExecutionError: util.logexc(LOG, "Running interface command %s failed", cmd) @@ -345,7 +362,7 @@ Add a user to the system using standard GNU tools """ if util.is_user(name): - LOG.info("User %s already exists, skipping." % name) + LOG.info("User %s already exists, skipping.", name) return if 'create_groups' in kwargs: @@ -507,9 +524,9 @@ keys = list(keys.values()) if keys is not None: if not isinstance(keys, (tuple, list, set)): - LOG.warn("Invalid type '%s' detected for" - " 'ssh_authorized_keys', expected list," - " string, dict, or set.", type(keys)) + LOG.warning("Invalid type '%s' detected for" + " 'ssh_authorized_keys', expected list," + " string, dict, or set.", type(keys)) else: keys = set(keys) or [] ssh_util.setup_user_keys(keys, name, options=None) @@ -582,7 +599,7 @@ "#includedir %s" % (path), ''] sudoers_contents = "\n".join(lines) util.append_file(sudo_base, sudoers_contents) - LOG.debug("Added '#includedir %s' to %s" % (path, sudo_base)) + LOG.debug("Added '#includedir %s' to %s", path, sudo_base) except IOError as e: util.logexc(LOG, "Failed to write %s", sudo_base) raise e @@ -634,11 +651,11 @@ # Check if group exists, and then add it doesn't if util.is_group(name): - LOG.warn("Skipping creation of existing group '%s'" % name) + LOG.warning("Skipping creation of existing group '%s'", name) else: try: util.subp(group_add_cmd) - LOG.info("Created new group %s" % name) + LOG.info("Created new group %s", name) except Exception: util.logexc(LOG, "Failed to create group %s", name) @@ -646,12 +663,12 @@ if len(members) > 0: for member in members: if not util.is_user(member): - LOG.warn("Unable to add group member '%s' to group '%s'" - "; user does not exist.", member, name) + LOG.warning("Unable to add group member '%s' to group '%s'" + "; user does not exist.", member, name) continue util.subp(['usermod', '-a', '-G', name, member]) - LOG.info("Added user '%s' to group '%s'" % (member, name)) + LOG.info("Added user '%s' to group '%s'", member, name) def _get_package_mirror_info(mirror_info, data_source=None, @@ -695,7 +712,7 @@ if found: results[name] = found - LOG.debug("filtered distro mirror info: %s" % results) + LOG.debug("filtered distro mirror info: %s", results) return results diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/distros/parsers/hosts.py cloud-init-0.7.9-153-g16a7302f/cloudinit/distros/parsers/hosts.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/distros/parsers/hosts.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/distros/parsers/hosts.py 2017-05-26 18:36:38.000000000 +0000 @@ -10,8 +10,8 @@ # See: man hosts -# or http://unixhelp.ed.ac.uk/CGI/man-cgi?hosts -# or http://tinyurl.com/6lmox3 +# or https://linux.die.net/man/5/hosts +# or https://www.freebsd.org/doc/en_US.ISO8859-1/books/handbook/configtuning-configfiles.html # noqa class HostsConf(object): def __init__(self, text): self._text = text diff -Nru 
cloud-init-0.7.9-47-gc81ea53/cloudinit/distros/parsers/resolv_conf.py cloud-init-0.7.9-153-g16a7302f/cloudinit/distros/parsers/resolv_conf.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/distros/parsers/resolv_conf.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/distros/parsers/resolv_conf.py 2017-05-26 18:36:38.000000000 +0000 @@ -6,9 +6,11 @@ from six import StringIO +from cloudinit.distros.parsers import chop_comment +from cloudinit import log as logging from cloudinit import util -from cloudinit.distros.parsers import chop_comment +LOG = logging.getLogger(__name__) # See: man resolv.conf @@ -79,9 +81,10 @@ if len(new_ns) == len(current_ns): return current_ns if len(current_ns) >= 3: - # Hard restriction on only 3 name servers - raise ValueError(("Adding %r would go beyond the " - "'3' maximum name servers") % (ns)) + LOG.warning("ignoring nameserver %r: adding would " + "exceed the maximum of " + "'3' name servers (see resolv.conf(5))", ns) + return current_ns[:3] self._remove_option('nameserver') for n in new_ns: self._contents.append(('option', ['nameserver', n, ''])) diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/distros/rhel.py cloud-init-0.7.9-153-g16a7302f/cloudinit/distros/rhel.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/distros/rhel.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/distros/rhel.py 2017-05-26 18:36:38.000000000 +0000 @@ -11,8 +11,6 @@ from cloudinit import distros from cloudinit import helpers from cloudinit import log as logging -from cloudinit.net.network_state import parse_net_config_data -from cloudinit.net import sysconfig from cloudinit import util from cloudinit.distros import net_util @@ -30,7 +28,7 @@ class Distro(distros.Distro): - # See: http://tiny.cc/6r99fw + # See: https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Networking_Guide/sec-Network_Configuration_Using_sysconfig_Files.html # noqa clock_conf_fn = "/etc/sysconfig/clock" locale_conf_fn = '/etc/sysconfig/i18n' systemd_locale_conf_fn = '/etc/locale.conf' @@ -49,16 +47,13 @@ # should only happen say once per instance...) self._runner = helpers.Runners(paths) self.osfamily = 'redhat' - self._net_renderer = sysconfig.Renderer() cfg['ssh_svcname'] = 'sshd' def install_packages(self, pkglist): self.package_command('install', pkgs=pkglist) def _write_network_config(self, netconfig): - ns = parse_net_config_data(netconfig) - self._net_renderer.render_network_state("/", ns) - return [] + return self._supported_write_network_config(netconfig) def _write_network(self, settings): # TODO(harlowja) fix this... 
since this is the ubuntu format @@ -135,8 +130,8 @@ rhel_util.update_sysconfig_file(out_fn, host_cfg) def _select_hostname(self, hostname, fqdn): - # See: http://bit.ly/TwitgL # Should be fqdn if we can use it + # See: https://www.centos.org/docs/5/html/Deployment_Guide-en-US/ch-sysconfig.html#s2-sysconfig-network # noqa if fqdn: return fqdn return hostname diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/distros/ug_util.py cloud-init-0.7.9-153-g16a7302f/cloudinit/distros/ug_util.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/distros/ug_util.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/distros/ug_util.py 2017-05-26 18:36:38.000000000 +0000 @@ -214,8 +214,8 @@ 'name': old_user, } if not isinstance(old_user, dict): - LOG.warn(("Format for 'user' key must be a string or " - "dictionary and not %s"), type_utils.obj_name(old_user)) + LOG.warning(("Format for 'user' key must be a string or dictionary" + " and not %s"), type_utils.obj_name(old_user)) old_user = {} # If no old user format, then assume the distro @@ -227,9 +227,9 @@ try: distro_user_config = distro.get_default_user() except NotImplementedError: - LOG.warn(("Distro has not implemented default user " - "access. No distribution provided default user" - " will be normalized.")) + LOG.warning(("Distro has not implemented default user " + "access. No distribution provided default user" + " will be normalized.")) # Merge the old user (which may just be an empty dict when not # present with the distro provided default user configuration so @@ -239,9 +239,9 @@ base_users = cfg.get('users', []) if not isinstance(base_users, (list, dict) + six.string_types): - LOG.warn(("Format for 'users' key must be a comma separated string" - " or a dictionary or a list and not %s"), - type_utils.obj_name(base_users)) + LOG.warning(("Format for 'users' key must be a comma separated string" + " or a dictionary or a list and not %s"), + type_utils.obj_name(base_users)) base_users = [] if old_user: diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/ec2_utils.py cloud-init-0.7.9-153-g16a7302f/cloudinit/ec2_utils.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/ec2_utils.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/ec2_utils.py 2017-05-26 18:36:38.000000000 +0000 @@ -38,8 +38,8 @@ # Assume it's json, unless it fails parsing... 
return json.loads(blob) except (ValueError, TypeError) as e: - LOG.warn("Field %s looked like a json object, but it was" - " not: %s", field, e) + LOG.warning("Field %s looked like a json object, but it" + " was not: %s", field, e) if blob.find("\n") != -1: return blob.splitlines() return blob @@ -125,7 +125,8 @@ joined.update(child_contents) for field in leaf_contents.keys(): if field in joined: - LOG.warn("Duplicate key found in results from %s", base_url) + LOG.warning("Duplicate key found in results from %s", + base_url) else: joined[field] = leaf_contents[field] return joined diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/gpg.py cloud-init-0.7.9-153-g16a7302f/cloudinit/gpg.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/gpg.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/gpg.py 2017-05-26 18:36:38.000000000 +0000 @@ -43,7 +43,7 @@ util.subp(["gpg", "--batch", "--yes", "--delete-keys", key], capture=True) except util.ProcessExecutionError as error: - LOG.warn('Failed delete key "%s": %s', key, error) + LOG.warning('Failed delete key "%s": %s', key, error) def getkeybyid(keyid, keyserver='keyserver.ubuntu.com'): diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/handlers/__init__.py cloud-init-0.7.9-153-g16a7302f/cloudinit/handlers/__init__.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/handlers/__init__.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/handlers/__init__.py 2017-05-26 18:36:38.000000000 +0000 @@ -246,7 +246,7 @@ else: freq = mod.frequency if freq and freq not in FREQUENCIES: - LOG.warn("Handler %s has an unknown frequency %s", mod, freq) + LOG.warning("Handler %s has an unknown frequency %s", mod, freq) return mod diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/helpers.py cloud-init-0.7.9-153-g16a7302f/cloudinit/helpers.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/helpers.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/helpers.py 2017-05-26 18:36:38.000000000 +0000 @@ -126,11 +126,11 @@ # this case could happen if the migrator module hadn't run yet # but the item had run before we did canon_sem_name. if cname != name and os.path.exists(self._get_path(name, freq)): - LOG.warn("%s has run without canonicalized name [%s].\n" - "likely the migrator has not yet run. " - "It will run next boot.\n" - "run manually with: cloud-init single --name=migrator" - % (name, cname)) + LOG.warning("%s has run without canonicalized name [%s].\n" + "likely the migrator has not yet run. " + "It will run next boot.\n" + "run manually with: cloud-init single --name=migrator", + name, cname) return True return False @@ -375,8 +375,8 @@ def get_ipath(self, name=None): ipath = self._get_ipath(name) if not ipath: - LOG.warn(("No per instance data available, " - "is there an datasource/iid set?")) + LOG.warning(("No per instance data available, " + "is there an datasource/iid set?")) return None else: return ipath diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/net/cmdline.py cloud-init-0.7.9-153-g16a7302f/cloudinit/net/cmdline.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/net/cmdline.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/net/cmdline.py 2017-05-26 18:36:38.000000000 +0000 @@ -9,41 +9,12 @@ import glob import gzip import io -import shlex -import sys - -import six from . import get_devicelist from . 
import read_sys_net_safe from cloudinit import util -PY26 = sys.version_info[0:2] == (2, 6) - - -def _shlex_split(blob): - if PY26 and isinstance(blob, six.text_type): - # Older versions don't support unicode input - blob = blob.encode("utf8") - return shlex.split(blob) - - -def _load_shell_content(content, add_empty=False, empty_val=None): - """Given shell like syntax (key=value\nkey2=value2\n) in content - return the data in dictionary form. If 'add_empty' is True - then add entries in to the returned dictionary for 'VAR=' - variables. Set their value to empty_val.""" - data = {} - for line in _shlex_split(content): - key, value = line.split("=", 1) - if not value: - value = empty_val - if add_empty or value: - data[key] = value - - return data - def _klibc_to_config_entry(content, mac_addrs=None): """Convert a klibc written shell content file to a 'config' entry @@ -63,7 +34,7 @@ if mac_addrs is None: mac_addrs = {} - data = _load_shell_content(content) + data = util.load_shell_content(content) try: name = data['DEVICE'] if 'DEVICE' in data else data['DEVICE6'] except KeyError: @@ -100,6 +71,11 @@ cur_proto = data.get(pre + 'PROTO', proto) subnet = {'type': cur_proto, 'control': 'manual'} + # only populate address for static types. While the rendered config + # may have an address for dhcp, that is not really expected. + if cur_proto == 'static': + subnet['address'] = data[pre + 'ADDR'] + # these fields go right on the subnet for key in ('NETMASK', 'BROADCAST', 'GATEWAY'): if pre + key in data: diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/net/eni.py cloud-init-0.7.9-153-g16a7302f/cloudinit/net/eni.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/net/eni.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/net/eni.py 2017-05-26 18:36:38.000000000 +0000 @@ -8,6 +8,7 @@ from . import ParserError from . import renderer +from .network_state import subnet_is_ipv6 from cloudinit import util @@ -111,16 +112,6 @@ return lines -def _subnet_is_ipv6(subnet): - # 'static6' or 'dhcp6' - if subnet['type'].endswith('6'): - # This is a request for DHCPv6. - return True - elif subnet['type'] == 'static' and ":" in subnet['address']: - return True - return False - - def _parse_deb_config_data(ifaces, contents, src_dir, src_path): """Parses the file contents, placing result into ifaces. @@ -273,8 +264,11 @@ # devname is 'eth0' for name='eth0:1' devname = name.partition(":")[0] if devname not in devs: - devs[devname] = {'type': 'physical', 'name': devname, - 'subnets': []} + if devname == "lo": + dtype = "loopback" + else: + dtype = "physical" + devs[devname] = {'type': dtype, 'name': devname, 'subnets': []} # this isnt strictly correct, but some might specify # hwaddress on a nic for matching / declaring name. 
if 'hwaddress' in data: @@ -367,7 +361,7 @@ iface['mode'] = subnet['type'] iface['control'] = subnet.get('control', 'auto') subnet_inet = 'inet' - if _subnet_is_ipv6(subnet): + if subnet_is_ipv6(subnet): subnet_inet += '6' iface['inet'] = subnet_inet if subnet['type'].startswith('dhcp'): @@ -423,10 +417,11 @@ bonding ''' order = { - 'physical': 0, - 'bond': 1, - 'bridge': 2, - 'vlan': 3, + 'loopback': 0, + 'physical': 1, + 'bond': 2, + 'bridge': 3, + 'vlan': 4, } sections = [] @@ -444,14 +439,14 @@ return '\n\n'.join(['\n'.join(s) for s in sections]) + "\n" - def render_network_state(self, target, network_state): - fpeni = os.path.join(target, self.eni_path) + def render_network_state(self, network_state, target=None): + fpeni = util.target_path(target, self.eni_path) util.ensure_dir(os.path.dirname(fpeni)) header = self.eni_header if self.eni_header else "" util.write_file(fpeni, header + self._render_interfaces(network_state)) if self.netrules_path: - netrules = os.path.join(target, self.netrules_path) + netrules = util.target_path(target, self.netrules_path) util.ensure_dir(os.path.dirname(netrules)) util.write_file(netrules, self._render_persistent_net(network_state)) @@ -461,7 +456,7 @@ links_prefix=self.links_path_prefix) def _render_systemd_links(self, target, network_state, links_prefix): - fp_prefix = os.path.join(target, links_prefix) + fp_prefix = util.target_path(target, links_prefix) for f in glob.glob(fp_prefix + "*"): os.unlink(f) for iface in network_state.iter_interfaces(): @@ -482,7 +477,7 @@ def network_state_to_eni(network_state, header=None, render_hwaddress=False): # render the provided network state, return a string of equivalent eni eni_path = 'etc/network/interfaces' - renderer = Renderer({ + renderer = Renderer(config={ 'eni_path': eni_path, 'eni_header': header, 'links_path_prefix': None, @@ -496,4 +491,18 @@ network_state, render_hwaddress=render_hwaddress) return header + contents + +def available(target=None): + expected = ['ifquery', 'ifup', 'ifdown'] + search = ['/sbin', '/usr/sbin'] + for p in expected: + if not util.which(p, search=search, target=target): + return False + eni = util.target_path(target, 'etc/network/interfaces') + if not os.path.isfile(eni): + return False + + return True + + # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/net/__init__.py cloud-init-0.7.9-153-g16a7302f/cloudinit/net/__init__.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/net/__init__.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/net/__init__.py 2017-05-26 18:36:38.000000000 +0000 @@ -82,6 +82,15 @@ return os.path.exists(sys_dev_path(devname, "wireless")) +def is_bridge(devname): + return os.path.exists(sys_dev_path(devname, "bridge")) + + +def is_vlan(devname): + uevent = str(read_sys_net_safe(devname, "uevent")) + return 'DEVTYPE=vlan' in uevent.splitlines() + + def is_connected(devname): # is_connected isn't really as simple as that. 2 is # 'physically connected'. 3 is 'not connected'. but a wlan interface will @@ -132,7 +141,7 @@ for interface in potential_interfaces: if interface.startswith("veth"): continue - if os.path.exists(sys_dev_path(interface, "bridge")): + if is_bridge(interface): # skip any bridges continue carrier = read_sys_net_int(interface, 'carrier') @@ -187,7 +196,11 @@ """read the network config and rename devices accordingly. if strict_present is false, then do not raise exception if no devices match. 
     if strict_busy is false, then do not raise exception if the
-    device cannot be renamed because it is currently configured."""
+    device cannot be renamed because it is currently configured.
+
+    renames are only attempted for interfaces of type 'physical'. It is
+    expected that the network system will create other devices with the
+    correct name in place."""
     renames = []
     for ent in netcfg.get('config', {}):
         if ent.get('type') != 'physical':
@@ -201,13 +214,35 @@
     return _rename_interfaces(renames)


+def interface_has_own_mac(ifname, strict=False):
+    """return True if the provided interface has its own address.
+
+    Based on addr_assign_type in /sys. Return true for any interface
+    that does not have a 'stolen' address. Examples of such devices
+    are bonds or vlans that inherit their mac from another device.
+    Possible values are:
+      0: permanent address    2: stolen from another device
+      1: randomly generated   3: set using dev_set_mac_address"""
+
+    assign_type = read_sys_net_int(ifname, "addr_assign_type")
+    if strict and assign_type is None:
+        raise ValueError("%s had no addr_assign_type." % ifname)
+    return assign_type in (0, 1, 3)
+
+
 def _get_current_rename_info(check_downable=True):
-    """Collect information necessary for rename_interfaces."""
-    names = get_devicelist()
+    """Collect information necessary for rename_interfaces.
+
+    returns a dictionary by mac address like:
+       {mac:
+         {'name': name
+          'up': boolean: is_up(name),
+          'downable': None or boolean indicating that the
+                      device has only automatically assigned ip addrs.}}
+    """
     bymac = {}
-    for n in names:
-        bymac[get_interface_mac(n)] = {
-            'name': n, 'up': is_up(n), 'downable': None}
+    for mac, name in get_interfaces_by_mac().items():
+        bymac[mac] = {'name': name, 'up': is_up(name), 'downable': None}

     if check_downable:
         nmatch = re.compile(r"[0-9]+:\s+(\w+)[@:]")
@@ -346,22 +381,42 @@
     return read_sys_net_safe(ifname, path)


-def get_interfaces_by_mac(devs=None):
-    """Build a dictionary of tuples {mac: name}"""
-    if devs is None:
-        try:
-            devs = get_devicelist()
-        except OSError as e:
-            if e.errno == errno.ENOENT:
-                devs = []
-            else:
-                raise
+def get_interfaces_by_mac():
+    """Build a dictionary of tuples {mac: name}.
+
+    Bridges and any devices that have a 'stolen' mac are excluded."""
+    try:
+        devs = get_devicelist()
+    except OSError as e:
+        if e.errno == errno.ENOENT:
+            devs = []
+        else:
+            raise
     ret = {}
+    empty_mac = '00:00:00:00:00:00'
     for name in devs:
+        if not interface_has_own_mac(name):
+            continue
+        if is_bridge(name):
+            continue
+        if is_vlan(name):
+            continue
         mac = get_interface_mac(name)
         # some devices may not have a mac (tun0)
-        if mac:
-            ret[mac] = name
+        if not mac:
+            continue
+        if mac == empty_mac and name != 'lo':
+            continue
+        if mac in ret:
+            raise RuntimeError(
+                "duplicate mac found! both '%s' and '%s' have mac '%s'" %
+                (name, ret[mac], mac))
+        ret[mac] = name
     return ret
+
+
+class RendererNotFoundError(RuntimeError):
+    pass
+
+
 # vi: ts=4 expandtab
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/net/netplan.py cloud-init-0.7.9-153-g16a7302f/cloudinit/net/netplan.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/net/netplan.py	1970-01-01 00:00:00.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/net/netplan.py	2017-05-26 18:36:38.000000000 +0000
@@ -0,0 +1,416 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import os
+
+from . import renderer
+from .network_state import mask2cidr, subnet_is_ipv6
+
+from cloudinit import log as logging
+from cloudinit import util
+from cloudinit.net import SYS_CLASS_NET, get_devicelist
+
+KNOWN_SNAPD_CONFIG = b"""\
+# This is the initial network config.
+# It can be overwritten by cloud-init or console-conf.
+network:
+    version: 2
+    ethernets:
+        all-en:
+            match:
+                name: "en*"
+            dhcp4: true
+        all-eth:
+            match:
+                name: "eth*"
+            dhcp4: true
+"""
+
+LOG = logging.getLogger(__name__)
+NET_CONFIG_TO_V2 = {
+    'bond': {'bond-ad-select': 'ad-select',
+             'bond-arp-interval': 'arp-interval',
+             'bond-arp-ip-target': 'arp-ip-target',
+             'bond-arp-validate': 'arp-validate',
+             'bond-downdelay': 'down-delay',
+             'bond-fail-over-mac': 'fail-over-mac-policy',
+             'bond-lacp-rate': 'lacp-rate',
+             'bond-miimon': 'mii-monitor-interval',
+             'bond-min-links': 'min-links',
+             'bond-mode': 'mode',
+             'bond-num-grat-arp': 'gratuitious-arp',
+             'bond-primary-reselect': 'primary-reselect-policy',
+             'bond-updelay': 'up-delay',
+             'bond-xmit-hash-policy': 'transmit-hash-policy'},
+    'bridge': {'bridge_ageing': 'ageing-time',
+               'bridge_bridgeprio': 'priority',
+               'bridge_fd': 'forward-delay',
+               'bridge_gcint': None,
+               'bridge_hello': 'hello-time',
+               'bridge_maxage': 'max-age',
+               'bridge_maxwait': None,
+               'bridge_pathcost': 'path-cost',
+               'bridge_portprio': None,
+               'bridge_waitport': None}}
+
+
+def _get_params_dict_by_match(config, match):
+    return dict((key, value) for (key, value) in config.items()
+                if key.startswith(match))
+
+
+def _extract_addresses(config, entry):
+    """This method parses a cloudinit.net.network_state dictionary (config)
+    and maps netstate keys/values into a dictionary (entry) to represent
+    netplan yaml.
+
+    An example config dictionary might look like:
+
+    {'mac_address': '52:54:00:12:34:00',
+     'name': 'interface0',
+     'subnets': [
+        {'address': '192.168.1.2/24',
+         'mtu': 1501,
+         'type': 'static'},
+        {'address': '2001:4800:78ff:1b:be76:4eff:fe06:1000',
+         'mtu': 1480,
+         'netmask': 64,
+         'type': 'static'}],
+     'type': 'physical'
+    }
+
+    An entry dictionary looks like:
+
+    {'set-name': 'interface0',
+     'match': {'macaddress': '52:54:00:12:34:00'},
+     'mtu': 1501}
+
+    After modification returns
+
+    {'set-name': 'interface0',
+     'match': {'macaddress': '52:54:00:12:34:00'},
+     'mtu': 1501,
+     'address': ['192.168.1.2/24', '2001:4800:78ff:1b:be76:4eff:fe06:1000'],
+     'mtu6': 1480}
+
+    """
+
+    def _listify(obj, token=' '):
+        "Helper to convert strings to list of strings, handle single string"
+        if not obj or type(obj) not in [str]:
+            return obj
+        if token in obj:
+            return obj.split(token)
+        else:
+            return [obj, ]
+
+    addresses = []
+    routes = []
+    nameservers = []
+    searchdomains = []
+    subnets = config.get('subnets', [])
+    if subnets is None:
+        subnets = []
+    for subnet in subnets:
+        sn_type = subnet.get('type')
+        if sn_type.startswith('dhcp'):
+            if sn_type == 'dhcp':
+                sn_type += '4'
+            entry.update({sn_type: True})
+        elif sn_type in ['static']:
+            addr = '%s' % subnet.get('address')
+            netmask = subnet.get('netmask')
+            if netmask and '/' not in addr:
+                addr += '/%s' % mask2cidr(netmask)
+            if 'gateway' in subnet and subnet.get('gateway'):
+                gateway = subnet.get('gateway')
+                if ":" in gateway:
+                    entry.update({'gateway6': gateway})
+                else:
+                    entry.update({'gateway4': gateway})
+            if 'dns_nameservers' in subnet:
+                nameservers += _listify(subnet.get('dns_nameservers', []))
+            if 'dns_search' in subnet:
+                searchdomains += _listify(subnet.get('dns_search', []))
+            if 'mtu' in subnet:
+                mtukey = 'mtu'
+                if subnet_is_ipv6(subnet):
+                    mtukey += '6'
+                entry.update({mtukey: subnet.get('mtu')})
+            for route in subnet.get('routes', []):
+                network = route.get('network')
+                netmask = route.get('netmask')
+                to_net = '%s/%s' % (network, mask2cidr(netmask))
+                new_route = {
+                    'via': route.get('gateway'),
+                    'to': to_net,
+                }
+                # check the original route entry, not the rebuilt dict,
+                # so a configured metric actually carries over
+                if 'metric' in route:
+                    new_route.update({'metric': route.get('metric', 100)})
+                routes.append(new_route)
+
+            addresses.append(addr)
+
+    if len(addresses) > 0:
+        entry.update({'addresses': addresses})
+    if len(routes) > 0:
+        entry.update({'routes': routes})
+    if len(nameservers) > 0:
+        ns = {'addresses': nameservers}
+        entry.update({'nameservers': ns})
+    if len(searchdomains) > 0:
+        ns = entry.get('nameservers', {})
+        ns.update({'search': searchdomains})
+        entry.update({'nameservers': ns})
+
+
+def _extract_bond_slaves_by_name(interfaces, entry, bond_master):
+    bond_slave_names = sorted([name for (name, cfg) in interfaces.items()
+                               if cfg.get('bond-master', None) == bond_master])
+    if len(bond_slave_names) > 0:
+        entry.update({'interfaces': bond_slave_names})
+
+
+def _clean_default(target=None):
+    # clean out any known default files and derived files in target
+    # LP: #1675576
+    tpath = util.target_path(target, "etc/netplan/00-snapd-config.yaml")
+    if not os.path.isfile(tpath):
+        return
+    content = util.load_file(tpath, decode=False)
+    if content != KNOWN_SNAPD_CONFIG:
+        return
+
+    derived = [util.target_path(target, f) for f in (
+        'run/systemd/network/10-netplan-all-en.network',
+        'run/systemd/network/10-netplan-all-eth.network',
+        'run/systemd/generator/netplan.stamp')]
+    existing = [f for f in derived if os.path.isfile(f)]
+    LOG.debug("removing known config '%s' and derived existing files: %s",
+              tpath, existing)
+
+    for f in [tpath] + existing:
+        os.unlink(f)
+
+
+class Renderer(renderer.Renderer):
+    """Renders network information in a /etc/netplan/network.yaml format."""
+
+    NETPLAN_GENERATE = ['netplan', 'generate']
+
+    def __init__(self, config=None):
+        if not config:
+            config = {}
+        self.netplan_path = config.get('netplan_path',
+                                       'etc/netplan/50-cloud-init.yaml')
+        self.netplan_header = config.get('netplan_header', None)
+        self._postcmds = config.get('postcmds', False)
+        self.clean_default = config.get('clean_default', True)
+
+    def render_network_state(self, network_state, target):
+        # check network state for version
+        # if v2, then extract network_state.config
+        # else render_v2_from_state
+        fpnplan = os.path.join(target, self.netplan_path)
+        util.ensure_dir(os.path.dirname(fpnplan))
+        header = self.netplan_header if self.netplan_header else ""
+
+        # render from state
+        content = self._render_content(network_state)
+
+        if not header.endswith("\n"):
+            header += "\n"
+        util.write_file(fpnplan, header + content)
+
+        if self.clean_default:
+            _clean_default(target=target)
+        self._netplan_generate(run=self._postcmds)
+        self._net_setup_link(run=self._postcmds)
+
+    def _netplan_generate(self, run=False):
+        if not run:
+            LOG.debug("netplan generate postcmd disabled")
+            return
+        util.subp(self.NETPLAN_GENERATE, capture=True)
+
+    def _net_setup_link(self, run=False):
+        """To ensure device link properties are applied, we poke
+           udev to re-evaluate networkd .link files and call
+           the setup_link udev builtin command
+        """
+        if not run:
+            LOG.debug("netplan net_setup_link postcmd disabled")
+            return
+        setup_lnk = ['udevadm', 'test-builtin', 'net_setup_link']
+        for cmd in [setup_lnk + [SYS_CLASS_NET + iface]
+                    for iface in get_devicelist() if
+                    os.path.islink(SYS_CLASS_NET + iface)]:
+            util.subp(cmd, capture=True)
+
+    def _render_content(self, network_state):
+        ethernets = {}
+        wifis = {}
+        bridges = {}
+        bonds = {}
+        vlans = {}
+        content = []
+
+        interfaces = network_state._network_state.get('interfaces', [])
+
+        nameservers = network_state.dns_nameservers
+        searchdomains = network_state.dns_searchdomains
+
+        for config in network_state.iter_interfaces():
+            ifname = config.get('name')
+            # filter None entries up front so we can do simple if key in dict
+            ifcfg = dict((key, value) for (key, value) in config.items()
+                         if value)
+
+            if_type = ifcfg.get('type')
+            if if_type == 'physical':
+                # required_keys = ['name', 'mac_address']
+                eth = {
+                    'set-name': ifname,
+                    'match': ifcfg.get('match', None),
+                }
+                if eth['match'] is None:
+                    macaddr = ifcfg.get('mac_address', None)
+                    if macaddr is not None:
+                        eth['match'] = {'macaddress': macaddr.lower()}
+                    else:
+                        del eth['match']
+                        del eth['set-name']
+                if 'mtu' in ifcfg:
+                    eth['mtu'] = ifcfg.get('mtu')
+
+                _extract_addresses(ifcfg, eth)
+                ethernets.update({ifname: eth})
+
+            elif if_type == 'bond':
+                # required_keys = ['name', 'bond_interfaces']
+                bond = {}
+                bond_config = {}
+                # extract bond params and drop the bond_ prefix as it's
+                # redundant in v2 yaml format
+                v2_bond_map = NET_CONFIG_TO_V2.get('bond')
+                for match in ['bond_', 'bond-']:
+                    bond_params = _get_params_dict_by_match(ifcfg, match)
+                    for (param, value) in bond_params.items():
+                        newname = v2_bond_map.get(param.replace('_', '-'))
+                        if newname is None:
+                            continue
+                        bond_config.update({newname: value})
+
+                if len(bond_config) > 0:
+                    bond.update({'parameters': bond_config})
+                slave_interfaces = ifcfg.get('bond-slaves')
+                if slave_interfaces == 'none':
+                    _extract_bond_slaves_by_name(interfaces, bond, ifname)
+                _extract_addresses(ifcfg, bond)
+                bonds.update({ifname: bond})
+
+            elif if_type == 'bridge':
+                # required_keys = ['name', 'bridge_ports']
+                ports = sorted(copy.copy(ifcfg.get('bridge_ports')))
+                bridge = {
+                    'interfaces': ports,
+                }
+                # extract bridge params and drop the bridge prefix as it's
+                # redundant in v2 yaml format
+                match_prefix = 'bridge_'
+                params = _get_params_dict_by_match(ifcfg, match_prefix)
+                br_config = {}
+
+                # v2 yaml uses different names for the keys
+                # and at least one value format change
+                v2_bridge_map = NET_CONFIG_TO_V2.get('bridge')
+                for (param, value) in params.items():
+                    newname = v2_bridge_map.get(param)
+                    if newname is None:
+                        continue
+                    br_config.update({newname: value})
+                    if newname == 'path-cost':
+                        # <interface> <cost> -> <interface>: int(<cost>)
+                        newvalue = {}
+                        for costval in value:
+                            (port, cost) = costval.split()
+                            newvalue[port] = int(cost)
+                        br_config.update({newname: newvalue})
+                if len(br_config) > 0:
+                    bridge.update({'parameters': br_config})
+                _extract_addresses(ifcfg, bridge)
+                bridges.update({ifname: bridge})
+
+            elif if_type == 'vlan':
+                # required_keys = ['name', 'vlan_id', 'vlan-raw-device']
+                vlan = {
+                    'id': ifcfg.get('vlan_id'),
+                    'link': ifcfg.get('vlan-raw-device')
+                }
+                macaddr = ifcfg.get('mac_address', None)
+                if macaddr is not None:
+                    vlan['macaddress'] = macaddr.lower()
+                _extract_addresses(ifcfg, vlan)
+                vlans.update({ifname: vlan})
+
+        # inject global nameserver values under each physical interface
+        if nameservers:
+            for _eth, cfg in ethernets.items():
+                nscfg = cfg.get('nameservers', {})
+                addresses = nscfg.get('addresses', [])
+                addresses += nameservers
+                nscfg.update({'addresses': addresses})
+                cfg.update({'nameservers': nscfg})
+
+        if searchdomains:
+            for _eth, cfg in ethernets.items():
+                nscfg = cfg.get('nameservers', {})
+                search = nscfg.get('search', [])
+                search += searchdomains
+                nscfg.update({'search': search})
+                cfg.update({'nameservers': nscfg})
+
+        # workaround yaml dictionary key sorting when dumping
+        def _render_section(name, section):
+            if section:
+                dump = util.yaml_dumps({name: section},
+                                       explicit_start=False,
+                                       explicit_end=False)
+                txt = util.indent(dump, ' ' * 4)
+                return [txt]
+            return []
+
+        content.append("network:\n    version: 2\n")
+        content += _render_section('ethernets', ethernets)
+        content += _render_section('wifis', wifis)
+        content += _render_section('bonds', bonds)
+        content += _render_section('bridges', bridges)
+        content += _render_section('vlans', vlans)
+
+        return "".join(content)
+
+
+def available(target=None):
+    expected = ['netplan']
+    search = ['/usr/sbin', '/sbin']
+    for p in expected:
+        if not util.which(p, search=search, target=target):
+            return False
+    return True
+
+
+def network_state_to_netplan(network_state, header=None):
+    # render the provided network state, return a string of the
+    # equivalent netplan config
+    netplan_path = 'etc/netplan/50-cloud-init.yaml'
+    renderer = Renderer({
+        'netplan_path': netplan_path,
+        'netplan_header': header,
+    })
+    if not header:
+        header = ""
+    if not header.endswith("\n"):
+        header += "\n"
+    contents = renderer._render_content(network_state)
+    return header + contents
+
+# vi: ts=4 expandtab
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/net/network_state.py cloud-init-0.7.9-153-g16a7302f/cloudinit/net/network_state.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/net/network_state.py	2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/net/network_state.py	2017-05-26 18:36:38.000000000 +0000
@@ -1,4 +1,4 @@
-# Copyright (C) 2013-2014 Canonical Ltd.
+# Copyright (C) 2017 Canonical Ltd.
 #
 # Author: Ryan Harper <ryan.harper@canonical.com>
 #
@@ -18,6 +18,10 @@
 NETWORK_STATE_REQUIRED_KEYS = {
     1: ['version', 'config', 'network_state'],
 }
+NETWORK_V2_KEY_FILTER = [
+    'addresses', 'dhcp4', 'dhcp6', 'gateway4', 'gateway6', 'interfaces',
+    'match', 'mtu', 'nameservers', 'renderer', 'set-name', 'wakeonlan'
+]


 def parse_net_config_data(net_config, skip_broken=True):
@@ -26,11 +30,18 @@
     :param net_config: curtin network config dict
     """
     state = None
-    if 'version' in net_config and 'config' in net_config:
-        nsi = NetworkStateInterpreter(version=net_config.get('version'),
-                                      config=net_config.get('config'))
+    version = net_config.get('version')
+    config = net_config.get('config')
+    if version == 2:
+        # v2 does not have explicit 'config' key so we
+        # pass the whole net-config as-is
+        config = net_config
+
+    if version and config:
+        nsi = NetworkStateInterpreter(version=version, config=config)
         nsi.parse_config(skip_broken=skip_broken)
-        state = nsi.network_state
+        state = nsi.get_network_state()
+
     return state

@@ -106,6 +117,7 @@
     def __init__(self, network_state, version=NETWORK_STATE_VERSION):
         self._network_state = copy.deepcopy(network_state)
         self._version = version
+        self.use_ipv6 = network_state.get('use_ipv6', False)

     @property
     def version(self):
@@ -152,7 +164,8 @@
         'dns': {
             'nameservers': [],
             'search': [],
-        }
+        },
+        'use_ipv6': False,
     }

     def __init__(self, version=NETWORK_STATE_VERSION, config=None):
@@ -165,6 +178,14 @@
     def network_state(self):
         return NetworkState(self._network_state, version=self._version)

+    @property
+    def use_ipv6(self):
+        return self._network_state.get('use_ipv6')
+
+    @use_ipv6.setter
+    def use_ipv6(self, val):
+        self._network_state.update({'use_ipv6': val})
+
     def dump(self):
         state = {
             'version': self._version,
@@ -192,8 +213,22 @@
     def 
dump_network_state(self): return util.yaml_dumps(self._network_state) + def as_dict(self): + return {'version': self._version, 'config': self._config} + + def get_network_state(self): + ns = self.network_state + return ns + def parse_config(self, skip_broken=True): - # rebuild network state + if self._version == 1: + self.parse_config_v1(skip_broken=skip_broken) + self._parsed = True + elif self._version == 2: + self.parse_config_v2(skip_broken=skip_broken) + self._parsed = True + + def parse_config_v1(self, skip_broken=True): for command in self._config: command_type = command['type'] try: @@ -207,11 +242,35 @@ if not skip_broken: raise else: - LOG.warn("Skipping invalid command: %s", command, - exc_info=True) + LOG.warning("Skipping invalid command: %s", command, + exc_info=True) + LOG.debug(self.dump_network_state()) + + def parse_config_v2(self, skip_broken=True): + for command_type, command in self._config.items(): + if command_type == 'version': + continue + try: + handler = self.command_handlers[command_type] + except KeyError: + raise RuntimeError("No handler found for" + " command '%s'" % command_type) + try: + handler(self, command) + self._v2_common(command) + except InvalidCommand: + if not skip_broken: + raise + else: + LOG.warning("Skipping invalid command: %s", command, + exc_info=True) LOG.debug(self.dump_network_state()) @ensure_command_keys(['name']) + def handle_loopback(self, command): + return self.handle_physical(command) + + @ensure_command_keys(['name']) def handle_physical(self, command): ''' command = { @@ -234,11 +293,16 @@ if subnets: for subnet in subnets: if subnet['type'] == 'static': + if ':' in subnet['address']: + self.use_ipv6 = True if 'netmask' in subnet and ':' in subnet['address']: subnet['netmask'] = mask2cidr(subnet['netmask']) for route in subnet.get('routes', []): if 'netmask' in route: route['netmask'] = mask2cidr(route['netmask']) + elif subnet['type'].endswith('6'): + self.use_ipv6 = True + iface.update({ 'name': command.get('name'), 'type': command.get('type'), @@ -323,7 +387,7 @@ bond_if.update({param: val}) self._network_state['interfaces'].update({ifname: bond_if}) - @ensure_command_keys(['name', 'bridge_interfaces', 'params']) + @ensure_command_keys(['name', 'bridge_interfaces']) def handle_bridge(self, command): ''' auto br0 @@ -369,7 +433,7 @@ self.handle_physical(command) iface = interfaces.get(command.get('name'), {}) iface['bridge_ports'] = command['bridge_interfaces'] - for param, val in command.get('params').items(): + for param, val in command.get('params', {}).items(): iface.update({param: val}) interfaces.update({iface['name']: iface}) @@ -403,6 +467,241 @@ } routes.append(route) + # V2 handlers + def handle_bonds(self, command): + ''' + v2_command = { + bond0: { + 'interfaces': ['interface0', 'interface1'], + 'miimon': 100, + 'mode': '802.3ad', + 'xmit_hash_policy': 'layer3+4'}, + bond1: { + 'bond-slaves': ['interface2', 'interface7'], + 'mode': 1 + } + } + + v1_command = { + 'type': 'bond' + 'name': 'bond0', + 'bond_interfaces': [interface0, interface1], + 'params': { + 'bond-mode': '802.3ad', + 'bond_miimon: 100, + 'bond_xmit_hash_policy': 'layer3+4', + } + } + + ''' + self._handle_bond_bridge(command, cmd_type='bond') + + def handle_bridges(self, command): + + ''' + v2_command = { + br0: { + 'interfaces': ['interface0', 'interface1'], + 'fd': 0, + 'stp': 'off', + 'maxwait': 0, + } + } + + v1_command = { + 'type': 'bridge' + 'name': 'br0', + 'bridge_interfaces': [interface0, interface1], + 'params': { + 'bridge_stp': 'off', + 
'bridge_fd: 0, + 'bridge_maxwait': 0 + } + } + + ''' + self._handle_bond_bridge(command, cmd_type='bridge') + + def handle_ethernets(self, command): + ''' + ethernets: + eno1: + match: + macaddress: 00:11:22:33:44:55 + wakeonlan: true + dhcp4: true + dhcp6: false + addresses: + - 192.168.14.2/24 + - 2001:1::1/64 + gateway4: 192.168.14.1 + gateway6: 2001:1::2 + nameservers: + search: [foo.local, bar.local] + addresses: [8.8.8.8, 8.8.4.4] + lom: + match: + driver: ixgbe + set-name: lom1 + dhcp6: true + switchports: + match: + name: enp2* + mtu: 1280 + + command = { + 'type': 'physical', + 'mac_address': 'c0:d6:9f:2c:e8:80', + 'name': 'eth0', + 'subnets': [ + {'type': 'dhcp4'} + ] + } + ''' + for eth, cfg in command.items(): + phy_cmd = { + 'type': 'physical', + 'name': cfg.get('set-name', eth), + } + mac_address = cfg.get('match', {}).get('macaddress', None) + if not mac_address: + LOG.debug('NetworkState Version2: missing "macaddress" info ' + 'in config entry: %s: %s', eth, str(cfg)) + + for key in ['mtu', 'match', 'wakeonlan']: + if key in cfg: + phy_cmd.update({key: cfg.get(key)}) + + subnets = self._v2_to_v1_ipcfg(cfg) + if len(subnets) > 0: + phy_cmd.update({'subnets': subnets}) + + LOG.debug('v2(ethernets) -> v1(physical):\n%s', phy_cmd) + self.handle_physical(phy_cmd) + + def handle_vlans(self, command): + ''' + v2_vlans = { + 'eth0.123': { + 'id': 123, + 'link': 'eth0', + 'dhcp4': True, + } + } + + v1_command = { + 'type': 'vlan', + 'name': 'eth0.123', + 'vlan_link': 'eth0', + 'vlan_id': 123, + 'subnets': [{'type': 'dhcp4'}], + } + ''' + for vlan, cfg in command.items(): + vlan_cmd = { + 'type': 'vlan', + 'name': vlan, + 'vlan_id': cfg.get('id'), + 'vlan_link': cfg.get('link'), + } + subnets = self._v2_to_v1_ipcfg(cfg) + if len(subnets) > 0: + vlan_cmd.update({'subnets': subnets}) + LOG.debug('v2(vlans) -> v1(vlan):\n%s', vlan_cmd) + self.handle_vlan(vlan_cmd) + + def handle_wifis(self, command): + raise NotImplementedError("NetworkState V2: " + "Skipping wifi configuration") + + def _v2_common(self, cfg): + LOG.debug('v2_common: handling config:\n%s', cfg) + if 'nameservers' in cfg: + search = cfg.get('nameservers').get('search', []) + dns = cfg.get('nameservers').get('addresses', []) + name_cmd = {'type': 'nameserver'} + if len(search) > 0: + name_cmd.update({'search': search}) + if len(dns) > 0: + name_cmd.update({'addresses': dns}) + LOG.debug('v2(nameserver) -> v1(nameserver):\n%s', name_cmd) + self.handle_nameserver(name_cmd) + + def _handle_bond_bridge(self, command, cmd_type=None): + """Common handler for bond and bridge types""" + for item_name, item_cfg in command.items(): + item_params = dict((key, value) for (key, value) in + item_cfg.items() if key not in + NETWORK_V2_KEY_FILTER) + v1_cmd = { + 'type': cmd_type, + 'name': item_name, + cmd_type + '_interfaces': item_cfg.get('interfaces'), + 'params': item_params, + } + subnets = self._v2_to_v1_ipcfg(item_cfg) + if len(subnets) > 0: + v1_cmd.update({'subnets': subnets}) + + LOG.debug('v2(%ss) -> v1(%s):\n%s', cmd_type, cmd_type, v1_cmd) + self.handle_bridge(v1_cmd) + + def _v2_to_v1_ipcfg(self, cfg): + """Common ipconfig extraction from v2 to v1 subnets array.""" + + subnets = [] + if 'dhcp4' in cfg: + subnets.append({'type': 'dhcp4'}) + if 'dhcp6' in cfg: + self.use_ipv6 = True + subnets.append({'type': 'dhcp6'}) + + gateway4 = None + gateway6 = None + for address in cfg.get('addresses', []): + subnet = { + 'type': 'static', + 'address': address, + } + + routes = [] + for route in cfg.get('routes', []): + route_addr = 
route.get('to') + if "/" in route_addr: + route_addr, route_cidr = route_addr.split("/") + route_netmask = cidr2mask(route_cidr) + subnet_route = { + 'address': route_addr, + 'netmask': route_netmask, + 'gateway': route.get('via') + } + routes.append(subnet_route) + if len(routes) > 0: + subnet.update({'routes': routes}) + + if ":" in address: + if 'gateway6' in cfg and gateway6 is None: + gateway6 = cfg.get('gateway6') + subnet.update({'gateway': gateway6}) + else: + if 'gateway4' in cfg and gateway4 is None: + gateway4 = cfg.get('gateway4') + subnet.update({'gateway': gateway4}) + + subnets.append(subnet) + return subnets + + +def subnet_is_ipv6(subnet): + """Common helper for checking network_state subnets for ipv6.""" + # 'static6' or 'dhcp6' + if subnet['type'].endswith('6'): + # This is a request for DHCPv6. + return True + elif subnet['type'] == 'static' and ":" in subnet['address']: + return True + return False + def cidr2mask(cidr): mask = [0, 0, 0, 0] @@ -435,9 +734,9 @@ def mask2cidr(mask): - if ':' in mask: + if ':' in str(mask): return ipv6mask2cidr(mask) - elif '.' in mask: + elif '.' in str(mask): return ipv4mask2cidr(mask) else: return mask diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/net/renderer.py cloud-init-0.7.9-153-g16a7302f/cloudinit/net/renderer.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/net/renderer.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/net/renderer.py 2017-05-26 18:36:38.000000000 +0000 @@ -5,8 +5,10 @@ # # This file is part of cloud-init. See LICENSE file for license information. +import abc import six +from .network_state import parse_net_config_data from .udev import generate_udev_rule @@ -36,4 +38,12 @@ iface['mac_address'])) return content.getvalue() + @abc.abstractmethod + def render_network_state(self, network_state, target=None): + """Render network state.""" + + def render_network_config(self, network_config, target=None): + return self.render_network_state( + network_state=parse_net_config_data(network_config), target=target) + # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/net/renderers.py cloud-init-0.7.9-153-g16a7302f/cloudinit/net/renderers.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/net/renderers.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/net/renderers.py 2017-05-26 18:36:38.000000000 +0000 @@ -0,0 +1,53 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from . import eni +from . import netplan +from . import RendererNotFoundError +from . import sysconfig + +NAME_TO_RENDERER = { + "eni": eni, + "netplan": netplan, + "sysconfig": sysconfig, +} + +DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan"] + + +def search(priority=None, target=None, first=False): + if priority is None: + priority = DEFAULT_PRIORITY + + available = NAME_TO_RENDERER + + unknown = [i for i in priority if i not in available] + if unknown: + raise ValueError( + "Unknown renderers provided in priority list: %s" % unknown) + + found = [] + for name in priority: + render_mod = available[name] + if render_mod.available(target): + cur = (name, render_mod.Renderer) + if first: + return cur + found.append(cur) + + return found + + +def select(priority=None, target=None): + found = search(priority, target=target, first=True) + if not found: + if priority is None: + priority = DEFAULT_PRIORITY + tmsg = "" + if target and target != "/": + tmsg = " in target=%s" % target + raise RendererNotFoundError( + "No available network renderers found%s. 
Searched " + "through list: %s" % (tmsg, priority)) + return found + +# vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/net/sysconfig.py cloud-init-0.7.9-153-g16a7302f/cloudinit/net/sysconfig.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/net/sysconfig.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/net/sysconfig.py 2017-05-26 18:36:38.000000000 +0000 @@ -9,6 +9,7 @@ from cloudinit import util from . import renderer +from .network_state import subnet_is_ipv6 def _make_header(sep='#'): @@ -58,6 +59,9 @@ def __setitem__(self, key, value): self._conf[key] = value + def __getitem__(self, key): + return self._conf[key] + def drop(self, key): self._conf.pop(key, None) @@ -82,12 +86,14 @@ class Route(ConfigMap): """Represents a route configuration.""" - route_fn_tpl = '%(base)s/network-scripts/route-%(name)s' + route_fn_tpl_ipv4 = '%(base)s/network-scripts/route-%(name)s' + route_fn_tpl_ipv6 = '%(base)s/network-scripts/route6-%(name)s' def __init__(self, route_name, base_sysconf_dir): super(Route, self).__init__() self.last_idx = 1 - self.has_set_default = False + self.has_set_default_ipv4 = False + self.has_set_default_ipv6 = False self._route_name = route_name self._base_sysconf_dir = base_sysconf_dir @@ -95,13 +101,63 @@ r = Route(self._route_name, self._base_sysconf_dir) r._conf = self._conf.copy() r.last_idx = self.last_idx - r.has_set_default = self.has_set_default + r.has_set_default_ipv4 = self.has_set_default_ipv4 + r.has_set_default_ipv6 = self.has_set_default_ipv6 return r @property - def path(self): - return self.route_fn_tpl % ({'base': self._base_sysconf_dir, - 'name': self._route_name}) + def path_ipv4(self): + return self.route_fn_tpl_ipv4 % ({'base': self._base_sysconf_dir, + 'name': self._route_name}) + + @property + def path_ipv6(self): + return self.route_fn_tpl_ipv6 % ({'base': self._base_sysconf_dir, + 'name': self._route_name}) + + def is_ipv6_route(self, address): + return ':' in address + + def to_string(self, proto="ipv4"): + # only accept ipv4 and ipv6 + if proto not in ['ipv4', 'ipv6']: + raise ValueError("Unknown protocol '%s'" % (str(proto))) + buf = six.StringIO() + buf.write(_make_header()) + if self._conf: + buf.write("\n") + # need to reindex IPv4 addresses + # (because Route can contain a mix of IPv4 and IPv6) + reindex = -1 + for key in sorted(self._conf.keys()): + if 'ADDRESS' in key: + index = key.replace('ADDRESS', '') + address_value = str(self._conf[key]) + # only accept combinations: + # if proto ipv6 only display ipv6 routes + # if proto ipv4 only display ipv4 routes + # do not add ipv6 routes if proto is ipv4 + # do not add ipv4 routes if proto is ipv6 + # (this array will contain a mix of ipv4 and ipv6) + if proto == "ipv4" and not self.is_ipv6_route(address_value): + netmask_value = str(self._conf['NETMASK' + index]) + gateway_value = str(self._conf['GATEWAY' + index]) + # increase IPv4 index + reindex = reindex + 1 + buf.write("%s=%s\n" % ('ADDRESS' + str(reindex), + _quote_value(address_value))) + buf.write("%s=%s\n" % ('GATEWAY' + str(reindex), + _quote_value(gateway_value))) + buf.write("%s=%s\n" % ('NETMASK' + str(reindex), + _quote_value(netmask_value))) + elif proto == "ipv6" and self.is_ipv6_route(address_value): + netmask_value = str(self._conf['NETMASK' + index]) + gateway_value = str(self._conf['GATEWAY' + index]) + buf.write("%s/%s via %s\n" % (address_value, + netmask_value, + gateway_value)) + + return buf.getvalue() class NetInterface(ConfigMap): @@ -119,10 +175,10 @@ 
         super(NetInterface, self).__init__()
         self.children = []
         self.routes = Route(iface_name, base_sysconf_dir)
-        self._kind = kind
+        self.kind = kind
+        self._iface_name = iface_name
         self._conf['DEVICE'] = iface_name
-        self._conf['TYPE'] = self.iface_types[kind]
         self._base_sysconf_dir = base_sysconf_dir

     @property
@@ -140,6 +196,8 @@
     @kind.setter
     def kind(self, kind):
+        if kind not in self.iface_types:
+            raise ValueError(kind)
         self._kind = kind
         self._conf['TYPE'] = self.iface_types[kind]

@@ -173,7 +231,7 @@
         ('BOOTPROTO', 'none'),
     ])

-    # If these keys exist, then there values will be used to form
+    # If these keys exist, then their values will be used to form
     # a BONDING_OPTS grouping; otherwise no grouping will be set.
     bond_tpl_opts = tuple([
         ('bond_mode', "mode=%s"),
@@ -190,7 +248,7 @@
     def __init__(self, config=None):
         if not config:
             config = {}
-        self.sysconf_dir = config.get('sysconf_dir', 'etc/sysconfig/')
+        self.sysconf_dir = config.get('sysconf_dir', 'etc/sysconfig')
         self.netrules_path = config.get(
             'netrules_path', 'etc/udev/rules.d/70-persistent-net.rules')
         self.dns_path = config.get('dns_path', 'etc/resolv.conf')
@@ -199,61 +257,126 @@
     def _render_iface_shared(cls, iface, iface_cfg):
         for k, v in cls.iface_defaults:
             iface_cfg[k] = v
+
         for (old_key, new_key) in [('mac_address', 'HWADDR'), ('mtu', 'MTU')]:
             old_value = iface.get(old_key)
             if old_value is not None:
                 iface_cfg[new_key] = old_value

     @classmethod
-    def _render_subnet(cls, iface_cfg, route_cfg, subnet):
-        subnet_type = subnet.get('type')
-        if subnet_type == 'dhcp6':
-            iface_cfg['DHCPV6C'] = True
-            iface_cfg['IPV6INIT'] = True
-            iface_cfg['BOOTPROTO'] = 'dhcp'
-        elif subnet_type in ['dhcp4', 'dhcp']:
-            iface_cfg['BOOTPROTO'] = 'dhcp'
-        elif subnet_type == 'static':
-            iface_cfg['BOOTPROTO'] = 'static'
-            if subnet.get('ipv6'):
-                iface_cfg['IPV6ADDR'] = subnet['address']
+    def _render_subnets(cls, iface_cfg, subnets):
+        # setting base values
+        iface_cfg['BOOTPROTO'] = 'none'
+
+        # modifying base values according to subnets
+        for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
+            subnet_type = subnet.get('type')
+            if subnet_type == 'dhcp6':
+                iface_cfg['IPV6INIT'] = True
+                iface_cfg['DHCPV6C'] = True
+                iface_cfg['BOOTPROTO'] = 'dhcp'
+            elif subnet_type in ['dhcp4', 'dhcp']:
+                iface_cfg['BOOTPROTO'] = 'dhcp'
+            elif subnet_type == 'static':
+                # grep BOOTPROTO sysconfig.txt -A2 | head -3
+                # BOOTPROTO=none|bootp|dhcp
+                # 'bootp' or 'dhcp' cause a DHCP client
+                # to run on the device. Any other
+                # value causes any static configuration
+                # in the file to be applied.
+                # ==> the following should not be set to 'static'
+                # but should remain 'none'
+                # if iface_cfg['BOOTPROTO'] == 'none':
+                #     iface_cfg['BOOTPROTO'] = 'static'
+                if subnet_is_ipv6(subnet):
+                    iface_cfg['IPV6INIT'] = True
             else:
-                iface_cfg['IPADDR'] = subnet['address']
-        else:
-            raise ValueError("Unknown subnet type '%s' found"
-                             " for interface '%s'" % (subnet_type,
-                                                      iface_cfg.name))
-        if 'netmask' in subnet:
-            iface_cfg['NETMASK'] = subnet['netmask']
-        for route in subnet.get('routes', []):
-            if _is_default_route(route):
-                if route_cfg.has_set_default:
-                    raise ValueError("Duplicate declaration of default"
-                                     " route found for interface '%s'"
-                                     % (iface_cfg.name))
-                # NOTE(harlowja): ipv6 and ipv4 default gateways
-                gw_key = 'GATEWAY0'
-                nm_key = 'NETMASK0'
-                addr_key = 'ADDRESS0'
-                # The owning interface provides the default route.
-                #
-                # TODO(harlowja): add validation that no other iface has
-                # also provided the default route?
-                iface_cfg['DEFROUTE'] = True
-                if 'gateway' in route:
-                    iface_cfg['GATEWAY'] = route['gateway']
-                route_cfg.has_set_default = True
-            else:
-                gw_key = 'GATEWAY%s' % route_cfg.last_idx
-                nm_key = 'NETMASK%s' % route_cfg.last_idx
-                addr_key = 'ADDRESS%s' % route_cfg.last_idx
-                route_cfg.last_idx += 1
-            for (old_key, new_key) in [('gateway', gw_key),
-                                       ('netmask', nm_key),
-                                       ('network', addr_key)]:
-                if old_key in route:
-                    route_cfg[new_key] = route[old_key]
+                raise ValueError("Unknown subnet type '%s' found"
+                                 " for interface '%s'" % (subnet_type,
+                                                          iface_cfg.name))
+
+        # set IPv4 and IPv6 static addresses
+        ipv4_index = -1
+        ipv6_index = -1
+        for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
+            subnet_type = subnet.get('type')
+            if subnet_type == 'dhcp6':
+                continue
+            elif subnet_type in ['dhcp4', 'dhcp']:
+                continue
+            elif subnet_type == 'static':
+                if subnet_is_ipv6(subnet):
+                    ipv6_index = ipv6_index + 1
+                    if 'netmask' in subnet and str(subnet['netmask']) != "":
+                        ipv6_cidr = (subnet['address'] +
+                                     '/' +
+                                     str(subnet['netmask']))
+                    else:
+                        ipv6_cidr = subnet['address']
+                    if ipv6_index == 0:
+                        iface_cfg['IPV6ADDR'] = ipv6_cidr
+                    elif ipv6_index == 1:
+                        iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr
+                    else:
+                        iface_cfg['IPV6ADDR_SECONDARIES'] = (
+                            iface_cfg['IPV6ADDR_SECONDARIES'] +
+                            " " + ipv6_cidr)
+                else:
+                    ipv4_index = ipv4_index + 1
+                    if ipv4_index == 0:
+                        iface_cfg['IPADDR'] = subnet['address']
+                        if 'netmask' in subnet:
+                            iface_cfg['NETMASK'] = subnet['netmask']
+                    else:
+                        iface_cfg['IPADDR' + str(ipv4_index)] = \
+                            subnet['address']
+                        if 'netmask' in subnet:
+                            iface_cfg['NETMASK' + str(ipv4_index)] = \
+                                subnet['netmask']
+
+    @classmethod
+    def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets):
+        for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
+            for route in subnet.get('routes', []):
+                is_ipv6 = subnet.get('ipv6')
+
+                if _is_default_route(route):
+                    if (
+                            (subnet.get('ipv4') and
+                             route_cfg.has_set_default_ipv4) or
+                            (subnet.get('ipv6') and
+                             route_cfg.has_set_default_ipv6)
+                    ):
+                        raise ValueError("Duplicate declaration of default "
+                                         "route found for interface '%s'"
+                                         % (iface_cfg.name))
+                    # NOTE(harlowja): ipv6 and ipv4 default gateways
+                    gw_key = 'GATEWAY0'
+                    nm_key = 'NETMASK0'
+                    addr_key = 'ADDRESS0'
+                    # The owning interface provides the default route.
+                    #
+                    # TODO(harlowja): add validation that no other iface has
+                    # also provided the default route?
+                    iface_cfg['DEFROUTE'] = True
+                    if 'gateway' in route:
+                        if is_ipv6:
+                            iface_cfg['IPV6_DEFAULTGW'] = route['gateway']
+                            route_cfg.has_set_default_ipv6 = True
+                        else:
+                            iface_cfg['GATEWAY'] = route['gateway']
+                            route_cfg.has_set_default_ipv4 = True
+
+                else:
+                    gw_key = 'GATEWAY%s' % route_cfg.last_idx
+                    nm_key = 'NETMASK%s' % route_cfg.last_idx
+                    addr_key = 'ADDRESS%s' % route_cfg.last_idx
+                    route_cfg.last_idx += 1
+                for (old_key, new_key) in [('gateway', gw_key),
+                                           ('netmask', nm_key),
+                                           ('network', addr_key)]:
+                    if old_key in route:
+                        route_cfg[new_key] = route[old_key]

     @classmethod
     def _render_bonding_opts(cls, iface_cfg, iface):
@@ -279,15 +402,9 @@
             iface_subnets = iface.get("subnets", [])
             iface_cfg = iface_contents[iface_name]
             route_cfg = iface_cfg.routes
-            if len(iface_subnets) == 1:
-                cls._render_subnet(iface_cfg, route_cfg, iface_subnets[0])
-            elif len(iface_subnets) > 1:
-                for i, isubnet in enumerate(iface_subnets,
-                                            start=len(iface_cfg.children)):
-                    iface_sub_cfg = iface_cfg.copy()
-                    iface_sub_cfg.name = "%s:%s" % (iface_name, i)
-                    iface_cfg.children.append(iface_sub_cfg)
-                    cls._render_subnet(iface_sub_cfg, route_cfg, isubnet)
+
+            cls._render_subnets(iface_cfg, iface_subnets)
+            cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)

     @classmethod
     def _render_bond_interfaces(cls, network_state, iface_contents):
@@ -353,6 +470,8 @@
         '''Given state, return /etc/sysconfig files + contents'''
         iface_contents = {}
         for iface in network_state.iter_interfaces():
+            if iface['type'] == "loopback":
+                continue
             iface_name = iface['name']
             iface_cfg = NetInterface(iface_name, base_sysconf_dir)
             cls._render_iface_shared(iface, iface_cfg)
@@ -369,22 +488,51 @@
             if iface_cfg:
                 contents[iface_cfg.path] = iface_cfg.to_string()
                 if iface_cfg.routes:
-                    contents[iface_cfg.routes.path] = iface_cfg.routes.to_string()
+                    contents[iface_cfg.routes.path_ipv4] = \
+                        iface_cfg.routes.to_string("ipv4")
+                    contents[iface_cfg.routes.path_ipv6] = \
+                        iface_cfg.routes.to_string("ipv6")
         return contents

-    def render_network_state(self, target, network_state):
-        base_sysconf_dir = os.path.join(target, self.sysconf_dir)
+    def render_network_state(self, network_state, target=None):
+        file_mode = 0o644
+        base_sysconf_dir = util.target_path(target, self.sysconf_dir)
         for path, data in self._render_sysconfig(base_sysconf_dir,
                                                  network_state).items():
-            util.write_file(path, data)
+            util.write_file(path, data, file_mode)
         if self.dns_path:
-            dns_path = os.path.join(target, self.dns_path)
+            dns_path = util.target_path(target, self.dns_path)
             resolv_content = self._render_dns(network_state,
                                               existing_dns_path=dns_path)
-            util.write_file(dns_path, resolv_content)
+            util.write_file(dns_path, resolv_content, file_mode)
         if self.netrules_path:
             netrules_content = self._render_persistent_net(network_state)
-            netrules_path = os.path.join(target, self.netrules_path)
-            util.write_file(netrules_path, netrules_content)
+            netrules_path = util.target_path(target, self.netrules_path)
+            util.write_file(netrules_path, netrules_content, file_mode)
+
+        # always write /etc/sysconfig/network configuration
+        sysconfig_path = util.target_path(target, "etc/sysconfig/network")
+        netcfg = [_make_header(), 'NETWORKING=yes']
+        if network_state.use_ipv6:
+            netcfg.append('NETWORKING_IPV6=yes')
+            netcfg.append('IPV6_AUTOCONF=no')
+        util.write_file(sysconfig_path, "\n".join(netcfg) + "\n", file_mode)
+
+
+def available(target=None):
+    expected = ['ifup', 'ifdown']
+    search = ['/sbin', '/usr/sbin']
+    for p in expected:
+        if not util.which(p, search=search, target=target):
+            return False
+
+    expected_paths = [
+        'etc/sysconfig/network-scripts/network-functions',
+        'etc/sysconfig/network-scripts/ifdown-eth']
+    for p in expected_paths:
+        if not os.path.isfile(util.target_path(target, p)):
+            return False
+    return True
+
 # vi: ts=4 expandtab
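The hunks above replace per-alias subinterfaces with cumulative IPADDR/IPV6ADDR keys in a single ifcfg file. A hedged usage sketch follows; the v1 config, the scratch target path, and the use of parse_net_config_data from cloudinit.net.network_state are illustrative assumptions for this example, not part of the diff:

    # Sketch: drive the rewritten sysconfig renderer end to end.
    from cloudinit.net.network_state import parse_net_config_data
    from cloudinit.net.sysconfig import Renderer

    v1 = {'version': 1, 'config': [
        {'type': 'physical', 'name': 'eth0', 'subnets': [
            {'type': 'static', 'address': '10.0.0.5',
             'netmask': '255.255.255.0'},
            {'type': 'static', 'address': '2001:db8::5', 'netmask': '64'},
            {'type': 'static', 'address': '2001:db8::6', 'netmask': '64'},
        ]}]}

    # BOOTPROTO stays 'none' for static subnets; the first IPv6 address
    # becomes IPV6ADDR and later ones accumulate in IPV6ADDR_SECONDARIES.
    Renderer().render_network_state(parse_net_config_data(v1),
                                    target='/tmp/render-test')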
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/reporting/handlers.py cloud-init-0.7.9-153-g16a7302f/cloudinit/reporting/handlers.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/reporting/handlers.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/reporting/handlers.py 2017-05-26 18:36:38.000000000 +0000
@@ -37,7 +37,7 @@
             try:
                 level = getattr(logging, level.upper())
             except Exception:
-                LOG.warn("invalid level '%s', using WARN", input_level)
+                LOG.warning("invalid level '%s', using WARN", input_level)
                 level = logging.WARN
         self.level = level
@@ -82,7 +82,7 @@
                                 timeout=self.timeout,
                                 retries=self.retries, ssl_details=self.ssl_details)
         except Exception:
-            LOG.warn("failed posting event: %s" % event.as_string())
+            LOG.warning("failed posting event: %s", event.as_string())

 available_handlers = DictRegistry()
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/settings.py cloud-init-0.7.9-153-g16a7302f/cloudinit/settings.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/settings.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/settings.py 2017-05-26 18:36:38.000000000 +0000
@@ -39,13 +39,14 @@
     ],
     'def_log_file': '/var/log/cloud-init.log',
     'log_cfgs': [],
-    'syslog_fix_perms': ['syslog:adm', 'root:adm'],
+    'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel'],
     'system_info': {
         'paths': {
             'cloud_dir': '/var/lib/cloud',
             'templates_dir': '/etc/cloud/templates/',
         },
         'distro': 'ubuntu',
+        'network': {'renderers': None},
     },
     'vendor_data': {'enabled': True, 'prefix': []},
 }
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceAltCloud.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceAltCloud.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceAltCloud.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceAltCloud.py 2017-05-26 18:36:38.000000000 +0000
@@ -181,7 +181,7 @@
         try:
             cmd = CMD_PROBE_FLOPPY
             (cmd_out, _err) = util.subp(cmd)
-            LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
+            LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
         except ProcessExecutionError as _err:
             util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
             return False
@@ -196,13 +196,12 @@
             cmd = CMD_UDEVADM_SETTLE
             cmd.append('--exit-if-exists=' + floppy_dev)
             (cmd_out, _err) = util.subp(cmd)
-            LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
+            LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
         except ProcessExecutionError as _err:
             util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
             return False
         except OSError as _err:
-            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
-                        _err.message)
+            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
             return False

         try:
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceAzure.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceAzure.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceAzure.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceAzure.py 2017-05-26 18:36:38.000000000 +0000
@@ -10,6 +10,7 @@
 from functools import partial
 import os
 import os.path
+import re
 import time
 from xml.dom import minidom
 import xml.etree.ElementTree as ET
@@ -32,19 +33,161 @@
 # azure systems will always have a resource disk, and 66-azure-ephemeral.rules
 # ensures that it gets linked to this path.
 RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource'
+DEFAULT_PRIMARY_NIC = 'eth0'
+LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases'
+DEFAULT_FS = 'ext4'
+
+
+def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid):
+    # extract the 'X' from dev.storvsc.X. if deviceid matches
+    """
+    dev.storvsc.1.%pnpinfo:
+        classid=32412632-86cb-44a2-9b5c-50d1417354f5
+        deviceid=00000000-0001-8899-0000-000000000000
+    """
+    for line in sysctl_out.splitlines():
+        if re.search(r"pnpinfo", line):
+            fields = line.split()
+            if len(fields) >= 3:
+                columns = fields[2].split('=')
+                if (len(columns) >= 2 and
+                        columns[0] == "deviceid" and
+                        columns[1].startswith(deviceid)):
+                    comps = fields[0].split('.')
+                    return comps[2]
+    return None
+
+
+def find_busdev_from_disk(camcontrol_out, disk_drv):
+    # find the scbusX from 'camcontrol devlist -b' output
+    # if disk_drv matches the specified disk driver, i.e. blkvsc1
+    """
+    scbus0 on ata0 bus 0
+    scbus1 on ata1 bus 0
+    scbus2 on blkvsc0 bus 0
+    scbus3 on blkvsc1 bus 0
+    scbus4 on storvsc2 bus 0
+    scbus5 on storvsc3 bus 0
+    scbus-1 on xpt0 bus 0
+    """
+    for line in camcontrol_out.splitlines():
+        if re.search(disk_drv, line):
+            items = line.split()
+            return items[0]
+    return None
+
+
+def find_dev_from_busdev(camcontrol_out, busdev):
+    # find the daX from 'camcontrol devlist' output
+    # if busdev matches the specified value, i.e. 'scbus2'
+    """
+    at scbus1 target 0 lun 0 (cd0,pass0)
+    at scbus2 target 0 lun 0 (da0,pass1)
+    at scbus3 target 1 lun 0 (da1,pass2)
+    """
+    for line in camcontrol_out.splitlines():
+        if re.search(busdev, line):
+            items = line.split('(')
+            if len(items) == 2:
+                dev_pass = items[1].split(',')
+                return dev_pass[0]
+    return None
+
+
+def get_dev_storvsc_sysctl():
+    try:
+        sysctl_out, err = util.subp(['sysctl', 'dev.storvsc'])
+    except util.ProcessExecutionError:
+        LOG.debug("Fail to execute sysctl dev.storvsc")
+        return None
+    return sysctl_out
+
+
+def get_camcontrol_dev_bus():
+    try:
+        camcontrol_b_out, err = util.subp(['camcontrol', 'devlist', '-b'])
+    except util.ProcessExecutionError:
+        LOG.debug("Fail to execute camcontrol devlist -b")
+        return None
+    return camcontrol_b_out
+
+
+def get_camcontrol_dev():
+    try:
+        camcontrol_out, err = util.subp(['camcontrol', 'devlist'])
+    except util.ProcessExecutionError:
+        LOG.debug("Fail to execute camcontrol devlist")
+        return None
+    return camcontrol_out
+
+
+def get_resource_disk_on_freebsd(port_id):
+    g0 = "00000000"
+    if port_id > 1:
+        g0 = "00000001"
+        port_id = port_id - 2
+    g1 = "000" + str(port_id)
+    g0g1 = "{0}-{1}".format(g0, g1)
+    """
+    search 'X' from
+       'dev.storvsc.X.%pnpinfo:
+           classid=32412632-86cb-44a2-9b5c-50d1417354f5
+           deviceid=00000000-0001-8899-0000-000000000000'
+    """
+    sysctl_out = get_dev_storvsc_sysctl()
+
+    storvscid = find_storvscid_from_sysctl_pnpinfo(sysctl_out, g0g1)
+    if not storvscid:
+        LOG.debug("Fail to find storvsc id from sysctl")
+        return None
+
+    camcontrol_b_out = get_camcontrol_dev_bus()
+    camcontrol_out = get_camcontrol_dev()
+    # try to find /dev/XX from 'blkvsc' device
+    blkvsc = "blkvsc{0}".format(storvscid)
+    scbusx = find_busdev_from_disk(camcontrol_b_out, blkvsc)
+    if scbusx:
+        devname = find_dev_from_busdev(camcontrol_out, scbusx)
+        if devname is None:
+            LOG.debug("Fail to find /dev/daX")
+            return None
+        return devname
+    # try to find /dev/XX from 'storvsc' device
+    storvsc = "storvsc{0}".format(storvscid)
+    scbusx = find_busdev_from_disk(camcontrol_b_out, storvsc)
+    if scbusx:
+        devname = find_dev_from_busdev(camcontrol_out, scbusx)
+        if devname is None:
+            LOG.debug("Fail to find /dev/daX")
+            return None
+        return devname
+    return None
+
+
+# update the FreeBSD specific information
+if util.is_FreeBSD():
+    DEFAULT_PRIMARY_NIC = 'hn0'
+    LEASE_FILE = '/var/db/dhclient.leases.hn0'
+    DEFAULT_FS = 'freebsd-ufs'
+    res_disk = get_resource_disk_on_freebsd(1)
+    if res_disk is not None:
+        LOG.debug("resource disk is not None")
+        RESOURCE_DISK_PATH = "/dev/" + res_disk
+    else:
+        LOG.debug("resource disk is None")

 BUILTIN_DS_CONFIG = {
     'agent_command': AGENT_START_BUILTIN,
     'data_dir': "/var/lib/waagent",
     'set_hostname': True,
     'hostname_bounce': {
-        'interface': 'eth0',
+        'interface': DEFAULT_PRIMARY_NIC,
         'policy': True,
         'command': BOUNCE_COMMAND,
         'hostname_command': 'hostname',
     },
     'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH},
-    'dhclient_lease_file': '/var/lib/dhcp/dhclient.eth0.leases',
+    'dhclient_lease_file': LEASE_FILE,
 }

 BUILTIN_CLOUD_CONFIG = {
@@ -53,9 +196,8 @@
                                   'layout': [100],
                                   'overwrite': True},
                  },
-    'fs_setup': [{'filesystem': 'ext4',
-                  'device': 'ephemeral0.1',
-                  'replace_fs': 'ntfs'}],
+    'fs_setup': [{'filesystem': DEFAULT_FS,
+                  'device': 'ephemeral0.1'}],
 }

 DS_CFG_PATH = ['datasource', DS_NAME]
@@ -111,52 +253,64 @@
         root = sources.DataSource.__str__(self)
         return "%s [seed=%s]" % (root, self.seed)

-    def get_metadata_from_agent(self):
-        temp_hostname = self.metadata.get('local-hostname')
+    def bounce_network_with_azure_hostname(self):
+        # When using cloud-init to provision, we have to set the hostname from
+        # the metadata and "bounce" the network to force DDNS to update via
+        # dhclient
+        azure_hostname = self.metadata.get('local-hostname')
+        LOG.debug("Hostname in metadata is %s", azure_hostname)
         hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
-        agent_cmd = self.ds_cfg['agent_command']
-        LOG.debug("Getting metadata via agent.  hostname=%s cmd=%s",
-                  temp_hostname, agent_cmd)
-        with temporary_hostname(temp_hostname, self.ds_cfg,
+
+        with temporary_hostname(azure_hostname, self.ds_cfg,
                                 hostname_command=hostname_command) \
                 as previous_hostname:
             if (previous_hostname is not None and
-               util.is_true(self.ds_cfg.get('set_hostname'))):
+                    util.is_true(self.ds_cfg.get('set_hostname'))):
                 cfg = self.ds_cfg['hostname_bounce']
+
+                # "Bouncing" the network
                 try:
-                    perform_hostname_bounce(hostname=temp_hostname,
+                    perform_hostname_bounce(hostname=azure_hostname,
                                             cfg=cfg,
                                             prev_hostname=previous_hostname)
                 except Exception as e:
-                    LOG.warn("Failed publishing hostname: %s", e)
+                    LOG.warning("Failed publishing hostname: %s", e)
                     util.logexc(LOG, "handling set_hostname failed")
-            try:
-                invoke_agent(agent_cmd)
-            except util.ProcessExecutionError:
-                # claim the datasource even if the command failed
-                util.logexc(LOG, "agent command '%s' failed.",
-                            self.ds_cfg['agent_command'])
-
-            ddir = self.ds_cfg['data_dir']
-
-            fp_files = []
-            key_value = None
-            for pk in self.cfg.get('_pubkeys', []):
-                if pk.get('value', None):
-                    key_value = pk['value']
-                    LOG.debug("ssh authentication: using value from fabric")
-                else:
-                    bname = str(pk['fingerprint'] + ".crt")
-                    fp_files += [os.path.join(ddir, bname)]
-                    LOG.debug("ssh authentication: "
-                              "using fingerprint from fabirc")
-
-            missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
-                                    func=wait_for_files,
-                                    args=(fp_files,))
+
+    def get_metadata_from_agent(self):
+        temp_hostname = self.metadata.get('local-hostname')
+        agent_cmd = self.ds_cfg['agent_command']
+        LOG.debug("Getting metadata via agent.  hostname=%s cmd=%s",
+                  temp_hostname, agent_cmd)
+
+        self.bounce_network_with_azure_hostname()
+
+        try:
+            invoke_agent(agent_cmd)
+        except util.ProcessExecutionError:
+            # claim the datasource even if the command failed
+            util.logexc(LOG, "agent command '%s' failed.",
+                        self.ds_cfg['agent_command'])
+
+        ddir = self.ds_cfg['data_dir']
+
+        fp_files = []
+        key_value = None
+        for pk in self.cfg.get('_pubkeys', []):
+            if pk.get('value', None):
+                key_value = pk['value']
+                LOG.debug("ssh authentication: using value from fabric")
+            else:
+                bname = str(pk['fingerprint'] + ".crt")
+                fp_files += [os.path.join(ddir, bname)]
+                LOG.debug("ssh authentication: "
+                          "using fingerprint from fabirc")
+
+        missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
+                                func=wait_for_files,
+                                args=(fp_files,))
         if len(missing):
-            LOG.warn("Did not find files, but going on: %s", missing)
+            LOG.warning("Did not find files, but going on: %s", missing)

         metadata = {}
         metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
@@ -178,7 +332,11 @@
         for cdev in candidates:
             try:
                 if cdev.startswith("/dev/"):
-                    ret = util.mount_cb(cdev, load_azure_ds_dir)
+                    if util.is_FreeBSD():
+                        ret = util.mount_cb(cdev, load_azure_ds_dir,
+                                            mtype="udf", sync=False)
+                    else:
+                        ret = util.mount_cb(cdev, load_azure_ds_dir)
                 else:
                     ret = load_azure_ds_dir(cdev)

@@ -187,7 +345,7 @@
             except BrokenAzureDataSource as exc:
                 raise exc
             except util.MountFailedError:
-                LOG.warn("%s was not mountable", cdev)
+                LOG.warning("%s was not mountable", cdev)
                 continue

             (md, self.userdata_raw, cfg, files) = ret
@@ -206,11 +364,12 @@
             LOG.debug("using files cached in %s", ddir)

         # azure / hyper-v provides random data here
-        seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
-                              quiet=True, decode=False)
-        if seed:
-            self.metadata['random_seed'] = seed
-
+        if not util.is_FreeBSD():
+            seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
+                                  quiet=True, decode=False)
+            if seed:
+                self.metadata['random_seed'] = seed
+        # TODO. find the seed on FreeBSD platform
         # now update ds_cfg to reflect contents pass in config
         user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
         self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
@@ -220,6 +379,8 @@
         write_files(ddir, files, dirmode=0o700)

         if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN:
+            self.bounce_network_with_azure_hostname()
+
             metadata_func = partial(get_metadata_from_fabric,
                                     fallback_lease_file=self.
                                     dhclient_lease_file)
@@ -252,56 +413,71 @@
     return

+def _partitions_on_device(devpath, maxnum=16):
+    # return a list of tuples (ptnum, path) for each part on devpath
+    for suff in ("-part", "p", ""):
+        found = []
+        for pnum in range(1, maxnum):
+            ppath = devpath + suff + str(pnum)
+            if os.path.exists(ppath):
+                found.append((pnum, os.path.realpath(ppath)))
+        if found:
+            return found
+    return []
+
+
+def _has_ntfs_filesystem(devpath):
+    ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True)
+    LOG.debug('ntfs_devices found = %s', ntfs_devices)
+    return os.path.realpath(devpath) in ntfs_devices
+
+
 def can_dev_be_reformatted(devpath):
-    # determine if the ephemeral block device path devpath
-    # is newly formatted after a resize.
+    """Determine if block device devpath is newly formatted ephemeral.
+
+    A newly formatted disk will:
+      a.) have a partition table (dos or gpt)
+      b.) have 1 partition that is ntfs formatted, or
+          have 2 partitions with the second partition ntfs formatted.
+          (larger instances with >2TB ephemeral disk have gpt, and will
+           have a microsoft reserved partition as part 1.  LP: #1686514)
+      c.) the ntfs partition will have no files other than possibly
+          'dataloss_warning_readme.txt'"""
     if not os.path.exists(devpath):
         return False, 'device %s does not exist' % devpath

-    realpath = os.path.realpath(devpath)
-    LOG.debug('Resolving realpath of %s -> %s', devpath, realpath)
-
-    # it is possible that the block device might exist, but the kernel
-    # have not yet read the partition table and sent events.  we udevadm settle
-    # to hope to resolve that.  Better here would probably be to test and see,
-    # and then settle if we didn't find anything and try again.
-    if util.which("udevadm"):
-        util.subp(["udevadm", "settle"])
+    LOG.debug('Resolving realpath of %s -> %s', devpath,
+              os.path.realpath(devpath))

     # devpath of /dev/sd[a-z] or /dev/disk/cloud/azure_resource
     # where partitions are "1" or "-part1" or "p1"
-    part1path = None
-    for suff in ("-part", "p", ""):
-        cand = devpath + suff + "1"
-        if os.path.exists(cand):
-            if os.path.exists(devpath + suff + "2"):
-                msg = ('device %s had more than 1 partition: %s, %s' %
-                       devpath, cand, devpath + suff + "2")
-                return False, msg
-            part1path = cand
-            break
-
-    if part1path is None:
+    partitions = _partitions_on_device(devpath)
+    if len(partitions) == 0:
         return False, 'device %s was not partitioned' % devpath
+    elif len(partitions) > 2:
+        msg = ('device %s had 3 or more partitions: %s' %
+               (devpath, ' '.join([p[1] for p in partitions])))
+        return False, msg
+    elif len(partitions) == 2:
+        cand_part, cand_path = partitions[1]
+    else:
+        cand_part, cand_path = partitions[0]

-    real_part1path = os.path.realpath(part1path)
-    ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True)
-    LOG.debug('ntfs_devices found = %s', ntfs_devices)
-    if real_part1path not in ntfs_devices:
-        msg = ('partition 1 (%s -> %s) on device %s was not ntfs formatted' %
-               (part1path, real_part1path, devpath))
+    if not _has_ntfs_filesystem(cand_path):
+        msg = ('partition %s (%s) on device %s was not ntfs formatted' %
+               (cand_part, cand_path, devpath))
         return False, msg

     def count_files(mp):
         ignored = set(['dataloss_warning_readme.txt'])
         return len([f for f in os.listdir(mp) if f.lower() not in ignored])

-    bmsg = ('partition 1 (%s -> %s) on device %s was ntfs formatted' %
-            (part1path, real_part1path, devpath))
+    bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
+            (cand_part, cand_path, devpath))
     try:
-        file_count = util.mount_cb(part1path, count_files)
+        file_count = util.mount_cb(cand_path, count_files)
     except util.MountFailedError as e:
-        return False, bmsg + ' but mount of %s failed: %s' % (part1path, e)
+        return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)

     if file_count != 0:
         return False, bmsg + ' but had %d files on it.' % file_count
@@ -317,8 +493,8 @@
                             log_pre="Azure ephemeral disk: ")

     if missing:
-        LOG.warn("ephemeral device '%s' did not appear after %d seconds.",
-                 devpath, maxwait)
+        LOG.warning("ephemeral device '%s' did not appear after %d seconds.",
+                    devpath, maxwait)
         return

     result = False
@@ -328,7 +504,7 @@
     else:
         result, msg = can_dev_be_reformatted(devpath)

-    LOG.debug("reformattable=%s: %s" % (result, msg))
+    LOG.debug("reformattable=%s: %s", result, msg)
     if not result:
         return

@@ -341,7 +517,7 @@
             LOG.debug(bmsg + " removed.")
         except Exception as e:
             # python3 throws FileNotFoundError, python2 throws OSError
-            LOG.warn(bmsg + ": remove failed! (%s)" % e)
+            LOG.warning(bmsg + ": remove failed! (%s)", e)
     else:
         LOG.debug(bmsg + " did not exist.")
     return
@@ -391,7 +567,7 @@
             errors.append(fname)

     if errors:
-        LOG.warn("failed to convert the crt files to pubkey: %s", errors)
+        LOG.warning("failed to convert the crt files to pubkey: %s", errors)

     return pubkeys
@@ -413,8 +589,8 @@
         time.sleep(naplen)
         waited += naplen

-    LOG.warn("%sStill missing files after %s seconds: %s",
-             log_pre, maxwait, need)
+    LOG.warning("%sStill missing files after %s seconds: %s",
+                log_pre, maxwait, need)
     return need
@@ -619,8 +795,19 @@
 def list_possible_azure_ds_devs():
     # return a sorted list of devices that might have a azure datasource
     devlist = []
-    for fstype in ("iso9660", "udf"):
-        devlist.extend(util.find_devs_with("TYPE=%s" % fstype))
+    if util.is_FreeBSD():
+        cdrom_dev = "/dev/cd0"
+        try:
+            util.subp(["mount", "-o", "ro", "-t", "udf", cdrom_dev,
+                       "/mnt/cdrom/secure"])
+        except util.ProcessExecutionError:
+            LOG.debug("Fail to mount cd")
+            return devlist
+        util.subp(["umount", "/mnt/cdrom/secure"])
+        devlist.append(cdrom_dev)
+    else:
+        for fstype in ("iso9660", "udf"):
+            devlist.extend(util.find_devs_with("TYPE=%s" % fstype))

     devlist.sort(reverse=True)
     return devlist
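A short, hedged sketch of how the reworked ephemeral-disk check above is meant to be consumed; the device path is the Azure alias defined earlier in this file, and the call itself is illustrative:

    # Sketch: can_dev_be_reformatted returns a (bool, reason) pair.
    from cloudinit.sources.DataSourceAzure import can_dev_be_reformatted

    ok, msg = can_dev_be_reformatted('/dev/disk/cloud/azure_resource')
    # ok is True only if the disk has at most two partitions, the ntfs
    # candidate (the second partition on gpt disks with a reserved first
    # part, LP: #1686514) mounts, and it holds no files other than
    # 'dataloss_warning_readme.txt'.
    print(ok, msg)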
(%s)", e) else: LOG.debug(bmsg + " did not exist.") return @@ -391,7 +567,7 @@ errors.append(fname) if errors: - LOG.warn("failed to convert the crt files to pubkey: %s", errors) + LOG.warning("failed to convert the crt files to pubkey: %s", errors) return pubkeys @@ -413,8 +589,8 @@ time.sleep(naplen) waited += naplen - LOG.warn("%sStill missing files after %s seconds: %s", - log_pre, maxwait, need) + LOG.warning("%sStill missing files after %s seconds: %s", + log_pre, maxwait, need) return need @@ -619,8 +795,19 @@ def list_possible_azure_ds_devs(): # return a sorted list of devices that might have a azure datasource devlist = [] - for fstype in ("iso9660", "udf"): - devlist.extend(util.find_devs_with("TYPE=%s" % fstype)) + if util.is_FreeBSD(): + cdrom_dev = "/dev/cd0" + try: + util.subp(["mount", "-o", "ro", "-t", "udf", cdrom_dev, + "/mnt/cdrom/secure"]) + except util.ProcessExecutionError: + LOG.debug("Fail to mount cd") + return devlist + util.subp(["umount", "/mnt/cdrom/secure"]) + devlist.append(cdrom_dev) + else: + for fstype in ("iso9660", "udf"): + devlist.extend(util.find_devs_with("TYPE=%s" % fstype)) devlist.sort(reverse=True) return devlist diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceBigstep.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceBigstep.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceBigstep.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceBigstep.py 2017-05-26 18:36:38.000000000 +0000 @@ -27,7 +27,7 @@ if url is None: return False response = url_helper.readurl(url) - decoded = json.loads(response.contents) + decoded = json.loads(response.contents.decode()) self.metadata = decoded["metadata"] self.vendordata_raw = decoded["vendordata_raw"] self.userdata_raw = decoded["userdata_raw"] diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceCloudSigma.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceCloudSigma.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceCloudSigma.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceCloudSigma.py 2017-05-26 18:36:38.000000000 +0000 @@ -43,7 +43,7 @@ LOG.debug("detected hypervisor as %s", sys_product_name) return 'cloudsigma' in sys_product_name.lower() - LOG.warn("failed to query dmi data for system product name") + LOG.warning("failed to query dmi data for system product name") return False def get_data(self): diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceCloudStack.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceCloudStack.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceCloudStack.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceCloudStack.py 2017-05-26 18:36:38.000000000 +0000 @@ -178,9 +178,10 @@ def get_dhclient_d(): # find lease files directory - supported_dirs = ["/var/lib/dhclient", "/var/lib/dhcp"] + supported_dirs = ["/var/lib/dhclient", "/var/lib/dhcp", + "/var/lib/NetworkManager"] for d in supported_dirs: - if os.path.exists(d): + if os.path.exists(d) and len(os.listdir(d)) > 0: LOG.debug("Using %s lease directory", d) return d return None @@ -207,8 +208,8 @@ def get_vr_address(): # Get the address of the virtual router via dhcp leases - # see http://bit.ly/T76eKC for documentation on the virtual router. # If no virtual router is detected, fallback on default gateway. 
+ # See http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/4.8/virtual_machines/user-data.html # noqa lease_file = get_latest_lease() if not lease_file: LOG.debug("No lease file found, using default gateway") diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceConfigDrive.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceConfigDrive.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceConfigDrive.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceConfigDrive.py 2017-05-26 18:36:38.000000000 +0000 @@ -54,13 +54,16 @@ found = None md = {} results = {} - if os.path.isdir(self.seed_dir): + for sdir in (self.seed_dir, "/config-drive"): + if not os.path.isdir(sdir): + continue try: - results = read_config_drive(self.seed_dir) - found = self.seed_dir + results = read_config_drive(sdir) + found = sdir + break except openstack.NonReadable: - util.logexc(LOG, "Failed reading config drive from %s", - self.seed_dir) + util.logexc(LOG, "Failed reading config drive from %s", sdir) + if not found: for dev in find_candidate_devs(): try: @@ -124,7 +127,7 @@ try: self.vendordata_raw = sources.convert_vendordata(vd) except ValueError as e: - LOG.warn("Invalid content in vendor-data: %s", e) + LOG.warning("Invalid content in vendor-data: %s", e) self.vendordata_raw = None # network_config is an /etc/network/interfaces formated file and is @@ -187,7 +190,7 @@ if network: net_conf = data.get("network_config", '') if net_conf and distro: - LOG.warn("Updating network interfaces from config drive") + LOG.warning("Updating network interfaces from config drive") distro.apply_network(net_conf) write_injected_files(data.get('files')) diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceDigitalOcean.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceDigitalOcean.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceDigitalOcean.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceDigitalOcean.py 2017-05-26 18:36:38.000000000 +0000 @@ -51,7 +51,7 @@ if not is_do: return False - LOG.info("Running on digital ocean. droplet_id=%s" % droplet_id) + LOG.info("Running on digital ocean. droplet_id=%s", droplet_id) ipv4LL_nic = None if self.use_ip4LL: diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceEc2.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceEc2.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceEc2.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceEc2.py 2017-05-26 18:36:38.000000000 +0000 @@ -125,7 +125,7 @@ if len(filtered): mdurls = filtered else: - LOG.warn("Empty metadata url list! using default list") + LOG.warning("Empty metadata url list! 
using default list") mdurls = self.metadata_urls urls = [] @@ -232,7 +232,7 @@ try: return parse_strict_mode(cfgval) except ValueError as e: - LOG.warn(e) + LOG.warning(e) return default @@ -270,7 +270,7 @@ try: mode, sleep = parse_strict_mode(cfgval) except ValueError as e: - LOG.warn(e) + LOG.warning(e) return if mode == "false": @@ -304,8 +304,8 @@ if result: return result except Exception as e: - LOG.warn("calling %s with %s raised exception: %s", - checker, data, e) + LOG.warning("calling %s with %s raised exception: %s", + checker, data, e) def _collect_platform_data(): diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceGCE.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceGCE.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceGCE.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceGCE.py 2017-05-26 18:36:38.000000000 +0000 @@ -62,13 +62,16 @@ return public_key def get_data(self): + if not platform_reports_gce(): + return False + # url_map: (our-key, path, required, is_text) url_map = [ ('instance-id', ('instance/id',), True, True), ('availability-zone', ('instance/zone',), True, True), ('local-hostname', ('instance/hostname',), True, True), ('public-keys', ('project/attributes/sshKeys', - 'instance/attributes/sshKeys'), False, True), + 'instance/attributes/ssh-keys'), False, True), ('user-data', ('instance/attributes/user-data',), False, False), ('user-data-encoding', ('instance/attributes/user-data-encoding',), False, True), @@ -95,7 +98,7 @@ if not running_on_gce: LOG.debug(msg, mkey) else: - LOG.warn(msg, mkey) + LOG.warning(msg, mkey) return False self.metadata[mkey] = value @@ -113,7 +116,8 @@ self.metadata['user-data'] = b64decode( self.metadata['user-data']) else: - LOG.warn('unknown user-data-encoding: %s, ignoring', encoding) + LOG.warning('unknown user-data-encoding: %s, ignoring', + encoding) return running_on_gce @@ -144,6 +148,21 @@ return self.availability_zone.rsplit('-', 1)[0] +def platform_reports_gce(): + pname = util.read_dmi_data('system-product-name') or "N/A" + if pname == "Google Compute Engine": + return True + + # system-product-name is not always guaranteed (LP: #1674861) + serial = util.read_dmi_data('system-serial-number') or "N/A" + if serial.startswith("GoogleCloud-"): + return True + + LOG.debug("Not running on google cloud. 
product-name=%s serial=%s", + pname, serial) + return False + + # Used to match classes to dependencies datasources = [ (DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceMAAS.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceMAAS.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceMAAS.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceMAAS.py 2017-05-26 18:36:38.000000000 +0000 @@ -71,7 +71,7 @@ except MAASSeedDirNone: pass except MAASSeedDirMalformed as exc: - LOG.warn("%s was malformed: %s" % (self.seed_dir, exc)) + LOG.warning("%s was malformed: %s", self.seed_dir, exc) raise # If there is no metadata_url, then we're not configured @@ -107,7 +107,7 @@ try: self.vendordata_raw = sources.convert_vendordata(vd) except ValueError as e: - LOG.warn("Invalid content in vendor-data: %s", e) + LOG.warning("Invalid content in vendor-data: %s", e) self.vendordata_raw = None def wait_for_metadata_service(self, url): @@ -126,7 +126,7 @@ if timeout in mcfg: timeout = int(mcfg.get("timeout", timeout)) except Exception: - LOG.warn("Failed to get timeout, using %s" % timeout) + LOG.warning("Failed to get timeout, using %s", timeout) starttime = time.time() if url.endswith("/"): @@ -190,8 +190,8 @@ else: md[path] = util.decode_binary(resp.contents) else: - LOG.warn(("Fetching from %s resulted in" - " an invalid http code %s"), url, resp.code) + LOG.warning(("Fetching from %s resulted in" + " an invalid http code %s"), url, resp.code) except url_helper.UrlError as e: if e.code == 404 and not optional: raise MAASSeedDirMalformed( diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceNoCloud.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceNoCloud.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceNoCloud.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceNoCloud.py 2017-05-26 18:36:38.000000000 +0000 @@ -104,8 +104,8 @@ pp2d_kwargs) except ValueError as e: if dev in label_list: - LOG.warn("device %s with label=%s not a" - "valid seed.", dev, label) + LOG.warning("device %s with label=%s not a" + "valid seed.", dev, label) continue mydata = _merge_new_seed(mydata, seeded) diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceOpenNebula.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceOpenNebula.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceOpenNebula.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceOpenNebula.py 2017-05-26 18:36:38.000000000 +0000 @@ -64,7 +64,7 @@ except BrokenContextDiskDir as exc: raise exc except util.MountFailedError: - LOG.warn("%s was not mountable" % cdev) + LOG.warning("%s was not mountable", cdev) if results: seed = cdev @@ -286,12 +286,12 @@ output = output[0:-1] # remove trailing null # go through output. First _start_ is for 'preset', second for 'target'. - # Add to target only things were changed and not in volitile + # Add to ret only things were changed and not in excluded. 
     for line in output.split("\x00"):
         try:
             (key, val) = line.split("=", 1)
             if target is preset:
-                target[key] = val
+                preset[key] = val
             elif (key not in excluded and
                   (key in keylist_in or preset.get(key) != val)):
                 ret[key] = val
@@ -381,7 +381,7 @@
         try:
             results['userdata'] = util.b64d(results['userdata'])
         except TypeError:
-            LOG.warn("Failed base64 decoding of userdata")
+            LOG.warning("Failed base64 decoding of userdata")

     # generate static /etc/network/interfaces
     # only if there are any required context variables
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceOpenStack.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceOpenStack.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceOpenStack.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceOpenStack.py 2017-05-26 18:36:38.000000000 +0000
@@ -60,7 +60,7 @@
         try:
             retries = int(self.ds_cfg.get("retries", retries))
         except Exception:
-            util.logexc(LOG, "Failed to get max wait. using %s", retries)
+            util.logexc(LOG, "Failed to get retries. using %s", retries)

         return (max_wait, timeout, retries)
@@ -73,7 +73,7 @@
         if len(filtered):
             urls = filtered
         else:
-            LOG.warn("Empty metadata url list! using default list")
+            LOG.warning("Empty metadata url list! using default list")
             urls = [DEF_MD_URL]

         md_urls = []
@@ -137,7 +137,7 @@
             try:
                 self.vendordata_raw = sources.convert_vendordata(vd)
             except ValueError as e:
-                LOG.warn("Invalid content in vendor-data: %s", e)
+                LOG.warning("Invalid content in vendor-data: %s", e)
                 self.vendordata_raw = None

         return True
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceOVF.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceOVF.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceOVF.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceOVF.py 2017-05-26 18:36:38.000000000 +0000
@@ -225,12 +225,12 @@
     try:
         max_wait = int(cfg.get(max_wait_cfg_option, default_max_wait))
     except ValueError:
-        LOG.warn("Failed to get '%s', using %s",
-                 max_wait_cfg_option, default_max_wait)
+        LOG.warning("Failed to get '%s', using %s",
+                    max_wait_cfg_option, default_max_wait)

     if max_wait <= 0:
-        LOG.warn("Invalid value '%s' for '%s', using '%s' instead",
-                 max_wait, max_wait_cfg_option, default_max_wait)
+        LOG.warning("Invalid value '%s' for '%s', using '%s' instead",
+                    max_wait, max_wait_cfg_option, default_max_wait)
         max_wait = default_max_wait

     return max_wait
@@ -355,7 +355,7 @@
         try:
             (fname, contents) = util.mount_cb(fullp, get_ovf_env, mtype=mtype)
         except util.MountFailedError:
-            LOG.debug("%s not mountable as iso9660" % fullp)
+            LOG.debug("%s not mountable as iso9660", fullp)
             continue

         if contents is not False:
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceSmartOS.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceSmartOS.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/DataSourceSmartOS.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/DataSourceSmartOS.py 2017-05-26 18:36:38.000000000 +0000
@@ -555,7 +555,7 @@
             val = base64.b64decode(val.encode()).decode()
         # Bogus input produces different errors in Python 2 and 3
         except (TypeError, binascii.Error):
-            LOG.warn("Failed base64 decoding key '%s': %s", key, val)
+            LOG.warning("Failed base64 decoding key '%s': %s", key, val)

     if strip:
         val = val.strip()
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/helpers/azure.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/helpers/azure.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/helpers/azure.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/helpers/azure.py 2017-05-26 18:36:38.000000000 +0000
@@ -29,6 +29,14 @@
         os.chdir(prevdir)

+def _get_dhcp_endpoint_option_name():
+    if util.is_FreeBSD():
+        azure_endpoint = "option-245"
+    else:
+        azure_endpoint = "unknown-245"
+    return azure_endpoint
+
+
 class AzureEndpointHttpClient(object):

     headers = {
@@ -235,8 +243,9 @@
         leases = []
         content = util.load_file(fallback_lease_file)
         LOG.debug("content is %s", content)
+        option_name = _get_dhcp_endpoint_option_name()
         for line in content.splitlines():
-            if 'unknown-245' in line:
+            if option_name in line:
                 # Example line from Ubuntu
                 # option unknown-245 a8:3f:81:10;
                 leases.append(line.strip(' ').split(' ', 2)[-1].strip(';\n"'))
@@ -289,7 +298,7 @@
             LOG.debug("Unable to find endpoint in dhclient logs. "
                       " Falling back to check lease files")
             if fallback_lease_file is None:
-                LOG.warn("No fallback lease file was specified.")
+                LOG.warning("No fallback lease file was specified.")
                 value = None
             else:
                 LOG.debug("Looking for endpoint in lease file %s",
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/helpers/digitalocean.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/helpers/digitalocean.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/helpers/digitalocean.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/helpers/digitalocean.py 2017-05-26 18:36:38.000000000 +0000
@@ -23,11 +23,8 @@
     """
     if not nic:
-        for cdev in sorted(cloudnet.get_devicelist()):
-            if cloudnet.is_physical(cdev):
-                nic = cdev
-                LOG.debug("assigned nic '%s' for link-local discovery", nic)
-                break
+        nic = get_link_local_nic()
+        LOG.debug("selected interface '%s' for reading metadata", nic)

     if not nic:
         raise RuntimeError("unable to find interfaces to access the"
@@ -57,6 +54,13 @@
     return nic

+def get_link_local_nic():
+    nics = [f for f in cloudnet.get_devicelist() if cloudnet.is_physical(f)]
+    if not nics:
+        return None
+    return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, 'ifindex'))
+
+
 def del_ipv4_link_local(nic=None):
     """Remove the ip4LL address. While this is not necessary, the ip4LL
     address is extraneous and confusing to users.
@@ -107,15 +111,12 @@
           }
     """

-    def _get_subnet_part(pcfg, nameservers=None):
+    def _get_subnet_part(pcfg):
         subpart = {'type': 'static',
                    'control': 'auto',
                    'address': pcfg.get('ip_address'),
                    'gateway': pcfg.get('gateway')}

-        if nameservers:
-            subpart['dns_nameservers'] = nameservers
-
         if ":" in pcfg.get('ip_address'):
             subpart['address'] = "{0}/{1}".format(pcfg.get('ip_address'),
                                                   pcfg.get('cidr'))
@@ -124,27 +125,31 @@

         return subpart

-    all_nics = []
-    for k in ('public', 'private'):
-        if k in config:
-            all_nics.extend(config[k])
-
-    macs_to_nics = cloudnet.get_interfaces_by_mac()
     nic_configs = []
+    macs_to_nics = cloudnet.get_interfaces_by_mac()
+    LOG.debug("nic mapping: %s", macs_to_nics)

-    for nic in all_nics:
+    for n in config:
+        nic = config[n][0]
+        LOG.debug("considering %s", nic)

         mac_address = nic.get('mac')
+        if mac_address not in macs_to_nics:
+            raise RuntimeError("Did not find network interface on system "
+                               "with mac '%s'. Cannot apply configuration: %s"
+                               % (mac_address, nic))
+
+        sysfs_name = macs_to_nics.get(mac_address)
         nic_type = nic.get('type', 'unknown')
-        # Note: the entry 'public' above contains a list, but
-        # the list will only ever have one nic inside it per digital ocean.
-        # If it ever had more than one nic, then this code would
-        # assign all 'public' the same name.
-        if_name = NIC_MAP.get(nic_type, sysfs_name)
-        LOG.debug("mapped %s interface to %s, assigning name of %s",
-                  mac_address, sysfs_name, if_name)
+
+        if_name = NIC_MAP.get(nic_type, sysfs_name)
+        if if_name != sysfs_name:
+            LOG.debug("Found %s interface '%s' on '%s', assigned name of '%s'",
+                      nic_type, mac_address, sysfs_name, if_name)
+        else:
+            msg = ("Found interface '%s' on '%s', which is not a public "
+                   "or private interface. Using default system naming.")
+            LOG.debug(msg, mac_address, sysfs_name)

         ncfg = {'type': 'physical',
                 'mac_address': mac_address,
@@ -157,13 +162,8 @@
                 continue

             sub_part = _get_subnet_part(raw_subnet)
-            if nic_type == 'public' and 'anchor' not in netdef:
-                # add DNS resolvers to the public interfaces only
-                sub_part = _get_subnet_part(raw_subnet, dns_servers)
-            else:
-                # remove the gateway any non-public interfaces
-                if 'gateway' in sub_part:
-                    del sub_part['gateway']
+            if nic_type != "public" or "anchor" in netdef:
+                del sub_part['gateway']

             subnets.append(sub_part)
@@ -171,6 +171,10 @@
         nic_configs.append(ncfg)
         LOG.debug("nic '%s' configuration: %s", if_name, ncfg)

+    if dns_servers:
+        LOG.debug("added dns servers: %s", dns_servers)
+        nic_configs.append({'type': 'nameserver', 'address': dns_servers})
+
     return {'version': 1, 'config': nic_configs}
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/helpers/openstack.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/helpers/openstack.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/helpers/openstack.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/helpers/openstack.py 2017-05-26 18:36:38.000000000 +0000
@@ -21,7 +21,7 @@
 from cloudinit import url_helper
 from cloudinit import util

-# For reference: http://tinyurl.com/laora4c
+# See https://docs.openstack.org/user-guide/cli-config-drive.html

 LOG = logging.getLogger(__name__)
@@ -52,6 +52,7 @@
 PHYSICAL_TYPES = (
     None,
     'bridge',
+    'dvs',
     'ethernet',
     'hw_veb',
     'hyperv',
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/helpers/vmware/imc/config_file.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/helpers/vmware/imc/config_file.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/helpers/vmware/imc/config_file.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/helpers/vmware/imc/config_file.py 2017-05-26 18:36:38.000000000 +0000
@@ -43,9 +43,9 @@
         # "sensitive" settings shall not be logged
         if canLog:
-            logger.debug("ADDED KEY-VAL :: '%s' = '%s'" % (key, val))
+            logger.debug("ADDED KEY-VAL :: '%s' = '%s'", key, val)
         else:
-            logger.debug("ADDED KEY-VAL :: '%s' = '*****************'" % key)
+            logger.debug("ADDED KEY-VAL :: '%s' = '*****************'", key)

         self[key] = val
@@ -60,7 +60,7 @@
         Keyword arguments:
         filename - The full path to the config file.
         """
-        logger.info('Parsing the config file %s.' % filename)
+        logger.info('Parsing the config file %s.', filename)

         config = configparser.ConfigParser()
         config.optionxform = str
@@ -69,7 +69,7 @@
         self.clear()

         for category in config.sections():
-            logger.debug("FOUND CATEGORY = '%s'" % category)
+            logger.debug("FOUND CATEGORY = '%s'", category)

             for (key, value) in config.items(category):
                 self._insertKey(category + '|' + key, value)
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/__init__.py cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/__init__.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/sources/__init__.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/sources/__init__.py 2017-05-26 18:36:38.000000000 +0000
@@ -50,7 +50,7 @@
         self.distro = distro
         self.paths = paths
         self.userdata = None
-        self.metadata = None
+        self.metadata = {}
         self.userdata_raw = None
         self.vendordata = None
         self.vendordata_raw = None
@@ -210,7 +210,7 @@
         else:
             hostname = toks[0]

-        if fqdn:
+        if fqdn and domain != defdomain:
             return "%s.%s" % (hostname, domain)
         else:
             return hostname
@@ -237,8 +237,8 @@
         if candidate in valid:
             return candidate
         else:
-            LOG.warn("invalid dsmode '%s', using default=%s",
-                     candidate, default)
+            LOG.warning("invalid dsmode '%s', using default=%s",
+                        candidate, default)
             return default

     return default
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/stages.py cloud-init-0.7.9-153-g16a7302f/cloudinit/stages.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/stages.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/stages.py 2017-05-26 18:36:38.000000000 +0000
@@ -163,8 +163,8 @@
             except OSError as e:
                 error = e

-        LOG.warn("Failed changing perms on '%s'. tried: %s. %s",
-                 log_file, ','.join(perms), error)
+        LOG.warning("Failed changing perms on '%s'. tried: %s. %s",
+                    log_file, ','.join(perms), error)

     def read_cfg(self, extra_fns=None):
         # None check so that we don't keep on re-loading if empty
@@ -447,9 +447,9 @@
             mod_locs, looked_locs = importer.find_module(
                 mod_name, [''], ['list_types', 'handle_part'])
             if not mod_locs:
-                LOG.warn("Could not find a valid user-data handler"
-                         " named %s in file %s (searched %s)",
-                         mod_name, fname, looked_locs)
+                LOG.warning("Could not find a valid user-data handler"
+                            " named %s in file %s (searched %s)",
+                            mod_name, fname, looked_locs)
                 continue
             mod = importer.import_module(mod_locs[0])
             mod = handlers.fixup_handler(mod)
@@ -568,7 +568,8 @@
         if not isinstance(vdcfg, dict):
             vdcfg = {'enabled': False}
-            LOG.warn("invalid 'vendor_data' setting. resetting to: %s", vdcfg)
+            LOG.warning("invalid 'vendor_data' setting. resetting to: %s",
+                        vdcfg)

         enabled = vdcfg.get('enabled')
         no_handlers = vdcfg.get('disabled_handlers', None)
@@ -623,7 +624,7 @@
             return (None, loc)
         if ncfg:
             return (ncfg, loc)
-        return (net.generate_fallback_config(), "fallback")
+        return (self.distro.generate_fallback_config(), "fallback")

     def apply_network_config(self, bring_up):
         netcfg, src = self._find_networking_config()
@@ -632,10 +633,10 @@
             return

         try:
-            LOG.debug("applying net config names for %s" % netcfg)
+            LOG.debug("applying net config names for %s", netcfg)
             self.distro.apply_network_config_names(netcfg)
         except Exception as e:
-            LOG.warn("Failed to rename devices: %s", e)
+            LOG.warning("Failed to rename devices: %s", e)

         if (self.datasource is not NULL_DATA_SOURCE and
                 not self.is_new_instance()):
@@ -646,10 +647,14 @@
                   src, bring_up, netcfg)
         try:
             return self.distro.apply_network_config(netcfg, bring_up=bring_up)
+        except net.RendererNotFoundError as e:
+            LOG.error("Unable to render networking. Network config is "
+                      "likely broken: %s", e)
+            return
         except NotImplementedError:
-            LOG.warn("distro '%s' does not implement apply_network_config. "
-                     "networking may not be configured properly." %
-                     self.distro)
+            LOG.warning("distro '%s' does not implement apply_network_config. "
+                        "networking may not be configured properly.",
+                        self.distro)
             return
@@ -733,15 +738,15 @@
             if not mod_name:
                 continue
             if freq and freq not in FREQUENCIES:
-                LOG.warn(("Config specified module %s"
-                          " has an unknown frequency %s"), raw_name, freq)
+                LOG.warning(("Config specified module %s"
+                             " has an unknown frequency %s"), raw_name, freq)
                 # Reset it so when ran it will get set to a known value
                 freq = None
             mod_locs, looked_locs = importer.find_module(
                 mod_name, ['', type_utils.obj_name(config)], ['handle'])
             if not mod_locs:
-                LOG.warn("Could not find module named %s (searched %s)",
-                         mod_name, looked_locs)
+                LOG.warning("Could not find module named %s (searched %s)",
+                            mod_name, looked_locs)
                 continue
             mod = config.fixup_module(importer.import_module(mod_locs[0]))
             mostly_mods.append([mod, raw_name, freq, run_args])
@@ -873,7 +878,7 @@
         pickle_contents = util.load_file(fname, decode=False)
     except Exception as e:
         if os.path.isfile(fname):
-            LOG.warn("failed loading pickle in %s: %s" % (fname, e))
+            LOG.warning("failed loading pickle in %s: %s", fname, e)
         pass

     # This is allowed so just return nothing successfully loaded...
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/templater.py cloud-init-0.7.9-153-g16a7302f/cloudinit/templater.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/templater.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/templater.py 2017-05-26 18:36:38.000000000 +0000
@@ -103,14 +103,14 @@
         raise ValueError("Unknown template rendering type '%s' requested"
                          % template_type)
     if template_type == 'jinja' and not JINJA_AVAILABLE:
-        LOG.warn("Jinja not available as the selected renderer for"
-                 " desired template, reverting to the basic renderer.")
+        LOG.warning("Jinja not available as the selected renderer for"
+                    " desired template, reverting to the basic renderer.")
        return ('basic', basic_render, rest)
     elif template_type == 'jinja' and JINJA_AVAILABLE:
         return ('jinja', jinja_render, rest)
     if template_type == 'cheetah' and not CHEETAH_AVAILABLE:
-        LOG.warn("Cheetah not available as the selected renderer for"
-                 " desired template, reverting to the basic renderer.")
+        LOG.warning("Cheetah not available as the selected renderer for"
+                    " desired template, reverting to the basic renderer.")
         return ('basic', basic_render, rest)
     elif template_type == 'cheetah' and CHEETAH_AVAILABLE:
         return ('cheetah', cheetah_render, rest)
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/url_helper.py cloud-init-0.7.9-153-g16a7302f/cloudinit/url_helper.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/url_helper.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/url_helper.py 2017-05-26 18:36:38.000000000 +0000
@@ -45,7 +45,7 @@
     from distutils.version import LooseVersion
     import pkg_resources
     _REQ = pkg_resources.get_distribution('requests')
-    _REQ_VER = LooseVersion(_REQ.version)
+    _REQ_VER = LooseVersion(_REQ.version)  # pylint: disable=no-member
     if _REQ_VER >= LooseVersion('0.8.8'):
         SSL_ENABLED = True
     if _REQ_VER >= LooseVersion('0.7.0') and _REQ_VER < LooseVersion('1.0.0'):
@@ -155,8 +155,8 @@
     scheme = urlparse(url).scheme
     if scheme == 'https' and ssl_details:
         if not SSL_ENABLED:
-            LOG.warn("SSL is not supported in requests v%s, "
-                     "cert. verification can not occur!", _REQ_VER)
+            LOG.warning("SSL is not supported in requests v%s, "
+                        "cert. verification can not occur!", _REQ_VER)
         else:
             if 'ca_certs' in ssl_details and ssl_details['ca_certs']:
                 ssl_args['verify'] = ssl_details['ca_certs']
@@ -415,14 +415,15 @@
             return

         if 'date' not in exception.headers:
-            LOG.warn("Missing header 'date' in %s response", exception.code)
+            LOG.warning("Missing header 'date' in %s response",
+                        exception.code)
             return

         date = exception.headers['date']
         try:
             remote_time = time.mktime(parsedate(date))
         except Exception as e:
-            LOG.warn("Failed to convert datetime '%s': %s", date, e)
+            LOG.warning("Failed to convert datetime '%s': %s", date, e)
             return

         skew = int(remote_time - time.time())
@@ -430,7 +431,7 @@
         old_skew = self.skew_data.get(host, 0)
         if abs(old_skew - skew) > self.skew_change_limit:
             self.update_skew_file(host, skew)
-            LOG.warn("Setting oauth clockskew for %s to %d", host, skew)
+            LOG.warning("Setting oauth clockskew for %s to %d", host, skew)
         self.skew_data[host] = skew

         return
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/user_data.py cloud-init-0.7.9-153-g16a7302f/cloudinit/user_data.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/user_data.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/user_data.py 2017-05-26 18:36:38.000000000 +0000
@@ -109,8 +109,9 @@
                     ctype_orig = None
                     was_compressed = True
                 except util.DecompressionError as e:
-                    LOG.warn("Failed decompressing payload from %s of length"
-                             " %s due to: %s", ctype_orig, len(payload), e)
+                    LOG.warning("Failed decompressing payload from %s of"
+                                " length %s due to: %s",
+                                ctype_orig, len(payload), e)
                     continue

             # Attempt to figure out the payloads content-type
@@ -228,9 +229,9 @@
                 if resp.ok():
                     content = resp.contents
                 else:
-                    LOG.warn(("Fetching from %s resulted in"
-                              " a invalid http code of %s"),
-                             include_url, resp.code)
+                    LOG.warning(("Fetching from %s resulted in"
+                                 " a invalid http code of %s"),
+                                include_url, resp.code)

             if content is not None:
                 new_msg = convert_string(content)
diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/util.py cloud-init-0.7.9-153-g16a7302f/cloudinit/util.py
--- cloud-init-0.7.9-47-gc81ea53/cloudinit/util.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/cloudinit/util.py 2017-05-26 18:36:38.000000000 +0000
@@ -24,6 +24,7 @@
 import pwd
 import random
 import re
+import shlex
 import shutil
 import socket
 import stat
@@ -75,6 +76,7 @@
 PROC_CMDLINE = None

 _LSB_RELEASE = {}
+PY26 = sys.version_info[0:2] == (2, 6)

 def get_architecture(target=None):
@@ -96,11 +98,11 @@
                 data[fmap[fname]] = val.strip()
         missing = [k for k in fmap.values() if k not in data]
         if len(missing):
-            LOG.warn("Missing fields in lsb_release --all output: %s",
-                     ','.join(missing))
+            LOG.warning("Missing fields in lsb_release --all output: %s",
+                        ','.join(missing))

     except ProcessExecutionError as err:
-        LOG.warn("Unable to get lsb_release --all: %s", err)
+        LOG.warning("Unable to get lsb_release --all: %s", err)
         data = dict((v, "UNAVAILABLE") for v in fmap.values())

     return data
@@ -476,10 +478,11 @@
     try:
         buf = six.BytesIO(encode_text(data))
         with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh:
+            # E1101 is https://github.com/PyCQA/pylint/issues/1444
             if decode:
-                return decode_binary(gh.read())
+                return decode_binary(gh.read())  # pylint: disable=E1101
             else:
-                return gh.read()
+                return gh.read()  # pylint: disable=E1101
     except Exception as e:
         if quiet:
             return data
@@ -565,6 +568,10 @@
     return len(toks) == 4

+def is_FreeBSD():
+    return system_info()['platform'].startswith('FreeBSD')
+
+
 def get_cfg_option_bool(yobj, key, default=False):
     if key not in yobj:
         return default
@@ -590,7 +597,7 @@
         'release': platform.release(),
         'python': platform.python_version(),
         'uname': platform.uname(),
-        'dist': platform.linux_distribution(),
+        'dist': platform.linux_distribution(),  # pylint: disable=W1505
     }
@@ -865,7 +872,7 @@
         url = "file://%s" % url
     if url.lower().startswith("file://"):
         if data:
-            LOG.warn("Unable to post data to file resource %s", url)
+            LOG.warning("Unable to post data to file resource %s", url)
         file_path = url[len("file://"):]
         try:
             contents = load_file(file_path, decode=False)
@@ -1279,7 +1286,7 @@
             # replace nulls with space and drop trailing null
             cmdline = contents.replace("\x00", " ")[:-1]
         except Exception as e:
-            LOG.warn("failed reading /proc/1/cmdline: %s", e)
+            LOG.warning("failed reading /proc/1/cmdline: %s", e)
             cmdline = ""
     else:
         try:
@@ -1400,7 +1407,7 @@
     # or even desirable to have that much junk
    # coming out to a non-debug stream
     if msg:
-        log.warn(msg, *args)
+        log.warning(msg, *args)
     # Debug gets the full trace.  However, nose has a bug whereby its
     # logcapture plugin doesn't properly handle the case where there is no
     # actual exception.  To avoid tracebacks during the test suite then, we'll
@@ -1688,7 +1695,7 @@
     os.chmod(path, real_mode)

-def write_file(filename, content, mode=0o644, omode="wb"):
+def write_file(filename, content, mode=0o644, omode="wb", copy_mode=False):
     """
     Writes a file with the given content and sets the file mode as specified.
     Resotres the SELinux context if possible.
@@ -1698,6 +1705,14 @@
     @param mode: The filesystem mode to set on the file.
     @param omode: The open mode used when opening the file (w, wb, a, etc.)
     """
+
+    if copy_mode:
+        try:
+            file_stat = os.stat(filename)
+            mode = stat.S_IMODE(file_stat.st_mode)
+        except OSError:
+            pass
+
     ensure_dir(os.path.dirname(filename))
     if 'b' in omode.lower():
         content = encode_text(content)
@@ -2047,11 +2062,56 @@
     return None

+def find_freebsd_part(label_part):
+    if label_part.startswith("/dev/label/"):
+        target_label = label_part[5:]
+        (label_part, err) = subp(['glabel', 'status', '-s'])
+        for labels in label_part.split("\n"):
+            items = labels.split()
+            if len(items) > 0 and items[0].startswith(target_label):
+                label_part = items[2]
+                break
+        label_part = str(label_part)
+    return label_part
+
+
+def get_path_dev_freebsd(path, mnt_list):
+    path_found = None
+    for line in mnt_list.split("\n"):
+        items = line.split()
+        if (len(items) > 2 and os.path.exists(items[1] + path)):
+            path_found = line
+            break
+    return path_found
+
+
+def get_mount_info_freebsd(path, log=LOG):
+    (result, err) = subp(['mount', '-p', path], rcs=[0, 1])
+    if len(err):
+        # find a path if the input is not a mounting point
+        (mnt_list, err) = subp(['mount', '-p'])
+        path_found = get_path_dev_freebsd(path, mnt_list)
+        if (path_found is None):
+            return None
+        result = path_found
+    ret = result.split()
+    label_part = find_freebsd_part(ret[0])
+    return "/dev/" + label_part, ret[2], ret[1]
+
+
 def parse_mount(path):
     (mountoutput, _err) = subp("mount")
     mount_locs = mountoutput.splitlines()
     for line in mount_locs:
         m = re.search(r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line)
+        if not m:
+            continue
+        # check whether the dev refers to a label on FreeBSD
+        # for example, if dev is '/dev/label/rootfs', we should
+        # continue finding the real device like '/dev/da0'.
+ devm = re.search('^(/dev/.+)p([0-9])$', m.group(1)) + if (not devm and is_FreeBSD()): + return get_mount_info_freebsd(path) devpth = m.group(1) mount_point = m.group(2) fs_type = m.group(3) @@ -2099,21 +2159,36 @@ return parse_mount(path) -def which(program): - # Return path of program for execution if found in path - def is_exe(fpath): - return os.path.isfile(fpath) and os.access(fpath, os.X_OK) - - _fpath, _ = os.path.split(program) - if _fpath: - if is_exe(program): +def is_exe(fpath): + # return boolean indicating if fpath exists and is executable. + return os.path.isfile(fpath) and os.access(fpath, os.X_OK) + + +def which(program, search=None, target=None): + target = target_path(target) + + if os.path.sep in program: + # if program had a '/' in it, then do not search PATH + # 'which' does consider cwd here. (cd / && which bin/ls) = bin/ls + # so effectively we set cwd to / (or target) + if is_exe(target_path(target, program)): return program - else: - for path in os.environ.get("PATH", "").split(os.pathsep): - path = path.strip('"') - exe_file = os.path.join(path, program) - if is_exe(exe_file): - return exe_file + + if search is None: + paths = [p.strip('"') for p in + os.environ.get("PATH", "").split(os.pathsep)] + if target == "/": + search = paths + else: + search = [p for p in paths if p.startswith("/")] + + # normalize path input + search = [os.path.abspath(p) for p in search] + + for path in search: + ppath = os.path.sep.join((path, program)) + if is_exe(target_path(target, ppath)): + return ppath return None @@ -2313,7 +2388,8 @@ uname_arch = os.uname()[4] if not (uname_arch == "x86_64" or (uname_arch.startswith("i") and uname_arch[2:] == "86") or - uname_arch == 'aarch64'): + uname_arch == 'aarch64' or + uname_arch == 'amd64'): LOG.debug("dmidata is not supported on %s", uname_arch) return None @@ -2321,8 +2397,8 @@ if dmidecode_path: return _call_dmidecode(key, dmidecode_path) - LOG.warn("did not find either path %s or dmidecode command", - DMI_SYS_PATH) + LOG.warning("did not find either path %s or dmidecode command", + DMI_SYS_PATH) return None @@ -2351,6 +2427,18 @@ # channel.ini is configparser loadable. # snappy will move to using /etc/system-image/config.d/*.ini # this is certainly not a perfect test, but good enough for now. 
+ orpath = "/etc/os-release" + try: + orinfo = load_shell_content(load_file(orpath, quiet=True)) + if orinfo.get('ID', '').lower() == "ubuntu-core": + return True + except ValueError as e: + LOG.warning("Unexpected error loading '%s': %s", orpath, e) + + cmdline = get_cmdline() + if 'snap_core=' in cmdline: + return True + content = load_file("/etc/system-image/channel.ini", quiet=True) if 'ubuntu-core' in content.lower(): return True @@ -2358,4 +2446,66 @@ return True return False + +def indent(text, prefix): + """replacement for indent from textwrap that is not available in 2.7.""" + lines = [] + for line in text.splitlines(True): + lines.append(prefix + line) + return ''.join(lines) + + +def rootdev_from_cmdline(cmdline): + found = None + for tok in cmdline.split(): + if tok.startswith("root="): + found = tok[5:] + break + if found is None: + return None + + if found.startswith("/dev/"): + return found + if found.startswith("LABEL="): + return "/dev/disk/by-label/" + found[len("LABEL="):] + if found.startswith("UUID="): + return "/dev/disk/by-uuid/" + found[len("UUID="):].lower() + if found.startswith("PARTUUID="): + disks_path = ("/dev/disk/by-partuuid/" + + found[len("PARTUUID="):].lower()) + if os.path.exists(disks_path): + return disks_path + results = find_devs_with(found) + if results: + return results[0] + # we know this doesn't exist, but for consistency return the path as + # it /would/ exist + return disks_path + + return "/dev/" + found + + +def load_shell_content(content, add_empty=False, empty_val=None): + """Given shell like syntax (key=value\nkey2=value2\n) in content + return the data in dictionary form. If 'add_empty' is True + then add entries in to the returned dictionary for 'VAR=' + variables. Set their value to empty_val.""" + + def _shlex_split(blob): + if PY26 and isinstance(blob, six.text_type): + # Older versions don't support unicode input + blob = blob.encode("utf8") + return shlex.split(blob) + + data = {} + for line in _shlex_split(content): + key, value = line.split("=", 1) + if not value: + value = empty_val + if add_empty or value: + data[key] = value + + return data + + # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/version.py cloud-init-0.7.9-153-g16a7302f/cloudinit/version.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/version.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/version.py 2017-05-26 18:36:38.000000000 +0000 @@ -6,6 +6,13 @@ __VERSION__ = "0.7.9" +FEATURES = [ + # supports network config version 1 + 'NETWORK_CONFIG_V1', + # supports network config version 2 (netplan) + 'NETWORK_CONFIG_V2', +] + def version_string(): return __VERSION__ diff -Nru cloud-init-0.7.9-47-gc81ea53/cloudinit/warnings.py cloud-init-0.7.9-153-g16a7302f/cloudinit/warnings.py --- cloud-init-0.7.9-47-gc81ea53/cloudinit/warnings.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/cloudinit/warnings.py 2017-05-26 18:36:38.000000000 +0000 @@ -130,10 +130,10 @@ os.path.join(_get_warn_dir(cfg), name), topline + "\n".join(fmtlines) + "\n" + topline) - LOG.warn(topline + "\n".join(fmtlines) + "\n" + closeline) + LOG.warning(topline + "\n".join(fmtlines) + "\n" + closeline) if sleep: - LOG.debug("sleeping %d seconds for warning '%s'" % (sleep, name)) + LOG.debug("sleeping %d seconds for warning '%s'", sleep, name) time.sleep(sleep) # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/config/cloud.cfg-freebsd cloud-init-0.7.9-153-g16a7302f/config/cloud.cfg-freebsd --- 
cloud-init-0.7.9-47-gc81ea53/config/cloud.cfg-freebsd 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/config/cloud.cfg-freebsd 2017-05-26 18:36:38.000000000 +0000 @@ -5,7 +5,7 @@ # This should not be required, but leave it in place until the real cause of # not being able to find -any- datasources is resolved. -datasource_list: ['ConfigDrive', 'OpenStack', 'Ec2'] +datasource_list: ['ConfigDrive', 'Azure', 'OpenStack', 'Ec2'] # A set of users which may be applied and/or used by various modules # when a 'default' entry is found it will reference the 'default_user' diff -Nru cloud-init-0.7.9-47-gc81ea53/debian/changelog cloud-init-0.7.9-153-g16a7302f/debian/changelog --- cloud-init-0.7.9-47-gc81ea53/debian/changelog 2017-03-03 07:38:34.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/debian/changelog 2017-06-28 18:11:19.000000000 +0000 @@ -1,4 +1,188 @@ -cloud-init (0.7.9-47-gc81ea53-0ubuntu1~16.10.1~ppa0) yakkety; urgency=medium +cloud-init (0.7.9-153-g16a7302f-0ubuntu1~16.10.2~ppa1) yakkety; urgency=medium + + * debian/patches/ds-identify-behavior-yakkety.patch: refresh patch. + * cherry-pick 5fb49bac: azure: identify platform by well known value + in chassis asset (LP: #1693939) + * cherry-pick 003c6678: net: remove systemd link file writing from eni + renderer + * cherry-pick 1cd4323b: azure: remove accidental duplicate line in + merge. + * cherry-pick ebc9ecbc: Azure: Add network-config, Refactor net layer + to handle + * cherry-pick 11121fe4: systemd: make cloud-final.service run before + apt daily (LP: #1693361) + + -- Scott Moser Wed, 28 Jun 2017 14:11:19 -0400 + +cloud-init (0.7.9-153-g16a7302f-0ubuntu1~16.10.1) yakkety-proposed; urgency=medium + + * New upstream snapshot. + - net: fix reading and rendering addresses in cidr format. + [Dimitri John Ledkov] (LP: #1689346, #1684349) + - disk_setup: udev settle before attempting partitioning or fs creation. + (LP: #1692093) + - GCE: Update the attribute used to find instance SSH keys. + [Daniel Watkins] (LP: #1693582) + - nplan: For bonds, allow dashed or underscore names of keys. + [Dimitri John Ledkov] (LP: #1690480) + - tests: python2.6: fix unit tests usage of assertNone and format. + - tests: update docstring on test_configured_list_with_none + - fix tools/ds-identify to not write None twice. + - tox/build: do not package depend on style requirements. + - tests: ntp: Restructure cc_ntp unit tests. [Chad Smith] + - flake8: move the pinned version of flake8 up to 3.3.0 + - tests: Apply workaround for snapd bug in test case. [Joshua Powers] + - RHEL/CentOS: Fix dual stack IPv4/IPv6 configuration. [Andreas Karis] + - disk_setup: fix several issues with gpt disk partitions. (LP: #1692087) + - function spelling & docstring update [Joshua Powers] + - tests: Fix unittest bug in ntp tests. [Joshua Powers] + - tox: move pylint target to 1.7.1 + - Fix get_interfaces_by_mac for empty macs (LP: #1692028) + - DigitalOcean: remove routes except for the public interface. + [Ben Howard] (LP: #1681531) + - netplan: pass macaddress, when specified, for vlans + [Dimitri John Ledkov] (LP: #1690388) + - doc: various improvements for the docs on cc_users_groups. + [Felix Dreissig] + - cc_ntp: write template before installing and add service restart + [Ryan Harper] (LP: #1645644) + - tests: fix cloudstack unit tests to avoid accessing + /var/lib/NetworkManager [Lars Kellogg-Stedman] + - tests: fix hardcoded path to mkfs.ext4 [Joshua Powers] (LP: #1691517) + - Actually skip warnings when .skip file is present.
+ [Chris Brinker] (LP: #1691551) + - netplan: fix netplan render_network_state signature. + [Dimitri John Ledkov] (LP: #1685944) + - Azure: fix reformatting of ephemeral disks on resize to large types. + (LP: #1686514) + - make deb: Add devscripts dependency for make deb. + Cleanup packages/bddeb. [Chad Smith] (LP: #1685935) + - openstack: fix log message copy/paste typo in _get_url_settings + [Lars Kellogg-Stedman] + - unittests: fix unittests run on centos [Joshua Powers] + - Improve detection of snappy to include os-release and kernel cmdline. + (LP: #1689944) + - Add address to config entry generated by _klibc_to_config_entry. + [Julien Castets] (LP: #1691135) + - sysconfig: Raise ValueError when multiple default gateways are present. + [Chad Smith] (LP: #1687485) + - FreeBSD: improvements and fixes for use on Azure + [Hongjiang Zhang] (LP: #1636345) + - Add unit tests for ds-identify, fix Ec2 bug found. + - fs_setup: if cmd is specified, use shell interpretation. + [Paul Meyer] (LP: #1687712) + - doc: document network configuration defaults policy and formats. + [Ryan Harper] + - doc: Fix name of "uri" key in docs for "cc_apt_configure" module + [Felix Dreissig] + - tests: Enable artful in integration tests [Joshua Powers] + + -- Scott Moser Fri, 26 May 2017 16:08:21 -0400 + +cloud-init (0.7.9-113-g513e99e0-0ubuntu1~16.10.1) yakkety; urgency=medium + + * debian/update-grub-legacy-ec2: fix early exit failure no /etc/fstab + file. (LP: #1682160) + * New upstream snapshot. + - nova-lxd: read product_name from environment, not platform. + (LP: #1685810) + - Fix yum repo config where keys contain array values [Dylan Perry] + - template: Update debian backports template [Joshua Powers] + - rsyslog: replace ~ with stop [Joshua Powers] (LP: #1367899) + - Doc: add additional RTD examples [Joshua Powers] + - Fix growpart for some cases when booted with root=PARTUUID. + (LP: #1684869) + - pylint: update output style to parseable [Joshua Powers] + - pylint: fix all logging warnings [Joshua Powers] + - CloudStack: Add NetworkManager to list of supported DHCP lease dirs. + [Syed Mushtaq Ahmed] + - net: kernel lies about vlans not stealing mac addresses, when they do + [Dimitri John Ledkov] (LP: #1682871) + - ds-identify: Check correct path for "latest" config drive + [Daniel Watkins] (LP: #1673637) + - doc: Fix example for resolv.conf configuration. [Jon Grimm] + - Fix examples that reference upstream chef repository. [Jon Grimm] + - doc: correct grammar and improve clarity in merging documentation. + [David Tagatac] + - doc: Add missing doc link to snap-config module. [Ryan Harper] + - snap: allows for creating cloud-init snap [Joshua Powers] + - DigitalOcean: assign IPv4ll address to lowest indexed interface. + [Ben Howard] (LP: #1676908) + - DigitalOcean: configure all NICs presented in meta-data. + [Ben Howard] (LP: #1676908) + - Remove (and/or fix) URL shortener references [Jon Grimm] + - HACKING.rst: more info on filling out contributors agreement. + - util: teach write_file about copy_mode option + [Lars Kellogg-Stedman] (LP: #1644064) + - DigitalOcean: bind resolvers to loopback interface. + [Ben Howard] (LP: #1676908) + - tests: fix AltCloud tests to not rely on blkid (LP: #1636531) + + -- Scott Moser Thu, 27 Apr 2017 13:38:40 -0400 + +cloud-init (0.7.9-90-g61eb03fe-0ubuntu1~16.10.1) yakkety; urgency=medium + + * debian/cloud-init.templates: add Bigstep to list of sources. (LP: #1676460) + * New upstream snapshot. + - OpenStack: add 'dvs' to the list of physical link types. 
(LP: #1674946) + - Fix bug that resulted in an attempt to rename bonds or vlans. + (LP: #1669860) + - tests: update OpenNebula and Digital Ocean to not rely on host + interfaces. + - net: in netplan renderer delete known image-builtin content. + (LP: #1675576) + - doc: correct grammar in capabilities.rst [David Tagatac] + - ds-identify: fix detecting of maas datasource. (LP: #1677710) + - netplan: remove debugging prints, add debug logging [Ryan Harper] + - ds-identify: do not write None twice to datasource_list. + - support resizing partition and rootfs on system booted without + initramfs. [Steve Langasek] (LP: #1677376) + - apt_configure: run only when needed. (LP: #1675185) + - OpenStack: identify OpenStack by product 'OpenStack Compute'. + (LP: #1675349) + - GCE: Search GCE in ds-identify, consider serial number in check. + (LP: #1674861) + - Add support for setting hashed passwords [Tore S. Lonoy] (LP: #1570325) + - Fix filesystem creation when using "partition: auto" + [Jonathan Ballet] (LP: #1634678) + - ConfigDrive: support reading config drive data from /config-drive. + (LP: #1673411) + - ds-identify: fix detection of Bigstep datasource. (LP: #1674766) + - test: add running of pylint [Joshua Powers] + - ds-identify: fix bug where filename expansion was left on. + - advertise network config v2 support (NETWORK_CONFIG_V2) in features. + - Bigstep: fix bug when executing in python3. [root] + - Fix unit test when running in a system deployed with cloud-init. + - Bounce network interface for Azure when using the built-in path. + [Brent Baude] (LP: #1674685) + - cloudinit.net: add network config v2 parsing and rendering [Ryan Harper] + - net: Fix incorrect call to isfile [Joshua Powers] (LP: #1674317) + - net: add renderers for automatically selecting the renderer. + - doc: fix config drive doc with regard to unpartitioned disks. + (LP: #1673818) + - test: Adding integratiron test for password as list [Joshua Powers] + - render_network_state: switch arguments around, do not require target + - support 'loopback' as a device type. + - Integration Testing: improve testcase subclassing [Wesley Wiedenmeier] + - gitignore: adding doc/rtd_html [Joshua Powers] + - doc: add instructions for running integration tests via tox. + [Joshua Powers] + - test: avoid differences in 'date' output due to daylight savings. + - Fix chef config module in omnibus install. [Jeremy Melvin] (LP: #1583837) + - Add feature flags to cloudinit.version. [Wesley Wiedenmeier] + - tox: add a citest environment + - Support chpasswd/list being a list in addition to a string. + [Sergio Lystopad] (LP: #1665694) + - doc: Fix configuration example for cc_set_passwords module. + [Sergio Lystopad] (LP: #1665773) + - net: support both ipv4 and ipv6 gateways in sysconfig. 
+ [Lars Kellogg-Stedman] (LP: #1669504) + - net: do not raise exception for > 3 nameservers + [Lars Kellogg-Stedman] (LP: #1670052) + + -- Scott Moser Mon, 03 Apr 2017 12:03:30 -0400 + +cloud-init (0.7.9-48-g1c795b9-0ubuntu1~16.10.1) yakkety; urgency=medium * debian/rules: install Z99-cloudinit-warnings.sh to /etc/profile.d * debian/patches/ds-identify-behavior-yakkety.patch: adjust default @@ -41,7 +225,7 @@ - Fix minor docs typo: perserve > preserve [Jeremy Bicha] - validate-yaml: use python rather than explicitly python3 - -- Scott Moser Fri, 03 Mar 2017 02:38:34 -0500 + -- Scott Moser Mon, 06 Mar 2017 16:37:28 -0500 cloud-init (0.7.9-0ubuntu1~16.10.1) yakkety; urgency=medium diff -Nru cloud-init-0.7.9-47-gc81ea53/debian/cherry-pick cloud-init-0.7.9-153-g16a7302f/debian/cherry-pick --- cloud-init-0.7.9-47-gc81ea53/debian/cherry-pick 2017-03-03 07:38:34.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/debian/cherry-pick 2017-06-28 18:11:19.000000000 +0000 @@ -170,7 +170,7 @@ } local commit_files="" - commit_files=( debian/changelog "$series" "$fpath" ) + commit_files=( "$series" "$fpath" ) git diff HEAD "${commit_files[@]}" echo -n "Commit this change? (Y/n): " @@ -187,6 +187,9 @@ git commit -m "$msg" "${commit_files[@]}" || fail "failed to commit '$msg'" + git commit -m "update changelog" debian/changelog || + fail "failed to commit update to debian changelog." + return 0 } diff -Nru cloud-init-0.7.9-47-gc81ea53/debian/cloud-init.templates cloud-init-0.7.9-153-g16a7302f/debian/cloud-init.templates --- cloud-init-0.7.9-47-gc81ea53/debian/cloud-init.templates 2017-03-03 07:38:34.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/debian/cloud-init.templates 2017-06-28 18:11:19.000000000 +0000 @@ -1,8 +1,8 @@ Template: cloud-init/datasources Type: multiselect -Default: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Ec2, CloudStack, None -Choices-C: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Ec2, CloudStack, None -Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from Openstack Config Drive, OpenNebula: read from OpenNebula context disk, DigitalOcean: reads data from Droplet datasource, Azure: read from MS Azure cdrom. Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: google compute metadata service, OpenStack: native openstack metadata service, CloudSigma: metadata over serial for cloudsigma.com, SmartOS: Read from SmartOS metadata service, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, None: Failsafe datasource +Default: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Ec2, CloudStack, None +Choices-C: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Ec2, CloudStack, None +Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from Openstack Config Drive, OpenNebula: read from OpenNebula context disk, DigitalOcean: reads data from Droplet datasource, Azure: read from MS Azure cdrom. 
Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: google compute metadata service, OpenStack: native openstack metadata service, CloudSigma: metadata over serial for cloudsigma.com, SmartOS: Read from SmartOS metadata service, Bigstep: Bigstep metadata service, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, None: Failsafe datasource Description: Which data sources should be searched? Cloud-init supports searching different "Data Sources" for information that it uses to configure a cloud instance. diff -Nru cloud-init-0.7.9-47-gc81ea53/debian/patches/azure-use-walinux-agent.patch cloud-init-0.7.9-153-g16a7302f/debian/patches/azure-use-walinux-agent.patch --- cloud-init-0.7.9-47-gc81ea53/debian/patches/azure-use-walinux-agent.patch 2017-03-03 07:38:34.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/debian/patches/azure-use-walinux-agent.patch 2017-06-28 18:11:19.000000000 +0000 @@ -6,8 +6,8 @@ Author: Scott Moser --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py -@@ -46,7 +46,7 @@ BOUNCE_COMMAND = [ - RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource' +@@ -177,7 +177,7 @@ if util.is_FreeBSD(): + LOG.debug("resource disk is None") BUILTIN_DS_CONFIG = { - 'agent_command': AGENT_START_BUILTIN, diff -Nru cloud-init-0.7.9-47-gc81ea53/debian/patches/cpick-003c6678-net-remove-systemd-link-file-writing-from-eni-renderer cloud-init-0.7.9-153-g16a7302f/debian/patches/cpick-003c6678-net-remove-systemd-link-file-writing-from-eni-renderer --- cloud-init-0.7.9-47-gc81ea53/debian/patches/cpick-003c6678-net-remove-systemd-link-file-writing-from-eni-renderer 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/debian/patches/cpick-003c6678-net-remove-systemd-link-file-writing-from-eni-renderer 2017-06-28 18:11:19.000000000 +0000 @@ -0,0 +1,95 @@ +From 003c6678e9c873b3b787a814016872b6592f5069 Mon Sep 17 00:00:00 2001 +From: Ryan Harper +Date: Thu, 25 May 2017 15:37:15 -0500 +Subject: [PATCH] net: remove systemd link file writing from eni renderer + +During the network v2 merge, we inadvertently re-enabled rendering systemd +.link files. These files are not required as cloud-init already has to do +interface renaming due to issues with udevd which may refuse to rename +certain interfaces (such as veth devices in a LXD container). As such, +removing the code altogether.
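For context, the runtime mechanism that makes those .link files unnecessary is a plain kernel rename driven through iproute2, which is what cloud-init's net layer runs. A minimal Python sketch of that operation (the function name here is invented for illustration; only the underlying 'ip link set' command comes from the patch below):

    import subprocess

    def rename_iface(current_name, wanted_name):
        # Rename a NIC in the kernel; the runtime equivalent of a systemd
        # .link file's [Link] Name= setting. The device generally has to be
        # down (or safely downable) before the rename succeeds, which is
        # why the net layer tracks a 'downable' flag per device.
        subprocess.check_call(
            ["ip", "link", "set", current_name, "name", wanted_name])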
+--- + cloudinit/net/eni.py | 25 ------------------------- + tests/unittests/test_net.py | 9 +++------ + 2 files changed, 3 insertions(+), 31 deletions(-) + +--- a/cloudinit/net/eni.py ++++ b/cloudinit/net/eni.py +@@ -304,8 +304,6 @@ class Renderer(renderer.Renderer): + config = {} + self.eni_path = config.get('eni_path', 'etc/network/interfaces') + self.eni_header = config.get('eni_header', None) +- self.links_path_prefix = config.get( +- 'links_path_prefix', 'etc/systemd/network/50-cloud-init-') + self.netrules_path = config.get( + 'netrules_path', 'etc/udev/rules.d/70-persistent-net.rules') + +@@ -451,28 +449,6 @@ class Renderer(renderer.Renderer): + util.write_file(netrules, + self._render_persistent_net(network_state)) + +- if self.links_path_prefix: +- self._render_systemd_links(target, network_state, +- links_prefix=self.links_path_prefix) +- +- def _render_systemd_links(self, target, network_state, links_prefix): +- fp_prefix = util.target_path(target, links_prefix) +- for f in glob.glob(fp_prefix + "*"): +- os.unlink(f) +- for iface in network_state.iter_interfaces(): +- if (iface['type'] == 'physical' and 'name' in iface and +- iface.get('mac_address')): +- fname = fp_prefix + iface['name'] + ".link" +- content = "\n".join([ +- "[Match]", +- "MACAddress=" + iface['mac_address'], +- "", +- "[Link]", +- "Name=" + iface['name'], +- "" +- ]) +- util.write_file(fname, content) +- + + def network_state_to_eni(network_state, header=None, render_hwaddress=False): + # render the provided network state, return a string of equivalent eni +@@ -480,7 +456,6 @@ def network_state_to_eni(network_state, + renderer = Renderer(config={ + 'eni_path': eni_path, + 'eni_header': header, +- 'links_path_prefix': None, + 'netrules_path': None, + }) + if not header: +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -992,9 +992,7 @@ class TestEniNetRendering(CiTestCase): + os.makedirs(render_dir) + + renderer = eni.Renderer( +- {'links_path_prefix': None, +- 'eni_path': 'interfaces', 'netrules_path': None, +- }) ++ {'eni_path': 'interfaces', 'netrules_path': None}) + renderer.render_network_state(ns, render_dir) + + self.assertTrue(os.path.exists(os.path.join(render_dir, +@@ -1376,7 +1374,7 @@ class TestNetplanRoundTrip(CiTestCase): + + class TestEniRoundTrip(CiTestCase): + def _render_and_read(self, network_config=None, state=None, eni_path=None, +- links_prefix=None, netrules_path=None, dir=None): ++ netrules_path=None, dir=None): + if dir is None: + dir = self.tmp_dir() + +@@ -1391,8 +1389,7 @@ class TestEniRoundTrip(CiTestCase): + eni_path = 'etc/network/interfaces' + + renderer = eni.Renderer( +- config={'eni_path': eni_path, 'links_path_prefix': links_prefix, +- 'netrules_path': netrules_path}) ++ config={'eni_path': eni_path, 'netrules_path': netrules_path}) + + renderer.render_network_state(ns, dir) + return dir2dict(dir) diff -Nru cloud-init-0.7.9-47-gc81ea53/debian/patches/cpick-11121fe4-systemd-make-cloud-final.service-run-before-apt-daily cloud-init-0.7.9-153-g16a7302f/debian/patches/cpick-11121fe4-systemd-make-cloud-final.service-run-before-apt-daily --- cloud-init-0.7.9-47-gc81ea53/debian/patches/cpick-11121fe4-systemd-make-cloud-final.service-run-before-apt-daily 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/debian/patches/cpick-11121fe4-systemd-make-cloud-final.service-run-before-apt-daily 2017-06-28 18:11:19.000000000 +0000 @@ -0,0 +1,33 @@ +From 11121fe4d5af0554140d88685029fa248fa0c7c9 Mon Sep 17 00:00:00 2001 +From: Scott Moser +Date: Mon, 
12 Jun 2017 14:10:58 -0400 +Subject: [PATCH] systemd: make cloud-final.service run before apt daily + services. + +This changes all cloud-init systemd units to run 'Before' the apt processes +that run daily and may cause a lock on the apt database. + +apt-daily-upgrade.service contains 'After=apt-daily.service'. +Thus the following order is enforced, so we can just be 'Before' the first. + apt-daily.service + apt-daily-upgrade.service + +Note that this means only that apt-daily* will not run until +cloud-init has entirely finished. Any other processes running apt-get +operations are still affected by the global lock. + +LP: #1693361 +--- + systemd/cloud-final.service | 1 + + 1 file changed, 1 insertion(+) + +--- a/systemd/cloud-final.service ++++ b/systemd/cloud-final.service +@@ -2,6 +2,7 @@ + Description=Execute cloud user/final scripts + After=network-online.target cloud-config.service rc-local.service multi-user.target + Wants=network-online.target cloud-config.service ++Before=apt-daily.service + + [Service] + Type=oneshot diff -Nru cloud-init-0.7.9-47-gc81ea53/debian/patches/cpick-1cd4323b-azure-remove-accidental-duplicate-line-in-merge cloud-init-0.7.9-153-g16a7302f/debian/patches/cpick-1cd4323b-azure-remove-accidental-duplicate-line-in-merge --- cloud-init-0.7.9-47-gc81ea53/debian/patches/cpick-1cd4323b-azure-remove-accidental-duplicate-line-in-merge 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/debian/patches/cpick-1cd4323b-azure-remove-accidental-duplicate-line-in-merge 2017-06-28 18:11:19.000000000 +0000 @@ -0,0 +1,22 @@ +From 1cd4323b940408aa34dcaa01bd8a7ed43d9a966a Mon Sep 17 00:00:00 2001 +From: Scott Moser +Date: Thu, 1 Jun 2017 12:40:12 -0400 +Subject: [PATCH] azure: remove accidental duplicate line in merge. + +In a previous commit I inadvertently left two calls to + asset_tag = util.read_dmi_data('chassis-asset-tag') +The second did not do anything useful. Thus, remove it. +--- + cloudinit/sources/DataSourceAzure.py | 1 - + 1 file changed, 1 deletion(-) + +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -326,7 +326,6 @@ class DataSourceAzureNet(sources.DataSou + if asset_tag != AZURE_CHASSIS_ASSET_TAG: + LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) + return False +- asset_tag = util.read_dmi_data('chassis-asset-tag') + ddir = self.ds_cfg['data_dir'] + + candidates = [self.seed_dir] diff -Nru cloud-init-0.7.9-47-gc81ea53/debian/patches/cpick-5fb49bac-azure-identify-platform-by-well-known-value-in-chassis cloud-init-0.7.9-153-g16a7302f/debian/patches/cpick-5fb49bac-azure-identify-platform-by-well-known-value-in-chassis --- cloud-init-0.7.9-47-gc81ea53/debian/patches/cpick-5fb49bac-azure-identify-platform-by-well-known-value-in-chassis 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/debian/patches/cpick-5fb49bac-azure-identify-platform-by-well-known-value-in-chassis 2017-06-28 18:11:19.000000000 +0000 @@ -0,0 +1,338 @@ +From 5fb49bacf7441d8d20a7b4e0e7008ca586f5ebab Mon Sep 17 00:00:00 2001 +From: Chad Smith +Date: Tue, 30 May 2017 10:28:05 -0600 +Subject: [PATCH] azure: identify platform by well known value in chassis asset + tag. + +Azure sets a known chassis asset tag to 7783-7084-3265-9085-8269-3286-77. +We can inspect this in both ds-identify and DataSource.get_data to +determine whether we are on Azure. + +Added unit tests to cover these changes +and some minor tweaks to Exception error message content to give more +context on malformed or missing ovf-env.xml files.
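The detection added by this cherry-pick is small enough to sketch standalone. A minimal sketch, assuming the Linux sysfs path that ds-identify reads (the helper name is invented for illustration; the tag value and the path come from the patch itself):

    AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'

    def chassis_asset_tag_is_azure(
            path='/sys/class/dmi/id/chassis_asset_tag'):
        # Azure stamps this static chassis asset tag on every instance, so
        # a simple string comparison identifies the platform; any failure
        # to read the tag is treated as "not detectably Azure".
        try:
            with open(path) as fp:
                return fp.read().strip() == AZURE_CHASSIS_ASSET_TAG
        except (IOError, OSError):
            return False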
+ +LP: #1693939 +--- + cloudinit/sources/DataSourceAzure.py | 9 +++- + tests/unittests/test_datasource/test_azure.py | 66 +++++++++++++++++++++++++-- + tests/unittests/test_ds_identify.py | 39 ++++++++++++++++ + tools/ds-identify | 35 +++++++++----- + 4 files changed, 134 insertions(+), 15 deletions(-) + +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -36,6 +36,8 @@ RESOURCE_DISK_PATH = '/dev/disk/cloud/az + DEFAULT_PRIMARY_NIC = 'eth0' + LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases' + DEFAULT_FS = 'ext4' ++# DMI chassis-asset-tag is set static for all azure instances ++AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' + + + def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): +@@ -320,6 +322,11 @@ class DataSourceAzureNet(sources.DataSou + # azure removes/ejects the cdrom containing the ovf-env.xml + # file on reboot. So, in order to successfully reboot we + # need to look in the datadir and consider that valid ++ asset_tag = util.read_dmi_data('chassis-asset-tag') ++ if asset_tag != AZURE_CHASSIS_ASSET_TAG: ++ LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) ++ return False ++ asset_tag = util.read_dmi_data('chassis-asset-tag') + ddir = self.ds_cfg['data_dir'] + + candidates = [self.seed_dir] +@@ -694,7 +701,7 @@ def read_azure_ovf(contents): + try: + dom = minidom.parseString(contents) + except Exception as e: +- raise BrokenAzureDataSource("invalid xml: %s" % e) ++ raise BrokenAzureDataSource("Invalid ovf-env.xml: %s" % e) + + results = find_child(dom.documentElement, + lambda n: n.localName == "ProvisioningSection") +--- a/tests/unittests/test_datasource/test_azure.py ++++ b/tests/unittests/test_datasource/test_azure.py +@@ -76,7 +76,9 @@ def construct_valid_ovf_env(data=None, p + return content + + +-class TestAzureDataSource(TestCase): ++class TestAzureDataSource(CiTestCase): ++ ++ with_logs = True + + def setUp(self): + super(TestAzureDataSource, self).setUp() +@@ -160,6 +162,12 @@ scbus-1 on xpt0 bus 0 + + self.instance_id = 'test-instance-id' + ++ def _dmi_mocks(key): ++ if key == 'system-uuid': ++ return self.instance_id ++ elif key == 'chassis-asset-tag': ++ return '7783-7084-3265-9085-8269-3286-77' ++ + self.apply_patches([ + (dsaz, 'list_possible_azure_ds_devs', dsdevs), + (dsaz, 'invoke_agent', _invoke_agent), +@@ -170,7 +178,7 @@ scbus-1 on xpt0 bus 0 + (dsaz, 'set_hostname', mock.MagicMock()), + (dsaz, 'get_metadata_from_fabric', self.get_metadata_from_fabric), + (dsaz.util, 'read_dmi_data', mock.MagicMock( +- return_value=self.instance_id)), ++ side_effect=_dmi_mocks)), + ]) + + dsrc = dsaz.DataSourceAzureNet( +@@ -241,6 +249,23 @@ fdescfs /dev/fd fdes + res = get_path_dev_freebsd('/etc', mnt_list) + self.assertIsNotNone(res) + ++ @mock.patch('cloudinit.sources.DataSourceAzure.util.read_dmi_data') ++ def test_non_azure_dmi_chassis_asset_tag(self, m_read_dmi_data): ++ """Report non-azure when DMI's chassis asset tag doesn't match. ++ ++ Return False when the asset tag doesn't match Azure's static ++ AZURE_CHASSIS_ASSET_TAG. 
++ """ ++ # Return a non-matching asset tag value ++ nonazure_tag = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X' ++ m_read_dmi_data.return_value = nonazure_tag ++ dsrc = dsaz.DataSourceAzureNet( ++ {}, distro=None, paths=self.paths) ++ self.assertFalse(dsrc.get_data()) ++ self.assertEqual( ++ "Non-Azure DMI asset tag '{0}' discovered.\n".format(nonazure_tag), ++ self.logs.getvalue()) ++ + def test_basic_seed_dir(self): + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), +@@ -531,9 +556,17 @@ class TestAzureBounce(TestCase): + self.patches.enter_context( + mock.patch.object(dsaz, 'get_metadata_from_fabric', + mock.MagicMock(return_value={}))) ++ ++ def _dmi_mocks(key): ++ if key == 'system-uuid': ++ return 'test-instance-id' ++ elif key == 'chassis-asset-tag': ++ return '7783-7084-3265-9085-8269-3286-77' ++ raise RuntimeError('should not get here') ++ + self.patches.enter_context( + mock.patch.object(dsaz.util, 'read_dmi_data', +- mock.MagicMock(return_value='test-instance-id'))) ++ mock.MagicMock(side_effect=_dmi_mocks))) + + def setUp(self): + super(TestAzureBounce, self).setUp() +@@ -696,6 +729,33 @@ class TestAzureBounce(TestCase): + self.assertEqual(0, self.set_hostname.call_count) + + ++class TestLoadAzureDsDir(CiTestCase): ++ """Tests for load_azure_ds_dir.""" ++ ++ def setUp(self): ++ self.source_dir = self.tmp_dir() ++ super(TestLoadAzureDsDir, self).setUp() ++ ++ def test_missing_ovf_env_xml_raises_non_azure_datasource_error(self): ++ """load_azure_ds_dir raises an error When ovf-env.xml doesn't exit.""" ++ with self.assertRaises(dsaz.NonAzureDataSource) as context_manager: ++ dsaz.load_azure_ds_dir(self.source_dir) ++ self.assertEqual( ++ 'No ovf-env file found', ++ str(context_manager.exception)) ++ ++ def test_wb_invalid_ovf_env_xml_calls_read_azure_ovf(self): ++ """load_azure_ds_dir calls read_azure_ovf to parse the xml.""" ++ ovf_path = os.path.join(self.source_dir, 'ovf-env.xml') ++ with open(ovf_path, 'wb') as stream: ++ stream.write(b'invalid xml') ++ with self.assertRaises(dsaz.BrokenAzureDataSource) as context_manager: ++ dsaz.load_azure_ds_dir(self.source_dir) ++ self.assertEqual( ++ 'Invalid ovf-env.xml: syntax error: line 1, column 0', ++ str(context_manager.exception)) ++ ++ + class TestReadAzureOvf(TestCase): + def test_invalid_xml_raises_non_azure_ds(self): + invalid_xml = "" + construct_valid_ovf_env(data={}) +--- a/tests/unittests/test_ds_identify.py ++++ b/tests/unittests/test_ds_identify.py +@@ -39,9 +39,11 @@ RC_FOUND = 0 + RC_NOT_FOUND = 1 + DS_NONE = 'None' + ++P_CHASSIS_ASSET_TAG = "sys/class/dmi/id/chassis_asset_tag" + P_PRODUCT_NAME = "sys/class/dmi/id/product_name" + P_PRODUCT_SERIAL = "sys/class/dmi/id/product_serial" + P_PRODUCT_UUID = "sys/class/dmi/id/product_uuid" ++P_SEED_DIR = "var/lib/cloud/seed" + P_DSID_CFG = "etc/cloud/ds-identify.cfg" + + MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0} +@@ -160,6 +162,30 @@ class TestDsIdentify(CiTestCase): + _print_run_output(rc, out, err, cfg, files) + return rc, out, err, cfg, files + ++ def test_wb_print_variables(self): ++ """_print_info reports an array of discovered variables to stderr.""" ++ data = VALID_CFG['Azure-dmi-detection'] ++ _, _, err, _, _ = self._call_via_dict(data) ++ expected_vars = [ ++ 'DMI_PRODUCT_NAME', 'DMI_SYS_VENDOR', 'DMI_PRODUCT_SERIAL', ++ 'DMI_PRODUCT_UUID', 'PID_1_PRODUCT_NAME', 'DMI_CHASSIS_ASSET_TAG', ++ 'FS_LABELS', 'KERNEL_CMDLINE', 'VIRT', 'UNAME_KERNEL_NAME', ++ 'UNAME_KERNEL_RELEASE', 
'UNAME_KERNEL_VERSION', 'UNAME_MACHINE', ++ 'UNAME_NODENAME', 'UNAME_OPERATING_SYSTEM', 'DSNAME', 'DSLIST', ++ 'MODE', 'ON_FOUND', 'ON_MAYBE', 'ON_NOTFOUND'] ++ for var in expected_vars: ++ self.assertIn('{0}='.format(var), err) ++ ++ def test_azure_dmi_detection_from_chassis_asset_tag(self): ++ """Azure datasource is detected from DMI chassis-asset-tag""" ++ self._test_ds_found('Azure-dmi-detection') ++ ++ def test_azure_seed_file_detection(self): ++ """Azure datasource is detected due to presence of a seed file. ++ ++ The seed file tested is /var/lib/cloud/seed/azure/ovf-env.xml.""" ++ self._test_ds_found('Azure-seed-detection') ++ + def test_aws_ec2_hvm(self): + """EC2: hvm instances use dmi serial and uuid starting with 'ec2'.""" + self._test_ds_found('Ec2-hvm') +@@ -254,6 +280,19 @@ def _print_run_output(rc, out, err, cfg, + + + VALID_CFG = { ++ 'Azure-dmi-detection': { ++ 'ds': 'Azure', ++ 'files': { ++ P_CHASSIS_ASSET_TAG: '7783-7084-3265-9085-8269-3286-77\n', ++ } ++ }, ++ 'Azure-seed-detection': { ++ 'ds': 'Azure', ++ 'files': { ++ P_CHASSIS_ASSET_TAG: 'No-match\n', ++ os.path.join(P_SEED_DIR, 'azure', 'ovf-env.xml'): 'present\n', ++ } ++ }, + 'Ec2-hvm': { + 'ds': 'Ec2', + 'mocks': [{'name': 'detect_virt', 'RET': 'kvm', 'ret': 0}], +--- a/tools/ds-identify ++++ b/tools/ds-identify +@@ -85,6 +85,7 @@ DI_MAIN=${DI_MAIN:-main} + + DI_DEFAULT_POLICY="search,found=all,maybe=all,notfound=${DI_DISABLED}" + DI_DEFAULT_POLICY_NO_DMI="search,found=all,maybe=all,notfound=${DI_ENABLED}" ++DI_DMI_CHASSIS_ASSET_TAG="" + DI_DMI_PRODUCT_NAME="" + DI_DMI_SYS_VENDOR="" + DI_DMI_PRODUCT_SERIAL="" +@@ -258,6 +259,12 @@ read_kernel_cmdline() { + DI_KERNEL_CMDLINE="$cmdline" + } + ++read_dmi_chassis_asset_tag() { ++ cached "${DI_DMI_CHASSIS_ASSET_TAG}" && return ++ get_dmi_field chassis_asset_tag ++ DI_DMI_CHASSIS_ASSET_TAG="$_RET" ++} ++ + read_dmi_sys_vendor() { + cached "${DI_DMI_SYS_VENDOR}" && return + get_dmi_field sys_vendor +@@ -385,6 +392,14 @@ read_pid1_product_name() { + DI_PID_1_PRODUCT_NAME="$product_name" + } + ++dmi_chassis_asset_tag_matches() { ++ is_container && return 1 ++ case "${DI_DMI_CHASSIS_ASSET_TAG}" in ++ $1) return 0;; ++ esac ++ return 1 ++} ++ + dmi_product_name_matches() { + is_container && return 1 + case "${DI_DMI_PRODUCT_NAME}" in +@@ -401,11 +416,6 @@ dmi_product_serial_matches() { + return 1 + } + +-dmi_product_name_is() { +- is_container && return 1 +- [ "${DI_DMI_PRODUCT_NAME}" = "$1" ] +-} +- + dmi_sys_vendor_is() { + is_container && return 1 + [ "${DI_DMI_SYS_VENDOR}" = "$1" ] +@@ -477,7 +487,7 @@ dscheck_CloudStack() { + + dscheck_CloudSigma() { + # http://paste.ubuntu.com/23624795/ +- dmi_product_name_is "CloudSigma" && return $DS_FOUND ++ dmi_product_name_matches "CloudSigma" && return $DS_FOUND + return $DS_NOT_FOUND + } + +@@ -653,6 +663,8 @@ dscheck_Azure() { + # UUID="112D211272645f72" LABEL="rd_rdfe_stable.161212-1209" + # TYPE="udf">/dev/sr0 + # ++ local azure_chassis="7783-7084-3265-9085-8269-3286-77" ++ dmi_chassis_asset_tag_matches "${azure_chassis}" && return $DS_FOUND + check_seed_dir azure ovf-env.xml && return ${DS_FOUND} + + [ "${DI_VIRT}" = "microsoft" ] || return ${DS_NOT_FOUND} +@@ -785,7 +797,7 @@ dscheck_Ec2() { + } + + dscheck_GCE() { +- if dmi_product_name_is "Google Compute Engine"; then ++ if dmi_product_name_matches "Google Compute Engine"; then + return ${DS_FOUND} + fi + # product name is not guaranteed (LP: #1674861) +@@ -806,10 +818,10 @@ dscheck_OpenStack() { + return ${DS_NOT_FOUND} + fi + local nova="OpenStack Nova" 
compute="OpenStack Compute" +- if dmi_product_name_is "$nova"; then ++ if dmi_product_name_matches "$nova"; then + return ${DS_FOUND} + fi +- if dmi_product_name_is "$compute"; then ++ if dmi_product_name_matches "$compute"; then + # RDO installed nova (LP: #1675349). + return ${DS_FOUND} + fi +@@ -887,6 +899,7 @@ collect_info() { + read_config + read_datasource_list + read_dmi_sys_vendor ++ read_dmi_chassis_asset_tag + read_dmi_product_name + read_dmi_product_serial + read_dmi_product_uuid +@@ -901,7 +914,7 @@ print_info() { + _print_info() { + local n="" v="" vars="" + vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL" +- vars="$vars DMI_PRODUCT_UUID PID_1_PRODUCT_NAME" ++ vars="$vars DMI_PRODUCT_UUID PID_1_PRODUCT_NAME DMI_CHASSIS_ASSET_TAG" + vars="$vars FS_LABELS KERNEL_CMDLINE VIRT" + vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE UNAME_KERNEL_VERSION" + vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM" diff -Nru cloud-init-0.7.9-47-gc81ea53/debian/patches/cpick-ebc9ecbc-Azure-Add-network-config-Refactor-net-layer-to-handle cloud-init-0.7.9-153-g16a7302f/debian/patches/cpick-ebc9ecbc-Azure-Add-network-config-Refactor-net-layer-to-handle --- cloud-init-0.7.9-47-gc81ea53/debian/patches/cpick-ebc9ecbc-Azure-Add-network-config-Refactor-net-layer-to-handle 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/debian/patches/cpick-ebc9ecbc-Azure-Add-network-config-Refactor-net-layer-to-handle 2017-06-28 18:11:19.000000000 +0000 @@ -0,0 +1,1474 @@ +From ebc9ecbc8a76bdf511a456fb72339a7eb4c20568 Mon Sep 17 00:00:00 2001 +From: Ryan Harper +Date: Tue, 20 Jun 2017 17:06:43 -0500 +Subject: [PATCH] Azure: Add network-config, Refactor net layer to handle + duplicate macs. + +On systems with network devices with duplicate mac addresses, cloud-init +will fail to rename the devices according to the specified network +configuration. Refactor net layer to search by device driver and device +id if available. Azure systems may have duplicate mac addresses by +design. + +Update Azure datasource to run at init-local time and let Azure datasource +generate a fallback networking config to handle advanced networking +configurations. + +Lastly, add a 'setup' method to the datasources that is called before +userdata/vendordata is processed but after networking is up. That is +used here on Azure to interact with the 'fabric'. +--- + cloudinit/cmd/main.py | 3 + + cloudinit/net/__init__.py | 181 ++++++++-- + cloudinit/net/eni.py | 2 + + cloudinit/net/renderer.py | 4 +- + cloudinit/net/udev.py | 7 +- + cloudinit/sources/DataSourceAzure.py | 114 +++++- + cloudinit/sources/__init__.py | 15 +- + cloudinit/stages.py | 5 + + tests/unittests/test_datasource/test_azure.py | 174 +++++++-- + tests/unittests/test_datasource/test_common.py | 2 +- + tests/unittests/test_net.py | 478 ++++++++++++++++++++++++- + 11 files changed, 887 insertions(+), 98 deletions(-) + +--- a/cloudinit/cmd/main.py ++++ b/cloudinit/cmd/main.py +@@ -373,6 +373,9 @@ def main_init(name, args): + LOG.debug("[%s] %s is in local mode, will apply init modules now.", + mode, init.datasource) + ++ # Give the datasource a chance to use network resources. ++ # This is used on Azure to communicate with the fabric over network. 
++ init.setup_datasource() + # update fully realizes user-data (pulling in #include if necessary) + init.update() + # Stage 7 +--- a/cloudinit/net/__init__.py ++++ b/cloudinit/net/__init__.py +@@ -86,6 +86,10 @@ def is_bridge(devname): + return os.path.exists(sys_dev_path(devname, "bridge")) + + ++def is_bond(devname): ++ return os.path.exists(sys_dev_path(devname, "bonding")) ++ ++ + def is_vlan(devname): + uevent = str(read_sys_net_safe(devname, "uevent")) + return 'DEVTYPE=vlan' in uevent.splitlines() +@@ -113,6 +117,26 @@ def is_present(devname): + return os.path.exists(sys_dev_path(devname)) + + ++def device_driver(devname): ++ """Return the device driver for net device named 'devname'.""" ++ driver = None ++ driver_path = sys_dev_path(devname, "device/driver") ++ # driver is a symlink to the driver *dir* ++ if os.path.islink(driver_path): ++ driver = os.path.basename(os.readlink(driver_path)) ++ ++ return driver ++ ++ ++def device_devid(devname): ++ """Return the device id string for net device named 'devname'.""" ++ dev_id = read_sys_net_safe(devname, "device/device") ++ if dev_id is False: ++ return None ++ ++ return dev_id ++ ++ + def get_devicelist(): + return os.listdir(SYS_CLASS_NET) + +@@ -127,12 +151,21 @@ def is_disabled_cfg(cfg): + return cfg.get('config') == "disabled" + + +-def generate_fallback_config(): ++def generate_fallback_config(blacklist_drivers=None, config_driver=None): + """Determine which attached net dev is most likely to have a connection and + generate network state to run dhcp on that interface""" ++ ++ if not config_driver: ++ config_driver = False ++ ++ if not blacklist_drivers: ++ blacklist_drivers = [] ++ + # get list of interfaces that could have connections + invalid_interfaces = set(['lo']) +- potential_interfaces = set(get_devicelist()) ++ potential_interfaces = set([device for device in get_devicelist() ++ if device_driver(device) not in ++ blacklist_drivers]) + potential_interfaces = potential_interfaces.difference(invalid_interfaces) + # sort into interfaces with carrier, interfaces which could have carrier, + # and ignore interfaces that are definitely disconnected +@@ -144,6 +177,9 @@ def generate_fallback_config(): + if is_bridge(interface): + # skip any bridges + continue ++ if is_bond(interface): ++ # skip any bonds ++ continue + carrier = read_sys_net_int(interface, 'carrier') + if carrier: + connected.append(interface) +@@ -183,9 +219,18 @@ def generate_fallback_config(): + break + if target_mac and target_name: + nconf = {'config': [], 'version': 1} +- nconf['config'].append( +- {'type': 'physical', 'name': target_name, +- 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]}) ++ cfg = {'type': 'physical', 'name': target_name, ++ 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]} ++ # inject the device driver name, dev_id into config if enabled and ++ # device has a valid device driver value ++ if config_driver: ++ driver = device_driver(target_name) ++ if driver: ++ cfg['params'] = { ++ 'driver': driver, ++ 'device_id': device_devid(target_name), ++ } ++ nconf['config'].append(cfg) + return nconf + else: + # can't read any interfaces addresses (or there are none); give up +@@ -206,10 +251,16 @@ def apply_network_config_names(netcfg, s + if ent.get('type') != 'physical': + continue + mac = ent.get('mac_address') +- name = ent.get('name') + if not mac: + continue +- renames.append([mac, name]) ++ name = ent.get('name') ++ driver = ent.get('params', {}).get('driver') ++ device_id = ent.get('params', {}).get('device_id') ++ if 
not driver: ++ driver = device_driver(name) ++ if not device_id: ++ device_id = device_devid(name) ++ renames.append([mac, name, driver, device_id]) + + return _rename_interfaces(renames) + +@@ -234,15 +285,27 @@ def _get_current_rename_info(check_downa + """Collect information necessary for rename_interfaces. + + returns a dictionary by mac address like: +- {mac: +- {'name': name +- 'up': boolean: is_up(name), ++ {name: ++ { + 'downable': None or boolean indicating that the +- device has only automatically assigned ip addrs.}} ++ device has only automatically assigned ip addrs. ++ 'device_id': Device id value (if it has one) ++ 'driver': Device driver (if it has one) ++ 'mac': mac address ++ 'name': name ++ 'up': boolean: is_up(name) ++ }} + """ +- bymac = {} +- for mac, name in get_interfaces_by_mac().items(): +- bymac[mac] = {'name': name, 'up': is_up(name), 'downable': None} ++ cur_info = {} ++ for (name, mac, driver, device_id) in get_interfaces(): ++ cur_info[name] = { ++ 'downable': None, ++ 'device_id': device_id, ++ 'driver': driver, ++ 'mac': mac, ++ 'name': name, ++ 'up': is_up(name), ++ } + + if check_downable: + nmatch = re.compile(r"[0-9]+:\s+(\w+)[@:]") +@@ -254,11 +317,11 @@ def _get_current_rename_info(check_downa + for bytes_out in (ipv6, ipv4): + nics_with_addresses.update(nmatch.findall(bytes_out)) + +- for d in bymac.values(): ++ for d in cur_info.values(): + d['downable'] = (d['up'] is False or + d['name'] not in nics_with_addresses) + +- return bymac ++ return cur_info + + + def _rename_interfaces(renames, strict_present=True, strict_busy=True, +@@ -271,15 +334,15 @@ def _rename_interfaces(renames, strict_p + if current_info is None: + current_info = _get_current_rename_info() + +- cur_bymac = {} +- for mac, data in current_info.items(): ++ cur_info = {} ++ for name, data in current_info.items(): + cur = data.copy() +- cur['mac'] = mac +- cur_bymac[mac] = cur ++ cur['name'] = name ++ cur_info[name] = cur + + def update_byname(bymac): + return dict((data['name'], data) +- for data in bymac.values()) ++ for data in cur_info.values()) + + def rename(cur, new): + util.subp(["ip", "link", "set", cur, "name", new], capture=True) +@@ -293,14 +356,48 @@ def _rename_interfaces(renames, strict_p + ops = [] + errors = [] + ups = [] +- cur_byname = update_byname(cur_bymac) ++ cur_byname = update_byname(cur_info) + tmpname_fmt = "cirename%d" + tmpi = -1 + +- for mac, new_name in renames: +- cur = cur_bymac.get(mac, {}) +- cur_name = cur.get('name') ++ def entry_match(data, mac, driver, device_id): ++ """match if set and in data""" ++ if mac and driver and device_id: ++ return (data['mac'] == mac and ++ data['driver'] == driver and ++ data['device_id'] == device_id) ++ elif mac and driver: ++ return (data['mac'] == mac and ++ data['driver'] == driver) ++ elif mac: ++ return (data['mac'] == mac) ++ ++ return False ++ ++ def find_entry(mac, driver, device_id): ++ match = [data for data in cur_info.values() ++ if entry_match(data, mac, driver, device_id)] ++ if len(match): ++ if len(match) > 1: ++ msg = ('Failed to match a single device. Matched devices "%s"' ++ ' with search values "(mac:%s driver:%s device_id:%s)"' ++ % (match, mac, driver, device_id)) ++ raise ValueError(msg) ++ return match[0] ++ ++ return None ++ ++ for mac, new_name, driver, device_id in renames: + cur_ops = [] ++ cur = find_entry(mac, driver, device_id) ++ if not cur: ++ if strict_present: ++ errors.append( ++ "[nic not present] Cannot rename mac=%s to %s" ++ ", not available." 
% (mac, new_name)) ++ continue ++ ++ cur_name = cur.get('name') + if cur_name == new_name: + # nothing to do + continue +@@ -340,13 +437,13 @@ def _rename_interfaces(renames, strict_p + + cur_ops.append(("rename", mac, new_name, (new_name, tmp_name))) + target['name'] = tmp_name +- cur_byname = update_byname(cur_bymac) ++ cur_byname = update_byname(cur_info) + if target['up']: + ups.append(("up", mac, new_name, (tmp_name,))) + + cur_ops.append(("rename", mac, new_name, (cur['name'], new_name))) + cur['name'] = new_name +- cur_byname = update_byname(cur_bymac) ++ cur_byname = update_byname(cur_info) + ops += cur_ops + + opmap = {'rename': rename, 'down': down, 'up': up} +@@ -415,6 +512,36 @@ def get_interfaces_by_mac(): + return ret + + ++def get_interfaces(): ++ """Return list of interface tuples (name, mac, driver, device_id) ++ ++ Bridges and any devices that have a 'stolen' mac are excluded.""" ++ try: ++ devs = get_devicelist() ++ except OSError as e: ++ if e.errno == errno.ENOENT: ++ devs = [] ++ else: ++ raise ++ ret = [] ++ empty_mac = '00:00:00:00:00:00' ++ for name in devs: ++ if not interface_has_own_mac(name): ++ continue ++ if is_bridge(name): ++ continue ++ if is_vlan(name): ++ continue ++ mac = get_interface_mac(name) ++ # some devices may not have a mac (tun0) ++ if not mac: ++ continue ++ if mac == empty_mac and name != 'lo': ++ continue ++ ret.append((name, mac, device_driver(name), device_devid(name))) ++ return ret ++ ++ + class RendererNotFoundError(RuntimeError): + pass + +--- a/cloudinit/net/eni.py ++++ b/cloudinit/net/eni.py +@@ -68,6 +68,8 @@ def _iface_add_attrs(iface, index): + content = [] + ignore_map = [ + 'control', ++ 'device_id', ++ 'driver', + 'index', + 'inet', + 'mode', +--- a/cloudinit/net/renderer.py ++++ b/cloudinit/net/renderer.py +@@ -34,8 +34,10 @@ class Renderer(object): + for iface in network_state.iter_interfaces(filter_by_physical): + # for physical interfaces write out a persist net udev rule + if 'name' in iface and iface.get('mac_address'): ++ driver = iface.get('driver', None) + content.write(generate_udev_rule(iface['name'], +- iface['mac_address'])) ++ iface['mac_address'], ++ driver=driver)) + return content.getvalue() + + @abc.abstractmethod +--- a/cloudinit/net/udev.py ++++ b/cloudinit/net/udev.py +@@ -23,7 +23,7 @@ def compose_udev_setting(key, value): + return '%s="%s"' % (key, value) + + +-def generate_udev_rule(interface, mac): ++def generate_udev_rule(interface, mac, driver=None): + """Return a udev rule to set the name of network interface with `mac`. 
+ + The rule ends up as a single line looking something like: +@@ -31,10 +31,13 @@ def generate_udev_rule(interface, mac): + SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", + ATTR{address}="ff:ee:dd:cc:bb:aa", NAME="eth0" + """ ++ if not driver: ++ driver = '?*' ++ + rule = ', '.join([ + compose_udev_equality('SUBSYSTEM', 'net'), + compose_udev_equality('ACTION', 'add'), +- compose_udev_equality('DRIVERS', '?*'), ++ compose_udev_equality('DRIVERS', driver), + compose_udev_attr_equality('address', mac), + compose_udev_setting('NAME', interface), + ]) +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -16,6 +16,7 @@ from xml.dom import minidom + import xml.etree.ElementTree as ET + + from cloudinit import log as logging ++from cloudinit import net + from cloudinit import sources + from cloudinit.sources.helpers.azure import get_metadata_from_fabric + from cloudinit import util +@@ -240,7 +241,9 @@ def temporary_hostname(temp_hostname, cf + set_hostname(previous_hostname, hostname_command) + + +-class DataSourceAzureNet(sources.DataSource): ++class DataSourceAzure(sources.DataSource): ++ _negotiated = False ++ + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.seed_dir = os.path.join(paths.seed_dir, 'azure') +@@ -250,6 +253,7 @@ class DataSourceAzureNet(sources.DataSou + util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}), + BUILTIN_DS_CONFIG]) + self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file') ++ self._network_config = None + + def __str__(self): + root = sources.DataSource.__str__(self) +@@ -326,6 +330,7 @@ class DataSourceAzureNet(sources.DataSou + if asset_tag != AZURE_CHASSIS_ASSET_TAG: + LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) + return False ++ + ddir = self.ds_cfg['data_dir'] + + candidates = [self.seed_dir] +@@ -370,13 +375,14 @@ class DataSourceAzureNet(sources.DataSou + LOG.debug("using files cached in %s", ddir) + + # azure / hyper-v provides random data here ++ # TODO. find the seed on FreeBSD platform ++ # now update ds_cfg to reflect contents pass in config + if not util.is_FreeBSD(): + seed = util.load_file("/sys/firmware/acpi/tables/OEM0", + quiet=True, decode=False) + if seed: + self.metadata['random_seed'] = seed +- # TODO. find the seed on FreeBSD platform +- # now update ds_cfg to reflect contents pass in config ++ + user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) + self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) + +@@ -384,6 +390,40 @@ class DataSourceAzureNet(sources.DataSou + # the directory to be protected. 
+ write_files(ddir, files, dirmode=0o700) + ++ self.metadata['instance-id'] = util.read_dmi_data('system-uuid') ++ ++ return True ++ ++ def device_name_to_device(self, name): ++ return self.ds_cfg['disk_aliases'].get(name) ++ ++ def get_config_obj(self): ++ return self.cfg ++ ++ def check_instance_id(self, sys_cfg): ++ # quickly (local check only) if self.instance_id is still valid ++ return sources.instance_id_matches_system_uuid(self.get_instance_id()) ++ ++ def setup(self, is_new_instance): ++ if self._negotiated is False: ++ LOG.debug("negotiating for %s (new_instance=%s)", ++ self.get_instance_id(), is_new_instance) ++ fabric_data = self._negotiate() ++ LOG.debug("negotiating returned %s", fabric_data) ++ if fabric_data: ++ self.metadata.update(fabric_data) ++ self._negotiated = True ++ else: ++ LOG.debug("negotiating already done for %s", ++ self.get_instance_id()) ++ ++ def _negotiate(self): ++ """Negotiate with fabric and return data from it. ++ ++ On success, returns a dictionary including 'public_keys'. ++ On failure, returns False. ++ """ ++ + if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN: + self.bounce_network_with_azure_hostname() + +@@ -393,31 +433,64 @@ class DataSourceAzureNet(sources.DataSou + else: + metadata_func = self.get_metadata_from_agent + ++ LOG.debug("negotiating with fabric via agent command %s", ++ self.ds_cfg['agent_command']) + try: + fabric_data = metadata_func() + except Exception as exc: +- LOG.info("Error communicating with Azure fabric; assume we aren't" +- " on Azure.", exc_info=True) ++ LOG.warning( ++ "Error communicating with Azure fabric; You may experience." ++ "connectivity issues.", exc_info=True) + return False +- self.metadata['instance-id'] = util.read_dmi_data('system-uuid') +- self.metadata.update(fabric_data) +- +- return True +- +- def device_name_to_device(self, name): +- return self.ds_cfg['disk_aliases'].get(name) + +- def get_config_obj(self): +- return self.cfg +- +- def check_instance_id(self, sys_cfg): +- # quickly (local check only) if self.instance_id is still valid +- return sources.instance_id_matches_system_uuid(self.get_instance_id()) ++ return fabric_data + + def activate(self, cfg, is_new_instance): + address_ephemeral_resize(is_new_instance=is_new_instance) + return + ++ @property ++ def network_config(self): ++ """Generate a network config like net.generate_fallback_network() with ++ the following execptions. ++ ++ 1. Probe the drivers of the net-devices present and inject them in ++ the network configuration under params: driver: value ++ 2. If the driver value is 'mlx4_core', the control mode should be ++ set to manual. 
++           for now we want to ensure the device gets named but does not
++           break any network configuration
++        """
++        blacklist = ['mlx4_core']
++        if not self._network_config:
++            LOG.debug('Azure: generating fallback configuration')
++            # generate a network config, blacklist picking any mlx4_core devs
++            netconfig = net.generate_fallback_config(
++                blacklist_drivers=blacklist, config_driver=True)
++
++            # if we have any blacklisted devices, update the network_config to
++            # include the device, mac, and driver values, but with no ip
++            # config; this ensures udev rules are generated but won't affect
++            # ip configuration
++            bl_found = 0
++            for bl_dev in [dev for dev in net.get_devicelist()
++                           if net.device_driver(dev) in blacklist]:
++                bl_found += 1
++                cfg = {
++                    'type': 'physical',
++                    'name': 'vf%d' % bl_found,
++                    'mac_address': net.get_interface_mac(bl_dev),
++                    'params': {
++                        'driver': net.device_driver(bl_dev),
++                        'device_id': net.device_devid(bl_dev),
++                    },
++                }
++                netconfig['config'].append(cfg)
++
++            self._network_config = netconfig
++
++        return self._network_config
++
+
+ def _partitions_on_device(devpath, maxnum=16):
+     # return a list of tuples (ptnum, path) for each part on devpath
+@@ -840,9 +913,12 @@ class NonAzureDataSource(Exception):
+     pass
+
+
++# Legacy: Must be present in case we load an old pkl object
++DataSourceAzureNet = DataSourceAzure
++
+ # Used to match classes to dependencies
+ datasources = [
+-    (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
++    (DataSourceAzure, (sources.DEP_FILESYSTEM, )),
+ ]
+
+
+--- a/cloudinit/sources/__init__.py
++++ b/cloudinit/sources/__init__.py
+@@ -251,10 +251,23 @@ class DataSource(object):
+     def first_instance_boot(self):
+         return
+
++    def setup(self, is_new_instance):
++        """setup(is_new_instance)
++
++        This is called before user-data and vendor-data have been processed.
++
++        Unless the datasource has set mode to 'local', then networking
++        per 'fallback' or per 'network_config' will have been written and
++        brought up in the OS at this point.
++        """
++        return
++
+     def activate(self, cfg, is_new_instance):
+         """activate(cfg, is_new_instance)
+
+-        This is called before the init_modules will be called.
++        This is called before the init_modules will be called but after
++        the user-data and vendor-data have been fully processed.
++
+         The cfg is fully up to date config, it contains a merged view of
+         system config, datasource config, user config, vendor config.
+         It should be used rather than the sys_cfg passed to __init__.
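Taken together with get_data, the setup and activate hooks documented above
give a datasource three distinct points in the boot flow. A toy subclass
sketching the call order; illustrative only, not part of this patch:

.. code-block:: python

    from cloudinit import sources

    class DataSourceToy(sources.DataSource):
        """Illustrative only: shows hook ordering, not a real datasource."""

        def get_data(self):
            # 1) discovery; for a 'local' mode source this can run before
            #    networking has been configured
            self.metadata = {'instance-id': 'i-toy-example'}
            return True

        def setup(self, is_new_instance):
            # 2) networking is up, user-data/vendor-data not yet processed;
            #    DataSourceAzure uses this hook to negotiate with the fabric
            pass

        def activate(self, cfg, is_new_instance):
            # 3) user-data/vendor-data fully processed, runs just before
            #    init_modules; cfg is the fully merged configuration
            pass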
+--- a/cloudinit/stages.py ++++ b/cloudinit/stages.py +@@ -362,6 +362,11 @@ class Init(object): + self._store_userdata() + self._store_vendordata() + ++ def setup_datasource(self): ++ if self.datasource is None: ++ raise RuntimeError("Datasource is None, cannot setup.") ++ self.datasource.setup(is_new_instance=self.is_new_instance()) ++ + def activate_datasource(self): + if self.datasource is None: + raise RuntimeError("Datasource is None, cannot activate.") +--- a/tests/unittests/test_datasource/test_azure.py ++++ b/tests/unittests/test_datasource/test_azure.py +@@ -181,13 +181,19 @@ scbus-1 on xpt0 bus 0 + side_effect=_dmi_mocks)), + ]) + +- dsrc = dsaz.DataSourceAzureNet( ++ dsrc = dsaz.DataSourceAzure( + data.get('sys_cfg', {}), distro=None, paths=self.paths) + if agent_command is not None: + dsrc.ds_cfg['agent_command'] = agent_command + + return dsrc + ++ def _get_and_setup(self, dsrc): ++ ret = dsrc.get_data() ++ if ret: ++ dsrc.setup(True) ++ return ret ++ + def xml_equals(self, oxml, nxml): + """Compare two sets of XML to make sure they are equal""" + +@@ -259,7 +265,7 @@ fdescfs /dev/fd fdes + # Return a non-matching asset tag value + nonazure_tag = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X' + m_read_dmi_data.return_value = nonazure_tag +- dsrc = dsaz.DataSourceAzureNet( ++ dsrc = dsaz.DataSourceAzure( + {}, distro=None, paths=self.paths) + self.assertFalse(dsrc.get_data()) + self.assertEqual( +@@ -298,7 +304,7 @@ fdescfs /dev/fd fdes + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + + dsrc = self._get_ds(data) +- ret = dsrc.get_data() ++ ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + self.assertEqual(data['agent_invoked'], cfg['agent_command']) + +@@ -311,7 +317,7 @@ fdescfs /dev/fd fdes + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + + dsrc = self._get_ds(data) +- ret = dsrc.get_data() ++ ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + self.assertEqual(data['agent_invoked'], cfg['agent_command']) + +@@ -321,7 +327,7 @@ fdescfs /dev/fd fdes + 'sys_cfg': sys_cfg} + + dsrc = self._get_ds(data) +- ret = dsrc.get_data() ++ ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + self.assertEqual(data['agent_invoked'], '_COMMAND') + +@@ -393,7 +399,7 @@ fdescfs /dev/fd fdes + pubkeys=pubkeys)} + + dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) +- ret = dsrc.get_data() ++ ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + for mypk in mypklist: + self.assertIn(mypk, dsrc.cfg['_pubkeys']) +@@ -408,7 +414,7 @@ fdescfs /dev/fd fdes + pubkeys=pubkeys)} + + dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) +- ret = dsrc.get_data() ++ ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + + for mypk in mypklist: +@@ -424,7 +430,7 @@ fdescfs /dev/fd fdes + pubkeys=pubkeys)} + + dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) +- ret = dsrc.get_data() ++ ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + + for mypk in mypklist: +@@ -518,18 +524,20 @@ fdescfs /dev/fd fdes + dsrc.get_data() + + def test_exception_fetching_fabric_data_doesnt_propagate(self): +- ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) +- ds.ds_cfg['agent_command'] = '__builtin__' ++ """Errors communicating with fabric should warn, but return True.""" ++ dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) ++ dsrc.ds_cfg['agent_command'] = '__builtin__' + self.get_metadata_from_fabric.side_effect = Exception +- self.assertFalse(ds.get_data()) ++ ret = self._get_and_setup(dsrc) ++ self.assertTrue(ret) + + 
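The _get_and_setup test helper above exists because fabric negotiation moved
out of get_data and into setup, where it is guarded by _negotiated. A
self-contained toy model of that guard (assumed behavior, not the patch code):

.. code-block:: python

    class FakeNegotiator(object):
        # toy model of DataSourceAzure's once-per-boot negotiation guard
        _negotiated = False

        def _negotiate(self):
            # stands in for the agent / get_metadata_from_fabric call
            return {'public_keys': []}

        def setup(self, is_new_instance):
            if self._negotiated is False:
                fabric_data = self._negotiate()
                if fabric_data:
                    # a real datasource merges this into self.metadata
                    self.fabric_data = fabric_data
                self._negotiated = True
            # later calls are no-ops for the rest of this boot

    ds = FakeNegotiator()
    ds.setup(is_new_instance=True)   # negotiates with the (fake) fabric
    ds.setup(is_new_instance=True)   # second call does nothing
    assert ds._negotiated is True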
def test_fabric_data_included_in_metadata(self): +- ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) +- ds.ds_cfg['agent_command'] = '__builtin__' ++ dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) ++ dsrc.ds_cfg['agent_command'] = '__builtin__' + self.get_metadata_from_fabric.return_value = {'test': 'value'} +- ret = ds.get_data() ++ ret = self._get_and_setup(dsrc) + self.assertTrue(ret) +- self.assertEqual('value', ds.metadata['test']) ++ self.assertEqual('value', dsrc.metadata['test']) + + def test_instance_id_from_dmidecode_used(self): + ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) +@@ -542,6 +550,84 @@ fdescfs /dev/fd fdes + ds.get_data() + self.assertEqual(self.instance_id, ds.metadata['instance-id']) + ++ @mock.patch('cloudinit.net.get_interface_mac') ++ @mock.patch('cloudinit.net.get_devicelist') ++ @mock.patch('cloudinit.net.device_driver') ++ @mock.patch('cloudinit.net.generate_fallback_config') ++ def test_network_config(self, mock_fallback, mock_dd, ++ mock_devlist, mock_get_mac): ++ odata = {'HostName': "myhost", 'UserName': "myuser"} ++ data = {'ovfcontent': construct_valid_ovf_env(data=odata), ++ 'sys_cfg': {}} ++ ++ fallback_config = { ++ 'version': 1, ++ 'config': [{ ++ 'type': 'physical', 'name': 'eth0', ++ 'mac_address': '00:11:22:33:44:55', ++ 'params': {'driver': 'hv_netsvc'}, ++ 'subnets': [{'type': 'dhcp'}], ++ }] ++ } ++ mock_fallback.return_value = fallback_config ++ ++ mock_devlist.return_value = ['eth0'] ++ mock_dd.return_value = ['hv_netsvc'] ++ mock_get_mac.return_value = '00:11:22:33:44:55' ++ ++ dsrc = self._get_ds(data) ++ ret = dsrc.get_data() ++ self.assertTrue(ret) ++ ++ netconfig = dsrc.network_config ++ self.assertEqual(netconfig, fallback_config) ++ mock_fallback.assert_called_with(blacklist_drivers=['mlx4_core'], ++ config_driver=True) ++ ++ @mock.patch('cloudinit.net.get_interface_mac') ++ @mock.patch('cloudinit.net.get_devicelist') ++ @mock.patch('cloudinit.net.device_driver') ++ @mock.patch('cloudinit.net.generate_fallback_config') ++ def test_network_config_blacklist(self, mock_fallback, mock_dd, ++ mock_devlist, mock_get_mac): ++ odata = {'HostName': "myhost", 'UserName': "myuser"} ++ data = {'ovfcontent': construct_valid_ovf_env(data=odata), ++ 'sys_cfg': {}} ++ ++ fallback_config = { ++ 'version': 1, ++ 'config': [{ ++ 'type': 'physical', 'name': 'eth0', ++ 'mac_address': '00:11:22:33:44:55', ++ 'params': {'driver': 'hv_netsvc'}, ++ 'subnets': [{'type': 'dhcp'}], ++ }] ++ } ++ blacklist_config = { ++ 'type': 'physical', ++ 'name': 'eth1', ++ 'mac_address': '00:11:22:33:44:55', ++ 'params': {'driver': 'mlx4_core'} ++ } ++ mock_fallback.return_value = fallback_config ++ ++ mock_devlist.return_value = ['eth0', 'eth1'] ++ mock_dd.side_effect = [ ++ 'hv_netsvc', # list composition, skipped ++ 'mlx4_core', # list composition, match ++ 'mlx4_core', # config get driver name ++ ] ++ mock_get_mac.return_value = '00:11:22:33:44:55' ++ ++ dsrc = self._get_ds(data) ++ ret = dsrc.get_data() ++ self.assertTrue(ret) ++ ++ netconfig = dsrc.network_config ++ expected_config = fallback_config ++ expected_config['config'].append(blacklist_config) ++ self.assertEqual(netconfig, expected_config) ++ + + class TestAzureBounce(TestCase): + +@@ -591,12 +677,18 @@ class TestAzureBounce(TestCase): + if ovfcontent is not None: + populate_dir(os.path.join(self.paths.seed_dir, "azure"), + {'ovf-env.xml': ovfcontent}) +- dsrc = dsaz.DataSourceAzureNet( ++ dsrc = dsaz.DataSourceAzure( + {}, distro=None, paths=self.paths) + if 
agent_command is not None: + dsrc.ds_cfg['agent_command'] = agent_command + return dsrc + ++ def _get_and_setup(self, dsrc): ++ ret = dsrc.get_data() ++ if ret: ++ dsrc.setup(True) ++ return ret ++ + def get_ovf_env_with_dscfg(self, hostname, cfg): + odata = { + 'HostName': hostname, +@@ -640,17 +732,20 @@ class TestAzureBounce(TestCase): + host_name = 'unchanged-host-name' + self.get_hostname.return_value = host_name + cfg = {'hostname_bounce': {'policy': 'force'}} +- self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg), +- agent_command=['not', '__builtin__']).get_data() ++ dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg), ++ agent_command=['not', '__builtin__']) ++ ret = self._get_and_setup(dsrc) ++ self.assertTrue(ret) + self.assertEqual(1, perform_hostname_bounce.call_count) + + def test_different_hostnames_sets_hostname(self): + expected_hostname = 'azure-expected-host-name' + self.get_hostname.return_value = 'default-host-name' +- self._get_ds( ++ dsrc = self._get_ds( + self.get_ovf_env_with_dscfg(expected_hostname, {}), +- agent_command=['not', '__builtin__'], +- ).get_data() ++ agent_command=['not', '__builtin__']) ++ ret = self._get_and_setup(dsrc) ++ self.assertTrue(ret) + self.assertEqual(expected_hostname, + self.set_hostname.call_args_list[0][0][0]) + +@@ -659,19 +754,21 @@ class TestAzureBounce(TestCase): + self, perform_hostname_bounce): + expected_hostname = 'azure-expected-host-name' + self.get_hostname.return_value = 'default-host-name' +- self._get_ds( ++ dsrc = self._get_ds( + self.get_ovf_env_with_dscfg(expected_hostname, {}), +- agent_command=['not', '__builtin__'], +- ).get_data() ++ agent_command=['not', '__builtin__']) ++ ret = self._get_and_setup(dsrc) ++ self.assertTrue(ret) + self.assertEqual(1, perform_hostname_bounce.call_count) + + def test_different_hostnames_sets_hostname_back(self): + initial_host_name = 'default-host-name' + self.get_hostname.return_value = initial_host_name +- self._get_ds( ++ dsrc = self._get_ds( + self.get_ovf_env_with_dscfg('some-host-name', {}), +- agent_command=['not', '__builtin__'], +- ).get_data() ++ agent_command=['not', '__builtin__']) ++ ret = self._get_and_setup(dsrc) ++ self.assertTrue(ret) + self.assertEqual(initial_host_name, + self.set_hostname.call_args_list[-1][0][0]) + +@@ -681,10 +778,11 @@ class TestAzureBounce(TestCase): + perform_hostname_bounce.side_effect = Exception + initial_host_name = 'default-host-name' + self.get_hostname.return_value = initial_host_name +- self._get_ds( ++ dsrc = self._get_ds( + self.get_ovf_env_with_dscfg('some-host-name', {}), +- agent_command=['not', '__builtin__'], +- ).get_data() ++ agent_command=['not', '__builtin__']) ++ ret = self._get_and_setup(dsrc) ++ self.assertTrue(ret) + self.assertEqual(initial_host_name, + self.set_hostname.call_args_list[-1][0][0]) + +@@ -695,7 +793,9 @@ class TestAzureBounce(TestCase): + self.get_hostname.return_value = old_hostname + cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}} + data = self.get_ovf_env_with_dscfg(hostname, cfg) +- self._get_ds(data, agent_command=['not', '__builtin__']).get_data() ++ dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) ++ ret = self._get_and_setup(dsrc) ++ self.assertTrue(ret) + self.assertEqual(1, self.subp.call_count) + bounce_env = self.subp.call_args[1]['env'] + self.assertEqual(interface, bounce_env['interface']) +@@ -707,7 +807,9 @@ class TestAzureBounce(TestCase): + dsaz.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd + cfg = {'hostname_bounce': 
{'policy': 'force'}} + data = self.get_ovf_env_with_dscfg('some-hostname', cfg) +- self._get_ds(data, agent_command=['not', '__builtin__']).get_data() ++ dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) ++ ret = self._get_and_setup(dsrc) ++ self.assertTrue(ret) + self.assertEqual(1, self.subp.call_count) + bounce_args = self.subp.call_args[1]['args'] + self.assertEqual(cmd, bounce_args) +@@ -963,4 +1065,12 @@ class TestCanDevBeReformatted(CiTestCase + self.assertEqual(False, value) + self.assertIn("3 or more", msg.lower()) + ++ ++class TestAzureNetExists(CiTestCase): ++ def test_azure_net_must_exist_for_legacy_objpkl(self): ++ """DataSourceAzureNet must exist for old obj.pkl files ++ that reference it.""" ++ self.assertTrue(hasattr(dsaz, "DataSourceAzureNet")) ++ ++ + # vi: ts=4 expandtab +--- a/tests/unittests/test_datasource/test_common.py ++++ b/tests/unittests/test_datasource/test_common.py +@@ -26,6 +26,7 @@ from cloudinit.sources import DataSource + from .. import helpers as test_helpers + + DEFAULT_LOCAL = [ ++ Azure.DataSourceAzure, + CloudSigma.DataSourceCloudSigma, + ConfigDrive.DataSourceConfigDrive, + DigitalOcean.DataSourceDigitalOcean, +@@ -37,7 +38,6 @@ DEFAULT_LOCAL = [ + + DEFAULT_NETWORK = [ + AltCloud.DataSourceAltCloud, +- Azure.DataSourceAzureNet, + Bigstep.DataSourceBigstep, + CloudStack.DataSourceCloudStack, + DSNone.DataSourceNone, +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -789,38 +789,176 @@ CONFIG_V1_EXPLICIT_LOOPBACK = { + 'subnets': [{'control': 'auto', 'type': 'loopback'}]}, + ]} + ++DEFAULT_DEV_ATTRS = { ++ 'eth1000': { ++ "bridge": False, ++ "carrier": False, ++ "dormant": False, ++ "operstate": "down", ++ "address": "07-1C-C6-75-A4-BE", ++ "device/driver": None, ++ "device/device": None, ++ } ++} ++ + + def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, +- mock_sys_dev_path): +- mock_get_devicelist.return_value = ['eth1000'] +- dev_characteristics = { +- 'eth1000': { +- "bridge": False, +- "carrier": False, +- "dormant": False, +- "operstate": "down", +- "address": "07-1C-C6-75-A4-BE", +- } +- } ++ mock_sys_dev_path, dev_attrs=None): ++ if not dev_attrs: ++ dev_attrs = DEFAULT_DEV_ATTRS ++ ++ mock_get_devicelist.return_value = dev_attrs.keys() + + def fake_read(devname, path, translate=None, + on_enoent=None, on_keyerror=None, + on_einval=None): +- return dev_characteristics[devname][path] ++ return dev_attrs[devname][path] + + mock_read_sys_net.side_effect = fake_read + + def sys_dev_path(devname, path=""): +- return tmp_dir + devname + "/" + path ++ return tmp_dir + "/" + devname + "/" + path + +- for dev in dev_characteristics: ++ for dev in dev_attrs: + os.makedirs(os.path.join(tmp_dir, dev)) + with open(os.path.join(tmp_dir, dev, 'operstate'), 'w') as fh: +- fh.write("down") ++ fh.write(dev_attrs[dev]['operstate']) ++ os.makedirs(os.path.join(tmp_dir, dev, "device")) ++ for key in ['device/driver']: ++ if key in dev_attrs[dev] and dev_attrs[dev][key]: ++ target = dev_attrs[dev][key] ++ link = os.path.join(tmp_dir, dev, key) ++ print('symlink %s -> %s' % (link, target)) ++ os.symlink(target, link) + + mock_sys_dev_path.side_effect = sys_dev_path + + ++class TestGenerateFallbackConfig(CiTestCase): ++ ++ @mock.patch("cloudinit.net.sys_dev_path") ++ @mock.patch("cloudinit.net.read_sys_net") ++ @mock.patch("cloudinit.net.get_devicelist") ++ def test_device_driver(self, mock_get_devicelist, mock_read_sys_net, ++ mock_sys_dev_path): ++ devices = { ++ 'eth0': { ++ 'bridge': False, 'carrier': False, 
'dormant': False, ++ 'operstate': 'down', 'address': '00:11:22:33:44:55', ++ 'device/driver': 'hv_netsvc', 'device/device': '0x3'}, ++ 'eth1': { ++ 'bridge': False, 'carrier': False, 'dormant': False, ++ 'operstate': 'down', 'address': '00:11:22:33:44:55', ++ 'device/driver': 'mlx4_core', 'device/device': '0x7'}, ++ } ++ ++ tmp_dir = self.tmp_dir() ++ _setup_test(tmp_dir, mock_get_devicelist, ++ mock_read_sys_net, mock_sys_dev_path, ++ dev_attrs=devices) ++ ++ network_cfg = net.generate_fallback_config(config_driver=True) ++ ns = network_state.parse_net_config_data(network_cfg, ++ skip_broken=False) ++ ++ render_dir = os.path.join(tmp_dir, "render") ++ os.makedirs(render_dir) ++ ++ # don't set rulepath so eni writes them ++ renderer = eni.Renderer( ++ {'eni_path': 'interfaces', 'netrules_path': 'netrules'}) ++ renderer.render_network_state(ns, render_dir) ++ ++ self.assertTrue(os.path.exists(os.path.join(render_dir, ++ 'interfaces'))) ++ with open(os.path.join(render_dir, 'interfaces')) as fh: ++ contents = fh.read() ++ print(contents) ++ expected = """ ++auto lo ++iface lo inet loopback ++ ++auto eth0 ++iface eth0 inet dhcp ++""" ++ self.assertEqual(expected.lstrip(), contents.lstrip()) ++ ++ self.assertTrue(os.path.exists(os.path.join(render_dir, 'netrules'))) ++ with open(os.path.join(render_dir, 'netrules')) as fh: ++ contents = fh.read() ++ print(contents) ++ expected_rule = [ ++ 'SUBSYSTEM=="net"', ++ 'ACTION=="add"', ++ 'DRIVERS=="hv_netsvc"', ++ 'ATTR{address}=="00:11:22:33:44:55"', ++ 'NAME="eth0"', ++ ] ++ self.assertEqual(", ".join(expected_rule) + '\n', contents.lstrip()) ++ ++ @mock.patch("cloudinit.net.sys_dev_path") ++ @mock.patch("cloudinit.net.read_sys_net") ++ @mock.patch("cloudinit.net.get_devicelist") ++ def test_device_driver_blacklist(self, mock_get_devicelist, ++ mock_read_sys_net, mock_sys_dev_path): ++ devices = { ++ 'eth1': { ++ 'bridge': False, 'carrier': False, 'dormant': False, ++ 'operstate': 'down', 'address': '00:11:22:33:44:55', ++ 'device/driver': 'hv_netsvc', 'device/device': '0x3'}, ++ 'eth0': { ++ 'bridge': False, 'carrier': False, 'dormant': False, ++ 'operstate': 'down', 'address': '00:11:22:33:44:55', ++ 'device/driver': 'mlx4_core', 'device/device': '0x7'}, ++ } ++ ++ tmp_dir = self.tmp_dir() ++ _setup_test(tmp_dir, mock_get_devicelist, ++ mock_read_sys_net, mock_sys_dev_path, ++ dev_attrs=devices) ++ ++ blacklist = ['mlx4_core'] ++ network_cfg = net.generate_fallback_config(blacklist_drivers=blacklist, ++ config_driver=True) ++ ns = network_state.parse_net_config_data(network_cfg, ++ skip_broken=False) ++ ++ render_dir = os.path.join(tmp_dir, "render") ++ os.makedirs(render_dir) ++ ++ # don't set rulepath so eni writes them ++ renderer = eni.Renderer( ++ {'eni_path': 'interfaces', 'netrules_path': 'netrules'}) ++ renderer.render_network_state(ns, render_dir) ++ ++ self.assertTrue(os.path.exists(os.path.join(render_dir, ++ 'interfaces'))) ++ with open(os.path.join(render_dir, 'interfaces')) as fh: ++ contents = fh.read() ++ print(contents) ++ expected = """ ++auto lo ++iface lo inet loopback ++ ++auto eth1 ++iface eth1 inet dhcp ++""" ++ self.assertEqual(expected.lstrip(), contents.lstrip()) ++ ++ self.assertTrue(os.path.exists(os.path.join(render_dir, 'netrules'))) ++ with open(os.path.join(render_dir, 'netrules')) as fh: ++ contents = fh.read() ++ print(contents) ++ expected_rule = [ ++ 'SUBSYSTEM=="net"', ++ 'ACTION=="add"', ++ 'DRIVERS=="hv_netsvc"', ++ 'ATTR{address}=="00:11:22:33:44:55"', ++ 'NAME="eth1"', ++ ] ++ self.assertEqual(", 
".join(expected_rule) + '\n', contents.lstrip()) ++ ++ + class TestSysConfigRendering(CiTestCase): + + @mock.patch("cloudinit.net.sys_dev_path") +@@ -1513,6 +1651,118 @@ class TestNetRenderers(CiTestCase): + priority=['sysconfig', 'eni']) + + ++class TestGetInterfaces(CiTestCase): ++ _data = {'bonds': ['bond1'], ++ 'bridges': ['bridge1'], ++ 'vlans': ['bond1.101'], ++ 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1', ++ 'bond1.101', 'lo', 'eth1'], ++ 'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01', ++ 'enp0s2': 'aa:aa:aa:aa:aa:02', ++ 'bond1': 'aa:aa:aa:aa:aa:01', ++ 'bond1.101': 'aa:aa:aa:aa:aa:01', ++ 'bridge1': 'aa:aa:aa:aa:aa:03', ++ 'bridge1-nic': 'aa:aa:aa:aa:aa:03', ++ 'lo': '00:00:00:00:00:00', ++ 'greptap0': '00:00:00:00:00:00', ++ 'eth1': 'aa:aa:aa:aa:aa:01', ++ 'tun0': None}, ++ 'drivers': {'enp0s1': 'virtio_net', ++ 'enp0s2': 'e1000', ++ 'bond1': None, ++ 'bond1.101': None, ++ 'bridge1': None, ++ 'bridge1-nic': None, ++ 'lo': None, ++ 'greptap0': None, ++ 'eth1': 'mlx4_core', ++ 'tun0': None}} ++ data = {} ++ ++ def _se_get_devicelist(self): ++ return list(self.data['devices']) ++ ++ def _se_device_driver(self, name): ++ return self.data['drivers'][name] ++ ++ def _se_device_devid(self, name): ++ return '0x%s' % sorted(list(self.data['drivers'].keys())).index(name) ++ ++ def _se_get_interface_mac(self, name): ++ return self.data['macs'][name] ++ ++ def _se_is_bridge(self, name): ++ return name in self.data['bridges'] ++ ++ def _se_is_vlan(self, name): ++ return name in self.data['vlans'] ++ ++ def _se_interface_has_own_mac(self, name): ++ return name in self.data['own_macs'] ++ ++ def _mock_setup(self): ++ self.data = copy.deepcopy(self._data) ++ self.data['devices'] = set(list(self.data['macs'].keys())) ++ mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge', ++ 'interface_has_own_mac', 'is_vlan', 'device_driver', ++ 'device_devid') ++ self.mocks = {} ++ for n in mocks: ++ m = mock.patch('cloudinit.net.' + n, ++ side_effect=getattr(self, '_se_' + n)) ++ self.addCleanup(m.stop) ++ self.mocks[n] = m.start() ++ ++ def test_gi_includes_duplicate_macs(self): ++ self._mock_setup() ++ ret = net.get_interfaces() ++ ++ self.assertIn('enp0s1', self._se_get_devicelist()) ++ self.assertIn('eth1', self._se_get_devicelist()) ++ found = [ent for ent in ret if 'aa:aa:aa:aa:aa:01' in ent] ++ self.assertEqual(len(found), 2) ++ ++ def test_gi_excludes_any_without_mac_address(self): ++ self._mock_setup() ++ ret = net.get_interfaces() ++ ++ self.assertIn('tun0', self._se_get_devicelist()) ++ found = [ent for ent in ret if 'tun0' in ent] ++ self.assertEqual(len(found), 0) ++ ++ def test_gi_excludes_stolen_macs(self): ++ self._mock_setup() ++ ret = net.get_interfaces() ++ self.mocks['interface_has_own_mac'].assert_has_calls( ++ [mock.call('enp0s1'), mock.call('bond1')], any_order=True) ++ expected = [ ++ ('enp0s2', 'aa:aa:aa:aa:aa:02', 'e1000', '0x5'), ++ ('enp0s1', 'aa:aa:aa:aa:aa:01', 'virtio_net', '0x4'), ++ ('eth1', 'aa:aa:aa:aa:aa:01', 'mlx4_core', '0x6'), ++ ('lo', '00:00:00:00:00:00', None, '0x8'), ++ ('bridge1-nic', 'aa:aa:aa:aa:aa:03', None, '0x3'), ++ ] ++ self.assertEqual(sorted(expected), sorted(ret)) ++ ++ def test_gi_excludes_bridges(self): ++ self._mock_setup() ++ # add a device 'b1', make all return they have their "own mac", ++ # set everything other than 'b1' to be a bridge. ++ # then expect b1 is the only thing left. 
++ self.data['macs']['b1'] = 'aa:aa:aa:aa:aa:b1' ++ self.data['drivers']['b1'] = None ++ self.data['devices'].add('b1') ++ self.data['bonds'] = [] ++ self.data['own_macs'] = self.data['devices'] ++ self.data['bridges'] = [f for f in self.data['devices'] if f != "b1"] ++ ret = net.get_interfaces() ++ self.assertEqual([('b1', 'aa:aa:aa:aa:aa:b1', None, '0x0')], ret) ++ self.mocks['is_bridge'].assert_has_calls( ++ [mock.call('bridge1'), mock.call('enp0s1'), mock.call('bond1'), ++ mock.call('b1')], ++ any_order=True) ++ ++ + class TestGetInterfacesByMac(CiTestCase): + _data = {'bonds': ['bond1'], + 'bridges': ['bridge1'], +@@ -1631,4 +1881,202 @@ def _gzip_data(data): + gzfp.close() + return iobuf.getvalue() + ++ ++class TestRenameInterfaces(CiTestCase): ++ ++ @mock.patch('cloudinit.util.subp') ++ def test_rename_all(self, mock_subp): ++ renames = [ ++ ('00:11:22:33:44:55', 'interface0', 'virtio_net', '0x3'), ++ ('00:11:22:33:44:aa', 'interface2', 'virtio_net', '0x5'), ++ ] ++ current_info = { ++ 'ens3': { ++ 'downable': True, ++ 'device_id': '0x3', ++ 'driver': 'virtio_net', ++ 'mac': '00:11:22:33:44:55', ++ 'name': 'ens3', ++ 'up': False}, ++ 'ens5': { ++ 'downable': True, ++ 'device_id': '0x5', ++ 'driver': 'virtio_net', ++ 'mac': '00:11:22:33:44:aa', ++ 'name': 'ens5', ++ 'up': False}, ++ } ++ net._rename_interfaces(renames, current_info=current_info) ++ print(mock_subp.call_args_list) ++ mock_subp.assert_has_calls([ ++ mock.call(['ip', 'link', 'set', 'ens3', 'name', 'interface0'], ++ capture=True), ++ mock.call(['ip', 'link', 'set', 'ens5', 'name', 'interface2'], ++ capture=True), ++ ]) ++ ++ @mock.patch('cloudinit.util.subp') ++ def test_rename_no_driver_no_device_id(self, mock_subp): ++ renames = [ ++ ('00:11:22:33:44:55', 'interface0', None, None), ++ ('00:11:22:33:44:aa', 'interface1', None, None), ++ ] ++ current_info = { ++ 'eth0': { ++ 'downable': True, ++ 'device_id': None, ++ 'driver': None, ++ 'mac': '00:11:22:33:44:55', ++ 'name': 'eth0', ++ 'up': False}, ++ 'eth1': { ++ 'downable': True, ++ 'device_id': None, ++ 'driver': None, ++ 'mac': '00:11:22:33:44:aa', ++ 'name': 'eth1', ++ 'up': False}, ++ } ++ net._rename_interfaces(renames, current_info=current_info) ++ print(mock_subp.call_args_list) ++ mock_subp.assert_has_calls([ ++ mock.call(['ip', 'link', 'set', 'eth0', 'name', 'interface0'], ++ capture=True), ++ mock.call(['ip', 'link', 'set', 'eth1', 'name', 'interface1'], ++ capture=True), ++ ]) ++ ++ @mock.patch('cloudinit.util.subp') ++ def test_rename_all_bounce(self, mock_subp): ++ renames = [ ++ ('00:11:22:33:44:55', 'interface0', 'virtio_net', '0x3'), ++ ('00:11:22:33:44:aa', 'interface2', 'virtio_net', '0x5'), ++ ] ++ current_info = { ++ 'ens3': { ++ 'downable': True, ++ 'device_id': '0x3', ++ 'driver': 'virtio_net', ++ 'mac': '00:11:22:33:44:55', ++ 'name': 'ens3', ++ 'up': True}, ++ 'ens5': { ++ 'downable': True, ++ 'device_id': '0x5', ++ 'driver': 'virtio_net', ++ 'mac': '00:11:22:33:44:aa', ++ 'name': 'ens5', ++ 'up': True}, ++ } ++ net._rename_interfaces(renames, current_info=current_info) ++ print(mock_subp.call_args_list) ++ mock_subp.assert_has_calls([ ++ mock.call(['ip', 'link', 'set', 'ens3', 'down'], capture=True), ++ mock.call(['ip', 'link', 'set', 'ens3', 'name', 'interface0'], ++ capture=True), ++ mock.call(['ip', 'link', 'set', 'ens5', 'down'], capture=True), ++ mock.call(['ip', 'link', 'set', 'ens5', 'name', 'interface2'], ++ capture=True), ++ mock.call(['ip', 'link', 'set', 'interface0', 'up'], capture=True), ++ mock.call(['ip', 'link', 'set', 
'interface2', 'up'], capture=True) ++ ]) ++ ++ @mock.patch('cloudinit.util.subp') ++ def test_rename_duplicate_macs(self, mock_subp): ++ renames = [ ++ ('00:11:22:33:44:55', 'eth0', 'hv_netsvc', '0x3'), ++ ('00:11:22:33:44:55', 'vf1', 'mlx4_core', '0x5'), ++ ] ++ current_info = { ++ 'eth0': { ++ 'downable': True, ++ 'device_id': '0x3', ++ 'driver': 'hv_netsvc', ++ 'mac': '00:11:22:33:44:55', ++ 'name': 'eth0', ++ 'up': False}, ++ 'eth1': { ++ 'downable': True, ++ 'device_id': '0x5', ++ 'driver': 'mlx4_core', ++ 'mac': '00:11:22:33:44:55', ++ 'name': 'eth1', ++ 'up': False}, ++ } ++ net._rename_interfaces(renames, current_info=current_info) ++ print(mock_subp.call_args_list) ++ mock_subp.assert_has_calls([ ++ mock.call(['ip', 'link', 'set', 'eth1', 'name', 'vf1'], ++ capture=True), ++ ]) ++ ++ @mock.patch('cloudinit.util.subp') ++ def test_rename_duplicate_macs_driver_no_devid(self, mock_subp): ++ renames = [ ++ ('00:11:22:33:44:55', 'eth0', 'hv_netsvc', None), ++ ('00:11:22:33:44:55', 'vf1', 'mlx4_core', None), ++ ] ++ current_info = { ++ 'eth0': { ++ 'downable': True, ++ 'device_id': '0x3', ++ 'driver': 'hv_netsvc', ++ 'mac': '00:11:22:33:44:55', ++ 'name': 'eth0', ++ 'up': False}, ++ 'eth1': { ++ 'downable': True, ++ 'device_id': '0x5', ++ 'driver': 'mlx4_core', ++ 'mac': '00:11:22:33:44:55', ++ 'name': 'eth1', ++ 'up': False}, ++ } ++ net._rename_interfaces(renames, current_info=current_info) ++ print(mock_subp.call_args_list) ++ mock_subp.assert_has_calls([ ++ mock.call(['ip', 'link', 'set', 'eth1', 'name', 'vf1'], ++ capture=True), ++ ]) ++ ++ @mock.patch('cloudinit.util.subp') ++ def test_rename_multi_mac_dups(self, mock_subp): ++ renames = [ ++ ('00:11:22:33:44:55', 'eth0', 'hv_netsvc', '0x3'), ++ ('00:11:22:33:44:55', 'vf1', 'mlx4_core', '0x5'), ++ ('00:11:22:33:44:55', 'vf2', 'mlx4_core', '0x7'), ++ ] ++ current_info = { ++ 'eth0': { ++ 'downable': True, ++ 'device_id': '0x3', ++ 'driver': 'hv_netsvc', ++ 'mac': '00:11:22:33:44:55', ++ 'name': 'eth0', ++ 'up': False}, ++ 'eth1': { ++ 'downable': True, ++ 'device_id': '0x5', ++ 'driver': 'mlx4_core', ++ 'mac': '00:11:22:33:44:55', ++ 'name': 'eth1', ++ 'up': False}, ++ 'eth2': { ++ 'downable': True, ++ 'device_id': '0x7', ++ 'driver': 'mlx4_core', ++ 'mac': '00:11:22:33:44:55', ++ 'name': 'eth2', ++ 'up': False}, ++ } ++ net._rename_interfaces(renames, current_info=current_info) ++ print(mock_subp.call_args_list) ++ mock_subp.assert_has_calls([ ++ mock.call(['ip', 'link', 'set', 'eth1', 'name', 'vf1'], ++ capture=True), ++ mock.call(['ip', 'link', 'set', 'eth2', 'name', 'vf2'], ++ capture=True), ++ ]) ++ ++ + # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/debian/patches/ds-identify-behavior-yakkety.patch cloud-init-0.7.9-153-g16a7302f/debian/patches/ds-identify-behavior-yakkety.patch --- cloud-init-0.7.9-47-gc81ea53/debian/patches/ds-identify-behavior-yakkety.patch 2017-03-03 07:38:34.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/debian/patches/ds-identify-behavior-yakkety.patch 2017-06-28 18:11:19.000000000 +0000 @@ -10,21 +10,21 @@ --- a/tools/ds-identify +++ b/tools/ds-identify -@@ -74,7 +74,7 @@ _DI_LOGGED="" +@@ -83,7 +83,7 @@ _DI_LOGGED="" # set DI_MAIN='noop' in environment to source this file with no main called. 
DI_MAIN=${DI_MAIN:-main} -DI_DEFAULT_POLICY="search,found=all,maybe=all,notfound=${DI_DISABLED}" +DI_DEFAULT_POLICY="search,found=all,maybe=all,notfound=${DI_ENABLED}" DI_DEFAULT_POLICY_NO_DMI="search,found=all,maybe=all,notfound=${DI_ENABLED}" + DI_DMI_CHASSIS_ASSET_TAG="" DI_DMI_PRODUCT_NAME="" - DI_DMI_SYS_VENDOR="" -@@ -109,7 +109,7 @@ DI_ON_FOUND="" +@@ -119,7 +119,7 @@ DI_ON_FOUND="" DI_ON_MAYBE="" DI_ON_NOTFOUND="" -DI_EC2_STRICT_ID_DEFAULT="true" -+DI_EC2_STRICT_ID_DEFAULT="warn,10" ++DI_EC2_STRICT_ID_DEFAULT="warn" error() { set -- "ERROR:" "$@"; diff -Nru cloud-init-0.7.9-47-gc81ea53/debian/patches/series cloud-init-0.7.9-153-g16a7302f/debian/patches/series --- cloud-init-0.7.9-47-gc81ea53/debian/patches/series 2017-03-03 07:38:34.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/debian/patches/series 2017-06-28 18:11:19.000000000 +0000 @@ -1,2 +1,7 @@ azure-use-walinux-agent.patch +cpick-5fb49bac-azure-identify-platform-by-well-known-value-in-chassis ds-identify-behavior-yakkety.patch +cpick-003c6678-net-remove-systemd-link-file-writing-from-eni-renderer +cpick-1cd4323b-azure-remove-accidental-duplicate-line-in-merge +cpick-ebc9ecbc-Azure-Add-network-config-Refactor-net-layer-to-handle +cpick-11121fe4-systemd-make-cloud-final.service-run-before-apt-daily diff -Nru cloud-init-0.7.9-47-gc81ea53/debian/update-grub-legacy-ec2 cloud-init-0.7.9-153-g16a7302f/debian/update-grub-legacy-ec2 --- cloud-init-0.7.9-47-gc81ea53/debian/update-grub-legacy-ec2 2017-03-03 07:38:34.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/debian/update-grub-legacy-ec2 2017-06-28 18:11:19.000000000 +0000 @@ -370,7 +370,10 @@ device_map=$grub_dir/device.map # Default kernel options, overidden by the kopt statement in the menufile. -loop_file=$(awk '$2=="/" && $4~"loop" {print $1}' /etc/fstab) +loop_file="" +if [ -f /etc/fstab ]; then + loop_file=$(awk '$2=="/" && $4~"loop" {print $1}' /etc/fstab) +fi if [ -n "$loop_file" ]; then dev_mountpoint=$(awk '"'${loop_file}'"~"^"$2 && $2!="/" {print $1";"$2}' /proc/mounts|tail -n 1) host_device="${dev_mountpoint%;*}" diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/examples/cloud-config-chef.txt cloud-init-0.7.9-153-g16a7302f/doc/examples/cloud-config-chef.txt --- cloud-init-0.7.9-47-gc81ea53/doc/examples/cloud-config-chef.txt 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/doc/examples/cloud-config-chef.txt 2017-05-26 18:36:38.000000000 +0000 @@ -5,46 +5,50 @@ # Make sure that this file is valid yaml before starting instances. # It should be passed as user-data when starting the instance. # -# This example assumes the instance is 12.04 (precise) +# This example assumes the instance is 16.04 (xenial) # The default is to install from packages. 
-# Key from http://apt.opscode.com/packages@opscode.com.gpg.key +# Key from https://packages.chef.io/chef.asc apt: - sources: - - source: "deb http://apt.opscode.com/ $RELEASE-0.10 main" - key: | - -----BEGIN PGP PUBLIC KEY BLOCK----- - Version: GnuPG v1.4.9 (GNU/Linux) - - mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu - twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99 - dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC - JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W - ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I - XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe - DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm - sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO - Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ - YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG - CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K - +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu5Ag0ESmkLtBAIAIO2SwlR - lU5i6gTOp42RHWW7/pmW78CwUqJnYqnXROrt3h9F9xrsGkH0Fh1FRtsnncgzIhvh - DLQnRHnkXm0ws0jV0PF74ttoUT6BLAUsFi2SPP1zYNJ9H9fhhK/pjijtAcQwdgxu - wwNJ5xCEscBZCjhSRXm0d30bK1o49Cow8ZIbHtnXVP41c9QWOzX/LaGZsKQZnaMx - EzDk8dyyctR2f03vRSVyTFGgdpUcpbr9eTFVgikCa6ODEBv+0BnCH6yGTXwBid9g - w0o1e/2DviKUWCC+AlAUOubLmOIGFBuI4UR+rux9affbHcLIOTiKQXv79lW3P7W8 - AAfniSQKfPWXrrcAAwUH/2XBqD4Uxhbs25HDUUiM/m6Gnlj6EsStg8n0nMggLhuN - QmPfoNByMPUqvA7sULyfr6xCYzbzRNxABHSpf85FzGQ29RF4xsA4vOOU8RDIYQ9X - Q8NqqR6pydprRFqWe47hsAN7BoYuhWqTtOLSBmnAnzTR5pURoqcquWYiiEavZixJ - 3ZRAq/HMGioJEtMFrvsZjGXuzef7f0ytfR1zYeLVWnL9Bd32CueBlI7dhYwkFe+V - Ep5jWOCj02C1wHcwt+uIRDJV6TdtbIiBYAdOMPk15+VBdweBXwMuYXr76+A7VeDL - zIhi7tKFo6WiwjKZq0dzctsJJjtIfr4K4vbiD9Ojg1iISQQYEQIACQUCSmkLtAIb - DAAKCRApQKupg++CauISAJ9CxYPOKhOxalBnVTLeNUkAHGg2gACeIsbobtaD4ZHG - 0GLl8EkfA8uhluM= - =zKAm - -----END PGP PUBLIC KEY BLOCK----- + source1: + source: "deb http://packages.chef.io/repos/apt/stable $RELEASE main" + key: | + -----BEGIN PGP PUBLIC KEY BLOCK----- + Version: GnuPG v1.4.12 (Darwin) + Comment: GPGTools - http://gpgtools.org + + mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu + twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99 + dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC + JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W + ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I + XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe + DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm + sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO + Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ + YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG + CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K + +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu0IENIRUYgUGFja2FnZXMg + PHBhY2thZ2VzQGNoZWYuaW8+iGIEExECACIFAlQwYFECGwMGCwkIBwMCBhUIAgkK + CwQWAgMBAh4BAheAAAoJEClAq6mD74JqX94An26z99XOHWpLN8ahzm7cp13t4Xid + AJ9wVcgoUBzvgg91lKfv/34cmemZn7kCDQRKaQu0EAgAg7ZLCVGVTmLqBM6njZEd + Zbv+mZbvwLBSomdiqddE6u3eH0X3GuwaQfQWHUVG2yedyDMiG+EMtCdEeeRebTCz + SNXQ8Xvi22hRPoEsBSwWLZI8/XNg0n0f1+GEr+mOKO0BxDB2DG7DA0nnEISxwFkK + OFJFebR3fRsrWjj0KjDxkhse2ddU/jVz1BY7Nf8toZmwpBmdozETMOTx3LJy1HZ/ + Te9FJXJMUaB2lRyluv15MVWCKQJro4MQG/7QGcIfrIZNfAGJ32DDSjV7/YO+IpRY + IL4CUBQ65suY4gYUG4jhRH6u7H1p99sdwsg5OIpBe/v2Vbc/tbwAB+eJJAp89Zeu + 
twADBQf/ZcGoPhTGFuzbkcNRSIz+boaeWPoSxK2DyfScyCAuG41CY9+g0HIw9Sq8 + DuxQvJ+vrEJjNvNE3EAEdKl/zkXMZDb1EXjGwDi845TxEMhhD1dDw2qpHqnJ2mtE + WpZ7juGwA3sGhi6FapO04tIGacCfNNHmlRGipyq5ZiKIRq9mLEndlECr8cwaKgkS + 0wWu+xmMZe7N5/t/TK19HXNh4tVacv0F3fYK54GUjt2FjCQV75USnmNY4KPTYLXA + dzC364hEMlXpN21siIFgB04w+TXn5UF3B4FfAy5hevvr4DtV4MvMiGLu0oWjpaLC + MpmrR3Ny2wkmO0h+vgri9uIP06ODWIhJBBgRAgAJBQJKaQu0AhsMAAoJEClAq6mD + 74Jq4hIAoJ5KrYS8kCwj26SAGzglwggpvt3CAJ0bekyky56vNqoegB+y4PQVDv4K + zA== + =IxPr + -----END PGP PUBLIC KEY BLOCK----- chef: diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/examples/cloud-config-disk-setup.txt cloud-init-0.7.9-153-g16a7302f/doc/examples/cloud-config-disk-setup.txt --- cloud-init-0.7.9-47-gc81ea53/doc/examples/cloud-config-disk-setup.txt 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/doc/examples/cloud-config-disk-setup.txt 2017-05-26 18:36:38.000000000 +0000 @@ -155,11 +155,11 @@ filesystem: 'ext3' device: 'ephemeral0' partition: 'auto' - - label: mylabl2 + - label: mylabl2 filesystem: 'ext4' device: '/dev/xvda1' - - special: - cmd: mkfs -t %(FILESYSTEM)s -L %(LABEL)s %(DEVICE)s + - cmd: mkfs -t %(filesystem)s -L %(label)s %(device)s + label: mylabl3 filesystem: 'btrfs' device: '/dev/xvdh' diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/examples/cloud-config-resolv-conf.txt cloud-init-0.7.9-153-g16a7302f/doc/examples/cloud-config-resolv-conf.txt --- cloud-init-0.7.9-47-gc81ea53/doc/examples/cloud-config-resolv-conf.txt 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/doc/examples/cloud-config-resolv-conf.txt 2017-05-26 18:36:38.000000000 +0000 @@ -5,9 +5,9 @@ # # Ensure that your yaml is valid and pass this as user-data when starting # the instance. Also be sure that your cloud.cfg file includes this -# configuration module in the appropirate section. +# configuration module in the appropriate section. # -manage-resolv-conf: true +manage_resolv_conf: true resolv_conf: nameservers: ['8.8.4.4', '8.8.8.8'] diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/examples/cloud-config.txt cloud-init-0.7.9-153-g16a7302f/doc/examples/cloud-config.txt --- cloud-init-0.7.9-47-gc81ea53/doc/examples/cloud-config.txt 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/doc/examples/cloud-config.txt 2017-05-26 18:36:38.000000000 +0000 @@ -426,14 +426,21 @@ # # there is also an option to set multiple users passwords, using 'chpasswd' # That looks like the following, with 'expire' set to 'True' by default. -# to not expire users passwords, set 'expire' to 'False': +# to not expire users passwords, set 'expire' to 'False'. 
Also possible
+# to set a hashed password; here, account 'user3' has its password set to
+# 'cloud-init', hashed with SHA-256:
 # chpasswd:
 #   list: |
 #     user1:password1
 #     user2:RANDOM
+#     user3:$5$eriogqzq$Dg7PxHsKGzziuEGkZgkLvacjuEFeljJ.rLf.hZqKQLA
 #   expire: True
 # ssh_pwauth: [ True, False, "" or "unchanged" ]
 #
+# Hashed passwords can be generated in multiple ways, for example with python3:
+# python3 -c 'import crypt,getpass; print(crypt.crypt(getpass.getpass(), crypt.mksalt(crypt.METHOD_SHA512)))'
+# Newer versions of 'mkpasswd' will also work: mkpasswd -m sha-512 password
+#
 # So, a simple working example to allow login via ssh, and not expire
 # for the default user would look like:
   password: passw0rd
diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/examples/cloud-config-update-apt.txt cloud-init-0.7.9-153-g16a7302f/doc/examples/cloud-config-update-apt.txt
--- cloud-init-0.7.9-47-gc81ea53/doc/examples/cloud-config-update-apt.txt	2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/doc/examples/cloud-config-update-apt.txt	2017-05-26 18:36:38.000000000 +0000
@@ -1,7 +1,8 @@
 #cloud-config
-# Update apt database on first boot
-# (ie run apt-get update)
+# Update apt database on first boot (run 'apt-get update').
+# Note, if packages are given, or package_upgrade is true, then
+# update will be done independently of this setting.
 #
-# Default: true
+# Default: false
 # Aliases: apt_update
 package_update: false
diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/merging.rst cloud-init-0.7.9-153-g16a7302f/doc/merging.rst
--- cloud-init-0.7.9-47-gc81ea53/doc/merging.rst	2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/doc/merging.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,201 +0,0 @@
-Overview
-========
-
-This was implemented because it has been a common feature request that there be
-a way to specify how cloud-config yaml "dictionaries" provided as user-data are
-merged together when there are multiple yamls to merge together (say when
-performing an #include).
-
-Since previously the merging algorithm was very simple and would only overwrite
-and not append lists, or strings, and so on it was decided to create a new and
-improved way to merge dictionaries (and there contained objects) together in a
-way that is customizable, thus allowing for users who provide cloud-config
-user-data to determine exactly how there objects will be merged.
-
-For example.
-
-.. code-block:: yaml
-
-   #cloud-config (1)
-   run_cmd:
-     - bash1
-     - bash2
-
-   #cloud-config (2)
-   run_cmd:
-     - bash3
-     - bash4
-
-The previous way of merging the following 2 objects would result in a final
-cloud-config object that contains the following.
-
-.. code-block:: yaml
-
-   #cloud-config (merged)
-   run_cmd:
-     - bash3
-     - bash4
-
-Typically this is not what users want, instead they would likely prefer:
-
-.. code-block:: yaml
-
-   #cloud-config (merged)
-   run_cmd:
-     - bash1
-     - bash2
-     - bash3
-     - bash4
-
-This way makes it easier to combine the various cloud-config objects you have
-into a more useful list, thus reducing duplication that would have had to
-occur in the previous method to accomplish the same result.
-
-Customizability
-===============
-
-Since the above merging algorithm may not always be the desired merging
-algorithm (like how the previous merging algorithm was not always the preferred
-one) the concept of customizing how merging can be done was introduced through
-a new concept call 'merge classes'.
- -A merge class is a class defintion which provides functions that can be used -to merge a given type with another given type. - -An example of one of these merging classes is the following: - -.. code-block:: python - - class Merger(object): - def __init__(self, merger, opts): - self._merger = merger - self._overwrite = 'overwrite' in opts - - # This merging algorithm will attempt to merge with - # another dictionary, on encountering any other type of object - # it will not merge with said object, but will instead return - # the original value - # - # On encountering a dictionary, it will create a new dictionary - # composed of the original and the one to merge with, if 'overwrite' - # is enabled then keys that exist in the original will be overwritten - # by keys in the one to merge with (and associated values). Otherwise - # if not in overwrite mode the 2 conflicting keys themselves will - # be merged. - def _on_dict(self, value, merge_with): - if not isinstance(merge_with, (dict)): - return value - merged = dict(value) - for (k, v) in merge_with.items(): - if k in merged: - if not self._overwrite: - merged[k] = self._merger.merge(merged[k], v) - else: - merged[k] = v - else: - merged[k] = v - return merged - -As you can see there is a '_on_dict' method here that will be given a source -value and a value to merge with. The result will be the merged object. This -code itself is called by another merging class which 'directs' the merging to -happen by analyzing the types of the objects to merge and attempting to find a -know object that will merge that type. I will avoid pasting that here, but it -can be found in the `mergers/__init__.py` file (see `LookupMerger` and -`UnknownMerger`). - -So following the typical cloud-init way of allowing source code to be -downloaded and used dynamically, it is possible for users to inject there own -merging files to handle specific types of merging as they choose (the basic -ones included will handle lists, dicts, and strings). Note how each merge can -have options associated with it which affect how the merging is performed, for -example a dictionary merger can be told to overwrite instead of attempt to -merge, or a string merger can be told to append strings instead of discarding -other strings to merge with. - -How to activate -=============== - -There are a few ways to activate the merging algorithms, and to customize them -for your own usage. - -1. The first way involves the usage of MIME messages in cloud-init to specify - multipart documents (this is one way in which multiple cloud-config is - joined together into a single cloud-config). Two new headers are looked - for, both of which can define the way merging is done (the first header to - exist wins). These new headers (in lookup order) are 'Merge-Type' and - 'X-Merge-Type'. The value should be a string which will satisfy the new - merging format defintion (see below for this format). - -2. The second way is actually specifying the merge-type in the body of the - cloud-config dictionary. There are 2 ways to specify this, either as a - string or as a dictionary (see format below). The keys that are looked up - for this definition are the following (in order), 'merge_how', - 'merge_type'. - -String format -------------- - -The string format that is expected is the following. - -:: - - classname1(option1,option2)+classname2(option3,option4).... 
- -The class name there will be connected to class names used when looking for the -class that can be used to merge and options provided will be given to the class -on construction of that class. - -For example, the default string that is used when none is provided is the -following: - -:: - - list()+dict()+str() - -Dictionary format ------------------ - -In cases where a dictionary can be used to specify the same information as the -string format (ie option #2 of above) it can be used, for example. - -.. code-block:: python - - {'merge_how': [{'name': 'list', 'settings': ['extend']}, - {'name': 'dict', 'settings': []}, - {'name': 'str', 'settings': ['append']}]} - -This would be the equivalent format for default string format but in dictionary -form instead of string form. - -Specifying multiple types and its effect -======================================== - -Now you may be asking yourself, if I specify a merge-type header or dictionary -for every cloud-config that I provide, what exactly happens? - -The answer is that when merging, a stack of 'merging classes' is kept, the -first one on that stack is the default merging classes, this set of mergers -will be used when the first cloud-config is merged with the initial empty -cloud-config dictionary. If the cloud-config that was just merged provided a -set of merging classes (via the above formats) then those merging classes will -be pushed onto the stack. Now if there is a second cloud-config to be merged -then the merging classes from the cloud-config before the first will be used -(not the default) and so on. This way a cloud-config can decide how it will -merge with a cloud-config dictionary coming after it. - -Other uses -========== - -In addition to being used for merging user-data sections, the default merging -algorithm for merging 'conf.d' yaml files (which form an initial yaml config -for cloud-init) was also changed to use this mechanism so its full -benefits (and customization) can also be used there as well. Other places that -used the previous merging are also, similarly, now extensible (metadata -merging, for example). - -Note, however, that merge algorithms are not used *across* types of -configuration. As was the case before merging was implemented, -user-data will overwrite conf.d configuration without merging. - -.. 
vi: textwidth=78
diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/index.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/index.rst
--- cloud-init-0.7.9-47-gc81ea53/doc/rtd/index.rst	2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/index.rst	2017-05-26 18:36:38.000000000 +0000
@@ -38,6 +38,7 @@
    topics/logging.rst
    topics/modules.rst
    topics/merging.rst
+   topics/network-config.rst
    topics/vendordata.rst
    topics/moreinfo.rst
    topics/hacking.rst
diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/capabilities.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/capabilities.rst
--- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/capabilities.rst	2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/capabilities.rst	2017-05-26 18:36:38.000000000 +0000
@@ -3,10 +3,11 @@
 ************
 
 - Setting a default locale
-- Setting a instance hostname
-- Generating instance ssh private keys
-- Adding ssh keys to a users ``.ssh/authorized_keys`` so they can log in
+- Setting an instance hostname
+- Generating instance SSH private keys
+- Adding SSH keys to a user's ``.ssh/authorized_keys`` so they can log in
 - Setting up ephemeral mount points
+- Configuring network devices
 
 User configurability
 ====================
@@ -22,5 +23,27 @@
 string or `user-data` file for usage by cloud-init on instance creation.
 
+Feature detection
+=================
+
+Newer versions of cloud-init may have a list of additional features that they
+support. This allows other applications to detect what features the installed
+cloud-init supports without having to parse its version number. If present,
+this list of features will be located at ``cloudinit.version.FEATURES``.
+
+When checking if cloud-init supports a feature, in order to not break the
+detection script on older versions of cloud-init without the features list, a
+script similar to the following should be used. Note that this will exit 0 if
+the feature is supported and 1 otherwise::
+
+  import sys
+  from cloudinit import version
+  sys.exit('NETWORK_CONFIG_V1' not in getattr(version, 'FEATURES', []))
+
+Currently defined feature names include:
+
+ - ``NETWORK_CONFIG_V1`` support for v1 networking configuration, see curtin
+   documentation for examples.
+
 .. _Cloud-init: https://launchpad.net/cloud-init
 .. vi: textwidth=78
diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/altcloud.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/altcloud.rst
--- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/altcloud.rst	2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/altcloud.rst	2017-05-26 18:36:38.000000000 +0000
@@ -1,3 +1,5 @@
+.. _datasource_alt_cloud:
+
 Alt Cloud
 =========
diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/azure.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/azure.rst
--- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/azure.rst	2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/azure.rst	2017-05-26 18:36:38.000000000 +0000
@@ -1,3 +1,5 @@
+.. _datasource_azure:
+
 Azure
 =====
diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/cloudsigma.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/cloudsigma.rst
--- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/cloudsigma.rst	2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/cloudsigma.rst	2017-05-26 18:36:38.000000000 +0000
@@ -1,3 +1,5 @@
+..
_datasource_cloudsigma: + CloudSigma ========== diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/cloudstack.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/cloudstack.rst --- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/cloudstack.rst 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/cloudstack.rst 2017-05-26 18:36:38.000000000 +0000 @@ -1,3 +1,5 @@ +.. _datasource_cloudstack: + CloudStack ========== diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/configdrive.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/configdrive.rst --- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/configdrive.rst 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/configdrive.rst 2017-05-26 18:36:38.000000000 +0000 @@ -1,3 +1,5 @@ +.. _datasource_config_drive: + Config Drive ============ @@ -18,12 +20,13 @@ Version 1 --------- +**Note:** Version 1 is legacy and should be considered deprecated. Version 2 +has been supported in OpenStack since 2012.2 (Folsom). The following criteria are required to as a config drive: 1. Must be formatted with `vfat`_ filesystem -2. Must be a un-partitioned block device (/dev/vdb, not /dev/vdb1) -3. Must contain *one* of the following files +2. Must contain *one* of the following files :: @@ -56,8 +59,7 @@ 1. Must be formatted with `vfat`_ or `iso9660`_ filesystem or have a *filesystem* label of **config-2** -2. Must be a un-partitioned block device (/dev/vdb, not /dev/vdb1) -3. The files that will typically be present in the config drive are: +2. The files that will typically be present in the config drive are: :: diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/digitalocean.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/digitalocean.rst --- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/digitalocean.rst 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/digitalocean.rst 2017-05-26 18:36:38.000000000 +0000 @@ -1,3 +1,5 @@ +.. _datasource_digital_ocean: + Digital Ocean ============= diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/ec2.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/ec2.rst --- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/ec2.rst 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/ec2.rst 2017-05-26 18:36:38.000000000 +0000 @@ -1,3 +1,5 @@ +.. _datasource_ec2: + Amazon EC2 ========== diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/fallback.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/fallback.rst --- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/fallback.rst 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/fallback.rst 2017-05-26 18:36:38.000000000 +0000 @@ -1,3 +1,5 @@ +.. _datasource_fallback: + Fallback/None ============= diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/maas.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/maas.rst --- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/maas.rst 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/maas.rst 2017-05-26 18:36:38.000000000 +0000 @@ -1,3 +1,5 @@ +.. 
_datasource_maas:
+
 MAAS
 ====
diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/nocloud.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/nocloud.rst
--- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/nocloud.rst	2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/nocloud.rst	2017-05-26 18:36:38.000000000 +0000
@@ -1,3 +1,5 @@
+.. _datasource_nocloud:
+
 NoCloud
 =======
@@ -70,6 +72,43 @@
         gateway 192.168.1.254
    hostname: myhost
 
+
+Network configuration can also be provided to cloud-init in either
+:ref:`network_config_v1` or :ref:`network_config_v2` by providing that
+yaml formatted data in a file named ``network-config``. If found,
+this file will override a ``network-interfaces`` file.
+
+See an example below. Note specifically that this file does not
+have a top level ``network`` key as it is already assumed to
+be network configuration based on the filename.
+
+.. code:: yaml
+
+   version: 1
+   config:
+      - type: physical
+        name: interface0
+        mac_address: "52:54:00:12:34:00"
+        subnets:
+           - type: static
+             address: 192.168.1.10
+             netmask: 255.255.255.0
+             gateway: 192.168.1.254
+
+
+.. code:: yaml
+
+   version: 2
+   ethernets:
+     interface0:
+       match:
+         mac_address: "52:54:00:12:34:00"
+       set-name: interface0
+       addresses:
+         - 192.168.1.10/24
+       gateway4: 192.168.1.254
+
+
 .. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
 .. _vfat: https://en.wikipedia.org/wiki/File_Allocation_Table
 .. vi: textwidth=78
diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/opennebula.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/opennebula.rst
--- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/opennebula.rst	2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/opennebula.rst	2017-05-26 18:36:38.000000000 +0000
@@ -1,3 +1,5 @@
+.. _datasource_opennebula:
+
 OpenNebula
 ==========
diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/openstack.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/openstack.rst
--- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/openstack.rst	2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/openstack.rst	2017-05-26 18:36:38.000000000 +0000
@@ -1,3 +1,5 @@
+.. _datasource_openstack:
+
 OpenStack
 =========
diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/ovf.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/ovf.rst
--- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/ovf.rst	2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/ovf.rst	2017-05-26 18:36:38.000000000 +0000
@@ -1,3 +1,5 @@
+.. _datasource_ovf:
+
 OVF
 ===
diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/smartos.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/smartos.rst
--- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/datasources/smartos.rst	2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/datasources/smartos.rst	2017-05-26 18:36:38.000000000 +0000
@@ -1,3 +1,5 @@
+..
_datasource_smartos: + SmartOS Datasource ================== diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/examples.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/examples.rst --- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/examples.rst 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/examples.rst 2017-05-26 18:36:38.000000000 +0000 @@ -93,6 +93,13 @@ :language: yaml :linenos: +Update apt database on first boot +================================= + +.. literalinclude:: ../../examples/cloud-config-update-apt.txt + :language: yaml + :linenos: + Run apt or yum upgrade ====================== @@ -149,6 +156,27 @@ :language: yaml :linenos: -.. _chef: http://www.opscode.com/chef/ +Configure data sources +====================== + +.. literalinclude:: ../../examples/cloud-config-datasources.txt + :language: yaml + :linenos: + +Create partitions and filesystems +================================= + +.. literalinclude:: ../../examples/cloud-config-disk-setup.txt + :language: yaml + :linenos: + +Grow partitions +=============== + +.. literalinclude:: ../../examples/cloud-config-growpart.txt + :language: yaml + :linenos: + +.. _chef: http://www.chef.io/chef/ .. _puppet: http://puppetlabs.com/ .. vi: textwidth=78 diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/merging.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/merging.rst --- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/merging.rst 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/merging.rst 2017-05-26 18:36:38.000000000 +0000 @@ -2,5 +2,203 @@ Merging User-Data Sections ************************** -.. include:: ../../merging.rst +Overview +======== + +This was implemented because it has been a common feature request that there be +a way to specify how cloud-config yaml "dictionaries" provided as user-data are +merged together when there are multiple yaml documents to merge (say when +performing an #include). + +Previously the merging algorithm was very simple: it would only overwrite, +never append, lists, strings, and so on. It was therefore decided to create a +new and improved way to merge dictionaries (and their contained objects) +together in a way that is customizable, allowing users who provide +cloud-config user-data to determine exactly how their objects will be merged. + +For example: + +.. code-block:: yaml + + #cloud-config (1) + run_cmd: + - bash1 + - bash2 + + #cloud-config (2) + run_cmd: + - bash3 + - bash4 + +The previous way of merging the two objects above would result in a final +cloud-config object that contains the following: + +.. code-block:: yaml + + #cloud-config (merged) + run_cmd: + - bash3 + - bash4 + +Typically this is not what users want; instead they would likely prefer: + +.. code-block:: yaml + + #cloud-config (merged) + run_cmd: + - bash1 + - bash2 + - bash3 + - bash4 + +This way makes it easier to combine the various cloud-config objects you have +into a more useful list, thus reducing the duplication necessary to accomplish +the same result with the previous method. + +Customizability +=============== + +Because the above merging algorithm may not always be desired (just as the +previous merging algorithm was not always the preferred one), the concept of +customized merging was introduced through 'merge classes'. + +A merge class is a class definition which provides functions that can be used +to merge a given type with another given type. + +An example of one of these merging classes is the following: + +..
code-block:: python + + class Merger(object): + def __init__(self, merger, opts): + self._merger = merger + self._overwrite = 'overwrite' in opts + + # This merging algorithm will attempt to merge with + # another dictionary; on encountering any other type of object + # it will not merge with said object, but will instead return + # the original value. + # + # On encountering a dictionary, it will create a new dictionary + # composed of the original and the one to merge with. If 'overwrite' + # is enabled then keys that exist in the original will be overwritten + # by keys in the one to merge with (and associated values). Otherwise, + # if not in overwrite mode, the two conflicting values themselves will + # be merged. + def _on_dict(self, value, merge_with): + if not isinstance(merge_with, dict): + return value + merged = dict(value) + for (k, v) in merge_with.items(): + if k in merged: + if not self._overwrite: + merged[k] = self._merger.merge(merged[k], v) + else: + merged[k] = v + else: + merged[k] = v + return merged + +As you can see there is a '_on_dict' method here that will be given a source +value and a value to merge with. The result will be the merged object. This +code itself is called by another merging class which 'directs' the merging to +happen by analyzing the types of the objects to merge and attempting to find a +known merger that will merge that type. I will avoid pasting that here, but it +can be found in the `mergers/__init__.py` file (see `LookupMerger` and +`UnknownMerger`). + +So, following the typical cloud-init way of allowing source code to be +downloaded and used dynamically, it is possible for users to inject their own +merging files to handle specific types of merging as they choose (the basic +ones included will handle lists, dicts, and strings). Note how each merger can +have options associated with it which affect how the merging is performed; for +example, a dictionary merger can be told to overwrite instead of attempting to +merge, or a string merger can be told to append strings instead of discarding +other strings to merge with. + +How to activate +=============== + +There are a few ways to activate the merging algorithms, and to customize them +for your own usage. + +1. The first way involves the usage of MIME messages in cloud-init to specify + multipart documents (this is one way in which multiple cloud-configs are + joined together into a single cloud-config). Two new headers are looked + for, both of which can define the way merging is done (the first header to + exist wins). These new headers (in lookup order) are 'Merge-Type' and + 'X-Merge-Type'. The value should be a string which will satisfy the new + merging format definition (see below for this format). + +2. The second way is actually specifying the merge-type in the body of the + cloud-config dictionary. There are two ways to specify this, either as a + string or as a dictionary (see format below). The keys that are looked up + for this definition are the following (in order): 'merge_how', + 'merge_type'. + +String format +------------- + +The string format that is expected is the following. + +:: + + classname1(option1,option2)+classname2(option3,option4).... + +The class name there will be matched against the merger class names that are +available, and the options provided will be given to that class on +construction.
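To make this concrete, user-data could select its own merge behaviour through the 'merge_how' key described above. The following is a sketch reusing the ``run_cmd`` lists from the overview; the ``extend`` and ``append`` option names match those used in the dictionary format example below, and which options are actually honoured depends on the mergers shipped with your cloud-init version.

.. code-block:: yaml

   #cloud-config
   merge_how: 'list(extend)+dict()+str(append)'
   run_cmd:
    - bash3
    - bash4

When documents are combined as a MIME multipart message, the same string could instead be carried in the 'Merge-Type' or 'X-Merge-Type' header.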
+ +For example, the default string that is used when none is provided is the following: + +:: + + list()+dict()+str() + +Dictionary format +----------------- + +A dictionary can be used when it specifies the same information as the +string format (i.e. the second option above), for example: + +.. code-block:: python + + {'merge_how': [{'name': 'list', 'settings': ['extend']}, + {'name': 'dict', 'settings': []}, + {'name': 'str', 'settings': ['append']}]} + +This is the dictionary-form equivalent of the string +``list(extend)+dict()+str(append)``. + +Specifying multiple types and its effect +======================================== + +Now you may be asking yourself, if I specify a merge-type header or dictionary +for every cloud-config that I provide, what exactly happens? + +The answer is that when merging, a stack of 'merging classes' is kept. The +first entry on that stack is the default set of merging classes, which is used +when the first cloud-config is merged with the initial empty cloud-config +dictionary. If the cloud-config that was just merged provided a set of merging +classes (via the above formats) then those merging classes will be pushed onto +the stack. Now if there is a second cloud-config to be merged, the merging +classes provided by the preceding cloud-config (not the default) will be used, +and so on. This way a cloud-config can decide how it will merge with a +cloud-config dictionary coming after it. + +Other uses +========== + +In addition to being used for merging user-data sections, the default merging +algorithm for merging 'conf.d' yaml files (which form an initial yaml config +for cloud-init) was also changed to use this mechanism, so its full +benefits (and customization) are available there as well. Other places that +used the previous merging are also, similarly, now extensible (metadata +merging, for example). + +Note, however, that merge algorithms are not used *across* types of +configuration. As was the case before merging was implemented, +user-data will overwrite conf.d configuration without merging. + .. vi: textwidth=78 diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/modules.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/modules.rst --- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/modules.rst 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/modules.rst 2017-05-26 18:36:38.000000000 +0000 @@ -44,6 +44,7 @@ .. automodule:: cloudinit.config.cc_set_hostname .. automodule:: cloudinit.config.cc_set_passwords .. automodule:: cloudinit.config.cc_snappy +.. automodule:: cloudinit.config.cc_snap_config .. automodule:: cloudinit.config.cc_spacewalk .. automodule:: cloudinit.config.cc_ssh .. automodule:: cloudinit.config.cc_ssh_authkey_fingerprints diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/network-config-format-eni.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/network-config-format-eni.rst --- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/network-config-format-eni.rst 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/network-config-format-eni.rst 2017-05-26 18:36:38.000000000 +0000 @@ -0,0 +1,20 @@ +.. _network_config_eni: + +Network Configuration ENI (Legacy) +---------------------------------- + +`Cloud-init`_ supports reading and writing network config in the ``ENI`` +format which is consumed by the ``ifupdown`` tool to parse and apply network +configuration. + +As an input format, this is **legacy**.
In cases where the ENI format and another format are both available, cloud-init will prefer to use the other format. +This can happen in either the :ref:`datasource_nocloud` or +:ref:`datasource_openstack` datasources. + +Please reference the existing `documentation`_ for the +``/etc/network/interfaces(5)`` format. + +.. _Cloud-init: https://launchpad.net/cloud-init +.. _documentation: http://manpages.ubuntu.com/manpages/trusty/en/man5/interfaces.5.html +.. vi: textwidth=78 diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/network-config-format-v1.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/network-config-format-v1.rst --- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/network-config-format-v1.rst 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/network-config-format-v1.rst 2017-05-26 18:36:38.000000000 +0000 @@ -0,0 +1,563 @@ +.. _network_config_v1: + +Networking Config Version 1 +=========================== + +This network configuration format lets users customize their instance's +networking interfaces by assigning subnet configuration, virtual device +creation (bonds, bridges, vlans), routes and DNS configuration. + +Required elements of a Network Config Version 1 are ``config`` and +``version``. + +Cloud-init will read this format from system config. +For example the following could be present in +``/etc/cloud/cloud.cfg.d/custom-networking.cfg``: + +.. code-block:: yaml + + network: + version: 1 + config: + - type: physical + name: eth0 + subnets: + - type: dhcp + +The :ref:`datasource_nocloud` datasource can also provide cloud-init +networking configuration in this format. + +Configuration Types +------------------- +Within the network ``config`` portion, users include a list of configuration +types. The current list of supported ``type`` values is as follows: + +- Physical (``physical``) +- Bond (``bond``) +- Bridge (``bridge``) +- VLAN (``vlan``) +- Nameserver (``nameserver``) +- Route (``route``) + +Physical, Bond, Bridge and VLAN types may also include IP configuration under +the key ``subnets``. + +- Subnet/IP (``subnets``) + + +Physical +~~~~~~~~ +The ``physical`` type configuration represents a "physical" network device, +typically Ethernet-based. At least one of these entries is required for +external network connectivity. Type ``physical`` requires only one key: +``name``. A ``physical`` device may contain some or all of the following +keys: + +**name**: *<desired device name>* + +A device's name must be less than 15 characters. Names exceeding the maximum +will be truncated. This is a limitation of the Linux kernel network-device +structure. + +**mac_address**: *<MAC Address>* + +The MAC Address is a device-unique identifier that most Ethernet-based network +devices possess. Specifying a MAC Address is optional. + + +.. note:: + + Cloud-init will handle the persistent mapping between a + device's ``name`` and the ``mac_address``. + +**mtu**: *<MTU SizeBytes>* + +The MTU key represents a device's Maximum Transmission Unit, the largest size +packet or frame, specified in octets (eight-bit bytes), that can be sent in a +packet- or frame-based network. Specifying ``mtu`` is optional. + +.. note:: + + The range of MTU values supported by a device is not available at + configuration time. It is possible to specify a value too large or too + small for a device; such a value may be ignored by the device.
+ + +**Physical Example**:: + + network: + version: 1 + config: + # Simple network adapter + - type: physical + name: interface0 + mac_address: 00:11:22:33:44:55 + # Second nic with Jumbo frames + - type: physical + name: jumbo0 + mac_address: aa:11:22:33:44:55 + mtu: 9000 + # 10G pair + - type: physical + name: gbe0 + mac_address: cd:11:22:33:44:00 + - type: physical + name: gbe1 + mac_address: cd:11:22:33:44:02 + +Bond +~~~~ +A ``bond`` type will configure a Linux software Bond with one or more network +devices. A ``bond`` type requires the following keys: + +**name**: *<desired device name>* + +A device's name must be less than 15 characters. Names exceeding the maximum +will be truncated. This is a limitation of the Linux kernel network-device +structure. + +**mac_address**: *<MAC Address>* + +When specifying a MAC Address on a bond, this value will be assigned to the +bond device and may differ from the MAC address of any of the underlying +bond interfaces. Specifying a MAC Address is optional. If ``mac_address`` is +not present, then the bond will use one of the MAC Address values from one of +the bond interfaces. + + +**bond_interfaces**: *<List of network device names>* + +The ``bond_interfaces`` key accepts a list of network device ``name`` values +from the configuration. This list may be empty. + +**params**: *<Dictionary of key: value bonding parameter pairs>* + +The ``params`` key in a bond holds a dictionary of bonding parameters. +This dictionary may be empty. For more details on what the various bonding +parameters mean, please read the Linux kernel's Bonding.txt documentation. + +Valid ``params`` keys are: + + - ``active_slave``: Set bond attribute + - ``ad_actor_key``: Set bond attribute + - ``ad_actor_sys_prio``: Set bond attribute + - ``ad_actor_system``: Set bond attribute + - ``ad_aggregator``: Set bond attribute + - ``ad_num_ports``: Set bond attribute + - ``ad_partner_key``: Set bond attribute + - ``ad_partner_mac``: Set bond attribute + - ``ad_select``: Set bond attribute + - ``ad_user_port_key``: Set bond attribute + - ``all_slaves_active``: Set bond attribute + - ``arp_all_targets``: Set bond attribute + - ``arp_interval``: Set bond attribute + - ``arp_ip_target``: Set bond attribute + - ``arp_validate``: Set bond attribute + - ``downdelay``: Set bond attribute + - ``fail_over_mac``: Set bond attribute + - ``lacp_rate``: Set bond attribute + - ``lp_interval``: Set bond attribute + - ``miimon``: Set bond attribute + - ``mii_status``: Set bond attribute + - ``min_links``: Set bond attribute + - ``mode``: Set bond attribute + - ``num_grat_arp``: Set bond attribute + - ``num_unsol_na``: Set bond attribute + - ``packets_per_slave``: Set bond attribute + - ``primary``: Set bond attribute + - ``primary_reselect``: Set bond attribute + - ``queue_id``: Set bond attribute + - ``resend_igmp``: Set bond attribute + - ``slaves``: Set bond attribute + - ``tlb_dynamic_lb``: Set bond attribute + - ``updelay``: Set bond attribute + - ``use_carrier``: Set bond attribute + - ``xmit_hash_policy``: Set bond attribute + +**Bond Example**:: + + network: + version: 1 + config: + # Simple network adapter + - type: physical + name: interface0 + mac_address: 00:11:22:33:44:55 + # 10G pair + - type: physical + name: gbe0 + mac_address: cd:11:22:33:44:00 + - type: physical + name: gbe1 + mac_address: cd:11:22:33:44:02 + - type: bond + name: bond0 + bond_interfaces: + - gbe0 + - gbe1 + params: + bond-mode: active-backup + +Bridge +~~~~~~ +Type ``bridge`` requires the following keys: + +- ``name``: Set the name of the bridge. +- ``bridge_interfaces``: Specify the ports of a bridge via their ``name``.
+ This list may be empty. +- ``params``: A dictionary of bridge params. For more details, please read the + bridge-utils-interfaces manpage. + +Valid keys are: + + - ``bridge_ageing``: Set the bridge's ageing value. + - ``bridge_bridgeprio``: Set the bridge device network priority. + - ``bridge_fd``: Set the bridge's forward delay. + - ``bridge_hello``: Set the bridge's hello value. + - ``bridge_hw``: Set the bridge's MAC address. + - ``bridge_maxage``: Set the bridge's maxage value. + - ``bridge_maxwait``: Set how long network scripts should wait for the + bridge to be up. + - ``bridge_pathcost``: Set the cost of a specific port on the bridge. + - ``bridge_portprio``: Set the priority of a specific port on the bridge. + - ``bridge_ports``: List of devices that are part of the bridge. + - ``bridge_stp``: Set spanning tree protocol on or off. + - ``bridge_waitport``: Set amount of time in seconds to wait on specific + ports to become available. + + +**Bridge Example**:: + + network: + version: 1 + config: + # Simple network adapter + - type: physical + name: interface0 + mac_address: 00:11:22:33:44:55 + # Second nic with Jumbo frames + - type: physical + name: jumbo0 + mac_address: aa:11:22:33:44:55 + mtu: 9000 + - type: bridge + name: br0 + bridge_interfaces: + - jumbo0 + params: + bridge_ageing: 250 + bridge_bridgeprio: 22 + bridge_fd: 1 + bridge_hello: 1 + bridge_maxage: 10 + bridge_maxwait: 0 + bridge_pathcost: + - jumbo0 75 + bridge_portprio: + - jumbo0 28 + bridge_stp: 'off' + bridge_waitport: + - jumbo0 0 + + +VLAN +~~~~ +Type ``vlan`` requires the following keys: + +- ``name``: Set the name of the VLAN. +- ``vlan_link``: Specify the underlying link via its ``name``. +- ``vlan_id``: Specify the VLAN numeric id. + +**VLAN Example**:: + + network: + version: 1 + config: + # Physical interfaces. + - type: physical + name: eth0 + mac_address: "c0:d6:9f:2c:e8:80" + # VLAN interface. + - type: vlan + name: eth0.101 + vlan_link: eth0 + vlan_id: 101 + mtu: 1500 + +Nameserver +~~~~~~~~~~ + +Users can specify a ``nameserver`` type. Nameserver dictionaries include +the following keys: + +- ``address``: List of IPv4 or IPv6 addresses of nameservers. +- ``search``: List of hostnames to include in the resolv.conf search path. + +**Nameserver Example**:: + + network: + version: 1 + config: + - type: physical + name: interface0 + mac_address: 00:11:22:33:44:55 + subnets: + - type: static + address: 192.168.23.14/27 + gateway: 192.168.23.1 + - type: nameserver + address: + - 192.168.23.2 + - 8.8.8.8 + search: + - exemplary + + + +Route +~~~~~ + +Users can include static routing information as well. A ``route`` dictionary +has the following keys: + +- ``destination``: IPv4 network address with CIDR netmask notation. +- ``gateway``: IPv4 gateway address. +- ``metric``: Integer which sets the network metric value for this route. + +**Route Example**:: + + network: + version: 1 + config: + - type: physical + name: interface0 + mac_address: 00:11:22:33:44:55 + subnets: + - type: static + address: 192.168.23.14/24 + gateway: 192.168.23.1 + - type: route + destination: 192.168.24.0/24 + gateway: 192.168.24.1 + metric: 3 + +Subnet/IP +~~~~~~~~~ + +For any network device (one of the Config Types) users can define a list of +``subnets`` which contain ip configuration dictionaries. Multiple subnet +entries will create interface aliases, allowing a single interface to use +different ip configurations.
+ +Valid keys for ``subnets`` include the following: + +- ``type``: Specify the subnet type. +- ``control``: Specify manual, auto or hotplug. Indicates how the interface + will be handled during boot. +- ``address``: IPv4 or IPv6 address. It may include CIDR netmask notation. +- ``netmask``: IPv4 subnet mask in dotted format or CIDR notation. +- ``gateway``: IPv4 address of the default gateway for this subnet. +- ``dns_nameservers``: Specify a list of IPv4 dns server IPs to end up in + resolv.conf. +- ``dns_search``: Specify a list of search paths to be included in + resolv.conf. +- ``routes``: Specify a list of routes for a given interface. + + +Subnet types are one of the following: + +- ``dhcp4``: Configure this interface with IPv4 dhcp. +- ``dhcp``: Alias for ``dhcp4``. +- ``dhcp6``: Configure this interface with IPv6 dhcp. +- ``static``: Configure this interface with a static IPv4 address. +- ``static6``: Configure this interface with a static IPv6 address. + +When making use of ``dhcp`` types, no additional configuration is needed in +the subnet dictionary. + + +**Subnet DHCP Example**:: + + network: + version: 1 + config: + - type: physical + name: interface0 + mac_address: 00:11:22:33:44:55 + subnets: + - type: dhcp + + +**Subnet Static Example**:: + + network: + version: 1 + config: + - type: physical + name: interface0 + mac_address: 00:11:22:33:44:55 + subnets: + - type: static + address: 192.168.23.14/27 + gateway: 192.168.23.1 + dns_nameservers: + - 192.168.23.2 + - 8.8.8.8 + dns_search: + - exemplary.maas + +The following will result in an ``interface0`` using DHCP and ``interface0:1`` +using the static subnet configuration. + +**Multiple subnet Example**:: + + network: + version: 1 + config: + - type: physical + name: interface0 + mac_address: 00:11:22:33:44:55 + subnets: + - type: dhcp + - type: static + address: 192.168.23.14/27 + gateway: 192.168.23.1 + dns_nameservers: + - 192.168.23.2 + - 8.8.8.8 + dns_search: + - exemplary + +**Subnet with routes Example**:: + + network: + version: 1 + config: + - type: physical + name: interface0 + mac_address: 00:11:22:33:44:55 + subnets: + - type: dhcp + - type: static + address: 10.184.225.122 + netmask: 255.255.255.252 + routes: + - gateway: 10.184.225.121 + netmask: 255.240.0.0 + network: 10.176.0.0 + - gateway: 10.184.225.121 + netmask: 255.240.0.0 + network: 10.208.0.0 + + +Multi-layered configurations +---------------------------- + +Complex networking sometimes uses layers of configuration. The syntax allows +users to build those layers one at a time. All of the virtual network devices +supported allow specifying an underlying device by their ``name`` value. + +**Bonded VLAN Example**:: + + network: + version: 1 + config: + # 10G pair + - type: physical + name: gbe0 + mac_address: cd:11:22:33:44:00 + - type: physical + name: gbe1 + mac_address: cd:11:22:33:44:02 + # Bond. + - type: bond + name: bond0 + bond_interfaces: + - gbe0 + - gbe1 + params: + bond-mode: 802.3ad + bond-lacp-rate: fast + # A Bond VLAN. + - type: vlan + name: bond0.200 + vlan_link: bond0 + vlan_id: 200 + subnets: + - type: dhcp4 + +More Examples +------------- +Some more examples to explore the various options available.
+ +**Multiple VLAN example**:: + + network: + version: 1 + config: + - id: eth0 + mac_address: d4:be:d9:a8:49:13 + mtu: 1500 + name: eth0 + subnets: + - address: 10.245.168.16/21 + dns_nameservers: + - 10.245.168.2 + gateway: 10.245.168.1 + type: static + type: physical + - id: eth1 + mac_address: d4:be:d9:a8:49:15 + mtu: 1500 + name: eth1 + subnets: + - address: 10.245.188.2/24 + dns_nameservers: [] + type: static + type: physical + - id: eth1.2667 + mtu: 1500 + name: eth1.2667 + subnets: + - address: 10.245.184.2/24 + dns_nameservers: [] + type: static + type: vlan + vlan_id: 2667 + vlan_link: eth1 + - id: eth1.2668 + mtu: 1500 + name: eth1.2668 + subnets: + - address: 10.245.185.1/24 + dns_nameservers: [] + type: static + type: vlan + vlan_id: 2668 + vlan_link: eth1 + - id: eth1.2669 + mtu: 1500 + name: eth1.2669 + subnets: + - address: 10.245.186.1/24 + dns_nameservers: [] + type: static + type: vlan + vlan_id: 2669 + vlan_link: eth1 + - id: eth1.2670 + mtu: 1500 + name: eth1.2670 + subnets: + - address: 10.245.187.2/24 + dns_nameservers: [] + type: static + type: vlan + vlan_id: 2670 + vlan_link: eth1 + - address: 10.245.168.2 + search: + - dellstack + type: nameserver + +.. vi: textwidth=78 diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/network-config-format-v2.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/network-config-format-v2.rst --- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/network-config-format-v2.rst 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/network-config-format-v2.rst 2017-05-26 18:36:38.000000000 +0000 @@ -0,0 +1,503 @@ +.. _network_config_v2: + +Networking Config Version 2 +=========================== + +Cloud-init's support for Version 2 network config is a subset of the +version 2 format defined for the `netplan`_ tool. Cloud-init supports +both reading and writing of Version 2; the latter support requires a +distro with `netplan`_ present. + +The ``network`` key has at least two required elements. First, +it must include ``version: 2``, and second, one or more of the possible device +``types``. + +Cloud-init will read this format from system config. +For example the following could be present in +``/etc/cloud/cloud.cfg.d/custom-networking.cfg``:: + + network: + version: 2 + ethernets: [] + +It may also be provided in other locations including the +:ref:`datasource_nocloud` datasource; see :ref:`default_behavior` for other +places. + +Supported device ``types`` values are as follows: + +- Ethernets (``ethernets``) +- Bonds (``bonds``) +- Bridges (``bridges``) +- VLANs (``vlans``) + +Each type block contains device definitions as a map whose keys are called +"configuration IDs". Each entry under the ``types`` may include IP and/or +device configuration. + +Cloud-init does not currently support the ``wifis`` type that is present in +native `netplan`_. + + +Device configuration IDs +------------------------ + +The key names below the per-device-type definition maps (like ``ethernets:``) +are called "IDs". They must be unique throughout the entire set of +configuration files. Their primary purpose is to serve as anchor names for +composite devices, for example to enumerate the members of a bridge that is +currently being defined. + +There are two physically/structurally different classes of device definitions, +and the ID field has a different interpretation for each: + +Physical devices + +: (Examples: ethernet, wifi) These can dynamically come and go between + reboots and even during runtime (hotplugging).
In the generic case, they + can be selected by ``match:`` rules on desired properties, such as name/name + pattern, MAC address, driver, or device paths. In general these will match + any number of devices (unless they refer to properties which are unique + such as the full path or MAC address), so without further knowledge about + the hardware these will always be considered as a group. + + It is valid to specify no match rules at all, in which case the ID field is + simply the interface name to be matched. This is mostly useful if you want + to keep simple cases simple, and it's how network device configuration has + been done for a long time. + + If there are ``match``: rules, then the ID field is a purely opaque name + which is only being used for references from definitions of compound + devices in the config. + +Virtual devices + +: (Examples: veth, bridge, bond) These are fully under the control of the + config file(s) and the network stack. I. e. these devices are being created + instead of matched. Thus ``match:`` and ``set-name:`` are not applicable for + these, and the ID field is the name of the created virtual device. + +Common properties for physical device types +------------------------------------------- + +**match**: *<(mapping)>* + +This selects a subset of available physical devices by various hardware +properties. The following configuration will then apply to all matching +devices, as soon as they appear. *All* specified properties must match. +The following properties for creating matches are supported: + +**name**: *<(scalar)>* + +Current interface name. Globs are supported, and the primary use case +for matching on names, as selecting one fixed name can be more easily +achieved with having no ``match:`` at all and just using the ID (see +above). Note that currently only networkd supports globbing, +NetworkManager does not. + +**macaddress**: *<(scalar)>* + +Device's MAC address in the form "XX:XX:XX:XX:XX:XX". Globs are not allowed. + +**driver**: *<(scalar)>* + +Kernel driver name, corresponding to the ``DRIVER`` udev property. Globs are +supported. Matching on driver is *only* supported with networkd. + +**Examples**:: + + # all cards on second PCI bus + match: + name: enp2* + + # fixed MAC address + match: + macaddress: 11:22:33:AA:BB:FF + + # first card of driver ``ixgbe`` + match: + driver: ixgbe + name: en*s0 + +**set-name**: *<(scalar)>* + +When matching on unique properties such as path or MAC, or with additional +assumptions such as "there will only ever be one wifi device", +match rules can be written so that they only match one device. Then this +property can be used to give that device a more specific/desirable/nicer +name than the default from udev’s ifnames. Any additional device that +satisfies the match rules will then fail to get renamed and keep the +original kernel name (and dmesg will show an error). + +**wakeonlan**: *<(bool)>* + +Enable wake on LAN. Off by default. + + +Common properties for all device types +-------------------------------------- + +**renderer**: *<(scalar)>* + +Use the given networking backend for this definition. Currently supported are +``networkd`` and ``NetworkManager``. This property can be specified globally +in ``networks:``, for a device type (in e. g. ``ethernets:``) or +for a particular device definition. Default is ``networkd``. + +.. note:: + + Cloud-init only supports networkd backend if rendering version2 config + to the instance. + +**dhcp4**: *<(bool)>* + +Enable DHCP for IPv4. Off by default. 
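Tying the preceding properties together with ``dhcp4``, here is a sketch of a physical device selected by MAC address, renamed with ``set-name``, and configured for DHCP; the MAC value and the ``id0``/``lan0`` names are purely illustrative: ::

    network:
      version: 2
      ethernets:
        id0:
          match:
            macaddress: "00:11:22:33:44:55"
          set-name: lan0
          wakeonlan: true
          dhcp4: true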
+ +**dhcp6**: *<(bool)>* + +Enable DHCP for IPv6. Off by default. + +**addresses**: *<(sequence of scalars)>* + +Add static addresses to the interface in addition to the ones received +through DHCP or RA. Each sequence entry is in CIDR notation, i. e. of the +form ``addr/prefixlen`` . ``addr`` is an IPv4 or IPv6 address as recognized +by ``inet_pton``(3) and ``prefixlen`` the number of bits of the subnet. + +Example: ``addresses: [192.168.14.2/24, 2001:1::1/64]`` + +**gateway4**: or **gateway6**: *<(scalar)>* + +Set default gateway for IPv4/6, for manual address configuration. This +requires setting ``addresses`` too. Gateway IPs must be in a form +recognized by ``inet_pton(3)`` + +Example for IPv4: ``gateway4: 172.16.0.1`` +Example for IPv6: ``gateway6: 2001:4::1`` + +**nameservers**: *<(mapping)>* + +Set DNS servers and search domains, for manual address configuration. There +are two supported fields: ``addresses:`` is a list of IPv4 or IPv6 addresses +similar to ``gateway*``, and ``search:`` is a list of search domains. + +Example: :: + + nameservers: + search: [lab, home] + addresses: [8.8.8.8, FEDC::1] + +**routes**: *<(sequence of mapping)>* + +Add device specific routes. Each mapping includes a ``to``, ``via`` key +with an IPv4 or IPv6 address as value. ``metric`` is an optional value. + +Example: :: + + routes: + - to: 0.0.0.0/0 + via: 10.23.2.1 + metric: 3 + +Ethernets +~~~~~~~~~ +Ethernet device definitions do not support any specific properties beyond the +common ones described above. + +Bonds +~~~~~ + +**interfaces** *<(sequence of scalars)>* + +All devices matching this ID list will be added to the bond. + +Example: :: + + ethernets: + switchports: + match: {name: "enp2*"} + [...] + bonds: + bond0: + interfaces: [switchports] + +**parameters**: *<(mapping)>* + +Customization parameters for special bonding options. Time values are specified +in seconds unless otherwise specified. + +**mode**: *<(scalar)>* + +Set the bonding mode used for the interfaces. The default is +``balance-rr`` (round robin). Possible values are ``balance-rr``, +``active-backup``, ``balance-xor``, ``broadcast``, ``802.3ad``, +``balance-tlb``, and ``balance-alb``. + +**lacp-rate**: *<(scalar)>* + +Set the rate at which LACPDUs are transmitted. This is only useful +in 802.3ad mode. Possible values are ``slow`` (30 seconds, default), +and ``fast`` (every second). + +**mii-monitor-interval**: *<(scalar)>* + +Specifies the interval for MII monitoring (verifying if an interface +of the bond has carrier). The default is ``0``; which disables MII +monitoring. + +**min-links**: *<(scalar)>* + +The minimum number of links up in a bond to consider the bond +interface to be up. + +**transmit-hash-policy**: <*(scalar)>* + +Specifies the transmit hash policy for the selection of slaves. This +is only useful in balance-xor, 802.3ad and balance-tlb modes. +Possible values are ``layer2``, ``layer3+4``, ``layer2+3``, +``encap2+3``, and ``encap3+4``. + +**ad-select**: <*(scalar)>* + +Set the aggregation selection mode. Possible values are ``stable``, +``bandwidth``, and ``count``. This option is only used in 802.3ad mode. + +**all-slaves-active**: <*(bool)>* + +If the bond should drop duplicate frames received on inactive ports, +set this option to ``false``. If they should be delivered, set this +option to ``true``. The default value is false, and is the desirable +behavior in most situations. + +**arp-interval**: <*(scalar)>* + +Set the interval value for how frequently ARP link monitoring should +happen. 
The default value is ``0``, which disables ARP monitoring. + +**arp-ip-targets**: <*(sequence of scalars)>* + +IPs of other hosts on the link which should be sent ARP requests in +order to validate that a slave is up. This option is only used when +``arp-interval`` is set to a value other than ``0``. At least one IP +address must be given for ARP link monitoring to function. Only IPv4 +addresses are supported. You can specify up to 16 IP addresses. The +default value is an empty list. + +**arp-validate**: <*(scalar)>* + +Configure how ARP replies are to be validated when using ARP link +monitoring. Possible values are ``none``, ``active``, ``backup``, +and ``all``. + +**arp-all-targets**: <*(scalar)>* + +Specify whether to use any ARP IP target being up as sufficient for +a slave to be considered up; or if all the targets must be up. This +is only used for ``active-backup`` mode when ``arp-validate`` is +enabled. Possible values are ``any`` and ``all``. + +**up-delay**: <*(scalar)>* + +Specify the delay before enabling a link once the link is physically +up. The default value is ``0``. + +**down-delay**: <*(scalar)>* + +Specify the delay before disabling a link once the link has been +lost. The default value is ``0``. + +**fail-over-mac-policy**: <*(scalar)>* + +Set whether to set all slaves to the same MAC address when adding +them to the bond, or how else the system should handle MAC addresses. +The possible values are ``none``, ``active``, and ``follow``. + +**gratuitious-arp**: <*(scalar)>* + +Specify how many ARP packets to send after failover. Once a link is +up on a new slave, a notification is sent and possibly repeated if +this value is set to a number greater than ``1``. The default value +is ``1`` and valid values are between ``1`` and ``255``. This only +affects ``active-backup`` mode. + +**packets-per-slave**: <*(scalar)>* + +In ``balance-rr`` mode, specifies the number of packets to transmit +on a slave before switching to the next. When this value is set to +``0``, slaves are chosen at random. Allowable values are between +``0`` and ``65535``. The default value is ``1``. This setting is +only used in ``balance-rr`` mode. + +**primary-reselect-policy**: <*(scalar)>* + +Set the reselection policy for the primary slave. On failure of the +active slave, the system will use this policy to decide how the new +active slave will be chosen and how recovery will be handled. The +possible values are ``always``, ``better``, and ``failure``. + +**learn-packet-interval**: <*(scalar)>* + +Specify the interval between sending learning packets to each slave. +The value range is between ``1`` and ``0x7fffffff``. The default +value is ``1``. This option only affects ``balance-tlb`` and +``balance-alb`` modes. + + +Bridges +~~~~~~~ + +**interfaces**: <*(sequence of scalars)>* + +All devices matching this ID list will be added to the bridge. + +Example: :: + + ethernets: + switchports: + match: {name: "enp2*"} + [...] + bridges: + br0: + interfaces: [switchports] + +**parameters**: <*(mapping)>* + +Customization parameters for special bridging options. Time values are specified +in seconds unless otherwise specified. + +**ageing-time**: <*(scalar)>* + +Set the period of time to keep a MAC address in the forwarding +database after a packet is received. + +**priority**: <*(scalar)>* + +Set the priority value for the bridge. This value should be an +number between ``0`` and ``65535``. Lower values mean higher +priority. The bridge with the higher priority will be elected as +the root bridge. 
+ +**forward-delay**: <*(scalar)>* + +Specify the period of time the bridge will remain in Listening and +Learning states before getting to the Forwarding state. This value +should be set in seconds for the systemd backend, and in milliseconds +for the NetworkManager backend. + +**hello-time**: <*(scalar)>* + +Specify the interval between two hello packets being sent out from +the root and designated bridges. Hello packets communicate +information about the network topology. + +**max-age**: <*(scalar)>* + +Set the maximum age of a hello packet. If the last hello packet is +older than that value, the bridge will attempt to become the root +bridge. + +**path-cost**: <*(scalar)>* + +Set the cost of a path on the bridge. Faster interfaces should have +a lower cost. This allows a finer control on the network topology +so that the fastest paths are available whenever possible. + +**stp**: <*(bool)>* + +Define whether the bridge should use Spanning Tree Protocol. The +default value is "true", which means that Spanning Tree should be +used. + + +VLANs +~~~~~ + +**id**: <*(scalar)>* + +VLAN ID, a number between 0 and 4094. + +**link**: <*(scalar)>* + +ID of the underlying device definition on which this VLAN gets +created. + +Example: :: + + ethernets: + eno1: {...} + vlans: + en-intra: + id: 1 + link: eno1 + dhcp4: yes + en-vpn: + id: 2 + link: eno1 + address: ... + + +Examples +-------- +Configure an ethernet device with networkd, identified by its name, and enable +DHCP: :: + + network: + version: 2 + ethernets: + eno1: + dhcp4: true + +This is a complex example which shows most available features: :: + + network: + version: 2 + ethernets: + # opaque ID for physical interfaces, only referred to by other stanzas + id0: + match: + macaddress: 00:11:22:33:44:55 + wakeonlan: true + dhcp4: true + addresses: + - 192.168.14.2/24 + - 2001:1::1/64 + gateway4: 192.168.14.1 + gateway6: 2001:1::2 + nameservers: + search: [foo.local, bar.local] + addresses: [8.8.8.8] + lom: + match: + driver: ixgbe + # you are responsible for setting tight enough match rules + # that only match one device if you use set-name + set-name: lom1 + dhcp6: true + switchports: + # all cards on second PCI bus; unconfigured by themselves, will be added + # to br0 below + match: + name: enp2* + mtu: 1280 + bonds: + bond0: + interfaces: [id0, lom] + bridges: + # the key name is the name for virtual (created) interfaces; no match: and + # set-name: allowed + br0: + # IDs of the components; switchports expands into multiple interfaces + interfaces: [wlp1s0, switchports] + dhcp4: true + vlans: + en-intra: + id: 1 + link: id0 + dhcp4: yes + # static routes + routes: + - to: 0.0.0.0/0 + via: 11.0.0.1 + metric: 3 + +.. _netplan: https://launchpad.net/netplan +.. vi: textwidth=78 diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/network-config.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/network-config.rst --- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/network-config.rst 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/network-config.rst 2017-05-26 18:36:38.000000000 +0000 @@ -0,0 +1,254 @@ +********************* +Network Configuration +********************* + +- Default Behavior +- Disabling Network Configuration +- Fallback Networking +- Network Configuration Sources +- Network Configuration Outputs +- Network Output Policy +- Network Configuration Tools +- Examples + +.. 
_default_behavior: + +Default Behavior +================ + +`Cloud-init`_ searches for network configuration in order of increasing +precedence, with each item overriding the previous. + +**Datasource** + +For example, OpenStack may provide network config in the MetaData Service. + +**System Config** + +A ``network:`` entry in /etc/cloud/cloud.cfg.d/* configuration files. + +**Kernel Command Line** + +``ip=`` or ``network-config=`` + +User-data cannot change an instance's network configuration. In the absence +of network configuration in any of the above sources, `Cloud-init`_ will +write out a network configuration that will issue a DHCP request on a "first" +network interface. + + +Disabling Network Configuration +=============================== + +Users may disable `Cloud-init`_ 's network configuration capability and rely +on other methods, such as embedded configuration or other customizations. + +`Cloud-init`_ supports the following methods for disabling its network +configuration. + + +**Kernel Command Line** + +`Cloud-init`_ will check for the parameter ``network-config``, whose value is +expected to be a YAML string in the :ref:`network_config_v1` format. +The YAML string may optionally be ``Base64`` encoded, and optionally +compressed with ``gzip``. + +An example kernel command line entry to disable network configuration: :: + + network-config={config: disabled} + + +**cloud config** + +In the combined cloud-init configuration dictionary: :: + + network: + config: disabled + +If `Cloud-init`_ 's networking config has not been disabled, and +no other network information is found, then it will proceed +to generate a fallback networking configuration. + + +Fallback Network Configuration +============================== + +`Cloud-init`_ will attempt to determine which of any attached network devices +is most likely to have a connection and then generate a network +configuration to issue a DHCP request on that interface. + +`Cloud-init`_ runs during early boot and does not expect composed network +devices (such as Bridges) to be available. `Cloud-init`_ does not consider +the following interface devices as likely 'first' network interfaces for +fallback configuration; they are filtered out from being selected. + +- **loopback**: ``name=lo`` +- **Virtual Ethernet**: ``name=veth*`` +- **Software Bridges**: ``type=bridge`` +- **Software VLANs**: ``type=vlan`` + + +`Cloud-init`_ will prefer network interfaces that indicate they are connected +via the Linux ``carrier`` flag being set. If no interfaces are marked +connected, then all unfiltered interfaces are potential connections. + +Of the potential interfaces, `Cloud-init`_ will attempt to pick the "right" +interface given the information it has available. + +Finally, after selecting the "right" interface, a configuration is +generated and applied to the system. + + +Network Configuration Sources +============================= + +`Cloud-init`_ accepts a number of different network configuration formats in +support of different cloud substrates. The `Cloud-init`_ Datasource for each +of these clouds will detect and consume its Datasource-specific network +configuration format for use when writing an instance's network +configuration.
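For reference, the fallback behaviour described above amounts to a configuration of roughly the following shape, expressed in :ref:`network_config_v1` form (a sketch; the interface name is illustrative, and the exact rendered output depends on the selected renderer): ::

    network:
      version: 1
      config:
        - type: physical
          name: eth0
          subnets:
            - type: dhcp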
+ +The following Datasources optionally provide network configuration: + +- :ref:`datasource_config_drive` + + - `OpenStack Metadata Service Network`_ + - :ref:`network_config_eni` + +- :ref:`datasource_digital_ocean` + + - `DigitalOcean JSON metadata`_ + +- :ref:`datasource_nocloud` + + - :ref:`network_config_v1` + - :ref:`network_config_v2` + - :ref:`network_config_eni` + +- :ref:`datasource_opennebula` + + - :ref:`network_config_eni` + +- :ref:`datasource_openstack` + + - :ref:`network_config_eni` + - `OpenStack Metadata Service Network`_ + +- :ref:`datasource_smartos` + + - `SmartOS JSON Metadata`_ + +For more information on network configuration formats, see: + +.. toctree:: + :maxdepth: 1 + + network-config-format-eni.rst + network-config-format-v1.rst + network-config-format-v2.rst + + +Network Configuration Outputs +============================= + +`Cloud-init`_ converts various forms of user supplied or automatically +generated configuration into an internal network configuration state. From +this state, `Cloud-init`_ delegates rendering of the configuration to +distro-supported formats. The following ``renderers`` are supported in +cloud-init: + +- **ENI** + +/etc/network/interfaces or ``ENI`` is supported by the ``ifupdown`` package +found in Ubuntu and Debian. + +- **Netplan** + +Since Ubuntu 16.10, codename Yakkety, the ``netplan`` project has been an +optional network configuration tool which consumes :ref:`network_config_v2` +input and renders network configuration for supported backends such as +``systemd-networkd`` and ``NetworkManager``. + +- **Sysconfig** + +The Sysconfig format is used by RHEL, CentOS, Fedora, and other derivatives. + + +Network Output Policy +===================== + +The default policy for selecting a network ``renderer`` in order of preference +is as follows: + +- ENI +- Sysconfig +- Netplan + +When applying the policy, `Cloud-init`_ checks if the current instance has the +correct binaries and paths to support the renderer. The first renderer that +can be used is selected. Users may override the network renderer policy by +supplying an updated configuration in cloud-config. :: + + system_info: + network: + renderers: ['netplan', 'eni', 'sysconfig'] + + +Network Configuration Tools +=========================== + +`Cloud-init`_ contains one tool used to test input/output conversion between +formats. The ``tools/net-convert.py`` script in the `Cloud-init`_ source +repository is helpful for examining expected output for a given input format. + +CLI Interface: + +.. code-block:: bash + + % tools/net-convert.py --help + usage: net-convert.py [-h] --network-data PATH --kind + {eni,network_data.json,yaml} -d PATH [-m name,mac] + --output-kind {eni,netplan,sysconfig} + + optional arguments: + -h, --help show this help message and exit + --network-data PATH, -p PATH + --kind {eni,network_data.json,yaml}, -k {eni,network_data.json,yaml} + -d PATH, --directory PATH + directory to place output in + -m name,mac, --mac name,mac + interface name to mac mapping + --output-kind {eni,netplan,sysconfig}, -ok {eni,netplan,sysconfig} + + +Example conversion of V2 config to sysconfig output: + +.. code-block:: bash + + % tools/net-convert.py --network-data v2.yaml --kind yaml \ + --output-kind sysconfig -d target + % cat target/etc/sysconfig/network-scripts/ifcfg-eth* + # Created by cloud-init on instance boot automatically, do not edit.
+ # + BOOTPROTO=static + DEVICE=eth7 + IPADDR=192.168.1.5/255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + # Created by cloud-init on instance boot automatically, do not edit. + # + BOOTPROTO=dhcp + DEVICE=eth9 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + + +.. _Cloud-init: https://launchpad.net/cloud-init +.. _DigitalOcean JSON metadata: https://developers.digitalocean.com/documentation/metadata/#network-interfaces-index +.. _OpenStack Metadata Service Network: https://specs.openstack.org/openstack/nova-specs/specs/liberty/implemented/metadata-service-network-info.html +.. _SmartOS JSON Metadata: https://eng.joyent.com/mdata/datadict.html + +.. vi: textwidth=78 diff -Nru cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/tests.rst cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/tests.rst --- cloud-init-0.7.9-47-gc81ea53/doc/rtd/topics/tests.rst 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/doc/rtd/topics/tests.rst 2017-05-26 18:36:38.000000000 +0000 @@ -238,6 +238,20 @@ The above command will run the verify scripts on the data discovered in `/tmp/collection`. +Run via tox +----------- +In order to avoid the need to install dependencies and to ease setup and +configuration, users can run the integration tests via tox: + +.. code-block:: bash + + $ tox -e citest -- run [integration test arguments] + $ tox -e citest -- run -v -n zesty --deb=cloud-init_all.deb + $ tox -e citest -- run -t module/user_groups.yaml + +Users need to invoke the citest environment and then pass any additional +arguments. + Architecture ============ diff -Nru cloud-init-0.7.9-47-gc81ea53/.gitignore cloud-init-0.7.9-153-g16a7302f/.gitignore --- cloud-init-0.7.9-47-gc81ea53/.gitignore 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/.gitignore 2017-05-26 18:36:38.000000000 +0000 @@ -5,3 +5,8 @@ __pycache__ .tox .coverage +doc/rtd_html +parts +prime +stage +*.snap diff -Nru cloud-init-0.7.9-47-gc81ea53/HACKING.rst cloud-init-0.7.9-153-g16a7302f/HACKING.rst --- cloud-init-0.7.9-47-gc81ea53/HACKING.rst 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/HACKING.rst 2017-05-26 18:36:38.000000000 +0000 @@ -13,6 +13,9 @@ If you have already signed it as an individual, your Launchpad user will be listed in the `contributor-agreement-canonical`_ group. Unfortunately there is no easy way to check if an organization or company you are doing work for has signed. If you are unsure or have questions, email `Scott Moser `_ or ping smoser in ``#cloud-init`` channel via freenode. + When prompted for 'Project contact' or 'Canonical Project Manager' enter + 'Scott Moser'. + * Clone the upstream `repository`_ on Launchpad:: git clone https://git.launchpad.net/cloud-init diff -Nru cloud-init-0.7.9-47-gc81ea53/Makefile cloud-init-0.7.9-153-g16a7302f/Makefile --- cloud-init-0.7.9-47-gc81ea53/Makefile 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/Makefile 2017-05-26 18:36:38.000000000 +0000 @@ -82,6 +82,10 @@ ./packages/brpm --distro $(distro) deb: + @which debuild || \ + { echo "Missing devscripts dependency. 
Install with:"; \ + echo sudo apt-get install devscripts; exit 1; } + ./packages/bddeb .PHONY: test pyflakes pyflakes3 clean pep8 rpm deb yaml check_version diff -Nru cloud-init-0.7.9-47-gc81ea53/packages/bddeb cloud-init-0.7.9-153-g16a7302f/packages/bddeb --- cloud-init-0.7.9-47-gc81ea53/packages/bddeb 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/packages/bddeb 2017-05-26 18:36:38.000000000 +0000 @@ -24,35 +24,17 @@ from cloudinit import templater from cloudinit import util -# Package names that will showup in requires to what we can actually -# use in our debian 'control' file, this is a translation of the 'requires' -# file pypi package name to a debian/ubuntu package name. -STD_NAMED_PACKAGES = [ - 'configobj', - 'coverage', - 'jinja2', - 'jsonpatch', - 'oauthlib', - 'prettytable', - 'requests', - 'six', - 'httpretty', - 'mock', - 'nose', - 'setuptools', - 'flake8', - 'hacking', - 'unittest2', -] +# Package names that will showup in requires which have unique package names. +# Format is '': {'': , ...}. NONSTD_NAMED_PACKAGES = { - 'argparse': ('python-argparse', None), - 'contextlib2': ('python-contextlib2', None), - 'cheetah': ('python-cheetah', None), - 'pyserial': ('python-serial', 'python3-serial'), - 'pyyaml': ('python-yaml', 'python3-yaml'), - 'six': ('python-six', 'python3-six'), - 'pep8': ('pep8', 'python3-pep8'), - 'pyflakes': ('pyflakes', 'pyflakes'), + 'argparse': {'2': 'python-argparse', '3': None}, + 'contextlib2': {'2': 'python-contextlib2', '3': None}, + 'cheetah': {'2': 'python-cheetah', '3': None}, + 'pyserial': {'2': 'python-serial', '3': 'python3-serial'}, + 'pyyaml': {'2': 'python-yaml', '3': 'python3-yaml'}, + 'six': {'2': 'python-six', '3': 'python3-six'}, + 'pep8': {'2': 'pep8', '3': 'python3-pep8'}, + 'pyflakes': {'2': 'pyflakes', '3': 'pyflakes'}, } DEBUILD_ARGS = ["-S", "-d"] @@ -68,8 +50,17 @@ return stdout -def write_debian_folder(root, templ_data, pkgmap, pyver="3", - append_requires=[]): +def write_debian_folder(root, templ_data, is_python2, cloud_util_deps): + """Create a debian package directory with all rendered template files.""" + print("Creating a debian/ folder in %r" % (root)) + if is_python2: + pyver = "2" + python = "python" + else: + pyver = "3" + python = "python3" + pkgfmt = "{}-{}" + deb_dir = util.abs_join(root, 'debian') # Just copy debian/ dir and then update files @@ -91,21 +82,16 @@ pypi_test_pkgs = [p.lower().strip() for p in test_reqs] # Map to known packages - requires = append_requires + requires = ['cloud-utils | cloud-guest-utils'] if cloud_util_deps else [] test_requires = [] lists = ((pypi_pkgs, requires), (pypi_test_pkgs, test_requires)) for pypilist, target in lists: for p in pypilist: - if p not in pkgmap: - raise RuntimeError(("Do not know how to translate pypi " - "dependency %r to a known package") % (p)) - elif pkgmap[p]: - target.append(pkgmap[p]) - - if pyver == "3": - python = "python3" - else: - python = "python" + if p in NONSTD_NAMED_PACKAGES: + if NONSTD_NAMED_PACKAGES[p][pyver]: + target.append(NONSTD_NAMED_PACKAGES[p][pyver]) + else: # Then standard package prefix + target.append(pkgfmt.format(python, p)) templater.render_to_file(util.abs_join(find_root(), 'packages', 'debian', 'control.in'), @@ -124,8 +110,8 @@ return json.loads(run_helper('read-version', ['--json'])) -def main(): - +def get_parser(): + """Setup and return an argument parser for bdeb tool.""" parser = argparse.ArgumentParser() parser.add_argument("-v", "--verbose", dest="verbose", help=("run verbosely" @@ -162,7 +148,11 
@@ parser.add_argument("--signuser", default=False, action='store', help="user to sign, see man dpkg-genchanges") + return parser + +def main(): + parser = get_parser() args = parser.parse_args() if not args.sign: @@ -177,18 +167,6 @@ if args.verbose: capture = False - pkgmap = {} - for p in NONSTD_NAMED_PACKAGES: - pkgmap[p] = NONSTD_NAMED_PACKAGES[p][int(not args.python2)] - - for p in STD_NAMED_PACKAGES: - if args.python2: - pkgmap[p] = "python-" + p - pyver = "2" - else: - pkgmap[p] = "python3-" + p - pyver = "3" - templ_data = {'debian_release': args.release} with util.tempdir() as tdir: @@ -208,16 +186,10 @@ util.subp(cmd, capture=capture) xdir = util.abs_join(tdir, "cloud-init-%s" % ver_data['version_long']) - - print("Creating a debian/ folder in %r" % (xdir)) - if args.cloud_utils: - append_requires = ['cloud-utils | cloud-guest-utils'] - else: - append_requires = [] - templ_data.update(ver_data) - write_debian_folder(xdir, templ_data, pkgmap, - pyver=pyver, append_requires=append_requires) + + write_debian_folder(xdir, templ_data, is_python2=args.python2, + cloud_util_deps=args.cloud_utils) print("Running 'debuild %s' in %r" % (' '.join(args.debuild_args), xdir)) diff -Nru cloud-init-0.7.9-47-gc81ea53/packages/debian/control.in cloud-init-0.7.9-153-g16a7302f/packages/debian/control.in --- cloud-init-0.7.9-47-gc81ea53/packages/debian/control.in 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/packages/debian/control.in 2017-05-26 18:36:38.000000000 +0000 @@ -6,10 +6,6 @@ Build-Depends: debhelper (>= 9), dh-python, dh-systemd, - iproute2, - pep8, - pyflakes, - python3-pyflakes | pyflakes (<< 1.1.0-2), ${python}, ${test_requires}, ${requires} diff -Nru cloud-init-0.7.9-47-gc81ea53/.pylintrc cloud-init-0.7.9-153-g16a7302f/.pylintrc --- cloud-init-0.7.9-47-gc81ea53/.pylintrc 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/.pylintrc 2017-05-26 18:36:38.000000000 +0000 @@ -0,0 +1,60 @@ +[MASTER] + +# --go-faster, use multiple processes to speed up Pylint +jobs=4 + + +[MESSAGES CONTROL] + +# Errors and warings with some filtered: +# W0105(pointless-string-statement) +# W0107(unnecessary-pass) +# W0201(attribute-defined-outside-init) +# W0212(protected-access) +# W0221(arguments-differ) +# W0222(signature-differs) +# W0223(abstract-method) +# W0231(super-init-not-called) +# W0311(bad-indentation) +# W0511(fixme) +# W0602(global-variable-not-assigned) +# W0603(global-statement) +# W0611(unused-import) +# W0612(unused-variable) +# W0613(unused-argument) +# W0621(redefined-outer-name) +# W0622(redefined-builtin) +# W0631(undefined-loop-variable) +# W0703(broad-except) +# W1401(anomalous-backslash-in-string) + +disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0612, W0613, W0621, W0622, W0631, W0703, W1401 + + +[REPORTS] + +# Set the output format. Available formats are text, parseable, colorized, msvs +output-format=parseable + +# Just the errors please, no full report +reports=no + + +[TYPECHECK] + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis. It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules=six.moves,pkg_resources,httplib,http.client + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). 
This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members=types,http.client,command_handlers + diff -Nru cloud-init-0.7.9-47-gc81ea53/setup.py cloud-init-0.7.9-153-g16a7302f/setup.py --- cloud-init-0.7.9-47-gc81ea53/setup.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/setup.py 2017-05-26 18:36:38.000000000 +0000 @@ -89,7 +89,6 @@ if os.uname()[0] == 'FreeBSD': USR = "/usr/local" USR_LIB_EXEC = "/usr/local/lib" - ETC = "/usr/local/etc" elif os.path.isfile('/etc/redhat-release'): USR_LIB_EXEC = "/usr/libexec" @@ -138,9 +137,7 @@ self.init_system = self.init_system.split(",") if len(self.init_system) == 0: - raise DistutilsArgError( - ("You must specify one of (%s) when" - " specifying init system(s)!") % (", ".join(INITSYS_TYPES))) + self.init_system = ['systemd'] bad = [f for f in self.init_system if f not in INITSYS_TYPES] if len(bad) != 0: @@ -166,8 +163,6 @@ (ETC + '/cloud', glob('config/*.cfg')), (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')), (ETC + '/cloud/templates', glob('templates/*')), - (ETC + '/NetworkManager/dispatcher.d/', ['tools/hook-network-manager']), - (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']), (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify', 'tools/uncloud-init', 'tools/write-ssh-key-fingerprints']), @@ -176,8 +171,14 @@ [f for f in glob('doc/examples/*') if is_f(f)]), (USR + '/share/doc/cloud-init/examples/seed', [f for f in glob('doc/examples/seed/*') if is_f(f)]), - (LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')]), ] + if os.uname()[0] != 'FreeBSD': + data_files.extend([ + (ETC + '/NetworkManager/dispatcher.d/', + ['tools/hook-network-manager']), + (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']), + (LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')]) + ]) # Use a subclass for install that handles # adding on the right init system configuration files cmdclass = { diff -Nru cloud-init-0.7.9-47-gc81ea53/snapcraft.yaml cloud-init-0.7.9-153-g16a7302f/snapcraft.yaml --- cloud-init-0.7.9-47-gc81ea53/snapcraft.yaml 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/snapcraft.yaml 2017-05-26 18:36:38.000000000 +0000 @@ -0,0 +1,21 @@ +name: cloud-init +version: master +summary: Init scripts for cloud instances +description: | + Cloud instances need special scripts to run during initialisation to + retrieve and install ssh keys and to let the user run various scripts. 
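+# note: classic confinement (declared below) is assumed necessary here
+# because cloud-init reads and writes host-level configuration such as
+# ssh keys and user accounts, which a strictly confined snap cannot touch.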
+ +grade: stable +confinement: classic + +apps: + cloud-init: + # LP: #1669306 + command: usr/bin/python3 $SNAP/bin/cloud-init + plugs: [network] + +parts: + cloud-init: + plugin: python + source-type: git + source: https://git.launchpad.net/cloud-init diff -Nru cloud-init-0.7.9-47-gc81ea53/systemd/cloud-init.service cloud-init-0.7.9-153-g16a7302f/systemd/cloud-init.service --- cloud-init-0.7.9-47-gc81ea53/systemd/cloud-init.service 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/systemd/cloud-init.service 2017-05-26 18:36:38.000000000 +0000 @@ -5,6 +5,7 @@ Wants=sshd-keygen.service Wants=sshd.service After=cloud-init-local.service +After=systemd-networkd-wait-online.service After=networking.service Before=network-online.target Before=sshd-keygen.service diff -Nru cloud-init-0.7.9-47-gc81ea53/sysvinit/freebsd/cloudconfig cloud-init-0.7.9-153-g16a7302f/sysvinit/freebsd/cloudconfig --- cloud-init-0.7.9-47-gc81ea53/sysvinit/freebsd/cloudconfig 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/sysvinit/freebsd/cloudconfig 2017-05-26 18:36:38.000000000 +0000 @@ -7,24 +7,14 @@ . /etc/rc.subr PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" -export CLOUD_CFG=/usr/local/etc/cloud/cloud.cfg name="cloudconfig" command="/usr/local/bin/cloud-init" start_cmd="cloudconfig_start" stop_cmd=":" rcvar="cloudinit_enable" -start_precmd="cloudinit_override" start_cmd="cloudconfig_start" -cloudinit_override() -{ - # If there exist sysconfig/defaults variable override files use it... - if [ -f /etc/defaults/cloud-init ]; then - . /etc/defaults/cloud-init - fi -} - cloudconfig_start() { echo "${command} starting" diff -Nru cloud-init-0.7.9-47-gc81ea53/sysvinit/freebsd/cloudfinal cloud-init-0.7.9-153-g16a7302f/sysvinit/freebsd/cloudfinal --- cloud-init-0.7.9-47-gc81ea53/sysvinit/freebsd/cloudfinal 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/sysvinit/freebsd/cloudfinal 2017-05-26 18:36:38.000000000 +0000 @@ -7,24 +7,14 @@ . /etc/rc.subr PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" -export CLOUD_CFG=/usr/local/etc/cloud/cloud.cfg name="cloudfinal" command="/usr/local/bin/cloud-init" start_cmd="cloudfinal_start" stop_cmd=":" rcvar="cloudinit_enable" -start_precmd="cloudinit_override" start_cmd="cloudfinal_start" -cloudinit_override() -{ - # If there exist sysconfig/defaults variable override files use it... - if [ -f /etc/defaults/cloud-init ]; then - . /etc/defaults/cloud-init - fi -} - cloudfinal_start() { echo -n "${command} starting" diff -Nru cloud-init-0.7.9-47-gc81ea53/sysvinit/freebsd/cloudinit cloud-init-0.7.9-153-g16a7302f/sysvinit/freebsd/cloudinit --- cloud-init-0.7.9-47-gc81ea53/sysvinit/freebsd/cloudinit 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/sysvinit/freebsd/cloudinit 2017-05-26 18:36:38.000000000 +0000 @@ -7,24 +7,14 @@ . /etc/rc.subr PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" -export CLOUD_CFG=/usr/local/etc/cloud/cloud.cfg name="cloudinit" command="/usr/local/bin/cloud-init" start_cmd="cloudinit_start" stop_cmd=":" rcvar="cloudinit_enable" -start_precmd="cloudinit_override" start_cmd="cloudinit_start" -cloudinit_override() -{ - # If there exist sysconfig/defaults variable override files use it... - if [ -f /etc/defaults/cloud-init ]; then - . 
/etc/defaults/cloud-init - fi -} - cloudinit_start() { echo -n "${command} starting" diff -Nru cloud-init-0.7.9-47-gc81ea53/sysvinit/freebsd/cloudinitlocal cloud-init-0.7.9-153-g16a7302f/sysvinit/freebsd/cloudinitlocal --- cloud-init-0.7.9-47-gc81ea53/sysvinit/freebsd/cloudinitlocal 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/sysvinit/freebsd/cloudinitlocal 2017-05-26 18:36:38.000000000 +0000 @@ -1,30 +1,20 @@ #!/bin/sh # PROVIDE: cloudinitlocal -# REQUIRE: mountcritlocal +# REQUIRE: ldconfig mountcritlocal # BEFORE: NETWORKING FILESYSTEMS cloudinit cloudconfig cloudfinal . /etc/rc.subr PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" -export CLOUD_CFG=/usr/local/etc/cloud/cloud.cfg name="cloudinitlocal" command="/usr/local/bin/cloud-init" start_cmd="cloudlocal_start" stop_cmd=":" rcvar="cloudinit_enable" -start_precmd="cloudinit_override" start_cmd="cloudlocal_start" -cloudinit_override() -{ - # If there exist sysconfig/defaults variable override files use it... - if [ -f /etc/defaults/cloud-init ]; then - . /etc/defaults/cloud-init - fi -} - cloudlocal_start() { echo -n "${command} starting" diff -Nru cloud-init-0.7.9-47-gc81ea53/templates/sources.list.debian.tmpl cloud-init-0.7.9-153-g16a7302f/templates/sources.list.debian.tmpl --- cloud-init-0.7.9-47-gc81ea53/templates/sources.list.debian.tmpl 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/templates/sources.list.debian.tmpl 2017-05-26 18:36:38.000000000 +0000 @@ -26,7 +26,5 @@ ## N.B. software from this repository may not have been tested as ## extensively as that contained in the main release, although it includes ## newer versions of some applications which may provide useful features. -{# -deb http://backports.debian.org/debian-backports {{codename}}-backports main contrib non-free -deb-src http://backports.debian.org/debian-backports {{codename}}-backports main contrib non-free --#} +deb {{mirror}} {{codename}}-backports main contrib non-free +deb-src {{mirror}} {{codename}}-backports main contrib non-free diff -Nru cloud-init-0.7.9-47-gc81ea53/test-requirements.txt cloud-init-0.7.9-153-g16a7302f/test-requirements.txt --- cloud-init-0.7.9-47-gc81ea53/test-requirements.txt 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/test-requirements.txt 2017-05-26 18:36:38.000000000 +0000 @@ -11,9 +11,3 @@ # Only really needed on older versions of python contextlib2 setuptools - -# Used for syle checking -pep8==1.7.0 -pyflakes==1.1.0 -flake8==2.5.4 -hacking==0.10.2 diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/args.py cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/args.py --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/args.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/args.py 2017-05-26 18:36:38.000000000 +0000 @@ -94,7 +94,7 @@ if os.path.exists(config.name_to_path(args.name)): msg = 'test: {} already exists'.format(args.name) if args.force: - LOG.warn('%s but ignoring due to --force', msg) + LOG.warning('%s but ignoring due to --force', msg) else: LOG.error(msg) return None diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/collect.py cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/collect.py --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/collect.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/collect.py 2017-05-26 18:36:38.000000000 +0000 @@ -45,7 +45,7 @@ # if test is not enabled, skip and return 0 failures if not 
test_config.get('enabled', False): - LOG.warn('test config %s is not enabled, skipping', test_name) + LOG.warning('test config %s is not enabled, skipping', test_name) return ({}, 0) # create test instance diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/configs/examples/install_run_chef_recipes.yaml cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/configs/examples/install_run_chef_recipes.yaml --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/configs/examples/install_run_chef_recipes.yaml 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/configs/examples/install_run_chef_recipes.yaml 2017-05-26 18:36:38.000000000 +0000 @@ -1,46 +1,50 @@ # # From cloud config examples on cloudinit.readthedocs.io # -# 2016-11-17: Disabled as test suite fails this long running test currently +# 2017-03-31: Disabled as depends on third party apt repository # enabled: False cloud_config: | #cloud-config - # Key from http://apt.opscode.com/packages@opscode.com.gpg.key + # Key from https://packages.chef.io/chef.asc apt: - sources: - - source: "deb http://apt.opscode.com/ $RELEASE-0.10 main" - key: | - -----BEGIN PGP PUBLIC KEY BLOCK----- - Version: GnuPG v1.4.9 (GNU/Linux) - - mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu - twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99 - dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC - JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W - ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I - XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe - DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm - sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO - Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ - YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG - CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K - +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu5Ag0ESmkLtBAIAIO2SwlR - lU5i6gTOp42RHWW7/pmW78CwUqJnYqnXROrt3h9F9xrsGkH0Fh1FRtsnncgzIhvh - DLQnRHnkXm0ws0jV0PF74ttoUT6BLAUsFi2SPP1zYNJ9H9fhhK/pjijtAcQwdgxu - wwNJ5xCEscBZCjhSRXm0d30bK1o49Cow8ZIbHtnXVP41c9QWOzX/LaGZsKQZnaMx - EzDk8dyyctR2f03vRSVyTFGgdpUcpbr9eTFVgikCa6ODEBv+0BnCH6yGTXwBid9g - w0o1e/2DviKUWCC+AlAUOubLmOIGFBuI4UR+rux9affbHcLIOTiKQXv79lW3P7W8 - AAfniSQKfPWXrrcAAwUH/2XBqD4Uxhbs25HDUUiM/m6Gnlj6EsStg8n0nMggLhuN - QmPfoNByMPUqvA7sULyfr6xCYzbzRNxABHSpf85FzGQ29RF4xsA4vOOU8RDIYQ9X - Q8NqqR6pydprRFqWe47hsAN7BoYuhWqTtOLSBmnAnzTR5pURoqcquWYiiEavZixJ - 3ZRAq/HMGioJEtMFrvsZjGXuzef7f0ytfR1zYeLVWnL9Bd32CueBlI7dhYwkFe+V - Ep5jWOCj02C1wHcwt+uIRDJV6TdtbIiBYAdOMPk15+VBdweBXwMuYXr76+A7VeDL - zIhi7tKFo6WiwjKZq0dzctsJJjtIfr4K4vbiD9Ojg1iISQQYEQIACQUCSmkLtAIb - DAAKCRApQKupg++CauISAJ9CxYPOKhOxalBnVTLeNUkAHGg2gACeIsbobtaD4ZHG - 0GLl8EkfA8uhluM= - =zKAm - -----END PGP PUBLIC KEY BLOCK----- + source1: + source: "deb http://packages.chef.io/repos/apt/stable $RELEASE main" + key: | + -----BEGIN PGP PUBLIC KEY BLOCK----- + Version: GnuPG v1.4.12 (Darwin) + Comment: GPGTools - http://gpgtools.org + + mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu + twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99 + dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC + JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W + ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I + XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe + 
DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm + sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO + Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ + YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG + CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K + +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu0IENIRUYgUGFja2FnZXMg + PHBhY2thZ2VzQGNoZWYuaW8+iGIEExECACIFAlQwYFECGwMGCwkIBwMCBhUIAgkK + CwQWAgMBAh4BAheAAAoJEClAq6mD74JqX94An26z99XOHWpLN8ahzm7cp13t4Xid + AJ9wVcgoUBzvgg91lKfv/34cmemZn7kCDQRKaQu0EAgAg7ZLCVGVTmLqBM6njZEd + Zbv+mZbvwLBSomdiqddE6u3eH0X3GuwaQfQWHUVG2yedyDMiG+EMtCdEeeRebTCz + SNXQ8Xvi22hRPoEsBSwWLZI8/XNg0n0f1+GEr+mOKO0BxDB2DG7DA0nnEISxwFkK + OFJFebR3fRsrWjj0KjDxkhse2ddU/jVz1BY7Nf8toZmwpBmdozETMOTx3LJy1HZ/ + Te9FJXJMUaB2lRyluv15MVWCKQJro4MQG/7QGcIfrIZNfAGJ32DDSjV7/YO+IpRY + IL4CUBQ65suY4gYUG4jhRH6u7H1p99sdwsg5OIpBe/v2Vbc/tbwAB+eJJAp89Zeu + twADBQf/ZcGoPhTGFuzbkcNRSIz+boaeWPoSxK2DyfScyCAuG41CY9+g0HIw9Sq8 + DuxQvJ+vrEJjNvNE3EAEdKl/zkXMZDb1EXjGwDi845TxEMhhD1dDw2qpHqnJ2mtE + WpZ7juGwA3sGhi6FapO04tIGacCfNNHmlRGipyq5ZiKIRq9mLEndlECr8cwaKgkS + 0wWu+xmMZe7N5/t/TK19HXNh4tVacv0F3fYK54GUjt2FjCQV75USnmNY4KPTYLXA + dzC364hEMlXpN21siIFgB04w+TXn5UF3B4FfAy5hevvr4DtV4MvMiGLu0oWjpaLC + MpmrR3Ny2wkmO0h+vgri9uIP06ODWIhJBBgRAgAJBQJKaQu0AhsMAAoJEClAq6mD + 74Jq4hIAoJ5KrYS8kCwj26SAGzglwggpvt3CAJ0bekyky56vNqoegB+y4PQVDv4K + zA== + =IxPr + -----END PGP PUBLIC KEY BLOCK----- chef: @@ -91,4 +95,9 @@ # Useful for troubleshooting cloud-init issues output: {all: '| tee -a /var/log/cloud-init-output.log'} +collect_scripts: + chef_installed: | + #!/bin/sh + dpkg-query -W -f '${Status}\n' chef + # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/configs/modules/ntp_pools.yaml cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/configs/modules/ntp_pools.yaml --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/configs/modules/ntp_pools.yaml 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/configs/modules/ntp_pools.yaml 2017-05-26 18:36:38.000000000 +0000 @@ -5,10 +5,9 @@ #cloud-config ntp: pools: - - 0.pool.ntp.org - - 1.pool.ntp.org - - 2.pool.ntp.org - - 3.pool.ntp.org + - 0.cloud-init.mypool + - 1.cloud-init.mypool + - 172.16.15.14 collect_scripts: ntp_installed_pools: | #!/bin/bash @@ -19,5 +18,8 @@ ntp_conf_pools: | #!/bin/bash grep '^pool' /etc/ntp.conf + ntpq_servers: | + #!/bin/sh + ntpq -p -w # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/configs/modules/ntp_servers.yaml cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/configs/modules/ntp_servers.yaml --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/configs/modules/ntp_servers.yaml 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/configs/modules/ntp_servers.yaml 2017-05-26 18:36:38.000000000 +0000 @@ -5,16 +5,20 @@ #cloud-config ntp: servers: - - pool.ntp.org + - 172.16.15.14 + - 172.16.17.18 collect_scripts: ntp_installed_servers: | - #!/bin/bash - dpkg -l | grep ntp | wc -l + #!/bin/sh + dpkg -l | grep -c ntp ntp_conf_dist_servers: | - #!/bin/bash - ls /etc/ntp.conf.dist | wc -l + #!/bin/sh + cat /etc/ntp.conf.dist | wc -l ntp_conf_servers: | - #!/bin/bash + #!/bin/sh grep '^server' /etc/ntp.conf + ntpq_servers: | + #!/bin/sh + ntpq -p -w # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/configs/modules/set_password_list_string.yaml 
cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/configs/modules/set_password_list_string.yaml --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/configs/modules/set_password_list_string.yaml 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/configs/modules/set_password_list_string.yaml 2017-05-26 18:36:38.000000000 +0000 @@ -0,0 +1,40 @@ +# +# Set password of list of users as a string +# +cloud_config: | + #cloud-config + ssh_pwauth: yes + users: + - name: tom + # md5 gotomgo + passwd: "$1$S7$tT1BEDIYrczeryDQJfdPe0" + lock_passwd: false + - name: dick + # md5 gocubsgo + passwd: "$1$ssisyfpf$YqvuJLfrrW6Cg/l53Pi1n1" + lock_passwd: false + - name: harry + # sha512 goharrygo + passwd: "$6$LF$9Z2p6rWK6TNC1DC6393ec0As.18KRAvKDbfsGJEdWN3sRQRwpdfoh37EQ3yUh69tP4GSrGW5XKHxMLiKowJgm/" + lock_passwd: false + - name: jane + # sha256 gojanego + passwd: "$5$iW$XsxmWCdpwIW8Yhv.Jn/R3uk6A4UaicfW5Xp7C9p9pg." + lock_passwd: false + - name: "mikey" + lock_passwd: false + chpasswd: + list: | + tom:mypassword123! + dick:RANDOM + harry:RANDOM + mikey:$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89 +collect_scripts: + shadow: | + #!/bin/bash + cat /etc/shadow + sshd_config: | + #!/bin/bash + grep '^PasswordAuth' /etc/ssh/sshd_config + +# vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/configs/modules/set_password_list.yaml cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/configs/modules/set_password_list.yaml --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/configs/modules/set_password_list.yaml 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/configs/modules/set_password_list.yaml 2017-05-26 18:36:38.000000000 +0000 @@ -6,22 +6,29 @@ ssh_pwauth: yes users: - name: tom - password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE. + # md5 gotomgo + passwd: "$1$S7$tT1BEDIYrczeryDQJfdPe0" lock_passwd: false - name: dick - password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE. + # md5 gocubsgo + passwd: "$1$ssisyfpf$YqvuJLfrrW6Cg/l53Pi1n1" lock_passwd: false - name: harry - password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE. + # sha512 goharrygo + passwd: "$6$LF$9Z2p6rWK6TNC1DC6393ec0As.18KRAvKDbfsGJEdWN3sRQRwpdfoh37EQ3yUh69tP4GSrGW5XKHxMLiKowJgm/" lock_passwd: false - name: jane - password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE. + # sha256 gojanego + passwd: "$5$iW$XsxmWCdpwIW8Yhv.Jn/R3uk6A4UaicfW5Xp7C9p9pg." + lock_passwd: false + - name: "mikey" lock_passwd: false chpasswd: - list: | - tom:mypassword123! - dick:R - harry:Random + list: + - tom:mypassword123! 
+ - dick:RANDOM + - harry:RANDOM + - mikey:$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89 collect_scripts: shadow: | #!/bin/bash diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/configs/modules/snappy.yaml cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/configs/modules/snappy.yaml --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/configs/modules/snappy.yaml 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/configs/modules/snappy.yaml 2017-05-26 18:36:38.000000000 +0000 @@ -6,8 +6,8 @@ snappy: system_snappy: auto collect_scripts: - snap_version: | + snapd: | #!/bin/bash - snap --version + dpkg -s snapd # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/configs/modules/timezone.yaml cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/configs/modules/timezone.yaml --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/configs/modules/timezone.yaml 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/configs/modules/timezone.yaml 2017-05-26 18:36:38.000000000 +0000 @@ -7,6 +7,8 @@ collect_scripts: timezone: | #!/bin/bash - date +%Z + # date will convert this to system's configured time zone. + # use a static date to avoid dealing with daylight savings. + date "+%Z" --date="Thu, 03 Nov 2016 00:47:00 -0400" # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/__main__.py cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/__main__.py --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/__main__.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/__main__.py 2017-05-26 18:36:38.000000000 +0000 @@ -38,7 +38,7 @@ finally: # TODO: make this configurable via environ or cmdline if failed: - LOG.warn('some tests failed, leaving data in %s', args.data_dir) + LOG.warning('some tests failed, leaving data in %s', args.data_dir) else: shutil.rmtree(args.data_dir) return failed diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/releases.yaml cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/releases.yaml --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/releases.yaml 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/releases.yaml 2017-05-26 18:36:38.000000000 +0000 @@ -49,6 +49,13 @@ #alias: ubuntu/zesty/default alias: z sstreams_server: https://cloud-images.ubuntu.com/daily + artful: + enabled: true + platform_ident: + lxd: + #alias: ubuntu/artful/default + alias: a + sstreams_server: https://cloud-images.ubuntu.com/daily jessie: platform_ident: lxd: diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/base.py cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/base.py --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/base.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/base.py 2017-05-26 18:36:38.000000000 +0000 @@ -2,6 +2,7 @@ from cloudinit import util as c_util +import crypt import json import unittest @@ -14,6 +15,9 @@ conf = None _cloud_config = None + def shortDescription(self): + return None + @property def cloud_config(self): """ @@ -78,4 +82,56 @@ result = self.get_status_data(self.get_data_file('result.json')) self.assertEqual(len(result['errors']), 0) + +class PasswordListTest(CloudTestCase): + def test_shadow_passwords(self): + shadow = self.get_data_file('shadow') + users = {} + dupes = [] + for line in shadow.splitlines(): + user, encpw = line.split(":")[0:2] + if user in users: + dupes.append(user) 
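+ # keep the most recent hash seen for each user; any name collected in + # 'dupes' above means /etc/shadow listed that user more than once.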
+ users[user] = encpw + + jane_enc = "$5$iW$XsxmWCdpwIW8Yhv.Jn/R3uk6A4UaicfW5Xp7C9p9pg." + self.assertEqual([], dupes) + self.assertEqual(jane_enc, users['jane']) + + mikey_enc = "$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89" + self.assertEqual(mikey_enc, users['mikey']) + + # shadow entry is $N$salt$, so we encrypt with the same format + # and salt and expect the result. + tom = "mypassword123!" + fmtsalt = users['tom'][0:users['tom'].rfind("$") + 1] + tom_enc = crypt.crypt(tom, fmtsalt) + self.assertEqual(tom_enc, users['tom']) + + harry_enc = ("$6$LF$9Z2p6rWK6TNC1DC6393ec0As.18KRAvKDbfsG" + "JEdWN3sRQRwpdfoh37EQ3yUh69tP4GSrGW5XKHxMLiKowJgm/") + dick_enc = "$1$ssisyfpf$YqvuJLfrrW6Cg/l53Pi1n1" + + # these should have been changed to random values. + self.assertNotEqual(harry_enc, users['harry']) + self.assertTrue(users['harry'].startswith("$")) + self.assertNotEqual(dick_enc, users['dick']) + self.assertTrue(users['dick'].startswith("$")) + + self.assertNotEqual(users['harry'], users['dick']) + + def test_shadow_expected_users(self): + """Test every tom, dick, and harry user in shadow""" + out = self.get_data_file('shadow') + self.assertIn('tom:', out) + self.assertIn('dick:', out) + self.assertIn('harry:', out) + self.assertIn('jane:', out) + self.assertIn('mikey:', out) + + def test_sshd_config(self): + """Test sshd config allows passwords""" + out = self.get_data_file('sshd_config') + self.assertIn('PasswordAuthentication yes', out) + # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py 2017-05-26 18:36:38.000000000 +0000 @@ -0,0 +1,17 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
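+# The 'chef_installed' data consumed below comes from the collect script in +# the matching yaml config; dpkg-query prints 'install ok installed' in its +# Status line once the chef package is fully installed.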
+ +"""cloud-init Integration Test Verify Script""" +from tests.cloud_tests.testcases import base + + +class TestChefExample(base.CloudTestCase): + """Test chef module""" + + def test_chef_basic(self): + """Test chef installed""" + out = self.get_data_file('chef_installed') + self.assertIn('install ok', out) + + # FIXME: Add more tests, and/or replace with comprehensive module tests + +# vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/__init__.py cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/__init__.py --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/__init__.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/__init__.py 2017-05-26 18:36:38.000000000 +0000 @@ -21,7 +21,7 @@ raise ValueError('no test verifier found at: {}'.format(testmod_name)) return [mod for name, mod in inspect.getmembers(testmod) - if inspect.isclass(mod) and base_test in mod.__bases__ and + if inspect.isclass(mod) and base_test in inspect.getmro(mod) and getattr(mod, '__test__', True)] diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/modules/ntp_pools.py cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/modules/ntp_pools.py --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/modules/ntp_pools.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/modules/ntp_pools.py 2017-05-26 18:36:38.000000000 +0000 @@ -13,16 +13,22 @@ self.assertEqual(1, int(out)) def test_ntp_dist_entries(self): - """Test dist config file has one entry""" + """Test dist config file is empty""" out = self.get_data_file('ntp_conf_dist_pools') - self.assertEqual(1, int(out)) + self.assertEqual(0, int(out)) def test_ntp_entires(self): """Test config entries""" out = self.get_data_file('ntp_conf_pools') - self.assertIn('pool 0.pool.ntp.org iburst', out) - self.assertIn('pool 1.pool.ntp.org iburst', out) - self.assertIn('pool 2.pool.ntp.org iburst', out) - self.assertIn('pool 3.pool.ntp.org iburst', out) + pools = self.cloud_config.get('ntp').get('pools') + for pool in pools: + self.assertIn('pool %s iburst' % pool, out) + + def test_ntpq_servers(self): + """Test ntpq output has configured servers""" + out = self.get_data_file('ntpq_servers') + pools = self.cloud_config.get('ntp').get('pools') + for pool in pools: + self.assertIn(pool, out) # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/modules/ntp.py cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/modules/ntp.py --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/modules/ntp.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/modules/ntp.py 2017-05-26 18:36:38.000000000 +0000 @@ -13,9 +13,9 @@ self.assertEqual(1, int(out)) def test_ntp_dist_entries(self): - """Test dist config file has one entry""" + """Test dist config file is empty""" out = self.get_data_file('ntp_conf_dist_empty') - self.assertEqual(1, int(out)) + self.assertEqual(0, int(out)) def test_ntp_entires(self): """Test config entries""" diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/modules/ntp_servers.py cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/modules/ntp_servers.py --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/modules/ntp_servers.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/modules/ntp_servers.py 2017-05-26 18:36:38.000000000 
+0000 @@ -13,13 +13,22 @@ self.assertEqual(1, int(out)) def test_ntp_dist_entries(self): - """Test dist config file has one entry""" + """Test dist config file is empty""" out = self.get_data_file('ntp_conf_dist_servers') - self.assertEqual(1, int(out)) + self.assertEqual(0, int(out)) - def test_ntp_entires(self): - """Test config entries""" + def test_ntp_entries(self): + """Test config server entries""" out = self.get_data_file('ntp_conf_servers') - self.assertIn('server pool.ntp.org iburst', out) + servers = self.cloud_config.get('ntp').get('servers') + for server in servers: + self.assertIn('server %s iburst' % server, out) + + def test_ntpq_servers(self): + """Test ntpq output has configured servers""" + out = self.get_data_file('ntpq_servers') + servers = self.cloud_config.get('ntp').get('servers') + for server in servers: + self.assertIn(server, out) # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/modules/set_password_list.py cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/modules/set_password_list.py --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/modules/set_password_list.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/modules/set_password_list.py 2017-05-26 18:36:38.000000000 +0000 @@ -4,22 +4,8 @@ from tests.cloud_tests.testcases import base -class TestPasswordList(base.CloudTestCase): - """Test password module""" - - # TODO: Verify dick and harry passwords are random - # TODO: Verify tom's password was changed - - def test_shadow(self): - """Test every tom, dick, and harry user in shadow""" - out = self.get_data_file('shadow') - self.assertIn('tom:', out) - self.assertIn('dick:', out) - self.assertIn('harry:', out) - - def test_sshd_config(self): - """Test sshd config allows passwords""" - out = self.get_data_file('sshd_config') - self.assertIn('PasswordAuthentication yes', out) +class TestPasswordList(base.PasswordListTest, base.CloudTestCase): + """Test password setting via list in chpasswd/list""" + __test__ = True # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/modules/set_password_list_string.py cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/modules/set_password_list_string.py --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/modules/set_password_list_string.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/modules/set_password_list_string.py 2017-05-26 18:36:38.000000000 +0000 @@ -0,0 +1,11 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
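+# Companion to set_password_list.py: the shared base.PasswordListTest +# assertions run here against the string form of chpasswd/list.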
+ +"""cloud-init Integration Test Verify Script""" +from tests.cloud_tests.testcases import base + + +class TestPasswordListString(base.PasswordListTest, base.CloudTestCase): + """Test password setting via string in chpasswd/list""" + __test__ = True + +# vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/modules/snappy.py cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/modules/snappy.py --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/modules/snappy.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/modules/snappy.py 2017-05-26 18:36:38.000000000 +0000 @@ -9,10 +9,7 @@ def test_snappy_version(self): """Test snappy version output""" - out = self.get_data_file('snap_version') - self.assertIn('snap ', out) - self.assertIn('snapd ', out) - self.assertIn('series ', out) - self.assertIn('ubuntu ', out) + out = self.get_data_file('snapd') + self.assertIn('Status: install ok installed', out) # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/modules/timezone.py cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/modules/timezone.py --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/testcases/modules/timezone.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/testcases/modules/timezone.py 2017-05-26 18:36:38.000000000 +0000 @@ -10,6 +10,6 @@ def test_timezone(self): """Test date prints correct timezone""" out = self.get_data_file('timezone') - self.assertIn('HST', out) + self.assertEqual('HDT', out.rstrip()) # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/verify.py cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/verify.py --- cloud-init-0.7.9-47-gc81ea53/tests/cloud_tests/verify.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/cloud_tests/verify.py 2017-05-26 18:36:38.000000000 +0000 @@ -45,9 +45,9 @@ } for failure in res[test_name]['failures']: - LOG.warn('test case: %s failed %s.%s with: %s', - test_name, failure['class'], failure['function'], - failure['error']) + LOG.warning('test case: %s failed %s.%s with: %s', + test_name, failure['class'], failure['function'], + failure['error']) return res @@ -80,7 +80,8 @@ if len(fail_list) == 0: LOG.info('test: %s passed all tests', test_name) else: - LOG.warn('test: %s failed %s tests', test_name, len(fail_list)) + LOG.warning('test: %s failed %s tests', test_name, + len(fail_list)) failed += len(fail_list) # dump results diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/helpers.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/helpers.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/helpers.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/helpers.py 2017-05-26 18:36:38.000000000 +0000 @@ -3,6 +3,8 @@ from __future__ import print_function import functools +import json +import logging import os import shutil import sys @@ -17,6 +19,10 @@ from contextlib import ExitStack except ImportError: from contextlib2 import ExitStack +try: + from cStringIO import StringIO +except ImportError: + from io import StringIO from cloudinit import helpers as ch from cloudinit import util @@ -86,6 +92,27 @@ class CiTestCase(TestCase): """This is the preferred test case base class unless user needs other test case classes below.""" + + # Subclass overrides for specific test behavior + # Whether or not a unit test needs logfile setup + with_logs = False + + def setUp(self): + 
super(CiTestCase, self).setUp() + if self.with_logs: + # Create a log handler so unit tests can search expected logs. + logger = logging.getLogger() + self.logs = StringIO() + handler = logging.StreamHandler(self.logs) + self.old_handlers = logger.handlers + logger.handlers = [handler] + + def tearDown(self): + if self.with_logs: + # Remove the handler we set up + logging.getLogger().handlers = self.old_handlers + super(CiTestCase, self).tearDown() + def tmp_dir(self, dir=None, cleanup=True): # return a full path to a temporary directory that will be cleaned up. if dir is None: @@ -105,7 +132,7 @@ return os.path.normpath(os.path.abspath(os.path.join(dir, path))) -class ResourceUsingTestCase(TestCase): +class ResourceUsingTestCase(CiTestCase): def setUp(self): super(ResourceUsingTestCase, self).setUp() self.resource_path = None @@ -228,8 +255,7 @@ def reRoot(self, root=None): if root is None: - root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, root) + root = self.tmp_dir() self.patchUtils(root) self.patchOS(root) return root @@ -255,7 +281,7 @@ os.makedirs(path) ret = [] for (name, content) in files.items(): - p = os.path.join(path, name) + p = os.path.sep.join([path, name]) util.ensure_dir(os.path.dirname(p)) with open(p, "wb") as fp: if isinstance(content, six.binary_type): @@ -280,6 +306,12 @@ return flist + +def json_dumps(data): + # print data in nicely formatted json. + return json.dumps(data, indent=1, sort_keys=True, + separators=(',', ': ')) + + def wrap_and_call(prefix, mocks, func, *args, **kwargs): """ call func(args, **kwargs) with mocks applied, then unapplies mocks diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_altcloud.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_altcloud.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_altcloud.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_altcloud.py 2017-05-26 18:36:38.000000000 +0000 @@ -10,18 +10,17 @@ This test file exercises the code in sources DataSourceAltCloud.py ''' +import mock import os import shutil import tempfile from cloudinit import helpers from cloudinit import util -from unittest import TestCase -# Get the cloudinit.sources.DataSourceAltCloud import items needed. -import cloudinit.sources.DataSourceAltCloud -from cloudinit.sources.DataSourceAltCloud import DataSourceAltCloud -from cloudinit.sources.DataSourceAltCloud import read_user_data_callback +from ..helpers import TestCase + +import cloudinit.sources.DataSourceAltCloud as dsac OS_UNAME_ORIG = getattr(os, 'uname') @@ -32,17 +31,17 @@ with a cloud backend identifier ImageFactory when building an image with ImageFactory.
''' - cifile = open(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE, 'w') + cifile = open(dsac.CLOUD_INFO_FILE, 'w') cifile.write(value) cifile.close() - os.chmod(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE, 0o664) + os.chmod(dsac.CLOUD_INFO_FILE, 0o664) def _remove_cloud_info_file(): ''' Remove the test CLOUD_INFO_FILE ''' - os.remove(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE) + os.remove(dsac.CLOUD_INFO_FILE) def _write_user_data_files(mount_dir, value): @@ -122,7 +121,7 @@ Forcing read_dmi_data return to match a RHEVm system: RHEV Hypervisor ''' util.read_dmi_data = _dmi_data('RHEV') - dsrc = DataSourceAltCloud({}, None, self.paths) + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual('RHEV', dsrc.get_cloud_type()) def test_vsphere(self): @@ -131,7 +130,7 @@ Forcing read_dmi_data return to match a vSphere system: RHEV Hypervisor ''' util.read_dmi_data = _dmi_data('VMware Virtual Platform') - dsrc = DataSourceAltCloud({}, None, self.paths) + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual('VSPHERE', dsrc.get_cloud_type()) def test_unknown(self): @@ -140,7 +139,7 @@ Forcing read_dmi_data return to match an unrecognized return. ''' util.read_dmi_data = _dmi_data('Unrecognized Platform') - dsrc = DataSourceAltCloud({}, None, self.paths) + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual('UNKNOWN', dsrc.get_cloud_type()) @@ -154,8 +153,7 @@ self.paths = helpers.Paths({'cloud_dir': '/tmp'}) self.cloud_info_file = tempfile.mkstemp()[1] self.dmi_data = util.read_dmi_data - cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \ - self.cloud_info_file + dsac.CLOUD_INFO_FILE = self.cloud_info_file def tearDown(self): # Reset @@ -167,14 +165,13 @@ pass util.read_dmi_data = self.dmi_data - cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \ - '/etc/sysconfig/cloud-info' + dsac.CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info' def test_rhev(self): '''Success Test module get_data() forcing RHEV.''' _write_cloud_info_file('RHEV') - dsrc = DataSourceAltCloud({}, None, self.paths) + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) dsrc.user_data_rhevm = lambda: True self.assertEqual(True, dsrc.get_data()) @@ -182,7 +179,7 @@ '''Success Test module get_data() forcing VSPHERE.''' _write_cloud_info_file('VSPHERE') - dsrc = DataSourceAltCloud({}, None, self.paths) + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) dsrc.user_data_vsphere = lambda: True self.assertEqual(True, dsrc.get_data()) @@ -190,7 +187,7 @@ '''Failure Test module get_data() forcing RHEV.''' _write_cloud_info_file('RHEV') - dsrc = DataSourceAltCloud({}, None, self.paths) + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) dsrc.user_data_rhevm = lambda: False self.assertEqual(False, dsrc.get_data()) @@ -198,7 +195,7 @@ '''Failure Test module get_data() forcing VSPHERE.''' _write_cloud_info_file('VSPHERE') - dsrc = DataSourceAltCloud({}, None, self.paths) + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) dsrc.user_data_vsphere = lambda: False self.assertEqual(False, dsrc.get_data()) @@ -206,7 +203,7 @@ '''Failure Test module get_data() forcing unrecognized.''' _write_cloud_info_file('unrecognized') - dsrc = DataSourceAltCloud({}, None, self.paths) + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.get_data()) @@ -219,7 +216,7 @@ '''Set up.''' self.paths = helpers.Paths({'cloud_dir': '/tmp'}) self.dmi_data = util.read_dmi_data - cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \ + 
dsac.CLOUD_INFO_FILE = \ 'no such file' # We have a different code path for arm to deal with LP1243287 # We have to switch arch to x86_64 to avoid test failure @@ -227,7 +224,7 @@ def tearDown(self): # Reset - cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \ + dsac.CLOUD_INFO_FILE = \ '/etc/sysconfig/cloud-info' util.read_dmi_data = self.dmi_data # Return back to original arch @@ -237,7 +234,7 @@ '''Test No cloud info file module get_data() forcing RHEV.''' util.read_dmi_data = _dmi_data('RHEV Hypervisor') - dsrc = DataSourceAltCloud({}, None, self.paths) + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) dsrc.user_data_rhevm = lambda: True self.assertEqual(True, dsrc.get_data()) @@ -245,7 +242,7 @@ '''Test No cloud info file module get_data() forcing VSPHERE.''' util.read_dmi_data = _dmi_data('VMware Virtual Platform') - dsrc = DataSourceAltCloud({}, None, self.paths) + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) dsrc.user_data_vsphere = lambda: True self.assertEqual(True, dsrc.get_data()) @@ -253,7 +250,7 @@ '''Test No cloud info file module get_data() forcing unrecognized.''' util.read_dmi_data = _dmi_data('Unrecognized Platform') - dsrc = DataSourceAltCloud({}, None, self.paths) + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.get_data()) @@ -261,11 +258,14 @@ ''' Test to exercise method: DataSourceAltCloud.user_data_rhevm() ''' + cmd_pass = ['true'] + cmd_fail = ['false'] + cmd_not_found = ['bogus bad command'] + def setUp(self): '''Set up.''' self.paths = helpers.Paths({'cloud_dir': '/tmp'}) self.mount_dir = tempfile.mkdtemp() - _write_user_data_files(self.mount_dir, 'test user data') def tearDown(self): @@ -279,61 +279,44 @@ except OSError: pass - cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \ - '/etc/sysconfig/cloud-info' - cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \ - ['/sbin/modprobe', 'floppy'] - cloudinit.sources.DataSourceAltCloud.CMD_UDEVADM_SETTLE = \ - ['/sbin/udevadm', 'settle', '--quiet', '--timeout=5'] + dsac.CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info' + dsac.CMD_PROBE_FLOPPY = ['/sbin/modprobe', 'floppy'] + dsac.CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', + '--quiet', '--timeout=5'] def test_mount_cb_fails(self): '''Test user_data_rhevm() where mount_cb fails.''' - cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \ - ['echo', 'modprobe floppy'] - - dsrc = DataSourceAltCloud({}, None, self.paths) - + dsac.CMD_PROBE_FLOPPY = self.cmd_pass + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_modprobe_fails(self): '''Test user_data_rhevm() where modprobe fails.''' - cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \ - ['ls', 'modprobe floppy'] - - dsrc = DataSourceAltCloud({}, None, self.paths) - + dsac.CMD_PROBE_FLOPPY = self.cmd_fail + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_no_modprobe_cmd(self): '''Test user_data_rhevm() with no modprobe command.''' - cloudinit.sources.DataSourceAltCloud.CMD_PROBE_FLOPPY = \ - ['bad command', 'modprobe floppy'] - - dsrc = DataSourceAltCloud({}, None, self.paths) - + dsac.CMD_PROBE_FLOPPY = self.cmd_not_found + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_udevadm_fails(self): '''Test user_data_rhevm() where udevadm fails.''' - cloudinit.sources.DataSourceAltCloud.CMD_UDEVADM_SETTLE = \ - ['ls', 'udevadm floppy'] - - dsrc = DataSourceAltCloud({}, 
None, self.paths) - + dsac.CMD_UDEVADM_SETTLE = self.cmd_fail + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_no_udevadm_cmd(self): '''Test user_data_rhevm() with no udevadm command.''' - cloudinit.sources.DataSourceAltCloud.CMD_UDEVADM_SETTLE = \ - ['bad command', 'udevadm floppy'] - - dsrc = DataSourceAltCloud({}, None, self.paths) - + dsac.CMD_UDEVADM_SETTLE = self.cmd_not_found + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) @@ -359,17 +342,30 @@ except OSError: pass - cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \ + dsac.CLOUD_INFO_FILE = \ '/etc/sysconfig/cloud-info' - def test_user_data_vsphere(self): + @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with") + @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb") + def test_user_data_vsphere_no_cdrom(self, m_mount_cb, m_find_devs_with): '''Test user_data_vsphere() where mount_cb fails.''' - cloudinit.sources.DataSourceAltCloud.MEDIA_DIR = self.mount_dir + m_mount_cb.return_value = [] + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + self.assertEqual(False, dsrc.user_data_vsphere()) + self.assertEqual(0, m_mount_cb.call_count) - dsrc = DataSourceAltCloud({}, None, self.paths) + @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with") + @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb") + def test_user_data_vsphere_mcb_fail(self, m_mount_cb, m_find_devs_with): + '''Test user_data_vsphere() where mount_cb fails.''' + m_find_devs_with.return_value = ["/dev/mock/cdrom"] + m_mount_cb.side_effect = util.MountFailedError("Unable To mount") + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_vsphere()) + self.assertEqual(1, m_find_devs_with.call_count) + self.assertEqual(1, m_mount_cb.call_count) class TestReadUserDataCallback(TestCase): @@ -398,7 +394,7 @@ '''Test read_user_data_callback() with both files.''' self.assertEqual('test user data', - read_user_data_callback(self.mount_dir)) + dsac.read_user_data_callback(self.mount_dir)) def test_callback_dc(self): '''Test read_user_data_callback() with only DC file.''' @@ -408,7 +404,7 @@ non_dc_file=True) self.assertEqual('test user data', - read_user_data_callback(self.mount_dir)) + dsac.read_user_data_callback(self.mount_dir)) def test_callback_non_dc(self): '''Test read_user_data_callback() with only non-DC file.''' @@ -418,13 +414,13 @@ non_dc_file=False) self.assertEqual('test user data', - read_user_data_callback(self.mount_dir)) + dsac.read_user_data_callback(self.mount_dir)) def test_callback_none(self): '''Test read_user_data_callback() no files are found.''' _remove_user_data_files(self.mount_dir) - self.assertEqual(None, read_user_data_callback(self.mount_dir)) + self.assertIsNone(dsac.read_user_data_callback(self.mount_dir)) def force_arch(arch=None): diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_azure_helper.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_azure_helper.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_azure_helper.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_azure_helper.py 2017-05-26 18:36:38.000000000 +0000 @@ -3,7 +3,6 @@ import os from cloudinit.sources.helpers import azure as azure_helper - from ..helpers import ExitStack, mock, TestCase @@ -72,10 +71,11 @@ @staticmethod def 
_build_lease_content(encoded_address): + endpoint = azure_helper._get_dhcp_endpoint_option_name() return '\n'.join([ 'lease {', ' interface "eth0";', - ' option unknown-245 {0};'.format(encoded_address), + ' option {0} {1};'.format(endpoint, encoded_address), '}']) def test_from_dhcp_client(self): diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_azure.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_azure.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_azure.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_azure.py 2017-05-26 18:36:38.000000000 +0000 @@ -1,10 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit import helpers -from cloudinit.util import b64e, decode_binary, load_file -from cloudinit.sources import DataSourceAzure +from cloudinit.util import b64e, decode_binary, load_file, write_file +from cloudinit.sources import DataSourceAzure as dsaz +from cloudinit.util import find_freebsd_part +from cloudinit.util import get_path_dev_freebsd -from ..helpers import TestCase, populate_dir, mock, ExitStack, PY26, SkipTest +from ..helpers import (CiTestCase, TestCase, populate_dir, mock, + ExitStack, PY26, SkipTest) import crypt import os @@ -95,6 +98,40 @@ for module, name, new in patches: self.patches.enter_context(mock.patch.object(module, name, new)) + def _get_mockds(self): + sysctl_out = "dev.storvsc.3.%pnpinfo: "\ + "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ + "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n" + sysctl_out += "dev.storvsc.2.%pnpinfo: "\ + "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ + "deviceid=f8b3781a-1e82-4818-a1c3-63d806ec15bb\n" + sysctl_out += "dev.storvsc.1.%pnpinfo: "\ + "classid=32412632-86cb-44a2-9b5c-50d1417354f5 "\ + "deviceid=00000000-0001-8899-0000-000000000000\n" + camctl_devbus = """ +scbus0 on ata0 bus 0 +scbus1 on ata1 bus 0 +scbus2 on blkvsc0 bus 0 +scbus3 on blkvsc1 bus 0 +scbus4 on storvsc2 bus 0 +scbus5 on storvsc3 bus 0 +scbus-1 on xpt0 bus 0 + """ + camctl_dev = """ + at scbus1 target 0 lun 0 (cd0,pass0) + at scbus2 target 0 lun 0 (da0,pass1) + at scbus3 target 1 lun 0 (da1,pass2) + """ + self.apply_patches([ + (dsaz, 'get_dev_storvsc_sysctl', mock.MagicMock( + return_value=sysctl_out)), + (dsaz, 'get_camcontrol_dev_bus', mock.MagicMock( + return_value=camctl_devbus)), + (dsaz, 'get_camcontrol_dev', mock.MagicMock( + return_value=camctl_dev)) + ]) + return dsaz + def _get_ds(self, data, agent_command=None): def dsdevs(): @@ -115,8 +152,7 @@ populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': data['ovfcontent']}) - mod = DataSourceAzure - mod.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d self.get_metadata_from_fabric = mock.MagicMock(return_value={ 'public-keys': [], @@ -125,19 +161,19 @@ self.instance_id = 'test-instance-id' self.apply_patches([ - (mod, 'list_possible_azure_ds_devs', dsdevs), - (mod, 'invoke_agent', _invoke_agent), - (mod, 'wait_for_files', _wait_for_files), - (mod, 'pubkeys_from_crt_files', _pubkeys_from_crt_files), - (mod, 'perform_hostname_bounce', mock.MagicMock()), - (mod, 'get_hostname', mock.MagicMock()), - (mod, 'set_hostname', mock.MagicMock()), - (mod, 'get_metadata_from_fabric', self.get_metadata_from_fabric), - (mod.util, 'read_dmi_data', mock.MagicMock( + (dsaz, 'list_possible_azure_ds_devs', dsdevs), + (dsaz, 'invoke_agent', _invoke_agent), + 
(dsaz, 'wait_for_files', _wait_for_files), + (dsaz, 'pubkeys_from_crt_files', _pubkeys_from_crt_files), + (dsaz, 'perform_hostname_bounce', mock.MagicMock()), + (dsaz, 'get_hostname', mock.MagicMock()), + (dsaz, 'set_hostname', mock.MagicMock()), + (dsaz, 'get_metadata_from_fabric', self.get_metadata_from_fabric), + (dsaz.util, 'read_dmi_data', mock.MagicMock( return_value=self.instance_id)), ]) - dsrc = mod.DataSourceAzureNet( + dsrc = dsaz.DataSourceAzureNet( data.get('sys_cfg', {}), distro=None, paths=self.paths) if agent_command is not None: dsrc.ds_cfg['agent_command'] = agent_command @@ -177,6 +213,34 @@ return raise AssertionError("XML is the same") + def test_get_resource_disk(self): + ds = self._get_mockds() + dev = ds.get_resource_disk_on_freebsd(1) + self.assertEqual("da1", dev) + + @mock.patch('cloudinit.util.subp') + def test_find_freebsd_part_on_Azure(self, mock_subp): + glabel_out = ''' +gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1 + label/rootfs N/A da0p2 + label/swap N/A da0p3 +''' + mock_subp.return_value = (glabel_out, "") + res = find_freebsd_part("/dev/label/rootfs") + self.assertEqual("da0p2", res) + + def test_get_path_dev_freebsd_on_Azure(self): + mnt_list = ''' +/dev/label/rootfs / ufs rw 1 1 +devfs /dev devfs rw,multilabel 0 0 +fdescfs /dev/fd fdescfs rw 0 0 +/dev/da1s1 /mnt/resource ufs rw 2 2 +''' + with mock.patch.object(os.path, 'exists', + return_value=True): + res = get_path_dev_freebsd('/etc', mnt_list) + self.assertIsNotNone(res) + def test_basic_seed_dir(self): odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata), @@ -353,7 +417,7 @@ cfg = dsrc.get_config_obj() self.assertEqual(dsrc.device_name_to_device("ephemeral0"), - DataSourceAzure.RESOURCE_DISK_PATH) + dsaz.RESOURCE_DISK_PATH) assert 'disk_setup' in cfg assert 'fs_setup' in cfg self.assertIsInstance(cfg['disk_setup'], dict) @@ -403,14 +467,13 @@ # Make sure that the redacted password on disk is not used by CI self.assertNotEqual(dsrc.cfg.get('password'), - DataSourceAzure.DEF_PASSWD_REDACTION) + dsaz.DEF_PASSWD_REDACTION) # Make sure that the password was really encrypted et = ET.fromstring(on_disk_ovf) for elem in et.iter(): if 'UserPassword' in elem.tag: - self.assertEqual(DataSourceAzure.DEF_PASSWD_REDACTION, - elem.text) + self.assertEqual(dsaz.DEF_PASSWD_REDACTION, elem.text) def test_ovf_env_arrives_in_waagent_dir(self): xml = construct_valid_ovf_env(data={}, userdata="FOODATA") @@ -459,17 +522,17 @@ def mock_out_azure_moving_parts(self): self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'invoke_agent')) + mock.patch.object(dsaz, 'invoke_agent')) self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'wait_for_files')) + mock.patch.object(dsaz, 'wait_for_files')) self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'list_possible_azure_ds_devs', + mock.patch.object(dsaz, 'list_possible_azure_ds_devs', mock.MagicMock(return_value=[]))) self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'get_metadata_from_fabric', + mock.patch.object(dsaz, 'get_metadata_from_fabric', mock.MagicMock(return_value={}))) self.patches.enter_context( - mock.patch.object(DataSourceAzure.util, 'read_dmi_data', + mock.patch.object(dsaz.util, 'read_dmi_data', mock.MagicMock(return_value='test-instance-id'))) def setUp(self): @@ -478,13 +541,13 @@ self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') self.paths = helpers.Paths({'cloud_dir': self.tmp}) self.addCleanup(shutil.rmtree, self.tmp) - 
DataSourceAzure.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d self.patches = ExitStack() self.mock_out_azure_moving_parts() self.get_hostname = self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'get_hostname')) + mock.patch.object(dsaz, 'get_hostname')) self.set_hostname = self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'set_hostname')) + mock.patch.object(dsaz, 'set_hostname')) self.subp = self.patches.enter_context( mock.patch('cloudinit.sources.DataSourceAzure.util.subp')) @@ -495,7 +558,7 @@ if ovfcontent is not None: populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': ovfcontent}) - dsrc = DataSourceAzure.DataSourceAzureNet( + dsrc = dsaz.DataSourceAzureNet( {}, distro=None, paths=self.paths) if agent_command is not None: dsrc.ds_cfg['agent_command'] = agent_command @@ -608,7 +671,7 @@ def test_default_bounce_command_used_by_default(self): cmd = 'default-bounce-command' - DataSourceAzure.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd + dsaz.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd cfg = {'hostname_bounce': {'policy': 'force'}} data = self.get_ovf_env_with_dscfg('some-hostname', cfg) self._get_ds(data, agent_command=['not', '__builtin__']).get_data() @@ -636,15 +699,208 @@ class TestReadAzureOvf(TestCase): def test_invalid_xml_raises_non_azure_ds(self): invalid_xml = "" + construct_valid_ovf_env(data={}) - self.assertRaises(DataSourceAzure.BrokenAzureDataSource, - DataSourceAzure.read_azure_ovf, invalid_xml) + self.assertRaises(dsaz.BrokenAzureDataSource, + dsaz.read_azure_ovf, invalid_xml) def test_load_with_pubkeys(self): mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}] pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] content = construct_valid_ovf_env(pubkeys=pubkeys) - (_md, _ud, cfg) = DataSourceAzure.read_azure_ovf(content) + (_md, _ud, cfg) = dsaz.read_azure_ovf(content) for mypk in mypklist: self.assertIn(mypk, cfg['_pubkeys']) + +class TestCanDevBeReformatted(CiTestCase): + warning_file = 'dataloss_warning_readme.txt' + + def _domock(self, mockpath, sattr=None): + patcher = mock.patch(mockpath) + setattr(self, sattr, patcher.start()) + self.addCleanup(patcher.stop) + + def setUp(self): + super(TestCanDevBeReformatted, self).setUp() + + def patchup(self, devs): + bypath = {} + for path, data in devs.items(): + bypath[path] = data + if 'realpath' in data: + bypath[data['realpath']] = data + for ppath, pdata in data.get('partitions', {}).items(): + bypath[ppath] = pdata + if 'realpath' in data: + bypath[pdata['realpath']] = pdata + + def realpath(d): + return bypath[d].get('realpath', d) + + def partitions_on_device(devpath): + parts = bypath.get(devpath, {}).get('partitions', {}) + ret = [] + for path, data in parts.items(): + ret.append((data.get('num'), realpath(path))) + # return sorted by partition number + return sorted(ret, key=lambda d: d[0]) + + def mount_cb(device, callback): + p = self.tmp_dir() + for f in bypath.get(device).get('files', []): + write_file(os.path.join(p, f), content=f) + return callback(p) + + def has_ntfs_fs(device): + return bypath.get(device, {}).get('fs') == 'ntfs' + + p = 'cloudinit.sources.DataSourceAzure' + self._domock(p + "._partitions_on_device", 'm_partitions_on_device') + self._domock(p + "._has_ntfs_filesystem", 'm_has_ntfs_filesystem') + self._domock(p + ".util.mount_cb", 'm_mount_cb') + self._domock(p + ".os.path.realpath", 'm_realpath') + self._domock(p + 
".os.path.exists", 'm_exists') + + self.m_exists.side_effect = lambda p: p in bypath + self.m_realpath.side_effect = realpath + self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs + self.m_mount_cb.side_effect = mount_cb + self.m_partitions_on_device.side_effect = partitions_on_device + + def test_three_partitions_is_false(self): + """A disk with 3 partitions can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1}, + '/dev/sda2': {'num': 2}, + '/dev/sda3': {'num': 3}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertFalse(False, value) + self.assertIn("3 or more", msg.lower()) + + def test_no_partitions_is_false(self): + """A disk with no partitions can not be formatted.""" + self.patchup({'/dev/sda': {}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(False, value) + self.assertIn("not partitioned", msg.lower()) + + def test_two_partitions_not_ntfs_false(self): + """2 partitions and 2nd not ntfs can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1}, + '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertFalse(False, value) + self.assertIn("not ntfs", msg.lower()) + + def test_two_partitions_ntfs_populated_false(self): + """2 partitions and populated ntfs fs on 2nd can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1}, + '/dev/sda2': {'num': 2, 'fs': 'ntfs', + 'files': ['secret.txt']}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertFalse(False, value) + self.assertIn("files on it", msg.lower()) + + def test_two_partitions_ntfs_empty_is_true(self): + """2 partitions and empty ntfs fs on 2nd can be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1}, + '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(True, value) + self.assertIn("safe for", msg.lower()) + + def test_one_partition_not_ntfs_false(self): + """1 partition witih fs other than ntfs can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'zfs'}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(False, value) + self.assertIn("not ntfs", msg.lower()) + + def test_one_partition_ntfs_populated_false(self): + """1 mountable ntfs partition with many files can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', + 'files': ['file1.txt', 'file2.exe']}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(False, value) + self.assertIn("files on it", msg.lower()) + + def test_one_partition_ntfs_empty_is_true(self): + """1 mountable ntfs partition and no files can be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(True, value) + self.assertIn("safe for", msg.lower()) + + def test_one_partition_ntfs_empty_with_dataloss_file_is_true(self): + """1 mountable ntfs partition and only warn file can be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', + 'files': ['dataloss_warning_readme.txt']} + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + 
self.assertEqual(True, value) + self.assertIn("safe for", msg.lower()) + + def test_one_partition_through_realpath_is_true(self): + """A symlink to a device with 1 ntfs partition can be formatted.""" + epath = '/dev/disk/cloud/azure_resource' + self.patchup({ + epath: { + 'realpath': '/dev/sdb', + 'partitions': { + epath + '-part1': { + 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], + 'realpath': '/dev/sdb1'} + }}}) + value, msg = dsaz.can_dev_be_reformatted(epath) + self.assertEqual(True, value) + self.assertIn("safe for", msg.lower()) + + def test_three_partition_through_realpath_is_false(self): + """A symlink to a device with 3 partitions can not be formatted.""" + epath = '/dev/disk/cloud/azure_resource' + self.patchup({ + epath: { + 'realpath': '/dev/sdb', + 'partitions': { + epath + '-part1': { + 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], + 'realpath': '/dev/sdb1'}, + epath + '-part2': {'num': 2, 'fs': 'ext3', + 'realpath': '/dev/sdb2'}, + epath + '-part3': {'num': 3, 'fs': 'ext', + 'realpath': '/dev/sdb3'} + }}}) + value, msg = dsaz.can_dev_be_reformatted(epath) + self.assertEqual(False, value) + self.assertIn("3 or more", msg.lower()) + # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_cloudstack.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_cloudstack.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_cloudstack.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_cloudstack.py 2017-05-26 18:36:38.000000000 +0000 @@ -15,6 +15,16 @@ mod_name = 'cloudinit.sources.DataSourceCloudStack' self.patches.enter_context(mock.patch('{0}.ec2'.format(mod_name))) self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name))) + default_gw = "192.201.20.0" + get_latest_lease = mock.MagicMock(return_value=None) + self.patches.enter_context(mock.patch( + 'cloudinit.sources.DataSourceCloudStack.get_latest_lease', + get_latest_lease)) + + get_default_gw = mock.MagicMock(return_value=default_gw) + self.patches.enter_context(mock.patch( + 'cloudinit.sources.DataSourceCloudStack.get_default_gateway', + get_default_gw)) def _set_password_server_response(self, response_string): subp = mock.MagicMock(return_value=(response_string, '')) diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_configdrive.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_configdrive.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_configdrive.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_configdrive.py 2017-05-26 18:36:38.000000000 +0000 @@ -645,7 +645,7 @@ routes) eni_renderer = eni.Renderer() eni_renderer.render_network_state( - self.tmp, network_state.parse_net_config_data(ncfg)) + network_state.parse_net_config_data(ncfg), self.tmp) with open(os.path.join(self.tmp, "etc", "network", "interfaces"), 'r') as f: eni_rendering = f.read() @@ -665,8 +665,9 @@ ncfg = openstack.convert_net_json(NETWORK_DATA_BOND, known_macs=KNOWN_MACS) eni_renderer = eni.Renderer() + eni_renderer.render_network_state( - self.tmp, network_state.parse_net_config_data(ncfg)) + network_state.parse_net_config_data(ncfg), self.tmp) with open(os.path.join(self.tmp, "etc", "network", "interfaces"), 'r') as f: eni_rendering = f.read() @@ -697,7 +698,7 @@ known_macs=KNOWN_MACS) eni_renderer = eni.Renderer() eni_renderer.render_network_state( - self.tmp, 
network_state.parse_net_config_data(ncfg))
+            network_state.parse_net_config_data(ncfg), self.tmp)
         with open(os.path.join(self.tmp, "etc",
                   "network", "interfaces"), 'r') as f:
             eni_rendering = f.read()
diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_digitalocean.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_digitalocean.py
--- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_digitalocean.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_digitalocean.py 2017-05-26 18:36:38.000000000 +0000
@@ -1,6 +1,8 @@
 # Copyright (C) 2014 Neal Shrader
 #
 # Author: Neal Shrader
+# Author: Ben Howard
+# Author: Scott Moser
 #
 # This file is part of cloud-init. See LICENSE file for license information.

@@ -194,7 +196,13 @@

 class TestNetworkConvert(TestCase):

-    def _get_networking(self):
+    @mock.patch('cloudinit.net.get_interfaces_by_mac')
+    def _get_networking(self, m_get_by_mac):
+        m_get_by_mac.return_value = {
+            '04:01:57:d1:9e:01': 'ens1',
+            '04:01:57:d1:9e:02': 'ens2',
+            'b8:ae:ed:75:5f:9a': 'enp0s25',
+            'ae:cc:08:7c:88:00': 'meta2p1'}
         netcfg = digitalocean.convert_network_configuration(
             DO_META['interfaces'], DO_META['dns']['nameservers'])
         self.assertIn('config', netcfg)
@@ -203,18 +211,33 @@
     def test_networking_defined(self):
         netcfg = self._get_networking()
         self.assertIsNotNone(netcfg)
+        dns_defined = False

-        for nic_def in netcfg.get('config'):
-            print(json.dumps(nic_def, indent=3))
-            n_type = nic_def.get('type')
-            n_subnets = nic_def.get('type')
-            n_name = nic_def.get('name')
-            n_mac = nic_def.get('mac_address')
-
-            self.assertIsNotNone(n_type)
-            self.assertIsNotNone(n_subnets)
-            self.assertIsNotNone(n_name)
-            self.assertIsNotNone(n_mac)
+        for part in netcfg.get('config'):
+            n_type = part.get('type')
+            print("testing part ", n_type, "\n", json.dumps(part, indent=3))
+
+            if n_type == 'nameserver':
+                n_address = part.get('address')
+                self.assertIsNotNone(n_address)
+                self.assertEqual(len(n_address), 3)
+
+                dns_resolvers = DO_META["dns"]["nameservers"]
+                for x in n_address:
+                    self.assertIn(x, dns_resolvers)
+                dns_defined = True
+
+            else:
+                n_subnets = part.get('subnets')
+                n_name = part.get('name')
+                n_mac = part.get('mac_address')
+
+                self.assertIsNotNone(n_type)
+                self.assertIsNotNone(n_subnets)
+                self.assertIsNotNone(n_name)
+                self.assertIsNotNone(n_mac)
+
+        self.assertTrue(dns_defined)

     def _get_nic_definition(self, int_type, expected_name):
+        """helper function to return if_type (i.e.
public) and the expected
@@ -241,6 +264,29 @@
             print(json.dumps(subn, indent=3))
         return subn

+    def test_correct_gateways_defined(self):
+        """test to make sure the eth0 ipv4 and ipv6 gateways are defined"""
+        netcfg = self._get_networking()
+        gateways = []
+        for nic_def in netcfg.get('config'):
+            if nic_def.get('type') != 'physical':
+                continue
+            for subn in nic_def.get('subnets'):
+                if 'gateway' in subn:
+                    gateways.append(subn.get('gateway'))
+
+        # we should have two gateways, one ipv4 and one ipv6
+        self.assertEqual(len(gateways), 2)
+
+        # make sure that the ipv4 gateway is there
+        (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
+        ipv4_def = meta_def.get('ipv4')
+        self.assertIn(ipv4_def.get('gateway'), gateways)
+
+        # make sure that the ipv6 gateway is there
+        ipv6_def = meta_def.get('ipv6')
+        self.assertIn(ipv6_def.get('gateway'), gateways)
+
     def test_public_interface_defined(self):
         """test that the public interface is defined as eth0"""
         (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
@@ -255,12 +301,6 @@
         self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address'))
         self.assertEqual('physical', nic_def.get('type'))

-    def _check_dns_nameservers(self, subn_def):
-        self.assertIn('dns_nameservers', subn_def)
-        expected_nameservers = DO_META['dns']['nameservers']
-        nic_nameservers = subn_def.get('dns_nameservers')
-        self.assertEqual(expected_nameservers, nic_nameservers)
-
     def test_public_interface_ipv6(self):
         """test public ipv6 addressing"""
         (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
@@ -275,7 +315,6 @@
         self.assertEqual(cidr_notated_address, subn_def.get('address'))
         self.assertEqual(ipv6_def.get('gateway'), subn_def.get('gateway'))
-        self._check_dns_nameservers(subn_def)

     def test_public_interface_ipv4(self):
         """test public ipv4 addressing"""
@@ -288,7 +327,6 @@
         self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask'))
         self.assertEqual(ipv4_def.get('gateway'), subn_def.get('gateway'))
-        self._check_dns_nameservers(subn_def)

     def test_public_interface_anchor_ipv4(self):
         """test public ipv4 addressing"""
@@ -302,10 +340,15 @@
         self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask'))
         self.assertNotIn('gateway', subn_def)

-    def test_convert_without_private(self):
+    @mock.patch('cloudinit.net.get_interfaces_by_mac')
+    def test_convert_without_private(self, m_get_by_mac):
+        m_get_by_mac.return_value = {
+            'b8:ae:ed:75:5f:9a': 'enp0s25',
+            'ae:cc:08:7c:88:00': 'meta2p1'}
         netcfg = digitalocean.convert_network_configuration(
             DO_META_2['interfaces'], DO_META_2['dns']['nameservers'])

+        # print(netcfg)
         byname = {}
         for i in netcfg['config']:
             if 'name' in i:
diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_gce.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_gce.py
--- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_gce.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_gce.py 2017-05-26 18:36:38.000000000 +0000
@@ -5,6 +5,7 @@
 # This file is part of cloud-init. See LICENSE file for license information.

 import httpretty
+import mock
 import re

 from base64 import b64encode, b64decode
@@ -71,6 +72,11 @@
         self.ds = DataSourceGCE.DataSourceGCE(
             settings.CFG_BUILTIN, None,
             helpers.Paths({}))
+        platform_patcher = mock.patch(
+            'cloudinit.sources.DataSourceGCE.platform_reports_gce',
+            return_value=True)
+        self.m_platform_reports_gce = platform_patcher.start()
+        self.addCleanup(platform_patcher.stop)
         super(TestDataSourceGCE, self).setUp()

     def test_connection(self):
@@ -134,7 +140,7 @@
     def test_instance_level_ssh_keys_are_used(self):
         key_content = 'ssh-rsa JustAUser root@server'
         meta = GCE_META.copy()
-        meta['instance/attributes/sshKeys'] = 'user:{0}'.format(key_content)
+        meta['instance/attributes/ssh-keys'] = 'user:{0}'.format(key_content)

         _set_mock_metadata(meta)
         self.ds.get_data()
@@ -144,7 +150,7 @@
     def test_instance_level_keys_replace_project_level_keys(self):
         key_content = 'ssh-rsa JustAUser root@server'
         meta = GCE_META.copy()
-        meta['instance/attributes/sshKeys'] = 'user:{0}'.format(key_content)
+        meta['instance/attributes/ssh-keys'] = 'user:{0}'.format(key_content)

         _set_mock_metadata(meta)
         self.ds.get_data()
@@ -153,7 +159,13 @@
     def test_only_last_part_of_zone_used_for_availability_zone(self):
         _set_mock_metadata()
-        self.ds.get_data()
+        r = self.ds.get_data()
+        self.assertEqual(True, r)
         self.assertEqual('bar', self.ds.availability_zone)

+    def test_get_data_returns_false_if_not_on_gce(self):
+        self.m_platform_reports_gce.return_value = False
+        self.assertEqual(False, self.ds.get_data())
+
+
 # vi: ts=4 expandtab
diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_maas.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_maas.py
--- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_maas.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_maas.py 2017-05-26 18:36:38.000000000 +0000
@@ -44,7 +44,7 @@

         # verify that 'userdata' is not returned as part of the metadata
         self.assertFalse(('user-data' in md))
-        self.assertEqual(vd, None)
+        self.assertIsNone(vd)

     def test_seed_dir_valid_extra(self):
         """Verify extra files do not affect seed_dir validity."""
diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_opennebula.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_opennebula.py
--- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_opennebula.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_opennebula.py 2017-05-26 18:36:38.000000000 +0000
@@ -126,14 +126,14 @@
         populate_dir(self.seed_dir, {'context.sh': ''})
         results = ds.read_context_disk_dir(self.seed_dir)

-        self.assertEqual(results['userdata'], None)
+        self.assertIsNone(results['userdata'])
         self.assertEqual(results['metadata'], {})

     def test_seed_dir_empty2_context(self):
         populate_context_dir(self.seed_dir, {})
         results = ds.read_context_disk_dir(self.seed_dir)

-        self.assertEqual(results['userdata'], None)
+        self.assertIsNone(results['userdata'])
         self.assertEqual(results['metadata'], {})

     def test_seed_dir_broken_context(self):
@@ -195,7 +195,9 @@
         self.assertTrue('userdata' in results)
         self.assertEqual(USER_DATA, results['userdata'])

-    def test_hostname(self):
+    @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+    def test_hostname(self, m_get_phys_by_mac):
+        m_get_phys_by_mac.return_value = {'02:00:0a:12:01:01': 'eth0'}
         for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
             my_d = os.path.join(self.tmp, k)
populate_context_dir(my_d, {k: PUBLIC_IP}) @@ -205,11 +207,14 @@ self.assertTrue('local-hostname' in results['metadata']) self.assertEqual(PUBLIC_IP, results['metadata']['local-hostname']) - def test_network_interfaces(self): + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_network_interfaces(self, m_get_phys_by_mac): + m_get_phys_by_mac.return_value = {'02:00:0a:12:01:01': 'eth0'} populate_context_dir(self.seed_dir, {'ETH0_IP': '1.2.3.4'}) results = ds.read_context_disk_dir(self.seed_dir) self.assertTrue('network-interfaces' in results) + self.assertTrue('1.2.3.4' in results['network-interfaces']) def test_find_candidates(self): def my_devs_with(criteria): diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_openstack.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_openstack.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_openstack.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_openstack.py 2017-05-26 18:36:38.000000000 +0000 @@ -242,7 +242,7 @@ self.assertEqual(USER_DATA, ds_os.userdata_raw) self.assertEqual(2, len(ds_os.files)) self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure) - self.assertEqual(ds_os.vendordata_raw, None) + self.assertIsNone(ds_os.vendordata_raw) @hp.activate def test_bad_datasource_meta(self): @@ -318,7 +318,7 @@ self.assertEqual(self.cvj(data), data) def test_vd_load_dict_no_ci(self): - self.assertEqual(self.cvj({'foo': 'bar'}), None) + self.assertIsNone(self.cvj({'foo': 'bar'})) def test_vd_load_dict_ci_dict(self): self.assertRaises(ValueError, self.cvj, diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_ovf.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_ovf.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_datasource/test_ovf.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_datasource/test_ovf.py 2017-05-26 18:36:38.000000000 +0000 @@ -68,6 +68,6 @@ md, ud, cfg = dsovf.read_ovf_environment(env) self.assertEqual({"instance-id": "inst-001"}, md) self.assertEqual({'password': "passw0rd"}, cfg) - self.assertEqual(None, ud) + self.assertIsNone(ud) # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_distros/test_netconfig.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_distros/test_netconfig.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_distros/test_netconfig.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_distros/test_netconfig.py 2017-05-26 18:36:38.000000000 +0000 @@ -17,6 +17,7 @@ from cloudinit import distros from cloudinit.distros.parsers.sys_conf import SysConf from cloudinit import helpers +from cloudinit.net import eni from cloudinit import settings from cloudinit import util @@ -28,10 +29,10 @@ auto eth0 iface eth0 inet static address 192.168.1.5 - netmask 255.255.255.0 - network 192.168.0.0 broadcast 192.168.1.0 gateway 192.168.1.254 + netmask 255.255.255.0 + network 192.168.0.0 auto eth1 iface eth1 inet dhcp @@ -67,6 +68,100 @@ gateway 2607:f0d0:1002:0011::1 ''' +V1_NET_CFG = {'config': [{'name': 'eth0', + + 'subnets': [{'address': '192.168.1.5', + 'broadcast': '192.168.1.0', + 'gateway': '192.168.1.254', + 'netmask': '255.255.255.0', + 'type': 'static'}], + 'type': 'physical'}, + {'name': 'eth1', + 'subnets': [{'control': 'auto', 'type': 'dhcp4'}], + 'type': 'physical'}], + 'version': 1} + +V1_NET_CFG_OUTPUT = """ 
+# This file is generated from information provided by
+# the datasource. Changes to it will not persist across an instance.
+# To disable cloud-init's network configuration capabilities, write a file
+# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
+# network: {config: disabled}
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+    address 192.168.1.5
+    broadcast 192.168.1.0
+    gateway 192.168.1.254
+    netmask 255.255.255.0
+
+auto eth1
+iface eth1 inet dhcp
+"""
+
+V1_NET_CFG_IPV6 = {'config': [{'name': 'eth0',
+                               'subnets': [{'address':
+                                            '2607:f0d0:1002:0011::2',
+                                            'gateway':
+                                            '2607:f0d0:1002:0011::1',
+                                            'netmask': '64',
+                                            'type': 'static'}],
+                               'type': 'physical'},
+                              {'name': 'eth1',
+                               'subnets': [{'control': 'auto',
+                                            'type': 'dhcp4'}],
+                               'type': 'physical'}],
+                   'version': 1}
+
+
+V1_TO_V2_NET_CFG_OUTPUT = """
+# This file is generated from information provided by
+# the datasource. Changes to it will not persist across an instance.
+# To disable cloud-init's network configuration capabilities, write a file
+# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
+# network: {config: disabled}
+network:
+    version: 2
+    ethernets:
+        eth0:
+            addresses:
+            - 192.168.1.5/24
+            gateway4: 192.168.1.254
+        eth1:
+            dhcp4: true
+"""
+
+V2_NET_CFG = {
+    'ethernets': {
+        'eth7': {
+            'addresses': ['192.168.1.5/255.255.255.0'],
+            'gateway4': '192.168.1.254'},
+        'eth9': {
+            'dhcp4': True}
+    },
+    'version': 2
+}
+
+
+V2_TO_V2_NET_CFG_OUTPUT = """
+# This file is generated from information provided by
+# the datasource. Changes to it will not persist across an instance.
+# To disable cloud-init's network configuration capabilities, write a file
+# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
+# network: {config: disabled}
+network:
+    version: 2
+    ethernets:
+        eth7:
+            addresses:
+            - 192.168.1.5/255.255.255.0
+            gateway4: 192.168.1.254
+        eth9:
+            dhcp4: true
+"""
+

 class WriteBuffer(object):
     def __init__(self):
@@ -83,12 +178,28 @@

 class TestNetCfgDistro(TestCase):

-    def _get_distro(self, dname):
+    frbsd_ifout = """\
+hn0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500
+        options=51b<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,TSO4,LRO>
+        ether 00:15:5d:4c:73:00
+        inet6 fe80::215:5dff:fe4c:7300%hn0 prefixlen 64 scopeid 0x2
+        inet 10.156.76.127 netmask 0xfffffc00 broadcast 10.156.79.255
+        nd6 options=23<PERFORMNUD,ACCEPT_RTADV,AUTO_LINKLOCAL>
+        media: Ethernet autoselect (10Gbase-T <full-duplex>)
+        status: active
+"""
+
+    def setUp(self):
+        super(TestNetCfgDistro, self).setUp()
+
+    def _get_distro(self, dname, renderers=None):
         cls = distros.fetch(dname)
         cfg = settings.CFG_BUILTIN
         cfg['system_info']['distro'] = dname
+        if renderers:
+            cfg['system_info']['network'] = {'renderers': renderers}
         paths = helpers.Paths({})
-        return cls(dname, cfg, paths)
+        return cls(dname, cfg.get('system_info'), paths)

     def test_simple_write_ub(self):
         ub_distro = self._get_distro('ubuntu')
@@ -116,6 +227,118 @@
             self.assertEqual(str(write_buf).strip(), BASE_NET_CFG.strip())
             self.assertEqual(write_buf.mode, 0o644)

+    def test_apply_network_config_eni_ub(self):
+        ub_distro = self._get_distro('ubuntu')
+        with ExitStack() as mocks:
+            write_bufs = {}
+
+            def replace_write(filename, content, mode=0o644, omode="wb"):
+                buf = WriteBuffer()
+                buf.mode = mode
+                buf.omode = omode
+                buf.write(content)
+                write_bufs[filename] = buf
+
+            # eni availability checks
+            mocks.enter_context(
+                mock.patch.object(util, 'which', return_value=True))
+            mocks.enter_context(
+                mock.patch.object(eni, 'available', return_value=True))
+            mocks.enter_context(
+                mock.patch.object(util, 'ensure_dir'))
+
mocks.enter_context( + mock.patch.object(util, 'write_file', replace_write)) + mocks.enter_context( + mock.patch.object(os.path, 'isfile', return_value=False)) + mocks.enter_context( + mock.patch("cloudinit.net.eni.glob.glob", + return_value=[])) + + ub_distro.apply_network_config(V1_NET_CFG, False) + + self.assertEqual(len(write_bufs), 2) + eni_name = '/etc/network/interfaces.d/50-cloud-init.cfg' + self.assertIn(eni_name, write_bufs) + write_buf = write_bufs[eni_name] + self.assertEqual(str(write_buf).strip(), V1_NET_CFG_OUTPUT.strip()) + self.assertEqual(write_buf.mode, 0o644) + + def test_apply_network_config_v1_to_netplan_ub(self): + renderers = ['netplan'] + devlist = ['eth0', 'lo'] + ub_distro = self._get_distro('ubuntu', renderers=renderers) + with ExitStack() as mocks: + write_bufs = {} + + def replace_write(filename, content, mode=0o644, omode="wb"): + buf = WriteBuffer() + buf.mode = mode + buf.omode = omode + buf.write(content) + write_bufs[filename] = buf + + mocks.enter_context( + mock.patch.object(util, 'which', return_value=True)) + mocks.enter_context( + mock.patch.object(util, 'write_file', replace_write)) + mocks.enter_context( + mock.patch.object(util, 'ensure_dir')) + mocks.enter_context( + mock.patch.object(util, 'subp', return_value=(0, 0))) + mocks.enter_context( + mock.patch.object(os.path, 'isfile', return_value=False)) + mocks.enter_context( + mock.patch("cloudinit.net.netplan.get_devicelist", + return_value=devlist)) + + ub_distro.apply_network_config(V1_NET_CFG, False) + + self.assertEqual(len(write_bufs), 1) + netplan_name = '/etc/netplan/50-cloud-init.yaml' + self.assertIn(netplan_name, write_bufs) + write_buf = write_bufs[netplan_name] + self.assertEqual(str(write_buf).strip(), + V1_TO_V2_NET_CFG_OUTPUT.strip()) + self.assertEqual(write_buf.mode, 0o644) + + def test_apply_network_config_v2_passthrough_ub(self): + renderers = ['netplan'] + devlist = ['eth0', 'lo'] + ub_distro = self._get_distro('ubuntu', renderers=renderers) + with ExitStack() as mocks: + write_bufs = {} + + def replace_write(filename, content, mode=0o644, omode="wb"): + buf = WriteBuffer() + buf.mode = mode + buf.omode = omode + buf.write(content) + write_bufs[filename] = buf + + mocks.enter_context( + mock.patch.object(util, 'which', return_value=True)) + mocks.enter_context( + mock.patch.object(util, 'write_file', replace_write)) + mocks.enter_context( + mock.patch.object(util, 'ensure_dir')) + mocks.enter_context( + mock.patch.object(util, 'subp', return_value=(0, 0))) + mocks.enter_context( + mock.patch.object(os.path, 'isfile', return_value=False)) + # FreeBSD does not have '/sys/class/net' file, + # so we need mock here. 
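+            # (Note: cloudinit.net's device discovery is roughly
+            # os.listdir on '/sys/class/net', so patching os.listdir to
+            # return devlist stands in for a host that has eth0 and lo.)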
+ mocks.enter_context( + mock.patch.object(os, 'listdir', return_value=devlist)) + ub_distro.apply_network_config(V2_NET_CFG, False) + + self.assertEqual(len(write_bufs), 1) + netplan_name = '/etc/netplan/50-cloud-init.yaml' + self.assertIn(netplan_name, write_bufs) + write_buf = write_bufs[netplan_name] + self.assertEqual(str(write_buf).strip(), + V2_TO_V2_NET_CFG_OUTPUT.strip()) + self.assertEqual(write_buf.mode, 0o644) + def assertCfgEquals(self, blob1, blob2): b1 = dict(SysConf(blob1.strip().splitlines())) b2 = dict(SysConf(blob2.strip().splitlines())) @@ -127,6 +350,29 @@ for (k, v) in b1.items(): self.assertEqual(v, b2[k]) + @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_list') + @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_ifname_out') + def test_get_ip_nic_freebsd(self, ifname_out, iflist): + frbsd_distro = self._get_distro('freebsd') + iflist.return_value = "lo0 hn0" + ifname_out.return_value = self.frbsd_ifout + res = frbsd_distro.get_ipv4() + self.assertEqual(res, ['lo0', 'hn0']) + res = frbsd_distro.get_ipv6() + self.assertEqual(res, []) + + @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_ether') + @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_ifname_out') + @mock.patch('cloudinit.distros.freebsd.Distro.get_interface_mac') + def test_generate_fallback_config_freebsd(self, mac, ifname_out, if_ether): + frbsd_distro = self._get_distro('freebsd') + + if_ether.return_value = 'hn0' + ifname_out.return_value = self.frbsd_ifout + mac.return_value = '00:15:5d:4c:73:00' + res = frbsd_distro.generate_fallback_config() + self.assertIsNotNone(res) + def test_simple_write_rh(self): rh_distro = self._get_distro('rhel') @@ -195,6 +441,79 @@ self.assertCfgEquals(expected_buf, str(write_buf)) self.assertEqual(write_buf.mode, 0o644) + def test_apply_network_config_rh(self): + renderers = ['sysconfig'] + rh_distro = self._get_distro('rhel', renderers=renderers) + + write_bufs = {} + + def replace_write(filename, content, mode=0o644, omode="wb"): + buf = WriteBuffer() + buf.mode = mode + buf.omode = omode + buf.write(content) + write_bufs[filename] = buf + + with ExitStack() as mocks: + # sysconfig availability checks + mocks.enter_context( + mock.patch.object(util, 'which', return_value=True)) + mocks.enter_context( + mock.patch.object(util, 'write_file', replace_write)) + mocks.enter_context( + mock.patch.object(util, 'load_file', return_value='')) + mocks.enter_context( + mock.patch.object(os.path, 'isfile', return_value=True)) + + rh_distro.apply_network_config(V1_NET_CFG, False) + + self.assertEqual(len(write_bufs), 5) + + # eth0 + self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', + write_bufs) + write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0'] + expected_buf = ''' +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=none +DEVICE=eth0 +IPADDR=192.168.1.5 +NETMASK=255.255.255.0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +''' + self.assertCfgEquals(expected_buf, str(write_buf)) + self.assertEqual(write_buf.mode, 0o644) + + # eth1 + self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', + write_bufs) + write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1'] + expected_buf = ''' +# Created by cloud-init on instance boot automatically, do not edit. 
+# +BOOTPROTO=dhcp +DEVICE=eth1 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +''' + self.assertCfgEquals(expected_buf, str(write_buf)) + self.assertEqual(write_buf.mode, 0o644) + + self.assertIn('/etc/sysconfig/network', write_bufs) + write_buf = write_bufs['/etc/sysconfig/network'] + expected_buf = ''' +# Created by cloud-init v. 0.7 +NETWORKING=yes +''' + self.assertCfgEquals(expected_buf, str(write_buf)) + self.assertEqual(write_buf.mode, 0o644) + def test_write_ipv6_rhel(self): rh_distro = self._get_distro('rhel') @@ -214,7 +533,6 @@ mock.patch.object(util, 'load_file', return_value='')) mocks.enter_context( mock.patch.object(os.path, 'isfile', return_value=False)) - rh_distro.apply_network(BASE_NET_CFG_IPV6, False) self.assertEqual(len(write_bufs), 4) @@ -262,6 +580,77 @@ ''' self.assertCfgEquals(expected_buf, str(write_buf)) self.assertEqual(write_buf.mode, 0o644) + + self.assertIn('/etc/sysconfig/network', write_bufs) + write_buf = write_bufs['/etc/sysconfig/network'] + expected_buf = ''' +# Created by cloud-init v. 0.7 +NETWORKING=yes +NETWORKING_IPV6=yes +IPV6_AUTOCONF=no +''' + self.assertCfgEquals(expected_buf, str(write_buf)) + self.assertEqual(write_buf.mode, 0o644) + + def test_apply_network_config_ipv6_rh(self): + renderers = ['sysconfig'] + rh_distro = self._get_distro('rhel', renderers=renderers) + + write_bufs = {} + + def replace_write(filename, content, mode=0o644, omode="wb"): + buf = WriteBuffer() + buf.mode = mode + buf.omode = omode + buf.write(content) + write_bufs[filename] = buf + + with ExitStack() as mocks: + mocks.enter_context( + mock.patch.object(util, 'which', return_value=True)) + mocks.enter_context( + mock.patch.object(util, 'write_file', replace_write)) + mocks.enter_context( + mock.patch.object(util, 'load_file', return_value='')) + mocks.enter_context( + mock.patch.object(os.path, 'isfile', return_value=True)) + + rh_distro.apply_network_config(V1_NET_CFG_IPV6, False) + + self.assertEqual(len(write_bufs), 5) + + self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', + write_bufs) + write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0'] + expected_buf = ''' +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=none +DEVICE=eth0 +IPV6ADDR=2607:f0d0:1002:0011::2/64 +IPV6INIT=yes +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +''' + self.assertCfgEquals(expected_buf, str(write_buf)) + self.assertEqual(write_buf.mode, 0o644) + self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', + write_bufs) + write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1'] + expected_buf = ''' +# Created by cloud-init on instance boot automatically, do not edit. 
+#
+BOOTPROTO=dhcp
+DEVICE=eth1
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+'''
+        self.assertCfgEquals(expected_buf, str(write_buf))
+        self.assertEqual(write_buf.mode, 0o644)

         self.assertIn('/etc/sysconfig/network', write_bufs)
         write_buf = write_bufs['/etc/sysconfig/network']
diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_distros/test_resolv.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_distros/test_resolv.py
--- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_distros/test_resolv.py 2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_distros/test_resolv.py 2017-05-26 18:36:38.000000000 +0000
@@ -30,7 +30,7 @@

     def test_local_domain(self):
         rp = resolv_conf.ResolvConf(BASE_RESOLVE)
-        self.assertEqual(None, rp.local_domain)
+        self.assertIsNone(rp.local_domain)

         rp.local_domain = "bob"
         self.assertEqual('bob', rp.local_domain)
@@ -46,7 +46,7 @@
         self.assertNotIn('10.3', rp.nameservers)
         self.assertEqual(len(rp.nameservers), 3)
         rp.add_nameserver('10.2')
-        self.assertRaises(ValueError, rp.add_nameserver, '10.3')
+        rp.add_nameserver('10.3')
         self.assertNotIn('10.3', rp.nameservers)

     def test_search_domains(self):
diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_ds_identify.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_ds_identify.py
--- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_ds_identify.py 1970-01-01 00:00:00.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_ds_identify.py 2017-05-26 18:36:38.000000000 +0000
@@ -0,0 +1,300 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import os
+from uuid import uuid4
+
+from cloudinit import safeyaml
+from cloudinit import util
+from .helpers import CiTestCase, dir2dict, json_dumps, populate_dir
+
+UNAME_MYSYS = ("Linux bart 4.4.0-62-generic #83-Ubuntu "
+               "SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux")
+BLKID_EFI_ROOT = """
+DEVNAME=/dev/sda1
+UUID=8B36-5390
+TYPE=vfat
+PARTUUID=30d7c715-a6ae-46ee-b050-afc6467fc452
+
+DEVNAME=/dev/sda2
+UUID=19ac97d5-6973-4193-9a09-2e6bbfa38262
+TYPE=ext4
+PARTUUID=30c65c77-e07d-4039-b2fb-88b1fb5fa1fc
+"""
+
+DI_DEFAULT_POLICY = "search,found=all,maybe=all,notfound=enabled"
+DI_DEFAULT_POLICY_NO_DMI = "search,found=all,maybe=all,notfound=disabled"
+
+SHELL_MOCK_TMPL = """\
+%(name)s() {
+   local out='%(out)s' err='%(err)s' r='%(ret)s' RET='%(RET)s'
+   [ "$out" = "_unset" ] || echo "$out"
+   [ "$err" = "_unset" ] || echo "$err" >&2
+   [ "$RET" = "_unset" ] || _RET="$RET"
+   return $r
+}
+"""
+
+RC_FOUND = 0
+RC_NOT_FOUND = 1
+DS_NONE = 'None'
+
+P_PRODUCT_NAME = "sys/class/dmi/id/product_name"
+P_PRODUCT_SERIAL = "sys/class/dmi/id/product_serial"
+P_PRODUCT_UUID = "sys/class/dmi/id/product_uuid"
+P_DSID_CFG = "etc/cloud/ds-identify.cfg"
+
+MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0}
+
+
+class TestDsIdentify(CiTestCase):
+    dsid_path = os.path.realpath('tools/ds-identify')
+
+    def call(self, rootd=None, mocks=None, args=None, files=None,
+             policy_dmi=DI_DEFAULT_POLICY,
+             policy_nodmi=DI_DEFAULT_POLICY_NO_DMI):
+        if args is None:
+            args = []
+        if mocks is None:
+            mocks = []
+
+        if files is None:
+            files = {}
+
+        if rootd is None:
+            rootd = self.tmp_dir()
+
+        unset = '_unset'
+        wrap = self.tmp_path(path="_shwrap", dir=rootd)
+        populate_dir(rootd, files)
+
+        # DI_DEFAULT_POLICY* are always declared so as not to rely
+        # on the default in the code. This is because SRU releases change
+        # the value in the code, and thus tests would fail there.
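+        # A rough sketch of the wrapper assembled below (paths are
+        # illustrative only):
+        #   DI_MAIN=noop          # source ds-identify without running main
+        #   PATH_ROOT='<rootd>'   # point all filesystem probes at rootd
+        #   . tools/ds-identify
+        #   <one shell function per entry in mocks/defaults>
+        #   main "<args>"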
+ head = [ + "DI_MAIN=noop", + "DEBUG_LEVEL=2", + "DI_LOG=stderr", + "PATH_ROOT='%s'" % rootd, + ". " + self.dsid_path, + 'DI_DEFAULT_POLICY="%s"' % policy_dmi, + 'DI_DEFAULT_POLICY_NO_DMI="%s"' % policy_nodmi, + "" + ] + + def write_mock(data): + ddata = {'out': None, 'err': None, 'ret': 0, 'RET': None} + ddata.update(data) + for k in ddata: + if ddata[k] is None: + ddata[k] = unset + return SHELL_MOCK_TMPL % ddata + + mocklines = [] + defaults = [ + {'name': 'detect_virt', 'RET': 'none', 'ret': 1}, + {'name': 'uname', 'out': UNAME_MYSYS}, + {'name': 'blkid', 'out': BLKID_EFI_ROOT}, + ] + + written = [d['name'] for d in mocks] + for data in mocks: + mocklines.append(write_mock(data)) + for d in defaults: + if d['name'] not in written: + mocklines.append(write_mock(d)) + + endlines = [ + 'main %s' % ' '.join(['"%s"' % s for s in args]) + ] + + with open(wrap, "w") as fp: + fp.write('\n'.join(head + mocklines + endlines) + "\n") + + rc = 0 + try: + out, err = util.subp(['sh', '-c', '. %s' % wrap], capture=True) + except util.ProcessExecutionError as e: + rc = e.exit_code + out = e.stdout + err = e.stderr + + cfg = None + cfg_out = os.path.join(rootd, 'run/cloud-init/cloud.cfg') + if os.path.exists(cfg_out): + contents = util.load_file(cfg_out) + try: + cfg = safeyaml.load(contents) + except Exception as e: + cfg = {"_INVALID_YAML": contents, + "_EXCEPTION": str(e)} + + return rc, out, err, cfg, dir2dict(rootd) + + def _call_via_dict(self, data, rootd=None, **kwargs): + # return output of self.call with a dict input like VALID_CFG[item] + xwargs = {'rootd': rootd} + for k in ('mocks', 'args', 'policy_dmi', 'policy_nodmi', 'files'): + if k in data: + xwargs[k] = data[k] + if k in kwargs: + xwargs[k] = kwargs[k] + + return self.call(**xwargs) + + def _test_ds_found(self, name): + data = copy.deepcopy(VALID_CFG[name]) + return self._check_via_dict( + data, RC_FOUND, dslist=[data.get('ds'), DS_NONE]) + + def _check_via_dict(self, data, rc, dslist=None, **kwargs): + found_rc, out, err, cfg, files = self._call_via_dict(data, **kwargs) + good = False + try: + self.assertEqual(rc, found_rc) + if dslist is not None: + self.assertEqual(dslist, cfg['datasource_list']) + good = True + finally: + if not good: + _print_run_output(rc, out, err, cfg, files) + return rc, out, err, cfg, files + + def test_aws_ec2_hvm(self): + """EC2: hvm instances use dmi serial and uuid starting with 'ec2'.""" + self._test_ds_found('Ec2-hvm') + + def test_aws_ec2_xen(self): + """EC2: sys/hypervisor/uuid starts with ec2.""" + self._test_ds_found('Ec2-xen') + + def test_brightbox_is_ec2(self): + """EC2: product_serial ends with 'brightbox.com'""" + self._test_ds_found('Ec2-brightbox') + + def test_gce_by_product_name(self): + """GCE identifies itself with product_name.""" + self._test_ds_found('GCE') + + def test_gce_by_serial(self): + """Older gce compute instances must be identified by serial.""" + self._test_ds_found('GCE-serial') + + def test_config_drive(self): + """ConfigDrive datasource has a disk with LABEL=config-2.""" + self._test_ds_found('ConfigDrive') + return + + def test_policy_disabled(self): + """A Builtin policy of 'disabled' should return not found. 
+ + Even though a search would find something, the builtin policy of + disabled should cause the return of not found.""" + mydata = copy.deepcopy(VALID_CFG['Ec2-hvm']) + self._check_via_dict(mydata, rc=RC_NOT_FOUND, policy_dmi="disabled") + + def test_policy_config_disable_overrides_builtin(self): + """explicit policy: disabled in config file should cause not found.""" + mydata = copy.deepcopy(VALID_CFG['Ec2-hvm']) + mydata['files'][P_DSID_CFG] = '\n'.join(['policy: disabled', '']) + self._check_via_dict(mydata, rc=RC_NOT_FOUND) + + def test_single_entry_defines_datasource(self): + """If config has a single entry in datasource_list, that is used. + + Test the valid Ec2-hvm, but provide a config file that specifies + a single entry in datasource_list. The configured value should + be used.""" + mydata = copy.deepcopy(VALID_CFG['Ec2-hvm']) + cfgpath = 'etc/cloud/cloud.cfg.d/myds.cfg' + mydata['files'][cfgpath] = 'datasource_list: ["NoCloud"]\n' + self._check_via_dict(mydata, rc=RC_FOUND, dslist=['NoCloud', DS_NONE]) + + def test_configured_list_with_none(self): + """When datasource_list already contains None, None is not added. + + The explicitly configured datasource_list has 'None' in it. That + should not have None automatically added.""" + mydata = copy.deepcopy(VALID_CFG['GCE']) + cfgpath = 'etc/cloud/cloud.cfg.d/myds.cfg' + mydata['files'][cfgpath] = 'datasource_list: ["Ec2", "None"]\n' + self._check_via_dict(mydata, rc=RC_FOUND, dslist=['Ec2', DS_NONE]) + + +def blkid_out(disks=None): + """Convert a list of disk dictionaries into blkid content.""" + if disks is None: + disks = [] + lines = [] + for disk in disks: + if not disk["DEVNAME"].startswith("/dev/"): + disk["DEVNAME"] = "/dev/" + disk["DEVNAME"] + for key in disk: + lines.append("%s=%s" % (key, disk[key])) + lines.append("") + return '\n'.join(lines) + + +def _print_run_output(rc, out, err, cfg, files): + """A helper to print return of TestDsIdentify. 
+ + _print_run_output(self.call())""" + print('\n'.join([ + '-- rc = %s --' % rc, + '-- out --', str(out), + '-- err --', str(err), + '-- cfg --', json_dumps(cfg)])) + print('-- files --') + for k, v in files.items(): + if "/_shwrap" in k: + continue + print(' === %s ===' % k) + for line in v.splitlines(): + print(" " + line) + + +VALID_CFG = { + 'Ec2-hvm': { + 'ds': 'Ec2', + 'mocks': [{'name': 'detect_virt', 'RET': 'kvm', 'ret': 0}], + 'files': { + P_PRODUCT_SERIAL: 'ec23aef5-54be-4843-8d24-8c819f88453e\n', + P_PRODUCT_UUID: 'EC23AEF5-54BE-4843-8D24-8C819F88453E\n', + } + }, + 'Ec2-xen': { + 'ds': 'Ec2', + 'mocks': [{'name': 'detect_virt', 'RET': 'xen', 'ret': 0}], + 'files': { + 'sys/hypervisor/uuid': 'ec2c6e2f-5fac-4fc7-9c82-74127ec14bbb\n' + }, + }, + 'Ec2-brightbox': { + 'ds': 'Ec2', + 'files': {P_PRODUCT_SERIAL: 'facc6e2f.brightbox.com\n'}, + }, + 'GCE': { + 'ds': 'GCE', + 'files': {P_PRODUCT_NAME: 'Google Compute Engine\n'}, + 'mocks': [MOCK_VIRT_IS_KVM], + }, + 'GCE-serial': { + 'ds': 'GCE', + 'files': {P_PRODUCT_SERIAL: 'GoogleCloud-8f2e88f\n'}, + 'mocks': [MOCK_VIRT_IS_KVM], + }, + 'ConfigDrive': { + 'ds': 'ConfigDrive', + 'mocks': [ + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, + {'DEVNAME': 'vda2', 'TYPE': 'ext4', + 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}, + {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'config-2'}]) + }, + ], + }, +} + +# vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py 2017-05-26 18:36:38.000000000 +0000 @@ -121,39 +121,82 @@ myds.metadata.update(metadata) return cloud.Cloud(myds, paths, {}, mydist, None) - def _apt_source_list(self, cfg, expected, distro): - "_apt_source_list - Test rendering from template (generic)" - + def _apt_source_list(self, distro, cfg, cfg_on_empty=False): + """_apt_source_list - Test rendering from template (generic)""" # entry at top level now, wrap in 'apt' key cfg = {'apt': cfg} mycloud = self._get_cloud(distro) - with mock.patch.object(util, 'write_file') as mockwf: + + with mock.patch.object(util, 'write_file') as mock_writefile: with mock.patch.object(util, 'load_file', - return_value=MOCKED_APT_SRC_LIST) as mocklf: + return_value=MOCKED_APT_SRC_LIST + ) as mock_loadfile: with mock.patch.object(os.path, 'isfile', - return_value=True) as mockisfile: - with mock.patch.object(util, 'rename'): - cc_apt_configure.handle("test", cfg, mycloud, - LOG, None) - - # check if it would have loaded the distro template - mockisfile.assert_any_call( - ('/etc/cloud/templates/sources.list.%s.tmpl' % distro)) - mocklf.assert_any_call( - ('/etc/cloud/templates/sources.list.%s.tmpl' % distro)) - # check expected content in result - mockwf.assert_called_once_with('/etc/apt/sources.list', expected, - mode=0o644) + return_value=True) as mock_isfile: + cfg_func = ('cloudinit.config.cc_apt_configure.' 
+ + '_should_configure_on_empty_apt') + with mock.patch(cfg_func, + return_value=(cfg_on_empty, "test") + ) as mock_shouldcfg: + cc_apt_configure.handle("test", cfg, mycloud, LOG, + None) + + return mock_writefile, mock_loadfile, mock_isfile, mock_shouldcfg def test_apt_v3_source_list_debian(self): """test_apt_v3_source_list_debian - without custom sources or parms""" cfg = {} - self._apt_source_list(cfg, EXPECTED_BASE_CONTENT, 'debian') + distro = 'debian' + expected = EXPECTED_BASE_CONTENT + + mock_writefile, mock_load_file, mock_isfile, mock_shouldcfg = ( + self._apt_source_list(distro, cfg, cfg_on_empty=True)) + + template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro + mock_writefile.assert_called_once_with('/etc/apt/sources.list', + expected, mode=0o644) + mock_load_file.assert_called_with(template) + mock_isfile.assert_any_call(template) + self.assertEqual(1, mock_shouldcfg.call_count) def test_apt_v3_source_list_ubuntu(self): """test_apt_v3_source_list_ubuntu - without custom sources or parms""" cfg = {} - self._apt_source_list(cfg, EXPECTED_BASE_CONTENT, 'ubuntu') + distro = 'ubuntu' + expected = EXPECTED_BASE_CONTENT + + mock_writefile, mock_load_file, mock_isfile, mock_shouldcfg = ( + self._apt_source_list(distro, cfg, cfg_on_empty=True)) + + template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro + mock_writefile.assert_called_once_with('/etc/apt/sources.list', + expected, mode=0o644) + mock_load_file.assert_called_with(template) + mock_isfile.assert_any_call(template) + self.assertEqual(1, mock_shouldcfg.call_count) + + def test_apt_v3_source_list_ubuntu_snappy(self): + """test_apt_v3_source_list_ubuntu_snappy - without custom sources or + parms""" + cfg = {'apt': {}} + mycloud = self._get_cloud('ubuntu') + + with mock.patch.object(util, 'write_file') as mock_writefile: + with mock.patch.object(util, 'system_is_snappy', + return_value=True) as mock_issnappy: + cc_apt_configure.handle("test", cfg, mycloud, LOG, None) + + self.assertEqual(0, mock_writefile.call_count) + self.assertEqual(1, mock_issnappy.call_count) + + def test_apt_v3_source_list_centos(self): + """test_apt_v3_source_list_centos - without custom sources or parms""" + cfg = {} + distro = 'rhel' + + mock_writefile, _, _, _ = self._apt_source_list(distro, cfg) + + self.assertEqual(0, mock_writefile.call_count) def test_apt_v3_source_list_psm(self): """test_apt_v3_source_list_psm - Test specifying prim+sec mirrors""" @@ -164,8 +207,17 @@ 'uri': pm}], 'security': [{'arches': ["default"], 'uri': sm}]} + distro = 'ubuntu' + expected = EXPECTED_PRIMSEC_CONTENT + + mock_writefile, mock_load_file, mock_isfile, _ = ( + self._apt_source_list(distro, cfg, cfg_on_empty=True)) - self._apt_source_list(cfg, EXPECTED_PRIMSEC_CONTENT, 'ubuntu') + template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro + mock_writefile.assert_called_once_with('/etc/apt/sources.list', + expected, mode=0o644) + mock_load_file.assert_called_with(template) + mock_isfile.assert_any_call(template) def test_apt_v3_srcl_custom(self): """test_apt_v3_srcl_custom - Test rendering a custom source template""" diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_handler/test_handler_disk_setup.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_handler/test_handler_disk_setup.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_handler/test_handler_disk_setup.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_handler/test_handler_disk_setup.py 2017-05-26 18:36:38.000000000 +0000 
@@ -17,6 +17,10 @@ self.check_fs = self.patches.enter_context( mock.patch('{0}.check_fs'.format(mod_name))) + def tearDown(self): + super(TestIsDiskUsed, self).tearDown() + self.patches.close() + def test_multiple_child_nodes_returns_true(self): self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(2)) self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock()) @@ -62,7 +66,7 @@ size_in_sectors = size_in_bytes / sector_size self._configure_subp_mock(size_in_bytes, sector_size) self.assertEqual(size_in_sectors, - cc_disk_setup.get_mbr_hdd_size('/dev/sda1')) + cc_disk_setup.get_hdd_size('/dev/sda1')) def test_size_for_512_byte_sectors(self): self._test_for_sector_size(512) @@ -103,4 +107,119 @@ ',{0},83\n,,82'.format(expected_partition_size), cc_disk_setup.get_partition_mbr_layout(disk_size, [33, [66, 82]])) + +class TestUpdateFsSetupDevices(TestCase): + def test_regression_1634678(self): + # Cf. https://bugs.launchpad.net/cloud-init/+bug/1634678 + fs_setup = { + 'partition': 'auto', + 'device': '/dev/xvdb1', + 'overwrite': False, + 'label': 'test', + 'filesystem': 'ext4' + } + + cc_disk_setup.update_fs_setup_devices([fs_setup], + lambda device: device) + + self.assertEqual({ + '_origname': '/dev/xvdb1', + 'partition': 'auto', + 'device': '/dev/xvdb1', + 'overwrite': False, + 'label': 'test', + 'filesystem': 'ext4' + }, fs_setup) + + def test_dotted_devname(self): + fs_setup = { + 'partition': 'auto', + 'device': 'ephemeral0.0', + 'label': 'test2', + 'filesystem': 'xfs' + } + + cc_disk_setup.update_fs_setup_devices([fs_setup], + lambda device: device) + + self.assertEqual({ + '_origname': 'ephemeral0.0', + '_partition': 'auto', + 'partition': '0', + 'device': 'ephemeral0', + 'label': 'test2', + 'filesystem': 'xfs' + }, fs_setup) + + def test_dotted_devname_populates_partition(self): + fs_setup = { + 'device': 'ephemeral0.1', + 'label': 'test2', + 'filesystem': 'xfs' + } + cc_disk_setup.update_fs_setup_devices([fs_setup], + lambda device: device) + self.assertEqual({ + '_origname': 'ephemeral0.1', + 'device': 'ephemeral0', + 'partition': '1', + 'label': 'test2', + 'filesystem': 'xfs' + }, fs_setup) + + +@mock.patch('cloudinit.config.cc_disk_setup.assert_and_settle_device', + return_value=None) +@mock.patch('cloudinit.config.cc_disk_setup.find_device_node', + return_value=('/dev/xdb1', False)) +@mock.patch('cloudinit.config.cc_disk_setup.device_type', return_value=None) +@mock.patch('cloudinit.config.cc_disk_setup.util.subp', return_value=('', '')) +class TestMkfsCommandHandling(TestCase): + + def test_with_cmd(self, subp, *args): + """mkfs honors cmd and logs warnings when extra_opts or overwrite are + provided.""" + with self.assertLogs( + 'cloudinit.config.cc_disk_setup') as logs: + cc_disk_setup.mkfs({ + 'cmd': 'mkfs -t %(filesystem)s -L %(label)s %(device)s', + 'filesystem': 'ext4', + 'device': '/dev/xdb1', + 'label': 'with_cmd', + 'extra_opts': ['should', 'generate', 'warning'], + 'overwrite': 'should generate warning too' + }) + + self.assertIn( + 'WARNING:cloudinit.config.cc_disk_setup:fs_setup:extra_opts ' + + 'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' + + '/dev/xdb1', + logs.output) + self.assertIn( + 'WARNING:cloudinit.config.cc_disk_setup:fs_setup:overwrite ' + + 'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' + + '/dev/xdb1', + logs.output) + + subp.assert_called_once_with( + 'mkfs -t ext4 -L with_cmd /dev/xdb1', shell=True) + + @mock.patch('cloudinit.config.cc_disk_setup.util.which') + def 
test_overwrite_and_extra_opts_without_cmd(self, m_which, subp, *args): + """mkfs observes extra_opts and overwrite settings when cmd is not + present.""" + m_which.side_effect = lambda p: {'mkfs.ext4': '/sbin/mkfs.ext4'}[p] + cc_disk_setup.mkfs({ + 'filesystem': 'ext4', + 'device': '/dev/xdb1', + 'label': 'without_cmd', + 'extra_opts': ['are', 'added'], + 'overwrite': True + }) + + subp.assert_called_once_with( + ['/sbin/mkfs.ext4', '/dev/xdb1', + '-L', 'without_cmd', '-F', 'are', 'added'], + shell=False) + # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_handler/test_handler_ntp.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_handler/test_handler_ntp.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_handler/test_handler_ntp.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_handler/test_handler_ntp.py 2017-05-26 18:36:38.000000000 +0000 @@ -2,277 +2,214 @@ from cloudinit.config import cc_ntp from cloudinit.sources import DataSourceNone -from cloudinit import templater from cloudinit import (distros, helpers, cloud, util) from ..helpers import FilesystemMockingTestCase, mock -import logging + import os +from os.path import dirname import shutil -import tempfile - -LOG = logging.getLogger(__name__) -NTP_TEMPLATE = """ +NTP_TEMPLATE = b"""\ ## template: jinja - -{% if pools %}# pools -{% endif %} -{% for pool in pools -%} -pool {{pool}} iburst -{% endfor %} -{%- if servers %}# servers -{% endif %} -{% for server in servers -%} -server {{server}} iburst -{% endfor %} - -""" - - -NTP_EXPECTED_UBUNTU = """ -# pools -pool 0.mycompany.pool.ntp.org iburst -# servers -server 192.168.23.3 iburst - +servers {{servers}} +pools {{pools}} """ class TestNtp(FilesystemMockingTestCase): + with_logs = True + def setUp(self): super(TestNtp, self).setUp() self.subp = util.subp - self.new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.new_root) + self.new_root = self.tmp_dir() - def _get_cloud(self, distro, metadata=None): + def _get_cloud(self, distro): self.patchUtils(self.new_root) - paths = helpers.Paths({}) + paths = helpers.Paths({'templates_dir': self.new_root}) cls = distros.fetch(distro) mydist = cls(distro, {}, paths) myds = DataSourceNone.DataSourceNone({}, mydist, paths) - if metadata: - myds.metadata.update(metadata) return cloud.Cloud(myds, paths, {}, mydist, None) @mock.patch("cloudinit.config.cc_ntp.util") def test_ntp_install(self, mock_util): - cc = self._get_cloud('ubuntu') - cc.distro = mock.MagicMock() - cc.distro.name = 'ubuntu' - mock_util.which.return_value = None + """ntp_install installs via install_func when check_exe is absent.""" + mock_util.which.return_value = None # check_exe not found. install_func = mock.MagicMock() - cc_ntp.install_ntp(install_func, packages=['ntpx'], check_exe='ntpdx') - self.assertTrue(install_func.called) mock_util.which.assert_called_with('ntpdx') - install_pkg = install_func.call_args_list[0][0][0] - self.assertEqual(sorted(install_pkg), ['ntpx']) + install_func.assert_called_once_with(['ntpx']) @mock.patch("cloudinit.config.cc_ntp.util") def test_ntp_install_not_needed(self, mock_util): - cc = self._get_cloud('ubuntu') - cc.distro = mock.MagicMock() - cc.distro.name = 'ubuntu' - mock_util.which.return_value = ["/usr/sbin/ntpd"] - cc_ntp.install_ntp(cc) - self.assertFalse(cc.distro.install_packages.called) + """ntp_install doesn't attempt install when check_exe is found.""" + mock_util.which.return_value = ["/usr/sbin/ntpd"] # check_exe found. 
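+        # A truthy which() result means check_exe was found on PATH, so
+        # install_ntp should return early; the assert_not_called below
+        # verifies install_func was never invoked.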
+ install_func = mock.MagicMock() + cc_ntp.install_ntp(install_func, packages=['ntp'], check_exe='ntpd') + install_func.assert_not_called() def test_ntp_rename_ntp_conf(self): - with mock.patch.object(os.path, 'exists', - return_value=True) as mockpath: - with mock.patch.object(util, 'rename') as mockrename: - cc_ntp.rename_ntp_conf() - - mockpath.assert_called_with('/etc/ntp.conf') - mockrename.assert_called_with('/etc/ntp.conf', '/etc/ntp.conf.dist') + """When NTP_CONF exists, rename_ntp moves it.""" + ntpconf = self.tmp_path("ntp.conf", self.new_root) + os.mknod(ntpconf) + with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf): + cc_ntp.rename_ntp_conf() + self.assertFalse(os.path.exists(ntpconf)) + self.assertTrue(os.path.exists("{0}.dist".format(ntpconf))) def test_ntp_rename_ntp_conf_skip_missing(self): - with mock.patch.object(os.path, 'exists', - return_value=False) as mockpath: - with mock.patch.object(util, 'rename') as mockrename: - cc_ntp.rename_ntp_conf() - - mockpath.assert_called_with('/etc/ntp.conf') - mockrename.assert_not_called() - - def ntp_conf_render(self, distro): - """ntp_conf_render - Test rendering of a ntp.conf from template for a given distro + """When NTP_CONF doesn't exist rename_ntp doesn't create a file.""" + ntpconf = self.tmp_path("ntp.conf", self.new_root) + self.assertFalse(os.path.exists(ntpconf)) + with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf): + cc_ntp.rename_ntp_conf() + self.assertFalse(os.path.exists("{0}.dist".format(ntpconf))) + self.assertFalse(os.path.exists(ntpconf)) + + def test_write_ntp_config_template_from_ntp_conf_tmpl_with_servers(self): + """write_ntp_config_template reads content from ntp.conf.tmpl. + + It reads ntp.conf.tmpl if present and renders the value from servers + key. When no pools key is defined, template is rendered using an empty + list for pools. 
""" - - cfg = {'ntp': {}} - mycloud = self._get_cloud(distro) - distro_names = cc_ntp.generate_server_names(distro) - - with mock.patch.object(templater, 'render_to_file') as mocktmpl: - with mock.patch.object(os.path, 'isfile', return_value=True): - with mock.patch.object(util, 'rename'): - cc_ntp.write_ntp_config_template(cfg, mycloud) - - mocktmpl.assert_called_once_with( - ('/etc/cloud/templates/ntp.conf.%s.tmpl' % distro), - '/etc/ntp.conf', - {'servers': [], 'pools': distro_names}) - - def test_ntp_conf_render_rhel(self): - """Test templater.render_to_file() for rhel""" - self.ntp_conf_render('rhel') - - def test_ntp_conf_render_debian(self): - """Test templater.render_to_file() for debian""" - self.ntp_conf_render('debian') - - def test_ntp_conf_render_fedora(self): - """Test templater.render_to_file() for fedora""" - self.ntp_conf_render('fedora') - - def test_ntp_conf_render_sles(self): - """Test templater.render_to_file() for sles""" - self.ntp_conf_render('sles') - - def test_ntp_conf_render_ubuntu(self): - """Test templater.render_to_file() for ubuntu""" - self.ntp_conf_render('ubuntu') - - def test_ntp_conf_servers_no_pools(self): distro = 'ubuntu' - pools = [] - servers = ['192.168.2.1'] cfg = { - 'ntp': { - 'pools': pools, - 'servers': servers, - } + 'servers': ['192.168.2.1', '192.168.2.2'] } mycloud = self._get_cloud(distro) - - with mock.patch.object(templater, 'render_to_file') as mocktmpl: - with mock.patch.object(os.path, 'isfile', return_value=True): - with mock.patch.object(util, 'rename'): - cc_ntp.write_ntp_config_template(cfg.get('ntp'), mycloud) - - mocktmpl.assert_called_once_with( - ('/etc/cloud/templates/ntp.conf.%s.tmpl' % distro), - '/etc/ntp.conf', - {'servers': servers, 'pools': pools}) - - def test_ntp_conf_custom_pools_no_server(self): + ntp_conf = self.tmp_path("ntp.conf", self.new_root) # Doesn't exist + # Create ntp.conf.tmpl + with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: + stream.write(NTP_TEMPLATE) + with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): + cc_ntp.write_ntp_config_template(cfg, mycloud) + content = util.read_file_or_url('file://' + ntp_conf).contents + self.assertEqual( + "servers ['192.168.2.1', '192.168.2.2']\npools []\n", + content.decode()) + + def test_write_ntp_config_template_uses_ntp_conf_distro_no_servers(self): + """write_ntp_config_template reads content from ntp.conf.distro.tmpl. + + It reads ntp.conf..tmpl before attempting ntp.conf.tmpl. It + renders the value from the keys servers and pools. When no + servers value is present, template is rendered using an empty list. + """ distro = 'ubuntu' - pools = ['0.mycompany.pool.ntp.org'] - servers = [] cfg = { - 'ntp': { - 'pools': pools, - 'servers': servers, - } + 'pools': ['10.0.0.1', '10.0.0.2'] } mycloud = self._get_cloud(distro) + ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist + # Create ntp.conf.tmpl which isn't read + with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: + stream.write(b'NOT READ: ntp.conf..tmpl is primary') + # Create ntp.conf.tmpl. 
+ with open('{0}.{1}.tmpl'.format(ntp_conf, distro), 'wb') as stream: + stream.write(NTP_TEMPLATE) + with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): + cc_ntp.write_ntp_config_template(cfg, mycloud) + content = util.read_file_or_url('file://' + ntp_conf).contents + self.assertEqual( + "servers []\npools ['10.0.0.1', '10.0.0.2']\n", + content.decode()) - with mock.patch.object(templater, 'render_to_file') as mocktmpl: - with mock.patch.object(os.path, 'isfile', return_value=True): - with mock.patch.object(util, 'rename'): - cc_ntp.write_ntp_config_template(cfg.get('ntp'), mycloud) - - mocktmpl.assert_called_once_with( - ('/etc/cloud/templates/ntp.conf.%s.tmpl' % distro), - '/etc/ntp.conf', - {'servers': servers, 'pools': pools}) + def test_write_ntp_config_template_defaults_pools_when_empty_lists(self): + """write_ntp_config_template defaults pools servers upon empty config. - def test_ntp_conf_custom_pools_and_server(self): + When both pools and servers are empty, default NR_POOL_SERVERS get + configured. + """ distro = 'ubuntu' - pools = ['0.mycompany.pool.ntp.org'] - servers = ['192.168.23.3'] - cfg = { - 'ntp': { - 'pools': pools, - 'servers': servers, - } - } mycloud = self._get_cloud(distro) - - with mock.patch.object(templater, 'render_to_file') as mocktmpl: - with mock.patch.object(os.path, 'isfile', return_value=True): - with mock.patch.object(util, 'rename'): - cc_ntp.write_ntp_config_template(cfg.get('ntp'), mycloud) - - mocktmpl.assert_called_once_with( - ('/etc/cloud/templates/ntp.conf.%s.tmpl' % distro), - '/etc/ntp.conf', - {'servers': servers, 'pools': pools}) - - def test_ntp_conf_contents_match(self): - """Test rendered contents of /etc/ntp.conf for ubuntu""" - pools = ['0.mycompany.pool.ntp.org'] - servers = ['192.168.23.3'] + ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist + # Create ntp.conf.tmpl + with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: + stream.write(NTP_TEMPLATE) + with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): + cc_ntp.write_ntp_config_template({}, mycloud) + content = util.read_file_or_url('file://' + ntp_conf).contents + default_pools = [ + "{0}.{1}.pool.ntp.org".format(x, distro) + for x in range(0, cc_ntp.NR_POOL_SERVERS)] + self.assertEqual( + "servers []\npools {0}\n".format(default_pools), + content.decode()) + self.assertIn( + "Adding distro default ntp pool servers: {0}".format( + ",".join(default_pools)), + self.logs.getvalue()) + + def test_ntp_handler_mocked_template(self): + """Test ntp handler renders ubuntu ntp.conf template.""" + pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org'] + servers = ['192.168.23.3', '192.168.23.4'] cfg = { 'ntp': { 'pools': pools, - 'servers': servers, + 'servers': servers } } mycloud = self._get_cloud('ubuntu') - side_effect = [NTP_TEMPLATE.lstrip()] - - # work backwards from util.write_file and mock out call path - # write_ntp_config_template() - # cloud.get_template_filename() - # os.path.isfile() - # templater.render_to_file() - # templater.render_from_file() - # util.load_file() - # util.write_file() - # - with mock.patch.object(util, 'write_file') as mockwrite: - with mock.patch.object(util, 'load_file', side_effect=side_effect): - with mock.patch.object(os.path, 'isfile', return_value=True): - with mock.patch.object(util, 'rename'): - cc_ntp.write_ntp_config_template(cfg.get('ntp'), - mycloud) - - mockwrite.assert_called_once_with( - '/etc/ntp.conf', - NTP_EXPECTED_UBUNTU, - mode=420) - - def test_ntp_handler(self): - """Test ntp handler 
renders ubuntu ntp.conf template""" - pools = ['0.mycompany.pool.ntp.org'] - servers = ['192.168.23.3'] + ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist + # Create ntp.conf.tmpl + with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: + stream.write(NTP_TEMPLATE) + with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): + with mock.patch.object(util, 'which', return_value=None): + cc_ntp.handle('notimportant', cfg, mycloud, None, None) + + content = util.read_file_or_url('file://' + ntp_conf).contents + self.assertEqual( + 'servers {0}\npools {1}\n'.format(servers, pools), + content.decode()) + + def test_ntp_handler_real_distro_templates(self): + """Test ntp handler renders the shipped distro ntp.conf templates.""" + pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org'] + servers = ['192.168.23.3', '192.168.23.4'] cfg = { 'ntp': { 'pools': pools, - 'servers': servers, + 'servers': servers } } - mycloud = self._get_cloud('ubuntu') - side_effect = [NTP_TEMPLATE.lstrip()] - - with mock.patch.object(util, 'which', return_value=None): - with mock.patch.object(os.path, 'exists'): - with mock.patch.object(util, 'write_file') as mockwrite: - with mock.patch.object(util, 'load_file', - side_effect=side_effect): - with mock.patch.object(os.path, 'isfile', - return_value=True): - with mock.patch.object(util, 'rename'): - cc_ntp.handle("notimportant", cfg, - mycloud, LOG, None) - - mockwrite.assert_called_once_with( - '/etc/ntp.conf', - NTP_EXPECTED_UBUNTU, - mode=420) - - @mock.patch("cloudinit.config.cc_ntp.util") - def test_no_ntpcfg_does_nothing(self, mock_util): - cc = self._get_cloud('ubuntu') - cc.distro = mock.MagicMock() - cc_ntp.handle('cc_ntp', {}, cc, LOG, []) - self.assertFalse(cc.distro.install_packages.called) - self.assertFalse(mock_util.subp.called) + ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist + for distro in ('debian', 'ubuntu', 'fedora', 'rhel', 'sles'): + mycloud = self._get_cloud(distro) + root_dir = dirname(dirname(os.path.realpath(util.__file__))) + tmpl_file = os.path.join( + '{0}/templates/ntp.conf.{1}.tmpl'.format(root_dir, distro)) + # Create a copy in our tmp_dir + shutil.copy( + tmpl_file, + os.path.join(self.new_root, 'ntp.conf.%s.tmpl' % distro)) + with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): + with mock.patch.object(util, 'which', return_value=[True]): + cc_ntp.handle('notimportant', cfg, mycloud, None, None) + + content = util.read_file_or_url('file://' + ntp_conf).contents + expected_servers = '\n'.join([ + 'server {0} iburst'.format(server) for server in servers]) + self.assertIn( + expected_servers, content.decode(), + 'failed to render ntp.conf for distro:{0}'.format(distro)) + expected_pools = '\n'.join([ + 'pool {0} iburst'.format(pool) for pool in pools]) + self.assertIn( + expected_pools, content.decode(), + 'failed to render ntp.conf for distro:{0}'.format(distro)) + + def test_no_ntpcfg_does_nothing(self): + """When no ntp section is defined handler logs a warning and noops.""" + cc_ntp.handle('cc_ntp', {}, None, None, []) + self.assertEqual( + 'Skipping module named cc_ntp, not present or disabled by cfg\n', + self.logs.getvalue()) # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_handler/test_handler_power_state.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_handler/test_handler_power_state.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_handler/test_handler_power_state.py 2017-03-03 06:30:15.000000000 +0000 +++ 
cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_handler/test_handler_power_state.py 2017-05-26 18:36:38.000000000 +0000 @@ -15,12 +15,12 @@ def test_no_config(self): # completely empty config should mean do nothing (cmd, _timeout, _condition) = psc.load_power_state({}) - self.assertEqual(cmd, None) + self.assertIsNone(cmd) def test_irrelevant_config(self): # no power_state field in config should return None for cmd (cmd, _timeout, _condition) = psc.load_power_state({'foo': 'bar'}) - self.assertEqual(cmd, None) + self.assertIsNone(cmd) def test_invalid_mode(self): cfg = {'power_state': {'mode': 'gibberish'}} diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_handler/test_handler_resizefs.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_handler/test_handler_resizefs.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_handler/test_handler_resizefs.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_handler/test_handler_resizefs.py 2017-05-26 18:36:38.000000000 +0000 @@ -0,0 +1,59 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.config import cc_resizefs + +import textwrap +import unittest + +try: + from unittest import mock +except ImportError: + import mock + + +class TestResizefs(unittest.TestCase): + def setUp(self): + super(TestResizefs, self).setUp() + self.name = "resizefs" + + @mock.patch('cloudinit.config.cc_resizefs._get_dumpfs_output') + @mock.patch('cloudinit.config.cc_resizefs._get_gpart_output') + def test_skip_ufs_resize(self, gpart_out, dumpfs_out): + fs_type = "ufs" + resize_what = "/" + devpth = "/dev/da0p2" + dumpfs_out.return_value = ( + "# newfs command for / (/dev/label/rootfs)\n" + "newfs -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 " + "-f 4096 -g 16384 -h 64 -i 8192 -j -k 6408 -m 8 " + "-o time -s 58719232 /dev/label/rootfs\n") + gpart_out.return_value = textwrap.dedent("""\ + => 40 62914480 da0 GPT (30G) + 40 1024 1 freebsd-boot (512K) + 1064 58719232 2 freebsd-ufs (28G) + 58720296 3145728 3 freebsd-swap (1.5G) + 61866024 1048496 - free - (512M) + """) + res = cc_resizefs.can_skip_resize(fs_type, resize_what, devpth) + self.assertTrue(res) + + @mock.patch('cloudinit.config.cc_resizefs._get_dumpfs_output') + @mock.patch('cloudinit.config.cc_resizefs._get_gpart_output') + def test_skip_ufs_resize_roundup(self, gpart_out, dumpfs_out): + fs_type = "ufs" + resize_what = "/" + devpth = "/dev/da0p2" + dumpfs_out.return_value = ( + "# newfs command for / (/dev/label/rootfs)\n" + "newfs -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 " + "-f 4096 -g 16384 -h 64 -i 8192 -j -k 368 -m 8 " + "-o time -s 297080 /dev/label/rootfs\n") + gpart_out.return_value = textwrap.dedent("""\ + => 34 297086 da0 GPT (145M) + 34 297086 1 freebsd-ufs (145M) + """) + res = cc_resizefs.can_skip_resize(fs_type, resize_what, devpth) + self.assertTrue(res) + + +# vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_handler/test_handler_snappy.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_handler/test_handler_snappy.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_handler/test_handler_snappy.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_handler/test_handler_snappy.py 2017-05-26 18:36:38.000000000 +0000 @@ -419,7 +419,7 @@ def test_snap_config_add_snap_user_no_config(self): usercfg = add_snap_user(cfg=None) - self.assertEqual(usercfg, None) + self.assertIsNone(usercfg) def 
test_snap_config_add_snap_user_not_dict(self): cfg = ['foobar'] @@ -428,7 +428,7 @@ def test_snap_config_add_snap_user_no_email(self): cfg = {'assertions': [], 'known': True} usercfg = add_snap_user(cfg=cfg) - self.assertEqual(usercfg, None) + self.assertIsNone(usercfg) @mock.patch('cloudinit.config.cc_snap_config.util') def test_snap_config_add_snap_user_email_only(self, mock_util): diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_handler/test_handler_yum_add_repo.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_handler/test_handler_yum_add_repo.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_handler/test_handler_yum_add_repo.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_handler/test_handler_yum_add_repo.py 2017-05-26 18:36:38.000000000 +0000 @@ -5,10 +5,13 @@ from .. import helpers -import configobj +try: + from configparser import ConfigParser +except ImportError: + from ConfigParser import ConfigParser import logging import shutil -from six import BytesIO +from six import StringIO import tempfile LOG = logging.getLogger(__name__) @@ -54,9 +57,9 @@ } self.patchUtils(self.tmp) cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) - contents = util.load_file("/etc/yum.repos.d/epel_testing.repo", - decode=False) - contents = configobj.ConfigObj(BytesIO(contents)) + contents = util.load_file("/etc/yum.repos.d/epel_testing.repo") + parser = ConfigParser() + parser.readfp(StringIO(contents)) expected = { 'epel_testing': { 'name': 'Extra Packages for Enterprise Linux 5 - Testing', @@ -67,6 +70,47 @@ 'gpgcheck': '1', } } - self.assertEqual(expected, dict(contents)) + for section in expected: + self.assertTrue(parser.has_section(section), + "Contains section {0}".format(section)) + for k, v in expected[section].items(): + self.assertEqual(parser.get(section, k), v) + + def test_write_config_array(self): + cfg = { + 'yum_repos': { + 'puppetlabs-products': { + 'name': 'Puppet Labs Products El 6 - $basearch', + 'baseurl': + 'http://yum.puppetlabs.com/el/6/products/$basearch', + 'gpgkey': [ + 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs', + 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet', + ], + 'enabled': True, + 'gpgcheck': True, + } + } + } + self.patchUtils(self.tmp) + cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) + contents = util.load_file("/etc/yum.repos.d/puppetlabs_products.repo") + parser = ConfigParser() + parser.readfp(StringIO(contents)) + expected = { + 'puppetlabs_products': { + 'name': 'Puppet Labs Products El 6 - $basearch', + 'baseurl': 'http://yum.puppetlabs.com/el/6/products/$basearch', + 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs\n' + 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet', + 'enabled': '1', + 'gpgcheck': '1', + } + } + for section in expected: + self.assertTrue(parser.has_section(section), + "Contains section {0}".format(section)) + for k, v in expected[section].items(): + self.assertEqual(parser.get(section, k), v) # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_helpers.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_helpers.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_helpers.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_helpers.py 2017-05-26 18:36:38.000000000 +0000 @@ -32,6 +32,6 @@ myds._instance_id = None mypaths = self.getCloudPaths(myds) - self.assertEqual(None, mypaths.get_ipath()) + self.assertIsNone(mypaths.get_ipath()) # vi: ts=4 expandtab diff -Nru 
cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_net.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_net.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_net.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_net.py 2017-05-26 18:36:38.000000000 +0000 @@ -3,7 +3,9 @@ from cloudinit import net from cloudinit.net import cmdline from cloudinit.net import eni +from cloudinit.net import netplan from cloudinit.net import network_state +from cloudinit.net import renderers from cloudinit.net import sysconfig from cloudinit.sources.helpers import openstack from cloudinit import util @@ -98,7 +100,8 @@ 'gateway': '10.0.0.1', 'dns_search': ['foo.com'], 'type': 'static', 'netmask': '255.255.255.0', - 'dns_nameservers': ['10.0.1.1']}], + 'dns_nameservers': ['10.0.1.1'], + 'address': '10.0.0.2'}], } # Examples (and expected outputs for various renderers). @@ -134,7 +137,7 @@ """ # Created by cloud-init on instance boot automatically, do not edit. # -BOOTPROTO=static +BOOTPROTO=none DEFROUTE=yes DEVICE=eth0 GATEWAY=172.19.3.254 @@ -202,43 +205,102 @@ # Created by cloud-init on instance boot automatically, do not edit. # BOOTPROTO=none +DEFROUTE=yes DEVICE=eth0 +GATEWAY=172.19.3.254 HWADDR=fa:16:3e:ed:9a:59 +IPADDR=172.19.1.34 +IPADDR1=10.0.0.10 +NETMASK=255.255.252.0 +NETMASK1=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no """.lstrip()), - ('etc/sysconfig/network-scripts/ifcfg-eth0:0', + ('etc/resolv.conf', + """ +; Created by cloud-init on instance boot automatically, do not edit. +; +nameserver 172.19.0.12 +""".lstrip()), + ('etc/udev/rules.d/70-persistent-net.rules', + "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))] + }, + { + 'in_data': { + "services": [{"type": "dns", "address": "172.19.0.12"}], + "networks": [{ + "network_id": "public-ipv4", + "type": "ipv4", "netmask": "255.255.252.0", + "link": "tap1a81968a-79", + "routes": [{ + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "172.19.3.254", + }], + "ip_address": "172.19.1.34", "id": "network0" + }, { + "network_id": "public-ipv6-a", + "type": "ipv6", "netmask": "", + "link": "tap1a81968a-79", + "routes": [ + { + "gateway": "2001:DB8::1", + "netmask": "::", + "network": "::" + } + ], + "ip_address": "2001:DB8::10", "id": "network1" + }, { + "network_id": "public-ipv6-b", + "type": "ipv6", "netmask": "64", + "link": "tap1a81968a-79", + "routes": [ + ], + "ip_address": "2001:DB9::10", "id": "network2" + }, { + "network_id": "public-ipv6-c", + "type": "ipv6", "netmask": "64", + "link": "tap1a81968a-79", + "routes": [ + ], + "ip_address": "2001:DB10::10", "id": "network3" + }], + "links": [ + { + "ethernet_mac_address": "fa:16:3e:ed:9a:59", + "mtu": None, "type": "bridge", "id": + "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" + }, + ], + }, + 'in_macs': { + 'fa:16:3e:ed:9a:59': 'eth0', + }, + 'out_sysconfig': [ + ('etc/sysconfig/network-scripts/ifcfg-eth0', """ # Created by cloud-init on instance boot automatically, do not edit. 
# -BOOTPROTO=static +BOOTPROTO=none DEFROUTE=yes -DEVICE=eth0:0 +DEVICE=eth0 GATEWAY=172.19.3.254 HWADDR=fa:16:3e:ed:9a:59 IPADDR=172.19.1.34 +IPV6ADDR=2001:DB8::10 +IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" +IPV6INIT=yes +IPV6_DEFAULTGW=2001:DB8::1 NETMASK=255.255.252.0 NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no """.lstrip()), - ('etc/sysconfig/network-scripts/ifcfg-eth0:1', - """ -# Created by cloud-init on instance boot automatically, do not edit. -# -BOOTPROTO=static -DEVICE=eth0:1 -HWADDR=fa:16:3e:ed:9a:59 -IPADDR=10.0.0.10 -NETMASK=255.255.255.0 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no -""".lstrip()), ('etc/resolv.conf', """ ; Created by cloud-init on instance boot automatically, do not edit. @@ -313,6 +375,41 @@ post-up route add default gw 65.61.151.37 || true pre-down route del default gw 65.61.151.37 || true """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + eth1: + match: + macaddress: cf:d6:af:48:e8:80 + nameservers: + addresses: + - 1.2.3.4 + - 5.6.7.8 + search: + - wark.maas + set-name: eth1 + eth99: + addresses: + - 192.168.21.3/24 + dhcp4: true + match: + macaddress: c0:d6:9f:2c:e8:80 + nameservers: + addresses: + - 8.8.8.8 + - 8.8.4.4 + - 1.2.3.4 + - 5.6.7.8 + search: + - barley.maas + - sach.maas + - wark.maas + routes: + - to: 0.0.0.0/0 + via: 65.61.151.37 + set-name: eth99 + """).rstrip(' '), 'yaml': textwrap.dedent(""" version: 1 config: @@ -355,6 +452,14 @@ # control-alias iface0 iface iface0 inet6 dhcp """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + dhcp4: true + dhcp6: true + """).rstrip(' '), 'yaml': textwrap.dedent("""\ version: 1 config: @@ -378,11 +483,15 @@ iface eth1 inet manual bond-master bond0 bond-mode active-backup + bond-xmit-hash-policy layer3+4 + bond_miimon 100 auto eth2 iface eth2 inet manual bond-master bond0 bond-mode active-backup + bond-xmit-hash-policy layer3+4 + bond_miimon 100 iface eth3 inet manual @@ -395,6 +504,8 @@ iface bond0 inet6 dhcp bond-mode active-backup bond-slaves none + bond-xmit-hash-policy layer3+4 + bond_miimon 100 hwaddress aa:bb:cc:dd:ee:ff auto br0 @@ -418,6 +529,7 @@ dns-nameservers 192.168.0.10 10.23.23.134 dns-search barley.maas sacchromyces.maas brettanomyces.maas gateway 192.168.0.1 + hwaddress aa:bb:cc:dd:ee:11 mtu 1500 vlan-raw-device eth0 vlan_id 101 @@ -429,6 +541,129 @@ post-up route add -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true """), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + eth0: + match: + macaddress: c0:d6:9f:2c:e8:80 + nameservers: + addresses: + - 8.8.8.8 + - 4.4.4.4 + - 8.8.4.4 + search: + - barley.maas + - wark.maas + - foobar.maas + set-name: eth0 + eth1: + match: + macaddress: aa:d6:9f:2c:e8:80 + nameservers: + addresses: + - 8.8.8.8 + - 4.4.4.4 + - 8.8.4.4 + search: + - barley.maas + - wark.maas + - foobar.maas + set-name: eth1 + eth2: + match: + macaddress: c0:bb:9f:2c:e8:80 + nameservers: + addresses: + - 8.8.8.8 + - 4.4.4.4 + - 8.8.4.4 + search: + - barley.maas + - wark.maas + - foobar.maas + set-name: eth2 + eth3: + match: + macaddress: 66:bb:9f:2c:e8:80 + nameservers: + addresses: + - 8.8.8.8 + - 4.4.4.4 + - 8.8.4.4 + search: + - barley.maas + - wark.maas + - foobar.maas + set-name: eth3 + eth4: + match: + macaddress: 98:bb:9f:2c:e8:80 + nameservers: + addresses: + - 8.8.8.8 + - 4.4.4.4 + - 8.8.4.4 + search: + - 
barley.maas + - wark.maas + - foobar.maas + set-name: eth4 + eth5: + dhcp4: true + match: + macaddress: 98:bb:9f:2c:e8:8a + nameservers: + addresses: + - 8.8.8.8 + - 4.4.4.4 + - 8.8.4.4 + search: + - barley.maas + - wark.maas + - foobar.maas + set-name: eth5 + bonds: + bond0: + dhcp6: true + interfaces: + - eth1 + - eth2 + parameters: + mii-monitor-interval: 100 + mode: active-backup + transmit-hash-policy: layer3+4 + bridges: + br0: + addresses: + - 192.168.14.2/24 + - 2001:1::1/64 + interfaces: + - eth3 + - eth4 + vlans: + bond0.200: + dhcp4: true + id: 200 + link: bond0 + eth0.101: + addresses: + - 192.168.0.2/24 + - 192.168.2.10/24 + gateway4: 192.168.0.1 + id: 101 + link: eth0 + macaddress: aa:bb:cc:dd:ee:11 + nameservers: + addresses: + - 192.168.0.10 + - 10.23.23.134 + search: + - barley.maas + - sacchromyces.maas + - brettanomyces.maas + """).rstrip(' '), 'yaml': textwrap.dedent(""" version: 1 config: @@ -463,6 +698,7 @@ name: eth0.101 vlan_link: eth0 vlan_id: 101 + mac_address: aa:bb:cc:dd:ee:11 mtu: 1500 subnets: - type: static @@ -488,6 +724,8 @@ - eth2 params: bond-mode: active-backup + bond_miimon: 100 + bond-xmit-hash-policy: "layer3+4" subnets: - type: dhcp6 # A Bond VLAN. @@ -543,6 +781,14 @@ } } +CONFIG_V1_EXPLICIT_LOOPBACK = { + 'version': 1, + 'config': [{'name': 'eth0', 'type': 'physical', + 'subnets': [{'control': 'auto', 'type': 'dhcp'}]}, + {'name': 'lo', 'type': 'loopback', + 'subnets': [{'control': 'auto', 'type': 'loopback'}]}, + ]} + def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path): @@ -595,7 +841,7 @@ os.makedirs(render_dir) renderer = sysconfig.Renderer() - renderer.render_network_state(render_dir, ns) + renderer.render_network_state(ns, render_dir) render_file = 'etc/sysconfig/network-scripts/ifcfg-eth1000' with open(os.path.join(render_dir, render_file)) as fh: @@ -613,6 +859,82 @@ """.lstrip() self.assertEqual(expected_content, content) + def test_multiple_ipv4_default_gateways(self): + """ValueError is raised when duplicate ipv4 gateways exist.""" + net_json = { + "services": [{"type": "dns", "address": "172.19.0.12"}], + "networks": [{ + "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4", + "type": "ipv4", "netmask": "255.255.252.0", + "link": "tap1a81968a-79", + "routes": [{ + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "172.19.3.254", + }, { + "netmask": "0.0.0.0", # A second default gateway + "network": "0.0.0.0", + "gateway": "172.20.3.254", + }], + "ip_address": "172.19.1.34", "id": "network0" + }], + "links": [ + { + "ethernet_mac_address": "fa:16:3e:ed:9a:59", + "mtu": None, "type": "bridge", "id": + "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" + }, + ], + } + macs = {'fa:16:3e:ed:9a:59': 'eth0'} + render_dir = self.tmp_dir() + network_cfg = openstack.convert_net_json(net_json, known_macs=macs) + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + renderer = sysconfig.Renderer() + with self.assertRaises(ValueError): + renderer.render_network_state(ns, render_dir) + self.assertEqual([], os.listdir(render_dir)) + + def test_multiple_ipv6_default_gateways(self): + """ValueError is raised when duplicate ipv6 gateways exist.""" + net_json = { + "services": [{"type": "dns", "address": "172.19.0.12"}], + "networks": [{ + "network_id": "public-ipv6", + "type": "ipv6", "netmask": "", + "link": "tap1a81968a-79", + "routes": [{ + "gateway": "2001:DB8::1", + "netmask": "::", + "network": "::" + }, { + "gateway": "2001:DB9::1", + "netmask": "::", + "network": 
"::" + }], + "ip_address": "2001:DB8::10", "id": "network1" + }], + "links": [ + { + "ethernet_mac_address": "fa:16:3e:ed:9a:59", + "mtu": None, "type": "bridge", "id": + "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" + }, + ], + } + macs = {'fa:16:3e:ed:9a:59': 'eth0'} + render_dir = self.tmp_dir() + network_cfg = openstack.convert_net_json(net_json, known_macs=macs) + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + renderer = sysconfig.Renderer() + with self.assertRaises(ValueError): + renderer.render_network_state(ns, render_dir) + self.assertEqual([], os.listdir(render_dir)) + def test_openstack_rendering_samples(self): for os_sample in OS_SAMPLES: render_dir = self.tmp_dir() @@ -623,11 +945,32 @@ ns = network_state.parse_net_config_data(network_cfg, skip_broken=False) renderer = sysconfig.Renderer() - renderer.render_network_state(render_dir, ns) + renderer.render_network_state(ns, render_dir) for fn, expected_content in os_sample.get('out_sysconfig', []): with open(os.path.join(render_dir, fn)) as fh: self.assertEqual(expected_content, fh.read()) + def test_config_with_explicit_loopback(self): + ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) + render_dir = self.tmp_path("render") + os.makedirs(render_dir) + renderer = sysconfig.Renderer() + renderer.render_network_state(ns, render_dir) + found = dir2dict(render_dir) + nspath = '/etc/sysconfig/network-scripts/' + self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) + expected = """\ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=dhcp +DEVICE=eth0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""" + self.assertEqual(expected, found[nspath + 'ifcfg-eth0']) + class TestEniNetRendering(CiTestCase): @@ -652,7 +995,7 @@ {'links_path_prefix': None, 'eni_path': 'interfaces', 'netrules_path': None, }) - renderer.render_network_state(render_dir, ns) + renderer.render_network_state(ns, render_dir) self.assertTrue(os.path.exists(os.path.join(render_dir, 'interfaces'))) @@ -668,6 +1011,179 @@ """ self.assertEqual(expected.lstrip(), contents.lstrip()) + def test_config_with_explicit_loopback(self): + tmp_dir = self.tmp_dir() + ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) + renderer = eni.Renderer() + renderer.render_network_state(ns, tmp_dir) + expected = """\ +auto lo +iface lo inet loopback + +auto eth0 +iface eth0 inet dhcp +""" + self.assertEqual( + expected, dir2dict(tmp_dir)['/etc/network/interfaces']) + + +class TestNetplanNetRendering(CiTestCase): + + @mock.patch("cloudinit.net.netplan._clean_default") + @mock.patch("cloudinit.net.sys_dev_path") + @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.net.get_devicelist") + def test_default_generation(self, mock_get_devicelist, + mock_read_sys_net, + mock_sys_dev_path, + mock_clean_default): + tmp_dir = self.tmp_dir() + _setup_test(tmp_dir, mock_get_devicelist, + mock_read_sys_net, mock_sys_dev_path) + + network_cfg = net.generate_fallback_config() + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + + render_dir = os.path.join(tmp_dir, "render") + os.makedirs(render_dir) + + render_target = 'netplan.yaml' + renderer = netplan.Renderer( + {'netplan_path': render_target, 'postcmds': False}) + renderer.render_network_state(ns, render_dir) + + self.assertTrue(os.path.exists(os.path.join(render_dir, + render_target))) + with open(os.path.join(render_dir, render_target)) as fh: + contents = fh.read() + 
print(contents) + + expected = """ +network: + version: 2 + ethernets: + eth1000: + dhcp4: true + match: + macaddress: 07-1c-c6-75-a4-be + set-name: eth1000 +""" + self.assertEqual(expected.lstrip(), contents.lstrip()) + self.assertEqual(1, mock_clean_default.call_count) + + +class TestNetplanCleanDefault(CiTestCase): + snapd_known_path = 'etc/netplan/00-snapd-config.yaml' + snapd_known_content = textwrap.dedent("""\ + # This is the initial network config. + # It can be overwritten by cloud-init or console-conf. + network: + version: 2 + ethernets: + all-en: + match: + name: "en*" + dhcp4: true + all-eth: + match: + name: "eth*" + dhcp4: true + """) + stub_known = { + 'run/systemd/network/10-netplan-all-en.network': 'foo-en', + 'run/systemd/network/10-netplan-all-eth.network': 'foo-eth', + 'run/systemd/generator/netplan.stamp': 'stamp', + } + + def test_clean_known_config_cleaned(self): + content = {self.snapd_known_path: self.snapd_known_content, } + content.update(self.stub_known) + tmpd = self.tmp_dir() + files = sorted(populate_dir(tmpd, content)) + netplan._clean_default(target=tmpd) + found = [t for t in files if os.path.exists(t)] + self.assertEqual([], found) + + def test_clean_unknown_config_not_cleaned(self): + content = {self.snapd_known_path: self.snapd_known_content, } + content.update(self.stub_known) + content[self.snapd_known_path] += "# user put a comment\n" + tmpd = self.tmp_dir() + files = sorted(populate_dir(tmpd, content)) + netplan._clean_default(target=tmpd) + found = [t for t in files if os.path.exists(t)] + self.assertEqual(files, found) + + def test_clean_known_config_cleans_only_expected(self): + astamp = "run/systemd/generator/another.stamp" + anet = "run/systemd/network/10-netplan-all-lo.network" + ayaml = "etc/netplan/01-foo-config.yaml" + content = { + self.snapd_known_path: self.snapd_known_content, + astamp: "stamp", + anet: "network", + ayaml: "yaml", + } + content.update(self.stub_known) + + tmpd = self.tmp_dir() + files = sorted(populate_dir(tmpd, content)) + netplan._clean_default(target=tmpd) + found = [t for t in files if os.path.exists(t)] + expected = [util.target_path(tmpd, f) for f in (astamp, anet, ayaml)] + self.assertEqual(sorted(expected), found) + + +class TestNetplanPostcommands(CiTestCase): + mycfg = { + 'config': [{"type": "physical", "name": "eth0", + "mac_address": "c0:d6:9f:2c:e8:80", + "subnets": [{"type": "dhcp"}]}], + 'version': 1} + + @mock.patch.object(netplan.Renderer, '_netplan_generate') + @mock.patch.object(netplan.Renderer, '_net_setup_link') + def test_netplan_render_calls_postcmds(self, mock_netplan_generate, + mock_net_setup_link): + tmp_dir = self.tmp_dir() + ns = network_state.parse_net_config_data(self.mycfg, + skip_broken=False) + + render_dir = os.path.join(tmp_dir, "render") + os.makedirs(render_dir) + + render_target = 'netplan.yaml' + renderer = netplan.Renderer( + {'netplan_path': render_target, 'postcmds': True}) + renderer.render_network_state(ns, render_dir) + + mock_netplan_generate.assert_called_with(run=True) + mock_net_setup_link.assert_called_with(run=True) + + @mock.patch.object(netplan, "get_devicelist") + @mock.patch('cloudinit.util.subp') + def test_netplan_postcmds(self, mock_subp, mock_devlist): + mock_devlist.side_effect = [['lo']] + tmp_dir = self.tmp_dir() + ns = network_state.parse_net_config_data(self.mycfg, + skip_broken=False) + + render_dir = os.path.join(tmp_dir, "render") + os.makedirs(render_dir) + + render_target = 'netplan.yaml' + renderer = netplan.Renderer( + {'netplan_path': 
render_target, 'postcmds': True}) + expected = [ + mock.call(['netplan', 'generate'], capture=True), + mock.call(['udevadm', 'test-builtin', 'net_setup_link', + '/sys/class/net/lo'], capture=True), + ] + with mock.patch.object(os.path, 'islink', return_value=True): + renderer.render_network_state(ns, render_dir) + mock_subp.assert_has_calls(expected) + class TestEniNetworkStateToEni(CiTestCase): mycfg = { @@ -795,7 +1311,7 @@ files = sorted(populate_dir(self.tmp_dir(), content)) found = cmdline.read_kernel_cmdline_config( files=files, cmdline='foo root=/dev/sda', mac_addrs=self.macs) - self.assertEqual(found, None) + self.assertIsNone(found) def test_ip_cmdline_both_ip_ip6(self): content = {'net-eth0.conf': DHCP_CONTENT_1, @@ -814,6 +1330,50 @@ self.assertEqual(found['config'], expected) +class TestNetplanRoundTrip(CiTestCase): + def _render_and_read(self, network_config=None, state=None, + netplan_path=None, target=None): + if target is None: + target = self.tmp_dir() + + if network_config: + ns = network_state.parse_net_config_data(network_config) + elif state: + ns = state + else: + raise ValueError("Expected data or state, got neither") + + if netplan_path is None: + netplan_path = 'etc/netplan/50-cloud-init.yaml' + + renderer = netplan.Renderer( + config={'netplan_path': netplan_path}) + + renderer.render_network_state(ns, target) + return dir2dict(target) + + def testsimple_render_small_netplan(self): + entry = NETWORK_CONFIGS['small'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + + def testsimple_render_v4_and_v6(self): + entry = NETWORK_CONFIGS['v4_and_v6'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + + def testsimple_render_all(self): + entry = NETWORK_CONFIGS['all'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + + class TestEniRoundTrip(CiTestCase): def _render_and_read(self, network_config=None, state=None, eni_path=None, links_prefix=None, netrules_path=None, dir=None): @@ -834,7 +1394,7 @@ config={'eni_path': eni_path, 'links_path_prefix': links_prefix, 'netrules_path': netrules_path}) - renderer.render_network_state(dir, ns) + renderer.render_network_state(ns, dir) return dir2dict(dir) def testsimple_convert_and_render(self): @@ -912,6 +1472,161 @@ expected, [line for line in found if line]) +class TestNetRenderers(CiTestCase): + @mock.patch("cloudinit.net.renderers.sysconfig.available") + @mock.patch("cloudinit.net.renderers.eni.available") + def test_eni_and_sysconfig_available(self, m_eni_avail, m_sysc_avail): + m_eni_avail.return_value = True + m_sysc_avail.return_value = True + found = renderers.search(priority=['sysconfig', 'eni'], first=False) + names = [f[0] for f in found] + self.assertEqual(['sysconfig', 'eni'], names) + + @mock.patch("cloudinit.net.renderers.eni.available") + def test_search_returns_empty_on_none(self, m_eni_avail): + m_eni_avail.return_value = False + found = renderers.search(priority=['eni'], first=False) + self.assertEqual([], found) + + @mock.patch("cloudinit.net.renderers.sysconfig.available") + @mock.patch("cloudinit.net.renderers.eni.available") + def test_first_in_priority(self, m_eni_avail, 
m_sysc_avail): + # available should only be called until one is found. + m_eni_avail.return_value = True + m_sysc_avail.side_effect = Exception("Should not call me") + found = renderers.search(priority=['eni', 'sysconfig'], first=True) + self.assertEqual(['eni'], [found[0]]) + + @mock.patch("cloudinit.net.renderers.sysconfig.available") + @mock.patch("cloudinit.net.renderers.eni.available") + def test_select_positive(self, m_eni_avail, m_sysc_avail): + m_eni_avail.return_value = True + m_sysc_avail.return_value = False + found = renderers.select(priority=['sysconfig', 'eni']) + self.assertEqual('eni', found[0]) + + @mock.patch("cloudinit.net.renderers.sysconfig.available") + @mock.patch("cloudinit.net.renderers.eni.available") + def test_select_none_found_raises(self, m_eni_avail, m_sysc_avail): + # if select finds nothing, should raise exception. + m_eni_avail.return_value = False + m_sysc_avail.return_value = False + + self.assertRaises(net.RendererNotFoundError, renderers.select, + priority=['sysconfig', 'eni']) + + +class TestGetInterfacesByMac(CiTestCase): + _data = {'bonds': ['bond1'], + 'bridges': ['bridge1'], + 'vlans': ['bond1.101'], + 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1', + 'bond1.101', 'lo'], + 'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01', + 'enp0s2': 'aa:aa:aa:aa:aa:02', + 'bond1': 'aa:aa:aa:aa:aa:01', + 'bond1.101': 'aa:aa:aa:aa:aa:01', + 'bridge1': 'aa:aa:aa:aa:aa:03', + 'bridge1-nic': 'aa:aa:aa:aa:aa:03', + 'lo': '00:00:00:00:00:00', + 'greptap0': '00:00:00:00:00:00', + 'tun0': None}} + data = {} + + def _se_get_devicelist(self): + return list(self.data['devices']) + + def _se_get_interface_mac(self, name): + return self.data['macs'][name] + + def _se_is_bridge(self, name): + return name in self.data['bridges'] + + def _se_is_vlan(self, name): + return name in self.data['vlans'] + + def _se_interface_has_own_mac(self, name): + return name in self.data['own_macs'] + + def _mock_setup(self): + self.data = copy.deepcopy(self._data) + self.data['devices'] = set(list(self.data['macs'].keys())) + mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge', + 'interface_has_own_mac', 'is_vlan') + self.mocks = {} + for n in mocks: + m = mock.patch('cloudinit.net.' + n, + side_effect=getattr(self, '_se_' + n)) + self.addCleanup(m.stop) + self.mocks[n] = m.start() + + def test_raise_exception_on_duplicate_macs(self): + self._mock_setup() + self.data['macs']['bridge1-nic'] = self.data['macs']['enp0s1'] + self.assertRaises(RuntimeError, net.get_interfaces_by_mac) + + def test_excludes_any_without_mac_address(self): + self._mock_setup() + ret = net.get_interfaces_by_mac() + self.assertIn('tun0', self._se_get_devicelist()) + self.assertNotIn('tun0', ret.values()) + + def test_excludes_stolen_macs(self): + self._mock_setup() + ret = net.get_interfaces_by_mac() + self.mocks['interface_has_own_mac'].assert_has_calls( + [mock.call('enp0s1'), mock.call('bond1')], any_order=True) + self.assertEqual( + {'aa:aa:aa:aa:aa:01': 'enp0s1', 'aa:aa:aa:aa:aa:02': 'enp0s2', + 'aa:aa:aa:aa:aa:03': 'bridge1-nic', '00:00:00:00:00:00': 'lo'}, + ret) + + def test_excludes_bridges(self): + self._mock_setup() + # add a device 'b1', make all return they have their "own mac", + # set everything other than 'b1' to be a bridge. + # then expect b1 is the only thing left. 
+ self.data['macs']['b1'] = 'aa:aa:aa:aa:aa:b1' + self.data['devices'].add('b1') + self.data['bonds'] = [] + self.data['own_macs'] = self.data['devices'] + self.data['bridges'] = [f for f in self.data['devices'] if f != "b1"] + ret = net.get_interfaces_by_mac() + self.assertEqual({'aa:aa:aa:aa:aa:b1': 'b1'}, ret) + self.mocks['is_bridge'].assert_has_calls( + [mock.call('bridge1'), mock.call('enp0s1'), mock.call('bond1'), + mock.call('b1')], + any_order=True) + + def test_excludes_vlans(self): + self._mock_setup() + # add a device 'b1', make all return they have their "own mac", + # set everything other than 'b1' to be a vlan. + # then expect b1 is the only thing left. + self.data['macs']['b1'] = 'aa:aa:aa:aa:aa:b1' + self.data['devices'].add('b1') + self.data['bonds'] = [] + self.data['bridges'] = [] + self.data['own_macs'] = self.data['devices'] + self.data['vlans'] = [f for f in self.data['devices'] if f != "b1"] + ret = net.get_interfaces_by_mac() + self.assertEqual({'aa:aa:aa:aa:aa:b1': 'b1'}, ret) + self.mocks['is_vlan'].assert_has_calls( + [mock.call('bridge1'), mock.call('enp0s1'), mock.call('bond1'), + mock.call('b1')], + any_order=True) + + def test_duplicates_of_empty_mac_are_ok(self): + """Duplicate macs of 00:00:00:00:00:00 should be skipped.""" + self._mock_setup() + empty_mac = "00:00:00:00:00:00" + addnics = ('greptap1', 'lo', 'greptap2') + self.data['macs'].update(dict((k, empty_mac) for k in addnics)) + self.data['devices'].update(set(addnics)) + ret = net.get_interfaces_by_mac() + self.assertEqual('lo', ret[empty_mac]) + + def _gzip_data(data): with io.BytesIO() as iobuf: gzfp = gzip.GzipFile(mode="wb", fileobj=iobuf) diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_util.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_util.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_util.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_util.py 2017-05-26 18:36:38.000000000 +0000 @@ -44,7 +44,7 @@ """None is returned if key is not found and no default given.""" config = {} result = util.get_cfg_option_list(config, "key") - self.assertEqual(None, result) + self.assertIsNone(result) def test_not_found_with_default(self): """Default is returned if key is not found.""" @@ -103,8 +103,8 @@ self.assertTrue(os.path.isdir(dirname)) self.assertTrue(os.path.isfile(path)) - def test_custom_mode(self): - """Verify custom mode works properly.""" + def test_explicit_mode(self): + """Verify explicit file mode works properly.""" path = os.path.join(self.tmp, "NewFile.txt") contents = "Hey there" @@ -115,6 +115,35 @@ file_stat = os.stat(path) self.assertEqual(0o666, stat.S_IMODE(file_stat.st_mode)) + def test_copy_mode_no_existing(self): + """Verify that file is created with mode 0o644 if copy_mode + is true and there is no prior existing file.""" + path = os.path.join(self.tmp, "NewFile.txt") + contents = "Hey there" + + util.write_file(path, contents, copy_mode=True) + + self.assertTrue(os.path.exists(path)) + self.assertTrue(os.path.isfile(path)) + file_stat = os.stat(path) + self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) + + def test_copy_mode_with_existing(self): + """Verify that file is created using mode of existing file + if copy_mode is true.""" + path = os.path.join(self.tmp, "NewFile.txt") + contents = "Hey there" + + open(path, 'w').close() + os.chmod(path, 0o666) + + util.write_file(path, contents, copy_mode=True) + + self.assertTrue(os.path.exists(path)) + self.assertTrue(os.path.isfile(path)) + file_stat = 
os.stat(path) + self.assertEqual(0o666, stat.S_IMODE(file_stat.st_mode)) + def test_custom_omode(self): """Verify custom omode works properly.""" path = os.path.join(self.tmp, "NewFile.txt") @@ -403,13 +432,13 @@ def test_none_returned_if_neither_source_has_data(self): self.patch_mapping({}) self._configure_dmidecode_return('key', 'value') - self.assertEqual(None, util.read_dmi_data('expect-fail')) + self.assertIsNone(util.read_dmi_data('expect-fail')) def test_none_returned_if_dmidecode_not_in_path(self): self.patched_funcs.enter_context( mock.patch.object(util, 'which', lambda _: False)) self.patch_mapping({}) - self.assertEqual(None, util.read_dmi_data('expect-fail')) + self.assertIsNone(util.read_dmi_data('expect-fail')) def test_dots_returned_instead_of_foxfox(self): # uninitialized dmi values show as \xff, return those as . @@ -567,7 +596,8 @@ def test_subp_capture_stderr(self): data = b'hello world' (out, err) = util.subp(self.stdin2err, capture=True, - decode=False, data=data) + decode=False, data=data, + update_env={'LC_ALL': 'C'}) self.assertEqual(err, data) self.assertEqual(out, b'') @@ -596,8 +626,8 @@ def test_returns_none_if_no_capture(self): (out, err) = util.subp(self.stdin2out, data=b'', capture=False) - self.assertEqual(err, None) - self.assertEqual(out, None) + self.assertIsNone(err) + self.assertIsNone(out) def test_bunch_of_slashes_in_path(self): self.assertEqual("/target/my/path/", @@ -682,4 +712,73 @@ )).format(description=self.empty_description, empty_attr=self.empty_attr)) + +class TestSystemIsSnappy(helpers.FilesystemMockingTestCase): + def test_id_in_os_release_quoted(self): + """os-release containing ID="ubuntu-core" is snappy.""" + orcontent = '\n'.join(['ID="ubuntu-core"', '']) + root_d = self.tmp_dir() + helpers.populate_dir(root_d, {'etc/os-release': orcontent}) + self.reRoot(root_d) + self.assertTrue(util.system_is_snappy()) + + def test_id_in_os_release(self): + """os-release containing ID=ubuntu-core is snappy.""" + orcontent = '\n'.join(['ID=ubuntu-core', '']) + root_d = self.tmp_dir() + helpers.populate_dir(root_d, {'etc/os-release': orcontent}) + self.reRoot(root_d) + self.assertTrue(util.system_is_snappy()) + + @mock.patch('cloudinit.util.get_cmdline') + def test_bad_content_in_os_release_no_effect(self, m_cmdline): + """malformed os-release should not raise exception.""" + m_cmdline.return_value = 'root=/dev/sda' + orcontent = '\n'.join(['IDubuntu-core', '']) + root_d = self.tmp_dir() + helpers.populate_dir(root_d, {'etc/os-release': orcontent}) + self.reRoot() + self.assertFalse(util.system_is_snappy()) + + @mock.patch('cloudinit.util.get_cmdline') + def test_snap_core_in_cmdline_is_snappy(self, m_cmdline): + """The string snap_core= in kernel cmdline indicates snappy.""" + cmdline = ( + "BOOT_IMAGE=(loop)/kernel.img root=LABEL=writable " + "snap_core=core_x1.snap snap_kernel=pc-kernel_x1.snap ro " + "net.ifnames=0 init=/lib/systemd/systemd console=tty1 " + "console=ttyS0 panic=-1") + m_cmdline.return_value = cmdline + self.assertTrue(util.system_is_snappy()) + self.assertTrue(m_cmdline.call_count > 0) + + @mock.patch('cloudinit.util.get_cmdline') + def test_nothing_found_is_not_snappy(self, m_cmdline): + """If no positive identification, then not snappy.""" + m_cmdline.return_value = 'root=/dev/sda' + self.reRoot() + self.assertFalse(util.system_is_snappy()) + self.assertTrue(m_cmdline.call_count > 0) + + @mock.patch('cloudinit.util.get_cmdline') + def test_channel_ini_with_snappy_is_snappy(self, m_cmdline): + """A Channel.ini file with 
'ubuntu-core' indicates snappy.""" + m_cmdline.return_value = 'root=/dev/sda' + root_d = self.tmp_dir() + content = '\n'.join(["[Foo]", "source = 'ubuntu-core'", ""]) + helpers.populate_dir( + root_d, {'etc/system-image/channel.ini': content}) + self.reRoot(root_d) + self.assertTrue(util.system_is_snappy()) + + @mock.patch('cloudinit.util.get_cmdline') + def test_system_image_config_dir_is_snappy(self, m_cmdline): + """Existence of /etc/system-image/config.d indicates snappy.""" + m_cmdline.return_value = 'root=/dev/sda' + root_d = self.tmp_dir() + helpers.populate_dir( + root_d, {'etc/system-image/config.d/my.file': "_unused"}) + self.reRoot(root_d) + self.assertTrue(util.system_is_snappy()) + # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_version.py cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_version.py --- cloud-init-0.7.9-47-gc81ea53/tests/unittests/test_version.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tests/unittests/test_version.py 2017-05-26 18:36:38.000000000 +0000 @@ -0,0 +1,14 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from .helpers import CiTestCase +from cloudinit import version + + +class TestExportsFeatures(CiTestCase): + def test_has_network_config_v1(self): + self.assertIn('NETWORK_CONFIG_V1', version.FEATURES) + + def test_has_network_config_v2(self): + self.assertIn('NETWORK_CONFIG_V2', version.FEATURES) + +# vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tools/21-cloudinit.conf cloud-init-0.7.9-153-g16a7302f/tools/21-cloudinit.conf --- cloud-init-0.7.9-47-gc81ea53/tools/21-cloudinit.conf 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tools/21-cloudinit.conf 2017-05-26 18:36:38.000000000 +0000 @@ -3,4 +3,4 @@ # comment out the following line to allow CLOUDINIT messages through. # Doing so means you'll also get CLOUDINIT messages in /var/log/syslog -& ~ +& stop diff -Nru cloud-init-0.7.9-47-gc81ea53/tools/build-on-freebsd cloud-init-0.7.9-153-g16a7302f/tools/build-on-freebsd --- cloud-init-0.7.9-47-gc81ea53/tools/build-on-freebsd 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tools/build-on-freebsd 2017-05-26 18:36:38.000000000 +0000 @@ -10,9 +10,7 @@ pkgs=" dmidecode e2fsprogs - gpart py27-Jinja2 - py27-argparse py27-boto py27-cheetah py27-configobj @@ -38,7 +36,7 @@ python setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd # Install the correct config file: -cp config/cloud.cfg-freebsd /usr/local/etc/cloud/cloud.cfg +cp config/cloud.cfg-freebsd /etc/cloud/cloud.cfg # Enable cloud-init in /etc/rc.conf: sed -i.bak -e "/cloudinit_enable=.*/d" /etc/rc.conf diff -Nru cloud-init-0.7.9-47-gc81ea53/tools/ds-identify cloud-init-0.7.9-153-g16a7302f/tools/ds-identify --- cloud-init-0.7.9-47-gc81ea53/tools/ds-identify 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tools/ds-identify 2017-05-26 18:36:38.000000000 +0000 @@ -7,20 +7,27 @@ # # policy: a string that indicates how ds-identify should operate. # kernel command line option: ci.di.policy=<policy> +# The format is: +# <mode>,found=value,maybe=value,notfound=value # default setting is: # search,found=all,maybe=all,notfound=disable # -# report: write config to /run/cloud-init/cloud.cfg, but -# namespaced under 'di_report'. Thus cloud-init can still see -# the result, but has no affect.
-# enable: do nothing -# ds-identify writes no config and just exits success -# the caller (cloud-init-generator) then enables cloud-init to run -# just without any aid from ds-identify. -# disable: disable cloud-init +# Mode: +# disabled: disable cloud-init +# enabled: enable cloud-init. +# ds-identify writes no config and just exits success. +# the caller (cloud-init-generator) then enables cloud-init to +# run just without any aid from ds-identify. +# search: determine which source or sources should be used +# and write the result (datasource_list) to +# /run/cloud-init/cloud.cfg +# report: basically 'dry run' for search. results are still written +# to the file, but are namespaced under the top level key +# 'di_report' Thus cloud-init is not affected, but can still +# see the result. # -# [report,]found=value,maybe=value,notfound=value -# found: (default=first) +# found,maybe,notfound: +# found: (default=all) # first: use the first found do no further checking # all: enable all DS_FOUND # @@ -63,7 +70,9 @@ PATH_PROC_1_CMDLINE="${PATH_PROC_1_CMDLINE:-${PATH_ROOT}/proc/1/cmdline}" PATH_PROC_1_ENVIRON="${PATH_PROC_1_ENVIRON:-${PATH_ROOT}/proc/1/environ}" PATH_PROC_UPTIME=${PATH_PROC_UPTIME:-${PATH_ROOT}/proc/uptime} -PATH_CLOUD_CONFD="${PATH_CLOUD_CONFD:-${PATH_ROOT}/etc/cloud}" +PATH_ETC_CLOUD="${PATH_ETC_CLOUD:-${PATH_ROOT}/etc/cloud}" +PATH_ETC_CI_CFG="${PATH_ETC_CI_CFG:-${PATH_ETC_CLOUD}/cloud.cfg}" +PATH_ETC_CI_CFG_D="${PATH_ETC_CI_CFG_D:-${PATH_ETC_CI_CFG}.d}" PATH_RUN_CI="${PATH_RUN_CI:-${PATH_RUN}/cloud-init}" PATH_RUN_CI_CFG=${PATH_RUN_CI_CFG:-${PATH_RUN_CI}/cloud.cfg} PATH_RUN_DI_RESULT=${PATH_RUN_DI_RESULT:-${PATH_RUN_CI}/.ds-identify.result} @@ -83,7 +92,7 @@ DI_FS_LABELS="" DI_KERNEL_CMDLINE="" DI_VIRT="" -DI_PID_1_PLATFORM="" +DI_PID_1_PRODUCT_NAME="" DI_UNAME_KERNEL_NAME="" DI_UNAME_KERNEL_RELEASE="" @@ -101,10 +110,9 @@ # this has to match the builtin list in cloud-init, it is what will # be searched if there is no setting found in config. DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ -CloudSigma CloudStack DigitalOcean Ec2 OpenNebula OpenStack OVF SmartOS" +CloudSigma CloudStack DigitalOcean Ec2 GCE OpenNebula OpenStack OVF SmartOS" DI_DSLIST="" DI_MODE="" -DI_REPORT="" DI_ON_FOUND="" DI_ON_MAYBE="" DI_ON_NOTFOUND="" @@ -208,9 +216,8 @@ [ -e "${PATH_ROOT}/dev/cdrom" ] } -read_virt() { - cached "$DI_VIRT" && return 0 - local out="" r="" virt="${UNAVAILABLE}" +detect_virt() { + local virt="${UNAVAILABLE}" r="" out="" if [ -d /run/systemd ]; then out=$(systemd-detect-virt 2>&1) r=$? 
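As a reading aid for the ci.di.policy format documented at the top of this file, here is a rough Python sketch of how such a policy string decomposes. This is a hypothetical helper, only loosely mirroring the shell parse_policy further below, and the defaults shown are illustrative:

def parse_policy(policy, default_mode='search'):
    # split e.g. 'search,found=all,maybe=all,notfound=disabled' into parts
    parsed = {'mode': default_mode, 'found': 'all',
              'maybe': 'all', 'notfound': 'disabled'}
    for tok in policy.split(','):
        if tok in ('enabled', 'disabled', 'search', 'report'):
            parsed['mode'] = tok  # the <mode> token
        elif '=' in tok:
            key, _, val = tok.partition('=')
            if key in ('found', 'maybe', 'notfound'):
                parsed[key] = val
    return parsed

# a kernel cmdline of ci.di.policy=report,found=first would then yield:
# {'mode': 'report', 'found': 'first', 'maybe': 'all',
#  'notfound': 'disabled'}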
@@ -218,7 +225,13 @@ virt="$out" fi fi - DI_VIRT=$virt + _RET="$virt" +} + +read_virt() { + cached "$DI_VIRT" && return 0 + detect_virt + DI_VIRT=${_RET} } is_container() { @@ -310,8 +323,11 @@ # ['1'] or [1] # '1', '2' local val="$1" oifs="$IFS" ret="" tok="" - val=${val#[} - val=${val%]} + # i386/14.04 (dash=0.5.7-4ubuntu1): the following outputs "[foo" + # sh -c 'n="$1"; echo ${n#[}' -- "[foo" + # the fix was to quote the open bracket (val=${val#"["}) (LP: #1689648) + val=${val#"["} + val=${val%"]"} IFS=","; set -- $val; IFS="$oifs" for tok in "$@"; do trim "$tok" @@ -354,9 +370,9 @@ return 0 } -read_pid1_platform() { - local oifs="$IFS" out="" tok="" key="" val="" platform="${UNAVAILABLE}" - cached "${DI_PID_1_PLATFORM}" && return +read_pid1_product_name() { + local oifs="$IFS" out="" tok="" key="" val="" product_name="${UNAVAILABLE}" + cached "${DI_PID_1_PRODUCT_NAME}" && return [ -r "${PATH_PROC_1_ENVIRON}" ] || return out=$(tr '\0' '\n' <"${PATH_PROC_1_ENVIRON}") IFS="$CR"; set -- $out; IFS="$oifs" @@ -364,9 +380,9 @@ key=${tok%%=*} [ "$key" != "$tok" ] || continue val=${tok#*=} - [ "$key" = "platform" ] && platform="$val" && break + [ "$key" = "product_name" ] && product_name="$val" && break done - DI_PID_1_PLATFORM="$platform" + DI_PID_1_PRODUCT_NAME="$product_name" } dmi_product_name_matches() { @@ -377,6 +393,14 @@ return 1 } +dmi_product_serial_matches() { + is_container && return 1 + case "${DI_DMI_PRODUCT_SERIAL}" in + $1) return 0;; + esac + return 1 +} + dmi_product_name_is() { is_container && return 1 [ "${DI_DMI_PRODUCT_NAME}" = "$1" ] @@ -458,16 +482,19 @@ } check_config() { - # somewhat hackily read config for 'key' in files matching 'files' - # currently does not respect any hierarchy. - local key="$1" files="" bp="${PATH_CLOUD_CONFD}/cloud.cfg" - if [ $# -eq 1 ]; then - files="$bp ${bp}.d/*.cfg" + # check_config(key [,file_globs]) + # somewhat hackily read through file_globs for 'key' + # file_globs are expanded via path expansion and + # default to /etc/cloud/cloud.cfg /etc/cloud/cloud.cfg.d/*.cfg + # currently does not respect any hierarchy in searching for key. + local key="$1" files="" + shift + if [ $# -eq 0 ]; then + files="${PATH_ETC_CI_CFG} ${PATH_ETC_CI_CFG_D}/*.cfg" else files="$*" fi - shift - set +f; set -- $files; set +f; + set +f; set -- $files; set -f; if [ "$1" = "$files" -a ! -f "$1" ]; then return 1 fi @@ -506,9 +533,7 @@ esac # check config files written by maas for installed system. - local confd="${PATH_CLOUD_CONFD}" - local fnmatch="$confd/*maas*.cfg $confd/*kernel_cmdline*.cfg" - if check_config "MAAS" "$fnmatch"; then + if check_config "MAAS"; then return "${DS_FOUND}" fi return ${DS_NOT_FOUND} @@ -532,6 +557,20 @@ if has_fs_with_label "config-2"; then return ${DS_FOUND} fi + # look in /config-drive /seed/config_drive for a directory + # openstack/YYYY-MM-DD format with a file meta_data.json + local d="" + local vlc_config_drive_path="${PATH_VAR_LIB_CLOUD}/seed/config_drive" + for d in /config-drive $vlc_config_drive_path; do + set +f; set -- "$d/openstack/"2???-??-??/meta_data.json; set -f; + [ -f "$1" ] && return ${DS_FOUND} + done + # at least one cloud (softlayer) seeds config drive with only 'latest'. + local lpath="openstack/latest/meta_data.json" + if [ -e "$vlc_config_drive_path/$lpath" ]; then + debug 1 "config drive seeded directory had only 'latest'" + return ${DS_FOUND} + fi return ${DS_NOT_FOUND} } @@ -580,9 +619,7 @@ # (disable_vmware_customization=true). If it is set to false, then # user has requested customization. 
local key="disable_vmware_customization" - local match="" bp="${PATH_CLOUD_CONFD}/cloud.cfg" - match="$bp $bp.d/*[Oo][Vv][Ff]*.cfg" - if check_config "$key" "$match"; then + if check_config "$key"; then debug 2 "${_RET_fname} set $key to $_RET" case "$_RET" in 0|false|False) return 0;; @@ -627,7 +664,8 @@ dscheck_Bigstep() { # bigstep is activated by presense of seed file 'url' - check_seed_dir "bigstep" url && return ${DS_FOUND} + [ -f "${PATH_VAR_LIB_CLOUD}/data/seed/bigstep/url" ] && + return ${DS_FOUND} return ${DS_NOT_FOUND} } @@ -652,9 +690,9 @@ esac # 3. look for the key 'strict_id' (datasource/Ec2/strict_id) - local match="" bp="${PATH_CLOUD_CONFD}/cloud.cfg" - match="$bp $bp.d/*[Ee][Cc]2*.cfg" - if check_config strict_id "$match"; then + # only in cloud.cfg or cloud.cfg.d/EC2.cfg (case insensitive) + local cfg="${PATH_ETC_CI_CFG}" cfg_d="${PATH_ETC_CI_CFG_D}" + if check_config strict_id $cfg "$cfg_d/*[Ee][Cc]2*.cfg"; then debug 2 "${_RET_fname} set strict_id to $_RET" return 0 fi @@ -684,7 +722,7 @@ # AWS http://docs.aws.amazon.com/AWSEC2/ # latest/UserGuide/identify_ec2_instances.html - local uuid="" hvuuid="$PATH_ROOT/sys/hypervisor/uuid" + local uuid="" hvuuid="${PATH_SYS_HYPERVISOR}/uuid" # if the (basically) xen specific /sys/hypervisor/uuid starts with 'ec2' if [ -r "$hvuuid" ] && read uuid < "$hvuuid" && [ "${uuid#ec2}" != "$uuid" ]; then @@ -695,7 +733,7 @@ # product uuid and product serial start with case insensitive local uuid="${DI_DMI_PRODUCT_UUID}" case "$uuid:$serial" in - [Ee][Cc]2*:[Ee][Cc]2) + [Ee][Cc]2*:[Ee][Cc]2*) # both start with ec2, now check for case insenstive equal nocase_equal "$uuid" "$serial" && { _RET="AWS"; return 0; };; @@ -750,6 +788,10 @@ if dmi_product_name_is "Google Compute Engine"; then return ${DS_FOUND} fi + # product name is not guaranteed (LP: #1674861) + if dmi_product_serial_matches "GoogleCloud-*"; then + return ${DS_FOUND} + fi return ${DS_NOT_FOUND} } @@ -763,10 +805,15 @@ if [ $? -eq ${DS_FOUND} ]; then return ${DS_NOT_FOUND} fi - if dmi_product_name_is "OpenStack Nova"; then + local nova="OpenStack Nova" compute="OpenStack Compute" + if dmi_product_name_is "$nova"; then return ${DS_FOUND} fi - if [ "${DI_PID_1_PLATFORM}" = "OpenStack Nova" ]; then + if dmi_product_name_is "$compute"; then + # RDO installed nova (LP: #1675349). + return ${DS_FOUND} + fi + if [ "${DI_PID_1_PRODUCT_NAME}" = "$nova" ]; then return ${DS_FOUND} fi @@ -834,7 +881,7 @@ collect_info() { read_virt - read_pid1_platform + read_pid1_product_name read_kernel_cmdline read_uname_info read_config @@ -854,12 +901,12 @@ _print_info() { local n="" v="" vars="" vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL" - vars="$vars DMI_PRODUCT_UUID PID_1_PLATFORM" + vars="$vars DMI_PRODUCT_UUID PID_1_PRODUCT_NAME" vars="$vars FS_LABELS KERNEL_CMDLINE VIRT" vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE UNAME_KERNEL_VERSION" vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM" vars="$vars DSNAME DSLIST" - vars="$vars MODE REPORT ON_FOUND ON_MAYBE ON_NOTFOUND" + vars="$vars MODE ON_FOUND ON_MAYBE ON_NOTFOUND" for v in ${vars}; do eval n='${DI_'"$v"'}' echo "$v=$n" @@ -871,7 +918,7 @@ write_result() { local runcfg="${PATH_RUN_CI_CFG}" ret="" line="" pre="" { - if [ "$DI_REPORT" = "true" ]; then + if [ "$DI_MODE" = "report" ]; then echo "di_report:" pre=" " fi @@ -892,11 +939,11 @@ # if not report mode: only report the negative result. # reporting an empty list would mean cloud-init would not search # any datasources. 
- if [ "$DI_REPORT" = "true" ]; then + if [ "$DI_MODE" = "report" ]; then found -- - else + elif [ "$DI_MODE" = "search" ]; then local msg="# reporting not found result. notfound=${DI_ON_NOTFOUND}." - local DI_REPORT="true" + local DI_MODE="report" found -- "$msg" fi } @@ -916,8 +963,11 @@ # do not pass an empty line through. shift fi - # always write the None datasource last. - list="${list:+${list}, }None" + # if None is not already in the list, then add it last. + case " $list " in + *\ None,\ *|*\ None\ ) :;; + *) list=${list:+${list}, None};; + esac write_result "datasource_list: [ $list ]" "$@" return } @@ -998,11 +1048,11 @@ parse_policy() { # parse_policy(policy, default) # parse a policy string. sets - # _rc_mode (enable|disable,search) + # _rc_mode (enabled|disabled|search|report) # _rc_report true|false # _rc_found first|all # _rc_maybe all|none - # _rc_notfound enable|disable + # _rc_notfound enabled|disabled local def="" case "$DI_UNAME_MACHINE" in # these have dmi data @@ -1025,8 +1075,7 @@ for tok in "$@"; do val=${tok#*=} case "$tok" in - report) report=true;; - $DI_ENABLED|$DI_DISABLED|search) mode=$tok;; + $DI_ENABLED|$DI_DISABLED|search|report) mode=$tok;; found=all|found=first) found=$val;; maybe=all|maybe=none) maybe=$val;; notfound=$DI_ENABLED|notfound=$DI_DISABLED) notfound=$val;; @@ -1075,7 +1124,6 @@ debug 1 "policy loaded: mode=${_rc_mode} report=${_rc_report}" \ "found=${_rc_found} maybe=${_rc_maybe} notfound=${_rc_notfound}" DI_MODE=${_rc_mode} - DI_REPORT=${_rc_report} DI_ON_FOUND=${_rc_found} DI_ON_MAYBE=${_rc_maybe} DI_ON_NOTFOUND=${_rc_notfound} @@ -1118,7 +1166,7 @@ $DI_ENABLED) debug 1 "mode=$DI_ENABLED. returning $ret_en" return $ret_en;; - search) :;; + search|report) :;; esac if [ -n "${DI_DSNAME}" ]; then @@ -1191,18 +1239,26 @@ # record the empty result. record_notfound - case "$DI_ON_NOTFOUND" in - $DI_DISABLED) - debug 1 "No result. notfound=$DI_DISABLED. returning $ret_dis." - return $ret_dis - ;; - $DI_ENABLED) - debug 1 "No result. notfound=$DI_ENABLED. returning $ret_en" - return $ret_en;; - esac - error "Unexpected result" - return 3 + local basemsg="No ds found [mode=$DI_MODE, notfound=$DI_ON_NOTFOUND]." 
+ local msg="" ret=3 + case "$DI_MODE:$DI_ON_NOTFOUND" in + report:$DI_DISABLED) + msg="$basemsg Would disable cloud-init [$ret_dis]" + ret=$ret_en;; + report:$DI_ENABLED) + msg="$basemsg Would enable cloud-init [$ret_en]" + ret=$ret_en;; + search:$DI_DISABLED) + msg="$basemsg Disabled cloud-init [$ret_dis]" + ret=$ret_dis;; + search:$DI_ENABLED) + msg="$basemsg Enabled cloud-init [$ret_en]" + ret=$ret_en;; + *) error "Unexpected result";; + esac + debug 1 "$msg" + return $ret } main() { diff -Nru cloud-init-0.7.9-47-gc81ea53/tools/hacking.py cloud-init-0.7.9-153-g16a7302f/tools/hacking.py --- cloud-init-0.7.9-47-gc81ea53/tools/hacking.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tools/hacking.py 2017-05-26 18:36:38.000000000 +0000 @@ -165,7 +165,8 @@ pep8._main() finally: if len(_missingImport) > 0: - print >> sys.stderr, ("%i imports missing in this test environment" - % len(_missingImport)) + sys.stderr.write( + "%i imports missing in this test environment\n" % + len(_missingImport)) # vi: ts=4 expandtab diff -Nru cloud-init-0.7.9-47-gc81ea53/tools/mock-meta.py cloud-init-0.7.9-153-g16a7302f/tools/mock-meta.py --- cloud-init-0.7.9-47-gc81ea53/tools/mock-meta.py 2017-03-03 06:30:15.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tools/mock-meta.py 2017-05-26 18:36:38.000000000 +0000 @@ -21,8 +21,8 @@ import json import logging import os -import socket import random +import socket import string import sys import yaml @@ -293,9 +293,9 @@ else: return "%s" % (PLACEMENT_CAPABILITIES.get(pentry, '')) else: - log.warn(("Did not implement action %s, " - "returning empty response: %r"), - action, NOT_IMPL_RESPONSE) + log.warning(("Did not implement action %s, " + "returning empty response: %r"), + action, NOT_IMPL_RESPONSE) return NOT_IMPL_RESPONSE diff -Nru cloud-init-0.7.9-47-gc81ea53/tools/net-convert.py cloud-init-0.7.9-153-g16a7302f/tools/net-convert.py --- cloud-init-0.7.9-47-gc81ea53/tools/net-convert.py 1970-01-01 00:00:00.000000000 +0000 +++ cloud-init-0.7.9-153-g16a7302f/tools/net-convert.py 2017-05-26 18:36:38.000000000 +0000 @@ -0,0 +1,84 @@ +#!/usr/bin/python3 +# This file is part of cloud-init. See LICENSE file for license information. 
+
+import argparse
+import json
+import os
+import yaml
+
+from cloudinit.sources.helpers import openstack
+
+from cloudinit.net import eni
+from cloudinit.net import netplan
+from cloudinit.net import network_state
+from cloudinit.net import sysconfig
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--network-data", "-p", type=open,
+                        metavar="PATH", required=True)
+    parser.add_argument("--kind", "-k",
+                        choices=['eni', 'network_data.json', 'yaml'],
+                        required=True)
+    parser.add_argument("-d", "--directory",
+                        metavar="PATH",
+                        help="directory to place output in",
+                        required=True)
+    parser.add_argument("-m", "--mac",
+                        metavar="name,mac",
+                        action='append',
+                        help="interface name to mac mapping")
+    parser.add_argument("--output-kind", "-ok",
+                        choices=['eni', 'netplan', 'sysconfig'],
+                        required=True)
+    args = parser.parse_args()
+
+    if not os.path.isdir(args.directory):
+        os.makedirs(args.directory)
+
+    if args.mac:
+        known_macs = {}
+        for item in args.mac:
+            iface_name, iface_mac = item.split(",", 1)
+            known_macs[iface_mac] = iface_name
+    else:
+        known_macs = None
+
+    net_data = args.network_data.read()
+    if args.kind == "eni":
+        pre_ns = eni.convert_eni_data(net_data)
+        ns = network_state.parse_net_config_data(pre_ns)
+    elif args.kind == "yaml":
+        pre_ns = yaml.load(net_data)
+        if 'network' in pre_ns:
+            pre_ns = pre_ns.get('network')
+        print("Input YAML")
+        print(yaml.dump(pre_ns, default_flow_style=False, indent=4))
+        ns = network_state.parse_net_config_data(pre_ns)
+    else:
+        pre_ns = openstack.convert_net_json(
+            json.loads(net_data), known_macs=known_macs)
+        ns = network_state.parse_net_config_data(pre_ns)
+
+    if not ns:
+        raise RuntimeError("No valid network_state object created from "
+                           "input data")
+
+    print("\nInternal State")
+    print(yaml.dump(ns, default_flow_style=False, indent=4))
+    if args.output_kind == "eni":
+        r_cls = eni.Renderer
+    elif args.output_kind == "netplan":
+        r_cls = netplan.Renderer
+    else:
+        r_cls = sysconfig.Renderer
+
+    r = r_cls()
+    r.render_network_state(ns, target=args.directory)
+
+
+if __name__ == '__main__':
+    main()
+
+# vi: ts=4 expandtab
diff -Nru cloud-init-0.7.9-47-gc81ea53/tools/Z99-cloudinit-warnings.sh cloud-init-0.7.9-153-g16a7302f/tools/Z99-cloudinit-warnings.sh
--- cloud-init-0.7.9-47-gc81ea53/tools/Z99-cloudinit-warnings.sh	2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/tools/Z99-cloudinit-warnings.sh	2017-05-26 18:36:38.000000000 +0000
@@ -4,12 +4,12 @@
 # Purpose: show user warnings on login.
 
 cloud_init_warnings() {
-    local skipf="" warning="" idir="/var/lib/cloud/instance" n=0
+    local warning="" idir="/var/lib/cloud/instance" n=0
     local warndir="$idir/warnings"
     local ufile="$HOME/.cloud-warnings.skip" sfile="$warndir/.skip"
     [ -d "$warndir" ] || return 0
     [ ! -f "$ufile" ] || return 0
-    [ ! -f "$skipf" ] || return 0
+    [ ! -f "$sfile" ] || return 0
     for warning in "$warndir"/*; do
         [ -f "$warning" ] || continue
diff -Nru cloud-init-0.7.9-47-gc81ea53/tox.ini cloud-init-0.7.9-153-g16a7302f/tox.ini
--- cloud-init-0.7.9-47-gc81ea53/tox.ini	2017-03-03 06:30:15.000000000 +0000
+++ cloud-init-0.7.9-153-g16a7302f/tox.ini	2017-05-26 18:36:38.000000000 +0000
@@ -1,42 +1,54 @@
 [tox]
-envlist = py27, py3, flake8, xenial
+envlist = py27, py3, flake8, xenial, pylint
 recreate = True
 
 [testenv]
 commands = python -m nose {posargs:tests/unittests}
-deps = -r{toxinidir}/test-requirements.txt
-    -r{toxinidir}/requirements.txt
 setenv =
     LC_ALL = en_US.utf-8
 
 [testenv:flake8]
 basepython = python3
+deps =
+    pycodestyle==2.3.1
+    pyflakes==1.5.0
+    flake8==3.3.0
+    hacking==0.13.0
 commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/}
 
 # https://github.com/gabrielfalcao/HTTPretty/issues/223
 setenv =
     LC_ALL = en_US.utf-8
 
+[testenv:pylint]
+deps = pylint==1.7.1
+commands = {envpython} -m pylint {posargs:cloudinit}
+
 [testenv:py3]
 basepython = python3
+deps = -r{toxinidir}/test-requirements.txt
 commands = {envpython} -m nose {posargs:--with-coverage \
     --cover-erase --cover-branches --cover-inclusive \
     --cover-package=cloudinit tests/unittests}
 
+[testenv:py27]
+basepython = python2.7
+deps = -r{toxinidir}/test-requirements.txt
+
 [testenv:py26]
+deps = -r{toxinidir}/test-requirements.txt
 commands = nosetests {posargs:tests/unittests}
 setenv =
     LC_ALL = C
 
 [flake8]
 #H102 Apache 2.0 license header not found
-ignore=H404,H405,H105,H301,H104,H403,H101,H102
+ignore=H404,H405,H105,H301,H104,H403,H101,H102,H106,H304
 exclude = .venv,.tox,dist,doc,*egg,.git,build,tools
 
 [testenv:doc]
 basepython = python3
-deps = {[testenv]deps}
-    sphinx
+deps = sphinx
 commands = {envpython} -m sphinx {posargs:doc/rtd doc/rtd_html}
 
 [testenv:xenial]
@@ -59,14 +71,10 @@
     nose==1.3.7
     unittest2==1.1.0
    contextlib2==0.5.1
-    pep8==1.7.0
-    pyflakes==1.1.0
-    flake8==2.5.4
-    hacking==0.10.2
 
 [testenv:centos6]
 basepython = python2.6
-commands = nosetests {posargs:tests}
+commands = nosetests {posargs:tests/unittests}
 deps =
     # requirements
     argparse==1.2.1
@@ -87,3 +95,14 @@
 [testenv:tip-pyflakes]
 commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/}
 deps = pyflakes
+
+[testenv:tip-pylint]
+commands = {envpython} -m pylint {posargs:cloudinit}
+deps = pylint
+
+[testenv:citest]
+basepython = python3
+commands = {envpython} -m tests.cloud_tests {posargs}
+passenv = HOME
+deps =
+    pylxd==2.1.3