Merge ~chad.smith/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel

Proposed by Chad Smith
Status: Merged
Merged at revision: 1c047bd2a9661be9a82e072723f15560eebcafdd
Proposed branch: ~chad.smith/cloud-init:ubuntu/devel
Merge into: cloud-init:ubuntu/devel
Diff against target: 955 lines (+706/-25)
13 files modified
cloudinit/cloud.py (+2/-2)
cloudinit/cmd/main.py (+5/-3)
cloudinit/config/tests/test_ssh.py (+147/-0)
cloudinit/distros/__init__.py (+1/-1)
cloudinit/reporting/__init__.py (+7/-1)
cloudinit/reporting/handlers.py (+246/-0)
cloudinit/sources/helpers/vmware/imc/config_nic.py (+1/-1)
cloudinit/stages.py (+1/-1)
cloudinit/tests/helpers.py (+27/-16)
debian/changelog (+14/-0)
tests/unittests/test_reporting_hyperv.py (+134/-0)
tests/unittests/test_vmware_config_file.py (+115/-0)
tools/read-version (+6/-0)
Reviewer Review Type Date Requested Status
Server Team CI bot continuous-integration Approve
cloud-init Commiters Pending
Review via email: mp+354153@code.launchpad.net

Commit message

New-upstream-snapshot with azure kvp reporting feature

To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote :

PASSED: Continuous integration, rev:00362e3be579a8157e0a915b250888c39eb408fc
https://jenkins.ubuntu.com/server/job/cloud-init-ci/284/
Executed test runs:
    SUCCESS: Checkout
    SUCCESS: Unit & Style Tests
    SUCCESS: Ubuntu LTS: Build
    SUCCESS: Ubuntu LTS: Integration
    IN_PROGRESS: Declarative: Post Actions

Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/284/rebuild

review: Approve (continuous-integration)
Revision history for this message
Server Team CI bot (server-team-bot) wrote :

PASSED: Continuous integration, rev:1c047bd2a9661be9a82e072723f15560eebcafdd
https://jenkins.ubuntu.com/server/job/cloud-init-ci/285/
Executed test runs:
    SUCCESS: Checkout
    SUCCESS: Unit & Style Tests
    SUCCESS: Ubuntu LTS: Build
    SUCCESS: Ubuntu LTS: Integration
    IN_PROGRESS: Declarative: Post Actions

Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/285/rebuild

review: Approve (continuous-integration)

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
2index 6d12c43..7ae98e1 100644
3--- a/cloudinit/cloud.py
4+++ b/cloudinit/cloud.py
5@@ -47,7 +47,7 @@ class Cloud(object):
6
7 @property
8 def cfg(self):
9- # Ensure that not indirectly modified
10+ # Ensure that cfg is not indirectly modified
11 return copy.deepcopy(self._cfg)
12
13 def run(self, name, functor, args, freq=None, clear_on_fail=False):
14@@ -61,7 +61,7 @@ class Cloud(object):
15 return None
16 return fn
17
18- # The rest of thes are just useful proxies
19+ # The rest of these are just useful proxies
20 def get_userdata(self, apply_filter=True):
21 return self.datasource.get_userdata(apply_filter)
22
23diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
24index d6ba90f..4ea4fe7 100644
25--- a/cloudinit/cmd/main.py
26+++ b/cloudinit/cmd/main.py
27@@ -315,7 +315,7 @@ def main_init(name, args):
28 existing = "trust"
29
30 init.purge_cache()
31- # Delete the non-net file as well
32+ # Delete the no-net file as well
33 util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))
34
35 # Stage 5
36@@ -339,7 +339,7 @@ def main_init(name, args):
37 " Likely bad things to come!"))
38 if not args.force:
39 init.apply_network_config(bring_up=not args.local)
40- LOG.debug("[%s] Exiting without datasource in local mode", mode)
41+ LOG.debug("[%s] Exiting without datasource", mode)
42 if mode == sources.DSMODE_LOCAL:
43 return (None, [])
44 else:
45@@ -877,9 +877,11 @@ def main(sysv_args=None):
46 rname, rdesc, reporting_enabled=report_on)
47
48 with args.reporter:
49- return util.log_time(
50+ retval = util.log_time(
51 logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
52 get_uptime=True, func=functor, args=(name, args))
53+ reporting.flush_events()
54+ return retval
55
56
57 if __name__ == '__main__':
58diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py
59new file mode 100644
60index 0000000..7441d9e
61--- /dev/null
62+++ b/cloudinit/config/tests/test_ssh.py
63@@ -0,0 +1,147 @@
64+# This file is part of cloud-init. See LICENSE file for license information.
65+
66+
67+from cloudinit.config import cc_ssh
68+from cloudinit.tests.helpers import CiTestCase, mock
69+
70+MODPATH = "cloudinit.config.cc_ssh."
71+
72+
73+@mock.patch(MODPATH + "ssh_util.setup_user_keys")
74+class TestHandleSsh(CiTestCase):
75+ """Test cc_ssh handling of ssh config."""
76+
77+ def test_apply_credentials_with_user(self, m_setup_keys):
78+ """Apply keys for the given user and root."""
79+ keys = ["key1"]
80+ user = "clouduser"
81+ options = cc_ssh.DISABLE_ROOT_OPTS
82+ cc_ssh.apply_credentials(keys, user, False, options)
83+ self.assertEqual([mock.call(set(keys), user),
84+ mock.call(set(keys), "root", options="")],
85+ m_setup_keys.call_args_list)
86+
87+ def test_apply_credentials_with_no_user(self, m_setup_keys):
88+ """Apply keys for root only."""
89+ keys = ["key1"]
90+ user = None
91+ options = cc_ssh.DISABLE_ROOT_OPTS
92+ cc_ssh.apply_credentials(keys, user, False, options)
93+ self.assertEqual([mock.call(set(keys), "root", options="")],
94+ m_setup_keys.call_args_list)
95+
96+ def test_apply_credentials_with_user_disable_root(self, m_setup_keys):
97+ """Apply keys for the given user and disable root ssh."""
98+ keys = ["key1"]
99+ user = "clouduser"
100+ options = cc_ssh.DISABLE_ROOT_OPTS
101+ cc_ssh.apply_credentials(keys, user, True, options)
102+ options = options.replace("$USER", user)
103+ self.assertEqual([mock.call(set(keys), user),
104+ mock.call(set(keys), "root", options=options)],
105+ m_setup_keys.call_args_list)
106+
107+ def test_apply_credentials_with_no_user_disable_root(self, m_setup_keys):
108+ """Apply keys with no user and disable root ssh."""
109+ keys = ["key1"]
110+ user = None
111+ options = cc_ssh.DISABLE_ROOT_OPTS
112+ cc_ssh.apply_credentials(keys, user, True, options)
113+ options = options.replace("$USER", "NONE")
114+ self.assertEqual([mock.call(set(keys), "root", options=options)],
115+ m_setup_keys.call_args_list)
116+
117+ @mock.patch(MODPATH + "glob.glob")
118+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
119+ @mock.patch(MODPATH + "os.path.exists")
120+ def test_handle_no_cfg(self, m_path_exists, m_nug,
121+ m_glob, m_setup_keys):
122+ """Test handle with no config ignores generating existing keyfiles."""
123+ cfg = {}
124+ keys = ["key1"]
125+ m_glob.return_value = [] # Return no matching keys to prevent removal
126+ # Mock os.path.exists to True to short-circuit the key writing logic
127+ m_path_exists.return_value = True
128+ m_nug.return_value = ([], {})
129+ cloud = self.tmp_cloud(
130+ distro='ubuntu', metadata={'public-keys': keys})
131+ cc_ssh.handle("name", cfg, cloud, None, None)
132+ options = cc_ssh.DISABLE_ROOT_OPTS.replace("$USER", "NONE")
133+ m_glob.assert_called_once_with('/etc/ssh/ssh_host_*key*')
134+ self.assertIn(
135+ [mock.call('/etc/ssh/ssh_host_rsa_key'),
136+ mock.call('/etc/ssh/ssh_host_dsa_key'),
137+ mock.call('/etc/ssh/ssh_host_ecdsa_key'),
138+ mock.call('/etc/ssh/ssh_host_ed25519_key')],
139+ m_path_exists.call_args_list)
140+ self.assertEqual([mock.call(set(keys), "root", options=options)],
141+ m_setup_keys.call_args_list)
142+
143+ @mock.patch(MODPATH + "glob.glob")
144+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
145+ @mock.patch(MODPATH + "os.path.exists")
146+ def test_handle_no_cfg_and_default_root(self, m_path_exists, m_nug,
147+ m_glob, m_setup_keys):
148+ """Test handle with no config and a default distro user."""
149+ cfg = {}
150+ keys = ["key1"]
151+ user = "clouduser"
152+ m_glob.return_value = [] # Return no matching keys to prevent removal
153+ # Mock os.path.exists to True to short-circuit the key writing logic
154+ m_path_exists.return_value = True
155+ m_nug.return_value = ({user: {"default": user}}, {})
156+ cloud = self.tmp_cloud(
157+ distro='ubuntu', metadata={'public-keys': keys})
158+ cc_ssh.handle("name", cfg, cloud, None, None)
159+
160+ options = cc_ssh.DISABLE_ROOT_OPTS.replace("$USER", user)
161+ self.assertEqual([mock.call(set(keys), user),
162+ mock.call(set(keys), "root", options=options)],
163+ m_setup_keys.call_args_list)
164+
165+ @mock.patch(MODPATH + "glob.glob")
166+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
167+ @mock.patch(MODPATH + "os.path.exists")
168+ def test_handle_cfg_with_explicit_disable_root(self, m_path_exists, m_nug,
169+ m_glob, m_setup_keys):
170+ """Test handle with explicit disable_root and a default distro user."""
171+ # This test is identical to test_handle_no_cfg_and_default_root,
172+ # except this uses an explicit cfg value
173+ cfg = {"disable_root": True}
174+ keys = ["key1"]
175+ user = "clouduser"
176+ m_glob.return_value = [] # Return no matching keys to prevent removal
177+ # Mock os.path.exists to True to short-circuit the key writing logic
178+ m_path_exists.return_value = True
179+ m_nug.return_value = ({user: {"default": user}}, {})
180+ cloud = self.tmp_cloud(
181+ distro='ubuntu', metadata={'public-keys': keys})
182+ cc_ssh.handle("name", cfg, cloud, None, None)
183+
184+ options = cc_ssh.DISABLE_ROOT_OPTS.replace("$USER", user)
185+ self.assertEqual([mock.call(set(keys), user),
186+ mock.call(set(keys), "root", options=options)],
187+ m_setup_keys.call_args_list)
188+
189+ @mock.patch(MODPATH + "glob.glob")
190+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
191+ @mock.patch(MODPATH + "os.path.exists")
192+ def test_handle_cfg_without_disable_root(self, m_path_exists, m_nug,
193+ m_glob, m_setup_keys):
194+ """Test handle with disable_root == False."""
195+ # When disable_root == False, the ssh redirect for root is skipped
196+ cfg = {"disable_root": False}
197+ keys = ["key1"]
198+ user = "clouduser"
199+ m_glob.return_value = [] # Return no matching keys to prevent removal
200+ # Mock os.path.exists to True to short-circuit the key writing logic
201+ m_path_exists.return_value = True
202+ m_nug.return_value = ({user: {"default": user}}, {})
203+ cloud = self.tmp_cloud(
204+ distro='ubuntu', metadata={'public-keys': keys})
205+ cloud.get_public_ssh_keys = mock.Mock(return_value=keys)
206+ cc_ssh.handle("name", cfg, cloud, None, None)
207+
208+ self.assertEqual([mock.call(set(keys), user),
209+ mock.call(set(keys), "root", options="")],
210+ m_setup_keys.call_args_list)
211diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
212index ab0b077..fde054e 100755
213--- a/cloudinit/distros/__init__.py
214+++ b/cloudinit/distros/__init__.py
215@@ -157,7 +157,7 @@ class Distro(object):
216 distro)
217 header = '\n'.join([
218 "# Converted from network_config for distro %s" % distro,
219- "# Implmentation of _write_network_config is needed."
220+ "# Implementation of _write_network_config is needed."
221 ])
222 ns = network_state.parse_net_config_data(netconfig)
223 contents = eni.network_state_to_eni(
224diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
225index 1ed2b48..ed5c703 100644
226--- a/cloudinit/reporting/__init__.py
227+++ b/cloudinit/reporting/__init__.py
228@@ -18,7 +18,7 @@ DEFAULT_CONFIG = {
229
230
231 def update_configuration(config):
232- """Update the instanciated_handler_registry.
233+ """Update the instantiated_handler_registry.
234
235 :param config:
236 The dictionary containing changes to apply. If a key is given
237@@ -37,6 +37,12 @@ def update_configuration(config):
238 instantiated_handler_registry.register_item(handler_name, instance)
239
240
241+def flush_events():
242+ for _, handler in instantiated_handler_registry.registered_items.items():
243+ if hasattr(handler, 'flush'):
244+ handler.flush()
245+
246+
247 instantiated_handler_registry = DictRegistry()
248 update_configuration(DEFAULT_CONFIG)
249
250diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
251index 4066076..6d23558 100644
252--- a/cloudinit/reporting/handlers.py
253+++ b/cloudinit/reporting/handlers.py
254@@ -1,17 +1,32 @@
255 # This file is part of cloud-init. See LICENSE file for license information.
256
257 import abc
258+import fcntl
259 import json
260 import six
261+import os
262+import re
263+import struct
264+import threading
265+import time
266
267 from cloudinit import log as logging
268 from cloudinit.registry import DictRegistry
269 from cloudinit import (url_helper, util)
270+from datetime import datetime
271
272+if six.PY2:
273+ from multiprocessing.queues import JoinableQueue as JQueue
274+else:
275+ from queue import Queue as JQueue
276
277 LOG = logging.getLogger(__name__)
278
279
280+class ReportException(Exception):
281+ pass
282+
283+
284 @six.add_metaclass(abc.ABCMeta)
285 class ReportingHandler(object):
286 """Base class for report handlers.
287@@ -24,6 +39,10 @@ class ReportingHandler(object):
288 def publish_event(self, event):
289 """Publish an event."""
290
291+ def flush(self):
292+ """Ensure ReportingHandler has published all events"""
293+ pass
294+
295
296 class LogHandler(ReportingHandler):
297 """Publishes events to the cloud-init log at the ``DEBUG`` log level."""
298@@ -85,9 +104,236 @@ class WebHookHandler(ReportingHandler):
299 LOG.warning("failed posting event: %s", event.as_string())
300
301
302+class HyperVKvpReportingHandler(ReportingHandler):
303+ """
304+ Reports events to a Hyper-V host using Key-Value-Pair exchange protocol
305+ and can be used to obtain high level diagnostic information from the host.
306+
307+ To use this facility, the KVP user-space daemon (hv_kvp_daemon) has to be
308+ running. It reads the kvp_file when the host requests the guest to
309+ enumerate the KVP's.
310+
311+ This reporter collates all events for a module (origin|name) in a single
312+ json string in the dictionary.
313+
314+ For more information, see
315+ https://technet.microsoft.com/en-us/library/dn798287.aspx#Linux%20guests
316+ """
317+ HV_KVP_EXCHANGE_MAX_VALUE_SIZE = 2048
318+ HV_KVP_EXCHANGE_MAX_KEY_SIZE = 512
319+ HV_KVP_RECORD_SIZE = (HV_KVP_EXCHANGE_MAX_KEY_SIZE +
320+ HV_KVP_EXCHANGE_MAX_VALUE_SIZE)
321+ EVENT_PREFIX = 'CLOUD_INIT'
322+ MSG_KEY = 'msg'
323+ RESULT_KEY = 'result'
324+ DESC_IDX_KEY = 'msg_i'
325+ JSON_SEPARATORS = (',', ':')
326+ KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1'
327+
328+ def __init__(self,
329+ kvp_file_path=KVP_POOL_FILE_GUEST,
330+ event_types=None):
331+ super(HyperVKvpReportingHandler, self).__init__()
332+ self._kvp_file_path = kvp_file_path
333+ self._event_types = event_types
334+ self.q = JQueue()
335+ self.kvp_file = None
336+ self.incarnation_no = self._get_incarnation_no()
337+ self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
338+ self.incarnation_no)
339+ self._current_offset = 0
340+ self.publish_thread = threading.Thread(
341+ target=self._publish_event_routine)
342+ self.publish_thread.daemon = True
343+ self.publish_thread.start()
344+
345+ def _get_incarnation_no(self):
346+ """
347+ use the time passed as the incarnation number.
348+ the incarnation number is the number which is used to
349+ distinguish the old data stored in kvp from the new data.
350+ """
351+ uptime_str = util.uptime()
352+ try:
353+ return int(time.time() - float(uptime_str))
354+ except ValueError:
355+ LOG.warning("uptime '%s' not in correct format.", uptime_str)
356+ return 0
357+
358+ def _iterate_kvps(self, offset):
359+ """iterate the kvp file from the current offset."""
360+ try:
361+ with open(self._kvp_file_path, 'rb+') as f:
362+ self.kvp_file = f
363+ fcntl.flock(f, fcntl.LOCK_EX)
364+ f.seek(offset)
365+ record_data = f.read(self.HV_KVP_RECORD_SIZE)
366+ while len(record_data) == self.HV_KVP_RECORD_SIZE:
367+ self._current_offset += self.HV_KVP_RECORD_SIZE
368+ kvp_item = self._decode_kvp_item(record_data)
369+ yield kvp_item
370+ record_data = f.read(self.HV_KVP_RECORD_SIZE)
371+ fcntl.flock(f, fcntl.LOCK_UN)
372+ finally:
373+ self.kvp_file = None
374+
375+ def _event_key(self, event):
376+ """
377+ the event key format is:
378+ CLOUD_INIT|<incarnation number>|<event_type>|<event_name>
379+ """
380+ return u"{0}|{1}|{2}".format(self.event_key_prefix,
381+ event.event_type, event.name)
382+
383+ def _encode_kvp_item(self, key, value):
384+ data = (struct.pack("%ds%ds" % (
385+ self.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
386+ self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
387+ key.encode('utf-8'), value.encode('utf-8')))
388+ return data
389+
390+ def _decode_kvp_item(self, record_data):
391+ record_data_len = len(record_data)
392+ if record_data_len != self.HV_KVP_RECORD_SIZE:
393+ raise ReportException(
394+ "record_data len not correct {0} {1}."
395+ .format(record_data_len, self.HV_KVP_RECORD_SIZE))
396+ k = (record_data[0:self.HV_KVP_EXCHANGE_MAX_KEY_SIZE].decode('utf-8')
397+ .strip('\x00'))
398+ v = (
399+ record_data[
400+ self.HV_KVP_EXCHANGE_MAX_KEY_SIZE:self.HV_KVP_RECORD_SIZE
401+ ].decode('utf-8').strip('\x00'))
402+
403+ return {'key': k, 'value': v}
404+
405+ def _update_kvp_item(self, record_data):
406+ if self.kvp_file is None:
407+ raise ReportException(
408+ "kvp file '{0}' not opened."
409+ .format(self._kvp_file_path))
410+ self.kvp_file.seek(-self.HV_KVP_RECORD_SIZE, 1)
411+ self.kvp_file.write(record_data)
412+
413+ def _append_kvp_item(self, record_data):
414+ with open(self._kvp_file_path, 'rb+') as f:
415+ fcntl.flock(f, fcntl.LOCK_EX)
416+ # seek to end of the file
417+ f.seek(0, 2)
418+ f.write(record_data)
419+ f.flush()
420+ fcntl.flock(f, fcntl.LOCK_UN)
421+ self._current_offset = f.tell()
422+
423+ def _break_down(self, key, meta_data, description):
424+ del meta_data[self.MSG_KEY]
425+ des_in_json = json.dumps(description)
426+ des_in_json = des_in_json[1:(len(des_in_json) - 1)]
427+ i = 0
428+ result_array = []
429+ message_place_holder = "\"" + self.MSG_KEY + "\":\"\""
430+ while True:
431+ meta_data[self.DESC_IDX_KEY] = i
432+ meta_data[self.MSG_KEY] = ''
433+ data_without_desc = json.dumps(meta_data,
434+ separators=self.JSON_SEPARATORS)
435+ room_for_desc = (
436+ self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE -
437+ len(data_without_desc) - 8)
438+ value = data_without_desc.replace(
439+ message_place_holder,
440+ '"{key}":"{desc}"'.format(
441+ key=self.MSG_KEY, desc=des_in_json[:room_for_desc]))
442+ result_array.append(self._encode_kvp_item(key, value))
443+ i += 1
444+ des_in_json = des_in_json[room_for_desc:]
445+ if len(des_in_json) == 0:
446+ break
447+ return result_array
448+
449+ def _encode_event(self, event):
450+ """
451+ encode the event into kvp data bytes.
452+ if the event content reaches the maximum length of kvp value.
453+ then it would be cut to multiple slices.
454+ """
455+ key = self._event_key(event)
456+ meta_data = {
457+ "name": event.name,
458+ "type": event.event_type,
459+ "ts": (datetime.utcfromtimestamp(event.timestamp)
460+ .isoformat() + 'Z'),
461+ }
462+ if hasattr(event, self.RESULT_KEY):
463+ meta_data[self.RESULT_KEY] = event.result
464+ meta_data[self.MSG_KEY] = event.description
465+ value = json.dumps(meta_data, separators=self.JSON_SEPARATORS)
466+ # if it reaches the maximum length of kvp value,
467+ # break it down to slices.
468+ # this should be very corner case.
469+ if len(value) > self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE:
470+ return self._break_down(key, meta_data, event.description)
471+ else:
472+ data = self._encode_kvp_item(key, value)
473+ return [data]
474+
475+ def _publish_event_routine(self):
476+ while True:
477+ try:
478+ event = self.q.get(block=True)
479+ need_append = True
480+ try:
481+ if not os.path.exists(self._kvp_file_path):
482+ LOG.warning(
483+ "skip writing events %s to %s. file not present.",
484+ event.as_string(),
485+ self._kvp_file_path)
486+ encoded_event = self._encode_event(event)
487+ # for each encoded_event
488+ for encoded_data in (encoded_event):
489+ for kvp in self._iterate_kvps(self._current_offset):
490+ match = (
491+ re.match(
492+ r"^{0}\|(\d+)\|.+"
493+ .format(self.EVENT_PREFIX),
494+ kvp['key']
495+ ))
496+ if match:
497+ match_groups = match.groups(0)
498+ if int(match_groups[0]) < self.incarnation_no:
499+ need_append = False
500+ self._update_kvp_item(encoded_data)
501+ continue
502+ if need_append:
503+ self._append_kvp_item(encoded_data)
504+ except IOError as e:
505+ LOG.warning(
506+ "failed posting event to kvp: %s e:%s",
507+ event.as_string(), e)
508+ finally:
509+ self.q.task_done()
510+
511+ # when main process exits, q.get() will throw EOFError
512+ # indicating we should exit this thread.
513+ except EOFError:
514+ return
515+
516+ # since saving to the kvp pool can be a time-consuming task,
517+ # especially if the kvp pool already contains a chunk of data,
518+ # defer it to another thread.
519+ def publish_event(self, event):
520+ if (not self._event_types or event.event_type in self._event_types):
521+ self.q.put(event)
522+
523+ def flush(self):
524+ LOG.debug('HyperVReportingHandler flushing remaining events')
525+ self.q.join()
526+
527+
528 available_handlers = DictRegistry()
529 available_handlers.register_item('log', LogHandler)
530 available_handlers.register_item('print', PrintHandler)
531 available_handlers.register_item('webhook', WebHookHandler)
532+available_handlers.register_item('hyperv', HyperVKvpReportingHandler)
533
534 # vi: ts=4 expandtab
535diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
536index 3ef8c62..e1890e2 100644
537--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
538+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
539@@ -164,7 +164,7 @@ class NicConfigurator(object):
540 return ([subnet], route_list)
541
542 # Add routes if there is no primary nic
543- if not self._primaryNic:
544+ if not self._primaryNic and v4.gateways:
545 route_list.extend(self.gen_ipv4_route(nic,
546 v4.gateways,
547 v4.netmask))
548diff --git a/cloudinit/stages.py b/cloudinit/stages.py
549index c132b57..8874d40 100644
550--- a/cloudinit/stages.py
551+++ b/cloudinit/stages.py
552@@ -510,7 +510,7 @@ class Init(object):
553 # The default frequency if handlers don't have one
554 'frequency': frequency,
555 # This will be used when new handlers are found
556- # to help write there contents to files with numbered
557+ # to help write their contents to files with numbered
558 # names...
559 'handlercount': 0,
560 'excluded': excluded,
561diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
562index 5bfe7fa..de24e25 100644
563--- a/cloudinit/tests/helpers.py
564+++ b/cloudinit/tests/helpers.py
565@@ -10,7 +10,6 @@ import shutil
566 import sys
567 import tempfile
568 import time
569-import unittest
570
571 import mock
572 import six
573@@ -28,11 +27,15 @@ except ImportError:
574
575 from cloudinit.config.schema import (
576 SchemaValidationError, validate_cloudconfig_schema)
577+from cloudinit import cloud
578+from cloudinit import distros
579 from cloudinit import helpers as ch
580+from cloudinit.sources import DataSourceNone
581 from cloudinit import util
582
583 # Used for skipping tests
584 SkipTest = unittest2.SkipTest
585+skipIf = unittest2.skipIf
586
587 # Used for detecting different python versions
588 PY2 = False
589@@ -187,6 +190,29 @@ class CiTestCase(TestCase):
590 """
591 raise SystemExit(code)
592
593+ def tmp_cloud(self, distro, sys_cfg=None, metadata=None):
594+ """Create a cloud with tmp working directory paths.
595+
596+ @param distro: Name of the distro to attach to the cloud.
597+ @param metadata: Optional metadata to set on the datasource.
598+
599+ @return: The built cloud instance.
600+ """
601+ self.new_root = self.tmp_dir()
602+ if not sys_cfg:
603+ sys_cfg = {}
604+ tmp_paths = {}
605+ for var in ['templates_dir', 'run_dir', 'cloud_dir']:
606+ tmp_paths[var] = self.tmp_path(var, dir=self.new_root)
607+ util.ensure_dir(tmp_paths[var])
608+ self.paths = ch.Paths(tmp_paths)
609+ cls = distros.fetch(distro)
610+ mydist = cls(distro, sys_cfg, self.paths)
611+ myds = DataSourceNone.DataSourceNone(sys_cfg, mydist, self.paths)
612+ if metadata:
613+ myds.metadata.update(metadata)
614+ return cloud.Cloud(myds, self.paths, sys_cfg, mydist, None)
615+
616
617 class ResourceUsingTestCase(CiTestCase):
618
619@@ -426,21 +452,6 @@ def readResource(name, mode='r'):
620
621
622 try:
623- skipIf = unittest.skipIf
624-except AttributeError:
625- # Python 2.6. Doesn't have to be high fidelity.
626- def skipIf(condition, reason):
627- def decorator(func):
628- def wrapper(*args, **kws):
629- if condition:
630- return func(*args, **kws)
631- else:
632- print(reason, file=sys.stderr)
633- return wrapper
634- return decorator
635-
636-
637-try:
638 import jsonschema
639 assert jsonschema # avoid pyflakes error F401: import unused
640 _missing_jsonschema_dep = False
641diff --git a/debian/changelog b/debian/changelog
642index ea38c3b..f303d70 100644
643--- a/debian/changelog
644+++ b/debian/changelog
645@@ -1,3 +1,17 @@
646+cloud-init (18.3-35-g3f6d0972-0ubuntu1) cosmic; urgency=medium
647+
648+ * New upstream snapshot.
649+ - Add unit tests for config/cc_ssh.py [Francis Ginther]
650+ - Fix the built-in cloudinit/tests/helpers:skipIf
651+ - read-version: enhance error message [Joshua Powers]
652+ - hyperv_reporting_handler: simplify threaded publisher
653+ - VMWare: Fix a network config bug in vm with static IPv4 and no gateway.
654+ [Pengpeng Sun]
655+ - logging: Add logging config type hyperv for reporting via Azure KVP
656+ [Andy Liu]
657+
658+ -- Chad Smith <chad.smith@canonical.com> Sat, 01 Sep 2018 12:08:52 -0600
659+
660 cloud-init (18.3-29-gdab59087-0ubuntu1) cosmic; urgency=medium
661
662 * New upstream snapshot.
663diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py
664new file mode 100644
665index 0000000..2e64c6c
666--- /dev/null
667+++ b/tests/unittests/test_reporting_hyperv.py
668@@ -0,0 +1,134 @@
669+# This file is part of cloud-init. See LICENSE file for license information.
670+
671+from cloudinit.reporting import events
672+from cloudinit.reporting import handlers
673+
674+import json
675+import os
676+
677+from cloudinit import util
678+from cloudinit.tests.helpers import CiTestCase
679+
680+
681+class TestKvpEncoding(CiTestCase):
682+ def test_encode_decode(self):
683+ kvp = {'key': 'key1', 'value': 'value1'}
684+ kvp_reporting = handlers.HyperVKvpReportingHandler()
685+ data = kvp_reporting._encode_kvp_item(kvp['key'], kvp['value'])
686+ self.assertEqual(len(data), kvp_reporting.HV_KVP_RECORD_SIZE)
687+ decoded_kvp = kvp_reporting._decode_kvp_item(data)
688+ self.assertEqual(kvp, decoded_kvp)
689+
690+
691+class TextKvpReporter(CiTestCase):
692+ def setUp(self):
693+ super(TextKvpReporter, self).setUp()
694+ self.tmp_file_path = self.tmp_path('kvp_pool_file')
695+ util.ensure_file(self.tmp_file_path)
696+
697+ def test_event_type_can_be_filtered(self):
698+ reporter = handlers.HyperVKvpReportingHandler(
699+ kvp_file_path=self.tmp_file_path,
700+ event_types=['foo', 'bar'])
701+
702+ reporter.publish_event(
703+ events.ReportingEvent('foo', 'name', 'description'))
704+ reporter.publish_event(
705+ events.ReportingEvent('some_other', 'name', 'description3'))
706+ reporter.q.join()
707+
708+ kvps = list(reporter._iterate_kvps(0))
709+ self.assertEqual(1, len(kvps))
710+
711+ reporter.publish_event(
712+ events.ReportingEvent('bar', 'name', 'description2'))
713+ reporter.q.join()
714+ kvps = list(reporter._iterate_kvps(0))
715+ self.assertEqual(2, len(kvps))
716+
717+ self.assertIn('foo', kvps[0]['key'])
718+ self.assertIn('bar', kvps[1]['key'])
719+ self.assertNotIn('some_other', kvps[0]['key'])
720+ self.assertNotIn('some_other', kvps[1]['key'])
721+
722+ def test_events_are_over_written(self):
723+ reporter = handlers.HyperVKvpReportingHandler(
724+ kvp_file_path=self.tmp_file_path)
725+
726+ self.assertEqual(0, len(list(reporter._iterate_kvps(0))))
727+
728+ reporter.publish_event(
729+ events.ReportingEvent('foo', 'name1', 'description'))
730+ reporter.publish_event(
731+ events.ReportingEvent('foo', 'name2', 'description'))
732+ reporter.q.join()
733+ self.assertEqual(2, len(list(reporter._iterate_kvps(0))))
734+
735+ reporter2 = handlers.HyperVKvpReportingHandler(
736+ kvp_file_path=self.tmp_file_path)
737+ reporter2.incarnation_no = reporter.incarnation_no + 1
738+ reporter2.publish_event(
739+ events.ReportingEvent('foo', 'name3', 'description'))
740+ reporter2.q.join()
741+
742+ self.assertEqual(2, len(list(reporter2._iterate_kvps(0))))
743+
744+ def test_events_with_higher_incarnation_not_over_written(self):
745+ reporter = handlers.HyperVKvpReportingHandler(
746+ kvp_file_path=self.tmp_file_path)
747+
748+ self.assertEqual(0, len(list(reporter._iterate_kvps(0))))
749+
750+ reporter.publish_event(
751+ events.ReportingEvent('foo', 'name1', 'description'))
752+ reporter.publish_event(
753+ events.ReportingEvent('foo', 'name2', 'description'))
754+ reporter.q.join()
755+ self.assertEqual(2, len(list(reporter._iterate_kvps(0))))
756+
757+ reporter3 = handlers.HyperVKvpReportingHandler(
758+ kvp_file_path=self.tmp_file_path)
759+ reporter3.incarnation_no = reporter.incarnation_no - 1
760+ reporter3.publish_event(
761+ events.ReportingEvent('foo', 'name3', 'description'))
762+ reporter3.q.join()
763+ self.assertEqual(3, len(list(reporter3._iterate_kvps(0))))
764+
765+ def test_finish_event_result_is_logged(self):
766+ reporter = handlers.HyperVKvpReportingHandler(
767+ kvp_file_path=self.tmp_file_path)
768+ reporter.publish_event(
769+ events.FinishReportingEvent('name2', 'description1',
770+ result=events.status.FAIL))
771+ reporter.q.join()
772+ self.assertIn('FAIL', list(reporter._iterate_kvps(0))[0]['value'])
773+
774+ def test_file_operation_issue(self):
775+ os.remove(self.tmp_file_path)
776+ reporter = handlers.HyperVKvpReportingHandler(
777+ kvp_file_path=self.tmp_file_path)
778+ reporter.publish_event(
779+ events.FinishReportingEvent('name2', 'description1',
780+ result=events.status.FAIL))
781+ reporter.q.join()
782+
783+ def test_event_very_long(self):
784+ reporter = handlers.HyperVKvpReportingHandler(
785+ kvp_file_path=self.tmp_file_path)
786+ description = 'ab' * reporter.HV_KVP_EXCHANGE_MAX_VALUE_SIZE
787+ long_event = events.FinishReportingEvent(
788+ 'event_name',
789+ description,
790+ result=events.status.FAIL)
791+ reporter.publish_event(long_event)
792+ reporter.q.join()
793+ kvps = list(reporter._iterate_kvps(0))
794+ self.assertEqual(3, len(kvps))
795+
796+ # restore from the kvp to see the content are all there
797+ full_description = ''
798+ for i in range(len(kvps)):
799+ msg_slice = json.loads(kvps[i]['value'])
800+ self.assertEqual(msg_slice['msg_i'], i)
801+ full_description += msg_slice['msg']
802+ self.assertEqual(description, full_description)
803diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py
804index 036f687..602dedb 100644
805--- a/tests/unittests/test_vmware_config_file.py
806+++ b/tests/unittests/test_vmware_config_file.py
807@@ -2,11 +2,15 @@
808 # Copyright (C) 2016 VMware INC.
809 #
810 # Author: Sankar Tanguturi <stanguturi@vmware.com>
811+# Pengpeng Sun <pengpengs@vmware.com>
812 #
813 # This file is part of cloud-init. See LICENSE file for license information.
814
815 import logging
816+import os
817 import sys
818+import tempfile
819+import textwrap
820
821 from cloudinit.sources.DataSourceOVF import get_network_config_from_conf
822 from cloudinit.sources.DataSourceOVF import read_vmware_imc
823@@ -343,4 +347,115 @@ class TestVmwareConfigFile(CiTestCase):
824 conf = Config(cf)
825 self.assertEqual("test-script", conf.custom_script_name)
826
827+
828+class TestVmwareNetConfig(CiTestCase):
829+ """Test conversion of vmware config to cloud-init config."""
830+
831+ def _get_NicConfigurator(self, text):
832+ fp = None
833+ try:
834+ with tempfile.NamedTemporaryFile(mode="w", dir=self.tmp_dir(),
835+ delete=False) as fp:
836+ fp.write(text)
837+ fp.close()
838+ cfg = Config(ConfigFile(fp.name))
839+ return NicConfigurator(cfg.nics, use_system_devices=False)
840+ finally:
841+ if fp:
842+ os.unlink(fp.name)
843+
844+ def test_non_primary_nic_without_gateway(self):
845+ """A non primary nic set is not required to have a gateway."""
846+ config = textwrap.dedent("""\
847+ [NETWORK]
848+ NETWORKING = yes
849+ BOOTPROTO = dhcp
850+ HOSTNAME = myhost1
851+ DOMAINNAME = eng.vmware.com
852+
853+ [NIC-CONFIG]
854+ NICS = NIC1
855+
856+ [NIC1]
857+ MACADDR = 00:50:56:a6:8c:08
858+ ONBOOT = yes
859+ IPv4_MODE = BACKWARDS_COMPATIBLE
860+ BOOTPROTO = static
861+ IPADDR = 10.20.87.154
862+ NETMASK = 255.255.252.0
863+ """)
864+ nc = self._get_NicConfigurator(config)
865+ self.assertEqual(
866+ [{'type': 'physical', 'name': 'NIC1',
867+ 'mac_address': '00:50:56:a6:8c:08',
868+ 'subnets': [
869+ {'control': 'auto', 'type': 'static',
870+ 'address': '10.20.87.154', 'netmask': '255.255.252.0'}]}],
871+ nc.generate())
872+
873+ def test_non_primary_nic_with_gateway(self):
874+ """A non primary nic set can have a gateway."""
875+ config = textwrap.dedent("""\
876+ [NETWORK]
877+ NETWORKING = yes
878+ BOOTPROTO = dhcp
879+ HOSTNAME = myhost1
880+ DOMAINNAME = eng.vmware.com
881+
882+ [NIC-CONFIG]
883+ NICS = NIC1
884+
885+ [NIC1]
886+ MACADDR = 00:50:56:a6:8c:08
887+ ONBOOT = yes
888+ IPv4_MODE = BACKWARDS_COMPATIBLE
889+ BOOTPROTO = static
890+ IPADDR = 10.20.87.154
891+ NETMASK = 255.255.252.0
892+ GATEWAY = 10.20.87.253
893+ """)
894+ nc = self._get_NicConfigurator(config)
895+ self.assertEqual(
896+ [{'type': 'physical', 'name': 'NIC1',
897+ 'mac_address': '00:50:56:a6:8c:08',
898+ 'subnets': [
899+ {'control': 'auto', 'type': 'static',
900+ 'address': '10.20.87.154', 'netmask': '255.255.252.0'}]},
901+ {'type': 'route', 'destination': '10.20.84.0/22',
902+ 'gateway': '10.20.87.253', 'metric': 10000}],
903+ nc.generate())
904+
905+ def test_a_primary_nic_with_gateway(self):
906+ """A primary nic set can have a gateway."""
907+ config = textwrap.dedent("""\
908+ [NETWORK]
909+ NETWORKING = yes
910+ BOOTPROTO = dhcp
911+ HOSTNAME = myhost1
912+ DOMAINNAME = eng.vmware.com
913+
914+ [NIC-CONFIG]
915+ NICS = NIC1
916+
917+ [NIC1]
918+ MACADDR = 00:50:56:a6:8c:08
919+ ONBOOT = yes
920+ IPv4_MODE = BACKWARDS_COMPATIBLE
921+ BOOTPROTO = static
922+ IPADDR = 10.20.87.154
923+ NETMASK = 255.255.252.0
924+ PRIMARY = true
925+ GATEWAY = 10.20.87.253
926+ """)
927+ nc = self._get_NicConfigurator(config)
928+ self.assertEqual(
929+ [{'type': 'physical', 'name': 'NIC1',
930+ 'mac_address': '00:50:56:a6:8c:08',
931+ 'subnets': [
932+ {'control': 'auto', 'type': 'static',
933+ 'address': '10.20.87.154', 'netmask': '255.255.252.0',
934+ 'gateway': '10.20.87.253'}]}],
935+ nc.generate())
936+
937+
938 # vi: ts=4 expandtab
939diff --git a/tools/read-version b/tools/read-version
940index 3ea9e66..e69c2ce 100755
941--- a/tools/read-version
942+++ b/tools/read-version
943@@ -76,6 +76,12 @@ if is_gitdir(_tdir) and which("git"):
944 if not version.startswith(src_version):
945 sys.stderr.write("git describe version (%s) differs from "
946 "cloudinit.version (%s)\n" % (version, src_version))
947+ sys.stderr.write(
948+ "Please get the latest upstream tags.\n"
949+ "As an example, this can be done with the following:\n"
950+ "$ git remote add upstream https://git.launchpad.net/cloud-init\n"
951+ "$ git fetch upstream --tags\n"
952+ )
953 sys.exit(1)
954
955 version_long = tiny_p(cmd + ["--long"]).strip()

Subscribers

People subscribed via source and target branches