Merge ~chad.smith/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel

Proposed by Chad Smith
Status: Merged
Merged at revision: 1c047bd2a9661be9a82e072723f15560eebcafdd
Proposed branch: ~chad.smith/cloud-init:ubuntu/devel
Merge into: cloud-init:ubuntu/devel
Diff against target: 955 lines (+706/-25)
13 files modified
cloudinit/cloud.py (+2/-2)
cloudinit/cmd/main.py (+5/-3)
cloudinit/config/tests/test_ssh.py (+147/-0)
cloudinit/distros/__init__.py (+1/-1)
cloudinit/reporting/__init__.py (+7/-1)
cloudinit/reporting/handlers.py (+246/-0)
cloudinit/sources/helpers/vmware/imc/config_nic.py (+1/-1)
cloudinit/stages.py (+1/-1)
cloudinit/tests/helpers.py (+27/-16)
debian/changelog (+14/-0)
tests/unittests/test_reporting_hyperv.py (+134/-0)
tests/unittests/test_vmware_config_file.py (+115/-0)
tools/read-version (+6/-0)
Reviewer                Review Type              Date Requested  Status
Server Team CI bot      continuous-integration   -               Approve
cloud-init Commiters    -                        -               Pending
Review via email: mp+354153@code.launchpad.net

Commit message

New-upstream-snapshot with azure kvp reporting feature

Revision history for this message
Server Team CI bot (server-team-bot) wrote:

PASSED: Continuous integration, rev:00362e3be579a8157e0a915b250888c39eb408fc
https://jenkins.ubuntu.com/server/job/cloud-init-ci/284/
Executed test runs:
    SUCCESS: Checkout
    SUCCESS: Unit & Style Tests
    SUCCESS: Ubuntu LTS: Build
    SUCCESS: Ubuntu LTS: Integration
    IN_PROGRESS: Declarative: Post Actions

Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/284/rebuild

review: Approve (continuous-integration)
Revision history for this message
Server Team CI bot (server-team-bot) wrote:

PASSED: Continuous integration, rev:1c047bd2a9661be9a82e072723f15560eebcafdd
https://jenkins.ubuntu.com/server/job/cloud-init-ci/285/
Executed test runs:
    SUCCESS: Checkout
    SUCCESS: Unit & Style Tests
    SUCCESS: Ubuntu LTS: Build
    SUCCESS: Ubuntu LTS: Integration
    IN_PROGRESS: Declarative: Post Actions

Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/285/rebuild

review: Approve (continuous-integration)

Preview Diff

diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index 6d12c43..7ae98e1 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -47,7 +47,7 @@ class Cloud(object):
 
     @property
     def cfg(self):
-        # Ensure that not indirectly modified
+        # Ensure that cfg is not indirectly modified
         return copy.deepcopy(self._cfg)
 
     def run(self, name, functor, args, freq=None, clear_on_fail=False):
@@ -61,7 +61,7 @@ class Cloud(object):
             return None
         return fn
 
-    # The rest of thes are just useful proxies
+    # The rest of these are just useful proxies
     def get_userdata(self, apply_filter=True):
         return self.datasource.get_userdata(apply_filter)
 
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index d6ba90f..4ea4fe7 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -315,7 +315,7 @@ def main_init(name, args):
         existing = "trust"
 
     init.purge_cache()
-    # Delete the non-net file as well
+    # Delete the no-net file as well
     util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))
 
     # Stage 5
@@ -339,7 +339,7 @@ def main_init(name, args):
339 " Likely bad things to come!"))339 " Likely bad things to come!"))
340 if not args.force:340 if not args.force:
341 init.apply_network_config(bring_up=not args.local)341 init.apply_network_config(bring_up=not args.local)
342 LOG.debug("[%s] Exiting without datasource in local mode", mode)342 LOG.debug("[%s] Exiting without datasource", mode)
343 if mode == sources.DSMODE_LOCAL:343 if mode == sources.DSMODE_LOCAL:
344 return (None, [])344 return (None, [])
345 else:345 else:
@@ -877,9 +877,11 @@ def main(sysv_args=None):
         rname, rdesc, reporting_enabled=report_on)
 
     with args.reporter:
-        return util.log_time(
+        retval = util.log_time(
             logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
             get_uptime=True, func=functor, args=(name, args))
+        reporting.flush_events()
+        return retval
 
 
 if __name__ == '__main__':
diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py
new file mode 100644
index 0000000..7441d9e
--- /dev/null
+++ b/cloudinit/config/tests/test_ssh.py
@@ -0,0 +1,147 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+
+from cloudinit.config import cc_ssh
+from cloudinit.tests.helpers import CiTestCase, mock
+
+MODPATH = "cloudinit.config.cc_ssh."
+
+
+@mock.patch(MODPATH + "ssh_util.setup_user_keys")
+class TestHandleSsh(CiTestCase):
+    """Test cc_ssh handling of ssh config."""
+
+    def test_apply_credentials_with_user(self, m_setup_keys):
+        """Apply keys for the given user and root."""
+        keys = ["key1"]
+        user = "clouduser"
+        options = cc_ssh.DISABLE_ROOT_OPTS
+        cc_ssh.apply_credentials(keys, user, False, options)
+        self.assertEqual([mock.call(set(keys), user),
+                          mock.call(set(keys), "root", options="")],
+                         m_setup_keys.call_args_list)
+
+    def test_apply_credentials_with_no_user(self, m_setup_keys):
+        """Apply keys for root only."""
+        keys = ["key1"]
+        user = None
+        options = cc_ssh.DISABLE_ROOT_OPTS
+        cc_ssh.apply_credentials(keys, user, False, options)
+        self.assertEqual([mock.call(set(keys), "root", options="")],
+                         m_setup_keys.call_args_list)
+
+    def test_apply_credentials_with_user_disable_root(self, m_setup_keys):
+        """Apply keys for the given user and disable root ssh."""
+        keys = ["key1"]
+        user = "clouduser"
+        options = cc_ssh.DISABLE_ROOT_OPTS
+        cc_ssh.apply_credentials(keys, user, True, options)
+        options = options.replace("$USER", user)
+        self.assertEqual([mock.call(set(keys), user),
+                          mock.call(set(keys), "root", options=options)],
+                         m_setup_keys.call_args_list)
+
+    def test_apply_credentials_with_no_user_disable_root(self, m_setup_keys):
+        """Apply keys no user and disable root ssh."""
+        keys = ["key1"]
+        user = None
+        options = cc_ssh.DISABLE_ROOT_OPTS
+        cc_ssh.apply_credentials(keys, user, True, options)
+        options = options.replace("$USER", "NONE")
+        self.assertEqual([mock.call(set(keys), "root", options=options)],
+                         m_setup_keys.call_args_list)
+
+    @mock.patch(MODPATH + "glob.glob")
+    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+    @mock.patch(MODPATH + "os.path.exists")
+    def test_handle_no_cfg(self, m_path_exists, m_nug,
+                           m_glob, m_setup_keys):
+        """Test handle with no config ignores generating existing keyfiles."""
+        cfg = {}
+        keys = ["key1"]
+        m_glob.return_value = []  # Return no matching keys to prevent removal
+        # Mock os.path.exits to True to short-circuit the key writing logic
+        m_path_exists.return_value = True
+        m_nug.return_value = ([], {})
+        cloud = self.tmp_cloud(
+            distro='ubuntu', metadata={'public-keys': keys})
+        cc_ssh.handle("name", cfg, cloud, None, None)
+        options = cc_ssh.DISABLE_ROOT_OPTS.replace("$USER", "NONE")
+        m_glob.assert_called_once_with('/etc/ssh/ssh_host_*key*')
+        self.assertIn(
+            [mock.call('/etc/ssh/ssh_host_rsa_key'),
+             mock.call('/etc/ssh/ssh_host_dsa_key'),
+             mock.call('/etc/ssh/ssh_host_ecdsa_key'),
+             mock.call('/etc/ssh/ssh_host_ed25519_key')],
+            m_path_exists.call_args_list)
+        self.assertEqual([mock.call(set(keys), "root", options=options)],
+                         m_setup_keys.call_args_list)
+
+    @mock.patch(MODPATH + "glob.glob")
+    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+    @mock.patch(MODPATH + "os.path.exists")
+    def test_handle_no_cfg_and_default_root(self, m_path_exists, m_nug,
+                                            m_glob, m_setup_keys):
+        """Test handle with no config and a default distro user."""
+        cfg = {}
+        keys = ["key1"]
+        user = "clouduser"
+        m_glob.return_value = []  # Return no matching keys to prevent removal
+        # Mock os.path.exits to True to short-circuit the key writing logic
+        m_path_exists.return_value = True
+        m_nug.return_value = ({user: {"default": user}}, {})
+        cloud = self.tmp_cloud(
+            distro='ubuntu', metadata={'public-keys': keys})
+        cc_ssh.handle("name", cfg, cloud, None, None)
+
+        options = cc_ssh.DISABLE_ROOT_OPTS.replace("$USER", user)
+        self.assertEqual([mock.call(set(keys), user),
+                          mock.call(set(keys), "root", options=options)],
+                         m_setup_keys.call_args_list)
+
+    @mock.patch(MODPATH + "glob.glob")
+    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+    @mock.patch(MODPATH + "os.path.exists")
+    def test_handle_cfg_with_explicit_disable_root(self, m_path_exists, m_nug,
+                                                   m_glob, m_setup_keys):
+        """Test handle with explicit disable_root and a default distro user."""
+        # This test is identical to test_handle_no_cfg_and_default_root,
+        # except this uses an explicit cfg value
+        cfg = {"disable_root": True}
+        keys = ["key1"]
+        user = "clouduser"
+        m_glob.return_value = []  # Return no matching keys to prevent removal
+        # Mock os.path.exits to True to short-circuit the key writing logic
+        m_path_exists.return_value = True
+        m_nug.return_value = ({user: {"default": user}}, {})
+        cloud = self.tmp_cloud(
+            distro='ubuntu', metadata={'public-keys': keys})
+        cc_ssh.handle("name", cfg, cloud, None, None)
+
+        options = cc_ssh.DISABLE_ROOT_OPTS.replace("$USER", user)
+        self.assertEqual([mock.call(set(keys), user),
+                          mock.call(set(keys), "root", options=options)],
+                         m_setup_keys.call_args_list)
+
+    @mock.patch(MODPATH + "glob.glob")
+    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+    @mock.patch(MODPATH + "os.path.exists")
+    def test_handle_cfg_without_disable_root(self, m_path_exists, m_nug,
+                                             m_glob, m_setup_keys):
+        """Test handle with disable_root == False."""
+        # When disable_root == False, the ssh redirect for root is skipped
+        cfg = {"disable_root": False}
+        keys = ["key1"]
+        user = "clouduser"
+        m_glob.return_value = []  # Return no matching keys to prevent removal
+        # Mock os.path.exits to True to short-circuit the key writing logic
+        m_path_exists.return_value = True
+        m_nug.return_value = ({user: {"default": user}}, {})
+        cloud = self.tmp_cloud(
+            distro='ubuntu', metadata={'public-keys': keys})
+        cloud.get_public_ssh_keys = mock.Mock(return_value=keys)
+        cc_ssh.handle("name", cfg, cloud, None, None)
+
+        self.assertEqual([mock.call(set(keys), user),
+                          mock.call(set(keys), "root", options="")],
+                         m_setup_keys.call_args_list)
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index ab0b077..fde054e 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -157,7 +157,7 @@ class Distro(object):
                     distro)
         header = '\n'.join([
             "# Converted from network_config for distro %s" % distro,
-            "# Implmentation of _write_network_config is needed."
+            "# Implementation of _write_network_config is needed."
         ])
         ns = network_state.parse_net_config_data(netconfig)
         contents = eni.network_state_to_eni(
diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
index 1ed2b48..ed5c703 100644
--- a/cloudinit/reporting/__init__.py
+++ b/cloudinit/reporting/__init__.py
@@ -18,7 +18,7 @@ DEFAULT_CONFIG = {
 
 
 def update_configuration(config):
-    """Update the instanciated_handler_registry.
+    """Update the instantiated_handler_registry.
 
     :param config:
         The dictionary containing changes to apply. If a key is given
@@ -37,6 +37,12 @@ def update_configuration(config):
         instantiated_handler_registry.register_item(handler_name, instance)
 
 
+def flush_events():
+    for _, handler in instantiated_handler_registry.registered_items.items():
+        if hasattr(handler, 'flush'):
+            handler.flush()
+
+
 instantiated_handler_registry = DictRegistry()
 update_configuration(DEFAULT_CONFIG)
 
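
A usage sketch for the new flush_events() hook (the handler name 'telemetry'
is arbitrary; 'hyperv' is the type this branch registers in handlers.py):

    from cloudinit import reporting
    from cloudinit.reporting import events

    # Instantiate a handler from config, mirroring DEFAULT_CONFIG's shape.
    reporting.update_configuration({'telemetry': {'type': 'hyperv'}})
    events.report_start_event('init', 'starting init stage')
    # ... emit more events ...
    # Drain every registered handler that implements flush(); for the
    # Hyper-V handler this blocks until its queue of KVP writes is empty.
    reporting.flush_events()
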
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 4066076..6d23558 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -1,17 +1,32 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
 import abc
+import fcntl
 import json
 import six
+import os
+import re
+import struct
+import threading
+import time
 
 from cloudinit import log as logging
 from cloudinit.registry import DictRegistry
 from cloudinit import (url_helper, util)
+from datetime import datetime
 
+if six.PY2:
+    from multiprocessing.queues import JoinableQueue as JQueue
+else:
+    from queue import Queue as JQueue
 
 LOG = logging.getLogger(__name__)
 
 
+class ReportException(Exception):
+    pass
+
+
 @six.add_metaclass(abc.ABCMeta)
 class ReportingHandler(object):
     """Base class for report handlers.
@@ -24,6 +39,10 @@ class ReportingHandler(object):
     def publish_event(self, event):
         """Publish an event."""
 
+    def flush(self):
+        """Ensure ReportingHandler has published all events"""
+        pass
+
 
 class LogHandler(ReportingHandler):
     """Publishes events to the cloud-init log at the ``DEBUG`` log level."""
@@ -85,9 +104,236 @@ class WebHookHandler(ReportingHandler):
         LOG.warning("failed posting event: %s", event.as_string())
 
 
+class HyperVKvpReportingHandler(ReportingHandler):
+    """
+    Reports events to a Hyper-V host using Key-Value-Pair exchange protocol
+    and can be used to obtain high level diagnostic information from the host.
+
+    To use this facility, the KVP user-space daemon (hv_kvp_daemon) has to be
+    running. It reads the kvp_file when the host requests the guest to
+    enumerate the KVP's.
+
+    This reporter collates all events for a module (origin|name) in a single
+    json string in the dictionary.
+
+    For more information, see
+    https://technet.microsoft.com/en-us/library/dn798287.aspx#Linux%20guests
+    """
+    HV_KVP_EXCHANGE_MAX_VALUE_SIZE = 2048
+    HV_KVP_EXCHANGE_MAX_KEY_SIZE = 512
+    HV_KVP_RECORD_SIZE = (HV_KVP_EXCHANGE_MAX_KEY_SIZE +
+                          HV_KVP_EXCHANGE_MAX_VALUE_SIZE)
+    EVENT_PREFIX = 'CLOUD_INIT'
+    MSG_KEY = 'msg'
+    RESULT_KEY = 'result'
+    DESC_IDX_KEY = 'msg_i'
+    JSON_SEPARATORS = (',', ':')
+    KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1'
+
+    def __init__(self,
+                 kvp_file_path=KVP_POOL_FILE_GUEST,
+                 event_types=None):
+        super(HyperVKvpReportingHandler, self).__init__()
+        self._kvp_file_path = kvp_file_path
+        self._event_types = event_types
+        self.q = JQueue()
+        self.kvp_file = None
+        self.incarnation_no = self._get_incarnation_no()
+        self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
+                                                  self.incarnation_no)
+        self._current_offset = 0
+        self.publish_thread = threading.Thread(
+                target=self._publish_event_routine)
+        self.publish_thread.daemon = True
+        self.publish_thread.start()
+
+    def _get_incarnation_no(self):
+        """
+        use the time passed as the incarnation number.
+        the incarnation number is the number which are used to
+        distinguish the old data stored in kvp and the new data.
+        """
+        uptime_str = util.uptime()
+        try:
+            return int(time.time() - float(uptime_str))
+        except ValueError:
+            LOG.warning("uptime '%s' not in correct format.", uptime_str)
+            return 0
+
+    def _iterate_kvps(self, offset):
+        """iterate the kvp file from the current offset."""
+        try:
+            with open(self._kvp_file_path, 'rb+') as f:
+                self.kvp_file = f
+                fcntl.flock(f, fcntl.LOCK_EX)
+                f.seek(offset)
+                record_data = f.read(self.HV_KVP_RECORD_SIZE)
+                while len(record_data) == self.HV_KVP_RECORD_SIZE:
+                    self._current_offset += self.HV_KVP_RECORD_SIZE
+                    kvp_item = self._decode_kvp_item(record_data)
+                    yield kvp_item
+                    record_data = f.read(self.HV_KVP_RECORD_SIZE)
+                fcntl.flock(f, fcntl.LOCK_UN)
+        finally:
+            self.kvp_file = None
+
+    def _event_key(self, event):
+        """
+        the event key format is:
+        CLOUD_INIT|<incarnation number>|<event_type>|<event_name>
+        """
+        return u"{0}|{1}|{2}".format(self.event_key_prefix,
+                                     event.event_type, event.name)
+
+    def _encode_kvp_item(self, key, value):
+        data = (struct.pack("%ds%ds" % (
+                self.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
+                self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
+                key.encode('utf-8'), value.encode('utf-8')))
+        return data
+
+    def _decode_kvp_item(self, record_data):
+        record_data_len = len(record_data)
+        if record_data_len != self.HV_KVP_RECORD_SIZE:
+            raise ReportException(
+                "record_data len not correct {0} {1}."
+                .format(record_data_len, self.HV_KVP_RECORD_SIZE))
+        k = (record_data[0:self.HV_KVP_EXCHANGE_MAX_KEY_SIZE].decode('utf-8')
+                        .strip('\x00'))
+        v = (
+            record_data[
+                self.HV_KVP_EXCHANGE_MAX_KEY_SIZE:self.HV_KVP_RECORD_SIZE
+                ].decode('utf-8').strip('\x00'))
+
+        return {'key': k, 'value': v}
+
+    def _update_kvp_item(self, record_data):
+        if self.kvp_file is None:
+            raise ReportException(
+                "kvp file '{0}' not opened."
+                .format(self._kvp_file_path))
+        self.kvp_file.seek(-self.HV_KVP_RECORD_SIZE, 1)
+        self.kvp_file.write(record_data)
+
+    def _append_kvp_item(self, record_data):
+        with open(self._kvp_file_path, 'rb+') as f:
+            fcntl.flock(f, fcntl.LOCK_EX)
+            # seek to end of the file
+            f.seek(0, 2)
+            f.write(record_data)
+            f.flush()
+            fcntl.flock(f, fcntl.LOCK_UN)
+            self._current_offset = f.tell()
+
+    def _break_down(self, key, meta_data, description):
+        del meta_data[self.MSG_KEY]
+        des_in_json = json.dumps(description)
+        des_in_json = des_in_json[1:(len(des_in_json) - 1)]
+        i = 0
+        result_array = []
+        message_place_holder = "\"" + self.MSG_KEY + "\":\"\""
+        while True:
+            meta_data[self.DESC_IDX_KEY] = i
+            meta_data[self.MSG_KEY] = ''
+            data_without_desc = json.dumps(meta_data,
+                                           separators=self.JSON_SEPARATORS)
+            room_for_desc = (
+                self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE -
+                len(data_without_desc) - 8)
+            value = data_without_desc.replace(
+                message_place_holder,
+                '"{key}":"{desc}"'.format(
+                    key=self.MSG_KEY, desc=des_in_json[:room_for_desc]))
+            result_array.append(self._encode_kvp_item(key, value))
+            i += 1
+            des_in_json = des_in_json[room_for_desc:]
+            if len(des_in_json) == 0:
+                break
+        return result_array
+
+    def _encode_event(self, event):
+        """
+        encode the event into kvp data bytes.
+        if the event content reaches the maximum length of kvp value.
+        then it would be cut to multiple slices.
+        """
+        key = self._event_key(event)
+        meta_data = {
+                "name": event.name,
+                "type": event.event_type,
+                "ts": (datetime.utcfromtimestamp(event.timestamp)
+                       .isoformat() + 'Z'),
+                }
+        if hasattr(event, self.RESULT_KEY):
+            meta_data[self.RESULT_KEY] = event.result
+        meta_data[self.MSG_KEY] = event.description
+        value = json.dumps(meta_data, separators=self.JSON_SEPARATORS)
+        # if it reaches the maximum length of kvp value,
+        # break it down to slices.
+        # this should be very corner case.
+        if len(value) > self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE:
+            return self._break_down(key, meta_data, event.description)
+        else:
+            data = self._encode_kvp_item(key, value)
+            return [data]
+
+    def _publish_event_routine(self):
+        while True:
+            try:
+                event = self.q.get(block=True)
+                need_append = True
+                try:
+                    if not os.path.exists(self._kvp_file_path):
+                        LOG.warning(
+                            "skip writing events %s to %s. file not present.",
+                            event.as_string(),
+                            self._kvp_file_path)
+                    encoded_event = self._encode_event(event)
+                    # for each encoded_event
+                    for encoded_data in (encoded_event):
+                        for kvp in self._iterate_kvps(self._current_offset):
+                            match = (
+                                re.match(
+                                    r"^{0}\|(\d+)\|.+"
+                                    .format(self.EVENT_PREFIX),
+                                    kvp['key']
+                                ))
+                            if match:
+                                match_groups = match.groups(0)
+                                if int(match_groups[0]) < self.incarnation_no:
+                                    need_append = False
+                                    self._update_kvp_item(encoded_data)
+                                    continue
+                        if need_append:
+                            self._append_kvp_item(encoded_data)
+                except IOError as e:
+                    LOG.warning(
+                        "failed posting event to kvp: %s e:%s",
+                        event.as_string(), e)
+                finally:
+                    self.q.task_done()
+
+            # when main process exits, q.get() will through EOFError
+            # indicating we should exit this thread.
+            except EOFError:
+                return
+
+    # since the saving to the kvp pool can be a time costing task
+    # if the kvp pool already contains a chunk of data,
+    # so defer it to another thread.
+    def publish_event(self, event):
+        if (not self._event_types or event.event_type in self._event_types):
+            self.q.put(event)
+
+    def flush(self):
+        LOG.debug('HyperVReportingHandler flushing remaining events')
+        self.q.join()
+
+
 available_handlers = DictRegistry()
 available_handlers.register_item('log', LogHandler)
 available_handlers.register_item('print', PrintHandler)
 available_handlers.register_item('webhook', WebHookHandler)
+available_handlers.register_item('hyperv', HyperVKvpReportingHandler)
 
 # vi: ts=4 expandtab
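
The record math above is fixed-size: every KVP entry is 512 + 2048 = 2560
bytes, with key and value NUL-padded by struct.pack. A standalone sketch of
the round trip (sizes copied from the class constants; the key and value
bytes are made up):

    import struct

    KEY_SIZE, VALUE_SIZE = 512, 2048  # HV_KVP_EXCHANGE_MAX_*_SIZE
    record = struct.pack("%ds%ds" % (KEY_SIZE, VALUE_SIZE),
                         b"CLOUD_INIT|0|boot|example", b'{"msg":"done"}')
    assert len(record) == KEY_SIZE + VALUE_SIZE  # always 2560 bytes

    # Decoding strips the NUL padding, as _decode_kvp_item does.
    key = record[:KEY_SIZE].decode('utf-8').strip('\x00')
    value = record[KEY_SIZE:].decode('utf-8').strip('\x00')
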
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 3ef8c62..e1890e2 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -164,7 +164,7 @@ class NicConfigurator(object):
             return ([subnet], route_list)
 
         # Add routes if there is no primary nic
-        if not self._primaryNic:
+        if not self._primaryNic and v4.gateways:
             route_list.extend(self.gen_ipv4_route(nic,
                                                   v4.gateways,
                                                   v4.netmask))
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index c132b57..8874d40 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -510,7 +510,7 @@ class Init(object):
             # The default frequency if handlers don't have one
             'frequency': frequency,
             # This will be used when new handlers are found
-            # to help write there contents to files with numbered
+            # to help write their contents to files with numbered
             # names...
             'handlercount': 0,
             'excluded': excluded,
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index 5bfe7fa..de24e25 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -10,7 +10,6 @@ import shutil
 import sys
 import tempfile
 import time
-import unittest
 
 import mock
 import six
@@ -28,11 +27,15 @@ except ImportError:
 
 from cloudinit.config.schema import (
     SchemaValidationError, validate_cloudconfig_schema)
+from cloudinit import cloud
+from cloudinit import distros
 from cloudinit import helpers as ch
+from cloudinit.sources import DataSourceNone
 from cloudinit import util
 
 # Used for skipping tests
 SkipTest = unittest2.SkipTest
+skipIf = unittest2.skipIf
 
 # Used for detecting different python versions
 PY2 = False
@@ -187,6 +190,29 @@ class CiTestCase(TestCase):
187 """190 """
188 raise SystemExit(code)191 raise SystemExit(code)
189192
193 def tmp_cloud(self, distro, sys_cfg=None, metadata=None):
194 """Create a cloud with tmp working directory paths.
195
196 @param distro: Name of the distro to attach to the cloud.
197 @param metadata: Optional metadata to set on the datasource.
198
199 @return: The built cloud instance.
200 """
201 self.new_root = self.tmp_dir()
202 if not sys_cfg:
203 sys_cfg = {}
204 tmp_paths = {}
205 for var in ['templates_dir', 'run_dir', 'cloud_dir']:
206 tmp_paths[var] = self.tmp_path(var, dir=self.new_root)
207 util.ensure_dir(tmp_paths[var])
208 self.paths = ch.Paths(tmp_paths)
209 cls = distros.fetch(distro)
210 mydist = cls(distro, sys_cfg, self.paths)
211 myds = DataSourceNone.DataSourceNone(sys_cfg, mydist, self.paths)
212 if metadata:
213 myds.metadata.update(metadata)
214 return cloud.Cloud(myds, self.paths, sys_cfg, mydist, None)
215
190216
191class ResourceUsingTestCase(CiTestCase):217class ResourceUsingTestCase(CiTestCase):
192218
@@ -426,21 +452,6 @@ def readResource(name, mode='r'):
 
 
 try:
-    skipIf = unittest.skipIf
-except AttributeError:
-    # Python 2.6. Doesn't have to be high fidelity.
-    def skipIf(condition, reason):
-        def decorator(func):
-            def wrapper(*args, **kws):
-                if condition:
-                    return func(*args, **kws)
-                else:
-                    print(reason, file=sys.stderr)
-            return wrapper
-        return decorator
-
-
-try:
     import jsonschema
     assert jsonschema  # avoid pyflakes error F401: import unused
     _missing_jsonschema_dep = False
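
A hypothetical test sketch showing how the new tmp_cloud() helper is meant to
be used (it is what backs the cc_ssh tests added earlier in this diff):

    from cloudinit.config import cc_ssh
    from cloudinit.tests.helpers import CiTestCase


    class TestExample(CiTestCase):
        """Illustrative only; not part of this branch."""

        def test_module_runs_against_tmp_cloud(self):
            # Cloud backed by DataSourceNone, with tmp template/run/cloud
            # directories created on the fly.
            cloud = self.tmp_cloud(
                distro='ubuntu', metadata={'public-keys': ['key1']})
            cc_ssh.handle("name", {}, cloud, None, None)
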
diff --git a/debian/changelog b/debian/changelog
index ea38c3b..f303d70 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,17 @@
+cloud-init (18.3-35-g3f6d0972-0ubuntu1) cosmic; urgency=medium
+
+  * New upstream snapshot.
+    - Add unit tests for config/cc_ssh.py [Francis Ginther]
+    - Fix the built-in cloudinit/tests/helpers:skipIf
+    - read-version: enhance error message [Joshua Powers]
+    - hyperv_reporting_handler: simplify threaded publisher
+    - VMWare: Fix a network config bug in vm with static IPv4 and no gateway.
+      [Pengpeng Sun]
+    - logging: Add logging config type hyperv for reporting via Azure KVP
+      [Andy Liu]
+
+ -- Chad Smith <chad.smith@canonical.com>  Sat, 01 Sep 2018 12:08:52 -0600
+
 cloud-init (18.3-29-gdab59087-0ubuntu1) cosmic; urgency=medium
 
   * New upstream snapshot.
diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py
new file mode 100644
index 0000000..2e64c6c
--- /dev/null
+++ b/tests/unittests/test_reporting_hyperv.py
@@ -0,0 +1,134 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.reporting import events
+from cloudinit.reporting import handlers
+
+import json
+import os
+
+from cloudinit import util
+from cloudinit.tests.helpers import CiTestCase
+
+
+class TestKvpEncoding(CiTestCase):
+    def test_encode_decode(self):
+        kvp = {'key': 'key1', 'value': 'value1'}
+        kvp_reporting = handlers.HyperVKvpReportingHandler()
+        data = kvp_reporting._encode_kvp_item(kvp['key'], kvp['value'])
+        self.assertEqual(len(data), kvp_reporting.HV_KVP_RECORD_SIZE)
+        decoded_kvp = kvp_reporting._decode_kvp_item(data)
+        self.assertEqual(kvp, decoded_kvp)
+
+
+class TextKvpReporter(CiTestCase):
+    def setUp(self):
+        super(TextKvpReporter, self).setUp()
+        self.tmp_file_path = self.tmp_path('kvp_pool_file')
+        util.ensure_file(self.tmp_file_path)
+
+    def test_event_type_can_be_filtered(self):
+        reporter = handlers.HyperVKvpReportingHandler(
+            kvp_file_path=self.tmp_file_path,
+            event_types=['foo', 'bar'])
+
+        reporter.publish_event(
+            events.ReportingEvent('foo', 'name', 'description'))
+        reporter.publish_event(
+            events.ReportingEvent('some_other', 'name', 'description3'))
+        reporter.q.join()
+
+        kvps = list(reporter._iterate_kvps(0))
+        self.assertEqual(1, len(kvps))
+
+        reporter.publish_event(
+            events.ReportingEvent('bar', 'name', 'description2'))
+        reporter.q.join()
+        kvps = list(reporter._iterate_kvps(0))
+        self.assertEqual(2, len(kvps))
+
+        self.assertIn('foo', kvps[0]['key'])
+        self.assertIn('bar', kvps[1]['key'])
+        self.assertNotIn('some_other', kvps[0]['key'])
+        self.assertNotIn('some_other', kvps[1]['key'])
+
+    def test_events_are_over_written(self):
+        reporter = handlers.HyperVKvpReportingHandler(
+            kvp_file_path=self.tmp_file_path)
+
+        self.assertEqual(0, len(list(reporter._iterate_kvps(0))))
+
+        reporter.publish_event(
+            events.ReportingEvent('foo', 'name1', 'description'))
+        reporter.publish_event(
+            events.ReportingEvent('foo', 'name2', 'description'))
+        reporter.q.join()
+        self.assertEqual(2, len(list(reporter._iterate_kvps(0))))
+
+        reporter2 = handlers.HyperVKvpReportingHandler(
+            kvp_file_path=self.tmp_file_path)
+        reporter2.incarnation_no = reporter.incarnation_no + 1
+        reporter2.publish_event(
+            events.ReportingEvent('foo', 'name3', 'description'))
+        reporter2.q.join()
+
+        self.assertEqual(2, len(list(reporter2._iterate_kvps(0))))
+
+    def test_events_with_higher_incarnation_not_over_written(self):
+        reporter = handlers.HyperVKvpReportingHandler(
+            kvp_file_path=self.tmp_file_path)
+
+        self.assertEqual(0, len(list(reporter._iterate_kvps(0))))
+
+        reporter.publish_event(
+            events.ReportingEvent('foo', 'name1', 'description'))
+        reporter.publish_event(
+            events.ReportingEvent('foo', 'name2', 'description'))
+        reporter.q.join()
+        self.assertEqual(2, len(list(reporter._iterate_kvps(0))))
+
+        reporter3 = handlers.HyperVKvpReportingHandler(
+            kvp_file_path=self.tmp_file_path)
+        reporter3.incarnation_no = reporter.incarnation_no - 1
+        reporter3.publish_event(
+            events.ReportingEvent('foo', 'name3', 'description'))
+        reporter3.q.join()
+        self.assertEqual(3, len(list(reporter3._iterate_kvps(0))))
+
+    def test_finish_event_result_is_logged(self):
+        reporter = handlers.HyperVKvpReportingHandler(
+            kvp_file_path=self.tmp_file_path)
+        reporter.publish_event(
+            events.FinishReportingEvent('name2', 'description1',
+                                        result=events.status.FAIL))
+        reporter.q.join()
+        self.assertIn('FAIL', list(reporter._iterate_kvps(0))[0]['value'])
+
+    def test_file_operation_issue(self):
+        os.remove(self.tmp_file_path)
+        reporter = handlers.HyperVKvpReportingHandler(
+            kvp_file_path=self.tmp_file_path)
+        reporter.publish_event(
+            events.FinishReportingEvent('name2', 'description1',
+                                        result=events.status.FAIL))
+        reporter.q.join()
+
+    def test_event_very_long(self):
+        reporter = handlers.HyperVKvpReportingHandler(
+            kvp_file_path=self.tmp_file_path)
+        description = 'ab' * reporter.HV_KVP_EXCHANGE_MAX_VALUE_SIZE
+        long_event = events.FinishReportingEvent(
+            'event_name',
+            description,
+            result=events.status.FAIL)
+        reporter.publish_event(long_event)
+        reporter.q.join()
+        kvps = list(reporter._iterate_kvps(0))
+        self.assertEqual(3, len(kvps))
+
+        # restore from the kvp to see the content are all there
+        full_description = ''
+        for i in range(len(kvps)):
+            msg_slice = json.loads(kvps[i]['value'])
+            self.assertEqual(msg_slice['msg_i'], i)
+            full_description += msg_slice['msg']
+        self.assertEqual(description, full_description)
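
test_event_very_long exercises the _break_down() chunking path. A host-side
consumer would rejoin the slices by their msg_i index, roughly as follows
(a sketch, not part of this branch):

    import json

    def reassemble(kvps):
        """Rejoin a description split across KVP slices, ordered by msg_i."""
        slices = [json.loads(kvp['value']) for kvp in kvps]
        slices.sort(key=lambda s: s.get('msg_i', 0))
        return ''.join(s['msg'] for s in slices)
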
diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py
index 036f687..602dedb 100644
--- a/tests/unittests/test_vmware_config_file.py
+++ b/tests/unittests/test_vmware_config_file.py
@@ -2,11 +2,15 @@
 # Copyright (C) 2016 VMware INC.
 #
 # Author: Sankar Tanguturi <stanguturi@vmware.com>
+#         Pengpeng Sun <pengpengs@vmware.com>
 #
 # This file is part of cloud-init. See LICENSE file for license information.
 
 import logging
+import os
 import sys
+import tempfile
+import textwrap
 
 from cloudinit.sources.DataSourceOVF import get_network_config_from_conf
 from cloudinit.sources.DataSourceOVF import read_vmware_imc
@@ -343,4 +347,115 @@ class TestVmwareConfigFile(CiTestCase):
         conf = Config(cf)
         self.assertEqual("test-script", conf.custom_script_name)
 
+
+class TestVmwareNetConfig(CiTestCase):
+    """Test conversion of vmware config to cloud-init config."""
+
+    def _get_NicConfigurator(self, text):
+        fp = None
+        try:
+            with tempfile.NamedTemporaryFile(mode="w", dir=self.tmp_dir(),
+                                             delete=False) as fp:
+                fp.write(text)
+                fp.close()
+            cfg = Config(ConfigFile(fp.name))
+            return NicConfigurator(cfg.nics, use_system_devices=False)
+        finally:
+            if fp:
+                os.unlink(fp.name)
+
+    def test_non_primary_nic_without_gateway(self):
+        """A non primary nic set is not required to have a gateway."""
+        config = textwrap.dedent("""\
+            [NETWORK]
+            NETWORKING = yes
+            BOOTPROTO = dhcp
+            HOSTNAME = myhost1
+            DOMAINNAME = eng.vmware.com
+
+            [NIC-CONFIG]
+            NICS = NIC1
+
+            [NIC1]
+            MACADDR = 00:50:56:a6:8c:08
+            ONBOOT = yes
+            IPv4_MODE = BACKWARDS_COMPATIBLE
+            BOOTPROTO = static
+            IPADDR = 10.20.87.154
+            NETMASK = 255.255.252.0
+            """)
+        nc = self._get_NicConfigurator(config)
+        self.assertEqual(
+            [{'type': 'physical', 'name': 'NIC1',
+              'mac_address': '00:50:56:a6:8c:08',
+              'subnets': [
+                  {'control': 'auto', 'type': 'static',
+                   'address': '10.20.87.154', 'netmask': '255.255.252.0'}]}],
+            nc.generate())
+
+    def test_non_primary_nic_with_gateway(self):
+        """A non primary nic set can have a gateway."""
+        config = textwrap.dedent("""\
+            [NETWORK]
+            NETWORKING = yes
+            BOOTPROTO = dhcp
+            HOSTNAME = myhost1
+            DOMAINNAME = eng.vmware.com
+
+            [NIC-CONFIG]
+            NICS = NIC1
+
+            [NIC1]
+            MACADDR = 00:50:56:a6:8c:08
+            ONBOOT = yes
+            IPv4_MODE = BACKWARDS_COMPATIBLE
+            BOOTPROTO = static
+            IPADDR = 10.20.87.154
+            NETMASK = 255.255.252.0
+            GATEWAY = 10.20.87.253
+            """)
+        nc = self._get_NicConfigurator(config)
+        self.assertEqual(
+            [{'type': 'physical', 'name': 'NIC1',
+              'mac_address': '00:50:56:a6:8c:08',
+              'subnets': [
+                  {'control': 'auto', 'type': 'static',
+                   'address': '10.20.87.154', 'netmask': '255.255.252.0'}]},
+             {'type': 'route', 'destination': '10.20.84.0/22',
+              'gateway': '10.20.87.253', 'metric': 10000}],
+            nc.generate())
+
+    def test_a_primary_nic_with_gateway(self):
+        """A primary nic set can have a gateway."""
+        config = textwrap.dedent("""\
+            [NETWORK]
+            NETWORKING = yes
+            BOOTPROTO = dhcp
+            HOSTNAME = myhost1
+            DOMAINNAME = eng.vmware.com
+
+            [NIC-CONFIG]
+            NICS = NIC1
+
+            [NIC1]
+            MACADDR = 00:50:56:a6:8c:08
+            ONBOOT = yes
+            IPv4_MODE = BACKWARDS_COMPATIBLE
+            BOOTPROTO = static
+            IPADDR = 10.20.87.154
+            NETMASK = 255.255.252.0
+            PRIMARY = true
+            GATEWAY = 10.20.87.253
+            """)
+        nc = self._get_NicConfigurator(config)
+        self.assertEqual(
+            [{'type': 'physical', 'name': 'NIC1',
+              'mac_address': '00:50:56:a6:8c:08',
+              'subnets': [
+                  {'control': 'auto', 'type': 'static',
+                   'address': '10.20.87.154', 'netmask': '255.255.252.0',
+                   'gateway': '10.20.87.253'}]}],
+            nc.generate())
+
+
 # vi: ts=4 expandtab
diff --git a/tools/read-version b/tools/read-version
index 3ea9e66..e69c2ce 100755
--- a/tools/read-version
+++ b/tools/read-version
@@ -76,6 +76,12 @@ if is_gitdir(_tdir) and which("git"):
     if not version.startswith(src_version):
         sys.stderr.write("git describe version (%s) differs from "
                          "cloudinit.version (%s)\n" % (version, src_version))
+        sys.stderr.write(
+            "Please get the latest upstream tags.\n"
+            "As an example, this can be done with the following:\n"
+            "$ git remote add upstream https://git.launchpad.net/cloud-init\n"
+            "$ git fetch upstream --tags\n"
+        )
         sys.exit(1)
 
     version_long = tiny_p(cmd + ["--long"]).strip()
