Merge ~chad.smith/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel
Proposed by: Chad Smith
Status: Merged
Merged at revision: 1c047bd2a9661be9a82e072723f15560eebcafdd
Proposed branch: ~chad.smith/cloud-init:ubuntu/devel
Merge into: cloud-init:ubuntu/devel
Diff against target: 955 lines (+706/-25), 13 files modified
  cloudinit/cloud.py (+2/-2)
  cloudinit/cmd/main.py (+5/-3)
  cloudinit/config/tests/test_ssh.py (+147/-0)
  cloudinit/distros/__init__.py (+1/-1)
  cloudinit/reporting/__init__.py (+7/-1)
  cloudinit/reporting/handlers.py (+246/-0)
  cloudinit/sources/helpers/vmware/imc/config_nic.py (+1/-1)
  cloudinit/stages.py (+1/-1)
  cloudinit/tests/helpers.py (+27/-16)
  debian/changelog (+14/-0)
  tests/unittests/test_reporting_hyperv.py (+134/-0)
  tests/unittests/test_vmware_config_file.py (+115/-0)
  tools/read-version (+6/-0)
Related bugs: (none listed)
| Reviewer | Review Type | Date Requested | Status |
|---|---|---|---|
| Server Team CI bot | continuous-integration | | Approve |
| cloud-init Committers | | | Pending |
Review via email:
Commit message
New-upstream-snapshot
Description of the change
Revision history for this message

Server Team CI bot (server-team-bot) wrote:
PASSED: Continuous integration, rev:00362e3be579a8157e0a915b250888c39eb408fc
https://jenkins.ubuntu.com/server/job/cloud-init-ci/284/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
SUCCESS: Ubuntu LTS: Build
SUCCESS: Ubuntu LTS: Integration
IN_PROGRESS: Declarative: Post Actions
Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/284/rebuild
review: Approve (continuous-integration)
Revision history for this message

Server Team CI bot (server-team-bot) wrote:
PASSED: Continuous integration, rev:1c047bd2a96
https:/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
SUCCESS: Ubuntu LTS: Build
SUCCESS: Ubuntu LTS: Integration
IN_PROGRESS: Declarative: Post Actions
Click here to trigger a rebuild:
https:/
review: Approve (continuous-integration)
Preview Diff
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index 6d12c43..7ae98e1 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -47,7 +47,7 @@ class Cloud(object):
 
     @property
     def cfg(self):
-        # Ensure that not indirectly modified
+        # Ensure that cfg is not indirectly modified
         return copy.deepcopy(self._cfg)
 
     def run(self, name, functor, args, freq=None, clear_on_fail=False):
@@ -61,7 +61,7 @@ class Cloud(object):
             return None
         return fn
 
-    # The rest of thes are just useful proxies
+    # The rest of these are just useful proxies
     def get_userdata(self, apply_filter=True):
         return self.datasource.get_userdata(apply_filter)
 
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index d6ba90f..4ea4fe7 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -315,7 +315,7 @@ def main_init(name, args):
         existing = "trust"
 
     init.purge_cache()
-    # Delete the non-net file as well
+    # Delete the no-net file as well
    util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))
 
     # Stage 5
@@ -339,7 +339,7 @@ def main_init(name, args):
                 " Likely bad things to come!"))
         if not args.force:
             init.apply_network_config(bring_up=not args.local)
-            LOG.debug("[%s] Exiting without datasource in local mode", mode)
+            LOG.debug("[%s] Exiting without datasource", mode)
             if mode == sources.DSMODE_LOCAL:
                 return (None, [])
             else:
@@ -877,9 +877,11 @@ def main(sysv_args=None):
         rname, rdesc, reporting_enabled=report_on)
 
     with args.reporter:
-        return util.log_time(
+        retval = util.log_time(
             logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
             get_uptime=True, func=functor, args=(name, args))
+        reporting.flush_events()
+        return retval
 
 
 if __name__ == '__main__':
diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py
new file mode 100644
index 0000000..7441d9e
--- /dev/null
+++ b/cloudinit/config/tests/test_ssh.py
@@ -0,0 +1,147 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+
+from cloudinit.config import cc_ssh
+from cloudinit.tests.helpers import CiTestCase, mock
+
+MODPATH = "cloudinit.config.cc_ssh."
+
+
+@mock.patch(MODPATH + "ssh_util.setup_user_keys")
+class TestHandleSsh(CiTestCase):
+    """Test cc_ssh handling of ssh config."""
+
+    def test_apply_credentials_with_user(self, m_setup_keys):
+        """Apply keys for the given user and root."""
+        keys = ["key1"]
+        user = "clouduser"
+        options = cc_ssh.DISABLE_ROOT_OPTS
+        cc_ssh.apply_credentials(keys, user, False, options)
+        self.assertEqual([mock.call(set(keys), user),
+                          mock.call(set(keys), "root", options="")],
+                         m_setup_keys.call_args_list)
+
+    def test_apply_credentials_with_no_user(self, m_setup_keys):
+        """Apply keys for root only."""
+        keys = ["key1"]
+        user = None
+        options = cc_ssh.DISABLE_ROOT_OPTS
+        cc_ssh.apply_credentials(keys, user, False, options)
+        self.assertEqual([mock.call(set(keys), "root", options="")],
+                         m_setup_keys.call_args_list)
+
+    def test_apply_credentials_with_user_disable_root(self, m_setup_keys):
+        """Apply keys for the given user and disable root ssh."""
+        keys = ["key1"]
+        user = "clouduser"
+        options = cc_ssh.DISABLE_ROOT_OPTS
+        cc_ssh.apply_credentials(keys, user, True, options)
+        options = options.replace("$USER", user)
+        self.assertEqual([mock.call(set(keys), user),
+                          mock.call(set(keys), "root", options=options)],
+                         m_setup_keys.call_args_list)
+
+    def test_apply_credentials_with_no_user_disable_root(self, m_setup_keys):
+        """Apply keys with no user and disable root ssh."""
+        keys = ["key1"]
+        user = None
+        options = cc_ssh.DISABLE_ROOT_OPTS
+        cc_ssh.apply_credentials(keys, user, True, options)
+        options = options.replace("$USER", "NONE")
+        self.assertEqual([mock.call(set(keys), "root", options=options)],
+                         m_setup_keys.call_args_list)
+
+    @mock.patch(MODPATH + "glob.glob")
+    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+    @mock.patch(MODPATH + "os.path.exists")
+    def test_handle_no_cfg(self, m_path_exists, m_nug,
+                           m_glob, m_setup_keys):
+        """Test handle with no config ignores generating existing keyfiles."""
+        cfg = {}
+        keys = ["key1"]
+        m_glob.return_value = []  # Return no matching keys to prevent removal
+        # Mock os.path.exists to True to short-circuit the key writing logic
+        m_path_exists.return_value = True
+        m_nug.return_value = ([], {})
+        cloud = self.tmp_cloud(
+            distro='ubuntu', metadata={'public-keys': keys})
+        cc_ssh.handle("name", cfg, cloud, None, None)
+        options = cc_ssh.DISABLE_ROOT_OPTS.replace("$USER", "NONE")
+        m_glob.assert_called_once_with('/etc/ssh/ssh_host_*key*')
+        self.assertIn(
+            [mock.call('/etc/ssh/ssh_host_rsa_key'),
+             mock.call('/etc/ssh/ssh_host_dsa_key'),
+             mock.call('/etc/ssh/ssh_host_ecdsa_key'),
+             mock.call('/etc/ssh/ssh_host_ed25519_key')],
+            m_path_exists.call_args_list)
+        self.assertEqual([mock.call(set(keys), "root", options=options)],
+                         m_setup_keys.call_args_list)
+
+    @mock.patch(MODPATH + "glob.glob")
+    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+    @mock.patch(MODPATH + "os.path.exists")
+    def test_handle_no_cfg_and_default_root(self, m_path_exists, m_nug,
+                                            m_glob, m_setup_keys):
+        """Test handle with no config and a default distro user."""
+        cfg = {}
+        keys = ["key1"]
+        user = "clouduser"
+        m_glob.return_value = []  # Return no matching keys to prevent removal
+        # Mock os.path.exists to True to short-circuit the key writing logic
+        m_path_exists.return_value = True
+        m_nug.return_value = ({user: {"default": user}}, {})
+        cloud = self.tmp_cloud(
+            distro='ubuntu', metadata={'public-keys': keys})
+        cc_ssh.handle("name", cfg, cloud, None, None)
+
+        options = cc_ssh.DISABLE_ROOT_OPTS.replace("$USER", user)
+        self.assertEqual([mock.call(set(keys), user),
+                          mock.call(set(keys), "root", options=options)],
+                         m_setup_keys.call_args_list)
+
+    @mock.patch(MODPATH + "glob.glob")
+    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+    @mock.patch(MODPATH + "os.path.exists")
+    def test_handle_cfg_with_explicit_disable_root(self, m_path_exists, m_nug,
+                                                   m_glob, m_setup_keys):
+        """Test handle with explicit disable_root and a default distro user."""
+        # This test is identical to test_handle_no_cfg_and_default_root,
+        # except this uses an explicit cfg value
+        cfg = {"disable_root": True}
+        keys = ["key1"]
+        user = "clouduser"
+        m_glob.return_value = []  # Return no matching keys to prevent removal
+        # Mock os.path.exists to True to short-circuit the key writing logic
+        m_path_exists.return_value = True
+        m_nug.return_value = ({user: {"default": user}}, {})
+        cloud = self.tmp_cloud(
+            distro='ubuntu', metadata={'public-keys': keys})
+        cc_ssh.handle("name", cfg, cloud, None, None)
+
+        options = cc_ssh.DISABLE_ROOT_OPTS.replace("$USER", user)
+        self.assertEqual([mock.call(set(keys), user),
+                          mock.call(set(keys), "root", options=options)],
+                         m_setup_keys.call_args_list)
+
+    @mock.patch(MODPATH + "glob.glob")
+    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+    @mock.patch(MODPATH + "os.path.exists")
+    def test_handle_cfg_without_disable_root(self, m_path_exists, m_nug,
+                                             m_glob, m_setup_keys):
+        """Test handle with disable_root == False."""
+        # When disable_root == False, the ssh redirect for root is skipped
+        cfg = {"disable_root": False}
+        keys = ["key1"]
+        user = "clouduser"
+        m_glob.return_value = []  # Return no matching keys to prevent removal
+        # Mock os.path.exists to True to short-circuit the key writing logic
+        m_path_exists.return_value = True
+        m_nug.return_value = ({user: {"default": user}}, {})
+        cloud = self.tmp_cloud(
+            distro='ubuntu', metadata={'public-keys': keys})
+        cloud.get_public_ssh_keys = mock.Mock(return_value=keys)
+        cc_ssh.handle("name", cfg, cloud, None, None)
+
+        self.assertEqual([mock.call(set(keys), user),
+                          mock.call(set(keys), "root", options="")],
+                         m_setup_keys.call_args_list)
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index ab0b077..fde054e 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -157,7 +157,7 @@ class Distro(object):
                          distro)
         header = '\n'.join([
             "# Converted from network_config for distro %s" % distro,
-            "# Implmentation of _write_network_config is needed."
+            "# Implementation of _write_network_config is needed."
         ])
         ns = network_state.parse_net_config_data(netconfig)
         contents = eni.network_state_to_eni(
diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
index 1ed2b48..ed5c703 100644
--- a/cloudinit/reporting/__init__.py
+++ b/cloudinit/reporting/__init__.py
@@ -18,7 +18,7 @@ DEFAULT_CONFIG = {
 
 
 def update_configuration(config):
-    """Update the instanciated_handler_registry.
+    """Update the instantiated_handler_registry.
 
     :param config:
         The dictionary containing changes to apply. If a key is given
@@ -37,6 +37,12 @@ def update_configuration(config):
         instantiated_handler_registry.register_item(handler_name, instance)
 
 
+def flush_events():
+    for _, handler in instantiated_handler_registry.registered_items.items():
+        if hasattr(handler, 'flush'):
+            handler.flush()
+
+
 instantiated_handler_registry = DictRegistry()
 update_configuration(DEFAULT_CONFIG)
 
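Reviewer note: the new flush_events() duck-types on a `flush` attribute, so only handlers that buffer events (such as the new Hyper-V KVP handler in the next file) do real work. A minimal sketch of driving it, assuming the {handler_name: {'type': ...}} config shape that update_configuration() already consumes — the handler name 'telemetry' here is illustrative, not from this branch:

```python
# Sketch only: enable the new 'hyperv' reporting type, then drain it.
# 'telemetry' is a hypothetical handler name; 'type: hyperv' is what the
# branch registers in cloudinit/reporting/handlers.py.
from cloudinit import reporting
from cloudinit.reporting import events

reporting.update_configuration({'telemetry': {'type': 'hyperv'}})
events.report_start_event('init-local', 'searching for local datasource')
# ... instrumented work happens here ...
reporting.flush_events()  # blocks until handlers exposing flush() have drained
```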
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 4066076..6d23558 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -1,17 +1,32 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
 import abc
+import fcntl
 import json
 import six
+import os
+import re
+import struct
+import threading
+import time
 
 from cloudinit import log as logging
 from cloudinit.registry import DictRegistry
 from cloudinit import (url_helper, util)
+from datetime import datetime
 
+if six.PY2:
+    from multiprocessing.queues import JoinableQueue as JQueue
+else:
+    from queue import Queue as JQueue
 
 LOG = logging.getLogger(__name__)
 
 
+class ReportException(Exception):
+    pass
+
+
 @six.add_metaclass(abc.ABCMeta)
 class ReportingHandler(object):
     """Base class for report handlers.
@@ -24,6 +39,10 @@ class ReportingHandler(object):
     def publish_event(self, event):
         """Publish an event."""
 
+    def flush(self):
+        """Ensure ReportingHandler has published all events"""
+        pass
+
 
 class LogHandler(ReportingHandler):
     """Publishes events to the cloud-init log at the ``DEBUG`` log level."""
@@ -85,9 +104,236 @@ class WebHookHandler(ReportingHandler):
             LOG.warning("failed posting event: %s", event.as_string())
 
 
+class HyperVKvpReportingHandler(ReportingHandler):
+    """
+    Reports events to a Hyper-V host using Key-Value-Pair exchange protocol
+    and can be used to obtain high level diagnostic information from the host.
+
+    To use this facility, the KVP user-space daemon (hv_kvp_daemon) has to be
+    running. It reads the kvp_file when the host requests the guest to
+    enumerate the KVP's.
+
+    This reporter collates all events for a module (origin|name) in a single
+    json string in the dictionary.
+
+    For more information, see
+    https://technet.microsoft.com/en-us/library/dn798287.aspx#Linux%20guests
+    """
+    HV_KVP_EXCHANGE_MAX_VALUE_SIZE = 2048
+    HV_KVP_EXCHANGE_MAX_KEY_SIZE = 512
+    HV_KVP_RECORD_SIZE = (HV_KVP_EXCHANGE_MAX_KEY_SIZE +
+                          HV_KVP_EXCHANGE_MAX_VALUE_SIZE)
+    EVENT_PREFIX = 'CLOUD_INIT'
+    MSG_KEY = 'msg'
+    RESULT_KEY = 'result'
+    DESC_IDX_KEY = 'msg_i'
+    JSON_SEPARATORS = (',', ':')
+    KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1'
+
+    def __init__(self,
+                 kvp_file_path=KVP_POOL_FILE_GUEST,
+                 event_types=None):
+        super(HyperVKvpReportingHandler, self).__init__()
+        self._kvp_file_path = kvp_file_path
+        self._event_types = event_types
+        self.q = JQueue()
+        self.kvp_file = None
+        self.incarnation_no = self._get_incarnation_no()
+        self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
+                                                  self.incarnation_no)
+        self._current_offset = 0
+        self.publish_thread = threading.Thread(
+            target=self._publish_event_routine)
+        self.publish_thread.daemon = True
+        self.publish_thread.start()
+
+    def _get_incarnation_no(self):
+        """
+        use the time passed as the incarnation number.
+        the incarnation number is the number which is used to
+        distinguish the old data stored in kvp from the new data.
+        """
+        uptime_str = util.uptime()
+        try:
+            return int(time.time() - float(uptime_str))
+        except ValueError:
+            LOG.warning("uptime '%s' not in correct format.", uptime_str)
+            return 0
+
+    def _iterate_kvps(self, offset):
+        """iterate the kvp file from the current offset."""
+        try:
+            with open(self._kvp_file_path, 'rb+') as f:
+                self.kvp_file = f
+                fcntl.flock(f, fcntl.LOCK_EX)
+                f.seek(offset)
+                record_data = f.read(self.HV_KVP_RECORD_SIZE)
+                while len(record_data) == self.HV_KVP_RECORD_SIZE:
+                    self._current_offset += self.HV_KVP_RECORD_SIZE
+                    kvp_item = self._decode_kvp_item(record_data)
+                    yield kvp_item
+                    record_data = f.read(self.HV_KVP_RECORD_SIZE)
+                fcntl.flock(f, fcntl.LOCK_UN)
+        finally:
+            self.kvp_file = None
+
+    def _event_key(self, event):
+        """
+        the event key format is:
+        CLOUD_INIT|<incarnation number>|<event_type>|<event_name>
+        """
+        return u"{0}|{1}|{2}".format(self.event_key_prefix,
+                                     event.event_type, event.name)
+
+    def _encode_kvp_item(self, key, value):
+        data = (struct.pack("%ds%ds" % (
+                self.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
+                self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
+                key.encode('utf-8'), value.encode('utf-8')))
+        return data
+
+    def _decode_kvp_item(self, record_data):
+        record_data_len = len(record_data)
+        if record_data_len != self.HV_KVP_RECORD_SIZE:
+            raise ReportException(
+                "record_data len not correct {0} {1}."
+                .format(record_data_len, self.HV_KVP_RECORD_SIZE))
+        k = (record_data[0:self.HV_KVP_EXCHANGE_MAX_KEY_SIZE].decode('utf-8')
+             .strip('\x00'))
+        v = (
+            record_data[
+                self.HV_KVP_EXCHANGE_MAX_KEY_SIZE:self.HV_KVP_RECORD_SIZE
+            ].decode('utf-8').strip('\x00'))
+
+        return {'key': k, 'value': v}
+
+    def _update_kvp_item(self, record_data):
+        if self.kvp_file is None:
+            raise ReportException(
+                "kvp file '{0}' not opened."
+                .format(self._kvp_file_path))
+        self.kvp_file.seek(-self.HV_KVP_RECORD_SIZE, 1)
+        self.kvp_file.write(record_data)
+
+    def _append_kvp_item(self, record_data):
+        with open(self._kvp_file_path, 'rb+') as f:
+            fcntl.flock(f, fcntl.LOCK_EX)
+            # seek to end of the file
+            f.seek(0, 2)
+            f.write(record_data)
+            f.flush()
+            fcntl.flock(f, fcntl.LOCK_UN)
+            self._current_offset = f.tell()
+
+    def _break_down(self, key, meta_data, description):
+        del meta_data[self.MSG_KEY]
+        des_in_json = json.dumps(description)
+        des_in_json = des_in_json[1:(len(des_in_json) - 1)]
+        i = 0
+        result_array = []
+        message_place_holder = "\"" + self.MSG_KEY + "\":\"\""
+        while True:
+            meta_data[self.DESC_IDX_KEY] = i
+            meta_data[self.MSG_KEY] = ''
+            data_without_desc = json.dumps(meta_data,
+                                           separators=self.JSON_SEPARATORS)
+            room_for_desc = (
+                self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE -
+                len(data_without_desc) - 8)
+            value = data_without_desc.replace(
+                message_place_holder,
+                '"{key}":"{desc}"'.format(
+                    key=self.MSG_KEY, desc=des_in_json[:room_for_desc]))
+            result_array.append(self._encode_kvp_item(key, value))
+            i += 1
+            des_in_json = des_in_json[room_for_desc:]
+            if len(des_in_json) == 0:
+                break
+        return result_array
+
+    def _encode_event(self, event):
+        """
+        encode the event into kvp data bytes.
+        if the event content reaches the maximum length of a kvp value,
+        then it is cut into multiple slices.
+        """
+        key = self._event_key(event)
+        meta_data = {
+            "name": event.name,
+            "type": event.event_type,
+            "ts": (datetime.utcfromtimestamp(event.timestamp)
+                   .isoformat() + 'Z'),
+        }
+        if hasattr(event, self.RESULT_KEY):
+            meta_data[self.RESULT_KEY] = event.result
+        meta_data[self.MSG_KEY] = event.description
+        value = json.dumps(meta_data, separators=self.JSON_SEPARATORS)
+        # if it reaches the maximum length of kvp value,
+        # break it down to slices.
+        # this should be a very rare corner case.
+        if len(value) > self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE:
+            return self._break_down(key, meta_data, event.description)
+        else:
+            data = self._encode_kvp_item(key, value)
+            return [data]
+
+    def _publish_event_routine(self):
+        while True:
+            try:
+                event = self.q.get(block=True)
+                need_append = True
+                try:
+                    if not os.path.exists(self._kvp_file_path):
+                        LOG.warning(
+                            "skip writing events %s to %s. file not present.",
+                            event.as_string(),
+                            self._kvp_file_path)
+                    encoded_event = self._encode_event(event)
+                    # for each encoded_event
+                    for encoded_data in (encoded_event):
+                        for kvp in self._iterate_kvps(self._current_offset):
+                            match = (
+                                re.match(
+                                    r"^{0}\|(\d+)\|.+"
+                                    .format(self.EVENT_PREFIX),
+                                    kvp['key']
+                                ))
+                            if match:
+                                match_groups = match.groups(0)
+                                if int(match_groups[0]) < self.incarnation_no:
+                                    need_append = False
+                                    self._update_kvp_item(encoded_data)
+                                    continue
+                        if need_append:
+                            self._append_kvp_item(encoded_data)
+                except IOError as e:
+                    LOG.warning(
+                        "failed posting event to kvp: %s e:%s",
+                        event.as_string(), e)
+                finally:
+                    self.q.task_done()
+
+            # when main process exits, q.get() will throw EOFError
+            # indicating we should exit this thread.
+            except EOFError:
+                return
+
+    # since saving to the kvp pool can be a time-costing task
+    # if the kvp pool already contains a chunk of data,
+    # defer it to another thread.
+    def publish_event(self, event):
+        if (not self._event_types or event.event_type in self._event_types):
+            self.q.put(event)
+
+    def flush(self):
+        LOG.debug('HyperVReportingHandler flushing remaining events')
+        self.q.join()
+
+
 available_handlers = DictRegistry()
 available_handlers.register_item('log', LogHandler)
 available_handlers.register_item('print', PrintHandler)
 available_handlers.register_item('webhook', WebHookHandler)
+available_handlers.register_item('hyperv', HyperVKvpReportingHandler)
 
 # vi: ts=4 expandtab
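For reviewers who want to exercise the queue/flush path outside a full boot, a minimal sketch follows; the scratch path '/tmp/kvp_pool' is illustrative (the real guest pool is /var/lib/hyperv/.kvp_pool_1, read by hv_kvp_daemon), and the unit tests added below in tests/unittests/test_reporting_hyperv.py do essentially this against a tmp pool file:

```python
# Sketch: exercising HyperVKvpReportingHandler against a scratch pool file.
from cloudinit import util
from cloudinit.reporting import events, handlers

util.ensure_file('/tmp/kvp_pool')  # illustrative stand-in for the guest pool
reporter = handlers.HyperVKvpReportingHandler(kvp_file_path='/tmp/kvp_pool')
reporter.publish_event(
    events.ReportingEvent('start', 'init-network', 'starting network init'))
reporter.flush()  # q.join(): wait for the daemon thread to write the record
for kvp in reporter._iterate_kvps(0):
    # keys look like CLOUD_INIT|<incarnation>|start|init-network
    print(kvp['key'], kvp['value'])
```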
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 3ef8c62..e1890e2 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -164,7 +164,7 @@ class NicConfigurator(object):
             return ([subnet], route_list)
 
         # Add routes if there is no primary nic
-        if not self._primaryNic:
+        if not self._primaryNic and v4.gateways:
             route_list.extend(self.gen_ipv4_route(nic,
                                                   v4.gateways,
                                                   v4.netmask))
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index c132b57..8874d40 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -510,7 +510,7 @@ class Init(object):
             # The default frequency if handlers don't have one
             'frequency': frequency,
             # This will be used when new handlers are found
-            # to help write there contents to files with numbered
+            # to help write their contents to files with numbered
             # names...
             'handlercount': 0,
             'excluded': excluded,
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index 5bfe7fa..de24e25 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -10,7 +10,6 @@ import shutil
 import sys
 import tempfile
 import time
-import unittest
 
 import mock
 import six
@@ -28,11 +27,15 @@ except ImportError:
 
 from cloudinit.config.schema import (
     SchemaValidationError, validate_cloudconfig_schema)
+from cloudinit import cloud
+from cloudinit import distros
 from cloudinit import helpers as ch
+from cloudinit.sources import DataSourceNone
 from cloudinit import util
 
 # Used for skipping tests
 SkipTest = unittest2.SkipTest
+skipIf = unittest2.skipIf
 
 # Used for detecting different python versions
 PY2 = False
@@ -187,6 +190,29 @@ class CiTestCase(TestCase):
         """
         raise SystemExit(code)
 
+    def tmp_cloud(self, distro, sys_cfg=None, metadata=None):
+        """Create a cloud with tmp working directory paths.
+
+        @param distro: Name of the distro to attach to the cloud.
+        @param metadata: Optional metadata to set on the datasource.
+
+        @return: The built cloud instance.
+        """
+        self.new_root = self.tmp_dir()
+        if not sys_cfg:
+            sys_cfg = {}
+        tmp_paths = {}
+        for var in ['templates_dir', 'run_dir', 'cloud_dir']:
+            tmp_paths[var] = self.tmp_path(var, dir=self.new_root)
+            util.ensure_dir(tmp_paths[var])
+        self.paths = ch.Paths(tmp_paths)
+        cls = distros.fetch(distro)
+        mydist = cls(distro, sys_cfg, self.paths)
+        myds = DataSourceNone.DataSourceNone(sys_cfg, mydist, self.paths)
+        if metadata:
+            myds.metadata.update(metadata)
+        return cloud.Cloud(myds, self.paths, sys_cfg, mydist, None)
+
 
 class ResourceUsingTestCase(CiTestCase):
 
@@ -426,21 +452,6 @@ def readResource(name, mode='r'):
 
 
 try:
-    skipIf = unittest.skipIf
-except AttributeError:
-    # Python 2.6. Doesn't have to be high fidelity.
-    def skipIf(condition, reason):
-        def decorator(func):
-            def wrapper(*args, **kws):
-                if condition:
-                    return func(*args, **kws)
-                else:
-                    print(reason, file=sys.stderr)
-            return wrapper
-        return decorator
-
-
-try:
     import jsonschema
     assert jsonschema  # avoid pyflakes error F401: import unused
     _missing_jsonschema_dep = False
diff --git a/debian/changelog b/debian/changelog
index ea38c3b..f303d70 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,17 @@
+cloud-init (18.3-35-g3f6d0972-0ubuntu1) cosmic; urgency=medium
+
+  * New upstream snapshot.
+    - Add unit tests for config/cc_ssh.py [Francis Ginther]
+    - Fix the built-in cloudinit/tests/helpers:skipIf
+    - read-version: enhance error message [Joshua Powers]
+    - hyperv_reporting_handler: simplify threaded publisher
+    - VMWare: Fix a network config bug in vm with static IPv4 and no gateway.
+      [Pengpeng Sun]
+    - logging: Add logging config type hyperv for reporting via Azure KVP
+      [Andy Liu]
+
+ -- Chad Smith <chad.smith@canonical.com>  Sat, 01 Sep 2018 12:08:52 -0600
+
 cloud-init (18.3-29-gdab59087-0ubuntu1) cosmic; urgency=medium
 
   * New upstream snapshot.
diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py
new file mode 100644
index 0000000..2e64c6c
--- /dev/null
+++ b/tests/unittests/test_reporting_hyperv.py
@@ -0,0 +1,134 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.reporting import events
+from cloudinit.reporting import handlers
+
+import json
+import os
+
+from cloudinit import util
+from cloudinit.tests.helpers import CiTestCase
+
+
+class TestKvpEncoding(CiTestCase):
+    def test_encode_decode(self):
+        kvp = {'key': 'key1', 'value': 'value1'}
+        kvp_reporting = handlers.HyperVKvpReportingHandler()
+        data = kvp_reporting._encode_kvp_item(kvp['key'], kvp['value'])
+        self.assertEqual(len(data), kvp_reporting.HV_KVP_RECORD_SIZE)
+        decoded_kvp = kvp_reporting._decode_kvp_item(data)
+        self.assertEqual(kvp, decoded_kvp)
+
+
+class TextKvpReporter(CiTestCase):
+    def setUp(self):
+        super(TextKvpReporter, self).setUp()
+        self.tmp_file_path = self.tmp_path('kvp_pool_file')
+        util.ensure_file(self.tmp_file_path)
+
+    def test_event_type_can_be_filtered(self):
+        reporter = handlers.HyperVKvpReportingHandler(
+            kvp_file_path=self.tmp_file_path,
+            event_types=['foo', 'bar'])
+
+        reporter.publish_event(
+            events.ReportingEvent('foo', 'name', 'description'))
+        reporter.publish_event(
+            events.ReportingEvent('some_other', 'name', 'description3'))
+        reporter.q.join()
+
+        kvps = list(reporter._iterate_kvps(0))
+        self.assertEqual(1, len(kvps))
+
+        reporter.publish_event(
+            events.ReportingEvent('bar', 'name', 'description2'))
+        reporter.q.join()
+        kvps = list(reporter._iterate_kvps(0))
+        self.assertEqual(2, len(kvps))
+
+        self.assertIn('foo', kvps[0]['key'])
+        self.assertIn('bar', kvps[1]['key'])
+        self.assertNotIn('some_other', kvps[0]['key'])
+        self.assertNotIn('some_other', kvps[1]['key'])
+
+    def test_events_are_over_written(self):
+        reporter = handlers.HyperVKvpReportingHandler(
+            kvp_file_path=self.tmp_file_path)
+
+        self.assertEqual(0, len(list(reporter._iterate_kvps(0))))
+
+        reporter.publish_event(
+            events.ReportingEvent('foo', 'name1', 'description'))
+        reporter.publish_event(
+            events.ReportingEvent('foo', 'name2', 'description'))
+        reporter.q.join()
+        self.assertEqual(2, len(list(reporter._iterate_kvps(0))))
+
+        reporter2 = handlers.HyperVKvpReportingHandler(
+            kvp_file_path=self.tmp_file_path)
+        reporter2.incarnation_no = reporter.incarnation_no + 1
+        reporter2.publish_event(
+            events.ReportingEvent('foo', 'name3', 'description'))
+        reporter2.q.join()
+
+        self.assertEqual(2, len(list(reporter2._iterate_kvps(0))))
+
+    def test_events_with_higher_incarnation_not_over_written(self):
+        reporter = handlers.HyperVKvpReportingHandler(
+            kvp_file_path=self.tmp_file_path)
+
+        self.assertEqual(0, len(list(reporter._iterate_kvps(0))))
+
+        reporter.publish_event(
+            events.ReportingEvent('foo', 'name1', 'description'))
+        reporter.publish_event(
+            events.ReportingEvent('foo', 'name2', 'description'))
+        reporter.q.join()
+        self.assertEqual(2, len(list(reporter._iterate_kvps(0))))
+
+        reporter3 = handlers.HyperVKvpReportingHandler(
+            kvp_file_path=self.tmp_file_path)
+        reporter3.incarnation_no = reporter.incarnation_no - 1
+        reporter3.publish_event(
+            events.ReportingEvent('foo', 'name3', 'description'))
+        reporter3.q.join()
+        self.assertEqual(3, len(list(reporter3._iterate_kvps(0))))
+
+    def test_finish_event_result_is_logged(self):
+        reporter = handlers.HyperVKvpReportingHandler(
+            kvp_file_path=self.tmp_file_path)
+        reporter.publish_event(
+            events.FinishReportingEvent('name2', 'description1',
+                                        result=events.status.FAIL))
+        reporter.q.join()
+        self.assertIn('FAIL', list(reporter._iterate_kvps(0))[0]['value'])
+
+    def test_file_operation_issue(self):
+        os.remove(self.tmp_file_path)
+        reporter = handlers.HyperVKvpReportingHandler(
+            kvp_file_path=self.tmp_file_path)
+        reporter.publish_event(
+            events.FinishReportingEvent('name2', 'description1',
+                                        result=events.status.FAIL))
+        reporter.q.join()
+
+    def test_event_very_long(self):
+        reporter = handlers.HyperVKvpReportingHandler(
+            kvp_file_path=self.tmp_file_path)
+        description = 'ab' * reporter.HV_KVP_EXCHANGE_MAX_VALUE_SIZE
+        long_event = events.FinishReportingEvent(
+            'event_name',
+            description,
+            result=events.status.FAIL)
+        reporter.publish_event(long_event)
+        reporter.q.join()
+        kvps = list(reporter._iterate_kvps(0))
+        self.assertEqual(3, len(kvps))
+
+        # restore from the kvp to see that the content is all there
+        full_description = ''
+        for i in range(len(kvps)):
+            msg_slice = json.loads(kvps[i]['value'])
+            self.assertEqual(msg_slice['msg_i'], i)
+            full_description += msg_slice['msg']
+        self.assertEqual(description, full_description)
diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py
index 036f687..602dedb 100644
--- a/tests/unittests/test_vmware_config_file.py
+++ b/tests/unittests/test_vmware_config_file.py
@@ -2,11 +2,15 @@
 # Copyright (C) 2016 VMware INC.
 #
 # Author: Sankar Tanguturi <stanguturi@vmware.com>
+#         Pengpeng Sun <pengpengs@vmware.com>
 #
 # This file is part of cloud-init. See LICENSE file for license information.
 
 import logging
+import os
 import sys
+import tempfile
+import textwrap
 
 from cloudinit.sources.DataSourceOVF import get_network_config_from_conf
 from cloudinit.sources.DataSourceOVF import read_vmware_imc
@@ -343,4 +347,115 @@ class TestVmwareConfigFile(CiTestCase):
         conf = Config(cf)
         self.assertEqual("test-script", conf.custom_script_name)
 
+
+class TestVmwareNetConfig(CiTestCase):
+    """Test conversion of vmware config to cloud-init config."""
+
+    def _get_NicConfigurator(self, text):
+        fp = None
+        try:
+            with tempfile.NamedTemporaryFile(mode="w", dir=self.tmp_dir(),
+                                             delete=False) as fp:
+                fp.write(text)
+                fp.close()
+            cfg = Config(ConfigFile(fp.name))
+            return NicConfigurator(cfg.nics, use_system_devices=False)
+        finally:
+            if fp:
+                os.unlink(fp.name)
+
+    def test_non_primary_nic_without_gateway(self):
+        """A non primary nic set is not required to have a gateway."""
+        config = textwrap.dedent("""\
+            [NETWORK]
+            NETWORKING = yes
+            BOOTPROTO = dhcp
+            HOSTNAME = myhost1
+            DOMAINNAME = eng.vmware.com
+
+            [NIC-CONFIG]
+            NICS = NIC1
+
+            [NIC1]
+            MACADDR = 00:50:56:a6:8c:08
+            ONBOOT = yes
+            IPv4_MODE = BACKWARDS_COMPATIBLE
+            BOOTPROTO = static
+            IPADDR = 10.20.87.154
+            NETMASK = 255.255.252.0
+            """)
+        nc = self._get_NicConfigurator(config)
+        self.assertEqual(
+            [{'type': 'physical', 'name': 'NIC1',
+              'mac_address': '00:50:56:a6:8c:08',
+              'subnets': [
+                  {'control': 'auto', 'type': 'static',
+                   'address': '10.20.87.154', 'netmask': '255.255.252.0'}]}],
+            nc.generate())
+
+    def test_non_primary_nic_with_gateway(self):
+        """A non primary nic set can have a gateway."""
+        config = textwrap.dedent("""\
+            [NETWORK]
+            NETWORKING = yes
+            BOOTPROTO = dhcp
+            HOSTNAME = myhost1
+            DOMAINNAME = eng.vmware.com
+
+            [NIC-CONFIG]
+            NICS = NIC1
+
+            [NIC1]
+            MACADDR = 00:50:56:a6:8c:08
+            ONBOOT = yes
+            IPv4_MODE = BACKWARDS_COMPATIBLE
+            BOOTPROTO = static
+            IPADDR = 10.20.87.154
+            NETMASK = 255.255.252.0
+            GATEWAY = 10.20.87.253
+            """)
+        nc = self._get_NicConfigurator(config)
+        self.assertEqual(
+            [{'type': 'physical', 'name': 'NIC1',
+              'mac_address': '00:50:56:a6:8c:08',
+              'subnets': [
+                  {'control': 'auto', 'type': 'static',
+                   'address': '10.20.87.154', 'netmask': '255.255.252.0'}]},
+             {'type': 'route', 'destination': '10.20.84.0/22',
+              'gateway': '10.20.87.253', 'metric': 10000}],
+            nc.generate())
+
+    def test_a_primary_nic_with_gateway(self):
+        """A primary nic set can have a gateway."""
+        config = textwrap.dedent("""\
+            [NETWORK]
+            NETWORKING = yes
+            BOOTPROTO = dhcp
+            HOSTNAME = myhost1
+            DOMAINNAME = eng.vmware.com
+
+            [NIC-CONFIG]
+            NICS = NIC1
+
+            [NIC1]
+            MACADDR = 00:50:56:a6:8c:08
+            ONBOOT = yes
+            IPv4_MODE = BACKWARDS_COMPATIBLE
+            BOOTPROTO = static
+            IPADDR = 10.20.87.154
+            NETMASK = 255.255.252.0
+            PRIMARY = true
+            GATEWAY = 10.20.87.253
+            """)
+        nc = self._get_NicConfigurator(config)
+        self.assertEqual(
+            [{'type': 'physical', 'name': 'NIC1',
+              'mac_address': '00:50:56:a6:8c:08',
+              'subnets': [
+                  {'control': 'auto', 'type': 'static',
+                   'address': '10.20.87.154', 'netmask': '255.255.252.0',
+                   'gateway': '10.20.87.253'}]}],
+            nc.generate())
+
+
 # vi: ts=4 expandtab
diff --git a/tools/read-version b/tools/read-version
index 3ea9e66..e69c2ce 100755
--- a/tools/read-version
+++ b/tools/read-version
@@ -76,6 +76,12 @@ if is_gitdir(_tdir) and which("git"):
     if not version.startswith(src_version):
         sys.stderr.write("git describe version (%s) differs from "
                          "cloudinit.version (%s)\n" % (version, src_version))
+        sys.stderr.write(
+            "Please get the latest upstream tags.\n"
+            "As an example, this can be done with the following:\n"
+            "$ git remote add upstream https://git.launchpad.net/cloud-init\n"
+            "$ git fetch upstream --tags\n"
+        )
         sys.exit(1)
 
 version_long = tiny_p(cmd + ["--long"]).strip()