Merge ~chad.smith/cloud-init:feature/kvp-reporting into cloud-init:master
- Git
- lp:~chad.smith/cloud-init
- feature/kvp-reporting
- Merge into master
Proposed by
Chad Smith
Status: | Merged |
---|---|
Approved by: | Chad Smith |
Approved revision: | 0124a19cbfc5367395eb888ca504b31910982650 |
Merge reported by: | Server Team CI bot |
Merged at revision: | not available |
Proposed branch: | ~chad.smith/cloud-init:feature/kvp-reporting |
Merge into: | cloud-init:master |
Diff against target: |
510 lines (+396/-7) 7 files modified
cloudinit/cloud.py (+2/-2) cloudinit/cmd/main.py (+2/-2) cloudinit/distros/__init__.py (+1/-1) cloudinit/reporting/__init__.py (+1/-1) cloudinit/reporting/handlers.py (+255/-0) cloudinit/stages.py (+1/-1) tests/unittests/test_reporting_hyperv.py (+134/-0) |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Server Team CI bot | continuous-integration | Approve | |
cloud-init Commiters | Pending | ||
Review via email: mp+353739@code.launchpad.net |
Commit message
logging: Add logging config type hyperv for reporting via Azure KVP
Linux guests can provide information to Hyper-V hosts via KVP.
KVP allows the guests to provide any string key-value-pairs back to the
host's registry. On linux, kvp communication pools are presented as pool
files in /var/lib/hyperv.
The following reporting configuration can enable this kvp reporting in
addition to default logging if the pool files exist:
reporting:
logging:
type: log
telemetry:
type: hyperv
Author: Andy Liu <email address hidden>
Description of the change
Grabbed original branch with a minor flakes/pycodestyle patch https:/
To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote : | # |
review:
Approve
(continuous-integration)
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py | |||
2 | index 6d12c43..7ae98e1 100644 | |||
3 | --- a/cloudinit/cloud.py | |||
4 | +++ b/cloudinit/cloud.py | |||
5 | @@ -47,7 +47,7 @@ class Cloud(object): | |||
6 | 47 | 47 | ||
7 | 48 | @property | 48 | @property |
8 | 49 | def cfg(self): | 49 | def cfg(self): |
10 | 50 | # Ensure that not indirectly modified | 50 | # Ensure that cfg is not indirectly modified |
11 | 51 | return copy.deepcopy(self._cfg) | 51 | return copy.deepcopy(self._cfg) |
12 | 52 | 52 | ||
13 | 53 | def run(self, name, functor, args, freq=None, clear_on_fail=False): | 53 | def run(self, name, functor, args, freq=None, clear_on_fail=False): |
14 | @@ -61,7 +61,7 @@ class Cloud(object): | |||
15 | 61 | return None | 61 | return None |
16 | 62 | return fn | 62 | return fn |
17 | 63 | 63 | ||
19 | 64 | # The rest of thes are just useful proxies | 64 | # The rest of these are just useful proxies |
20 | 65 | def get_userdata(self, apply_filter=True): | 65 | def get_userdata(self, apply_filter=True): |
21 | 66 | return self.datasource.get_userdata(apply_filter) | 66 | return self.datasource.get_userdata(apply_filter) |
22 | 67 | 67 | ||
23 | diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py | |||
24 | index d6ba90f..c0edee1 100644 | |||
25 | --- a/cloudinit/cmd/main.py | |||
26 | +++ b/cloudinit/cmd/main.py | |||
27 | @@ -315,7 +315,7 @@ def main_init(name, args): | |||
28 | 315 | existing = "trust" | 315 | existing = "trust" |
29 | 316 | 316 | ||
30 | 317 | init.purge_cache() | 317 | init.purge_cache() |
32 | 318 | # Delete the non-net file as well | 318 | # Delete the no-net file as well |
33 | 319 | util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net")) | 319 | util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net")) |
34 | 320 | 320 | ||
35 | 321 | # Stage 5 | 321 | # Stage 5 |
36 | @@ -339,7 +339,7 @@ def main_init(name, args): | |||
37 | 339 | " Likely bad things to come!")) | 339 | " Likely bad things to come!")) |
38 | 340 | if not args.force: | 340 | if not args.force: |
39 | 341 | init.apply_network_config(bring_up=not args.local) | 341 | init.apply_network_config(bring_up=not args.local) |
41 | 342 | LOG.debug("[%s] Exiting without datasource in local mode", mode) | 342 | LOG.debug("[%s] Exiting without datasource", mode) |
42 | 343 | if mode == sources.DSMODE_LOCAL: | 343 | if mode == sources.DSMODE_LOCAL: |
43 | 344 | return (None, []) | 344 | return (None, []) |
44 | 345 | else: | 345 | else: |
45 | diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py | |||
46 | index ab0b077..fde054e 100755 | |||
47 | --- a/cloudinit/distros/__init__.py | |||
48 | +++ b/cloudinit/distros/__init__.py | |||
49 | @@ -157,7 +157,7 @@ class Distro(object): | |||
50 | 157 | distro) | 157 | distro) |
51 | 158 | header = '\n'.join([ | 158 | header = '\n'.join([ |
52 | 159 | "# Converted from network_config for distro %s" % distro, | 159 | "# Converted from network_config for distro %s" % distro, |
54 | 160 | "# Implmentation of _write_network_config is needed." | 160 | "# Implementation of _write_network_config is needed." |
55 | 161 | ]) | 161 | ]) |
56 | 162 | ns = network_state.parse_net_config_data(netconfig) | 162 | ns = network_state.parse_net_config_data(netconfig) |
57 | 163 | contents = eni.network_state_to_eni( | 163 | contents = eni.network_state_to_eni( |
58 | diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py | |||
59 | index 1ed2b48..e047767 100644 | |||
60 | --- a/cloudinit/reporting/__init__.py | |||
61 | +++ b/cloudinit/reporting/__init__.py | |||
62 | @@ -18,7 +18,7 @@ DEFAULT_CONFIG = { | |||
63 | 18 | 18 | ||
64 | 19 | 19 | ||
65 | 20 | def update_configuration(config): | 20 | def update_configuration(config): |
67 | 21 | """Update the instanciated_handler_registry. | 21 | """Update the instantiated_handler_registry. |
68 | 22 | 22 | ||
69 | 23 | :param config: | 23 | :param config: |
70 | 24 | The dictionary containing changes to apply. If a key is given | 24 | The dictionary containing changes to apply. If a key is given |
71 | diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py | |||
72 | index 4066076..4b4bb39 100644 | |||
73 | --- a/cloudinit/reporting/handlers.py | |||
74 | +++ b/cloudinit/reporting/handlers.py | |||
75 | @@ -1,17 +1,34 @@ | |||
76 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | 1 | # This file is part of cloud-init. See LICENSE file for license information. |
77 | 2 | 2 | ||
78 | 3 | import abc | 3 | import abc |
79 | 4 | import fcntl | ||
80 | 4 | import json | 5 | import json |
81 | 5 | import six | 6 | import six |
82 | 7 | import os | ||
83 | 8 | import re | ||
84 | 9 | import struct | ||
85 | 10 | import threading | ||
86 | 11 | import time | ||
87 | 6 | 12 | ||
88 | 7 | from cloudinit import log as logging | 13 | from cloudinit import log as logging |
89 | 8 | from cloudinit.registry import DictRegistry | 14 | from cloudinit.registry import DictRegistry |
90 | 9 | from cloudinit import (url_helper, util) | 15 | from cloudinit import (url_helper, util) |
91 | 16 | from datetime import datetime | ||
92 | 10 | 17 | ||
93 | 18 | if six.PY2: | ||
94 | 19 | import multiprocessing.queues as queue | ||
95 | 20 | from multiprocessing.queues import JoinableQueue as JQueue | ||
96 | 21 | else: | ||
97 | 22 | import queue | ||
98 | 23 | from queue import Queue as JQueue | ||
99 | 11 | 24 | ||
100 | 12 | LOG = logging.getLogger(__name__) | 25 | LOG = logging.getLogger(__name__) |
101 | 13 | 26 | ||
102 | 14 | 27 | ||
103 | 28 | class ReportException(Exception): | ||
104 | 29 | pass | ||
105 | 30 | |||
106 | 31 | |||
107 | 15 | @six.add_metaclass(abc.ABCMeta) | 32 | @six.add_metaclass(abc.ABCMeta) |
108 | 16 | class ReportingHandler(object): | 33 | class ReportingHandler(object): |
109 | 17 | """Base class for report handlers. | 34 | """Base class for report handlers. |
110 | @@ -85,9 +102,247 @@ class WebHookHandler(ReportingHandler): | |||
111 | 85 | LOG.warning("failed posting event: %s", event.as_string()) | 102 | LOG.warning("failed posting event: %s", event.as_string()) |
112 | 86 | 103 | ||
113 | 87 | 104 | ||
114 | 105 | class HyperVKvpReportingHandler(ReportingHandler): | ||
115 | 106 | """ | ||
116 | 107 | Reports events to a Hyper-V host using Key-Value-Pair exchange protocol | ||
117 | 108 | and can be used to obtain high level diagnostic information from the host. | ||
118 | 109 | |||
119 | 110 | To use this facility, the KVP user-space daemon (hv_kvp_daemon) has to be | ||
120 | 111 | running. It reads the kvp_file when the host requests the guest to | ||
121 | 112 | enumerate the KVP's. | ||
122 | 113 | |||
123 | 114 | This reporter collates all events for a module (origin|name) in a single | ||
124 | 115 | json string in the dictionary. | ||
125 | 116 | |||
126 | 117 | For more information, see | ||
127 | 118 | https://technet.microsoft.com/en-us/library/dn798287.aspx#Linux%20guests | ||
128 | 119 | """ | ||
129 | 120 | HV_KVP_EXCHANGE_MAX_VALUE_SIZE = 2048 | ||
130 | 121 | HV_KVP_EXCHANGE_MAX_KEY_SIZE = 512 | ||
131 | 122 | HV_KVP_RECORD_SIZE = (HV_KVP_EXCHANGE_MAX_KEY_SIZE + | ||
132 | 123 | HV_KVP_EXCHANGE_MAX_VALUE_SIZE) | ||
133 | 124 | EVENT_PREFIX = 'CLOUD_INIT' | ||
134 | 125 | MSG_KEY = 'msg' | ||
135 | 126 | RESULT_KEY = 'result' | ||
136 | 127 | DESC_IDX_KEY = 'msg_i' | ||
137 | 128 | JSON_SEPARATORS = (',', ':') | ||
138 | 129 | KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1' | ||
139 | 130 | |||
140 | 131 | def __init__(self, | ||
141 | 132 | kvp_file_path=KVP_POOL_FILE_GUEST, | ||
142 | 133 | event_types=None): | ||
143 | 134 | super(HyperVKvpReportingHandler, self).__init__() | ||
144 | 135 | self._kvp_file_path = kvp_file_path | ||
145 | 136 | self._event_types = event_types | ||
146 | 137 | self.running = False | ||
147 | 138 | self.queue_lock = threading.Lock() | ||
148 | 139 | self.running_lock = threading.Lock() | ||
149 | 140 | self.q = JQueue() | ||
150 | 141 | self.kvp_file = None | ||
151 | 142 | self.incarnation_no = self._get_incarnation_no() | ||
152 | 143 | self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX, | ||
153 | 144 | self.incarnation_no) | ||
154 | 145 | self._current_offset = 0 | ||
155 | 146 | |||
156 | 147 | def _get_incarnation_no(self): | ||
157 | 148 | """ | ||
158 | 149 | use the time passed as the incarnation number. | ||
159 | 150 | the incarnation number is the number which are used to | ||
160 | 151 | distinguish the old data stored in kvp and the new data. | ||
161 | 152 | """ | ||
162 | 153 | uptime_str = util.uptime() | ||
163 | 154 | try: | ||
164 | 155 | return int(time.time() - float(uptime_str)) | ||
165 | 156 | except ValueError: | ||
166 | 157 | LOG.warning("uptime '%s' not in correct format.", uptime_str) | ||
167 | 158 | return 0 | ||
168 | 159 | |||
169 | 160 | def _iterate_kvps(self, offset): | ||
170 | 161 | """iterate the kvp file from the current offset.""" | ||
171 | 162 | try: | ||
172 | 163 | with open(self._kvp_file_path, 'rb+') as f: | ||
173 | 164 | self.kvp_file = f | ||
174 | 165 | fcntl.flock(f, fcntl.LOCK_EX) | ||
175 | 166 | f.seek(offset) | ||
176 | 167 | record_data = f.read(self.HV_KVP_RECORD_SIZE) | ||
177 | 168 | while len(record_data) == self.HV_KVP_RECORD_SIZE: | ||
178 | 169 | self._current_offset += self.HV_KVP_RECORD_SIZE | ||
179 | 170 | kvp_item = self._decode_kvp_item(record_data) | ||
180 | 171 | yield kvp_item | ||
181 | 172 | record_data = f.read(self.HV_KVP_RECORD_SIZE) | ||
182 | 173 | fcntl.flock(f, fcntl.LOCK_UN) | ||
183 | 174 | finally: | ||
184 | 175 | self.kvp_file = None | ||
185 | 176 | |||
186 | 177 | def _event_key(self, event): | ||
187 | 178 | """ | ||
188 | 179 | the event key format is: | ||
189 | 180 | CLOUD_INIT|<incarnation number>|<event_type>|<event_name> | ||
190 | 181 | """ | ||
191 | 182 | return u"{0}|{1}|{2}".format(self.event_key_prefix, | ||
192 | 183 | event.event_type, event.name) | ||
193 | 184 | |||
194 | 185 | def _encode_kvp_item(self, key, value): | ||
195 | 186 | data = (struct.pack("%ds%ds" % ( | ||
196 | 187 | self.HV_KVP_EXCHANGE_MAX_KEY_SIZE, | ||
197 | 188 | self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE), | ||
198 | 189 | key.encode('utf-8'), value.encode('utf-8'))) | ||
199 | 190 | return data | ||
200 | 191 | |||
201 | 192 | def _decode_kvp_item(self, record_data): | ||
202 | 193 | record_data_len = len(record_data) | ||
203 | 194 | if record_data_len != self.HV_KVP_RECORD_SIZE: | ||
204 | 195 | raise ReportException( | ||
205 | 196 | "record_data len not correct {0} {1}." | ||
206 | 197 | .format(record_data_len, self.HV_KVP_RECORD_SIZE)) | ||
207 | 198 | k = (record_data[0:self.HV_KVP_EXCHANGE_MAX_KEY_SIZE].decode('utf-8') | ||
208 | 199 | .strip('\x00')) | ||
209 | 200 | v = ( | ||
210 | 201 | record_data[ | ||
211 | 202 | self.HV_KVP_EXCHANGE_MAX_KEY_SIZE:self.HV_KVP_RECORD_SIZE | ||
212 | 203 | ].decode('utf-8').strip('\x00')) | ||
213 | 204 | |||
214 | 205 | return {'key': k, 'value': v} | ||
215 | 206 | |||
216 | 207 | def _update_kvp_item(self, record_data): | ||
217 | 208 | if self.kvp_file is None: | ||
218 | 209 | raise ReportException( | ||
219 | 210 | "kvp file '{0}' not opened." | ||
220 | 211 | .format(self._kvp_file_path)) | ||
221 | 212 | self.kvp_file.seek(-self.HV_KVP_RECORD_SIZE, 1) | ||
222 | 213 | self.kvp_file.write(record_data) | ||
223 | 214 | |||
224 | 215 | def _append_kvp_item(self, record_data): | ||
225 | 216 | with open(self._kvp_file_path, 'rb+') as f: | ||
226 | 217 | fcntl.flock(f, fcntl.LOCK_EX) | ||
227 | 218 | # seek to end of the file | ||
228 | 219 | f.seek(0, 2) | ||
229 | 220 | f.write(record_data) | ||
230 | 221 | f.flush() | ||
231 | 222 | fcntl.flock(f, fcntl.LOCK_UN) | ||
232 | 223 | self._current_offset = f.tell() | ||
233 | 224 | |||
234 | 225 | def _break_down(self, key, meta_data, description): | ||
235 | 226 | del meta_data[self.MSG_KEY] | ||
236 | 227 | des_in_json = json.dumps(description) | ||
237 | 228 | des_in_json = des_in_json[1:(len(des_in_json) - 1)] | ||
238 | 229 | i = 0 | ||
239 | 230 | result_array = [] | ||
240 | 231 | message_place_holder = "\"" + self.MSG_KEY + "\":\"\"" | ||
241 | 232 | while True: | ||
242 | 233 | meta_data[self.DESC_IDX_KEY] = i | ||
243 | 234 | meta_data[self.MSG_KEY] = '' | ||
244 | 235 | data_without_desc = json.dumps(meta_data, | ||
245 | 236 | separators=self.JSON_SEPARATORS) | ||
246 | 237 | room_for_desc = ( | ||
247 | 238 | self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE - | ||
248 | 239 | len(data_without_desc) - 8) | ||
249 | 240 | value = data_without_desc.replace( | ||
250 | 241 | message_place_holder, | ||
251 | 242 | '"{key}":"{desc}"'.format( | ||
252 | 243 | key=self.MSG_KEY, desc=des_in_json[:room_for_desc])) | ||
253 | 244 | result_array.append(self._encode_kvp_item(key, value)) | ||
254 | 245 | i += 1 | ||
255 | 246 | des_in_json = des_in_json[room_for_desc:] | ||
256 | 247 | if len(des_in_json) == 0: | ||
257 | 248 | break | ||
258 | 249 | return result_array | ||
259 | 250 | |||
260 | 251 | def _encode_event(self, event): | ||
261 | 252 | """ | ||
262 | 253 | encode the event into kvp data bytes. | ||
263 | 254 | if the event content reaches the maximum length of kvp value. | ||
264 | 255 | then it would be cut to multiple slices. | ||
265 | 256 | """ | ||
266 | 257 | key = self._event_key(event) | ||
267 | 258 | meta_data = { | ||
268 | 259 | "name": event.name, | ||
269 | 260 | "type": event.event_type, | ||
270 | 261 | "ts": (datetime.utcfromtimestamp(event.timestamp) | ||
271 | 262 | .isoformat() + 'Z'), | ||
272 | 263 | } | ||
273 | 264 | if hasattr(event, self.RESULT_KEY): | ||
274 | 265 | meta_data[self.RESULT_KEY] = event.result | ||
275 | 266 | meta_data[self.MSG_KEY] = event.description | ||
276 | 267 | value = json.dumps(meta_data, separators=self.JSON_SEPARATORS) | ||
277 | 268 | # if it reaches the maximum length of kvp value, | ||
278 | 269 | # break it down to slices. | ||
279 | 270 | # this should be very corner case. | ||
280 | 271 | if len(value) > self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE: | ||
281 | 272 | return self._break_down(key, meta_data, event.description) | ||
282 | 273 | else: | ||
283 | 274 | data = self._encode_kvp_item(key, value) | ||
284 | 275 | return [data] | ||
285 | 276 | |||
286 | 277 | def _publish_event_routine(self): | ||
287 | 278 | while True: | ||
288 | 279 | event = None | ||
289 | 280 | try: | ||
290 | 281 | # acquire the lock. | ||
291 | 282 | event = self.q.get_nowait() | ||
292 | 283 | need_append = True | ||
293 | 284 | try: | ||
294 | 285 | if not os.path.exists(self._kvp_file_path): | ||
295 | 286 | LOG.warning( | ||
296 | 287 | "skip writing events %s to %s. file not present.", | ||
297 | 288 | event.as_string(), | ||
298 | 289 | self._kvp_file_path) | ||
299 | 290 | encoded_event = self._encode_event(event) | ||
300 | 291 | # for each encoded_event | ||
301 | 292 | for encoded_data in (encoded_event): | ||
302 | 293 | for kvp in self._iterate_kvps(self._current_offset): | ||
303 | 294 | match = ( | ||
304 | 295 | re.match( | ||
305 | 296 | r"^{0}\|(\d+)\|.+" | ||
306 | 297 | .format(self.EVENT_PREFIX), | ||
307 | 298 | kvp['key'] | ||
308 | 299 | )) | ||
309 | 300 | if match: | ||
310 | 301 | match_groups = match.groups(0) | ||
311 | 302 | if int(match_groups[0]) < self.incarnation_no: | ||
312 | 303 | need_append = False | ||
313 | 304 | self._update_kvp_item(encoded_data) | ||
314 | 305 | break | ||
315 | 306 | if need_append: | ||
316 | 307 | self._append_kvp_item(encoded_data) | ||
317 | 308 | except IOError as e: | ||
318 | 309 | LOG.warning( | ||
319 | 310 | "failed posting event to kvp: %s e:%s", | ||
320 | 311 | event.as_string(), e) | ||
321 | 312 | self.running = False | ||
322 | 313 | break | ||
323 | 314 | finally: | ||
324 | 315 | self.q.task_done() | ||
325 | 316 | except queue.Empty: | ||
326 | 317 | with self.queue_lock: | ||
327 | 318 | # double check the queue is empty | ||
328 | 319 | if self.q.empty(): | ||
329 | 320 | self.running = False | ||
330 | 321 | break | ||
331 | 322 | |||
332 | 323 | def trigger_publish_event(self): | ||
333 | 324 | if not self.running: | ||
334 | 325 | with self.running_lock: | ||
335 | 326 | if not self.running: | ||
336 | 327 | self.running = True | ||
337 | 328 | thread = threading.Thread( | ||
338 | 329 | target=self._publish_event_routine) | ||
339 | 330 | thread.start() | ||
340 | 331 | |||
341 | 332 | # since the saving to the kvp pool can be a time costing task | ||
342 | 333 | # if the kvp pool already contains a chunk of data, | ||
343 | 334 | # so defer it to another thread. | ||
344 | 335 | def publish_event(self, event): | ||
345 | 336 | if (not self._event_types or event.event_type in self._event_types): | ||
346 | 337 | with self.queue_lock: | ||
347 | 338 | self.q.put(event) | ||
348 | 339 | self.trigger_publish_event() | ||
349 | 340 | |||
350 | 341 | |||
351 | 88 | available_handlers = DictRegistry() | 342 | available_handlers = DictRegistry() |
352 | 89 | available_handlers.register_item('log', LogHandler) | 343 | available_handlers.register_item('log', LogHandler) |
353 | 90 | available_handlers.register_item('print', PrintHandler) | 344 | available_handlers.register_item('print', PrintHandler) |
354 | 91 | available_handlers.register_item('webhook', WebHookHandler) | 345 | available_handlers.register_item('webhook', WebHookHandler) |
355 | 346 | available_handlers.register_item('hyperv', HyperVKvpReportingHandler) | ||
356 | 92 | 347 | ||
357 | 93 | # vi: ts=4 expandtab | 348 | # vi: ts=4 expandtab |
358 | diff --git a/cloudinit/stages.py b/cloudinit/stages.py | |||
359 | index c132b57..8874d40 100644 | |||
360 | --- a/cloudinit/stages.py | |||
361 | +++ b/cloudinit/stages.py | |||
362 | @@ -510,7 +510,7 @@ class Init(object): | |||
363 | 510 | # The default frequency if handlers don't have one | 510 | # The default frequency if handlers don't have one |
364 | 511 | 'frequency': frequency, | 511 | 'frequency': frequency, |
365 | 512 | # This will be used when new handlers are found | 512 | # This will be used when new handlers are found |
367 | 513 | # to help write there contents to files with numbered | 513 | # to help write their contents to files with numbered |
368 | 514 | # names... | 514 | # names... |
369 | 515 | 'handlercount': 0, | 515 | 'handlercount': 0, |
370 | 516 | 'excluded': excluded, | 516 | 'excluded': excluded, |
371 | diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py | |||
372 | 517 | new file mode 100644 | 517 | new file mode 100644 |
373 | index 0000000..2e64c6c | |||
374 | --- /dev/null | |||
375 | +++ b/tests/unittests/test_reporting_hyperv.py | |||
376 | @@ -0,0 +1,134 @@ | |||
377 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
378 | 2 | |||
379 | 3 | from cloudinit.reporting import events | ||
380 | 4 | from cloudinit.reporting import handlers | ||
381 | 5 | |||
382 | 6 | import json | ||
383 | 7 | import os | ||
384 | 8 | |||
385 | 9 | from cloudinit import util | ||
386 | 10 | from cloudinit.tests.helpers import CiTestCase | ||
387 | 11 | |||
388 | 12 | |||
389 | 13 | class TestKvpEncoding(CiTestCase): | ||
390 | 14 | def test_encode_decode(self): | ||
391 | 15 | kvp = {'key': 'key1', 'value': 'value1'} | ||
392 | 16 | kvp_reporting = handlers.HyperVKvpReportingHandler() | ||
393 | 17 | data = kvp_reporting._encode_kvp_item(kvp['key'], kvp['value']) | ||
394 | 18 | self.assertEqual(len(data), kvp_reporting.HV_KVP_RECORD_SIZE) | ||
395 | 19 | decoded_kvp = kvp_reporting._decode_kvp_item(data) | ||
396 | 20 | self.assertEqual(kvp, decoded_kvp) | ||
397 | 21 | |||
398 | 22 | |||
399 | 23 | class TextKvpReporter(CiTestCase): | ||
400 | 24 | def setUp(self): | ||
401 | 25 | super(TextKvpReporter, self).setUp() | ||
402 | 26 | self.tmp_file_path = self.tmp_path('kvp_pool_file') | ||
403 | 27 | util.ensure_file(self.tmp_file_path) | ||
404 | 28 | |||
405 | 29 | def test_event_type_can_be_filtered(self): | ||
406 | 30 | reporter = handlers.HyperVKvpReportingHandler( | ||
407 | 31 | kvp_file_path=self.tmp_file_path, | ||
408 | 32 | event_types=['foo', 'bar']) | ||
409 | 33 | |||
410 | 34 | reporter.publish_event( | ||
411 | 35 | events.ReportingEvent('foo', 'name', 'description')) | ||
412 | 36 | reporter.publish_event( | ||
413 | 37 | events.ReportingEvent('some_other', 'name', 'description3')) | ||
414 | 38 | reporter.q.join() | ||
415 | 39 | |||
416 | 40 | kvps = list(reporter._iterate_kvps(0)) | ||
417 | 41 | self.assertEqual(1, len(kvps)) | ||
418 | 42 | |||
419 | 43 | reporter.publish_event( | ||
420 | 44 | events.ReportingEvent('bar', 'name', 'description2')) | ||
421 | 45 | reporter.q.join() | ||
422 | 46 | kvps = list(reporter._iterate_kvps(0)) | ||
423 | 47 | self.assertEqual(2, len(kvps)) | ||
424 | 48 | |||
425 | 49 | self.assertIn('foo', kvps[0]['key']) | ||
426 | 50 | self.assertIn('bar', kvps[1]['key']) | ||
427 | 51 | self.assertNotIn('some_other', kvps[0]['key']) | ||
428 | 52 | self.assertNotIn('some_other', kvps[1]['key']) | ||
429 | 53 | |||
430 | 54 | def test_events_are_over_written(self): | ||
431 | 55 | reporter = handlers.HyperVKvpReportingHandler( | ||
432 | 56 | kvp_file_path=self.tmp_file_path) | ||
433 | 57 | |||
434 | 58 | self.assertEqual(0, len(list(reporter._iterate_kvps(0)))) | ||
435 | 59 | |||
436 | 60 | reporter.publish_event( | ||
437 | 61 | events.ReportingEvent('foo', 'name1', 'description')) | ||
438 | 62 | reporter.publish_event( | ||
439 | 63 | events.ReportingEvent('foo', 'name2', 'description')) | ||
440 | 64 | reporter.q.join() | ||
441 | 65 | self.assertEqual(2, len(list(reporter._iterate_kvps(0)))) | ||
442 | 66 | |||
443 | 67 | reporter2 = handlers.HyperVKvpReportingHandler( | ||
444 | 68 | kvp_file_path=self.tmp_file_path) | ||
445 | 69 | reporter2.incarnation_no = reporter.incarnation_no + 1 | ||
446 | 70 | reporter2.publish_event( | ||
447 | 71 | events.ReportingEvent('foo', 'name3', 'description')) | ||
448 | 72 | reporter2.q.join() | ||
449 | 73 | |||
450 | 74 | self.assertEqual(2, len(list(reporter2._iterate_kvps(0)))) | ||
451 | 75 | |||
452 | 76 | def test_events_with_higher_incarnation_not_over_written(self): | ||
453 | 77 | reporter = handlers.HyperVKvpReportingHandler( | ||
454 | 78 | kvp_file_path=self.tmp_file_path) | ||
455 | 79 | |||
456 | 80 | self.assertEqual(0, len(list(reporter._iterate_kvps(0)))) | ||
457 | 81 | |||
458 | 82 | reporter.publish_event( | ||
459 | 83 | events.ReportingEvent('foo', 'name1', 'description')) | ||
460 | 84 | reporter.publish_event( | ||
461 | 85 | events.ReportingEvent('foo', 'name2', 'description')) | ||
462 | 86 | reporter.q.join() | ||
463 | 87 | self.assertEqual(2, len(list(reporter._iterate_kvps(0)))) | ||
464 | 88 | |||
465 | 89 | reporter3 = handlers.HyperVKvpReportingHandler( | ||
466 | 90 | kvp_file_path=self.tmp_file_path) | ||
467 | 91 | reporter3.incarnation_no = reporter.incarnation_no - 1 | ||
468 | 92 | reporter3.publish_event( | ||
469 | 93 | events.ReportingEvent('foo', 'name3', 'description')) | ||
470 | 94 | reporter3.q.join() | ||
471 | 95 | self.assertEqual(3, len(list(reporter3._iterate_kvps(0)))) | ||
472 | 96 | |||
473 | 97 | def test_finish_event_result_is_logged(self): | ||
474 | 98 | reporter = handlers.HyperVKvpReportingHandler( | ||
475 | 99 | kvp_file_path=self.tmp_file_path) | ||
476 | 100 | reporter.publish_event( | ||
477 | 101 | events.FinishReportingEvent('name2', 'description1', | ||
478 | 102 | result=events.status.FAIL)) | ||
479 | 103 | reporter.q.join() | ||
480 | 104 | self.assertIn('FAIL', list(reporter._iterate_kvps(0))[0]['value']) | ||
481 | 105 | |||
482 | 106 | def test_file_operation_issue(self): | ||
483 | 107 | os.remove(self.tmp_file_path) | ||
484 | 108 | reporter = handlers.HyperVKvpReportingHandler( | ||
485 | 109 | kvp_file_path=self.tmp_file_path) | ||
486 | 110 | reporter.publish_event( | ||
487 | 111 | events.FinishReportingEvent('name2', 'description1', | ||
488 | 112 | result=events.status.FAIL)) | ||
489 | 113 | reporter.q.join() | ||
490 | 114 | |||
491 | 115 | def test_event_very_long(self): | ||
492 | 116 | reporter = handlers.HyperVKvpReportingHandler( | ||
493 | 117 | kvp_file_path=self.tmp_file_path) | ||
494 | 118 | description = 'ab' * reporter.HV_KVP_EXCHANGE_MAX_VALUE_SIZE | ||
495 | 119 | long_event = events.FinishReportingEvent( | ||
496 | 120 | 'event_name', | ||
497 | 121 | description, | ||
498 | 122 | result=events.status.FAIL) | ||
499 | 123 | reporter.publish_event(long_event) | ||
500 | 124 | reporter.q.join() | ||
501 | 125 | kvps = list(reporter._iterate_kvps(0)) | ||
502 | 126 | self.assertEqual(3, len(kvps)) | ||
503 | 127 | |||
504 | 128 | # restore from the kvp to see the content are all there | ||
505 | 129 | full_description = '' | ||
506 | 130 | for i in range(len(kvps)): | ||
507 | 131 | msg_slice = json.loads(kvps[i]['value']) | ||
508 | 132 | self.assertEqual(msg_slice['msg_i'], i) | ||
509 | 133 | full_description += msg_slice['msg'] | ||
510 | 134 | self.assertEqual(description, full_description) |
PASSED: Continuous integration, rev:0124a19cbfc5367395eb888ca504b31910982650
https://jenkins.ubuntu.com/server/job/cloud-init-ci/259/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
SUCCESS: Ubuntu LTS: Build
SUCCESS: Ubuntu LTS: Integration
IN_PROGRESS: Declarative: Post Actions
Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/259/rebuild