Merge ~chad.smith/cloud-init:ubuntu/artful into cloud-init:ubuntu/artful
- Git
- lp:~chad.smith/cloud-init
- ubuntu/artful
- Merge into ubuntu/artful
Proposed by
Chad Smith
Status: Merged
Merged at revision: 87041591ec51e779429f16454f5b406214bc3059
Proposed branch: ~chad.smith/cloud-init:ubuntu/artful
Merge into: cloud-init:ubuntu/artful
Diff against target:
1095 lines (+657/-91) 20 files modified
cloudinit/config/cc_bootcmd.py (+7/-1) cloudinit/config/cc_runcmd.py (+5/-0) cloudinit/config/cc_write_files.py (+6/-1) cloudinit/event.py (+17/-0) cloudinit/gpg.py (+42/-10) cloudinit/sources/__init__.py (+77/-1) cloudinit/sources/tests/test_init.py (+82/-1) cloudinit/stages.py (+10/-4) cloudinit/tests/test_gpg.py (+54/-0) cloudinit/tests/test_stages.py (+231/-0) cloudinit/tests/test_util.py (+68/-1) cloudinit/util.py (+18/-10) debian/changelog (+17/-0) dev/null (+0/-49) doc/examples/cloud-config-run-cmds.txt (+4/-1) doc/examples/cloud-config.txt (+4/-1) doc/rtd/topics/format.rst (+1/-1) integration-requirements.txt (+1/-1) tests/unittests/test_datasource/test_azure_helper.py (+3/-1) tools/run-container (+10/-8) |
Related bugs: (none)
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Server Team CI bot | continuous-integration | Approve | |
Scott Moser | Pending | ||
Review via email: mp+349222@code.launchpad.net
Commit message
Merge new-upstream-
Description of the change
To post a comment you must log in.
Revision history for this message
Server Team CI bot (server-team-bot) wrote:
review:
Approve
(continuous-integration)
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py | |||
2 | index db64f0a..6813f53 100644 | |||
3 | --- a/cloudinit/config/cc_bootcmd.py | |||
4 | +++ b/cloudinit/config/cc_bootcmd.py | |||
5 | @@ -42,7 +42,13 @@ schema = { | |||
6 | 42 | 42 | ||
7 | 43 | .. note:: | 43 | .. note:: |
8 | 44 | bootcmd should only be used for things that could not be done later | 44 | bootcmd should only be used for things that could not be done later |
10 | 45 | in the boot process."""), | 45 | in the boot process. |
11 | 46 | |||
12 | 47 | .. note:: | ||
13 | 48 | |||
14 | 49 | when writing files, do not use /tmp dir as it races with | ||
15 | 50 | systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead. | ||
16 | 51 | """), | ||
17 | 46 | 'distros': distros, | 52 | 'distros': distros, |
18 | 47 | 'examples': [dedent("""\ | 53 | 'examples': [dedent("""\ |
19 | 48 | bootcmd: | 54 | bootcmd: |
20 | diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py | |||
21 | index b6f6c80..1f75d6c 100644 | |||
22 | --- a/cloudinit/config/cc_runcmd.py | |||
23 | +++ b/cloudinit/config/cc_runcmd.py | |||
24 | @@ -42,6 +42,11 @@ schema = { | |||
25 | 42 | 42 | ||
26 | 43 | all commands must be proper yaml, so you have to quote any characters | 43 | all commands must be proper yaml, so you have to quote any characters |
27 | 44 | yaml would eat (':' can be problematic) | 44 | yaml would eat (':' can be problematic) |
28 | 45 | |||
29 | 46 | .. note:: | ||
30 | 47 | |||
31 | 48 | when writing files, do not use /tmp dir as it races with | ||
32 | 49 | systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead. | ||
33 | 45 | """), | 50 | """), |
34 | 46 | 'distros': distros, | 51 | 'distros': distros, |
35 | 47 | 'examples': [dedent("""\ | 52 | 'examples': [dedent("""\ |
36 | diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py | |||
37 | index 54ae3a6..31d1db6 100644 | |||
38 | --- a/cloudinit/config/cc_write_files.py | |||
39 | +++ b/cloudinit/config/cc_write_files.py | |||
40 | @@ -15,9 +15,14 @@ binary gzip data can be specified and will be decoded before being written. | |||
41 | 15 | 15 | ||
42 | 16 | .. note:: | 16 | .. note:: |
43 | 17 | if multiline data is provided, care should be taken to ensure that it | 17 | if multiline data is provided, care should be taken to ensure that it |
45 | 18 | follows yaml formatting standargs. to specify binary data, use the yaml | 18 | follows yaml formatting standards. to specify binary data, use the yaml |
46 | 19 | option ``!!binary`` | 19 | option ``!!binary`` |
47 | 20 | 20 | ||
48 | 21 | .. note:: | ||
49 | 22 | Do not write files under /tmp during boot because of a race with | ||
50 | 23 | systemd-tmpfiles-clean that can cause temp files to get cleaned during | ||
51 | 24 | the early boot process. Use /run/somedir instead to avoid race LP:1707222. | ||
52 | 25 | |||
53 | 21 | **Internal name:** ``cc_write_files`` | 26 | **Internal name:** ``cc_write_files`` |
54 | 22 | 27 | ||
55 | 23 | **Module frequency:** per instance | 28 | **Module frequency:** per instance |
56 | diff --git a/cloudinit/event.py b/cloudinit/event.py | |||
57 | 24 | new file mode 100644 | 29 | new file mode 100644 |
58 | index 0000000..f7b311f | |||
59 | --- /dev/null | |||
60 | +++ b/cloudinit/event.py | |||
61 | @@ -0,0 +1,17 @@ | |||
62 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
63 | 2 | |||
64 | 3 | """Classes and functions related to event handling.""" | ||
65 | 4 | |||
66 | 5 | |||
67 | 6 | # Event types which can generate maintenance requests for cloud-init. | ||
68 | 7 | class EventType(object): | ||
69 | 8 | BOOT = "System boot" | ||
70 | 9 | BOOT_NEW_INSTANCE = "New instance first boot" | ||
71 | 10 | |||
72 | 11 | # TODO: Cloud-init will grow support for the follow event types: | ||
73 | 12 | # UDEV | ||
74 | 13 | # METADATA_CHANGE | ||
75 | 14 | # USER_REQUEST | ||
76 | 15 | |||
77 | 16 | |||
78 | 17 | # vi: ts=4 expandtab | ||
79 | diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py | |||
80 | index d58d73e..7fe17a2 100644 | |||
81 | --- a/cloudinit/gpg.py | |||
82 | +++ b/cloudinit/gpg.py | |||
83 | @@ -10,6 +10,8 @@ | |||
84 | 10 | from cloudinit import log as logging | 10 | from cloudinit import log as logging |
85 | 11 | from cloudinit import util | 11 | from cloudinit import util |
86 | 12 | 12 | ||
87 | 13 | import time | ||
88 | 14 | |||
89 | 13 | LOG = logging.getLogger(__name__) | 15 | LOG = logging.getLogger(__name__) |
90 | 14 | 16 | ||
91 | 15 | 17 | ||
92 | @@ -25,16 +27,46 @@ def export_armour(key): | |||
93 | 25 | return armour | 27 | return armour |
94 | 26 | 28 | ||
95 | 27 | 29 | ||
106 | 28 | def recv_key(key, keyserver): | 30 | def recv_key(key, keyserver, retries=(1, 1)): |
107 | 29 | """Receive gpg key from the specified keyserver""" | 31 | """Receive gpg key from the specified keyserver. |
108 | 30 | LOG.debug('Receive gpg key "%s"', key) | 32 | |
109 | 31 | try: | 33 | Retries are done by default because keyservers can be unreliable. |
110 | 32 | util.subp(["gpg", "--keyserver", keyserver, "--recv", key], | 34 | Additionally, there is no way to determine the difference between |
111 | 33 | capture=True) | 35 | a non-existant key and a failure. In both cases gpg (at least 2.2.4) |
112 | 34 | except util.ProcessExecutionError as error: | 36 | exits with status 2 and stderr: "keyserver receive failed: No data" |
113 | 35 | raise ValueError(('Failed to import key "%s" ' | 37 | It is assumed that a key provided to cloud-init exists on the keyserver |
114 | 36 | 'from server "%s" - error %s') % | 38 | so re-trying makes better sense than failing. |
115 | 37 | (key, keyserver, error)) | 39 | |
116 | 40 | @param key: a string key fingerprint (as passed to gpg --recv-keys). | ||
117 | 41 | @param keyserver: the keyserver to request keys from. | ||
118 | 42 | @param retries: an iterable of sleep lengths for retries. | ||
119 | 43 | Use None to indicate no retries.""" | ||
120 | 44 | LOG.debug("Importing key '%s' from keyserver '%s'", key, keyserver) | ||
121 | 45 | cmd = ["gpg", "--keyserver=%s" % keyserver, "--recv-keys", key] | ||
122 | 46 | if retries is None: | ||
123 | 47 | retries = [] | ||
124 | 48 | trynum = 0 | ||
125 | 49 | error = None | ||
126 | 50 | sleeps = iter(retries) | ||
127 | 51 | while True: | ||
128 | 52 | trynum += 1 | ||
129 | 53 | try: | ||
130 | 54 | util.subp(cmd, capture=True) | ||
131 | 55 | LOG.debug("Imported key '%s' from keyserver '%s' on try %d", | ||
132 | 56 | key, keyserver, trynum) | ||
133 | 57 | return | ||
134 | 58 | except util.ProcessExecutionError as e: | ||
135 | 59 | error = e | ||
136 | 60 | try: | ||
137 | 61 | naplen = next(sleeps) | ||
138 | 62 | LOG.debug( | ||
139 | 63 | "Import failed with exit code %d, will try again in %ss", | ||
140 | 64 | error.exit_code, naplen) | ||
141 | 65 | time.sleep(naplen) | ||
142 | 66 | except StopIteration: | ||
143 | 67 | raise ValueError( | ||
144 | 68 | ("Failed to import key '%s' from keyserver '%s' " | ||
145 | 69 | "after %d tries: %s") % (key, keyserver, trynum, error)) | ||
146 | 38 | 70 | ||
147 | 39 | 71 | ||
148 | 40 | def delete_key(key): | 72 | def delete_key(key): |
149 | diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py | |||
150 | index 90d7457..f424316 100644 | |||
151 | --- a/cloudinit/sources/__init__.py | |||
152 | +++ b/cloudinit/sources/__init__.py | |||
153 | @@ -19,6 +19,7 @@ from cloudinit.atomic_helper import write_json | |||
154 | 19 | from cloudinit import importer | 19 | from cloudinit import importer |
155 | 20 | from cloudinit import log as logging | 20 | from cloudinit import log as logging |
156 | 21 | from cloudinit import net | 21 | from cloudinit import net |
157 | 22 | from cloudinit.event import EventType | ||
158 | 22 | from cloudinit import type_utils | 23 | from cloudinit import type_utils |
159 | 23 | from cloudinit import user_data as ud | 24 | from cloudinit import user_data as ud |
160 | 24 | from cloudinit import util | 25 | from cloudinit import util |
161 | @@ -102,6 +103,25 @@ class DataSource(object): | |||
162 | 102 | url_timeout = 10 # timeout for each metadata url read attempt | 103 | url_timeout = 10 # timeout for each metadata url read attempt |
163 | 103 | url_retries = 5 # number of times to retry url upon 404 | 104 | url_retries = 5 # number of times to retry url upon 404 |
164 | 104 | 105 | ||
165 | 106 | # The datasource defines a list of supported EventTypes during which | ||
166 | 107 | # the datasource can react to changes in metadata and regenerate | ||
167 | 108 | # network configuration on metadata changes. | ||
168 | 109 | # A datasource which supports writing network config on each system boot | ||
169 | 110 | # would set update_events = {'network': [EventType.BOOT]} | ||
170 | 111 | |||
171 | 112 | # Default: generate network config on new instance id (first boot). | ||
172 | 113 | update_events = {'network': [EventType.BOOT_NEW_INSTANCE]} | ||
173 | 114 | |||
174 | 115 | # N-tuple listing default values for any metadata-related class | ||
175 | 116 | # attributes cached on an instance by a process_data runs. These attribute | ||
176 | 117 | # values are reset via clear_cached_attrs during any update_metadata call. | ||
177 | 118 | cached_attr_defaults = ( | ||
178 | 119 | ('ec2_metadata', UNSET), ('network_json', UNSET), | ||
179 | 120 | ('metadata', {}), ('userdata', None), ('userdata_raw', None), | ||
180 | 121 | ('vendordata', None), ('vendordata_raw', None)) | ||
181 | 122 | |||
182 | 123 | _dirty_cache = False | ||
183 | 124 | |||
184 | 105 | def __init__(self, sys_cfg, distro, paths, ud_proc=None): | 125 | def __init__(self, sys_cfg, distro, paths, ud_proc=None): |
185 | 106 | self.sys_cfg = sys_cfg | 126 | self.sys_cfg = sys_cfg |
186 | 107 | self.distro = distro | 127 | self.distro = distro |
187 | @@ -134,11 +154,31 @@ class DataSource(object): | |||
188 | 134 | 'region': self.region, | 154 | 'region': self.region, |
189 | 135 | 'availability-zone': self.availability_zone}} | 155 | 'availability-zone': self.availability_zone}} |
190 | 136 | 156 | ||
191 | 157 | def clear_cached_attrs(self, attr_defaults=()): | ||
192 | 158 | """Reset any cached metadata attributes to datasource defaults. | ||
193 | 159 | |||
194 | 160 | @param attr_defaults: Optional tuple of (attr, value) pairs to | ||
195 | 161 | set instead of cached_attr_defaults. | ||
196 | 162 | """ | ||
197 | 163 | if not self._dirty_cache: | ||
198 | 164 | return | ||
199 | 165 | if attr_defaults: | ||
200 | 166 | attr_values = attr_defaults | ||
201 | 167 | else: | ||
202 | 168 | attr_values = self.cached_attr_defaults | ||
203 | 169 | |||
204 | 170 | for attribute, value in attr_values: | ||
205 | 171 | if hasattr(self, attribute): | ||
206 | 172 | setattr(self, attribute, value) | ||
207 | 173 | if not attr_defaults: | ||
208 | 174 | self._dirty_cache = False | ||
209 | 175 | |||
210 | 137 | def get_data(self): | 176 | def get_data(self): |
211 | 138 | """Datasources implement _get_data to setup metadata and userdata_raw. | 177 | """Datasources implement _get_data to setup metadata and userdata_raw. |
212 | 139 | 178 | ||
213 | 140 | Minimally, the datasource should return a boolean True on success. | 179 | Minimally, the datasource should return a boolean True on success. |
214 | 141 | """ | 180 | """ |
215 | 181 | self._dirty_cache = True | ||
216 | 142 | return_value = self._get_data() | 182 | return_value = self._get_data() |
217 | 143 | json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE) | 183 | json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE) |
218 | 144 | if not return_value: | 184 | if not return_value: |
219 | @@ -174,6 +214,7 @@ class DataSource(object): | |||
220 | 174 | return return_value | 214 | return return_value |
221 | 175 | 215 | ||
222 | 176 | def _get_data(self): | 216 | def _get_data(self): |
223 | 217 | """Walk metadata sources, process crawled data and save attributes.""" | ||
224 | 177 | raise NotImplementedError( | 218 | raise NotImplementedError( |
225 | 178 | 'Subclasses of DataSource must implement _get_data which' | 219 | 'Subclasses of DataSource must implement _get_data which' |
226 | 179 | ' sets self.metadata, vendordata_raw and userdata_raw.') | 220 | ' sets self.metadata, vendordata_raw and userdata_raw.') |
227 | @@ -416,6 +457,41 @@ class DataSource(object): | |||
228 | 416 | def get_package_mirror_info(self): | 457 | def get_package_mirror_info(self): |
229 | 417 | return self.distro.get_package_mirror_info(data_source=self) | 458 | return self.distro.get_package_mirror_info(data_source=self) |
230 | 418 | 459 | ||
231 | 460 | def update_metadata(self, source_event_types): | ||
232 | 461 | """Refresh cached metadata if the datasource supports this event. | ||
233 | 462 | |||
234 | 463 | The datasource has a list of update_events which | ||
235 | 464 | trigger refreshing all cached metadata as well as refreshing the | ||
236 | 465 | network configuration. | ||
237 | 466 | |||
238 | 467 | @param source_event_types: List of EventTypes which may trigger a | ||
239 | 468 | metadata update. | ||
240 | 469 | |||
241 | 470 | @return True if the datasource did successfully update cached metadata | ||
242 | 471 | due to source_event_type. | ||
243 | 472 | """ | ||
244 | 473 | supported_events = {} | ||
245 | 474 | for event in source_event_types: | ||
246 | 475 | for update_scope, update_events in self.update_events.items(): | ||
247 | 476 | if event in update_events: | ||
248 | 477 | if not supported_events.get(update_scope): | ||
249 | 478 | supported_events[update_scope] = [] | ||
250 | 479 | supported_events[update_scope].append(event) | ||
251 | 480 | for scope, matched_events in supported_events.items(): | ||
252 | 481 | LOG.debug( | ||
253 | 482 | "Update datasource metadata and %s config due to events: %s", | ||
254 | 483 | scope, ', '.join(matched_events)) | ||
255 | 484 | # Each datasource has a cached config property which needs clearing | ||
256 | 485 | # Once cleared that config property will be regenerated from | ||
257 | 486 | # current metadata. | ||
258 | 487 | self.clear_cached_attrs((('_%s_config' % scope, UNSET),)) | ||
259 | 488 | if supported_events: | ||
260 | 489 | self.clear_cached_attrs() | ||
261 | 490 | result = self.get_data() | ||
262 | 491 | if result: | ||
263 | 492 | return True | ||
264 | 493 | return False | ||
265 | 494 | |||
266 | 419 | def check_instance_id(self, sys_cfg): | 495 | def check_instance_id(self, sys_cfg): |
267 | 420 | # quickly (local check only) if self.instance_id is still | 496 | # quickly (local check only) if self.instance_id is still |
268 | 421 | return False | 497 | return False |
269 | @@ -520,7 +596,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter): | |||
270 | 520 | with myrep: | 596 | with myrep: |
271 | 521 | LOG.debug("Seeing if we can get any data from %s", cls) | 597 | LOG.debug("Seeing if we can get any data from %s", cls) |
272 | 522 | s = cls(sys_cfg, distro, paths) | 598 | s = cls(sys_cfg, distro, paths) |
274 | 523 | if s.get_data(): | 599 | if s.update_metadata([EventType.BOOT_NEW_INSTANCE]): |
275 | 524 | myrep.message = "found %s data from %s" % (mode, name) | 600 | myrep.message = "found %s data from %s" % (mode, name) |
276 | 525 | return (s, type_utils.obj_name(cls)) | 601 | return (s, type_utils.obj_name(cls)) |
277 | 526 | except Exception: | 602 | except Exception: |
278 | diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py | |||
279 | index d5bc98a..dcd221b 100644 | |||
280 | --- a/cloudinit/sources/tests/test_init.py | |||
281 | +++ b/cloudinit/sources/tests/test_init.py | |||
282 | @@ -5,10 +5,11 @@ import os | |||
283 | 5 | import six | 5 | import six |
284 | 6 | import stat | 6 | import stat |
285 | 7 | 7 | ||
286 | 8 | from cloudinit.event import EventType | ||
287 | 8 | from cloudinit.helpers import Paths | 9 | from cloudinit.helpers import Paths |
288 | 9 | from cloudinit import importer | 10 | from cloudinit import importer |
289 | 10 | from cloudinit.sources import ( | 11 | from cloudinit.sources import ( |
291 | 11 | INSTANCE_JSON_FILE, DataSource) | 12 | INSTANCE_JSON_FILE, DataSource, UNSET) |
292 | 12 | from cloudinit.tests.helpers import CiTestCase, skipIf, mock | 13 | from cloudinit.tests.helpers import CiTestCase, skipIf, mock |
293 | 13 | from cloudinit.user_data import UserDataProcessor | 14 | from cloudinit.user_data import UserDataProcessor |
294 | 14 | from cloudinit import util | 15 | from cloudinit import util |
295 | @@ -381,3 +382,83 @@ class TestDataSource(CiTestCase): | |||
296 | 381 | get_args(grandchild.get_hostname), # pylint: disable=W1505 | 382 | get_args(grandchild.get_hostname), # pylint: disable=W1505 |
297 | 382 | '%s does not implement DataSource.get_hostname params' | 383 | '%s does not implement DataSource.get_hostname params' |
298 | 383 | % grandchild) | 384 | % grandchild) |
299 | 385 | |||
300 | 386 | def test_clear_cached_attrs_resets_cached_attr_class_attributes(self): | ||
301 | 387 | """Class attributes listed in cached_attr_defaults are reset.""" | ||
302 | 388 | count = 0 | ||
303 | 389 | # Setup values for all cached class attributes | ||
304 | 390 | for attr, value in self.datasource.cached_attr_defaults: | ||
305 | 391 | setattr(self.datasource, attr, count) | ||
306 | 392 | count += 1 | ||
307 | 393 | self.datasource._dirty_cache = True | ||
308 | 394 | self.datasource.clear_cached_attrs() | ||
309 | 395 | for attr, value in self.datasource.cached_attr_defaults: | ||
310 | 396 | self.assertEqual(value, getattr(self.datasource, attr)) | ||
311 | 397 | |||
312 | 398 | def test_clear_cached_attrs_noops_on_clean_cache(self): | ||
313 | 399 | """Class attributes listed in cached_attr_defaults are reset.""" | ||
314 | 400 | count = 0 | ||
315 | 401 | # Setup values for all cached class attributes | ||
316 | 402 | for attr, _ in self.datasource.cached_attr_defaults: | ||
317 | 403 | setattr(self.datasource, attr, count) | ||
318 | 404 | count += 1 | ||
319 | 405 | self.datasource._dirty_cache = False # Fake clean cache | ||
320 | 406 | self.datasource.clear_cached_attrs() | ||
321 | 407 | count = 0 | ||
322 | 408 | for attr, _ in self.datasource.cached_attr_defaults: | ||
323 | 409 | self.assertEqual(count, getattr(self.datasource, attr)) | ||
324 | 410 | count += 1 | ||
325 | 411 | |||
326 | 412 | def test_clear_cached_attrs_skips_non_attr_class_attributes(self): | ||
327 | 413 | """Skip any cached_attr_defaults which aren't class attributes.""" | ||
328 | 414 | self.datasource._dirty_cache = True | ||
329 | 415 | self.datasource.clear_cached_attrs() | ||
330 | 416 | for attr in ('ec2_metadata', 'network_json'): | ||
331 | 417 | self.assertFalse(hasattr(self.datasource, attr)) | ||
332 | 418 | |||
333 | 419 | def test_clear_cached_attrs_of_custom_attrs(self): | ||
334 | 420 | """Custom attr_values can be passed to clear_cached_attrs.""" | ||
335 | 421 | self.datasource._dirty_cache = True | ||
336 | 422 | cached_attr_name = self.datasource.cached_attr_defaults[0][0] | ||
337 | 423 | setattr(self.datasource, cached_attr_name, 'himom') | ||
338 | 424 | self.datasource.myattr = 'orig' | ||
339 | 425 | self.datasource.clear_cached_attrs( | ||
340 | 426 | attr_defaults=(('myattr', 'updated'),)) | ||
341 | 427 | self.assertEqual('himom', getattr(self.datasource, cached_attr_name)) | ||
342 | 428 | self.assertEqual('updated', self.datasource.myattr) | ||
343 | 429 | |||
344 | 430 | def test_update_metadata_only_acts_on_supported_update_events(self): | ||
345 | 431 | """update_metadata won't get_data on unsupported update events.""" | ||
346 | 432 | self.assertEqual( | ||
347 | 433 | {'network': [EventType.BOOT_NEW_INSTANCE]}, | ||
348 | 434 | self.datasource.update_events) | ||
349 | 435 | |||
350 | 436 | def fake_get_data(): | ||
351 | 437 | raise Exception('get_data should not be called') | ||
352 | 438 | |||
353 | 439 | self.datasource.get_data = fake_get_data | ||
354 | 440 | self.assertFalse( | ||
355 | 441 | self.datasource.update_metadata( | ||
356 | 442 | source_event_types=[EventType.BOOT])) | ||
357 | 443 | |||
358 | 444 | def test_update_metadata_returns_true_on_supported_update_event(self): | ||
359 | 445 | """update_metadata returns get_data response on supported events.""" | ||
360 | 446 | |||
361 | 447 | def fake_get_data(): | ||
362 | 448 | return True | ||
363 | 449 | |||
364 | 450 | self.datasource.get_data = fake_get_data | ||
365 | 451 | self.datasource._network_config = 'something' | ||
366 | 452 | self.datasource._dirty_cache = True | ||
367 | 453 | self.assertTrue( | ||
368 | 454 | self.datasource.update_metadata( | ||
369 | 455 | source_event_types=[ | ||
370 | 456 | EventType.BOOT, EventType.BOOT_NEW_INSTANCE])) | ||
371 | 457 | self.assertEqual(UNSET, self.datasource._network_config) | ||
372 | 458 | self.assertIn( | ||
373 | 459 | "DEBUG: Update datasource metadata and network config due to" | ||
374 | 460 | " events: New instance first boot", | ||
375 | 461 | self.logs.getvalue()) | ||
376 | 462 | |||
377 | 463 | |||
378 | 464 | # vi: ts=4 expandtab | ||
379 | diff --git a/cloudinit/stages.py b/cloudinit/stages.py | |||
380 | index 286607b..c132b57 100644 | |||
381 | --- a/cloudinit/stages.py | |||
382 | +++ b/cloudinit/stages.py | |||
383 | @@ -22,6 +22,8 @@ from cloudinit.handlers import cloud_config as cc_part | |||
384 | 22 | from cloudinit.handlers import shell_script as ss_part | 22 | from cloudinit.handlers import shell_script as ss_part |
385 | 23 | from cloudinit.handlers import upstart_job as up_part | 23 | from cloudinit.handlers import upstart_job as up_part |
386 | 24 | 24 | ||
387 | 25 | from cloudinit.event import EventType | ||
388 | 26 | |||
389 | 25 | from cloudinit import cloud | 27 | from cloudinit import cloud |
390 | 26 | from cloudinit import config | 28 | from cloudinit import config |
391 | 27 | from cloudinit import distros | 29 | from cloudinit import distros |
392 | @@ -648,10 +650,14 @@ class Init(object): | |||
393 | 648 | except Exception as e: | 650 | except Exception as e: |
394 | 649 | LOG.warning("Failed to rename devices: %s", e) | 651 | LOG.warning("Failed to rename devices: %s", e) |
395 | 650 | 652 | ||
400 | 651 | if (self.datasource is not NULL_DATA_SOURCE and | 653 | if self.datasource is not NULL_DATA_SOURCE: |
401 | 652 | not self.is_new_instance()): | 654 | if not self.is_new_instance(): |
402 | 653 | LOG.debug("not a new instance. network config is not applied.") | 655 | if not self.datasource.update_metadata([EventType.BOOT]): |
403 | 654 | return | 656 | LOG.debug( |
404 | 657 | "No network config applied. Neither a new instance" | ||
405 | 658 | " nor datasource network update on '%s' event", | ||
406 | 659 | EventType.BOOT) | ||
407 | 660 | return | ||
408 | 655 | 661 | ||
409 | 656 | LOG.info("Applying network configuration from %s bringup=%s: %s", | 662 | LOG.info("Applying network configuration from %s bringup=%s: %s", |
410 | 657 | src, bring_up, netcfg) | 663 | src, bring_up, netcfg) |
411 | diff --git a/cloudinit/tests/test_gpg.py b/cloudinit/tests/test_gpg.py | |||
412 | 658 | new file mode 100644 | 664 | new file mode 100644 |
413 | index 0000000..0562b96 | |||
414 | --- /dev/null | |||
415 | +++ b/cloudinit/tests/test_gpg.py | |||
416 | @@ -0,0 +1,54 @@ | |||
417 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
418 | 2 | """Test gpg module.""" | ||
419 | 3 | |||
420 | 4 | from cloudinit import gpg | ||
421 | 5 | from cloudinit import util | ||
422 | 6 | from cloudinit.tests.helpers import CiTestCase | ||
423 | 7 | |||
424 | 8 | import mock | ||
425 | 9 | |||
426 | 10 | |||
427 | 11 | @mock.patch("cloudinit.gpg.time.sleep") | ||
428 | 12 | @mock.patch("cloudinit.gpg.util.subp") | ||
429 | 13 | class TestReceiveKeys(CiTestCase): | ||
430 | 14 | """Test the recv_key method.""" | ||
431 | 15 | |||
432 | 16 | def test_retries_on_subp_exc(self, m_subp, m_sleep): | ||
433 | 17 | """retry should be done on gpg receive keys failure.""" | ||
434 | 18 | retries = (1, 2, 4) | ||
435 | 19 | my_exc = util.ProcessExecutionError( | ||
436 | 20 | stdout='', stderr='', exit_code=2, cmd=['mycmd']) | ||
437 | 21 | m_subp.side_effect = (my_exc, my_exc, ('', '')) | ||
438 | 22 | gpg.recv_key("ABCD", "keyserver.example.com", retries=retries) | ||
439 | 23 | self.assertEqual([mock.call(1), mock.call(2)], m_sleep.call_args_list) | ||
440 | 24 | |||
441 | 25 | def test_raises_error_after_retries(self, m_subp, m_sleep): | ||
442 | 26 | """If the final run fails, error should be raised.""" | ||
443 | 27 | naplen = 1 | ||
444 | 28 | keyid, keyserver = ("ABCD", "keyserver.example.com") | ||
445 | 29 | m_subp.side_effect = util.ProcessExecutionError( | ||
446 | 30 | stdout='', stderr='', exit_code=2, cmd=['mycmd']) | ||
447 | 31 | with self.assertRaises(ValueError) as rcm: | ||
448 | 32 | gpg.recv_key(keyid, keyserver, retries=(naplen,)) | ||
449 | 33 | self.assertIn(keyid, str(rcm.exception)) | ||
450 | 34 | self.assertIn(keyserver, str(rcm.exception)) | ||
451 | 35 | m_sleep.assert_called_with(naplen) | ||
452 | 36 | |||
453 | 37 | def test_no_retries_on_none(self, m_subp, m_sleep): | ||
454 | 38 | """retry should not be done if retries is None.""" | ||
455 | 39 | m_subp.side_effect = util.ProcessExecutionError( | ||
456 | 40 | stdout='', stderr='', exit_code=2, cmd=['mycmd']) | ||
457 | 41 | with self.assertRaises(ValueError): | ||
458 | 42 | gpg.recv_key("ABCD", "keyserver.example.com", retries=None) | ||
459 | 43 | m_sleep.assert_not_called() | ||
460 | 44 | |||
461 | 45 | def test_expected_gpg_command(self, m_subp, m_sleep): | ||
462 | 46 | """Verify gpg is called with expected args.""" | ||
463 | 47 | key, keyserver = ("DEADBEEF", "keyserver.example.com") | ||
464 | 48 | retries = (1, 2, 4) | ||
465 | 49 | m_subp.return_value = ('', '') | ||
466 | 50 | gpg.recv_key(key, keyserver, retries=retries) | ||
467 | 51 | m_subp.assert_called_once_with( | ||
468 | 52 | ['gpg', '--keyserver=%s' % keyserver, '--recv-keys', key], | ||
469 | 53 | capture=True) | ||
470 | 54 | m_sleep.assert_not_called() | ||
471 | diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py | |||
472 | 0 | new file mode 100644 | 55 | new file mode 100644 |
473 | index 0000000..94b6b25 | |||
474 | --- /dev/null | |||
475 | +++ b/cloudinit/tests/test_stages.py | |||
476 | @@ -0,0 +1,231 @@ | |||
477 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
478 | 2 | |||
479 | 3 | """Tests related to cloudinit.stages module.""" | ||
480 | 4 | |||
481 | 5 | import os | ||
482 | 6 | |||
483 | 7 | from cloudinit import stages | ||
484 | 8 | from cloudinit import sources | ||
485 | 9 | |||
486 | 10 | from cloudinit.event import EventType | ||
487 | 11 | from cloudinit.util import write_file | ||
488 | 12 | |||
489 | 13 | from cloudinit.tests.helpers import CiTestCase, mock | ||
490 | 14 | |||
491 | 15 | TEST_INSTANCE_ID = 'i-testing' | ||
492 | 16 | |||
493 | 17 | |||
494 | 18 | class FakeDataSource(sources.DataSource): | ||
495 | 19 | |||
496 | 20 | def __init__(self, paths=None, userdata=None, vendordata=None, | ||
497 | 21 | network_config=''): | ||
498 | 22 | super(FakeDataSource, self).__init__({}, None, paths=paths) | ||
499 | 23 | self.metadata = {'instance-id': TEST_INSTANCE_ID} | ||
500 | 24 | self.userdata_raw = userdata | ||
501 | 25 | self.vendordata_raw = vendordata | ||
502 | 26 | self._network_config = None | ||
503 | 27 | if network_config: # Permit for None value to setup attribute | ||
504 | 28 | self._network_config = network_config | ||
505 | 29 | |||
506 | 30 | @property | ||
507 | 31 | def network_config(self): | ||
508 | 32 | return self._network_config | ||
509 | 33 | |||
510 | 34 | def _get_data(self): | ||
511 | 35 | return True | ||
512 | 36 | |||
513 | 37 | |||
514 | 38 | class TestInit(CiTestCase): | ||
515 | 39 | with_logs = True | ||
516 | 40 | |||
517 | 41 | def setUp(self): | ||
518 | 42 | super(TestInit, self).setUp() | ||
519 | 43 | self.tmpdir = self.tmp_dir() | ||
520 | 44 | self.init = stages.Init() | ||
521 | 45 | # Setup fake Paths for Init to reference | ||
522 | 46 | self.init._cfg = {'system_info': { | ||
523 | 47 | 'distro': 'ubuntu', 'paths': {'cloud_dir': self.tmpdir, | ||
524 | 48 | 'run_dir': self.tmpdir}}} | ||
525 | 49 | self.init.datasource = FakeDataSource(paths=self.init.paths) | ||
526 | 50 | |||
527 | 51 | def test_wb__find_networking_config_disabled(self): | ||
528 | 52 | """find_networking_config returns no config when disabled.""" | ||
529 | 53 | disable_file = os.path.join( | ||
530 | 54 | self.init.paths.get_cpath('data'), 'upgraded-network') | ||
531 | 55 | write_file(disable_file, '') | ||
532 | 56 | self.assertEqual( | ||
533 | 57 | (None, disable_file), | ||
534 | 58 | self.init._find_networking_config()) | ||
535 | 59 | |||
536 | 60 | @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') | ||
537 | 61 | def test_wb__find_networking_config_disabled_by_kernel(self, m_cmdline): | ||
538 | 62 | """find_networking_config returns when disabled by kernel cmdline.""" | ||
539 | 63 | m_cmdline.return_value = {'config': 'disabled'} | ||
540 | 64 | self.assertEqual( | ||
541 | 65 | (None, 'cmdline'), | ||
542 | 66 | self.init._find_networking_config()) | ||
543 | 67 | self.assertEqual('DEBUG: network config disabled by cmdline\n', | ||
544 | 68 | self.logs.getvalue()) | ||
545 | 69 | |||
546 | 70 | @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') | ||
547 | 71 | def test_wb__find_networking_config_disabled_by_datasrc(self, m_cmdline): | ||
548 | 72 | """find_networking_config returns when disabled by datasource cfg.""" | ||
549 | 73 | m_cmdline.return_value = {} # Kernel doesn't disable networking | ||
550 | 74 | self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, | ||
551 | 75 | 'network': {}} # system config doesn't disable | ||
552 | 76 | |||
553 | 77 | self.init.datasource = FakeDataSource( | ||
554 | 78 | network_config={'config': 'disabled'}) | ||
555 | 79 | self.assertEqual( | ||
556 | 80 | (None, 'ds'), | ||
557 | 81 | self.init._find_networking_config()) | ||
558 | 82 | self.assertEqual('DEBUG: network config disabled by ds\n', | ||
559 | 83 | self.logs.getvalue()) | ||
560 | 84 | |||
561 | 85 | @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') | ||
562 | 86 | def test_wb__find_networking_config_disabled_by_sysconfig(self, m_cmdline): | ||
563 | 87 | """find_networking_config returns when disabled by system config.""" | ||
564 | 88 | m_cmdline.return_value = {} # Kernel doesn't disable networking | ||
565 | 89 | self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, | ||
566 | 90 | 'network': {'config': 'disabled'}} | ||
567 | 91 | self.assertEqual( | ||
568 | 92 | (None, 'system_cfg'), | ||
569 | 93 | self.init._find_networking_config()) | ||
570 | 94 | self.assertEqual('DEBUG: network config disabled by system_cfg\n', | ||
571 | 95 | self.logs.getvalue()) | ||
572 | 96 | |||
573 | 97 | @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') | ||
574 | 98 | def test_wb__find_networking_config_returns_kernel(self, m_cmdline): | ||
575 | 99 | """find_networking_config returns kernel cmdline config if present.""" | ||
576 | 100 | expected_cfg = {'config': ['fakekernel']} | ||
577 | 101 | m_cmdline.return_value = expected_cfg | ||
578 | 102 | self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, | ||
579 | 103 | 'network': {'config': ['fakesys_config']}} | ||
580 | 104 | self.init.datasource = FakeDataSource( | ||
581 | 105 | network_config={'config': ['fakedatasource']}) | ||
582 | 106 | self.assertEqual( | ||
583 | 107 | (expected_cfg, 'cmdline'), | ||
584 | 108 | self.init._find_networking_config()) | ||
585 | 109 | |||
586 | 110 | @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') | ||
587 | 111 | def test_wb__find_networking_config_returns_system_cfg(self, m_cmdline): | ||
588 | 112 | """find_networking_config returns system config when present.""" | ||
589 | 113 | m_cmdline.return_value = {} # No kernel network config | ||
590 | 114 | expected_cfg = {'config': ['fakesys_config']} | ||
591 | 115 | self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, | ||
592 | 116 | 'network': expected_cfg} | ||
593 | 117 | self.init.datasource = FakeDataSource( | ||
594 | 118 | network_config={'config': ['fakedatasource']}) | ||
595 | 119 | self.assertEqual( | ||
596 | 120 | (expected_cfg, 'system_cfg'), | ||
597 | 121 | self.init._find_networking_config()) | ||
598 | 122 | |||
599 | 123 | @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') | ||
600 | 124 | def test_wb__find_networking_config_returns_datasrc_cfg(self, m_cmdline): | ||
601 | 125 | """find_networking_config returns datasource net config if present.""" | ||
602 | 126 | m_cmdline.return_value = {} # No kernel network config | ||
603 | 127 | # No system config for network in setUp | ||
604 | 128 | expected_cfg = {'config': ['fakedatasource']} | ||
605 | 129 | self.init.datasource = FakeDataSource(network_config=expected_cfg) | ||
606 | 130 | self.assertEqual( | ||
607 | 131 | (expected_cfg, 'ds'), | ||
608 | 132 | self.init._find_networking_config()) | ||
609 | 133 | |||
610 | 134 | @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') | ||
611 | 135 | def test_wb__find_networking_config_returns_fallback(self, m_cmdline): | ||
612 | 136 | """find_networking_config returns fallback config if not defined.""" | ||
613 | 137 | m_cmdline.return_value = {} # Kernel doesn't disable networking | ||
614 | 138 | # Neither datasource nor system_info disable or provide network | ||
615 | 139 | |||
616 | 140 | fake_cfg = {'config': [{'type': 'physical', 'name': 'eth9'}], | ||
617 | 141 | 'version': 1} | ||
618 | 142 | |||
619 | 143 | def fake_generate_fallback(): | ||
620 | 144 | return fake_cfg | ||
621 | 145 | |||
622 | 146 | # Monkey patch distro which gets cached on self.init | ||
623 | 147 | distro = self.init.distro | ||
624 | 148 | distro.generate_fallback_config = fake_generate_fallback | ||
625 | 149 | self.assertEqual( | ||
626 | 150 | (fake_cfg, 'fallback'), | ||
627 | 151 | self.init._find_networking_config()) | ||
628 | 152 | self.assertNotIn('network config disabled', self.logs.getvalue()) | ||
629 | 153 | |||
630 | 154 | def test_apply_network_config_disabled(self): | ||
631 | 155 | """Log when network is disabled by upgraded-network.""" | ||
632 | 156 | disable_file = os.path.join( | ||
633 | 157 | self.init.paths.get_cpath('data'), 'upgraded-network') | ||
634 | 158 | |||
635 | 159 | def fake_network_config(): | ||
636 | 160 | return (None, disable_file) | ||
637 | 161 | |||
638 | 162 | self.init._find_networking_config = fake_network_config | ||
639 | 163 | |||
640 | 164 | self.init.apply_network_config(True) | ||
641 | 165 | self.assertIn( | ||
642 | 166 | 'INFO: network config is disabled by %s' % disable_file, | ||
643 | 167 | self.logs.getvalue()) | ||
644 | 168 | |||
645 | 169 | @mock.patch('cloudinit.distros.ubuntu.Distro') | ||
646 | 170 | def test_apply_network_on_new_instance(self, m_ubuntu): | ||
647 | 171 | """Call distro apply_network_config methods on is_new_instance.""" | ||
648 | 172 | net_cfg = { | ||
649 | 173 | 'version': 1, 'config': [ | ||
650 | 174 | {'subnets': [{'type': 'dhcp'}], 'type': 'physical', | ||
651 | 175 | 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} | ||
652 | 176 | |||
653 | 177 | def fake_network_config(): | ||
654 | 178 | return net_cfg, 'fallback' | ||
655 | 179 | |||
656 | 180 | self.init._find_networking_config = fake_network_config | ||
657 | 181 | self.init.apply_network_config(True) | ||
658 | 182 | self.init.distro.apply_network_config_names.assert_called_with(net_cfg) | ||
659 | 183 | self.init.distro.apply_network_config.assert_called_with( | ||
660 | 184 | net_cfg, bring_up=True) | ||
661 | 185 | |||
662 | 186 | @mock.patch('cloudinit.distros.ubuntu.Distro') | ||
663 | 187 | def test_apply_network_on_same_instance_id(self, m_ubuntu): | ||
664 | 188 | """Only call distro.apply_network_config_names on same instance id.""" | ||
665 | 189 | old_instance_id = os.path.join( | ||
666 | 190 | self.init.paths.get_cpath('data'), 'instance-id') | ||
667 | 191 | write_file(old_instance_id, TEST_INSTANCE_ID) | ||
668 | 192 | net_cfg = { | ||
669 | 193 | 'version': 1, 'config': [ | ||
670 | 194 | {'subnets': [{'type': 'dhcp'}], 'type': 'physical', | ||
671 | 195 | 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} | ||
672 | 196 | |||
673 | 197 | def fake_network_config(): | ||
674 | 198 | return net_cfg, 'fallback' | ||
675 | 199 | |||
676 | 200 | self.init._find_networking_config = fake_network_config | ||
677 | 201 | self.init.apply_network_config(True) | ||
678 | 202 | self.init.distro.apply_network_config_names.assert_called_with(net_cfg) | ||
679 | 203 | self.init.distro.apply_network_config.assert_not_called() | ||
680 | 204 | self.assertIn( | ||
681 | 205 | 'No network config applied. Neither a new instance' | ||
682 | 206 | " nor datasource network update on '%s' event" % EventType.BOOT, | ||
683 | 207 | self.logs.getvalue()) | ||
684 | 208 | |||
685 | 209 | @mock.patch('cloudinit.distros.ubuntu.Distro') | ||
686 | 210 | def test_apply_network_on_datasource_allowed_event(self, m_ubuntu): | ||
687 | 211 | """Apply network if datasource.update_metadata permits BOOT event.""" | ||
688 | 212 | old_instance_id = os.path.join( | ||
689 | 213 | self.init.paths.get_cpath('data'), 'instance-id') | ||
690 | 214 | write_file(old_instance_id, TEST_INSTANCE_ID) | ||
691 | 215 | net_cfg = { | ||
692 | 216 | 'version': 1, 'config': [ | ||
693 | 217 | {'subnets': [{'type': 'dhcp'}], 'type': 'physical', | ||
694 | 218 | 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} | ||
695 | 219 | |||
696 | 220 | def fake_network_config(): | ||
697 | 221 | return net_cfg, 'fallback' | ||
698 | 222 | |||
699 | 223 | self.init._find_networking_config = fake_network_config | ||
700 | 224 | self.init.datasource = FakeDataSource(paths=self.init.paths) | ||
701 | 225 | self.init.datasource.update_events = {'network': [EventType.BOOT]} | ||
702 | 226 | self.init.apply_network_config(True) | ||
703 | 227 | self.init.distro.apply_network_config_names.assert_called_with(net_cfg) | ||
704 | 228 | self.init.distro.apply_network_config.assert_called_with( | ||
705 | 229 | net_cfg, bring_up=True) | ||
706 | 230 | |||
707 | 231 | # vi: ts=4 expandtab | ||
708 | diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py | |||
709 | index 17853fc..6a31e50 100644 | |||
710 | --- a/cloudinit/tests/test_util.py | |||
711 | +++ b/cloudinit/tests/test_util.py | |||
712 | @@ -26,8 +26,51 @@ OS_RELEASE_SLES = dedent("""\ | |||
713 | 26 | CPE_NAME="cpe:/o:suse:sles:12:sp3"\n | 26 | CPE_NAME="cpe:/o:suse:sles:12:sp3"\n |
714 | 27 | """) | 27 | """) |
715 | 28 | 28 | ||
716 | 29 | OS_RELEASE_OPENSUSE = dedent("""\ | ||
717 | 30 | NAME="openSUSE Leap" | ||
718 | 31 | VERSION="42.3" | ||
719 | 32 | ID=opensuse | ||
720 | 33 | ID_LIKE="suse" | ||
721 | 34 | VERSION_ID="42.3" | ||
722 | 35 | PRETTY_NAME="openSUSE Leap 42.3" | ||
723 | 36 | ANSI_COLOR="0;32" | ||
724 | 37 | CPE_NAME="cpe:/o:opensuse:leap:42.3" | ||
725 | 38 | BUG_REPORT_URL="https://bugs.opensuse.org" | ||
726 | 39 | HOME_URL="https://www.opensuse.org/" | ||
727 | 40 | """) | ||
728 | 41 | |||
729 | 42 | OS_RELEASE_CENTOS = dedent("""\ | ||
730 | 43 | NAME="CentOS Linux" | ||
731 | 44 | VERSION="7 (Core)" | ||
732 | 45 | ID="centos" | ||
733 | 46 | ID_LIKE="rhel fedora" | ||
734 | 47 | VERSION_ID="7" | ||
735 | 48 | PRETTY_NAME="CentOS Linux 7 (Core)" | ||
736 | 49 | ANSI_COLOR="0;31" | ||
737 | 50 | CPE_NAME="cpe:/o:centos:centos:7" | ||
738 | 51 | HOME_URL="https://www.centos.org/" | ||
739 | 52 | BUG_REPORT_URL="https://bugs.centos.org/" | ||
740 | 53 | |||
741 | 54 | CENTOS_MANTISBT_PROJECT="CentOS-7" | ||
742 | 55 | CENTOS_MANTISBT_PROJECT_VERSION="7" | ||
743 | 56 | REDHAT_SUPPORT_PRODUCT="centos" | ||
744 | 57 | REDHAT_SUPPORT_PRODUCT_VERSION="7" | ||
745 | 58 | """) | ||
746 | 59 | |||
747 | 60 | OS_RELEASE_DEBIAN = dedent("""\ | ||
748 | 61 | PRETTY_NAME="Debian GNU/Linux 9 (stretch)" | ||
749 | 62 | NAME="Debian GNU/Linux" | ||
750 | 63 | VERSION_ID="9" | ||
751 | 64 | VERSION="9 (stretch)" | ||
752 | 65 | ID=debian | ||
753 | 66 | HOME_URL="https://www.debian.org/" | ||
754 | 67 | SUPPORT_URL="https://www.debian.org/support" | ||
755 | 68 | BUG_REPORT_URL="https://bugs.debian.org/" | ||
756 | 69 | """) | ||
757 | 70 | |||
758 | 29 | OS_RELEASE_UBUNTU = dedent("""\ | 71 | OS_RELEASE_UBUNTU = dedent("""\ |
759 | 30 | NAME="Ubuntu"\n | 72 | NAME="Ubuntu"\n |
760 | 73 | # comment test | ||
761 | 31 | VERSION="16.04.3 LTS (Xenial Xerus)"\n | 74 | VERSION="16.04.3 LTS (Xenial Xerus)"\n |
762 | 32 | ID=ubuntu\n | 75 | ID=ubuntu\n |
763 | 33 | ID_LIKE=debian\n | 76 | ID_LIKE=debian\n |
764 | @@ -310,7 +353,31 @@ class TestGetLinuxDistro(CiTestCase): | |||
765 | 310 | m_os_release.return_value = OS_RELEASE_UBUNTU | 353 | m_os_release.return_value = OS_RELEASE_UBUNTU |
766 | 311 | m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists | 354 | m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists |
767 | 312 | dist = util.get_linux_distro() | 355 | dist = util.get_linux_distro() |
769 | 313 | self.assertEqual(('ubuntu', '16.04', platform.machine()), dist) | 356 | self.assertEqual(('ubuntu', '16.04', 'xenial'), dist) |
770 | 357 | |||
771 | 358 | @mock.patch('cloudinit.util.load_file') | ||
772 | 359 | def test_get_linux_centos(self, m_os_release, m_path_exists): | ||
773 | 360 | """Verify we get the correct name and release name on CentOS.""" | ||
774 | 361 | m_os_release.return_value = OS_RELEASE_CENTOS | ||
775 | 362 | m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists | ||
776 | 363 | dist = util.get_linux_distro() | ||
777 | 364 | self.assertEqual(('centos', '7', 'Core'), dist) | ||
778 | 365 | |||
779 | 366 | @mock.patch('cloudinit.util.load_file') | ||
780 | 367 | def test_get_linux_debian(self, m_os_release, m_path_exists): | ||
781 | 368 | """Verify we get the correct name and release name on Debian.""" | ||
782 | 369 | m_os_release.return_value = OS_RELEASE_DEBIAN | ||
783 | 370 | m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists | ||
784 | 371 | dist = util.get_linux_distro() | ||
785 | 372 | self.assertEqual(('debian', '9', 'stretch'), dist) | ||
786 | 373 | |||
787 | 374 | @mock.patch('cloudinit.util.load_file') | ||
788 | 375 | def test_get_linux_opensuse(self, m_os_release, m_path_exists): | ||
789 | 376 | """Verify we get the correct name and machine arch on OpenSUSE.""" | ||
790 | 377 | m_os_release.return_value = OS_RELEASE_OPENSUSE | ||
791 | 378 | m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists | ||
792 | 379 | dist = util.get_linux_distro() | ||
793 | 380 | self.assertEqual(('opensuse', '42.3', platform.machine()), dist) | ||
794 | 314 | 381 | ||
795 | 315 | @mock.patch('platform.dist') | 382 | @mock.patch('platform.dist') |
796 | 316 | def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists): | 383 | def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists): |
797 | diff --git a/cloudinit/util.py b/cloudinit/util.py | |||
798 | index 6da9511..d0b0e90 100644 | |||
799 | --- a/cloudinit/util.py | |||
800 | +++ b/cloudinit/util.py | |||
801 | @@ -579,16 +579,24 @@ def get_cfg_option_int(yobj, key, default=0): | |||
802 | 579 | def get_linux_distro(): | 579 | def get_linux_distro(): |
803 | 580 | distro_name = '' | 580 | distro_name = '' |
804 | 581 | distro_version = '' | 581 | distro_version = '' |
805 | 582 | flavor = '' | ||
806 | 582 | if os.path.exists('/etc/os-release'): | 583 | if os.path.exists('/etc/os-release'): |
816 | 583 | os_release = load_file('/etc/os-release') | 584 | os_release = load_shell_content(load_file('/etc/os-release')) |
817 | 584 | for line in os_release.splitlines(): | 585 | distro_name = os_release.get('ID', '') |
818 | 585 | if line.strip().startswith('ID='): | 586 | distro_version = os_release.get('VERSION_ID', '') |
819 | 586 | distro_name = line.split('=')[-1] | 587 | if 'sles' in distro_name or 'suse' in distro_name: |
820 | 587 | distro_name = distro_name.replace('"', '') | 588 | # RELEASE_BLOCKER: We will drop this sles divergent behavior |
821 | 588 | if line.strip().startswith('VERSION_ID='): | 589 | # before 18.4 so that get_linux_distro returns a named tuple |
822 | 589 | # Lets hope for the best that distros stay consistent ;) | 590 | # which will include both version codename and architecture |
823 | 590 | distro_version = line.split('=')[-1] | 591 | # on all distributions. |
824 | 591 | distro_version = distro_version.replace('"', '') | 592 | flavor = platform.machine() |
825 | 593 | else: | ||
826 | 594 | flavor = os_release.get('VERSION_CODENAME', '') | ||
827 | 595 | if not flavor: | ||
828 | 596 | match = re.match(r'[^ ]+ \((?P<codename>[^)]+)\)', | ||
829 | 597 | os_release.get('VERSION')) | ||
830 | 598 | if match: | ||
831 | 599 | flavor = match.groupdict()['codename'] | ||
832 | 592 | else: | 600 | else: |
833 | 593 | dist = ('', '', '') | 601 | dist = ('', '', '') |
834 | 594 | try: | 602 | try: |
835 | @@ -606,7 +614,7 @@ def get_linux_distro(): | |||
836 | 606 | 'expansion may have unexpected results') | 614 | 'expansion may have unexpected results') |
837 | 607 | return dist | 615 | return dist |
838 | 608 | 616 | ||
840 | 609 | return (distro_name, distro_version, platform.machine()) | 617 | return (distro_name, distro_version, flavor) |
841 | 610 | 618 | ||
842 | 611 | 619 | ||
843 | 612 | def system_info(): | 620 | def system_info(): |
844 | diff --git a/debian/changelog b/debian/changelog | |||
845 | index 7951bdb..6d9cf37 100644 | |||
846 | --- a/debian/changelog | |||
847 | +++ b/debian/changelog | |||
848 | @@ -1,3 +1,20 @@ | |||
849 | 1 | cloud-init (18.3-9-g2e62cb8a-0ubuntu1~17.10.1) artful-proposed; urgency=medium | ||
850 | 2 | |||
851 | 3 | * New upstream snapshot. | ||
852 | 4 | - docs: note in rtd about avoiding /tmp when writing files | ||
853 | 5 | - ubuntu,centos,debian: get_linux_distro to align with platform.dist | ||
854 | 6 | (LP: #1780481) | ||
855 | 7 | - Fix boothook docs on environment variable name (INSTANCE_I -> | ||
856 | 8 | INSTANCE_ID) [Marc Tamsky] | ||
857 | 9 | - update_metadata: a datasource can support network re-config every boot | ||
858 | 10 | - tests: drop salt-minion integration test | ||
859 | 11 | - Retry on failed import of gpg receive keys. | ||
860 | 12 | - tools: Fix run-container when neither source or binary package requested. | ||
861 | 13 | - docs: Fix a small spelling error. [Oz N Tiram] | ||
862 | 14 | - tox: use simplestreams from git repository rather than bzr. | ||
863 | 15 | |||
864 | 16 | -- Chad Smith <chad.smith@canonical.com> Mon, 09 Jul 2018 15:33:35 -0600 | ||
865 | 17 | |||
866 | 1 | cloud-init (18.3-0ubuntu1~17.10.1) artful-proposed; urgency=medium | 18 | cloud-init (18.3-0ubuntu1~17.10.1) artful-proposed; urgency=medium |
867 | 2 | 19 | ||
868 | 3 | * debian/rules: update version.version_string to contain packaged version. | 20 | * debian/rules: update version.version_string to contain packaged version. |
869 | diff --git a/doc/examples/cloud-config-run-cmds.txt b/doc/examples/cloud-config-run-cmds.txt | |||
870 | index 3bb0686..002398f 100644 | |||
871 | --- a/doc/examples/cloud-config-run-cmds.txt | |||
872 | +++ b/doc/examples/cloud-config-run-cmds.txt | |||
873 | @@ -18,5 +18,8 @@ runcmd: | |||
874 | 18 | - [ sh, -xc, "echo $(date) ': hello world!'" ] | 18 | - [ sh, -xc, "echo $(date) ': hello world!'" ] |
875 | 19 | - [ sh, -c, echo "=========hello world'=========" ] | 19 | - [ sh, -c, echo "=========hello world'=========" ] |
876 | 20 | - ls -l /root | 20 | - ls -l /root |
878 | 21 | - [ wget, "http://slashdot.org", -O, /tmp/index.html ] | 21 | # Note: Don't write files to /tmp from cloud-init use /run/somedir instead. |
879 | 22 | # Early boot environments can race systemd-tmpfiles-clean LP: #1707222. | ||
880 | 23 | - mkdir /run/mydir | ||
881 | 24 | - [ wget, "http://slashdot.org", -O, /run/mydir/index.html ] | ||
882 | 22 | 25 | ||
883 | diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt | |||
884 | index bd84c64..774f66b 100644 | |||
885 | --- a/doc/examples/cloud-config.txt | |||
886 | +++ b/doc/examples/cloud-config.txt | |||
887 | @@ -127,7 +127,10 @@ runcmd: | |||
888 | 127 | - [ sh, -xc, "echo $(date) ': hello world!'" ] | 127 | - [ sh, -xc, "echo $(date) ': hello world!'" ] |
889 | 128 | - [ sh, -c, echo "=========hello world'=========" ] | 128 | - [ sh, -c, echo "=========hello world'=========" ] |
890 | 129 | - ls -l /root | 129 | - ls -l /root |
892 | 130 | - [ wget, "http://slashdot.org", -O, /tmp/index.html ] | 130 | # Note: Don't write files to /tmp from cloud-init use /run/somedir instead. |
893 | 131 | # Early boot environments can race systemd-tmpfiles-clean LP: #1707222. | ||
894 | 132 | - mkdir /run/mydir | ||
895 | 133 | - [ wget, "http://slashdot.org", -O, /run/mydir/index.html ] | ||
896 | 131 | 134 | ||
897 | 132 | 135 | ||
898 | 133 | # boot commands | 136 | # boot commands |
899 | diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst | |||
900 | index e25289a..1b0ff36 100644 | |||
901 | --- a/doc/rtd/topics/format.rst | |||
902 | +++ b/doc/rtd/topics/format.rst | |||
903 | @@ -121,7 +121,7 @@ Cloud Boothook | |||
904 | 121 | 121 | ||
905 | 122 | This content is ``boothook`` data. It is stored in a file under ``/var/lib/cloud`` and then executed immediately. | 122 | This content is ``boothook`` data. It is stored in a file under ``/var/lib/cloud`` and then executed immediately. |
906 | 123 | This is the earliest ``hook`` available. Note, that there is no mechanism provided for running only once. The boothook must take care of this itself. | 123 | This is the earliest ``hook`` available. Note, that there is no mechanism provided for running only once. The boothook must take care of this itself. |
908 | 124 | It is provided with the instance id in the environment variable ``INSTANCE_I``. This could be made use of to provide a 'once-per-instance' type of functionality. | 124 | It is provided with the instance id in the environment variable ``INSTANCE_ID``. This could be made use of to provide a 'once-per-instance' type of functionality. |
909 | 125 | 125 | ||
910 | 126 | Begins with: ``#cloud-boothook`` or ``Content-Type: text/cloud-boothook`` when using a MIME archive. | 126 | Begins with: ``#cloud-boothook`` or ``Content-Type: text/cloud-boothook`` when using a MIME archive. |
911 | 127 | 127 | ||
912 | diff --git a/integration-requirements.txt b/integration-requirements.txt | |||
913 | index e5bb5b2..01baebd 100644 | |||
914 | --- a/integration-requirements.txt | |||
915 | +++ b/integration-requirements.txt | |||
916 | @@ -17,4 +17,4 @@ git+https://github.com/lxc/pylxd.git@4b8ab1802f9aee4eb29cf7b119dae0aa47150779 | |||
917 | 17 | 17 | ||
918 | 18 | 18 | ||
919 | 19 | # finds latest image information | 19 | # finds latest image information |
921 | 20 | bzr+lp:simplestreams | 20 | git+https://git.launchpad.net/simplestreams |
922 | diff --git a/tests/cloud_tests/testcases/modules/salt_minion.py b/tests/cloud_tests/testcases/modules/salt_minion.py | |||
923 | 21 | deleted file mode 100644 | 21 | deleted file mode 100644 |
924 | index fc9688e..0000000 | |||
925 | --- a/tests/cloud_tests/testcases/modules/salt_minion.py | |||
926 | +++ /dev/null | |||
927 | @@ -1,38 +0,0 @@ | |||
928 | 1 | # This file is part of cloud-init. See LICENSE file for license information. | ||
929 | 2 | |||
930 | 3 | """cloud-init Integration Test Verify Script.""" | ||
931 | 4 | from tests.cloud_tests.testcases import base | ||
932 | 5 | |||
933 | 6 | |||
934 | 7 | class Test(base.CloudTestCase): | ||
935 | 8 | """Test salt minion module.""" | ||
936 | 9 | |||
937 | 10 | def test_minon_master(self): | ||
938 | 11 | """Test master value in config.""" | ||
939 | 12 | out = self.get_data_file('minion') | ||
940 | 13 | self.assertIn('master: salt.mydomain.com', out) | ||
941 | 14 | |||
942 | 15 | def test_minion_pem(self): | ||
943 | 16 | """Test private key.""" | ||
944 | 17 | out = self.get_data_file('minion.pem') | ||
945 | 18 | self.assertIn('------BEGIN PRIVATE KEY------', out) | ||
946 | 19 | self.assertIn('<key data>', out) | ||
947 | 20 | self.assertIn('------END PRIVATE KEY-------', out) | ||
948 | 21 | |||
949 | 22 | def test_minion_pub(self): | ||
950 | 23 | """Test public key.""" | ||
951 | 24 | out = self.get_data_file('minion.pub') | ||
952 | 25 | self.assertIn('------BEGIN PUBLIC KEY-------', out) | ||
953 | 26 | self.assertIn('<key data>', out) | ||
954 | 27 | self.assertIn('------END PUBLIC KEY-------', out) | ||
955 | 28 | |||
956 | 29 | def test_grains(self): | ||
957 | 30 | """Test master value in config.""" | ||
958 | 31 | out = self.get_data_file('grains') | ||
959 | 32 | self.assertIn('role: web', out) | ||
960 | 33 | |||
961 | 34 | def test_minion_installed(self): | ||
962 | 35 | """Test if the salt-minion package is installed""" | ||
963 | 36 | self.assertPackageInstalled('salt-minion') | ||
964 | 37 | |||
965 | 38 | # vi: ts=4 expandtab | ||
966 | diff --git a/tests/cloud_tests/testcases/modules/salt_minion.yaml b/tests/cloud_tests/testcases/modules/salt_minion.yaml | |||
967 | 39 | deleted file mode 100644 | 0 | deleted file mode 100644 |
968 | index 9227147..0000000 | |||
969 | --- a/tests/cloud_tests/testcases/modules/salt_minion.yaml | |||
970 | +++ /dev/null | |||
971 | @@ -1,49 +0,0 @@ | |||
972 | 1 | # | ||
973 | 2 | # Create config for a salt minion | ||
974 | 3 | # | ||
975 | 4 | # 2016-11-17: Currently takes >60 seconds results in test failure | ||
976 | 5 | # | ||
977 | 6 | enabled: True | ||
978 | 7 | cloud_config: | | ||
979 | 8 | #cloud-config | ||
980 | 9 | salt_minion: | ||
981 | 10 | conf: | ||
982 | 11 | master: salt.mydomain.com | ||
983 | 12 | public_key: | | ||
984 | 13 | ------BEGIN PUBLIC KEY------- | ||
985 | 14 | <key data> | ||
986 | 15 | ------END PUBLIC KEY------- | ||
987 | 16 | private_key: | | ||
988 | 17 | ------BEGIN PRIVATE KEY------ | ||
989 | 18 | <key data> | ||
990 | 19 | ------END PRIVATE KEY------- | ||
991 | 20 | grains: | ||
992 | 21 | role: web | ||
993 | 22 | collect_scripts: | ||
994 | 23 | minion: | | ||
995 | 24 | #!/bin/bash | ||
996 | 25 | cat /etc/salt/minion | ||
997 | 26 | minion_id: | | ||
998 | 27 | #!/bin/bash | ||
999 | 28 | cat /etc/salt/minion_id | ||
1000 | 29 | minion.pem: | | ||
1001 | 30 | #!/bin/bash | ||
1002 | 31 | PRIV_KEYFILE=/etc/salt/pki/minion/minion.pem | ||
1003 | 32 | if [ ! -f $PRIV_KEYFILE ]; then | ||
1004 | 33 | # Bionic and later automatically moves /etc/salt/pki/minion/* | ||
1005 | 34 | PRIV_KEYFILE=/var/lib/salt/pki/minion/minion.pem | ||
1006 | 35 | fi | ||
1007 | 36 | cat $PRIV_KEYFILE | ||
1008 | 37 | minion.pub: | | ||
1009 | 38 | #!/bin/bash | ||
1010 | 39 | PUB_KEYFILE=/etc/salt/pki/minion/minion.pub | ||
1011 | 40 | if [ ! -f $PUB_KEYFILE ]; then | ||
1012 | 41 | # Bionic and later automatically moves /etc/salt/pki/minion/* | ||
1013 | 42 | PUB_KEYFILE=/var/lib/salt/pki/minion/minion.pub | ||
1014 | 43 | fi | ||
1015 | 44 | cat $PUB_KEYFILE | ||
1016 | 45 | grains: | | ||
1017 | 46 | #!/bin/bash | ||
1018 | 47 | cat /etc/salt/grains | ||
1019 | 48 | |||
1020 | 49 | # vi: ts=4 expandtab | ||
1021 | diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py | |||
1022 | index af9d3e1..26b2b93 100644 | |||
1023 | --- a/tests/unittests/test_datasource/test_azure_helper.py | |||
1024 | +++ b/tests/unittests/test_datasource/test_azure_helper.py | |||
1025 | @@ -85,7 +85,9 @@ class TestFindEndpoint(CiTestCase): | |||
1026 | 85 | self.dhcp_options.return_value = {"eth0": {"unknown_245": "5:4:3:2"}} | 85 | self.dhcp_options.return_value = {"eth0": {"unknown_245": "5:4:3:2"}} |
1027 | 86 | self.assertEqual('5.4.3.2', wa_shim.find_endpoint(None)) | 86 | self.assertEqual('5.4.3.2', wa_shim.find_endpoint(None)) |
1028 | 87 | 87 | ||
1030 | 88 | def test_latest_lease_used(self): | 88 | @mock.patch('cloudinit.sources.helpers.azure.util.is_FreeBSD') |
1031 | 89 | def test_latest_lease_used(self, m_is_freebsd): | ||
1032 | 90 | m_is_freebsd.return_value = False # To avoid hitting load_file | ||
1033 | 89 | encoded_addresses = ['5:4:3:2', '4:3:2:1'] | 91 | encoded_addresses = ['5:4:3:2', '4:3:2:1'] |
1034 | 90 | file_content = '\n'.join([self._build_lease_content(encoded_address) | 92 | file_content = '\n'.join([self._build_lease_content(encoded_address) |
1035 | 91 | for encoded_address in encoded_addresses]) | 93 | for encoded_address in encoded_addresses]) |
1036 | diff --git a/tools/run-container b/tools/run-container | |||
1037 | index 499e85b..6dedb75 100755 | |||
1038 | --- a/tools/run-container | |||
1039 | +++ b/tools/run-container | |||
1040 | @@ -418,7 +418,7 @@ main() { | |||
1041 | 418 | { bad_Usage; return; } | 418 | { bad_Usage; return; } |
1042 | 419 | 419 | ||
1043 | 420 | local cur="" next="" | 420 | local cur="" next="" |
1045 | 421 | local package="" source_package="" unittest="" name="" | 421 | local package=false srcpackage=false unittest="" name="" |
1046 | 422 | local dirty=false pyexe="auto" artifact_d="." | 422 | local dirty=false pyexe="auto" artifact_d="." |
1047 | 423 | 423 | ||
1048 | 424 | while [ $# -ne 0 ]; do | 424 | while [ $# -ne 0 ]; do |
1049 | @@ -430,8 +430,8 @@ main() { | |||
1050 | 430 | -k|--keep) KEEP=true;; | 430 | -k|--keep) KEEP=true;; |
1051 | 431 | -n|--name) name="$next"; shift;; | 431 | -n|--name) name="$next"; shift;; |
1052 | 432 | --pyexe) pyexe=$next; shift;; | 432 | --pyexe) pyexe=$next; shift;; |
1055 | 433 | -p|--package) package=1;; | 433 | -p|--package) package=true;; |
1056 | 434 | -s|--source-package) source_package=1;; | 434 | -s|--source-package) srcpackage=true;; |
1057 | 435 | -u|--unittest) unittest=1;; | 435 | -u|--unittest) unittest=1;; |
1058 | 436 | -v|--verbose) VERBOSITY=$((VERBOSITY+1));; | 436 | -v|--verbose) VERBOSITY=$((VERBOSITY+1));; |
1059 | 437 | --) shift; break;; | 437 | --) shift; break;; |
1060 | @@ -529,8 +529,8 @@ main() { | |||
1061 | 529 | build_srcpkg="./packages/brpm $distflag --srpm" | 529 | build_srcpkg="./packages/brpm $distflag --srpm" |
1062 | 530 | pkg_ext=".rpm";; | 530 | pkg_ext=".rpm";; |
1063 | 531 | esac | 531 | esac |
1066 | 532 | if [ -n "$source_package" ]; then | 532 | if [ "$srcpackage" = "true" ]; then |
1067 | 533 | [ -n "$build_pkg" ] || { | 533 | [ -n "$build_srcpkg" ] || { |
1068 | 534 | error "Unknown package command for $OS_NAME" | 534 | error "Unknown package command for $OS_NAME" |
1069 | 535 | return 1 | 535 | return 1 |
1070 | 536 | } | 536 | } |
1071 | @@ -542,19 +542,21 @@ main() { | |||
1072 | 542 | } | 542 | } |
1073 | 543 | fi | 543 | fi |
1074 | 544 | 544 | ||
1077 | 545 | if [ -n "$package" ]; then | 545 | if [ "$package" = "true" ]; then |
1078 | 546 | [ -n "$build_srcpkg" ] || { | 546 | [ -n "$build_pkg" ] || { |
1079 | 547 | error "Unknown build source command for $OS_NAME" | 547 | error "Unknown build source command for $OS_NAME" |
1080 | 548 | return 1 | 548 | return 1 |
1081 | 549 | } | 549 | } |
1082 | 550 | debug 1 "building binary package with $build_pkg." | 550 | debug 1 "building binary package with $build_pkg." |
1083 | 551 | # shellcheck disable=SC2086 | ||
1084 | 551 | inside_as_cd "$name" "$user" "$cdir" $pyexe $build_pkg || { | 552 | inside_as_cd "$name" "$user" "$cdir" $pyexe $build_pkg || { |
1085 | 552 | errorrc "failed: $build_pkg"; | 553 | errorrc "failed: $build_pkg"; |
1086 | 553 | errors[${#errors[@]}]="binary package" | 554 | errors[${#errors[@]}]="binary package" |
1087 | 554 | } | 555 | } |
1088 | 555 | fi | 556 | fi |
1089 | 556 | 557 | ||
1091 | 557 | if [ -n "$artifact_d" ]; then | 558 | if [ -n "$artifact_d" ] && |
1092 | 559 | [ "$package" = "true" -o "$srcpackage" = "true" ]; then | ||
1093 | 558 | local art="" | 560 | local art="" |
1094 | 559 | artifact_d="${artifact_d%/}/" | 561 | artifact_d="${artifact_d%/}/" |
1095 | 560 | [ -d "${artifact_d}" ] || mkdir -p "$artifact_d" || { | 562 | [ -d "${artifact_d}" ] || mkdir -p "$artifact_d" || { |
PASSED: Continuous integration, rev:87041591ec51e779429f16454f5b406214bc3059
https://jenkins.ubuntu.com/server/job/cloud-init-ci/152/
Executed test runs:
SUCCESS: Checkout
SUCCESS: Unit & Style Tests
SUCCESS: Ubuntu LTS: Build
SUCCESS: Ubuntu LTS: Integration
SUCCESS: MAAS Compatibility Testing
IN_PROGRESS: Declarative: Post Actions
Click here to trigger a rebuild:
https://jenkins.ubuntu.com/server/job/cloud-init-ci/152/rebuild