Merge lp:~freyes/charms/xenial/rsyslog/lp1694270 into lp:~bigdata-dev/charms/xenial/rsyslog/trunk
- Xenial Xerus (16.04)
- lp1694270
- Merge into trunk
Status: | Needs review |
---|---|
Proposed branch: | lp:~freyes/charms/xenial/rsyslog/lp1694270 |
Merge into: | lp:~bigdata-dev/charms/xenial/rsyslog/trunk |
Diff against target: |
1728 lines (+1013/-147) 17 files modified
hooks/charmhelpers/__init__.py (+61/-0) hooks/charmhelpers/contrib/charmsupport/nrpe.py (+29/-10) hooks/charmhelpers/core/hookenv.py (+47/-0) hooks/charmhelpers/core/host.py (+225/-35) hooks/charmhelpers/core/host_factory/centos.py (+16/-0) hooks/charmhelpers/core/host_factory/ubuntu.py (+33/-0) hooks/charmhelpers/core/kernel_factory/ubuntu.py (+1/-1) hooks/charmhelpers/core/strutils.py (+53/-0) hooks/charmhelpers/fetch/__init__.py (+17/-9) hooks/charmhelpers/fetch/centos.py (+1/-1) hooks/charmhelpers/fetch/snap.py (+122/-0) hooks/charmhelpers/fetch/ubuntu.py (+314/-82) hooks/charmhelpers/osplatform.py (+6/-0) hooks/hooks.py (+8/-0) templates/rsyslog.conf (+13/-0) tox.ini (+1/-1) unit_tests/test_hooks.py (+66/-8) |
To merge this branch: | bzr merge lp:~freyes/charms/xenial/rsyslog/lp1694270 |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Jorge Niedbalski | Pending | ||
Juju Big Data Development | Pending | ||
Review via email: mp+325877@code.launchpad.net |
This proposal supersedes a proposal from 2017-05-29.
Commit message
Description of the change
Use 'rotate' action from /etc/init.d/rsyslog
'reload' command does not exist in Xenial, the rsyslog package uses the
'rotate' functionality implemented in the sysvinit script to close all
open file descriptors.
Jorge Niedbalski (niedbalski) wrote : Posted in a previous version of this proposal | # |
Felipe Reyes (freyes) wrote : Posted in a previous version of this proposal | # |
Jorge,
Pushed some changes to address trusty. http://
Now this patch uses CompareHostReleases
Thanks
Unmerged revisions
- 40. By Felipe Reyes
-
Remove print() statement left from testing
- 39. By Felipe Reyes
-
Use invoke-rc.d only for >=Xenial and add unit tests
- 38. By Felipe Reyes
-
Add py27 to tox
This will prevent regressions with python2.7 needed to run in trusty
- 37. By Felipe Reyes
-
Sync up charm-helpers
- 36. By Felipe Reyes
-
Replace space indentation with tab
- 35. By Felipe Reyes
-
Use 'rotate' action from /etc/init.d/rsyslog
'reload' command does not exist in Xenial, the rsyslog package uses the
'rotate' functionality implemented in the sysvinit script to close all
open file descriptors. Closes-Bug: 1694270
Preview Diff
1 | === modified file 'hooks/charmhelpers/__init__.py' |
2 | --- hooks/charmhelpers/__init__.py 2016-10-26 18:19:59 +0000 |
3 | +++ hooks/charmhelpers/__init__.py 2017-06-16 22:43:26 +0000 |
4 | @@ -14,6 +14,11 @@ |
5 | |
6 | # Bootstrap charm-helpers, installing its dependencies if necessary using |
7 | # only standard libraries. |
8 | +from __future__ import print_function |
9 | +from __future__ import absolute_import |
10 | + |
11 | +import functools |
12 | +import inspect |
13 | import subprocess |
14 | import sys |
15 | |
16 | @@ -34,3 +39,59 @@ |
17 | else: |
18 | subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) |
19 | import yaml # flake8: noqa |
20 | + |
21 | + |
22 | +# Holds a list of mapping of mangled function names that have been deprecated |
23 | +# using the @deprecate decorator below. This is so that the warning is only |
24 | +# printed once for each usage of the function. |
25 | +__deprecated_functions = {} |
26 | + |
27 | + |
28 | +def deprecate(warning, date=None, log=None): |
29 | + """Add a deprecation warning the first time the function is used. |
30 | + The date, which is a string in semi-ISO8601 format, indicates the year-month |
31 | + that the function is officially going to be removed. |
32 | + |
33 | + usage: |
34 | + |
35 | + @deprecate('use core/fetch/add_source() instead', '2017-04') |
36 | + def contributed_add_source_thing(...): |
37 | + ... |
38 | + |
39 | + And it then prints to the log ONCE that the function is deprecated. |
40 | + The reason for passing the logging function (log) is so that hookenv.log |
41 | + can be used for a charm if needed. |
42 | + |
43 | + :param warning: String to indicate where it has moved to. |
44 | + :param date: optional string, in YYYY-MM format to indicate when the |
45 | + function will definitely (probably) be removed. |
46 | + :param log: The log function to call to log. If not, logs to stdout |
47 | + """ |
48 | + def wrap(f): |
49 | + |
50 | + @functools.wraps(f) |
51 | + def wrapped_f(*args, **kwargs): |
52 | + try: |
53 | + module = inspect.getmodule(f) |
54 | + file = inspect.getsourcefile(f) |
55 | + lines = inspect.getsourcelines(f) |
56 | + f_name = "{}-{}-{}..{}-{}".format( |
57 | + module.__name__, file, lines[0], lines[-1], f.__name__) |
58 | + except (IOError, TypeError): |
59 | + # assume it was local, so just use the name of the function |
60 | + f_name = f.__name__ |
61 | + if f_name not in __deprecated_functions: |
62 | + __deprecated_functions[f_name] = True |
63 | + s = "DEPRECATION WARNING: Function {} is being removed".format( |
64 | + f.__name__) |
65 | + if date: |
66 | + s = "{} on/around {}".format(s, date) |
67 | + if warning: |
68 | + s = "{} : {}".format(s, warning) |
69 | + if log: |
70 | + log(s) |
71 | + else: |
72 | + print(s) |
73 | + return f(*args, **kwargs) |
74 | + return wrapped_f |
75 | + return wrap |
76 | |
77 | === modified file 'hooks/charmhelpers/contrib/charmsupport/nrpe.py' |
78 | --- hooks/charmhelpers/contrib/charmsupport/nrpe.py 2016-10-26 18:19:59 +0000 |
79 | +++ hooks/charmhelpers/contrib/charmsupport/nrpe.py 2017-06-16 22:43:26 +0000 |
80 | @@ -193,6 +193,13 @@ |
81 | nrpe_check_file = self._get_check_filename() |
82 | with open(nrpe_check_file, 'w') as nrpe_check_config: |
83 | nrpe_check_config.write("# check {}\n".format(self.shortname)) |
84 | + if nagios_servicegroups: |
85 | + nrpe_check_config.write( |
86 | + "# The following header was added automatically by juju\n") |
87 | + nrpe_check_config.write( |
88 | + "# Modifying it will affect nagios monitoring and alerting\n") |
89 | + nrpe_check_config.write( |
90 | + "# servicegroups: {}\n".format(nagios_servicegroups)) |
91 | nrpe_check_config.write("command[{}]={}\n".format( |
92 | self.command, self.check_cmd)) |
93 | |
94 | @@ -227,6 +234,7 @@ |
95 | nagios_logdir = '/var/log/nagios' |
96 | nagios_exportdir = '/var/lib/nagios/export' |
97 | nrpe_confdir = '/etc/nagios/nrpe.d' |
98 | + homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server |
99 | |
100 | def __init__(self, hostname=None, primary=True): |
101 | super(NRPE, self).__init__() |
102 | @@ -338,13 +346,14 @@ |
103 | return unit |
104 | |
105 | |
106 | -def add_init_service_checks(nrpe, services, unit_name): |
107 | +def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): |
108 | """ |
109 | Add checks for each service in list |
110 | |
111 | :param NRPE nrpe: NRPE object to add check to |
112 | :param list services: List of services to check |
113 | :param str unit_name: Unit name to use in check description |
114 | + :param bool immediate_check: For sysv init, run the service check immediately |
115 | """ |
116 | for svc in services: |
117 | # Don't add a check for these services from neutron-gateway |
118 | @@ -368,21 +377,31 @@ |
119 | ) |
120 | elif os.path.exists(sysv_init): |
121 | cronpath = '/etc/cron.d/nagios-service-check-%s' % svc |
122 | - cron_file = ('*/5 * * * * root ' |
123 | - '/usr/local/lib/nagios/plugins/check_exit_status.pl ' |
124 | - '-s /etc/init.d/%s status > ' |
125 | - '/var/lib/nagios/service-check-%s.txt\n' % (svc, |
126 | - svc) |
127 | - ) |
128 | + checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc) |
129 | + croncmd = ( |
130 | + '/usr/local/lib/nagios/plugins/check_exit_status.pl ' |
131 | + '-e -s /etc/init.d/%s status' % svc |
132 | + ) |
133 | + cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath) |
134 | f = open(cronpath, 'w') |
135 | f.write(cron_file) |
136 | f.close() |
137 | nrpe.add_check( |
138 | shortname=svc, |
139 | - description='process check {%s}' % unit_name, |
140 | - check_cmd='check_status_file.py -f ' |
141 | - '/var/lib/nagios/service-check-%s.txt' % svc, |
142 | + description='service check {%s}' % unit_name, |
143 | + check_cmd='check_status_file.py -f %s' % checkpath, |
144 | ) |
145 | + # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail |
146 | + # (LP: #1670223). |
147 | + if immediate_check and os.path.isdir(nrpe.homedir): |
148 | + f = open(checkpath, 'w') |
149 | + subprocess.call( |
150 | + croncmd.split(), |
151 | + stdout=f, |
152 | + stderr=subprocess.STDOUT |
153 | + ) |
154 | + f.close() |
155 | + os.chmod(checkpath, 0o644) |
156 | |
157 | |
158 | def copy_nrpe_checks(): |
159 | |
160 | === modified file 'hooks/charmhelpers/core/hookenv.py' |
161 | --- hooks/charmhelpers/core/hookenv.py 2016-10-26 18:19:59 +0000 |
162 | +++ hooks/charmhelpers/core/hookenv.py 2017-06-16 22:43:26 +0000 |
163 | @@ -332,6 +332,8 @@ |
164 | config_cmd_line = ['config-get'] |
165 | if scope is not None: |
166 | config_cmd_line.append(scope) |
167 | + else: |
168 | + config_cmd_line.append('--all') |
169 | config_cmd_line.append('--format=json') |
170 | try: |
171 | config_data = json.loads( |
172 | @@ -614,6 +616,20 @@ |
173 | subprocess.check_call(_args) |
174 | |
175 | |
176 | +def open_ports(start, end, protocol="TCP"): |
177 | + """Opens a range of service network ports""" |
178 | + _args = ['open-port'] |
179 | + _args.append('{}-{}/{}'.format(start, end, protocol)) |
180 | + subprocess.check_call(_args) |
181 | + |
182 | + |
183 | +def close_ports(start, end, protocol="TCP"): |
184 | + """Close a range of service network ports""" |
185 | + _args = ['close-port'] |
186 | + _args.append('{}-{}/{}'.format(start, end, protocol)) |
187 | + subprocess.check_call(_args) |
188 | + |
189 | + |
190 | @cached |
191 | def unit_get(attribute): |
192 | """Get the unit ID for the remote unit""" |
193 | @@ -1019,3 +1035,34 @@ |
194 | ''' |
195 | cmd = ['network-get', '--primary-address', binding] |
196 | return subprocess.check_output(cmd).decode('UTF-8').strip() |
197 | + |
198 | + |
199 | +def add_metric(*args, **kwargs): |
200 | + """Add metric values. Values may be expressed with keyword arguments. For |
201 | + metric names containing dashes, these may be expressed as one or more |
202 | + 'key=value' positional arguments. May only be called from the collect-metrics |
203 | + hook.""" |
204 | + _args = ['add-metric'] |
205 | + _kvpairs = [] |
206 | + _kvpairs.extend(args) |
207 | + _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) |
208 | + _args.extend(sorted(_kvpairs)) |
209 | + try: |
210 | + subprocess.check_call(_args) |
211 | + return |
212 | + except EnvironmentError as e: |
213 | + if e.errno != errno.ENOENT: |
214 | + raise |
215 | + log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) |
216 | + log(log_message, level='INFO') |
217 | + |
218 | + |
219 | +def meter_status(): |
220 | + """Get the meter status, if running in the meter-status-changed hook.""" |
221 | + return os.environ.get('JUJU_METER_STATUS') |
222 | + |
223 | + |
224 | +def meter_info(): |
225 | + """Get the meter status information, if running in the meter-status-changed |
226 | + hook.""" |
227 | + return os.environ.get('JUJU_METER_INFO') |
228 | |
229 | === modified file 'hooks/charmhelpers/core/host.py' |
230 | --- hooks/charmhelpers/core/host.py 2016-10-26 18:19:59 +0000 |
231 | +++ hooks/charmhelpers/core/host.py 2017-06-16 22:43:26 +0000 |
232 | @@ -45,6 +45,7 @@ |
233 | add_new_group, |
234 | lsb_release, |
235 | cmp_pkgrevno, |
236 | + CompareHostReleases, |
237 | ) # flake8: noqa -- ignore F401 for this import |
238 | elif __platform__ == "centos": |
239 | from charmhelpers.core.host_factory.centos import ( |
240 | @@ -52,44 +53,146 @@ |
241 | add_new_group, |
242 | lsb_release, |
243 | cmp_pkgrevno, |
244 | + CompareHostReleases, |
245 | ) # flake8: noqa -- ignore F401 for this import |
246 | |
247 | - |
248 | -def service_start(service_name): |
249 | - """Start a system service""" |
250 | - return service('start', service_name) |
251 | - |
252 | - |
253 | -def service_stop(service_name): |
254 | - """Stop a system service""" |
255 | - return service('stop', service_name) |
256 | - |
257 | - |
258 | -def service_restart(service_name): |
259 | - """Restart a system service""" |
260 | +UPDATEDB_PATH = '/etc/updatedb.conf' |
261 | + |
262 | +def service_start(service_name, **kwargs): |
263 | + """Start a system service. |
264 | + |
265 | + The specified service name is managed via the system level init system. |
266 | + Some init systems (e.g. upstart) require that additional arguments be |
267 | + provided in order to directly control service instances whereas other init |
268 | + systems allow for addressing instances of a service directly by name (e.g. |
269 | + systemd). |
270 | + |
271 | + The kwargs allow for the additional parameters to be passed to underlying |
272 | + init systems for those systems which require/allow for them. For example, |
273 | + the ceph-osd upstart script requires the id parameter to be passed along |
274 | + in order to identify which running daemon should be reloaded. The follow- |
275 | + ing example stops the ceph-osd service for instance id=4: |
276 | + |
277 | + service_stop('ceph-osd', id=4) |
278 | + |
279 | + :param service_name: the name of the service to stop |
280 | + :param **kwargs: additional parameters to pass to the init system when |
281 | + managing services. These will be passed as key=value |
282 | + parameters to the init system's commandline. kwargs |
283 | + are ignored for systemd enabled systems. |
284 | + """ |
285 | + return service('start', service_name, **kwargs) |
286 | + |
287 | + |
288 | +def service_stop(service_name, **kwargs): |
289 | + """Stop a system service. |
290 | + |
291 | + The specified service name is managed via the system level init system. |
292 | + Some init systems (e.g. upstart) require that additional arguments be |
293 | + provided in order to directly control service instances whereas other init |
294 | + systems allow for addressing instances of a service directly by name (e.g. |
295 | + systemd). |
296 | + |
297 | + The kwargs allow for the additional parameters to be passed to underlying |
298 | + init systems for those systems which require/allow for them. For example, |
299 | + the ceph-osd upstart script requires the id parameter to be passed along |
300 | + in order to identify which running daemon should be reloaded. The follow- |
301 | + ing example stops the ceph-osd service for instance id=4: |
302 | + |
303 | + service_stop('ceph-osd', id=4) |
304 | + |
305 | + :param service_name: the name of the service to stop |
306 | + :param **kwargs: additional parameters to pass to the init system when |
307 | + managing services. These will be passed as key=value |
308 | + parameters to the init system's commandline. kwargs |
309 | + are ignored for systemd enabled systems. |
310 | + """ |
311 | + return service('stop', service_name, **kwargs) |
312 | + |
313 | + |
314 | +def service_restart(service_name, **kwargs): |
315 | + """Restart a system service. |
316 | + |
317 | + The specified service name is managed via the system level init system. |
318 | + Some init systems (e.g. upstart) require that additional arguments be |
319 | + provided in order to directly control service instances whereas other init |
320 | + systems allow for addressing instances of a service directly by name (e.g. |
321 | + systemd). |
322 | + |
323 | + The kwargs allow for the additional parameters to be passed to underlying |
324 | + init systems for those systems which require/allow for them. For example, |
325 | + the ceph-osd upstart script requires the id parameter to be passed along |
326 | + in order to identify which running daemon should be restarted. The follow- |
327 | + ing example restarts the ceph-osd service for instance id=4: |
328 | + |
329 | + service_restart('ceph-osd', id=4) |
330 | + |
331 | + :param service_name: the name of the service to restart |
332 | + :param **kwargs: additional parameters to pass to the init system when |
333 | + managing services. These will be passed as key=value |
334 | + parameters to the init system's commandline. kwargs |
335 | + are ignored for init systems not allowing additional |
336 | + parameters via the commandline (systemd). |
337 | + """ |
338 | return service('restart', service_name) |
339 | |
340 | |
341 | -def service_reload(service_name, restart_on_failure=False): |
342 | +def service_reload(service_name, restart_on_failure=False, **kwargs): |
343 | """Reload a system service, optionally falling back to restart if |
344 | - reload fails""" |
345 | - service_result = service('reload', service_name) |
346 | + reload fails. |
347 | + |
348 | + The specified service name is managed via the system level init system. |
349 | + Some init systems (e.g. upstart) require that additional arguments be |
350 | + provided in order to directly control service instances whereas other init |
351 | + systems allow for addressing instances of a service directly by name (e.g. |
352 | + systemd). |
353 | + |
354 | + The kwargs allow for the additional parameters to be passed to underlying |
355 | + init systems for those systems which require/allow for them. For example, |
356 | + the ceph-osd upstart script requires the id parameter to be passed along |
357 | + in order to identify which running daemon should be reloaded. The follow- |
358 | + ing example restarts the ceph-osd service for instance id=4: |
359 | + |
360 | + service_reload('ceph-osd', id=4) |
361 | + |
362 | + :param service_name: the name of the service to reload |
363 | + :param restart_on_failure: boolean indicating whether to fallback to a |
364 | + restart if the reload fails. |
365 | + :param **kwargs: additional parameters to pass to the init system when |
366 | + managing services. These will be passed as key=value |
367 | + parameters to the init system's commandline. kwargs |
368 | + are ignored for init systems not allowing additional |
369 | + parameters via the commandline (systemd). |
370 | + """ |
371 | + service_result = service('reload', service_name, **kwargs) |
372 | if not service_result and restart_on_failure: |
373 | - service_result = service('restart', service_name) |
374 | + service_result = service('restart', service_name, **kwargs) |
375 | return service_result |
376 | |
377 | |
378 | -def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): |
379 | +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", |
380 | + **kwargs): |
381 | """Pause a system service. |
382 | |
383 | - Stop it, and prevent it from starting again at boot.""" |
384 | + Stop it, and prevent it from starting again at boot. |
385 | + |
386 | + :param service_name: the name of the service to pause |
387 | + :param init_dir: path to the upstart init directory |
388 | + :param initd_dir: path to the sysv init directory |
389 | + :param **kwargs: additional parameters to pass to the init system when |
390 | + managing services. These will be passed as key=value |
391 | + parameters to the init system's commandline. kwargs |
392 | + are ignored for init systems which do not support |
393 | + key=value arguments via the commandline. |
394 | + """ |
395 | stopped = True |
396 | - if service_running(service_name): |
397 | - stopped = service_stop(service_name) |
398 | + if service_running(service_name, **kwargs): |
399 | + stopped = service_stop(service_name, **kwargs) |
400 | upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) |
401 | sysv_file = os.path.join(initd_dir, service_name) |
402 | if init_is_systemd(): |
403 | service('disable', service_name) |
404 | + service('mask', service_name) |
405 | elif os.path.exists(upstart_file): |
406 | override_path = os.path.join( |
407 | init_dir, '{}.override'.format(service_name)) |
408 | @@ -106,13 +209,23 @@ |
409 | |
410 | |
411 | def service_resume(service_name, init_dir="/etc/init", |
412 | - initd_dir="/etc/init.d"): |
413 | + initd_dir="/etc/init.d", **kwargs): |
414 | """Resume a system service. |
415 | |
416 | - Reenable starting again at boot. Start the service""" |
417 | + Reenable starting again at boot. Start the service. |
418 | + |
419 | + :param service_name: the name of the service to resume |
420 | + :param init_dir: the path to the init dir |
421 | + :param initd dir: the path to the initd dir |
422 | + :param **kwargs: additional parameters to pass to the init system when |
423 | + managing services. These will be passed as key=value |
424 | + parameters to the init system's commandline. kwargs |
425 | + are ignored for systemd enabled systems. |
426 | + """ |
427 | upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) |
428 | sysv_file = os.path.join(initd_dir, service_name) |
429 | if init_is_systemd(): |
430 | + service('unmask', service_name) |
431 | service('enable', service_name) |
432 | elif os.path.exists(upstart_file): |
433 | override_path = os.path.join( |
434 | @@ -126,19 +239,28 @@ |
435 | "Unable to detect {0} as SystemD, Upstart {1} or" |
436 | " SysV {2}".format( |
437 | service_name, upstart_file, sysv_file)) |
438 | + started = service_running(service_name, **kwargs) |
439 | |
440 | - started = service_running(service_name) |
441 | if not started: |
442 | - started = service_start(service_name) |
443 | + started = service_start(service_name, **kwargs) |
444 | return started |
445 | |
446 | |
447 | -def service(action, service_name): |
448 | - """Control a system service""" |
449 | +def service(action, service_name, **kwargs): |
450 | + """Control a system service. |
451 | + |
452 | + :param action: the action to take on the service |
453 | + :param service_name: the name of the service to perform the action on |
454 | + :param **kwargs: additional params to be passed to the service command in |
455 | + the form of key=value. |
456 | + """ |
457 | if init_is_systemd(): |
458 | cmd = ['systemctl', action, service_name] |
459 | else: |
460 | cmd = ['service', service_name, action] |
461 | + for key, value in six.iteritems(kwargs): |
462 | + parameter = '%s=%s' % (key, value) |
463 | + cmd.append(parameter) |
464 | return subprocess.call(cmd) == 0 |
465 | |
466 | |
467 | @@ -146,15 +268,26 @@ |
468 | _INIT_D_CONF = "/etc/init.d/{}" |
469 | |
470 | |
471 | -def service_running(service_name): |
472 | - """Determine whether a system service is running""" |
473 | +def service_running(service_name, **kwargs): |
474 | + """Determine whether a system service is running. |
475 | + |
476 | + :param service_name: the name of the service |
477 | + :param **kwargs: additional args to pass to the service command. This is |
478 | + used to pass additional key=value arguments to the |
479 | + service command line for managing specific instance |
480 | + units (e.g. service ceph-osd status id=2). The kwargs |
481 | + are ignored in systemd services. |
482 | + """ |
483 | if init_is_systemd(): |
484 | return service('is-active', service_name) |
485 | else: |
486 | if os.path.exists(_UPSTART_CONF.format(service_name)): |
487 | try: |
488 | - output = subprocess.check_output( |
489 | - ['status', service_name], |
490 | + cmd = ['status', service_name] |
491 | + for key, value in six.iteritems(kwargs): |
492 | + parameter = '%s=%s' % (key, value) |
493 | + cmd.append(parameter) |
494 | + output = subprocess.check_output(cmd, |
495 | stderr=subprocess.STDOUT).decode('UTF-8') |
496 | except subprocess.CalledProcessError: |
497 | return False |
498 | @@ -177,6 +310,8 @@ |
499 | |
500 | def init_is_systemd(): |
501 | """Return True if the host system uses systemd, False otherwise.""" |
502 | + if lsb_release()['DISTRIB_CODENAME'] == 'trusty': |
503 | + return False |
504 | return os.path.isdir(SYSTEMD_SYSTEM) |
505 | |
506 | |
507 | @@ -306,15 +441,17 @@ |
508 | subprocess.check_call(cmd) |
509 | |
510 | |
511 | -def rsync(from_path, to_path, flags='-r', options=None): |
512 | +def rsync(from_path, to_path, flags='-r', options=None, timeout=None): |
513 | """Replicate the contents of a path""" |
514 | options = options or ['--delete', '--executability'] |
515 | cmd = ['/usr/bin/rsync', flags] |
516 | + if timeout: |
517 | + cmd = ['timeout', str(timeout)] + cmd |
518 | cmd.extend(options) |
519 | cmd.append(from_path) |
520 | cmd.append(to_path) |
521 | log(" ".join(cmd)) |
522 | - return subprocess.check_output(cmd).decode('UTF-8').strip() |
523 | + return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() |
524 | |
525 | |
526 | def symlink(source, destination): |
527 | @@ -684,7 +821,7 @@ |
528 | :param str path: The string path to start changing ownership. |
529 | :param str owner: The owner string to use when looking up the uid. |
530 | :param str group: The group string to use when looking up the gid. |
531 | - :param bool follow_links: Also Chown links if True |
532 | + :param bool follow_links: Also follow and chown links if True |
533 | :param bool chowntopdir: Also chown path itself if True |
534 | """ |
535 | uid = pwd.getpwnam(owner).pw_uid |
536 | @@ -698,7 +835,7 @@ |
537 | broken_symlink = os.path.lexists(path) and not os.path.exists(path) |
538 | if not broken_symlink: |
539 | chown(path, uid, gid) |
540 | - for root, dirs, files in os.walk(path): |
541 | + for root, dirs, files in os.walk(path, followlinks=follow_links): |
542 | for name in dirs + files: |
543 | full = os.path.join(root, name) |
544 | broken_symlink = os.path.lexists(full) and not os.path.exists(full) |
545 | @@ -718,6 +855,20 @@ |
546 | chownr(path, owner, group, follow_links=False) |
547 | |
548 | |
549 | +def owner(path): |
550 | + """Returns a tuple containing the username & groupname owning the path. |
551 | + |
552 | + :param str path: the string path to retrieve the ownership |
553 | + :return tuple(str, str): A (username, groupname) tuple containing the |
554 | + name of the user and group owning the path. |
555 | + :raises OSError: if the specified path does not exist |
556 | + """ |
557 | + stat = os.stat(path) |
558 | + username = pwd.getpwuid(stat.st_uid)[0] |
559 | + groupname = grp.getgrgid(stat.st_gid)[0] |
560 | + return username, groupname |
561 | + |
562 | + |
563 | def get_total_ram(): |
564 | """The total amount of system RAM in bytes. |
565 | |
566 | @@ -732,3 +883,42 @@ |
567 | assert unit == 'kB', 'Unknown unit' |
568 | return int(value) * 1024 # Classic, not KiB. |
569 | raise NotImplementedError() |
570 | + |
571 | + |
572 | +UPSTART_CONTAINER_TYPE = '/run/container_type' |
573 | + |
574 | + |
575 | +def is_container(): |
576 | + """Determine whether unit is running in a container |
577 | + |
578 | + @return: boolean indicating if unit is in a container |
579 | + """ |
580 | + if init_is_systemd(): |
581 | + # Detect using systemd-detect-virt |
582 | + return subprocess.call(['systemd-detect-virt', |
583 | + '--container']) == 0 |
584 | + else: |
585 | + # Detect using upstart container file marker |
586 | + return os.path.exists(UPSTART_CONTAINER_TYPE) |
587 | + |
588 | + |
589 | +def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): |
590 | + with open(updatedb_path, 'r+') as f_id: |
591 | + updatedb_text = f_id.read() |
592 | + output = updatedb(updatedb_text, path) |
593 | + f_id.seek(0) |
594 | + f_id.write(output) |
595 | + f_id.truncate() |
596 | + |
597 | + |
598 | +def updatedb(updatedb_text, new_path): |
599 | + lines = [line for line in updatedb_text.split("\n")] |
600 | + for i, line in enumerate(lines): |
601 | + if line.startswith("PRUNEPATHS="): |
602 | + paths_line = line.split("=")[1].replace('"', '') |
603 | + paths = paths_line.split(" ") |
604 | + if new_path not in paths: |
605 | + paths.append(new_path) |
606 | + lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) |
607 | + output = "\n".join(lines) |
608 | + return output |
609 | |
610 | === modified file 'hooks/charmhelpers/core/host_factory/centos.py' |
611 | --- hooks/charmhelpers/core/host_factory/centos.py 2016-10-26 18:19:59 +0000 |
612 | +++ hooks/charmhelpers/core/host_factory/centos.py 2017-06-16 22:43:26 +0000 |
613 | @@ -2,6 +2,22 @@ |
614 | import yum |
615 | import os |
616 | |
617 | +from charmhelpers.core.strutils import BasicStringComparator |
618 | + |
619 | + |
620 | +class CompareHostReleases(BasicStringComparator): |
621 | + """Provide comparisons of Host releases. |
622 | + |
623 | + Use in the form of |
624 | + |
625 | + if CompareHostReleases(release) > 'trusty': |
626 | + # do something with mitaka |
627 | + """ |
628 | + |
629 | + def __init__(self, item): |
630 | + raise NotImplementedError( |
631 | + "CompareHostReleases() is not implemented for CentOS") |
632 | + |
633 | |
634 | def service_available(service_name): |
635 | # """Determine whether a system service is available.""" |
636 | |
637 | === modified file 'hooks/charmhelpers/core/host_factory/ubuntu.py' |
638 | --- hooks/charmhelpers/core/host_factory/ubuntu.py 2016-10-26 18:19:59 +0000 |
639 | +++ hooks/charmhelpers/core/host_factory/ubuntu.py 2017-06-16 22:43:26 +0000 |
640 | @@ -1,5 +1,38 @@ |
641 | import subprocess |
642 | |
643 | +from charmhelpers.core.strutils import BasicStringComparator |
644 | + |
645 | + |
646 | +UBUNTU_RELEASES = ( |
647 | + 'lucid', |
648 | + 'maverick', |
649 | + 'natty', |
650 | + 'oneiric', |
651 | + 'precise', |
652 | + 'quantal', |
653 | + 'raring', |
654 | + 'saucy', |
655 | + 'trusty', |
656 | + 'utopic', |
657 | + 'vivid', |
658 | + 'wily', |
659 | + 'xenial', |
660 | + 'yakkety', |
661 | + 'zesty', |
662 | + 'artful', |
663 | +) |
664 | + |
665 | + |
666 | +class CompareHostReleases(BasicStringComparator): |
667 | + """Provide comparisons of Ubuntu releases. |
668 | + |
669 | + Use in the form of |
670 | + |
671 | + if CompareHostReleases(release) > 'trusty': |
672 | + # do something with a release newer than trusty |
673 | + """ |
674 | + _list = UBUNTU_RELEASES |
675 | + |
676 | |
677 | def service_available(service_name): |
678 | """Determine whether a system service is available""" |
679 | |
680 | === modified file 'hooks/charmhelpers/core/kernel_factory/ubuntu.py' |
681 | --- hooks/charmhelpers/core/kernel_factory/ubuntu.py 2016-10-26 18:19:59 +0000 |
682 | +++ hooks/charmhelpers/core/kernel_factory/ubuntu.py 2017-06-16 22:43:26 +0000 |
683 | @@ -5,7 +5,7 @@ |
684 | """Load a kernel module and configure for auto-load on reboot.""" |
685 | with open('/etc/modules', 'r+') as modules: |
686 | if module not in modules.read(): |
687 | - modules.write(module) |
688 | + modules.write(module + "\n") |
689 | |
690 | |
691 | def update_initramfs(version='all'): |
692 | |
693 | === modified file 'hooks/charmhelpers/core/strutils.py' |
694 | --- hooks/charmhelpers/core/strutils.py 2016-10-26 18:19:59 +0000 |
695 | +++ hooks/charmhelpers/core/strutils.py 2017-06-16 22:43:26 +0000 |
696 | @@ -68,3 +68,56 @@ |
697 | msg = "Unable to interpret string value '%s' as bytes" % (value) |
698 | raise ValueError(msg) |
699 | return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) |
700 | + |
701 | + |
702 | +class BasicStringComparator(object): |
703 | + """Provides a class that will compare strings from an iterator type object. |
704 | + Used to provide > and < comparisons on strings that may not necessarily be |
705 | + alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the |
706 | + z-wrap. |
707 | + """ |
708 | + |
709 | + _list = None |
710 | + |
711 | + def __init__(self, item): |
712 | + if self._list is None: |
713 | + raise Exception("Must define the _list in the class definition!") |
714 | + try: |
715 | + self.index = self._list.index(item) |
716 | + except Exception: |
717 | + raise KeyError("Item '{}' is not in list '{}'" |
718 | + .format(item, self._list)) |
719 | + |
720 | + def __eq__(self, other): |
721 | + assert isinstance(other, str) or isinstance(other, self.__class__) |
722 | + return self.index == self._list.index(other) |
723 | + |
724 | + def __ne__(self, other): |
725 | + return not self.__eq__(other) |
726 | + |
727 | + def __lt__(self, other): |
728 | + assert isinstance(other, str) or isinstance(other, self.__class__) |
729 | + return self.index < self._list.index(other) |
730 | + |
731 | + def __ge__(self, other): |
732 | + return not self.__lt__(other) |
733 | + |
734 | + def __gt__(self, other): |
735 | + assert isinstance(other, str) or isinstance(other, self.__class__) |
736 | + return self.index > self._list.index(other) |
737 | + |
738 | + def __le__(self, other): |
739 | + return not self.__gt__(other) |
740 | + |
741 | + def __str__(self): |
742 | + """Always give back the item at the index so it can be used in |
743 | + comparisons like: |
744 | + |
745 | + s_mitaka = CompareOpenStack('mitaka') |
746 | + s_newton = CompareOpenstack('newton') |
747 | + |
748 | + assert s_newton > s_mitaka |
749 | + |
750 | + @returns: <string> |
751 | + """ |
752 | + return self._list[self.index] |
753 | |
754 | === modified file 'hooks/charmhelpers/fetch/__init__.py' |
755 | --- hooks/charmhelpers/fetch/__init__.py 2016-10-26 18:19:59 +0000 |
756 | +++ hooks/charmhelpers/fetch/__init__.py 2017-06-16 22:43:26 +0000 |
757 | @@ -48,6 +48,13 @@ |
758 | pass |
759 | |
760 | |
761 | +class GPGKeyError(Exception): |
762 | + """Exception occurs when a GPG key cannot be fetched or used. The message |
763 | + indicates what the problem is. |
764 | + """ |
765 | + pass |
766 | + |
767 | + |
768 | class BaseFetchHandler(object): |
769 | |
770 | """Base class for FetchHandler implementations in fetch plugins""" |
771 | @@ -77,21 +84,22 @@ |
772 | fetch = importlib.import_module(module) |
773 | |
774 | filter_installed_packages = fetch.filter_installed_packages |
775 | -install = fetch.install |
776 | -upgrade = fetch.upgrade |
777 | -update = fetch.update |
778 | -purge = fetch.purge |
779 | +install = fetch.apt_install |
780 | +upgrade = fetch.apt_upgrade |
781 | +update = _fetch_update = fetch.apt_update |
782 | +purge = fetch.apt_purge |
783 | add_source = fetch.add_source |
784 | |
785 | if __platform__ == "ubuntu": |
786 | apt_cache = fetch.apt_cache |
787 | - apt_install = fetch.install |
788 | - apt_update = fetch.update |
789 | - apt_upgrade = fetch.upgrade |
790 | - apt_purge = fetch.purge |
791 | + apt_install = fetch.apt_install |
792 | + apt_update = fetch.apt_update |
793 | + apt_upgrade = fetch.apt_upgrade |
794 | + apt_purge = fetch.apt_purge |
795 | apt_mark = fetch.apt_mark |
796 | apt_hold = fetch.apt_hold |
797 | apt_unhold = fetch.apt_unhold |
798 | + import_key = fetch.import_key |
799 | get_upstream_version = fetch.get_upstream_version |
800 | elif __platform__ == "centos": |
801 | yum_search = fetch.yum_search |
802 | @@ -135,7 +143,7 @@ |
803 | for source, key in zip(sources, keys): |
804 | add_source(source, key) |
805 | if update: |
806 | - fetch.update(fatal=True) |
807 | + _fetch_update(fatal=True) |
808 | |
809 | |
810 | def install_remote(source, *args, **kwargs): |
811 | |
812 | === modified file 'hooks/charmhelpers/fetch/centos.py' |
813 | --- hooks/charmhelpers/fetch/centos.py 2016-10-26 18:19:59 +0000 |
814 | +++ hooks/charmhelpers/fetch/centos.py 2017-06-16 22:43:26 +0000 |
815 | @@ -132,7 +132,7 @@ |
816 | key_file.write(key) |
817 | key_file.flush() |
818 | key_file.seek(0) |
819 | - subprocess.check_call(['rpm', '--import', key_file]) |
820 | + subprocess.check_call(['rpm', '--import', key_file.name]) |
821 | else: |
822 | subprocess.check_call(['rpm', '--import', key]) |
823 | |
824 | |
825 | === added file 'hooks/charmhelpers/fetch/snap.py' |
826 | --- hooks/charmhelpers/fetch/snap.py 1970-01-01 00:00:00 +0000 |
827 | +++ hooks/charmhelpers/fetch/snap.py 2017-06-16 22:43:26 +0000 |
828 | @@ -0,0 +1,122 @@ |
829 | +# Copyright 2014-2017 Canonical Limited. |
830 | +# |
831 | +# Licensed under the Apache License, Version 2.0 (the "License"); |
832 | +# you may not use this file except in compliance with the License. |
833 | +# You may obtain a copy of the License at |
834 | +# |
835 | +# http://www.apache.org/licenses/LICENSE-2.0 |
836 | +# |
837 | +# Unless required by applicable law or agreed to in writing, software |
838 | +# distributed under the License is distributed on an "AS IS" BASIS, |
839 | +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
840 | +# See the License for the specific language governing permissions and |
841 | +# limitations under the License. |
842 | +""" |
843 | +Charm helpers snap for classic charms. |
844 | + |
845 | +If writing reactive charms, use the snap layer: |
846 | +https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html |
847 | +""" |
848 | +import subprocess |
849 | +from os import environ |
850 | +from time import sleep |
851 | +from charmhelpers.core.hookenv import log |
852 | + |
853 | +__author__ = 'Joseph Borg <joseph.borg@canonical.com>' |
854 | + |
855 | +SNAP_NO_LOCK = 1 # The return code for "couldn't acquire lock" in Snap (hopefully this will be improved). |
856 | +SNAP_NO_LOCK_RETRY_DELAY = 10 # Wait X seconds between Snap lock checks. |
857 | +SNAP_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. |
858 | + |
859 | + |
860 | +class CouldNotAcquireLockException(Exception): |
861 | + pass |
862 | + |
863 | + |
864 | +def _snap_exec(commands): |
865 | + """ |
866 | + Execute snap commands. |
867 | + |
868 | + :param commands: List commands |
869 | + :return: Integer exit code |
870 | + """ |
871 | + assert type(commands) == list |
872 | + |
873 | + retry_count = 0 |
874 | + return_code = None |
875 | + |
876 | + while return_code is None or return_code == SNAP_NO_LOCK: |
877 | + try: |
878 | + return_code = subprocess.check_call(['snap'] + commands, env=environ) |
879 | + except subprocess.CalledProcessError as e: |
880 | + retry_count += + 1 |
881 | + if retry_count > SNAP_NO_LOCK_RETRY_COUNT: |
882 | + raise CouldNotAcquireLockException('Could not aquire lock after %s attempts' % SNAP_NO_LOCK_RETRY_COUNT) |
883 | + return_code = e.returncode |
884 | + log('Snap failed to acquire lock, trying again in %s seconds.' % SNAP_NO_LOCK_RETRY_DELAY, level='WARN') |
885 | + sleep(SNAP_NO_LOCK_RETRY_DELAY) |
886 | + |
887 | + return return_code |
888 | + |
889 | + |
890 | +def snap_install(packages, *flags): |
891 | + """ |
892 | + Install a snap package. |
893 | + |
894 | + :param packages: String or List String package name |
895 | + :param flags: List String flags to pass to install command |
896 | + :return: Integer return code from snap |
897 | + """ |
898 | + if type(packages) is not list: |
899 | + packages = [packages] |
900 | + |
901 | + flags = list(flags) |
902 | + |
903 | + message = 'Installing snap(s) "%s"' % ', '.join(packages) |
904 | + if flags: |
905 | + message += ' with option(s) "%s"' % ', '.join(flags) |
906 | + |
907 | + log(message, level='INFO') |
908 | + return _snap_exec(['install'] + flags + packages) |
909 | + |
910 | + |
911 | +def snap_remove(packages, *flags): |
912 | + """ |
913 | + Remove a snap package. |
914 | + |
915 | + :param packages: String or List String package name |
916 | + :param flags: List String flags to pass to remove command |
917 | + :return: Integer return code from snap |
918 | + """ |
919 | + if type(packages) is not list: |
920 | + packages = [packages] |
921 | + |
922 | + flags = list(flags) |
923 | + |
924 | + message = 'Removing snap(s) "%s"' % ', '.join(packages) |
925 | + if flags: |
926 | + message += ' with options "%s"' % ', '.join(flags) |
927 | + |
928 | + log(message, level='INFO') |
929 | + return _snap_exec(['remove'] + flags + packages) |
930 | + |
931 | + |
932 | +def snap_refresh(packages, *flags): |
933 | + """ |
934 | + Refresh / Update snap package. |
935 | + |
936 | + :param packages: String or List String package name |
937 | + :param flags: List String flags to pass to refresh command |
938 | + :return: Integer return code from snap |
939 | + """ |
940 | + if type(packages) is not list: |
941 | + packages = [packages] |
942 | + |
943 | + flags = list(flags) |
944 | + |
945 | + message = 'Refreshing snap(s) "%s"' % ', '.join(packages) |
946 | + if flags: |
947 | + message += ' with options "%s"' % ', '.join(flags) |
948 | + |
949 | + log(message, level='INFO') |
950 | + return _snap_exec(['refresh'] + flags + packages) |
951 | |
952 | === modified file 'hooks/charmhelpers/fetch/ubuntu.py' |
953 | --- hooks/charmhelpers/fetch/ubuntu.py 2016-10-26 18:19:59 +0000 |
954 | +++ hooks/charmhelpers/fetch/ubuntu.py 2017-06-16 22:43:26 +0000 |
955 | @@ -12,29 +12,47 @@ |
956 | # See the License for the specific language governing permissions and |
957 | # limitations under the License. |
958 | |
959 | +from collections import OrderedDict |
960 | import os |
961 | +import platform |
962 | +import re |
963 | import six |
964 | import time |
965 | import subprocess |
966 | - |
967 | from tempfile import NamedTemporaryFile |
968 | + |
969 | from charmhelpers.core.host import ( |
970 | lsb_release |
971 | ) |
972 | -from charmhelpers.core.hookenv import log |
973 | -from charmhelpers.fetch import SourceConfigError |
974 | +from charmhelpers.core.hookenv import ( |
975 | + log, |
976 | + DEBUG, |
977 | +) |
978 | +from charmhelpers.fetch import SourceConfigError, GPGKeyError |
979 | |
980 | +PROPOSED_POCKET = ( |
981 | + "# Proposed\n" |
982 | + "deb http://archive.ubuntu.com/ubuntu {}-proposed main universe " |
983 | + "multiverse restricted\n") |
984 | +PROPOSED_PORTS_POCKET = ( |
985 | + "# Proposed\n" |
986 | + "deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe " |
987 | + "multiverse restricted\n") |
988 | +# Only supports 64bit and ppc64 at the moment. |
989 | +ARCH_TO_PROPOSED_POCKET = { |
990 | + 'x86_64': PROPOSED_POCKET, |
991 | + 'ppc64le': PROPOSED_PORTS_POCKET, |
992 | + 'aarch64': PROPOSED_PORTS_POCKET, |
993 | +} |
994 | +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" |
995 | +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' |
996 | CLOUD_ARCHIVE = """# Ubuntu Cloud Archive |
997 | deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main |
998 | """ |
999 | - |
1000 | -PROPOSED_POCKET = """# Proposed |
1001 | -deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted |
1002 | -""" |
1003 | - |
1004 | CLOUD_ARCHIVE_POCKETS = { |
1005 | # Folsom |
1006 | 'folsom': 'precise-updates/folsom', |
1007 | + 'folsom/updates': 'precise-updates/folsom', |
1008 | 'precise-folsom': 'precise-updates/folsom', |
1009 | 'precise-folsom/updates': 'precise-updates/folsom', |
1010 | 'precise-updates/folsom': 'precise-updates/folsom', |
1011 | @@ -43,6 +61,7 @@ |
1012 | 'precise-proposed/folsom': 'precise-proposed/folsom', |
1013 | # Grizzly |
1014 | 'grizzly': 'precise-updates/grizzly', |
1015 | + 'grizzly/updates': 'precise-updates/grizzly', |
1016 | 'precise-grizzly': 'precise-updates/grizzly', |
1017 | 'precise-grizzly/updates': 'precise-updates/grizzly', |
1018 | 'precise-updates/grizzly': 'precise-updates/grizzly', |
1019 | @@ -51,6 +70,7 @@ |
1020 | 'precise-proposed/grizzly': 'precise-proposed/grizzly', |
1021 | # Havana |
1022 | 'havana': 'precise-updates/havana', |
1023 | + 'havana/updates': 'precise-updates/havana', |
1024 | 'precise-havana': 'precise-updates/havana', |
1025 | 'precise-havana/updates': 'precise-updates/havana', |
1026 | 'precise-updates/havana': 'precise-updates/havana', |
1027 | @@ -59,6 +79,7 @@ |
1028 | 'precise-proposed/havana': 'precise-proposed/havana', |
1029 | # Icehouse |
1030 | 'icehouse': 'precise-updates/icehouse', |
1031 | + 'icehouse/updates': 'precise-updates/icehouse', |
1032 | 'precise-icehouse': 'precise-updates/icehouse', |
1033 | 'precise-icehouse/updates': 'precise-updates/icehouse', |
1034 | 'precise-updates/icehouse': 'precise-updates/icehouse', |
1035 | @@ -67,6 +88,7 @@ |
1036 | 'precise-proposed/icehouse': 'precise-proposed/icehouse', |
1037 | # Juno |
1038 | 'juno': 'trusty-updates/juno', |
1039 | + 'juno/updates': 'trusty-updates/juno', |
1040 | 'trusty-juno': 'trusty-updates/juno', |
1041 | 'trusty-juno/updates': 'trusty-updates/juno', |
1042 | 'trusty-updates/juno': 'trusty-updates/juno', |
1043 | @@ -75,6 +97,7 @@ |
1044 | 'trusty-proposed/juno': 'trusty-proposed/juno', |
1045 | # Kilo |
1046 | 'kilo': 'trusty-updates/kilo', |
1047 | + 'kilo/updates': 'trusty-updates/kilo', |
1048 | 'trusty-kilo': 'trusty-updates/kilo', |
1049 | 'trusty-kilo/updates': 'trusty-updates/kilo', |
1050 | 'trusty-updates/kilo': 'trusty-updates/kilo', |
1051 | @@ -83,6 +106,7 @@ |
1052 | 'trusty-proposed/kilo': 'trusty-proposed/kilo', |
1053 | # Liberty |
1054 | 'liberty': 'trusty-updates/liberty', |
1055 | + 'liberty/updates': 'trusty-updates/liberty', |
1056 | 'trusty-liberty': 'trusty-updates/liberty', |
1057 | 'trusty-liberty/updates': 'trusty-updates/liberty', |
1058 | 'trusty-updates/liberty': 'trusty-updates/liberty', |
1059 | @@ -91,6 +115,7 @@ |
1060 | 'trusty-proposed/liberty': 'trusty-proposed/liberty', |
1061 | # Mitaka |
1062 | 'mitaka': 'trusty-updates/mitaka', |
1063 | + 'mitaka/updates': 'trusty-updates/mitaka', |
1064 | 'trusty-mitaka': 'trusty-updates/mitaka', |
1065 | 'trusty-mitaka/updates': 'trusty-updates/mitaka', |
1066 | 'trusty-updates/mitaka': 'trusty-updates/mitaka', |
1067 | @@ -99,17 +124,44 @@ |
1068 | 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', |
1069 | # Newton |
1070 | 'newton': 'xenial-updates/newton', |
1071 | + 'newton/updates': 'xenial-updates/newton', |
1072 | 'xenial-newton': 'xenial-updates/newton', |
1073 | 'xenial-newton/updates': 'xenial-updates/newton', |
1074 | 'xenial-updates/newton': 'xenial-updates/newton', |
1075 | 'newton/proposed': 'xenial-proposed/newton', |
1076 | 'xenial-newton/proposed': 'xenial-proposed/newton', |
1077 | 'xenial-proposed/newton': 'xenial-proposed/newton', |
1078 | + # Ocata |
1079 | + 'ocata': 'xenial-updates/ocata', |
1080 | + 'ocata/updates': 'xenial-updates/ocata', |
1081 | + 'xenial-ocata': 'xenial-updates/ocata', |
1082 | + 'xenial-ocata/updates': 'xenial-updates/ocata', |
1083 | + 'xenial-updates/ocata': 'xenial-updates/ocata', |
1084 | + 'ocata/proposed': 'xenial-proposed/ocata', |
1085 | + 'xenial-ocata/proposed': 'xenial-proposed/ocata', |
1086 | + 'xenial-ocata/newton': 'xenial-proposed/ocata', |
1087 | + # Pike |
1088 | + 'pike': 'xenial-updates/pike', |
1089 | + 'xenial-pike': 'xenial-updates/pike', |
1090 | + 'xenial-pike/updates': 'xenial-updates/pike', |
1091 | + 'xenial-updates/pike': 'xenial-updates/pike', |
1092 | + 'pike/proposed': 'xenial-proposed/pike', |
1093 | + 'xenial-pike/proposed': 'xenial-proposed/pike', |
1094 | + 'xenial-pike/newton': 'xenial-proposed/pike', |
1095 | + # Queens |
1096 | + 'queens': 'xenial-updates/queens', |
1097 | + 'xenial-queens': 'xenial-updates/queens', |
1098 | + 'xenial-queens/updates': 'xenial-updates/queens', |
1099 | + 'xenial-updates/queens': 'xenial-updates/queens', |
1100 | + 'queens/proposed': 'xenial-proposed/queens', |
1101 | + 'xenial-queens/proposed': 'xenial-proposed/queens', |
1102 | + 'xenial-queens/newton': 'xenial-proposed/queens', |
1103 | } |
1104 | |
1105 | + |
1106 | APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. |
1107 | -APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. |
1108 | -APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. |
1109 | +CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. |
1110 | +CMD_RETRY_COUNT = 30 # Retry a failing fatal command X times. |
1111 | |
1112 | |
1113 | def filter_installed_packages(packages): |
1114 | @@ -137,7 +189,7 @@ |
1115 | return apt_pkg.Cache(progress) |
1116 | |
1117 | |
1118 | -def install(packages, options=None, fatal=False): |
1119 | +def apt_install(packages, options=None, fatal=False): |
1120 | """Install one or more packages.""" |
1121 | if options is None: |
1122 | options = ['--option=Dpkg::Options::=--force-confold'] |
1123 | @@ -154,7 +206,7 @@ |
1124 | _run_apt_command(cmd, fatal) |
1125 | |
1126 | |
1127 | -def upgrade(options=None, fatal=False, dist=False): |
1128 | +def apt_upgrade(options=None, fatal=False, dist=False): |
1129 | """Upgrade all packages.""" |
1130 | if options is None: |
1131 | options = ['--option=Dpkg::Options::=--force-confold'] |
1132 | @@ -169,13 +221,13 @@ |
1133 | _run_apt_command(cmd, fatal) |
1134 | |
1135 | |
1136 | -def update(fatal=False): |
1137 | +def apt_update(fatal=False): |
1138 | """Update local apt cache.""" |
1139 | cmd = ['apt-get', 'update'] |
1140 | _run_apt_command(cmd, fatal) |
1141 | |
1142 | |
1143 | -def purge(packages, fatal=False): |
1144 | +def apt_purge(packages, fatal=False): |
1145 | """Purge one or more packages.""" |
1146 | cmd = ['apt-get', '--assume-yes', 'purge'] |
1147 | if isinstance(packages, six.string_types): |
1148 | @@ -209,7 +261,45 @@ |
1149 | return apt_mark(packages, 'unhold', fatal=fatal) |
1150 | |
1151 | |
1152 | -def add_source(source, key=None): |
1153 | +def import_key(keyid): |
1154 | + """Import a key in either ASCII Armor or Radix64 format. |
1155 | + |
1156 | + `keyid` is either the keyid to fetch from a PGP server, or |
1157 | + the key in ASCII armor format. |
1158 | + |
1159 | + :param keyid: String of key (or key id). |
1160 | + :raises: GPGKeyError if the key could not be imported |
1161 | + """ |
1162 | + key = keyid.strip() |
1163 | + if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and |
1164 | + key.endswith('-----END PGP PUBLIC KEY BLOCK-----')): |
1165 | + log("PGP key found (looks like ASCII Armor format)", level=DEBUG) |
1166 | + log("Importing ASCII Armor PGP key", level=DEBUG) |
1167 | + with NamedTemporaryFile() as keyfile: |
1168 | + with open(keyfile.name, 'w') as fd: |
1169 | + fd.write(key) |
1170 | + fd.write("\n") |
1171 | + cmd = ['apt-key', 'add', keyfile.name] |
1172 | + try: |
1173 | + subprocess.check_call(cmd) |
1174 | + except subprocess.CalledProcessError: |
1175 | + error = "Error importing PGP key '{}'".format(key) |
1176 | + log(error) |
1177 | + raise GPGKeyError(error) |
1178 | + else: |
1179 | + log("PGP key found (looks like Radix64 format)", level=DEBUG) |
1180 | + log("Importing PGP key from keyserver", level=DEBUG) |
1181 | + cmd = ['apt-key', 'adv', '--keyserver', |
1182 | + 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] |
1183 | + try: |
1184 | + subprocess.check_call(cmd) |
1185 | + except subprocess.CalledProcessError: |
1186 | + error = "Error importing PGP key '{}'".format(key) |
1187 | + log(error) |
1188 | + raise GPGKeyError(error) |
1189 | + |
1190 | + |
1191 | +def add_source(source, key=None, fail_invalid=False): |
1192 | """Add a package source to this system. |
1193 | |
1194 | @param source: a URL or sources.list entry, as supported by |
1195 | @@ -225,6 +315,33 @@ |
1196 | such as 'cloud:icehouse' |
1197 | 'distro' may be used as a noop |
1198 | |
1199 | + Full list of source specifications supported by the function are: |
1200 | + |
1201 | + 'distro': A NOP; i.e. it has no effect. |
1202 | + 'proposed': the proposed deb spec [2] is written to |
1203 | + /etc/apt/sources.list/proposed |
1204 | + 'distro-proposed': adds <version>-proposed to the debs [2] |
1205 | + 'ppa:<ppa-name>': add-apt-repository --yes <ppa_name> |
1206 | + 'deb <deb-spec>': add-apt-repository --yes deb <deb-spec> |
1207 | + 'http://....': add-apt-repository --yes http://... |
1208 | + 'cloud-archive:<spec>': add-apt-repository -yes cloud-archive:<spec> |
1209 | + 'cloud-archive:<spec>': add-apt-repository --yes cloud-archive:<spec> |
1210 | + optional staging version. If staging is used then the staging PPA [2] |
1211 | + will be used. If staging is NOT used then the cloud archive [3] will be |
1212 | + added, and the 'ubuntu-cloud-keyring' package will be added for the |
1213 | + current distro. |
1214 | + |
1215 | + Otherwise the source is not recognised and this is logged to the juju log. |
1216 | + However, no error is raised, unless fail_invalid is True. |
1217 | + |
1218 | + [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main |
1219 | + where {} is replaced with the derived pocket name. |
1220 | + [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \ |
1221 | + main universe multiverse restricted |
1222 | + where {} is replaced with the lsb_release codename (e.g. xenial) |
1223 | + [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu <pocket> |
1224 | + to /etc/apt/sources.list.d/cloud-archive-list |
1225 | + |
1226 | @param key: A key to be added to the system's APT keyring and used |
1227 | to verify the signatures on packages. Ideally, this should be an |
1228 | ASCII format GPG public key including the block headers. A GPG key |
1229 | @@ -232,87 +349,202 @@ |
1230 | available to retrieve the actual public key from a public keyserver |
1231 | placing your Juju environment at risk. ppa and cloud archive keys |
1232 | + are securely added automatically, so should not be provided. |
1233 | + |
1234 | + @param fail_invalid: (boolean) if True, then the function raises a |
1235 | + SourceConfigError if there is no matching installation source. |
1236 | + |
1237 | + @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a |
1238 | + valid pocket in CLOUD_ARCHIVE_POCKETS |
1239 | """ |
1240 | + _mapping = OrderedDict([ |
1241 | + (r"^distro$", lambda: None), # This is a NOP |
1242 | + (r"^(?:proposed|distro-proposed)$", _add_proposed), |
1243 | + (r"^cloud-archive:(.*)$", _add_apt_repository), |
1244 | + (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository), |
1245 | + (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging), |
1246 | + (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check), |
1247 | + (r"^cloud:(.*)$", _add_cloud_pocket), |
1248 | + ]) |
1249 | if source is None: |
1250 | - log('Source is not present. Skipping') |
1251 | - return |
1252 | - |
1253 | - if (source.startswith('ppa:') or |
1254 | - source.startswith('http') or |
1255 | - source.startswith('deb ') or |
1256 | - source.startswith('cloud-archive:')): |
1257 | - subprocess.check_call(['add-apt-repository', '--yes', source]) |
1258 | - elif source.startswith('cloud:'): |
1259 | - install(filter_installed_packages(['ubuntu-cloud-keyring']), |
1260 | + source = '' |
1261 | + for r, fn in six.iteritems(_mapping): |
1262 | + m = re.match(r, source) |
1263 | + if m: |
1264 | + # call the associated function with the captured groups |
1265 | + # raises SourceConfigError on error. |
1266 | + fn(*m.groups()) |
1267 | + if key: |
1268 | + try: |
1269 | + import_key(key) |
1270 | + except GPGKeyError as e: |
1271 | + raise SourceConfigError(str(e)) |
1272 | + break |
1273 | + else: |
1274 | + # nothing matched; log an error and maybe raise SourceConfigError |
1275 | + err = "Unknown source: {!r}".format(source) |
1276 | + log(err) |
1277 | + if fail_invalid: |
1278 | + raise SourceConfigError(err) |
1279 | + |
1280 | + |
1281 | +def _add_proposed(): |
1282 | + """Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list |
1283 | + |
1284 | + Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct stanza for |
1285 | + the deb line. |
1286 | + |
1287 | + For Intel architectures PROPOSED_POCKET is used for the release, but for |
1288 | + other architectures PROPOSED_PORTS_POCKET is used for the release. |
1289 | + """ |
1290 | + release = lsb_release()['DISTRIB_CODENAME'] |
1291 | + arch = platform.machine() |
1292 | + if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET): |
1293 | + raise SourceConfigError("Arch {} not supported for (distro-)proposed" |
1294 | + .format(arch)) |
1295 | + with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: |
1296 | + apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release)) |
1297 | + |
1298 | + |
1299 | +def _add_apt_repository(spec): |
1300 | + """Add the spec using add_apt_repository |
1301 | + |
1302 | + :param spec: the parameter to pass to add_apt_repository |
1303 | + """ |
1304 | + _run_with_retries(['add-apt-repository', '--yes', spec]) |
1305 | + |
1306 | + |
1307 | +def _add_cloud_pocket(pocket): |
1308 | + """Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list |
1309 | + |
1310 | + Note that this overwrites the existing file if there is one. |
1311 | + |
1312 | + This function also converts the simple pocket in to the actual pocket using |
1313 | + the CLOUD_ARCHIVE_POCKETS mapping. |
1314 | + |
1315 | + :param pocket: string representing the pocket to add a deb spec for. |
1316 | + :raises: SourceConfigError if the cloud pocket doesn't exist or the |
1317 | + requested release doesn't match the current distro version. |
1318 | + """ |
1319 | + apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), |
1320 | fatal=True) |
1321 | - pocket = source.split(':')[-1] |
1322 | - if pocket not in CLOUD_ARCHIVE_POCKETS: |
1323 | - raise SourceConfigError( |
1324 | - 'Unsupported cloud: source option %s' % |
1325 | - pocket) |
1326 | - actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] |
1327 | - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: |
1328 | - apt.write(CLOUD_ARCHIVE.format(actual_pocket)) |
1329 | - elif source == 'proposed': |
1330 | - release = lsb_release()['DISTRIB_CODENAME'] |
1331 | - with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: |
1332 | - apt.write(PROPOSED_POCKET.format(release)) |
1333 | - elif source == 'distro': |
1334 | - pass |
1335 | - else: |
1336 | - log("Unknown source: {!r}".format(source)) |
1337 | - |
1338 | - if key: |
1339 | - if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: |
1340 | - with NamedTemporaryFile('w+') as key_file: |
1341 | - key_file.write(key) |
1342 | - key_file.flush() |
1343 | - key_file.seek(0) |
1344 | - subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) |
1345 | - else: |
1346 | - # Note that hkp: is in no way a secure protocol. Using a |
1347 | - # GPG key id is pointless from a security POV unless you |
1348 | - # absolutely trust your network and DNS. |
1349 | - subprocess.check_call(['apt-key', 'adv', '--keyserver', |
1350 | - 'hkp://keyserver.ubuntu.com:80', '--recv', |
1351 | - key]) |
1352 | + if pocket not in CLOUD_ARCHIVE_POCKETS: |
1353 | + raise SourceConfigError( |
1354 | + 'Unsupported cloud: source option %s' % |
1355 | + pocket) |
1356 | + actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] |
1357 | + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: |
1358 | + apt.write(CLOUD_ARCHIVE.format(actual_pocket)) |
1359 | + |
1360 | + |
1361 | +def _add_cloud_staging(cloud_archive_release, openstack_release): |
1362 | + """Add the cloud staging repository which is in |
1363 | + ppa:ubuntu-cloud-archive/<openstack_release>-staging |
1364 | + |
1365 | + This function checks that the cloud_archive_release matches the current |
1366 | + codename for the distro that charm is being installed on. |
1367 | + |
1368 | + :param cloud_archive_release: string, codename for the release. |
1369 | + :param openstack_release: String, codename for the openstack release. |
1370 | + :raises: SourceConfigError if the cloud_archive_release doesn't match the |
1371 | + current version of the os. |
1372 | + """ |
1373 | + _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) |
1374 | + ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release) |
1375 | + cmd = 'add-apt-repository -y {}'.format(ppa) |
1376 | + _run_with_retries(cmd.split(' ')) |
1377 | + |
1378 | + |
1379 | +def _add_cloud_distro_check(cloud_archive_release, openstack_release): |
1380 | + """Add the cloud pocket, but also check the cloud_archive_release against |
1381 | + the current distro, and use the openstack_release as the full lookup. |
1382 | + |
1383 | + This just calls _add_cloud_pocket() with the openstack_release as pocket |
1384 | + to get the correct cloud-archive.list for dpkg to work with. |
1385 | + |
1386 | + :param cloud_archive_release:String, codename for the distro release. |
1387 | + :param openstack_release: String, spec for the release to look up in the |
1388 | + CLOUD_ARCHIVE_POCKETS |
1389 | + :raises: SourceConfigError if this is the wrong distro, or the pocket spec |
1390 | + doesn't exist. |
1391 | + """ |
1392 | + _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) |
1393 | + _add_cloud_pocket("{}-{}".format(cloud_archive_release, openstack_release)) |
1394 | + |
1395 | + |
1396 | +def _verify_is_ubuntu_rel(release, os_release): |
1397 | + """Verify that the release is in the same as the current ubuntu release. |
1398 | + |
1399 | + :param release: String, lowercase for the release. |
1400 | + :param os_release: String, the os_release being asked for |
1401 | + :raises: SourceConfigError if the release is not the same as the ubuntu |
1402 | + release. |
1403 | + """ |
1404 | + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] |
1405 | + if release != ubuntu_rel: |
1406 | + raise SourceConfigError( |
1407 | + 'Invalid Cloud Archive release specified: {}-{} on this Ubuntu' |
1408 | + 'version ({})'.format(release, os_release, ubuntu_rel)) |
1409 | + |
1410 | + |
1411 | +def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), |
1412 | + retry_message="", cmd_env=None): |
1413 | + """Run a command and retry until success or max_retries is reached. |
1414 | + |
1415 | + :param: cmd: str: The apt command to run. |
1416 | + :param: max_retries: int: The number of retries to attempt on a fatal |
1417 | + command. Defaults to CMD_RETRY_COUNT. |
1418 | + :param: retry_exitcodes: tuple: Optional additional exit codes to retry. |
1419 | + Defaults to retry on exit code 1. |
1420 | + :param: retry_message: str: Optional log prefix emitted during retries. |
1421 | + :param: cmd_env: dict: Environment variables to add to the command run. |
1422 | + """ |
1423 | + |
1424 | + env = None |
1425 | + kwargs = {} |
1426 | + if cmd_env: |
1427 | + env = os.environ.copy() |
1428 | + env.update(cmd_env) |
1429 | + kwargs['env'] = env |
1430 | + |
1431 | + if not retry_message: |
1432 | + retry_message = "Failed executing '{}'".format(" ".join(cmd)) |
1433 | + retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY) |
1434 | + |
1435 | + retry_count = 0 |
1436 | + result = None |
1437 | + |
1438 | + retry_results = (None,) + retry_exitcodes |
1439 | + while result in retry_results: |
1440 | + try: |
1441 | + # result = subprocess.check_call(cmd, env=env) |
1442 | + result = subprocess.check_call(cmd, **kwargs) |
1443 | + except subprocess.CalledProcessError as e: |
1444 | + retry_count = retry_count + 1 |
1445 | + if retry_count > max_retries: |
1446 | + raise |
1447 | + result = e.returncode |
1448 | + log(retry_message) |
1449 | + time.sleep(CMD_RETRY_DELAY) |
1450 | |
1451 | |
1452 | def _run_apt_command(cmd, fatal=False): |
1453 | - """Run an APT command. |
1454 | - |
1455 | - Checks the output and retries if the fatal flag is set |
1456 | - to True. |
1457 | + """Run an apt command with optional retries. |
1458 | |
1459 | :param: cmd: str: The apt command to run. |
1460 | :param: fatal: bool: Whether the command's output should be checked and |
1461 | retried. |
1462 | """ |
1463 | - env = os.environ.copy() |
1464 | - |
1465 | - if 'DEBIAN_FRONTEND' not in env: |
1466 | - env['DEBIAN_FRONTEND'] = 'noninteractive' |
1467 | + # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment. |
1468 | + cmd_env = { |
1469 | + 'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')} |
1470 | |
1471 | if fatal: |
1472 | - retry_count = 0 |
1473 | - result = None |
1474 | - |
1475 | - # If the command is considered "fatal", we need to retry if the apt |
1476 | - # lock was not acquired. |
1477 | - |
1478 | - while result is None or result == APT_NO_LOCK: |
1479 | - try: |
1480 | - result = subprocess.check_call(cmd, env=env) |
1481 | - except subprocess.CalledProcessError as e: |
1482 | - retry_count = retry_count + 1 |
1483 | - if retry_count > APT_NO_LOCK_RETRY_COUNT: |
1484 | - raise |
1485 | - result = e.returncode |
1486 | - log("Couldn't acquire DPKG lock. Will retry in {} seconds." |
1487 | - "".format(APT_NO_LOCK_RETRY_DELAY)) |
1488 | - time.sleep(APT_NO_LOCK_RETRY_DELAY) |
1489 | - |
1490 | + _run_with_retries( |
1491 | + cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,), |
1492 | + retry_message="Couldn't acquire DPKG lock") |
1493 | else: |
1494 | + env = os.environ.copy() |
1495 | + env.update(cmd_env) |
1496 | subprocess.call(cmd, env=env) |
1497 | |
1498 | |
1499 | |
1500 | === modified file 'hooks/charmhelpers/osplatform.py' |
1501 | --- hooks/charmhelpers/osplatform.py 2016-10-26 18:19:59 +0000 |
1502 | +++ hooks/charmhelpers/osplatform.py 2017-06-16 22:43:26 +0000 |
1503 | @@ -8,12 +8,18 @@ |
1504 | will be returned (which is the name of the module). |
1505 | This string is used to decide which platform module should be imported. |
1506 | """ |
1507 | + # linux_distribution is deprecated and will be removed in Python 3.7 |
1508 | + # Warnings *not* disabled, as we certainly need to fix this. |
1509 | tuple_platform = platform.linux_distribution() |
1510 | current_platform = tuple_platform[0] |
1511 | if "Ubuntu" in current_platform: |
1512 | return "ubuntu" |
1513 | elif "CentOS" in current_platform: |
1514 | return "centos" |
1515 | + elif "debian" in current_platform: |
1516 | + # Stock Python does not detect Ubuntu and instead returns debian. |
1517 | + # Or at least it does in some build environments like Travis CI |
1518 | + return "ubuntu" |
1519 | else: |
1520 | raise RuntimeError("This module is not supported on {}." |
1521 | .format(current_platform)) |
1522 | |
1523 | === modified file 'hooks/hooks.py' |
1524 | --- hooks/hooks.py 2016-11-15 18:15:20 +0000 |
1525 | +++ hooks/hooks.py 2017-06-16 22:43:26 +0000 |
1526 | @@ -4,6 +4,8 @@ |
1527 | import sys |
1528 | |
1529 | from charmhelpers.core.host import ( |
1530 | + CompareHostReleases, |
1531 | + lsb_release, |
1532 | service_start, |
1533 | service_stop, |
1534 | service_restart, |
1535 | @@ -69,6 +71,12 @@ |
1536 | changed[config_key] = config_get(config_key) |
1537 | juju_log("Configuration key:%s set to value: %s" % |
1538 | (config_key, changed[config_key])) |
1539 | + |
1540 | + |
1541 | + # allows templates to generate series-dependent configuration |
1542 | + lsb = lsb_release() |
1543 | + changed['series'] = CompareHostReleases(lsb['DISTRIB_CODENAME']) |
1544 | + |
1545 | return changed |
1546 | |
1547 | |
1548 | |
1549 | === modified file 'templates/rsyslog.conf' |
1550 | --- templates/rsyslog.conf 2014-04-22 19:45:41 +0000 |
1551 | +++ templates/rsyslog.conf 2017-06-16 22:43:26 +0000 |
1552 | @@ -1,3 +1,8 @@ |
1553 | +############################################################################### |
1554 | +# [ WARNING ] |
1555 | +# Configuration file maintained by Juju. Local changes may be overwritten. |
1556 | +############################################################################### |
1557 | + |
1558 | /var/log/syslog |
1559 | { |
1560 | rotate {{syslog_rotate}} |
1561 | @@ -7,7 +12,11 @@ |
1562 | delaycompress |
1563 | compress |
1564 | postrotate |
1565 | + {%- if series <= 'trusty' %} |
1566 | reload rsyslog >/dev/null 2>&1 || true |
1567 | + {%- else %} |
1568 | + invoke-rc.d rsyslog rotate > /dev/null |
1569 | + {%- endif %} |
1570 | endscript |
1571 | } |
1572 | |
1573 | @@ -32,6 +41,10 @@ |
1574 | delaycompress |
1575 | sharedscripts |
1576 | postrotate |
1577 | + {%- if series <= 'trusty' %} |
1578 | reload rsyslog >/dev/null 2>&1 || true |
1579 | + {%- else %} |
1580 | + invoke-rc.d rsyslog rotate > /dev/null |
1581 | + {%- endif %} |
1582 | endscript |
1583 | } |
1584 | |
1585 | === modified file 'tox.ini' |
1586 | --- tox.ini 2016-10-27 15:33:35 +0000 |
1587 | +++ tox.ini 2017-06-16 22:43:26 +0000 |
1588 | @@ -1,6 +1,6 @@ |
1589 | [tox] |
1590 | skipsdist=True |
1591 | -envlist = py34, py35 |
1592 | +envlist = py27, py34, py35 |
1593 | skip_missing_interpreters = True |
1594 | |
1595 | [testenv] |
1596 | |
1597 | === modified file 'unit_tests/test_hooks.py' |
1598 | --- unit_tests/test_hooks.py 2017-01-10 17:09:58 +0000 |
1599 | +++ unit_tests/test_hooks.py 2017-06-16 22:43:26 +0000 |
1600 | @@ -1,7 +1,8 @@ |
1601 | #!/usr/bin/env python |
1602 | # -*- coding: utf-8 -*- |
1603 | import os |
1604 | -import hooks |
1605 | +import six |
1606 | +import tempfile |
1607 | |
1608 | __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>' |
1609 | _HERE = os.path.abspath(os.path.dirname(__file__)) |
1610 | @@ -12,6 +13,7 @@ |
1611 | except ImportError as ex: |
1612 | raise ImportError("Please install unittest and mock modules") |
1613 | |
1614 | +import hooks |
1615 | |
1616 | TO_PATCH = [ |
1617 | "apt_install", |
1618 | @@ -28,7 +30,7 @@ |
1619 | ] |
1620 | |
1621 | |
1622 | -class HooksTestCase(unittest.TestCase): |
1623 | +class BaseTestCase(unittest.TestCase): |
1624 | |
1625 | def setUp(self): |
1626 | unittest.TestCase.setUp(self) |
1627 | @@ -37,6 +39,14 @@ |
1628 | self.juju_log.return_value = True |
1629 | self.apt_install.return_value = True |
1630 | self.charm_dir.return_value = os.path.join(_HERE, '..') |
1631 | + self.tmpdir = tempfile.mkdtemp() |
1632 | + self.maxDiff = None |
1633 | + |
1634 | + def tearDown(self): |
1635 | + try: |
1636 | + shutil.rmtree(self.tmpdir) |
1637 | + except: |
1638 | + pass |
1639 | |
1640 | def patch(self, method): |
1641 | _m = mock.patch.object(hooks, method) |
1642 | @@ -48,6 +58,8 @@ |
1643 | for method in TO_PATCH: |
1644 | setattr(self, method, self.patch(method)) |
1645 | |
1646 | +class TestHooks(BaseTestCase): |
1647 | + |
1648 | def test_install_hook(self): |
1649 | """Check if install hooks is correctly executed |
1650 | """ |
1651 | @@ -141,8 +153,16 @@ |
1652 | def test_config_changed(self): |
1653 | """Check if config-changed is executed correctly""" |
1654 | _open = mock.mock_open(read_data=b'foo') |
1655 | - |
1656 | - with mock.patch('builtins.open', _open, create=True): |
1657 | + lsb = hooks.lsb_release() |
1658 | + |
1659 | + if six.PY2: |
1660 | + open_function = '__builtin__.open' |
1661 | + else: |
1662 | + open_function = 'builtins.open' |
1663 | + |
1664 | + with mock.patch(open_function, _open, create=True) as mock_open, \ |
1665 | + mock.patch.object(hooks, 'lsb_release') as mock_lsb: |
1666 | + mock_lsb.return_value = lsb |
1667 | hooks.config_changed() |
1668 | |
1669 | # I'm not quite sure why config_changed appears to be called twice but |
1670 | @@ -154,12 +174,15 @@ |
1671 | '60-aggregator.conf'), 'rb'), |
1672 | mock.call(os.path.join(hooks.DEFAULT_RSYSLOG_PATH, |
1673 | '60-aggregator.conf'), 'w'), |
1674 | - mock.call(os.path.join(hooks.get_template_dir(), |
1675 | - '70-forward.conf'), 'rb'), |
1676 | - mock.call(os.path.join(hooks.get_template_dir(), |
1677 | - '70-forward.conf'), 'rb'), |
1678 | + |
1679 | + mock.call(os.path.join(hooks.get_template_dir(), |
1680 | + '70-forward.conf'), 'rb'), |
1681 | + mock.call(os.path.join(hooks.get_template_dir(), |
1682 | + '70-forward.conf'), 'rb'), |
1683 | + |
1684 | mock.call(os.path.join(hooks.DEFAULT_RSYSLOG_PATH, |
1685 | '70-forward.conf'), 'w'), |
1686 | + |
1687 | mock.call(os.path.join(hooks.get_template_dir(), |
1688 | 'rsyslog.conf'), 'rb'), |
1689 | mock.call(os.path.join(hooks.get_template_dir(), |
1690 | @@ -169,3 +192,38 @@ |
1691 | |
1692 | self.assertEquals(sorted(_open.call_args_list), sorted(expected)) |
1693 | self.service_restart.assert_called_once_with("rsyslog") |
1694 | + |
1695 | + |
1696 | +class TestSeries(BaseTestCase): |
1697 | + |
1698 | + def setUp(self): |
1699 | + super(TestSeries, self).setUp() |
1700 | + self._logrotate_path = hooks.DEFAULT_LOGROTATE_PATH |
1701 | + self._rsyslog_path = hooks.DEFAULT_RSYSLOG_PATH |
1702 | + |
1703 | + def tearDown(self): |
1704 | + hooks.DEFAULT_LOGROTATE_PATH = self._logrotate_path |
1705 | + hooks.DEFAULT_RSYSLOG_PATH = self._rsyslog_path |
1706 | + super(TestSeries, self).tearDown() |
1707 | + |
1708 | + def test_series(self): |
1709 | + rsyslog_config = os.path.join(self.tmpdir, 'rsyslog.conf') |
1710 | + hooks.DEFAULT_LOGROTATE_PATH = rsyslog_config |
1711 | + hooks.DEFAULT_RSYSLOG_PATH = os.path.join(self.tmpdir) |
1712 | + lsb = hooks.lsb_release() |
1713 | + with mock.patch.object(hooks, 'lsb_release') as mock_lsb: |
1714 | + lsb['DISTRIB_CODENAME'] = 'artful' |
1715 | + mock_lsb.return_value = lsb |
1716 | + hooks.config_changed() |
1717 | + |
1718 | + with open(rsyslog_config, 'r') as f: |
1719 | + content = f.read() |
1720 | + self.assertIn('invoke-rc.d rsyslog rotate > /dev/null', |
1721 | + content) |
1722 | + |
1723 | + lsb['DISTRIB_CODENAME'] = 'trusty' |
1724 | + hooks.config_changed() |
1725 | + with open(rsyslog_config, 'r') as f: |
1726 | + content = f.read() |
1727 | + self.assertIn('reload rsyslog >/dev/null 2>&1 || true', |
1728 | + content) |
Felipe,
I'd like to backport this change to the trusty series, so please make sure
that the change remains backward compatible from xenial.
<freyes> # invoke-rc.d rsyslog rotate
<freyes> initctl: invalid command: rotate
I will set the review status to "Needs Fixing" for now.
Thanks.