Merge lp:~freyes/charms/xenial/rsyslog/lp1694270 into lp:~bigdata-dev/charms/xenial/rsyslog/trunk
- Xenial Xerus (16.04)
- lp1694270
- Merge into trunk
Status: | Needs review |
---|---|
Proposed branch: | lp:~freyes/charms/xenial/rsyslog/lp1694270 |
Merge into: | lp:~bigdata-dev/charms/xenial/rsyslog/trunk |
Diff against target: |
1728 lines (+1013/-147) 17 files modified
hooks/charmhelpers/__init__.py (+61/-0) hooks/charmhelpers/contrib/charmsupport/nrpe.py (+29/-10) hooks/charmhelpers/core/hookenv.py (+47/-0) hooks/charmhelpers/core/host.py (+225/-35) hooks/charmhelpers/core/host_factory/centos.py (+16/-0) hooks/charmhelpers/core/host_factory/ubuntu.py (+33/-0) hooks/charmhelpers/core/kernel_factory/ubuntu.py (+1/-1) hooks/charmhelpers/core/strutils.py (+53/-0) hooks/charmhelpers/fetch/__init__.py (+17/-9) hooks/charmhelpers/fetch/centos.py (+1/-1) hooks/charmhelpers/fetch/snap.py (+122/-0) hooks/charmhelpers/fetch/ubuntu.py (+314/-82) hooks/charmhelpers/osplatform.py (+6/-0) hooks/hooks.py (+8/-0) templates/rsyslog.conf (+13/-0) tox.ini (+1/-1) unit_tests/test_hooks.py (+66/-8) |
To merge this branch: | bzr merge lp:~freyes/charms/xenial/rsyslog/lp1694270 |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Jorge Niedbalski | Pending | ||
Juju Big Data Development | Pending | ||
Review via email: mp+325877@code.launchpad.net |
This proposal supersedes a proposal from 2017-05-29.
Commit message
Description of the change
Use 'rotate' action from /etc/init.d/rsyslog
'reload' command does not exist in Xenial, the rsyslog package uses the
'rotate' functionality implemented in the sysvinit script to close all
open file descriptors.
Jorge Niedbalski (niedbalski) wrote : Posted in a previous version of this proposal | # |
Felipe Reyes (freyes) wrote : Posted in a previous version of this proposal | # |
Jorge,
Pushed some changes to address trusty.
Now this patch uses CompareHostReleases.
Thanks
Unmerged revisions
- 40. By Felipe Reyes
-
Remove print() statement left from testing
- 39. By Felipe Reyes
-
Use invoke-rc.d only for >=Xenial and add unit tests
- 38. By Felipe Reyes
-
Add py27 to tox
This will prevent regressions with Python 2.7, which is needed to run on Trusty.
- 37. By Felipe Reyes
-
Sync up charm-helpers
- 36. By Felipe Reyes
-
Replace space indentation with tab
- 35. By Felipe Reyes
-
Use 'rotate' action from /etc/init.d/rsyslog
'reload' command does not exist in Xenial, the rsyslog package uses the
'rotate' functionality implemented in the sysvinit script to close all
open file descriptors.

Closes-Bug: 1694270
Preview Diff
1 | === modified file 'hooks/charmhelpers/__init__.py' | |||
2 | --- hooks/charmhelpers/__init__.py 2016-10-26 18:19:59 +0000 | |||
3 | +++ hooks/charmhelpers/__init__.py 2017-06-16 22:43:26 +0000 | |||
4 | @@ -14,6 +14,11 @@ | |||
5 | 14 | 14 | ||
6 | 15 | # Bootstrap charm-helpers, installing its dependencies if necessary using | 15 | # Bootstrap charm-helpers, installing its dependencies if necessary using |
7 | 16 | # only standard libraries. | 16 | # only standard libraries. |
8 | 17 | from __future__ import print_function | ||
9 | 18 | from __future__ import absolute_import | ||
10 | 19 | |||
11 | 20 | import functools | ||
12 | 21 | import inspect | ||
13 | 17 | import subprocess | 22 | import subprocess |
14 | 18 | import sys | 23 | import sys |
15 | 19 | 24 | ||
16 | @@ -34,3 +39,59 @@ | |||
17 | 34 | else: | 39 | else: |
18 | 35 | subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) | 40 | subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) |
19 | 36 | import yaml # flake8: noqa | 41 | import yaml # flake8: noqa |
20 | 42 | |||
21 | 43 | |||
22 | 44 | # Holds a list of mapping of mangled function names that have been deprecated | ||
23 | 45 | # using the @deprecate decorator below. This is so that the warning is only | ||
24 | 46 | # printed once for each usage of the function. | ||
25 | 47 | __deprecated_functions = {} | ||
26 | 48 | |||
27 | 49 | |||
28 | 50 | def deprecate(warning, date=None, log=None): | ||
29 | 51 | """Add a deprecation warning the first time the function is used. | ||
30 | 52 | The date, which is a string in semi-ISO8601 format indicate the year-month | ||
31 | 53 | that the function is officially going to be removed. | ||
32 | 54 | |||
33 | 55 | usage: | ||
34 | 56 | |||
35 | 57 | @deprecate('use core/fetch/add_source() instead', '2017-04') | ||
36 | 58 | def contributed_add_source_thing(...): | ||
37 | 59 | ... | ||
38 | 60 | |||
39 | 61 | And it then prints to the log ONCE that the function is deprecated. | ||
40 | 62 | The reason for passing the logging function (log) is so that hookenv.log | ||
41 | 63 | can be used for a charm if needed. | ||
42 | 64 | |||
43 | 65 | :param warning: String to indicate where it has moved to. | ||
44 | 66 | :param date: optional string, in YYYY-MM format to indicate when the | ||
45 | 67 | function will definitely (probably) be removed. | ||
46 | 68 | :param log: The log function to call to log. If not, logs to stdout | ||
47 | 69 | """ | ||
48 | 70 | def wrap(f): | ||
49 | 71 | |||
50 | 72 | @functools.wraps(f) | ||
51 | 73 | def wrapped_f(*args, **kwargs): | ||
52 | 74 | try: | ||
53 | 75 | module = inspect.getmodule(f) | ||
54 | 76 | file = inspect.getsourcefile(f) | ||
55 | 77 | lines = inspect.getsourcelines(f) | ||
56 | 78 | f_name = "{}-{}-{}..{}-{}".format( | ||
57 | 79 | module.__name__, file, lines[0], lines[-1], f.__name__) | ||
58 | 80 | except (IOError, TypeError): | ||
59 | 81 | # assume it was local, so just use the name of the function | ||
60 | 82 | f_name = f.__name__ | ||
61 | 83 | if f_name not in __deprecated_functions: | ||
62 | 84 | __deprecated_functions[f_name] = True | ||
63 | 85 | s = "DEPRECATION WARNING: Function {} is being removed".format( | ||
64 | 86 | f.__name__) | ||
65 | 87 | if date: | ||
66 | 88 | s = "{} on/around {}".format(s, date) | ||
67 | 89 | if warning: | ||
68 | 90 | s = "{} : {}".format(s, warning) | ||
69 | 91 | if log: | ||
70 | 92 | log(s) | ||
71 | 93 | else: | ||
72 | 94 | print(s) | ||
73 | 95 | return f(*args, **kwargs) | ||
74 | 96 | return wrapped_f | ||
75 | 97 | return wrap | ||
76 | 37 | 98 | ||
77 | === modified file 'hooks/charmhelpers/contrib/charmsupport/nrpe.py' | |||
78 | --- hooks/charmhelpers/contrib/charmsupport/nrpe.py 2016-10-26 18:19:59 +0000 | |||
79 | +++ hooks/charmhelpers/contrib/charmsupport/nrpe.py 2017-06-16 22:43:26 +0000 | |||
80 | @@ -193,6 +193,13 @@ | |||
81 | 193 | nrpe_check_file = self._get_check_filename() | 193 | nrpe_check_file = self._get_check_filename() |
82 | 194 | with open(nrpe_check_file, 'w') as nrpe_check_config: | 194 | with open(nrpe_check_file, 'w') as nrpe_check_config: |
83 | 195 | nrpe_check_config.write("# check {}\n".format(self.shortname)) | 195 | nrpe_check_config.write("# check {}\n".format(self.shortname)) |
84 | 196 | if nagios_servicegroups: | ||
85 | 197 | nrpe_check_config.write( | ||
86 | 198 | "# The following header was added automatically by juju\n") | ||
87 | 199 | nrpe_check_config.write( | ||
88 | 200 | "# Modifying it will affect nagios monitoring and alerting\n") | ||
89 | 201 | nrpe_check_config.write( | ||
90 | 202 | "# servicegroups: {}\n".format(nagios_servicegroups)) | ||
91 | 196 | nrpe_check_config.write("command[{}]={}\n".format( | 203 | nrpe_check_config.write("command[{}]={}\n".format( |
92 | 197 | self.command, self.check_cmd)) | 204 | self.command, self.check_cmd)) |
93 | 198 | 205 | ||
94 | @@ -227,6 +234,7 @@ | |||
95 | 227 | nagios_logdir = '/var/log/nagios' | 234 | nagios_logdir = '/var/log/nagios' |
96 | 228 | nagios_exportdir = '/var/lib/nagios/export' | 235 | nagios_exportdir = '/var/lib/nagios/export' |
97 | 229 | nrpe_confdir = '/etc/nagios/nrpe.d' | 236 | nrpe_confdir = '/etc/nagios/nrpe.d' |
98 | 237 | homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server | ||
99 | 230 | 238 | ||
100 | 231 | def __init__(self, hostname=None, primary=True): | 239 | def __init__(self, hostname=None, primary=True): |
101 | 232 | super(NRPE, self).__init__() | 240 | super(NRPE, self).__init__() |
102 | @@ -338,13 +346,14 @@ | |||
103 | 338 | return unit | 346 | return unit |
104 | 339 | 347 | ||
105 | 340 | 348 | ||
107 | 341 | def add_init_service_checks(nrpe, services, unit_name): | 349 | def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): |
108 | 342 | """ | 350 | """ |
109 | 343 | Add checks for each service in list | 351 | Add checks for each service in list |
110 | 344 | 352 | ||
111 | 345 | :param NRPE nrpe: NRPE object to add check to | 353 | :param NRPE nrpe: NRPE object to add check to |
112 | 346 | :param list services: List of services to check | 354 | :param list services: List of services to check |
113 | 347 | :param str unit_name: Unit name to use in check description | 355 | :param str unit_name: Unit name to use in check description |
114 | 356 | :param bool immediate_check: For sysv init, run the service check immediately | ||
115 | 348 | """ | 357 | """ |
116 | 349 | for svc in services: | 358 | for svc in services: |
117 | 350 | # Don't add a check for these services from neutron-gateway | 359 | # Don't add a check for these services from neutron-gateway |
118 | @@ -368,21 +377,31 @@ | |||
119 | 368 | ) | 377 | ) |
120 | 369 | elif os.path.exists(sysv_init): | 378 | elif os.path.exists(sysv_init): |
121 | 370 | cronpath = '/etc/cron.d/nagios-service-check-%s' % svc | 379 | cronpath = '/etc/cron.d/nagios-service-check-%s' % svc |
128 | 371 | cron_file = ('*/5 * * * * root ' | 380 | checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc) |
129 | 372 | '/usr/local/lib/nagios/plugins/check_exit_status.pl ' | 381 | croncmd = ( |
130 | 373 | '-s /etc/init.d/%s status > ' | 382 | '/usr/local/lib/nagios/plugins/check_exit_status.pl ' |
131 | 374 | '/var/lib/nagios/service-check-%s.txt\n' % (svc, | 383 | '-e -s /etc/init.d/%s status' % svc |
132 | 375 | svc) | 384 | ) |
133 | 376 | ) | 385 | cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath) |
134 | 377 | f = open(cronpath, 'w') | 386 | f = open(cronpath, 'w') |
135 | 378 | f.write(cron_file) | 387 | f.write(cron_file) |
136 | 379 | f.close() | 388 | f.close() |
137 | 380 | nrpe.add_check( | 389 | nrpe.add_check( |
138 | 381 | shortname=svc, | 390 | shortname=svc, |
142 | 382 | description='process check {%s}' % unit_name, | 391 | description='service check {%s}' % unit_name, |
143 | 383 | check_cmd='check_status_file.py -f ' | 392 | check_cmd='check_status_file.py -f %s' % checkpath, |
141 | 384 | '/var/lib/nagios/service-check-%s.txt' % svc, | ||
144 | 385 | ) | 393 | ) |
145 | 394 | # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail | ||
146 | 395 | # (LP: #1670223). | ||
147 | 396 | if immediate_check and os.path.isdir(nrpe.homedir): | ||
148 | 397 | f = open(checkpath, 'w') | ||
149 | 398 | subprocess.call( | ||
150 | 399 | croncmd.split(), | ||
151 | 400 | stdout=f, | ||
152 | 401 | stderr=subprocess.STDOUT | ||
153 | 402 | ) | ||
154 | 403 | f.close() | ||
155 | 404 | os.chmod(checkpath, 0o644) | ||
156 | 386 | 405 | ||
157 | 387 | 406 | ||
158 | 388 | def copy_nrpe_checks(): | 407 | def copy_nrpe_checks(): |
159 | 389 | 408 | ||
160 | === modified file 'hooks/charmhelpers/core/hookenv.py' | |||
161 | --- hooks/charmhelpers/core/hookenv.py 2016-10-26 18:19:59 +0000 | |||
162 | +++ hooks/charmhelpers/core/hookenv.py 2017-06-16 22:43:26 +0000 | |||
163 | @@ -332,6 +332,8 @@ | |||
164 | 332 | config_cmd_line = ['config-get'] | 332 | config_cmd_line = ['config-get'] |
165 | 333 | if scope is not None: | 333 | if scope is not None: |
166 | 334 | config_cmd_line.append(scope) | 334 | config_cmd_line.append(scope) |
167 | 335 | else: | ||
168 | 336 | config_cmd_line.append('--all') | ||
169 | 335 | config_cmd_line.append('--format=json') | 337 | config_cmd_line.append('--format=json') |
170 | 336 | try: | 338 | try: |
171 | 337 | config_data = json.loads( | 339 | config_data = json.loads( |
172 | @@ -614,6 +616,20 @@ | |||
173 | 614 | subprocess.check_call(_args) | 616 | subprocess.check_call(_args) |
174 | 615 | 617 | ||
175 | 616 | 618 | ||
176 | 619 | def open_ports(start, end, protocol="TCP"): | ||
177 | 620 | """Opens a range of service network ports""" | ||
178 | 621 | _args = ['open-port'] | ||
179 | 622 | _args.append('{}-{}/{}'.format(start, end, protocol)) | ||
180 | 623 | subprocess.check_call(_args) | ||
181 | 624 | |||
182 | 625 | |||
183 | 626 | def close_ports(start, end, protocol="TCP"): | ||
184 | 627 | """Close a range of service network ports""" | ||
185 | 628 | _args = ['close-port'] | ||
186 | 629 | _args.append('{}-{}/{}'.format(start, end, protocol)) | ||
187 | 630 | subprocess.check_call(_args) | ||
188 | 631 | |||
189 | 632 | |||
190 | 617 | @cached | 633 | @cached |
191 | 618 | def unit_get(attribute): | 634 | def unit_get(attribute): |
192 | 619 | """Get the unit ID for the remote unit""" | 635 | """Get the unit ID for the remote unit""" |
193 | @@ -1019,3 +1035,34 @@ | |||
194 | 1019 | ''' | 1035 | ''' |
195 | 1020 | cmd = ['network-get', '--primary-address', binding] | 1036 | cmd = ['network-get', '--primary-address', binding] |
196 | 1021 | return subprocess.check_output(cmd).decode('UTF-8').strip() | 1037 | return subprocess.check_output(cmd).decode('UTF-8').strip() |
197 | 1038 | |||
198 | 1039 | |||
199 | 1040 | def add_metric(*args, **kwargs): | ||
200 | 1041 | """Add metric values. Values may be expressed with keyword arguments. For | ||
201 | 1042 | metric names containing dashes, these may be expressed as one or more | ||
202 | 1043 | 'key=value' positional arguments. May only be called from the collect-metrics | ||
203 | 1044 | hook.""" | ||
204 | 1045 | _args = ['add-metric'] | ||
205 | 1046 | _kvpairs = [] | ||
206 | 1047 | _kvpairs.extend(args) | ||
207 | 1048 | _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) | ||
208 | 1049 | _args.extend(sorted(_kvpairs)) | ||
209 | 1050 | try: | ||
210 | 1051 | subprocess.check_call(_args) | ||
211 | 1052 | return | ||
212 | 1053 | except EnvironmentError as e: | ||
213 | 1054 | if e.errno != errno.ENOENT: | ||
214 | 1055 | raise | ||
215 | 1056 | log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) | ||
216 | 1057 | log(log_message, level='INFO') | ||
217 | 1058 | |||
218 | 1059 | |||
219 | 1060 | def meter_status(): | ||
220 | 1061 | """Get the meter status, if running in the meter-status-changed hook.""" | ||
221 | 1062 | return os.environ.get('JUJU_METER_STATUS') | ||
222 | 1063 | |||
223 | 1064 | |||
224 | 1065 | def meter_info(): | ||
225 | 1066 | """Get the meter status information, if running in the meter-status-changed | ||
226 | 1067 | hook.""" | ||
227 | 1068 | return os.environ.get('JUJU_METER_INFO') | ||
228 | 1022 | 1069 | ||
229 | === modified file 'hooks/charmhelpers/core/host.py' | |||
230 | --- hooks/charmhelpers/core/host.py 2016-10-26 18:19:59 +0000 | |||
231 | +++ hooks/charmhelpers/core/host.py 2017-06-16 22:43:26 +0000 | |||
232 | @@ -45,6 +45,7 @@ | |||
233 | 45 | add_new_group, | 45 | add_new_group, |
234 | 46 | lsb_release, | 46 | lsb_release, |
235 | 47 | cmp_pkgrevno, | 47 | cmp_pkgrevno, |
236 | 48 | CompareHostReleases, | ||
237 | 48 | ) # flake8: noqa -- ignore F401 for this import | 49 | ) # flake8: noqa -- ignore F401 for this import |
238 | 49 | elif __platform__ == "centos": | 50 | elif __platform__ == "centos": |
239 | 50 | from charmhelpers.core.host_factory.centos import ( | 51 | from charmhelpers.core.host_factory.centos import ( |
240 | @@ -52,44 +53,146 @@ | |||
241 | 52 | add_new_group, | 53 | add_new_group, |
242 | 53 | lsb_release, | 54 | lsb_release, |
243 | 54 | cmp_pkgrevno, | 55 | cmp_pkgrevno, |
244 | 56 | CompareHostReleases, | ||
245 | 55 | ) # flake8: noqa -- ignore F401 for this import | 57 | ) # flake8: noqa -- ignore F401 for this import |
246 | 56 | 58 | ||
260 | 57 | 59 | UPDATEDB_PATH = '/etc/updatedb.conf' | |
261 | 58 | def service_start(service_name): | 60 | |
262 | 59 | """Start a system service""" | 61 | def service_start(service_name, **kwargs): |
263 | 60 | return service('start', service_name) | 62 | """Start a system service. |
264 | 61 | 63 | ||
265 | 62 | 64 | The specified service name is managed via the system level init system. | |
266 | 63 | def service_stop(service_name): | 65 | Some init systems (e.g. upstart) require that additional arguments be |
267 | 64 | """Stop a system service""" | 66 | provided in order to directly control service instances whereas other init |
268 | 65 | return service('stop', service_name) | 67 | systems allow for addressing instances of a service directly by name (e.g. |
269 | 66 | 68 | systemd). | |
270 | 67 | 69 | ||
271 | 68 | def service_restart(service_name): | 70 | The kwargs allow for the additional parameters to be passed to underlying |
272 | 69 | """Restart a system service""" | 71 | init systems for those systems which require/allow for them. For example, |
273 | 72 | the ceph-osd upstart script requires the id parameter to be passed along | ||
274 | 73 | in order to identify which running daemon should be reloaded. The follow- | ||
275 | 74 | ing example stops the ceph-osd service for instance id=4: | ||
276 | 75 | |||
277 | 76 | service_stop('ceph-osd', id=4) | ||
278 | 77 | |||
279 | 78 | :param service_name: the name of the service to stop | ||
280 | 79 | :param **kwargs: additional parameters to pass to the init system when | ||
281 | 80 | managing services. These will be passed as key=value | ||
282 | 81 | parameters to the init system's commandline. kwargs | ||
283 | 82 | are ignored for systemd enabled systems. | ||
284 | 83 | """ | ||
285 | 84 | return service('start', service_name, **kwargs) | ||
286 | 85 | |||
287 | 86 | |||
288 | 87 | def service_stop(service_name, **kwargs): | ||
289 | 88 | """Stop a system service. | ||
290 | 89 | |||
291 | 90 | The specified service name is managed via the system level init system. | ||
292 | 91 | Some init systems (e.g. upstart) require that additional arguments be | ||
293 | 92 | provided in order to directly control service instances whereas other init | ||
294 | 93 | systems allow for addressing instances of a service directly by name (e.g. | ||
295 | 94 | systemd). | ||
296 | 95 | |||
297 | 96 | The kwargs allow for the additional parameters to be passed to underlying | ||
298 | 97 | init systems for those systems which require/allow for them. For example, | ||
299 | 98 | the ceph-osd upstart script requires the id parameter to be passed along | ||
300 | 99 | in order to identify which running daemon should be reloaded. The follow- | ||
301 | 100 | ing example stops the ceph-osd service for instance id=4: | ||
302 | 101 | |||
303 | 102 | service_stop('ceph-osd', id=4) | ||
304 | 103 | |||
305 | 104 | :param service_name: the name of the service to stop | ||
306 | 105 | :param **kwargs: additional parameters to pass to the init system when | ||
307 | 106 | managing services. These will be passed as key=value | ||
308 | 107 | parameters to the init system's commandline. kwargs | ||
309 | 108 | are ignored for systemd enabled systems. | ||
310 | 109 | """ | ||
311 | 110 | return service('stop', service_name, **kwargs) | ||
312 | 111 | |||
313 | 112 | |||
314 | 113 | def service_restart(service_name, **kwargs): | ||
315 | 114 | """Restart a system service. | ||
316 | 115 | |||
317 | 116 | The specified service name is managed via the system level init system. | ||
318 | 117 | Some init systems (e.g. upstart) require that additional arguments be | ||
319 | 118 | provided in order to directly control service instances whereas other init | ||
320 | 119 | systems allow for addressing instances of a service directly by name (e.g. | ||
321 | 120 | systemd). | ||
322 | 121 | |||
323 | 122 | The kwargs allow for the additional parameters to be passed to underlying | ||
324 | 123 | init systems for those systems which require/allow for them. For example, | ||
325 | 124 | the ceph-osd upstart script requires the id parameter to be passed along | ||
326 | 125 | in order to identify which running daemon should be restarted. The follow- | ||
327 | 126 | ing example restarts the ceph-osd service for instance id=4: | ||
328 | 127 | |||
329 | 128 | service_restart('ceph-osd', id=4) | ||
330 | 129 | |||
331 | 130 | :param service_name: the name of the service to restart | ||
332 | 131 | :param **kwargs: additional parameters to pass to the init system when | ||
333 | 132 | managing services. These will be passed as key=value | ||
334 | 133 | parameters to the init system's commandline. kwargs | ||
335 | 134 | are ignored for init systems not allowing additional | ||
336 | 135 | parameters via the commandline (systemd). | ||
337 | 136 | """ | ||
338 | 70 | return service('restart', service_name) | 137 | return service('restart', service_name) |
339 | 71 | 138 | ||
340 | 72 | 139 | ||
342 | 73 | def service_reload(service_name, restart_on_failure=False): | 140 | def service_reload(service_name, restart_on_failure=False, **kwargs): |
343 | 74 | """Reload a system service, optionally falling back to restart if | 141 | """Reload a system service, optionally falling back to restart if |
346 | 75 | reload fails""" | 142 | reload fails. |
347 | 76 | service_result = service('reload', service_name) | 143 | |
348 | 144 | The specified service name is managed via the system level init system. | ||
349 | 145 | Some init systems (e.g. upstart) require that additional arguments be | ||
350 | 146 | provided in order to directly control service instances whereas other init | ||
351 | 147 | systems allow for addressing instances of a service directly by name (e.g. | ||
352 | 148 | systemd). | ||
353 | 149 | |||
354 | 150 | The kwargs allow for the additional parameters to be passed to underlying | ||
355 | 151 | init systems for those systems which require/allow for them. For example, | ||
356 | 152 | the ceph-osd upstart script requires the id parameter to be passed along | ||
357 | 153 | in order to identify which running daemon should be reloaded. The follow- | ||
358 | 154 | ing example restarts the ceph-osd service for instance id=4: | ||
359 | 155 | |||
360 | 156 | service_reload('ceph-osd', id=4) | ||
361 | 157 | |||
362 | 158 | :param service_name: the name of the service to reload | ||
363 | 159 | :param restart_on_failure: boolean indicating whether to fallback to a | ||
364 | 160 | restart if the reload fails. | ||
365 | 161 | :param **kwargs: additional parameters to pass to the init system when | ||
366 | 162 | managing services. These will be passed as key=value | ||
367 | 163 | parameters to the init system's commandline. kwargs | ||
368 | 164 | are ignored for init systems not allowing additional | ||
369 | 165 | parameters via the commandline (systemd). | ||
370 | 166 | """ | ||
371 | 167 | service_result = service('reload', service_name, **kwargs) | ||
372 | 77 | if not service_result and restart_on_failure: | 168 | if not service_result and restart_on_failure: |
374 | 78 | service_result = service('restart', service_name) | 169 | service_result = service('restart', service_name, **kwargs) |
375 | 79 | return service_result | 170 | return service_result |
376 | 80 | 171 | ||
377 | 81 | 172 | ||
379 | 82 | def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): | 173 | def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", |
380 | 174 | **kwargs): | ||
381 | 83 | """Pause a system service. | 175 | """Pause a system service. |
382 | 84 | 176 | ||
384 | 85 | Stop it, and prevent it from starting again at boot.""" | 177 | Stop it, and prevent it from starting again at boot. |
385 | 178 | |||
386 | 179 | :param service_name: the name of the service to pause | ||
387 | 180 | :param init_dir: path to the upstart init directory | ||
388 | 181 | :param initd_dir: path to the sysv init directory | ||
389 | 182 | :param **kwargs: additional parameters to pass to the init system when | ||
390 | 183 | managing services. These will be passed as key=value | ||
391 | 184 | parameters to the init system's commandline. kwargs | ||
392 | 185 | are ignored for init systems which do not support | ||
393 | 186 | key=value arguments via the commandline. | ||
394 | 187 | """ | ||
395 | 86 | stopped = True | 188 | stopped = True |
398 | 87 | if service_running(service_name): | 189 | if service_running(service_name, **kwargs): |
399 | 88 | stopped = service_stop(service_name) | 190 | stopped = service_stop(service_name, **kwargs) |
400 | 89 | upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) | 191 | upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) |
401 | 90 | sysv_file = os.path.join(initd_dir, service_name) | 192 | sysv_file = os.path.join(initd_dir, service_name) |
402 | 91 | if init_is_systemd(): | 193 | if init_is_systemd(): |
403 | 92 | service('disable', service_name) | 194 | service('disable', service_name) |
404 | 195 | service('mask', service_name) | ||
405 | 93 | elif os.path.exists(upstart_file): | 196 | elif os.path.exists(upstart_file): |
406 | 94 | override_path = os.path.join( | 197 | override_path = os.path.join( |
407 | 95 | init_dir, '{}.override'.format(service_name)) | 198 | init_dir, '{}.override'.format(service_name)) |
408 | @@ -106,13 +209,23 @@ | |||
409 | 106 | 209 | ||
410 | 107 | 210 | ||
411 | 108 | def service_resume(service_name, init_dir="/etc/init", | 211 | def service_resume(service_name, init_dir="/etc/init", |
413 | 109 | initd_dir="/etc/init.d"): | 212 | initd_dir="/etc/init.d", **kwargs): |
414 | 110 | """Resume a system service. | 213 | """Resume a system service. |
415 | 111 | 214 | ||
417 | 112 | Reenable starting again at boot. Start the service""" | 215 | Reenable starting again at boot. Start the service. |
418 | 216 | |||
419 | 217 | :param service_name: the name of the service to resume | ||
420 | 218 | :param init_dir: the path to the init dir | ||
421 | 219 | :param initd dir: the path to the initd dir | ||
422 | 220 | :param **kwargs: additional parameters to pass to the init system when | ||
423 | 221 | managing services. These will be passed as key=value | ||
424 | 222 | parameters to the init system's commandline. kwargs | ||
425 | 223 | are ignored for systemd enabled systems. | ||
426 | 224 | """ | ||
427 | 113 | upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) | 225 | upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) |
428 | 114 | sysv_file = os.path.join(initd_dir, service_name) | 226 | sysv_file = os.path.join(initd_dir, service_name) |
429 | 115 | if init_is_systemd(): | 227 | if init_is_systemd(): |
430 | 228 | service('unmask', service_name) | ||
431 | 116 | service('enable', service_name) | 229 | service('enable', service_name) |
432 | 117 | elif os.path.exists(upstart_file): | 230 | elif os.path.exists(upstart_file): |
433 | 118 | override_path = os.path.join( | 231 | override_path = os.path.join( |
434 | @@ -126,19 +239,28 @@ | |||
435 | 126 | "Unable to detect {0} as SystemD, Upstart {1} or" | 239 | "Unable to detect {0} as SystemD, Upstart {1} or" |
436 | 127 | " SysV {2}".format( | 240 | " SysV {2}".format( |
437 | 128 | service_name, upstart_file, sysv_file)) | 241 | service_name, upstart_file, sysv_file)) |
438 | 242 | started = service_running(service_name, **kwargs) | ||
439 | 129 | 243 | ||
440 | 130 | started = service_running(service_name) | ||
441 | 131 | if not started: | 244 | if not started: |
443 | 132 | started = service_start(service_name) | 245 | started = service_start(service_name, **kwargs) |
444 | 133 | return started | 246 | return started |
445 | 134 | 247 | ||
446 | 135 | 248 | ||
449 | 136 | def service(action, service_name): | 249 | def service(action, service_name, **kwargs): |
450 | 137 | """Control a system service""" | 250 | """Control a system service. |
451 | 251 | |||
452 | 252 | :param action: the action to take on the service | ||
453 | 253 | :param service_name: the name of the service to perform th action on | ||
454 | 254 | :param **kwargs: additional params to be passed to the service command in | ||
455 | 255 | the form of key=value. | ||
456 | 256 | """ | ||
457 | 138 | if init_is_systemd(): | 257 | if init_is_systemd(): |
458 | 139 | cmd = ['systemctl', action, service_name] | 258 | cmd = ['systemctl', action, service_name] |
459 | 140 | else: | 259 | else: |
460 | 141 | cmd = ['service', service_name, action] | 260 | cmd = ['service', service_name, action] |
461 | 261 | for key, value in six.iteritems(kwargs): | ||
462 | 262 | parameter = '%s=%s' % (key, value) | ||
463 | 263 | cmd.append(parameter) | ||
464 | 142 | return subprocess.call(cmd) == 0 | 264 | return subprocess.call(cmd) == 0 |
465 | 143 | 265 | ||
466 | 144 | 266 | ||
467 | @@ -146,15 +268,26 @@ | |||
468 | 146 | _INIT_D_CONF = "/etc/init.d/{}" | 268 | _INIT_D_CONF = "/etc/init.d/{}" |
469 | 147 | 269 | ||
470 | 148 | 270 | ||
473 | 149 | def service_running(service_name): | 271 | def service_running(service_name, **kwargs): |
474 | 150 | """Determine whether a system service is running""" | 272 | """Determine whether a system service is running. |
475 | 273 | |||
476 | 274 | :param service_name: the name of the service | ||
477 | 275 | :param **kwargs: additional args to pass to the service command. This is | ||
478 | 276 | used to pass additional key=value arguments to the | ||
479 | 277 | service command line for managing specific instance | ||
480 | 278 | units (e.g. service ceph-osd status id=2). The kwargs | ||
481 | 279 | are ignored in systemd services. | ||
482 | 280 | """ | ||
483 | 151 | if init_is_systemd(): | 281 | if init_is_systemd(): |
484 | 152 | return service('is-active', service_name) | 282 | return service('is-active', service_name) |
485 | 153 | else: | 283 | else: |
486 | 154 | if os.path.exists(_UPSTART_CONF.format(service_name)): | 284 | if os.path.exists(_UPSTART_CONF.format(service_name)): |
487 | 155 | try: | 285 | try: |
490 | 156 | output = subprocess.check_output( | 286 | cmd = ['status', service_name] |
491 | 157 | ['status', service_name], | 287 | for key, value in six.iteritems(kwargs): |
492 | 288 | parameter = '%s=%s' % (key, value) | ||
493 | 289 | cmd.append(parameter) | ||
494 | 290 | output = subprocess.check_output(cmd, | ||
495 | 158 | stderr=subprocess.STDOUT).decode('UTF-8') | 291 | stderr=subprocess.STDOUT).decode('UTF-8') |
496 | 159 | except subprocess.CalledProcessError: | 292 | except subprocess.CalledProcessError: |
497 | 160 | return False | 293 | return False |
498 | @@ -177,6 +310,8 @@ | |||
499 | 177 | 310 | ||
500 | 178 | def init_is_systemd(): | 311 | def init_is_systemd(): |
501 | 179 | """Return True if the host system uses systemd, False otherwise.""" | 312 | """Return True if the host system uses systemd, False otherwise.""" |
502 | 313 | if lsb_release()['DISTRIB_CODENAME'] == 'trusty': | ||
503 | 314 | return False | ||
504 | 180 | return os.path.isdir(SYSTEMD_SYSTEM) | 315 | return os.path.isdir(SYSTEMD_SYSTEM) |
505 | 181 | 316 | ||
506 | 182 | 317 | ||
507 | @@ -306,15 +441,17 @@ | |||
508 | 306 | subprocess.check_call(cmd) | 441 | subprocess.check_call(cmd) |
509 | 307 | 442 | ||
510 | 308 | 443 | ||
512 | 309 | def rsync(from_path, to_path, flags='-r', options=None): | 444 | def rsync(from_path, to_path, flags='-r', options=None, timeout=None): |
513 | 310 | """Replicate the contents of a path""" | 445 | """Replicate the contents of a path""" |
514 | 311 | options = options or ['--delete', '--executability'] | 446 | options = options or ['--delete', '--executability'] |
515 | 312 | cmd = ['/usr/bin/rsync', flags] | 447 | cmd = ['/usr/bin/rsync', flags] |
516 | 448 | if timeout: | ||
517 | 449 | cmd = ['timeout', str(timeout)] + cmd | ||
518 | 313 | cmd.extend(options) | 450 | cmd.extend(options) |
519 | 314 | cmd.append(from_path) | 451 | cmd.append(from_path) |
520 | 315 | cmd.append(to_path) | 452 | cmd.append(to_path) |
521 | 316 | log(" ".join(cmd)) | 453 | log(" ".join(cmd)) |
523 | 317 | return subprocess.check_output(cmd).decode('UTF-8').strip() | 454 | return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() |
524 | 318 | 455 | ||
525 | 319 | 456 | ||
526 | 320 | def symlink(source, destination): | 457 | def symlink(source, destination): |
527 | @@ -684,7 +821,7 @@ | |||
528 | 684 | :param str path: The string path to start changing ownership. | 821 | :param str path: The string path to start changing ownership. |
529 | 685 | :param str owner: The owner string to use when looking up the uid. | 822 | :param str owner: The owner string to use when looking up the uid. |
530 | 686 | :param str group: The group string to use when looking up the gid. | 823 | :param str group: The group string to use when looking up the gid. |
532 | 687 | :param bool follow_links: Also Chown links if True | 824 | :param bool follow_links: Also follow and chown links if True |
533 | 688 | :param bool chowntopdir: Also chown path itself if True | 825 | :param bool chowntopdir: Also chown path itself if True |
534 | 689 | """ | 826 | """ |
535 | 690 | uid = pwd.getpwnam(owner).pw_uid | 827 | uid = pwd.getpwnam(owner).pw_uid |
536 | @@ -698,7 +835,7 @@ | |||
537 | 698 | broken_symlink = os.path.lexists(path) and not os.path.exists(path) | 835 | broken_symlink = os.path.lexists(path) and not os.path.exists(path) |
538 | 699 | if not broken_symlink: | 836 | if not broken_symlink: |
539 | 700 | chown(path, uid, gid) | 837 | chown(path, uid, gid) |
541 | 701 | for root, dirs, files in os.walk(path): | 838 | for root, dirs, files in os.walk(path, followlinks=follow_links): |
542 | 702 | for name in dirs + files: | 839 | for name in dirs + files: |
543 | 703 | full = os.path.join(root, name) | 840 | full = os.path.join(root, name) |
544 | 704 | broken_symlink = os.path.lexists(full) and not os.path.exists(full) | 841 | broken_symlink = os.path.lexists(full) and not os.path.exists(full) |
545 | @@ -718,6 +855,20 @@ | |||
546 | 718 | chownr(path, owner, group, follow_links=False) | 855 | chownr(path, owner, group, follow_links=False) |
547 | 719 | 856 | ||
548 | 720 | 857 | ||
549 | 858 | def owner(path): | ||
550 | 859 | """Returns a tuple containing the username & groupname owning the path. | ||
551 | 860 | |||
552 | 861 | :param str path: the string path to retrieve the ownership | ||
553 | 862 | :return tuple(str, str): A (username, groupname) tuple containing the | ||
554 | 863 | name of the user and group owning the path. | ||
555 | 864 | :raises OSError: if the specified path does not exist | ||
556 | 865 | """ | ||
557 | 866 | stat = os.stat(path) | ||
558 | 867 | username = pwd.getpwuid(stat.st_uid)[0] | ||
559 | 868 | groupname = grp.getgrgid(stat.st_gid)[0] | ||
560 | 869 | return username, groupname | ||
561 | 870 | |||
562 | 871 | |||
563 | 721 | def get_total_ram(): | 872 | def get_total_ram(): |
564 | 722 | """The total amount of system RAM in bytes. | 873 | """The total amount of system RAM in bytes. |
565 | 723 | 874 | ||
566 | @@ -732,3 +883,42 @@ | |||
567 | 732 | assert unit == 'kB', 'Unknown unit' | 883 | assert unit == 'kB', 'Unknown unit' |
568 | 733 | return int(value) * 1024 # Classic, not KiB. | 884 | return int(value) * 1024 # Classic, not KiB. |
569 | 734 | raise NotImplementedError() | 885 | raise NotImplementedError() |
570 | 886 | |||
571 | 887 | |||
572 | 888 | UPSTART_CONTAINER_TYPE = '/run/container_type' | ||
573 | 889 | |||
574 | 890 | |||
575 | 891 | def is_container(): | ||
576 | 892 | """Determine whether unit is running in a container | ||
577 | 893 | |||
578 | 894 | @return: boolean indicating if unit is in a container | ||
579 | 895 | """ | ||
580 | 896 | if init_is_systemd(): | ||
581 | 897 | # Detect using systemd-detect-virt | ||
582 | 898 | return subprocess.call(['systemd-detect-virt', | ||
583 | 899 | '--container']) == 0 | ||
584 | 900 | else: | ||
585 | 901 | # Detect using upstart container file marker | ||
586 | 902 | return os.path.exists(UPSTART_CONTAINER_TYPE) | ||
587 | 903 | |||
588 | 904 | |||
589 | 905 | def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): | ||
590 | 906 | with open(updatedb_path, 'r+') as f_id: | ||
591 | 907 | updatedb_text = f_id.read() | ||
592 | 908 | output = updatedb(updatedb_text, path) | ||
593 | 909 | f_id.seek(0) | ||
594 | 910 | f_id.write(output) | ||
595 | 911 | f_id.truncate() | ||
596 | 912 | |||
597 | 913 | |||
598 | 914 | def updatedb(updatedb_text, new_path): | ||
599 | 915 | lines = [line for line in updatedb_text.split("\n")] | ||
600 | 916 | for i, line in enumerate(lines): | ||
601 | 917 | if line.startswith("PRUNEPATHS="): | ||
602 | 918 | paths_line = line.split("=")[1].replace('"', '') | ||
603 | 919 | paths = paths_line.split(" ") | ||
604 | 920 | if new_path not in paths: | ||
605 | 921 | paths.append(new_path) | ||
606 | 922 | lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) | ||
607 | 923 | output = "\n".join(lines) | ||
608 | 924 | return output | ||
609 | 735 | 925 | ||
610 | === modified file 'hooks/charmhelpers/core/host_factory/centos.py' | |||
611 | --- hooks/charmhelpers/core/host_factory/centos.py 2016-10-26 18:19:59 +0000 | |||
612 | +++ hooks/charmhelpers/core/host_factory/centos.py 2017-06-16 22:43:26 +0000 | |||
613 | @@ -2,6 +2,22 @@ | |||
614 | 2 | import yum | 2 | import yum |
615 | 3 | import os | 3 | import os |
616 | 4 | 4 | ||
617 | 5 | from charmhelpers.core.strutils import BasicStringComparator | ||
618 | 6 | |||
619 | 7 | |||
620 | 8 | class CompareHostReleases(BasicStringComparator): | ||
621 | 9 | """Provide comparisons of Host releases. | ||
622 | 10 | |||
623 | 11 | Use in the form of | ||
624 | 12 | |||
625 | 13 | if CompareHostReleases(release) > 'trusty': | ||
626 | 14 | # do something for releases newer than trusty | ||
627 | 15 | """ | ||
628 | 16 | |||
629 | 17 | def __init__(self, item): | ||
630 | 18 | raise NotImplementedError( | ||
631 | 19 | "CompareHostReleases() is not implemented for CentOS") | ||
632 | 20 | |||
633 | 5 | 21 | ||
634 | 6 | def service_available(service_name): | 22 | def service_available(service_name): |
635 | 7 | # """Determine whether a system service is available.""" | 23 | # """Determine whether a system service is available.""" |
636 | 8 | 24 | ||
637 | === modified file 'hooks/charmhelpers/core/host_factory/ubuntu.py' | |||
638 | --- hooks/charmhelpers/core/host_factory/ubuntu.py 2016-10-26 18:19:59 +0000 | |||
639 | +++ hooks/charmhelpers/core/host_factory/ubuntu.py 2017-06-16 22:43:26 +0000 | |||
640 | @@ -1,5 +1,38 @@ | |||
641 | 1 | import subprocess | 1 | import subprocess |
642 | 2 | 2 | ||
643 | 3 | from charmhelpers.core.strutils import BasicStringComparator | ||
644 | 4 | |||
645 | 5 | |||
646 | 6 | UBUNTU_RELEASES = ( | ||
647 | 7 | 'lucid', | ||
648 | 8 | 'maverick', | ||
649 | 9 | 'natty', | ||
650 | 10 | 'oneiric', | ||
651 | 11 | 'precise', | ||
652 | 12 | 'quantal', | ||
653 | 13 | 'raring', | ||
654 | 14 | 'saucy', | ||
655 | 15 | 'trusty', | ||
656 | 16 | 'utopic', | ||
657 | 17 | 'vivid', | ||
658 | 18 | 'wily', | ||
659 | 19 | 'xenial', | ||
660 | 20 | 'yakkety', | ||
661 | 21 | 'zesty', | ||
662 | 22 | 'artful', | ||
663 | 23 | ) | ||
664 | 24 | |||
665 | 25 | |||
666 | 26 | class CompareHostReleases(BasicStringComparator): | ||
667 | 27 | """Provide comparisons of Ubuntu releases. | ||
668 | 28 | |||
669 | 29 | Use in the form of | ||
670 | 30 | |||
671 | 31 | if CompareHostReleases(release) > 'trusty': | ||
672 | 32 | # do something with mitaka | ||
673 | 33 | """ | ||
674 | 34 | _list = UBUNTU_RELEASES | ||
675 | 35 | |||
676 | 3 | 36 | ||
677 | 4 | def service_available(service_name): | 37 | def service_available(service_name): |
678 | 5 | """Determine whether a system service is available""" | 38 | """Determine whether a system service is available""" |
679 | 6 | 39 | ||
680 | === modified file 'hooks/charmhelpers/core/kernel_factory/ubuntu.py' | |||
681 | --- hooks/charmhelpers/core/kernel_factory/ubuntu.py 2016-10-26 18:19:59 +0000 | |||
682 | +++ hooks/charmhelpers/core/kernel_factory/ubuntu.py 2017-06-16 22:43:26 +0000 | |||
683 | @@ -5,7 +5,7 @@ | |||
684 | 5 | """Load a kernel module and configure for auto-load on reboot.""" | 5 | """Load a kernel module and configure for auto-load on reboot.""" |
685 | 6 | with open('/etc/modules', 'r+') as modules: | 6 | with open('/etc/modules', 'r+') as modules: |
686 | 7 | if module not in modules.read(): | 7 | if module not in modules.read(): |
688 | 8 | modules.write(module) | 8 | modules.write(module + "\n") |
689 | 9 | 9 | ||
690 | 10 | 10 | ||
691 | 11 | def update_initramfs(version='all'): | 11 | def update_initramfs(version='all'): |
692 | 12 | 12 | ||
693 | === modified file 'hooks/charmhelpers/core/strutils.py' | |||
694 | --- hooks/charmhelpers/core/strutils.py 2016-10-26 18:19:59 +0000 | |||
695 | +++ hooks/charmhelpers/core/strutils.py 2017-06-16 22:43:26 +0000 | |||
696 | @@ -68,3 +68,56 @@ | |||
697 | 68 | msg = "Unable to interpret string value '%s' as bytes" % (value) | 68 | msg = "Unable to interpret string value '%s' as bytes" % (value) |
698 | 69 | raise ValueError(msg) | 69 | raise ValueError(msg) |
699 | 70 | return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) | 70 | return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) |
700 | 71 | |||
701 | 72 | |||
702 | 73 | class BasicStringComparator(object): | ||
703 | 74 | """Provides a class that will compare strings from an iterator type object. | ||
704 | 75 | Used to provide > and < comparisons on strings that may not necessarily be | ||
705 | 76 | alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the | ||
706 | 77 | z-wrap. | ||
707 | 78 | """ | ||
708 | 79 | |||
709 | 80 | _list = None | ||
710 | 81 | |||
711 | 82 | def __init__(self, item): | ||
712 | 83 | if self._list is None: | ||
713 | 84 | raise Exception("Must define the _list in the class definition!") | ||
714 | 85 | try: | ||
715 | 86 | self.index = self._list.index(item) | ||
716 | 87 | except Exception: | ||
717 | 88 | raise KeyError("Item '{}' is not in list '{}'" | ||
718 | 89 | .format(item, self._list)) | ||
719 | 90 | |||
720 | 91 | def __eq__(self, other): | ||
721 | 92 | assert isinstance(other, str) or isinstance(other, self.__class__) | ||
722 | 93 | return self.index == self._list.index(other) | ||
723 | 94 | |||
724 | 95 | def __ne__(self, other): | ||
725 | 96 | return not self.__eq__(other) | ||
726 | 97 | |||
727 | 98 | def __lt__(self, other): | ||
728 | 99 | assert isinstance(other, str) or isinstance(other, self.__class__) | ||
729 | 100 | return self.index < self._list.index(other) | ||
730 | 101 | |||
731 | 102 | def __ge__(self, other): | ||
732 | 103 | return not self.__lt__(other) | ||
733 | 104 | |||
734 | 105 | def __gt__(self, other): | ||
735 | 106 | assert isinstance(other, str) or isinstance(other, self.__class__) | ||
736 | 107 | return self.index > self._list.index(other) | ||
737 | 108 | |||
738 | 109 | def __le__(self, other): | ||
739 | 110 | return not self.__gt__(other) | ||
740 | 111 | |||
741 | 112 | def __str__(self): | ||
742 | 113 | """Always give back the item at the index so it can be used in | ||
743 | 114 | comparisons like: | ||
744 | 115 | |||
745 | 116 | s_mitaka = CompareOpenStack('mitaka') | ||
746 | 117 | s_newton = CompareOpenStack('newton') | ||
747 | 118 | |||
748 | 119 | assert s_newton > s_mitaka | ||
749 | 120 | |||
750 | 121 | @returns: <string> | ||
751 | 122 | """ | ||
752 | 123 | return self._list[self.index] | ||
753 | 71 | 124 | ||
754 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
755 | --- hooks/charmhelpers/fetch/__init__.py 2016-10-26 18:19:59 +0000 | |||
756 | +++ hooks/charmhelpers/fetch/__init__.py 2017-06-16 22:43:26 +0000 | |||
757 | @@ -48,6 +48,13 @@ | |||
758 | 48 | pass | 48 | pass |
759 | 49 | 49 | ||
760 | 50 | 50 | ||
761 | 51 | class GPGKeyError(Exception): | ||
762 | 52 | """Exception occurs when a GPG key cannot be fetched or used. The message | ||
763 | 53 | indicates what the problem is. | ||
764 | 54 | """ | ||
765 | 55 | pass | ||
766 | 56 | |||
767 | 57 | |||
768 | 51 | class BaseFetchHandler(object): | 58 | class BaseFetchHandler(object): |
769 | 52 | 59 | ||
770 | 53 | """Base class for FetchHandler implementations in fetch plugins""" | 60 | """Base class for FetchHandler implementations in fetch plugins""" |
771 | @@ -77,21 +84,22 @@ | |||
772 | 77 | fetch = importlib.import_module(module) | 84 | fetch = importlib.import_module(module) |
773 | 78 | 85 | ||
774 | 79 | filter_installed_packages = fetch.filter_installed_packages | 86 | filter_installed_packages = fetch.filter_installed_packages |
779 | 80 | install = fetch.install | 87 | install = fetch.apt_install |
780 | 81 | upgrade = fetch.upgrade | 88 | upgrade = fetch.apt_upgrade |
781 | 82 | update = fetch.update | 89 | update = _fetch_update = fetch.apt_update |
782 | 83 | purge = fetch.purge | 90 | purge = fetch.apt_purge |
783 | 84 | add_source = fetch.add_source | 91 | add_source = fetch.add_source |
784 | 85 | 92 | ||
785 | 86 | if __platform__ == "ubuntu": | 93 | if __platform__ == "ubuntu": |
786 | 87 | apt_cache = fetch.apt_cache | 94 | apt_cache = fetch.apt_cache |
791 | 88 | apt_install = fetch.install | 95 | apt_install = fetch.apt_install |
792 | 89 | apt_update = fetch.update | 96 | apt_update = fetch.apt_update |
793 | 90 | apt_upgrade = fetch.upgrade | 97 | apt_upgrade = fetch.apt_upgrade |
794 | 91 | apt_purge = fetch.purge | 98 | apt_purge = fetch.apt_purge |
795 | 92 | apt_mark = fetch.apt_mark | 99 | apt_mark = fetch.apt_mark |
796 | 93 | apt_hold = fetch.apt_hold | 100 | apt_hold = fetch.apt_hold |
797 | 94 | apt_unhold = fetch.apt_unhold | 101 | apt_unhold = fetch.apt_unhold |
798 | 102 | import_key = fetch.import_key | ||
799 | 95 | get_upstream_version = fetch.get_upstream_version | 103 | get_upstream_version = fetch.get_upstream_version |
800 | 96 | elif __platform__ == "centos": | 104 | elif __platform__ == "centos": |
801 | 97 | yum_search = fetch.yum_search | 105 | yum_search = fetch.yum_search |
802 | @@ -135,7 +143,7 @@ | |||
803 | 135 | for source, key in zip(sources, keys): | 143 | for source, key in zip(sources, keys): |
804 | 136 | add_source(source, key) | 144 | add_source(source, key) |
805 | 137 | if update: | 145 | if update: |
807 | 138 | fetch.update(fatal=True) | 146 | _fetch_update(fatal=True) |
808 | 139 | 147 | ||
809 | 140 | 148 | ||
810 | 141 | def install_remote(source, *args, **kwargs): | 149 | def install_remote(source, *args, **kwargs): |
811 | 142 | 150 | ||
812 | === modified file 'hooks/charmhelpers/fetch/centos.py' | |||
813 | --- hooks/charmhelpers/fetch/centos.py 2016-10-26 18:19:59 +0000 | |||
814 | +++ hooks/charmhelpers/fetch/centos.py 2017-06-16 22:43:26 +0000 | |||
815 | @@ -132,7 +132,7 @@ | |||
816 | 132 | key_file.write(key) | 132 | key_file.write(key) |
817 | 133 | key_file.flush() | 133 | key_file.flush() |
818 | 134 | key_file.seek(0) | 134 | key_file.seek(0) |
820 | 135 | subprocess.check_call(['rpm', '--import', key_file]) | 135 | subprocess.check_call(['rpm', '--import', key_file.name]) |
821 | 136 | else: | 136 | else: |
822 | 137 | subprocess.check_call(['rpm', '--import', key]) | 137 | subprocess.check_call(['rpm', '--import', key]) |
823 | 138 | 138 | ||
824 | 139 | 139 | ||
825 | === added file 'hooks/charmhelpers/fetch/snap.py' | |||
826 | --- hooks/charmhelpers/fetch/snap.py 1970-01-01 00:00:00 +0000 | |||
827 | +++ hooks/charmhelpers/fetch/snap.py 2017-06-16 22:43:26 +0000 | |||
828 | @@ -0,0 +1,122 @@ | |||
829 | 1 | # Copyright 2014-2017 Canonical Limited. | ||
830 | 2 | # | ||
831 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
832 | 4 | # you may not use this file except in compliance with the License. | ||
833 | 5 | # You may obtain a copy of the License at | ||
834 | 6 | # | ||
835 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
836 | 8 | # | ||
837 | 9 | # Unless required by applicable law or agreed to in writing, software | ||
838 | 10 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
839 | 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
840 | 12 | # See the License for the specific language governing permissions and | ||
841 | 13 | # limitations under the License. | ||
842 | 14 | """ | ||
843 | 15 | Charm helpers snap for classic charms. | ||
844 | 16 | |||
845 | 17 | If writing reactive charms, use the snap layer: | ||
846 | 18 | https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html | ||
847 | 19 | """ | ||
848 | 20 | import subprocess | ||
849 | 21 | from os import environ | ||
850 | 22 | from time import sleep | ||
851 | 23 | from charmhelpers.core.hookenv import log | ||
852 | 24 | |||
853 | 25 | __author__ = 'Joseph Borg <joseph.borg@canonical.com>' | ||
854 | 26 | |||
855 | 27 | SNAP_NO_LOCK = 1 # The return code for "couldn't acquire lock" in Snap (hopefully this will be improved). | ||
856 | 28 | SNAP_NO_LOCK_RETRY_DELAY = 10 # Wait X seconds between Snap lock checks. | ||
857 | 29 | SNAP_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. | ||
858 | 30 | |||
859 | 31 | |||
860 | 32 | class CouldNotAcquireLockException(Exception): | ||
861 | 33 | pass | ||
862 | 34 | |||
863 | 35 | |||
864 | 36 | def _snap_exec(commands): | ||
865 | 37 | """ | ||
866 | 38 | Execute snap commands. | ||
867 | 39 | |||
868 | 40 | :param commands: List commands | ||
869 | 41 | :return: Integer exit code | ||
870 | 42 | """ | ||
871 | 43 | assert type(commands) == list | ||
872 | 44 | |||
873 | 45 | retry_count = 0 | ||
874 | 46 | return_code = None | ||
875 | 47 | |||
876 | 48 | while return_code is None or return_code == SNAP_NO_LOCK: | ||
877 | 49 | try: | ||
878 | 50 | return_code = subprocess.check_call(['snap'] + commands, env=environ) | ||
879 | 51 | except subprocess.CalledProcessError as e: | ||
880 | 52 | retry_count += + 1 | ||
881 | 53 | if retry_count > SNAP_NO_LOCK_RETRY_COUNT: | ||
882 | 54 | raise CouldNotAcquireLockException('Could not acquire lock after %s attempts' % SNAP_NO_LOCK_RETRY_COUNT) | ||
883 | 55 | return_code = e.returncode | ||
884 | 56 | log('Snap failed to acquire lock, trying again in %s seconds.' % SNAP_NO_LOCK_RETRY_DELAY, level='WARN') | ||
885 | 57 | sleep(SNAP_NO_LOCK_RETRY_DELAY) | ||
886 | 58 | |||
887 | 59 | return return_code | ||
888 | 60 | |||
889 | 61 | |||
890 | 62 | def snap_install(packages, *flags): | ||
891 | 63 | """ | ||
892 | 64 | Install a snap package. | ||
893 | 65 | |||
894 | 66 | :param packages: String or List String package name | ||
895 | 67 | :param flags: List String flags to pass to install command | ||
896 | 68 | :return: Integer return code from snap | ||
897 | 69 | """ | ||
898 | 70 | if type(packages) is not list: | ||
899 | 71 | packages = [packages] | ||
900 | 72 | |||
901 | 73 | flags = list(flags) | ||
902 | 74 | |||
903 | 75 | message = 'Installing snap(s) "%s"' % ', '.join(packages) | ||
904 | 76 | if flags: | ||
905 | 77 | message += ' with option(s) "%s"' % ', '.join(flags) | ||
906 | 78 | |||
907 | 79 | log(message, level='INFO') | ||
908 | 80 | return _snap_exec(['install'] + flags + packages) | ||
909 | 81 | |||
910 | 82 | |||
911 | 83 | def snap_remove(packages, *flags): | ||
912 | 84 | """ | ||
913 | 85 | Remove a snap package. | ||
914 | 86 | |||
915 | 87 | :param packages: String or List String package name | ||
916 | 88 | :param flags: List String flags to pass to remove command | ||
917 | 89 | :return: Integer return code from snap | ||
918 | 90 | """ | ||
919 | 91 | if type(packages) is not list: | ||
920 | 92 | packages = [packages] | ||
921 | 93 | |||
922 | 94 | flags = list(flags) | ||
923 | 95 | |||
924 | 96 | message = 'Removing snap(s) "%s"' % ', '.join(packages) | ||
925 | 97 | if flags: | ||
926 | 98 | message += ' with options "%s"' % ', '.join(flags) | ||
927 | 99 | |||
928 | 100 | log(message, level='INFO') | ||
929 | 101 | return _snap_exec(['remove'] + flags + packages) | ||
930 | 102 | |||
931 | 103 | |||
932 | 104 | def snap_refresh(packages, *flags): | ||
933 | 105 | """ | ||
934 | 106 | Refresh / Update snap package. | ||
935 | 107 | |||
936 | 108 | :param packages: String or List String package name | ||
937 | 109 | :param flags: List String flags to pass to refresh command | ||
938 | 110 | :return: Integer return code from snap | ||
939 | 111 | """ | ||
940 | 112 | if type(packages) is not list: | ||
941 | 113 | packages = [packages] | ||
942 | 114 | |||
943 | 115 | flags = list(flags) | ||
944 | 116 | |||
945 | 117 | message = 'Refreshing snap(s) "%s"' % ', '.join(packages) | ||
946 | 118 | if flags: | ||
947 | 119 | message += ' with options "%s"' % ', '.join(flags) | ||
948 | 120 | |||
949 | 121 | log(message, level='INFO') | ||
950 | 122 | return _snap_exec(['refresh'] + flags + packages) | ||
951 | 0 | 123 | ||
952 | === modified file 'hooks/charmhelpers/fetch/ubuntu.py' | |||
953 | --- hooks/charmhelpers/fetch/ubuntu.py 2016-10-26 18:19:59 +0000 | |||
954 | +++ hooks/charmhelpers/fetch/ubuntu.py 2017-06-16 22:43:26 +0000 | |||
955 | @@ -12,29 +12,47 @@ | |||
956 | 12 | # See the License for the specific language governing permissions and | 12 | # See the License for the specific language governing permissions and |
957 | 13 | # limitations under the License. | 13 | # limitations under the License. |
958 | 14 | 14 | ||
959 | 15 | from collections import OrderedDict | ||
960 | 15 | import os | 16 | import os |
961 | 17 | import platform | ||
962 | 18 | import re | ||
963 | 16 | import six | 19 | import six |
964 | 17 | import time | 20 | import time |
965 | 18 | import subprocess | 21 | import subprocess |
966 | 19 | |||
967 | 20 | from tempfile import NamedTemporaryFile | 22 | from tempfile import NamedTemporaryFile |
968 | 23 | |||
969 | 21 | from charmhelpers.core.host import ( | 24 | from charmhelpers.core.host import ( |
970 | 22 | lsb_release | 25 | lsb_release |
971 | 23 | ) | 26 | ) |
974 | 24 | from charmhelpers.core.hookenv import log | 27 | from charmhelpers.core.hookenv import ( |
975 | 25 | from charmhelpers.fetch import SourceConfigError | 28 | log, |
976 | 29 | DEBUG, | ||
977 | 30 | ) | ||
978 | 31 | from charmhelpers.fetch import SourceConfigError, GPGKeyError | ||
979 | 26 | 32 | ||
980 | 33 | PROPOSED_POCKET = ( | ||
981 | 34 | "# Proposed\n" | ||
982 | 35 | "deb http://archive.ubuntu.com/ubuntu {}-proposed main universe " | ||
983 | 36 | "multiverse restricted\n") | ||
984 | 37 | PROPOSED_PORTS_POCKET = ( | ||
985 | 38 | "# Proposed\n" | ||
986 | 39 | "deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe " | ||
987 | 40 | "multiverse restricted\n") | ||
988 | 41 | # Only supports 64bit and ppc64 at the moment. | ||
989 | 42 | ARCH_TO_PROPOSED_POCKET = { | ||
990 | 43 | 'x86_64': PROPOSED_POCKET, | ||
991 | 44 | 'ppc64le': PROPOSED_PORTS_POCKET, | ||
992 | 45 | 'aarch64': PROPOSED_PORTS_POCKET, | ||
993 | 46 | } | ||
994 | 47 | CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" | ||
995 | 48 | CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' | ||
996 | 27 | CLOUD_ARCHIVE = """# Ubuntu Cloud Archive | 49 | CLOUD_ARCHIVE = """# Ubuntu Cloud Archive |
997 | 28 | deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main | 50 | deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main |
998 | 29 | """ | 51 | """ |
999 | 30 | |||
1000 | 31 | PROPOSED_POCKET = """# Proposed | ||
1001 | 32 | deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted | ||
1002 | 33 | """ | ||
1003 | 34 | |||
1004 | 35 | CLOUD_ARCHIVE_POCKETS = { | 52 | CLOUD_ARCHIVE_POCKETS = { |
1005 | 36 | # Folsom | 53 | # Folsom |
1006 | 37 | 'folsom': 'precise-updates/folsom', | 54 | 'folsom': 'precise-updates/folsom', |
1007 | 55 | 'folsom/updates': 'precise-updates/folsom', | ||
1008 | 38 | 'precise-folsom': 'precise-updates/folsom', | 56 | 'precise-folsom': 'precise-updates/folsom', |
1009 | 39 | 'precise-folsom/updates': 'precise-updates/folsom', | 57 | 'precise-folsom/updates': 'precise-updates/folsom', |
1010 | 40 | 'precise-updates/folsom': 'precise-updates/folsom', | 58 | 'precise-updates/folsom': 'precise-updates/folsom', |
1011 | @@ -43,6 +61,7 @@ | |||
1012 | 43 | 'precise-proposed/folsom': 'precise-proposed/folsom', | 61 | 'precise-proposed/folsom': 'precise-proposed/folsom', |
1013 | 44 | # Grizzly | 62 | # Grizzly |
1014 | 45 | 'grizzly': 'precise-updates/grizzly', | 63 | 'grizzly': 'precise-updates/grizzly', |
1015 | 64 | 'grizzly/updates': 'precise-updates/grizzly', | ||
1016 | 46 | 'precise-grizzly': 'precise-updates/grizzly', | 65 | 'precise-grizzly': 'precise-updates/grizzly', |
1017 | 47 | 'precise-grizzly/updates': 'precise-updates/grizzly', | 66 | 'precise-grizzly/updates': 'precise-updates/grizzly', |
1018 | 48 | 'precise-updates/grizzly': 'precise-updates/grizzly', | 67 | 'precise-updates/grizzly': 'precise-updates/grizzly', |
1019 | @@ -51,6 +70,7 @@ | |||
1020 | 51 | 'precise-proposed/grizzly': 'precise-proposed/grizzly', | 70 | 'precise-proposed/grizzly': 'precise-proposed/grizzly', |
1021 | 52 | # Havana | 71 | # Havana |
1022 | 53 | 'havana': 'precise-updates/havana', | 72 | 'havana': 'precise-updates/havana', |
1023 | 73 | 'havana/updates': 'precise-updates/havana', | ||
1024 | 54 | 'precise-havana': 'precise-updates/havana', | 74 | 'precise-havana': 'precise-updates/havana', |
1025 | 55 | 'precise-havana/updates': 'precise-updates/havana', | 75 | 'precise-havana/updates': 'precise-updates/havana', |
1026 | 56 | 'precise-updates/havana': 'precise-updates/havana', | 76 | 'precise-updates/havana': 'precise-updates/havana', |
1027 | @@ -59,6 +79,7 @@ | |||
1028 | 59 | 'precise-proposed/havana': 'precise-proposed/havana', | 79 | 'precise-proposed/havana': 'precise-proposed/havana', |
1029 | 60 | # Icehouse | 80 | # Icehouse |
1030 | 61 | 'icehouse': 'precise-updates/icehouse', | 81 | 'icehouse': 'precise-updates/icehouse', |
1031 | 82 | 'icehouse/updates': 'precise-updates/icehouse', | ||
1032 | 62 | 'precise-icehouse': 'precise-updates/icehouse', | 83 | 'precise-icehouse': 'precise-updates/icehouse', |
1033 | 63 | 'precise-icehouse/updates': 'precise-updates/icehouse', | 84 | 'precise-icehouse/updates': 'precise-updates/icehouse', |
1034 | 64 | 'precise-updates/icehouse': 'precise-updates/icehouse', | 85 | 'precise-updates/icehouse': 'precise-updates/icehouse', |
1035 | @@ -67,6 +88,7 @@ | |||
1036 | 67 | 'precise-proposed/icehouse': 'precise-proposed/icehouse', | 88 | 'precise-proposed/icehouse': 'precise-proposed/icehouse', |
1037 | 68 | # Juno | 89 | # Juno |
1038 | 69 | 'juno': 'trusty-updates/juno', | 90 | 'juno': 'trusty-updates/juno', |
1039 | 91 | 'juno/updates': 'trusty-updates/juno', | ||
1040 | 70 | 'trusty-juno': 'trusty-updates/juno', | 92 | 'trusty-juno': 'trusty-updates/juno', |
1041 | 71 | 'trusty-juno/updates': 'trusty-updates/juno', | 93 | 'trusty-juno/updates': 'trusty-updates/juno', |
1042 | 72 | 'trusty-updates/juno': 'trusty-updates/juno', | 94 | 'trusty-updates/juno': 'trusty-updates/juno', |
1043 | @@ -75,6 +97,7 @@ | |||
1044 | 75 | 'trusty-proposed/juno': 'trusty-proposed/juno', | 97 | 'trusty-proposed/juno': 'trusty-proposed/juno', |
1045 | 76 | # Kilo | 98 | # Kilo |
1046 | 77 | 'kilo': 'trusty-updates/kilo', | 99 | 'kilo': 'trusty-updates/kilo', |
1047 | 100 | 'kilo/updates': 'trusty-updates/kilo', | ||
1048 | 78 | 'trusty-kilo': 'trusty-updates/kilo', | 101 | 'trusty-kilo': 'trusty-updates/kilo', |
1049 | 79 | 'trusty-kilo/updates': 'trusty-updates/kilo', | 102 | 'trusty-kilo/updates': 'trusty-updates/kilo', |
1050 | 80 | 'trusty-updates/kilo': 'trusty-updates/kilo', | 103 | 'trusty-updates/kilo': 'trusty-updates/kilo', |
1051 | @@ -83,6 +106,7 @@ | |||
1052 | 83 | 'trusty-proposed/kilo': 'trusty-proposed/kilo', | 106 | 'trusty-proposed/kilo': 'trusty-proposed/kilo', |
1053 | 84 | # Liberty | 107 | # Liberty |
1054 | 85 | 'liberty': 'trusty-updates/liberty', | 108 | 'liberty': 'trusty-updates/liberty', |
1055 | 109 | 'liberty/updates': 'trusty-updates/liberty', | ||
1056 | 86 | 'trusty-liberty': 'trusty-updates/liberty', | 110 | 'trusty-liberty': 'trusty-updates/liberty', |
1057 | 87 | 'trusty-liberty/updates': 'trusty-updates/liberty', | 111 | 'trusty-liberty/updates': 'trusty-updates/liberty', |
1058 | 88 | 'trusty-updates/liberty': 'trusty-updates/liberty', | 112 | 'trusty-updates/liberty': 'trusty-updates/liberty', |
1059 | @@ -91,6 +115,7 @@ | |||
1060 | 91 | 'trusty-proposed/liberty': 'trusty-proposed/liberty', | 115 | 'trusty-proposed/liberty': 'trusty-proposed/liberty', |
1061 | 92 | # Mitaka | 116 | # Mitaka |
1062 | 93 | 'mitaka': 'trusty-updates/mitaka', | 117 | 'mitaka': 'trusty-updates/mitaka', |
1063 | 118 | 'mitaka/updates': 'trusty-updates/mitaka', | ||
1064 | 94 | 'trusty-mitaka': 'trusty-updates/mitaka', | 119 | 'trusty-mitaka': 'trusty-updates/mitaka', |
1065 | 95 | 'trusty-mitaka/updates': 'trusty-updates/mitaka', | 120 | 'trusty-mitaka/updates': 'trusty-updates/mitaka', |
1066 | 96 | 'trusty-updates/mitaka': 'trusty-updates/mitaka', | 121 | 'trusty-updates/mitaka': 'trusty-updates/mitaka', |
1067 | @@ -99,17 +124,44 @@ | |||
1068 | 99 | 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', | 124 | 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', |
1069 | 100 | # Newton | 125 | # Newton |
1070 | 101 | 'newton': 'xenial-updates/newton', | 126 | 'newton': 'xenial-updates/newton', |
1071 | 127 | 'newton/updates': 'xenial-updates/newton', | ||
1072 | 102 | 'xenial-newton': 'xenial-updates/newton', | 128 | 'xenial-newton': 'xenial-updates/newton', |
1073 | 103 | 'xenial-newton/updates': 'xenial-updates/newton', | 129 | 'xenial-newton/updates': 'xenial-updates/newton', |
1074 | 104 | 'xenial-updates/newton': 'xenial-updates/newton', | 130 | 'xenial-updates/newton': 'xenial-updates/newton', |
1075 | 105 | 'newton/proposed': 'xenial-proposed/newton', | 131 | 'newton/proposed': 'xenial-proposed/newton', |
1076 | 106 | 'xenial-newton/proposed': 'xenial-proposed/newton', | 132 | 'xenial-newton/proposed': 'xenial-proposed/newton', |
1077 | 107 | 'xenial-proposed/newton': 'xenial-proposed/newton', | 133 | 'xenial-proposed/newton': 'xenial-proposed/newton', |
1078 | 134 | # Ocata | ||
1079 | 135 | 'ocata': 'xenial-updates/ocata', | ||
1080 | 136 | 'ocata/updates': 'xenial-updates/ocata', | ||
1081 | 137 | 'xenial-ocata': 'xenial-updates/ocata', | ||
1082 | 138 | 'xenial-ocata/updates': 'xenial-updates/ocata', | ||
1083 | 139 | 'xenial-updates/ocata': 'xenial-updates/ocata', | ||
1084 | 140 | 'ocata/proposed': 'xenial-proposed/ocata', | ||
1085 | 141 | 'xenial-ocata/proposed': 'xenial-proposed/ocata', | ||
1086 | 142 | 'xenial-ocata/newton': 'xenial-proposed/ocata', | ||
1087 | 143 | # Pike | ||
1088 | 144 | 'pike': 'xenial-updates/pike', | ||
1089 | 145 | 'xenial-pike': 'xenial-updates/pike', | ||
1090 | 146 | 'xenial-pike/updates': 'xenial-updates/pike', | ||
1091 | 147 | 'xenial-updates/pike': 'xenial-updates/pike', | ||
1092 | 148 | 'pike/proposed': 'xenial-proposed/pike', | ||
1093 | 149 | 'xenial-pike/proposed': 'xenial-proposed/pike', | ||
1094 | 150 | 'xenial-pike/newton': 'xenial-proposed/pike', | ||
1095 | 151 | # Queens | ||
1096 | 152 | 'queens': 'xenial-updates/queens', | ||
1097 | 153 | 'xenial-queens': 'xenial-updates/queens', | ||
1098 | 154 | 'xenial-queens/updates': 'xenial-updates/queens', | ||
1099 | 155 | 'xenial-updates/queens': 'xenial-updates/queens', | ||
1100 | 156 | 'queens/proposed': 'xenial-proposed/queens', | ||
1101 | 157 | 'xenial-queens/proposed': 'xenial-proposed/queens', | ||
1102 | 158 | 'xenial-queens/newton': 'xenial-proposed/queens', | ||
1103 | 108 | } | 159 | } |
1104 | 109 | 160 | ||
1105 | 161 | |||
1106 | 110 | APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. | 162 | APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. |
1109 | 111 | APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. | 163 | CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. |
1110 | 112 | APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. | 164 | CMD_RETRY_COUNT = 30 # Retry a failing fatal command X times. |
1111 | 113 | 165 | ||
1112 | 114 | 166 | ||
1113 | 115 | def filter_installed_packages(packages): | 167 | def filter_installed_packages(packages): |
1114 | @@ -137,7 +189,7 @@ | |||
1115 | 137 | return apt_pkg.Cache(progress) | 189 | return apt_pkg.Cache(progress) |
1116 | 138 | 190 | ||
1117 | 139 | 191 | ||
1119 | 140 | def install(packages, options=None, fatal=False): | 192 | def apt_install(packages, options=None, fatal=False): |
1120 | 141 | """Install one or more packages.""" | 193 | """Install one or more packages.""" |
1121 | 142 | if options is None: | 194 | if options is None: |
1122 | 143 | options = ['--option=Dpkg::Options::=--force-confold'] | 195 | options = ['--option=Dpkg::Options::=--force-confold'] |
1123 | @@ -154,7 +206,7 @@ | |||
1124 | 154 | _run_apt_command(cmd, fatal) | 206 | _run_apt_command(cmd, fatal) |
1125 | 155 | 207 | ||
1126 | 156 | 208 | ||
1128 | 157 | def upgrade(options=None, fatal=False, dist=False): | 209 | def apt_upgrade(options=None, fatal=False, dist=False): |
1129 | 158 | """Upgrade all packages.""" | 210 | """Upgrade all packages.""" |
1130 | 159 | if options is None: | 211 | if options is None: |
1131 | 160 | options = ['--option=Dpkg::Options::=--force-confold'] | 212 | options = ['--option=Dpkg::Options::=--force-confold'] |
1132 | @@ -169,13 +221,13 @@ | |||
1133 | 169 | _run_apt_command(cmd, fatal) | 221 | _run_apt_command(cmd, fatal) |
1134 | 170 | 222 | ||
1135 | 171 | 223 | ||
1137 | 172 | def update(fatal=False): | 224 | def apt_update(fatal=False): |
1138 | 173 | """Update local apt cache.""" | 225 | """Update local apt cache.""" |
1139 | 174 | cmd = ['apt-get', 'update'] | 226 | cmd = ['apt-get', 'update'] |
1140 | 175 | _run_apt_command(cmd, fatal) | 227 | _run_apt_command(cmd, fatal) |
1141 | 176 | 228 | ||
1142 | 177 | 229 | ||
1144 | 178 | def purge(packages, fatal=False): | 230 | def apt_purge(packages, fatal=False): |
1145 | 179 | """Purge one or more packages.""" | 231 | """Purge one or more packages.""" |
1146 | 180 | cmd = ['apt-get', '--assume-yes', 'purge'] | 232 | cmd = ['apt-get', '--assume-yes', 'purge'] |
1147 | 181 | if isinstance(packages, six.string_types): | 233 | if isinstance(packages, six.string_types): |
1148 | @@ -209,7 +261,45 @@ | |||
1149 | 209 | return apt_mark(packages, 'unhold', fatal=fatal) | 261 | return apt_mark(packages, 'unhold', fatal=fatal) |
1150 | 210 | 262 | ||
1151 | 211 | 263 | ||
1153 | 212 | def add_source(source, key=None): | 264 | def import_key(keyid): |
1154 | 265 | """Import a key in either ASCII Armor or Radix64 format. | ||
1155 | 266 | |||
1156 | 267 | `keyid` is either the keyid to fetch from a PGP server, or | ||
1157 | 268 | the key in ASCII armor foramt. | ||
1158 | 269 | |||
1159 | 270 | :param keyid: String of key (or key id). | ||
1160 | 271 | :raises: GPGKeyError if the key could not be imported | ||
1161 | 272 | """ | ||
1162 | 273 | key = keyid.strip() | ||
1163 | 274 | if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and | ||
1164 | 275 | key.endswith('-----END PGP PUBLIC KEY BLOCK-----')): | ||
1165 | 276 | log("PGP key found (looks like ASCII Armor format)", level=DEBUG) | ||
1166 | 277 | log("Importing ASCII Armor PGP key", level=DEBUG) | ||
1167 | 278 | with NamedTemporaryFile() as keyfile: | ||
1168 | 279 | with open(keyfile.name, 'w') as fd: | ||
1169 | 280 | fd.write(key) | ||
1170 | 281 | fd.write("\n") | ||
1171 | 282 | cmd = ['apt-key', 'add', keyfile.name] | ||
1172 | 283 | try: | ||
1173 | 284 | subprocess.check_call(cmd) | ||
1174 | 285 | except subprocess.CalledProcessError: | ||
1175 | 286 | error = "Error importing PGP key '{}'".format(key) | ||
1176 | 287 | log(error) | ||
1177 | 288 | raise GPGKeyError(error) | ||
1178 | 289 | else: | ||
1179 | 290 | log("PGP key found (looks like Radix64 format)", level=DEBUG) | ||
1180 | 291 | log("Importing PGP key from keyserver", level=DEBUG) | ||
1181 | 292 | cmd = ['apt-key', 'adv', '--keyserver', | ||
1182 | 293 | 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] | ||
1183 | 294 | try: | ||
1184 | 295 | subprocess.check_call(cmd) | ||
1185 | 296 | except subprocess.CalledProcessError: | ||
1186 | 297 | error = "Error importing PGP key '{}'".format(key) | ||
1187 | 298 | log(error) | ||
1188 | 299 | raise GPGKeyError(error) | ||
1189 | 300 | |||
1190 | 301 | |||
1191 | 302 | def add_source(source, key=None, fail_invalid=False): | ||
1192 | 213 | """Add a package source to this system. | 303 | """Add a package source to this system. |
1193 | 214 | 304 | ||
1194 | 215 | @param source: a URL or sources.list entry, as supported by | 305 | @param source: a URL or sources.list entry, as supported by |
1195 | @@ -225,6 +315,33 @@ | |||
1196 | 225 | such as 'cloud:icehouse' | 315 | such as 'cloud:icehouse' |
1197 | 226 | 'distro' may be used as a noop | 316 | 'distro' may be used as a noop |
1198 | 227 | 317 | ||
1199 | 318 | Full list of source specifications supported by the function are: | ||
1200 | 319 | |||
1201 | 320 | 'distro': A NOP; i.e. it has no effect. | ||
1202 | 321 | 'proposed': the proposed deb spec [2] is wrtten to | ||
1203 | 322 | /etc/apt/sources.list/proposed | ||
1204 | 323 | 'distro-proposed': adds <version>-proposed to the debs [2] | ||
1205 | 324 | 'ppa:<ppa-name>': add-apt-repository --yes <ppa_name> | ||
1206 | 325 | 'deb <deb-spec>': add-apt-repository --yes deb <deb-spec> | ||
1207 | 326 | 'http://....': add-apt-repository --yes http://... | ||
1208 | 327 | 'cloud-archive:<spec>': add-apt-repository -yes cloud-archive:<spec> | ||
1209 | 328 | 'cloud:<release>[-staging]': specify a Cloud Archive pocket <release> with | ||
1210 | 329 | optional staging version. If staging is used then the staging PPA [2] | ||
1211 | 330 | with be used. If staging is NOT used then the cloud archive [3] will be | ||
1212 | 331 | added, and the 'ubuntu-cloud-keyring' package will be added for the | ||
1213 | 332 | current distro. | ||
1214 | 333 | |||
1215 | 334 | Otherwise the source is not recognised and this is logged to the juju log. | ||
1216 | 335 | However, no error is raised, unless sys_error_on_exit is True. | ||
1217 | 336 | |||
1218 | 337 | [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main | ||
1219 | 338 | where {} is replaced with the derived pocket name. | ||
1220 | 339 | [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \ | ||
1221 | 340 | main universe multiverse restricted | ||
1222 | 341 | where {} is replaced with the lsb_release codename (e.g. xenial) | ||
1223 | 342 | [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu <pocket> | ||
1224 | 343 | to /etc/apt/sources.list.d/cloud-archive-list | ||
1225 | 344 | |||
1226 | 228 | @param key: A key to be added to the system's APT keyring and used | 345 | @param key: A key to be added to the system's APT keyring and used |
1227 | 229 | to verify the signatures on packages. Ideally, this should be an | 346 | to verify the signatures on packages. Ideally, this should be an |
1228 | 230 | ASCII format GPG public key including the block headers. A GPG key | 347 | ASCII format GPG public key including the block headers. A GPG key |
1229 | @@ -232,87 +349,202 @@ | |||
1230 | 232 | available to retrieve the actual public key from a public keyserver | 349 | available to retrieve the actual public key from a public keyserver |
1231 | 233 | placing your Juju environment at risk. ppa and cloud archive keys | 350 | placing your Juju environment at risk. ppa and cloud archive keys |
1232 | 234 | are securely added automtically, so sould not be provided. | 351 | are securely added automtically, so sould not be provided. |
1233 | 352 | |||
1234 | 353 | @param fail_invalid: (boolean) if True, then the function raises a | ||
1235 | 354 | SourceConfigError is there is no matching installation source. | ||
1236 | 355 | |||
1237 | 356 | @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a | ||
1238 | 357 | valid pocket in CLOUD_ARCHIVE_POCKETS | ||
1239 | 235 | """ | 358 | """ |
1240 | 359 | _mapping = OrderedDict([ | ||
1241 | 360 | (r"^distro$", lambda: None), # This is a NOP | ||
1242 | 361 | (r"^(?:proposed|distro-proposed)$", _add_proposed), | ||
1243 | 362 | (r"^cloud-archive:(.*)$", _add_apt_repository), | ||
1244 | 363 | (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository), | ||
1245 | 364 | (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging), | ||
1246 | 365 | (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check), | ||
1247 | 366 | (r"^cloud:(.*)$", _add_cloud_pocket), | ||
1248 | 367 | ]) | ||
1249 | 236 | if source is None: | 368 | if source is None: |
1260 | 237 | log('Source is not present. Skipping') | 369 | source = '' |
1261 | 238 | return | 370 | for r, fn in six.iteritems(_mapping): |
1262 | 239 | 371 | m = re.match(r, source) | |
1263 | 240 | if (source.startswith('ppa:') or | 372 | if m: |
1264 | 241 | source.startswith('http') or | 373 | # call the assoicated function with the captured groups |
1265 | 242 | source.startswith('deb ') or | 374 | # raises SourceConfigError on error. |
1266 | 243 | source.startswith('cloud-archive:')): | 375 | fn(*m.groups()) |
1267 | 244 | subprocess.check_call(['add-apt-repository', '--yes', source]) | 376 | if key: |
1268 | 245 | elif source.startswith('cloud:'): | 377 | try: |
1269 | 246 | install(filter_installed_packages(['ubuntu-cloud-keyring']), | 378 | import_key(key) |
1270 | 379 | except GPGKeyError as e: | ||
1271 | 380 | raise SourceConfigError(str(e)) | ||
1272 | 381 | break | ||
1273 | 382 | else: | ||
1274 | 383 | # nothing matched. log an error and maybe sys.exit | ||
1275 | 384 | err = "Unknown source: {!r}".format(source) | ||
1276 | 385 | log(err) | ||
1277 | 386 | if fail_invalid: | ||
1278 | 387 | raise SourceConfigError(err) | ||
1279 | 388 | |||
1280 | 389 | |||
1281 | 390 | def _add_proposed(): | ||
1282 | 391 | """Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list | ||
1283 | 392 | |||
1284 | 393 | Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct staza for | ||
1285 | 394 | the deb line. | ||
1286 | 395 | |||
1287 | 396 | For intel architecutres PROPOSED_POCKET is used for the release, but for | ||
1288 | 397 | other architectures PROPOSED_PORTS_POCKET is used for the release. | ||
1289 | 398 | """ | ||
1290 | 399 | release = lsb_release()['DISTRIB_CODENAME'] | ||
1291 | 400 | arch = platform.machine() | ||
1292 | 401 | if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET): | ||
1293 | 402 | raise SourceConfigError("Arch {} not supported for (distro-)proposed" | ||
1294 | 403 | .format(arch)) | ||
1295 | 404 | with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: | ||
1296 | 405 | apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release)) | ||
1297 | 406 | |||
1298 | 407 | |||
1299 | 408 | def _add_apt_repository(spec): | ||
1300 | 409 | """Add the spec using add_apt_repository | ||
1301 | 410 | |||
1302 | 411 | :param spec: the parameter to pass to add_apt_repository | ||
1303 | 412 | """ | ||
1304 | 413 | _run_with_retries(['add-apt-repository', '--yes', spec]) | ||
1305 | 414 | |||
1306 | 415 | |||
1307 | 416 | def _add_cloud_pocket(pocket): | ||
1308 | 417 | """Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list | ||
1309 | 418 | |||
1310 | 419 | Note that this overwrites the existing file if there is one. | ||
1311 | 420 | |||
1312 | 421 | This function also converts the simple pocket in to the actual pocket using | ||
1313 | 422 | the CLOUD_ARCHIVE_POCKETS mapping. | ||
1314 | 423 | |||
1315 | 424 | :param pocket: string representing the pocket to add a deb spec for. | ||
1316 | 425 | :raises: SourceConfigError if the cloud pocket doesn't exist or the | ||
1317 | 426 | requested release doesn't match the current distro version. | ||
1318 | 427 | """ | ||
1319 | 428 | apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), | ||
1320 | 247 | fatal=True) | 429 | fatal=True) |
1352 | 248 | pocket = source.split(':')[-1] | 430 | if pocket not in CLOUD_ARCHIVE_POCKETS: |
1353 | 249 | if pocket not in CLOUD_ARCHIVE_POCKETS: | 431 | raise SourceConfigError( |
1354 | 250 | raise SourceConfigError( | 432 | 'Unsupported cloud: source option %s' % |
1355 | 251 | 'Unsupported cloud: source option %s' % | 433 | pocket) |
1356 | 252 | pocket) | 434 | actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] |
1357 | 253 | actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] | 435 | with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: |
1358 | 254 | with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: | 436 | apt.write(CLOUD_ARCHIVE.format(actual_pocket)) |
1359 | 255 | apt.write(CLOUD_ARCHIVE.format(actual_pocket)) | 437 | |
1360 | 256 | elif source == 'proposed': | 438 | |
1361 | 257 | release = lsb_release()['DISTRIB_CODENAME'] | 439 | def _add_cloud_staging(cloud_archive_release, openstack_release): |
1362 | 258 | with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: | 440 | """Add the cloud staging repository which is in |
1363 | 259 | apt.write(PROPOSED_POCKET.format(release)) | 441 | ppa:ubuntu-cloud-archive/<openstack_release>-staging |
1364 | 260 | elif source == 'distro': | 442 | |
1365 | 261 | pass | 443 | This function checks that the cloud_archive_release matches the current |
1366 | 262 | else: | 444 | codename for the distro that charm is being installed on. |
1367 | 263 | log("Unknown source: {!r}".format(source)) | 445 | |
1368 | 264 | 446 | :param cloud_archive_release: string, codename for the release. | |
1369 | 265 | if key: | 447 | :param openstack_release: String, codename for the openstack release. |
1370 | 266 | if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: | 448 | :raises: SourceConfigError if the cloud_archive_release doesn't match the |
1371 | 267 | with NamedTemporaryFile('w+') as key_file: | 449 | current version of the os. |
1372 | 268 | key_file.write(key) | 450 | """ |
1373 | 269 | key_file.flush() | 451 | _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) |
1374 | 270 | key_file.seek(0) | 452 | ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release) |
1375 | 271 | subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) | 453 | cmd = 'add-apt-repository -y {}'.format(ppa) |
1376 | 272 | else: | 454 | _run_with_retries(cmd.split(' ')) |
1377 | 273 | # Note that hkp: is in no way a secure protocol. Using a | 455 | |
1378 | 274 | # GPG key id is pointless from a security POV unless you | 456 | |
1379 | 275 | # absolutely trust your network and DNS. | 457 | def _add_cloud_distro_check(cloud_archive_release, openstack_release): |
1380 | 276 | subprocess.check_call(['apt-key', 'adv', '--keyserver', | 458 | """Add the cloud pocket, but also check the cloud_archive_release against |
1381 | 277 | 'hkp://keyserver.ubuntu.com:80', '--recv', | 459 | the current distro, and use the openstack_release as the full lookup. |
1382 | 278 | key]) | 460 | |
1383 | 461 | This just calls _add_cloud_pocket() with the openstack_release as pocket | ||
1384 | 462 | to get the correct cloud-archive.list for dpkg to work with. | ||
1385 | 463 | |||
1386 | 464 | :param cloud_archive_release:String, codename for the distro release. | ||
1387 | 465 | :param openstack_release: String, spec for the release to look up in the | ||
1388 | 466 | CLOUD_ARCHIVE_POCKETS | ||
1389 | 467 | :raises: SourceConfigError if this is the wrong distro, or the pocket spec | ||
1390 | 468 | doesn't exist. | ||
1391 | 469 | """ | ||
1392 | 470 | _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) | ||
1393 | 471 | _add_cloud_pocket("{}-{}".format(cloud_archive_release, openstack_release)) | ||
1394 | 472 | |||
1395 | 473 | |||
1396 | 474 | def _verify_is_ubuntu_rel(release, os_release): | ||
1397 | 475 | """Verify that the release is in the same as the current ubuntu release. | ||
1398 | 476 | |||
1399 | 477 | :param release: String, lowercase for the release. | ||
1400 | 478 | :param os_release: String, the os_release being asked for | ||
1401 | 479 | :raises: SourceConfigError if the release is not the same as the ubuntu | ||
1402 | 480 | release. | ||
1403 | 481 | """ | ||
1404 | 482 | ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] | ||
1405 | 483 | if release != ubuntu_rel: | ||
1406 | 484 | raise SourceConfigError( | ||
1407 | 485 | 'Invalid Cloud Archive release specified: {}-{} on this Ubuntu' | ||
1408 | 486 | 'version ({})'.format(release, os_release, ubuntu_rel)) | ||
1409 | 487 | |||
1410 | 488 | |||
1411 | 489 | def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), | ||
1412 | 490 | retry_message="", cmd_env=None): | ||
1413 | 491 | """Run a command and retry until success or max_retries is reached. | ||
1414 | 492 | |||
1415 | 493 | :param: cmd: str: The apt command to run. | ||
1416 | 494 | :param: max_retries: int: The number of retries to attempt on a fatal | ||
1417 | 495 | command. Defaults to CMD_RETRY_COUNT. | ||
1418 | 496 | :param: retry_exitcodes: tuple: Optional additional exit codes to retry. | ||
1419 | 497 | Defaults to retry on exit code 1. | ||
1420 | 498 | :param: retry_message: str: Optional log prefix emitted during retries. | ||
1421 | 499 | :param: cmd_env: dict: Environment variables to add to the command run. | ||
1422 | 500 | """ | ||
1423 | 501 | |||
1424 | 502 | env = None | ||
1425 | 503 | kwargs = {} | ||
1426 | 504 | if cmd_env: | ||
1427 | 505 | env = os.environ.copy() | ||
1428 | 506 | env.update(cmd_env) | ||
1429 | 507 | kwargs['env'] = env | ||
1430 | 508 | |||
1431 | 509 | if not retry_message: | ||
1432 | 510 | retry_message = "Failed executing '{}'".format(" ".join(cmd)) | ||
1433 | 511 | retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY) | ||
1434 | 512 | |||
1435 | 513 | retry_count = 0 | ||
1436 | 514 | result = None | ||
1437 | 515 | |||
1438 | 516 | retry_results = (None,) + retry_exitcodes | ||
1439 | 517 | while result in retry_results: | ||
1440 | 518 | try: | ||
1441 | 519 | # result = subprocess.check_call(cmd, env=env) | ||
1442 | 520 | result = subprocess.check_call(cmd, **kwargs) | ||
1443 | 521 | except subprocess.CalledProcessError as e: | ||
1444 | 522 | retry_count = retry_count + 1 | ||
1445 | 523 | if retry_count > max_retries: | ||
1446 | 524 | raise | ||
1447 | 525 | result = e.returncode | ||
1448 | 526 | log(retry_message) | ||
1449 | 527 | time.sleep(CMD_RETRY_DELAY) | ||
1450 | 279 | 528 | ||
1451 | 280 | 529 | ||
1452 | 281 | def _run_apt_command(cmd, fatal=False): | 530 | def _run_apt_command(cmd, fatal=False): |
1457 | 282 | """Run an APT command. | 531 | """Run an apt command with optional retries. |
1454 | 283 | |||
1455 | 284 | Checks the output and retries if the fatal flag is set | ||
1456 | 285 | to True. | ||
1458 | 286 | 532 | ||
1459 | 287 | :param: cmd: str: The apt command to run. | 533 | :param: cmd: str: The apt command to run. |
1460 | 288 | :param: fatal: bool: Whether the command's output should be checked and | 534 | :param: fatal: bool: Whether the command's output should be checked and |
1461 | 289 | retried. | 535 | retried. |
1462 | 290 | """ | 536 | """ |
1467 | 291 | env = os.environ.copy() | 537 | # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment. |
1468 | 292 | 538 | cmd_env = { | |
1469 | 293 | if 'DEBIAN_FRONTEND' not in env: | 539 | 'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')} |
1466 | 294 | env['DEBIAN_FRONTEND'] = 'noninteractive' | ||
1470 | 295 | 540 | ||
1471 | 296 | if fatal: | 541 | if fatal: |
1490 | 297 | retry_count = 0 | 542 | _run_with_retries( |
1491 | 298 | result = None | 543 | cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,), |
1492 | 299 | 544 | retry_message="Couldn't acquire DPKG lock") | |
1475 | 300 | # If the command is considered "fatal", we need to retry if the apt | ||
1476 | 301 | # lock was not acquired. | ||
1477 | 302 | |||
1478 | 303 | while result is None or result == APT_NO_LOCK: | ||
1479 | 304 | try: | ||
1480 | 305 | result = subprocess.check_call(cmd, env=env) | ||
1481 | 306 | except subprocess.CalledProcessError as e: | ||
1482 | 307 | retry_count = retry_count + 1 | ||
1483 | 308 | if retry_count > APT_NO_LOCK_RETRY_COUNT: | ||
1484 | 309 | raise | ||
1485 | 310 | result = e.returncode | ||
1486 | 311 | log("Couldn't acquire DPKG lock. Will retry in {} seconds." | ||
1487 | 312 | "".format(APT_NO_LOCK_RETRY_DELAY)) | ||
1488 | 313 | time.sleep(APT_NO_LOCK_RETRY_DELAY) | ||
1489 | 314 | |||
1493 | 315 | else: | 545 | else: |
1494 | 546 | env = os.environ.copy() | ||
1495 | 547 | env.update(cmd_env) | ||
1496 | 316 | subprocess.call(cmd, env=env) | 548 | subprocess.call(cmd, env=env) |
1497 | 317 | 549 | ||
1498 | 318 | 550 | ||
1499 | 319 | 551 | ||
1500 | === modified file 'hooks/charmhelpers/osplatform.py' | |||
1501 | --- hooks/charmhelpers/osplatform.py 2016-10-26 18:19:59 +0000 | |||
1502 | +++ hooks/charmhelpers/osplatform.py 2017-06-16 22:43:26 +0000 | |||
1503 | @@ -8,12 +8,18 @@ | |||
1504 | 8 | will be returned (which is the name of the module). | 8 | will be returned (which is the name of the module). |
1505 | 9 | This string is used to decide which platform module should be imported. | 9 | This string is used to decide which platform module should be imported. |
1506 | 10 | """ | 10 | """ |
1507 | 11 | # linux_distribution is deprecated and will be removed in Python 3.7 | ||
1508 | 12 | # Warings *not* disabled, as we certainly need to fix this. | ||
1509 | 11 | tuple_platform = platform.linux_distribution() | 13 | tuple_platform = platform.linux_distribution() |
1510 | 12 | current_platform = tuple_platform[0] | 14 | current_platform = tuple_platform[0] |
1511 | 13 | if "Ubuntu" in current_platform: | 15 | if "Ubuntu" in current_platform: |
1512 | 14 | return "ubuntu" | 16 | return "ubuntu" |
1513 | 15 | elif "CentOS" in current_platform: | 17 | elif "CentOS" in current_platform: |
1514 | 16 | return "centos" | 18 | return "centos" |
1515 | 19 | elif "debian" in current_platform: | ||
1516 | 20 | # Stock Python does not detect Ubuntu and instead returns debian. | ||
1517 | 21 | # Or at least it does in some build environments like Travis CI | ||
1518 | 22 | return "ubuntu" | ||
1519 | 17 | else: | 23 | else: |
1520 | 18 | raise RuntimeError("This module is not supported on {}." | 24 | raise RuntimeError("This module is not supported on {}." |
1521 | 19 | .format(current_platform)) | 25 | .format(current_platform)) |
1522 | 20 | 26 | ||
1523 | === modified file 'hooks/hooks.py' | |||
1524 | --- hooks/hooks.py 2016-11-15 18:15:20 +0000 | |||
1525 | +++ hooks/hooks.py 2017-06-16 22:43:26 +0000 | |||
1526 | @@ -4,6 +4,8 @@ | |||
1527 | 4 | import sys | 4 | import sys |
1528 | 5 | 5 | ||
1529 | 6 | from charmhelpers.core.host import ( | 6 | from charmhelpers.core.host import ( |
1530 | 7 | CompareHostReleases, | ||
1531 | 8 | lsb_release, | ||
1532 | 7 | service_start, | 9 | service_start, |
1533 | 8 | service_stop, | 10 | service_stop, |
1534 | 9 | service_restart, | 11 | service_restart, |
1535 | @@ -69,6 +71,12 @@ | |||
1536 | 69 | changed[config_key] = config_get(config_key) | 71 | changed[config_key] = config_get(config_key) |
1537 | 70 | juju_log("Configuration key:%s set to value: %s" % | 72 | juju_log("Configuration key:%s set to value: %s" % |
1538 | 71 | (config_key, changed[config_key])) | 73 | (config_key, changed[config_key])) |
1539 | 74 | |||
1540 | 75 | |||
1541 | 76 | # allows templates to generate series-dependent configuration | ||
1542 | 77 | lsb = lsb_release() | ||
1543 | 78 | changed['series'] = CompareHostReleases(lsb['DISTRIB_CODENAME']) | ||
1544 | 79 | |||
1545 | 72 | return changed | 80 | return changed |
1546 | 73 | 81 | ||
1547 | 74 | 82 | ||
1548 | 75 | 83 | ||
1549 | === modified file 'templates/rsyslog.conf' | |||
1550 | --- templates/rsyslog.conf 2014-04-22 19:45:41 +0000 | |||
1551 | +++ templates/rsyslog.conf 2017-06-16 22:43:26 +0000 | |||
1552 | @@ -1,3 +1,8 @@ | |||
1553 | 1 | ############################################################################### | ||
1554 | 2 | # [ WARNING ] | ||
1555 | 3 | # Configuration file maintained by Juju. Local changes may be overwritten. | ||
1556 | 4 | ############################################################################### | ||
1557 | 5 | |||
1558 | 1 | /var/log/syslog | 6 | /var/log/syslog |
1559 | 2 | { | 7 | { |
1560 | 3 | rotate {{syslog_rotate}} | 8 | rotate {{syslog_rotate}} |
1561 | @@ -7,7 +12,11 @@ | |||
1562 | 7 | delaycompress | 12 | delaycompress |
1563 | 8 | compress | 13 | compress |
1564 | 9 | postrotate | 14 | postrotate |
1565 | 15 | {%- if series <= 'trusty' %} | ||
1566 | 10 | reload rsyslog >/dev/null 2>&1 || true | 16 | reload rsyslog >/dev/null 2>&1 || true |
1567 | 17 | {%- else %} | ||
1568 | 18 | invoke-rc.d rsyslog rotate > /dev/null | ||
1569 | 19 | {%- endif %} | ||
1570 | 11 | endscript | 20 | endscript |
1571 | 12 | } | 21 | } |
1572 | 13 | 22 | ||
1573 | @@ -32,6 +41,10 @@ | |||
1574 | 32 | delaycompress | 41 | delaycompress |
1575 | 33 | sharedscripts | 42 | sharedscripts |
1576 | 34 | postrotate | 43 | postrotate |
1577 | 44 | {%- if series <= 'trusty' %} | ||
1578 | 35 | reload rsyslog >/dev/null 2>&1 || true | 45 | reload rsyslog >/dev/null 2>&1 || true |
1579 | 46 | {%- else %} | ||
1580 | 47 | invoke-rc.d rsyslog rotate > /dev/null | ||
1581 | 48 | {%- endif %} | ||
1582 | 36 | endscript | 49 | endscript |
1583 | 37 | } | 50 | } |
1584 | 38 | 51 | ||
1585 | === modified file 'tox.ini' | |||
1586 | --- tox.ini 2016-10-27 15:33:35 +0000 | |||
1587 | +++ tox.ini 2017-06-16 22:43:26 +0000 | |||
1588 | @@ -1,6 +1,6 @@ | |||
1589 | 1 | [tox] | 1 | [tox] |
1590 | 2 | skipsdist=True | 2 | skipsdist=True |
1592 | 3 | envlist = py34, py35 | 3 | envlist = py27, py34, py35 |
1593 | 4 | skip_missing_interpreters = True | 4 | skip_missing_interpreters = True |
1594 | 5 | 5 | ||
1595 | 6 | [testenv] | 6 | [testenv] |
1596 | 7 | 7 | ||
1597 | === modified file 'unit_tests/test_hooks.py' | |||
1598 | --- unit_tests/test_hooks.py 2017-01-10 17:09:58 +0000 | |||
1599 | +++ unit_tests/test_hooks.py 2017-06-16 22:43:26 +0000 | |||
1600 | @@ -1,7 +1,8 @@ | |||
1601 | 1 | #!/usr/bin/env python | 1 | #!/usr/bin/env python |
1602 | 2 | # -*- coding: utf-8 -*- | 2 | # -*- coding: utf-8 -*- |
1603 | 3 | import os | 3 | import os |
1605 | 4 | import hooks | 4 | import six |
1606 | 5 | import tempfile | ||
1607 | 5 | 6 | ||
1608 | 6 | __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>' | 7 | __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>' |
1609 | 7 | _HERE = os.path.abspath(os.path.dirname(__file__)) | 8 | _HERE = os.path.abspath(os.path.dirname(__file__)) |
1610 | @@ -12,6 +13,7 @@ | |||
1611 | 12 | except ImportError as ex: | 13 | except ImportError as ex: |
1612 | 13 | raise ImportError("Please install unittest and mock modules") | 14 | raise ImportError("Please install unittest and mock modules") |
1613 | 14 | 15 | ||
1614 | 16 | import hooks | ||
1615 | 15 | 17 | ||
1616 | 16 | TO_PATCH = [ | 18 | TO_PATCH = [ |
1617 | 17 | "apt_install", | 19 | "apt_install", |
1618 | @@ -28,7 +30,7 @@ | |||
1619 | 28 | ] | 30 | ] |
1620 | 29 | 31 | ||
1621 | 30 | 32 | ||
1623 | 31 | class HooksTestCase(unittest.TestCase): | 33 | class BaseTestCase(unittest.TestCase): |
1624 | 32 | 34 | ||
1625 | 33 | def setUp(self): | 35 | def setUp(self): |
1626 | 34 | unittest.TestCase.setUp(self) | 36 | unittest.TestCase.setUp(self) |
1627 | @@ -37,6 +39,14 @@ | |||
1628 | 37 | self.juju_log.return_value = True | 39 | self.juju_log.return_value = True |
1629 | 38 | self.apt_install.return_value = True | 40 | self.apt_install.return_value = True |
1630 | 39 | self.charm_dir.return_value = os.path.join(_HERE, '..') | 41 | self.charm_dir.return_value = os.path.join(_HERE, '..') |
1631 | 42 | self.tmpdir = tempfile.mkdtemp() | ||
1632 | 43 | self.maxDiff = None | ||
1633 | 44 | |||
1634 | 45 | def tearDown(self): | ||
1635 | 46 | try: | ||
1636 | 47 | shutil.rmtree(self.tmpdir) | ||
1637 | 48 | except: | ||
1638 | 49 | pass | ||
1639 | 40 | 50 | ||
1640 | 41 | def patch(self, method): | 51 | def patch(self, method): |
1641 | 42 | _m = mock.patch.object(hooks, method) | 52 | _m = mock.patch.object(hooks, method) |
1642 | @@ -48,6 +58,8 @@ | |||
1643 | 48 | for method in TO_PATCH: | 58 | for method in TO_PATCH: |
1644 | 49 | setattr(self, method, self.patch(method)) | 59 | setattr(self, method, self.patch(method)) |
1645 | 50 | 60 | ||
1646 | 61 | class TestHooks(BaseTestCase): | ||
1647 | 62 | |||
1648 | 51 | def test_install_hook(self): | 63 | def test_install_hook(self): |
1649 | 52 | """Check if install hooks is correctly executed | 64 | """Check if install hooks is correctly executed |
1650 | 53 | """ | 65 | """ |
1651 | @@ -141,8 +153,16 @@ | |||
1652 | 141 | def test_config_changed(self): | 153 | def test_config_changed(self): |
1653 | 142 | """Check if config-changed is executed correctly""" | 154 | """Check if config-changed is executed correctly""" |
1654 | 143 | _open = mock.mock_open(read_data=b'foo') | 155 | _open = mock.mock_open(read_data=b'foo') |
1657 | 144 | 156 | lsb = hooks.lsb_release() | |
1658 | 145 | with mock.patch('builtins.open', _open, create=True): | 157 | |
1659 | 158 | if six .PY2: | ||
1660 | 159 | open_function = '__builtin__.open' | ||
1661 | 160 | else: | ||
1662 | 161 | open_function = 'builtins.open' | ||
1663 | 162 | |||
1664 | 163 | with mock.patch(open_function, _open, create=True) as mock_open, \ | ||
1665 | 164 | mock.patch.object(hooks, 'lsb_release') as mock_lsb: | ||
1666 | 165 | mock_lsb.return_value = lsb | ||
1667 | 146 | hooks.config_changed() | 166 | hooks.config_changed() |
1668 | 147 | 167 | ||
1669 | 148 | # I'm not quite sure why config_changed appears to be called twice but | 168 | # I'm not quite sure why config_changed appears to be called twice but |
1670 | @@ -154,12 +174,15 @@ | |||
1671 | 154 | '60-aggregator.conf'), 'rb'), | 174 | '60-aggregator.conf'), 'rb'), |
1672 | 155 | mock.call(os.path.join(hooks.DEFAULT_RSYSLOG_PATH, | 175 | mock.call(os.path.join(hooks.DEFAULT_RSYSLOG_PATH, |
1673 | 156 | '60-aggregator.conf'), 'w'), | 176 | '60-aggregator.conf'), 'w'), |
1678 | 157 | mock.call(os.path.join(hooks.get_template_dir(), | 177 | |
1679 | 158 | '70-forward.conf'), 'rb'), | 178 | mock.call(os.path.join(hooks.get_template_dir(), |
1680 | 159 | mock.call(os.path.join(hooks.get_template_dir(), | 179 | '70-forward.conf'), 'rb'), |
1681 | 160 | '70-forward.conf'), 'rb'), | 180 | mock.call(os.path.join(hooks.get_template_dir(), |
1682 | 181 | '70-forward.conf'), 'rb'), | ||
1683 | 182 | |||
1684 | 161 | mock.call(os.path.join(hooks.DEFAULT_RSYSLOG_PATH, | 183 | mock.call(os.path.join(hooks.DEFAULT_RSYSLOG_PATH, |
1685 | 162 | '70-forward.conf'), 'w'), | 184 | '70-forward.conf'), 'w'), |
1686 | 185 | |||
1687 | 163 | mock.call(os.path.join(hooks.get_template_dir(), | 186 | mock.call(os.path.join(hooks.get_template_dir(), |
1688 | 164 | 'rsyslog.conf'), 'rb'), | 187 | 'rsyslog.conf'), 'rb'), |
1689 | 165 | mock.call(os.path.join(hooks.get_template_dir(), | 188 | mock.call(os.path.join(hooks.get_template_dir(), |
1690 | @@ -169,3 +192,38 @@ | |||
1691 | 169 | 192 | ||
1692 | 170 | self.assertEquals(sorted(_open.call_args_list), sorted(expected)) | 193 | self.assertEquals(sorted(_open.call_args_list), sorted(expected)) |
1693 | 171 | self.service_restart.assert_called_once_with("rsyslog") | 194 | self.service_restart.assert_called_once_with("rsyslog") |
1694 | 195 | |||
1695 | 196 | |||
1696 | 197 | class TestSeries(BaseTestCase): | ||
1697 | 198 | |||
1698 | 199 | def setUp(self): | ||
1699 | 200 | super(TestSeries, self).setUp() | ||
1700 | 201 | self._logrotate_path = hooks.DEFAULT_LOGROTATE_PATH | ||
1701 | 202 | self._rsyslog_path = hooks.DEFAULT_RSYSLOG_PATH | ||
1702 | 203 | |||
1703 | 204 | def tearDown(self): | ||
1704 | 205 | hooks.DEFAULT_LOGROTATE_PATH = self._logrotate_path | ||
1705 | 206 | hooks.DEFAULT_RSYSLOG_PATH = self._rsyslog_path | ||
1706 | 207 | super(TestSeries, self).tearDown() | ||
1707 | 208 | |||
1708 | 209 | def test_series(self): | ||
1709 | 210 | rsyslog_config = os.path.join(self.tmpdir, 'rsyslog.conf') | ||
1710 | 211 | hooks.DEFAULT_LOGROTATE_PATH = rsyslog_config | ||
1711 | 212 | hooks.DEFAULT_RSYSLOG_PATH = os.path.join(self.tmpdir) | ||
1712 | 213 | lsb = hooks.lsb_release() | ||
1713 | 214 | with mock.patch.object(hooks, 'lsb_release') as mock_lsb: | ||
1714 | 215 | lsb['DISTRIB_CODENAME'] = 'artful' | ||
1715 | 216 | mock_lsb.return_value = lsb | ||
1716 | 217 | hooks.config_changed() | ||
1717 | 218 | |||
1718 | 219 | with open(rsyslog_config, 'r') as f: | ||
1719 | 220 | content = f.read() | ||
1720 | 221 | self.assertIn('invoke-rc.d rsyslog rotate > /dev/null', | ||
1721 | 222 | content) | ||
1722 | 223 | |||
1723 | 224 | lsb['DISTRIB_CODENAME'] = 'trusty' | ||
1724 | 225 | hooks.config_changed() | ||
1725 | 226 | with open(rsyslog_config, 'r') as f: | ||
1726 | 227 | content = f.read() | ||
1727 | 228 | self.assertIn('reload rsyslog >/dev/null 2>&1 || true', | ||
1728 | 229 | content) |
Felipe,
I'd like to backport this change into the trusty series; please make sure
that the change remains backward compatible with xenial.
<freyes> # invoke-rc.d rsyslog rotate
<freyes> initctl: invalid command: rotate
I will set the review to Needs Fixing for now.
Thanks.