Merge lp:~sajoupa/ubuntu-repository-cache/layer-black-reformatting into lp:ubuntu-repository-cache
Proposed by: Laurent Sesquès
Status: Merged
Approved by: Nick Moffitt
Approved revision: 287
Merged at revision: 286
Proposed branch: lp:~sajoupa/ubuntu-repository-cache/layer-black-reformatting
Merge into: lp:ubuntu-repository-cache
Prerequisite: lp:~sajoupa/ubuntu-repository-cache/layer-service-unpause
Diff against target: 1936 lines (+384/-456), 12 files modified
- hooks/hooks.py (+5/-8)
- lib/ubuntu_repository_cache/apache.py (+22/-24)
- lib/ubuntu_repository_cache/metadata_sync.py (+91/-92)
- lib/ubuntu_repository_cache/mirror.py (+47/-72)
- lib/ubuntu_repository_cache/service.py (+34/-49)
- lib/ubuntu_repository_cache/squid.py (+31/-38)
- lib/ubuntu_repository_cache/storage.py (+16/-27)
- lib/ubuntu_repository_cache/tests/test_metadata_sync.py (+58/-47)
- lib/ubuntu_repository_cache/tests/test_util.py (+0/-2)
- lib/ubuntu_repository_cache/util.py (+27/-35)
- reactive/ubuntu-repository-cache.py (+13/-21)
- tests/util.py (+40/-41)
To merge this branch: bzr merge lp:~sajoupa/ubuntu-repository-cache/layer-black-reformatting
Related bugs: none
| Reviewer | Review Type | Date Requested | Status |
|---|---|---|---|
| Canonical IS Reviewers | | | Pending |
| Ubuntu Repository Cache Charmers, Canonical | | | Pending |
Review via email: mp+391866@code.launchpad.net
Commit message
auto reformatting by make lint (black)
Description of the change
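No description was given; the change is the mechanical output of `make lint` with black. As a minimal sketch of the effect, the snippet below reproduces one hunk from the hooks/hooks.py diff further down, with a placeholder command substituted so it runs standalone. Black joins manually wrapped calls that now fit on a single line; the reformatted lines suggest a limit of roughly 120 columns, an inference, since the Makefile and black configuration are not part of this diff.

```python
import subprocess

# Hypothetical stand-in for the real command built in parse_config().
cmd = ['bash', '-c', 'true']

# Before: wrapped by hand to stay under 79 columns.
output = subprocess.Popen(cmd, shell=False,
                          stdout=subprocess.PIPE).communicate()[0]

# After black: joined onto one line, since it fits within the
# project's longer line limit.
output = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE).communicate()[0]
```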
Revision history for this message
🤖 Canonical IS Merge Bot (canonical-is-mergebot) wrote:
Revision history for this message
🤖 Canonical IS Merge Bot (canonical-is-mergebot) wrote:
Unable to determine commit message from repository - please click "Set commit message" and enter the commit message manually.
Revision history for this message
🤖 Canonical IS Merge Bot (canonical-is-mergebot) wrote:
Change successfully merged at revision 286
Preview Diff
1 | === modified file 'hooks/hooks.py' |
2 | --- hooks/hooks.py 2020-05-14 02:15:28 +0000 |
3 | +++ hooks/hooks.py 2020-10-06 10:05:01 +0000 |
4 | @@ -29,10 +29,8 @@ |
5 | |
6 | def parse_config(conf=service.CONFIG_PATH): |
7 | keys = {} |
8 | - cmd = ['bash', '-c', "trap 'env -0' exit; source {} > /dev/null 2>&1" |
9 | - .format(conf)] |
10 | - output = subprocess.Popen(cmd, shell=False, |
11 | - stdout=subprocess.PIPE).communicate()[0] |
12 | + cmd = ['bash', '-c', "trap 'env -0' exit; source {} > /dev/null 2>&1".format(conf)] |
13 | + output = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE).communicate()[0] |
14 | for ll in output.decode('utf-8').split('\x00'): |
15 | if not ll: |
16 | continue |
17 | @@ -45,11 +43,11 @@ |
18 | @HOOKS.hook('ubuntu_repository_cache_sync') |
19 | @util.run_as_user('www-sync') |
20 | def ubuntu_repository_cache_sync(): |
21 | - '''Notify clustered peers that metadata is available. |
22 | + """Notify clustered peers that metadata is available. |
23 | |
24 | An external system (a cron job) will trigger this hook after it has |
25 | finished syncing metadata from upstream. This will notify the peers that |
26 | - metadata is ready.''' |
27 | + metadata is ready.""" |
28 | |
29 | LOG('Notifying peers of metadata update') |
30 | # We can't use hookenv.config() here because the persistent config |
31 | @@ -72,8 +70,7 @@ |
32 | link_dir = '/'.join((apache_data, 'ubuntu_active')) |
33 | LOG('Pushing updates to peers', hookenv.DEBUG) |
34 | source = meta_dir + '/' |
35 | - successful_peers = mirror.rsync_to_peers( |
36 | - source, dest, link_dir=link_dir) |
37 | + successful_peers = mirror.rsync_to_peers(source, dest, link_dir=link_dir) |
38 | |
39 | # Push metaversion_good canary to indicate a complete sync |
40 | canary = '_'.join((meta_dir, 'good')) |
41 | |
42 | === modified file 'lib/ubuntu_repository_cache/apache.py' |
43 | --- lib/ubuntu_repository_cache/apache.py 2020-05-13 03:19:35 +0000 |
44 | +++ lib/ubuntu_repository_cache/apache.py 2020-10-06 10:05:01 +0000 |
45 | @@ -86,9 +86,7 @@ |
46 | 'maxconnectionsperchild': config['apache2_mpm_maxconnectionsperchild'], |
47 | } |
48 | conf_mpm_worker = '/etc/apache2/conf-available/000mpm-worker.conf' |
49 | - templating.render('apache2/mpm_worker.template', |
50 | - conf_mpm_worker, |
51 | - mpm_context) |
52 | + templating.render('apache2/mpm_worker.template', conf_mpm_worker, mpm_context) |
53 | subprocess.check_call(['a2enconf', '000mpm-worker']) |
54 | mods_mpm_worker = '/etc/apache2/mods-available/mpm_worker.conf' |
55 | if not os.path.exists(mods_mpm_worker): |
56 | @@ -107,9 +105,7 @@ |
57 | 'server_signature': config['apache2_server_signature'], |
58 | 'trace_enabled': config['apache2_trace_enabled'], |
59 | } |
60 | - templating.render('apache2/security.template', |
61 | - '/etc/apache2/conf-available/security.conf', |
62 | - security_context) |
63 | + templating.render('apache2/security.template', '/etc/apache2/conf-available/security.conf', security_context) |
64 | subprocess.check_call(['a2enconf', 'security']) |
65 | |
66 | |
67 | @@ -126,15 +122,16 @@ |
68 | failover_host = util.get_failover_host() |
69 | if util.in_failover() and failover_host: |
70 | uri = 'http://%s:80' % failover_host |
71 | - metaproxy_conf = ('ProxyRemote http://{disp}/ {uri}\n' |
72 | - '\tProxyPass / http://{disp}/\n' |
73 | - '\tProxyPassReverse / http://{disp}/').format( |
74 | - uri=uri, disp=config['display-host']) |
75 | + metaproxy_conf = ( |
76 | + 'ProxyRemote http://{disp}/ {uri}\n' |
77 | + '\tProxyPass / http://{disp}/\n' |
78 | + '\tProxyPassReverse / http://{disp}/' |
79 | + ).format(uri=uri, disp=config['display-host']) |
80 | apache_context['MetadataProxy'] = metaproxy_conf |
81 | |
82 | - templating.render('apache2/archive_ubuntu_com.conf', |
83 | - '/etc/apache2/sites-available/archive_ubuntu_com.conf', |
84 | - apache_context) |
85 | + templating.render( |
86 | + 'apache2/archive_ubuntu_com.conf', '/etc/apache2/sites-available/archive_ubuntu_com.conf', apache_context |
87 | + ) |
88 | subprocess.call(['a2ensite', 'archive_ubuntu_com']) |
89 | start() |
90 | |
91 | @@ -146,8 +143,7 @@ |
92 | LOG('Apache remoteip_logging changed') |
93 | if config['remoteip_logging'] is True: |
94 | subprocess.check_call(['a2enmod', 'remoteip']) |
95 | - templating.render('apache2/remoteip.conf', |
96 | - '/etc/apache2/conf-available/remoteip.conf', {}) |
97 | + templating.render('apache2/remoteip.conf', '/etc/apache2/conf-available/remoteip.conf', {}) |
98 | subprocess.check_call(['a2enconf', 'remoteip']) |
99 | LOG('Enabled remoteip logging') |
100 | else: |
101 | @@ -196,22 +192,24 @@ |
102 | else: |
103 | # try to find proof we're in Azure |
104 | # in any version of the status json |
105 | - if any('DataSourceAzure' in x.get('datasource', '') |
106 | - for x in status_json.values()): |
107 | + if any('DataSourceAzure' in x.get('datasource', '') for x in status_json.values()): |
108 | # In azure, we rely on cloud services |
109 | # instead of doing our own haproxy work |
110 | nrpe_config.add_check( |
111 | shortname='load-balancer', |
112 | description='e2e check that regional load balancer is functioning', |
113 | - check_cmd=('check_http ' |
114 | - '--hostname ubuntu-archive-asm.trafficmanager.net ' |
115 | - '--url "/{}/" --string "dists/" ' |
116 | - '--expect \"200\\ OK\"').format(path_base), |
117 | + check_cmd=( |
118 | + 'check_http ' |
119 | + '--hostname ubuntu-archive-asm.trafficmanager.net ' |
120 | + '--url "/{}/" --string "dists/" ' |
121 | + '--expect \"200\\ OK\"' |
122 | + ).format(path_base), |
123 | ) |
124 | |
125 | nrpe_config.add_check( |
126 | shortname='apache', |
127 | description='Apache2 serving archive metadata', |
128 | - check_cmd=('check_http --hostname localhost --url "/{}/" ' |
129 | - '--string "dists/" --expect \"200\\ OK\"').format(path_base), |
130 | - ) |
131 | + check_cmd=('check_http --hostname localhost --url "/{}/" ' '--string "dists/" --expect \"200\\ OK\"').format( |
132 | + path_base |
133 | + ), |
134 | + ) |
135 | |
136 | === modified file 'lib/ubuntu_repository_cache/metadata_sync.py' |
137 | --- lib/ubuntu_repository_cache/metadata_sync.py 2020-10-05 14:35:30 +0000 |
138 | +++ lib/ubuntu_repository_cache/metadata_sync.py 2020-10-06 10:05:01 +0000 |
139 | @@ -14,12 +14,7 @@ |
140 | from datetime import datetime |
141 | from itertools import cycle |
142 | |
143 | -from subprocess import ( |
144 | - CalledProcessError, |
145 | - check_call, |
146 | - check_output, |
147 | - STDOUT |
148 | -) |
149 | +from subprocess import CalledProcessError, check_call, check_output, STDOUT |
150 | |
151 | from .util import ( |
152 | FileSemaphore, |
153 | @@ -55,14 +50,15 @@ |
154 | |
155 | class ContentBad(Exception): |
156 | '''Mirror content is inconsistent''' |
157 | + |
158 | pass |
159 | |
160 | |
161 | class UpstreamMirrorSyncError(Exception): |
162 | - '''The upstream mirror is currently syncing. |
163 | + """The upstream mirror is currently syncing. |
164 | |
165 | We shouldn't sync with it right now since we will certainly get an invalid |
166 | - snapshot of its state.''' |
167 | + snapshot of its state.""" |
168 | |
169 | |
170 | # VENDORING OF CHARMHELPERS FUNCTIONS. |
171 | @@ -70,8 +66,8 @@ |
172 | # hook environment. There are lifted and adapted: they are mostly thin wrappers |
173 | # around subprocess - ~tribaal |
174 | |
175 | -def rsync(from_path, to_path, options, log, flags="-r", |
176 | - check_output=check_output, user=None, timeout=None): |
177 | + |
178 | +def rsync(from_path, to_path, options, log, flags="-r", check_output=check_output, user=None, timeout=None): |
179 | """ |
180 | A wrapper around subprocess to execute an rsync between a source and a |
181 | destination. |
182 | @@ -119,8 +115,7 @@ |
183 | This function has side-effects through os, pwd and grp |
184 | """ |
185 | if log: |
186 | - log("Making dir {} {}:{} {:o}".format(path, owner, group, |
187 | - perms)) |
188 | + log("Making dir {} {}:{} {:o}".format(path, owner, group, perms)) |
189 | uid = pwd.getpwnam(owner).pw_uid |
190 | gid = grp.getgrnam(group).gr_gid |
191 | realpath = os.path.abspath(path) |
192 | @@ -139,26 +134,25 @@ |
193 | """ |
194 | log("WARNING: {}".format(message)) |
195 | |
196 | + |
197 | # END OF VENDORING OF CHARMHELPERS FUNCTIONS. |
198 | |
199 | |
200 | def mirror_archive(hostname, apache_root, local_unit, log, mirror_series, rsync_module='ubuntu'): # NOQA: C901 |
201 | - '''Sync Ubuntu repository metadata to the mirror. |
202 | + """Sync Ubuntu repository metadata to the mirror. |
203 | |
204 | Metadata synchronization is performed and then the metadata is |
205 | verified. |
206 | |
207 | Returns metadata version (destination file name) |
208 | - ''' |
209 | + """ |
210 | |
211 | now = datetime.utcnow().replace(microsecond=0) |
212 | |
213 | apache_data = '/'.join((apache_root, 'data')) |
214 | link_active = '/'.join((apache_data, 'ubuntu_active')) |
215 | link_next = '/'.join((apache_data, 'ubuntu_next')) |
216 | - dest = '/'.join((apache_data, 'ubuntu_{}_u{}'.format( |
217 | - now.isoformat(sep='_'), |
218 | - local_unit.split('/')[-1]))) |
219 | + dest = '/'.join((apache_data, 'ubuntu_{}_u{}'.format(now.isoformat(sep='_'), local_unit.split('/')[-1]))) |
220 | log('Performing sync to %s' % dest) |
221 | |
222 | # The archive may have multiple IP addresses, try each several times. |
223 | @@ -243,16 +237,16 @@ |
224 | # Delete all unused archive metadata mirror directories |
225 | log('Removing old, unused repository mirrors.') |
226 | clean_metadata_dir( |
227 | - apache_root, log, keep=set((os.path.realpath(dest), |
228 | - os.path.realpath('_'.join((dest, 'good'))), |
229 | - os.path.realpath(link_active)))) |
230 | + apache_root, |
231 | + log, |
232 | + keep=set((os.path.realpath(dest), os.path.realpath('_'.join((dest, 'good'))), os.path.realpath(link_active))), |
233 | + ) |
234 | log('Unused repository mirrors removed.') |
235 | |
236 | return os.path.basename(dest) |
237 | |
238 | |
239 | -def main(environment, log, mirror_archive=mirror_archive, |
240 | - check_call=check_call): |
241 | +def main(environment, log, mirror_archive=mirror_archive, check_call=check_call): |
242 | """The main function for this script. |
243 | |
244 | This is called at the very bottom of this file should the file be executed |
245 | @@ -269,8 +263,7 @@ |
246 | unit_path = environment["UNIT_PATH"] |
247 | mirror_series = environment["MIRROR_SERIES"] |
248 | |
249 | - meta_ver = mirror_archive(sync_host, apache_root, local_unit, log, |
250 | - mirror_series, rsync_module=rsync_module) |
251 | + meta_ver = mirror_archive(sync_host, apache_root, local_unit, log, mirror_series, rsync_module=rsync_module) |
252 | if meta_ver is None: |
253 | # If we failed to download and/or validate a new version of the |
254 | # archives, don't propagate the information to other units. |
255 | @@ -281,7 +274,7 @@ |
256 | |
257 | # Call the sync hook inside the juju environment to propagate changes to |
258 | # other units |
259 | - path = ('/usr/local/sbin/charm-env /var/lib/juju/agents/{}/charm/hooks/ubuntu-repository-cache-sync {}') |
260 | + path = '/usr/local/sbin/charm-env /var/lib/juju/agents/{}/charm/hooks/ubuntu-repository-cache-sync {}' |
261 | path = path.format(unit_path, meta_ver) |
262 | cmd = ['juju-run', local_unit, path] |
263 | |
264 | @@ -293,30 +286,32 @@ |
265 | |
266 | |
267 | def rsync_should_giveup(exception): |
268 | - '''Based on the provided returncode, should rsync be retried or aborted. |
269 | + """Based on the provided returncode, should rsync be retried or aborted. |
270 | |
271 | This function has no side-effects. |
272 | - ''' |
273 | + """ |
274 | |
275 | returncode = exception.returncode |
276 | # Exit values for rsync that will trigger a retry of the operation |
277 | - exitval_retry = frozenset(( |
278 | - 5, # Error starting client-server protocol |
279 | - 6, # Daemon unable to append to log-file |
280 | - 10, # Error in socket I/O |
281 | - 11, # Error in file I/O |
282 | - 12, # Error in rsync protocol data stream |
283 | - 13, # Errors with program diagnostics |
284 | - 14, # Error in IPC code |
285 | - 20, # Received SIGUSR1 or SIGINT |
286 | - 21, # Some error returned by waitpid() |
287 | - 22, # Error allocating core memory buffers |
288 | - 23, # Partial transfer due to error |
289 | - 24, # Partial transfer due to vanished source files |
290 | - 25, # The --max-delete limit stopped deletions |
291 | - 30, # Timeout in data send/receive |
292 | - 35, # Timeout waiting for daemon connection |
293 | - )) |
294 | + exitval_retry = frozenset( |
295 | + ( |
296 | + 5, # Error starting client-server protocol |
297 | + 6, # Daemon unable to append to log-file |
298 | + 10, # Error in socket I/O |
299 | + 11, # Error in file I/O |
300 | + 12, # Error in rsync protocol data stream |
301 | + 13, # Errors with program diagnostics |
302 | + 14, # Error in IPC code |
303 | + 20, # Received SIGUSR1 or SIGINT |
304 | + 21, # Some error returned by waitpid() |
305 | + 22, # Error allocating core memory buffers |
306 | + 23, # Partial transfer due to error |
307 | + 24, # Partial transfer due to vanished source files |
308 | + 25, # The --max-delete limit stopped deletions |
309 | + 30, # Timeout in data send/receive |
310 | + 35, # Timeout waiting for daemon connection |
311 | + ) |
312 | + ) |
313 | |
314 | if returncode in exitval_retry: |
315 | return False # Don't give up |
316 | @@ -340,8 +335,7 @@ |
317 | # lots of time when we need to revert, instead of 15-20 mins to wait for a |
318 | # complete sync. |
319 | count = 0 |
320 | - entries = sorted( |
321 | - [os.path.realpath(p) for p in glob.glob(glob_path)], reverse=True) |
322 | + entries = sorted([os.path.realpath(p) for p in glob.glob(glob_path)], reverse=True) |
323 | for entry in entries: |
324 | if count < NUMBER_OF_REPOS_TO_KEEP and entry.endswith('_good'): |
325 | keep.add(entry[:-5]) |
326 | @@ -362,31 +356,32 @@ |
327 | log("Keeping {}".format(entry)) |
328 | |
329 | |
330 | -@backoff.on_exception(backoff.expo, CalledProcessError, |
331 | - max_tries=RSYNC_MAX_RETRIES, giveup=rsync_should_giveup) |
332 | +@backoff.on_exception(backoff.expo, CalledProcessError, max_tries=RSYNC_MAX_RETRIES, giveup=rsync_should_giveup) |
333 | def _mirror_metadata(archive_ip, dest, link_dest, log, mirror_series, rsync_module='ubuntu'): |
334 | '''Mirror metadata from the archive.''' |
335 | |
336 | # Perform an rsync of the metadata, excluding some contents |
337 | rsync_source = '{}::{}/'.format(archive_ip, rsync_module) |
338 | - rsync_options = ('--quiet', |
339 | - '--recursive', |
340 | - '--links', |
341 | - '--perms', |
342 | - '--chmod=g+w', |
343 | - '--times', |
344 | - '--compress', |
345 | - '--delete', |
346 | - '--delete-during', |
347 | - '--link-dest=%s/' % link_dest, |
348 | - '--exclude=.trace', |
349 | - '--exclude=indices', |
350 | - '--exclude=project/trace', |
351 | - '--exclude=pool', |
352 | - '--exclude=dists/*/*/debian-installer', |
353 | - '--exclude=dists/*/*/dist-upgrader-all', |
354 | - '--exclude=dists/*/*/installer*', |
355 | - '--exclude=dists/*/*/uefi',) |
356 | + rsync_options = ( |
357 | + '--quiet', |
358 | + '--recursive', |
359 | + '--links', |
360 | + '--perms', |
361 | + '--chmod=g+w', |
362 | + '--times', |
363 | + '--compress', |
364 | + '--delete', |
365 | + '--delete-during', |
366 | + '--link-dest=%s/' % link_dest, |
367 | + '--exclude=.trace', |
368 | + '--exclude=indices', |
369 | + '--exclude=project/trace', |
370 | + '--exclude=pool', |
371 | + '--exclude=dists/*/*/debian-installer', |
372 | + '--exclude=dists/*/*/dist-upgrader-all', |
373 | + '--exclude=dists/*/*/installer*', |
374 | + '--exclude=dists/*/*/uefi', |
375 | + ) |
376 | # Ordering of the include/exclude rules is important, rsync will |
377 | # follow the first matching rule. So place the common rules above |
378 | # the rules to limit by series. |
379 | @@ -400,18 +395,16 @@ |
380 | rsync(rsync_source, dest, rsync_options, log, user='www-sync', timeout=3600) |
381 | |
382 | |
383 | -@backoff.on_exception(backoff.expo, CalledProcessError, |
384 | - max_tries=RSYNC_MAX_RETRIES, giveup=rsync_should_giveup) |
385 | -@backoff.on_exception(backoff.expo, UpstreamMirrorSyncError, |
386 | - max_tries=UPSTREAM_SYNC_MAX_RETRIES, factor=60) |
387 | +@backoff.on_exception(backoff.expo, CalledProcessError, max_tries=RSYNC_MAX_RETRIES, giveup=rsync_should_giveup) |
388 | +@backoff.on_exception(backoff.expo, UpstreamMirrorSyncError, max_tries=UPSTREAM_SYNC_MAX_RETRIES, factor=60) |
389 | def _mirror_base(apache_root, archive_ip, dest, log=None, rsync_module='ubuntu'): |
390 | - '''Mirror just the root of the archive. |
391 | + """Mirror just the root of the archive. |
392 | |
393 | This checks for the file 'Archive-Update-in-Progress-*' and |
394 | throws ContentBad if the archive source isn't ready. |
395 | Use this as a quick test prior to attempting a full mirror |
396 | operation. |
397 | - ''' |
398 | + """ |
399 | |
400 | source = '{}::{}/'.format(archive_ip, rsync_module) |
401 | mkdir(dest, owner='www-sync', group='www-data', perms=0o775, log=log) |
402 | @@ -419,8 +412,16 @@ |
403 | link_dest = '/'.join((apache_data, 'ubuntu_active')) |
404 | |
405 | # Sync just the root directory |
406 | - rsync_options = ('--links', '--perms', '--times', '--dirs', '--chmod=g+w', |
407 | - '--delete', '--compress', '--link-dest=%s/' % link_dest) |
408 | + rsync_options = ( |
409 | + '--links', |
410 | + '--perms', |
411 | + '--times', |
412 | + '--dirs', |
413 | + '--chmod=g+w', |
414 | + '--delete', |
415 | + '--compress', |
416 | + '--link-dest=%s/' % link_dest, |
417 | + ) |
418 | rsync(source, dest, rsync_options, log, flags='--quiet', user='www-sync', timeout=3600) |
419 | |
420 | # Check for file 'Archive-Update-in-Progress*' which signals |
421 | @@ -437,10 +438,14 @@ |
422 | # Traverse distributions and fail early by starting with the newest. |
423 | # The newest (development branch) will have the highest rate of churn, |
424 | # therefore the greatest chance of being invalid. |
425 | - dists = sorted([ent for ent in os.listdir(dists_root) if |
426 | - os.path.isdir('/'.join((dists_root, ent))) and not |
427 | - os.path.islink('/'.join((dists_root, ent)))], |
428 | - reverse=True) |
429 | + dists = sorted( |
430 | + [ |
431 | + ent |
432 | + for ent in os.listdir(dists_root) |
433 | + if os.path.isdir('/'.join((dists_root, ent))) and not os.path.islink('/'.join((dists_root, ent))) |
434 | + ], |
435 | + reverse=True, |
436 | + ) |
437 | |
438 | for dist in dists: |
439 | log('Verifying distribution %s' % dist) |
440 | @@ -448,18 +453,17 @@ |
441 | release = _parse_release_file(dist_dir) |
442 | _verify_distribution(dist_dir, release) |
443 | |
444 | - for uip in glob.glob('/'.join((archive_root, |
445 | - 'Archive-Update-in-Progress-*'))): |
446 | + for uip in glob.glob('/'.join((archive_root, 'Archive-Update-in-Progress-*'))): |
447 | # XXX: Should be debug-only |
448 | log('Removing {} from clean repository'.format(uip)) |
449 | os.remove(uip) |
450 | |
451 | |
452 | def _parse_release_file(path): |
453 | - '''Read a distribution Release file located at 'path' |
454 | + """Read a distribution Release file located at 'path' |
455 | |
456 | Returns a dictionary describing Release file contents. |
457 | - ''' |
458 | + """ |
459 | |
460 | release = {} |
461 | release['name'] = os.path.basename(path.strip('/')) |
462 | @@ -503,22 +507,20 @@ |
463 | |
464 | size = int(files[filename]['size']) |
465 | if disk_size != size: |
466 | - msg = 'Sizes do not match for {} in {}'.format(filename, |
467 | - release['name']) |
468 | + msg = 'Sizes do not match for {} in {}'.format(filename, release['name']) |
469 | raise ContentBad(msg) |
470 | |
471 | with open(filepath, 'rb') as infile: |
472 | sha256 = hashlib.sha256() |
473 | while True: |
474 | - buf = infile.read(1024*1024*100) |
475 | + buf = infile.read(1024 * 1024 * 100) |
476 | if not buf: |
477 | break |
478 | sha256.update(buf) |
479 | digest = sha256.hexdigest() |
480 | if digest != files[filename]['SHA256']: |
481 | file_mtime = os.stat(filepath).st_mtime |
482 | - msg = 'SHA256 mismatch for {} in {}.'.format(filename, |
483 | - release['name']) |
484 | + msg = 'SHA256 mismatch for {} in {}.'.format(filename, release['name']) |
485 | if release['mtime'] < file_mtime: |
486 | ' '.join((msg, 'Stale Release file.')) |
487 | raise ContentBad(msg) |
488 | @@ -549,11 +551,8 @@ |
489 | |
490 | try: |
491 | with FileSemaphore(SEMAPHORE_FILE): |
492 | - cache_logger.info( |
493 | - "Metadata sync started from metadata_sync.py:__main__") |
494 | + cache_logger.info("Metadata sync started from metadata_sync.py:__main__") |
495 | main(environment, cache_logger.info) |
496 | cache_logger.info("Metadata sync from __main__ ended.") |
497 | except SemaphoreExistsError: |
498 | - cache_logger.info( |
499 | - "Can't acquire semaphore: {} already exists.".format( |
500 | - SEMAPHORE_FILE)) |
501 | + cache_logger.info("Can't acquire semaphore: {} already exists.".format(SEMAPHORE_FILE)) |
502 | |
503 | === modified file 'lib/ubuntu_repository_cache/mirror.py' |
504 | --- lib/ubuntu_repository_cache/mirror.py 2020-10-06 10:05:01 +0000 |
505 | +++ lib/ubuntu_repository_cache/mirror.py 2020-10-06 10:05:01 +0000 |
506 | @@ -27,22 +27,23 @@ |
507 | SERVICE = 'ubuntu-repository-cache' |
508 | |
509 | |
510 | -def _rsync(source, dest, link_dest=None, optlist=None, user='www-sync', |
511 | - retries=1): |
512 | +def _rsync(source, dest, link_dest=None, optlist=None, user='www-sync', retries=1): |
513 | '''Perform an rsync as a specified user''' |
514 | |
515 | - options = ['--quiet', |
516 | - '--recursive', |
517 | - '--links', |
518 | - '--perms', |
519 | - '--chmod=g+w', |
520 | - '--times', |
521 | - '--compress', |
522 | - '--delete', |
523 | - '--delete-before', |
524 | - '--timeout=120', |
525 | - '-e', 'sudo -u {} ssh -l {}'.format(user, user), |
526 | - ] |
527 | + options = [ |
528 | + '--quiet', |
529 | + '--recursive', |
530 | + '--links', |
531 | + '--perms', |
532 | + '--chmod=g+w', |
533 | + '--times', |
534 | + '--compress', |
535 | + '--delete', |
536 | + '--delete-before', |
537 | + '--timeout=120', |
538 | + '-e', |
539 | + 'sudo -u {} ssh -l {}'.format(user, user), |
540 | + ] |
541 | if optlist: |
542 | options.extend(optlist) |
543 | if link_dest: |
544 | @@ -52,9 +53,7 @@ |
545 | try: |
546 | host.rsync(source, dest, options=options, timeout=3600) |
547 | except CalledProcessError as excp: |
548 | - LOG('rsync failed({}): {}'.format(excp.returncode, |
549 | - excp.output), |
550 | - hookenv.WARNING) |
551 | + LOG('rsync failed({}): {}'.format(excp.returncode, excp.output), hookenv.WARNING) |
552 | if not metadata_sync.rsync_should_giveup(excp): |
553 | attempt += 1 |
554 | if attempt < retries: |
555 | @@ -65,16 +64,13 @@ |
556 | LOG('No retries left.') |
557 | raise |
558 | else: |
559 | - LOG('rsync failed({}): {}'.format(excp.returncode, |
560 | - excp.output), |
561 | - hookenv.WARNING) |
562 | + LOG('rsync failed({}): {}'.format(excp.returncode, excp.output), hookenv.WARNING) |
563 | raise |
564 | return |
565 | |
566 | |
567 | @util.run_as_user('www-sync') |
568 | -def rsync_to_peers(source_dir, dest_dir, link_dir=None, user='www-sync', |
569 | - peers=None): |
570 | +def rsync_to_peers(source_dir, dest_dir, link_dir=None, user='www-sync', peers=None): |
571 | """ |
572 | Use rsync to push a directory to all configured peers. |
573 | |
574 | @@ -94,8 +90,7 @@ |
575 | rsync_dest = '{}@{}:{}'.format(user, peer, dest_dir) |
576 | LOG('Syncing {} to {}.'.format(source_dir, rsync_dest)) |
577 | try: |
578 | - _rsync(source_dir, rsync_dest, link_dest=link_dir, |
579 | - user=user, retries=10) |
580 | + _rsync(source_dir, rsync_dest, link_dest=link_dir, user=user, retries=10) |
581 | except Exception: |
582 | LOG('Sync to {} failed'.format(peer)) |
583 | traceback.print_exc(file=sys.stdout) |
584 | @@ -106,25 +101,23 @@ |
585 | |
586 | |
587 | @util.run_as_user('www-sync') |
588 | -def rsync_from_peer(source_dir, dest_dir, peer_ip, link_dir=None, |
589 | - user='www-sync'): |
590 | +def rsync_from_peer(source_dir, dest_dir, peer_ip, link_dir=None, user='www-sync'): |
591 | '''Use rsync to pull a directory from a configured peer. ''' |
592 | |
593 | rsync_source = '{}@{}:{}'.format(user, peer_ip, source_dir) |
594 | LOG('Syncing {} to {}.'.format(rsync_source, dest_dir)) |
595 | - _rsync(rsync_source, dest_dir, link_dest=link_dir, |
596 | - user=user, retries=10) |
597 | + _rsync(rsync_source, dest_dir, link_dest=link_dir, user=user, retries=10) |
598 | LOG('Sync to {} complete.'.format(peer_ip)) |
599 | |
600 | |
601 | def _leader_update_metadata(): |
602 | - '''Update the metadata on a leader in the cluster. |
603 | + """Update the metadata on a leader in the cluster. |
604 | |
605 | If the leader is newly elected, it will pull down the latest |
606 | metadata. The metadata is not pushed to peers. The peers will |
607 | pull the metadata once on the next relation-changed hook triggered |
608 | by setting the meta_version relation data. |
609 | - ''' |
610 | + """ |
611 | |
612 | LOG('Updating metadata on the leader') |
613 | # Relation data |
614 | @@ -136,8 +129,7 @@ |
615 | leader_id = hookenv.leader_get(attribute="leader_id") |
616 | # Log the race if it happened: |
617 | if leader_id != hookenv.local_unit(): |
618 | - LOG('Leader changed between peer_update_metadata and ' |
619 | - '_leader_update_metadata', hookenv.WARNING) |
620 | + LOG('Leader changed between peer_update_metadata and ' '_leader_update_metadata', hookenv.WARNING) |
621 | |
622 | leader_rel = rel[leader_id] |
623 | |
624 | @@ -158,8 +150,7 @@ |
625 | LOG('Nothing to update') |
626 | else: |
627 | LOG('Leader missing expected metadata. Recovering.') |
628 | - LOG('meta_ver:{} active_ver:{}'.format(meta_ver, active_ver), |
629 | - hookenv.DEBUG) |
630 | + LOG('meta_ver:{} active_ver:{}'.format(meta_ver, active_ver), hookenv.DEBUG) |
631 | metadata_sync.clean_metadata_dir(apache_root, LOG) |
632 | |
633 | # Don't use the existing metadata. If this is a current unit |
634 | @@ -175,16 +166,15 @@ |
635 | # reasonable to panic and just explode (hence the absence of try block) |
636 | with util.FileSemaphore(util.SEMAPHORE_FILE): |
637 | new_ver = metadata_sync.mirror_archive( |
638 | - sync_host, apache_root, hookenv.local_unit(), LOG, |
639 | - mirror_series, rsync_module=rsync_module) |
640 | + sync_host, apache_root, hookenv.local_unit(), LOG, mirror_series, rsync_module=rsync_module |
641 | + ) |
642 | |
643 | if not new_ver: |
644 | LOG('Leader could not gather new metadata', hookenv.WARNING) |
645 | return |
646 | util.update_active_symlink(new_ver) |
647 | |
648 | - LOG('Updating metadata version to {}'.format(new_ver), |
649 | - hookenv.DEBUG) |
650 | + LOG('Updating metadata version to {}'.format(new_ver), hookenv.DEBUG) |
651 | hookenv.relation_set(meta_version=new_ver) |
652 | |
653 | service.unpause() |
654 | @@ -206,14 +196,12 @@ |
655 | leader_id = hookenv.leader_get(attribute="leader_id") |
656 | # Log the race if it happened: |
657 | if leader_id == hookenv.local_unit(): |
658 | - LOG('Leader changed between peer_update_metadata and ' |
659 | - '_nonleader_update_metadata', hookenv.WARNING) |
660 | + LOG('Leader changed between peer_update_metadata and ' '_nonleader_update_metadata', hookenv.WARNING) |
661 | |
662 | if leader_id not in rel: |
663 | # Peers can join in any order and the leader may not have joined yet. |
664 | # Log who we think is the leader and return until it joins |
665 | - LOG('Leader {} not yet related, can\'t complete ' |
666 | - '_nonleader_update_metadata'.format(leader_id)) |
667 | + LOG('Leader {} not yet related, can\'t complete ' '_nonleader_update_metadata'.format(leader_id)) |
668 | return |
669 | |
670 | leader_rel = rel[leader_id] |
671 | @@ -254,10 +242,8 @@ |
672 | |
673 | # Can we connect to the leader? |
674 | leader_hosts = leader_rel.get('ssh_authorized_hosts') |
675 | - if not leader_hosts or \ |
676 | - hookenv.unit_private_ip() not in leader_hosts: |
677 | - LOG('Leader has not added IP to authorized_hosts yet, ' |
678 | - 'can not update.') |
679 | + if not leader_hosts or hookenv.unit_private_ip() not in leader_hosts: |
680 | + LOG('Leader has not added IP to authorized_hosts yet, ' 'can not update.') |
681 | return |
682 | |
683 | # Pull metadata from the leader if it exists and is ready |
684 | @@ -268,27 +254,23 @@ |
685 | source = meta_dir + '/' |
686 | dest = meta_dir |
687 | try: |
688 | - rsync_from_peer(source, dest, |
689 | - leader_rel.get('private-address'), |
690 | - link_dir=active_dir) |
691 | + rsync_from_peer(source, dest, leader_rel.get('private-address'), link_dir=active_dir) |
692 | except CalledProcessError: |
693 | traceback.print_exc() |
694 | - LOG('Failed to pull metadata from leader; service stopped' |
695 | - ' until next update from leader.') |
696 | + LOG('Failed to pull metadata from leader; service stopped' ' until next update from leader.') |
697 | else: |
698 | util.update_active_symlink(meta_ver) |
699 | service.unpause() |
700 | |
701 | # Do nothing. Service is stopped until next update from leader |
702 | else: |
703 | - LOG('Metadata dir does not exist on leader.', |
704 | - hookenv.DEBUG) |
705 | + LOG('Metadata dir does not exist on leader.', hookenv.DEBUG) |
706 | metadata_sync.clean_metadata_dir(apache_root, LOG) |
707 | LOG('Metadata update complete.') |
708 | |
709 | |
710 | def peer_update_metadata(): |
711 | - '''Update the metadata on a clustered peer to the current version. |
712 | + """Update the metadata on a clustered peer to the current version. |
713 | |
714 | If the 'cluster' relationship exists, this will look for the |
715 | current 'meta_version' key on the leader and attempt to |
716 | @@ -296,11 +278,13 @@ |
717 | |
718 | This function should be called from the cluster-relation-joined, |
719 | cluster-relation-changed, and cluster-relation-departed hooks. |
720 | - ''' |
721 | + """ |
722 | |
723 | - if not hookenv.is_relation_made('cluster') or \ |
724 | - not hookenv.in_relation_hook() or \ |
725 | - not hookenv.relation_type() == 'cluster': |
726 | + if ( |
727 | + not hookenv.is_relation_made('cluster') |
728 | + or not hookenv.in_relation_hook() |
729 | + or not hookenv.relation_type() == 'cluster' |
730 | + ): |
731 | return |
732 | |
733 | if cluster.is_elected_leader(None): |
734 | @@ -315,27 +299,18 @@ |
735 | def update_checks(nrpe_config): |
736 | '''Update nagios check for mirror metadata age''' |
737 | |
738 | - service_script = '/'.join((hookenv.charm_dir(), |
739 | - 'files', |
740 | - 'nrpe-external-master', |
741 | - 'check_metadata_age')) |
742 | - metadata_dir = '/'.join((unitdata.kv().get('apache-root'), |
743 | - 'data', |
744 | - 'ubuntu_active')) |
745 | - check_metadata_age_config_path = '/srv/{}/var/check_metadata_age'.format( |
746 | - hookenv.service_name()) |
747 | + service_script = '/'.join((hookenv.charm_dir(), 'files', 'nrpe-external-master', 'check_metadata_age')) |
748 | + metadata_dir = '/'.join((unitdata.kv().get('apache-root'), 'data', 'ubuntu_active')) |
749 | + check_metadata_age_config_path = '/srv/{}/var/check_metadata_age'.format(hookenv.service_name()) |
750 | |
751 | config = hookenv.config() |
752 | context = {} |
753 | context['crit_age'] = config['sync-age-crit'] |
754 | context['warn_age'] = config['sync-age-warn'] |
755 | - templating.render('check_metadata_age-config', |
756 | - check_metadata_age_config_path, |
757 | - context) |
758 | + templating.render('check_metadata_age-config', check_metadata_age_config_path, context) |
759 | |
760 | nrpe_config.add_check( |
761 | shortname='mirror', |
762 | description='Ubuntu repository cache metadata age', |
763 | - check_cmd=' '.join( |
764 | - (service_script, metadata_dir, check_metadata_age_config_path)), |
765 | + check_cmd=' '.join((service_script, metadata_dir, check_metadata_age_config_path)), |
766 | ) |
767 | |
768 | === modified file 'lib/ubuntu_repository_cache/service.py' |
769 | --- lib/ubuntu_repository_cache/service.py 2020-06-03 00:17:42 +0000 |
770 | +++ lib/ubuntu_repository_cache/service.py 2020-10-06 10:05:01 +0000 |
771 | @@ -63,27 +63,23 @@ |
772 | # environment, it doesn't mean we can support it. |
773 | preinstall_files = glob.glob('exec.d/*/charm-pre-install') |
774 | if preinstall_files: |
775 | - LOG('============================================================', |
776 | - hookenv.CRITICAL) |
777 | + LOG('============================================================', hookenv.CRITICAL) |
778 | LOG('Executing pre-install script(s).', hookenv.CRITICAL) |
779 | for pre in preinstall_files: |
780 | if not os.path.isfile(pre) or not os.access(pre, os.X_OK): |
781 | LOG('Can not execute %s' % pre, hookenv.CRITICAL) |
782 | raise RuntimeError('Pre-install script %s not executable' % pre) |
783 | try: |
784 | - LOG('============================================================', |
785 | - hookenv.CRITICAL) |
786 | + LOG('============================================================', hookenv.CRITICAL) |
787 | LOG('Calling %s' % pre, hookenv.CRITICAL) |
788 | - cmd = (pre) |
789 | + cmd = pre |
790 | subprocess.check_call(cmd) |
791 | except subprocess.CalledProcessError as xcp: |
792 | - LOG('%s failed, retcode %d' % (pre, xcp.returncode), |
793 | - hookenv.CRITICAL) |
794 | + LOG('%s failed, retcode %d' % (pre, xcp.returncode), hookenv.CRITICAL) |
795 | raise RuntimeError('%s failed, retcode %d' % (pre, xcp.returncode)) |
796 | else: |
797 | LOG('%s succeeded' % pre, hookenv.CRITICAL) |
798 | - LOG('============================================================', |
799 | - hookenv.CRITICAL) |
800 | + LOG('============================================================', hookenv.CRITICAL) |
801 | |
802 | LOG('Installing services', hookenv.INFO) |
803 | write_status('installing') |
804 | @@ -104,9 +100,9 @@ |
805 | apache.install() |
806 | squid.install() |
807 | try: |
808 | - subprocess.check_call(['useradd', '--gid', 'www-data', |
809 | - '--create-home', '--no-user-group', '--shell', |
810 | - '/bin/bash', 'www-sync']) |
811 | + subprocess.check_call( |
812 | + ['useradd', '--gid', 'www-data', '--create-home', '--no-user-group', '--shell', '/bin/bash', 'www-sync'] |
813 | + ) |
814 | except subprocess.CalledProcessError as e: |
815 | if e.returncode == 9: # User already exists. |
816 | pass |
817 | @@ -115,8 +111,7 @@ |
818 | |
819 | # Make sure a log file is available for the user to write to - this will be |
820 | # use by the cron, run as that user. |
821 | - subprocess.check_call(['install', '-o', 'www-sync', '-d', |
822 | - util.REPO_SYNC_DIR]) |
823 | + subprocess.check_call(['install', '-o', 'www-sync', '-d', util.REPO_SYNC_DIR]) |
824 | subprocess.check_call(['touch', util.REPO_SYNC_CRON_LOG]) |
825 | subprocess.check_call(['chown', 'www-sync', util.REPO_SYNC_CRON_LOG]) |
826 | |
827 | @@ -124,7 +119,7 @@ |
828 | |
829 | |
830 | def pause(): |
831 | - '''Temporarily route metadata requests to a peer (non-leader) |
832 | + """Temporarily route metadata requests to a peer (non-leader) |
833 | |
834 | Use this when the metadata does not exist so that users don't |
835 | find an empty mirror. This is achieved by using the next peer |
836 | @@ -135,7 +130,7 @@ |
837 | * takes a long time to stop/start, |
838 | * trashes the in-memory cache when stopped, and |
839 | * can continue serving requests if left running. |
840 | - ''' |
841 | + """ |
842 | |
843 | # Don't pause on leader, but warn developer of incorrect usage |
844 | # Change servicing of metadata by forwarding requests for |
845 | @@ -146,8 +141,7 @@ |
846 | util.set_failover() |
847 | render_configs() |
848 | else: |
849 | - LOG('Pause should not be called on the leader, report to support', |
850 | - hookenv.DEBUG) |
851 | + LOG('Pause should not be called on the leader, report to support', hookenv.DEBUG) |
852 | |
853 | |
854 | def unpause(): |
855 | @@ -188,28 +182,26 @@ |
856 | def is_running(): |
857 | '''Check if service is running''' |
858 | |
859 | - return apache.service_running() and \ |
860 | - squid.service_running() |
861 | + return apache.service_running() and squid.service_running() |
862 | |
863 | |
864 | def service_map(): |
865 | - '''Mapping of configuration files to services. |
866 | + """Mapping of configuration files to services. |
867 | |
868 | This is used by charmhelpers.core.host.restart_on_change to |
869 | restart services when configuration files are altered. |
870 | - ''' |
871 | + """ |
872 | |
873 | return { |
874 | '/etc/squid-deb-proxy/squid-deb-proxy.conf': ['squid-deb-proxy'], |
875 | '/etc/squid-deb-proxy/allowed-networks-src.acl': ['squid-deb-proxy'], |
876 | - ('/etc/squid-deb-proxy/mirror-dstdomain.acl.d/' |
877 | - '99-ubuntu-repository-cache'): ['squid-deb-proxy'], |
878 | + ('/etc/squid-deb-proxy/mirror-dstdomain.acl.d/' '99-ubuntu-repository-cache'): ['squid-deb-proxy'], |
879 | '/etc/apache2/sites-available/archive_ubuntu_com.conf': ['apache2'], |
880 | } |
881 | |
882 | |
883 | def reload_on_change(mapping, restart_on_failure=False): |
884 | - '''Reload services based on configuration files changing |
885 | + """Reload services based on configuration files changing |
886 | |
887 | (Based on charmhelpers.core.host.restart_on_change) |
888 | This function is used as a decorator, for example:: |
889 | @@ -222,7 +214,8 @@ |
890 | In this example, the cinder-api and cinder-volume services |
891 | would be reloaded if /etc/ceph/ceph.conf is changed by the |
892 | ceph_client_changed function. |
893 | - ''' |
894 | + """ |
895 | + |
896 | def wrap(function): |
897 | def wrapped_f(*args): |
898 | checksums = {} |
899 | @@ -235,11 +228,11 @@ |
900 | restarts += mapping[path] |
901 | services_list = list(OrderedDict.fromkeys(restarts)) |
902 | for service_name in services_list: |
903 | - LOG('Reloading %s service' % service_name, |
904 | - hookenv.DEBUG) |
905 | - host.service_reload(service_name, |
906 | - restart_on_failure=restart_on_failure) |
907 | + LOG('Reloading %s service' % service_name, hookenv.DEBUG) |
908 | + host.service_reload(service_name, restart_on_failure=restart_on_failure) |
909 | + |
910 | return wrapped_f |
911 | + |
912 | return wrap |
913 | |
914 | |
915 | @@ -255,12 +248,12 @@ |
916 | |
917 | # Log file rotation |
918 | logrotate_filename = '/etc/logrotate.d/apache2' |
919 | - logrotate_context = {'logrotate_rotate': config['logrotate_rotate'], |
920 | - 'logrotate_count': config['logrotate_count'], |
921 | - 'logrotate_dateext': config['logrotate_dateext'], |
922 | - } |
923 | - templating.render('logrotate/logrotate.conf.template', |
924 | - logrotate_filename, logrotate_context, perms=0o644) |
925 | + logrotate_context = { |
926 | + 'logrotate_rotate': config['logrotate_rotate'], |
927 | + 'logrotate_count': config['logrotate_count'], |
928 | + 'logrotate_dateext': config['logrotate_dateext'], |
929 | + } |
930 | + templating.render('logrotate/logrotate.conf.template', logrotate_filename, logrotate_context, perms=0o644) |
931 | |
932 | if not os.path.exists(CONFIG_DIR): |
933 | host.mkdir(CONFIG_DIR) |
934 | @@ -280,16 +273,11 @@ |
935 | cron_context['RsyncModule'] = config['rsync-module'] |
936 | cron_context['Minutes'] = unitdata.kv().get('rsync-minutes') |
937 | cron_context['UnitId'] = hookenv.local_unit() |
938 | - cron_context['UnitPath'] = \ |
939 | - '-'.join(('unit', hookenv.local_unit().replace('/', '-'))) |
940 | + cron_context['UnitPath'] = '-'.join(('unit', hookenv.local_unit().replace('/', '-'))) |
941 | cron_context['MirrorSeries'] = config['mirror-series'].strip() |
942 | cron_context['ConfigPath'] = urc_config_path |
943 | - templating.render('cron/ubuntu-repository-cache_rsync.cron', |
944 | - cron_filename, |
945 | - cron_context) |
946 | - templating.render('cron/ubuntu-repository-cache-config', |
947 | - urc_config_path, |
948 | - cron_context) |
949 | + templating.render('cron/ubuntu-repository-cache_rsync.cron', cron_filename, cron_context) |
950 | + templating.render('cron/ubuntu-repository-cache-config', urc_config_path, cron_context) |
951 | else: |
952 | try: |
953 | os.stat(cron_filename) |
954 | @@ -315,17 +303,14 @@ |
955 | mirror.update_checks(nrpe_config) |
956 | |
957 | # Metadata service check |
958 | - service_script = '/'.join((hookenv.charm_dir(), |
959 | - 'files', |
960 | - 'nrpe-external-master', |
961 | - 'check_metadata_service')) |
962 | + service_script = '/'.join((hookenv.charm_dir(), 'files', 'nrpe-external-master', 'check_metadata_service')) |
963 | var_dir = '/'.join(('/srv', hookenv.service_name(), 'var')) |
964 | state_filename = '/'.join((var_dir, hookenv.service_name())) |
965 | |
966 | nrpe_config.add_check( |
967 | shortname='metadata', |
968 | description='Ubuntu repository cache metadata service', |
969 | - check_cmd=' '. join((service_script, state_filename)), |
970 | + check_cmd=' '.join((service_script, state_filename)), |
971 | ) |
972 | |
973 | nrpe_config.write() |
974 | |
975 | === modified file 'lib/ubuntu_repository_cache/squid.py' |
976 | --- lib/ubuntu_repository_cache/squid.py 2020-05-07 05:26:37 +0000 |
977 | +++ lib/ubuntu_repository_cache/squid.py 2020-10-06 10:05:01 +0000 |
978 | @@ -13,9 +13,7 @@ |
979 | unitdata, |
980 | ) |
981 | |
982 | -from charmhelpers.core.host import ( |
983 | - lsb_release |
984 | -) |
985 | +from charmhelpers.core.host import lsb_release |
986 | |
987 | from . import util |
988 | |
989 | @@ -25,40 +23,35 @@ |
990 | |
991 | @util.run_as_user('proxy') |
992 | def init_caches(): |
993 | - '''Initialize the squid on-disk caches |
994 | + """Initialize the squid on-disk caches |
995 | |
996 | This must be called while the service is stopped and after the |
997 | configuration has been written. |
998 | - ''' |
999 | + """ |
1000 | |
1001 | if not unitdata.kv().get('squid-disk-caches'): |
1002 | - LOG('No on disk cache directories founds for squid.', |
1003 | - hookenv.ERROR) |
1004 | + LOG('No on disk cache directories founds for squid.', hookenv.ERROR) |
1005 | return |
1006 | - subprocess.check_call(['/usr/sbin/squid3', '-N', '-z', |
1007 | - '-f', '/etc/squid-deb-proxy/squid-deb-proxy.conf']) |
1008 | + subprocess.check_call(['/usr/sbin/squid3', '-N', '-z', '-f', '/etc/squid-deb-proxy/squid-deb-proxy.conf']) |
1009 | |
1010 | |
1011 | def size_caches(): |
1012 | - '''Determine the sizes for in-memory/on-disk squid caches |
1013 | + """Determine the sizes for in-memory/on-disk squid caches |
1014 | |
1015 | This must be run at installation time. Runtime tuning is unsupported. |
1016 | - ''' |
1017 | + """ |
1018 | |
1019 | avail_disk = 0 |
1020 | for cache in unitdata.kv().get('squid-disk-caches'): |
1021 | avail_disk += cache[1] |
1022 | - LOG('{}MB of disk cache available for squid.'.format(avail_disk), |
1023 | - hookenv.DEBUG) |
1024 | + LOG('{}MB of disk cache available for squid.'.format(avail_disk), hookenv.DEBUG) |
1025 | |
1026 | memory = util.system_total_mem() / 3 |
1027 | # Estimate of 100MB of overhead for squid |
1028 | memory = max(memory - 100, 0) |
1029 | |
1030 | if memory < 128: |
1031 | - LOG('Squid has too little memory ({}MB), ' |
1032 | - 'can not deploy service'.format(memory), |
1033 | - hookenv.ERROR) |
1034 | + LOG('Squid has too little memory ({}MB), ' 'can not deploy service'.format(memory), hookenv.ERROR) |
1035 | raise RuntimeError('Too little system memory for squid deployment') |
1036 | |
1037 | # Start by giving squid a maximum of 256MB for in-memory cache |
1038 | @@ -77,10 +70,11 @@ |
1039 | |
1040 | unitdata.kv().set('squid-cache-mem', squid_cache_mem) |
1041 | unitdata.kv().set('squid-cache-disk', squid_cache_disk) |
1042 | - LOG('Squid sizing complete. In-memory cache: {}MB ' |
1043 | - 'On-disk cache: {}MB'.format(unitdata.kv().get('squid-cache-mem'), |
1044 | - unitdata.kv().get('squid-cache-disk')), |
1045 | - hookenv.INFO) |
1046 | + LOG( |
1047 | + 'Squid sizing complete. In-memory cache: {}MB ' |
1048 | + 'On-disk cache: {}MB'.format(unitdata.kv().get('squid-cache-mem'), unitdata.kv().get('squid-cache-disk')), |
1049 | + hookenv.INFO, |
1050 | + ) |
1051 | |
1052 | |
1053 | def install(): |
1054 | @@ -153,10 +147,10 @@ |
1055 | |
1056 | |
1057 | def cache_dirs(): |
1058 | - '''Generate 'cache_dir' lines for the squid configuration file. |
1059 | + """Generate 'cache_dir' lines for the squid configuration file. |
1060 | |
1061 | Returns a string containing the cache_dir line(s) |
1062 | - ''' |
1063 | + """ |
1064 | cache_str = '' |
1065 | config = hookenv.config() |
1066 | max_size = unitdata.kv().get('squid-cache-disk') |
1067 | @@ -167,9 +161,7 @@ |
1068 | size = int(min(max_size, free)) |
1069 | |
1070 | max_size -= size |
1071 | - cache_str = ''.join((cache_str, |
1072 | - 'cache_dir aufs {} {} 16 256\n'.format(path, |
1073 | - size))) |
1074 | + cache_str = ''.join((cache_str, 'cache_dir aufs {} {} 16 256\n'.format(path, size))) |
1075 | return cache_str |
1076 | |
1077 | |
1078 | @@ -185,7 +177,9 @@ |
1079 | context['Cache_Peers'] = peer_config() |
1080 | context['Cache_Hostname'] = hookenv.unit_private_ip() |
1081 | if config['squid_snmp']: |
1082 | - context['Snmp_Config'] = """acl snmppublic snmp_community public |
1083 | + context[ |
1084 | + 'Snmp_Config' |
1085 | + ] = """acl snmppublic snmp_community public |
1086 | snmp_port 3401 |
1087 | snmp_access allow snmppublic localhost |
1088 | snmp_access deny all |
1089 | @@ -193,15 +187,13 @@ |
1090 | else: |
1091 | context['Snmp_Config'] = "" |
1092 | |
1093 | - templating.render('{}/{}.conf'.format(SERVICE, SERVICE), |
1094 | - '/etc/{}/{}.conf'.format(SERVICE, SERVICE), |
1095 | - context) |
1096 | + templating.render('{}/{}.conf'.format(SERVICE, SERVICE), '/etc/{}/{}.conf'.format(SERVICE, SERVICE), context) |
1097 | |
1098 | peer_context = {} |
1099 | peer_context['Cache_Sibling_IPs'] = '\n'.join(util.peer_IPs()) |
1100 | - templating.render('{}/allowed-networks-src.conf'.format(SERVICE), |
1101 | - '/etc/{}/allowed-networks-src.acl'.format(SERVICE), |
1102 | - peer_context) |
1103 | + templating.render( |
1104 | + '{}/allowed-networks-src.conf'.format(SERVICE), '/etc/{}/allowed-networks-src.acl'.format(SERVICE), peer_context |
1105 | + ) |
1106 | |
1107 | dstdomain_context = {} |
1108 | # if 99-ubuntu-repository-cache contains a repository already allowed in |
1109 | @@ -220,7 +212,7 @@ |
1110 | '^extras\\.ubuntu\\.com$|' |
1111 | '\\.extras\\.ubuntu\\.com$|' |
1112 | '^changelogs\\.ubuntu\\.com$' |
1113 | - ) |
1114 | + ) |
1115 | if default_dstdomains.match(config['sync-host']): |
1116 | dstdomain_context['Sync_Host'] = '' |
1117 | else: |
1118 | @@ -228,9 +220,9 @@ |
1119 | |
1120 | templating.render( |
1121 | '{}/mirror-dstdomain.acl'.format(SERVICE), |
1122 | - '/etc/{}/mirror-dstdomain.acl.d/99-ubuntu-repository-cache'.format( |
1123 | - SERVICE), |
1124 | - dstdomain_context) |
1125 | + '/etc/{}/mirror-dstdomain.acl.d/99-ubuntu-repository-cache'.format(SERVICE), |
1126 | + dstdomain_context, |
1127 | + ) |
1128 | |
1129 | |
1130 | def update_checks(nrpe_config): |
1131 | @@ -240,6 +232,7 @@ |
1132 | nrpe_config.add_check( |
1133 | shortname='squid', |
1134 | description='squid serving archive pool', |
1135 | - check_cmd=('check_http --hostname localhost --url "/{}/pool/" ' |
1136 | - '--string "main/" --expect \"200\\ OK\"').format(path_base), |
1137 | + check_cmd=( |
1138 | + 'check_http --hostname localhost --url "/{}/pool/" ' '--string "main/" --expect \"200\\ OK\"' |
1139 | + ).format(path_base), |
1140 | ) |
1141 | |
1142 | === modified file 'lib/ubuntu_repository_cache/storage.py' |
1143 | --- lib/ubuntu_repository_cache/storage.py 2020-07-13 00:08:07 +0000 |
1144 | +++ lib/ubuntu_repository_cache/storage.py 2020-10-06 10:05:01 +0000 |
1145 | @@ -50,17 +50,14 @@ |
1146 | |
1147 | # Apache serves from www/ and metadata is in data/ |
1148 | _mkdir(apache_root, owner='www-data', group='www-data', perms=0o775) |
1149 | - _mkdir('/'.join((apache_root, 'www')), |
1150 | - owner='www-data', group='www-data', perms=0o775) |
1151 | - _mkdir('/'.join((apache_root, 'data')), |
1152 | - owner='www-data', group='www-data', perms=0o775) |
1153 | + _mkdir('/'.join((apache_root, 'www')), owner='www-data', group='www-data', perms=0o775) |
1154 | + _mkdir('/'.join((apache_root, 'data')), owner='www-data', group='www-data', perms=0o775) |
1155 | # XXX support multi-directory path-base |
1156 | - host.symlink('/'.join((apache_root, 'data', 'ubuntu_active')), |
1157 | - '/'.join((apache_root, 'www', config['path-base']))) |
1158 | + host.symlink('/'.join((apache_root, 'data', 'ubuntu_active')), '/'.join((apache_root, 'www', config['path-base']))) |
1159 | |
1160 | |
1161 | def _setup_packages_path(primary_mount, secondary_mounts): |
1162 | - '''Create the squid cache directorys for package caches. |
1163 | + """Create the squid cache directorys for package caches. |
1164 | |
1165 | The config 'squid-disk-caches' is populated with the path to each |
1166 | cache directory and the maximum space available, in MB, for that mount. |
1167 | @@ -71,7 +68,7 @@ |
1168 | be sized based on system memory, so some available disk space may |
1169 | not be used. In that case, having the mount used for metadata |
1170 | mirrors last would let any additional space be used for that. |
1171 | - ''' |
1172 | + """ |
1173 | |
1174 | squid_disk_caches = [] |
1175 | for mnt in secondary_mounts: |
1176 | @@ -90,9 +87,7 @@ |
1177 | # See https://pastebin.ubuntu.com/p/ftCFNMx947/ |
1178 | reserve = (20 + 5 + 4) * 1024 |
1179 | if max_size < reserve: |
1180 | - LOG('Less than {}MB disk free @ {} for mirror.'.format(reserve, |
1181 | - primary_mount), |
1182 | - hookenv.ERROR) |
1183 | + LOG('Less than {}MB disk free @ {} for mirror.'.format(reserve, primary_mount), hookenv.ERROR) |
1184 | else: |
1185 | max_size = (max_size - reserve) * 0.75 |
1186 | squid_disk_caches.append([squid_cache, max_size]) |
1187 | @@ -101,7 +96,7 @@ |
1188 | |
1189 | |
1190 | def _setup_ephemeral(devices=None): |
1191 | - '''Formats and mounts ephemeral devices |
1192 | + """Formats and mounts ephemeral devices |
1193 | |
1194 | Each ephemeral device is formated with ext4 and provided a label |
1195 | of the format '<service_name>_<#>' |
1196 | @@ -111,7 +106,7 @@ |
1197 | |
1198 | An fstab entry is added for each device to ensure the device is |
1199 | mounted on reboot. |
1200 | - ''' |
1201 | + """ |
1202 | |
1203 | ephemeral_mounts = [] |
1204 | if not devices: |
1205 | @@ -135,8 +130,7 @@ |
1206 | partitions = glob.glob(dev + '*').remove(dev) or [] |
1207 | for part in partitions: |
1208 | if Storage.is_device_mounted(part): |
1209 | - LOG('Ephemeral device had a mounted partition %s skipping' % |
1210 | - part, hookenv.ERROR) |
1211 | + LOG('Ephemeral device had a mounted partition %s skipping' % part, hookenv.ERROR) |
1212 | continue |
1213 | |
1214 | if Storage.is_device_mounted(dev): |
1215 | @@ -166,27 +160,25 @@ |
1216 | # Add to fstab by label, not device name |
1217 | _mkdir(mountpoint, perms=0o775) |
1218 | devname = '='.join(('LABEL', label)) |
1219 | - ftab.add_entry(fstab.Fstab.Entry(devname, mountpoint, 'ext4', |
1220 | - options=None)) |
1221 | + ftab.add_entry(fstab.Fstab.Entry(devname, mountpoint, 'ext4', options=None)) |
1222 | ftab.close() |
1223 | |
1224 | subprocess.check_call(['mount', mountpoint]) |
1225 | os.chmod(mountpoint, 0o775) |
1226 | ephemeral_mounts.append(mountpoint) |
1227 | devno += 1 |
1228 | - LOG('Ephemeral storage mounted @ {}'.format(ephemeral_mounts), |
1229 | - hookenv.DEBUG) |
1230 | + LOG('Ephemeral storage mounted @ {}'.format(ephemeral_mounts), hookenv.DEBUG) |
1231 | unitdata.kv().set('ephemeral-mounts', ephemeral_mounts) |
1232 | |
1233 | |
1234 | def _mkdir(path, owner='root', group='root', perms=0o555): |
1235 | - '''Make directories recursively with a umask=0o000. |
1236 | + """Make directories recursively with a umask=0o000. |
1237 | |
1238 | This storage module makes calls to mkdir repeatedly with permissions |
1239 | that include group write permission. This would be masked by the |
1240 | default 0o022 umask. This simple helper function will clear |
1241 | the umask prior to calling host.mkdir and then restore it when done. |
1242 | - ''' |
1243 | + """ |
1244 | |
1245 | prev_umask = os.umask(0o000) |
1246 | host.mkdir(path, owner=owner, group=group, perms=perms) |
1247 | @@ -210,9 +202,7 @@ |
1248 | return |
1249 | |
1250 | try: |
1251 | - LOG('Adding check for ephemeral storage {}'.format( |
1252 | - unitdata.kv().get('ephemeral-mounts')), |
1253 | - hookenv.DEBUG) |
1254 | + LOG('Adding check for ephemeral storage {}'.format(unitdata.kv().get('ephemeral-mounts')), hookenv.DEBUG) |
1255 | except KeyError: |
1256 | LOG('No checks added for ephemeral storage', hookenv.DEBUG) |
1257 | return |
1258 | @@ -220,6 +210,5 @@ |
1259 | nrpe_config.add_check( |
1260 | shortname='disks-ephemeral', |
1261 | description='disk space on ephemeral volumes', |
1262 | - check_cmd='check_disk -w 15 -c 5 -r ' |
1263 | - '^/srv/ubuntu-repository-cache/ephemeral/[0-9].*$', |
1264 | - ) |
1265 | + check_cmd='check_disk -w 15 -c 5 -r ' '^/srv/ubuntu-repository-cache/ephemeral/[0-9].*$', |
1266 | + ) |
1267 | |
1268 | === modified file 'lib/ubuntu_repository_cache/tests/test_metadata_sync.py' |
1269 | --- lib/ubuntu_repository_cache/tests/test_metadata_sync.py 2020-08-11 04:12:28 +0000 |
1270 | +++ lib/ubuntu_repository_cache/tests/test_metadata_sync.py 2020-10-06 10:05:01 +0000 |
1271 | @@ -21,20 +21,22 @@ |
1272 | a41cb792e56b512948b9dd35d3c974497d84f1ea 47334442 something-else |
1273 | """ |
1274 | |
1275 | -RELEASE_FILE_WITH_SHA256 = RELEASE_FILE_CONTENT + """ |
1276 | +RELEASE_FILE_WITH_SHA256 = ( |
1277 | + RELEASE_FILE_CONTENT |
1278 | + + """ |
1279 | SHA256: |
1280 | 36193e81e070279baaf8adce44305b02c3fd029f6c0f1497d9bd044ab2a633aa 435597291 foo |
1281 | e653b6621b1d77144d86421927ae7f9466f9a6b7012fa4b9a51cdf68b333a23a 47334442 bar |
1282 | """ |
1283 | +) |
1284 | |
1285 | |
1286 | EXPECTED_FILES_DICT = OrderedDict( |
1287 | - [('foo', {'SHA256': '36193e81e070279baaf8adce44305b02c3fd029f6c0f1497d9bd' |
1288 | - '044ab2a633aa', |
1289 | - 'size': '435597291'}), |
1290 | - ('bar', {'SHA256': 'e653b6621b1d77144d86421927ae7f9466f9a6b7012fa4b9a51c' |
1291 | - 'df68b333a23a', |
1292 | - 'size': '47334442'})]) |
1293 | + [ |
1294 | + ('foo', {'SHA256': '36193e81e070279baaf8adce44305b02c3fd029f6c0f1497d9bd' '044ab2a633aa', 'size': '435597291'}), |
1295 | + ('bar', {'SHA256': 'e653b6621b1d77144d86421927ae7f9466f9a6b7012fa4b9a51c' 'df68b333a23a', 'size': '47334442'}), |
1296 | + ] |
1297 | +) |
1298 | |
1299 | |
1300 | FakeException = namedtuple('FakeException', 'returncode') |
1301 | @@ -55,30 +57,24 @@ |
1302 | |
1303 | |
1304 | class MirrorTestCase(unittest.TestCase): |
1305 | - |
1306 | def test_rsync_sends_rsync_command(self): |
1307 | """ |
1308 | The rsync function executes a system rsync shell with the passed in |
1309 | parameters. |
1310 | """ |
1311 | + |
1312 | def fake_check_output(cmd, stderr): |
1313 | self.assertEqual(stderr, subprocess.STDOUT) |
1314 | - self.assertEqual( |
1315 | - ["/usr/bin/rsync", "-r", "--foo", "--bar", "source", "dest"], |
1316 | - cmd) |
1317 | + self.assertEqual(["/usr/bin/rsync", "-r", "--foo", "--bar", "source", "dest"], cmd) |
1318 | return b"called " # Note: real check_output returns bytes. |
1319 | |
1320 | log = InvocationRecorder() |
1321 | |
1322 | options = ("--foo", "--bar") |
1323 | - result = metadata_sync.rsync( |
1324 | - "source", "dest", options, log, |
1325 | - check_output=fake_check_output) |
1326 | + result = metadata_sync.rsync("source", "dest", options, log, check_output=fake_check_output) |
1327 | |
1328 | self.assertEqual("called", result) # "rsync" decodes and strips |
1329 | - self.assertEqual( |
1330 | - [[("/usr/bin/rsync -r --foo --bar source dest",), {}]], |
1331 | - log.invocations) |
1332 | + self.assertEqual([[("/usr/bin/rsync -r --foo --bar source dest",), {}]], log.invocations) |
1333 | |
1334 | def test_parse_release_file_no_sha256(self): |
1335 | """ |
1336 | @@ -93,9 +89,11 @@ |
1337 | temp.write(RELEASE_FILE_CONTENT) |
1338 | temp.flush() |
1339 | result = metadata_sync._parse_release_file(temp_dir) |
1340 | - expected = {'files': OrderedDict(), |
1341 | - 'name': os.path.basename(temp_dir), |
1342 | - 'mtime': os.stat(temp_dir + "/Release").st_mtime} |
1343 | + expected = { |
1344 | + 'files': OrderedDict(), |
1345 | + 'name': os.path.basename(temp_dir), |
1346 | + 'mtime': os.stat(temp_dir + "/Release").st_mtime, |
1347 | + } |
1348 | self.assertEqual(expected, result) |
1349 | finally: |
1350 | shutil.rmtree(temp_dir) |
1351 | @@ -113,9 +111,11 @@ |
1352 | temp.write(RELEASE_FILE_WITH_SHA256) |
1353 | temp.flush() |
1354 | result = metadata_sync._parse_release_file(temp_dir) |
1355 | - expected = {'name': os.path.basename(temp_dir.strip("/")), |
1356 | - 'mtime': os.stat(temp_dir + "/Release").st_mtime, |
1357 | - 'files': EXPECTED_FILES_DICT} |
1358 | + expected = { |
1359 | + 'name': os.path.basename(temp_dir.strip("/")), |
1360 | + 'mtime': os.stat(temp_dir + "/Release").st_mtime, |
1361 | + 'files': EXPECTED_FILES_DICT, |
1362 | + } |
1363 | self.assertEqual(expected, result) |
1364 | finally: |
1365 | shutil.rmtree(temp_dir) |
1366 | @@ -128,8 +128,7 @@ |
1367 | |
1368 | def test_rsync_should_retry_do_retry(self): |
1369 | """Retry for whitelisted exit codes.""" |
1370 | - retriables = frozenset(( |
1371 | - 5, 6, 10, 11, 12, 13, 14, 20, 21, 22, 23, 24, 25, 30, 35)) |
1372 | + retriables = frozenset((5, 6, 10, 11, 12, 13, 14, 20, 21, 22, 23, 24, 25, 30, 35)) |
1373 | |
1374 | for i in retriables: |
1375 | exe = FakeException(returncode=i) |
1376 | @@ -144,16 +143,18 @@ |
1377 | """ |
1378 | The main function fails should an environment variable be missing. |
1379 | """ |
1380 | + |
1381 | def noop(*args, **kwargs): |
1382 | return "Called" |
1383 | |
1384 | - env = {"SYNC_HOST": "foo.com", |
1385 | - "MIRROR_SERIES": "xenial", |
1386 | - "APACHE_ROOT": "/var/www/", |
1387 | - "LOCAL_UNIT": "unit/0", |
1388 | - "UNIT_PATH": "something"} |
1389 | - result = metadata_sync.main( |
1390 | - env, InvocationRecorder(), mirror_archive=noop, check_call=noop) |
1391 | + env = { |
1392 | + "SYNC_HOST": "foo.com", |
1393 | + "MIRROR_SERIES": "xenial", |
1394 | + "APACHE_ROOT": "/var/www/", |
1395 | + "LOCAL_UNIT": "unit/0", |
1396 | + "UNIT_PATH": "something", |
1397 | + } |
1398 | + result = metadata_sync.main(env, InvocationRecorder(), mirror_archive=noop, check_call=noop) |
1399 | self.assertEqual("Called", result) |
1400 | |
1401 | def test_main_calls_juju_run(self): |
1402 | @@ -161,11 +162,13 @@ |
1403 | The main() function calls juju run once it determined the meta_ver from |
1404 | syncing with upstream. |
1405 | """ |
1406 | - env = {"SYNC_HOST": "foo.com", |
1407 | - "MIRROR_SERIES": "xenial", |
1408 | - "APACHE_ROOT": "/var/www/", |
1409 | - "LOCAL_UNIT": "unit/0", |
1410 | - "UNIT_PATH": "something"} |
1411 | + env = { |
1412 | + "SYNC_HOST": "foo.com", |
1413 | + "MIRROR_SERIES": "xenial", |
1414 | + "APACHE_ROOT": "/var/www/", |
1415 | + "LOCAL_UNIT": "unit/0", |
1416 | + "UNIT_PATH": "something", |
1417 | + } |
1418 | |
1419 | def fake_mirror_archive(*args, **kwargs): |
1420 | return "some_meta_var" |
1421 | @@ -173,17 +176,25 @@ |
1422 | fake_check_call = InvocationRecorder() |
1423 | |
1424 | result = metadata_sync.main( |
1425 | - env, InvocationRecorder(), mirror_archive=fake_mirror_archive, |
1426 | - check_call=fake_check_call) |
1427 | + env, InvocationRecorder(), mirror_archive=fake_mirror_archive, check_call=fake_check_call |
1428 | + ) |
1429 | |
1430 | self.assertEqual("some_meta_var", result) |
1431 | |
1432 | expected = [ |
1433 | - [(['juju-run', |
1434 | - 'unit/0', |
1435 | - '/usr/local/sbin/charm-env ' |
1436 | - '/var/lib/juju/agents/something/charm/hooks/' |
1437 | - 'ubuntu-repository-cache-sync some_meta_var'],), {}]] |
1438 | + [ |
1439 | + ( |
1440 | + [ |
1441 | + 'juju-run', |
1442 | + 'unit/0', |
1443 | + '/usr/local/sbin/charm-env ' |
1444 | + '/var/lib/juju/agents/something/charm/hooks/' |
1445 | + 'ubuntu-repository-cache-sync some_meta_var', |
1446 | + ], |
1447 | + ), |
1448 | + {}, |
1449 | + ] |
1450 | + ] |
1451 | |
1452 | self.assertEqual(expected, fake_check_call.invocations) |
1453 | |
1454 | @@ -201,9 +212,9 @@ |
1455 | "LOCAL_UNIT": "unit/0", |
1456 | "UNIT_PATH": "something", |
1457 | } |
1458 | - key_combinations = itertools.chain(*[ |
1459 | - itertools.combinations(full_dict.keys(), n) |
1460 | - for n in range(len(full_dict.keys()))]) |
1461 | + key_combinations = itertools.chain( |
1462 | + *[itertools.combinations(full_dict.keys(), n) for n in range(len(full_dict.keys()))] |
1463 | + ) |
1464 | |
1465 | def do_test(env): |
1466 | assert_raises(KeyError, metadata_sync.main, env, InvocationRecorder()) |
1467 | |
1468 | === modified file 'lib/ubuntu_repository_cache/tests/test_util.py' |
1469 | --- lib/ubuntu_repository_cache/tests/test_util.py 2020-08-11 04:12:28 +0000 |
1470 | +++ lib/ubuntu_repository_cache/tests/test_util.py 2020-10-06 10:05:01 +0000 |
1471 | @@ -7,7 +7,6 @@ |
1472 | |
1473 | |
1474 | class SemaphoreTest(unittest.TestCase): |
1475 | - |
1476 | def setUp(self): |
1477 | self.test_dir = tempfile.mkdtemp() |
1478 | |
1479 | @@ -35,7 +34,6 @@ |
1480 | |
1481 | |
1482 | class TouchTest(unittest.TestCase): |
1483 | - |
1484 | def setUp(self): |
1485 | self.test_dir = tempfile.mkdtemp() |
1486 | |
1487 | |
1488 | === modified file 'lib/ubuntu_repository_cache/util.py' |
1489 | --- lib/ubuntu_repository_cache/util.py 2020-05-07 05:26:37 +0000 |
1490 | +++ lib/ubuntu_repository_cache/util.py 2020-10-06 10:05:01 +0000 |
1491 | @@ -29,19 +29,18 @@ |
1492 | '''An error raised when the desired semaphore name already exists.''' |
1493 | |
1494 | |
1495 | -class FileSemaphore(): |
1496 | - '''A context manager that creates and deletes semaphores at the given path. |
1497 | +class FileSemaphore: |
1498 | + """A context manager that creates and deletes semaphores at the given path. |
1499 | |
1500 | It will raise SemaphoreExistsError if the "lock" can't be acquired, and |
1501 | - will cleanup after itself on exit.''' |
1502 | + will cleanup after itself on exit.""" |
1503 | |
1504 | def __init__(self, filename): |
1505 | self.filename = filename |
1506 | |
1507 | def __enter__(self): |
1508 | if os.path.exists(self.filename): |
1509 | - raise SemaphoreExistsError( |
1510 | - "File '{}' already exists.".format(self.filename)) |
1511 | + raise SemaphoreExistsError("File '{}' already exists.".format(self.filename)) |
1512 | touch(self.filename) |
1513 | |
1514 | def __exit__(self, *args): |
1515 | @@ -49,9 +48,9 @@ |
1516 | |
1517 | |
1518 | def touch(filename): |
1519 | - '''Make an empty file at the passed in filename. |
1520 | + """Make an empty file at the passed in filename. |
1521 | |
1522 | - Its modified access time is set to "now".''' |
1523 | + Its modified access time is set to "now".""" |
1524 | with open(filename, 'a'): |
1525 | os.utime(filename, None) |
1526 | |
1527 | @@ -75,11 +74,11 @@ |
1528 | |
1529 | |
1530 | def get_failover_host(): |
1531 | - '''Find the private address of the failover host. |
1532 | + """Find the private address of the failover host. |
1533 | |
1534 | The next highest numbered unit in the cluster relationship is |
1535 | chosen as the host for failover. Its private address is returned.
1536 | - ''' |
1537 | + """ |
1538 | |
1539 | if not hookenv.is_relation_made('cluster'): |
1540 | return None |
1541 | @@ -98,15 +97,12 @@ |
1542 | |
1543 | # Filter the list to find all units with a higher unit id, will |
1544 | # wrap to lowest unit number |
1545 | - cluster_ids = [x for x in cluster_ids if x > local_id] or \ |
1546 | - [x for x in cluster_ids if x < local_id] |
1547 | + cluster_ids = [x for x in cluster_ids if x > local_id] or [x for x in cluster_ids if x < local_id] |
1548 | if not cluster_ids: |
1549 | return None |
1550 | |
1551 | failover_unit = '%s/%s' % (hookenv.service_name(), cluster_ids[0]) |
1552 | - return hookenv.relation_get(attribute='private-address', |
1553 | - unit=failover_unit, |
1554 | - rid=rel_id) |
1555 | + return hookenv.relation_get(attribute='private-address', unit=failover_unit, rid=rel_id) |
1556 | |
1557 | |
1558 | def restore_saved_id(): |
1559 | @@ -118,14 +114,15 @@ |
1560 | |
1561 | # Submit MP to include this in charmhelpers.core.host |
1562 | def run_as_user(user_name): |
1563 | - '''A decorator to run a function as a particular user. |
1564 | + """A decorator to run a function as a particular user. |
1565 | |
1566 | This works as long as the saved uid (see os.getresuid()[2]) |
1567 | has permission to change the effective to that of the |
1568 | desired uid/gid of the provided user name. |
1569 | |
1570 | The $HOME environment variable is also set to the particular user. |
1571 | - ''' |
1572 | + """ |
1573 | + |
1574 | def decorator(function): |
1575 | @functools.wraps(function) |
1576 | def wrapper(*args, **kwargs): |
1577 | @@ -155,7 +152,9 @@ |
1578 | del os.environ['HOME'] |
1579 | |
1580 | return result |
1581 | + |
1582 | return wrapper |
1583 | + |
1584 | return decorator |
1585 | |
1586 | |
1587 | @@ -167,12 +166,12 @@ |
1588 | |
1589 | |
1590 | def change_aptsources_url(url='http://archive.ubuntu.com/ubuntu'): |
1591 | - '''Change the URL for the current apt sources |
1592 | + """Change the URL for the current apt sources |
1593 | |
1594 | This affects the URL for all sources in /etc/apt/sources.list |
1595 | and /etc/apt/sources.list.d/ so care must be taken if the charm |
1596 | has added any custom source URLs as it will alter them as well. |
1597 | - ''' |
1598 | + """ |
1599 | import aptsources.sourceslist |
1600 | import aptsources.distro |
1601 | |
1602 | @@ -192,16 +191,15 @@ |
1603 | ips = [] |
1604 | for ident in hookenv.relation_ids(reltype=reltype): |
1605 | for unit in hookenv.related_units(relid=ident): |
1606 | - ips.append(hookenv.relation_get(attribute='private-address', |
1607 | - unit=unit, rid=ident)) |
1608 | + ips.append(hookenv.relation_get(attribute='private-address', unit=unit, rid=ident)) |
1609 | return ips |
1610 | |
1611 | |
1612 | def get_active_metaver(): |
1613 | - '''Get the metadata version of the active metadata |
1614 | + """Get the metadata version of the active metadata |
1615 | |
1616 | Returns version or None |
1617 | - ''' |
1618 | + """ |
1619 | |
1620 | apache_data = '/'.join((unitdata.kv().get('apache-root'), 'data')) |
1621 | path = os.path.realpath('/'.join((apache_data, 'ubuntu_active'))) |
1622 | @@ -212,8 +210,8 @@ |
1623 | |
1624 | |
1625 | def update_active_symlink(meta_version, apache_root=None): |
1626 | - '''Update the symlink to the active metadata to point to the |
1627 | - metadata for meta_version.''' |
1628 | + """Update the symlink to the active metadata to point to the |
1629 | + metadata for meta_version.""" |
1630 | |
1631 | if apache_root: |
1632 | root = apache_root |
1633 | @@ -231,17 +229,11 @@ |
1634 | |
1635 | |
1636 | def remote_path_exists(user, host_ip, path, localuser='www-sync'): |
1637 | - '''Use ssh to determine if a remote path exists. |
1638 | - |
1639 | - Remotely execute the stat(1) command on the remote host.''' |
1640 | - |
1641 | - cmd = ('sudo', |
1642 | - '-u', |
1643 | - localuser, |
1644 | - 'ssh', |
1645 | - '{}@{}'.format(user, host_ip), |
1646 | - 'stat {}'.format(path) |
1647 | - ) |
1648 | + """Use ssh to determine if a remote path exists. |
1649 | + |
1650 | + Remotely execute the stat(1) command on the remote host.""" |
1651 | + |
1652 | + cmd = ('sudo', '-u', localuser, 'ssh', '{}@{}'.format(user, host_ip), 'stat {}'.format(path)) |
1653 | try: |
1654 | subprocess.check_call(cmd) |
1655 | except subprocess.CalledProcessError as excp: |
1656 | |
1657 | === modified file 'reactive/ubuntu-repository-cache.py' |
1658 | --- reactive/ubuntu-repository-cache.py 2020-08-11 05:04:02 +0000 |
1659 | +++ reactive/ubuntu-repository-cache.py 2020-10-06 10:05:01 +0000 |
1660 | @@ -82,25 +82,22 @@ |
1661 | try: |
1662 | with util.FileSemaphore(util.SEMAPHORE_FILE): |
1663 | meta_ver = metadata_sync.mirror_archive( |
1664 | - sync_host, |
1665 | - unitdata.kv().get('apache-root'), |
1666 | - hookenv.local_unit(), |
1667 | - LOG, |
1668 | - mirror_series) |
1669 | + sync_host, unitdata.kv().get('apache-root'), hookenv.local_unit(), LOG, mirror_series |
1670 | + ) |
1671 | except util.SemaphoreExistsError: |
1672 | - LOG('Another metadata sync seems to be running (semaphore found ' |
1673 | - 'at {}). Bailing out.'.format(util.SEMAPHORE_FILE)) |
1674 | + LOG( |
1675 | + 'Another metadata sync seems to be running (semaphore found ' |
1676 | + 'at {}). Bailing out.'.format(util.SEMAPHORE_FILE) |
1677 | + ) |
1678 | raise |
1679 | |
1680 | if meta_ver: |
1681 | - LOG('Initial sync from {} completed ver={}.'.format(sync_host, |
1682 | - meta_ver)) |
1683 | + LOG('Initial sync from {} completed ver={}.'.format(sync_host, meta_ver)) |
1684 | service.start() |
1685 | else: |
1686 | LOG('Leader could not gather new metadata', hookenv.WARNING) |
1687 | else: |
1688 | - LOG('Skipping initial content sync, "sync-on-start" config ' |
1689 | - 'option was not set') |
1690 | + LOG('Skipping initial content sync, "sync-on-start" config ' 'option was not set') |
1691 | |
1692 | |
1693 | @reactive.hook('stop') |
1694 | @@ -116,8 +113,7 @@ |
1695 | '''Provide hostname/port for the website relationship''' |
1696 | |
1697 | LOG('Website relation joined for %s' % SERVICE) |
1698 | - settings = {'hostname': hookenv.unit_private_ip(), |
1699 | - 'port': 80} |
1700 | + settings = {'hostname': hookenv.unit_private_ip(), 'port': 80} |
1701 | hookenv.relation_set(relation_settings=settings) |
1702 | |
1703 | |
1704 | @@ -126,8 +122,7 @@ |
1705 | '''Join a peer cluster relationship''' |
1706 | |
1707 | LOG('Cluster relation joined for %s' % SERVICE) |
1708 | - unison.ssh_authorized_peers(peer_interface='cluster', |
1709 | - user='www-sync', group='www-data') |
1710 | + unison.ssh_authorized_peers(peer_interface='cluster', user='www-sync', group='www-data') |
1711 | # LP:1770071: Work around charm-helper's unison ownership |
1712 | # https://github.com/juju/charm-helpers/issues/487 |
1713 | _fix_ssh_ownership(user='www-sync') |
1714 | @@ -140,8 +135,7 @@ |
1715 | '''Handle peer cluster relationship changes''' |
1716 | |
1717 | LOG('Cluster relation changed for %s' % SERVICE) |
1718 | - unison.ssh_authorized_peers(peer_interface='cluster', |
1719 | - user='www-sync', group='www-data') |
1720 | + unison.ssh_authorized_peers(peer_interface='cluster', user='www-sync', group='www-data') |
1721 | # LP:1770071: Work around charm-helper's unison ownership |
1722 | # https://github.com/juju/charm-helpers/issues/487 |
1723 | _fix_ssh_ownership(user='www-sync') |
1724 | @@ -154,8 +148,7 @@ |
1725 | '''Handle peer cluster relationship departures''' |
1726 | |
1727 | LOG('Cluster relation departed for %s' % SERVICE) |
1728 | - unison.ssh_authorized_peers(peer_interface='cluster', |
1729 | - user='www-sync', group='www-data') |
1730 | + unison.ssh_authorized_peers(peer_interface='cluster', user='www-sync', group='www-data') |
1731 | # LP:1770071: Work around charm-helper's unison ownership |
1732 | # https://github.com/juju/charm-helpers/issues/487 |
1733 | _fix_ssh_ownership(user='www-sync') |
1734 | @@ -200,8 +193,7 @@ |
1735 | if not hookenv.is_leader(): |
1736 | LOG('leader-elected fired. This is not the leader') |
1737 | return |
1738 | - LOG('leader-elected fired. This unit is the new leader: {}'.format( |
1739 | - hookenv.local_unit())) |
1740 | + LOG('leader-elected fired. This unit is the new leader: {}'.format(hookenv.local_unit())) |
1741 | hookenv.leader_set(leader_id=hookenv.local_unit()) |
1742 | mirror.peer_update_metadata() |
1743 | service.render_configs() |
1744 | |
1745 | === modified file 'tests/util.py' |
1746 | --- tests/util.py 2020-05-07 05:29:31 +0000 |
1747 | +++ tests/util.py 2020-10-06 10:05:01 +0000 |
1748 | @@ -33,19 +33,19 @@ |
1749 | |
1750 | |
1751 | def unit_name(unit): |
1752 | - ''' |
1753 | + """ |
1754 | Returns the juju unit name for the given amulet unit object. |
1755 | - ''' |
1756 | + """ |
1757 | return unit.info['unit_name'] |
1758 | |
1759 | |
1760 | def send_script(unit, path, script): |
1761 | - ''' |
1762 | + """ |
1763 | Write out the provided script contents to the specified path and unit. |
1764 | |
1765 | Writes one line out at a time using ssh since there is no other convenient |
1766 | amulet method to do so. Works fine for short scripts. |
1767 | - ''' |
1768 | + """ |
1769 | unit.ssh('sudo rm -f {}'.format(path)) |
1770 | for line in script.splitlines(): |
1771 | command = 'sudo echo \'{}\' >> {}'.format(line, path) |
1772 | @@ -54,20 +54,22 @@ |
1773 | |
1774 | |
1775 | def execute(unit, command): |
1776 | - ''' |
1777 | + """ |
1778 | Executes a command on the given unit via a wrapper script. |
1779 | |
1780 | The amulet ssh method provides excessively verbose output on errors. |
1781 | This wrapper gives the test more control over the output and result. |
1782 | - ''' |
1783 | + """ |
1784 | log('INFO: Executing: {}'.format(command)) |
1785 | unit.ssh('sudo rm -f /tmp/command-runner.result') |
1786 | unit.ssh('sudo rm -f /tmp/command-runner.log') |
1787 | - runner_template = '\n'.join([ |
1788 | - '#!/bin/bash', |
1789 | - '{} > /tmp/command-runner.log 2>&1', |
1790 | - 'echo $? > /tmp/command-runner.result', |
1791 | - ]) |
1792 | + runner_template = '\n'.join( |
1793 | + [ |
1794 | + '#!/bin/bash', |
1795 | + '{} > /tmp/command-runner.log 2>&1', |
1796 | + 'echo $? > /tmp/command-runner.result', |
1797 | + ] |
1798 | + ) |
1799 | runner_script_path = '/tmp/command-runner.sh' |
1800 | runner_script = runner_template.format(command) |
1801 | send_script(unit, runner_script_path, runner_script) |
1802 | @@ -77,43 +79,42 @@ |
1803 | try: |
1804 | return (int(result), output) |
1805 | except ValueError: |
1806 | - msg = ('Command execution failed on {}:\n' |
1807 | - '\texit code: {}\n\toutput: {}'.format( |
1808 | - unit_name(unit), result, output)) |
1809 | + msg = 'Command execution failed on {}:\n' '\texit code: {}\n\toutput: {}'.format( |
1810 | + unit_name(unit), result, output |
1811 | + ) |
1812 | amulet.raise_status(amulet.FAIL, msg=msg) |
1813 | |
1814 | |
1815 | def disable_cron(unit): |
1816 | - ''' |
1817 | + """ |
1818 | Disables the cron service on the given unit. |
1819 | - ''' |
1820 | + """ |
1821 | command = 'sudo systemctl stop cron.service' |
1822 | (result, output) = execute(unit, command) |
1823 | if result: |
1824 | - msg = ('Failed to disable cron on {}:\n' |
1825 | - '\texit code: {}\noutput: {}'.format( |
1826 | - unit_name(unit), result, output)) |
1827 | + msg = 'Failed to disable cron on {}:\n' '\texit code: {}\noutput: {}'.format(unit_name(unit), result, output) |
1828 | amulet.raise_status(amulet.FAIL, msg=msg) |
1829 | |
1830 | |
1831 | def rotate_cron_sync_log(unit): |
1832 | - ''' |
1833 | + """ |
1834 | Rotates the LOG_FILE via logrotate. |
1835 | |
1836 | The end result is an empty LOG_FILE. |
1837 | - ''' |
1838 | + """ |
1839 | command = 'sudo logrotate -f /etc/logrotate.d/apache2' |
1840 | (result, output) = execute(unit, command) |
1841 | if result: |
1842 | # logrotate can return non-zero even if the log file we care |
1843 | # about was rotated |
1844 | - log('INFO: Rotating logs on {} return non-zero result (may be ok):\n' |
1845 | - '\texit code: {}\noutput: {}'.format( |
1846 | - unit_name(unit), result, output)) |
1847 | + log( |
1848 | + 'INFO: Rotating logs on {} return non-zero result (may be ok):\n' |
1849 | + '\texit code: {}\noutput: {}'.format(unit_name(unit), result, output) |
1850 | + ) |
1851 | |
1852 | |
1853 | def attempt_rsync(unit): |
1854 | - ''' |
1855 | + """ |
1856 | Performs an archive sync as specified by the cron file. |
1857 | |
1858 | This extracts the user, environment variables and command from the unit's |
1859 | @@ -121,7 +122,7 @@ |
1860 | |
1861 | It would be preferred to let the cron run, but this allows for easier |
1862 | extraction of any errors. |
1863 | - ''' |
1864 | + """ |
1865 | contents = unit.file_contents(CRON_FILE) |
1866 | lines = contents.splitlines() |
1867 | |
1868 | @@ -136,33 +137,32 @@ |
1869 | command = 'sudo -Hu {} {} {}'.format(cron_user, env_vars, cron_command) |
1870 | (result, output) = execute(unit, command) |
1871 | if result: |
1872 | - msg = ('Failed to execute repository sync command on {}\n' |
1873 | - '\tresult: {}\n\toutput: {}'.format( |
1874 | - unit_name(unit), result, output)) |
1875 | + msg = 'Failed to execute repository sync command on {}\n' '\tresult: {}\n\toutput: {}'.format( |
1876 | + unit_name(unit), result, output |
1877 | + ) |
1878 | amulet.raise_status(amulet.FAIL, msg=msg) |
1879 | |
1880 | |
1881 | def check_message_count(contents, message, count): |
1882 | - ''' |
1883 | + """ |
1884 | Check for the requested message in the content. |
1885 | - ''' |
1886 | + """ |
1887 | found = len([line for line in contents.splitlines() if message in line]) |
1888 | if count == found: |
1889 | log('PASS: "{}" found {} times'.format(message, count)) |
1890 | else: |
1891 | - msg = 'FAIL: "{}" found {} times, expected {}'.format( |
1892 | - message, found, count) |
1893 | + msg = 'FAIL: "{}" found {} times, expected {}'.format(message, found, count) |
1894 | amulet.raise_status(amulet.FAIL, msg=msg) |
1895 | |
1896 | |
1897 | def test_url(url, waitsec=30): |
1898 | - '''Request a url and return the contents. |
1899 | + """Request a url and return the contents. |
1900 | |
1901 | Return: |
1902 | response on success, |
1903 | None on timeout, |
1904 | Raise exception for all other failures |
1905 | - ''' |
1906 | + """ |
1907 | |
1908 | try: |
1909 | page = requests.get(url, timeout=waitsec) |
1910 | @@ -174,9 +174,9 @@ |
1911 | |
1912 | |
1913 | def check_metadata_is_active(unit): |
1914 | - ''' |
1915 | + """ |
1916 | Checks that the synced metadata is available over http. |
1917 | - ''' |
1918 | + """ |
1919 | page = test_url('http://{}/ubuntu'.format(unit.info['public-address'])) |
1920 | if page is None: |
1921 | log('FAIL: Timeout checking metadata on {}'.format(unit_name(unit))) |
1922 | @@ -186,11 +186,10 @@ |
1923 | |
1924 | |
1925 | def check_pool_data_is_active(unit): |
1926 | - ''' |
1927 | + """ |
1928 | Checks that the squid cached pool data is available over http. |
1929 | - ''' |
1930 | - page = test_url('http://{}/ubuntu/pool'.format( |
1931 | - unit.info['public-address'])) |
1932 | + """ |
1933 | + page = test_url('http://{}/ubuntu/pool'.format(unit.info['public-address'])) |
1934 | if page is None: |
1935 | log('FAIL: Timeout checking pool data on {}'.format(unit_name(unit))) |
1936 | return |