Merge ~afreiberger/charm-glance-sync:blacken-20.08 into charm-glance-sync:master
- Git
- lp:~afreiberger/charm-glance-sync
- blacken-20.08
- Merge into master
Proposed by
Drew Freiberger
Status: | Merged |
---|---|
Merged at revision: | b37ab82942fd8266a0998a1ce591aef755fc70de |
Proposed branch: | ~afreiberger/charm-glance-sync:blacken-20.08 |
Merge into: | charm-glance-sync:master |
Prerequisite: | ~afreiberger/charm-glance-sync:makefile-20.08 |
Diff against target: |
2500 lines (+756/-712) 10 files modified
src/files/check_stale_lockfile_master.py (+18/-13) src/files/check_stale_lockfile_slave.py (+18/-13) src/files/db_purge_deleted_master/db_purge_deleted_glance_images.py (+20/-19) src/files/db_purge_deleted_slave/db_purge_deleted_glance_images.py (+11/-11) src/files/glance_sync_master.py (+94/-81) src/files/glance_sync_slave.py (+280/-231) src/reactive/glance_sync.py (+282/-314) src/tests/functional/tests/test_glance_sync.py (+29/-26) src/tests/unit/__init__.py (+2/-1) src/tox.ini (+2/-3) |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Xav Paice (community) | Approve | ||
Review via email: mp+388630@code.launchpad.net |
Commit message
Blackened repository to an 88-character line length and fixed up lint
Description of the change
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | diff --git a/src/files/check_stale_lockfile_master.py b/src/files/check_stale_lockfile_master.py |
2 | index bd6aa3d..341441d 100644 |
3 | --- a/src/files/check_stale_lockfile_master.py |
4 | +++ b/src/files/check_stale_lockfile_master.py |
5 | @@ -5,20 +5,25 @@ import os.path |
6 | import sys |
7 | import time |
8 | |
9 | -('Check the status of lock file to be sure it is not stale, ' |
10 | - 'warn at 7200 seconds crit at 14400 seconds') |
11 | +( |
12 | + "Check the status of lock file to be sure it is not stale, " |
13 | + "warn at 7200 seconds crit at 14400 seconds" |
14 | +) |
15 | |
16 | parser = optparse.OptionParser() |
17 | parser.add_option( |
18 | - '-w', action='store', default='7200', help='seconds to warn', type='int') |
19 | + "-w", action="store", default="7200", help="seconds to warn", type="int" |
20 | +) |
21 | parser.add_option( |
22 | - '-c', action='store', default='14400', help='seconds to crit', type='int') |
23 | + "-c", action="store", default="14400", help="seconds to crit", type="int" |
24 | +) |
25 | parser.add_option( |
26 | - '-f', |
27 | - action='store', |
28 | - default='/tmp/glance_sync_master.lock', |
29 | - help='file to check', |
30 | - type='string') |
31 | + "-f", |
32 | + action="store", |
33 | + default="/tmp/glance_sync_master.lock", |
34 | + help="file to check", |
35 | + type="string", |
36 | +) |
37 | |
38 | options, args = parser.parse_args() |
39 | |
40 | @@ -29,7 +34,7 @@ nagCrit = 2 |
41 | try: |
42 | statInfo = os.stat(options.f) |
43 | except OSError: |
44 | - print('OK: lockfile {} not present'.format(options.f)) |
45 | + print("OK: lockfile {} not present".format(options.f)) |
46 | sys.exit(nagOk) |
47 | |
48 | now = int(time.time()) |
49 | @@ -37,11 +42,11 @@ statInfoSlice = statInfo[8] |
50 | timeDiff = now - statInfoSlice |
51 | |
52 | if timeDiff > options.c: |
53 | - print('CRIT: lock file is older than {} seconds'.format(options.c)) |
54 | + print("CRIT: lock file is older than {} seconds".format(options.c)) |
55 | sys.exit(nagCrit) |
56 | elif timeDiff > options.w: |
57 | - print('WARN: lock file is older than {} seconds'.format(options.w)) |
58 | + print("WARN: lock file is older than {} seconds".format(options.w)) |
59 | sys.exit(nagWarn) |
60 | else: |
61 | - print('OK: lock file is under 3 hours') |
62 | + print("OK: lock file is under 3 hours") |
63 | sys.exit(nagOk) |
64 | diff --git a/src/files/check_stale_lockfile_slave.py b/src/files/check_stale_lockfile_slave.py |
65 | index 1be8b9b..08412be 100644 |
66 | --- a/src/files/check_stale_lockfile_slave.py |
67 | +++ b/src/files/check_stale_lockfile_slave.py |
68 | @@ -5,20 +5,25 @@ import os.path |
69 | import sys |
70 | import time |
71 | |
72 | -('Check the status of lock file to be sure it is not stale, ' |
73 | - 'warn at 7200 seconds crit at 14400 seconds') |
74 | +( |
75 | + "Check the status of lock file to be sure it is not stale, " |
76 | + "warn at 7200 seconds crit at 14400 seconds" |
77 | +) |
78 | |
79 | parser = optparse.OptionParser() |
80 | parser.add_option( |
81 | - '-w', action='store', default='7200', help='seconds to warn', type='int') |
82 | + "-w", action="store", default="7200", help="seconds to warn", type="int" |
83 | +) |
84 | parser.add_option( |
85 | - '-c', action='store', default='14400', help='seconds to crit', type='int') |
86 | + "-c", action="store", default="14400", help="seconds to crit", type="int" |
87 | +) |
88 | parser.add_option( |
89 | - '-f', |
90 | - action='store', |
91 | - default='/tmp/glance_sync_slave.lock', |
92 | - help='file to check', |
93 | - type='string') |
94 | + "-f", |
95 | + action="store", |
96 | + default="/tmp/glance_sync_slave.lock", |
97 | + help="file to check", |
98 | + type="string", |
99 | +) |
100 | |
101 | options, args = parser.parse_args() |
102 | |
103 | @@ -29,7 +34,7 @@ nagCrit = 2 |
104 | try: |
105 | statInfo = os.stat(options.f) |
106 | except OSError: |
107 | - print('OK: lockfile {} not present'.format(options.f)) |
108 | + print("OK: lockfile {} not present".format(options.f)) |
109 | sys.exit(nagOk) |
110 | |
111 | now = int(time.time()) |
112 | @@ -37,11 +42,11 @@ statInfoSlice = statInfo[8] |
113 | timeDiff = now - statInfoSlice |
114 | |
115 | if timeDiff > options.c: |
116 | - print('CRIT: lock file is older than {} seconds'.format(options.c)) |
117 | + print("CRIT: lock file is older than {} seconds".format(options.c)) |
118 | sys.exit(nagCrit) |
119 | elif timeDiff > options.w: |
120 | - print('WARN: lock file is older than {} seconds'.format(options.w)) |
121 | + print("WARN: lock file is older than {} seconds".format(options.w)) |
122 | sys.exit(nagWarn) |
123 | else: |
124 | - print('OK: lock file is under 3 hours') |
125 | + print("OK: lock file is under 3 hours") |
126 | sys.exit(nagOk) |
127 | diff --git a/src/files/db_purge_deleted_master/db_purge_deleted_glance_images.py b/src/files/db_purge_deleted_master/db_purge_deleted_glance_images.py |
128 | index 144383f..bbc97c6 100644 |
129 | --- a/src/files/db_purge_deleted_master/db_purge_deleted_glance_images.py |
130 | +++ b/src/files/db_purge_deleted_master/db_purge_deleted_glance_images.py |
131 | @@ -30,27 +30,28 @@ config = {} |
132 | |
133 | # pull connection information from glance-api.conf |
134 | try: |
135 | - with open('/etc/glance/glance-api.conf', 'r') as conf_file: |
136 | + with open("/etc/glance/glance-api.conf", "r") as conf_file: |
137 | for line in conf_file.readlines(): |
138 | - if line.startswith('connection ='): |
139 | + if line.startswith("connection ="): |
140 | # connection = mysql://glance:<password>@<host>/glance |
141 | - connection = urlparse.urlparse(line.split('=')[1].strip()) |
142 | - config['host'] = connection.hostname |
143 | - config['user'] = connection.username |
144 | - config['password'] = connection.password |
145 | - config['database'] = connection.path[1:].strip() |
146 | + connection = urlparse.urlparse(line.split("=")[1].strip()) |
147 | + config["host"] = connection.hostname |
148 | + config["user"] = connection.username |
149 | + config["password"] = connection.password |
150 | + config["database"] = connection.path[1:].strip() |
151 | break |
152 | except IOError as e: |
153 | sys.exit(e) |
154 | |
155 | try: |
156 | connection = MySQLdb.connect( |
157 | - host=config['host'], |
158 | - user=config['user'], |
159 | - passwd=config['password'], |
160 | - db=config['database']) |
161 | + host=config["host"], |
162 | + user=config["user"], |
163 | + passwd=config["password"], |
164 | + db=config["database"], |
165 | + ) |
166 | except Exception as e: |
167 | - print('ERROR: unable to connect to mysql database') |
168 | + print("ERROR: unable to connect to mysql database") |
169 | sys.exit(e) |
170 | |
171 | cursor = connection.cursor() |
172 | @@ -60,14 +61,14 @@ cursor.execute("SELECT id FROM glance.images WHERE status='deleted';") |
173 | image_ids = cursor.fetchall() |
174 | |
175 | for image_id in image_ids: |
176 | - print('purging {}'.format(image_id[0])) |
177 | - args = (image_id[0]) |
178 | + print("purging {}".format(image_id[0])) |
179 | + args = image_id[0] |
180 | commands = [ |
181 | - 'DELETE FROM glance.image_properties WHERE image_id=%s;', |
182 | - 'DELETE FROM glance.image_members WHERE image_id=%s;', |
183 | - 'DELETE FROM glance.image_tags WHERE image_id=%s;', |
184 | - 'DELETE FROM glance.image_locations WHERE image_id=%s;', |
185 | - 'DELETE FROM glance.images WHERE id=%s;' |
186 | + "DELETE FROM glance.image_properties WHERE image_id=%s;", |
187 | + "DELETE FROM glance.image_members WHERE image_id=%s;", |
188 | + "DELETE FROM glance.image_tags WHERE image_id=%s;", |
189 | + "DELETE FROM glance.image_locations WHERE image_id=%s;", |
190 | + "DELETE FROM glance.images WHERE id=%s;", |
191 | ] |
192 | |
193 | for command in commands: |
194 | diff --git a/src/files/db_purge_deleted_slave/db_purge_deleted_glance_images.py b/src/files/db_purge_deleted_slave/db_purge_deleted_glance_images.py |
195 | index c0acc47..74f57be 100644 |
196 | --- a/src/files/db_purge_deleted_slave/db_purge_deleted_glance_images.py |
197 | +++ b/src/files/db_purge_deleted_slave/db_purge_deleted_glance_images.py |
198 | @@ -27,10 +27,11 @@ from contextlib import closing |
199 | import mysql.connector as mysql |
200 | |
201 | con = mysql.connect( |
202 | - host=os.environ['OS_MYSQL_HOST'], |
203 | - user=os.environ['OS_MYSQL_USER'], |
204 | - password=os.environ['OS_MYSQL_PASS'], |
205 | - database=os.environ['OS_MYSQL_DB']) |
206 | + host=os.environ["OS_MYSQL_HOST"], |
207 | + user=os.environ["OS_MYSQL_USER"], |
208 | + password=os.environ["OS_MYSQL_PASS"], |
209 | + database=os.environ["OS_MYSQL_DB"], |
210 | +) |
211 | |
212 | # delete from images where status = 'deleted' |
213 | with closing(con.cursor()) as cur: |
214 | @@ -38,15 +39,14 @@ with closing(con.cursor()) as cur: |
215 | cur.execute(sql) |
216 | image_ids = cur.fetchall() |
217 | for image_id in image_ids: |
218 | - print('purging {}'.format(image_id[0])) |
219 | - args = (image_id[0]) |
220 | + print("purging {}".format(image_id[0])) |
221 | + args = image_id[0] |
222 | commands = [ |
223 | - "DELETE FROM glance.image_properties WHERE image_id='{}';".format( |
224 | - args), "DELETE FROM glance.image_members WHERE image_id='{}';". |
225 | - format(args), |
226 | + "DELETE FROM glance.image_properties WHERE image_id='{}';".format(args), |
227 | + "DELETE FROM glance.image_members WHERE image_id='{}';".format(args), |
228 | "DELETE FROM glance.image_tags WHERE image_id='{}';".format(args), |
229 | - "DELETE FROM glance.image_locations WHERE image_id='{}';".format( |
230 | - args), "DELETE FROM glance.images WHERE id='{}';".format(args) |
231 | + "DELETE FROM glance.image_locations WHERE image_id='{}';".format(args), |
232 | + "DELETE FROM glance.images WHERE id='{}';".format(args), |
233 | ] |
234 | |
235 | for command in commands: |
236 | diff --git a/src/files/glance_sync_master.py b/src/files/glance_sync_master.py |
237 | index 57139b3..5cd53b5 100755 |
238 | --- a/src/files/glance_sync_master.py |
239 | +++ b/src/files/glance_sync_master.py |
240 | @@ -20,31 +20,33 @@ from keystoneclient.v3 import client as keystone_v3_client |
241 | |
242 | def get_keystone_client(): |
243 | # We know that we set OS_AUTH_VERSION, so use it (and cast to int) |
244 | - if int(os.environ['OS_AUTH_VERSION']) == 3: |
245 | + if int(os.environ["OS_AUTH_VERSION"]) == 3: |
246 | ksc = keystone_v3_client.Client( |
247 | - auth_url=os.environ['OS_AUTH_URL'], |
248 | - username=os.environ['OS_USERNAME'], |
249 | - password=os.environ['OS_PASSWORD'], |
250 | - user_domain_name=os.environ['OS_USER_DOMAIN_NAME'], |
251 | - project_domain_name=os.environ['OS_PROJECT_DOMAIN_NAME'], |
252 | - project_name=os.environ['OS_PROJECT_NAME']) |
253 | + auth_url=os.environ["OS_AUTH_URL"], |
254 | + username=os.environ["OS_USERNAME"], |
255 | + password=os.environ["OS_PASSWORD"], |
256 | + user_domain_name=os.environ["OS_USER_DOMAIN_NAME"], |
257 | + project_domain_name=os.environ["OS_PROJECT_DOMAIN_NAME"], |
258 | + project_name=os.environ["OS_PROJECT_NAME"], |
259 | + ) |
260 | else: |
261 | ksc = keystone_v2_client.Client( |
262 | - username=os.environ['OS_USERNAME'], |
263 | - password=os.environ['OS_PASSWORD'], |
264 | - tenant_name=os.environ['OS_TENANT_NAME'], |
265 | - auth_url=os.environ['OS_AUTH_URL']) |
266 | + username=os.environ["OS_USERNAME"], |
267 | + password=os.environ["OS_PASSWORD"], |
268 | + tenant_name=os.environ["OS_TENANT_NAME"], |
269 | + auth_url=os.environ["OS_AUTH_URL"], |
270 | + ) |
271 | return ksc |
272 | |
273 | |
274 | def get_glance_client(ksc): |
275 | # create a glance client, using the provided keystone client for details |
276 | token = ksc.auth_token |
277 | - service = ksc.services.find(name='glance') |
278 | - endpoint = ksc.endpoints.find(service_id=service.id, interface='internal') |
279 | + service = ksc.services.find(name="glance") |
280 | + endpoint = ksc.endpoints.find(service_id=service.id, interface="internal") |
281 | glance_url = endpoint.url |
282 | |
283 | - return GlanceClient('2', endpoint=glance_url, token=token) |
284 | + return GlanceClient("2", endpoint=glance_url, token=token) |
285 | |
286 | |
287 | class BootStackMetadataError(Exception): |
288 | @@ -54,14 +56,15 @@ class BootStackMetadataError(Exception): |
289 | helps map different tenants between regions (with |
290 | different tenant-id, but same/similar tenant-name |
291 | """ |
292 | + |
293 | pass |
294 | |
295 | |
296 | class ImageSyncMaster: |
297 | - def __init__(self, data_dir='/srv/glance_master_sync/data'): |
298 | + def __init__(self, data_dir="/srv/glance_master_sync/data"): |
299 | self.tenants = {} |
300 | self.DATA_DIR = data_dir |
301 | - tmp_dir = '/tmp/glance_master_sync' |
302 | + tmp_dir = "/tmp/glance_master_sync" |
303 | if not os.path.isdir(tmp_dir): |
304 | os.makedirs(tmp_dir) |
305 | |
306 | @@ -71,76 +74,81 @@ class ImageSyncMaster: |
307 | def glance_connect(self): |
308 | try: |
309 | |
310 | - self.log('connecting to Keystone') |
311 | + self.log("connecting to Keystone") |
312 | self.keystone = get_keystone_client() |
313 | except Exception as e: |
314 | - self.log('EXCEPTION: {0}'.format(e)) |
315 | + self.log("EXCEPTION: {0}".format(e)) |
316 | sys.exit(2) |
317 | if not self.tenants: |
318 | # In the call to keystone we know that we get a list response. |
319 | # 1st element is the response code, 2nd is the data in dicts form |
320 | self.tenants = dict( |
321 | - [(tenant['id'], tenant['name']) |
322 | - for tenant in self.keystone.get('/projects')[1]['projects'] |
323 | - if tenant['enabled']]) |
324 | + [ |
325 | + (tenant["id"], tenant["name"]) |
326 | + for tenant in self.keystone.get("/projects")[1]["projects"] |
327 | + if tenant["enabled"] |
328 | + ] |
329 | + ) |
330 | self.glance = get_glance_client(self.keystone) |
331 | return self.glance |
332 | |
333 | def timestamp_now(self): |
334 | - return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') |
335 | + return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") |
336 | |
337 | def log(self, msg): |
338 | - print('{0} {1}'.format(self.timestamp_now(), msg)) |
339 | + print("{0} {1}".format(self.timestamp_now(), msg)) |
340 | |
341 | def delete_files(self, existing_images_ids): |
342 | if not existing_images_ids: |
343 | - self.log('WARNING: precautionary halt. ' |
344 | - 'No glance images found. noop.') |
345 | + self.log("WARNING: precautionary halt. No glance images found. noop.") |
346 | return |
347 | |
348 | for dirpath, dirnames, filenames in os.walk(self.DATA_DIR): |
349 | - if dirpath != self.DATA_DIR and \ |
350 | - len(dirnames) == 0 and \ |
351 | - len(filenames) == 0: |
352 | + if dirpath != self.DATA_DIR and len(dirnames) == 0 and len(filenames) == 0: |
353 | os.rmdir(dirpath) |
354 | continue |
355 | |
356 | for filename in filenames: |
357 | full_path = os.path.join(dirpath, filename) |
358 | - if filename.endswith('.json.tmp'): |
359 | - self.log('WARNING: temporary file skipped. Please check ' |
360 | - '{0}'.format(full_path)) |
361 | + if filename.endswith(".json.tmp"): |
362 | + self.log( |
363 | + "WARNING: temporary file skipped. Please check " |
364 | + "{0}".format(full_path) |
365 | + ) |
366 | continue |
367 | - elif filename.endswith('.json') and \ |
368 | - filename[:-5] in existing_images_ids: |
369 | + elif ( |
370 | + filename.endswith(".json") and filename[:-5] in existing_images_ids |
371 | + ): |
372 | continue |
373 | else: |
374 | - self.log('INFO: image not found in glance - deleting ' |
375 | - '{0}'.format(full_path)) |
376 | + self.log( |
377 | + "INFO: image not found in glance - deleting " |
378 | + "{0}".format(full_path) |
379 | + ) |
380 | os.remove(full_path) |
381 | |
382 | def create_lock(self, lockfile): |
383 | try: |
384 | os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR) |
385 | except OSError: |
386 | - self.log('ERROR: could not create lockfile {}'.format(lockfile)) |
387 | + self.log("ERROR: could not create lockfile {}".format(lockfile)) |
388 | |
389 | - def file_locked(self, lockfile='/tmp/glance_sync_master.lock'): |
390 | + def file_locked(self, lockfile="/tmp/glance_sync_master.lock"): |
391 | if os.path.isfile(lockfile): |
392 | return True |
393 | else: |
394 | return False |
395 | |
396 | - def release_lock(self, lockfile='/tmp/glance_sync_master.lock'): |
397 | + def release_lock(self, lockfile="/tmp/glance_sync_master.lock"): |
398 | if os.path.isfile(lockfile): |
399 | try: |
400 | os.remove(lockfile) |
401 | except OSError as e: |
402 | self.log(e) |
403 | |
404 | - def set_filelock(self, lockfile='/tmp/glance_sync_master.lock'): |
405 | + def set_filelock(self, lockfile="/tmp/glance_sync_master.lock"): |
406 | if self.file_locked(lockfile): |
407 | - self.log('WARNING: sync already in progress, exiting') |
408 | + self.log("WARNING: sync already in progress, exiting") |
409 | sys.exit(2) |
410 | |
411 | self.create_lock(lockfile) |
412 | @@ -152,21 +160,20 @@ class ImageSyncMaster: |
413 | len(image.id) < 2 : DATA_DIR/<image-id>.json |
414 | len(image.id) >= 2: DATA_DIR/XX/XXZZZ.json |
415 | """ |
416 | - self.log('getting image from database.') |
417 | + self.log("getting image from database.") |
418 | existing_images = self.get_community_images_from_database() |
419 | - self.log('Extending with data from api.') |
420 | + self.log("Extending with data from api.") |
421 | for image in self.glance.images.list(): |
422 | existing_images.add(image.id) |
423 | |
424 | for image_helper in existing_images: |
425 | image = self.glance.images.get(image_helper) |
426 | - if len(image['id']) < 2: |
427 | - basename = '{0}.json'.format(image['id']) |
428 | + if len(image["id"]) < 2: |
429 | + basename = "{0}.json".format(image["id"]) |
430 | else: |
431 | - basename = '{0}/{1}.json'.format( |
432 | - str(image['id'])[:2], image['id']) |
433 | + basename = "{0}/{1}.json".format(str(image["id"])[:2], image["id"]) |
434 | filename = os.path.join(self.DATA_DIR, basename) |
435 | - if not self.is_latest_metadata(filename, image['updated_at']): |
436 | + if not self.is_latest_metadata(filename, image["updated_at"]): |
437 | self.update_metadata(filename, image) |
438 | |
439 | return existing_images |
440 | @@ -181,31 +188,32 @@ class ImageSyncMaster: |
441 | with open(filename) as meta_file: |
442 | try: |
443 | data = json.load(meta_file) |
444 | - local_updated_at = data['updated_at'] |
445 | - imageid = data['id'] |
446 | + local_updated_at = data["updated_at"] |
447 | + imageid = data["id"] |
448 | except Exception as e: |
449 | - self.log('EXCEPTION: {0}'.format(e)) |
450 | + self.log("EXCEPTION: {0}".format(e)) |
451 | return False |
452 | |
453 | local_dup = dateutil.parser.parse(local_updated_at) |
454 | glance_dup = dateutil.parser.parse(glance_updated_at) |
455 | |
456 | if local_dup >= glance_dup: |
457 | - self.log('INFO: {0} up to date'.format(imageid)) |
458 | + self.log("INFO: {0} up to date".format(imageid)) |
459 | return True |
460 | else: |
461 | - self.log('INFO: {0} outdated. Re-creating local ' |
462 | - 'copy of the metadata'.format(imageid)) |
463 | + self.log( |
464 | + "INFO: {0} outdated. Re-creating local " |
465 | + "copy of the metadata".format(imageid) |
466 | + ) |
467 | else: |
468 | - self.log('INFO: {0} not found. Creating a local ' |
469 | - 'copy'.format(filename)) |
470 | + self.log("INFO: {0} not found. Creating a local copy".format(filename)) |
471 | return False |
472 | |
473 | def update_metadata(self, filename, glance_metadata): |
474 | """creates or replaces file with image metadata |
475 | creates subdirectory if it doesn't exist |
476 | """ |
477 | - tmp_file = '{0}.tmp'.format(filename) |
478 | + tmp_file = "{0}.tmp".format(filename) |
479 | |
480 | if not os.path.exists(os.path.dirname(filename)): |
481 | os.mkdir(os.path.dirname(filename), 0o750) |
482 | @@ -216,32 +224,34 @@ class ImageSyncMaster: |
483 | self.log(e) |
484 | return False |
485 | |
486 | - with open(tmp_file, 'w') as f: |
487 | + with open(tmp_file, "w") as f: |
488 | json.dump(glance_metadata, f, indent=4, ensure_ascii=False) |
489 | |
490 | os.rename(tmp_file, filename) |
491 | - self.log('INFO: update_metadata :: {0}'.format(filename)) |
492 | + self.log("INFO: update_metadata :: {0}".format(filename)) |
493 | return True |
494 | |
495 | def add_bs_metadata_keys(self, glance_metadata): |
496 | - keys = [k for k in glance_metadata if k.startswith('bs_')] |
497 | - if 'bs_owner' in keys: |
498 | - msg = 'WARNING: bs_owner metadata should not exist (image: ' \ |
499 | - '{0}; bs_owner: {1})'.format(glance_metadata.id, |
500 | - glance_metadata.bs_owner) |
501 | + keys = [k for k in glance_metadata if k.startswith("bs_")] |
502 | + if "bs_owner" in keys: |
503 | + msg = ( |
504 | + "WARNING: bs_owner metadata should not exist (image: " |
505 | + "{0}; bs_owner: {1})".format( |
506 | + glance_metadata.id, glance_metadata.bs_owner |
507 | + ) |
508 | + ) |
509 | raise BootStackMetadataError(msg) |
510 | - elif glance_metadata['owner'] in self.tenants: |
511 | - glance_metadata['bs_owner'] = self.tenants[ |
512 | - glance_metadata['owner']] |
513 | + elif glance_metadata["owner"] in self.tenants: |
514 | + glance_metadata["bs_owner"] = self.tenants[glance_metadata["owner"]] |
515 | return glance_metadata |
516 | |
517 | def main(self): |
518 | - self.log('starting glance sync') |
519 | + self.log("starting glance sync") |
520 | # updates local metadata files if outdated |
521 | existing_images = self.parse_glance_images() |
522 | # removes local metadata files from deleted images |
523 | self.delete_files(existing_images) |
524 | - self.log('ending glance sync') |
525 | + self.log("ending glance sync") |
526 | self.release_lock() |
527 | |
528 | def get_community_images_from_database(self): |
529 | @@ -249,34 +259,37 @@ class ImageSyncMaster: |
530 | |
531 | db_img_list = list() |
532 | con = mysql.connect( |
533 | - host=os.environ['OS_MYSQL_HOST'], |
534 | - user=os.environ['OS_MYSQL_USER'], |
535 | - password=os.environ['OS_MYSQL_PASS'], |
536 | - database=os.environ['OS_MYSQL_DB']) |
537 | + host=os.environ["OS_MYSQL_HOST"], |
538 | + user=os.environ["OS_MYSQL_USER"], |
539 | + password=os.environ["OS_MYSQL_PASS"], |
540 | + database=os.environ["OS_MYSQL_DB"], |
541 | + ) |
542 | with closing(con.cursor()) as cur: |
543 | - sql = 'SELECT id, name FROM images WHERE deleted = 0 AND ' \ |
544 | - "visibility = 'community'" |
545 | + sql = ( |
546 | + "SELECT id, name FROM images WHERE deleted = 0 AND " |
547 | + "visibility = 'community'" |
548 | + ) |
549 | cur.execute(sql) |
550 | for (id, name) in cur.fetchall(): |
551 | self.log( |
552 | - 'Retrieved community image with id [{}] and name [{}] ' |
553 | - 'from database'.format(id, name)) |
554 | + "Retrieved community image with id [{}] and name [{}] " |
555 | + "from database".format(id, name) |
556 | + ) |
557 | db_img_list.append(id) |
558 | |
559 | return set(db_img_list) |
560 | |
561 | |
562 | -if __name__ == '__main__': |
563 | - parser = argparse.ArgumentParser(description='Synchronize glance images ' |
564 | - 'to disk ') |
565 | - parser.add_argument('-d', '--datadir', help='directory to write images to') |
566 | +if __name__ == "__main__": |
567 | + parser = argparse.ArgumentParser(description="Synchronize glance images to disk ") |
568 | + parser.add_argument("-d", "--datadir", help="directory to write images to") |
569 | args = parser.parse_args() |
570 | |
571 | if args.datadir: |
572 | data_dir = args.datadir |
573 | else: |
574 | parser.print_help() |
575 | - sys.exit('ERROR: please specify an output directory for images') |
576 | + sys.exit("ERROR: please specify an output directory for images") |
577 | |
578 | master = ImageSyncMaster(data_dir) |
579 | master.main() |
580 | diff --git a/src/files/glance_sync_slave.py b/src/files/glance_sync_slave.py |
581 | index 702b362..daf6dc6 100755 |
582 | --- a/src/files/glance_sync_slave.py |
583 | +++ b/src/files/glance_sync_slave.py |
584 | @@ -10,6 +10,7 @@ import json |
585 | import datetime |
586 | import dateutil.parser |
587 | import atexit |
588 | + |
589 | # import re |
590 | import shlex |
591 | import os_client_config |
592 | @@ -22,61 +23,70 @@ class OSProjectNotFound(Exception): |
593 | """This indicates no sync is possible |
594 | (not defaulting to admin project) |
595 | """ |
596 | + |
597 | pass |
598 | |
599 | |
600 | class ImageSyncSlave: |
601 | - extra_properties = set(['bs_owner']) |
602 | - glance_properties = set(["architecture", |
603 | - "checksum", |
604 | - "container_format", |
605 | - "created_at", |
606 | - "deleted", |
607 | - "deleted_at", |
608 | - "direct_url", |
609 | - "disk_format", |
610 | - "file", |
611 | - "id", |
612 | - "instance_uuid", |
613 | - "kernel_id", |
614 | - "locations", |
615 | - "min_disk", |
616 | - "min_ram", |
617 | - "name", |
618 | - "os_distro", |
619 | - "os_version", |
620 | - "owner", |
621 | - "protected", |
622 | - "ramdisk_id", |
623 | - "schema", |
624 | - "self", |
625 | - "size", |
626 | - "status", |
627 | - "tags", |
628 | - "updated_at", |
629 | - "virtual_size", |
630 | - "visibility"]) |
631 | + extra_properties = set(["bs_owner"]) |
632 | + glance_properties = set( |
633 | + [ |
634 | + "architecture", |
635 | + "checksum", |
636 | + "container_format", |
637 | + "created_at", |
638 | + "deleted", |
639 | + "deleted_at", |
640 | + "direct_url", |
641 | + "disk_format", |
642 | + "file", |
643 | + "id", |
644 | + "instance_uuid", |
645 | + "kernel_id", |
646 | + "locations", |
647 | + "min_disk", |
648 | + "min_ram", |
649 | + "name", |
650 | + "os_distro", |
651 | + "os_version", |
652 | + "owner", |
653 | + "protected", |
654 | + "ramdisk_id", |
655 | + "schema", |
656 | + "self", |
657 | + "size", |
658 | + "status", |
659 | + "tags", |
660 | + "updated_at", |
661 | + "virtual_size", |
662 | + "visibility", |
663 | + ] |
664 | + ) |
665 | # egrep -B2 readOnly glanceclient/v2/image_schema.py | \ |
666 | # awk '/\{/ {print $1}' | tr -d \": |
667 | - readonly_properties = set(['file', |
668 | - 'size', |
669 | - 'status', |
670 | - 'self', |
671 | - 'direct_url', |
672 | - 'schema', |
673 | - 'updated_at', |
674 | - 'locations', |
675 | - 'virtual_size', |
676 | - 'checksum', |
677 | - 'created_at']) |
678 | + readonly_properties = set( |
679 | + [ |
680 | + "file", |
681 | + "size", |
682 | + "status", |
683 | + "self", |
684 | + "direct_url", |
685 | + "schema", |
686 | + "updated_at", |
687 | + "locations", |
688 | + "virtual_size", |
689 | + "checksum", |
690 | + "created_at", |
691 | + ] |
692 | + ) |
693 | |
694 | def __init__(self, data_dir, source): |
695 | self.projects_slave = {} |
696 | self.DATA_DIR = data_dir |
697 | self.SOURCE = source |
698 | self.valid_properties = self.glance_properties.difference( |
699 | - self.readonly_properties.union( |
700 | - self.extra_properties)) |
701 | + self.readonly_properties.union(self.extra_properties) |
702 | + ) |
703 | self.set_filelock() |
704 | self.glance_connect_slave() |
705 | self.glance_connect_master() |
706 | @@ -84,22 +94,25 @@ class ImageSyncSlave: |
707 | def download_metadata_from_master(self): |
708 | """rsync metadata files from source to data_dir""" |
709 | |
710 | - if not self.SOURCE.endswith('/'): |
711 | - self.SOURCE += '/' |
712 | - if not self.DATA_DIR.endswith('/'): |
713 | - self.DATA_DIR += '/' |
714 | - |
715 | - command = '/usr/bin/rsync -az --delete -e ' \ |
716 | - "'ssh -o StrictHostKeyChecking=no' " \ |
717 | - '{0} {1}'.format(self.SOURCE, self.DATA_DIR) |
718 | - proc = subprocess.Popen(shlex.split(command), |
719 | - stdout=subprocess.PIPE, |
720 | - stderr=subprocess.PIPE) |
721 | + if not self.SOURCE.endswith("/"): |
722 | + self.SOURCE += "/" |
723 | + if not self.DATA_DIR.endswith("/"): |
724 | + self.DATA_DIR += "/" |
725 | + |
726 | + command = ( |
727 | + "/usr/bin/rsync -az --delete -e " |
728 | + "'ssh -o StrictHostKeyChecking=no' " |
729 | + "{0} {1}".format(self.SOURCE, self.DATA_DIR) |
730 | + ) |
731 | + proc = subprocess.Popen( |
732 | + shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE |
733 | + ) |
734 | (stdout, stderr) = proc.communicate() |
735 | if proc.returncode: |
736 | - self.log('ERROR: problem while getting data from master ' |
737 | - '({0})'.format(command)) |
738 | - self.log('ERROR: {0}'.format(stderr)) |
739 | + self.log( |
740 | + "ERROR: problem while getting data from master ({0})".format(command) |
741 | + ) |
742 | + self.log("ERROR: {0}".format(stderr)) |
743 | self.release_lock() |
744 | sys.exit(2) |
745 | else: |
746 | @@ -111,17 +124,22 @@ class ImageSyncSlave: |
747 | |
748 | db_img_list = list() |
749 | con = mysql.connect( |
750 | - host=os.environ['OS_MYSQL_HOST'], |
751 | - user=os.environ['OS_MYSQL_USER'], |
752 | - password=os.environ['OS_MYSQL_PASS'], |
753 | - database=os.environ['OS_MYSQL_DB']) |
754 | + host=os.environ["OS_MYSQL_HOST"], |
755 | + user=os.environ["OS_MYSQL_USER"], |
756 | + password=os.environ["OS_MYSQL_PASS"], |
757 | + database=os.environ["OS_MYSQL_DB"], |
758 | + ) |
759 | with closing(con.cursor()) as cur: |
760 | - sql = "SELECT id, name FROM images WHERE deleted = 0 AND " \ |
761 | - "visibility = 'community'" |
762 | + sql = ( |
763 | + "SELECT id, name FROM images WHERE deleted = 0 AND " |
764 | + "visibility = 'community'" |
765 | + ) |
766 | cur.execute(sql) |
767 | for (id, name) in cur.fetchall(): |
768 | - self.log('Retrieved community image with id [{}] and name ' |
769 | - '[{}] from database'.format(id, name)) |
770 | + self.log( |
771 | + "Retrieved community image with id [{}] and name " |
772 | + "[{}] from database".format(id, name) |
773 | + ) |
774 | db_img_list.append(id) |
775 | |
776 | return set(db_img_list) |
777 | @@ -134,7 +152,7 @@ class ImageSyncSlave: |
778 | |
779 | @returns processed (aka. parsed) images |
780 | """ |
781 | - self.log('getting image list from slave') |
782 | + self.log("getting image list from slave") |
783 | processed_images_ids = set() |
784 | to_delete_images_ids = set() |
785 | existing_images = self.get_community_images_from_database() |
786 | @@ -143,10 +161,9 @@ class ImageSyncSlave: |
787 | for image_helper in existing_images: |
788 | image = self.glance_slave.images.get(image_helper) |
789 | if len(image.id) < 2: |
790 | - basename = '{0}.json'.format(image.id) |
791 | + basename = "{0}.json".format(image.id) |
792 | else: |
793 | - basename = '{0}/{1}.json'.format(str(image.id)[:2], |
794 | - image.id) |
795 | + basename = "{0}/{1}.json".format(str(image.id)[:2], image.id) |
796 | filename = os.path.join(self.DATA_DIR, basename) |
797 | if not os.path.isfile(filename): |
798 | to_delete_images_ids.add(image.id) |
799 | @@ -154,14 +171,16 @@ class ImageSyncSlave: |
800 | |
801 | metadata_local = self.read_metadata(filename) |
802 | if not metadata_local: |
803 | - self.log('ERROR: read_metadata did not retrieve anything ' |
804 | - '({0})'.format(filename)) |
805 | + self.log( |
806 | + "ERROR: read_metadata did not retrieve anything " |
807 | + "({0})".format(filename) |
808 | + ) |
809 | continue |
810 | |
811 | - if metadata_local['checksum'] == image.checksum: |
812 | - if not self.is_latest_metadata(metadata_local['id'], |
813 | - metadata_local['updated_at'], |
814 | - image.updated_at): |
815 | + if metadata_local["checksum"] == image.checksum: |
816 | + if not self.is_latest_metadata( |
817 | + metadata_local["id"], metadata_local["updated_at"], image.updated_at |
818 | + ): |
819 | # checksum ok, metadata outdated |
820 | self.update_metadata(metadata_local, image) |
821 | processed_images_ids.add(image.id) |
822 | @@ -170,16 +189,17 @@ class ImageSyncSlave: |
823 | self.upload_to_slave(metadata_local) |
824 | processed_images_ids.add(image.id) |
825 | |
826 | - self.log('DEBUG: images pending to be deleted: ' |
827 | - '{0}'.format(to_delete_images_ids)) |
828 | + self.log( |
829 | + "DEBUG: images pending to be deleted: {0}".format(to_delete_images_ids) |
830 | + ) |
831 | self.delete_images_from_slave(to_delete_images_ids) |
832 | - self.log('DEBUG: processed images (to skip while parsing metadata ' |
833 | - 'files): {0}'.format(processed_images_ids)) |
834 | + self.log( |
835 | + "DEBUG: processed images (to skip while parsing metadata " |
836 | + "files): {0}".format(processed_images_ids) |
837 | + ) |
838 | return processed_images_ids |
839 | |
840 | - def is_latest_metadata(self, image_id, |
841 | - master_updated_at, |
842 | - slave_updated_at): |
843 | + def is_latest_metadata(self, image_id, master_updated_at, slave_updated_at): |
844 | """Compares filename content (JSON metadata) and glance service info |
845 | @return |
846 | True: no need to update |
847 | @@ -189,70 +209,78 @@ class ImageSyncSlave: |
848 | slave_dup = dateutil.parser.parse(slave_updated_at) |
849 | |
850 | if master_dup <= slave_dup: |
851 | - self.log('INFO: is_latest_metadata :: {0} up to ' |
852 | - 'date'.format(image_id)) |
853 | + self.log("INFO: is_latest_metadata :: {0} up to date".format(image_id)) |
854 | return True |
855 | else: |
856 | - self.log('INFO: is_latest_metadata :: {0} outdated. Needs ' |
857 | - 'update_metadata.'.format(image_id)) |
858 | + self.log( |
859 | + "INFO: is_latest_metadata :: {0} outdated. Needs " |
860 | + "update_metadata.".format(image_id) |
861 | + ) |
862 | return False |
863 | |
864 | def upload_to_slave(self, metadata_local): # noqa: C901 is too complex (12) |
865 | """upload image to glance slave service |
866 | """ |
867 | - tmp_image_basename = '{0}.img'.format(metadata_local['id']) |
868 | + tmp_image_basename = "{0}.img".format(metadata_local["id"]) |
869 | tmp_image = os.path.join(self.DATA_DIR, tmp_image_basename) |
870 | try: |
871 | clean_metadata, removed_props = self.mangle_metadata(metadata_local) |
872 | except OSProjectNotFound as e: |
873 | - self.log('EXCEPTION: upload_to_slave :: image-id {0} :: ' |
874 | - 'problem uploading data to glance ' |
875 | - 'slave (image could not be removed) :: ' |
876 | - '{1}'.format(metadata_local['id'], e)) |
877 | + self.log( |
878 | + "EXCEPTION: upload_to_slave :: image-id {0} :: " |
879 | + "problem uploading data to glance " |
880 | + "slave (image could not be removed) :: " |
881 | + "{1}".format(metadata_local["id"], e) |
882 | + ) |
883 | return False |
884 | |
885 | for k in removed_props: |
886 | if k in clean_metadata: |
887 | del clean_metadata[k] |
888 | |
889 | - self.log('INFO: creating image {0}'.format(clean_metadata['id'])) |
890 | + self.log("INFO: creating image {0}".format(clean_metadata["id"])) |
891 | try: |
892 | self.glance_slave.images.create(**clean_metadata) |
893 | - self.log('DEBUG: create image: {0}'.format(clean_metadata)) |
894 | + self.log("DEBUG: create image: {0}".format(clean_metadata)) |
895 | except Exception as e: # TODO narrow this exception down |
896 | - self.log('EXCEPTION: upload_to_slave :: {0}'.format(e)) |
897 | + self.log("EXCEPTION: upload_to_slave :: {0}".format(e)) |
898 | try: |
899 | # update metadata |
900 | - self.glance_slave.images.update(clean_metadata['id'], |
901 | - remove_props=removed_props, |
902 | - **clean_metadata) |
903 | - self.log('DEBUG: update_to_slave :: update metadata ' |
904 | - '{0}'.format(clean_metadata)) |
905 | + self.glance_slave.images.update( |
906 | + clean_metadata["id"], remove_props=removed_props, **clean_metadata |
907 | + ) |
908 | + self.log( |
909 | + "DEBUG: update_to_slave :: update metadata " |
910 | + "{0}".format(clean_metadata) |
911 | + ) |
912 | except Exception as e: |
913 | if "HTTPNotFound" not in e: |
914 | - self.log('ERROR: update_to_slave (both image ' |
915 | - 'create/update failed :: {0} - this can ' |
916 | - 'happen if the image was deleted through ' |
917 | - 'the API but still exists in the glance ' |
918 | - 'database :: {1}' |
919 | - .format(clean_metadata['id'], e)) |
920 | + self.log( |
921 | + "ERROR: update_to_slave (both image " |
922 | + "create/update failed :: {0} - this can " |
923 | + "happen if the image was deleted through " |
924 | + "the API but still exists in the glance " |
925 | + "database :: {1}".format(clean_metadata["id"], e) |
926 | + ) |
927 | return False |
928 | |
929 | - self.log('ERROR: {0} {1} is likely deleted'.format( |
930 | - clean_metadata['id'], e)) |
931 | + self.log( |
932 | + "ERROR: {0} {1} is likely deleted".format(clean_metadata["id"], e) |
933 | + ) |
934 | |
935 | try: |
936 | # Upload. |
937 | - self.glance_slave.images.upload(clean_metadata['id'], |
938 | - open(tmp_image, 'rb')) |
939 | + self.glance_slave.images.upload(clean_metadata["id"], open(tmp_image, "rb")) |
940 | os.remove(tmp_image) |
941 | - self.log('DEBUG: update_to_slave :: upload {0}'.format(tmp_image)) |
942 | + self.log("DEBUG: update_to_slave :: upload {0}".format(tmp_image)) |
943 | except Exception as e: |
944 | os.remove(tmp_image) |
945 | - self.log('ERROR: upload_to_slave :: image-id {0} :: ' |
946 | - 'problem uploading data to glance ' |
947 | - 'slave (image could not be removed) :: ' |
948 | - '{1}'.format(clean_metadata['id'], e)) |
949 | + self.log( |
950 | + "ERROR: upload_to_slave :: image-id {0} :: " |
951 | + "problem uploading data to glance " |
952 | + "slave (image could not be removed) :: " |
953 | + "{1}".format(clean_metadata["id"], e) |
954 | + ) |
955 | return False |
956 | |
957 | def download_from_master(self, metadata_local): # noqa: C901 is too complex (12) |
958 | @@ -261,45 +289,54 @@ class ImageSyncSlave: |
959 | @return True: downloaded or already on local storage |
960 | @return False: error |
961 | """ |
962 | - tmp_image_basename = '{0}.img'.format(metadata_local['id']) |
963 | + tmp_image_basename = "{0}.img".format(metadata_local["id"]) |
964 | tmp_image = os.path.join(self.DATA_DIR, tmp_image_basename) |
965 | if os.path.isfile(tmp_image): |
966 | - if self.check_md5(metadata_local['checksum'], tmp_image): |
967 | + if self.check_md5(metadata_local["checksum"], tmp_image): |
968 | return True |
969 | |
970 | try: |
971 | os.remove(tmp_image) |
972 | except Exception as e: |
973 | - self.log('ERROR: download_from_master :: {0}'.format(e)) |
974 | + self.log("ERROR: download_from_master :: {0}".format(e)) |
975 | return False |
976 | downloaded = False |
977 | retries = 3 |
978 | for i in range(0, retries): |
979 | try: |
980 | bin_image = self.glance_master.images.data( |
981 | - image_id=metadata_local['id']) |
982 | + image_id=metadata_local["id"] |
983 | + ) |
984 | |
985 | hash_md5 = hashlib.md5() |
986 | - with open(tmp_image, 'wb') as fd: |
987 | + with open(tmp_image, "wb") as fd: |
988 | for chunk in bin_image: |
989 | fd.write(chunk) |
990 | hash_md5.update(chunk) |
991 | bin_image_checksum = hash_md5.hexdigest() |
992 | - if metadata_local['checksum'] == bin_image_checksum: |
993 | + if metadata_local["checksum"] == bin_image_checksum: |
994 | downloaded = True |
995 | - self.log('INFO: download_from_master ({0} - {1}):: ' |
996 | - 'checksum OK'.format(metadata_local['id'], |
997 | - metadata_local['checksum'])) |
998 | + self.log( |
999 | + "INFO: download_from_master ({0} - {1}):: " |
1000 | + "checksum OK".format( |
1001 | + metadata_local["id"], metadata_local["checksum"] |
1002 | + ) |
1003 | + ) |
1004 | break |
1005 | elif os.path.exists(tmp_image): |
1006 | - self.log('INFO: download_from_master ({0}/{1}; {2}):: ' |
1007 | - 'invalid checksum ' |
1008 | - '{3}'.format(metadata_local['id'], i, retries, |
1009 | - bin_image_checksum)) |
1010 | + self.log( |
1011 | + "INFO: download_from_master ({0}/{1}; {2}):: " |
1012 | + "invalid checksum " |
1013 | + "{3}".format( |
1014 | + metadata_local["id"], i, retries, bin_image_checksum |
1015 | + ) |
1016 | + ) |
1017 | os.remove(tmp_image) |
1018 | except Exception as e: |
1019 | - self.log('EXCEPTION: download_from_master ({0}/{1}; {2}):: ' |
1020 | - '{3}'.format(i, retries, metadata_local['id'], e)) |
1021 | + self.log( |
1022 | + "EXCEPTION: download_from_master ({0}/{1}; {2}):: " |
1023 | + "{3}".format(i, retries, metadata_local["id"], e) |
1024 | + ) |
1025 | if os.path.exists(tmp_image): |
1026 | os.remove(tmp_image) |
1027 | |
1028 | @@ -310,86 +347,90 @@ class ImageSyncSlave: |
1029 | deletes images not found in local storage |
1030 | """ |
1031 | if not to_delete_images_ids: |
1032 | - self.log('WARNING: precautionary halt. No glance images found ' |
1033 | - 'to be deleted. noop.') |
1034 | + self.log( |
1035 | + "WARNING: precautionary halt. No glance images found " |
1036 | + "to be deleted. noop." |
1037 | + ) |
1038 | return |
1039 | |
1040 | for image_id in to_delete_images_ids: |
1041 | - self.log('INFO: removing image {0}'.format(image_id)) |
1042 | + self.log("INFO: removing image {0}".format(image_id)) |
1043 | try: |
1044 | self.glance_slave.images.delete(image_id) |
1045 | - self.log('DEBUG: image {0} removed'.format(image_id)) |
1046 | + self.log("DEBUG: image {0} removed".format(image_id)) |
1047 | except Exception as e: # TODO narrow the exception down |
1048 | - self.log('ERROR: could not delete {0} :: ' |
1049 | - '{1}'.format(image_id, e)) |
1050 | + self.log("ERROR: could not delete {0} :: {1}".format(image_id, e)) |
1051 | |
1052 | def create_missing_slave_images(self, processed_images_ids): |
1053 | |
1054 | for dirpath, dirnames, filenames in os.walk(self.DATA_DIR): |
1055 | - if dirpath != self.DATA_DIR and \ |
1056 | - len(dirnames) == 0 and \ |
1057 | - len(filenames) == 0: |
1058 | + if dirpath != self.DATA_DIR and len(dirnames) == 0 and len(filenames) == 0: |
1059 | os.rmdir(dirpath) |
1060 | continue |
1061 | |
1062 | for filename in filenames: |
1063 | full_path = os.path.join(dirpath, filename) |
1064 | - if filename.endswith('.json'): |
1065 | + if filename.endswith(".json"): |
1066 | image_id = filename[:-5] |
1067 | if image_id in processed_images_ids: |
1068 | continue |
1069 | |
1070 | metadata_local = self.read_metadata(full_path) |
1071 | if not metadata_local: |
1072 | - self.log('ERROR: read_metadata did not ' |
1073 | - 'retrieve anything ' |
1074 | - '({0})'.format(full_path)) |
1075 | + self.log( |
1076 | + "ERROR: read_metadata did not " |
1077 | + "retrieve anything " |
1078 | + "({0})".format(full_path) |
1079 | + ) |
1080 | continue |
1081 | |
1082 | slave_project_id = self.project_mapping(metadata_local) |
1083 | if not slave_project_id: |
1084 | - self.log('DEBUG: could not map image into any ' |
1085 | - 'slave project :: {0}'.format(metadata_local)) |
1086 | + self.log( |
1087 | + "DEBUG: could not map image into any " |
1088 | + "slave project :: {0}".format(metadata_local) |
1089 | + ) |
1090 | continue |
1091 | |
1092 | - metadata_local['owner'] = slave_project_id |
1093 | + metadata_local["owner"] = slave_project_id |
1094 | if self.download_from_master(metadata_local): |
1095 | self.upload_to_slave(metadata_local) |
1096 | else: |
1097 | - self.log('ERROR: image {0} could not be downloaded ' |
1098 | - 'from master'.format(metadata_local['id'])) |
1099 | + self.log( |
1100 | + "ERROR: image {0} could not be downloaded " |
1101 | + "from master".format(metadata_local["id"]) |
1102 | + ) |
1103 | |
1104 | def project_mapping(self, metadata_local): |
1105 | """can master/slave projects be mapped, no matter project_ids are not |
1106 | the same? |
1107 | """ |
1108 | # master/slave match can't be done (no project_id) |
1109 | - if 'owner' not in metadata_local: |
1110 | + if "owner" not in metadata_local: |
1111 | return False |
1112 | |
1113 | # master/slave project_ids match |
1114 | - if metadata_local['owner'] in self.projects_slave: |
1115 | - return metadata_local['owner'] |
1116 | + if metadata_local["owner"] in self.projects_slave: |
1117 | + return metadata_local["owner"] |
1118 | |
1119 | # no extra project_name passed -- can't check match |
1120 | - if 'bs_owner' not in metadata_local: |
1121 | + if "bs_owner" not in metadata_local: |
1122 | return False |
1123 | |
1124 | - master_project_name = metadata_local['bs_owner'] |
1125 | + master_project_name = metadata_local["bs_owner"] |
1126 | |
1127 | # XXX(aluria): no image on slave service |
1128 | # XXX(aluria): look for similar project on slave |
1129 | - for slave_project_id, slave_project_name in ( |
1130 | - self.projects_slave.items()): |
1131 | - slave_to_master = slave_project_name.replace(self.REGION_SLAVE, |
1132 | - self.REGION_MASTER) |
1133 | + for slave_project_id, slave_project_name in self.projects_slave.items(): |
1134 | + slave_to_master = slave_project_name.replace( |
1135 | + self.REGION_SLAVE, self.REGION_MASTER |
1136 | + ) |
1137 | # XXX(aluria): pitfall, if on master service there are |
1138 | # XXX(aluria): 2 projects: |
1139 | # XXX(auria): REGION_SLAVE-restofprojectname |
1140 | # XXX(auria): REGION_MASTER-restofprojectname |
1141 | # XXX(auria): first found gets image assigned |
1142 | - if master_project_name in (slave_project_name, |
1143 | - slave_to_master): |
1144 | + if master_project_name in (slave_project_name, slave_to_master): |
1145 | return slave_project_id |
1146 | return False |
1147 | |
1148 | @@ -413,49 +454,47 @@ class ImageSyncSlave: |
1149 | data = json.load(meta_file) |
1150 | return data |
1151 | except Exception as e: |
1152 | - self.log('EXCEPTION: {0}'.format(e)) |
1153 | + self.log("EXCEPTION: {0}".format(e)) |
1154 | return False |
1155 | else: |
1156 | - self.log('INFO: {0} not found.'.format(metadata_file)) |
1157 | + self.log("INFO: {0} not found.".format(metadata_file)) |
1158 | return False |
1159 | |
1160 | def glance_connect_slave(self): |
1161 | try: |
1162 | self.keystone = os_client_config.session_client( |
1163 | - 'identity', |
1164 | - cloud='envvars', |
1165 | - ) |
1166 | - self.glance_slave = os_client_config.make_client( |
1167 | - 'image', |
1168 | - cloud='envvars', |
1169 | + "identity", cloud="envvars", |
1170 | ) |
1171 | + self.glance_slave = os_client_config.make_client("image", cloud="envvars") |
1172 | except Exception as e: |
1173 | - self.log('EXCEPTION: {0}'.format(e)) |
1174 | - self.log('ERROR: unable to load environment variables, please ' |
1175 | - 'source novarc') |
1176 | + self.log("EXCEPTION: {0}".format(e)) |
1177 | + self.log( |
1178 | + "ERROR: unable to load environment variables, please source novarc" |
1179 | + ) |
1180 | self.release_lock() |
1181 | sys.exit(2) |
1182 | if not self.projects_slave: |
1183 | self.projects_slave = dict( |
1184 | - [(tenant['id'], tenant['name']) |
1185 | - for tenant in self.keystone.get('/projects').json()['projects'] |
1186 | - if tenant['enabled']] |
1187 | + [ |
1188 | + (tenant["id"], tenant["name"]) |
1189 | + for tenant in self.keystone.get("/projects").json()["projects"] |
1190 | + if tenant["enabled"] |
1191 | + ] |
1192 | ) |
1193 | - self.REGION_SLAVE = os.environ['OS_REGION_NAME'].upper() |
1194 | + self.REGION_SLAVE = os.environ["OS_REGION_NAME"].upper() |
1195 | return self.glance_slave |
1196 | |
1197 | def glance_connect_master(self): |
1198 | try: |
1199 | - self.glance_master = os_client_config.make_client( |
1200 | - 'image', |
1201 | - cloud='master', |
1202 | - ) |
1203 | - self.REGION_MASTER = os.environ['OS_MASTER_REGION'] |
1204 | + self.glance_master = os_client_config.make_client("image", cloud="master") |
1205 | + self.REGION_MASTER = os.environ["OS_MASTER_REGION"] |
1206 | except Exception as e: |
1207 | - self.log('EXCEPTION: {0}'.format(e)) |
1208 | - self.log('ERROR: unable to load master cloud environment, ' |
1209 | - 'please check master_creds settings and ' |
1210 | - '/etc/openstack/clouds.yaml') |
1211 | + self.log("EXCEPTION: {0}".format(e)) |
1212 | + self.log( |
1213 | + "ERROR: unable to load master cloud environment, " |
1214 | + "please check master_creds settings and " |
1215 | + "/etc/openstack/clouds.yaml" |
1216 | + ) |
1217 | self.release_lock() |
1218 | sys.exit(2) |
1219 | return self.glance_master |
1220 | @@ -464,7 +503,7 @@ class ImageSyncSlave: |
1221 | return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") |
1222 | |
1223 | def log(self, msg): |
1224 | - print('{0} {1}'.format(self.timestamp_now(), msg)) |
1225 | + print("{0} {1}".format(self.timestamp_now(), msg)) |
1226 | |
1227 | def mangle_metadata(self, metadata_local, metadata_slave=None): |
1228 | """Maps projects in MASTER region with projects in SLAVE region |
1229 | @@ -519,122 +558,132 @@ class ImageSyncSlave: |
1230 | 'base_image_ref', |
1231 | 'owner_id'] |
1232 | """ |
1233 | - if 'owner' not in metadata_local: |
1234 | - raise (OSProjectNotFound, 'no owner :: {0}'.format(metadata_local)) |
1235 | + if "owner" not in metadata_local: |
1236 | +            raise OSProjectNotFound("no owner :: {0}".format(metadata_local))
1237 | |
1238 | - if metadata_local['owner'] not in self.projects_slave: |
1239 | - if 'bs_owner' in metadata_local: |
1240 | - master_project_name = metadata_local['bs_owner'] |
1241 | + if metadata_local["owner"] not in self.projects_slave: |
1242 | + if "bs_owner" in metadata_local: |
1243 | + master_project_name = metadata_local["bs_owner"] |
1244 | else: |
1245 | - raise (OSProjectNotFound, 'no bs_owner :: ' |
1246 | - '{0}'.format(metadata_local)) |
1247 | +                # NOTE: raise an instance; tuple-style raise is a TypeError in Py3
1248 | +                raise OSProjectNotFound(
1249 | +                    "no bs_owner :: {0}".format(metadata_local)
1250 | +                )
1251 | |
1252 | # XXX(aluria): image does not exist on slave service |
1253 | if not metadata_slave: |
1254 | slave_project_id = self.project_mapping(metadata_local) |
1255 | if not slave_project_id: |
1256 | - raise (OSProjectNotFound, 'no project_mapping :: ' |
1257 | - '{0}'.format(metadata_local)) |
1258 | - elif metadata_local['owner'] != slave_project_id: |
1259 | - metadata_local['owner'] = slave_project_id |
1260 | +                # NOTE: raise an instance; tuple-style raise is a TypeError in Py3
1261 | +                raise OSProjectNotFound(
1262 | +                    "no project_mapping :: {0}".format(metadata_local)
1263 | +                )
1264 | + elif metadata_local["owner"] != slave_project_id: |
1265 | + metadata_local["owner"] = slave_project_id |
1266 | # XXX(aluria): image exists on slave service |
1267 | # XXX(aluria): keep all metadata and mangle project_id (owner) |
1268 | else: |
1269 | # ie. admin, services, SLAVE-CENTRAL |
1270 | - slave_project_name = self.projects_slave[metadata_slave['owner']] |
1271 | + slave_project_name = self.projects_slave[metadata_slave["owner"]] |
1272 | # ie. admin, services, MASTER-CENTRAL |
1273 | - slave_to_master = \ |
1274 | - slave_project_name.replace(self.REGION_SLAVE, |
1275 | - self.REGION_MASTER) |
1276 | + slave_to_master = slave_project_name.replace( |
1277 | + self.REGION_SLAVE, self.REGION_MASTER |
1278 | + ) |
1279 | # ie. admin, services, MASTER-CENTRAL |
1280 | - if master_project_name in (slave_project_name, |
1281 | - slave_to_master): |
1282 | - metadata_local['owner'] = metadata_slave['owner'] |
1283 | + if master_project_name in (slave_project_name, slave_to_master): |
1284 | + metadata_local["owner"] = metadata_slave["owner"] |
1285 | else: |
1286 | - raise (OSProjectNotFound, 'project not found: ' |
1287 | - '{0}'.format(metadata_local)) |
1288 | +                # NOTE: raise an instance; tuple-style raise is a TypeError in Py3
1289 | +                raise OSProjectNotFound(
1290 | +                    "project not found: {0}".format(metadata_local)
1291 | +                )
1292 | |
1293 | - removed_props = [k for k in metadata_local.keys() if k not in |
1294 | - self.valid_properties] |
1295 | + removed_props = [ |
1296 | + k for k in metadata_local.keys() if k not in self.valid_properties |
1297 | + ] |
1298 | return (metadata_local, removed_props) |
1299 | |
1300 | def update_metadata(self, metadata_local, metadata_slave): |
1301 | - self.log('INFO: image-id {0}: updating ' |
1302 | - 'metadata'.format(metadata_local['id'])) |
1303 | + self.log("INFO: image-id {0}: updating metadata".format(metadata_local["id"])) |
1304 | |
1305 | - metadata, removed_props = self.mangle_metadata(metadata_local, |
1306 | - metadata_slave) |
1307 | + metadata, removed_props = self.mangle_metadata(metadata_local, metadata_slave) |
1308 | for k in removed_props: |
1309 | if k in metadata: |
1310 | del metadata[k] |
1311 | |
1312 | try: |
1313 | - self.glance_slave.images.update(metadata['id'], |
1314 | - **metadata) |
1315 | + self.glance_slave.images.update(metadata["id"], **metadata) |
1316 | except Exception as e: |
1317 | - self.log('EXCEPTION: update_metadata :: {0} - ' |
1318 | - '{1}'.format(metadata['id'], e)) |
1319 | + self.log( |
1320 | + "EXCEPTION: update_metadata :: {0} - {1}".format(metadata["id"], e) |
1321 | + ) |
1322 | raise e |
1323 | |
1324 | def create_lock(self, lockfile): |
1325 | try: |
1326 | - with open(lockfile, 'w') as lock: |
1327 | + with open(lockfile, "w") as lock: |
1328 | lock.write(str(os.getpid())) |
1329 | except OSError: |
1330 | - self.log('ERROR: could not create lockfile {0}'.format(lockfile)) |
1331 | + self.log("ERROR: could not create lockfile {0}".format(lockfile)) |
1332 | |
1333 | - def file_locked(self, lockfile='/tmp/glance_sync_slave.lock'): |
1334 | + def file_locked(self, lockfile="/tmp/glance_sync_slave.lock"): |
1335 | if os.path.isfile(lockfile): |
1336 | return True |
1337 | else: |
1338 | return False |
1339 | |
1340 | - def release_lock(self, lockfile='/tmp/glance_sync_slave.lock'): |
1341 | + def release_lock(self, lockfile="/tmp/glance_sync_slave.lock"): |
1342 | if os.path.isfile(lockfile): |
1343 | try: |
1344 | os.remove(lockfile) |
1345 | except OSError as e: |
1346 | self.log(e) |
1347 | |
1348 | - def set_filelock(self, lockfile='/tmp/glance_sync_slave.lock'): |
1349 | + def set_filelock(self, lockfile="/tmp/glance_sync_slave.lock"): |
1350 | if self.file_locked(lockfile): |
1351 | - self.log('WARNING: sync already in progress, exiting') |
1352 | + self.log("WARNING: sync already in progress, exiting") |
1353 | sys.exit(2) |
1354 | |
1355 | self.create_lock(lockfile) |
1356 | atexit.register(self.release_lock) |
1357 | |
1358 | def main(self): |
1359 | - self.log('starting glance sync') |
1360 | - self.log('getting metadata from master') |
1361 | + self.log("starting glance sync") |
1362 | + self.log("getting metadata from master") |
1363 | self.download_metadata_from_master() |
1364 | processed_images_ids = self.parse_glance_slave_images() |
1365 | self.create_missing_slave_images(processed_images_ids) |
1366 | - self.log('ending glance image sync slave run') |
1367 | + self.log("ending glance image sync slave run") |
1368 | self.release_lock() |
1369 | |
1370 | |
1371 | -if __name__ == '__main__': |
1372 | - parser = argparse.ArgumentParser(description='Synchronize remote images ' |
1373 | - 'metadata to disk and import into glance') |
1374 | +if __name__ == "__main__": |
1375 | + parser = argparse.ArgumentParser( |
1376 | + description="Synchronize remote images " |
1377 | + "metadata to disk and import into glance" |
1378 | + ) |
1379 | parser.add_argument("-d", "--datadir", help="directory to write images to") |
1380 | - parser.add_argument("-s", "--source", help="full path to master rsync " |
1381 | - "source. Format: " |
1382 | - "<user>@<hostname>:<port>/" |
1383 | - "<directory>") |
1384 | + parser.add_argument( |
1385 | + "-s", |
1386 | + "--source", |
1387 | + help="full path to master rsync " |
1388 | + "source. Format: " |
1389 | + "<user>@<hostname>:<port>/" |
1390 | + "<directory>", |
1391 | + ) |
1392 | args = parser.parse_args() |
1393 | |
1394 | if args.datadir: |
1395 | data_dir = args.datadir |
1396 | else: |
1397 | parser.print_help() |
1398 | - sys.exit('ERROR: please specify an output directory for images') |
1399 | + sys.exit("ERROR: please specify an output directory for images") |
1400 | |
1401 | if args.source: |
1402 | source = args.source |
1403 | else: |
1404 | parser.print_help() |
1405 | - sys.exit('ERROR: please specify an image source to sync from') |
1406 | + sys.exit("ERROR: please specify an image source to sync from") |
1407 | |
1408 | slave = ImageSyncSlave(data_dir, source) |
1409 | slave.main() |
1410 | diff --git a/src/reactive/glance_sync.py b/src/reactive/glance_sync.py |
1411 | index bd80f43..dff83c6 100644 |
1412 | --- a/src/reactive/glance_sync.py |
1413 | +++ b/src/reactive/glance_sync.py |
1414 | @@ -15,59 +15,53 @@ from charmhelpers.contrib.openstack.utils import config_flags_parser |
1415 | from charms.reactive import hook, clear_flag, when, when_any |
1416 | |
1417 | |
1418 | -@hook('install') |
1419 | +@hook("install") |
1420 | def install_glance_sync(): |
1421 | """Install glance-sync charm.""" |
1422 | - hookenv.status_set('maintenance', 'Installing') |
1423 | + hookenv.status_set("maintenance", "Installing") |
1424 | configure_config_dir() |
1425 | configure_log_dir() |
1426 | configure_script_dir() |
1427 | configure_sync_mode() |
1428 | |
1429 | - homedir = os.path.expanduser('~ubuntu') |
1430 | - ssh_identity = '{}/.ssh/id_rsa'.format(homedir) |
1431 | + homedir = os.path.expanduser("~ubuntu") |
1432 | + ssh_identity = "{}/.ssh/id_rsa".format(homedir) |
1433 | if not os.path.exists(ssh_identity): |
1434 | - command = ['ssh-keygen', '-t', 'rsa', '-N', '', |
1435 | - '-f', ssh_identity] |
1436 | - proc = subprocess.Popen(command, |
1437 | - stdout=subprocess.PIPE, |
1438 | - stderr=subprocess.PIPE) |
1439 | + command = ["ssh-keygen", "-t", "rsa", "-N", "", "-f", ssh_identity] |
1440 | + proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) |
1441 | (stdout, stderr) = proc.communicate() |
1442 | if proc.returncode: |
1443 | - print("ERROR: problem generating ssh key '{}':" |
1444 | - .format(command)) |
1445 | + print("ERROR: problem generating ssh key '{}':".format(command)) |
1446 | print(stderr) |
1447 | - os.chown(ssh_identity, |
1448 | - pwd.getpwnam('ubuntu').pw_uid, |
1449 | - grp.getgrnam('ubuntu').gr_gid) |
1450 | + os.chown( |
1451 | + ssh_identity, pwd.getpwnam("ubuntu").pw_uid, grp.getgrnam("ubuntu").gr_gid |
1452 | + ) |
1453 | |
1454 | - hookenv.status_set('active', 'Unit is ready') |
1455 | + hookenv.status_set("active", "Unit is ready") |
1456 | |
1457 | |
1458 | -@when('config.changed.master_mode') |
1459 | +@when("config.changed.master_mode") |
1460 | def configure_sync_mode(): |
1461 | """Configure glance-sync charm to be either master or slave.""" |
1462 | - master_enabled = hookenv.config('master_mode') |
1463 | - hookenv.log('configuring mode') |
1464 | + master_enabled = hookenv.config("master_mode") |
1465 | + hookenv.log("configuring mode") |
1466 | if master_enabled: |
1467 | - hookenv.status_set('maintenance', |
1468 | - 'Configuring master') |
1469 | - hookenv.log('configuring unit as master') |
1470 | + hookenv.status_set("maintenance", "Configuring master") |
1471 | + hookenv.log("configuring unit as master") |
1472 | perform_slave_cleanup() |
1473 | install_master_sync_script() |
1474 | - hookenv.log('opening TCP port 22') |
1475 | - open_port(22, protocol='TCP') |
1476 | + hookenv.log("opening TCP port 22") |
1477 | + open_port(22, protocol="TCP") |
1478 | else: |
1479 | - hookenv.status_set('maintenance', |
1480 | - 'Configuring slave') |
1481 | - hookenv.log('configuring unit as slave') |
1482 | + hookenv.status_set("maintenance", "Configuring slave") |
1483 | + hookenv.log("configuring unit as slave") |
1484 | perform_master_cleanup() |
1485 | install_slave_sync_script() |
1486 | |
1487 | install_db_cleanup_script() |
1488 | configure_cron() |
1489 | configure_data_dir() |
1490 | - hookenv.status_set('active', 'Unit is ready') |
1491 | + hookenv.status_set("active", "Unit is ready") |
1492 | |
1493 | |
1494 | def perform_slave_cleanup(): |
1495 | @@ -75,29 +69,29 @@ def perform_slave_cleanup(): |
1496 | Cleanup glance-sync slave files, once the charm is set |
1497 | to be the master, after being a slave. |
1498 | """ |
1499 | - data_dir = hookenv.config('data_dir') |
1500 | + data_dir = hookenv.config("data_dir") |
1501 | if os.path.exists(data_dir): |
1502 | shutil.rmtree(data_dir, ignore_errors=True) |
1503 | |
1504 | - script_dir = hookenv.config('script_dir') |
1505 | + script_dir = hookenv.config("script_dir") |
1506 | if os.path.exists(script_dir): |
1507 | shutil.rmtree(script_dir, ignore_errors=True) |
1508 | |
1509 | - config_dir = hookenv.config('config_dir') |
1510 | - novarc_file = os.path.join(config_dir, 'novarc') |
1511 | + config_dir = hookenv.config("config_dir") |
1512 | + novarc_file = os.path.join(config_dir, "novarc") |
1513 | if os.path.isfile(novarc_file): |
1514 | os.remove(novarc_file) |
1515 | |
1516 | - clouds_yaml_dir = '/etc/openstack' |
1517 | + clouds_yaml_dir = "/etc/openstack" |
1518 | if os.path.exists(clouds_yaml_dir): |
1519 | shutil.rmtree(clouds_yaml_dir, ignore_errors=True) |
1520 | |
1521 | - cron_file = '/etc/cron.d/glance_sync_slave' |
1522 | + cron_file = "/etc/cron.d/glance_sync_slave" |
1523 | if os.path.isfile(cron_file): |
1524 | os.remove(cron_file) |
1525 | - clear_flag('cron.configured') |
1526 | + clear_flag("cron.configured") |
1527 | |
1528 | - clear_flag('slave.configured') |
1529 | + clear_flag("slave.configured") |
1530 | |
1531 | |
1532 | def perform_master_cleanup(): |
1533 | @@ -105,168 +99,162 @@ def perform_master_cleanup(): |
1534 | Cleanup glance-sync master files, once the charm is set |
1535 | to be a slave, after being the master. |
1536 | """ |
1537 | - data_dir = hookenv.config('data_dir') |
1538 | + data_dir = hookenv.config("data_dir") |
1539 | if os.path.exists(data_dir): |
1540 | shutil.rmtree(data_dir, ignore_errors=True) |
1541 | |
1542 | - script_dir = hookenv.config('script_dir') |
1543 | + script_dir = hookenv.config("script_dir") |
1544 | if os.path.exists(script_dir): |
1545 | shutil.rmtree(script_dir, ignore_errors=True) |
1546 | |
1547 | - config_dir = hookenv.config('config_dir') |
1548 | - novarc_file = os.path.join(config_dir, 'novarc') |
1549 | + config_dir = hookenv.config("config_dir") |
1550 | + novarc_file = os.path.join(config_dir, "novarc") |
1551 | if os.path.isfile(novarc_file): |
1552 | os.remove(novarc_file) |
1553 | |
1554 | - cron_file = '/etc/cron.d/glance_sync_master' |
1555 | + cron_file = "/etc/cron.d/glance_sync_master" |
1556 | if os.path.isfile(cron_file): |
1557 | os.remove(cron_file) |
1558 | - clear_flag('cron.configured') |
1559 | + clear_flag("cron.configured") |
1560 | |
1561 | - clear_flag('master.configured') |
1562 | + clear_flag("master.configured") |
1563 | |
1564 | |
1565 | -@hook('upgrade-charm') |
1566 | +@hook("upgrade-charm") |
1567 | def upgrade_glance_sync(): |
1568 | """Perform charm upgrade.""" |
1569 | install_glance_sync() |
1570 | |
1571 | |
1572 | -@when('config.changed.config_dir') |
1573 | +@when("config.changed.config_dir") |
1574 | def configure_config_dir(): |
1575 | """Configure 'config_dir' directory, and configure cron.""" |
1576 | - hookenv.status_set('maintenance', 'Configuring') |
1577 | - config_dir = hookenv.config('config_dir') |
1578 | + hookenv.status_set("maintenance", "Configuring") |
1579 | + config_dir = hookenv.config("config_dir") |
1580 | if not os.path.exists(config_dir): |
1581 | os.makedirs(config_dir) |
1582 | - os.chown(config_dir, |
1583 | - pwd.getpwnam('ubuntu').pw_uid, |
1584 | - grp.getgrnam('ubuntu').gr_gid) |
1585 | + os.chown( |
1586 | + config_dir, pwd.getpwnam("ubuntu").pw_uid, grp.getgrnam("ubuntu").gr_gid |
1587 | + ) |
1588 | |
1589 | configure_cron() |
1590 | - hookenv.status_set('active', 'Unit is ready') |
1591 | + hookenv.status_set("active", "Unit is ready") |
1592 | |
1593 | |
1594 | -@when('config.changed.data_dir') |
1595 | +@when("config.changed.data_dir") |
1596 | def configure_data_dir(): |
1597 | """Configure 'data_dir' directory, and configure cron.""" |
1598 | - hookenv.status_set('maintenance', 'Configuring') |
1599 | - data_dir = hookenv.config('data_dir') |
1600 | + hookenv.status_set("maintenance", "Configuring") |
1601 | + data_dir = hookenv.config("data_dir") |
1602 | if not os.path.exists(data_dir): |
1603 | os.makedirs(data_dir) |
1604 | - os.chown(data_dir, |
1605 | - pwd.getpwnam('ubuntu').pw_uid, |
1606 | - grp.getgrnam('ubuntu').gr_gid) |
1607 | + os.chown(data_dir, pwd.getpwnam("ubuntu").pw_uid, grp.getgrnam("ubuntu").gr_gid) |
1608 | |
1609 | configure_cron() |
1610 | - hookenv.status_set('active', 'Unit is ready') |
1611 | + hookenv.status_set("active", "Unit is ready") |
1612 | |
1613 | |
1614 | -@when('config.changed.log_dir') |
1615 | +@when("config.changed.log_dir") |
1616 | def configure_log_dir(): |
1617 | """ |
1618 | Configure 'log_dir' directory, setup lograte for glance-sync |
1619 | log files, and configure cron. |
1620 | """ |
1621 | - hookenv.status_set('maintenance', 'Configuring') |
1622 | - log_dir = hookenv.config('log_dir') |
1623 | + hookenv.status_set("maintenance", "Configuring") |
1624 | + log_dir = hookenv.config("log_dir") |
1625 | if not os.path.exists(log_dir): |
1626 | os.makedirs(log_dir) |
1627 | - os.chown(log_dir, |
1628 | - pwd.getpwnam('ubuntu').pw_uid, |
1629 | - grp.getgrnam('ubuntu').gr_gid) |
1630 | + os.chown(log_dir, pwd.getpwnam("ubuntu").pw_uid, grp.getgrnam("ubuntu").gr_gid) |
1631 | templating.render( |
1632 | - source='logrotate.d.j2', |
1633 | - target='/etc/logrotate.d/glance_sync', |
1634 | - owner='root', |
1635 | - group='root', |
1636 | + source="logrotate.d.j2", |
1637 | + target="/etc/logrotate.d/glance_sync", |
1638 | + owner="root", |
1639 | + group="root", |
1640 | perms=0o644, |
1641 | context=hookenv.config(), |
1642 | ) |
1643 | |
1644 | configure_cron() |
1645 | - hookenv.status_set('active', 'Unit is ready') |
1646 | + hookenv.status_set("active", "Unit is ready") |
1647 | |
1648 | |
1649 | -@when('config.changed.script_dir') |
1650 | +@when("config.changed.script_dir") |
1651 | def configure_script_dir(): |
1652 | """Configure 'script_dir' directory, and configure cron.""" |
1653 | - hookenv.status_set('maintenance', 'Configuring') |
1654 | - script_dir = hookenv.config('script_dir') |
1655 | + hookenv.status_set("maintenance", "Configuring") |
1656 | + script_dir = hookenv.config("script_dir") |
1657 | if not os.path.exists(script_dir): |
1658 | os.makedirs(script_dir) |
1659 | - os.chown(script_dir, |
1660 | - pwd.getpwnam('ubuntu').pw_uid, |
1661 | - grp.getgrnam('ubuntu').gr_gid) |
1662 | + os.chown( |
1663 | + script_dir, pwd.getpwnam("ubuntu").pw_uid, grp.getgrnam("ubuntu").gr_gid |
1664 | + ) |
1665 | |
1666 | configure_cron() |
1667 | - hookenv.status_set('active', 'Unit is ready') |
1668 | + hookenv.status_set("active", "Unit is ready") |
1669 | |
1670 | |
1671 | -@when('config.changed.authorized_keys') |
1672 | +@when("config.changed.authorized_keys") |
1673 | def configure_authorized_keys(): |
1674 | """ |
1675 | Configure authorized_keys file containing command-limited |
1676 | public keys for slave(s). |
1677 | """ |
1678 | - hookenv.status_set('maintenance', 'Configuring') |
1679 | - ssh_conf_dir = '/etc/ssh/authorized-keys' |
1680 | - auth_keys_file = os.path.join(ssh_conf_dir, |
1681 | - 'glance-sync-slaves') |
1682 | - authorized_keys = hookenv.config('authorized_keys') |
1683 | + hookenv.status_set("maintenance", "Configuring") |
1684 | + ssh_conf_dir = "/etc/ssh/authorized-keys" |
1685 | + auth_keys_file = os.path.join(ssh_conf_dir, "glance-sync-slaves") |
1686 | + authorized_keys = hookenv.config("authorized_keys") |
1687 | if authorized_keys: |
1688 | keys_bytestring = base64.b64decode(authorized_keys) |
1689 | - keys = str(keys_bytestring, 'utf-8') |
1690 | + keys = str(keys_bytestring, "utf-8") |
1691 | if not os.path.exists(ssh_conf_dir): |
1692 | os.makedirs(ssh_conf_dir) |
1693 | - os.chown(ssh_conf_dir, |
1694 | - pwd.getpwnam('root').pw_uid, |
1695 | - grp.getgrnam('root').gr_gid) |
1696 | + os.chown( |
1697 | + ssh_conf_dir, pwd.getpwnam("root").pw_uid, grp.getgrnam("root").gr_gid |
1698 | + ) |
1699 | os.chmod(ssh_conf_dir, 0o755) |
1700 | - with open(auth_keys_file, 'w') as f: |
1701 | + with open(auth_keys_file, "w") as f: |
1702 | f.write(keys) |
1703 | os.chmod(auth_keys_file, 0o644) |
1704 | configure_sshd(auth_keys_file) |
1705 | - hookenv.status_set('active', 'Unit is ready') |
1706 | + hookenv.status_set("active", "Unit is ready") |
1707 | else: |
1708 | if os.path.isfile(auth_keys_file): |
1709 | os.remove(auth_keys_file) |
1710 | - hookenv.status_set('active', 'Unit is ready') |
1711 | + hookenv.status_set("active", "Unit is ready") |
1712 | |
1713 | |
1714 | def configure_sshd(keys_file): |
1715 | """Add 'AuthorizedKeysFile' line to '/etc/ssh/sshd_config'.""" |
1716 | - original = '/etc/ssh/sshd_config' |
1717 | - temp_config = '/tmp/sshd_config' |
1718 | + original = "/etc/ssh/sshd_config" |
1719 | + temp_config = "/tmp/sshd_config" |
1720 | old_lines = [] |
1721 | auth_keys_file_added = False |
1722 | auth_keys_file_existed = False |
1723 | - homedirs = '%h/.ssh/authorized_keys' |
1724 | - with open(original, 'r') as old_file: |
1725 | + homedirs = "%h/.ssh/authorized_keys" |
1726 | + with open(original, "r") as old_file: |
1727 | for line in old_file: |
1728 | old_lines.append(line) |
1729 | - with open(temp_config, 'w') as new_file: |
1730 | + with open(temp_config, "w") as new_file: |
1731 | for line in old_lines: |
1732 | - if re.search(r'^AuthorizedKeysFile.*{}.*' |
1733 | - .format(keys_file), line): |
1734 | + if re.search(r"^AuthorizedKeysFile.*{}.*".format(keys_file), line): |
1735 | auth_keys_file_existed = True |
1736 | new_file.write(line) |
1737 | continue |
1738 | else: |
1739 | - replaced = re.sub(r'^AuthorizedKeysFile\s(.*)$', |
1740 | - r'AuthorizedKeysFile \1 {} {}' |
1741 | - .format(homedirs, keys_file), |
1742 | - line) |
1743 | + replaced = re.sub( |
1744 | + r"^AuthorizedKeysFile\s(.*)$", |
1745 | + r"AuthorizedKeysFile \1 {} {}".format(homedirs, keys_file), |
1746 | + line, |
1747 | + ) |
1748 | if replaced is not line: |
1749 | auth_keys_file_added = True |
1750 | new_file.write(replaced) |
1751 | |
1752 | if not auth_keys_file_existed and not auth_keys_file_added: |
1753 | - new_file.write('AuthorizedKeysFile {} {}\n' |
1754 | - .format(homedirs, keys_file)) |
1755 | + new_file.write("AuthorizedKeysFile {} {}\n".format(homedirs, keys_file)) |
1756 | |
1757 | shutil.copy(temp_config, original) |
1758 | - os.system('sudo sshd -t && sudo service ssh reload') |
1759 | + os.system("sudo sshd -t && sudo service ssh reload") |
1760 | |
1761 | |
1762 | def install_slave_sync_script(): |
1763 | @@ -274,22 +262,19 @@ def install_slave_sync_script(): |
1764 | Install slave files, and corresponding directory |
1765 | structure. |
1766 | """ |
1767 | - hookenv.status_set('maintenance', 'Installing') |
1768 | - hookenv.log('installing slave sync script') |
1769 | - script_dir = hookenv.config('script_dir') |
1770 | - files = ['glance_sync_slave.py'] |
1771 | + hookenv.status_set("maintenance", "Installing") |
1772 | + hookenv.log("installing slave sync script") |
1773 | + script_dir = hookenv.config("script_dir") |
1774 | + files = ["glance_sync_slave.py"] |
1775 | for file in files: |
1776 | - source = os.path.join(hookenv.charm_dir(), |
1777 | - 'files', file) |
1778 | + source = os.path.join(hookenv.charm_dir(), "files", file) |
1779 | dest = os.path.join(script_dir, file) |
1780 | if not os.path.exists(os.path.dirname(dest)): |
1781 | os.makedirs(os.path.dirname(dest)) |
1782 | shutil.copy(source, dest) |
1783 | - os.chown(dest, |
1784 | - pwd.getpwnam('ubuntu').pw_uid, |
1785 | - grp.getgrnam('ubuntu').gr_gid) |
1786 | + os.chown(dest, pwd.getpwnam("ubuntu").pw_uid, grp.getgrnam("ubuntu").gr_gid) |
1787 | os.chmod(dest, 0o750) |
1788 | - hookenv.status_set('active', 'Unit is ready') |
1789 | + hookenv.status_set("active", "Unit is ready") |
1790 | |
1791 | |
1792 | def install_master_sync_script(): |
1793 | @@ -297,22 +282,19 @@ def install_master_sync_script(): |
1794 | Install master files, and corresponding directory |
1795 | structure. |
1796 | """ |
1797 | - hookenv.status_set('maintenance', 'Installing') |
1798 | - hookenv.log('installing master sync script') |
1799 | - script_dir = hookenv.config('script_dir') |
1800 | - files = ['glance_sync_master.py'] |
1801 | + hookenv.status_set("maintenance", "Installing") |
1802 | + hookenv.log("installing master sync script") |
1803 | + script_dir = hookenv.config("script_dir") |
1804 | + files = ["glance_sync_master.py"] |
1805 | for file in files: |
1806 | - source = os.path.join(hookenv.charm_dir(), |
1807 | - 'files', file) |
1808 | + source = os.path.join(hookenv.charm_dir(), "files", file) |
1809 | dest = os.path.join(script_dir, file) |
1810 | if not os.path.exists(os.path.dirname(dest)): |
1811 | os.makedirs(os.path.dirname(dest)) |
1812 | shutil.copy(source, dest) |
1813 | - os.chown(dest, |
1814 | - pwd.getpwnam('ubuntu').pw_uid, |
1815 | - grp.getgrnam('ubuntu').gr_gid) |
1816 | + os.chown(dest, pwd.getpwnam("ubuntu").pw_uid, grp.getgrnam("ubuntu").gr_gid) |
1817 | os.chmod(dest, 0o750) |
1818 | - hookenv.status_set('active', 'Unit is ready') |
1819 | + hookenv.status_set("active", "Unit is ready") |
1820 | |
1821 | |
1822 | def install_db_cleanup_script(): |
1823 | @@ -320,102 +302,102 @@ def install_db_cleanup_script(): |
1824 | Install 'db_purge_deleted_glance_images.py' for |
1825 | master or slave. |
1826 | """ |
1827 | - hookenv.status_set('maintenance', 'Installing') |
1828 | - script_dir = hookenv.config('script_dir') |
1829 | - master_enabled = hookenv.config('master_mode') |
1830 | + hookenv.status_set("maintenance", "Installing") |
1831 | + script_dir = hookenv.config("script_dir") |
1832 | + master_enabled = hookenv.config("master_mode") |
1833 | if master_enabled: |
1834 | - mode = 'master' |
1835 | + mode = "master" |
1836 | else: |
1837 | - mode = 'slave' |
1838 | - source = os.path.join(hookenv.charm_dir(), 'files', |
1839 | - 'db_purge_deleted_' + mode, |
1840 | - 'db_purge_deleted_glance_images.py') |
1841 | - dest = os.path.join(script_dir, |
1842 | - 'db_purge_deleted_glance_images.py') |
1843 | + mode = "slave" |
1844 | + source = os.path.join( |
1845 | + hookenv.charm_dir(), |
1846 | + "files", |
1847 | + "db_purge_deleted_" + mode, |
1848 | + "db_purge_deleted_glance_images.py", |
1849 | + ) |
1850 | + dest = os.path.join(script_dir, "db_purge_deleted_glance_images.py") |
1851 | shutil.copy(source, dest) |
1852 | - os.chown(dest, |
1853 | - pwd.getpwnam('ubuntu').pw_uid, |
1854 | - grp.getgrnam('ubuntu').gr_gid) |
1855 | + os.chown(dest, pwd.getpwnam("ubuntu").pw_uid, grp.getgrnam("ubuntu").gr_gid) |
1856 | os.chmod(dest, 0o750) |
1857 | - hookenv.status_set('active', 'Unit is ready') |
1858 | + hookenv.status_set("active", "Unit is ready") |
1859 | |
1860 | |
1861 | -@when('config.changed.master_creds') |
1862 | +@when("config.changed.master_creds") |
1863 | def configure_master_novarc(): |
1864 | """ |
1865 | Get context from 'master_creds'. Put the info into 'clouds.yaml' |
1866 | for the openstacksdk. |
1867 | """ |
1868 | - keystone_creds = config_flags_parser(hookenv.config('master_creds')) |
1869 | - clouds_yaml_dir = '/etc/openstack' |
1870 | + keystone_creds = config_flags_parser(hookenv.config("master_creds")) |
1871 | + clouds_yaml_dir = "/etc/openstack" |
1872 | if not keystone_creds: |
1873 | - hookenv.status_set('blocked', 'Please add master_creds') |
1874 | + hookenv.status_set("blocked", "Please add master_creds") |
1875 | return |
1876 | elif not os.path.exists(clouds_yaml_dir): |
1877 | creds = {} |
1878 | - keystone_creds = config_flags_parser(hookenv.config('master_creds')) |
1879 | - if '/v3' in keystone_creds['auth_url']: |
1880 | + keystone_creds = config_flags_parser(hookenv.config("master_creds")) |
1881 | + if "/v3" in keystone_creds["auth_url"]: |
1882 | creds = { |
1883 | - 'master_username': keystone_creds['username'], |
1884 | - 'master_password': keystone_creds['password'], |
1885 | - 'master_project': keystone_creds['project'], |
1886 | - 'master_region': keystone_creds['region'], |
1887 | - 'master_auth_url': keystone_creds['auth_url'], |
1888 | - 'master_auth_version': '3', |
1889 | - 'master_user_domain': keystone_creds['domain'], |
1890 | - 'master_project_domain': keystone_creds['domain'], |
1891 | + "master_username": keystone_creds["username"], |
1892 | + "master_password": keystone_creds["password"], |
1893 | + "master_project": keystone_creds["project"], |
1894 | + "master_region": keystone_creds["region"], |
1895 | + "master_auth_url": keystone_creds["auth_url"], |
1896 | + "master_auth_version": "3", |
1897 | + "master_user_domain": keystone_creds["domain"], |
1898 | + "master_project_domain": keystone_creds["domain"], |
1899 | } |
1900 | else: |
1901 | creds = { |
1902 | - 'master_username': keystone_creds['username'], |
1903 | - 'master_password': keystone_creds['password'], |
1904 | - 'master_project': keystone_creds['project'], |
1905 | - 'master_region': keystone_creds['region'], |
1906 | - 'master_auth_url': keystone_creds['auth_url'], |
1907 | + "master_username": keystone_creds["username"], |
1908 | + "master_password": keystone_creds["password"], |
1909 | + "master_project": keystone_creds["project"], |
1910 | + "master_region": keystone_creds["region"], |
1911 | + "master_auth_url": keystone_creds["auth_url"], |
1912 | } |
1913 | elif os.path.exists(clouds_yaml_dir): |
1914 | shutil.rmtree(clouds_yaml_dir, ignore_errors=True) |
1915 | creds = {} |
1916 | - keystone_creds = config_flags_parser(hookenv.config('master_creds')) |
1917 | - if '/v3' in keystone_creds['auth_url']: |
1918 | + keystone_creds = config_flags_parser(hookenv.config("master_creds")) |
1919 | + if "/v3" in keystone_creds["auth_url"]: |
1920 | creds = { |
1921 | - 'master_username': keystone_creds['username'], |
1922 | - 'master_password': keystone_creds['password'], |
1923 | - 'master_project': keystone_creds['project'], |
1924 | - 'master_region': keystone_creds['region'], |
1925 | - 'master_auth_url': keystone_creds['auth_url'], |
1926 | - 'master_auth_version': '3', |
1927 | - 'master_user_domain': keystone_creds['domain'], |
1928 | - 'master_project_domain': keystone_creds['domain'], |
1929 | + "master_username": keystone_creds["username"], |
1930 | + "master_password": keystone_creds["password"], |
1931 | + "master_project": keystone_creds["project"], |
1932 | + "master_region": keystone_creds["region"], |
1933 | + "master_auth_url": keystone_creds["auth_url"], |
1934 | + "master_auth_version": "3", |
1935 | + "master_user_domain": keystone_creds["domain"], |
1936 | + "master_project_domain": keystone_creds["domain"], |
1937 | } |
1938 | else: |
1939 | creds = { |
1940 | - 'master_username': keystone_creds['username'], |
1941 | - 'master_password': keystone_creds['password'], |
1942 | - 'master_project': keystone_creds['project'], |
1943 | - 'master_region': keystone_creds['region'], |
1944 | - 'master_auth_url': keystone_creds['auth_url'], |
1945 | + "master_username": keystone_creds["username"], |
1946 | + "master_password": keystone_creds["password"], |
1947 | + "master_project": keystone_creds["project"], |
1948 | + "master_region": keystone_creds["region"], |
1949 | + "master_auth_url": keystone_creds["auth_url"], |
1950 | } |
1951 | - clouds_yaml = os.path.join(clouds_yaml_dir, 'clouds.yaml') |
1952 | + clouds_yaml = os.path.join(clouds_yaml_dir, "clouds.yaml") |
1953 | templating.render( |
1954 | - source='clouds.yaml.j2', |
1955 | + source="clouds.yaml.j2", |
1956 | target=clouds_yaml, |
1957 | - owner='ubuntu', |
1958 | - group='ubuntu', |
1959 | + owner="ubuntu", |
1960 | + group="ubuntu", |
1961 | perms=0o600, |
1962 | context=creds, |
1963 | ) |
1964 | configure_novarc() |
1965 | - hookenv.status_set('active', 'Unit is ready') |
1966 | + hookenv.status_set("active", "Unit is ready") |
1967 | |
1968 | |
1969 | -@when('config.changed.novarc') |
1970 | +@when("config.changed.novarc") |
1971 | def configure_custom_novarc(): |
1972 | """Configure 'novarc' file after config change.""" |
1973 | configure_novarc() |
1974 | |
1975 | |
1976 | -@hook('keystone-admin-relation-{joined,changed}') |
1977 | +@hook("keystone-admin-relation-{joined,changed}") |
1978 | def configure_relation_novarc(relation=None): |
1979 | """ |
1980 | Configure 'novarc' file after adding keystone |
1981 | @@ -424,7 +406,7 @@ def configure_relation_novarc(relation=None): |
1982 | configure_novarc() |
1983 | |
1984 | |
1985 | -@hook('database-relation-{joined,changed}') |
1986 | +@hook("database-relation-{joined,changed}") |
1987 | def configure_relation_glancedb(relation=None): |
1988 | """ |
1989 | Configure 'novarc' file after adding database |
1990 | @@ -438,94 +420,88 @@ def configure_novarc(): |
1991 | Configure 'novarc' file from user supplied custom novarc |
1992 | file, or using keystone and mysql relations. |
1993 | """ |
1994 | - hookenv.status_set('maintenance', 'Configuring') |
1995 | - keystone_relations = hookenv.relations_of_type('keystone-admin') |
1996 | - db_relations = hookenv.relations_of_type('database') |
1997 | - config_dir = hookenv.config('config_dir') |
1998 | - novarc_file = os.path.join(config_dir, 'novarc') |
1999 | - custom_novarc = hookenv.config('novarc') |
2000 | + hookenv.status_set("maintenance", "Configuring") |
2001 | + keystone_relations = hookenv.relations_of_type("keystone-admin") |
2002 | + db_relations = hookenv.relations_of_type("database") |
2003 | + config_dir = hookenv.config("config_dir") |
2004 | + novarc_file = os.path.join(config_dir, "novarc") |
2005 | + custom_novarc = hookenv.config("novarc") |
2006 | if len(keystone_relations) > 0 and len(db_relations) > 0: |
2007 | write_relation_novarc(novarc_file) |
2008 | - elif not custom_novarc == '': |
2009 | + elif not custom_novarc == "": |
2010 | write_custom_novarc(novarc_file) |
2011 | else: |
2012 | - hookenv.log('ERROR: set novarc config or add keystone and ' |
2013 | - 'database relations') |
2014 | + hookenv.log("ERROR: set novarc config or add keystone and database relations") |
2015 | if os.path.isfile(novarc_file): |
2016 | os.remove(novarc_file) |
2017 | - clear_flag('novarc.configured') |
2018 | - hookenv.status_set('blocked', 'Set novarc config or add keystone ' |
2019 | - 'and database relations') |
2020 | + clear_flag("novarc.configured") |
2021 | + hookenv.status_set( |
2022 | + "blocked", "Set novarc config or add keystone and database relations" |
2023 | + ) |
2024 | |
2025 | |
2026 | def write_relation_novarc(path): # noqa: C901 is too complex (14) |
2027 | """Write 'novarc' file.""" |
2028 | - hookenv.status_set('maintenance', 'Configuring novarc') |
2029 | - hookenv.log('configuring novarc based on keystone relation') |
2030 | + hookenv.status_set("maintenance", "Configuring novarc") |
2031 | + hookenv.log("configuring novarc based on keystone relation") |
2032 | # TODO: replace this with some better way to get the master region |
2033 | # name available. Query from the client doesn't have permissions, |
2034 | # it is in 'clouds.yaml'. Possible alternative is to use a cross model |
2035 | # relation. |
2036 | context = {} |
2037 | - clouds_yaml_dir = '/etc/openstack' |
2038 | - master_creds_set = hookenv.config('master_creds') |
2039 | - master_enabled = hookenv.config('master_mode') |
2040 | + clouds_yaml_dir = "/etc/openstack" |
2041 | + master_creds_set = hookenv.config("master_creds") |
2042 | + master_enabled = hookenv.config("master_mode") |
2043 | if master_enabled: |
2044 | - keystone = hookenv.relations_of_type('keystone-admin') |
2045 | + keystone = hookenv.relations_of_type("keystone-admin") |
2046 | if len(keystone) > 0: |
2047 | relation = keystone[0] |
2048 | - if 'service_password' in relation and relation['service_password']: |
2049 | - context['keystone'] = copy(relation) |
2050 | - elif not master_enabled and master_creds_set != '': |
2051 | + if "service_password" in relation and relation["service_password"]: |
2052 | + context["keystone"] = copy(relation) |
2053 | + elif not master_enabled and master_creds_set != "": |
2054 | if not os.path.exists(clouds_yaml_dir): |
2055 | configure_master_novarc() |
2056 | - keystone_creds = config_flags_parser(hookenv |
2057 | - .config('master_creds')) |
2058 | - context['keystone_master_region'] = keystone_creds['region'] |
2059 | - keystone = hookenv.relations_of_type('keystone-admin') |
2060 | + keystone_creds = config_flags_parser(hookenv.config("master_creds")) |
2061 | + context["keystone_master_region"] = keystone_creds["region"] |
2062 | + keystone = hookenv.relations_of_type("keystone-admin") |
2063 | if len(keystone) > 0: |
2064 | - filtered_keystone = ([x for x in keystone |
2065 | - if 'service_password' in x]) |
2066 | + filtered_keystone = [x for x in keystone if "service_password" in x] |
2067 | if len(filtered_keystone) > 0: |
2068 | - context['keystone'] = copy(filtered_keystone[0]) |
2069 | + context["keystone"] = copy(filtered_keystone[0]) |
2070 | else: |
2071 | - if master_creds_set == '': |
2072 | - hookenv.log('master_creds missing') |
2073 | - hookenv.status_set('blocked', |
2074 | - 'Please add master_creds') |
2075 | - mysql = hookenv.relations_of_type('database') |
2076 | + if master_creds_set == "": |
2077 | + hookenv.log("master_creds missing") |
2078 | + hookenv.status_set("blocked", "Please add master_creds") |
2079 | + mysql = hookenv.relations_of_type("database") |
2080 | if len(mysql) > 0: |
2081 | relation = mysql[0] |
2082 | - context['db'] = copy(relation) |
2083 | + context["db"] = copy(relation) |
2084 | if len(context.keys()) == 2: |
2085 | templating.render( |
2086 | - source='novarc_master.j2', |
2087 | + source="novarc_master.j2", |
2088 | target=path, |
2089 | - owner='ubuntu', |
2090 | - group='ubuntu', |
2091 | + owner="ubuntu", |
2092 | + group="ubuntu", |
2093 | perms=0o600, |
2094 | context=context, |
2095 | ) |
2096 | - hookenv.status_set('active', 'Unit is ready') |
2097 | + hookenv.status_set("active", "Unit is ready") |
2098 | elif len(context.keys()) == 3: |
2099 | templating.render( |
2100 | - source='novarc_slave.j2', |
2101 | + source="novarc_slave.j2", |
2102 | target=path, |
2103 | - owner='ubuntu', |
2104 | - group='ubuntu', |
2105 | + owner="ubuntu", |
2106 | + group="ubuntu", |
2107 | perms=0o600, |
2108 | context=context, |
2109 | ) |
2110 | - hookenv.status_set('active', 'Unit is ready') |
2111 | - elif 'db' not in context.keys(): |
2112 | - hookenv.status_set('maintenance', |
2113 | - 'mysql relation incomplete') |
2114 | - elif 'keystone' not in context.keys(): |
2115 | - hookenv.status_set('maintenance', |
2116 | - 'keystone relation incomplete') |
2117 | + hookenv.status_set("active", "Unit is ready") |
2118 | + elif "db" not in context.keys(): |
2119 | + hookenv.status_set("maintenance", "mysql relation incomplete") |
2120 | + elif "keystone" not in context.keys(): |
2121 | + hookenv.status_set("maintenance", "keystone relation incomplete") |
2122 | else: |
2123 | - hookenv.status_set('maintenance', 'keystone and ' |
2124 | - 'mysql relation incomplete') |
2125 | + hookenv.status_set("maintenance", "keystone and mysql relation incomplete") |
2126 | |
2127 | |
2128 | def write_custom_novarc(path): |
2129 | @@ -536,172 +512,164 @@ def write_custom_novarc(path): |
2130 | # To add a custom novarc file, run: |
2131 | # `juju config <glance-sync-application> |
2132 | # novarc=$(base64 -w 0 /path/to/novarc)` |
2133 | - hookenv.status_set('maintenance', |
2134 | - 'Configuring custom novarc') |
2135 | - hookenv.log('configuring custom novarc') |
2136 | - novarc = hookenv.config('novarc') |
2137 | + hookenv.status_set("maintenance", "Configuring custom novarc") |
2138 | + hookenv.log("configuring custom novarc") |
2139 | + novarc = hookenv.config("novarc") |
2140 | novarc_bytestring = base64.b64decode(novarc) |
2141 | - with open(path, 'wb') as f: |
2142 | + with open(path, "wb") as f: |
2143 | f.write(novarc_bytestring) |
2144 | - os.chown(path, |
2145 | - pwd.getpwnam('ubuntu').pw_uid, |
2146 | - grp.getgrnam('ubuntu').gr_gid) |
2147 | + os.chown(path, pwd.getpwnam("ubuntu").pw_uid, grp.getgrnam("ubuntu").gr_gid) |
2148 | os.chmod(path, 0o600) |
2149 | - clouds_yaml_dir = '/etc/openstack' |
2150 | - master_creds_set = hookenv.config('master_creds') |
2151 | - master_enabled = hookenv.config('master_mode') |
2152 | + clouds_yaml_dir = "/etc/openstack" |
2153 | + master_creds_set = hookenv.config("master_creds") |
2154 | + master_enabled = hookenv.config("master_mode") |
2155 | if master_enabled: |
2156 | - hookenv.status_set('active', 'Unit is ready') |
2157 | + hookenv.status_set("active", "Unit is ready") |
2158 | return |
2159 | - elif not master_enabled and master_creds_set != '': |
2160 | + elif not master_enabled and master_creds_set != "": |
2161 | if not os.path.exists(clouds_yaml_dir): |
2162 | configure_master_novarc() |
2163 | - hookenv.status_set('active', 'Unit is ready') |
2164 | + hookenv.status_set("active", "Unit is ready") |
2165 | else: |
2166 | - if master_creds_set == '': |
2167 | - hookenv.log('ERROR: master_creds missing') |
2168 | - hookenv.status_set('blocked', |
2169 | - 'Please add master_creds') |
2170 | + if master_creds_set == "": |
2171 | + hookenv.log("ERROR: master_creds missing") |
2172 | + hookenv.status_set("blocked", "Please add master_creds") |
2173 | |
2174 | |
2175 | -@when('config.changed.sync_source') |
2176 | +@when("config.changed.sync_source") |
2177 | def configure_sync_source(): |
2178 | """Configure cron after 'sync_source' config change.""" |
2179 | configure_cron() |
2180 | |
2181 | |
2182 | -@when_any('config.changed.sync_enabled', |
2183 | - 'config.changed.cron_frequency') |
2184 | +@when_any("config.changed.sync_enabled", "config.changed.cron_frequency") |
2185 | def configure_cron(): |
2186 | """ |
2187 | Configure cron after 'sync_enabled' or |
2188 | 'cron_frequency' config change. |
2189 | """ |
2190 | - hookenv.status_set('maintenance', 'Configuring') |
2191 | - sync_enabled = hookenv.config('sync_enabled') |
2192 | - hookenv.log('configuring sync cronjob') |
2193 | - master_enabled = hookenv.config('master_mode') |
2194 | + hookenv.status_set("maintenance", "Configuring") |
2195 | + sync_enabled = hookenv.config("sync_enabled") |
2196 | + hookenv.log("configuring sync cronjob") |
2197 | + master_enabled = hookenv.config("master_mode") |
2198 | if master_enabled: |
2199 | - cron_file = '/etc/cron.d/glance_sync_master' |
2200 | + cron_file = "/etc/cron.d/glance_sync_master" |
2201 | else: |
2202 | - cron_file = '/etc/cron.d/glance_sync_slave' |
2203 | - if not hookenv.config('sync_source'): |
2204 | - hookenv.log('ERROR: sync_source not set') |
2205 | - hookenv.status_set('blocked', 'Please set a ' |
2206 | - 'sync_source to configure crontab') |
2207 | + cron_file = "/etc/cron.d/glance_sync_slave" |
2208 | + if not hookenv.config("sync_source"): |
2209 | + hookenv.log("ERROR: sync_source not set") |
2210 | + hookenv.status_set( |
2211 | + "blocked", "Please set a sync_source to configure crontab" |
2212 | + ) |
2213 | return |
2214 | |
2215 | if sync_enabled and master_enabled: |
2216 | - hookenv.log('adding cronjob') |
2217 | + hookenv.log("adding cronjob") |
2218 | templating.render( |
2219 | - source='glance_sync_master_cron.j2', |
2220 | + source="glance_sync_master_cron.j2", |
2221 | target=cron_file, |
2222 | - owner='root', |
2223 | - group='root', |
2224 | + owner="root", |
2225 | + group="root", |
2226 | perms=0o640, |
2227 | context=hookenv.config(), |
2228 | ) |
2229 | elif sync_enabled and not master_enabled: |
2230 | # Just an alias to make sure that the paths are as expected. |
2231 | context = hookenv.config() |
2232 | - if context['sync_source'][-1] != '/': |
2233 | - context['sync_source'] += '/' |
2234 | - if context['data_dir'][-1] != '/': |
2235 | - context['data_dir'] += '/' |
2236 | - hookenv.log('adding cronjob') |
2237 | + if context["sync_source"][-1] != "/": |
2238 | + context["sync_source"] += "/" |
2239 | + if context["data_dir"][-1] != "/": |
2240 | + context["data_dir"] += "/" |
2241 | + hookenv.log("adding cronjob") |
2242 | templating.render( |
2243 | - source='glance_sync_slave_cron.j2', |
2244 | + source="glance_sync_slave_cron.j2", |
2245 | target=cron_file, |
2246 | - owner='root', |
2247 | - group='root', |
2248 | + owner="root", |
2249 | + group="root", |
2250 | perms=0o640, |
2251 | context=hookenv.config(), |
2252 | ) |
2253 | else: |
2254 | - hookenv.log('removing cronjob') |
2255 | + hookenv.log("removing cronjob") |
2256 | if os.path.isfile(cron_file): |
2257 | os.remove(cron_file) |
2258 | - clear_flag('cron.configured') |
2259 | + clear_flag("cron.configured") |
2260 | configure_novarc() |
2261 | - hookenv.status_set('active', 'Unit is ready') |
2262 | + hookenv.status_set("active", "Unit is ready") |
2263 | |
2264 | |
2265 | -@hook('nrpe-external-master-relation-changed') |
2266 | +@hook("nrpe-external-master-relation-changed") |
2267 | def setup_nrpe_checks(nagios): |
2268 | """Configure NRPE checks.""" |
2269 | - hookenv.status_set('maintenance', 'Configuring nrpe checks') |
2270 | + hookenv.status_set("maintenance", "Configuring nrpe checks") |
2271 | config = hookenv.config() |
2272 | - modes = ['slave', 'master'] |
2273 | + modes = ["slave", "master"] |
2274 | |
2275 | for mode in modes: |
2276 | nagios.add_check( |
2277 | [ |
2278 | - '/usr/lib/nagios/plugins/check_file_age', |
2279 | - '-w 14400', '-c 25200', '-i', '-f', |
2280 | - os.path.join( |
2281 | - config['log_dir'], |
2282 | - 'glance_sync_' + mode + '.log' |
2283 | - ), |
2284 | + "/usr/lib/nagios/plugins/check_file_age", |
2285 | + "-w 14400", |
2286 | + "-c 25200", |
2287 | + "-i", |
2288 | + "-f", |
2289 | + os.path.join(config["log_dir"], "glance_sync_" + mode + ".log"), |
2290 | ], |
2291 | - name='glance_sync_' + mode + '_log', |
2292 | - description=( |
2293 | - 'Verify age of last image sync ' |
2294 | - 'from glance to disk' |
2295 | - ), |
2296 | - context=config['nagios_context'], |
2297 | + name="glance_sync_" + mode + "_log", |
2298 | + description=("Verify age of last image sync from glance to disk"), |
2299 | + context=config["nagios_context"], |
2300 | unit=hookenv.local_unit(), |
2301 | ) |
2302 | |
2303 | # Copy nrpe check plugin for stale lockfiles in place. |
2304 | - script_dir = hookenv.config('script_dir') |
2305 | - files = ['check_stale_lockfile_slave.py', |
2306 | - 'check_stale_lockfile_master.py'] |
2307 | - hookenv.log('installing stale lockfile nrpe plugin') |
2308 | + script_dir = hookenv.config("script_dir") |
2309 | + files = ["check_stale_lockfile_slave.py", "check_stale_lockfile_master.py"] |
2310 | + hookenv.log("installing stale lockfile nrpe plugin") |
2311 | for file in files: |
2312 | - source = os.path.join(hookenv.charm_dir(), 'files', file) |
2313 | + source = os.path.join(hookenv.charm_dir(), "files", file) |
2314 | dest = os.path.join(script_dir, file) |
2315 | if not os.path.exists(os.path.dirname(dest)): |
2316 | os.makedirs(os.path.dirname(dest)) |
2317 | shutil.copy(source, dest) |
2318 | - os.chown(dest, |
2319 | - pwd.getpwnam('ubuntu').pw_uid, |
2320 | - grp.getgrnam('ubuntu').gr_gid) |
2321 | + os.chown(dest, pwd.getpwnam("ubuntu").pw_uid, grp.getgrnam("ubuntu").gr_gid) |
2322 | os.chmod(dest, 0o755) |
2323 | |
2324 | for mode in modes: |
2325 | - nrpe_plugin = os.path.join(script_dir, |
2326 | - 'check_stale_lockfile_' + # noqa:W504 |
2327 | - mode + '.py') |
2328 | + nrpe_plugin = os.path.join( |
2329 | + script_dir, "check_stale_lockfile_" + mode + ".py" # noqa:W504 |
2330 | + ) |
2331 | nagios.add_check( |
2332 | [ |
2333 | nrpe_plugin, |
2334 | - '-f', '/tmp/glance_sync_' + mode + '.lock', |
2335 | - '-w 72000', '-c 14400', |
2336 | + "-f", |
2337 | + "/tmp/glance_sync_" + mode + ".lock", |
2338 | + "-w 72000", |
2339 | + "-c 14400", |
2340 | ], |
2341 | - name='glance_sync_' + mode + '_lockfile', |
2342 | - description='Verify age of image sync lockfile', |
2343 | - context=config['nagios_context'], |
2344 | + name="glance_sync_" + mode + "_lockfile", |
2345 | + description="Verify age of image sync lockfile", |
2346 | + context=config["nagios_context"], |
2347 | unit=hookenv.local_unit(), |
2348 | ) |
2349 | |
2350 | - hookenv.status_set('active', 'Unit is ready') |
2351 | + hookenv.status_set("active", "Unit is ready") |
2352 | |
2353 | |
2354 | -@when('config.changed.trusted_ssl_ca') |
2355 | +@when("config.changed.trusted_ssl_ca") |
2356 | def fix_ssl(): |
2357 | """ |
2358 | Write user supplied SSL CA from 'trusted_ssl_ca' to |
2359 | 'cert_file', and run `update-ca-certificates`. |
2360 | """ |
2361 | - cert_file = '/usr/local/share/ca-certificates/openstack-service-checks.crt' |
2362 | + cert_file = "/usr/local/share/ca-certificates/openstack-service-checks.crt" |
2363 | config = hookenv.config() |
2364 | - trusted_ssl_ca = config.get('trusted_ssl_ca').strip() |
2365 | - hookenv.log('writing ssl ca cert:{}'.format(trusted_ssl_ca)) |
2366 | + trusted_ssl_ca = config.get("trusted_ssl_ca").strip() |
2367 | + hookenv.log("writing ssl ca cert:{}".format(trusted_ssl_ca)) |
2368 | cert_content = base64.b64decode(trusted_ssl_ca).decode() |
2369 | - with open(cert_file, 'w') as f: |
2370 | + with open(cert_file, "w") as f: |
2371 | print(cert_content, file=f) |
2372 | try: |
2373 | - subprocess.call(['/usr/sbin/update-ca-certificates']) |
2374 | + subprocess.call(["/usr/sbin/update-ca-certificates"]) |
2375 | except subprocess.CalledProcessError as e: |
2376 | - hookenv.log('ERROR: fix_ssl() failed with {}'.format(e)) |
2377 | - hookenv.status_set('error', 'CA cert update failed') |
2378 | + hookenv.log("ERROR: fix_ssl() failed with {}".format(e)) |
2379 | + hookenv.status_set("error", "CA cert update failed") |
2380 | diff --git a/src/tests/functional/tests/test_glance_sync.py b/src/tests/functional/tests/test_glance_sync.py |
2381 | index e03511b..0d6e981 100644 |
2382 | --- a/src/tests/functional/tests/test_glance_sync.py |
2383 | +++ b/src/tests/functional/tests/test_glance_sync.py |
2384 | @@ -48,9 +48,10 @@ class BaseGlanceSyncTest(unittest.TestCase): |
2385 | ) |
2386 | b64_ssh_key = base64.b64encode(cls.slave_ssh_key.encode()) |
2387 | master_config = {"authorized_keys": b64_ssh_key.decode()} |
2388 | - slave_config = {"master_creds": master_creds, |
2389 | - "sync_source": "ubuntu@{}:/srv/glance_sync/data".format(cls.master_ip), |
2390 | - } |
2391 | + slave_config = { |
2392 | + "master_creds": master_creds, |
2393 | + "sync_source": "ubuntu@{}:/srv/glance_sync/data".format(cls.master_ip), |
2394 | + } |
2395 | model.set_application_config(cls.master_app, master_config) |
2396 | model.set_application_config(cls.slave_app, slave_config) |
2397 | |
2398 | @@ -72,23 +73,24 @@ class CharmOperationTest(BaseGlanceSyncTest): |
2399 | # Upload a Glance image to use for test, doesn't need to be big or even real |
2400 | openstack.enable_logging(debug=True) |
2401 | conn_master = openstack.connection.Connection( |
2402 | - region_name='RegionOne', |
2403 | + region_name="RegionOne", |
2404 | auth=dict( |
2405 | - auth_url='http://{}:35357/v3'.format(self.keystone_master_ip), |
2406 | - username='admin', |
2407 | + auth_url="http://{}:35357/v3".format(self.keystone_master_ip), |
2408 | + username="admin", |
2409 | password=self.master_password, |
2410 | - project_name='admin', |
2411 | - user_domain_name='admin_domain', |
2412 | - project_domain_name='admin_domain', |
2413 | + project_name="admin", |
2414 | + user_domain_name="admin_domain", |
2415 | + project_domain_name="admin_domain", |
2416 | ), |
2417 | - compute_api_version='2', |
2418 | - identity_interface='public') |
2419 | + compute_api_version="2", |
2420 | + identity_interface="public", |
2421 | + ) |
2422 | image_attrs = { |
2423 | - 'name': 'zaza_test_image', |
2424 | - 'data': 'some_test_data', |
2425 | - 'disk_format': 'raw', |
2426 | - 'container_format': 'bare', |
2427 | - 'visibility': 'public', |
2428 | + "name": "zaza_test_image", |
2429 | + "data": "some_test_data", |
2430 | + "disk_format": "raw", |
2431 | + "container_format": "bare", |
2432 | + "visibility": "public", |
2433 | } |
2434 | self.image = conn_master.image.upload_image(**image_attrs) |
2435 | # Has image.id, image.created, image.name for future use |
2436 | @@ -97,21 +99,22 @@ class CharmOperationTest(BaseGlanceSyncTest): |
2437 | def test_02_check_slave(self): |
2438 | """Check that the previously uploaded image lands on the slave. |
2439 | |
2440 | - Run a loop till timeout, that checks Glance in the slave region for the presence of the |
2441 | - image uploaded to the master. |
2442 | + Run a loop until timeout, that checks glance in the slave region |
2443 | + for the presence of the image uploaded to the master. |
2444 | """ |
2445 | conn_slave = openstack.connection.Connection( |
2446 | - region_name='RegionOne', |
2447 | + region_name="RegionOne", |
2448 | auth=dict( |
2449 | - auth_url='http://{}:35357/v3'.format(self.keystone_slave_ip), |
2450 | - username='admin', |
2451 | + auth_url="http://{}:35357/v3".format(self.keystone_slave_ip), |
2452 | + username="admin", |
2453 | password=self.slave_password, |
2454 | - project_name='admin', |
2455 | - user_domain_name='admin_domain', |
2456 | - project_domain_name='admin_domain', |
2457 | + project_name="admin", |
2458 | + user_domain_name="admin_domain", |
2459 | + project_domain_name="admin_domain", |
2460 | ), |
2461 | - compute_api_version='2', |
2462 | - identity_interface='public') |
2463 | + compute_api_version="2", |
2464 | + identity_interface="public", |
2465 | + ) |
2466 | timeout = time.time() + TEST_TIMEOUT |
2467 | while time.time() < timeout: |
2468 | image_list = conn_slave.get_image("zaza_test_image") |
2469 | diff --git a/src/tests/unit/__init__.py b/src/tests/unit/__init__.py |
2470 | index 03acc40..28e9795 100644 |
2471 | --- a/src/tests/unit/__init__.py |
2472 | +++ b/src/tests/unit/__init__.py |
2473 | @@ -1,2 +1,3 @@ |
2474 | import sys |
2475 | -sys.path.append('.') |
2476 | + |
2477 | +sys.path.append(".") |
2478 | diff --git a/src/tox.ini b/src/tox.ini |
2479 | index cd1b1f7..45ced91 100644 |
2480 | --- a/src/tox.ini |
2481 | +++ b/src/tox.ini |
2482 | @@ -25,7 +25,7 @@ passenv = |
2483 | [testenv:lint] |
2484 | commands = |
2485 | flake8 |
2486 | -#TODO black --check --exclude "/(\.eggs|\.git|\.tox|\.venv|\.build|dist|charmhelpers|mod)/" . |
2487 | + black --check --exclude "/(\.eggs|\.git|\.tox|\.venv|\.build|dist|charmhelpers|mod)/" . |
2488 | deps = |
2489 | black |
2490 | flake8 |
2491 | @@ -45,8 +45,7 @@ exclude = |
2492 | mod, |
2493 | .build |
2494 | |
2495 | -max-line-length = 120 |
2496 | -#TODO max-line-length = 88 |
2497 | +max-line-length = 88 |
2498 | max-complexity = 10 |
2499 | |
2500 | [testenv:black] |
LGTM