Merge lp:~stub/charms/precise/postgresql/cleanups into lp:charms/postgresql
- Precise Pangolin (12.04)
- cleanups
- Merge into trunk
Proposed by
Stuart Bishop
Status: Merged
Merged at revision: 91
Proposed branch: lp:~stub/charms/precise/postgresql/cleanups
Merge into: lp:charms/postgresql
Prerequisite: lp:~stub/charms/precise/postgresql/charm-helpers
Diff against target: 432 lines (+185/-172), 4 files modified:
  config.yaml (+2/-2), hooks/hooks.py (+23/-18), scripts/pgbackup.py (+160/-0), templates/dump-pg-db.tmpl (+0/-152)
To merge this branch: bzr merge lp:~stub/charms/precise/postgresql/cleanups
Related bugs: (none)
Reviewer: Marco Ceppi (community) — Approve
Review via email: mp+213457@code.launchpad.net
Commit message
Description of the change
Some minor cleanups.
The bulk of this MP is updating the backup script, pulling in a more recent copy from our internal branches.
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'config.yaml' |
2 | --- config.yaml 2014-02-13 12:16:30 +0000 |
3 | +++ config.yaml 2014-03-31 13:35:53 +0000 |
4 | @@ -347,13 +347,13 @@ |
5 | List of extra package sources, per charm-helpers standard. |
6 | YAML format. |
7 | type: string |
8 | - default: "" |
9 | + default: null |
10 | install_keys: |
11 | description: | |
12 | List of signing keys for install_sources package sources, per |
13 | charmhelpers standard. YAML format. |
14 | type: string |
15 | - default: "" |
16 | + default: null |
17 | extra_archives: |
18 | default: "" |
19 | type: string |
20 | |
21 | === modified file 'hooks/hooks.py' |
22 | --- hooks/hooks.py 2014-02-13 13:38:07 +0000 |
23 | +++ hooks/hooks.py 2014-03-31 13:35:53 +0000 |
24 | @@ -408,6 +408,22 @@ |
25 | yield line.split()[s] |
26 | |
27 | |
28 | +def createcluster(): |
29 | + with switch_cwd('/tmp'): # Ensure cwd is readable as the postgres user |
30 | + create_cmd = [ |
31 | + "pg_createcluster", |
32 | + "--locale", hookenv.config('locale'), |
33 | + "-e", hookenv.config('encoding')] |
34 | + if hookenv.config('listen_port'): |
35 | + create_cmd.extend(["-p", str(hookenv.config('listen_port'))]) |
36 | + create_cmd.append(pg_version()) |
37 | + create_cmd.append(hookenv.config('cluster_name')) |
38 | + run(create_cmd) |
39 | + # Ensure SSL certificates exist, as we enable SSL by default. |
40 | + create_ssl_cert(os.path.join( |
41 | + postgresql_data_dir, pg_version(), hookenv.config('cluster_name'))) |
42 | + |
43 | + |
44 | def _get_system_ram(): |
45 | """ Return the system ram in Megabytes """ |
46 | import psutil |
47 | @@ -710,7 +726,7 @@ |
48 | |
49 | |
50 | def create_ssl_cert(cluster_dir): |
51 | - # Debian by default expects SSL certificates in the datadir. |
52 | + # PostgreSQL expects SSL certificates in the datadir. |
53 | server_crt = os.path.join(cluster_dir, 'server.crt') |
54 | server_key = os.path.join(cluster_dir, 'server.key') |
55 | if not os.path.exists(server_crt): |
56 | @@ -1038,16 +1054,7 @@ |
57 | port_opt = "--port={}".format(config_data['listen_port']) |
58 | else: |
59 | port_opt = '' |
60 | - with switch_cwd('/tmp'): |
61 | - create_cmd = [ |
62 | - "pg_createcluster", |
63 | - "--locale", config_data['locale'], |
64 | - "-e", config_data['encoding']] |
65 | - if listen_port: |
66 | - create_cmd.extend(["-p", str(config_data['listen_port'])]) |
67 | - create_cmd.append(pg_version()) |
68 | - create_cmd.append(config_data['cluster_name']) |
69 | - run(create_cmd) |
70 | + createcluster() |
71 | assert ( |
72 | not port_opt |
73 | or get_service_port() == config_data['listen_port']), ( |
74 | @@ -1070,15 +1077,13 @@ |
75 | 'logs_dir': postgresql_logs_dir, |
76 | } |
77 | charm_dir = hookenv.charm_dir() |
78 | - template_file = "{}/templates/dump-pg-db.tmpl".format(charm_dir) |
79 | - dump_script = Template(open(template_file).read()).render(paths) |
80 | template_file = "{}/templates/pg_backup_job.tmpl".format(charm_dir) |
81 | backup_job = Template(open(template_file).read()).render(paths) |
82 | host.write_file( |
83 | - '{}/dump-pg-db'.format(postgresql_scripts_dir), |
84 | - dump_script, perms=0755) |
85 | + os.path.join(postgresql_scripts_dir, 'dump-pg-db'), |
86 | + open('scripts/pgbackup.py', 'r').read(), perms=0o755) |
87 | host.write_file( |
88 | - '{}/pg_backup_job'.format(postgresql_scripts_dir), |
89 | + os.path.join(postgresql_scripts_dir, 'pg_backup_job'), |
90 | backup_job, perms=0755) |
91 | install_postgresql_crontab(postgresql_crontab) |
92 | hookenv.open_port(get_service_port()) |
93 | @@ -2049,7 +2054,7 @@ |
94 | # Clone the master with pg_basebackup. |
95 | output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) |
96 | log(output, DEBUG) |
97 | - # Debian by default expects SSL certificates in the datadir. |
98 | + # SSL certificates need to exist in the datadir. |
99 | create_ssl_cert(postgresql_cluster_dir) |
100 | create_recovery_conf(master_host, master_port) |
101 | except subprocess.CalledProcessError as x: |
102 | @@ -2064,7 +2069,7 @@ |
103 | shutil.rmtree(postgresql_cluster_dir) |
104 | if os.path.exists(postgresql_config_dir): |
105 | shutil.rmtree(postgresql_config_dir) |
106 | - run('pg_createcluster {} main'.format(version)) |
107 | + createcluster() |
108 | config_changed() |
109 | raise |
110 | finally: |
111 | |
112 | === added file 'scripts/pgbackup.py' |
113 | --- scripts/pgbackup.py 1970-01-01 00:00:00 +0000 |
114 | +++ scripts/pgbackup.py 2014-03-31 13:35:53 +0000 |
115 | @@ -0,0 +1,160 @@ |
116 | +#!/usr/bin/python |
117 | + |
118 | +# Copyright 2008-2014 Canonical Ltd. All rights reserved. |
119 | + |
120 | +""" |
121 | +Backup one or more PostgreSQL databases. |
122 | + |
123 | +Suitable for use in crontab for daily backups. |
124 | +""" |
125 | + |
126 | +__metaclass__ = type |
127 | +__all__ = [] |
128 | + |
129 | +import sys |
130 | +import os |
131 | +import os.path |
132 | +import stat |
133 | +import logging |
134 | +import commands |
135 | +from datetime import datetime |
136 | +from optparse import OptionParser |
137 | + |
138 | +MB = float(1024 * 1024) |
139 | + |
140 | + |
141 | +def main(options, databases): |
142 | + #Need longer file names if this is used more than daily |
143 | + #today = datetime.now().strftime('%Y%m%d_%H:%M:%S') |
144 | + today = datetime.now().strftime('%Y%m%d') |
145 | + |
146 | + backup_dir = options.backup_dir |
147 | + rv = 0 |
148 | + |
149 | + for database in databases: |
150 | + dest = os.path.join(backup_dir, '%s.%s.dump' % (database, today)) |
151 | + |
152 | + # base cmd setup; to be modified per the compression desired |
153 | + cmd = " ".join([ |
154 | + "/usr/bin/pg_dump", |
155 | + "-U", "postgres", |
156 | + "--format=c", |
157 | + "--blobs", |
158 | + ]) |
159 | + |
160 | + # alter the cmd to be used based on compression chosen |
161 | + if options.compression_cmd == 'postgres': |
162 | + cmd = " ".join([ |
163 | + cmd, |
164 | + "--compress=%d" % options.compression_level |
165 | + if options.compression_level else "", |
166 | + "--file=%s" % dest, |
167 | + database]) |
168 | + elif options.compression_cmd == 'none': |
169 | + cmd = " ".join([ |
170 | + cmd, |
171 | + "--compress=0", |
172 | + "--file=%s" % dest, |
173 | + database]) |
174 | + else: |
175 | + ext_map = dict( |
176 | + gzip='.gz', pigz='.gz', bzip2='.bz2', |
177 | + pixz='.xz', xz='.xz') |
178 | + dest = dest + ext_map[options.compression_cmd] |
179 | + compression_level_arg = '' |
180 | + if options.compression_level: |
181 | + compression_level_arg = '-%d' % options.compression_level |
182 | + compression_procs_arg = '' |
183 | + if options.processes: |
184 | + compression_procs_arg = '-p %d' % options.processes |
185 | + |
186 | + compression_cmd = options.compression_cmd |
187 | + if options.compression_cmd != 'pixz': |
188 | + compression_cmd = compression_cmd + ' -c' |
189 | + |
190 | + cmd = " ".join([ |
191 | + cmd, "--compress=0", database, "|", compression_cmd, |
192 | + compression_level_arg, compression_procs_arg, |
193 | + ">", dest]) |
194 | + |
195 | + # If the file already exists, it is from an older dump today. |
196 | + # We don't know if it was successful or not, so abort on this |
197 | + # dump. Leave for operator intervention |
198 | + if os.path.exists(dest): |
199 | + log.error("%s already exists. Skipping." % dest) |
200 | + continue |
201 | + |
202 | + (rv, outtext) = commands.getstatusoutput(cmd) |
203 | + if rv != 0: |
204 | + log.critical("Failed to backup %s (%d)" % (database, rv)) |
205 | + log.critical(outtext) |
206 | + continue |
207 | + |
208 | + size = os.stat(dest)[stat.ST_SIZE] |
209 | + log.info("Backed up %s (%0.2fMB)" % (database, size / MB)) |
210 | + |
211 | + return rv |
212 | + |
213 | +if __name__ == '__main__': |
214 | + valid_compression_cmd = ['none'] + sorted([ |
215 | + "gzip", "bzip2", "postgres", "pigz", "xz", "pixz"]) |
216 | + multiproc_compression_cmd = ["pigz", "pixz"] |
217 | + |
218 | + parser = OptionParser( |
219 | + usage="usage: %prog [options] database [database ..]") |
220 | + parser.add_option( |
221 | + "-v", "--verbose", dest="verbose", default=0, action="count") |
222 | + parser.add_option( |
223 | + "-q", "--quiet", dest="quiet", default=0, action="count") |
224 | + parser.add_option( |
225 | + "-d", "--dir", dest="backup_dir", |
226 | + default="/var/lib/postgresql/backups") |
227 | + parser.add_option( |
228 | + "-z", "--compression", dest="compression_cmd", metavar='COMP_CMD', |
229 | + default="gzip", |
230 | + help='Compression tool [{}]'.format(', '.join(valid_compression_cmd))) |
231 | + parser.add_option( |
232 | + "-l", "--compression-level", type=int, metavar='N', |
233 | + dest="compression_level", default=None) |
234 | + parser.add_option( |
235 | + "-p", "--processes", type=int, dest="processes", default=None, |
236 | + metavar="N", |
237 | + help="Number of compression threads, if supported by COMP_CMD") |
238 | + (options, databases) = parser.parse_args() |
239 | + if len(databases) == 0: |
240 | + parser.error("must specify at least one database") |
241 | + if not os.path.isdir(options.backup_dir): |
242 | + parser.error( |
243 | + "Incorrect --dir. %s does not exist or is not a directory" % ( |
244 | + options.backup_dir)) |
245 | + if options.compression_cmd not in valid_compression_cmd: |
246 | + parser.error( |
247 | + "The compression command must be one of: " + ", ".join( |
248 | + valid_compression_cmd)) |
249 | + if options.compression_level is not None and not ( |
250 | + 1 <= options.compression_level <= 9): |
251 | + parser.error( |
252 | + "The compression level must be between 1 and 9: %s" % |
253 | + options.compression_level) |
254 | + if options.processes and ( |
255 | + options.compression_cmd not in multiproc_compression_cmd): |
256 | + parser.error( |
257 | + options.compression_cmd + " does not support multiple processes") |
258 | + |
259 | + # Setup our log |
260 | + log = logging.getLogger('pgbackup') |
261 | + hdlr = logging.StreamHandler(sys.stderr) |
262 | + hdlr.setFormatter(logging.Formatter( |
263 | + fmt='%(asctime)s %(levelname)s %(message)s')) |
264 | + log.addHandler(hdlr) |
265 | + verbosity = options.verbose - options.quiet |
266 | + if verbosity > 0: |
267 | + log.setLevel(logging.DEBUG) |
268 | + elif verbosity == 0: # Default |
269 | + log.setLevel(logging.INFO) |
270 | + elif verbosity == -1: |
271 | + log.setLevel(logging.WARN) |
272 | + elif verbosity < -1: |
273 | + log.setLevel(logging.ERROR) |
274 | + |
275 | + sys.exit(main(options, databases)) |
276 | |
277 | === removed file 'templates/dump-pg-db.tmpl' |
278 | --- templates/dump-pg-db.tmpl 2012-10-15 21:33:06 +0000 |
279 | +++ templates/dump-pg-db.tmpl 1970-01-01 00:00:00 +0000 |
280 | @@ -1,152 +0,0 @@ |
281 | -#!/usr/bin/python |
282 | - |
283 | -# Copyright 2008-2011 Canonical Ltd. All rights reserved. |
284 | - |
285 | -""" |
286 | -Backup one or more PostgreSQL databases. |
287 | - |
288 | -Suitable for use in crontab for daily backups. |
289 | -""" |
290 | - |
291 | -__metaclass__ = type |
292 | -__all__ = [] |
293 | - |
294 | -import sys |
295 | -import os |
296 | -import os.path |
297 | -import stat |
298 | -import logging |
299 | -import commands |
300 | -from datetime import datetime |
301 | -from optparse import OptionParser |
302 | - |
303 | -MB = float(1024 * 1024) |
304 | - |
305 | - |
306 | -def main(options, databases): |
307 | - #Need longer file names if this is used more than daily |
308 | - #today = datetime.now().strftime('%Y%m%d_%H:%M:%S') |
309 | - today = datetime.now().strftime('%Y%m%d') |
310 | - |
311 | - backup_dir = options.backup_dir |
312 | - rv = 0 |
313 | - |
314 | - for database in databases: |
315 | - dest = os.path.join(backup_dir, '%s.%s.dump' % (database, today)) |
316 | - |
317 | - # base cmd setup; to be modified per the compression desired |
318 | - cmd = " ".join([ |
319 | - "/usr/bin/pg_dump", |
320 | - "-U", "postgres", |
321 | - # Need to exclude slony schema from dumps, as read lock on |
322 | - # event table blocks exclusive lock wanted by slony. And it |
323 | - # normally only causes trouble anyway. |
324 | - "--exclude-schema=_sl", |
325 | - "--format=c", |
326 | - "--blobs", |
327 | - ]) |
328 | - |
329 | - # alter the cmd to be used based on compression chosen |
330 | - if options.compression_cmd == 'gzip': |
331 | - dest = dest + ".gz" |
332 | - cmd = " ".join([ |
333 | - cmd, |
334 | - "--compress=0", |
335 | - database, |
336 | - "| gzip -c%d" % options.compression_level, |
337 | - ">", dest, |
338 | - ]) |
339 | - elif options.compression_cmd == 'bzip2': |
340 | - dest = dest + ".bz2" |
341 | - cmd = " ".join([ |
342 | - cmd, |
343 | - "--compress=0", |
344 | - database, |
345 | - "| bzip2 -c%d" % options.compression_level, |
346 | - ">", dest, |
347 | - ]) |
348 | - elif options.compression_cmd == 'postgres': |
349 | - cmd = " ".join([ |
350 | - cmd, |
351 | - "--compress=%d" % options.compression_level, |
352 | - "--file=%s" % dest, |
353 | - database, |
354 | - ]) |
355 | - else: # none |
356 | - cmd = " ".join([ |
357 | - cmd, |
358 | - "--compress=0", |
359 | - "--file=%s" % dest, |
360 | - database, |
361 | - ]) |
362 | - |
363 | - # If the file already exists, it is from an older dump today. |
364 | - # We don't know if it was successful or not, so abort on this |
365 | - # dump. Leave for operator intervention |
366 | - if os.path.exists(dest): |
367 | - log.error("%s already exists. Skipping." % dest) |
368 | - continue |
369 | - |
370 | - (rv, outtext) = commands.getstatusoutput(cmd) |
371 | - if rv != 0: |
372 | - log.critical("Failed to backup %s (%d)" % (database, rv)) |
373 | - log.critical(outtext) |
374 | - continue |
375 | - |
376 | - size = os.stat(dest)[stat.ST_SIZE] |
377 | - log.info("Backed up %s (%0.2fMB)" % (database, size / MB)) |
378 | - |
379 | - return rv |
380 | - |
381 | -if __name__ == '__main__': |
382 | - parser = OptionParser( |
383 | - usage="usage: %prog [options] database [database ..]" |
384 | - ) |
385 | - parser.add_option("-v", "--verbose", dest="verbose", default=0, |
386 | - action="count") |
387 | - parser.add_option("-q", "--quiet", dest="quiet", default=0, |
388 | - action="count") |
389 | - parser.add_option("-d", "--dir", dest="backup_dir", |
390 | - default="/var/lib/postgres/backups") |
391 | - parser.add_option("-z", "--compression", dest="compression_cmd", |
392 | - default="gzip") |
393 | - parser.add_option("-l", "--compression-level", type=int, |
394 | - dest="compression_level", default=6) |
395 | - (options, databases) = parser.parse_args() |
396 | - if len(databases) == 0: |
397 | - parser.error("must specify at least one database") |
398 | - if not os.path.isdir(options.backup_dir): |
399 | - parser.error( |
400 | - "Incorrect --dir. %s does not exist or is not a directory" % ( |
401 | - options.backup_dir |
402 | - ) |
403 | - ) |
404 | - valid_compression_cmd = ["gzip", "bzip2", "postgres", "none"] |
405 | - if options.compression_cmd not in valid_compression_cmd: |
406 | - parser.error( |
407 | - "The compression command must be one of: " + " ".join(valid_compression_cmd) |
408 | - ) |
409 | - if options.compression_level < 1 or options.compression_level > 9: |
410 | - parser.error( |
411 | - "The compression level must be between 1 and 9: %s" % |
412 | - options.compression_level |
413 | - ) |
414 | - |
415 | - # Setup our log |
416 | - log = logging.getLogger('pgbackup') |
417 | - hdlr = logging.StreamHandler(sys.stderr) |
418 | - hdlr.setFormatter(logging.Formatter( |
419 | - fmt='%(asctime)s %(levelname)s %(message)s' |
420 | - )) |
421 | - log.addHandler(hdlr) |
422 | - verbosity = options.verbose - options.quiet |
423 | - if verbosity > 0: |
424 | - log.setLevel(logging.DEBUG) |
425 | - elif verbosity == 0: # Default |
426 | - log.setLevel(logging.INFO) |
427 | - elif verbosity == -1: |
428 | - log.setLevel(logging.WARN) |
429 | - elif verbosity < -1: |
430 | - log.setLevel(logging.ERROR) |
431 | - |
432 | - sys.exit(main(options, databases)) |
This LGTM. I found something else during review, but it wasn't introduced by this merge; I filed a bug to track it: lp:1313719.