Merge lp:~unifield-team/unifield-server/sp71 into lp:unifield-server
- sp71
- Merge into trunk
Proposed by
Samus CTO (OpenERP)
Status: | Merged |
---|---|
Merged at revision: | 3444 |
Proposed branch: | lp:~unifield-team/unifield-server/sp71 |
Merge into: | lp:unifield-server |
Diff against target: |
2048 lines (+1749/-96) 10 files modified
bin/openerp-server.py (+9/-6) bin/pooler.py (+5/-0) bin/service/web_services.py (+3/-2) bin/sql_db.py (+1/-1) bin/unifield-version.txt (+1/-0) bin/updater.py (+299/-76) bin/zipfile266.py (+1409/-0) setup.nsi (+1/-0) setup.py (+1/-0) win32/OpenERPServerService.py (+20/-11) |
To merge this branch: | bzr merge lp:~unifield-team/unifield-server/sp71 |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
UniField Dev Team | Pending | ||
Review via email:
|
Commit message
Description of the change
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'bin/openerp-server.py' | |||
2 | --- bin/openerp-server.py 2013-02-27 09:10:30 +0000 | |||
3 | +++ bin/openerp-server.py 2013-02-28 12:40:27 +0000 | |||
4 | @@ -30,8 +30,8 @@ | |||
5 | 30 | (c) 2003-TODAY, Fabien Pinckaers - OpenERP s.a. | 30 | (c) 2003-TODAY, Fabien Pinckaers - OpenERP s.a. |
6 | 31 | """ | 31 | """ |
7 | 32 | 32 | ||
10 | 33 | from updater import do_update | 33 | import updater |
11 | 34 | do_update() | 34 | updater.do_update() |
12 | 35 | 35 | ||
13 | 36 | #---------------------------------------------------------- | 36 | #---------------------------------------------------------- |
14 | 37 | # python imports | 37 | # python imports |
15 | @@ -65,6 +65,7 @@ | |||
16 | 65 | # import the tools module so that the commandline parameters are parsed | 65 | # import the tools module so that the commandline parameters are parsed |
17 | 66 | #----------------------------------------------------------------------- | 66 | #----------------------------------------------------------------------- |
18 | 67 | import tools | 67 | import tools |
19 | 68 | updater.update_path() | ||
20 | 68 | logger.info("OpenERP version - %s", release.version) | 69 | logger.info("OpenERP version - %s", release.version) |
21 | 69 | for name, value in [('addons_path', tools.config['addons_path']), | 70 | for name, value in [('addons_path', tools.config['addons_path']), |
22 | 70 | ('database hostname', tools.config['db_host'] or 'localhost'), | 71 | ('database hostname', tools.config['db_host'] or 'localhost'), |
23 | @@ -219,6 +220,8 @@ | |||
24 | 219 | signal.signal(signal.SIGQUIT, dumpstacks) | 220 | signal.signal(signal.SIGQUIT, dumpstacks) |
25 | 220 | 221 | ||
26 | 221 | def quit(restart=False): | 222 | def quit(restart=False): |
27 | 223 | if restart: | ||
28 | 224 | time.sleep(updater.restart_delay) | ||
29 | 222 | netsvc.Agent.quit() | 225 | netsvc.Agent.quit() |
30 | 223 | netsvc.Server.quitAll() | 226 | netsvc.Server.quitAll() |
31 | 224 | if tools.config['pidfile']: | 227 | if tools.config['pidfile']: |
32 | @@ -247,6 +250,8 @@ | |||
33 | 247 | logger.info(str(thread.getName()) + ' could not be terminated') | 250 | logger.info(str(thread.getName()) + ' could not be terminated') |
34 | 248 | if not restart: | 251 | if not restart: |
35 | 249 | sys.exit(0) | 252 | sys.exit(0) |
36 | 253 | elif os.name == 'nt': | ||
37 | 254 | sys.exit(1) # require service restart | ||
38 | 250 | else: | 255 | else: |
39 | 251 | os.execv(sys.executable, [sys.executable] + sys.argv) | 256 | os.execv(sys.executable, [sys.executable] + sys.argv) |
40 | 252 | 257 | ||
41 | @@ -260,11 +265,9 @@ | |||
42 | 260 | 265 | ||
43 | 261 | logger.info('OpenERP server is running, waiting for connections...') | 266 | logger.info('OpenERP server is running, waiting for connections...') |
44 | 262 | 267 | ||
48 | 263 | tools.restart_required = False | 268 | while netsvc.quit_signals_received == 0 and not updater.restart_required: |
46 | 264 | |||
47 | 265 | while netsvc.quit_signals_received == 0 and not tools.restart_required: | ||
49 | 266 | time.sleep(5) | 269 | time.sleep(5) |
50 | 267 | 270 | ||
52 | 268 | quit(restart=tools.restart_required) | 271 | quit(restart=updater.restart_required) |
53 | 269 | 272 | ||
54 | 270 | # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | 273 | # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
55 | 271 | 274 | ||
56 | === modified file 'bin/pooler.py' | |||
57 | --- bin/pooler.py 2010-10-01 11:25:52 +0000 | |||
58 | +++ bin/pooler.py 2013-02-28 12:40:27 +0000 | |||
59 | @@ -19,6 +19,8 @@ | |||
60 | 19 | # | 19 | # |
61 | 20 | ############################################################################## | 20 | ############################################################################## |
62 | 21 | 21 | ||
63 | 22 | import updater | ||
64 | 23 | |||
65 | 22 | pool_dic = {} | 24 | pool_dic = {} |
66 | 23 | 25 | ||
67 | 24 | def get_db_and_pool(db_name, force_demo=False, status=None, update_module=False, pooljobs=True): | 26 | def get_db_and_pool(db_name, force_demo=False, status=None, update_module=False, pooljobs=True): |
68 | @@ -45,6 +47,9 @@ | |||
69 | 45 | try: | 47 | try: |
70 | 46 | pool.init_set(cr, False) | 48 | pool.init_set(cr, False) |
71 | 47 | pool.get('ir.actions.report.xml').register_all(cr) | 49 | pool.get('ir.actions.report.xml').register_all(cr) |
72 | 50 | if not updater.do_upgrade(cr, pool): | ||
73 | 51 | pool_dic.pop(db_name) | ||
74 | 52 | raise Exception("updater.py told us that OpenERP version doesn't match database version!") | ||
75 | 48 | cr.commit() | 53 | cr.commit() |
76 | 49 | finally: | 54 | finally: |
77 | 50 | cr.close() | 55 | cr.close() |
78 | 51 | 56 | ||
79 | === modified file 'bin/service/web_services.py' | |||
80 | --- bin/service/web_services.py 2013-02-04 14:43:01 +0000 | |||
81 | +++ bin/service/web_services.py 2013-02-28 12:40:27 +0000 | |||
82 | @@ -32,6 +32,7 @@ | |||
83 | 32 | import ir | 32 | import ir |
84 | 33 | import netsvc | 33 | import netsvc |
85 | 34 | import pooler | 34 | import pooler |
86 | 35 | import updater | ||
87 | 35 | import release | 36 | import release |
88 | 36 | import sql_db | 37 | import sql_db |
89 | 37 | import tools | 38 | import tools |
90 | @@ -191,7 +192,7 @@ | |||
91 | 191 | 192 | ||
92 | 192 | self._set_pg_psw_env_var() | 193 | self._set_pg_psw_env_var() |
93 | 193 | 194 | ||
95 | 194 | cmd = [tools.misc.find_pg_tool('pg_dump'), '--format=c', '--no-owner'] | 195 | cmd = ['pg_dump', '--format=c', '--no-owner'] |
96 | 195 | if tools.config['db_user']: | 196 | if tools.config['db_user']: |
97 | 196 | cmd.append('--username=' + tools.config['db_user']) | 197 | cmd.append('--username=' + tools.config['db_user']) |
98 | 197 | if tools.config['db_host']: | 198 | if tools.config['db_host']: |
99 | @@ -227,7 +228,7 @@ | |||
100 | 227 | 228 | ||
101 | 228 | self._create_empty_database(db_name) | 229 | self._create_empty_database(db_name) |
102 | 229 | 230 | ||
104 | 230 | cmd = [tools.misc.find_pg_tool('pg_restore'), '--no-owner', '--no-acl'] | 231 | cmd = ['pg_restore', '--no-owner'] |
105 | 231 | if tools.config['db_user']: | 232 | if tools.config['db_user']: |
106 | 232 | cmd.append('--username=' + tools.config['db_user']) | 233 | cmd.append('--username=' + tools.config['db_user']) |
107 | 233 | if tools.config['db_host']: | 234 | if tools.config['db_host']: |
108 | 234 | 235 | ||
109 | === modified file 'bin/sql_db.py' | |||
110 | --- bin/sql_db.py 2011-01-18 19:26:37 +0000 | |||
111 | +++ bin/sql_db.py 2013-02-28 12:40:27 +0000 | |||
112 | @@ -325,7 +325,7 @@ | |||
113 | 325 | 325 | ||
114 | 326 | @locked | 326 | @locked |
115 | 327 | def close_all(self, dsn): | 327 | def close_all(self, dsn): |
117 | 328 | self.__logger.info('%r: Close all connections to %r', self, dsn) | 328 | self.__logger.info('%r: Close all connections', self) |
118 | 329 | for i, (cnx, used) in tools.reverse_enumerate(self._connections): | 329 | for i, (cnx, used) in tools.reverse_enumerate(self._connections): |
119 | 330 | if dsn_are_equals(cnx.dsn, dsn): | 330 | if dsn_are_equals(cnx.dsn, dsn): |
120 | 331 | cnx.close() | 331 | cnx.close() |
121 | 332 | 332 | ||
122 | === added file 'bin/unifield-version.txt' | |||
123 | --- bin/unifield-version.txt 1970-01-01 00:00:00 +0000 | |||
124 | +++ bin/unifield-version.txt 2013-02-28 12:40:27 +0000 | |||
125 | @@ -0,0 +1,1 @@ | |||
126 | 1 | 88888888888888888888888888888888 | ||
127 | 0 | 2 | ||
128 | === modified file 'bin/updater.py' | |||
129 | --- bin/updater.py 2012-10-19 13:22:33 +0000 | |||
130 | +++ bin/updater.py 2013-02-28 12:40:27 +0000 | |||
131 | @@ -1,10 +1,100 @@ | |||
132 | 1 | """ | ||
133 | 2 | Unifield module to upgrade the instance to a next version of Unifield | ||
134 | 3 | Beware that we expect to be in the bin/ directory to proceed!! | ||
135 | 4 | """ | ||
136 | 5 | from __future__ import with_statement | ||
137 | 6 | import re | ||
138 | 1 | import os | 7 | import os |
139 | 2 | import sys | 8 | import sys |
141 | 3 | import psycopg2 | 9 | from hashlib import md5 |
142 | 4 | from datetime import datetime | 10 | from datetime import datetime |
145 | 5 | 11 | from base64 import b64decode | |
146 | 6 | ## Unix-like find | 12 | from StringIO import StringIO |
147 | 13 | import logging | ||
148 | 14 | import time | ||
149 | 15 | |||
150 | 16 | if sys.version_info >= (2, 6, 6): | ||
151 | 17 | from zipfile import ZipFile, ZipInfo | ||
152 | 18 | else: | ||
153 | 19 | from zipfile266 import ZipFile, ZipInfo | ||
154 | 20 | |||
155 | 21 | __all__ = ('isset_lock', 'server_version', 'base_version', 'do_prepare', 'base_module_upgrade', 'restart_server') | ||
156 | 22 | |||
157 | 23 | restart_required = False | ||
158 | 24 | log_file = 'updater.log' | ||
159 | 25 | lock_file = 'update.lock' | ||
160 | 26 | update_dir = '.update' | ||
161 | 27 | server_version_file = 'unifield-version.txt' | ||
162 | 28 | new_version_file = os.path.join(update_dir, 'update-list.txt') | ||
163 | 29 | restart_delay = 5 | ||
164 | 30 | |||
165 | 31 | md5hex_size = (md5().digest_size * 8 / 4) | ||
166 | 32 | base_version = '8' * md5hex_size | ||
167 | 33 | re_version = re.compile(r'^\s*([a-fA-F0-9]{'+str(md5hex_size)+r'}\b)') | ||
168 | 34 | logger = logging.getLogger('updater') | ||
169 | 35 | |||
170 | 36 | def restart_server(): | ||
171 | 37 | """Restart OpenERP server""" | ||
172 | 38 | global restart_required | ||
173 | 39 | logger.info("Restarting OpenERP Server in %d seconds..." % restart_delay) | ||
174 | 40 | restart_required = True | ||
175 | 41 | |||
176 | 42 | def isset_lock(file=None): | ||
177 | 43 | """Check if server lock file is set""" | ||
178 | 44 | if file is None: file = lock_file | ||
179 | 45 | return os.path.isfile(lock_file) | ||
180 | 46 | |||
181 | 47 | def set_lock(file=None): | ||
182 | 48 | """Set the lock file to make OpenERP run the do_update method instead of its normal execution""" | ||
183 | 49 | from tools import config | ||
184 | 50 | if file is None: file = lock_file | ||
185 | 51 | with open(file, "w") as f: | ||
186 | 52 | f.write(unicode({'path':os.getcwd(),'rcfile':config.rcfile})) | ||
187 | 53 | |||
188 | 54 | def unset_lock(file=None): | ||
189 | 55 | """Remove the lock""" | ||
190 | 56 | global exec_path | ||
191 | 57 | global rcfile | ||
192 | 58 | if file is None: file = lock_file | ||
193 | 59 | with open(file, "r") as f: | ||
194 | 60 | data = eval(f.read().strip()) | ||
195 | 61 | exec_path = data['path'] | ||
196 | 62 | rcfile = data['rcfile'] | ||
197 | 63 | os.unlink(file) | ||
198 | 64 | |||
199 | 65 | def parse_version_file(filepath): | ||
200 | 66 | """Short method to parse a "version file" | ||
201 | 67 | Basically, a file where each line starts with the sum of a patch""" | ||
202 | 68 | assert os.path.isfile(filepath), "The file `%s' must be a file!" % filepath | ||
203 | 69 | versions = [] | ||
204 | 70 | with open(filepath, 'r') as f: | ||
205 | 71 | for line in f: | ||
206 | 72 | line = line.rstrip() | ||
207 | 73 | if not line: continue | ||
208 | 74 | try: | ||
209 | 75 | m = re_version.match(line) | ||
210 | 76 | versions.append( m.group(1) ) | ||
211 | 77 | except AttributeError: | ||
212 | 78 | raise Exception("Unable to parse version from file `%s': %s" % (filepath, line)) | ||
213 | 79 | return versions | ||
214 | 80 | |||
215 | 81 | def get_server_version(): | ||
216 | 82 | """Automatically get the current versions of the server | ||
217 | 83 | Get a special key 88888888888888888888888888888888 for default value if no server version can be found""" | ||
218 | 84 | if not os.path.exists(server_version_file): | ||
219 | 85 | return [base_version] | ||
220 | 86 | return parse_version_file(server_version_file) | ||
221 | 87 | |||
222 | 88 | def add_versions(versions, filepath=server_version_file): | ||
223 | 89 | """Set server version with new versions""" | ||
224 | 90 | if not versions: | ||
225 | 91 | return | ||
226 | 92 | with open(filepath, 'a') as f: | ||
227 | 93 | for ver in versions: | ||
228 | 94 | f.write((" ".join([unicode(x) for x in ver]) if hasattr(ver, '__iter__') else ver)+os.linesep) | ||
229 | 95 | |||
230 | 7 | def find(path): | 96 | def find(path): |
231 | 97 | """Unix-like find""" | ||
232 | 8 | files = os.listdir(path) | 98 | files = os.listdir(path) |
233 | 9 | for name in iter(files): | 99 | for name in iter(files): |
234 | 10 | abspath = path+os.path.sep+name | 100 | abspath = path+os.path.sep+name |
235 | @@ -12,23 +102,8 @@ | |||
236 | 12 | files.extend( map(lambda x:name+os.path.sep+x, os.listdir(abspath)) ) | 102 | files.extend( map(lambda x:name+os.path.sep+x, os.listdir(abspath)) ) |
237 | 13 | return files | 103 | return files |
238 | 14 | 104 | ||
239 | 15 | ## Define way to forward logs | ||
240 | 16 | def warn(*args): | ||
241 | 17 | sys.stderr.write(" ".join(map(lambda x:str(x), args))+"\n") | ||
242 | 18 | |||
243 | 19 | ## Try...Resume... | ||
244 | 20 | def Try(command): | ||
245 | 21 | try: | ||
246 | 22 | command() | ||
247 | 23 | except: | ||
248 | 24 | e, msg = sys.exc_info()[0].__name__, str(sys.exc_info()[1]) | ||
249 | 25 | warn(str(msg)) | ||
250 | 26 | return False | ||
251 | 27 | else: | ||
252 | 28 | return True | ||
253 | 29 | |||
254 | 30 | ## Python free rmtree | ||
255 | 31 | def rmtree(files, path=None, verbose=False): | 105 | def rmtree(files, path=None, verbose=False): |
256 | 106 | """Python free rmtree""" | ||
257 | 32 | if path is None and isinstance(files, str): | 107 | if path is None and isinstance(files, str): |
258 | 33 | path, files = files, find(files) | 108 | path, files = files, find(files) |
259 | 34 | for f in reversed(files): | 109 | for f in reversed(files): |
260 | @@ -40,35 +115,72 @@ | |||
261 | 40 | warn("rmdir", target) | 115 | warn("rmdir", target) |
262 | 41 | os.rmdir( target ) | 116 | os.rmdir( target ) |
263 | 42 | 117 | ||
264 | 118 | def now(): | ||
265 | 119 | return datetime.today().strftime("%Y-%m-%d %H:%M:%S") | ||
266 | 120 | |||
267 | 121 | log = sys.stderr | ||
268 | 122 | |||
269 | 123 | def warn(*args): | ||
270 | 124 | """Define way to forward logs""" | ||
271 | 125 | global log | ||
272 | 126 | log.write(("[%s] UPDATER: " % now())+" ".join(map(lambda x:unicode(x), args))+os.linesep) | ||
273 | 127 | |||
274 | 128 | def Try(command): | ||
275 | 129 | """Try...Resume...""" | ||
276 | 130 | try: | ||
277 | 131 | command() | ||
278 | 132 | except BaseException, e: | ||
279 | 133 | warn(unicode(e)) | ||
280 | 134 | return False | ||
281 | 135 | else: | ||
282 | 136 | return True | ||
283 | 137 | |||
284 | 138 | |||
285 | 139 | |||
286 | 140 | ############################################################################## | ||
287 | 141 | ## ## | ||
288 | 142 | ## Main methods of updater modules ## | ||
289 | 143 | ## ## | ||
290 | 144 | ############################################################################## | ||
291 | 145 | |||
292 | 146 | |||
293 | 147 | def base_module_upgrade(cr, pool, upgrade_now=False): | ||
294 | 148 | """Just like -u base / -u all. | ||
295 | 149 | Arguments are: | ||
296 | 150 | * cr: cursor to the database | ||
297 | 151 | * pool: pool of the same db | ||
298 | 152 | * (optional) upgrade_now: False by default; if True, it will launch the process right now""" | ||
299 | 153 | modules = pool.get('ir.module.module') | ||
300 | 154 | base_ids = modules.search(cr, 1, [('name', '=', 'base')]) | ||
301 | 155 | #base_ids = modules.search(cr, 1, [('name', '=', 'sync_client')]) #for tests | ||
302 | 156 | modules.button_upgrade(cr, 1, base_ids) | ||
303 | 157 | if upgrade_now: | ||
304 | 158 | logger.info("Starting base upgrade process") | ||
305 | 159 | pool.get('base.module.upgrade').upgrade_module(cr, 1, []) | ||
306 | 160 | |||
307 | 161 | |||
308 | 43 | def do_update(): | 162 | def do_update(): |
318 | 44 | ## We expect to be in the bin/ directory to proceed | 163 | """Real update of the server (before normal OpenERP execution). |
319 | 45 | if os.path.exists('update.lock'): | 164 | This function is triggered when OpenERP starts. When it finishes, it restart OpenERP automatically. |
320 | 46 | rev_file = os.path.join('.update','revisions.txt') | 165 | On failure, the lock file is deleted and OpenERP files are rollbacked to their previous state.""" |
321 | 47 | hist_file = "revision_history.txt" | 166 | if os.path.exists(lock_file) and Try(unset_lock): |
322 | 48 | infos = {'exec_path':os.getcwd()} | 167 | global log |
323 | 49 | revisions = None | 168 | ## Move logs log file |
324 | 50 | cur = None | 169 | try: |
325 | 51 | conn = None | 170 | log = open(log_file, 'a') |
326 | 52 | update_revisions = None | 171 | except BaseException, e: |
327 | 172 | log.write("Cannot write into `%s': %s" % (log, unicode(e))) | ||
328 | 173 | warn(lock_file, 'removed') | ||
329 | 174 | ## Now, update | ||
330 | 175 | application_time = now() | ||
331 | 176 | revisions = [] | ||
332 | 53 | files = None | 177 | files = None |
333 | 54 | args = list(sys.argv) | ||
334 | 55 | for i, x in enumerate(args): | ||
335 | 56 | if x in ('-d', '-u', '-c'): | ||
336 | 57 | args[i] = None | ||
337 | 58 | args[i+1] = None | ||
338 | 59 | args = filter(lambda x:x is not None, args) | ||
339 | 60 | try: | 178 | try: |
349 | 61 | ## Read DB name | 179 | ## Revisions that going to be installed |
350 | 62 | f = open('update.lock') | 180 | revisions = parse_version_file(new_version_file) |
351 | 63 | infos = eval(f.read()) | 181 | os.unlink(new_version_file) |
343 | 64 | f.close() | ||
344 | 65 | revisions = ",".join( map(lambda x:"'"+str(x)+"'", infos['revisions']) ) | ||
345 | 66 | ## Connect to the DB | ||
346 | 67 | conn = psycopg2.connect(database=infos['dbname'], user=infos['db_user'], password=infos['db_password'], host=infos['db_host'], port=infos['db_port']) | ||
347 | 68 | conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) | ||
348 | 69 | cur = conn.cursor() | ||
352 | 70 | ## Explore .update directory | 182 | ## Explore .update directory |
354 | 71 | files = find('.update') | 183 | files = find(update_dir) |
355 | 72 | ## Prepare backup directory | 184 | ## Prepare backup directory |
356 | 73 | if not os.path.exists('backup'): | 185 | if not os.path.exists('backup'): |
357 | 74 | os.mkdir('backup') | 186 | os.mkdir('backup') |
358 | @@ -77,7 +189,7 @@ | |||
359 | 77 | ## Update Files | 189 | ## Update Files |
360 | 78 | warn("Updating...") | 190 | warn("Updating...") |
361 | 79 | for f in files: | 191 | for f in files: |
363 | 80 | target = os.path.join('.update', f) | 192 | target = os.path.join(update_dir, f) |
364 | 81 | bak = os.path.join('backup', f) | 193 | bak = os.path.join('backup', f) |
365 | 82 | if os.path.isdir(target): | 194 | if os.path.isdir(target): |
366 | 83 | if os.path.isfile(f) or os.path.islink(f): | 195 | if os.path.isfile(f) or os.path.islink(f): |
367 | @@ -91,22 +203,14 @@ | |||
368 | 91 | os.rename(f, bak) | 203 | os.rename(f, bak) |
369 | 92 | warn("`%s' -> `%s'" % (target, f)) | 204 | warn("`%s' -> `%s'" % (target, f)) |
370 | 93 | os.rename(target, f) | 205 | os.rename(target, f) |
374 | 94 | ## Update installed revisions in DB | 206 | add_versions([(x, application_time) for x in revisions]) |
372 | 95 | cur.execute("""UPDATE sync_client_version SET state = 'installed', applied = '%s' WHERE name in (%s)""" | ||
373 | 96 | % ( datetime.today().strftime("%Y-%m-%d %H:%M:%S"), revisions )) | ||
375 | 97 | warn("Update successful.") | 207 | warn("Update successful.") |
383 | 98 | warn("Revisions added: ", ", ".join( infos['revisions'] )) | 208 | warn("Revisions added: ", ", ".join(revisions)) |
384 | 99 | args.extend(['-d', infos['dbname'], '-u', 'all']) | 209 | ## No database update here. I preferred to set modules to update just after the preparation |
385 | 100 | if os.name == 'nt': | 210 | ## The reason is, when pool is populated, it will starts by upgrading modules first |
386 | 101 | args.extend(['-c', '"%s"' % infos['conf']]) | 211 | except BaseException, e: |
380 | 102 | else: | ||
381 | 103 | args.extend(['-c', infos['conf']]) | ||
382 | 104 | except: | ||
387 | 105 | warn("Update failure!") | 212 | warn("Update failure!") |
392 | 106 | ## Update DB to mark revisions as not-installed | 213 | warn(unicode(e)) |
389 | 107 | if cur and infos: | ||
390 | 108 | Try(lambda:cur.execute("""UPDATE sync_client_version SET state = 'not-installed' WHERE name in (%s)""" | ||
391 | 109 | % ( revisions ))) | ||
393 | 110 | ## Restore backup and purge .update | 214 | ## Restore backup and purge .update |
394 | 111 | if files: | 215 | if files: |
395 | 112 | warn("Restoring...") | 216 | warn("Restoring...") |
396 | @@ -114,21 +218,140 @@ | |||
397 | 114 | target = os.path.join('backup', f) | 218 | target = os.path.join('backup', f) |
398 | 115 | if os.path.isfile(target) or os.path.islink(target): | 219 | if os.path.isfile(target) or os.path.islink(target): |
399 | 116 | warn("`%s' -> `%s'" % (target, f)) | 220 | warn("`%s' -> `%s'" % (target, f)) |
403 | 117 | elif os.path.isdir(target): | 221 | os.rename(target, f) |
401 | 118 | warn("rmdir", target) | ||
402 | 119 | os.rmdir( target ) | ||
404 | 120 | warn("Purging...") | 222 | warn("Purging...") |
419 | 121 | Try(lambda:rmtree(files, '.update')) | 223 | Try(lambda:rmtree(update_dir)) |
420 | 122 | warn("rmdir", '.update') | 224 | if os.name == 'nt': |
421 | 123 | Try(lambda:os.rmdir( '.update' )) | 225 | warn("Exiting OpenERP Server with code 1 to tell service to restart") |
422 | 124 | finally: | 226 | sys.exit(1) # require service to restart |
423 | 125 | if cur: cur.close() | 227 | else: |
424 | 126 | if conn: conn.close() | 228 | warn(("Restart OpenERP in %s:" % exec_path), \ |
425 | 127 | ## Remove lock file | 229 | [sys.executable]+sys.argv) |
426 | 128 | warn("rm", 'update.lock') | 230 | if log is not sys.stderr: |
427 | 129 | Try(lambda:os.unlink( 'update.lock' )) | 231 | log.close() |
428 | 130 | warn("Restart OpenERP in", infos['exec_path'], "with:",args) | 232 | os.chdir(exec_path) |
429 | 131 | if infos: os.chdir(infos['exec_path']) | 233 | os.execv(sys.executable, [sys.executable] + sys.argv) |
430 | 132 | os.execv(sys.executable, [sys.executable] + args) | 234 | |
431 | 133 | 235 | ||
432 | 134 | 236 | def update_path(): | |
433 | 237 | """If server starts normally, this step will fix the paths with the configured path in config rc""" | ||
434 | 238 | from tools import config | ||
435 | 239 | for v in ('log_file', 'lock_file', 'update_dir', 'server_version_file', 'new_version_file'): | ||
436 | 240 | globals()[v] = os.path.join(config['root_path'], globals()[v]) | ||
437 | 241 | global server_version | ||
438 | 242 | server_version = get_server_version() | ||
439 | 243 | |||
440 | 244 | |||
441 | 245 | def do_prepare(cr, revision_ids): | ||
442 | 246 | """Prepare patches for an upgrade of the server and set the lock file""" | ||
443 | 247 | if not revision_ids: | ||
444 | 248 | return ('failure', 'Nothing to do.', {}) | ||
445 | 249 | import pooler | ||
446 | 250 | pool = pooler.get_pool(cr.dbname) | ||
447 | 251 | version = pool.get('sync_client.version') | ||
448 | 252 | |||
449 | 253 | # Make an update temporary path | ||
450 | 254 | path = update_dir | ||
451 | 255 | if not os.path.exists(path): | ||
452 | 256 | os.mkdir(path) | ||
453 | 257 | else: | ||
454 | 258 | for f in reversed(find(path)): | ||
455 | 259 | target = os.path.join(path, f) | ||
456 | 260 | if os.path.isfile(target) or os.path.islink(target): | ||
457 | 261 | logger.debug("rm `%s'" % target) | ||
458 | 262 | os.unlink( target ) | ||
459 | 263 | elif os.path.isdir(target): | ||
460 | 264 | logger.debug("rmdir `%s'" % target) | ||
461 | 265 | os.rmdir( target ) | ||
462 | 266 | if not (os.path.isdir(path) and os.access(path, os.W_OK)): | ||
463 | 267 | message = "The path `%s' is not a dir or is not writable!" | ||
464 | 268 | logger.error(message % path) | ||
465 | 269 | return ('failure', message, (path,)) | ||
466 | 270 | # Proceed all patches | ||
467 | 271 | new_revisions = [] | ||
468 | 272 | corrupt = [] | ||
469 | 273 | missing = [] | ||
470 | 274 | need_restart = [] | ||
471 | 275 | for rev in version.browse(cr, 1, revision_ids): | ||
472 | 276 | # Check presence of the patch | ||
473 | 277 | if not rev.patch: | ||
474 | 278 | missing.append( rev ) | ||
475 | 279 | continue | ||
476 | 280 | # Check if the file match the expected sum | ||
477 | 281 | patch = b64decode( rev.patch ) | ||
478 | 282 | local_sum = md5(patch).hexdigest() | ||
479 | 283 | if local_sum != rev.sum: | ||
480 | 284 | corrupt.append( rev ) | ||
481 | 285 | elif not (corrupt or missing): | ||
482 | 286 | # Extract the Zip | ||
483 | 287 | f = StringIO(patch) | ||
484 | 288 | try: | ||
485 | 289 | zip = ZipFile(f, 'r') | ||
486 | 290 | zip.extractall(path) | ||
487 | 291 | finally: | ||
488 | 292 | f.close() | ||
489 | 293 | # Store to list of updates | ||
490 | 294 | new_revisions.append( (rev.sum, ("[%s] %s - %s" % (rev.importance, rev.date, rev.name))) ) | ||
491 | 295 | if rev.state == 'not-installed': | ||
492 | 296 | need_restart.append(rev.id) | ||
493 | 297 | # Remove corrupted patches | ||
494 | 298 | if corrupt: | ||
495 | 299 | corrupt_ids = [x.id for x in corrupt] | ||
496 | 300 | version.write(cr, 1, corrupt_ids, {'patch':False}) | ||
497 | 301 | if len(corrupt) == 1: message = "One file you downloaded seems to be corrupt:\n\n%s" | ||
498 | 302 | else: message = "Some files you downloaded seem to be corrupt:\n\n%s" | ||
499 | 303 | values = "" | ||
500 | 304 | for rev in corrupt: | ||
501 | 305 | values += " - %s (sum expected: %s)\n" % ((rev.name or 'unknown'), rev.sum) | ||
502 | 306 | logger.error(message % values) | ||
503 | 307 | return ('corrupt', message, values) | ||
504 | 308 | # Complaints about missing patches | ||
505 | 309 | if missing: | ||
506 | 310 | if len(missing) == 1: | ||
507 | 311 | message = "A file is missing: %(name)s (check sum: %(sum)s)" | ||
508 | 312 | values = { | ||
509 | 313 | 'name' : missing[0].name or 'unknown', | ||
510 | 314 | 'sum' : missing[0].sum | ||
511 | 315 | } | ||
512 | 316 | else: | ||
513 | 317 | message = "Some files are missing:\n\n%s" | ||
514 | 318 | values = "" | ||
515 | 319 | for rev in missing: | ||
516 | 320 | values += " - %s (check sum: %s)\n" % ((rev.name or 'unknown'), rev.sum) | ||
517 | 321 | logger.error(message % values) | ||
518 | 322 | return ('missing', message, values) | ||
519 | 323 | # Fix the flag of the pending patches | ||
520 | 324 | version.write(cr, 1, need_restart, {'state':'need-restart'}) | ||
521 | 325 | # Make a lock file to make OpenERP able to detect an update | ||
522 | 326 | set_lock() | ||
523 | 327 | add_versions(new_revisions, new_version_file) | ||
524 | 328 | logger.info("Server update prepared. Need to restart to complete the upgrade.") | ||
525 | 329 | return ('success', 'Restart required', {}) | ||
526 | 330 | |||
527 | 331 | |||
528 | 332 | def do_upgrade(cr, pool): | ||
529 | 333 | """Start upgrade process (called by login method and restore)""" | ||
530 | 334 | versions = pool.get('sync_client.version') | ||
531 | 335 | if versions is None: | ||
532 | 336 | return True | ||
533 | 337 | |||
534 | 338 | db_versions = versions.read(cr, 1, versions.search(cr, 1, [('state','=','installed')]), ['sum']) | ||
535 | 339 | db_versions = map(lambda x:x['sum'], db_versions) | ||
536 | 340 | server_lack_versions = set(db_versions) - set(server_version) | ||
537 | 341 | db_lack_versions = set(server_version) - set(db_versions) - set([base_version]) | ||
538 | 342 | |||
539 | 343 | if server_lack_versions: | ||
540 | 344 | revision_ids = versions.search(cr, 1, [('sum','in',list(server_lack_versions))], order='date asc') | ||
541 | 345 | res = do_prepare(cr, revision_ids) | ||
542 | 346 | if res[0] == 'success': | ||
543 | 347 | import tools | ||
544 | 348 | os.chdir( tools.config['root_path'] ) | ||
545 | 349 | restart_server() | ||
546 | 350 | else: | ||
547 | 351 | return False | ||
548 | 352 | |||
549 | 353 | elif db_lack_versions: | ||
550 | 354 | base_module_upgrade(cr, pool, upgrade_now=True) | ||
551 | 355 | # Note: There is no need to update the db versions, the `def init()' of the object does that for us | ||
552 | 356 | |||
553 | 357 | return True | ||
554 | 135 | 358 | ||
555 | === added file 'bin/zipfile266.py' | |||
556 | --- bin/zipfile266.py 1970-01-01 00:00:00 +0000 | |||
557 | +++ bin/zipfile266.py 2013-02-28 12:40:27 +0000 | |||
558 | @@ -0,0 +1,1409 @@ | |||
559 | 1 | """ | ||
560 | 2 | Read and write ZIP files. | ||
561 | 3 | """ | ||
562 | 4 | import struct, os, time, sys, shutil | ||
563 | 5 | import binascii, cStringIO, stat | ||
564 | 6 | |||
565 | 7 | try: | ||
566 | 8 | import zlib # We may need its compression method | ||
567 | 9 | crc32 = zlib.crc32 | ||
568 | 10 | except ImportError: | ||
569 | 11 | zlib = None | ||
570 | 12 | crc32 = binascii.crc32 | ||
571 | 13 | |||
572 | 14 | __all__ = ["BadZipfile", "error", "ZIP_STORED", "ZIP_DEFLATED", "is_zipfile", | ||
573 | 15 | "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile" ] | ||
574 | 16 | |||
575 | 17 | class BadZipfile(Exception): | ||
576 | 18 | pass | ||
577 | 19 | |||
578 | 20 | |||
579 | 21 | class LargeZipFile(Exception): | ||
580 | 22 | """ | ||
581 | 23 | Raised when writing a zipfile, the zipfile requires ZIP64 extensions | ||
582 | 24 | and those extensions are disabled. | ||
583 | 25 | """ | ||
584 | 26 | |||
585 | 27 | error = BadZipfile # The exception raised by this module | ||
586 | 28 | |||
587 | 29 | ZIP64_LIMIT = (1 << 31) - 1 | ||
588 | 30 | ZIP_FILECOUNT_LIMIT = 1 << 16 | ||
589 | 31 | ZIP_MAX_COMMENT = (1 << 16) - 1 | ||
590 | 32 | |||
591 | 33 | # constants for Zip file compression methods | ||
592 | 34 | ZIP_STORED = 0 | ||
593 | 35 | ZIP_DEFLATED = 8 | ||
594 | 36 | # Other ZIP compression methods not supported | ||
595 | 37 | |||
# Below are some formats and associated data for reading/writing headers using
# the struct module. The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)

# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = "<4s4H2LH"       # little-endian, 22 bytes
stringEndArchive = "PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)

# Indices into the unpacked end-of-central-directory tuple/list.
_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9

# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"     # little-endian, 46 bytes
stringCentralDir = "PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)

# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18

# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"       # little-endian, 30 bytes
stringFileHeader = "PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)

_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11

# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"    # little-endian, 20 bytes
stringEndArchive64Locator = "PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)

# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"       # little-endian, 56 bytes
stringEndArchive64 = "PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)

_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
688 | 130 | |||
def is_zipfile(filename):
    """Quickly see if file is a ZIP file by checking the magic number.

    Returns True when *filename* names a readable file in which an
    "end of central directory" record can be located, False when the
    file cannot be opened or no record is found.  Exceptions other than
    IOError (e.g. BadZipfile for multi-disk archives) still propagate.
    """
    try:
        fpin = open(filename, "rb")
        try:
            endrec = _EndRecData(fpin)
        finally:
            # Bug fix: the original closed the handle only on the success
            # path, leaking it when _EndRecData raised (e.g. BadZipfile).
            fpin.close()
        if endrec:
            return True                 # file has correct magic number
    except IOError:
        pass
    return False
700 | 142 | |||
def _EndRecData64(fpin, offset, endrec):
    """
    Read the ZIP64 end-of-archive records and use that to update endrec

    fpin:   archive file object, opened for binary reading.
    offset: negative offset (relative to end of file) of the classic
            end-of-central-directory record already located.
    endrec: list built by _EndRecData; updated in place and returned.

    Returns endrec unchanged when no ZIP64 locator/record is present.
    Raises BadZipfile for archives spanning multiple disks.
    """
    # The ZIP64 locator sits immediately before the classic end record.
    fpin.seek(offset - sizeEndCentDir64Locator, 2)
    data = fpin.read(sizeEndCentDir64Locator)
    sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
    if sig != stringEndArchive64Locator:
        # No locator signature: plain (non-ZIP64) archive.
        return endrec

    if diskno != 0 or disks != 1:
        raise BadZipfile("zipfiles that span multiple disks are not supported")

    # Assume no 'zip64 extensible data'
    fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
    data = fpin.read(sizeEndCentDir64)
    sig, sz, create_version, read_version, disk_num, disk_dir, \
            dircount, dircount2, dirsize, diroffset = \
            struct.unpack(structEndArchive64, data)
    if sig != stringEndArchive64:
        # Locator pointed at something that is not a ZIP64 end record.
        return endrec

    # Update the original endrec using data from the ZIP64 record
    endrec[_ECD_SIGNATURE] = sig
    endrec[_ECD_DISK_NUMBER] = disk_num
    endrec[_ECD_DISK_START] = disk_dir
    endrec[_ECD_ENTRIES_THIS_DISK] = dircount
    endrec[_ECD_ENTRIES_TOTAL] = dircount2
    endrec[_ECD_SIZE] = dirsize
    endrec[_ECD_OFFSET] = diroffset
    return endrec
732 | 174 | |||
733 | 175 | |||
def _EndRecData(fpin):
    """Return data from the "End of Central Directory" record, or None.

    The data is a list of the nine items in the ZIP "End of central dir"
    record followed by a tenth item, the file seek offset of this record."""

    # Determine file size
    fpin.seek(0, 2)
    filesize = fpin.tell()

    # Check to see if this is ZIP file with no archive comment (the
    # "end of central directory" structure should be the last item in the
    # file if this is the case).
    try:
        fpin.seek(-sizeEndCentDir, 2)
    except IOError:
        # File is too small to contain an end-of-central-directory record.
        return None
    data = fpin.read()
    if data[0:4] == stringEndArchive and data[-2:] == "\000\000":
        # the signature is correct and there's no comment, unpack structure
        endrec = struct.unpack(structEndArchive, data)
        endrec=list(endrec)

        # Append a blank comment and record start offset
        endrec.append("")
        endrec.append(filesize - sizeEndCentDir)

        # Try to read the "Zip64 end of central directory" structure
        return _EndRecData64(fpin, -sizeEndCentDir, endrec)

    # Either this is not a ZIP file, or it is a ZIP file with an archive
    # comment.  Search the end of the file for the "end of central directory"
    # record signature. The comment is the last item in the ZIP file and may be
    # up to 64K long.  It is assumed that the "end of central directory" magic
    # number does not appear in the comment.
    maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
    fpin.seek(maxCommentStart, 0)
    data = fpin.read()
    start = data.rfind(stringEndArchive)
    if start >= 0:
        # found the magic number; attempt to unpack and interpret
        recData = data[start:start+sizeEndCentDir]
        endrec = list(struct.unpack(structEndArchive, recData))
        comment = data[start+sizeEndCentDir:]
        # check that comment length is correct
        if endrec[_ECD_COMMENT_SIZE] == len(comment):
            # Append the archive comment and start offset
            endrec.append(comment)
            endrec.append(maxCommentStart + start)

            # Try to read the "Zip64 end of central directory" structure
            return _EndRecData64(fpin, maxCommentStart + start - filesize,
                                 endrec)

    # Unable to find a valid end of central directory structure
    return
790 | 232 | |||
791 | 233 | |||
792 | 234 | class ZipInfo (object): | ||
793 | 235 | """Class with attributes describing each file in the ZIP archive.""" | ||
794 | 236 | |||
795 | 237 | __slots__ = ( | ||
796 | 238 | 'orig_filename', | ||
797 | 239 | 'filename', | ||
798 | 240 | 'date_time', | ||
799 | 241 | 'compress_type', | ||
800 | 242 | 'comment', | ||
801 | 243 | 'extra', | ||
802 | 244 | 'create_system', | ||
803 | 245 | 'create_version', | ||
804 | 246 | 'extract_version', | ||
805 | 247 | 'reserved', | ||
806 | 248 | 'flag_bits', | ||
807 | 249 | 'volume', | ||
808 | 250 | 'internal_attr', | ||
809 | 251 | 'external_attr', | ||
810 | 252 | 'header_offset', | ||
811 | 253 | 'CRC', | ||
812 | 254 | 'compress_size', | ||
813 | 255 | 'file_size', | ||
814 | 256 | '_raw_time', | ||
815 | 257 | ) | ||
816 | 258 | |||
817 | 259 | def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)): | ||
818 | 260 | self.orig_filename = filename # Original file name in archive | ||
819 | 261 | |||
820 | 262 | # Terminate the file name at the first null byte. Null bytes in file | ||
821 | 263 | # names are used as tricks by viruses in archives. | ||
822 | 264 | null_byte = filename.find(chr(0)) | ||
823 | 265 | if null_byte >= 0: | ||
824 | 266 | filename = filename[0:null_byte] | ||
825 | 267 | # This is used to ensure paths in generated ZIP files always use | ||
826 | 268 | # forward slashes as the directory separator, as required by the | ||
827 | 269 | # ZIP format specification. | ||
828 | 270 | if os.sep != "/" and os.sep in filename: | ||
829 | 271 | filename = filename.replace(os.sep, "/") | ||
830 | 272 | |||
831 | 273 | self.filename = filename # Normalized file name | ||
832 | 274 | self.date_time = date_time # year, month, day, hour, min, sec | ||
833 | 275 | # Standard values: | ||
834 | 276 | self.compress_type = ZIP_STORED # Type of compression for the file | ||
835 | 277 | self.comment = "" # Comment for each file | ||
836 | 278 | self.extra = "" # ZIP extra data | ||
837 | 279 | if sys.platform == 'win32': | ||
838 | 280 | self.create_system = 0 # System which created ZIP archive | ||
839 | 281 | else: | ||
840 | 282 | # Assume everything else is unix-y | ||
841 | 283 | self.create_system = 3 # System which created ZIP archive | ||
842 | 284 | self.create_version = 20 # Version which created ZIP archive | ||
843 | 285 | self.extract_version = 20 # Version needed to extract archive | ||
844 | 286 | self.reserved = 0 # Must be zero | ||
845 | 287 | self.flag_bits = 0 # ZIP flag bits | ||
846 | 288 | self.volume = 0 # Volume number of file header | ||
847 | 289 | self.internal_attr = 0 # Internal attributes | ||
848 | 290 | self.external_attr = 0 # External file attributes | ||
849 | 291 | # Other attributes are set by class ZipFile: | ||
850 | 292 | # header_offset Byte offset to the file header | ||
851 | 293 | # CRC CRC-32 of the uncompressed file | ||
852 | 294 | # compress_size Size of the compressed file | ||
853 | 295 | # file_size Size of the uncompressed file | ||
854 | 296 | |||
855 | 297 | def FileHeader(self): | ||
856 | 298 | """Return the per-file header as a string.""" | ||
857 | 299 | dt = self.date_time | ||
858 | 300 | dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] | ||
859 | 301 | dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) | ||
860 | 302 | if self.flag_bits & 0x08: | ||
861 | 303 | # Set these to zero because we write them after the file data | ||
862 | 304 | CRC = compress_size = file_size = 0 | ||
863 | 305 | else: | ||
864 | 306 | CRC = self.CRC | ||
865 | 307 | compress_size = self.compress_size | ||
866 | 308 | file_size = self.file_size | ||
867 | 309 | |||
868 | 310 | extra = self.extra | ||
869 | 311 | |||
870 | 312 | if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT: | ||
871 | 313 | # File is larger than what fits into a 4 byte integer, | ||
872 | 314 | # fall back to the ZIP64 extension | ||
873 | 315 | fmt = '<HHQQ' | ||
874 | 316 | extra = extra + struct.pack(fmt, | ||
875 | 317 | 1, struct.calcsize(fmt)-4, file_size, compress_size) | ||
876 | 318 | file_size = 0xffffffff | ||
877 | 319 | compress_size = 0xffffffff | ||
878 | 320 | self.extract_version = max(45, self.extract_version) | ||
879 | 321 | self.create_version = max(45, self.extract_version) | ||
880 | 322 | |||
881 | 323 | filename, flag_bits = self._encodeFilenameFlags() | ||
882 | 324 | header = struct.pack(structFileHeader, stringFileHeader, | ||
883 | 325 | self.extract_version, self.reserved, flag_bits, | ||
884 | 326 | self.compress_type, dostime, dosdate, CRC, | ||
885 | 327 | compress_size, file_size, | ||
886 | 328 | len(filename), len(extra)) | ||
887 | 329 | return header + filename + extra | ||
888 | 330 | |||
889 | 331 | def _encodeFilenameFlags(self): | ||
890 | 332 | if isinstance(self.filename, unicode): | ||
891 | 333 | try: | ||
892 | 334 | return self.filename.encode('ascii'), self.flag_bits | ||
893 | 335 | except UnicodeEncodeError: | ||
894 | 336 | return self.filename.encode('utf-8'), self.flag_bits | 0x800 | ||
895 | 337 | else: | ||
896 | 338 | return self.filename, self.flag_bits | ||
897 | 339 | |||
898 | 340 | def _decodeFilename(self): | ||
899 | 341 | if self.flag_bits & 0x800: | ||
900 | 342 | return self.filename.decode('utf-8') | ||
901 | 343 | else: | ||
902 | 344 | return self.filename | ||
903 | 345 | |||
904 | 346 | def _decodeExtra(self): | ||
905 | 347 | # Try to decode the extra field. | ||
906 | 348 | extra = self.extra | ||
907 | 349 | unpack = struct.unpack | ||
908 | 350 | while extra: | ||
909 | 351 | tp, ln = unpack('<HH', extra[:4]) | ||
910 | 352 | if tp == 1: | ||
911 | 353 | if ln >= 24: | ||
912 | 354 | counts = unpack('<QQQ', extra[4:28]) | ||
913 | 355 | elif ln == 16: | ||
914 | 356 | counts = unpack('<QQ', extra[4:20]) | ||
915 | 357 | elif ln == 8: | ||
916 | 358 | counts = unpack('<Q', extra[4:12]) | ||
917 | 359 | elif ln == 0: | ||
918 | 360 | counts = () | ||
919 | 361 | else: | ||
920 | 362 | raise RuntimeError, "Corrupt extra field %s"%(ln,) | ||
921 | 363 | |||
922 | 364 | idx = 0 | ||
923 | 365 | |||
924 | 366 | # ZIP64 extension (large files and/or large archives) | ||
925 | 367 | if self.file_size in (0xffffffffffffffffL, 0xffffffffL): | ||
926 | 368 | self.file_size = counts[idx] | ||
927 | 369 | idx += 1 | ||
928 | 370 | |||
929 | 371 | if self.compress_size == 0xFFFFFFFFL: | ||
930 | 372 | self.compress_size = counts[idx] | ||
931 | 373 | idx += 1 | ||
932 | 374 | |||
933 | 375 | if self.header_offset == 0xffffffffL: | ||
934 | 376 | old = self.header_offset | ||
935 | 377 | self.header_offset = counts[idx] | ||
936 | 378 | idx+=1 | ||
937 | 379 | |||
938 | 380 | extra = extra[ln+4:] | ||
939 | 381 | |||
940 | 382 | |||
941 | 383 | class _ZipDecrypter: | ||
942 | 384 | """Class to handle decryption of files stored within a ZIP archive. | ||
943 | 385 | |||
944 | 386 | ZIP supports a password-based form of encryption. Even though known | ||
945 | 387 | plaintext attacks have been found against it, it is still useful | ||
946 | 388 | to be able to get data out of such a file. | ||
947 | 389 | |||
948 | 390 | Usage: | ||
949 | 391 | zd = _ZipDecrypter(mypwd) | ||
950 | 392 | plain_char = zd(cypher_char) | ||
951 | 393 | plain_text = map(zd, cypher_text) | ||
952 | 394 | """ | ||
953 | 395 | |||
954 | 396 | def _GenerateCRCTable(): | ||
955 | 397 | """Generate a CRC-32 table. | ||
956 | 398 | |||
957 | 399 | ZIP encryption uses the CRC32 one-byte primitive for scrambling some | ||
958 | 400 | internal keys. We noticed that a direct implementation is faster than | ||
959 | 401 | relying on binascii.crc32(). | ||
960 | 402 | """ | ||
961 | 403 | poly = 0xedb88320 | ||
962 | 404 | table = [0] * 256 | ||
963 | 405 | for i in range(256): | ||
964 | 406 | crc = i | ||
965 | 407 | for j in range(8): | ||
966 | 408 | if crc & 1: | ||
967 | 409 | crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly | ||
968 | 410 | else: | ||
969 | 411 | crc = ((crc >> 1) & 0x7FFFFFFF) | ||
970 | 412 | table[i] = crc | ||
971 | 413 | return table | ||
972 | 414 | crctable = _GenerateCRCTable() | ||
973 | 415 | |||
974 | 416 | def _crc32(self, ch, crc): | ||
975 | 417 | """Compute the CRC32 primitive on one byte.""" | ||
976 | 418 | return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ord(ch)) & 0xff] | ||
977 | 419 | |||
978 | 420 | def __init__(self, pwd): | ||
979 | 421 | self.key0 = 305419896 | ||
980 | 422 | self.key1 = 591751049 | ||
981 | 423 | self.key2 = 878082192 | ||
982 | 424 | for p in pwd: | ||
983 | 425 | self._UpdateKeys(p) | ||
984 | 426 | |||
985 | 427 | def _UpdateKeys(self, c): | ||
986 | 428 | self.key0 = self._crc32(c, self.key0) | ||
987 | 429 | self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295 | ||
988 | 430 | self.key1 = (self.key1 * 134775813 + 1) & 4294967295 | ||
989 | 431 | self.key2 = self._crc32(chr((self.key1 >> 24) & 255), self.key2) | ||
990 | 432 | |||
991 | 433 | def __call__(self, c): | ||
992 | 434 | """Decrypt a single character.""" | ||
993 | 435 | c = ord(c) | ||
994 | 436 | k = self.key2 | 2 | ||
995 | 437 | c = c ^ (((k * (k^1)) >> 8) & 255) | ||
996 | 438 | c = chr(c) | ||
997 | 439 | self._UpdateKeys(c) | ||
998 | 440 | return c | ||
999 | 441 | |||
class ZipExtFile:
    """File-like object for reading an archive member.
    Is returned by ZipFile.open().

    Supports read()/readline()/readlines() and iteration; decryption and
    decompression are applied transparently as bytes are consumed.
    """

    def __init__(self, fileobj, zipinfo, decrypt=None):
        # fileobj: the archive's file object, positioned at the member data.
        # zipinfo: the member's ZipInfo record.
        # decrypt: optional decrypter callable (None => member not encrypted).
        self.fileobj = fileobj
        self.decrypter = decrypt
        self.bytes_read = 0L        # compressed bytes consumed so far
        self.rawbuffer = ''         # raw (still compressed/encrypted) bytes
        self.readbuffer = ''        # decompressed bytes ready for read()
        self.linebuffer = ''        # bytes pending for readline()
        self.eof = False            # NOTE(review): never set True anywhere in
                                    # this class as shown; confirm whether the
                                    # flush path in read() is reachable.
        self.univ_newlines = False
        self.nlSeps = ("\n", )      # active newline separators
        self.lastdiscard = ''       # newline char(s) dropped by last readline

        self.compress_type = zipinfo.compress_type
        self.compress_size = zipinfo.compress_size

        self.closed = False
        self.mode = "r"
        self.name = zipinfo.filename

        # read from compressed files in 64k blocks
        self.compreadsize = 64*1024
        if self.compress_type == ZIP_DEFLATED:
            # -15: raw deflate stream, no zlib header or trailer
            self.dc = zlib.decompressobj(-15)

    def set_univ_newlines(self, univ_newlines):
        # Enable/disable universal-newline handling for readline()/iteration.
        self.univ_newlines = univ_newlines

        # pick line separator char(s) based on universal newlines flag
        self.nlSeps = ("\n", )
        if self.univ_newlines:
            self.nlSeps = ("\r\n", "\r", "\n")

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol: yield successive lines until EOF.
        nextline = self.readline()
        if not nextline:
            raise StopIteration()

        return nextline

    def close(self):
        # Only marks this object closed; the underlying archive file object
        # belongs to the ZipFile and is not closed here.
        self.closed = True

    def _checkfornewline(self):
        # Return (index, separator_length) of the first newline separator in
        # linebuffer, or (-1, -1) when none is present.
        nl, nllen = -1, -1
        if self.linebuffer:
            # ugly check for cases where half of an \r\n pair was
            # read on the last pass, and the \r was discarded.  In this
            # case we just throw away the \n at the start of the buffer.
            if (self.lastdiscard, self.linebuffer[0]) == ('\r','\n'):
                self.linebuffer = self.linebuffer[1:]

            for sep in self.nlSeps:
                nl = self.linebuffer.find(sep)
                if nl >= 0:
                    nllen = len(sep)
                    return nl, nllen

        return nl, nllen

    def readline(self, size = -1):
        """Read a line with approx. size. If size is negative,
        read a whole line.
        """
        if size < 0:
            size = sys.maxint
        elif size == 0:
            return ''

        # check for a newline already in buffer
        nl, nllen = self._checkfornewline()

        if nl >= 0:
            # the next line was already in the buffer
            nl = min(nl, size)
        else:
            # no line break in buffer - try to read more
            size -= len(self.linebuffer)
            while nl < 0 and size > 0:
                buf = self.read(min(size, 100))
                if not buf:
                    break
                self.linebuffer += buf
                size -= len(buf)

                # check for a newline in buffer
                nl, nllen = self._checkfornewline()

            # we either ran out of bytes in the file, or
            # met the specified size limit without finding a newline,
            # so return current buffer
            if nl < 0:
                s = self.linebuffer
                self.linebuffer = ''
                return s

        buf = self.linebuffer[:nl]
        self.lastdiscard = self.linebuffer[nl:nl + nllen]
        self.linebuffer = self.linebuffer[nl + nllen:]

        # line is always returned with \n as newline char (except possibly
        # for a final incomplete line in the file, which is handled above).
        return buf + "\n"

    def readlines(self, sizehint = -1):
        """Return a list with all (following) lines. The sizehint parameter
        is ignored in this implementation.
        """
        result = []
        while True:
            line = self.readline()
            if not line: break
            result.append(line)
        return result

    def read(self, size = None):
        # act like file() obj and return empty string if size is 0
        if size == 0:
            return ''

        # determine read size
        bytesToRead = self.compress_size - self.bytes_read

        # adjust read size for encrypted files since the first 12 bytes
        # are for the encryption/password information
        if self.decrypter is not None:
            bytesToRead -= 12

        if size is not None and size >= 0:
            if self.compress_type == ZIP_STORED:
                lr = len(self.readbuffer)
                bytesToRead = min(bytesToRead, size - lr)
            elif self.compress_type == ZIP_DEFLATED:
                if len(self.readbuffer) > size:
                    # the user has requested fewer bytes than we've already
                    # pulled through the decompressor; don't read any more
                    bytesToRead = 0
                else:
                    # user will use up the buffer, so read some more
                    lr = len(self.rawbuffer)
                    bytesToRead = min(bytesToRead, self.compreadsize - lr)

        # avoid reading past end of file contents
        if bytesToRead + self.bytes_read > self.compress_size:
            bytesToRead = self.compress_size - self.bytes_read

        # try to read from file (if necessary)
        if bytesToRead > 0:
            bytes = self.fileobj.read(bytesToRead)
            self.bytes_read += len(bytes)
            self.rawbuffer += bytes

            # handle contents of raw buffer
            if self.rawbuffer:
                newdata = self.rawbuffer
                self.rawbuffer = ''

                # decrypt new data if we were given an object to handle that
                if newdata and self.decrypter is not None:
                    newdata = ''.join(map(self.decrypter, newdata))

                # decompress newly read data if necessary
                if newdata and self.compress_type == ZIP_DEFLATED:
                    newdata = self.dc.decompress(newdata)
                    self.rawbuffer = self.dc.unconsumed_tail
                    if self.eof and len(self.rawbuffer) == 0:
                        # we're out of raw bytes (both from the file and
                        # the local buffer); flush just to make sure the
                        # decompressor is done
                        newdata += self.dc.flush()
                        # prevent decompressor from being used again
                        self.dc = None

                self.readbuffer += newdata


        # return what the user asked for
        if size is None or len(self.readbuffer) <= size:
            bytes = self.readbuffer
            self.readbuffer = ''
        else:
            bytes = self.readbuffer[:size]
            self.readbuffer = self.readbuffer[size:]

        return bytes
1192 | 634 | |||
1193 | 635 | |||
class ZipFile:
    """ Class with methods to open, read, write, close, list zip files.

    z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)

    file: Either the path to the file, or a file-like object.
          If it is a path, the file will be opened and closed by ZipFile.
    mode: The mode can be either read "r", write "w" or append "a".
    compression: ZIP_STORED (no compression) or ZIP_DEFLATED (requires zlib).
    allowZip64: if True ZipFile will create files with ZIP64 extensions when
                needed, otherwise it will raise an exception when this would
                be necessary.

    """

    fp = None                   # Set here since __del__ checks it
1210 | 652 | |||
    def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
        """Open the ZIP file with mode read "r", write "w" or append "a"."""
        if mode not in ("r", "w", "a"):
            raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')

        # Validate the requested compression method up front.
        if compression == ZIP_STORED:
            pass
        elif compression == ZIP_DEFLATED:
            if not zlib:
                raise RuntimeError,\
                      "Compression requires the (missing) zlib module"
        else:
            raise RuntimeError, "That compression method is not supported"

        self._allowZip64 = allowZip64
        self._didModify = False
        self.debug = 0  # Level of printing: 0 through 3
        self.NameToInfo = {}    # Find file info given name
        self.filelist = []      # List of ZipInfo instances for archive
        self.compression = compression  # Method of compression
        # key: the effective single-character mode, with any 'b' stripped.
        self.mode = key = mode.replace('b', '')[0]
        self.pwd = None
        self.comment = ''

        # Check if we were passed a file-like object
        if isinstance(file, basestring):
            # We were given a path: open (and later close) the file ourselves.
            self._filePassed = 0
            self.filename = file
            modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
            try:
                self.fp = open(file, modeDict[mode])
            except IOError:
                # Appending to a non-existent file: fall back to write mode.
                if mode == 'a':
                    mode = key = 'w'
                    self.fp = open(file, modeDict[mode])
                else:
                    raise
        else:
            # File-like object: caller keeps ownership of the handle.
            self._filePassed = 1
            self.fp = file
            self.filename = getattr(file, 'name', None)

        if key == 'r':
            self._GetContents()
        elif key == 'w':
            pass
        elif key == 'a':
            try:                        # See if file is a zip file
                self._RealGetContents()
                # seek to start of directory and overwrite
                self.fp.seek(self.start_dir, 0)
            except BadZipfile:          # file is not a zip file, just append
                self.fp.seek(0, 2)
        else:
            if not self._filePassed:
                self.fp.close()
                self.fp = None
            raise RuntimeError, 'Mode must be "r", "w" or "a"'
1269 | 711 | |||
1270 | 712 | def _GetContents(self): | ||
1271 | 713 | """Read the directory, making sure we close the file if the format | ||
1272 | 714 | is bad.""" | ||
1273 | 715 | try: | ||
1274 | 716 | self._RealGetContents() | ||
1275 | 717 | except BadZipfile: | ||
1276 | 718 | if not self._filePassed: | ||
1277 | 719 | self.fp.close() | ||
1278 | 720 | self.fp = None | ||
1279 | 721 | raise | ||
1280 | 722 | |||
    def _RealGetContents(self):
        """Read in the table of contents for the ZIP file.

        Populates self.filelist / self.NameToInfo from the central
        directory and records self.start_dir.  Raises BadZipfile when
        no end-of-central-directory record or a bad entry is found.
        """
        fp = self.fp
        endrec = _EndRecData(fp)
        if not endrec:
            raise BadZipfile, "File is not a zip file"
        if self.debug > 1:
            print endrec
        size_cd = endrec[_ECD_SIZE]             # bytes in central directory
        offset_cd = endrec[_ECD_OFFSET]         # offset of central directory
        self.comment = endrec[_ECD_COMMENT]     # archive comment

        # "concat" is zero, unless zip was concatenated to another file
        concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
        if endrec[_ECD_SIGNATURE] == stringEndArchive64:
            # If Zip64 extension structures are present, account for them
            concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)

        if self.debug > 2:
            inferred = concat + offset_cd
            print "given, inferred, offset", offset_cd, inferred, concat
        # self.start_dir:  Position of start of central directory
        self.start_dir = offset_cd + concat
        fp.seek(self.start_dir, 0)
        # Read the whole central directory into memory and parse it from
        # a StringIO so member reads don't disturb the file position.
        data = fp.read(size_cd)
        fp = cStringIO.StringIO(data)
        total = 0
        while total < size_cd:
            centdir = fp.read(sizeCentralDir)
            if centdir[0:4] != stringCentralDir:
                raise BadZipfile, "Bad magic number for central directory"
            centdir = struct.unpack(structCentralDir, centdir)
            if self.debug > 2:
                print centdir
            filename = fp.read(centdir[_CD_FILENAME_LENGTH])
            # Create ZipInfo instance to store file information
            x = ZipInfo(filename)
            x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
            x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
            x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
            (x.create_version, x.create_system, x.extract_version, x.reserved,
                x.flag_bits, x.compress_type, t, d,
                x.CRC, x.compress_size, x.file_size) = centdir[1:12]
            x.volume, x.internal_attr, x.external_attr = centdir[15:18]
            # Convert date/time code to (year, month, day, hour, min, sec)
            x._raw_time = t
            x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
                                     t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )

            x._decodeExtra()
            x.header_offset = x.header_offset + concat
            x.filename = x._decodeFilename()
            self.filelist.append(x)
            self.NameToInfo[x.filename] = x

            # update total bytes read from central directory
            total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
                     + centdir[_CD_EXTRA_FIELD_LENGTH]
                     + centdir[_CD_COMMENT_LENGTH])

        if self.debug > 2:
            print "total", total
1343 | 785 | |||
1344 | 786 | |||
1345 | 787 | def namelist(self): | ||
1346 | 788 | """Return a list of file names in the archive.""" | ||
1347 | 789 | l = [] | ||
1348 | 790 | for data in self.filelist: | ||
1349 | 791 | l.append(data.filename) | ||
1350 | 792 | return l | ||
1351 | 793 | |||
1352 | 794 | def infolist(self): | ||
1353 | 795 | """Return a list of class ZipInfo instances for files in the | ||
1354 | 796 | archive.""" | ||
1355 | 797 | return self.filelist | ||
1356 | 798 | |||
1357 | 799 | def printdir(self): | ||
1358 | 800 | """Print a table of contents for the zip file.""" | ||
1359 | 801 | print "%-46s %19s %12s" % ("File Name", "Modified ", "Size") | ||
1360 | 802 | for zinfo in self.filelist: | ||
1361 | 803 | date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6] | ||
1362 | 804 | print "%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size) | ||
1363 | 805 | |||
1364 | 806 | def testzip(self): | ||
1365 | 807 | """Read all the files and check the CRC.""" | ||
1366 | 808 | chunk_size = 2 ** 20 | ||
1367 | 809 | for zinfo in self.filelist: | ||
1368 | 810 | try: | ||
1369 | 811 | # Read by chunks, to avoid an OverflowError or a | ||
1370 | 812 | # MemoryError with very large embedded files. | ||
1371 | 813 | f = self.open(zinfo.filename, "r") | ||
1372 | 814 | while f.read(chunk_size): # Check CRC-32 | ||
1373 | 815 | pass | ||
1374 | 816 | except BadZipfile: | ||
1375 | 817 | return zinfo.filename | ||
1376 | 818 | |||
1377 | 819 | def getinfo(self, name): | ||
1378 | 820 | """Return the instance of ZipInfo given 'name'.""" | ||
1379 | 821 | info = self.NameToInfo.get(name) | ||
1380 | 822 | if info is None: | ||
1381 | 823 | raise KeyError( | ||
1382 | 824 | 'There is no item named %r in the archive' % name) | ||
1383 | 825 | |||
1384 | 826 | return info | ||
1385 | 827 | |||
1386 | 828 | def setpassword(self, pwd): | ||
1387 | 829 | """Set default password for encrypted files.""" | ||
1388 | 830 | self.pwd = pwd | ||
1389 | 831 | |||
1390 | 832 | def read(self, name, pwd=None): | ||
1391 | 833 | """Return file bytes (as a string) for name.""" | ||
1392 | 834 | return self.open(name, "r", pwd).read() | ||
1393 | 835 | |||
1394 | 836 | def open(self, name, mode="r", pwd=None): | ||
1395 | 837 | """Return file-like object for 'name'.""" | ||
1396 | 838 | if mode not in ("r", "U", "rU"): | ||
1397 | 839 | raise RuntimeError, 'open() requires mode "r", "U", or "rU"' | ||
1398 | 840 | if not self.fp: | ||
1399 | 841 | raise RuntimeError, \ | ||
1400 | 842 | "Attempt to read ZIP archive that was already closed" | ||
1401 | 843 | |||
1402 | 844 | # Only open a new file for instances where we were not | ||
1403 | 845 | # given a file object in the constructor | ||
1404 | 846 | if self._filePassed: | ||
1405 | 847 | zef_file = self.fp | ||
1406 | 848 | else: | ||
1407 | 849 | zef_file = open(self.filename, 'rb') | ||
1408 | 850 | |||
1409 | 851 | # Make sure we have an info object | ||
1410 | 852 | if isinstance(name, ZipInfo): | ||
1411 | 853 | # 'name' is already an info object | ||
1412 | 854 | zinfo = name | ||
1413 | 855 | else: | ||
1414 | 856 | # Get info object for name | ||
1415 | 857 | zinfo = self.getinfo(name) | ||
1416 | 858 | |||
1417 | 859 | zef_file.seek(zinfo.header_offset, 0) | ||
1418 | 860 | |||
1419 | 861 | # Skip the file header: | ||
1420 | 862 | fheader = zef_file.read(sizeFileHeader) | ||
1421 | 863 | if fheader[0:4] != stringFileHeader: | ||
1422 | 864 | raise BadZipfile, "Bad magic number for file header" | ||
1423 | 865 | |||
1424 | 866 | fheader = struct.unpack(structFileHeader, fheader) | ||
1425 | 867 | fname = zef_file.read(fheader[_FH_FILENAME_LENGTH]) | ||
1426 | 868 | if fheader[_FH_EXTRA_FIELD_LENGTH]: | ||
1427 | 869 | zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH]) | ||
1428 | 870 | |||
1429 | 871 | if fname != zinfo.orig_filename: | ||
1430 | 872 | raise BadZipfile, \ | ||
1431 | 873 | 'File name in directory "%s" and header "%s" differ.' % ( | ||
1432 | 874 | zinfo.orig_filename, fname) | ||
1433 | 875 | |||
1434 | 876 | # check for encrypted flag & handle password | ||
1435 | 877 | is_encrypted = zinfo.flag_bits & 0x1 | ||
1436 | 878 | zd = None | ||
1437 | 879 | if is_encrypted: | ||
1438 | 880 | if not pwd: | ||
1439 | 881 | pwd = self.pwd | ||
1440 | 882 | if not pwd: | ||
1441 | 883 | raise RuntimeError, "File %s is encrypted, " \ | ||
1442 | 884 | "password required for extraction" % name | ||
1443 | 885 | |||
1444 | 886 | zd = _ZipDecrypter(pwd) | ||
1445 | 887 | # The first 12 bytes in the cypher stream is an encryption header | ||
1446 | 888 | # used to strengthen the algorithm. The first 11 bytes are | ||
1447 | 889 | # completely random, while the 12th contains the MSB of the CRC, | ||
1448 | 890 | # or the MSB of the file time depending on the header type | ||
1449 | 891 | # and is used to check the correctness of the password. | ||
1450 | 892 | bytes = zef_file.read(12) | ||
1451 | 893 | h = map(zd, bytes[0:12]) | ||
1452 | 894 | if zinfo.flag_bits & 0x8: | ||
1453 | 895 | # compare against the file type from extended local headers | ||
1454 | 896 | check_byte = (zinfo._raw_time >> 8) & 0xff | ||
1455 | 897 | else: | ||
1456 | 898 | # compare against the CRC otherwise | ||
1457 | 899 | check_byte = (zinfo.CRC >> 24) & 0xff | ||
1458 | 900 | if ord(h[11]) != check_byte: | ||
1459 | 901 | raise RuntimeError("Bad password for file", name) | ||
1460 | 902 | |||
1461 | 903 | # build and return a ZipExtFile | ||
1462 | 904 | if zd is None: | ||
1463 | 905 | zef = ZipExtFile(zef_file, zinfo) | ||
1464 | 906 | else: | ||
1465 | 907 | zef = ZipExtFile(zef_file, zinfo, zd) | ||
1466 | 908 | |||
1467 | 909 | # set universal newlines on ZipExtFile if necessary | ||
1468 | 910 | if "U" in mode: | ||
1469 | 911 | zef.set_univ_newlines(True) | ||
1470 | 912 | return zef | ||
1471 | 913 | |||
1472 | 914 | def extract(self, member, path=None, pwd=None): | ||
1473 | 915 | """Extract a member from the archive to the current working directory, | ||
1474 | 916 | using its full name. Its file information is extracted as accurately | ||
1475 | 917 | as possible. `member' may be a filename or a ZipInfo object. You can | ||
1476 | 918 | specify a different directory using `path'. | ||
1477 | 919 | """ | ||
1478 | 920 | if not isinstance(member, ZipInfo): | ||
1479 | 921 | member = self.getinfo(member) | ||
1480 | 922 | |||
1481 | 923 | if path is None: | ||
1482 | 924 | path = os.getcwd() | ||
1483 | 925 | |||
1484 | 926 | return self._extract_member(member, path, pwd) | ||
1485 | 927 | |||
1486 | 928 | def extractall(self, path=None, members=None, pwd=None): | ||
1487 | 929 | """Extract all members from the archive to the current working | ||
1488 | 930 | directory. `path' specifies a different directory to extract to. | ||
1489 | 931 | `members' is optional and must be a subset of the list returned | ||
1490 | 932 | by namelist(). | ||
1491 | 933 | """ | ||
1492 | 934 | if members is None: | ||
1493 | 935 | members = self.namelist() | ||
1494 | 936 | |||
1495 | 937 | for zipinfo in members: | ||
1496 | 938 | self.extract(zipinfo, path, pwd) | ||
1497 | 939 | |||
1498 | 940 | def _extract_member(self, member, targetpath, pwd): | ||
1499 | 941 | """Extract the ZipInfo object 'member' to a physical | ||
1500 | 942 | file on the path targetpath. | ||
1501 | 943 | """ | ||
1502 | 944 | # build the destination pathname, replacing | ||
1503 | 945 | # forward slashes to platform specific separators. | ||
1504 | 946 | # Strip trailing path separator, unless it represents the root. | ||
1505 | 947 | if (targetpath[-1:] in (os.path.sep, os.path.altsep) | ||
1506 | 948 | and len(os.path.splitdrive(targetpath)[1]) > 1): | ||
1507 | 949 | targetpath = targetpath[:-1] | ||
1508 | 950 | |||
1509 | 951 | # don't include leading "/" from file name if present | ||
1510 | 952 | if member.filename[0] == '/': | ||
1511 | 953 | targetpath = os.path.join(targetpath, member.filename[1:]) | ||
1512 | 954 | else: | ||
1513 | 955 | targetpath = os.path.join(targetpath, member.filename) | ||
1514 | 956 | |||
1515 | 957 | targetpath = os.path.normpath(targetpath) | ||
1516 | 958 | |||
1517 | 959 | # Create all upper directories if necessary. | ||
1518 | 960 | upperdirs = os.path.dirname(targetpath) | ||
1519 | 961 | if upperdirs and not os.path.exists(upperdirs): | ||
1520 | 962 | os.makedirs(upperdirs) | ||
1521 | 963 | |||
1522 | 964 | if member.filename[-1] == '/': | ||
1523 | 965 | if not os.path.isdir(targetpath): | ||
1524 | 966 | os.mkdir(targetpath) | ||
1525 | 967 | return targetpath | ||
1526 | 968 | |||
1527 | 969 | source = self.open(member, pwd=pwd) | ||
1528 | 970 | target = file(targetpath, "wb") | ||
1529 | 971 | shutil.copyfileobj(source, target) | ||
1530 | 972 | source.close() | ||
1531 | 973 | target.close() | ||
1532 | 974 | |||
1533 | 975 | return targetpath | ||
1534 | 976 | |||
1535 | 977 | def _writecheck(self, zinfo): | ||
1536 | 978 | """Check for errors before writing a file to the archive.""" | ||
1537 | 979 | if zinfo.filename in self.NameToInfo: | ||
1538 | 980 | if self.debug: # Warning for duplicate names | ||
1539 | 981 | print "Duplicate name:", zinfo.filename | ||
1540 | 982 | if self.mode not in ("w", "a"): | ||
1541 | 983 | raise RuntimeError, 'write() requires mode "w" or "a"' | ||
1542 | 984 | if not self.fp: | ||
1543 | 985 | raise RuntimeError, \ | ||
1544 | 986 | "Attempt to write ZIP archive that was already closed" | ||
1545 | 987 | if zinfo.compress_type == ZIP_DEFLATED and not zlib: | ||
1546 | 988 | raise RuntimeError, \ | ||
1547 | 989 | "Compression requires the (missing) zlib module" | ||
1548 | 990 | if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED): | ||
1549 | 991 | raise RuntimeError, \ | ||
1550 | 992 | "That compression method is not supported" | ||
1551 | 993 | if zinfo.file_size > ZIP64_LIMIT: | ||
1552 | 994 | if not self._allowZip64: | ||
1553 | 995 | raise LargeZipFile("Filesize would require ZIP64 extensions") | ||
1554 | 996 | if zinfo.header_offset > ZIP64_LIMIT: | ||
1555 | 997 | if not self._allowZip64: | ||
1556 | 998 | raise LargeZipFile("Zipfile size would require ZIP64 extensions") | ||
1557 | 999 | |||
1558 | 1000 | def write(self, filename, arcname=None, compress_type=None): | ||
1559 | 1001 | """Put the bytes from filename into the archive under the name | ||
1560 | 1002 | arcname.""" | ||
1561 | 1003 | if not self.fp: | ||
1562 | 1004 | raise RuntimeError( | ||
1563 | 1005 | "Attempt to write to ZIP archive that was already closed") | ||
1564 | 1006 | |||
1565 | 1007 | st = os.stat(filename) | ||
1566 | 1008 | isdir = stat.S_ISDIR(st.st_mode) | ||
1567 | 1009 | mtime = time.localtime(st.st_mtime) | ||
1568 | 1010 | date_time = mtime[0:6] | ||
1569 | 1011 | # Create ZipInfo instance to store file information | ||
1570 | 1012 | if arcname is None: | ||
1571 | 1013 | arcname = filename | ||
1572 | 1014 | arcname = os.path.normpath(os.path.splitdrive(arcname)[1]) | ||
1573 | 1015 | while arcname[0] in (os.sep, os.altsep): | ||
1574 | 1016 | arcname = arcname[1:] | ||
1575 | 1017 | if isdir: | ||
1576 | 1018 | arcname += '/' | ||
1577 | 1019 | zinfo = ZipInfo(arcname, date_time) | ||
1578 | 1020 | zinfo.external_attr = (st[0] & 0xFFFF) << 16L # Unix attributes | ||
1579 | 1021 | if compress_type is None: | ||
1580 | 1022 | zinfo.compress_type = self.compression | ||
1581 | 1023 | else: | ||
1582 | 1024 | zinfo.compress_type = compress_type | ||
1583 | 1025 | |||
1584 | 1026 | zinfo.file_size = st.st_size | ||
1585 | 1027 | zinfo.flag_bits = 0x00 | ||
1586 | 1028 | zinfo.header_offset = self.fp.tell() # Start of header bytes | ||
1587 | 1029 | |||
1588 | 1030 | self._writecheck(zinfo) | ||
1589 | 1031 | self._didModify = True | ||
1590 | 1032 | |||
1591 | 1033 | if isdir: | ||
1592 | 1034 | zinfo.file_size = 0 | ||
1593 | 1035 | zinfo.compress_size = 0 | ||
1594 | 1036 | zinfo.CRC = 0 | ||
1595 | 1037 | self.filelist.append(zinfo) | ||
1596 | 1038 | self.NameToInfo[zinfo.filename] = zinfo | ||
1597 | 1039 | self.fp.write(zinfo.FileHeader()) | ||
1598 | 1040 | return | ||
1599 | 1041 | |||
1600 | 1042 | fp = open(filename, "rb") | ||
1601 | 1043 | # Must overwrite CRC and sizes with correct data later | ||
1602 | 1044 | zinfo.CRC = CRC = 0 | ||
1603 | 1045 | zinfo.compress_size = compress_size = 0 | ||
1604 | 1046 | zinfo.file_size = file_size = 0 | ||
1605 | 1047 | self.fp.write(zinfo.FileHeader()) | ||
1606 | 1048 | if zinfo.compress_type == ZIP_DEFLATED: | ||
1607 | 1049 | cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, | ||
1608 | 1050 | zlib.DEFLATED, -15) | ||
1609 | 1051 | else: | ||
1610 | 1052 | cmpr = None | ||
1611 | 1053 | while 1: | ||
1612 | 1054 | buf = fp.read(1024 * 8) | ||
1613 | 1055 | if not buf: | ||
1614 | 1056 | break | ||
1615 | 1057 | file_size = file_size + len(buf) | ||
1616 | 1058 | CRC = crc32(buf, CRC) & 0xffffffff | ||
1617 | 1059 | if cmpr: | ||
1618 | 1060 | buf = cmpr.compress(buf) | ||
1619 | 1061 | compress_size = compress_size + len(buf) | ||
1620 | 1062 | self.fp.write(buf) | ||
1621 | 1063 | fp.close() | ||
1622 | 1064 | if cmpr: | ||
1623 | 1065 | buf = cmpr.flush() | ||
1624 | 1066 | compress_size = compress_size + len(buf) | ||
1625 | 1067 | self.fp.write(buf) | ||
1626 | 1068 | zinfo.compress_size = compress_size | ||
1627 | 1069 | else: | ||
1628 | 1070 | zinfo.compress_size = file_size | ||
1629 | 1071 | zinfo.CRC = CRC | ||
1630 | 1072 | zinfo.file_size = file_size | ||
1631 | 1073 | # Seek backwards and write CRC and file sizes | ||
1632 | 1074 | position = self.fp.tell() # Preserve current position in file | ||
1633 | 1075 | self.fp.seek(zinfo.header_offset + 14, 0) | ||
1634 | 1076 | self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size, | ||
1635 | 1077 | zinfo.file_size)) | ||
1636 | 1078 | self.fp.seek(position, 0) | ||
1637 | 1079 | self.filelist.append(zinfo) | ||
1638 | 1080 | self.NameToInfo[zinfo.filename] = zinfo | ||
1639 | 1081 | |||
1640 | 1082 | def writestr(self, zinfo_or_arcname, bytes): | ||
1641 | 1083 | """Write a file into the archive. The contents is the string | ||
1642 | 1084 | 'bytes'. 'zinfo_or_arcname' is either a ZipInfo instance or | ||
1643 | 1085 | the name of the file in the archive.""" | ||
1644 | 1086 | if not isinstance(zinfo_or_arcname, ZipInfo): | ||
1645 | 1087 | zinfo = ZipInfo(filename=zinfo_or_arcname, | ||
1646 | 1088 | date_time=time.localtime(time.time())[:6]) | ||
1647 | 1089 | zinfo.compress_type = self.compression | ||
1648 | 1090 | zinfo.external_attr = 0600 << 16 | ||
1649 | 1091 | else: | ||
1650 | 1092 | zinfo = zinfo_or_arcname | ||
1651 | 1093 | |||
1652 | 1094 | if not self.fp: | ||
1653 | 1095 | raise RuntimeError( | ||
1654 | 1096 | "Attempt to write to ZIP archive that was already closed") | ||
1655 | 1097 | |||
1656 | 1098 | zinfo.file_size = len(bytes) # Uncompressed size | ||
1657 | 1099 | zinfo.header_offset = self.fp.tell() # Start of header bytes | ||
1658 | 1100 | self._writecheck(zinfo) | ||
1659 | 1101 | self._didModify = True | ||
1660 | 1102 | zinfo.CRC = crc32(bytes) & 0xffffffff # CRC-32 checksum | ||
1661 | 1103 | if zinfo.compress_type == ZIP_DEFLATED: | ||
1662 | 1104 | co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, | ||
1663 | 1105 | zlib.DEFLATED, -15) | ||
1664 | 1106 | bytes = co.compress(bytes) + co.flush() | ||
1665 | 1107 | zinfo.compress_size = len(bytes) # Compressed size | ||
1666 | 1108 | else: | ||
1667 | 1109 | zinfo.compress_size = zinfo.file_size | ||
1668 | 1110 | zinfo.header_offset = self.fp.tell() # Start of header bytes | ||
1669 | 1111 | self.fp.write(zinfo.FileHeader()) | ||
1670 | 1112 | self.fp.write(bytes) | ||
1671 | 1113 | self.fp.flush() | ||
1672 | 1114 | if zinfo.flag_bits & 0x08: | ||
1673 | 1115 | # Write CRC and file sizes after the file data | ||
1674 | 1116 | self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size, | ||
1675 | 1117 | zinfo.file_size)) | ||
1676 | 1118 | self.filelist.append(zinfo) | ||
1677 | 1119 | self.NameToInfo[zinfo.filename] = zinfo | ||
1678 | 1120 | |||
1679 | 1121 | def __del__(self): | ||
1680 | 1122 | """Call the "close()" method in case the user forgot.""" | ||
1681 | 1123 | self.close() | ||
1682 | 1124 | |||
1683 | 1125 | def close(self): | ||
1684 | 1126 | """Close the file, and for mode "w" and "a" write the ending | ||
1685 | 1127 | records.""" | ||
1686 | 1128 | if self.fp is None: | ||
1687 | 1129 | return | ||
1688 | 1130 | |||
1689 | 1131 | if self.mode in ("w", "a") and self._didModify: # write ending records | ||
1690 | 1132 | count = 0 | ||
1691 | 1133 | pos1 = self.fp.tell() | ||
1692 | 1134 | for zinfo in self.filelist: # write central directory | ||
1693 | 1135 | count = count + 1 | ||
1694 | 1136 | dt = zinfo.date_time | ||
1695 | 1137 | dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] | ||
1696 | 1138 | dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) | ||
1697 | 1139 | extra = [] | ||
1698 | 1140 | if zinfo.file_size > ZIP64_LIMIT \ | ||
1699 | 1141 | or zinfo.compress_size > ZIP64_LIMIT: | ||
1700 | 1142 | extra.append(zinfo.file_size) | ||
1701 | 1143 | extra.append(zinfo.compress_size) | ||
1702 | 1144 | file_size = 0xffffffff | ||
1703 | 1145 | compress_size = 0xffffffff | ||
1704 | 1146 | else: | ||
1705 | 1147 | file_size = zinfo.file_size | ||
1706 | 1148 | compress_size = zinfo.compress_size | ||
1707 | 1149 | |||
1708 | 1150 | if zinfo.header_offset > ZIP64_LIMIT: | ||
1709 | 1151 | extra.append(zinfo.header_offset) | ||
1710 | 1152 | header_offset = 0xffffffffL | ||
1711 | 1153 | else: | ||
1712 | 1154 | header_offset = zinfo.header_offset | ||
1713 | 1155 | |||
1714 | 1156 | extra_data = zinfo.extra | ||
1715 | 1157 | if extra: | ||
1716 | 1158 | # Append a ZIP64 field to the extra's | ||
1717 | 1159 | extra_data = struct.pack( | ||
1718 | 1160 | '<HH' + 'Q'*len(extra), | ||
1719 | 1161 | 1, 8*len(extra), *extra) + extra_data | ||
1720 | 1162 | |||
1721 | 1163 | extract_version = max(45, zinfo.extract_version) | ||
1722 | 1164 | create_version = max(45, zinfo.create_version) | ||
1723 | 1165 | else: | ||
1724 | 1166 | extract_version = zinfo.extract_version | ||
1725 | 1167 | create_version = zinfo.create_version | ||
1726 | 1168 | |||
1727 | 1169 | try: | ||
1728 | 1170 | filename, flag_bits = zinfo._encodeFilenameFlags() | ||
1729 | 1171 | centdir = struct.pack(structCentralDir, | ||
1730 | 1172 | stringCentralDir, create_version, | ||
1731 | 1173 | zinfo.create_system, extract_version, zinfo.reserved, | ||
1732 | 1174 | flag_bits, zinfo.compress_type, dostime, dosdate, | ||
1733 | 1175 | zinfo.CRC, compress_size, file_size, | ||
1734 | 1176 | len(filename), len(extra_data), len(zinfo.comment), | ||
1735 | 1177 | 0, zinfo.internal_attr, zinfo.external_attr, | ||
1736 | 1178 | header_offset) | ||
1737 | 1179 | except DeprecationWarning: | ||
1738 | 1180 | print >>sys.stderr, (structCentralDir, | ||
1739 | 1181 | stringCentralDir, create_version, | ||
1740 | 1182 | zinfo.create_system, extract_version, zinfo.reserved, | ||
1741 | 1183 | zinfo.flag_bits, zinfo.compress_type, dostime, dosdate, | ||
1742 | 1184 | zinfo.CRC, compress_size, file_size, | ||
1743 | 1185 | len(zinfo.filename), len(extra_data), len(zinfo.comment), | ||
1744 | 1186 | 0, zinfo.internal_attr, zinfo.external_attr, | ||
1745 | 1187 | header_offset) | ||
1746 | 1188 | raise | ||
1747 | 1189 | self.fp.write(centdir) | ||
1748 | 1190 | self.fp.write(filename) | ||
1749 | 1191 | self.fp.write(extra_data) | ||
1750 | 1192 | self.fp.write(zinfo.comment) | ||
1751 | 1193 | |||
1752 | 1194 | pos2 = self.fp.tell() | ||
1753 | 1195 | # Write end-of-zip-archive record | ||
1754 | 1196 | centDirCount = count | ||
1755 | 1197 | centDirSize = pos2 - pos1 | ||
1756 | 1198 | centDirOffset = pos1 | ||
1757 | 1199 | if (centDirCount >= ZIP_FILECOUNT_LIMIT or | ||
1758 | 1200 | centDirOffset > ZIP64_LIMIT or | ||
1759 | 1201 | centDirSize > ZIP64_LIMIT): | ||
1760 | 1202 | # Need to write the ZIP64 end-of-archive records | ||
1761 | 1203 | zip64endrec = struct.pack( | ||
1762 | 1204 | structEndArchive64, stringEndArchive64, | ||
1763 | 1205 | 44, 45, 45, 0, 0, centDirCount, centDirCount, | ||
1764 | 1206 | centDirSize, centDirOffset) | ||
1765 | 1207 | self.fp.write(zip64endrec) | ||
1766 | 1208 | |||
1767 | 1209 | zip64locrec = struct.pack( | ||
1768 | 1210 | structEndArchive64Locator, | ||
1769 | 1211 | stringEndArchive64Locator, 0, pos2, 1) | ||
1770 | 1212 | self.fp.write(zip64locrec) | ||
1771 | 1213 | centDirCount = min(centDirCount, 0xFFFF) | ||
1772 | 1214 | centDirSize = min(centDirSize, 0xFFFFFFFF) | ||
1773 | 1215 | centDirOffset = min(centDirOffset, 0xFFFFFFFF) | ||
1774 | 1216 | |||
1775 | 1217 | # check for valid comment length | ||
1776 | 1218 | if len(self.comment) >= ZIP_MAX_COMMENT: | ||
1777 | 1219 | if self.debug > 0: | ||
1778 | 1220 | msg = 'Archive comment is too long; truncating to %d bytes' \ | ||
1779 | 1221 | % ZIP_MAX_COMMENT | ||
1780 | 1222 | self.comment = self.comment[:ZIP_MAX_COMMENT] | ||
1781 | 1223 | |||
1782 | 1224 | endrec = struct.pack(structEndArchive, stringEndArchive, | ||
1783 | 1225 | 0, 0, centDirCount, centDirCount, | ||
1784 | 1226 | centDirSize, centDirOffset, len(self.comment)) | ||
1785 | 1227 | self.fp.write(endrec) | ||
1786 | 1228 | self.fp.write(self.comment) | ||
1787 | 1229 | self.fp.flush() | ||
1788 | 1230 | |||
1789 | 1231 | if not self._filePassed: | ||
1790 | 1232 | self.fp.close() | ||
1791 | 1233 | self.fp = None | ||
1792 | 1234 | |||
1793 | 1235 | |||
1794 | 1236 | class PyZipFile(ZipFile): | ||
1795 | 1237 | """Class to create ZIP archives with Python library files and packages.""" | ||
1796 | 1238 | |||
1797 | 1239 | def writepy(self, pathname, basename = ""): | ||
1798 | 1240 | """Add all files from "pathname" to the ZIP archive. | ||
1799 | 1241 | |||
1800 | 1242 | If pathname is a package directory, search the directory and | ||
1801 | 1243 | all package subdirectories recursively for all *.py and enter | ||
1802 | 1244 | the modules into the archive. If pathname is a plain | ||
1803 | 1245 | directory, listdir *.py and enter all modules. Else, pathname | ||
1804 | 1246 | must be a Python *.py file and the module will be put into the | ||
1805 | 1247 | archive. Added modules are always module.pyo or module.pyc. | ||
1806 | 1248 | This method will compile the module.py into module.pyc if | ||
1807 | 1249 | necessary. | ||
1808 | 1250 | """ | ||
1809 | 1251 | dir, name = os.path.split(pathname) | ||
1810 | 1252 | if os.path.isdir(pathname): | ||
1811 | 1253 | initname = os.path.join(pathname, "__init__.py") | ||
1812 | 1254 | if os.path.isfile(initname): | ||
1813 | 1255 | # This is a package directory, add it | ||
1814 | 1256 | if basename: | ||
1815 | 1257 | basename = "%s/%s" % (basename, name) | ||
1816 | 1258 | else: | ||
1817 | 1259 | basename = name | ||
1818 | 1260 | if self.debug: | ||
1819 | 1261 | print "Adding package in", pathname, "as", basename | ||
1820 | 1262 | fname, arcname = self._get_codename(initname[0:-3], basename) | ||
1821 | 1263 | if self.debug: | ||
1822 | 1264 | print "Adding", arcname | ||
1823 | 1265 | self.write(fname, arcname) | ||
1824 | 1266 | dirlist = os.listdir(pathname) | ||
1825 | 1267 | dirlist.remove("__init__.py") | ||
1826 | 1268 | # Add all *.py files and package subdirectories | ||
1827 | 1269 | for filename in dirlist: | ||
1828 | 1270 | path = os.path.join(pathname, filename) | ||
1829 | 1271 | root, ext = os.path.splitext(filename) | ||
1830 | 1272 | if os.path.isdir(path): | ||
1831 | 1273 | if os.path.isfile(os.path.join(path, "__init__.py")): | ||
1832 | 1274 | # This is a package directory, add it | ||
1833 | 1275 | self.writepy(path, basename) # Recursive call | ||
1834 | 1276 | elif ext == ".py": | ||
1835 | 1277 | fname, arcname = self._get_codename(path[0:-3], | ||
1836 | 1278 | basename) | ||
1837 | 1279 | if self.debug: | ||
1838 | 1280 | print "Adding", arcname | ||
1839 | 1281 | self.write(fname, arcname) | ||
1840 | 1282 | else: | ||
1841 | 1283 | # This is NOT a package directory, add its files at top level | ||
1842 | 1284 | if self.debug: | ||
1843 | 1285 | print "Adding files from directory", pathname | ||
1844 | 1286 | for filename in os.listdir(pathname): | ||
1845 | 1287 | path = os.path.join(pathname, filename) | ||
1846 | 1288 | root, ext = os.path.splitext(filename) | ||
1847 | 1289 | if ext == ".py": | ||
1848 | 1290 | fname, arcname = self._get_codename(path[0:-3], | ||
1849 | 1291 | basename) | ||
1850 | 1292 | if self.debug: | ||
1851 | 1293 | print "Adding", arcname | ||
1852 | 1294 | self.write(fname, arcname) | ||
1853 | 1295 | else: | ||
1854 | 1296 | if pathname[-3:] != ".py": | ||
1855 | 1297 | raise RuntimeError, \ | ||
1856 | 1298 | 'Files added with writepy() must end with ".py"' | ||
1857 | 1299 | fname, arcname = self._get_codename(pathname[0:-3], basename) | ||
1858 | 1300 | if self.debug: | ||
1859 | 1301 | print "Adding file", arcname | ||
1860 | 1302 | self.write(fname, arcname) | ||
1861 | 1303 | |||
1862 | 1304 | def _get_codename(self, pathname, basename): | ||
1863 | 1305 | """Return (filename, archivename) for the path. | ||
1864 | 1306 | |||
1865 | 1307 | Given a module name path, return the correct file path and | ||
1866 | 1308 | archive name, compiling if necessary. For example, given | ||
1867 | 1309 | /python/lib/string, return (/python/lib/string.pyc, string). | ||
1868 | 1310 | """ | ||
1869 | 1311 | file_py = pathname + ".py" | ||
1870 | 1312 | file_pyc = pathname + ".pyc" | ||
1871 | 1313 | file_pyo = pathname + ".pyo" | ||
1872 | 1314 | if os.path.isfile(file_pyo) and \ | ||
1873 | 1315 | os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime: | ||
1874 | 1316 | fname = file_pyo # Use .pyo file | ||
1875 | 1317 | elif not os.path.isfile(file_pyc) or \ | ||
1876 | 1318 | os.stat(file_pyc).st_mtime < os.stat(file_py).st_mtime: | ||
1877 | 1319 | import py_compile | ||
1878 | 1320 | if self.debug: | ||
1879 | 1321 | print "Compiling", file_py | ||
1880 | 1322 | try: | ||
1881 | 1323 | py_compile.compile(file_py, file_pyc, None, True) | ||
1882 | 1324 | except py_compile.PyCompileError,err: | ||
1883 | 1325 | print err.msg | ||
1884 | 1326 | fname = file_pyc | ||
1885 | 1327 | else: | ||
1886 | 1328 | fname = file_pyc | ||
1887 | 1329 | archivename = os.path.split(fname)[1] | ||
1888 | 1330 | if basename: | ||
1889 | 1331 | archivename = "%s/%s" % (basename, archivename) | ||
1890 | 1332 | return (fname, archivename) | ||
1891 | 1333 | |||
1892 | 1334 | |||
1893 | 1335 | def main(args = None): | ||
1894 | 1336 | import textwrap | ||
1895 | 1337 | USAGE=textwrap.dedent("""\ | ||
1896 | 1338 | Usage: | ||
1897 | 1339 | zipfile.py -l zipfile.zip # Show listing of a zipfile | ||
1898 | 1340 | zipfile.py -t zipfile.zip # Test if a zipfile is valid | ||
1899 | 1341 | zipfile.py -e zipfile.zip target # Extract zipfile into target dir | ||
1900 | 1342 | zipfile.py -c zipfile.zip src ... # Create zipfile from sources | ||
1901 | 1343 | """) | ||
1902 | 1344 | if args is None: | ||
1903 | 1345 | args = sys.argv[1:] | ||
1904 | 1346 | |||
1905 | 1347 | if not args or args[0] not in ('-l', '-c', '-e', '-t'): | ||
1906 | 1348 | print USAGE | ||
1907 | 1349 | sys.exit(1) | ||
1908 | 1350 | |||
1909 | 1351 | if args[0] == '-l': | ||
1910 | 1352 | if len(args) != 2: | ||
1911 | 1353 | print USAGE | ||
1912 | 1354 | sys.exit(1) | ||
1913 | 1355 | zf = ZipFile(args[1], 'r') | ||
1914 | 1356 | zf.printdir() | ||
1915 | 1357 | zf.close() | ||
1916 | 1358 | |||
1917 | 1359 | elif args[0] == '-t': | ||
1918 | 1360 | if len(args) != 2: | ||
1919 | 1361 | print USAGE | ||
1920 | 1362 | sys.exit(1) | ||
1921 | 1363 | zf = ZipFile(args[1], 'r') | ||
1922 | 1364 | zf.testzip() | ||
1923 | 1365 | print "Done testing" | ||
1924 | 1366 | |||
1925 | 1367 | elif args[0] == '-e': | ||
1926 | 1368 | if len(args) != 3: | ||
1927 | 1369 | print USAGE | ||
1928 | 1370 | sys.exit(1) | ||
1929 | 1371 | |||
1930 | 1372 | zf = ZipFile(args[1], 'r') | ||
1931 | 1373 | out = args[2] | ||
1932 | 1374 | for path in zf.namelist(): | ||
1933 | 1375 | if path.startswith('./'): | ||
1934 | 1376 | tgt = os.path.join(out, path[2:]) | ||
1935 | 1377 | else: | ||
1936 | 1378 | tgt = os.path.join(out, path) | ||
1937 | 1379 | |||
1938 | 1380 | tgtdir = os.path.dirname(tgt) | ||
1939 | 1381 | if not os.path.exists(tgtdir): | ||
1940 | 1382 | os.makedirs(tgtdir) | ||
1941 | 1383 | fp = open(tgt, 'wb') | ||
1942 | 1384 | fp.write(zf.read(path)) | ||
1943 | 1385 | fp.close() | ||
1944 | 1386 | zf.close() | ||
1945 | 1387 | |||
1946 | 1388 | elif args[0] == '-c': | ||
1947 | 1389 | if len(args) < 3: | ||
1948 | 1390 | print USAGE | ||
1949 | 1391 | sys.exit(1) | ||
1950 | 1392 | |||
1951 | 1393 | def addToZip(zf, path, zippath): | ||
1952 | 1394 | if os.path.isfile(path): | ||
1953 | 1395 | zf.write(path, zippath, ZIP_DEFLATED) | ||
1954 | 1396 | elif os.path.isdir(path): | ||
1955 | 1397 | for nm in os.listdir(path): | ||
1956 | 1398 | addToZip(zf, | ||
1957 | 1399 | os.path.join(path, nm), os.path.join(zippath, nm)) | ||
1958 | 1400 | # else: ignore | ||
1959 | 1401 | |||
1960 | 1402 | zf = ZipFile(args[1], 'w', allowZip64=True) | ||
1961 | 1403 | for src in args[2:]: | ||
1962 | 1404 | addToZip(zf, src, os.path.basename(src)) | ||
1963 | 1405 | |||
1964 | 1406 | zf.close() | ||
1965 | 1407 | |||
1966 | 1408 | if __name__ == "__main__": | ||
1967 | 1409 | main() | ||
1968 | 0 | 1410 | ||
1969 | === modified file 'setup.nsi' | |||
1970 | --- setup.nsi 2012-11-26 11:44:38 +0000 | |||
1971 | +++ setup.nsi 2013-02-28 12:40:27 +0000 | |||
1972 | @@ -206,6 +206,7 @@ | |||
1973 | 206 | 206 | ||
1974 | 207 | nsExec::Exec '"$INSTDIR\openerp-server.exe" --stop-after-init --logfile "$INSTDIR\openerp-server.log" -s' | 207 | nsExec::Exec '"$INSTDIR\openerp-server.exe" --stop-after-init --logfile "$INSTDIR\openerp-server.log" -s' |
1975 | 208 | nsExec::Exec '"$INSTDIR\service\OpenERPServerService.exe" -auto -install' | 208 | nsExec::Exec '"$INSTDIR\service\OpenERPServerService.exe" -auto -install' |
1976 | 209 | nsExec::Exec 'sc failure openerp-server-6.0 reset= 0 actions= restart/0/restart/0/restart/0' | ||
1977 | 209 | SectionEnd | 210 | SectionEnd |
1978 | 210 | 211 | ||
1979 | 211 | Section -RestartServer | 212 | Section -RestartServer |
1980 | 212 | 213 | ||
1981 | === modified file 'setup.py' | |||
1982 | --- setup.py 2011-03-30 17:04:32 +0000 | |||
1983 | +++ setup.py 2013-02-28 12:40:27 +0000 | |||
1984 | @@ -133,6 +133,7 @@ | |||
1985 | 133 | '''Build list of data files to be installed''' | 133 | '''Build list of data files to be installed''' |
1986 | 134 | files = [] | 134 | files = [] |
1987 | 135 | if os.name == 'nt': | 135 | if os.name == 'nt': |
1988 | 136 | files.append(('.', [join('bin', 'unifield-version.txt')])) | ||
1989 | 136 | os.chdir('bin') | 137 | os.chdir('bin') |
1990 | 137 | for (dp, dn, names) in os.walk('addons'): | 138 | for (dp, dn, names) in os.walk('addons'): |
1991 | 138 | files.append((dp, map(lambda x: join('bin', dp, x), names))) | 139 | files.append((dp, map(lambda x: join('bin', dp, x), names))) |
1992 | 139 | 140 | ||
1993 | === modified file 'win32/OpenERPServerService.py' | |||
1994 | --- win32/OpenERPServerService.py 2010-12-29 11:51:44 +0000 | |||
1995 | +++ win32/OpenERPServerService.py 2013-02-28 12:40:27 +0000 | |||
1996 | @@ -32,6 +32,8 @@ | |||
1997 | 32 | import os | 32 | import os |
1998 | 33 | import thread | 33 | import thread |
1999 | 34 | 34 | ||
2000 | 35 | EXIT_UPDATE_REQUIRE_RESTART = 1 | ||
2001 | 36 | |||
2002 | 35 | class OpenERPServerService(win32serviceutil.ServiceFramework): | 37 | class OpenERPServerService(win32serviceutil.ServiceFramework): |
2003 | 36 | # required info | 38 | # required info |
2004 | 37 | _svc_name_ = "openerp-server-6.0" | 39 | _svc_name_ = "openerp-server-6.0" |
2005 | @@ -46,8 +48,6 @@ | |||
2006 | 46 | self.hWaitStop = win32event.CreateEvent(None, 0, 0, None) | 48 | self.hWaitStop = win32event.CreateEvent(None, 0, 0, None) |
2007 | 47 | # a reference to the server's process | 49 | # a reference to the server's process |
2008 | 48 | self.terpprocess = None | 50 | self.terpprocess = None |
2009 | 49 | # info if the service terminates correctly or if the server crashed | ||
2010 | 50 | self.stopping = False | ||
2011 | 51 | 51 | ||
2012 | 52 | 52 | ||
2013 | 53 | def SvcStop(self): | 53 | def SvcStop(self): |
2014 | @@ -73,19 +73,28 @@ | |||
2015 | 73 | def StartControl(self,ws): | 73 | def StartControl(self,ws): |
2016 | 74 | # this listens to the Service Manager's events | 74 | # this listens to the Service Manager's events |
2017 | 75 | win32event.WaitForSingleObject(ws, win32event.INFINITE) | 75 | win32event.WaitForSingleObject(ws, win32event.INFINITE) |
2018 | 76 | self.stopping = True | ||
2019 | 77 | 76 | ||
2020 | 78 | def SvcDoRun(self): | 77 | def SvcDoRun(self): |
2021 | 79 | # Start OpenERP Server itself | ||
2022 | 80 | self.StartTERP() | ||
2023 | 81 | # start the loop waiting for the Service Manager's stop signal | 78 | # start the loop waiting for the Service Manager's stop signal |
2024 | 82 | thread.start_new_thread(self.StartControl, (self.hWaitStop,)) | 79 | thread.start_new_thread(self.StartControl, (self.hWaitStop,)) |
2031 | 83 | # Log an info message that the server is running | 80 | while True: |
2032 | 84 | servicemanager.LogInfoMsg("OpenERP Server up and running") | 81 | # Start OpenERP Server itself |
2033 | 85 | # verification if the server is really running, else quit with an error | 82 | self.StartTERP() |
2034 | 86 | self.terpprocess.wait() | 83 | # Log an info message |
2035 | 87 | if not self.stopping: | 84 | servicemanager.LogInfoMsg("OpenERP Server up and running") |
2036 | 88 | sys.exit("OpenERP Server check: server not running, check the logfile for more info") | 85 | # wait until child process is terminated |
2037 | 86 | # if exit status is: | ||
2038 | 87 | # - special 'restart' | ||
2039 | 88 | # simply loop to restart the process and finish update | ||
2040 | 89 | # - other exit status: | ||
2041 | 90 | # server crashed? exit with an error message | ||
2042 | 91 | exit_status = self.terpprocess.wait() | ||
2043 | 92 | if exit_status == EXIT_UPDATE_REQUIRE_RESTART: | ||
2044 | 93 | servicemanager.LogInfoMsg("OpenERP has been updated, restarting...") | ||
2045 | 94 | continue # restart openerp process | ||
2046 | 95 | if exit_status == 0: | ||
2047 | 96 | break # normal exit | ||
2048 | 97 | sys.exit(exit_status) | ||
2049 | 89 | 98 | ||
2050 | 90 | 99 | ||
2051 | 91 | 100 |