Merge lp:~unifield-team/unifield-server/sp71 into lp:unifield-server

Proposed by Samus CTO (OpenERP)
Status: Merged
Merged at revision: 3444
Proposed branch: lp:~unifield-team/unifield-server/sp71
Merge into: lp:unifield-server
Diff against target: 2048 lines (+1749/-96)
10 files modified
bin/openerp-server.py (+9/-6)
bin/pooler.py (+5/-0)
bin/service/web_services.py (+3/-2)
bin/sql_db.py (+1/-1)
bin/unifield-version.txt (+1/-0)
bin/updater.py (+299/-76)
bin/zipfile266.py (+1409/-0)
setup.nsi (+1/-0)
setup.py (+1/-0)
win32/OpenERPServerService.py (+20/-11)
To merge this branch: bzr merge lp:~unifield-team/unifield-server/sp71
Reviewer: UniField Dev Team
Review status: Pending
Review via email: mp+151008@code.launchpad.net

Preview Diff

=== modified file 'bin/openerp-server.py'
--- bin/openerp-server.py 2013-02-27 09:10:30 +0000
+++ bin/openerp-server.py 2013-02-28 12:40:27 +0000
@@ -30,8 +30,8 @@
 (c) 2003-TODAY, Fabien Pinckaers - OpenERP s.a.
 """
 
-from updater import do_update
-do_update()
+import updater
+updater.do_update()
 
 #----------------------------------------------------------
 # python imports
@@ -65,6 +65,7 @@
 # import the tools module so that the commandline parameters are parsed
 #-----------------------------------------------------------------------
 import tools
+updater.update_path()
 logger.info("OpenERP version - %s", release.version)
 for name, value in [('addons_path', tools.config['addons_path']),
                     ('database hostname', tools.config['db_host'] or 'localhost'),
@@ -219,6 +220,8 @@
     signal.signal(signal.SIGQUIT, dumpstacks)
 
 def quit(restart=False):
+    if restart:
+        time.sleep(updater.restart_delay)
     netsvc.Agent.quit()
     netsvc.Server.quitAll()
     if tools.config['pidfile']:
@@ -247,6 +250,8 @@
             logger.info(str(thread.getName()) + ' could not be terminated')
     if not restart:
         sys.exit(0)
+    elif os.name == 'nt':
+        sys.exit(1) # require service restart
     else:
         os.execv(sys.executable, [sys.executable] + sys.argv)
 
@@ -260,11 +265,9 @@
 
 logger.info('OpenERP server is running, waiting for connections...')
 
-tools.restart_required = False
-
-while netsvc.quit_signals_received == 0 and not tools.restart_required:
+while netsvc.quit_signals_received == 0 and not updater.restart_required:
     time.sleep(5)
 
-quit(restart=tools.restart_required)
+quit(restart=updater.restart_required)
 
 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
 
=== modified file 'bin/pooler.py'
--- bin/pooler.py 2010-10-01 11:25:52 +0000
+++ bin/pooler.py 2013-02-28 12:40:27 +0000
@@ -19,6 +19,8 @@
 #
 ##############################################################################
 
+import updater
+
 pool_dic = {}
 
 def get_db_and_pool(db_name, force_demo=False, status=None, update_module=False, pooljobs=True):
@@ -45,6 +47,9 @@
         try:
             pool.init_set(cr, False)
             pool.get('ir.actions.report.xml').register_all(cr)
+            if not updater.do_upgrade(cr, pool):
+                pool_dic.pop(db_name)
+                raise Exception("updater.py told us that OpenERP version doesn't match database version!")
             cr.commit()
         finally:
             cr.close()
 
=== modified file 'bin/service/web_services.py'
--- bin/service/web_services.py 2013-02-04 14:43:01 +0000
+++ bin/service/web_services.py 2013-02-28 12:40:27 +0000
@@ -32,6 +32,7 @@
 import ir
 import netsvc
 import pooler
+import updater
 import release
 import sql_db
 import tools
@@ -191,7 +192,7 @@
 
         self._set_pg_psw_env_var()
 
-        cmd = [tools.misc.find_pg_tool('pg_dump'), '--format=c', '--no-owner']
+        cmd = ['pg_dump', '--format=c', '--no-owner']
         if tools.config['db_user']:
             cmd.append('--username=' + tools.config['db_user'])
         if tools.config['db_host']:
@@ -227,7 +228,7 @@
 
         self._create_empty_database(db_name)
 
-        cmd = [tools.misc.find_pg_tool('pg_restore'), '--no-owner', '--no-acl']
+        cmd = ['pg_restore', '--no-owner']
         if tools.config['db_user']:
             cmd.append('--username=' + tools.config['db_user'])
         if tools.config['db_host']:
 
=== modified file 'bin/sql_db.py'
--- bin/sql_db.py 2011-01-18 19:26:37 +0000
+++ bin/sql_db.py 2013-02-28 12:40:27 +0000
@@ -325,7 +325,7 @@
 
     @locked
     def close_all(self, dsn):
-        self.__logger.info('%r: Close all connections to %r', self, dsn)
+        self.__logger.info('%r: Close all connections', self)
         for i, (cnx, used) in tools.reverse_enumerate(self._connections):
             if dsn_are_equals(cnx.dsn, dsn):
                 cnx.close()
 
=== added file 'bin/unifield-version.txt'
--- bin/unifield-version.txt 1970-01-01 00:00:00 +0000
+++ bin/unifield-version.txt 2013-02-28 12:40:27 +0000
@@ -0,0 +1,1 @@
+88888888888888888888888888888888
=== modified file 'bin/updater.py'
--- bin/updater.py 2012-10-19 13:22:33 +0000
+++ bin/updater.py 2013-02-28 12:40:27 +0000
@@ -1,10 +1,100 @@
1"""
2Unifield module to upgrade the instance to a next version of Unifield
3Beware that we expect to be in the bin/ directory to proceed!!
4"""
5from __future__ import with_statement
6import re
1import os7import os
2import sys8import sys
3import psycopg29from hashlib import md5
4from datetime import datetime10from datetime import datetime
511from base64 import b64decode
6## Unix-like find12from StringIO import StringIO
13import logging
14import time
15
16if sys.version_info >= (2, 6, 6):
17 from zipfile import ZipFile, ZipInfo
18else:
19 from zipfile266 import ZipFile, ZipInfo
20
21__all__ = ('isset_lock', 'server_version', 'base_version', 'do_prepare', 'base_module_upgrade', 'restart_server')
22
23restart_required = False
24log_file = 'updater.log'
25lock_file = 'update.lock'
26update_dir = '.update'
27server_version_file = 'unifield-version.txt'
28new_version_file = os.path.join(update_dir, 'update-list.txt')
29restart_delay = 5
30
31md5hex_size = (md5().digest_size * 8 / 4)
32base_version = '8' * md5hex_size
33re_version = re.compile(r'^\s*([a-fA-F0-9]{'+str(md5hex_size)+r'}\b)')
34logger = logging.getLogger('updater')
35
36def restart_server():
37 """Restart OpenERP server"""
38 global restart_required
39 logger.info("Restaring OpenERP Server in %d seconds..." % restart_delay)
40 restart_required = True
41
42def isset_lock(file=None):
43 """Check if server lock file is set"""
44 if file is None: file = lock_file
45 return os.path.isfile(lock_file)
46
47def set_lock(file=None):
48 """Set the lock file to make OpenERP run into do_update method against normal execution"""
49 from tools import config
50 if file is None: file = lock_file
51 with open(file, "w") as f:
52 f.write(unicode({'path':os.getcwd(),'rcfile':config.rcfile}))
53
54def unset_lock(file=None):
55 """Remove the lock"""
56 global exec_path
57 global rcfile
58 if file is None: file = lock_file
59 with open(file, "r") as f:
60 data = eval(f.read().strip())
61 exec_path = data['path']
62 rcfile = data['rcfile']
63 os.unlink(file)
64
65def parse_version_file(filepath):
66 """Short method to parse a "version file"
67 Basically, a file where each line starts with the sum of a patch"""
68 assert os.path.isfile(filepath), "The file `%s' must be a file!" % filepath
69 versions = []
70 with open(filepath, 'r') as f:
71 for line in f:
72 line = line.rstrip()
73 if not line: continue
74 try:
75 m = re_version.match(line)
76 versions.append( m.group(1) )
77 except AttributeError:
78 raise Exception("Unable to parse version from file `%s': %s" % (filepath, line))
79 return versions
80
81def get_server_version():
82 """Autocratically get the current versions of the server
83 Get a special key 88888888888888888888888888888888 for default value if no server version can be found"""
84 if not os.path.exists(server_version_file):
85 return [base_version]
86 return parse_version_file(server_version_file)
87
88def add_versions(versions, filepath=server_version_file):
89 """Set server version with new versions"""
90 if not versions:
91 return
92 with open(filepath, 'a') as f:
93 for ver in versions:
94 f.write((" ".join([unicode(x) for x in ver]) if hasattr(ver, '__iter__') else ver)+os.linesep)
95
7def find(path):96def find(path):
97 """Unix-like find"""
8 files = os.listdir(path)98 files = os.listdir(path)
9 for name in iter(files):99 for name in iter(files):
10 abspath = path+os.path.sep+name100 abspath = path+os.path.sep+name
@@ -12,23 +102,8 @@
             files.extend( map(lambda x:name+os.path.sep+x, os.listdir(abspath)) )
     return files
 
-## Define way to forward logs
-def warn(*args):
-    sys.stderr.write(" ".join(map(lambda x:str(x), args))+"\n")
-
-## Try...Resume...
-def Try(command):
-    try:
-        command()
-    except:
-        e, msg = sys.exc_info()[0].__name__, str(sys.exc_info()[1])
-        warn(str(msg))
-        return False
-    else:
-        return True
-
-## Python free rmtree
 def rmtree(files, path=None, verbose=False):
+    """Python free rmtree"""
     if path is None and isinstance(files, str):
         path, files = files, find(files)
     for f in reversed(files):
@@ -40,35 +115,72 @@
             warn("rmdir", target)
             os.rmdir( target )
 
+def now():
+    return datetime.today().strftime("%Y-%m-%d %H:%M:%S")
+
+log = sys.stderr
+
+def warn(*args):
+    """Define way to forward logs"""
+    global log
+    log.write(("[%s] UPDATER: " % now())+" ".join(map(lambda x:unicode(x), args))+os.linesep)
+
+def Try(command):
+    """Try...Resume..."""
+    try:
+        command()
+    except BaseException, e:
+        warn(unicode(e))
+        return False
+    else:
+        return True
+
+
+
+##############################################################################
+##                                                                          ##
+##                     Main methods of updater modules                      ##
+##                                                                          ##
+##############################################################################
+
+
+def base_module_upgrade(cr, pool, upgrade_now=False):
+    """Just like -u base / -u all.
+    Arguments are:
+    * cr: cursor to the database
+    * pool: pool of the same db
+    * (optional) upgrade_now: False by default, on True, it will launch the process right now"""
+    modules = pool.get('ir.module.module')
+    base_ids = modules.search(cr, 1, [('name', '=', 'base')])
+    #base_ids = modules.search(cr, 1, [('name', '=', 'sync_client')]) #for tests
+    modules.button_upgrade(cr, 1, base_ids)
+    if upgrade_now:
+        logger.info("Starting base upgrade process")
+        pool.get('base.module.upgrade').upgrade_module(cr, 1, [])
+
+
 def do_update():
-## We expect to be in the bin/ directory to proceed
-    if os.path.exists('update.lock'):
-        rev_file = os.path.join('.update','revisions.txt')
-        hist_file = "revision_history.txt"
-        infos = {'exec_path':os.getcwd()}
-        revisions = None
-        cur = None
-        conn = None
-        update_revisions = None
+    """Real update of the server (before normal OpenERP execution).
+    This function is triggered when OpenERP starts. When it finishes, it restart OpenERP automatically.
+    On failure, the lock file is deleted and OpenERP files are rollbacked to their previous state."""
+    if os.path.exists(lock_file) and Try(unset_lock):
+        global log
+        ## Move logs log file
+        try:
+            log = open(log_file, 'a')
+        except BaseException, e:
+            log.write("Cannot write into `%s': %s" % (log, unicode(e)))
+        warn(lock_file, 'removed')
+        ## Now, update
+        application_time = now()
+        revisions = []
         files = None
-        args = list(sys.argv)
-        for i, x in enumerate(args):
-            if x in ('-d', '-u', '-c'):
-                args[i] = None
-                args[i+1] = None
-        args = filter(lambda x:x is not None, args)
         try:
-            ## Read DB name
-            f = open('update.lock')
-            infos = eval(f.read())
-            f.close()
-            revisions = ",".join( map(lambda x:"'"+str(x)+"'", infos['revisions']) )
-            ## Connect to the DB
-            conn = psycopg2.connect(database=infos['dbname'], user=infos['db_user'], password=infos['db_password'], host=infos['db_host'], port=infos['db_port'])
-            conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
-            cur = conn.cursor()
+            ## Revisions that going to be installed
+            revisions = parse_version_file(new_version_file)
+            os.unlink(new_version_file)
             ## Explore .update directory
-            files = find('.update')
+            files = find(update_dir)
             ## Prepare backup directory
             if not os.path.exists('backup'):
                 os.mkdir('backup')
@@ -77,7 +189,7 @@
             ## Update Files
             warn("Updating...")
             for f in files:
-                target = os.path.join('.update', f)
+                target = os.path.join(update_dir, f)
                 bak = os.path.join('backup', f)
                 if os.path.isdir(target):
                     if os.path.isfile(f) or os.path.islink(f):
@@ -91,22 +203,14 @@
                 os.rename(f, bak)
                 warn("`%s' -> `%s'" % (target, f))
                 os.rename(target, f)
-            ## Update installed revisions in DB
-            cur.execute("""UPDATE sync_client_version SET state = 'installed', applied = '%s' WHERE name in (%s)"""
-                % ( datetime.today().strftime("%Y-%m-%d %H:%M:%S"), revisions ))
+            add_versions([(x, application_time) for x in revisions])
             warn("Update successful.")
-            warn("Revisions added: ", ", ".join( infos['revisions'] ))
-            args.extend(['-d', infos['dbname'], '-u', 'all'])
-            if os.name == 'nt':
-                args.extend(['-c', '"%s"' % infos['conf']])
-            else:
-                args.extend(['-c', infos['conf']])
-        except:
+            warn("Revisions added: ", ", ".join(revisions))
+            ## No database update here. I preferred to set modules to update just after the preparation
+            ## The reason is, when pool is populated, it will starts by upgrading modules first
+        except BaseException, e:
             warn("Update failure!")
-            ## Update DB to mark revisions as not-installed
-            if cur and infos:
-                Try(lambda:cur.execute("""UPDATE sync_client_version SET state = 'not-installed' WHERE name in (%s)"""
-                    % ( revisions )))
+            warn(unicode(e))
             ## Restore backup and purge .update
             if files:
                 warn("Restoring...")
@@ -114,21 +218,140 @@
                     target = os.path.join('backup', f)
                     if os.path.isfile(target) or os.path.islink(target):
                         warn("`%s' -> `%s'" % (target, f))
-                    elif os.path.isdir(target):
-                        warn("rmdir", target)
-                        os.rmdir( target )
+                        os.rename(target, f)
             warn("Purging...")
-            Try(lambda:rmtree(files, '.update'))
-            warn("rmdir", '.update')
-            Try(lambda:os.rmdir( '.update' ))
-        finally:
-            if cur: cur.close()
-            if conn: conn.close()
-        ## Remove lock file
-        warn("rm", 'update.lock')
-        Try(lambda:os.unlink( 'update.lock' ))
-        warn("Restart OpenERP in", infos['exec_path'], "with:",args)
-        if infos: os.chdir(infos['exec_path'])
-        os.execv(sys.executable, [sys.executable] + args)
+            Try(lambda:rmtree(update_dir))
+        if os.name == 'nt':
+            warn("Exiting OpenERP Server with code 1 to tell service to restart")
+            sys.exit(1) # require service to restart
+        else:
+            warn(("Restart OpenERP in %s:" % exec_path), \
+                [sys.executable]+sys.argv)
+            if log is not sys.stderr:
+                log.close()
+            os.chdir(exec_path)
+            os.execv(sys.executable, [sys.executable] + sys.argv)
+
 
-
+def update_path():
+    """If server starts normally, this step will fix the paths with the configured path in config rc"""
+    from tools import config
+    for v in ('log_file', 'lock_file', 'update_dir', 'server_version_file', 'new_version_file'):
+        globals()[v] = os.path.join(config['root_path'], globals()[v])
+    global server_version
+    server_version = get_server_version()
+
+
+def do_prepare(cr, revision_ids):
+    """Prepare patches for an upgrade of the server and set the lock file"""
+    if not revision_ids:
+        return ('failure', 'Nothing to do.', {})
+    import pooler
+    pool = pooler.get_pool(cr.dbname)
+    version = pool.get('sync_client.version')
+
+    # Make an update temporary path
+    path = update_dir
+    if not os.path.exists(path):
+        os.mkdir(path)
+    else:
+        for f in reversed(find(path)):
+            target = os.path.join(path, f)
+            if os.path.isfile(target) or os.path.islink(target):
+                logger.debug("rm `%s'" % target)
+                os.unlink( target )
+            elif os.path.isdir(target):
+                logger.debug("rmdir `%s'" % target)
+                os.rmdir( target )
+    if not (os.path.isdir(path) and os.access(path, os.W_OK)):
+        message = "The path `%s' is not a dir or is not writable!"
+        logger.error(message % path)
+        return ('failure', message, (path,))
+    # Proceed all patches
+    new_revisions = []
+    corrupt = []
+    missing = []
+    need_restart = []
+    for rev in version.browse(cr, 1, revision_ids):
+        # Check presence of the patch
+        if not rev.patch:
+            missing.append( rev )
+            continue
+        # Check if the file match the expected sum
+        patch = b64decode( rev.patch )
+        local_sum = md5(patch).hexdigest()
+        if local_sum != rev.sum:
+            corrupt.append( rev )
+        elif not (corrupt or missing):
+            # Extract the Zip
+            f = StringIO(patch)
+            try:
+                zip = ZipFile(f, 'r')
+                zip.extractall(path)
+            finally:
+                f.close()
+            # Store to list of updates
+            new_revisions.append( (rev.sum, ("[%s] %s - %s" % (rev.importance, rev.date, rev.name))) )
+            if rev.state == 'not-installed':
+                need_restart.append(rev.id)
+    # Remove corrupted patches
+    if corrupt:
+        corrupt_ids = [x.id for x in corrupt]
+        version.write(cr, 1, corrupt_ids, {'patch':False})
+        if len(corrupt) == 1: message = "One file you downloaded seems to be corrupt:\n\n%s"
+        else: message = "Some files you downloaded seem to be corrupt:\n\n%s"
+        values = ""
+        for rev in corrupt:
+            values += " - %s (sum expected: %s)\n" % ((rev.name or 'unknown'), rev.sum)
+        logger.error(message % values)
+        return ('corrupt', message, values)
+    # Complaints about missing patches
+    if missing:
+        if len(missing) == 1:
+            message = "A file is missing: %(name)s (check sum: %(sum)s)"
+            values = {
+                'name' : missing[0].name or 'unknown',
+                'sum' : missing[0].sum
+            }
+        else:
+            message = "Some files are missing:\n\n%s"
+            values = ""
+            for rev in missing:
+                values += " - %s (check sum: %s)\n" % ((rev.name or 'unknown'), rev.sum)
+        logger.error(message % values)
+        return ('missing', message, values)
+    # Fix the flag of the pending patches
+    version.write(cr, 1, need_restart, {'state':'need-restart'})
+    # Make a lock file to make OpenERP able to detect an update
+    set_lock()
+    add_versions(new_revisions, new_version_file)
+    logger.info("Server update prepared. Need to restart to complete the upgrade.")
+    return ('success', 'Restart required', {})
+
+
+def do_upgrade(cr, pool):
+    """Start upgrade process (called by login method and restore)"""
+    versions = pool.get('sync_client.version')
+    if versions is None:
+        return True
+
+    db_versions = versions.read(cr, 1, versions.search(cr, 1, [('state','=','installed')]), ['sum'])
+    db_versions = map(lambda x:x['sum'], db_versions)
+    server_lack_versions = set(db_versions) - set(server_version)
+    db_lack_versions = set(server_version) - set(db_versions) - set([base_version])
+
+    if server_lack_versions:
+        revision_ids = versions.search(cr, 1, [('sum','in',list(server_lack_versions))], order='date asc')
+        res = do_prepare(cr, revision_ids)
+        if res[0] == 'success':
+            import tools
+            os.chdir( tools.config['root_path'] )
+            restart_server()
+        else:
+            return False
+
+    elif db_lack_versions:
+        base_module_upgrade(cr, pool, upgrade_now=True)
+        # Note: There is no need to update the db versions, the `def init()' of the object do that for us
+
+    return True
 
=== added file 'bin/zipfile266.py'
--- bin/zipfile266.py 1970-01-01 00:00:00 +0000
+++ bin/zipfile266.py 2013-02-28 12:40:27 +0000
@@ -0,0 +1,1409 @@
1"""
2Read and write ZIP files.
3"""
4import struct, os, time, sys, shutil
5import binascii, cStringIO, stat
6
7try:
8 import zlib # We may need its compression method
9 crc32 = zlib.crc32
10except ImportError:
11 zlib = None
12 crc32 = binascii.crc32
13
14__all__ = ["BadZipfile", "error", "ZIP_STORED", "ZIP_DEFLATED", "is_zipfile",
15 "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile" ]
16
17class BadZipfile(Exception):
18 pass
19
20
21class LargeZipFile(Exception):
22 """
23 Raised when writing a zipfile, the zipfile requires ZIP64 extensions
24 and those extensions are disabled.
25 """
26
27error = BadZipfile # The exception raised by this module
28
29ZIP64_LIMIT = (1 << 31) - 1
30ZIP_FILECOUNT_LIMIT = 1 << 16
31ZIP_MAX_COMMENT = (1 << 16) - 1
32
33# constants for Zip file compression methods
34ZIP_STORED = 0
35ZIP_DEFLATED = 8
36# Other ZIP compression methods not supported
37
38# Below are some formats and associated data for reading/writing headers using
39# the struct module. The names and structures of headers/records are those used
40# in the PKWARE description of the ZIP file format:
41# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
42# (URL valid as of January 2008)
43
44# The "end of central directory" structure, magic number, size, and indices
45# (section V.I in the format document)
46structEndArchive = "<4s4H2LH"
47stringEndArchive = "PK\005\006"
48sizeEndCentDir = struct.calcsize(structEndArchive)
49
50_ECD_SIGNATURE = 0
51_ECD_DISK_NUMBER = 1
52_ECD_DISK_START = 2
53_ECD_ENTRIES_THIS_DISK = 3
54_ECD_ENTRIES_TOTAL = 4
55_ECD_SIZE = 5
56_ECD_OFFSET = 6
57_ECD_COMMENT_SIZE = 7
58# These last two indices are not part of the structure as defined in the
59# spec, but they are used internally by this module as a convenience
60_ECD_COMMENT = 8
61_ECD_LOCATION = 9
62
63# The "central directory" structure, magic number, size, and indices
64# of entries in the structure (section V.F in the format document)
65structCentralDir = "<4s4B4HL2L5H2L"
66stringCentralDir = "PK\001\002"
67sizeCentralDir = struct.calcsize(structCentralDir)
68
69# indexes of entries in the central directory structure
70_CD_SIGNATURE = 0
71_CD_CREATE_VERSION = 1
72_CD_CREATE_SYSTEM = 2
73_CD_EXTRACT_VERSION = 3
74_CD_EXTRACT_SYSTEM = 4
75_CD_FLAG_BITS = 5
76_CD_COMPRESS_TYPE = 6
77_CD_TIME = 7
78_CD_DATE = 8
79_CD_CRC = 9
80_CD_COMPRESSED_SIZE = 10
81_CD_UNCOMPRESSED_SIZE = 11
82_CD_FILENAME_LENGTH = 12
83_CD_EXTRA_FIELD_LENGTH = 13
84_CD_COMMENT_LENGTH = 14
85_CD_DISK_NUMBER_START = 15
86_CD_INTERNAL_FILE_ATTRIBUTES = 16
87_CD_EXTERNAL_FILE_ATTRIBUTES = 17
88_CD_LOCAL_HEADER_OFFSET = 18
89
90# The "local file header" structure, magic number, size, and indices
91# (section V.A in the format document)
92structFileHeader = "<4s2B4HL2L2H"
93stringFileHeader = "PK\003\004"
94sizeFileHeader = struct.calcsize(structFileHeader)
95
96_FH_SIGNATURE = 0
97_FH_EXTRACT_VERSION = 1
98_FH_EXTRACT_SYSTEM = 2
99_FH_GENERAL_PURPOSE_FLAG_BITS = 3
100_FH_COMPRESSION_METHOD = 4
101_FH_LAST_MOD_TIME = 5
102_FH_LAST_MOD_DATE = 6
103_FH_CRC = 7
104_FH_COMPRESSED_SIZE = 8
105_FH_UNCOMPRESSED_SIZE = 9
106_FH_FILENAME_LENGTH = 10
107_FH_EXTRA_FIELD_LENGTH = 11
108
109# The "Zip64 end of central directory locator" structure, magic number, and size
110structEndArchive64Locator = "<4sLQL"
111stringEndArchive64Locator = "PK\x06\x07"
112sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
113
114# The "Zip64 end of central directory" record, magic number, size, and indices
115# (section V.G in the format document)
116structEndArchive64 = "<4sQ2H2L4Q"
117stringEndArchive64 = "PK\x06\x06"
118sizeEndCentDir64 = struct.calcsize(structEndArchive64)
119
120_CD64_SIGNATURE = 0
121_CD64_DIRECTORY_RECSIZE = 1
122_CD64_CREATE_VERSION = 2
123_CD64_EXTRACT_VERSION = 3
124_CD64_DISK_NUMBER = 4
125_CD64_DISK_NUMBER_START = 5
126_CD64_NUMBER_ENTRIES_THIS_DISK = 6
127_CD64_NUMBER_ENTRIES_TOTAL = 7
128_CD64_DIRECTORY_SIZE = 8
129_CD64_OFFSET_START_CENTDIR = 9
130
131def is_zipfile(filename):
132 """Quickly see if file is a ZIP file by checking the magic number."""
133 try:
134 fpin = open(filename, "rb")
135 endrec = _EndRecData(fpin)
136 fpin.close()
137 if endrec:
138 return True # file has correct magic number
139 except IOError:
140 pass
141 return False
142
143def _EndRecData64(fpin, offset, endrec):
144 """
145 Read the ZIP64 end-of-archive records and use that to update endrec
146 """
147 fpin.seek(offset - sizeEndCentDir64Locator, 2)
148 data = fpin.read(sizeEndCentDir64Locator)
149 sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
150 if sig != stringEndArchive64Locator:
151 return endrec
152
153 if diskno != 0 or disks != 1:
154 raise BadZipfile("zipfiles that span multiple disks are not supported")
155
156 # Assume no 'zip64 extensible data'
157 fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
158 data = fpin.read(sizeEndCentDir64)
159 sig, sz, create_version, read_version, disk_num, disk_dir, \
160 dircount, dircount2, dirsize, diroffset = \
161 struct.unpack(structEndArchive64, data)
162 if sig != stringEndArchive64:
163 return endrec
164
165 # Update the original endrec using data from the ZIP64 record
166 endrec[_ECD_SIGNATURE] = sig
167 endrec[_ECD_DISK_NUMBER] = disk_num
168 endrec[_ECD_DISK_START] = disk_dir
169 endrec[_ECD_ENTRIES_THIS_DISK] = dircount
170 endrec[_ECD_ENTRIES_TOTAL] = dircount2
171 endrec[_ECD_SIZE] = dirsize
172 endrec[_ECD_OFFSET] = diroffset
173 return endrec
174
175
176def _EndRecData(fpin):
177 """Return data from the "End of Central Directory" record, or None.
178
179 The data is a list of the nine items in the ZIP "End of central dir"
180 record followed by a tenth item, the file seek offset of this record."""
181
182 # Determine file size
183 fpin.seek(0, 2)
184 filesize = fpin.tell()
185
186 # Check to see if this is ZIP file with no archive comment (the
187 # "end of central directory" structure should be the last item in the
188 # file if this is the case).
189 try:
190 fpin.seek(-sizeEndCentDir, 2)
191 except IOError:
192 return None
193 data = fpin.read()
194 if data[0:4] == stringEndArchive and data[-2:] == "\000\000":
195 # the signature is correct and there's no comment, unpack structure
196 endrec = struct.unpack(structEndArchive, data)
197 endrec=list(endrec)
198
199 # Append a blank comment and record start offset
200 endrec.append("")
201 endrec.append(filesize - sizeEndCentDir)
202
203 # Try to read the "Zip64 end of central directory" structure
204 return _EndRecData64(fpin, -sizeEndCentDir, endrec)
205
206 # Either this is not a ZIP file, or it is a ZIP file with an archive
207 # comment. Search the end of the file for the "end of central directory"
208 # record signature. The comment is the last item in the ZIP file and may be
209 # up to 64K long. It is assumed that the "end of central directory" magic
210 # number does not appear in the comment.
211 maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
212 fpin.seek(maxCommentStart, 0)
213 data = fpin.read()
214 start = data.rfind(stringEndArchive)
215 if start >= 0:
216 # found the magic number; attempt to unpack and interpret
217 recData = data[start:start+sizeEndCentDir]
218 endrec = list(struct.unpack(structEndArchive, recData))
219 comment = data[start+sizeEndCentDir:]
220 # check that comment length is correct
221 if endrec[_ECD_COMMENT_SIZE] == len(comment):
222 # Append the archive comment and start offset
223 endrec.append(comment)
224 endrec.append(maxCommentStart + start)
225
226 # Try to read the "Zip64 end of central directory" structure
227 return _EndRecData64(fpin, maxCommentStart + start - filesize,
228 endrec)
229
230 # Unable to find a valid end of central directory structure
231 return
232
233
234class ZipInfo (object):
235 """Class with attributes describing each file in the ZIP archive."""
236
237 __slots__ = (
238 'orig_filename',
239 'filename',
240 'date_time',
241 'compress_type',
242 'comment',
243 'extra',
244 'create_system',
245 'create_version',
246 'extract_version',
247 'reserved',
248 'flag_bits',
249 'volume',
250 'internal_attr',
251 'external_attr',
252 'header_offset',
253 'CRC',
254 'compress_size',
255 'file_size',
256 '_raw_time',
257 )
258
259 def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
260 self.orig_filename = filename # Original file name in archive
261
262 # Terminate the file name at the first null byte. Null bytes in file
263 # names are used as tricks by viruses in archives.
264 null_byte = filename.find(chr(0))
265 if null_byte >= 0:
266 filename = filename[0:null_byte]
267 # This is used to ensure paths in generated ZIP files always use
268 # forward slashes as the directory separator, as required by the
269 # ZIP format specification.
270 if os.sep != "/" and os.sep in filename:
271 filename = filename.replace(os.sep, "/")
272
273 self.filename = filename # Normalized file name
274 self.date_time = date_time # year, month, day, hour, min, sec
275 # Standard values:
276 self.compress_type = ZIP_STORED # Type of compression for the file
277 self.comment = "" # Comment for each file
278 self.extra = "" # ZIP extra data
279 if sys.platform == 'win32':
280 self.create_system = 0 # System which created ZIP archive
281 else:
282 # Assume everything else is unix-y
283 self.create_system = 3 # System which created ZIP archive
284 self.create_version = 20 # Version which created ZIP archive
285 self.extract_version = 20 # Version needed to extract archive
286 self.reserved = 0 # Must be zero
287 self.flag_bits = 0 # ZIP flag bits
288 self.volume = 0 # Volume number of file header
289 self.internal_attr = 0 # Internal attributes
290 self.external_attr = 0 # External file attributes
291 # Other attributes are set by class ZipFile:
292 # header_offset Byte offset to the file header
293 # CRC CRC-32 of the uncompressed file
294 # compress_size Size of the compressed file
295 # file_size Size of the uncompressed file
296
297 def FileHeader(self):
298 """Return the per-file header as a string."""
299 dt = self.date_time
300 dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
301 dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
302 if self.flag_bits & 0x08:
303 # Set these to zero because we write them after the file data
304 CRC = compress_size = file_size = 0
305 else:
306 CRC = self.CRC
307 compress_size = self.compress_size
308 file_size = self.file_size
309
310 extra = self.extra
311
312 if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
313 # File is larger than what fits into a 4 byte integer,
314 # fall back to the ZIP64 extension
315 fmt = '<HHQQ'
316 extra = extra + struct.pack(fmt,
317 1, struct.calcsize(fmt)-4, file_size, compress_size)
318 file_size = 0xffffffff
319 compress_size = 0xffffffff
320 self.extract_version = max(45, self.extract_version)
321 self.create_version = max(45, self.extract_version)
322
323 filename, flag_bits = self._encodeFilenameFlags()
324 header = struct.pack(structFileHeader, stringFileHeader,
325 self.extract_version, self.reserved, flag_bits,
326 self.compress_type, dostime, dosdate, CRC,
327 compress_size, file_size,
328 len(filename), len(extra))
329 return header + filename + extra
330
331 def _encodeFilenameFlags(self):
332 if isinstance(self.filename, unicode):
333 try:
334 return self.filename.encode('ascii'), self.flag_bits
335 except UnicodeEncodeError:
336 return self.filename.encode('utf-8'), self.flag_bits | 0x800
337 else:
338 return self.filename, self.flag_bits
339
340 def _decodeFilename(self):
341 if self.flag_bits & 0x800:
342 return self.filename.decode('utf-8')
343 else:
344 return self.filename
345
346 def _decodeExtra(self):
347 # Try to decode the extra field.
348 extra = self.extra
349 unpack = struct.unpack
350 while extra:
351 tp, ln = unpack('<HH', extra[:4])
352 if tp == 1:
353 if ln >= 24:
354 counts = unpack('<QQQ', extra[4:28])
355 elif ln == 16:
356 counts = unpack('<QQ', extra[4:20])
357 elif ln == 8:
358 counts = unpack('<Q', extra[4:12])
359 elif ln == 0:
360 counts = ()
361 else:
362 raise RuntimeError, "Corrupt extra field %s"%(ln,)
363
364 idx = 0
365
366 # ZIP64 extension (large files and/or large archives)
367 if self.file_size in (0xffffffffffffffffL, 0xffffffffL):
368 self.file_size = counts[idx]
369 idx += 1
370
371 if self.compress_size == 0xFFFFFFFFL:
372 self.compress_size = counts[idx]
373 idx += 1
374
375 if self.header_offset == 0xffffffffL:
376 old = self.header_offset
377 self.header_offset = counts[idx]
378 idx+=1
379
380 extra = extra[ln+4:]
381
382
383class _ZipDecrypter:
384 """Class to handle decryption of files stored within a ZIP archive.
385
386 ZIP supports a password-based form of encryption. Even though known
387 plaintext attacks have been found against it, it is still useful
388 to be able to get data out of such a file.
389
390 Usage:
391 zd = _ZipDecrypter(mypwd)
392 plain_char = zd(cypher_char)
393 plain_text = map(zd, cypher_text)
394 """
395
396 def _GenerateCRCTable():
397 """Generate a CRC-32 table.
398
399 ZIP encryption uses the CRC32 one-byte primitive for scrambling some
400 internal keys. We noticed that a direct implementation is faster than
401 relying on binascii.crc32().
402 """
403 poly = 0xedb88320
404 table = [0] * 256
405 for i in range(256):
406 crc = i
407 for j in range(8):
408 if crc & 1:
409 crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
410 else:
411 crc = ((crc >> 1) & 0x7FFFFFFF)
412 table[i] = crc
413 return table
414 crctable = _GenerateCRCTable()
415
416 def _crc32(self, ch, crc):
417 """Compute the CRC32 primitive on one byte."""
418 return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ord(ch)) & 0xff]
419
420 def __init__(self, pwd):
421 self.key0 = 305419896
422 self.key1 = 591751049
423 self.key2 = 878082192
424 for p in pwd:
425 self._UpdateKeys(p)
426
427 def _UpdateKeys(self, c):
428 self.key0 = self._crc32(c, self.key0)
429 self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295
430 self.key1 = (self.key1 * 134775813 + 1) & 4294967295
431 self.key2 = self._crc32(chr((self.key1 >> 24) & 255), self.key2)
432
433 def __call__(self, c):
434 """Decrypt a single character."""
435 c = ord(c)
436 k = self.key2 | 2
437 c = c ^ (((k * (k^1)) >> 8) & 255)
438 c = chr(c)
439 self._UpdateKeys(c)
440 return c
441
442class ZipExtFile:
443 """File-like object for reading an archive member.
444 Is returned by ZipFile.open().
445 """
446
447 def __init__(self, fileobj, zipinfo, decrypt=None):
448 self.fileobj = fileobj
449 self.decrypter = decrypt
450 self.bytes_read = 0L
451 self.rawbuffer = ''
452 self.readbuffer = ''
453 self.linebuffer = ''
454 self.eof = False
455 self.univ_newlines = False
456 self.nlSeps = ("\n", )
457 self.lastdiscard = ''
458
459 self.compress_type = zipinfo.compress_type
460 self.compress_size = zipinfo.compress_size
461
462 self.closed = False
463 self.mode = "r"
464 self.name = zipinfo.filename
465
466 # read from compressed files in 64k blocks
467 self.compreadsize = 64*1024
468 if self.compress_type == ZIP_DEFLATED:
469 self.dc = zlib.decompressobj(-15)
470
471 def set_univ_newlines(self, univ_newlines):
472 self.univ_newlines = univ_newlines
473
474 # pick line separator char(s) based on universal newlines flag
475 self.nlSeps = ("\n", )
476 if self.univ_newlines:
477 self.nlSeps = ("\r\n", "\r", "\n")
478
479 def __iter__(self):
480 return self
481
482 def next(self):
483 nextline = self.readline()
484 if not nextline:
485 raise StopIteration()
486
487 return nextline
488
489 def close(self):
490 self.closed = True
491
492 def _checkfornewline(self):
493 nl, nllen = -1, -1
494 if self.linebuffer:
495 # ugly check for cases where half of an \r\n pair was
496 # read on the last pass, and the \r was discarded. In this
497 # case we just throw away the \n at the start of the buffer.
498 if (self.lastdiscard, self.linebuffer[0]) == ('\r','\n'):
499 self.linebuffer = self.linebuffer[1:]
500
501 for sep in self.nlSeps:
502 nl = self.linebuffer.find(sep)
503 if nl >= 0:
504 nllen = len(sep)
505 return nl, nllen
506
507 return nl, nllen
508
509 def readline(self, size = -1):
510 """Read a line with approx. size. If size is negative,
511 read a whole line.
512 """
513 if size < 0:
514 size = sys.maxint
515 elif size == 0:
516 return ''
517
518 # check for a newline already in buffer
519 nl, nllen = self._checkfornewline()
520
521 if nl >= 0:
522 # the next line was already in the buffer
523 nl = min(nl, size)
524 else:
525 # no line break in buffer - try to read more
526 size -= len(self.linebuffer)
527 while nl < 0 and size > 0:
528 buf = self.read(min(size, 100))
529 if not buf:
530 break
531 self.linebuffer += buf
532 size -= len(buf)
533
534 # check for a newline in buffer
535 nl, nllen = self._checkfornewline()
536
537 # we either ran out of bytes in the file, or
538 # met the specified size limit without finding a newline,
539 # so return current buffer
540 if nl < 0:
541 s = self.linebuffer
542 self.linebuffer = ''
543 return s
544
545 buf = self.linebuffer[:nl]
546 self.lastdiscard = self.linebuffer[nl:nl + nllen]
547 self.linebuffer = self.linebuffer[nl + nllen:]
548
549 # line is always returned with \n as newline char (except possibly
550 # for a final incomplete line in the file, which is handled above).
551 return buf + "\n"
552
553 def readlines(self, sizehint = -1):
554 """Return a list with all (following) lines. The sizehint parameter
555 is ignored in this implementation.
556 """
557 result = []
558 while True:
559 line = self.readline()
560 if not line: break
561 result.append(line)
562 return result
563
564 def read(self, size = None):
565 # act like file() obj and return empty string if size is 0
566 if size == 0:
567 return ''
568
569 # determine read size
570 bytesToRead = self.compress_size - self.bytes_read
571
572 # adjust read size for encrypted files since the first 12 bytes
573 # are for the encryption/password information
574 if self.decrypter is not None:
575 bytesToRead -= 12
576
577 if size is not None and size >= 0:
578 if self.compress_type == ZIP_STORED:
579 lr = len(self.readbuffer)
580 bytesToRead = min(bytesToRead, size - lr)
581 elif self.compress_type == ZIP_DEFLATED:
582 if len(self.readbuffer) > size:
583 # the user has requested fewer bytes than we've already
584 # pulled through the decompressor; don't read any more
585 bytesToRead = 0
586 else:
587 # user will use up the buffer, so read some more
588 lr = len(self.rawbuffer)
589 bytesToRead = min(bytesToRead, self.compreadsize - lr)
590
591 # avoid reading past end of file contents
592 if bytesToRead + self.bytes_read > self.compress_size:
593 bytesToRead = self.compress_size - self.bytes_read
594
595 # try to read from file (if necessary)
596 if bytesToRead > 0:
597 bytes = self.fileobj.read(bytesToRead)
598 self.bytes_read += len(bytes)
599 self.rawbuffer += bytes
600
601 # handle contents of raw buffer
602 if self.rawbuffer:
603 newdata = self.rawbuffer
604 self.rawbuffer = ''
605
606 # decrypt new data if we were given an object to handle that
607 if newdata and self.decrypter is not None:
608 newdata = ''.join(map(self.decrypter, newdata))
609
610 # decompress newly read data if necessary
611 if newdata and self.compress_type == ZIP_DEFLATED:
612 newdata = self.dc.decompress(newdata)
613 self.rawbuffer = self.dc.unconsumed_tail
614 if self.eof and len(self.rawbuffer) == 0:
615 # we're out of raw bytes (both from the file and
616 # the local buffer); flush just to make sure the
617 # decompressor is done
618 newdata += self.dc.flush()
619 # prevent decompressor from being used again
620 self.dc = None
621
622 self.readbuffer += newdata
623
624
625 # return what the user asked for
626 if size is None or len(self.readbuffer) <= size:
627 bytes = self.readbuffer
628 self.readbuffer = ''
629 else:
630 bytes = self.readbuffer[:size]
631 self.readbuffer = self.readbuffer[size:]
632
633 return bytes
634
635
636class ZipFile:
637 """ Class with methods to open, read, write, close, list zip files.
638
639 z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)
640
641 file: Either the path to the file, or a file-like object.
642 If it is a path, the file will be opened and closed by ZipFile.
643 mode: The mode can be either read "r", write "w" or append "a".
644 compression: ZIP_STORED (no compression) or ZIP_DEFLATED (requires zlib).
645 allowZip64: if True ZipFile will create files with ZIP64 extensions when
646 needed, otherwise it will raise an exception when this would
647 be necessary.
648
649 """
650
651 fp = None # Set here since __del__ checks it
652
653 def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
654 """Open the ZIP file with mode read "r", write "w" or append "a"."""
655 if mode not in ("r", "w", "a"):
656 raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')
657
658 if compression == ZIP_STORED:
659 pass
660 elif compression == ZIP_DEFLATED:
661 if not zlib:
662 raise RuntimeError,\
663 "Compression requires the (missing) zlib module"
664 else:
665 raise RuntimeError, "That compression method is not supported"
666
667 self._allowZip64 = allowZip64
668 self._didModify = False
669 self.debug = 0 # Level of printing: 0 through 3
670 self.NameToInfo = {} # Find file info given name
671 self.filelist = [] # List of ZipInfo instances for archive
672 self.compression = compression # Method of compression
673 self.mode = key = mode.replace('b', '')[0]
674 self.pwd = None
675 self.comment = ''
676
677 # Check if we were passed a file-like object
678 if isinstance(file, basestring):
679 self._filePassed = 0
680 self.filename = file
681 modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
682 try:
683 self.fp = open(file, modeDict[mode])
684 except IOError:
685 if mode == 'a':
686 mode = key = 'w'
687 self.fp = open(file, modeDict[mode])
688 else:
689 raise
690 else:
691 self._filePassed = 1
692 self.fp = file
693 self.filename = getattr(file, 'name', None)
694
695 if key == 'r':
696 self._GetContents()
697 elif key == 'w':
698 pass
699 elif key == 'a':
700 try: # See if file is a zip file
701 self._RealGetContents()
702 # seek to start of directory and overwrite
703 self.fp.seek(self.start_dir, 0)
704 except BadZipfile: # file is not a zip file, just append
705 self.fp.seek(0, 2)
706 else:
707 if not self._filePassed:
708 self.fp.close()
709 self.fp = None
710 raise RuntimeError, 'Mode must be "r", "w" or "a"'
711
712 def _GetContents(self):
713 """Read the directory, making sure we close the file if the format
714 is bad."""
715 try:
716 self._RealGetContents()
717 except BadZipfile:
718 if not self._filePassed:
719 self.fp.close()
720 self.fp = None
721 raise
722
723 def _RealGetContents(self):
724 """Read in the table of contents for the ZIP file."""
725 fp = self.fp
726 endrec = _EndRecData(fp)
727 if not endrec:
728 raise BadZipfile, "File is not a zip file"
729 if self.debug > 1:
730 print endrec
731 size_cd = endrec[_ECD_SIZE] # bytes in central directory
732 offset_cd = endrec[_ECD_OFFSET] # offset of central directory
733 self.comment = endrec[_ECD_COMMENT] # archive comment
734
735 # "concat" is zero, unless zip was concatenated to another file
736 concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
737 if endrec[_ECD_SIGNATURE] == stringEndArchive64:
738 # If Zip64 extension structures are present, account for them
739 concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
740
741 if self.debug > 2:
742 inferred = concat + offset_cd
743 print "given, inferred, offset", offset_cd, inferred, concat
744 # self.start_dir: Position of start of central directory
745 self.start_dir = offset_cd + concat
746 fp.seek(self.start_dir, 0)
747 data = fp.read(size_cd)
748 fp = cStringIO.StringIO(data)
749 total = 0
750 while total < size_cd:
751 centdir = fp.read(sizeCentralDir)
752 if centdir[0:4] != stringCentralDir:
753 raise BadZipfile, "Bad magic number for central directory"
754 centdir = struct.unpack(structCentralDir, centdir)
755 if self.debug > 2:
756 print centdir
757 filename = fp.read(centdir[_CD_FILENAME_LENGTH])
758 # Create ZipInfo instance to store file information
759 x = ZipInfo(filename)
760 x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
761 x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
762 x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
763 (x.create_version, x.create_system, x.extract_version, x.reserved,
764 x.flag_bits, x.compress_type, t, d,
765 x.CRC, x.compress_size, x.file_size) = centdir[1:12]
766 x.volume, x.internal_attr, x.external_attr = centdir[15:18]
767 # Convert date/time code to (year, month, day, hour, min, sec)
768 x._raw_time = t
769 x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
770 t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
771
772 x._decodeExtra()
773 x.header_offset = x.header_offset + concat
774 x.filename = x._decodeFilename()
775 self.filelist.append(x)
776 self.NameToInfo[x.filename] = x
777
778 # update total bytes read from central directory
779 total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
780 + centdir[_CD_EXTRA_FIELD_LENGTH]
781 + centdir[_CD_COMMENT_LENGTH])
782
783 if self.debug > 2:
784 print "total", total
785
786
787 def namelist(self):
788 """Return a list of file names in the archive."""
789 l = []
790 for data in self.filelist:
791 l.append(data.filename)
792 return l
793
794 def infolist(self):
795 """Return a list of class ZipInfo instances for files in the
796 archive."""
797 return self.filelist
798
799 def printdir(self):
800 """Print a table of contents for the zip file."""
801 print "%-46s %19s %12s" % ("File Name", "Modified ", "Size")
802 for zinfo in self.filelist:
803 date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
804 print "%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size)
805
806 def testzip(self):
807 """Read all the files and check the CRC."""
808 chunk_size = 2 ** 20
809 for zinfo in self.filelist:
810 try:
811 # Read by chunks, to avoid an OverflowError or a
812 # MemoryError with very large embedded files.
813 f = self.open(zinfo.filename, "r")
814 while f.read(chunk_size): # Check CRC-32
815 pass
816 except BadZipfile:
817 return zinfo.filename
818
819 def getinfo(self, name):
820 """Return the instance of ZipInfo given 'name'."""
821 info = self.NameToInfo.get(name)
822 if info is None:
823 raise KeyError(
824 'There is no item named %r in the archive' % name)
825
826 return info
827
828 def setpassword(self, pwd):
829 """Set default password for encrypted files."""
830 self.pwd = pwd
831
832 def read(self, name, pwd=None):
833 """Return file bytes (as a string) for name."""
834 return self.open(name, "r", pwd).read()
835
836 def open(self, name, mode="r", pwd=None):
837 """Return file-like object for 'name'."""
838 if mode not in ("r", "U", "rU"):
839 raise RuntimeError, 'open() requires mode "r", "U", or "rU"'
840 if not self.fp:
841 raise RuntimeError, \
842 "Attempt to read ZIP archive that was already closed"
843
844 # Only open a new file for instances where we were not
845 # given a file object in the constructor
846 if self._filePassed:
847 zef_file = self.fp
848 else:
849 zef_file = open(self.filename, 'rb')
850
851 # Make sure we have an info object
852 if isinstance(name, ZipInfo):
853 # 'name' is already an info object
854 zinfo = name
855 else:
856 # Get info object for name
857 zinfo = self.getinfo(name)
858
859 zef_file.seek(zinfo.header_offset, 0)
860
861 # Skip the file header:
862 fheader = zef_file.read(sizeFileHeader)
863 if fheader[0:4] != stringFileHeader:
864 raise BadZipfile, "Bad magic number for file header"
865
866 fheader = struct.unpack(structFileHeader, fheader)
867 fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
868 if fheader[_FH_EXTRA_FIELD_LENGTH]:
869 zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
870
871 if fname != zinfo.orig_filename:
872 raise BadZipfile, \
873 'File name in directory "%s" and header "%s" differ.' % (
874 zinfo.orig_filename, fname)
875
876 # check for encrypted flag & handle password
877 is_encrypted = zinfo.flag_bits & 0x1
878 zd = None
879 if is_encrypted:
880 if not pwd:
881 pwd = self.pwd
882 if not pwd:
883 raise RuntimeError, "File %s is encrypted, " \
884 "password required for extraction" % name
885
886 zd = _ZipDecrypter(pwd)
887 # The first 12 bytes in the cypher stream is an encryption header
888 # used to strengthen the algorithm. The first 11 bytes are
889 # completely random, while the 12th contains the MSB of the CRC,
890 # or the MSB of the file time depending on the header type
891 # and is used to check the correctness of the password.
892 bytes = zef_file.read(12)
893 h = map(zd, bytes[0:12])
894 if zinfo.flag_bits & 0x8:
895 # compare against the file type from extended local headers
896 check_byte = (zinfo._raw_time >> 8) & 0xff
897 else:
898 # compare against the CRC otherwise
899 check_byte = (zinfo.CRC >> 24) & 0xff
900 if ord(h[11]) != check_byte:
901 raise RuntimeError("Bad password for file", name)
902
903 # build and return a ZipExtFile
904 if zd is None:
905 zef = ZipExtFile(zef_file, zinfo)
906 else:
907 zef = ZipExtFile(zef_file, zinfo, zd)
908
909 # set universal newlines on ZipExtFile if necessary
910 if "U" in mode:
911 zef.set_univ_newlines(True)
912 return zef
913
914 def extract(self, member, path=None, pwd=None):
915 """Extract a member from the archive to the current working directory,
916 using its full name. Its file information is extracted as accurately
917 as possible. `member' may be a filename or a ZipInfo object. You can
918 specify a different directory using `path'.
919 """
920 if not isinstance(member, ZipInfo):
921 member = self.getinfo(member)
922
923 if path is None:
924 path = os.getcwd()
925
926 return self._extract_member(member, path, pwd)
927
928 def extractall(self, path=None, members=None, pwd=None):
929 """Extract all members from the archive to the current working
930 directory. `path' specifies a different directory to extract to.
931 `members' is optional and must be a subset of the list returned
932 by namelist().
933 """
934 if members is None:
935 members = self.namelist()
936
937 for zipinfo in members:
938 self.extract(zipinfo, path, pwd)
939
940 def _extract_member(self, member, targetpath, pwd):
941 """Extract the ZipInfo object 'member' to a physical
942 file on the path targetpath.
943 """
944 # build the destination pathname, replacing
945 # forward slashes to platform specific separators.
946 # Strip trailing path separator, unless it represents the root.
947 if (targetpath[-1:] in (os.path.sep, os.path.altsep)
948 and len(os.path.splitdrive(targetpath)[1]) > 1):
949 targetpath = targetpath[:-1]
950
951 # don't include leading "/" from file name if present
952 if member.filename[0] == '/':
953 targetpath = os.path.join(targetpath, member.filename[1:])
954 else:
955 targetpath = os.path.join(targetpath, member.filename)
956
957 targetpath = os.path.normpath(targetpath)
958
959 # Create all upper directories if necessary.
960 upperdirs = os.path.dirname(targetpath)
961 if upperdirs and not os.path.exists(upperdirs):
962 os.makedirs(upperdirs)
963
964 if member.filename[-1] == '/':
965 if not os.path.isdir(targetpath):
966 os.mkdir(targetpath)
967 return targetpath
968
969 source = self.open(member, pwd=pwd)
970 target = file(targetpath, "wb")
971 shutil.copyfileobj(source, target)
972 source.close()
973 target.close()
974
975 return targetpath
976
977 def _writecheck(self, zinfo):
978 """Check for errors before writing a file to the archive."""
979 if zinfo.filename in self.NameToInfo:
980 if self.debug: # Warning for duplicate names
981 print "Duplicate name:", zinfo.filename
982 if self.mode not in ("w", "a"):
983 raise RuntimeError, 'write() requires mode "w" or "a"'
984 if not self.fp:
985 raise RuntimeError, \
986 "Attempt to write ZIP archive that was already closed"
987 if zinfo.compress_type == ZIP_DEFLATED and not zlib:
988 raise RuntimeError, \
989 "Compression requires the (missing) zlib module"
990 if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED):
991 raise RuntimeError, \
992 "That compression method is not supported"
993 if zinfo.file_size > ZIP64_LIMIT:
994 if not self._allowZip64:
995 raise LargeZipFile("Filesize would require ZIP64 extensions")
996 if zinfo.header_offset > ZIP64_LIMIT:
997 if not self._allowZip64:
998 raise LargeZipFile("Zipfile size would require ZIP64 extensions")
999
1000 def write(self, filename, arcname=None, compress_type=None):
1001 """Put the bytes from filename into the archive under the name
1002 arcname."""
1003 if not self.fp:
1004 raise RuntimeError(
1005 "Attempt to write to ZIP archive that was already closed")
1006
1007 st = os.stat(filename)
1008 isdir = stat.S_ISDIR(st.st_mode)
1009 mtime = time.localtime(st.st_mtime)
1010 date_time = mtime[0:6]
1011 # Create ZipInfo instance to store file information
1012 if arcname is None:
1013 arcname = filename
1014 arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
1015 while arcname[0] in (os.sep, os.altsep):
1016 arcname = arcname[1:]
1017 if isdir:
1018 arcname += '/'
1019 zinfo = ZipInfo(arcname, date_time)
1020 zinfo.external_attr = (st[0] & 0xFFFF) << 16L # Unix attributes
1021 if compress_type is None:
1022 zinfo.compress_type = self.compression
1023 else:
1024 zinfo.compress_type = compress_type
1025
1026 zinfo.file_size = st.st_size
1027 zinfo.flag_bits = 0x00
1028 zinfo.header_offset = self.fp.tell() # Start of header bytes
1029
1030 self._writecheck(zinfo)
1031 self._didModify = True
1032
1033 if isdir:
1034 zinfo.file_size = 0
1035 zinfo.compress_size = 0
1036 zinfo.CRC = 0
1037 self.filelist.append(zinfo)
1038 self.NameToInfo[zinfo.filename] = zinfo
1039 self.fp.write(zinfo.FileHeader())
1040 return
1041
1042 fp = open(filename, "rb")
1043 # Must overwrite CRC and sizes with correct data later
1044 zinfo.CRC = CRC = 0
1045 zinfo.compress_size = compress_size = 0
1046 zinfo.file_size = file_size = 0
1047 self.fp.write(zinfo.FileHeader())
1048 if zinfo.compress_type == ZIP_DEFLATED:
1049 cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
1050 zlib.DEFLATED, -15)
1051 else:
1052 cmpr = None
1053 while 1:
1054 buf = fp.read(1024 * 8)
1055 if not buf:
1056 break
1057 file_size = file_size + len(buf)
1058 CRC = crc32(buf, CRC) & 0xffffffff
1059 if cmpr:
1060 buf = cmpr.compress(buf)
1061 compress_size = compress_size + len(buf)
1062 self.fp.write(buf)
1063 fp.close()
1064 if cmpr:
1065 buf = cmpr.flush()
1066 compress_size = compress_size + len(buf)
1067 self.fp.write(buf)
1068 zinfo.compress_size = compress_size
1069 else:
1070 zinfo.compress_size = file_size
1071 zinfo.CRC = CRC
1072 zinfo.file_size = file_size
1073 # Seek backwards and write CRC and file sizes
1074 position = self.fp.tell() # Preserve current position in file
1075 self.fp.seek(zinfo.header_offset + 14, 0)
1076 self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
1077 zinfo.file_size))
1078 self.fp.seek(position, 0)
1079 self.filelist.append(zinfo)
1080 self.NameToInfo[zinfo.filename] = zinfo
1081
1082 def writestr(self, zinfo_or_arcname, bytes):
1083 """Write a file into the archive. The contents is the string
1084 'bytes'. 'zinfo_or_arcname' is either a ZipInfo instance or
1085 the name of the file in the archive."""
1086 if not isinstance(zinfo_or_arcname, ZipInfo):
1087 zinfo = ZipInfo(filename=zinfo_or_arcname,
1088 date_time=time.localtime(time.time())[:6])
1089 zinfo.compress_type = self.compression
1090 zinfo.external_attr = 0600 << 16
1091 else:
1092 zinfo = zinfo_or_arcname
1093
1094 if not self.fp:
1095 raise RuntimeError(
1096 "Attempt to write to ZIP archive that was already closed")
1097
1098 zinfo.file_size = len(bytes) # Uncompressed size
1099 zinfo.header_offset = self.fp.tell() # Start of header bytes
1100 self._writecheck(zinfo)
1101 self._didModify = True
1102 zinfo.CRC = crc32(bytes) & 0xffffffff # CRC-32 checksum
1103 if zinfo.compress_type == ZIP_DEFLATED:
1104 co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
1105 zlib.DEFLATED, -15)
1106 bytes = co.compress(bytes) + co.flush()
1107 zinfo.compress_size = len(bytes) # Compressed size
1108 else:
1109 zinfo.compress_size = zinfo.file_size
1110 zinfo.header_offset = self.fp.tell() # Start of header bytes
1111 self.fp.write(zinfo.FileHeader())
1112 self.fp.write(bytes)
1113 self.fp.flush()
1114 if zinfo.flag_bits & 0x08:
1115 # Write CRC and file sizes after the file data
1116 self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
1117 zinfo.file_size))
1118 self.filelist.append(zinfo)
1119 self.NameToInfo[zinfo.filename] = zinfo
1120
1121 def __del__(self):
1122 """Call the "close()" method in case the user forgot."""
1123 self.close()
1124
1125 def close(self):
1126 """Close the file, and for mode "w" and "a" write the ending
1127 records."""
1128 if self.fp is None:
1129 return
1130
1131 if self.mode in ("w", "a") and self._didModify: # write ending records
1132 count = 0
1133 pos1 = self.fp.tell()
1134 for zinfo in self.filelist: # write central directory
1135 count = count + 1
1136 dt = zinfo.date_time
1137 dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
1138 dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
1139 extra = []
1140 if zinfo.file_size > ZIP64_LIMIT \
1141 or zinfo.compress_size > ZIP64_LIMIT:
1142 extra.append(zinfo.file_size)
1143 extra.append(zinfo.compress_size)
1144 file_size = 0xffffffff
1145 compress_size = 0xffffffff
1146 else:
1147 file_size = zinfo.file_size
1148 compress_size = zinfo.compress_size
1149
1150 if zinfo.header_offset > ZIP64_LIMIT:
1151 extra.append(zinfo.header_offset)
1152 header_offset = 0xffffffffL
1153 else:
1154 header_offset = zinfo.header_offset
1155
1156 extra_data = zinfo.extra
1157 if extra:
1158 # Append a ZIP64 field to the extra's
1159 extra_data = struct.pack(
1160 '<HH' + 'Q'*len(extra),
1161 1, 8*len(extra), *extra) + extra_data
1162
1163 extract_version = max(45, zinfo.extract_version)
1164 create_version = max(45, zinfo.create_version)
1165 else:
1166 extract_version = zinfo.extract_version
1167 create_version = zinfo.create_version
1168
1169 try:
1170 filename, flag_bits = zinfo._encodeFilenameFlags()
1171 centdir = struct.pack(structCentralDir,
1172 stringCentralDir, create_version,
1173 zinfo.create_system, extract_version, zinfo.reserved,
1174 flag_bits, zinfo.compress_type, dostime, dosdate,
1175 zinfo.CRC, compress_size, file_size,
1176 len(filename), len(extra_data), len(zinfo.comment),
1177 0, zinfo.internal_attr, zinfo.external_attr,
1178 header_offset)
1179 except DeprecationWarning:
1180 print >>sys.stderr, (structCentralDir,
1181 stringCentralDir, create_version,
1182 zinfo.create_system, extract_version, zinfo.reserved,
1183 zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
1184 zinfo.CRC, compress_size, file_size,
1185 len(zinfo.filename), len(extra_data), len(zinfo.comment),
1186 0, zinfo.internal_attr, zinfo.external_attr,
1187 header_offset)
1188 raise
1189 self.fp.write(centdir)
1190 self.fp.write(filename)
1191 self.fp.write(extra_data)
1192 self.fp.write(zinfo.comment)
1193
1194 pos2 = self.fp.tell()
1195 # Write end-of-zip-archive record
1196 centDirCount = count
1197 centDirSize = pos2 - pos1
1198 centDirOffset = pos1
1199 if (centDirCount >= ZIP_FILECOUNT_LIMIT or
1200 centDirOffset > ZIP64_LIMIT or
1201 centDirSize > ZIP64_LIMIT):
1202 # Need to write the ZIP64 end-of-archive records
1203 zip64endrec = struct.pack(
1204 structEndArchive64, stringEndArchive64,
1205 44, 45, 45, 0, 0, centDirCount, centDirCount,
1206 centDirSize, centDirOffset)
1207 self.fp.write(zip64endrec)
1208
1209 zip64locrec = struct.pack(
1210 structEndArchive64Locator,
1211 stringEndArchive64Locator, 0, pos2, 1)
1212 self.fp.write(zip64locrec)
1213 centDirCount = min(centDirCount, 0xFFFF)
1214 centDirSize = min(centDirSize, 0xFFFFFFFF)
1215 centDirOffset = min(centDirOffset, 0xFFFFFFFF)
1216
1217 # check for valid comment length
1218 if len(self.comment) >= ZIP_MAX_COMMENT:
1219 if self.debug > 0:
1220 msg = 'Archive comment is too long; truncating to %d bytes' \
1221 % ZIP_MAX_COMMENT
1222 self.comment = self.comment[:ZIP_MAX_COMMENT]
1223
1224 endrec = struct.pack(structEndArchive, stringEndArchive,
1225 0, 0, centDirCount, centDirCount,
1226 centDirSize, centDirOffset, len(self.comment))
1227 self.fp.write(endrec)
1228 self.fp.write(self.comment)
1229 self.fp.flush()
1230
1231 if not self._filePassed:
1232 self.fp.close()
1233 self.fp = None
1234
1235
1236class PyZipFile(ZipFile):
1237 """Class to create ZIP archives with Python library files and packages."""
1238
1239 def writepy(self, pathname, basename = ""):
1240 """Add all files from "pathname" to the ZIP archive.
1241
1242 If pathname is a package directory, search the directory and
1243 all package subdirectories recursively for all *.py and enter
1244 the modules into the archive. If pathname is a plain
1245 directory, listdir *.py and enter all modules. Else, pathname
1246 must be a Python *.py file and the module will be put into the
1247 archive. Added modules are always module.pyo or module.pyc.
1248 This method will compile the module.py into module.pyc if
1249 necessary.
1250 """
1251 dir, name = os.path.split(pathname)
1252 if os.path.isdir(pathname):
1253 initname = os.path.join(pathname, "__init__.py")
1254 if os.path.isfile(initname):
1255 # This is a package directory, add it
1256 if basename:
1257 basename = "%s/%s" % (basename, name)
1258 else:
1259 basename = name
1260 if self.debug:
1261 print "Adding package in", pathname, "as", basename
1262 fname, arcname = self._get_codename(initname[0:-3], basename)
1263 if self.debug:
1264 print "Adding", arcname
1265 self.write(fname, arcname)
1266 dirlist = os.listdir(pathname)
1267 dirlist.remove("__init__.py")
1268 # Add all *.py files and package subdirectories
1269 for filename in dirlist:
1270 path = os.path.join(pathname, filename)
1271 root, ext = os.path.splitext(filename)
1272 if os.path.isdir(path):
1273 if os.path.isfile(os.path.join(path, "__init__.py")):
1274 # This is a package directory, add it
1275 self.writepy(path, basename) # Recursive call
1276 elif ext == ".py":
1277 fname, arcname = self._get_codename(path[0:-3],
1278 basename)
1279 if self.debug:
1280 print "Adding", arcname
1281 self.write(fname, arcname)
1282 else:
1283 # This is NOT a package directory, add its files at top level
1284 if self.debug:
1285 print "Adding files from directory", pathname
1286 for filename in os.listdir(pathname):
1287 path = os.path.join(pathname, filename)
1288 root, ext = os.path.splitext(filename)
1289 if ext == ".py":
1290 fname, arcname = self._get_codename(path[0:-3],
1291 basename)
1292 if self.debug:
1293 print "Adding", arcname
1294 self.write(fname, arcname)
1295 else:
1296 if pathname[-3:] != ".py":
1297 raise RuntimeError, \
1298 'Files added with writepy() must end with ".py"'
1299 fname, arcname = self._get_codename(pathname[0:-3], basename)
1300 if self.debug:
1301 print "Adding file", arcname
1302 self.write(fname, arcname)
1303
1304 def _get_codename(self, pathname, basename):
1305 """Return (filename, archivename) for the path.
1306
1307 Given a module name path, return the correct file path and
1308 archive name, compiling if necessary. For example, given
1309 /python/lib/string, return (/python/lib/string.pyc, string).
1310 """
1311 file_py = pathname + ".py"
1312 file_pyc = pathname + ".pyc"
1313 file_pyo = pathname + ".pyo"
1314 if os.path.isfile(file_pyo) and \
1315 os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime:
1316 fname = file_pyo # Use .pyo file
1317 elif not os.path.isfile(file_pyc) or \
1318 os.stat(file_pyc).st_mtime < os.stat(file_py).st_mtime:
1319 import py_compile
1320 if self.debug:
1321 print "Compiling", file_py
1322 try:
1323 py_compile.compile(file_py, file_pyc, None, True)
1324 except py_compile.PyCompileError,err:
1325 print err.msg
1326 fname = file_pyc
1327 else:
1328 fname = file_pyc
1329 archivename = os.path.split(fname)[1]
1330 if basename:
1331 archivename = "%s/%s" % (basename, archivename)
1332 return (fname, archivename)
1333
1334
1335def main(args = None):
1336 import textwrap
1337 USAGE=textwrap.dedent("""\
1338 Usage:
1339 zipfile.py -l zipfile.zip # Show listing of a zipfile
1340 zipfile.py -t zipfile.zip # Test if a zipfile is valid
1341 zipfile.py -e zipfile.zip target # Extract zipfile into target dir
1342 zipfile.py -c zipfile.zip src ... # Create zipfile from sources
1343 """)
1344 if args is None:
1345 args = sys.argv[1:]
1346
1347 if not args or args[0] not in ('-l', '-c', '-e', '-t'):
1348 print USAGE
1349 sys.exit(1)
1350
1351 if args[0] == '-l':
1352 if len(args) != 2:
1353 print USAGE
1354 sys.exit(1)
1355 zf = ZipFile(args[1], 'r')
1356 zf.printdir()
1357 zf.close()
1358
1359 elif args[0] == '-t':
1360 if len(args) != 2:
1361 print USAGE
1362 sys.exit(1)
1363 zf = ZipFile(args[1], 'r')
1364 zf.testzip()
1365 print "Done testing"
1366
1367 elif args[0] == '-e':
1368 if len(args) != 3:
1369 print USAGE
1370 sys.exit(1)
1371
1372 zf = ZipFile(args[1], 'r')
1373 out = args[2]
1374 for path in zf.namelist():
1375 if path.startswith('./'):
1376 tgt = os.path.join(out, path[2:])
1377 else:
1378 tgt = os.path.join(out, path)
1379
1380 tgtdir = os.path.dirname(tgt)
1381 if not os.path.exists(tgtdir):
1382 os.makedirs(tgtdir)
1383 fp = open(tgt, 'wb')
1384 fp.write(zf.read(path))
1385 fp.close()
1386 zf.close()
1387
1388 elif args[0] == '-c':
1389 if len(args) < 3:
1390 print USAGE
1391 sys.exit(1)
1392
1393 def addToZip(zf, path, zippath):
1394 if os.path.isfile(path):
1395 zf.write(path, zippath, ZIP_DEFLATED)
1396 elif os.path.isdir(path):
1397 for nm in os.listdir(path):
1398 addToZip(zf,
1399 os.path.join(path, nm), os.path.join(zippath, nm))
1400 # else: ignore
1401
1402 zf = ZipFile(args[1], 'w', allowZip64=True)
1403 for src in args[2:]:
1404 addToZip(zf, src, os.path.basename(src))
1405
1406 zf.close()
1407
1408if __name__ == "__main__":
1409 main()
01410
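
Note on the file above: zipfile266.py appears to be a backport of the standard-library zipfile module (the name suggests Python 2.6.6), presumably bundled so the bundled Windows Python can handle large/ZIP64 update archives. As a minimal sketch only — the real consumer is bin/updater.py, which is not shown in this part of the diff, and the archive name and target directory below are made up for illustration — extracting an archive with the vendored module looks like this:

    # sketch, not part of the patch: extract an update archive with the
    # backported module; 'patch.zip' and the target directory are hypothetical
    import zipfile266

    def extract_update(archive_path, target_dir):
        zf = zipfile266.ZipFile(archive_path, 'r')
        try:
            for name in zf.namelist():
                # extract() creates missing parent directories itself
                zf.extract(name, target_dir)
        finally:
            zf.close()

    extract_update('patch.zip', 'unifield-update')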
=== modified file 'setup.nsi'
--- setup.nsi 2012-11-26 11:44:38 +0000
+++ setup.nsi 2013-02-28 12:40:27 +0000
@@ -206,6 +206,7 @@
206206
207 nsExec::Exec '"$INSTDIR\openerp-server.exe" --stop-after-init --logfile "$INSTDIR\openerp-server.log" -s'207 nsExec::Exec '"$INSTDIR\openerp-server.exe" --stop-after-init --logfile "$INSTDIR\openerp-server.log" -s'
208 nsExec::Exec '"$INSTDIR\service\OpenERPServerService.exe" -auto -install'208 nsExec::Exec '"$INSTDIR\service\OpenERPServerService.exe" -auto -install'
209 nsExec::Exec 'sc failure openerp-server-6.0 reset= 0 actions= restart/0/restart/0/restart/0'
209SectionEnd210SectionEnd
210211
211Section -RestartServer212Section -RestartServer
212213
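
The new `sc failure` line complements the service changes further down: it asks the Windows Service Control Manager to restart openerp-server-6.0 with no delay whenever the process dies unexpectedly (`reset= 0` clears the failure counter right away, `actions= restart/0/restart/0/restart/0` maps each counted failure to an immediate restart). For illustration only — this is not part of the patch — the equivalent call issued from Python instead of NSIS would be:

    # sketch: same recovery configuration applied via subprocess
    import subprocess

    subprocess.call(['sc', 'failure', 'openerp-server-6.0',
                     'reset=', '0',
                     'actions=', 'restart/0/restart/0/restart/0'])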
=== modified file 'setup.py'
--- setup.py 2011-03-30 17:04:32 +0000
+++ setup.py 2013-02-28 12:40:27 +0000
@@ -133,6 +133,7 @@
133 '''Build list of data files to be installed'''133 '''Build list of data files to be installed'''
134 files = []134 files = []
135 if os.name == 'nt':135 if os.name == 'nt':
136 files.append(('.', [join('bin', 'unifield-version.txt')]))
136 os.chdir('bin')137 os.chdir('bin')
137 for (dp, dn, names) in os.walk('addons'):138 for (dp, dn, names) in os.walk('addons'):
138 files.append((dp, map(lambda x: join('bin', dp, x), names)))139 files.append((dp, map(lambda x: join('bin', dp, x), names)))
139140
=== modified file 'win32/OpenERPServerService.py'
--- win32/OpenERPServerService.py 2010-12-29 11:51:44 +0000
+++ win32/OpenERPServerService.py 2013-02-28 12:40:27 +0000
@@ -32,6 +32,8 @@
32import os32import os
33import thread33import thread
3434
35EXIT_UPDATE_REQUIRE_RESTART = 1
36
35class OpenERPServerService(win32serviceutil.ServiceFramework):37class OpenERPServerService(win32serviceutil.ServiceFramework):
36 # required info38 # required info
37 _svc_name_ = "openerp-server-6.0"39 _svc_name_ = "openerp-server-6.0"
@@ -46,8 +48,6 @@
46 self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)48 self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
47 # a reference to the server's process49 # a reference to the server's process
48 self.terpprocess = None50 self.terpprocess = None
49 # info if the service terminates correctly or if the server crashed
50 self.stopping = False
5151
5252
53 def SvcStop(self):53 def SvcStop(self):
@@ -73,19 +73,28 @@
73 def StartControl(self,ws):73 def StartControl(self,ws):
74 # this listens to the Service Manager's events74 # this listens to the Service Manager's events
75 win32event.WaitForSingleObject(ws, win32event.INFINITE)75 win32event.WaitForSingleObject(ws, win32event.INFINITE)
76 self.stopping = True
7776
78 def SvcDoRun(self):77 def SvcDoRun(self):
79 # Start OpenERP Server itself
80 self.StartTERP()
81 # start the loop waiting for the Service Manager's stop signal78 # start the loop waiting for the Service Manager's stop signal
82 thread.start_new_thread(self.StartControl, (self.hWaitStop,))79 thread.start_new_thread(self.StartControl, (self.hWaitStop,))
83 # Log a info message that the server is running80 while True:
84 servicemanager.LogInfoMsg("OpenERP Server up and running")81 # Start OpenERP Server itself
85 # verification if the server is really running, else quit with an error82 self.StartTERP()
86 self.terpprocess.wait()83 # Log an info message that the server is running
87 if not self.stopping:84 servicemanager.LogInfoMsg("OpenERP Server up and running")
88 sys.exit("OpenERP Server check: server not running, check the logfile for more info")85 # wait until child process is terminated
86 # if exit status is:
87 # - special 'restart'
88 # simply loop to restart the process and finish update
89 # - other exit status:
90 # server crashed? exit with an error message
91 exit_status = self.terpprocess.wait()
92 if exit_status == EXIT_UPDATE_REQUIRE_RESTART:
93 servicemanager.LogInfoMsg("OpenERP has been updated, restarting...")
94 continue # restart openerp process
95 if exit_status == 0:
96 break # normal exit
97 sys.exit(exit_status)
8998
9099
91100
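
The loop above defines a simple exit-code protocol between the service wrapper and the server process: exit code 1 (EXIT_UPDATE_REQUIRE_RESTART) means "an update was installed, start me again", 0 means a clean shutdown, and anything else is treated as a crash and propagated to the Service Control Manager. A standalone sketch of the same protocol without the win32service plumbing (the command line passed to supervise() is hypothetical):

    # sketch, not part of the patch: supervise a child process using the
    # same exit-code convention as the service wrapper above
    import subprocess
    import sys

    EXIT_UPDATE_REQUIRE_RESTART = 1

    def supervise(cmd):
        while True:
            exit_status = subprocess.call(cmd)
            if exit_status == EXIT_UPDATE_REQUIRE_RESTART:
                print "server updated, restarting..."
                continue                  # loop to pick up the new code
            if exit_status == 0:
                break                     # normal shutdown
            sys.exit(exit_status)         # crash: report the failure upwards

    supervise([sys.executable, 'openerp-server.py'])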
