Merge lp:~unifield-team/unifield-server/sp71 into lp:unifield-server

Proposed by Samus CTO (OpenERP)
Status: Merged
Merged at revision: 3444
Proposed branch: lp:~unifield-team/unifield-server/sp71
Merge into: lp:unifield-server
Diff against target: 2048 lines (+1749/-96)
10 files modified
bin/openerp-server.py (+9/-6)
bin/pooler.py (+5/-0)
bin/service/web_services.py (+3/-2)
bin/sql_db.py (+1/-1)
bin/unifield-version.txt (+1/-0)
bin/updater.py (+299/-76)
bin/zipfile266.py (+1409/-0)
setup.nsi (+1/-0)
setup.py (+1/-0)
win32/OpenERPServerService.py (+20/-11)
To merge this branch: bzr merge lp:~unifield-team/unifield-server/sp71
Reviewer: UniField Dev Team (status: Pending)
Review via email: mp+151008@code.launchpad.net

Preview Diff

1=== modified file 'bin/openerp-server.py'
2--- bin/openerp-server.py 2013-02-27 09:10:30 +0000
3+++ bin/openerp-server.py 2013-02-28 12:40:27 +0000
4@@ -30,8 +30,8 @@
5 (c) 2003-TODAY, Fabien Pinckaers - OpenERP s.a.
6 """
7
8-from updater import do_update
9-do_update()
10+import updater
11+updater.do_update()
12
13 #----------------------------------------------------------
14 # python imports
15@@ -65,6 +65,7 @@
16 # import the tools module so that the commandline parameters are parsed
17 #-----------------------------------------------------------------------
18 import tools
19+updater.update_path()
20 logger.info("OpenERP version - %s", release.version)
21 for name, value in [('addons_path', tools.config['addons_path']),
22 ('database hostname', tools.config['db_host'] or 'localhost'),
23@@ -219,6 +220,8 @@
24 signal.signal(signal.SIGQUIT, dumpstacks)
25
26 def quit(restart=False):
27+ if restart:
28+ time.sleep(updater.restart_delay)
29 netsvc.Agent.quit()
30 netsvc.Server.quitAll()
31 if tools.config['pidfile']:
32@@ -247,6 +250,8 @@
33 logger.info(str(thread.getName()) + ' could not be terminated')
34 if not restart:
35 sys.exit(0)
36+ elif os.name == 'nt':
37+ sys.exit(1) # require service restart
38 else:
39 os.execv(sys.executable, [sys.executable] + sys.argv)
40
41@@ -260,11 +265,9 @@
42
43 logger.info('OpenERP server is running, waiting for connections...')
44
45-tools.restart_required = False
46-
47-while netsvc.quit_signals_received == 0 and not tools.restart_required:
48+while netsvc.quit_signals_received == 0 and not updater.restart_required:
49 time.sleep(5)
50
51-quit(restart=tools.restart_required)
52+quit(restart=updater.restart_required)
53
54 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
55
56=== modified file 'bin/pooler.py'
57--- bin/pooler.py 2010-10-01 11:25:52 +0000
58+++ bin/pooler.py 2013-02-28 12:40:27 +0000
59@@ -19,6 +19,8 @@
60 #
61 ##############################################################################
62
63+import updater
64+
65 pool_dic = {}
66
67 def get_db_and_pool(db_name, force_demo=False, status=None, update_module=False, pooljobs=True):
68@@ -45,6 +47,9 @@
69 try:
70 pool.init_set(cr, False)
71 pool.get('ir.actions.report.xml').register_all(cr)
72+ if not updater.do_upgrade(cr, pool):
73+ pool_dic.pop(db_name)
74+ raise Exception("updater.py told us that OpenERP version doesn't match database version!")
75 cr.commit()
76 finally:
77 cr.close()
78
79=== modified file 'bin/service/web_services.py'
80--- bin/service/web_services.py 2013-02-04 14:43:01 +0000
81+++ bin/service/web_services.py 2013-02-28 12:40:27 +0000
82@@ -32,6 +32,7 @@
83 import ir
84 import netsvc
85 import pooler
86+import updater
87 import release
88 import sql_db
89 import tools
90@@ -191,7 +192,7 @@
91
92 self._set_pg_psw_env_var()
93
94- cmd = [tools.misc.find_pg_tool('pg_dump'), '--format=c', '--no-owner']
95+ cmd = ['pg_dump', '--format=c', '--no-owner']
96 if tools.config['db_user']:
97 cmd.append('--username=' + tools.config['db_user'])
98 if tools.config['db_host']:
99@@ -227,7 +228,7 @@
100
101 self._create_empty_database(db_name)
102
103- cmd = [tools.misc.find_pg_tool('pg_restore'), '--no-owner', '--no-acl']
104+ cmd = ['pg_restore', '--no-owner']
105 if tools.config['db_user']:
106 cmd.append('--username=' + tools.config['db_user'])
107 if tools.config['db_host']:
108
109=== modified file 'bin/sql_db.py'
110--- bin/sql_db.py 2011-01-18 19:26:37 +0000
111+++ bin/sql_db.py 2013-02-28 12:40:27 +0000
112@@ -325,7 +325,7 @@
113
114 @locked
115 def close_all(self, dsn):
116- self.__logger.info('%r: Close all connections to %r', self, dsn)
117+ self.__logger.info('%r: Close all connections', self)
118 for i, (cnx, used) in tools.reverse_enumerate(self._connections):
119 if dsn_are_equals(cnx.dsn, dsn):
120 cnx.close()
121
122=== added file 'bin/unifield-version.txt'
123--- bin/unifield-version.txt 1970-01-01 00:00:00 +0000
124+++ bin/unifield-version.txt 2013-02-28 12:40:27 +0000
125@@ -0,0 +1,1 @@
126+88888888888888888888888888888888
127
128=== modified file 'bin/updater.py'
129--- bin/updater.py 2012-10-19 13:22:33 +0000
130+++ bin/updater.py 2013-02-28 12:40:27 +0000
131@@ -1,10 +1,100 @@
132+"""
133+Unifield module to upgrade the instance to the next version of Unifield
134+Beware that we expect to be in the bin/ directory to proceed!!
135+"""
136+from __future__ import with_statement
137+import re
138 import os
139 import sys
140-import psycopg2
141+from hashlib import md5
142 from datetime import datetime
143-
144-## Unix-like find
145+from base64 import b64decode
146+from StringIO import StringIO
147+import logging
148+import time
149+
150+if sys.version_info >= (2, 6, 6):
151+ from zipfile import ZipFile, ZipInfo
152+else:
153+ from zipfile266 import ZipFile, ZipInfo
154+
155+__all__ = ('isset_lock', 'server_version', 'base_version', 'do_prepare', 'base_module_upgrade', 'restart_server')
156+
157+restart_required = False
158+log_file = 'updater.log'
159+lock_file = 'update.lock'
160+update_dir = '.update'
161+server_version_file = 'unifield-version.txt'
162+new_version_file = os.path.join(update_dir, 'update-list.txt')
163+restart_delay = 5
164+
165+md5hex_size = (md5().digest_size * 8 / 4)
166+base_version = '8' * md5hex_size
167+re_version = re.compile(r'^\s*([a-fA-F0-9]{'+str(md5hex_size)+r'}\b)')
168+logger = logging.getLogger('updater')
169+
170+def restart_server():
171+ """Restart OpenERP server"""
172+ global restart_required
173+ logger.info("Restaring OpenERP Server in %d seconds..." % restart_delay)
174+ restart_required = True
175+
176+def isset_lock(file=None):
177+ """Check if server lock file is set"""
178+ if file is None: file = lock_file
179+ return os.path.isfile(file)
180+
181+def set_lock(file=None):
182+ """Set the lock file to make OpenERP run into do_update method against normal execution"""
183+ from tools import config
184+ if file is None: file = lock_file
185+ with open(file, "w") as f:
186+ f.write(unicode({'path':os.getcwd(),'rcfile':config.rcfile}))
187+
188+def unset_lock(file=None):
189+ """Remove the lock"""
190+ global exec_path
191+ global rcfile
192+ if file is None: file = lock_file
193+ with open(file, "r") as f:
194+ data = eval(f.read().strip())
195+ exec_path = data['path']
196+ rcfile = data['rcfile']
197+ os.unlink(file)
198+
199+def parse_version_file(filepath):
200+ """Short method to parse a "version file"
201+ Basically, a file where each line starts with the sum of a patch"""
202+ assert os.path.isfile(filepath), "The file `%s' must be a file!" % filepath
203+ versions = []
204+ with open(filepath, 'r') as f:
205+ for line in f:
206+ line = line.rstrip()
207+ if not line: continue
208+ try:
209+ m = re_version.match(line)
210+ versions.append( m.group(1) )
211+ except AttributeError:
212+ raise Exception("Unable to parse version from file `%s': %s" % (filepath, line))
213+ return versions
214+
215+def get_server_version():
216+ """Autocratically get the current versions of the server
217+ Get a special key 88888888888888888888888888888888 for default value if no server version can be found"""
218+ if not os.path.exists(server_version_file):
219+ return [base_version]
220+ return parse_version_file(server_version_file)
221+
222+def add_versions(versions, filepath=server_version_file):
223+ """Set server version with new versions"""
224+ if not versions:
225+ return
226+ with open(filepath, 'a') as f:
227+ for ver in versions:
228+ f.write((" ".join([unicode(x) for x in ver]) if hasattr(ver, '__iter__') else ver)+os.linesep)
229+
230 def find(path):
231+ """Unix-like find"""
232 files = os.listdir(path)
233 for name in iter(files):
234 abspath = path+os.path.sep+name
235@@ -12,23 +102,8 @@
236 files.extend( map(lambda x:name+os.path.sep+x, os.listdir(abspath)) )
237 return files
238
239-## Define way to forward logs
240-def warn(*args):
241- sys.stderr.write(" ".join(map(lambda x:str(x), args))+"\n")
242-
243-## Try...Resume...
244-def Try(command):
245- try:
246- command()
247- except:
248- e, msg = sys.exc_info()[0].__name__, str(sys.exc_info()[1])
249- warn(str(msg))
250- return False
251- else:
252- return True
253-
254-## Python free rmtree
255 def rmtree(files, path=None, verbose=False):
256+ """Python free rmtree"""
257 if path is None and isinstance(files, str):
258 path, files = files, find(files)
259 for f in reversed(files):
260@@ -40,35 +115,72 @@
261 warn("rmdir", target)
262 os.rmdir( target )
263
264+def now():
265+ return datetime.today().strftime("%Y-%m-%d %H:%M:%S")
266+
267+log = sys.stderr
268+
269+def warn(*args):
270+ """Define way to forward logs"""
271+ global log
272+ log.write(("[%s] UPDATER: " % now())+" ".join(map(lambda x:unicode(x), args))+os.linesep)
273+
274+def Try(command):
275+ """Try...Resume..."""
276+ try:
277+ command()
278+ except BaseException, e:
279+ warn(unicode(e))
280+ return False
281+ else:
282+ return True
283+
284+
285+
286+##############################################################################
287+## ##
288+## Main methods of updater modules ##
289+## ##
290+##############################################################################
291+
292+
293+def base_module_upgrade(cr, pool, upgrade_now=False):
294+ """Just like -u base / -u all.
295+ Arguments are:
296+ * cr: cursor to the database
297+ * pool: pool of the same db
298+ * (optional) upgrade_now: False by default; if True, the upgrade process is launched immediately"""
299+ modules = pool.get('ir.module.module')
300+ base_ids = modules.search(cr, 1, [('name', '=', 'base')])
301+ #base_ids = modules.search(cr, 1, [('name', '=', 'sync_client')]) #for tests
302+ modules.button_upgrade(cr, 1, base_ids)
303+ if upgrade_now:
304+ logger.info("Starting base upgrade process")
305+ pool.get('base.module.upgrade').upgrade_module(cr, 1, [])
306+
307+
308 def do_update():
309-## We expect to be in the bin/ directory to proceed
310- if os.path.exists('update.lock'):
311- rev_file = os.path.join('.update','revisions.txt')
312- hist_file = "revision_history.txt"
313- infos = {'exec_path':os.getcwd()}
314- revisions = None
315- cur = None
316- conn = None
317- update_revisions = None
318+ """Real update of the server (before normal OpenERP execution).
319+ This function is triggered when OpenERP starts. When it finishes, it restarts OpenERP automatically.
320+ On failure, the lock file is deleted and OpenERP files are rolled back to their previous state."""
321+ if os.path.exists(lock_file) and Try(unset_lock):
322+ global log
323+ ## Move logs to the log file
324+ try:
325+ log = open(log_file, 'a')
326+ except BaseException, e:
327+ log.write("Cannot write into `%s': %s" % (log, unicode(e)))
328+ warn(lock_file, 'removed')
329+ ## Now, update
330+ application_time = now()
331+ revisions = []
332 files = None
333- args = list(sys.argv)
334- for i, x in enumerate(args):
335- if x in ('-d', '-u', '-c'):
336- args[i] = None
337- args[i+1] = None
338- args = filter(lambda x:x is not None, args)
339 try:
340- ## Read DB name
341- f = open('update.lock')
342- infos = eval(f.read())
343- f.close()
344- revisions = ",".join( map(lambda x:"'"+str(x)+"'", infos['revisions']) )
345- ## Connect to the DB
346- conn = psycopg2.connect(database=infos['dbname'], user=infos['db_user'], password=infos['db_password'], host=infos['db_host'], port=infos['db_port'])
347- conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
348- cur = conn.cursor()
349+ ## Revisions that are going to be installed
350+ revisions = parse_version_file(new_version_file)
351+ os.unlink(new_version_file)
352 ## Explore .update directory
353- files = find('.update')
354+ files = find(update_dir)
355 ## Prepare backup directory
356 if not os.path.exists('backup'):
357 os.mkdir('backup')
358@@ -77,7 +189,7 @@
359 ## Update Files
360 warn("Updating...")
361 for f in files:
362- target = os.path.join('.update', f)
363+ target = os.path.join(update_dir, f)
364 bak = os.path.join('backup', f)
365 if os.path.isdir(target):
366 if os.path.isfile(f) or os.path.islink(f):
367@@ -91,22 +203,14 @@
368 os.rename(f, bak)
369 warn("`%s' -> `%s'" % (target, f))
370 os.rename(target, f)
371- ## Update installed revisions in DB
372- cur.execute("""UPDATE sync_client_version SET state = 'installed', applied = '%s' WHERE name in (%s)"""
373- % ( datetime.today().strftime("%Y-%m-%d %H:%M:%S"), revisions ))
374+ add_versions([(x, application_time) for x in revisions])
375 warn("Update successful.")
376- warn("Revisions added: ", ", ".join( infos['revisions'] ))
377- args.extend(['-d', infos['dbname'], '-u', 'all'])
378- if os.name == 'nt':
379- args.extend(['-c', '"%s"' % infos['conf']])
380- else:
381- args.extend(['-c', infos['conf']])
382- except:
383+ warn("Revisions added: ", ", ".join(revisions))
384+ ## No database update here. I preferred to set modules to update just after the preparation.
385+ ## The reason is, when the pool is populated, it will start by upgrading modules first
386+ except BaseException, e:
387 warn("Update failure!")
388- ## Update DB to mark revisions as not-installed
389- if cur and infos:
390- Try(lambda:cur.execute("""UPDATE sync_client_version SET state = 'not-installed' WHERE name in (%s)"""
391- % ( revisions )))
392+ warn(unicode(e))
393 ## Restore backup and purge .update
394 if files:
395 warn("Restoring...")
396@@ -114,21 +218,140 @@
397 target = os.path.join('backup', f)
398 if os.path.isfile(target) or os.path.islink(target):
399 warn("`%s' -> `%s'" % (target, f))
400- elif os.path.isdir(target):
401- warn("rmdir", target)
402- os.rmdir( target )
403+ os.rename(target, f)
404 warn("Purging...")
405- Try(lambda:rmtree(files, '.update'))
406- warn("rmdir", '.update')
407- Try(lambda:os.rmdir( '.update' ))
408- finally:
409- if cur: cur.close()
410- if conn: conn.close()
411- ## Remove lock file
412- warn("rm", 'update.lock')
413- Try(lambda:os.unlink( 'update.lock' ))
414- warn("Restart OpenERP in", infos['exec_path'], "with:",args)
415- if infos: os.chdir(infos['exec_path'])
416- os.execv(sys.executable, [sys.executable] + args)
417-
418-
419+ Try(lambda:rmtree(update_dir))
420+ if os.name == 'nt':
421+ warn("Exiting OpenERP Server with code 1 to tell service to restart")
422+ sys.exit(1) # require service to restart
423+ else:
424+ warn(("Restart OpenERP in %s:" % exec_path), \
425+ [sys.executable]+sys.argv)
426+ if log is not sys.stderr:
427+ log.close()
428+ os.chdir(exec_path)
429+ os.execv(sys.executable, [sys.executable] + sys.argv)
430+
431+
432+def update_path():
433+ """If server starts normally, this step will fix the paths with the configured path in config rc"""
434+ from tools import config
435+ for v in ('log_file', 'lock_file', 'update_dir', 'server_version_file', 'new_version_file'):
436+ globals()[v] = os.path.join(config['root_path'], globals()[v])
437+ global server_version
438+ server_version = get_server_version()
439+
440+
441+def do_prepare(cr, revision_ids):
442+ """Prepare patches for an upgrade of the server and set the lock file"""
443+ if not revision_ids:
444+ return ('failure', 'Nothing to do.', {})
445+ import pooler
446+ pool = pooler.get_pool(cr.dbname)
447+ version = pool.get('sync_client.version')
448+
449+ # Make an update temporary path
450+ path = update_dir
451+ if not os.path.exists(path):
452+ os.mkdir(path)
453+ else:
454+ for f in reversed(find(path)):
455+ target = os.path.join(path, f)
456+ if os.path.isfile(target) or os.path.islink(target):
457+ logger.debug("rm `%s'" % target)
458+ os.unlink( target )
459+ elif os.path.isdir(target):
460+ logger.debug("rmdir `%s'" % target)
461+ os.rmdir( target )
462+ if not (os.path.isdir(path) and os.access(path, os.W_OK)):
463+ message = "The path `%s' is not a dir or is not writable!"
464+ logger.error(message % path)
465+ return ('failure', message, (path,))
466+ # Proceed all patches
467+ new_revisions = []
468+ corrupt = []
469+ missing = []
470+ need_restart = []
471+ for rev in version.browse(cr, 1, revision_ids):
472+ # Check presence of the patch
473+ if not rev.patch:
474+ missing.append( rev )
475+ continue
476+ # Check if the file match the expected sum
477+ patch = b64decode( rev.patch )
478+ local_sum = md5(patch).hexdigest()
479+ if local_sum != rev.sum:
480+ corrupt.append( rev )
481+ elif not (corrupt or missing):
482+ # Extract the Zip
483+ f = StringIO(patch)
484+ try:
485+ zip = ZipFile(f, 'r')
486+ zip.extractall(path)
487+ finally:
488+ f.close()
489+ # Store to list of updates
490+ new_revisions.append( (rev.sum, ("[%s] %s - %s" % (rev.importance, rev.date, rev.name))) )
491+ if rev.state == 'not-installed':
492+ need_restart.append(rev.id)
493+ # Remove corrupted patches
494+ if corrupt:
495+ corrupt_ids = [x.id for x in corrupt]
496+ version.write(cr, 1, corrupt_ids, {'patch':False})
497+ if len(corrupt) == 1: message = "One file you downloaded seems to be corrupt:\n\n%s"
498+ else: message = "Some files you downloaded seem to be corrupt:\n\n%s"
499+ values = ""
500+ for rev in corrupt:
501+ values += " - %s (sum expected: %s)\n" % ((rev.name or 'unknown'), rev.sum)
502+ logger.error(message % values)
503+ return ('corrupt', message, values)
504+ # Complaints about missing patches
505+ if missing:
506+ if len(missing) == 1:
507+ message = "A file is missing: %(name)s (check sum: %(sum)s)"
508+ values = {
509+ 'name' : missing[0].name or 'unknown',
510+ 'sum' : missing[0].sum
511+ }
512+ else:
513+ message = "Some files are missing:\n\n%s"
514+ values = ""
515+ for rev in missing:
516+ values += " - %s (check sum: %s)\n" % ((rev.name or 'unknown'), rev.sum)
517+ logger.error(message % values)
518+ return ('missing', message, values)
519+ # Fix the flag of the pending patches
520+ version.write(cr, 1, need_restart, {'state':'need-restart'})
521+ # Make a lock file to make OpenERP able to detect an update
522+ set_lock()
523+ add_versions(new_revisions, new_version_file)
524+ logger.info("Server update prepared. Need to restart to complete the upgrade.")
525+ return ('success', 'Restart required', {})
526+
527+
528+def do_upgrade(cr, pool):
529+ """Start upgrade process (called by login method and restore)"""
530+ versions = pool.get('sync_client.version')
531+ if versions is None:
532+ return True
533+
534+ db_versions = versions.read(cr, 1, versions.search(cr, 1, [('state','=','installed')]), ['sum'])
535+ db_versions = map(lambda x:x['sum'], db_versions)
536+ server_lack_versions = set(db_versions) - set(server_version)
537+ db_lack_versions = set(server_version) - set(db_versions) - set([base_version])
538+
539+ if server_lack_versions:
540+ revision_ids = versions.search(cr, 1, [('sum','in',list(server_lack_versions))], order='date asc')
541+ res = do_prepare(cr, revision_ids)
542+ if res[0] == 'success':
543+ import tools
544+ os.chdir( tools.config['root_path'] )
545+ restart_server()
546+ else:
547+ return False
548+
549+ elif db_lack_versions:
550+ base_module_upgrade(cr, pool, upgrade_now=True)
551+ # Note: There is no need to update the db versions, the `def init()' of the object does that for us
552+
553+ return True
554
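For readers of this diff, the version bookkeeping added above boils down to a plain-text file: bin/unifield-version.txt starts with the 32-character placeholder sum and gains one line per applied patch, each line beginning with the patch's md5 hex sum followed by an optional annotation. The sketch below (Python 2, matching the codebase) round-trips that format with simplified stand-ins for parse_version_file() and add_versions(); the demo file name and the sample patch content are illustrative only, not part of the branch.

    from __future__ import with_statement
    import os
    import re
    from hashlib import md5

    MD5HEX_SIZE = md5().digest_size * 2                # 32 hex characters
    BASE_VERSION = '8' * MD5HEX_SIZE                   # placeholder shipped in unifield-version.txt
    RE_VERSION = re.compile(r'^\s*([a-fA-F0-9]{%d}\b)' % MD5HEX_SIZE)

    def parse_version_file(filepath):
        """Return the patch sums found at the start of each non-empty line."""
        versions = []
        with open(filepath, 'r') as f:
            for line in f:
                line = line.rstrip()
                if not line:
                    continue
                m = RE_VERSION.match(line)
                if m is None:
                    raise ValueError("Unable to parse version from %r" % line)
                versions.append(m.group(1))
        return versions

    def add_versions(versions, filepath):
        """Append one 'sum [annotation]' line per entry, like updater.add_versions()."""
        with open(filepath, 'a') as f:
            for ver in versions:
                line = " ".join(ver) if isinstance(ver, (list, tuple)) else ver
                f.write(line + os.linesep)

    if __name__ == '__main__':
        demo = 'unifield-version-demo.txt'             # hypothetical file, not the real bin/unifield-version.txt
        with open(demo, 'w') as f:
            f.write(BASE_VERSION + os.linesep)
        add_versions([(md5('patch-1').hexdigest(), '2013-02-28 12:40:27')], demo)
        print parse_version_file(demo)                 # [BASE_VERSION, md5 sum of the applied patch]

In the branch itself, do_upgrade() compares these sums against the 'installed' rows of sync_client.version: sums present in the database but missing from the file trigger do_prepare() plus a restart, while sums present in the file but not yet in the database trigger base_module_upgrade().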
555=== added file 'bin/zipfile266.py'
556--- bin/zipfile266.py 1970-01-01 00:00:00 +0000
557+++ bin/zipfile266.py 2013-02-28 12:40:27 +0000
558@@ -0,0 +1,1409 @@
559+"""
560+Read and write ZIP files.
561+"""
562+import struct, os, time, sys, shutil
563+import binascii, cStringIO, stat
564+
565+try:
566+ import zlib # We may need its compression method
567+ crc32 = zlib.crc32
568+except ImportError:
569+ zlib = None
570+ crc32 = binascii.crc32
571+
572+__all__ = ["BadZipfile", "error", "ZIP_STORED", "ZIP_DEFLATED", "is_zipfile",
573+ "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile" ]
574+
575+class BadZipfile(Exception):
576+ pass
577+
578+
579+class LargeZipFile(Exception):
580+ """
581+ Raised when writing a zipfile, the zipfile requires ZIP64 extensions
582+ and those extensions are disabled.
583+ """
584+
585+error = BadZipfile # The exception raised by this module
586+
587+ZIP64_LIMIT = (1 << 31) - 1
588+ZIP_FILECOUNT_LIMIT = 1 << 16
589+ZIP_MAX_COMMENT = (1 << 16) - 1
590+
591+# constants for Zip file compression methods
592+ZIP_STORED = 0
593+ZIP_DEFLATED = 8
594+# Other ZIP compression methods not supported
595+
596+# Below are some formats and associated data for reading/writing headers using
597+# the struct module. The names and structures of headers/records are those used
598+# in the PKWARE description of the ZIP file format:
599+# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
600+# (URL valid as of January 2008)
601+
602+# The "end of central directory" structure, magic number, size, and indices
603+# (section V.I in the format document)
604+structEndArchive = "<4s4H2LH"
605+stringEndArchive = "PK\005\006"
606+sizeEndCentDir = struct.calcsize(structEndArchive)
607+
608+_ECD_SIGNATURE = 0
609+_ECD_DISK_NUMBER = 1
610+_ECD_DISK_START = 2
611+_ECD_ENTRIES_THIS_DISK = 3
612+_ECD_ENTRIES_TOTAL = 4
613+_ECD_SIZE = 5
614+_ECD_OFFSET = 6
615+_ECD_COMMENT_SIZE = 7
616+# These last two indices are not part of the structure as defined in the
617+# spec, but they are used internally by this module as a convenience
618+_ECD_COMMENT = 8
619+_ECD_LOCATION = 9
620+
621+# The "central directory" structure, magic number, size, and indices
622+# of entries in the structure (section V.F in the format document)
623+structCentralDir = "<4s4B4HL2L5H2L"
624+stringCentralDir = "PK\001\002"
625+sizeCentralDir = struct.calcsize(structCentralDir)
626+
627+# indexes of entries in the central directory structure
628+_CD_SIGNATURE = 0
629+_CD_CREATE_VERSION = 1
630+_CD_CREATE_SYSTEM = 2
631+_CD_EXTRACT_VERSION = 3
632+_CD_EXTRACT_SYSTEM = 4
633+_CD_FLAG_BITS = 5
634+_CD_COMPRESS_TYPE = 6
635+_CD_TIME = 7
636+_CD_DATE = 8
637+_CD_CRC = 9
638+_CD_COMPRESSED_SIZE = 10
639+_CD_UNCOMPRESSED_SIZE = 11
640+_CD_FILENAME_LENGTH = 12
641+_CD_EXTRA_FIELD_LENGTH = 13
642+_CD_COMMENT_LENGTH = 14
643+_CD_DISK_NUMBER_START = 15
644+_CD_INTERNAL_FILE_ATTRIBUTES = 16
645+_CD_EXTERNAL_FILE_ATTRIBUTES = 17
646+_CD_LOCAL_HEADER_OFFSET = 18
647+
648+# The "local file header" structure, magic number, size, and indices
649+# (section V.A in the format document)
650+structFileHeader = "<4s2B4HL2L2H"
651+stringFileHeader = "PK\003\004"
652+sizeFileHeader = struct.calcsize(structFileHeader)
653+
654+_FH_SIGNATURE = 0
655+_FH_EXTRACT_VERSION = 1
656+_FH_EXTRACT_SYSTEM = 2
657+_FH_GENERAL_PURPOSE_FLAG_BITS = 3
658+_FH_COMPRESSION_METHOD = 4
659+_FH_LAST_MOD_TIME = 5
660+_FH_LAST_MOD_DATE = 6
661+_FH_CRC = 7
662+_FH_COMPRESSED_SIZE = 8
663+_FH_UNCOMPRESSED_SIZE = 9
664+_FH_FILENAME_LENGTH = 10
665+_FH_EXTRA_FIELD_LENGTH = 11
666+
667+# The "Zip64 end of central directory locator" structure, magic number, and size
668+structEndArchive64Locator = "<4sLQL"
669+stringEndArchive64Locator = "PK\x06\x07"
670+sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
671+
672+# The "Zip64 end of central directory" record, magic number, size, and indices
673+# (section V.G in the format document)
674+structEndArchive64 = "<4sQ2H2L4Q"
675+stringEndArchive64 = "PK\x06\x06"
676+sizeEndCentDir64 = struct.calcsize(structEndArchive64)
677+
678+_CD64_SIGNATURE = 0
679+_CD64_DIRECTORY_RECSIZE = 1
680+_CD64_CREATE_VERSION = 2
681+_CD64_EXTRACT_VERSION = 3
682+_CD64_DISK_NUMBER = 4
683+_CD64_DISK_NUMBER_START = 5
684+_CD64_NUMBER_ENTRIES_THIS_DISK = 6
685+_CD64_NUMBER_ENTRIES_TOTAL = 7
686+_CD64_DIRECTORY_SIZE = 8
687+_CD64_OFFSET_START_CENTDIR = 9
688+
689+def is_zipfile(filename):
690+ """Quickly see if file is a ZIP file by checking the magic number."""
691+ try:
692+ fpin = open(filename, "rb")
693+ endrec = _EndRecData(fpin)
694+ fpin.close()
695+ if endrec:
696+ return True # file has correct magic number
697+ except IOError:
698+ pass
699+ return False
700+
701+def _EndRecData64(fpin, offset, endrec):
702+ """
703+ Read the ZIP64 end-of-archive records and use that to update endrec
704+ """
705+ fpin.seek(offset - sizeEndCentDir64Locator, 2)
706+ data = fpin.read(sizeEndCentDir64Locator)
707+ sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
708+ if sig != stringEndArchive64Locator:
709+ return endrec
710+
711+ if diskno != 0 or disks != 1:
712+ raise BadZipfile("zipfiles that span multiple disks are not supported")
713+
714+ # Assume no 'zip64 extensible data'
715+ fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
716+ data = fpin.read(sizeEndCentDir64)
717+ sig, sz, create_version, read_version, disk_num, disk_dir, \
718+ dircount, dircount2, dirsize, diroffset = \
719+ struct.unpack(structEndArchive64, data)
720+ if sig != stringEndArchive64:
721+ return endrec
722+
723+ # Update the original endrec using data from the ZIP64 record
724+ endrec[_ECD_SIGNATURE] = sig
725+ endrec[_ECD_DISK_NUMBER] = disk_num
726+ endrec[_ECD_DISK_START] = disk_dir
727+ endrec[_ECD_ENTRIES_THIS_DISK] = dircount
728+ endrec[_ECD_ENTRIES_TOTAL] = dircount2
729+ endrec[_ECD_SIZE] = dirsize
730+ endrec[_ECD_OFFSET] = diroffset
731+ return endrec
732+
733+
734+def _EndRecData(fpin):
735+ """Return data from the "End of Central Directory" record, or None.
736+
737+ The data is a list of the nine items in the ZIP "End of central dir"
738+ record followed by a tenth item, the file seek offset of this record."""
739+
740+ # Determine file size
741+ fpin.seek(0, 2)
742+ filesize = fpin.tell()
743+
744+ # Check to see if this is ZIP file with no archive comment (the
745+ # "end of central directory" structure should be the last item in the
746+ # file if this is the case).
747+ try:
748+ fpin.seek(-sizeEndCentDir, 2)
749+ except IOError:
750+ return None
751+ data = fpin.read()
752+ if data[0:4] == stringEndArchive and data[-2:] == "\000\000":
753+ # the signature is correct and there's no comment, unpack structure
754+ endrec = struct.unpack(structEndArchive, data)
755+ endrec=list(endrec)
756+
757+ # Append a blank comment and record start offset
758+ endrec.append("")
759+ endrec.append(filesize - sizeEndCentDir)
760+
761+ # Try to read the "Zip64 end of central directory" structure
762+ return _EndRecData64(fpin, -sizeEndCentDir, endrec)
763+
764+ # Either this is not a ZIP file, or it is a ZIP file with an archive
765+ # comment. Search the end of the file for the "end of central directory"
766+ # record signature. The comment is the last item in the ZIP file and may be
767+ # up to 64K long. It is assumed that the "end of central directory" magic
768+ # number does not appear in the comment.
769+ maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
770+ fpin.seek(maxCommentStart, 0)
771+ data = fpin.read()
772+ start = data.rfind(stringEndArchive)
773+ if start >= 0:
774+ # found the magic number; attempt to unpack and interpret
775+ recData = data[start:start+sizeEndCentDir]
776+ endrec = list(struct.unpack(structEndArchive, recData))
777+ comment = data[start+sizeEndCentDir:]
778+ # check that comment length is correct
779+ if endrec[_ECD_COMMENT_SIZE] == len(comment):
780+ # Append the archive comment and start offset
781+ endrec.append(comment)
782+ endrec.append(maxCommentStart + start)
783+
784+ # Try to read the "Zip64 end of central directory" structure
785+ return _EndRecData64(fpin, maxCommentStart + start - filesize,
786+ endrec)
787+
788+ # Unable to find a valid end of central directory structure
789+ return
790+
791+
792+class ZipInfo (object):
793+ """Class with attributes describing each file in the ZIP archive."""
794+
795+ __slots__ = (
796+ 'orig_filename',
797+ 'filename',
798+ 'date_time',
799+ 'compress_type',
800+ 'comment',
801+ 'extra',
802+ 'create_system',
803+ 'create_version',
804+ 'extract_version',
805+ 'reserved',
806+ 'flag_bits',
807+ 'volume',
808+ 'internal_attr',
809+ 'external_attr',
810+ 'header_offset',
811+ 'CRC',
812+ 'compress_size',
813+ 'file_size',
814+ '_raw_time',
815+ )
816+
817+ def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
818+ self.orig_filename = filename # Original file name in archive
819+
820+ # Terminate the file name at the first null byte. Null bytes in file
821+ # names are used as tricks by viruses in archives.
822+ null_byte = filename.find(chr(0))
823+ if null_byte >= 0:
824+ filename = filename[0:null_byte]
825+ # This is used to ensure paths in generated ZIP files always use
826+ # forward slashes as the directory separator, as required by the
827+ # ZIP format specification.
828+ if os.sep != "/" and os.sep in filename:
829+ filename = filename.replace(os.sep, "/")
830+
831+ self.filename = filename # Normalized file name
832+ self.date_time = date_time # year, month, day, hour, min, sec
833+ # Standard values:
834+ self.compress_type = ZIP_STORED # Type of compression for the file
835+ self.comment = "" # Comment for each file
836+ self.extra = "" # ZIP extra data
837+ if sys.platform == 'win32':
838+ self.create_system = 0 # System which created ZIP archive
839+ else:
840+ # Assume everything else is unix-y
841+ self.create_system = 3 # System which created ZIP archive
842+ self.create_version = 20 # Version which created ZIP archive
843+ self.extract_version = 20 # Version needed to extract archive
844+ self.reserved = 0 # Must be zero
845+ self.flag_bits = 0 # ZIP flag bits
846+ self.volume = 0 # Volume number of file header
847+ self.internal_attr = 0 # Internal attributes
848+ self.external_attr = 0 # External file attributes
849+ # Other attributes are set by class ZipFile:
850+ # header_offset Byte offset to the file header
851+ # CRC CRC-32 of the uncompressed file
852+ # compress_size Size of the compressed file
853+ # file_size Size of the uncompressed file
854+
855+ def FileHeader(self):
856+ """Return the per-file header as a string."""
857+ dt = self.date_time
858+ dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
859+ dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
860+ if self.flag_bits & 0x08:
861+ # Set these to zero because we write them after the file data
862+ CRC = compress_size = file_size = 0
863+ else:
864+ CRC = self.CRC
865+ compress_size = self.compress_size
866+ file_size = self.file_size
867+
868+ extra = self.extra
869+
870+ if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
871+ # File is larger than what fits into a 4 byte integer,
872+ # fall back to the ZIP64 extension
873+ fmt = '<HHQQ'
874+ extra = extra + struct.pack(fmt,
875+ 1, struct.calcsize(fmt)-4, file_size, compress_size)
876+ file_size = 0xffffffff
877+ compress_size = 0xffffffff
878+ self.extract_version = max(45, self.extract_version)
879+ self.create_version = max(45, self.extract_version)
880+
881+ filename, flag_bits = self._encodeFilenameFlags()
882+ header = struct.pack(structFileHeader, stringFileHeader,
883+ self.extract_version, self.reserved, flag_bits,
884+ self.compress_type, dostime, dosdate, CRC,
885+ compress_size, file_size,
886+ len(filename), len(extra))
887+ return header + filename + extra
888+
889+ def _encodeFilenameFlags(self):
890+ if isinstance(self.filename, unicode):
891+ try:
892+ return self.filename.encode('ascii'), self.flag_bits
893+ except UnicodeEncodeError:
894+ return self.filename.encode('utf-8'), self.flag_bits | 0x800
895+ else:
896+ return self.filename, self.flag_bits
897+
898+ def _decodeFilename(self):
899+ if self.flag_bits & 0x800:
900+ return self.filename.decode('utf-8')
901+ else:
902+ return self.filename
903+
904+ def _decodeExtra(self):
905+ # Try to decode the extra field.
906+ extra = self.extra
907+ unpack = struct.unpack
908+ while extra:
909+ tp, ln = unpack('<HH', extra[:4])
910+ if tp == 1:
911+ if ln >= 24:
912+ counts = unpack('<QQQ', extra[4:28])
913+ elif ln == 16:
914+ counts = unpack('<QQ', extra[4:20])
915+ elif ln == 8:
916+ counts = unpack('<Q', extra[4:12])
917+ elif ln == 0:
918+ counts = ()
919+ else:
920+ raise RuntimeError, "Corrupt extra field %s"%(ln,)
921+
922+ idx = 0
923+
924+ # ZIP64 extension (large files and/or large archives)
925+ if self.file_size in (0xffffffffffffffffL, 0xffffffffL):
926+ self.file_size = counts[idx]
927+ idx += 1
928+
929+ if self.compress_size == 0xFFFFFFFFL:
930+ self.compress_size = counts[idx]
931+ idx += 1
932+
933+ if self.header_offset == 0xffffffffL:
934+ old = self.header_offset
935+ self.header_offset = counts[idx]
936+ idx+=1
937+
938+ extra = extra[ln+4:]
939+
940+
941+class _ZipDecrypter:
942+ """Class to handle decryption of files stored within a ZIP archive.
943+
944+ ZIP supports a password-based form of encryption. Even though known
945+ plaintext attacks have been found against it, it is still useful
946+ to be able to get data out of such a file.
947+
948+ Usage:
949+ zd = _ZipDecrypter(mypwd)
950+ plain_char = zd(cypher_char)
951+ plain_text = map(zd, cypher_text)
952+ """
953+
954+ def _GenerateCRCTable():
955+ """Generate a CRC-32 table.
956+
957+ ZIP encryption uses the CRC32 one-byte primitive for scrambling some
958+ internal keys. We noticed that a direct implementation is faster than
959+ relying on binascii.crc32().
960+ """
961+ poly = 0xedb88320
962+ table = [0] * 256
963+ for i in range(256):
964+ crc = i
965+ for j in range(8):
966+ if crc & 1:
967+ crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
968+ else:
969+ crc = ((crc >> 1) & 0x7FFFFFFF)
970+ table[i] = crc
971+ return table
972+ crctable = _GenerateCRCTable()
973+
974+ def _crc32(self, ch, crc):
975+ """Compute the CRC32 primitive on one byte."""
976+ return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ord(ch)) & 0xff]
977+
978+ def __init__(self, pwd):
979+ self.key0 = 305419896
980+ self.key1 = 591751049
981+ self.key2 = 878082192
982+ for p in pwd:
983+ self._UpdateKeys(p)
984+
985+ def _UpdateKeys(self, c):
986+ self.key0 = self._crc32(c, self.key0)
987+ self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295
988+ self.key1 = (self.key1 * 134775813 + 1) & 4294967295
989+ self.key2 = self._crc32(chr((self.key1 >> 24) & 255), self.key2)
990+
991+ def __call__(self, c):
992+ """Decrypt a single character."""
993+ c = ord(c)
994+ k = self.key2 | 2
995+ c = c ^ (((k * (k^1)) >> 8) & 255)
996+ c = chr(c)
997+ self._UpdateKeys(c)
998+ return c
999+
1000+class ZipExtFile:
1001+ """File-like object for reading an archive member.
1002+ Is returned by ZipFile.open().
1003+ """
1004+
1005+ def __init__(self, fileobj, zipinfo, decrypt=None):
1006+ self.fileobj = fileobj
1007+ self.decrypter = decrypt
1008+ self.bytes_read = 0L
1009+ self.rawbuffer = ''
1010+ self.readbuffer = ''
1011+ self.linebuffer = ''
1012+ self.eof = False
1013+ self.univ_newlines = False
1014+ self.nlSeps = ("\n", )
1015+ self.lastdiscard = ''
1016+
1017+ self.compress_type = zipinfo.compress_type
1018+ self.compress_size = zipinfo.compress_size
1019+
1020+ self.closed = False
1021+ self.mode = "r"
1022+ self.name = zipinfo.filename
1023+
1024+ # read from compressed files in 64k blocks
1025+ self.compreadsize = 64*1024
1026+ if self.compress_type == ZIP_DEFLATED:
1027+ self.dc = zlib.decompressobj(-15)
1028+
1029+ def set_univ_newlines(self, univ_newlines):
1030+ self.univ_newlines = univ_newlines
1031+
1032+ # pick line separator char(s) based on universal newlines flag
1033+ self.nlSeps = ("\n", )
1034+ if self.univ_newlines:
1035+ self.nlSeps = ("\r\n", "\r", "\n")
1036+
1037+ def __iter__(self):
1038+ return self
1039+
1040+ def next(self):
1041+ nextline = self.readline()
1042+ if not nextline:
1043+ raise StopIteration()
1044+
1045+ return nextline
1046+
1047+ def close(self):
1048+ self.closed = True
1049+
1050+ def _checkfornewline(self):
1051+ nl, nllen = -1, -1
1052+ if self.linebuffer:
1053+ # ugly check for cases where half of an \r\n pair was
1054+ # read on the last pass, and the \r was discarded. In this
1055+ # case we just throw away the \n at the start of the buffer.
1056+ if (self.lastdiscard, self.linebuffer[0]) == ('\r','\n'):
1057+ self.linebuffer = self.linebuffer[1:]
1058+
1059+ for sep in self.nlSeps:
1060+ nl = self.linebuffer.find(sep)
1061+ if nl >= 0:
1062+ nllen = len(sep)
1063+ return nl, nllen
1064+
1065+ return nl, nllen
1066+
1067+ def readline(self, size = -1):
1068+ """Read a line with approx. size. If size is negative,
1069+ read a whole line.
1070+ """
1071+ if size < 0:
1072+ size = sys.maxint
1073+ elif size == 0:
1074+ return ''
1075+
1076+ # check for a newline already in buffer
1077+ nl, nllen = self._checkfornewline()
1078+
1079+ if nl >= 0:
1080+ # the next line was already in the buffer
1081+ nl = min(nl, size)
1082+ else:
1083+ # no line break in buffer - try to read more
1084+ size -= len(self.linebuffer)
1085+ while nl < 0 and size > 0:
1086+ buf = self.read(min(size, 100))
1087+ if not buf:
1088+ break
1089+ self.linebuffer += buf
1090+ size -= len(buf)
1091+
1092+ # check for a newline in buffer
1093+ nl, nllen = self._checkfornewline()
1094+
1095+ # we either ran out of bytes in the file, or
1096+ # met the specified size limit without finding a newline,
1097+ # so return current buffer
1098+ if nl < 0:
1099+ s = self.linebuffer
1100+ self.linebuffer = ''
1101+ return s
1102+
1103+ buf = self.linebuffer[:nl]
1104+ self.lastdiscard = self.linebuffer[nl:nl + nllen]
1105+ self.linebuffer = self.linebuffer[nl + nllen:]
1106+
1107+ # line is always returned with \n as newline char (except possibly
1108+ # for a final incomplete line in the file, which is handled above).
1109+ return buf + "\n"
1110+
1111+ def readlines(self, sizehint = -1):
1112+ """Return a list with all (following) lines. The sizehint parameter
1113+ is ignored in this implementation.
1114+ """
1115+ result = []
1116+ while True:
1117+ line = self.readline()
1118+ if not line: break
1119+ result.append(line)
1120+ return result
1121+
1122+ def read(self, size = None):
1123+ # act like file() obj and return empty string if size is 0
1124+ if size == 0:
1125+ return ''
1126+
1127+ # determine read size
1128+ bytesToRead = self.compress_size - self.bytes_read
1129+
1130+ # adjust read size for encrypted files since the first 12 bytes
1131+ # are for the encryption/password information
1132+ if self.decrypter is not None:
1133+ bytesToRead -= 12
1134+
1135+ if size is not None and size >= 0:
1136+ if self.compress_type == ZIP_STORED:
1137+ lr = len(self.readbuffer)
1138+ bytesToRead = min(bytesToRead, size - lr)
1139+ elif self.compress_type == ZIP_DEFLATED:
1140+ if len(self.readbuffer) > size:
1141+ # the user has requested fewer bytes than we've already
1142+ # pulled through the decompressor; don't read any more
1143+ bytesToRead = 0
1144+ else:
1145+ # user will use up the buffer, so read some more
1146+ lr = len(self.rawbuffer)
1147+ bytesToRead = min(bytesToRead, self.compreadsize - lr)
1148+
1149+ # avoid reading past end of file contents
1150+ if bytesToRead + self.bytes_read > self.compress_size:
1151+ bytesToRead = self.compress_size - self.bytes_read
1152+
1153+ # try to read from file (if necessary)
1154+ if bytesToRead > 0:
1155+ bytes = self.fileobj.read(bytesToRead)
1156+ self.bytes_read += len(bytes)
1157+ self.rawbuffer += bytes
1158+
1159+ # handle contents of raw buffer
1160+ if self.rawbuffer:
1161+ newdata = self.rawbuffer
1162+ self.rawbuffer = ''
1163+
1164+ # decrypt new data if we were given an object to handle that
1165+ if newdata and self.decrypter is not None:
1166+ newdata = ''.join(map(self.decrypter, newdata))
1167+
1168+ # decompress newly read data if necessary
1169+ if newdata and self.compress_type == ZIP_DEFLATED:
1170+ newdata = self.dc.decompress(newdata)
1171+ self.rawbuffer = self.dc.unconsumed_tail
1172+ if self.eof and len(self.rawbuffer) == 0:
1173+ # we're out of raw bytes (both from the file and
1174+ # the local buffer); flush just to make sure the
1175+ # decompressor is done
1176+ newdata += self.dc.flush()
1177+ # prevent decompressor from being used again
1178+ self.dc = None
1179+
1180+ self.readbuffer += newdata
1181+
1182+
1183+ # return what the user asked for
1184+ if size is None or len(self.readbuffer) <= size:
1185+ bytes = self.readbuffer
1186+ self.readbuffer = ''
1187+ else:
1188+ bytes = self.readbuffer[:size]
1189+ self.readbuffer = self.readbuffer[size:]
1190+
1191+ return bytes
1192+
1193+
1194+class ZipFile:
1195+ """ Class with methods to open, read, write, close, list zip files.
1196+
1197+ z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)
1198+
1199+ file: Either the path to the file, or a file-like object.
1200+ If it is a path, the file will be opened and closed by ZipFile.
1201+ mode: The mode can be either read "r", write "w" or append "a".
1202+ compression: ZIP_STORED (no compression) or ZIP_DEFLATED (requires zlib).
1203+ allowZip64: if True ZipFile will create files with ZIP64 extensions when
1204+ needed, otherwise it will raise an exception when this would
1205+ be necessary.
1206+
1207+ """
1208+
1209+ fp = None # Set here since __del__ checks it
1210+
1211+ def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
1212+ """Open the ZIP file with mode read "r", write "w" or append "a"."""
1213+ if mode not in ("r", "w", "a"):
1214+ raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')
1215+
1216+ if compression == ZIP_STORED:
1217+ pass
1218+ elif compression == ZIP_DEFLATED:
1219+ if not zlib:
1220+ raise RuntimeError,\
1221+ "Compression requires the (missing) zlib module"
1222+ else:
1223+ raise RuntimeError, "That compression method is not supported"
1224+
1225+ self._allowZip64 = allowZip64
1226+ self._didModify = False
1227+ self.debug = 0 # Level of printing: 0 through 3
1228+ self.NameToInfo = {} # Find file info given name
1229+ self.filelist = [] # List of ZipInfo instances for archive
1230+ self.compression = compression # Method of compression
1231+ self.mode = key = mode.replace('b', '')[0]
1232+ self.pwd = None
1233+ self.comment = ''
1234+
1235+ # Check if we were passed a file-like object
1236+ if isinstance(file, basestring):
1237+ self._filePassed = 0
1238+ self.filename = file
1239+ modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
1240+ try:
1241+ self.fp = open(file, modeDict[mode])
1242+ except IOError:
1243+ if mode == 'a':
1244+ mode = key = 'w'
1245+ self.fp = open(file, modeDict[mode])
1246+ else:
1247+ raise
1248+ else:
1249+ self._filePassed = 1
1250+ self.fp = file
1251+ self.filename = getattr(file, 'name', None)
1252+
1253+ if key == 'r':
1254+ self._GetContents()
1255+ elif key == 'w':
1256+ pass
1257+ elif key == 'a':
1258+ try: # See if file is a zip file
1259+ self._RealGetContents()
1260+ # seek to start of directory and overwrite
1261+ self.fp.seek(self.start_dir, 0)
1262+ except BadZipfile: # file is not a zip file, just append
1263+ self.fp.seek(0, 2)
1264+ else:
1265+ if not self._filePassed:
1266+ self.fp.close()
1267+ self.fp = None
1268+ raise RuntimeError, 'Mode must be "r", "w" or "a"'
1269+
1270+ def _GetContents(self):
1271+ """Read the directory, making sure we close the file if the format
1272+ is bad."""
1273+ try:
1274+ self._RealGetContents()
1275+ except BadZipfile:
1276+ if not self._filePassed:
1277+ self.fp.close()
1278+ self.fp = None
1279+ raise
1280+
1281+ def _RealGetContents(self):
1282+ """Read in the table of contents for the ZIP file."""
1283+ fp = self.fp
1284+ endrec = _EndRecData(fp)
1285+ if not endrec:
1286+ raise BadZipfile, "File is not a zip file"
1287+ if self.debug > 1:
1288+ print endrec
1289+ size_cd = endrec[_ECD_SIZE] # bytes in central directory
1290+ offset_cd = endrec[_ECD_OFFSET] # offset of central directory
1291+ self.comment = endrec[_ECD_COMMENT] # archive comment
1292+
1293+ # "concat" is zero, unless zip was concatenated to another file
1294+ concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
1295+ if endrec[_ECD_SIGNATURE] == stringEndArchive64:
1296+ # If Zip64 extension structures are present, account for them
1297+ concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
1298+
1299+ if self.debug > 2:
1300+ inferred = concat + offset_cd
1301+ print "given, inferred, offset", offset_cd, inferred, concat
1302+ # self.start_dir: Position of start of central directory
1303+ self.start_dir = offset_cd + concat
1304+ fp.seek(self.start_dir, 0)
1305+ data = fp.read(size_cd)
1306+ fp = cStringIO.StringIO(data)
1307+ total = 0
1308+ while total < size_cd:
1309+ centdir = fp.read(sizeCentralDir)
1310+ if centdir[0:4] != stringCentralDir:
1311+ raise BadZipfile, "Bad magic number for central directory"
1312+ centdir = struct.unpack(structCentralDir, centdir)
1313+ if self.debug > 2:
1314+ print centdir
1315+ filename = fp.read(centdir[_CD_FILENAME_LENGTH])
1316+ # Create ZipInfo instance to store file information
1317+ x = ZipInfo(filename)
1318+ x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
1319+ x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
1320+ x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
1321+ (x.create_version, x.create_system, x.extract_version, x.reserved,
1322+ x.flag_bits, x.compress_type, t, d,
1323+ x.CRC, x.compress_size, x.file_size) = centdir[1:12]
1324+ x.volume, x.internal_attr, x.external_attr = centdir[15:18]
1325+ # Convert date/time code to (year, month, day, hour, min, sec)
1326+ x._raw_time = t
1327+ x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
1328+ t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
1329+
1330+ x._decodeExtra()
1331+ x.header_offset = x.header_offset + concat
1332+ x.filename = x._decodeFilename()
1333+ self.filelist.append(x)
1334+ self.NameToInfo[x.filename] = x
1335+
1336+ # update total bytes read from central directory
1337+ total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
1338+ + centdir[_CD_EXTRA_FIELD_LENGTH]
1339+ + centdir[_CD_COMMENT_LENGTH])
1340+
1341+ if self.debug > 2:
1342+ print "total", total
1343+
1344+
1345+ def namelist(self):
1346+ """Return a list of file names in the archive."""
1347+ l = []
1348+ for data in self.filelist:
1349+ l.append(data.filename)
1350+ return l
1351+
1352+ def infolist(self):
1353+ """Return a list of class ZipInfo instances for files in the
1354+ archive."""
1355+ return self.filelist
1356+
1357+ def printdir(self):
1358+ """Print a table of contents for the zip file."""
1359+ print "%-46s %19s %12s" % ("File Name", "Modified ", "Size")
1360+ for zinfo in self.filelist:
1361+ date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
1362+ print "%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size)
1363+
1364+ def testzip(self):
1365+ """Read all the files and check the CRC."""
1366+ chunk_size = 2 ** 20
1367+ for zinfo in self.filelist:
1368+ try:
1369+ # Read by chunks, to avoid an OverflowError or a
1370+ # MemoryError with very large embedded files.
1371+ f = self.open(zinfo.filename, "r")
1372+ while f.read(chunk_size): # Check CRC-32
1373+ pass
1374+ except BadZipfile:
1375+ return zinfo.filename
1376+
1377+ def getinfo(self, name):
1378+ """Return the instance of ZipInfo given 'name'."""
1379+ info = self.NameToInfo.get(name)
1380+ if info is None:
1381+ raise KeyError(
1382+ 'There is no item named %r in the archive' % name)
1383+
1384+ return info
1385+
1386+ def setpassword(self, pwd):
1387+ """Set default password for encrypted files."""
1388+ self.pwd = pwd
1389+
1390+ def read(self, name, pwd=None):
1391+ """Return file bytes (as a string) for name."""
1392+ return self.open(name, "r", pwd).read()
1393+
1394+ def open(self, name, mode="r", pwd=None):
1395+ """Return file-like object for 'name'."""
1396+ if mode not in ("r", "U", "rU"):
1397+ raise RuntimeError, 'open() requires mode "r", "U", or "rU"'
1398+ if not self.fp:
1399+ raise RuntimeError, \
1400+ "Attempt to read ZIP archive that was already closed"
1401+
1402+ # Only open a new file for instances where we were not
1403+ # given a file object in the constructor
1404+ if self._filePassed:
1405+ zef_file = self.fp
1406+ else:
1407+ zef_file = open(self.filename, 'rb')
1408+
1409+ # Make sure we have an info object
1410+ if isinstance(name, ZipInfo):
1411+ # 'name' is already an info object
1412+ zinfo = name
1413+ else:
1414+ # Get info object for name
1415+ zinfo = self.getinfo(name)
1416+
1417+ zef_file.seek(zinfo.header_offset, 0)
1418+
1419+ # Skip the file header:
1420+ fheader = zef_file.read(sizeFileHeader)
1421+ if fheader[0:4] != stringFileHeader:
1422+ raise BadZipfile, "Bad magic number for file header"
1423+
1424+ fheader = struct.unpack(structFileHeader, fheader)
1425+ fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
1426+ if fheader[_FH_EXTRA_FIELD_LENGTH]:
1427+ zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
1428+
1429+ if fname != zinfo.orig_filename:
1430+ raise BadZipfile, \
1431+ 'File name in directory "%s" and header "%s" differ.' % (
1432+ zinfo.orig_filename, fname)
1433+
1434+ # check for encrypted flag & handle password
1435+ is_encrypted = zinfo.flag_bits & 0x1
1436+ zd = None
1437+ if is_encrypted:
1438+ if not pwd:
1439+ pwd = self.pwd
1440+ if not pwd:
1441+ raise RuntimeError, "File %s is encrypted, " \
1442+ "password required for extraction" % name
1443+
1444+ zd = _ZipDecrypter(pwd)
1445+ # The first 12 bytes in the cypher stream is an encryption header
1446+ # used to strengthen the algorithm. The first 11 bytes are
1447+ # completely random, while the 12th contains the MSB of the CRC,
1448+ # or the MSB of the file time depending on the header type
1449+ # and is used to check the correctness of the password.
1450+ bytes = zef_file.read(12)
1451+ h = map(zd, bytes[0:12])
1452+ if zinfo.flag_bits & 0x8:
1453+ # compare against the file type from extended local headers
1454+ check_byte = (zinfo._raw_time >> 8) & 0xff
1455+ else:
1456+ # compare against the CRC otherwise
1457+ check_byte = (zinfo.CRC >> 24) & 0xff
1458+ if ord(h[11]) != check_byte:
1459+ raise RuntimeError("Bad password for file", name)
1460+
1461+ # build and return a ZipExtFile
1462+ if zd is None:
1463+ zef = ZipExtFile(zef_file, zinfo)
1464+ else:
1465+ zef = ZipExtFile(zef_file, zinfo, zd)
1466+
1467+ # set universal newlines on ZipExtFile if necessary
1468+ if "U" in mode:
1469+ zef.set_univ_newlines(True)
1470+ return zef
1471+
1472+ def extract(self, member, path=None, pwd=None):
1473+ """Extract a member from the archive to the current working directory,
1474+ using its full name. Its file information is extracted as accurately
1475+ as possible. `member' may be a filename or a ZipInfo object. You can
1476+ specify a different directory using `path'.
1477+ """
1478+ if not isinstance(member, ZipInfo):
1479+ member = self.getinfo(member)
1480+
1481+ if path is None:
1482+ path = os.getcwd()
1483+
1484+ return self._extract_member(member, path, pwd)
1485+
1486+ def extractall(self, path=None, members=None, pwd=None):
1487+ """Extract all members from the archive to the current working
1488+ directory. `path' specifies a different directory to extract to.
1489+ `members' is optional and must be a subset of the list returned
1490+ by namelist().
1491+ """
1492+ if members is None:
1493+ members = self.namelist()
1494+
1495+ for zipinfo in members:
1496+ self.extract(zipinfo, path, pwd)
1497+
1498+ def _extract_member(self, member, targetpath, pwd):
1499+ """Extract the ZipInfo object 'member' to a physical
1500+ file on the path targetpath.
1501+ """
1502+ # build the destination pathname, replacing
1503+ # forward slashes to platform specific separators.
1504+ # Strip trailing path separator, unless it represents the root.
1505+ if (targetpath[-1:] in (os.path.sep, os.path.altsep)
1506+ and len(os.path.splitdrive(targetpath)[1]) > 1):
1507+ targetpath = targetpath[:-1]
1508+
1509+ # don't include leading "/" from file name if present
1510+ if member.filename[0] == '/':
1511+ targetpath = os.path.join(targetpath, member.filename[1:])
1512+ else:
1513+ targetpath = os.path.join(targetpath, member.filename)
1514+
1515+ targetpath = os.path.normpath(targetpath)
1516+
1517+ # Create all upper directories if necessary.
1518+ upperdirs = os.path.dirname(targetpath)
1519+ if upperdirs and not os.path.exists(upperdirs):
1520+ os.makedirs(upperdirs)
1521+
1522+ if member.filename[-1] == '/':
1523+ if not os.path.isdir(targetpath):
1524+ os.mkdir(targetpath)
1525+ return targetpath
1526+
1527+ source = self.open(member, pwd=pwd)
1528+ target = file(targetpath, "wb")
1529+ shutil.copyfileobj(source, target)
1530+ source.close()
1531+ target.close()
1532+
1533+ return targetpath
1534+
1535+ def _writecheck(self, zinfo):
1536+ """Check for errors before writing a file to the archive."""
1537+ if zinfo.filename in self.NameToInfo:
1538+ if self.debug: # Warning for duplicate names
1539+ print "Duplicate name:", zinfo.filename
1540+ if self.mode not in ("w", "a"):
1541+ raise RuntimeError, 'write() requires mode "w" or "a"'
1542+ if not self.fp:
1543+ raise RuntimeError, \
1544+ "Attempt to write ZIP archive that was already closed"
1545+ if zinfo.compress_type == ZIP_DEFLATED and not zlib:
1546+ raise RuntimeError, \
1547+ "Compression requires the (missing) zlib module"
1548+ if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED):
1549+ raise RuntimeError, \
1550+ "That compression method is not supported"
1551+ if zinfo.file_size > ZIP64_LIMIT:
1552+ if not self._allowZip64:
1553+ raise LargeZipFile("Filesize would require ZIP64 extensions")
1554+ if zinfo.header_offset > ZIP64_LIMIT:
1555+ if not self._allowZip64:
1556+ raise LargeZipFile("Zipfile size would require ZIP64 extensions")
1557+
1558+ def write(self, filename, arcname=None, compress_type=None):
1559+ """Put the bytes from filename into the archive under the name
1560+ arcname."""
1561+ if not self.fp:
1562+ raise RuntimeError(
1563+ "Attempt to write to ZIP archive that was already closed")
1564+
1565+ st = os.stat(filename)
1566+ isdir = stat.S_ISDIR(st.st_mode)
1567+ mtime = time.localtime(st.st_mtime)
1568+ date_time = mtime[0:6]
1569+ # Create ZipInfo instance to store file information
1570+ if arcname is None:
1571+ arcname = filename
1572+ arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
1573+ while arcname[0] in (os.sep, os.altsep):
1574+ arcname = arcname[1:]
1575+ if isdir:
1576+ arcname += '/'
1577+ zinfo = ZipInfo(arcname, date_time)
1578+ zinfo.external_attr = (st[0] & 0xFFFF) << 16L # Unix attributes
1579+ if compress_type is None:
1580+ zinfo.compress_type = self.compression
1581+ else:
1582+ zinfo.compress_type = compress_type
1583+
1584+ zinfo.file_size = st.st_size
1585+ zinfo.flag_bits = 0x00
1586+ zinfo.header_offset = self.fp.tell() # Start of header bytes
1587+
1588+ self._writecheck(zinfo)
1589+ self._didModify = True
1590+
1591+ if isdir:
1592+ zinfo.file_size = 0
1593+ zinfo.compress_size = 0
1594+ zinfo.CRC = 0
1595+ self.filelist.append(zinfo)
1596+ self.NameToInfo[zinfo.filename] = zinfo
1597+ self.fp.write(zinfo.FileHeader())
1598+ return
1599+
1600+ fp = open(filename, "rb")
1601+ # Must overwrite CRC and sizes with correct data later
1602+ zinfo.CRC = CRC = 0
1603+ zinfo.compress_size = compress_size = 0
1604+ zinfo.file_size = file_size = 0
1605+ self.fp.write(zinfo.FileHeader())
1606+ if zinfo.compress_type == ZIP_DEFLATED:
1607+ cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
1608+ zlib.DEFLATED, -15)
1609+ else:
1610+ cmpr = None
1611+ while 1:
1612+ buf = fp.read(1024 * 8)
1613+ if not buf:
1614+ break
1615+ file_size = file_size + len(buf)
1616+ CRC = crc32(buf, CRC) & 0xffffffff
1617+ if cmpr:
1618+ buf = cmpr.compress(buf)
1619+ compress_size = compress_size + len(buf)
1620+ self.fp.write(buf)
1621+ fp.close()
1622+ if cmpr:
1623+ buf = cmpr.flush()
1624+ compress_size = compress_size + len(buf)
1625+ self.fp.write(buf)
1626+ zinfo.compress_size = compress_size
1627+ else:
1628+ zinfo.compress_size = file_size
1629+ zinfo.CRC = CRC
1630+ zinfo.file_size = file_size
1631+ # Seek backwards and write CRC and file sizes
1632+ position = self.fp.tell() # Preserve current position in file
1633+ self.fp.seek(zinfo.header_offset + 14, 0)
1634+ self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
1635+ zinfo.file_size))
1636+ self.fp.seek(position, 0)
1637+ self.filelist.append(zinfo)
1638+ self.NameToInfo[zinfo.filename] = zinfo
1639+
1640+ def writestr(self, zinfo_or_arcname, bytes):
1641+ """Write a file into the archive. The contents is the string
1642+ 'bytes'. 'zinfo_or_arcname' is either a ZipInfo instance or
1643+ the name of the file in the archive."""
1644+ if not isinstance(zinfo_or_arcname, ZipInfo):
1645+ zinfo = ZipInfo(filename=zinfo_or_arcname,
1646+ date_time=time.localtime(time.time())[:6])
1647+ zinfo.compress_type = self.compression
1648+ zinfo.external_attr = 0600 << 16
1649+ else:
1650+ zinfo = zinfo_or_arcname
1651+
1652+ if not self.fp:
1653+ raise RuntimeError(
1654+ "Attempt to write to ZIP archive that was already closed")
1655+
1656+ zinfo.file_size = len(bytes) # Uncompressed size
1657+ zinfo.header_offset = self.fp.tell() # Start of header bytes
1658+ self._writecheck(zinfo)
1659+ self._didModify = True
1660+ zinfo.CRC = crc32(bytes) & 0xffffffff # CRC-32 checksum
1661+ if zinfo.compress_type == ZIP_DEFLATED:
1662+ co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
1663+ zlib.DEFLATED, -15)
1664+ bytes = co.compress(bytes) + co.flush()
1665+ zinfo.compress_size = len(bytes) # Compressed size
1666+ else:
1667+ zinfo.compress_size = zinfo.file_size
1668+ zinfo.header_offset = self.fp.tell() # Start of header bytes
1669+ self.fp.write(zinfo.FileHeader())
1670+ self.fp.write(bytes)
1671+ self.fp.flush()
1672+ if zinfo.flag_bits & 0x08:
1673+ # Write CRC and file sizes after the file data
1674+ self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
1675+ zinfo.file_size))
1676+ self.filelist.append(zinfo)
1677+ self.NameToInfo[zinfo.filename] = zinfo
1678+
1679+ def __del__(self):
1680+ """Call the "close()" method in case the user forgot."""
1681+ self.close()
1682+
1683+ def close(self):
1684+ """Close the file, and for mode "w" and "a" write the ending
1685+ records."""
1686+ if self.fp is None:
1687+ return
1688+
1689+ if self.mode in ("w", "a") and self._didModify: # write ending records
1690+ count = 0
1691+ pos1 = self.fp.tell()
1692+ for zinfo in self.filelist: # write central directory
1693+ count = count + 1
1694+ dt = zinfo.date_time
1695+ dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
1696+ dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
1697+ extra = []
1698+ if zinfo.file_size > ZIP64_LIMIT \
1699+ or zinfo.compress_size > ZIP64_LIMIT:
1700+ extra.append(zinfo.file_size)
1701+ extra.append(zinfo.compress_size)
1702+ file_size = 0xffffffff
1703+ compress_size = 0xffffffff
1704+ else:
1705+ file_size = zinfo.file_size
1706+ compress_size = zinfo.compress_size
1707+
1708+ if zinfo.header_offset > ZIP64_LIMIT:
1709+ extra.append(zinfo.header_offset)
1710+ header_offset = 0xffffffffL
1711+ else:
1712+ header_offset = zinfo.header_offset
1713+
1714+ extra_data = zinfo.extra
1715+ if extra:
1716+ # Append a ZIP64 field to the extra's
1717+ extra_data = struct.pack(
1718+ '<HH' + 'Q'*len(extra),
1719+ 1, 8*len(extra), *extra) + extra_data
1720+
1721+ extract_version = max(45, zinfo.extract_version)
1722+ create_version = max(45, zinfo.create_version)
1723+ else:
1724+ extract_version = zinfo.extract_version
1725+ create_version = zinfo.create_version
1726+
1727+ try:
1728+ filename, flag_bits = zinfo._encodeFilenameFlags()
1729+ centdir = struct.pack(structCentralDir,
1730+ stringCentralDir, create_version,
1731+ zinfo.create_system, extract_version, zinfo.reserved,
1732+ flag_bits, zinfo.compress_type, dostime, dosdate,
1733+ zinfo.CRC, compress_size, file_size,
1734+ len(filename), len(extra_data), len(zinfo.comment),
1735+ 0, zinfo.internal_attr, zinfo.external_attr,
1736+ header_offset)
1737+ except DeprecationWarning:
1738+ print >>sys.stderr, (structCentralDir,
1739+ stringCentralDir, create_version,
1740+ zinfo.create_system, extract_version, zinfo.reserved,
1741+ zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
1742+ zinfo.CRC, compress_size, file_size,
1743+ len(zinfo.filename), len(extra_data), len(zinfo.comment),
1744+ 0, zinfo.internal_attr, zinfo.external_attr,
1745+ header_offset)
1746+ raise
1747+ self.fp.write(centdir)
1748+ self.fp.write(filename)
1749+ self.fp.write(extra_data)
1750+ self.fp.write(zinfo.comment)
1751+
1752+ pos2 = self.fp.tell()
1753+ # Write end-of-zip-archive record
1754+ centDirCount = count
1755+ centDirSize = pos2 - pos1
1756+ centDirOffset = pos1
1757+ if (centDirCount >= ZIP_FILECOUNT_LIMIT or
1758+ centDirOffset > ZIP64_LIMIT or
1759+ centDirSize > ZIP64_LIMIT):
1760+ # Need to write the ZIP64 end-of-archive records
1761+ zip64endrec = struct.pack(
1762+ structEndArchive64, stringEndArchive64,
1763+ 44, 45, 45, 0, 0, centDirCount, centDirCount,
1764+ centDirSize, centDirOffset)
1765+ self.fp.write(zip64endrec)
1766+
1767+ zip64locrec = struct.pack(
1768+ structEndArchive64Locator,
1769+ stringEndArchive64Locator, 0, pos2, 1)
1770+ self.fp.write(zip64locrec)
1771+ centDirCount = min(centDirCount, 0xFFFF)
1772+ centDirSize = min(centDirSize, 0xFFFFFFFF)
1773+ centDirOffset = min(centDirOffset, 0xFFFFFFFF)
1774+
1775+ # check for valid comment length
1776+ if len(self.comment) >= ZIP_MAX_COMMENT:
1777+ if self.debug > 0:
1778+ msg = 'Archive comment is too long; truncating to %d bytes' \
1779+ % ZIP_MAX_COMMENT
1780+ self.comment = self.comment[:ZIP_MAX_COMMENT]
1781+
1782+ endrec = struct.pack(structEndArchive, stringEndArchive,
1783+ 0, 0, centDirCount, centDirCount,
1784+ centDirSize, centDirOffset, len(self.comment))
1785+ self.fp.write(endrec)
1786+ self.fp.write(self.comment)
1787+ self.fp.flush()
1788+
1789+ if not self._filePassed:
1790+ self.fp.close()
1791+ self.fp = None
1792+
1793+
1794+class PyZipFile(ZipFile):
1795+ """Class to create ZIP archives with Python library files and packages."""
1796+
1797+ def writepy(self, pathname, basename = ""):
1798+ """Add all files from "pathname" to the ZIP archive.
1799+
1800+ If pathname is a package directory, search the directory and
1801+ all package subdirectories recursively for all *.py and enter
1802+ the modules into the archive. If pathname is a plain
1803+ directory, listdir *.py and enter all modules. Else, pathname
1804+ must be a Python *.py file and the module will be put into the
1805+ archive. Added modules are always module.pyo or module.pyc.
1806+ This method will compile the module.py into module.pyc if
1807+ necessary.
1808+ """
1809+ dir, name = os.path.split(pathname)
1810+ if os.path.isdir(pathname):
1811+ initname = os.path.join(pathname, "__init__.py")
1812+ if os.path.isfile(initname):
1813+ # This is a package directory, add it
1814+ if basename:
1815+ basename = "%s/%s" % (basename, name)
1816+ else:
1817+ basename = name
1818+ if self.debug:
1819+ print "Adding package in", pathname, "as", basename
1820+ fname, arcname = self._get_codename(initname[0:-3], basename)
1821+ if self.debug:
1822+ print "Adding", arcname
1823+ self.write(fname, arcname)
1824+ dirlist = os.listdir(pathname)
1825+ dirlist.remove("__init__.py")
1826+ # Add all *.py files and package subdirectories
1827+ for filename in dirlist:
1828+ path = os.path.join(pathname, filename)
1829+ root, ext = os.path.splitext(filename)
1830+ if os.path.isdir(path):
1831+ if os.path.isfile(os.path.join(path, "__init__.py")):
1832+ # This is a package directory, add it
1833+ self.writepy(path, basename) # Recursive call
1834+ elif ext == ".py":
1835+ fname, arcname = self._get_codename(path[0:-3],
1836+ basename)
1837+ if self.debug:
1838+ print "Adding", arcname
1839+ self.write(fname, arcname)
1840+ else:
1841+ # This is NOT a package directory, add its files at top level
1842+ if self.debug:
1843+ print "Adding files from directory", pathname
1844+ for filename in os.listdir(pathname):
1845+ path = os.path.join(pathname, filename)
1846+ root, ext = os.path.splitext(filename)
1847+ if ext == ".py":
1848+ fname, arcname = self._get_codename(path[0:-3],
1849+ basename)
1850+ if self.debug:
1851+ print "Adding", arcname
1852+ self.write(fname, arcname)
1853+ else:
1854+ if pathname[-3:] != ".py":
1855+ raise RuntimeError, \
1856+ 'Files added with writepy() must end with ".py"'
1857+ fname, arcname = self._get_codename(pathname[0:-3], basename)
1858+ if self.debug:
1859+ print "Adding file", arcname
1860+ self.write(fname, arcname)
1861+
1862+ def _get_codename(self, pathname, basename):
1863+ """Return (filename, archivename) for the path.
1864+
1865+ Given a module name path, return the correct file path and
1866+ archive name, compiling if necessary. For example, given
1867+ /python/lib/string, return (/python/lib/string.pyc, string).
1868+ """
1869+ file_py = pathname + ".py"
1870+ file_pyc = pathname + ".pyc"
1871+ file_pyo = pathname + ".pyo"
1872+ if os.path.isfile(file_pyo) and \
1873+ os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime:
1874+ fname = file_pyo # Use .pyo file
1875+ elif not os.path.isfile(file_pyc) or \
1876+ os.stat(file_pyc).st_mtime < os.stat(file_py).st_mtime:
1877+ import py_compile
1878+ if self.debug:
1879+ print "Compiling", file_py
1880+ try:
1881+ py_compile.compile(file_py, file_pyc, None, True)
1882+ except py_compile.PyCompileError,err:
1883+ print err.msg
1884+ fname = file_pyc
1885+ else:
1886+ fname = file_pyc
1887+ archivename = os.path.split(fname)[1]
1888+ if basename:
1889+ archivename = "%s/%s" % (basename, archivename)
1890+ return (fname, archivename)
1891+
1892+
1893+def main(args = None):
1894+ import textwrap
1895+ USAGE=textwrap.dedent("""\
1896+ Usage:
1897+ zipfile.py -l zipfile.zip # Show listing of a zipfile
1898+ zipfile.py -t zipfile.zip # Test if a zipfile is valid
1899+ zipfile.py -e zipfile.zip target # Extract zipfile into target dir
1900+ zipfile.py -c zipfile.zip src ... # Create zipfile from sources
1901+ """)
1902+ if args is None:
1903+ args = sys.argv[1:]
1904+
1905+ if not args or args[0] not in ('-l', '-c', '-e', '-t'):
1906+ print USAGE
1907+ sys.exit(1)
1908+
1909+ if args[0] == '-l':
1910+ if len(args) != 2:
1911+ print USAGE
1912+ sys.exit(1)
1913+ zf = ZipFile(args[1], 'r')
1914+ zf.printdir()
1915+ zf.close()
1916+
1917+ elif args[0] == '-t':
1918+ if len(args) != 2:
1919+ print USAGE
1920+ sys.exit(1)
1921+ zf = ZipFile(args[1], 'r')
1922+ zf.testzip()
1923+ print "Done testing"
1924+
1925+ elif args[0] == '-e':
1926+ if len(args) != 3:
1927+ print USAGE
1928+ sys.exit(1)
1929+
1930+ zf = ZipFile(args[1], 'r')
1931+ out = args[2]
1932+ for path in zf.namelist():
1933+ if path.startswith('./'):
1934+ tgt = os.path.join(out, path[2:])
1935+ else:
1936+ tgt = os.path.join(out, path)
1937+
1938+ tgtdir = os.path.dirname(tgt)
1939+ if not os.path.exists(tgtdir):
1940+ os.makedirs(tgtdir)
1941+ fp = open(tgt, 'wb')
1942+ fp.write(zf.read(path))
1943+ fp.close()
1944+ zf.close()
1945+
1946+ elif args[0] == '-c':
1947+ if len(args) < 3:
1948+ print USAGE
1949+ sys.exit(1)
1950+
1951+ def addToZip(zf, path, zippath):
1952+ if os.path.isfile(path):
1953+ zf.write(path, zippath, ZIP_DEFLATED)
1954+ elif os.path.isdir(path):
1955+ for nm in os.listdir(path):
1956+ addToZip(zf,
1957+ os.path.join(path, nm), os.path.join(zippath, nm))
1958+ # else: ignore
1959+
1960+ zf = ZipFile(args[1], 'w', allowZip64=True)
1961+ for src in args[2:]:
1962+ addToZip(zf, src, os.path.basename(src))
1963+
1964+ zf.close()
1965+
1966+if __name__ == "__main__":
1967+ main()
1968
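
For reference, the archive-writing API added above (write(), writestr(), close(), and PyZipFile.writepy()) is used like the standard zipfile module; the file appears to be a backport of the Python 2.6.6 zipfile (hence the name). A minimal usage sketch follows, assuming the backport is importable as zipfile266 and that the illustrative file and package names below exist:

    # Illustrative sketch: "example.zip", "notes.txt", "library.zip" and
    # "mypackage" are hypothetical names; zipfile266 is assumed to be on
    # the Python path.
    from zipfile266 import ZipFile, PyZipFile, ZIP_DEFLATED

    zf = ZipFile('example.zip', 'w', ZIP_DEFLATED, allowZip64=True)
    zf.write('bin/unifield-version.txt', 'unifield-version.txt')  # file from disk
    zf.writestr('notes.txt', 'generated at build time')           # in-memory data
    zf.close()                                                     # writes the central directory

    zf = ZipFile('example.zip', 'r')
    for name in zf.namelist():
        print name, len(zf.read(name))
    zf.close()

    # PyZipFile adds a whole Python package, compiling *.py to *.pyc as needed.
    pzf = PyZipFile('library.zip', 'w')
    pzf.writepy('mypackage')
    pzf.close()
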
1969=== modified file 'setup.nsi'
1970--- setup.nsi 2012-11-26 11:44:38 +0000
1971+++ setup.nsi 2013-02-28 12:40:27 +0000
1972@@ -206,6 +206,7 @@
1973
1974 nsExec::Exec '"$INSTDIR\openerp-server.exe" --stop-after-init --logfile "$INSTDIR\openerp-server.log" -s'
1975 nsExec::Exec '"$INSTDIR\service\OpenERPServerService.exe" -auto -install'
1976+ nsExec::Exec 'sc failure openerp-server-6.0 reset= 0 actions= restart/0/restart/0/restart/0'
1977 SectionEnd
1978
1979 Section -RestartServer
1980
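
The added nsExec line registers recovery actions for the openerp-server-6.0 service with the Windows Service Control Manager: restart with no delay on each of the first three failures, with a failure-count reset period of 0 seconds, so the service comes back automatically if its process dies unexpectedly. A minimal sketch of issuing the same command from Python and querying the result (an assumption for illustration, not part of the installer; requires an elevated prompt):

    # Sketch: mirrors the installer's "sc failure" call, then queries the
    # configured recovery actions with "sc qfailure". Run as administrator.
    import subprocess

    SERVICE = 'openerp-server-6.0'

    subprocess.check_call(['sc', 'failure', SERVICE,
                           'reset=', '0',
                           'actions=', 'restart/0/restart/0/restart/0'])
    subprocess.check_call(['sc', 'qfailure', SERVICE])
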
1981=== modified file 'setup.py'
1982--- setup.py 2011-03-30 17:04:32 +0000
1983+++ setup.py 2013-02-28 12:40:27 +0000
1984@@ -133,6 +133,7 @@
1985 '''Build list of data files to be installed'''
1986 files = []
1987 if os.name == 'nt':
1988+ files.append(('.', [join('bin', 'unifield-version.txt')]))
1989 os.chdir('bin')
1990 for (dp, dn, names) in os.walk('addons'):
1991 files.append((dp, map(lambda x: join('bin', dp, x), names)))
1992
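
In a distutils data_files list, each tuple maps a target directory to the files copied into it, so the new entry ships bin/unifield-version.txt into the installation root ('.') of the Windows build, next to the server executable. A small sketch of that mapping in isolation (the install path in the comment is a hypothetical example):

    # Sketch of the added data_files entry on its own.
    from os.path import join

    data_files = [
        ('.', [join('bin', 'unifield-version.txt')]),
        # -> copied to <install dir>\unifield-version.txt
    ]
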
1993=== modified file 'win32/OpenERPServerService.py'
1994--- win32/OpenERPServerService.py 2010-12-29 11:51:44 +0000
1995+++ win32/OpenERPServerService.py 2013-02-28 12:40:27 +0000
1996@@ -32,6 +32,8 @@
1997 import os
1998 import thread
1999
2000+EXIT_UPDATE_REQUIRE_RESTART = 1
2001+
2002 class OpenERPServerService(win32serviceutil.ServiceFramework):
2003 # required info
2004 _svc_name_ = "openerp-server-6.0"
2005@@ -46,8 +48,6 @@
2006 self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
2007 # a reference to the server's process
2008 self.terpprocess = None
2009- # info if the service terminates correctly or if the server crashed
2010- self.stopping = False
2011
2012
2013 def SvcStop(self):
2014@@ -73,19 +73,28 @@
2015 def StartControl(self,ws):
2016 # this listens to the Service Manager's events
2017 win32event.WaitForSingleObject(ws, win32event.INFINITE)
2018- self.stopping = True
2019
2020 def SvcDoRun(self):
2021- # Start OpenERP Server itself
2022- self.StartTERP()
2023 # start the loop waiting for the Service Manager's stop signal
2024 thread.start_new_thread(self.StartControl, (self.hWaitStop,))
2025- # Log a info message that the server is running
2026- servicemanager.LogInfoMsg("OpenERP Server up and running")
2027- # verification if the server is really running, else quit with an error
2028- self.terpprocess.wait()
2029- if not self.stopping:
2030- sys.exit("OpenERP Server check: server not running, check the logfile for more info")
2031+ while True:
2032+ # Start OpenERP Server itself
2033+ self.StartTERP()
2034+            # Log an info message that the server is running
2035+ servicemanager.LogInfoMsg("OpenERP Server up and running")
2036+            # wait until the child process terminates
2037+            # if its exit status is:
2038+            # - the special 'restart' code:
2039+            #   simply loop to relaunch the process and finish the update
2040+            # - any other exit status:
2041+            #   the server crashed; exit with that status
2042+ exit_status = self.terpprocess.wait()
2043+ if exit_status == EXIT_UPDATE_REQUIRE_RESTART:
2044+ servicemanager.LogInfoMsg("OpenERP has been updated, restarting...")
2045+ continue # restart openerp process
2046+ if exit_status == 0:
2047+ break # normal exit
2048+ sys.exit(exit_status)
2049
2050
2051
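
With this change SvcDoRun becomes a small supervision loop: it relaunches the server process whenever it exits with EXIT_UPDATE_REQUIRE_RESTART (so an update can complete across a restart), stops cleanly on exit code 0, and propagates any other exit status as a service failure. A standalone sketch of the same pattern outside the service framework (the command line is illustrative only):

    # Sketch of the restart-on-special-exit-code pattern; the command below
    # is a placeholder, not the service's actual configuration.
    import subprocess
    import sys

    EXIT_UPDATE_REQUIRE_RESTART = 1

    def supervise(cmd):
        while True:
            status = subprocess.call(cmd)
            if status == EXIT_UPDATE_REQUIRE_RESTART:
                continue            # child asked to be relaunched after an update
            if status == 0:
                break               # clean shutdown
            sys.exit(status)        # anything else: report the failure

    supervise([sys.executable, 'openerp-server.py'])
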
