Merge lp:~rcart/ubuntu/natty/bittornado/fix-420387 into lp:ubuntu/natty/bittornado

Proposed by Ronny Cardona
Status: Merged
Merge reported by: Daniel Holbach
Merged at revision: not available
Proposed branch: lp:~rcart/ubuntu/natty/bittornado/fix-420387
Merge into: lp:ubuntu/natty/bittornado
Diff against target: 28069 lines (+1407/-24161)
80 files modified
.pc/01_MANIFEST.in_remove_broken_cruft.dpatch/setup.py (+0/-28)
.pc/02_btdownloadcurses_increase_significant_digit.dpatch/btdownloadcurses.py (+0/-407)
.pc/05_bttrack_connerr_fix.dpatch/BitTornado/BT1/track.py (+0/-1137)
.pc/06_README_portchange.dpatch/README.txt (+0/-110)
.pc/07_change_report_address.dpatch/BitTornado/__init__.py (+0/-63)
.pc/08_btdownloadcurses_indent.dpatch/btdownloadcurses.py (+0/-407)
.pc/09_timtuckerfixes.dpatch/BitTornado/BT1/Connecter.py (+0/-328)
.pc/09_timtuckerfixes.dpatch/BitTornado/BT1/Encrypter.py (+0/-657)
.pc/09_timtuckerfixes.dpatch/BitTornado/BT1/Storage.py (+0/-584)
.pc/09_timtuckerfixes.dpatch/BitTornado/BT1/StreamCheck.py (+0/-135)
.pc/09_timtuckerfixes.dpatch/BitTornado/ConfigDir.py (+0/-401)
.pc/09_timtuckerfixes.dpatch/BitTornado/RawServer.py (+0/-195)
.pc/09_timtuckerfixes.dpatch/BitTornado/clock.py (+0/-27)
.pc/09_timtuckerfixes.dpatch/BitTornado/download_bt1.py (+0/-877)
.pc/09_timtuckerfixes.dpatch/BitTornado/launchmanycore.py (+0/-381)
.pc/10_removeCVScrud.dpatch/.cvsignore (+0/-4)
.pc/10_removeCVScrud.dpatch/BitTornado/.cvsignore (+0/-4)
.pc/10_removeCVScrud.dpatch/BitTornado/BT1/.cvsignore (+0/-4)
.pc/11_sorthashcheck.dpatch/BitTornado/launchmanycore.py (+0/-389)
.pc/12_fix_guis_for_2.6.dpatch/btcompletedirgui.py (+0/-192)
.pc/12_fix_guis_for_2.6.dpatch/btmaketorrentgui.py (+0/-353)
.pc/13_fix_btcompletedirgui_bug.dpatch/btcompletedirgui.py (+0/-192)
.pc/15_fix_unicode_in_makemetafile.py.dpatch/BitTornado/BT1/makemetafile.py (+0/-263)
.pc/16_fix_ipv6_in_SocketHandler.dpatch/BitTornado/SocketHandler.py (+0/-375)
.pc/17_fix_NatCheck_bufferlen_error.dpatch/BitTornado/BT1/NatCheck.py (+0/-219)
.pc/18_fix_launchmany_encrypter.dpatch/BitTornado/BT1/Encrypter.py (+0/-646)
.pc/19_fix_tracker_return_all.dpatch/BitTornado/BT1/track.py (+0/-1137)
.pc/20_tracker_cache_minor_fix.dpatch/BitTornado/BT1/track.py (+0/-1138)
.pc/21_remove_deprecated_wxPython_usage.dpatch/BitTornado/ConfigReader.py (+0/-1195)
.pc/21_remove_deprecated_wxPython_usage.dpatch/bt-t-make.py (+0/-1063)
.pc/21_remove_deprecated_wxPython_usage.dpatch/btcompletedirgui.py (+0/-192)
.pc/21_remove_deprecated_wxPython_usage.dpatch/btdownloadgui.py (+0/-2373)
.pc/21_remove_deprecated_wxPython_usage.dpatch/btmaketorrentgui.py (+0/-353)
.pc/22_fix_makemetafile_error-handling.dpatch/BitTornado/BT1/makemetafile.py (+0/-264)
.pc/23_remove_UPnP_options.dpatch/BitTornado/download_bt1.py (+0/-871)
.pc/23_remove_UPnP_options.dpatch/BitTornado/launchmanycore.py (+0/-390)
.pc/23_remove_UPnP_options.dpatch/btdownloadcurses.py (+0/-408)
.pc/23_remove_UPnP_options.dpatch/btdownloadgui.py (+0/-2368)
.pc/23_remove_UPnP_options.dpatch/btdownloadheadless.py (+0/-244)
.pc/24_clarify_ip_parameter.dpatch/README.txt (+0/-110)
.pc/25_errors_in_error_handling.dpatch/btdownloadcurses.py (+0/-408)
.pc/25_errors_in_error_handling.dpatch/btdownloadheadless.py (+0/-244)
.pc/27_remove_btdownloadheadless_curses_dependency.dpatch/btdownloadheadless.py (+0/-246)
.pc/28_float_mod_time_fix.dpatch/BitTornado/parsedir.py (+0/-150)
.pc/29_fix_urandom_error.dpatch/BitTornado/BTcrypto.py (+0/-103)
.pc/30_announce_list_only_torrents.dpatch/BitTornado/BT1/btformats.py (+0/-100)
.pc/30_announce_list_only_torrents.dpatch/btshowmetainfo.py (+0/-78)
.pc/31_fix_for_compact_reqd_off.dpatch/BitTornado/BT1/track.py (+0/-1143)
.pc/applied-patches (+0/-27)
BitTornado/BT1/Connecter.py (+0/-1)
BitTornado/BT1/Encrypter.py (+13/-4)
BitTornado/BT1/NatCheck.py (+3/-0)
BitTornado/BT1/Storage.py (+14/-18)
BitTornado/BT1/StreamCheck.py (+16/-1)
BitTornado/BT1/btformats.py (+3/-17)
BitTornado/BT1/makemetafile.py (+2/-3)
BitTornado/BT1/track.py (+13/-22)
BitTornado/BTcrypto.py (+0/-1)
BitTornado/ConfigDir.py (+17/-2)
BitTornado/ConfigReader.py (+231/-226)
BitTornado/RawServer.py (+2/-4)
BitTornado/SocketHandler.py (+1/-1)
BitTornado/__init__.py (+1/-1)
BitTornado/clock.py (+2/-5)
BitTornado/download_bt1.py (+13/-4)
BitTornado/launchmanycore.py (+5/-14)
BitTornado/parsedir.py (+1/-2)
README.txt (+4/-4)
bt-t-make.py (+255/-255)
btcompletedirgui.py (+56/-56)
btdownloadcurses.py (+6/-11)
btdownloadgui.py (+356/-351)
btdownloadheadless.py (+10/-4)
btmaketorrentgui.py (+106/-106)
btshowmetainfo.py (+54/-63)
debian/changelog (+8/-0)
debian/control (+2/-1)
debian/patches/32_use_hashlib_for_sha.patch (+210/-0)
debian/patches/series (+1/-0)
setup.py (+2/-1)
To merge this branch: bzr merge lp:~rcart/ubuntu/natty/bittornado/fix-420387
Reviewer Review Type Date Requested Status
Daniel Holbach (community) Approve
Artur Rona (community) Approve
Ubuntu branches Pending
Review via email: mp+46878@code.launchpad.net

Description of the change

 * debian/patches/32_use_hashlib_for_sha.patch:
   - Updated use of deprecated sha module to hashlib. (LP: #420387, Closes: #593653)
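For reference, every affected module gets the same forward-compatible import shim; a minimal sketch of the pattern (the full module list is in the preview diff below):

    # Prefer hashlib (available since Python 2.5); fall back to the
    # deprecated sha module on older interpreters.
    try:
        from hashlib import sha1 as sha
    except ImportError:
        from sha import sha

Call sites stay unchanged, e.g. infohash = sha(bencode(response['info'])).digest() keeps working as before.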

Revision history for this message
Scott Moser (smoser) wrote :

(copied from bug comment)

The changes look fine to me.
However, I don't think that your removal of the CVS directory will actually "stick". As I understand it, the bzr importer that maintains the lp:ubuntu/<package> branches basically does:

dget <uploaded package>
dpkg-source -x *.dsc

In doing so, the CVS dir will still be there. I'd advise you to either:
 - patch it out (with a debian/patches/32-remove-CVS-dir.patch -- this could be a pain with an active upstream)
 - live with being unable to use debcommit
 - fix debcommit to prefer .bzr over CVS

Revision history for this message
Artur Rona (ari-tczew) wrote :

1) Please don't change Standards-Version or fix other lintian warnings if it's not necessary. That's a job for Debian.

2) Accordingly, don't mention fixing lintian warnings in debian/changelog.

3) We don't mention the update-maintainer field change in debian/changelog.

4) Please add DEP3 tags to your patch. https://wiki.ubuntu.com/PackagingGuide/PatchSystems#Patch%20Tagging%20Guidelines
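For illustration, the DEP3 header the patch ends up carrying (quoted from the uploaded diff further down; the Bug-Ubuntu tag was added in a later revision):

    From: Ronny Cardona (Rcart) <email address hidden>
    Description: Updated use of deprecated sha module to hashlib.
    Origin: http://bugs.debian.org/593653#17
    Bug-Debian: http://bugs.debian.org/593653
    Bug-Ubuntu: https://launchpad.net/bugs/420387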

review: Needs Fixing
Revision history for this message
Artur Rona (ari-tczew) wrote :

5) The package is in the 3.0 (quilt) source format, so please name your patch *.patch, not *.dpatch, and update the series file. Debian should update their own files.
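In concrete terms, with 3.0 (quilt) the renamed patch only needs a matching entry appended to debian/patches/series; a sketch of the tail of that file, assuming the existing entries keep the .dpatch names listed under .pc/ above (the diff stats show series changing by +1/-0):

    31_fix_for_compact_reqd_off.dpatch
    32_use_hashlib_for_sha.patch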

6) I would like to see the following entry in debian/changelog:

  * debian/patches/32_use_hashlib_for_sha.patch:
    - Updated use of deprecated sha module to hashlib. (LP: #420387)
  * Removed old CVS dir so it can use .bzr dir for Bazaar.

7) What about forwarding this patch to Debian?

Thank you for your contribution!

review: Needs Fixing
Revision history for this message
Artur Rona (ari-tczew) wrote :

There are files patched directly. Could you try to clean that up? Also, the .pc files should be removed.

review: Needs Fixing
Revision history for this message
Ronny Cardona (rcart) wrote :

Thanks for the corrections and your time, Artur. I've updated the branch just now. All the patches are applied directly by default in the original branch, so I've cleaned that up.

Revision history for this message
Artur Rona (ari-tczew) wrote :

Now it looks better, but there are still some issues:

1) Please use revision 0.3.18-10ubuntu1 and, below it, (LP: #420387, Closes: #593653)

2) Improve the DEP3 tags:
Bug-Debian: http://bugs.debian.org/593653
Add a dot at the end of the sentence in Description ;)
About Origin: I saw on the Debian bug that the patch comes from Git - could you track down the git address and get a link to this revision?

review: Needs Fixing
Revision history for this message
Artur Rona (ari-tczew) wrote :

BTW package built fine.

Revision history for this message
Ronny Cardona (rcart) wrote :

> BTW package built fine.
Great. Now I'm looking for the git link; hopefully I'll get it right away and update the branch ^^

Revision history for this message
Ronny Cardona (rcart) wrote :

Branch updated.

About the git address: I didn't find it. It seems Debian uses SVN, and upstream uses CVS, for this project.

It looks like the patch author worked in a local git branch to generate the patch, but I'm not sure about that :/

By the way, why use that BitTornado revision in Ubuntu? Is it because of the current stage of the development cycle?

Thanks in advance

Revision history for this message
Artur Rona (ari-tczew) wrote :

Please add a DEP3 tag to 32*.patch:
Bug-Ubuntu: https://launchpad.net/bugs/420387

If you have this done, I'll approve.

review: Needs Fixing
7. By Ronny Cardona

* debian/patches/32_use_hashlib_for_sha.patch:
  - Updated use of deprecated sha module to hashlib. (LP: #420387,
  Closes: #593653)

Revision history for this message
Ronny Cardona (rcart) wrote :

Branch updated. I hope it's ready.

Thanks for all your corrections.

Revision history for this message
Artur Rona (ari-tczew) wrote :

OK, now it's the core-dev's turn.

review: Approve
Revision history for this message
Daniel Holbach (dholbach) wrote :

I'm not quite sure what happened in this branch, but I uploaded what I extracted as the minimal diff.

--- bittornado-0.3.18//debian/changelog 2010-03-21 22:36:58.000000000 +0100
+++ bittornado/debian/changelog 2011-02-09 10:11:50.020853000 +0100
@@ -1,3 +1,11 @@
+bittornado (0.3.18-10ubuntu1) natty; urgency=low
+
+  * debian/patches/32_use_hashlib_for_sha.patch:
+    - Updated use of deprecated sha module to hashlib. (LP: #420387,
+      Closes: #593653)
+
+ -- Ronny Cardona (Rcart) <email address hidden>  Mon, 24 Jan 2011 17:27:47 -0600
+
 bittornado (0.3.18-10) unstable; urgency=low

   * New patch from upstream's CVS to allow torrents that only have an
--- bittornado-0.3.18//debian/control 2010-03-21 22:16:54.000000000 +0100
+++ bittornado/debian/control 2011-02-09 10:11:50.020853000 +0100
@@ -1,7 +1,8 @@
 Source: bittornado
 Section: net
 Priority: optional
-Maintainer: Cameron Dale <email address hidden>
+Maintainer: Ubuntu Developers <email address hidden>
+XSBC-Original-Maintainer: Cameron Dale <email address hidden>
 Build-Depends: debhelper (>= 5.0.37.2)
 Build-Depends-Indep: python, python-support (>= 0.5.4), docbook-to-man
 Standards-Version: 3.8.4
--- bittornado-0.3.18//debian/patches/32_use_hashlib_for_sha.patch 1970-01-01 01:00:00.000000000 +0100
+++ bittornado/debian/patches/32_use_hashlib_for_sha.patch 2011-02-09 10:11:50.020853000 +0100
@@ -0,0 +1,210 @@
+From: Ronny Cardona (Rcart) <email address hidden>
+Description: Updated use of deprecated sha module to hashlib.
+Origin: http://bugs.debian.org/593653#17
+Bug-Debian: http://bugs.debian.org/593653
+Bug-Ubuntu: https://launchpad.net/bugs/420387
+
+Index: bittornado.fix-420387/BitTornado/BT1/makemetafile.py
+===================================================================
+--- bittornado.fix-420387.orig/BitTornado/BT1/makemetafile.py 2011-01-24 17:18:09.183076000 -0600
++++ bittornado.fix-420387/BitTornado/BT1/makemetafile.py 2011-01-24 17:18:55.483076002 -0600
+@@ -4,7 +4,10 @@
+
+ from os.path import getsize, split, join, abspath, isdir
+ from os import listdir
+-from sha import sha
++try:
++    from hashlib import sha1 as sha
++except ImportError:
++    from sha import sha
+ from copy import copy
+ from string import strip
+ from BitTornado.bencode import bencode
+Index: bittornado.fix-420387/BitTornado/BT1/Rerequester.py
+===================================================================
+--- bittornado.fix-420387.orig/BitTornado/BT1/Rerequester.py 2011-01-24 17:18:09.083076000 -0600
++++ bittornado.fix-420387/BitTornado/BT1/Rerequester.py 2011-01-24 17:18:55.483076002 -0600
+@@ -12,7 +12,10 @@
+ from traceback import print_exc
+ from socket import error, gethostbyname
+ from random import shuffle
+-from sha import sha
++try:
++    from hashlib import sha1 as sha
++except ImportError:
++    from sha import sha
+ from time import time
+ try:
+     from os import getpid
+Index: bittornado.fix-420387/BitTornado/BT1/StorageWrapper.py
+===================================================================
+--- bittornado.fix-420387.orig/BitTornado/BT1/StorageWrapper.py 2011-01-24 17:18:09.383076000 -0600
++++ bittornado.fix-420387/BitTornado/BT1/StorageWrapper...


review: Approve

Preview Diff

=== removed directory '.pc/01_MANIFEST.in_remove_broken_cruft.dpatch'
=== removed file '.pc/01_MANIFEST.in_remove_broken_cruft.dpatch/setup.py'
--- .pc/01_MANIFEST.in_remove_broken_cruft.dpatch/setup.py 2010-03-21 14:36:30 +0000
+++ .pc/01_MANIFEST.in_remove_broken_cruft.dpatch/setup.py 1970-01-01 00:00:00 +0000
@@ -1,28 +0,0 @@
-#!/usr/bin/env python
-
-# Written by Bram Cohen
-# see LICENSE.txt for license information
-
-import sys
-assert sys.version >= '2', "Install Python 2.0 or greater"
-from distutils.core import setup, Extension
-import BitTornado
-
-setup(
-    name = "BitTornado",
-    version = BitTornado.version,
-    author = "Bram Cohen, John Hoffman, Uoti Arpala et. al.",
-    author_email = "<theshadow@degreez.net>",
-    url = "http://www.bittornado.com",
-    license = "MIT",
-
-    packages = ["BitTornado","BitTornado.BT1"],
-
-    scripts = ["btdownloadgui.py", "btdownloadheadless.py",
-        "bttrack.py", "btmakemetafile.py", "btlaunchmany.py", "btcompletedir.py",
-        "btdownloadcurses.py", "btcompletedirgui.py", "btlaunchmanycurses.py",
-        "btmakemetafile.py", "btreannounce.py", "btrename.py", "btshowmetainfo.py",
-        'btmaketorrentgui.py', 'btcopyannounce.py', 'btsethttpseeds.py',
-        'bt-t-make.py',
-    ]
-    )
=== removed directory '.pc/02_btdownloadcurses_increase_significant_digit.dpatch'
=== removed file '.pc/02_btdownloadcurses_increase_significant_digit.dpatch/btdownloadcurses.py'
--- .pc/02_btdownloadcurses_increase_significant_digit.dpatch/btdownloadcurses.py 2010-03-21 14:36:30 +0000
+++ .pc/02_btdownloadcurses_increase_significant_digit.dpatch/btdownloadcurses.py 1970-01-01 00:00:00 +0000
@@ -1,407 +0,0 @@
1#!/usr/bin/env python
2
3# Written by Henry 'Pi' James
4# see LICENSE.txt for license information
5
6SPEW_SCROLL_RATE = 1
7
8from BitTornado import PSYCO
9if PSYCO.psyco:
10 try:
11 import psyco
12 assert psyco.__version__ >= 0x010100f0
13 psyco.full()
14 except:
15 pass
16
17from BitTornado.download_bt1 import BT1Download, defaults, parse_params, get_usage, get_response
18from BitTornado.RawServer import RawServer, UPnP_ERROR
19from random import seed
20from socket import error as socketerror
21from BitTornado.bencode import bencode
22from BitTornado.natpunch import UPnP_test
23from threading import Event
24from os.path import abspath
25from signal import signal, SIGWINCH
26from sha import sha
27from sys import argv, exit
28import sys
29from time import time, strftime
30from BitTornado.clock import clock
31from BitTornado import createPeerID, version
32from BitTornado.ConfigDir import ConfigDir
33
34try:
35 import curses
36 import curses.panel
37 from curses.wrapper import wrapper as curses_wrapper
38 from signal import signal, SIGWINCH
39except:
40 print 'Textmode GUI initialization failed, cannot proceed.'
41 print
42 print 'This download interface requires the standard Python module ' \
43 '"curses", which is unfortunately not available for the native ' \
44 'Windows port of Python. It is however available for the Cygwin ' \
45 'port of Python, running on all Win32 systems (www.cygwin.com).'
46 print
47 print 'You may still use "btdownloadheadless.py" to download.'
48 sys.exit(1)
49
50assert sys.version >= '2', "Install Python 2.0 or greater"
51try:
52 True
53except:
54 True = 1
55 False = 0
56
57def fmttime(n):
58 if n == 0:
59 return 'download complete!'
60 try:
61 n = int(n)
62 assert n >= 0 and n < 5184000 # 60 days
63 except:
64 return '<unknown>'
65 m, s = divmod(n, 60)
66 h, m = divmod(m, 60)
67 return 'finishing in %d:%02d:%02d' % (h, m, s)
68
69def fmtsize(n):
70 s = str(n)
71 size = s[-3:]
72 while len(s) > 3:
73 s = s[:-3]
74 size = '%s,%s' % (s[-3:], size)
75 if n > 999:
76 unit = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
77 i = 1
78 while i + 1 < len(unit) and (n >> 10) >= 999:
79 i += 1
80 n >>= 10
81 n = float(n) / (1 << 10)
82 size = '%s (%.0f %s)' % (size, n, unit[i])
83 return size
84
85
86class CursesDisplayer:
87 def __init__(self, scrwin, errlist, doneflag):
88 self.scrwin = scrwin
89 self.errlist = errlist
90 self.doneflag = doneflag
91
92 signal(SIGWINCH, self.winch_handler)
93 self.changeflag = Event()
94
95 self.done = 0
96 self.file = ''
97 self.fileSize = ''
98 self.activity = ''
99 self.status = ''
100 self.progress = ''
101 self.downloadTo = ''
102 self.downRate = '---'
103 self.upRate = '---'
104 self.shareRating = ''
105 self.seedStatus = ''
106 self.peerStatus = ''
107 self.errors = []
108 self.last_update_time = 0
109 self.spew_scroll_time = 0
110 self.spew_scroll_pos = 0
111
112 self._remake_window()
113
114 def winch_handler(self, signum, stackframe):
115 self.changeflag.set()
116 curses.endwin()
117 self.scrwin.refresh()
118 self.scrwin = curses.newwin(0, 0, 0, 0)
119 self._remake_window()
120
121 def _remake_window(self):
122 self.scrh, self.scrw = self.scrwin.getmaxyx()
123 self.scrpan = curses.panel.new_panel(self.scrwin)
124 self.labelh, self.labelw, self.labely, self.labelx = 11, 9, 1, 2
125 self.labelwin = curses.newwin(self.labelh, self.labelw,
126 self.labely, self.labelx)
127 self.labelpan = curses.panel.new_panel(self.labelwin)
128 self.fieldh, self.fieldw, self.fieldy, self.fieldx = (
129 self.labelh, self.scrw-2 - self.labelw-3,
130 1, self.labelw+3)
131 self.fieldwin = curses.newwin(self.fieldh, self.fieldw,
132 self.fieldy, self.fieldx)
133 self.fieldwin.nodelay(1)
134 self.fieldpan = curses.panel.new_panel(self.fieldwin)
135 self.spewh, self.speww, self.spewy, self.spewx = (
136 self.scrh - self.labelh - 2, self.scrw - 3, 1 + self.labelh, 2)
137 self.spewwin = curses.newwin(self.spewh, self.speww,
138 self.spewy, self.spewx)
139 self.spewpan = curses.panel.new_panel(self.spewwin)
140 try:
141 self.scrwin.border(ord('|'),ord('|'),ord('-'),ord('-'),ord(' '),ord(' '),ord(' '),ord(' '))
142 except:
143 pass
144 self.labelwin.addstr(0, 0, 'file:')
145 self.labelwin.addstr(1, 0, 'size:')
146 self.labelwin.addstr(2, 0, 'dest:')
147 self.labelwin.addstr(3, 0, 'progress:')
148 self.labelwin.addstr(4, 0, 'status:')
149 self.labelwin.addstr(5, 0, 'dl speed:')
150 self.labelwin.addstr(6, 0, 'ul speed:')
151 self.labelwin.addstr(7, 0, 'sharing:')
152 self.labelwin.addstr(8, 0, 'seeds:')
153 self.labelwin.addstr(9, 0, 'peers:')
154 curses.panel.update_panels()
155 curses.doupdate()
156 self.changeflag.clear()
157
158
159 def finished(self):
160 self.done = 1
161 self.activity = 'download succeeded!'
162 self.downRate = '---'
163 self.display(fractionDone = 1)
164
165 def failed(self):
166 self.done = 1
167 self.activity = 'download failed!'
168 self.downRate = '---'
169 self.display()
170
171 def error(self, errormsg):
172 newerrmsg = strftime('[%H:%M:%S] ') + errormsg
173 self.errors.append(newerrmsg)
174 self.errlist.append(newerrmsg)
175 self.display()
176
177 def display(self, dpflag = Event(), fractionDone = None, timeEst = None,
178 downRate = None, upRate = None, activity = None,
179 statistics = None, spew = None, **kws):
180
181 inchar = self.fieldwin.getch()
182 if inchar == 12: # ^L
183 self._remake_window()
184 elif inchar in (ord('q'),ord('Q')):
185 self.doneflag.set()
186
187 if activity is not None and not self.done:
188 self.activity = activity
189 elif timeEst is not None:
190 self.activity = fmttime(timeEst)
191 if self.changeflag.isSet():
192 return
193 if self.last_update_time + 0.1 > clock() and fractionDone not in (0.0, 1.0) and activity is not None:
194 return
195 self.last_update_time = clock()
196 if fractionDone is not None:
197 blocknum = int(self.fieldw * fractionDone)
198 self.progress = blocknum * '#' + (self.fieldw - blocknum) * '_'
199 self.status = '%s (%.1f%%)' % (self.activity, fractionDone * 100)
200 else:
201 self.status = self.activity
202 if downRate is not None:
203 self.downRate = '%.1f KB/s' % (float(downRate) / (1 << 10))
204 if upRate is not None:
205 self.upRate = '%.1f KB/s' % (float(upRate) / (1 << 10))
206 if statistics is not None:
207 if (statistics.shareRating < 0) or (statistics.shareRating > 100):
208 self.shareRating = 'oo (%.1f MB up / %.1f MB down)' % (float(statistics.upTotal) / (1<<20), float(statistics.downTotal) / (1<<20))
209 else:
210 self.shareRating = '%.3f (%.1f MB up / %.1f MB down)' % (statistics.shareRating, float(statistics.upTotal) / (1<<20), float(statistics.downTotal) / (1<<20))
211 if not self.done:
212 self.seedStatus = '%d seen now, plus %.3f distributed copies' % (statistics.numSeeds,0.001*int(1000*statistics.numCopies2))
213 else:
214 self.seedStatus = '%d seen recently, plus %.3f distributed copies' % (statistics.numOldSeeds,0.001*int(1000*statistics.numCopies))
215 self.peerStatus = '%d seen now, %.1f%% done at %.1f kB/s' % (statistics.numPeers,statistics.percentDone,float(statistics.torrentRate) / (1 << 10))
216
217 self.fieldwin.erase()
218 self.fieldwin.addnstr(0, 0, self.file, self.fieldw, curses.A_BOLD)
219 self.fieldwin.addnstr(1, 0, self.fileSize, self.fieldw)
220 self.fieldwin.addnstr(2, 0, self.downloadTo, self.fieldw)
221 if self.progress:
222 self.fieldwin.addnstr(3, 0, self.progress, self.fieldw, curses.A_BOLD)
223 self.fieldwin.addnstr(4, 0, self.status, self.fieldw)
224 self.fieldwin.addnstr(5, 0, self.downRate, self.fieldw)
225 self.fieldwin.addnstr(6, 0, self.upRate, self.fieldw)
226 self.fieldwin.addnstr(7, 0, self.shareRating, self.fieldw)
227 self.fieldwin.addnstr(8, 0, self.seedStatus, self.fieldw)
228 self.fieldwin.addnstr(9, 0, self.peerStatus, self.fieldw)
229
230 self.spewwin.erase()
231
232 if not spew:
233 errsize = self.spewh
234 if self.errors:
235 self.spewwin.addnstr(0, 0, "error(s):", self.speww, curses.A_BOLD)
236 errsize = len(self.errors)
237 displaysize = min(errsize, self.spewh)
238 displaytop = errsize - displaysize
239 for i in range(displaysize):
240 self.spewwin.addnstr(i, self.labelw, self.errors[displaytop + i],
241 self.speww-self.labelw-1, curses.A_BOLD)
242 else:
243 if self.errors:
244 self.spewwin.addnstr(0, 0, "error:", self.speww, curses.A_BOLD)
245 self.spewwin.addnstr(0, self.labelw, self.errors[-1],
246 self.speww-self.labelw-1, curses.A_BOLD)
247 self.spewwin.addnstr(2, 0, " # IP Upload Download Completed Speed", self.speww, curses.A_BOLD)
248
249
250 if self.spew_scroll_time + SPEW_SCROLL_RATE < clock():
251 self.spew_scroll_time = clock()
252 if len(spew) > self.spewh-5 or self.spew_scroll_pos > 0:
253 self.spew_scroll_pos += 1
254 if self.spew_scroll_pos > len(spew):
255 self.spew_scroll_pos = 0
256
257 for i in range(len(spew)):
258 spew[i]['lineno'] = i+1
259 spew.append({'lineno': None})
260 spew = spew[self.spew_scroll_pos:] + spew[:self.spew_scroll_pos]
261
262 for i in range(min(self.spewh - 5, len(spew))):
263 if not spew[i]['lineno']:
264 continue
265 self.spewwin.addnstr(i+3, 0, '%3d' % spew[i]['lineno'], 3)
266 self.spewwin.addnstr(i+3, 4, spew[i]['ip']+spew[i]['direction'], 16)
267 if spew[i]['uprate'] > 100:
268 self.spewwin.addnstr(i+3, 20, '%6.0f KB/s' % (float(spew[i]['uprate']) / 1000), 11)
269 self.spewwin.addnstr(i+3, 32, '-----', 5)
270 if spew[i]['uinterested'] == 1:
271 self.spewwin.addnstr(i+3, 33, 'I', 1)
272 if spew[i]['uchoked'] == 1:
273 self.spewwin.addnstr(i+3, 35, 'C', 1)
274 if spew[i]['downrate'] > 100:
275 self.spewwin.addnstr(i+3, 38, '%6.0f KB/s' % (float(spew[i]['downrate']) / 1000), 11)
276 self.spewwin.addnstr(i+3, 50, '-------', 7)
277 if spew[i]['dinterested'] == 1:
278 self.spewwin.addnstr(i+3, 51, 'I', 1)
279 if spew[i]['dchoked'] == 1:
280 self.spewwin.addnstr(i+3, 53, 'C', 1)
281 if spew[i]['snubbed'] == 1:
282 self.spewwin.addnstr(i+3, 55, 'S', 1)
283 self.spewwin.addnstr(i+3, 58, '%5.1f%%' % (float(int(spew[i]['completed']*1000))/10), 6)
284 if spew[i]['speed'] is not None:
285 self.spewwin.addnstr(i+3, 64, '%5.0f KB/s' % (float(spew[i]['speed'])/1000), 10)
286
287 if statistics is not None:
288 self.spewwin.addnstr(self.spewh-1, 0,
289 'downloading %d pieces, have %d fragments, %d of %d pieces completed'
290 % ( statistics.storage_active, statistics.storage_dirty,
291 statistics.storage_numcomplete,
292 statistics.storage_totalpieces ), self.speww-1 )
293
294 curses.panel.update_panels()
295 curses.doupdate()
296 dpflag.set()
297
298 def chooseFile(self, default, size, saveas, dir):
299 self.file = default
300 self.fileSize = fmtsize(size)
301 if saveas == '':
302 saveas = default
303 self.downloadTo = abspath(saveas)
304 return saveas
305
306def run(scrwin, errlist, params):
307 doneflag = Event()
308 d = CursesDisplayer(scrwin, errlist, doneflag)
309 try:
310 while 1:
311 configdir = ConfigDir('downloadcurses')
312 defaultsToIgnore = ['responsefile', 'url', 'priority']
313 configdir.setDefaults(defaults,defaultsToIgnore)
314 configdefaults = configdir.loadConfig()
315 defaults.append(('save_options',0,
316 "whether to save the current options as the new default configuration " +
317 "(only for btdownloadcurses.py)"))
318 try:
319 config = parse_params(params, configdefaults)
320 except ValueError, e:
321 d.error('error: ' + str(e) + '\nrun with no args for parameter explanations')
322 break
323 if not config:
324 d.error(get_usage(defaults, d.fieldw, configdefaults))
325 break
326 if config['save_options']:
327 configdir.saveConfig(config)
328 configdir.deleteOldCacheData(config['expire_cache_data'])
329
330 myid = createPeerID()
331 seed(myid)
332
333 rawserver = RawServer(doneflag, config['timeout_check_interval'],
334 config['timeout'], ipv6_enable = config['ipv6_enabled'],
335 failfunc = d.failed, errorfunc = d.error)
336
337 upnp_type = UPnP_test(config['upnp_nat_access'])
338 while True:
339 try:
340 listen_port = rawserver.find_and_bind(config['minport'], config['maxport'],
341 config['bind'], ipv6_socket_style = config['ipv6_binds_v4'],
342 upnp = upnp_type, randomizer = config['random_port'])
343 break
344 except socketerror, e:
345 if upnp_type and e == UPnP_ERROR:
346 d.error('WARNING: COULD NOT FORWARD VIA UPnP')
347 upnp_type = 0
348 continue
349 d.error("Couldn't listen - " + str(e))
350 d.failed()
351 return
352
353 response = get_response(config['responsefile'], config['url'], d.error)
354 if not response:
355 break
356
357 infohash = sha(bencode(response['info'])).digest()
358
359 dow = BT1Download(d.display, d.finished, d.error, d.error, doneflag,
360 config, response, infohash, myid, rawserver, listen_port,
361 configdir)
362
363 if not dow.saveAs(d.chooseFile):
364 break
365
366 if not dow.initFiles(old_style = True):
367 break
368 if not dow.startEngine():
369 dow.shutdown()
370 break
371 dow.startRerequester()
372 dow.autoStats()
373
374 if not dow.am_I_finished():
375 d.display(activity = 'connecting to peers')
376 rawserver.listen_forever(dow.getPortHandler())
377 d.display(activity = 'shutting down')
378 dow.shutdown()
379 break
380
381 except KeyboardInterrupt:
382 # ^C to exit..
383 pass
384 try:
385 rawserver.shutdown()
386 except:
387 pass
388 if not d.done:
389 d.failed()
390
391
392if __name__ == '__main__':
393 if argv[1:] == ['--version']:
394 print version
395 exit(0)
396 if len(argv) <= 1:
397 print "Usage: btdownloadcurses.py <global options>\n"
398 print get_usage(defaults)
399 exit(1)
400
401 errlist = []
402 curses_wrapper(run, errlist, argv[1:])
403
404 if errlist:
405 print "These errors occurred during execution:"
406 for error in errlist:
407 print error
\ No newline at end of file
=== removed directory '.pc/05_bttrack_connerr_fix.dpatch'
=== removed directory '.pc/05_bttrack_connerr_fix.dpatch/BitTornado'
=== removed directory '.pc/05_bttrack_connerr_fix.dpatch/BitTornado/BT1'
=== removed file '.pc/05_bttrack_connerr_fix.dpatch/BitTornado/BT1/track.py'
--- .pc/05_bttrack_connerr_fix.dpatch/BitTornado/BT1/track.py 2010-03-21 14:36:30 +0000
+++ .pc/05_bttrack_connerr_fix.dpatch/BitTornado/BT1/track.py 1970-01-01 00:00:00 +0000
@@ -1,1137 +0,0 @@
1# Written by Bram Cohen
2# see LICENSE.txt for license information
3
4from BitTornado.parseargs import parseargs, formatDefinitions
5from BitTornado.RawServer import RawServer, autodetect_ipv6, autodetect_socket_style
6from BitTornado.HTTPHandler import HTTPHandler, months, weekdays
7from BitTornado.parsedir import parsedir
8from NatCheck import NatCheck, CHECK_PEER_ID_ENCRYPTED
9from BitTornado.BTcrypto import CRYPTO_OK
10from T2T import T2TList
11from BitTornado.subnetparse import IP_List, ipv6_to_ipv4, to_ipv4, is_valid_ip, is_ipv4
12from BitTornado.iprangeparse import IP_List as IP_Range_List
13from BitTornado.torrentlistparse import parsetorrentlist
14from threading import Event, Thread
15from BitTornado.bencode import bencode, bdecode, Bencached
16from BitTornado.zurllib import urlopen, quote, unquote
17from Filter import Filter
18from urlparse import urlparse
19from os import rename, getpid
20from os.path import exists, isfile
21from cStringIO import StringIO
22from traceback import print_exc
23from time import time, gmtime, strftime, localtime
24from BitTornado.clock import clock
25from random import shuffle, seed, randrange
26from sha import sha
27from types import StringType, IntType, LongType, ListType, DictType
28from binascii import b2a_hex, a2b_hex, a2b_base64
29from string import lower
30import sys, os
31import signal
32import re
33import BitTornado.__init__
34from BitTornado.__init__ import version, createPeerID
35try:
36 True
37except:
38 True = 1
39 False = 0
40 bool = lambda x: not not x
41
42defaults = [
43 ('port', 80, "Port to listen on."),
44 ('dfile', None, 'file to store recent downloader info in'),
45 ('bind', '', 'comma-separated list of ips/hostnames to bind to locally'),
46# ('ipv6_enabled', autodetect_ipv6(),
47 ('ipv6_enabled', 0,
48 'allow the client to connect to peers via IPv6'),
49 ('ipv6_binds_v4', autodetect_socket_style(),
50 'set if an IPv6 server socket will also field IPv4 connections'),
51 ('socket_timeout', 15, 'timeout for closing connections'),
52 ('save_dfile_interval', 5 * 60, 'seconds between saving dfile'),
53 ('timeout_downloaders_interval', 45 * 60, 'seconds between expiring downloaders'),
54 ('reannounce_interval', 30 * 60, 'seconds downloaders should wait between reannouncements'),
55 ('response_size', 50, 'number of peers to send in an info message'),
56 ('timeout_check_interval', 5,
57 'time to wait between checking if any connections have timed out'),
58 ('nat_check', 3,
59 "how many times to check if a downloader is behind a NAT (0 = don't check)"),
60 ('log_nat_checks', 0,
61 "whether to add entries to the log for nat-check results"),
62 ('min_time_between_log_flushes', 3.0,
63 'minimum time it must have been since the last flush to do another one'),
64 ('min_time_between_cache_refreshes', 600.0,
65 'minimum time in seconds before a cache is considered stale and is flushed'),
66 ('allowed_dir', '', 'only allow downloads for .torrents in this dir'),
67 ('allowed_list', '', 'only allow downloads for hashes in this list (hex format, one per line)'),
68 ('allowed_controls', 0, 'allow special keys in torrents in the allowed_dir to affect tracker access'),
69 ('multitracker_enabled', 0, 'whether to enable multitracker operation'),
70 ('multitracker_allowed', 'autodetect', 'whether to allow incoming tracker announces (can be none, autodetect or all)'),
71 ('multitracker_reannounce_interval', 2 * 60, 'seconds between outgoing tracker announces'),
72 ('multitracker_maxpeers', 20, 'number of peers to get in a tracker announce'),
73 ('aggregate_forward', '', 'format: <url>[,<password>] - if set, forwards all non-multitracker to this url with this optional password'),
74 ('aggregator', '0', 'whether to act as a data aggregator rather than a tracker. If enabled, may be 1, or <password>; ' +
75 'if password is set, then an incoming password is required for access'),
76 ('hupmonitor', 0, 'whether to reopen the log file upon receipt of HUP signal'),
77 ('http_timeout', 60,
78 'number of seconds to wait before assuming that an http connection has timed out'),
79 ('parse_dir_interval', 60, 'seconds between reloading of allowed_dir or allowed_file ' +
80 'and allowed_ips and banned_ips lists'),
81 ('show_infopage', 1, "whether to display an info page when the tracker's root dir is loaded"),
82 ('infopage_redirect', '', 'a URL to redirect the info page to'),
83 ('show_names', 1, 'whether to display names from allowed dir'),
84 ('favicon', '', 'file containing x-icon data to return when browser requests favicon.ico'),
85 ('allowed_ips', '', 'only allow connections from IPs specified in the given file; '+
86 'file contains subnet data in the format: aa.bb.cc.dd/len'),
87 ('banned_ips', '', "don't allow connections from IPs specified in the given file; "+
88 'file contains IP range data in the format: xxx:xxx:ip1-ip2'),
89 ('only_local_override_ip', 2, "ignore the ip GET parameter from machines which aren't on local network IPs " +
90 "(0 = never, 1 = always, 2 = ignore if NAT checking is not enabled)"),
91 ('logfile', '', 'file to write the tracker logs, use - for stdout (default)'),
92 ('allow_get', 0, 'use with allowed_dir; adds a /file?hash={hash} url that allows users to download the torrent file'),
93 ('keep_dead', 0, 'keep dead torrents after they expire (so they still show up on your /scrape and web page)'),
94 ('scrape_allowed', 'full', 'scrape access allowed (can be none, specific or full)'),
95 ('dedicated_seed_id', '', 'allows tracker to monitor dedicated seed(s) and flag torrents as seeded'),
96 ('compact_reqd', 1, "only allow peers that accept a compact response"),
97 ]
98
99def statefiletemplate(x):
100 if type(x) != DictType:
101 raise ValueError
102 for cname, cinfo in x.items():
103 if cname == 'peers':
104 for y in cinfo.values(): # The 'peers' key is a dictionary of SHA hashes (torrent ids)
105 if type(y) != DictType: # ... for the active torrents, and each is a dictionary
106 raise ValueError
107 for id, info in y.items(): # ... of client ids interested in that torrent
108 if (len(id) != 20):
109 raise ValueError
110 if type(info) != DictType: # ... each of which is also a dictionary
111 raise ValueError # ... which has an IP, a Port, and a Bytes Left count for that client for that torrent
112 if type(info.get('ip', '')) != StringType:
113 raise ValueError
114 port = info.get('port')
115 if type(port) not in (IntType,LongType) or port < 0:
116 raise ValueError
117 left = info.get('left')
118 if type(left) not in (IntType,LongType) or left < 0:
119 raise ValueError
120 if type(info.get('supportcrypto')) not in (IntType,LongType):
121 raise ValueError
122 if type(info.get('requirecrypto')) not in (IntType,LongType):
123 raise ValueError
124 elif cname == 'completed':
125 if (type(cinfo) != DictType): # The 'completed' key is a dictionary of SHA hashes (torrent ids)
126 raise ValueError # ... for keeping track of the total completions per torrent
127 for y in cinfo.values(): # ... each torrent has an integer value
128 if type(y) not in (IntType,LongType):
129 raise ValueError # ... for the number of reported completions for that torrent
130 elif cname == 'allowed':
131 if (type(cinfo) != DictType): # a list of info_hashes and included data
132 raise ValueError
133 if x.has_key('allowed_dir_files'):
134 adlist = [z[1] for z in x['allowed_dir_files'].values()]
135 for y in cinfo.keys(): # and each should have a corresponding key here
136 if not y in adlist:
137 raise ValueError
138 elif cname == 'allowed_dir_files':
139 if (type(cinfo) != DictType): # a list of files, their attributes and info hashes
140 raise ValueError
141 dirkeys = {}
142 for y in cinfo.values(): # each entry should have a corresponding info_hash
143 if not y[1]:
144 continue
145 if not x['allowed'].has_key(y[1]):
146 raise ValueError
147 if dirkeys.has_key(y[1]): # and each should have a unique info_hash
148 raise ValueError
149 dirkeys[y[1]] = 1
150
151
152alas = 'your file may exist elsewhere in the universe\nbut alas, not here\n'
153
154local_IPs = IP_List()
155local_IPs.set_intranet_addresses()
156
157
158def isotime(secs = None):
159 if secs == None:
160 secs = time()
161 return strftime('%Y-%m-%d %H:%M UTC', gmtime(secs))
162
163http_via_filter = re.compile(' for ([0-9.]+)\Z')
164
165def _get_forwarded_ip(headers):
166 header = headers.get('x-forwarded-for')
167 if header:
168 try:
169 x,y = header.split(',')
170 except:
171 return header
172 if is_valid_ip(x) and not local_IPs.includes(x):
173 return x
174 return y
175 header = headers.get('client-ip')
176 if header:
177 return header
178 header = headers.get('via')
179 if header:
180 x = http_via_filter.search(header)
181 try:
182 return x.group(1)
183 except:
184 pass
185 header = headers.get('from')
186 #if header:
187 # return header
188 #return None
189 return header
190
191def get_forwarded_ip(headers):
192 x = _get_forwarded_ip(headers)
193 if not is_valid_ip(x) or local_IPs.includes(x):
194 return None
195 return x
196
197def compact_peer_info(ip, port):
198 try:
199 s = ( ''.join([chr(int(i)) for i in ip.split('.')])
200 + chr((port & 0xFF00) >> 8) + chr(port & 0xFF) )
201 if len(s) != 6:
202 raise ValueError
203 except:
204 s = '' # not a valid IP, must be a domain name
205 return s
206
207class Tracker:
208 def __init__(self, config, rawserver):
209 self.config = config
210 self.response_size = config['response_size']
211 self.dfile = config['dfile']
212 self.natcheck = config['nat_check']
213 favicon = config['favicon']
214 self.parse_dir_interval = config['parse_dir_interval']
215 self.favicon = None
216 if favicon:
217 try:
218 h = open(favicon,'r')
219 self.favicon = h.read()
220 h.close()
221 except:
222 print "**warning** specified favicon file -- %s -- does not exist." % favicon
223 self.rawserver = rawserver
224 self.cached = {} # format: infohash: [[time1, l1, s1], [time2, l2, s2], ...]
225 self.cached_t = {} # format: infohash: [time, cache]
226 self.times = {}
227 self.state = {}
228 self.seedcount = {}
229
230 self.allowed_IPs = None
231 self.banned_IPs = None
232 if config['allowed_ips'] or config['banned_ips']:
233 self.allowed_ip_mtime = 0
234 self.banned_ip_mtime = 0
235 self.read_ip_lists()
236
237 self.only_local_override_ip = config['only_local_override_ip']
238 if self.only_local_override_ip == 2:
239 self.only_local_override_ip = not config['nat_check']
240
241 if CHECK_PEER_ID_ENCRYPTED and not CRYPTO_OK:
242 print ('**warning** crypto library not installed,' +
243 ' cannot completely verify encrypted peers')
244
245 if exists(self.dfile):
246 try:
247 h = open(self.dfile, 'rb')
248 ds = h.read()
249 h.close()
250 tempstate = bdecode(ds)
251 if not tempstate.has_key('peers'):
252 tempstate = {'peers': tempstate}
253 statefiletemplate(tempstate)
254 self.state = tempstate
255 except:
256 print '**warning** statefile '+self.dfile+' corrupt; resetting'
257 self.downloads = self.state.setdefault('peers', {})
258 self.completed = self.state.setdefault('completed', {})
259
260 self.becache = {}
261 ''' format: infohash: [[l0, s0], [l1, s1], ...]
262 l0,s0 = compact, not requirecrypto=1
263 l1,s1 = compact, only supportcrypto=1
264 l2,s2 = [compact, crypto_flag], all peers
265 if --compact_reqd 0:
266 l3,s3 = [ip,port,id]
267 l4,l4 = [ip,port] nopeerid
268 '''
269 if config['compact_reqd']:
270 x = 3
271 else:
272 x = 5
273 self.cache_default = [({},{}) for i in xrange(x)]
274 for infohash, ds in self.downloads.items():
275 self.seedcount[infohash] = 0
276 for x,y in ds.items():
277 ip = y['ip']
278 if ( (self.allowed_IPs and not self.allowed_IPs.includes(ip))
279 or (self.banned_IPs and self.banned_IPs.includes(ip)) ):
280 del ds[x]
281 continue
282 if not y['left']:
283 self.seedcount[infohash] += 1
284 if y.get('nat',-1):
285 continue
286 gip = y.get('given_ip')
287 if is_valid_ip(gip) and (
288 not self.only_local_override_ip or local_IPs.includes(ip) ):
289 ip = gip
290 self.natcheckOK(infohash,x,ip,y['port'],y)
291
292 for x in self.downloads.keys():
293 self.times[x] = {}
294 for y in self.downloads[x].keys():
295 self.times[x][y] = 0
296
297 self.trackerid = createPeerID('-T-')
298 seed(self.trackerid)
299
300 self.reannounce_interval = config['reannounce_interval']
301 self.save_dfile_interval = config['save_dfile_interval']
302 self.show_names = config['show_names']
303 rawserver.add_task(self.save_state, self.save_dfile_interval)
304 self.prevtime = clock()
305 self.timeout_downloaders_interval = config['timeout_downloaders_interval']
306 rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
307 self.logfile = None
308 self.log = None
309 if (config['logfile']) and (config['logfile'] != '-'):
310 try:
311 self.logfile = config['logfile']
312 self.log = open(self.logfile,'a')
313 sys.stdout = self.log
314 print "# Log Started: ", isotime()
315 except:
316 print "**warning** could not redirect stdout to log file: ", sys.exc_info()[0]
317
318 if config['hupmonitor']:
319 def huphandler(signum, frame, self = self):
320 try:
321 self.log.close ()
322 self.log = open(self.logfile,'a')
323 sys.stdout = self.log
324 print "# Log reopened: ", isotime()
325 except:
326 print "**warning** could not reopen logfile"
327
328 signal.signal(signal.SIGHUP, huphandler)
329
330 self.allow_get = config['allow_get']
331
332 self.t2tlist = T2TList(config['multitracker_enabled'], self.trackerid,
333 config['multitracker_reannounce_interval'],
334 config['multitracker_maxpeers'], config['http_timeout'],
335 self.rawserver)
336
337 if config['allowed_list']:
338 if config['allowed_dir']:
339 print '**warning** allowed_dir and allowed_list options cannot be used together'
340 print '**warning** disregarding allowed_dir'
341 config['allowed_dir'] = ''
342 self.allowed = self.state.setdefault('allowed_list',{})
343 self.allowed_list_mtime = 0
344 self.parse_allowed()
345 self.remove_from_state('allowed','allowed_dir_files')
346 if config['multitracker_allowed'] == 'autodetect':
347 config['multitracker_allowed'] = 'none'
348 config['allowed_controls'] = 0
349
350 elif config['allowed_dir']:
351 self.allowed = self.state.setdefault('allowed',{})
352 self.allowed_dir_files = self.state.setdefault('allowed_dir_files',{})
353 self.allowed_dir_blocked = {}
354 self.parse_allowed()
355 self.remove_from_state('allowed_list')
356
357 else:
358 self.allowed = None
359 self.remove_from_state('allowed','allowed_dir_files', 'allowed_list')
360 if config['multitracker_allowed'] == 'autodetect':
361 config['multitracker_allowed'] = 'none'
362 config['allowed_controls'] = 0
363
364 self.uq_broken = unquote('+') != ' '
365 self.keep_dead = config['keep_dead']
366 self.Filter = Filter(rawserver.add_task)
367
368 aggregator = config['aggregator']
369 if aggregator == '0':
370 self.is_aggregator = False
371 self.aggregator_key = None
372 else:
373 self.is_aggregator = True
374 if aggregator == '1':
375 self.aggregator_key = None
376 else:
377 self.aggregator_key = aggregator
378 self.natcheck = False
379
380 send = config['aggregate_forward']
381 if not send:
382 self.aggregate_forward = None
383 else:
384 try:
385 self.aggregate_forward, self.aggregate_password = send.split(',')
386 except:
387 self.aggregate_forward = send
388 self.aggregate_password = None
389
390 self.dedicated_seed_id = config['dedicated_seed_id']
391 self.is_seeded = {}
392
393 self.cachetime = 0
394 self.cachetimeupdate()
395
396 def cachetimeupdate(self):
397 self.cachetime += 1 # raw clock, but more efficient for cache
398 self.rawserver.add_task(self.cachetimeupdate,1)
399
400 def aggregate_senddata(self, query):
401 url = self.aggregate_forward+'?'+query
402 if self.aggregate_password is not None:
403 url += '&password='+self.aggregate_password
404 rq = Thread(target = self._aggregate_senddata, args = [url])
405 rq.setDaemon(False)
406 rq.start()
407
408 def _aggregate_senddata(self, url): # just send, don't attempt to error check,
409 try: # discard any returned data
410 h = urlopen(url)
411 h.read()
412 h.close()
413 except:
414 return
415
416
417 def get_infopage(self):
418 try:
419 if not self.config['show_infopage']:
420 return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
421 red = self.config['infopage_redirect']
422 if red:
423 return (302, 'Found', {'Content-Type': 'text/html', 'Location': red},
424 '<A HREF="'+red+'">Click Here</A>')
425
426 s = StringIO()
427 s.write('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' \
428 '<html><head><title>BitTorrent download info</title>\n')
429 if self.favicon is not None:
430 s.write('<link rel="shortcut icon" href="/favicon.ico">\n')
431 s.write('</head>\n<body>\n' \
432 '<h3>BitTorrent download info</h3>\n'\
433 '<ul>\n'
434 '<li><strong>tracker version:</strong> %s</li>\n' \
435 '<li><strong>server time:</strong> %s</li>\n' \
436 '</ul>\n' % (version, isotime()))
437 if self.config['allowed_dir']:
438 if self.show_names:
439 names = [ (self.allowed[hash]['name'],hash)
440 for hash in self.allowed.keys() ]
441 else:
442 names = [ (None,hash)
443 for hash in self.allowed.keys() ]
444 else:
445 names = [ (None,hash) for hash in self.downloads.keys() ]
446 if not names:
447 s.write('<p>not tracking any files yet...</p>\n')
448 else:
449 names.sort()
450 tn = 0
451 tc = 0
452 td = 0
453 tt = 0 # Total transferred
454 ts = 0 # Total size
455 nf = 0 # Number of files displayed
456 if self.config['allowed_dir'] and self.show_names:
457 s.write('<table summary="files" border="1">\n' \
458 '<tr><th>info hash</th><th>torrent name</th><th align="right">size</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th><th align="right">transferred</th></tr>\n')
459 else:
460 s.write('<table summary="files">\n' \
461 '<tr><th>info hash</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th></tr>\n')
462 for name,hash in names:
463 l = self.downloads[hash]
464 n = self.completed.get(hash, 0)
465 tn = tn + n
466 c = self.seedcount[hash]
467 tc = tc + c
468 d = len(l) - c
469 td = td + d
470 if self.config['allowed_dir'] and self.show_names:
471 if self.allowed.has_key(hash):
472 nf = nf + 1
473 sz = self.allowed[hash]['length'] # size
474 ts = ts + sz
475 szt = sz * n # Transferred for this torrent
476 tt = tt + szt
477 if self.allow_get == 1:
478 linkname = '<a href="/file?info_hash=' + quote(hash) + '">' + name + '</a>'
479 else:
480 linkname = name
481 s.write('<tr><td><code>%s</code></td><td>%s</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td><td align="right">%s</td></tr>\n' \
482 % (b2a_hex(hash), linkname, size_format(sz), c, d, n, size_format(szt)))
483 else:
484 s.write('<tr><td><code>%s</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td></tr>\n' \
485 % (b2a_hex(hash), c, d, n))
486 if self.config['allowed_dir'] and self.show_names:
487 s.write('<tr><td align="right" colspan="2">%i files</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td><td align="right">%s</td></tr>\n'
488 % (nf, size_format(ts), tc, td, tn, size_format(tt)))
489 else:
490 s.write('<tr><td align="right">%i files</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td></tr>\n'
491 % (nf, tc, td, tn))
492 s.write('</table>\n' \
493 '<ul>\n' \
494 '<li><em>info hash:</em> SHA1 hash of the "info" section of the metainfo (*.torrent)</li>\n' \
495 '<li><em>complete:</em> number of connected clients with the complete file</li>\n' \
496 '<li><em>downloading:</em> number of connected clients still downloading</li>\n' \
497 '<li><em>downloaded:</em> reported complete downloads</li>\n' \
498 '<li><em>transferred:</em> torrent size * total downloaded (does not include partial transfers)</li>\n' \
499 '</ul>\n')
500
501 s.write('</body>\n' \
502 '</html>\n')
503 return (200, 'OK', {'Content-Type': 'text/html; charset=iso-8859-1'}, s.getvalue())
504 except:
505 print_exc()
506 return (500, 'Internal Server Error', {'Content-Type': 'text/html; charset=iso-8859-1'}, 'Server Error')
507
508
509 def scrapedata(self, hash, return_name = True):
510 l = self.downloads[hash]
511 n = self.completed.get(hash, 0)
512 c = self.seedcount[hash]
513 d = len(l) - c
514 f = {'complete': c, 'incomplete': d, 'downloaded': n}
515 if return_name and self.show_names and self.config['allowed_dir']:
516 f['name'] = self.allowed[hash]['name']
517 return (f)
518
519 def get_scrape(self, paramslist):
520 fs = {}
521 if paramslist.has_key('info_hash'):
522 if self.config['scrape_allowed'] not in ['specific', 'full']:
523 return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
524 bencode({'failure reason':
525 'specific scrape function is not available with this tracker.'}))
526 for hash in paramslist['info_hash']:
527 if self.allowed is not None:
528 if self.allowed.has_key(hash):
529 fs[hash] = self.scrapedata(hash)
530 else:
531 if self.downloads.has_key(hash):
532 fs[hash] = self.scrapedata(hash)
533 else:
534 if self.config['scrape_allowed'] != 'full':
535 return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
536 bencode({'failure reason':
537 'full scrape function is not available with this tracker.'}))
538 if self.allowed is not None:
539 keys = self.allowed.keys()
540 else:
541 keys = self.downloads.keys()
542 for hash in keys:
543 fs[hash] = self.scrapedata(hash)
544
545 return (200, 'OK', {'Content-Type': 'text/plain'}, bencode({'files': fs}))
546
547
548 def get_file(self, hash):
549 if not self.allow_get:
550 return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
551 'get function is not available with this tracker.')
552 if not self.allowed.has_key(hash):
553 return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
554 fname = self.allowed[hash]['file']
555 fpath = self.allowed[hash]['path']
556 return (200, 'OK', {'Content-Type': 'application/x-bittorrent',
557 'Content-Disposition': 'attachment; filename=' + fname},
558 open(fpath, 'rb').read())
559
560
561 def check_allowed(self, infohash, paramslist):
562 if ( self.aggregator_key is not None
563 and not ( paramslist.has_key('password')
564 and paramslist['password'][0] == self.aggregator_key ) ):
565 return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
566 bencode({'failure reason':
567 'Requested download is not authorized for use with this tracker.'}))
568
569 if self.allowed is not None:
570 if not self.allowed.has_key(infohash):
571 return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
572 bencode({'failure reason':
573 'Requested download is not authorized for use with this tracker.'}))
574 if self.config['allowed_controls']:
575 if self.allowed[infohash].has_key('failure reason'):
576 return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
577 bencode({'failure reason': self.allowed[infohash]['failure reason']}))
578
579 if paramslist.has_key('tracker'):
580 if ( self.config['multitracker_allowed'] == 'none' or # turned off
581 paramslist['peer_id'][0] == self.trackerid ): # oops! contacted myself
582 return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
583 bencode({'failure reason': 'disallowed'}))
584
585 if ( self.config['multitracker_allowed'] == 'autodetect'
586 and not self.allowed[infohash].has_key('announce-list') ):
587 return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
588 bencode({'failure reason':
589 'Requested download is not authorized for multitracker use.'}))
590
591 return None
592
593
594 def add_data(self, infohash, event, ip, paramslist):
595 peers = self.downloads.setdefault(infohash, {})
596 ts = self.times.setdefault(infohash, {})
597 self.completed.setdefault(infohash, 0)
598 self.seedcount.setdefault(infohash, 0)
599
600 def params(key, default = None, l = paramslist):
601 if l.has_key(key):
602 return l[key][0]
603 return default
604
605 myid = params('peer_id','')
606 if len(myid) != 20:
607 raise ValueError, 'id not of length 20'
608 if event not in ['started', 'completed', 'stopped', 'snooped', None]:
609 raise ValueError, 'invalid event'
610 port = params('cryptoport')
611 if port is None:
612 port = params('port','')
613 port = long(port)
614 if port < 0 or port > 65535:
615 raise ValueError, 'invalid port'
616 left = long(params('left',''))
617 if left < 0:
618 raise ValueError, 'invalid amount left'
619 uploaded = long(params('uploaded',''))
620 downloaded = long(params('downloaded',''))
621 if params('supportcrypto'):
622 supportcrypto = 1
623 try:
624 s = int(params['requirecrypto'])
625 chr(s)
626 except:
627 s = 0
628 requirecrypto = s
629 else:
630 supportcrypto = 0
631 requirecrypto = 0
632
633 peer = peers.get(myid)
634 islocal = local_IPs.includes(ip)
635 mykey = params('key')
636 if peer:
637 auth = peer.get('key',-1) == mykey or peer.get('ip') == ip
638
639 gip = params('ip')
640 if is_valid_ip(gip) and (islocal or not self.only_local_override_ip):
641 ip1 = gip
642 else:
643 ip1 = ip
644
645 if params('numwant') is not None:
646 rsize = min(int(params('numwant')),self.response_size)
647 else:
648 rsize = self.response_size
649
650 if event == 'stopped':
651 if peer:
652 if auth:
653 self.delete_peer(infohash,myid)
654
655 elif not peer:
656 ts[myid] = clock()
657 peer = { 'ip': ip, 'port': port, 'left': left,
658 'supportcrypto': supportcrypto,
659 'requirecrypto': requirecrypto }
660 if mykey:
661 peer['key'] = mykey
662 if gip:
663 peer['given ip'] = gip
664 if port:
665 if not self.natcheck or islocal:
666 peer['nat'] = 0
667 self.natcheckOK(infohash,myid,ip1,port,peer)
668 else:
669 NatCheck(self.connectback_result,infohash,myid,ip1,port,
670 self.rawserver,encrypted=requirecrypto)
671 else:
672 peer['nat'] = 2**30
673 if event == 'completed':
674 self.completed[infohash] += 1
675 if not left:
676 self.seedcount[infohash] += 1
677
678 peers[myid] = peer
679
680 else:
681 if not auth:
682 return rsize # return w/o changing stats
683
684 ts[myid] = clock()
685 if not left and peer['left']:
686 self.completed[infohash] += 1
687 self.seedcount[infohash] += 1
688 if not peer.get('nat', -1):
689 for bc in self.becache[infohash]:
690 bc[1][myid] = bc[0][myid]
691 del bc[0][myid]
692 elif left and not peer['left']:
693 self.completed[infohash] -= 1
694 self.seedcount[infohash] -= 1
695 if not peer.get('nat', -1):
696 for bc in self.becache[infohash]:
697 bc[0][myid] = bc[1][myid]
698 del bc[1][myid]
699 peer['left'] = left
700
701 if port:
702 recheck = False
703 if ip != peer['ip']:
704 peer['ip'] = ip
705 recheck = True
706 if gip != peer.get('given ip'):
707 if gip:
708 peer['given ip'] = gip
709 elif peer.has_key('given ip'):
710 del peer['given ip']
711 recheck = True
712
713 natted = peer.get('nat', -1)
714 if recheck:
715 if natted == 0:
716 l = self.becache[infohash]
717 y = not peer['left']
718 for x in l:
719 del x[y][myid]
720 if natted >= 0:
721 del peer['nat'] # restart NAT testing
722 if natted and natted < self.natcheck:
723 recheck = True
724
725 if recheck:
726 if not self.natcheck or islocal:
727 peer['nat'] = 0
728 self.natcheckOK(infohash,myid,ip1,port,peer)
729 else:
730 NatCheck(self.connectback_result,infohash,myid,ip1,port,
731 self.rawserver,encrypted=requirecrypto)
732
733 return rsize
734
735
736 def peerlist(self, infohash, stopped, tracker, is_seed,
737 return_type, rsize, supportcrypto):
738 data = {} # return data
739 seeds = self.seedcount[infohash]
740 data['complete'] = seeds
741 data['incomplete'] = len(self.downloads[infohash]) - seeds
742
743 if ( self.config['allowed_controls']
744 and self.allowed[infohash].has_key('warning message') ):
745 data['warning message'] = self.allowed[infohash]['warning message']
746
747 if tracker:
748 data['interval'] = self.config['multitracker_reannounce_interval']
749 if not rsize:
750 return data
751 cache = self.cached_t.setdefault(infohash, None)
752 if ( not cache or len(cache[1]) < rsize
753 or cache[0] + self.config['min_time_between_cache_refreshes'] < clock() ):
754 bc = self.becache.setdefault(infohash,self.cache_default)
755 cache = [ clock(), bc[0][0].values() + bc[0][1].values() ]
756 self.cached_t[infohash] = cache
757 shuffle(cache[1])
758 cache = cache[1]
759
760 data['peers'] = cache[-rsize:]
761 del cache[-rsize:]
762 return data
763
764 data['interval'] = self.reannounce_interval
765 if stopped or not rsize: # save some bandwidth
766 data['peers'] = []
767 return data
768
769 bc = self.becache.setdefault(infohash,self.cache_default)
770 len_l = len(bc[2][0])
771 len_s = len(bc[2][1])
772 if not (len_l+len_s): # caches are empty!
773 data['peers'] = []
774 return data
775 l_get_size = int(float(rsize)*(len_l)/(len_l+len_s))
776 cache = self.cached.setdefault(infohash,[None,None,None])[return_type]
777 if cache and ( not cache[1]
778 or (is_seed and len(cache[1]) < rsize)
779 or len(cache[1]) < l_get_size
780 or cache[0]+self.config['min_time_between_cache_refreshes'] < self.cachetime ):
781 cache = None
782 if not cache:
783 peers = self.downloads[infohash]
784 if self.config['compact_reqd']:
785 vv = ([],[],[])
786 else:
787 vv = ([],[],[],[],[])
788 for key, ip, port in self.t2tlist.harvest(infohash): # empty if disabled
789 if not peers.has_key(key):
790 cp = compact_peer_info(ip, port)
791 vv[0].append(cp)
792 vv[2].append((cp,'\x00'))
793 if not self.config['compact_reqd']:
794 vv[3].append({'ip': ip, 'port': port, 'peer id': key})
795 vv[4].append({'ip': ip, 'port': port})
796 cache = [ self.cachetime,
797 bc[return_type][0].values()+vv[return_type],
798 bc[return_type][1].values() ]
799 shuffle(cache[1])
800 shuffle(cache[2])
801 self.cached[infohash][return_type] = cache
802 for rr in xrange(len(self.cached[infohash])):
803 if rr != return_type:
804 try:
805 self.cached[infohash][rr][1].extend(vv[rr])
806 except:
807 pass
808 if len(cache[1]) < l_get_size:
809 peerdata = cache[1]
810 if not is_seed:
811 peerdata.extend(cache[2])
812 cache[1] = []
813 cache[2] = []
814 else:
815 if not is_seed:
816 peerdata = cache[2][l_get_size-rsize:]
817 del cache[2][l_get_size-rsize:]
818 rsize -= len(peerdata)
819 else:
820 peerdata = []
821 if rsize:
822 peerdata.extend(cache[1][-rsize:])
823 del cache[1][-rsize:]
824 if return_type == 0:
825 data['peers'] = ''.join(peerdata)
826 elif return_type == 1:
827 data['crypto_flags'] = '\x01'*len(peerdata)  # one flag byte per peer
828 data['peers'] = ''.join(peerdata)
829 elif return_type == 2:
830 data['crypto_flags'] = ''.join([p[1] for p in peerdata])
831 data['peers'] = ''.join([p[0] for p in peerdata])
832 else:
833 data['peers'] = peerdata
834 return data
835
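# return_type values consumed by peerlist(), as selected in get() below:
#   0 - compact peer string (standard compact format: 6 bytes per peer,
#       4-byte IP + 2-byte big-endian port)
#   1 - compact string for a client that requires crypto
#   2 - compact string plus one crypto_flags byte per peer
#   3 - list of {'ip', 'port', 'peer id'} dicts
#   4 - list of {'ip', 'port'} dicts (no_peer_id)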
836
837 def get(self, connection, path, headers):
838 real_ip = connection.get_ip()
839 ip = real_ip
840 if is_ipv4(ip):
841 ipv4 = True
842 else:
843 try:
844 ip = ipv6_to_ipv4(ip)
845 ipv4 = True
846 except ValueError:
847 ipv4 = False
848
849 if ( (self.allowed_IPs and not self.allowed_IPs.includes(ip))
850 or (self.banned_IPs and self.banned_IPs.includes(ip)) ):
851 return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
852 bencode({'failure reason':
853 'your IP is not allowed on this tracker'}))
854
855 nip = get_forwarded_ip(headers)
856 if nip and not self.only_local_override_ip:
857 ip = nip
858 try:
859 ip = to_ipv4(ip)
860 ipv4 = True
861 except ValueError:
862 ipv4 = False
863
864 paramslist = {}
865 def params(key, default = None, l = paramslist):
866 if l.has_key(key):
867 return l[key][0]
868 return default
869
870 try:
871 (scheme, netloc, path, pars, query, fragment) = urlparse(path)
872 if self.uq_broken == 1:
873 path = path.replace('+',' ')
874 query = query.replace('+',' ')
875 path = unquote(path)[1:]
876 for s in query.split('&'):
877 if s:
878 i = s.index('=')
879 kw = unquote(s[:i])
880 paramslist.setdefault(kw, [])
881 paramslist[kw] += [unquote(s[i+1:])]
882
883 if path == '' or path == 'index.html':
884 return self.get_infopage()
885 if (path == 'file'):
886 return self.get_file(params('info_hash'))
887 if path == 'favicon.ico' and self.favicon is not None:
888 return (200, 'OK', {'Content-Type' : 'image/x-icon'}, self.favicon)
889
890 # automated access from here on
891
892 if path in ('scrape', 'scrape.php', 'tracker.php/scrape'):
893 return self.get_scrape(paramslist)
894
895 if not path in ('announce', 'announce.php', 'tracker.php/announce'):
896 return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
897
898 # main tracker function
899
900 filtered = self.Filter.check(real_ip, paramslist, headers)
901 if filtered:
902 return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
903 bencode({'failure reason': filtered}))
904
905 infohash = params('info_hash')
906 if not infohash:
907 raise ValueError, 'no info hash'
908
909 notallowed = self.check_allowed(infohash, paramslist)
910 if notallowed:
911 return notallowed
912
913 event = params('event')
914
915 rsize = self.add_data(infohash, event, ip, paramslist)
916
917 except ValueError, e:
918 return (400, 'Bad Request', {'Content-Type': 'text/plain'},
919 'you sent me garbage - ' + str(e))
920
921 if self.aggregate_forward and not paramslist.has_key('tracker'):
922 self.aggregate_senddata(query)
923
924 if self.is_aggregator: # don't return peer data here
925 return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
926 bencode({'response': 'OK'}))
927
928 if params('compact') and ipv4:
929 if params('requirecrypto'):
930 return_type = 1
931 elif params('supportcrypto'):
932 return_type = 2
933 else:
934 return_type = 0
935 elif self.config['compact_reqd'] and ipv4:
936 return (400, 'Bad Request', {'Content-Type': 'text/plain'},
937 'your client is outdated, please upgrade')
938 elif params('no_peer_id'):
939 return_type = 4
940 else:
941 return_type = 3
942
943 data = self.peerlist(infohash, event=='stopped',
944 params('tracker'), not params('left'),
945 return_type, rsize, params('supportcrypto'))
946
947 if paramslist.has_key('scrape'): # deprecated
948 data['scrape'] = self.scrapedata(infohash, False)
949
950 if self.dedicated_seed_id:
951 if params('seed_id') == self.dedicated_seed_id and params('left') == 0:
952 self.is_seeded[infohash] = True
953 if params('check_seeded') and self.is_seeded.get(infohash):
954 data['seeded'] = 1
955
956 return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode(data))
957
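# Sketch of a bencoded announce response body as returned above (values
# made up for illustration; a single line on the wire, wrapped here):
#
#   d8:completei3e10:incompletei7e8:intervali1800e
#   5:peersld2:ip8:10.0.0.12:porti6881eee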
958
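# becache layout, as populated by natcheckOK() below. Each slot maps
# peerid -> cached entry and is split [0] = leechers, [1] = seeds:
#   bc[0]  compact entries for peers not requiring crypto
#   bc[1]  compact entries for crypto-supporting peers
#   bc[2]  (compact entry, crypto-required flag byte) tuples
#   bc[3]  Bencached {'ip','port','peer id'} dicts  (non-compact mode)
#   bc[4]  Bencached {'ip','port'} dicts            (non-compact mode)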
959 def natcheckOK(self, infohash, peerid, ip, port, peer):
960 seed = not peer['left']
961 bc = self.becache.setdefault(infohash,self.cache_default)
962 cp = compact_peer_info(ip, port)
963 reqc = peer['requirecrypto']
964 bc[2][seed][peerid] = (cp,chr(reqc))
965 if peer['supportcrypto']:
966 bc[1][seed][peerid] = cp
967 if not reqc:
968 bc[0][seed][peerid] = cp
969 if not self.config['compact_reqd']:
970 bc[3][seed][peerid] = Bencached(bencode({'ip': ip, 'port': port,
971 'peer id': peerid}))
972 bc[4][seed][peerid] = Bencached(bencode({'ip': ip, 'port': port}))
973
974
975 def natchecklog(self, peerid, ip, port, result):
976 year, month, day, hour, minute, second, a, b, c = localtime(time())
977 print '%s - %s [%02d/%3s/%04d:%02d:%02d:%02d] "!natcheck-%s:%i" %i 0 - -' % (
978 ip, quote(peerid), day, months[month], year, hour, minute, second,
979 ip, port, result)
980
981 def connectback_result(self, result, downloadid, peerid, ip, port):
982 record = self.downloads.get(downloadid,{}).get(peerid)
983 if ( record is None
984 or (record['ip'] != ip and record.get('given ip') != ip)
985 or record['port'] != port ):
986 if self.config['log_nat_checks']:
987 self.natchecklog(peerid, ip, port, 404)
988 return
989 if self.config['log_nat_checks']:
990 if result:
991 x = 200
992 else:
993 x = 503
994 self.natchecklog(peerid, ip, port, x)
995 if not record.has_key('nat'):
996 record['nat'] = int(not result)
997 if result:
998 self.natcheckOK(downloadid,peerid,ip,port,record)
999 elif result and record['nat']:
1000 record['nat'] = 0
1001 self.natcheckOK(downloadid,peerid,ip,port,record)
1002 elif not result:
1003 record['nat'] += 1
1004
1005
1006 def remove_from_state(self, *l):
1007 for s in l:
1008 try:
1009 del self.state[s]
1010 except:
1011 pass
1012
1013 def save_state(self):
1014 self.rawserver.add_task(self.save_state, self.save_dfile_interval)
1015 h = open(self.dfile, 'wb')
1016 h.write(bencode(self.state))
1017 h.close()
1018
1019
1020 def parse_allowed(self):
1021 self.rawserver.add_task(self.parse_allowed, self.parse_dir_interval)
1022
1023 if self.config['allowed_dir']:
1024 r = parsedir( self.config['allowed_dir'], self.allowed,
1025 self.allowed_dir_files, self.allowed_dir_blocked,
1026 [".torrent"] )
1027 ( self.allowed, self.allowed_dir_files, self.allowed_dir_blocked,
1028 added, garbage2 ) = r
1029
1030 self.state['allowed'] = self.allowed
1031 self.state['allowed_dir_files'] = self.allowed_dir_files
1032
1033 self.t2tlist.parse(self.allowed)
1034
1035 else:
1036 f = self.config['allowed_list']
1037 if self.allowed_list_mtime == os.path.getmtime(f):
1038 return
1039 try:
1040 r = parsetorrentlist(f, self.allowed)
1041 (self.allowed, added, garbage2) = r
1042 self.state['allowed_list'] = self.allowed
1043 except (IOError, OSError):
1044 print '**warning** unable to read allowed torrent list'
1045 return
1046 self.allowed_list_mtime = os.path.getmtime(f)
1047
1048 for infohash in added.keys():
1049 self.downloads.setdefault(infohash, {})
1050 self.completed.setdefault(infohash, 0)
1051 self.seedcount.setdefault(infohash, 0)
1052
1053
1054 def read_ip_lists(self):
1055 self.rawserver.add_task(self.read_ip_lists,self.parse_dir_interval)
1056
1057 f = self.config['allowed_ips']
1058 if f and self.allowed_ip_mtime != os.path.getmtime(f):
1059 self.allowed_IPs = IP_List()
1060 try:
1061 self.allowed_IPs.read_fieldlist(f)
1062 self.allowed_ip_mtime = os.path.getmtime(f)
1063 except (IOError, OSError):
1064 print '**warning** unable to read allowed_IP list'
1065
1066 f = self.config['banned_ips']
1067 if f and self.banned_ip_mtime != os.path.getmtime(f):
1068 self.banned_IPs = IP_Range_List()
1069 try:
1070 self.banned_IPs.read_rangelist(f)
1071 self.banned_ip_mtime = os.path.getmtime(f)
1072 except (IOError, OSError):
1073 print '**warning** unable to read banned_IP list'
1074
1075
1076 def delete_peer(self, infohash, peerid):
1077 dls = self.downloads[infohash]
1078 peer = dls[peerid]
1079 if not peer['left']:
1080 self.seedcount[infohash] -= 1
1081 if not peer.get('nat',-1):
1082 l = self.becache[infohash]
1083 y = not peer['left']
1084 for x in l:
1085 if x[y].has_key(peerid):
1086 del x[y][peerid]
1087 del self.times[infohash][peerid]
1088 del dls[peerid]
1089
1090 def expire_downloaders(self):
1091 for x in self.times.keys():
1092 for myid, t in self.times[x].items():
1093 if t < self.prevtime:
1094 self.delete_peer(x,myid)
1095 self.prevtime = clock()
1096 if (self.keep_dead != 1):
1097 for key, value in self.downloads.items():
1098 if len(value) == 0 and (
1099 self.allowed is None or not self.allowed.has_key(key) ):
1100 del self.times[key]
1101 del self.downloads[key]
1102 del self.seedcount[key]
1103 self.rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
1104
1105
1106def track(args):
1107 if len(args) == 0:
1108 print formatDefinitions(defaults, 80)
1109 return
1110 try:
1111 config, files = parseargs(args, defaults, 0, 0)
1112 except ValueError, e:
1113 print 'error: ' + str(e)
1114 print 'run with no arguments for parameter explanations'
1115 return
1116 r = RawServer(Event(), config['timeout_check_interval'],
1117 config['socket_timeout'], ipv6_enable = config['ipv6_enabled'])
1118 t = Tracker(config, r)
1119 r.bind(config['port'], config['bind'],
1120 reuse = True, ipv6_socket_style = config['ipv6_binds_v4'])
1121 r.listen_forever(HTTPHandler(t.get, config['min_time_between_log_flushes']))
1122 t.save_state()
1123 print '# Shutting down: ' + isotime()
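# Typical invocation of track(), matching the README example:
#   ./bttrack.py --port 6969 --dfile dstate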
1124
1125def size_format(s):
1126 if (s < 1024):
1127 r = str(s) + 'B'
1128 elif (s < 1048576):
1129 r = str(int(s/1024)) + 'KiB'
1130 elif (s < 1073741824L):
1131 r = str(int(s/1048576)) + 'MiB'
1132 elif (s < 1099511627776L):
1133 r = str(int((s/1073741824.0)*100.0)/100.0) + 'GiB'
1134 else:
1135 r = str(int((s/1099511627776.0)*100.0)/100.0) + 'TiB'
1136 return(r)
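# Illustrative values: size_format(512) -> '512B',
# size_format(2500) -> '2KiB', size_format(3*2**30) -> '3.0GiB'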
1137
11380
=== removed directory '.pc/06_README_portchange.dpatch'
=== removed file '.pc/06_README_portchange.dpatch/README.txt'
--- .pc/06_README_portchange.dpatch/README.txt 2010-03-21 14:36:30 +0000
+++ .pc/06_README_portchange.dpatch/README.txt 1970-01-01 00:00:00 +0000
@@ -1,110 +0,0 @@
1BitTorrent is a tool for distributing files. It's extremely
2easy to use - downloads are started by clicking on hyperlinks.
3Whenever more than one person is downloading at once
4they send pieces of the file(s) to each other, thus relieving
5the central server's bandwidth burden. Even with many
6simultaneous downloads, the upload burden on the central server
7remains quite small, since each new downloader introduces new
8upload capacity.
9
10Windows web browser support is added by running an installer.
11A prebuilt one is available, but instructions for building it
12yourself are in BUILD.windows.txt
13
14Instructions for Unix installation are in INSTALL.unix.txt
15
16To start hosting -
17
181) start running a tracker
19
20First, you need a tracker. If you're on a dynamic IP or otherwise
21unreliable connection, you should find someone else's tracker and
22use that. Otherwise, follow the rest of this step.
23
24Trackers refer downloaders to each other. The load on the tracker
25is very small, so you only need one for all your files.
26
27To run a tracker, execute the command bttrack.py. Here is an example -
28
29./bttrack.py --port 6969 --dfile dstate
30
31--dfile is where persistent information is kept on the tracker across
32invocations. It makes everything start working again immediately if
33you restart the tracker. A new one will be created if it doesn't exist
34already.
35
36The tracker must be on a net-addressable box, and you must know its
37ip number or dns name.
38
39The tracker outputs web logs to standard out. You can get information
40about the files it's currently serving by getting its index page.
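For example, to confirm the tracker is up, fetch its index page
(assuming the tracker started in the example above):

wget -O - http://my.tracker:6969/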
41
422) create a metainfo file using btmakemetafile.py
43
44To generate a metainfo file, run btmakemetafile and give
45it the file you want metainfo for and the url of the tracker -
46
47./btmakemetafile.py http://my.tracker:6969/announce myfile.ext
48
49This will generate a file called myfile.ext.torrent
50
51Make sure to include the port number in the tracker url if it isn't 80.
52
53This command may take a while, since it scans and hashes the whole file.
54
55The /announce path is special and hard-coded into the tracker.
56Make sure to give the domain or ip your tracker is on instead of
57my.tracker.
58
59You can use either a dns name or an IP address in the tracker url.
60
613) associate .torrent with application/x-bittorrent on your web server
62
63The way you do this is dependent on the particular web server you're using.
64
65You must have a web server which can serve ordinary static files and is
66addressable from the internet at large.
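For example, with Apache this takes one line in the server
configuration (or an .htaccess file):

AddType application/x-bittorrent .torrent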
67
684) put the newly made .torrent file on your web server
69
70Note that the file name you choose on the server must end in .torrent, so
71it gets associated with the right mimetype.
72
735) put up a static page which links to the location you uploaded to in step 4
74
75The file you uploaded in step 4 is linked to using an ordinary url.
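For example, reusing the file name from the surrounding steps:

<a href="http://my.server/myfile.torrent">download myfile.ext</a>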
76
776) start a downloader seeding the complete file
78
79You have to run a downloader which already has the complete file,
80so new downloaders have a place to get it from. Here's an example -
81
82./btdownloadheadless.py --url http://my.server/myfile.torrent --saveas myfile.ext
83
84Make sure the saveas argument points to the already complete file.
85
86If you're running the complete downloader on the same machine or LAN as
87the tracker, give a --ip parameter to the complete downloader. The --ip
88parameter can be either an IP address or DNS name.
89
90BitTorrent defaults to port 6881. If it can't use 6881 (probably because
91another download is already happening), it tries 6882, then 6883, etc. It
92gives up after 6889.
93
947) you're done!
95
96Now you just have to get people downloading! Refer them to the page you
97created in step 5.
98
99BitTorrent can also publish whole directories - simply point
100btmakemetafile.py at the directory with files in it; they'll be published
101as one unit. All files in subdirectories will be included, although files
102and directories named 'CVS' and 'core' are ignored.
103
104If you have any questions, try the web site or mailing list -
105
106http://bitconjurer.org/BitTorrent/
107
108http://groups.yahoo.com/group/BitTorrent
109
110You can also often find me, Bram, in #bittorrent of irc.freenode.net
1110
=== removed directory '.pc/07_change_report_address.dpatch'
=== removed directory '.pc/07_change_report_address.dpatch/BitTornado'
=== removed file '.pc/07_change_report_address.dpatch/BitTornado/__init__.py'
--- .pc/07_change_report_address.dpatch/BitTornado/__init__.py 2010-03-21 14:36:30 +0000
+++ .pc/07_change_report_address.dpatch/BitTornado/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,63 +0,0 @@
1product_name = 'BitTornado'
2version_short = 'T-0.3.18'
3
4version = version_short+' ('+product_name+')'
5report_email = version_short+'@degreez.net'
6
7from types import StringType
8from sha import sha
9from time import time, clock
10try:
11 from os import getpid
12except ImportError:
13 def getpid():
14 return 1
15
16mapbase64 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.-'
17
18_idprefix = version_short[0]
19for subver in version_short[2:].split('.'):
20 try:
21 subver = int(subver)
22 except:
23 subver = 0
24 _idprefix += mapbase64[subver]
25_idprefix += ('-' * (6-len(_idprefix)))
26_idrandom = [None]
27
28def resetPeerIDs():
29 try:
30 f = open('/dev/urandom','rb')
31 x = f.read(20)
32 f.close()
33 except:
34 x = ''
35
36 l1 = 0
37 t = clock()
38 while t == clock():
39 l1 += 1
40 l2 = 0
41 t = long(time()*100)
42 while t == long(time()*100):
43 l2 += 1
44 l3 = 0
45 if l2 < 1000:
46 t = long(time()*10)
47 while t == long(clock()*10):
48 l3 += 1
49 x += ( repr(time()) + '/' + str(time()) + '/'
50 + str(l1) + '/' + str(l2) + '/' + str(l3) + '/'
51 + str(getpid()) )
52
53 s = ''
54 for i in sha(x).digest()[-11:]:
55 s += mapbase64[ord(i) & 0x3F]
56 _idrandom[0] = s
57
58resetPeerIDs()
59
60def createPeerID(ins = '---'):
61 assert type(ins) is StringType
62 assert len(ins) == 3
63 return _idprefix + ins + _idrandom[0]
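# The resulting peer ID is 20 characters: the 6-character version
# prefix, the 3-character client tag 'ins', and 11 characters derived
# from the random pool above.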
640
=== removed directory '.pc/08_btdownloadcurses_indent.dpatch'
=== removed file '.pc/08_btdownloadcurses_indent.dpatch/btdownloadcurses.py'
--- .pc/08_btdownloadcurses_indent.dpatch/btdownloadcurses.py 2010-03-21 14:36:30 +0000
+++ .pc/08_btdownloadcurses_indent.dpatch/btdownloadcurses.py 1970-01-01 00:00:00 +0000
@@ -1,407 +0,0 @@
1#!/usr/bin/env python
2
3# Written by Henry 'Pi' James
4# see LICENSE.txt for license information
5
6SPEW_SCROLL_RATE = 1
7
8from BitTornado import PSYCO
9if PSYCO.psyco:
10 try:
11 import psyco
12 assert psyco.__version__ >= 0x010100f0
13 psyco.full()
14 except:
15 pass
16
17from BitTornado.download_bt1 import BT1Download, defaults, parse_params, get_usage, get_response
18from BitTornado.RawServer import RawServer, UPnP_ERROR
19from random import seed
20from socket import error as socketerror
21from BitTornado.bencode import bencode
22from BitTornado.natpunch import UPnP_test
23from threading import Event
24from os.path import abspath
25from signal import signal, SIGWINCH
26from sha import sha
27from sys import argv, exit
28import sys
29from time import time, strftime
30from BitTornado.clock import clock
31from BitTornado import createPeerID, version
32from BitTornado.ConfigDir import ConfigDir
33
34try:
35 import curses
36 import curses.panel
37 from curses.wrapper import wrapper as curses_wrapper
38 from signal import signal, SIGWINCH
39except:
40 print 'Textmode GUI initialization failed, cannot proceed.'
41 print
42 print 'This download interface requires the standard Python module ' \
43 '"curses", which is unfortunately not available for the native ' \
44 'Windows port of Python. It is however available for the Cygwin ' \
45 'port of Python, running on all Win32 systems (www.cygwin.com).'
46 print
47 print 'You may still use "btdownloadheadless.py" to download.'
48 sys.exit(1)
49
50assert sys.version >= '2', "Install Python 2.0 or greater"
51try:
52 True
53except:
54 True = 1
55 False = 0
56
57def fmttime(n):
58 if n == 0:
59 return 'download complete!'
60 try:
61 n = int(n)
62 assert n >= 0 and n < 5184000 # 60 days
63 except:
64 return '<unknown>'
65 m, s = divmod(n, 60)
66 h, m = divmod(m, 60)
67 return 'finishing in %d:%02d:%02d' % (h, m, s)
68
69def fmtsize(n):
70 s = str(n)
71 size = s[-3:]
72 while len(s) > 3:
73 s = s[:-3]
74 size = '%s,%s' % (s[-3:], size)
75 if n > 999:
76 unit = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
77 i = 1
78 while i + 1 < len(unit) and (n >> 10) >= 999:
79 i += 1
80 n >>= 10
81 n = float(n) / (1 << 10)
82 size = '%s (%.2f %s)' % (size, n, unit[i])
83 return size
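# Illustrative: fmtsize(2500) -> '2,500 (2.44 KiB)'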
84
85
86class CursesDisplayer:
87 def __init__(self, scrwin, errlist, doneflag):
88 self.scrwin = scrwin
89 self.errlist = errlist
90 self.doneflag = doneflag
91
92 signal(SIGWINCH, self.winch_handler)
93 self.changeflag = Event()
94
95 self.done = 0
96 self.file = ''
97 self.fileSize = ''
98 self.activity = ''
99 self.status = ''
100 self.progress = ''
101 self.downloadTo = ''
102 self.downRate = '---'
103 self.upRate = '---'
104 self.shareRating = ''
105 self.seedStatus = ''
106 self.peerStatus = ''
107 self.errors = []
108 self.last_update_time = 0
109 self.spew_scroll_time = 0
110 self.spew_scroll_pos = 0
111
112 self._remake_window()
113
114 def winch_handler(self, signum, stackframe):
115 self.changeflag.set()
116 curses.endwin()
117 self.scrwin.refresh()
118 self.scrwin = curses.newwin(0, 0, 0, 0)
119 self._remake_window()
120
121 def _remake_window(self):
122 self.scrh, self.scrw = self.scrwin.getmaxyx()
123 self.scrpan = curses.panel.new_panel(self.scrwin)
124 self.labelh, self.labelw, self.labely, self.labelx = 11, 9, 1, 2
125 self.labelwin = curses.newwin(self.labelh, self.labelw,
126 self.labely, self.labelx)
127 self.labelpan = curses.panel.new_panel(self.labelwin)
128 self.fieldh, self.fieldw, self.fieldy, self.fieldx = (
129 self.labelh, self.scrw-2 - self.labelw-3,
130 1, self.labelw+3)
131 self.fieldwin = curses.newwin(self.fieldh, self.fieldw,
132 self.fieldy, self.fieldx)
133 self.fieldwin.nodelay(1)
134 self.fieldpan = curses.panel.new_panel(self.fieldwin)
135 self.spewh, self.speww, self.spewy, self.spewx = (
136 self.scrh - self.labelh - 2, self.scrw - 3, 1 + self.labelh, 2)
137 self.spewwin = curses.newwin(self.spewh, self.speww,
138 self.spewy, self.spewx)
139 self.spewpan = curses.panel.new_panel(self.spewwin)
140 try:
141 self.scrwin.border(ord('|'),ord('|'),ord('-'),ord('-'),ord(' '),ord(' '),ord(' '),ord(' '))
142 except:
143 pass
144 self.labelwin.addstr(0, 0, 'file:')
145 self.labelwin.addstr(1, 0, 'size:')
146 self.labelwin.addstr(2, 0, 'dest:')
147 self.labelwin.addstr(3, 0, 'progress:')
148 self.labelwin.addstr(4, 0, 'status:')
149 self.labelwin.addstr(5, 0, 'dl speed:')
150 self.labelwin.addstr(6, 0, 'ul speed:')
151 self.labelwin.addstr(7, 0, 'sharing:')
152 self.labelwin.addstr(8, 0, 'seeds:')
153 self.labelwin.addstr(9, 0, 'peers:')
154 curses.panel.update_panels()
155 curses.doupdate()
156 self.changeflag.clear()
157
158
159 def finished(self):
160 self.done = 1
161 self.activity = 'download succeeded!'
162 self.downRate = '---'
163 self.display(fractionDone = 1)
164
165 def failed(self):
166 self.done = 1
167 self.activity = 'download failed!'
168 self.downRate = '---'
169 self.display()
170
171 def error(self, errormsg):
172 newerrmsg = strftime('[%H:%M:%S] ') + errormsg
173 self.errors.append(newerrmsg)
174 self.errlist.append(newerrmsg)
175 self.display()
176
177 def display(self, dpflag = Event(), fractionDone = None, timeEst = None,
178 downRate = None, upRate = None, activity = None,
179 statistics = None, spew = None, **kws):
180
181 inchar = self.fieldwin.getch()
182 if inchar == 12: # ^L
183 self._remake_window()
184 elif inchar in (ord('q'),ord('Q')):
185 self.doneflag.set()
186
187 if activity is not None and not self.done:
188 self.activity = activity
189 elif timeEst is not None:
190 self.activity = fmttime(timeEst)
191 if self.changeflag.isSet():
192 return
193 if self.last_update_time + 0.1 > clock() and fractionDone not in (0.0, 1.0) and activity is not None:
194 return
195 self.last_update_time = clock()
196 if fractionDone is not None:
197 blocknum = int(self.fieldw * fractionDone)
198 self.progress = blocknum * '#' + (self.fieldw - blocknum) * '_'
199 self.status = '%s (%.1f%%)' % (self.activity, fractionDone * 100)
200 else:
201 self.status = self.activity
202 if downRate is not None:
203 self.downRate = '%.1f KB/s' % (float(downRate) / (1 << 10))
204 if upRate is not None:
205 self.upRate = '%.1f KB/s' % (float(upRate) / (1 << 10))
206 if statistics is not None:
207 if (statistics.shareRating < 0) or (statistics.shareRating > 100):
208 self.shareRating = 'oo (%.1f MB up / %.1f MB down)' % (float(statistics.upTotal) / (1<<20), float(statistics.downTotal) / (1<<20))
209 else:
210 self.shareRating = '%.3f (%.1f MB up / %.1f MB down)' % (statistics.shareRating, float(statistics.upTotal) / (1<<20), float(statistics.downTotal) / (1<<20))
211 if not self.done:
212 self.seedStatus = '%d seen now, plus %.3f distributed copies' % (statistics.numSeeds,0.001*int(1000*statistics.numCopies2))
213 else:
214 self.seedStatus = '%d seen recently, plus %.3f distributed copies' % (statistics.numOldSeeds,0.001*int(1000*statistics.numCopies))
215 self.peerStatus = '%d seen now, %.1f%% done at %.1f kB/s' % (statistics.numPeers,statistics.percentDone,float(statistics.torrentRate) / (1 << 10))
216
217 self.fieldwin.erase()
218 self.fieldwin.addnstr(0, 0, self.file, self.fieldw, curses.A_BOLD)
219 self.fieldwin.addnstr(1, 0, self.fileSize, self.fieldw)
220 self.fieldwin.addnstr(2, 0, self.downloadTo, self.fieldw)
221 if self.progress:
222 self.fieldwin.addnstr(3, 0, self.progress, self.fieldw, curses.A_BOLD)
223 self.fieldwin.addnstr(4, 0, self.status, self.fieldw)
224 self.fieldwin.addnstr(5, 0, self.downRate, self.fieldw)
225 self.fieldwin.addnstr(6, 0, self.upRate, self.fieldw)
226 self.fieldwin.addnstr(7, 0, self.shareRating, self.fieldw)
227 self.fieldwin.addnstr(8, 0, self.seedStatus, self.fieldw)
228 self.fieldwin.addnstr(9, 0, self.peerStatus, self.fieldw)
229
230 self.spewwin.erase()
231
232 if not spew:
233 errsize = self.spewh
234 if self.errors:
235 self.spewwin.addnstr(0, 0, "error(s):", self.speww, curses.A_BOLD)
236 errsize = len(self.errors)
237 displaysize = min(errsize, self.spewh)
238 displaytop = errsize - displaysize
239 for i in range(displaysize):
240 self.spewwin.addnstr(i, self.labelw, self.errors[displaytop + i],
241 self.speww-self.labelw-1, curses.A_BOLD)
242 else:
243 if self.errors:
244 self.spewwin.addnstr(0, 0, "error:", self.speww, curses.A_BOLD)
245 self.spewwin.addnstr(0, self.labelw, self.errors[-1],
246 self.speww-self.labelw-1, curses.A_BOLD)
247 self.spewwin.addnstr(2, 0, " # IP Upload Download Completed Speed", self.speww, curses.A_BOLD)
248
249
250 if self.spew_scroll_time + SPEW_SCROLL_RATE < clock():
251 self.spew_scroll_time = clock()
252 if len(spew) > self.spewh-5 or self.spew_scroll_pos > 0:
253 self.spew_scroll_pos += 1
254 if self.spew_scroll_pos > len(spew):
255 self.spew_scroll_pos = 0
256
257 for i in range(len(spew)):
258 spew[i]['lineno'] = i+1
259 spew.append({'lineno': None})
260 spew = spew[self.spew_scroll_pos:] + spew[:self.spew_scroll_pos]
261
262 for i in range(min(self.spewh - 5, len(spew))):
263 if not spew[i]['lineno']:
264 continue
265 self.spewwin.addnstr(i+3, 0, '%3d' % spew[i]['lineno'], 3)
266 self.spewwin.addnstr(i+3, 4, spew[i]['ip']+spew[i]['direction'], 16)
267 if spew[i]['uprate'] > 100:
268 self.spewwin.addnstr(i+3, 20, '%6.0f KB/s' % (float(spew[i]['uprate']) / 1000), 11)
269 self.spewwin.addnstr(i+3, 32, '-----', 5)
270 if spew[i]['uinterested'] == 1:
271 self.spewwin.addnstr(i+3, 33, 'I', 1)
272 if spew[i]['uchoked'] == 1:
273 self.spewwin.addnstr(i+3, 35, 'C', 1)
274 if spew[i]['downrate'] > 100:
275 self.spewwin.addnstr(i+3, 38, '%6.0f KB/s' % (float(spew[i]['downrate']) / 1000), 11)
276 self.spewwin.addnstr(i+3, 50, '-------', 7)
277 if spew[i]['dinterested'] == 1:
278 self.spewwin.addnstr(i+3, 51, 'I', 1)
279 if spew[i]['dchoked'] == 1:
280 self.spewwin.addnstr(i+3, 53, 'C', 1)
281 if spew[i]['snubbed'] == 1:
282 self.spewwin.addnstr(i+3, 55, 'S', 1)
283 self.spewwin.addnstr(i+3, 58, '%5.1f%%' % (float(int(spew[i]['completed']*1000))/10), 6)
284 if spew[i]['speed'] is not None:
285 self.spewwin.addnstr(i+3, 64, '%5.0f KB/s' % (float(spew[i]['speed'])/1000), 10)
286
287 if statistics is not None:
288 self.spewwin.addnstr(self.spewh-1, 0,
289 'downloading %d pieces, have %d fragments, %d of %d pieces completed'
290 % ( statistics.storage_active, statistics.storage_dirty,
291 statistics.storage_numcomplete,
292 statistics.storage_totalpieces ), self.speww-1 )
293
294 curses.panel.update_panels()
295 curses.doupdate()
296 dpflag.set()
297
298 def chooseFile(self, default, size, saveas, dir):
299 self.file = default
300 self.fileSize = fmtsize(size)
301 if saveas == '':
302 saveas = default
303 self.downloadTo = abspath(saveas)
304 return saveas
305
306def run(scrwin, errlist, params):
307 doneflag = Event()
308 d = CursesDisplayer(scrwin, errlist, doneflag)
309 try:
310 while 1:
311 configdir = ConfigDir('downloadcurses')
312 defaultsToIgnore = ['responsefile', 'url', 'priority']
313 configdir.setDefaults(defaults,defaultsToIgnore)
314 configdefaults = configdir.loadConfig()
315 defaults.append(('save_options',0,
316 "whether to save the current options as the new default configuration " +
317 "(only for btdownloadcurses.py)"))
318 try:
319 config = parse_params(params, configdefaults)
320 except ValueError, e:
321 d.error('error: ' + str(e) + '\nrun with no args for parameter explanations')
322 break
323 if not config:
324 d.error(get_usage(defaults, d.fieldw, configdefaults))
325 break
326 if config['save_options']:
327 configdir.saveConfig(config)
328 configdir.deleteOldCacheData(config['expire_cache_data'])
329
330 myid = createPeerID()
331 seed(myid)
332
333 rawserver = RawServer(doneflag, config['timeout_check_interval'],
334 config['timeout'], ipv6_enable = config['ipv6_enabled'],
335 failfunc = d.failed, errorfunc = d.error)
336
337 upnp_type = UPnP_test(config['upnp_nat_access'])
338 while True:
339 try:
340 listen_port = rawserver.find_and_bind(config['minport'], config['maxport'],
341 config['bind'], ipv6_socket_style = config['ipv6_binds_v4'],
342 upnp = upnp_type, randomizer = config['random_port'])
343 break
344 except socketerror, e:
345 if upnp_type and e == UPnP_ERROR:
346 d.error('WARNING: COULD NOT FORWARD VIA UPnP')
347 upnp_type = 0
348 continue
349 d.error("Couldn't listen - " + str(e))
350 d.failed()
351 return
352
353 response = get_response(config['responsefile'], config['url'], d.error)
354 if not response:
355 break
356
357 infohash = sha(bencode(response['info'])).digest()
358
359 dow = BT1Download(d.display, d.finished, d.error, d.error, doneflag,
360 config, response, infohash, myid, rawserver, listen_port,
361 configdir)
362
363 if not dow.saveAs(d.chooseFile):
364 break
365
366 if not dow.initFiles(old_style = True):
367 break
368 if not dow.startEngine():
369 dow.shutdown()
370 break
371 dow.startRerequester()
372 dow.autoStats()
373
374 if not dow.am_I_finished():
375 d.display(activity = 'connecting to peers')
376 rawserver.listen_forever(dow.getPortHandler())
377 d.display(activity = 'shutting down')
378 dow.shutdown()
379 break
380
381 except KeyboardInterrupt:
382 # ^C to exit..
383 pass
384 try:
385 rawserver.shutdown()
386 except:
387 pass
388 if not d.done:
389 d.failed()
390
391
392if __name__ == '__main__':
393 if argv[1:] == ['--version']:
394 print version
395 exit(0)
396 if len(argv) <= 1:
397 print "Usage: btdownloadcurses.py <global options>\n"
398 print get_usage(defaults)
399 exit(1)
400
401 errlist = []
402 curses_wrapper(run, errlist, argv[1:])
403
404 if errlist:
405 print "These errors occurred during execution:"
406 for error in errlist:
407 print error
408\ No newline at end of file
4091
=== removed directory '.pc/09_timtuckerfixes.dpatch'
=== removed directory '.pc/09_timtuckerfixes.dpatch/BitTornado'
=== removed directory '.pc/09_timtuckerfixes.dpatch/BitTornado/BT1'
=== removed file '.pc/09_timtuckerfixes.dpatch/BitTornado/BT1/Connecter.py'
--- .pc/09_timtuckerfixes.dpatch/BitTornado/BT1/Connecter.py 2010-03-21 14:36:30 +0000
+++ .pc/09_timtuckerfixes.dpatch/BitTornado/BT1/Connecter.py 1970-01-01 00:00:00 +0000
@@ -1,328 +0,0 @@
1# Written by Bram Cohen
2# see LICENSE.txt for license information
3
4from BitTornado.bitfield import Bitfield
5from BitTornado.clock import clock
6from binascii import b2a_hex
7
8try:
9 True
10except:
11 True = 1
12 False = 0
13
14DEBUG1 = False
15DEBUG2 = False
16
17def toint(s):
18 return long(b2a_hex(s), 16)
19
20def tobinary(i):
21 return (chr(i >> 24) + chr((i >> 16) & 0xFF) +
22 chr((i >> 8) & 0xFF) + chr(i & 0xFF))
23
24CHOKE = chr(0)
25UNCHOKE = chr(1)
26INTERESTED = chr(2)
27NOT_INTERESTED = chr(3)
28# index
29HAVE = chr(4)
30# index, bitfield
31BITFIELD = chr(5)
32# index, begin, length
33REQUEST = chr(6)
34# index, begin, piece
35PIECE = chr(7)
36# index, begin, piece
37CANCEL = chr(8)
38
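# Wire format, as assembled in _send_message() below:
#   <4-byte big-endian length><1-byte message type><payload>
# A zero-length message (no type byte) is a keepalive.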
39class Connection:
40 def __init__(self, connection, connecter, ccount):
41 self.connection = connection
42 self.connecter = connecter
43 self.ccount = ccount
44 self.got_anything = False
45 self.next_upload = None
46 self.outqueue = []
47 self.partial_message = None
48 self.download = None
49 self.send_choke_queued = False
50 self.just_unchoked = None
51
52 def get_ip(self, real=False):
53 return self.connection.get_ip(real)
54
55 def get_id(self):
56 return self.connection.get_id()
57
58 def get_readable_id(self):
59 return self.connection.get_readable_id()
60
61 def close(self):
62 if DEBUG1:
63 print (self.ccount,'connection closed')
64 self.connection.close()
65
66 def is_locally_initiated(self):
67 return self.connection.is_locally_initiated()
68
69 def is_encrypted(self):
70 return self.connection.is_encrypted()
71
72 def send_interested(self):
73 self._send_message(INTERESTED)
74
75 def send_not_interested(self):
76 self._send_message(NOT_INTERESTED)
77
78 def send_choke(self):
79 if self.partial_message:
80 self.send_choke_queued = True
81 else:
82 self._send_message(CHOKE)
83 self.upload.choke_sent()
84 self.just_unchoked = 0
85
86 def send_unchoke(self):
87 if self.send_choke_queued:
88 self.send_choke_queued = False
89 if DEBUG1:
90 print (self.ccount,'CHOKE SUPPRESSED')
91 else:
92 self._send_message(UNCHOKE)
93 if ( self.partial_message or self.just_unchoked is None
94 or not self.upload.interested or self.download.active_requests ):
95 self.just_unchoked = 0
96 else:
97 self.just_unchoked = clock()
98
99 def send_request(self, index, begin, length):
100 self._send_message(REQUEST + tobinary(index) +
101 tobinary(begin) + tobinary(length))
102 if DEBUG1:
103 print (self.ccount,'sent request',index,begin,begin+length)
104
105 def send_cancel(self, index, begin, length):
106 self._send_message(CANCEL + tobinary(index) +
107 tobinary(begin) + tobinary(length))
108 if DEBUG1:
109 print (self.ccount,'sent cancel',index,begin,begin+length)
110
111 def send_bitfield(self, bitfield):
112 self._send_message(BITFIELD + bitfield)
113
114 def send_have(self, index):
115 self._send_message(HAVE + tobinary(index))
116
117 def send_keepalive(self):
118 self._send_message('')
119
120 def _send_message(self, s):
121 if DEBUG2:
122 if s:
123 print (self.ccount,'SENDING MESSAGE',ord(s[0]),len(s))
124 else:
125 print (self.ccount,'SENDING MESSAGE',-1,0)
126 s = tobinary(len(s))+s
127 if self.partial_message:
128 self.outqueue.append(s)
129 else:
130 self.connection.send_message_raw(s)
131
132 def send_partial(self, bytes):
133 if self.connection.closed:
134 return 0
135 if self.partial_message is None:
136 s = self.upload.get_upload_chunk()
137 if s is None:
138 return 0
139 index, begin, piece = s
140 self.partial_message = ''.join((
141 tobinary(len(piece) + 9), PIECE,
142 tobinary(index), tobinary(begin), piece.tostring() ))
143 if DEBUG1:
144 print (self.ccount,'sending chunk',index,begin,begin+len(piece))
145
146 if bytes < len(self.partial_message):
147 self.connection.send_message_raw(self.partial_message[:bytes])
148 self.partial_message = self.partial_message[bytes:]
149 return bytes
150
151 q = [self.partial_message]
152 self.partial_message = None
153 if self.send_choke_queued:
154 self.send_choke_queued = False
155 self.outqueue.append(tobinary(1)+CHOKE)
156 self.upload.choke_sent()
157 self.just_unchoked = 0
158 q.extend(self.outqueue)
159 self.outqueue = []
160 q = ''.join(q)
161 self.connection.send_message_raw(q)
162 return len(q)
163
164 def get_upload(self):
165 return self.upload
166
167 def get_download(self):
168 return self.download
169
170 def set_download(self, download):
171 self.download = download
172
173 def backlogged(self):
174 return not self.connection.is_flushed()
175
176 def got_request(self, i, p, l):
177 self.upload.got_request(i, p, l)
178 if self.just_unchoked:
179 self.connecter.ratelimiter.ping(clock() - self.just_unchoked)
180 self.just_unchoked = 0
181
182
183
184
185class Connecter:
186 def __init__(self, make_upload, downloader, choker, numpieces,
187 totalup, config, ratelimiter, sched = None):
188 self.downloader = downloader
189 self.make_upload = make_upload
190 self.choker = choker
191 self.numpieces = numpieces
192 self.config = config
193 self.ratelimiter = ratelimiter
194 self.rate_capped = False
195 self.sched = sched
196 self.totalup = totalup
197 self.rate_capped = False
198 self.connections = {}
199 self.external_connection_made = 0
200 self.ccount = 0
201
202 def how_many_connections(self):
203 return len(self.connections)
204
205 def connection_made(self, connection):
206 self.ccount += 1
207 c = Connection(connection, self, self.ccount)
208 if DEBUG2:
209 print (c.ccount,'connection made')
210 self.connections[connection] = c
211 c.upload = self.make_upload(c, self.ratelimiter, self.totalup)
212 c.download = self.downloader.make_download(c)
213 self.choker.connection_made(c)
214 return c
215
216 def connection_lost(self, connection):
217 c = self.connections[connection]
218 if DEBUG2:
219 print (c.ccount,'connection closed')
220 del self.connections[connection]
221 if c.download:
222 c.download.disconnected()
223 self.choker.connection_lost(c)
224
225 def connection_flushed(self, connection):
226 conn = self.connections[connection]
227 if conn.next_upload is None and (conn.partial_message is not None
228 or len(conn.upload.buffer) > 0):
229 self.ratelimiter.queue(conn)
230
231 def got_piece(self, i):
232 for co in self.connections.values():
233 co.send_have(i)
234
235 def got_message(self, connection, message):
236 c = self.connections[connection]
237 t = message[0]
238 if DEBUG2:
239 print (c.ccount,'message received',ord(t))
240 if t == BITFIELD and c.got_anything:
241 if DEBUG2:
242 print (c.ccount,'misplaced bitfield')
243 connection.close()
244 return
245 c.got_anything = True
246 if (t in [CHOKE, UNCHOKE, INTERESTED, NOT_INTERESTED] and
247 len(message) != 1):
248 if DEBUG2:
249 print (c.ccount,'bad message length')
250 connection.close()
251 return
252 if t == CHOKE:
253 c.download.got_choke()
254 elif t == UNCHOKE:
255 c.download.got_unchoke()
256 elif t == INTERESTED:
257 if not c.download.have.complete():
258 c.upload.got_interested()
259 elif t == NOT_INTERESTED:
260 c.upload.got_not_interested()
261 elif t == HAVE:
262 if len(message) != 5:
263 if DEBUG2:
264 print (c.ccount,'bad message length')
265 connection.close()
266 return
267 i = toint(message[1:])
268 if i >= self.numpieces:
269 if DEBUG2:
270 print (c.ccount,'bad piece number')
271 connection.close()
272 return
273 if c.download.got_have(i):
274 c.upload.got_not_interested()
275 elif t == BITFIELD:
276 try:
277 b = Bitfield(self.numpieces, message[1:])
278 except ValueError:
279 if DEBUG2:
280 print (c.ccount,'bad bitfield')
281 connection.close()
282 return
283 if c.download.got_have_bitfield(b):
284 c.upload.got_not_interested()
285 elif t == REQUEST:
286 if len(message) != 13:
287 if DEBUG2:
288 print (c.ccount,'bad message length')
289 connection.close()
290 return
291 i = toint(message[1:5])
292 if i >= self.numpieces:
293 if DEBUG2:
294 print (c.ccount,'bad piece number')
295 connection.close()
296 return
297 c.got_request(i, toint(message[5:9]),
298 toint(message[9:]))
299 elif t == CANCEL:
300 if len(message) != 13:
301 if DEBUG2:
302 print (c.ccount,'bad message length')
303 connection.close()
304 return
305 i = toint(message[1:5])
306 if i >= self.numpieces:
307 if DEBUG2:
308 print (c.ccount,'bad piece number')
309 connection.close()
310 return
311 c.upload.got_cancel(i, toint(message[5:9]),
312 toint(message[9:]))
313 elif t == PIECE:
314 if len(message) <= 9:
315 if DEBUG2:
316 print (c.ccount,'bad message length')
317 connection.close()
318 return
319 i = toint(message[1:5])
320 if i >= self.numpieces:
321 if DEBUG2:
322 print (c.ccount,'bad piece number')
323 connection.close()
324 return
325 if c.download.got_piece(i, toint(message[5:9]), message[9:]):
326 self.got_piece(i)
327 else:
328 connection.close()
3290
=== removed file '.pc/09_timtuckerfixes.dpatch/BitTornado/BT1/Encrypter.py'
--- .pc/09_timtuckerfixes.dpatch/BitTornado/BT1/Encrypter.py 2010-03-21 14:36:30 +0000
+++ .pc/09_timtuckerfixes.dpatch/BitTornado/BT1/Encrypter.py 1970-01-01 00:00:00 +0000
@@ -1,657 +0,0 @@
1# Written by Bram Cohen
2# see LICENSE.txt for license information
3
4from cStringIO import StringIO
5from binascii import b2a_hex
6from socket import error as socketerror
7from urllib import quote
8from traceback import print_exc
9from BitTornado.BTcrypto import Crypto
10
11try:
12 True
13except:
14 True = 1
15 False = 0
16 bool = lambda x: not not x
17
18DEBUG = False
19
20MAX_INCOMPLETE = 8
21
22protocol_name = 'BitTorrent protocol'
23option_pattern = chr(0)*8
24
25def toint(s):
26 return long(b2a_hex(s), 16)
27
28def tobinary16(i):
29 return chr((i >> 8) & 0xFF) + chr(i & 0xFF)
30
31hexchars = '0123456789ABCDEF'
32hexmap = []
33for i in xrange(256):
34 hexmap.append(hexchars[(i&0xF0)/16]+hexchars[i&0x0F])
35
36def tohex(s):
37 r = []
38 for c in s:
39 r.append(hexmap[ord(c)])
40 return ''.join(r)
41
42def make_readable(s):
43 if not s:
44 return ''
45 if quote(s).find('%') >= 0:
46 return tohex(s)
47 return '"'+s+'"'
48
49
50class IncompleteCounter:
51 def __init__(self):
52 self.c = 0
53 def increment(self):
54 self.c += 1
55 def decrement(self):
56 self.c -= 1
57 def toomany(self):
58 return self.c >= MAX_INCOMPLETE
59
60incompletecounter = IncompleteCounter()
61
62
63# header, options, download id, my id, [length, message]
64
65class Connection:
66 def __init__(self, Encoder, connection, id,
67 ext_handshake=False, encrypted = None, options = None):
68 self.Encoder = Encoder
69 self.connection = connection
70 self.connecter = Encoder.connecter
71 self.id = id
72 self.locally_initiated = (id != None)
73 self.readable_id = make_readable(id)
74 self.complete = False
75 self.keepalive = lambda: None
76 self.closed = False
77 self.buffer = ''
78 self.bufferlen = None
79 self.log = None
80 self.read = self._read
81 self.write = self._write
82 self.cryptmode = 0
83 self.encrypter = None
84 if self.locally_initiated:
85 incompletecounter.increment()
86 if encrypted:
87 self.encrypted = True
88 self.encrypter = Crypto(True)
89 self.write(self.encrypter.pubkey+self.encrypter.padding())
90 else:
91 self.encrypted = False
92 self.write(chr(len(protocol_name)) + protocol_name +
93 option_pattern + self.Encoder.download_id )
94 self.next_len, self.next_func = 1+len(protocol_name), self.read_header
95 elif ext_handshake:
96 self.Encoder.connecter.external_connection_made += 1
97 if encrypted: # passed an already running encrypter
98 self.encrypter = encrypted
99 self.encrypted = True
100 self._start_crypto()
101 self.next_len, self.next_func = 14, self.read_crypto_block3c
102 else:
103 self.encrypted = False
104 self.options = options
105 self.write(self.Encoder.my_id)
106 self.next_len, self.next_func = 20, self.read_peer_id
107 else:
108 self.encrypted = None # don't know yet
109 self.next_len, self.next_func = 1+len(protocol_name), self.read_header
110 self.Encoder.raw_server.add_task(self._auto_close, 30)
111
112
113 def _log_start(self): # only called with DEBUG = True
114 self.log = open('peerlog.'+self.get_ip()+'.txt','a')
115 self.log.write('connected - ')
116 if self.locally_initiated:
117 self.log.write('outgoing\n')
118 else:
119 self.log.write('incoming\n')
120 self._logwritefunc = self.write
121 self.write = self._log_write
122
123 def _log_write(self, s):
124 self.log.write('w:'+b2a_hex(s)+'\n')
125 self._logwritefunc(s)
126
127
128 def get_ip(self, real=False):
129 return self.connection.get_ip(real)
130
131 def get_id(self):
132 return self.id
133
134 def get_readable_id(self):
135 return self.readable_id
136
137 def is_locally_initiated(self):
138 return self.locally_initiated
139
140 def is_encrypted(self):
141 return bool(self.encrypted)
142
143 def is_flushed(self):
144 return self.connection.is_flushed()
145
146 def _read_header(self, s):
147 if s == chr(len(protocol_name))+protocol_name:
148 return 8, self.read_options
149 return None
150
151 def read_header(self, s):
152 if self._read_header(s):
153 if self.encrypted or self.Encoder.config['crypto_stealth']:
154 return None
155 return 8, self.read_options
156 if self.locally_initiated and not self.encrypted:
157 return None
158 elif not self.Encoder.config['crypto_allowed']:
159 return None
160 if not self.encrypted:
161 self.encrypted = True
162 self.encrypter = Crypto(self.locally_initiated)
163 self._write_buffer(s)
164 return self.encrypter.keylength, self.read_crypto_header
165
166 ################## ENCRYPTION SUPPORT ######################
167
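# Rough outline of the encrypted handshake implemented below (message
# stream encryption; simplified):
#   1. each side sends its DH public key plus random padding
#   2. the initiator sends the block3a/block3b markers, then an
#      encrypted block: VC, acceptable crypto modes, PadC, and the
#      length of any initial payload
#   3. the receiver replies with VC, the selected mode, and PadD
#   4. mode 1 encrypts only the handshake; mode 2 keeps the whole
#      stream encrypted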
168 def _start_crypto(self):
169 self.encrypter.setrawaccess(self._read,self._write)
170 self.write = self.encrypter.write
171 self.read = self.encrypter.read
172 if self.buffer:
173 self.buffer = self.encrypter.decrypt(self.buffer)
174
175 def _end_crypto(self):
176 self.read = self._read
177 self.write = self._write
178 self.encrypter = None
179
180 def read_crypto_header(self, s):
181 self.encrypter.received_key(s)
182 self.encrypter.set_skey(self.Encoder.download_id)
183 if self.locally_initiated:
184 if self.Encoder.config['crypto_only']:
185 cryptmode = '\x00\x00\x00\x02' # full stream encryption
186 else:
187 cryptmode = '\x00\x00\x00\x03' # header or full stream
188 padc = self.encrypter.padding()
189 self.write( self.encrypter.block3a
190 + self.encrypter.block3b
191 + self.encrypter.encrypt(
192 ('\x00'*8) # VC
193 + cryptmode # acceptable crypto modes
194 + tobinary16(len(padc))
195 + padc # PadC
196 + '\x00\x00' ) ) # no initial payload data
197 self._max_search = 520
198 return 1, self.read_crypto_block4a
199 self.write(self.encrypter.pubkey+self.encrypter.padding())
200 self._max_search = 520
201 return 0, self.read_crypto_block3a
202
203 def _search_for_pattern(self, s, pat):
204 p = s.find(pat)
205 if p < 0:
206 if len(s) >= len(pat):
207 self._max_search -= len(s)+1-len(pat)
208 if self._max_search < 0:
209 self.close()
210 return False
211 self._write_buffer(s[1-len(pat):])
212 return False
213 self._write_buffer(s[p+len(pat):])
214 return True
215
216 ### INCOMING CONNECTION ###
217
218 def read_crypto_block3a(self, s):
219 if not self._search_for_pattern(s,self.encrypter.block3a):
220 return -1, self.read_crypto_block3a # wait for more data
221 return len(self.encrypter.block3b), self.read_crypto_block3b
222
223 def read_crypto_block3b(self, s):
224 if s != self.encrypter.block3b:
225 return None
226 self.Encoder.connecter.external_connection_made += 1
227 self._start_crypto()
228 return 14, self.read_crypto_block3c
229
230 def read_crypto_block3c(self, s):
231 if s[:8] != ('\x00'*8): # check VC
232 return None
233 self.cryptmode = toint(s[8:12]) % 4
234 if self.cryptmode == 0:
235 return None # no encryption selected
236 if ( self.cryptmode == 1 # only header encryption
237 and self.Encoder.config['crypto_only'] ):
238 return None
239 padlen = (ord(s[12])<<8)+ord(s[13])
240 if padlen > 512:
241 return None
242 return padlen+2, self.read_crypto_pad3
243
244 def read_crypto_pad3(self, s):
245 s = s[-2:]
246 ialen = (ord(s[0])<<8)+ord(s[1])
247 if ialen > 65535:
248 return None
249 if self.cryptmode == 1:
250 cryptmode = '\x00\x00\x00\x01' # header only encryption
251 else:
252 cryptmode = '\x00\x00\x00\x02' # full stream encryption
253 padd = self.encrypter.padding()
254 self.write( ('\x00'*8) # VC
255 + cryptmode # encryption mode
256 + tobinary16(len(padd))
257 + padd ) # PadD
258 if ialen:
259 return ialen, self.read_crypto_ia
260 return self.read_crypto_block3done()
261
262 def read_crypto_ia(self, s):
263 if DEBUG:
264 self._log_start()
265 self.log.write('r:'+b2a_hex(s)+'(ia)\n')
266 if self.buffer:
267 self.log.write('r:'+b2a_hex(self.buffer)+'(buffer)\n')
268 return self.read_crypto_block3done(s)
269
270 def read_crypto_block3done(self, ia=''):
271 if DEBUG:
272 if not self.log:
273 self._log_start()
274 if self.cryptmode == 1: # only handshake encryption
275 assert not self.buffer # oops; check for exceptions to this
276 self._end_crypto()
277 if ia:
278 self._write_buffer(ia)
279 return 1+len(protocol_name), self.read_encrypted_header
280
281 ### OUTGOING CONNECTION ###
282
283 def read_crypto_block4a(self, s):
284 if not self._search_for_pattern(s,self.encrypter.VC_pattern()):
285 return -1, self.read_crypto_block4a # wait for more data
286 self._start_crypto()
287 return 6, self.read_crypto_block4b
288
289 def read_crypto_block4b(self, s):
290 self.cryptmode = toint(s[:4]) % 4
291 if self.cryptmode == 1: # only header encryption
292 if self.Encoder.config['crypto_only']:
293 return None
294 elif self.cryptmode != 2:
295 return None # unknown encryption
296 padlen = (ord(s[4])<<8)+ord(s[5])
297 if padlen > 512:
298 return None
299 if padlen:
300 return padlen, self.read_crypto_pad4
301 return self.read_crypto_block4done()
302
303 def read_crypto_pad4(self, s):
304 # discard data
305 return self.read_crypto_block4done()
306
307 def read_crypto_block4done(self):
308 if DEBUG:
309 self._log_start()
310 if self.cryptmode == 1: # only handshake encryption
311 if not self.buffer: # oops; check for exceptions to this
312 return None
313 self._end_crypto()
314 self.write(chr(len(protocol_name)) + protocol_name +
315 option_pattern + self.Encoder.download_id)
316 return 1+len(protocol_name), self.read_encrypted_header
317
318 ### START PROTOCOL OVER ENCRYPTED CONNECTION ###
319
320 def read_encrypted_header(self, s):
321 return self._read_header(s)
322
323 ################################################
324
325 def read_options(self, s):
326 self.options = s
327 return 20, self.read_download_id
328
329 def read_download_id(self, s):
330 if ( s != self.Encoder.download_id
331 or not self.Encoder.check_ip(ip=self.get_ip()) ):
332 return None
333 if not self.locally_initiated:
334 if not self.encrypted:
335 self.Encoder.connecter.external_connection_made += 1
336 self.write(chr(len(protocol_name)) + protocol_name +
337 option_pattern + self.Encoder.download_id + self.Encoder.my_id)
338 return 20, self.read_peer_id
339
340 def read_peer_id(self, s):
341 if not self.encrypted and self.Encoder.config['crypto_only']:
342 return None # allows older trackers to ping,
343 # but won't proceed w/ connections
344 if not self.id:
345 self.id = s
346 self.readable_id = make_readable(s)
347 else:
348 if s != self.id:
349 return None
350 self.complete = self.Encoder.got_id(self)
351 if not self.complete:
352 return None
353 if self.locally_initiated:
354 self.write(self.Encoder.my_id)
355 incompletecounter.decrement()
356 self._switch_to_read2()
357 c = self.Encoder.connecter.connection_made(self)
358 self.keepalive = c.send_keepalive
359 return 4, self.read_len
360
361 def read_len(self, s):
362 l = toint(s)
363 if l > self.Encoder.max_len:
364 return None
365 return l, self.read_message
366
367 def read_message(self, s):
368 if s != '':
369 self.connecter.got_message(self, s)
370 return 4, self.read_len
371
372 def read_dead(self, s):
373 return None
374
375 def _auto_close(self):
376 if not self.complete:
377 self.close()
378
379 def close(self):
380 if not self.closed:
381 self.connection.close()
382 self.sever()
383
384 def sever(self):
385 if self.log:
386 self.log.write('closed\n')
387 self.log.close()
388 self.closed = True
389 del self.Encoder.connections[self.connection]
390 if self.complete:
391 self.connecter.connection_lost(self)
392 elif self.locally_initiated:
393 incompletecounter.decrement()
394
395 def send_message_raw(self, message):
396 self.write(message)
397
398 def _write(self, message):
399 if not self.closed:
400 self.connection.write(message)
401
402 def data_came_in(self, connection, s):
403 self.read(s)
404
405 def _write_buffer(self, s):
406 self.buffer = s+self.buffer
407
408 def _read(self, s):
409 if self.log:
410 self.log.write('r:'+b2a_hex(s)+'\n')
411 self.Encoder.measurefunc(len(s))
412 self.buffer += s
413 while True:
414 if self.closed:
415 return
416 # self.next_len = # of characters function expects
417 # or 0 = all characters in the buffer
418 # or -1 = wait for next read, then all characters in the buffer
419 # not compatible w/ keepalives, switch out after all negotiation complete
420 if self.next_len <= 0:
421 m = self.buffer
422 self.buffer = ''
423 elif len(self.buffer) >= self.next_len:
424 m = self.buffer[:self.next_len]
425 self.buffer = self.buffer[self.next_len:]
426 else:
427 return
428 try:
429 x = self.next_func(m)
430 except:
431 self.next_len, self.next_func = 1, self.read_dead
432 raise
433 if x is None:
434 self.close()
435 return
436 self.next_len, self.next_func = x
437 if self.next_len < 0: # already checked buffer
438 return # wait for additional data
439 if self.bufferlen is not None:
440 self._read2('')
441 return
442
443 def _switch_to_read2(self):
444 self._write_buffer = None
445 if self.encrypter:
446 self.encrypter.setrawaccess(self._read2,self._write)
447 else:
448 self.read = self._read2
449 self.bufferlen = len(self.buffer)
450 self.buffer = [self.buffer]
451
452 def _read2(self, s): # more efficient, requires buffer['',''] & bufferlen
453 if self.log:
454 self.log.write('r:'+b2a_hex(s)+'\n')
455 self.Encoder.measurefunc(len(s))
456 while True:
457 if self.closed:
458 return
459 p = self.next_len-self.bufferlen
460 if self.next_len == 0:
461 m = ''
462 elif s:
463 if p > len(s):
464 self.buffer.append(s)
465 self.bufferlen += len(s)
466 return
467 self.bufferlen = len(s)-p
468 self.buffer.append(s[:p])
469 m = ''.join(self.buffer)
470 if p == len(s):
471 self.buffer = []
472 else:
473 self.buffer=[s[p:]]
474 s = ''
475 elif p <= 0:
476 # assert len(self.buffer) == 1
477 s = self.buffer[0]
478 self.bufferlen = len(s)-self.next_len
479 m = s[:self.next_len]
480 if p == 0:
481 self.buffer = []
482 else:
483 self.buffer = [s[self.next_len:]]
484 s = ''
485 else:
486 return
487 try:
488 x = self.next_func(m)
489 except:
490 self.next_len, self.next_func = 1, self.read_dead
491 raise
492 if x is None:
493 self.close()
494 return
495 self.next_len, self.next_func = x
496 if self.next_len < 0: # already checked buffer
497 return # wait for additional data
498
499
500 def connection_flushed(self, connection):
501 if self.complete:
502 self.connecter.connection_flushed(self)
503
504 def connection_lost(self, connection):
505 if self.Encoder.connections.has_key(connection):
506 self.sever()
507
508
509class _dummy_banlist:
510 def includes(self, x):
511 return False
512
513class Encoder:
514 def __init__(self, connecter, raw_server, my_id, max_len,
515 schedulefunc, keepalive_delay, download_id,
516 measurefunc, config, bans=_dummy_banlist() ):
517 self.raw_server = raw_server
518 self.connecter = connecter
519 self.my_id = my_id
520 self.max_len = max_len
521 self.schedulefunc = schedulefunc
522 self.keepalive_delay = keepalive_delay
523 self.download_id = download_id
524 self.measurefunc = measurefunc
525 self.config = config
526 self.connections = {}
527 self.banned = {}
528 self.external_bans = bans
529 self.to_connect = []
530 self.paused = False
531 if self.config['max_connections'] == 0:
532 self.max_connections = 2 ** 30
533 else:
534 self.max_connections = self.config['max_connections']
535 schedulefunc(self.send_keepalives, keepalive_delay)
536
537 def send_keepalives(self):
538 self.schedulefunc(self.send_keepalives, self.keepalive_delay)
539 if self.paused:
540 return
541 for c in self.connections.values():
542 c.keepalive()
543
544 def start_connections(self, list):
545 if not self.to_connect:
546 self.raw_server.add_task(self._start_connection_from_queue)
547 self.to_connect = list
548
549 def _start_connection_from_queue(self):
550 if self.connecter.external_connection_made:
551 max_initiate = self.config['max_initiate']
552 else:
553 max_initiate = int(self.config['max_initiate']*1.5)
554 cons = len(self.connections)
555 if cons >= self.max_connections or cons >= max_initiate:
556 delay = 60
557 elif self.paused or incompletecounter.toomany():
558 delay = 1
559 else:
560 delay = 0
561 dns, id, encrypted = self.to_connect.pop(0)
562 self.start_connection(dns, id, encrypted)
563 if self.to_connect:
564 self.raw_server.add_task(self._start_connection_from_queue, delay)
565
566 def start_connection(self, dns, id, encrypted = None):
567 if ( self.paused
568 or len(self.connections) >= self.max_connections
569 or id == self.my_id
570 or not self.check_ip(ip=dns[0]) ):
571 return True
572 if self.config['crypto_only']:
573 if encrypted is None or encrypted: # fails on encrypted = 0
574 encrypted = True
575 else:
576 return True
577 for v in self.connections.values():
578 if v is None:
579 continue
580 if id and v.id == id:
581 return True
582 ip = v.get_ip(True)
583 if self.config['security'] and ip != 'unknown' and ip == dns[0]:
584 return True
585 try:
586 c = self.raw_server.start_connection(dns)
587 con = Connection(self, c, id, encrypted = encrypted)
588 self.connections[c] = con
589 c.set_handler(con)
590 except socketerror:
591 return False
592 return True
593
594 def _start_connection(self, dns, id, encrypted = None):
595 def foo(self=self, dns=dns, id=id, encrypted=encrypted):
596 self.start_connection(dns, id, encrypted)
597 self.schedulefunc(foo, 0)
598
599 def check_ip(self, connection=None, ip=None):
600 if not ip:
601 ip = connection.get_ip(True)
602 if self.config['security'] and self.banned.has_key(ip):
603 return False
604 if self.external_bans.includes(ip):
605 return False
606 return True
607
608 def got_id(self, connection):
609 if connection.id == self.my_id:
610 self.connecter.external_connection_made -= 1
611 return False
612 ip = connection.get_ip(True)
613 for v in self.connections.values():
614 if connection is not v:
615 if connection.id == v.id:
616 if ip == v.get_ip(True):
617 v.close()
618 else:
619 return False
620 if self.config['security'] and ip != 'unknown' and ip == v.get_ip(True):
621 v.close()
622 return True
623
624 def external_connection_made(self, connection):
625 if self.paused or len(self.connections) >= self.max_connections:
626 connection.close()
627 return False
628 con = Connection(self, connection, None)
629 self.connections[connection] = con
630 connection.set_handler(con)
631 return True
632
633 def externally_handshaked_connection_made(self, connection, options,
634 already_read, encrypted = None):
635 if ( self.paused
636 or len(self.connections) >= self.max_connections
637 or not self.check_ip(connection=connection) ):
638 connection.close()
639 return False
640 con = Connection(self, connection, None,
641 ext_handshake = True, encrypted = encrypted, options = options)
642 self.connections[connection] = con
643 connection.set_handler(con)
644 if already_read:
645 con.data_came_in(con, already_read)
646 return True
647
648 def close_all(self):
649 for c in self.connections.values():
650 c.close()
651 self.connections = {}
652
653 def ban(self, ip):
654 self.banned[ip] = 1
655
656 def pause(self, flag):
657 self.paused = flag
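send_keepalives above re-arms itself through schedulefunc before touching any connection, so one slow pass can never break the timer chain. A toy sketch of that self-rescheduling pattern on top of the standard sched module (hypothetical names, invented delays):

    import sched, time

    scheduler = sched.scheduler(time.time, time.sleep)
    ticks = [3]   # number of re-arms left before the chain stops

    def send_keepalives(delay=0.1):
        if ticks[0] > 0:        # re-arm first, exactly as Encoder does
            ticks[0] -= 1
            scheduler.enter(delay, 0, send_keepalives, (delay,))
        print('keepalive tick')

    send_keepalives()
    scheduler.run()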
=== removed file '.pc/09_timtuckerfixes.dpatch/BitTornado/BT1/Storage.py'
--- .pc/09_timtuckerfixes.dpatch/BitTornado/BT1/Storage.py 2010-03-21 14:36:30 +0000
+++ .pc/09_timtuckerfixes.dpatch/BitTornado/BT1/Storage.py 1970-01-01 00:00:00 +0000
@@ -1,584 +0,0 @@
1# Written by Bram Cohen
2# see LICENSE.txt for license information
3
4from BitTornado.piecebuffer import BufferPool
5from threading import Lock
6from time import time, strftime, localtime
7import os
8from os.path import exists, getsize, getmtime, basename
9from traceback import print_exc
10try:
11 from os import fsync
12except ImportError:
13 fsync = lambda x: None
14from bisect import bisect
15
16try:
17 True
18except:
19 True = 1
20 False = 0
21
22DEBUG = False
23
24MAXREADSIZE = 32768
25MAXLOCKSIZE = 1000000000L
26MAXLOCKRANGE = 3999999999L # only lock first 4 gig of file
27
28_pool = BufferPool()
29PieceBuffer = _pool.new
30
31def dummy_status(fractionDone = None, activity = None):
32 pass
33
34class Storage:
35 def __init__(self, files, piece_length, doneflag, config,
36 disabled_files = None):
37 # can raise IOError and ValueError
38 self.files = files
39 self.piece_length = piece_length
40 self.doneflag = doneflag
41 self.disabled = [False] * len(files)
42 self.file_ranges = []
43 self.disabled_ranges = []
44 self.working_ranges = []
45 numfiles = 0
46 total = 0l
47 so_far = 0l
48 self.handles = {}
49 self.whandles = {}
50 self.tops = {}
51 self.sizes = {}
52 self.mtimes = {}
53 if config.get('lock_files', True):
54 self.lock_file, self.unlock_file = self._lock_file, self._unlock_file
55 else:
56 self.lock_file, self.unlock_file = lambda x1,x2: None, lambda x1,x2: None
57 self.lock_while_reading = config.get('lock_while_reading', False)
58 self.lock = Lock()
59
60 if not disabled_files:
61 disabled_files = [False] * len(files)
62
63 for i in xrange(len(files)):
64 file, length = files[i]
65 if doneflag.isSet(): # bail out if doneflag is set
66 return
67 self.disabled_ranges.append(None)
68 if length == 0:
69 self.file_ranges.append(None)
70 self.working_ranges.append([])
71 else:
72 range = (total, total + length, 0, file)
73 self.file_ranges.append(range)
74 self.working_ranges.append([range])
75 numfiles += 1
76 total += length
77 if disabled_files[i]:
78 l = 0
79 else:
80 if exists(file):
81 l = getsize(file)
82 if l > length:
83 h = open(file, 'rb+')
84 h.truncate(length)
85 h.flush()
86 h.close()
87 l = length
88 else:
89 l = 0
90 h = open(file, 'wb+')
91 h.flush()
92 h.close()
93 self.mtimes[file] = getmtime(file)
94 self.tops[file] = l
95 self.sizes[file] = length
96 so_far += l
97
98 self.total_length = total
99 self._reset_ranges()
100
101 self.max_files_open = config['max_files_open']
102 if self.max_files_open > 0 and numfiles > self.max_files_open:
103 self.handlebuffer = []
104 else:
105 self.handlebuffer = None
106
107
108 if os.name == 'nt':
109 def _lock_file(self, name, f):
110 import msvcrt
111 for p in range(0, min(self.sizes[name],MAXLOCKRANGE), MAXLOCKSIZE):
112 f.seek(p)
113 msvcrt.locking(f.fileno(), msvcrt.LK_LOCK,
114 min(MAXLOCKSIZE,self.sizes[name]-p))
115
116 def _unlock_file(self, name, f):
117 import msvcrt
118 for p in range(0, min(self.sizes[name],MAXLOCKRANGE), MAXLOCKSIZE):
119 f.seek(p)
120 msvcrt.locking(f.fileno(), msvcrt.LK_UNLCK,
121 min(MAXLOCKSIZE,self.sizes[name]-p))
122
123 elif os.name == 'posix':
124 def _lock_file(self, name, f):
125 import fcntl
126 fcntl.flock(f.fileno(), fcntl.LOCK_EX)
127
128 def _unlock_file(self, name, f):
129 import fcntl
130 fcntl.flock(f.fileno(), fcntl.LOCK_UN)
131
132 else:
133 def _lock_file(self, name, f):
134 pass
135 def _unlock_file(self, name, f):
136 pass
137
138
139 def was_preallocated(self, pos, length):
140 for file, begin, end in self._intervals(pos, length):
141 if self.tops.get(file, 0) < end:
142 return False
143 return True
144
145
146 def _sync(self, file):
147 self._close(file)
148 if self.handlebuffer:
149 self.handlebuffer.remove(file)
150
151 def sync(self):
152 # may raise IOError or OSError
153 for file in self.whandles.keys():
154 self._sync(file)
155
156
157 def set_readonly(self, f=None):
158 if f is None:
159 self.sync()
160 return
161 file = self.files[f][0]
162 if self.whandles.has_key(file):
163 self._sync(file)
164
165
166 def get_total_length(self):
167 return self.total_length
168
169
170 def _open(self, file, mode):
171 if self.mtimes.has_key(file):
172 try:
173 if self.handlebuffer is not None:
174 assert getsize(file) == self.tops[file]
175 newmtime = getmtime(file)
176 oldmtime = self.mtimes[file]
177 assert newmtime <= oldmtime+1
178 assert newmtime >= oldmtime-1
179 except:
180 if DEBUG:
181 print ( file+' modified: '
182 +strftime('(%x %X)',localtime(self.mtimes[file]))
183 +strftime(' != (%x %X) ?',localtime(getmtime(file))) )
184 raise IOError('modified during download')
185 try:
186 return open(file, mode)
187 except:
188 if DEBUG:
189 print_exc()
190 raise
191
192
193 def _close(self, file):
194 f = self.handles[file]
195 del self.handles[file]
196 if self.whandles.has_key(file):
197 del self.whandles[file]
198 f.flush()
199 self.unlock_file(file, f)
200 f.close()
201 self.tops[file] = getsize(file)
202 self.mtimes[file] = getmtime(file)
203 else:
204 if self.lock_while_reading:
205 self.unlock_file(file, f)
206 f.close()
207
208
209 def _close_file(self, file):
210 if not self.handles.has_key(file):
211 return
212 self._close(file)
213 if self.handlebuffer:
214 self.handlebuffer.remove(file)
215
216
217 def _get_file_handle(self, file, for_write):
218 if self.handles.has_key(file):
219 if for_write and not self.whandles.has_key(file):
220 self._close(file)
221 try:
222 f = self._open(file, 'rb+')
223 self.handles[file] = f
224 self.whandles[file] = 1
225 self.lock_file(file, f)
226 except (IOError, OSError), e:
227 if DEBUG:
228 print_exc()
229 raise IOError('unable to reopen '+file+': '+str(e))
230
231 if self.handlebuffer:
232 if self.handlebuffer[-1] != file:
233 self.handlebuffer.remove(file)
234 self.handlebuffer.append(file)
235 elif self.handlebuffer is not None:
236 self.handlebuffer.append(file)
237 else:
238 try:
239 if for_write:
240 f = self._open(file, 'rb+')
241 self.handles[file] = f
242 self.whandles[file] = 1
243 self.lock_file(file, f)
244 else:
245 f = self._open(file, 'rb')
246 self.handles[file] = f
247 if self.lock_while_reading:
248 self.lock_file(file, f)
249 except (IOError, OSError), e:
250 if DEBUG:
251 print_exc()
252 raise IOError('unable to open '+file+': '+str(e))
253
254 if self.handlebuffer is not None:
255 self.handlebuffer.append(file)
256 if len(self.handlebuffer) > self.max_files_open:
257 self._close(self.handlebuffer.pop(0))
258
259 return self.handles[file]
260
261
262 def _reset_ranges(self):
263 self.ranges = []
264 for l in self.working_ranges:
265 self.ranges.extend(l)
266 self.begins = [i[0] for i in self.ranges]
267
268 def _intervals(self, pos, amount):
269 r = []
270 stop = pos + amount
271 p = bisect(self.begins, pos) - 1
272 while p < len(self.ranges):
273 begin, end, offset, file = self.ranges[p]
274 if begin >= stop:
275 break
276 r.append(( file,
277 offset + max(pos, begin) - begin,
278 offset + min(end, stop) - begin ))
279 p += 1
280 return r
281
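_intervals above maps a torrent-wide byte range onto per-file (file, begin, end) spans, using bisect over the cached range starts to find the first file touched. A worked example with an invented two-file layout:

    from bisect import bisect

    # file 'a' holds torrent bytes [0, 100), file 'b' holds [100, 250)
    ranges = [(0, 100, 0, 'a'), (100, 250, 0, 'b')]
    begins = [r[0] for r in ranges]

    pos, amount = 90, 30                  # a read that crosses the file boundary
    stop = pos + amount
    out, p = [], bisect(begins, pos) - 1
    while p < len(ranges):
        begin, end, offset, name = ranges[p]
        if begin >= stop:
            break
        out.append((name, offset + max(pos, begin) - begin,
                    offset + min(end, stop) - begin))
        p += 1
    print(out)   # [('a', 90, 100), ('b', 0, 20)]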
282
283 def read(self, pos, amount, flush_first = False):
284 r = PieceBuffer()
285 for file, pos, end in self._intervals(pos, amount):
286 if DEBUG:
287 print 'reading '+file+' from '+str(pos)+' to '+str(end)
288 self.lock.acquire()
289 h = self._get_file_handle(file, False)
290 if flush_first and self.whandles.has_key(file):
291 h.flush()
292 fsync(h)
293 h.seek(pos)
294 while pos < end:
295 length = min(end-pos, MAXREADSIZE)
296 data = h.read(length)
297 if len(data) != length:
298 raise IOError('error reading data from '+file)
299 r.append(data)
300 pos += length
301 self.lock.release()
302 return r
303
304 def write(self, pos, s):
305 # might raise an IOError
306 total = 0
307 for file, begin, end in self._intervals(pos, len(s)):
308 if DEBUG:
309 print 'writing '+file+' from '+str(pos)+' to '+str(end)
310 self.lock.acquire()
311 h = self._get_file_handle(file, True)
312 h.seek(begin)
313 h.write(s[total: total + end - begin])
314 self.lock.release()
315 total += end - begin
316
317 def top_off(self):
318 for begin, end, offset, file in self.ranges:
319 l = offset + end - begin
320 if l > self.tops.get(file, 0):
321 self.lock.acquire()
322 h = self._get_file_handle(file, True)
323 h.seek(l-1)
324 h.write(chr(0xFF))
325 self.lock.release()
326
327 def flush(self):
328 # may raise IOError or OSError
329 for file in self.whandles.keys():
330 self.lock.acquire()
331 self.handles[file].flush()
332 self.lock.release()
333
334 def close(self):
335 for file, f in self.handles.items():
336 try:
337 self.unlock_file(file, f)
338 except:
339 pass
340 try:
341 f.close()
342 except:
343 pass
344 self.handles = {}
345 self.whandles = {}
346 self.handlebuffer = None
347
348
349 def _get_disabled_ranges(self, f):
350 if not self.file_ranges[f]:
351 return ((),(),())
352 r = self.disabled_ranges[f]
353 if r:
354 return r
355 start, end, offset, file = self.file_ranges[f]
356 if DEBUG:
357 print 'calculating disabled range for '+self.files[f][0]
358 print 'bytes: '+str(start)+'-'+str(end)
359 print 'file spans pieces '+str(int(start/self.piece_length))+'-'+str(int((end-1)/self.piece_length)+1)
360 pieces = range( int(start/self.piece_length),
361 int((end-1)/self.piece_length)+1 )
362 offset = 0
363 disabled_files = []
364 if len(pieces) == 1:
365 if ( start % self.piece_length == 0
366 and end % self.piece_length == 0 ): # happens to be a single,
367 # perfect piece
368 working_range = [(start, end, offset, file)]
369 update_pieces = []
370 else:
371 midfile = os.path.join(self.bufferdir,str(f))
372 working_range = [(start, end, 0, midfile)]
373 disabled_files.append((midfile, start, end))
374 length = end - start
375 self.sizes[midfile] = length
376 piece = pieces[0]
377 update_pieces = [(piece, start-(piece*self.piece_length), length)]
378 else:
379 update_pieces = []
380 if start % self.piece_length != 0: # doesn't begin on an even piece boundary
381 end_b = pieces[1]*self.piece_length
382 startfile = os.path.join(self.bufferdir,str(f)+'b')
383 working_range_b = [ ( start, end_b, 0, startfile ) ]
384 disabled_files.append((startfile, start, end_b))
385 length = end_b - start
386 self.sizes[startfile] = length
387 offset = length
388 piece = pieces.pop(0)
389 update_pieces.append((piece, start-(piece*self.piece_length), length))
390 else:
391 working_range_b = []
392 if f != len(self.files)-1 and end % self.piece_length != 0:
393 # doesn't end on an even piece boundary
394 start_e = pieces[-1] * self.piece_length
395 endfile = os.path.join(self.bufferdir,str(f)+'e')
396 working_range_e = [ ( start_e, end, 0, endfile ) ]
397 disabled_files.append((endfile, start_e, end))
398 length = end - start_e
399 self.sizes[endfile] = length
400 piece = pieces.pop(-1)
401 update_pieces.append((piece, 0, length))
402 else:
403 working_range_e = []
404 if pieces:
405 working_range_m = [ ( pieces[0]*self.piece_length,
406 (pieces[-1]+1)*self.piece_length,
407 offset, file ) ]
408 else:
409 working_range_m = []
410 working_range = working_range_b + working_range_m + working_range_e
411
412 if DEBUG:
413 print str(working_range)
414 print str(update_pieces)
415 r = (tuple(working_range), tuple(update_pieces), tuple(disabled_files))
416 self.disabled_ranges[f] = r
417 return r
418
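The branches in _get_disabled_ranges above all reduce to one computation: which pieces the byte range [start, end) of a file touches, and whether its ends fall on piece boundaries (which decides whether 'b' and 'e' buffer files are needed). A worked example with invented numbers:

    piece_length = 10
    start, end = 25, 47                          # byte range of one file
    pieces = range(int(start / piece_length), int((end - 1) / piece_length) + 1)
    print(list(pieces))                          # [2, 3, 4] -- pieces touched
    print(start % piece_length == 0)             # False: a 'b' buffer file is needed
    print(end % piece_length == 0)               # False: an 'e' buffer file is needed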
419
420 def set_bufferdir(self, dir):
421 self.bufferdir = dir
422
423 def enable_file(self, f):
424 if not self.disabled[f]:
425 return
426 self.disabled[f] = False
427 r = self.file_ranges[f]
428 if not r:
429 return
430 file = r[3]
431 if not exists(file):
432 h = open(file, 'wb+')
433 h.flush()
434 h.close()
435 if not self.tops.has_key(file):
436 self.tops[file] = getsize(file)
437 if not self.mtimes.has_key(file):
438 self.mtimes[file] = getmtime(file)
439 self.working_ranges[f] = [r]
440
441 def disable_file(self, f):
442 if self.disabled[f]:
443 return
444 self.disabled[f] = True
445 r = self._get_disabled_ranges(f)
446 if not r:
447 return
448 for file, begin, end in r[2]:
449 if not os.path.isdir(self.bufferdir):
450 os.makedirs(self.bufferdir)
451 if not exists(file):
452 h = open(file, 'wb+')
453 h.flush()
454 h.close()
455 if not self.tops.has_key(file):
456 self.tops[file] = getsize(file)
457 if not self.mtimes.has_key(file):
458 self.mtimes[file] = getmtime(file)
459 self.working_ranges[f] = r[0]
460
461 reset_file_status = _reset_ranges
462
463
464 def get_piece_update_list(self, f):
465 return self._get_disabled_ranges(f)[1]
466
467
468 def delete_file(self, f):
469 try:
470 os.remove(self.files[f][0])
471 except:
472 pass
473
474
475 '''
476 Pickled data format:
477
478 d['files'] = [ file #, size, mtime {, file #, size, mtime...} ]
479 file # in torrent, and the size and last modification
480 time for those files. Missing files are either empty
481 or disabled.
482 d['partial files'] = [ name, size, mtime... ]
483 Names, sizes and last modification times of files containing
484 partial piece data. Filenames go by the following convention:
485 {file #, 0-based}{nothing, "b" or "e"}
486 eg: "0e" "3" "4b" "4e"
487 Where "b" specifies the partial data for the first piece in
488 the file, "e" the last piece, and no letter signifying that
489 the file is disabled but is smaller than one piece, and that
490 all the data is cached inside so adjacent files may be
491 verified.
492 '''
493 def pickle(self):
494 files = []
495 pfiles = []
496 for i in xrange(len(self.files)):
497 if not self.files[i][1]: # length == 0
498 continue
499 if self.disabled[i]:
500 for file, start, end in self._get_disabled_ranges(i)[2]:
501 pfiles.extend([basename(file),getsize(file),int(getmtime(file))])
502 continue
503 file = self.files[i][0]
504 files.extend([i,getsize(file),int(getmtime(file))])
505 return {'files': files, 'partial files': pfiles}
506
507
508 def unpickle(self, data):
509 # assume all previously-disabled files have already been disabled
510 try:
511 files = {}
512 pfiles = {}
513 l = data['files']
514 assert len(l) % 3 == 0
515 l = [l[x:x+3] for x in xrange(0,len(l),3)]
516 for f, size, mtime in l:
517 files[f] = (size, mtime)
518 l = data.get('partial files',[])
519 assert len(l) % 3 == 0
520 l = [l[x:x+3] for x in xrange(0,len(l),3)]
521 for file, size, mtime in l:
522 pfiles[file] = (size, mtime)
523
524 valid_pieces = {}
525 for i in xrange(len(self.files)):
526 if self.disabled[i]:
527 continue
528 r = self.file_ranges[i]
529 if not r:
530 continue
531 start, end, offset, file =r
532 if DEBUG:
533 print 'adding '+file
534 for p in xrange( int(start/self.piece_length),
535 int((end-1)/self.piece_length)+1 ):
536 valid_pieces[p] = 1
537
538 if DEBUG:
539 print valid_pieces.keys()
540
541 def test(old, size, mtime):
542 oldsize, oldmtime = old
543 if size != oldsize:
544 return False
545 if mtime > oldmtime+1:
546 return False
547 if mtime < oldmtime-1:
548 return False
549 return True
550
551 for i in xrange(len(self.files)):
552 if self.disabled[i]:
553 for file, start, end in self._get_disabled_ranges(i)[2]:
554 f1 = basename(file)
555 if ( not pfiles.has_key(f1)
556 or not test(pfiles[f1],getsize(file),getmtime(file)) ):
557 if DEBUG:
558 print 'removing '+file
559 for p in xrange( int(start/self.piece_length),
560 int((end-1)/self.piece_length)+1 ):
561 if valid_pieces.has_key(p):
562 del valid_pieces[p]
563 continue
564 file, size = self.files[i]
565 if not size:
566 continue
567 if ( not files.has_key(i)
568 or not test(files[i],getsize(file),getmtime(file)) ):
569 start, end, offset, file = self.file_ranges[i]
570 if DEBUG:
571 print 'removing '+file
572 for p in xrange( int(start/self.piece_length),
573 int((end-1)/self.piece_length)+1 ):
574 if valid_pieces.has_key(p):
575 del valid_pieces[p]
576 except:
577 if DEBUG:
578 print_exc()
579 return []
580
581 if DEBUG:
582 print valid_pieces.keys()
583 return valid_pieces.keys()
584
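The 'Pickled data format' docstring above flattens (file #, size, mtime) triples into one flat list. A hypothetical example of the resume dict pickle() would produce for a small torrent (all values invented), plus the regrouping step unpickle() applies:

    state = {
        'files': [0, 262144, 1299876543,             # file #0: size, mtime
                  1, 1048576, 1299876544],           # file #1: size, mtime
        'partial files': ['2b', 16384, 1299876545],  # buffered first piece of file #2
    }
    l = state['files']
    print([l[x:x + 3] for x in range(0, len(l), 3)])
    # [[0, 262144, 1299876543], [1, 1048576, 1299876544]]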
=== removed file '.pc/09_timtuckerfixes.dpatch/BitTornado/BT1/StreamCheck.py'
--- .pc/09_timtuckerfixes.dpatch/BitTornado/BT1/StreamCheck.py 2010-03-21 14:36:30 +0000
+++ .pc/09_timtuckerfixes.dpatch/BitTornado/BT1/StreamCheck.py 1970-01-01 00:00:00 +0000
@@ -1,135 +0,0 @@
1# Written by Bram Cohen
2# see LICENSE.txt for license information
3
4from cStringIO import StringIO
5from binascii import b2a_hex
6from socket import error as socketerror
7from urllib import quote
8from traceback import print_exc
9import Connecter
10try:
11 True
12except:
13 True = 1
14 False = 0
15
16DEBUG = False
17
18
19protocol_name = 'BitTorrent protocol'
20option_pattern = chr(0)*8
21
22def toint(s):
23 return long(b2a_hex(s), 16)
24
25def tobinary(i):
26 return (chr(i >> 24) + chr((i >> 16) & 0xFF) +
27 chr((i >> 8) & 0xFF) + chr(i & 0xFF))
28
29hexchars = '0123456789ABCDEF'
30hexmap = []
31for i in xrange(256):
32 hexmap.append(hexchars[(i&0xF0)/16]+hexchars[i&0x0F])
33
34def tohex(s):
35 r = []
36 for c in s:
37 r.append(hexmap[ord(c)])
38 return ''.join(r)
39
40def make_readable(s):
41 if not s:
42 return ''
43 if quote(s).find('%') >= 0:
44 return tohex(s)
45 return '"'+s+'"'
46
47def toint(s):
48 return long(b2a_hex(s), 16)
49
50# header, reserved, download id, my id, [length, message]
51
52streamno = 0
53
54
55class StreamCheck:
56 def __init__(self):
57 global streamno
58 self.no = streamno
59 streamno += 1
60 self.buffer = StringIO()
61 self.next_len, self.next_func = 1, self.read_header_len
62
63 def read_header_len(self, s):
64 if ord(s) != len(protocol_name):
65 print self.no, 'BAD HEADER LENGTH'
66 return len(protocol_name), self.read_header
67
68 def read_header(self, s):
69 if s != protocol_name:
70 print self.no, 'BAD HEADER'
71 return 8, self.read_reserved
72
73 def read_reserved(self, s):
74 return 20, self.read_download_id
75
76 def read_download_id(self, s):
77 if DEBUG:
78 print self.no, 'download ID ' + tohex(s)
79 return 20, self.read_peer_id
80
81 def read_peer_id(self, s):
82 if DEBUG:
83 print self.no, 'peer ID' + make_readable(s)
84 return 4, self.read_len
85
86 def read_len(self, s):
87 l = toint(s)
88 if l > 2 ** 23:
89 print self.no, 'BAD LENGTH: '+str(l)+' ('+s+')'
90 return l, self.read_message
91
92 def read_message(self, s):
93 if not s:
94 return 4, self.read_len
95 m = s[0]
96 if ord(m) > 8:
97 print self.no, 'BAD MESSAGE: '+str(ord(m))
98 if m == Connecter.REQUEST:
99 if len(s) != 13:
100 print self.no, 'BAD REQUEST SIZE: '+str(len(s))
101 return 4, self.read_len
102 index = toint(s[1:5])
103 begin = toint(s[5:9])
104 length = toint(s[9:])
105 print self.no, 'Request: '+str(index)+': '+str(begin)+'-'+str(begin)+'+'+str(length)
106 elif m == Connecter.CANCEL:
107 if len(s) != 13:
108 print self.no, 'BAD CANCEL SIZE: '+str(len(s))
109 return 4, self.read_len
110 index = toint(s[1:5])
111 begin = toint(s[5:9])
112 length = toint(s[9:])
113 print self.no, 'Cancel: '+str(index)+': '+str(begin)+'-'+str(begin)+'+'+str(length)
114 elif m == Connecter.PIECE:
115 index = toint(s[1:5])
116 begin = toint(s[5:9])
117 length = len(s)-9
118 print self.no, 'Piece: '+str(index)+': '+str(begin)+'-'+str(begin)+'+'+str(length)
119 else:
120 print self.no, 'Message '+str(ord(m))+' (length '+str(len(s))+')'
121 return 4, self.read_len
122
123 def write(self, s):
124 while True:
125 i = self.next_len - self.buffer.tell()
126 if i > len(s):
127 self.buffer.write(s)
128 return
129 self.buffer.write(s[:i])
130 s = s[i:]
131 m = self.buffer.getvalue()
132 self.buffer.reset()
133 self.buffer.truncate()
134 x = self.next_func(m)
135 self.next_len, self.next_func = x
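StreamCheck walks the standard BitTorrent handshake laid out in the '# header, reserved, download id, my id, [length, message]' comment above. A sketch that assembles one such handshake with dummy 20-byte hashes (illustrative only):

    protocol_name = 'BitTorrent protocol'
    handshake = (chr(len(protocol_name)) + protocol_name  # 1 + 19 bytes
                 + chr(0) * 8                             # reserved option bits
                 + 'H' * 20                               # dummy download (info) hash
                 + 'P' * 20)                              # dummy peer id
    assert len(handshake) == 68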
=== removed file '.pc/09_timtuckerfixes.dpatch/BitTornado/ConfigDir.py'
--- .pc/09_timtuckerfixes.dpatch/BitTornado/ConfigDir.py 2010-03-21 14:36:30 +0000
+++ .pc/09_timtuckerfixes.dpatch/BitTornado/ConfigDir.py 1970-01-01 00:00:00 +0000
@@ -1,401 +0,0 @@
1#written by John Hoffman
2
3from inifile import ini_write, ini_read
4from bencode import bencode, bdecode
5from types import IntType, LongType, StringType, FloatType
6from CreateIcons import GetIcons, CreateIcon
7from parseargs import defaultargs
8from __init__ import product_name, version_short
9import sys,os
10from time import time, strftime
11
12try:
13 True
14except:
15 True = 1
16 False = 0
17
18try:
19 realpath = os.path.realpath
20except:
21 realpath = lambda x:x
22OLDICONPATH = os.path.abspath(os.path.dirname(realpath(sys.argv[0])))
23
24DIRNAME = '.'+product_name
25
26hexchars = '0123456789abcdef'
27hexmap = []
28revmap = {}
29for i in xrange(256):
30 x = hexchars[(i&0xF0)/16]+hexchars[i&0x0F]
31 hexmap.append(x)
32 revmap[x] = chr(i)
33
34def tohex(s):
35 r = []
36 for c in s:
37 r.append(hexmap[ord(c)])
38 return ''.join(r)
39
40def unhex(s):
41 r = [ revmap[s[x:x+2]] for x in xrange(0, len(s), 2) ]
42 return ''.join(r)
43
44def copyfile(oldpath, newpath): # simple file copy, all in RAM
45 try:
46 f = open(oldpath,'rb')
47 r = f.read()
48 success = True
49 except:
50 success = False
51 try:
52 f.close()
53 except:
54 pass
55 if not success:
56 return False
57 try:
58 f = open(newpath,'wb')
59 f.write(r)
60 except:
61 success = False
62 try:
63 f.close()
64 except:
65 pass
66 return success
67
68
69class ConfigDir:
70
71 ###### INITIALIZATION TASKS ######
72
73 def __init__(self, config_type = None):
74 self.config_type = config_type
75 if config_type:
76 config_ext = '.'+config_type
77 else:
78 config_ext = ''
79
80 def check_sysvars(x):
81 y = os.path.expandvars(x)
82 if y != x and os.path.isdir(y):
83 return y
84 return None
85
86 for d in ['${APPDATA}', '${HOME}', '${HOMEPATH}', '${USERPROFILE}']:
87 dir_root = check_sysvars(d)
88 if dir_root:
89 break
90 else:
91 dir_root = os.path.expanduser('~')
92 if not os.path.isdir(dir_root):
93 dir_root = os.path.abspath(os.path.dirname(sys.argv[0]))
94
95 dir_root = os.path.join(dir_root,DIRNAME)
96 self.dir_root = dir_root
97
98 if not os.path.isdir(self.dir_root):
99 os.mkdir(self.dir_root,0700) # exception if failed
100
101 self.dir_icons = os.path.join(dir_root,'icons')
102 if not os.path.isdir(self.dir_icons):
103 os.mkdir(self.dir_icons)
104 for icon in GetIcons():
105 i = os.path.join(self.dir_icons,icon)
106 if not os.path.exists(i):
107 if not copyfile(os.path.join(OLDICONPATH,icon),i):
108 CreateIcon(icon,self.dir_icons)
109
110 self.dir_torrentcache = os.path.join(dir_root,'torrentcache')
111 if not os.path.isdir(self.dir_torrentcache):
112 os.mkdir(self.dir_torrentcache)
113
114 self.dir_datacache = os.path.join(dir_root,'datacache')
115 if not os.path.isdir(self.dir_datacache):
116 os.mkdir(self.dir_datacache)
117
118 self.dir_piececache = os.path.join(dir_root,'piececache')
119 if not os.path.isdir(self.dir_piececache):
120 os.mkdir(self.dir_piececache)
121
122 self.configfile = os.path.join(dir_root,'config'+config_ext+'.ini')
123 self.statefile = os.path.join(dir_root,'state'+config_ext)
124
125 self.TorrentDataBuffer = {}
126
127
128 ###### CONFIG HANDLING ######
129
130 def setDefaults(self, defaults, ignore=[]):
131 self.config = defaultargs(defaults)
132 for k in ignore:
133 if self.config.has_key(k):
134 del self.config[k]
135
136 def checkConfig(self):
137 return os.path.exists(self.configfile)
138
139 def loadConfig(self):
140 try:
141 r = ini_read(self.configfile)['']
142 except:
143 return self.config
144 l = self.config.keys()
145 for k,v in r.items():
146 if self.config.has_key(k):
147 t = type(self.config[k])
148 try:
149 if t == StringType:
150 self.config[k] = v
151 elif t == IntType or t == LongType:
152 self.config[k] = long(v)
153 elif t == FloatType:
154 self.config[k] = float(v)
155 l.remove(k)
156 except:
157 pass
158 if l: # new default values since last save
159 self.saveConfig()
160 return self.config
161
162 def saveConfig(self, new_config = None):
163 if new_config:
164 for k,v in new_config.items():
165 if self.config.has_key(k):
166 self.config[k] = v
167 try:
168 ini_write( self.configfile, self.config,
169 'Generated by '+product_name+'/'+version_short+'\n'
170 + strftime('%x %X') )
171 return True
172 except:
173 return False
174
175 def getConfig(self):
176 return self.config
177
178
179 ###### STATE HANDLING ######
180
181 def getState(self):
182 try:
183 f = open(self.statefile,'rb')
184 r = f.read()
185 except:
186 r = None
187 try:
188 f.close()
189 except:
190 pass
191 try:
192 r = bdecode(r)
193 except:
194 r = None
195 return r
196
197 def saveState(self, state):
198 try:
199 f = open(self.statefile,'wb')
200 f.write(bencode(state))
201 success = True
202 except:
203 success = False
204 try:
205 f.close()
206 except:
207 pass
208 return success
209
210
211 ###### TORRENT HANDLING ######
212
213 def getTorrents(self):
214 d = {}
215 for f in os.listdir(self.dir_torrentcache):
216 f = os.path.basename(f)
217 try:
218 f, garbage = f.split('.')
219 except:
220 pass
221 d[unhex(f)] = 1
222 return d.keys()
223
224 def getTorrentVariations(self, t):
225 t = tohex(t)
226 d = []
227 for f in os.listdir(self.dir_torrentcache):
228 f = os.path.basename(f)
229 if f[:len(t)] == t:
230 try:
231 garbage, ver = f.split('.')
232 except:
233 ver = '0'
234 d.append(int(ver))
235 d.sort()
236 return d
237
238 def getTorrent(self, t, v = -1):
239 t = tohex(t)
240 if v == -1:
241 v = max(self.getTorrentVariations(t)) # potential exception
242 if v:
243 t += '.'+str(v)
244 try:
245 f = open(os.path.join(self.dir_torrentcache,t),'rb')
246 r = bdecode(f.read())
247 except:
248 r = None
249 try:
250 f.close()
251 except:
252 pass
253 return r
254
255 def writeTorrent(self, data, t, v = -1):
256 t = tohex(t)
257 if v == -1:
258 try:
259 v = max(self.getTorrentVariations(t))+1
260 except:
261 v = 0
262 if v:
263 t += '.'+str(v)
264 try:
265 f = open(os.path.join(self.dir_torrentcache,t),'wb')
266 f.write(bencode(data))
267 except:
268 v = None
269 try:
270 f.close()
271 except:
272 pass
273 return v
274
275
276 ###### TORRENT DATA HANDLING ######
277
278 def getTorrentData(self, t):
279 if self.TorrentDataBuffer.has_key(t):
280 return self.TorrentDataBuffer[t]
281 t = os.path.join(self.dir_datacache,tohex(t))
282 if not os.path.exists(t):
283 return None
284 try:
285 f = open(t,'rb')
286 r = bdecode(f.read())
287 except:
288 r = None
289 try:
290 f.close()
291 except:
292 pass
293 self.TorrentDataBuffer[t] = r
294 return r
295
296 def writeTorrentData(self, t, data):
297 self.TorrentDataBuffer[t] = data
298 try:
299 f = open(os.path.join(self.dir_datacache,tohex(t)),'wb')
300 f.write(bencode(data))
301 success = True
302 except:
303 success = False
304 try:
305 f.close()
306 except:
307 pass
308 if not success:
309 self.deleteTorrentData(t)
310 return success
311
312 def deleteTorrentData(self, t):
313 try:
314 os.remove(os.path.join(self.dir_datacache,tohex(t)))
315 except:
316 pass
317
318 def getPieceDir(self, t):
319 return os.path.join(self.dir_piececache,tohex(t))
320
321
322 ###### EXPIRATION HANDLING ######
323
324 def deleteOldCacheData(self, days, still_active = [], delete_torrents = False):
325 if not days:
326 return
327 exptime = time() - (days*24*3600)
328 names = {}
329 times = {}
330
331 for f in os.listdir(self.dir_torrentcache):
332 p = os.path.join(self.dir_torrentcache,f)
333 f = os.path.basename(f)
334 try:
335 f, garbage = f.split('.')
336 except:
337 pass
338 try:
339 f = unhex(f)
340 assert len(f) == 20
341 except:
342 continue
343 if delete_torrents:
344 names.setdefault(f,[]).append(p)
345 try:
346 t = os.path.getmtime(p)
347 except:
348 t = time()
349 times.setdefault(f,[]).append(t)
350
351 for f in os.listdir(self.dir_datacache):
352 p = os.path.join(self.dir_datacache,f)
353 try:
354 f = unhex(os.path.basename(f))
355 assert len(f) == 20
356 except:
357 continue
358 names.setdefault(f,[]).append(p)
359 try:
360 t = os.path.getmtime(p)
361 except:
362 t = time()
363 times.setdefault(f,[]).append(t)
364
365 for f in os.listdir(self.dir_piececache):
366 p = os.path.join(self.dir_piececache,f)
367 try:
368 f = unhex(os.path.basename(f))
369 assert len(f) == 20
370 except:
371 continue
372 for f2 in os.listdir(p):
373 p2 = os.path.join(p,f2)
374 names.setdefault(f,[]).append(p2)
375 try:
376 t = os.path.getmtime(p2)
377 except:
378 t = time()
379 times.setdefault(f,[]).append(t)
380 names.setdefault(f,[]).append(p)
381
382 for k,v in times.items():
383 if max(v) < exptime and not k in still_active:
384 for f in names[k]:
385 try:
386 os.remove(f)
387 except:
388 try:
389 os.removedirs(f)
390 except:
391 pass
392
393
394 def deleteOldTorrents(self, days, still_active = []):
395 self.deleteOldCacheData(days, still_active, True)
396
397
398 ###### OTHER ######
399
400 def getIconDir(self):
401 return self.dir_icons
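ConfigDir names every cache entry after the hex form of its 20-byte info hash via the tohex/unhex tables above. An equivalent round-trip sketch (same behaviour on byte strings, simpler spelling):

    def tohex(s):
        return ''.join('%02x' % ord(c) for c in s)

    def unhex(s):
        return ''.join(chr(int(s[x:x + 2], 16)) for x in range(0, len(s), 2))

    info_hash = '\x01\xab' * 10                # dummy 20-byte hash
    assert unhex(tohex(info_hash)) == info_hash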
=== removed file '.pc/09_timtuckerfixes.dpatch/BitTornado/RawServer.py'
--- .pc/09_timtuckerfixes.dpatch/BitTornado/RawServer.py 2010-03-21 14:36:30 +0000
+++ .pc/09_timtuckerfixes.dpatch/BitTornado/RawServer.py 1970-01-01 00:00:00 +0000
@@ -1,195 +0,0 @@
1# Written by Bram Cohen
2# see LICENSE.txt for license information
3
4from bisect import insort
5from SocketHandler import SocketHandler, UPnP_ERROR
6import socket
7from cStringIO import StringIO
8from traceback import print_exc
9from select import error
10from threading import Thread, Event
11from time import sleep
12from clock import clock
13import sys
14try:
15 True
16except:
17 True = 1
18 False = 0
19
20
21def autodetect_ipv6():
22 try:
23 assert sys.version_info >= (2,3)
24 assert socket.has_ipv6
25 socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
26 except:
27 return 0
28 return 1
29
30def autodetect_socket_style():
31 if sys.platform.find('linux') < 0:
32 return 1
33 else:
34 try:
35 f = open('/proc/sys/net/ipv6/bindv6only','r')
36 dual_socket_style = int(f.read())
37 f.close()
38 return int(not dual_socket_style)
39 except:
40 return 0
41
42
43READSIZE = 32768
44
45class RawServer:
46 def __init__(self, doneflag, timeout_check_interval, timeout, noisy = True,
47 ipv6_enable = True, failfunc = lambda x: None, errorfunc = None,
48 sockethandler = None, excflag = Event()):
49 self.timeout_check_interval = timeout_check_interval
50 self.timeout = timeout
51 self.servers = {}
52 self.single_sockets = {}
53 self.dead_from_write = []
54 self.doneflag = doneflag
55 self.noisy = noisy
56 self.failfunc = failfunc
57 self.errorfunc = errorfunc
58 self.exccount = 0
59 self.funcs = []
60 self.externally_added = []
61 self.finished = Event()
62 self.tasks_to_kill = []
63 self.excflag = excflag
64
65 if sockethandler is None:
66 sockethandler = SocketHandler(timeout, ipv6_enable, READSIZE)
67 self.sockethandler = sockethandler
68 self.add_task(self.scan_for_timeouts, timeout_check_interval)
69
70 def get_exception_flag(self):
71 return self.excflag
72
73 def _add_task(self, func, delay, id = None):
74 assert float(delay) >= 0
75 insort(self.funcs, (clock() + delay, func, id))
76
77 def add_task(self, func, delay = 0, id = None):
78 assert float(delay) >= 0
79 self.externally_added.append((func, delay, id))
80
81 def scan_for_timeouts(self):
82 self.add_task(self.scan_for_timeouts, self.timeout_check_interval)
83 self.sockethandler.scan_for_timeouts()
84
85 def bind(self, port, bind = '', reuse = False,
86 ipv6_socket_style = 1, upnp = False):
87 self.sockethandler.bind(port, bind, reuse, ipv6_socket_style, upnp)
88
89 def find_and_bind(self, minport, maxport, bind = '', reuse = False,
90 ipv6_socket_style = 1, upnp = 0, randomizer = False):
91 return self.sockethandler.find_and_bind(minport, maxport, bind, reuse,
92 ipv6_socket_style, upnp, randomizer)
93
94 def start_connection_raw(self, dns, socktype, handler = None):
95 return self.sockethandler.start_connection_raw(dns, socktype, handler)
96
97 def start_connection(self, dns, handler = None, randomize = False):
98 return self.sockethandler.start_connection(dns, handler, randomize)
99
100 def get_stats(self):
101 return self.sockethandler.get_stats()
102
103 def pop_external(self):
104 while self.externally_added:
105 (a, b, c) = self.externally_added.pop(0)
106 self._add_task(a, b, c)
107
108
109 def listen_forever(self, handler):
110 self.sockethandler.set_handler(handler)
111 try:
112 while not self.doneflag.isSet():
113 try:
114 self.pop_external()
115 self._kill_tasks()
116 if self.funcs:
117 period = self.funcs[0][0] + 0.001 - clock()
118 else:
119 period = 2 ** 30
120 if period < 0:
121 period = 0
122 events = self.sockethandler.do_poll(period)
123 if self.doneflag.isSet():
124 return
125 while self.funcs and self.funcs[0][0] <= clock():
126 garbage1, func, id = self.funcs.pop(0)
127 if id in self.tasks_to_kill:
128 pass
129 try:
130# print func.func_name
131 func()
132 except (SystemError, MemoryError), e:
133 self.failfunc(str(e))
134 return
135 except KeyboardInterrupt:
136# self.exception(True)
137 return
138 except:
139 if self.noisy:
140 self.exception()
141 self.sockethandler.close_dead()
142 self.sockethandler.handle_events(events)
143 if self.doneflag.isSet():
144 return
145 self.sockethandler.close_dead()
146 except (SystemError, MemoryError), e:
147 self.failfunc(str(e))
148 return
149 except error:
150 if self.doneflag.isSet():
151 return
152 except KeyboardInterrupt:
153# self.exception(True)
154 return
155 except:
156 self.exception()
157 if self.exccount > 10:
158 return
159 finally:
160# self.sockethandler.shutdown()
161 self.finished.set()
162
163 def is_finished(self):
164 return self.finished.isSet()
165
166 def wait_until_finished(self):
167 self.finished.wait()
168
169 def _kill_tasks(self):
170 if self.tasks_to_kill:
171 new_funcs = []
172 for (t, func, id) in self.funcs:
173 if id not in self.tasks_to_kill:
174 new_funcs.append((t, func, id))
175 self.funcs = new_funcs
176 self.tasks_to_kill = []
177
178 def kill_tasks(self, id):
179 self.tasks_to_kill.append(id)
180
181 def exception(self, kbint = False):
182 if not kbint:
183 self.excflag.set()
184 self.exccount += 1
185 if self.errorfunc is None:
186 print_exc()
187 else:
188 data = StringIO()
189 print_exc(file = data)
190# print data.getvalue() # report exception here too
191 if not kbint: # don't report here if it's a keyboard interrupt
192 self.errorfunc(data.getvalue())
193
194 def shutdown(self):
195 self.sockethandler.shutdown()
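RawServer keeps its timer queue as a plain list held sorted by absolute deadline with bisect.insort, then drains everything that is due on each pass of listen_forever. A minimal socket-free sketch of that queue (hypothetical names):

    from bisect import insort
    from time import time, sleep

    funcs = []

    def add_task(func, delay=0):
        insort(funcs, (time() + delay, func))  # sort key: absolute deadline

    def hello():
        print('hello from the task queue')

    add_task(hello, 0.1)
    while funcs:
        when, func = funcs.pop(0)
        sleep(max(0, when - time()))
        func()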
=== removed file '.pc/09_timtuckerfixes.dpatch/BitTornado/clock.py'
--- .pc/09_timtuckerfixes.dpatch/BitTornado/clock.py 2010-03-21 14:36:30 +0000
+++ .pc/09_timtuckerfixes.dpatch/BitTornado/clock.py 1970-01-01 00:00:00 +0000
@@ -1,27 +0,0 @@
1# Written by John Hoffman
2# see LICENSE.txt for license information
3
4from time import *
5import sys
6
7_MAXFORWARD = 100
8_FUDGE = 1
9
10class RelativeTime:
11 def __init__(self):
12 self.time = time()
13 self.offset = 0
14
15 def get_time(self):
16 t = time() + self.offset
17 if t < self.time or t > self.time + _MAXFORWARD:
18 self.time += _FUDGE
19 self.offset += self.time - t
20 return self.time
21 self.time = t
22 return t
23
24if sys.platform != 'win32':
25 _RTIME = RelativeTime()
26 def clock():
27 return _RTIME.get_time()
28\ No newline at end of file
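RelativeTime above absorbs system-clock steps: if time() moves backwards, or jumps more than _MAXFORWARD seconds ahead, it advances its own time by _FUDGE and folds the difference into an offset. The practical guarantee (sketch, assuming this module's non-win32 clock()):

    a = clock()
    b = clock()
    assert b >= a   # never runs backwards, even when time() is stepped back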
=== removed file '.pc/09_timtuckerfixes.dpatch/BitTornado/download_bt1.py'
--- .pc/09_timtuckerfixes.dpatch/BitTornado/download_bt1.py 2010-03-21 14:36:30 +0000
+++ .pc/09_timtuckerfixes.dpatch/BitTornado/download_bt1.py 1970-01-01 00:00:00 +0000
@@ -1,877 +0,0 @@
1# Written by Bram Cohen
2# see LICENSE.txt for license information
3
4from zurllib import urlopen
5from urlparse import urlparse
6from BT1.btformats import check_message
7from BT1.Choker import Choker
8from BT1.Storage import Storage
9from BT1.StorageWrapper import StorageWrapper
10from BT1.FileSelector import FileSelector
11from BT1.Uploader import Upload
12from BT1.Downloader import Downloader
13from BT1.HTTPDownloader import HTTPDownloader
14from BT1.Connecter import Connecter
15from RateLimiter import RateLimiter
16from BT1.Encrypter import Encoder
17from RawServer import RawServer, autodetect_ipv6, autodetect_socket_style
18from BT1.Rerequester import Rerequester
19from BT1.DownloaderFeedback import DownloaderFeedback
20from RateMeasure import RateMeasure
21from CurrentRateMeasure import Measure
22from BT1.PiecePicker import PiecePicker
23from BT1.Statistics import Statistics
24from ConfigDir import ConfigDir
25from bencode import bencode, bdecode
26from natpunch import UPnP_test
27from sha import sha
28from os import path, makedirs, listdir
29from parseargs import parseargs, formatDefinitions, defaultargs
30from socket import error as socketerror
31from random import seed
32from threading import Thread, Event
33from clock import clock
34from BTcrypto import CRYPTO_OK
35from __init__ import createPeerID
36
37try:
38 True
39except:
40 True = 1
41 False = 0
42
43defaults = [
44 ('max_uploads', 7,
45 "the maximum number of uploads to allow at once."),
46 ('keepalive_interval', 120.0,
47 'number of seconds to pause between sending keepalives'),
48 ('download_slice_size', 2 ** 14,
49 "How many bytes to query for per request."),
50 ('upload_unit_size', 1460,
51 "when limiting upload rate, how many bytes to send at a time"),
52 ('request_backlog', 10,
53 "maximum number of requests to keep in a single pipe at once."),
54 ('max_message_length', 2 ** 23,
55 "maximum length prefix encoding you'll accept over the wire - larger values get the connection dropped."),
56 ('ip', '',
57 "ip to report you have to the tracker."),
58 ('minport', 10000, 'minimum port to listen on, counts up if unavailable'),
59 ('maxport', 60000, 'maximum port to listen on'),
60 ('random_port', 1, 'whether to choose randomly inside the port range ' +
61 'instead of counting up linearly'),
62 ('responsefile', '',
63 'file the server response was stored in, alternative to url'),
64 ('url', '',
65 'url to get file from, alternative to responsefile'),
66 ('crypto_allowed', int(CRYPTO_OK),
67 'whether to allow the client to accept encrypted connections'),
68 ('crypto_only', 0,
69 'whether to only create or allow encrypted connections'),
70 ('crypto_stealth', 0,
71 'whether to prevent all non-encrypted connection attempts; ' +
72 'will result in an effectively firewalled state on older trackers'),
73 ('selector_enabled', 1,
74 'whether to enable the file selector and fast resume function'),
75 ('expire_cache_data', 10,
76 'the number of days after which you wish to expire old cache data ' +
77 '(0 = disabled)'),
78 ('priority', '',
79 'a list of file priorities separated by commas, must be one per file, ' +
80 '0 = highest, 1 = normal, 2 = lowest, -1 = download disabled'),
81 ('saveas', '',
82 'local file name to save the file as, null indicates query user'),
83 ('timeout', 300.0,
84 'time to wait between closing sockets which nothing has been received on'),
85 ('timeout_check_interval', 60.0,
86 'time to wait between checking if any connections have timed out'),
87 ('max_slice_length', 2 ** 17,
88 "maximum length slice to send to peers, larger requests are ignored"),
89 ('max_rate_period', 20.0,
90 "maximum amount of time to guess the current rate estimate represents"),
91 ('bind', '',
92 'comma-separated list of ips/hostnames to bind to locally'),
93# ('ipv6_enabled', autodetect_ipv6(),
94 ('ipv6_enabled', 0,
95 'allow the client to connect to peers via IPv6'),
96 ('ipv6_binds_v4', autodetect_socket_style(),
97 "set if an IPv6 server socket won't also field IPv4 connections"),
98 ('upnp_nat_access', 1,
99 'attempt to autoconfigure a UPnP router to forward a server port ' +
100 '(0 = disabled, 1 = mode 1 [fast], 2 = mode 2 [slow])'),
101 ('upload_rate_fudge', 5.0,
102 'time equivalent of writing to kernel-level TCP buffer, for rate adjustment'),
103 ('tcp_ack_fudge', 0.03,
104 'how much TCP ACK download overhead to add to upload rate calculations ' +
105 '(0 = disabled)'),
106 ('display_interval', .5,
107 'time between updates of displayed information'),
108 ('rerequest_interval', 5 * 60,
109 'time to wait between requesting more peers'),
110 ('min_peers', 20,
111 'minimum number of peers to not do rerequesting'),
112 ('http_timeout', 60,
113 'number of seconds to wait before assuming that an http connection has timed out'),
114 ('max_initiate', 40,
115 'number of peers at which to stop initiating new connections'),
116 ('check_hashes', 1,
117 'whether to check hashes on disk'),
118 ('max_upload_rate', 0,
119 'maximum kB/s to upload at (0 = no limit, -1 = automatic)'),
120 ('max_download_rate', 0,
121 'maximum kB/s to download at (0 = no limit)'),
122 ('alloc_type', 'normal',
123 'allocation type (may be normal, background, pre-allocate or sparse)'),
124 ('alloc_rate', 2.0,
125 'rate (in MiB/s) to allocate space at using background allocation'),
126 ('buffer_reads', 1,
127 'whether to buffer disk reads'),
128 ('write_buffer_size', 4,
129 'the maximum amount of space to use for buffering disk writes ' +
130 '(in megabytes, 0 = disabled)'),
131 ('breakup_seed_bitfield', 1,
132 'sends an incomplete bitfield and then fills with have messages, '
133 'in order to get around stupid ISP manipulation'),
134 ('snub_time', 30.0,
135 "seconds to wait for data to come in over a connection before assuming it's semi-permanently choked"),
136 ('spew', 0,
137 "whether to display diagnostic info to stdout"),
138 ('rarest_first_cutoff', 2,
139 "number of downloads at which to switch from random to rarest first"),
140 ('rarest_first_priority_cutoff', 5,
141 'the number of peers which need to have a piece before other partials take priority over rarest first'),
142 ('min_uploads', 4,
143 "the number of uploads to fill out to with extra optimistic unchokes"),
144 ('max_files_open', 50,
145 'the maximum number of files to keep open at a time, 0 means no limit'),
146 ('round_robin_period', 30,
147 "the number of seconds between the client's switching upload targets"),
148 ('super_seeder', 0,
149 "whether to use special upload-efficiency-maximizing routines (only for dedicated seeds)"),
150 ('security', 1,
151 "whether to enable extra security features intended to prevent abuse"),
152 ('max_connections', 0,
153 "the absolute maximum number of peers to connect with (0 = no limit)"),
154 ('auto_kick', 1,
155 "whether to allow the client to automatically kick/ban peers that send bad data"),
156 ('double_check', 1,
157 "whether to double-check data being written to the disk for errors (may increase CPU load)"),
158 ('triple_check', 0,
159 "whether to thoroughly check data being written to the disk (may slow disk access)"),
160 ('lock_files', 1,
161 "whether to lock files the client is working with"),
162 ('lock_while_reading', 0,
163 "whether to lock access to files being read"),
164 ('auto_flush', 0,
165 "minutes between automatic flushes to disk (0 = disabled)"),
166 ('dedicated_seed_id', '',
167 "code to send to tracker identifying as a dedicated seed"),
168 ]
169
170argslistheader = 'Arguments are:\n\n'
171
172
173def _failfunc(x):
174 print x
175
176# old-style downloader
177def download(params, filefunc, statusfunc, finfunc, errorfunc, doneflag, cols,
178 pathFunc = None, presets = {}, exchandler = None,
179 failed = _failfunc, paramfunc = None):
180
181 try:
182 config = parse_params(params, presets)
183 except ValueError, e:
184 failed('error: ' + str(e) + '\nrun with no args for parameter explanations')
185 return
186 if not config:
187 errorfunc(get_usage())
188 return
189
190 myid = createPeerID()
191 seed(myid)
192
193 rawserver = RawServer(doneflag, config['timeout_check_interval'],
194 config['timeout'], ipv6_enable = config['ipv6_enabled'],
195 failfunc = failed, errorfunc = exchandler)
196
197 upnp_type = UPnP_test(config['upnp_nat_access'])
198 try:
199 listen_port = rawserver.find_and_bind(config['minport'], config['maxport'],
200 config['bind'], ipv6_socket_style = config['ipv6_binds_v4'],
201 upnp = upnp_type, randomizer = config['random_port'])
202 except socketerror, e:
203 failed("Couldn't listen - " + str(e))
204 return
205
206 response = get_response(config['responsefile'], config['url'], failed)
207 if not response:
208 return
209
210 infohash = sha(bencode(response['info'])).digest()
211
212 d = BT1Download(statusfunc, finfunc, errorfunc, exchandler, doneflag,
213 config, response, infohash, myid, rawserver, listen_port)
214
215 if not d.saveAs(filefunc):
216 return
217
218 if pathFunc:
219 pathFunc(d.getFilename())
220
221 hashcheck = d.initFiles(old_style = True)
222 if not hashcheck:
223 return
224 if not hashcheck():
225 return
226 if not d.startEngine():
227 return
228 d.startRerequester()
229 d.autoStats()
230
231 statusfunc(activity = 'connecting to peers')
232
233 if paramfunc:
234 paramfunc({ 'max_upload_rate' : d.setUploadRate, # change_max_upload_rate(<int KiB/sec>)
235 'max_uploads': d.setConns, # change_max_uploads(<int max uploads>)
236 'listen_port' : listen_port, # int
237 'peer_id' : myid, # string
238 'info_hash' : infohash, # string
239 'start_connection' : d._startConnection, # start_connection((<string ip>, <int port>), <peer id>)
240 })
241
242 rawserver.listen_forever(d.getPortHandler())
243
244 d.shutdown()
245
246
247def parse_params(params, presets = {}):
248 if len(params) == 0:
249 return None
250 config, args = parseargs(params, defaults, 0, 1, presets = presets)
251 if args:
252 if config['responsefile'] or config['url']:
253 raise ValueError,'must have responsefile or url as arg or parameter, not both'
254 if path.isfile(args[0]):
255 config['responsefile'] = args[0]
256 else:
257 try:
258 urlparse(args[0])
259 except:
260 raise ValueError, 'bad filename or url'
261 config['url'] = args[0]
262 elif (config['responsefile'] == '') == (config['url'] == ''):
263 raise ValueError, 'need responsefile or url, must have one, cannot have both'
264 return config
265
266
267def get_usage(defaults = defaults, cols = 100, presets = {}):
268 return (argslistheader + formatDefinitions(defaults, cols, presets))
269
270
271def get_response(file, url, errorfunc):
272 try:
273 if file:
274 h = open(file, 'rb')
275 try:
276 line = h.read(10) # quick test to see if responsefile contains a dict
277 front,garbage = line.split(':',1)
278 assert front[0] == 'd'
279 int(front[1:])
280 except:
281 errorfunc(file+' is not a valid responsefile')
282 return None
283 try:
284 h.seek(0)
285 except:
286 try:
287 h.close()
288 except:
289 pass
290 h = open(file, 'rb')
291 else:
292 try:
293 h = urlopen(url)
294 except:
295 errorfunc(url+' bad url')
296 return None
297 response = h.read()
298
299 except IOError, e:
300 errorfunc('problem getting response info - ' + str(e))
301 return None
302 try:
303 h.close()
304 except:
305 pass
306 try:
307 try:
308 response = bdecode(response)
309 except:
310 errorfunc("warning: bad data in responsefile")
311 response = bdecode(response, sloppy=1)
312 check_message(response)
313 except ValueError, e:
314 errorfunc("got bad file info - " + str(e))
315 return None
316
317 return response
318
319
320class BT1Download:
321 def __init__(self, statusfunc, finfunc, errorfunc, excfunc, doneflag,
322 config, response, infohash, id, rawserver, port,
323 appdataobj = None):
324 self.statusfunc = statusfunc
325 self.finfunc = finfunc
326 self.errorfunc = errorfunc
327 self.excfunc = excfunc
328 self.doneflag = doneflag
329 self.config = config
330 self.response = response
331 self.infohash = infohash
332 self.myid = id
333 self.rawserver = rawserver
334 self.port = port
335
336 self.info = self.response['info']
337 self.pieces = [self.info['pieces'][x:x+20]
338 for x in xrange(0, len(self.info['pieces']), 20)]
339 self.len_pieces = len(self.pieces)
340 self.argslistheader = argslistheader
341 self.unpauseflag = Event()
342 self.unpauseflag.set()
343 self.downloader = None
344 self.storagewrapper = None
345 self.fileselector = None
346 self.super_seeding_active = False
347 self.filedatflag = Event()
348 self.spewflag = Event()
349 self.superseedflag = Event()
350 self.whenpaused = None
351 self.finflag = Event()
352 self.rerequest = None
353 self.tcp_ack_fudge = config['tcp_ack_fudge']
354
355 self.selector_enabled = config['selector_enabled']
356 if appdataobj:
357 self.appdataobj = appdataobj
358 elif self.selector_enabled:
359 self.appdataobj = ConfigDir()
360 self.appdataobj.deleteOldCacheData( config['expire_cache_data'],
361 [self.infohash] )
362
363 self.excflag = self.rawserver.get_exception_flag()
364 self.failed = False
365 self.checking = False
366 self.started = False
367
368 self.picker = PiecePicker(self.len_pieces, config['rarest_first_cutoff'],
369 config['rarest_first_priority_cutoff'])
370 self.choker = Choker(config, rawserver.add_task,
371 self.picker, self.finflag.isSet)
372
373
374 def checkSaveLocation(self, loc):
375 if self.info.has_key('length'):
376 return path.exists(loc)
377 for x in self.info['files']:
378 if path.exists(path.join(loc, x['path'][0])):
379 return True
380 return False
381
382
383 def saveAs(self, filefunc, pathfunc = None):
384 try:
385 def make(f, forcedir = False):
386 if not forcedir:
387 f = path.split(f)[0]
388 if f != '' and not path.exists(f):
389 makedirs(f)
390
391 if self.info.has_key('length'):
392 file_length = self.info['length']
393 file = filefunc(self.info['name'], file_length,
394 self.config['saveas'], False)
395 if file is None:
396 return None
397 make(file)
398 files = [(file, file_length)]
399 else:
400 file_length = 0L
401 for x in self.info['files']:
402 file_length += x['length']
403 file = filefunc(self.info['name'], file_length,
404 self.config['saveas'], True)
405 if file is None:
406 return None
407
408 # if this path exists, and no files from the info dict exist, we assume it's a new download and
409 # the user wants to create a new directory with the default name
410 existing = 0
411 if path.exists(file):
412 if not path.isdir(file):
413 self.errorfunc(file + 'is not a dir')
414 return None
415 if len(listdir(file)) > 0: # if it's not empty
416 for x in self.info['files']:
417 if path.exists(path.join(file, x['path'][0])):
418 existing = 1
419 if not existing:
420 file = path.join(file, self.info['name'])
421 if path.exists(file) and not path.isdir(file):
422 if file[-8:] == '.torrent':
423 file = file[:-8]
424 if path.exists(file) and not path.isdir(file):
425 self.errorfunc("Can't create dir - " + self.info['name'])
426 return None
427 make(file, True)
428
429 # alert the UI to any possible change in path
430 if pathfunc != None:
431 pathfunc(file)
432
433 files = []
434 for x in self.info['files']:
435 n = file
436 for i in x['path']:
437 n = path.join(n, i)
The diff has been truncated for viewing.
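parse_params above feeds the defaults table into parseargs, accepting either a responsefile/url option or a single positional torrent argument. A hypothetical invocation (option names taken from the defaults list; shown commented out, not executed):

    params = ['--max_upload_rate', '64', '--saveas', '/tmp/linux.iso',
              'linux.iso.torrent']
    # config = parse_params(params)
    # -> sets config['responsefile'] = 'linux.iso.torrent' if that file exists,
    #    otherwise the positional argument is treated as a URL.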
