1
# Written by Bram Cohen
2
# see LICENSE.txt for license information
4
from BitTornado.piecebuffer import BufferPool
5
from threading import Lock
6
from time import time, strftime, localtime
8
from os.path import exists, getsize, getmtime, basename
9
from traceback import print_exc
13
fsync = lambda x: None
14
from bisect import bisect
25
MAXLOCKSIZE = 1000000000L
26
MAXLOCKRANGE = 3999999999L # only lock first 4 gig of file
29
PieceBuffer = _pool.new
31
def dummy_status(fractionDone = None, activity = None):
    """No-op status callback used when the caller does not supply one.

    Matches the (fractionDone, activity) signature expected by callers
    that report progress; both arguments are ignored.
    """
    pass
35
def __init__(self, files, piece_length, doneflag, config,
36
disabled_files = None):
37
# can raise IOError and ValueError
39
self.piece_length = piece_length
40
self.doneflag = doneflag
41
self.disabled = [False] * len(files)
43
self.disabled_ranges = []
44
self.working_ranges = []
53
if config.get('lock_files', True):
54
self.lock_file, self.unlock_file = self._lock_file, self._unlock_file
56
self.lock_file, self.unlock_file = lambda x1,x2: None, lambda x1,x2: None
57
self.lock_while_reading = config.get('lock_while_reading', False)
60
if not disabled_files:
61
disabled_files = [False] * len(files)
63
for i in xrange(len(files)):
64
file, length = files[i]
65
if doneflag.isSet(): # bail out if doneflag is set
67
self.disabled_ranges.append(None)
69
self.file_ranges.append(None)
70
self.working_ranges.append([])
72
range = (total, total + length, 0, file)
73
self.file_ranges.append(range)
74
self.working_ranges.append([range])
93
self.mtimes[file] = getmtime(file)
95
self.sizes[file] = length
98
self.total_length = total
101
self.max_files_open = config['max_files_open']
102
if self.max_files_open > 0 and numfiles > self.max_files_open:
103
self.handlebuffer = []
105
self.handlebuffer = None
109
def _lock_file(self, name, f):
    """Windows: take mandatory byte-range locks on file handle f.

    Locks the first MAXLOCKRANGE bytes of the file in MAXLOCKSIZE
    chunks; msvcrt.locking() locks from the current file position,
    hence the seek before each call.
    NOTE(review): reconstructed the `import msvcrt` and `f.seek(p)`
    lines lost in extraction — confirm against upstream.
    """
    import msvcrt
    for p in range(0, min(self.sizes[name], MAXLOCKRANGE), MAXLOCKSIZE):
        f.seek(p)  # locking applies at the current file position
        msvcrt.locking(f.fileno(), msvcrt.LK_LOCK,
                       min(MAXLOCKSIZE, self.sizes[name] - p))
116
def _unlock_file(self, name, f):
    """Windows: release the byte-range locks taken by _lock_file.

    Mirrors _lock_file's chunked walk over the first MAXLOCKRANGE
    bytes, unlocking each MAXLOCKSIZE region.
    NOTE(review): reconstructed the `import msvcrt` and `f.seek(p)`
    lines lost in extraction — confirm against upstream.
    """
    import msvcrt
    for p in range(0, min(self.sizes[name], MAXLOCKRANGE), MAXLOCKSIZE):
        f.seek(p)  # unlocking applies at the current file position
        msvcrt.locking(f.fileno(), msvcrt.LK_UNLCK,
                       min(MAXLOCKSIZE, self.sizes[name] - p))
123
elif os.name == 'posix':
124
def _lock_file(self, name, f):
126
fcntl.flock(f.fileno(), fcntl.LOCK_EX)
128
def _unlock_file(self, name, f):
130
fcntl.flock(f.fileno(), fcntl.LOCK_UN)
133
def _lock_file(self, name, f):
135
def _unlock_file(self, name, f):
139
def was_preallocated(self, pos, length):
    """Return True if every file overlapping [pos, pos+length) has
    already been written out (self.tops) at least to the end of its
    overlapping span, False otherwise.

    NOTE(review): the terminal `return False` / `return True` lines
    were lost in extraction and have been reconstructed from the
    predicate's name and loop structure — confirm against upstream.
    """
    # renamed loop variable: the original shadowed the builtin `file`
    for fname, begin, end in self._intervals(pos, length):
        if self.tops.get(fname, 0) < end:
            return False
    return True
146
def _sync(self, file):
148
if self.handlebuffer:
149
self.handlebuffer.remove(file)
152
# may raise IOError or OSError
153
for file in self.whandles.keys():
157
def set_readonly(self, f=None):
161
file = self.files[f][0]
162
if self.whandles.has_key(file):
166
def get_total_length(self):
    """Return the combined length in bytes of all files (computed
    in __init__ as the running total of per-file lengths)."""
    return self.total_length
170
def _open(self, file, mode):
171
if self.mtimes.has_key(file):
173
if self.handlebuffer is not None:
174
assert getsize(file) == self.tops[file]
175
newmtime = getmtime(file)
176
oldmtime = self.mtimes[file]
177
assert newmtime <= oldmtime+1
178
assert newmtime >= oldmtime-1
181
print ( file+' modified: '
182
+strftime('(%x %X)',localtime(self.mtimes[file]))
183
+strftime(' != (%x %X) ?',localtime(getmtime(file))) )
184
raise IOError('modified during download')
186
return open(file, mode)
193
def _close(self, file):
194
f = self.handles[file]
195
del self.handles[file]
196
if self.whandles.has_key(file):
197
del self.whandles[file]
199
self.unlock_file(file, f)
201
self.tops[file] = getsize(file)
202
self.mtimes[file] = getmtime(file)
204
if self.lock_while_reading:
205
self.unlock_file(file, f)
209
def _close_file(self, file):
210
if not self.handles.has_key(file):
213
if self.handlebuffer:
214
self.handlebuffer.remove(file)
217
def _get_file_handle(self, file, for_write):
218
if self.handles.has_key(file):
219
if for_write and not self.whandles.has_key(file):
222
f = self._open(file, 'rb+')
223
self.handles[file] = f
224
self.whandles[file] = 1
225
self.lock_file(file, f)
226
except (IOError, OSError), e:
229
raise IOError('unable to reopen '+file+': '+str(e))
231
if self.handlebuffer:
232
if self.handlebuffer[-1] != file:
233
self.handlebuffer.remove(file)
234
self.handlebuffer.append(file)
235
elif self.handlebuffer is not None:
236
self.handlebuffer.append(file)
240
f = self._open(file, 'rb+')
241
self.handles[file] = f
242
self.whandles[file] = 1
243
self.lock_file(file, f)
245
f = self._open(file, 'rb')
246
self.handles[file] = f
247
if self.lock_while_reading:
248
self.lock_file(file, f)
249
except (IOError, OSError), e:
252
raise IOError('unable to open '+file+': '+str(e))
254
if self.handlebuffer is not None:
255
self.handlebuffer.append(file)
256
if len(self.handlebuffer) > self.max_files_open:
257
self._close(self.handlebuffer.pop(0))
259
return self.handles[file]
262
def _reset_ranges(self):
264
for l in self.working_ranges:
265
self.ranges.extend(l)
266
self.begins = [i[0] for i in self.ranges]
268
def _intervals(self, pos, amount):
271
p = bisect(self.begins, pos) - 1
272
while p < len(self.ranges):
273
begin, end, offset, file = self.ranges[p]
277
offset + max(pos, begin) - begin,
278
offset + min(end, stop) - begin ))
283
def read(self, pos, amount, flush_first = False):
285
for file, pos, end in self._intervals(pos, amount):
287
print 'reading '+file+' from '+str(pos)+' to '+str(end)
289
h = self._get_file_handle(file, False)
290
if flush_first and self.whandles.has_key(file):
295
length = min(end-pos, MAXREADSIZE)
296
data = h.read(length)
297
if len(data) != length:
298
raise IOError('error reading data from '+file)
304
def write(self, pos, s):
305
# might raise an IOError
307
for file, begin, end in self._intervals(pos, len(s)):
309
print 'writing '+file+' from '+str(pos)+' to '+str(end)
311
h = self._get_file_handle(file, True)
313
h.write(s[total: total + end - begin])
318
for begin, end, offset, file in self.ranges:
319
l = offset + end - begin
320
if l > self.tops.get(file, 0):
322
h = self._get_file_handle(file, True)
328
# may raise IOError or OSError
329
for file in self.whandles.keys():
331
self.handles[file].flush()
335
for file, f in self.handles.items():
337
self.unlock_file(file, f)
346
self.handlebuffer = None
349
def _get_disabled_ranges(self, f):
350
if not self.file_ranges[f]:
352
r = self.disabled_ranges[f]
355
start, end, offset, file = self.file_ranges[f]
357
print 'calculating disabled range for '+self.files[f][0]
358
print 'bytes: '+str(start)+'-'+str(end)
359
print 'file spans pieces '+str(int(start/self.piece_length))+'-'+str(int((end-1)/self.piece_length)+1)
360
pieces = range( int(start/self.piece_length),
361
int((end-1)/self.piece_length)+1 )
365
if ( start % self.piece_length == 0
366
and end % self.piece_length == 0 ): # happens to be a single,
368
working_range = [(start, end, offset, file)]
371
midfile = os.path.join(self.bufferdir,str(f))
372
working_range = [(start, end, 0, midfile)]
373
disabled_files.append((midfile, start, end))
375
self.sizes[midfile] = length
377
update_pieces = [(piece, start-(piece*self.piece_length), length)]
380
if start % self.piece_length != 0: # doesn't begin on an even piece boundary
381
end_b = pieces[1]*self.piece_length
382
startfile = os.path.join(self.bufferdir,str(f)+'b')
383
working_range_b = [ ( start, end_b, 0, startfile ) ]
384
disabled_files.append((startfile, start, end_b))
385
length = end_b - start
386
self.sizes[startfile] = length
388
piece = pieces.pop(0)
389
update_pieces.append((piece, start-(piece*self.piece_length), length))
392
if f != len(self.files)-1 and end % self.piece_length != 0:
393
# doesn't end on an even piece boundary
394
start_e = pieces[-1] * self.piece_length
395
endfile = os.path.join(self.bufferdir,str(f)+'e')
396
working_range_e = [ ( start_e, end, 0, endfile ) ]
397
disabled_files.append((endfile, start_e, end))
398
length = end - start_e
399
self.sizes[endfile] = length
400
piece = pieces.pop(-1)
401
update_pieces.append((piece, 0, length))
405
working_range_m = [ ( pieces[0]*self.piece_length,
406
(pieces[-1]+1)*self.piece_length,
410
working_range = working_range_b + working_range_m + working_range_e
413
print str(working_range)
414
print str(update_pieces)
415
r = (tuple(working_range), tuple(update_pieces), tuple(disabled_files))
416
self.disabled_ranges[f] = r
420
def set_bufferdir(self, dir):
423
def enable_file(self, f):
424
if not self.disabled[f]:
426
self.disabled[f] = False
427
r = self.file_ranges[f]
432
h = open(file, 'wb+')
435
if not self.tops.has_key(file):
436
self.tops[file] = getsize(file)
437
if not self.mtimes.has_key(file):
438
self.mtimes[file] = getmtime(file)
439
self.working_ranges[f] = [r]
441
def disable_file(self, f):
444
self.disabled[f] = True
445
r = self._get_disabled_ranges(f)
448
for file, begin, end in r[2]:
449
if not os.path.isdir(self.bufferdir):
450
os.makedirs(self.bufferdir)
452
h = open(file, 'wb+')
455
if not self.tops.has_key(file):
456
self.tops[file] = getsize(file)
457
if not self.mtimes.has_key(file):
458
self.mtimes[file] = getmtime(file)
459
self.working_ranges[f] = r[0]
461
reset_file_status = _reset_ranges
464
def get_piece_update_list(self, f):
    """Return the piece-update tuples for disabled file index f.

    _get_disabled_ranges(f) returns (working_range, update_pieces,
    disabled_files); this accessor exposes the middle element.
    """
    return self._get_disabled_ranges(f)[1]
468
def delete_file(self, f):
470
os.remove(self.files[f][0])
478
d['files'] = [ file #, size, mtime {, file #, size, mtime...} ]
479
file # in torrent, and the size and last modification
480
time for those files. Missing files are either empty
482
d['partial files'] = [ name, size, mtime... ]
483
Names, sizes and last modification times of files containing
484
partial piece data. Filenames go by the following convention:
485
{file #, 0-based}{nothing, "b" or "e"}
486
eg: "0e" "3" "4b" "4e"
487
Where "b" specifies the partial data for the first piece in
488
the file, "e" the last piece, and no letter signifying that
489
the file is disabled but is smaller than one piece, and that
490
all the data is cached inside so adjacent files may be
496
for i in xrange(len(self.files)):
497
if not self.files[i][1]: # length == 0
500
for file, start, end in self._get_disabled_ranges(i)[2]:
501
pfiles.extend([basename(file),getsize(file),int(getmtime(file))])
503
file = self.files[i][0]
504
files.extend([i,getsize(file),int(getmtime(file))])
505
return {'files': files, 'partial files': pfiles}
508
def unpickle(self, data):
509
# assume all previously-disabled files have already been disabled
514
assert len(l) % 3 == 0
515
l = [l[x:x+3] for x in xrange(0,len(l),3)]
516
for f, size, mtime in l:
517
files[f] = (size, mtime)
518
l = data.get('partial files',[])
519
assert len(l) % 3 == 0
520
l = [l[x:x+3] for x in xrange(0,len(l),3)]
521
for file, size, mtime in l:
522
pfiles[file] = (size, mtime)
525
for i in xrange(len(self.files)):
528
r = self.file_ranges[i]
531
start, end, offset, file =r
534
for p in xrange( int(start/self.piece_length),
535
int((end-1)/self.piece_length)+1 ):
539
print valid_pieces.keys()
541
def test(old, size, mtime):
542
oldsize, oldmtime = old
545
if mtime > oldmtime+1:
547
if mtime < oldmtime-1:
551
for i in xrange(len(self.files)):
553
for file, start, end in self._get_disabled_ranges(i)[2]:
555
if ( not pfiles.has_key(f1)
556
or not test(pfiles[f1],getsize(file),getmtime(file)) ):
558
print 'removing '+file
559
for p in xrange( int(start/self.piece_length),
560
int((end-1)/self.piece_length)+1 ):
561
if valid_pieces.has_key(p):
564
file, size = self.files[i]
567
if ( not files.has_key(i)
568
or not test(files[i],getsize(file),getmtime(file)) ):
569
start, end, offset, file = self.file_ranges[i]
571
print 'removing '+file
572
for p in xrange( int(start/self.piece_length),
573
int((end-1)/self.piece_length)+1 ):
574
if valid_pieces.has_key(p):
582
print valid_pieces.keys()
583
return valid_pieces.keys()