def DirSig(path_iter):
    """Alias for SigTarBlockIter below"""
    return SigTarBlockIter(path_iter)

def DirFull(path_iter):
    """Return a tarblock full backup of items in path_iter

    A full backup is just a diff starting from nothing (it may be less
    elegant than using a standard tar file, but we can be sure that it
    will be easy to split up the tar and make the volumes the same
    sizes).

    """
    return DirDelta(path_iter, cStringIO.StringIO(""))

def DirFull_WriteSig(path_iter, sig_outfp):
    """Return full backup like above, but also write signature to sig_outfp"""
    return DirDelta_WriteSig(path_iter, cStringIO.StringIO(""), sig_outfp)

def DirDelta(path_iter, dirsig_fileobj_list):
    """Produce tarblock diff given dirsig_fileobj_list and path_iter

    dirsig_fileobj_list should either be a tar fileobj or a list of
    those, sorted so the most recent is last.

    """
    if type(dirsig_fileobj_list) is types.ListType:
        sig_iter = combine_path_iters(map(sigtar2path_iter,
                                          dirsig_fileobj_list))
    else: sig_iter = sigtar2path_iter(dirsig_fileobj_list)
    return DeltaTarBlockIter(get_delta_iter(path_iter, sig_iter))

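# Illustrative usage sketch (hypothetical names: path_iter would come from
# whatever selection code feeds these functions, and the filenames are
# made up):
#
#     sigtar = open("/backups/duplicity-sig.tar", "rb")
#     delta_blocks = DirDelta(path_iter, sigtar)
#     write_block_iter(delta_blocks, "/backups/duplicity-delta.tar")
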
def delta_iter_error_handler(exc, new_path, sig_path, sig_tar = None):
    """Called by get_delta_iter, report error in getting delta"""
    if new_path: index_string = new_path.get_relative_path()
    elif sig_path: index_string = sig_path.get_relative_path()
    else: assert 0, "Both new and sig are None for some reason"
    log.Log("Error %s getting delta for %s" % (str(exc), index_string), 2)
    return None

def get_delta_path(new_path, sig_path):
    """Get one delta_path, or None if error"""
    delta_path = new_path.get_ropath()
    if not new_path.isreg():
        delta_path.difftype = "snapshot"
    elif not sig_path or not sig_path.isreg():
        delta_path.difftype = "snapshot"
        delta_path.setfileobj(new_path.open("rb"))
    else: # both new and sig exist and are regular files
        assert sig_path.difftype == "signature"
        delta_path.difftype = "diff"
        sigfp, newfp = sig_path.open("rb"), new_path.open("rb")
        delta_path.setfileobj(librsync.DeltaFile(sigfp, newfp))
    new_path.copy_attribs(delta_path)
    delta_path.stat.st_size = new_path.stat.st_size
    return delta_path

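# Summary of the three cases above (illustrative):
#
#     new_path         sig_path                 resulting delta_path
#     ---------------  -----------------------  ---------------------------
#     non-regular      anything                 "snapshot", no file data
#     regular          missing or non-regular   "snapshot", full file data
#     regular          regular with signature   "diff", librsync delta
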
def log_delta_path(delta_path, new_path = None, stats = None):
    """Look at delta path and log delta.  Add stats if new_path is set"""
    if delta_path.difftype == "snapshot":
        if new_path: stats.add_new_file(new_path)
        log.Log("Generating delta - new file: %s" %
                (delta_path.get_relative_path(),), 5)
    else:
        if new_path: stats.add_changed_file(new_path)
        log.Log("Generating delta - changed file: %s" %
                (delta_path.get_relative_path(),), 5)

def get_delta_iter(new_iter, sig_iter):
    """Generate delta iter from new Path iter and sig Path iter.

    For each delta path of regular file type, path.difftype will be
    set to "snapshot" or "diff".  sig_iter will probably iterate
    ROPaths instead of Paths.

    """
    collated = collate2iters(new_iter, sig_iter)
    for new_path, sig_path in collated:
        log.Log("Comparing %s and %s" % (new_path and new_path.index,
                                         sig_path and sig_path.index), 6)
        if (not new_path or not new_path.type) and sig_path and sig_path.type:
            log.Log("Generating delta - deleted file: %s" %
                    (sig_path.get_relative_path(),), 5)
            yield ROPath(sig_path.index)
        elif sig_path and new_path == sig_path: pass # no change, skip
        else:
            delta_path = robust.check_common_error(delta_iter_error_handler,
                                                   get_delta_path,
                                                   (new_path, sig_path))
            if delta_path: # if not, an error must have occurred
                log_delta_path(delta_path)
                yield delta_path

def sigtar2path_iter(sigtarobj):
    """Convert signature tar file object open for reading into path iter"""
    tf = tarfile.TarFile("Arbitrary Name", "r", sigtarobj)
    for tarinfo in tf:
        for prefix in ["signature/", "snapshot/", "deleted/"]:
            if tarinfo.name.startswith(prefix):
                # strip prefix from name and set difftype from it
                name, difftype = tarinfo.name[len(prefix):], prefix[:-1]
                break
        else: raise DiffDirException("Bad tarinfo name %s" % (tarinfo.name,))

        index = tuple(name.split("/"))
        if not index[-1]: index = index[:-1] # deal with trailing /, ""

        ropath = ROPath(index)
        ropath.difftype = difftype
        if difftype == "signature" or difftype == "snapshot":
            ropath.init_from_tarinfo(tarinfo)
            if ropath.isreg(): ropath.setfileobj(tf.extractfile(tarinfo))
        yield ropath
    sigtarobj.close()

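# Layout sketch of a signature tar as parsed above (hypothetical entries):
#
#     signature/etc/passwd  -> index ("etc", "passwd"), difftype "signature"
#     snapshot/etc/         -> index ("etc",), difftype "snapshot"
#     deleted/etc/motd      -> index ("etc", "motd"), difftype "deleted"
#
# The trailing-slash handling above is what reduces "etc/" to ("etc",).
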
def collate2iters(riter1, riter2):
    """Collate two iterators.

    The elements yielded by each iterator must have an index variable,
    and this function returns pairs (elem1, elem2), (elem1, None), or
    (None, elem2).  The two elements in a pair will have the same
    index, and earlier indices are yielded before later ones.

    """
    relem1, relem2 = None, None
    while 1:
        if not relem1:
            try: relem1 = riter1.next()
            except StopIteration:
                if relem2: yield (None, relem2)
                for relem2 in riter2: yield (None, relem2)
                break
            index1 = relem1.index
        if not relem2:
            try: relem2 = riter2.next()
            except StopIteration:
                if relem1: yield (relem1, None)
                for relem1 in riter1: yield (relem1, None)
                break
            index2 = relem2.index

        if index1 < index2:
            yield (relem1, None)
            relem1 = None
        elif index1 == index2:
            yield (relem1, relem2)
            relem1, relem2 = None, None
        else: # index2 is less
            yield (None, relem2)
            relem2 = None

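# Illustrative sketch (hypothetical helper, not called anywhere): anything
# with an .index attribute can be collated, so tiny stand-ins suffice.
def _collate2iters_example():
    """Show the pairing behavior of collate2iters on dummy elements"""
    class Dummy:
        def __init__(self, index): self.index = index
    left = iter([Dummy((1,)), Dummy((2,)), Dummy((4,))])
    right = iter([Dummy((2,)), Dummy((3,)), Dummy((4,))])
    # Pairs come back as (left-only, None), (match, match),
    # (None, right-only), (match, match) -- in increasing index order
    return list(collate2iters(left, right))
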
def combine_path_iters(path_iter_list):
    """Produce new iterator by combining the iterators in path_iter_list

    This new iter will iterate every path that is in path_iter_list in
    order of increasing index.  If multiple iterators in
    path_iter_list yield paths with the same index, combine_path_iters
    will discard all paths but the one yielded by the last path_iter.

    This is used to combine signature iters, as the output will be a
    full up-to-date signature iter.

    """
    path_iter_list = path_iter_list[:] # copy before destructive reverse
    path_iter_list.reverse()

    def get_triple(iter_index):
        """Represent the next element as a triple, to help sorting"""
        try: path = path_iter_list[iter_index].next()
        except StopIteration: return None
        return (path.index, iter_index, path)

    def refresh_triple_list(triple_list):
        """Update all elements with path_index same as first element"""
        path_index = triple_list[0][0]
        iter_index = 0
        while iter_index < len(triple_list):
            old_triple = triple_list[iter_index]
            if old_triple[0] == path_index:
                new_triple = get_triple(old_triple[1])
                if new_triple:
                    triple_list[iter_index] = new_triple
                    iter_index += 1
                else: del triple_list[iter_index]
            else: break # assumed triple_list sorted, so can exit now
        triple_list.sort()

    triple_list = filter(lambda x: x, map(get_triple,
                                          range(len(path_iter_list))))
    triple_list.sort()
    while triple_list:
        yield triple_list[0][2]
        refresh_triple_list(triple_list)

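# Illustrative sketch (hypothetical helper, not called anywhere): when two
# iterators yield the same index, the path from the later iterator wins.
def _combine_path_iters_example():
    """Show combine_path_iters discarding older duplicate indices"""
    class Tagged:
        def __init__(self, index, tag): self.index, self.tag = index, tag
    old = iter([Tagged((1,), "old"), Tagged((2,), "old")])
    new = iter([Tagged((2,), "new"), Tagged((3,), "new")])
    # Expect [((1,), "old"), ((2,), "new"), ((3,), "new")]
    return [(p.index, p.tag) for p in combine_path_iters([old, new])]
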
def DirDelta_WriteSig(path_iter, sig_infp_list, newsig_outfp):
    """Like DirDelta but also write signature into sig_fileobj

    Like DirDelta, sig_infp_list can be a tar fileobj or a sorted list
    of those.  A signature will only be written to newsig_outfp if it
    is different from (the combined) sig_infp_list.

    """
    global stats
    stats = statistics.StatsDeltaProcess()
    if type(sig_infp_list) is types.ListType:
        sig_path_iter = get_combined_path_iter(sig_infp_list)
    else: sig_path_iter = sigtar2path_iter(sig_infp_list)
    delta_iter = get_delta_iter_w_sig(path_iter, sig_path_iter, newsig_outfp)
    return DeltaTarBlockIter(delta_iter)

def get_combined_path_iter(sig_infp_list):
    """Return path iter combining signatures in list of open sig files"""
    return combine_path_iters(map(sigtar2path_iter, sig_infp_list))

def get_delta_iter_w_sig(path_iter, sig_path_iter, sig_fileobj):
    """Like get_delta_iter but also write signatures to sig_fileobj"""
    collated = collate2iters(path_iter, sig_path_iter)
    sigTarFile = tarfile.TarFile("arbitrary", "w", sig_fileobj)
    for new_path, sig_path in collated:
        log.Log("Comparing %s and %s" % (new_path and new_path.index,
                                         sig_path and sig_path.index), 6)
        if not new_path or not new_path.type: # file doesn't exist
            if sig_path and sig_path.exists(): # but signature says it did
                log.Log("Generating delta - deleted file: %s" %
                        (sig_path.get_relative_path(),), 5)
                ti = ROPath(sig_path.index).get_tarinfo()
                ti.name = "deleted/" + "/".join(sig_path.index)
                sigTarFile.addfile(ti)
                stats.add_deleted_file()
                yield ROPath(sig_path.index)
        elif not sig_path or new_path != sig_path:
            # Must calculate new signature and create delta
            delta_path = robust.check_common_error(
                delta_iter_error_handler, get_delta_path_w_sig,
                (new_path, sig_path, sigTarFile))
            if delta_path:
                log_delta_path(delta_path, new_path, stats)
                yield delta_path
            else: stats.Errors += 1
        else: stats.add_unchanged_file(new_path)
    sigTarFile.close()

def get_delta_path_w_sig(new_path, sig_path, sigTarFile):
    """Return new delta_path which, when read, writes sig to sig_fileobj"""
    assert new_path
    ti = new_path.get_tarinfo()
    index = new_path.index
    delta_path = new_path.get_ropath()
    log.Log("Getting delta of %s and %s" % (new_path, sig_path), 7)

    def callback(sig_string):
        """Callback activated when FileWithSignature read to end"""
        ti.size = len(sig_string)
        ti.name = "signature/" + "/".join(index)
        sigTarFile.addfile(ti, cStringIO.StringIO(sig_string))

    if new_path.isreg() and sig_path and sig_path.difftype == "signature":
        delta_path.difftype = "diff"
        old_sigfp = sig_path.open("rb")
        newfp = FileWithSignature(new_path.open("rb"), callback,
                                  new_path.getsize())
        delta_path.setfileobj(librsync.DeltaFile(old_sigfp, newfp))
    else:
        delta_path.difftype = "snapshot"
        ti.name = "snapshot/" + "/".join(index)
        if not new_path.isreg(): sigTarFile.addfile(ti)
        else: delta_path.setfileobj(FileWithSignature(new_path.open("rb"),
                                                      callback,
                                                      new_path.getsize()))
    new_path.copy_attribs(delta_path)
    delta_path.stat.st_size = new_path.stat.st_size
    return delta_path

class FileWithSignature:
    """File-like object which also computes signature as it is read"""
    blocksize = 32 * 1024
    def __init__(self, infile, callback, filelen, *extra_args):
        """FileTee initializer

        The object will act like infile, but whenever it is read it
        adds infile's data to a SigGenerator object.  When the file
        has been read to the end the callback will be called with the
        calculated signature, and any extra_args if given.

        filelen is used to calculate the block size of the signature.

        """
        self.infile, self.callback = infile, callback
        self.sig_gen = librsync.SigGenerator(get_block_size(filelen))
        self.activated_callback = None
        self.extra_args = extra_args

    def read(self, length = -1):
        buf = self.infile.read(length)
        self.sig_gen.update(buf)
        return buf

    def close(self):
        # Make sure all of infile read
        if not self.activated_callback:
            while self.read(self.blocksize): pass
            self.activated_callback = 1
            self.callback(self.sig_gen.getsig(), *self.extra_args)
        return self.infile.close()

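# Illustrative usage sketch (hypothetical file and callback; relies on the
# librsync wrapper used above):
#
#     def report(sig_string):
#         log.Log("signature is %d bytes" % len(sig_string), 7)
#     fws = FileWithSignature(new_path.open("rb"), report,
#                             new_path.getsize())
#     while fws.read(FileWithSignature.blocksize): pass # tees into sig_gen
#     fws.close() # drains any remainder, then fires the callback
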
class TarBlock:
    """Contain information to add next file to tar"""
    def __init__(self, index, data):
        """TarBlock initializer - just store data"""
        self.index, self.data = index, data

class TarBlockIter:
    """A bit like an iterator, yield tar blocks given input iterator

    Unlike an iterator, however, control over the maximum size of a
    tarblock is available by passing an argument to next().  Also,
    get_footer() is available.

    """
    def __init__(self, input_iter):
        """TarBlockIter initializer"""
        self.input_iter = input_iter
        self.offset = 0l # total length of data read
        self.process_waiting = None # process_continued has more blocks
        self.previous_index = None # holds index of last block returned
        self.remember_next = None # see remember_next_index()
        self.remember_value = None

        # We need to instantiate a dummy TarFile just to get access to
        # some of the functions like _get_full_headers.
        self.tf = tarfile.TarFromIterator(None)

    def tarinfo2tarblock(self, index, tarinfo, file_data = ""):
        """Make tarblock out of tarinfo and file data"""
        tarinfo.size = len(file_data)
        headers = self.tf._get_full_headers(tarinfo)
        blocks, remainder = divmod(tarinfo.size, tarfile.BLOCKSIZE)
        if remainder > 0: filler_data = "\0" * (tarfile.BLOCKSIZE - remainder)
        else: filler_data = ""
        return TarBlock(index, "%s%s%s" % (headers, file_data, filler_data))

    def process(self, val, size):
        """Turn next value of input_iter into a TarBlock"""
        assert not self.process_waiting
        XXX # this should be overridden in subclass

    def process_continued(self, size):
        """Get more tarblocks

        If processing val above would produce more than one TarBlock,
        get the rest of them by calling process_continued.

        """
        assert self.process_waiting
        XXX # Override this too in subclass

    def next(self, size = 1024 * 1024):
        """Return next block, no bigger than size, and update offset"""
        if self.process_waiting: result = self.process_continued(size)
        else: # Below a StopIteration exception will just be passed upwards
            result = self.process(self.input_iter.next(), size)
        self.offset += len(result.data)
        self.previous_index = result.index
        if self.remember_next:
            self.remember_value = result.index
            self.remember_next = None
        return result

    def get_previous_index(self):
        """Return index of last tarblock, or None if no previous index"""
        return self.previous_index

    def remember_next_index(self):
        """When called, remember the index of the next block iterated"""
        self.remember_next = 1
        self.remember_value = None

    def recall_index(self):
        """Retrieve index remembered with remember_next_index"""
        return self.remember_value # or None if no index recalled

    def get_footer(self):
        """Return closing string for tarfile, reset offset"""
        blocks, remainder = divmod(self.offset, tarfile.RECORDSIZE)
        self.offset = 0l
        return '\0' * (tarfile.RECORDSIZE - remainder) # remainder can be 0

    def __iter__(self): return self

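# Sketch of the subclassing contract (hypothetical subclass): process()
# turns one input value into a TarBlock, and process_waiting is set when
# process_continued() should supply the remaining blocks.  The two
# subclasses below follow this pattern.
#
#     class SnapshotTarBlockIter(TarBlockIter):
#         def process(self, path, size):
#             ti = path.get_tarinfo()
#             ti.name = "snapshot/" + "/".join(path.index)
#             if path.isreg():
#                 return self.tarinfo2tarblock(path.index, ti,
#                                              path.open("rb").read())
#             return self.tarinfo2tarblock(path.index, ti)
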
class SigTarBlockIter(TarBlockIter):
    """TarBlockIter that yields blocks of a signature tar from path_iter"""
    def process(self, path, size):
        """Return associated signature TarBlock from path

        Here size is just ignored --- let's hope a signature isn't too
        big.  Also signatures are stored in multiple volumes so it
        doesn't matter.

        """
        ti = path.get_tarinfo()
        if path.isreg():
            sfp = librsync.SigFile(path.open("rb"),
                                   get_block_size(path.getsize()))
            sigbuf = sfp.read()
            sfp.close()
            ti.name = "signature/" + "/".join(path.index)
            return self.tarinfo2tarblock(path.index, ti, sigbuf)
        else:
            ti.name = "snapshot/" + "/".join(path.index)
            return self.tarinfo2tarblock(path.index, ti)

class DeltaTarBlockIter(TarBlockIter):
    """TarBlockIter that yields parts of a deltatar file

    Unlike SigTarBlockIter, the argument to __init__ is a
    delta_path_iter, so the delta information has already been
    calculated.

    """
    def process(self, delta_ropath, size):
        """Get a tarblock from delta_ropath"""
        def add_prefix(tarinfo, prefix):
            """Add prefix to the name of a tarinfo file"""
            if tarinfo.name == ".": tarinfo.name = prefix + "/"
            else: tarinfo.name = "%s/%s" % (prefix, tarinfo.name)

        ti = delta_ropath.get_tarinfo()
        index = delta_ropath.index

        # Return blocks of deleted files or fileless snapshots
        if not delta_ropath.type or not delta_ropath.fileobj:
            if not delta_ropath.type: add_prefix(ti, "deleted")
            else:
                assert delta_ropath.difftype == "snapshot"
                add_prefix(ti, "snapshot")
            return self.tarinfo2tarblock(index, ti)

        # Now handle single volume block case
        fp = delta_ropath.open("rb")
        # Below the 512 is the usual length of a tar header
        data, last_block = self.get_data_block(fp, size - 512)
        if stats: stats.RawDeltaSize += len(data)
        if last_block:
            if delta_ropath.difftype == "snapshot": add_prefix(ti, "snapshot")
            elif delta_ropath.difftype == "diff": add_prefix(ti, "diff")
            else: assert 0, "Unknown difftype"
            return self.tarinfo2tarblock(index, ti, data)

        # Finally, do multivol snapshot or diff case
        full_name = "multivol_%s/%s" % (delta_ropath.difftype, ti.name)
        ti.name = full_name + "/1"
        self.process_prefix = full_name
        self.process_fp = fp
        self.process_ropath = delta_ropath
        self.process_waiting = 1
        self.process_next_vol_number = 2
        return self.tarinfo2tarblock(index, ti, data)

    def get_data_block(self, fp, max_size):
        """Return pair (next data block, boolean last data block)"""
        read_size = min(64*1024, max_size)
        buf = fp.read(read_size)
        if len(buf) < read_size:
            if fp.close(): raise DiffDirException("Error closing file")
            return (buf, 1)
        else: return (buf, None)

    def process_continued(self, size):
        """Return next volume in multivol diff or snapshot"""
        assert self.process_waiting
        ropath = self.process_ropath
        ti, index = ropath.get_tarinfo(), ropath.index
        ti.name = "%s/%d" % (self.process_prefix, self.process_next_vol_number)
        data, last_block = self.get_data_block(self.process_fp, size-512)
        if last_block:
            self.process_prefix = None
            self.process_fp = None
            self.process_ropath = None
            self.process_waiting = None
            self.process_next_vol_number = None
        else: self.process_next_vol_number += 1
        return self.tarinfo2tarblock(index, ti, data)

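# Naming sketch for the multivol case above (hypothetical file): a large
# changed file etc/bigfile is split across members named
# "multivol_diff/etc/bigfile/1", "multivol_diff/etc/bigfile/2", and so on,
# each built so header plus data fit within the size passed to next().
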
def write_block_iter(block_iter, out_obj):
    """Write block_iter to filename, path, or file object"""
    if isinstance(out_obj, Path): fp = open(out_obj.name, "wb")
    elif type(out_obj) is types.StringType: fp = open(out_obj, "wb")
    else: fp = out_obj
    for block in block_iter: fp.write(block.data)
    fp.write(block_iter.get_footer())
    assert not fp.close()
    if isinstance(out_obj, Path): out_obj.setdata()

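# Illustrative usage sketch (hypothetical filename): out_obj may be a Path,
# a filename string, or an open file object.
#
#     write_block_iter(DirSig(path_iter), "/backups/duplicity-sig.tar")
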
def get_block_size(file_len):
    """Return a reasonable block size to use on files of length file_len

    If the block size is too big, deltas will be bigger than is
    necessary.  If the block size is too small, making deltas and
    patching can take a really long time.

    """
    if file_len < 1024000: return 512 # set minimum of 512 bytes
    else: # Split file into about 2000 pieces, rounding to 512
        return min(long((file_len/(2000*512))*512), 2048)

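# Worked examples of the arithmetic above (values follow from the code):
#
#     get_block_size(500000)   => 512   (under the 1024000-byte cutoff)
#     get_block_size(2048000)  => 1024  (2048000 / (2000*512) = 2, times 512)
#     get_block_size(10**9)    => 2048  (capped by the min() with 2048)
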