# Copyright (C) 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Core compression logic for compressing streams of related files."""

from itertools import izip
from cStringIO import StringIO
from bzrlib.graph import Graph
from bzrlib.btree_index import BTreeBuilder
from bzrlib.lru_cache import LRUSizeCache
from bzrlib.tsort import topo_sort
from bzrlib.versionedfile import (
    adapter_registry,
    AbsentContentFactory,
    ChunkedContentFactory,
    FulltextContentFactory,
    VersionedFiles,
    )

_USE_LZMA = False and (pylzma is not None)

# osutils.sha_string('')
_null_sha1 = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'


def sort_gc_optimal(parent_map):
    """Sort and group the keys in parent_map into groupcompress order.

    groupcompress is defined (currently) as reverse-topological order, grouped
    by the file-id.

    :return: A sorted-list of keys
    """
    # groupcompress ordering is approximately reverse topological,
    # properly grouped by file-id.
    per_prefix_map = {}
    for item in parent_map.iteritems():
        key = item[0]
        if isinstance(key, str) or len(key) == 1:
            prefix = ''
        else:
            prefix = key[0]
        try:
            per_prefix_map[prefix].append(item)
        except KeyError:
            per_prefix_map[prefix] = [item]
    present_keys = []
    for prefix in sorted(per_prefix_map):
        present_keys.extend(reversed(topo_sort(per_prefix_map[prefix])))
    return present_keys
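
# A minimal usage sketch (hypothetical keys, not taken from a real repository):
# given parent_map = {('f1', 'rev2'): (('f1', 'rev1'),),
#                     ('f1', 'rev1'): (),
#                     ('f2', 'rev1'): ()},
# sort_gc_optimal(parent_map) groups by the 'f1'/'f2' prefixes and reverses
# the topological order within each group, yielding
# [('f1', 'rev2'), ('f1', 'rev1'), ('f2', 'rev1')].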

# The max zlib window size is 32kB, so if we set 'max_size' output of the
# decompressor to the requested bytes + 32kB, then we should guarantee
# num_bytes coming out.
_ZLIB_DECOMP_WINDOW = 32*1024


class GroupCompressBlock(object):
    """An object which maintains the internal structure of the compressed data.

    This tracks the meta info (start of text, length, type, etc.)
    """

    # Group Compress Block v1 Zlib
    GCB_HEADER = 'gcb1z\n'
    # Group Compress Block v1 Lzma
    GCB_LZ_HEADER = 'gcb1l\n'
    GCB_KNOWN_HEADERS = (GCB_HEADER, GCB_LZ_HEADER)

    def __init__(self):
        # map by key? or just order in file?
        self._compressor_name = None
        self._z_content = None
        self._z_content_decompressor = None
        self._z_content_length = None
        self._content_length = None
        self._content = None

    def __len__(self):
        # This is the maximum number of bytes this object will reference if
        # everything is decompressed. However, if we decompress less than
        # everything... (this would cause some problems for LRUSizeCache)
        return self._content_length + self._z_content_length
118
def _ensure_content(self, num_bytes=None):
119
"""Make sure that content has been expanded enough.
121
:param num_bytes: Ensure that we have extracted at least num_bytes of
122
content. If None, consume everything
124
# TODO: If we re-use the same content block at different times during
125
# get_record_stream(), it is possible that the first pass will
126
# get inserted, triggering an extract/_ensure_content() which
127
# will get rid of _z_content. And then the next use of the block
128
# will try to access _z_content (to send it over the wire), and
129
# fail because it is already extracted. Consider never releasing
130
# _z_content because of this.
131
if num_bytes is None:
132
num_bytes = self._content_length
133
elif (self._content_length is not None
134
and num_bytes > self._content_length):
135
raise AssertionError(
136
'requested num_bytes (%d) > content length (%d)'
137
% (num_bytes, self._content_length))
138
# Expand the content if required
139
if self._content is None:
140
if self._z_content is None:
141
raise AssertionError('No content to decompress')
142
if self._z_content == '':
144
elif self._compressor_name == 'lzma':
145
# We don't do partial lzma decomp yet
146
self._content = pylzma.decompress(self._z_content)
147
elif self._compressor_name == 'zlib':
148
# Start a zlib decompressor
149
if num_bytes is None:
150
self._content = zlib.decompress(self._z_content)
152
self._z_content_decompressor = zlib.decompressobj()
153
# Seed the decompressor with the uncompressed bytes, so
154
# that the rest of the code is simplified
155
self._content = self._z_content_decompressor.decompress(
156
self._z_content, num_bytes + _ZLIB_DECOMP_WINDOW)
158
raise AssertionError('Unknown compressor: %r'
159
% self._compressor_name)
160
# Any bytes remaining to be decompressed will be in the decompressors
163
# Do we have enough bytes already?
164
if num_bytes is not None and len(self._content) >= num_bytes:
166
if num_bytes is None and self._z_content_decompressor is None:
167
# We must have already decompressed everything
169
# If we got this far, and don't have a decompressor, something is wrong
170
if self._z_content_decompressor is None:
171
raise AssertionError(
172
'No decompressor to decompress %d bytes' % num_bytes)
173
remaining_decomp = self._z_content_decompressor.unconsumed_tail
174
if num_bytes is None:
176
# We don't know how much is left, but we'll decompress it all
177
self._content += self._z_content_decompressor.decompress(
179
# Note: There's what I consider a bug in zlib.decompressobj
180
# If you pass back in the entire unconsumed_tail, only
181
# this time you don't pass a max-size, it doesn't
182
# change the unconsumed_tail back to None/''.
183
# However, we know we are done with the whole stream
184
self._z_content_decompressor = None
185
# XXX: Why is this the only place in this routine we set this?
186
self._content_length = len(self._content)
188
if not remaining_decomp:
189
raise AssertionError('Nothing left to decompress')
190
needed_bytes = num_bytes - len(self._content)
191
# We always set max_size to 32kB over the minimum needed, so that
192
# zlib will give us as much as we really want.
193
# TODO: If this isn't good enough, we could make a loop here,
194
# that keeps expanding the request until we get enough
195
self._content += self._z_content_decompressor.decompress(
196
remaining_decomp, needed_bytes + _ZLIB_DECOMP_WINDOW)
197
if len(self._content) < num_bytes:
198
raise AssertionError('%d bytes wanted, only %d available'
199
% (num_bytes, len(self._content)))
200
if not self._z_content_decompressor.unconsumed_tail:
201
# The stream is finished
202
self._z_content_decompressor = None
204
def _parse_bytes(self, bytes, pos):
205
"""Read the various lengths from the header.
207
This also populates the various 'compressed' buffers.
209
:return: The position in bytes just after the last newline
211
# At present, we have 2 integers for the compressed and uncompressed
212
# content. In base10 (ascii) 14 bytes can represent > 1TB, so to avoid
213
# checking too far, cap the search to 14 bytes.
214
pos2 = bytes.index('\n', pos, pos + 14)
215
self._z_content_length = int(bytes[pos:pos2])
217
pos2 = bytes.index('\n', pos, pos + 14)
218
self._content_length = int(bytes[pos:pos2])
220
if len(bytes) != (pos + self._z_content_length):
221
# XXX: Define some GCCorrupt error ?
222
raise AssertionError('Invalid bytes: (%d) != %d + %d' %
223
(len(bytes), pos, self._z_content_length))
224
self._z_content = bytes[pos:]
227
def from_bytes(cls, bytes):
229
if bytes[:6] not in cls.GCB_KNOWN_HEADERS:
230
raise ValueError('bytes did not start with any of %r'
231
% (cls.GCB_KNOWN_HEADERS,))
232
        # XXX: why not test the whole header?
234
out._compressor_name = 'zlib'
235
elif bytes[4] == 'l':
236
out._compressor_name = 'lzma'
238
raise ValueError('unknown compressor: %r' % (bytes,))
239
out._parse_bytes(bytes, 6)
242
def extract(self, key, start, end, sha1=None):
243
"""Extract the text for a specific key.
245
:param key: The label used for this content
246
:param sha1: TODO (should we validate only when sha1 is supplied?)
247
:return: The bytes for the content
249
if start == end == 0:
251
self._ensure_content(end)
252
# The bytes are 'f' or 'd' for the type, then a variable-length
253
# base128 integer for the content size, then the actual content
254
# We know that the variable-length integer won't be longer than 5
255
# bytes (it takes 5 bytes to encode 2^32)
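        # For example (assuming the LSB-first, 7-bits-per-byte scheme used by
        # encode_base128_int), a 300-byte fulltext record would start with
        # 'f' followed by '\xac\x02' and then the 300 bytes of content.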
256
c = self._content[start]
261
raise ValueError('Unknown content control code: %s'
264
content_len, len_len = decode_base128_int(
265
self._content[start + 1:start + 6])
266
content_start = start + 1 + len_len
267
if end != content_start + content_len:
268
raise ValueError('end != len according to field header'
269
' %s != %s' % (end, content_start + content_len))
271
bytes = self._content[content_start:end]
273
bytes = apply_delta_to_source(self._content, content_start, end)
276
def set_content(self, content):
277
"""Set the content of this block."""
278
self._content_length = len(content)
279
self._content = content
280
self._z_content = None
283
"""Encode the information into a byte stream."""
284
compress = zlib.compress
286
compress = pylzma.compress
287
if self._z_content is None:
288
if self._content is None:
289
raise AssertionError('Nothing to compress')
290
self._z_content = compress(self._content)
291
self._z_content_length = len(self._z_content)
293
header = self.GCB_LZ_HEADER
295
header = self.GCB_HEADER
297
'%d\n%d\n' % (self._z_content_length, self._content_length),
300
return ''.join(chunks)
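
    # A sketch of the serialised layout produced above: the header line
    # ('gcb1z\n' for zlib or 'gcb1l\n' for lzma), then
    # '<z_content_length>\n<content_length>\n', then the compressed content.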
302
def _dump(self, include_text=False):
303
"""Take this block, and spit out a human-readable structure.
305
        :param include_text: Insert records also include the text itself; choose
            whether you want this displayed in the dump or not.
307
:return: A dump of the given block. The layout is something like:
308
[('f', length), ('d', delta_length, text_length, [delta_info])]
309
delta_info := [('i', num_bytes, text), ('c', offset, num_bytes),
312
self._ensure_content()
315
while pos < self._content_length:
316
kind = self._content[pos]
318
if kind not in ('f', 'd'):
319
raise ValueError('invalid kind character: %r' % (kind,))
320
content_len, len_len = decode_base128_int(
321
self._content[pos:pos + 5])
323
if content_len + pos > self._content_length:
324
raise ValueError('invalid content_len %d for record @ pos %d'
325
% (content_len, pos - len_len - 1))
326
if kind == 'f': # Fulltext
328
text = self._content[pos:pos+content_len]
329
result.append(('f', content_len, text))
331
result.append(('f', content_len))
332
elif kind == 'd': # Delta
333
delta_content = self._content[pos:pos+content_len]
335
# The first entry in a delta is the decompressed length
336
decomp_len, delta_pos = decode_base128_int(delta_content)
337
result.append(('d', content_len, decomp_len, delta_info))
339
while delta_pos < content_len:
340
c = ord(delta_content[delta_pos])
344
delta_pos) = decode_copy_instruction(delta_content, c,
347
text = self._content[offset:offset+length]
348
delta_info.append(('c', offset, length, text))
350
delta_info.append(('c', offset, length))
351
measured_len += length
354
txt = delta_content[delta_pos:delta_pos+c]
357
delta_info.append(('i', c, txt))
360
if delta_pos != content_len:
361
raise ValueError('Delta consumed a bad number of bytes:'
362
' %d != %d' % (delta_pos, content_len))
363
if measured_len != decomp_len:
364
raise ValueError('Delta claimed fulltext was %d bytes, but'
365
' extraction resulted in %d bytes'
366
% (decomp_len, measured_len))
371
class _LazyGroupCompressFactory(object):
372
"""Yield content from a GroupCompressBlock on demand."""
374
def __init__(self, key, parents, manager, start, end, first):
375
"""Create a _LazyGroupCompressFactory
377
:param key: The key of just this record
378
:param parents: The parents of this key (possibly None)
379
        :param manager: The _LazyGroupContentManager holding the GroupCompressBlock
380
:param start: Offset of the first byte for this record in the
382
:param end: Offset of the byte just after the end of this record
383
(ie, bytes = content[start:end])
384
:param first: Is this the first Factory for the given block?
387
self.parents = parents
389
# Note: This attribute coupled with Manager._factories creates a
390
# reference cycle. Perhaps we would rather use a weakref(), or
391
# find an appropriate time to release the ref. After the first
392
# get_bytes_as call? After Manager.get_record_stream() returns
394
self._manager = manager
396
self.storage_kind = 'groupcompress-block'
398
self.storage_kind = 'groupcompress-block-ref'
404
return '%s(%s, first=%s)' % (self.__class__.__name__,
405
self.key, self._first)
407
def get_bytes_as(self, storage_kind):
408
if storage_kind == self.storage_kind:
410
# wire bytes, something...
411
return self._manager._wire_bytes()
414
if storage_kind in ('fulltext', 'chunked'):
415
if self._bytes is None:
416
# Grab and cache the raw bytes for this entry
417
# and break the ref-cycle with _manager since we don't need it
419
self._manager._prepare_for_extract()
420
block = self._manager._block
421
self._bytes = block.extract(self.key, self._start, self._end)
422
# There are code paths that first extract as fulltext, and then
423
# extract as storage_kind (smart fetch). So we don't break the
424
# refcycle here, but instead in manager.get_record_stream()
425
# self._manager = None
426
if storage_kind == 'fulltext':
430
raise errors.UnavailableRepresentation(self.key, storage_kind,
434
class _LazyGroupContentManager(object):
435
"""This manages a group of _LazyGroupCompressFactory objects."""
437
def __init__(self, block):
439
# We need to preserve the ordering
443
def add_factory(self, key, parents, start, end):
444
if not self._factories:
448
# Note that this creates a reference cycle....
449
factory = _LazyGroupCompressFactory(key, parents, self,
450
start, end, first=first)
451
# max() works here, but as a function call, doing a compare seems to be
452
# significantly faster, timeit says 250ms for max() and 100ms for the
454
if end > self._last_byte:
455
self._last_byte = end
456
self._factories.append(factory)
458
def get_record_stream(self):
459
"""Get a record for all keys added so far."""
460
for factory in self._factories:
462
# Break the ref-cycle
463
factory._bytes = None
464
factory._manager = None
465
# TODO: Consider setting self._factories = None after the above loop,
466
# as it will break the reference cycle
468
def _trim_block(self, last_byte):
469
"""Create a new GroupCompressBlock, with just some of the content."""
470
# None of the factories need to be adjusted, because the content is
471
# located in an identical place. Just that some of the unreferenced
472
# trailing bytes are stripped
473
trace.mutter('stripping trailing bytes from groupcompress block'
474
' %d => %d', self._block._content_length, last_byte)
475
new_block = GroupCompressBlock()
476
self._block._ensure_content(last_byte)
477
new_block.set_content(self._block._content[:last_byte])
478
self._block = new_block
480
def _rebuild_block(self):
481
"""Create a new GroupCompressBlock with only the referenced texts."""
482
compressor = GroupCompressor()
484
old_length = self._block._content_length
486
for factory in self._factories:
487
bytes = factory.get_bytes_as('fulltext')
488
(found_sha1, start_point, end_point,
489
type) = compressor.compress(factory.key, bytes, factory.sha1)
490
# Now update this factory with the new offsets, etc
491
factory.sha1 = found_sha1
492
factory._start = start_point
493
factory._end = end_point
494
self._last_byte = end_point
495
new_block = compressor.flush()
496
# TODO: Should we check that new_block really *is* smaller than the old
497
        #       block? It seems hard to come up with a way it would
        #       expand, since we do full compression again. Perhaps based on a
499
# request that ends up poorly ordered?
500
delta = time.time() - tstart
501
self._block = new_block
502
trace.mutter('creating new compressed block on-the-fly in %.3fs'
503
' %d bytes => %d bytes', delta, old_length,
504
self._block._content_length)
506
def _prepare_for_extract(self):
507
"""A _LazyGroupCompressFactory is about to extract to fulltext."""
508
# We expect that if one child is going to fulltext, all will be. This
509
# helps prevent all of them from extracting a small amount at a time.
510
        # Which in itself isn't terribly expensive, but resizing a 2MB block
        # 32kB at a time (self._block._content) is a little expensive.
512
self._block._ensure_content(self._last_byte)
514
def _check_rebuild_block(self):
515
"""Check to see if our block should be repacked."""
518
for factory in self._factories:
519
total_bytes_used += factory._end - factory._start
520
last_byte_used = max(last_byte_used, factory._end)
521
# If we are using most of the bytes from the block, we have nothing
522
        # else to check (currently more than 1/2)
523
if total_bytes_used * 2 >= self._block._content_length:
525
# Can we just strip off the trailing bytes? If we are going to be
526
# transmitting more than 50% of the front of the content, go ahead
527
if total_bytes_used * 2 > last_byte_used:
528
self._trim_block(last_byte_used)
531
# We are using a small amount of the data, and it isn't just packed
532
# nicely at the front, so rebuild the content.
533
# Note: This would be *nicer* as a strip-data-from-group, rather than
534
# building it up again from scratch
535
# It might be reasonable to consider the fulltext sizes for
536
# different bits when deciding this, too. As you may have a small
537
# fulltext, and a trivial delta, and you are just trading around
538
# for another fulltext. If we do a simple 'prune' you may end up
539
# expanding many deltas into fulltexts, as well.
540
# If we build a cheap enough 'strip', then we could try a strip,
541
# if that expands the content, we then rebuild.
542
self._rebuild_block()
544
def _wire_bytes(self):
545
"""Return a byte stream suitable for transmitting over the wire."""
546
self._check_rebuild_block()
547
# The outer block starts with:
548
# 'groupcompress-block\n'
549
# <length of compressed key info>\n
550
# <length of uncompressed info>\n
551
# <length of gc block>\n
554
lines = ['groupcompress-block\n']
555
# The minimal info we need is the key, the start offset, and the
556
# parents. The length and type are encoded in the record itself.
557
# However, passing in the other bits makes it easier. The list of
558
# keys, and the start offset, the length
560
# 1 line with parents, '' for ()
561
# 1 line for start offset
562
# 1 line for end byte
564
for factory in self._factories:
565
key_bytes = '\x00'.join(factory.key)
566
parents = factory.parents
568
parent_bytes = 'None:'
570
parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
571
record_header = '%s\n%s\n%d\n%d\n' % (
572
key_bytes, parent_bytes, factory._start, factory._end)
573
header_lines.append(record_header)
574
# TODO: Can we break the refcycle at this point and set
575
# factory._manager = None?
576
header_bytes = ''.join(header_lines)
578
header_bytes_len = len(header_bytes)
579
z_header_bytes = zlib.compress(header_bytes)
581
z_header_bytes_len = len(z_header_bytes)
582
block_bytes = self._block.to_bytes()
583
lines.append('%d\n%d\n%d\n' % (z_header_bytes_len, header_bytes_len,
585
lines.append(z_header_bytes)
586
lines.append(block_bytes)
587
del z_header_bytes, block_bytes
588
return ''.join(lines)
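
    # Wire layout sketch for _wire_bytes() (mirrored by from_bytes() below):
    #   'groupcompress-block\n'
    #   '<z_header_len>\n<header_len>\n<block_len>\n'
    #   <zlib-compressed header: key, parents, start, end per factory>
    #   <the GroupCompressBlock bytes from to_bytes()>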
591
def from_bytes(cls, bytes):
592
# TODO: This does extra string copying, probably better to do it a
594
(storage_kind, z_header_len, header_len,
595
block_len, rest) = bytes.split('\n', 4)
597
if storage_kind != 'groupcompress-block':
598
raise ValueError('Unknown storage kind: %s' % (storage_kind,))
599
z_header_len = int(z_header_len)
600
if len(rest) < z_header_len:
601
raise ValueError('Compressed header len shorter than all bytes')
602
z_header = rest[:z_header_len]
603
header_len = int(header_len)
604
header = zlib.decompress(z_header)
605
if len(header) != header_len:
606
raise ValueError('invalid length for decompressed bytes')
608
block_len = int(block_len)
609
if len(rest) != z_header_len + block_len:
610
raise ValueError('Invalid length for block')
611
block_bytes = rest[z_header_len:]
613
# So now we have a valid GCB, we just need to parse the factories that
615
header_lines = header.split('\n')
617
last = header_lines.pop()
619
raise ValueError('header lines did not end with a trailing'
621
if len(header_lines) % 4 != 0:
622
raise ValueError('The header was not an even multiple of 4 lines')
623
block = GroupCompressBlock.from_bytes(block_bytes)
626
for start in xrange(0, len(header_lines), 4):
628
key = tuple(header_lines[start].split('\x00'))
629
parents_line = header_lines[start+1]
630
if parents_line == 'None:':
633
parents = tuple([tuple(segment.split('\x00'))
634
for segment in parents_line.split('\t')
636
start_offset = int(header_lines[start+2])
637
end_offset = int(header_lines[start+3])
638
result.add_factory(key, parents, start_offset, end_offset)
642
def network_block_to_records(storage_kind, bytes, line_end):
643
if storage_kind != 'groupcompress-block':
644
raise ValueError('Unknown storage kind: %s' % (storage_kind,))
645
manager = _LazyGroupContentManager.from_bytes(bytes)
646
return manager.get_record_stream()
649
class _CommonGroupCompressor(object):
652
"""Create a GroupCompressor."""
657
self.labels_deltas = {}
658
self._delta_index = None # Set by the children
659
self._block = GroupCompressBlock()
661
def compress(self, key, bytes, expected_sha, nostore_sha=None, soft=False):
662
"""Compress lines with label key.
664
:param key: A key tuple. It is stored in the output
665
for identification of the text during decompression. If the last
666
element is 'None' it is replaced with the sha1 of the text -
668
:param bytes: The bytes to be compressed
669
:param expected_sha: If non-None, the sha the lines are believed to
670
have. During compression the sha is calculated; a mismatch will
672
:param nostore_sha: If the computed sha1 sum matches, we will raise
673
ExistingContent rather than adding the text.
674
:param soft: Do a 'soft' compression. This means that we require larger
675
ranges to match to be considered for a copy command.
677
:return: The sha1 of lines, the start and end offsets in the delta, and
678
the type ('fulltext' or 'delta').
680
:seealso VersionedFiles.add_lines:
682
if not bytes: # empty, like a dir entry, etc
683
if nostore_sha == _null_sha1:
684
raise errors.ExistingContent()
685
return _null_sha1, 0, 0, 'fulltext'
686
# we assume someone knew what they were doing when they passed it in
687
if expected_sha is not None:
690
sha1 = osutils.sha_string(bytes)
691
if nostore_sha is not None:
692
if sha1 == nostore_sha:
693
raise errors.ExistingContent()
695
key = key[:-1] + ('sha1:' + sha1,)
697
start, end, type = self._compress(key, bytes, len(bytes) / 2, soft)
698
return sha1, start, end, type
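
    # Minimal usage sketch (hypothetical key and text, using the
    # GroupCompressor name bound at the bottom of this module):
    #   compressor = GroupCompressor()
    #   sha1, start, end, kind = compressor.compress(('a-key',),
    #       'some text\n', None)
    #   bytes, sha1 = compressor.extract(('a-key',))
    #   block = compressor.flush()   # a GroupCompressBlock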
700
def _compress(self, key, bytes, max_delta_size, soft=False):
701
"""Compress lines with label key.
703
:param key: A key tuple. It is stored in the output for identification
704
of the text during decompression.
706
:param bytes: The bytes to be compressed
708
:param max_delta_size: The size above which we issue a fulltext instead
711
:param soft: Do a 'soft' compression. This means that we require larger
712
ranges to match to be considered for a copy command.
714
:return: The sha1 of lines, the start and end offsets in the delta, and
715
the type ('fulltext' or 'delta').
717
raise NotImplementedError(self._compress)
719
def extract(self, key):
720
"""Extract a key previously added to the compressor.
722
:param key: The key to extract.
723
        :return: A tuple of (bytes, sha1) for the requested key.
725
(start_byte, start_chunk, end_byte, end_chunk) = self.labels_deltas[key]
726
delta_chunks = self.chunks[start_chunk:end_chunk]
727
stored_bytes = ''.join(delta_chunks)
728
if stored_bytes[0] == 'f':
729
fulltext_len, offset = decode_base128_int(stored_bytes[1:10])
730
data_len = fulltext_len + 1 + offset
731
if data_len != len(stored_bytes):
732
raise ValueError('Index claimed fulltext len, but stored bytes'
734
% (len(stored_bytes), data_len))
735
bytes = stored_bytes[offset + 1:]
737
# XXX: This is inefficient at best
738
source = ''.join(self.chunks[:start_chunk])
739
if stored_bytes[0] != 'd':
740
raise ValueError('Unknown content kind, bytes claim %s'
741
% (stored_bytes[0],))
742
delta_len, offset = decode_base128_int(stored_bytes[1:10])
743
data_len = delta_len + 1 + offset
744
if data_len != len(stored_bytes):
745
raise ValueError('Index claimed delta len, but stored bytes'
747
% (len(stored_bytes), data_len))
748
bytes = apply_delta(source, stored_bytes[offset + 1:])
749
bytes_sha1 = osutils.sha_string(bytes)
750
return bytes, bytes_sha1
753
"""Finish this group, creating a formatted stream.
755
After calling this, the compressor should no longer be used
757
# TODO: this causes us to 'bloat' to 2x the size of content in the
758
# group. This has an impact for 'commit' of large objects.
759
# One possibility is to use self._content_chunks, and be lazy and
760
# only fill out self._content as a full string when we actually
761
# need it. That would at least drop the peak memory consumption
762
# for 'commit' down to ~1x the size of the largest file, at a
763
# cost of increased complexity within this code. 2x is still <<
764
# 3x the size of the largest file, so we are doing ok.
765
content = ''.join(self.chunks)
767
self._delta_index = None
768
self._block.set_content(content)
772
"""Call this if you want to 'revoke' the last compression.
774
After this, the data structures will be rolled back, but you cannot do
777
self._delta_index = None
778
del self.chunks[self._last[0]:]
779
self.endpoint = self._last[1]
783
"""Return the overall compression ratio."""
784
return float(self.input_bytes) / float(self.endpoint)
787
class PythonGroupCompressor(_CommonGroupCompressor):
790
"""Create a GroupCompressor.
792
Used only if the pyrex version is not available.
794
super(PythonGroupCompressor, self).__init__()
795
self._delta_index = LinesDeltaIndex([])
796
# The actual content is managed by LinesDeltaIndex
797
self.chunks = self._delta_index.lines
799
def _compress(self, key, bytes, max_delta_size, soft=False):
800
"""see _CommonGroupCompressor._compress"""
801
input_len = len(bytes)
802
new_lines = osutils.split_lines(bytes)
803
out_lines, index_lines = self._delta_index.make_delta(
804
new_lines, bytes_length=input_len, soft=soft)
805
delta_length = sum(map(len, out_lines))
806
if delta_length > max_delta_size:
807
# The delta is longer than the fulltext, insert a fulltext
809
out_lines = ['f', encode_base128_int(input_len)]
810
out_lines.extend(new_lines)
811
index_lines = [False, False]
812
index_lines.extend([True] * len(new_lines))
814
# this is a worthy delta, output it
817
# Update the delta_length to include those two encoded integers
818
out_lines[1] = encode_base128_int(delta_length)
820
start = self.endpoint
821
chunk_start = len(self.chunks)
822
self._last = (chunk_start, self.endpoint)
823
self._delta_index.extend_lines(out_lines, index_lines)
824
self.endpoint = self._delta_index.endpoint
825
self.input_bytes += input_len
826
chunk_end = len(self.chunks)
827
self.labels_deltas[key] = (start, chunk_start,
828
self.endpoint, chunk_end)
829
return start, self.endpoint, type
832
class PyrexGroupCompressor(_CommonGroupCompressor):
833
"""Produce a serialised group of compressed texts.
835
It contains code very similar to SequenceMatcher because of having a similar
836
    task. However, some key differences apply:
837
- there is no junk, we want a minimal edit not a human readable diff.
838
- we don't filter very common lines (because we don't know where a good
839
      range will start, and after the first text we want to be emitting minimal
841
- we chain the left side, not the right side
842
- we incrementally update the adjacency matrix as new lines are provided.
843
- we look for matches in all of the left side, so the routine which does
844
      the analogous task of find_longest_match does not need to filter on the
849
super(PyrexGroupCompressor, self).__init__()
850
self._delta_index = DeltaIndex()
852
def _compress(self, key, bytes, max_delta_size, soft=False):
853
"""see _CommonGroupCompressor._compress"""
854
input_len = len(bytes)
855
# By having action/label/sha1/len, we can parse the group if the index
856
# was ever destroyed, we have the key in 'label', we know the final
857
# bytes are valid from sha1, and we know where to find the end of this
858
# record because of 'len'. (the delta record itself will store the
859
# total length for the expanded record)
860
# 'len: %d\n' costs approximately 1% increase in total data
861
# Having the labels at all costs us 9-10% increase, 38% increase for
862
# inventory pages, and 5.8% increase for text pages
863
# new_chunks = ['label:%s\nsha1:%s\n' % (label, sha1)]
864
if self._delta_index._source_offset != self.endpoint:
865
raise AssertionError('_source_offset != endpoint'
866
' somehow the DeltaIndex got out of sync with'
868
delta = self._delta_index.make_delta(bytes, max_delta_size)
871
enc_length = encode_base128_int(len(bytes))
872
len_mini_header = 1 + len(enc_length)
873
self._delta_index.add_source(bytes, len_mini_header)
874
new_chunks = ['f', enc_length, bytes]
877
enc_length = encode_base128_int(len(delta))
878
len_mini_header = 1 + len(enc_length)
879
new_chunks = ['d', enc_length, delta]
880
self._delta_index.add_delta_source(delta, len_mini_header)
882
start = self.endpoint
883
chunk_start = len(self.chunks)
884
# Now output these bytes
885
self._output_chunks(new_chunks)
886
self.input_bytes += input_len
887
chunk_end = len(self.chunks)
888
self.labels_deltas[key] = (start, chunk_start,
889
self.endpoint, chunk_end)
890
if not self._delta_index._source_offset == self.endpoint:
891
raise AssertionError('the delta index is out of sync'
892
'with the output lines %s != %s'
893
% (self._delta_index._source_offset, self.endpoint))
894
return start, self.endpoint, type
896
def _output_chunks(self, new_chunks):
897
"""Output some chunks.
899
:param new_chunks: The chunks to output.
901
self._last = (len(self.chunks), self.endpoint)
902
endpoint = self.endpoint
903
self.chunks.extend(new_chunks)
904
endpoint += sum(map(len, new_chunks))
905
self.endpoint = endpoint
908
def make_pack_factory(graph, delta, keylength):
909
"""Create a factory for creating a pack based groupcompress.
911
    This is only functional enough to run interface tests; it doesn't try to
912
provide a full pack environment.
914
:param graph: Store a graph.
915
:param delta: Delta compress contents.
916
    :param keylength: How long keys should be.
918
def factory(transport):
923
graph_index = BTreeBuilder(reference_lists=ref_length,
924
key_elements=keylength)
925
stream = transport.open_write_stream('newpack')
926
writer = pack.ContainerWriter(stream.write)
928
index = _GCGraphIndex(graph_index, lambda:True, parents=parents,
929
add_callback=graph_index.add_nodes)
930
access = knit._DirectPackAccess({})
931
access.set_writer(writer, graph_index, (transport, 'newpack'))
932
result = GroupCompressVersionedFiles(index, access, delta)
933
result.stream = stream
934
result.writer = writer
939
def cleanup_pack_group(versioned_files):
940
versioned_files.writer.end()
941
versioned_files.stream.close()
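
# Hypothetical sketch of using the factory with an in-memory transport
# (assumes bzrlib.transport.get_transport; intended for interface tests only):
#   from bzrlib.transport import get_transport
#   vf = make_pack_factory(graph=True, delta=False, keylength=1)(
#       get_transport('memory:///'))
#   ...
#   cleanup_pack_group(vf)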
944
class GroupCompressVersionedFiles(VersionedFiles):
945
"""A group-compress based VersionedFiles implementation."""
947
def __init__(self, index, access, delta=True):
948
"""Create a GroupCompressVersionedFiles object.
950
:param index: The index object storing access and graph data.
951
:param access: The access object storing raw data.
952
:param delta: Whether to delta compress or just entropy compress.
955
self._access = access
957
self._unadded_refs = {}
958
self._group_cache = LRUSizeCache(max_size=50*1024*1024)
959
self._fallback_vfs = []
961
def add_lines(self, key, parents, lines, parent_texts=None,
962
left_matching_blocks=None, nostore_sha=None, random_id=False,
964
"""Add a text to the store.
966
:param key: The key tuple of the text to add.
967
:param parents: The parents key tuples of the text to add.
968
:param lines: A list of lines. Each line must be a bytestring. And all
969
of them except the last must be terminated with \n and contain no
970
other \n's. The last line may either contain no \n's or a single
971
            terminating \n. If the lines list does not meet this constraint the add
972
routine may error or may succeed - but you will be unable to read
973
the data back accurately. (Checking the lines have been split
974
correctly is expensive and extremely unlikely to catch bugs so it
975
is not done at runtime unless check_content is True.)
976
:param parent_texts: An optional dictionary containing the opaque
977
representations of some or all of the parents of version_id to
978
allow delta optimisations. VERY IMPORTANT: the texts must be those
979
returned by add_lines or data corruption can be caused.
980
:param left_matching_blocks: a hint about which areas are common
981
between the text and its left-hand-parent. The format is
982
the SequenceMatcher.get_matching_blocks format.
983
:param nostore_sha: Raise ExistingContent and do not add the lines to
984
the versioned file if the digest of the lines matches this.
985
:param random_id: If True a random id has been selected rather than
986
an id determined by some deterministic process such as a converter
987
from a foreign VCS. When True the backend may choose not to check
988
for uniqueness of the resulting key within the versioned file, so
989
this should only be done when the result is expected to be unique
991
:param check_content: If True, the lines supplied are verified to be
992
bytestrings that are correctly formed lines.
993
:return: The text sha1, the number of bytes in the text, and an opaque
994
representation of the inserted version which can be provided
995
back to future add_lines calls in the parent_texts dictionary.
997
self._index._check_write_ok()
998
self._check_add(key, lines, random_id, check_content)
1000
# The caller might pass None if there is no graph data, but kndx
1001
# indexes can't directly store that, so we give them
1002
# an empty tuple instead.
1004
# double handling for now. Make it work until then.
1005
length = sum(map(len, lines))
1006
record = ChunkedContentFactory(key, parents, None, lines)
1007
sha1 = list(self._insert_record_stream([record], random_id=random_id,
1008
nostore_sha=nostore_sha))[0]
1009
return sha1, length, None
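
    # Sketch of a typical call (hypothetical key and lines):
    #   sha1, num_bytes, _ = vf.add_lines(('file-id', 'rev-id'), (),
    #       ['line one\n', 'line two\n'])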
1011
def add_fallback_versioned_files(self, a_versioned_files):
1012
"""Add a source of texts for texts not present in this knit.
1014
:param a_versioned_files: A VersionedFiles object.
1016
self._fallback_vfs.append(a_versioned_files)
1018
def annotate(self, key):
1019
"""See VersionedFiles.annotate."""
1021
parent_map = self.get_parent_map([key])
1023
raise errors.RevisionNotPresent(key, self)
1024
if parent_map[key] is not None:
1025
search = graph._make_breadth_first_searcher([key])
1029
present, ghosts = search.next_with_ghosts()
1030
except StopIteration:
1032
keys.update(present)
1033
parent_map = self.get_parent_map(keys)
1036
parent_map = {key:()}
1037
# So we used Graph(self) to load the parent_map, but now that we have
1038
# it, we can just query the parent map directly, so create a new Graph
1040
graph = _mod_graph.Graph(_mod_graph.DictParentsProvider(parent_map))
1041
head_cache = _mod_graph.FrozenHeadsCache(graph)
1043
reannotate = annotate.reannotate
1044
for record in self.get_record_stream(keys, 'topological', True):
1046
lines = osutils.chunks_to_lines(record.get_bytes_as('chunked'))
1047
parent_lines = [parent_cache[parent] for parent in parent_map[key]]
1048
parent_cache[key] = list(
1049
reannotate(parent_lines, lines, key, None, head_cache))
1050
return parent_cache[key]
1052
def check(self, progress_bar=None):
1053
"""See VersionedFiles.check()."""
1055
for record in self.get_record_stream(keys, 'unordered', True):
1056
record.get_bytes_as('fulltext')
1058
def _check_add(self, key, lines, random_id, check_content):
1059
"""check that version_id and lines are safe to add."""
1060
version_id = key[-1]
1061
if version_id is not None:
1062
if osutils.contains_whitespace(version_id):
1063
raise errors.InvalidRevisionId(version_id, self)
1064
self.check_not_reserved_id(version_id)
1065
# TODO: If random_id==False and the key is already present, we should
1066
# probably check that the existing content is identical to what is
1067
# being inserted, and otherwise raise an exception. This would make
1068
# the bundle code simpler.
1070
self._check_lines_not_unicode(lines)
1071
self._check_lines_are_lines(lines)
1073
def get_parent_map(self, keys):
1074
"""Get a map of the graph parents of keys.
1076
:param keys: The keys to look up parents for.
1077
:return: A mapping from keys to parents. Absent keys are absent from
1080
return self._get_parent_map_with_sources(keys)[0]
1082
def _get_parent_map_with_sources(self, keys):
1083
"""Get a map of the parents of keys.
1085
:param keys: The keys to look up parents for.
1086
:return: A tuple. The first element is a mapping from keys to parents.
1087
Absent keys are absent from the mapping. The second element is a
1088
list with the locations each key was found in. The first element
1089
is the in-this-knit parents, the second the first fallback source,
1093
sources = [self._index] + self._fallback_vfs
1096
for source in sources:
1099
new_result = source.get_parent_map(missing)
1100
source_results.append(new_result)
1101
result.update(new_result)
1102
missing.difference_update(set(new_result))
1103
return result, source_results
1105
def _get_block(self, index_memo):
1106
read_memo = index_memo[0:3]
1109
block = self._group_cache[read_memo]
1112
zdata = self._access.get_raw_records([read_memo]).next()
1113
# decompress - whole thing - this is not a bug, as it
1114
# permits caching. We might want to store the partially
1115
# decompresed group and decompress object, so that recent
1116
# texts are not penalised by big groups.
1117
block = GroupCompressBlock.from_bytes(zdata)
1118
self._group_cache[read_memo] = block
1120
# print len(zdata), len(plain)
1121
# parse - requires split_lines, better to have byte offsets
1122
# here (but not by much - we only split the region for the
1123
# recipe, and we often want to end up with lines anyway.
1126
def get_missing_compression_parent_keys(self):
1127
"""Return the keys of missing compression parents.
1129
Missing compression parents occur when a record stream was missing
1130
        basis texts, or an index was scanned that had missing basis texts.
1132
# GroupCompress cannot currently reference texts that are not in the
1133
# group, so this is valid for now
1136
def get_record_stream(self, keys, ordering, include_delta_closure):
1137
"""Get a stream of records for keys.
1139
:param keys: The keys to include.
1140
:param ordering: Either 'unordered' or 'topological'. A topologically
1141
sorted stream has compression parents strictly before their
1143
:param include_delta_closure: If True then the closure across any
1144
compression parents will be included (in the opaque data).
1145
:return: An iterator of ContentFactory objects, each of which is only
1146
valid until the iterator is advanced.
1148
# keys might be a generator
1149
orig_keys = list(keys)
1153
if (not self._index.has_graph
1154
and ordering in ('topological', 'groupcompress')):
1155
# Cannot topological order when no graph has been stored.
1156
# but we allow 'as-requested' or 'unordered'
1157
ordering = 'unordered'
1159
remaining_keys = keys
1162
keys = set(remaining_keys)
1163
for content_factory in self._get_remaining_record_stream(keys,
1164
orig_keys, ordering, include_delta_closure):
1165
remaining_keys.discard(content_factory.key)
1166
yield content_factory
1168
except errors.RetryWithNewPacks, e:
1169
self._access.reload_or_raise(e)
1171
def _find_from_fallback(self, missing):
1172
"""Find whatever keys you can from the fallbacks.
1174
:param missing: A set of missing keys. This set will be mutated as keys
1175
are found from a fallback_vfs
1176
:return: (parent_map, key_to_source_map, source_results)
1177
parent_map the overall key => parent_keys
1178
key_to_source_map a dict from {key: source}
1179
source_results a list of (source: keys)
1182
key_to_source_map = {}
1184
for source in self._fallback_vfs:
1187
source_parents = source.get_parent_map(missing)
1188
parent_map.update(source_parents)
1189
source_parents = list(source_parents)
1190
source_results.append((source, source_parents))
1191
key_to_source_map.update((key, source) for key in source_parents)
1192
missing.difference_update(source_parents)
1193
return parent_map, key_to_source_map, source_results
1195
def _get_ordered_source_keys(self, ordering, parent_map, key_to_source_map):
1196
"""Get the (source, [keys]) list.
1198
The returned objects should be in the order defined by 'ordering',
1199
which can weave between different sources.
1200
:param ordering: Must be one of 'topological' or 'groupcompress'
1201
:return: List of [(source, [keys])] tuples, such that all keys are in
1202
the defined order, regardless of source.
1204
if ordering == 'topological':
1205
present_keys = topo_sort(parent_map)
1207
# ordering == 'groupcompress'
1208
# XXX: This only optimizes for the target ordering. We may need
1209
# to balance that with the time it takes to extract
1210
# ordering, by somehow grouping based on
1211
# locations[key][0:3]
1212
present_keys = sort_gc_optimal(parent_map)
1213
# Now group by source:
1215
current_source = None
1216
for key in present_keys:
1217
source = key_to_source_map.get(key, self)
1218
if source is not current_source:
1219
source_keys.append((source, []))
1220
current_source = source
1221
source_keys[-1][1].append(key)
1224
def _get_as_requested_source_keys(self, orig_keys, locations, unadded_keys,
1227
current_source = None
1228
for key in orig_keys:
1229
if key in locations or key in unadded_keys:
1231
elif key in key_to_source_map:
1232
source = key_to_source_map[key]
1235
if source is not current_source:
1236
source_keys.append((source, []))
1237
current_source = source
1238
source_keys[-1][1].append(key)
1241
def _get_io_ordered_source_keys(self, locations, unadded_keys,
1244
# This is the group the bytes are stored in, followed by the
1245
# location in the group
1246
return locations[key][0]
1247
present_keys = sorted(locations.iterkeys(), key=get_group)
1248
# We don't have an ordering for keys in the in-memory object, but
1249
        # let's process the in-memory ones first.
1250
present_keys = list(unadded_keys) + present_keys
1251
# Now grab all of the ones from other sources
1252
source_keys = [(self, present_keys)]
1253
source_keys.extend(source_result)
1256
def _get_remaining_record_stream(self, keys, orig_keys, ordering,
1257
include_delta_closure):
1258
"""Get a stream of records for keys.
1260
:param keys: The keys to include.
1261
:param ordering: one of 'unordered', 'topological', 'groupcompress' or
1263
:param include_delta_closure: If True then the closure across any
1264
compression parents will be included (in the opaque data).
1265
:return: An iterator of ContentFactory objects, each of which is only
1266
valid until the iterator is advanced.
1269
locations = self._index.get_build_details(keys)
1270
unadded_keys = set(self._unadded_refs).intersection(keys)
1271
missing = keys.difference(locations)
1272
missing.difference_update(unadded_keys)
1273
(fallback_parent_map, key_to_source_map,
1274
source_result) = self._find_from_fallback(missing)
1275
if ordering in ('topological', 'groupcompress'):
1276
# would be better to not globally sort initially but instead
1277
# start with one key, recurse to its oldest parent, then grab
1278
# everything in the same group, etc.
1279
parent_map = dict((key, details[2]) for key, details in
1280
locations.iteritems())
1281
for key in unadded_keys:
1282
parent_map[key] = self._unadded_refs[key]
1283
parent_map.update(fallback_parent_map)
1284
source_keys = self._get_ordered_source_keys(ordering, parent_map,
1286
elif ordering == 'as-requested':
1287
source_keys = self._get_as_requested_source_keys(orig_keys,
1288
locations, unadded_keys, key_to_source_map)
1290
# We want to yield the keys in a semi-optimal (read-wise) ordering.
1291
# Otherwise we thrash the _group_cache and destroy performance
1292
source_keys = self._get_io_ordered_source_keys(locations,
1293
unadded_keys, source_result)
1295
yield AbsentContentFactory(key)
1297
last_read_memo = None
1298
# TODO: This works fairly well at batching up existing groups into a
1299
# streamable format, and possibly allowing for taking one big
1300
# group and splitting it when it isn't fully utilized.
1301
# However, it doesn't allow us to find under-utilized groups and
1302
# combine them into a bigger group on the fly.
1303
# (Consider the issue with how chk_map inserts texts
1304
# one-at-a-time.) This could be done at insert_record_stream()
1305
# time, but it probably would decrease the number of
1306
# bytes-on-the-wire for fetch.
1307
for source, keys in source_keys:
1310
if key in self._unadded_refs:
1311
if manager is not None:
1312
for factory in manager.get_record_stream():
1314
last_read_memo = manager = None
1315
bytes, sha1 = self._compressor.extract(key)
1316
parents = self._unadded_refs[key]
1317
yield FulltextContentFactory(key, parents, sha1, bytes)
1319
index_memo, _, parents, (method, _) = locations[key]
1320
read_memo = index_memo[0:3]
1321
if last_read_memo != read_memo:
1322
# We are starting a new block. If we have a
1323
# manager, we have found everything that fits for
1324
# now, so yield records
1325
if manager is not None:
1326
for factory in manager.get_record_stream():
1328
# Now start a new manager
1329
block = self._get_block(index_memo)
1330
manager = _LazyGroupContentManager(block)
1331
last_read_memo = read_memo
1332
start, end = index_memo[3:5]
1333
manager.add_factory(key, parents, start, end)
1335
if manager is not None:
1336
for factory in manager.get_record_stream():
1338
last_read_memo = manager = None
1339
for record in source.get_record_stream(keys, ordering,
1340
include_delta_closure):
1342
if manager is not None:
1343
for factory in manager.get_record_stream():
1346
def get_sha1s(self, keys):
1347
"""See VersionedFiles.get_sha1s()."""
1349
for record in self.get_record_stream(keys, 'unordered', True):
1350
            if record.sha1 is not None:
1351
result[record.key] = record.sha1
1353
if record.storage_kind != 'absent':
1354
result[record.key] = osutils.sha_string(
1355
record.get_bytes_as('fulltext'))
1358
def insert_record_stream(self, stream):
1359
"""Insert a record stream into this container.
1361
:param stream: A stream of records to insert.
1363
:seealso VersionedFiles.get_record_stream:
1365
# XXX: Setting random_id=True makes
1366
# test_insert_record_stream_existing_keys fail for groupcompress and
1367
# groupcompress-nograph, this needs to be revisited while addressing
1368
# 'bzr branch' performance issues.
1369
for _ in self._insert_record_stream(stream, random_id=False):
1372
def _insert_record_stream(self, stream, random_id=False, nostore_sha=None,
1374
"""Internal core to insert a record stream into this container.
1376
This helper function has a different interface than insert_record_stream
1377
to allow add_lines to be minimal, but still return the needed data.
1379
:param stream: A stream of records to insert.
1380
:param nostore_sha: If the sha1 of a given text matches nostore_sha,
1381
raise ExistingContent, rather than committing the new text.
1382
:param reuse_blocks: If the source is streaming from
1383
groupcompress-blocks, just insert the blocks as-is, rather than
1384
expanding the texts and inserting again.
1385
:return: An iterator over the sha1 of the inserted records.
1386
:seealso insert_record_stream:
1390
def get_adapter(adapter_key):
1392
return adapters[adapter_key]
1394
adapter_factory = adapter_registry.get(adapter_key)
1395
adapter = adapter_factory(self)
1396
adapters[adapter_key] = adapter
1398
# This will go up to fulltexts for gc to gc fetching, which isn't
1400
self._compressor = GroupCompressor()
1401
self._unadded_refs = {}
1404
bytes = self._compressor.flush().to_bytes()
1405
index, start, length = self._access.add_raw_records(
1406
[(None, len(bytes))], bytes)[0]
1408
for key, reads, refs in keys_to_add:
1409
nodes.append((key, "%d %d %s" % (start, length, reads), refs))
1410
self._index.add_records(nodes, random_id=random_id)
1411
self._unadded_refs = {}
1413
self._compressor = GroupCompressor()
1416
max_fulltext_len = 0
1417
max_fulltext_prefix = None
1418
insert_manager = None
1421
# XXX: TODO: remove this, it is just for safety checking for now
1422
inserted_keys = set()
1423
for record in stream:
1424
# Raise an error when a record is missing.
1425
if record.storage_kind == 'absent':
1426
raise errors.RevisionNotPresent(record.key, self)
1428
if record.key in inserted_keys:
1429
trace.note('Insert claimed random_id=True,'
1430
' but then inserted %r two times', record.key)
1432
inserted_keys.add(record.key)
1434
# If the reuse_blocks flag is set, check to see if we can just
1435
# copy a groupcompress block as-is.
1436
if record.storage_kind == 'groupcompress-block':
1437
# Insert the raw block into the target repo
1438
insert_manager = record._manager
1439
insert_manager._check_rebuild_block()
1440
bytes = record._manager._block.to_bytes()
1441
_, start, length = self._access.add_raw_records(
1442
[(None, len(bytes))], bytes)[0]
1445
block_length = length
1446
if record.storage_kind in ('groupcompress-block',
1447
'groupcompress-block-ref'):
1448
if insert_manager is None:
1449
raise AssertionError('No insert_manager set')
1450
value = "%d %d %d %d" % (block_start, block_length,
1451
record._start, record._end)
1452
nodes = [(record.key, value, (record.parents,))]
1453
# TODO: Consider buffering up many nodes to be added, not
1454
# sure how much overhead this has, but we're seeing
1455
# ~23s / 120s in add_records calls
1456
self._index.add_records(nodes, random_id=random_id)
1459
bytes = record.get_bytes_as('fulltext')
1460
except errors.UnavailableRepresentation:
1461
adapter_key = record.storage_kind, 'fulltext'
1462
adapter = get_adapter(adapter_key)
1463
bytes = adapter.get_bytes(record)
1464
if len(record.key) > 1:
1465
prefix = record.key[0]
1466
soft = (prefix == last_prefix)
1470
if max_fulltext_len < len(bytes):
1471
max_fulltext_len = len(bytes)
1472
max_fulltext_prefix = prefix
1473
(found_sha1, start_point, end_point,
1474
type) = self._compressor.compress(record.key,
1475
bytes, record.sha1, soft=soft,
1476
nostore_sha=nostore_sha)
1477
# delta_ratio = float(len(bytes)) / (end_point - start_point)
1478
# Check if we want to continue to include that text
1479
if (prefix == max_fulltext_prefix
1480
and end_point < 2 * max_fulltext_len):
1481
# As long as we are on the same file_id, we will fill at least
1482
# 2 * max_fulltext_len
1483
start_new_block = False
1484
elif end_point > 4*1024*1024:
1485
start_new_block = True
1486
elif (prefix is not None and prefix != last_prefix
1487
and end_point > 2*1024*1024):
1488
start_new_block = True
1490
start_new_block = False
1491
last_prefix = prefix
1493
self._compressor.pop_last()
1495
max_fulltext_len = len(bytes)
1496
(found_sha1, start_point, end_point,
1497
type) = self._compressor.compress(record.key, bytes,
1499
if record.key[-1] is None:
1500
key = record.key[:-1] + ('sha1:' + found_sha1,)
1503
self._unadded_refs[key] = record.parents
1505
keys_to_add.append((key, '%d %d' % (start_point, end_point),
1507
if len(keys_to_add):
1509
self._compressor = None
1511
def iter_lines_added_or_present_in_keys(self, keys, pb=None):
1512
"""Iterate over the lines in the versioned files from keys.
1514
This may return lines from other keys. Each item the returned
1515
iterator yields is a tuple of a line and a text version that that line
1516
is present in (not introduced in).
1518
Ordering of results is in whatever order is most suitable for the
1519
underlying storage format.
1521
If a progress bar is supplied, it may be used to indicate progress.
1522
The caller is responsible for cleaning up progress bars (because this
1526
* Lines are normalised by the underlying store: they will all have \n
1528
* Lines are returned in arbitrary order.
1530
:return: An iterator over (line, key).
1533
pb = progress.DummyProgress()
1536
# we don't care about inclusions, the caller cares.
1537
# but we need to setup a list of records to visit.
1538
# we need key, position, length
1539
for key_idx, record in enumerate(self.get_record_stream(keys,
1540
'unordered', True)):
1541
# XXX: todo - optimise to use less than full texts.
1543
pb.update('Walking content', key_idx, total)
1544
if record.storage_kind == 'absent':
1545
raise errors.RevisionNotPresent(key, self)
1546
lines = osutils.split_lines(record.get_bytes_as('fulltext'))
1549
pb.update('Walking content', total, total)
1552
"""See VersionedFiles.keys."""
1553
if 'evil' in debug.debug_flags:
1554
trace.mutter_callsite(2, "keys scales with size of history")
1555
sources = [self._index] + self._fallback_vfs
1557
for source in sources:
1558
result.update(source.keys())
1562
class _GCGraphIndex(object):
1563
"""Mapper from GroupCompressVersionedFiles needs into GraphIndex storage."""
1565
def __init__(self, graph_index, is_locked, parents=True,
1566
add_callback=None, track_external_parent_refs=False):
1567
"""Construct a _GCGraphIndex on a graph_index.
1569
:param graph_index: An implementation of bzrlib.index.GraphIndex.
1570
:param is_locked: A callback, returns True if the index is locked and
1572
        :param parents: If True, record knit parents; if not, do not record
1574
:param add_callback: If not None, allow additions to the index and call
1575
this callback with a list of added GraphIndex nodes:
1576
[(node, value, node_refs), ...]
1577
:param track_external_parent_refs: As keys are added, keep track of the
1578
keys they reference, so that we can query get_missing_parents(),
1581
self._add_callback = add_callback
1582
self._graph_index = graph_index
1583
self._parents = parents
1584
self.has_graph = parents
1585
self._is_locked = is_locked
1586
if track_external_parent_refs:
1587
self._key_dependencies = knit._KeyRefs()
1589
self._key_dependencies = None
1591
def add_records(self, records, random_id=False):
1592
"""Add multiple records to the index.
1594
This function does not insert data into the Immutable GraphIndex
1595
        backing the KnitGraphIndex; instead it prepares data for insertion by
1596
the caller and checks that it is safe to insert then calls
1597
self._add_callback with the prepared GraphIndex nodes.
1599
:param records: a list of tuples:
1600
(key, options, access_memo, parents).
1601
:param random_id: If True the ids being added were randomly generated
1602
and no check for existence will be performed.
1604
if not self._add_callback:
1605
raise errors.ReadOnlyError(self)
1606
# we hope there are no repositories with inconsistent parentage
1611
for (key, value, refs) in records:
1612
if not self._parents:
1616
                            raise errors.KnitCorrupt(self,
1617
"attempt to add node with parents "
1618
"in parentless index.")
1621
keys[key] = (value, refs)
1624
present_nodes = self._get_entries(keys)
1625
for (index, key, value, node_refs) in present_nodes:
1626
if node_refs != keys[key][1]:
1627
raise errors.KnitCorrupt(self, "inconsistent details in add_records"
1628
": %s %s" % ((value, node_refs), keys[key]))
1634
for key, (value, node_refs) in keys.iteritems():
1635
result.append((key, value, node_refs))
1637
for key, (value, node_refs) in keys.iteritems():
1638
result.append((key, value))
1640
key_dependencies = self._key_dependencies
1641
if key_dependencies is not None and self._parents:
1642
for key, value, refs in records:
1644
key_dependencies.add_references(key, parents)
1645
self._add_callback(records)
1647
def _check_read(self):
1648
"""Raise an exception if reads are not permitted."""
1649
if not self._is_locked():
1650
raise errors.ObjectNotLocked(self)
1652
def _check_write_ok(self):
1653
"""Raise an exception if writes are not permitted."""
1654
if not self._is_locked():
1655
raise errors.ObjectNotLocked(self)
1657
def _get_entries(self, keys, check_present=False):
1658
"""Get the entries for keys.
1660
Note: Callers are responsible for checking that the index is locked
1661
before calling this method.
1663
:param keys: An iterable of index key tuples.
1668
for node in self._graph_index.iter_entries(keys):
1670
found_keys.add(node[1])
1672
# adapt parentless index to the rest of the code.
1673
for node in self._graph_index.iter_entries(keys):
1674
yield node[0], node[1], node[2], ()
1675
found_keys.add(node[1])
1677
missing_keys = keys.difference(found_keys)
1679
raise RevisionNotPresent(missing_keys.pop(), self)
1681
def get_parent_map(self, keys):
1682
"""Get a map of the parents of keys.
1684
:param keys: The keys to look up parents for.
1685
:return: A mapping from keys to parents. Absent keys are absent from
1689
nodes = self._get_entries(keys)
1693
result[node[1]] = node[3][0]
1696
result[node[1]] = None
1699
def get_missing_parents(self):
1700
"""Return the keys of missing parents."""
1701
# Copied from _KnitGraphIndex.get_missing_parents
1702
# We may have false positives, so filter those out.
1703
self._key_dependencies.add_keys(
1704
self.get_parent_map(self._key_dependencies.get_unsatisfied_refs()))
1705
return frozenset(self._key_dependencies.get_unsatisfied_refs())
1707
def get_build_details(self, keys):
1708
"""Get the various build details for keys.
1710
Ghosts are omitted from the result.
1712
:param keys: An iterable of keys.
1713
:return: A dict of key:
1714
(index_memo, compression_parent, parents, record_details).
1716
opaque structure to pass to read_records to extract the raw
1719
Content that this record is built upon, may be None
1721
Logical parents of this node
1723
extra information about the content which needs to be passed to
1724
Factory.parse_record
1728
entries = self._get_entries(keys)
1729
for entry in entries:
1731
if not self._parents:
1734
parents = entry[3][0]
1736
result[key] = (self._node_to_position(entry),
1737
None, parents, (method, None))
1741
"""Get all the keys in the collection.
1743
The keys are not ordered.
1746
return [node[1] for node in self._graph_index.iter_all_entries()]
1748
def _node_to_position(self, node):
1749
"""Convert an index value to position details."""
1750
bits = node[2].split(' ')
1751
# It would be nice not to read the entire gzip.
1752
        start = int(bits[0])
        stop = int(bits[1])
        basis_end = int(bits[2])
1755
delta_end = int(bits[3])
1756
return node[0], start, stop, basis_end, delta_end
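
    # Sketch: for an index value such as '0 5432 123 456' (hypothetical
    # numbers) this returns (index, 0, 5432, 123, 456), i.e. roughly the
    # offset and length of the compressed block in the pack, plus this
    # record's start and end within the uncompressed block, matching how
    # values are written by _insert_record_stream.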
1758
def scan_unvalidated_index(self, graph_index):
1759
"""Inform this _GCGraphIndex that there is an unvalidated index.
1761
This allows this _GCGraphIndex to keep track of any missing
1762
compression parents we may want to have filled in to make those
1765
:param graph_index: A GraphIndex
1767
if self._key_dependencies is not None:
1768
# Add parent refs from graph_index (and discard parent refs that
1769
# the graph_index has).
1770
add_refs = self._key_dependencies.add_references
1771
for node in graph_index.iter_all_entries():
1772
add_refs(node[1], node[3][0])
from bzrlib._groupcompress_py import (
    apply_delta_to_source,
    decode_copy_instruction,
    )
try:
    from bzrlib._groupcompress_pyx import (
        apply_delta_to_source,
        )
    GroupCompressor = PyrexGroupCompressor
except ImportError:
    GroupCompressor = PythonGroupCompressor