# Copyright (C) 2005, 2006 Canonical Ltd
# Written by Robert Collins <robert.collins@canonical.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

"""Bzrlib specific gzip tunings. We plan to feed these to the upstream gzip."""
20
from cStringIO import StringIO
22
# make GzipFile faster:
24
from gzip import U32, LOWU32, FEXTRA, FCOMMENT, FNAME, FHCRC
29
__all__ = ["GzipFile"]
32
class GzipFile(gzip.GzipFile):
    """Knit tuned version of GzipFile.

    This is based on the following lsprof stats:
    python 2.4 stock GzipFile write:
    58971      0   5644.3090   2721.4730   gzip:193(write)
    +58971     0   1159.5530   1159.5530   +<built-in method compress>
    +176913    0    987.0320    987.0320   +<len>
    +58971     0    423.1450    423.1450   +<zlib.crc32>
    +58971     0    353.1060    353.1060   +<method 'write' of 'cStringIO.StringO' objects>
    tuned GzipFile write:
    58971      0   4477.2590   2103.1120   bzrlib.knit:1250(write)
    +58971     0   1297.7620   1297.7620   +<built-in method compress>
    +58971     0    406.2160    406.2160   +<zlib.crc32>
    +58971     0    341.9020    341.9020   +<method 'write' of 'cStringIO.StringO' objects>
    +58971     0    328.2670    328.2670   +<len>

    Yes, it's only 1.6 seconds, but they add up.
    """

    def _add_read_data(self, data):
        # temp var for len(data) and switch to +='s.
        len_data = len(data)
        self.crc = zlib.crc32(data, self.crc)
        self.extrabuf += data
        self.extrasize += len_data
        self.size += len_data

    def _write_gzip_header(self):
        """A tuned version of gzip._write_gzip_header

        We have some extra constraints that plain Gzip does not.
        1) We want to write the whole blob at once, rather than multiple
           calls to fileobj.write().
        2) We never have a filename
        3) We don't care about the time
        """
        self.fileobj.write(
            '\037\213'   # self.fileobj.write('\037\213')  # magic header
            '\010'       # self.fileobj.write('\010')      # compression method
                         # fname = self.filename[:-3]
                         # flags = 0
                         # if fname:
                         #     flags = FNAME
            '\x00'       # self.fileobj.write(chr(flags))
            '\0\0\0\0'   # write32u(self.fileobj, long(time.time()))
            '\002'       # self.fileobj.write('\002')
            '\377'       # self.fileobj.write('\377')
                         # if fname:
            ''           #     self.fileobj.write(fname + '\000')
            )
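
    # As a sketch of what the blob above encodes (the fixed ten-byte RFC 1952
    # header: magic, method, flags, mtime, extra flags, OS), it can be decoded
    # with the same struct module _read_eof already relies on:
    #
    #   struct.unpack('<2sBBIBB', '\037\213\010\x00\0\0\0\0\002\377')
    #   # -> ('\x1f\x8b', 8, 0, 0, 2, 255): no flags set, mtime zeroed,
    #   #    '\002' = slowest/best compression, '\377' = unknown OS.
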
    def _read(self, size=1024):
        # various optimisations:
        # reduces lsprof count from 2500 to
        # 8337 calls in 1272, 365 internal
        if self.fileobj is None:
            raise EOFError, "Reached EOF"

        if self._new_member:
            # If the _new_member flag is set, we have to
            # jump to the next member, if there is one.
            #
            # First, check if we're at the end of the file;
            # if so, it's time to stop; no more members to read.
            next_header_bytes = self.fileobj.read(10)
            if next_header_bytes == '':
                raise EOFError, "Reached EOF"

            self._init_read()
            self._read_gzip_header(next_header_bytes)
            self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
            self._new_member = False

        # Read a chunk of data from the file
        buf = self.fileobj.read(size)

        # If the EOF has been reached, flush the decompression object
        # and mark this object as finished.
        if buf == "":
            self._add_read_data(self.decompress.flush())
            assert len(self.decompress.unused_data) >= 8, "what does flush do?"
            self._gzip_tail = self.decompress.unused_data[0:8]
            self._read_eof()
            # tell the driving read() call we have stuffed all the data
            # in self.extrabuf
            raise EOFError, 'Reached EOF'

        self._add_read_data(self.decompress.decompress(buf))

        if self.decompress.unused_data != "":
            # Ending case: we've come to the end of a member in the file,
            # so seek back to the start of the data for the next member which
            # is the length of the decompress object's unused data - the first
            # 8 bytes for the end crc and size records.
            #
            # so seek back to the start of the unused data, finish up
            # this member, and read a new gzip header.
            # (The number of bytes to seek back is the length of the unused
            # data, minus 8 because those 8 bytes are part of this member.)
            seek_length = len(self.decompress.unused_data) - 8
            if seek_length > 0:
                # we read too much data
                self.fileobj.seek(-seek_length, 1)
                self._gzip_tail = self.decompress.unused_data[0:8]
            elif seek_length < 0:
                # we haven't read enough to check the checksum.
                assert -8 < seek_length, "too great a seek."
                buf = self.fileobj.read(-seek_length)
                self._gzip_tail = self.decompress.unused_data + buf
            else:
                self._gzip_tail = self.decompress.unused_data

            # Check the CRC and file size, and set the flag so we read
            # a new member on the next call
            self._read_eof()
            self._new_member = True
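
    # A worked example of the seek_length arithmetic above: if 20 bytes of
    # the chunk are left over after this member's compressed data ends,
    # unused_data holds those 20 bytes - the first 8 are the member's own
    # crc/size trailer and the remaining 12 belong to the next member, so
    # seek_length = 20 - 8 = 12 and we seek back 12 bytes to land on the
    # next member's header.
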
    def _read_eof(self):
        """tuned to reduce function calls and eliminate file seeking:
        reduces lsprof count from 800 to 288
        avoid U32 call by using struct format L
        """
        # We've read to the end of the file, so we should have 8 bytes of
        # unused data in the decompressor. If we don't, there is a corrupt
        # file. We use these 8 bytes to calculate the CRC and the recorded
        # file size. We then check that the computed CRC and size of the
        # uncompressed data matches the stored values. Note that the size
        # stored is the true file size mod 2**32.
        assert len(self._gzip_tail) == 8, "gzip trailer is incorrect length."
        crc32, isize = struct.unpack("<LL", self._gzip_tail)
        # note that isize is unsigned - it can exceed 2GB
        if crc32 != U32(self.crc):
            raise IOError, "CRC check failed %d %d" % (crc32, U32(self.crc))
        elif isize != LOWU32(self.size):
            raise IOError, "Incorrect length of data produced"
    def _read_gzip_header(self, bytes=None):
        """Supply bytes if the minimum header size is already read.

        :param bytes: 10 bytes of header data.
        """
        # lsprof: starting cost 300 in 3998, 15998 reads from 3998 calls
        if bytes is None:
            bytes = self.fileobj.read(10)
        magic = bytes[0:2]
        if magic != '\037\213':
            raise IOError, 'Not a gzipped file'
        method = ord(bytes[2:3])
        if method != 8:
            raise IOError, 'Unknown compression method'
        flag = ord(bytes[3:4])
        # modtime = self.fileobj.read(4) (bytes[4:8])
        # extraflag = self.fileobj.read(1) (bytes[8:9])
        # os = self.fileobj.read(1) (bytes[9:10])
        # self.fileobj.read(6)

        if flag & FEXTRA:
            # Read & discard the extra field, if present
            xlen = ord(self.fileobj.read(1))
            xlen = xlen + 256*ord(self.fileobj.read(1))
            self.fileobj.read(xlen)
        if flag & FNAME:
            # Read and discard a null-terminated string containing the filename
            while True:
                s = self.fileobj.read(1)
                if not s or s == '\000':
                    break
        if flag & FCOMMENT:
            # Read and discard a null-terminated string containing a comment
            while True:
                s = self.fileobj.read(1)
                if not s or s == '\000':
                    break
        if flag & FHCRC:
            self.fileobj.read(2)     # Read & discard the 16-bit header CRC
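
    # The flag bits tested above are the RFC 1952 values re-exported by the
    # gzip module: FHCRC = 2, FEXTRA = 4, FNAME = 8, FCOMMENT = 16. A header
    # carrying a stored filename, for instance, has flag & FNAME non-zero,
    # and the name is the null-terminated string skipped in the loop above.
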
    def readline(self, size=-1):
        """Tuned to remove buffer length calls in _unread and...

        also removes multiple len(c) calls, inlines _unread,
        total savings - lsprof 5800 to 5300
        8176 calls to read() in 1684
        changing the min chunk size to 200 halved all the cache misses
        leading to a drop to:
        4168 calls to read() in 1646
        - i.e. just reduced the function call overhead. May be worth
          keeping.
        """
        if size < 0: size = sys.maxint
        bufs = []
        readsize = min(200, size)    # Read from the file in small chunks
        while True:
            if size == 0:
                return "".join(bufs) # Return resulting line

            # c is the chunk
            c = self.read(readsize)
            # number of bytes read
            len_c = len(c)
            i = c.find('\n')
            if size is not None:
                # We set i=size to break out of the loop under two
                # conditions: 1) there's no newline, and the chunk is
                # larger than size, or 2) there is a newline, but the
                # resulting line would be longer than 'size'.
                if i == -1 and len_c > size: i = size - 1
                elif size <= i: i = size - 1

            if i >= 0 or c == '':
                # if i >= 0 we have a newline or have triggered the above
                # size is not None condition.
                # if c == '' it's EOF.
                bufs.append(c[:i+1])    # Add portion of last chunk
                # -- inlined self._unread --
                ## self._unread(c[i+1:], len_c - i) # Push back rest of chunk
                self.extrabuf = c[i+1:] + self.extrabuf
                self.extrasize = len_c - i + self.extrasize
                self.offset -= len_c - i
                # -- end inlined self._unread --
                return ''.join(bufs)    # Return resulting line

            # Append chunk to list, decrease 'size', and grow the chunk size
            bufs.append(c)
            size = size - len_c
            readsize = min(size, readsize * 2)
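
    # To see why the doubling readsize above matters: pulling a 10000-byte
    # line costs reads of 200, 400, 800, 1600, 3200 and 6400 bytes - six
    # calls to read() - where a fixed 200-byte chunk would cost fifty.
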
    def readlines(self, sizehint=0):
        # optimise to avoid all the buffer manipulation
        # lsprof changed from:
        # 4168 calls in 5472 with 32000 calls to readline()
        # to:
        # 4168 calls in 417.
        # Negative numbers result in reading all the lines

        # python's gzip routine uses sizehint. This is a more efficient way
        # than python uses to honor it. But it is even more efficient to
        # just read the entire thing and use cStringIO to split into lines.
        # if sizehint <= 0:
        #     sizehint = -1
        # content = self.read(sizehint)
        # return bzrlib.osutils.split_lines(content)
        content = StringIO(self.read(-1))
        return content.readlines()
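
    # i.e. rather than the readline()-per-line loop stock gzip effectively
    # performs, roughly:
    #
    #   lines = []
    #   line = self.readline()
    #   while line:
    #       lines.append(line)
    #       line = self.readline()
    #
    # everything is pulled in one read() and split at C speed by cStringIO.
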
    def _unread(self, buf, len_buf=None):
        """tuned to remove unneeded len calls.

        because this is such an inner routine in readline, and readline is
        in many inner loops, this has been inlined into readline().

        The len_buf parameter combined with the reduction in len calls dropped
        the lsprof ms count for this routine on my test data from 800 to 200 -
        a 75% saving.
        """
        if len_buf is None:
            len_buf = len(buf)
        self.extrabuf = buf + self.extrabuf
        self.extrasize = len_buf + self.extrasize
        self.offset -= len_buf
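
    # Pushback sketch: after _unread('bc', 2) the next read() returns 'bc'
    # before any newly decompressed data, and self.offset is rewound by two
    # bytes so tell() stays consistent.
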
    def write(self, data):
        if self.mode != gzip.WRITE:
            raise IOError(errno.EBADF, "write() on read-only GzipFile object")

        if self.fileobj is None:
            raise ValueError, "write() on closed GzipFile object"
        data_len = len(data)
        self.size = self.size + data_len
        self.crc = zlib.crc32(data, self.crc)
        self.fileobj.write(self.compress.compress(data))
        self.offset += data_len

    def writelines(self, lines):
        # profiling indicated a significant overhead
        # calling write for each line.
        # this batch call is a lot faster :).
        # (4 seconds to 1 second for the sample upgrades I was testing).
        self.write(''.join(lines))
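

# A minimal round-trip sketch of the tuned class (the helper name and sample
# data here are illustrative only): batched writelines() on the way in, the
# single-read readlines() on the way out, over a cStringIO backing file.
def _demo_round_trip(lines=['one\n', 'two\n']):
    sio = StringIO()
    compressed = GzipFile(mode='wb', fileobj=sio)
    compressed.writelines(lines)   # one write() call, not one per line
    compressed.close()             # flushes and writes the crc/size trailer
    sio.seek(0)
    return GzipFile(mode='rb', fileobj=sio).readlines()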