'''
benchmark.py - this file is part of S3QL (http://s3ql.googlecode.com)

Benchmark compression and upload performance and recommend compression
algorithm that maximizes throughput.

Copyright (C) 2010 Nikolaus Rath <Nikolaus@rath.org>

This program can be distributed under the terms of the GNU LGPL.
'''
from __future__ import division, print_function, absolute_import
24
# We are running from the S3QL source directory, make sure
25
# that we use modules from this directory
26
basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
27
if (os.path.exists(os.path.join(basedir, 'setup.py')) and
28
os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
29
sys.path = [os.path.join(basedir, 'src')] + sys.path
31
from s3ql.backends.common import compress_encrypt_fh
32
from s3ql.common import (get_backend, QuietError,setup_logging)
33
from s3ql.parse_args import ArgumentParser
36
log = logging.getLogger('benchmark')
def parse_args(args):
    '''Parse command line

    Returns the parsed argparse namespace with `storage_url`, `file`
    and `compression_threads` attributes.
    '''

    parser = ArgumentParser(
        usage="%prog [options] <storage-url> <test-file>\n",
        description="Transfers and compresses the test file and gives a recommendation "
                    "for the compression algorithm to use.")

    # NOTE(review): main() reads options.homedir and options.ssl, so the
    # corresponding parser.add_*() calls (lost from this copy of the file)
    # are restored here -- confirm against the s3ql.parse_args API.
    parser.add_homedir()
    parser.add_ssl()

    parser.add_storage_url()
    parser.add_argument('file', metavar='<file>', type=argparse.FileType(mode='rb'),
                        help='File to transfer')
    parser.add_argument("--compression-threads", action="store", type=int,
                        default=1, metavar='<no>',
                        help='Number of parallel compression and encryption threads '
                             'to use (default: %(default)s).')

    return parser.parse_args(args)
def main(args=None):
    '''Time LZMA, BZip2 and zlib compression plus the network uplink and
    print which compression algorithm still saturates the connection.'''
    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    with get_backend(options.storage_url, options.homedir,
                     options.ssl) as (conn, bucketname):
        if bucketname not in conn:
            raise QuietError("Bucket does not exist.")
        bucket = conn.get_bucket(bucketname)

        # The parser declares the positional argument as 'file', so it is
        # available as options.file (the old options.testfile never existed).
        ifh = options.file
        # Compressed output is discarded; only the elapsed time matters.
        ofh = open('/dev/null', 'r+b')
        size = os.fstat(ifh.fileno()).st_size / 1024  # size in KB
        log.info('Test file size: %.2f MB', (size / 1024))

        log.info('Compressing with LZMA...')
        stamp = time.time()
        compress_encrypt_fh(ifh, ofh, 'foobar', 'nonce',
                            lzma.LZMACompressor(options={ 'level': 7 }))
        seconds = time.time() - stamp
        lzma_speed = size / seconds
        log.info('done. LZMA Compression Speed: %.2f KB per second', lzma_speed)

        # Rewind so every pass reads the full test file again.
        ifh.seek(0)
        log.info('Compressing with BZip2...')
        stamp = time.time()
        compress_encrypt_fh(ifh, ofh, 'foobar', 'nonce',
                            bz2.BZ2Compressor(9))
        seconds = time.time() - stamp
        bzip2_speed = size / seconds
        log.info('done. Bzip2 Compression Speed: %.2f KB per second', bzip2_speed)

        ifh.seek(0)
        log.info('Compressing with zlib...')
        stamp = time.time()
        compress_encrypt_fh(ifh, ofh, 'foobar', 'nonce',
                            zlib.compressobj(9))
        seconds = time.time() - stamp
        zlib_speed = size / seconds
        log.info('done. zlib Compression Speed: %.2f KB per second', zlib_speed)

        ifh.seek(0)
        log.info('Transferring to backend...')
        stamp = time.time()
        # NOTE(review): raw_store() presumably expects a key name (string)
        # as its first argument, not the open file object that was passed
        # here before -- using the file's path as key; confirm against the
        # backend API.
        bucket.raw_store(ifh.name, ifh, dict())
        seconds = time.time() - stamp
        net_speed = size / seconds
        log.info('done. Network Uplink Speed: %.2f KB per second', net_speed)

    print('Assuming mount.s3ql will be called with --compression-threads %d'
          % options.compression_threads)
    # With N threads, effective compression throughput scales by N.
    lzma_speed *= options.compression_threads
    bzip2_speed *= options.compression_threads
    zlib_speed *= options.compression_threads

    # Recommend the strongest algorithm that still outruns the uplink.
    if lzma_speed > net_speed:
        print('You should use LZMA compression.')
    elif bzip2_speed > net_speed:
        print('You should use BZip2 compression.')
    elif zlib_speed > net_speed:
        print('You should use zlib compression.')
    else:
        print('You should use zlib compression, but even that is not fast\n'
              'enough to saturate your network connection.')
if __name__ == '__main__':
    # Entry point when run as a script (the call was missing in this copy).
    main(sys.argv[1:])