# Miro - an RSS based video player application
# Copyright (C) 2005-2010 Participatory Culture Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
#
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.

"""``miro.util`` -- Utility functions.

This module contains self-contained utility functions.  It shouldn't import
any other Miro modules.
"""
import logging
import os
import random
import re
import sgmllib
import shutil
import socket
import string
import subprocess
import traceback
import urllib
from hashlib import sha1 as sha
from StringIO import StringIO

from miro import filetypes
from clock import clock
53
# Should we print out warning messages. Turn off in the unit tests.
# MIME types we prefer when picking among multiple enclosures, listed
# from most to least preferred.
PREFERRED_TYPES = [
    'application/x-bittorrent',
    'application/ogg', 'video/ogg', 'audio/ogg',
    'video/mp4', 'video/quicktime', 'video/mpeg',
    'video/x-xvid', 'video/x-divx', 'video/x-wmv',
    'video/x-msmpeg', 'video/x-flv']

# Maps each preferred MIME type to its position in PREFERRED_TYPES so
# rank lookups are O(1).  (Loop variable renamed so it doesn't shadow
# the builtin `type`.)
PREFERRED_TYPES_ORDER = dict((mimetype, i) for i, mimetype in
                             enumerate(PREFERRED_TYPES))

# Torrent files larger than this are assumed bogus (see #12301).
MAX_TORRENT_SIZE = 500 * (2**10) # 500k
69
def get_nice_stack():
    """Get a stack trace that's a easier to read that the full one."""
    stack = traceback.extract_stack()
    # We don't care about the unit test lines
    while (len(stack) > 0
           and os.path.basename(stack[0][0]) == 'unittest.py'
           or (isinstance(stack[0][3], str)
               and stack[0][3].startswith('unittest.main'))):
        stack = stack[1:]

    # remove after the call to signals.system.failed
    for i in xrange(len(stack)):
        if ((os.path.basename(stack[i][0]) == 'signals.py'
             and stack[i][2] in ('system.failed', 'system.failed_exn'))):
            stack = stack[:i+1]
            break

    # remove trap_call calls.  BUGFIX: the filter must drop frames that
    # mention trap_call, so the membership test is "not in" -- the old
    # "in" test kept only the trap_call frames and threw everything
    # else away.
    stack = [frame for frame in stack if 'trap_call' not in frame]
    return stack
90
CONFIG_LINE_RE = re.compile(r"^([^ ]+) *= *([^\r\n]*)[\r\n]*$")
92
def read_simple_config_file(path):
93
"""Parse a configuration file in a very simple format and return contents
96
Each line is either whitespace or "Key = Value". Whitespace is ignored
97
at the beginning of Value, but the remainder of the line is taken
98
literally, including any whitespace.
100
Note: There is no way to put a newline in a value.
104
filep = open(path, "rt")
105
for line in filep.readlines():
110
# Otherwise it'd better be a configuration setting
111
match = CONFIG_LINE_RE.match(line)
113
print ("WARNING: %s: ignored bad configuration directive '%s'" %
118
value = match.group(2)
120
print "WARNING: %s: ignored duplicate directive '%s'" % (path, line)
127
def write_simple_config_file(path, data):
    """Given a dict, write a configuration file in the format that
    read_simple_config_file reads.

    :param path: file path to write to
    :param data: dict of key -> value pairs, written one per line as
        "key = value"
    """
    filep = open(path, "wt")
    try:
        # items() rather than iteritems(): identical iteration, and it
        # also works on Python 3
        for k, v in data.items():
            filep.write("%s = %s\n" % (k, v))
    finally:
        # close even if a write raises, so the handle isn't leaked
        filep.close()
138
def query_revision():
139
"""Called at build-time to ask git for the revision of this
142
Returns the (url, revision) on success and None on failure.
147
proc = subprocess.Popen(["git", "config", "--list"],
148
stdout=subprocess.PIPE)
149
info = proc.stdout.read().splitlines()
151
origline = "remote.origin.url"
152
info = [m for m in info if m.startswith(origline)]
154
url = info[0][len(origline)+1:].strip()
156
proc = subprocess.Popen(["git", "rev-parse", "HEAD"],
157
stdout=subprocess.PIPE)
158
info = proc.stdout.read()
161
return (url, revision)
162
except StandardError, exc:
163
print "Exception thrown when querying revision: %s" % exc
164
return (url, revision)
166
class AutoFlushingStream:
    """Converts a stream to an auto-flushing one.  It behaves in
    exactly the same way, except all write() calls are automatically
    followed by a flush().
    """
    def __init__(self, stream):
        # write straight into __dict__ to bypass our own __setattr__,
        # which delegates to the wrapped stream
        self.__dict__['stream'] = stream

    def write(self, data):
        if isinstance(data, unicode):
            data = data.encode('ascii', 'backslashreplace')
        self.stream.write(data)
        # BUGFIX: restore the flush that the class's contract promises;
        # without it this wrapper is a plain pass-through
        self.stream.flush()

    def __getattr__(self, name):
        # delegate everything else (read, close, ...) to the wrapped stream
        return getattr(self.stream, name)

    def __setattr__(self, name, value):
        return setattr(self.stream, name, value)
187
class AutoLoggingStream(StringIO):
    """Create a stream that intercepts write calls and sends them to
    a logging callback.
    """
    def __init__(self, logging_callback, prefix):
        StringIO.__init__(self)
        # We init from StringIO to give us a bunch of stream-related
        # methods, like closed() and read() automatically.
        self.logging_callback = logging_callback
        # BUGFIX: write() reads self.prefix, so it must be stored here
        self.prefix = prefix

    def write(self, data):
        if isinstance(data, unicode):
            data = data.encode('ascii', 'backslashreplace')
        # drop a single trailing newline--the logger adds its own
        if data.endswith("\n"):
            data = data[:-1]
        if data:
            self.logging_callback(self.prefix + data)
206
def make_dummy_socket_pair():
207
"""Create a pair of sockets connected to each other on the local
208
interface. Used to implement SocketHandler.wakeup().
210
On Unixish systems, port 0 will pick the next available port.
211
But that appears to have problems on Windows possibly with
212
firewall software. So if we hit a socketerror with port 0, we
213
try ports between 50000 and 65500.
218
dummy_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
219
dummy_server.bind(("127.0.0.1", port))
220
dummy_server.listen(1)
221
server_address = dummy_server.getsockname()
222
first = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
223
first.connect(server_address)
224
second, address = dummy_server.accept()
228
# if we hit this, then it's hopeless--give up
231
# bump us into ephemeral ports if we need to try a bunch
236
def get_torrent_info_hash(path):
237
if os.path.getsize(path) > MAX_TORRENT_SIZE:
238
# file is too large, bailout. (see #12301)
239
raise ValueError("%s is not a valid torrent" % path)
241
import libtorrent as lt
246
# File doesn't start with 'd', bailout (see #12301)
247
raise ValueError("%s is not a valid torrent" % path)
248
metainfo = lt.bdecode(data)
250
infohash = metainfo['info']
251
except StandardError:
252
raise ValueError("%s is not a valid torrent" % path)
253
infohash = sha(lt.bencode(infohash)).digest()
258
def gather_media_files(path):
259
"""Gather media files on the disk in a directory tree.
260
This is used by the first time startup dialog.
262
path -- absolute file path to search
264
from miro import prefs
265
from miro import config
268
short_app_name = config.get(prefs.SHORT_APP_NAME)
269
for root, dirs, files in os.walk(path):
272
if filetypes.is_video_filename(f):
273
found.append(os.path.join(root, f))
275
if short_app_name in dirs:
276
dirs.remove(short_app_name)
279
adjusted_parsed = int(parsed / 100.0) * 100
281
adjusted_parsed = int(parsed / 10.0) * 10
283
adjusted_parsed = parsed
285
yield adjusted_parsed, found
287
def gather_subtitle_files(movie_path):
    """Given an absolute path for a video file, this returns a list of
    filenames of sidecar subtitle files that are in the same directory
    or in a subtitles directory that are associated with the video
    file.

    >>> gather_subtitle_files("/tmp/foo.ogv")
    []
    >>> gather_subtitle_files("/tmp/bar.ogv")
    ["/tmp/bar.en.srt", "/tmp/bar.fr.srt"]
    >>> gather_subtitle_files("/tmp/baz.ogv")
    ["/tmp/subtitles/baz.en.sub", "/tmp/subtitles/baz.fr.sub"]
    """
    subtitle_files = []
    if movie_path is None:
        return subtitle_files
    dirname, movie_file = os.path.split(movie_path)
    basename, ext = os.path.splitext(movie_file)

    def _scan(directory):
        # collect sidecar subtitle files in `directory` whose names
        # start with the movie's basename (deduplicates the two
        # identical scan loops the old code had)
        if os.path.exists(directory):
            subtitle_files.extend(
                os.path.join(directory, mem)
                for mem in os.listdir(directory)
                if mem.startswith(basename)
                and filetypes.is_subtitle_filename(mem))

    # check for files in the current directory
    _scan(dirname)
    # check for files in the subtitles/ directory
    _scan(os.path.join(dirname, "subtitles"))

    subtitle_files.sort()
    return subtitle_files
329
def copy_subtitle_file(sub_path, video_path):
    """Copies the subtitle file located at sub_path alongside the
    video file located at video_path.  It also changes the name
    so that the subtitle file follows the rules of sidecar files.

    Returns the path the subtitle file was copied to.
    """
    from miro import iso_639

    sub_basename = os.path.basename(sub_path)
    # split "name.<lang>.ext"; raw string so the backslashes are
    # unambiguously regex escapes
    match = re.match(r"(.*)(\....?)(\..*)", sub_basename)
    if match is not None:
        sub_basename_root = match.group(1)
        sub_language = match.group(2)
        sub_ext = match.group(3)
        if iso_639.find(sub_language[1:]) is not None:
            # the middle chunk is a language code--keep it as part of
            # the sidecar extension
            sub_ext = sub_language + sub_ext
        else:
            # not a language code--treat it as part of the name
            sub_basename_root = sub_basename_root + sub_language
    else:
        sub_basename_root, sub_ext = os.path.splitext(sub_basename)

    video_basename = os.path.basename(video_path)
    video_basename_root, video_ext = os.path.splitext(video_basename)
    if sub_basename_root != video_basename_root:
        sub_basename = video_basename_root + sub_ext

    dest_path = os.path.join(os.path.dirname(video_path), sub_basename)
    if sub_path != dest_path:
        if os.path.exists(dest_path):
            os.remove(dest_path)
        shutil.copyfile(sub_path, dest_path)
    return dest_path
362
def format_size_for_user(nbytes, zero_string="", with_decimals=True,
364
"""Format an int containing the number of bytes into a string
365
suitable for printing out to the user.
367
zero_string is the string to use if bytes == 0.
369
from miro.gtcache import gettext as _
370
if nbytes > (1 << 30) and not kb_only:
371
value = (nbytes / (1024.0 * 1024.0 * 1024.0))
373
# we do the string composing this way so as to make it easier
375
return _("%(size)sGB", {"size": "%1.1f" % value})
377
return _("%(size)sGB", {"size": "%d" % value})
378
elif nbytes > (1 << 20) and not kb_only:
379
value = (nbytes / (1024.0 * 1024.0))
381
return _("%(size)sMB", {"size": "%1.1f" % value})
383
return _("%(size)sMB", {"size": "%d" % value})
384
elif nbytes > (1 << 10):
385
value = (nbytes / 1024.0)
387
return _("%(size)sKB", {"size": "%1.1f" % value})
389
return _("%(size)sKB", {"size": "%d" % value})
393
return _("%(size)sB", {"size": "%1.1f" % value})
395
return _("%(size)sB", {"size": "%d" % value})
399
def clamp_text(text, max_length=20):
    """Clamp `text` to at most `max_length` characters, replacing the
    tail with '...' when it is too long.

    BUGFIX: the short-text branch now returns `text`; previously the
    function fell off the end and returned None for any string that
    already fit.
    """
    if len(text) > max_length:
        return text[:max_length - 3] + '...'
    return text
405
def print_mem_usage(message):
407
# Uncomment for memory usage printouts on Linux.
409
# os.system("ps huwwwp %d" % (os.getpid(),))
411
def db_mem_usage_test():
412
from miro import models
413
from miro.database import DDBObject
414
last_usage = get_mem_usage()
415
logging.info("baseline memory usage: %s", last_usage)
416
for name in dir(models):
417
ddb_object_class = getattr(models, name)
419
if not issubclass(ddb_object_class, DDBObject):
423
if name == 'FileItem':
424
# Item and FileItem share a db table, so we only need to
428
# make sure each object is loaded in memory and count the total
429
count = len(list(ddb_object_class.make_view()))
430
current_usage = get_mem_usage()
431
class_usage = current_usage-last_usage
433
count = 1 # prevent zero division errors
434
logging.info("memory usage for %s: %s (%d bytes per object)",
435
ddb_object_class.__name__, class_usage,
436
class_usage * 1024 / count)
437
last_usage = current_usage
438
logging.info("total memory usage: %s", last_usage)
439
logging.info("feed count: %s", models.Feed.make_view().count())
440
logging.info("item count: %s", models.Item.make_view().count())
443
return int(call_command('ps', '-o', 'rss', 'hp', str(os.getpid())))
446
"""Adds TIMING and JSALERT logging levels.
448
logging.addLevelName(15, "STACK TRACE")
449
logging.stacktrace = lambda msg, *args, **kargs: logging.log(15, "%s\n%s" % ("".join(traceback.format_stack()), msg) , *args, **kargs)
451
logging.addLevelName(25, "TIMING")
452
logging.timing = lambda msg, *args, **kargs: logging.log(25, msg, *args, **kargs)
453
logging.addLevelName(26, "JSALERT")
454
logging.jsalert = lambda msg, *args, **kargs: logging.log(26, msg, *args, **kargs)
456
logging.addLevelName(21, "DBLOG")
457
logging.dblog = lambda msg, *args, **kargs: logging.log(21, msg, *args, **kargs)
459
class MiroUnicodeError(StandardError):
    """Error raised when input to a template function isn't unicode."""
    pass
465
"""Raise an exception if input isn't unicode
467
if text is not None and not isinstance(text, unicode):
468
raise MiroUnicodeError(u"text %r is not a unicode string (type:%s)" %
471
def returns_unicode(func):
472
"""Decorator that raised an exception if the function doesn't
475
def check_func(*args, **kwargs):
476
result = func(*args, **kwargs)
477
if result is not None:
483
"""Raise an exception if input isn't a binary string
485
if text is not None and not isinstance(text, str):
486
raise MiroUnicodeError, (u"text \"%s\" is not a binary string" % text)
488
def returns_binary(func):
489
"""Decorator that raised an exception if the function doesn't
492
def check_func(*args, **kwargs):
493
result = func(*args, **kwargs)
494
if result is not None:
500
"""Returns exception if input isn't a filename type
502
from miro.plat.utils import FilenameType
503
if text is not None and not isinstance(text, FilenameType):
504
raise MiroUnicodeError, (u"text %r is not a valid filename type" %
507
def returns_filename(func):
508
"""Decorator that raised an exception if the function doesn't
511
def check_func(*args, **kwargs):
512
result = func(*args, **kwargs)
513
if result is not None:
519
"""Turns all strings in data structure to unicode.
521
if isinstance(data, dict):
522
for key, val in data.items():
523
data[key] = unicodify(val)
524
elif isinstance(data, list):
525
for i, mem in enumerate(data):
526
data[i] = unicodify(mem)
527
elif isinstance(data, str):
528
data = data.decode('ascii', 'replace')
531
def stringify(unicode_str, handleerror="xmlcharrefreplace"):
532
"""Takes a possibly unicode string and converts it to a string
533
string. This is required for some logging especially where the
534
things being logged are filenames which can be Unicode in the
537
You can pass in a handleerror argument which defaults to
538
``"xmlcharrefreplace"``. This will increase the string size as it
539
converts unicode characters that don't have ascii equivalents into
540
escape sequences. If you don't want to increase the string
541
length, use ``"replace"`` which will use ? for unicode characters
542
that don't have ascii equivalents.
546
This is not the inverse of unicodify!
548
if isinstance(unicode_str, unicode):
549
return unicode_str.encode("ascii", handleerror)
550
if not isinstance(unicode_str, str):
551
return str(unicode_str)
554
def quote_unicode_url(url):
555
"""Quote international characters contained in a URL according to
556
w3c, see: <http://www.w3.org/International/O-URL-code.html>
560
for c in url.encode('utf8'):
562
quoted_chars.append(urllib.quote(c))
564
quoted_chars.append(c)
565
return u''.join(quoted_chars)
567
def no_console_startupinfo():
    """Returns the startupinfo argument for subprocess.Popen so that
    we don't open a console window.  On platforms other than windows,
    this is just None.  On windows, it's some win32 silliness.
    """
    if not subprocess.mswindows:
        return None
    # suppress the console window via the STARTF_USESHOWWINDOW flag
    startupinfo = subprocess.STARTUPINFO()
    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    return startupinfo
579
def call_command(*args, **kwargs):
    """Call an external command.  If the command doesn't exit with
    status 0, or if it outputs to stderr, an exception will be raised.
    Returns stdout.
    """
    ignore_stderr = kwargs.pop('ignore_stderr', False)
    if kwargs:
        raise TypeError('extra keyword arguments: %s' % kwargs)

    pipe = subprocess.Popen(args, stdout=subprocess.PIPE,
                            stdin=subprocess.PIPE, stderr=subprocess.PIPE,
                            startupinfo=no_console_startupinfo())
    stdout, stderr = pipe.communicate()
    if pipe.returncode != 0:
        raise OSError("call_command with %s has return code %s\n"
                      "stdout:%s\nstderr:%s" %
                      (args, pipe.returncode, stdout, stderr))
    if stderr and not ignore_stderr:
        raise OSError("call_command with %s outputed error text:\n%s" %
                      (args, stderr))
    return stdout
602
def random_string(length):
    """Return a string of `length` random ASCII letters.

    Not cryptographically secure--uses the `random` module.
    """
    # range instead of xrange: identical here, and forward-compatible
    # with Python 3; `_` since the index is unused
    return ''.join(random.choice(string.ascii_letters) for _ in range(length))
605
def _get_enclosure_index(enc):
    """Rank an enclosure by MIME type: bigger is better; a type that
    isn't in PREFERRED_TYPES_ORDER ranks lowest (0).
    """
    worst = len(PREFERRED_TYPES_ORDER)
    rank = PREFERRED_TYPES_ORDER.get(enc.get('type'), worst)
    return worst - rank
609
def _get_enclosure_size(enc):
610
if 'filesize' in enc and enc['filesize'].isdigit():
611
return int(enc['filesize'])
615
def _get_enclosure_bitrate(enc):
616
if 'bitrate' in enc and enc['bitrate'].isdigit():
617
return int(enc['bitrate'])
621
def cmp_enclosures(enc1, enc2):
    """Compares two enclosures looking for the best one (i.e.
    the one with the biggest values).

    Returns -1 if enclosure1 is preferred, 1 if enclosure2 is
    preferred, and zero if there is no preference between the two of
    them.
    """
    def score(enc):
        # media:content enclosures have an isDefault which we should
        # pick since it's the preference of the feed; after that we
        # rank by MIME-type preference, bitrate, then size
        return (enc.get("isDefault"),
                _get_enclosure_index(enc),
                _get_enclosure_bitrate(enc),
                _get_enclosure_size(enc))

    # compare in swapped order so the *bigger* score sorts first
    return cmp(score(enc2), score(enc1))
646
def get_first_video_enclosure(entry):
    """
    Find the first "best" video enclosure in a feedparser entry.
    Returns the enclosure, or None if no video enclosure is found.
    """
    try:
        candidates = entry.enclosures
    except (KeyError, AttributeError):
        # entry has no enclosures at all
        return None

    candidates = [e for e in candidates if filetypes.is_video_enclosure(e)]
    if not candidates:
        return None

    # best enclosure first, per cmp_enclosures
    candidates.sort(cmp_enclosures)
    return candidates[0]
664
_default_encoding = "iso-8859-1" # aka Latin-1
667
def _to_utf8_bytes(s, encoding=None):
668
"""Takes a string and do whatever needs to be done to make it into
669
a UTF-8 string. If a Unicode string is given, it is just encoded
670
in UTF-8. Otherwise, if an encoding hint is given, first try to
671
decode the string as if it were in that encoding; if that fails
672
(or the hint isn't given), liberally (if necessary lossily)
673
interpret it as _default_encoding.
676
return _utf8cache[(s, encoding)]
681
# If we got a Unicode string, half of our work is already done.
682
if isinstance(s, unicode):
683
result = s.encode('utf-8')
684
elif not isinstance(s, str):
686
if result is None and encoding is not None:
687
# If we knew the encoding of the s, try that.
689
decoded = s.decode(encoding, 'replace')
690
except UnicodeDecodeError:
693
result = decoded.encode('utf-8')
695
# Encoding wasn't provided, or it was wrong. Interpret
696
# provided string liberally as a fixed _default_encoding (see
698
result = s.decode(_default_encoding, 'replace').encode('utf-8')
700
_utf8cache[(s, encoding)] = result
701
return _utf8cache[(s, encoding)]
707
"""Takes a string and returns a new unicode string with &, >, and
708
< replaced by &, >, and < respectively.
711
return _escapecache[str_]
715
new_str = unicode(str_)
716
for mem in [("&", "&"),
719
new_str = new_str.replace(mem[0], mem[1])
720
_escapecache[str_] = new_str
723
def to_uni(orig, encoding=None):
724
"""Takes a stringish thing and returns the unicode version
727
If the stringish thing is already unicode, it returns it--no-op.
729
If the stringish thing is a string, then it converts it to utf-8
730
from the specified encoding, then turns it into a unicode.
732
if isinstance(orig, unicode):
736
return _unicache[orig]
740
if isinstance(orig, str):
741
orig = _to_utf8_bytes(orig, encoding)
742
_unicache[orig] = unicode(orig, 'utf-8')
744
_unicache[orig] = unicode(orig)
745
return _unicache[orig]
749
# replaces one or more non-newline whitespace characters
750
WHITESPACE_RE = re.compile("[ \\t]+", re.M)
752
# replaces one or more newline characters
753
NEWLINE_RE = re.compile("[ ]*\\n[ \\n]+", re.M)
755
# <xyz/> -> <xyz /> fix--sgmllib.SGMLParser doesn't handle these right
756
UNARY_RE = re.compile("\\<[ ]*([A-Za-z]+)[ ]*[/]?\\>", re.M)
758
class HTMLStripper(sgmllib.SGMLParser):
759
"""Strips html from text while maintaining links and newline-like HTML
762
This class resets itself after every ``strip`` call, so you can re-use
763
the class if you want. However, this class is not threadsafe.
766
sgmllib.SGMLParser.__init__(self)
773
"""Takes a string ``s`` and returns the stripped version.
775
if not isinstance(s, basestring):
778
s = s.replace("\r\n", "\n")
780
# if it's just white space, we skip all the work.
784
s = UNARY_RE.sub("<\\1 />", s)
791
data, links = self._data, self._links
799
sgmllib.SGMLParser.reset(self)
806
s = WHITESPACE_RE.sub(" ", s)
807
s = NEWLINE_RE.sub("\n", s)
814
temp = self._clean("".join(self._temp))
816
self._data = temp.lstrip()
821
def handle_data(self, data):
822
data = data.replace("\n", " ")
825
def handle_charref(self, ref):
826
if ref.startswith('x'):
827
charnum = int(ref[1:], 16)
830
self._add(unichr(charnum))
832
def start_p(self, attributes):
838
def start_br(self, attributes):
844
def start_a(self, attributes):
845
for key, val in attributes:
853
self._links.append((len(self._data), -1, href))
857
if self._links and self._links[-1][1] == -1:
858
beg, dummy, url = self._links[-1]
859
self._links[-1] = (beg, len(self._data), url)
861
class Matrix(object):
862
"""2 Dimensional matrix.
864
Matrix objects are accessed like a list, except tuples are used as
865
indices, for example:
870
None, None, None, None, None
871
None, None, None, None, None
872
None, None, None, None, None
873
None, None, None, None, None
874
None, None, None, 'foo', None
877
def __init__(self, columns, rows, initial_value=None):
878
self.columns = columns
880
self.data = [ initial_value ] * (columns * rows)
882
def __getitem__(self, key):
883
return self.data[(key[0] * self.rows) + key[1]]
885
def __setitem__(self, key, value):
886
self.data[(key[0] * self.rows) + key[1]] = value
889
return iter(self.data)
892
return "\n".join([", ".join([repr(r)
893
for r in list(self.row(i))])
894
for i in xrange(self.rows)])
896
def remove(self, value):
897
"""This sets the value to None--it does NOT remove the cell
898
from the Matrix because that doesn't make any sense.
900
i = self.data.index(value)
904
"""Iterator that yields all the objects in a row."""
905
for i in xrange(self.columns):
908
def column(self, column):
909
"""Iterator that yields all the objects in a column."""
910
for i in xrange(self.rows):
911
yield self[column, i]
913
def entity_replace(text):
926
# FIXME: have a more general, charset-aware way to do this.
927
for src, dest in replacements:
928
text = text.replace(src, dest)
931
# Matches an absolute http/https URL: scheme, a non-empty host part
# containing no '/' or space, then '/' and a path with no spaces.
HTTP_HTTPS_MATCH_RE = re.compile(r"^(http|https)://[^/ ]+/[^ ]*$")
934
"""Returns True if this is URL-ish.
939
for c in url.encode('utf-8'):
942
if HTTP_HTTPS_MATCH_RE.match(url) is not None:
946
# Translation table mapping ASCII uppercase letters to lowercase, for
# locale-independent lowercasing via str.translate.
LOWER_TRANSLATE = string.maketrans(string.ascii_uppercase,
                                   string.ascii_lowercase)
950
def ascii_lower(s):
    """Converts a string to lower case, using a simple translation of
    ASCII characters.

    This method is not locale-dependent, which is useful in some cases.
    Normally s.lower() should be used though.
    """
    return s.translate(LOWER_TRANSLATE)
958
class DebuggingTimer:
    """Tracks wall-clock time between checkpoints and logs it at the
    custom TIMING level.
    """
    def __init__(self):
        self.start_time = self.last_time = clock()

    def log_time(self, msg):
        """Log the time elapsed since the previous checkpoint, then
        start a new one.
        """
        now = clock()
        logging.timing("%s: %0.4f", msg, now - self.last_time)
        self.last_time = now

    def log_total_time(self):
        """Log the time elapsed since this timer was created."""
        logging.timing("total time: %0.3f", clock() - self.start_time)