~facundo/enjuewemela/trunk

« back to all changes in this revision

Viewing changes to enjuewemela/tests/benchmark_detector.py

  • Committer: facundo at taniquetil.com.ar
  • Date: 2011-07-04 00:29:17 UTC
  • mto: This revision was merged to the branch mainline in revision 73.
  • Revision ID: facundo@taniquetil.com.ar-20110704002917-zfvxgdu81v2ynnkz
Benchmarks

Show diffs side-by-side

added

removed

Lines of Context:
 
1
# This code is part of the 'enjuewemela' game
 
2
# License: GPLv3
 
3
# Main author: Facundo Batista
 
4
# Code, bug tracker, etc:
 
5
#   https://launchpad.net/enjuewemela/
 
6
#
 
7
"""Benchmark for the detector."""
 
8
 
1
9
from __future__ import division
2
10
 
3
 
import sys
4
 
import  os
5
 
sys.path.append(os.path.abspath("."))
6
 
 
7
11
import time
8
12
import random
9
13
 
10
 
from logic_board import detect
 
14
from enjuewemela.logic_board import detect
11
15
 
12
16
HOW_MANY = 1000
13
17
 
31
35
 
32
36
scenario = [_generate_board() for x in range(HOW_MANY)]
33
37
 
 
38
print "Benchmarking the detector..."
34
39
results = {}
35
40
tini = time.time()
36
41
for board in scenario:
39
44
    results[lenres] = results.get(lenres, 0) + 1
40
45
tfin = time.time()
41
46
 
42
 
print "Done! Tested %d iterations" % HOW_MANY
43
 
print "Average analysis time: %.2f mseg" % (1000 * (tfin-tini) / HOW_MANY)
44
 
print "Matchs:", results
 
47
print "    Done! Tested %d iterations" % HOW_MANY
 
48
print "    Average analysis time: %.2f mseg" % (1000 * (tfin-tini) / HOW_MANY)
 
49
print "    Matchs:", results
45
50