~jtv/storm/profile-fetches

Viewing changes to test

  • Committer: Gustavo Niemeyer
  • Date: 2006-05-13 04:42:42 UTC
  • Revision ID: gustavo@niemeyer.net-20060513044242-39457d29f0849be3
Adding initial infrastructure.

#!/usr/bin/env python
import optparse
import unittest
import doctest
import new
import sys
import os

import tests

# Names of the environment variables that point at the test databases,
# used by test_with_unittest() below.  These exact names are placeholders
# assumed here; adjust them to whatever the project actually uses.
MAIN_DB_URI = "MAIN_DB_URI"
RESOURCE_DB_URI = "RESOURCE_DB_URI"

def disable_conftest():
    """Install an empty module as py.test's tests.conftest.

    When other runners walk the test suite, they shouldn't find the
    py.test-specific conftest.py file.
    """
    conftest = new.module("conftest")
    conftest.__file__ = "tests/conftest.py"
    sys.modules["tests.conftest"] = tests.conftest = conftest

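# Run the suite (or the test files named on the command line) under
# Twisted's trial runner.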
def test_with_trial():
    from twisted.scripts import trial
    disable_conftest()
    if len(sys.argv) == 1:
        for dir, dirs, files in os.walk('tests'):
            for file in files:
                if file.endswith('.py'):
                    sys.argv.append(os.path.join(dir, file))
    trial.run()

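# Run the suite under py.test.  If no test paths were given, point py.test
# at the tests/ directory and the library sources so that loop mode can
# watch them for changes.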
def test_with_py_test():
    import py
    if not [x for x in sys.argv[1:] if not x.startswith("-")]:
        tests_dir = os.path.join(os.path.dirname(__file__), "tests/")
        # For timestamp checking when looping, also watch the library
        # sources ("storm" is assumed to be the package directory here):
        lib_dir = os.path.join(os.path.dirname(__file__), "storm")
        sys.argv.extend([tests_dir, lib_dir])
    py.test.cmdline.main()

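# Run the suite with the standard unittest machinery: unit tests are
# collected from *.py files and doctests from *.txt files under tests/,
# with a summary of totals printed at the end.  Exits with a non-zero
# status if anything failed or errored.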
def test_with_unittest():

    usage = "test.py [options] [<test filename>, ...]"
    description = ("If the environment variables %s and %s are present,\n"
                   "unittests will be run against the specified databases."
                   % (MAIN_DB_URI, RESOURCE_DB_URI))

    parser = optparse.OptionParser(description=description, usage=usage)

    parser.add_option('--verbose', action='store_true')
    opts, args = parser.parse_args()
    opts.args = args

    disable_conftest()

    runner = unittest.TextTestRunner()

    if opts.verbose:
        runner.verbosity = 2

    loader = unittest.TestLoader()
    topdir = os.path.abspath(os.path.dirname(__file__))
    testdir = os.path.dirname(tests.__file__)
    doctest_flags = doctest.ELLIPSIS
    unittests = []
    doctests = []
    # Collect unit test modules (*.py) and doctest files (*.txt) under the
    # tests directory, honouring any file names given on the command line.
    for root, dirnames, filenames in os.walk(testdir):
        for filename in filenames:
            filepath = os.path.join(root, filename)
            relpath = filepath[len(topdir)+1:]
            if (filename == "__init__.py" or filename.endswith(".pyc") or
                opts.args and relpath not in opts.args):
                pass
            elif filename.endswith(".py"):
                unittests.append(relpath)
            elif filename.endswith(".txt"):
                doctests.append(relpath)

    # Accumulates totals across runs and prints a per-run summary line.
    class Summary:
        def __init__(self):
            self.total_failures = 0
            self.total_errors = 0
            self.total_tests = 0
        def __call__(self, tests, failures, errors):
            self.total_tests += tests
            self.total_failures += failures
            self.total_errors += errors
            print "(tests=%d, failures=%d, errors=%d)" % \
                  (tests, failures, errors)

    unittest_summary = Summary()
    doctest_summary = Summary()

    if unittests:
        print "Running unittests..."
        for relpath in unittests:
            print "[%s]" % relpath
            # e.g. tests/foo/bar.py -> tests.foo.bar
            modpath = relpath.replace('/', '.')[:-3]
            module = __import__(modpath, None, None, [""])
            test = loader.loadTestsFromModule(module)
            result = runner.run(test)
            unittest_summary(test.countTestCases(),
                             len(result.failures), len(result.errors))
            print

    if doctests:
        print "Running doctests..."
        for relpath in doctests:
            print "[%s]" % relpath
            failures, total = doctest.testfile(relpath,
                                               optionflags=doctest_flags)
            doctest_summary(total, failures, 0)
            print

    print "Total test cases: %d" % unittest_summary.total_tests
    print "Total doctests: %d" % doctest_summary.total_tests
    print "Total failures: %d" % (unittest_summary.total_failures +
                                  doctest_summary.total_failures)
    print "Total errors: %d" % (unittest_summary.total_errors +
                                doctest_summary.total_errors)

    failed = bool(unittest_summary.total_failures or
                  unittest_summary.total_errors or
                  doctest_summary.total_failures or
                  doctest_summary.total_errors)

    sys.exit(failed)

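# The runner is picked via the STORM_TEST_RUNNER environment variable
# ("unittest" by default); its value maps onto one of the test_with_*()
# functions above, e.g. "py.test" selects test_with_py_test().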
if __name__ == "__main__":
    runner = os.environ.get("STORM_TEST_RUNNER")
    if not runner:
        runner = "unittest"
    runner_func = globals().get("test_with_%s" % runner.replace(".", "_"))
    if not runner_func:
        sys.exit("Test runner not found: %s" % runner)
    runner_func()

# vim:ts=4:sw=4:et
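For example, running STORM_TEST_RUNNER=py.test python test from the top of the branch runs the suite under py.test, STORM_TEST_RUNNER=trial python test runs it under Twisted's trial, and leaving the variable unset falls back to the plain unittest runner; with that default runner, any extra command-line arguments are taken as test file paths relative to the tree root and limit the run to just those files.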