#! /usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
#
# Copyright (c) 2009 University of Washington
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import sys
import time
import optparse
import subprocess
import threading
import signal
import shutil
import Queue
import xml.dom.minidom

from utils import get_list_from_file
# XXX This should really be part of a waf command to list the configuration
# items relative to optional ns-3 pieces.

# A list of interesting configuration items in the waf configuration
# cache which we may be interested in when deciding on which examples
# to run and how to run them.  These are set by waf during the
# configuration phase and the corresponding assignments are usually
# found in the associated subdirectory wscript files.
interesting_config_items = [
    "NS3_ENABLED_MODULES",
    "NS3_MODULE_PATH",
    "NSC_ENABLED",
    "ENABLE_REAL_TIME",
    "ENABLE_THREADING",
    "ENABLE_TESTS",
    "EXAMPLE_DIRECTORIES",
    "ENABLE_PYTHON_BINDINGS",
    "ENABLE_OPENFLOW",
]

# Reasonable defaults for the items above; read_waf_config() below
# overwrites them with whatever waf actually configured.
NSC_ENABLED = False
ENABLE_REAL_TIME = False
ENABLE_THREADING = False
ENABLE_EXAMPLES = True
ENABLE_TESTS = True
ENABLE_OPENFLOW = False
EXAMPLE_DIRECTORIES = []

NS3_BUILDDIR = "build"
# If the user has constrained us to run certain kinds of tests, we can tell waf
# to only build what those tests will need.
core_kinds = ["bvt", "core", "system", "unit"]
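# (For illustration: a constraint like "core" is later handed to the
# test-runner as a test type, producing an invocation of roughly this
# shape -- see run_tests() below:
#
#     utils/test-runner --print-test-name-list --test-type=core
# )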
# There are some special cases for test suites that kill valgrind.  This is
# because NSC causes illegal instruction crashes when run under valgrind.
core_valgrind_skip_tests = [
    "ns3-tcp-interoperability",
]

# There are some special cases for test suites that fail when NSC is
# missing.
core_nsc_missing_skip_tests = [
    "ns3-tcp-interoperability",
]
# Parse the examples-to-run file if it exists.
#
# This function adds any C++ examples or Python examples that are to be run
# to the lists in example_tests and python_tests, respectively.
#
def parse_examples_to_run_file(
    examples_to_run_path,
    cpp_executable_dir,
    python_script_dir,
    example_tests,
    python_tests):

    # See if the examples-to-run file exists.
    if os.path.exists(examples_to_run_path):

        # Each tuple in the C++ list of examples to run contains
        #
        #     (example_name, do_run, do_valgrind_run)
        #
        # where example_name is the executable to be run, do_run is a
        # condition under which to run the example, and do_valgrind_run is
        # a condition under which to run the example under valgrind.  This
        # is needed because NSC causes illegal instruction crashes with
        # some tests when they are run under valgrind.
        #
        # Note that the two conditions are Python statements that
        # can depend on waf configuration variables.  For example,
        #
        #     ("tcp-nsc-lfn", "NSC_ENABLED == True", "NSC_ENABLED == False"),
        #
        cpp_examples = get_list_from_file(examples_to_run_path, "cpp_examples")
        for example_name, do_run, do_valgrind_run in cpp_examples:
            example_path = os.path.join(cpp_executable_dir, example_name)

            # Add all of the C++ examples that were built, i.e. found
            # in the directory, to the list of C++ examples to run.
            if os.path.exists(example_path):
                example_tests.append((example_path, do_run, do_valgrind_run))

        # Each tuple in the Python list of examples to run contains
        #
        #     (example_name, do_run)
        #
        # where example_name is the Python script to be run and
        # do_run is a condition under which to run the example.
        #
        # Note that the condition is a Python statement that can
        # depend on waf configuration variables.  For example,
        #
        #     ("realtime-udp-echo.py", "ENABLE_REAL_TIME == True"),
        #
        python_examples = get_list_from_file(examples_to_run_path, "python_examples")
        for example_name, do_run in python_examples:
            example_path = os.path.join(python_script_dir, example_name)

            # Add all of the Python examples that were found to the
            # list of Python examples to run.
            if os.path.exists(example_path):
                python_tests.append((example_path, do_run))
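# For illustration only, a complete examples-to-run.py might look like the
# minimal sketch below; the example names and conditions here are
# hypothetical, not taken from any particular module:
#
#     cpp_examples = [
#         ("udp-echo", "True", "True"),
#         ("tcp-nsc-lfn", "NSC_ENABLED == True", "NSC_ENABLED == False"),
#     ]
#
#     python_examples = [
#         ("realtime-udp-echo.py", "ENABLE_REAL_TIME == True"),
#     ]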
#
# The test suites are going to want to output status.  They are running
# concurrently.  This means that unless we are careful, the output of
# the test suites will be interleaved.  Rather than introducing a lock
# file that could unintentionally start serializing execution, we ask
# the tests to write their output to a temporary directory and then
# put together the final output file when we "join" the test tasks back
# to the main thread.  In addition to this issue, the example programs
# often write lots and lots of trace files which we will just ignore.
# We put all of them into the temp directory as well, so they can be
# easily deleted.
#
TMP_OUTPUT_DIR = "testpy-output"
def read_test(test):
    result = test.find('Result').text
    name = test.find('Name').text
    if not test.find('Time') is None:
        time_real = test.find('Time').get('real')
    else:
        time_real = ''
    return (result, name, time_real)
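# For reference, read_test() consumes <Test> elements of roughly this shape
# (a sketch based on the XML written later in this file):
#
#     <Test>
#       <Name>some-test-suite</Name>
#       <Result>PASS</Result>
#       <Time real="1.234"/>
#     </Test>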
#
# A simple example of writing a text file with a test result summary.  It is
# expected that this output will be fine for developers looking for problems.
#
def node_to_text(test, f):
    (result, name, time_real) = read_test(test)
    output = "%s: Test Suite \"%s\" (%s)\n" % (result, name, time_real)
    f.write(output)

    for details in test.findall('FailureDetails'):
        f.write(" Details:\n")
        f.write(" Message: %s\n" % details.find('Message').text)
        f.write(" Condition: %s\n" % details.find('Condition').text)
        f.write(" Actual: %s\n" % details.find('Actual').text)
        f.write(" Limit: %s\n" % details.find('Limit').text)
        f.write(" File: %s\n" % details.find('File').text)
        f.write(" Line: %s\n" % details.find('Line').text)

    for child in test.findall('Test'):
        node_to_text(child, f)
def translate_to_text(results_file, text_file):
    f = open(text_file, 'w')
    import xml.etree.ElementTree as ET
    et = ET.parse(results_file)
    for test in et.findall('Test'):
        node_to_text(test, f)

    for example in et.findall('Example'):
        result = example.find('Result').text
        name = example.find('Name').text
        if not example.find('Time') is None:
            time_real = example.find('Time').get('real')
        else:
            time_real = ''
        output = "%s: Example \"%s\" (%s)\n" % (result, name, time_real)
        f.write(output)

    f.close()
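# Typical use (a sketch; the paths are hypothetical):
#
#     translate_to_text("testpy-output/2010-01-12-22-47-50-CUT/results.xml",
#                       "results.txt")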
#
# A simple example of writing an HTML file with a test result summary.  It is
# expected that this will eventually be made prettier as time progresses and
# we have time to tweak it.  This may end up being moved to a separate module
# since it will probably grow over time.
#
def translate_to_html(results_file, html_file):
    f = open(html_file, 'w')
    f.write("<html>\n")
    f.write("<body>\n")
    f.write("<center><h1>ns-3 Test Results</h1></center>\n")
    #
    # Read and parse the whole results file.
    #
    import xml.etree.ElementTree as ET
    et = ET.parse(results_file)

    #
    # Iterate through the test suites
    #
    f.write("<h2>Test Suites</h2>\n")
    for suite in et.findall('Test'):

        # For each test suite, get its name, result and execution time info
        (result, name, time) = read_test(suite)

        #
        # Print a level three header with the result, name and time.  If the
        # test suite passed, the header is printed in green.  If the suite was
        # skipped, print it in orange, otherwise assume something bad happened
        # and print it in red.
        #
        if result == "PASS":
            f.write("<h3 style=\"color:green\">%s: %s (%s)</h3>\n" % (result, name, time))
        elif result == "SKIP":
            f.write("<h3 style=\"color:#ff6600\">%s: %s (%s)</h3>\n" % (result, name, time))
        else:
            f.write("<h3 style=\"color:red\">%s: %s (%s)</h3>\n" % (result, name, time))
        #
        # The test case information goes in a table.
        #
        f.write("<table border=\"1\">\n")

        #
        # The first column of the table has the heading Result
        #
        f.write("<tr>\n")
        f.write("<th> Result </th>\n")

        #
        # If the suite crashed or is skipped, there is no further information, so just
        # declare a new table row with the result (CRASH or SKIP) in it.  Looks like:
        #
        #    +--------+
        #    | Result |
        #    +--------+
        #
        # Then go on to the next test suite.  Valgrind and skipped errors look the same.
        #
        if result in ["CRASH", "SKIP", "VALGR"]:
            f.write("<tr>\n")
            if result == "SKIP":
                f.write("<td style=\"color:#ff6600\">%s</td>\n" % result)
            else:
                f.write("<td style=\"color:red\">%s</td>\n" % result)
            f.write("</tr>\n")
            f.write("</table>\n")
            continue

        #
        # If the suite didn't crash, we expect more information, so fill out
        # the table heading row.  Like,
        #
        #    +--------+----------------+------+
        #    | Result | Test Case Name | Time |
        #    +--------+----------------+------+
        #
        f.write("<th>Test Case Name</th>\n")
        f.write("<th> Time </th>\n")

        #
        # If the test case failed, we need to print out some failure details
        # so extend the heading row again.  Like,
        #
        #    +--------+----------------+------+-----------------+
        #    | Result | Test Case Name | Time | Failure Details |
        #    +--------+----------------+------+-----------------+
        #
        if result == "FAIL":
            f.write("<th>Failure Details</th>\n")
        #
        # Now iterate through all of the test cases.
        #
        for case in suite.findall('Test'):

            #
            # Get the name, result and timing information from xml to use in
            # printing table below.
            #
            (result, name, time) = read_test(case)

            #
            # If the test case failed, we iterate through possibly multiple
            # failure details.
            #
            if result == "FAIL":

                #
                # There can be multiple failures for each test case.  The first
                # row always gets the result, name and timing information along
                # with the failure details.  Remaining failures don't duplicate
                # this information but just get blanks for readability.  Like,
                #
                #    +--------+----------------+------+-----------------+
                #    | Result | Test Case Name | Time | Failure Details |
                #    +--------+----------------+------+-----------------+
                #    | FAIL   | The name       | time | It's busted     |
                #    +--------+----------------+------+-----------------+
                #    |        |                |      | Really broken   |
                #    +--------+----------------+------+-----------------+
                #    |        |                |      | Busted bad      |
                #    +--------+----------------+------+-----------------+
                #
                first_row = True
                for details in case.findall('FailureDetails'):

                    # Start a new row in the table for each possible Failure Detail
                    f.write("<tr>\n")
                    if first_row:
                        first_row = False
                        f.write("<td style=\"color:red\">%s</td>\n" % result)
                        f.write("<td>%s</td>\n" % name)
                        f.write("<td>%s</td>\n" % time)
                    else:
                        f.write("<td></td>\n")
                        f.write("<td></td>\n")
                        f.write("<td></td>\n")

                    f.write("<td>")
                    f.write("<b>Message: </b>%s, " % details.find('Message').text)
                    f.write("<b>Condition: </b>%s, " % details.find('Condition').text)
                    f.write("<b>Actual: </b>%s, " % details.find('Actual').text)
                    f.write("<b>Limit: </b>%s, " % details.find('Limit').text)
                    f.write("<b>File: </b>%s, " % details.find('File').text)
                    f.write("<b>Line: </b>%s" % details.find('Line').text)
                    f.write("</td>\n")
                    f.write("</tr>\n")
            else:
                #
                # If this particular test case passed, then we just print the PASS
                # result in green, followed by the test case name and its execution
                # time information.  These go off in <td> ... </td> table data.
                # The details table entry is left blank.
                #
                #    +--------+----------------+------+---------+
                #    | Result | Test Case Name | Time | Details |
                #    +--------+----------------+------+---------+
                #    | PASS   | The name       | time |         |
                #    +--------+----------------+------+---------+
                #
                f.write("<tr>\n")
                f.write("<td style=\"color:green\">%s</td>\n" % result)
                f.write("<td>%s</td>\n" % name)
                f.write("<td>%s</td>\n" % time)
                f.write("<td></td>\n")
                f.write("</tr>\n")
        # All of the rows are written, so we need to end the table.
        f.write("</table>\n")

    #
    # That's it for all of the test suites.  Now we have to do something about
    # our examples.
    #
    f.write("<h2>Examples</h2>\n")

    # Example status is rendered in a table just like the suites.
    f.write("<table border=\"1\">\n")

    #
    # The table headings look like,
    #
    #    +--------+--------------+--------------+
    #    | Result | Example Name | Elapsed Time |
    #    +--------+--------------+--------------+
    #
    f.write("<tr>\n")
    f.write("<th> Result </th>\n")
    f.write("<th>Example Name</th>\n")
    f.write("<th>Elapsed Time</th>\n")
    f.write("</tr>\n")
    # Now iterate through all of the examples
    for example in et.findall("Example"):

        # Start a new row for each example
        f.write("<tr>\n")

        # Get the result and name of the example in question
        (result, name, time) = read_test(example)

        # If the example either failed or crashed, print its result status
        # in red; otherwise green.  This goes in a <td> ... </td> table data.
        if result == "PASS":
            f.write("<td style=\"color:green\">%s</td>\n" % result)
        elif result == "SKIP":
            f.write("<td style=\"color:#ff6600\">%s</td>\n" % result)
        else:
            f.write("<td style=\"color:red\">%s</td>\n" % result)

        # Write the example name as a new tag data.
        f.write("<td>%s</td>\n" % name)

        # Write the elapsed time as a new tag data.
        f.write("<td>%s</td>\n" % time)

        # That's it for the current example, so terminate the row.
        f.write("</tr>\n")

    # That's it for the table of examples, so terminate the table.
    f.write("</table>\n")
    # And that's it for the report, so finish up.
    f.write("</body>\n")
    f.write("</html>\n")
    f.close()

#
# Python Control-C handling is broken in the presence of multiple threads.
# Signals get delivered to the runnable/running thread by default and if
# it is blocked, the signal is simply ignored.  So we hook sigint and set
# a global variable telling the system to shut down gracefully.
#
thread_exit = False

def sigint_hook(signal, frame):
    global thread_exit
    thread_exit = True
    return 0
#
# Waf can be configured to compile in debug or optimized modes.  In each
# case, the resulting build goes into a different directory.  If we want
# the tests to run from the correct code-base, we have to figure out which
# mode waf is running in.  This is called its active variant.
#
# XXX This function pokes around in the waf internal state file.  To be a
# little less hacky, we should add a command to waf to return this info
# and use that result.
#
def read_waf_active_variant():
    for line in open("build/c4che/default.cache.py").readlines():
        if line.startswith("NS3_ACTIVE_VARIANT"):
            exec(line, globals())
            break

    if options.verbose:
        print "NS3_ACTIVE_VARIANT == %s" % NS3_ACTIVE_VARIANT
#
# In general, the build process itself naturally takes care of figuring out
# which tests are built into the test runner.  For example, if waf configure
# determines that ENABLE_EMU is false due to some missing dependency,
# the tests for the emu net device simply will not be built and will
# therefore not be included in the built test runner.
#
# Examples, however, are a different story.  In that case, we are just given
# a list of examples that could be run.  Instead of just failing, for example,
# nsc-tcp-zoo if NSC is not present, we look into the waf saved configuration
# for relevant configuration items.
#
# XXX This function pokes around in the waf internal state file.  To be a
# little less hacky, we should add a command to waf to return this info
# and use that result.
#
def read_waf_config():
    for line in open("build/c4che/%s.cache.py" % NS3_ACTIVE_VARIANT).readlines():
        for item in interesting_config_items:
            if line.startswith(item):
                exec(line, globals())

    if options.verbose:
        for item in interesting_config_items:
            print "%s ==" % item, eval(item)
#
# It seems pointless to fork a process to run waf to fork a process to run
# the test runner, so we just run the test runner directly.  The main thing
# that waf would do for us would be to sort out the shared library path but
# we can deal with that easily and do here.
#
# There can be many different ns-3 repositories on a system, and each has
# its own shared libraries, so ns-3 doesn't hardcode a shared library search
# path -- it is cooked up dynamically, so we do that too.
#
def make_paths():
    have_DYLD_LIBRARY_PATH = False
    have_LD_LIBRARY_PATH = False
    have_PATH = False
    have_PYTHONPATH = False

    keys = os.environ.keys()
    for key in keys:
        if key == "DYLD_LIBRARY_PATH":
            have_DYLD_LIBRARY_PATH = True
        if key == "LD_LIBRARY_PATH":
            have_LD_LIBRARY_PATH = True
        if key == "PATH":
            have_PATH = True
        if key == "PYTHONPATH":
            have_PYTHONPATH = True

    pypath = os.path.join(NS3_BUILDDIR, NS3_ACTIVE_VARIANT, "bindings", "python")

    if not have_PYTHONPATH:
        os.environ["PYTHONPATH"] = pypath
    else:
        os.environ["PYTHONPATH"] += ":" + pypath

    if options.verbose:
        print "os.environ[\"PYTHONPATH\"] == %s" % os.environ["PYTHONPATH"]

    if sys.platform == "darwin":
        if not have_DYLD_LIBRARY_PATH:
            os.environ["DYLD_LIBRARY_PATH"] = ""
        for path in NS3_MODULE_PATH:
            os.environ["DYLD_LIBRARY_PATH"] += ":" + path
        if options.verbose:
            print "os.environ[\"DYLD_LIBRARY_PATH\"] == %s" % os.environ["DYLD_LIBRARY_PATH"]
    elif sys.platform == "win32":
        if not have_PATH:
            os.environ["PATH"] = ""
        for path in NS3_MODULE_PATH:
            os.environ["PATH"] += ';' + path
        if options.verbose:
            print "os.environ[\"PATH\"] == %s" % os.environ["PATH"]
    elif sys.platform == "cygwin":
        if not have_PATH:
            os.environ["PATH"] = ""
        for path in NS3_MODULE_PATH:
            os.environ["PATH"] += ":" + path
        if options.verbose:
            print "os.environ[\"PATH\"] == %s" % os.environ["PATH"]
    else:
        if not have_LD_LIBRARY_PATH:
            os.environ["LD_LIBRARY_PATH"] = ""
        for path in NS3_MODULE_PATH:
            os.environ["LD_LIBRARY_PATH"] += ":" + path
        if options.verbose:
            print "os.environ[\"LD_LIBRARY_PATH\"] == %s" % os.environ["LD_LIBRARY_PATH"]
#
# Short note on generating suppressions:
#
# See the valgrind documentation for a description of suppressions.  The easiest
# way to generate a suppression expression is by using the valgrind
# --gen-suppressions option.  To do that you have to figure out how to run the
# test in question.
#
# If you do "test.py -v -g -s <suitename>" then test.py will output most of what
# you need.  For example, if you are getting a valgrind error in the
# devices-mesh-dot11s-regression test suite, you can run:
#
#     ./test.py -v -g -s devices-mesh-dot11s-regression
#
# You should see in the verbose output something that looks like:
#
#     Synchronously execute valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
#     --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/test-runner
#     --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev
#     --tempdir=testpy-output/2010-01-12-22-47-50-CUT
#     --out=testpy-output/2010-01-12-22-47-50-CUT/devices-mesh-dot11s-regression.xml
#
# You need to pull out the useful pieces, and so could run the following to
# reproduce your error:
#
#     valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
#     --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/test-runner
#     --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev
#     --tempdir=testpy-output
#
# Hint: Use the first part of the command as is, and point the "tempdir" to
# somewhere real.  You don't need to specify an "out" file.
#
# When you run the above command you should see your valgrind error.  The
# suppression expression(s) can be generated by adding the --gen-suppressions=yes
# option to valgrind.  Use something like:
#
#     valgrind --gen-suppressions=yes --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
#     --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/test-runner
#     --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev
#     --tempdir=testpy-output
#
# Now when valgrind detects an error it will ask:
#
#     ==27235== ---- Print suppression ? --- [Return/N/n/Y/y/C/c] ----
#
# to which you just enter 'y'<ret>.
#
# You will be provided with a suppression expression that looks something like
# the following:
#
#     {
#       <insert_a_suppression_name_here>
#       fun:_ZN3ns36dot11s15HwmpProtocolMac8SendPreqESt6vectorINS0_6IePreqESaIS3_EE
#       fun:_ZN3ns36dot11s15HwmpProtocolMac10SendMyPreqEv
#       fun:_ZN3ns36dot11s15HwmpProtocolMac18RequestDestinationENS_12Mac48AddressEjj
#       ...
#       the rest of the stack frame
#     }
#
# You need to add a suppression name which will only be printed out by valgrind in
# verbose mode (but it needs to be there in any case).  The entire stack frame is
# shown to completely characterize the error, but in most cases you won't need
# all of that info.  For example, if you want to turn off all errors that happen
# when the function (fun:) is called, you can just delete the rest of the stack
# frame.  You can also use wildcards to make the mangled signatures more readable.
#
# I added the following to the testpy.supp file for this particular error:
#
#     {
#       Suppress invalid read size errors in SendPreq() when using HwmpProtocolMac
#       fun:*HwmpProtocolMac*SendPreq*
#     }
#
# Now, when you run valgrind the error will be suppressed.
#
VALGRIND_SUPPRESSIONS_FILE = "testpy.supp"

def run_job_synchronously(shell_command, directory, valgrind, is_python, build_path=""):
    (base, build) = os.path.split(NS3_BUILDDIR)
    suppressions_path = os.path.join(base, VALGRIND_SUPPRESSIONS_FILE)

    if is_python:
        path_cmd = "python " + os.path.join(base, shell_command)
    else:
        if len(build_path):
            path_cmd = os.path.join(build_path, shell_command)
        else:
            path_cmd = os.path.join(NS3_BUILDDIR, NS3_ACTIVE_VARIANT, shell_command)

    if valgrind:
        cmd = "valgrind --suppressions=%s --leak-check=full --show-reachable=yes --error-exitcode=2 %s" % (suppressions_path,
            path_cmd)
    else:
        cmd = path_cmd

    if options.verbose:
        print "Synchronously execute %s" % cmd

    start_time = time.time()
    proc = subprocess.Popen(cmd, shell = True, cwd = directory, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout_results, stderr_results = proc.communicate()
    elapsed_time = time.time() - start_time

    retval = proc.returncode

    #
    # valgrind sometimes has its own idea about what kind of memory management
    # errors are important.  We want to detect *any* leaks, so the way to do
    # that is to look for the presence of a valgrind leak summary section.
    #
    # If another error has occurred (like a test suite has failed), we don't
    # want to trump that error, so only do the valgrind output scan if the
    # test has otherwise passed (return code was zero).
    #
    if valgrind and retval == 0 and "== LEAK SUMMARY:" in stderr_results:
        retval = 2

    if options.verbose:
        print "Return code = ", retval
        print "stderr = ", stderr_results

    return (retval, stdout_results, stderr_results, elapsed_time)
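# A typical call mirrors what the worker threads below do (a sketch; the
# suite name is hypothetical):
#
#     (rc, out, err, et) = run_job_synchronously(
#         "utils/test-runner --test-name=some-suite", os.getcwd(), False, False)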
#
# This class defines a unit of testing work.  It will typically refer to
# a test suite to run using the test-runner, or an example to run directly.
#
class Job:
    def __init__(self):
        self.is_break = False
        self.is_skip = False
        self.is_example = False
        self.is_pyexample = False
        self.shell_command = ""
        self.display_name = ""
        self.basedir = ""
        self.tempdir = ""
        self.cwd = ""
        self.tmp_file_name = ""
        self.returncode = False
        self.elapsed_time = 0
        self.build_path = ""

    #
    # A job is either a standard job or a special job indicating that a worker
    # thread should exit.  This special job is indicated by setting is_break
    # to true.
    #
    def set_is_break(self, is_break):
        self.is_break = is_break

    #
    # If a job is to be skipped, we actually run it through the worker threads
    # to keep the PASS, FAIL, CRASH and SKIP processing all in one place.
    #
    def set_is_skip(self, is_skip):
        self.is_skip = is_skip

    #
    # Examples are treated differently than standard test suites.  This is
    # mostly because they are completely unaware that they are being run as
    # tests.  So we have to do some special case processing to make them look
    # like tests.
    #
    def set_is_example(self, is_example):
        self.is_example = is_example

    #
    # Examples are treated differently than standard test suites.  This is
    # mostly because they are completely unaware that they are being run as
    # tests.  So we have to do some special case processing to make them look
    # like tests.
    #
    def set_is_pyexample(self, is_pyexample):
        self.is_pyexample = is_pyexample

    #
    # This is the shell command that will be executed in the job.  For example,
    #
    #     "utils/test-runner --test-name=some-test-suite"
    #
    def set_shell_command(self, shell_command):
        self.shell_command = shell_command

    #
    # This is the build path where ns-3 was built.  For example,
    #
    #     "/home/craigdo/repos/ns-3-allinone-test/ns-3-dev/build/debug"
    #
    def set_build_path(self, build_path):
        self.build_path = build_path

    #
    # This is the display name of the job, typically the test suite or example
    # name.  For example,
    #
    #     "some-test-suite" or "udp-echo"
    #
    def set_display_name(self, display_name):
        self.display_name = display_name

    #
    # This is the base directory of the repository out of which the tests are
    # being run.  It will be used deep down in the testing framework to determine
    # where the source directory of the test was, and therefore where to find
    # provided test vectors.  For example,
    #
    #     "/home/user/repos/ns-3-dev"
    #
    def set_basedir(self, basedir):
        self.basedir = basedir

    #
    # This is the directory to which a running test suite should write any
    # temporary files.
    #
    def set_tempdir(self, tempdir):
        self.tempdir = tempdir

    #
    # This is the current working directory that will be given to an executing
    # test as it is being run.  It will be used for examples to tell them where
    # to write all of the pcap files that we will be carefully ignoring.  For
    # example,
    #
    #     "/tmp/unchecked-traces"
    #
    def set_cwd(self, cwd):
        self.cwd = cwd

    #
    # This is the temporary results file name that will be given to an executing
    # test as it is being run.  We will be running all of our tests in parallel
    # so there must be multiple temporary output files.  These will be collected
    # into a single XML file at the end and then be deleted.
    #
    def set_tmp_file_name(self, tmp_file_name):
        self.tmp_file_name = tmp_file_name

    #
    # The return code received when the job process is executed.
    #
    def set_returncode(self, returncode):
        self.returncode = returncode

    #
    # The elapsed real time for the job execution.
    #
    def set_elapsed_time(self, elapsed_time):
        self.elapsed_time = elapsed_time
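# Putting a Job together for a test suite looks roughly like this sketch of
# what run_tests() does below (the suite name is hypothetical):
#
#     job = Job()
#     job.set_display_name("some-suite")
#     job.set_shell_command("utils/test-runner --test-name=some-suite")
#     input_queue.put(job)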
#
# The worker thread class that handles the actual running of a given test.
# Once spawned, it receives requests for work through its input_queue and
# ships the results back through the output_queue.
#
class worker_thread(threading.Thread):
    def __init__(self, input_queue, output_queue):
        threading.Thread.__init__(self)
        self.input_queue = input_queue
        self.output_queue = output_queue

    def run(self):
        while True:
            job = self.input_queue.get()

            # Worker threads continue running until explicitly told to stop
            # with a special break job.
            if job.is_break:
                return

            #
            # If the global interrupt handler sets the thread_exit variable,
            # we stop doing real work and just report back a "break" instead
            # of a real result so the main thread can shut down gracefully.
            #
            if thread_exit == True:
                job.set_is_break(True)
                self.output_queue.put(job)
                continue

            #
            # If we are actually supposed to skip this job, do so.  Note that
            # if is_skip is true, returncode is undefined.
            #
            if job.is_skip:
                if options.verbose:
                    print "Skip %s" % job.shell_command
                self.output_queue.put(job)
                continue

            # Otherwise go about the business of running tests as normal.
            else:
                if options.verbose:
                    print "Launch %s" % job.shell_command

                if job.is_example or job.is_pyexample:
                    #
                    # If we have an example, the shell command is all we need to
                    # know.  It will be something like "examples/udp-echo" or
                    # "examples/mixed-wireless.py"
                    #
                    (job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command,
                        job.cwd, options.valgrind, job.is_pyexample, job.build_path)
                else:
                    #
                    # If we're a test suite, we need to provide a little more info
                    # to the test runner, specifically the base directory and temp
                    # file name.
                    #
                    if options.update_data:
                        update_data = '--update-data'
                    else:
                        update_data = ''
                    (job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command +
                        " --xml --tempdir=%s --out=%s %s" % (job.tempdir, job.tmp_file_name, update_data),
                        job.cwd, options.valgrind, False)

                job.set_elapsed_time(et)

                if options.verbose:
                    print "returncode = %d" % job.returncode
                    print "---------- begin standard out ----------"
                    print standard_out
                    print "---------- begin standard err ----------"
                    print standard_err
                    print "---------- end standard err ----------"

                self.output_queue.put(job)
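# The round trip through a worker is symmetric (a sketch): the main thread
# puts Job objects on input_queue, workers run them and put the same objects
# back on output_queue, and the main thread collects them in completion order:
#
#     input_queue.put(job)       # dispatch from the main thread
#     job = output_queue.get()   # later, collect a finished (or break) job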
#
# This is the main function that does the work of interacting with the
# test-runner itself.
#
def run_tests():
    #
    # Run waf to make sure that everything is built, configured and ready to go
    # unless we are explicitly told not to.  We want to be careful about causing
    # our users pain while waiting for extraneous stuff to compile and link, so
    # we allow users that know what they're doing to not invoke waf at all.
    #
    if not options.nowaf:

        #
        # If the user is running the "kinds" or "list" options, there is an
        # implied dependency on the test-runner since we call that program
        # if those options are selected.  We will exit after processing those
        # options, so if we see them, we can safely only build the test-runner.
        #
        # If the user has constrained us to running only a particular type of
        # file, we can only ask waf to build what we know will be necessary.
        # For example, if the user only wants to run BVT tests, we only have
        # to build the test-runner and can ignore all of the examples.
        #
        # If the user only wants to run a single example, then we can just build
        # that example.
        #
        # If there is no constraint, then we have to build everything since the
        # user wants to run everything.
        #
        if options.kinds or options.list or (len(options.constrain) and options.constrain in core_kinds):
            if sys.platform == "win32":
                waf_cmd = "waf --target=test-runner"
            else:
                waf_cmd = "./waf --target=test-runner"
        elif len(options.example):
            if sys.platform == "win32":
                waf_cmd = "waf --target=%s" % os.path.basename(options.example)
            else:
                waf_cmd = "./waf --target=%s" % os.path.basename(options.example)
        else:
            if sys.platform == "win32":
                waf_cmd = "waf"
            else:
                waf_cmd = "./waf"

        if options.verbose:
            print "Building: %s" % waf_cmd

        proc = subprocess.Popen(waf_cmd, shell = True)
        proc.communicate()
        if proc.returncode:
            print >> sys.stderr, "Waf died. Not running tests"
            return proc.returncode
    #
    # Pull some interesting configuration information out of waf, primarily
    # so we can know where executables can be found, but also to tell us what
    # pieces of the system have been built.  This will tell us what examples
    # are runnable.
    #
    read_waf_active_variant()
    read_waf_config()
    make_paths()

    # Get the information from the build status file.
    build_status_file = os.path.join(NS3_BUILDDIR, NS3_ACTIVE_VARIANT, 'build-status.py')
    if os.path.exists(build_status_file):
        ns3_runnable_programs = get_list_from_file(build_status_file, "ns3_runnable_programs")
        ns3_runnable_scripts = get_list_from_file(build_status_file, "ns3_runnable_scripts")
    else:
        print >> sys.stderr, 'The build status file was not found.  You must do waf build before running test.py.'
        sys.exit(2)
    #
    # Generate the lists of examples to run as smoke tests in order to
    # ensure that they remain buildable and runnable over time.
    #
    example_tests = []
    python_tests = []
    for directory in EXAMPLE_DIRECTORIES:
        # Set the directories and paths for this example.
        example_directory = os.path.join("examples", directory)
        examples_to_run_path = os.path.join(example_directory, "examples-to-run.py")
        cpp_executable_dir = os.path.join(NS3_BUILDDIR, NS3_ACTIVE_VARIANT, example_directory)
        python_script_dir = os.path.join(example_directory)

        # Parse this example directory's file.
        parse_examples_to_run_file(
            examples_to_run_path,
            cpp_executable_dir,
            python_script_dir,
            example_tests,
            python_tests)

    for module in NS3_ENABLED_MODULES:
        # Remove the "ns3-" from the module name.
        module = module[len("ns3-"):]

        # Set the directories and paths for this example.
        module_directory = os.path.join("src", module)
        example_directory = os.path.join(module_directory, "examples")
        examples_to_run_path = os.path.join(module_directory, "test", "examples-to-run.py")
        cpp_executable_dir = os.path.join(NS3_BUILDDIR, NS3_ACTIVE_VARIANT, example_directory)
        python_script_dir = os.path.join(example_directory)

        # Parse this module's file.
        parse_examples_to_run_file(
            examples_to_run_path,
            cpp_executable_dir,
            python_script_dir,
            example_tests,
            python_tests)
    #
    # If lots of logging is enabled, we can crash Python when it tries to
    # save all of the text.  We just don't allow logging to be turned on when
    # test.py runs.  If you want to see logging output from your tests, you
    # have to run them using the test-runner directly.
    #
    os.environ["NS_LOG"] = ""

    #
    # There are a couple of options that imply we can exit before starting
    # up a bunch of threads and running tests.  Let's detect these cases and
    # handle them without doing all of the hard work.
    #
    if options.kinds:
        path_cmd = os.path.join("utils", "test-runner --print-test-type-list")
        (rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
        print standard_out

    if options.list:
        path_cmd = os.path.join("utils", "test-runner --print-test-name-list")
        (rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
        print standard_out

    if options.kinds or options.list:
        return
    #
    # We communicate results in two ways.  First, a simple message relating
    # PASS, FAIL, CRASH or SKIP is always written to the standard output.  It
    # is expected that this will be one of the main use cases.  A developer can
    # just run test.py with no options and see that all of the tests still
    # pass.
    #
    # The second main use case is when detailed status is requested (with the
    # --text or --html options).  Typically this will be text if a developer
    # finds a problem, or HTML for nightly builds.  In these cases, an
    # XML file is written containing the status messages from the test suites.
    # This file is then read and translated into text or HTML.  It is expected
    # that nobody will really be interested in the XML, so we write it somewhere
    # with a unique name (time) to avoid collisions.  In case an error happens, we
    # provide a runtime option to retain the temporary files.
    #
    # When we run examples as smoke tests, they are going to want to create
    # lots and lots of trace files.  We aren't really interested in the contents
    # of the trace files, so we also just stash them off in the temporary dir.
    # The retain option also causes these unchecked trace files to be kept.
    #
    date_and_time = time.strftime("%Y-%m-%d-%H-%M-%S-CUT", time.gmtime())

    if not os.path.exists(TMP_OUTPUT_DIR):
        os.makedirs(TMP_OUTPUT_DIR)

    testpy_output_dir = os.path.join(TMP_OUTPUT_DIR, date_and_time)

    if not os.path.exists(testpy_output_dir):
        os.makedirs(testpy_output_dir)

    #
    # Create the main output file and start filling it with XML.  We need to
    # do this since the tests will just append individual results to this file.
    #
    xml_results_file = os.path.join(testpy_output_dir, "results.xml")
    f = open(xml_results_file, 'w')
    f.write('<?xml version="1.0"?>\n')
    f.write('<Results>\n')
    f.close()
    #
    # We need to figure out what test suites to execute.  We are either given one
    # suite or example explicitly via the --suite or --example/--pyexample option,
    # or we need to call into the test runner and ask it to list all of the available
    # test suites.  Further, we need to provide the constraint information if it
    # has been given to us.
    #
    # This translates into allowing the following options with respect to the
    # suites,
    #
    #  ./test.py:                                           run all of the suites and examples
    #  ./test.py --constrain=core:                          run all of the suites of all kinds
    #  ./test.py --constrain=unit:                          run all unit suites
    #  ./test.py --suite=some-test-suite:                   run a single suite
    #  ./test.py --example=udp/udp-echo:                    run no test suites
    #  ./test.py --pyexample=wireless/mixed-wireless.py:    run no test suites
    #  ./test.py --suite=some-suite --example=some-example: run the single suite
    #
    # We can also use the --constrain option to provide an ordering of test
    # execution quite easily.
    #
    if len(options.suite):
        # See if this is a valid test suite.
        path_cmd = os.path.join("utils", "test-runner --print-test-name-list")
        (rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
        if options.suite in suites:
            suites = options.suite + "\n"
        else:
            print >> sys.stderr, 'The test suite was not run because an unknown test suite name was requested.'
            sys.exit(2)

    elif len(options.example) == 0 and len(options.pyexample) == 0:
        if len(options.constrain):
            path_cmd = os.path.join("utils", "test-runner --print-test-name-list --test-type=%s" % options.constrain)
            (rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
        else:
            path_cmd = os.path.join("utils", "test-runner --print-test-name-list")
            (rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
    else:
        suites = ""

    #
    # suite_list will contain either a single test suite name that the user has
    # indicated she wants to run or a list of test suites provided by
    # the test-runner possibly according to user provided constraints.
    # We go through the trouble of setting up the parallel execution
    # even in the case of a single suite to avoid having to process the
    # results in two different places.
    #
    suite_list = suites.split('\n')
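    # At this point suites is the raw test-runner output, one suite name per
    # line, e.g. (hypothetical names) "some-suite\nanother-suite\n", so the
    # split can leave an empty trailing entry that is filtered out below.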
    #
    # We now have a possibly large number of test suites to run, so we want to
    # run them in parallel.  We're going to spin up a number of worker threads
    # that will run our test jobs for us.
    #
    input_queue = Queue.Queue(0)
    output_queue = Queue.Queue(0)

    jobs = 0
    threads = []

    #
    # In Python 2.6 you can just use the multiprocessing module, but we don't
    # want to introduce that dependency yet; so we jump through a few hoops.
    #
    processors = 1

    if sys.platform != "win32":
        if 'SC_NPROCESSORS_ONLN' in os.sysconf_names:
            processors = os.sysconf('SC_NPROCESSORS_ONLN')
        else:
            proc = subprocess.Popen("sysctl -n hw.ncpu", shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout_results, stderr_results = proc.communicate()
            if len(stderr_results) == 0:
                processors = int(stdout_results)
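    # (Once Python 2.6+ can be assumed, the detection above could collapse to
    # something like:
    #
    #     import multiprocessing
    #     processors = multiprocessing.cpu_count()
    #
    # which is exactly the dependency the comment above is avoiding.)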
    #
    # Now, spin up one thread per processor which will eventually mean one test
    # per processor running concurrently.
    #
    for i in range(processors):
        thread = worker_thread(input_queue, output_queue)
        threads.append(thread)
        thread.start()
    # Keep track of some summary statistics
    total_tests = 0
    skipped_tests = 0

    #
    # We now have worker threads spun up, and a list of work to do.  So, run
    # through the list of test suites and dispatch a job to run each one.
    # Dispatching will run with unlimited speed and the worker threads will
    # execute as fast as possible from the queue.
    #
    # Note that we actually dispatch tests to be skipped, so all of the
    # PASS, FAIL, CRASH and SKIP processing is done in the same place.
    #
    for test in suite_list:
        if len(test):
            job = Job()
            job.set_is_example(False)
            job.set_is_pyexample(False)
            job.set_display_name(test)
            job.set_tmp_file_name(os.path.join(testpy_output_dir, "%s.xml" % test))
            job.set_cwd(os.getcwd())
            job.set_basedir(os.getcwd())
            job.set_tempdir(testpy_output_dir)
            if (options.multiple):
                multiple = ""
            else:
                multiple = " --stop-on-failure"

            path_cmd = os.path.join("utils", "test-runner --test-name=%s%s" % (test, multiple))
            job.set_shell_command(path_cmd)

            if options.valgrind and test in core_valgrind_skip_tests:
                job.set_is_skip(True)

            # Skip tests that will fail if NSC is missing.
            if not NSC_ENABLED and test in core_nsc_missing_skip_tests:
                job.set_is_skip(True)

            if options.verbose:
                print "Queue %s" % test

            input_queue.put(job)
            jobs = jobs + 1
            total_tests = total_tests + 1
    #
    # We've taken care of the discovered or specified test suites.  Now we
    # have to deal with examples run as smoke tests.  We have a list of all of
    # the example programs it makes sense to try and run.  Each example will
    # have a condition associated with it that must evaluate to true for us
    # to try and execute it.  This is used to determine if the example has
    # a dependency that is not satisfied.  For example, if an example depends
    # on NSC being configured by waf, that example should have a condition
    # that evaluates to true if NSC is enabled.  For example,
    #
    #     ("tcp-nsc-zoo", "NSC_ENABLED == True"),
    #
    # In this case, the example "tcp-nsc-zoo" will only be run if we find the
    # waf configuration variable "NSC_ENABLED" to be True.
    #
    # We don't care at all how the trace files come out, so we just write them
    # to a single temporary directory.
    #
    # XXX As it stands, all of the trace files have unique names, and so file
    # collisions can only happen if two instances of an example are running in
    # two versions of the test.py process concurrently.  We may want to create
    # uniquely named temporary traces directories to avoid this problem.
    #
    # We need to figure out what examples to execute.  We are either given one
    # suite or example explicitly via the --suite or --example option, or we
    # need to walk the list of examples looking for available example
    # conditions.
    #
    # This translates into allowing the following options with respect to the
    # examples,
    #
    #  ./test.py:                                           run all of the examples
    #  ./test.py --constrain=unit:                          run no examples
    #  ./test.py --constrain=example:                       run all of the examples
    #  ./test.py --suite=some-test-suite:                   run no examples
    #  ./test.py --example=some-example:                    run the single example
    #  ./test.py --suite=some-suite --example=some-example: run the single example
    #
    # XXX could use constrain to separate out examples used for performance
    # testing.
    #
    if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0:
        if len(options.constrain) == 0 or options.constrain == "example":
            for test, do_run, do_valgrind_run in example_tests:

                # Don't try to run this example if it isn't runnable.
                if os.path.basename(test) in ns3_runnable_programs:
                    if eval(do_run):
                        job = Job()
                        job.set_is_example(True)
                        job.set_is_pyexample(False)
                        job.set_display_name(test)
                        job.set_tmp_file_name("")
                        job.set_cwd(testpy_output_dir)
                        job.set_basedir(os.getcwd())
                        job.set_tempdir(testpy_output_dir)
                        job.set_shell_command(test)
                        job.set_build_path("")

                        if options.valgrind and not eval(do_valgrind_run):
                            job.set_is_skip (True)

                        if options.verbose:
                            print "Queue %s" % test

                        input_queue.put(job)
                        jobs = jobs + 1
                        total_tests = total_tests + 1
    elif len(options.example):
        # Don't try to run this example if it isn't runnable.
        example_name = os.path.basename(options.example)
        if example_name not in ns3_runnable_programs:
            print "Example %s is not runnable." % example_name
        else:
            #
            # If you tell me to run an example, I will try and run the example
            # irrespective of any condition.
            #
            job = Job()
            job.set_is_example(True)
            job.set_is_pyexample(False)
            job.set_display_name(options.example)
            job.set_tmp_file_name("")
            job.set_cwd(testpy_output_dir)
            job.set_basedir(os.getcwd())
            job.set_tempdir(testpy_output_dir)
            job.set_shell_command(options.example)
            job.set_build_path(options.buildpath)

            if options.verbose:
                print "Queue %s" % options.example

            input_queue.put(job)
            jobs = jobs + 1
            total_tests = total_tests + 1
    #
    # Run some Python examples as smoke tests.  We have a list of all of
    # the example programs it makes sense to try and run.  Each example will
    # have a condition associated with it that must evaluate to true for us
    # to try and execute it.  This is used to determine if the example has
    # a dependency that is not satisfied.
    #
    # We don't care at all how the trace files come out, so we just write them
    # to a single temporary directory.
    #
    # We need to figure out what python examples to execute.  We are either
    # given one pyexample explicitly via the --pyexample option, or we
    # need to walk the list of python examples.
    #
    # This translates into allowing the following options with respect to the
    # python examples,
    #
    #  ./test.py --constrain=pyexample:       run all of the python examples
    #  ./test.py --pyexample=some-example.py: run the single python example
    #
    if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0:
        if len(options.constrain) == 0 or options.constrain == "pyexample":
            for test, do_run in python_tests:

                # Don't try to run this example if it isn't runnable.
                if os.path.basename(test) in ns3_runnable_scripts:
                    if eval(do_run):
                        job = Job()
                        job.set_is_example(False)
                        job.set_is_pyexample(True)
                        job.set_display_name(test)
                        job.set_tmp_file_name("")
                        job.set_cwd(testpy_output_dir)
                        job.set_basedir(os.getcwd())
                        job.set_tempdir(testpy_output_dir)
                        job.set_shell_command(test)
                        job.set_build_path("")

                        #
                        # Python programs and valgrind do not work and play
                        # well together, so we skip them under valgrind.
                        # We go through the trouble of doing all of this
                        # work to report the skipped tests in a consistent
                        # way through the output formatter.
                        #
                        if options.valgrind:
                            job.set_is_skip (True)

                        #
                        # The user can disable python bindings, so we need
                        # to pay attention to that and give some feedback
                        # that we're not testing them.
                        #
                        if not ENABLE_PYTHON_BINDINGS:
                            job.set_is_skip (True)

                        if options.verbose:
                            print "Queue %s" % test

                        input_queue.put(job)
                        jobs = jobs + 1
                        total_tests = total_tests + 1
    elif len(options.pyexample):
        # Don't try to run this example if it isn't runnable.
        example_name = os.path.basename(options.pyexample)
        if example_name not in ns3_runnable_scripts:
            print "Example %s is not runnable." % example_name
        else:
            #
            # If you tell me to run a python example, I will try and run the example
            # irrespective of any condition.
            #
            job = Job()
            job.set_is_pyexample(True)
            job.set_display_name(options.pyexample)
            job.set_tmp_file_name("")
            job.set_cwd(testpy_output_dir)
            job.set_basedir(os.getcwd())
            job.set_tempdir(testpy_output_dir)
            job.set_shell_command(options.pyexample)
            job.set_build_path("")

            if options.verbose:
                print "Queue %s" % options.pyexample

            input_queue.put(job)
            jobs = jobs + 1
            total_tests = total_tests + 1
    #
    # Tell the worker threads to pack up and go home for the day.  Each one
    # will exit when it sees its is_break task.
    #
    for i in range(processors):
        job = Job()
        job.set_is_break(True)
        input_queue.put(job)
    #
    # Now all of the tests have been dispatched, so all we have to do here
    # in the main thread is to wait for them to complete.  Keyboard interrupt
    # handling is broken as mentioned above.  We use a signal handler to catch
    # sigint and set a global variable.  When the worker threads sense this
    # they stop doing real work and will just start throwing jobs back at us
    # with is_break set to True.  In this case, there are no real results so we
    # ignore them.  If there are real results, we always print PASS or FAIL to
    # standard out as a quick indication of what happened.
    #
    passed_tests = 0
    failed_tests = 0
    crashed_tests = 0
    valgrind_errors = 0
    for i in range(jobs):
        job = output_queue.get()
        if job.is_break:
            continue

        if job.is_example or job.is_pyexample:
            kind = "Example"
        else:
            kind = "TestSuite"

        if job.is_skip:
            status = "SKIP"
            skipped_tests = skipped_tests + 1
        else:
            if job.returncode == 0:
                status = "PASS"
                passed_tests = passed_tests + 1
            elif job.returncode == 1:
                failed_tests = failed_tests + 1
                status = "FAIL"
            elif job.returncode == 2:
                valgrind_errors = valgrind_errors + 1
                status = "VALGR"
            else:
                status = "CRASH"
                crashed_tests = crashed_tests + 1

        print "%s: %s %s" % (status, kind, job.display_name)
        if job.is_example or job.is_pyexample:
            #
            # Examples are the odd man out here.  They are written without any
            # knowledge that they are going to be run as a test, so we need to
            # cook up some kind of output for them.  We're writing an xml file,
            # so we do some simple XML that says we ran the example.
            #
            # XXX We could add some timing information to the examples, i.e. run
            # them through time and print the results here.
            #
            f = open(xml_results_file, 'a')
            f.write('<Example>\n')
            example_name = " <Name>%s</Name>\n" % job.display_name
            f.write(example_name)

            if status == "PASS":
                f.write(' <Result>PASS</Result>\n')
            elif status == "FAIL":
                f.write(' <Result>FAIL</Result>\n')
            elif status == "VALGR":
                f.write(' <Result>VALGR</Result>\n')
            elif status == "SKIP":
                f.write(' <Result>SKIP</Result>\n')
            else:
                f.write(' <Result>CRASH</Result>\n')

            f.write(' <Time real="%.3f"/>\n' % job.elapsed_time)
            f.write('</Example>\n')
            f.close()
        else:
            #
            # If we're not running an example, we're running a test suite.
            # These puppies are running concurrently and generating output
            # that was written to a temporary file to avoid collisions.
            #
            # Now that we are executing sequentially in the main thread, we can
            # concatenate the contents of the associated temp file to the main
            # results file and remove that temp file.
            #
            # One thing to consider is that a test suite can crash just as
            # well as any other program, so we need to deal with that
            # possibility as well.  If it ran correctly it will return 0
            # if it passed, or 1 if it failed.  In this case, we can count
            # on the results file it saved being complete.  If it crashed, it
            # will return some other code, and the file should be considered
            # corrupt and useless.  If the suite didn't create any XML, then
            # we're going to have to do it ourselves.
            #
            # Another issue is how to deal with a valgrind error.  If we run
            # a test suite under valgrind and it passes, we will get a return
            # code of 0 and there will be a valid xml results file since the code
            # ran to completion.  If we get a return code of 1 under valgrind,
            # the test case failed, but valgrind did not find any problems so the
            # test case return code was passed through.  We will have a valid xml
            # results file here as well since the test suite ran.  If we see a
            # return code of 2, this means that valgrind found an error (we asked
            # it to return 2 if it found a problem in run_job_synchronously) but
            # the suite ran to completion so there is a valid xml results file.
            # If the suite crashes under valgrind we will see some other error
            # return code (like 139).  If valgrind finds an illegal instruction or
            # some other strange problem, it will die with its own strange return
            # code (like 132).  However, if the test crashes by itself, not under
            # valgrind, we will also see some other return code.
            #
            # If the return code is 0, 1, or 2, we have a valid xml file.  If we
            # get another return code, we have no xml and we can't really say what
            # happened -- maybe the TestSuite crashed, maybe valgrind crashed due
            # to an illegal instruction.  If we get something beside 0-2, we assume
            # a crash and fake up an xml entry.  After this is all done, we still
            # need to indicate a valgrind error somehow, so we fake up an xml entry
            # with a VALGR result.  Thus, in the case of a working TestSuite that
            # fails valgrind, we'll see the PASS entry for the working TestSuite
            # followed by a VALGR failing test suite of the same name.
            #
            if job.is_skip:
                f = open(xml_results_file, 'a')
                f.write("<Test>\n")
                f.write(" <Name>%s</Name>\n" % job.display_name)
                f.write(' <Result>SKIP</Result>\n')
                f.write("</Test>\n")
                f.close()
            else:
                if job.returncode == 0 or job.returncode == 1 or job.returncode == 2:
                    f_to = open(xml_results_file, 'a')
                    f_from = open(job.tmp_file_name)
                    f_to.write(f_from.read())
                    f_to.close()
                    f_from.close()
                else:
                    f = open(xml_results_file, 'a')
                    f.write("<Test>\n")
                    f.write(" <Name>%s</Name>\n" % job.display_name)
                    f.write(' <Result>CRASH</Result>\n')
                    f.write("</Test>\n")
                    f.close()

                if job.returncode == 2:
                    f = open(xml_results_file, 'a')
                    f.write("<Test>\n")
                    f.write(" <Name>%s</Name>\n" % job.display_name)
                    f.write(' <Result>VALGR</Result>\n')
                    f.write("</Test>\n")
                    f.close()
    #
    # We have all of the tests run and the results written out.  One final
    # bit of housekeeping is to wait for all of the threads to close down
    # so we can exit gracefully.
    #
    for thread in threads:
        thread.join()

    #
    # Back at the beginning of time, we started the body of an XML document
    # since the test suites and examples were going to just write their
    # individual pieces.  So, we need to finish off and close out the XML
    # document.
    #
    f = open(xml_results_file, 'a')
    f.write('</Results>\n')
    f.close()

    # Print a quick summary of events
    print "%d of %d tests passed (%d passed, %d skipped, %d failed, %d crashed, %d valgrind errors)" % (passed_tests,
        total_tests, passed_tests, skipped_tests, failed_tests, crashed_tests, valgrind_errors)
    #
    # The last things to do are to translate the XML results file to "human
    # readable form" if the user asked for it (or make an XML file somewhere).
    #
    if len(options.html):
        translate_to_html(xml_results_file, options.html)

    if len(options.text):
        translate_to_text(xml_results_file, options.text)

    if len(options.xml):
        shutil.copyfile(xml_results_file, options.xml)

    #
    # Let the user know if they need to turn on tests or examples.
    #
    if not ENABLE_TESTS or not ENABLE_EXAMPLES:
        if not ENABLE_TESTS:
            print '*** Note: ns-3 tests are currently disabled. Enable them by adding'
            print '*** "--enable-tests" to ./waf configure or modifying your .ns3rc file.'
        if not ENABLE_EXAMPLES:
            print '*** Note: ns-3 examples are currently disabled. Enable them by adding'
            print '*** "--enable-examples" to ./waf configure or modifying your .ns3rc file.'

    #
    # If we have been asked to retain all of the little temporary files, we
    # don't delete them.  If we do delete the temporary files, delete only the
    # directory we just created.  We don't want to happily delete any retained
    # directories, which will probably surprise the user.
    #
    if not options.retain:
        shutil.rmtree(testpy_output_dir)

    if passed_tests + skipped_tests == total_tests:
        return 0 # success
    else:
        return 1 # catchall for general errors
def main(argv):
    parser = optparse.OptionParser()

    parser.add_option("-b", "--buildpath", action="store", type="string", dest="buildpath", default="",
                      metavar="BUILDPATH",
                      help="specify the path where ns-3 was built (defaults to the build directory for the current variant)")

    parser.add_option("-c", "--constrain", action="store", type="string", dest="constrain", default="",
                      help="constrain the test-runner by kind of test")

    parser.add_option("-e", "--example", action="store", type="string", dest="example", default="",
                      help="specify a single example to run (with relative path)")

    parser.add_option("-u", "--update-data", action="store_true", dest="update_data", default=False,
                      help="If examples use reference data files, get them to re-generate them")

    parser.add_option("-g", "--grind", action="store_true", dest="valgrind", default=False,
                      help="run the test suites and examples using valgrind")

    parser.add_option("-k", "--kinds", action="store_true", dest="kinds", default=False,
                      help="print the kinds of tests available")

    parser.add_option("-l", "--list", action="store_true", dest="list", default=False,
                      help="print the list of known tests")

    parser.add_option("-m", "--multiple", action="store_true", dest="multiple", default=False,
                      help="report multiple failures from test suites and test cases")

    parser.add_option("-n", "--nowaf", action="store_true", dest="nowaf", default=False,
                      help="do not run waf before starting testing")

    parser.add_option("-p", "--pyexample", action="store", type="string", dest="pyexample", default="",
                      metavar="PYEXAMPLE",
                      help="specify a single python example to run (with relative path)")

    parser.add_option("-r", "--retain", action="store_true", dest="retain", default=False,
                      help="retain all temporary files (which are normally deleted)")

    parser.add_option("-s", "--suite", action="store", type="string", dest="suite", default="",
                      metavar="TEST-SUITE",
                      help="specify a single test suite to run")

    parser.add_option("-t", "--text", action="store", type="string", dest="text", default="",
                      metavar="TEXT-FILE",
                      help="write detailed test results into TEXT-FILE.txt")

    parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
                      help="print progress and informational messages")

    parser.add_option("-w", "--web", "--html", action="store", type="string", dest="html", default="",
                      metavar="HTML-FILE",
                      help="write detailed test results into HTML-FILE.html")

    parser.add_option("-x", "--xml", action="store", type="string", dest="xml", default="",
                      help="write detailed test results into XML-FILE.xml")

    global options
    options = parser.parse_args()[0]
    signal.signal(signal.SIGINT, sigint_hook)

    return run_tests()

if __name__ == '__main__':
    sys.exit(main(sys.argv))