#! /usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
#
# Copyright (c) 2009 University of Washington
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
#

import os
import sys
import time
import optparse
import subprocess
import threading
import Queue
import signal
import xml.dom.minidom
import shutil
import re

#
# XXX This should really be part of a waf command to list the configuration
# items relative to optional ns-3 pieces.
#
# A list of interesting configuration items in the waf configuration
# cache which we may be interested in when deciding on which examples
# to run and how to run them.  These are set by waf during the
# configuration phase and the corresponding assignments are usually
# found in the associated subdirectory wscript files.
#
interesting_config_items = [
    "NS3_BUILDDIR",
    "NS3_MODULE_PATH",
    "ENABLE_NSC",
    "ENABLE_REAL_TIME",
    "ENABLE_EXAMPLES",
    "ENABLE_PYTHON_BINDINGS",
]

ENABLE_NSC = False
ENABLE_REAL_TIME = False
ENABLE_EXAMPLES = True

#
# If the user has constrained us to run certain kinds of tests, we can tell waf
# to only build the test-runner.
#
core_kinds = ["bvt", "core", "system", "unit"]

#
# There are some special cases for test suites that kill valgrind.  This is
# because NSC causes illegal instruction crashes when run under valgrind.
#
core_valgrind_skip_tests = [
    "ns3-tcp-cwnd",
    "nsc-tcp-loss",
    "ns3-tcp-interoperability",
]

#
# A list of examples to run as smoke tests just to ensure that they remain
# buildable and runnable over time.  Also a condition under which to run
# the example (from the waf configuration), and a condition under which to
# run the example under valgrind.  This is because NSC causes illegal
# instruction crashes when run under valgrind.
#
# XXX Should this not be read from a configuration file somewhere and not
# hardcoded?
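#
# Each entry is a tuple of the form
#
#   ("<path>/<example-name> [args]", "<run condition>", "<valgrind condition>")
#
# where the two condition strings are Python expressions that get eval()'d
# against the configuration values read from the waf cache below.  For
# example, ("tcp/tcp-nsc-lfn", "ENABLE_NSC == True", "True") is only queued
# to run when waf was configured with NSC support.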
#
example_tests = [
    ("csma/csma-bridge", "True", "True"),
    ("csma/csma-bridge-one-hop", "True", "True"),
    ("csma/csma-broadcast", "True", "True"),
    ("csma/csma-multicast", "True", "True"),
    ("csma/csma-one-subnet", "True", "True"),
    ("csma/csma-packet-socket", "True", "True"),
    ("csma/csma-ping", "True", "True"),
    ("csma/csma-raw-ip-socket", "True", "True"),
    ("csma/csma-star", "True", "True"),

    ("emulation/emu-ping", "False", "True"),
    ("emulation/emu-udp-echo", "False", "True"),

    ("error-model/simple-error-model", "True", "True"),

    ("ipv6/icmpv6-redirect", "True", "True"),
    ("ipv6/ping6", "True", "True"),
    ("ipv6/radvd", "True", "True"),
    ("ipv6/radvd-two-prefix", "True", "True"),
    ("ipv6/test-ipv6", "True", "True"),

    ("mesh/mesh", "True", "True"),

    ("naming/object-names", "True", "True"),

    ("realtime/realtime-udp-echo", "ENABLE_REAL_TIME == True", "True"),

    ("routing/dynamic-global-routing", "True", "True"),
    ("routing/global-injection-slash32", "True", "True"),
    ("routing/global-routing-slash32", "True", "True"),
    ("routing/mixed-global-routing", "True", "True"),
    ("routing/nix-simple", "True", "True"),
    ("routing/nms-p2p-nix", "False", "True"), # Takes too long to run
    ("routing/simple-alternate-routing", "True", "True"),
    ("routing/simple-global-routing", "True", "True"),
    ("routing/simple-point-to-point-olsr", "True", "True"),
    ("routing/simple-routing-ping6", "True", "True"),
    ("routing/static-routing-slash32", "True", "True"),
    ("routing/aodv", "True", "True"),

    ("spectrum/adhoc-aloha-ideal-phy", "True", "True"),
    ("spectrum/adhoc-aloha-ideal-phy-with-microwave-oven", "True", "True"),

    ("stats/wifi-example-sim", "True", "True"),

    ("tap/tap-wifi-dumbbell", "False", "True"), # Requires manual configuration

    ("tcp/star", "True", "True"),
    ("tcp/tcp-large-transfer", "True", "True"),
    ("tcp/tcp-nsc-lfn", "ENABLE_NSC == True", "True"),
    ("tcp/tcp-nsc-zoo", "ENABLE_NSC == True", "True"),
    ("tcp/tcp-star-server", "True", "True"),

    ("topology-read/topology-read --input=../../examples/topology-read/Inet_small_toposample.txt", "True", "True"),
    ("topology-read/topology-read --format=Rocketfuel --input=../../examples/topology-read/RocketFuel_toposample_1239_weights.txt", "True", "True"),

    ("tunneling/virtual-net-device", "True", "True"),

    ("tutorial/first", "True", "True"),
    ("tutorial/hello-simulator", "True", "True"),
    ("tutorial/second", "True", "True"),
    ("tutorial/third", "True", "True"),
    ("tutorial/fourth", "True", "True"),
    ("tutorial/fifth", "True", "True"),
    ("tutorial/sixth", "True", "True"),

    ("udp/udp-echo", "True", "True"),

    ("wireless/mixed-wireless", "True", "True"),
    ("wireless/multirate --totalTime=0.3s --rateManager=ns3::AarfcdWifiManager", "True", "True"),
    ("wireless/multirate --totalTime=0.3s --rateManager=ns3::AmrrWifiManager", "True", "True"),
    ("wireless/multirate --totalTime=0.3s --rateManager=ns3::CaraWifiManager", "True", "True"),
    ("wireless/multirate --totalTime=0.3s --rateManager=ns3::IdealWifiManager", "True", "True"),
    ("wireless/multirate --totalTime=0.3s --rateManager=ns3::MinstrelWifiManager", "True", "True"),
    ("wireless/multirate --totalTime=0.3s --rateManager=ns3::OnoeWifiManager", "True", "True"),
    ("wireless/multirate --totalTime=0.3s --rateManager=ns3::RraaWifiManager", "True", "True"),
    ("wireless/simple-wifi-frame-aggregation", "True", "True"),
    ("wireless/wifi-adhoc", "False", "True"), # Takes too long to run
    ("wireless/wifi-ap --verbose=0", "True", "True"), # Don't let it spew to stdout
    ("wireless/wifi-clear-channel-cmu", "False", "True"), # Requires specific hardware
    ("wireless/wifi-simple-adhoc", "True", "True"),
    ("wireless/wifi-simple-adhoc-grid", "True", "True"),
    ("wireless/wifi-simple-infra", "True", "True"),
    ("wireless/wifi-simple-interference", "True", "True"),
    ("wireless/wifi-wired-bridging", "True", "True"),

    ("wimax/wimax-simple", "True", "True"),
    ("wimax/wimax-ipv4", "True", "True"),
    ("wimax/wimax-multicast", "True", "True"),
]

#
# A list of python examples to run as smoke tests just to ensure that they
# remain runnable over time.  Also a condition under which to run the example
# (from the waf configuration).
#
# XXX Should this not be read from a configuration file somewhere and not
# hardcoded?
#
python_tests = [
    ("csma/csma-bridge.py", "True"),

    ("flowmon/wifi-olsr-flowmon.py", "True"),

    ("routing/simple-routing-ping6.py", "True"),

    ("tap/tap-csma-virtual-machine.py", "False"), # requires enable-sudo
    ("tap/tap-wifi-virtual-machine.py", "False"), # requires enable-sudo

    ("tutorial/first.py", "True"),

    ("wireless/wifi-ap.py", "True"),
    ("wireless/mixed-wireless.py", "True"),
]

#
# The test suites are going to want to output status.  They are running
# concurrently.  This means that unless we are careful, the output of
# the test suites will be interleaved.  Rather than introducing a lock
# file that could unintentionally start serializing execution, we ask
# the tests to write their output to a temporary directory and then
# put together the final output file when we "join" the test tasks back
# to the main thread.  In addition to this issue, the example programs
# often write lots and lots of trace files which we will just ignore.
# We put all of them into the temp directory as well, so they can be
# easily deleted.
#
TMP_OUTPUT_DIR = "testpy-output"

def get_node_text(node):
    for child in node.childNodes:
        if child.nodeType == child.TEXT_NODE:
            return child.nodeValue
    return "None"

#
# A simple example of writing a text file with a test result summary.  It is
# expected that this output will be fine for developers looking for problems.
#
def translate_to_text(results_file, text_file):
    f = open(text_file, 'w')
    dom = xml.dom.minidom.parse(results_file)
    for suite in dom.getElementsByTagName("TestSuite"):
        result = get_node_text(suite.getElementsByTagName("SuiteResult")[0])
        name = get_node_text(suite.getElementsByTagName("SuiteName")[0])
        time = get_node_text(suite.getElementsByTagName("SuiteTime")[0])
        output = "%s: Test Suite \"%s\" (%s)\n" % (result, name, time)
        f.write(output)
        if result != "CRASH":
            for case in suite.getElementsByTagName("TestCase"):
                result = get_node_text(case.getElementsByTagName("CaseResult")[0])
                name = get_node_text(case.getElementsByTagName("CaseName")[0])
                time = get_node_text(case.getElementsByTagName("CaseTime")[0])
                output = "  %s: Test Case \"%s\" (%s)\n" % (result, name, time)
                f.write(output)

                if result == "FAIL":
                    for details in case.getElementsByTagName("FailureDetails"):
                        f.write("    Details:\n")
                        f.write("      Message:   %s\n" % get_node_text(details.getElementsByTagName("Message")[0]))
                        f.write("      Condition: %s\n" % get_node_text(details.getElementsByTagName("Condition")[0]))
                        f.write("      Actual:    %s\n" % get_node_text(details.getElementsByTagName("Actual")[0]))
                        f.write("      Limit:     %s\n" % get_node_text(details.getElementsByTagName("Limit")[0]))
                        f.write("      File:      %s\n" % get_node_text(details.getElementsByTagName("File")[0]))
                        f.write("      Line:      %s\n" % get_node_text(details.getElementsByTagName("Line")[0]))

    for example in dom.getElementsByTagName("Example"):
        result = get_node_text(example.getElementsByTagName("Result")[0])
        name = get_node_text(example.getElementsByTagName("Name")[0])
        time = get_node_text(example.getElementsByTagName("ElapsedTime")[0])
        output = "%s: Example \"%s\" (%s)\n" % (result, name, time)
        f.write(output)

    f.close()
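#
# For illustration, the text summary written above comes out looking
# something like this (the suite name, case name and times here are
# hypothetical, not real output):
#
#   PASS: Test Suite "ns3-tcp-cwnd" (1.230)
#     PASS: Test Case "Check the congestion window" (1.230)
#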
#
# A simple example of writing an HTML file with a test result summary.  It is
# expected that this will eventually be made prettier as time progresses and
# we have time to tweak it.  This may end up being moved to a separate module
# since it will probably grow over time.
#
def translate_to_html(results_file, html_file):
    f = open(html_file, 'w')
    f.write("<html>\n")
    f.write("<body>\n")
    f.write("<center><h1>ns-3 Test Results</h1></center>\n")

    #
    # Read and parse the whole results file.
    #
    dom = xml.dom.minidom.parse(results_file)

    #
    # Iterate through the test suites
    #
    f.write("<h2>Test Suites</h2>\n")
    for suite in dom.getElementsByTagName("TestSuite"):

        #
        # For each test suite, get its name, result and execution time info
        #
        name = get_node_text(suite.getElementsByTagName("SuiteName")[0])
        result = get_node_text(suite.getElementsByTagName("SuiteResult")[0])
        time = get_node_text(suite.getElementsByTagName("SuiteTime")[0])

        #
        # Print a level three header with the result, name and time.  If the
        # test suite passed, the header is printed in green.  If the suite was
        # skipped, print it in orange, otherwise assume something bad happened
        # and print in red.
        #
        if result == "PASS":
            f.write("<h3 style=\"color:green\">%s: %s (%s)</h3>\n" % (result, name, time))
        elif result == "SKIP":
            f.write("<h3 style=\"color:#ff6600\">%s: %s (%s)</h3>\n" % (result, name, time))
        else:
            f.write("<h3 style=\"color:red\">%s: %s (%s)</h3>\n" % (result, name, time))

        #
        # The test case information goes in a table.
        #
        f.write("<table border=\"1\">\n")

        #
        # The first column of the table has the heading Result
        #
        f.write("<th> Result </th>\n")

        #
        # If the suite crashed or is skipped, there is no further information, so just
        # declare a new table row with the result (CRASH or SKIP) in it.  Looks like:
        #
        #   +--------+
        #   | Result |
        #   +--------+
        #   | CRASH  |
        #   +--------+
        #
        # Then go on to the next test suite.  Valgrind and skipped errors look the same.
        #
        if result in ["CRASH", "SKIP", "VALGR"]:
            f.write("<tr>\n")
            if result == "SKIP":
                f.write("<td style=\"color:#ff6600\">%s</td>\n" % result)
            else:
                f.write("<td style=\"color:red\">%s</td>\n" % result)
            f.write("</tr>\n")
            f.write("</table>\n")
            continue

        #
        # If the suite didn't crash, we expect more information, so fill out
        # the table heading row.  Like,
        #
        #   +--------+----------------+------+
        #   | Result | Test Case Name | Time |
        #   +--------+----------------+------+
        #
        f.write("<th>Test Case Name</th>\n")
        f.write("<th> Time </th>\n")

        #
        # If the test case failed, we need to print out some failure details
        # so extend the heading row again.  Like,
        #
        #   +--------+----------------+------+-----------------+
        #   | Result | Test Case Name | Time | Failure Details |
        #   +--------+----------------+------+-----------------+
        #
        if result == "FAIL":
            f.write("<th>Failure Details</th>\n")

        #
        # Now iterate through all of the test cases.
        #
        for case in suite.getElementsByTagName("TestCase"):

            #
            # Get the name, result and timing information from xml to use in
            # printing table below.
            #
            name = get_node_text(case.getElementsByTagName("CaseName")[0])
            result = get_node_text(case.getElementsByTagName("CaseResult")[0])
            time = get_node_text(case.getElementsByTagName("CaseTime")[0])

            #
            # If the test case failed, we iterate through possibly multiple
            # failure details
            #
            if result == "FAIL":
                #
                # There can be multiple failures for each test case.  The first
                # row always gets the result, name and timing information along
                # with the failure details.  Remaining failures don't duplicate
                # this information but just get blanks for readability.  Like,
                #
                #   +--------+----------------+------+-----------------+
                #   | Result | Test Case Name | Time | Failure Details |
                #   +--------+----------------+------+-----------------+
                #   |  FAIL  | The name       | time | It's busted     |
                #   +--------+----------------+------+-----------------+
                #   |        |                |      | Really broken   |
                #   +--------+----------------+------+-----------------+
                #   |        |                |      | Busted bad      |
                #   +--------+----------------+------+-----------------+
                #

                first_row = True
                for details in case.getElementsByTagName("FailureDetails"):

                    #
                    # Start a new row in the table for each possible Failure Detail
                    #
                    f.write("<tr>\n")

                    if first_row:
                        first_row = False
                        f.write("<td style=\"color:red\">%s</td>\n" % result)
                        f.write("<td>%s</td>\n" % name)
                        f.write("<td>%s</td>\n" % time)
                    else:
                        f.write("<td></td>\n")
                        f.write("<td></td>\n")
                        f.write("<td></td>\n")

                    f.write("<td>")
                    f.write("<b>Message: </b>%s, " % get_node_text(details.getElementsByTagName("Message")[0]))
                    f.write("<b>Condition: </b>%s, " % get_node_text(details.getElementsByTagName("Condition")[0]))
                    f.write("<b>Actual: </b>%s, " % get_node_text(details.getElementsByTagName("Actual")[0]))
                    f.write("<b>Limit: </b>%s, " % get_node_text(details.getElementsByTagName("Limit")[0]))
                    f.write("<b>File: </b>%s, " % get_node_text(details.getElementsByTagName("File")[0]))
                    f.write("<b>Line: </b>%s" % get_node_text(details.getElementsByTagName("Line")[0]))
                    f.write("</td>\n")

                    #
                    # End the table row
                    #
                    f.write("</tr>\n")
            else:
                #
                # If this particular test case passed, then we just print the PASS
                # result in green, followed by the test case name and its execution
                # time information.  These go off in <td> ... </td> table data.
                # The details table entry is left blank.
                #
                #   +--------+----------------+------+---------+
                #   | Result | Test Case Name | Time | Details |
                #   +--------+----------------+------+---------+
                #   |  PASS  | The name       | time |         |
                #   +--------+----------------+------+---------+
                #
                f.write("<tr>\n")
                f.write("<td style=\"color:green\">%s</td>\n" % result)
                f.write("<td>%s</td>\n" % name)
                f.write("<td>%s</td>\n" % time)
                f.write("<td></td>\n")
                f.write("</tr>\n")
        #
        # All of the rows are written, so we need to end the table.
        #
        f.write("</table>\n")

    #
    # That's it for all of the test suites.  Now we have to do something about
    # our examples.
    #
    f.write("<h2>Examples</h2>\n")

    #
    # Example status is rendered in a table just like the suites.
    #
    f.write("<table border=\"1\">\n")

    #
    # The table headings look like,
    #
    #   +--------+--------------+--------------+
    #   | Result | Example Name | Elapsed Time |
    #   +--------+--------------+--------------+
    #
    f.write("<th> Result </th>\n")
    f.write("<th>Example Name</th>\n")
    f.write("<th>Elapsed Time</th>\n")

    #
    # Now iterate through all of the examples
    #
    for example in dom.getElementsByTagName("Example"):

        #
        # Start a new row for each example
        #
        f.write("<tr>\n")

        #
        # Get the result and name of the example in question
        #
        result = get_node_text(example.getElementsByTagName("Result")[0])
        name = get_node_text(example.getElementsByTagName("Name")[0])
        time = get_node_text(example.getElementsByTagName("ElapsedTime")[0])

        #
        # If the example either failed or crashed, print its result status
        # in red; otherwise green.  This goes in a <td> ... </td> table data.
        #
        if result == "PASS":
            f.write("<td style=\"color:green\">%s</td>\n" % result)
        elif result == "SKIP":
            f.write("<td style=\"color:#ff6600\">%s</td>\n" % result)
        else:
            f.write("<td style=\"color:red\">%s</td>\n" % result)

        #
        # Write the example name as a new <td> tag data.
        #
        f.write("<td>%s</td>\n" % name)

        #
        # Write the elapsed time as a new <td> tag data.
        #
        f.write("<td>%s</td>\n" % time)

        #
        # That's it for the current example, so terminate the row.
        #
        f.write("</tr>\n")

    #
    # That's it for the table of examples, so terminate the table.
    #
    f.write("</table>\n")

    #
    # And that's it for the report, so finish up.
    #
    f.write("</body>\n")
    f.write("</html>\n")
    f.close()

#
# Python Control-C handling is broken in the presence of multiple threads.
# Signals get delivered to the runnable/running thread by default and if
# it is blocked, the signal is simply ignored.  So we hook sigint and set
# a global variable telling the system to shut down gracefully.
#
thread_exit = False

def sigint_hook(signal, frame):
    global thread_exit
    thread_exit = True
    return 0

#
# Waf can be configured to compile in debug or optimized modes.  In each
# case, the resulting build goes into a different directory.  If we want
# the tests to run from the correct code-base, we have to figure out which
# mode waf is running in.  This is called its active variant.
#
# XXX This function pokes around in the waf internal state file.  To be a
# little less hacky, we should add a command to waf to return this info
# and use that result.
#
def read_waf_active_variant():
    for line in open("build/c4che/default.cache.py").readlines():
        if line.startswith("NS3_ACTIVE_VARIANT"):
            exec(line, globals())
            break

    if options.verbose:
        print "NS3_ACTIVE_VARIANT == %s" % NS3_ACTIVE_VARIANT

#
# In general, the build process itself naturally takes care of figuring out
# which tests are built into the test runner.  For example, if waf configure
# determines that ENABLE_EMU is false due to some missing dependency,
# the tests for the emu net device simply will not be built and will
# therefore not be included in the built test runner.
#
# Examples, however, are a different story.  In that case, we are just given
# a list of examples that could be run.  Instead of just failing, for example,
# nsc-tcp-zoo if NSC is not present, we look into the waf saved configuration
# for relevant configuration items.
#
# XXX This function pokes around in the waf internal state file.  To be a
# little less hacky, we should add a command to waf to return this info
# and use that result.
#
def read_waf_config():
    for line in open("build/c4che/%s.cache.py" % NS3_ACTIVE_VARIANT).readlines():
        for item in interesting_config_items:
            if line.startswith(item):
                exec(line, globals())

    if options.verbose:
        for item in interesting_config_items:
            print "%s ==" % item, eval(item)
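#
# For illustration, the waf cache files read above are just a series of
# ordinary Python assignments, which is why we can simply exec() the lines
# we care about.  The lines look something like this (values hypothetical):
#
#   NS3_ACTIVE_VARIANT = 'debug'
#   NS3_BUILDDIR = '/home/user/repos/ns-3-dev/build'
#   ENABLE_NSC = False
#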
#
# It seems pointless to fork a process to run waf to fork a process to run
# the test runner, so we just run the test runner directly.  The main thing
# that waf would do for us would be to sort out the shared library path but
# we can deal with that easily and do here.
#
# There can be many different ns-3 repositories on a system, and each has
# its own shared libraries, so ns-3 doesn't hardcode a shared library search
# path -- it is cooked up dynamically, so we do that too.
#
def make_paths():
    have_DYLD_LIBRARY_PATH = False
    have_LD_LIBRARY_PATH = False
    have_PATH = False
    have_PYTHONPATH = False

    keys = os.environ.keys()
    for key in keys:
        if key == "DYLD_LIBRARY_PATH":
            have_DYLD_LIBRARY_PATH = True
        if key == "LD_LIBRARY_PATH":
            have_LD_LIBRARY_PATH = True
        if key == "PATH":
            have_PATH = True
        if key == "PYTHONPATH":
            have_PYTHONPATH = True

    pypath = os.path.join (NS3_BUILDDIR, NS3_ACTIVE_VARIANT, "bindings", "python")

    if not have_PYTHONPATH:
        os.environ["PYTHONPATH"] = pypath
    else:
        os.environ["PYTHONPATH"] += ":" + pypath

    if options.verbose:
        print "os.environ[\"PYTHONPATH\"] == %s" % os.environ["PYTHONPATH"]

    if sys.platform == "darwin":
        if not have_DYLD_LIBRARY_PATH:
            os.environ["DYLD_LIBRARY_PATH"] = ""
        for path in NS3_MODULE_PATH:
            os.environ["DYLD_LIBRARY_PATH"] += ":" + path
        if options.verbose:
            print "os.environ[\"DYLD_LIBRARY_PATH\"] == %s" % os.environ["DYLD_LIBRARY_PATH"]
    elif sys.platform == "win32":
        if not have_PATH:
            os.environ["PATH"] = ""
        for path in NS3_MODULE_PATH:
            os.environ["PATH"] += ';' + path
        if options.verbose:
            print "os.environ[\"PATH\"] == %s" % os.environ["PATH"]
    elif sys.platform == "cygwin":
        if not have_PATH:
            os.environ["PATH"] = ""
        for path in NS3_MODULE_PATH:
            os.environ["PATH"] += ":" + path
        if options.verbose:
            print "os.environ[\"PATH\"] == %s" % os.environ["PATH"]
    else:
        if not have_LD_LIBRARY_PATH:
            os.environ["LD_LIBRARY_PATH"] = ""
        for path in NS3_MODULE_PATH:
            os.environ["LD_LIBRARY_PATH"] += ":" + path
        if options.verbose:
            print "os.environ[\"LD_LIBRARY_PATH\"] == %s" % os.environ["LD_LIBRARY_PATH"]
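#
# For illustration, after make_paths() runs on a Linux system a child
# process inherits an environment along these lines (paths hypothetical):
#
#   PYTHONPATH=<existing>:/home/user/repos/ns-3-dev/build/debug/bindings/python
#   LD_LIBRARY_PATH=<existing>:<each entry of NS3_MODULE_PATH joined with ':'>
#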
#
# Short note on generating suppressions:
#
# See the valgrind documentation for a description of suppressions.  The easiest
# way to generate a suppression expression is by using the valgrind
# --gen-suppressions option.  To do that you have to figure out how to run the
# test in question.
#
# If you do "test.py -v -g -s <suite-name>" then test.py will output most of what
# you need.  For example, if you are getting a valgrind error in the
# devices-mesh-dot11s-regression test suite, you can run:
#
#   ./test.py -v -g -s devices-mesh-dot11s-regression
#
# You should see in the verbose output something that looks like:
#
#   Synchronously execute valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
#   --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/test-runner
#   --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev
#   --tempdir=testpy-output/2010-01-12-22-47-50-CUT
#   --out=testpy-output/2010-01-12-22-47-50-CUT/devices-mesh-dot11s-regression.xml
#
# You need to pull out the useful pieces, and so could run the following to
# reproduce your error:
#
#   valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
#   --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/test-runner
#   --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev
#   --tempdir=testpy-output
#
# Hint: Use the first part of the command as is, and point the "tempdir" to
# somewhere real.  You don't need to specify an "out" file.
#
# When you run the above command you should see your valgrind error.  The
# suppression expression(s) can be generated by adding the --gen-suppressions=yes
# option to valgrind.  Use something like:
#
#   valgrind --gen-suppressions=yes --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
#   --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/test-runner
#   --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev
#   --tempdir=testpy-output
#
# Now when valgrind detects an error it will ask:
#
#   ==27235== ---- Print suppression ? --- [Return/N/n/Y/y/C/c] ----
#
# to which you just enter 'y'.
#
# You will be provided with a suppression expression that looks something like
# the following:
#   {
#     <insert_a_suppression_name_here>
#     Memcheck:Addr8
#     fun:_ZN3ns36dot11s15HwmpProtocolMac8SendPreqESt6vectorINS0_6IePreqESaIS3_EE
#     fun:_ZN3ns36dot11s15HwmpProtocolMac10SendMyPreqEv
#     fun:_ZN3ns36dot11s15HwmpProtocolMac18RequestDestinationENS_12Mac48AddressEjj
#     ...
#     the rest of the stack frame
#     ...
#   }
#
# You need to add a suppression name which will only be printed out by valgrind in
# verbose mode (but it needs to be there in any case).  The entire stack frame is
# shown to completely characterize the error, but in most cases you won't need
# all of that info.  For example, if you want to turn off all errors that happen
# when the function (fun:) is called, you can just delete the rest of the stack
# frame.  You can also use wildcards to make the mangled signatures more readable.
#
# I added the following to the testpy.supp file for this particular error:
#
#   {
#     Suppress invalid read size errors in SendPreq() when using HwmpProtocolMac
#     Memcheck:Addr8
#     fun:*HwmpProtocolMac*SendPreq*
#   }
#
# Now, when you run valgrind the error will be suppressed.
#
VALGRIND_SUPPRESSIONS_FILE = "testpy.supp"

def run_job_synchronously(shell_command, directory, valgrind, is_python):
    (base, build) = os.path.split (NS3_BUILDDIR)
    suppressions_path = os.path.join (base, VALGRIND_SUPPRESSIONS_FILE)

    if is_python:
        path_cmd = "python " + os.path.join (base, shell_command)
    else:
        path_cmd = os.path.join (NS3_BUILDDIR, NS3_ACTIVE_VARIANT, shell_command)

    if valgrind:
        cmd = "valgrind --suppressions=%s --leak-check=full --show-reachable=yes --error-exitcode=2 %s" % (suppressions_path,
            path_cmd)
    else:
        cmd = path_cmd

    if options.verbose:
        print "Synchronously execute %s" % cmd

    start_time = time.time()
    proc = subprocess.Popen(cmd, shell = True, cwd = directory, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout_results, stderr_results = proc.communicate()
    elapsed_time = time.time() - start_time

    retval = proc.returncode

    #
    # valgrind sometimes has its own idea about what kind of memory management
    # errors are important.  We want to detect *any* leaks, so the way to do
    # that is to look for the presence of a valgrind leak summary section.
    #
    # If another error has occurred (like a test suite has failed), we don't
    # want to trump that error, so only do the valgrind output scan if the
    # test has otherwise passed (return code was zero).
    #
    if valgrind and retval == 0 and "== LEAK SUMMARY:" in stderr_results:
        retval = 2

    if options.verbose:
        print "Return code = ", retval
        print "stderr = ", stderr_results

    return (retval, stdout_results, stderr_results, elapsed_time)
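#
# For illustration, the callers below use this function along the following
# lines (arguments hypothetical):
#
#   (rc, out, err, et) = run_job_synchronously(
#       "utils/test-runner --suite=ns3-tcp-cwnd", os.getcwd(), False, False)
#
# where rc is the process (or valgrind) exit status and et is the elapsed
# wall clock time of the run.
#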
#
# This class defines a unit of testing work.  It will typically refer to
# a test suite to run using the test-runner, or an example to run directly.
#
class Job:
    def __init__(self):
        self.is_break = False
        self.is_skip = False
        self.is_example = False
        self.is_pyexample = False
        self.shell_command = ""
        self.display_name = ""
        self.basedir = ""
        self.tempdir = ""
        self.cwd = ""
        self.tmp_file_name = ""
        self.returncode = False
        self.elapsed_time = 0

    #
    # A job is either a standard job or a special job indicating that a worker
    # thread should exit.  This special job is indicated by setting is_break
    # to true.
    #
    def set_is_break(self, is_break):
        self.is_break = is_break

    #
    # If a job is to be skipped, we actually run it through the worker threads
    # to keep the PASS, FAIL, CRASH and SKIP processing all in one place.
    #
    def set_is_skip(self, is_skip):
        self.is_skip = is_skip

    #
    # Examples are treated differently than standard test suites.  This is
    # mostly because they are completely unaware that they are being run as
    # tests.  So we have to do some special case processing to make them look
    # like tests.
    #
    def set_is_example(self, is_example):
        self.is_example = is_example

    #
    # Examples are treated differently than standard test suites.  This is
    # mostly because they are completely unaware that they are being run as
    # tests.  So we have to do some special case processing to make them look
    # like tests.
    #
    def set_is_pyexample(self, is_pyexample):
        self.is_pyexample = is_pyexample

    #
    # This is the shell command that will be executed in the job.  For example,
    #
    #   "utils/test-runner --suite=some-test-suite"
    #
    def set_shell_command(self, shell_command):
        self.shell_command = shell_command

    #
    # This is the display name of the job, typically the test suite or example
    # name.  For example,
    #
    #   "some-test-suite" or "udp-echo"
    #
    def set_display_name(self, display_name):
        self.display_name = display_name

    #
    # This is the base directory of the repository out of which the tests are
    # being run.  It will be used deep down in the testing framework to determine
    # where the source directory of the test was, and therefore where to find
    # provided test vectors.  For example,
    #
    #   "/home/user/repos/ns-3-dev"
    #
    def set_basedir(self, basedir):
        self.basedir = basedir

    #
    # This is the directory to which a running test suite should write any
    # temporary files.
    #
    def set_tempdir(self, tempdir):
        self.tempdir = tempdir

    #
    # This is the current working directory that will be given to an executing
    # test as it is being run.  It will be used for examples to tell them where
    # to write all of the pcap files that we will be carefully ignoring.  For
    # example,
    #
    #   "/tmp/unchecked-traces"
    #
    def set_cwd(self, cwd):
        self.cwd = cwd

    #
    # This is the temporary results file name that will be given to an executing
    # test as it is being run.  We will be running all of our tests in parallel
    # so there must be multiple temporary output files.  These will be collected
    # into a single XML file at the end and then be deleted.
    #
    def set_tmp_file_name(self, tmp_file_name):
        self.tmp_file_name = tmp_file_name

    #
    # The return code received when the job process is executed.
    #
    def set_returncode(self, returncode):
        self.returncode = returncode

    #
    # The elapsed real time for the job execution.
    #
    def set_elapsed_time(self, elapsed_time):
        self.elapsed_time = elapsed_time
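#
# Putting the setters together, the dispatch code later in this file builds
# and queues a test suite job roughly like this (values illustrative):
#
#   job = Job()
#   job.set_display_name("ns3-tcp-cwnd")
#   job.set_shell_command("utils/test-runner --suite=ns3-tcp-cwnd")
#   job.set_cwd(os.getcwd())
#   job.set_basedir(os.getcwd())
#   input_queue.put(job)
#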
#
# The worker thread class that handles the actual running of a given test.
# Once spawned, it receives requests for work through its input_queue and
# ships the results back through the output_queue.
#
class worker_thread(threading.Thread):
    def __init__(self, input_queue, output_queue):
        threading.Thread.__init__(self)
        self.input_queue = input_queue
        self.output_queue = output_queue

    def run(self):
        while True:
            job = self.input_queue.get()
            #
            # Worker threads continue running until explicitly told to stop with
            # a special job.
            #
            if job.is_break:
                return
            #
            # If the global interrupt handler sets the thread_exit variable,
            # we stop doing real work and just report back that a "break" in
            # the normal command processing has happened.
            #
            if thread_exit == True:
                job.set_is_break(True)
                self.output_queue.put(job)
                continue

            #
            # If we are actually supposed to skip this job, do so.  Note that
            # if is_skip is true, returncode is undefined.
            #
            if job.is_skip:
                if options.verbose:
                    print "Skip %s" % job.shell_command
                self.output_queue.put(job)
                continue

            #
            # Otherwise go about the business of running tests as normal.
            #
            else:
                if options.verbose:
                    print "Launch %s" % job.shell_command

                if job.is_example or job.is_pyexample:
                    #
                    # If we have an example, the shell command is all we need to
                    # know.  It will be something like "examples/udp-echo" or
                    # "examples/mixed-wireless.py".
                    #
                    (job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command,
                        job.cwd, options.valgrind, job.is_pyexample)
                else:
                    #
                    # If we're a test suite, we need to provide a little more info
                    # to the test runner, specifically the base directory and temp
                    # file name
                    #
                    (job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command +
                        " --basedir=%s --tempdir=%s --out=%s" % (job.basedir, job.tempdir, job.tmp_file_name),
                        job.cwd, options.valgrind, False)

                job.set_elapsed_time(et)

                if options.verbose:
                    print "returncode = %d" % job.returncode
                    print "---------- begin standard out ----------"
                    print standard_out
                    print "---------- begin standard err ----------"
                    print standard_err
                    print "---------- end standard err ----------"

                self.output_queue.put(job)

#
# This is the main function that does the work of interacting with the test-runner
# itself.
#
def run_tests():
    #
    # Run waf to make sure that everything is built, configured and ready to go
    # unless we are explicitly told not to.  We want to be careful about causing
    # our users pain while waiting for extraneous stuff to compile and link, so
    # we allow users that know what they're doing to not invoke waf at all.
    #
    if not options.nowaf:

        #
        # If the user is running the "kinds" or "list" options, there is an
        # implied dependency on the test-runner since we call that program
        # if those options are selected.  We will exit after processing those
        # options, so if we see them, we can safely only build the test-runner.
        #
        # If the user has constrained us to running only a particular kind of
        # test, we can only ask waf to build what we know will be necessary.
        # For example, if the user only wants to run BVT tests, we only have
        # to build the test-runner and can ignore all of the examples.
        #
        # If the user only wants to run a single example, then we can just build
        # that example.
        #
        # If there is no constraint, then we have to build everything since the
        # user wants to run everything.
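        #
        # For example, the resulting build commands work out to something
        # like (illustrative):
        #
        #   ./test.py --constrain=core        ->  ./waf --target=test-runner
        #   ./test.py --example=udp/udp-echo  ->  ./waf --target=udp-echo
        #   ./test.py                         ->  ./waf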
        #
        if options.kinds or options.list or (len(options.constrain) and options.constrain in core_kinds):
            if sys.platform == "win32":
                waf_cmd = "waf --target=test-runner"
            else:
                waf_cmd = "./waf --target=test-runner"
        elif len(options.example):
            if sys.platform == "win32":
                waf_cmd = "waf --target=%s" % os.path.basename(options.example)
            else:
                waf_cmd = "./waf --target=%s" % os.path.basename(options.example)
        else:
            if sys.platform == "win32":
                waf_cmd = "waf"
            else:
                waf_cmd = "./waf"

        if options.verbose:
            print "Building: %s" % waf_cmd

        proc = subprocess.Popen(waf_cmd, shell = True)
        proc.communicate()
        if proc.returncode:
            print >> sys.stderr, "Waf died. Not running tests"
            return proc.returncode

    #
    # Pull some interesting configuration information out of waf, primarily
    # so we can know where executables can be found, but also to tell us what
    # pieces of the system have been built.  This will tell us what examples
    # are runnable.
    #
    read_waf_active_variant()
    read_waf_config()
    make_paths()

    #
    # If lots of logging is enabled, we can crash Python when it tries to
    # save all of the text.  We just don't allow logging to be turned on when
    # test.py runs.  If you want to see logging output from your tests, you
    # have to run them using the test-runner directly.
    #
    os.environ["NS_LOG"] = ""

    #
    # There are a couple of options that imply we can exit before starting
    # up a bunch of threads and running tests.  Let's detect these cases and
    # handle them without doing all of the hard work.
    #
    if options.kinds:
        path_cmd = os.path.join("utils", "test-runner --kinds")
        (rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
        print standard_out

    if options.list:
        path_cmd = os.path.join("utils", "test-runner --list")
        (rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
        print standard_out

    if options.kinds or options.list:
        return

    #
    # We communicate results in two ways.  First, a simple message relating
    # PASS, FAIL, CRASH or SKIP is always written to the standard output.  It
    # is expected that this will be one of the main use cases.  A developer can
    # just run test.py with no options and see that all of the tests still
    # pass.
    #
    # The second main use case is when detailed status is requested (with the
    # --text or --html options).  Typically this will be text if a developer
    # finds a problem, or HTML for nightly builds.  In these cases, an
    # XML file is written containing the status messages from the test suites.
    # This file is then read and translated into text or HTML.  It is expected
    # that nobody will really be interested in the XML, so we write it somewhere
    # with a unique name (time) to avoid collisions.  In case an error happens,
    # we provide a runtime option to retain the temporary files.
    #
    # When we run examples as smoke tests, they are going to want to create
    # lots and lots of trace files.  We aren't really interested in the contents
    # of the trace files, so we also just stash them off in the temporary dir.
    # The retain option also causes these unchecked trace files to be kept.
    #
    date_and_time = time.strftime("%Y-%m-%d-%H-%M-%S-CUT", time.gmtime())

    if not os.path.exists(TMP_OUTPUT_DIR):
        os.makedirs(TMP_OUTPUT_DIR)

    testpy_output_dir = os.path.join(TMP_OUTPUT_DIR, date_and_time)

    if not os.path.exists(testpy_output_dir):
        os.makedirs(testpy_output_dir)

    #
    # Create the main output file and start filling it with XML.  We need to
    # do this since the tests will just append individual results to this file.
    #
    xml_results_file = os.path.join(testpy_output_dir, "results.xml")
    f = open(xml_results_file, 'w')
    f.write('<?xml version="1.0"?>\n')
    f.write('<Results>\n')
    f.close()

    #
    # We need to figure out what test suites to execute.  We are either given one
    # suite or example explicitly via the --suite or --example/--pyexample option,
    # or we need to call into the test runner and ask it to list all of the available
    # test suites.  Further, we need to provide the constraint information if it
    # has been given to us.
    #
    # This translates into allowing the following options with respect to the
    # suites
    #
    #   ./test.py:                                           run all of the suites and examples
    #   ./test.py --constrain=core:                          run all of the suites of all kinds
    #   ./test.py --constrain=unit:                          run all unit suites
    #   ./test.py --suite=some-test-suite:                   run a single suite
    #   ./test.py --example=udp/udp-echo:                    run no test suites
    #   ./test.py --pyexample=wireless/mixed-wireless.py:    run no test suites
    #   ./test.py --suite=some-suite --example=some-example: run the single suite
    #
    # We can also use the --constrain option to provide an ordering of test
    # execution quite easily.
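    #
    # The test-runner prints one suite name per line, so when we ask it for a
    # list below, what comes back is a newline-separated string along the
    # lines of (names illustrative):
    #
    #   ns3-tcp-cwnd
    #   ns3-tcp-interoperability
    #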
    #
    if len(options.suite):
        suites = options.suite + "\n"
    elif len(options.example) == 0 and len(options.pyexample) == 0:
        if len(options.constrain):
            path_cmd = os.path.join("utils", "test-runner --list --constrain=%s" % options.constrain)
            (rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
        else:
            path_cmd = os.path.join("utils", "test-runner --list")
            (rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
    else:
        suites = ""

    #
    # suite_list will be either a single test suite name that the user has
    # indicated she wants to run or a list of test suites provided by
    # the test-runner possibly according to user provided constraints.
    # We go through the trouble of setting up the parallel execution even
    # in the case of a single suite to avoid having to process the results
    # in two different places.
    #
    suite_list = suites.split('\n')

    #
    # We now have a possibly large number of test suites to run, so we want to
    # run them in parallel.  We're going to spin up a number of worker threads
    # that will run our test jobs for us.
    #
    input_queue = Queue.Queue(0)
    output_queue = Queue.Queue(0)

    jobs = 0
    threads = []

    #
    # In Python 2.6 you can just use the multiprocessing module, but we don't
    # want to introduce that dependency yet; so we jump through a few hoops.
    #
    processors = 1

    if sys.platform != "win32":
        if 'SC_NPROCESSORS_ONLN' in os.sysconf_names:
            processors = os.sysconf('SC_NPROCESSORS_ONLN')
        else:
            proc = subprocess.Popen("sysctl -n hw.ncpu", shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout_results, stderr_results = proc.communicate()
            if len(stderr_results) == 0:
                processors = int(stdout_results)
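    #
    # Note: on Python 2.6 and later this whole dance can be replaced by
    #
    #   import multiprocessing
    #   processors = multiprocessing.cpu_count()
    #
    # but we keep the sysconf/sysctl probing so older Pythons still work.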
    #
    # Now, spin up one thread per processor which will eventually mean one test
    # per processor running concurrently.
    #
    for i in range(processors):
        thread = worker_thread(input_queue, output_queue)
        threads.append(thread)
        thread.start()

    #
    # Keep track of some summary statistics
    #
    total_tests = 0
    skipped_tests = 0

    #
    # We now have worker threads spun up, and a list of work to do.  So, run
    # through the list of test suites and dispatch a job to run each one.
    #
    # Dispatching will run with unlimited speed and the worker threads will
    # execute as fast as possible from the queue.
    #
    # Note that we actually dispatch tests to be skipped, so all of the
    # PASS, FAIL, CRASH and SKIP processing is done in the same place.
    #
    for test in suite_list:
        test = test.strip()
        if len(test):
            job = Job()
            job.set_is_example(False)
            job.set_is_pyexample(False)
            job.set_display_name(test)
            job.set_tmp_file_name(os.path.join(testpy_output_dir, "%s.xml" % test))
            job.set_cwd(os.getcwd())
            job.set_basedir(os.getcwd())
            job.set_tempdir(testpy_output_dir)
            if options.multiple:
                multiple = " --multiple"
            else:
                multiple = ""

            path_cmd = os.path.join("utils", "test-runner --suite=%s%s" % (test, multiple))
            job.set_shell_command(path_cmd)

            if options.valgrind and test in core_valgrind_skip_tests:
                job.set_is_skip(True)

            if options.verbose:
                print "Queue %s" % test

            input_queue.put(job)
            jobs = jobs + 1
            total_tests = total_tests + 1

    #
    # We've taken care of the discovered or specified test suites.  Now we
    # have to deal with examples run as smoke tests.  We have a list of all of
    # the example programs it makes sense to try and run.  Each example will
    # have a condition associated with it that must evaluate to true for us
    # to try and execute it.  This is used to determine if the example has
    # a dependency that is not satisfied.  For example, if an example depends
    # on NSC being configured by waf, that example should have a condition
    # that evaluates to true if NSC is enabled.  For example,
    #
    #   ("tcp-nsc-zoo", "ENABLE_NSC == True"),
    #
    # In this case, the example "tcp-nsc-zoo" will only be run if we find the
    # waf configuration variable "ENABLE_NSC" to be True.
    #
    # We don't care at all how the trace files come out, so we just write them
    # to a single temporary directory.
    #
    # XXX As it stands, all of the trace files have unique names, and so file
    # collisions can only happen if two instances of an example are running in
    # two versions of the test.py process concurrently.  We may want to create
    # uniquely named temporary traces directories to avoid this problem.
    #
    # We need to figure out what examples to execute.  We are either given one
    # suite or example explicitly via the --suite or --example option, or we
    # need to walk the list of examples looking for available example
    # conditions.
    #
    # This translates into allowing the following options with respect to
    # the suites:
    #
    #   ./test.py:                                            run all of the examples
    #   ./test.py --constrain=unit:                           run no examples
    #   ./test.py --constrain=example:                        run all of the examples
    #   ./test.py --suite=some-test-suite:                    run no examples
    #   ./test.py --example=some-example:                     run the single example
    #   ./test.py --suite=some-suite --example=some-example:  run the single example
    #
    # XXX We could use constrain to separate out examples used for
    # performance testing.
    #
    if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0:
        if len(options.constrain) == 0 or options.constrain == "example":
            if ENABLE_EXAMPLES:
                for test, do_run, do_valgrind_run in example_tests:
                    if eval(do_run):
                        job = Job()
                        job.set_is_example(True)
                        job.set_is_pyexample(False)
                        job.set_display_name(test)
                        job.set_tmp_file_name("")
                        job.set_cwd(testpy_output_dir)
                        job.set_basedir(os.getcwd())
                        job.set_tempdir(testpy_output_dir)
                        job.set_shell_command("examples/%s" % test)

                        if options.valgrind and not eval(do_valgrind_run):
                            job.set_is_skip(True)

                        if options.verbose:
                            print "Queue %s" % test

                        input_queue.put(job)
                        jobs = jobs + 1
                        total_tests = total_tests + 1

    elif len(options.example):
        #
        # If you tell me to run an example, I will try and run the example
        # irrespective of any condition.
        #
        job = Job()
        job.set_is_example(True)
        job.set_is_pyexample(False)
        job.set_display_name(options.example)
        job.set_tmp_file_name("")
        job.set_cwd(testpy_output_dir)
        job.set_basedir(os.getcwd())
        job.set_tempdir(testpy_output_dir)
        job.set_shell_command("examples/%s" % options.example)

        if options.verbose:
            print "Queue %s" % options.example

        input_queue.put(job)
        jobs = jobs + 1
        total_tests = total_tests + 1

    #
    # Run some Python examples as smoke tests.  We have a list of all of
    # the Python example programs it makes sense to try and run.  Each
    # example has a condition associated with it that must evaluate to True
    # for us to try and execute it.  This is used to determine whether the
    # example has a dependency that is not satisfied.
    #
    # We don't care at all how the trace files come out, so we just write
    # them to a single temporary directory.
    #
    # We need to figure out which Python examples to execute.
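    #
    # The python_tests list used below is defined earlier in this file; its
    # entries are (name, condition) pairs, analogous to example_tests but
    # without a separate valgrind condition.  An illustrative (hypothetical)
    # entry might look like:
    #
    #   python_tests = [
    #       ("csma/csma-bridge.py", "True"),
    #   ]
    #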
    # We are either given one pyexample explicitly via the --pyexample
    # option, or we need to walk the list of Python examples.
    #
    # This translates into allowing the following options with respect to
    # the suites:
    #
    #   ./test.py --constrain=pyexample:        run all of the python examples
    #   ./test.py --pyexample=some-example.py:  run the single python example
    #
    if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0:
        if len(options.constrain) == 0 or options.constrain == "pyexample":
            if ENABLE_EXAMPLES:
                for test, do_run in python_tests:
                    if eval(do_run):
                        job = Job()
                        job.set_is_example(False)
                        job.set_is_pyexample(True)
                        job.set_display_name(test)
                        job.set_tmp_file_name("")
                        job.set_cwd(testpy_output_dir)
                        job.set_basedir(os.getcwd())
                        job.set_tempdir(testpy_output_dir)
                        job.set_shell_command("examples/%s" % test)

                        #
                        # Python programs and valgrind do not work and play
                        # well together, so we skip them under valgrind.
                        # We go through the trouble of doing all of this
                        # work to report the skipped tests in a consistent
                        # way through the output formatter.
                        #
                        if options.valgrind:
                            job.set_is_skip(True)

                        #
                        # The user can disable python bindings, so we need
                        # to pay attention to that and give some feedback
                        # that we're not testing them.
                        #
                        if not ENABLE_PYTHON_BINDINGS:
                            job.set_is_skip(True)

                        if options.verbose:
                            print "Queue %s" % test

                        input_queue.put(job)
                        jobs = jobs + 1
                        total_tests = total_tests + 1

    elif len(options.pyexample):
        #
        # If you tell me to run a python example, I will try and run the
        # example irrespective of any condition.
        #
        job = Job()
        job.set_is_example(False)  # be explicit, for symmetry with the branches above
        job.set_is_pyexample(True)
        job.set_display_name(options.pyexample)
        job.set_tmp_file_name("")
        job.set_cwd(testpy_output_dir)
        job.set_basedir(os.getcwd())
        job.set_tempdir(testpy_output_dir)
        job.set_shell_command("examples/%s" % options.pyexample)

        if options.verbose:
            print "Queue %s" % options.pyexample

        input_queue.put(job)
        jobs = jobs + 1
        total_tests = total_tests + 1

    #
    # Tell the worker threads to pack up and go home for the day.  Each one
    # will exit when it sees its is_break task.
    #
    for i in range(processors):
        job = Job()
        job.set_is_break(True)
        input_queue.put(job)
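    #
    # This is the classic "poison pill" shutdown handshake: we enqueue one
    # sentinel per worker, and each worker consumes exactly one before
    # exiting.  The run loop of worker_thread (defined earlier) does roughly
    # the following (a sketch, not the exact code):
    #
    #   while True:
    #       job = self.input_queue.get()
    #       if job.is_break:
    #           break  # sentinel seen; this worker shuts down
    #       <run the job and put the result on self.output_queue>
    #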
    #
    # Now all of the tests have been dispatched, so all we have to do here
    # in the main thread is to wait for them to complete.  Keyboard
    # interrupt handling is broken as mentioned above.  We use a signal
    # handler to catch SIGINT and set a global variable.  When the worker
    # threads sense this they stop doing real work and just start throwing
    # jobs back at us with is_break set to True.  In this case, there are no
    # real results so we ignore them.  If there are real results, we always
    # print PASS or FAIL to standard out as a quick indication of what
    # happened.
    #
    passed_tests = 0
    failed_tests = 0
    crashed_tests = 0
    valgrind_errors = 0
    for i in range(jobs):
        job = output_queue.get()
        if job.is_break:
            continue

        if job.is_example or job.is_pyexample:
            kind = "Example"
        else:
            kind = "TestSuite"

        if job.is_skip:
            status = "SKIP"
            skipped_tests = skipped_tests + 1
        else:
            if job.returncode == 0:
                status = "PASS"
                passed_tests = passed_tests + 1
            elif job.returncode == 1:
                failed_tests = failed_tests + 1
                status = "FAIL"
            elif job.returncode == 2:
                valgrind_errors = valgrind_errors + 1
                status = "VALGR"
            else:
                crashed_tests = crashed_tests + 1
                status = "CRASH"

        print "%s: %s %s" % (status, kind, job.display_name)

        if job.is_example or job.is_pyexample:
            #
            # Examples are the odd man out here.  They are written without
            # any knowledge that they are going to be run as a test, so we
            # need to cook up some kind of output for them.  We're writing
            # an XML file, so we write some simple XML that says we ran the
            # example.  (The tag names here must match what
            # translate_to_html and translate_to_text expect.)
            #
            # XXX We could add some timing information to the examples,
            # i.e., run them through time and print the results here.
            #
            f = open(xml_results_file, 'a')
            f.write('<Example>\n')
            example_name = "  <Name>%s</Name>\n" % job.display_name
            f.write(example_name)

            if status == "PASS":
                f.write('  <Result>PASS</Result>\n')
            elif status == "FAIL":
                f.write('  <Result>FAIL</Result>\n')
            elif status == "VALGR":
                f.write('  <Result>VALGR</Result>\n')
            elif status == "SKIP":
                f.write('  <Result>SKIP</Result>\n')
            else:
                f.write('  <Result>CRASH</Result>\n')

            f.write('  <ElapsedTime>%.3f</ElapsedTime>\n' % job.elapsed_time)
            f.write('</Example>\n')
            f.close()
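            #
            # The appended fragment then looks like (illustrative values):
            #
            #   <Example>
            #     <Name>tutorial/first</Name>
            #     <Result>PASS</Result>
            #     <ElapsedTime>0.512</ElapsedTime>
            #   </Example>
            #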
        else:
            #
            # If we're not running an example, we're running a test suite.
            # These puppies are running concurrently and generating output
            # that was written to a temporary file to avoid collisions.
            #
            # Now that we are executing sequentially in the main thread, we
            # can concatenate the contents of the associated temp file to
            # the main results file and remove that temp file.
            #
            # One thing to consider is that a test suite can crash just as
            # well as any other program, so we need to deal with that
            # possibility as well.  If it ran correctly it will return 0 if
            # it passed, or 1 if it failed.  In either case, we can count on
            # the results file it saved being complete.  If it crashed, it
            # will return some other code, and the file should be considered
            # corrupt and useless.  If the suite didn't create any XML, then
            # we're going to have to do it ourselves.
            #
            # Another issue is how to deal with a valgrind error.  If we run
            # a test suite under valgrind and it passes, we will get a
            # return code of 0 and there will be a valid xml results file
            # since the code ran to completion.  If we get a return code of
            # 1 under valgrind, the test case failed, but valgrind did not
            # find any problems, so the test case return code was passed
            # through.  We will have a valid xml results file here as well
            # since the test suite ran.  If we see a return code of 2, this
            # means that valgrind found an error (we asked it to return 2 if
            # it found a problem in run_job_synchronously), but the suite
            # ran to completion so there is a valid xml results file.  If
            # the suite crashes under valgrind we will see some other error
            # return code (like 139).  If valgrind finds an illegal
            # instruction or some other strange problem, it will die with
            # its own strange return code (like 132).  However, if the test
            # crashes by itself, not under valgrind, we will also see some
            # other return code.
            #
            # If the return code is 0, 1, or 2, we have a valid xml file.
            # If we get another return code, we have no xml and we can't
            # really say what happened -- maybe the TestSuite crashed, maybe
            # valgrind crashed due to an illegal instruction.  If we get
            # something besides 0-2, we assume a crash and fake up an xml
            # entry.  After this is all done, we still need to indicate a
            # valgrind error somehow, so we fake up an xml entry with a
            # VALGR result.  Thus, in the case of a working TestSuite that
            # fails valgrind, we'll see the PASS entry for the working
            # TestSuite followed by a VALGR failing test suite of the same
            # name.
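            #
            # In summary, for non-skipped suites the handling below
            # implements this table:
            #
            #   returncode   temp xml valid?   action
            #   ----------   ---------------   --------------------------------
            #   0            yes               copy suite results as-is
            #   1            yes               copy suite results as-is
            #   2            yes               copy, then add a faked VALGR entry
            #   other        no                fake up a CRASH entry
            #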
            #
            if job.is_skip:
                f = open(xml_results_file, 'a')
                f.write("<TestSuite>\n")
                f.write("  <SuiteName>%s</SuiteName>\n" % job.display_name)
                f.write('  <SuiteResult>SKIP</SuiteResult>\n')
                f.write('  <SuiteTime>Execution times not available</SuiteTime>\n')
                f.write("</TestSuite>\n")
                f.close()
            else:
                if job.returncode == 0 or job.returncode == 1 or job.returncode == 2:
                    f_to = open(xml_results_file, 'a')
                    f_from = open(job.tmp_file_name)
                    f_to.write(f_from.read())
                    f_to.close()
                    f_from.close()
                else:
                    f = open(xml_results_file, 'a')
                    f.write("<TestSuite>\n")
                    f.write("  <SuiteName>%s</SuiteName>\n" % job.display_name)
                    f.write('  <SuiteResult>CRASH</SuiteResult>\n')
                    f.write('  <SuiteTime>Execution times not available</SuiteTime>\n')
                    f.write("</TestSuite>\n")
                    f.close()

                if job.returncode == 2:
                    f = open(xml_results_file, 'a')
                    f.write("<TestSuite>\n")
                    f.write("  <SuiteName>%s</SuiteName>\n" % job.display_name)
                    f.write('  <SuiteResult>VALGR</SuiteResult>\n')
                    f.write('  <SuiteTime>Execution times not available</SuiteTime>\n')
                    f.write("</TestSuite>\n")
                    f.close()

    #
    # We have all of the tests run and the results written out.  One final
    # bit of housekeeping is to wait for all of the threads to close down
    # so we can exit gracefully.
    #
    for thread in threads:
        thread.join()

    #
    # Back at the beginning of time, we started the body of an XML document
    # since the test suites and examples were going to just write their
    # individual pieces.  So, we need to finish off and close out the XML
    # document.  (The closing tag must match the <TestResults> element
    # opened when the results file was created.)
    #
    f = open(xml_results_file, 'a')
    f.write('</TestResults>\n')
    f.close()
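    #
    # At this point the results document as a whole looks like (an
    # illustrative outline):
    #
    #   <?xml version="1.0"?>
    #   <TestResults>
    #     <TestSuite> ... </TestSuite>
    #     <Example> ... </Example>
    #     ...
    #   </TestResults>
    #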
    #
    # Print a quick summary of events.
    #
    print "%d of %d tests passed (%d passed, %d skipped, %d failed, %d crashed, %d valgrind errors)" % (
        passed_tests, total_tests, passed_tests, skipped_tests, failed_tests,
        crashed_tests, valgrind_errors)

    #
    # The last things to do are to translate the XML results file to "human
    # readable form" if the user asked for it (or to make a copy of the XML
    # file somewhere).
    #
    if len(options.html):
        translate_to_html(xml_results_file, options.html)

    if len(options.text):
        translate_to_text(xml_results_file, options.text)

    if len(options.xml):
        shutil.copyfile(xml_results_file, options.xml)

    #
    # If we have been asked to retain all of the little temporary files, we
    # don't delete them.  If we do delete the temporary files, delete only
    # the directory we just created.  We don't want to happily delete any
    # retained directories, which would probably surprise the user.
    #
    if not options.retain:
        shutil.rmtree(testpy_output_dir)

    if passed_tests + skipped_tests == total_tests:
        return 0 # success
    else:
        return 1 # catchall for general errors

def main(argv):
    parser = optparse.OptionParser()
    parser.add_option("-c", "--constrain", action="store", type="string", dest="constrain", default="",
                      metavar="KIND",
                      help="constrain the test-runner by kind of test")

    parser.add_option("-e", "--example", action="store", type="string", dest="example", default="",
                      metavar="EXAMPLE",
                      help="specify a single example to run")

    parser.add_option("-g", "--grind", action="store_true", dest="valgrind", default=False,
                      help="run the test suites and examples using valgrind")

    parser.add_option("-k", "--kinds", action="store_true", dest="kinds", default=False,
                      help="print the kinds of tests available")

    parser.add_option("-l", "--list", action="store_true", dest="list", default=False,
                      help="print the list of known tests")

    parser.add_option("-m", "--multiple", action="store_true", dest="multiple", default=False,
                      help="report multiple failures from test suites and test cases")

    parser.add_option("-n", "--nowaf", action="store_true", dest="nowaf", default=False,
                      help="do not run waf before starting testing")

    parser.add_option("-p", "--pyexample", action="store", type="string", dest="pyexample", default="",
                      metavar="PYEXAMPLE",
                      help="specify a single python example to run")

    parser.add_option("-r", "--retain", action="store_true", dest="retain", default=False,
                      help="retain all temporary files (which are normally deleted)")

    parser.add_option("-s", "--suite", action="store", type="string", dest="suite", default="",
                      metavar="TEST-SUITE",
                      help="specify a single test suite to run")

    parser.add_option("-t", "--text", action="store", type="string", dest="text", default="",
                      metavar="TEXT-FILE",
                      help="write detailed test results into TEXT-FILE.txt")

    parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
                      help="print progress and informational messages")

    parser.add_option("-w", "--web", "--html", action="store", type="string", dest="html", default="",
                      metavar="HTML-FILE",
                      help="write detailed test results into HTML-FILE.html")

    parser.add_option("-x", "--xml", action="store", type="string", dest="xml", default="",
                      metavar="XML-FILE",
                      help="write detailed test results into XML-FILE.xml")

    global options
    options = parser.parse_args()[0]
    signal.signal(signal.SIGINT, sigint_hook)

    return run_tests()

if __name__ == '__main__':
    sys.exit(main(sys.argv))
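#
# Typical invocations (illustrative; available suite names depend on your
# build -- "ns3-tcp-cwnd" is one of the suites referenced above):
#
#   ./test.py                        run all test suites and examples
#   ./test.py -c core                run only suites of kind "core"
#   ./test.py -s ns3-tcp-cwnd        run a single test suite
#   ./test.py -e csma/csma-bridge    run a single example
#   ./test.py -g -w results.html     run under valgrind and write HTML results
#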