test.py
author Josh Pelkey <jpelkey@gatech.edu>
Wed, 11 Aug 2010 11:37:37 -0400
changeset 6553 fb5ad9c7755a
parent 6349 4bab6b10a034
permissions -rwxr-xr-x
update release notes and fix doxygen warnings
     1 #! /usr/bin/env python
     2 ## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
     3 #
     4 # Copyright (c) 2009 University of Washington
     5 #
     6 # This program is free software; you can redistribute it and/or modify
     7 # it under the terms of the GNU General Public License version 2 as
     8 # published by the Free Software Foundation;
     9 #
    10 # This program is distributed in the hope that it will be useful,
    11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
    12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    13 # GNU General Public License for more details.
    14 #
    15 # You should have received a copy of the GNU General Public License
    16 # along with this program; if not, write to the Free Software
    17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    18 #
    19 
    20 import os
    21 import sys
    22 import time
    23 import optparse
    24 import subprocess
    25 import threading
    26 import Queue
    27 import signal
    28 import xml.dom.minidom
    29 import shutil
    30 import re
    31 
    32 #
    33 # XXX This should really be part of a waf command to list the configuration
    34 # items relative to optional ns-3 pieces.
    35 #
    36 # A list of configuration items from the waf configuration cache that we
    37 # consult when deciding which examples to run and how to run them.  These
    38 # are set by waf during the configuration phase and the corresponding
    39 # assignments are usually found in the associated subdirectory wscript
    40 # files.
    41 #
    42 interesting_config_items = [
    43     "NS3_BUILDDIR",
    44     "NS3_MODULE_PATH",
    45     "ENABLE_NSC",
    46     "ENABLE_REAL_TIME",
    47     "ENABLE_EXAMPLES",
    48     "ENABLE_PYTHON_BINDINGS",
    49 ]
    50 
    51 ENABLE_NSC = False
    52 ENABLE_REAL_TIME = False
    53 ENABLE_EXAMPLES = True
    54 
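       #
       # (Note: these defaults are only placeholders.  read_waf_config() below is
       # expected to overwrite them with whatever waf actually configured, since
       # it exec()s the matching assignments out of the waf configuration cache.)
       #
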
    55 #
    56 # If the user has constrained us to run certain kinds of tests, we can tell waf
    57 # to only build the test-runner rather than everything.
    58 #
    59 core_kinds = ["bvt", "core", "system", "unit"]
    60 
    61 #
    62 # There are some special cases for test suites that kill valgrind.  This is
    63 # because NSC causes illegal instruction crashes when run under valgrind.
    64 #
    65 core_valgrind_skip_tests = [
    66     "ns3-tcp-cwnd",
    67     "nsc-tcp-loss",
    68     "ns3-tcp-interoperability",
    69 ]
    70 
    71 #
    72 # A list of examples to run as smoke tests just to ensure that they remain 
    73 # buildable and runnable over time.  Also a condition under which to run
    74 # the example (from the waf configuration), and a condition under which to
    75 # run the example under valgrind.  This is because NSC causes illegal 
    76 # instruction crashes when run under valgrind.
    77 #
    78 # XXX Should this not be read from a configuration file somewhere, rather
    79 # than hardcoded?
    80 #
    81 example_tests = [
    82     ("csma/csma-bridge", "True", "True"),
    83     ("csma/csma-bridge-one-hop", "True", "True"),
    84     ("csma/csma-broadcast", "True", "True"),
    85     ("csma/csma-multicast", "True", "True"),
    86     ("csma/csma-one-subnet", "True", "True"),
    87     ("csma/csma-packet-socket", "True", "True"),
    88     ("csma/csma-ping", "True", "True"),
    89     ("csma/csma-raw-ip-socket", "True", "True"),
    90     ("csma/csma-star", "True", "True"),
    91 
    92     ("emulation/emu-ping", "False", "True"),
    93     ("emulation/emu-udp-echo", "False", "True"),
    94 
    95     ("error-model/simple-error-model", "True", "True"),
    96 
    97     ("ipv6/icmpv6-redirect", "True", "True"),
    98     ("ipv6/ping6", "True", "True"),
    99     ("ipv6/radvd", "True", "True"),
   100     ("ipv6/radvd-two-prefix", "True", "True"),    
   101     ("ipv6/test-ipv6", "True", "True"),
   102 
   103     ("mesh/mesh", "True", "True"),
   104 
   105     ("naming/object-names", "True", "True"),
   106 
   107     ("realtime/realtime-udp-echo", "ENABLE_REAL_TIME == True", "True"),
   108 
   109     ("routing/dynamic-global-routing", "True", "True"),
   110     ("routing/global-injection-slash32", "True", "True"),
   111     ("routing/global-routing-slash32", "True", "True"),
   112     ("routing/mixed-global-routing", "True", "True"),
   113     ("routing/nix-simple", "True", "True"),
   114     ("routing/nms-p2p-nix", "False", "True"), # Takes too long to run
   115     ("routing/simple-alternate-routing", "True", "True"),
   116     ("routing/simple-global-routing", "True", "True"),
   117     ("routing/simple-point-to-point-olsr", "True", "True"),
   118     ("routing/simple-routing-ping6", "True", "True"),
   119     ("routing/static-routing-slash32", "True", "True"),
   120     ("routing/aodv", "True", "True"),
   121 
   122     ("spectrum/adhoc-aloha-ideal-phy", "True", "True"),
   123     ("spectrum/adhoc-aloha-ideal-phy-with-microwave-oven", "True", "True"),
   124 
   125     ("stats/wifi-example-sim", "True", "True"),
   126 
   127     ("tap/tap-wifi-dumbbell", "False", "True"), # Requires manual configuration
   128 
   129     ("tcp/star", "True", "True"),
   130     ("tcp/tcp-large-transfer", "True", "True"),
   131     ("tcp/tcp-nsc-lfn", "ENABLE_NSC == True", "True"),
   132     ("tcp/tcp-nsc-zoo", "ENABLE_NSC == True", "True"),
   133     ("tcp/tcp-star-server", "True", "True"),
   134 
   135     ("topology-read/topology-read --input=../../examples/topology-read/Inet_small_toposample.txt", "True", "True"),
   136     ("topology-read/topology-read --format=Rocketfuel --input=../../examples/topology-read/RocketFuel_toposample_1239_weights.txt", "True", "True"),
   137 
   138     ("tunneling/virtual-net-device", "True", "True"),
   139 
   140     ("tutorial/first", "True", "True"),
   141     ("tutorial/hello-simulator", "True", "True"),
   142     ("tutorial/second", "True", "True"),
   143     ("tutorial/third", "True", "True"),
   144     ("tutorial/fourth", "True", "True"),
   145     ("tutorial/fifth", "True", "True"),
   146     ("tutorial/sixth", "True", "True"),
   147 
   148     ("udp/udp-echo", "True", "True"),
   149 
   150     ("wireless/mixed-wireless", "True", "True"),
   151     ("wireless/multirate --totalTime=0.3s --rateManager=ns3::AarfcdWifiManager", "True", "True"), 
   152     ("wireless/multirate --totalTime=0.3s --rateManager=ns3::AmrrWifiManager", "True", "True"), 
   153     ("wireless/multirate --totalTime=0.3s --rateManager=ns3::CaraWifiManager", "True", "True"), 
   154     ("wireless/multirate --totalTime=0.3s --rateManager=ns3::IdealWifiManager", "True", "True"), 
   155     ("wireless/multirate --totalTime=0.3s --rateManager=ns3::MinstrelWifiManager", "True", "True"), 
   156     ("wireless/multirate --totalTime=0.3s --rateManager=ns3::OnoeWifiManager", "True", "True"), 
   157     ("wireless/multirate --totalTime=0.3s --rateManager=ns3::RraaWifiManager", "True", "True"), 
   158     ("wireless/simple-wifi-frame-aggregation", "True", "True"),
   159     ("wireless/wifi-adhoc", "False", "True"), # Takes too long to run
   160     ("wireless/wifi-ap --verbose=0", "True", "True"), # Don't let it spew to stdout
   161     ("wireless/wifi-clear-channel-cmu", "False", "True"), # Requires specific hardware
   162     ("wireless/wifi-simple-adhoc", "True", "True"),
   163     ("wireless/wifi-simple-adhoc-grid", "True", "True"),
   164     ("wireless/wifi-simple-infra", "True", "True"),
   165     ("wireless/wifi-simple-interference", "True", "True"),
   166     ("wireless/wifi-wired-bridging", "True", "True"),
   167 
   168     ("wimax/wimax-simple", "True", "True"),
   169     ("wimax/wimax-ipv4", "True", "True"),
   170     ("wimax/wimax-multicast", "True", "True"),
   171 ]
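
       #
       # (The condition strings above are eval()'d later, in run_tests(), against
       # the globals pulled in from the waf configuration, so they can reference
       # flags like ENABLE_NSC and ENABLE_REAL_TIME directly.)
       #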
   172 
   173 #
   174 # A list of python examples to run as smoke tests just to ensure that they
   175 # remain runnable over time.  Also a condition under which to run the example
   176 # (from the waf configuration).
   177 #
   178 # XXX Should this not be read from a configuration file somewhere, rather
   179 # than hardcoded?
   180 #
   181 python_tests = [
   182     ("csma/csma-bridge.py", "True"),
   183 
   184     ("flowmon/wifi-olsr-flowmon.py", "True"),
   185 
   186     ("routing/simple-routing-ping6.py", "True"),
   187 
   188     ("tap/tap-csma-virtual-machine.py", "False"), # requires enable-sudo
   189     ("tap/tap-wifi-virtual-machine.py", "False"), # requires enable-sudo
   190 
   191     ("tutorial/first.py", "True"),
   192 
   193     ("wireless/wifi-ap.py", "True"),
   194     ("wireless/mixed-wireless.py", "True"),
   195 ]
   196 
   197 #
   198 # The test suites are going to want to output status.  They are running
   199 # concurrently.  This means that unless we are careful, the output of
   200 # the test suites will be interleaved.  Rather than introducing a lock
   201 # file that could unintentionally start serializing execution, we ask
   202 # the tests to write their output to a temporary directory and then 
   203 # put together the final output file when we "join" the test tasks back
   204 # to the main thread.  In addition to this issue, the example programs
   205 # often write lots and lots of trace files which we will just ignore.
   206 # We put all of them into the temp directory as well, so they can be
   207 # easily deleted.
   208 #
   209 TMP_OUTPUT_DIR = "testpy-output"
   210 
   211 def get_node_text(node):
   212     for child in node.childNodes:
   213         if child.nodeType == child.TEXT_NODE:
   214             return child.nodeValue
   215     return "None"
   216 
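       #
       # For orientation only -- a rough, assumed sketch of the results XML that
       # the two translators below walk.  The element names match what the code
       # reads; the exact layout produced by the test framework may differ:
       #
       #   <TestSuite>
       #     <SuiteName>some-suite</SuiteName>
       #     <SuiteResult>PASS</SuiteResult>
       #     <SuiteTime>...</SuiteTime>
       #     <TestCase>
       #       <CaseName>some-case</CaseName>
       #       <CaseResult>FAIL</CaseResult>
       #       <CaseTime>...</CaseTime>
       #       <FailureDetails>
       #         <Message>...</Message><Condition>...</Condition><Actual>...</Actual>
       #         <Limit>...</Limit><File>...</File><Line>...</Line>
       #       </FailureDetails>
       #     </TestCase>
       #   </TestSuite>
       #   <Example>
       #     <Name>some-example</Name><Result>PASS</Result><ElapsedTime>...</ElapsedTime>
       #   </Example>
       #
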
   217 #
   218 # A simple example of writing a text file with a test result summary.  It is 
   219 # expected that this output will be fine for developers looking for problems.
   220 #
   221 def translate_to_text(results_file, text_file):
   222     f = open(text_file, 'w')
   223     dom = xml.dom.minidom.parse(results_file)
   224     for suite in dom.getElementsByTagName("TestSuite"):
   225         result = get_node_text(suite.getElementsByTagName("SuiteResult")[0])
   226         name = get_node_text(suite.getElementsByTagName("SuiteName")[0])
   227         time = get_node_text(suite.getElementsByTagName("SuiteTime")[0])
   228         output = "%s: Test Suite \"%s\" (%s)\n" % (result, name, time)
   229         f.write(output)
   230         if result != "CRASH":
   231             for case in suite.getElementsByTagName("TestCase"):
   232                 result = get_node_text(case.getElementsByTagName("CaseResult")[0])
   233                 name = get_node_text(case.getElementsByTagName("CaseName")[0])
   234                 time = get_node_text(case.getElementsByTagName("CaseTime")[0])
   235                 output =   "  %s: Test Case \"%s\" (%s)\n" % (result, name, time)
   236                 f.write(output)
   237 
   238                 if result == "FAIL":
   239                     for details in case.getElementsByTagName("FailureDetails"):
   240                         f.write("    Details:\n")
   241                         f.write("      Message:   %s\n" % get_node_text(details.getElementsByTagName("Message")[0]))
   242                         f.write("      Condition: %s\n" % get_node_text(details.getElementsByTagName("Condition")[0]))
   243                         f.write("      Actual:    %s\n" % get_node_text(details.getElementsByTagName("Actual")[0]))
   244                         f.write("      Limit:     %s\n" % get_node_text(details.getElementsByTagName("Limit")[0]))
   245                         f.write("      File:      %s\n" % get_node_text(details.getElementsByTagName("File")[0]))
   246                         f.write("      Line:      %s\n" % get_node_text(details.getElementsByTagName("Line")[0]))
   247 
   248     for example in dom.getElementsByTagName("Example"):
   249         result = get_node_text(example.getElementsByTagName("Result")[0])
   250         name = get_node_text(example.getElementsByTagName("Name")[0])
   251         time = get_node_text(example.getElementsByTagName("ElapsedTime")[0])
   252         output = "%s: Example \"%s\" (%s)\n" % (result, name, time)
   253         f.write(output)
   254 
   255     f.close()
   256     
   257 #
   258 # A simple example of writing an HTML file with a test result summary.  It is 
   259 # expected that this will eventually be made prettier as time progresses and
   260 # we have time to tweak it.  This may end up being moved to a separate module
   261 # since it will probably grow over time.
   262 #
   263 def translate_to_html(results_file, html_file):
   264     f = open(html_file, 'w')
   265     f.write("<html>\n")
   266     f.write("<body>\n")
   267     f.write("<center><h1>ns-3 Test Results</h1></center>\n")
   268 
   269     #
   270     # Read and parse the whole results file.
   271     #
   272     dom = xml.dom.minidom.parse(results_file)
   273 
   274     #
   275     # Iterate through the test suites
   276     #
   277     f.write("<h2>Test Suites</h2>\n")
   278     for suite in dom.getElementsByTagName("TestSuite"):
   279      
   280         #
   281         # For each test suite, get its name, result and execution time info
   282         #
   283         name = get_node_text(suite.getElementsByTagName("SuiteName")[0])
   284         result = get_node_text(suite.getElementsByTagName("SuiteResult")[0])
   285         time = get_node_text(suite.getElementsByTagName("SuiteTime")[0])
   286 
   287         # 
   288         # Print a level three header with the result, name and time.  If the 
   289         # test suite passed, the header is printed in green. If the suite was
   290         # skipped, print it in orange, otherwise assume something bad happened
   291         # and print in red.
   292         #
   293         if result == "PASS":
   294             f.write("<h3 style=\"color:green\">%s: %s (%s)</h3>\n" % (result, name, time))
   295         elif result == "SKIP":
   296             f.write("<h3 style=\"color:#ff6600\">%s: %s (%s)</h3>\n" % (result, name, time))
   297         else:
   298             f.write("<h3 style=\"color:red\">%s: %s (%s)</h3>\n" % (result, name, time))
   299 
   300         #
   301         # The test case information goes in a table.
   302         #
   303         f.write("<table border=\"1\">\n")
   304 
   305         #
   306         # The first column of the table has the heading Result
   307         #
   308         f.write("<th> Result </th>\n")
   309 
   310         #
   311         # If the suite crashed or is skipped, there is no further information, so just
   312         # declare a new table row with the result (CRASH or SKIP) in it.  Looks like:
   313         #
   314         #   +--------+
   315         #   | Result |
   316         #   +--------+
   317         #   | CRASH  |
   318         #   +--------+
   319         #
   320         # Then go on to the next test suite.  Valgrind errors and crashes look the same; skips get the same single-cell row, but in orange.
   321         #
   322         if result in ["CRASH", "SKIP", "VALGR"]:
   323             f.write("<tr>\n")
   324             if result == "SKIP":
   325                 f.write("<td style=\"color:#ff6600\">%s</td>\n" % result)
   326             else:
   327                 f.write("<td style=\"color:red\">%s</td>\n" % result)
   328             f.write("</tr>\n")
   329             f.write("</table>\n")
   330             continue
   331 
   332         #
   333         # If the suite didn't crash, we expect more information, so fill out
   334         # the table heading row.  Like,
   335         #
   336         #   +--------+----------------+------+
   337         #   | Result | Test Case Name | Time |
   338         #   +--------+----------------+------+
   339         #
   340         f.write("<th>Test Case Name</th>\n")
   341         f.write("<th> Time </th>\n")
   342 
   343         #
   344         # If the test case failed, we need to print out some failure details
   345         # so extend the heading row again.  Like,
   346         #
   347         #   +--------+----------------+------+-----------------+
   348         #   | Result | Test Case Name | Time | Failure Details |
   349         #   +--------+----------------+------+-----------------+
   350         #
   351         if result == "FAIL":
   352             f.write("<th>Failure Details</th>\n")
   353 
   354         #
   355         # Now iterate through all of the test cases.
   356         #
   357         for case in suite.getElementsByTagName("TestCase"):
   358 
   359             #
   360             # Get the name, result and timing information from xml to use in
   361             # printing table below.
   362             #
   363             name = get_node_text(case.getElementsByTagName("CaseName")[0])
   364             result = get_node_text(case.getElementsByTagName("CaseResult")[0])
   365             time = get_node_text(case.getElementsByTagName("CaseTime")[0])
   366 
   367             #
   368             # If the test case failed, we iterate through possibly multiple
   369             # failure details
   370             #
   371             if result == "FAIL":
   372                 #
   373                 # There can be multiple failures for each test case.  The first
   374                 # row always gets the result, name and timing information along
   375                 # with the failure details.  Remaining failures don't duplicate
   376                 # this information but just get blanks for readability.  Like,
   377                 #
   378                 #   +--------+----------------+------+-----------------+
   379                 #   | Result | Test Case Name | Time | Failure Details |
   380                 #   +--------+----------------+------+-----------------+
   381                 #   |  FAIL  | The name       | time | It's busted     |   
   382                 #   +--------+----------------+------+-----------------+
   383                 #   |        |                |      | Really broken   |   
   384                 #   +--------+----------------+------+-----------------+
   385                 #   |        |                |      | Busted bad      |   
   386                 #   +--------+----------------+------+-----------------+
   387                 #
   388 
   389                 first_row = True
   390                 for details in case.getElementsByTagName("FailureDetails"):
   391 
   392                     #
   393                     # Start a new row in the table for each possible Failure Detail
   394                     #
   395                     f.write("<tr>\n")
   396 
   397                     if first_row:
   398                         first_row = False
   399                         f.write("<td style=\"color:red\">%s</td>\n" % result)
   400                         f.write("<td>%s</td>\n" % name)
   401                         f.write("<td>%s</td>\n" % time)
   402                     else:
   403                         f.write("<td></td>\n")
   404                         f.write("<td></td>\n")
   405                         f.write("<td></td>\n")
   406 
   407                     f.write("<td>")
   408                     f.write("<b>Message: </b>%s, " % get_node_text(details.getElementsByTagName("Message")[0]))
   409                     f.write("<b>Condition: </b>%s, " % get_node_text(details.getElementsByTagName("Condition")[0]))
   410                     f.write("<b>Actual: </b>%s, " % get_node_text(details.getElementsByTagName("Actual")[0]))
   411                     f.write("<b>Limit: </b>%s, " % get_node_text(details.getElementsByTagName("Limit")[0]))
   412                     f.write("<b>File: </b>%s, " % get_node_text(details.getElementsByTagName("File")[0]))
   413                     f.write("<b>Line: </b>%s" % get_node_text(details.getElementsByTagName("Line")[0]))
   414                     f.write("</td>\n")
   415                     
   416                     #
   417                     # End the table row
   418                     #
   419                     f.write("</tr>\n")
   420             else:
   421                 #
   422                 # If this particular test case passed, then we just print the PASS
   423                 # result in green, followed by the test case name and its execution
   424                 # time information.  These go off in <td> ... </td> table data.
   425                 # The details table entry is left blank.
   426                 #
   427                 #   +--------+----------------+------+---------+
   428                 #   | Result | Test Case Name | Time | Details |
   429                 #   +--------+----------------+------+---------+
   430                 #   |  PASS  | The name       | time |         |   
   431                 #   +--------+----------------+------+---------+
   432                 #
   433                 f.write("<tr>\n")
   434                 f.write("<td style=\"color:green\">%s</td>\n" % result)
   435                 f.write("<td>%s</td>\n" % name)
   436                 f.write("<td>%s</td>\n" % time)
   437                 f.write("<td></td>\n")
   438                 f.write("</tr>\n")
   439         #
   440         # All of the rows are written, so we need to end the table.
   441         #
   442         f.write("</table>\n")
   443 
   444     #
   445     # That's it for all of the test suites.  Now we have to do something about 
   446     # our examples.
   447     #
   448     f.write("<h2>Examples</h2>\n")
   449 
   450     #
   451     # Example status is rendered in a table just like the suites.
   452     #
   453     f.write("<table border=\"1\">\n")
   454 
   455     #
   456     # The table headings look like,
   457     #
   458     #   +--------+--------------+--------------+
   459     #   | Result | Example Name | Elapsed Time |
   460     #   +--------+--------------+--------------+
   461     #
   462     f.write("<th> Result </th>\n")
   463     f.write("<th>Example Name</th>\n")
   464     f.write("<th>Elapsed Time</th>\n")
   465 
   466     #
   467     # Now iterate through all of the examples
   468     #
   469     for example in dom.getElementsByTagName("Example"):
   470         
   471         #
   472         # Start a new row for each example
   473         #
   474         f.write("<tr>\n")
   475         
   476         #
   477         # Get the result and name of the example in question
   478         #
   479         result = get_node_text(example.getElementsByTagName("Result")[0])
   480         name =   get_node_text(example.getElementsByTagName("Name")[0])
   481         time =   get_node_text(example.getElementsByTagName("ElapsedTime")[0])
   482 
   483         #
   484         # If the example either failed or crashed, print its result status
   485         # in red; otherwise green.  This goes in a <td> ... </td> table data
   486         #
   487         if result == "PASS":
   488             f.write("<td style=\"color:green\">%s</td>\n" % result)
   489         elif result == "SKIP":
   490             f.write("<td style=\"color:#ff6600\">%s</td>\n" % result)
   491         else:
   492             f.write("<td style=\"color:red\">%s</td>\n" % result)
   493 
   494         #
   495     # Write the example name in a new table data cell.
   496         #
   497         f.write("<td>%s</td>\n" % name)
   498 
   499         #
   500     # Write the elapsed time in a new table data cell.
   501         #
   502         f.write("<td>%s</td>\n" % time)
   503 
   504         #
   505         # That's it for the current example, so terminate the row.
   506         #
   507         f.write("</tr>\n")
   508 
   509     #
   510     # That's it for the table of examples, so terminate the table.
   511     #
   512     f.write("</table>\n")
   513 
   514     #
   515     # And that's it for the report, so finish up.
   516     #
   517     f.write("</body>\n")
   518     f.write("</html>\n")
   519     f.close()
   520     
   521 #
   522 # Python Control-C handling is broken in the presence of multiple threads.  
   523 # Signals get delivered to the runnable/running thread by default and if 
   524 # it is blocked, the signal is simply ignored.  So we hook sigint and set 
   525 # a global variable telling the system to shut down gracefully.
   526 #
   527 thread_exit = False
   528 
   529 def sigint_hook(signal, frame):
   530     global thread_exit
   531     thread_exit = True
   532     return 0
   533 
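       #
       # (Illustrative note: the hook above only takes effect once it is actually
       # registered, which is assumed to happen elsewhere in this script with
       # something along the lines of:
       #
       #   signal.signal(signal.SIGINT, sigint_hook)
       #
       # after which the worker threads notice thread_exit and wind down instead
       # of being killed mid-run.)
       #
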
   534 #
   535 # Waf can be configured to compile in debug or optimized modes.  In each
   536 # case, the resulting build goes into a different directory.  If we want
   537 # the tests to run from the correct code base, we have to figure out which
   538 # mode waf is running in.  This is called its active variant.
   539 #
   540 # XXX This function pokes around in the waf internal state file.  To be a
   541 # little less hacky, we should add a command to waf to return this info
   542 # and use that result.
   543 #
   544 def read_waf_active_variant():
   545     for line in open("build/c4che/default.cache.py").readlines():
   546         if line.startswith("NS3_ACTIVE_VARIANT"):
   547             exec(line, globals())
   548             break
   549 
   550     if options.verbose:
   551         print "NS3_ACTIVE_VARIANT == %s" % NS3_ACTIVE_VARIANT
   552 
   553 #
   554 # In general, the build process itself naturally takes care of figuring out
   555 # which tests are built into the test runner.  For example, if waf configure
   556 # determines that ENABLE_EMU is false due to some missing dependency,
   557 # the tests for the emu net device simply will not be built and will 
   558 # therefore not be included in the built test runner.
   559 #
   560 # Examples, however, are a different story.  In that case, we are just given
   561 # a list of examples that could be run.  Instead of just failing, for example,
   562 # nsc-tcp-zoo if NSC is not present, we look into the waf saved configuration
   563 # for relevant configuration items.  
   564 #
   565 # XXX This function pokes around in the waf internal state file.  To be a
   566 # little less hacky, we should add a command to waf to return this info
   567 # and use that result.
   568 #
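       # For illustration only (an assumed example, not read from any real file):
       # the waf cache is itself a Python file of plain assignments, which is why
       # the exec() below can pull values straight into our globals.  A few lines
       # might look something like:
       #
       #   ENABLE_EXAMPLES = True
       #   ENABLE_REAL_TIME = False
       #   NS3_MODULE_PATH = ['/path/to/build/debug', '/path/to/build/debug/src/core']
       #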
   569 def read_waf_config():
   570     for line in open("build/c4che/%s.cache.py" % NS3_ACTIVE_VARIANT).readlines():
   571         for item in interesting_config_items:
   572             if line.startswith(item):
   573                 exec(line, globals())
   574 
   575     if options.verbose:
   576         for item in interesting_config_items:
   577             print "%s ==" % item, eval(item)
   578 
   579 #
   580 # It seems pointless to fork a process to run waf to fork a process to run
   581 # the test runner, so we just run the test runner directly.  The main thing 
   582 # that waf would do for us would be to sort out the shared library path but
   583 # we can deal with that easily and do so here.
   584 #
   585 # There can be many different ns-3 repositories on a system, and each has 
   586 # its own shared libraries, so ns-3 doesn't hardcode a shared library search
   587 # path -- it is cooked up dynamically, so we do that too.
   588 #
   589 def make_paths():
   590     have_DYLD_LIBRARY_PATH = False
   591     have_LD_LIBRARY_PATH = False
   592     have_PATH = False
   593     have_PYTHONPATH = False
   594 
   595     keys = os.environ.keys()
   596     for key in keys:
   597         if key == "DYLD_LIBRARY_PATH":
   598             have_DYLD_LIBRARY_PATH = True
   599         if key == "LD_LIBRARY_PATH":
   600             have_LD_LIBRARY_PATH = True
   601         if key == "PATH":
   602             have_PATH = True
   603         if key == "PYTHONPATH":
   604             have_PYTHONPATH = True
   605 
   606     pypath = os.path.join (NS3_BUILDDIR, NS3_ACTIVE_VARIANT, "bindings", "python")
   607 
   608     if not have_PYTHONPATH:
   609         os.environ["PYTHONPATH"] = pypath
   610     else:
   611         os.environ["PYTHONPATH"] += ":" + pypath
   612 
   613     if options.verbose:
   614         print "os.environ[\"PYTHONPATH\"] == %s" % os.environ["PYTHONPATH"]
   615 
   616     if sys.platform == "darwin":
   617         if not have_DYLD_LIBRARY_PATH:
   618             os.environ["DYLD_LIBRARY_PATH"] = ""
   619         for path in NS3_MODULE_PATH:
   620             os.environ["DYLD_LIBRARY_PATH"] += ":" + path
   621         if options.verbose:
   622             print "os.environ[\"DYLD_LIBRARY_PATH\"] == %s" % os.environ["DYLD_LIBRARY_PATH"]
   623     elif sys.platform == "win32":
   624         if not have_PATH:
   625             os.environ["PATH"] = ""
   626         for path in NS3_MODULE_PATH:
   627             os.environ["PATH"] += ';' + path
   628         if options.verbose:
   629             print "os.environ[\"PATH\"] == %s" % os.environ["PATH"]
   630     elif sys.platform == "cygwin":
   631         if not have_PATH:
   632             os.environ["PATH"] = ""
   633         for path in NS3_MODULE_PATH:
   634             os.environ["PATH"] += ":" + path
   635         if options.verbose:
   636             print "os.environ[\"PATH\"] == %s" % os.environ["PATH"]
   637     else:
   638         if not have_LD_LIBRARY_PATH:
   639             os.environ["LD_LIBRARY_PATH"] = ""
   640         for path in NS3_MODULE_PATH:
   641             os.environ["LD_LIBRARY_PATH"] += ":" + path
   642         if options.verbose:
   643             print "os.environ[\"LD_LIBRARY_PATH\"] == %s" % os.environ["LD_LIBRARY_PATH"]
   644 
   645 #
   646 # Short note on generating suppressions:
   647 #
   648 # See the valgrind documentation for a description of suppressions.  The easiest
   649 # way to generate a suppression expression is by using the valgrind 
   650 # --gen-suppressions option.  To do that you have to figure out how to run the 
   651 # test in question.
   652 #
   653 # If you do "test.py -v -g -s <suitename>" then test.py will output most of what
   654 # you need.  For example, if you are getting a valgrind error in the
   655 # devices-mesh-dot11s-regression test suite, you can run:
   656 #
   657 #   ./test.py -v -g -s devices-mesh-dot11s-regression 
   658 #
   659 # You should see in the verbose output something that looks like:
   660 #
   661 #   Synchronously execute valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
   662 #   --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/test-runner 
   663 #   --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev 
   664 #   --tempdir=testpy-output/2010-01-12-22-47-50-CUT 
   665 #   --out=testpy-output/2010-01-12-22-47-50-CUT/devices-mesh-dot11s-regression.xml
   666 #
   667 # You need to pull out the useful pieces, and so could run the following to 
   668 # reproduce your error:
   669 #
   670 #   valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
   671 #   --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/test-runner 
   672 #   --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev 
   673 #   --tempdir=testpy-output 
   674 #
   675 # Hint: Use the first part of the command as is, and point the "tempdir" to 
   676 # somewhere real.  You don't need to specify an "out" file.
   677 #
   678 # When you run the above command you should see your valgrind error.  The 
   679 # suppression expression(s) can be generated by adding the --gen-suppressions=yes
   680 # option to valgrind.  Use something like:
   681 #
   682 #   valgrind --gen-suppressions=yes --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
   683 #   --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/test-runner 
   684 #   --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev 
   685 #   --tempdir=testpy-output 
   686 #
   687 # Now when valgrind detects an error it will ask:
   688 #
   689 #   ==27235== ---- Print suppression ? --- [Return/N/n/Y/y/C/c] ----
   690 #
   691 # to which you just enter 'y'<ret>.
   692 #
   693 # You will be provided with a suppression expression that looks something like
   694 # the following:
   695 #   {
   696 #     <insert_a_suppression_name_here>
   697 #     Memcheck:Addr8
   698 #     fun:_ZN3ns36dot11s15HwmpProtocolMac8SendPreqESt6vectorINS0_6IePreqESaIS3_EE
   699 #     fun:_ZN3ns36dot11s15HwmpProtocolMac10SendMyPreqEv
   700 #     fun:_ZN3ns36dot11s15HwmpProtocolMac18RequestDestinationENS_12Mac48AddressEjj
   701 #     ...
   702 #     the rest of the stack frame
   703 #     ...
   704 #   }
   705 #
   706 # You need to add a suppression name which will only be printed out by valgrind in
   707 # verbose mode (but it needs to be there in any case).  The entire stack frame is
   708 # shown to completely characterize the error, but in most cases you won't need 
   709 # all of that info.  For example, if you want to turn off all errors that happen
   710 # when the function (fun:) is called, you can just delete the rest of the stack
   711 # frame.  You can also use wildcards to make the mangled signatures more readable.
   712 #
   713 # I added the following to the testpy.supp file for this particular error:
   714 #
   715 #   {
   716 #     Supress invalid read size errors in SendPreq() when using HwmpProtocolMac
   717 #     Memcheck:Addr8
   718 #     fun:*HwmpProtocolMac*SendPreq*
   719 #   }
   720 #
   721 # Now, when you run valgrind the error will be suppressed.
   722 #
   723 VALGRIND_SUPPRESSIONS_FILE = "testpy.supp"
   724 
   725 def run_job_synchronously(shell_command, directory, valgrind, is_python):
   726     (base, build) = os.path.split (NS3_BUILDDIR)
   727     suppressions_path = os.path.join (base, VALGRIND_SUPPRESSIONS_FILE)
   728 
   729     if is_python:
   730         path_cmd = "python " + os.path.join (base, shell_command)
   731     else:
   732         path_cmd = os.path.join (NS3_BUILDDIR, NS3_ACTIVE_VARIANT, shell_command)
   733 
   734     if valgrind:
   735         cmd = "valgrind --suppressions=%s --leak-check=full --show-reachable=yes --error-exitcode=2 %s" % (suppressions_path, 
   736             path_cmd)
   737     else:
   738         cmd = path_cmd
   739 
   740     if options.verbose:
   741         print "Synchronously execute %s" % cmd
   742 
   743     start_time = time.time()
   744     proc = subprocess.Popen(cmd, shell = True, cwd = directory, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
   745     stdout_results, stderr_results = proc.communicate()
   746     elapsed_time = time.time() - start_time
   747 
   748     retval = proc.returncode
   749 
   750     #
   751     # valgrind sometimes has its own idea about what kind of memory management
   752     # errors are important.  We want to detect *any* leaks, so the way to do 
   753     # that is to look for the presence of a valgrind leak summary section.
   754     #
   755     # If another error has occurred (like a test suite has failed), we don't 
   756     # want to trump that error, so only do the valgrind output scan if the 
   757     # test has otherwise passed (return code was zero).
   758     #
   759     if valgrind and retval == 0 and "== LEAK SUMMARY:" in stderr_results:
   760         retval = 2
   761     
   762     if options.verbose:
   763         print "Return code = ", retval
   764         print "stderr = ", stderr_results
   765 
   766     return (retval, stdout_results, stderr_results, elapsed_time)
   767 
   768 #
   769 # This class defines a unit of testing work.  It will typically refer to
   770 # a test suite to run using the test-runner, or an example to run directly.
   771 #
   772 class Job:
   773     def __init__(self):
   774         self.is_break = False
   775         self.is_skip = False
   776         self.is_example = False
   777         self.is_pyexample = False
   778         self.shell_command = ""
   779         self.display_name = ""
   780         self.basedir = ""
   781         self.tempdir = ""
   782         self.cwd = ""
   783         self.tmp_file_name = ""
   784         self.returncode = False
   785         self.elapsed_time = 0
   786 
   787     #
   788     # A job is either a standard job or a special job indicating that a worker
   789     # thread should exit.  This special job is indicated by setting is_break
   790     # to true.
   791     #
   792     def set_is_break(self, is_break):
   793         self.is_break = is_break
   794 
   795     #
   796     # If a job is to be skipped, we actually run it through the worker threads
   797     # to keep the PASS, FAIL, CRASH and SKIP processing all in one place.
   798     #
   799     def set_is_skip(self, is_skip):
   800         self.is_skip = is_skip
   801 
   802     #
   803     # Examples are treated differently than standard test suites.  This is
   804     # mostly because they are completely unaware that they are being run as 
   805     # tests.  So we have to do some special case processing to make them look
   806     # like tests.
   807     #
   808     def set_is_example(self, is_example):
   809         self.is_example = is_example
   810 
   811     #
   812     # Python examples are treated differently than standard test suites.  This is
   813     # mostly because they are completely unaware that they are being run as 
   814     # tests.  So we have to do some special case processing to make them look
   815     # like tests.
   816     #
   817     def set_is_pyexample(self, is_pyexample):
   818         self.is_pyexample = is_pyexample
   819 
   820     #
   821     # This is the shell command that will be executed in the job.  For example,
   822     #
   823     #  "utils/test-runner --suite=some-test-suite"
   824     #
   825     def set_shell_command(self, shell_command):
   826         self.shell_command = shell_command
   827 
   828     #
   829     # This is the display name of the job, typically the test suite or example
   830     # name.  For example,
   831     #
   832     #  "some-test-suite" or "udp-echo"
   833     #
   834     def set_display_name(self, display_name):
   835         self.display_name = display_name
   836 
   837     #
   838     # This is the base directory of the repository out of which the tests are
   839     # being run.  It will be used deep down in the testing framework to determine
   840     # where the source directory of the test was, and therefore where to find 
   841     # provided test vectors.  For example,
   842     #
   843     #  "/home/user/repos/ns-3-dev"
   844     #
   845     def set_basedir(self, basedir):
   846         self.basedir = basedir
   847 
   848     #
   849     # This is the directory to which a running test suite should write any 
   850     # temporary files.
   851     #
   852     def set_tempdir(self, tempdir):
   853         self.tempdir = tempdir
   854 
   855     #
   856     # This is the current working directory that will be given to an executing
   857     # test as it is being run.  It will be used for examples to tell them where
   858     # to write all of the pcap files that we will be carefully ignoring.  For
   859     # example,
   860     #
   861     #  "/tmp/unchecked-traces"
   862     #
   863     def set_cwd(self, cwd):
   864         self.cwd = cwd
   865 
   866     #
   867     # This is the temporary results file name that will be given to an executing 
   868     # test as it is being run.  We will be running all of our tests in parallel
   869     # so there must be multiple temporary output files.  These will be collected
   870     # into a single XML file at the end and then be deleted.  
   871     #
   872     def set_tmp_file_name(self, tmp_file_name):
   873         self.tmp_file_name = tmp_file_name
   874 
   875     #
   876     # The return code received when the job process is executed.
   877     #
   878     def set_returncode(self, returncode):
   879         self.returncode = returncode
   880 
   881     #
   882     # The elapsed real time for the job execution.
   883     #
   884     def set_elapsed_time(self, elapsed_time):
   885         self.elapsed_time = elapsed_time
   886 
   887 #
   888 # The worker thread class that handles the actual running of a given test.
   889 # Once spawned, it receives requests for work through its input_queue and
   890 # ships the results back through the output_queue.
   891 #
   892 class worker_thread(threading.Thread):
   893     def __init__(self, input_queue, output_queue):
   894         threading.Thread.__init__(self)
   895         self.input_queue = input_queue
   896         self.output_queue = output_queue
   897 
   898     def run(self):
   899         while True:
   900             job = self.input_queue.get()
   901             #
   902             # Worker threads continue running until explicitly told to stop with
   903             # a special job.
   904             #
   905             if job.is_break:
   906                 return
   907             #
   908             # If the global interrupt handler sets the thread_exit variable,
   909             # we stop doing real work and just report back that a "break" in
   910             # the normal command processing has happened.
   911             #
   912             if thread_exit == True:
   913                 job.set_is_break(True)
   914                 self.output_queue.put(job)
   915                 continue
   916 
   917             #
   918             # If we are actually supposed to skip this job, do so.  Note that
   919             # if is_skip is true, returncode is undefined.
   920             #
   921             if job.is_skip:
   922                 if options.verbose:
   923                     print "Skip %s" % job.shell_command
   924                 self.output_queue.put(job)
   925                 continue
   926 
   927             #
   928             # Otherwise go about the business of running tests as normal.
   929             #
   930             else:
   931                 if options.verbose:
   932                     print "Launch %s" % job.shell_command
   933 
   934                 if job.is_example or job.is_pyexample:
   935                     #
   936                     # If we have an example, the shell command is all we need to
   937                     # know.  It will be something like "examples/udp-echo" or 
   938                     # "examples/mixed-wireless.py"
   939                     #
   940                     (job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command, 
   941                         job.cwd, options.valgrind, job.is_pyexample)
   942                 else:
   943                     #
   944                     # If we're a test suite, we need to provide a little more info
   945                     # to the test runner, specifically the base directory and temp
   946                     # file name
   947                     #
   948                     (job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command + 
   949                         " --basedir=%s --tempdir=%s --out=%s" % (job.basedir, job.tempdir, job.tmp_file_name), 
   950                         job.cwd, options.valgrind, False)
   951 
   952                 job.set_elapsed_time(et)
   953 
   954                 if options.verbose:
   955                     print "returncode = %d" % job.returncode
   956                     print "---------- begin standard out ----------"
   957                     print standard_out
   958                     print "---------- begin standard err ----------"
   959                     print standard_err
   960                     print "---------- end standard err ----------"
   961 
   962                 self.output_queue.put(job)
   963 
   964 #
   965 # This is the main function that does the work of interacting with the test-runner
   966 # itself.
   967 #
   968 def run_tests():
   969     #
   970     # Run waf to make sure that everything is built, configured and ready to go
   971     # unless we are explicitly told not to.  We want to be careful about causing
   972     # our users pain while waiting for extraneous stuff to compile and link, so
   973     # we allow users who know what they're doing to not invoke waf at all.
   974     #
   975     if not options.nowaf:
   976 
   977         #
   978         # If the user is running the "kinds" or "list" options, there is an 
   979         # implied dependency on the test-runner since we call that program
   980         # if those options are selected.  We will exit after processing those
   981         # options, so if we see them, we can safely only build the test-runner.
   982         #
   983         # If the user has constrained us to running only a particular type of
   984         # file, we can only ask waf to build what we know will be necessary.
   985         # For example, if the user only wants to run BVT tests, we only have
   986         # to build the test-runner and can ignore all of the examples.
   987         #
   988         # If the user only wants to run a single example, then we can just build
   989         # that example.
   990         #
   991         # If there is no constraint, then we have to build everything since the
   992         # user wants to run everything.
   993         #
   994         if options.kinds or options.list or (len(options.constrain) and options.constrain in core_kinds):
   995             if sys.platform == "win32":
   996                 waf_cmd = "waf --target=test-runner"
   997             else:
   998                 waf_cmd = "./waf --target=test-runner"
   999         elif len(options.example):
  1000             if sys.platform == "win32":
  1001                 waf_cmd = "waf --target=%s" % os.path.basename(options.example)
  1002             else:
  1003                 waf_cmd = "./waf --target=%s" % os.path.basename(options.example)
  1004 
  1005         else:
  1006             if sys.platform == "win32":
  1007                 waf_cmd = "waf"
  1008             else:
  1009                 waf_cmd = "./waf"
  1010 
  1011         if options.verbose:
  1012             print "Building: %s" % waf_cmd
  1013 
  1014         proc = subprocess.Popen(waf_cmd, shell = True)
  1015         proc.communicate()
  1016         if proc.returncode:
  1017             print >> sys.stderr, "Waf died. Not running tests"
  1018             return proc.returncode
  1019 
  1020     #
  1021     # Pull some interesting configuration information out of waf, primarily
  1022     # so we can know where executables can be found, but also to tell us what
  1023     # pieces of the system have been built.  This will tell us what examples 
  1024     # are runnable.
  1025     #
  1026     read_waf_active_variant()
  1027     read_waf_config()
  1028     make_paths()
  1029 
  1030     #
  1031     # If lots of logging is enabled, we can crash Python when it tries to 
  1032     # save all of the text.  We just don't allow logging to be turned on when
  1033     # test.py runs.  If you want to see logging output from your tests, you
  1034     # have to run them using the test-runner directly.
  1035     #
  1036     os.environ["NS_LOG"] = ""
  1037 
  1038     #
  1039     # There are a couple of options that imply we want to exit before starting
  1040     # up a bunch of threads and running tests.  Let's detect these cases and 
  1041     # handle them without doing all of the hard work.
  1042     #
  1043     if options.kinds:
  1044         path_cmd = os.path.join("utils", "test-runner --kinds")
  1045         (rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
  1046         print standard_out
  1047 
  1048     if options.list:
  1049         path_cmd = os.path.join("utils", "test-runner --list")
  1050         (rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
  1051         print standard_out
  1052 
  1053     if options.kinds or options.list:
  1054         return
  1055 
  1056     #
  1057     # We communicate results in two ways.  First, a simple message relating 
  1058     # PASS, FAIL, CRASH or SKIP is always written to the standard output.  It 
  1059     # is expected that this will be one of the main use cases.  A developer can
  1060     # just run test.py with no options and see that all of the tests still 
  1061     # pass.
  1062     #
  1063     # The second main use case is when detailed status is requested (with the
  1064     # --text or --html options).  Typically this will be text if a developer
  1065     # finds a problem, or HTML for nightly builds.  In these cases, an
  1066     # XML file is written containing the status messages from the test suites.
  1067     # This file is then read and translated into text or HTML.  It is expected
  1068     # that nobody will really be interested in the XML, so we write it somewhere
  1069     # with a unique name (time) to avoid collisions.  In case an error happens, we
  1070     # provide a runtime option to retain the temporary files.
  1071     #
  1072     # When we run examples as smoke tests, they are going to want to create
  1073     # lots and lots of trace files.  We aren't really interested in the contents
  1074     # of the trace files, so we also just stash them off in the temporary dir.
  1075     # The retain option also causes these unchecked trace files to be kept.
  1076     #
  1077     date_and_time = time.strftime("%Y-%m-%d-%H-%M-%S-CUT", time.gmtime())
  1078 
  1079     if not os.path.exists(TMP_OUTPUT_DIR):
  1080         os.makedirs(TMP_OUTPUT_DIR)
  1081 
  1082     testpy_output_dir = os.path.join(TMP_OUTPUT_DIR, date_and_time)
  1083 
  1084     if not os.path.exists(testpy_output_dir):
  1085         os.makedirs(testpy_output_dir)
  1086 
  1087     #
  1088     # Create the main output file and start filling it with XML.  We need to 
  1089     # do this since the tests will just append individual results to this file.
  1090     #
  1091     xml_results_file = os.path.join(testpy_output_dir, "results.xml")
  1092     f = open(xml_results_file, 'w')
  1093     f.write('<?xml version="1.0"?>\n')
  1094     f.write('<TestResults>\n')
  1095     f.close()
  1096 
  1097     #
  1098     # We need to figure out what test suites to execute.  We are either given one 
  1099     # suite or example explicitly via the --suite or --example/--pyexample option,
  1100     # or we need to call into the test runner and ask it to list all of the available
  1101     # test suites.  Further, we need to provide the constraint information if it
  1102     # has been given to us.
  1103     # 
  1104     # This translates into allowing the following options with respect to the 
  1105     # suites
  1106     #
  1107     #  ./test.py:                                           run all of the suites and examples
  1108     #  ./test.py --constrain=core:                          run all of the suites of all kinds
  1109     #  ./test.py --constrain=unit:                          run all unit suites
  1110     #  ./test.py --suite=some-test-suite:                   run a single suite
  1111     #  ./test.py --example=udp/udp-echo:                    run no test suites
  1112     #  ./test.py --pyexample=wireless/mixed-wireless.py:    run no test suites
  1113     #  ./test.py --suite=some-suite --example=some-example: run the single suite
  1114     #
  1115     # We can also use the --constrain option to provide an ordering of test 
  1116     # execution quite easily.
  1117     #
  1118     if len(options.suite):
  1119         suites = options.suite + "\n"
  1120     elif len(options.example) == 0 and len(options.pyexample) == 0:
  1121         if len(options.constrain):
  1122             path_cmd = os.path.join("utils", "test-runner --list --constrain=%s" % options.constrain)
  1123             (rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
  1124         else:
  1125             path_cmd = os.path.join("utils", "test-runner --list")
  1126             (rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
  1127     else:
  1128         suites = ""
  1129 
  1130     #
  1131     # suite_list will be either a single test suite name that the user has
  1132     # indicated she wants to run or a list of test suites provided by
  1133     # the test-runner possibly according to user provided constraints.
  1134     # We go through the trouble of setting up the parallel execution 
  1135     # even in the case of a single suite to avoid having to process the
  1136     # results in two different places.
  1137     #
  1138     suite_list = suites.split('\n')
  1139 
  1140     #
  1141     # We now have a possibly large number of test suites to run, so we want to
  1142     # run them in parallel.  We're going to spin up a number of worker threads
  1143     # that will run our test jobs for us.
  1144     #
  1145     input_queue = Queue.Queue(0)
  1146     output_queue = Queue.Queue(0)
  1147 
  1148     jobs = 0
  1149     threads=[]
  1150 
  1151     #
  1152     # In Python 2.6 you can just use the multiprocessing module, but we don't want
  1153     # to introduce that dependency yet; so we jump through a few hoops.
  1154     #
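           # (Hedged note: once that dependency is acceptable, the hoops below
           # could be replaced with something as simple as
           #
           #   import multiprocessing
           #   processors = multiprocessing.cpu_count()
           #
           # but for now we stick with sysconf/sysctl.)
           #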
  1155     processors = 1
  1156 
  1157     if sys.platform != "win32":
  1158         if 'SC_NPROCESSORS_ONLN' in os.sysconf_names:
  1159             processors = os.sysconf('SC_NPROCESSORS_ONLN')
  1160         else:
  1161             proc = subprocess.Popen("sysctl -n hw.ncpu", shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  1162             stdout_results, stderr_results = proc.communicate()
  1163             if len(stderr_results) == 0:
  1164                 processors = int(stdout_results)
  1165 
  1166     #
  1167     # Now, spin up one thread per processor which will eventually mean one test
  1168     # per processor running concurrently.
  1169     #
  1170     for i in range(processors):
  1171         thread = worker_thread(input_queue, output_queue)
  1172         threads.append(thread)
  1173         thread.start()
  1174 
  1175     #
  1176     # Keep track of some summary statistics
  1177     #
  1178     total_tests = 0
  1179     skipped_tests = 0
  1180 
  1181     #
  1182     # We now have worker threads spun up, and a list of work to do.  So, run 
  1183     # through the list of test suites and dispatch a job to run each one.
  1184     # 
  1185     # Dispatching will run with unlimited speed and the worker threads will 
  1186     # execute as fast as possible from the queue.
  1187     #
  1188     # Note that we actually dispatch tests to be skipped, so all of the 
  1189     # PASS, FAIL, CRASH and SKIP processing is done in the same place.
  1190     #
  1191     for test in suite_list:
  1192         test = test.strip()
  1193         if len(test):
  1194             job = Job()
  1195             job.set_is_example(False)
  1196             job.set_is_pyexample(False)
  1197             job.set_display_name(test)
  1198             job.set_tmp_file_name(os.path.join(testpy_output_dir, "%s.xml" % test))
  1199             job.set_cwd(os.getcwd())
  1200             job.set_basedir(os.getcwd())
  1201             job.set_tempdir(testpy_output_dir)
  1202             if (options.multiple):
  1203                 multiple = " --multiple"
  1204             else:
  1205                 multiple = ""
  1206 
  1207             path_cmd = os.path.join("utils", "test-runner --suite=%s%s" % (test, multiple))
  1208             job.set_shell_command(path_cmd)
  1209 
  1210             if options.valgrind and test in core_valgrind_skip_tests:
  1211                 job.set_is_skip(True)
  1212 
  1213             if options.verbose:
  1214                 print "Queue %s" % test
  1215 
  1216             input_queue.put(job)
  1217             jobs = jobs + 1
  1218             total_tests = total_tests + 1
  1219     
  1220     #
  1221     # We've taken care of the discovered or specified test suites.  Now we
  1222     # have to deal with examples run as smoke tests.  We have a list of all of
  1223     # the example programs it makes sense to try and run.  Each example will
  1224     # have a condition associated with it that must evaluate to true for us
  1225     # to try and execute it.  This is used to determine if the example has
  1226     # a dependency that is not satisfied.  For example, if an example depends
  1227     # on NSC being configured by waf, that example should have a condition
  1228     # that evaluates to true if NSC is enabled.  For example,
  1229     #
  1230     #      ("tcp-nsc-zoo", "ENABLE_NSC == True"),
  1231     #
  1232     # In this case, the example "tcp-nsc-zoo" will only be run if we find the
  1233     # waf configuration variable "ENABLE_NSC" to be True.
  1234     #
  1235     # We don't care at all how the trace files come out, so we just write them 
  1236     # to a single temporary directory.
  1237     #
  1238     # XXX As it stands, all of the trace files have unique names, and so file
  1239     # collisions can only happen if two instances of an example are running in
  1240     # two test.py processes concurrently.  We may want to create
  1241     # uniquely named temporary traces directories to avoid this problem.
  1242     #
  1243     # We need to figure out what examples to execute.  We are either given one 
  1244     # suite or example explicitly via the --suite or --example option, or we
  1245     # need to walk the list of examples looking for available example 
  1246     # conditions.
  1247     #
  1248     # This translates into allowing the following options with respect to the 
  1249     # suites
  1250     #
  1251     #  ./test.py:                                           run all of the examples
  1252     #  ./test.py --constrain=unit                           run no examples
  1253     #  ./test.py --constrain=example                        run all of the examples
  1254     #  ./test.py --suite=some-test-suite:                   run no examples
  1255     #  ./test.py --example=some-example:                    run the single example
  1256     #  ./test.py --suite=some-suite --example=some-example: run the single example
  1257     #
  1258     # XXX could use constrain to separate out examples used for performance 
  1259     # testing
  1260     #
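           # A note on mechanics: the do_run and do_valgrind_run fields are
           # Python expressions stored as strings, and the eval() calls below
           # resolve them against the module-level configuration globals read
           # from the waf cache.  Roughly (illustrative only):
           #
           #     do_run = "ENABLE_NSC == True"
           #     eval(do_run)     # True only if waf enabled NSC
           #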
  1261     if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0:
  1262         if len(options.constrain) == 0 or options.constrain == "example":
  1263             if ENABLE_EXAMPLES:
  1264                 for test, do_run, do_valgrind_run in example_tests:
  1265                     if eval(do_run):
  1266                         job = Job()
  1267                         job.set_is_example(True)
  1268                         job.set_is_pyexample(False)
  1269                         job.set_display_name(test)
  1270                         job.set_tmp_file_name("")
  1271                         job.set_cwd(testpy_output_dir)
  1272                         job.set_basedir(os.getcwd())
  1273                         job.set_tempdir(testpy_output_dir)
  1274                         job.set_shell_command("examples/%s" % test)
  1275 
  1276                         if options.valgrind and not eval(do_valgrind_run):
  1277                             job.set_is_skip (True)
  1278 
  1279                         if options.verbose:
  1280                             print "Queue %s" % test
  1281 
  1282                         input_queue.put(job)
  1283                         jobs = jobs + 1
  1284                         total_tests = total_tests + 1
  1285 
  1286     elif len(options.example):
  1287         #
  1288         # If you tell me to run an example, I will try and run the example
  1289         # irrespective of any condition.
  1290         #
  1291         job = Job()
  1292         job.set_is_example(True)
  1293         job.set_is_pyexample(False)
  1294         job.set_display_name(options.example)
  1295         job.set_tmp_file_name("")
  1296         job.set_cwd(testpy_output_dir)
  1297         job.set_basedir(os.getcwd())
  1298         job.set_tempdir(testpy_output_dir)
  1299         job.set_shell_command("examples/%s" % options.example)
  1300         
  1301         if options.verbose:
  1302             print "Queue %s" % options.example
  1303 
  1304         input_queue.put(job)
  1305         jobs = jobs + 1
  1306         total_tests = total_tests + 1
  1307 
  1308     #
  1309     # Run some Python examples as smoke tests.  We have a list of all of
  1310     # the example programs it makes sense to try and run.  Each example will
  1311     # have a condition associated with it that must evaluate to true for us
  1312     # to try and execute it.  This is used to determine if the example has
  1313     # a dependency that is not satisfied.
  1314     #
  1315     # We don't care at all how the trace files come out, so we just write them 
  1316     # to a single temporary directory.
  1317     #
  1318     # We need to figure out what python examples to execute.  We are either 
  1319     # given one pyexample explicitly via the --pyexample option, or we
  1320     # need to walk the list of python examples
  1321     #
  1322     # This translates into allowing the following options with respect to the 
  1323     # suites
  1324     #
  1325     #  ./test.py --constrain=pyexample           run all of the python examples
  1326     #  ./test.py --pyexample=some-example.py:    run the single python example
  1327     #
  1328     if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0:
  1329         if len(options.constrain) == 0 or options.constrain == "pyexample":
  1330             if ENABLE_EXAMPLES:
  1331                 for test, do_run in python_tests:
  1332                     if eval(do_run):
  1333                         job = Job()
  1334                         job.set_is_example(False)
  1335                         job.set_is_pyexample(True)
  1336                         job.set_display_name(test)
  1337                         job.set_tmp_file_name("")
  1338                         job.set_cwd(testpy_output_dir)
  1339                         job.set_basedir(os.getcwd())
  1340                         job.set_tempdir(testpy_output_dir)
  1341                         job.set_shell_command("examples/%s" % test)
  1342 
  1343                         #
  1344                         # Python programs and valgrind do not work and play
  1345                         # well together, so we skip them under valgrind.
  1346                         # We go through the trouble of doing all of this
  1347                         # work to report the skipped tests in a consistent
  1348                         # way through the output formatter.
  1349                         #
  1350                         if options.valgrind:
  1351                             job.set_is_skip (True)
  1352 
  1353                         #
  1354                         # The user can disable python bindings, so we need
  1355                         # to pay attention to that and give some feedback
  1356                         # that we're not testing them
  1357                         #
  1358                         if not ENABLE_PYTHON_BINDINGS:
  1359                             job.set_is_skip (True)
  1360 
  1361                         if options.verbose:
  1362                             print "Queue %s" % test
  1363 
  1364                         input_queue.put(job)
  1365                         jobs = jobs + 1
  1366                         total_tests = total_tests + 1
  1367 
  1368     elif len(options.pyexample):
  1369         #
  1370         # If you tell me to run a python example, I will try and run the example
  1371         # irrespective of any condition.
  1372         #
  1373         job = Job()
  1374         job.set_is_pyexample(True)
  1375         job.set_display_name(options.pyexample)
  1376         job.set_tmp_file_name("")
  1377         job.set_cwd(testpy_output_dir)
  1378         job.set_basedir(os.getcwd())
  1379         job.set_tempdir(testpy_output_dir)
  1380         job.set_shell_command("examples/%s" % options.pyexample)
  1381         
  1382         if options.verbose:
  1383             print "Queue %s" % options.pyexample
  1384 
  1385         input_queue.put(job)
  1386         jobs = jobs + 1
  1387         total_tests = total_tests + 1
  1388 
  1389     #
  1390     # Tell the worker threads to pack up and go home for the day.  Each one
  1391     # will exit when it sees its is_break job.
  1392     #
  1393     for i in range(processors):
  1394         job = Job()
  1395         job.set_is_break(True)
  1396         input_queue.put(job)
  1397 
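           # (Queueing one break job per worker thread guarantees that every
           # thread sees exactly one sentinel and shuts down cleanly; this is
           # the usual "poison pill" pattern for queue-based workers.)
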
  1398     #
  1399     # Now all of the tests have been dispatched, so all we have to do here
  1400     # in the main thread is to wait for them to complete.  Keyboard interrupt
  1401     # handling is broken as mentioned above.  We use a signal handler to catch
  1402     # sigint and set a global variable.  When the worker threads sense this
  1403     # they stop doing real work and will just start throwing jobs back at us
  1404     # with is_break set to True.  In this case, there are no real results so we 
  1405     # ignore them.  If there are real results, we always print PASS or FAIL to
  1406     # standard out as a quick indication of what happened.
  1407     #
  1408     passed_tests = 0
  1409     failed_tests = 0
  1410     crashed_tests = 0
  1411     valgrind_errors = 0
  1412     for i in range(jobs):
  1413         job = output_queue.get()
  1414         if job.is_break:
  1415             continue
  1416 
  1417         if job.is_example or job.is_pyexample:
  1418             kind = "Example"
  1419         else:
  1420             kind = "TestSuite"
  1421 
  1422         if job.is_skip:
  1423             status = "SKIP"
  1424             skipped_tests = skipped_tests + 1
  1425         else:
  1426             if job.returncode == 0:
  1427                 status = "PASS"
  1428                 passed_tests = passed_tests + 1
  1429             elif job.returncode == 1:
  1430                 failed_tests = failed_tests + 1
  1431                 status = "FAIL"
  1432             elif job.returncode == 2:
  1433                 valgrind_errors = valgrind_errors + 1
  1434                 status = "VALGR"
  1435             else:
  1436                 crashed_tests = crashed_tests + 1
  1437                 status = "CRASH"
  1438 
  1439         print "%s: %s %s" % (status, kind, job.display_name)
  1440 
  1441         if job.is_example or job.is_pyexample:
  1442             #
  1443             # Examples are the odd man out here.  They are written without any
  1444             # knowledge that they are going to be run as a test, so we need to 
  1445             # cook up some kind of output for them.  We're writing an xml file,
  1446             # so we do some simple XML that says we ran the example.
  1447             #
  1448             # XXX We could add some timing information to the examples, i.e. run
  1449             # them through time and print the results here.
  1450             #
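                   # The fragment appended for each example looks roughly like
                   # this (the values shown are illustrative):
                   #
                   #     <Example>
                   #       <Name>some-example</Name>
                   #       <Result>PASS</Result>
                   #       <ElapsedTime>0.250</ElapsedTime>
                   #     </Example>
                   #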
  1451             f = open(xml_results_file, 'a')
  1452             f.write('<Example>\n')
  1453             example_name = "  <Name>%s</Name>\n" % job.display_name
  1454             f.write(example_name)
  1455 
  1456             if status == "PASS":
  1457                 f.write('  <Result>PASS</Result>\n')
  1458             elif status == "FAIL":
  1459                 f.write('  <Result>FAIL</Result>\n')
  1460             elif status == "VALGR":
  1461                 f.write('  <Result>VALGR</Result>\n')
  1462             elif status == "SKIP":
  1463                 f.write('  <Result>SKIP</Result>\n')
  1464             else:
  1465                 f.write('  <Result>CRASH</Result>\n')
  1466 
  1467             f.write('  <ElapsedTime>%.3f</ElapsedTime>\n' % job.elapsed_time)
  1468             f.write('</Example>\n')
  1469             f.close()
  1470 
  1471         else:
  1472             #
  1473             # If we're not running an example, we're running a test suite.
  1474             # These puppies run concurrently and each one writes its output
  1475             # to a temporary file to avoid collisions.
  1476             #
  1477             # Now that we are executing sequentially in the main thread, we can
  1478             # concatenate the contents of the associated temp file to the main 
  1479             # results file and remove that temp file.
  1480             #
  1481             # One thing to consider is that a test suite can crash just as
  1482             # well as any other program, so we need to deal with that 
  1483             # possibility as well.  If it ran correctly it will return 0
  1484             # if it passed, or 1 if it failed.  In this case, we can count
  1485             # on the results file it saved being complete.  If it crashed, it 
  1486             # will return some other code, and the file should be considered 
  1487             # corrupt and useless.  If the suite didn't create any XML, then
  1488             # we're going to have to do it ourselves.
  1489             #
  1490             # Another issue is how to deal with a valgrind error.  If we run
  1491             # a test suite under valgrind and it passes, we will get a return
  1492             # code of 0 and there will be a valid xml results file since the code
  1493             # ran to completion.  If we get a return code of 1 under valgrind,
  1494             # the test case failed, but valgrind did not find any problems so the
  1495             # test case return code was passed through.  We will have a valid xml
  1496             # results file here as well since the test suite ran.  If we see a 
  1497             # return code of 2, this means that valgrind found an error (we asked
  1498             # it to return 2 if it found a problem in run_job_synchronously) but
  1499             # the suite ran to completion so there is a valid xml results file.
  1500             # If the suite crashes under valgrind we will see some other error 
  1501             # return code (like 139).  If valgrind finds an illegal instruction or
  1502             # some other strange problem, it will die with its own strange return
  1503             # code (like 132).  However, if the test crashes by itself, not under
  1504             # valgrind we will also see some other return code.
  1505             #
  1506             # If the return code is 0, 1, or 2, we have a valid xml file.  If we 
  1507             # get another return code, we have no xml and we can't really say what
  1508             # happened -- maybe the TestSuite crashed, maybe valgrind crashed due
  1509             # to an illegal instruction.  If we get something besides 0-2, we assume
  1510             # a crash and fake up an xml entry.  After this is all done, we still
  1511             # need to indicate a valgrind error somehow, so we fake up an xml entry
  1512             # with a VALGR result.  Thus, in the case of a working TestSuite that
  1513             # fails valgrind, we'll see the PASS entry for the working TestSuite
  1514             # followed by a VALGR failing test suite of the same name.
  1515             #
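                   # In short: return codes 0, 1 and 2 all leave a usable xml
                   # results file (pass, fail and valgrind error respectively);
                   # anything else is treated as a crash and the xml entry is
                   # synthesized below instead.
                   #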
  1516             if job.is_skip:
  1517                 f = open(xml_results_file, 'a')
  1518                 f.write("<TestSuite>\n")
  1519                 f.write("  <SuiteName>%s</SuiteName>\n" % job.display_name)
  1520                 f.write('  <SuiteResult>SKIP</SuiteResult>\n')
  1521                 f.write('  <SuiteTime>Execution times not available</SuiteTime>\n')
  1522                 f.write("</TestSuite>\n")
  1523                 f.close()
  1524             else:
  1525                 if job.returncode == 0 or job.returncode == 1 or job.returncode == 2:
  1526                     f_to = open(xml_results_file, 'a')
  1527                     f_from = open(job.tmp_file_name)
  1528                     f_to.write(f_from.read())
  1529                     f_to.close()
  1530                     f_from.close()
  1531                 else:
  1532                     f = open(xml_results_file, 'a')
  1533                     f.write("<TestSuite>\n")
  1534                     f.write("  <SuiteName>%s</SuiteName>\n" % job.display_name)
  1535                     f.write('  <SuiteResult>CRASH</SuiteResult>\n')
  1536                     f.write('  <SuiteTime>Execution times not available</SuiteTime>\n')
  1537                     f.write("</TestSuite>\n")
  1538                     f.close()
  1539 
  1540                     if job.returncode == 2:
  1541                         f = open(xml_results_file, 'a')
  1542                         f.write("<TestSuite>\n")
  1543                         f.write("  <SuiteName>%s</SuiteName>\n" % job.display_name)
  1544                         f.write('  <SuiteResult>VALGR</SuiteResult>\n')
  1545                         f.write('  <SuiteTime>Execution times not available</SuiteTime>\n')
  1546                         f.write("</TestSuite>\n")
  1547                         f.close()
  1548 
  1549     #
  1550     # We have all of the tests run and the results written out.  One final 
  1551     # bit of housekeeping is to wait for all of the threads to close down
  1552     # so we can exit gracefully.
  1553     #
  1554     for thread in threads:
  1555         thread.join()
  1556     
  1557     #
  1558     # Back at the beginning of time, we started the body of an XML document
  1559     # since the test suites and examples were going to just write their 
  1560     # individual pieces.  So, we need to finish off and close out the XML 
  1561     # document
  1562     #
  1563     f = open(xml_results_file, 'a')
  1564     f.write('</TestResults>\n')
  1565     f.close()
  1566 
  1567     #
  1568     # Print a quick summary of events
  1569     #
  1570     print "%d of %d tests passed (%d passed, %d skipped, %d failed, %d crashed, %d valgrind errors)" % (passed_tests, 
  1571         total_tests, passed_tests, skipped_tests, failed_tests, crashed_tests, valgrind_errors)
  1572     #
  1573     # The last things to do are to translate the XML results file to "human
  1574     # readable form" if the user asked for it (or make an XML file somewhere)
  1575     #
  1576     if len(options.html):
  1577         translate_to_html(xml_results_file, options.html)
  1578 
  1579     if len(options.text):
  1580         translate_to_text(xml_results_file, options.text)
  1581 
  1582     if len(options.xml):
  1583         shutil.copyfile(xml_results_file, options.xml)
  1584 
  1585     #
  1586     # If we have been asked to retain all of the little temporary files, we
  1587     # don't delete them.  If we do delete the temporary files, we delete only
  1588     # the directory we just created.  We don't want to happily delete any
  1589     # retained directories, which would probably surprise the user.
  1590     #
  1591     if not options.retain:
  1592         shutil.rmtree(testpy_output_dir)
  1593 
  1594     if passed_tests + skipped_tests == total_tests:
  1595         return 0 # success
  1596     else:
  1597         return 1 # catchall for general errors
  1598 
  1599 def main(argv):
  1600     parser = optparse.OptionParser()
  1601     parser.add_option("-c", "--constrain", action="store", type="string", dest="constrain", default="",
  1602                       metavar="KIND",
  1603                       help="constrain the test-runner by kind of test")
  1604 
  1605     parser.add_option("-e", "--example", action="store", type="string", dest="example", default="",
  1606                       metavar="EXAMPLE",
  1607                       help="specify a single example to run")
  1608 
  1609     parser.add_option("-g", "--grind", action="store_true", dest="valgrind", default=False,
  1610                       help="run the test suites and examples using valgrind")
  1611 
  1612     parser.add_option("-k", "--kinds", action="store_true", dest="kinds", default=False,
  1613                       help="print the kinds of tests available")
  1614 
  1615     parser.add_option("-l", "--list", action="store_true", dest="list", default=False,
  1616                       help="print the list of known tests")
  1617 
  1618     parser.add_option("-m", "--multiple", action="store_true", dest="multiple", default=False,
  1619                       help="report multiple failures from test suites and test cases")
  1620 
  1621     parser.add_option("-n", "--nowaf", action="store_true", dest="nowaf", default=False,
  1622                       help="do not run waf before starting testing")
  1623 
  1624     parser.add_option("-p", "--pyexample", action="store", type="string", dest="pyexample", default="",
  1625                       metavar="PYEXAMPLE",
  1626                       help="specify a single python example to run")
  1627 
  1628     parser.add_option("-r", "--retain", action="store_true", dest="retain", default=False,
  1629                       help="retain all temporary files (which are normally deleted)")
  1630 
  1631     parser.add_option("-s", "--suite", action="store", type="string", dest="suite", default="",
  1632                       metavar="TEST-SUITE",
  1633                       help="specify a single test suite to run")
  1634 
  1635     parser.add_option("-t", "--text", action="store", type="string", dest="text", default="",
  1636                       metavar="TEXT-FILE",
  1637                       help="write detailed test results into TEXT-FILE.txt")
  1638 
  1639     parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
  1640                       help="print progress and informational messages")
  1641 
  1642     parser.add_option("-w", "--web", "--html", action="store", type="string", dest="html", default="",
  1643                       metavar="HTML-FILE",
  1644                       help="write detailed test results into HTML-FILE.html")
  1645 
  1646     parser.add_option("-x", "--xml", action="store", type="string", dest="xml", default="",
  1647                       metavar="XML-FILE",
  1648                       help="write detailed test results into XML-FILE.xml")
  1649 
  1650     global options
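           # parse_args() returns an (options, args) tuple; only the options
           # object is needed here, so any positional arguments are ignored.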
  1651     options = parser.parse_args()[0]
  1652     signal.signal(signal.SIGINT, sigint_hook)
  1653     
  1654     return run_tests()
  1655 
  1656 if __name__ == '__main__':
  1657     sys.exit(main(sys.argv))