test.py
author Josh Pelkey <jpelkey@gatech.edu>
Wed, 11 Aug 2010 11:37:37 -0400
changeset 6553 fb5ad9c7755a
parent 6349 4bab6b10a034
permissions -rwxr-xr-x
update release notes and fix doxygen warnings
craigdo@4772
     1
#! /usr/bin/env python
craigdo@4772
     2
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
craigdo@4772
     3
#
craigdo@4772
     4
# Copyright (c) 2009 University of Washington
craigdo@4772
     5
#
craigdo@4772
     6
# This program is free software; you can redistribute it and/or modify
craigdo@4772
     7
# it under the terms of the GNU General Public License version 2 as
craigdo@4772
     8
# published by the Free Software Foundation;
craigdo@4772
     9
#
craigdo@4772
    10
# This program is distributed in the hope that it will be useful,
craigdo@4772
    11
# but WITHOUT ANY WARRANTY; without even the implied warranty of
craigdo@4772
    12
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
craigdo@4772
    13
# GNU General Public License for more details.
craigdo@4772
    14
#
craigdo@4772
    15
# You should have received a copy of the GNU General Public License
craigdo@4772
    16
# along with this program; if not, write to the Free Software
craigdo@4772
    17
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
craigdo@4772
    18
#
craigdo@4772
    19
craigdo@4772
    20
import os
craigdo@4772
    21
import sys
craigdo@5412
    22
import time
craigdo@4772
    23
import optparse
craigdo@4772
    24
import subprocess
craigdo@4772
    25
import threading
craigdo@4772
    26
import Queue
craigdo@4772
    27
import signal
craigdo@4772
    28
import xml.dom.minidom
craigdo@5239
    29
import shutil
fmoatamr@5912
    30
import re
craigdo@4772
    31
craigdo@4772
    32
#
craigdo@4772
    33
# XXX This should really be part of a waf command to list the configuration
craigdo@4772
    34
# items relative to optional ns-3 pieces.
craigdo@4772
    35
#
craigdo@4772
    36
# A list of interesting configuration items in the waf configuration 
craigdo@4772
    37
# cache which we may be interested in when deciding on which examples
craigdo@4772
    38
# to run and how to run them.  These are set by waf during the 
craigdo@4772
    39
# configuration phase and the corresponding assignments are usually
craigdo@4772
    40
# found in the associated subdirectory wscript files.
craigdo@4772
    41
#
craigdo@4772
    42
# Names of waf configuration-cache variables this script cares about when
# deciding which examples/tests to run and how to run them.
interesting_config_items = [
    "NS3_BUILDDIR",
    "NS3_MODULE_PATH",
    "ENABLE_NSC",
    "ENABLE_REAL_TIME",
    "ENABLE_EXAMPLES",
    "ENABLE_PYTHON_BINDINGS",
]

# Conservative defaults for the configuration flags above.
# NOTE(review): presumably these are overwritten from the waf cache by code
# later in this file (not visible in this chunk) — confirm before relying
# on the defaults.
ENABLE_NSC = False
ENABLE_REAL_TIME = False
ENABLE_EXAMPLES = True

#
# If the user has constrained us to run certain kinds of tests, we can tell
# waf to only build the corresponding pieces.  These are the "core" kinds.
#
core_kinds = ["bvt", "core", "system", "unit"]

#
# There are some special cases for test suites that kill valgrind.  This is
# because NSC causes illegal instruction crashes when run under valgrind,
# so these suites are skipped in valgrind runs.
#
core_valgrind_skip_tests = [
    "ns3-tcp-cwnd",
    "nsc-tcp-loss",
    "ns3-tcp-interoperability",
]
craigdo@5402
    70
craigdo@5402
    71
#
craigdo@4772
    72
# A list of examples to run as smoke tests just to ensure that they remain 
craigdo@4772
    73
# buildable and runnable over time.  Also a condition under which to run
craigdo@5402
    74
# the example (from the waf configuration), and a condition under which to
craigdo@5402
    75
# run the example under valgrind.  This is because NSC causes illegal 
craigdo@5402
    76
# instruction crashes when run under valgrind.
craigdo@4772
    77
#
craigdo@4772
    78
# XXX Should this not be read from a configuration file somewhere and not
craigdo@4772
    79
# hardcoded.
craigdo@4772
    80
#
craigdo@4772
    81
# Each entry is a 3-tuple of strings:
#   (example program path [plus command-line arguments],
#    condition under which to run the example at all,
#    condition under which to run it under valgrind)
# NOTE(review): the two condition strings look like they are eval'd against
# the configuration globals above (e.g. "ENABLE_NSC == True") by the harness
# code elsewhere in this file — confirm against the consumer.
example_tests = [
    ("csma/csma-bridge", "True", "True"),
    ("csma/csma-bridge-one-hop", "True", "True"),
    ("csma/csma-broadcast", "True", "True"),
    ("csma/csma-multicast", "True", "True"),
    ("csma/csma-one-subnet", "True", "True"),
    ("csma/csma-packet-socket", "True", "True"),
    ("csma/csma-ping", "True", "True"),
    ("csma/csma-raw-ip-socket", "True", "True"),
    ("csma/csma-star", "True", "True"),

    ("emulation/emu-ping", "False", "True"),
    ("emulation/emu-udp-echo", "False", "True"),

    ("error-model/simple-error-model", "True", "True"),

    ("ipv6/icmpv6-redirect", "True", "True"),
    ("ipv6/ping6", "True", "True"),
    ("ipv6/radvd", "True", "True"),
    ("ipv6/radvd-two-prefix", "True", "True"),
    ("ipv6/test-ipv6", "True", "True"),

    ("mesh/mesh", "True", "True"),

    ("naming/object-names", "True", "True"),

    ("realtime/realtime-udp-echo", "ENABLE_REAL_TIME == True", "True"),

    ("routing/dynamic-global-routing", "True", "True"),
    ("routing/global-injection-slash32", "True", "True"),
    ("routing/global-routing-slash32", "True", "True"),
    ("routing/mixed-global-routing", "True", "True"),
    ("routing/nix-simple", "True", "True"),
    ("routing/nms-p2p-nix", "False", "True"), # Takes too long to run
    ("routing/simple-alternate-routing", "True", "True"),
    ("routing/simple-global-routing", "True", "True"),
    ("routing/simple-point-to-point-olsr", "True", "True"),
    ("routing/simple-routing-ping6", "True", "True"),
    ("routing/static-routing-slash32", "True", "True"),
    ("routing/aodv", "True", "True"),

    ("spectrum/adhoc-aloha-ideal-phy", "True", "True"),
    ("spectrum/adhoc-aloha-ideal-phy-with-microwave-oven", "True", "True"),

    ("stats/wifi-example-sim", "True", "True"),

    ("tap/tap-wifi-dumbbell", "False", "True"), # Requires manual configuration

    ("tcp/star", "True", "True"),
    ("tcp/tcp-large-transfer", "True", "True"),
    ("tcp/tcp-nsc-lfn", "ENABLE_NSC == True", "True"),
    ("tcp/tcp-nsc-zoo", "ENABLE_NSC == True", "True"),
    ("tcp/tcp-star-server", "True", "True"),

    ("topology-read/topology-read --input=../../examples/topology-read/Inet_small_toposample.txt", "True", "True"),
    ("topology-read/topology-read --format=Rocketfuel --input=../../examples/topology-read/RocketFuel_toposample_1239_weights.txt", "True", "True"),

    ("tunneling/virtual-net-device", "True", "True"),

    ("tutorial/first", "True", "True"),
    ("tutorial/hello-simulator", "True", "True"),
    ("tutorial/second", "True", "True"),
    ("tutorial/third", "True", "True"),
    ("tutorial/fourth", "True", "True"),
    ("tutorial/fifth", "True", "True"),
    ("tutorial/sixth", "True", "True"),

    ("udp/udp-echo", "True", "True"),

    ("wireless/mixed-wireless", "True", "True"),
    ("wireless/multirate --totalTime=0.3s --rateManager=ns3::AarfcdWifiManager", "True", "True"),
    ("wireless/multirate --totalTime=0.3s --rateManager=ns3::AmrrWifiManager", "True", "True"),
    ("wireless/multirate --totalTime=0.3s --rateManager=ns3::CaraWifiManager", "True", "True"),
    ("wireless/multirate --totalTime=0.3s --rateManager=ns3::IdealWifiManager", "True", "True"),
    ("wireless/multirate --totalTime=0.3s --rateManager=ns3::MinstrelWifiManager", "True", "True"),
    ("wireless/multirate --totalTime=0.3s --rateManager=ns3::OnoeWifiManager", "True", "True"),
    ("wireless/multirate --totalTime=0.3s --rateManager=ns3::RraaWifiManager", "True", "True"),
    ("wireless/simple-wifi-frame-aggregation", "True", "True"),
    ("wireless/wifi-adhoc", "False", "True"), # Takes too long to run
    ("wireless/wifi-ap --verbose=0", "True", "True"), # Don't let it spew to stdout
    ("wireless/wifi-clear-channel-cmu", "False", "True"), # Requires specific hardware
    ("wireless/wifi-simple-adhoc", "True", "True"),
    ("wireless/wifi-simple-adhoc-grid", "True", "True"),
    ("wireless/wifi-simple-infra", "True", "True"),
    ("wireless/wifi-simple-interference", "True", "True"),
    ("wireless/wifi-wired-bridging", "True", "True"),

    ("wimax/wimax-simple", "True", "True"),
    ("wimax/wimax-ipv4", "True", "True"),
    ("wimax/wimax-multicast", "True", "True"),
]
craigdo@4772
   172
craigdo@4772
   173
#
craigdo@6200
   174
# A list of python examples to run as smoke tests just to ensure that they 
craigdo@6200
   175
# runnable over time.  Also a condition under which to run the example (from
craigdo@6200
   176
# the waf configuration)
craigdo@6200
   177
#
craigdo@6200
   178
# XXX Should this not be read from a configuration file somewhere and not
craigdo@6200
   179
# hardcoded.
craigdo@6200
   180
#
craigdo@6200
   181
# Each entry is a 2-tuple of strings:
#   (python example script path,
#    condition under which to run the example)
# NOTE(review): like example_tests above, the condition string appears to be
# eval'd by the harness code elsewhere in this file — confirm against the
# consumer.
python_tests = [
    ("csma/csma-bridge.py", "True"),

    ("flowmon/wifi-olsr-flowmon.py", "True"),

    ("routing/simple-routing-ping6.py", "True"),

    ("tap/tap-csma-virtual-machine.py", "False"), # requires enable-sudo
    ("tap/tap-wifi-virtual-machine.py", "False"), # requires enable-sudo

    ("tutorial/first.py", "True"),

    ("wireless/wifi-ap.py", "True"),
    ("wireless/mixed-wireless.py", "True"),
]
craigdo@6200
   196
craigdo@6200
   197
#
# The test suites are going to want to output status.  They are running
# concurrently.  This means that unless we are careful, the output of
# the test suites will be interleaved.  Rather than introducing a lock
# file that could unintentionally start serializing execution, we ask
# the tests to write their output to a temporary directory and then
# put together the final output file when we "join" the test tasks back
# to the main thread.  In addition to this issue, the example programs
# often write lots and lots of trace files which we will just ignore.
# We put all of them into the temp directory as well, so they can be
# easily deleted.
#
# Name (relative to the working directory) of the scratch directory that
# holds per-test output fragments and stray trace files.
TMP_OUTPUT_DIR = "testpy-output"
craigdo@4772
   210
craigdo@4772
   211
def get_node_text(node):
    """Return the value of the first TEXT_NODE child of *node*.

    Only direct children are inspected.  When the node has no text child,
    the literal string "None" (not the None object) is returned so callers
    can splice the result straight into a report.
    """
    texts = (child.nodeValue for child in node.childNodes
             if child.nodeType == child.TEXT_NODE)
    return next(texts, "None")
craigdo@4772
   216
craigdo@4772
   217
#
craigdo@5324
   218
# A simple example of writing a text file with a test result summary.  It is 
craigdo@5324
   219
# expected that this output will be fine for developers looking for problems.
craigdo@4772
   220
#
craigdo@4772
   221
def translate_to_text(results_file, text_file):
    """Convert an ns-3 XML results file into a plain-text summary.

    results_file: path of the XML file produced by the test runner.
    text_file:    path of the text report to (over)write.

    One line is written per test suite, per test case, and per example.
    FAIL cases additionally get their FailureDetails dumped; a CRASHed
    suite gets no per-case breakdown (the XML contains none).
    """
    dom = xml.dom.minidom.parse(results_file)
    # 'with' guarantees the report file is closed even if a missing XML
    # element makes one of the lookups below raise (the original leaked
    # the open file on that path).
    with open(text_file, 'w') as f:
        for suite in dom.getElementsByTagName("TestSuite"):
            result = get_node_text(suite.getElementsByTagName("SuiteResult")[0])
            name = get_node_text(suite.getElementsByTagName("SuiteName")[0])
            # 'elapsed' rather than 'time' to avoid shadowing the imported
            # time module.
            elapsed = get_node_text(suite.getElementsByTagName("SuiteTime")[0])
            f.write("%s: Test Suite \"%s\" (%s)\n" % (result, name, elapsed))

            # A crashed suite produced no per-case records, so skip them.
            if result != "CRASH":
                for case in suite.getElementsByTagName("TestCase"):
                    result = get_node_text(case.getElementsByTagName("CaseResult")[0])
                    name = get_node_text(case.getElementsByTagName("CaseName")[0])
                    elapsed = get_node_text(case.getElementsByTagName("CaseTime")[0])
                    f.write("  %s: Test Case \"%s\" (%s)\n" % (result, name, elapsed))

                    # There can be multiple FailureDetails per failed case.
                    if result == "FAIL":
                        for details in case.getElementsByTagName("FailureDetails"):
                            f.write("    Details:\n")
                            f.write("      Message:   %s\n" % get_node_text(details.getElementsByTagName("Message")[0]))
                            f.write("      Condition: %s\n" % get_node_text(details.getElementsByTagName("Condition")[0]))
                            f.write("      Actual:    %s\n" % get_node_text(details.getElementsByTagName("Actual")[0]))
                            f.write("      Limit:     %s\n" % get_node_text(details.getElementsByTagName("Limit")[0]))
                            f.write("      File:      %s\n" % get_node_text(details.getElementsByTagName("File")[0]))
                            f.write("      Line:      %s\n" % get_node_text(details.getElementsByTagName("Line")[0]))

        # Examples are flat: one status line each.
        for example in dom.getElementsByTagName("Example"):
            result = get_node_text(example.getElementsByTagName("Result")[0])
            name = get_node_text(example.getElementsByTagName("Name")[0])
            elapsed = get_node_text(example.getElementsByTagName("ElapsedTime")[0])
            f.write("%s: Example \"%s\" (%s)\n" % (result, name, elapsed))
craigdo@4772
   256
    
craigdo@4772
   257
#
craigdo@5324
   258
# A simple example of writing an HTML file with a test result summary.  It is 
craigdo@5324
   259
# expected that this will eventually be made prettier as time progresses and
craigdo@5324
   260
# we have time to tweak it.  This may end up being moved to a separate module
craigdo@5324
   261
# since it will probably grow over time.
craigdo@4772
   262
#
craigdo@4772
   263
def translate_to_html(results_file, html_file):
    """Convert an ns-3 XML results file into an HTML report.

    results_file: path of the XML file produced by the test runner.
    html_file:    path of the HTML report to (over)write.

    Each suite becomes a level-3 header (green for PASS, orange for SKIP,
    red otherwise) followed by a table of its test cases; FAIL cases get
    an extra Failure Details column.  Examples are rendered in a single
    Result / Example Name / Elapsed Time table at the end.
    """
    dom = xml.dom.minidom.parse(results_file)

    # 'with' guarantees the report file is closed even if a missing XML
    # element makes one of the lookups below raise (the original leaked
    # the open file on that path).
    with open(html_file, 'w') as f:
        f.write("<html>\n")
        f.write("<body>\n")
        f.write("<center><h1>ns-3 Test Results</h1></center>\n")

        #
        # Iterate through the test suites.
        #
        f.write("<h2>Test Suites</h2>\n")
        for suite in dom.getElementsByTagName("TestSuite"):
            name = get_node_text(suite.getElementsByTagName("SuiteName")[0])
            result = get_node_text(suite.getElementsByTagName("SuiteResult")[0])
            time = get_node_text(suite.getElementsByTagName("SuiteTime")[0])

            # Header color: green = PASS, orange = SKIP, red = anything else.
            if result == "PASS":
                f.write("<h3 style=\"color:green\">%s: %s (%s)</h3>\n" % (result, name, time))
            elif result == "SKIP":
                f.write("<h3 style=\"color:#ff6600\">%s: %s (%s)</h3>\n" % (result, name, time))
            else:
                f.write("<h3 style=\"color:red\">%s: %s (%s)</h3>\n" % (result, name, time))

            # Per-case information goes in a table; first column is Result.
            f.write("<table border=\"1\">\n")
            f.write("<th> Result </th>\n")

            # A crashed, skipped or valgrind-failed suite carries no further
            # information: emit a one-cell row, close the table and move on
            # to the next suite.
            if result in ["CRASH", "SKIP", "VALGR"]:
                f.write("<tr>\n")
                if result == "SKIP":
                    f.write("<td style=\"color:#ff6600\">%s</td>\n" % result)
                else:
                    f.write("<td style=\"color:red\">%s</td>\n" % result)
                f.write("</tr>\n")
                f.write("</table>\n")
                continue

            # Otherwise finish the heading row: Result | Name | Time, plus a
            # Failure Details column when the suite failed.
            f.write("<th>Test Case Name</th>\n")
            f.write("<th> Time </th>\n")
            if result == "FAIL":
                f.write("<th>Failure Details</th>\n")

            for case in suite.getElementsByTagName("TestCase"):
                name = get_node_text(case.getElementsByTagName("CaseName")[0])
                result = get_node_text(case.getElementsByTagName("CaseResult")[0])
                time = get_node_text(case.getElementsByTagName("CaseTime")[0])

                if result == "FAIL":
                    # A case can fail several times; only the first detail
                    # row repeats the result/name/time columns, subsequent
                    # rows get blanks for readability.
                    first_row = True
                    for details in case.getElementsByTagName("FailureDetails"):
                        f.write("<tr>\n")
                        if first_row:
                            first_row = False
                            f.write("<td style=\"color:red\">%s</td>\n" % result)
                            f.write("<td>%s</td>\n" % name)
                            f.write("<td>%s</td>\n" % time)
                        else:
                            f.write("<td></td>\n")
                            f.write("<td></td>\n")
                            f.write("<td></td>\n")
                        f.write("<td>")
                        f.write("<b>Message: </b>%s, " % get_node_text(details.getElementsByTagName("Message")[0]))
                        f.write("<b>Condition: </b>%s, " % get_node_text(details.getElementsByTagName("Condition")[0]))
                        f.write("<b>Actual: </b>%s, " % get_node_text(details.getElementsByTagName("Actual")[0]))
                        f.write("<b>Limit: </b>%s, " % get_node_text(details.getElementsByTagName("Limit")[0]))
                        f.write("<b>File: </b>%s, " % get_node_text(details.getElementsByTagName("File")[0]))
                        f.write("<b>Line: </b>%s" % get_node_text(details.getElementsByTagName("Line")[0]))
                        f.write("</td>\n")
                        # BUG FIX: this row used to be "closed" with a second
                        # </td>, leaving every failure row unterminated.
                        f.write("</tr>\n")
                else:
                    # Passing case: green result, name, time, blank details.
                    f.write("<tr>\n")
                    f.write("<td style=\"color:green\">%s</td>\n" % result)
                    f.write("<td>%s</td>\n" % name)
                    f.write("<td>%s</td>\n" % time)
                    f.write("<td></td>\n")
                    f.write("</tr>\n")

            # All rows for this suite are written; end its table.
            f.write("</table>\n")

        #
        # Now the examples, in one Result / Name / Elapsed Time table.
        #
        f.write("<h2>Examples</h2>\n")
        f.write("<table border=\"1\">\n")
        f.write("<th> Result </th>\n")
        f.write("<th>Example Name</th>\n")
        f.write("<th>Elapsed Time</th>\n")

        for example in dom.getElementsByTagName("Example"):
            f.write("<tr>\n")
            result = get_node_text(example.getElementsByTagName("Result")[0])
            name = get_node_text(example.getElementsByTagName("Name")[0])
            time = get_node_text(example.getElementsByTagName("ElapsedTime")[0])

            if result == "PASS":
                f.write("<td style=\"color:green\">%s</td>\n" % result)
            elif result == "SKIP":
                # BUG FIX: this branch used to emit a bogus closing tag
                # "</fd>" instead of "</td>".
                f.write("<td style=\"color:#ff6600\">%s</td>\n" % result)
            else:
                f.write("<td style=\"color:red\">%s</td>\n" % result)

            f.write("<td>%s</td>\n" % name)
            f.write("<td>%s</td>\n" % time)
            f.write("</tr>\n")

        f.write("</table>\n")
        f.write("</body>\n")
        f.write("</html>\n")
craigdo@4772
   520
    
craigdo@4772
   521
#
# Python Control-C handling is broken in the presence of multiple threads.
# Signals get delivered to the runnable/running thread by default and if
# it is blocked, the signal is simply ignored.  So we hook sigint and set
# a global variable telling the system to shut down gracefully.
#
# Set to True by sigint_hook when SIGINT arrives; worker code is expected
# to poll it and wind down.
thread_exit = False
craigdo@4772
   528
craigdo@4772
   529
def sigint_hook(sig, frame):
    # SIGINT (Control-C) handler: request a graceful shutdown.
    #
    # We cannot do any real work here because signals are delivered to the
    # main thread while the worker threads keep running; instead we just set
    # the global thread_exit flag, which the workers poll between jobs.
    #
    # The first parameter was renamed from 'signal' to 'sig' so it no longer
    # shadows the imported 'signal' module (the handler is always invoked
    # positionally by the runtime, so callers are unaffected).
    #
    #  sig:   the signal number delivered (unused)
    #  frame: the interrupted stack frame (unused)
    #
    # Returns 0.
    global thread_exit
    thread_exit = True
    return 0
craigdo@4772
   533
craigdo@4772
   534
#
craigdo@4772
   535
# Waf can be configured to compile in debug or optimized modes.  In each
craigdo@4772
   536
# case, the resulting built goes into a different directory.  If we want
craigdo@4772
   537
# test tests to run from the correct code-base, we have to figure out which
craigdo@4772
   538
# mode waf is running in.  This is called its active variant.
craigdo@4772
   539
#
craigdo@4772
   540
# XXX This function pokes around in the waf internal state file.  To be a
craigdo@4772
   541
# little less hacky, we should add a commmand to waf to return this info
craigdo@4772
   542
# and use that result.
craigdo@4772
   543
#
craigdo@4772
   544
def read_waf_active_variant():
    # Determine the waf active variant (e.g. debug or optimized) by scanning
    # the waf cache.  The cache file is a sequence of python assignment
    # statements, so exec'ing the matching line defines the global
    # NS3_ACTIVE_VARIANT used throughout the rest of this script.
    for line in open("build/c4che/default.cache.py").readlines():
        if line.startswith("NS3_ACTIVE_VARIANT"):
            # NOTE: exec of build-system-generated (trusted, local) content;
            # this injects NS3_ACTIVE_VARIANT into our module globals.
            exec(line, globals())
            break

    if options.verbose:
        print "NS3_ACTIVE_VARIANT == %s" % NS3_ACTIVE_VARIANT
craigdo@4772
   552
craigdo@4772
   553
#
craigdo@4772
   554
# In general, the build process itself naturally takes care of figuring out
craigdo@4772
   555
# which tests are built into the test runner.  For example, if waf configure
craigdo@4772
   556
# determines that ENABLE_EMU is false due to some missing dependency,
craigdo@4772
   557
# the tests for the emu net device simply will not be built and will 
craigdo@4772
   558
# therefore not be included in the built test runner.
craigdo@4772
   559
#
craigdo@4772
   560
# Examples, however, are a different story.  In that case, we are just given
craigdo@4772
   561
# a list of examples that could be run.  Instead of just failing, for example,
craigdo@4772
   562
# nsc-tcp-zoo if NSC is not present, we look into the waf saved configuration
craigdo@4772
   563
# for relevant configuration items.  
craigdo@4772
   564
#
craigdo@4772
   565
# XXX This function pokes around in the waf internal state file.  To be a
craigdo@4772
   566
# little less hacky, we should add a commmand to waf to return this info
craigdo@4772
   567
# and use that result.
craigdo@4772
   568
#
craigdo@4772
   569
def read_waf_config():
    # Load the interesting configuration items (see interesting_config_items,
    # defined earlier in this file) from the active variant's waf cache.
    # Each matching cache line is a python assignment, so exec'ing it defines
    # the corresponding global (e.g. feature-enable flags used to decide
    # which examples can run).  Requires read_waf_active_variant() to have
    # set NS3_ACTIVE_VARIANT first.
    for line in open("build/c4che/%s.cache.py" % NS3_ACTIVE_VARIANT).readlines():
        for item in interesting_config_items:
            if line.startswith(item):
                # Trusted build-system output; defines <item> as a global.
                exec(line, globals())

    if options.verbose:
        for item in interesting_config_items:
            print "%s ==" % item, eval(item)
craigdo@4772
   578
craigdo@4772
   579
#
craigdo@4772
   580
# It seems pointless to fork a process to run waf to fork a process to run
craigdo@4772
   581
# the test runner, so we just run the test runner directly.  The main thing 
craigdo@4772
   582
# that waf would do for us would be to sort out the shared library path but
craigdo@4772
   583
# we can deal with that easily and do here.
craigdo@4772
   584
#
craigdo@4772
   585
# There can be many different ns-3 repositories on a system, and each has 
craigdo@4772
   586
# its own shared libraries, so ns-3 doesn't hardcode a shared library search
craigdo@4772
   587
# path -- it is cooked up dynamically, so we do that too.
craigdo@4772
   588
#
craigdo@6227
   589
def make_paths():
craigdo@5459
   590
    have_DYLD_LIBRARY_PATH = False
craigdo@5459
   591
    have_LD_LIBRARY_PATH = False
craigdo@5459
   592
    have_PATH = False
craigdo@6227
   593
    have_PYTHONPATH = False
craigdo@4772
   594
craigdo@5459
   595
    keys = os.environ.keys()
craigdo@5459
   596
    for key in keys:
craigdo@5459
   597
        if key == "DYLD_LIBRARY_PATH":
craigdo@5459
   598
            have_DYLD_LIBRARY_PATH = True
craigdo@5459
   599
        if key == "LD_LIBRARY_PATH":
craigdo@5459
   600
            have_LD_LIBRARY_PATH = True
craigdo@5459
   601
        if key == "PATH":
craigdo@5459
   602
            have_PATH = True
craigdo@6227
   603
        if key == "PYTHONPATH":
craigdo@6227
   604
            have_PYTHONPATH = True
craigdo@6227
   605
craigdo@6227
   606
    pypath = os.environ["PYTHONPATH"] = os.path.join (NS3_BUILDDIR, NS3_ACTIVE_VARIANT, "bindings", "python")
craigdo@6227
   607
craigdo@6227
   608
    if not have_PYTHONPATH:
craigdo@6227
   609
        os.environ["PYTHONPATH"] = pypath
craigdo@6227
   610
    else:
craigdo@6227
   611
        os.environ["PYTHONPATH"] += ":" + pypath
craigdo@6227
   612
craigdo@6227
   613
    if options.verbose:
craigdo@6227
   614
        print "os.environ[\"PYTHONPATH\"] == %s" % os.environ["PYTHONPATH"]
craigdo@4772
   615
craigdo@4772
   616
    if sys.platform == "darwin":
craigdo@5459
   617
        if not have_DYLD_LIBRARY_PATH:
craigdo@5459
   618
            os.environ["DYLD_LIBRARY_PATH"] = ""
craigdo@5459
   619
        for path in NS3_MODULE_PATH:
craigdo@5459
   620
            os.environ["DYLD_LIBRARY_PATH"] += ":" + path
craigdo@5459
   621
        if options.verbose:
fmoatamr@5466
   622
            print "os.environ[\"DYLD_LIBRARY_PATH\"] == %s" % os.environ["DYLD_LIBRARY_PATH"]
craigdo@4772
   623
    elif sys.platform == "win32":
craigdo@5459
   624
        if not have_PATH:
craigdo@5459
   625
            os.environ["PATH"] = ""
craigdo@5459
   626
        for path in NS3_MODULE_PATH:
craigdo@5459
   627
            os.environ["PATH"] += ';' + path
craigdo@5459
   628
        if options.verbose:
craigdo@5459
   629
            print "os.environ[\"PATH\"] == %s" % os.environ["PATH"]
craigdo@4772
   630
    elif sys.platform == "cygwin":
craigdo@5459
   631
        if not have_PATH:
craigdo@5459
   632
            os.environ["PATH"] = ""
craigdo@5459
   633
        for path in NS3_MODULE_PATH:
craigdo@5459
   634
            os.environ["PATH"] += ":" + path
craigdo@5459
   635
        if options.verbose:
craigdo@5459
   636
            print "os.environ[\"PATH\"] == %s" % os.environ["PATH"]
craigdo@5459
   637
    else:
craigdo@5459
   638
        if not have_LD_LIBRARY_PATH:
craigdo@5459
   639
            os.environ["LD_LIBRARY_PATH"] = ""
craigdo@5459
   640
        for path in NS3_MODULE_PATH:
craigdo@5459
   641
            os.environ["LD_LIBRARY_PATH"] += ":" + path
craigdo@5459
   642
        if options.verbose:
craigdo@5459
   643
            print "os.environ[\"LD_LIBRARY_PATH\"] == %s" % os.environ["LD_LIBRARY_PATH"]
craigdo@5275
   644
faker@5909
   645
#
faker@5909
   646
# Short note on generating suppressions:
faker@5909
   647
#
faker@5909
   648
# See the valgrind documentation for a description of suppressions.  The easiest
faker@5909
   649
# way to generate a suppression expression is by using the valgrind 
faker@5909
   650
# --gen-suppressions option.  To do that you have to figure out how to run the 
faker@5909
   651
# test in question.
faker@5909
   652
#
faker@5909
   653
# If you do "test.py -v -g -s <suitename> then test.py will output most of what
faker@5909
   654
# you need.  For example, if you are getting a valgrind error in the
faker@5909
   655
# devices-mesh-dot11s-regression test suite, you can run:
faker@5909
   656
#
faker@5909
   657
#   ./test.py -v -g -s devices-mesh-dot11s-regression 
faker@5909
   658
#
faker@5909
   659
# You should see in the verbose output something that looks like:
faker@5909
   660
#
faker@5909
   661
#   Synchronously execute valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
faker@5909
   662
#   --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/test-runner 
faker@5909
   663
#   --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev 
faker@5909
   664
#   --tempdir=testpy-output/2010-01-12-22-47-50-CUT 
faker@5909
   665
#   --out=testpy-output/2010-01-12-22-47-50-CUT/devices-mesh-dot11s-regression.xml
faker@5909
   666
#
faker@5909
   667
# You need to pull out the useful pieces, and so could run the following to 
faker@5909
   668
# reproduce your error:
faker@5909
   669
#
faker@5909
   670
#   valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
faker@5909
   671
#   --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/test-runner 
faker@5909
   672
#   --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev 
faker@5909
   673
#   --tempdir=testpy-output 
faker@5909
   674
#
faker@5909
   675
# Hint: Use the first part of the command as is, and point the "tempdir" to 
faker@5909
   676
# somewhere real.  You don't need to specify an "out" file.
faker@5909
   677
#
faker@5909
   678
# When you run the above command you should see your valgrind error.  The 
faker@5909
   679
# suppression expression(s) can be generated by adding the --gen-suppressions=yes
faker@5909
   680
# option to valgrind.  Use something like:
faker@5909
   681
#
faker@5909
   682
#   valgrind --gen-suppressions=yes --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
faker@5909
   683
#   --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/test-runner 
faker@5909
   684
#   --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev 
faker@5909
   685
#   --tempdir=testpy-output 
faker@5909
   686
#
faker@5909
   687
# Now when valgrind detects an error it will ask:
faker@5909
   688
#
faker@5909
   689
#   ==27235== ---- Print suppression ? --- [Return/N/n/Y/y/C/c] ----
faker@5909
   690
#
faker@5909
   691
# to which you just enter 'y'<ret>.
faker@5909
   692
#
faker@5909
   693
# You will be provided with a suppression expression that looks something like
faker@5909
   694
# the following:
faker@5909
   695
#   {
faker@5909
   696
#     <insert_a_suppression_name_here>
faker@5909
   697
#     Memcheck:Addr8
faker@5909
   698
#     fun:_ZN3ns36dot11s15HwmpProtocolMac8SendPreqESt6vectorINS0_6IePreqESaIS3_EE
faker@5909
   699
#     fun:_ZN3ns36dot11s15HwmpProtocolMac10SendMyPreqEv
faker@5909
   700
#     fun:_ZN3ns36dot11s15HwmpProtocolMac18RequestDestinationENS_12Mac48AddressEjj
faker@5909
   701
#     ...
faker@5909
   702
#     the rest of the stack frame
faker@5909
   703
#     ...
faker@5909
   704
#   }
faker@5909
   705
#
faker@5909
   706
# You need to add a supression name which will only be printed out by valgrind in 
faker@5909
   707
# verbose mode (but it needs to be there in any case).  The entire stack frame is
faker@5909
   708
# shown to completely characterize the error, but in most cases you won't need 
faker@5909
   709
# all of that info.  For example, if you want to turn off all errors that happen
faker@5909
   710
# when the function (fun:) is called, you can just delete the rest of the stack
faker@5909
   711
# frame.  You can also use wildcards to make the mangled signatures more readable.
faker@5909
   712
#
faker@5909
   713
# I added the following to the testpy.supp file for this particular error:
faker@5909
   714
#
faker@5909
   715
#   {
faker@5909
   716
#     Supress invalid read size errors in SendPreq() when using HwmpProtocolMac
faker@5909
   717
#     Memcheck:Addr8
faker@5909
   718
#     fun:*HwmpProtocolMac*SendPreq*
faker@5909
   719
#   }
faker@5909
   720
#
faker@5909
   721
# Now, when you run valgrind the error will be suppressed.
faker@5909
   722
#
faker@5909
   723
# Name of the valgrind suppressions file.  run_job_synchronously() joins it
# onto the directory above NS3_BUILDDIR (the top of the repository) when a
# job is run under valgrind.
VALGRIND_SUPPRESSIONS_FILE = "testpy.supp"
faker@5909
   724
craigdo@6200
   725
def run_job_synchronously(shell_command, directory, valgrind, is_python):
    # Execute a single test suite or example in a child process and block
    # until it completes.
    #
    #  shell_command: the program to run with its arguments.  For python
    #      scripts this is joined onto the directory above NS3_BUILDDIR;
    #      otherwise it is joined onto the active variant's build directory.
    #  directory:     working directory for the child process.
    #  valgrind:      if True, wrap the command in valgrind with our
    #      suppressions file, full leak checking and --error-exitcode=2.
    #  is_python:     if True, run shell_command via the python interpreter.
    #
    # Returns a tuple (returncode, stdout, stderr, elapsed_wall_clock_time).
    (base, build) = os.path.split (NS3_BUILDDIR)
    suppressions_path = os.path.join (base, VALGRIND_SUPPRESSIONS_FILE)

    if is_python:
        path_cmd = "python " + os.path.join (base, shell_command)
    else:
        path_cmd = os.path.join (NS3_BUILDDIR, NS3_ACTIVE_VARIANT, shell_command)

    if valgrind:
        cmd = "valgrind --suppressions=%s --leak-check=full --show-reachable=yes --error-exitcode=2 %s" % (suppressions_path, 
            path_cmd)
    else:
        cmd = path_cmd

    if options.verbose:
        print "Synchronously execute %s" % cmd

    start_time = time.time()
    # shell=True so the composed command string (possibly the valgrind
    # wrapper) is parsed by the shell; both output streams are captured.
    proc = subprocess.Popen(cmd, shell = True, cwd = directory, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout_results, stderr_results = proc.communicate()
    elapsed_time = time.time() - start_time

    retval = proc.returncode

    #
    # valgrind sometimes has its own idea about what kind of memory management
    # errors are important.  We want to detect *any* leaks, so the way to do 
    # that is to look for the presence of a valgrind leak summary section.
    #
    # If another error has occurred (like a test suite has failed), we don't 
    # want to trump that error, so only do the valgrind output scan if the 
    # test has otherwise passed (return code was zero).
    #
    # NOTE(review): the substring matches valgrind's "==<pid>== LEAK SUMMARY:"
    # report line on stderr; 2 mirrors --error-exitcode above.
    #
    if valgrind and retval == 0 and "== LEAK SUMMARY:" in stderr_results:
        retval = 2
    
    if options.verbose:
        print "Return code = ", retval
        print "stderr = ", stderr_results

    return (retval, stdout_results, stderr_results, elapsed_time)
craigdo@4772
   767
craigdo@4772
   768
#
craigdo@4772
   769
# This class defines a unit of testing work.  It will typically refer to
craigdo@4772
   770
# a test suite to run using the test-runner, or an example to run directly.
craigdo@4772
   771
#
craigdo@5415
   772
class Job:
    """One unit of testing work.

    A Job typically describes either a test suite to run through the
    test-runner, an example (C++ or python) to run directly, or a special
    "break" marker telling a worker thread to shut down.  All fields are
    populated through the set_* methods below and read back by the worker
    threads and the report generator.
    """

    def __init__(self):
        # What kind of job this is.
        self.is_break = False
        self.is_skip = False
        self.is_example = False
        self.is_pyexample = False
        # What to run and how to present it.
        self.shell_command = ""
        self.display_name = ""
        # Where to run it and where its artifacts go.
        self.basedir = ""
        self.tempdir = ""
        self.cwd = ""
        self.tmp_file_name = ""
        # Filled in once the job has executed.
        self.returncode = False
        self.elapsed_time = 0

    def set_is_break(self, is_break):
        """Mark this as the special job telling a worker thread to exit."""
        self.is_break = is_break

    def set_is_skip(self, is_skip):
        """Mark this job as skipped.

        Skipped jobs still flow through the worker threads so that PASS,
        FAIL, CRASH and SKIP processing all happens in one place.
        """
        self.is_skip = is_skip

    def set_is_example(self, is_example):
        """Mark this job as a (C++) example.

        Examples are unaware they are being run as tests, so they need
        special-case processing to look like tests.
        """
        self.is_example = is_example

    def set_is_pyexample(self, is_pyexample):
        """Mark this job as a python example (same caveats as examples)."""
        self.is_pyexample = is_pyexample

    def set_shell_command(self, shell_command):
        """Set the command to execute, e.g. "utils/test-runner --suite=x"."""
        self.shell_command = shell_command

    def set_display_name(self, display_name):
        """Set the name shown in reports, e.g. "some-test-suite"."""
        self.display_name = display_name

    def set_basedir(self, basedir):
        """Set the repository base directory (where test vectors live),
        e.g. "/home/user/repos/ns-3-dev"."""
        self.basedir = basedir

    def set_tempdir(self, tempdir):
        """Set the directory for any temporary files the test writes."""
        self.tempdir = tempdir

    def set_cwd(self, cwd):
        """Set the working directory given to the executing test (where
        examples dump the pcap files we ignore), e.g. "/tmp/unchecked-traces"."""
        self.cwd = cwd

    def set_tmp_file_name(self, tmp_file_name):
        """Set this job's private results file; jobs run in parallel, so
        each needs its own output file, merged into one XML file later."""
        self.tmp_file_name = tmp_file_name

    def set_returncode(self, returncode):
        """Record the process return code from executing the job."""
        self.returncode = returncode

    def set_elapsed_time(self, elapsed_time):
        """Record the wall-clock time the job took to execute."""
        self.elapsed_time = elapsed_time
craigdo@5459
   886
craigdo@4772
   887
#
craigdo@4772
   888
# The worker thread class that handles the actual running of a given test.
craigdo@4772
   889
# Once spawned, it receives requests for work through its input_queue and
craigdo@4772
   890
# ships the results back through the output_queue.
craigdo@4772
   891
#
craigdo@4772
   892
class worker_thread(threading.Thread):
    # A worker that pulls Job objects off input_queue, runs them, and pushes
    # the completed Job objects (with returncode and elapsed time filled in)
    # onto output_queue.  It runs until handed a Job with is_break set, or
    # until the global thread_exit flag is raised by the SIGINT handler.

    def __init__(self, input_queue, output_queue):
        # input_queue:  Queue of Job objects to execute.
        # output_queue: Queue onto which finished Job objects are placed.
        threading.Thread.__init__(self)
        self.input_queue = input_queue
        self.output_queue = output_queue

    def run(self):
        while True:
            # Blocks until a job is available.
            job = self.input_queue.get()
            #
            # Worker threads continue running until explicitly told to stop
            # with a special "break" job.
            #
            if job.is_break:
                return
            #
            # If the global interrupt handler set the thread_exit variable,
            # we stop doing real work and just report back a "break" as if
            # the normal command processing had happened.
            #
            if thread_exit == True:
                job.set_is_break(True)
                self.output_queue.put(job)
                continue

            #
            # If we are actually supposed to skip this job, do so.  Note that
            # if is_skip is true, returncode is undefined.
            #
            if job.is_skip:
                if options.verbose:
                    print "Skip %s" % job.shell_command
                self.output_queue.put(job)
                continue

            #
            # Otherwise go about the business of running tests as normal.
            #
            else:
                if options.verbose:
                    print "Launch %s" % job.shell_command

                if job.is_example or job.is_pyexample:
                    #
                    # If we have an example, the shell command is all we need to
                    # know.  It will be something like "examples/udp-echo" or 
                    # "examples/mixed-wireless.py"
                    #
                    (job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command, 
                        job.cwd, options.valgrind, job.is_pyexample)
                else:
                    #
                    # If we're a test suite, we need to provide a little more info
                    # to the test runner, specifically the base directory and temp
                    # file name
                    #
                    (job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command + 
                        " --basedir=%s --tempdir=%s --out=%s" % (job.basedir, job.tempdir, job.tmp_file_name), 
                        job.cwd, options.valgrind, False)

                job.set_elapsed_time(et)

                if options.verbose:
                    print "returncode = %d" % job.returncode
                    print "---------- begin standard out ----------"
                    print standard_out
                    print "---------- begin standard err ----------"
                    print standard_err
                    print "---------- end standard err ----------"

                # Hand the completed job back to the collector.
                self.output_queue.put(job)
craigdo@4772
   963
craigdo@4772
   964
#
craigdo@4772
   965
# This is the main function that does the work of interacting with the test-runner
craigdo@4772
   966
# itself.
craigdo@4772
   967
#
craigdo@4772
   968
def run_tests():
craigdo@4772
   969
    #
craigdo@4772
   970
    # Run waf to make sure that everything is built, configured and ready to go
craigdo@5369
   971
    # unless we are explicitly told not to.  We want to be careful about causing
craigdo@5369
   972
    # our users pain while waiting for extraneous stuff to compile and link, so
craigdo@5369
   973
    # we allow users that know what they're doing to not invoke waf at all.
craigdo@4772
   974
    #
craigdo@5369
   975
    if not options.nowaf:
craigdo@5369
   976
craigdo@5369
   977
        #
craigdo@5369
   978
        # If the user is running the "kinds" or "list" options, there is an 
craigdo@5369
   979
        # implied dependency on the test-runner since we call that program
craigdo@5369
   980
        # if those options are selected.  We will exit after processing those
craigdo@5369
   981
        # options, so if we see them, we can safely only build the test-runner.
craigdo@5369
   982
        #
craigdo@5369
   983
        # If the user has constrained us to running only a particular type of
craigdo@5369
   984
        # file, we can only ask waf to build what we know will be necessary.
craigdo@5369
   985
        # For example, if the user only wants to run BVT tests, we only have
craigdo@5369
   986
        # to build the test-runner and can ignore all of the examples.
craigdo@5369
   987
        #
craigdo@5470
   988
        # If the user only wants to run a single example, then we can just build
craigdo@5470
   989
        # that example.
craigdo@5470
   990
        #
craigdo@5470
   991
        # If there is no constraint, then we have to build everything since the
craigdo@5470
   992
        # user wants to run everything.
craigdo@5470
   993
        #
craigdo@5369
   994
        if options.kinds or options.list or (len(options.constrain) and options.constrain in core_kinds):
craigdo@5470
   995
            if sys.platform == "win32":
craigdo@5470
   996
                waf_cmd = "waf --target=test-runner"
craigdo@5470
   997
            else:
craigdo@5470
   998
                waf_cmd = "./waf --target=test-runner"
craigdo@5470
   999
        elif len(options.example):
craigdo@5470
  1000
            if sys.platform == "win32":
craigdo@5470
  1001
                waf_cmd = "waf --target=%s" % os.path.basename(options.example)
craigdo@5470
  1002
            else:
craigdo@5470
  1003
                waf_cmd = "./waf --target=%s" % os.path.basename(options.example)
craigdo@5470
  1004
craigdo@5369
  1005
        else:
craigdo@5470
  1006
            if sys.platform == "win32":
craigdo@5470
  1007
                waf_cmd = "waf"
craigdo@5470
  1008
            else:
craigdo@5470
  1009
                waf_cmd = "./waf"
craigdo@5369
  1010
craigdo@5470
  1011
        if options.verbose:
craigdo@5470
  1012
            print "Building: %s" % waf_cmd
craigdo@5470
  1013
craigdo@5470
  1014
        proc = subprocess.Popen(waf_cmd, shell = True)
craigdo@4772
  1015
        proc.communicate()
mazo@6315
  1016
        if proc.returncode:
mazo@6315
  1017
            print >> sys.stderr, "Waf died. Not running tests"
mazo@6315
  1018
            return proc.returncode
craigdo@4772
  1019
craigdo@4772
  1020
    #
craigdo@4772
  1021
    # Pull some interesting configuration information out of waf, primarily
craigdo@4772
  1022
    # so we can know where executables can be found, but also to tell us what
craigdo@4772
  1023
    # pieces of the system have been built.  This will tell us what examples 
craigdo@4772
  1024
    # are runnable.
craigdo@4772
  1025
    #
craigdo@4772
  1026
    read_waf_active_variant()
craigdo@4772
  1027
    read_waf_config()
craigdo@6227
  1028
    make_paths()
craigdo@4772
  1029
craigdo@4772
  1030
    #
craigdo@6083
  1031
    # If lots of logging is enabled, we can crash Python when it tries to 
craigdo@6083
  1032
    # save all of the text.  We just don't allow logging to be turned on when
craigdo@6083
  1033
    # test.py runs.  If you want to see logging output from your tests, you
craigdo@6083
  1034
    # have to run them using the test-runner directly.
craigdo@6083
  1035
    #
craigdo@6083
  1036
    os.environ["NS_LOG"] = ""
craigdo@6083
  1037
craigdo@6083
  1038
    #
craigdo@4772
  1039
    # There are a couple of options that imply we can exit before starting
craigdo@4772
  1040
    # up a bunch of threads and running tests.  Let's detect these cases and 
craigdo@4772
  1041
    # handle them without doing all of the hard work.
craigdo@4772
  1042
    #
craigdo@4772
  1043
    if options.kinds:
craigdo@5459
  1044
        path_cmd = os.path.join("utils", "test-runner --kinds")
craigdo@6200
  1045
        (rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
craigdo@4772
  1046
        print standard_out
craigdo@4772
  1047
craigdo@4772
  1048
    if options.list:
craigdo@5459
  1049
        path_cmd = os.path.join("utils", "test-runner --list")
craigdo@6200
  1050
        (rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
craigdo@4772
  1051
        print standard_out
craigdo@4772
  1052
craigdo@4772
  1053
    if options.kinds or options.list:
craigdo@4772
  1054
        return
craigdo@4772
  1055
craigdo@4772
  1056
    #
craigdo@4772
  1057
    # We communicate results in two ways.  First, a simple message relating 
craigdo@5402
  1058
    # PASS, FAIL, CRASH or SKIP is always written to the standard output.  It 
craigdo@5402
  1059
    # is expected that this will be one of the main use cases.  A developer can
craigdo@4772
  1060
    # just run test.py with no options and see that all of the tests still 
craigdo@4772
  1061
    # pass.
craigdo@4772
  1062
    #
craigdo@4772
  1063
    # The second main use case is when detailed status is requested (with the
craigdo@4772
  1064
    # --text or --html options).  Typically this will be text if a developer
craigdo@4772
  1065
    # finds a problem, or HTML for nightly builds.  In these cases, an
craigdo@4772
  1066
    # XML file is written containing the status messages from the test suites.
craigdo@4772
  1067
    # This file is then read and translated into text or HTML.  It is expected
craigdo@5412
  1068
    # that nobody will really be interested in the XML, so we write it somewhere
craigdo@5412
  1069
    # with a unique name (time) to avoid collisions.  In case an error happens, we
craigdo@5412
  1070
    # provide a runtime option to retain the temporary files.
craigdo@4772
  1071
    #
craigdo@4772
  1072
    # When we run examples as smoke tests, they are going to want to create
craigdo@4772
  1073
    # lots and lots of trace files.  We aren't really interested in the contents
craigdo@5412
  1074
    # of the trace files, so we also just stash them off in the temporary dir.
craigdo@5412
  1075
    # The retain option also causes these unchecked trace files to be kept.
craigdo@4772
  1076
    #
craigdo@5412
  1077
    date_and_time = time.strftime("%Y-%m-%d-%H-%M-%S-CUT", time.gmtime())
craigdo@5412
  1078
craigdo@4772
  1079
    if not os.path.exists(TMP_OUTPUT_DIR):
craigdo@4772
  1080
        os.makedirs(TMP_OUTPUT_DIR)
craigdo@4772
  1081
craigdo@5412
  1082
    testpy_output_dir = os.path.join(TMP_OUTPUT_DIR, date_and_time);
craigdo@5412
  1083
craigdo@5412
  1084
    if not os.path.exists(testpy_output_dir):
craigdo@5412
  1085
        os.makedirs(testpy_output_dir)
craigdo@4772
  1086
craigdo@4772
  1087
    #
craigdo@4772
  1088
    # Create the main output file and start filling it with XML.  We need to 
craigdo@4772
  1089
    # do this since the tests will just append individual results to this file.
craigdo@4772
  1090
    #
craigdo@5412
  1091
    xml_results_file = os.path.join(testpy_output_dir, "results.xml")
craigdo@4772
  1092
    f = open(xml_results_file, 'w')
craigdo@4772
  1093
    f.write('<?xml version="1.0"?>\n')
craigdo@4772
  1094
    f.write('<TestResults>\n')
craigdo@4772
  1095
    f.close()
craigdo@4772
  1096
craigdo@4772
  1097
    #
craigdo@4772
  1098
    # We need to figure out what test suites to execute.  We are either given one 
craigdo@6200
  1099
    # suite or example explicitly via the --suite or --example/--pyexample option,
craigdo@6200
  1100
    # or we need to call into the test runner and ask it to list all of the available
craigdo@4772
  1101
    # test suites.  Further, we need to provide the constraint information if it
craigdo@4772
  1102
    # has been given to us.
craigdo@4772
  1103
    # 
craigdo@4772
  1104
    # This translates into allowing the following options with respect to the 
craigdo@4772
  1105
    # suites
craigdo@4772
  1106
    #
craigdo@5369
  1107
    #  ./test.py:                                           run all of the suites and examples
craigdo@5369
  1108
    #  ./test.py --constrain=core:                          run all of the suites of all kinds
craigdo@4772
  1109
    #  ./test.py --constrain=unit:                          run all unit suites
craigdo@5369
  1110
    #  ./test.py --suite=some-test-suite:                   run a single suite
craigdo@6200
  1111
    #  ./test.py --example=udp/udp-echo:                    run no test suites
craigdo@6200
  1112
    #  ./test.py --pyexample=wireless/mixed-wireless.py:    run no test suites
craigdo@4772
  1113
    #  ./test.py --suite=some-suite --example=some-example: run the single suite
craigdo@4772
  1114
    #
craigdo@4772
  1115
    # We can also use the --constrain option to provide an ordering of test 
craigdo@4772
  1116
    # execution quite easily.
craigdo@4772
  1117
    #
craigdo@4772
  1118
    if len(options.suite):
craigdo@4772
  1119
        suites = options.suite + "\n"
craigdo@6200
  1120
    elif len(options.example) == 0 and len(options.pyexample) == 0:
craigdo@4772
  1121
        if len(options.constrain):
craigdo@5459
  1122
            path_cmd = os.path.join("utils", "test-runner --list --constrain=%s" % options.constrain)
craigdo@6200
  1123
            (rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
craigdo@4772
  1124
        else:
craigdo@5459
  1125
            path_cmd = os.path.join("utils", "test-runner --list")
craigdo@6200
  1126
            (rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
craigdo@4772
  1127
    else:
craigdo@4772
  1128
        suites = ""
craigdo@4772
  1129
craigdo@4772
  1130
    #
craigdo@4772
  1131
    # suite_list will be either a single test suite name that the user has 
craigdo@4772
  1132
    # indicated she wants to run or a list of test suites provided by
craigdo@4772
  1133
    # the test-runner possibly according to user provided constraints.
craigdo@4772
  1134
    # We go through the trouble of setting up the parallel execution 
craigdo@4772
  1135
    # even in the case of a single suite to avoid having to process the
craigdo@4772
  1136
    # results in two different places.
craigdo@4772
  1137
    #
craigdo@4772
  1138
    suite_list = suites.split('\n')
craigdo@4772
  1139
craigdo@4772
  1140
    #
craigdo@4772
  1141
    # We now have a possibly large number of test suites to run, so we want to
craigdo@4772
  1142
    # run them in parallel.  We're going to spin up a number of worker threads
craigdo@4772
  1143
    # that will run our test jobs for us.
craigdo@4772
  1144
    #
craigdo@4772
  1145
    input_queue = Queue.Queue(0)
craigdo@4772
  1146
    output_queue = Queue.Queue(0)
craigdo@4772
  1147
craigdo@4772
  1148
    jobs = 0
craigdo@4772
  1149
    threads=[]
craigdo@4772
  1150
craigdo@5273
  1151
    #
craigdo@5273
  1152
    # In Python 2.6 you can just use multiprocessing module, but we don't want
craigdo@5273
  1153
    # to introduce that dependency yet; so we jump through a few hoops.
craigdo@5273
  1154
    #
craigdo@5273
  1155
    processors = 1
craigdo@5273
  1156
craigdo@5459
  1157
    if sys.platform != "win32":
craigdo@5459
  1158
        if 'SC_NPROCESSORS_ONLN'in os.sysconf_names:
craigdo@5459
  1159
            processors = os.sysconf('SC_NPROCESSORS_ONLN')
craigdo@5459
  1160
        else:
craigdo@5459
  1161
            proc = subprocess.Popen("sysctl -n hw.ncpu", shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
craigdo@5459
  1162
            stdout_results, stderr_results = proc.communicate()
craigdo@5459
  1163
            if len(stderr_results) == 0:
craigdo@5459
  1164
                processors = int(stdout_results)
craigdo@5273
  1165
craigdo@5273
  1166
    #
craigdo@5273
  1167
    # Now, spin up one thread per processor which will eventually mean one test
craigdo@5273
  1168
    # per processor running concurrently.
craigdo@5273
  1169
    #
craigdo@4772
  1170
    for i in range(processors):
craigdo@4772
  1171
        thread = worker_thread(input_queue, output_queue)
craigdo@4772
  1172
        threads.append(thread)
craigdo@4772
  1173
        thread.start()
craigdo@4772
  1174
craigdo@4772
  1175
    #
craigdo@5402
  1176
    # Keep track of some summary statistics
craigdo@5402
  1177
    #
craigdo@5402
  1178
    total_tests = 0
craigdo@5402
  1179
    skipped_tests = 0
craigdo@5402
  1180
craigdo@5402
  1181
    #
craigdo@4772
  1182
    # We now have worker threads spun up, and a list of work to do.  So, run 
craigdo@4772
  1183
    # through the list of test suites and dispatch a job to run each one.
craigdo@4772
  1184
    # 
craigdo@4772
  1185
    # Dispatching will run with unlimited speed and the worker threads will 
craigdo@4772
  1186
    # execute as fast as possible from the queue.
craigdo@4772
  1187
    #
craigdo@5402
  1188
    # Note that we actually dispatch tests to be skipped, so all of the 
craigdo@5402
  1189
    # PASS, FAIL, CRASH and SKIP processing is done in the same place.
craigdo@5402
  1190
    #
craigdo@4772
  1191
    for test in suite_list:
craigdo@5461
  1192
        test = test.strip()
craigdo@4772
  1193
        if len(test):
craigdo@4772
  1194
            job = Job()
craigdo@4772
  1195
            job.set_is_example(False)
craigdo@6200
  1196
            job.set_is_pyexample(False)
craigdo@4772
  1197
            job.set_display_name(test)
craigdo@5412
  1198
            job.set_tmp_file_name(os.path.join(testpy_output_dir, "%s.xml" % test))
craigdo@4772
  1199
            job.set_cwd(os.getcwd())
craigdo@4772
  1200
            job.set_basedir(os.getcwd())
craigdo@5481
  1201
            job.set_tempdir(testpy_output_dir)
craigdo@5324
  1202
            if (options.multiple):
craigdo@5324
  1203
                multiple = " --multiple"
craigdo@5324
  1204
            else:
craigdo@5324
  1205
                multiple = ""
craigdo@5324
  1206
craigdo@5461
  1207
            path_cmd = os.path.join("utils", "test-runner --suite=%s%s" % (test, multiple))
craigdo@5459
  1208
            job.set_shell_command(path_cmd)
craigdo@4772
  1209
craigdo@5402
  1210
            if options.valgrind and test in core_valgrind_skip_tests:
craigdo@5402
  1211
                job.set_is_skip(True)
craigdo@5402
  1212
craigdo@4772
  1213
            if options.verbose:
craigdo@4772
  1214
                print "Queue %s" % test
craigdo@4772
  1215
craigdo@4772
  1216
            input_queue.put(job)
craigdo@4772
  1217
            jobs = jobs + 1
craigdo@5279
  1218
            total_tests = total_tests + 1
craigdo@4772
  1219
    
craigdo@4772
  1220
    #
craigdo@4772
  1221
    # We've taken care of the discovered or specified test suites.  Now we
craigdo@4772
  1222
    # have to deal with examples run as smoke tests.  We have a list of all of
craigdo@4772
  1223
    # the example programs it makes sense to try and run.  Each example will
craigdo@4772
  1224
    # have a condition associated with it that must evaluate to true for us
craigdo@4772
  1225
    # to try and execute it.  This is used to determine if the example has
craigdo@4772
  1226
    # a dependency that is not satisfied.  For example, if an example depends
craigdo@4772
  1227
    # on NSC being configured by waf, that example should have a condition
craigdo@4772
  1228
    # that evaluates to true if NSC is enabled.  For example,
craigdo@4772
  1229
    #
craigdo@4772
  1230
    #      ("tcp-nsc-zoo", "ENABLE_NSC == True"),
craigdo@4772
  1231
    #
craigdo@4772
  1232
    # In this case, the example "tcp-nsc-zoo" will only be run if we find the
craigdo@4772
  1233
    # waf configuration variable "ENABLE_NSC" to be True.
craigdo@4772
  1234
    #
craigdo@4772
  1235
    # We don't care at all how the trace files come out, so we just write them 
craigdo@4772
  1236
    # to a single temporary directory.
craigdo@4772
  1237
    #
craigdo@4772
  1238
    # XXX As it stands, all of the trace files have unique names, and so file
craigdo@4772
  1239
    # collisions can only happen if two instances of an example are running in
craigdo@4772
  1240
    # two versions of the test.py process concurrently.  We may want to create
craigdo@4772
  1241
    # uniquely named temporary traces directories to avoid this problem.
craigdo@4772
  1242
    #
craigdo@4772
  1243
    # We need to figure out what examples to execute.  We are either given one 
craigdo@4772
  1244
    # suite or example explicitly via the --suite or --example option, or we
craigdo@4772
  1245
    # need to walk the list of examples looking for available example 
craigdo@4772
  1246
    # conditions.
craigdo@4772
  1247
    #
craigdo@4772
  1248
    # This translates into allowing the following options with respect to the 
craigdo@4772
  1249
    # suites
craigdo@4772
  1250
    #
craigdo@4772
  1251
    #  ./test.py:                                           run all of the examples
craigdo@4772
  1252
    #  ./test.py --constrain=unit                           run no examples
craigdo@5369
  1253
    #  ./test.py --constrain=example                        run all of the examples
craigdo@6200
  1254
    #  ./test.py --suite=some-test-suite:                   run no examples
craigdo@6200
  1255
    #  ./test.py --example=some-example:                    run the single example
craigdo@6200
  1256
    #  ./test.py --suite=some-suite --example=some-example: run the single example
craigdo@4772
  1257
    #
craigdo@4772
  1258
    # XXX could use constrain to separate out examples used for performance 
craigdo@4772
  1259
    # testing
craigdo@4772
  1260
    #
craigdo@6200
  1261
    if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0:
craigdo@4772
  1262
        if len(options.constrain) == 0 or options.constrain == "example":
craigdo@5369
  1263
            if ENABLE_EXAMPLES:
craigdo@5402
  1264
                for test, do_run, do_valgrind_run in example_tests:
craigdo@5402
  1265
                    if eval(do_run):
craigdo@5369
  1266
                        job = Job()
craigdo@5369
  1267
                        job.set_is_example(True)
craigdo@6200
  1268
                        job.set_is_pyexample(False)
craigdo@5369
  1269
                        job.set_display_name(test)
craigdo@5369
  1270
                        job.set_tmp_file_name("")
craigdo@5412
  1271
                        job.set_cwd(testpy_output_dir)
craigdo@5369
  1272
                        job.set_basedir(os.getcwd())
craigdo@5481
  1273
                        job.set_tempdir(testpy_output_dir)
craigdo@5369
  1274
                        job.set_shell_command("examples/%s" % test)
craigdo@4772
  1275
craigdo@5402
  1276
                        if options.valgrind and not eval(do_valgrind_run):
craigdo@5402
  1277
                            job.set_is_skip (True)
craigdo@5402
  1278
craigdo@5369
  1279
                        if options.verbose:
craigdo@5369
  1280
                            print "Queue %s" % test
craigdo@4772
  1281
craigdo@5369
  1282
                        input_queue.put(job)
craigdo@5369
  1283
                        jobs = jobs + 1
craigdo@5369
  1284
                        total_tests = total_tests + 1
craigdo@5279
  1285
craigdo@4772
  1286
    elif len(options.example):
craigdo@4772
  1287
        #
craigdo@4772
  1288
        # If you tell me to run an example, I will try and run the example
craigdo@4772
  1289
        # irrespective of any condition.
craigdo@4772
  1290
        #
craigdo@4772
  1291
        job = Job()
craigdo@4772
  1292
        job.set_is_example(True)
craigdo@6200
  1293
        job.set_is_pyexample(False)
craigdo@4772
  1294
        job.set_display_name(options.example)
craigdo@4772
  1295
        job.set_tmp_file_name("")
craigdo@5412
  1296
        job.set_cwd(testpy_output_dir)
craigdo@4772
  1297
        job.set_basedir(os.getcwd())
craigdo@5481
  1298
        job.set_tempdir(testpy_output_dir)
craigdo@4772
  1299
        job.set_shell_command("examples/%s" % options.example)
craigdo@4772
  1300
        
craigdo@4772
  1301
        if options.verbose:
craigdo@6200
  1302
            print "Queue %s" % options.example
craigdo@6200
  1303
craigdo@6200
  1304
        input_queue.put(job)
craigdo@6200
  1305
        jobs = jobs + 1
craigdo@6200
  1306
        total_tests = total_tests + 1
craigdo@6200
  1307
craigdo@6200
  1308
    #
craigdo@6200
  1309
    # Run some Python examples as smoke tests.  We have a list of all of
craigdo@6200
  1310
    # the example programs it makes sense to try and run.  Each example will
craigdo@6200
  1311
    # have a condition associated with it that must evaluate to true for us
craigdo@6200
  1312
    # to try and execute it.  This is used to determine if the example has
craigdo@6200
  1313
    # a dependency that is not satisfied.
craigdo@6200
  1314
    #
craigdo@6200
  1315
    # We don't care at all how the trace files come out, so we just write them 
craigdo@6200
  1316
    # to a single temporary directory.
craigdo@6200
  1317
    #
craigdo@6200
  1318
    # We need to figure out what python examples to execute.  We are either 
craigdo@6200
  1319
    # given one pyexample explicitly via the --pyexample option, or we
craigdo@6200
  1320
    # need to walk the list of python examples
craigdo@6200
  1321
    #
craigdo@6200
  1322
    # This translates into allowing the following options with respect to the 
craigdo@6200
  1323
    # suites
craigdo@6200
  1324
    #
craigdo@6200
  1325
    #  ./test.py --constrain=pyexample           run all of the python examples
craigdo@6200
  1326
    #  ./test.py --pyexample=some-example.py:    run the single python example
craigdo@6200
  1327
    #
craigdo@6200
  1328
    if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0:
craigdo@6200
  1329
        if len(options.constrain) == 0 or options.constrain == "pyexample":
craigdo@6247
  1330
            if ENABLE_EXAMPLES:
craigdo@6200
  1331
                for test, do_run in python_tests:
craigdo@6200
  1332
                    if eval(do_run):
craigdo@6200
  1333
                        job = Job()
craigdo@6200
  1334
                        job.set_is_example(False)
craigdo@6200
  1335
                        job.set_is_pyexample(True)
craigdo@6200
  1336
                        job.set_display_name(test)
craigdo@6200
  1337
                        job.set_tmp_file_name("")
craigdo@6200
  1338
                        job.set_cwd(testpy_output_dir)
craigdo@6200
  1339
                        job.set_basedir(os.getcwd())
craigdo@6200
  1340
                        job.set_tempdir(testpy_output_dir)
craigdo@6200
  1341
                        job.set_shell_command("examples/%s" % test)
craigdo@6200
  1342
craigdo@6247
  1343
                        #
craigdo@6247
  1344
                        # Python programs and valgrind do not work and play
craigdo@6247
  1345
                        # well together, so we skip them under valgrind.
craigdo@6247
  1346
                        # We go through the trouble of doing all of this
craigdo@6247
  1347
                        # work to report the skipped tests in a consistent
craigdo@6247
  1348
                        # way through the output formatter.
craigdo@6247
  1349
                        #
craigdo@6247
  1350
                        if options.valgrind:
craigdo@6247
  1351
                            job.set_is_skip (True)
craigdo@6247
  1352
craigdo@6247
  1353
                        #
craigdo@6247
  1354
                        # The user can disable python bindings, so we need
craigdo@6247
  1355
                        # to pay attention to that and give some feedback
craigdo@6247
  1356
                        # that we're not testing them
craigdo@6247
  1357
                        #
craigdo@6247
  1358
                        if not ENABLE_PYTHON_BINDINGS:
craigdo@6200
  1359
                            job.set_is_skip (True)
craigdo@6200
  1360
craigdo@6200
  1361
                        if options.verbose:
craigdo@6200
  1362
                            print "Queue %s" % test
craigdo@6200
  1363
craigdo@6200
  1364
                        input_queue.put(job)
craigdo@6200
  1365
                        jobs = jobs + 1
craigdo@6200
  1366
                        total_tests = total_tests + 1
craigdo@6200
  1367
craigdo@6200
  1368
    elif len(options.pyexample):
craigdo@6200
  1369
        #
craigdo@6200
  1370
        # If you tell me to run a python example, I will try and run the example
craigdo@6200
  1371
        # irrespective of any condition.
craigdo@6200
  1372
        #
craigdo@6200
  1373
        job = Job()
craigdo@6200
  1374
        job.set_is_pyexample(True)
craigdo@6200
  1375
        job.set_display_name(options.pyexample)
craigdo@6200
  1376
        job.set_tmp_file_name("")
craigdo@6200
  1377
        job.set_cwd(testpy_output_dir)
craigdo@6200
  1378
        job.set_basedir(os.getcwd())
craigdo@6200
  1379
        job.set_tempdir(testpy_output_dir)
craigdo@6200
  1380
        job.set_shell_command("examples/%s" % options.pyexample)
craigdo@6200
  1381
        
craigdo@6200
  1382
        if options.verbose:
craigdo@6200
  1383
            print "Queue %s" % options.pyexample
craigdo@4772
  1384
craigdo@4772
  1385
        input_queue.put(job)
craigdo@4772
  1386
        jobs = jobs + 1
craigdo@5279
  1387
        total_tests = total_tests + 1
craigdo@4772
  1388
craigdo@4772
  1389
    #
craigdo@4772
  1390
    # Tell the worker threads to pack up and go home for the day.  Each one
craigdo@4772
  1391
    # will exit when they see their is_break task.
craigdo@4772
  1392
    #
craigdo@4772
  1393
    for i in range(processors):
craigdo@4772
  1394
        job = Job()
craigdo@4772
  1395
        job.set_is_break(True)
craigdo@4772
  1396
        input_queue.put(job)
craigdo@4772
  1397
craigdo@4772
  1398
    #
craigdo@4772
  1399
    # Now all of the tests have been dispatched, so all we have to do here
craigdo@4772
  1400
    # in the main thread is to wait for them to complete.  Keyboard interrupt
craigdo@4772
  1401
    # handling is broken as mentioned above.  We use a signal handler to catch
craigdo@4772
  1402
    # sigint and set a global variable.  When the worker threads sense this
craigdo@4772
  1403
    # they stop doing real work and will just start throwing jobs back at us
craigdo@4772
  1404
    # with is_break set to True.  In this case, there are no real results so we 
craigdo@4772
  1405
    # ignore them.  If there are real results, we always print PASS or FAIL to
craigdo@4772
  1406
    # standard out as a quick indication of what happened.
craigdo@4772
  1407
    #
craigdo@5279
  1408
    passed_tests = 0
craigdo@5279
  1409
    failed_tests = 0
craigdo@5279
  1410
    crashed_tests = 0
craigdo@5370
  1411
    valgrind_errors = 0
craigdo@4772
  1412
    for i in range(jobs):
craigdo@4772
  1413
        job = output_queue.get()
craigdo@4772
  1414
        if job.is_break:
craigdo@4772
  1415
            continue
craigdo@4772
  1416
craigdo@6203
  1417
        if job.is_example or job.is_pyexample:
craigdo@4772
  1418
            kind = "Example"
craigdo@4772
  1419
        else:
craigdo@4772
  1420
            kind = "TestSuite"
craigdo@4772
  1421
craigdo@5402
  1422
        if job.is_skip:
craigdo@5402
  1423
            status = "SKIP"
craigdo@5402
  1424
            skipped_tests = skipped_tests + 1
craigdo@4772
  1425
        else:
craigdo@5402
  1426
            if job.returncode == 0:
craigdo@5402
  1427
                status = "PASS"
craigdo@5402
  1428
                passed_tests = passed_tests + 1
craigdo@5402
  1429
            elif job.returncode == 1:
craigdo@5402
  1430
                failed_tests = failed_tests + 1
craigdo@5402
  1431
                status = "FAIL"
craigdo@5402
  1432
            elif job.returncode == 2:
craigdo@5402
  1433
                valgrind_errors = valgrind_errors + 1
craigdo@5402
  1434
                status = "VALGR"
craigdo@5402
  1435
            else:
craigdo@5402
  1436
                crashed_tests = crashed_tests + 1
craigdo@5402
  1437
                status = "CRASH"
craigdo@4772
  1438
craigdo@4772
  1439
        print "%s: %s %s" % (status, kind, job.display_name)
craigdo@4772
  1440
craigdo@6200
  1441
        if job.is_example or job.is_pyexample:
craigdo@4772
  1442
            #
craigdo@4772
  1443
            # Examples are the odd man out here.  They are written without any
craigdo@4772
  1444
            # knowledge that they are going to be run as a test, so we need to 
craigdo@4772
  1445
            # cook up some kind of output for them.  We're writing an xml file,
craigdo@4772
  1446
            # so we do some simple XML that says we ran the example.
craigdo@4772
  1447
            #
craigdo@4772
  1448
            # XXX We could add some timing information to the examples, i.e. run
craigdo@4772
  1449
            # them through time and print the results here.
craigdo@4772
  1450
            #
craigdo@4772
  1451
            f = open(xml_results_file, 'a')
craigdo@4772
  1452
            f.write('<Example>\n')
craigdo@4772
  1453
            example_name = "  <Name>%s</Name>\n" % job.display_name
craigdo@4772
  1454
            f.write(example_name)
craigdo@5370
  1455
craigdo@5402
  1456
            if status == "PASS":
craigdo@4772
  1457
                f.write('  <Result>PASS</Result>\n')
craigdo@5402
  1458
            elif status == "FAIL":
craigdo@4772
  1459
                f.write('  <Result>FAIL</Result>\n')
craigdo@5402
  1460
            elif status == "VALGR":
craigdo@5370
  1461
                f.write('  <Result>VALGR</Result>\n')
craigdo@5402
  1462
            elif status == "SKIP":
craigdo@5402
  1463
                f.write('  <Result>SKIP</Result>\n')
craigdo@4772
  1464
            else:
craigdo@4772
  1465
                f.write('  <Result>CRASH</Result>\n')
craigdo@4772
  1466
craigdo@5460
  1467
            f.write('  <ElapsedTime>%.3f</ElapsedTime>\n' % job.elapsed_time)
craigdo@4772
  1468
            f.write('</Example>\n')
craigdo@4772
  1469
            f.close()
craigdo@5370
  1470
craigdo@4772
  1471
        else:
craigdo@4772
  1472
            #
craigdo@4772
  1473
            # If we're not running an example, we're running a test suite.
craigdo@4772
  1474
            # These puppies are running concurrently and generating output
craigdo@4772
  1475
            # that was written to a temporary file to avoid collisions.
craigdo@4772
  1476
            #
craigdo@4772
  1477
            # Now that we are executing sequentially in the main thread, we can
craigdo@4772
  1478
            # concatenate the contents of the associated temp file to the main 
craigdo@4772
  1479
            # results file and remove that temp file.
craigdo@4772
  1480
            #
craigdo@4772
  1481
            # One thing to consider is that a test suite can crash just as
craigdo@4772
  1482
            # well as any other program, so we need to deal with that 
craigdo@4772
  1483
            # possibility as well.  If it ran correctly it will return 0
craigdo@4772
  1484
            # if it passed, or 1 if it failed.  In this case, we can count
craigdo@4772
  1485
            # on the results file it saved being complete.  If it crashed, it 
craigdo@4772
  1486
            # will return some other code, and the file should be considered 
craigdo@4772
  1487
            # corrupt and useless.  If the suite didn't create any XML, then
craigdo@4772
  1488
            # we're going to have to do it ourselves.
craigdo@4772
  1489
            #
craigdo@5370
  1490
            # Another issue is how to deal with a valgrind error.  If we run
craigdo@5370
  1491
            # a test suite under valgrind and it passes, we will get a return
craigdo@5370
  1492
            # code of 0 and there will be a valid xml results file since the code
craigdo@5370
  1493
            # ran to completion.  If we get a return code of 1 under valgrind,
craigdo@5370
  1494
            # the test case failed, but valgrind did not find any problems so the
craigdo@5370
  1495
            # test case return code was passed through.  We will have a valid xml
craigdo@5370
  1496
            # results file here as well since the test suite ran.  If we see a 
craigdo@5370
  1497
            # return code of 2, this means that valgrind found an error (we asked
craigdo@5370
  1498
            # it to return 2 if it found a problem in run_job_synchronously) but
craigdo@5370
  1499
            # the suite ran to completion so there is a valid xml results file.
craigdo@5370
  1500
            # If the suite crashes under valgrind we will see some other error 
craigdo@5370
  1501
            # return code (like 139).  If valgrind finds an illegal instruction or
craigdo@5370
  1502
            # some other strange problem, it will die with its own strange return
craigdo@5370
  1503
            # code (like 132).  However, if the test crashes by itself, not under
craigdo@5370
  1504
            # valgrind we will also see some other return code.
craigdo@5370
  1505
            #
craigdo@5370
  1506
            # If the return code is 0, 1, or 2, we have a valid xml file.  If we 
craigdo@5370
  1507
            # get another return code, we have no xml and we can't really say what
craigdo@5370
  1508
            # happened -- maybe the TestSuite crashed, maybe valgrind crashed due
craigdo@5370
  1509
            # to an illegal instruction.  If we get something beside 0-2, we assume
craigdo@5370
  1510
            # a crash and fake up an xml entry.  After this is all done, we still
craigdo@5370
  1511
            # need to indicate a valgrind error somehow, so we fake up an xml entry
craigdo@5370
  1512
            # with a VALGR result.  Thus, in the case of a working TestSuite that
craigdo@5370
  1513
            # fails valgrind, we'll see the PASS entry for the working TestSuite
craigdo@5370
  1514
            # followed by a VALGR failing test suite of the same name.
craigdo@5370
  1515
            #
craigdo@5402
  1516
            if job.is_skip:
craigdo@4772
  1517
                f = open(xml_results_file, 'a')
craigdo@4772
  1518
                f.write("<TestSuite>\n")
craigdo@5239
  1519
                f.write("  <SuiteName>%s</SuiteName>\n" % job.display_name)
craigdo@5402
  1520
                f.write('  <SuiteResult>SKIP</SuiteResult>\n')
craigdo@5239
  1521
                f.write('  <SuiteTime>Execution times not available</SuiteTime>\n')
craigdo@4772
  1522
                f.write("</TestSuite>\n")
craigdo@4772
  1523
                f.close()
craigdo@5402
  1524
            else:
craigdo@5402
  1525
                if job.returncode == 0 or job.returncode == 1 or job.returncode == 2:
craigdo@5402
  1526
                    f_to = open(xml_results_file, 'a')
craigdo@5459
  1527
                    f_from = open(job.tmp_file_name)
craigdo@5402
  1528
                    f_to.write(f_from.read())
craigdo@5402
  1529
                    f_to.close()
craigdo@5402
  1530
                    f_from.close()
craigdo@5402
  1531
                else:
craigdo@5402
  1532
                    f = open(xml_results_file, 'a')
craigdo@5402
  1533
                    f.write("<TestSuite>\n")
craigdo@5402
  1534
                    f.write("  <SuiteName>%s</SuiteName>\n" % job.display_name)
craigdo@5402
  1535
                    f.write('  <SuiteResult>CRASH</SuiteResult>\n')
craigdo@5402
  1536
                    f.write('  <SuiteTime>Execution times not available</SuiteTime>\n')
craigdo@5402
  1537
                    f.write("</TestSuite>\n")
craigdo@5402
  1538
                    f.close()
craigdo@4772
  1539
craigdo@5402
  1540
                    if job.returncode == 2:
craigdo@5402
  1541
                        f = open(xml_results_file, 'a')
craigdo@5402
  1542
                        f.write("<TestSuite>\n")
craigdo@5402
  1543
                        f.write("  <SuiteName>%s</SuiteName>\n" % job.display_name)
craigdo@5402
  1544
                        f.write('  <SuiteResult>VALGR</SuiteResult>\n')
craigdo@5402
  1545
                        f.write('  <SuiteTime>Execution times not available</SuiteTime>\n')
craigdo@5402
  1546
                        f.write("</TestSuite>\n")
craigdo@5402
  1547
                        f.close()
craigdo@5370
  1548
craigdo@4772
  1549
    #
craigdo@4772
  1550
    # We have all of the tests run and the results written out.  One final 
craigdo@4772
  1551
    # bit of housekeeping is to wait for all of the threads to close down
craigdo@4772
  1552
    # so we can exit gracefully.
craigdo@4772
  1553
    #
craigdo@4772
  1554
    for thread in threads:
craigdo@4772
  1555
        thread.join()
craigdo@4772
  1556
    
craigdo@4772
  1557
    #
craigdo@4772
  1558
    # Back at the beginning of time, we started the body of an XML document
craigdo@4772
  1559
    # since the test suites and examples were going to just write their 
craigdo@4772
  1560
    # individual pieces.  So, we need to finish off and close out the XML 
craigdo@4772
  1561
    # document
craigdo@4772
  1562
    #
craigdo@4772
  1563
    f = open(xml_results_file, 'a')
craigdo@4772
  1564
    f.write('</TestResults>\n')
craigdo@4772
  1565
    f.close()
craigdo@4772
  1566
craigdo@4772
  1567
    #
craigdo@5279
  1568
    # Print a quick summary of events
craigdo@5279
  1569
    #
craigdo@5402
  1570
    print "%d of %d tests passed (%d passed, %d skipped, %d failed, %d crashed, %d valgrind errors)" % (passed_tests, 
craigdo@5402
  1571
        total_tests, passed_tests, skipped_tests, failed_tests, crashed_tests, valgrind_errors)
craigdo@5279
  1572
    #
craigdo@4772
  1573
    # The last things to do are to translate the XML results file to "human
craigdo@5239
  1574
    # readable form" if the user asked for it (or make an XML file somewhere)
craigdo@4772
  1575
    #
craigdo@4772
  1576
    if len(options.html):
craigdo@4772
  1577
        translate_to_html(xml_results_file, options.html)
craigdo@4772
  1578
craigdo@4772
  1579
    if len(options.text):
craigdo@4772
  1580
        translate_to_text(xml_results_file, options.text)
craigdo@4772
  1581
craigdo@5239
  1582
    if len(options.xml):
craigdo@5239
  1583
        shutil.copyfile(xml_results_file, options.xml)
craigdo@5239
  1584
craigdo@5412
  1585
    #
craigdo@5412
  1586
    # If we have been asked to retain all of the little temporary files, we
craigdo@5412
  1587
    # don't delete tm.  If we do delete the temporary files, delete only the
craigdo@5412
  1588
    # directory we just created.  We don't want to happily delete any retained
craigdo@5412
  1589
    # directories, which will probably surprise the user.
craigdo@5412
  1590
    #
craigdo@5412
  1591
    if not options.retain:
craigdo@5412
  1592
        shutil.rmtree(testpy_output_dir)
craigdo@5412
  1593
craigdo@5403
  1594
    if passed_tests + skipped_tests == total_tests:
craigdo@5351
  1595
        return 0 # success
craigdo@5350
  1596
    else:
craigdo@5351
  1597
        return 1 # catchall for general errors
craigdo@5350
  1598
craigdo@4772
  1599
def main(argv):
    """Parse the command-line options, install the SIGINT handler, and
    kick off the test run.

    The parsed options are published through the module-level 'options'
    global (the rest of the script reads them from there), and the exit
    status of run_tests() is returned to the caller.
    """
    #
    # Each entry pairs the option flag strings with the keyword arguments
    # that are handed straight to optparse.  Keeping the whole option set
    # in one table makes it easy to scan and keeps the wiring in one loop.
    #
    option_table = [
        (("-c", "--constrain"),
         dict(action="store", type="string", dest="constrain", default="",
              metavar="KIND",
              help="constrain the test-runner by kind of test")),

        (("-e", "--example"),
         dict(action="store", type="string", dest="example", default="",
              metavar="EXAMPLE",
              help="specify a single example to run")),

        (("-g", "--grind"),
         dict(action="store_true", dest="valgrind", default=False,
              help="run the test suites and examples using valgrind")),

        (("-k", "--kinds"),
         dict(action="store_true", dest="kinds", default=False,
              help="print the kinds of tests available")),

        (("-l", "--list"),
         dict(action="store_true", dest="list", default=False,
              help="print the list of known tests")),

        (("-m", "--multiple"),
         dict(action="store_true", dest="multiple", default=False,
              help="report multiple failures from test suites and test cases")),

        (("-n", "--nowaf"),
         dict(action="store_true", dest="nowaf", default=False,
              help="do not run waf before starting testing")),

        (("-p", "--pyexample"),
         dict(action="store", type="string", dest="pyexample", default="",
              metavar="PYEXAMPLE",
              help="specify a single python example to run")),

        (("-r", "--retain"),
         dict(action="store_true", dest="retain", default=False,
              help="retain all temporary files (which are normally deleted)")),

        (("-s", "--suite"),
         dict(action="store", type="string", dest="suite", default="",
              metavar="TEST-SUITE",
              help="specify a single test suite to run")),

        (("-t", "--text"),
         dict(action="store", type="string", dest="text", default="",
              metavar="TEXT-FILE",
              help="write detailed test results into TEXT-FILE.txt")),

        (("-v", "--verbose"),
         dict(action="store_true", dest="verbose", default=False,
              help="print progress and informational messages")),

        (("-w", "--web", "--html"),
         dict(action="store", type="string", dest="html", default="",
              metavar="HTML-FILE",
              help="write detailed test results into HTML-FILE.html")),

        (("-x", "--xml"),
         dict(action="store", type="string", dest="xml", default="",
              metavar="XML-FILE",
              help="write detailed test results into XML-FILE.xml")),
    ]

    parser = optparse.OptionParser()
    for flags, keywords in option_table:
        parser.add_option(*flags, **keywords)

    # Only the options are interesting; positional arguments are ignored.
    # The result is stashed in a global that the rest of the script reads.
    global options
    options = parser.parse_args()[0]

    # Arrange for Ctrl-C to interrupt the run via our handler rather than
    # dying with a raw KeyboardInterrupt traceback.
    signal.signal(signal.SIGINT, sigint_hook)

    return run_tests()
craigdo@4772
  1655
craigdo@4772
  1656
# Script entry point: run the test driver and propagate its status code
# (0 on success, 1 on any failure) to the shell.
if __name__ == '__main__':
    exit_code = main(sys.argv)
    sys.exit(exit_code)