@@ -80,11 +80,11 @@
 
 #
 # If the user has constrained us to run certain kinds of tests, we can tell waf
 # to only build
 #
-core_kinds = ["bvt", "core", "system", "unit"]
+core_kinds = ["bvt", "core", "performance", "system", "unit"]
 
 #
 # There are some special cases for test suites that kill valgrind. This is
 # because NSC causes illegal instruction crashes when run under valgrind.
 #
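The constraint check that the comment above alludes to amounts to membership in core_kinds; a rough sketch of that test (the helper name is illustrative, not part of the patch):

core_kinds = ["bvt", "core", "performance", "system", "unit"]

def constrained_to_core(constrain):
    # An empty string means no --constrain option was given.
    return len(constrain) != 0 and constrain in core_kinds

print constrained_to_core("unit")      # -> True
print constrained_to_core("example")   # -> False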
@@ -1210,10 +1210,25 @@
 # results in two different places.
 #
 suite_list = suites.split('\n')
 
 #
+# Performance tests should only be run when they are requested,
+# i.e. they are not run by default in test.py.
+#
+if options.constrain != 'performance':
+
+    # Get a list of all of the performance tests.
+    path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list --test-type=%s" % "performance")
+    (rc, performance_tests, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
+    performance_test_list = performance_tests.split('\n')
+
+    # Remove any performance tests from the suites list.
+    for performance_test in performance_test_list:
+        if performance_test in suite_list:
+            suite_list.remove(performance_test)
+
 # We now have a possibly large number of test suites to run, so we want to
 # run them in parallel. We're going to spin up a number of worker threads
 # that will run our test jobs for us.
 #
 input_queue = Queue.Queue(0)
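The new block is a plain list subtraction: ask the test runner for the performance suites, then drop them from the list to execute. A toy run of the same loop (suite names invented) shows the default behaviour:

suite_list = ["wifi-interference", "lte-rlc-performance", "csma-bridge"]
performance_test_list = ["lte-rlc-performance"]

# Same removal loop as in the hunk above, on made-up data.
for performance_test in performance_test_list:
    if performance_test in suite_list:
        suite_list.remove(performance_test)

print suite_list   # -> ['wifi-interference', 'csma-bridge']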
@@ -1324,19 +1339,17 @@
 # conditions.
 #
 # This translates into allowing the following options with respect to the
 # suites
 #
-# ./test,py: run all of the examples
+# ./test.py: run all of the examples
 # ./test.py --constrain=unit run no examples
 # ./test.py --constrain=example run all of the examples
 # ./test.py --suite=some-test-suite: run no examples
 # ./test.py --example=some-example: run the single example
 # ./test.py --suite=some-suite --example=some-example: run the single example
 #
-# XXX could use constrain to separate out examples used for performance
-# testing
 #
 if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0:
     if len(options.constrain) == 0 or options.constrain == "example":
         if ENABLE_EXAMPLES:
             for test, do_run, do_valgrind_run in example_tests:
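The combinations listed in the comment reduce to one rule, which the three nested if statements at the end of the hunk implement; written as a single predicate it would read roughly as follows (a paraphrase for clarity, not code from the patch; ENABLE_EXAMPLES and options come from test.py itself):

def run_all_examples(options):
    nothing_specific = (len(options.suite) == 0 and
                        len(options.example) == 0 and
                        len(options.pyexample) == 0)
    constraint_allows = (len(options.constrain) == 0 or
                         options.constrain == "example")
    return ENABLE_EXAMPLES and nothing_specific and constraint_allows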
@@ -1543,11 +1556,14 @@
     status = "VALGR"
 else:
     crashed_tests = crashed_tests + 1
     status = "CRASH"
 
-print "%s: %s %s" % (status, kind, job.display_name)
+if options.duration or options.constrain == "performance":
+    print "%s (%.3f): %s %s" % (status, job.elapsed_time, kind, job.display_name)
+else:
+    print "%s: %s %s" % (status, kind, job.display_name)
 
 if job.is_example or job.is_pyexample:
     #
     # Examples are the odd man out here. They are written without any
     # knowledge that they are going to be run as a test, so we need to
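With this change the status line gains a duration field whenever --duration is given or the run is constrained to performance tests. The two formats produced by the print statements above look roughly like this (status, names and timing invented):

status, kind, display_name, elapsed = "PASS", "TestSuite", "wifi-interference", 12.345

print "%s (%.3f): %s %s" % (status, elapsed, kind, display_name)
# PASS (12.345): TestSuite wifi-interference

print "%s: %s %s" % (status, kind, display_name)
# PASS: TestSuite wifi-interference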
@@ -1723,10 +1739,13 @@
 
 parser.add_option("-c", "--constrain", action="store", type="string", dest="constrain", default="",
                   metavar="KIND",
                   help="constrain the test-runner by kind of test")
 
+parser.add_option("-d", "--duration", action="store_true", dest="duration", default=False,
+                  help="print the duration of each test suite and example")
+
 parser.add_option("-e", "--example", action="store", type="string", dest="example", default="",
                   metavar="EXAMPLE",
                   help="specify a single example to run (with relative path)")
 
 parser.add_option("-u", "--update-data", action="store_true", dest="update_data", default=False,