regression.py
author Gustavo J. A. M. Carneiro <gjc@inescporto.pt>
Mon, 13 Apr 2009 23:10:37 +0100
changeset 4326 179f86838e62
parent 4308 b8528d30dfb3
child 4332 a1c7bc503a0c
permissions -rw-r--r--
Upgrade to WAF 1.5.4
     1 # python lib modules
     2 import os
     3 import sys
     4 import shutil
     5 import pproc as subprocess
     6 import errno
     7 
     8 # WAF modules
     9 import Options
    10 import Utils
    11 import Task
    12 
    13 # local modules
    14 import wutils
    15 
    16 
    17 def dev_null():
    18     if sys.platform == 'win32':
    19         return open("NUL:", "w")
    20     else:
    21         return open("/dev/null", "w")
    22 
    23 
    24 def _find_tests(testdir):
    25     """Return a list of test modules in the test directory
    26 
    27     Arguments:
    28     testdir -- the directory to look in for tests
    29     """
    30     names = os.listdir(testdir)
    31     tests = []
    32     for name in names:
    33         if name[:5] == "test-" and name[-3:] == ".py":
    34             testname = name[:-3]
    35             tests.append(testname)
    36     tests.sort()
    37     return tests
    38 
    39 def diff(dir1, dir2, verbose):
    40     import filecmp
    41     comp = filecmp.dircmp(dir1, dir2)
    42     differ = (comp.left_only or comp.right_only or comp.diff_files)
    43     if differ:
    44         if verbose:
    45             comp.report()
    46             import difflib
    47             for diff_fname in comp.diff_files:
    48                 if not (diff_fname.endswith(".tr") or diff_fname.endswith(".mob")):
    49                     print "The different file %r does not sound like a text file, not compared." % (diff_fname,)
    50                 diff_file1 = open(os.path.join(dir1, diff_fname), "rt").readlines()
    51                 diff_file2 = open(os.path.join(dir2, diff_fname), "rt").readlines()
    52                 diff = difflib.unified_diff(diff_file1, diff_file2)
    53                 count = 0
    54                 print "Differences in file %r" % (diff_fname,)
    55                 for line in diff:
    56                     print line
    57                     count += 1
    58                     if count > 100:
    59                         break
    60         return 1
    61     else:
    62         return 0
    63 
class regression_test_task(Task.TaskBase):
    """Waf task that runs one ns-3 regression test.

    The task imports a test module (test-*.py) from the test scripts
    directory, runs the corresponding program (compiled binary or Python
    script), and then either regenerates the reference traces or compares
    the freshly produced traces against the stored reference traces.
    """

    # run only after compile/link tasks, so the programs under test exist
    after = 'cc cxx cc_link cxx_link'
    color = 'BLUE'

    def __init__(self, bld, env, test_name, test_scripts_dir, build_traces_dir, reference_traces):
        """Create the task.

        Arguments:
        bld -- the waf build context
        env -- the waf environment
        test_name -- name of the test module, e.g. 'test-foo'
        test_scripts_dir -- directory containing the test-*.py scripts
        build_traces_dir -- directory where freshly produced traces go
        reference_traces -- directory holding the reference traces
        """
        self.bld = bld
        self.generator = self
        self.env = env
        super(regression_test_task, self).__init__(generator=self, env=env)
        self.test_name = test_name
        self.test_scripts_dir = test_scripts_dir
        self.build_traces_dir = build_traces_dir
        self.reference_traces_dir = reference_traces

    def __str__(self):
        # label shown by waf while the task executes
        return 'regression-test (%s)\n' % self.test_name

    def runnable_status(self):
        # always run: regression tests have no up-to-date check
        return Task.RUN_ME

    def run(self):
        """Run a single test"""
        # Import the test module from the scripts dir; restore sys.path
        # even if the import raises.
        sys.path.insert(0, self.test_scripts_dir)
        try:
            mod = __import__(self.test_name, globals(), locals(), [])
        finally:
            sys.path.remove(self.test_scripts_dir)

        assert self.test_name.startswith('test-')
        short_name = self.test_name[len('test-'):]

        # The test module may override the trace directory name via a
        # 'trace_dir_name' attribute; default is '<short_name>.ref'.
        trace_dir_name = getattr(mod, "trace_dir_name", None)
        if trace_dir_name is None:
            trace_dir_name = "%s.ref" % short_name
        trace_output_path = os.path.join(self.build_traces_dir, trace_dir_name)
        reference_traces_path = os.path.join(self.reference_traces_dir, trace_dir_name)

        # Program arguments: computed by the module's get_arguments(env, '..')
        # hook when present, else taken from its 'arguments' list.
        if hasattr(mod, 'get_arguments'):
            arguments = mod.get_arguments(self.env, '..')
        else:
            arguments = getattr(mod, "arguments", [])

        # A test is either a Python script (module 'pyscript' attribute) or
        # a compiled program (module 'program' attribute, default short_name).
        pyscript = getattr(mod, "pyscript", None)
        if pyscript:
            is_pyscript = True
            program = pyscript
        else:
            is_pyscript = False
            program = getattr(mod, "program", short_name)

        # may_run(env, options) returns a reason string when the test cannot
        # run in this configuration; a false value means it can run.
        if hasattr(mod, 'may_run'):
            reason_cannot_run = mod.may_run(self.env, Options.options)
        else:
            reason_cannot_run = None
        if reason_cannot_run:
            print "SKIP %s (%s)" % (self.test_name, reason_cannot_run)
            self.result = None  # None marks the test as skipped
            return 0

        if Options.options.regression_generate:
            # Regenerate mode: rebuild the reference traces from scratch.
            # clean the target dir
            try:
                shutil.rmtree(reference_traces_path)
            except OSError, ex:
                # a missing directory is fine; anything else is a real error
                if ex.errno not in [errno.ENOENT]:
                    raise
            os.makedirs(reference_traces_path)
            result = self.run_reference_generate(reference_traces_path, program, arguments, is_pyscript)
            if result == 0:
                print "GENERATE " + self.test_name
            else:
                print "GENERATE FAIL " + self.test_name
        else:
            # Normal mode: run the test and compare against the references.
            # clean the target dir
            try:
                shutil.rmtree(trace_output_path)
            except OSError, ex:
                if ex.errno not in [errno.ENOENT]:
                    raise
            os.makedirs(trace_output_path)
            # run it
            result = self.run_reference_test(reference_traces_path, trace_output_path, program, arguments, is_pyscript)
            if result == 0:
                print "PASS " + self.test_name
            else:
                print "FAIL " + self.test_name
        self.result = result
        return 0

    def run_reference_test(self, reference_traces_path, trace_output_path, program, arguments, is_pyscript):
        """Run the test program and diff its traces against the references.

        Returns 0 when the traces match, 1 otherwise.
        """
        if not os.path.exists(reference_traces_path):
            print "Cannot locate reference traces in " + reference_traces_path
            return 1

        if is_pyscript:
            # Run the script with the configured Python interpreter; cwd is
            # the trace output dir so produced traces land there.
            script = os.path.abspath(os.path.join('..', *os.path.split(program)))
            argv = [self.env['PYTHON'], script] + arguments
            try:
                wutils.run_argv(argv, cwd=trace_output_path)
            except Utils.WafError, ex:
                print >> sys.stderr, ex
                return 1
        else:
            try:
                wutils.run_program(program,
                                   command_template=wutils.get_command_template(self.env, arguments),
                                   cwd=trace_output_path)
            except Utils.WafError, ex:
                print >> sys.stderr, ex
                return 1

        rc = diff(trace_output_path, reference_traces_path, Options.options.verbose)
        if rc:
            print "----------"
            print "Traces differ in test: ", self.test_name
            print "Reference traces in directory: " + reference_traces_path
            print "Traces in directory: " + trace_output_path
            print "Run the following command for details:"
            print "\tdiff -u %s %s" % (reference_traces_path, trace_output_path)
            if not Options.options.verbose:
                print "Or re-run regression testing with option -v"
            print "----------"
        return rc


    def run_reference_generate(self, trace_output_path, program, arguments, is_pyscript):
        """Run the test program to (re)generate the reference traces.

        Returns the program's exit status, or 1 if it failed to run.
        """
        if is_pyscript:
            script = os.path.abspath(os.path.join('..', *os.path.split(program)))
            argv = [self.env['PYTHON'], script] + arguments
            try:
                retval = wutils.run_argv(argv, cwd=trace_output_path)
            except Utils.WafError, ex:
                print >> sys.stderr, ex
                return 1
        else:
            try:
                retval = wutils.run_program(program,
                                            command_template=wutils.get_command_template(self.env, arguments),
                                            cwd=trace_output_path)
            except Utils.WafError, ex:
                print >> sys.stderr, ex
                return 1
        return retval
   207 
   208 
   209 class regression_test_collector_task(Task.TaskBase):
   210     after = 'regression_test_task'
   211     color = 'BLUE'
   212 
   213     def __init__(self, bld, test_tasks):
   214         self.bld = bld
   215         super(regression_test_collector_task, self).__init__(generator=self)
   216         self.test_tasks = test_tasks
   217 
   218     def __str__(self):
   219         return 'regression-test-collector\n'
   220 
   221     def runnable_status(self):
   222         return Task.RUN_ME
   223 
   224     def run(self):
   225         failed_tests = [test for test in self.test_tasks if test.result is not None and test.result != 0]
   226         skipped_tests = [test for test in self.test_tasks if test.result is None]
   227         print "Regression testing summary:"
   228         if skipped_tests:
   229             print "SKIP: %i of %i tests have been skipped (%s)" % (
   230                 len(skipped_tests), len(self.test_tasks),
   231                 ', '.join([test.test_name for test in skipped_tests]))
   232         if failed_tests:
   233             print "FAIL: %i of %i tests have failed (%s)" % (
   234                 len(failed_tests), len(self.test_tasks),
   235                 ', '.join([test.test_name for test in failed_tests]))
   236             return 1
   237         else:
   238             print "PASS: %i of %i tests passed" % (len(self.test_tasks) - len(skipped_tests),
   239                                                    len(self.test_tasks))
   240             return 0
   241 
   242 def run_regression(bld, reference_traces):
   243     """Execute regression tests.  Called with cwd set to the 'regression' subdir of ns-3.
   244 
   245     @param reference_traces: reference traces directory.
   246 
   247     """
   248 
   249     testdir = os.path.join("regression", "tests")
   250     if not os.path.exists(testdir):
   251         print "Tests directory does not exist"
   252         sys.exit(3)
   253 
   254     if Options.options.regression_tests:
   255         tests = Options.options.regression_tests.split(',')
   256     else:
   257         tests = _find_tests(testdir)
   258 
   259     if not os.path.exists(reference_traces):
   260         print "Reference traces directory (%s) does not exist" % reference_traces
   261         return 3
   262     
   263     test_scripts_dir = bld.path.find_dir('regression/tests').abspath()
   264     build_traces_dir = bld.path.find_or_declare('regression/traces').abspath(bld.env)
   265     tasks = []
   266     for test in tests:
   267         task = regression_test_task(bld, bld.env, test, test_scripts_dir, build_traces_dir, reference_traces)
   268         #bld.task_manager.add_task(task)
   269         tasks.append(task)
   270     regression_test_collector_task(bld, tasks)