regression.py
changeset 6625 edc862ef7d63
parent 6624 a071889af159
child 6626 c00446e0d643
# python lib modules
import os
import sys
import shutil
import pproc as subprocess
import errno

# WAF modules
import Options
import Utils
import Task

# local modules
import wutils


def dev_null():
    if sys.platform == 'win32':
        return open("NUL:", "w")
    else:
        return open("/dev/null", "w")

       
def _find_tests(testdir):
    """Return a list of test modules in the test directory

    Arguments:
    testdir -- the directory to look in for tests
    """
    names = os.listdir(testdir)
    tests = []
    for name in names:
        if name[:5] == "test-" and name[-3:] == ".py":
            testname = name[:-3]
            tests.append(testname)
    tests.sort()
    return tests

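# Illustrative usage of the helper above (the file names are hypothetical, not
# part of this module): with a tests directory containing test-udp-echo.py and
# test-csma-one-subnet.py, _find_tests() returns the sorted module names with
# the ".py" suffix stripped:
#
#     _find_tests("regression/tests")
#     # -> ['test-csma-one-subnet', 'test-udp-echo']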
       
def diff(dir1, dir2, verbose):
    import filecmp
    comp = filecmp.dircmp(dir1, dir2)
    differ = (comp.left_only or comp.right_only or comp.diff_files)

    if differ:
        # ok, stupid binary comparison reports differences, but maybe
        # only text files differ, in which case we should compare
        # again while ignoring newline differences between
        # windows/mac/unix.
        if not comp.left_only and not comp.right_only:
            for diff_fname in comp.diff_files:
                if not (diff_fname.endswith(".tr") or diff_fname.endswith(".mob")):
                    # doesn't look like a text file; it has to differ
                    break
                diff_file1 = open(os.path.join(dir1, diff_fname), "rtU").readlines()
                diff_file2 = open(os.path.join(dir2, diff_fname), "rtU").readlines()
                if diff_file1 != diff_file2:
                    break
                #else:
                #    print ">>>>>>>> %s file does not really differ!" % (diff_fname)
            else:
                differ = False

    if differ:
        if verbose:
            comp.report()
            import difflib
            for diff_fname in comp.diff_files:
                if not (diff_fname.endswith(".tr") or diff_fname.endswith(".mob")):
                    print "The differing file %r does not look like a text file; not compared." % (diff_fname,)
                    continue
                diff_file1 = open(os.path.join(dir1, diff_fname), "rt").readlines()
                diff_file2 = open(os.path.join(dir2, diff_fname), "rt").readlines()
                diff = difflib.unified_diff(diff_file1, diff_file2)
                count = 0
                print "Differences in file %r" % (diff_fname,)
                for line in diff:
                    print line
                    count += 1
                    if count > 100:
                        break
        return 1
    else:
        return 0

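# Sketch of how diff() is meant to be used (the directory paths are made up for
# illustration): it returns 0 when the generated traces match the reference
# traces and 1 otherwise, so the value can be fed straight back as a test result.
#
#     rc = diff("build/regression/traces/foo.ref", "ref-traces/foo.ref", verbose=True)
#     if rc:
#         print "traces differ"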
       
class regression_test_task(Task.TaskBase):
    """Run a single regression test; self.result is None if skipped, 0 on success, nonzero on failure."""
    after = 'cc cxx cc_link cxx_link'
    color = 'BLUE'

    def __init__(self, bld, env, test_name, test_scripts_dir, build_traces_dir, reference_traces):
        self.bld = bld
        self.generator = self
        self.env = env
        super(regression_test_task, self).__init__(generator=self, env=env)
        self.test_name = test_name

        assert self.test_name.startswith('test-')
        short_name = self.test_name[len('test-'):]

        self.test_scripts_dir = test_scripts_dir
        self.build_traces_dir = build_traces_dir
        self.reference_traces_dir = reference_traces

        sys.path.insert(0, self.test_scripts_dir)
        try:
            mod = __import__(self.test_name, globals(), locals(), [])
        finally:
            sys.path.remove(self.test_scripts_dir)
        self.mod = mod
        if hasattr(mod, 'may_run'):
            reason_cannot_run = mod.may_run(self.env, Options.options)
        else:
            reason_cannot_run = None
        if not reason_cannot_run:
            pyscript = getattr(mod, "pyscript", None)
            if pyscript:
                Options.options.compile_targets += ',ns3module,pybindgen-command'
            else:
                program = getattr(mod, "program", short_name)
                Options.options.compile_targets += ',' + program

    def __str__(self):
        return 'regression-test (%s)\n' % self.test_name

    def runnable_status(self):
        return Task.RUN_ME

       
    def run(self):
        """Run a single test"""
        assert self.test_name.startswith('test-')
        short_name = self.test_name[len('test-'):]
        mod = self.mod
        trace_dir_name = getattr(mod, "trace_dir_name", None)
        if trace_dir_name is None:
            trace_dir_name = "%s.ref" % short_name
        trace_output_path = os.path.join(self.build_traces_dir, trace_dir_name)
        reference_traces_path = os.path.join(self.reference_traces_dir, trace_dir_name)

        if hasattr(mod, 'get_arguments'):
            arguments = mod.get_arguments(self.env, '..')
        else:
            arguments = getattr(mod, "arguments", [])

        pyscript = getattr(mod, "pyscript", None)
        if pyscript:
            is_pyscript = True
            program = pyscript
        else:
            is_pyscript = False
            program = getattr(mod, "program", short_name)

        if hasattr(mod, 'may_run'):
            reason_cannot_run = mod.may_run(self.env, Options.options)
        else:
            reason_cannot_run = None
        if reason_cannot_run:
            print "SKIP %s (%s)" % (self.test_name, reason_cannot_run)
            self.result = None
            return 0

        if Options.options.regression_generate:
            # clean the target dir
            try:
                shutil.rmtree(reference_traces_path)
            except OSError, ex:
                if ex.errno not in [errno.ENOENT]:
                    raise
            os.makedirs(reference_traces_path)
            result = self.run_reference_generate(reference_traces_path, program, arguments, is_pyscript)
            if result == 0:
                print "GENERATE " + self.test_name
            else:
                print "GENERATE FAIL " + self.test_name
        else:
            # clean the target dir
            try:
                shutil.rmtree(trace_output_path)
            except OSError, ex:
                if ex.errno not in [errno.ENOENT]:
                    raise
            os.makedirs(trace_output_path)
            # run it
            #print "self.run_reference_test:(%r, %r, %r, %r, %r)" \
            #    % (reference_traces_path, trace_output_path, program, arguments, is_pyscript)
            result = self.run_reference_test(reference_traces_path, trace_output_path, program, arguments, is_pyscript)
            if result == 0:
                print "PASS " + self.test_name
            else:
                print "FAIL " + self.test_name
        self.result = result
        return 0

       
    def run_reference_test(self, reference_traces_path, trace_output_path, program, arguments, is_pyscript):
        """Run the test and compare its traces with the reference traces; return nonzero on mismatch."""
        if not os.path.exists(reference_traces_path):
            print "Cannot locate reference traces in " + reference_traces_path
            return 1

        if is_pyscript:
            script = os.path.abspath(os.path.join('..', *os.path.split(program)))
            argv = [self.env['PYTHON'], script] + arguments
            try:
                wutils.run_argv(argv, self.env, cwd=trace_output_path, force_no_valgrind=True)
            except Utils.WafError, ex:
                print >> sys.stderr, ex
                return 1
        else:
            try:
                wutils.run_program(program, self.env,
                                   command_template=wutils.get_command_template(self.env, arguments),
                                   cwd=trace_output_path)
            except Utils.WafError, ex:
                print >> sys.stderr, ex
                return 1

        rc = diff(trace_output_path, reference_traces_path, Options.options.verbose)
        if rc:
            print "----------"
            print "Traces differ in test: ", self.test_name
            print "Reference traces in directory: " + reference_traces_path
            print "Traces in directory: " + trace_output_path
            print "Run the following command for details:"
            print "\tdiff -u %s %s" % (reference_traces_path, trace_output_path)
            if not Options.options.verbose:
                print "Or re-run regression testing with option -v"
            print "----------"
        return rc

       
    def run_reference_generate(self, trace_output_path, program, arguments, is_pyscript):
        """Run the test once to (re)generate its reference traces in trace_output_path."""
        if is_pyscript:
            script = os.path.abspath(os.path.join('..', *os.path.split(program)))
            argv = [self.env['PYTHON'], script] + arguments
            try:
                retval = wutils.run_argv(argv, self.env, cwd=trace_output_path, force_no_valgrind=True)
            except Utils.WafError, ex:
                print >> sys.stderr, ex
                return 1
        else:
            try:
                retval = wutils.run_program(program, self.env,
                                            command_template=wutils.get_command_template(self.env, arguments),
                                            cwd=trace_output_path)
            except Utils.WafError, ex:
                print >> sys.stderr, ex
                return 1
        return retval

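# Hedged sketch of the interface a test module under regression/tests/ can
# expose, as inferred from the getattr()/hasattr() lookups in the task above;
# the module name, program name and arguments below are examples, not real tests:
#
#     # regression/tests/test-example.py
#     program = "example"                 # binary to run (defaults to the short test name)
#     arguments = ["--duration=10"]       # extra arguments (or define get_arguments(env, top_dir) instead)
#     trace_dir_name = "example.ref"      # trace directory name (defaults to "<short name>.ref")
#     # pyscript = "examples/example.py"  # set instead of 'program' to run a Python example
#
#     def may_run(env, options):
#         # return a non-empty reason string to skip the test, or None to run it
#         return None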
       
class regression_test_collector_task(Task.TaskBase):
    """Print a summary of the regression test results after all test tasks have run."""
    after = 'regression_test_task'
    color = 'BLUE'

    def __init__(self, bld, test_tasks):
        self.bld = bld
        super(regression_test_collector_task, self).__init__(generator=self)
        self.test_tasks = test_tasks

    def __str__(self):
        return 'regression-test-collector\n'

    def runnable_status(self):
        return Task.RUN_ME

    def run(self):
        failed_tests = [test for test in self.test_tasks if test.result is not None and test.result != 0]
        skipped_tests = [test for test in self.test_tasks if test.result is None]
        print "Regression testing summary:"
        if skipped_tests:
            print "SKIP: %i of %i tests have been skipped (%s)" % (
                len(skipped_tests), len(self.test_tasks),
                ', '.join([test.test_name for test in skipped_tests]))
        if failed_tests:
            print "FAIL: %i of %i tests have failed (%s)" % (
                len(failed_tests), len(self.test_tasks),
                ', '.join([test.test_name for test in failed_tests]))
            return 1
        else:
            print "PASS: %i of %i tests passed" % (len(self.test_tasks) - len(skipped_tests),
                                                   len(self.test_tasks))
            return 0

       
def run_regression(bld, reference_traces):
    """Execute regression tests.  Called with cwd set to the 'regression' subdir of ns-3.

    @param reference_traces: reference traces directory.

    """

    testdir = os.path.join("regression", "tests")
    if not os.path.exists(testdir):
        print "Tests directory does not exist"
        sys.exit(3)

    if Options.options.regression_tests:
        tests = Options.options.regression_tests.split(',')
    else:
        tests = _find_tests(testdir)

    if not os.path.exists(reference_traces):
        print "Reference traces directory (%s) does not exist" % reference_traces
        return 3

    test_scripts_dir = bld.path.find_dir('regression/tests').abspath()
    build_traces_dir = bld.path.find_or_declare('regression/traces').abspath(bld.env)
    tasks = []
    for test in tests:
        task = regression_test_task(bld, bld.env, test, test_scripts_dir, build_traces_dir, reference_traces)
        #bld.task_manager.add_task(task)
        tasks.append(task)
    regression_test_collector_task(bld, tasks)
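
# Hedged sketch of how run_regression() would typically be wired into a wscript
# build() function; the option names and the reference-traces directory below
# are assumptions for illustration, not taken from this changeset:
#
#     import regression
#
#     def build(bld):
#         if Options.options.regression or Options.options.regression_generate:
#             regression.run_regression(bld, "ns-3-dev-ref-traces")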