[Piglit] [PATCH] framework: Add new option to rerun tests that failed in a previous run

Dylan Baker baker.dylan.c at gmail.com
Mon Jul 28 15:55:23 PDT 2014


This patch adds a new command line option --filter-by-result, which
takes a path to a previous test results file as an argument. Those
results are used as a filter: any tests with a status of warn, fail,
crash, dmesg-warn, or dmesg-fail will be rerun, while any tests with a
status of skip or pass will not be rerun.

This does not add skipped results to the new results file, since
presumably the driver has been changed since the last run.

Signed-off-by: Dylan Baker <baker.dylan.c at gmail.com>
---
 framework/profile.py             | 26 ++++++++++++++++++++++++++
 framework/programs/run.py        |  7 +++++++
 framework/tests/profile_tests.py | 29 +++++++++++++++++++++++++++++
 3 files changed, 62 insertions(+)

diff --git a/framework/profile.py b/framework/profile.py
index 5428890..fc9b0c9 100644
--- a/framework/profile.py
+++ b/framework/profile.py
@@ -259,6 +259,32 @@ class TestProfile(object):
             self.test_list.update(profile.test_list)
 
 
+class FilterRerun(object):
+    """ A special filter for TestProfile that removes tests that did not fail
+
+    This filter loads a result file, removing any test that had a status other
+    than 'fail', 'warn', 'dmesg-warn', 'dmesg-fail', or 'crash'
+
+    This marks any new test as 'skip', and doesn't run it. The purpose of this
+    is to rerun tests that failed; introducing new tests would add a new
+    variable and doesn't make sense.
+
+    This uses a __call__ method so that it works like a function, which is what
+    the filter mechanism expects.
+
+    """
+    def __init__(self, json_file):
+        with open(json_file, 'r') as f:
+            self.result = framework.results.TestrunResult(resultfile=f)
+
+    def __call__(self, name, test):
+        # If the test failed rerun it
+        if self.result.tests.get(name, {'result': 'skip'})['result'] in [
+                'fail', 'warn', 'crash', 'dmesg-warn', 'dmesg-fail']:
+            return True
+        return False
+
+
 def load_test_profile(filename):
     """ Load a python module and return it's profile attribute
 
diff --git a/framework/programs/run.py b/framework/programs/run.py
index eb67d7f..8cdc1f6 100644
--- a/framework/programs/run.py
+++ b/framework/programs/run.py
@@ -89,6 +89,11 @@ def run(input_):
                         action="store_true",
                         help="Produce a line of output for each test before "
                              "and after it runs")
+    parser.add_argument("--filter-by-result",
+                        type=framework.profile.FilterRerun,
+                        metavar="<results.json>",
+                        help="Rerun all of the tests in a result that did not"
+                             " have a status of 'pass' or 'skip'")
     parser.add_argument("test_profile",
                         metavar="<Path to one or more test profile(s)>",
                         nargs='+',
@@ -171,6 +176,8 @@ def run(input_):
 
     profile = framework.profile.merge_test_profiles(args.test_profile)
     profile.results_dir = args.results_path
+    if args.filter_by_result:
+        profile.filter_tests(args.filter_by_result)
 
     time_start = time.time()
     # Set the dmesg type
diff --git a/framework/tests/profile_tests.py b/framework/tests/profile_tests.py
index c861cfc..88b5638 100644
--- a/framework/tests/profile_tests.py
+++ b/framework/tests/profile_tests.py
@@ -21,12 +21,14 @@
 """ Provides test for the framework.profile modules """
 
 import copy
+import json
 import platform
 import nose.tools as nt
 from nose.plugins.skip import SkipTest
 import framework.core as core
 import framework.dmesg as dmesg
 import framework.profile as profile
+import framework.tests.utils as utils
 
 
 def test_initialize_testprofile():
@@ -263,3 +265,30 @@ def test_matches_exclude_mar(data):
     del baseline['group3/test5']
 
     nt.assert_dict_equal(profile_.test_list, baseline)
+
+
+@utils.nose_generator
+def test_filterrerun():
+    """ generate tests for profile.FilterRerun """
+    data = copy.deepcopy(utils.JSON_DATA)
+    data['tests'] = {
+        'pass': {'result': 'pass'},
+        'fail': {'result': 'fail'},
+        'warn': {'result': 'warn'},
+        'skip': {'result': 'skip'},
+        'crash': {'result': 'crash'},
+        'dmesg-warn': {'result': 'dmesg-warn'},
+        'dmesg-fail': {'result': 'dmesg-fail'},
+    }
+
+    no_rerun = lambda x: nt.eq_(filter(x, None), False)
+    rerun = lambda x: nt.eq_(filter(x, None), True)
+
+    with utils.with_tempfile(json.dumps(data)) as f:
+        filter = profile.FilterRerun(f)
+        for name in ['pass', 'skip', 'new test']:
+            no_rerun.description = "FilterRerun: status {} should skip".format(name)
+            yield no_rerun, name
+        for name in ['warn', 'fail', 'crash', 'dmesg-warn', 'dmesg-fail']:
+            rerun.description = "FilterRerun: status {} should not skip".format(name)
+            yield rerun, name
-- 
2.0.2



More information about the Piglit mailing list