[Piglit] [PATCH 10/13] framework: stop passing duplicate arguments

Dylan Baker baker.dylan.c at gmail.com
Tue Apr 15 16:12:43 PDT 2014


A number of methods in Test require stdout, the returncode, and the
TestResult() instance to be passed into them. This is redundant, since
the TestResult() instance already contains all of that information.
Since many methods need this information, make the TestResult() an
instance attribute.

Signed-off-by: Dylan Baker <baker.dylan.c at gmail.com>
---
 framework/exectest.py              | 129 ++++++++++++++++++-------------------
 framework/gleantest.py             |  14 ++--
 framework/tests/gleantest_tests.py |   5 +-
 tests/es3conform.py                |   9 ++-
 tests/igt.py                       |  25 ++++---
 tests/oglconform.py                |  13 ++--
 6 files changed, 91 insertions(+), 104 deletions(-)

diff --git a/framework/exectest.py b/framework/exectest.py
index 3c62c0a..fe82181 100644
--- a/framework/exectest.py
+++ b/framework/exectest.py
@@ -60,6 +60,7 @@ class Test(object):
         self.run_concurrent = run_concurrent
         self.command = command
         self.env = {}
+        self.result = TestResult()
 
         # This is a hook for doing some testing on execute right before
         # self.run is called.
@@ -81,34 +82,35 @@ class Test(object):
                 time_start = time.time()
                 dmesg.update_dmesg()
                 self._test_hook_execute_run()
-                result = self.run()
-                result = dmesg.update_result(result)
+                self.run()
+                self.result = dmesg.update_result(self.result)
                 time_end = time.time()
-                if 'time' not in result:
-                    result['time'] = time_end - time_start
-                if 'result' not in result:
-                    result['result'] = 'fail'
-                if not isinstance(result, TestResult):
-                    result = TestResult(result)
-                    result['result'] = 'warn'
-                    result['note'] = ('Result not returned as an instance '
-                                      'of TestResult')
+                if 'time' not in self.result:
+                    self.result['time'] = time_end - time_start
+                if 'result' not in self.result:
+                    self.result['result'] = 'fail'
+                if not isinstance(self.result, TestResult):
+                    self.result = TestResult(self.result)
+                    self.result['result'] = 'warn'
+                    self.result['note'] = ('Result not returned as an '
+                                            'instance of TestResult')
             except:
-                result = TestResult()
                 exception = sys.exc_info()
-                result['result'] = 'fail'
-                result['exception'] = "{}{}".format(*exception[:2])
-                result['traceback'] = "".join(traceback.format_tb(exception[2]))
-
-            log.log(path, result['result'])
-            log.post_log(log_current, result['result'])
-
-            if 'subtest' in result and len(result['subtest']) > 1:
-                for test in result['subtest']:
-                    result['result'] = result['subtest'][test]
-                    json_writer.write_dict_item(os.path.join(path, test), result)
+                self.result['result'] = 'fail'
+                self.result['exception'] = "{}{}".format(*exception[:2])
+                self.result['traceback'] = "".join(
+                    traceback.format_tb(exception[2]))
+
+            log.log(path, self.result['result'])
+            log.post_log(log_current, self.result['result'])
+
+            if 'subtest' in self.result and len(self.result['subtest']) > 1:
+                for test in self.result['subtest']:
+                    self.result['result'] = self.result['subtest'][test]
+                    json_writer.write_dict_item(os.path.join(path, test),
+                                                self.result)
             else:
-                json_writer.write_dict_item(path, result)
+                json_writer.write_dict_item(path, self.result)
         else:
             log.log(path, 'dry-run')
             log.post_log(log_current, 'dry-run')
@@ -127,7 +129,7 @@ class Test(object):
             return
         self._command = value
 
-    def interpret_result(self, out, returncode, results):
+    def interpret_result(self):
         raise NotImplementedError
         return out
 
@@ -143,29 +145,28 @@ class Test(object):
         """
         assert self.command
 
-        results = TestResult()
-        results['command'] = ' '.join(self.command)
-        results['environment'] = " ".join(
+        self.result['command'] = ' '.join(self.command)
+        self.result['environment'] = " ".join(
             '{0}="{1}"'.format(k, v) for k, v in  self.env.iteritems())
 
         if self.check_for_skip_scenario():
-            results['result'] = 'skip'
-            results['out'] = "PIGLIT: {'result': 'skip'}\n"
-            results['err'] = ""
-            results['returncode'] = None
-            return results
+            self.result['result'] = 'skip'
+            self.result['out'] = ""
+            self.result['err'] = ""
+            self.result['returncode'] = None
+            return
 
         for _ in xrange(5):
-            out, err, returncode = self.get_command_result()
+            self.get_command_result()
 
             # https://bugzilla.gnome.org/show_bug.cgi?id=680214 is
             # affecting many developers.  If we catch it
             # happening, try just re-running the test.
-            if out.find("Got spurious window resize") == -1:
+            if self.result['out'].find("Got spurious window resize") == -1:
                 break
 
-        results['result'] = 'fail'
-        out = self.interpret_result(out, returncode, results)
+        self.result['result'] = 'fail'
+        self.interpret_result()
 
         crash_codes = [
             # Unix: terminated by a signal
@@ -181,27 +182,22 @@ class Test(object):
             -1073741676
         ]
 
-        if returncode in crash_codes:
-            results['result'] = 'crash'
-        elif returncode != 0 and results['result'] == 'pass':
-            results['result'] = 'warn'
+        if self.result['returncode'] in crash_codes:
+            self.result['result'] = 'crash'
+        elif self.result['returncode'] != 0 and self.result['result'] == 'pass':
+            self.result['result'] = 'warn'
 
         if self.ENV.valgrind:
             # If the underlying test failed, simply report
             # 'skip' for this valgrind test.
-            if results['result'] != 'pass':
-                results['result'] = 'skip'
-            elif returncode == 0:
+            if self.result['result'] != 'pass':
+                self.result['result'] = 'skip'
+            elif self.result['returncode'] == 0:
                 # Test passes and is valgrind clean.
-                results['result'] = 'pass'
+                self.result['result'] = 'pass'
             else:
                 # Test passed but has valgrind errors.
-                results['result'] = 'fail'
-
-        results['returncode'] = returncode
-        results['info'] = unicode("Returncode: {0}\n\nErrors:\n{1}\n\n"
-                                  "Output:\n{2}").format(returncode, err, out)
-        return results
+                self.result['result'] = 'fail'
 
     def check_for_skip_scenario(self):
         """ Application specific check for skip
@@ -252,10 +248,9 @@ class Test(object):
         # replaces erroneous charcters with the Unicode
         # "replacement character" (a white question mark inside
         # a black diamond).
-        out = out.decode('utf-8', 'replace')
-        err = err.decode('utf-8', 'replace')
-
-        return out, err, returncode
+        self.result['out'] = out.decode('utf-8', 'replace')
+        self.result['err'] = err.decode('utf-8', 'replace')
+        self.result['returncode'] = returncode
 
 
 class PiglitTest(Test):
@@ -285,25 +280,23 @@ class PiglitTest(Test):
                 return True
         return False
 
-    def interpret_result(self, out, returncode, results):
-        outlines = out.split('\n')
+    def interpret_result(self):
+        outlines = self.result['out'].split('\n')
         outpiglit = (s[7:] for s in outlines if s.startswith('PIGLIT:'))
 
         try:
             for piglit in outpiglit:
                 if piglit.startswith('subtest'):
-                    if not 'subtest' in results:
-                        results['subtest'] = {}
-                    results['subtest'].update(eval(piglit[7:]))
+                    if not 'subtest' in self.result:
+                        self.result['subtest'] = {}
+                    self.result['subtest'].update(eval(piglit[7:]))
                 else:
-                    results.update(eval(piglit))
-            out = '\n'.join(
+                    self.result.update(eval(piglit))
+            self.result['out'] = '\n'.join(
                 s for s in outlines if not s.startswith('PIGLIT:'))
         except:
-            results['result'] = 'fail'
-            results['note'] = 'Failed to parse result string'
-
-        if 'result' not in results:
-            results['result'] = 'fail'
+            self.result['result'] = 'fail'
+            self.result['note'] = 'Failed to parse result string'
 
-        return out
+        if 'result' not in self.result:
+            self.result['result'] = 'fail'
diff --git a/framework/gleantest.py b/framework/gleantest.py
index 53343f3..4e09d41 100644
--- a/framework/gleantest.py
+++ b/framework/gleantest.py
@@ -40,11 +40,11 @@ class GleanTest(Test):
     def command(self):
         return super(GleanTest, self).command + self.globalParams
 
-    def interpret_result(self, out, returncode, results):
-        if "{'result': 'skip'}" in out:
-            results['result'] = 'skip'
-        elif out.find('FAIL') >= 0 or returncode != 0:
-            results['result'] = 'fail'
+    def interpret_result(self):
+        if "{'result': 'skip'}" in self.result['out']:
+            self.result['result'] = 'skip'
+        elif (self.result['out'].find('FAIL') >= 0 or
+                self.result['returncode'] != 0):
+            self.result['result'] = 'fail'
         else:
-            results['result'] = 'pass'
-        return out
+            self.result['result'] = 'pass'
diff --git a/framework/tests/gleantest_tests.py b/framework/tests/gleantest_tests.py
index 848b835..20aa647 100644
--- a/framework/tests/gleantest_tests.py
+++ b/framework/tests/gleantest_tests.py
@@ -67,6 +67,5 @@ def test_bad_returncode():
     os.environ = {}
 
     test = GleanTest('basic')
-    result = test.run()
-    print("result: {result}\nreturncode: {returncode}".format(**result))
-    assert result['result'] == 'fail', "Result should have been fail"
+    test.run()
+    assert test.result['result'] == 'fail', "Result should have been fail"
diff --git a/tests/es3conform.py b/tests/es3conform.py
index 0aefe58..b94cb06 100644
--- a/tests/es3conform.py
+++ b/tests/es3conform.py
@@ -55,13 +55,12 @@ class GTFTest(Test):
                                        '-minfmt', '-width=113', '-height=47',
                                        '-run=' + testpath])
 
-    def interpret_result(self, out, returncode, results):
-        mo = self.pass_re.search(out)
+    def interpret_result(self):
+        mo = self.pass_re.search(self.result['out'])
         if mo is not None and int(mo.group('passed')) > 0:
-            results['result'] = 'pass'
+            self.result['result'] = 'pass'
         else:
-            results['result'] = 'fail'
-        return out
+            self.result['result'] = 'fail'
 
 def populateTests(runfile):
     "Read a .run file, adding any .test files to the profile"
diff --git a/tests/igt.py b/tests/igt.py
index 1053791..9c700a9 100644
--- a/tests/igt.py
+++ b/tests/igt.py
@@ -76,27 +76,24 @@ class IGTTest(Test):
         super(IGTTest, self).__init__(
             [path.join(igtTestRoot, binary)] + arguments)
 
-    def interpret_result(self, out, returncode, results):
+    def interpret_result(self):
         if not igtEnvironmentOk:
-            return out
+            return
 
-        if returncode == 0:
-            results['result'] = 'pass'
-        elif returncode == 77:
-            results['result'] = 'skip'
+        if self.result['returncode'] == 0:
+            self.result['result'] = 'pass'
+        elif self.result['returncode'] == 77:
+            self.result['result'] = 'skip'
         else:
-            results['result'] = 'fail'
-        return out
+            self.result['result'] = 'fail'
 
     def run(self):
         if not igtEnvironmentOk:
-            results = TestResult()
-            results['result'] = 'fail'
-            results['info'] = unicode("Test Environment isn't OK")
+            self.result['result'] = 'fail'
+            self.result['info'] = unicode("Test Environment isn't OK")
+            return
 
-            return results
-
-        return super(IGTTest, self).run()
+        super(IGTTest, self).run()
 
 def listTests(listname):
     oldDir = os.getcwd()
diff --git a/tests/oglconform.py b/tests/oglconform.py
index d62ceff..1d40d2a 100644
--- a/tests/oglconform.py
+++ b/tests/oglconform.py
@@ -52,14 +52,13 @@ class OGLCTest(Test):
         super(OGLCTest, self).__init__([bin_oglconform, '-minFmt', '-v', '4',
                                         '-test', category, subtest])
 
-    def interpret_result(self, out, returncode, results):
-        if self.skip_re.search(out) is not None:
-            results['result'] = 'skip'
-        elif re.search('Total Passed : 1', out) is not None:
-            results['result'] = 'pass'
+    def interpret_result(self):
+        if self.skip_re.search(self.result['out']) is not None:
+            self.result['result'] = 'skip'
+        elif re.search('Total Passed : 1', self.result['out']) is not None:
+            self.result['result'] = 'pass'
         else:
-            results['result'] = 'fail'
-        return out
+            self.result['result'] = 'fail'
 
 # Create a new top-level 'oglconform' category
 
-- 
1.9.2



More information about the Piglit mailing list