[Piglit] [Patch v2 4/4] tests/xts.py: Add a tests file for the X Test suite.

Dylan Baker baker.dylan.c at gmail.com
Tue Apr 8 13:12:37 PDT 2014


This adds support for running XTS from piglit.

This patch was originally written by Eric Anholt, but I've cleaned it up
and reworked parts of it for upstreaming, and as a result I've attached
my name as the author so ``git bisect'' and ``git blame'' will send
angry mail my way instead of Eric's.

v2: - Don't break dmesg reporting
    - Use str.format consistently
    - populate profile after check for xtest symlink

Signed-off-by: Dylan Baker <baker.dylan.c at gmail.com>
---
 framework/exectest.py      |   6 +-
 framework/summary.py       |   3 +-
 templates/test_result.mako |  27 +++++-
 tests/xts.py               | 206 +++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 236 insertions(+), 6 deletions(-)
 create mode 100644 tests/xts.py

diff --git a/framework/exectest.py b/framework/exectest.py
index c6c5067..f8c26c3 100644
--- a/framework/exectest.py
+++ b/framework/exectest.py
@@ -234,9 +234,9 @@ class Test(object):
             if env:
                 results['environment'] = env
 
-            results['info'] = unicode("Returncode: {0}\n\nErrors:\n{1}\n\n"
-                                      "Output:\n{2}").format(returncode,
-                                                             err, out)
+            results['info'] = unicode(
+                "Returncode: {0}\n\nErrors:\n{1}\n\nOutput:\n{2}\n\n{3}".format(
+                    returncode, err, out, results.get('info', '')))
             results['returncode'] = returncode
             results['command'] = ' '.join(self.command)
 
diff --git a/framework/summary.py b/framework/summary.py
index a4aa136..ff7528f 100644
--- a/framework/summary.py
+++ b/framework/summary.py
@@ -400,7 +400,7 @@ class Summary:
                     if not path.exists(temp_path):
                         os.makedirs(temp_path)
 
-                    dmesg = value.get('dmesg', 'None')
+                    dmesg = value.get('dmesg', None)
                     if isinstance(dmesg, list):
                         dmesg = "\n".join(dmesg)
 
@@ -419,6 +419,7 @@ class Summary:
                             info=value.get('info', 'None'),
                             traceback=value.get('traceback', 'None'),
                             command=value.get('command', 'None'),
+                            images=value.get('images', None),
                             dmesg=dmesg,
                             css=path.relpath(result_css, temp_path),
                             index=path.relpath(index, temp_path)))
diff --git a/templates/test_result.mako b/templates/test_result.mako
index 490c009..c8d9ea8 100644
--- a/templates/test_result.mako
+++ b/templates/test_result.mako
@@ -28,20 +28,41 @@
         <td>Time</td>
         <td>${time}</b>
       </tr>
+    % if images:
+      <tr>
+        <td>Images</td>
+        <td>
+          <table>
+            <tr>
+              <td/>
+              <td>reference</td>
+              <td>rendered</td>
+            </tr>
+          % for image in images:
+            <tr>
+              <td>${image['image_desc']}</td>
+              <td><img src="file://${image['image_ref']}" /></td>
+              <td><img src="file://${image['image_render']}" /></td>
+            </tr>
+          % endfor
+          </table>
+        </td>
+      </tr>
+    % endif
       <tr>
         <td>Info</td>
         <td>
           <pre>${info | h}</pre>
         </td>
       </tr>
-      % if env:
+    % if env:
       <tr>
         <td>Environment</td>
         <td>
           <pre>${env | h}</pre>
         </td>
       </tr>
-      % endif
+    % endif
       <tr>
         <td>Command</td>
         <td>
@@ -54,12 +75,14 @@
           <pre>${traceback | h}</pre>
         </td>
       </tr>
+    % if dmesg:
       <tr>
         <td>dmesg</td>
         <td>
           <pre>${dmesg | h}</pre>
         </td>
       </tr>
+    % endif
     </table>
     <p><a href="${index}">Back to summary</a></p>
   </body>
diff --git a/tests/xts.py b/tests/xts.py
new file mode 100644
index 0000000..5f5222c
--- /dev/null
+++ b/tests/xts.py
@@ -0,0 +1,206 @@
+# Copyright (c) 2013-2014 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation
+# files (the "Software"), to deal in the Software without
+# restriction, including without limitation the rights to use,
+# copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following
+# conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHOR(S) BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+# OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+""" Test integreation for the X Test Suite """
+
+import os
+import re
+import sys
+import subprocess
+import itertools
+from framework.core import TestProfile
+from framework.exectest import Test, testBinDir
+
+__all__ = ['profile']
+
+X_TEST_SUITE = os.path.join(testBinDir, 'xtest')
+
+
+class XTSProfile(TestProfile):
+    """ A sublcass of TestProfile that provides a setup hook for XTS """
+    def pre_run_hook(self):
+        """ This hook sets the XTSTest.results_path variable
+
+        Setting this variable allows images created by XTS to be moved into the
+        results directory
+
+        """
+        XTSTest.RESULTS_PATH = self.results_dir
+
+        try:
+            os.mkdir(os.path.join(self.results_dir, 'images'))
+        except OSError as e:
+            # If the exception is not 'directory already exists', raise the
+            # exception
+            if e.errno != 17:
+                raise
+
+
+class XTSTest(Test):
+    """ X Test Suite class
+
+    Runs a single test or subtest from XTS. For best results it's recommended
+    that you run this using Xephyr, the nested x-server
+
+    Arguments:
+    name -- the name of the test
+    testname -- the name of the test file
+    testnum -- the number of the test file
+
+    """
+    RESULTS_PATH = None
+
+    def __init__(self, name, testname, testnum):
+        super(XTSTest, self).__init__(
+            ['./' + os.path.basename(name), '-i', str(testnum)])
+        self.testname = '{0}-{1}'.format(testname, testnum)
+        self.cwd = os.path.dirname(os.path.realpath(name))
+        self.test_results_file = os.path.join(self.cwd, self.testname)
+        self.env.update(
+            {"TET_RESFILE": self.test_results_file,
+             "XT_RESET_DELAY": '0',
+             "XT_FONTPATH_GOOD": '/usr/share/fonts/X11/misc',
+             "XT_FONTPATH": os.path.join(X_TEST_SUITE, 'xts5', 'fonts'),
+             # XXX: Are the next 3 necessary?
+             "XT_LOCAL": 'Yes',
+             "XT_TCP": 'No',
+             "XT_DISPLAYHOST": ''})
+
+    def __process_log_for_images(self, log):
+        """ Parse the image logfile """
+        images = []
+        search = re.compile('See file (Err[0-9]+.err)')
+
+        for line in log.splitlines():
+            match = search.search(line)
+            if match is not None:
+                # Can we parse any other useful information out to give a
+                # better description of each image?
+                desc = match.group(1)
+
+                # The error logs are text, with a header with width, height,
+                # and depth, then run-length-encoded pixel values (in
+                # hexadecimal).  Use xtsttopng to convert the error log to a
+                # pair of PNGs so we can put them in the summary.
+                command = ['xtsttopng', os.path.join(self.cwd, match.group(1))]
+                try:
+                    out = subprocess.check_output(command, cwd=self.cwd)
+                except OSError:
+                    images.append({'image_desc': 'image processing failed'})
+                    continue
+
+                # Each Err*.err log contains a rendered image, and a reference
+                # image that it was compared to.  We relocate them to our tree
+                # with more useful names.  (Otherwise, since tests generate
+                # error logs with numbers sequentially starting from 0, each
+                # subtest with an error would overwrite the previous test's
+                # images).
+                #
+                # XXX: This *should* be sending the images to the results
+                # directory, but since nothing fails on my system (and I can't
+                # hack on the test suite) I can't know for sure.
+                ref_path = '{0}/images/{1}-{2}-ref.png'.format(
+                    XTSTest.RESULTS_PATH, self.testname, match.group(1))
+                render_path = '{0}/images/{1}-{2}-render.png'.format(
+                    XTSTest.RESULTS_PATH, self.testname, match.group(1))
+
+                split = out.splitlines()
+                os.rename(os.path.join(self.cwd, split[0]), render_path)
+                os.rename(os.path.join(self.cwd, split[1]), ref_path)
+
+                images.append({'image_desc': desc,
+                               'image_ref': ref_path,
+                               'image_render': render_path})
+
+        return images
+
+    def interpretResult(self, out, returncode, results):
+        try:
+            with open(self.test_results_file, 'r') as rfile:
+                log = rfile.read()
+                results['info'] = log
+                os.remove(self.test_results_file)
+        except IOError:
+            results['info'] = "No results file found"
+
+        if returncode == 0:
+            if re.search('FAIL', out) is not None:
+                results['result'] = 'fail'
+            elif re.search('PASS', out) is not None:
+                results['result'] = 'pass'
+            else:
+                results['result'] = 'fail'
+        elif returncode == 77:
+            results['result'] = 'skip'
+        elif returncode == 1:
+            if re.search('Could not open all VSW5 fonts', log):
+                results['result'] = 'warn'
+            else:
+                results['result'] = 'fail'
+        else:
+            results['result'] = 'fail'
+
+        results['images'] = self.__process_log_for_images(log)
+
+        return out
+
+
+def populate_profile():
+    """ Populate the profile attribute """
+    # Add all tests to the profile
+    profile = XTSProfile()
+    fpath = os.path.join(X_TEST_SUITE, 'xts5')
+    for dirpath, _, filenames in os.walk(fpath):
+        for fname in filenames:
+            # only look at the .m test files
+            testname, ext = os.path.splitext(fname)
+            if ext != '.m':
+                continue
+
+            # incrementing number generator
+            counts = (x for x in itertools.count(1, 1))
+
+            # Walk the file looking for >>ASSERTION, each of these corresponds
+            # to a generated subtest, there can be multiple subtests per .m
+            # file
+            with open(os.path.join(dirpath, fname), 'r') as rfile:
+                for line in rfile:
+                    if line.startswith('>>ASSERTION'):
+                        num = next(counts)
+                        group = '{0}/{1}/{2}'.format(
+                            os.path.relpath(dirpath, X_TEST_SUITE),
+                            testname, num)
+
+                        profile.tests[group] = XTSTest(
+                            os.path.join(dirpath, testname),
+                            testname,
+                            num)
+    return profile
+
+
+# If the symlink for the XTS has not been created exit
+if not os.path.exists(X_TEST_SUITE):
+    print "xtest symlink not found!"
+    sys.exit(0)
+
+profile = populate_profile()
-- 
1.9.1



More information about the Piglit mailing list