[Piglit] [RFC 06/10] framework: refactor test running to use explicit TestWorkItems and TestRunners

Nicolai Hähnle nhaehnle at gmail.com
Wed Oct 11 10:26:55 UTC 2017


From: Nicolai Hähnle <nicolai.haehnle at amd.com>

Factor out the execution of tests into TestRunner objects, to allow
a cleaner approach to running multiple tests without process isolation.

After this patch, which already adapts the shader tests, each individual
test is unaffected by whether process isolation is enabled or not. Instead,
when process isolation is disabled, tests that share the same runner are
grouped into work items. Every work item will result in a single
TestResult object that contains the results for all tests run as part of
that work item.
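
As a rough sketch of the intended grouping (class and method names are taken
from this patch; the "profile" object is a placeholder for any TestProfile
instance):

    from framework.profile import WorkItemBuilder

    # With process isolation disabled, tests sharing a runner are grouped,
    # at most runner.max_tests per work item (or all remaining tests when
    # the runner reports no limit).
    builder = WorkItemBuilder(process_isolation=False)
    workitems = builder(profile.itertests())

    # Each work item yields exactly one TestResult covering all of its tests.
    for item in workitems:
        assert item.results.workitemid == item.workitemid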

Additionally, each TestWorkItem has a unique ID which is also stored in
the TestResult object. This is used for resuming test runs: the idea is
that the generation of work items is reproducible, so each work item
can continue precisely where it left off.

This allows resumed test runs to work properly without process isolation.
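
Roughly, a resumed run is expected to rebuild the same work items and hand the
previous results back to the builder (again a sketch, continuing the example
above; "previous_run" stands in for the TestrunResult loaded from the
interrupted run):

    # Each stored result carries a workitemid, so rebuilt work items can be
    # matched with what was already recorded.
    builder = WorkItemBuilder(process_isolation=False,
                              results=previous_run.results)
    workitems = builder(profile.itertests())

    # Work items whose tests all completed are dropped entirely; the rest
    # re-run only their pending tests (see TestWorkItem.pending_tests).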

TODO
- unit tests
- split up: extract some more stand-alone changes e.g. to TestResult
---
 framework/backends/abstract.py |  14 +-
 framework/backends/json.py     |  31 ++-
 framework/profile.py           | 101 ++++++--
 framework/programs/run.py      |  19 +-
 framework/results.py           |   8 +-
 framework/test/base.py         | 548 ++++++++++++++++++++++-------------------
 framework/test/deqp.py         |  44 ++--
 framework/test/gleantest.py    |  10 +-
 framework/test/gtest.py        |  12 +-
 framework/test/oclconform.py   |   8 +-
 framework/test/piglit_test.py  |  45 +++-
 framework/test/shader_test.py  | 172 ++++++-------
 tests/all.py                   |  44 ++--
 tests/deqp_vk.py               |  16 +-
 tests/es3conform.py            |   8 +-
 tests/igt.py                   |  26 +-
 tests/oglconform.py            |  16 +-
 tests/xts.py                   |  44 ++--
 18 files changed, 637 insertions(+), 529 deletions(-)

diff --git a/framework/backends/abstract.py b/framework/backends/abstract.py
index cee7d7bdf..bd44a2ac9 100644
--- a/framework/backends/abstract.py
+++ b/framework/backends/abstract.py
@@ -184,40 +184,38 @@ class FileBackend(Backend):
 
     @abc.abstractmethod
     def _write(self, f, name, data):
         """Method that writes a TestResult into a result file."""
 
     @abc.abstractproperty
     def _file_extension(self):
         """The file extension of the backend."""
 
     @contextlib.contextmanager
-    def write_test(self, test):
-        """Write a test.
+    def write_test(self, workitem):
+        """Write a workitem's results.
 
         When this context manager is opened it will first write a placeholder
         file with the status incomplete.
 
         When it is called to write the final result it will create a temporary
         file, write to that file, then move that file over the original,
         incomplete status file. This helps to make the operation atomic, as
         long as the filesystem continues running and the result was valid in
         the original file it will be valid at the end
 
         """
-        def finish(val):
+        def update():
             tfile = file_ + '.tmp'
             with open(tfile, 'w') as f:
-                self._write(f, test.name.lower(), val)
+                self._write(f, workitem.results.root, workitem.results)
                 self.__fsync(f)
             shutil.move(tfile, file_)
 
         file_ = os.path.join(self._dest, 'tests', '{}.{}'.format(
             next(self._counter), self._file_extension))
 
         with open(file_, 'w') as f:
-            incomplete = TestResult(result=INCOMPLETE)
-            incomplete.root = test.name
-            self._write(f, test.name.lower(), incomplete)
+            self._write(f, workitem.results.root, workitem.results)
             self.__fsync(f)
 
-        yield finish
+        yield update
diff --git a/framework/backends/json.py b/framework/backends/json.py
index 80d82d0ab..1aa14bd1c 100644
--- a/framework/backends/json.py
+++ b/framework/backends/json.py
@@ -119,52 +119,62 @@ class JSONBackend(FileBackend):
             pass
 
     def finalize(self, metadata=None):
         """ End json serialization and cleanup
 
         This method is called after all of tests are written, it closes any
         containers that are still open and closes the file
 
         """
         tests_dir = os.path.join(self._dest, 'tests')
+
+        # Traverse the files in reverse order, so that later versions of
+        # a work item override earlier ones (for resumed test runs).
         file_list = sorted(
             (f for f in os.listdir(tests_dir) if f.endswith('.json')),
-            key=lambda p: int(os.path.splitext(p)[0]))
+            key=lambda p: int(os.path.splitext(p)[0]),
+            reverse=True)
 
         # If jsonstreams is not present then build a complete tree of all of
         # the data and write it with json.dump
         if not _STREAMS:
             # Create a dictionary that is full of data to be written to a
             # single file
             data = collections.OrderedDict()
 
             # Load the metadata and put it into a dictionary
             with open(os.path.join(self._dest, 'metadata.json'), 'r') as f:
                 data.update(json.load(f))
 
             # If there is more metadata add it the dictionary
             if metadata:
                 data.update(metadata)
 
             # Add the tests to the dictionary
             data['results'] = []
 
+            workitemids = set()
             for test in file_list:
                 test = os.path.join(tests_dir, test)
                 if os.path.isfile(test):
                     # Try to open the json snippets. If we fail to open a test
                     # then throw the whole thing out. This gives us atomic
                     # writes, the writing worked and is valid or it didn't
                     # work.
                     try:
                         with open(test, 'r') as f:
-                            data['results'].append(json.load(f))
+                            result = json.load(f)
+                            if 'workitemid' in result:
+                                if result['workitemid'] in workitemids:
+                                    continue
+                                workitemids.add(result['workitemid'])
+                            data['results'].append(result)
                     except ValueError:
                         pass
             assert data['results']
 
             data = results.TestrunResult.from_dict(data)
 
             # write out the combined file. Use the compression writer from the
             # FileBackend
             with self._write_final(os.path.join(self._dest, 'results.json')) as f:
                 json.dump(data, f, default=piglit_encoder, indent=INDENT)
@@ -180,29 +190,34 @@ class JSONBackend(FileBackend):
                                         encoder=encoder, pretty=True) as s:
                     s.write('__type__', 'TestrunResult')
                     with open(os.path.join(self._dest, 'metadata.json'),
                               'r') as n:
                         s.iterwrite(six.iteritems(json.load(n, object_pairs_hook=collections.OrderedDict)))
 
                     if metadata:
                         s.iterwrite(six.iteritems(metadata))
 
                     with s.subarray('results') as t:
+                        workitemids = set()
                         for test in file_list:
                             test = os.path.join(tests_dir, test)
                             if os.path.isfile(test):
                                 try:
                                     with open(test, 'r') as f:
                                         a = json.load(f)
                                 except ValueError:
                                     continue
 
+                                if 'workitemid' in a:
+                                    if a['workitemid'] in workitemids:
+                                        continue
+                                    workitemids.add(a['workitemid'])
                                 t.write(a)
 
 
         # Delete the temporary files
         os.unlink(os.path.join(self._dest, 'metadata.json'))
         shutil.rmtree(os.path.join(self._dest, 'tests'))
 
     @staticmethod
     def _write(f, name, data):
         json.dump(data, f, default=piglit_encoder)
@@ -285,27 +300,37 @@ def _resume(results_dir):
         "Old results version, resume impossible"
 
     meta['results'] = []
 
     # Load all of the test names and added them to the test list
     tests_dir = os.path.join(results_dir, 'tests')
     file_list = sorted(
         (l for l in os.listdir(tests_dir) if l.endswith('.json')),
         key=lambda p: int(os.path.splitext(p)[0]))
 
+    result_by_id = {}
+
     for file_ in file_list:
         with open(os.path.join(tests_dir, file_), 'r') as f:
             try:
-                meta['results'].append(json.load(f))
+                result = json.load(f)
+                if 'workitemid' in result:
+                    # Potentially overwrite an earlier result with the same ID.
+                    # This can happen with resumed test results.
+                    result_by_id[result['workitemid']] = result
+                else:
+                    meta['results'].append(result)
             except ValueError:
                 continue
 
+    meta['results'] += six.itervalues(result_by_id)
+
     return results.TestrunResult.from_dict(meta)
 
 
 def _update_results(results, filepath):
     """ Update results to the latest version
 
     This function is a wrapper for other update_* functions, providing
     incremental updates from one version to another.
 
     Arguments:
diff --git a/framework/profile.py b/framework/profile.py
index c95ab9341..1fadb39a8 100644
--- a/framework/profile.py
+++ b/framework/profile.py
@@ -32,28 +32,30 @@ from __future__ import (
 )
 import collections
 import contextlib
 import copy
 import importlib
 import itertools
 import multiprocessing
 import multiprocessing.dummy
 import os
 import re
+import sys
+import traceback
 
 import six
 
 from framework import grouptools, exceptions, status
 from framework.dmesg import get_dmesg
 from framework.log import LogManager
 from framework.monitoring import Monitoring
-from framework.test.base import Test, DummyTest
+from framework.test.base import Test, DummyTest, TestWorkItem
 
 __all__ = [
     'RegexFilter',
     'TestDict',
     'TestProfile',
     'load_test_profile',
     'run',
 ]
 
 
@@ -348,99 +350,158 @@ def load_test_profile(filename):
     Raises:
     PiglitFatalError -- if the module cannot be imported for any reason, or if
                         the module lacks a "profile" attribute.
 
     Arguments:
     filename -- the name of a python module to get a 'profile' from
     """
     try:
         mod = importlib.import_module('tests.{0}'.format(
             os.path.splitext(os.path.basename(filename))[0]))
-    except ImportError:
+    except ImportError as e:
+        traceback.print_exc(file=sys.stderr)
         raise exceptions.PiglitFatalError(
             'Failed to import "{}", there is either something wrong with the '
             'module or it doesn\'t exist. Check your spelling?'.format(
                 filename))
 
     try:
         return mod.profile
     except AttributeError:
         raise exceptions.PiglitFatalError(
             'There is no "profile" attribute in module {}.\n'
             'Did you specify the right file?'.format(filename))
 
 
-def run(profiles, logger, backend, concurrency):
+class WorkItemBuilder(object):
+    """Build a list of TestWorkItem objects for one or more profiles.
+
+    Handles the assignment of unique IDs across profiles and matches
+    work items with results from a previous run.
+    """
+    def __init__(self, process_isolation=True, results=None):
+        self.process_isolation = process_isolation
+        self.results = dict((r.workitemid, r) for r in results or [])
+        self.workitemid = 1
+
+    def _add_workitem(self, workitems, runner, tests):
+        workitem = TestWorkItem(runner, tests, workitemid=self.workitemid)
+        self.workitemid += 1
+
+        if workitem.workitemid in self.results:
+            workitem.results = self.results[workitem.workitemid]
+            if not any(workitem.pending_tests):
+                return
+
+        workitems.append(workitem)
+
+    def __call__(self, test_iter):
+        """Given an Iterable of tests, return a list of TestWorkItems."""
+        # Group tests by runner
+        runners = collections.OrderedDict()
+        for test in test_iter:
+            if test.runner in runners:
+                runners[test.runner].append(test)
+            else:
+                runners[test.runner] = [test]
+
+        # Create workitems
+        workitems = []
+        for runner, tests in six.iteritems(runners):
+            if self.process_isolation:
+                for test in tests:
+                    self._add_workitem(workitems, runner, [test])
+            else:
+                max_tests = runner.max_tests or len(tests)
+                for idx in range(0, len(tests), max_tests):
+                    self._add_workitem(workitems, runner, tests[idx:idx + max_tests])
+
+        # Run large work items first
+        workitems.sort(key=lambda w: len(w.tests), reverse=True)
+        return workitems
+
+
+def run(profiles, logger, backend, concurrency='some',
+        process_isolation=True, results=None):
     """Runs all tests using Thread pool.
 
     When called this method will flatten out self.tests into self.test_list,
     then will prepare a logger, and begin executing tests through it's Thread
     pools.
 
     Based on the value of concurrency it will either run all the tests
     concurrently, all serially, or first the thread safe tests then the
     serial tests.
 
     Finally it will print a final summary of the tests.
 
     Arguments:
     profiles -- a list of Profile instances.
     logger   -- a log.LogManager instance.
     backend  -- a results.Backend derived instance.
+    results  -- list of TestResult instances from a previous interrupted run
+                using the same profiles and settings (resulting in the same
+                workitems).
     """
     chunksize = 1
 
     # The logger needs to know how many tests are running. Because of filters
     # there's no way to do that without making a concrete list out of the
     # filters profiles.
-    profiles = [(p, list(p.itertests())) for p in profiles]
+    workitem_builder = WorkItemBuilder(process_isolation=process_isolation,
+                                       results=results)
+    profiles = [(p, workitem_builder(p.itertests())) for p in profiles]
     log = LogManager(logger, sum(len(l) for _, l in profiles))
 
     # check that after the filters are run there are actually tests to run.
     if not any(l for _, l in profiles):
         raise exceptions.PiglitUserError('no matching tests')
 
-    def test(test, profile, this_pool=None):
+    def run_workitem(workitem, profile, this_pool=None):
         """Function to call test.execute from map"""
-        with backend.write_test(test) as w:
-            test.execute(log.get(), profile.options)
-            w(test.result)
-        if profile.options['monitor'].abort_needed:
-            this_pool.terminate()
-
-    def run_threads(pool, profile, test_list, filterby=None):
+        try:
+            with backend.write_test(workitem) as w:
+                workitem.runner.execute(workitem, log.get(), profile.options)
+                w()
+            if profile.options['monitor'].abort_needed:
+                this_pool.terminate()
+        except:
+            traceback.print_exc(file=sys.stderr)
+            raise
+
+    def run_threads(pool, profile, workitems, filterby=None):
         """ Open a pool, close it, and join it """
         if filterby:
             # Although filterby could be attached to TestProfile as a filter,
             # it would have to be removed when run_threads exits, requiring
             # more code, and adding side-effects
-            test_list = (x for x in test_list if filterby(x))
+            workitems = list(x for x in workitems if filterby(x))
 
-        pool.imap(lambda t: test(t, profile, pool),
-                  test_list, chunksize)
+        pool.imap(lambda t: run_workitem(t, profile, pool),
+                  workitems, chunksize)
 
-    def run_profile(profile, test_list):
+    def run_profile(profile, workitems):
         """Run an individual profile."""
         profile.setup()
         if concurrency == "all":
-            run_threads(multi, profile, test_list)
+            run_threads(multi, profile, workitems)
         elif concurrency == "none":
-            run_threads(single, profile, test_list)
+            run_threads(single, profile, workitems)
         else:
             assert concurrency == "some"
             # Filter and return only thread safe tests to the threaded pool
-            run_threads(multi, profile, test_list,
+            run_threads(multi, profile, workitems,
                         lambda x: x.run_concurrent)
 
             # Filter and return the non thread safe tests to the single
             # pool
-            run_threads(single, profile, test_list,
+            run_threads(single, profile, workitems,
                         lambda x: not x.run_concurrent)
         profile.teardown()
 
     # Multiprocessing.dummy is a wrapper around Threading that provides a
     # multiprocessing compatible API
     #
     # The default value of pool is the number of virtual processor cores
     single = multiprocessing.dummy.Pool(1)
     multi = multiprocessing.dummy.Pool()
 
diff --git a/framework/programs/run.py b/framework/programs/run.py
index 4acccf348..03562cd97 100644
--- a/framework/programs/run.py
+++ b/framework/programs/run.py
@@ -357,21 +357,23 @@ def run(input_):
 
     for p in profiles:
         if args.exclude_tests:
             p.filters.append(profile.RegexFilter(args.exclude_tests,
                                                  inverse=True))
         if args.include_tests:
             p.filters.append(profile.RegexFilter(args.include_tests))
 
     time_elapsed = TimeAttribute(start=time.time())
 
-    profile.run(profiles, args.log_level, backend, args.concurrency)
+    profile.run(profiles, args.log_level, backend,
+                concurrency=args.concurrency,
+                process_isolation=args.process_isolation)
 
     time_elapsed.end = time.time()
     backend.finalize({'time_elapsed': time_elapsed.to_json()})
 
     print('Thank you for running Piglit!\n'
           'Results have been written to ' + args.results_path)
 
 
 @exceptions.handler
 def resume(input_):
@@ -402,63 +404,56 @@ def resume(input_):
     core.get_config(args.config_file)
 
     options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']
 
     results.options['env'] = core.collect_system_info()
     results.options['name'] = results.name
 
     # Resume only works with the JSON backend
     backend = backends.get_backend('json')(
         args.results_path,
-        file_start_count=len(results.tests) + 1)
+        file_start_count=len(results.results) + 1)
     # Specifically do not initialize again, everything initialize does is done.
 
-    # Don't re-run tests that have already completed, incomplete status tests
-    # have obviously not completed.
-    exclude_tests = set()
-    for name, result in six.iteritems(results.tests):
-        if args.no_retry or result.result != 'incomplete':
-            exclude_tests.add(name)
-
     profiles = [profile.load_test_profile(p)
                 for p in results.options['profile']]
     for p in profiles:
         p.results_dir = args.results_path
 
         if results.options['dmesg']:
             p.dmesg = dmesg.get_dmesg(results.options['dmesg'])
 
         if results.options['monitoring']:
             p.options['monitor'] = monitoring.Monitoring(
                 results.options['monitoring'])
 
         if results.options['ignore_missing']:
             p.options['ignore_missing'] = results.options['ignore_missing']
 
-        if exclude_tests:
-            p.filters.append(lambda test: test.name.lower() not in exclude_tests)
         if results.options['exclude_filter']:
             p.filters.append(
                 profile.RegexFilter(results.options['exclude_filter'],
                                     inverse=True))
         if results.options['include_filter']:
             p.filters.append(
                 profile.RegexFilter(results.options['include_filter']))
 
         if results.options['forced_test_list']:
             p.forced_test_list = results.options['forced_test_list']
 
     # This is resumed, don't bother with time since it won't be accurate anyway
     try:
         profile.run(
             profiles,
             results.options['log_level'],
             backend,
-            results.options['concurrent'])
+            concurrency=results.options['concurrent'],
+            process_isolation=results.options['process_isolation'],
+            results=results.results)
     except exceptions.PiglitUserError as e:
         if str(e) != 'no matching tests':
             raise
 
     backend.finalize()
 
     print("Thank you for running Piglit!\n"
           "Results have been written to {0}".format(args.results_path))
diff --git a/framework/results.py b/framework/results.py
index c13379b3e..478a09021 100644
--- a/framework/results.py
+++ b/framework/results.py
@@ -141,35 +141,36 @@ class TimeAttribute(object):
 
         if '__type__' in dict_:
             del dict_['__type__']
         return cls(**dict_)
 
 
 class TestResult(object):
     """An object represting the result of a single test."""
     __slots__ = ['returncode', '_err', '_out', 'time', 'command', 'traceback',
                  'environment', 'subtests', 'dmesg', '__result', 'images',
-                 'exception', 'pid', 'root']
+                 'exception', 'pid', 'root', 'workitemid']
     err = StringDescriptor('_err')
     out = StringDescriptor('_out')
 
     def __init__(self, result=None):
         self.root = ''
         self.returncode = None
         self.time = TimeAttribute()
         self.command = str()
         self.environment = str()
         self.subtests = Subtests()
         self.dmesg = str()
         self.images = None
         self.traceback = None
         self.exception = None
+        self.workitemid = None
         self.pid = []
         if result:
             self.result = result
         else:
             self.__result = status.NOTRUN
 
     @property
     def result(self):
         """Return the result of the test.
 
@@ -265,40 +266,41 @@ class TestResult(object):
             'err': self.err,
             'out': self.out,
             'result': self.result,
             'returncode': self.returncode,
             'subtests': self.subtests.to_json(),
             'time': self.time.to_json(),
             'exception': self.exception,
             'traceback': self.traceback,
             'dmesg': self.dmesg,
             'pid': self.pid,
+            'workitemid': self.workitemid,
         }
         return obj
 
     @classmethod
     def from_dict(cls, dict_):
         """Load an already generated result in dictionary form.
 
         This is used as an alternate constructor which converts an existing
         dictionary into a TestResult object. It converts a key 'result' into a
         status.Status object
 
         """
         # pylint will say that assining to inst.out or inst.err is a non-slot
         # because self.err and self.out are descriptors, methods that act like
         # variables. Just silence pylint
         # pylint: disable=assigning-non-slot
         inst = cls()
 
         for each in ['returncode', 'command', 'exception', 'environment',
-                     'traceback', 'dmesg', 'pid', 'result', 'root']:
+                     'traceback', 'dmesg', 'pid', 'result', 'root', 'workitemid']:
             if each in dict_:
                 setattr(inst, each, dict_[each])
 
         # Set special instances
         if 'subtests' in dict_:
             inst.subtests = Subtests.from_dict(dict_['subtests'])
         if 'time' in dict_:
             inst.time = TimeAttribute.from_dict(dict_['time'])
 
         # out and err must be set manually to avoid replacing the setter
@@ -474,21 +476,21 @@ class TestrunResult(object):
         res.results = [TestResult.from_dict(t) for t in dict_['results']]
 
         for result in res.results:
             if result.subtests:
                 for subtest in six.iterkeys(result.subtests):
                     fullname = grouptools.join(result.root, subtest)
                     assert fullname not in res._tests
                     res._tests[fullname] = result
             else:
                 if result.root in res._tests:
-                    # This can happen with old resumed test results.
+                    # This can happen with resumed test results from old versions.
                     print('Warning: duplicate results for {}'.format(result.root))
                 res._tests[result.root] = result
 
         if not 'totals' in dict_ and not _no_totals:
             res.calculate_group_totals()
         else:
             res.totals = {n: Totals.from_dict(t) for n, t in
                           six.iteritems(dict_['totals'])}
 
         return res
diff --git a/framework/test/base.py b/framework/test/base.py
index d7b9432f5..ba19be947 100644
--- a/framework/test/base.py
+++ b/framework/test/base.py
@@ -33,20 +33,21 @@ import traceback
 import itertools
 import abc
 import copy
 import signal
 import warnings
 
 import six
 from six.moves import range
 
 from framework import exceptions
+from framework import grouptools
 from framework import status
 from framework.options import OPTIONS
 from framework.results import TestResult
 
 # We're doing some special crazy here to make timeouts work on python 2. pylint
 # is going to complain a lot
 # pylint: disable=wrong-import-position,wrong-import-order
 if six.PY2:
     try:
         # subprocess32 only supports *nix systems, this is important because
@@ -149,166 +150,309 @@ def is_crash_returncode(returncode):
         # - MSVCRT's abort() terminates process with exit code 3
         return returncode < 0 or returncode == 3
     else:
         return returncode < 0
 
 
 @six.add_metaclass(abc.ABCMeta)
 class Test(object):
     """ Abstract base class for Test classes
 
-    This class provides the framework for running tests, with several methods
+    This class provides the framework for representing tests, with several methods
     and properties that can be overwritten to produce a specialized class for
     running test suites other than piglit.
 
-    It provides two methods for running tests, execute and run.
-    execute() provides lots of features, and is invoced when running piglit
-    from the command line, run() is a more basic method for running the test,
-    and is called internally by execute(), but is can be useful outside of it.
+    Test objects do not provide methods for being run themselves. Instead, each
+    test has an associated TestRunner object. A single TestRunner object can be
+    associated with many tests, and provides functionality for running tests
+    without process isolation.
 
     Arguments:
-    command -- a value to be passed to subprocess.Popen
+    command -- a value interpreted by the test runner, usually by passing it
+               to subprocess.Popen
+    runner -- the test runner object; defaults to a SingleProcessRunner
 
     Keyword Arguments:
     run_concurrent -- If True the test is thread safe. Default: False
 
     """
-    __slots__ = ['name', 'run_concurrent', 'env', 'result', 'cwd', '_command']
+    __slots__ = ['name', 'run_concurrent', 'env', 'runner', 'cwd', '_command']
     timeout = None
 
-    def __init__(self, command, run_concurrent=False):
+    def __init__(self, command, run_concurrent=False, runner=None):
         assert isinstance(command, list), command
 
         self.name = ''
         self.run_concurrent = run_concurrent
         self._command = copy.copy(command)
         self.env = {}
-        self.result = TestResult()
+        self.runner = runner or DEFAULT_RUNNER
         self.cwd = None
 
-    def execute(self, log, options):
-        """ Run a test
+    @property
+    def command(self):
+        assert self._command
+        return self._command
+
+    @command.setter
+    def command(self, new):
+        assert isinstance(new, list), 'Test.command must be a list'
+        self._command = new
+
+    def interpret_result(self, result):
+        """Convert the raw output of the test into a form piglit understands.
+
+        Used by the SingleProcessRunner
+        """
+        if is_crash_returncode(result.returncode):
+            result.result = 'crash'
+        elif result.returncode != 0:
+            if result.result == 'pass':
+                result.result = 'warn'
+            else:
+                result.result = 'fail'
+
+    def is_skip(self):
+        """ Application specific check for skip
+
+        If this method raises TestIsSkip then the current test will be
+        skipped. The base version never skips.
+
+        """
+        pass
+
+    def __eq__(self, other):
+        return type(self) == type(other) and self.command == other.command
+
+    def __ne__(self, other):
+        return not self == other
+
+
+class DummyTest(Test):
+    def __init__(self, name, result):
+        super(DummyTest, self).__init__([name], runner=DummyTestRunner())
+        self.testresult = result
+
+
+class TestWorkItem(object):
+    """A test work item contains one or more tests that are run together to
+    produce a single TestResult.
+
+    Each workitem has a unique integer ID which is used to match existing
+    results when resuming a test run.
+    """
+    __slots__ = ['_workitemid', 'runner', 'tests', 'results']
+
+    def __init__(self, runner, tests, workitemid=None):
+        assert len(tests) >= 1
+        self._workitemid = workitemid
+        self.runner = runner
+        self.tests = tests
+        self.results = TestResult()
+        self.results.root = grouptools.commonprefix([test.name for test in tests])
+        self.results.workitemid = workitemid
+
+        # Fill in the initial test statuses, determining which tests should be skipped.
+        for test in tests:
+            try:
+                self.results.add_test(test.name)
+                test.is_skip()
+            except TestIsSkip as e:
+                self.results.set_result(test.name, status.SKIP)
+                relative = grouptools.relative(self.results.root, test.name)
+                if relative:
+                    relative = ' ' + relative
+                self.results.out += 'Skip{}: {}\n'.format(relative, e.reason)
+
+    @property
+    def workitemid(self):
+        """Return the ID of the work item."""
+        return self._workitemid
+
+    @workitemid.setter
+    def workitemid(self, workitemid):
+        """Set the ID of the work item.
+
+        This may only be called once.
+        """
+        assert self._workitemid is None
+        self._workitemid = workitemid
+        self.results.workitemid = workitemid
+
+    @property
+    def pretty_name(self):
+        """Return the workitem's name for printing in logs."""
+        return self.results.root
+
+    @property
+    def pending_tests(self):
+        """Iterator of sub-tests (if any) that still need to be run."""
+        for test in self.tests:
+            if test.name != self.results.root and not self.results.have_test(test.name):
+                # This happens when a test has subtests that aren't recorded
+                # in the test profile.
+                continue
+
+            result = self.results.get_result(test.name)
+            if result == status.NOTRUN or \
+               (result == status.INCOMPLETE and not OPTIONS.no_retry):
+                yield test
+
+    @property
+    def run_concurrent(self):
+        """Whether to run the workitem concurrently with others."""
+        return all(t.run_concurrent for t in self.tests)
+
+
+ at six.add_metaclass(abc.ABCMeta)
+class TestRunner(object):
+    """Abstract base class for running one or more tests.
+
+    It provides two methods for running tests, execute and run.
+    execute() provides lots of features and is invoked when running piglit
+    from the command line. run() is a more basic method for running the tests;
+    it is called internally by execute(), but it can be useful outside of it.
+    """
+
+    def __init__(self):
+        pass
+
+    @property
+    def max_tests(self):
+        """Upper limit on the number of tests per work item.
+
+        Can return None to indicate no upper limit.
+        """
+        pass
+
+    def execute(self, workitem, log, options):
+        """Run tests
 
         Run a test, but with features. This times the test, uses dmesg checking
         (if requested), and runs the logger.
 
         Arguments:
-        path    -- the name of the test
-        log     -- a log.Log instance
-        options -- a dictionary containing dmesg and monitoring objects
+        workitem -- the workitem to be run
+        log      -- a log.Log instance
+        options  -- a dictionary containing dmesg and monitoring objects
         """
-        log.start(self.name)
+        log.start(workitem.pretty_name)
         # Run the test
-        self.result.root = self.name
         if OPTIONS.execute:
             try:
-                self.result.time.start = time.time()
+                workitem.results.time.start = time.time()
                 options['dmesg'].update_dmesg()
                 options['monitor'].update_monitoring()
-                self.run()
-                self.result.time.end = time.time()
-                self.result = options['dmesg'].update_result(self.result)
+                self.run(workitem)
+                workitem.results.time.end = time.time()
+                workitem.results = options['dmesg'].update_result(workitem.results)
                 options['monitor'].check_monitoring()
             # This is a rare case where a bare exception is okay, since we're
             # using it to log exceptions
             except:
                 exc_type, exc_value, exc_traceback = sys.exc_info()
                 traceback.print_exc(file=sys.stderr)
-                self.result.result = 'fail'
-                self.result.exception = "{}{}".format(exc_type, exc_value)
-                self.result.traceback = "".join(
+                workitem.results.result = 'crash'
+                workitem.results.exception = "{}{}".format(exc_type, exc_value)
+                workitem.results.traceback = "".join(
                     traceback.format_tb(exc_traceback))
 
-            log.log(self.result.result)
+            log.log(workitem.results.result)
         else:
             log.log('dry-run')
 
-    @property
-    def command(self):
-        assert self._command
-        return self._command
-
-    @command.setter
-    def command(self, new):
-        assert isinstance(new, list), 'Test.command must be a list'
-        self._command = new
-
-    @abc.abstractmethod
-    def interpret_result(self):
-        """Convert the raw output of the test into a form piglit understands.
+    def run(self, workitem):
         """
-        if is_crash_returncode(self.result.returncode):
-            self.result.result = 'crash'
-        elif self.result.returncode != 0:
-            if self.result.result == 'pass':
-                self.result.result = 'warn'
-            else:
-                self.result.result = 'fail'
+        Run tests of a workitem.
 
-    def run(self):
-        """
-        Run a test.  The return value will be a dictionary with keys
+        The workitem's results will be updated with information
         including 'result', 'info', 'returncode' and 'command'.
         * For 'result', the value may be one of 'pass', 'fail', 'skip',
           'crash', or 'warn'.
         * For 'info', the value will include stderr/out text.
         * For 'returncode', the value will be the numeric exit code/value.
         * For 'command', the value will be command line program and arguments.
         """
-        self.result.command = ' '.join(self.command)
-        self.result.environment = " ".join(
-            '{0}="{1}"'.format(k, v) for k, v in itertools.chain(
-                six.iteritems(OPTIONS.env), six.iteritems(self.env)))
-
-        try:
-            self.is_skip()
-        except TestIsSkip as e:
-            self.result.result = status.SKIP
-            for each in six.iterkeys(self.result.subtests):
-                self.result.subtests[each] = status.SKIP
-            self.result.out = e.reason
-            self.result.returncode = None
-            return
+        def merge_results(results):
+            resumed = workitem.results.command
 
-        try:
-            self._run_command()
-        except TestRunError as e:
-            self.result.result = six.text_type(e.status)
-            for each in six.iterkeys(self.result.subtests):
-                self.result.subtests[each] = six.text_type(e.status)
-            self.result.out = six.text_type(e)
-            self.result.returncode = None
-            return
+            if resumed:
+                workitem.results.out += '\n\nRESUME\n\n'
+                workitem.results.err += '\n\nRESUME\n\n'
+            else:
+                workitem.results.command = results.command
+                workitem.results.environment = results.environment
+
+            workitem.results.out += results.out
+            workitem.results.err += results.err
+            workitem.results.pid += results.pid
+            workitem.results.returncode = results.returncode
+
+            change = False
+            for key in results.tests:
+                result = results.get_result(key)
+                if not workitem.results.have_test(key):
+                    workitem.results.add_test(key)
+                    change = True
+                else:
+                    change = change or (result != workitem.results.get_result(key))
+                workitem.results.set_result(key, result)
+
+            return change
+
+        while True:
+            tests = list(workitem.pending_tests)
+            if not tests:
+                break
+
+            results = TestResult()
+            results.root = workitem.results.root
+            for test in tests:
+                results.add_test(test.name)
 
-        self.interpret_result()
+            try:
+                self._run_tests(results, tests)
+
+                if not merge_results(results):
+                    raise TestRunError('No change in any subtest', 'fail')
+            except TestRunError as e:
+                workitem.results.result = six.text_type(e.status)
+                for each in results.tests:
+                    results.set_result(each, six.text_type(e.status))
+                results.out += six.text_type(e)
+                results.returncode = None
+                merge_results(results)
+                return
+            except:
+                # Ensure that the command and its output are logged,
+                # as this can help debug errors in the test output parsers.
+                merge_results(results)
+                raise
 
-    def is_skip(self):
-        """ Application specific check for skip
+    @abc.abstractmethod
+    def _run_tests(self, results, tests):
+        """Run a list of tests together and store the results.
 
-        If this function returns a truthy value then the current test will be
-        skipped. The base version will always return False
+        The caller will provide a pristine TestResult object.
 
+        Must be implemented by derived classes.
         """
         pass
 
-    def _run_command(self, **kwargs):
-        """ Run the test command and get the result
+    def _run_command(self, result, command, cwd=None, env={}, timeout=None, **kwargs):
+        """Helper method for running a test command and filling in a TestResult
 
-        This method sets environment options, then runs the executable. If the
-        executable isn't found it sets the result to skip.
+        This method sets environment options, then runs the executable.
 
-        """
-        # This allows the ReducedProcessMixin to work without having to whack
-        # self.command (which should be treated as immutable), but is
-        # considered private.
-        command = kwargs.pop('_command', self.command)
+        An exception is raised if the executable isn't found or a timeout occurs.
 
+        """
         # Setup the environment for the test. Environment variables are taken
         # from the following sources, listed in order of increasing precedence:
         #
         #   1. This process's current environment.
         #   2. Global test options. (Some of these are command line options to
         #      Piglit's runner script).
         #   3. Per-test environment variables set in all.py.
         #
         # Piglit chooses this order because Unix tradition dictates that
         # command line options (2) override environment variables (1); and
@@ -316,35 +460,39 @@ class Test(object):
         # requirements.
         #
         # passing this as unicode is basically broken in python2 on windows, it
         # must be passed a bytes.
         if six.PY2 and sys.platform.startswith('win32'):
             f = six.binary_type
         else:
             f = six.text_type
         _base = itertools.chain(six.iteritems(os.environ),
                                 six.iteritems(OPTIONS.env),
-                                six.iteritems(self.env))
+                                six.iteritems(env))
         fullenv = {f(k): f(v) for k, v in _base}
 
+        result.command = ' '.join(command)
+        result.environment = " ".join(
+            '{0}="{1}"'.format(k, v) for k, v in six.iteritems(fullenv))
+
         try:
             proc = subprocess.Popen(command,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
-                                    cwd=self.cwd,
+                                    cwd=cwd,
                                     env=fullenv,
                                     universal_newlines=True,
                                     **_EXTRA_POPEN_ARGS)
 
-            self.result.pid.append(proc.pid)
+            result.pid.append(proc.pid)
             if not _SUPPRESS_TIMEOUT:
-                out, err = proc.communicate(timeout=self.timeout)
+                out, err = proc.communicate(timeout=timeout)
             else:
                 out, err = proc.communicate()
             returncode = proc.returncode
         except OSError as e:
             # Different sets of tests get built under different build
             # configurations.  If a developer chooses to not build a test,
             # Piglit should not report that test as having failed.
             if e.errno == errno.ENOENT:
                 raise TestRunError("Test executable not found.\n", 'skip')
             else:
@@ -357,243 +505,123 @@ class Test(object):
             proc.terminate()
 
             # XXX: This is probably broken on windows, since os.getpgid doesn't
             # exist on windows. What is the right way to handle this?
             if proc.poll() is None:
                 time.sleep(3)
                 os.killpg(os.getpgid(proc.pid), signal.SIGKILL)
 
             # Since the process isn't running it's safe to get any remaining
             # stdout/stderr values out and store them.
-            self.result.out, self.result.err = proc.communicate()
+            result.out, result.err = proc.communicate()
 
             raise TestRunError(
                 'Test run time exceeded timeout value ({} seconds)\n'.format(
-                    self.timeout),
+                    timeout),
                 'timeout')
 
         # The setter handles the bytes/unicode conversion
-        self.result.out = out
-        self.result.err = err
-        self.result.returncode = returncode
+        result.out = out
+        result.err = err
+        result.returncode = returncode
 
-    def __eq__(self, other):
-        return self.command == other.command
 
-    def __ne__(self, other):
-        return not self == other
+class DummyTestRunner(TestRunner):
+    """Test runner for use with DummyTest objects."""
+    def _run_tests(self, results, tests):
+        for test in tests:
+            results.set_result(test.name, test.testresult)
 
 
-class DummyTest(Test):
-    def __init__(self, name, result):
-        super(DummyTest, self).__init__([name])
-        self.result.result = result
+class SingleProcessRunner(TestRunner):
+    """Test runner that runs a single test in its own process.
 
-    def execute(self, log, options):
-        pass
+    Requires an implementation of Test.interpret_result
+    """
+    @TestRunner.max_tests.getter
+    def max_tests(self):
+        return 1
 
-    def interpret_result(self):
-        pass
+    def _run_tests(self, results, tests):
+        assert len(tests) == 1
+        test = tests[0]
+
+        self._run_command(results,
+                          test.command,
+                          cwd=test.cwd,
+                          env=test.env,
+                          timeout=test.timeout)
 
+        test.interpret_result(results)
+
+DEFAULT_RUNNER = SingleProcessRunner()
 
 class WindowResizeMixin(object):
     """ Mixin class that deals with spurious window resizes
 
     On gnome (and possible other DE's) the window manager may decide to resize
     a window. This causes the test to fail even though otherwise would not.
     This Mixin overides the _run_command method to run the test 5 times, each
     time searching for the string 'Got suprious window resize' in the output,
     if it fails to find it it will break the loop and continue.
 
     see: https://bugzilla.gnome.org/show_bug.cgi?id=680214
 
     """
-    def _run_command(self, *args, **kwargs):
+    def _run_command(self, result, command, **kwargs):
         """Run a test up 5 times when window resize is detected.
 
         Rerun the command up to 5 times if the window size changes, if it
         changes 6 times mark the test as fail and return True, which will cause
         Test.run() to return early.
 
         """
         for _ in range(5):
-            super(WindowResizeMixin, self)._run_command(*args, **kwargs)
-            if "Got spurious window resize" not in self.result.out:
+            super(WindowResizeMixin, self)._run_command(result, command, **kwargs)
+            if "Got spurious window resize" not in result.out:
                 return
 
         # If we reach this point then there has been no error, but spurious
         # resize was detected more than 5 times. Set the result to fail
         raise TestRunError('Got spurious resize more than 5 times', 'fail')
 
 
 class ValgrindMixin(object):
     """Mixin class that adds support for running tests through valgrind.
 
     This mixin allows a class to run with the --valgrind option.
 
     """
-    @Test.command.getter
-    def command(self):
-        command = super(ValgrindMixin, self).command
+    def _run_command(self, results, command, **kwargs):
+        """Inject the valgrind command line before running the command."""
         if OPTIONS.valgrind:
-            return ['valgrind', '--quiet', '--error-exitcode=1',
-                    '--tool=memcheck'] + command
-        else:
-            return command
+            command = ['valgrind', '--quiet', '--error-exitcode=1',
+                       '--tool=memcheck'] + command
+
+        super(ValgrindMixin, self)._run_command(
+            results, command, **kwargs)
 
-    def interpret_result(self):
+    def _run_tests(self, results, tests):
         """Set the status to the valgrind status.
 
-        It is important that the valgrind interpret_results code is run last,
+        It is important that the code interpreting valgrind results is run last,
         since it depends on the statuses already set and passed to it,
-        including the Test.interpret_result() method. To this end it executes
-        super().interpret_result(), then calls it's own result.
+        including those set by the Test.interpret_result() method.
 
         """
-        super(ValgrindMixin, self).interpret_result()
+        assert len(tests) == 1
+
+        super(ValgrindMixin, self)._run_tests(results, tests)
 
         if OPTIONS.valgrind:
             # If the underlying test failed, simply report
             # 'skip' for this valgrind test.
-            if self.result.result != 'pass' and (
-                    self.result.result != 'warn' and
-                    self.result.returncode != 0):
-                self.result.result = 'skip'
-            elif self.result.returncode == 0:
+            if results.result != 'pass' and (
+                    results.result != 'warn' and
+                    results.returncode != 0):
+                results.result = 'skip'
+            elif results.returncode == 0:
                 # Test passes and is valgrind clean.
-                self.result.result = 'pass'
+                results.result = 'pass'
             else:
                 # Test passed but has valgrind errors.
-                self.result.result = 'fail'
-
-
- at six.add_metaclass(abc.ABCMeta)
-class ReducedProcessMixin(object):
-    """This Mixin simplifies writing Test classes that run more than one test
-    in a single process.
-
-    Although one of the benefits of piglit is it's process isolation, there are
-    times that process isolation is too expensive for day to day runs, and
-    running more than one test in a single process is a valid trade-off for
-    decreased run times. This class helps to ease writing a Test class for such
-    a purpose, while not suffering all of the drawback of the approach.
-
-    The first way that this helps is that it provides crash detection and
-    recovery, allowing a single subtest to crash
-    """
-
-    def __init__(self, command, subtests=None, **kwargs):
-        assert subtests is not None
-        super(ReducedProcessMixin, self).__init__(command, **kwargs)
-        self._expected = subtests
-        self._populate_subtests()
-
-    def is_skip(self):
-        """Skip if the length of expected is 0."""
-        if not self._expected:
-            raise TestIsSkip('All subtests skipped')
-        super(ReducedProcessMixin, self).is_skip()
-
-    def __find_sub(self):
-        """Helper for getting the next index."""
-        return len([l for l in self.result.out.split('\n')
-                    if self._is_subtest(l)])
-
-    @staticmethod
-    def _subtest_name(test):
-        """If the name provided isn't the subtest name, this method does."""
-        return test
-
-    def _stop_status(self):
-        """This method returns the status of the test that stopped the run.
-
-        By default this will return status.CRASH, but this may not be suitable
-        for some suites, which may require special considerations and need to
-        require a different status in some cases, like SKIP.
-        """
-        return status.CRASH
-
-    def _run_command(self, *args, **kwargs):
-        """Run the command until all of the subtests have completed or crashed.
-
-        This method will try to run all of the subtests, resuming the run if
-        it's interrupted, and combining the stdout and stderr attributes
-        together for parsing later. I will separate those values with
-        "\n\n====RESUME====\n\n".
-        """
-        super(ReducedProcessMixin, self)._run_command(*args, **kwargs)
-
-        if not self._is_cherry():
-            returncode = self.result.returncode
-            out = [self.result.out]
-            err = [self.result.err]
-            cur_sub = self.__find_sub() or 1
-            last = len(self._expected)
-
-            while cur_sub < last:
-                self.result.subtests[
-                    self._subtest_name(self._expected[cur_sub - 1])] = \
-                        self._stop_status()
-
-                super(ReducedProcessMixin, self)._run_command(
-                    _command=self._resume(cur_sub) + list(args), **kwargs)
-
-                out.append(self.result.out)
-                err.append(self.result.err)
-
-                # If the index is 0 the next test failed without printing a
-                # name, increase by 1 so that test will be marked crash and we
-                # don't get stuck in an infinite loop, otherwise return the
-                # number of tests that did complete.
-                cur_sub += self.__find_sub() or 1
-
-            if not self._is_cherry():
-                self.result.subtests[
-                    self._subtest_name(self._expected[cur_sub - 1])] = \
-                        self._stop_status()
-
-            # Restore and keep the original returncode (so that it remains a
-            # non-pass, since only one test might fail and the resumed part
-            # might return 0)
-            self.result.returncode = returncode
-            self.result.out = '\n\n====RESUME====\n\n'.join(out)
-            self.result.err = '\n\n====RESUME====\n\n'.join(err)
-
-    def _is_cherry(self):
-        """Method used to determine if rerunning is required.
-
-        If this returns False then the rerun path will be entered, otherwise
-        _run_command is effectively a bare call to super().
-
-        Classes using this mixin may need to overwrite this if the binary
-        they're calling can stop prematurely but return 0.
-        """
-        return self.result.returncode == 0
-
-    def _populate_subtests(self):
-        """Default implementation of subtest prepopulation.
-
-        It may be necissary to override this depending on the subtest format.
-        """
-        self.result.subtests.update({x: status.NOTRUN for x in self._expected})
-
-    @abc.abstractmethod
-    def _resume(self, current):
-        """Method that defines how to resume the case if it crashes.
-
-        This method will be provided with a completed count, which is the index
-        into self._expected of the first subtest that hasn't been run. This
-        method should return the command to restart, and the ReduceProcessMixin
-        will handle actually restarting the the process with the new command.
-        """
-
-    @abc.abstractmethod
-    def _is_subtest(self, line):
-        """Determines if a line in stdout contains a subtest name.
-
-        This method is used during the resume detection phase of the
-        _run_command method to determine how many subtests have successfully
-        been run.
-
-        Should simply return True if the line reprents a test starting, or
-        False if it does not.
-        """
+                results.result = 'fail'
diff --git a/framework/test/deqp.py b/framework/test/deqp.py
index 871ce2545..8627feabb 100644
--- a/framework/test/deqp.py
+++ b/framework/test/deqp.py
@@ -186,43 +186,43 @@ class DEQPBaseTest(Test):
         # otherwise it cannot find its data files (2014-12-07).
         # This must be called after super or super will overwrite it
         self.cwd = os.path.dirname(self.deqp_bin)
 
     @Test.command.getter
     def command(self):
         """Return the command plus any extra arguments."""
         command = super(DEQPBaseTest, self).command
         return command + self.extra_args
 
-    def __find_map(self):
+    def __find_map(self, result):
         """Run over the lines and set the result."""
         # splitting this into a separate function allows us to return cleanly,
         # otherwise this requires some break/else/continue madness
-        for line in self.result.out.split('\n'):
+        for line in result.out.split('\n'):
             line = line.lstrip()
             for k, v in six.iteritems(self.__RESULT_MAP):
                 if line.startswith(k):
-                    self.result.result = v
+                    result.result = v
                     return
 
-    def interpret_result(self):
-        if is_crash_returncode(self.result.returncode):
-            self.result.result = 'crash'
-        elif self.result.returncode != 0:
-            self.result.result = 'fail'
+    def interpret_result(self, result):
+        if is_crash_returncode(result.returncode):
+            result.result = 'crash'
+        elif result.returncode != 0:
+            result.result = 'fail'
         else:
-            self.__find_map()
+            self.__find_map(result)
 
         # We failed to parse the test output. Fallback to 'fail'.
-        if self.result.result == 'notrun':
-            self.result.result = 'fail'
-
-    def _run_command(self, *args, **kwargs):
-        """Rerun the command if X11 connection failure happens."""
-        for _ in range(5):
-            super(DEQPBaseTest, self)._run_command(*args, **kwargs)
-            x_err_msg = "FATAL ERROR: Failed to open display"
-            if x_err_msg in self.result.err or x_err_msg in self.result.out:
-                continue
-            return
-
-        raise TestRunError('Failed to connect to X server 5 times', 'fail')
+        if result.result == 'notrun':
+            result.result = 'fail'
+
+    # def _run_command(self, *args, **kwargs):
+    #     """Rerun the command if X11 connection failure happens."""
+    #     for _ in range(5):
+    #         super(DEQPBaseTest, self)._run_command(*args, **kwargs)
+    #         x_err_msg = "FATAL ERROR: Failed to open display"
+    #         if x_err_msg in self.result.err or x_err_msg in self.result.out:
+    #             continue
+    #         return
+
+    #     raise TestRunError('Failed to connect to X server 5 times', 'fail')
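
The deqp change above follows the new interpret_result(self, result)
contract: the runner hands in the TestResult to fill instead of the test
mutating self.result. A minimal sketch of a subclass written against that
contract; ReturnCodeTest is a hypothetical example, not part of this series:

    from framework.test.base import Test

    class ReturnCodeTest(Test):
        def interpret_result(self, result):
            # The runner passes the TestResult in, so the same Test instance
            # can be reused across work items without carrying state.
            result.result = 'pass' if result.returncode == 0 else 'fail'
            super(ReturnCodeTest, self).interpret_result(result)
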
diff --git a/framework/test/gleantest.py b/framework/test/gleantest.py
index 0220c1a77..5389e6051 100644
--- a/framework/test/gleantest.py
+++ b/framework/test/gleantest.py
@@ -53,25 +53,25 @@ class GleanTest(Test):
             **kwargs)
 
     @Test.command.getter
     def command(self):
         return super(GleanTest, self).command + self.GLOBAL_PARAMS
 
     @command.setter
     def command(self, new):
         self._command = [n for n in new if not n in self.GLOBAL_PARAMS]
 
-    def interpret_result(self):
-        if self.result.returncode != 0 or 'FAIL' in self.result.out:
-            self.result.result = 'fail'
+    def interpret_result(self, result):
+        if result.returncode != 0 or 'FAIL' in result.out:
+            result.result = 'fail'
         else:
-            self.result.result = 'pass'
-        super(GleanTest, self).interpret_result()
+            result.result = 'pass'
+        super(GleanTest, self).interpret_result(result)
 
     def is_skip(self):
         # Glean tests require glx
         if options.OPTIONS.env['PIGLIT_PLATFORM'] not in ['glx', 'mixed_glx_egl']:
             raise TestIsSkip(
                 'Glean tests require platform to support glx, '
                 'but the platform is "{}"'.format(
                     options.OPTIONS.env['PIGLIT_PLATFORM']))
         super(GleanTest, self).is_skip()
diff --git a/framework/test/gtest.py b/framework/test/gtest.py
index b331a2fb3..6ecec8f54 100644
--- a/framework/test/gtest.py
+++ b/framework/test/gtest.py
@@ -29,23 +29,23 @@ from __future__ import (
 import re
 
 from .base import Test
 
 __all__ = [
     'GTest',
 ]
 
 
 class GTest(Test):
-    def interpret_result(self):
+    def interpret_result(self, result):
         # Since gtests can have several subtets, if any of the subtests fail
         # then we need to report fail.
-        out = self.result.out
+        out = result.out
         if len(re.findall('FAILED', out, re.MULTILINE)) > 0:
-            self.result.result = 'fail'
+            result.result = 'fail'
         elif len(re.findall('PASSED', out, re.MULTILINE)) > 0:
-            self.result.result = 'pass'
+            result.result = 'pass'
         else:
             #If we get here, then the test probably exited early.
-            self.result.result = 'fail'
+            result.result = 'fail'
 
-        super(GTest, self).interpret_result()
+        super(GTest, self).interpret_result(result)
diff --git a/framework/test/oclconform.py b/framework/test/oclconform.py
index 0a644995d..1b84ab909 100644
--- a/framework/test/oclconform.py
+++ b/framework/test/oclconform.py
@@ -39,25 +39,25 @@ __all__ = [
     'OCLConform',
     'add_oclconform_tests',
 ]
 
 
 def get_test_section_name(test):
     return 'oclconform-{}'.format(test)
 
 
 class OCLConform(Test):
-    def interpret_result(self):
-        if self.result.returncode != 0 or 'FAIL' in self.result.out:
-            self.result.result = 'fail'
+    def interpret_result(self, result):
+        if result.returncode != 0 or 'FAIL' in result.out:
+            result.result = 'fail'
         else:
-            self.result.result = 'pass'
+            result.result = 'pass'
 
 
 def add_sub_test(profile, test_name, subtest_name, subtest):
     profile.test_list[grouptools.join('oclconform', test_name,
                                       subtest_name)] = subtest
 
 
 def add_test(profile, test_name, test):
     profile.test_list[grouptools.join('oclconform', test_name)] = test
 
diff --git a/framework/test/piglit_test.py b/framework/test/piglit_test.py
index 491f3d3d4..4d7fe7c51 100644
--- a/framework/test/piglit_test.py
+++ b/framework/test/piglit_test.py
@@ -27,87 +27,110 @@ from __future__ import (
 )
 import glob
 import os
 import sys
 try:
     import simplejson as json
 except ImportError:
     import json
 
 from framework import core, options
-from .base import Test, WindowResizeMixin, ValgrindMixin, TestIsSkip
+from .base import (Test, SingleProcessRunner, WindowResizeMixin,
+                   ValgrindMixin, TestIsSkip)
 
 
 __all__ = [
     'PiglitCLTest',
+    'PiglitGLRunner',
     'PiglitGLTest',
     'PiglitBaseTest',
     'CL_CONCURRENT',
     'TEST_BIN_DIR',
 ]
 
 if 'PIGLIT_BUILD_DIR' in os.environ:
     TEST_BIN_DIR = os.path.join(os.environ['PIGLIT_BUILD_DIR'], 'bin')
 else:
     TEST_BIN_DIR = os.path.normpath(os.path.join(os.path.dirname(__file__),
                                                  '../../bin'))
 
 CL_CONCURRENT = (not sys.platform.startswith('linux') or
                  glob.glob('/dev/dri/render*'))
 
 
-class PiglitBaseTest(ValgrindMixin, Test):
+class PiglitBaseRunner(ValgrindMixin, SingleProcessRunner):
+    """ Runner for basic "native" piglit tests.
+
+    """
+    pass
+
+
+class PiglitBaseTest(Test):
     """
     PiglitTest: Run a "native" piglit test executable
 
     Expect one line prefixed PIGLIT: in the output, which contains a result
     dictionary. The plain output is appended to this dictionary
     """
-    def __init__(self, command, run_concurrent=True, **kwargs):
-        super(PiglitBaseTest, self).__init__(command, run_concurrent, **kwargs)
+    __DEFAULT_RUNNER = PiglitBaseRunner()
+
+    def __init__(self, command, run_concurrent=True, runner=None, **kwargs):
+        runner = runner or self.__DEFAULT_RUNNER
+        super(PiglitBaseTest, self).__init__(command,
+                                             run_concurrent=run_concurrent,
+                                             runner=runner,
+                                             **kwargs)
 
         # Prepend TEST_BIN_DIR to the path.
         self._command[0] = os.path.join(TEST_BIN_DIR, self._command[0])
 
-    def interpret_result(self):
+    def interpret_result(self, result):
         out = []
 
-        for each in self.result.out.split('\n'):
+        for each in result.out.split('\n'):
             if each.startswith('PIGLIT:'):
-                self.result.update(json.loads(each[8:]))
+                result.update(json.loads(each[8:]))
             else:
                 out.append(each)
 
-        self.result.out = '\n'.join(out)
+        result.out = '\n'.join(out)
 
-        super(PiglitBaseTest, self).interpret_result()
+        super(PiglitBaseTest, self).interpret_result(result)
 
 
-class PiglitGLTest(WindowResizeMixin, PiglitBaseTest):
+class PiglitGLRunner(WindowResizeMixin, PiglitBaseRunner):
+    """ Runner for "native" piglit GL tests """
+    pass
+
+
+class PiglitGLTest(PiglitBaseTest):
     """ OpenGL specific Piglit test class
 
     This Subclass provides provides an is_skip() implementation that skips glx
     tests on non-glx platforms
 
     This class also provides two additional keyword arguments, require_platform
     and exclude_platforms. require_platforms may be set to a list of platforms
     which the test requires to run. This should be resereved for platform
     specific tests, such as GLX specific tests, or EGL specific tests. Multiple
     platforms are allowed because EGL can be fulfilled by multiple platforms.
     exclude_platforms is a list of platforms a test should not be run on, this
     is useful for tests that are valid on more than one platform, but not on
     all of them. This will probably be mainly used to exclude gbm. These
     options are mutually exclusive.
 
     """
+    __DEFAULT_RUNNER = PiglitGLRunner()
+
     def __init__(self, command, require_platforms=None, exclude_platforms=None,
-                 **kwargs):
+                 runner=None, **kwargs):
+        runner = runner or self.__DEFAULT_RUNNER
+
         # TODO: There is a design flaw in python2, keyword args can be
         # fulfilled as positional arguments. This sounds really great, until
         # you realize that because of it you cannot use the splat operator with
         # args and create new keyword arguments.
         # What we really want is __init__(self, *args, new_arg=None, **kwargs),
         # but this doesn't work in python2. In python3 thanks to PEP3102, you
         # can in fact do just that
         # The work around is to explicitely pass the arguments down.
         super(PiglitGLTest, self).__init__(command, **kwargs)
 
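
The class-level __DEFAULT_RUNNER above means every PiglitBaseTest and
PiglitGLTest shares one stateless runner object unless a specific runner is
passed in (as the shader tests do further down). A self-contained sketch of
that pattern; ExampleRunner and ExampleTest are illustrative names only:

    class ExampleRunner(object):
        """Stateless runner shared by all tests that do not override it."""

    class ExampleTest(object):
        __DEFAULT_RUNNER = ExampleRunner()

        def __init__(self, runner=None):
            # Fall back to the single shared default; passing an explicit
            # runner lets a profile group several tests into one work item.
            self.runner = runner or self.__DEFAULT_RUNNER

    a, b = ExampleTest(), ExampleTest()
    assert a.runner is b.runner                    # shared default
    c = ExampleTest(runner=ExampleRunner())
    assert c.runner is not a.runner                # explicit runner wins
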
diff --git a/framework/test/shader_test.py b/framework/test/shader_test.py
index 3e67cbd4d..d0c2b8d9e 100644
--- a/framework/test/shader_test.py
+++ b/framework/test/shader_test.py
@@ -23,25 +23,31 @@
 
 """ This module enables running shader tests. """
 
 from __future__ import (
     absolute_import, division, print_function, unicode_literals
 )
 import io
 import os
 import re
 
+try:
+    import simplejson as json
+except ImportError:
+    import json
+
 from framework import exceptions
+from framework import grouptools
 from framework import status
-from .base import ReducedProcessMixin, TestIsSkip
+from .base import TestIsSkip, is_crash_returncode
 from .opengl import FastSkipMixin, FastSkip
-from .piglit_test import PiglitBaseTest
+from .piglit_test import PiglitBaseTest, PiglitGLRunner
 
 __all__ = [
     'ShaderTest',
 ]
 
 
 class Parser(object):
     """An object responsible for parsing a shader_test file."""
 
     _is_gl = re.compile(r'GL (<|<=|=|>=|>) \d\.\d')
@@ -160,124 +166,88 @@ class ShaderTest(FastSkipMixin, PiglitBaseTest):
 
         super(ShaderTest, self).__init__(
             [parser.prog, parser.filename],
             run_concurrent=True,
             gl_required=parser.gl_required,
             gl_version=parser.gl_version,
             gles_version=parser.gles_version,
             glsl_version=parser.glsl_version,
             glsl_es_version=parser.glsl_es_version)
 
-    @PiglitBaseTest.command.getter
-    def command(self):
-        """ Add -auto and -fbo to the test command """
-        return self._command + ['-auto', '-fbo']
-
-    @command.setter
-    def command(self, new):
-        self._command = [n for n in new if n not in ['-auto', '-fbo']]
+    # @PiglitBaseTest.command.getter
+    # def command(self):
+    #     """ Add -auto and -fbo to the test command """
+    #     return self._command + ['-auto', '-fbo']
 
+    # @command.setter
+    # def command(self, new):
+    #     self._command = [n for n in new if n not in ['-auto', '-fbo']]
 
-class MultiShaderTest(ReducedProcessMixin, PiglitBaseTest):
-    """A Shader class that can run more than one test at a time.
 
-    This class can call shader_runner with multiple shader_files at a time, and
-    interpret the results, as well as handle pre-mature exit through crashes or
-    from breaking import assupmtions in the utils about skipping.
+class ShaderTestRunner(PiglitGLRunner):
+    """A TestRunner class that can run more than one test at a time.
 
-    Arguments:
-    filenames -- a list of absolute paths to shader test files
     """
+    @PiglitGLRunner.max_tests.getter
+    def max_tests(self):
+        return None  # no limit
 
-    def __init__(self, filenames):
-        assert filenames
+    def _run_tests(self, results, tests):
         prog = None
         files = []
         subtests = []
-        skips = []
 
-        # Walk each subtest, and either add it to the list of tests to run, or
-        # determine it is skip, and set the result of that test in the subtests
-        # dictionary to skip without adding it ot the liest of tests to run
-        for each in filenames:
-            parser = Parser(each)
-            parser.parse()
-            subtest = os.path.basename(os.path.splitext(each)[0]).lower()
+        for test in tests:
+            assert isinstance(test, ShaderTest)
+
+            test_prog = test.command[0]
+            test_file = test.command[1]
 
-            if prog is not None:
+            if prog is None:
+                prog = test_prog
+            elif prog != test_prog:
                 # This allows mixing GLES2 and GLES3 shader test files
                 # together. Since GLES2 profiles can be promoted to GLES3, this
                 # is fine.
-                if parser.prog != prog:
-                    # Pylint can't figure out that prog is not None.
-                    if 'gles' in parser.prog and 'gles' in prog:  # pylint: disable=unsupported-membership-test
-                        prog = max(parser.prog, prog)
-                    else:
-                        # The only way we can get here is if one is GLES and
-                        # one is not, since there is only one desktop runner
-                        # thus it will never fail the is parser.prog != prog
-                        # check
-                        raise exceptions.PiglitInternalError(
-                            'GLES and GL shaders in the same command!\n'
-                            'Cannot pick a shader_runner binary!')
+                # Pylint can't figure out that prog is not None.
+                if 'gles' in prog and 'gles' in test_prog: # pylint: disable=unsupported-membership-test
+                    prog = max(prog, test_prog)
+                else:
+                    # The only way we can get here is if one prog is GLES
+                    # and the other is not: there is only one desktop
+                    # shader_runner binary, so two desktop GL tests can
+                    # never differ in the prog != test_prog check above.
+                    raise exceptions.PiglitInternalError(
+                        'GLES and GL shaders in the same command!\n'
+                        'Cannot pick a shader_runner binary!')
+
+            files.append(test_file)
+            subtests.append(
+                os.path.basename(os.path.splitext(test_file)[0]).lower())
+
+        self._run_command(
+                results,
+                [prog] + files + ['-auto', '-report-subtests', '-fbo'])
+
+        subtest = None
+        testpath = None
+        for each in results.out.split('\n'):
+            if each.startswith('PIGLIT TEST:'):
+                subtest = each.rsplit(None, 1)[-1]
+                if results.subtests:
+                    testpath = grouptools.join(results.root, subtest)
+                else:
+                    assert grouptools.testname(results.root) == subtest.lower(), \
+                        (results.root, grouptools.testname(results.root), subtest)
+                    testpath = results.root
+                results.set_result(testpath, status.INCOMPLETE)
+            elif each.startswith('PIGLIT:'):
+                update = json.loads(each[8:])
+                assert 'subtest' in update, update
+                assert len(update['subtest']) == 1, update
+                results.set_result(testpath, update['subtest'][subtest])
+                subtest = None
+                testpath = None
+
+        if is_crash_returncode(results.returncode):
+            if subtest is not None:
+                results.set_result(testpath, status.CRASH)
             else:
-                prog = parser.prog
-
-            try:
-                skipper = FastSkip(gl_required=parser.gl_required,
-                                   gl_version=parser.gl_version,
-                                   gles_version=parser.gles_version,
-                                   glsl_version=parser.glsl_version,
-                                   glsl_es_version=parser.glsl_es_version)
-                skipper.test()
-            except TestIsSkip:
-                skips.append(subtest)
-                continue
-            files.append(parser.filename)
-            subtests.append(subtest)
-
-        assert len(subtests) + len(skips) == len(filenames), \
-            'not all tests accounted for'
-
-        super(MultiShaderTest, self).__init__(
-            [prog] + files,
-            subtests=subtests,
-            run_concurrent=True)
-
-        for name in skips:
-            self.result.subtests[name] = status.SKIP
-
-    @PiglitBaseTest.command.getter  # pylint: disable=no-member
-    def command(self):
-        """Add -auto to the test command."""
-        return self._command + ['-auto', '-report-subtests']
-
-    def _is_subtest(self, line):
-        return line.startswith('PIGLIT TEST:')
-
-    def _resume(self, current):
-        command = [self.command[0]]
-        command.extend(self.command[current + 1:])
-        return command
-
-    def _stop_status(self):
-        # If the lower level framework skips then return a status for that
-        # subtest as skip, and resume.
-        if self.result.out.endswith('PIGLIT: {"result": "skip" }\n'):
-            return status.SKIP
-        if self.result.returncode > 0:
-            return status.FAIL
-        return status.CRASH
-
-    def _is_cherry(self):
-        # Due to the way that piglt is architected if a particular feature
-        # isn't supported it causes the test to exit with status 0. There is no
-        # straightforward way to fix this, so we work around it by looking for
-        # the message that feature provides and marking the test as not
-        # "cherry" when it is found at the *end* of stdout. (We don't want to
-        # match other places or we'll end up in an infinite loop)
-        return (
-            self.result.returncode == 0 and not
-            self.result.out.endswith(
-                'not supported on this implementation\n') and not
-            self.result.out.endswith(
-                'PIGLIT: {"result": "skip" }\n'))
+                results.result = status.CRASH
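
The parsing loop in _run_tests keys off the two marker lines shader_runner
emits when run with -report-subtests: a "PIGLIT TEST:" line announcing a
subtest and a "PIGLIT:" JSON line reporting its status. A standalone sketch
of that handling, decoupled from the results object; parse_subtest_statuses
and the sample output are made up for illustration:

    import json

    def parse_subtest_statuses(out):
        """Map each announced subtest to its reported status."""
        statuses = {}
        current = None
        for line in out.split('\n'):
            if line.startswith('PIGLIT TEST:'):
                # The subtest name is the last whitespace-separated field.
                current = line.rsplit(None, 1)[-1]
                statuses[current] = 'incomplete'
            elif line.startswith('PIGLIT:') and current is not None:
                update = json.loads(line[len('PIGLIT:'):])
                statuses[current] = update['subtest'][current]
                current = None
        return statuses

    sample = ('PIGLIT TEST: 1 - foo\n'
              'PIGLIT: {"subtest": {"foo": "pass"}}\n'
              'PIGLIT TEST: 2 - bar\n')  # process died before reporting bar
    print(parse_subtest_statuses(sample))  # foo: pass, bar: incomplete

On a crash return code only the subtest still marked incomplete needs to be
flagged, which is what the CRASH handling above does.
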
diff --git a/tests/all.py b/tests/all.py
index 0c75de54e..2436f1002 100644
--- a/tests/all.py
+++ b/tests/all.py
@@ -6,32 +6,29 @@ from __future__ import (
 )
 import collections
 import itertools
 import os
 import platform
 
 import six
 from six.moves import range
 
 from framework import grouptools
-from framework import options
 from framework.profile import TestProfile
 from framework.driver_classifier import DriverClassifier
 from framework.test import (PiglitGLTest, GleanTest, PiglitBaseTest,
                             GLSLParserTest, GLSLParserNoConfigError)
-from framework.test.shader_test import ShaderTest, MultiShaderTest
+from framework.test.shader_test import ShaderTest, ShaderTestRunner
 from .py_modules.constants import TESTS_DIR, GENERATED_TESTS_DIR
 
 __all__ = ['profile']
 
-PROCESS_ISOLATION = options.OPTIONS.process_isolation
-
 # Disable bad hanging indent errors in pylint
 # There is a bug in pylint which causes the profile.test_list.group_manager to
 # be tagged as bad hanging indent, even though it seems to be correct (and
 # similar syntax doesn't trigger an error)
 # pylint: disable=bad-continuation
 
 # Shadowing variables is a bad practice. It's just nearly impossible with the
 # format of this module to avoid it.
 # pylint: disable=redefined-outer-name
 
@@ -201,34 +198,41 @@ def power_set(s):
     result = []
     for p in power_set(s[:-1]):
         result.append(p)
         result.append(p + [s[-1]])
     return result
 
 ######
 # Collecting all tests
 profile = TestProfile()  # pylint: disable=invalid-name
 
-shader_tests = collections.defaultdict(list)
+# Use an ordered dictionary for reproducibility
+shader_tests = collections.OrderedDict()
 
 # Find and add all shader tests.
 for basedir in [TESTS_DIR, GENERATED_TESTS_DIR]:
     for dirpath, _, filenames in os.walk(basedir):
         for filename in filenames:
             testname, ext = os.path.splitext(filename)
             groupname = grouptools.from_path(os.path.relpath(dirpath, basedir))
             if ext == '.shader_test':
-                if PROCESS_ISOLATION:
-                    test = ShaderTest(os.path.join(dirpath, filename))
-                else:
-                    shader_tests[groupname].append(os.path.join(dirpath, filename))
-                    continue
+                test = ShaderTest(os.path.join(dirpath, filename))
+                group = shader_tests.get(groupname)
+                if not group:
+                    group = collections.OrderedDict()
+                    shader_tests[groupname] = group
+                command_group = group.get(test.command[0])
+                if not command_group:
+                    command_group = []
+                    group[test.command[0]] = command_group
+                command_group.append(test)
+                continue
             elif ext in ['.vert', '.tesc', '.tese', '.geom', '.frag', '.comp']:
                 try:
                     test = GLSLParserTest(os.path.join(dirpath, filename))
                 except GLSLParserNoConfigError:
                     # In the event that there is no config assume that it is a
                     # legacy test, and continue
                     continue
 
                 # For glslparser tests you can have multiple tests with the
                 # same name, but a different stage, so keep the extension.
@@ -236,30 +240,32 @@ for basedir in [TESTS_DIR, GENERATED_TESTS_DIR]:
             else:
                 continue
 
             group = grouptools.join(groupname, testname)
             assert group not in profile.test_list, group
 
             profile.test_list[group] = test
 
 # Because we need to handle duplicate group names in TESTS and GENERATED_TESTS
 # this dictionary is constructed, then added to the actual test dictionary.
-for group, files in six.iteritems(shader_tests):
+for group, tests_per_prog in six.iteritems(shader_tests):
     assert group not in profile.test_list, 'duplicate group: {}'.format(group)
-    # If there is only one file in the directory use a normal shader_test.
-    # Otherwise use a MultiShaderTest
-    if len(files) == 1:
-        group = grouptools.join(
-            group, os.path.basename(os.path.splitext(files[0])[0]))
-        profile.test_list[group] = ShaderTest(files[0])
-    else:
-        profile.test_list[group] = MultiShaderTest(files)
+    for tests in six.itervalues(tests_per_prog):
+        # Use one runner per directory per shader_runner program (GL vs.
+        # GLES). Currently, the subtest protocol of shader_runner does not
+        # support running tests from different groups/directories.
+        runner = ShaderTestRunner()
+        for test in tests:
+            test.runner = runner
+            lgroup = grouptools.join(
+                group, os.path.basename(os.path.splitext(test.command[1])[0]))
+            profile.test_list[lgroup] = test
 
 # Collect and add all asmparsertests
 for basedir in [TESTS_DIR, GENERATED_TESTS_DIR]:
     _basedir = os.path.join(basedir, 'asmparsertest', 'shaders')
     for dirpath, _, filenames in os.walk(_basedir):
         base_group = grouptools.from_path(os.path.join(
             'asmparsertest', os.path.relpath(dirpath, _basedir)))
         type_ = os.path.basename(dirpath)
 
         for filename in filenames:
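
The collection loop above is a two-level bucketing: first by directory
group, then by shader_runner binary, with one ShaderTestRunner attached per
bucket. A compact sketch of the same bucketing over plain tuples;
group_tests and the sample data are illustrative:

    import collections

    def group_tests(entries):
        """entries: iterable of (group, prog, path) tuples."""
        buckets = collections.OrderedDict()
        for group, prog, path in entries:
            per_prog = buckets.setdefault(group, collections.OrderedDict())
            per_prog.setdefault(prog, []).append(path)
        return buckets

    entries = [('spec/foo', 'shader_runner', 'a.shader_test'),
               ('spec/foo', 'shader_runner_gles2', 'b.shader_test'),
               ('spec/foo', 'shader_runner', 'c.shader_test')]
    for group, per_prog in group_tests(entries).items():
        for prog, files in per_prog.items():
            # One shared runner, and therefore one work item, per
            # (group, prog) bucket.
            print(group, prog, files)

The OrderedDicts keep the walk order stable across runs, which is the
reproducibility the comment above the defaultdict replacement refers to.
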
diff --git a/tests/deqp_vk.py b/tests/deqp_vk.py
index 1f3d58d38..022356642 100644
--- a/tests/deqp_vk.py
+++ b/tests/deqp_vk.py
@@ -48,28 +48,28 @@ _DEQP_ASSERT = re.compile(
 
 class DEQPVKTest(deqp.DEQPBaseTest):
     """Test representation for Khronos Vulkan CTS."""
     timeout = 60
     deqp_bin = _DEQP_VK_BIN
     @property
     def extra_args(self):
         return super(DEQPVKTest, self).extra_args + \
             [x for x in _EXTRA_ARGS if not x.startswith('--deqp-case')]
 
-    def interpret_result(self):
-        if 'Failed to compile shader at vkGlslToSpirV' in self.result.out:
-            self.result.result = 'skip'
-            self.result.out += \
+    def interpret_result(self, result):
+        if 'Failed to compile shader at vkGlslToSpirV' in result.out:
+            result.result = 'skip'
+            result.out += \
                 '\n\nMarked as skip because GLSLang failed to compile shaders'
-        elif _DEQP_ASSERT.search(self.result.err):
-            self.result.result = 'skip'
-            self.result.out += \
+        elif _DEQP_ASSERT.search(result.err):
+            result.result = 'skip'
+            result.out += \
                 '\n\nMarked as skip because of a internal dEQP assertion'
         else:
-            super(DEQPVKTest, self).interpret_result()
+            super(DEQPVKTest, self).interpret_result(result)
 
 
 profile = deqp.make_profile(  # pylint: disable=invalid-name
     deqp.iter_deqp_test_cases(
         deqp.gen_caselist_txt(_DEQP_VK_BIN, 'dEQP-VK-cases.txt',
                               _EXTRA_ARGS)),
     DEQPVKTest)
diff --git a/tests/es3conform.py b/tests/es3conform.py
index 6bdcf2ba9..3d17ba434 100644
--- a/tests/es3conform.py
+++ b/tests/es3conform.py
@@ -53,26 +53,26 @@ gtfroot = path.dirname(path.realpath(path.join(TEST_BIN_DIR, 'GTF3')))
 
 class GTFTest(Test):
     pass_re = re.compile(
         r'(Conformance|Regression) PASSED all (?P<passed>\d+) tests')
 
     def __init__(self, testpath):
         super(GTFTest, self).__init__([path.join(TEST_BIN_DIR, 'GTF3'),
                                        '-minfmt', '-width=113', '-height=47',
                                        '-run=' + testpath])
 
-    def interpret_result(self):
-        mo = self.pass_re.search(self.result.out)
+    def interpret_result(self, result):
+        mo = self.pass_re.search(result.out)
         if mo is not None and int(mo.group('passed')) > 0:
-            self.result.result = 'pass'
+            result.result = 'pass'
         else:
-            self.result.result = 'fail'
+            result.result = 'fail'
 
 
 def populateTests(runfile):
     "Read a .run file, adding any .test files to the profile"
     with open(runfile, 'r') as f:
         for line in f.readlines():
             # Ignore comments and whitespace
             line = line.strip()
             if line.startswith('#') or line == '':
                 continue
diff --git a/tests/igt.py b/tests/igt.py
index 5842810de..b0e796cce 100644
--- a/tests/igt.py
+++ b/tests/igt.py
@@ -106,36 +106,36 @@ profile = IGTTestProfile()  # pylint: disable=invalid-name
 
 class IGTTest(Test):
     """Test class for running libdrm."""
     def __init__(self, binary, arguments=None):
         if arguments is None:
             arguments = []
         super(IGTTest, self).__init__(
             [os.path.join(IGT_TEST_ROOT, binary)] + arguments)
         self.timeout = 600
 
-    def interpret_result(self):
-        super(IGTTest, self).interpret_result()
+    def interpret_result(self, result):
+        super(IGTTest, self).interpret_result(result)
 
-        if self.result.returncode == 0:
-            if not self.result.err:
-                self.result.result = 'pass'
+        if result.returncode == 0:
+            if not result.err:
+                result.result = 'pass'
             else:
-                self.result.result = 'warn'
-        elif self.result.returncode == 77:
-            self.result.result = 'skip'
-        elif self.result.returncode == 78:
-            self.result.result = 'timeout'
-        elif self.result.returncode == 139:
-            self.result.result = 'crash'
+                result.result = 'warn'
+        elif result.returncode == 77:
+            result.result = 'skip'
+        elif result.returncode == 78:
+            result.result = 'timeout'
+        elif result.returncode == 139:
+            result.result = 'crash'
         else:
-            self.result.result = 'fail'
+            result.result = 'fail'
 
 
 def list_tests(listname):
     """Parse igt test list and return them as a list."""
     with open(os.path.join(IGT_TEST_ROOT, listname), 'r') as f:
         lines = (line.rstrip() for line in f.readlines())
 
     found_header = False
 
     for line in lines:
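
The igt return code handling above is in effect a lookup table plus a
stderr check for the warn case. A data-driven sketch of the same mapping;
igt_status and the table are illustrative, not part of the patch:

    _IGT_RETURNCODES = {77: 'skip', 78: 'timeout', 139: 'crash'}

    def igt_status(returncode, err):
        """Return the piglit status string for an igt exit."""
        if returncode == 0:
            # Anything on stderr demotes a clean exit to 'warn'.
            return 'pass' if not err else 'warn'
        return _IGT_RETURNCODES.get(returncode, 'fail')

    assert igt_status(0, '') == 'pass'
    assert igt_status(0, 'noise') == 'warn'
    assert igt_status(139, '') == 'crash'
    assert igt_status(1, '') == 'fail'
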
diff --git a/tests/oglconform.py b/tests/oglconform.py
index 5104f442b..6080b568f 100644
--- a/tests/oglconform.py
+++ b/tests/oglconform.py
@@ -55,38 +55,38 @@ class OGLCTest(Test):
         r'wont be scheduled due to lack of compatible fbconfig')
 
     def __init__(self, category, subtest):
         super(OGLCTest, self).__init__([category, subtest])
 
     @Test.command.getter
     def command(self):
         return [BIN, '-minFmt', '-v', '4', '-test'] + \
             super(OGLCTest, self).command
 
-    def interpret_result(self):
+    def interpret_result(self, result):
         # Most of what we want to search for is in the last three lines of the
         # the output
-        split = self.result.out.rsplit('\n', 4)[1:]
+        split = result.out.rsplit('\n', 4)[1:]
         if 'Total Passed : 1' in split:
-            self.result.result = 'pass'
+            result.result = 'pass'
         elif 'Total Failed : 1' in split:
             # This is a fast path to avoid the regular expression.
-            self.result.result = 'fail'
+            result.result = 'fail'
         elif ('Total Not run: 1' in split or
-              self.skip_re.search(self.result.out) is not None):
+              self.skip_re.search(result.out) is not None):
             # Lazy evaluation means that the re (which is slow) is only tried if
             # the more obvious case is not true
-            self.result.result = 'skip'
+            result.result = 'skip'
         else:
-            self.result.result = 'fail'
+            result.result = 'fail'
 
-        super(OGLCTest, self).interpret_result()
+        super(OGLCTest, self).interpret_result(result)
 
 
 def _make_profile():
     """Create and populate a TestProfile instance."""
     profile_ = TestProfile()
 
     with tempfile.NamedTemporaryFile() as f:
         with open(os.devnull, "w") as d:
             subprocess.call([BIN, '-generateTestList', f.name],
                             stdout=d, stderr=d)
diff --git a/tests/xts.py b/tests/xts.py
index 715ecfa47..1890bedb5 100644
--- a/tests/xts.py
+++ b/tests/xts.py
@@ -142,62 +142,62 @@ class XTSTest(Test):  # pylint: disable=too-few-public-methods
                 split = out.splitlines()
                 os.rename(os.path.join(self.cwd, split[0]), render_path)
                 os.rename(os.path.join(self.cwd, split[1]), ref_path)
 
                 images.append({'image_desc': desc,
                                'image_ref': ref_path,
                                'image_render': render_path})
 
         return images
 
-    def interpret_result(self):
-        super(XTSTest, self).interpret_result()
+    def interpret_result(self, result):
+        super(XTSTest, self).interpret_result(result)
 
         try:
             with open(self.test_results_file, 'r') as rfile:
                 log = rfile.read()
-                self.result.out = log
+                result.out = log
                 os.remove(self.test_results_file)
         except IOError:
-            self.result.err = "No results file found"
+            result.err = "No results file found"
             log = ""
 
-        if self.result.returncode == 0:
-            if re.search('FAIL', self.result.out) is not None:
-                self.result.result = 'fail'
-            elif re.search('PASS', self.result.out) is not None:
-                self.result.result = 'pass'
+        if result.returncode == 0:
+            if re.search('FAIL', result.out) is not None:
+                result.result = 'fail'
+            elif re.search('PASS', result.out) is not None:
+                result.result = 'pass'
             else:
-                self.result.result = 'fail'
-        elif self.result.returncode == 77:
-            self.result.result = 'skip'
-        elif self.result.returncode == 1:
+                result.result = 'fail'
+        elif result.returncode == 77:
+            result.result = 'skip'
+        elif result.returncode == 1:
             if re.search('Could not open all VSW5 fonts', log):
-                self.result.result = 'warn'
+                result.result = 'warn'
             else:
-                self.result.result = 'fail'
+                result.result = 'fail'
 
-        self.result.images = self._process_log_for_images(log)
+        result.images = self._process_log_for_images(log)
 
 
 class RendercheckTest(Test):
     def __init__(self, args):
         super(RendercheckTest, self).__init__(['rendercheck'] + args)
         self.testname = "rendercheck " + " ".join(args)
 
-    def interpret_result(self):
-        super(RendercheckTest, self).interpret_result()
+    def interpret_result(self, result):
+        super(RendercheckTest, self).interpret_result(result)
 
-        if self.result.returncode == 0:
-            self.result.result = 'pass'
-        elif self.result.returncode == 77:
-            self.result.result = 'skip'
+        if result.returncode == 0:
+            result.result = 'pass'
+        elif result.returncode == 77:
+            result.result = 'skip'
 
 
 def _populate_profile_xts(profile):
     fpath = os.path.join(X_TEST_SUITE, 'xts5')
     for dirpath, _, filenames in os.walk(fpath):
         for fname in filenames:
             # only look at the .m test files
             testname, ext = os.path.splitext(fname)
             if ext != '.m':
                 continue
-- 
2.11.0


