[Piglit] [RFC 5/9] framework: move json functions out of results into backends.json

Dylan Baker baker.dylan.c at gmail.com
Mon Apr 6 14:30:15 PDT 2015


This is the first step to fully abstracting the backends. With this
change, the code for loading, resuming, and processing json results is
moved out of the results module and into the json backend module.

Signed-off-by: Dylan Baker <dylanx.c.baker at intel.com>
---
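With this change callers load results through the json backend instead of the
results module. A minimal sketch of the new call pattern, mirroring the
programs/summary.py hunks below (the results path is just an example):

    from __future__ import print_function
    import sys

    from framework import backends

    try:
        testrun = backends.json.load_results('results/my-run')
    except backends.errors.ResultsLoadError as e:
        print('Error: {}'.format(e.message), file=sys.stderr)
        sys.exit(1)

load_results() still accepts either a results directory or a results file, so
existing call sites only need the import and the exception type updated.
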
 framework/backends/errors.py          |  25 +++
 framework/backends/json.py            | 380 +++++++++++++++++++++++++++++++++-
 framework/programs/summary.py         |  10 +-
 framework/results.py                  | 379 ---------------------------------
 framework/summary.py                  |   8 +-
 framework/tests/backends_tests.py     |  97 ---------
 framework/tests/json_backend_tests.py | 275 ++++++++++++++++++++++++
 framework/tests/results_tests.py      | 156 +-------------
 framework/tests/results_v0_tests.py   |  11 +-
 framework/tests/results_v1_tests.py   |  10 +-
 framework/tests/results_v2_tests.py   |   4 +-
 framework/tests/results_v3_tests.py   |   5 +-
 framework/tests/results_v4_tests.py   |   6 +-
 framework/tests/utils.py              |   2 +-
 14 files changed, 704 insertions(+), 664 deletions(-)
 create mode 100644 framework/backends/errors.py
 create mode 100644 framework/tests/json_backend_tests.py
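
The version-update machinery moves along with the loader: load_results() hands
whatever it parsed to _update_results(), which walks the run through
_update_zero_to_one() up to _update_four_to_five() until results_version
reaches CURRENT_JSON_VERSION, then writes a new results.json and keeps the
original as results.json.old. A rough sketch of that flow, assuming an old v0
"main" file at an example path:

    from framework import backends

    # Loads the v0 file, applies every updater in order, and writes an
    # updated results.json next to the original (which is renamed to
    # results.json.old).
    testrun = backends.json.load_results('old-run/main')
    assert testrun.results_version == backends.json.CURRENT_JSON_VERSION

The path is illustrative only; the behaviour matches the previous
results.load_results()/update_results() pair, just relocated.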

diff --git a/framework/backends/errors.py b/framework/backends/errors.py
new file mode 100644
index 0000000..150bc19
--- /dev/null
+++ b/framework/backends/errors.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2015 Intel Corporation
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+"""Shared errors for the backends."""
+
+
+class ResultsLoadError(Exception):
+    pass
diff --git a/framework/backends/json.py b/framework/backends/json.py
index 7934ce9..52bdd08 100644
--- a/framework/backends/json.py
+++ b/framework/backends/json.py
@@ -22,16 +22,19 @@
 
 from __future__ import print_function, absolute_import
 import os
+import sys
 import shutil
+import posixpath
 
 try:
     import simplejson as json
 except ImportError:
     import json
 
-import framework.status as status
+from framework import status, results
 from .abstract import FileBackend
 from .register import Registry
+from . import errors
 
 __all__ = [
     'REGISTRY',
@@ -41,6 +44,9 @@ __all__ = [
 # The current version of the JSON results
 CURRENT_JSON_VERSION = 5
 
+# The level to indent a final file
+INDENT = 4
+
 
 def piglit_encoder(obj):
     """ Encoder for piglit that can transform additional classes into json
@@ -68,8 +74,6 @@ class JSONBackend(FileBackend):
     a file it just ignores it, making the result atomic.
 
     """
-    INDENT = 4
-
     def initialize(self, metadata):
         """ Write boilerplate json code
 
@@ -131,7 +135,7 @@ class JSONBackend(FileBackend):
         # write out the combined file.
         with open(os.path.join(self._dest, 'results.json'), 'w') as f:
             json.dump(data, f, default=piglit_encoder,
-                      indent=JSONBackend.INDENT)
+                      indent=INDENT)
 
         # Delete the temporary files
         os.unlink(os.path.join(self._dest, 'metadata.json'))
@@ -146,6 +150,374 @@ class JSONBackend(FileBackend):
             self._fsync(f)
 
 
+def load_results(filename):
+    """ Loader function for TestrunResult class
+
+    This function takes a single argument of a results file.
+
+    It makes quite a few assumptions: first it assumes that it has been passed
+    a folder; if that fails then it looks for a plain text json file called
+    "main".
+
+    """
+    # This will load any file or file-like thing. That would include pipes and
+    # file descriptors
+    if not os.path.isdir(filename):
+        filepath = filename
+    elif os.path.exists(os.path.join(filename, 'metadata.json')):
+        # If the test is still running we need to use the resume code, since
+        # there will not be a results.json file.
+        # We want to return here since the results are known current (there's
+        # an assert in _resume), and there is no filepath
+        # to pass to _update_results
+        # XXX: This needs to be run before searching for a results.json file so
+        #      that if the new run is overwriting an old one we load the
+        #      partial and not the original. It might be better to just delete
+        #      the contents of the folder if there is anything in it.
+        # XXX: What happens if the tests folder gets deleted in the middle of
+        #      this?
+        return _resume(filename)
+    else:
+        # If there are both old and new results in a directory pick the new
+        # ones first
+        if os.path.exists(os.path.join(filename, 'results.json')):
+            filepath = os.path.join(filename, 'results.json')
+        # Version 0 results are called 'main'
+        elif os.path.exists(os.path.join(filename, 'main')):
+            filepath = os.path.join(filename, 'main')
+        else:
+            raise errors.ResultsLoadError('No results found in "{}"'.format(
+                filename))
+
+    with open(filepath, 'r') as f:
+        testrun = _load(f)
+
+    return _update_results(testrun, filepath)
+
+
+def _load(results_file):
+    """Load a json results instance and return a TestrunResult.
+
+    This function converts an existing, fully completed json run.
+
+    """
+    result = results.TestrunResult()
+    result.results_version = 0  # This should get overwritten
+    result.__dict__.update(json.load(results_file))
+
+    for key, value in result.tests.viewitems():
+        result.tests[key] = results.TestResult.load(value)
+
+    return result
+
+
+def _resume(results_dir):
+    """Loads a partially completed json results directory."""
+    # Pylint can't infer that the json being loaded is a dict
+    # pylint: disable=maybe-no-member
+    assert os.path.isdir(results_dir), \
+        "TestrunResult.resume() requires a directory"
+
+    # Load the metadata
+    with open(os.path.join(results_dir, 'metadata.json'), 'r') as f:
+        meta = json.load(f)
+    assert meta['results_version'] == CURRENT_JSON_VERSION, \
+        "Old results version, resume impossible"
+
+    testrun = results.TestrunResult()
+    testrun.name = meta['name']
+    testrun.options = meta['options']
+    testrun.uname = meta.get('uname')
+    testrun.glxinfo = meta.get('glxinfo')
+    testrun.lspci = meta.get('lspci')
+
+    # Load all of the test names and add them to the test list
+    for file_ in os.listdir(os.path.join(results_dir, 'tests')):
+        with open(os.path.join(results_dir, 'tests', file_), 'r') as f:
+            try:
+                test = json.load(f)
+            except ValueError:
+                continue
+
+        # XXX: There has to be a better way to get a single key: value out
+        # of a dict even when the key name isn't known
+        # XXX: Yes, using a piglit_decoder function
+        for key, value in test.iteritems():
+            testrun.tests[key] = results.TestResult.load(value)
+
+    return testrun
+
+
+def _update_results(results, filepath):
+    """ Update results to the lastest version
+
+    This function is a wraper for other update_* functions, providing
+    incremental updates from one version to another.
+
+    Arguments:
+    results -- a TestrunResults instance
+    filepath -- the name of the file that the Testrunresults instance was
+                created from
+
+    """
+
+    def loop_updates(results):
+        """ Helper to select the proper update sequence """
+        # Python lacks a switch statement, the workaround is to use a
+        # dictionary
+        updates = {
+            0: _update_zero_to_one,
+            1: _update_one_to_two,
+            2: _update_two_to_three,
+            3: _update_three_to_four,
+            4: _update_four_to_five,
+        }
+
+        while results.results_version < CURRENT_JSON_VERSION:
+            results = updates[results.results_version](results)
+
+        return results
+
+    # If there is no version, then set it to 0, this will trigger a full
+    # update.
+    if not hasattr(results, 'results_version'):
+        results.results_version = 0
+
+    # If the results version is the current version there is no need to
+    # update, just return the results
+    if results.results_version == CURRENT_JSON_VERSION:
+        return results
+
+    results = loop_updates(results)
+
+    # Move the old results, and write the current results
+    filedir = os.path.dirname(filepath)
+    try:
+        os.rename(filepath, os.path.join(filedir, 'results.json.old'))
+        _write(results, os.path.join(filedir, 'results.json'))
+    except OSError:
+        print("WARNING: Could not write updated results {}".format(filepath),
+              file=sys.stderr)
+
+    return results
+
+
+def _write(results, file_):
+    """WRite the values of the results out to a file."""
+    with open(file_, 'w') as f:
+        json.dump({k:v for k, v in results.__dict__.iteritems()},
+                  f,
+                  default=piglit_encoder,
+                  indent=INDENT)
+
+
+def _update_zero_to_one(results):
+    """ Update version zero results to version 1 results
+
+    Changes from version 0 to version 1
+
+    - dmesg is sometimes stored as a list, sometimes stored as a string. In
+      version 1 it is always stored as a string
+    - in version 0 subtests are sometimes stored as duplicates, sometimes stored
+      only with a single entry, in version 1 tests with subtests are only
+      recorded once, always.
+    - Version 0 can have an info entry, or returncode, out, and err entries,
+      Version 1 will only have the latter
+    - version 0 results are called 'main', while version 1 results are called
+      'results.json' (This is not handled internally, it's either handled by
+      update_results() which will write the file back to disk, or needs to be
+      handled manually by the user)
+
+    """
+    updated_results = {}
+    remove = set()
+
+    for name, test in results.tests.iteritems():
+        # fix dmesg errors if any
+        if isinstance(test.get('dmesg'), list):
+            test['dmesg'] = '\n'.join(test['dmesg'])
+
+        # If a test has an info attribute, we want to remove it; if it doesn't
+        # have a returncode, out, or err attribute we'll want to get those out
+        # of info first
+        #
+        # This expects that the order of info is roughly returncode, errors,
+        # output, *extra; it can handle having extra information in the middle.
+        if (None in [test.get('out'), test.get('err'), test.get('returncode')]
+                and test.get('info')):
+
+            # This attempts to split everything before Errors: as a returncode,
+            # and everything before Output: as Errors, and everything else as
+            # output. This may result in extra info being put in out, this is
+            # actually fine since out is only parsed by humans.
+            returncode, split = test['info'].split('\n\nErrors:')
+            err, out = split.split('\n\nOutput:')
+
+            # returncode can be 0, and 0 is falsy, so ensure it is actually
+            # None
+            if test.get('returncode') is None:
+                # In some cases the returncode might not be set (like the test
+                # skipped), in that case it will be None, so set it
+                # appropriately
+                try:
+                    test['returncode'] = int(
+                        returncode[len('returncode: '):].strip())
+                except ValueError:
+                    test['returncode'] = None
+            if not test.get('err'):
+                test['err'] = err.strip()
+            if not test.get('out'):
+                test['out'] = out.strip()
+
+        # Remove the unused info key
+        if test.get('info'):
+            del test['info']
+
+        # If there is more than one subtest written in version 0 results that
+        # entry will be a complete copy of the original entry with '/{name}'
+        # appended. This loop looks for tests with subtests, removes the
+        # duplicate entries, and creates a new entry in updated_results for the
+        # single full test.
+        #
+        # this must be the last thing done in this loop, or there will be pain
+        if test.get('subtest'):
+            for sub in test['subtest'].iterkeys():
+                # adding the leading / ensures that we get exactly what we
+                # expect, since endswith does a character by character match;
+                # if the subtest name is duplicated it won't match, and if
+                # there are more trailing characters it will not match
+                #
+                # We expect duplicate names like this:
+                #  "group1/groupA/test1/subtest 1": <thing>,
+                #  "group1/groupA/test1/subtest 2": <thing>,
+                #  "group1/groupA/test1/subtest 3": <thing>,
+                #  "group1/groupA/test1/subtest 4": <thing>,
+                #  "group1/groupA/test1/subtest 5": <thing>,
+                #  "group1/groupA/test1/subtest 6": <thing>,
+                #  "group1/groupA/test1/subtest 7": <thing>,
+                # but what we want is group1/groupA/test1 and none of the
+                # subtests as keys in the dictionary at all
+                if name.endswith('/{0}'.format(sub)):
+                    testname = name[:-(len(sub) + 1)]  # remove leading /
+                    assert testname[-1] != '/'
+
+                    remove.add(name)
+                    break
+            else:
+                # This handles two cases, first that the results have only
+                # single entries for each test, regardless of subtests (new
+                # style), or that the test only has a single subtest and thus
+                # was recorded correctly
+                testname = name
+
+            if testname not in updated_results:
+                updated_results[testname] = test
+
+    for name in remove:
+        del results.tests[name]
+    results.tests.update(updated_results)
+
+    # set the results version
+    results.results_version = 1
+
+    return results
+
+
+def _update_one_to_two(results):
+    """Update version 1 results to version 2.
+
+    Version two results are actually identical to version one results; however,
+    there was an error in version 1 at the end causing metadata in the options
+    dictionary to be incorrect. Version 2 corrects that.
+
+    Namely uname, glxinfo, wglinfo, and lspci were put in the options['env']
+    instead of in the root.
+
+    """
+    if 'env' in results.options:
+        env = results.options['env']
+        if env.get('glxinfo'):
+            results.glxinfo = env['glxinfo']
+        if env.get('lspci'):
+            results.lspci = env['lspci']
+        if env.get('uname'):
+            results.uname = env['uname']
+        if env.get('wglinfo'):
+            results.wglinfo = env['wglinfo']
+        del results.options['env']
+
+    results.results_version = 2
+
+    return results
+
+
+def _update_two_to_three(results):
+    """Lower key names."""
+    for key, value in results.tests.items():
+        lowered = key.lower()
+        if not key == lowered:
+            results.tests[lowered] = value
+            del results.tests[key]
+
+    results.results_version = 3
+
+    return results
+
+
+def _update_three_to_four(results):
+    """Update results v3 to v4.
+
+    This update requires renaming a few tests. The complete list can be found
+    in framework/data/results_v3_to_v4.json, a json file containing a list of
+    lists (they would be tuples if json had tuples), the first element being
+    the original name, and the second being the new name to update to.
+
+    """
+    mapped_updates = [
+        ("spec/arb_texture_rg/fs-shadow2d-red-01",
+         "spec/arb_texture_rg/execution/fs-shadow2d-red-01"),
+        ("spec/arb_texture_rg/fs-shadow2d-red-02",
+         "spec/arb_texture_rg/execution/fs-shadow2d-red-02"),
+        ("spec/arb_texture_rg/fs-shadow2d-red-03",
+         "spec/arb_texture_rg/execution/fs-shadow2d-red-03"),
+        ("spec/arb_draw_instanced/draw-non-instanced",
+         "spec/arb_draw_instanced/execution/draw-non-instanced"),
+        ("spec/arb_draw_instanced/instance-array-dereference",
+         "spec/arb_draw_instanced/execution/instance-array-dereference"),
+    ]
+
+    for original, new in mapped_updates:
+        if original in results.tests:
+            results.tests[new] = results.tests[original]
+            del results.tests[original]
+
+    # This needs to use posixpath rather than grouptools because version 4 uses
+    # / as a separator, but grouptools isn't guaranteed to do so
+    for test, result in results.tests.items():
+        if posixpath.dirname(test) == 'glslparsertest':
+            group = posixpath.join('glslparsertest/shaders',
+                                   posixpath.basename(test))
+            results.tests[group] = result
+            del results.tests[test]
+
+    results.results_version = 4
+
+    return results
+
+
+def _update_four_to_five(results):
+    """Updates json results from version 4 to version 5."""
+    new_tests = {}
+
+    for name, test in results.tests.iteritems():
+        new_tests[name.replace('/', '@').replace('\\', '@')] = test
+
+    results.tests = new_tests
+    results.results_version = 5
+
+    return results
+
+
 REGISTRY = Registry(
     extensions=['', '.json'],
     backend=JSONBackend,
diff --git a/framework/programs/summary.py b/framework/programs/summary.py
index b596fe9..f098856 100644
--- a/framework/programs/summary.py
+++ b/framework/programs/summary.py
@@ -27,9 +27,7 @@ import os.path as path
 import sys
 import errno
 
-import framework.summary as summary
-import framework.status as status
-import framework.core as core
+from framework import summary, status, core, backends
 import framework.results
 
 __all__ = [
@@ -157,8 +155,8 @@ def csv(input_):
     args = parser.parse_args(input_)
 
     try:
-        testrun = framework.results.load_results(args.testResults)
-    except framework.results.ResultsLoadError as e:
+        testrun = backends.json.load_results(args.testResults)
+    except backends.errors.ResultsLoadError as e:
         print('Error: {}'.format(e.message), file=sys.stderr)
         sys.exit(1)
 
@@ -191,7 +189,7 @@ def aggregate(input_):
 
     outfile = os.path.join(args.results_folder, args.output)
     try:
-        results = framework.results.load_results(args.results_folder)
+        results = backends.json.load_results(args.results_folder)
-    except framework.results.ResultsLoadError as e:
+    except backends.errors.ResultsLoadError as e:
         print('Error: {}'.format(e.message), file=sys.stderr)
         sys.exit(1)
diff --git a/framework/results.py b/framework/results.py
index 1710228..dd1a645 100644
--- a/framework/results.py
+++ b/framework/results.py
@@ -22,31 +22,14 @@
 """ Module for results generation """
 
 from __future__ import print_function, absolute_import
-import os
-import sys
-import posixpath
-
-try:
-    import simplejson as json
-except ImportError:
-    import json
-
 import framework.status as status
-from framework.backends.json import (CURRENT_JSON_VERSION, piglit_encoder,
-                                     JSONBackend)
 
 __all__ = [
-    'ResultsLoadError',
     'TestrunResult',
     'TestResult',
-    'load_results',
 ]
 
 
-class ResultsLoadError(Exception):
-    pass
-
-
 class TestResult(dict):
     def recursive_update(self, dictionary):
         """ Recursively update the TestResult
@@ -110,366 +93,4 @@ class TestrunResult(object):
         self.wglinfo = None
         self.lspci = None
         self.time_elapsed = None
-        self.results_version = CURRENT_JSON_VERSION
         self.tests = {}
-
-    def write(self, file_):
-        """Write the result json."""
-        with open(file_, 'w') as f:
-            json.dump({k: v for k, v in self.__dict__.iteritems() if not
-                       k.startswith('_')},
-                      f, default=piglit_encoder, indent=JSONBackend.INDENT)
-
-    @classmethod
-    def load(cls, results_file):
-        """Create a TestrunResult from a completed file."""
-        result = cls()
-        result.results_version = 0
-        result.__dict__.update(json.load(results_file))
-
-        for key, value in result.tests.iteritems():
-            result.tests[key] = TestResult.load(value)
-
-        return result
-
-    @classmethod
-    def resume(cls, results_dir):
-        """ Create a TestrunResult from an interupted run
-
-        This class method creates and returns a TestrunResult from a partial
-        result file. This does not load old streaminmg results by design, one
-        should not resume a run with a different piglit, it leads to all kinds
-        of problems.
-
-        """
-        # Pylint can't infer that the json being loaded is a dict
-        # pylint: disable=maybe-no-member
-        assert os.path.isdir(results_dir), \
-            "TestrunResult.resume() requires a directory"
-
-        # Load the metadata
-        with open(os.path.join(results_dir, 'metadata.json'), 'r') as f:
-            meta = json.load(f)
-        assert meta['results_version'] == CURRENT_JSON_VERSION, \
-            "Old results version, resume impossible"
-
-        testrun = cls()
-        testrun.name = meta['name']
-        testrun.options = meta['options']
-        testrun.uname = meta.get('uname')
-        testrun.glxinfo = meta.get('glxinfo')
-        testrun.lspci = meta.get('lspci')
-
-        # Load all of the test names and added them to the test list
-        for file_ in os.listdir(os.path.join(results_dir, 'tests')):
-            with open(os.path.join(results_dir, 'tests', file_), 'r') as f:
-                try:
-                    test = json.load(f)
-                except ValueError:
-                    continue
-            # XXX: There has to be a better way to get a single key: value out
-            # of a dict even when the key name isn't known
-            for key, value in test.iteritems():
-                testrun.tests[key] = TestResult.load(value)
-
-        return testrun
-
-
-def load_results(filename):
-    """ Loader function for TestrunResult class
-
-    This function takes a single argument of a results file.
-
-    It makes quite a few assumptions, first it assumes that it has been passed
-    a folder, if that fails then it looks for a plain text json file called
-    "main"
-
-    """
-    # This will load any file or file-like thing. That would include pipes and
-    # file descriptors
-    if not os.path.isdir(filename):
-        filepath = filename
-    elif os.path.exists(os.path.join(filename, 'metadata.json')):
-        # If the test is still running we need to use the resume code, since
-        # there will not be a results.json file.
-        # We want to return here since the results are known current (there's
-        # an assert in TestrunResult.load), and there is no filepath
-        # to pass to update_results
-        # XXX: This needs to be run before searching for a results.json file so
-        #      that if the new run is overwriting an old one we load the
-        #      partial and not the original. It might be better to just delete
-        #      the contents of the folder if there is anything in it.
-        # XXX: What happens if the tests folder gets deleted in the middle of
-        #      this?
-        return TestrunResult.resume(filename)
-    else:
-        # If there are both old and new results in a directory pick the new
-        # ones first
-        if os.path.exists(os.path.join(filename, 'results.json')):
-            filepath = os.path.join(filename, 'results.json')
-        # Version 0 results are called 'main'
-        elif os.path.exists(os.path.join(filename, 'main')):
-            filepath = os.path.join(filename, 'main')
-        else:
-            raise ResultsLoadError('No results found in "{}"'.format(filename))
-
-    with open(filepath, 'r') as f:
-        testrun = TestrunResult.load(f)
-
-    return update_results(testrun, filepath)
-
-
-def update_results(results, filepath):
-    """ Update results to the lastest version
-
-    This function is a wraper for other update_* functions, providing
-    incremental updates from one version to another.
-
-    Arguments:
-    results -- a TestrunResults instance
-    filepath -- the name of the file that the Testrunresults instance was
-                created from
-
-    """
-
-    def loop_updates(results):
-        """ Helper to select the proper update sequence """
-        # Python lacks a switch statement, the workaround is to use a
-        # dictionary
-        updates = {
-            0: _update_zero_to_one,
-            1: _update_one_to_two,
-            2: _update_two_to_three,
-            3: _update_three_to_four,
-            4: _update_four_to_five,
-        }
-
-        while results.results_version < CURRENT_JSON_VERSION:
-            results = updates[results.results_version](results)
-
-        return results
-
-    # If the results version is the current version there is no need to
-    # update, just return the results
-    if results.results_version == CURRENT_JSON_VERSION:
-        return results
-
-    results = loop_updates(results)
-
-    # Move the old results, and write the current results
-    filedir = os.path.dirname(filepath)
-    try:
-        os.rename(filepath, os.path.join(filedir, 'results.json.old'))
-        results.write(os.path.join(filedir, 'results.json'))
-    except OSError:
-        print("WARNING: Could not write updated results {}".format(filepath),
-              file=sys.stderr)
-
-    return results
-
-
-def _update_zero_to_one(results):
-    """ Update version zero results to version 1 results
-
-    Changes from version 0 to version 1
-
-    - dmesg is sometimes stored as a list, sometimes stored as a string. In
-      version 1 it is always stored as a string
-    - in version 0 subtests are somtimes stored as duplicates, sometimes stored
-      only with a single entry, in version 1 tests with subtests are only
-      recorded once, always.
-    - Version 0 can have an info entry, or returncode, out, and err entries,
-      Version 1 will only have the latter
-    - version 0 results are called 'main', while version 1 results are called
-      'results.json' (This is not handled internally, it's either handled by
-      update_results() which will write the file back to disk, or needs to be
-      handled manually by the user)
-
-    """
-    updated_results = {}
-    remove = set()
-
-    for name, test in results.tests.iteritems():
-        # fix dmesg errors if any
-        if isinstance(test.get('dmesg'), list):
-            test['dmesg'] = '\n'.join(test['dmesg'])
-
-        # If a test as an info attribute, we want to remove it, if it doesn't
-        # have a returncode, out, or attribute we'll want to get those out of
-        # info first
-        #
-        # This expects that the order of info is rougly returncode, errors,
-        # output, *extra it can handle having extra information in the middle,
-        if (None in [test.get('out'), test.get('err'), test.get('returncode')]
-                and test.get('info')):
-
-            # This attempts to split everything before Errors: as a returncode,
-            # and everything before Output: as Errors, and everything else as
-            # output. This may result in extra info being put in out, this is
-            # actually fine since out is only parsed by humans.
-            returncode, split = test['info'].split('\n\nErrors:')
-            err, out = split.split('\n\nOutput:')
-
-            # returncode can be 0, and 0 is falsy, so ensure it is actually
-            # None
-            if test.get('returncode') is None:
-                # In some cases the returncode might not be set (like the test
-                # skipped), in that case it will be None, so set it
-                # apropriately
-                try:
-                    test['returncode'] = int(
-                        returncode[len('returncode: '):].strip())
-                except ValueError:
-                    test['returncode'] = None
-            if not test.get('err'):
-                test['err'] = err.strip()
-            if not test.get('out'):
-                test['out'] = out.strip()
-
-        # Remove the unused info key
-        if test.get('info'):
-            del test['info']
-
-        # If there is more than one subtest written in version 0 results that
-        # entry will be a complete copy of the original entry with '/{name}'
-        # appended. This loop looks for tests with subtests, removes the
-        # duplicate entries, and creates a new entry in update_results for the
-        # single full tests.
-        #
-        # this must be the last thing done in this loop, or there will be pain
-        if test.get('subtest'):
-            for sub in test['subtest'].iterkeys():
-                # adding the leading / ensures that we get exactly what we
-                # expect, since endswith does a character by chacter match, if
-                # the subtest name is duplicated it wont match, and if there
-                # are more trailing characters it will not match
-                #
-                # We expect duplicate names like this:
-                #  "group1/groupA/test1/subtest 1": <thing>,
-                #  "group1/groupA/test1/subtest 2": <thing>,
-                #  "group1/groupA/test1/subtest 3": <thing>,
-                #  "group1/groupA/test1/subtest 4": <thing>,
-                #  "group1/groupA/test1/subtest 5": <thing>,
-                #  "group1/groupA/test1/subtest 6": <thing>,
-                #  "group1/groupA/test1/subtest 7": <thing>,
-                # but what we want is groupg1/groupA/test1 and none of the
-                # subtest as keys in the dictionary at all
-                if name.endswith('/{0}'.format(sub)):
-                    testname = name[:-(len(sub) + 1)]  # remove leading /
-                    assert testname[-1] != '/'
-
-                    remove.add(name)
-                    break
-            else:
-                # This handles two cases, first that the results have only
-                # single entries for each test, regardless of subtests (new
-                # style), or that the test onhly as a single subtest and thus
-                # was recorded correctly
-                testname = name
-
-            if testname not in updated_results:
-                updated_results[testname] = test
-
-    for name in remove:
-        del results.tests[name]
-    results.tests.update(updated_results)
-
-    # set the results version
-    results.results_version = 1
-
-    return results
-
-
-def _update_one_to_two(results):
-    """Update version 1 results to version 2.
-
-    Version two results are actually identical to version one results, however,
-    there was an error in version 1 at the end causing metadata in the options
-    dictionary to be incorrect. Version 2 corrects that.
-
-    Namely uname, glxinfo, wglinfo, and lspci were put in the options['env']
-    instead of in the root.
-
-    """
-    if 'env' in results.options:
-        env = results.options['env']
-        if env.get('glxinfo'):
-            results.glxinfo = env['glxinfo']
-        if env.get('lspci'):
-            results.lspci = env['lspci']
-        if env.get('uname'):
-            results.uname = env['uname']
-        if env.get('wglinfo'):
-            results.wglinfo = env['wglinfo']
-        del results.options['env']
-
-    results.results_version = 2
-
-    return results
-
-
-def _update_two_to_three(results):
-    """Lower key names."""
-    for key, value in results.tests.items():
-        lowered = key.lower()
-        if not key == lowered:
-            results.tests[lowered] = value
-            del results.tests[key]
-
-    results.results_version = 3
-
-    return results
-
-
-def _update_three_to_four(results):
-    """Update results v3 to v4.
-
-    This update requires renaming a few tests. The complete lists can be found
-    in framework/data/results_v3_to_v4.json, a json file containing a list of
-    lists (They would be tuples if json has tuples), the first element being
-    the original name, and the second being a new name to update to
-
-    """
-    mapped_updates = [
-        ("spec/arb_texture_rg/fs-shadow2d-red-01",
-         "spec/arb_texture_rg/execution/fs-shadow2d-red-01"),
-        ("spec/arb_texture_rg/fs-shadow2d-red-02",
-         "spec/arb_texture_rg/execution/fs-shadow2d-red-02"),
-        ("spec/arb_texture_rg/fs-shadow2d-red-03",
-         "spec/arb_texture_rg/execution/fs-shadow2d-red-03"),
-        ("spec/arb_draw_instanced/draw-non-instanced",
-         "spec/arb_draw_instanced/execution/draw-non-instanced"),
-        ("spec/arb_draw_instanced/instance-array-dereference",
-         "spec/arb_draw_instanced/execution/instance-array-dereference"),
-    ]
-
-    for original, new in mapped_updates:
-        if original in results.tests:
-            results.tests[new] = results.tests[original]
-            del results.tests[original]
-
-    # This needs to use posixpath rather than grouptools because version 4 uses
-    # / as a separator, but grouptools isn't guaranteed to do so forever.
-    for test, result in results.tests.items():
-        if posixpath.dirname(test) == 'glslparsertest':
-            group = posixpath.join('glslparsertest/shaders',
-                                   posixpath.basename(test))
-            results.tests[group] = result
-            del results.tests[test]
-
-    results.results_version = 4
-
-    return results
-
-
-def _update_four_to_five(results):
-    """Updates json results from version 4 to version 5."""
-    new_tests = {}
-
-    for name, test in results.tests.iteritems():
-        new_tests[name.replace('/', '@').replace('\\', '@')] = test
-
-    results.tests = new_tests
-    results.results_version = 5
-
-    return results
diff --git a/framework/summary.py b/framework/summary.py
index d709870..c027d76 100644
--- a/framework/summary.py
+++ b/framework/summary.py
@@ -37,9 +37,7 @@ from mako.template import Template
 # a local variable status exists, prevent accidental overloading by renaming
 # the module
 import framework.status as so
-import framework.results
-import framework.grouptools as grouptools
-
+from framework import grouptools, backends
 
 __all__ = [
     'Summary',
@@ -302,8 +300,8 @@ class Summary:
         # Create a Result object for each piglit result and append it to the
         # results list
         try:
-            self.results = [framework.results.load_results(i) for i in resultfiles]
-        except framework.results.ResultsLoadError as e:
+            self.results = [backends.json.load_results(i) for i in resultfiles]
+        except backends.errors.ResultsLoadError as e:
             print('Error: {}'.format(e.message), file=sys.stderr)
             sys.exit(1)
 
diff --git a/framework/tests/backends_tests.py b/framework/tests/backends_tests.py
index 7b94fef..c15543d 100644
--- a/framework/tests/backends_tests.py
+++ b/framework/tests/backends_tests.py
@@ -29,10 +29,6 @@ try:
     from lxml import etree
 except ImportError:
     import xml.etree.cElementTree as etree
-try:
-    import simplejson as json
-except ImportError:
-    import json
 import nose.tools as nt
 from nose.plugins.skip import SkipTest
 
@@ -52,18 +48,6 @@ JUNIT_SCHEMA = 'framework/tests/schema/junit-7.xsd'
 doc_formatter = utils.DocFormatter({'seperator': grouptools.SEPARATOR})
 
 
-def test_initialize_jsonbackend():
-    """ Test that JSONBackend initializes
-
-    This needs to be handled separately from the others because it requires
-    arguments
-
-    """
-    with utils.tempdir() as tdir:
-        func = results.JSONBackend(tdir)
-        assert isinstance(func, results.JSONBackend)
-
-
 @utils.nose_generator
 def test_get_backend():
     """ Generate tests to get various backends """
@@ -198,87 +182,6 @@ def test_junit_replace():
                     'piglit.a.test.group')
 
 
-def test_json_initialize_metadata():
-    """ JSONBackend.initialize() produces a metadata.json file """
-    with utils.tempdir() as f:
-        test = backends.json.JSONBackend(f)
-        test.initialize(BACKEND_INITIAL_META)
-
-        nt.ok_(os.path.exists(os.path.join(f, 'metadata.json')))
-
-
-class TestJSONTestMethod(utils.StaticDirectory):
-    @classmethod
-    def setup_class(cls):
-        cls.test_name = grouptools.join('a', 'test', 'group', 'test1')
-        cls.result = results.TestResult({
-            'time': 1.2345,
-            'result': 'pass',
-            'out': 'this is stdout',
-            'err': 'this is stderr',
-        })
-        super(TestJSONTestMethod, cls).setup_class()
-        test = backends.json.JSONBackend(cls.tdir)
-        test.initialize(BACKEND_INITIAL_META)
-        test.write_test(cls.test_name, cls.result)
-
-    def test_write_test(self):
-        """ JSONBackend.write_test() adds tests to a 'tests' directory """
-        assert os.path.exists(os.path.join(self.tdir, 'tests', '0.json'))
-
-    def test_json_is_valid(self):
-        """ JSONBackend.write_test() produces valid json """
-        with open(os.path.join(self.tdir, 'tests', '0.json'), 'r') as f:
-            try:
-                json.load(f)
-            except Exception as e:
-                raise AssertionError(e)
-
-    def test_json_is_correct(self):
-        """ JSONBackend.write_test() produces correct json """
-        with open(os.path.join(self.tdir, 'tests', '0.json'), 'r') as f:
-            test = json.load(f)
-
-        nt.assert_dict_equal({self.test_name: self.result}, test)
-
-
-class TestJSONTestFinalize(utils.StaticDirectory):
-    @classmethod
-    def setup_class(cls):
-        cls.test_name = grouptools.join('a', 'test', 'group', 'test1')
-        cls.result = results.TestResult({
-            'time': 1.2345,
-            'result': 'pass',
-            'out': 'this is stdout',
-            'err': 'this is stderr',
-        })
-        super(TestJSONTestFinalize, cls).setup_class()
-        test = backends.json.JSONBackend(cls.tdir)
-        test.initialize(BACKEND_INITIAL_META)
-        test.write_test(cls.test_name, cls.result)
-        test.finalize()
-
-    def test_remove_metadata(self):
-        """ JSONBackend.finalize() removes metadata.json """
-        assert not os.path.exists(os.path.join(self.tdir, 'metadata.json'))
-
-    def test_remove_tests(self):
-        """ JSONBackend.finalize() removes tests directory """
-        assert not os.path.exists(os.path.join(self.tdir, 'tests'))
-
-    def test_create_results(self):
-        """ JSONBackend.finalize() creates a results.json file """
-        assert os.path.exists(os.path.join(self.tdir, 'results.json'))
-
-    def test_results_valid(self):
-        """ JSONBackend.finalize() results.json is valid """
-        with open(os.path.join(self.tdir, 'results.json'), 'r') as f:
-            try:
-                json.load(f)
-            except Exception as e:
-                raise AssertionError(e)
-
-
 def test_junit_skips_bad_tests():
     """ backends.JUnitBackend skips illformed tests """
     with utils.tempdir() as tdir:
diff --git a/framework/tests/json_backend_tests.py b/framework/tests/json_backend_tests.py
new file mode 100644
index 0000000..54f24dd
--- /dev/null
+++ b/framework/tests/json_backend_tests.py
@@ -0,0 +1,275 @@
+# Copyright (c) 2014 Intel Corporation
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# pylint: disable=missing-docstring
+
+""" Tests for the backend package """
+
+from __future__ import print_function, absolute_import
+import os
+
+try:
+    import simplejson as json
+except ImportError:
+    import json
+import nose.tools as nt
+
+from framework import results, backends
+import framework.tests.utils as utils
+from .backends_tests import BACKEND_INITIAL_META
+
+
+def test_initialize_jsonbackend():
+    """ Test that JSONBackend initializes
+
+    This needs to be handled separately from the others because it requires
+    arguments
+
+    """
+    with utils.tempdir() as tdir:
+        func = backends.json.JSONBackend(tdir)
+        assert isinstance(func, backends.json.JSONBackend)
+
+
+@utils.nose_generator
+def test_get_backend():
+    """ Generate tests to get various backends """
+    # We use a hand generated list here to ensure that we are getting what we
+    # expect
+    backends_ = {
+        'json': backends.json.JSONBackend,
+        'junit': backends.junit.JUnitBackend,
+    }
+
+    check = lambda n, i: nt.assert_is(backends.get_backend(n), i)
+
+    for name, inst in backends_.iteritems():
+        check.description = 'get_backend({0}) returns {0} backend'.format(name)
+        yield check, name, inst
+
+
+def test_json_initialize_metadata():
+    """ JSONBackend.initialize() produces a metadata.json file """
+    with utils.tempdir() as f:
+        test = backends.json.JSONBackend(f)
+        test.initialize(BACKEND_INITIAL_META)
+
+        nt.ok_(os.path.exists(os.path.join(f, 'metadata.json')))
+
+
+class TestJSONTestMethod(utils.StaticDirectory):
+    @classmethod
+    def setup_class(cls):
+        cls.test_name = 'a/test/group/test1'
+        cls.result = results.TestResult({
+            'time': 1.2345,
+            'result': 'pass',
+            'out': 'this is stdout',
+            'err': 'this is stderr',
+        })
+        super(TestJSONTestMethod, cls).setup_class()
+        test = backends.json.JSONBackend(cls.tdir)
+        test.initialize(BACKEND_INITIAL_META)
+        test.write_test(cls.test_name, cls.result)
+
+    def test_write_test(self):
+        """ JSONBackend.write_test() adds tests to a 'tests' directory """
+        assert os.path.exists(os.path.join(self.tdir, 'tests', '0.json'))
+
+    def test_json_is_valid(self):
+        """ JSONBackend.write_test() produces valid json """
+        with open(os.path.join(self.tdir, 'tests', '0.json'), 'r') as f:
+            try:
+                json.load(f)
+            except Exception as e:
+                raise AssertionError(e)
+
+    def test_json_is_correct(self):
+        """ JSONBackend.write_test() produces correct json """
+        with open(os.path.join(self.tdir, 'tests', '0.json'), 'r') as f:
+            test = json.load(f)
+
+        nt.assert_dict_equal({self.test_name: self.result}, test)
+
+
+class TestJSONTestFinalize(utils.StaticDirectory):
+    @classmethod
+    def setup_class(cls):
+        cls.test_name = 'a/test/group/test1'
+        cls.result = results.TestResult({
+            'time': 1.2345,
+            'result': 'pass',
+            'out': 'this is stdout',
+            'err': 'this is stderr',
+        })
+        super(TestJSONTestFinalize, cls).setup_class()
+        test = backends.json.JSONBackend(cls.tdir)
+        test.initialize(BACKEND_INITIAL_META)
+        test.write_test(cls.test_name, cls.result)
+        test.finalize()
+
+    def test_remove_metadata(self):
+        """ JSONBackend.finalize() removes metadata.json """
+        assert not os.path.exists(os.path.join(self.tdir, 'metadata.json'))
+
+    def test_remove_tests(self):
+        """ JSONBackend.finalize() removes tests directory """
+        assert not os.path.exists(os.path.join(self.tdir, 'tests'))
+
+    def test_create_results(self):
+        """ JSONBackend.finalize() creates a results.json file """
+        assert os.path.exists(os.path.join(self.tdir, 'results.json'))
+
+    def test_results_valid(self):
+        """ JSONBackend.finalize() results.json is valid """
+        with open(os.path.join(self.tdir, 'results.json'), 'r') as f:
+            try:
+                json.load(f)
+            except Exception as e:
+                raise AssertionError(e)
+
+
+def test_update_results_current():
+    """ update_results() returns early when the results_version is current """
+    data = utils.JSON_DATA.copy()
+    data['results_version'] = backends.json.CURRENT_JSON_VERSION
+
+    with utils.tempdir() as d:
+        with open(os.path.join(d, 'main'), 'w') as f:
+            json.dump(data, f)
+
+        with open(os.path.join(d, 'main'), 'r') as f:
+            base = backends.json._load(f)
+
+        res = backends.json._update_results(base, f.name)
+
+    nt.assert_dict_equal(res.__dict__, base.__dict__)
+
+
+def test_update_results_old():
+    """ update_results() updates results
+
+    Because of the design of our updates (namely that they silently
+    incrementally update from x to y) it's impossible to know exactly what
+    we'll get at the end without having tests that have to be reworked each
+    time updates are run. Since there already is (at least for v0 -> v1) a
+    fairly comprehensive set of tests, this test only checks that
+    results_version has been set equal to CURRENT_JSON_VERSION (which is one
+    of the effects of running _update_results()), with the assumption that
+    there is sufficient other testing of the update process.
+
+    """
+    data = utils.JSON_DATA.copy()
+    data['results_version'] = 0
+
+    with utils.tempdir() as d:
+        with open(os.path.join(d, 'main'), 'w') as f:
+            json.dump(data, f)
+
+        with open(os.path.join(d, 'main'), 'r') as f:
+            base = backends.json._load(f)
+
+        res = backends.json._update_results(base, f.name)
+
+    nt.assert_equal(res.results_version, backends.json.CURRENT_JSON_VERSION)
+
+
+@utils.no_error
+def test_json_resume_non_folder():
+    """ TestrunResult.resume doesn't accept a file """
+    with utils.with_tempfile('') as f:
+        with nt.assert_raises(AssertionError):
+            backends.json._resume(f)
+
+
+@utils.no_error
+def test_resume_load():
+    """ TestrunResult.resume loads with good results """
+    with utils.tempdir() as f:
+        backend = backends.json.JSONBackend(f)
+        backend.initialize(BACKEND_INITIAL_META)
+        backend.write_test("group1/test1", {'result': 'fail'})
+        backend.write_test("group1/test2", {'result': 'pass'})
+        backend.write_test("group2/test3", {'result': 'fail'})
+
+        backends.json._resume(f)
+
+
+def test_resume_load_valid():
+    """ TestrunResult.resume loads valid results """
+    with utils.tempdir() as f:
+        backend = backends.json.JSONBackend(f)
+        backend.initialize(BACKEND_INITIAL_META)
+        backend.write_test("group1/test1", {'result': 'fail'})
+        backend.write_test("group1/test2", {'result': 'pass'})
+        backend.write_test("group2/test3", {'result': 'fail'})
+
+        test = backends.json._resume(f)
+
+        nt.assert_set_equal(
+            set(test.tests.keys()),
+            set(['group1/test1', 'group1/test2', 'group2/test3']),
+        )
+
+
+def test_resume_load_invalid():
+    """ TestrunResult.resume ignores invalid results """
+    with utils.tempdir() as f:
+        backend = backends.json.JSONBackend(f)
+        backend.initialize(BACKEND_INITIAL_META)
+        backend.write_test("group1/test1", {'result': 'fail'})
+        backend.write_test("group1/test2", {'result': 'pass'})
+        backend.write_test("group2/test3", {'result': 'fail'})
+        with open(os.path.join(f, 'tests', 'x.json'), 'w') as w:
+            w.write('foo')
+
+        test = backends.json._resume(f)
+
+        nt.assert_set_equal(
+            set(test.tests.keys()),
+            set(['group1/test1', 'group1/test2', 'group2/test3']),
+        )
+
+
+@utils.no_error
+def test_load_results_folder_as_main():
+    """ Test that load_results takes a folder with a file named main in it """
+    with utils.tempdir() as tdir:
+        with open(os.path.join(tdir, 'main'), 'w') as tfile:
+            tfile.write(json.dumps(utils.JSON_DATA))
+
+        backends.json.load_results(tdir)
+
+
+@utils.no_error
+def test_load_results_folder():
+    """ Test that load_results takes a folder with a file named results.json """
+    with utils.tempdir() as tdir:
+        with open(os.path.join(tdir, 'results.json'), 'w') as tfile:
+            tfile.write(json.dumps(utils.JSON_DATA))
+
+        backends.json.load_results(tdir)
+
+
+@utils.no_error
+def test_load_results_file():
+    """ Test that load_results takes a file """
+    with utils.resultfile() as tfile:
+        backends.json.load_results(tfile.name)
diff --git a/framework/tests/results_tests.py b/framework/tests/results_tests.py
index 00e64dc..4fc49c5 100644
--- a/framework/tests/results_tests.py
+++ b/framework/tests/results_tests.py
@@ -22,16 +22,12 @@
 
 
 from __future__ import print_function, absolute_import
-import os
-import json
 
-import nose.tools as nt
-
-from framework import results, status, backends, grouptools
-from framework.tests.backends_tests import BACKEND_INITIAL_META
+from framework import results, status
 import framework.tests.utils as utils
 
 
+@utils.no_error
 def check_initialize(target):
     """ Check that a class initializes without error """
     func = target()
@@ -58,157 +54,9 @@ def test_generate_initialize():
         yield yieldable, target
 
 
-def test_load_results_folder_as_main():
-    """ Test that load_results takes a folder with a file named main in it """
-    with utils.tempdir() as tdir:
-        with open(os.path.join(tdir, 'main'), 'w') as tfile:
-            tfile.write(json.dumps(utils.JSON_DATA))
-
-        results.load_results(tdir)
-
-
-def test_load_results_folder():
-    """ Test that load_results takes a folder with a file named results.json """
-    with utils.tempdir() as tdir:
-        with open(os.path.join(tdir, 'results.json'), 'w') as tfile:
-            tfile.write(json.dumps(utils.JSON_DATA))
-
-        results.load_results(tdir)
-
-
-def test_load_results_file():
-    """ Test that load_results takes a file """
-    with utils.resultfile() as tfile:
-        results.load_results(tfile.name)
-
-
 def test_testresult_load_to_status():
     """ TestResult initialized with result key converts the value to a Status
     """
     result = results.TestResult.load({'result': 'pass'})
     assert isinstance(result['result'], status.Status), \
         "Result key not converted to a status object"
-
-
-def test_testrunresult_write():
-    """ TestrunResult.write() works
-
-    This tests for a bug where TestrunResult.write() wrote a file containing
-    {}, essentially if it dumps a file that is equal to what was provided then
-    it's probably working
-
-    """
-    with utils.resultfile() as f:
-        result = results.load_results(f.name)
-        with utils.tempdir() as tdir:
-            result.write(os.path.join(tdir, 'results.json'))
-            new = results.load_results(os.path.join(tdir, 'results.json'))
-
-    nt.assert_dict_equal(result.__dict__, new.__dict__)
-
-
-def test_update_results_current():
-    """ update_results() returns early when the results_version is current """
-    data = utils.JSON_DATA.copy()
-    data['results_version'] = results.CURRENT_JSON_VERSION
-
-    with utils.tempdir() as d:
-        with open(os.path.join(d, 'main'), 'w') as f:
-            json.dump(data, f)
-
-        with open(os.path.join(d, 'main'), 'r') as f:
-            base = results.TestrunResult.load(f)
-
-        res = results.update_results(base, f.name)
-
-    nt.assert_dict_equal(res.__dict__, base.__dict__)
-
-
-def test_update_results_old():
-    """ update_results() updates results
-
-    Because of the design of the our updates (namely that they silently
-    incrementally update from x to y) it's impossible to konw exactly what
-    we'll get at th end without having tests that have to be reworked each time
-    updates are run. Since there already is (at least for v0 -> v1) a fairly
-    comprehensive set of tests, this test only tests that update_results() has
-    been set equal to the CURRENT_JSON_VERSION, (which is one of the effects of
-    runing update_results() with the assumption that there is sufficient other
-    testing of the update process.
-
-    """
-    data = utils.JSON_DATA.copy()
-    data['results_version'] = 0
-
-    with utils.tempdir() as d:
-        with open(os.path.join(d, 'main'), 'w') as f:
-            json.dump(data, f)
-
-        with open(os.path.join(d, 'main'), 'r') as f:
-            base = results.TestrunResult.load(f)
-
-        res = results.update_results(base, f.name)
-
-    nt.assert_equal(res.results_version, results.CURRENT_JSON_VERSION)
-
-
-def test_resume_non_folder():
-    """ TestrunResult.resume doesn't accept a file """
-    with utils.with_tempfile('') as f:
-        with nt.assert_raises(AssertionError):
-            results.TestrunResult.resume(f)
-
-
-def test_resume_load():
-    """ TestrunResult.resume loads with good results """
-    with utils.tempdir() as f:
-        backend = backends.json.JSONBackend(f)
-        backend.initialize(BACKEND_INITIAL_META)
-        backend.write_test(grouptools.join("group1', 'test1"), {'result': 'fail'})
-        backend.write_test(grouptools.join("group1', 'test2"), {'result': 'pass'})
-        backend.write_test(grouptools.join("group2', 'test3"), {'result': 'fail'})
-
-        try:
-            results.TestrunResult.resume(f)
-        except Exception as e:
-            raise AssertionError(e)
-
-
-def test_resume_load_valid():
-    """ TestrunResult.resume loads valid results """
-    with utils.tempdir() as f:
-        backend = backends.json.JSONBackend(f)
-        backend.initialize(BACKEND_INITIAL_META)
-        backend.write_test(grouptools.join('group1', 'test1'), {'result': 'fail'})
-        backend.write_test(grouptools.join('group1', 'test2'), {'result': 'pass'})
-        backend.write_test(grouptools.join('group2', 'test3'), {'result': 'fail'})
-
-        test = results.TestrunResult.resume(f)
-
-        nt.assert_set_equal(
-            set(test.tests.keys()),
-            set([grouptools.join('group1', 'test1'),
-                 grouptools.join('group1', 'test2'),
-                 grouptools.join('group2', 'test3')]),
-        )
-
-
-def test_resume_load_invalid():
-    """ TestrunResult.resume ignores invalid results """
-    with utils.tempdir() as f:
-        backend = backends.json.JSONBackend(f)
-        backend.initialize(BACKEND_INITIAL_META)
-        backend.write_test(grouptools.join('group1', 'test1'), {'result': 'fail'})
-        backend.write_test(grouptools.join('group1', 'test2'), {'result': 'pass'})
-        backend.write_test(grouptools.join('group2', 'test3'), {'result': 'fail'})
-        with open(os.path.join(f, 'tests', 'x.json'), 'w') as w:
-            w.write('foo')
-
-        test = results.TestrunResult.resume(f)
-
-        nt.assert_set_equal(
-            set(test.tests.keys()),
-            set([grouptools.join('group1', 'test1'),
-                 grouptools.join('group1', 'test2'),
-                 grouptools.join('group2', 'test3')]),
-        )
diff --git a/framework/tests/results_v0_tests.py b/framework/tests/results_v0_tests.py
index 83f3a48..bf885a7 100644
--- a/framework/tests/results_v0_tests.py
+++ b/framework/tests/results_v0_tests.py
@@ -29,6 +29,7 @@ import tempfile
 import nose.tools as nt
 
 import framework.results as results
+from framework import backends
 import framework.tests.utils as utils
 
 # NOTE: It is very important to NOT use grouptools in this file.
@@ -123,7 +124,7 @@ DATA['tests'].update({
 
 with utils.with_tempfile(json.dumps(DATA)) as t:
     with open(t, 'r') as f:
-        RESULT = results._update_zero_to_one(results.TestrunResult.load(f))
+        RESULT = backends.json._update_zero_to_one(backends.json._load(f))
 
 
 def test_dmesg():
@@ -212,7 +213,7 @@ def test_info_split():
 
     with utils.with_tempfile(json.dumps(data)) as t:
         with open(t, 'r') as f:
-            results._update_zero_to_one(results.TestrunResult.load(f))
+            backends.json._update_zero_to_one(backends.json._load(f))
 
 
 def test_subtests_with_slash():
@@ -237,7 +238,7 @@ def _load_with_update(data):
     """
     try:
         with utils.with_tempfile(json.dumps(data)) as t:
-            result = results.load_results(t)
+            result = backends.json.load_results(t)
     except OSError as e:
         # There is the potential that the file will be renamed. In that event
         # remove the renamed files
@@ -251,7 +252,7 @@ def _load_with_update(data):
 
 
 def test_load_results_unversioned():
-    """results.load_results: Loads unversioned results and updates correctly.
+    """backends.json.load_results: Loads unversioned results and updates correctly.
 
     This is just a random change to show that the update path is being hit.
 
@@ -261,7 +262,7 @@ def test_load_results_unversioned():
 
 
 def test_load_results_v0():
-    """results.load_results: Loads results v0 and updates correctly.
+    """backends.json.load_results: Loads results v0 and updates correctly.
 
     This is just a random change to show that the update path is being hit.
 
diff --git a/framework/tests/results_v1_tests.py b/framework/tests/results_v1_tests.py
index f4bfc82..b233dda 100644
--- a/framework/tests/results_v1_tests.py
+++ b/framework/tests/results_v1_tests.py
@@ -28,7 +28,7 @@ except ImportError:
     import json
 import nose.tools as nt
 
-import framework.results as results
+from framework import backends
 import framework.tests.utils as utils
 
 # NOTE: do NOT use grouptools in this module, see v0 tests for explanation
@@ -75,8 +75,8 @@ class TestV2Update(object):
 
         with utils.with_tempfile(json.dumps(data)) as t:
             with open(t, 'r') as f:
-                cls.result = results._update_one_to_two(
-                    results.TestrunResult.load(f))
+                cls.result = backends.json._update_one_to_two(
+                    backends.json._load(f))
 
     def test_version_is_two(self):
         """update_results (v2): The result version is updated to 2."""
@@ -142,8 +142,8 @@ class TestV2NoUpdate(object):
 
         with utils.with_tempfile(json.dumps(data)) as t:
             with open(t, 'r') as f:
-                cls.result = results._update_one_to_two(
-                    results.TestrunResult.load(f))
+                cls.result = backends.json._update_one_to_two(
+                    backends.json._load(f))
 
     def test_version_is_two(self):
         """update_results (v2) no change: The result version is updated to 2.
diff --git a/framework/tests/results_v2_tests.py b/framework/tests/results_v2_tests.py
index d34f2e5..934c947 100644
--- a/framework/tests/results_v2_tests.py
+++ b/framework/tests/results_v2_tests.py
@@ -28,7 +28,7 @@ except ImportError:
     import json
 import nose.tools as nt
 
-import framework.results as results
+from framework import backends
 import framework.tests.utils as utils
 
 # NOTE: do NOT use grouptools in this module, see v0 tests for explanation
@@ -80,7 +80,7 @@ DATA = {
 with utils.with_tempfile(json.dumps(DATA)) as t:
     with open(t, 'r') as f:
         # pylint: disable=protected-access
-        RESULT = results._update_two_to_three(results.TestrunResult.load(f))
+        RESULT = backends.json._update_two_to_three(backends.json._load(f))
 
 
 def test_unchanged():
diff --git a/framework/tests/results_v3_tests.py b/framework/tests/results_v3_tests.py
index d90d110..eaf84b0 100644
--- a/framework/tests/results_v3_tests.py
+++ b/framework/tests/results_v3_tests.py
@@ -22,7 +22,6 @@
 
 from __future__ import print_function, absolute_import, division
 
-import os
 import copy
 try:
     import simplejson as json
@@ -30,7 +29,7 @@ except ImportError:
     import json
 import nose.tools as nt
 
-import framework.results as results
+from framework import backends
 import framework.tests.utils as utils
 
 # NOTE: do NOT use grouptools in this module, see v0 tests for explanation
@@ -83,7 +82,7 @@ def make_result(data):
     with utils.with_tempfile(json.dumps(data)) as t:
         with open(t, 'r') as f:
             # pylint: disable=protected-access
-            return results._update_three_to_four(results.TestrunResult.load(f))
+            return backends.json._update_three_to_four(backends.json._load(f))
 
 
 class TestV4(object):
diff --git a/framework/tests/results_v4_tests.py b/framework/tests/results_v4_tests.py
index 3a17fcc..5c866fc 100644
--- a/framework/tests/results_v4_tests.py
+++ b/framework/tests/results_v4_tests.py
@@ -29,7 +29,7 @@ except ImportError:
     import json
 import nose.tools as nt
 
-import framework.results as results
+from framework import backends
 import framework.tests.utils as utils
 
 # NOTE: do NOT use grouptools in this module, see v0 tests for explanation
@@ -78,7 +78,7 @@ def make_result(data):
     with utils.with_tempfile(json.dumps(data)) as t:
         with open(t, 'r') as f:
             # pylint: disable=protected-access
-            return results._update_four_to_five(results.TestrunResult.load(f))
+            return backends.json._update_four_to_five(backends.json._load(f))
 
 
 class TestV5(object):
@@ -119,5 +119,5 @@ def test_load_results():
         with open(tempfile, 'w') as f:
             json.dump(DATA, f)
         with open(tempfile, 'r') as f:
-            result = results.load_results(tempfile)
+            result = backends.json.load_results(tempfile)
             nt.assert_equal(result.results_version, 5)
diff --git a/framework/tests/utils.py b/framework/tests/utils.py
index 3970659..556f523 100644
--- a/framework/tests/utils.py
+++ b/framework/tests/utils.py
@@ -59,7 +59,7 @@ JSON_DATA = {
         "filter": [],
         "exclude_filter": []
     },
-    "results_version": framework.results.CURRENT_JSON_VERSION,
+    "results_version": framework.backends.json.CURRENT_JSON_VERSION,
     "name": "fake-tests",
     "lspci": "fake",
     "glxinfo": "fake",
-- 
2.3.5
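
After this change the public JSON entry points live in framework.backends.json
rather than framework.results. A minimal usage sketch (the results path below
is hypothetical and not part of this patch):

    from framework import backends

    # Load a results file; any needed version updates (v0 through v4 -> v5)
    # are applied by the backend before the TestrunResult is returned.
    testrun = backends.json.load_results('/tmp/results/main')

    assert testrun.results_version == backends.json.CURRENT_JSON_VERSION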


