[Piglit] [PATCH 3/6] framework: Add support for jsonstreams

Dylan Baker dylan at pnwbakers.com
Fri Aug 26 20:11:49 UTC 2016


This commit adds support in the json backend for using an external
library that I wrote called jsonstreams. It's a fairly self-explanatory
library, and it has several advantages for piglit. First, I've measured
a consistent 10-15 second speedup when running the quick profile.
Second, it *vastly* reduces the amount of memory piglit needs to write
out the final JSON document.
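
For context, here is a minimal sketch of the streaming pattern the
patch relies on, mirroring the calls made in json.py below (the file
name and test entry are made up for illustration). Each write is
flushed to the underlying file as it happens, so the whole document
never has to be built in memory first:

    import jsonstreams

    with open('results.json', 'w') as f:
        # Open a top-level JSON object that streams to the file handle
        # instead of accumulating an in-memory tree.
        with jsonstreams.Stream('object', fd=f, indent=4, pretty=True) as s:
            s.write('__type__', 'TestrunResult')
            # Nested containers stream too, one entry at a time.
            with s.subobject('tests') as t:
                t.write('spec/example/test', {'result': 'pass'})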

This is not implemented as a separate backend because of the way
piglit's backends are implemented: only one backend can handle a given
file extension. While that could be worked around by setting the
extension to something like '.junit.xml', it doesn't really make sense
here, since both code paths write the same format, just by different
means.
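
To make that constraint concrete, here is a hypothetical illustration
(this is not piglit's actual registry code, just the shape of the
problem): dispatch is keyed on the file extension alone, so a second
JSON backend would collide with the existing one on '.json'.

    import os

    # Hypothetical extension -> backend mapping; one entry per extension.
    EXTENSION_TO_BACKEND = {
        '.json': 'json',
        '.xml': 'junit',
    }

    def backend_for(filename):
        _, ext = os.path.splitext(filename)
        # A separate streaming JSON backend would also want '.json'.
        return EXTENSION_TO_BACKEND[ext]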

Signed-off-by: Dylan Baker <dylanx.c.baker at intel.com>
---
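Notes for testers: the tox.ini hunk below adds a 'streams' factor so
the unit tests can run with jsonstreams installed. Assuming the envlist
combines the Python and feature factors in the usual tox way (the exact
environment names depend on the envlist, which this hunk doesn't show),
something like the following should exercise the new code path:

    tox -e py27-streams
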
 framework/backends/json.py | 111 ++++++++++++++++++++++++++------------
 tox.ini                    |   3 +-
 2 files changed, 79 insertions(+), 35 deletions(-)

diff --git a/framework/backends/json.py b/framework/backends/json.py
index 7533f53..772f8f9 100644
--- a/framework/backends/json.py
+++ b/framework/backends/json.py
@@ -24,6 +24,7 @@ from __future__ import (
     absolute_import, division, print_function, unicode_literals
 )
 import collections
+import functools
 import os
 import posixpath
 import shutil
@@ -35,6 +36,11 @@ except ImportError:
     import json
 
 import six
+try:
+    import jsonstreams
+    _STREAMS = True
+except ImportError:
+    _STREAMS = False
 
 from framework import status, results, exceptions, compat
 from .abstract import FileBackend, write_compressed
@@ -130,41 +136,78 @@ class JSONBackend(FileBackend):
         containers that are still open and closes the file
 
         """
-        # Create a dictionary that is full of data to be written to a single
-        # file
-        data = collections.OrderedDict()
-
-        # Load the metadata and put it into a dictionary
-        with open(os.path.join(self._dest, 'metadata.json'), 'r') as f:
-            data.update(json.load(f))
-
-        # If there is more metadata add it the dictionary
-        if metadata:
-            data.update(metadata)
-
-        # Add the tests to the dictionary
-        data['tests'] = collections.OrderedDict()
-
-        test_dir = os.path.join(self._dest, 'tests')
-        for test in os.listdir(test_dir):
-            test = os.path.join(test_dir, test)
-            if os.path.isfile(test):
-                # Try to open the json snippets. If we fail to open a test then
-                # throw the whole thing out. This gives us atomic writes, the
-                # writing worked and is valid or it didn't work.
-                try:
-                    with open(test, 'r') as f:
-                        data['tests'].update(json.load(f, object_hook=piglit_decoder))
-                except ValueError:
-                    pass
-        assert data['tests']
-
-        data = results.TestrunResult.from_dict(data)
+        # If jsonstreams is not present then build a complete tree of all of
+        # the data and write it with json.dump
+        if not _STREAMS:
+            # Create a dictionary that is full of data to be written to a
+            # single file
+            data = collections.OrderedDict()
+
+            # Load the metadata and put it into a dictionary
+            with open(os.path.join(self._dest, 'metadata.json'), 'r') as f:
+                data.update(json.load(f))
+
+            # If there is more metadata, add it to the dictionary
+            if metadata:
+                data.update(metadata)
+
+            # Add the tests to the dictionary
+            data['tests'] = collections.OrderedDict()
+
+            test_dir = os.path.join(self._dest, 'tests')
+            for test in os.listdir(test_dir):
+                test = os.path.join(test_dir, test)
+                if os.path.isfile(test):
+                    # Try to open the json snippets. If we fail to open a
+                    # test then throw the whole thing out. This gives us
+                    # atomic writes: either the write worked and the result
+                    # is valid, or it didn't work at all.
+                    try:
+                        with open(test, 'r') as f:
+                            data['tests'].update(
+                                json.load(f, object_hook=piglit_decoder))
+                    except ValueError:
+                        pass
+            assert data['tests']
+
+            data = results.TestrunResult.from_dict(data)
+
+            # write out the combined file. Use the compression writer from the
+            # FileBackend
+            with self._write_final(os.path.join(self._dest, 'results.json')) as f:
+                json.dump(data, f, default=piglit_encoder, indent=INDENT)
+
+        # Otherwise use jsonstreams to write the final dictionary. This uses
+        # an external library, but it is slightly faster and uses considerably
+        # less memory than building a complete tree.
+        else:
+            encoder = functools.partial(json.JSONEncoder, default=piglit_encoder)
+
+            with self._write_final(os.path.join(self._dest, 'results.json')) as f:
+                with jsonstreams.Stream('object', fd=f, indent=4,
+                                        encoder=encoder, pretty=True) as s:
+                    s.write('__type__', 'TestrunResult')
+                    with open(os.path.join(self._dest, 'metadata.json'),
+                              'r') as n:
+                        s.iterwrite(six.iteritems(json.load(n)))
+
+                    if metadata:
+                        s.iterwrite(six.iteritems(metadata))
+
+                    test_dir = os.path.join(self._dest, 'tests')
+                    with s.subobject('tests') as t:
+                        for test in os.listdir(test_dir):
+                            test = os.path.join(test_dir, test)
+                            if os.path.isfile(test):
+                                try:
+                                    with open(test, 'r') as f:
+                                        a = json.load(
+                                            f, object_hook=piglit_decoder)
+                                except ValueError:
+                                    continue
+
+                                t.iterwrite(six.iteritems(a))
 
-        # write out the combined file. Use the compression writer from the
-        # FileBackend
-        with self._write_final(os.path.join(self._dest, 'results.json')) as f:
-            json.dump(data, f, default=piglit_encoder, indent=INDENT)
 
         # Delete the temporary files
         os.unlink(os.path.join(self._dest, 'metadata.json'))
diff --git a/tox.ini b/tox.ini
index 12cdddb..41b7113 100644
--- a/tox.ini
+++ b/tox.ini
@@ -28,7 +28,8 @@ deps =
     pytest-timeout
     py{27,33,34}: mako==0.8.0
     six==1.5.2
-    {accel,noaccel}: jsonschema
+    {accel,noaccel,streams}: jsonschema
+    streams: jsonstreams>=0.3.1
 commands = 
     {accel,noaccel}: py.test -rw unittests/framework unittests/suites []
     generator: py.test -rw unittests/generators []
-- 
git-series 0.8.10

