[Piglit] [RFC 02/10] framework: update JSON storage format to version 10

Dylan Baker dylan at pnwbakers.com
Thu Oct 19 00:47:49 UTC 2017


I don't see any unit tests updated, and I'm pretty sure at least some of them
are going to be broken by these changes. I'd really like to see at least one
test for the 9 -> 10 update path. There's also a JSON schema that needs to be
updated for the v10 changes.
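
Something like this minimal sketch is the kind of test I mean -- untested,
and the import path and the direct call into the private updater are my
assumptions, so adjust it to match however the existing update tests are
structured:

    from framework.backends import json as json_backend


    def test_update_nine_to_ten():
        """_update_nine_to_ten: turns the tests dict into a results list."""
        # A pared-down v9 result: 'tests' is a dict keyed by test name.
        # (The test name here is made up.)
        old = {
            'results_version': 9,
            'tests': {
                'group@some-test': {
                    '__type__': 'TestResult',
                    'result': 'pass',
                },
            },
        }

        new = json_backend._update_nine_to_ten(old)

        assert new['results_version'] == 10
        assert 'tests' not in new
        # The old dict key must end up as the result's 'root'.
        assert new['results'][0]['root'] == 'group@some-test'
        assert new['results'][0]['result'] == 'pass'

As for the schema, the v10 change amounts to: drop the 'tests' object and
add a 'results' array whose items are TestResult objects with a required
string 'root' property.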

Dylan

Quoting Nicolai Hähnle (2017-10-11 03:26:51)
> From: Nicolai Hähnle <nicolai.haehnle at amd.com>
> 
> Reflect the refactoring of TestResult and TestrunResult in the on-disk
> storage format.
> ---
>  framework/backends/abstract.py |  6 +++---
>  framework/backends/json.py     | 39 ++++++++++++++++++++++++++++++---------
>  framework/results.py           | 17 +++++++++--------
>  framework/test/base.py         |  1 +
>  4 files changed, 43 insertions(+), 20 deletions(-)
> 
> diff --git a/framework/backends/abstract.py b/framework/backends/abstract.py
> index 85abfa52d..27dec7e6b 100644
> --- a/framework/backends/abstract.py
> +++ b/framework/backends/abstract.py
> @@ -165,22 +165,20 @@ class FileBackend(Backend):
>                          tests. It is important for resumes that this is not
>                          overlapping as the Inheriting classes assume they are
>                          not. Default: 0
>  
>      """
>      def __init__(self, dest, file_start_count=0, **kwargs):
>          self._dest = dest
>          self._counter = itertools.count(file_start_count)
>          self._write_final = write_compressed
>  
> -    __INCOMPLETE = TestResult(result=INCOMPLETE)
> -
>      def __fsync(self, file_):
>          """ Sync the file to disk
>  
>          If options.OPTIONS.sync is truthy this will sync self._file to disk
>  
>          """
>          file_.flush()
>          if options.OPTIONS.sync:
>              os.fsync(file_.fileno())
>  
> @@ -210,14 +208,16 @@ class FileBackend(Backend):
>              tfile = file_ + '.tmp'
>              with open(tfile, 'w') as f:
>                  self._write(f, name, val)
>                  self.__fsync(f)
>              shutil.move(tfile, file_)
>  
>          file_ = os.path.join(self._dest, 'tests', '{}.{}'.format(
>              next(self._counter), self._file_extension))
>  
>          with open(file_, 'w') as f:
> -            self._write(f, name, self.__INCOMPLETE)
> +            incomplete = TestResult(result=INCOMPLETE)
> +            incomplete.root = name
> +            self._write(f, name, incomplete)
>              self.__fsync(f)
>  
>          yield finish
> diff --git a/framework/backends/json.py b/framework/backends/json.py
> index 882169e09..80d82d0ab 100644
> --- a/framework/backends/json.py
> +++ b/framework/backends/json.py
> @@ -46,21 +46,21 @@ from framework import status, results, exceptions, compat
>  from .abstract import FileBackend, write_compressed
>  from .register import Registry
>  from . import compression
>  
>  __all__ = [
>      'REGISTRY',
>      'JSONBackend',
>  ]
>  
>  # The current version of the JSON results
> -CURRENT_JSON_VERSION = 9
> +CURRENT_JSON_VERSION = 10
>  
>  # The minimum JSON format supported
>  MINIMUM_SUPPORTED_VERSION = 7
>  
>  # The level to indent a final file
>  INDENT = 4
>  
>  
>  def piglit_encoder(obj):
>      """ Encoder for piglit that can transform additional classes into json
> @@ -139,35 +139,35 @@ class JSONBackend(FileBackend):
>  
>              # Load the metadata and put it into a dictionary
>              with open(os.path.join(self._dest, 'metadata.json'), 'r') as f:
>                  data.update(json.load(f))
>  
>              # If there is more metadata add it the dictionary
>              if metadata:
>                  data.update(metadata)
>  
>              # Add the tests to the dictionary
> -            data['tests'] = collections.OrderedDict()
> +            data['results'] = []
>  
>              for test in file_list:
>                  test = os.path.join(tests_dir, test)
>                  if os.path.isfile(test):
>                      # Try to open the json snippets. If we fail to open a test
>                      # then throw the whole thing out. This gives us atomic
>                      # writes, the writing worked and is valid or it didn't
>                      # work.
>                      try:
>                          with open(test, 'r') as f:
> -                            data['tests'].update(json.load(f))
> +                            data['results'].append(json.load(f))
>                      except ValueError:
>                          pass
> -            assert data['tests']
> +            assert data['results']
>  
>              data = results.TestrunResult.from_dict(data)
>  
>              # write out the combined file. Use the compression writer from the
>              # FileBackend
>              with self._write_final(os.path.join(self._dest, 'results.json')) as f:
>                  json.dump(data, f, default=piglit_encoder, indent=INDENT)
>  
>          # Otherwise use jsonstreams to write the final dictionary. This uses an
>          # external library, but is slightly faster and uses considerably less
> @@ -179,40 +179,40 @@ class JSONBackend(FileBackend):
>                  with jsonstreams.Stream(jsonstreams.Type.object, fd=f, indent=4,
>                                          encoder=encoder, pretty=True) as s:
>                      s.write('__type__', 'TestrunResult')
>                      with open(os.path.join(self._dest, 'metadata.json'),
>                                'r') as n:
>                          s.iterwrite(six.iteritems(json.load(n, object_pairs_hook=collections.OrderedDict)))
>  
>                      if metadata:
>                          s.iterwrite(six.iteritems(metadata))
>  
> -                    with s.subobject('tests') as t:
> +                    with s.subarray('results') as t:
>                          for test in file_list:
>                              test = os.path.join(tests_dir, test)
>                              if os.path.isfile(test):
>                                  try:
>                                      with open(test, 'r') as f:
>                                          a = json.load(f)
>                                  except ValueError:
>                                      continue
>  
> -                                t.iterwrite(six.iteritems(a))
> +                                t.write(a)
>  
>  
>          # Delete the temporary files
>          os.unlink(os.path.join(self._dest, 'metadata.json'))
>          shutil.rmtree(os.path.join(self._dest, 'tests'))
>  
>      @staticmethod
>      def _write(f, name, data):
> -        json.dump({name: data}, f, default=piglit_encoder)
> +        json.dump(data, f, default=piglit_encoder)
>  
>  
>  def load_results(filename, compression_):
>      """ Loader function for TestrunResult class
>  
>      This function takes a single argument of a results file.
>  
>      It makes quite a few assumptions, first it assumes that it has been passed
>      a folder, if that fails then it looks for a plain text json file called
>      "main"
> @@ -277,32 +277,32 @@ def _resume(results_dir):
>      # pylint: disable=maybe-no-member
>      assert os.path.isdir(results_dir), \
>          "TestrunResult.resume() requires a directory"
>  
>      # Load the metadata
>      with open(os.path.join(results_dir, 'metadata.json'), 'r') as f:
>          meta = json.load(f)
>      assert meta['results_version'] == CURRENT_JSON_VERSION, \
>          "Old results version, resume impossible"
>  
> -    meta['tests'] = collections.OrderedDict()
> +    meta['results'] = []
>  
>      # Load all of the test names and added them to the test list
>      tests_dir = os.path.join(results_dir, 'tests')
>      file_list = sorted(
>          (l for l in os.listdir(tests_dir) if l.endswith('.json')),
>          key=lambda p: int(os.path.splitext(p)[0]))
>  
>      for file_ in file_list:
>          with open(os.path.join(tests_dir, file_), 'r') as f:
>              try:
> -                meta['tests'].update(json.load(f))
> +                meta['results'].append(json.load(f))
>              except ValueError:
>                  continue
>  
>      return results.TestrunResult.from_dict(meta)
>  
>  
>  def _update_results(results, filepath):
>      """ Update results to the latest version
>  
>      This function is a wrapper for other update_* functions, providing
> @@ -315,20 +315,21 @@ def _update_results(results, filepath):
>  
>      """
>  
>      def loop_updates(results):
>          """ Helper to select the proper update sequence """
>          # Python lacks a switch statement, the workaround is to use a
>          # dictionary
>          updates = {
>              7: _update_seven_to_eight,
>              8: _update_eight_to_nine,
> +            9: _update_nine_to_ten,
>          }
>  
>          while results['results_version'] < CURRENT_JSON_VERSION:
>              results = updates[results['results_version']](results)
>  
>          return results
>  
>      if results['results_version'] < MINIMUM_SUPPORTED_VERSION:
>          raise exceptions.PiglitFatalError(
>              'Unsupported version "{}", '
> @@ -393,16 +394,36 @@ def _update_eight_to_nine(result):
>          if 'pid' in test:
>              test['pid'] = [test['pid']]
>          else:
>              test['pid'] = []
>  
>      result['results_version'] = 9
>  
>      return result
>  
>  
> +def _update_nine_to_ten(result):
> +    """Update json results from version 9 to 10.
> +
> +    This changes the tests dictionary to the results list, and adds the
> +    'root' attribute to results.
> +
> +    """
> +    result['results'] = []
> +
> +    for key, test in six.iteritems(result['tests']):
> +        test['root'] = key
> +        result['results'].append(test)
> +
> +    del result['tests']
> +
> +    result['results_version'] = 10
> +
> +    return result
> +
> +
>  REGISTRY = Registry(
>      extensions=['.json'],
>      backend=JSONBackend,
>      load=load_results,
>      meta=set_meta,
>  )
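
To make the on-disk change concrete (hand-written illustration, not actual
piglit output): _update_nine_to_ten turns

    "tests": {
        "group@test-a": {"__type__": "TestResult", "result": "pass", ...}
    }

into

    "results": [
        {"__type__": "TestResult", "root": "group@test-a", "result": "pass", ...}
    ]

and bumps results_version from 9 to 10.
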
> diff --git a/framework/results.py b/framework/results.py
> index a44029f3e..26ebbe7a6 100644
> --- a/framework/results.py
> +++ b/framework/results.py
> @@ -206,20 +206,21 @@ class TestResult(object):
>  
>          try:
>              return self.subtests[relative]
>          except KeyError:
>              raise KeyError(key)
>  
>      def to_json(self):
>          """Return the TestResult as a json serializable object."""
>          obj = {
>              '__type__': 'TestResult',
> +            'root': self.root,
>              'command': self.command,
>              'environment': self.environment,
>              'err': self.err,
>              'out': self.out,
>              'result': self.result,
>              'returncode': self.returncode,
>              'subtests': self.subtests.to_json(),
>              'time': self.time.to_json(),
>              'exception': self.exception,
>              'traceback': self.traceback,
> @@ -237,21 +238,21 @@ class TestResult(object):
>          status.Status object
>  
>          """
>          # pylint will say that assining to inst.out or inst.err is a non-slot
>          # because self.err and self.out are descriptors, methods that act like
>          # variables. Just silence pylint
>          # pylint: disable=assigning-non-slot
>          inst = cls()
>  
>          for each in ['returncode', 'command', 'exception', 'environment',
> -                     'traceback', 'dmesg', 'pid', 'result']:
> +                     'traceback', 'dmesg', 'pid', 'result', 'root']:
>              if each in dict_:
>                  setattr(inst, each, dict_[each])
>  
>          # Set special instances
>          if 'subtests' in dict_:
>              inst.subtests = Subtests.from_dict(dict_['subtests'])
>          if 'time' in dict_:
>              inst.time = TimeAttribute.from_dict(dict_['time'])
>  
>          # out and err must be set manually to avoid replacing the setter
> @@ -385,22 +386,24 @@ class TestrunResult(object):
>                      self.totals[name].add(total)
>                      name = grouptools.groupname(name)
>                      worklist[name].add(total)
>                  else:
>                      self.totals['root'].add(total)
>  
>      def to_json(self):
>          if not self.totals:
>              self.calculate_group_totals()
>          rep = copy.copy(self.__dict__)
> -        rep['tests'] = collections.OrderedDict((t.root, t.to_json())
> -                       for t in self.results)
> +
> +        rep['results'] = [t.to_json() for t in self.results]
> +        del rep['_tests']
> +
>          rep['__type__'] = 'TestrunResult'
>          return rep
>  
>      @classmethod
>      def from_dict(cls, dict_, _no_totals=False):
>          """Convert a dictionary into a TestrunResult.
>  
>          This method is meant to be used for loading results from json or
>          similar formats
>  
> @@ -415,28 +418,26 @@ class TestrunResult(object):
>              if value:
>                  setattr(res, name, value)
>  
>          # Since this is used to load partial metadata when writing final test
>          # results there is no guarantee that this will have a "time_elapsed"
>          # key
>          if 'time_elapsed' in dict_:
>              setattr(res, 'time_elapsed',
>                      TimeAttribute.from_dict(dict_['time_elapsed']))
>  
> -        for root, result_dict in six.iteritems(dict_['tests']):
> -            result = TestResult.from_dict(result_dict)
> -            result.root = root
> -            res.results.append(result)
> +        res.results = [TestResult.from_dict(t) for t in dict_['results']]
>  
> +        for result in res.results:
>              if result.subtests:
>                  for subtest in six.iterkeys(result.subtests):
> -                    fullname = grouptools.join(root, subtest)
> +                    fullname = grouptools.join(result.root, subtest)
>                      assert fullname not in res._tests
>                      res._tests[fullname] = result
>              else:
>                  if result.root in res._tests:
>                      # This can happen with old resumed test results.
>                      print('Warning: duplicate results for {}'.format(result.root))
>                  res._tests[result.root] = result
>  
>          if not 'totals' in dict_ and not _no_totals:
>              res.calculate_group_totals()
> diff --git a/framework/test/base.py b/framework/test/base.py
> index 0b7ebab2e..a8f81f547 100644
> --- a/framework/test/base.py
> +++ b/framework/test/base.py
> @@ -190,20 +190,21 @@ class Test(object):
>          Run a test, but with features. This times the test, uses dmesg checking
>          (if requested), and runs the logger.
>  
>          Arguments:
>          path    -- the name of the test
>          log     -- a log.Log instance
>          options -- a dictionary containing dmesg and monitoring objects
>          """
>          log.start(path)
>          # Run the test
> +        self.result.root = path
>          if OPTIONS.execute:
>              try:
>                  self.result.time.start = time.time()
>                  options['dmesg'].update_dmesg()
>                  options['monitor'].update_monitoring()
>                  self.run()
>                  self.result.time.end = time.time()
>                  self.result = options['dmesg'].update_result(self.result)
>                  options['monitor'].check_monitoring()
>              # This is a rare case where a bare exception is okay, since we're
> -- 
> 2.11.0