[Piglit] [PATCH 1/7] framework_tests/summary.py: Tests Summary.py's assignment of statuses

Dylan Baker baker.dylan.c at gmail.com
Fri Sep 27 14:39:15 PDT 2013


This code tests the way Summary.py assigns statuses to tests.
Specifically, it tests regressions, fixes, changes, and problems,
ensuring that only the correct status transitions are added to each
list.
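
As a rough sketch of the intent (using the helper functions added in this
patch; the run and test names below are just examples), two single-test
results that form a known pass -> fail pair should be reported as a
regression (and a change) and nothing else:

    # Build two single-test result sets forming a known pass -> fail pair.
    from helpers import create_testresult, create_test

    before = create_testresult(name="before",
                               tests=create_test("some/test", "pass"))
    after = create_testresult(name="after",
                              tests=create_test("some/test", "fail"))
    # Once written to disk and fed to framework.summary.Summary, this pair
    # is expected to show up in 'regressions' (and 'changes') and in no
    # other list.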

It uses Python's built-in unittest framework and a bit of decorator magic
to limit code duplication.
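
Roughly, the decorator is used like this (the class and method names below
are purely illustrative; the import mirrors the one in the new test module):
each parameter tuple becomes its own individually named test method on the
class, so a failure in one pair does not hide the rest.

    import unittest

    from helpers import test_iterations

    PAIRS = (("pass", "fail"), ("pass", "crash"))

    class ExampleTest(unittest.TestCase):
        @test_iterations(*PAIRS)
        def test_pair(self, x, y):
            self.assertNotEqual(x, y)

    # unittest discovers and reports "test_pair('pass', 'fail')" and
    # "test_pair('pass', 'crash')" as two separate results.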

This already reveals ~20 problems in the current implementation.

Signed-off-by: Dylan Baker <baker.dylan.c at gmail.com>
---
 framework_tests/__init__.py |   0
 framework_tests/helpers.py  | 105 +++++++++++++++++
 framework_tests/summary.py  | 282 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 387 insertions(+)
 create mode 100644 framework_tests/__init__.py
 create mode 100644 framework_tests/helpers.py
 create mode 100644 framework_tests/summary.py

diff --git a/framework_tests/__init__.py b/framework_tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/framework_tests/helpers.py b/framework_tests/helpers.py
new file mode 100644
index 0000000..0aea2e1
--- /dev/null
+++ b/framework_tests/helpers.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+import sys
+
+import framework.core as core
+
+
+def test_iterations(*parameters):
+    """ Magic that allows a single method to create a whole bunch of functions.
+
+    This is desirable because of the way unittest works: each method gets a
+    name in the output, and each method stops on the first error. This makes
+    using a loop useless; if 10 of 20 iterations should fail, the first
+    failure stops the loop. The alternative to a decorator is to write a
+    test for each iteration, which could result in hundreds of tests.
+
+    """
+
+    def decorator(method, parameters=parameters):
+        def tuplify(x):
+            if not isinstance(x, tuple):
+                return (x, )
+            return x
+
+        for parameter in map(tuplify, parameters):
+            name_for_parameter = "{0}({1})".format(method.__name__,
+                                                   ", ".join(map(repr, parameter)))
+            frame = sys._getframe(1)  # pylint: disable-msg=W0212
+            frame.f_locals[name_for_parameter] = \
+                lambda self, m=method, p=parameter: m(self, *p)
+        return None
+    return decorator
+
+
+def create_testresult(name, lspci="fake lspci", glxinfo="fake glxinfo",
+                      tests={}):
+    """ Helper function that returns a complete TestResults file
+
+    This takes one required argument:
+    :name: This must be set; it names the run. If two runs have the same name
+           there can be problems in the summary code.
+
+    This function also takes three optional arguments:
+    :lspci:   The lspci information in the file. Default="fake lspci"
+    :glxinfo: The glxinfo information in the file. Default="fake glxinfo"
+    :tests:   A dictionary of tests
+
+    """
+    assert(isinstance(tests, dict))
+
+    return core.TestResult({"options": {"profile": "fake",
+                                        "filter": [],
+                                        "exclude_filter": []},
+                            "name": "{0}".format(name),
+                            "lspci": "{0}".format(lspci),
+                            "glxinfo": "{0}".format(glxinfo),
+                            "time_elapsed": 10.23456789,
+                            "tests": tests})
+
+
+def create_test(name, result, info="fake info", returncode=0, time=0.123456789,
+                command="fake command"):
+    """ Helper function that takes input and returns a dictionary of results
+    for a single test
+
+    Takes two mandatory arguments:
+    :name: The name of the test
+    :result: The result the test returned
+
+    Additionally takes four optional arguments:
+    :info:       The info entry. Default="fake info"
+    :returncode: The returncode of the test. Default=0
+    :time:       The amount of time the test ran. Default=0.123456789
+    :command:    The command that was executed. Default="fake command"
+
+    """
+    #FIXME: This should be able to create other entries for failed tests
+
+    return {name: {"info": info,
+                   "returncode": returncode,
+                   "time": time,
+                   "command": command,
+                   "result": result}}
diff --git a/framework_tests/summary.py b/framework_tests/summary.py
new file mode 100644
index 0000000..c6631a8
--- /dev/null
+++ b/framework_tests/summary.py
@@ -0,0 +1,282 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+import os
+import os.path as path
+import json
+import unittest
+import itertools
+
+import framework.summary as summary
+from helpers import test_iterations, create_testresult, create_test
+
+
+# These lists would normally reside within the class itself; however, they
+# are meant to be fed to decorators, and decorators do not have access to
+# class variables.
+
+""" ordering heirarchy from best to worst
+
+pass
+dmesg-warn
+warn
+dmesg-fail
+fail
+crash
+skip
+
+
+The following are regressions:
+
+pass|warn|dmesg-warn|fail|dmesg-fail|crash -> skip
+pass|warn|dmesg-warn|fail|dmesg-fail -> crash|skip
+pass|warn|dmesg-warn|fail -> dmesg-fail|crash|skip
+pass|warn|dmesg-warn -> fail|dmesg-fail|crash|skip
+pass|warn -> dmesg-warn|fail|dmesg-fail|crash|skip
+pass -> warn|dmesg-warn|fail|dmesg-fail|crash|skip
+
+
+The following are fixes:
+
+skip -> pass|warn|dmesg-warn|fail|dmesg-fail|crash
+crash|skip -> pass|warn|dmesg-warn|fail|dmesg-fail
+dmesg-fail|crash|skip -> pass|warn|dmesg-warn|fail
+fail|dmesg-fail|crash|skip -> pass|warn|dmesg-warn
+dmesg-warn|fail|dmesg-fail|crash|skip -> pass|warn
+warn|dmesg-warn|fail|dmesg-fail|crash|skip -> pass
+
+
+NotRun -> * and * -> NotRun is a change, but not a fix or a regression. This
+is because NotRun is not a status, but a representation of an unknown status.
+
+"""
+
+# List of possible statuses. Add new statuses before 'notrun' or break all
+# kinds of things.
+STATUSES = ['pass', 'fail', 'skip', 'warn', 'crash', 'dmesg-warn',
+            'dmesg-fail', 'notrun']
+
+# List of possible regressions
+REGRESSIONS = (("pass", "warn"),
+               ("pass", "dmesg-warn"),
+               ("pass", "fail"),
+               ("pass", "dmesg-fail"),
+               ("pass", "skip"),
+               ("pass", "crash"),
+               ("dmesg-warn", "warn"),
+               ("dmesg-warn", "dmesg-fail"),
+               ("dmesg-warn", "fail"),
+               ("dmesg-warn", "crash"),
+               ("dmesg-warn", "skip"),
+               ("warn", "fail"),
+               ("warn", "crash"),
+               ("warn", "skip"),
+               ("warn", "dmesg-fail"),
+               ("dmesg-fail", "crash"),
+               ("dmesg-fail", "skip"),
+               ("dmesg-fail", "fail"),
+               ("fail", "crash"),
+               ("fail", "skip"),
+               ("crash", "skip"))
+
+
+# List of possible fixes
+FIXES = (("skip", "crash"),
+         ("skip", "fail"),
+         ("skip", "dmesg-fail"),
+         ("skip", "warn"),
+         ("skip", "dmesg-warn"),
+         ("skip", "pass"),
+         ("crash", "fail"),
+         ("crash", "dmesg-fail"),
+         ("crash", "warn"),
+         ("crash", "dmesg-warn"),
+         ("crash", "pass"),
+         ("fail", "dmesg-fail"),
+         ("fail", "warn"),
+         ("fail", "dmesg-warn"),
+         ("fail", "pass"),
+         ("dmesg-fail", "warn"),
+         ("dmesg-fail", "dmesg-warn"),
+         ("dmesg-fail", "pass"),
+         ("warn", "dmesg-warn"),
+         ("warn", "pass"),
+         ("dmesg-warn", "pass"))
+
+
+# List of statuses that should be problems.
+PROBLEMS = ("crash", "fail", "warn", "dmesg-warn", "dmesg-fail")
+
+
+class StatusTest(unittest.TestCase):
+    """ Test for summaries handling of status assignments
+
+    This test creates summary objects with one or two results that it
+    generates. These results have a single test in them forming known status
+    pairs. These pairs are explicitly a fix, regression, or change. The code
+    then creates a summary and ensures that the status has been added to the
+    propper field, and not to any additional fields.
+
+    """
+
+    tmpdir = "/tmp/piglit"
+
+    @classmethod
+    def setUpClass(cls):
+        # A list of files that should be cleaned up after testing is done
+        cls.files = []
+
+        # Create a directory to store files in
+        cls.tmpdir = "/tmp/piglit"
+
+        try:
+            os.makedirs(cls.tmpdir)
+        except OSError:
+            # os.mkdir (and by extension os.makedirs) quite annoyingly raises
+            # an error if the directory exists
+            pass
+
+        # Create temporary files in the /tmp/piglit directory
+        for result in STATUSES[:-1]:  # notrun cannot be generated here
+            file = path.join(cls.tmpdir, result, "main")
+            try:
+                os.mkdir(path.join(cls.tmpdir, result))
+            except OSError:
+                pass
+
+            with open(file, 'w') as f:
+                json.dump(create_testresult(name=result,
+                                            tests=create_test("test", result)),
+                          f, indent=4)
+
+            # Add the file to the list of files to be removed in tearDownClass
+            cls.files.append(file)
+
+        # Create an additional file to test a Not Run status. This needs to
+        # be generated separately since the test result should not include
+        # the test that will have the not run status.
+        try:
+            os.mkdir(path.join(cls.tmpdir, "notrun"))
+        except OSError:
+            pass
+
+        file = path.join(cls.tmpdir, "notrun", "main")
+
+        with open(file, 'w') as f:
+            # If there are no tests, buildDictionary fails
+            json.dump(create_testresult(name="notrun",
+                                        tests=create_test("diff", "pass")),
+                      f, indent=4)
+
+        cls.files.append(file)
+
+    @classmethod
+    def tearDownClass(cls):
+        # Remove the generated files, then any directories left empty
+        for branch in cls.files:
+            os.remove(branch)
+            while branch != '/tmp':
+                branch = path.dirname(branch)
+                try:
+                    os.rmdir(branch)
+                except OSError:
+                    # If there is an OSError the directory is not empty, so
+                    # every additional attempt to call os.rmdir will fail.
+                    break
+
+    def _build_lists(self, files):
+        result = summary.Summary([path.join(self.tmpdir, i) for i in files])
+        result._Summary__generate_lists(['changes', 'problems', 'skipped',
+                                         'fixes', 'regressions'])
+        return result
+
+    @test_iterations(*REGRESSIONS)
+    def test_is_regression(self, x, y):
+        result = self._build_lists([x, y])
+        self.assertEqual(
+            len(result.tests['regressions']), 1,
+            "{0} -> {1} is not a regression but should be".format(x, y))
+
+    # This tests every pairing of statuses that is not a valid regression,
+    # ensuring that none of them is reported as one
+    @test_iterations(*itertools.ifilter(lambda x: x not in REGRESSIONS,
+                                        itertools.product(STATUSES, STATUSES)))
+    def test_is_not_regression(self, x, y):
+        result = self._build_lists([x, y])
+        self.assertEqual(
+            len(result.tests['regressions']), 0,
+            "{0} -> {1} is a regression but should not be".format(x, y))
+
+    # Test fixes based on the FIXES list
+    @test_iterations(*FIXES)
+    def test_is_fix(self, x, y):
+        result = self._build_lists([x, y])
+        self.assertEqual(
+            len(result.tests['fixes']), 1,
+            "{0} -> {1} is not a fix but should be".format(x, y))
+
+
+    # Test that all combinations that are not explicitly a fix are not
+    # treated as fixes
+    @test_iterations(*itertools.ifilter(lambda x: x not in FIXES,
+                                        itertools.product(STATUSES, STATUSES)))
+    def test_is_not_fix(self, x, y):
+        result = self._build_lists([x, y])
+        self.assertEqual(
+            len(result.tests['fixes']), 0,
+            "{0} -> {1} is a fix but should not be".format(x, y))
+
+    #XXX: is "notrun" -> not(notrun) a change?
+    @test_iterations(*itertools.ifilter(lambda x: x[0] != x[1],
+                                        itertools.product(STATUSES[:-1],
+                                                          STATUSES[:-1])))
+    def test_is_change(self, x, y):
+        result = self._build_lists([x, y])
+        self.assertEqual(
+            len(result.tests['changes']), 1,
+            "{0} -> {1} is a not a change but should be".format(x, y))
+
+    # Tests that two equal statuses are not changes
+    @test_iterations(*itertools.izip(STATUSES, STATUSES))
+    def test_is_not_change(self, x, y):
+        result = self._build_lists([x, y])
+        self.assertEqual(
+            len(result.tests['changes']), 0,
+            "{0} -> {1} is a change but should not be".format(x, y))
+
+    # Only statuses in the PROBLEMS list should be added to problems
+    @test_iterations(*PROBLEMS)
+    def test_is_problem(self, x):
+        result = self._build_lists([x])
+        self.assertEqual(
+            len(result.tests['problems']), 1,
+            "{0} is not a problem but should be".format(x))
+
+    # Any statuses not in the PROBLEMS list should not be a problem
+    @test_iterations(*(i for i in STATUSES if i not in PROBLEMS))
+    def test_is_not_problem(self, x):
+        result = self._build_lists([x])
+        self.assertEqual(
+            len(result.tests['problems']), 0,
+            "{0} is a problem but should not be".format(x))
-- 
1.8.1.5


