[Piglit] [PATCH 34/42] summary_tests.py: fix test names

Dylan Baker baker.dylan.c at gmail.com
Wed Apr 22 15:10:23 PDT 2015


Signed-off-by: Dylan Baker <dylanx.c.baker at intel.com>
---
 framework/tests/summary_tests.py | 129 ++++++++++++++++-----------------------
 framework/tests/utils.py         |  20 ++++--
 2 files changed, 67 insertions(+), 82 deletions(-)

diff --git a/framework/tests/summary_tests.py b/framework/tests/summary_tests.py
index 14fa5c2..0bd34a0 100644
--- a/framework/tests/summary_tests.py
+++ b/framework/tests/summary_tests.py
@@ -34,11 +34,11 @@ import framework.summary as summary
 import framework.tests.utils as utils
 
 
+@utils.no_error
 def test_initialize_summary():
-    """ Test that Summary initializes """
+    """summary.Summary: class initializes"""
     with utils.resultfile() as tfile:
-        test = summary.Summary([tfile.name])
-        assert test
+        summary.Summary([tfile.name])
 
 
 @utils.nose_generator
@@ -62,7 +62,8 @@ def test_summary_add_to_set():
                                ('timeout', 'pass', 'fixes'),
                                ('pass', 'timeout', 'regressions'),
                                ('pass', 'timeout', 'problems')]:
-        check_sets.description = "{0} -> {1} should be added to {2}".format(
+        check_sets.description = \
+            "summary.Summary: {0} -> {1} should be added to {2}".format(
                 ostat, nstat, set_)
 
         yield check_sets, old, ostat, new, nstat, set_
@@ -82,76 +83,50 @@ def check_sets(old, ostat, new, nstat, set_):
                             msg="{0} was not appended".format(set_))
 
 
-@utils.nose_generator
-def test_subtest_handling():
-    data = copy.deepcopy(utils.JSON_DATA)
-    data['tests']['with_subtests'] = {}
-    data['tests']['with_subtests']['result'] = 'pass'
-
-    data['tests']['with_subtests']['subtest'] = {}
-    data['tests']['with_subtests']['subtest']['subtest1'] = 'fail'
-    data['tests']['with_subtests']['subtest']['subtest2'] = 'warn'
-    data['tests']['with_subtests']['subtest']['subtest3'] = 'crash'
-    data['tests']['is_skip'] = {}
-    data['tests']['is_skip']['result'] = 'skip'
-
-    with utils.with_tempfile(json.dumps(data)) as sumfile:
-        summ = summary.Summary([sumfile])
-
-        check_subtests_are_tests.description = \
-            "Subtests should be treated as full tests "
-        yield check_subtests_are_tests, summ
-
-        check_tests_w_subtests_are_groups.description = \
-            "Tests with subtests should be a group"
-        yield check_tests_w_subtests_are_groups, summ
-
-        test_removed_from_all.description = \
-            "Tests with subtests should not be in the tests['all'] name"
-        yield test_removed_from_all, summ
-
-        subtest_not_skip_notrun.description = \
-            "Skip's should not become NotRun"
-        yield subtest_not_skip_notrun, summ
-
-
-@nt.nottest
-def check_subtests_are_tests(summary_):
-    """ Subtests should be treated as full tests """
-    print(summary_.fractions)
-    nt.assert_equal(summary_.fractions['fake-tests']['with_subtests'], (0, 3),
-        msg="Summary.fraction['fake-tests']['with_subtests'] should "
-            "be (0, 3), but isn't")
-
-
-@nt.nottest
-def check_tests_w_subtests_are_groups(summary_):
-    """ Tests with subtests should be a group
-
-    We know that the status will be 'pass' if it's not being overwritten, and
-    will be 'crash' if it has. (since we set the data that way)
-
-    """
-    print(summary_.status)
-    nt.assert_equal(
-        str(summary_.status['fake-tests']['with_subtests']), 'crash',
-        msg="Summary.status['fake-tests']['with_subtests'] should "
-            "be crash, but isn't")
-
-
-@nt.nottest
-def test_removed_from_all(summary_):
-    """ Tests with subtests should not be in the all results """
-    print(summary_.tests['all'])
-    nt.assert_not_in('with_subtests', summary_.tests['all'],
-        msg="Test with subtests should have been removed from "
-            "self.tests['all'], but wasn't")
-
-
-@nt.nottest
-def subtest_not_skip_notrun(summary_):
-    """ Ensure that skips are not changed to notruns """
-    print(summary_.status['fake-tests']['is_skip'])
-    print(summary_.results[0].tests['is_skip'])
-    nt.eq_(summary_.status['fake-tests']['is_skip'], 'skip',
-        msg="Status should be skip but was changed")
+class TestSubtestHandling(object):
+    """Test Summary subtest handling."""
+    @classmethod
+    def setup_class(cls):
+        data = copy.deepcopy(utils.JSON_DATA)
+        data['tests']['with_subtests']['result'] = 'pass'
+
+        data['tests']['with_subtests']['subtest']['subtest1'] = 'fail'
+        data['tests']['with_subtests']['subtest']['subtest2'] = 'warn'
+        data['tests']['with_subtests']['subtest']['subtest3'] = 'crash'
+        data['tests']['is_skip']['result'] = 'skip'
+
+        with utils.with_tempfile(json.dumps(data)) as sumfile:
+            cls.summ = summary.Summary([sumfile])
+
+    def test_subtests_are_tests(self):
+        """summary.Summary: Subtests should be treated as full tests"""
+        nt.assert_equal(
+            self.summ.fractions['fake-tests']['with_subtests'], (0, 3),
+            msg="Summary.fraction['fake-tests']['with_subtests'] should "
+                "be (0, 3), but isn't")
+
+    def test_tests_w_subtests_are_groups(self):
+        """summary.Summary: Tests with subtests should be a group
+
+        We know that the status will be 'pass' if it has not been
+        overwritten, and 'crash' if it has (since we set the data that way).
+
+        """
+        nt.assert_equal(
+            self.summ.status['fake-tests']['with_subtests'], 'crash',
+            msg="Summary.status['fake-tests']['with_subtests'] should "
+                "be crash, but isn't")
+
+    def test_removed_from_all(self):
+        """summary.Summary: Tests with subtests should not be in the all results
+        """
+        nt.assert_not_in(
+            'with_subtests', self.summ.tests['all'],
+            msg="Test with subtests should have been removed from "
+                "self.tests['all'], but wasn't")
+
+    def test_subtest_not_skip_notrun(self):
+        """summary.Summary: skips are not changed to notruns"""
+        nt.eq_(
+            self.summ.status['fake-tests']['is_skip'], 'skip',
+            msg="Status should be skip but was changed")
diff --git a/framework/tests/utils.py b/framework/tests/utils.py
index 59305e1..10b48f6 100644
--- a/framework/tests/utils.py
+++ b/framework/tests/utils.py
@@ -44,7 +44,7 @@ except ImportError:
 from nose.plugins.skip import SkipTest
 import nose.tools as nt
 
-from framework import test, backends
+from framework import test, backends, results
 
 
 __all__ = [
@@ -55,6 +55,16 @@ __all__ = [
 ]
 
 
+class _Tree(dict):
+    """Private helper to make JSON_DATA easier to work with."""
+    def __getitem__(self, key):
+        try:
+            return super(_Tree, self).__getitem__(key)
+        except KeyError:
+            ret = self[key] = _Tree()
+            return ret
+
+
 JSON_DATA = {
     "options": {
         "profile": "tests/fake.py",
@@ -65,12 +75,12 @@ JSON_DATA = {
     "name": "fake-tests",
     "lspci": "fake",
     "glxinfo": "fake",
-    "tests": {
-        "sometest": {
+    "tests": _Tree({
+        "sometest": results.TestResult({
             "result": "pass",
             "time": 0.01
-        }
-    }
+        })
+    })
 }
 
 
-- 
2.3.5
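
For reference, the _Tree helper this patch adds to framework/tests/utils.py
auto-creates nested entries on missing-key lookups, which is what lets
setup_class above assign deep paths such as
data['tests']['with_subtests']['result'] without building the intermediate
dicts first. A minimal standalone sketch of that behaviour (the demo keys
are only illustrative, mirroring setup_class):

class _Tree(dict):
    """Dict that creates a nested _Tree for any missing key on lookup."""
    def __getitem__(self, key):
        try:
            return super(_Tree, self).__getitem__(key)
        except KeyError:
            # Missing key: insert a fresh subtree and hand it back, so
            # chained lookups like t['a']['b'] never raise KeyError.
            ret = self[key] = _Tree()
            return ret


data = _Tree()
data['tests']['with_subtests']['result'] = 'pass'   # both levels created on the fly
data['tests']['with_subtests']['subtest']['subtest1'] = 'fail'
print(data['tests']['with_subtests']['result'])      # pass
print(sorted(data['tests']['with_subtests']))         # ['result', 'subtest']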


