<div dir="ltr">I think the default mode we want here is more of a "continue after crash" mode where, when it crashes, we don't rerun anything but instead just continue starting from the next test.  This is what Google does with their "cherry" tool and it would prevent ensure that random crashes don't get hidden by the rerun.<br></div><div class="gmail_extra"><br><div class="gmail_quote">On Thu, Mar 24, 2016 at 11:43 AM, Dylan Baker <span dir="ltr"><<a href="mailto:baker.dylan.c@gmail.com" target="_blank">baker.dylan.c@gmail.com</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">This adds a new feature to the deqp integration group at a time mode,<br>

On Thu, Mar 24, 2016 at 11:43 AM, Dylan Baker <baker.dylan.c@gmail.com> wrote:

This adds a new feature to the dEQP integration's group-at-a-time mode:
tests that finish with one of a set of bad statuses are rerun in
test-at-a-time mode. This recovers tests that were never run because an
earlier test in the group crashed, and clears up cases where an error in
one test would change the statuses of the tests after it.

In my runs this is still significantly faster than test-at-a-time mode
unless a huge number of tests are crashing. In that case it is up to the
developer to decide whether they need test-at-a-time mode or whether
group-at-a-time mode is sufficient.

A new option, --deqp-no-group-rerun, is added which disables this
feature.

This also addresses the Vulkan suite issue by rerunning, in
test-at-a-time mode, any tests for which the problematic statuses are
detected.
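Roughly, the selection being implemented (a standalone sketch; the statuses mirror the _RERUN list added to framework/test/deqp.py below):

RERUN_STATUSES = frozenset(['crash', 'incomplete', 'timeout', 'notrun', 'skip'])

def select_reruns(group_results):
    """Given {case: status} from a group run, pick cases to rerun singly."""
    return sorted(name for name, stat in group_results.items()
                  if stat in RERUN_STATUSES)

print(select_reruns({'dEQP-GLES2.info.vendor': 'pass',
                     'dEQP-GLES2.info.version': 'crash',
                     'dEQP-GLES2.info.renderer': 'notrun'}))
# ['dEQP-GLES2.info.renderer', 'dEQP-GLES2.info.version']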

Signed-off-by: Dylan Baker <dylanx.c.baker@intel.com>
---
 framework/backends/abstract.py |   6 ++
 framework/options.py           |   3 +
 framework/profile.py           |  80 ++++++++++--------
 framework/programs/run.py      |   9 ++
 framework/test/deqp.py         | 182 +++++++++++++++++++++++++++--------------
 tests/deqp_vk.py               |  11 ++-
 unittests/deqp_tests.py        |  50 +++++++----
 7 files changed, 228 insertions(+), 113 deletions(-)

diff --git a/framework/backends/abstract.py b/framework/backends/abstract.py
index b5b4858..4ebc8ae 100644
--- a/framework/backends/abstract.py
+++ b/framework/backends/abstract.py
@@ -207,6 +207,12 @@ class FileBackend(Backend):

         """
         def finish(val):
+            # If finish is passed None, delete the file. This is used by
+            # the dEQP backend.
+            if val is None:
+                os.unlink(file_)
+                return
+
             tfile = file_ + '.tmp'
             with open(tfile, 'w') as f:
                 self._write(f, name, val)
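For illustration, a toy stand-in for the new None convention (not the real FileBackend, which writes through write_test() and a .tmp file):

import json
import os
import tempfile

def make_writer(file_):
    def finish(val):
        if val is None:  # drop the result so a later rerun can replace it
            os.unlink(file_)
            return
        with open(file_, 'w') as f:
            json.dump(val, f)
    return finish

d = tempfile.mkdtemp()
path = os.path.join(d, 'demo.json')
w = make_writer(path)
w({'result': 'crash'})   # a group run wrote a bad status...
w(None)                  # ...so the dEQP runner discards it before the rerun
print(os.listdir(d))     # []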
diff --git a/framework/options.py b/framework/options.py
index 52d90be..b39cf60 100644
--- a/framework/options.py
+++ b/framework/options.py
@@ -181,6 +181,8 @@ class _Options(object):  # pylint: disable=too-many-instance-attributes
     env -- environment variables set for each test before run
     deqp_mode -- either 'test' or 'group'. Controls deqp integration mode, to
                  either run test at a time or group at a time.
+    deqp_group_rerun -- If True, any tests that fail in group mode will be
+                        rerun in individual mode.

     """
     include_filter = _ReListDescriptor('_include_filter', type_=_FilterReList)
@@ -196,6 +198,7 @@ class _Options(object):  # pylint: disable=too-many-instance-attributes
         self.dmesg = False
         self.sync = False
         self.deqp_mode = 'test'
+        self.deqp_group_rerun = False

         # env is used to set some base environment variables that are not going
         # to change across runs, without sending them to os.environ which is
diff --git a/framework/profile.py b/framework/profile.py
index fc38c56..5971dcb 100644
--- a/framework/profile.py
+++ b/framework/profile.py
@@ -232,6 +232,50 @@ class TestProfile(object):
         """
         pass

+    def _test(self, pair, log, backend):
+        """Function to call test.execute from map"""
+        name, test = pair
+        with backend.write_test(name) as w:
+            test.execute(name, log.get(), self.dmesg)
+            w(test.result)
+
+    def _run_threads(self, pool, testlist, log, backend):
+        """ Open a pool, close it, and join it """
+        pool.imap(lambda pair: self._test(pair, log, backend), testlist)
+        pool.close()
+        pool.join()
+
+    def _run(self, log, backend, test_list=None):
+        """Run all the tests using requested threading."""
+        test_list = test_list or self.test_list
+
+        # Multiprocessing.dummy is a wrapper around Threading that provides a
+        # multiprocessing compatible API
+        #
+        # The default value of pool is the number of virtual processor cores
+        single = multiprocessing.dummy.Pool(1)
+        multi = multiprocessing.dummy.Pool()
+
+        if options.OPTIONS.concurrent == "all":
+            self._run_threads(multi, six.iteritems(test_list), log,
+                              backend)
+        elif options.OPTIONS.concurrent == "none":
+            self._run_threads(single, six.iteritems(test_list), log,
+                              backend)
+        else:
+            # Filter and return only thread safe tests to the threaded pool
+            self._run_threads(
+                multi,
+                (x for x in six.iteritems(test_list) if x[1].run_concurrent),
+                log,
+                backend)
+            # Filter and return the non thread safe tests to the single pool
+            self._run_threads(
+                single,
+                (x for x in six.iteritems(test_list) if not x[1].run_concurrent),
+                log,
+                backend)
+
     def run(self, logger, backend):
         """ Runs all tests using Thread pool

@@ -252,43 +296,9 @@ class TestProfile(object):

         self._pre_run_hook()

-        chunksize = 1
-
         self._prepare_test_list()
         log = LogManager(logger, len(self.test_list))
-
-        def test(pair):
-            """Function to call test.execute from map"""
-            name, test = pair
-            with backend.write_test(name) as w:
-                test.execute(name, log.get(), self.dmesg)
-                w(test.result)
-
-        def run_threads(pool, testlist):
-            """ Open a pool, close it, and join it """
-            pool.imap(test, testlist, chunksize)
-            pool.close()
-            pool.join()
-
-        # Multiprocessing.dummy is a wrapper around Threading that provides a
-        # multiprocessing compatible API
-        #
-        # The default value of pool is the number of virtual processor cores
-        single = multiprocessing.dummy.Pool(1)
-        multi = multiprocessing.dummy.Pool()
-
-        if options.OPTIONS.concurrent == "all":
-            run_threads(multi, six.iteritems(self.test_list))
-        elif options.OPTIONS.concurrent == "none":
-            run_threads(single, six.iteritems(self.test_list))
-        else:
-            # Filter and return only thread safe tests to the threaded pool
-            run_threads(multi, (x for x in six.iteritems(self.test_list)
-                                if x[1].run_concurrent))
-            # Filter and return the non thread safe tests to the single pool
-            run_threads(single, (x for x in six.iteritems(self.test_list)
-                                 if not x[1].run_concurrent))
-
+        self._run(log, backend)
         log.get().summary()

         self._post_run_hook()
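The two-pool split can be exercised in isolation; a standalone sketch where the (name, run_concurrent) tuples stand in for piglit Test objects:

import multiprocessing.dummy  # thread pools behind the multiprocessing API

tests = [('a', True), ('b', False), ('c', True)]  # (name, run_concurrent)

def execute(pair):
    print('running', pair[0])

multi = multiprocessing.dummy.Pool()   # sized to the virtual core count
single = multiprocessing.dummy.Pool(1)

for pool, concurrent in ((multi, True), (single, False)):
    pool.map(execute, (t for t in tests if t[1] == concurrent))
    pool.close()
    pool.join()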
diff --git a/framework/programs/run.py b/framework/programs/run.py
index 5d171c1..9142d2e 100644
--- a/framework/programs/run.py
+++ b/framework/programs/run.py
@@ -171,6 +171,13 @@ def _run_parser(input_):
                         dest='deqp_mode',
                         help='Run DEQP integration in either "group-at-a-time"'
                              ' or "test-at-a-time" mode.')
+    parser.add_argument('--deqp-no-group-rerun',
+                        action='store_true',
+                        dest='deqp_no_group_rerun',
+                        help='With --deqp-test-mode=group, this option will '
+                             'prevent all tests with a status not in '
+                             '[pass, skip] from being rerun in test at a '
+                             'time mode')
    parser.add_argument("test_profile",
                        metavar="<Profile path(s)>",
                        nargs='+',
@@ -249,6 +256,7 @@ def run(input_):
    options.OPTIONS.dmesg = args.dmesg
    options.OPTIONS.sync = args.sync
    options.OPTIONS.deqp_mode = args.deqp_mode
+    options.OPTIONS.deqp_group_rerun = not args.deqp_no_group_rerun

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform
@@ -329,6 +337,7 @@ def resume(input_):
    options.OPTIONS.dmesg = results.options['dmesg']
    options.OPTIONS.sync = results.options['sync']
    options.OPTIONS.deqp_mode = results.options['deqp_mode']
+    options.OPTIONS.deqp_group_rerun = results.options['deqp_group_rerun']

    core.get_config(args.config_file)

diff --git a/framework/test/deqp.py b/framework/test/deqp.py
index de6aab4..e7b52a4 100644
--- a/framework/test/deqp.py
+++ b/framework/test/deqp.py
@@ -31,6 +31,7 @@ import six

 from framework import core, grouptools, exceptions, status
 from framework.profile import TestProfile
+from framework.log import LogManager
 from framework.test.base import Test, is_crash_returncode
 from framework.options import OPTIONS

@@ -61,6 +62,8 @@ _EXTRA_ARGS = get_option('PIGLIT_DEQP_EXTRA_ARGS',
                         ('deqp', 'extra_args'),
                         default='').split()

+_RERUN = ['crash', 'incomplete', 'timeout', 'notrun', 'skip']
+

def _gen_caselist_txt(bin_, caselist, extra_args):
    """Generate a caselist.txt and return its path.
@@ -108,19 +111,29 @@ def _iter_test_groups(case_file):
    contains only tests.)

    """
-    slice_ = slice(len('GROUP: '), None)
+    slice_group = slice(len('GROUP: '), None)
+    slice_test = slice(len('TEST: '), None)
+
    group = ''
+    tests = []
    with open(case_file, 'r') as caselist_file:
        for i, line in enumerate(_iterate_file(caselist_file)):
            if line.startswith('GROUP:'):
-                group = line[slice_]
+                new = line[slice_group].strip()
+
+                # This needs to handle the name of the new group being a
+                # superset of the old group (ex: items to items_max)
+                if new != group and tests:
+                    yield group, tests
+                    tests = []
+                group = new
            elif line.startswith('TEST:'):
-                if group != '':
-                    yield group.rstrip()
-                    group = ''
+                tests.append(line[slice_test].strip())
            else:
                raise exceptions.PiglitFatalError(
                    'deqp: {}:{}: ill-formed line'.format(case_file, i))
+        # Yield the final set of tests.
+        yield group.strip(), tests


def _iter_test_single(case_file):
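A self-contained model of the new grouping logic and the (group, tests) pairs it yields (the real function also rejects ill-formed lines with PiglitFatalError):

caselist = """\
GROUP: dEQP-GLES2.info
TEST: dEQP-GLES2.info.vendor
TEST: dEQP-GLES2.info.renderer
GROUP: dEQP-GLES2.capability.limits
TEST: dEQP-GLES2.capability.limits.vertex_attribs
"""

def iter_groups(lines):
    group, tests = '', []
    for line in lines:
        if line.startswith('GROUP:'):
            new = line[len('GROUP: '):].strip()
            # A new group name may be a superset of the old one
            # (e.g. limits vs. limits_lower), so compare exactly.
            if new != group and tests:
                yield group, tests
                tests = []
            group = new
        elif line.startswith('TEST:'):
            tests.append(line[len('TEST: '):].strip())
    yield group, tests  # the final group

for group, tests in iter_groups(caselist.splitlines()):
    print(group, len(tests))
# dEQP-GLES2.info 2
# dEQP-GLES2.capability.limits 1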
@@ -188,18 +201,61 @@ class DEQPProfile(TestProfile):
        filter_ = pop('filter_', default=lambda x: x)

        super(DEQPProfile, self).__init__(*args, **kwargs)
+        self._rerun = multi_class.rerun
+        self._rerun_class = single_class

        iter_ = _iter_test_cases(filter_(
            _gen_caselist_txt(bin_, filename, extra_args)))

        if OPTIONS.deqp_mode == 'group':
-            class_ = multi_class
+            for testname, rerun in iter_:
+                # deqp uses '.' as the testgroup separator.
+                piglit_name = testname.replace('.', grouptools.SEPARATOR)
+                self.test_list[piglit_name] = multi_class(testname, rerun)
        elif OPTIONS.deqp_mode == 'test':
-            class_ = single_class
+            for testname in iter_:
+                # deqp uses '.' as the testgroup separator.
+                piglit_name = testname.replace('.', grouptools.SEPARATOR)
+                self.test_list[piglit_name] = single_class(testname)
+
+    def run(self, logger, backend):
+        """Run all tests.
+
+        Adds the option to rerun tests if requested.
+
+        """
+        super(DEQPProfile, self).run(logger, backend)
+
+        if OPTIONS.deqp_group_rerun and self._rerun:
+            print('\nRerunning failed tests in single mode. '
+                  '(run with --deqp-no-group-rerun to disable)\n')
+
+            log = LogManager(logger, len(self._rerun))
+            self._run(log, backend,
+                      # TODO: replace this dict with a profile.TestDict
+                      test_list={k.replace('.', grouptools.SEPARATOR).lower():
+                                 self._rerun_class(k) for k in self._rerun})

-        for testname in iter_:
-            piglit_name = testname.replace('.', grouptools.SEPARATOR)
-            self.test_list[piglit_name] = class_(testname)
+            log.get().summary()
+
+    def _test(self, pair, log, backend):
+        """Function to call test.execute from map.
+
+        If in group mode and group-rerun is enabled, pass None to the
+        writer when the test's status is in _RERUN. This causes the
+        result file to be deleted rather than loaded; it will be
+        replaced when the rerun happens.
+
+        """
+        name, test = pair
+        if (OPTIONS.deqp_mode == 'group' and
+                OPTIONS.deqp_group_rerun and
+                isinstance(test, DEQPGroupTest)):
+            with backend.write_test(name) as w:
+                test.execute(name, log.get(), self.dmesg)
+                w(None if test.result.result in _RERUN else test.result)
+        else:
+            super(DEQPProfile, self)._test(pair, log, backend)


@six.add_metaclass(abc.ABCMeta)
@@ -273,11 +329,13 @@ class DEQPBaseTest(Test):

 class DEQPGroupTest(DEQPBaseTest):
     timeout = 300  # 5 minutes
+    rerun = []
     __name_slicer = slice(len("Test case '"), -len("'.."))
     __finder = re.compile(r'^  (Warnings|Not supported|Failed|Passed):\s+\d/(?P<total>\d+).*')

-    def __init__(self, case_name, **kwargs):
+    def __init__(self, case_name, individual_cases, **kwargs):
         super(DEQPGroupTest, self).__init__(case_name + '*', **kwargs)
+        self._individual_cases = individual_cases

     def interpret_result(self):
         """Group based result interpretation.
@@ -301,55 +359,59 @@ class DEQPGroupTest(DEQPBaseTest):
         # since there will almost certainly be an exception raised.
         if self.result.returncode != 0:
             self.result.result = 'crash'
-            return
-
-        # Strip the first 3 lines, and the last 8 lines, which aren't useful
-        # for this pass
-        lines = self.result.out.rstrip().split('\n')[3:]
-        cur = ''
-        total = None
-        for each in reversed(lines):
-            m = self.__finder.match(each)
-            if m:
-                total = int(m.group('total'))
-                break
-        assert total is not None, 'Could not calculate total test count'
-
-        lines = (l for l in lines[:-8])
-
-        # Walk over standard out line by line, looking for 'Test case' (to get
-        # the name of the test) and then for a result. Track each line, which
-        # is used to both know when to stop walking and for error reporting.
-        while len(self.result.subtests) < total:
-            for l in lines:
-                if l.startswith('Test case'):
-                    name = l[self.__name_slicer].rsplit('.', 1)[1].lower()
-                    break
-            else:
-                raise exceptions.PiglitInternalError(
-                    'Expected "Test case", but didn\'t find it in:\n'
-                    '{}\ncurrent line: {}'.format(self.result.out, l))
-
-            for l in lines:
-                # If there is an info block fast forward through it by calling
-                # next on the generator until it is passed.
-                if l.startswith('INFO'):
-                    cur = ''
-                    while not (cur.startswith('INFO') and cur.endswith('----')):
-                        cur = next(lines)
-
-                elif l.startswith('  '):
-                    try:
-                        self.result.subtests[name] = self._RESULT_MAP[l[2]]
-                    except KeyError:
-                        raise exceptions.PiglitInternalError(
-                            'Unknown status {}'.format(l[2:].split()[0]))
+        else:
+            # Strip the first 3 lines, and the last 8 lines, which aren't
+            # useful for this pass
+            lines = self.result.out.rstrip().split('\n')[3:]
+            cur = ''
+            total = None
+            for each in reversed(lines):
+                m = self.__finder.match(each)
+                if m:
+                    total = int(m.group('total'))
                     break
-            else:
-                raise exceptions.PiglitInternalError(
-                    'Expected "  (Pass,Fail,...)", but didn\'t find it in:\n'
-                    '{}\ncurrent line: {}'.format(self.result.out, l))
-
+            assert total is not None, 'Could not calculate total test count'
+
+            lines = (l for l in lines[:-8])
+
+            # Walk over standard out line by line, looking for 'Test case' (to
+            # get the name of the test) and then for a result. Track each line,
+            # which is used to both know when to stop walking and for error
+            # reporting.
+            while len(self.result.subtests) < total:
+                for l in lines:
+                    if l.startswith('Test case'):
+                        name = l[self.__name_slicer].rsplit('.', 1)[1].lower()
+                        break
+                else:
+                    raise exceptions.PiglitInternalError(
+                        'Expected "Test case", but didn\'t find it in:\n'
+                        '{}\ncurrent line: {}'.format(self.result.out, l))
+
+                for l in lines:
+                    # If there is an info block fast forward through it by
+                    # calling next on the generator until it is passed.
+                    if l.startswith('INFO'):
+                        cur = ''
+                        while not (cur.startswith('INFO') and cur.endswith('----')):
+                            cur = next(lines)
+
+                    elif l.startswith('  '):
+                        try:
+                            self.result.subtests[name] = self._RESULT_MAP[l[2]]
+                        except KeyError:
+                            raise exceptions.PiglitInternalError(
+                                'Unknown status {}'.format(l[2:].split()[0]))
+                        break
+                else:
+                    raise exceptions.PiglitInternalError(
+                        'Expected "  (Pass,Fail,...)", '
+                        'but didn\'t find it in:\n'
+                        '{}\ncurrent line: {}'.format(self.result.out, l))
+
+        # If group_rerun (the default) is enabled and the status is crash, rerun
+        if OPTIONS.deqp_group_rerun and self.result.result == 'crash':
+            self.rerun.extend(self._individual_cases)
         # We failed to parse the test output. Fall back to 'fail'.
-        if self.result.result == 'notrun':
+        elif self.result.result == 'notrun':
             self.result.result = 'fail'
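A much-reduced model of the stdout walk above (the sample output and RESULT_MAP are stand-ins; the real code also skips INFO blocks and validates against the reported totals):

OUT = """\
Test case 'dEQP-GLES2.info.vendor'..
  Pass (Pass)
Test case 'dEQP-GLES2.info.version'..
  Fail (Fail)
"""

RESULT_MAP = {'P': 'pass', 'F': 'fail'}  # keyed on the letter after the indent
name_slicer = slice(len("Test case '"), -len("'.."))

subtests = {}
lines = iter(OUT.rstrip().split('\n'))
for line in lines:
    if line.startswith('Test case'):
        name = line[name_slicer].rsplit('.', 1)[1].lower()
        status = next(lines)            # the two-space indented result line
        subtests[name] = RESULT_MAP[status[2]]
print(subtests)  # {'vendor': 'pass', 'version': 'fail'}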
diff --git a/tests/deqp_vk.py b/tests/deqp_vk.py
index 1cfce6e..1a8a911 100644
--- a/tests/deqp_vk.py
+++ b/tests/deqp_vk.py
@@ -30,7 +30,6 @@ from __future__ import (
 import re

 from framework.test import deqp
-from framework import exceptions

 __all__ = ['profile']

@@ -74,7 +73,15 @@ class DEQPVKTest(_Mixin, deqp.DEQPBaseTest):

 class DEQPVKGroupTest(_Mixin, deqp.DEQPGroupTest):
     """Test representation for the Khronos Vulkan CTS in group mode."""
-    pass
+    def interpret_result(self):
+        # If either of the known result problems is present, add all
+        # subcases to the rerun list.
+        if 'Failed to compile shader at vkGlslToSpirV' in self.result.out:
+            self.rerun.extend(self._individual_cases)
+        elif _DEQP_ASSERT.search(self.result.err):
+            self.rerun.extend(self._individual_cases)
+        else:
+            super(DEQPVKGroupTest, self).interpret_result()


 profile = deqp.DEQPProfile(  # pylint: disable=invalid-name
diff --git a/unittests/deqp_tests.py b/unittests/deqp_tests.py
index 2d347a8..f3e62b0 100644
--- a/unittests/deqp_tests.py
+++ b/unittests/deqp_tests.py
@@ -45,7 +45,7 @@ else:
    except ImportError:
        from unittest import mock

-from framework import profile, grouptools, exceptions
+from framework import grouptools, exceptions
 from framework.test import deqp
 from . import utils

@@ -107,23 +107,23 @@ def test_get_option_conf_no_option():
           None)


-def test_iter_deqp_test_cases_test():
-    """deqp.iter_deqp_test_cases: correctly detects a TEST: line"""
+def test_iter_test_cases_test():
+    """deqp.iter_test_cases: correctly detects a TEST: line"""
    with utils.tempfile('TEST: a.deqp.test') as tfile:
        gen = deqp._iter_test_cases(tfile)
        nt.eq_('a.deqp.test', next(gen))


-def test_iter_deqp_test_cases_group():
-    """deqp.iter_deqp_test_casesgen_caselist_txt: correctly detects a GROUP: line"""
+def test_iter_test_cases_group():
+    """deqp.iter_test_cases: correctly detects a GROUP: line"""
    with utils.tempfile('GROUP: a group\nTEST: a.deqp.test') as tfile:
        gen = deqp._iter_test_cases(tfile)
        nt.eq_('a.deqp.test', next(gen))


 @nt.raises(exceptions.PiglitFatalError)
-def test_iter_deqp_test_cases_bad():
-    """deqp.iter_deqp_test_casesgen_caselist_txt: PiglitFatalException is raised if line is not TEST: or GROUP:
+def test_iter_test_cases_bad():
+    """deqp.iter_test_cases: PiglitFatalError is raised if line is not TEST: or GROUP:
    """
    with utils.tempfile('this will fail') as tfile:
        gen = deqp._iter_test_cases(tfile)
@@ -363,7 +363,7 @@ class TestDEQPGroupTest_interpret_result(object):

    @classmethod
    def setup_class(cls):
-        cls.test = _DEQPGroupTest('foo')
+        cls.test = _DEQPGroupTest('foo', [])
        cls.test.result.returncode = 0
        cls.test.result.out = cls.__out
        cls.test.interpret_result()
@@ -480,7 +480,7 @@ def test_DEQPGroupTest_interpret_result_cts():
          Warnings:      0/2 (0.00%)
    """)

-    test = _DEQPGroupTest('foo')
+    test = _DEQPGroupTest('foo', [])
    test.result.returncode = 0
    test.result.out = out
    test.interpret_result()
@@ -490,7 +490,7 @@ def test_DEQPGroupTest_interpret_result_cts():

 def test_DEQPGroupTest_interpret_result_nonzero():
     """test.deqp.DEQPGroupTest.interpret_results: if returncode is nonzero test is crash"""
-    test = _DEQPGroupTest('foo')
+    test = _DEQPGroupTest('foo', [])
    test.result.returncode = -6
    test.interpret_result()
    nt.eq_(test.result.result, 'crash')
@@ -499,8 +499,8 @@ def test_DEQPGroupTest_interpret_result_nonzero():
 @utils.skip(not (sys.version_info[0:2] >= (3, 4) or
                  float(mock.__version__[:3]) >= 1.2),
             'Test requires that mock.mock_open provides readline method.')
-def test_iter_deqp_test_groups():
-    """deqp._test_deqp_test_groups: Returns expected values"""
+def test_iter_test_groups():
+    """deqp._iter_test_groups: Returns expected values"""
    text = textwrap.dedent("""\
        GROUP: dEQP-GLES2.info
        TEST: dEQP-GLES2.info.vendor
@@ -529,10 +529,28 @@ def test_iter_deqp_test_groups():
    """)

    expected = [
-        'dEQP-GLES2.info',
-        'dEQP-GLES2.capability.limits',
-        'dEQP-GLES2.capability.limits_lower',
-        'dEQP-GLES2.capability.extensions.uncompressed_texture_formats',
+        ('dEQP-GLES2.info',
+         ['dEQP-GLES2.info.vendor',
+          'dEQP-GLES2.info.renderer',
+          'dEQP-GLES2.info.version',
+          'dEQP-GLES2.info.shading_language_version',
+          'dEQP-GLES2.info.extensions',
+          'dEQP-GLES2.info.render_target']),
+        ('dEQP-GLES2.capability.limits',
+         ['dEQP-GLES2.capability.limits.vertex_attribs',
+          'dEQP-GLES2.capability.limits.varying_vectors',
+          'dEQP-GLES2.capability.limits.vertex_uniform_vectors',
+          'dEQP-GLES2.capability.limits.fragment_uniform_vectors',
+          'dEQP-GLES2.capability.limits.texture_image_units',
+          'dEQP-GLES2.capability.limits.vertex_texture_image_units',
+          'dEQP-GLES2.capability.limits.combined_texture_image_units',
+          'dEQP-GLES2.capability.limits.texture_2d_size',
+          'dEQP-GLES2.capability.limits.texture_cube_size',
+          'dEQP-GLES2.capability.limits.renderbuffer_size']),
+        ('dEQP-GLES2.capability.limits_lower',
+         ['dEQP-GLES2.capability.limits_lower.minimum_size']),
+        ('dEQP-GLES2.capability.extensions.uncompressed_texture_formats',
+         ['dEQP-GLES2.capability.extensions.uncompressed_texture_formats.foo']),
    ]

    with mock.patch('framework.test.deqp.open', create=True,
--
2.7.4

_______________________________________________
Piglit mailing list
Piglit@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/piglit