[Piglit] [PATCH 11/27] framework/profile: Don't merge profiles

Dylan Baker dylan at pnwbakers.com
Mon Oct 24 19:54:57 UTC 2016


Because we can copy profiles, we don't need to merge them to run more
than one of them. Instead we can simply have a list of profiles, and run
them one by one. One side effect of this is that tests will be run one
profile at a time, so if running without the -1/--no-concurrency or
-c/--all-concurrent options, tests will run in a sort of zipper pattern:
<p1 concurrent>, <p1 non-concurrent>, <p2 concurrent>, etc.

Signed-off-by: Dylan Baker <dylanx.c.baker at intel.com>
---
 framework/profile.py                | 93 +++++++++++-------------------
 framework/programs/run.py           | 33 ++++++-----
 unittests/framework/test_profile.py | 17 +-----
 3 files changed, 55 insertions(+), 88 deletions(-)

diff --git a/framework/profile.py b/framework/profile.py
index 73a8a96..ed8166c 100644
--- a/framework/profile.py
+++ b/framework/profile.py
@@ -54,7 +54,6 @@ __all__ = [
     'ConcurrentMode',
     'TestProfile',
     'load_test_profile',
-    'merge_test_profiles'
 ]
 
 
@@ -310,21 +309,6 @@ class TestProfile(object):
         """
         self.filters.append(function)
 
-    def update(self, *profiles):
-        """ Updates the contents of this TestProfile instance with another
-
-        This method overwrites key:value pairs in self with those in the
-        provided profiles argument. This allows multiple TestProfiles to be
-        called in the same run; which could be used to run piglit and external
-        suites at the same time.
-
-        Arguments:
-        profiles -- one or more TestProfile-like objects to be merged.
-
-        """
-        for profile in profiles:
-            self.test_list.update(profile.test_list)
-
     @contextlib.contextmanager
     def group_manager(self, test_class, group, prefix=None, **default_args):
         """A context manager to make working with flat groups simple.
@@ -453,24 +437,7 @@ def load_test_profile(filename):
             'Check your spelling?'.format(filename))
 
 
-def merge_test_profiles(profiles):
-    """ Helper for loading and merging TestProfile instances
-
-    Takes paths to test profiles as arguments and returns a single merged
-    TestProfile instance.
-
-    Arguments:
-    profiles -- a list of one or more paths to profile files.
-
-    """
-    profile = load_test_profile(profiles.pop())
-    with profile.allow_reassignment:
-        for p in profiles:
-            profile.update(load_test_profile(p))
-    return profile
-
-
-def run(profile, logger, backend, concurrency):
+def run(profiles, logger, backend, concurrency):
     """Runs all tests using Thread pool.
 
     When called this method will flatten out self.tests into self.test_list,
@@ -484,30 +451,50 @@ def run(profile, logger, backend, concurrency):
     Finally it will print a final summary of the tests.
 
     Arguments:
-    profile -- a Profile ojbect.
-    logger  -- a log.LogManager instance.
-    backend -- a results.Backend derived instance.
+    profiles -- a list of Profile instances.
+    logger   -- a log.LogManager instance.
+    backend  -- a results.Backend derived instance.
     """
     chunksize = 1
 
-    profile.prepare_test_list()
-    log = LogManager(logger, len(profile.test_list))
+    for p in profiles:
+        p.prepare_test_list()
+    log = LogManager(logger, sum(len(p.test_list) for p in profiles))
 
-    def test(pair, this_pool=None):
+    def test(name, test, profile, this_pool=None):
         """Function to call test.execute from map"""
-        name, test = pair
         with backend.write_test(name) as w:
             test.execute(name, log.get(), profile.dmesg, profile.monitoring)
             w(test.result)
         if profile.monitoring.abort_needed:
             this_pool.terminate()
 
-    def run_threads(pool, testlist):
+    def run_threads(pool, profile, filterby=None):
         """ Open a pool, close it, and join it """
-        pool.imap(lambda pair: test(pair, pool), testlist, chunksize)
+        iterable = six.iteritems(profile.test_list)
+        if filterby:
+            iterable = (x for x in iterable if filterby(x))
+
+        pool.imap(lambda pair: test(pair[0], pair[1], profile, pool),
+                  iterable, chunksize)
         pool.close()
         pool.join()
 
+    def run_profile(profile):
+        """Run an individual profile."""
+        if concurrency is ConcurrentMode.full:
+            run_threads(multi, profile)
+        elif concurrency is ConcurrentMode.none:
+            run_threads(single, profile)
+        else:
+            assert concurrency is ConcurrentMode.some
+            # Filter and return only thread safe tests to the threaded pool
+            run_threads(multi, profile, lambda x: x[1].run_concurrent)
+
+            # Filter and return the non thread safe tests to the single
+            # pool
+            run_threads(single, profile, lambda x: not x[1].run_concurrent)
+
     # Multiprocessing.dummy is a wrapper around Threading that provides a
     # multiprocessing compatible API
     #
@@ -516,21 +503,11 @@ def run(profile, logger, backend, concurrency):
     multi = multiprocessing.dummy.Pool()
 
     try:
-        if concurrency is ConcurrentMode.full:
-            run_threads(multi, six.iteritems(profile.test_list))
-        elif concurrency is ConcurrentMode.none:
-            run_threads(single, six.iteritems(profile.test_list))
-        else:
-            assert concurrency is ConcurrentMode.some
-            # Filter and return only thread safe tests to the threaded pool
-            run_threads(multi, (x for x in six.iteritems(profile.test_list)
-                                if x[1].run_concurrent))
-            # Filter and return the non thread safe tests to the single
-            # pool
-            run_threads(single, (x for x in six.iteritems(profile.test_list)
-                                 if not x[1].run_concurrent))
+        for p in profiles:
+            run_profile(p)
     finally:
         log.get().summary()
 
-    if profile.monitoring.abort_needed:
-        raise exceptions.PiglitAbort(profile.monitoring.error_message)
+    for p in profiles:
+        if p.monitoring.abort_needed:
+            raise exceptions.PiglitAbort(p.monitoring.error_message)
diff --git a/framework/programs/run.py b/framework/programs/run.py
index fefe7af..b30b292 100644
--- a/framework/programs/run.py
+++ b/framework/programs/run.py
@@ -312,8 +312,10 @@ def run(input_):
     backend.initialize(_create_metadata(
         args, args.name or path.basename(args.results_path)))
 
-    profile = framework.profile.merge_test_profiles(args.test_profile)
-    profile.results_dir = args.results_path
+    profiles = [framework.profile.load_test_profile(p) for p in args.test_profile]
+    for p in profiles:
+        p.results_dir = args.results_path
+
     # If a test list is provided then set the forced_test_list value.
     if args.test_list:
         if len(args.test_profiles) != 1:
@@ -322,18 +324,20 @@ def run(input_):
 
         with open(args.test_list) as test_list:
             # Strip newlines
-            profile.forced_test_list = list([t.strip() for t in test_list])
+            profiles[0].forced_test_list = list([t.strip() for t in test_list])
 
     # Set the dmesg type
     if args.dmesg:
-        profile.dmesg = args.dmesg
+        for p in profiles:
+            p.dmesg = args.dmesg
 
     if args.monitored:
-        profile.monitoring = args.monitored
+        for p in profiles:
+            p.monitoring = args.monitored
 
     time_elapsed = time.time()
 
-    framework.profile.run(profile, args.log_level, backend, args.concurrent)
+    framework.profile.run(profiles, args.log_level, backend, args.concurrent)
 
     time_elapsed = time.time() - time_elapsed
     backend.finalize({'time_elapsed': time_elapsed})
@@ -391,17 +395,20 @@ def resume(input_):
         if args.no_retry or result.result != 'incomplete':
             options.OPTIONS.exclude_tests.add(name)
 
-    profile = framework.profile.merge_test_profiles(results.options['profile'])
-    profile.results_dir = args.results_path
-    if options.OPTIONS.dmesg:
-        profile.dmesg = options.OPTIONS.dmesg
+    profiles = [framework.profile.load_test_profile(p)
+                for p in results.options['profile']]
+    for p in profiles:
+        p.results_dir = args.results_path
+
+        if options.OPTIONS.dmesg:
+            p.dmesg = options.OPTIONS.dmesg
 
-    if options.OPTIONS.monitored:
-        profile.monitoring = options.OPTIONS.monitored
+        if options.OPTIONS.monitored:
+            p.monitoring = options.OPTIONS.monitored
 
     # This is resumed, don't bother with time since it won't be accurate anyway
     framework.profile.run(
-        profile,
+        profiles,
         results.options['log_level'],
         backend,
         framework.profile.ConcurrentMode[results.options['concurrent']])
diff --git a/unittests/framework/test_profile.py b/unittests/framework/test_profile.py
index 5ef95e4..4ffabfa 100644
--- a/unittests/framework/test_profile.py
+++ b/unittests/framework/test_profile.py
@@ -101,23 +101,6 @@ class TestTestProfile(object):
         profile_.dmesg = False
         assert isinstance(profile_.dmesg, dmesg.DummyDmesg)
 
-    def test_update_test_list(self):
-        """profile.TestProfile.update(): updates TestProfile.test_list"""
-        profile1 = profile.TestProfile()
-        group1 = grouptools.join('group1', 'test1')
-        group2 = grouptools.join('group1', 'test2')
-
-        profile1.test_list[group1] = utils.Test(['test1'])
-
-        profile2 = profile.TestProfile()
-        profile2.test_list[group1] = utils.Test(['test3'])
-        profile2.test_list[group2] = utils.Test(['test2'])
-
-        with profile1.allow_reassignment:
-            profile1.update(profile2)
-
-        assert dict(profile1.test_list) == dict(profile2.test_list)
-
     class TestPrepareTestList(object):
         """Create tests for TestProfile.prepare_test_list filtering."""
 
-- 
git-series 0.8.10


More information about the Piglit mailing list