[Piglit] [PATCH v4 11/27] framework/profile: Don't merge profiles
Dylan Baker
dylan at pnwbakers.com
Wed Nov 9 20:53:23 UTC 2016
Because we can copy profiles, we don't need to merge them to run more
than one of them. Instead we can simply keep a list of profiles and run
them one by one. One side effect of this is that tests are run one
profile at a time, so when running without the -1/--no-concurrency or
-c/--all-concurrent options, tests will run in a sort of zipper
pattern: <p1 concurrent>, <p1 non-concurrent>, <p2 concurrent>, etc.
Signed-off-by: Dylan Baker <dylanx.c.baker at intel.com>
---
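As a minimal standalone sketch of the run order described above (this is
an illustration only, not part of the patch; the profile and test names
are hypothetical):

# Two hypothetical profiles; True marks a test as thread safe.
profiles = [
    ('p1', [('a', True), ('b', False)]),
    ('p2', [('c', True), ('d', False)]),
]

for name, tests in profiles:
    # In the default "some" mode, the thread safe tests of each profile
    # go to the threaded pool first...
    for test, concurrent in tests:
        if concurrent:
            print('{}: {} (concurrent)'.format(name, test))
    # ...then the remaining tests run on the single-threaded pool.
    for test, concurrent in tests:
        if not concurrent:
            print('{}: {} (non-concurrent)'.format(name, test))

# Prints: p1: a (concurrent), p1: b (non-concurrent),
#         p2: c (concurrent), p2: d (non-concurrent)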
 framework/profile.py                        | 98 ++++++++--------------
 framework/programs/run.py                   | 33 ++++---
 framework/summary/feature.py                |  3 +-
 unittests/framework/summary/test_feature.py | 19 +---
 unittests/framework/test_profile.py         | 17 +----
 5 files changed, 67 insertions(+), 103 deletions(-)
diff --git a/framework/profile.py b/framework/profile.py
index a64855e..e4b8308 100644
--- a/framework/profile.py
+++ b/framework/profile.py
@@ -49,7 +49,6 @@ from framework.test.base import Test

 __all__ = [
     'TestProfile',
     'load_test_profile',
-    'merge_test_profiles'
 ]
@@ -298,21 +297,6 @@ class TestProfile(object):
         """
         self.filters.append(function)

-    def update(self, *profiles):
-        """ Updates the contents of this TestProfile instance with another
-
-        This method overwrites key:value pairs in self with those in the
-        provided profiles argument. This allows multiple TestProfiles to be
-        called in the same run; which could be used to run piglit and external
-        suites at the same time.
-
-        Arguments:
-        profiles -- one or more TestProfile-like objects to be merged.
-
-        """
-        for profile in profiles:
-            self.test_list.update(profile.test_list)
-
     @contextlib.contextmanager
     def group_manager(self, test_class, group, prefix=None, **default_args):
         """A context manager to make working with flat groups simple.
@@ -447,24 +431,7 @@ def load_test_profile(filename):
         'Check your spelling?'.format(filename))


-def merge_test_profiles(profiles):
-    """ Helper for loading and merging TestProfile instances
-
-    Takes paths to test profiles as arguments and returns a single merged
-    TestProfile instance.
-
-    Arguments:
-    profiles -- a list of one or more paths to profile files.
-
-    """
-    profile = load_test_profile(profiles.pop())
-    with profile.allow_reassignment:
-        for p in profiles:
-            profile.update(load_test_profile(p))
-    return profile
-
-
-def run(profile, logger, backend, concurrency):
+def run(profiles, logger, backend, concurrency):
     """Runs all tests using Thread pool.

     When called this method will flatten out self.tests into self.test_list,
@@ -478,30 +445,52 @@ def run(profile, logger, backend, concurrency):
     Finally it will print a final summary of the tests.

     Arguments:
-    profile -- a Profile ojbect.
-    logger -- a log.LogManager instance.
-    backend -- a results.Backend derived instance.
+    profiles -- a list of Profile instances.
+    logger -- a log.LogManager instance.
+    backend -- a results.Backend derived instance.

     """
     chunksize = 1

-    profile.prepare_test_list()
-    log = LogManager(logger, len(profile.test_list))
+    for p in profiles:
+        p.prepare_test_list()
+    log = LogManager(logger, sum(len(p.test_list) for p in profiles))

-    def test(pair, this_pool=None):
+    def test(name, test, profile, this_pool=None):
         """Function to call test.execute from map"""
-        name, test = pair
         with backend.write_test(name) as w:
             test.execute(name, log.get(), profile.dmesg, profile.monitoring)
         w(test.result)
         if profile.monitoring.abort_needed:
             this_pool.terminate()

-    def run_threads(pool, testlist):
+    def run_threads(pool, profile, filterby=None):
         """ Open a pool, close it, and join it """
-        pool.imap(lambda pair: test(pair, pool), testlist, chunksize)
+        iterable = six.iteritems(profile.test_list)
+        if filterby:
+            iterable = (x for x in iterable if filterby(x))
+
+        pool.imap(lambda pair: test(pair[0], pair[1], profile, pool),
+                  iterable, chunksize)
         pool.close()
         pool.join()

+    def run_profile(profile):
+        """Run an individual profile."""
+        profile.setup()
+        if concurrency == "all":
+            run_threads(multi, profile)
+        elif concurrency == "none":
+            run_threads(single, profile)
+        else:
+            assert concurrency == "some"
+            # Filter and return only thread safe tests to the threaded pool
+            run_threads(multi, profile, lambda x: x[1].run_concurrent)
+
+            # Filter and return the non thread safe tests to the single
+            # pool
+            run_threads(single, profile, lambda x: not x[1].run_concurrent)
+        profile.teardown()
+
     # Multiprocessing.dummy is a wrapper around Threading that provides a
     # multiprocessing compatible API
     #
@@ -509,25 +498,12 @@ def run(profile, logger, backend, concurrency):
     single = multiprocessing.dummy.Pool(1)
     multi = multiprocessing.dummy.Pool()

-    profile.setup()
     try:
-        if concurrency == "all":
-            run_threads(multi, six.iteritems(profile.test_list))
-        elif concurrency == "none":
-            run_threads(single, six.iteritems(profile.test_list))
-        else:
-            assert concurrency == "some"
-
-            # Filter and return only thread safe tests to the threaded pool
-            run_threads(multi, (x for x in six.iteritems(profile.test_list)
                                if x[1].run_concurrent))
-            # Filter and return the non thread safe tests to the single
-            # pool
-            run_threads(single, (x for x in six.iteritems(profile.test_list)
                                 if not x[1].run_concurrent))
+        for p in profiles:
+            run_profile(p)
     finally:
         log.get().summary()
-        profile.teardown()

-    if profile.monitoring.abort_needed:
-        raise exceptions.PiglitAbort(profile.monitoring.error_message)
+    for p in profiles:
+        if p.monitoring.abort_needed:
+            raise exceptions.PiglitAbort(p.monitoring.error_message)
diff --git a/framework/programs/run.py b/framework/programs/run.py
index 8af8448..9e82bba 100644
--- a/framework/programs/run.py
+++ b/framework/programs/run.py
@@ -312,8 +312,10 @@ def run(input_):
     backend.initialize(_create_metadata(
         args, args.name or path.basename(args.results_path)))

-    profile = framework.profile.merge_test_profiles(args.test_profile)
-    profile.results_dir = args.results_path
+    profiles = [framework.profile.load_test_profile(p) for p in args.test_profile]
+    for p in profiles:
+        p.results_dir = args.results_path
+
     # If a test list is provided then set the forced_test_list value.
     if args.test_list:
         if len(args.test_profiles) != 1:
@@ -322,18 +324,20 @@ def run(input_):

         with open(args.test_list) as test_list:
             # Strip newlines
-            profile.forced_test_list = list([t.strip() for t in test_list])
+            profiles[0].forced_test_list = list([t.strip() for t in test_list])

     # Set the dmesg type
     if args.dmesg:
-        profile.dmesg = args.dmesg
+        for p in profiles:
+            p.dmesg = args.dmesg

     if args.monitored:
-        profile.monitoring = args.monitored
+        for p in profiles:
+            p.monitoring = args.monitored

     time_elapsed = framework.results.TimeAttribute(start=time.time())

-    framework.profile.run(profile, args.log_level, backend, args.concurrency)
+    framework.profile.run(profiles, args.log_level, backend, args.concurrency)

     time_elapsed.end = time.time()
     backend.finalize({'time_elapsed': time_elapsed.to_json()})
@@ -391,17 +395,20 @@ def resume(input_):
         if args.no_retry or result.result != 'incomplete':
             options.OPTIONS.exclude_tests.add(name)

-    profile = framework.profile.merge_test_profiles(results.options['profile'])
-    profile.results_dir = args.results_path
-    if options.OPTIONS.dmesg:
-        profile.dmesg = options.OPTIONS.dmesg
+    profiles = [framework.profile.load_test_profile(p)
+                for p in results.options['profile']]
+    for p in profiles:
+        p.results_dir = args.results_path
+
+        if options.OPTIONS.dmesg:
+            p.dmesg = options.OPTIONS.dmesg

-    if options.OPTIONS.monitored:
-        profile.monitoring = options.OPTIONS.monitored
+        if options.OPTIONS.monitored:
+            p.monitoring = options.OPTIONS.monitored

     # This is resumed, don't bother with time since it won't be accurate anyway
     framework.profile.run(
-        profile,
+        profiles,
         results.options['log_level'],
         backend,
         results.options['concurrent'])
diff --git a/framework/summary/feature.py b/framework/summary/feature.py
index 9a17792..a66a49b 100644
--- a/framework/summary/feature.py
+++ b/framework/summary/feature.py
@@ -66,8 +66,7 @@ class FeatResults(object):  # pylint: disable=too-few-public-methods
             options.OPTIONS.exclude_filter = exclude_filter
             options.OPTIONS.include_filter = include_filter

-            profiles[feature] = profile.TestProfile()
-            profiles[feature].update(profile_orig)
+            profiles[feature] = profile_orig.copy()

             # An empty list will raise PiglitFatalError exception
             # But for reporting we need to handle this situation
diff --git a/unittests/framework/summary/test_feature.py b/unittests/framework/summary/test_feature.py
index 370b360..fc05941 100644
--- a/unittests/framework/summary/test_feature.py
+++ b/unittests/framework/summary/test_feature.py
@@ -65,16 +65,15 @@ def _maketest(res):


 PROFILE = profile.TestProfile()
-PROFILE.test_list = {
-    'spec@gl-1.0@a': _maketest('pass'),
-    'spec@gl-1.0@b': _maketest('warn'),
-    'spec@gl-1.0@c': _maketest('pass'),
-    'spec@gl-1.0@d': _maketest('fail'),
-    'spec@gl-2.0@a': _maketest('fail'),
-    'spec@gl-2.0@b': _maketest('crash'),
-    'spec@gl-2.0@c': _maketest('pass'),
-    'spec@gl-2.0@d': _maketest('fail'),
-}
+PROFILE.test_list = profile.TestDict()
+PROFILE.test_list['spec@gl-1.0@a'] = _maketest('pass')
+PROFILE.test_list['spec@gl-1.0@b'] = _maketest('warn')
+PROFILE.test_list['spec@gl-1.0@c'] = _maketest('pass')
+PROFILE.test_list['spec@gl-1.0@d'] = _maketest('fail')
+PROFILE.test_list['spec@gl-2.0@a'] = _maketest('fail')
+PROFILE.test_list['spec@gl-2.0@b'] = _maketest('crash')
+PROFILE.test_list['spec@gl-2.0@c'] = _maketest('pass')
+PROFILE.test_list['spec@gl-2.0@d'] = _maketest('fail')


 class TestFeatResult(object):
diff --git a/unittests/framework/test_profile.py b/unittests/framework/test_profile.py
index 5ef95e4..4ffabfa 100644
--- a/unittests/framework/test_profile.py
+++ b/unittests/framework/test_profile.py
@@ -101,23 +101,6 @@ class TestTestProfile(object):
         profile_.dmesg = False
         assert isinstance(profile_.dmesg, dmesg.DummyDmesg)

-    def test_update_test_list(self):
-        """profile.TestProfile.update(): updates TestProfile.test_list"""
-        profile1 = profile.TestProfile()
-        group1 = grouptools.join('group1', 'test1')
-        group2 = grouptools.join('group1', 'test2')
-
-        profile1.test_list[group1] = utils.Test(['test1'])
-
-        profile2 = profile.TestProfile()
-        profile2.test_list[group1] = utils.Test(['test3'])
-        profile2.test_list[group2] = utils.Test(['test2'])
-
-        with profile1.allow_reassignment:
-            profile1.update(profile2)
-
-        assert dict(profile1.test_list) == dict(profile2.test_list)
-

 class TestPrepareTestList(object):
     """Create tests for TestProfile.prepare_test_list filtering."""
--
git-series 0.8.10