[Piglit] [PATCH v2 3/26] framework: Split the run method out of profile.
Dylan Baker
dylan at pnwbakers.com
Thu Oct 27 21:31:31 UTC 2016
There are a couple of reasons for doing this. First, profile is a big
complex mess that does entirely too much, and this helps with that.
Second, there are bugs in the way running two profiles at the same time
works, and this is going to fix that.
Signed-off-by: Dylan Baker <dylanx.c.baker at intel.com>
---
framework/profile.py | 131 ++++++++++++++---------------
framework/programs/run.py | 4 +-
unittests/framework/test_profile.py | 10 +-
3 files changed, 73 insertions(+), 72 deletions(-)
diff --git a/framework/profile.py b/framework/profile.py
index 53a17b7..b10f817 100644
--- a/framework/profile.py
+++ b/framework/profile.py
@@ -244,7 +244,7 @@ class TestProfile(object):
"""
self._monitoring = Monitoring(monitored)
- def _prepare_test_list(self):
+ def prepare_test_list(self):
""" Prepare tests for running
Flattens the nested group hierarchy into a flat dictionary using '/'
@@ -287,70 +287,6 @@ class TestProfile(object):
raise exceptions.PiglitFatalError(
'There are no tests scheduled to run. Aborting run.')
- def run(self, logger, backend):
- """ Runs all tests using Thread pool
-
- When called this method will flatten out self.tests into
- self.test_list, then will prepare a logger, and begin executing tests
- through it's Thread pools.
-
- Based on the value of options.OPTIONS.concurrent it will either run all
- the tests concurrently, all serially, or first the thread safe tests
- then the serial tests.
-
- Finally it will print a final summary of the tests
-
- Arguments:
- backend -- a results.Backend derived instance
-
- """
-
- chunksize = 1
-
- self._prepare_test_list()
- log = LogManager(logger, len(self.test_list))
-
- def test(pair, this_pool=None):
- """Function to call test.execute from map"""
- name, test = pair
- with backend.write_test(name) as w:
- test.execute(name, log.get(), self.dmesg, self.monitoring)
- w(test.result)
- if self._monitoring.abort_needed:
- this_pool.terminate()
-
- def run_threads(pool, testlist):
- """ Open a pool, close it, and join it """
- pool.imap(lambda pair: test(pair, pool), testlist, chunksize)
- pool.close()
- pool.join()
-
- # Multiprocessing.dummy is a wrapper around Threading that provides a
- # multiprocessing compatible API
- #
- # The default value of pool is the number of virtual processor cores
- single = multiprocessing.dummy.Pool(1)
- multi = multiprocessing.dummy.Pool()
-
- try:
- if options.OPTIONS.concurrent == "all":
- run_threads(multi, six.iteritems(self.test_list))
- elif options.OPTIONS.concurrent == "none":
- run_threads(single, six.iteritems(self.test_list))
- else:
- # Filter and return only thread safe tests to the threaded pool
- run_threads(multi, (x for x in six.iteritems(self.test_list)
- if x[1].run_concurrent))
- # Filter and return the non thread safe tests to the single
- # pool
- run_threads(single, (x for x in six.iteritems(self.test_list)
- if not x[1].run_concurrent))
- finally:
- log.get().summary()
-
- if self._monitoring.abort_needed:
- raise exceptions.PiglitAbort(self._monitoring.error_message)
-
def filter_tests(self, function):
"""Filter out tests that return false from the supplied function
@@ -504,3 +440,68 @@ def merge_test_profiles(profiles):
for p in profiles:
profile.update(load_test_profile(p))
return profile
+
+
+def run(profile, logger, backend):
+ """Runs all tests using Thread pool.
+
+ When called this function will flatten out profile.tests into
+ profile.test_list, then will prepare a logger, and begin executing tests
+ through its Thread pools.
+
+ Based on the value of options.OPTIONS.concurrent it will either run all the
+ tests concurrently, all serially, or first the thread safe tests then the
+ serial tests.
+
+ Finally it will print a final summary of the tests.
+
+ Arguments:
+ profile -- a Profile object.
+ logger -- the log level name used to construct the LogManager
+ (callers pass e.g. args.log_level).
+ backend -- a results.Backend derived instance.
+ """
+ chunksize = 1
+
+ profile.prepare_test_list()
+ log = LogManager(logger, len(profile.test_list))
+
+ def test(pair, this_pool=None):
+ """Function to call test.execute from map"""
+ name, test = pair
+ with backend.write_test(name) as w:
+ test.execute(name, log.get(), profile.dmesg, profile.monitoring)
+ w(test.result)
+ if profile.monitoring.abort_needed:
+ this_pool.terminate()
+
+ def run_threads(pool, testlist):
+ """ Open a pool, close it, and join it """
+ pool.imap(lambda pair: test(pair, pool), testlist, chunksize)
+ pool.close()
+ pool.join()
+
+ # Multiprocessing.dummy is a wrapper around Threading that provides a
+ # multiprocessing compatible API
+ #
+ # The default value of pool is the number of virtual processor cores
+ single = multiprocessing.dummy.Pool(1)
+ multi = multiprocessing.dummy.Pool()
+
+ try:
+ if options.OPTIONS.concurrent == "all":
+ run_threads(multi, six.iteritems(profile.test_list))
+ elif options.OPTIONS.concurrent == "none":
+ run_threads(single, six.iteritems(profile.test_list))
+ else:
+ # Filter and return only thread safe tests to the threaded pool
+ run_threads(multi, (x for x in six.iteritems(profile.test_list)
+ if x[1].run_concurrent))
+ # Filter and return the non thread safe tests to the single
+ # pool
+ run_threads(single, (x for x in six.iteritems(profile.test_list)
+ if not x[1].run_concurrent))
+ finally:
+ log.get().summary()
+
+ if profile.monitoring.abort_needed:
+ raise exceptions.PiglitAbort(profile.monitoring.error_message)
diff --git a/framework/programs/run.py b/framework/programs/run.py
index 5a2fcd0..023aa2e 100644
--- a/framework/programs/run.py
+++ b/framework/programs/run.py
@@ -336,7 +336,7 @@ def run(input_):
if args.monitored:
profile.monitoring = args.monitored
- profile.run(args.log_level, backend)
+ framework.profile.run(profile, args.log_level, backend)
results.time_elapsed.end = time.time()
backend.finalize({'time_elapsed': results.time_elapsed.to_json()})
@@ -404,7 +404,7 @@ def resume(input_):
profile.monitoring = options.OPTIONS.monitored
# This is resumed, don't bother with time since it won't be accurate anyway
- profile.run(results.options['log_level'], backend)
+ framework.profile.run(profile, results.options['log_level'], backend)
backend.finalize()
diff --git a/unittests/framework/test_profile.py b/unittests/framework/test_profile.py
index 4ff9ea5..6671349 100644
--- a/unittests/framework/test_profile.py
+++ b/unittests/framework/test_profile.py
@@ -152,7 +152,7 @@ class TestTestProfile(object):
"""
profile_ = profile.TestProfile()
profile_.test_list = self.data
- profile_._prepare_test_list()
+ profile_.prepare_test_list()
assert dict(profile_.test_list) == dict(self.data)
@@ -164,7 +164,7 @@ class TestTestProfile(object):
profile_ = profile.TestProfile()
profile_.test_list = self.data
- profile_._prepare_test_list()
+ profile_.prepare_test_list()
baseline = {
grouptools.join('group3', 'test5'): utils.Test(['other'])}
@@ -184,7 +184,7 @@ class TestTestProfile(object):
profile_ = profile.TestProfile()
profile_.test_list = self.data
- profile_._prepare_test_list()
+ profile_.prepare_test_list()
assert dict(profile_.test_list) == dict(baseline)
@@ -199,7 +199,7 @@ class TestTestProfile(object):
profile_ = profile.TestProfile()
profile_.test_list = self.data
- profile_._prepare_test_list()
+ profile_.prepare_test_list()
assert dict(profile_.test_list) == dict(baseline)
@@ -211,7 +211,7 @@ class TestTestProfile(object):
profile_ = profile.TestProfile()
profile_.test_list = self.data
- profile_._prepare_test_list()
+ profile_.prepare_test_list()
assert grouptools.join('group4', 'Test9') not in profile_.test_list
--
git-series 0.8.10
More information about the Piglit
mailing list