[Piglit] [Patch v7 05/13] core.py: rename Environment to Options

Dylan Baker <baker.dylan.c at gmail.com>
Mon Jun 23 16:38:25 PDT 2014


Environment doesn't really describe what this class actually is
(although, honestly, it's a pretty bad class design either way);
Options comes much closer to describing what it holds.
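
To illustrate, here is a minimal sketch of a caller after this series
(the argument values are just an example; before this series the same
code would use core.Environment and Test.ENV):

    from framework import core
    import framework.exectest

    # construct the run options (previously core.Environment)
    opts = core.Options(concurrent="all", valgrind=True)

    # make them visible to every Test (previously Test.ENV)
    framework.exectest.Test.OPTS = opts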

v7: - update piglit-print-commands.py
    - replace instance name 'env' with 'opts'
    - rename Test.ENV to Test.OPTS

Signed-off-by: Dylan Baker <baker.dylan.c at gmail.com>
---
 framework/core.py                | 17 +++++++++++++--
 framework/exectest.py            | 20 ++++++++---------
 framework/profile.py             | 32 ++++++++++++++--------------
 framework/programs/run.py        | 46 ++++++++++++++++++++--------------------
 framework/tests/core_tests.py    |  2 +-
 framework/tests/profile_tests.py |  8 +++----
 piglit-print-commands.py         |  7 +++---
 7 files changed, 72 insertions(+), 60 deletions(-)

diff --git a/framework/core.py b/framework/core.py
index 6f476d0..2005a4e 100644
--- a/framework/core.py
+++ b/framework/core.py
@@ -40,7 +40,7 @@ import framework.status as status
 from .threads import synchronized_self
 
 __all__ = ['PIGLIT_CONFIG',
-           'Environment',
+           'Options',
            'TestrunResult',
            'TestResult',
            'JSONWriter',
@@ -326,7 +326,20 @@ class TestrunResult:
         json.dump(raw_dict, file, indent=JSONWriter.INDENT)
 
 
-class Environment:
+class Options(object):
+    """ Contains options for a piglit run
+
+    Options are as follows:
+    concurrent -- "all", "none", or any other value for mixed concurrency
+    execute -- False for dry run
+    filter -- list of compiled regexes; if set, only matching tests are run
+    exclude_filter -- list of compiled regexes; matching tests are skipped
+    valgrind -- True if valgrind is to be used
+    dmesg -- True if dmesg checking is desired. This forces concurrency off
+    verbose -- True for verbose output
+    env -- environment variables set for each test before it runs
+
+    """
     def __init__(self, concurrent=True, execute=True, include_filter=None,
                  exclude_filter=None, valgrind=False, dmesg=False,
                  verbose=False):
diff --git a/framework/exectest.py b/framework/exectest.py
index 990b87d..2df49f5 100644
--- a/framework/exectest.py
+++ b/framework/exectest.py
@@ -29,7 +29,7 @@ import sys
 import traceback
 import itertools
 
-from .core import TestResult, Environment
+from .core import TestResult, Options
 
 
 __all__ = ['Test',
@@ -50,8 +50,8 @@ else:
 
 
 class Test(object):
-    ENV = Environment()
-    __slots__ = ['ENV', 'run_concurrent', 'env', 'result', 'cwd', '_command',
+    OPTS = Options()
+    __slots__ = ['run_concurrent', 'env', 'result', 'cwd', '_command',
                  '_test_hook_execute_run']
 
     def __init__(self, command, run_concurrent=False):
@@ -79,10 +79,10 @@ class Test(object):
             Fully qualified test name as a string.  For example,
             ``spec/glsl-1.30/preprocessor/compiler/keywords/void.frag``.
         '''
-        log_current = log.pre_log(path if self.ENV.verbose else None)
+        log_current = log.pre_log(path if self.OPTS.verbose else None)
 
         # Run the test
-        if self.ENV.execute:
+        if self.OPTS.execute:
             try:
                 time_start = time.time()
                 dmesg.update_dmesg()
@@ -110,7 +110,7 @@ class Test(object):
     @property
     def command(self):
         assert self._command
-        if self.ENV.valgrind:
+        if self.OPTS.valgrind:
             return ['valgrind', '--quiet', '--error-exitcode=1',
                     '--tool=memcheck'] + self._command
         return self._command
@@ -138,7 +138,7 @@ class Test(object):
         self.result['command'] = ' '.join(self.command)
         self.result['environment'] = " ".join(
             '{0}="{1}"'.format(k, v) for k, v in itertools.chain(
-                self.ENV.env.iteritems(), self.env.iteritems()))
+                self.OPTS.env.iteritems(), self.env.iteritems()))
 
         if self.check_for_skip_scenario():
             self.result['result'] = 'skip'
@@ -168,7 +168,7 @@ class Test(object):
         elif self.result['returncode'] != 0 and self.result['result'] == 'pass':
             self.result['result'] = 'warn'
 
-        if self.ENV.valgrind:
+        if self.OPTS.valgrind:
             # If the underlying test failed, simply report
             # 'skip' for this valgrind test.
             if self.result['result'] != 'pass':
@@ -191,10 +191,10 @@ class Test(object):
 
     def get_command_result(self):
         # Set the environment for the tests. Use the default settings created
-        # in the Environment constructor first, then use any user defined
+        # in the Options constructor first, then use any user defined
         # variables, finally, use any variables set for the test in the test
         # profile
-        fullenv = self.ENV.env.copy()
+        fullenv = self.OPTS.env.copy()
         for key, value in itertools.chain(self.env.iteritems(),
                                           os.environ.iteritems()):
             fullenv[key] = str(value)
diff --git a/framework/profile.py b/framework/profile.py
index affd4b6..4242a98 100644
--- a/framework/profile.py
+++ b/framework/profile.py
@@ -113,7 +113,7 @@ class TestProfile(object):
         # Clear out the old Group()
         self.tests = {}
 
-    def _prepare_test_list(self, env):
+    def _prepare_test_list(self, opts):
         """ Prepare tests for running
 
         Flattens the nested group hierarchy into a flat dictionary using '/'
@@ -121,7 +121,7 @@ class TestProfile(object):
         runs it's own filters plus the filters in the self.filters name
 
         Arguments:
-        env - a core.Environment instance
+        opts - a core.Options instance
 
         """
         self._flatten_group_hierarchy()
@@ -132,9 +132,9 @@ class TestProfile(object):
         # The extra argument is needed to match check_all's API
         def test_matches(path, test):
             """Filter for user-specified restrictions"""
-            return ((not env.filter or matches_any_regexp(path, env.filter))
-                    and not path in env.exclude_tests and
-                    not matches_any_regexp(path, env.exclude_filter))
+            return ((not opts.filter or matches_any_regexp(path, opts.filter))
+                    and not path in opts.exclude_tests and
+                    not matches_any_regexp(path, opts.exclude_filter))
 
         filters = self.filters + [test_matches]
         def check_all(item):
@@ -167,37 +167,37 @@ class TestProfile(object):
         """
         pass
 
-    def run(self, env, json_writer):
+    def run(self, opts, json_writer):
         """ Runs all tests using Thread pool
 
         When called this method will flatten out self.tests into
-        self.test_list, then will prepare a logger, pass env to the Test class,
-        and begin executing tests through it's Thread pools.
+        self.test_list, then will prepare a logger, pass opts to the Test
+        class, and begin executing tests through its Thread pools.
 
-        Based on the value of env.concurrent it will either run all the tests
+        Based on the value of opts.concurrent it will either run all the tests
         concurrently, all serially, or first the thread safe tests then the
         serial tests.
 
         Finally it will print a final summary of the tests
 
         Arguments:
-        env -- a core.Environment instance
+        opts -- a core.Options instance
         json_writer -- a core.JSONWriter instance
 
         """
 
         self._pre_run_hook()
-        framework.exectest.Test.ENV = env
+        framework.exectest.Test.OPTS = opts
 
         chunksize = 1
 
-        self._prepare_test_list(env)
-        log = Log(len(self.test_list), env.verbose)
+        self._prepare_test_list(opts)
+        log = Log(len(self.test_list), opts.verbose)
 
         def test(pair):
             """ Function to call test.execute from .map
 
-            Adds env and json_writer which are needed by Test.execute()
+            Adds opts and json_writer which are needed by Test.execute()
 
             """
             name, test = pair
@@ -216,9 +216,9 @@ class TestProfile(object):
         single = multiprocessing.dummy.Pool(1)
         multi = multiprocessing.dummy.Pool()
 
-        if env.concurrent == "all":
+        if opts.concurrent == "all":
             run_threads(multi, self.test_list.iteritems())
-        elif env.concurrent == "none":
+        elif opts.concurrent == "none":
             run_threads(single, self.test_list.iteritems())
         else:
             # Filter and return only thread safe tests to the threaded pool
diff --git a/framework/programs/run.py b/framework/programs/run.py
index 758c845..d3e9830 100644
--- a/framework/programs/run.py
+++ b/framework/programs/run.py
@@ -130,18 +130,18 @@ def run(input_):
             os.path.join(
                 os.path.dirname(__file__), '..', '..', 'piglit.conf')))
 
-    # Pass arguments into Environment
-    env = core.Environment(concurrent=args.concurrency,
-                           exclude_filter=args.exclude_tests,
-                           include_filter=args.include_tests,
-                           execute=args.execute,
-                           valgrind=args.valgrind,
-                           dmesg=args.dmesg,
-                           verbose=args.verbose)
+    # Pass arguments into Options
+    opts = core.Options(concurrent=args.concurrency,
+                        exclude_filter=args.exclude_tests,
+                        include_filter=args.include_tests,
+                        execute=args.execute,
+                        valgrind=args.valgrind,
+                        dmesg=args.dmesg,
+                        verbose=args.verbose)
 
     # Set the platform to pass to waffle
     if args.platform:
-        env.env['PIGLIT_PLATFORM'] = args.platform
+        opts.env['PIGLIT_PLATFORM'] = args.platform
 
     # Change working directory to the root of the piglit directory
     piglit_dir = path.dirname(path.realpath(sys.argv[0]))
@@ -166,7 +166,7 @@ def run(input_):
     json_writer.write_dict_key('options')
     json_writer.open_dict()
     json_writer.write_dict_item('profile', args.test_profile)
-    for key, value in env:
+    for key, value in opts:
         json_writer.write_dict_item(key, value)
     if args.platform:
         json_writer.write_dict_item('platform', args.platform)
@@ -186,7 +186,7 @@ def run(input_):
     # Set the dmesg type
     if args.dmesg:
         profile.dmesg = args.dmesg
-    profile.run(env, json_writer)
+    profile.run(opts, json_writer)
     time_end = time.time()
 
     json_writer.close_dict()
@@ -211,16 +211,16 @@ def resume(input_):
     args = parser.parse_args(input_)
 
     results = core.load_results(args.results_path)
-    env = core.Environment(concurrent=results.options['concurrent'],
-                           exclude_filter=results.options['exclude_filter'],
-                           include_filter=results.options['filter'],
-                           execute=results.options['execute'],
-                           valgrind=results.options['valgrind'],
-                           dmesg=results.options['dmesg'],
-                           verbose=results.options['verbose'])
+    opts = core.Options(concurrent=results.options['concurrent'],
+                        exclude_filter=results.options['exclude_filter'],
+                        include_filter=results.options['filter'],
+                        execute=results.options['execute'],
+                        valgrind=results.options['valgrind'],
+                        dmesg=results.options['dmesg'],
+                        verbose=results.options['verbose'])
 
     if results.options.get('platform'):
-        env.env['PIGLIT_PLATFORM'] = results.options['platform']
+        opts.env['PIGLIT_PLATFORM'] = results.options['platform']
 
     results_path = path.join(args.results_path, "main")
     json_writer = core.JSONWriter(open(results_path, 'w+'))
@@ -239,15 +239,15 @@ def resume(input_):
     json_writer.open_dict()
     for key, value in results.tests.iteritems():
         json_writer.write_dict_item(key, value)
-        env.exclude_tests.add(key)
+        opts.exclude_tests.add(key)
 
     profile = framework.profile.merge_test_profiles(results.options['profile'])
     profile.results_dir = args.results_path
-    if env.dmesg:
-        profile.dmesg = env.dmesg
+    if opts.dmesg:
+        profile.dmesg = opts.dmesg
 
     # This is resumed, don't bother with time since it wont be accurate anyway
-    profile.run(env, json_writer)
+    profile.run(opts, json_writer)
 
     json_writer.close_dict()
     json_writer.close_dict()
diff --git a/framework/tests/core_tests.py b/framework/tests/core_tests.py
index 44462ce..15858b8 100644
--- a/framework/tests/core_tests.py
+++ b/framework/tests/core_tests.py
@@ -49,7 +49,7 @@ def test_generate_initialize():
     """
     yieldable = check_initialize
 
-    for target in [core.Environment, core.TestrunResult, core.TestResult,
+    for target in [core.Options, core.TestrunResult, core.TestResult,
                    core.PiglitJSONEncoder]:
         yieldable.description = "Test that {} initializes".format(
             target.__name__)
diff --git a/framework/tests/profile_tests.py b/framework/tests/profile_tests.py
index de4730f..c861cfc 100644
--- a/framework/tests/profile_tests.py
+++ b/framework/tests/profile_tests.py
@@ -211,7 +211,7 @@ def test_matches_filter_mar_1(data):
     Nothing should be filtered.
 
     """
-    env = core.Environment()
+    env = core.Options()
 
     profile_ = profile.TestProfile()
     profile_.test_list = data
@@ -223,7 +223,7 @@ def test_matches_filter_mar_1(data):
 @nt.nottest
 def test_matches_filter_mar_2(data):
     """ Tests 'not env.filter or matches_any_regex() mar is False"""
-    env = core.Environment(include_filter=['test5'])
+    env = core.Options(include_filter=['test5'])
 
     profile_ = profile.TestProfile()
     profile_.test_list = data
@@ -237,7 +237,7 @@ def test_matches_filter_mar_2(data):
 @nt.nottest
 def test_matches_env_exclude(data):
     """ Tests 'not path in env.exclude_tests  """
-    env = core.Environment()
+    env = core.Options()
     env.exclude_tests.add('group3/test5')
 
     profile_ = profile.TestProfile()
@@ -253,7 +253,7 @@ def test_matches_env_exclude(data):
 @nt.nottest
 def test_matches_exclude_mar(data):
     """ Tests 'not matches_any_regexp() """
-    env = core.Environment(exclude_filter=['test5'])
+    env = core.Options(exclude_filter=['test5'])
 
     profile_ = profile.TestProfile()
     profile_.test_list = data
diff --git a/piglit-print-commands.py b/piglit-print-commands.py
index d88ab1c..7186326 100755
--- a/piglit-print-commands.py
+++ b/piglit-print-commands.py
@@ -53,9 +53,8 @@ def main():
                         help="Path to results folder")
     args = parser.parse_args()
 
-    # Set the environment, pass in the included and excluded tests
-    env = core.Environment(exclude_filter=args.exclude_tests,
-                           include_filter=args.include_tests)
+    opts = core.Options(exclude_filter=args.exclude_tests,
+                        include_filter=args.include_tests)
 
     # Change to the piglit's path
     piglit_dir = path.dirname(path.realpath(sys.argv[0]))
@@ -76,7 +75,7 @@ def main():
         command += ' '.join(testCommand)
         return command
 
-    profile._prepare_test_list(env)
+    profile._prepare_test_list(opts)
     for name, test in profile.test_list.items():
         assert(isinstance(test, Test))
         print(name, ':::', getCommand(test))
-- 
2.0.0


