[Piglit] [PATCH 2/2] framework: Convert the codebase to use the new global Options
Dylan Baker <baker.dylan.c@gmail.com>
Fri Oct 30 16:21:45 PDT 2015
From: Dylan Baker <baker.dylan.c@gmail.com>
This is the final step in the change-over from the old options model to
the new one. After this change Options is a global value; instances are
no longer passed around or created anew (except for testing).
Signed-off-by: Dylan Baker <dylanx.c.baker@intel.com>
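In sketch form, the new model replaces per-run Options instances with a
shared singleton. A minimal illustration, assuming the _Options class
added in patch 1/2 (the filter value here is hypothetical):

    from framework import options

    # Entry points set the shared singleton once, up front
    # (hypothetical value):
    options.OPTIONS.include_filter = ['glsl']

    # Everything else reads it directly instead of receiving an
    # Options argument:
    if options.OPTIONS.execute:
        pass  # the test would run here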
---
framework/backends/abstract.py | 16 ++++------
framework/core.py | 50 -----------------------------
framework/options.py | 13 ++++----
framework/profile.py | 55 ++++++++++++--------------------
framework/programs/run.py | 62 +++++++++++++++++-------------------
framework/test/base.py | 14 ++++----
framework/test/gleantest.py | 6 ++--
framework/test/piglit_test.py | 4 +--
framework/tests/backends_tests.py | 8 ++---
framework/tests/base_tests.py | 38 ++++++++++++++--------
framework/tests/core_tests.py | 27 +++-------------
framework/tests/gleantest_tests.py | 17 ++++++----
framework/tests/json_tests.py | 8 ++---
framework/tests/piglit_test_tests.py | 26 +++++++++------
framework/tests/profile_tests.py | 33 +++++++++++--------
piglit-print-commands.py | 12 +++----
tests/igt.py | 6 ++--
tests/xts.py | 2 +-
18 files changed, 168 insertions(+), 229 deletions(-)
diff --git a/framework/backends/abstract.py b/framework/backends/abstract.py
index 26151f4..b8540dd 100644
--- a/framework/backends/abstract.py
+++ b/framework/backends/abstract.py
@@ -32,6 +32,7 @@ import itertools
import os
import shutil
+from framework import options
from . import compression
from framework.results import TestResult
from framework.status import INCOMPLETE
@@ -49,7 +50,7 @@ def write_compressed(filename):
"""
mode = compression.get_mode()
if mode != 'none':
- # if the suffix (final .xxx) is a knwon compression suffix
+ # if the suffix (final .xxx) is a known compression suffix
suffix = os.path.splitext(filename)[1]
if suffix in compression.COMPRESSION_SUFFIXES:
filename = '{}.{}'.format(os.path.splitext(filename)[0], mode)
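The net effect of the suffix check: a destination that already carries a
known compression suffix is renamed to match the active mode. An
illustration (assuming '.gz' appears in compression.COMPRESSION_SUFFIXES):

    # mode == 'xz', filename == 'results.json.gz'
    # -> suffix '.gz' is recognized
    # -> filename becomes 'results.json.xz'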
@@ -77,7 +78,7 @@ class Backend(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
- def __init__(self, dest, metadata, **options):
+ def __init__(self, dest, metadata, **kwargs):
""" Generic constructor
This method should set up the container and open any files or connections
@@ -85,7 +86,7 @@ class Backend(object):
store; that job is for the initialize method.
In addition it takes keyword arguments that define options for the
- backends. options should be prefixed to identify which backends they
+ backends. Options should be prefixed to identify which backends they
apply to. For example, a json specific value should be passed as
json_*, while a file specific value should be passed as file_*)
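Concretely, the prefix convention lets a single call site hand options to
several backends at once; a sketch with hypothetical values:

    # file_* keys are consumed by FileBackend.__init__, everything
    # else falls through **kwargs to the concrete backend.
    backend = backends.get_backend('json')(
        'results/',           # hypothetical destination
        file_start_count=10)  # consumed by FileBackend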
@@ -161,14 +162,11 @@ class FileBackend(Backend):
tests. It is important for resumes that this is not
overlapping as the inheriting classes assume they are
not. Default: 0
- file_sync -- If file_sync is truthy then the _fsync method will flush and
- sync files to disk, otherwise it will pass. Defualt: False
"""
- def __init__(self, dest, file_start_count=0, file_fsync=False, **kwargs):
+ def __init__(self, dest, file_start_count=0, **kwargs):
self._dest = dest
self._counter = itertools.count(file_start_count)
- self._file_sync = file_fsync
self._write_final = write_compressed
__INCOMPLETE = TestResult(result=INCOMPLETE)
@@ -176,11 +174,11 @@ class FileBackend(Backend):
def __fsync(self, file_):
""" Sync the file to disk
- If self._file_sync is truthy this will sync self._file to disk
+ If options.OPTIONS.sync is truthy this will sync self._file to disk
"""
file_.flush()
- if self._file_sync:
+ if options.OPTIONS.sync:
os.fsync(file_.fileno())
@abc.abstractmethod
diff --git a/framework/core.py b/framework/core.py
index c30f961..111725b 100644
--- a/framework/core.py
+++ b/framework/core.py
@@ -25,7 +25,6 @@
from __future__ import print_function, absolute_import
import errno
import os
-import re
import subprocess
import sys
import ConfigParser
@@ -36,7 +35,6 @@ __all__ = [
'PIGLIT_CONFIG',
'PLATFORMS',
'PiglitConfig',
- 'Options',
'collect_system_info',
'parse_listfile',
]
@@ -135,54 +133,6 @@ def checkDir(dirname, failifexists):
raise
-class Options(object):
- """ Contains options for a piglit run
-
- Options are as follows:
- concurrent -- True if concurrency is to be used
- execute -- False for dry run
- filter -- list of compiled regex which include exclusively tests that match
- exclude_filter -- list of compiled regex which exclude tests that match
- valgrind -- True if valgrind is to be used
- dmesg -- True if dmesg checking is desired. This forces concurrency off
- env -- environment variables set for each test before run
-
- """
- def __init__(self, concurrent=True, execute=True, include_filter=None,
- exclude_filter=None, valgrind=False, dmesg=False, sync=False):
- self.concurrent = concurrent
- self.execute = execute
- self.filter = \
- [re.compile(x, re.IGNORECASE) for x in include_filter or []]
- self.exclude_filter = \
- [re.compile(x, re.IGNORECASE) for x in exclude_filter or []]
- self.exclude_tests = set()
- self.valgrind = valgrind
- self.dmesg = dmesg
- self.sync = sync
-
- # env is used to set some base environment variables that are not going
- # to change across runs, without sending them to os.environ which is
- # fickle and easy to break
- self.env = {
- 'PIGLIT_SOURCE_DIR':
- os.environ.get(
- 'PIGLIT_SOURCE_DIR',
- os.path.abspath(os.path.join(os.path.dirname(__file__),
- '..')))
- }
-
- def __iter__(self):
- for key, values in self.__dict__.iteritems():
- # If the values are regex compiled then yield their pattern
- # attribute, which is the original plaintext they were compiled
- # from, otherwise yield them normally.
- if key in ['filter', 'exclude_filter']:
- yield (key, [x.pattern for x in values])
- else:
- yield (key, values)
-
-
def collect_system_info():
""" Get relavent information about the system running piglit
diff --git a/framework/options.py b/framework/options.py
index b46e31c..4f818c5 100644
--- a/framework/options.py
+++ b/framework/options.py
@@ -174,13 +174,12 @@ class _Options(object): # pylint: disable=too-many-instance-attributes
def __iter__(self):
for key, values in self.__dict__.iteritems():
- # If the values are regex compiled then yield their pattern
- # attribute, which is the original plaintext they were compiled
- # from, otherwise yield them normally.
- if key in ['filter', 'exclude_filter']:
- yield (key, [x.pattern for x in values])
- else:
- yield (key, values)
+ if not key.startswith('_'):
+ yield key, values
+
+ # Handle the attributes that have a descriptor separately
+ yield 'include_filter', self.include_filter
+ yield 'exclude_filter', self.exclude_filter
OPTIONS = _Options()
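The rewritten __iter__ is what makes dict(options.OPTIONS) work in
_create_metadata below: plain attributes come out of the instance
__dict__ (underscore-prefixed storage is skipped), and the two filters
are yielded through their properties. A sketch, assuming the filters are
data descriptors with underscore-prefixed backing storage per patch 1/2:

    from framework import options

    serialized = dict(options.OPTIONS)
    # Plain attributes (concurrent, execute, sync, ...) arrive via
    # __dict__; the descriptor-backed filters are added explicitly:
    assert 'include_filter' in serialized
    assert 'exclude_filter' in serialized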
diff --git a/framework/profile.py b/framework/profile.py
index bce28b9..32ed759 100644
--- a/framework/profile.py
+++ b/framework/profile.py
@@ -34,7 +34,7 @@ import importlib
import contextlib
import itertools
-from framework import grouptools, exceptions
+from framework import grouptools, exceptions, options
from framework.dmesg import get_dmesg
from framework.log import LogManager
from framework.test.base import Test
@@ -175,16 +175,13 @@ class TestProfile(object):
"""
self._dmesg = get_dmesg(not_dummy)
- def _prepare_test_list(self, opts):
+ def _prepare_test_list(self):
""" Prepare tests for running
Flattens the nested group hierarchy into a flat dictionary using '/'
delimited groups by calling self.flatten_group_hierarchy(), then
runs its own filters plus the filters in self.filters
- Arguments:
- opts - a core.Options instance
-
"""
def matches_any_regexp(x, re_list):
return any(r.search(x) for r in re_list)
@@ -192,10 +189,10 @@ class TestProfile(object):
# The extra argument is needed to match check_all's API
def test_matches(path, test):
"""Filter for user-specified restrictions"""
- return ((not opts.filter or
- matches_any_regexp(path, opts.filter)) and
- path not in opts.exclude_tests and
- not matches_any_regexp(path, opts.exclude_filter))
+ return ((not options.OPTIONS.include_filter or
+ matches_any_regexp(path, options.OPTIONS.include_filter))
+ and path not in options.OPTIONS.exclude_tests
+ and not matches_any_regexp(path, options.OPTIONS.exclude_filter))
filters = self.filters + [test_matches]
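The predicate's semantics are unchanged: a test survives filtering iff it
matches some include_filter entry (or that list is empty), is not listed
in exclude_tests, and matches no exclude_filter entry. With hypothetical
values:

    # include_filter = ['glsl'], exclude_filter = ['fbo']:
    #   'spec/glsl-1.30/foo' -> kept (include matches, no exclude hit)
    #   'spec/glsl/fbo-blit' -> dropped (exclude_filter matches)
    #   'spec/arb_blah/bar'  -> dropped (no include match)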
@@ -215,61 +212,49 @@ class TestProfile(object):
raise exceptions.PiglitFatalError(
'There are no tests scheduled to run. Aborting run.')
- def _pre_run_hook(self, opts):
+ def _pre_run_hook(self):
""" Hook executed at the start of TestProfile.run
To make use of this hook one will need to subclass TestProfile, and
set this to do something, as by default it will no-op
-
- Arguments:
- opts -- a core.Options instance
"""
pass
- def _post_run_hook(self, opts):
+ def _post_run_hook(self):
""" Hook executed at the end of TestProfile.run
To make use of this hook one will need to subclass TestProfile, and
set this to do something, as by default it will no-op
-
- Arguments:
- opts -- a core.Options instance
"""
pass
- def run(self, opts, logger, backend):
+ def run(self, logger, backend):
""" Runs all tests using Thread pool
When called this method will flatten out self.tests into
- self.test_list, then will prepare a logger, pass opts to the Test
- class, and begin executing tests through it's Thread pools.
+ self.test_list, then will prepare a logger, and begin executing tests
+ through its Thread pools.
- Based on the value of opts.concurrent it will either run all the tests
- concurrently, all serially, or first the thread safe tests then the
- serial tests.
+ Based on the value of options.OPTIONS.concurrent it will either run all
+ the tests concurrently, all serially, or first the thread safe tests
+ then the serial tests.
Finally it will print a final summary of the tests
Arguments:
- opts -- a core.Options instance
backend -- a results.Backend derived instance
"""
- self._pre_run_hook(opts)
- Test.OPTS = opts
+ self._pre_run_hook()
chunksize = 1
- self._prepare_test_list(opts)
+ self._prepare_test_list()
log = LogManager(logger, len(self.test_list))
def test(pair):
- """ Function to call test.execute from .map
-
- Adds opts which are needed by Test.execute()
-
- """
+ """Function to call test.execute from map"""
name, test = pair
with backend.write_test(name) as w:
test.execute(name, log.get(), self.dmesg)
@@ -288,9 +273,9 @@ class TestProfile(object):
single = multiprocessing.dummy.Pool(1)
multi = multiprocessing.dummy.Pool()
- if opts.concurrent == "all":
+ if options.OPTIONS.concurrent == "all":
run_threads(multi, self.test_list.iteritems())
- elif opts.concurrent == "none":
+ elif options.OPTIONS.concurrent == "none":
run_threads(single, self.test_list.iteritems())
else:
# Filter and return only thread safe tests to the threaded pool
@@ -302,7 +287,7 @@ class TestProfile(object):
log.get().summary()
- self._post_run_hook(opts)
+ self._post_run_hook()
def filter_tests(self, function):
"""Filter out tests that return false from the supplied function
diff --git a/framework/programs/run.py b/framework/programs/run.py
index 0e17d64..ebd7834 100644
--- a/framework/programs/run.py
+++ b/framework/programs/run.py
@@ -29,7 +29,7 @@ import time
import ConfigParser
import ctypes
-from framework import core, backends, exceptions
+from framework import core, backends, exceptions, options
import framework.results
import framework.profile
from . import parsers
@@ -176,17 +176,15 @@ def _run_parser(input_):
return parser.parse_args(unparsed)
-def _create_metadata(args, name, opts):
+def _create_metadata(args, name):
"""Create and return a metadata dict for Backend.initialize()."""
- options = {}
- options['profile'] = args.test_profile
- options['log_level'] = args.log_level
- for key, value in opts:
- options[key] = value
+ opts = dict(options.OPTIONS)
+ opts['profile'] = args.test_profile
+ opts['log_level'] = args.log_level
if args.platform:
- options['platform'] = args.platform
+ opts['platform'] = args.platform
- metadata = {'options': options}
+ metadata = {'options': opts}
metadata['name'] = name
metadata.update(core.collect_system_info())
@@ -235,16 +233,16 @@ def run(input_):
args.include_tests.append(line.rstrip())
# Pass arguments into Options
- opts = core.Options(concurrent=args.concurrency,
- exclude_filter=args.exclude_tests,
- include_filter=args.include_tests,
- execute=args.execute,
- valgrind=args.valgrind,
- dmesg=args.dmesg,
- sync=args.sync)
+ options.OPTIONS.concurrent = args.concurrency
+ options.OPTIONS.exclude_filter = args.exclude_tests
+ options.OPTIONS.include_filter = args.include_tests
+ options.OPTIONS.execute = args.execute
+ options.OPTIONS.valgrind = args.valgrind
+ options.OPTIONS.dmesg = args.dmesg
+ options.OPTIONS.sync = args.sync
# Set the platform to pass to waffle
- opts.env['PIGLIT_PLATFORM'] = args.platform
+ options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform
# Change working directory to the root of the piglit directory
piglit_dir = path.dirname(path.realpath(sys.argv[0]))
@@ -262,9 +260,8 @@ def run(input_):
backend = backends.get_backend(args.backend)(
args.results_path,
- file_fsync=opts.sync,
junit_suffix=args.junit_suffix)
- backend.initialize(_create_metadata(args, results.name, opts))
+ backend.initialize(_create_metadata(args, results.name))
profile = framework.profile.merge_test_profiles(args.test_profile)
profile.results_dir = args.results_path
@@ -273,7 +270,7 @@ def run(input_):
# Set the dmesg type
if args.dmesg:
profile.dmesg = args.dmesg
- profile.run(opts, args.log_level, backend)
+ profile.run(args.log_level, backend)
results.time_elapsed.end = time.time()
backend.finalize({'time_elapsed': results.time_elapsed})
@@ -302,17 +299,17 @@ def resume(input_):
_disable_windows_exception_messages()
results = backends.load(args.results_path)
- opts = core.Options(concurrent=results.options['concurrent'],
- exclude_filter=results.options['exclude_filter'],
- include_filter=results.options['filter'],
- execute=results.options['execute'],
- valgrind=results.options['valgrind'],
- dmesg=results.options['dmesg'],
- sync=results.options['sync'])
+ options.OPTIONS.concurrent = results.options['concurrent']
+ options.OPTIONS.exclude_filter = results.options['exclude_filter']
+ options.OPTIONS.include_filter = results.options['filter']
+ options.OPTIONS.execute = results.options['execute']
+ options.OPTIONS.valgrind = results.options['valgrind']
+ options.OPTIONS.dmesg = results.options['dmesg']
+ options.OPTIONS.sync = results.options['sync']
core.get_config(args.config_file)
- opts.env['PIGLIT_PLATFORM'] = results.options['platform']
+ options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']
results.options['env'] = core.collect_system_info()
results.options['name'] = results.name
@@ -320,7 +317,6 @@ def resume(input_):
# Resume only works with the JSON backend
backend = backends.get_backend('json')(
args.results_path,
- file_fsync=opts.sync,
file_start_count=len(results.tests) + 1)
# Specifically do not initialize again, everything initialize does is done.
@@ -328,15 +324,15 @@ def resume(input_):
# have obviously not completed.
for name, result in results.tests.iteritems():
if args.no_retry or result.result != 'incomplete':
- opts.exclude_tests.add(name)
+ options.OPTIONS.exclude_tests.add(name)
profile = framework.profile.merge_test_profiles(results.options['profile'])
profile.results_dir = args.results_path
- if opts.dmesg:
- profile.dmesg = opts.dmesg
+ if options.OPTIONS.dmesg:
+ profile.dmesg = options.OPTIONS.dmesg
# This is resumed, don't bother with time since it won't be accurate anyway
- profile.run(opts, results.options['log_level'], backend)
+ profile.run(results.options['log_level'], backend)
backend.finalize()
diff --git a/framework/test/base.py b/framework/test/base.py
index bf00396..7c7c410 100644
--- a/framework/test/base.py
+++ b/framework/test/base.py
@@ -36,8 +36,7 @@ import itertools
import abc
import copy
-from framework import exceptions
-from framework.core import Options
+from framework import exceptions, options
from framework.results import TestResult
@@ -145,7 +144,6 @@ class Test(object):
run_concurrent -- If True the test is thread safe. Default: False
"""
- OPTS = Options()
__metaclass__ = abc.ABCMeta
__slots__ = ['run_concurrent', 'env', 'result', 'cwd', '_command',
'__proc_timeout']
@@ -175,7 +173,7 @@ class Test(object):
"""
log.start(path)
# Run the test
- if self.OPTS.execute:
+ if options.OPTIONS.execute:
try:
self.result.time.start = time.time()
dmesg.update_dmesg()
@@ -226,7 +224,7 @@ class Test(object):
self.result.command = ' '.join(self.command)
self.result.environment = " ".join(
'{0}="{1}"'.format(k, v) for k, v in itertools.chain(
- self.OPTS.env.iteritems(), self.env.iteritems()))
+ options.OPTIONS.env.iteritems(), self.env.iteritems()))
try:
self.is_skip()
@@ -281,7 +279,7 @@ class Test(object):
#
fullenv = dict()
for key, value in itertools.chain(os.environ.iteritems(),
- self.OPTS.env.iteritems(),
+ options.OPTIONS.env.iteritems(),
self.env.iteritems()):
fullenv[key] = str(value)
@@ -369,7 +367,7 @@ class ValgrindMixin(object):
@Test.command.getter
def command(self):
command = super(ValgrindMixin, self).command
- if self.OPTS.valgrind:
+ if options.OPTIONS.valgrind:
return ['valgrind', '--quiet', '--error-exitcode=1',
'--tool=memcheck'] + command
else:
@@ -386,7 +384,7 @@ class ValgrindMixin(object):
"""
super(ValgrindMixin, self).interpret_result()
- if self.OPTS.valgrind:
+ if options.OPTIONS.valgrind:
# If the underlying test failed, simply report
# 'skip' for this valgrind test.
if self.result.result != 'pass':
diff --git a/framework/test/gleantest.py b/framework/test/gleantest.py
index cd58f2f..acc3983 100644
--- a/framework/test/gleantest.py
+++ b/framework/test/gleantest.py
@@ -24,6 +24,8 @@
from __future__ import print_function, absolute_import
import os
+
+from framework import options
from .base import Test, TestIsSkip
from .piglit_test import TEST_BIN_DIR
@@ -61,9 +63,9 @@ class GleanTest(Test):
def is_skip(self):
# Glean tests require glx
- if self.OPTS.env['PIGLIT_PLATFORM'] not in ['glx', 'mixed_glx_egl']:
+ if options.OPTIONS.env['PIGLIT_PLATFORM'] not in ['glx', 'mixed_glx_egl']:
raise TestIsSkip(
'Glean tests require platform to support glx, '
'but the platform is "{}"'.format(
- self.OPTS.env['PIGLIT_PLATFORM']))
+ options.OPTIONS.env['PIGLIT_PLATFORM']))
super(GleanTest, self).is_skip()
diff --git a/framework/test/piglit_test.py b/framework/test/piglit_test.py
index 7a35311..d173632 100644
--- a/framework/test/piglit_test.py
+++ b/framework/test/piglit_test.py
@@ -31,8 +31,8 @@ try:
except ImportError:
import json
+from framework import core, options
from .base import Test, WindowResizeMixin, ValgrindMixin, TestIsSkip
-import framework.core as core
__all__ = [
@@ -129,7 +129,7 @@ class PiglitGLTest(WindowResizeMixin, PiglitBaseTest):
any glx specific tests.
"""
- platform = self.OPTS.env['PIGLIT_PLATFORM']
+ platform = options.OPTIONS.env['PIGLIT_PLATFORM']
if self.__require_platforms and platform not in self.__require_platforms:
raise TestIsSkip(
'Test requires one of the following platforms "{}" '
diff --git a/framework/tests/backends_tests.py b/framework/tests/backends_tests.py
index 39e3e2d..61169e8 100644
--- a/framework/tests/backends_tests.py
+++ b/framework/tests/backends_tests.py
@@ -27,7 +27,7 @@ import os
import nose.tools as nt
-from framework import core, backends
+from framework import backends, options
import framework.tests.utils as utils
@@ -35,7 +35,7 @@ BACKEND_INITIAL_META = {
'name': 'name',
'test_count': 0,
'env': {},
- 'options': {k: v for k, v in core.Options()},
+ 'options': {k: v for k, v in options.OPTIONS},
}
@@ -208,7 +208,7 @@ def test_load_trailing_dot():
Basically if this reaches a BackendNotImplementedError, then the '.' was
handled correctly, otherwise if it's '.' then we should reach the
BackendError, which is incorrect.
-
+
"""
backends.load('foo.test_backend..gz')
@@ -221,7 +221,7 @@ def test_load_old():
If this raises a BackendError it means it didn't find a backend to use,
thus it skipped the file ending in '.old'.
-
+
"""
os.mkdir('test')
file_path = os.path.join('test', 'results.test_backend.old')
diff --git a/framework/tests/base_tests.py b/framework/tests/base_tests.py
index caef9e3..a7afd25 100644
--- a/framework/tests/base_tests.py
+++ b/framework/tests/base_tests.py
@@ -22,6 +22,7 @@
from __future__ import print_function, absolute_import
+import mock
import nose.tools as nt
from nose.plugins.attrib import attr
@@ -30,6 +31,7 @@ from framework.test.base import (
Test, WindowResizeMixin, ValgrindMixin, TestRunError
)
from framework.tests.status_tests import PROBLEMS, STATUSES
+from framework.options import _Options as Options
# pylint: disable=invalid-name
@@ -168,16 +170,16 @@ def test_mutation():
nt.assert_list_equal(args, ['a', 'b'])
-def test_ValgrindMixin_command():
+@mock.patch('framework.test.base.options.OPTIONS', new_callable=Options)
+def test_ValgrindMixin_command(mock_opts):
"""test.base.ValgrindMixin.command: overrides self.command"""
class _Test(ValgrindMixin, utils.Test):
pass
+ mock_opts.valgrind = True
test = _Test(['foo'])
- test.OPTS.valgrind = True
nt.eq_(test.command, ['valgrind', '--quiet', '--error-exitcode=1',
'--tool=memcheck', 'foo'])
- test.OPTS.valgrind = False
class TestValgrindMixinRun(object):
@@ -194,12 +196,16 @@ class TestValgrindMixinRun(object):
@utils.nose_generator
def test_bad_valgrind_true(self):
- """Test non-pass status when OPTS.valgrind is True."""
+ """Test non-pass status when options.OPTIONS.valgrind is True."""
def test(status, expected):
test = self.test(['foo'])
- test.OPTS.valgrind = True
test.result.result = status
- test.run()
+
+ with mock.patch('framework.test.base.options.OPTIONS',
+ new_callable=Options) as mock_opts:
+ mock_opts.valgrind = True
+ test.run()
+
nt.eq_(test.result.result, expected)
desc = ('test.base.ValgrindMixin.run: '
@@ -211,12 +217,16 @@ class TestValgrindMixinRun(object):
@utils.nose_generator
def test_valgrind_false(self):
- """Test non-pass status when OPTS.valgrind is False."""
+ """Test non-pass status when options.OPTIONS.valgrind is False."""
def test(status):
test = self.test(['foo'])
- test.OPTS.valgrind = False
test.result.result = status
- test.run()
+
+ with mock.patch('framework.test.base.options.OPTIONS',
+ new_callable=Options) as mock_opts:
+ mock_opts.valgrind = False
+ test.run()
+
nt.eq_(test.result.result, status)
desc = ('test.base.ValgrindMixin.run: when status is "{}" '
@@ -226,21 +236,23 @@ class TestValgrindMixinRun(object):
test.description = desc.format(status)
yield test, status
- def test_pass(self):
+ @mock.patch('framework.test.base.options.OPTIONS', new_callable=Options)
+ def test_pass(self, mock_opts):
"""test.base.ValgrindMixin.run: when test is 'pass' and returncode is '0' result is pass
"""
test = self.test(['foo'])
- test.OPTS.valgrind = True
+ mock_opts.valgrind = True
test.result.result = 'pass'
test.result.returncode = 0
test.run()
nt.eq_(test.result.result, 'pass')
- def test_fallthrough(self):
+ @mock.patch('framework.test.base.options.OPTIONS', new_callable=Options)
+ def test_fallthrough(self, mock_opts):
"""test.base.ValgrindMixin.run: when a test is 'pass' but returncode is not 0 it's 'fail'
"""
test = self.test(['foo'])
- test.OPTS.valgrind = True
+ mock_opts.valgrind = True
test.result.result = 'pass'
test.result.returncode = 1
test.run()
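All of the test updates in this series follow one pattern: instead of
mutating the class-level Test.OPTS (shared state that leaked between
tests), each test patches the module-level OPTIONS with a fresh instance.
Passing new_callable=Options makes mock.patch construct a real _Options
rather than a MagicMock, so descriptor and env behaviour match
production. The skeleton:

    import mock

    from framework.options import _Options as Options

    @mock.patch('framework.test.base.options.OPTIONS',
                new_callable=Options)
    def test_example(mock_opts):
        # mock_opts is a fresh _Options(); changes are undone when
        # the patch exits, so no state leaks between tests
        mock_opts.valgrind = True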
diff --git a/framework/tests/core_tests.py b/framework/tests/core_tests.py
index 82be599..a4b366a 100644
--- a/framework/tests/core_tests.py
+++ b/framework/tests/core_tests.py
@@ -88,29 +88,10 @@ def _save_core_config(func):
return inner
-@utils.nose_generator
-def test_generate_initialize():
- """ Generator that creates tests to initialize all of the classes in core
-
- In a compiled language the compiler provides this kind of checking, but in
- an interpreted language like python you don't have a compiler test. The
- unit tests generated by this function serve as a similar test, does this
- even work?
-
- """
- def check_initialize(target):
- """ Check that a class initializes without error """
- func = target()
-
- # Asserting that func exists will fail for Group and TestrunResult which
- # are dict subclasses
- nt.ok_(isinstance(func, target))
-
-
- for target in [core.Options, core.PiglitConfig]:
- check_initialize.description = "Test that {} initializes".format(
- target.__name__)
- yield check_initialize, target
+@utils.no_error
+def test_PiglitConfig_init():
+ """core.PiglitConfig: initializes"""
+ core.PiglitConfig()
def test_parse_listfile_return():
diff --git a/framework/tests/gleantest_tests.py b/framework/tests/gleantest_tests.py
index 88d2fe6..7a9f30f 100644
--- a/framework/tests/gleantest_tests.py
+++ b/framework/tests/gleantest_tests.py
@@ -22,8 +22,10 @@
from __future__ import print_function, absolute_import
+import mock
import nose.tools as nt
+from framework.options import _Options as Options
from framework.test import GleanTest
from framework.tests import utils
from framework.test.base import TestIsSkip
@@ -69,27 +71,30 @@ def test_bad_returncode():
nt.assert_equal(test.result.result, 'fail')
+@mock.patch('framework.test.gleantest.options.OPTIONS', new_callable=Options)
@nt.raises(TestIsSkip)
-def test_is_skip_not_glx():
+def test_is_skip_not_glx(mock_opts):
"""test.gleantest.GleanTest.is_skip: Skips when platform isn't glx"""
- GleanTest.OPTS.env['PIGLIT_PLATFORM'] = 'gbm'
+ mock_opts.env['PIGLIT_PLATFORM'] = 'gbm'
test = GleanTest('foo')
test.is_skip()
+@mock.patch('framework.test.gleantest.options.OPTIONS', new_callable=Options)
@utils.not_raises(TestIsSkip)
-def test_is_skip_glx():
+def test_is_skip_glx(mock_opts):
"""test.gleantest.GleanTest.is_skip: Does not skip when platform is glx"""
- GleanTest.OPTS.env['PIGLIT_PLATFORM'] = 'glx'
+ mock_opts.env['PIGLIT_PLATFORM'] = 'glx'
test = GleanTest('foo')
test.is_skip()
+@mock.patch('framework.test.gleantest.options.OPTIONS', new_callable=Options)
@utils.not_raises(TestIsSkip)
-def test_is_skip_glx_egl():
+def test_is_skip_glx_egl(mock_opts):
"""test.gleantest.GleanTest.is_skip: Does not skip when platform is mixed_glx_egl
"""
- GleanTest.OPTS.env['PIGLIT_PLATFORM'] = 'mixed_glx_egl'
+ mock_opts.env['PIGLIT_PLATFORM'] = 'mixed_glx_egl'
test = GleanTest('foo')
test.is_skip()
diff --git a/framework/tests/json_tests.py b/framework/tests/json_tests.py
index 36409eb..2fa03a9 100644
--- a/framework/tests/json_tests.py
+++ b/framework/tests/json_tests.py
@@ -36,7 +36,7 @@ except ImportError:
import json
import framework.tests.utils as utils
-from framework import core, results
+from framework import results
from framework.backends.json import JSONBackend
from framework.programs.run import _create_metadata
@@ -72,7 +72,7 @@ class TestJsonOutput(utils.StaticDirectory):
args.log_level = 'verbose'
backend = JSONBackend(cls.tdir, file_fsync=True)
- backend.initialize(_create_metadata(args, 'test', core.Options()))
+ backend.initialize(_create_metadata(args, 'test'))
with backend.write_test('result') as t:
t(results.TestResult('pass'))
backend.finalize({'time_elapsed': results.TimeAttribute(end=1.2)})
@@ -150,8 +150,8 @@ class TestJsonOutput(utils.StaticDirectory):
nt.assert_in('concurrent', self.json['options'])
def test_options_filter(self):
- """JSON: filter is an options key."""
- nt.assert_in('filter', self.json['options'])
+ """JSON: include_filter is an options key."""
+ nt.assert_in('include_filter', self.json['options'])
def test_options_exclude_tests(self):
"""JSON: exclude_tests is an options key."""
diff --git a/framework/tests/piglit_test_tests.py b/framework/tests/piglit_test_tests.py
index c7c4c8f..e7adf21 100644
--- a/framework/tests/piglit_test_tests.py
+++ b/framework/tests/piglit_test_tests.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2014 Intel Corporation
+# Copyright (c) 2014, 2015 Intel Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -18,13 +18,15 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-""" Tests for the exectest module """
+"""Tests for the piglit_test module"""
from __future__ import print_function, absolute_import
+import mock
import nose.tools as nt
from framework.tests import utils
+from framework.options import _Options as Options
from framework.test.base import TestIsSkip
from framework.test.piglit_test import (PiglitBaseTest, PiglitGLTest,
PiglitCLTest)
@@ -96,33 +98,37 @@ def test_PiglitGLTest_include_and_exclude():
exclude_platforms=['gbm'])
+@mock.patch('framework.test.piglit_test.options.OPTIONS', new_callable=Options)
@utils.not_raises(TestIsSkip)
-def test_PiglitGLTest_platform_in_require():
+def test_PiglitGLTest_platform_in_require(mock_opts):
"""test.piglit_test.PiglitGLTest.is_skip(): does not skip if platform is in require_platforms"""
- PiglitGLTest.OPTS.env['PIGLIT_PLATFORM'] = 'glx'
+ mock_opts.env['PIGLIT_PLATFORM'] = 'glx'
test = PiglitGLTest(['foo'], require_platforms=['glx'])
test.is_skip()
+@mock.patch('framework.test.piglit_test.options.OPTIONS', new_callable=Options)
@nt.raises(TestIsSkip)
-def test_PiglitGLTest_platform_not_in_require():
+def test_PiglitGLTest_platform_not_in_require(mock_opts):
"""test.piglit_test.PiglitGLTest.is_skip(): skips if platform is not in require_platforms"""
- PiglitGLTest.OPTS.env['PIGLIT_PLATFORM'] = 'gbm'
+ mock_opts.env['PIGLIT_PLATFORM'] = 'gbm'
test = PiglitGLTest(['foo'], require_platforms=['glx'])
test.is_skip()
+@mock.patch('framework.test.piglit_test.options.OPTIONS', new_callable=Options)
@nt.raises(TestIsSkip)
-def test_PiglitGLTest_platform_in_exclude():
+def test_PiglitGLTest_platform_in_exclude(mock_opts):
"""test.piglit_test.PiglitGLTest.is_skip(): skips if platform is in exclude_platforms"""
- PiglitGLTest.OPTS.env['PIGLIT_PLATFORM'] = 'glx'
+ mock_opts.env['PIGLIT_PLATFORM'] = 'glx'
test = PiglitGLTest(['foo'], exclude_platforms=['glx'])
test.is_skip()
+@mock.patch('framework.test.piglit_test.options.OPTIONS', new_callable=Options)
@utils.not_raises(TestIsSkip)
-def test_PiglitGLTest_platform_not_in_exclude():
+def test_PiglitGLTest_platform_not_in_exclude(mock_opts):
"""test.piglit_test.PiglitGLTest.is_skip(): does not skip if platform is in exclude_platforms"""
- PiglitGLTest.OPTS.env['PIGLIT_PLATFORM'] = 'gbm'
+ mock_opts.env['PIGLIT_PLATFORM'] = 'gbm'
test = PiglitGLTest(['foo'], exclude_platforms=['glx'])
test.is_skip()
diff --git a/framework/tests/profile_tests.py b/framework/tests/profile_tests.py
index 4942fc8..3a7d68b 100644
--- a/framework/tests/profile_tests.py
+++ b/framework/tests/profile_tests.py
@@ -24,10 +24,11 @@ from __future__ import print_function, absolute_import
import sys
import copy
+import mock
import nose.tools as nt
from framework.tests import utils
-from framework import grouptools, core, dmesg, profile, exceptions
+from framework import grouptools, dmesg, profile, exceptions, options
from framework.test import GleanTest
# Don't print sys.stderr to the console
@@ -114,6 +115,15 @@ class TestPrepareTestListMatches(object):
grouptools.join('group3', 'test5'): 'other',
grouptools.join('group4', 'Test9'): 'is_caps',
}
+ self.opts = None
+ self.__patcher = mock.patch('framework.profile.options.OPTIONS',
+ new_callable=options._Options)
+
+ def setup(self):
+ self.opts = self.__patcher.start()
+
+ def teardown(self):
+ self.__patcher.stop()
def test_matches_filter_mar_1(self):
"""profile.TestProfile.prepare_test_list: 'not env.filter or matches_any_regex()' env.filter is False
@@ -121,21 +131,19 @@ class TestPrepareTestListMatches(object):
Nothing should be filtered.
"""
- env = core.Options()
-
profile_ = profile.TestProfile()
profile_.test_list = self.data
- profile_._prepare_test_list(env)
+ profile_._prepare_test_list()
nt.assert_dict_equal(profile_.test_list, self.data)
def test_matches_filter_mar_2(self):
"""profile.TestProfile.prepare_test_list: 'not env.filter or matches_any_regex()' mar is False"""
- env = core.Options(include_filter=['test5'])
+ self.opts.include_filter = ['test5']
profile_ = profile.TestProfile()
profile_.test_list = self.data
- profile_._prepare_test_list(env)
+ profile_._prepare_test_list()
baseline = {grouptools.join('group3', 'test5'): 'other'}
@@ -143,12 +151,11 @@ class TestPrepareTestListMatches(object):
def test_matches_env_exclude(self):
"""profile.TestProfile.prepare_test_list: 'not path in env.exclude_tests'"""
- env = core.Options()
- env.exclude_tests.add(grouptools.join('group3', 'test5'))
+ self.opts.exclude_tests.add(grouptools.join('group3', 'test5'))
profile_ = profile.TestProfile()
profile_.test_list = self.data
- profile_._prepare_test_list(env)
+ profile_._prepare_test_list()
baseline = copy.deepcopy(self.data)
del baseline[grouptools.join('group3', 'test5')]
@@ -157,11 +164,11 @@ class TestPrepareTestListMatches(object):
def test_matches_exclude_mar(self):
"""profile.TestProfile.prepare_test_list: 'not matches_any_regexp()'"""
- env = core.Options(exclude_filter=['test5'])
+ self.opts.exclude_filter = ['test5']
profile_ = profile.TestProfile()
profile_.test_list = self.data
- profile_._prepare_test_list(env)
+ profile_._prepare_test_list()
baseline = copy.deepcopy(self.data)
del baseline[grouptools.join('group3', 'test5')]
@@ -170,11 +177,11 @@ class TestPrepareTestListMatches(object):
def test_matches_include_caps(self):
"""profile.TestProfile.prepare_test_list: matches capitalized tests"""
- env = core.Options(exclude_filter=['test9'])
+ self.opts.exclude_filter = ['test9']
profile_ = profile.TestProfile()
profile_.test_list = self.data
- profile_._prepare_test_list(env)
+ profile_._prepare_test_list()
nt.assert_not_in(grouptools.join('group4', 'Test9'), profile_.test_list)
diff --git a/piglit-print-commands.py b/piglit-print-commands.py
index e32ec70..c891e8e 100755
--- a/piglit-print-commands.py
+++ b/piglit-print-commands.py
@@ -29,14 +29,14 @@ import os
import os.path as path
sys.path.append(path.dirname(path.realpath(sys.argv[0])))
-import framework.core as core
+from framework import options
+from framework.programs import parsers
import framework.profile
from framework.test import Test, GleanTest
def main():
- core.get_config()
- parser = argparse.ArgumentParser(sys.argv)
+ parser = argparse.ArgumentParser(parents=[parsers.CONFIG])
parser.add_argument("-t", "--include-tests",
default=[],
action="append",
@@ -54,8 +54,8 @@ def main():
help="Path to results folder")
args = parser.parse_args()
- opts = core.Options(exclude_filter=args.exclude_tests,
- include_filter=args.include_tests)
+ options.OPTIONS.exclude_filter = args.exclude_tests
+ options.OPTIONS.include_filter = args.include_tests
# Change to the piglit's path
piglit_dir = path.dirname(path.realpath(sys.argv[0]))
@@ -76,7 +76,7 @@ def main():
command += ' '.join(testCommand)
return command
- profile._prepare_test_list(opts)
+ profile._prepare_test_list()
for name, test in profile.test_list.items():
assert(isinstance(test, Test))
print(name, ':::', getCommand(test))
diff --git a/tests/igt.py b/tests/igt.py
index 4f970a8..4998ab2 100644
--- a/tests/igt.py
+++ b/tests/igt.py
@@ -37,7 +37,7 @@ import os
import re
import subprocess
-from framework import grouptools, exceptions, core
+from framework import grouptools, exceptions, core, options
from framework.profile import TestProfile, Test
__all__ = ['profile']
@@ -88,8 +88,8 @@ else:
class IGTTestProfile(TestProfile):
"""Test profile for intel-gpu-tools tests."""
- def _pre_run_hook(self, opts):
- if opts.execute:
+ def _pre_run_hook(self):
+ if options.OPTIONS.execute:
try:
check_environment()
except exceptions.PiglitInternalError as e:
diff --git a/tests/xts.py b/tests/xts.py
index a078056..c7d55e3 100644
--- a/tests/xts.py
+++ b/tests/xts.py
@@ -39,7 +39,7 @@ X_TEST_SUITE = core.PIGLIT_CONFIG.required_get('xts', 'path')
class XTSProfile(TestProfile): # pylint: disable=too-few-public-methods
""" A subclass of TestProfile that provides a setup hook for XTS """
- def _pre_run_hook(self, opts):
+ def _pre_run_hook(self):
""" This hook sets the XTSTest.results_path variable
Setting this variable allows images created by XTS to be moved into the
--
2.6.2