[Piglit] [PATCH 4/5] framework: refactor the log module

Dylan Baker baker.dylan.c at gmail.com
Thu Jul 3 13:56:18 PDT 2014


This refactors the log module away from a single class that does
everything to having two classes. The first class, LogManager, is a
shared state manager that returns BaseLog derived objects when its
get() method is called.

The BaseLog derived classes provide three public methods: start(),
log(), and summary().

This makes the interfaces much cleaner, just 3 public methods that are
fairly obvious and clearly documented. It uses inheritance for more
shared code, and is just generally much less complex than the previous
implementation

This makes two changes to behavior, both of which I believe are necessary.
In the previous implementation verbose printed a line at the start of
the test and at the end, but it also printed the status line like the
non-verbose logger does, using the '\r' character to overwrite the line.
As far as I can tell the new logger is too performant for this behavior,
it literally overwrites the line before a human could read it.

The second change is to the command line arguments. Because one of the
design goals of this series is to allow multiple loggers to be created
for different purposes having a single command line option to toggle
between one state or another isn't really feasible.

Signed-off-by: Dylan Baker <baker.dylan.c at gmail.com>
---
 framework/core.py            |   5 +-
 framework/exectest.py        |   7 +-
 framework/log.py             | 290 ++++++++++++++++++++++++++++---------------
 framework/profile.py         |  10 +-
 framework/programs/run.py    |  26 ++--
 framework/results.py         |   6 +-
 framework/tests/log_tests.py | 177 +++++++++++++++++---------
 7 files changed, 339 insertions(+), 182 deletions(-)

diff --git a/framework/core.py b/framework/core.py
index 675b7aa..b4ba54a 100644
--- a/framework/core.py
+++ b/framework/core.py
@@ -71,13 +71,11 @@ class Options(object):
     exclude_filter -- list of compiled regex which exclude tests that match
     valgrind -- True if valgrind is to be used
     dmesg -- True if dmesg checking is desired. This forces concurrency off
-    verbose -- verbosity level.
     env -- environment variables set for each test before run
 
     """
     def __init__(self, concurrent=True, execute=True, include_filter=None,
-                 exclude_filter=None, valgrind=False, dmesg=False,
-                 verbose=False):
+                 exclude_filter=None, valgrind=False, dmesg=False):
         self.concurrent = concurrent
         self.execute = execute
         self.filter = [re.compile(x) for x in include_filter or []]
@@ -85,7 +83,6 @@ class Options(object):
         self.exclude_tests = set()
         self.valgrind = valgrind
         self.dmesg = dmesg
-        self.verbose = verbose
         # env is used to set some base environment variables that are not going
         # to change across runs, without sending them to os.environ which is
         # fickle as easy to break
diff --git a/framework/exectest.py b/framework/exectest.py
index 7d08774..c27be90 100644
--- a/framework/exectest.py
+++ b/framework/exectest.py
@@ -84,7 +84,7 @@ class Test(object):
             Fully qualified test name as a string.  For example,
             ``spec/glsl-1.30/preprocessor/compiler/keywords/void.frag``.
         '''
-        log_current = log.pre_log(path if self.OPTS.verbose else None)
+        log.start(path)
 
         # Run the test
         if self.OPTS.execute:
@@ -104,11 +104,10 @@ class Test(object):
                 self.result['traceback'] = "".join(
                     traceback.format_tb(exception[2]))
 
-            log.log(path, self.result['result'], log_current)
-
+            log.log(self.result['result'])
             json_writer.write_dict_item(path, self.result)
         else:
-            log.log(path, 'dry-run', log_current)
+            log.log('dry-run')
 
     @property
     def command(self):
diff --git a/framework/log.py b/framework/log.py
index d6ab735..491ab7f 100644
--- a/framework/log.py
+++ b/framework/log.py
@@ -18,136 +18,230 @@
 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 # IN THE SOFTWARE.
-#
+
+""" Module for terminal logging
+
+This module provides a class LogManager, which acts as a state manager
+returning BaseLog derived instances to individual tests.
+
+"""
 
 import sys
+import abc
+import itertools
+import threading
 import collections
-from .threads import synchronized_self
+
+__all__ = ['LogManager']
 
 
-class Log(object):
-    """ Print Progress to stdout
+class BaseLog(object):
+    """ Abstract base class for Log objects
 
     Arguments:
-    total -- The total number of tests to run.
+    state -- the state dict from LogManager
 
     """
-    def __init__(self, total, verbose):
-        self.__total = total
-        self.__complete = 0
-        self.__running = []
-        self.__generator = (x for x in xrange(self.__total))
-        self.__pad = len(str(self.__total))
-        self.__summary_keys = set(['pass', 'fail', 'warn', 'crash', 'skip',
-                                   'dmesg-warn', 'dmesg-fail', 'dry-run',
-                                   'timeout'])
-        self.__summary = collections.defaultdict(lambda: 0)
-        self.__lastlength = 0
-        self.__tty = sys.stdout.isatty()
-
-        self.__output = "[{percent}] {summary} {running}"
-
-        # Some automation tools (e.g, Jenkins) will buffer all output until
-        # newline, so don't use carriage return character if the stdout is not
-        # a TTY.
-        if self.__tty:
-            self.__output += "\r"
-        else:
-            self.__output += "\n"
+    __metaclass__ = abc.ABCMeta
+
+    SUMMARY_KEYS = set([
+        'pass', 'fail', 'warn', 'crash', 'skip', 'dmesg-warn', 'dmesg-fail',
+        'dry-run', 'timeout'])
+    _LOCK = threading.RLock()
+
+    def __init__(self, state):
+        self._state = state
+        self._pad = len(str(state['total']))
+
+    @abc.abstractmethod
+    def start(self, name):
+        """ Called before test run starts
+
+        This method is used to print things before the test starts
+
+        """
+
+    @abc.abstractmethod
+    def log(self, status):
+        """ Print the result of the test
+
+        This method is run after the test completes, and is used to log the
+        actual result of the test
+
+        """
+
+    @abc.abstractmethod
+    def summary(self):
+        """ Print a final summary
+
+        This method is run at the end of the test run, and is used to print a
+        final summary of the run
 
-        if verbose:
-            self.__output = "{result} :: {name}\n" + self.__output
+        """
+
+
+class QuietLog(BaseLog):
+    """ A logger providing minimal status output
 
-        self.__summary_output = "[{percent}] {summary}\n"
+    This logger provides only a ninja-like [x/y] pass: z, fail: w ... output.
 
-    def _write_output(self, output):
-        """ write the output to stdout, ensuring any previous line is cleared """
+    It uses \r to print over the same line when printing to a tty. If
+    sys.stdout is not a tty then it prints \n at the end of the line instead
+
+    Arguments:
+    state -- the state dict from LogManager
 
-        if self.__tty:
-            length = len(output)
-            if self.__lastlength > length:
-                output = "{0}{1}{2}".format(output[:-1],
-                                            " " * (self.__lastlength - length),
-                                            output[-1])
+    """
+    _test_counter = itertools.count()
 
-            self.__lastlength = length
+    def __init__(self, *args, **kwargs):
+        super(QuietLog, self).__init__(*args, **kwargs)
 
-        sys.stdout.write(output)
+        # If the output is a tty we can use '\r' (return, no linebreak) to
+        # print over the existing line, if stdout isn't a tty that will not
+        # work (and can break systems like jenkins), so we print a \n instead
+        if sys.stdout.isatty():
+            self._endcode = '\r'
+        else:
+            self._endcode = '\n'
 
-        # Need to flush explicitly, otherwise it all gets buffered without a
-        # newline.
-        sys.stdout.flush()
+        self.__counter = self._test_counter.next()
+        self._state['running'].append(self.__counter)
 
-    def _print_summary(self):
-        """ return a summary of the statuses """
-        return ", ".join("{0}: {1}".format(k, self.__summary[k])
-                         for k in sorted(self.__summary))
+    def start(self, name):
+        pass
 
-    def _print_running(self):
-        """ return running tests """
-        return "Running Test(s): {}".format(
-            " ".join([str(x).zfill(self.__pad) for x in self.__running]))
+    def log(self, status):
+        # increment complete
+        self._state['complete'] += 1
 
-    def _print_percent(self):
-        """ return the percentage of tess completed """
-        return "{0}/{1}".format(str(self.__complete).zfill(self.__pad),
-                                str(self.__total).zfill(self.__pad))
+        # Add to the summary dict
+        assert status in self.SUMMARY_KEYS
+        self._state['summary'][status] += 1
 
-    def __print(self, name, result):
-        """ Do the actual printing """
-        self._write_output(self.__output.format(
-            percent=self._print_percent(),
-            running=self._print_running(),
-            summary=self._print_summary(),
-            name=name,
-            result=result))
+        self.__print_summary()
+        self._state['running'].remove(self.__counter)
 
-    @synchronized_self
-    def log(self, name, result, value):
-        """ Print to the screen
+    def summary(self):
+        self.__print_summary()
 
-        Works by moving the cursor back to the front of the line and printing
-        over it.
+    def __print_summary(self):
+        """ Print the summary result
 
-        Arguments:
-        name -- the name of the test
-        result -- the result of the test
-        value -- the number of the test to remove
+        this prints '[done/total] {status}', it is a private method hidden from
+        the children of this class (VerboseLog)
 
         """
-        assert result in self.__summary_keys
-        self.__print(name, result)
+        out = '[{done}/{total}] {status} Running Test(s): {running}'.format(
+            done=str(self._state['complete']).zfill(self._pad),
+            total=str(self._state['total']).zfill(self._pad),
+            status=', '.join('{0}: {1}'.format(k, v) for k, v in
+                             sorted(self._state['summary'].iteritems())),
+            running=" ".join(str(x) for x in self._state['running'])
+        )
+
+        self.__print(out)
+
+    def __print(self, out):
+        """ Shared print method that ensures any bad lines are overwritten """
+        with self._LOCK:
+            # If the new line is shorter than the old one add trailing
+            # whitespace
+            pad = self._state['lastlength'] - len(out)
+
+            sys.stdout.write(out)
+            if pad > 0:
+                sys.stdout.write(' ' * pad)
+            sys.stdout.write(self._endcode)
+            sys.stdout.flush()
+
+        # Set the length of the line just printed (minus the spaces since we
+        # don't care if we leave whitespace) so that the next line will print
+        # extra whitespace if necessary
+        self._state['lastlength'] = len(out)
+
+
+class VerboseLog(QuietLog):
+    """ A more verbose version of the QuietLog
+
+    This logger works like the quiet logger, except it doesn't overwrite the
+    previous line, ever. It also prints an additional line at the start of each
+    test, which the quiet logger doesn't do.
 
-        # Mark running complete
-        assert value in self.__running
-        self.__running.remove(value)
+    """
+    def __init__(self, *args, **kwargs):
+        super(VerboseLog, self).__init__(*args, **kwargs)
+        self._endcode = '\n'  # Always use newlines for verbose
+        self.__name = None    # the name needs to be printed by start and log
 
-        # increment the number of completed tests
-        self.__complete += 1
+    def __print(self, out):
+        """ Shared print method for flushing """
+        with self._LOCK:
+            sys.stdout.write(out)
+            sys.stdout.write('\n')
+            sys.stdout.flush()
 
-        assert result in self.__summary_keys
-        self.__summary[result] += 1
+    def start(self, name):
+        """ Print the test that is being run next
 
-    @synchronized_self
-    def pre_log(self, running=None):
-        """ Hook to run before log()
+        This prints a 'running: <testname>' message before the test run
+        starts.
 
-        Returns a new number to know what processes are running, if running is
-        set it will print a running message for the test
+        """
+        with self._LOCK:
+            self.__print('running: {}'.format(name))
+        self.__name = name
 
-        Keyword Arguments:
-        running -- the name of a test to print is running. If Falsy then
-                   nothing will be printed. Default: None
+    def log(self, value):
+        """ Print a message after the test finishes
+
+        This method prints <status>: <name>. It also does a little bit of magic
+        before calling super() to print the status line.
 
         """
-        if running:
-            self.__print(running, 'running')
+        with self._LOCK:
+            self.__print('{0}: {1}'.format(value, self.__name))
 
-        x = self.__generator.next()
-        self.__running.append(x)
-        return x
+            # Set lastlength to 0, this prevents printing worthless pad in
+            # super()
+            self._state['lastlength'] = 0
+            super(VerboseLog, self).log(value)
 
-    def summary(self):
-        self._write_output(self.__summary_output.format(
-            percent=self._print_percent(),
-            summary=self._print_summary()))
+
+class LogManager(object):
+    """ Creates new log objects
+
+    Each of the log objects it creates has three accessible methods: start(),
+    log(), and summary(); none should return anything, and they must be thread safe.
+    log() should accept a single argument, which is the status the test
+    returned. start() should also take a single argument, the name of the test
+
+    When the LogManager is initialized it is initialized with an argument which
+    determines which Log class it will return (they are set via the LOG_MAP
+    dictionary, which maps string names to class callables), it will return
+    these as long as it exists.
+
+    Arguments:
+    logger -- a string name of a logger to use
+    total -- the total number of tests to run
+
+    """
+    LOG_MAP = {
+        'quiet': QuietLog,
+        'verbose': VerboseLog,
+    }
+
+    def __init__(self, logger, total):
+        assert logger in self.LOG_MAP
+        self._log = self.LOG_MAP[logger]
+        self._state = {
+            'total': total,
+            'summary': collections.defaultdict(lambda: 0),
+            'lastlength': 0,
+            'complete': 0,
+            'running': [],
+        }  # XXX Does access to this dict need locking?
+
+    def get(self):
+        """ Return a new log instance """
+        return self._log(self._state)
diff --git a/framework/profile.py b/framework/profile.py
index 5428890..02b0ef0 100644
--- a/framework/profile.py
+++ b/framework/profile.py
@@ -34,7 +34,7 @@ import multiprocessing.dummy
 import importlib
 
 from framework.dmesg import get_dmesg
-from framework.log import Log
+from framework.log import LogManager
 import framework.exectest
 
 __all__ = [
@@ -167,7 +167,7 @@ class TestProfile(object):
         """
         pass
 
-    def run(self, opts, json_writer):
+    def run(self, opts, logger, json_writer):
         """ Runs all tests using Thread pool
 
         When called this method will flatten out self.tests into
@@ -192,7 +192,7 @@ class TestProfile(object):
         chunksize = 1
 
         self._prepare_test_list(opts)
-        log = Log(len(self.test_list), opts.verbose)
+        log = LogManager(logger, len(self.test_list))
 
         def test(pair):
             """ Function to call test.execute from .map
@@ -201,7 +201,7 @@ class TestProfile(object):
 
             """
             name, test = pair
-            test.execute(name, log, json_writer, self.dmesg)
+            test.execute(name, log.get(), json_writer, self.dmesg)
 
         def run_threads(pool, testlist):
             """ Open a pool, close it, and join it """
@@ -228,7 +228,7 @@ class TestProfile(object):
             run_threads(single, (x for x in self.test_list.iteritems()
                                  if not x[1].run_concurrent))
 
-        log.summary()
+        log.get().summary()
 
         self._post_run_hook()
 
diff --git a/framework/programs/run.py b/framework/programs/run.py
index c1a2658..f04fdcf 100644
--- a/framework/programs/run.py
+++ b/framework/programs/run.py
@@ -85,10 +85,12 @@ def run(input_):
                         action="store_true",
                         help="Capture a difference in dmesg before and "
                              "after each test. Implies -1/--no-concurrency")
-    parser.add_argument("-v", "--verbose",
-                        action="store_true",
-                        help="Produce a line of output for each test before "
-                             "and after it runs")
+    parser.add_argument("-l", "--log-level",
+                        dest="log_level",
+                        action="store",
+                        choices=['quiet', 'verbose'],
+                        default='quiet',
+                        help="Set the logger verbosity level")
     parser.add_argument("test_profile",
                         metavar="<Path to one or more test profile(s)>",
                         nargs='+',
@@ -137,8 +139,8 @@ def run(input_):
                         include_filter=args.include_tests,
                         execute=args.execute,
                         valgrind=args.valgrind,
-                        dmesg=args.dmesg,
-                        verbose=args.verbose)
+                        dmesg=args.dmesg)
+
 
     # Set the platform to pass to waffle
     if args.platform:
@@ -170,7 +172,8 @@ def run(input_):
     if args.platform:
         options['platform'] = args.platform
     json_writer.initialize_json(options, results.name,
-                                core.collect_system_info())
+                                core.collect_system_info(),
+                                log_level=args.log_level)
 
     json_writer.write_dict_key('tests')
     json_writer.open_dict()
@@ -182,7 +185,7 @@ def run(input_):
     # Set the dmesg type
     if args.dmesg:
         profile.dmesg = args.dmesg
-    profile.run(opts, json_writer)
+    profile.run(opts, args.log_level, json_writer)
     time_end = time.time()
 
     json_writer.close_dict()
@@ -211,9 +214,8 @@ def resume(input_):
                         include_filter=results.options['filter'],
                         execute=results.options['execute'],
                         valgrind=results.options['valgrind'],
-                        dmesg=results.options['dmesg'],
-                        verbose=results.options['verbose'])
-
+                        dmesg=results.options['dmesg'])
+
     if results.options.get('platform'):
         opts.env['PIGLIT_PLATFORM'] = results.options['platform']
 
@@ -235,7 +237,7 @@ def resume(input_):
         profile.dmesg = opts.dmesg
 
     # This is resumed, don't bother with time since it wont be accurate anyway
-    profile.run(opts, json_writer)
+    profile.run(opts, results.options['log_level'], json_writer)
 
     json_writer.close_dict()
     json_writer.close_json()
diff --git a/framework/results.py b/framework/results.py
index 4b5fb30..a13f8cc 100644
--- a/framework/results.py
+++ b/framework/results.py
@@ -24,6 +24,7 @@
 from __future__ import print_function
 import os
 import sys
+import itertools
 from cStringIO import StringIO
 try:
     import simplejson as json
@@ -132,7 +133,7 @@ class JSONWriter(object):
         # is popped and written into the json
         self._open_containers = []
 
-    def initialize_json(self, options, name, env):
+    def initialize_json(self, options, name, env, **more):
         """ Write boilerplate json code
 
         This writes all of the json except the actual tests.
@@ -151,7 +152,8 @@ class JSONWriter(object):
 
         self.write_dict_key('options')
         self.open_dict()
-        for key, value in options.iteritems():
+        for key, value in itertools.chain(options.iteritems(),
+                                          more.iteritems()):
             # Loading a NoneType will break resume, and are a bug
             assert value is not None, "Value {} is NoneType".format(key)
             self.write_dict_item(key, value)
diff --git a/framework/tests/log_tests.py b/framework/tests/log_tests.py
index 333ba35..a71f3ca 100644
--- a/framework/tests/log_tests.py
+++ b/framework/tests/log_tests.py
@@ -20,76 +20,139 @@
 
 """ Module provides tests for log.py module """
 
-from types import *  # This is a special * safe module
+from __future__ import print_function
+import sys
+import collections
 import nose.tools as nt
-from framework.log import Log
+import framework.log as log
 import framework.tests.utils as utils
 
-valid_statuses = ('pass', 'fail', 'crash', 'warn', 'dmesg-warn',
-                  'dmesg-fail', 'skip', 'dry-run')
+TEST_STATE = {'total': 0, 'complete': 0, 'lastlength': 0, 'running': [],
+              'summary': collections.defaultdict(lambda: 0)}
 
-def test_initialize_log_terse():
-    """ Test that Log initializes with verbose=False """
-    log = Log(100, False)
-    assert log
 
+ at utils.nose_generator
+def test_initialize():
+    """ Generate tests for class initializiation """
+    check_initialize = lambda c, *a: c(*a)
+
+    for name, class_ in [('QuietLog', log.QuietLog),
+                         ('VerboseLog', log.VerboseLog)]:
+        check_initialize.description = "{} initializes".format(name)
+        yield check_initialize, class_, TEST_STATE
+
+    check_initialize.description = "LogManager initializes"
+    yield check_initialize, log.LogManager, 'quiet', 100
+
+
+def test_log_factory_returns_log():
+    """ LogManager.get() returns a BaseLog derived instance """
+    logger = log.LogManager('quiet', 100)
+    log_inst = logger.get()
+    assert isinstance(log_inst, log.BaseLog)
+
+
+ at utils.nose_generator
+def test_quietlog_log_state_update():
+    """ QuiteLog.log() updates shared state managed by LogManager """
+    logger = log.LogManager('quiet', 100)
+    log_inst = logger.get()
+    log_inst.log('pass')
+
+    # This lambda is necessary, without it description cannot be set
+    check = lambda x, y: nt.assert_equal(x, y)
+    for name, value in [('total', 100), ('summary', {'pass': 1}),
+                        ('complete', 1)]:
+        check.description = \
+            'State value {0} is incremented by QuietLog.log()'.format(
+                name)
+        yield check, logger._state[name], value
+
+
+def check_for_output(func, args, file_=sys.stdout):
+    """ Test that a test produced output to either stdout or stderr
+
+    Arguments:
+    func -- a callable that will produce output
+    args -- any arguments to be passed to func as a container with a splat
+
+    KeywordArguments:
+    file -- should be either sys.stdout or sys.stderr. default: sys.stdout
+
+    """
+    func(*args)
+    # In nose sys.stdout and sys.stderr are StringIO objects; read() returns
+    # everything after the current position, so if the position is at the
+    # end of the file an empty string is returned.
+    assert file_.read() == ''
+
+
+ at utils.nose_generator
+def test_print_when_expected():
+    """ Generator that creates tests that ensure that methods print
+
+    For most logger classes <class>.log() and <class>.summary() should print
+    something, for some classes <class>.start() should print.
+
+    This doesn't try to check what they are printing, just that they are
+    printing, since 1) the output is very implementation dependent, and 2) it
+    is subject to change.
 
-def test_initialize_log_verbose():
-    """ Test that Log initializes with verbose=True """
-    log = Log(100, True)
-    assert log
+    """
+    # a list of tuples which element 1 is the name of the class and method
+    # element 2 is a callable, and element 3 is a list of arguments to be
+    # passed to that callable. it needs to work with the splat operator
+    printing = []
 
+    # Test QuietLog
+    quiet = log.QuietLog(TEST_STATE)
+    printing.append(('QuietLog.log', quiet.log, ['pass']))
+    printing.append(('QuietLog.summary', quiet.summary, []))
 
-def test_pre_log_return():
-    """ Test that pre_log returns a number """
-    log = Log(100, False)
+    # Test VerboseLog
+    verbose = log.VerboseLog(TEST_STATE)
+    printing.append(('VerboseLog.start', verbose.start, ['a test']))
+    printing.append(('VerboseLog.log', verbose.log, ['pass']))
+    printing.append(('VerboseLog.summary', verbose.summary, []))
 
-    ret = log.pre_log()
-    nt.assert_true(isinstance(ret, (IntType, FloatType, LongType)),
-                   msg="Log.pre_log() didn't return a numeric type!")
+    for name, func, args in printing:
+        check_for_output.description = "{} produces output".format(name)
+        yield check_for_output, func, args
 
 
-def test_log_increment_complete():
-    """ Tests that Log.log() increments self.__complete """
-    log = Log(100, False)
-    ret = log.pre_log()
-    log.log('test', 'pass', ret)
-    nt.assert_equal(log._Log__complete, 1,
-                    msg="Log.log() did not properly incremented Log.__current")
+def check_no_output(func, args, file_=sys.stdout):
+    """ file should not be written into
 
+    This is the corollary of check_for_output; it takes the same arguments and
+    behaves the same way. See its docstring for more info
 
-def check_log_increment_summary(stat):
-    """ Test that passing a result to log works correctly """
-    log = Log(100, False)
-    ret = log.pre_log()
-    log.log('test', stat, ret)
-    print log._Log__summary
-    nt.assert_equal(log._Log__summary[stat], 1,
-                    msg="Log.__summary[{}] was not properly "
-                        "incremented".format(stat))
+    """
+    func(*args)
+    assert file_.tell() == 0
 
 
 @utils.nose_generator
-def test_log_increment_summary():
-    """ Generator that creates tests for self.__summary """
-    for stat in valid_statuses:
-        check_log_increment_summary.description = \
-            "Test that Log.log increments self._summary[{}]".format(stat)
-        yield check_log_increment_summary, stat
-
-
-def test_log_removes_complete():
-    """ Test that Log.log() removes finished tests from __running """
-    log = Log(100, False)
-    ret = log.pre_log()
-    log.log('test', 'pass', ret)
-    nt.assert_not_in(ret, log._Log__running,
-                     msg="Running tests not removed from running list")
-
-
- at nt.raises(AssertionError)
-def test_log_increment_summary_bad():
-    """ Only statuses in self.__summary_keys are valid for log """
-    log = Log(100, False)
-    ret = log.pre_log()
-    log.log('test', 'fails', ret)
+def test_noprint_when_expected():
+    """ Generate tests for methods that shouldn't print
+
+    some methods shouldn't print anything, ensure that they don't
+
+    """
+    # a list of tuples which element 1 is the name of the class and method
+    # element 2 is a callable, and element 3 is a list of arguments to be
+    # passed to that callable. it needs to work with the splat operator
+    printing = []
+
+    # Test QuietLog
+    quiet = log.QuietLog(TEST_STATE)
+    printing.append(('QuietLog.start', quiet.start, ['name']))
+
+    # Test DummyLog (XXX: DummyLog is not defined by this patch -- confirm it
+    dummy = log.DummyLog(TEST_STATE)
+    printing.append(('DummyLog.start', dummy.start, ['name']))
+    printing.append(('DummyLog.log', dummy.log, ['pass']))
+    printing.append(('DummyLog.summary', dummy.summary, []))
+
+    for name, func, args in printing:
+        check_no_output.description = "{} produces no output".format(name)
+        yield check_no_output, func, args
-- 
2.0.0



More information about the Piglit mailing list