[Piglit] [PATCH 4/4] log: add verbose logger

Dylan Baker baker.dylan.c at gmail.com
Mon Feb 17 12:41:56 PST 2014


On Monday, February 17, 2014 03:33:08 PM Ilia Mirkin wrote:
> On Mon, Feb 17, 2014 at 2:45 PM, Dylan Baker <baker.dylan.c at gmail.com> wrote:
> > This adds a verbose logger option, similar to that of the old piglit
> > logger, but with some of the flair of the new one.
> > 
> > Signed-off-by: Dylan Baker <baker.dylan.c at gmail.com>
> > ---
> > 
> >  framework/core.py            | 17 ++++-----
> >  framework/log.py             | 56 +++++++++++++++++++++++-------
> >  framework/tests/log_tests.py | 83 ++++++++++++++++++++++++++++++++------------
> >  piglit-resume.py             |  8 +++++
> >  piglit-run.py                |  7 +++-
> >  5 files changed, 127 insertions(+), 44 deletions(-)
> > 
> > diff --git a/framework/core.py b/framework/core.py
> > index 13ed235..761af8e 100644
> > --- a/framework/core.py
> > +++ b/framework/core.py
> > 
> > @@ -355,7 +355,7 @@ class TestrunResult:
> >  class Environment:
> >      def __init__(self, concurrent=True, execute=True, include_filter=[],
> > 
> > -                 exclude_filter=[], valgrind=False, dmesg=False):
> > 
> > +                 exclude_filter=[], valgrind=False, dmesg=False, verbose=False):
> >          self.concurrent = concurrent
> >          self.execute = execute
> >          self.filter = []
> > 
> > @@ -363,6 +363,7 @@ class Environment:
> >          self.exclude_tests = set()
> >          self.valgrind = valgrind
> >          self.dmesg = dmesg
> > 
> > +        self.verbose = verbose
> > 
> >          """
> >          The filter lists that are read in should be a list of string objects,
> > 
> > @@ -434,11 +435,10 @@ class Test(object):
> >              ``spec/glsl-1.30/preprocessor/compiler/keywords/void.frag``.
> >          
> >          '''
> > 
> > -        log_current = log.pre_log()
> > +        log_current = log.pre_log(path if env.concurrent else None)
> 
> env.verbose, no?
> 

d'oh! I really wanted to use concurrent for some reason.
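
The corrected line for v2 would be (a sketch of the agreed fix, not the
committed code):

    log_current = log.pre_log(path if env.verbose else None)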

> >          # Run the test
> >          
> >          if env.execute:
> >              try:
> > -                log.log()
> > 
> >                  time_start = time.time()
> >                  dmesg.update_dmesg()
> >                  self._test_hook_execute_run()
> > 
> > @@ -462,7 +462,9 @@ class Test(object):
> >                  result['traceback'] = \
> >                      "".join(traceback.format_tb(sys.exc_info()[2]))
> > 
> > -            test_result = result['result']
> > +            log.log(path, result['result'])
> > +            log.post_log(log_current, result['result'])
> > +
> > 
> >              if 'subtest' in result and len(result['subtest']) > 1:
> >                  for test in result['subtest']:
> >                      result['result'] = result['subtest'][test]
> > 
> > @@ -470,9 +472,8 @@ class Test(object):
> >              else:
> >                  json_writer.write_dict_item(path, result)
> >          
> >          else:
> > -            test_result = 'dry-run'
> > -            log.log()
> > -        log.post_log(log_current, test_result)
> > +            log.log(path, 'dry-run')
> > +            log.post_log(log_current, 'dry-run')
> > 
> >  class Group(dict):
> > @@ -557,7 +558,7 @@ class TestProfile(object):
> >          '''
> >          
> >          self.prepare_test_list(env)
> > 
> > -        log = Log(len(self.test_list))
> > +        log = Log(len(self.test_list), env.verbose)
> > 
> >          def test(pair):
> >              """ Function to call test.execute from .map
> > 
> > diff --git a/framework/log.py b/framework/log.py
> > index 676d122..a521e79 100644
> > --- a/framework/log.py
> > +++ b/framework/log.py
> > 
> > @@ -32,7 +32,7 @@ class Log(object):
> >      total -- The total number of tests to run.
> >      
> >      """
> > 
> > -    def __init__(self, total):
> > 
> > +    def __init__(self, total, verbose):
> >          self.__total = total
> >          self.__complete = 0
> >          self.__running = []
> > 
> > @@ -42,17 +42,39 @@ class Log(object):
> >                                     'dmesg-warn', 'dmesg-fail',
> >                                     'dry-run'])
> >          
> >          self.__summary = collections.defaultdict(lambda: 0)
> > 
> > +        if verbose:
> > +            self.__output = ("{result} :: {name}\n"
> > +                             "[{percent}] {summary} {running}\r")
> > +        else:
> > +            self.__output = "[{percent}] {summary} {running}\r"
> 
> A somewhat less repetitive alternative, but I guess a matter of style,
> so feel free to ignore:
> 
> self.__output = "[{percent}] {summary} {running}\r"
> if verbose:
>   self.__output = "{result} :: {name}\n" + self.__output
> 

I waffled back and forth on that. Since you mentioned it, I'll make the change.
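
For reference, the Log constructor would then read roughly like this (a
sketch following the suggestion above, not the final committed code):

    self.__output = "[{percent}] {summary} {running}\r"
    if verbose:
        self.__output = "{result} :: {name}\n" + self.__output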

> > +
> > +
> > 
> >      def _summary(self):
> > +        """ return a summary of the statuses """
> > 
> >          return ", ".join("{0}: {1}".format(k, self.__summary[k])
> >                           for k in sorted(self.__summary))
> >      
> >      def _running(self):
> > +        """ return running tests """
> > 
> >          return "Running Test(s): {}".format(
> >              " ".join([str(x).zfill(self.__pad) for x in self.__running]))
> >      
> >      def _percent(self):
> > -        return "[{0}/{1}]".format(str(self.__complete).zfill(self.__pad),
> > -                                  str(self.__total).zfill(self.__pad))
> > +        """ return the percentage of tests completed """
> > +        return "{0}/{1}".format(str(self.__complete).zfill(self.__pad),
> > +                                str(self.__total).zfill(self.__pad))
> > +
> > +    def __print(self, name, result):
> > +        """ Do the actual printing """
> > +        sys.stdout.write(self.__output.format(**{'percent': self._percent(),
> > +                                                 'running': self._running(),
> > +                                                 'summary': self._summary(),
> > +                                                 'name': name,
> > +                                                 'result': result}))
> > +
> > +        # Need to flush explicitly, otherwise it all gets buffered without a
> > +        # newline.
> > +        sys.stdout.flush()
> > 
> >      @synchronized_self
> > 
> >      def post_log(self, value, result):
> > @@ -74,23 +96,31 @@ class Log(object):
> >          self.__summary[result] += 1
> >      
> >      @synchronized_self
> > 
> > -    def log(self):
> > 
> > +    def log(self, name, result):
> >          """ Print to the screen
> >          
> >          Works by moving the cursor back to the front of the line and printing
> >          over it.
> > 
> > -
> > -        """
> > -        sys.stdout.write("{0} {1} {2}\r".format(
> > -            self._percent(), self._summary(), self._running()))
> > 
> > -        # Need to flush explicitly, otherwise it all gets buffered without a
> > -        # newline.
> > -        sys.stdout.flush()
> > +        """
> > +        assert result in self.__summary_keys
> > +        self.__print(name, result)
> > 
> >      @synchronized_self
> > 
> > -    def pre_log(self):
> > -        """ Returns a new number to know what processes are running """
> > +    def pre_log(self, running=None):
> > +        """ Hook to run before log()
> > +
> > +        Returns a new number to know what processes are running; if
> > +        running is set it will print a running message for the test.
> > +
> > +        Keyword Arguments:
> > +        running -- the name of a test to report as running. If falsy then
> > +                   nothing will be printed. Default: None
> > +
> > +        """
> > +        if running:
> > +            self.__print(running, 'running')
> > +
> > 
> >          x = self.__generator.next()
> >          self.__running.append(x)
> >          return x
> > 
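With verbose enabled, the rendered output for a passing test would look
roughly like this (derived from the format string above; the test name and
counts are made up):

    pass :: spec/some/test
    [001/100] pass: 1 Running Test(s): 002

The status line ends in '\r', so the next update overwrites it in place.
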
> > diff --git a/framework/tests/log_tests.py b/framework/tests/log_tests.py
> > index 8b817f2..6969fb3 100644
> > --- a/framework/tests/log_tests.py
> > +++ b/framework/tests/log_tests.py
> > @@ -29,15 +29,21 @@ from framework.log import Log
> > 
> >  valid_statuses = ('pass', 'fail', 'crash', 'warn', 'dmesg-warn',
> >                    'dmesg-fail', 'skip', 'dry-run')
> > 
> > -def test_initialize_log():
> > -    """ Test that Log initializes with """
> > -    log = Log(100)
> > +def test_initialize_log_terse():
> > +    """ Test that Log initializes with verbose=False """
> > +    log = Log(100, False)
> > +    assert log
> > +
> > +
> > +def test_initialize_log_verbose():
> > +    """ Test that Log initializes with verbose=True """
> > +    log = Log(100, True)
> > 
> >      assert log
> >  
> >  def test_pre_log_return():
> >      """ Test that pre_log returns a number """
> > 
> > -    log = Log(100)
> > +    log = Log(100, False)
> > 
> >      ret = log.pre_log()
> >      nt.assert_true(isinstance(ret, (IntType, FloatType, LongType)),
> > 
> > @@ -46,7 +52,7 @@ def test_pre_log_return():
> >  def test_post_log_increment_complete():
> >      """ Tests that Log.post_log() increments self.__complete """
> > 
> > -    log = Log(100)
> > +    log = Log(100, False)
> > 
> >      ret = log.pre_log()
> >      log.post_log(ret, 'pass')
> >      nt.assert_equal(log._Log__complete, 1,
> > 
> > @@ -56,7 +62,7 @@ def test_post_log_increment_complete():
> >  def check_post_log_increment_summary(stat):
> >      """ Test that passing a result to post_log works correctly """
> > 
> > -    log = Log(100)
> > +    log = Log(100, False)
> > 
> >      ret = log.pre_log()
> >      log.post_log(ret, stat)
> >      print log._Log__summary
> > 
> > @@ -77,7 +83,7 @@ def test_post_log_increment_summary():
> >  def test_post_log_removes_complete():
> >      """ Test that Log.post_log() removes finished tests from __running
> >      """
> > 
> > -    log = Log(100)
> > +    log = Log(100, False)
> > 
> >      ret = log.pre_log()
> >      log.post_log(ret, 'pass')
> >      nt.assert_not_in(ret, log._Log__running,
> > 
> > @@ -85,15 +91,15 @@ def test_post_log_removes_complete():
> >  @nt.raises(AssertionError)
> > 
> > -def check_post_log_incrment_summary_bad():
> > -    log = Log(100)
> > +def test_post_log_increment_summary_bad():
> > +    log = Log(100, False)
> > 
> >      ret = log.pre_log()
> >      log.post_log(ret, 'fails')
> > 
> > -def check_output(stat):
> > -    """ Test that dry output returns the expected values """
> > -    log = Log(100)
> > +def check_output_terse(stat):
> > +    """ Test that terse output returns the expected values """
> > +    log = Log(100, False)
> > 
> >      for x in stat:
> >          ret = log.pre_log()
> > 
> > @@ -101,7 +107,7 @@ def check_output(stat):
> >      # Print to stdout, and then capture it, the capture absolutely needs to
> >      # imediatly after log.log()
> > 
> > -    log.log()
> > +    log.log('test', 'pass')
> > 
> >      output = sys.stdout.getvalue().strip()  # sys.stdout is a StringIO in nose
> >      
> >      # Create a count of the statuses in alphabetical order. What we need to end
> > 
> > @@ -119,24 +125,53 @@ def check_output(stat):
> >                      msg="Did not return execpted output: " + expected)
> > 
> > +def check_output_verbose(stat):
> > +    """ Test that verbose output returns the expected values """
> > +    log = Log(100, True)
> > +
> > +    for x in stat:
> > +        ret = log.pre_log()
> > +        log.post_log(ret, x)
> > +
> > +    # Print to stdout, and then capture it; the capture absolutely needs
> > +    # to be immediately after log.log()
> > +    log.log('test', 'pass')
> > +    output = sys.stdout.getvalue().strip()  # sys.stdout is a StringIO in nose
> > +
> > +    # Create a count of the statuses in alphabetical order. What we need to
> > +    # end up with is something like: 'crash': 2, 'skip': 1
> > +    s = []
> > +    for x in sorted(set(stat)):
> > +        s.append("{0}: {1}".format(x, stat.count(x)))
> > +
> > +    expected = ('pass :: test\n'
> > +                '[{0}/100] {1} Running Test(s):'.format(
> > +                    str(len(stat)).zfill(3),
> > +                    ", ".join(s)))
> > +
> > +    nt.assert_equal(output, expected,
> > +                    msg="Did not return expected output: " + expected)
> > +
> > +
> > 
> >  def gen_test_output():
> >      """ Generate a set of tests for checking __summary output
> >      
> >      This generates severl hundered tests
> >      
> >      """
> > 
> > -    yieldable = check_output
> > +    terse = check_output_terse
> > +    verbose = check_output_verbose
> > 
> >      # test dry run seperately, we don't want to test it mixed in with other
> >      # statuses, since it shouldn't be mixed
> > 
> > -    yieldable.description = "Test output for status combinations: dry-run"
> > -    yield yieldable, ['dry-run']
> > +    terse.description = "Test output for status combinations: dry-run"
> > +    yield terse, ['dry-run']
> > 
> > -    yieldable.description = "Test output for status combinations: dry-run dry-run"
> > -    yield yieldable, ['dry-run', 'dry-run']
> > +    terse.description = "Test output for status combinations: dry-run dry-run"
> > +    yield terse, ['dry-run', 'dry-run']
> > 
> > -    yieldable.description = "Test output for status combinations: dry-run dry-run dry-run"
> > -    yield yieldable, ['dry-run', 'dry-run', 'dry-run']
> > +    terse.description = "Test output for status combinations: dry-run dry-run dry-run"
> > +    yield terse, ['dry-run', 'dry-run', 'dry-run']
> > 
> >      # All combinations except dry-run up to 3 in length. We could generate
> >      # more, but it quickly becomes overwhelming, and the usefulness is
> > 
> > @@ -144,6 +179,10 @@ def gen_test_output():
> >      for r in xrange(1, 4):
> >          for stat in itertools.product(valid_statuses[:-1],
> >                                        repeat=r):
> > -            yieldable.description = "Test output for status: " + \
> > +            terse.description = "Test terse output for status: " + \
> > +                    " ".join([str(x) for x in stat])
> > +            yield terse, stat
> > +
> > +            verbose.description = "Test verbose output for status: " + \
> >                      " ".join([str(x) for x in stat])
> > 
> > -            yield yieldable, stat
> > +            yield verbose, stat
> > diff --git a/piglit-resume.py b/piglit-resume.py
> > index 7b82420..5d41b9a 100755
> > --- a/piglit-resume.py
> > +++ b/piglit-resume.py
> > @@ -31,6 +31,10 @@ import framework.core as core
> > 
> >  def main():
> >      parser = argparse.ArgumentParser()
> > 
> > +    parser.add_argument("-v", "--verbose",
> > +                        action="store_true",
> > +                        help="Produce a line of output for each test before "
> > +                             "and after it runs")
> > 
> >      parser.add_argument("results_path",
> >                          type=path.realpath,
> >                          metavar="<Results Path>",
> > 
> > @@ -44,6 +48,10 @@ def main():
> >                             execute=results.options['execute'],
> >                             valgrind=results.options['valgrind'],
> >                             dmesg=results.options['dmesg'])
> > 
> > +
> > +    # Verbosity isn't a useful environment setting, and the user should be
> > +    # able to set it on their own.
> > +    env.verbose = args.verbose
> > 
> >      # Change working directory to the piglit directory
> >      os.chdir(path.dirname(path.realpath(sys.argv[0])))
> > 
> > diff --git a/piglit-run.py b/piglit-run.py
> > index ba5c925..5fc9baf 100755
> > --- a/piglit-run.py
> > +++ b/piglit-run.py
> > 
> > @@ -82,6 +82,10 @@ def main():
> >                          action="store_true",
> >                          help="Capture a difference in dmesg before and "
> >                               "after each test. Implies -1/--no-concurrency")
> > 
> > +    parser.add_argument("-v", "--verbose",
> > +                        action="store_true",
> > +                        help="Produce a line of output for each test before "
> > +                             "and after it runs")
> > 
> >      parser.add_argument("test_profile",
> >                          metavar="<Path to one or more test profile(s)>",
> >                          nargs='+',
> > 
> > @@ -115,7 +119,8 @@ def main():
> >                             include_filter=args.include_tests,
> >                             execute=args.execute,
> >                             valgrind=args.valgrind,
> > 
> > -                           dmesg=args.dmesg)
> > +                           dmesg=args.dmesg,
> > +                           verbose=args.verbose)
> > 
> >      # Change working directory to the root of the piglit directory
> >      piglit_dir = path.dirname(path.realpath(sys.argv[0]))
> > 
> > --
> > 1.8.5.5