[Piglit] [PATCH 2/4] framework/log.py: Make self.__summary keys explicitly

Dylan Baker baker.dylan.c at gmail.com
Mon Feb 17 12:39:39 PST 2014


On Monday, February 17, 2014 03:25:22 PM Ilia Mirkin wrote:
> On Mon, Feb 17, 2014 at 2:45 PM, Dylan Baker <baker.dylan.c at gmail.com> wrote:
> > It would be an unwelcome surprise if some test returns 'fails' or 'pas',
> > so rather than allowing such a thing to happen, assert that the result
> > is actually viable.
> > 
> > Signed-off-by: Dylan Baker <baker.dylan.c at gmail.com>
> > ---
> > 
> >  framework/core.py            |  2 +-
> >  framework/log.py             | 10 ++++--
> >  framework/tests/log_tests.py | 74 +++++++++++++++++++++++++++++++++++++++++---
> >  3 files changed, 77 insertions(+), 9 deletions(-)
> > 
> > diff --git a/framework/core.py b/framework/core.py
> > index fc59e3c..f6ae80f 100644
> > --- a/framework/core.py
> > +++ b/framework/core.py
> > 
> > @@ -435,7 +435,6 @@ class Test(object):
> >          '''
> >          log_current = log.get_current()
> > -        test_result = None
> > 
> >          # Run the test
> >          if env.execute:
> >              try:
> > @@ -471,6 +470,7 @@ class Test(object):
> >              else:
> >                  json_writer.write_dict_item(path, result)
> >          else:
> > +            test_result = 'dry-run'
> >              log.log()
> > 
> >          log.mark_complete(log_current, test_result)
> > 
> > diff --git a/framework/log.py b/framework/log.py
> > index 25ecdf5..7cdc940 100644
> > --- a/framework/log.py
> > +++ b/framework/log.py
> > @@ -21,6 +21,7 @@
> >  #
> > 
> >  import sys
> > +import collections
> > 
> >  from .threads import synchronized_self
> > 
> > @@ -37,7 +38,9 @@ class Log(object):
> >          self.__running = []
> >          self.__generator = (x for x in xrange(self.__total))
> >          self.__pad = len(str(self.__total))
> > 
> > -        self.__summary = {}
> > +        self.__summary_keys = set(['pass', 'fail', 'warn', 'crash', 'skip',
> > +                                   'dmesg-warn', 'dmesg-fail', 'dry-run'])
> > +        self.__summary = collections.defaultdict(lambda: 0)
> > 
> >      def _summary(self):
> >          return ", ".join("{0}: {1}".format(k, self.__summary[k])
> > 
> > @@ -67,8 +70,8 @@ class Log(object):
> >          # increment the number of completed tests
> >          self.__complete += 1
> > 
> > -        if result:
> > -            self.__summary[result] = self.__summary.get(result, 0) + 1
> > +        assert result in self.__summary_keys
> > +        self.__summary[result] += 1
> > 
> >      @synchronized_self
> >      def log(self):
> > @@ -80,6 +83,7 @@ class Log(object):
> >          """
> >          sys.stdout.write("{0} {1} {2}\r".format(
> >              self._percent(), self._summary(), self._running()))
> > +
> >          # Need to flush explicitly, otherwise it all gets buffered without a
> >          # newline.
> >          sys.stdout.flush()
> > 
> > diff --git a/framework/tests/log_tests.py b/framework/tests/log_tests.py
> > index 5f0640f..09d99e4 100644
> > --- a/framework/tests/log_tests.py
> > +++ b/framework/tests/log_tests.py
> > @@ -20,10 +20,14 @@
> > 
> >  """ Module provides tests for log.py module """
> > 
> > +import sys
> > +import itertools
> > 
> >  from types import *  # This is a special * safe module
> >  import nose.tools as nt
> >  from framework.log import Log
> > 
> > +valid_statuses = ('pass', 'fail', 'crash', 'warn', 'dmesg-warn',
> > +                  'dmesg-fail', 'skip', 'dry-run')
> > 
> >  def test_initialize_log():
> >      """ Test that Log initializes with """
> > 
> > @@ -63,11 +67,6 @@ def check_mark_complete_increment_summary(stat):
> >  def test_mark_complete_increment_summary():
> >      """ Generator that creates tests for self.__summary """
> > 
> > -
> > -
> > -    valid_statuses = ('pass', 'fail', 'crash', 'warn', 'dmesg-warn',
> > -                      'dmesg-fail', 'skip')
> > -
> > 
> >      yieldable = check_mark_complete_increment_summary
> > 
> >      for stat in valid_statuses:
> > @@ -83,3 +82,68 @@ def test_mark_complete_removes_complete():
> >      log.mark_complete(ret, 'pass')
> >      nt.assert_not_in(ret, log._Log__running,
> >                       msg="Running tests not removed from running list")
> > 
> > +
> > +
> > +@nt.raises(AssertionError)
> > +def check_mark_complete_incrment_summary_bad():
> > +    log = Log(100)
> > +    ret = log.get_current()
> > +    log.mark_complete(ret, 'fails')
> > +
> > +
> > +def check_output(stat):
> > +    """ Test that dry output returns the expected values """
> > +    log = Log(100)
> > +
> > +    for x in stat:
> > +        ret = log.get_current()
> > +        log.mark_complete(ret, x)
> > +
> > +    # Print to stdout, and then capture it, the capture absolutely needs to
> > +    # imediatly after log.log()
> > +    log.log()
> > +    output = sys.stdout.getvalue().strip()  # sys.stdout is a StringIO in nose
> > +
> > +    # Create a count of the statuses in alphabetical order. What we need to end
> > +    # up with is something like: 'crash': 2, 'skip': 1
> > +    s = []
> > +    for x in sorted(set(stat)):
> > +        s.append("{0}: {1}".format(x, stat.count(x)))
> > +
> > +    expected = '[{0}/100] {1} Running Test(s):'.format(
> > +            str(len(stat)).zfill(3),
> > +            ", ".join(s))
> 
> This is a dangerous sort of test. Any change in the output will make
> it fail, and I can pretty much guarantee that people won't run the
> tests. I wonder if it'd be better to test for the bits you expect,
> e.g. n/y and that each status: val is in there?

Hmm, that is a good point. I'll have to think about that.
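Maybe checking just the pieces we care about would be less fragile. A rough,
untested sketch (check_output_contains is a made-up name; Log, nt, and sys are
the same things log_tests.py already imports):

    def check_output_contains(stat):
        """ Check the log output for expected fragments, not the exact text """
        log = Log(100)
        for x in stat:
            log.mark_complete(log.get_current(), x)

        log.log()
        output = sys.stdout.getvalue().strip()  # sys.stdout is a StringIO in nose

        # The completed/total counter should be present, zero padded to the
        # width of the total
        nt.assert_in('[{0}/100]'.format(str(len(stat)).zfill(3)), output)

        # Each status should be reported with its count, wherever it ends up
        # in the line
        for x in sorted(set(stat)):
            nt.assert_in('{0}: {1}'.format(x, stat.count(x)), output)

That way reordering the fields or tweaking the trailing 'Running Test(s):'
text wouldn't invalidate the whole generator.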

> 
> > +
> > +    print expected
> > +    nt.assert_equal(output, expected,
> > +                    msg="Did not return execpted output: " + expected)
> 
> expected. (dyslexics, untie!)

Ah yes, I really need to just leave spell checking on in vim...

> 
> > +
> > +
> > +def gen_test_output():
> > +    """ Generate a set of tests for checking __summary output
> > +
> > +    This generates severl hundered tests
> > +
> > +    """
> > +    yieldable = check_output
> > +
> > +    # test dry run seperately, we don't want to test it mixed in with other
> > +    # statuses, since it shouldn't be mixed
> > +    yieldable.description = "Test output for status combinations: dry-run"
> > +    yield yieldable, ['dry-run']
> > +
> > +    yieldable.description = "Test output for status combinations: dry-run dry-run"
> > +    yield yieldable, ['dry-run', 'dry-run']
> > +
> > +    yieldable.description = "Test output for status combinations: dry-run dry-run dry-run"
> > +    yield yieldable, ['dry-run', 'dry-run', 'dry-run']
> > +
> > +    # All combinations except dry-run up to 3 in length. We could generate
> > +    # more, but it quickly becomes overwhelming, and the usefulness is
> > +    # uncertain
> > +    for r in xrange(1, 4):
> > +        for stat in itertools.product(valid_statuses[:-1],
> > +                                      repeat=r):
> > +            yieldable.description = "Test output for status: " + \
> > +                    " ".join([str(x) for x in stat])
> 
> The [ ] are unnecessary. Generator expressions have been around since
> 2.4, iirc. (With the additional benefit that x won't leak out into the
> function scope... not that one would really care.)

I'll make that change.
I didn't think that list comprehensions leaked, since they're supposed to be
usable anywhere map() and filter() are used without any additional side
effects. Maybe I'm wrong though.
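
(For what it's worth, in 2.x the comprehension variable really does end up in
the enclosing scope, while a generator expression keeps its variable to
itself. A quick throwaway example, not part of the patch:

    def demo():
        stats = ('pass', 'fail')
        print " ".join([str(x) for x in stats])
        print x  # prints 'fail' -- x leaked out of the list comprehension

        print " ".join(str(y) for y in stats)
        print y  # NameError -- the genexp kept y in its own scope

Python 3 gives list comprehensions their own scope too, but we're on 2.x with
xrange and print statements anyway.)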

> 
> > +            yield yieldable, stat
> > --
> > 1.8.5.5