[Piglit] [PATCH] add new piglit-summary.py script for printing summaries of results file(s)
Dylan Baker
baker.dylan.c at gmail.com
Fri Apr 19 20:54:33 PDT 2013
I have some concerns with this patch:
1) It has two unused imports: cgi, and os (os.path is used, but plain os
is not)
2) getopt: I've been trying to get rid of getopt and replace it with
argparse (I have a patch set ready to go that replaces the rest of optparse
and getopt with argparse); a rough argparse equivalent is sketched after
this list
3) the name piglit-summary.py: if the project ever wanted a unified
summary program that could output different summary formats (which seems
like a good idea as the project gains more and more summary output types),
the logical name for it would be piglit-summary.py. Taking that name now
creates friction, since adding such a feature later would mean changing
the functionality of an existing tool in the project.
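
For reference, here is a minimal argparse sketch covering the same options
(the option names come from the patch below; the parser wiring is my rough
equivalent, not the pending patch set):

    import argparse

    # hypothetical argparse port of the getopt loop in the patch below
    parser = argparse.ArgumentParser(
        description='Print a simple summary of piglit results file(s).')
    parser.add_argument('-s', '--summary', action='store_true',
                        help='only display pass/fail summary')
    parser.add_argument('-d', '--diff', action='store_true',
                        help='only display differences between result files')
    parser.add_argument('-l', '--list', dest='listfile',
                        help='use test results from a list file')
    parser.add_argument('results', nargs='+',
                        help='one or more results files')
    args = parser.parse_args()

As a bonus, argparse generates the --help text automatically, so the
hand-written usage() string could go away entirely.
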
-Dylan
On Thu, Apr 18, 2013 at 6:21 PM, Brian Paul <brianp at vmware.com> wrote:
> If only one result file is specified, just print all the tests
> followed by the outcome. For example:
>
> fbo/FBO blit from missing attachment: pass
> fbo/FBO blit to missing attachment: fail
> fbo/fbo-1d: pass
> fbo/fbo-3d: crash
> [...]
>
> If multiple result files are specified, we'll print pass/fail/etc
> for each file. Example:
>
> fbo/FBO blit from missing attachment: pass pass
> fbo/FBO blit to missing attachment: fail pass
> [...]
>
> If -s (--summary) is specified, only print a summary of the number of
> passes, fails, crashes, etc.
>
> If -d (--diff) is specified with multiple result files, only print the
> tests which had different outcomes. Good for spotting regressions.
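>
> For example (hypothetical result paths), to compare two runs and show
> only the tests whose outcome changed:
>
>   ./piglit-summary.py -d results/before results/after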
> ---
>  piglit-summary.py |  158 +++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 files changed, 158 insertions(+), 0 deletions(-)
> create mode 100755 piglit-summary.py
>
> diff --git a/piglit-summary.py b/piglit-summary.py
> new file mode 100755
> index 0000000..ae1e5ca
> --- /dev/null
> +++ b/piglit-summary.py
> @@ -0,0 +1,158 @@
> +#!/usr/bin/env python
> +#
> +# Permission is hereby granted, free of charge, to any person
> +# obtaining a copy of this software and associated documentation
> +# files (the "Software"), to deal in the Software without
> +# restriction, including without limitation the rights to use,
> +# copy, modify, merge, publish, distribute, sublicense, and/or
> +# sell copies of the Software, and to permit persons to whom the
> +# Software is furnished to do so, subject to the following
> +# conditions:
> +#
> +# This permission notice shall be included in all copies or
> +# substantial portions of the Software.
> +#
> +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
> +# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
> +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
> +# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHOR(S) BE
> +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
> +# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
> +# OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
> +# DEALINGS IN THE SOFTWARE.
> +
> +# Print a very simple summary of piglit results file(s).
> +# When multiple result files are specified, compare the results
> +# of each test run to look for differences/regressions.
> +#
> +# Brian Paul
> +# April 2013
> +
> +
> +from getopt import getopt, GetoptError
> +import cgi
> +import os, os.path
> +import sys
> +import string
> +
> +sys.path.append(os.path.dirname(os.path.realpath(sys.argv[0])))
> +import framework.core as core
> +import framework.summary
> +
> +
> +#############################################################################
> +##### Main program
> +#############################################################################
> +def usage():
> + USAGE = """\
> +Usage: %(progName)s [options] resultsfile [...]
> +
> +Print path/name of each test and the result.
> +When multiple files are specified, count the number of differences in results.
> +Tests are sorted by name.
> +
> +Options:
> + -h, --help Show this message
> + -s, --summary Only display pass/fail summary
> +  -d, --diff           Only display the differences between multiple result files
> + -l, --list=listfile Use test results from a list file
> +"""
> + print USAGE % {'progName': sys.argv[0]}
> + sys.exit(1)
> +
> +
> +def parse_listfile(filename):
> + file = open(filename, "r")
> + code = "".join([s for s in file])
> + file.close()
> + return eval(code)
> +
> +def loadresult(descr):
> + result = core.loadTestResults(descr[0])
> + if len(descr) > 1:
> + result.__dict__.update(descr[1])
> + return result
> +
> +def main():
> + try:
> +        options, args = getopt(sys.argv[1:], "hsdl:", [ "help", "summary", "diff", "list=" ])
> + except GetoptError:
> + usage()
> +
> + OptionList = []
> + CountsOnly = False
> + DiffOnly = False
> + for name, value in options:
> + if name == "-h" or name == "--help":
> + usage()
> + elif name == "-s" or name == "--summary":
> + CountsOnly = True
> + elif name == "-d" or name == "--diff":
> + DiffOnly = True
> + elif name == "-l" or name == "--list":
> + OptionList += parse_listfile(value)
> +
> + OptionList += [[name] for name in args[0:]]
> +
> + if len(args) < 1 or len(OptionList) == 0:
> + usage()
> +
> + # make list of results
> + results = []
> + for result_dir in OptionList:
> + results.append(loadresult(result_dir))
> +
> + summary = framework.summary.Summary(results)
> +
> + # possible test outcomes
> + possible_results = [ "pass", "fail", "crash", "skip", "warn" ]
> + if len(OptionList) > 1:
> + possible_results.append("changes")
> +
> + # init the summary counters
> + counts = {}
> + for result in possible_results:
> + counts[result] = 0
> +
> + # get all results
> + all = summary.allTests()
> +
> + # sort the results list by path
> + all = sorted(all, key=lambda test: test.path)
> +
> + # loop over the tests
> + for test in all:
> + results = []
> + anyChange = False
> + # loop over the results for multiple runs
> + for j in range(len(summary.testruns)):
> +            outcome = test.results[j]['result'] # 'pass', 'fail', etc.
> + # check for different results between multiple runs
> + if len(results) >= 1 and not outcome in results:
> + # something changed
> + counts["changes"] += 1
> + anyChange = True
> + results.append(outcome)
> +
> + # if all test runs had the same outcome:
> + if not anyChange:
> + counts[outcome] += 1
> +
> + # print the individual test result line
> + if DiffOnly:
> + if anyChange:
> +                print "%s: %s" % (test.path, string.join(results," "))
> +        elif not CountsOnly:
> +            print "%s: %s" % (test.path, string.join(results," "))
> +
> + # print the summary info
> + print "summary:"
> + total = 0
> + for result in possible_results:
> + print " %7s: %5d" % (result, counts[result])
> + total += counts[result]
> + print " total: %5d" % total
> +
> +
> +if __name__ == "__main__":
> + main()
> --
> 1.7.3.4
>