[Piglit] [PATCH 3/3] Optionally capture dmesg changes for each test and report them in a summary

Aaron Watry awatry at gmail.com
Mon Sep 16 13:40:42 PDT 2013


Stupid question, but I'm stuck in an airport and can't research the answer easily right now...  Will this throw "false" positives as a result of DPM performance-level changes on kernel 3.11+ (assuming DPM is enabled)?  Is there any way to filter these messages out besides disabling DPM?
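
Something like a userspace post-filter on the captured diff might work, e.g.
(completely untested, and the pattern below is just a placeholder -- I don't
remember offhand what the DPM state-change messages actually look like):

    import re

    # Hypothetical: known-benign kernel messages to strip from a dmesg diff
    # before letting the diff change a test's result.
    BENIGN = [
        re.compile(r'\[drm\].*power'),  # placeholder for radeon DPM chatter
    ]

    def filter_benign(dmesg_diff):
        # Keep only lines that match none of the benign patterns.
        return '\n'.join(line for line in dmesg_diff.splitlines()
                         if not any(p.search(line) for p in BENIGN))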

--Aaron

On Sep 16, 2013, at 1:08 PM, "Marek Olšák" <maraeo at gmail.com> wrote:

> The Radeon driver writes GPU page faults to dmesg and we need to know which
> test caused them.
> 
> If there is any change in dmesg during a test run, the test result is changed
> as follows:
> * pass -> dmesg-warn
> * warn -> dmesg-warn
> * fail -> dmesg-fail
> Dmesg is captured before and after the test and the difference between the two
> is stored in the test summary.
> 
> The piglit-run.py parameter which enables this behavior is --dmesg. Using
> -c0 (concurrency off) is also recommended, so that dmesg changes can be
> attributed to a single test.
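
So in table form, the promotion amounts to this (my paraphrase of the rules
above, not code from the patch):

    # A non-empty dmesg diff promotes pass/warn/fail; everything else
    # (skip, crash, ...) is left alone.
    PROMOTIONS = {'pass': 'dmesg-warn',
                  'warn': 'dmesg-warn',
                  'fail': 'dmesg-fail'}

    def promote(result, dmesg_diff):
        if dmesg_diff:
            return PROMOTIONS.get(result, result)
        return result
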
> ---
> framework/core.py          |  5 ++--
> framework/exectest.py      | 64 +++++++++++++++++++++++++++++++++++++++++-----
> framework/shader_test.py   |  4 +--
> framework/summary.py       | 26 ++++++++++++++-----
> piglit-run.py              |  6 ++++-
> templates/index.css        |  6 ++++-
> templates/test_result.mako |  7 ++++-
> 7 files changed, 98 insertions(+), 20 deletions(-)
> 
> diff --git a/framework/core.py b/framework/core.py
> index 150a70c..9efae63 100644
> --- a/framework/core.py
> +++ b/framework/core.py
> @@ -364,13 +364,14 @@ class TestrunResult:
> 
> class Environment:
>     def __init__(self, concurrent=True, execute=True, include_filter=[],
> -                 exclude_filter=[], valgrind=False):
> +                 exclude_filter=[], valgrind=False, dmesg=False):
>         self.concurrent = concurrent
>         self.execute = execute
>         self.filter = []
>         self.exclude_filter = []
>         self.exclude_tests = set()
>         self.valgrind = valgrind
> +        self.dmesg = dmesg
> 
>         """
>         The filter lists that are read in should be a list of string objects,
> @@ -448,7 +449,7 @@ class Test:
>             try:
>                 status("running")
>                 time_start = time.time()
> -                result = self.run(env.valgrind)
> +                result = self.run(env.valgrind, env.dmesg)
>                 time_end = time.time()
>                 if 'time' not in result:
>                     result['time'] = time_end - time_start
> diff --git a/framework/exectest.py b/framework/exectest.py
> index 6ee550c..2e9f0a5 100644
> --- a/framework/exectest.py
> +++ b/framework/exectest.py
> @@ -35,8 +35,48 @@ if 'PIGLIT_PLATFORM' in os.environ:
> else:
>     PIGLIT_PLATFORM = ''
> 
> -
> -# ExecTest: A shared base class for tests that simply run an executable.
> +def read_dmesg():
> +    try:
> +        proc = subprocess.Popen('dmesg',
> +                                stdout=subprocess.PIPE,
> +                                universal_newlines=True)
> +        out, err = proc.communicate()
> +    except OSError:
> +        return None
> +    return out
> +
> +def get_last_dmesg_timestamp(text):
> +    for i in reversed(range(0, len(text)-1)):
> +        if text[i] == '\n':
> +            line = text[i+1:]
> +            for right_bracket, char in enumerate(line):
> +                if char == ']':
> +                    return line[:right_bracket+1]
> +            return None
> +    return None
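
If I'm reading this right, get_last_dmesg_timestamp() returns the "[...]"
timestamp prefix of the last complete line in the log. A roughly equivalent
(untested) formulation, just to check my understanding:

    def get_last_dmesg_timestamp(text):
        # Take the last non-empty line and return its "[...]" prefix, if any.
        lines = text.strip().splitlines()
        if not lines:
            return None
        pos = lines[-1].find(']')
        return lines[-1][:pos + 1] if pos != -1 else None
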
> +
> +def get_dmesg_diff(old, new):
> +    ts = get_last_dmesg_timestamp(old)
> +    if ts is None:
> +        return ''
> +
> +    start = 0
> +    end = len(new)-1
> +
> +    while True:
> +        pos = new.find(ts, start, end)
> +        if pos == -1:
> +            break
> +        start = pos+len(ts)
> +
> +    pos = new.find('\n', start, end)
> +    if pos == -1:
> +        return ''
> +    start = pos+1
> +    return new[start:end]
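
So the diff is "everything in the new log after the last occurrence of the
old log's final timestamp". Tracing it on made-up log text, assuming I read
the indices correctly:

    old = "[1.0] boot\n[2.0] radeon: init\n"
    new = old + "[3.0] GPU fault\n"
    # The last timestamp of 'old' is "[2.0]"; everything after that line
    # in 'new' is the diff, so get_dmesg_diff(old, new) == "[3.0] GPU fault"
    # (end = len(new)-1 trims the trailing newline).
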
> +
> +
> +# ExecTest: A shared base class for tests that simply run an executable.
> class ExecTest(Test):
>     def __init__(self, command):
>         Test.__init__(self)
> @@ -49,11 +89,11 @@ class ExecTest(Test):
> 
>         self.skip_test = self.check_for_skip_scenario(command)
> 
> -    def interpretResult(self, out, returncode, results):
> +    def interpretResult(self, out, returncode, results, dmesg):
>         raise NotImplementedError
>         return out
> 
> -    def run(self, valgrind):
> +    def run(self, valgrind, dmesg):
>         """
>         Run a test.  The return value will be a dictionary with keys
>         including 'result', 'info', 'returncode' and 'command'.
> @@ -69,6 +109,7 @@ class ExecTest(Test):
> 
>         if self.command is not None:
>             command = self.command
> +            dmesg_diff = ''
> 
>             if valgrind:
>                 command[:0] = ['valgrind', '--quiet', '--error-exitcode=1',
> @@ -81,8 +122,13 @@ class ExecTest(Test):
>                     err = ""
>                     returncode = None
>                 else:
> +                    old_dmesg = ''
> +                    if dmesg:
> +                        old_dmesg = read_dmesg()
>                     (out, err, returncode) = \
>                         self.get_command_result(command, fullenv)
> +                    if dmesg:
> +                        dmesg_diff = get_dmesg_diff(old_dmesg, read_dmesg())
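
One thing I noticed while reading this: read_dmesg() returns None if running
dmesg fails with OSError, and get_dmesg_diff() would then crash on len(None)
in get_last_dmesg_timestamp(). Probably worth a guard for that case.
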
> 
>                 # https://bugzilla.gnome.org/show_bug.cgi?id=680214 is
>                 # affecting many developers.  If we catch it
> @@ -117,7 +163,7 @@ class ExecTest(Test):
>                 results['result'] = 'skip'
>             else:
>                 results['result'] = 'fail'
> -                out = self.interpretResult(out, returncode, results)
> +                out = self.interpretResult(out, returncode, results, dmesg_diff)
> 
>             crash_codes = [
>                 # Unix: terminated by a signal
> @@ -161,6 +207,7 @@ class ExecTest(Test):
>                                                              err, out)
>             results['returncode'] = returncode
>             results['command'] = ' '.join(self.command)
> +            results['dmesg'] = dmesg_diff
> 
>             self.handleErr(results, err)
> 
> @@ -217,11 +264,16 @@ class PlainExecTest(ExecTest):
>         # Prepend testBinDir to the path.
>         self.command[0] = testBinDir + self.command[0]
> 
> -    def interpretResult(self, out, returncode, results):
> +    def interpretResult(self, out, returncode, results, dmesg):
>         outlines = out.split('\n')
>         outpiglit = map(lambda s: s[7:],
>                         filter(lambda s: s.startswith('PIGLIT:'), outlines))
> 
> +        if dmesg != '':
> +            outpiglit = map(lambda s: s.replace("'pass'", "'dmesg-warn'"), outpiglit)
> +            outpiglit = map(lambda s: s.replace("'warn'", "'dmesg-warn'"), outpiglit)
> +            outpiglit = map(lambda s: s.replace("'fail'", "'dmesg-fail'"), outpiglit)
> +
>         if len(outpiglit) > 0:
>             try:
>                 for piglit in outpiglit:
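
The raw string replacement on the "PIGLIT:" result lines also seems a bit
fragile (e.g. a subtest literally named 'pass' would get rewritten too);
remapping the parsed status through a dict, along the lines of the sketch
above, might be more robust.
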
> diff --git a/framework/shader_test.py b/framework/shader_test.py
> index b80af24..c91c22e 100755
> --- a/framework/shader_test.py
> +++ b/framework/shader_test.py
> @@ -243,7 +243,7 @@ class ShaderTest(PlainExecTest):
>         self.__command = [runner] + self.__shader_runner_args
>         return self.__command
> 
> -    def run(self, valgrind=False):
> +    def run(self, valgrind=False, dmesg=False):
>         """ Parse the test file's [require] block to determine which
>         executable is needed to run the test. Then run the executable on the
>         test file."""
> @@ -259,7 +259,7 @@ class ShaderTest(PlainExecTest):
>                 # parsing the test file discovered an error.
>                 return self.__result
> 
> -            return PlainExecTest.run(self, valgrind)
> +            return PlainExecTest.run(self, valgrind, dmesg)
> 
> 
> def _usage_error():
> diff --git a/framework/summary.py b/framework/summary.py
> index 1cdbab7..5ce327c 100644
> --- a/framework/summary.py
> +++ b/framework/summary.py
> @@ -325,10 +325,14 @@ class Summary:
>                     return 2
>                 elif status == 'warn':
>                     return 3
> -                elif status == 'fail':
> +                elif status == 'dmesg-warn':
>                     return 4
> -                elif status == 'crash':
> +                elif status == 'fail':
>                     return 5
> +                elif status == 'dmesg-fail':
> +                    return 6
> +                elif status == 'crash':
> +                    return 7
> 
>             openGroup('fake')
>             openGroup('all')
> @@ -421,12 +425,16 @@ class Summary:
>                 return 1
>             elif status == 'warn':
>                 return 2
> -            elif status == 'fail':
> +            elif status == 'dmesg-warn':
>                 return 3
> -            elif status == 'skip':
> +            elif status == 'fail':
>                 return 4
> -            elif status == 'crash':
> +            elif status == 'dmesg-fail':
>                 return 5
> +            elif status == 'skip':
> +                return 6
> +            elif status == 'crash':
> +                return 7
>             elif status == 'special':
>                 return 0
> 
> @@ -450,7 +458,7 @@ class Summary:
>                     # If the result contains a value other than 1 (pass) or 4
>                     # (skip) it is a problem. Skips are not problems because
>                     # they have their own page.
> -                    if [i for e in [2, 3, 5] for i in status if e is i]:
> +                    if [i for e in [2, 3, 4, 5, 7] for i in status if e is i]:
>                         self.tests['problems'].append(test)
> 
>                 if 'skipped' in lists:
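
Nit: the comment above still says "other than 1 (pass) or 4 (skip)", but
with the new ordering skip is 6, so it should read "1 (pass) or 6 (skip)".
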
> @@ -480,7 +488,8 @@ class Summary:
>         Private: Find the total number of pass, fail, crash, skip, and warn in
>         the *last* set of results stored in self.results.
>         """
> -        self.totals = {'pass': 0, 'fail': 0, 'crash': 0, 'skip': 0, 'warn': 0}
> +        self.totals = {'pass': 0, 'fail': 0, 'crash': 0, 'skip': 0, 'warn': 0,
> +                       'dmesg-warn': 0, 'dmesg-fail': 0}
> 
>         for test in self.results[-1].tests.values():
>             self.totals[test['result']] += 1
> @@ -547,6 +556,7 @@ class Summary:
>                         info=value.get('info', 'None'),
>                         traceback=value.get('traceback', 'None'),
>                         command=value.get('command', 'None'),
> +                        dmesg=value.get('dmesg', 'None'),
>                         css=path.relpath(resultCss, tPath),
>                         index=path.relpath(index, tPath)))
>                     file.close()
> @@ -625,6 +635,8 @@ class Summary:
>         print "      crash: %d" % self.totals['crash']
>         print "       skip: %d" % self.totals['skip']
>         print "       warn: %d" % self.totals['warn']
> +        print " dmesg-warn: %d" % self.totals['dmesg-warn']
> +        print " dmesg-fail: %d" % self.totals['dmesg-fail']
>         if self.tests['changes']:
>             print "    changes: %d" % len(self.tests['changes'])
>             print "      fixes: %d" % len(self.tests['fixes'])
> diff --git a/piglit-run.py b/piglit-run.py
> index 7945b21..169c621 100755
> --- a/piglit-run.py
> +++ b/piglit-run.py
> @@ -72,6 +72,9 @@ def main():
>     parser.add_argument("--valgrind",
>                         action="store_true",
>                         help="Run tests in valgrind's memcheck")
> +    parser.add_argument("--dmesg",
> +                        action="store_true",
> +                        help="Capture the difference in dmesg before and after each test")
>     parser.add_argument("testProfile",
>                         metavar="<Path to test profile>",
>                         help="Path to testfile to run")
> @@ -108,7 +111,8 @@ def main():
>                            exclude_filter=args.exclude_tests,
>                            include_filter=args.include_tests,
>                            execute=args.execute,
> -                           valgrind=args.valgrind)
> +                           valgrind=args.valgrind,
> +                           dmesg=args.dmesg)
> 
>     # Change working directory to the root of the piglit directory
>     piglit_dir = path.dirname(path.realpath(sys.argv[0]))
> diff --git a/templates/index.css b/templates/index.css
> index 875333f..577370c 100644
> --- a/templates/index.css
> +++ b/templates/index.css
> @@ -36,7 +36,7 @@ td:first-child > div {
>    background-color: #c8c838
> }
> 
> -td.skip, td.warn, td.fail, td.pass, td.trap, td.abort, td.crash {
> +td.skip, td.warn, td.fail, td.pass, td.trap, td.abort, td.crash, td.dmesg-warn, td.dmesg-fail {
>    text-align: right;
> }
> 
> @@ -59,9 +59,13 @@ tr:nth-child(even) td.skip  { background-color: #a0a0a0; }
> 
> tr:nth-child(odd)  td.warn  { background-color: #ff9020; }
> tr:nth-child(even) td.warn  { background-color: #ef8010; }
> +tr:nth-child(odd)  td.dmesg-warn  { background-color: #ff9020; }
> +tr:nth-child(even) td.dmesg-warn  { background-color: #ef8010; }
> 
> tr:nth-child(odd)  td.fail  { background-color: #ff2020; }
> tr:nth-child(even) td.fail  { background-color: #e00505; }
> +tr:nth-child(odd)  td.dmesg-fail  { background-color: #ff2020; }
> +tr:nth-child(even) td.dmesg-fail  { background-color: #e00505; }
> 
> tr:nth-child(odd)  td.trap  { background-color: #111111; }
> tr:nth-child(even) td.trap  { background-color: #000000; }
> diff --git a/templates/test_result.mako b/templates/test_result.mako
> index 410dbb4..b23fb8e 100644
> --- a/templates/test_result.mako
> +++ b/templates/test_result.mako
> @@ -46,7 +46,12 @@
>           <pre>${traceback}</pre>
>         </td>
>       </tr>
> -
> +      <tr>
> +        <td>dmesg</td>
> +        <td>
> +          <pre>${dmesg}</pre>
> +        </td>
> +      </tr>
>     </table>
>     <p><a href="${index}">Back to summary</a></p>
>   </body>
> -- 
> 1.8.1.2
> 