[Piglit] [PATCH 3/3] Optionally capture dmesg changes for each test and report them in a summary
Marek Olšák
maraeo at gmail.com
Fri Sep 20 17:31:06 PDT 2013
The Radeon driver writes GPU page faults to dmesg and we need to know which
tests caused them.
If there is any change in dmesg during a test run, the test result is changed
as follows:
* pass -> dmesg-warn
* warn -> dmesg-warn
* fail -> dmesg-fail
Dmesg is captured before and after each test and the difference between the two
is stored in the test summary.
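Roughly, the capture looks like this (a minimal standalone sketch, not part of
the patch; it ignores the ring-buffer wraparound handling that the real
get_dmesg_diff has to deal with):

import subprocess

def read_dmesg():
    # Snapshot the current kernel log as a string.
    return subprocess.check_output(['dmesg'], universal_newlines=True).rstrip('\n')

before = read_dmesg()
# ... run a single test here ...
after = read_dmesg()

# If the ring buffer did not wrap, the new messages are simply the tail of 'after'.
diff = after[len(before):].lstrip('\n') if after.startswith(before) else after
# A non-empty diff means the kernel logged something during the test:
# pass/warn -> dmesg-warn, fail -> dmesg-fail.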
The piglit-run.py parameter that enables this behavior is --dmesg. Using -c0 is
also recommended, so that dmesg messages from concurrently running tests are not
attributed to the wrong test.
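For example (the test profile and results paths are illustrative):

./piglit-run.py --dmesg -c0 tests/quick.tests results/quick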
v2: - address some of Dylan's other remarks, mainly bug fixes
- fix get_dmesg_diff
---
framework/core.py | 5 +++--
framework/exectest.py | 54 ++++++++++++++++++++++++++++++++++++++++------
framework/gleantest.py | 6 +++---
framework/shader_test.py | 6 +++---
framework/summary.py | 14 +++++++-----
piglit-run.py | 6 +++++-
templates/index.css | 6 +++++-
templates/test_result.mako | 7 +++++-
tests/es3conform.tests | 6 +++---
tests/gtf.tests | 6 +++---
tests/igt.tests | 6 +++---
tests/oglconform.tests | 6 +++---
12 files changed, 93 insertions(+), 35 deletions(-)
diff --git a/framework/core.py b/framework/core.py
index 150a70c..20c0c11 100644
--- a/framework/core.py
+++ b/framework/core.py
@@ -364,13 +364,14 @@ class TestrunResult:
class Environment:
def __init__(self, concurrent=True, execute=True, include_filter=[],
- exclude_filter=[], valgrind=False):
+ exclude_filter=[], valgrind=False, dmesg=False):
self.concurrent = concurrent
self.execute = execute
self.filter = []
self.exclude_filter = []
self.exclude_tests = set()
self.valgrind = valgrind
+ self.dmesg = dmesg
"""
The filter lists that are read in should be a list of string objects,
@@ -448,7 +449,7 @@ class Test:
try:
status("running")
time_start = time.time()
- result = self.run(env.valgrind)
+ result = self.run(env)
time_end = time.time()
if 'time' not in result:
result['time'] = time_end - time_start
diff --git a/framework/exectest.py b/framework/exectest.py
index 6ee550c..ebf8d28 100644
--- a/framework/exectest.py
+++ b/framework/exectest.py
@@ -25,6 +25,7 @@ import os
import subprocess
import shlex
import types
+import re
from core import Test, testBinDir, TestResult
@@ -36,7 +37,35 @@ else:
PIGLIT_PLATFORM = ''
-# ExecTest: A shared base class for tests that simply run an executable.
+def read_dmesg():
+ proc = subprocess.Popen('dmesg', stdout=subprocess.PIPE)
+ return proc.communicate()[0].rstrip('\n')
+
+def get_dmesg_diff(old, new):
+ # Note that dmesg is a ring buffer, i.e. lines at the beginning may
+ # be removed when new lines are added.
+
+ # Get the last dmesg timestamp from the old dmesg as string.
+ last = old.split('\n')[-1]
+ ts = last[:last.find(']')+1]
+ if ts == '':
+ return ''
+
+    # Find the last occurrence of the timestamp.
+ pos = new.find(ts)
+ if pos == -1:
+ return new # dmesg was completely overwritten by new messages
+
+ while pos != -1:
+ start = pos
+ pos = new.find(ts, pos+len(ts))
+
+ # Find the next line and return the rest of the string.
+ nl = new.find('\n', start+len(ts))
+ return new[nl+1:] if nl != -1 else ''
+
+
+# ExecTest: A shared base class for tests that simply runs an executable.
class ExecTest(Test):
def __init__(self, command):
Test.__init__(self)
@@ -49,11 +78,11 @@ class ExecTest(Test):
self.skip_test = self.check_for_skip_scenario(command)
- def interpretResult(self, out, returncode, results):
+ def interpretResult(self, out, returncode, results, dmesg):
raise NotImplementedError
return out
- def run(self, valgrind):
+ def run(self, env):
"""
Run a test. The return value will be a dictionary with keys
including 'result', 'info', 'returncode' and 'command'.
@@ -70,19 +99,24 @@ class ExecTest(Test):
if self.command is not None:
command = self.command
- if valgrind:
+ if env.valgrind:
command[:0] = ['valgrind', '--quiet', '--error-exitcode=1',
'--tool=memcheck']
i = 0
+ dmesg_diff = ''
while True:
if self.skip_test:
out = "PIGLIT: {'result': 'skip'}\n"
err = ""
returncode = None
else:
+ if env.dmesg:
+ old_dmesg = read_dmesg()
(out, err, returncode) = \
self.get_command_result(command, fullenv)
+ if env.dmesg:
+ dmesg_diff = get_dmesg_diff(old_dmesg, read_dmesg())
# https://bugzilla.gnome.org/show_bug.cgi?id=680214 is
# affecting many developers. If we catch it
@@ -117,7 +151,7 @@ class ExecTest(Test):
results['result'] = 'skip'
else:
results['result'] = 'fail'
- out = self.interpretResult(out, returncode, results)
+ out = self.interpretResult(out, returncode, results, dmesg_diff)
crash_codes = [
# Unix: terminated by a signal
@@ -138,7 +172,7 @@ class ExecTest(Test):
elif returncode != 0:
results['note'] = 'Returncode was {0}'.format(returncode)
- if valgrind:
+ if env.valgrind:
# If the underlying test failed, simply report
# 'skip' for this valgrind test.
if results['result'] != 'pass':
@@ -161,6 +195,7 @@ class ExecTest(Test):
err, out)
results['returncode'] = returncode
results['command'] = ' '.join(self.command)
+ results['dmesg'] = dmesg_diff
self.handleErr(results, err)
@@ -217,11 +252,16 @@ class PlainExecTest(ExecTest):
# Prepend testBinDir to the path.
self.command[0] = testBinDir + self.command[0]
- def interpretResult(self, out, returncode, results):
+ def interpretResult(self, out, returncode, results, dmesg):
outlines = out.split('\n')
outpiglit = map(lambda s: s[7:],
filter(lambda s: s.startswith('PIGLIT:'), outlines))
+ if dmesg != '':
+ outpiglit = map(lambda s: s.replace("'pass'", "'dmesg-warn'"), outpiglit)
+ outpiglit = map(lambda s: s.replace("'warn'", "'dmesg-warn'"), outpiglit)
+ outpiglit = map(lambda s: s.replace("'fail'", "'dmesg-fail'"), outpiglit)
+
if len(outpiglit) > 0:
try:
for piglit in outpiglit:
diff --git a/framework/gleantest.py b/framework/gleantest.py
index 2a1029f..e5ea7b3 100644
--- a/framework/gleantest.py
+++ b/framework/gleantest.py
@@ -41,9 +41,9 @@ class GleanTest(ExecTest):
"+"+name] + GleanTest.globalParams)
self.name = name
- def interpretResult(self, out, returncode, results):
+ def interpretResult(self, out, returncode, results, dmesg):
if out.find('FAIL') >= 0:
- results['result'] = 'fail'
+ results['result'] = 'dmesg-fail' if dmesg != '' else 'fail'
else:
- results['result'] = 'pass'
+ results['result'] = 'dmesg-warn' if dmesg != '' else 'pass'
return out
diff --git a/framework/shader_test.py b/framework/shader_test.py
index b80af24..82820fd 100755
--- a/framework/shader_test.py
+++ b/framework/shader_test.py
@@ -31,7 +31,7 @@ import re
import sys
import textwrap
-from core import testBinDir, Group, Test, TestResult
+from core import testBinDir, Group, Test, TestResult, Environment
from exectest import PlainExecTest
"""This module enables running shader tests.
@@ -243,7 +243,7 @@ class ShaderTest(PlainExecTest):
self.__command = [runner] + self.__shader_runner_args
return self.__command
- def run(self, valgrind=False):
+ def run(self, env = Environment()):
""" Parse the test file's [require] block to determine which
executable is needed to run the test. Then run the executable on the
test file."""
@@ -259,7 +259,7 @@ class ShaderTest(PlainExecTest):
# parsing the test file discovered an error.
return self.__result
- return PlainExecTest.run(self, valgrind)
+ return PlainExecTest.run(self, env)
def _usage_error():
diff --git a/framework/summary.py b/framework/summary.py
index 1cdbab7..107b1c7 100644
--- a/framework/summary.py
+++ b/framework/summary.py
@@ -323,9 +323,9 @@ class Summary:
return 1
elif status == 'pass':
return 2
- elif status == 'warn':
+ elif status in ['warn', 'dmesg-warn']:
return 3
- elif status == 'fail':
+ elif status in ['fail', 'dmesg-fail']:
return 4
elif status == 'crash':
return 5
@@ -419,9 +419,9 @@ class Summary:
"""
if status == 'pass':
return 1
- elif status == 'warn':
+ elif status in ['warn', 'dmesg-warn']:
return 2
- elif status == 'fail':
+ elif status in ['fail', 'dmesg-fail']:
return 3
elif status == 'skip':
return 4
@@ -480,7 +480,8 @@ class Summary:
Private: Find the total number of pass, fail, crash, skip, and warn in
the *last* set of results stored in self.results.
"""
- self.totals = {'pass': 0, 'fail': 0, 'crash': 0, 'skip': 0, 'warn': 0}
+ self.totals = {'pass': 0, 'fail': 0, 'crash': 0, 'skip': 0, 'warn': 0,
+ 'dmesg-warn': 0, 'dmesg-fail': 0}
for test in self.results[-1].tests.values():
self.totals[test['result']] += 1
@@ -547,6 +548,7 @@ class Summary:
info=value.get('info', 'None'),
traceback=value.get('traceback', 'None'),
command=value.get('command', 'None'),
+ dmesg=value.get('dmesg', 'None'),
css=path.relpath(resultCss, tPath),
index=path.relpath(index, tPath)))
file.close()
@@ -625,6 +627,8 @@ class Summary:
print " crash: %d" % self.totals['crash']
print " skip: %d" % self.totals['skip']
print " warn: %d" % self.totals['warn']
+ print " dmesg-warn: %d" % self.totals['dmesg-warn']
+ print " dmesg-fail: %d" % self.totals['dmesg-fail']
if self.tests['changes']:
print " changes: %d" % len(self.tests['changes'])
print " fixes: %d" % len(self.tests['fixes'])
diff --git a/piglit-run.py b/piglit-run.py
index 7945b21..169c621 100755
--- a/piglit-run.py
+++ b/piglit-run.py
@@ -72,6 +72,9 @@ def main():
parser.add_argument("--valgrind",
action="store_true",
help="Run tests in valgrind's memcheck")
+ parser.add_argument("--dmesg",
+ action="store_true",
+ help="Capture a difference in dmesg before and after each test")
parser.add_argument("testProfile",
metavar="<Path to test profile>",
help="Path to testfile to run")
@@ -108,7 +111,8 @@ def main():
exclude_filter=args.exclude_tests,
include_filter=args.include_tests,
execute=args.execute,
- valgrind=args.valgrind)
+ valgrind=args.valgrind,
+ dmesg=args.dmesg)
# Change working directory to the root of the piglit directory
piglit_dir = path.dirname(path.realpath(sys.argv[0]))
diff --git a/templates/index.css b/templates/index.css
index 875333f..577370c 100644
--- a/templates/index.css
+++ b/templates/index.css
@@ -36,7 +36,7 @@ td:first-child > div {
background-color: #c8c838
}
-td.skip, td.warn, td.fail, td.pass, td.trap, td.abort, td.crash {
+td.skip, td.warn, td.fail, td.pass, td.trap, td.abort, td.crash, td.dmesg-warn, td.dmesg-fail {
text-align: right;
}
@@ -59,9 +59,13 @@ tr:nth-child(even) td.skip { background-color: #a0a0a0; }
tr:nth-child(odd) td.warn { background-color: #ff9020; }
tr:nth-child(even) td.warn { background-color: #ef8010; }
+tr:nth-child(odd) td.dmesg-warn { background-color: #ff9020; }
+tr:nth-child(even) td.dmesg-warn { background-color: #ef8010; }
tr:nth-child(odd) td.fail { background-color: #ff2020; }
tr:nth-child(even) td.fail { background-color: #e00505; }
+tr:nth-child(odd) td.dmesg-fail { background-color: #ff2020; }
+tr:nth-child(even) td.dmesg-fail { background-color: #e00505; }
tr:nth-child(odd) td.trap { background-color: #111111; }
tr:nth-child(even) td.trap { background-color: #000000; }
diff --git a/templates/test_result.mako b/templates/test_result.mako
index 410dbb4..b23fb8e 100644
--- a/templates/test_result.mako
+++ b/templates/test_result.mako
@@ -46,7 +46,12 @@
<pre>${traceback}</pre>
</td>
</tr>
-
+ <tr>
+ <td>dmesg</td>
+ <td>
+ <pre>${dmesg}</pre>
+ </td>
+ </tr>
</table>
<p><a href="${index}">Back to summary</a></p>
</body>
diff --git a/tests/es3conform.tests b/tests/es3conform.tests
index c79b20f..51b540d 100644
--- a/tests/es3conform.tests
+++ b/tests/es3conform.tests
@@ -51,12 +51,12 @@ class GTFTest(ExecTest):
def __init__(self, testpath):
ExecTest.__init__(self, [path.join(testBinDir, 'GTF3'), '-minfmt', '-width=113', '-height=47', '-run=' + testpath])
- def interpretResult(self, out, returncode, results):
+ def interpretResult(self, out, returncode, results, dmesg):
mo = self.pass_re.search(out)
if mo is not None and int(mo.group('passed')) > 0:
- results['result'] = 'pass'
+ results['result'] = 'dmesg-warn' if dmesg != '' else 'pass'
else:
- results['result'] = 'fail'
+ results['result'] = 'dmesg-fail' if dmesg != '' else 'fail'
return out
def populateTests(runfile):
diff --git a/tests/gtf.tests b/tests/gtf.tests
index 396b01a..3a7d2d4 100644
--- a/tests/gtf.tests
+++ b/tests/gtf.tests
@@ -47,11 +47,11 @@ class GTFTest(ExecTest):
def __init__(self, testpath):
ExecTest.__init__(self, [path.join(testBinDir, 'GTF'), '-width=113', '-height=47', '-seed=2', '-minfmt', '-run=' + testpath])
- def interpretResult(self, out, returncode, results):
+ def interpretResult(self, out, returncode, results, dmesg):
if self.pass_re.search(out) is not None:
- results['result'] = 'pass'
+ results['result'] = 'dmesg-warn' if dmesg != '' else 'pass'
else:
- results['result'] = 'fail'
+ results['result'] = 'dmesg-fail' if dmesg != '' else 'fail'
return out
# Populate a group with tests in the given directory:
diff --git a/tests/igt.tests b/tests/igt.tests
index 826ba78..ec17de5 100644
--- a/tests/igt.tests
+++ b/tests/igt.tests
@@ -52,13 +52,13 @@ class IGTTest(ExecTest):
def __init__(self, binary, arguments=[]):
ExecTest.__init__(self, [path.join(igtTestRoot, binary)] + arguments)
- def interpretResult(self, out, returncode, results):
+ def interpretResult(self, out, returncode, results, dmesg):
if returncode == 0:
- results['result'] = 'pass'
+ results['result'] = 'dmesg-warn' if dmesg != '' else 'pass'
elif returncode == 77:
results['result'] = 'skip'
else:
- results['result'] = 'fail'
+ results['result'] = 'dmesg-fail' if dmesg != '' else 'fail'
return out
def listTests(listname):
diff --git a/tests/oglconform.tests b/tests/oglconform.tests
index a8dbc70..13b993a 100644
--- a/tests/oglconform.tests
+++ b/tests/oglconform.tests
@@ -48,13 +48,13 @@ class OGLCTest(ExecTest):
def __init__(self, category, subtest):
ExecTest.__init__(self, [bin_oglconform, '-minFmt', '-v', '4', '-test', category, subtest])
- def interpretResult(self, out, returncode, results):
+ def interpretResult(self, out, returncode, results, dmesg):
if self.skip_re.search(out) is not None:
results['result'] = 'skip'
elif re.search('Total Passed : 1', out) is not None:
- results['result'] = 'pass'
+ results['result'] = 'dmesg-warn' if dmesg != '' else 'pass'
else:
- results['result'] = 'fail'
+ results['result'] = 'dmesg-fail' if dmesg != '' else 'fail'
return out
# Create a new top-level 'oglconform' category
--
1.8.1.2