On 17 February 2012 01:38, Kenneth Graunke <kenneth@whitecape.org> wrote:
GPUs like to hang, especially when barraged with lots of mean Piglit
tests.  Usually this results in the poor developer having to figure out
what test hung, blacklist it via -x, and start the whole test run over.
This can waste a huge amount of time, especially when many tests hang.

This patch adds the ability to resume a Piglit run where you left off.

The workflow is:
$ piglit-run.py -t foo tests/quick.tests results/foobar-1
<interrupt the test run somehow>
$ piglit-run.py -r -x bad-test results/foobar-1
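
Since a resumed run writes the merged options back out, the -x exclusions
accumulate, so this works even if several different tests hang, e.g.
(bad-test-2 being just a placeholder for whatever hung next):
$ piglit-run.py -r -x bad-test-2 results/foobar-1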

To accomplish this, piglit-run.py now stores the test profile
(quick.tests) and -t/-x options in the JSON results file so it can tell
what you were originally running.  When run with the --resume option, it
re-reads the results file to obtain this information (repairing broken
JSON if necessary), rewrites the existing results, and runs any
remaining tests.
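
For illustration only, the stored block ends up looking roughly like this
after the resumed run in the example above (key names as written by the
code below; the exact layout depends on the JSON writer):

  "options": {
      "profile": "tests/quick.tests",
      "filter": ["foo"],
      "exclude_filter": ["bad-test"]
  },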

Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Paul Berry <stereotype441@gmail.com> [v1]
---
 framework/core.py |    3 ++
 piglit-run.py     |   59 ++++++++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 57 insertions(+), 5 deletions(-)

diff --git a/framework/core.py b/framework/core.py
index 2af8ed3..db20d65 100644
--- a/framework/core.py
+++ b/framework/core.py
@@ -261,6 +261,7 @@ class GroupResult(dict):
 class TestrunResult:
     def __init__(self):
         self.serialized_keys = [
+            'options',
             'name',
             'tests',
             'glxinfo',
@@ -371,6 +372,7 @@ class Environment:
         self.execute = True
         self.filter = []
         self.exclude_filter = []
+        self.exclude_tests = set()

     def run(self, command):
         try:
@@ -519,6 +521,7 @@ class TestProfile:

         def test_matches((path, test)):
             return (matches_any_regexp(path, env.filter) and
+                    not path in env.exclude_tests and
                     not matches_any_regexp(path, env.exclude_filter))

         # Filter out unwanted tests
diff --git a/piglit-run.py b/piglit-run.py
index 2867a73..296d463 100755
--- a/piglit-run.py
+++ b/piglit-run.py
@@ -28,6 +28,7 @@ import re
 import sys, os
 import time
 import traceback
+import json

 sys.path.append(path.dirname(path.realpath(sys.argv[0])))
 import framework.core as core
@@ -39,6 +40,7 @@ from framework.threads import synchronized_self
 def usage():
     USAGE = """\
 Usage: %(progName)s [options] [profile.tests] [results]
+       %(progName)s [options] -r [results]

 Options:
   -h, --help                Show this message
@@ -60,6 +62,10 @@ Example:
   %(progName)s -t ^glean/ -t tex tests/all.tests results/all
          Run all tests that are in the 'glean' group or whose path contains
          the substring 'tex'
+
+  %(progName)s -r -x bad-test results/all
+         Resume an interrupted test run whose results are stored in the
+         directory results/all, skipping bad-test.
 """
     print USAGE % {'progName': sys.argv[0]}
     sys.exit(1)
@@ -71,25 +77,33 @@ def main():
         option_list = [
             "help",
             "dry-run",
+            "resume",
             "tests=",
             "name=",
             "exclude-tests=",
             "concurrent=",
             ]
-        options, args = getopt(sys.argv[1:], "hdt:n:x:c:", option_list)
+        options, args = getopt(sys.argv[1:], "hdrt:n:x:c:", option_list)
     except GetoptError:
         usage()

     OptionName = ''
+    OptionResume = False
+    test_filter = []
+    exclude_filter = []

     for name, value in options:
         if name in ('-h', '--help'):
             usage()
         elif name in ('-d', '--dry-run'):
             env.execute = False
+        elif name in ('-r', '--resume'):
+            OptionResume = True
         elif name in ('-t', '--tests'):
+            test_filter.append(value)
             env.filter.append(re.compile(value))
         elif name in ('-x', '--exclude-tests'):
+            exclude_filter.append(value)
             env.exclude_filter.append(re.compile(value))
         elif name in ('-n', '--name'):
             OptionName = value
@@ -101,11 +115,29 @@ def main():
         else:
             usage()

-    if len(args) != 2:
-        usage()
+    if OptionResume:
+        if test_filter or OptionName:
+            print "-r is not compatible with -t or -n."
+            usage()
+        if len(args) != 1:
+            usage()
+        resultsDir = args[0]
+
+        # Load settings from the old results JSON
+        old_results = core.loadTestResults(resultsDir)
+        profileFilename = old_results.options['profile']
+        for value in old_results.options['filter']:
+            test_filter.append(value)
+            env.filter.append(re.compile(value))
+        for value in old_results.options['exclude_filter']:
+            exclude_filter.append(value)
+            env.exclude_filter.append(re.compile(value))
+    else:
+        if len(args) != 2:
+            usage()

-    profileFilename = args[0]
-    resultsDir = path.realpath(args[1])
+        profileFilename = args[0]
+        resultsDir = path.realpath(args[1])

     # Change to the piglit's path
     piglit_dir = path.dirname(path.realpath(sys.argv[0]))
@@ -127,6 +159,16 @@ def main():
     json_writer = core.JSONWriter(result_file)
     json_writer.open_dict()

+    # Write out command line options for use in resuming.
+    json_writer.write_dict_key('options')
+    json_writer.open_dict()
+    json_writer.write_dict_item('profile', profileFilename)
+    json_writer.write_dict_key('filter')
+    result_file.write(json.dumps(test_filter))
+    json_writer.write_dict_key('exclude_filter')
+    result_file.write(json.dumps(exclude_filter))
+    json_writer.close_dict()
+
     json_writer.write_dict_item('name', results.name)
     for (key, value) in env.collectData().items():
         json_writer.write_dict_item(key, value)
@@ -135,6 +177,13 @@ def main():

     json_writer.write_dict_key('tests')
     json_writer.open_dict()
+    # If resuming an interrupted test run, re-write all of the existing
+    # results since we clobbered the results file.  Also, exclude them
+    # from being run again.
+    if OptionResume:
+        for (key, value) in old_results.tests.items():
+            json_writer.write_dict_item(key, value)
+            env.exclude_tests.add(key)

     time_start = time.time()
     profile.run(env, json_writer)
<span class="HOEnZb"><font color="#888888">--<br>
1.7.7.6<br>
<br>

Reviewed-by: Paul Berry <stereotype441@gmail.com>