[Piglit] [PATCH 4/4] Add the ability to resume an interrupted test run where it left off.

Paul Berry stereotype441 at gmail.com
Fri Feb 24 14:31:19 PST 2012


On 17 February 2012 01:38, Kenneth Graunke <kenneth at whitecape.org> wrote:

> GPUs like to hang, especially when barraged with lots of mean Piglit
> tests.  Usually this results in the poor developer having to figure out
> which test hung, blacklist it via -x, and restart the whole test run.
> This can waste a huge amount of time, especially when many tests hang.
>
> This patch adds the ability to resume a Piglit run where you left off.
>
> The workflow is:
> $ piglit-run.py -t foo tests/quick.tests results/foobar-1
> <interrupt the test run somehow>
> $ piglit-run.py -r -x bad-test results/foobar-1
>
> To accomplish this, piglit-run.py now stores the test profile
> (quick.tests) and -t/-x options in the JSON results file so it can tell
> what you were originally running.  When run with the --resume option, it
> re-reads the results file to obtain this information (repairing broken
> JSON if necessary), rewrites the existing results, and runs any
> remaining tests.
>
> Signed-off-by: Kenneth Graunke <kenneth at whitecape.org>
> Reviewed-by: Paul Berry <stereotype441 at gmail.com> [v1]
> ---
>  framework/core.py |    3 ++
>  piglit-run.py     |   59
> ++++++++++++++++++++++++++++++++++++++++++++++++----
>  2 files changed, 57 insertions(+), 5 deletions(-)
>
> diff --git a/framework/core.py b/framework/core.py
> index 2af8ed3..db20d65 100644
> --- a/framework/core.py
> +++ b/framework/core.py
> @@ -261,6 +261,7 @@ class GroupResult(dict):
>  class TestrunResult:
>        def __init__(self):
>                self.serialized_keys = [
> +                       'options',
>                        'name',
>                        'tests',
>                        'glxinfo',
> @@ -371,6 +372,7 @@ class Environment:
>                self.execute = True
>                self.filter = []
>                self.exclude_filter = []
> +               self.exclude_tests = set()
>
>        def run(self, command):
>                try:
> @@ -519,6 +521,7 @@ class TestProfile:
>
>                def test_matches((path, test)):
>                        return (matches_any_regexp(path, env.filter) and
> +                               not path in env.exclude_tests and
>                                not matches_any_regexp(path,
> env.exclude_filter))
>
>                # Filter out unwanted tests
> diff --git a/piglit-run.py b/piglit-run.py
> index 2867a73..296d463 100755
> --- a/piglit-run.py
> +++ b/piglit-run.py
> @@ -28,6 +28,7 @@ import re
>  import sys, os
>  import time
>  import traceback
> +import json
>
>  sys.path.append(path.dirname(path.realpath(sys.argv[0])))
>  import framework.core as core
> @@ -39,6 +40,7 @@ from framework.threads import synchronized_self
>  def usage():
>        USAGE = """\
>  Usage: %(progName)s [options] [profile.tests] [results]
> +       %(progName)s [options] -r [results]
>
>  Options:
>   -h, --help                Show this message
> @@ -60,6 +62,10 @@ Example:
>   %(progName)s -t ^glean/ -t tex tests/all.tests results/all
>          Run all tests that are in the 'glean' group or whose path contains
>                 the substring 'tex'
> +
> +  %(progName)s -r -x bad-test results/all
> +         Resume an interrupted test run whose results are stored in the
> +         directory results/all, skipping bad-test.
>  """
>        print USAGE % {'progName': sys.argv[0]}
>        sys.exit(1)
> @@ -71,25 +77,33 @@ def main():
>                option_list = [
>                         "help",
>                         "dry-run",
> +                        "resume",
>                         "tests=",
>                         "name=",
>                         "exclude-tests=",
>                         "concurrent=",
>                         ]
> -               options, args = getopt(sys.argv[1:], "hdt:n:x:c:",
> option_list)
> +               options, args = getopt(sys.argv[1:], "hdrt:n:x:c:",
> option_list)
>        except GetoptError:
>                usage()
>
>        OptionName = ''
> +       OptionResume = False
> +       test_filter = []
> +       exclude_filter = []
>
>        for name, value in options:
>                if name in ('-h', '--help'):
>                        usage()
>                elif name in ('-d', '--dry-run'):
>                        env.execute = False
> +               elif name in ('-r', '--resume'):
> +                       OptionResume = True
>                elif name in ('-t', '--tests'):
> +                       test_filter.append(value)
>                        env.filter.append(re.compile(value))
>                elif name in ('-x', '--exclude-tests'):
> +                       exclude_filter.append(value)
>                        env.exclude_filter.append(re.compile(value))
>                elif name in ('-n', '--name'):
>                        OptionName = value
> @@ -101,11 +115,29 @@ def main():
>                        else:
>                                usage()
>
> -       if len(args) != 2:
> -               usage()
> +       if OptionResume:
> +               if test_filter or OptionName:
> +                       print "-r is not compatible with -t or -n."
> +                       usage()
> +               if len(args) != 1:
> +                       usage()
> +               resultsDir = args[0]
> +
> +               # Load settings from the old results JSON
> +               old_results = core.loadTestResults(resultsDir)
> +               profileFilename = old_results.options['profile']
> +               for value in old_results.options['filter']:
> +                       test_filter.append(value)
> +                       env.filter.append(re.compile(value))
> +               for value in old_results.options['exclude_filter']:
> +                       exclude_filter.append(value)
> +                       env.exclude_filter.append(re.compile(value))
> +       else:
> +               if len(args) != 2:
> +                       usage()
>
> -       profileFilename = args[0]
> -       resultsDir = path.realpath(args[1])
> +               profileFilename = args[0]
> +               resultsDir = path.realpath(args[1])
>
>        # Change to the piglit's path
>        piglit_dir = path.dirname(path.realpath(sys.argv[0]))
> @@ -127,6 +159,16 @@ def main():
>        json_writer = core.JSONWriter(result_file)
>        json_writer.open_dict()
>
> +       # Write out command line options for use in resuming.
> +       json_writer.write_dict_key('options')
> +       json_writer.open_dict()
> +       json_writer.write_dict_item('profile', profileFilename)
> +       json_writer.write_dict_key('filter')
> +       result_file.write(json.dumps(test_filter))
> +       json_writer.write_dict_key('exclude_filter')
> +       result_file.write(json.dumps(exclude_filter))
> +       json_writer.close_dict()
> +
>        json_writer.write_dict_item('name', results.name)
>        for (key, value) in env.collectData().items():
>                json_writer.write_dict_item(key, value)
> @@ -135,6 +177,13 @@ def main():
>
>        json_writer.write_dict_key('tests')
>        json_writer.open_dict()
> +       # If resuming an interrupted test run, re-write all of the existing
> +       # results since we clobbered the results file.  Also, exclude them
> +       # from being run again.
> +       if OptionResume:
> +               for (key, value) in old_results.tests.items():
> +                       json_writer.write_dict_item(key, value)
> +                       env.exclude_tests.add(key)
>
>        time_start = time.time()
>        profile.run(env, json_writer)
> --
> 1.7.7.6
>
> _______________________________________________
> Piglit mailing list
> Piglit at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/piglit
>

Reviewed-by: Paul Berry <stereotype441 at gmail.com>
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.freedesktop.org/archives/piglit/attachments/20120224/b4337c62/attachment-0001.htm>


More information about the Piglit mailing list