[poppler] 4 commits - regtest/backends regtest/TestRun.py
Carlos Garcia Campos
carlosgc at kemper.freedesktop.org
Fri Dec 6 08:38:13 PST 2013
regtest/TestRun.py | 73 +++++++++++++++++++++++++++++++------------
regtest/backends/__init__.py | 8 +++-
2 files changed, 59 insertions(+), 22 deletions(-)
New commits:
commit 3335d5e52fd7527bba7368ad6e87f1188808582f
Author: Carlos Garcia Campos <carlosgc at gnome.org>
Date: Fri Dec 6 17:28:13 2013 +0100
regtest: Limit the stderr files to ~1MB
Some tests send a lot of information to stderr, usually due to parsing
errors in buggy documents. More than 1MB of stderr output is difficult to
handle and in most cases it's redundant, with a lot of duplicated messages.
This patch reduced the size of the refs dir for the complete test suite
by 1GB.
diff --git a/regtest/backends/__init__.py b/regtest/backends/__init__.py
index 7d0ab8d..36391d1 100644
--- a/regtest/backends/__init__.py
+++ b/regtest/backends/__init__.py
@@ -206,7 +206,9 @@ class Backend:
     def __redirect_stderr_to_file(self, fd, out_path):
         stderr_file = None
+        max_size = 1024 * 1024
         read_set = [fd]
+
         while read_set:
             try:
                 rlist, wlist, xlist = select.select(read_set, [], [])
@@ -225,7 +227,9 @@ class Backend:
                 if chunk:
                     if stderr_file is None:
                         stderr_file = open(out_path + '.stderr', 'wb')
-                    stderr_file.write(chunk)
+                    if max_size > 0:
+                        stderr_file.write(chunk)
+                    max_size -= len(chunk)
                 else:
                     read_set.remove(fd)
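A minimal standalone sketch of the same idea: drain a child's stderr pipe so the process never blocks, but only keep the first ~1MB on disk. The helper name, the stderr_path argument and the subprocess setup below are illustrative, not the regtest API; only the 1MB cap and the read/select loop mirror the hunk above.

    import os
    import select
    import subprocess

    MAX_STDERR_SIZE = 1024 * 1024  # keep at most ~1MB of stderr per test

    def run_capped(cmd, stderr_path, max_size=MAX_STDERR_SIZE):
        # Illustrative helper (not regtest code): run cmd, always draining
        # stderr so the child never blocks, but write only the first
        # max_size bytes to disk.
        proc = subprocess.Popen(cmd, stderr=subprocess.PIPE)
        fd = proc.stderr.fileno()
        stderr_file = None
        remaining = max_size
        while True:
            rlist, _, _ = select.select([fd], [], [])
            if fd not in rlist:
                continue
            chunk = os.read(fd, 1024)
            if not chunk:
                break  # EOF: the child closed its stderr
            if stderr_file is None:
                stderr_file = open(stderr_path, 'wb')
            if remaining > 0:
                stderr_file.write(chunk)
            remaining -= len(chunk)  # keep reading even after the cap is hit
        if stderr_file is not None:
            stderr_file.close()
        return proc.wait()

Note that the pipe is still read to EOF after the cap is reached; only the writes stop, so a noisy child cannot fill its pipe buffer and hang.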
commit 24107ac47625438837d7c29053bff795f986a6bb
Author: Carlos Garcia Campos <carlosgc at gnome.org>
Date: Fri Dec 6 13:24:47 2013 +0100
regtest: Save checksum results sorted in md5 files
We are using os.listdir() to get the list of test results, and it returns
files in arbitrary order.
diff --git a/regtest/backends/__init__.py b/regtest/backends/__init__.py
index b57d8aa..7d0ab8d 100644
--- a/regtest/backends/__init__.py
+++ b/regtest/backends/__init__.py
@@ -67,7 +67,7 @@ class Backend:
         path = os.path.join(refs_path, self._name)
         md5_file = open(path + '.md5', 'w')
-        for entry in os.listdir(refs_path):
+        for entry in sorted(os.listdir(refs_path)):
             if not self.__should_have_checksum(entry):
                 continue
             ref_path = os.path.join(refs_path, entry)
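Since os.listdir() makes no ordering guarantee, the generated .md5 files could come out in a different order on every run even when the checksums were identical; sorting the entries makes the file deterministic. A hedged sketch of the pattern (the function name and the digest/line format below are illustrative, not the format regtest actually writes):

    import hashlib
    import os

    def write_md5_file(refs_path, md5_path):
        # Illustrative sketch: checksum every file under refs_path and
        # record the results sorted by name, so the output is stable
        # across runs and machines.
        with open(md5_path, 'w') as md5_file:
            for entry in sorted(os.listdir(refs_path)):
                ref_path = os.path.join(refs_path, entry)
                if not os.path.isfile(ref_path):
                    continue
                with open(ref_path, 'rb') as f:
                    digest = hashlib.md5(f.read()).hexdigest()
                md5_file.write("%s %s\n" % (digest, entry))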
commit f1c9993d58fb9d191a7b3e26bfcaf7b5eec5323d
Author: Carlos Garcia Campos <carlosgc at gnome.org>
Date: Fri Dec 6 13:03:24 2013 +0100
regtest: Also show tests that are expected to crash or fail to run but don't
diff --git a/regtest/TestRun.py b/regtest/TestRun.py
index 1bff4b4..1b984c9 100644
--- a/regtest/TestRun.py
+++ b/regtest/TestRun.py
@@ -45,6 +45,8 @@ class TestRun:
         self._failed = {}
         self._crashed = {}
         self._failed_status_error = {}
+        self._did_not_crash = {}
+        self._did_not_fail_status_error = {}
         self._stderr = {}
         self._skipped = []
         self._new = []
@@ -103,8 +105,10 @@ class TestRun:
         if test_has_md5:
             if ref_is_crashed:
                 self.printer.print_test_result_ln(doc_path, backend.get_name(), self._n_tests, self._total_tests, "DOES NOT CRASH")
+                self._did_not_crash.setdefault(backend.get_name(), []).append(doc_path)
             elif ref_is_failed:
                 self.printer.print_test_result_ln(doc_path, backend.get_name(), self._n_tests, self._total_tests, "DOES NOT FAIL")
+                self._did_not_fail_status_error.setdefault(backend.get_name(), []).append(doc_path)
             return

         test_is_crashed = backend.is_crashed(test_path)
@@ -221,7 +225,9 @@ class TestRun:
             test_results = [(self._failed, "unexpected failures"),
                             (self._crashed, "unexpected crashes"),
                             (self._failed_status_error, "unexpected failures (test program returned with an exit error status)"),
-                            (self._stderr, "tests have stderr output")]
+                            (self._stderr, "tests have stderr output"),
+                            (self._did_not_crash, "expected to crash, but didn't crash"),
+                            (self._did_not_fail_status_error, "expected to fail to run, but didn't fail")]

             for test_dict, test_msg in test_results:
                 n_tests, tests = result_tests(test_dict)
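The two new counters reuse the dict-of-lists layout introduced in the commit below: keyed by backend name, with one document path appended per result, so the summary loop can report them exactly like the other categories. A toy sketch of that bookkeeping pattern (the function name and document paths are illustrative, not the TestRun API):

    # Illustrative bookkeeping: backend name -> list of document paths.
    did_not_crash = {}
    did_not_fail_status_error = {}

    def record_unexpected_pass(results, backend_name, doc_path):
        # setdefault() creates the per-backend list on first use.
        results.setdefault(backend_name, []).append(doc_path)

    record_unexpected_pass(did_not_crash, "cairo", "tests/bug-12345.pdf")
    record_unexpected_pass(did_not_crash, "splash", "tests/bug-12345.pdf")
    print(did_not_crash)
    # {'cairo': ['tests/bug-12345.pdf'], 'splash': ['tests/bug-12345.pdf']}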
commit 64d1e79c863d12b12b87ed0e3139d364f503e026
Author: Carlos Garcia Campos <carlosgc at gnome.org>
Date: Fri Dec 6 12:51:48 2013 +0100
regtest: Improve readability of test results
Show a summary of failed tests per backend with percentages, and use a
new line for every test in the results instead of a single comma-separated
line.
diff --git a/regtest/TestRun.py b/regtest/TestRun.py
index 23ff31f..1bff4b4 100644
--- a/regtest/TestRun.py
+++ b/regtest/TestRun.py
@@ -42,10 +42,10 @@ class TestRun:
         self._n_tests = 0
         self._n_run = 0
         self._n_passed = 0
-        self._failed = []
-        self._crashed = []
-        self._failed_status_error = []
-        self._stderr = []
+        self._failed = {}
+        self._crashed = {}
+        self._failed_status_error = {}
+        self._stderr = {}
         self._skipped = []
         self._new = []
@@ -88,7 +88,7 @@ class TestRun:
         self._n_run += 1

         if backend.has_stderr(test_path):
-            self._stderr.append("%s (%s)" % (doc_path, backend.get_name()))
+            self._stderr.setdefault(backend.get_name(), []).append(doc_path)

         if ref_has_md5 and test_has_md5:
             if test_passed:
@@ -97,7 +97,7 @@ class TestRun:
                 self._n_passed += 1
             else:
                 self.printer.print_test_result_ln(doc_path, backend.get_name(), self._n_tests, self._total_tests, "FAIL")
-                self._failed.append("%s (%s)" % (doc_path, backend.get_name()))
+                self._failed.setdefault(backend.get_name(), []).append(doc_path)
             return

         if test_has_md5:
@@ -122,12 +122,12 @@ class TestRun:
         if test_is_crashed:
             self.printer.print_test_result_ln(doc_path, backend.get_name(), self._n_tests, self._total_tests, "CRASH")
-            self._crashed.append("%s (%s)" % (doc_path, backend.get_name()))
+            self._crashed.setdefault(backend.get_name(), []).append(doc_path)
             return

         if test_is_failed:
             self.printer.print_test_result_ln(doc_path, backend.get_name(), self._n_tests, self._total_tests, "FAIL (status error %d)" % (test_is_failed))
-            self._failed_status_error("%s (%s)" % (doc_path, backend.get_name()))
+            self._failed_status_error.setdefault(backend.get_name(), []).append(doc_path)
             return

     def run_test(self, filename):
@@ -196,24 +196,51 @@ class TestRun:
         if self._n_run:
             self.printer.printout_ln("%d tests passed (%.2f%%)" % (self._n_passed, (self._n_passed * 100.) / self._n_run))
             self.printer.printout_ln()
-            def report_tests(test_list, test_type):
-                n_tests = len(test_list)
-                if not n_tests:
-                    return
-                self.printer.printout_ln("%d tests %s (%.2f%%): %s" % (n_tests, test_type, (n_tests * 100.) / self._n_run, ", ".join(test_list)))
-                self.printer.printout_ln()
-            report_tests(self._failed, "failed")
-            report_tests(self._crashed, "crashed")
-            report_tests(self._failed_status_error, "failed to run")
-            report_tests(self._stderr, "have stderr output")
+            def result_tests(test_dict):
+                if not test_dict:
+                    return 0, None
+
+                n_tests = 0
+                tests = ""
+                for backend in test_dict:
+                    backend_docs = test_dict[backend]
+                    n_tests += len(backend_docs)
+                    tests += "\n".join([" %s (%s)" % (doc_path, backend) for doc_path in backend_docs])
+                    tests += "\n"
+
+                return n_tests, tests
+
+            def backends_summary(test_dict, n_tests):
+                percs = []
+                for backend in test_dict:
+                    n_docs = len(test_dict[backend])
+                    percs.append("%d %s (%.2f%%)" % (n_docs, backend, (n_docs * 100.) / n_tests))
+                return ", ".join(percs)
+
+            test_results = [(self._failed, "unexpected failures"),
+                            (self._crashed, "unexpected crashes"),
+                            (self._failed_status_error, "unexpected failures (test program returned with an exit error status)"),
+                            (self._stderr, "tests have stderr output")]
+
+            for test_dict, test_msg in test_results:
+                n_tests, tests = result_tests(test_dict)
+                if n_tests == 0:
+                    continue
+
+                self.printer.printout_ln("%d %s (%.2f%%) [%s]" % (n_tests, test_msg, (n_tests * 100.) / self._n_run, backends_summary(test_dict, n_tests)))
+                self.printer.printout_ln(tests)
+                self.printer.printout_ln()
         else:
             self.printer.printout_ln("No tests run")

         if self._skipped:
-            self.printer.printout_ln("%d tests skipped: %s" % (len(self._skipped), ", ".join(self._skipped)))
+            self.printer.printout_ln("%d tests skipped" % len(self._skipped))
+            self.printer.printout_ln("\n".join([" %s" % skipped for skipped in self._skipped]))
             self.printer.printout_ln()

         if self._new:
-            self.printer.printout_ln("%d new documents: %s\nUse create-refs command to add reference results for them" % (len(self._new), ", ".join(self._new)))
+            self.printer.printout_ln("%d new documents" % len(self._new))
+            self.printer.printout_ln("\n".join([" %s" % new for new in self._new]))
+            self.printer.printout_ln("Use create-refs command to add reference results for them")
             self.printer.printout_ln()
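Put together, each category in the summary now gets a header line with overall and per-backend percentages, followed by one "doc (backend)" entry per line instead of a single comma-separated line. A rough sketch of that formatting, with plain print() standing in for the regtest printer and made-up sample data:

    def print_summary(test_dict, test_msg, n_run):
        # Roughly mirrors result_tests()/backends_summary() above.
        n_tests = sum(len(docs) for docs in test_dict.values())
        if n_tests == 0:
            return
        per_backend = ", ".join("%d %s (%.2f%%)" % (len(docs), backend, len(docs) * 100. / n_tests)
                                for backend, docs in test_dict.items())
        print("%d %s (%.2f%%) [%s]" % (n_tests, test_msg, n_tests * 100. / n_run, per_backend))
        for backend, docs in test_dict.items():
            for doc_path in docs:
                print(" %s (%s)" % (doc_path, backend))
        print()

    # Made-up example data:
    failed = {"cairo": ["a.pdf", "b.pdf"], "splash": ["c.pdf"]}
    print_summary(failed, "unexpected failures", n_run=100)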