[Piglit] [PATCH] Make valgrind testing a command line option rather than extra tests.

Kenneth Graunke kenneth at whitecape.org
Fri Mar 2 17:24:59 PST 2012


Valgrind testing is useful, but is best performed as a separate
exercise from the usual regression testing, as it takes far too long.

Rather than including it by default in all.tests and making people
exclude it with the -x valgrind option (or by using quick.tests), it
makes sense to explicitly request valgrind testing with --valgrind.

To perform valgrind testing:
$ piglit-run.py --valgrind <options> tests/quick.tests results/vg-1

The ExecTest class now handles Valgrind wrapping natively, rather than
relying on the tests/valgrind-test/valgrind-test shell script wrapper.
This provides a huge benefit: we can leverage the interpretResult()
function to make it work properly for any subclass of ExecTest.  The
old shell script only worked for PlainExecTest (piglit) and GleanTest.

Another small benefit is that you can now use --valgrind with any test
profile (such as quick.tests).  Also, you can use all.tests without
having to remember to specify "-x valgrind".

Signed-off-by: Kenneth Graunke <kenneth at whitecape.org>
---
 framework/core.py                 |    3 ++-
 framework/exectest.py             |   20 ++++++++++++++++++--
 piglit-run.py                     |    4 ++++
 tests/all.tests                   |   29 -----------------------------
 tests/quick.tests                 |    2 --
 tests/valgrind-test/valgrind-test |   37 -------------------------------------
 6 files changed, 24 insertions(+), 71 deletions(-)
 delete mode 100755 tests/valgrind-test/valgrind-test

Huzzah!  Really happy with this patch.  Less code, works with external test
suites, and more flexible.

diff --git a/framework/core.py b/framework/core.py
index 1bc108b..b85072a 100644
--- a/framework/core.py
+++ b/framework/core.py
@@ -373,6 +373,7 @@ class Environment:
 		self.filter = []
 		self.exclude_filter = []
 		self.exclude_tests = set()
+		self.valgrind = False
 
 	def run(self, command):
 		try:
@@ -432,7 +433,7 @@ class Test:
 			try:
 				status("running")
 				time_start = time.time()
-				result = self.run()
+				result = self.run(env.valgrind)
 				time_end = time.time()
 				if 'time' not in result:
 					result['time'] = time_end - time_start
diff --git a/framework/exectest.py b/framework/exectest.py
index d4a2bd8..7877adb 100644
--- a/framework/exectest.py
+++ b/framework/exectest.py
@@ -44,14 +44,18 @@ class ExecTest(Test):
 		raise NotImplementedError
 		return out
 
-	def run(self):
+	def run(self, valgrind):
 		fullenv = os.environ.copy()
 		for e in self.env:
 			fullenv[e] = str(self.env[e])
 
 		if self.command is not None:
+			command = self.command
+			if valgrind:
+				command[:0] = ['valgrind', '--quiet', '--error-exitcode=1', '--tool=memcheck']
+
 			proc = subprocess.Popen(
-				self.command,
+				command,
 				stdout=subprocess.PIPE,
 				stderr=subprocess.PIPE,
 				env=fullenv,
@@ -98,6 +102,18 @@ class ExecTest(Test):
 			elif proc.returncode != 0:
 				results['note'] = 'Returncode was %d' % (proc.returncode)
 
+			if valgrind:
+				# If the underlying test failed, simply report
+				# 'skip' for this valgrind test.
+				if results['result'] != 'pass':
+					results['result'] = 'skip'
+				elif proc.returncode == 0:
+					# Test passes and is valgrind clean.
+					results['result'] = 'pass'
+				else:
+					# Test passed but has valgrind errors.
+					results['result'] = 'fail'
+
 			env = ''
 			for key in self.env:
 				env = env + key + '="' + self.env[key] + '" '
diff --git a/piglit-run.py b/piglit-run.py
index 296d463..e24cf2a 100755
--- a/piglit-run.py
+++ b/piglit-run.py
@@ -52,6 +52,7 @@ Options:
   -n name, --name=name      Name of the testrun
   -c bool, --concurrent=bool  Enable/disable concurrent test runs. Valid
 			      option values are: 0, 1, on, off.  (default: on)
+  --valgrind                Run tests in valgrind's memcheck.
 Example:
   %(progName)s tests/all.tests results/all
          Run all tests, store the results in the directory results/all
@@ -78,6 +79,7 @@ def main():
 			 "help",
 			 "dry-run",
 			 "resume",
+			 "valgrind",
 			 "tests=",
 			 "name=",
 			 "exclude-tests=",
@@ -99,6 +101,8 @@ def main():
 			env.execute = False
 		elif name in ('-r', '--resume'):
 			OptionResume = True
+		elif name in ('--valgrind'):
+			env.valgrind = True
 		elif name in ('-t', '--tests'):
 			test_filter.append(value)
 			env.filter.append(re.compile(value))
diff --git a/tests/all.tests b/tests/all.tests
index 0539755..d0eb280 100644
--- a/tests/all.tests
+++ b/tests/all.tests
@@ -2139,32 +2139,3 @@ profile.tests['glx'] = glx
 for test_path in blacklist:
     profile.remove_test(test_path)
 
-class ValgrindExecTest(PlainExecTest):
-	def __init__(self, test):
-		Test.__init__(self, test.runConcurrent)
-		self.orig_test = test
-		self.env = {}
-		if 'PIGLIT_TEST' in test.env:
-			self.env['PIGLIT_TEST'] = test.env['PIGLIT_TEST']
-
-	def run(self):
-		orig_command = self.orig_test.command
-		if orig_command is None:
-			assert(self.orig_test.result is not None)
-			assert(self.orig_test.result['result'] == 'fail')
-			return self.orig_test.result
-		self.command = [testsDir + '/valgrind-test/valgrind-test'] + orig_command
-		return PlainExecTest.run(self)
-
-valgrind = Group()
-for groupname, group in sorted(profile.tests.iteritems()):
-	for testname, test in sorted(group.iteritems()):
-		if issubclass(test.__class__, PlainExecTest) or issubclass(test.__class__, GleanTest):
-			valgrind[groupname+"/"+testname] = ValgrindExecTest(test)
-
-		if issubclass(test.__class__, Group):
-			for subtestname, subtest in sorted(test.iteritems()):
-				if issubclass(subtest.__class__, PlainExecTest) or issubclass(subtest.__class__, GleanTest):
-					valgrind[groupname+"/"+testname+"/"+subtestname] = ValgrindExecTest(subtest)
-
-profile.tests['valgrind'] = valgrind
diff --git a/tests/quick.tests b/tests/quick.tests
index 43eae7d..2ca060d 100644
--- a/tests/quick.tests
+++ b/tests/quick.tests
@@ -8,5 +8,3 @@ import os.path
 execfile(os.path.dirname(__file__) + '/all.tests')
 
 GleanTest.globalParams += [ "--quick" ]
-
-del profile.tests['valgrind']
diff --git a/tests/valgrind-test/valgrind-test b/tests/valgrind-test/valgrind-test
deleted file mode 100755
index 885c298..0000000
--- a/tests/valgrind-test/valgrind-test
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh
-
-output=$(valgrind --quiet --error-exitcode=1 "$@")
-exit_code=$?
-
-# If valgrind is happy, then this test passes, so we substitute a pass
-# result in the place of any existing piglit result line.
-if [ "$exit_code" = "0" ]; then
-    echo "$output" | grep -v '^PIGLIT'
-    echo "PIGLIT: {'result': 'pass' }"
-    exit 0
-fi
-
-# A non-zero exit code could mean either a valgrind error or a failure
-# of the underlying test.
-
-# Look for a piglit result of pass in the original output to ensure
-# that this is a valgrind error, and if so, return a piglit failure.
-if echo "$output" | grep -q '^PIGLIT.*pass'; then
-    echo "$output" | grep -v 'PIGLIT'
-    echo "PIGLIT: {'result': 'fail' }"
-    exit 1
-fi
-
-# Report a valgrind error for glean tests without any failures.
-if echo "$@" | grep -q 'glean'; then
-    if echo "$output" | grep -qv 'FAIL'; then
-        echo "PIGLIT: {'result': 'fail' }"
-        exit 1
-    fi
-fi
-
-# Otherwise, the underlying test failed, so we simply report a piglit
-# skip for this valgrind test.
-echo "$output" | grep -v 'PIGLIT'
-echo "PIGLIT: {'result': 'skip' }"
-exit 0
-- 
1.7.7.6



More information about the Piglit mailing list