[Mesa-dev] [PATCH v2] intel/tools: new intel_sanitize_gpu tool
Rogovin, Kevin
kevin.rogovin at intel.com
Thu Feb 8 08:18:34 UTC 2018
Hi,
Review comments below.
-----Original Message-----
From: Phillips, Scott D
Sent: Thursday, February 8, 2018 2:19 AM
To: mesa-dev at lists.freedesktop.org; Rogovin, Kevin <kevin.rogovin at intel.com>
Subject: [PATCH v2] intel/tools: new intel_sanitize_gpu tool
From: Kevin Rogovin <kevin.rogovin at intel.com>
Adds a new debug tool that pads each allocated GEM BO with (weak) pseudo-random noise values, which are then checked after each batchbuffer dispatch to the kernel. This can be quite valuable for finding difficult-to-track-down, Heisenberg-style bugs.
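For readers new to the technique, the fill/check idea is roughly the following; the LCG-style PRNG and the helper names fill_padding()/padding_is_intact() are illustrative assumptions for this sketch, not code from the patch:

#include <stdbool.h>
#include <stdint.h>

/* Fill the padding tail of a BO with values derived from a weak PRNG
 * seeded per BO (e.g. by its GEM handle), so the same sequence can be
 * regenerated later for comparison. */
static void
fill_padding(uint8_t *pad, uint64_t pad_size, uint32_t seed)
{
   uint32_t state = seed * 2654435761u + 1;
   for (uint64_t i = 0; i < pad_size; i++) {
      state = state * 1103515245u + 12345u;
      pad[i] = (uint8_t)(state >> 16);
   }
}

/* Re-derive the same sequence after an execbuffer completes and compare;
 * any mismatch means something wrote past the end of the BO. */
static bool
padding_is_intact(const uint8_t *pad, uint64_t pad_size, uint32_t seed)
{
   uint32_t state = seed * 2654435761u + 1;
   for (uint64_t i = 0; i < pad_size; i++) {
      state = state * 1103515245u + 12345u;
      if (pad[i] != (uint8_t)(state >> 16))
         return false;
   }
   return true;
}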
[scott.d.phillips at intel.com: split to separate tool]
v2: (by Scott D Phillips)
- track gem handles per fd (Kevin)
- remove handles on GEM_CLOSE (Kevin)
- ignore prime handles
- meson & shell script
---
src/intel/tools/intel_sanitize_gpu.c | 399 ++++++++++++++++++++++++++++++++++
src/intel/tools/intel_sanitize_gpu.in | 4 +
src/intel/tools/meson.build | 25 +++
3 files changed, 428 insertions(+)
create mode 100644 src/intel/tools/intel_sanitize_gpu.c
create mode 100755 src/intel/tools/intel_sanitize_gpu.in
diff --git a/src/intel/tools/intel_sanitize_gpu.c b/src/intel/tools/intel_sanitize_gpu.c
new file mode 100644
index 00000000000..84e9196da31
--- /dev/null
+++ b/src/intel/tools/intel_sanitize_gpu.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright © 2015-2018 Intel Corporation
+ *
<snip>
+static int
+prime_fd(int fd, struct drm_prime_handle *handle)
+{
+ int ret = libc_ioctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, handle);
+ if (ret != 0)
+ return ret;
+
+ /*
+ * Can't pad prime buffers, mark it as size=UINT64_MAX and we'll
+ * skip it in verification.
+ */
+ _mesa_hash_table_insert(bo_size_table(fd), (void*)(uintptr_t)handle->handle,
+ (void*)(uintptr_t)UINT64_MAX);
+
+ return 0;
+}
Rather than tracking such GEM handles, just ignore them entirely, since one cannot pad them at all (see the sketch below). The same story applies to USER_PTR GEM BOs.
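A minimal sketch of what "ignore rather than track" could look like on the verification side, assuming the bo_size_table() helper from this patch; handle_is_padded() is a made-up name for illustration, not code from the patch:

#include <stdbool.h>
#include <stdint.h>
#include "util/hash_table.h"

/* If prime (and userptr) handles are simply never inserted into the size
 * table, the verification pass can treat a missing entry as "not padded,
 * skip this BO" instead of checking for a UINT64_MAX marker. */
static bool
handle_is_padded(int fd, uint32_t gem_handle, uint64_t *padded_size)
{
   struct hash_entry *entry =
      _mesa_hash_table_search(bo_size_table(fd),
                              (void *)(uintptr_t)gem_handle);
   if (entry == NULL)
      return false;   /* never padded (e.g. prime import): skip */

   *padded_size = (uint64_t)(uintptr_t)entry->data;
   return true;
}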
+
+__attribute__ ((visibility ("default"))) int
+open(const char *path, int flags, ...)
+{
+ va_list args;
+ mode_t mode;
+
+ va_start(args, flags);
+ mode = va_arg(args, int);
+ va_end(args);
+
+ int fd = libc_open(path, flags, mode);
+
+ if (fd >= 0 && strcmp(path, "/dev/dri/renderD128") == 0)
+ add_drm_fd(fd);
+
+ return fd;
+}
+
+__attribute__ ((visibility ("default"), alias ("open"))) int
+open64(const char *path, int flags, ...);
+
+__attribute__ ((visibility ("default"))) int
+close(int fd)
+{
+ if (is_drm_fd(fd))
+ del_drm_fd(fd);
+
+ return libc_close(fd);
+}
+
+__attribute__ ((visibility ("default"))) int
+fcntl(int fd, int cmd, ...)
+{
+ va_list args;
+ int param;
+
+ va_start(args, cmd);
+ param = va_arg(args, int);
+ va_end(args);
+
+ int res = libc_fcntl(fd, cmd, param);
+
+ if (is_drm_fd(fd) && cmd == F_DUPFD_CLOEXEC)
+ dup_drm_fd(fd, res);
+
+ return res;
+}
+
+__attribute__ ((visibility ("default"))) int
+ioctl(int fd, unsigned long request, ...)
+{
+ va_list args;
+ void *argp;
+ struct stat buf;
+
+ va_start(args, request);
+ argp = va_arg(args, void *);
+ va_end(args);
+
+ if (_IOC_TYPE(request) == DRM_IOCTL_BASE &&
+ !is_drm_fd(fd) && fstat(fd, &buf) == 0 &&
+ (buf.st_mode & S_IFMT) == S_IFCHR && major(buf.st_rdev) == DRM_MAJOR) {
+ intel_loge("missed drm fd %d", fd);
+ add_drm_fd(fd);
+ }
+
+ if (is_drm_fd(fd)) {
+ switch (request) {
+ case DRM_IOCTL_GEM_CLOSE:
+ return gem_close(fd, (struct drm_gem_close*)argp);
+
+ case DRM_IOCTL_PRIME_FD_TO_HANDLE:
+ return prime_fd(fd, (struct drm_prime_handle*)argp);
+
+ case DRM_IOCTL_I915_GEM_CREATE:
+         return create_with_padding(fd, (struct drm_i915_gem_create*)argp);
+
+ case DRM_IOCTL_I915_GEM_EXECBUFFER2:
+ case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR:
+         return exec_and_check_padding(fd, request,
+                                       (struct drm_i915_gem_execbuffer2*)argp);
+
+ default:
+ break;
+ }
+ }
+   return libc_ioctl(fd, request, argp);
+}
+
One needs to mutex-lock the internal structures of the tool's .so to correctly handle simultaneous ioctls coming from multi-threaded applications. I suggest taking that common mutex in the functions open(), close() and ioctl(), roughly as sketched below.
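A minimal sketch of that, using a single pthread mutex around the fd/handle tables; the mutex name and its exact placement are only a suggestion, and is_drm_fd()/del_drm_fd()/libc_close() are the helpers already in the patch:

#include <pthread.h>

static pthread_mutex_t table_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Hold the mutex while touching the shared fd/handle tables, but drop it
 * before calling back into libc so the real close() is not serialized. */
__attribute__ ((visibility ("default"))) int
close(int fd)
{
   pthread_mutex_lock(&table_mutex);
   if (is_drm_fd(fd))
      del_drm_fd(fd);
   pthread_mutex_unlock(&table_mutex);

   return libc_close(fd);
}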