[Intel-gfx] [PATCH igt 3/5] prime_mmap: Add basic tests to write in a bo using CPU

Tiago Vignatti tiago.vignatti at intel.com
Thu Aug 27 15:48:52 PDT 2015


This patch adds test_correct_cpu_write, which maps the texture buffer through a
prime fd and then writes directly to it using the CPU. This stresses the driver,
which has to keep the caches synchronized across the different domains.

This patch also adds test_forked_cpu_write, which creates the GEM bo in one
process and passes its prime fd to another process, which in turn uses the fd
only to map and write. Roughly speaking, this test simulates the Chrome OS
architecture, where the Web content ("unprivileged process") maps and
CPU-draws into a buffer that was previously allocated in the GPU process
("privileged process").

This requires kernel modifications (Daniel Thompson's "drm: prime: Honour
O_RDWR during prime-handle-to-fd"), so prime_handle_to_fd_for_mmap is added;
it simply fails when that support is missing. Upcoming tests (e.g. the next
patch) are going to use it as well, so make it public and available in the
lib.

v2: adds prime_handle_to_fd_for_mmap so the tests skip on older kernels, and
adds a test for invalid flags.

Signed-off-by: Tiago Vignatti <tiago.vignatti at intel.com>
---
 lib/ioctl_wrappers.c | 25 +++++++++++++++
 lib/ioctl_wrappers.h |  4 +++
 tests/prime_mmap.c   | 89 ++++++++++++++++++++++++++++++++++++++++++++++++----
 3 files changed, 112 insertions(+), 6 deletions(-)

diff --git a/lib/ioctl_wrappers.c b/lib/ioctl_wrappers.c
index a97e909..e16d3f0 100644
--- a/lib/ioctl_wrappers.c
+++ b/lib/ioctl_wrappers.c
@@ -1192,6 +1192,31 @@ int prime_handle_to_fd(int fd, uint32_t handle)
 }
 
 /**
+ * prime_handle_to_fd_for_mmap:
+ * @fd: open i915 drm file descriptor
+ * @handle: file-private gem buffer object handle
+ *
+ * Same as prime_handle_to_fd above, but the exported fd is created with
+ * DRM_RDWR so that the dma-buf can be written to through a CPU mmap.
+ *
+ * Returns: The created dma-buf fd handle or -1 if the ioctl fails.
+ */
+int prime_handle_to_fd_for_mmap(int fd, uint32_t handle)
+{
+	struct drm_prime_handle args;
+
+	memset(&args, 0, sizeof(args));
+	args.handle = handle;
+	args.flags = DRM_CLOEXEC | DRM_RDWR;
+	args.fd = -1;
+
+	if (drmIoctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) != 0)
+		return -1;
+
+	return args.fd;
+}
+
+/**
  * prime_fd_to_handle:
  * @fd: open i915 drm file descriptor
  * @dma_buf_fd: dma-buf fd handle
diff --git a/lib/ioctl_wrappers.h b/lib/ioctl_wrappers.h
index b1e99ed..ee9066c 100644
--- a/lib/ioctl_wrappers.h
+++ b/lib/ioctl_wrappers.h
@@ -147,6 +147,10 @@ void gem_require_ring(int fd, int ring_id);
 
 /* prime */
 int prime_handle_to_fd(int fd, uint32_t handle);
+#ifndef DRM_RDWR
+#define DRM_RDWR O_RDWR
+#endif
+int prime_handle_to_fd_for_mmap(int fd, uint32_t handle);
 uint32_t prime_fd_to_handle(int fd, int dma_buf_fd);
 off_t prime_get_size(int dma_buf_fd);
 
diff --git a/tests/prime_mmap.c b/tests/prime_mmap.c
index 16d73fc..c2fcf23 100644
--- a/tests/prime_mmap.c
+++ b/tests/prime_mmap.c
@@ -22,6 +22,7 @@
  *
  * Authors:
  *    Rob Bradford <rob at linux.intel.com>
+ *    Tiago Vignatti <tiago.vignatti at intel.com>
  *
  */
 
@@ -66,6 +67,12 @@ fill_bo(uint32_t handle, size_t size)
 }
 
 static void
+fill_bo_cpu(char *ptr)
+{
+	memcpy(ptr, pattern, sizeof(pattern));
+}
+
+static void
 test_correct(void)
 {
 	int dma_buf_fd;
@@ -180,6 +187,65 @@ test_forked(void)
 	gem_close(fd, handle);
 }
 
+/* test simple CPU write */
+static void
+test_correct_cpu_write(void)
+{
+	int dma_buf_fd;
+	char *ptr;
+	uint32_t handle;
+
+	handle = gem_create(fd, BO_SIZE);
+
+	dma_buf_fd = prime_handle_to_fd_for_mmap(fd, handle);
+
+	/* Skip if DRM_RDWR is not supported */
+	igt_skip_on(dma_buf_fd == -1 && errno == EINVAL);
+
+	/* Map with write permission (PROT_WRITE) and check the mapping works */
+	ptr = mmap(NULL, BO_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, dma_buf_fd, 0);
+	igt_assert(ptr != MAP_FAILED);
+
+	/* Fill bo using CPU */
+	fill_bo_cpu(ptr);
+
+	/* Check pattern correctness */
+	igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
+
+	munmap(ptr, BO_SIZE);
+	close(dma_buf_fd);
+	gem_close(fd, handle);
+}
+
+/* map from another process and then write using CPU */
+static void
+test_forked_cpu_write(void)
+{
+	int dma_buf_fd;
+	char *ptr;
+	uint32_t handle;
+
+	handle = gem_create(fd, BO_SIZE);
+
+	dma_buf_fd = prime_handle_to_fd_for_mmap(fd, handle);
+
+	/* Skip if DRM_RDWR is not supported */
+	igt_skip_on(dma_buf_fd == -1 && errno == EINVAL);
+
+	igt_fork(childno, 1) {
+		ptr = mmap(NULL, BO_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, dma_buf_fd, 0);
+		igt_assert(ptr != MAP_FAILED);
+		fill_bo_cpu(ptr);
+
+		igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
+		munmap(ptr, BO_SIZE);
+		close(dma_buf_fd);
+	}
+	close(dma_buf_fd);
+	igt_waitchildren();
+	gem_close(fd, handle);
+}
+
 static void
 test_refcounting(void)
 {
@@ -224,15 +290,14 @@ test_dup(void)
 	close (dma_buf_fd);
 }
 
-
 /* Used for error case testing to avoid wrapper */
-static int prime_handle_to_fd_no_assert(uint32_t handle, int *fd_out)
+static int prime_handle_to_fd_no_assert(uint32_t handle, int flags, int *fd_out)
 {
 	struct drm_prime_handle args;
 	int ret;
 
 	args.handle = handle;
-	args.flags = DRM_CLOEXEC;
+	args.flags = flags;
 	args.fd = -1;
 
 	ret = drmIoctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
@@ -260,7 +325,7 @@ test_userptr(void)
 	gem_userptr(fd, (uint32_t *)ptr, BO_SIZE, 0, 0, &handle);
 
 	/* export userptr */
-	ret = prime_handle_to_fd_no_assert(handle, &dma_buf_fd);
+	ret = prime_handle_to_fd_no_assert(handle, DRM_CLOEXEC, &dma_buf_fd);
 	if (ret) {
 		igt_assert(ret == EINVAL || ret == ENODEV);
 		goto free_userptr;
@@ -281,15 +346,25 @@ free_userptr:
 static void
 test_errors(void)
 {
-	int dma_buf_fd;
+	int i, dma_buf_fd;
 	char *ptr;
 	uint32_t handle;
+	int invalid_flags[] = {DRM_CLOEXEC - 1, DRM_CLOEXEC + 1,
+	                       DRM_RDWR - 1, DRM_RDWR + 1};
+
+	/* Test for invalid flags */
+	handle = gem_create(fd, BO_SIZE);
+	for (i = 0; i < sizeof(invalid_flags) / sizeof(invalid_flags[0]); i++) {
+		prime_handle_to_fd_no_assert(handle, invalid_flags[i], &dma_buf_fd);
+		igt_assert_eq(errno, EINVAL);
+		errno = 0;
+	}
 
 	/* Close gem object before priming */
 	handle = gem_create(fd, BO_SIZE);
 	fill_bo(handle, BO_SIZE);
 	gem_close(fd, handle);
-	prime_handle_to_fd_no_assert(handle, &dma_buf_fd);
+	prime_handle_to_fd_no_assert(handle, DRM_CLOEXEC, &dma_buf_fd);
 	igt_assert(dma_buf_fd == -1 && errno == ENOENT);
 	errno = 0;
 
@@ -392,6 +467,8 @@ igt_main
 		{ "test_map_unmap", test_map_unmap },
 		{ "test_reprime", test_reprime },
 		{ "test_forked", test_forked },
+		{ "test_correct_cpu_write", test_correct_cpu_write },
+		{ "test_forked_cpu_write", test_forked_cpu_write },
 		{ "test_refcounting", test_refcounting },
 		{ "test_dup", test_dup },
 		{ "test_userptr", test_userptr },
-- 
2.1.0


