[Intel-gfx] [PATCH i-g-t] tests/gem_buffered_svm_test: New tests for buffered SVM feature
Vinay Belgaumkar
vinay.belgaumkar at intel.com
Tue Nov 10 08:02:52 PST 2015
v1: These tests exercise the userptr ioctl to create buffers shared
between the CPU and GPU. They contain error and normal usage scenarios,
as well as a couple of stress tests which copy buffers between the CPU
and GPU. These tests rely on the softpin patch in order to pin buffers
to a chosen virtual address.

Caveat: These tests were designed to run on 64-bit systems. Future work
includes adding logic to ensure these tests can run on 32-bit systems
with PPGTT support. Some tests are currently disabled on 32-bit systems
for that reason.
v2: Added cc and signed-off-by fields
Testcase: igt/gem_buffered_svm_test
Cc: Michel Thierry <michel.thierry at intel.com>
Signed-off-by: Vinay Belgaumkar <vinay.belgaumkar at intel.com>
---
tests/Makefile.sources | 1 +
tests/gem_buffered_svm_test.c | 1490 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 1491 insertions(+)
create mode 100644 tests/gem_buffered_svm_test.c
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index 8fb2de8..2ce4216 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -11,6 +11,7 @@ TESTS_progs_M = \
drv_hangman \
gem_bad_reloc \
gem_basic \
+ gem_buffered_svm_test \
gem_caching \
gem_close_race \
gem_concurrent_blit \
diff --git a/tests/gem_buffered_svm_test.c b/tests/gem_buffered_svm_test.c
new file mode 100644
index 0000000..44d342a
--- /dev/null
+++ b/tests/gem_buffered_svm_test.c
@@ -0,0 +1,1490 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Vinay Belgaumkar <vinay.belgaumkar at intel.com>
+ *    Thomas Daniel <thomas.daniel at intel.com>
+ *
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <malloc.h>
+#include "drm.h"
+#include "ioctl_wrappers.h"
+#include "drmtest.h"
+#include "intel_chipset.h"
+#include "intel_io.h"
+#include "i915_drm.h"
+#include <assert.h>
+#include <sys/wait.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include "igt_kms.h"
+
+
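+/* Detect a 32-bit userspace build: INTPTR_MAX equals INT32_MAX only
+ * when pointers are 32 bits wide */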
+#if (INTPTR_MAX == INT32_MAX)
+ #define IS_32BIT_USER
+#endif
+
+#define OBJECT_SIZE 16384
+#define BO_SIZE (4 * 4096)
+#define STORE_BATCH_BUFFER_SIZE 6
+#define STRESS_BATCH_BUFFER_SIZE 5
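+/* Execbuffer2 object flags from the softpin patch, defined locally in
+ * case the installed i915_drm.h does not provide them yet */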
+#define EXEC_OBJECT_PINNED (1<<4)
+#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
+#define SHARED_BUFFER_SIZE 4096
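+/* Most tests submit two exec objects: the shared buffer and the batch */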
+#define NUM_EXEC_OBJECTS 2
+
+typedef struct drm_i915_gem_userptr i915_gem_userptr;
+static i915_gem_userptr* gem_create_userptr_struct(void* ptr, int size, int read_only);
+
+static void* gem_create_mem_buffer(int size);
+static void gem_invalid_userptr_test(void);
+static int gem_call_userptr_ioctl(int fd, i915_gem_userptr* userptr);
+static void gem_basic_test(bool);
+static void gem_pin_invalid_vma_test(void);
+static void gem_pin_overlap_test(void);
+static void gem_shmem_test(void);
+static void gem_pin_high_address_test(void);
+static void gem_pin_mmap_anonymous_test(void);
+static void gem_pin_mmap_file_test(void);
+
+/**
+ * gem_call_userptr_ioctl - thin wrapper around DRM_IOCTL_I915_GEM_USERPTR
+ * Returns 0 on success, or the errno value on failure. Invalid-argument
+ * scenarios (e.g. writing to a read-only buffer) are exercised by the
+ * tests below.
+ */
+
+static int gem_call_userptr_ioctl(int fd, i915_gem_userptr* userptr)
+{
+ int ret;
+
+ ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, userptr);
+
+ if (ret)
+ ret = errno;
+
+ return ret;
+}
+
+static i915_gem_userptr* gem_create_userptr_struct(void* ptr, int size, int read_only)
+{
+ i915_gem_userptr* userptr;
+
+ userptr = (i915_gem_userptr*)calloc(1, sizeof(i915_gem_userptr));
+
+ igt_assert(userptr != NULL);
+
+ userptr->user_ptr = (uintptr_t)ptr;
+ userptr->user_size = size;
+ userptr->flags = I915_USERPTR_UNSYNCHRONIZED;
+
+ if (read_only)
+ userptr->flags |= I915_USERPTR_READ_ONLY;
+
+ return userptr;
+}
+
+
+
+/** gem_create_mem_buffer - Create a page-aligned CPU buffer
+ * @size - size of buffer
+ * RETURNS pointer to buffer of @size bytes
+ */
+
+static void* gem_create_mem_buffer(int size)
+{
+ void* addr;
+
+ addr = memalign(4096, size);
+ igt_assert(addr != NULL);
+
+ return addr;
+}
+
+/**
+ * This test passes an invalid (NULL) userptr struct to the ioctl.
+ * The ioctl call is expected to fail.
+ */
+static void gem_invalid_userptr_test(void)
+{
+ i915_gem_userptr* userptr = NULL;
+ int fd, ret;
+
+ fd = drm_open_driver(DRIVER_INTEL);
+
+ ret = gem_call_userptr_ioctl(fd, userptr);
+
+ /* Expect to fail */
+ igt_assert(ret);
+
+ close(fd);
+}
+
+/** setup_exec_obj - populate exec object
+ * @exec - exec object
+ * @handle - handle to gem buffer
+ * @flags - any flags
+ * @offset - requested virtual address; with EXEC_OBJECT_PINNED set, the
+ * kernel pins the object at exactly this address instead of relocating it
+ */
+static void setup_exec_obj(struct drm_i915_gem_exec_object2 *exec, __u32 handle, __u32 flags, uint32_t* offset)
+{
+ memset(exec, 0, sizeof(struct drm_i915_gem_exec_object2));
+ exec->handle = handle;
+ exec->flags = flags;
+ exec->offset = (uintptr_t)offset;
+}
+
+/**
+ * gem_store_data_svm - populate batch buffer with MI_STORE_DATA_IMM command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @vaddr: destination virtual address
+ * @data: data to be stored at destination
+ * @end: whether to end the batch buffer or not
+ */
+#define MI_COMMAND 0
+#define MI_STORE_DATA_IMM (0x20 << 23)
+#define USE_PPGTT (0x0 << 22)
+#define DWORD_LENGTH 0x2
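+/* gem_store_data_svm emits a single MI_STORE_DATA_IMM:
+ * DW0 command header, DW1 destination address bits 0:31,
+ * DW2 destination address bits 32:47 (0 on 32-bit builds),
+ * DW3 immediate data, optionally followed by MI_BATCH_BUFFER_END
+ * and a padding dword. */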
+static int gem_store_data_svm(int fd, uint32_t* cmd_buf, uint32_t* vaddr,
+ uint32_t data, bool end)
+{
+ int i = 0;
+
+ cmd_buf[i++] = MI_COMMAND | MI_STORE_DATA_IMM | USE_PPGTT | DWORD_LENGTH;
+ cmd_buf[i++] = (uintptr_t)vaddr & 0xFFFFFFFC;
+ #ifdef IS_32BIT_USER
+ cmd_buf[i++] = 0;
+ #else // 64 bit
+ cmd_buf[i++] = ((uintptr_t)vaddr >> 32) & 0xFFFF; /* bits 32:47 */
+ #endif
+
+ cmd_buf[i++] = data;
+ if (end)
+ {
+ cmd_buf[i++] = MI_BATCH_BUFFER_END;
+ cmd_buf[i++] = 0;
+ }
+
+ return (i * sizeof(uint32_t));
+}
+
+/**
+ * gem_store_data - populate batch buffer with MI_STORE_DATA_IMM command,
+ * filling in a relocation entry for the destination
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @handle: handle of the destination buffer
+ * @data: data to be stored at destination
+ * @reloc: relocation entry
+ * @end: whether to end the batch buffer or not
+ */
+
+static int gem_store_data(int fd, uint32_t* cmd_buf,
+ uint32_t handle, uint32_t data,
+ struct drm_i915_gem_relocation_entry *reloc,
+ bool end)
+{
+ int i = 0;
+
+ cmd_buf[i++] = MI_COMMAND | MI_STORE_DATA_IMM | USE_PPGTT | DWORD_LENGTH;
+ cmd_buf[i++] = 0; /* lower 32 bits of 48 bit address - 0 because reloc is needed */
+ cmd_buf[i++] = 0; /* upper 16 bits of 48 bit address - 0 because reloc is needed */
+ reloc->offset = 1 * sizeof(uint32_t);
+ reloc->delta = 0;
+ reloc->target_handle = handle;
+ reloc->read_domains = I915_GEM_DOMAIN_RENDER;
+ reloc->write_domain = I915_GEM_DOMAIN_RENDER;
+ reloc->presumed_offset = 0;
+ reloc++;
+ cmd_buf[i++] = data;
+ if (end)
+ {
+ cmd_buf[i++] = MI_BATCH_BUFFER_END;
+ cmd_buf[i++] = 0;
+ }
+
+ return (i * sizeof(uint32_t));
+}
+
+
+/** gem_basic_test - Create a shared buffer and a command for the GPU to
+ * write data into it; the CPU then reads back and verifies the expected
+ * value.
+ * @valid_shared_buffer - whether to test with a valid malloc'd buffer or
+ * a NULL pointer
+
+@code
+   if (valid_shared_buffer == true)
+   Malloc a 4K buffer
+   Share buffer with GPU by using userptr ioctl
+   Create batch buffer to write DATA to first dword of buffer
+   Use virtual address of buffer as destination address in batch buffer
+   Set EXEC_OBJECT_PINNED flag in exec object
+   Set 'offset' in exec object to shared buffer VMA
+   Submit execbuffer
+   Verify value of first DWORD in shared buffer matches DATA
+
+   if (valid_shared_buffer == false)
+   Declare null buffer
+   Call userptr ioctl with null buffer
+   Run basic test
+   Test should fail at submit execbuffer
+@endcode
+*/
+static void gem_basic_test(bool valid_shared_buffer)
+{
+ i915_gem_userptr* userptr = NULL;
+ int fd, ret;
+ uint32_t* shared_buffer = NULL;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 exec_object2[NUM_EXEC_OBJECTS];
+ uint32_t batch_buffer[STORE_BATCH_BUFFER_SIZE];
+ uint32_t batch_buf_handle, shared_buf_handle;
+ int ring, len;
+ const uint32_t data = 0x12345678;
+
+ fd = drm_open_driver(DRIVER_INTEL);
+ batch_buf_handle = gem_create(fd, BO_SIZE);
+
+ /* create cpu buffer, set to all 0xF's */
+ if (valid_shared_buffer)
+ {
+ shared_buffer = gem_create_mem_buffer(BO_SIZE);
+ *shared_buffer = 0xFFFFFFFF;
+ }
+
+ /* share with GPU */
+ userptr = gem_create_userptr_struct(shared_buffer, BO_SIZE, 0);
+ ret = gem_call_userptr_ioctl(fd, userptr);
+ igt_assert_eq(ret, 0);
+
+ /* Get handle for shared buffer */
+ shared_buf_handle = userptr->handle;
+ free(userptr);
+
+ /* create command buffer with write command */
+ len = gem_store_data_svm(fd, batch_buffer, shared_buffer, data, true);
+ igt_assert_lte(len, STORE_BATCH_BUFFER_SIZE * 4);
+
+ gem_write(fd, batch_buf_handle, 0, batch_buffer, len);
+
+ /* submit command buffer */
+ setup_exec_obj(&exec_object2[0], shared_buf_handle, EXEC_OBJECT_PINNED, shared_buffer);
+ setup_exec_obj(&exec_object2[1], batch_buf_handle, 0, 0);
+
+ ring = 0;
+ if (HAS_BLT_RING(intel_get_drm_devid(fd)))
+ ring = I915_EXEC_BLT;
+
+ execbuf.buffers_ptr = (uintptr_t)exec_object2;
+ execbuf.buffer_count = NUM_EXEC_OBJECTS;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = len;
+ execbuf.cliprects_ptr = 0;
+ execbuf.num_cliprects = 0;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = 0;
+ execbuf.flags = ring;
+ i915_execbuffer2_set_context_id(execbuf, 0);
+ execbuf.rsvd2 = 0;
+
+ if (valid_shared_buffer)
+ {
+ gem_execbuf(fd, &execbuf);
+ }
+ else
+ {
+ /* Expect execbuf to fail */
+ ret = drmIoctl(fd,
+ DRM_IOCTL_I915_GEM_EXECBUFFER2,
+ &execbuf);
+ igt_assert_neq(ret, 0);
+ }
+ gem_sync(fd, batch_buf_handle);
+ gem_close(fd, batch_buf_handle);
+ close(fd);
+
+ // check on CPU to see if value changes
+ if (valid_shared_buffer)
+ {
+ igt_fail_on_f(shared_buffer[0] != data,
+ "\nCPU read does not match GPU write, expected: 0x%x, got: 0x%x\n", data, shared_buffer[0]);
+
+ free(shared_buffer);
+ }
+}
+
+/** gem_multiple_process_test - Run the basic test simultaneously in
+ * multiple processes. This tests pinning the same VA separately in
+ * each process.
+
+@code
+   fork();
+   Execute basic test in parent and child processes
+@endcode
+**/
+
+static void gem_multiple_process_test(void)
+{
+ int status;
+ pid_t child_pid, wait_pid;
+ child_pid = fork();
+ igt_assert(child_pid >= 0);
+
+ if (child_pid == 0)
+ {
+ gem_basic_test(true);
+ _exit(0);
+ }
+ else
+ {
+ gem_basic_test(true);
+ wait_pid = wait(&status);
+ igt_assert(wait_pid != -1);
+ }
+}
+
+
+/** gem_repin_test
+ * This test tries to repin a buffer at a previously pinned VMA
+ * from a different execbuf.
+
+@code
+   Malloc a 4K buffer
+   Share buffer with GPU by using userptr ioctl
+   Create batch buffer to write DATA to first dword of buffer
+   Use virtual address of buffer as destination address in batch buffer
+   Set EXEC_OBJECT_PINNED flag in exec object
+   Set 'offset' in exec object to shared buffer VMA
+   Submit execbuffer
+   Verify value of first DWORD in shared buffer matches DATA
+
+   Create second shared buffer
+   Follow all steps above
+   Except, for offset, use VMA of first buffer above
+   Submit execbuffer
+   Verify value of first DWORD in second shared buffer matches DATA
+@endcode
+**/
+
+static void gem_repin_test(void)
+{
+ i915_gem_userptr* userptr = NULL;
+ i915_gem_userptr* userptr1 = NULL;
+ int fd, ret;
+ uint32_t* shared_buffer = NULL;
+ uint32_t* shared_buffer1 = NULL;
+ struct drm_i915_gem_execbuffer2 execbuf, execbuf1;
+ struct drm_i915_gem_exec_object2 exec_object2[NUM_EXEC_OBJECTS];
+ uint32_t batch_buffer[STORE_BATCH_BUFFER_SIZE];
+ uint32_t batch_buf_handle, shared_buf_handle, shared_buf_handle1;
+ int ring, len;
+ const uint32_t data = 0x12345678;
+
+ // Create gem object
+ fd = drm_open_driver(DRIVER_INTEL);
+ batch_buf_handle = gem_create(fd, BO_SIZE);
+
+ // create cpu buffers
+ shared_buffer = gem_create_mem_buffer(BO_SIZE);
+ shared_buffer1 = gem_create_mem_buffer(BO_SIZE * 2);
+
+ // share with GPU
+ userptr = gem_create_userptr_struct(shared_buffer, BO_SIZE, 0);
+ ret = gem_call_userptr_ioctl(fd, userptr);
+ igt_assert_eq(ret, 0);
+
+ userptr1 = gem_create_userptr_struct(shared_buffer1, BO_SIZE * 2, 0);
+ ret = gem_call_userptr_ioctl(fd, userptr1);
+ igt_assert_eq(ret, 0);
+
+ // Get handle for shared buffer
+ shared_buf_handle = userptr->handle;
+ shared_buf_handle1 = userptr1->handle;
+ free(userptr);
+ free(userptr1);
+
+ // create command buffer with write command
+ len = gem_store_data_svm(fd, batch_buffer, shared_buffer, data, true);
+ gem_write(fd, batch_buf_handle, 0, batch_buffer, len);
+
+ // submit command buffer
+ setup_exec_obj(&exec_object2[0], shared_buf_handle, EXEC_OBJECT_PINNED, shared_buffer);
+ setup_exec_obj(&exec_object2[1], batch_buf_handle, 0, 0);
+
+ ring = 0;
+ if (HAS_BLT_RING(intel_get_drm_devid(fd)))
+ ring = I915_EXEC_BLT;
+
+ execbuf.buffers_ptr = (uintptr_t)exec_object2;
+ execbuf.buffer_count = NUM_EXEC_OBJECTS;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = len;
+ execbuf.cliprects_ptr = 0;
+ execbuf.num_cliprects = 0;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = 0;
+ execbuf.flags = ring;
+ i915_execbuffer2_set_context_id(execbuf, 0);
+ execbuf.rsvd2 = 0;
+
+ gem_execbuf(fd, &execbuf);
+ gem_sync(fd, batch_buf_handle);
+
+
+ // Second buffer
+ // create command buffer with write command
+ len = gem_store_data_svm(fd, batch_buffer, shared_buffer1, data, true);
+ gem_write(fd, batch_buf_handle, 0, batch_buffer, len);
+
+ // submit command buffer
+ // Pin at shared_buffer, not shared_buffer1
+ setup_exec_obj(&exec_object2[0], shared_buf_handle1, EXEC_OBJECT_PINNED, shared_buffer);
+ setup_exec_obj(&exec_object2[1], batch_buf_handle, 0, 0);
+
+ ring = 0;
+ if (HAS_BLT_RING(intel_get_drm_devid(fd)))
+ ring = I915_EXEC_BLT;
+
+ execbuf1.buffers_ptr = (uintptr_t)exec_object2;
+ execbuf1.buffer_count = NUM_EXEC_OBJECTS;
+ execbuf1.batch_start_offset = 0;
+ execbuf1.batch_len = len;
+ execbuf1.cliprects_ptr = 0;
+ execbuf1.num_cliprects = 0;
+ execbuf1.DR1 = 0;
+ execbuf1.DR4 = 0;
+ execbuf1.flags = ring;
+ i915_execbuffer2_set_context_id(execbuf1, 0);
+ execbuf1.rsvd2 = 0;
+
+ gem_execbuf(fd, &execbuf1);
+ gem_sync(fd, batch_buf_handle);
+
+ gem_close(fd, batch_buf_handle);
+ close(fd);
+
+ free(shared_buffer);
+ free(shared_buffer1);
+}
+
+
+/** gem_pin_overlap_test
+ * This test attempts to pin two buffers at the same VMA as part of the
+ * same execbuffer object.
+
+@code
+   Malloc a 4K buffer
+   Share buffer with GPU by using userptr ioctl
+   Create second shared buffer
+   Create batch buffer to write DATA to first dword of each buffer
+   Use virtual address of each buffer as destination addresses in batch buffer
+   Set EXEC_OBJECT_PINNED flag in both exec objects
+   Set 'offset' in both exec objects to first shared buffer VMA
+   Submit execbuffer
+   Command should return EINVAL, since we are trying to pin to same VMA
+@endcode
+**/
+static void gem_pin_overlap_test(void)
+{
+ i915_gem_userptr* userptr = NULL;
+ i915_gem_userptr* userptr1 = NULL;
+ int fd, ret;
+ uint32_t* shared_buffer = NULL;
+ uint32_t* shared_buffer1 = NULL;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 exec_object2[NUM_EXEC_OBJECTS + 1];
+ uint32_t batch_buffer[BO_SIZE];
+ uint32_t batch_buf_handle, shared_buf_handle, shared_buf_handle1;
+ int ring, len;
+ const uint32_t data = 0x12345678;
+
+ fd = drm_open_driver(DRIVER_INTEL);
+ batch_buf_handle = gem_create(fd, BO_SIZE);
+
+ shared_buffer = gem_create_mem_buffer(BO_SIZE);
+ shared_buffer1 = gem_create_mem_buffer(BO_SIZE * 2);
+
+ /* share with GPU */
+ userptr = gem_create_userptr_struct(shared_buffer, BO_SIZE, 0);
+ ret = gem_call_userptr_ioctl(fd, userptr);
+ igt_assert_eq(ret, 0);
+
+ userptr1 = gem_create_userptr_struct(shared_buffer1, BO_SIZE * 2, 0);
+ ret = gem_call_userptr_ioctl(fd, userptr1);
+ igt_assert_eq(ret, 0);
+
+ shared_buf_handle = userptr->handle;
+ shared_buf_handle1 = userptr1->handle;
+ free(userptr);
+ free(userptr1);
+
+ len = gem_store_data_svm(fd, batch_buffer, shared_buffer, data, false);
+ len += gem_store_data_svm(fd, (batch_buffer + len/4), shared_buffer1, data, true);
+ gem_write(fd, batch_buf_handle, 0, batch_buffer, len);
+
+ /* submit command buffer */
+ setup_exec_obj(&exec_object2[0], shared_buf_handle, EXEC_OBJECT_PINNED, shared_buffer);
+ setup_exec_obj(&exec_object2[1], shared_buf_handle1, EXEC_OBJECT_PINNED, shared_buffer);
+ setup_exec_obj(&exec_object2[2], batch_buf_handle, 0, 0);
+
+ ring = 0;
+ if (HAS_BLT_RING(intel_get_drm_devid(fd)))
+ ring = I915_EXEC_BLT;
+
+ execbuf.buffers_ptr = (uintptr_t)exec_object2;
+ execbuf.buffer_count = NUM_EXEC_OBJECTS + 1;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = len;
+ execbuf.cliprects_ptr = 0;
+ execbuf.num_cliprects = 0;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = 0;
+ execbuf.flags = ring;
+ i915_execbuffer2_set_context_id(execbuf, 0);
+ execbuf.rsvd2 = 0;
+
+ ret = drmIoctl(fd,
+ DRM_IOCTL_I915_GEM_EXECBUFFER2,
+ &execbuf);
+
+ /* expect to fail with EINVAL */
+ igt_assert_neq(ret, 0);
+ igt_assert(errno == EINVAL);
+
+ gem_close(fd, batch_buf_handle);
+ close(fd);
+
+ free(shared_buffer);
+ free(shared_buffer1);
+}
+
+
+/** gem_evict_test
+ * Create a shared buffer and pin it, create a normal buffer, and try
+ * to relocate the normal buffer to the shared location.
+ * Open question: how to detect that eviction occurred?
+ * (i915_gem_gtt debugfs api - grep it for the page?)
+
+@code
+   Create a gem buffer of 4K
+   Malloc a 4K buffer
+   Share buffer with GPU using userptr ioctl
+   Create a batch buffer to write 0x11111111 and 0x22222222 in above 2 buffers
+   Pin shared buffer to offset '0' in GTT
+   Create reloc buffer to ensure gem buffer is relocated to GTT
+   Submit execbuffer
+   Verify shared buffer has 0x22222222 as expected
+   Obtain offset of where gem object has been placed from exec object field
+   Try to pin shared buffer at that address using 'offset' field in exec object
+   Prevent relocation by setting relocation_count = 0
+   Submit execbuffer
+   Shared buffer will be pinned to previous address of gem object
+   Unshared buffer will be evicted, since relocation is not allowed
+   Second batch buffer will write 0x11111111 to shared buffer instead of unshared
+   Verify shared buffer contains 0x11111111
+   Repeat with the exec objects listed in reverse order
+@endcode
+**/
+
+static void gem_evict_test(void)
+{
+ i915_gem_userptr* userptr = NULL;
+ int fd, ret;
+ uint32_t* shared_buffer = NULL;
+ struct drm_i915_gem_relocation_entry reloc[4];
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 exec_object2[NUM_EXEC_OBJECTS + 1];
+ uint32_t batch_buffer[STORE_BATCH_BUFFER_SIZE * 2];
+ uint32_t batch_buf_handle, shared_buf_handle, unshared_buf_handle;
+ int ring, len;
+
+ fd = drm_open_driver(DRIVER_INTEL);
+ batch_buf_handle = gem_create(fd, BO_SIZE);
+ unshared_buf_handle = gem_create(fd, BO_SIZE);
+
+ shared_buffer = gem_create_mem_buffer(BO_SIZE);
+ *shared_buffer = 0xFFFFFFFF;
+
+ // share with GPU
+ userptr = gem_create_userptr_struct(shared_buffer, BO_SIZE, 0);
+ ret = gem_call_userptr_ioctl(fd, userptr);
+ igt_assert_eq(ret, 0);
+
+ // Get handle for shared buffer
+ shared_buf_handle = userptr->handle;
+ free(userptr);
+
+ // create command buffer with write commands
+ len = gem_store_data(fd, batch_buffer, unshared_buf_handle, 0x11111111, reloc, false);
+ len += gem_store_data_svm(fd, batch_buffer + (len/4), NULL, 0x22222222, true); /* address 0, where the shared buffer is pinned */
+ igt_assert_lte(len, STORE_BATCH_BUFFER_SIZE * 2 * 4);
+ gem_write(fd, batch_buf_handle, 0, batch_buffer, len);
+
+ // submit command buffer
+ setup_exec_obj(&exec_object2[0], shared_buf_handle, EXEC_OBJECT_PINNED, 0);
+ setup_exec_obj(&exec_object2[1], unshared_buf_handle, 0, 0);
+ setup_exec_obj(&exec_object2[2], batch_buf_handle, 0, 0);
+
+ exec_object2[2].relocation_count = 1;
+ exec_object2[2].relocs_ptr = (uintptr_t)reloc;
+
+ ring = 0;
+ if (HAS_BLT_RING(intel_get_drm_devid(fd)))
+ ring = I915_EXEC_BLT;
+
+ execbuf.buffers_ptr = (uintptr_t)exec_object2;
+ execbuf.buffer_count = NUM_EXEC_OBJECTS + 1;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = len;
+ execbuf.cliprects_ptr = 0;
+ execbuf.num_cliprects = 0;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = 0;
+ execbuf.flags = ring;
+ i915_execbuffer2_set_context_id(execbuf, 0);
+ execbuf.rsvd2 = 0;
+
+ gem_execbuf(fd, &execbuf);
+ gem_sync(fd, batch_buf_handle);
+ igt_assert(*shared_buffer == 0x22222222);
+ *shared_buffer = 0xffffffff;
+ // Now cause eviction of unshared buffer by pinning shared buffer there
+ exec_object2[0].offset = exec_object2[1].offset;
+ // Prevent relocation
+ exec_object2[2].relocation_count = 0;
+
+ gem_execbuf(fd, &execbuf);
+ gem_sync(fd, batch_buf_handle);
+ igt_assert(*shared_buffer == 0x11111111);
+ igt_assert(exec_object2[0].offset != exec_object2[1].offset);
+
+ // Now let's do it again with the objects listed in reverse order...
+ *shared_buffer = 0xffffffff;
+ setup_exec_obj(&exec_object2[0], unshared_buf_handle, 0, 0);
+ setup_exec_obj(&exec_object2[1], shared_buf_handle, EXEC_OBJECT_PINNED, 0);
+ exec_object2[2].relocation_count = 1;
+ gem_execbuf(fd, &execbuf);
+ gem_sync(fd, batch_buf_handle);
+ igt_assert(*shared_buffer == 0x22222222);
+ *shared_buffer = 0xffffffff;
+ // Now cause eviction of unshared buffer by pinning shared buffer there
+ exec_object2[1].offset = exec_object2[0].offset;
+ // Prevent relocation
+ exec_object2[2].relocation_count = 0;
+
+ gem_execbuf(fd, &execbuf);
+ gem_sync(fd, batch_buf_handle);
+ igt_assert(*shared_buffer == 0x11111111);
+ igt_assert(exec_object2[0].offset != exec_object2[1].offset);
+
+ gem_close(fd, batch_buf_handle);
+ close(fd);
+
+ free(shared_buffer);
+}
+
+/** gem_stress_test - Stress test which creates 10K buffers and shares with GPU
+
+@code
+   Create 10K uint32 buffers of BO_SIZE (16K) each
+   Share with GPU using userptr ioctl
+   Create batch buffer to write DATA in first element of each buffer
+   Execute batch buffer on blit ring STRESS_NUM_LOOPS times
+   Validate every buffer has DATA in first element
+   Repeat on render ring
+@endcode
+**/
+#define STRESS_NUM_BUFFERS 10240
+#define STRESS_NUM_LOOPS 1000
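+/* Total footprint of the shared buffers: STRESS_NUM_BUFFERS * BO_SIZE = 160 MiB */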
+
+static void gem_stress_test(void)
+{
+ i915_gem_userptr* userptr = NULL;
+ int fd, ret;
+ uint32_t* shared_buffer[STRESS_NUM_BUFFERS];
+ uint32_t shared_handle[STRESS_NUM_BUFFERS];
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 exec_object2[STRESS_NUM_BUFFERS + 1];
+ uint32_t batch_buffer[4*STRESS_NUM_BUFFERS + 2];
+ uint32_t batch_buf_handle;
+ int ring, len, i, j;
+
+ fd = drm_open_driver(DRIVER_INTEL);
+ batch_buf_handle = gem_create(fd, sizeof(batch_buffer));
+
+ // create command buffer with write commands
+ len = 0;
+ for(i = 0; i < STRESS_NUM_BUFFERS; i++)
+ {
+ shared_buffer[i] = gem_create_mem_buffer(BO_SIZE);
+ *shared_buffer[i] = 0xFFFFFFFF;
+
+ // share with GPU
+ userptr = gem_create_userptr_struct(shared_buffer[i], BO_SIZE, 0);
+ ret = gem_call_userptr_ioctl(fd, userptr);
+ igt_assert_eq(ret, 0);
+
+ // Get handle for shared buffer
+ shared_handle[i] = userptr->handle;
+ free(userptr);
+
+ setup_exec_obj(&exec_object2[i], shared_handle[i], EXEC_OBJECT_PINNED, shared_buffer[i]);
+ len += gem_store_data_svm(fd, batch_buffer + (len/4), shared_buffer[i], i, (i == STRESS_NUM_BUFFERS-1));
+ }
+ gem_write(fd, batch_buf_handle, 0, batch_buffer, len);
+
+ // submit command buffer
+
+ exec_object2[STRESS_NUM_BUFFERS].handle = batch_buf_handle;
+ exec_object2[STRESS_NUM_BUFFERS].relocation_count = 0;
+ exec_object2[STRESS_NUM_BUFFERS].relocs_ptr = 0;
+ exec_object2[STRESS_NUM_BUFFERS].alignment = 0;
+ exec_object2[STRESS_NUM_BUFFERS].offset = 0;
+ exec_object2[STRESS_NUM_BUFFERS].flags = 0;
+ exec_object2[STRESS_NUM_BUFFERS].rsvd1 = 0;
+ exec_object2[STRESS_NUM_BUFFERS].rsvd2 = 0;
+
+ ring = 0;
+ if (HAS_BLT_RING(intel_get_drm_devid(fd)))
+ ring = I915_EXEC_BLT;
+
+ execbuf.buffers_ptr = (uintptr_t)exec_object2;
+ execbuf.buffer_count = STRESS_NUM_BUFFERS + 1;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = len;
+ execbuf.cliprects_ptr = 0;
+ execbuf.num_cliprects = 0;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = 0;
+ execbuf.flags = ring;
+ i915_execbuffer2_set_context_id(execbuf, 0);
+ execbuf.rsvd2 = 0;
+
+ for (i = 0; i < STRESS_NUM_LOOPS; i++)
+ {
+ gem_execbuf(fd, &execbuf);
+ gem_sync(fd, batch_buf_handle);
+ for(j = 0; j < STRESS_NUM_BUFFERS; j++)
+ {
+ igt_fail_on_f(*shared_buffer[j] != j,
+ "Mismatch in object %d, iteration %d: 0x%08X\n", j, i, *shared_buffer[j]);
+ *shared_buffer[j] = (j<<2)+1; /* poison so the next loop must rewrite it */
+ }
+ }
+
+ // Now render ring
+ ring = I915_EXEC_RENDER;
+ execbuf.flags = ring;
+ for (i = 0; i < STRESS_NUM_LOOPS; i++)
+ {
+ gem_execbuf(fd, &execbuf);
+ gem_sync(fd, batch_buf_handle);
+ for(j = 0; j < STRESS_NUM_BUFFERS; j++)
+ {
+ igt_fail_on_f(*shared_buffer[j] != j,
+ "Mismatch in object %d, iteration %d: 0x%08X\n", j, i, *shared_buffer[j]);
+ *shared_buffer[j] = (j<<2)+1; /* poison so the next loop must rewrite it */
+ }
+ }
+
+ gem_close(fd, batch_buf_handle);
+ close(fd);
+
+ for(i = 0; i < STRESS_NUM_BUFFERS; i++)
+ {
+ free(shared_buffer[i]);
+ }
+}
+
+#define STRESS2_NUM_BUFFERS 1
+#define STRESS2_NUM_LOOPS 256
+
+/** gem_write_multipage_buffer_test - Create a buffer spanning multiple
+ pages and share it with the GPU. Write to every element of the buffer
+ and verify correct contents.
+
+@code
+   Create 16K uint32 buffer
+   Share with GPU using userptr ioctl
+   Create batch buffer to write DATA in all elements of buffer
+   Execute batch buffer
+   Validate every element has DATA
+@endcode
+**/
+static void gem_write_multipage_buffer_test(void)
+{
+ i915_gem_userptr* userptr = NULL;
+ int fd, ret;
+ uint32_t* shared_buffer;
+ uint32_t shared_handle;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 exec_object2[NUM_EXEC_OBJECTS];
+ uint32_t batch_buffer[(4 * BO_SIZE) + 2];
+ uint32_t batch_buf_handle;
+ int ring, len, i, j;
+
+ fd = drm_open_driver(DRIVER_INTEL);
+ batch_buf_handle = gem_create(fd, sizeof(batch_buffer));
+
+ /* create command buffer with write commands */
+ len = 0;
+ shared_buffer = gem_create_mem_buffer(BO_SIZE * 4);
+ memset(batch_buffer, 0, sizeof(batch_buffer));
+
+ for(i = 0; i< BO_SIZE; i++)
+ {
+ shared_buffer[i] = 0;
+ }
+
+ /* share with GPU */
+ userptr = gem_create_userptr_struct(shared_buffer, BO_SIZE * 4, 0);
+ ret = gem_call_userptr_ioctl(fd, userptr);
+ igt_assert_eq(ret, 0);
+
+ /* Get handle for shared buffer */
+ shared_handle = userptr->handle;
+ free(userptr);
+
+ setup_exec_obj(&exec_object2[0], shared_handle, EXEC_OBJECT_PINNED, shared_buffer);
+
+ for(j=0; j< (BO_SIZE); j++)
+ {
+ len += gem_store_data_svm(fd, batch_buffer + (len/4), &shared_buffer[j], j, (j == BO_SIZE-1));
+ }
+
+ gem_write(fd, batch_buf_handle, 0, batch_buffer, len);
+
+ // submit command buffer
+ setup_exec_obj(&exec_object2[1], batch_buf_handle, 0, 0);
+
+ ring = 0;
+ if (HAS_BLT_RING(intel_get_drm_devid(fd)))
+ ring = I915_EXEC_BLT;
+
+ execbuf.buffers_ptr = (uintptr_t)exec_object2;
+ execbuf.buffer_count = NUM_EXEC_OBJECTS;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = len;
+ execbuf.cliprects_ptr = 0;
+ execbuf.num_cliprects = 0;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = 0;
+ execbuf.flags = ring;
+ i915_execbuffer2_set_context_id(execbuf, 0);
+ execbuf.rsvd2 = 0;
+
+ gem_execbuf(fd, &execbuf);
+ gem_sync(fd, batch_buf_handle);
+ for(j = 0; j < (BO_SIZE); j++)
+ {
+ igt_fail_on_f(shared_buffer[j] != j,
+ "Mismatch in object %d: 0x%08X\n", j, shared_buffer[j]);
+ }
+
+ gem_close(fd, batch_buf_handle);
+ close(fd);
+
+ free(shared_buffer);
+}
+
+/** gem_pin_invalid_vma_test - Request to pin a shared buffer to an
+ invalid VMA above the 48-bit address range.
+
+   Create shared buffer of size 4K
+   Try to pin object to address 0x9000000000000
+**/
+static void gem_pin_invalid_vma_test(void)
+{
+ i915_gem_userptr* userptr = NULL;
+ int fd, ret;
+ uint32_t* shared_buffer = NULL;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 exec_object2[NUM_EXEC_OBJECTS];
+ uint32_t batch_buffer[BO_SIZE];
+ uint32_t batch_buf_handle, shared_buf_handle;
+ int ring, len;
+ const uint32_t data = 0x12345678;
+ uint32_t* invalid_address = (uint32_t*)0x9000000000000;
+
+ fd = drm_open_driver(DRIVER_INTEL);
+ batch_buf_handle = gem_create(fd, BO_SIZE);
+
+ shared_buffer = gem_create_mem_buffer(BO_SIZE);
+ *shared_buffer = 0xFFFFFFFF;
+
+ // share with GPU
+ userptr = gem_create_userptr_struct(shared_buffer, BO_SIZE, 0);
+ ret = gem_call_userptr_ioctl(fd, userptr);
+ igt_assert_eq(ret, 0);
+
+ shared_buf_handle = userptr->handle;
+ free(userptr);
+
+ len = gem_store_data_svm(fd, batch_buffer, shared_buffer, data, true);
+ gem_write(fd, batch_buf_handle, 0, batch_buffer, len);
+
+ setup_exec_obj(&exec_object2[0], shared_buf_handle, EXEC_OBJECT_PINNED, invalid_address);
+ setup_exec_obj(&exec_object2[1], batch_buf_handle, 0, 0);
+
+ ring = 0;
+ if (HAS_BLT_RING(intel_get_drm_devid(fd)))
+ ring = I915_EXEC_BLT;
+
+ execbuf.buffers_ptr = (uintptr_t)exec_object2;
+ execbuf.buffer_count = NUM_EXEC_OBJECTS;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = len;
+ execbuf.cliprects_ptr = 0;
+ execbuf.num_cliprects = 0;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = 0;
+ execbuf.flags = ring;
+ i915_execbuffer2_set_context_id(execbuf, 0);
+ execbuf.rsvd2 = 0;
+
+ /* Expect execbuf to fail */
+ ret = drmIoctl(fd,
+ DRM_IOCTL_I915_GEM_EXECBUFFER2,
+ &execbuf);
+
+ igt_assert_neq(ret, 0);
+
+ gem_sync(fd, batch_buf_handle);
+ gem_close(fd, batch_buf_handle);
+ close(fd);
+
+ free(shared_buffer);
+}
+
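+/* Arbitrary key and size for the SysV shared-memory segment used below */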
+#define SHM_KEY 56789
+#define SHMEM_SIZE 4096
+/** gem_shmem_test - Test userptr ioctl with shared memory
+ * This test creates a SysV IPC buffer and shares it with the GPU.
+ * It sends GPU commands to write DATA into the buffer and
+ * validates it on the CPU side when the command completes.
+
+ * Create arbitrary shmem id
+ * Use shmat to attach a 4K uint32 buffer to above id
+ * Share buffer with GPU using userptr ioctl
+ * Create batch buffer to write DATA in the first element
+ * Submit execbuffer
+ * Validate on CPU side that DATA was indeed written
+ */
+static void gem_shmem_test(void)
+{
+ int shmid;
+ i915_gem_userptr* userptr = NULL;
+ int fd, ret;
+ uint32_t* shared_buffer = NULL;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 exec_object2[NUM_EXEC_OBJECTS];
+ uint32_t batch_buffer[BO_SIZE];
+ uint32_t batch_buf_handle, shared_buf_handle;
+ int ring, len;
+ const uint32_t data = 0x12345678;
+
+ shmid = shmget(SHM_KEY, SHMEM_SIZE, IPC_CREAT | 0666);
+ igt_assert_neq(shmid, -1);
+
+ shared_buffer = shmat(shmid, NULL, 0);
+ igt_assert(shared_buffer != (void*)-1);
+
+ memset(shared_buffer, 0, SHMEM_SIZE);
+ shared_buffer[0] = 0xFFFFFFFF;
+
+ fd = drm_open_driver(DRIVER_INTEL);
+ batch_buf_handle = gem_create(fd, BO_SIZE);
+
+ userptr = gem_create_userptr_struct(shared_buffer, SHMEM_SIZE, 0);
+ ret = gem_call_userptr_ioctl(fd, userptr);
+ igt_assert_eq(ret, 0);
+
+ shared_buf_handle = userptr->handle;
+ free(userptr);
+
+ /* create command buffer with write command */
+ len = gem_store_data_svm(fd, batch_buffer, shared_buffer, data, true);
+ gem_write(fd, batch_buf_handle, 0, batch_buffer, len);
+
+ /* submit command buffer */
+ setup_exec_obj(&exec_object2[0], shared_buf_handle, EXEC_OBJECT_PINNED, shared_buffer);
+ setup_exec_obj(&exec_object2[1], batch_buf_handle, 0, 0);
+
+ ring = 0;
+ if (HAS_BLT_RING(intel_get_drm_devid(fd)))
+ ring = I915_EXEC_BLT;
+
+ execbuf.buffers_ptr = (uintptr_t)exec_object2;
+ execbuf.buffer_count = NUM_EXEC_OBJECTS;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = len;
+ execbuf.cliprects_ptr = 0;
+ execbuf.num_cliprects = 0;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = 0;
+ execbuf.flags = ring;
+ i915_execbuffer2_set_context_id(execbuf, 0);
+ execbuf.rsvd2 = 0;
+
+ gem_execbuf(fd, &execbuf);
+ gem_sync(fd, batch_buf_handle);
+
+ gem_close(fd, batch_buf_handle);
+ close(fd);
+
+ /* check on CPU to see if value changes */
+ igt_fail_on_f(shared_buffer[0] != data,
+ "\nCPU read does not match GPU write, expected: 0x%x, got: 0x%x\n", data, shared_buffer[0]);
+
+ ret = shmdt(shared_buffer);
+ igt_assert_eq(ret, 0);
+
+ /* remove the segment so it does not leak across test runs */
+ shmctl(shmid, IPC_RMID, NULL);
+}
+
+/** gem_pin_high_address_test - Create a shared buffer and a command for
+ * the GPU to write data into it, attempting to pin the buffer at an
+ * address above 32 bits. The CPU then reads back and verifies the
+ * expected value.
+
+@code
+   Malloc a 4K buffer
+   Share buffer with GPU by using userptr ioctl
+   Create batch buffer to write DATA to first dword of buffer
+   Use virtual address of buffer as 0x1111FFFF000 (> 32 bit)
+   Set EXEC_OBJECT_PINNED flag in exec object
+   Set 'offset' in exec object to shared buffer VMA
+   Submit execbuffer
+   Verify value of first DWORD in shared buffer matches DATA
+@endcode
+*/
+
+static void gem_pin_high_address_test(void)
+{
+ i915_gem_userptr* userptr = NULL;
+ int fd, ret;
+ uint32_t* shared_buffer = NULL;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 exec_object2[NUM_EXEC_OBJECTS];
+ uint32_t batch_buffer[BO_SIZE];
+ uint32_t batch_buf_handle, shared_buf_handle;
+ int ring, len;
+ const uint32_t data = 0x12345678;
+ uint32_t* high_address = (uint32_t*)0x1111FFFF000;
+
+ fd = drm_open_driver(DRIVER_INTEL);
+ batch_buf_handle = gem_create(fd, BO_SIZE);
+
+ /* create cpu buffer, set to all 0xF's */
+ shared_buffer = gem_create_mem_buffer(BO_SIZE);
+ *shared_buffer = 0xFFFFFFFF;
+
+ /* share with GPU */
+ userptr = gem_create_userptr_struct(shared_buffer, BO_SIZE, 0);
+ ret = gem_call_userptr_ioctl(fd, userptr);
+ igt_assert_eq(ret, 0);
+
+ /* Get handle for shared buffer */
+ shared_buf_handle = userptr->handle;
+ free(userptr);
+
+ /* create command buffer with write command */
+ len = gem_store_data_svm(fd, batch_buffer, high_address, data, true);
+ gem_write(fd, batch_buf_handle, 0, batch_buffer, len);
+
+ /* submit command buffer */
+ setup_exec_obj(&exec_object2[0], shared_buf_handle, EXEC_OBJECT_PINNED | EXEC_OBJECT_SUPPORTS_48B_ADDRESS, high_address);
+ setup_exec_obj(&exec_object2[1], batch_buf_handle, 0, 0);
+
+ ring = 0;
+ if (HAS_BLT_RING(intel_get_drm_devid(fd)))
+ ring = I915_EXEC_BLT;
+
+ execbuf.buffers_ptr = (uintptr_t)exec_object2;
+ execbuf.buffer_count = NUM_EXEC_OBJECTS;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = len;
+ execbuf.cliprects_ptr = 0;
+ execbuf.num_cliprects = 0;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = 0;
+ execbuf.flags = ring;
+ i915_execbuffer2_set_context_id(execbuf, 0);
+ execbuf.rsvd2 = 0;
+
+ gem_execbuf(fd, &execbuf);
+ gem_sync(fd, batch_buf_handle);
+
+ // check on CPU to see if value changes
+ igt_fail_on_f(shared_buffer[0] != data,
+ "\nCPU read does not match GPU write, expected: 0x%x, got: 0x%x\n", data, shared_buffer[0]);
+
+ gem_close(fd, batch_buf_handle);
+ close(fd);
+ free(shared_buffer);
+}
+
+/** gem_pin_near_48Bit_test - Create a shared buffer and a command for
+ * the GPU to write data into it, attempting to pin the buffer at
+ * addresses between 47 and 48 bits. The CPU then reads back and
+ * verifies the expected value.
+
+@code
+   Malloc a 4K buffer
+   Share buffer with GPU by using userptr ioctl
+   Create batch buffer to write DATA to first dword of buffer
+   Use virtual addresses for the buffer in the range between 47-bit and 48-bit
+   Set EXEC_OBJECT_PINNED flag in exec object
+   Set 'offset' in exec object to shared buffer VMA
+   Submit execbuffer
+   Verify value of first DWORD in shared buffer matches DATA
+@endcode
+*/
+#define BEGIN_HIGH_ADDRESS 0x7FFFFFFFF000
+#define END_HIGH_ADDRESS 0xFFFFFFFFC000
+#define ADDRESS_INCREMENT 0x2000000000
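+/* Scan pin addresses from just below the 47-bit boundary up to just
+ * below the 48-bit limit, in 0x2000000000 (128 GiB) steps */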
+static void gem_pin_near_48Bit_test(void)
+{
+ i915_gem_userptr* userptr = NULL;
+ int fd, ret;
+ uint32_t* shared_buffer = NULL;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 exec_object2[NUM_EXEC_OBJECTS];
+ uint32_t batch_buffer[BO_SIZE];
+ uint32_t batch_buf_handle, shared_buf_handle;
+ int ring, len;
+ const uint32_t data = 0x12345678;
+ uint32_t* high_address;
+
+ fd = drm_open_driver(DRIVER_INTEL);
+ batch_buf_handle = gem_create(fd, BO_SIZE);
+
+ /* create cpu buffer, set to all 0xF's */
+ shared_buffer = gem_create_mem_buffer(BO_SIZE);
+ *shared_buffer = 0xFFFFFFFF;
+
+ /* share with GPU */
+ userptr = gem_create_userptr_struct(shared_buffer, BO_SIZE, 0);
+ ret = gem_call_userptr_ioctl(fd, userptr);
+ igt_assert_eq(ret, 0);
+
+ /* Get handle for shared buffer */
+ shared_buf_handle = userptr->handle;
+ free(userptr);
+
+ /* advance byte-wise; plain pointer arithmetic would scale by sizeof(uint32_t) */
+ for (high_address = (uint32_t*)BEGIN_HIGH_ADDRESS; high_address <= (uint32_t*)END_HIGH_ADDRESS;
+ high_address = (uint32_t*)((uintptr_t)high_address + ADDRESS_INCREMENT))
+ {
+ /* create command buffer with write command */
+ len = gem_store_data_svm(fd, batch_buffer, (uint32_t*)high_address,
+ data, true);
+ gem_write(fd, batch_buf_handle, 0, batch_buffer, len);
+
+ /* submit command buffer */
+ setup_exec_obj(&exec_object2[0], shared_buf_handle,
+ EXEC_OBJECT_PINNED | EXEC_OBJECT_SUPPORTS_48B_ADDRESS,
+ high_address);
+ setup_exec_obj(&exec_object2[1], batch_buf_handle, 0, 0);
+
+ ring = 0;
+ if (HAS_BLT_RING(intel_get_drm_devid(fd)))
+ ring = I915_EXEC_BLT;
+
+ execbuf.buffers_ptr = (uintptr_t)exec_object2;
+ execbuf.buffer_count = NUM_EXEC_OBJECTS;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = len;
+ execbuf.cliprects_ptr = 0;
+ execbuf.num_cliprects = 0;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = 0;
+ execbuf.flags = ring;
+ i915_execbuffer2_set_context_id(execbuf, 0);
+ execbuf.rsvd2 = 0;
+
+ gem_execbuf(fd, &execbuf);
+ gem_sync(fd, batch_buf_handle);
+
+ // check on CPU to see if value changes
+ igt_fail_on_f(shared_buffer[0] != data,
+ "\nCPU read does not match GPU write, expected: 0x%"PRIx32", got: 0x%"PRIx32", address: 0x%"PRIxPTR"\n", data, shared_buffer[0], (uintptr_t)high_address);
+ }
+
+ gem_close(fd, batch_buf_handle);
+ close(fd);
+ free(shared_buffer);
+}
+
+/** gem_pin_mmap_anonymous_test - Create an anonymous mmap buffer and
+ * share it with the GPU, then run the basic test on this buffer.
+
+@code
+   Create an anonymous mmap buffer
+   Share buffer with GPU by using userptr ioctl
+   Create batch buffer to write DATA to first dword of buffer
+   Set EXEC_OBJECT_PINNED flag in exec object
+   Set 'offset' in exec object to shared buffer VMA
+   Submit execbuffer
+   Verify value of first DWORD in shared buffer matches DATA
+@endcode
+*/
+static void gem_pin_mmap_anonymous_test(void)
+{
+ i915_gem_userptr* userptr = NULL;
+ int fd, ret;
+ uint32_t* shared_buffer = NULL;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 exec_object2[NUM_EXEC_OBJECTS];
+ uint32_t batch_buffer[BO_SIZE];
+ uint32_t batch_buf_handle, shared_buf_handle;
+ int ring, len;
+ const uint32_t data = 0x12345678;
+
+ fd = drm_open_driver(DRIVER_INTEL);
+ batch_buf_handle = gem_create(fd, BO_SIZE);
+
+ /* create anonymous mmap buffer, set first dword to a known value */
+ shared_buffer = mmap(NULL, BO_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ igt_fail_on_f(shared_buffer == MAP_FAILED,
+ "mmap call failed with %s\n", strerror(errno));
+
+ *shared_buffer = 0xFFFFFFFF;
+
+ /* share with GPU */
+ userptr = gem_create_userptr_struct(shared_buffer, BO_SIZE, 0);
+ ret = gem_call_userptr_ioctl(fd, userptr);
+ igt_assert_eq(ret, 0);
+
+ /* Get handle for shared buffer */
+ shared_buf_handle = userptr->handle;
+ free(userptr);
+
+ /* create command buffer with write command */
+ len = gem_store_data_svm(fd, batch_buffer, shared_buffer, data, true);
+ gem_write(fd, batch_buf_handle, 0, batch_buffer, len);
+
+ /* submit command buffer */
+ setup_exec_obj(&exec_object2[0], shared_buf_handle, EXEC_OBJECT_PINNED, shared_buffer);
+ setup_exec_obj(&exec_object2[1], batch_buf_handle, 0, 0);
+
+ ring = 0;
+ if (HAS_BLT_RING(intel_get_drm_devid(fd)))
+ ring = I915_EXEC_BLT;
+
+ execbuf.buffers_ptr = (uintptr_t)exec_object2;
+ execbuf.buffer_count = NUM_EXEC_OBJECTS;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = len;
+ execbuf.cliprects_ptr = 0;
+ execbuf.num_cliprects = 0;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = 0;
+ execbuf.flags = ring;
+ i915_execbuffer2_set_context_id(execbuf, 0);
+ execbuf.rsvd2 = 0;
+
+ gem_execbuf(fd, &execbuf);
+ gem_sync(fd, batch_buf_handle);
+
+ // check on CPU to see if value changes
+ igt_fail_on_f(shared_buffer[0] != data,
+ "\nCPU read does not match GPU write, expected: 0x%x, got: 0x%x\n", data, shared_buffer[0]);
+
+ gem_close(fd, batch_buf_handle);
+ close(fd);
+ igt_fail_on_f(munmap(shared_buffer, BO_SIZE) != 0,
+ "munmap failed with: %s", strerror(errno));
+}
+
+/** gem_pin_mmap_file_test - Use mmap to map a file into memory, then
+ * attempt to share the buffer with the GPU using the userptr ioctl
+ * and verify that CPU/GPU writes are consistent.
+
+@code
+   Open/create a file
+   lseek to the end of the region and write a dummy byte
+   (this gives the mmap'd pages backing store)
+   Use mmap to map the file into memory
+   Share buffer with GPU by using userptr ioctl
+   Create batch buffer to write DATA to first dword of buffer
+   Set EXEC_OBJECT_PINNED flag in exec object
+   Set 'offset' in exec object to shared buffer VMA
+   Submit execbuffer
+   Verify value of first DWORD in shared buffer matches DATA
+   Close file
+@endcode
+*/
+static void gem_pin_mmap_file_test(void)
+{
+ i915_gem_userptr* userptr = NULL;
+ int fd, ret;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 exec_object2[NUM_EXEC_OBJECTS];
+ uint32_t batch_buffer[BO_SIZE];
+ uint32_t batch_buf_handle, dest_buf_handle;
+ int ring, len;
+ const uint32_t data = 0x12345678;
+ int fdout;
+ uint32_t *dest;
+ const char filename[] = "svm_mmap.txt";
+
+ fdout = open(filename, O_RDWR | O_CREAT | O_TRUNC, 0640);
+ igt_fail_on_f(fdout < 0, "Cannot open output file\n");
+
+ /* Extend the file so the mmap'd pages have backing store:
+ * seek to the last byte of the region and write a dummy byte there */
+ igt_fail_on_f(lseek(fdout, BO_SIZE, SEEK_SET) == -1, "lseek error\n");
+ igt_fail_on_f(write(fdout, "", 1) != 1, "write error\n");
+
+ fd = drm_open_driver(DRIVER_INTEL);
+ batch_buf_handle = gem_create(fd, BO_SIZE);
+
+ /* map the file and set the first dword to a known value */
+ dest = mmap(0, BO_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fdout, 0);
+ igt_fail_on_f(dest == MAP_FAILED,
+ "mmap call failed with %s\n", strerror(errno));
+ *dest = 0x11111111;
+
+ userptr = gem_create_userptr_struct(dest, BO_SIZE, 0);
+ ret = gem_call_userptr_ioctl(fd, userptr);
+ igt_assert_eq(ret, 0);
+ dest_buf_handle = userptr->handle;
+ free(userptr);
+
+ /* create command buffer with write command */
+ len = gem_store_data_svm(fd, batch_buffer, dest, data, true);
+ gem_write(fd, batch_buf_handle, 0, batch_buffer, len);
+
+ /* submit command buffer */
+ setup_exec_obj(&exec_object2[0], dest_buf_handle, EXEC_OBJECT_PINNED, dest);
+ setup_exec_obj(&exec_object2[1], batch_buf_handle, 0, 0);
+
+ ring = 0;
+ if (HAS_BLT_RING(intel_get_drm_devid(fd)))
+ ring = I915_EXEC_BLT;
+
+ execbuf.buffers_ptr = (uintptr_t)exec_object2;
+ execbuf.buffer_count = NUM_EXEC_OBJECTS;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = len;
+ execbuf.cliprects_ptr = 0;
+ execbuf.num_cliprects = 0;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = 0;
+ execbuf.flags = ring;
+ i915_execbuffer2_set_context_id(execbuf, 0);
+ execbuf.rsvd2 = 0;
+
+ gem_execbuf(fd, &execbuf);
+ gem_sync(fd, batch_buf_handle);
+
+ // check on CPU to see if value changes
+ igt_fail_on_f(*dest != data,
+ "\nCPU read does not match GPU write, expected: 0x%x, got: 0x%x\n", data, dest[0]);
+
+ gem_close(fd, batch_buf_handle);
+ close(fd);
+ igt_fail_on_f(munmap(dest, BO_SIZE) != 0,
+ "munmap failed with: %s", strerror(errno));
+ close(fdout);
+ unlink(filename);
+}
+
+
+int main(int argc, char* argv[])
+{
+ igt_subtest_init(argc, argv);
+ igt_skip_on_simulation();
+
+ igt_subtest("gem_null_buffer"){
+ gem_basic_test(false);
+ }
+ igt_subtest("gem_invalid_userptr"){
+ gem_invalid_userptr_test();
+ }
+ igt_subtest("gem_basic"){
+ gem_basic_test(true);
+ }
+ igt_subtest("gem_multiple_process"){
+ gem_multiple_process_test();
+ }
+ igt_subtest("gem_repin"){
+ gem_repin_test();
+ }
+ igt_subtest("gem_evict"){
+ gem_evict_test();
+ }
+ igt_subtest("gem_stress"){
+ gem_stress_test();
+ }
+ igt_subtest("gem_pin_overlap"){
+ gem_pin_overlap_test();
+ }
+ igt_subtest("gem_shmem"){
+ gem_shmem_test();
+ }
+ igt_subtest("gem_write_multipage_buffer"){
+ gem_write_multipage_buffer_test();
+ }
+#ifndef IS_32BIT_USER
+ igt_subtest("gem_pin_high_address"){
+ gem_pin_high_address_test();
+ }
+ igt_subtest("gem_pin_near_48Bit"){
+ gem_pin_near_48Bit_test();
+ }
+ igt_subtest("gem_pin_invalid_vma"){
+ gem_pin_invalid_vma_test();
+ }
+#endif
+ igt_subtest("gem_pin_mmap_anon"){
+ gem_pin_mmap_anonymous_test();
+ }
+ igt_subtest("gem_pin_mmap_file"){
+ gem_pin_mmap_file_test();
+ }
+
+ igt_exit();
+}
--
1.9.1