[PATCH i-g-t v2 06/10] tests/intel/xe_svm: Add svm-atomic-access subtest

Bommu Krishnaiah krishnaiah.bommu at intel.com
Tue May 14 07:10:22 UTC 2024


Verify GPU atomic access to an arbitrary location in malloc'ed memory by using SVM.

Signed-off-by: Bommu Krishnaiah <krishnaiah.bommu at intel.com>
Cc: Oak Zeng <oak.zeng at intel.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
---
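The atomic increment is expressed with the MI_ATOMIC command rather than a
plain dword store, since the GPU has to read-modify-write the target dword
in place. Below is a minimal reference sketch of the encoding that
insert_atomic_inc() relies on; the macro values mirror the kernel's
intel_gpu_commands.h, and the exact names available in the IGT headers may
differ:

	/* Sketch only: kernel-style MI_ATOMIC encoding, 4-byte INC operation */
	#define MI_INSTR(opcode, flags)	(((opcode) << 23) | (flags))
	#define MI_ATOMIC		MI_INSTR(0x2f, 1)	/* header + GPU VA lo/hi */
	#define MI_ATOMIC_INC		(0x5 << 8)		/* atomic opcode: INC */
	#define MI_BATCH_BUFFER_END	MI_INSTR(0x0a, 0)

	static void fill_atomic_inc(uint32_t *batch, uint64_t dst_va)
	{
		batch[0] = MI_ATOMIC | MI_ATOMIC_INC;	/* increment one dword in place */
		batch[1] = dst_va;			/* dword-aligned GPU VA, low 32 bits */
		batch[2] = dst_va >> 32;		/* GPU VA, high 32 bits */
		batch[3] = MI_BATCH_BUFFER_END;
	}

Once wired up, the subtest can be exercised on its own with, e.g.,
./xe_svm --run-subtest svm-atomic-access.
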
 lib/xe/xe_util.c     | 17 +++++++++++++++++
 lib/xe/xe_util.h     |  1 +
 tests/intel/xe_svm.c | 39 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 57 insertions(+)

diff --git a/lib/xe/xe_util.c b/lib/xe/xe_util.c
index 1bdb5fa08..0e28c0093 100644
--- a/lib/xe/xe_util.c
+++ b/lib/xe/xe_util.c
@@ -107,6 +107,23 @@ void insert_store(uint32_t *batch, uint64_t dst_va, uint32_t val)
 	batch[++i] = MI_BATCH_BUFFER_END;
 }
 
+/*
+ * Fill @batch with a GPU atomic increment of the dword at @dst_va, followed
+ * by a batch buffer end. MI_ATOMIC/MI_ATOMIC_INC are assumed to be defined
+ * as in the kernel's intel_gpu_commands.h.
+ */
+void insert_atomic_inc(uint32_t *batch, uint64_t dst_va, uint32_t val)
+{
+	int i = 0;
+
+	/* the increment amount is implied by the opcode, so @val is unused */
+	(void)val;
+	batch[i] = MI_ATOMIC | MI_ATOMIC_INC;
+	batch[++i] = dst_va;
+	batch[++i] = dst_va >> 32;
+	batch[++i] = MI_BATCH_BUFFER_END;
+}
+
 void xe_create_cmdbuf(struct xe_buffer *cmd_buf, cmdbuf_fill_func_t fill_func, uint64_t dst_va, uint32_t val, struct drm_xe_engine_class_instance *eci)
 {
 	//make some room for a exec_ufence, which will be used to sync the
diff --git a/lib/xe/xe_util.h b/lib/xe/xe_util.h
index c38f79e60..46e1ccc9a 100644
--- a/lib/xe/xe_util.h
+++ b/lib/xe/xe_util.h
@@ -40,6 +40,7 @@ void xe_create_cmdbuf(struct xe_buffer *cmd_buf, cmdbuf_fill_func_t fill_func,
 uint64_t xe_cmdbuf_exec_ufence_gpuva(struct xe_buffer *cmd_buf);
 uint64_t *xe_cmdbuf_exec_ufence_cpuva(struct xe_buffer *cmd_buf);
 void insert_store(uint32_t *batch, uint64_t dst_va, uint32_t val);
+void insert_atomic_inc(uint32_t *batch, uint64_t dst_va, uint32_t val);
 void xe_submit_cmd(struct xe_buffer *cmdbuf);
 int64_t __xe_submit_cmd(struct xe_buffer *cmdbuf);
 void xe_destroy_buffer(struct xe_buffer *buffer);
diff --git a/tests/intel/xe_svm.c b/tests/intel/xe_svm.c
index 4f2818cc8..421d7fd1a 100644
--- a/tests/intel/xe_svm.c
+++ b/tests/intel/xe_svm.c
@@ -30,6 +30,8 @@
  * Description: verify SVM basic functionality by using randomly access any location in malloc'ed memory
  * SUBTEST: svm-huge-page
  * Description: verify SVM basic functionality by using huge page access
+ * SUBTEST: svm-atomic-access
+ * Description: verify SVM basic functionality by using GPU atomic access to a random location in malloc'ed memory
  */
 
 #include <fcntl.h>
@@ -189,6 +191,39 @@ static void svm_thp(int fd, uint32_t vm, struct drm_xe_engine_class_instance *ec
 	free(dst);
 }
 
+/**
+ * Test GPU atomic access to a random location in malloc'ed memory
+ */
+static void svm_atomic_access(int fd, uint32_t vm, struct drm_xe_engine_class_instance *eci)
+{
+	uint64_t gpu_va = 0x1a0000;
+	int val = 0xc0ffee;
+	size_t bo_size = xe_bb_size(fd, PAGE_ALIGN_UFENCE);
+	uint32_t *dst, *dst_to_access;
+	uint32_t size = 1024 * 1024, sz_dw = size / 4;
+
+	struct xe_buffer cmd_buf = {
+		.fd = fd,
+		.gpu_addr = (void *)(uintptr_t)gpu_va,
+		.vm = vm,
+		.size = bo_size,
+		.placement = vram_if_possible(fd, eci->gt_id),
+		.flag = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM,
+	};
+
+	dst = aligned_alloc(xe_get_default_alignment(fd), size);
+	dst_to_access = dst + random() % sz_dw;
+	*dst_to_access = val;
+
+	xe_create_cmdbuf(&cmd_buf, insert_atomic_inc, (uint64_t)dst_to_access, val, eci);
+	xe_submit_cmd(&cmd_buf);
+
+	igt_assert_eq(*dst_to_access, val + 1);
+
+	xe_destroy_cmdbuf(&cmd_buf);
+	free(dst);
+}
+
 igt_main
 {
 	int fd;
@@ -223,6 +258,10 @@ igt_main
 		xe_for_each_engine(fd, hwe)
 			svm_thp(fd, vm, hwe);
 
+	igt_subtest_f("svm-atomic-access")
+		xe_for_each_engine(fd, hwe)
+			svm_atomic_access(fd, vm, hwe);
+
 	igt_fixture {
 		xe_vm_destroy(fd, vm);
 		drm_close_driver(fd);
-- 
2.25.1


