[PATCH i-g-t v2 08/10] tests/intel/xe_svm: svm_benchmark

Bommu Krishnaiah krishnaiah.bommu at intel.com
Tue May 14 07:10:24 UTC 2024


Verify SVM performance with a simple benchmark test: memset a 64MiB
malloc'ed buffer from the GPU with the MEM_SET command and report the
end-to-end memset bandwidth.

Signed-off-by: Bommu Krishnaiah <krishnaiah.bommu at intel.com>
Cc: Oak Zeng <oak.zeng at intel.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
---
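A note for reviewers: the bandwidth number printed by the new subtest boils
down to the calculation below. The helper is illustrative only and does not
exist in this patch; NSEC_PER_SEC is the same constant the new code uses.

	static double e2e_bandwidth_mib_s(uint64_t bytes, uint64_t elapsed_ns)
	{
		/* (bytes / 1 MiB) scaled to a per-second rate */
		return (double)(bytes >> 20) * NSEC_PER_SEC / elapsed_ns;
	}

For example, the 64MiB (1 << 26 byte) memset finishing 32ms after
igt_gettime() reports roughly 2000 MiB/s. The elapsed time covers the
malloc, command submission and sync, so this is deliberately an end-to-end
figure rather than raw memset throughput.
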
 lib/xe/xe_util.c     | 29 ++++++++++++++++++++++++++++
 lib/xe/xe_util.h     |  5 +++++
 tests/intel/xe_svm.c | 46 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 80 insertions(+)

diff --git a/lib/xe/xe_util.c b/lib/xe/xe_util.c
index 0e28c0093..a02ee5324 100644
--- a/lib/xe/xe_util.c
+++ b/lib/xe/xe_util.c
@@ -124,6 +124,24 @@ void insert_atomic_inc(uint32_t *batch, uint64_t dst_va, uint32_t val)
 	batch[++i] = MI_BATCH_BUFFER_END;
 }
 
+/** Insert commands into the batch buffer to memset the size-byte buffer at
+ * dst_va with the byte value val */
+void insert_memset(uint32_t *batch, uint64_t dst_va, uint64_t size, uint32_t val)
+{
+#define PVC_MEM_SET_CMD		(2 << 29 | 0x5b << 22)
+#define   MS_MATRIX		(1 << 17)
+	const int page_shift = 12;
+
+	*batch++ = PVC_MEM_SET_CMD | MS_MATRIX | (7 - 2);	/* matrix-mode memset, 7 dwords */
+	*batch++ = BIT(page_shift) - 1;				/* fill width: a 4KiB row */
+	*batch++ = (size >> page_shift) - 1;			/* fill height: number of rows */
+	*batch++ = BIT(page_shift) - 1;				/* destination pitch */
+	*batch++ = lower_32_bits(dst_va);			/* destination address, low 32 bits */
+	*batch++ = upper_32_bits(dst_va);			/* destination address, high 32 bits */
+	*batch++ = (uint32_t)val << 24;				/* fill byte in bits 31:24 */
+	*batch++ = MI_BATCH_BUFFER_END;
+}
+
 void xe_create_cmdbuf(struct xe_buffer *cmd_buf, cmdbuf_fill_func_t fill_func, uint64_t dst_va, uint32_t val, struct drm_xe_engine_class_instance *eci)
 {
 	//make some room for a exec_ufence, which will be used to sync the
@@ -135,6 +153,17 @@ void xe_create_cmdbuf(struct xe_buffer *cmd_buf, cmdbuf_fill_func_t fill_func, u
 	fill_func(cmd_buf->cpu_addr, dst_va, val);
 }
 
+void xe_create_cmdbuf_fill_two_dw(struct xe_buffer *cmd_buf, cmdbuf_fill_two_dw_func_t fill_func, uint64_t dst_va, uint64_t dst_va1, uint32_t val, struct drm_xe_engine_class_instance *eci)
+{
+	//make some room for an exec_ufence, which will be used to sync the
+	//submission of this command
+
+	cmd_buf->size = xe_bb_size(cmd_buf->fd, cmd_buf->size + PAGE_ALIGN_UFENCE);
+	xe_create_buffer(cmd_buf);
+	cmd_buf->exec_queue = xe_exec_queue_create(cmd_buf->fd, cmd_buf->vm, eci, 0);
+	fill_func(cmd_buf->cpu_addr, dst_va, dst_va1, val);
+}
+
 void xe_destroy_cmdbuf(struct xe_buffer *cmd_buf)
 {
 	xe_exec_queue_destroy(cmd_buf->fd, cmd_buf->exec_queue);
diff --git a/lib/xe/xe_util.h b/lib/xe/xe_util.h
index 46e1ccc9a..50f2a4bc4 100644
--- a/lib/xe/xe_util.h
+++ b/lib/xe/xe_util.h
@@ -34,13 +34,18 @@ struct xe_buffer {
 };
 
 typedef void (*cmdbuf_fill_func_t) (uint32_t *batch, uint64_t dst_gpu_va, uint32_t val);
+typedef void (*cmdbuf_fill_two_dw_func_t) (uint32_t *batch, uint64_t dst_gpu_va,
+		uint64_t dst_gpu_va1, uint32_t val);
 void xe_create_buffer(struct xe_buffer *buffer);
 void xe_create_cmdbuf(struct xe_buffer *cmd_buf, cmdbuf_fill_func_t fill_func,
 		uint64_t dst_va, uint32_t val, struct drm_xe_engine_class_instance *eci);
+void xe_create_cmdbuf_fill_two_dw(struct xe_buffer *cmd_buf, cmdbuf_fill_two_dw_func_t fill_func,
+		uint64_t dst_va, uint64_t dst_va1, uint32_t val, struct drm_xe_engine_class_instance *eci);
 uint64_t xe_cmdbuf_exec_ufence_gpuva(struct xe_buffer *cmd_buf);
 uint64_t *xe_cmdbuf_exec_ufence_cpuva(struct xe_buffer *cmd_buf);
 void insert_store(uint32_t *batch, uint64_t dst_va, uint32_t val);
 void insert_atomic_inc(uint32_t *batch, uint64_t dst_va, uint32_t val);
+void insert_memset(uint32_t *batch, uint64_t dst_va, uint64_t size, uint32_t val);
 void xe_submit_cmd(struct xe_buffer *cmdbuf);
 int64_t __xe_submit_cmd(struct xe_buffer *cmdbuf);
 void xe_destroy_buffer(struct xe_buffer *buffer);
diff --git a/tests/intel/xe_svm.c b/tests/intel/xe_svm.c
index b846ca71a..0b573e0c9 100644
--- a/tests/intel/xe_svm.c
+++ b/tests/intel/xe_svm.c
@@ -34,6 +34,8 @@
  * Description: verify SVM basic functionality by using GPU atomic access any location in malloc'ed memory
  * SUBTEST: svm_invalid_va
  * Description: verify SVM functionality while accessing invalid address
+ * SUBTEST: svm-benchmark
+ * Description: verify SVM performance with a simple benchmark test
  */
 
 #include <fcntl.h>
@@ -254,6 +256,46 @@ static void svm_invalid_va(int fd, uint32_t vm, struct drm_xe_engine_class_insta
 	free(dst);
 }
 
+/**
+ * A simple benchmark test.
+ * Use the GPU to memset a buffer with a specific value and
+ * measure the end-to-end memset bandwidth.
+ *
+ * Comparing this test's output with that of an equivalent
+ * runtime-allocator test gives a very basic idea of how the
+ * system allocator performs relative to the runtime allocator.
+ */
+static void svm_benchmark(int fd, uint32_t vm, struct drm_xe_engine_class_instance *eci)
+{
+	uint64_t gpu_va = 0x1a0000;
+	size_t bo_size = xe_bb_size(fd, PAGE_ALIGN_UFENCE);
+	uint32_t *dst, size = 1 << 26;
+	struct timespec start_time;
+	double bandwidth;
+
+	struct xe_buffer cmd_buf = {
+		.fd = fd,
+		.gpu_addr = (void *)(uintptr_t)gpu_va,
+		.vm = vm,
+		.size = bo_size,
+		.placement = vram_if_possible(fd, eci->gt_id),
+		.flag = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM,
+	};
+
+	igt_gettime(&start_time);
+	dst = aligned_alloc(xe_get_default_alignment(fd), size);
+	igt_assert(dst);
+	xe_create_cmdbuf_fill_two_dw(&cmd_buf, insert_memset, (uint64_t)dst, (uint64_t)size, 0x12, eci);
+	xe_submit_cmd(&cmd_buf);
+	igt_assert_eq(*dst, 0x12121212);
+	xe_destroy_cmdbuf(&cmd_buf);
+
+	free(dst);
+
+	bandwidth = (double)(size >> 20) * NSEC_PER_SEC / igt_nsec_elapsed(&start_time);
+	igt_info("engine class %d, instance %d: memset E2E bandwidth (including sync overhead) %.3f MiB/s\n", eci->engine_class, eci->engine_instance, bandwidth);
+}
+
 igt_main
 {
 	int fd;
@@ -296,6 +338,10 @@ igt_main
 		xe_for_each_engine(fd, hwe)
 			svm_invalid_va(fd, vm, hwe);
 
+	igt_subtest_f("svm-benchmark")
+		xe_for_each_engine(fd, hwe)
+			svm_benchmark(fd, vm, hwe);
+
 	igt_fixture {
 		xe_vm_destroy(fd, vm);
 		drm_close_driver(fd);
-- 
2.25.1


