[PATCH] tests/xe_spin_batch: Add spin-timestamp-check

Lucas De Marchi lucas.demarchi at intel.com
Wed Nov 6 17:08:20 UTC 2024


On Tue, Nov 05, 2024 at 02:58:05PM +0530, Pravalika Gurram wrote:
>check the ctx_timestamp register post gt reset for each engine.
>
>Reference: VLK-64943

please drop this — internal tracking references (VLK-*) should not appear in the
commit message

>Signed-off-by: Pravalika Gurram <pravalika.gurram at intel.com>
>Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
>---
> tests/intel/xe_spin_batch.c | 158 ++++++++++++++++++++++++++++++++++++
> 1 file changed, 158 insertions(+)
>
>diff --git a/tests/intel/xe_spin_batch.c b/tests/intel/xe_spin_batch.c
>index 9314e229e..547cb1c5b 100644
>--- a/tests/intel/xe_spin_batch.c
>+++ b/tests/intel/xe_spin_batch.c
>@@ -309,6 +309,158 @@ static void xe_spin_fixed_duration(int fd, int gt, int class, int flags)
> 	put_ahnd(ahnd);
> }
>
>+static void exec_store(int fd, struct drm_xe_engine_class_instance *eci,
>+		       bool hang)
>+{
>+	uint64_t ahnd, bb_size, bb_addr;
>+	uint32_t vm, exec_queue, bb;
>+#define USER_FENCE_VALUE	0xdeadbeefdeadbeefull
>+	struct drm_xe_sync syncobj = {
>+		.type = DRM_XE_SYNC_TYPE_USER_FENCE,
>+		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
>+		.timeline_value = USER_FENCE_VALUE,
>+	};
>+
>+	struct drm_xe_exec exec = {
>+		.num_batch_buffer = 1,
>+		.num_syncs = 1,
>+		.syncs = to_user_pointer(&syncobj),
>+	};
>+	struct {
>+		uint32_t batch[16];
>+		uint64_t pad;
>+		uint32_t data;
>+		uint64_t vm_sync;
>+		uint64_t exec_sync;
>+	} *data;
>+	uint64_t batch_offset, batch_addr, sdi_offset, sdi_addr;
>+	int64_t timeout = NSEC_PER_SEC;
>+	int i, ret;
>+
>+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_RELOC);
>+
>+	vm = xe_vm_create(fd, 0, 0);
>+	exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
>+	bb_size = xe_bb_size(fd, sizeof(*data));
>+	bb = xe_bo_create(fd, vm, bb_size, vram_if_possible(fd, eci->gt_id), 0);
>+	bb_addr = intel_allocator_alloc_with_strategy(ahnd, bb, bb_size, 0,
>+						      ALLOC_STRATEGY_LOW_TO_HIGH);
>+	data = xe_bo_map(fd, bb, bb_size);
>+	syncobj.addr = to_user_pointer(&data->vm_sync);
>+	xe_vm_bind_async(fd, vm, 0, bb, 0, bb_addr, bb_size, &syncobj, 1);
>+	xe_wait_ufence(fd, &data->vm_sync, USER_FENCE_VALUE, 0, NSEC_PER_SEC);
>+
>+	batch_offset = (char *)&data->batch - (char *)data;
>+	batch_addr = bb_addr + batch_offset;
>+	sdi_offset = (char *)&data->data - (char *)data;
>+	sdi_addr = bb_addr + sdi_offset;
>+
>+	i = 0;
>+
>+	data->batch[i++] = MI_STORE_DWORD_IMM_GEN4;
>+	data->batch[i++] = sdi_addr;
>+	data->batch[i++] = sdi_addr >> 32;
>+	data->batch[i++] = 0;
>+	if (!hang)
>+		data->batch[i++] = MI_BATCH_BUFFER_END;
>+	igt_assert(i <= ARRAY_SIZE(data->batch));
>+
>+	syncobj.addr = bb_addr + (char *)&data->exec_sync - (char *)data;
>+	exec.exec_queue_id = exec_queue;
>+	exec.address = batch_addr;
>+	ret = __xe_exec(fd, &exec);
>+	ret = __xe_wait_ufence(fd, &data->exec_sync, USER_FENCE_VALUE, 0, &timeout);
>+	igt_assert(hang ? ret < 0 : ret == 0);
>+
>+	munmap(data, bb_size);
>+	gem_close(fd, bb);
>+
>+	xe_exec_queue_destroy(fd, exec_queue);
>+	xe_vm_destroy(fd, vm);
>+
>+	put_ahnd(ahnd);
>+}
>+
>+static void run_spinner(int fd, struct drm_xe_engine_class_instance *eci)

This is a lot of code for running a spinner.

Let's move spin_sync_end() and spin_sync_start() from
tests/intel/xe_drm_fdinfo.c to the lib so we don't need to copy and
paste them everywhere in slightly different ways.


>+{
>+	struct drm_xe_sync sync = {
>+		.type = DRM_XE_SYNC_TYPE_USER_FENCE,
>+		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
>+		.timeline_value = USER_FENCE_VALUE,
>+	};
>+	struct drm_xe_exec exec = {
>+		.num_batch_buffer = 1,
>+		.num_syncs = 1,
>+		.syncs = to_user_pointer(&sync),
>+	};
>+	struct xe_spin *spin;
>+	uint64_t vm_sync;
>+	size_t bo_size;
>+	uint32_t vm;
>+	uint32_t exec_queue;
>+	uint64_t spin_addr;
>+	uint64_t ahnd;
>+	uint32_t bo;
>+	uint32_t ts_1, ts_2;
>+
>+	vm = xe_vm_create(fd, 0, 0);
>+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_RELOC);
>+	bo_size = xe_bb_size(fd, sizeof(*spin));
>+	bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id), 0);
>+	spin = xe_bo_map(fd, bo, bo_size);
>+
>+	exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
>+	spin_addr = intel_allocator_alloc_with_strategy(ahnd, bo, bo_size, 0,
>+						ALLOC_STRATEGY_LOW_TO_HIGH);
>+
>+	sync.addr = to_user_pointer(&vm_sync);
>+	xe_vm_bind_async(fd, vm, 0, bo, 0, spin_addr, bo_size, &sync, 1);
>+	xe_wait_ufence(fd, &vm_sync, USER_FENCE_VALUE, 0, NSEC_PER_SEC);
>+
>+
>+	xe_spin_init_opts(spin, .addr = spin_addr, .write_timestamp = true);
>+	sync.addr = spin_addr + (char *)&spin->exec_sync - (char *)spin;
>+	exec.exec_queue_id = exec_queue;
>+	exec.address = spin_addr;
>+	xe_exec(fd, &exec);
>+	xe_spin_wait_started(spin);
>+
>+	/* Collect and check timestamps before stopping the spinner */
>+	usleep(50000);
>+	ts_1 = spin->timestamp;
>+	usleep(50000);
>+	ts_2 = spin->timestamp;

Reading the timestamp like this may not work, depending on the
compiler options. I believe you will need READ_ONCE(spin->timestamp)
in both cases to prevent the compiler from optimizing the reads out.


>+	igt_assert_neq_u32(ts_1, ts_2);
>+
>+	xe_spin_end(spin);
>+	xe_wait_ufence(fd, &spin->exec_sync, USER_FENCE_VALUE, 0, NSEC_PER_SEC);
>+
>+	sync.addr = to_user_pointer(&vm_sync);
>+	xe_vm_unbind_async(fd, vm, 0, 0, spin_addr, bo_size, &sync, 1);
>+	xe_wait_ufence(fd, &vm_sync, USER_FENCE_VALUE, 0, NSEC_PER_SEC);
>+	munmap(spin, bo_size);
>+	gem_close(fd, bo);
>+
>+	xe_exec_queue_destroy(fd, exec_queue);
>+	xe_vm_destroy(fd, vm);
>+	put_ahnd(ahnd);
>+}
>+
>+/**
>+ * SUBTEST: spin-timestamp-check
>+ * Description: Intiate gt reset then check the timestamp register for each engine.
>+ * Test category: functionality test
>+ */
>+static void xe_spin_timestamp_check(int fd, struct drm_xe_engine_class_instance *eci)
>+{
>+
>+	exec_store(fd, eci, false);

what is the first exec for?

>+
>+	exec_store(fd, eci, true);

please use flags instead of bool to improve readability.

Lucas De Marchi

>+
>+	run_spinner(fd, eci);
>+}
>+
> igt_main
> {
> 	struct drm_xe_engine_class_instance *hwe;
>@@ -343,6 +495,12 @@ igt_main
> 			xe_for_each_engine_class(class)
> 				xe_spin_fixed_duration(fd, gt, class, SPIN_FIX_DURATION_PREEMPT);
>
>+	igt_subtest_with_dynamic("spin-timestamp-check")
>+		xe_for_each_engine(fd, hwe) {
>+			igt_dynamic_f("engine-%s", xe_engine_class_string(hwe->engine_class))
>+				xe_spin_timestamp_check(fd, hwe);
>+		}
>+
> 	igt_fixture
> 		drm_close_driver(fd);
> }
>-- 
>2.34.1
>


More information about the igt-dev mailing list