[PATCH 2/2] tests/xe_spin_batch: Add spin-timestamp-check
Pravalika Gurram
pravalika.gurram at intel.com
Wed Nov 13 17:52:07 UTC 2024
Check the ctx_timestamp register after a GT reset for each engine.
V2: move spinner code to lib to avoid code redundancy
    use flags to maintain readability
    use READ_ONCE to prevent the compiler from optimizing the read out [Lucas]
V3: open the allocator in run_spinner and pass it to the spinner ctx [Zbigniew]
V4: integrate spin_ctx into xe_cork [Lucas]
Signed-off-by: Pravalika Gurram <pravalika.gurram at intel.com>
Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
---
tests/intel/xe_spin_batch.c | 121 ++++++++++++++++++++++++++++++++++++
1 file changed, 121 insertions(+)
diff --git a/tests/intel/xe_spin_batch.c b/tests/intel/xe_spin_batch.c
index 9314e229e..9a5cdb830 100644
--- a/tests/intel/xe_spin_batch.c
+++ b/tests/intel/xe_spin_batch.c
@@ -309,6 +309,121 @@ static void xe_spin_fixed_duration(int fd, int gt, int class, int flags)
put_ahnd(ahnd);
}
+static void exec_store(int fd, struct drm_xe_engine_class_instance *eci,
+ bool hang)
+{
+ uint64_t ahnd, bb_size, bb_addr;
+ uint32_t vm, exec_queue, bb;
+#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
+ struct drm_xe_sync syncobj = {
+ .type = DRM_XE_SYNC_TYPE_USER_FENCE,
+ .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE,
+ };
+
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 1,
+ .syncs = to_user_pointer(&syncobj),
+ };
+ struct {
+ uint32_t batch[16];
+ uint64_t pad;
+ uint32_t data;
+ uint64_t vm_sync;
+ uint64_t exec_sync;
+ } *data;
+ uint64_t batch_offset, batch_addr, sdi_offset, sdi_addr;
+ int64_t timeout = NSEC_PER_SEC;
+ int i, ret;
+
+ ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_RELOC);
+
+ vm = xe_vm_create(fd, 0, 0);
+ exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
+ bb_size = xe_bb_size(fd, sizeof(*data));
+ bb = xe_bo_create(fd, vm, bb_size, vram_if_possible(fd, eci->gt_id), 0);
+ bb_addr = intel_allocator_alloc_with_strategy(ahnd, bb, bb_size, 0,
+ ALLOC_STRATEGY_LOW_TO_HIGH);
+ data = xe_bo_map(fd, bb, bb_size);
+ syncobj.addr = to_user_pointer(&data->vm_sync);
+ xe_vm_bind_async(fd, vm, 0, bb, 0, bb_addr, bb_size, &syncobj, 1);
+ xe_wait_ufence(fd, &data->vm_sync, USER_FENCE_VALUE, 0, NSEC_PER_SEC);
+
+ batch_offset = (char *)&data->batch - (char *)data;
+ batch_addr = bb_addr + batch_offset;
+ sdi_offset = (char *)&data->data - (char *)data;
+ sdi_addr = bb_addr + sdi_offset;
+
+ i = 0;
+
+ data->batch[i++] = MI_STORE_DWORD_IMM_GEN4;
+ data->batch[i++] = sdi_addr;
+ data->batch[i++] = sdi_addr >> 32;
+ data->batch[i++] = 0;
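+	/* for the hang case, skip MI_BATCH_BUFFER_END so the engine runs past the batch and hangs */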
+ if (!hang)
+ data->batch[i++] = MI_BATCH_BUFFER_END;
+ igt_assert(i <= ARRAY_SIZE(data->batch));
+
+ syncobj.addr = bb_addr + (char *)&data->exec_sync - (char *)data;
+ exec.exec_queue_id = exec_queue;
+ exec.address = batch_addr;
+ xe_exec(fd, &exec);
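+	/* a hanging batch never signals the user fence, so the wait is expected to time out */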
+ ret = __xe_wait_ufence(fd, &data->exec_sync, USER_FENCE_VALUE, 0, &timeout);
+ igt_assert(hang ? ret < 0 : ret == 0);
+
+ munmap(data, bb_size);
+ gem_close(fd, bb);
+
+ xe_exec_queue_destroy(fd, exec_queue);
+ xe_vm_destroy(fd, vm);
+
+ put_ahnd(ahnd);
+}
+
+static void run_spinner(int fd, struct drm_xe_engine_class_instance *eci)
+{
+ struct xe_cork *ctx = NULL;
+ uint32_t vm;
+ uint32_t ts_1, ts_2;
+ uint64_t ahnd;
+
+ vm = xe_vm_create(fd, 0, 0);
+ ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_RELOC);
+ ctx = xe_spin_ctx_init_opts(fd, eci, vm, 1, 1, .ahnd = ahnd);
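+	/* start a spinner on the target engine; it keeps updating its timestamp while running */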
+ xe_spin_sync_start(fd, ctx);
+
+ /* Collect and check timestamps before stopping the spinner */
+ usleep(50000);
+ ts_1 = READ_ONCE(ctx->spin->timestamp);
+ usleep(50000);
+ ts_2 = READ_ONCE(ctx->spin->timestamp);
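+	/* the timestamp must keep ticking between the two reads after the earlier reset */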
+ igt_assert_neq_u32(ts_1, ts_2);
+
+ xe_spin_sync_end(fd, ctx);
+ xe_spin_ctx_destroy(fd, ctx);
+
+ xe_vm_destroy(fd, vm);
+ put_ahnd(ahnd);
+}
+
+/**
+ * SUBTEST: spin-timestamp-check
+ * Description: Initiate a GT reset, then check the timestamp register for each engine.
+ * Test category: functionality test
+ */
+static void xe_spin_timestamp_check(int fd, struct drm_xe_engine_class_instance *eci)
+{
+	/* sanity check for exec submission */
+	exec_store(fd, eci, false);
+
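+	/* submitting a batch without MI_BATCH_BUFFER_END hangs the engine and triggers a reset */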
+	exec_store(fd, eci, true);
+
+ run_spinner(fd, eci);
+}
+
igt_main
{
struct drm_xe_engine_class_instance *hwe;
@@ -343,6 +458,12 @@ igt_main
xe_for_each_engine_class(class)
xe_spin_fixed_duration(fd, gt, class, SPIN_FIX_DURATION_PREEMPT);
+ igt_subtest_with_dynamic("spin-timestamp-check")
+ xe_for_each_engine(fd, hwe) {
+ igt_dynamic_f("engine-%s", xe_engine_class_string(hwe->engine_class))
+ xe_spin_timestamp_check(fd, hwe);
+ }
+
igt_fixture
drm_close_driver(fd);
}
--
2.34.1