[PATCH i-g-t] lib/intel_compute: use post-sync write to detect command level preemption

Zbigniew Kempczyński zbigniew.kempczynski at intel.com
Wed Jul 2 07:32:43 UTC 2025


At command level, when the long job's compute walker finishes, we might
be switched immediately to the short job. This defers signalling of the
long job's user-fence, giving the false impression that the job is still
ongoing.

Add a post-sync write to the compute walker to allow detecting this
situation, and fail if command level preemption occurred instead of TG
preemption.
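
For illustration, after the short job completes the preemption test can
check the long walker's post-sync slot (sketch only, using the names this
patch introduces):

  post_data = (uint64_t *) bo_dict_long[8].data;

  /* With TG/WMTP preemption the long walker hasn't finished, so its
   * post-sync immediate must not be visible yet. */
  igt_assert_neq_u64(POST_SYNC_VALUE, *post_data);

  /* Once the long job is released and synced, the write must land. */
  igt_assert_eq_u64(POST_SYNC_VALUE, *post_data);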

Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
Cc: Francois Dugast <francois.dugast at intel.com>
---
 lib/intel_compute.c | 36 ++++++++++++++++++++++++++++--------
 1 file changed, 28 insertions(+), 8 deletions(-)
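
For reference, a rough annotation of the post-sync block emitted in
xe2lpg_compute_exec_compute() (sketch only; dword order as in the hunk
below):

  /* Post-sync control: cache/pipeline flush plus an immediate-data write. */
  addr_bo_buffer_batch[b++] = DP_SS_CACHE_FLUSH | uc_mocs << 4 |
			      DP_PIPELINE_FLUSH | WRITE_IMM_DATA;
  /* 64-bit destination address; here the start of the batch BO, which the
   * preemption test later reads back as bo_dict_long[8].data. */
  addr_bo_buffer_batch[b++] = ADDR_BATCH;
  addr_bo_buffer_batch[b++] = ADDR_BATCH >> 32;
  /* 64-bit immediate written once the compute walker completes. */
  addr_bo_buffer_batch[b++] = (uint32_t) POST_SYNC_VALUE;
  addr_bo_buffer_batch[b++] = (uint32_t) (POST_SYNC_VALUE >> 32);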

diff --git a/lib/intel_compute.c b/lib/intel_compute.c
index 9f5fc1bc59..829d3a35f9 100644
--- a/lib/intel_compute.c
+++ b/lib/intel_compute.c
@@ -14,6 +14,7 @@
 #include "gen8_media.h"
 #include "gen9_media.h"
 #include "intel_compute.h"
+#include "intel_mocs.h"
 #include "lib/igt_syncobj.h"
 #include "lib/intel_reg.h"
 #include "xe/xe_ioctl.h"
@@ -52,6 +53,7 @@
 #define OFFSET_STATE_SIP			0xFFFF0000
 
 #define USER_FENCE_VALUE			0xdeadbeefdeadbeefull
+#define POST_SYNC_VALUE			0xabcdabcdcdefcdefull
 #define MAGIC_LOOP_STOP			0x12341234
 
 #define THREADS_PER_GROUP		32
@@ -60,6 +62,9 @@
 #define ENQUEUED_LOCAL_SIZE_X		1024
 #define ENQUEUED_LOCAL_SIZE_Y		1
 #define ENQUEUED_LOCAL_SIZE_Z		1
+#define DP_SS_CACHE_FLUSH		(1 << 12)
+#define DP_PIPELINE_FLUSH		(1 << 2)
+#define WRITE_IMM_DATA			(1 << 0)
 
 /*
  * TGP  - ThreadGroup Preemption
@@ -1532,7 +1537,8 @@ static void xelpg_compute_exec_compute(uint32_t *addr_bo_buffer_batch,
 	addr_bo_buffer_batch[b++] = MI_BATCH_BUFFER_END;
 }
 
-static void xe2lpg_compute_exec_compute(uint32_t *addr_bo_buffer_batch,
+static void xe2lpg_compute_exec_compute(int fd,
+					uint32_t *addr_bo_buffer_batch,
 					uint64_t addr_general_state_base,
 					uint64_t addr_surface_state_base,
 					uint64_t addr_dynamic_state_base,
@@ -1544,6 +1550,7 @@ static void xe2lpg_compute_exec_compute(uint32_t *addr_bo_buffer_batch,
 					bool	 threadgroup_preemption,
 					uint32_t work_size)
 {
+	uint8_t uc_mocs = intel_get_uc_mocs_index(fd);
 	int b = 0;
 
 	igt_debug("general   state base: %"PRIx64"\n", addr_general_state_base);
@@ -1603,7 +1610,7 @@ static void xe2lpg_compute_exec_compute(uint32_t *addr_bo_buffer_batch,
 	addr_bo_buffer_batch[b++] = addr_surface_state_base >> 32;
 	addr_bo_buffer_batch[b++] = 0x001ff000;
 
-	if (sip_start_pointer) {
+	if (sip_start_pointer && !threadgroup_preemption) {
 		addr_bo_buffer_batch[b++] = XE2_STATE_SIP | 0x1;
 		addr_bo_buffer_batch[b++] = sip_start_pointer;
 		addr_bo_buffer_batch[b++] = 0x00000000;
@@ -1651,11 +1658,12 @@ static void xe2lpg_compute_exec_compute(uint32_t *addr_bo_buffer_batch,
 	addr_bo_buffer_batch[b++] = 0x0c000000 | THREADS_PER_GROUP;
 	addr_bo_buffer_batch[b++] = 0x00000000;
 	addr_bo_buffer_batch[b++] = 0x00000000;
-	addr_bo_buffer_batch[b++] = 0x00001047;
+	addr_bo_buffer_batch[b++] = DP_SS_CACHE_FLUSH | uc_mocs << 4 |
+				    DP_PIPELINE_FLUSH | WRITE_IMM_DATA;
 	addr_bo_buffer_batch[b++] = ADDR_BATCH;
 	addr_bo_buffer_batch[b++] = ADDR_BATCH >> 32;
-	addr_bo_buffer_batch[b++] = 0x00000000;
-	addr_bo_buffer_batch[b++] = 0x00000000;
+	addr_bo_buffer_batch[b++] = (uint32_t) POST_SYNC_VALUE;
+	addr_bo_buffer_batch[b++] = (uint32_t) (POST_SYNC_VALUE >> 32);
 	addr_bo_buffer_batch[b++] = 0x00000000;
 	addr_bo_buffer_batch[b++] = 0x00000000;
 	addr_bo_buffer_batch[b++] = 0x00000000;
@@ -1857,7 +1865,8 @@ static void xe2lpg_compute_exec(int fd, const unsigned char *kernel,
 	input_data = get_input_data(&execenv, user, bo_dict[4].data);
 	output_data = get_output_data(&execenv, user, bo_dict[5].data);
 
-	xe2lpg_compute_exec_compute(bo_dict[8].data,
+	xe2lpg_compute_exec_compute(fd,
+				    bo_dict[8].data,
 				    ADDR_GENERAL_STATE_BASE,
 				    ADDR_SURFACE_STATE_BASE,
 				    ADDR_DYNAMIC_STATE_BASE,
@@ -2101,6 +2110,7 @@ static void xe2lpg_compute_preempt_exec(int fd, const unsigned char *long_kernel
 	struct bo_dict_entry bo_dict_short[ARRAY_SIZE(bo_dict_long)];
 	struct bo_execenv execenv_short, execenv_long;
 	float *input_short, *output_short, *input_long;
+	uint64_t *post_data;
 	unsigned int long_kernel_loop_count = 0;
 	int64_t timeout_one_ns = 1;
 	bool use_loop_kernel = loop_kernel && !threadgroup_preemption;
@@ -2149,16 +2159,19 @@ static void xe2lpg_compute_preempt_exec(int fd, const unsigned char *long_kernel
 	input_long = (float *) bo_dict_long[4].data;
 	input_short = (float *) bo_dict_short[4].data;
 	output_short = (float *) bo_dict_short[5].data;
+	post_data = (uint64_t *) bo_dict_long[8].data;
 
 	bo_randomize(input_short, SIZE_DATA);
 
-	xe2lpg_compute_exec_compute(bo_dict_long[8].data, ADDR_GENERAL_STATE_BASE,
+	xe2lpg_compute_exec_compute(fd,
+				    bo_dict_long[8].data, ADDR_GENERAL_STATE_BASE,
 				    ADDR_SURFACE_STATE_BASE, ADDR_DYNAMIC_STATE_BASE,
 				    ADDR_INSTRUCTION_STATE_BASE, XE2_ADDR_STATE_CONTEXT_DATA_BASE,
 				    OFFSET_INDIRECT_DATA_START, OFFSET_KERNEL, OFFSET_STATE_SIP,
 				    threadgroup_preemption, SIZE_DATA);
 
-	xe2lpg_compute_exec_compute(bo_dict_short[8].data, ADDR_GENERAL_STATE_BASE,
+	xe2lpg_compute_exec_compute(fd,
+				    bo_dict_short[8].data, ADDR_GENERAL_STATE_BASE,
 				    ADDR_SURFACE_STATE_BASE, ADDR_DYNAMIC_STATE_BASE,
 				    ADDR_INSTRUCTION_STATE_BASE, XE2_ADDR_STATE_CONTEXT_DATA_BASE,
 				    OFFSET_INDIRECT_DATA_START, OFFSET_KERNEL, OFFSET_STATE_SIP,
@@ -2177,6 +2190,12 @@ static void xe2lpg_compute_preempt_exec(int fd, const unsigned char *long_kernel
 	bo_execenv_exec(&execenv_short, ADDR_BATCH);
 	bo_check_square(input_short, output_short, SIZE_DATA);
 
+	/*
+	 * Catch command level preemption happening instead of TG preemption.
+	 * For TG and WMTP the post-sync write can't be visible at this point yet.
+	 */
+	igt_assert_neq_u64(POST_SYNC_VALUE, *post_data);
+
 	/* Check that the long kernel has not completed yet */
 	igt_assert_neq(0, __xe_wait_ufence(fd, &execenv_long.bo_sync->sync, USER_FENCE_VALUE,
 					   execenv_long.exec_queue, &timeout_one_ns));
@@ -2187,6 +2206,7 @@ static void xe2lpg_compute_preempt_exec(int fd, const unsigned char *long_kernel
 	((int *)input_long)[0] = MAGIC_LOOP_STOP;
 
 	bo_execenv_sync(&execenv_long);
+	igt_assert_eq_u64(POST_SYNC_VALUE, *post_data);
 
 	bo_execenv_unbind(&execenv_short, bo_dict_short, entries);
 	bo_execenv_unbind(&execenv_long, bo_dict_long, entries);
-- 
2.43.0


