[Intel-xe] [PATCH 03/22] drm/xe: Move TLB invalidation variable to own sub-structure in GT

Rodrigo Vivi rodrigo.vivi at intel.com
Fri Feb 3 20:23:50 UTC 2023


From: Matthew Brost <matthew.brost at intel.com>

TLB invalidations are no longer restricted to USM, so move the related
variables out of the usm sub-structure and into their own
tlb_invalidation sub-structure.
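
As a rough illustration of the wrap-around seqno scheme these moved fields
carry (a standalone sketch, not the driver code; the struct and helper names
below are stand-ins chosen only for demonstration):

	/* sketch: wrap-around TLB invalidation seqno handling */
	#include <assert.h>
	#include <stdbool.h>

	#define TLB_INVALIDATION_SEQNO_MAX	0x100000

	struct tlb_invalidation {	/* hypothetical stand-in for gt->tlb_invalidation */
		int seqno;		/* next seqno to hand out, protected by CT lock in the driver */
		int seqno_recv;		/* last seqno acked by the GuC */
	};

	/* Hand out the next seqno; 0 is skipped so it never collides with "unused". */
	static int next_seqno(struct tlb_invalidation *ti)
	{
		int seqno = ti->seqno;

		ti->seqno = (ti->seqno + 1) % TLB_INVALIDATION_SEQNO_MAX;
		if (!ti->seqno)
			ti->seqno = 1;
		return seqno;
	}

	/* A seqno is "past" once acked, or once the half-window check shows it wrapped. */
	static bool seqno_past(struct tlb_invalidation *ti, int seqno)
	{
		if (ti->seqno_recv >= seqno)
			return true;
		if (seqno - ti->seqno_recv > TLB_INVALIDATION_SEQNO_MAX / 2)
			return true;
		return false;
	}

	int main(void)
	{
		struct tlb_invalidation ti = { .seqno = 1, .seqno_recv = 0 };
		int s = next_seqno(&ti);

		assert(!seqno_past(&ti, s));	/* not acked yet */
		ti.seqno_recv = s;		/* pretend the GuC acked it */
		assert(seqno_past(&ti, s));
		return 0;
	}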

Signed-off-by: Matthew Brost <matthew.brost at intel.com>
Cc: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
---
 drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 20 +++++++++----------
 drivers/gpu/drm/xe/xe_gt_types.h            | 22 ++++++++++-----------
 2 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index fea7a557d213..a39a2fb163ae 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -16,7 +16,7 @@ guc_to_gt(struct xe_guc *guc)
 
 int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
 {
-	gt->usm.tlb_invalidation_seqno = 1;
+	gt->tlb_invalidation.seqno = 1;
 
 	return 0;
 }
@@ -40,12 +40,12 @@ static int send_tlb_invalidation(struct xe_guc *guc)
 	 * need to be updated.
 	 */
 	mutex_lock(&guc->ct.lock);
-	seqno = gt->usm.tlb_invalidation_seqno;
+	seqno = gt->tlb_invalidation.seqno;
 	action[1] = seqno;
-	gt->usm.tlb_invalidation_seqno = (gt->usm.tlb_invalidation_seqno + 1) %
+	gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
 		TLB_INVALIDATION_SEQNO_MAX;
-	if (!gt->usm.tlb_invalidation_seqno)
-		gt->usm.tlb_invalidation_seqno = 1;
+	if (!gt->tlb_invalidation.seqno)
+		gt->tlb_invalidation.seqno = 1;
 	ret = xe_guc_ct_send_locked(&guc->ct, action, ARRAY_SIZE(action),
 				    G2H_LEN_DW_TLB_INVALIDATE, 1);
 	if (!ret)
@@ -62,10 +62,10 @@ int xe_gt_tlb_invalidation(struct xe_gt *gt)
 
 static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
 {
-	if (gt->usm.tlb_invalidation_seqno_recv >= seqno)
+	if (gt->tlb_invalidation.seqno_recv >= seqno)
 		return true;
 
-	if (seqno - gt->usm.tlb_invalidation_seqno_recv >
+	if (seqno - gt->tlb_invalidation.seqno_recv >
 	    (TLB_INVALIDATION_SEQNO_MAX / 2))
 		return true;
 
@@ -87,7 +87,7 @@ int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
 				 HZ / 5);
 	if (!ret) {
 		drm_err(&xe->drm, "TLB invalidation time'd out, seqno=%d, recv=%d\n",
-			seqno, gt->usm.tlb_invalidation_seqno_recv);
+			seqno, gt->tlb_invalidation.seqno_recv);
 		return -ETIME;
 	}
 
@@ -103,11 +103,11 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
 		return -EPROTO;
 
 	/* Sanity check on seqno */
-	expected_seqno = (gt->usm.tlb_invalidation_seqno_recv + 1) %
+	expected_seqno = (gt->tlb_invalidation.seqno_recv + 1) %
 		TLB_INVALIDATION_SEQNO_MAX;
 	XE_WARN_ON(expected_seqno != msg[0]);
 
-	gt->usm.tlb_invalidation_seqno_recv = msg[0];
+	gt->tlb_invalidation.seqno_recv = msg[0];
 	smp_wmb();
 	wake_up_all(&guc->ct.wq);
 
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 2dbc8cedd630..3bfce7abe857 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -160,6 +160,17 @@ struct xe_gt {
 		struct work_struct worker;
 	} reset;
 
+	/** @tlb_invalidation: TLB invalidation state */
+	struct {
+		/** @seqno: TLB invalidation seqno, protected by CT lock */
+#define TLB_INVALIDATION_SEQNO_MAX	0x100000
+		int seqno;
+		/**
+		 * @seqno_recv: last received TLB invalidation seqno, protected by CT lock
+		 */
+		int seqno_recv;
+	} tlb_invalidation;
+
 	/** @usm: unified shared memory state */
 	struct {
 		/**
@@ -175,17 +186,6 @@ struct xe_gt {
 		 * operations (e.g. mmigrations, fixing page tables)
 		 */
 		u16 reserved_bcs_instance;
-		/**
-		 * @tlb_invalidation_seqno: TLB invalidation seqno, protected by
-		 * CT lock
-		 */
-#define TLB_INVALIDATION_SEQNO_MAX	0x100000
-		int tlb_invalidation_seqno;
-		/**
-		 * @tlb_invalidation_seqno_recv: last received TLB invalidation
-		 * seqno, protected by CT lock
-		 */
-		int tlb_invalidation_seqno_recv;
 		/** @pf_wq: page fault work queue, unbound, high priority */
 		struct workqueue_struct *pf_wq;
 		/** @acc_wq: access counter work queue, unbound, high priority */
-- 
2.39.1
