[RFC PATCH] drm/xe: Add initial vfunc for flexible platform based TLB invals
Nirmoy Das
nirmoy.das at intel.com
Wed Mar 12 20:05:07 UTC 2025
The TLB invalidation mechanism is platform dependent. Add an initial
vfunc-based, flexible, per-platform TLB invalidation scheme which will
in future replace the existing usages.
Cc: Matthew Brost <matthew.brost at intel.com>
Signed-off-by: Nirmoy Das <nirmoy.das at intel.com>
---
Sending this to get initial reviews if this is the right direction
for TLB layering for future-proofing TLB invalidation based on platform.
drivers/gpu/drm/xe/xe_device.c | 3 +
drivers/gpu/drm/xe/xe_device_types.h | 3 +
drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 89 ++++++++++++++++++-
.../gpu/drm/xe/xe_gt_tlb_invalidation_types.h | 19 ++++
4 files changed, 113 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index b2f656b2a563..728f4c1d2545 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -37,6 +37,7 @@
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_vf.h"
+#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_hw_engine_group.h"
#include "xe_hwmon.h"
@@ -778,6 +779,8 @@ int xe_device_probe(struct xe_device *xe)
if (err)
return err;
+ xe->tlb_ops = xe_tlb_invalidation_ops_get(xe);
+
for_each_gt(gt, xe, id) {
err = xe_gt_init_early(gt);
if (err)
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 72ef0b6fc425..f3c0ccb94557 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -552,6 +552,9 @@ struct xe_device {
/** @pmu: performance monitoring unit */
struct xe_pmu pmu;
+ /** @tlb_ops: tlb operations for this device */
+ const struct xe_tlb_invalidation_ops *tlb_ops;
+
#ifdef TEST_VM_OPS_ERROR
/**
* @vm_inject_error_position: inject errors at different places in VM
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 03072e094991..d3e7185d416b 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -322,6 +322,24 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
return 0;
}
+#define MAX_TLB_INVALIDATION_LEN 7
+static int xe_gt_tlb_invalidation_full(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ u64 start, u64 end, u32 asid)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 action[MAX_TLB_INVALIDATION_LEN];
+ int len = 0;
+
+ xe_gt_assert(gt, fence);
+ xe_gt_assert(gt, !xe->info.has_range_tlb_invalidation);
+
+ action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
+ action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
+
+ return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
+}
+
/**
* xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
* address range
@@ -344,7 +362,6 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
u64 start, u64 end, u32 asid)
{
struct xe_device *xe = gt_to_xe(gt);
-#define MAX_TLB_INVALIDATION_LEN 7
u32 action[MAX_TLB_INVALIDATION_LEN];
int len = 0;
@@ -568,3 +585,73 @@ void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
dma_fence_get(&fence->base);
fence->gt = gt;
}
+
+static int
+xe_gt_tlb_invalidation_guc_no_execlist(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ __invalidation_fence_signal(xe, fence);
+ return 0;
+}
+
+static int
+xe_gt_tlb_invalidation_no_execlist(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ u64 start, u64 end, u32 asid)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ __invalidation_fence_signal(xe, fence);
+ return 0;
+}
+
+static int
+xe_gt_tlb_invalidation_vma_no_execlist(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ struct xe_vma *vma)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ __invalidation_fence_signal(xe, fence);
+ return 0;
+}
+
+static const struct xe_tlb_invalidation_ops tlb_inval_ops_guc = {
+ .invalidation_guc = xe_gt_tlb_invalidation_guc,
+ .invalidation_ggtt = xe_gt_tlb_invalidation_ggtt,
+ .invalidation_range = xe_gt_tlb_invalidation_range,
+ .invalidation_vma = xe_gt_tlb_invalidation_vma,
+ .init_early = xe_gt_tlb_invalidation_init_early,
+ .reset = xe_gt_tlb_invalidation_reset,
+};
+
+static const struct xe_tlb_invalidation_ops tlb_inval_ops_no_range = {
+ .invalidation_guc = xe_gt_tlb_invalidation_guc,
+ .invalidation_ggtt = xe_gt_tlb_invalidation_ggtt,
+ .invalidation_range = xe_gt_tlb_invalidation_full,
+ .invalidation_vma = xe_gt_tlb_invalidation_vma,
+ .init_early = xe_gt_tlb_invalidation_init_early,
+ .reset = xe_gt_tlb_invalidation_reset,
+};
+
+static const struct xe_tlb_invalidation_ops tlb_inval_ops_no_execlist = {
+ .invalidation_guc = xe_gt_tlb_invalidation_guc_no_execlist,
+ .invalidation_ggtt = xe_gt_tlb_invalidation_ggtt,
+ .invalidation_range = xe_gt_tlb_invalidation_no_execlist,
+ .invalidation_vma = xe_gt_tlb_invalidation_vma_no_execlist,
+ .init_early = xe_gt_tlb_invalidation_init_early,
+ .reset = xe_gt_tlb_invalidation_reset,
+};
+
+const struct xe_tlb_invalidation_ops *
+xe_tlb_invalidation_ops_get(struct xe_device *xe)
+{
+ if (xe->info.force_execlist)
+ return &tlb_inval_ops_no_execlist;
+ else if (xe->info.has_range_tlb_invalidation)
+ return &tlb_inval_ops_guc;
+ else
+ return &tlb_inval_ops_no_range;
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
index de6e825e0851..ed367941c42f 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
@@ -8,7 +8,9 @@
#include <linux/dma-fence.h>
+struct xe_device;
struct xe_gt;
+struct xe_vma;
/**
* struct xe_gt_tlb_invalidation_fence - XE GT TLB invalidation fence
@@ -29,4 +31,21 @@ struct xe_gt_tlb_invalidation_fence {
ktime_t invalidation_time;
};
+struct xe_tlb_invalidation_ops {
+ int (*invalidation_guc)(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence);
+ int (*invalidation_ggtt)(struct xe_gt *gt);
+ int (*invalidation_range)(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ u64 start, u64 end, u32 asid);
+ int (*invalidation_vma)(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ struct xe_vma *vma);
+ int (*init_early)(struct xe_gt *gt);
+ void (*reset)(struct xe_gt *gt);
+};
+
+const struct xe_tlb_invalidation_ops *
+xe_tlb_invalidation_ops_get(struct xe_device *xe);
+
#endif
--
2.46.0
More information about the Intel-xe
mailing list