[PATCH v2 06/11] drm/xe: Add send tlb invalidation helpers
Matthew Brost
matthew.brost at intel.com
Mon Jul 8 04:03:26 UTC 2024
Break the GuC-specific TLB invalidation send code out into dedicated helper
functions, send_tlb_invalidation_ggtt() and send_tlb_invalidation_ppgtt().
The seqno is now stamped into the action array by the caller rather than
patched in by send_tlb_invalidation(), which just asserts that it is set.
Signed-off-by: Matthew Brost <matthew.brost at intel.com>
---
drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 166 ++++++++++----------
1 file changed, 84 insertions(+), 82 deletions(-)
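A quick illustration of the calling-convention change, with both call sites
lifted from the final hunk below (juxtaposed here for review, not a
compilable snippet on its own):

  /* Before: fence passed down, seqno patched into action[1] by the helper */
  ret = send_tlb_invalidation(&gt->uc.guc, fence, action, len);

  /* After: caller stamps fence->seqno, helper only asserts it and sends */
  ret = send_tlb_invalidation_ppgtt(gt, start, end, asid, fence->seqno);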
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 2493ea91b637..79d1ed138db5 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -157,13 +157,11 @@ static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
return seqno_recv >= seqno;
}
-static int send_tlb_invalidation(struct xe_guc *guc,
- struct xe_gt_tlb_invalidation_fence *fence,
- u32 *action, int len)
+static int send_tlb_invalidation(struct xe_guc *guc, u32 *action, int len)
{
struct xe_gt *gt = guc_to_gt(guc);
- xe_gt_assert(gt, fence);
+ xe_gt_assert(gt, action[1]); /* Seqno */
lockdep_assert_held(&gt->tlb_invalidation.seqno_lock);
/*
@@ -172,11 +170,86 @@ static int send_tlb_invalidation(struct xe_guc *guc,
* need to be updated.
*/
- action[1] = fence->seqno;
return xe_guc_ct_send(&guc->ct, action, len,
G2H_LEN_DW_TLB_INVALIDATE, 1);
}
+#define MAKE_INVAL_OP(type) ((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
+ XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
+ XE_GUC_TLB_INVAL_FLUSH_CACHE)
+
+static int send_tlb_invalidation_ggtt(struct xe_gt *gt, int seqno)
+{
+ u32 action[] = {
+ XE_GUC_ACTION_TLB_INVALIDATION,
+ seqno,
+ MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
+ };
+
+ return send_tlb_invalidation(&gt->uc.guc, action, ARRAY_SIZE(action));
+}
+
+static int send_tlb_invalidation_ppgtt(struct xe_gt *gt, u64 start, u64 end,
+ u32 asid, int seqno)
+{
+#define MAX_TLB_INVALIDATION_LEN 7
+ u32 action[MAX_TLB_INVALIDATION_LEN];
+ int len = 0;
+
+ action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
+ action[len++] = seqno;
+ if (!gt_to_xe(gt)->info.has_range_tlb_invalidation) {
+ action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
+ } else {
+ u64 orig_start = start;
+ u64 length = end - start;
+ u64 align;
+
+ if (length < SZ_4K)
+ length = SZ_4K;
+
+ /*
+ * We need to invalidate at a higher granularity if the start
+ * address is not aligned to the length: find a length large
+ * enough that a single address mask covers the required range.
+ */
+ align = roundup_pow_of_two(length);
+ start = ALIGN_DOWN(start, align);
+ end = ALIGN(end, align);
+ length = align;
+ while (start + length < end) {
+ length <<= 1;
+ start = ALIGN_DOWN(orig_start, length);
+ }
+
+ /*
+ * Minimum invalidation size for a 2MB page that the hardware
+ * expects is 16MB
+ */
+ if (length >= SZ_2M) {
+ length = max_t(u64, SZ_16M, length);
+ start = ALIGN_DOWN(orig_start, length);
+ }
+
+ xe_gt_assert(gt, length >= SZ_4K);
+ xe_gt_assert(gt, is_power_of_2(length));
+ xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1,
+ ilog2(SZ_2M) + 1)));
+ xe_gt_assert(gt, IS_ALIGNED(start, length));
+
+ action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
+ action[len++] = asid;
+ action[len++] = lower_32_bits(start);
+ action[len++] = upper_32_bits(start);
+ action[len++] = ilog2(length) - ilog2(SZ_4K);
+ }
+
+ xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);
+
+ return send_tlb_invalidation(&gt->uc.guc, action, len);
+}
+
static void xe_gt_tlb_invalidation_fence_prep(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence)
{
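For anyone who wants to poke at the range-widening math above outside the
kernel, here is a standalone userspace sketch. The SZ_* constants and the
pow2_roundup()/align_down()/align_up() helpers are local stand-ins for the
kernel's roundup_pow_of_two(), ALIGN_DOWN() and ALIGN(), not kernel code:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_4K  0x1000ULL
#define SZ_2M  0x200000ULL
#define SZ_16M 0x1000000ULL

/* Stand-in for roundup_pow_of_two(): smallest power of two >= v, v > 0. */
static uint64_t pow2_roundup(uint64_t v)
{
	uint64_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

/* Stand-in for ALIGN_DOWN(): round x down to a multiple of power-of-two a. */
static uint64_t align_down(uint64_t x, uint64_t a)
{
	return x & ~(a - 1);
}

/* Stand-in for ALIGN(): round x up to a multiple of power-of-two a. */
static uint64_t align_up(uint64_t x, uint64_t a)
{
	return (x + a - 1) & ~(a - 1);
}

/*
 * Mirrors the widening loop in send_tlb_invalidation_ppgtt(): grow the
 * power-of-two length until one naturally aligned block covers [start, end).
 */
static void widen_range(uint64_t start, uint64_t end,
			uint64_t *out_start, uint64_t *out_length)
{
	uint64_t orig_start = start;
	uint64_t length = end - start;
	uint64_t align;

	if (length < SZ_4K)
		length = SZ_4K;

	align = pow2_roundup(length);
	start = align_down(start, align);
	end = align_up(end, align);
	length = align;
	while (start + length < end) {
		length <<= 1;
		start = align_down(orig_start, length);
	}

	/* Ranges containing 2M pages must be invalidated at 16M minimum. */
	if (length >= SZ_2M) {
		length = length > SZ_16M ? length : SZ_16M;
		start = align_down(orig_start, length);
	}

	*out_start = start;
	*out_length = length;
}

int main(void)
{
	uint64_t s, l;

	/* An 8K range at 0x1000 crosses an 8K boundary: widens to 16K at 0. */
	widen_range(0x1000, 0x3000, &s, &l);
	assert(s == 0 && l == 0x4000);

	/* An already aligned 8K range stays as-is. */
	widen_range(0x2000, 0x4000, &s, &l);
	assert(s == 0x2000 && l == 0x2000);

	printf("ok\n");
	return 0;
}

E.g. the 8K range at 0x1000 crosses an 8K boundary, so it widens to a single
16K block at 0; the final action dword would then encode that length as
ilog2(16K) - ilog2(4K) = 2.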
@@ -209,33 +282,16 @@ static void xe_gt_tlb_invalidation_fence_prep(struct xe_gt *gt,
XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
XE_GUC_TLB_INVAL_FLUSH_CACHE)
-/**
- * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
- * @gt: graphics tile
- * @fence: invalidation fence which will be signal on TLB invalidation
- * completion
- *
- * Issue a TLB invalidation for the GuC. Completion of TLB is asynchronous and
- * caller can use the invalidation fence to wait for completion.
- *
- * Return: 0 on success, negative error code on error
- */
-static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence)
+static int __xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence)
{
- u32 action[] = {
- XE_GUC_ACTION_TLB_INVALIDATION,
- 0, /* seqno, replaced in send_tlb_invalidation */
- MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
- };
int ret;
mutex_lock(&gt->tlb_invalidation.seqno_lock);
xe_gt_tlb_invalidation_fence_prep(gt, fence);
- ret = send_tlb_invalidation(&gt->uc.guc, fence, action,
- ARRAY_SIZE(action));
+ ret = send_tlb_invalidation_ggtt(gt, fence->seqno);
if (ret < 0)
invalidation_fence_signal(gt_to_xe(gt), fence);
@@ -263,7 +319,7 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
int ret;
xe_gt_tlb_invalidation_fence_init(gt, &fence);
- ret = xe_gt_tlb_invalidation_guc(gt, &fence);
+ ret = __xe_gt_tlb_invalidation_ggtt(gt, &fence);
if (ret < 0)
return ret;
@@ -310,75 +366,21 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
u64 start, u64 end, u32 asid)
{
struct xe_device *xe = gt_to_xe(gt);
-#define MAX_TLB_INVALIDATION_LEN 7
- u32 action[MAX_TLB_INVALIDATION_LEN];
- int len = 0;
int ret;
xe_gt_assert(gt, fence);
/* Execlists not supported */
- if (gt_to_xe(gt)->info.force_execlist) {
+ if (xe->info.force_execlist) {
__invalidation_fence_signal(xe, fence);
return 0;
}
- action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
- action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
- if (!xe->info.has_range_tlb_invalidation) {
- action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
- } else {
- u64 orig_start = start;
- u64 length = end - start;
- u64 align;
-
- if (length < SZ_4K)
- length = SZ_4K;
-
- /*
- * We need to invalidate a higher granularity if start address
- * is not aligned to length. When start is not aligned with
- * length we need to find the length large enough to create an
- * address mask covering the required range.
- */
- align = roundup_pow_of_two(length);
- start = ALIGN_DOWN(start, align);
- end = ALIGN(end, align);
- length = align;
- while (start + length < end) {
- length <<= 1;
- start = ALIGN_DOWN(orig_start, length);
- }
-
- /*
- * Minimum invalidation size for a 2MB page that the hardware
- * expects is 16MB
- */
- if (length >= SZ_2M) {
- length = max_t(u64, SZ_16M, length);
- start = ALIGN_DOWN(orig_start, length);
- }
-
- xe_gt_assert(gt, length >= SZ_4K);
- xe_gt_assert(gt, is_power_of_2(length));
- xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1,
- ilog2(SZ_2M) + 1)));
- xe_gt_assert(gt, IS_ALIGNED(start, length));
-
- action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
- action[len++] = asid;
- action[len++] = lower_32_bits(start);
- action[len++] = upper_32_bits(start);
- action[len++] = ilog2(length) - ilog2(SZ_4K);
- }
-
- xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);
-
mutex_lock(&gt->tlb_invalidation.seqno_lock);
xe_gt_tlb_invalidation_fence_prep(gt, fence);
- ret = send_tlb_invalidation(&gt->uc.guc, fence, action, len);
+ ret = send_tlb_invalidation_ppgtt(gt, start, end, asid, fence->seqno);
if (ret < 0)
invalidation_fence_signal(xe, fence);
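A note on the asserts in the moved code: GENMASK(ilog2(SZ_16M) - 1,
ilog2(SZ_2M) + 1) covers bits 22-23, i.e. the 4M and 8M lengths. Any length
that reaches 2M is bumped to at least 16M just above, so a power-of-two
length can never land on one of those bits, which is what the assert
enforces. The last action dword encodes the length as ilog2(length) -
ilog2(SZ_4K), i.e. log2 of the range size in 4K units. A small standalone
check of both facts (userspace C, kernel macros restated locally, same
caveats as the sketch above):

#include <assert.h>
#include <stdint.h>

#define SZ_4K  0x1000ULL
#define SZ_2M  0x200000ULL
#define SZ_16M 0x1000000ULL

/* Stand-in for ilog2() on a power-of-two value. */
static unsigned int ilog2_p2(uint64_t v)
{
	unsigned int n = 0;

	while (v >>= 1)
		n++;
	return n;
}

/* Stand-in for GENMASK(): bits h..l inclusive, h < 64. */
static uint64_t genmask(unsigned int h, unsigned int l)
{
	return ((~0ULL) >> (63 - h)) & ~((1ULL << l) - 1);
}

int main(void)
{
	/* The assert's mask covers exactly the 4M and 8M length bits. */
	assert(genmask(ilog2_p2(SZ_16M) - 1, ilog2_p2(SZ_2M) + 1) ==
	       ((SZ_2M << 1) | (SZ_2M << 2)));

	/* A 64K invalidation encodes as log2(64K / 4K) = 4 in the action. */
	assert(ilog2_p2(16 * SZ_4K) - ilog2_p2(SZ_4K) == 4);
	return 0;
}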
--
2.34.1