[PATCH v2 17/21] drm/i915: Add generic interface for tlb invalidation for XeHP
Mauro Carvalho Chehab
mchehab at kernel.org
Thu Jul 14 12:06:22 UTC 2022
From: Prathap Kumar Valsan <prathap.kumar.valsan at intel.com>
Add an interface for GuC TLB actions, supporting both selective and
full TLB invalidations. After this change, when GuC is enabled,
TLB invalidations are issued via the GuC CT interface; otherwise, the
MMIO interface is used.
Signed-off-by: Prathap Kumar Valsan <prathap.kumar.valsan at intel.com>
Cc: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
Cc: Fei Yang <fei.yang at intel.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab at kernel.org>
---
To avoid mailbombing a large number of people, only mailing lists were Cc'd on the cover letter.
See [PATCH v2 00/21] at: https://lore.kernel.org/all/cover.1657800199.git.mchehab@kernel.org/
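
As a usage note (not part of the patch): a minimal, hypothetical caller sketch
of the new interface, assuming the existing seqno-based full invalidation is
used as the fallback when intel_gt_invalidate_tlb_range() returns false (i.e.
selective invalidation is not supported):

/* Hypothetical caller, for illustration only: try a ranged invalidation
 * first and fall back to the existing seqno-based full invalidation when
 * the selective path is unavailable (the new helper returns false then).
 */
#include "intel_tlb.h"

static void flush_tlb_after_unbind(struct intel_gt *gt,
				   u64 start, u64 length, u32 seqno)
{
	if (!intel_gt_invalidate_tlb_range(gt, start, length))
		intel_gt_invalidate_tlb_full(gt, seqno);
}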
drivers/gpu/drm/i915/gt/intel_gt_regs.h | 8 +++
drivers/gpu/drm/i915/gt/intel_tlb.c | 78 ++++++++++++++++++++++++-
drivers/gpu/drm/i915/gt/intel_tlb.h | 1 +
3 files changed, 86 insertions(+), 1 deletion(-)
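
Also for reference, a small standalone sketch of how the two descriptor dwords
written to XEHP_TLB_INV_DESC0/1 are assembled, mirroring the FIELD_PREP()
calls in mmio_invalidate_range() below; the start address and length are made
up for illustration only:

/* Userspace-style sketch (not kernel code) of the descriptor encoding. */
#include <stdint.h>
#include <stdio.h>

#define GTT_PAGE_SHIFT 12 /* log2(I915_GTT_PAGE_SIZE_4K) */

static unsigned int ilog2_u64(uint64_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	/* Hypothetical, 4K-aligned, power-of-two example values. */
	uint64_t start = 0x100001000ULL;
	uint64_t length = 0x10000ULL; /* 64 KiB == 16 pages */
	uint32_t addr_mask = ilog2_u64(length) - GTT_PAGE_SHIFT;
	uint32_t dw0, dw1;

	/* DESC0: bits 31:12 low page address, 8:3 range mask, 2:1 G, bit 0 valid */
	dw0 = (((uint32_t)start >> GTT_PAGE_SHIFT) << 12) |
	      (addr_mask << 3) | (0x3u << 1) | 0x1u;
	/* DESC1: upper 32 bits of the GPU address */
	dw1 = (uint32_t)(start >> 32);

	printf("DESC0=0x%08x DESC1=0x%08x\n", dw0, dw1);
	return 0;
}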
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 60d6eb5f245b..52508a9c23e5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -1054,6 +1054,14 @@
#define GEN12_GAM_DONE _MMIO(0xcf68)
+#define XEHP_TLB_INV_DESC0 _MMIO(0xcf7c)
+#define XEHP_TLB_INV_DESC0_ADDR_LO REG_GENMASK(31, 12)
+#define XEHP_TLB_INV_DESC0_ADDR_MASK REG_GENMASK(8, 3)
+#define XEHP_TLB_INV_DESC0_G REG_GENMASK(2, 1)
+#define XEHP_TLB_INV_DESC0_VALID REG_BIT(0)
+#define XEHP_TLB_INV_DESC1 _MMIO(0xcf80)
+#define XEHP_TLB_INV_DESC0_ADDR_HI REG_GENMASK(31, 0)
+
#define GEN7_HALF_SLICE_CHICKEN1 _MMIO(0xe100) /* IVB GT1 + VLV */
#define GEN7_MAX_PS_THREAD_DEP (8 << 12)
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1 << 10)
diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.c b/drivers/gpu/drm/i915/gt/intel_tlb.c
index af8cae979489..15ed83226676 100644
--- a/drivers/gpu/drm/i915/gt/intel_tlb.c
+++ b/drivers/gpu/drm/i915/gt/intel_tlb.c
@@ -10,6 +10,7 @@
#include "intel_gt_pm.h"
#include "intel_gt_regs.h"
#include "intel_tlb.h"
+#include "uc/intel_guc.h"
struct reg_and_bit {
i915_reg_t reg;
@@ -159,11 +160,16 @@ void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno)
return;
with_intel_gt_pm_if_awake(gt, wakeref) {
+ struct intel_guc *guc = &gt->uc.guc;
+
mutex_lock(&gt->tlb.invalidate_lock);
if (tlb_seqno_passed(gt, seqno))
goto unlock;
- mmio_invalidate_full(gt);
+ if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc))
+ intel_guc_invalidate_tlb_full(guc, INTEL_GUC_TLB_INVAL_MODE_HEAVY);
+ else
+ mmio_invalidate_full(gt);
write_seqcount_invalidate(&gt->tlb.seqno);
unlock:
@@ -171,6 +177,76 @@ void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno)
}
}
+static bool mmio_invalidate_range(struct intel_gt *gt, u64 start, u64 length)
+{
+ u32 address_mask = (ilog2(length) - ilog2(I915_GTT_PAGE_SIZE_4K));
+ u64 vm_total = BIT_ULL(INTEL_INFO(gt->i915)->ppgtt_size);
+ intel_wakeref_t wakeref;
+ u32 dw0, dw1;
+ int err;
+
+ GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE_4K));
+ GEM_BUG_ON(!IS_ALIGNED(length, I915_GTT_PAGE_SIZE_4K));
+ GEM_BUG_ON(range_overflows(start, length, vm_total));
+
+ dw0 = FIELD_PREP(XEHP_TLB_INV_DESC0_ADDR_LO, (lower_32_bits(start) >> 12)) |
+ FIELD_PREP(XEHP_TLB_INV_DESC0_ADDR_MASK, address_mask) |
+ FIELD_PREP(XEHP_TLB_INV_DESC0_G, 0x3) |
+ FIELD_PREP(XEHP_TLB_INV_DESC0_VALID, 0x1);
+ dw1 = upper_32_bits(start);
+
+ err = 0;
+ with_intel_gt_pm_if_awake(gt, wakeref) {
+ struct intel_uncore *uncore = gt->uncore;
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ mutex_lock(&gt->tlb.invalidate_lock);
+ intel_uncore_write_fw(uncore, XEHP_TLB_INV_DESC1, dw1);
+ intel_uncore_write_fw(uncore, XEHP_TLB_INV_DESC0, dw0);
+ err = __intel_wait_for_register_fw(uncore,
+ XEHP_TLB_INV_DESC0,
+ XEHP_TLB_INV_DESC0_VALID,
+ 0, 100, 10, NULL);
+ mutex_unlock(&gt->tlb.invalidate_lock);
+
+ intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
+ }
+
+ if (err)
+ drm_err_ratelimited(&gt->i915->drm,
+ "TLB invalidation response timed out\n");
+
+ return err == 0;
+}
+
+bool intel_gt_invalidate_tlb_range(struct intel_gt *gt,
+ u64 start, u64 length)
+{
+ struct intel_guc *guc = &gt->uc.guc;
+ intel_wakeref_t wakeref;
+
+ if (intel_gt_is_wedged(gt))
+ return true;
+
+ if (!INTEL_GUC_SUPPORTS_TLB_INVALIDATION_SELECTIVE(guc))
+ return false;
+
+ /*XXX: We are seeing timeouts on guc based tlb invalidations on XEHPSDV.
+ * Until we have a fix, use mmio
+ */
+ if (IS_XEHPSDV(gt->i915))
+ return mmio_invalidate_range(gt, start, length);
+
+ with_intel_gt_pm_if_awake(gt, wakeref) {
+ intel_guc_invalidate_tlb_page_selective(guc,
+ INTEL_GUC_TLB_INVAL_MODE_HEAVY,
+ start, length);
+ }
+
+ return true;
+}
+
void intel_gt_init_tlb(struct intel_gt *gt)
{
mutex_init(&gt->tlb.invalidate_lock);
diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.h b/drivers/gpu/drm/i915/gt/intel_tlb.h
index 46ce25bf5afe..32cc79b1d8a4 100644
--- a/drivers/gpu/drm/i915/gt/intel_tlb.h
+++ b/drivers/gpu/drm/i915/gt/intel_tlb.h
@@ -12,6 +12,7 @@
#include "intel_gt_types.h"
void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno);
+bool intel_gt_invalidate_tlb_range(struct intel_gt *gt, u64 start, u64 length);
void intel_gt_init_tlb(struct intel_gt *gt);
void intel_gt_fini_tlb(struct intel_gt *gt);
--
2.36.1