[PATCH 1/3] drm/i915: Define GuC-based ranged TLB invalidations
Nirmoy Das
nirmoy.das at intel.com
Fri Nov 17 14:50:21 UTC 2023
The GuC firmware defines an interface for selective TLB invalidation.
Add support for ranged TLB invalidations based on that interface.
Signed-off-by: Nirmoy Das <nirmoy.das at intel.com>
---
drivers/gpu/drm/i915/gt/intel_tlb.c | 52 +++++++++++++
drivers/gpu/drm/i915/gt/intel_tlb.h | 1 +
.../gpu/drm/i915/gt/uc/abi/guc_actions_abi.h | 1 +
drivers/gpu/drm/i915/gt/uc/intel_guc.h | 4 +
.../gpu/drm/i915/gt/uc/intel_guc_submission.c | 74 +++++++++++++++----
5 files changed, 119 insertions(+), 13 deletions(-)
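
(Note, not for the commit message: a quick userspace sketch of the rounding
done by tlb_page_selective_size() below, with the kernel helpers open-coded
so it compiles standalone; values are illustrative only. It shows how an
unaligned request grows to a single aligned power-of-two window.)

#include <stdint.h>
#include <stdio.h>

#define SZ_4K  0x1000ULL
#define SZ_2M  0x200000ULL
#define SZ_16M 0x1000000ULL

/* Open-coded stand-in for the kernel's roundup_pow_of_two(). */
static uint64_t pow2_roundup(uint64_t x)
{
	uint64_t r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

static uint64_t selective_size(uint64_t *addr, uint64_t length)
{
	const uint64_t end = *addr + length;
	uint64_t start;

	length = pow2_roundup(length);
	if (length < SZ_4K)
		length = SZ_4K;
	if (length >= SZ_2M && length < SZ_16M)
		length = SZ_16M;

	/* round_down(*addr, length) for a power-of-two length */
	start = *addr & ~(length - 1);
	while (start + length < end) {
		length <<= 1;
		start = *addr & ~(length - 1);
	}

	*addr = start;
	return length;
}

int main(void)
{
	uint64_t addr = 0x7000;
	uint64_t len = selective_size(&addr, 0x2000);

	/* An unaligned 8K request at 0x7000 becomes a 64K window at 0x0. */
	printf("addr=0x%llx len=0x%llx\n",
	       (unsigned long long)addr, (unsigned long long)len);
	return 0;
}
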
diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.c b/drivers/gpu/drm/i915/gt/intel_tlb.c
index 4bb13d1890e3..c31fd0875ac4 100644
--- a/drivers/gpu/drm/i915/gt/intel_tlb.c
+++ b/drivers/gpu/drm/i915/gt/intel_tlb.c
@@ -157,6 +157,58 @@ void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno)
}
}
+static u64 tlb_page_selective_size(u64 *addr, u64 length)
+{
+ const u64 end = *addr + length;
+ u64 start;
+
+ /*
+ * Round the length up to a power of two, with a floor of 4K. The
+ * minimum invalidation size the hardware expects for a 2M page is 16M.
+ */
+ length = max_t(u64, roundup_pow_of_two(length), SZ_4K);
+ if (length >= SZ_2M)
+ length = max_t(u64, SZ_16M, length);
+
+ /*
+ * If the start address is not aligned to the length, we must invalidate
+ * at a higher granularity: grow the length (re-aligning the start each
+ * time) until a single length-aligned window covers the whole requested
+ * range.
+ */
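+ /*
+ * e.g. *addr = 0x7000, length = 0x2000 (end = 0x9000): the aligned
+ * window [0x6000, 0x8000) misses the tail of the range, so the length
+ * doubles through 0x4000 and 0x8000 to 0x10000, giving *addr = 0x0 and
+ * a SZ_64K invalidation.
+ */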
+ start = round_down(*addr, length);
+ while (start + length < end) {
+ length <<= 1;
+ start = round_down(*addr, length);
+ }
+
+ *addr = start;
+ return length;
+}
+
+bool intel_gt_invalidate_tlb_range(struct intel_gt *gt,
+ u64 start, u64 length)
+{
+ struct intel_guc *guc = &gt->uc.guc;
+ intel_wakeref_t wakeref;
+ u64 size, vm_total;
+ bool ret = true;
+
+ if (intel_gt_is_wedged(gt))
+ return true;
+
+ vm_total = BIT_ULL(RUNTIME_INFO(gt->i915)->ppgtt_size);
+ /* Align start and length */
+ size = min_t(u64, vm_total, tlb_page_selective_size(&start, length));
+
+ with_intel_gt_pm_if_awake(gt, wakeref)
+ ret = intel_guc_invalidate_tlb_page_selective(guc,
+ INTEL_GUC_TLB_INVAL_MODE_HEAVY,
+ start, size) == 0;
+
+ return ret;
+}
+
void intel_gt_init_tlb(struct intel_gt *gt)
{
mutex_init(&gt->tlb.invalidate_lock);
diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.h b/drivers/gpu/drm/i915/gt/intel_tlb.h
index 337327af92ac..9e5fc40c2b08 100644
--- a/drivers/gpu/drm/i915/gt/intel_tlb.h
+++ b/drivers/gpu/drm/i915/gt/intel_tlb.h
@@ -12,6 +12,7 @@
#include "intel_gt_types.h"
void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno);
+bool intel_gt_invalidate_tlb_range(struct intel_gt *gt, u64 start, u64 length);
void intel_gt_init_tlb(struct intel_gt *gt);
void intel_gt_fini_tlb(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index 33f253410d0c..d9d7e6fec3db 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -189,6 +189,7 @@ enum intel_guc_state_capture_event_status {
enum intel_guc_tlb_invalidation_type {
INTEL_GUC_TLB_INVAL_ENGINES = 0x0,
+ INTEL_GUC_TLB_INVAL_PAGE_SELECTIVE = 0x1,
INTEL_GUC_TLB_INVAL_GUC = 0x3,
};
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 2b6dfe62c8f2..39fcbf118a9a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -431,6 +431,10 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value);
int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value);
+int intel_guc_invalidate_tlb_page_selective(struct intel_guc *guc,
+ enum intel_guc_tlb_inval_mode mode,
+ u64 start, u64 length);
+
static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
return intel_uc_fw_is_supported(&guc->fw);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index d37698bd6b91..10844aa802a1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -4764,23 +4764,13 @@ static bool intel_gt_is_enabled(const struct intel_gt *gt)
return true;
}
-static int guc_send_invalidate_tlb(struct intel_guc *guc,
- enum intel_guc_tlb_invalidation_type type)
+static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 *action, u32 size)
{
struct intel_guc_tlb_wait _wq, *wq = &_wq;
struct intel_gt *gt = guc_to_gt(guc);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
int err;
u32 seqno;
- u32 action[] = {
- INTEL_GUC_ACTION_TLB_INVALIDATION,
- 0,
- REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_TYPE_MASK, type) |
- REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_MODE_MASK,
- INTEL_GUC_TLB_INVAL_MODE_HEAVY) |
- INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
- };
- u32 size = ARRAY_SIZE(action);
/*
* Early guard against GT enablement. TLB invalidation should not be
@@ -4844,13 +4834,71 @@ static int guc_send_invalidate_tlb(struct intel_guc *guc,
/* Send a H2G command to invalidate the TLBs at engine level and beyond. */
int intel_guc_invalidate_tlb_engines(struct intel_guc *guc)
{
- return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_ENGINES);
+ u32 action[] = {
+ INTEL_GUC_ACTION_TLB_INVALIDATION,
+ 0,
+ REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_TYPE_MASK,
+ INTEL_GUC_TLB_INVAL_ENGINES) |
+ REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_MODE_MASK,
+ INTEL_GUC_TLB_INVAL_MODE_HEAVY) |
+ INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
+ };
+ u32 size = ARRAY_SIZE(action);
+
+ return guc_send_invalidate_tlb(guc, action, size);
}
/* Send a H2G command to invalidate the GuC's internal TLB. */
int intel_guc_invalidate_tlb_guc(struct intel_guc *guc)
{
- return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_GUC);
+ u32 action[] = {
+ INTEL_GUC_ACTION_TLB_INVALIDATION,
+ 0,
+ REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_TYPE_MASK,
+ INTEL_GUC_TLB_INVAL_GUC) |
+ REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_MODE_MASK,
+ INTEL_GUC_TLB_INVAL_MODE_HEAVY) |
+ INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
+ };
+ u32 size = ARRAY_SIZE(action);
+
+ return guc_send_invalidate_tlb(guc, action, size);
+}
+
+/*
+ * Selective TLB invalidation for an address range:
+ * TLB entries covering the range are invalidated across all engines.
+ */
+int intel_guc_invalidate_tlb_page_selective(struct intel_guc *guc,
+ enum intel_guc_tlb_inval_mode mode,
+ u64 start, u64 length)
+{
+ u64 vm_total = BIT_ULL(RUNTIME_INFO(guc_to_gt(guc)->i915)->ppgtt_size);
+
+ /*
+ * For page-selective invalidations, this encodes the number of
+ * contiguous PPGTT pages that need to be invalidated, as the log2 of
+ * the 4K page count.
+ */
+ u32 address_mask = length >= vm_total ? 0 : ilog2(length) - ilog2(SZ_4K);
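+ /* e.g. a 64K range gives address_mask = ilog2(SZ_64K) - ilog2(SZ_4K) = 4 */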
+ u32 action[] = {
+ INTEL_GUC_ACTION_TLB_INVALIDATION,
+ 0,
+ REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_TYPE_MASK,
+ INTEL_GUC_TLB_INVAL_PAGE_SELECTIVE) |
+ REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_MODE_MASK, mode) |
+ INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
+ 0,
+ length >= vm_total ? 1 : lower_32_bits(start),
+ upper_32_bits(start),
+ address_mask,
+ };
+
+ GEM_BUG_ON(length < SZ_4K);
+ GEM_BUG_ON(!is_power_of_2(length));
+ GEM_BUG_ON(!IS_ALIGNED(start, length));
+ GEM_BUG_ON(range_overflows(start, length, vm_total));
+
+ return guc_send_invalidate_tlb(guc, action, ARRAY_SIZE(action));
}
int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
--
2.42.0