[PATCH v2] drm/xe: Enable ATS if enabled on the PCI side
Thomas Hellström
thomas.hellstrom at linux.intel.com
Sun Jun 8 17:36:19 UTC 2025
If the IOMMU and the device support ATS, enable it in an effort to offload
the IOMMU TLB.
v2:
- Set the FORCE_FAULT PTE flag when clearing a PTE for faulting VM. (CI)
Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
---
drivers/gpu/drm/xe/regs/xe_gtt_defs.h | 1 +
drivers/gpu/drm/xe/xe_lrc.c | 5 +++++
drivers/gpu/drm/xe/xe_pt.c | 4 ++--
3 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/xe/regs/xe_gtt_defs.h b/drivers/gpu/drm/xe/regs/xe_gtt_defs.h
index 4389e5a76f89..c6b32516b008 100644
--- a/drivers/gpu/drm/xe/regs/xe_gtt_defs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gtt_defs.h
@@ -33,5 +33,6 @@
#define XE_PAGE_PRESENT BIT_ULL(0)
#define XE_PAGE_RW BIT_ULL(1)
+#define XE_PAGE_FORCE_FAULT BIT_ULL(2)
#endif
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 61a2e87990a9..085f7e0568e9 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -976,6 +976,7 @@ static void xe_lrc_setup_utilization(struct xe_lrc *lrc)
#define PVC_CTX_ASID (0x2e + 1)
#define PVC_CTX_ACC_CTR_THOLD (0x2a + 1)
+#define XE_CTX_PASID (0x2c + 1)
static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
struct xe_vm *vm, u32 ring_size, u16 msix_vec,
@@ -1104,6 +1105,10 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
if (xe->info.has_asid && vm)
xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
+ /* If possible, enable ATS to offload the IOMMU TLB */
+ if (to_pci_dev(xe->drm.dev)->ats_enabled)
+ xe_lrc_write_ctx_reg(lrc, XE_CTX_PASID, (1 << 31));
+
lrc->desc = LRC_VALID;
lrc->desc |= FIELD_PREP(LRC_ADDRESSING_MODE, LRC_LEGACY_64B_CONTEXT);
/* TODO: Priority */
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index c9c41fbe125c..15e87be0e491 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -65,7 +65,7 @@ static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
u8 id = tile->id;
if (!xe_vm_has_scratch(vm))
- return 0;
+ return XE_PAGE_FORCE_FAULT;
if (level > MAX_HUGEPTE_LEVEL)
return vm->pt_ops->pde_encode_bo(vm->scratch_pt[id][level - 1]->bo,
@@ -535,7 +535,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
XE_WARN_ON(xe_walk->va_curs_start != addr);
if (xe_walk->clear_pt) {
- pte = 0;
+ pte = XE_PAGE_FORCE_FAULT;
} else {
pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
xe_res_dma(curs) +
--
2.49.0
More information about the Intel-xe
mailing list