[PATCH 2/3] drm/xe: Clear scratch page before vm_bind

Oak Zeng oak.zeng at intel.com
Tue Jan 28 22:21:44 UTC 2025


When a vm runs under fault mode with scratch page enabled, we need
to clear the scratch page mapping for the vm_bind address range
before the vm_bind. Under fault mode, we depend on recoverable page
faults to establish mappings in the page table. If the scratch page
mapping is not cleared, a GPU access to the address won't cause a
page fault because it always hits the existing scratch page mapping.
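
To illustrate the failure mode, here is a minimal userspace sketch
(not part of this patch) of the case being fixed, assuming the
standard xe uAPI from the kernel's xe_drm.h (include path may vary);
fd, bo_handle, addr and range are placeholders, and pat_index / error
handling are trimmed for brevity:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/xe_drm.h>

	static int bind_deferred(int fd, uint32_t bo_handle,
				 uint64_t addr, uint64_t range)
	{
		/* Fault-mode VM with scratch pages enabled. */
		struct drm_xe_vm_create create = {
			.flags = DRM_XE_VM_CREATE_FLAG_FAULT_MODE |
				 DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
		};
		struct drm_xe_vm_bind bind = {0};

		if (ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create))
			return -1;

		bind.vm_id = create.vm_id;
		bind.num_binds = 1;
		bind.bind.obj = bo_handle;
		bind.bind.op = DRM_XE_VM_BIND_OP_MAP;
		bind.bind.addr = addr;
		bind.bind.range = range;
		bind.bind.flags = 0;	/* deferred: PTEs filled on first fault */

		/*
		 * Without the clearing done by this patch in the kernel,
		 * the range keeps its scratch page mapping, the first GPU
		 * access never faults, and the deferred bind never takes
		 * effect.
		 */
		return ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
	}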

When vm_bind is called with the IMMEDIATE flag, no clearing is needed,
as an immediate bind overwrites the scratch page mapping by itself.
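
For comparison, an immediate bind on the same VM would look like the
fragment below (continuing the sketch above); the PTEs are written
during the ioctl itself, so any scratch mapping in the range is simply
replaced:

	bind.bind.flags = DRM_XE_VM_BIND_FLAG_IMMEDIATE;
	/* PTEs are populated at bind time, overwriting the scratch
	 * mapping, so no separate clearing step is needed. */
	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);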

So far only xe2 and xe3 products are allowed to enable scratch page
under fault mode. Other platforms don't allow scratch page under
fault mode, so no such clearing is needed there.
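
As a side note, if pulling the i915 compat header into xe_vm.c is
undesirable, an equivalent gate could presumably be written against
xe's own version macros. A rough, untested sketch, assuming
GRAPHICS_VER() from xe_device.h and that Xe2/Xe3 report graphics
versions 20/30 (note this would also admit future platforms):

	/* Scratch page under fault mode is only allowed on Xe2/Xe3+. */
	if (GRAPHICS_VER(xe) < 20)
		return false;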

Signed-off-by: Oak Zeng <oak.zeng at intel.com>
---
 drivers/gpu/drm/xe/xe_vm.c | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 690330352d4c..196d347c6ac0 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -38,6 +38,7 @@
 #include "xe_trace_bo.h"
 #include "xe_wa.h"
 #include "xe_hmm.h"
+#include "i915_drv.h"
 
 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
 {
@@ -2917,6 +2918,34 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
 	return 0;
 }
 
+static bool __xe_vm_needs_clear_scratch_pages(struct xe_device *xe,
+					      struct xe_vm *vm, u32 bind_flags)
+{
+	if (!xe_vm_in_fault_mode(vm))
+		return false;
+
+	if (!xe_vm_has_scratch(vm))
+		return false;
+
+	if (bind_flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE)
+		return false;
+
+	if (!(IS_LUNARLAKE(xe) || IS_BATTLEMAGE(xe) || IS_PANTHERLAKE(xe)))
+		return false;
+
+	return true;
+}
+
+static void __xe_vm_clear_scratch_pages(struct xe_device *xe, struct xe_vm *vm,
+					u64 start, u64 end)
+{
+	struct xe_tile *tile;
+	u8 id;
+
+	for_each_tile(tile, xe, id)
+		xe_pt_zap_range(tile, vm, start, end);
+}
+
 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
 	struct xe_device *xe = to_xe_device(dev);
@@ -3062,6 +3091,9 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
 		u16 pat_index = bind_ops[i].pat_index;
 
+		if (__xe_vm_needs_clear_scratch_pages(xe, vm, flags))
+			__xe_vm_clear_scratch_pages(xe, vm, addr, addr + range);
+
 		ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
 						  addr, range, op, flags,
 						  prefetch_region, pat_index);
-- 
2.26.3


