[PATCH v4 11/20] drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise
Himal Prasad Ghimiray
himal.prasad.ghimiray at intel.com
Fri Jun 13 12:55:49 UTC 2025
In the case of the MADVISE ioctl, if the start or end address falls
within a VMA and existing SVM ranges are present, remove the existing
SVM mappings. Then continue with ops_parse, which creates new VMAs via
REMAP, unmapping the old one.
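For reference, a minimal sketch (not part of the diff below) of how the
madvise path could opt into this behaviour. It assumes the flags field in
struct xe_vma_ops and the XE_VMA_OPS_FLAG_MADVISE flag defined elsewhere
in this series, that vm_bind_ioctl_ops_parse() takes the xe_vma_ops
container as its third argument, and the helper name itself is
hypothetical:

/* Hypothetical madvise-side caller, for illustration only. */
static int xe_madvise_parse_ops(struct xe_vm *vm, struct drm_gpuva_ops *ops,
				struct xe_vma_ops *vops)
{
	lockdep_assert_held_write(&vm->lock);

	/*
	 * Tell ops_parse to unmap SVM ranges overlapping the remapped
	 * region instead of returning -EBUSY for CPU address mirror VMAs.
	 */
	vops->flags |= XE_VMA_OPS_FLAG_MADVISE;

	return vm_bind_ioctl_ops_parse(vm, ops, vops);
}

With the flag set, vm_bind_ioctl_ops_parse() ends up calling
xe_svm_unmap_address_range() for the affected region rather than failing.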
v2 (Matthew Brost)
- Use vops flag to unmap ranges in vm_bind_ioctl_ops_parse
- Rename the function
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
---
drivers/gpu/drm/xe/xe_svm.c | 28 ++++++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_svm.h | 8 ++++++++
drivers/gpu/drm/xe/xe_vm.c | 8 ++++++--
3 files changed, 42 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 19420635f1fa..df6992ee2e2d 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -935,6 +935,34 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
}
+/**
+ * xe_svm_unmap_address_range() - Unmap SVM mappings and ranges
+ * @vm: The VM
+ * @start: start addr
+ * @end: end addr
+ *
+ * This function unmaps SVM ranges if the start or end address falls inside them.
+ */
+void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ struct drm_gpusvm_notifier *notifier, *next;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
+ struct drm_gpusvm_range *range, *__next;
+
+ drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
+ if (start > drm_gpusvm_range_start(range) ||
+ end < drm_gpusvm_range_end(range)) {
+ if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
+ drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
+ __xe_svm_garbage_collector(vm, to_xe_range(range));
+ }
+ }
+ }
+}
+
/**
* xe_svm_bo_evict() - SVM evict BO to system memory
* @bo: BO to evict
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index af8f285b6caa..4e5d42323679 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -92,6 +92,9 @@ bool xe_svm_range_validate(struct xe_vm *vm,
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);
+
+void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end);
+
/**
* xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
* @range: SVM range
@@ -312,6 +315,11 @@ u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
return 0;
}
+static inline
+void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
+{
+}
+
#define xe_svm_assert_in_notifier(...) do {} while (0)
#define xe_svm_range_has_dma_mapping(...) false
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index e059d9810d26..0872df8d0b15 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2663,8 +2663,12 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
end = op->base.remap.next->va.addr;
if (xe_vma_is_cpu_addr_mirror(old) &&
- xe_svm_has_mapping(vm, start, end))
- return -EBUSY;
+ xe_svm_has_mapping(vm, start, end)) {
+ if (vops->flags & XE_VMA_OPS_FLAG_MADVISE)
+ xe_svm_unmap_address_range(vm, start, end);
+ else
+ return -EBUSY;
+ }
op->remap.start = xe_vma_start(old);
op->remap.range = xe_vma_size(old);
--
2.34.1