[RFC 21/29] drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise
Himal Prasad Ghimiray
himal.prasad.ghimiray at intel.com
Fri Mar 14 08:02:18 UTC 2025
In the case of the MADVISE ioctl, if the start or end address falls
within a VMA and existing SVM ranges are present, remove the existing
SVM mappings. Then continue with ops_parse, which creates new VMAs by
REMAP-unmapping the old one.
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
---
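A note on the cleanup predicate: xe_svm_range_clean_if_addr_within()
only tears down SVM ranges that the madvise interval actually cuts
into; ranges fully covered by [start, end) are skipped here. A minimal
standalone sketch of that predicate (plain C, illustrative names, not
driver code):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long u64;

/* Mirrors the check in xe_svm_range_clean_if_addr_within(): clean a
 * range only when [start, end) does not fully cover it, i.e. one of
 * the madvise boundaries lands inside the range.
 */
static bool range_needs_clean(u64 start, u64 end, u64 r_start, u64 r_end)
{
	return start > r_start || end < r_end;
}

int main(void)
{
	/* madvise interval [0x2000, 0x6000), illustrative addresses */
	printf("%d\n", range_needs_clean(0x2000, 0x6000, 0x1000, 0x3000)); /* 1: cut at start */
	printf("%d\n", range_needs_clean(0x2000, 0x6000, 0x3000, 0x5000)); /* 0: fully covered */
	printf("%d\n", range_needs_clean(0x2000, 0x6000, 0x5000, 0x7000)); /* 1: cut at end */
	return 0;
}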
drivers/gpu/drm/xe/xe_svm.c | 26 ++++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_svm.h | 7 +++++++
drivers/gpu/drm/xe/xe_vm.c | 18 +++++++++++++++++-
3 files changed, 50 insertions(+), 1 deletion(-)
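The xe_vm.c hunks below key off the hole a REMAP leaves behind, not
the whole old VMA. A sketch of that narrowing under the same
assumptions (illustrative names; remnant stands in for the prev/next
remap entries):

typedef unsigned long long u64;

struct remnant {	/* stands in for op->base.remap.prev/next */
	u64 addr;
	u64 range;
};

/* Narrow [start, end) from the old VMA's extent to the region that is
 * actually being unmapped by the REMAP operation.
 */
static void remap_hole(u64 old_start, u64 old_end,
		       const struct remnant *prev,
		       const struct remnant *next,
		       u64 *start, u64 *end)
{
	*start = old_start;
	*end = old_end;
	if (prev)	/* front of the old VMA survives */
		*start = prev->addr + prev->range;
	if (next)	/* back of the old VMA survives */
		*end = next->addr;
}

For example, an old VMA [0x0, 0x10000) with a prev remnant covering
[0x0, 0x4000) and a next remnant starting at 0xC000 narrows to
[0x4000, 0xC000); only SVM mappings inside that hole hit the -EBUSY
bail-out in ops_create or the cleanup call in ops_parse.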
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 5a4cb14d608e..b181b9bbfa5e 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -821,6 +821,32 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
}
+/**
+ * xe_svm_range_clean_if_addr_within() - Clean SVM mappings and ranges
+ * @vm: The VM
+ * @start: start addr
+ * @end: end addr
+ *
+ * This function cleans up SVM ranges if the start or end address falls inside them.
+ */
+void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end)
+{
+ struct drm_gpusvm_notifier *notifier, *next;
+
+ drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
+ struct drm_gpusvm_range *range, *__next;
+
+ drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
+ if (start > drm_gpusvm_range_start(range) ||
+ end < drm_gpusvm_range_end(range)) {
+ if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
+ drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
+ __xe_svm_garbage_collector(vm, to_xe_range(range));
+ }
+ }
+ }
+}
+
/**
* xe_svm_bo_evict() - SVM evict BO to system memory
* @bo: BO to evict
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 6bb358bf62ad..d57d7cc851ee 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -82,6 +82,8 @@ struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
struct drm_gpusvm_ctx *ctx);
+
+void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end);
#else
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
@@ -157,6 +159,11 @@ int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
return -EINVAL;
}
+static inline
+void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end)
+{
+}
+
#endif
/**
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index b24bd078aeec..f3e9c3f31fe7 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2341,6 +2341,22 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
op->map.pat_index = pat_index;
+ } else if (__op->op == DRM_GPUVA_OP_REMAP) {
+ struct xe_vma *old =
+ gpuva_to_vma(op->base.remap.unmap->va);
+ u64 start = xe_vma_start(old), end = xe_vma_end(old);
+
+ if (op->base.remap.prev)
+ start = op->base.remap.prev->va.addr +
+ op->base.remap.prev->va.range;
+ if (op->base.remap.next)
+ end = op->base.remap.next->va.addr;
+
+ if (xe_vma_is_cpu_addr_mirror(old) &&
+ xe_svm_has_mapping(vm, start, end)) {
+ drm_gpuva_ops_free(&vm->gpuvm, ops);
+ return ERR_PTR(-EBUSY);
+ }
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
@@ -2621,7 +2637,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
if (xe_vma_is_cpu_addr_mirror(old) &&
xe_svm_has_mapping(vm, start, end))
- return -EBUSY;
+ xe_svm_range_clean_if_addr_within(vm, start, end);
op->remap.start = xe_vma_start(old);
op->remap.range = xe_vma_size(old);
--
2.34.1