[PATCH v3 09/19] drm/xe/svm: Split system allocator vma in case of madvise call
Himal Prasad Ghimiray
himal.prasad.ghimiray at intel.com
Tue May 27 16:39:53 UTC 2025
If the start or end of the input address range lies within a system
allocator vma, split the vma to create new vmas that match the input range.
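For example (hypothetical addresses), madvise on [0x7000, 0x9000) against a
system allocator vma covering [0x0000, 0x10000) splits it into three vmas,
so a later madvise update touches only the middle one:

  [0x0000, 0x7000)   original attributes
  [0x7000, 0x9000)   madvise range
  [0x9000, 0x10000)  original attributes

A sketch of a hypothetical caller, e.g. a madvise ioctl handler, which must
hold vm->lock in write mode around the call:

  down_write(&vm->lock);
  err = xe_vm_alloc_madvise_vma(vm, start, range);
  up_write(&vm->lock);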
v2 (Matthew Brost)
- Add lockdep_assert_held_write for vm->lock
- Remove unnecessary page-aligned checks
- Add kernel-doc and comments
- Remove unnecessary unwind_ops and return
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
---
drivers/gpu/drm/xe/xe_vm.c | 104 ++++++++++++++++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_vm.h | 2 +
2 files changed, 106 insertions(+)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index d60b711e97e9..c220bf904ee0 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -4161,3 +4161,107 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
}
kvfree(snap);
}
+
+/**
+ * xe_vm_alloc_madvise_vma - Allocate VMAs for madvise ops
+ * @vm: Pointer to the xe_vm structure
+ * @start: Starting address of the input range
+ * @range: Size of the input range
+ *
+ * This function splits existing vmas so that new vmas exactly cover the
+ * user-provided input range.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int xe_vm_alloc_madvise_vma(struct xe_vm *vm, u64 start, u64 range)
+{
+ struct xe_vma_ops vops;
+ struct drm_gpuva_ops *ops = NULL;
+ struct drm_gpuva_op *__op;
+ bool is_cpu_addr_mirror = false;
+ struct xe_vma_mem_attr temp_attr; /* must carry across op iterations */
+ int err;
+
+ vm_dbg(&vm->xe->drm, "MADVISE IN: addr=0x%016llx, size=0x%016llx", start, range);
+
+ lockdep_assert_held_write(&vm->lock);
+
+ vm_dbg(&vm->xe->drm, "MADVISE_OPS_CREATE: addr=0x%016llx, size=0x%016llx", start, range);
+ ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, start, range,
+ DRM_GPUVM_SKIP_GEM_OBJ_VA_SPLIT_MADVISE,
+ NULL, start);
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
+
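+ /* Nothing to do if the range produced no split/map ops */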
+ if (list_empty(&ops->list)) {
+ err = 0;
+ goto free_ops;
+ }
+
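+ /*
+ * First pass over the ops: note whether the vma being split by a
+ * REMAP is a CPU address mirror vma, so the new MAP ops inherit it.
+ */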
+ drm_gpuva_for_each_op(__op, ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+
+ if (__op->op == DRM_GPUVA_OP_REMAP)
+ is_cpu_addr_mirror =
+ xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.remap.unmap->va));
+
+ /*
+ * For madvise ops a DRM_GPUVA_OP_MAP is always preceded by a
+ * DRM_GPUVA_OP_REMAP, so op->map.is_cpu_addr_mirror must be true
+ * if the REMAP was for a CPU address mirror vma.
+ */
+ if (__op->op == DRM_GPUVA_OP_MAP)
+ op->map.is_cpu_addr_mirror = is_cpu_addr_mirror;
+
+ print_op(vm->xe, __op);
+ }
+
+ xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+ err = vm_bind_ioctl_ops_parse(vm, ops, &vops);
+ if (err)
+ goto unwind_ops;
+
+ xe_vm_lock(vm, false);
+
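+ /*
+ * Second pass: destroy the vmas being unmapped and carry their
+ * memory attributes over to the newly created vmas.
+ */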
+ drm_gpuva_for_each_op(__op, ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+ struct xe_vma *vma;
+
+ if (__op->op == DRM_GPUVA_OP_UNMAP) {
+ /* There should be no unmap */
+ XE_WARN_ON("UNEXPECTED UNMAP");
+ xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), NULL);
+ } else if (__op->op == DRM_GPUVA_OP_REMAP) {
+ vma = gpuva_to_vma(op->base.remap.unmap->va);
+ /*
+ * Store the attributes of the vma being unmapped by the REMAP
+ * so they can be assigned to the newly mapped vma.
+ */
+ cp_vma_mem_attr(&temp_attr, &vma->attr);
+ xe_vma_destroy(vma, NULL);
+ } else if (__op->op == DRM_GPUVA_OP_MAP) {
+ vma = op->map.vma;
+ cp_vma_mem_attr(&vma->attr, &temp_attr);
+ }
+ }
+
+ xe_vm_unlock(vm);
+ drm_gpuva_ops_free(&vm->gpuvm, ops);
+ return 0;
+
+unwind_ops:
+ vm_bind_ioctl_ops_unwind(vm, &ops, 1);
+free_ops:
+ if (ops)
+ drm_gpuva_ops_free(&vm->gpuvm, ops);
+ return err;
+}
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 1ef98113fa5b..8151b1b01a13 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -171,6 +171,8 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);
+int xe_vm_alloc_madvise_vma(struct xe_vm *vm, u64 start, u64 range);
+
/**
* to_userptr_vma() - Return a pointer to an embedding userptr vma
* @vma: Pointer to the embedded struct xe_vma
--
2.34.1