[PATCH v2 22/32] drm/xe/svm: Split system allocator vma in case of madvise call
Matthew Brost
matthew.brost at intel.com
Wed May 14 19:02:18 UTC 2025
On Mon, Apr 07, 2025 at 03:47:09PM +0530, Himal Prasad Ghimiray wrote:
> If the start or end of the input address range lies within a system
> allocator VMA, split that VMA to create new VMAs matching the input
> range.
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
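
A concrete example in the commit message might help future readers. My
understanding of the intended behavior, as a sketch: given a single system
allocator VMA spanning [0x100000, 0x300000) and a madvise call on
[0x180000, 0x200000), the result would be

  before: |-------- system allocator VMA [0x100000, 0x300000) --------|
  after:  | [0x100000, 0x180000) | [0x180000, 0x200000) | [0x200000, 0x300000) |

with all three resulting VMAs inheriting the original VMA's memory
attributes.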
> ---
> drivers/gpu/drm/xe/xe_vm.c | 84 ++++++++++++++++++++++++++++++++++++++
> drivers/gpu/drm/xe/xe_vm.h | 2 +
> 2 files changed, 86 insertions(+)
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 6e5ba58d475e..c7c012afe9eb 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -4127,3 +4127,87 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
> }
> kvfree(snap);
> }
> +
Kernel doc is missing here; I missed this in my previous reply. A sketch of
what I have in mind (the parameter descriptions are my reading of the code,
so adjust as needed):
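
/**
 * xe_vm_alloc_madvise_vma() - Split system allocator VMAs to match a range
 * @vm: Pointer to the xe_vm the range lies within
 * @start: Start address of the input range
 * @range: Size of the input range
 *
 * If @start or @start + @range lies within a system allocator VMA, split
 * that VMA so the resulting VMAs line up with the (SZ_4K aligned) input
 * range, with the new VMAs inheriting the original VMA's memory
 * attributes.
 *
 * Return: 0 on success, negative error code on failure.
 */

Matt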
> +int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t start, uint64_t range)
> +{
> +	struct xe_vma_ops vops;
> +	struct drm_gpuva_ops *ops;
> +	struct drm_gpuva_op *__op;
> +	struct xe_vma_mem_attr temp_attr;
> +	bool is_cpu_addr_mirror = false;
> +	int err;
> +
> + vm_dbg(&vm->xe->drm, "MADVISE IN: addr=0x%016llx, size=0x%016llx", start, range);
> +
> +	/* Expand to the SZ_4K aligned superset of [start, start + range) */
> +	if ((start | range) & ~PAGE_MASK) {
> +		uint64_t end = ALIGN(start + range, SZ_4K);
> +
> +		start = ALIGN_DOWN(start, SZ_4K);
> +		range = end - start;
> +	}
> +
> + vm_dbg(&vm->xe->drm, "MADVISE_OPS_CREATE: addr=0x%016llx, size=0x%016llx", start, range);
> + ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, start, range,
> + DRM_GPUVM_SKIP_GEM_OBJ_VA_SPLIT_MADVISE,
> + NULL, start);
> +	if (IS_ERR(ops))
> +		return PTR_ERR(ops);
> +
> + if (list_empty(&ops->list)) {
> + err = 0;
> + goto free_ops;
> + }
> +
> + drm_gpuva_for_each_op(__op, ops) {
> + struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
> +
> +		if (__op->op == DRM_GPUVA_OP_REMAP)
> +			is_cpu_addr_mirror =
> +				xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.remap.unmap->va));
> +
> + if (__op->op == DRM_GPUVA_OP_MAP)
> + op->map.is_cpu_addr_mirror = is_cpu_addr_mirror;
> +
> + print_op(vm->xe, __op);
> + }
> +
> + xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
> + err = vm_bind_ioctl_ops_parse(vm, ops, &vops);
> + if (err)
> + goto unwind_ops;
> +
> + xe_vm_lock(vm, false);
> +
> + drm_gpuva_for_each_op(__op, ops) {
> + struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
> + struct xe_vma *vma;
> +
> +		if (__op->op == DRM_GPUVA_OP_UNMAP) {
> +			/* There should be no unmap */
> +			xe_assert(vm->xe, false);
> +			xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), NULL);
> +		} else if (__op->op == DRM_GPUVA_OP_REMAP) {
> +			/* Save the attributes of the VMA being split */
> +			vma = gpuva_to_vma(op->base.remap.unmap->va);
> +			cp_mem_attr(&temp_attr, &vma->attr);
> +			xe_vma_destroy(vma, NULL);
> +		} else if (__op->op == DRM_GPUVA_OP_MAP) {
> +			/* New VMAs inherit the attributes saved from the REMAP */
> +			vma = op->map.vma;
> +			cp_mem_attr(&vma->attr, &temp_attr);
> +		}
> + }
> +
> + xe_vm_unlock(vm);
> + drm_gpuva_ops_free(&vm->gpuvm, ops);
> + return 0;
> +
> +unwind_ops:
> + vm_bind_ioctl_ops_unwind(vm, &ops, 1);
> +free_ops:
> +	drm_gpuva_ops_free(&vm->gpuvm, ops);
> + return err;
> +}
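
For what it's worth, the call pattern I'd expect from the madvise ioctl
path, as a hypothetical sketch (the handler and its args are made up here,
not part of this series):

	/* Hypothetical madvise handler snippet -- illustrative only */
	down_write(&vm->lock);
	err = xe_vm_alloc_madvise_vma(vm, args->start, args->range);
	up_write(&vm->lock);
	if (err)
		return err;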
> diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> index 99e164852f63..4e45230b7205 100644
> --- a/drivers/gpu/drm/xe/xe_vm.h
> +++ b/drivers/gpu/drm/xe/xe_vm.h
> @@ -171,6 +171,8 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
>
> struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);
>
> +int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t start, uint64_t range);
> +
> /**
> * to_userptr_vma() - Return a pointer to an embedding userptr vma
> * @vma: Pointer to the embedded struct xe_vma
> --
> 2.34.1
>