[PATCH v6 17/32] drm/xe: Do not allow CPU address mirror VMA unbind if the GPU has bindings
Thomas Hellström
thomas.hellstrom at linux.intel.com
Thu Feb 27 17:01:28 UTC 2025
On Mon, 2025-02-24 at 20:42 -0800, Matthew Brost wrote:
> uAPI is designed with the use case that only mapping a BO to a
> malloc'd
> address will unbind a CPU-address mirror VMA. Therefore, allowing a
> CPU-address mirror VMA to unbind when the GPU has bindings in the
> range
> being unbound does not make much sense. This behavior is not
> supported, as not supporting it simplifies the code. This decision can
> always be revisited if a
> use case arises.
>
> v3:
> - s/arrises/arises (Thomas)
> - s/system allocator/GPU address mirror (Thomas)
> - Kernel doc (Thomas)
> - Newline between function defs (Thomas)
> v5:
> - Kernel doc (Thomas)
> v6:
> - Only compile if CONFIG_DRM_GPUSVM selected (CI, Lucas)
>
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> ---
> drivers/gpu/drm/xe/xe_svm.c | 15 +++++++++++++++
> drivers/gpu/drm/xe/xe_svm.h | 8 ++++++++
> drivers/gpu/drm/xe/xe_vm.c | 16 ++++++++++++++++
> 3 files changed, 39 insertions(+)
>
> diff --git a/drivers/gpu/drm/xe/xe_svm.c
> b/drivers/gpu/drm/xe/xe_svm.c
> index a9d32cd69ae9..80076f4dc4b4 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -434,3 +434,18 @@ int xe_svm_handle_pagefault(struct xe_vm *vm,
> struct xe_vma *vma,
>
> return err;
> }
> +
> +/**
> + * xe_svm_has_mapping() - SVM has mappings
> + * @vm: The VM.
> + * @start: Start address.
> + * @end: End address.
> + *
> + * Check if an address range has SVM mappings.
> + *
> + * Return: True if address range has an SVM mapping, False otherwise
> + */
> +bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
> +{
> + return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
> +}
> diff --git a/drivers/gpu/drm/xe/xe_svm.h
> b/drivers/gpu/drm/xe/xe_svm.h
> index 87cbda5641bb..35e044e492e0 100644
> --- a/drivers/gpu/drm/xe/xe_svm.h
> +++ b/drivers/gpu/drm/xe/xe_svm.h
> @@ -57,6 +57,8 @@ void xe_svm_close(struct xe_vm *vm);
> int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
> struct xe_tile *tile, u64 fault_addr,
> bool atomic);
> +
> +bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
> #else
> static inline bool xe_svm_range_pages_valid(struct xe_svm_range
> *range)
> {
> @@ -86,6 +88,12 @@ int xe_svm_handle_pagefault(struct xe_vm *vm,
> struct xe_vma *vma,
> {
> return 0;
> }
> +
> +static inline
> +bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
> +{
> + return false;
> +}
> #endif
>
> /**
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 870629cbb859..a3ef76504ce8 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2442,6 +2442,17 @@ static int vm_bind_ioctl_ops_parse(struct
> xe_vm *vm, struct drm_gpuva_ops *ops,
> struct xe_vma *old =
> gpuva_to_vma(op->base.remap.unmap-
> >va);
> bool skip = xe_vma_is_cpu_addr_mirror(old);
> + u64 start = xe_vma_start(old), end =
> xe_vma_end(old);
> +
> + if (op->base.remap.prev)
> + start = op->base.remap.prev->va.addr
> +
> + op->base.remap.prev-
> >va.range;
> + if (op->base.remap.next)
> + end = op->base.remap.next->va.addr;
> +
> + if (xe_vma_is_cpu_addr_mirror(old) &&
> + xe_svm_has_mapping(vm, start, end))
> + return -EBUSY;
>
> op->remap.start = xe_vma_start(old);
> op->remap.range = xe_vma_size(old);
> @@ -2524,6 +2535,11 @@ static int vm_bind_ioctl_ops_parse(struct
> xe_vm *vm, struct drm_gpuva_ops *ops,
> {
> struct xe_vma *vma = gpuva_to_vma(op-
> >base.unmap.va);
>
> + if (xe_vma_is_cpu_addr_mirror(vma) &&
> + xe_svm_has_mapping(vm,
> xe_vma_start(vma),
> + xe_vma_end(vma)))
> + return -EBUSY;
> +
> if (!xe_vma_is_cpu_addr_mirror(vma))
> xe_vma_ops_incr_pt_update_ops(vops,
> op->tile_mask);
> break;
More information about the Intel-xe
mailing list