[PATCH v3 16/19] drm/xe/svm: Consult madvise preferred location in prefetch
Matthew Brost
matthew.brost at intel.com
Fri May 30 04:24:53 UTC 2025
On Tue, May 27, 2025 at 10:10:00PM +0530, Himal Prasad Ghimiray wrote:
> When the prefetch region is DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, prefetch
> SVM ranges to the preferred location provided by madvise.
>
> v2 (Matthew Brost)
> - Fix region, devmem_fd usages
> - Consulting madvise is applicable to other VMAs too.
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
> ---
> drivers/gpu/drm/xe/xe_svm.c | 11 +++++++++++
> drivers/gpu/drm/xe/xe_svm.h | 7 +++++++
> drivers/gpu/drm/xe/xe_vm.c | 30 ++++++++++++++++++++----------
> 3 files changed, 38 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index 8b6546ebac72..0c929eb192e7 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -822,6 +822,17 @@ struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *t
> return NULL;
> }
>
> +/**
> + * xe_tile_from_dpagemap - Find xe_tile from drm_pagemap
> + * @dpagemap: pointer to struct drm_pagemap
> + *
> + * Return: Pointer to xe_tile
> + */
> +struct xe_tile *xe_tile_from_dpagemap(struct drm_pagemap *dpagemap)
> +{
> + return container_of(dpagemap, struct xe_tile, mem.vram.dpagemap);
> +}
> +
This looks to be in the wrong file; xe_tile.h would be my choice, as a
static inline.
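Something like this in xe_tile.h (untested sketch; assumes xe_tile.h can
see struct xe_tile's full definition, i.e. xe_device_types.h, which
container_of() needs):

static inline struct xe_tile *
xe_tile_from_dpagemap(struct drm_pagemap *dpagemap)
{
	return container_of(dpagemap, struct xe_tile, mem.vram.dpagemap);
}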
> /**
> * xe_svm_handle_pagefault() - SVM handle page fault
> * @vm: The VM.
> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> index 344349313001..a8b5bebf7a54 100644
> --- a/drivers/gpu/drm/xe/xe_svm.h
> +++ b/drivers/gpu/drm/xe/xe_svm.h
> @@ -97,6 +97,8 @@ void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end);
>
> struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile);
>
> +struct xe_tile *xe_tile_from_dpagemap(struct drm_pagemap *dpagemap);
> +
> /**
> * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
> * @range: SVM range
> @@ -328,6 +330,11 @@ struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *t
> return NULL;
> }
>
> +static inline
> +struct xe_tile *xe_tile_from_dpagemap(struct drm_pagemap *dpagemap)
> +{
> + return NULL;
> +}
> #define xe_svm_assert_in_notifier(...) do {} while (0)
> #define xe_svm_range_has_dma_mapping(...) false
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index e5fc2c2be8b2..4520e475399e 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2917,15 +2917,24 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
> int err = 0;
>
> struct xe_svm_range *svm_range;
> + struct drm_pagemap *dpagemap;
> struct drm_gpusvm_ctx ctx = {};
> - struct xe_tile *tile;
> + struct xe_tile *tile = NULL;
> unsigned long i;
> u32 region;
>
> if (!xe_vma_is_cpu_addr_mirror(vma))
> return 0;
>
> - region = op->prefetch_range.region;
> + if (op->prefetch_range.region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC) {
> + dpagemap = xe_vma_resolve_pagemap(vma, tile);
> + if (dpagemap)
> + tile = xe_tile_from_dpagemap(dpagemap);
> + } else {
> + region = op->prefetch_range.region;
> + if (region)
> + tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
> + }
>
> ctx.read_only = xe_vma_read_only(vma);
> ctx.devmem_possible = devmem_possible;
> @@ -2935,18 +2944,18 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
>
> /* TODO: Threading the migration */
> xa_for_each(&op->prefetch_range.range, i, svm_range) {
> - bool needs_vram = xe_svm_range_needs_migrate_to_vram(svm_range, vma, region);
> + bool needs_vram = xe_svm_range_needs_migrate_to_vram(svm_range, vma, !!tile);
>
> if (!needs_vram) {
> xe_svm_range_migrate_to_smem(vm, svm_range);
> } else if (needs_vram) {
The 'else if (needs_vram)' is redundant since the !needs_vram case is
already handled above; this can simply be:

else {
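i.e. the loop body would then read (sketch only, no functional change
intended beyond dropping the redundant check):

	xa_for_each(&op->prefetch_range.range, i, svm_range) {
		bool needs_vram = xe_svm_range_needs_migrate_to_vram(svm_range, vma, !!tile);

		if (!needs_vram) {
			xe_svm_range_migrate_to_smem(vm, svm_range);
		} else {
			/* atomic attrs may mandate VRAM; fall back to root tile */
			if (!tile)
				tile = xe_device_get_root_tile(vm->xe);

			err = xe_svm_alloc_vram(vm, tile, svm_range, &ctx);
			...
		}
	}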
Matt
> - /* If migration is mandated by atomic attributes
> - * in vma and prefetch region is smem force prefetch
> + /* If migration is mandated by atomic attributes
> + * in vma, and prefetch region is smem, force prefetch
> * in vram of root tile.
> */
> - region = region ? region : 1;
> + if (!tile)
> + tile = xe_device_get_root_tile(vm->xe);
>
> - tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
> err = xe_svm_alloc_vram(vm, tile, svm_range, &ctx);
> if (err) {
> drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
> @@ -3014,7 +3023,8 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
> else
> region = op->prefetch.region;
>
> - xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
> + xe_assert(vm->xe, region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC ||
> + region <= ARRAY_SIZE(region_to_mem_type));
>
> err = vma_lock_and_validate(exec,
> gpuva_to_vma(op->base.prefetch.va),
> @@ -3432,8 +3442,8 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
> op == DRM_XE_VM_BIND_OP_PREFETCH) ||
> XE_IOCTL_DBG(xe, prefetch_region &&
> op != DRM_XE_VM_BIND_OP_PREFETCH) ||
> - XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
> - xe->info.mem_region_mask)) ||
> + XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
> + !(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
> XE_IOCTL_DBG(xe, obj &&
> op == DRM_XE_VM_BIND_OP_UNMAP)) {
> err = -EINVAL;
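For reference, with this check in place userspace can request a
madvise-directed prefetch roughly like below (sketch only;
DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC comes from earlier in this series, the
remaining fields are the existing vm_bind uAPI):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

/* Prefetch [addr, addr + range) to the madvise preferred location */
static int prefetch_consult_madvise(int fd, uint32_t vm_id,
				    uint64_t addr, uint64_t range)
{
	struct drm_xe_vm_bind bind = {
		.vm_id = vm_id,
		.num_binds = 1,
		.bind = {
			.op = DRM_XE_VM_BIND_OP_PREFETCH,
			.addr = addr,	/* CPU address mirror VA */
			.range = range,
			.prefetch_mem_region_instance =
				DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC,
		},
	};

	return ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
}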
> --
> 2.34.1
>