[PATCH v5 12/23] drm/xe/madvise: Update migration policy based on preferred location

Matthew Brost matthew.brost at intel.com
Tue Jul 29 04:07:40 UTC 2025


On Tue, Jul 22, 2025 at 07:05:15PM +0530, Himal Prasad Ghimiray wrote:
> When the user sets a valid devmem_fd as the preferred location, a GPU
> fault will trigger migration to the tile of the device associated with
> that devmem_fd.
> 
> If the user sets an invalid devmem_fd, the preferred location is the
> current placement (smem) only.
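
For anyone following along, usage from userspace looks roughly like
this. Treat it as a sketch: the preferred_mem_loc fields match this
patch, but the remaining struct drm_xe_madvise fields (start/range/
vm_id) are from memory, so double-check against the final uapi header:

	struct drm_xe_madvise madvise = {
		.vm_id = vm_id,		/* VM owning the range */
		.start = addr,		/* start of the VA range */
		.range = size,		/* size of the VA range */
		.type = DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
		/* Fault-and-migrate to VRAM of the faulting tile; use
		 * DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM to keep the range
		 * in smem instead.
		 */
		.preferred_mem_loc.devmem_fd =
			DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
		/* 0 == default policy; ignored until multi-device lands */
		.preferred_mem_loc.migration_policy = 0,
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_XE_MADVISE, &madvise))
		return -errno;	/* rejected by madvise_args_are_sane() */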
> 
> v2 (Matthew Brost)
> - Default should be faulting tile
> - Remove devmem_fd used as region
> 
> v3 (Matthew Brost)
> - Add migration_policy
> - Fix return condition
> - Fix migrate condition
> 
> Cc: Matthew Brost <matthew.brost at intel.com>

Reviewed-by: Matthew Brost <matthew.brost at intel.com>

> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
> ---
>  drivers/gpu/drm/xe/xe_svm.c        | 40 +++++++++++++++++++++++++++++-
>  drivers/gpu/drm/xe/xe_svm.h        |  8 ++++++
>  drivers/gpu/drm/xe/xe_vm_madvise.c | 21 +++++++++++++++-
>  3 files changed, 67 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index 49d3405aacb9..ba1233d0d5a2 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -790,6 +790,37 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
>  	return true;
>  }
>  
> +/**
> + * xe_vma_resolve_pagemap - Resolve the appropriate DRM pagemap for a VMA
> + * @vma: Pointer to the xe_vma structure containing memory attributes
> + * @tile: Pointer to the xe_tile structure used as fallback for VRAM mapping
> + *
> + * This function determines the correct DRM pagemap to use for a given VMA.
> + * If the devmem_fd in the VMA's preferred location is
> + * DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, it returns NULL, indicating that
> + * no pagemap is available and smem is to be used as the preferred
> + * location. If the devmem_fd is DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE, it
> + * returns the VRAM pagemap associated with the faulting tile.
> + *
> + * Future support for multi-device configurations may use drm_pagemap_from_fd()
> + * to resolve pagemaps from arbitrary file descriptors.
> + *
> + * Return: A pointer to the resolved drm_pagemap, or NULL if none is applicable.
> + */
> +struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
> +{
> +	s32 fd = (s32)vma->attr.preferred_loc.devmem_fd;
> +
> +	if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM)
> +		return NULL;
> +
> +	if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE)
> +		return IS_DGFX(tile_to_xe(tile)) ? xe_tile_local_pagemap(tile) : NULL;
> +
> +	/* TODO: Support multi-device with drm_pagemap_from_fd(fd) */
> +	return NULL;
> +}
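
To summarize the resolution above for readers (derived straight from
the code, so just restating):

	devmem_fd                              resolved pagemap
	DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM -> NULL (stay in smem)
	DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE -> local VRAM pagemap on dGFX,
	                                       NULL on iGFX
	any other fd                        -> NULL for now; later resolved
	                                       via drm_pagemap_from_fd()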
> +
>  /**
>   * xe_svm_handle_pagefault() - SVM handle page fault
>   * @vm: The VM.
> @@ -821,6 +852,7 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>  	};
>  	struct xe_svm_range *range;
>  	struct dma_fence *fence;
> +	struct drm_pagemap *dpagemap;
>  	struct xe_tile *tile = gt_to_tile(gt);
>  	int migrate_try_count = ctx.devmem_only ? 3 : 1;
>  	ktime_t end = 0;
> @@ -850,8 +882,14 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>  
>  	range_debug(range, "PAGE FAULT");
>  
> +	dpagemap = xe_vma_resolve_pagemap(vma, tile);
>  	if (--migrate_try_count >= 0 &&
> -	    xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) {
> +	    xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) {
> +		/* TODO: For multi-device, dpagemap will be used to find
> +		 * the remote tile and remote device. xe_svm_alloc_vram()
> +		 * will need to be modified to use dpagemap for future
> +		 * multi-device support.
> +		 */
>  		err = xe_svm_alloc_vram(tile, range, &ctx);
>  		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
>  		if (err) {
> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> index b0da0e85f0b8..494823afaa98 100644
> --- a/drivers/gpu/drm/xe/xe_svm.h
> +++ b/drivers/gpu/drm/xe/xe_svm.h
> @@ -94,6 +94,8 @@ void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end);
>  
>  u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);
>  
> +struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile);
> +
>  /**
>   * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
>   * @range: SVM range
> @@ -318,6 +320,12 @@ u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
>  	return 0;
>  }
>  
> +static inline
> +struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
> +{
> +	return NULL;
> +}
> +
>  #define xe_svm_assert_in_notifier(...) do {} while (0)
>  #define xe_svm_range_has_dma_mapping(...) false
>  
> diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
> index 62dc5cec8950..17959257ee1d 100644
> --- a/drivers/gpu/drm/xe/xe_vm_madvise.c
> +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
> @@ -78,7 +78,19 @@ static void madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
>  				      struct xe_vma **vmas, int num_vmas,
>  				      struct drm_xe_madvise *op)
>  {
> -	/* Implementation pending */
> +	int i;
> +
> +	xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC);
> +
> +	for (i = 0; i < num_vmas; i++) {
> +		vmas[i]->attr.preferred_loc.devmem_fd = op->preferred_mem_loc.devmem_fd;
> +
> +		/* Until multi-device support is added, migration_policy
> +		 * has no effect and can be ignored.
> +		 */
> +		vmas[i]->attr.preferred_loc.migration_policy =
> +						op->preferred_mem_loc.migration_policy;
> +	}
>  }
>  
>  static void madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
> @@ -178,6 +190,12 @@ static bool madvise_args_are_sane(struct xe_device *xe, const struct drm_xe_madv
>  
>  	switch (args->type) {
>  	case DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC:
> +	{
> +		s32 fd = (s32)args->preferred_mem_loc.devmem_fd;
> +
> +		if (XE_IOCTL_DBG(xe, fd < DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM))
> +			return false;
> +
>  		if (XE_IOCTL_DBG(xe, args->preferred_mem_loc.migration_policy >
>  				     DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES))
>  			return false;
> @@ -188,6 +206,7 @@ static bool madvise_args_are_sane(struct xe_device *xe, const struct drm_xe_madv
>  		if (XE_IOCTL_DBG(xe, args->atomic.reserved))
>  			return false;
>  		break;
> +	}
>  	case DRM_XE_MEM_RANGE_ATTR_ATOMIC:
>  		if (XE_IOCTL_DBG(xe, args->atomic.val > DRM_XE_ATOMIC_CPU))
>  			return false;
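
And with the new devmem_fd bound above, userspace should expect the
ioctl to be rejected (presumably -EINVAL) for out-of-range values,
e.g.:

	/* Both should now bounce off madvise_args_are_sane() */
	madvise.preferred_mem_loc.devmem_fd =
		DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM - 1;
	madvise.preferred_mem_loc.migration_policy =
		DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES + 1;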
> -- 
> 2.34.1
> 

