[PATCH v2 02/32] drm/xe: Make xe_svm_alloc_vram public

Matthew Brost matthew.brost at intel.com
Thu Apr 17 02:50:35 UTC 2025


On Mon, Apr 07, 2025 at 03:46:49PM +0530, Himal Prasad Ghimiray wrote:
> This function will be used in prefetch too, hence make it public.
> 
> v2:
>   - Add kernel-doc (Matthew Brost)
>   - Rebase
> 
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
> ---
>  drivers/gpu/drm/xe/xe_svm.c | 23 +++++++++++++----------
>  drivers/gpu/drm/xe/xe_svm.h | 23 +++++++++++++++++++++++
>  2 files changed, 36 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index c7424c824a14..de19ad056287 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -661,9 +661,19 @@ static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
>  	return &tile->mem.vram;
>  }
>  
> -static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
> -			     struct xe_svm_range *range,
> -			     const struct drm_gpusvm_ctx *ctx)
> +/**
> + * xe_svm_alloc_vram()- Allocate device memory pages for range,
> + * migrating existing data.
> + * @vm: The VM.
> + * @tile: tile to allocate vram from
> + * @range: SVM range
> + * @ctx: DRM GPU SVM context
> + *
> + * Return: 0 on success, error code on failure.
> + */
> +int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
> +		      struct xe_svm_range *range,
> +		      const struct drm_gpusvm_ctx *ctx)
>  {
>  	struct mm_struct *mm = vm->svm.gpusvm.mm;
>  	struct xe_vram_region *vr = tile_to_vr(tile);
> @@ -717,13 +727,6 @@ static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
>  
>  	return err;
>  }
> -#else
> -static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
> -			     struct xe_svm_range *range,
> -			     const struct drm_gpusvm_ctx *ctx)
> -{
> -	return -EOPNOTSUPP;
> -}
>  #endif
>  
>  
> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> index 3d441eb1f7ea..d8772f841ab7 100644
> --- a/drivers/gpu/drm/xe/xe_svm.h
> +++ b/drivers/gpu/drm/xe/xe_svm.h
> @@ -75,6 +75,20 @@ int xe_svm_bo_evict(struct xe_bo *bo);
>  
>  void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
>  
> +#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
> +int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
> +		      struct xe_svm_range *range,
> +		      const struct drm_gpusvm_ctx *ctx);
> +#else
> +static inline
> +int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
> +		      struct xe_svm_range *range,
> +		      const struct drm_gpusvm_ctx *ctx)
> +{
> +	return -EOPNOTSUPP;
> +}
> +#endif
> +
>  /**
>   * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
>   * @range: SVM range
> @@ -100,6 +114,7 @@ static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
>  #include <linux/interval_tree.h>
>  
>  struct drm_pagemap_device_addr;
> +struct drm_gpusvm_ctx;
>  struct xe_bo;
>  struct xe_gt;
>  struct xe_vm;
> @@ -170,6 +185,14 @@ void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
>  {
>  }
>  
> +static inline
> +int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
> +		      struct xe_svm_range *range,
> +		      const struct drm_gpusvm_ctx *ctx)
> +{
> +	return -EOPNOTSUPP;
> +}
> +

It is a little goofy to have 2 versions of xe_svm_alloc_vram stubbed
out in a single file. How about...

#if IS_ENABLED(CONFIG_DRM_GPUSVM) && IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
prototype
#else
stub
#endif
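
Concretely, in xe_svm.h that could look something like the below (just
a sketch of the combined guard, reusing the prototype and stub from
this patch):

#if IS_ENABLED(CONFIG_DRM_GPUSVM) && IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
		      struct xe_svm_range *range,
		      const struct drm_gpusvm_ctx *ctx);
#else
static inline
int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
		      struct xe_svm_range *range,
		      const struct drm_gpusvm_ctx *ctx)
{
	/* Single stub covers both !CONFIG_DRM_GPUSVM and !CONFIG_DRM_XE_DEVMEM_MIRROR */
	return -EOPNOTSUPP;
}
#endif

so the header only carries one stub.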

Or another option: in xe_svm.c we already stub out xe_devm_add behind
CONFIG_DRM_XE_DEVMEM_MIRROR, so maybe stick xe_svm_alloc_vram's stub
there?
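
If we went that route, the xe_svm.c side could look roughly like the
stub this patch deletes, just kept non-static next to the xe_devm_add
stub (sketch only):

#else /* !CONFIG_DRM_XE_DEVMEM_MIRROR */
int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
		      struct xe_svm_range *range,
		      const struct drm_gpusvm_ctx *ctx)
{
	return -EOPNOTSUPP;
}
#endif

with xe_svm.h then only needing the unconditional prototype (plus the
existing !GPUSVM stub).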

Or lastly, I don't think anything in xe_svm_alloc_vram actually depends
on CONFIG_DRM_XE_DEVMEM_MIRROR either, as the static version is not
hidden behind CONFIG_DRM_XE_DEVMEM_MIRROR.

Matt

>  #define xe_svm_assert_in_notifier(...) do {} while (0)
>  #define xe_svm_range_has_dma_mapping(...) false
>  
> -- 
> 2.34.1
> 

