[PATCH v3 05/19] drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter

Matthew Brost <matthew.brost at intel.com>
Wed May 28 22:58:43 UTC 2025


On Tue, May 27, 2025 at 10:09:49PM +0530, Himal Prasad Ghimiray wrote:
> This change simplifies the logic by ensuring that remapped previous or
> next VMAs are created with the same memory attributes as the original VMA.
> By passing struct xe_vma_mem_attr as a parameter, we maintain consistency
> in memory attributes.
> 
> -v2
>  *dst = *src (Matthew Brost)
> 
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
> ---
>  drivers/gpu/drm/xe/xe_vm.c | 25 ++++++++++++++++++++-----
>  1 file changed, 20 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index a48e1bc8b76a..de6ecff237a6 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2436,8 +2436,13 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
>  
>  ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
>  
> +static void cp_vma_mem_attr(struct xe_vma_mem_attr *dst, struct xe_vma_mem_attr *src)
> +{
> +	*dst = *src;
> +}

I'm not sure this is worth a helper.
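Plain struct assignment reads fine inline at the call site, e.g.:

	vma->attr = attr;

(or *attr if attr becomes a pointer, see below).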

> +
>  static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
> -			      u16 pat_index, unsigned int flags)
> +			      struct xe_vma_mem_attr attr, unsigned int flags)

I'd make attr a pointer.
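i.e., something like:

static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
			      struct xe_vma_mem_attr *attr, unsigned int flags)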

>  {
>  	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
>  	struct drm_exec exec;
> @@ -2466,7 +2471,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
>  	}
>  	vma = xe_vma_create(vm, bo, op->gem.offset,
>  			    op->va.addr, op->va.addr +
> -			    op->va.range - 1, pat_index, flags);
> +			    op->va.range - 1, attr.pat_index, flags);

I'd pass attr in here and set vma->attr in that function.
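e.g., a sketch (exact argument position is up to you):

	vma = xe_vma_create(vm, bo, op->gem.offset,
			    op->va.addr, op->va.addr +
			    op->va.range - 1, attr, flags);

with xe_vma_create() doing vma->attr = *attr; internally, so the
else branch below can be dropped.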

>  	if (IS_ERR(vma))
>  		goto err_unlock;
>  
> @@ -2483,6 +2488,8 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
>  		prep_vma_destroy(vm, vma, false);
>  		xe_vma_destroy_unlocked(vma);
>  		vma = ERR_PTR(err);
> +	} else {
> +		cp_vma_mem_attr(&vma->attr, &attr);
>  	}
>  
>  	return vma;
> @@ -2609,6 +2616,14 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>  		switch (op->base.op) {
>  		case DRM_GPUVA_OP_MAP:
>  		{
> +			struct xe_vma_mem_attr default_attr = {
> +				.preferred_loc = {
> +				.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVMEM_FD,

I'd indent .devmem_fd here.
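i.e.:

	struct xe_vma_mem_attr default_attr = {
		.preferred_loc = {
			.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVMEM_FD,
		},
		.atomic_access = DRM_XE_VMA_ATOMIC_UNDEFINED,
		.pat_index = op->map.pat_index,
	};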

Also, should we initialize the migration policy?

> +				},
> +				.atomic_access = DRM_XE_VMA_ATOMIC_UNDEFINED,
> +				.pat_index = op->map.pat_index

s/op->map.pat_index/op->map.pat_index,/

A trailing comma on the last initializer is typically the style.

Matt

> +			};
> +
>  			flags |= op->map.read_only ?
>  				VMA_CREATE_FLAG_READ_ONLY : 0;
>  			flags |= op->map.is_null ?
> @@ -2618,7 +2633,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>  			flags |= op->map.is_cpu_addr_mirror ?
>  				VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
>  
> -			vma = new_vma(vm, &op->base.map, op->map.pat_index,
> +			vma = new_vma(vm, &op->base.map, default_attr,
>  				      flags);
>  			if (IS_ERR(vma))
>  				return PTR_ERR(vma);
> @@ -2666,7 +2681,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>  
>  			if (op->base.remap.prev) {
>  				vma = new_vma(vm, op->base.remap.prev,
> -					      old->attr.pat_index, flags);
> +					      old->attr, flags);
>  				if (IS_ERR(vma))
>  					return PTR_ERR(vma);
>  
> @@ -2696,7 +2711,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>  
>  			if (op->base.remap.next) {
>  				vma = new_vma(vm, op->base.remap.next,
> -					      old->attr.pat_index, flags);
> +					      old->attr, flags);
>  				if (IS_ERR(vma))
>  					return PTR_ERR(vma);
>  
> -- 
> 2.34.1
> 

