[PATCH v2 20/32] drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter

Ghimiray, Himal Prasad himal.prasad.ghimiray at intel.com
Tue May 20 09:28:46 UTC 2025



On 15-05-2025 00:10, Matthew Brost wrote:
> On Mon, May 12, 2025 at 07:36:56PM -0700, Matthew Brost wrote:
>> On Mon, Apr 07, 2025 at 03:47:07PM +0530, Himal Prasad Ghimiray wrote:
>>> This change simplifies the logic by ensuring that remapped previous or
>>> next VMAs are created with the same memory attributes as the original VMA.
>>> By passing struct xe_vma_mem_attr as a parameter, we maintain consistency
>>> in memory attributes.
>>>
>>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
>>> ---
>>>   drivers/gpu/drm/xe/xe_vm.c | 37 ++++++++++++++++++++++++++-----------
>>>   1 file changed, 26 insertions(+), 11 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
>>> index 59e2a951db25..6e5ba58d475e 100644
>>> --- a/drivers/gpu/drm/xe/xe_vm.c
>>> +++ b/drivers/gpu/drm/xe/xe_vm.c
>>> @@ -2421,8 +2421,16 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
>>>   
>>>   ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
>>>   
>>> +static void cp_mem_attr(struct xe_vma_mem_attr *dst, struct xe_vma_mem_attr *src)
>>
>> Drive-by comment - not needed.
>>
>> memcpy(dst, src, sizeof(*src));
>>
> 
> Actually you can just do:
> 
> *dst = *src;

Noted
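
For reference, plain struct assignment in C copies every member,
nested structs included, so the whole helper collapses to one line.
A minimal standalone sketch - the struct layout here is simplified
and illustrative, not the real xe_vma_mem_attr definition:

	/* Simplified stand-in for xe_vma_mem_attr, illustration only. */
	struct mem_attr {
		struct {
			int devmem_fd;
			unsigned int migration_policy;
		} preferred_loc;
		unsigned int atomic_access;
		unsigned short pat_index;
	};

	static void cp_mem_attr(struct mem_attr *dst,
				const struct mem_attr *src)
	{
		*dst = *src;	/* compiler-generated copy of all members */
	}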

> 
>> Matt
>>
>>> +{
>>> +	dst->preferred_loc.migration_policy = src->preferred_loc.migration_policy;
>>> +	dst->preferred_loc.devmem_fd = src->preferred_loc.devmem_fd;
>>> +	dst->atomic_access = src->atomic_access;
>>> +	dst->pat_index = src->pat_index;
>>> +}
>>> +
>>>   static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
>>> -			      u16 pat_index, unsigned int flags)
>>> +			      struct xe_vma_mem_attr attr, unsigned int flags)
>>>   {
>>>   	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
>>>   	struct drm_exec exec;
>>> @@ -2451,7 +2459,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
>>>   	}
>>>   	vma = xe_vma_create(vm, bo, op->gem.offset,
>>>   			    op->va.addr, op->va.addr +
>>> -			    op->va.range - 1, pat_index, flags);
>>> +			    op->va.range - 1, attr.pat_index, flags);
>>>   	if (IS_ERR(vma))
>>>   		goto err_unlock;
>>>   
>>> @@ -2468,14 +2476,10 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
>>>   		prep_vma_destroy(vm, vma, false);
>>>   		xe_vma_destroy_unlocked(vma);
>>>   		vma = ERR_PTR(err);
>>> +	} else {
>>> +		cp_mem_attr(&vma->attr, &attr);
>>>   	}
>>>   
>>> -	/*TODO: assign devmem_fd of local vram once multi device
>>> -	 * support is added.
>>> -	 */
>>> -	vma->attr.preferred_loc.devmem_fd = 1;
>>> -	vma->attr.atomic_access = DRM_XE_VMA_ATOMIC_UNDEFINED;
>>> -
>>>   	return vma;
>>>   }
>>>   
>>> @@ -2600,6 +2604,17 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>>>   		switch (op->base.op) {
>>>   		case DRM_GPUVA_OP_MAP:
>>>   		{
>>> +			struct xe_vma_mem_attr default_attr = {
>>> +				.preferred_loc = {
>>> +						  /*TODO: assign devmem_fd of local vram
>>> +						   * once multi device support is added.
>>> +						   */
>>> +						   .devmem_fd = IS_DGFX(vm->xe) ? 1 : 0,
>>> +						   .migration_policy = 1,	},
> 
> There are a couple of magic '1's here, which I suggested avoiding in patch 18.
> Same question as in patch 18 on the usage of these.
> 
> In general I think this patch makes sense if the two previous patches
> land.
> 
> Matt
> 
>>> +				.atomic_access = DRM_XE_VMA_ATOMIC_UNDEFINED,
>>> +				.pat_index = op->map.pat_index
>>> +			};
>>> +
>>>   			flags |= op->map.read_only ?
>>>   				VMA_CREATE_FLAG_READ_ONLY : 0;
>>>   			flags |= op->map.is_null ?
>>> @@ -2609,7 +2624,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>>>   			flags |= op->map.is_cpu_addr_mirror ?
>>>   				VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
>>>   
>>> -			vma = new_vma(vm, &op->base.map, op->map.pat_index,
>>> +			vma = new_vma(vm, &op->base.map, default_attr,
>>>   				      flags);
>>>   			if (IS_ERR(vma))
>>>   				return PTR_ERR(vma);
>>> @@ -2657,7 +2672,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>>>   
>>>   			if (op->base.remap.prev) {
>>>   				vma = new_vma(vm, op->base.remap.prev,
>>> -					      old->attr.pat_index, flags);
>>> +					      old->attr, flags);
>>>   				if (IS_ERR(vma))
>>>   					return PTR_ERR(vma);
>>>   
>>> @@ -2687,7 +2702,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>>>   
>>>   			if (op->base.remap.next) {
>>>   				vma = new_vma(vm, op->base.remap.next,
>>> -					      old->attr.pat_index, flags);
>>> +					      old->attr, flags);
>>>   				if (IS_ERR(vma))
>>>   					return PTR_ERR(vma);
>>>   
>>> -- 
>>> 2.34.1
>>>
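
On the two magic '1's: if they survive the rework of patch 18, one way
to avoid the bare literals is to name the defaults. A hedged sketch -
the macro names below are illustrative, not existing xe defines:

	/* Hypothetical names, not currently defined in the driver. */
	#define XE_DEVMEM_FD_LOCAL_VRAM		1 /* until multi-device lands */
	#define XE_MIGRATION_POLICY_DEFAULT	1

	struct xe_vma_mem_attr default_attr = {
		.preferred_loc = {
			.devmem_fd = IS_DGFX(vm->xe) ?
				     XE_DEVMEM_FD_LOCAL_VRAM : 0,
			.migration_policy = XE_MIGRATION_POLICY_DEFAULT,
		},
		.atomic_access = DRM_XE_VMA_ATOMIC_UNDEFINED,
		.pat_index = op->map.pat_index,
	};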


