[PATCH v2 32/32] drm/xe/bo: Update atomic_access attribute on madvise

Ghimiray, Himal Prasad himal.prasad.ghimiray at intel.com
Wed May 21 09:13:50 UTC 2025



On 15-05-2025 04:01, Matthew Brost wrote:
> On Mon, Apr 07, 2025 at 03:47:19PM +0530, Himal Prasad Ghimiray wrote:
>> Update the bo's atomic_access attribute based on user-provided input and
>> use it to determine migration to smem during a CPU fault.
>>
>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
>> ---
>>   drivers/gpu/drm/xe/xe_bo.c         | 21 ++++++++++++++---
>>   drivers/gpu/drm/xe/xe_vm.c         | 11 +++++++--
>>   drivers/gpu/drm/xe/xe_vm_madvise.c | 38 +++++++++++++++++++++++++++---
>>   3 files changed, 62 insertions(+), 8 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
>> index c337790c81ae..fe78f6da7054 100644
>> --- a/drivers/gpu/drm/xe/xe_bo.c
>> +++ b/drivers/gpu/drm/xe/xe_bo.c
>> @@ -1573,6 +1573,12 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
>>   	}
>>   }
>>   
>> +static bool should_migrate_to_smem(struct xe_bo *bo)
>> +{
>> +	return bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_GLOBAL ||
>> +	       bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_CPU;
>> +}
>> +
>>   static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
>>   {
>>   	struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
>> @@ -1581,7 +1587,7 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
>>   	struct xe_bo *bo = ttm_to_xe_bo(tbo);
>>   	bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
>>   	vm_fault_t ret;
>> -	int idx;
>> +	int idx, r = 0;
>>   
>>   	if (needs_rpm)
>>   		xe_pm_runtime_get(xe);
>> @@ -1593,8 +1599,17 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
>>   	if (drm_dev_enter(ddev, &idx)) {
>>   		trace_xe_bo_cpu_fault(bo);
>>   
>> -		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
>> -					       TTM_BO_VM_NUM_PREFAULT);
>> +		if (should_migrate_to_smem(bo)) {
>> +			r = xe_bo_migrate(bo, XE_PL_TT);
>> +			if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
>> +				ret = VM_FAULT_NOPAGE;
>> +			else if (r)
>> +				ret = VM_FAULT_SIGBUS;
>> +		}
>> +		if (!ret)
>> +			ret = ttm_bo_vm_fault_reserved(vmf,
>> +						       vmf->vma->vm_page_prot,
>> +						       TTM_BO_VM_NUM_PREFAULT);
>>   		drm_dev_exit(idx);
>>   	} else {
>>   		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
>> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
>> index f1d4daf90efe..189e97113dbe 100644
>> --- a/drivers/gpu/drm/xe/xe_vm.c
>> +++ b/drivers/gpu/drm/xe/xe_vm.c
>> @@ -3104,9 +3104,16 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
>>   		err = vma_lock_and_validate(exec,
>>   					    gpuva_to_vma(op->base.prefetch.va),
>>   					    false);
>> -		if (!err && !xe_vma_has_no_bo(vma))
>> -			err = xe_bo_migrate(xe_vma_bo(vma),
>> +		if (!err && !xe_vma_has_no_bo(vma)) {
>> +			struct xe_bo *bo = xe_vma_bo(vma);
>> +
>> +			if (region == 0 && !vm->xe->info.has_device_atomics_on_smem &&
>> +			    bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_DEVICE)
>> +				region = 1;
>> +
> 
> So here we're disallowing migration to system memory if atomics don't work there?
> Shouldn't we just let the GPU fault and fixup on fault?

This is in the prefetch path, so the intent is to resolve placement up front and avoid a GPU fault later.
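
To spell out the intent (a sketch restating the hunk above with the rationale as a comment; region 1 indexes a VRAM placement in region_to_mem_type[]):

	/*
	 * Prefetch resolves placement up front: if the user asked for smem
	 * (region == 0) but this platform cannot do device atomics on system
	 * memory and the vma is marked DRM_XE_VMA_ATOMIC_DEVICE, steer the
	 * bo to VRAM now instead of taking a GPU fault on the first atomic
	 * access.
	 */
	if (region == 0 && !vm->xe->info.has_device_atomics_on_smem &&
	    bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_DEVICE)
		region = 1;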
>
>> +			err = xe_bo_migrate(bo,
>>   					    region_to_mem_type[region]);
>> +		}
>>   		break;
>>   	}
>>   	default:
>> diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
>> index f4e0545937b0..bbae2faee603 100644
>> --- a/drivers/gpu/drm/xe/xe_vm_madvise.c
>> +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
>> @@ -87,16 +87,48 @@ static int madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
>>   			  struct xe_vma **vmas, int num_vmas,
>>   			  struct drm_xe_madvise_ops ops)
>>   {
>> -	int i;
>> +	struct xe_bo *bo;
>> +	int err, i;
>>   
>>   	xe_assert(vm->xe, ops.type == DRM_XE_VMA_ATTR_ATOMIC);
>>   	xe_assert(vm->xe, ops.atomic.val > DRM_XE_VMA_ATOMIC_UNDEFINED &&
>>   		  ops.atomic.val <= DRM_XE_VMA_ATOMIC_CPU);
>>   	vm_dbg(&xe->drm, "attr_value = %d", ops.atomic.val);
>>   
>> -	for (i = 0; i < num_vmas; i++)
>> +	for (i = 0; i < num_vmas; i++) {
>>   		vmas[i]->attr.atomic_access = ops.atomic.val;
>> -	/*TODO: handle bo backed vmas */
>> +
>> +		bo = xe_vma_bo(vmas[i]);
>> +		if (!bo)
>> +			continue;
>> +
>> +		if (XE_IOCTL_DBG(xe, ops.atomic.val == DRM_XE_VMA_ATOMIC_CPU &&
>> +				 !(bo->flags & XE_BO_FLAG_SYSTEM)))
>> +			return -EINVAL;
>> +
>> +		if (XE_IOCTL_DBG(xe, ops.atomic.val == DRM_XE_VMA_ATOMIC_DEVICE &&
>> +				 !(bo->flags & XE_BO_FLAG_VRAM0) &&
>> +				     !(bo->flags & XE_BO_FLAG_VRAM1)))
>> +			return -EINVAL;
> 
> Don't device atomics work if xe->info.has_device_atomics_on_smem is set?

Right, need to fix this to also allow device atomics on system-memory bos when xe->info.has_device_atomics_on_smem is set. Thanks.
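
Roughly something like this in the next revision (a sketch only; assumes the existing flag names and that has_device_atomics_on_smem is the right gate for system-memory placements):

	if (XE_IOCTL_DBG(xe, ops.atomic.val == DRM_XE_VMA_ATOMIC_DEVICE &&
			 !(bo->flags & XE_BO_FLAG_VRAM0) &&
			 !(bo->flags & XE_BO_FLAG_VRAM1) &&
			 !(xe->info.has_device_atomics_on_smem &&
			   bo->flags & XE_BO_FLAG_SYSTEM)))
		return -EINVAL;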

> 
>> +
>> +		if (XE_IOCTL_DBG(xe, ops.atomic.val == DRM_XE_VMA_ATOMIC_GLOBAL &&
>> +				 (!(bo->flags & XE_BO_FLAG_SYSTEM) ||
>> +				      (!(bo->flags & XE_BO_FLAG_VRAM0) &&
>> +				      !(bo->flags & XE_BO_FLAG_VRAM1)))))
>> +			return -EINVAL;
> 
> One concern is all of the above are platform specific checks - e.g. if
> we had a device with CXL atomics just work everywhere. I'd at least add
> a comment indicating these are platform specific checks.

Agreed, will add a comment noting that these are platform-specific checks.
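
Something along these lines (a sketch of the comment, not final wording):

	/*
	 * Note: the checks below encode current platform restrictions on
	 * where atomics are supported (e.g. no device atomics on system
	 * memory). A platform where atomics work everywhere would not need
	 * them.
	 */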

> 
>> +
>> +		err = xe_bo_lock(bo, true);
>> +		if (err)
>> +			return err;
>> +		bo->attr.atomic_access = ops.atomic.val;
>> +
>> +		/* Invalidate cpu page table, so bo can migrate to smem in next access */
>> +		if (bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_CPU ||
>> +		    bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_GLOBAL)
>> +			ttm_bo_unmap_virtual(&bo->ttm);
> 
> If already in SMEM, you don't need to unmap, do you?

Correct, it's not required if the bo is already in smem; will add a placement check before invalidating.
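
Roughly (a sketch; assumes the existing xe_bo_is_vram() helper is the right test here):

	/* Invalidate CPU PTEs only when a migration to smem is still needed */
	if ((bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_CPU ||
	     bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_GLOBAL) &&
	    xe_bo_is_vram(bo))
		ttm_bo_unmap_virtual(&bo->ttm);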

> 
> Matt
> 
>> +
>> +		xe_bo_unlock(bo);
>> +	}
>>   	return 0;
>>   }
>>   
>> -- 
>> 2.34.1
>>


