[PATCH v2 7/7] drm/xe/pt: unify xe_pt_svm_pre_commit with userptr

Matthew Auld matthew.auld at intel.com
Fri Apr 4 08:20:38 UTC 2025


On 03/04/2025 22:23, Matthew Brost wrote:
> On Fri, Mar 28, 2025 at 06:10:36PM +0000, Matthew Auld wrote:
>> We now use the same notifier lock for SVM and userptr, with that we can
>> combine xe_pt_userptr_pre_commit and xe_pt_svm_pre_commit.
>>
>> Suggested-by: Matthew Brost <matthew.brost at intel.com>
>> Signed-off-by: Matthew Auld <matthew.auld at intel.com>
>> Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
>> Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
>> ---
>>   drivers/gpu/drm/xe/xe_pt.c | 95 +++++++++++++-------------------------
>>   1 file changed, 33 insertions(+), 62 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
>> index b097c91e2e02..947b82aa19a6 100644
>> --- a/drivers/gpu/drm/xe/xe_pt.c
>> +++ b/drivers/gpu/drm/xe/xe_pt.c
>> @@ -1396,7 +1396,7 @@ static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
>>   	return err;
>>   }
>>   
>> -static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
>> +static int xe_pt_svm_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
>>   {
>>   	struct xe_vm *vm = pt_update->vops->vm;
>>   	struct xe_vma_ops *vops = pt_update->vops;
>> @@ -1409,55 +1409,40 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
>>   	if (err)
>>   		return err;
>>   
>> -	down_read(&vm->svm.gpusvm.notifier_lock);
>> +	drm_gpusvm_notifier_lock(&vm->svm.gpusvm);
>>   
>>   	list_for_each_entry(op, &vops->list, link) {
>> -		err = op_check_userptr(vm, op, pt_update_ops);
>> -		if (err) {
>> -			up_read(&vm->svm.gpusvm.notifier_lock);
>> -			break;
>> +		if (pt_update_ops->needs_svm_lock) {
>> +#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
>> +			struct xe_svm_range *range = op->map_range.range;
>> +
>> +			if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
>> +				continue;
>> +
>> +			xe_svm_range_debug(range, "PRE-COMMIT");
>> +
>> +			xe_assert(vm->xe,
>> +				  xe_vma_is_cpu_addr_mirror(op->map_range.vma));
>> +			xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
>> +
>> +			if (!xe_svm_range_pages_valid(range)) {
>> +				xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
>> +				drm_gpusvm_notifier_unlock(&vm->svm.gpusvm);
>> +				return -EAGAIN;
>> +			}
>> +#endif
>> +		} else {
>> +			err = op_check_userptr(vm, op, pt_update_ops);
> 
> I think it would be better to combine needs_svm_lock and
> needs_userptr_lock into a single variable, rename op_check_userptr to
> op_check_svm_userptr, and then move the code in the above if statement
> into op_check_svm_userptr. Yes, the way you have it coded now works,
> but when Himal's code for SVM prefetch lands a VM operation list could
> have both userptr binds and SVM binds within a single list. Changing
> it per my suggestion should make that work too.

Will try this. Thanks for the comments.
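
Something like the below is what I had in mind, roughly? Untested
sketch only: the dispatch on op->base.op == DRM_GPUVA_OP_DRIVER to
tell SVM range ops apart from everything else is just a guess at the
cleanest check, and op_check_userptr keeps its current signature.

static int op_check_svm_userptr(struct xe_vm *vm, struct xe_vma_op *op,
				struct xe_vm_pgtable_update_ops *pt_update_ops)
{
	/*
	 * Assumed dispatch: SVM range map/unmap are issued as driver
	 * ops; anything else goes through the existing userptr check.
	 */
	if (op->base.op == DRM_GPUVA_OP_DRIVER) {
#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
		struct xe_svm_range *range = op->map_range.range;

		/* Nothing to validate when tearing a range down */
		if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
			return 0;

		xe_svm_range_debug(range, "PRE-COMMIT");

		xe_assert(vm->xe,
			  xe_vma_is_cpu_addr_mirror(op->map_range.vma));
		xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);

		/* Pages went away under us; caller retries the bind */
		if (!xe_svm_range_pages_valid(range)) {
			xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
			return -EAGAIN;
		}
#endif
		return 0;
	}

	return op_check_userptr(vm, op, pt_update_ops);
}

The pre-commit loop then just calls op_check_svm_userptr() per op
under the notifier lock, without caring whether the list holds
userptr binds, SVM binds, or a mix of both.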

> 
> Matt
> 
>> +			if (err) {
>> +				drm_gpusvm_notifier_unlock(&vm->svm.gpusvm);
>> +				break;
>> +			}
>>   		}
>>   	}
>>   
>>   	return err;
>>   }
>>   
>> -#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
>> -static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
>> -{
>> -	struct xe_vm *vm = pt_update->vops->vm;
>> -	struct xe_vma_ops *vops = pt_update->vops;
>> -	struct xe_vma_op *op;
>> -	int err;
>> -
>> -	err = xe_pt_pre_commit(pt_update);
>> -	if (err)
>> -		return err;
>> -
>> -	xe_svm_notifier_lock(vm);
>> -
>> -	list_for_each_entry(op, &vops->list, link) {
>> -		struct xe_svm_range *range = op->map_range.range;
>> -
>> -		if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
>> -			continue;
>> -
>> -		xe_svm_range_debug(range, "PRE-COMMIT");
>> -
>> -		xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
>> -		xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
>> -
>> -		if (!xe_svm_range_pages_valid(range)) {
>> -			xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
>> -			xe_svm_notifier_unlock(vm);
>> -			return -EAGAIN;
>> -		}
>> -	}
>> -
>> -	return 0;
>> -}
>> -#endif
>> -
>>   struct invalidation_fence {
>>   	struct xe_gt_tlb_invalidation_fence base;
>>   	struct xe_gt *gt;
>> @@ -2255,22 +2240,12 @@ static const struct xe_migrate_pt_update_ops migrate_ops = {
>>   	.pre_commit = xe_pt_pre_commit,
>>   };
>>   
>> -static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
>> +static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops = {
>>   	.populate = xe_vm_populate_pgtable,
>>   	.clear = xe_migrate_clear_pgtable_callback,
>> -	.pre_commit = xe_pt_userptr_pre_commit,
>> +	.pre_commit = xe_pt_svm_userptr_pre_commit,
>>   };
>>   
>> -#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
>> -static const struct xe_migrate_pt_update_ops svm_migrate_ops = {
>> -	.populate = xe_vm_populate_pgtable,
>> -	.clear = xe_migrate_clear_pgtable_callback,
>> -	.pre_commit = xe_pt_svm_pre_commit,
>> -};
>> -#else
>> -static const struct xe_migrate_pt_update_ops svm_migrate_ops;
>> -#endif
>> -
>>   /**
>>    * xe_pt_update_ops_run() - Run PT update operations
>>    * @tile: Tile of PT update operations
>> @@ -2296,10 +2271,8 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
>>   	struct xe_vma_op *op;
>>   	int err = 0, i;
>>   	struct xe_migrate_pt_update update = {
>> -		.ops = pt_update_ops->needs_svm_lock ?
>> -			&svm_migrate_ops :
>> -			pt_update_ops->needs_userptr_lock ?
>> -			&userptr_migrate_ops :
>> +		.ops = pt_update_ops->needs_svm_lock || pt_update_ops->needs_userptr_lock ?
>> +			&svm_userptr_migrate_ops :
>>   			&migrate_ops,
>>   		.vops = vops,
>>   		.tile_id = tile->id,
>> @@ -2419,10 +2392,8 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
>>   				  &ifence->base.base, &mfence->base.base);
>>   	}
>>   
>> -	if (pt_update_ops->needs_svm_lock)
>> -		xe_svm_notifier_unlock(vm);
>> -	if (pt_update_ops->needs_userptr_lock)
>> -		up_read(&vm->svm.gpusvm.notifier_lock);
>> +	if (pt_update_ops->needs_svm_lock || pt_update_ops->needs_userptr_lock)
>> +		drm_gpusvm_notifier_unlock(&vm->svm.gpusvm);
>>   
>>   	return fence;
>>   
>> -- 
>> 2.48.1
>>


