[PATCH v3 1/2] drm/xe: Add xe_vm_has_valid_gpu_mapping helper

Ghimiray, Himal Prasad himal.prasad.ghimiray at intel.com
Wed Jun 18 02:43:12 UTC 2025



On 18-06-2025 02:20, Matthew Brost wrote:
> On Tue, Jun 17, 2025 at 11:11:40PM +0530, Ghimiray, Himal Prasad wrote:
>>
>>
>> On 16-06-2025 12:00, Matthew Brost wrote:
>>> Rather than having multiple READ_ONCEs of the tile_* fields, each with its
>>> own explanatory comment scattered through the code, use a helper with
>>> kernel doc to provide a single access point and clear rules.
>>>
>>> v3:
>>>    - s/xe_vm_has_valid_gpu_pages/xe_vm_has_valid_gpu_mapping
>>>
>>> Suggested-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
>>> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
>>> ---
>>>    drivers/gpu/drm/xe/xe_gt_pagefault.c |  9 ++-------
>>>    drivers/gpu/drm/xe/xe_pt.c           |  6 +++---
>>>    drivers/gpu/drm/xe/xe_svm.c          | 16 +++++++---------
>>>    drivers/gpu/drm/xe/xe_vm.c           |  2 +-
>>>    drivers/gpu/drm/xe/xe_vm.h           | 19 +++++++++++++++++++
>>>    5 files changed, 32 insertions(+), 20 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
>>> index e2d975b2fddb..3522865c67c9 100644
>>> --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
>>> +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
>>> @@ -69,15 +69,10 @@ static bool access_is_atomic(enum access_type access_type)
>>>    static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
>>>    {
>>> -	/*
>>> -	 * Advisory only check whether the VMA currently has a valid mapping,
>>> -	 * READ_ONCE pairs with WRITE_ONCE in xe_pt.c
>>> -	 */
>>> -	return BIT(tile->id) & READ_ONCE(vma->tile_present) &&
>>> -		!(BIT(tile->id) & READ_ONCE(vma->tile_invalidated));
>>> +	return xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
>>> +					   vma->tile_invalidated);
>>>    }
>>> -
>>>    static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
>>>    		       bool atomic, unsigned int id)
>>>    {
>>> diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
>>> index f39d5cc9f411..9c30111e8786 100644
>>> --- a/drivers/gpu/drm/xe/xe_pt.c
>>> +++ b/drivers/gpu/drm/xe/xe_pt.c
>>> @@ -2196,7 +2196,7 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
>>>    					   DMA_RESV_USAGE_KERNEL :
>>>    					   DMA_RESV_USAGE_BOOKKEEP);
>>>    	}
>>> -	/* All WRITE_ONCE pair with READ_ONCE in xe_gt_pagefault.c */
>>> +	/* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
>>>    	WRITE_ONCE(vma->tile_present, vma->tile_present | BIT(tile->id));
>>>    	if (invalidate_on_bind)
>>>    		WRITE_ONCE(vma->tile_invalidated,
>>> @@ -2255,7 +2255,7 @@ static void range_present_and_invalidated_tile(struct xe_vm *vm,
>>>    					       struct xe_svm_range *range,
>>>    					       u8 tile_id)
>>>    {
>>> -	/* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */
>>> +	/* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
>>>    	lockdep_assert_held(&vm->svm.gpusvm.notifier_lock);
>>> @@ -2324,7 +2324,7 @@ static void op_commit(struct xe_vm *vm,
>>>    	}
>>>    	case DRM_GPUVA_OP_DRIVER:
>>>    	{
>>> -		/* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */
>>> +		/* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
>>>    		if (op->subop == XE_VMA_SUBOP_MAP_RANGE)
>>>    			range_present_and_invalidated_tile(vm, op->map_range.range, tile->id);
>>>    		else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
>>> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
>>> index 2fbbd6a604ea..26418e9bdff0 100644
>>> --- a/drivers/gpu/drm/xe/xe_svm.c
>>> +++ b/drivers/gpu/drm/xe/xe_svm.c
>>> @@ -141,7 +141,10 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
>>>    	for_each_tile(tile, xe, id)
>>>    		if (xe_pt_zap_ptes_range(tile, vm, range)) {
>>>    			tile_mask |= BIT(id);
>>> -			/* Pairs with READ_ONCE in xe_svm_range_is_valid */
>>> +			/*
>>> +			 * WRITE_ONCE pairs with READ_ONCE in
>>> +			 * xe_vm_has_valid_gpu_mapping()
>>> +			 */
>>>    			WRITE_ONCE(range->tile_invalidated,
>>>    				   range->tile_invalidated | BIT(id));
>>>    		}
>>> @@ -605,14 +608,9 @@ static bool xe_svm_range_is_valid(struct xe_svm_range *range,
>>>    				  struct xe_tile *tile,
>>>    				  bool devmem_only)
>>>    {
>>> -	/*
>>> -	 * Advisory only check whether the range currently has a valid mapping,
>>> -	 * READ_ONCE pairs with WRITE_ONCE in xe_pt.c,
>>> -	 * xe_svm_range_notifier_event_begin
>>> -	 */
>>> -	return ((READ_ONCE(range->tile_present) &
>>> -		 ~READ_ONCE(range->tile_invalidated)) & BIT(tile->id)) &&
>>> -		(!devmem_only || xe_svm_range_in_vram(range));
>>> +	return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
>>> +					    range->tile_invalidated) &&
>>> +		(!devmem_only || xe_svm_range_in_vram(range)));
>>>    }
>>>    /** xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
>>> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
>>> index 6ef8c4dab647..04d1a43b81e3 100644
>>> --- a/drivers/gpu/drm/xe/xe_vm.c
>>> +++ b/drivers/gpu/drm/xe/xe_vm.c
>>> @@ -3961,7 +3961,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
>>>    	ret = xe_vm_range_tilemask_tlb_invalidation(xe_vma_vm(vma), xe_vma_start(vma),
>>>    						    xe_vma_end(vma), tile_mask);
>>> -	/* WRITE_ONCE pair with READ_ONCE in xe_gt_pagefault.c */
>>> +	/* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
>>>    	WRITE_ONCE(vma->tile_invalidated, vma->tile_mask);
>>>    	return ret;
>>> diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
>>> index acd3fd6c605b..cc332c4ebdf2 100644
>>> --- a/drivers/gpu/drm/xe/xe_vm.h
>>> +++ b/drivers/gpu/drm/xe/xe_vm.h
>>> @@ -375,6 +375,25 @@ static inline bool xe_vm_is_validating(struct xe_vm *vm)
>>>    	return false;
>>>    }
>>> +/**
>>> + * xe_vm_has_valid_gpu_mapping() - Advisory helper to check if VMA or SVM range has
>>
>> Nit: the helper is no longer used for VMAs in patch 2.
>>
> 
> vma_is_valid() in this patch uses the helper, though.

Just to clarify — the comment I made in patch 1 was actually intended 
for patch 2. The nit was regarding the commit description of patch 2, 
not the code itself. Ignore the comment here.

> 
> Matt
> 
>> Everything else looks good to me.
>> Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
>>
>>> + * a valid GPU mapping
>>> + * @tile: The tile which the GPU mapping belongs to
>>> + * @tile_present: Tile present mask
>>> + * @tile_invalidated: Tile invalidated mask
>>> + *
>>> + * The READ_ONCEs pair with WRITE_ONCEs in either the TLB invalidation paths
>>> + * (xe_vm.c, xe_svm.c) or the binding paths (xe_pt.c). These are not reliable
>>> + * without the notifier lock in userptr or SVM cases, and not reliable without
>>> + * the BO dma-resv lock in the BO case. As such, they should only be used in
>>> + * opportunistic cases (e.g., skipping a page fault fix or not skipping a TLB
>>> + * invalidation) where it is harmless.
>>> + *
>>> + * Return: True if there are valid GPU pages, False otherwise
>>> + */
>>> +#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated)	\
>>> +	((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT(tile->id))
>>> +
>>>    #if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
>>>    void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
>>>    #else
>>
>>
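
For readers following the READ_ONCE/WRITE_ONCE discussion above, here is a
minimal sketch of the intended pairing. The function names
mark_tile_present() and may_skip_fault_fix() are hypothetical and only for
illustration; the xe_vma/xe_tile types, the tile_present/tile_invalidated
fields, the helper, and the kernel's WRITE_ONCE()/BIT() macros are the ones
used in the patch.

/* Writer side (bind/invalidation paths), under the appropriate lock: */
static void mark_tile_present(struct xe_vma *vma, struct xe_tile *tile)
{
	/* Pairs with the READ_ONCEs in xe_vm_has_valid_gpu_mapping() */
	WRITE_ONCE(vma->tile_present, vma->tile_present | BIT(tile->id));
}

/* Reader side, lockless and therefore advisory only: */
static bool may_skip_fault_fix(struct xe_tile *tile, struct xe_vma *vma)
{
	/*
	 * The result is only a hint; callers must use it only where acting
	 * on a stale value is harmless (see the kernel-doc above).
	 */
	return xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
					   vma->tile_invalidated);
}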


