[RFC 10/29] drm/xe/svm: Refactor usage of drm_gpusvm* function in xe_svm

Ghimiray, Himal Prasad himal.prasad.ghimiray at intel.com
Mon Apr 7 06:15:04 UTC 2025



On 04-04-2025 02:24, Matthew Brost wrote:
> On Fri, Mar 14, 2025 at 01:32:07PM +0530, Himal Prasad Ghimiray wrote:
>> Define xe_svm_range_find_or_insert function wrapping
>> drm_gpusvm_range_find_or_insert for reusing in prefetch.
>>
>> Define xe_svm_range_get_pages function wrapping
>> drm_gpusvm_range_get_pages for reusing in prefetch.
>>
>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
>> ---
>>   drivers/gpu/drm/xe/xe_svm.c | 73 +++++++++++++++++++++++++++++++------
>>   drivers/gpu/drm/xe/xe_svm.h | 20 ++++++++++
>>   2 files changed, 81 insertions(+), 12 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
>> index 07511011aba6..5a4cb14d608e 100644
>> --- a/drivers/gpu/drm/xe/xe_svm.c
>> +++ b/drivers/gpu/drm/xe/xe_svm.c
>> @@ -714,7 +714,6 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>>   			IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0,
>>   	};
>>   	struct xe_svm_range *range;
>> -	struct drm_gpusvm_range *r;
>>   	struct drm_exec exec;
>>   	struct dma_fence *fence;
>>   	ktime_t end = 0;
>> @@ -729,13 +728,11 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>>   	if (err)
>>   		return err;
>>   
>> -	r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, fault_addr,
>> -					    xe_vma_start(vma), xe_vma_end(vma),
>> -					    &ctx);
>> -	if (IS_ERR(r))
>> -		return PTR_ERR(r);
>> +	range = xe_svm_range_find_or_insert(vm, fault_addr, vma);
>> +
>> +	if (IS_ERR(range))
>> +		return PTR_ERR(range);
>>   
>> -	range = to_xe_range(r);
>>   	if (xe_svm_range_is_valid(range, tile))
>>   		return 0;
>>   
>> @@ -757,13 +754,9 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>>   	}
>>   
>>   	range_debug(range, "GET PAGES");
>> -	err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
>> +	err = xe_svm_range_get_pages(vm, range, &ctx);
>>   	/* Corner where CPU mappings have changed */
>>   	if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
>> -		if (err == -EOPNOTSUPP) {
>> -			range_debug(range, "PAGE FAULT - EVICT PAGES");
>> -			drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
>> -		}
>>   		drm_dbg(&vm->xe->drm,
>>   			"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
>>   			vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
>> @@ -842,6 +835,62 @@ int xe_svm_bo_evict(struct xe_bo *bo)
>>   	return drm_gpusvm_evict_to_ram(&bo->devmem_allocation);
>>   }
>>   
>> +/**
>> + * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
>> + * @vm: xe_vm pointer
>> + * @addr: address for which range needs to be found/inserted
>> + * @vma: Pointer to struct xe_vma which mirrors the CPU mapping
>> + *
>> + * This function finds or inserts a newly allocated SVM range based on the
>> + * address.
>> + *
>> + * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
>> + */
>> +struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
>> +						 struct xe_vma *vma)
> 
> I think you want a drm_gpusvm_ctx argument here. It is odd to duplicate
> the context locally when the page fault handler has already set one up.

Makes sense. Will fix in the next version.
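
Something along these lines (rough sketch only, the exact signature in the
next revision may differ):

struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma,
						 struct drm_gpusvm_ctx *ctx)
{
	struct drm_gpusvm_range *r;

	/* Clamp the address into the VMA and reuse the caller's ctx */
	r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm,
					    max(addr, xe_vma_start(vma)),
					    xe_vma_start(vma), xe_vma_end(vma),
					    ctx);
	if (IS_ERR(r))
		return ERR_CAST(r);

	return to_xe_range(r);
}

with xe_svm_handle_pagefault() passing its existing ctx through instead of
building a second one here.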

> 
> Matt
> 
>> +{
>> +	struct drm_gpusvm_range *r;
>> +
>> +	struct drm_gpusvm_ctx ctx = {
>> +		.read_only = xe_vma_read_only(vma),
>> +		.devmem_possible = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
>> +		.check_pages_threshold = IS_DGFX(vm->xe) &&
>> +					 IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0,
>> +	};
>> +
>> +	r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
>> +					    xe_vma_start(vma), xe_vma_end(vma), &ctx);
>> +	if (IS_ERR(r))
>> +		return ERR_CAST(r);
>> +
>> +	return to_xe_range(r);
>> +}
>> +
>> +/**
>> + * xe_svm_range_get_pages() - Get pages for an SVM range
>> + * @vm: Pointer to the struct xe_vm
>> + * @range: Pointer to the xe SVM range structure
>> + * @ctx: GPU SVM context
>> + *
>> + * This function gets pages for an SVM range and ensures they are mapped for
>> + * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
>> + *
>> + * Return: 0 on success, negative error code on failure.
>> + */
>> +int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
>> +			   struct drm_gpusvm_ctx *ctx)
>> +{
>> +	int err;
>> +
>> +	err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
>> +	if (err == -EOPNOTSUPP) {
>> +		range_debug(range, "PAGE FAULT - EVICT PAGES");
>> +		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
>> +	}
>> +
>> +	return err;
>> +}
>> +
>>   #if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
>>   static struct drm_pagemap_device_addr
>>   xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
>> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
>> index c8add37614ec..6bb358bf62ad 100644
>> --- a/drivers/gpu/drm/xe/xe_svm.h
>> +++ b/drivers/gpu/drm/xe/xe_svm.h
>> @@ -76,6 +76,12 @@ void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
>>   int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
>>   		      struct xe_svm_range *range,
>>   		      const struct drm_gpusvm_ctx *ctx);
>> +
>> +struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
>> +						 struct xe_vma *vma);
>> +
>> +int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
>> +			   struct drm_gpusvm_ctx *ctx);
>>   #else
>>   static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
>>   {
>> @@ -137,6 +143,20 @@ int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
>>   	return 0;
>>   }
>>   
>> +static inline
>> +struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
>> +						 struct xe_vma *vma)
>> +{
>> +	return ERR_PTR(-EINVAL);
>> +}
>> +
>> +static inline
>> +int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
>> +			   struct drm_gpusvm_ctx *ctx)
>> +{
>> +	return -EINVAL;
>> +}
>> +
>>   #endif
>>   
>>   /**
>> -- 
>> 2.34.1
>>
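
For reference, the intended reuse of these two wrappers in the prefetch
series would look roughly like the below, using the helper signatures as
posted here; xe_svm_prefetch_range() is only a placeholder name for
illustration and not part of this patch:

static int xe_svm_prefetch_range(struct xe_vm *vm, struct xe_vma *vma,
				 u64 addr, struct drm_gpusvm_ctx *ctx)
{
	struct xe_svm_range *range;
	int err;

	/* Reuse the wrappers instead of open-coding drm_gpusvm_* calls */
	range = xe_svm_range_find_or_insert(vm, addr, vma);
	if (IS_ERR(range))
		return PTR_ERR(range);

	err = xe_svm_range_get_pages(vm, range, ctx);
	if (err)
		return err;

	/* Migration/binding of the range follows in the actual prefetch code */
	return 0;
}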


