[PATCH v2 10/32] drm/xe/svm: Refactor usage of drm_gpusvm* function in xe_svm
Ghimiray, Himal Prasad
himal.prasad.ghimiray at intel.com
Mon Apr 21 04:30:53 UTC 2025
On 17-04-2025 08:27, Matthew Brost wrote:
> On Mon, Apr 07, 2025 at 03:46:57PM +0530, Himal Prasad Ghimiray wrote:
>> Define xe_svm_range_find_or_insert function wrapping
>> drm_gpusvm_range_find_or_insert for reusing in prefetch.
>>
>> Define xe_svm_range_get_pages function wrapping
>> drm_gpusvm_range_get_pages for reusing in prefetch.
>>
>> -v2: Pass the pagefault-defined drm_gpusvm context as a parameter
>> to xe_svm_range_find_or_insert (Matthew Brost)
>>
>> Cc: Matthew Brost <matthew.brost at intel.com>
>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
>> ---
>> drivers/gpu/drm/xe/xe_svm.c | 67 ++++++++++++++++++++++++++++++-------
>> drivers/gpu/drm/xe/xe_svm.h | 20 +++++++++++
>> 2 files changed, 75 insertions(+), 12 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
>> index 6648b4da0bca..8cd35553a927 100644
>> --- a/drivers/gpu/drm/xe/xe_svm.c
>> +++ b/drivers/gpu/drm/xe/xe_svm.c
>> @@ -735,7 +735,6 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>> IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0,
>> };
>> struct xe_svm_range *range;
>> - struct drm_gpusvm_range *r;
>> struct drm_exec exec;
>> struct dma_fence *fence;
>> struct xe_tile *tile = gt_to_tile(gt);
>> @@ -753,13 +752,11 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>> if (err)
>> return err;
>>
>> - r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, fault_addr,
>> - xe_vma_start(vma), xe_vma_end(vma),
>> - &ctx);
>> - if (IS_ERR(r))
>> - return PTR_ERR(r);
>> + range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
>> +
>> + if (IS_ERR(range))
>> + return PTR_ERR(range);
>>
>> - range = to_xe_range(r);
>> if (xe_svm_range_is_valid(range, tile))
>> return 0;
>>
>> @@ -781,13 +778,9 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>> }
>>
>> range_debug(range, "GET PAGES");
>> - err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
>> + err = xe_svm_range_get_pages(vm, range, &ctx);
>> /* Corner where CPU mappings have changed */
>> if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
>> - if (err == -EOPNOTSUPP) {
>> - range_debug(range, "PAGE FAULT - EVICT PAGES");
>> - drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
>> - }
>> drm_dbg(&vm->xe->drm,
>> "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
>> vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
>> @@ -866,6 +859,56 @@ int xe_svm_bo_evict(struct xe_bo *bo)
>> return drm_gpusvm_evict_to_ram(&bo->devmem_allocation);
>> }
>>
>> +/**
>> + * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
>> + * @vm: xe_vm pointer
>> + * @addr: address for which range needs to be found/inserted
>> + * @vma: Pointer to struct xe_vma which mirrors CPU
>> + * @ctx: GPU SVM context
>> + *
>> + * This function finds or inserts a newly allocated SVM range based on the
>> + * address.
>> + *
>> + * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
>> + */
>> +struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
>> + struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
>> +{
>> + struct drm_gpusvm_range *r;
>> +
>> + r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
>> + xe_vma_start(vma), xe_vma_end(vma), ctx);
>> + if (IS_ERR(r))
>> + return ERR_CAST(r);
>> +
>> + return to_xe_range(r);
>> +}
>> +
>> +/**
>> + * xe_svm_range_get_pages() - Get pages for an SVM range
>> + * @vm: Pointer to the struct xe_vm
>> + * @range: Pointer to the xe SVM range structure
>> + * @ctx: GPU SVM context
>> + *
>> + * This function gets pages for an SVM range and ensures they are mapped for
>> + * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
>> + *
>> + * Return: 0 on success, negative error code on failure.
>> + */
>> +int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
>> + struct drm_gpusvm_ctx *ctx)
>> +{
>> + int err;
>> +
>> + err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
>> + if (err == -EOPNOTSUPP) {
>> + range_debug(range, "PAGE FAULT - EVICT PAGES");
>> + drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
>> + }
>> +
>> + return err;
>> +}
>> +
>> #if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
>>
>> static struct drm_pagemap_device_addr
>> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
>> index 1ec90d9bc749..9c4c3aeacc6c 100644
>> --- a/drivers/gpu/drm/xe/xe_svm.h
>> +++ b/drivers/gpu/drm/xe/xe_svm.h
>> @@ -89,6 +89,12 @@ int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
>> }
>> #endif
>>
>> +struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
>> + struct xe_vma *vma, struct drm_gpusvm_ctx *ctx);
>
> One nit, check on the alignment here, checkpatch should complain if this
> is off, hard to tell if this wrong from the patch.
Checkpatch confirms the alignment is OK here.
>
> But patch LGTM:
> Reviewed-by: Matthew Brost <matthew.brost at intel.com>
Thanks for the review.
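
For anyone following along: a rough sketch of how the prefetch path
(added later in this series) is expected to reuse these two wrappers.
The function name below is made up for illustration; this is not the
actual prefetch patch:

    /* Illustrative sketch only -- not the real prefetch code.
     * Assumes the caller holds the same locks as the fault path.
     */
    static int prefetch_one_range_sketch(struct xe_vm *vm,
                                         struct xe_vma *vma, u64 addr,
                                         struct drm_gpusvm_ctx *ctx)
    {
            struct xe_svm_range *range;

            /* Same lookup the fault handler now uses; addr is
             * clamped to the VMA bounds inside the wrapper.
             */
            range = xe_svm_range_find_or_insert(vm, addr, vma, ctx);
            if (IS_ERR(range))
                    return PTR_ERR(range);

            /* Populates and DMA-maps the backing pages; on
             * -EOPNOTSUPP the wrapper has already evicted the range.
             */
            return xe_svm_range_get_pages(vm, range, ctx);
    }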
>
>> +
>> +int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
>> + struct drm_gpusvm_ctx *ctx);
>> +
>> /**
>> * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
>> * @range: SVM range
>> @@ -241,6 +247,20 @@ int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
>> return -EOPNOTSUPP;
>> }
>>
>> +static inline
>> +struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
>> + struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
>> +{
>> + return ERR_PTR(-EINVAL);
>> +}
>> +
>> +static inline
>> +int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
>> + struct drm_gpusvm_ctx *ctx)
>> +{
>> + return -EINVAL;
>> +}
>> +
>> static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
>> {
>> return NULL;
>> --
>> 2.34.1
>>
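One closing note on the error contract, as I read it (not normative):
since the -EOPNOTSUPP eviction now happens inside
xe_svm_range_get_pages(), callers only need a plain retry on the
"CPU mappings changed" errors. A minimal sketch with an arbitrary
retry bound of 3 (the real fault handler retries via its caller
instead), and a made-up function name:

    static int get_pages_with_retry_sketch(struct xe_vm *vm,
                                           struct xe_svm_range *range,
                                           struct drm_gpusvm_ctx *ctx)
    {
            int tries = 3, err;

            do {
                    err = xe_svm_range_get_pages(vm, range, ctx);
                    /* -EOPNOTSUPP/-EFAULT/-EPERM mean the CPU
                     * mappings changed under us; the wrapper has
                     * already evicted on -EOPNOTSUPP, so retrying
                     * is safe.
                     */
            } while ((err == -EOPNOTSUPP || err == -EFAULT ||
                      err == -EPERM) && --tries);

            return err;
    }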