[PATCH v3 07/19] drm/xe/vm: Add a helper xe_vm_range_tilemask_tlb_invalidation()
Matthew Brost
matthew.brost at intel.com
Wed May 28 23:12:33 UTC 2025
On Tue, May 27, 2025 at 10:09:51PM +0530, Himal Prasad Ghimiray wrote:
> Introduce xe_vm_range_tilemask_tlb_invalidation(), which issues a TLB
> invalidation for a specified address range across GTs indicated by a
> tilemask.
>
> Suggested-by: Matthew Brost <matthew.brost at intel.com>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
A couple of nits below, but feel free to post the fixes as a follow-up,
independent patch that can merge ahead of madvise.
> ---
> drivers/gpu/drm/xe/xe_svm.c | 43 +--------------
> drivers/gpu/drm/xe/xe_vm.c | 103 ++++++++++++++++++++++++------------
> drivers/gpu/drm/xe/xe_vm.h | 3 ++
> 3 files changed, 75 insertions(+), 74 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index 871ac81bb04a..59e73187114d 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -167,14 +167,9 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
> {
> struct xe_vm *vm = gpusvm_to_vm(gpusvm);
> struct xe_device *xe = vm->xe;
> - struct xe_tile *tile;
> struct drm_gpusvm_range *r, *first;
> - struct xe_gt_tlb_invalidation_fence
> - fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
> u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
> u8 tile_mask = 0;
> - u8 id;
> - u32 fence_id = 0;
> long err;
>
> xe_svm_assert_in_notifier(vm);
> @@ -220,42 +215,8 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
>
> xe_device_wmb(xe);
>
> - for_each_tile(tile, xe, id) {
> - if (tile_mask & BIT(id)) {
> - int err;
> -
> - xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
> - &fence[fence_id], true);
> -
> - err = xe_gt_tlb_invalidation_range(tile->primary_gt,
> - &fence[fence_id],
> - adj_start,
> - adj_end,
> - vm->usm.asid);
> - if (WARN_ON_ONCE(err < 0))
> - goto wait;
> - ++fence_id;
> -
> - if (!tile->media_gt)
> - continue;
> -
> - xe_gt_tlb_invalidation_fence_init(tile->media_gt,
> - &fence[fence_id], true);
> -
> - err = xe_gt_tlb_invalidation_range(tile->media_gt,
> - &fence[fence_id],
> - adj_start,
> - adj_end,
> - vm->usm.asid);
> - if (WARN_ON_ONCE(err < 0))
> - goto wait;
> - ++fence_id;
> - }
> - }
> -
> -wait:
> - for (id = 0; id < fence_id; ++id)
> - xe_gt_tlb_invalidation_fence_wait(&fence[id]);
> + err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask);
> + XE_WARN_ON(err);
Nit: use WARN_ON_ONCE here, matching the check in the code this replaces.
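i.e. something like this at the call site (just a sketch on top of this
patch, keeping the same error handling as the removed loop):

	err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end,
						    tile_mask);
	WARN_ON_ONCE(err);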
>
> range_notifier_event_end:
> r = first;
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index de6ecff237a6..d60b711e97e9 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -3851,6 +3851,68 @@ void xe_vm_unlock(struct xe_vm *vm)
> dma_resv_unlock(xe_vm_resv(vm));
> }
>
> +/**
> + * xe_vm_range_tilemask_tlb_invalidation - Issue a TLB invalidation on this tilemask for an
> + * address range
> + * @vm: The VM
> + * @start: start address
> + * @end: end address
> + * @tile_mask: mask for which gt's issue tlb invalidation
> + *
> + * Issue a range based TLB invalidation for gt's in tilemask
> + *
> + * Returns 0 for success, negative error code otherwise.
> + */
> +int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
> + u64 end, u8 tile_mask)
> +{
> + struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
> + struct xe_tile *tile;
> + u32 fence_id = 0;
> + u8 id;
> + int err;
> +
> + if (!tile_mask)
> + return 0;
> +
> + for_each_tile(tile, vm->xe, id) {
> + if (tile_mask & BIT(id)) {
> + xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
> + &fence[fence_id], true);
> +
> + err = xe_gt_tlb_invalidation_range(tile->primary_gt,
> + &fence[fence_id],
> + start,
> + end,
> + vm->usm.asid);
> + if (WARN_ON_ONCE(err < 0))
> + goto wait;
Let's just have the WARN_ON_ONCE in the SVM code at the caller - that is
the place where we can't really fail, so it warrants the warn.
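Something like this inside the helper (sketch only, keeping the early bail
but dropping the warn):

	err = xe_gt_tlb_invalidation_range(tile->primary_gt,
					   &fence[fence_id],
					   start, end, vm->usm.asid);
	if (err < 0)
		goto wait;

and then only the SVM caller does the WARN_ON_ONCE on the returned error.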
> + ++fence_id;
> +
> + if (!tile->media_gt)
> + continue;
> +
> + xe_gt_tlb_invalidation_fence_init(tile->media_gt,
> + &fence[fence_id], true);
> +
> + err = xe_gt_tlb_invalidation_range(tile->media_gt,
> + &fence[fence_id],
> + start,
> + end,
> + vm->usm.asid);
> + if (WARN_ON_ONCE(err < 0))
> + goto wait;
> + ++fence_id;
> + }
> + }
> +
> +wait:
> + for (id = 0; id < fence_id; ++id)
> + xe_gt_tlb_invalidation_fence_wait(&fence[id]);
> +
> + return err;
> +}
> +
> /**
> * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
> * @vma: VMA to invalidate
> @@ -3865,11 +3927,9 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
> {
> struct xe_device *xe = xe_vma_vm(vma)->xe;
> struct xe_tile *tile;
> - struct xe_gt_tlb_invalidation_fence
> - fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
> - u8 id;
> - u32 fence_id = 0;
> + u8 tile_mask = 0;
> int ret = 0;
> + u8 id;
>
> xe_assert(xe, !xe_vma_is_null(vma));
> xe_assert(xe, !xe_vma_is_cpu_addr_mirror(vma));
> @@ -3893,37 +3953,14 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
> }
> }
>
> - for_each_tile(tile, xe, id) {
> - if (xe_pt_zap_ptes(tile, vma)) {
> - xe_device_wmb(xe);
> - xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
> - &fence[fence_id],
> - true);
> -
> - ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
> - &fence[fence_id], vma);
You can delete xe_gt_tlb_invalidation_vma now, as this was its only
caller.
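A follow-up could also drop the declaration, something like (sketch,
assuming it is still declared in xe_gt_tlb_invalidation.h; parameter
names inferred from the call sites above):

	-int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
	-				struct xe_gt_tlb_invalidation_fence *fence,
	-				struct xe_vma *vma);

together with its definition in xe_gt_tlb_invalidation.c.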
Matt
> - if (ret)
> - goto wait;
> - ++fence_id;
> + for_each_tile(tile, xe, id)
> + if (xe_pt_zap_ptes(tile, vma))
> + tile_mask |= BIT(id);
>
> - if (!tile->media_gt)
> - continue;
> + xe_device_wmb(xe);
>
> - xe_gt_tlb_invalidation_fence_init(tile->media_gt,
> - &fence[fence_id],
> - true);
> -
> - ret = xe_gt_tlb_invalidation_vma(tile->media_gt,
> - &fence[fence_id], vma);
> - if (ret)
> - goto wait;
> - ++fence_id;
> - }
> - }
> -
> -wait:
> - for (id = 0; id < fence_id; ++id)
> - xe_gt_tlb_invalidation_fence_wait(&fence[id]);
> + ret = xe_vm_range_tilemask_tlb_invalidation(xe_vma_vm(vma), xe_vma_start(vma),
> + xe_vma_end(vma), tile_mask);
>
> vma->tile_invalidated = vma->tile_mask;
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> index 99e164852f63..1ef98113fa5b 100644
> --- a/drivers/gpu/drm/xe/xe_vm.h
> +++ b/drivers/gpu/drm/xe/xe_vm.h
> @@ -228,6 +228,9 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
> struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
> struct xe_svm_range *range);
>
> +int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
> + u64 end, u8 tile_mask);
> +
> int xe_vm_invalidate_vma(struct xe_vma *vma);
>
> int xe_vm_validate_protected(struct xe_vm *vm);
> --
> 2.34.1
>