[PATCH v5 18/23] drm/xe/madvise: Skip vma invalidation if mem attr are unchanged
Matthew Brost
matthew.brost at intel.com
Tue Jul 29 04:19:48 UTC 2025
On Tue, Jul 22, 2025 at 07:05:21PM +0530, Himal Prasad Ghimiray wrote:
> If a VMA within the madvise input range already has the same memory
> attribute as the one requested by the user, skip PTE zapping for that
> VMA to avoid unnecessary invalidation.
>
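For readers skimming the archive: the idiom the patch adds is "compare
before write, and remember whether anything changed". A minimal
stand-alone C sketch of that pattern, using hypothetical stub types
rather than the real xe structures:

    #include <stdbool.h>
    #include <stdint.h>

    struct stub_attr {
            uint32_t pat_index;
    };

    struct stub_vma {
            struct stub_attr attr;
            bool skip_invalidation;
    };

    /* Update the attribute only when it differs, and record whether a
     * PTE zap (invalidation) is needed for this VMA afterwards.
     */
    static void stub_set_pat(struct stub_vma *vma, uint32_t val)
    {
            if (vma->attr.pat_index == val) {
                    vma->skip_invalidation = true;
            } else {
                    vma->skip_invalidation = false;
                    vma->attr.pat_index = val;
            }
    }

The same shape repeats below for preferred location, atomic access and
PAT index.
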
> v2 (Matthew Brost)
> - fix skip_invalidation for new attributes
> - s/u32/bool
> - Remove unnecessary assignment for kzalloc'ed memory
>
> Suggested-by: Matthew Brost <matthew.brost at intel.com>
Reviewed-by: Matthew Brost <matthew.brost at intel.com>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
> ---
> drivers/gpu/drm/xe/xe_vm_madvise.c | 52 +++++++++++++++++++++---------
> drivers/gpu/drm/xe/xe_vm_types.h | 6 ++++
> 2 files changed, 42 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
> index 727833780b4b..fbb6aa8a7a5e 100644
> --- a/drivers/gpu/drm/xe/xe_vm_madvise.c
> +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
> @@ -84,13 +84,19 @@ static void madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
> xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC);
>
> for (i = 0; i < num_vmas; i++) {
> - vmas[i]->attr.preferred_loc.devmem_fd = op->preferred_mem_loc.devmem_fd;
> -
> - /* Till multi-device support is not added migration_policy
> - * is of no use and can be ignored.
> - */
> - vmas[i]->attr.preferred_loc.migration_policy =
> + if (vmas[i]->attr.preferred_loc.devmem_fd == op->preferred_mem_loc.devmem_fd &&
> + vmas[i]->attr.preferred_loc.migration_policy ==
> + op->preferred_mem_loc.migration_policy) {
> + vmas[i]->skip_invalidation = true;
> + } else {
> + vmas[i]->skip_invalidation = false;
> + vmas[i]->attr.preferred_loc.devmem_fd = op->preferred_mem_loc.devmem_fd;
> + /* Until multi-device support is added, migration_policy
> + * is of no use and can be ignored.
> + */
> + vmas[i]->attr.preferred_loc.migration_policy =
> op->preferred_mem_loc.migration_policy;
> + }
> }
> }
>
> @@ -105,7 +111,12 @@ static void madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
> xe_assert(vm->xe, op->atomic.val <= DRM_XE_ATOMIC_CPU);
>
> for (i = 0; i < num_vmas; i++) {
> - vmas[i]->attr.atomic_access = op->atomic.val;
> + if (vmas[i]->attr.atomic_access == op->atomic.val) {
> + vmas[i]->skip_invalidation = true;
> + } else {
> + vmas[i]->skip_invalidation = false;
> + vmas[i]->attr.atomic_access = op->atomic.val;
> + }
>
> bo = xe_vma_bo(vmas[i]);
> if (!bo)
> @@ -130,9 +141,14 @@ static void madvise_pat_index(struct xe_device *xe, struct xe_vm *vm,
>
> xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_PAT);
>
> - for (i = 0; i < num_vmas; i++)
> - vmas[i]->attr.pat_index = op->pat_index.val;
> -
> + for (i = 0; i < num_vmas; i++) {
> + if (vmas[i]->attr.pat_index == op->pat_index.val) {
> + vmas[i]->skip_invalidation = true;
> + } else {
> + vmas[i]->skip_invalidation = false;
> + vmas[i]->attr.pat_index = op->pat_index.val;
> + }
> + }
> }
>
> typedef void (*madvise_func)(struct xe_device *xe, struct xe_vm *vm,
> @@ -158,17 +174,20 @@ static u8 xe_zap_ptes_in_madvise_range(struct xe_vm *vm, u64 start, u64 end)
> false, MAX_SCHEDULE_TIMEOUT) <= 0)
> XE_WARN_ON(1);
>
> - tile_mask = xe_svm_ranges_zap_ptes_in_range(vm, start, end);
> -
> drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
> struct xe_vma *vma = gpuva_to_vma(gpuva);
>
> - if (xe_vma_is_cpu_addr_mirror(vma))
> + if (vma->skip_invalidation)
> continue;
>
> - for_each_tile(tile, vm->xe, id) {
> - if (xe_pt_zap_ptes(tile, vma)) {
> - tile_mask |= BIT(id);
> + if (xe_vma_is_cpu_addr_mirror(vma)) {
> + tile_mask |= xe_svm_ranges_zap_ptes_in_range(vm,
> + xe_vma_start(vma),
> + xe_vma_end(vma));
> + } else {
> + for_each_tile(tile, vm->xe, id) {
> + if (xe_pt_zap_ptes(tile, vma)) {
> + tile_mask |= BIT(id);
>
> /*
> * WRITE_ONCE pairs with READ_ONCE
> @@ -176,6 +195,7 @@ static u8 xe_zap_ptes_in_madvise_range(struct xe_vm *vm, u64 start, u64 end)
> */
> WRITE_ONCE(vma->tile_invalidated,
> vma->tile_invalidated | BIT(id));
> + }
> }
> }
> }
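
The reworked loop above now accumulates the per-tile invalidation mask
itself, using the SVM range zap only for CPU-address-mirror VMAs. For
reference, a stand-alone sketch of the tile-mask accumulation (stub
names, plain stores instead of WRITE_ONCE):

    #include <stdbool.h>
    #include <stdint.h>

    #define STUB_MAX_TILES 2

    /* Pretend zap: returns true if this tile had live PTEs to clear. */
    static bool stub_zap_ptes(int tile_id)
    {
            (void)tile_id;
            return true;
    }

    /* OR each tile's bit into the mask when a zap actually happened, so
     * the caller can issue one TLB invalidation per affected tile.
     */
    static uint8_t stub_zap_all_tiles(void)
    {
            uint8_t tile_mask = 0;

            for (int id = 0; id < STUB_MAX_TILES; id++) {
                    if (stub_zap_ptes(id))
                            tile_mask |= 1u << id; /* BIT(id) in kernel terms */
            }

            return tile_mask;
    }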
> diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> index cd94d8b5819d..81d92d886578 100644
> --- a/drivers/gpu/drm/xe/xe_vm_types.h
> +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> @@ -157,6 +157,12 @@ struct xe_vma {
> /** @tile_staged: bind is staged for this VMA */
> u8 tile_staged;
>
> + /**
> + * @skip_invalidation: Used in madvise to avoid invalidation
> + * if memory attributes don't change
> + */
> + bool skip_invalidation;
> +
> /**
> * @ufence: The user fence that was provided with MAP.
> * Needs to be signalled before UNMAP can be processed.
> --
> 2.34.1
>