[Intel-xe] [PATCH v2 5/6] drm/xe/migrate: rather use pte_encode helpers
Matt Roper
matthew.d.roper at intel.com
Fri Sep 15 22:19:21 UTC 2023
On Thu, Sep 14, 2023 at 04:31:18PM +0100, Matthew Auld wrote:
> We need to avoid using stuff like PPAT_CACHED directly, which is no
> longer going to work on newer platforms. At some point we can just
> directly use the pat_index, but for now just use XE_CACHE_WB.
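For anyone following along: the helper just centralizes the bits the call
sites used to OR in by hand. Roughly (a simplified sketch for illustration,
not the exact body of __xe_pte_encode() in this revision):

	u64 pte_encode_sketch(u64 pte, enum xe_cache_level cache, u32 pt_level)
	{
		pte |= XE_PAGE_PRESENT | XE_PAGE_RW;	/* previously open-coded at the call sites */

		if (pt_level == 2)
			pte |= XE_PDPE_PS_1G;		/* 1G entries, as in xe_migrate_prepare_vm() */

		if (cache == XE_CACHE_WB)
			pte |= PPAT_CACHED;		/* the platform detail callers should stop caring about */

		return pte;
	}

so passing XE_CACHE_WB keeps the encoding identical today, while the helper
is the one place that later grows the pat_index handling.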
>
> Signed-off-by: Matthew Auld <matthew.auld at intel.com>
> Cc: Pallavi Mishra <pallavi.mishra at intel.com>
> Cc: Lucas De Marchi <lucas.demarchi at intel.com>
> Cc: Matt Roper <matthew.d.roper at intel.com>
> ---
> drivers/gpu/drm/xe/xe_migrate.c | 7 ++++---
> drivers/gpu/drm/xe/xe_pt.c | 12 ++++++------
> drivers/gpu/drm/xe/xe_pt.h | 2 ++
> 3 files changed, 12 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
> index 46f88f3a8c58..26cbc9107501 100644
> --- a/drivers/gpu/drm/xe/xe_migrate.c
> +++ b/drivers/gpu/drm/xe/xe_migrate.c
> @@ -257,8 +257,9 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
>
> level = 2;
> ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
> - flags = XE_PAGE_RW | XE_PAGE_PRESENT | PPAT_CACHED |
> - XE_PPGTT_PTE_DM | XE_PDPE_PS_1G;
> +
> + flags = XE_PPGTT_PTE_DM;
> + flags = __xe_pte_encode(flags, XE_CACHE_WB, vm, NULL, 2);
Might be best to pass 'level' as the final parameter since we already
have it sitting around as a local variable?
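I.e., keeping the rest of the hunk as-is, something like (untested):

	level = 2;
	ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;

	flags = XE_PPGTT_PTE_DM;
	flags = __xe_pte_encode(flags, XE_CACHE_WB, vm, NULL, level);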
Reviewed-by: Matt Roper <matthew.d.roper at intel.com>
>
> /*
> * Use 1GB pages, it shouldn't matter the physical amount of
> @@ -493,7 +494,7 @@ static void emit_pte(struct xe_migrate *m,
> addr += vram_region_gpu_offset(bo->ttm.resource);
> addr |= XE_PPGTT_PTE_DM;
> }
> - addr |= PPAT_CACHED | XE_PAGE_PRESENT | XE_PAGE_RW;
> + addr = __xe_pte_encode(addr, XE_CACHE_WB, m->q->vm, NULL, 0);
> bb->cs[bb->len++] = lower_32_bits(addr);
> bb->cs[bb->len++] = upper_32_bits(addr);
>
> diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
> index b0874052f5ce..a1b164cf8bce 100644
> --- a/drivers/gpu/drm/xe/xe_pt.c
> +++ b/drivers/gpu/drm/xe/xe_pt.c
> @@ -67,8 +67,8 @@ u64 xe_pde_encode(struct xe_bo *bo, u64 bo_offset)
> return pde;
> }
>
> -static u64 __pte_encode(u64 pte, enum xe_cache_level cache,
> - struct xe_vm *vm, struct xe_vma *vma, u32 pt_level)
> +u64 __xe_pte_encode(u64 pte, enum xe_cache_level cache,
> + struct xe_vm *vm, struct xe_vma *vma, u32 pt_level)
> {
> struct xe_device *xe = vm->xe;
>
> @@ -112,7 +112,7 @@ u64 xe_pte_encode(struct xe_vm *vm, struct xe_bo *bo, u64 offset, enum xe_cache_
> if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
> pte |= XE_PPGTT_PTE_DM;
>
> - return __pte_encode(pte, cache, vm, NULL, pt_level);
> + return __xe_pte_encode(pte, cache, vm, NULL, pt_level);
> }
>
> static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
> @@ -592,9 +592,9 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
>
> XE_WARN_ON(xe_walk->va_curs_start != addr);
>
> - pte = __pte_encode(is_null ? 0 :
> - xe_res_dma(curs) + xe_walk->dma_offset,
> - xe_walk->cache, xe_walk->vm, xe_walk->vma, level);
> + pte = __xe_pte_encode(is_null ? 0 :
> + xe_res_dma(curs) + xe_walk->dma_offset,
> + xe_walk->cache, xe_walk->vm, xe_walk->vma, level);
> pte |= xe_walk->default_pte;
>
> /*
> diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h
> index 4a9143bc6628..0e66436d707d 100644
> --- a/drivers/gpu/drm/xe/xe_pt.h
> +++ b/drivers/gpu/drm/xe/xe_pt.h
> @@ -49,5 +49,7 @@ u64 xe_pde_encode(struct xe_bo *bo, u64 bo_offset);
>
> u64 xe_pte_encode(struct xe_vm *vm, struct xe_bo *bo, u64 offset, enum xe_cache_level cache,
> u32 pt_level);
> +u64 __xe_pte_encode(u64 pte, enum xe_cache_level cache,
> + struct xe_vm *vm, struct xe_vma *vma, u32 pt_level);
>
> #endif
> --
> 2.41.0
>
--
Matt Roper
Graphics Software Engineer
Linux GPU Platform Enablement
Intel Corporation