[PATCH v7 07/10] drm/xe/xe2: Update emit_pte to use compression enabled PAT index
Thomas Hellström
thomas.hellstrom at linux.intel.com
Tue Dec 12 12:28:51 UTC 2023
On 12/11/23 14:43, Himal Prasad Ghimiray wrote:
> For indirectly accessed buffers, use the compression-enabled PAT index.
>
> v2:
> - Fix parameter name.
>
> v3:
> - use a relevant define instead of a fixed number.
>
> Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> Cc: Matthew Auld <matthew.auld at intel.com>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> ---
> drivers/gpu/drm/xe/tests/xe_migrate.c | 2 +-
> drivers/gpu/drm/xe/xe_migrate.c | 21 +++++++++++++++------
> drivers/gpu/drm/xe/xe_pat.c | 1 +
> drivers/gpu/drm/xe/xe_pt_types.h | 1 +
> 4 files changed, 18 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
> index 47fcd6e6b777..d6c23441632a 100644
> --- a/drivers/gpu/drm/xe/tests/xe_migrate.c
> +++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
> @@ -330,7 +330,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
> else
> xe_res_first_sg(xe_bo_sg(pt), 0, pt->size, &src_it);
>
> - emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt),
> + emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), false,
> &src_it, XE_PAGE_SIZE, pt);
>
> run_sanity_job(m, xe, bb, bb->len, "Writing PTE for our fake PT", test);
> diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
> index 9698986eab06..1ecf2274c7ba 100644
> --- a/drivers/gpu/drm/xe/xe_migrate.c
> +++ b/drivers/gpu/drm/xe/xe_migrate.c
> @@ -422,15 +422,24 @@ static u32 pte_update_size(struct xe_migrate *m,
>
> static void emit_pte(struct xe_migrate *m,
> struct xe_bb *bb, u32 at_pt,
> - bool is_vram,
> + bool is_vram, bool is_comp_pte,
> struct xe_res_cursor *cur,
> u32 size, struct xe_bo *bo)
> {
> - u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
> + struct xe_device *xe = tile_to_xe(m->tile);
> +
> + u16 pat_index;
> u32 ptes;
> u64 ofs = at_pt * XE_PAGE_SIZE;
> u64 cur_ofs;
>
> + /* Indirect access needs a compression-enabled uncached PAT index */
> + if (GRAPHICS_VERx100(xe) >= 2000)
> + pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
> + xe->pat.idx[XE_CACHE_NONE];
> + else
> + pat_index = xe->pat.idx[XE_CACHE_WB];
> +
> /*
> * FIXME: Emitting VRAM PTEs to L0 PTs is forbidden. Currently
> * we're only emitting VRAM PTEs during sanity tests, so when
> @@ -717,19 +726,19 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
> }
>
> if (!src_is_vram)
> - emit_pte(m, bb, src_L0_pt, src_is_vram, &src_it, src_L0,
> + emit_pte(m, bb, src_L0_pt, src_is_vram, true, &src_it, src_L0,
> src_bo);
> else
> xe_res_next(&src_it, src_L0);
>
> if (!dst_is_vram)
> - emit_pte(m, bb, dst_L0_pt, dst_is_vram, &dst_it, src_L0,
> + emit_pte(m, bb, dst_L0_pt, dst_is_vram, true, &dst_it, src_L0,
> dst_bo);
> else
> xe_res_next(&dst_it, src_L0);
>
> if (copy_system_ccs)
> - emit_pte(m, bb, ccs_pt, false, &ccs_it, ccs_size, src_bo);
> + emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src_bo);
>
> bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
> update_idx = bb->len;
> @@ -962,7 +971,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
>
> /* Preemption is enabled again by the ring ops. */
> if (!clear_vram) {
> - emit_pte(m, bb, clear_L0_pt, clear_vram, &src_it, clear_L0,
> + emit_pte(m, bb, clear_L0_pt, clear_vram, true, &src_it, clear_L0,
> bo);
> } else {
> xe_res_next(&src_it, clear_L0);
> diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
> index 1892ff81086f..1ff6bc79e7d4 100644
> --- a/drivers/gpu/drm/xe/xe_pat.c
> +++ b/drivers/gpu/drm/xe/xe_pat.c
> @@ -387,6 +387,7 @@ void xe_pat_init_early(struct xe_device *xe)
> xe->pat.idx[XE_CACHE_NONE] = 3;
> xe->pat.idx[XE_CACHE_WT] = 15;
> xe->pat.idx[XE_CACHE_WB] = 2;
> + xe->pat.idx[XE_CACHE_NONE_COMPRESSION] = 12; /* Applicable on xe2 and beyond */
> } else if (xe->info.platform == XE_METEORLAKE) {
> xe->pat.ops = &xelpg_pat_ops;
> xe->pat.table = xelpg_pat_table;
> diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
> index 82cbf1ef8e57..cee70cb0f014 100644
> --- a/drivers/gpu/drm/xe/xe_pt_types.h
> +++ b/drivers/gpu/drm/xe/xe_pt_types.h
> @@ -18,6 +18,7 @@ enum xe_cache_level {
> XE_CACHE_NONE,
> XE_CACHE_WT,
> XE_CACHE_WB,
> + XE_CACHE_NONE_COMPRESSION, /* UC + COH_NONE + COMPRESSION */
> __XE_CACHE_LEVEL_COUNT,
> };
>
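For readers following along, here is a minimal standalone sketch of the PAT index
selection that the emit_pte() hunk above introduces. The xe->pat.idx[] table,
GRAPHICS_VERx100() and XE_CACHE_NONE_COMPRESSION are taken from the diff; the
helper name pick_pte_pat_index() is illustrative only and is not part of the patch.

    /* Illustrative only: mirrors the selection logic added to emit_pte(). */
    static u16 pick_pte_pat_index(struct xe_device *xe, bool is_comp_pte)
    {
            /*
             * On Xe2 and later, indirect (CCS) access uses the uncached,
             * compression-enabled PAT entry; other PTEs use plain uncached.
             */
            if (GRAPHICS_VERx100(xe) >= 2000)
                    return is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
                                         xe->pat.idx[XE_CACHE_NONE];

            /* Pre-Xe2 platforms keep the write-back index used before this patch. */
            return xe->pat.idx[XE_CACHE_WB];
    }

So a call such as emit_pte(m, bb, src_L0_pt, src_is_vram, true, &src_it, src_L0,
src_bo) in xe_migrate_copy() ends up with the compression-enabled uncached entry
on Xe2, while the CCS metadata PTEs (is_comp_pte == false) stay plain uncached.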