[Intel-xe] [PATCH] drm/xe/migrate: fix MI_ARB_ON_OFF usage
Matt Roper
matthew.d.roper at intel.com
Mon Oct 23 23:48:20 UTC 2023
On Fri, Oct 20, 2023 at 07:24:38PM +0100, Matthew Auld wrote:
> Spec says: "This is a privileged command; it will not be effective (will
> be converted to a no-op) if executed from within a non-privileged batch
> buffer." However here it looks like we are just emitting it inside some
> bb which was jumped to via the ppGTT, which should be considered
> a non-privileged address space.
>
> It looks like we just need some way of preventing things like the
> emit_pte() and the later copy/clear from being preempted in-between,
> so instead just emit it directly in the ring for migration jobs.
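
Right - since the migration bb is fetched through the ppGTT, the command
streamer treats it as non-privileged and just converts the MI_ARB_ON_OFF
into a no-op, so the old emit_arb_clear() never actually did anything.
A rough stand-alone model of the rule the spec describes (purely
illustrative, not the driver code; the MI encoding follows the usual
MI_INSTR(0x08) layout):

/*
 * Toy model: a privileged command (MI_ARB_ON_OFF) only takes effect
 * when parsed from a privileged stream (the ring) and is dropped when
 * parsed from a non-privileged ppGTT batch buffer.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MI_ARB_ON_OFF   (0x08u << 23)
#define MI_ARB_ENABLE   (1u << 0)
#define MI_ARB_DISABLE  (0u << 0)

struct parser_state {
	bool arbitration_enabled;
};

/* Parse one dword; 'privileged' says whether it came from the ring. */
static void parse_cmd(struct parser_state *s, uint32_t cmd, bool privileged)
{
	if ((cmd & (0x3fu << 23)) == MI_ARB_ON_OFF) {
		if (!privileged) {
			printf("MI_ARB_ON_OFF from ppGTT batch -> no-op\n");
			return;
		}
		s->arbitration_enabled = cmd & MI_ARB_ENABLE;
		printf("MI_ARB_ON_OFF from ring -> arbitration %s\n",
		       s->arbitration_enabled ? "enabled" : "disabled");
	}
}

int main(void)
{
	struct parser_state s = { .arbitration_enabled = true };

	/* What the old code did: emit the toggle inside the batch. */
	parse_cmd(&s, MI_ARB_ON_OFF | MI_ARB_DISABLE, false);

	/* What this patch does: emit the toggle from the ring instead. */
	parse_cmd(&s, MI_ARB_ON_OFF | MI_ARB_DISABLE, true);
	return 0;
}
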
>
> Bspec: 45716
> Signed-off-by: Matthew Auld <matthew.auld at intel.com>
> Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> Cc: Matthew Brost <matthew.brost at intel.com>
Reviewed-by: Matt Roper <matthew.d.roper at intel.com>
> ---
> drivers/gpu/drm/xe/xe_migrate.c | 16 ----------------
> drivers/gpu/drm/xe/xe_ring_ops.c | 2 ++
> 2 files changed, 2 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
> index b4baecde60e6..009d728af762 100644
> --- a/drivers/gpu/drm/xe/xe_migrate.c
> +++ b/drivers/gpu/drm/xe/xe_migrate.c
> @@ -406,12 +406,6 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
> return m;
> }
>
> -static void emit_arb_clear(struct xe_bb *bb)
> -{
> - /* 1 dword */
> - bb->cs[bb->len++] = MI_ARB_ON_OFF | MI_ARB_DISABLE;
> -}
> -
> static u64 xe_migrate_res_sizes(struct xe_res_cursor *cur)
> {
> /*
> @@ -745,10 +739,6 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
> goto err_sync;
> }
>
> - /* Preemption is enabled again by the ring ops. */
> - if (!src_is_vram || !dst_is_vram)
> - emit_arb_clear(bb);
> -
> if (!src_is_vram)
> emit_pte(m, bb, src_L0_pt, src_is_vram, &src_it, src_L0,
> src_bo);
> @@ -994,7 +984,6 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
>
> /* Preemption is enabled again by the ring ops. */
> if (!clear_vram) {
> - emit_arb_clear(bb);
> emit_pte(m, bb, clear_L0_pt, clear_vram, &src_it, clear_L0,
> bo);
> } else {
> @@ -1285,9 +1274,6 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
> VM_SA_UPDATE_UNIT_SIZE;
> }
>
> - /* Preemption is enabled again by the ring ops. */
> - emit_arb_clear(bb);
> -
> /* Map our PT's to gtt */
> bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(num_updates);
> bb->cs[bb->len++] = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
> @@ -1316,8 +1302,6 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
> bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
> update_idx = bb->len;
>
> - /* Preemption is enabled again by the ring ops. */
> - emit_arb_clear(bb);
> for (i = 0; i < num_updates; i++)
> write_pgtable(tile, bb, 0, &updates[i], pt_update);
> }
> diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
> index 58676f4b989f..59e0aa2d6a4c 100644
> --- a/drivers/gpu/drm/xe/xe_ring_ops.c
> +++ b/drivers/gpu/drm/xe/xe_ring_ops.c
> @@ -355,6 +355,8 @@ static void emit_migration_job_gen12(struct xe_sched_job *job,
> i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
> seqno, dw, i);
>
> + dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE; /* Enabled again below */
> +
> i = emit_bb_start(job->batch_addr[0], BIT(8), dw, i);
>
> /* XXX: Do we need this? Leaving for now. */
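
For reference, the resulting ring dword ordering for a migration job
looks roughly like the sketch below: the arbitration toggle now
brackets the non-privileged bb from the ring rather than living inside
the bb. This is a simplified stand-alone model, not
emit_migration_job_gen12() itself; the BB_START encoding is trimmed
down, the helper name is a placeholder, and the trailing re-enable
just reflects the "Enabled again below" comment.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MI_ARB_ON_OFF		(0x08u << 23)
#define MI_ARB_ENABLE		(1u << 0)
#define MI_ARB_DISABLE		(0u << 0)
#define MI_BATCH_BUFFER_START	(0x31u << 23)

/* Placeholder for the driver's emit_bb_start(): BB_START + 64-bit address. */
static int emit_bb_start(uint64_t batch_addr, uint32_t *dw, int i)
{
	dw[i++] = MI_BATCH_BUFFER_START;
	dw[i++] = (uint32_t)batch_addr;
	dw[i++] = (uint32_t)(batch_addr >> 32);
	return i;
}

int main(void)
{
	uint32_t dw[16];
	int i = 0;

	/* ... start-seqno store would go here ... */

	dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE; /* no preemption around the bb */
	i = emit_bb_start(0x1000, dw, i);         /* run the migration bb */
	/* ... flush/seqno writes ... */
	dw[i++] = MI_ARB_ON_OFF | MI_ARB_ENABLE;  /* "Enabled again below" */

	for (int j = 0; j < i; j++)
		printf("dw[%d] = 0x%08" PRIx32 "\n", j, dw[j]);
	return 0;
}
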
> --
> 2.41.0
>
--
Matt Roper
Graphics Software Engineer
Linux GPU Platform Enablement
Intel Corporation