[PATCH v5 06/16] drm/xe/xelp: Support auxccs invalidation on blitter
Tvrtko Ursulin
tvrtko.ursulin at igalia.com
Thu Apr 3 19:03:06 UTC 2025
AuxCCS platforms need to be able to invalidate the aux table on the
blitter engine as well.

Add the relevant MMIO register and enable this by refactoring the ring
emission a bit, consolidating all non-render engines into a single
helper.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at igalia.com>
---
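Note for reviewers: below is a small standalone sketch (not xe code; the
enum names, the _OFFSET macro names and the helper are illustrative
stand-ins) of the per-class aux invalidation register selection that the
consolidated __emit_job_gen12_xcs() helper performs. The offsets match
the xe_gt_regs.h definitions touched by this patch.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for XE_ENGINE_CLASS_* (names/values hypothetical). */
enum engine_class {
	ENGINE_CLASS_COPY,
	ENGINE_CLASS_VIDEO_DECODE,
	ENGINE_CLASS_VIDEO_ENHANCE,
};

/* MMIO offsets as defined/used in xe_gt_regs.h by this patch. */
#define BCS_AUX_INV_OFFSET	0x4248u
#define VD0_AUX_INV_OFFSET	0x4218u
#define VE0_AUX_INV_OFFSET	0x4238u

/*
 * Mirrors the switch added in __emit_job_gen12_xcs(): pick the aux table
 * invalidation register based on the engine class of the queue.
 */
static uint32_t aux_inv_reg_for_class(enum engine_class ec)
{
	switch (ec) {
	case ENGINE_CLASS_COPY:
		return BCS_AUX_INV_OFFSET;	/* new with this patch */
	case ENGINE_CLASS_VIDEO_DECODE:
		return VD0_AUX_INV_OFFSET;
	default:
		return VE0_AUX_INV_OFFSET;
	}
}

int main(void)
{
	printf("blitter aux inval register: 0x%x\n",
	       (unsigned int)aux_inv_reg_for_class(ENGINE_CLASS_COPY));
	return 0;
}

In the real helper the invalidation is only emitted when the platform
actually has AuxCCS (has_aux_ccs()) and it is placed inside the
preparser-disabled TLB invalidation sequence, as in the diff below.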
drivers/gpu/drm/xe/regs/xe_gt_regs.h | 1 +
drivers/gpu/drm/xe/xe_ring_ops.c | 104 +++++++++++----------------
2 files changed, 41 insertions(+), 64 deletions(-)
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index da1f198ac107..300335dc8c5e 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -89,6 +89,7 @@
#define CCS_AUX_INV XE_REG(0x4208)
#define VD0_AUX_INV XE_REG(0x4218)
#define VE0_AUX_INV XE_REG(0x4238)
+#define BCS_AUX_INV XE_REG(0x4248)
#define VE1_AUX_INV XE_REG(0x42b8)
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index 3f075dd36c86..72979521ee7e 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -261,44 +261,6 @@ static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
return i;
}
-/* for engines that don't require any special HW handling (no EUs, no aux inval, etc) */
-static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc,
- u64 batch_addr, u32 seqno)
-{
- u32 dw[MAX_JOB_SIZE_DW], i = 0;
- u32 ppgtt_flag = get_ppgtt_flag(job);
- struct xe_gt *gt = job->q->gt;
-
- i = emit_copy_timestamp(lrc, dw, i);
-
- if (job->ring_ops_flush_tlb) {
- dw[i++] = preparser_disable(true);
- i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
- seqno, MI_INVALIDATE_TLB, dw, i);
- dw[i++] = preparser_disable(false);
- } else {
- i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
- seqno, dw, i);
- }
-
- i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
-
- if (job->user_fence.used) {
- i = emit_flush_dw(dw, i);
- i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
- job->user_fence.value,
- dw, i);
- }
-
- i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, 0, dw, i);
-
- i = emit_user_interrupt(dw, i);
-
- xe_gt_assert(gt, i <= MAX_JOB_SIZE_DW);
-
- xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
-}
-
static bool has_aux_ccs(struct xe_device *xe)
{
/*
@@ -313,36 +275,50 @@ static bool has_aux_ccs(struct xe_device *xe)
return !xe->info.has_flat_ccs;
}
-static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
- u64 batch_addr, u32 seqno)
+static void __emit_job_gen12_xcs(struct xe_sched_job *job, struct xe_lrc *lrc,
+ u64 batch_addr, u32 seqno)
{
u32 dw[MAX_JOB_SIZE_DW], i = 0;
u32 ppgtt_flag = get_ppgtt_flag(job);
struct xe_gt *gt = job->q->gt;
struct xe_device *xe = gt_to_xe(gt);
- bool decode = job->q->class == XE_ENGINE_CLASS_VIDEO_DECODE;
+ const unsigned int class = job->q->class;
+ const bool aux_ccs = has_aux_ccs(xe) &&
+ (class == XE_ENGINE_CLASS_COPY ||
+ class == XE_ENGINE_CLASS_VIDEO_DECODE ||
+ class == XE_ENGINE_CLASS_VIDEO_ENHANCE);
+ const bool invalidate_tlb = aux_ccs || job->ring_ops_flush_tlb;
i = emit_copy_timestamp(lrc, dw, i);
- dw[i++] = preparser_disable(true);
-
- /* hsdes: 1809175790 */
- if (has_aux_ccs(xe)) {
- if (decode)
- i = emit_aux_table_inv(gt, VD0_AUX_INV, dw, i);
- else
- i = emit_aux_table_inv(gt, VE0_AUX_INV, dw, i);
- }
-
- if (job->ring_ops_flush_tlb)
+ if (invalidate_tlb) {
+ dw[i++] = preparser_disable(true);
i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
- seqno, MI_INVALIDATE_TLB, dw, i);
+ seqno,
+ MI_INVALIDATE_TLB,
+ dw, i);
+ /* hsdes: 1809175790 */
+ if (aux_ccs) {
+ struct xe_reg reg;
- dw[i++] = preparser_disable(false);
+ switch (class) {
+ case XE_ENGINE_CLASS_COPY:
+ reg = BCS_AUX_INV;
+ break;
+ case XE_ENGINE_CLASS_VIDEO_DECODE:
+ reg = VD0_AUX_INV;
+ break;
+ default:
+ reg = VE0_AUX_INV;
+ }
- if (!job->ring_ops_flush_tlb)
+ i = emit_aux_table_inv(gt, reg, dw, i);
+ }
+ dw[i++] = preparser_disable(false);
+ } else {
i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
seqno, dw, i);
+ }
i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
@@ -458,9 +434,9 @@ static void emit_job_gen12_gsc(struct xe_sched_job *job)
xe_gt_assert(gt, job->q->width <= 1); /* no parallel submission for GSCCS */
- __emit_job_gen12_simple(job, job->q->lrc[0],
- job->ptrs[0].batch_addr,
- xe_sched_job_lrc_seqno(job));
+ __emit_job_gen12_xcs(job, job->q->lrc[0],
+ job->ptrs[0].batch_addr,
+ xe_sched_job_lrc_seqno(job));
}
static void emit_job_gen12_copy(struct xe_sched_job *job)
@@ -474,9 +450,9 @@ static void emit_job_gen12_copy(struct xe_sched_job *job)
}
for (i = 0; i < job->q->width; ++i)
- __emit_job_gen12_simple(job, job->q->lrc[i],
- job->ptrs[i].batch_addr,
- xe_sched_job_lrc_seqno(job));
+ __emit_job_gen12_xcs(job, job->q->lrc[i],
+ job->ptrs[i].batch_addr,
+ xe_sched_job_lrc_seqno(job));
}
static void emit_job_gen12_video(struct xe_sched_job *job)
@@ -485,9 +461,9 @@ static void emit_job_gen12_video(struct xe_sched_job *job)
/* FIXME: Not doing parallel handshake for now */
for (i = 0; i < job->q->width; ++i)
- __emit_job_gen12_video(job, job->q->lrc[i],
- job->ptrs[i].batch_addr,
- xe_sched_job_lrc_seqno(job));
+ __emit_job_gen12_xcs(job, job->q->lrc[i],
+ job->ptrs[i].batch_addr,
+ xe_sched_job_lrc_seqno(job));
}
static void emit_job_gen12_render_compute(struct xe_sched_job *job)
--
2.48.0