[PATCH 61/68] drm/i915/gt: Pass the engine to the emit_flush() vfunc

Chris Wilson chris at chris-wilson.co.uk
Mon Feb 1 00:13:30 UTC 2021

Pass the engine explicitly to the emit_flush() callbacks instead of
having each implementation dereference rq->engine. This lets the
flush emitters take a const struct intel_engine_cs pointer, removes
the repeated pointer chasing through the request (e.g. for
engine->i915, engine->gt and engine->class checks), and brings
emit_flush() in line with the breadcrumb emitters, which already
receive the engine as their first argument.

---
 drivers/gpu/drm/i915/gem/i915_gem_context.c   |  2 +-
 drivers/gpu/drm/i915/gt/gen2_engine_cs.c      | 18 ++++++----
 drivers/gpu/drm/i915/gt/gen2_engine_cs.h      | 12 +++++--
 drivers/gpu/drm/i915/gt/gen6_engine_cs.c      | 27 +++++++++-----
 drivers/gpu/drm/i915/gt/gen6_engine_cs.h      | 16 ++++++---
 drivers/gpu/drm/i915/gt/gen8_engine_cs.c      | 35 ++++++++++++-------
 drivers/gpu/drm/i915/gt/gen8_engine_cs.h      | 20 ++++++++---
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |  5 ++-
 .../drm/i915/gt/intel_execlists_submission.c  |  6 ++--
 .../gpu/drm/i915/gt/intel_ring_scheduler.c    |  2 +-
 .../gpu/drm/i915/gt/intel_ring_submission.c   | 15 ++++----
 drivers/gpu/drm/i915/gt/intel_workarounds.c   |  5 +--
 drivers/gpu/drm/i915/gt/mock_engine.c         |  4 ++-
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c |  2 +-
 drivers/gpu/drm/i915/gvt/mmio_context.c       |  7 ++--
 15 files changed, 116 insertions(+), 60 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index ca37d93ef5e7..78737009f25d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1272,7 +1272,7 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
 		int err;
 
 		/* Magic required to prevent forcewake errors! */
-		err = engine->emit_flush(rq, EMIT_INVALIDATE);
+		err = engine->emit_flush(engine, rq, EMIT_INVALIDATE);
 		if (err)
 			return err;
 
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
index 3f5cebf2d233..7829aff8bfb8 100644
--- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
@@ -11,7 +11,9 @@
 #include "intel_gt_irq.h"
 #include "intel_ring.h"
 
-int gen2_emit_flush(struct i915_request *rq, u32 mode)
+int gen2_emit_flush(const struct intel_engine_cs *engine,
+		    struct i915_request *rq,
+		    u32 mode)
 {
 	unsigned int num_store_dw = 12;
 	u32 cmd, *cs;
@@ -38,7 +40,9 @@ int gen2_emit_flush(struct i915_request *rq, u32 mode)
 	return 0;
 }
 
-int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
+int gen4_emit_flush_rcs(const struct intel_engine_cs *engine,
+			struct i915_request *rq,
+			u32 mode)
 {
 	u32 cmd, *cs;
 	int i;
@@ -74,7 +78,7 @@ int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
 	cmd = MI_FLUSH;
 	if (mode & EMIT_INVALIDATE) {
 		cmd |= MI_EXE_FLUSH;
-		if (IS_G4X(rq->engine->i915) || IS_GEN(rq->engine->i915, 5))
+		if (IS_G4X(engine->i915) || IS_GEN(engine->i915, 5))
 			cmd |= MI_INVALIDATE_ISP;
 	}
 
@@ -100,7 +104,7 @@ int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
 	 */
 	if (mode & EMIT_INVALIDATE) {
 		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
-		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
+		*cs++ = intel_gt_scratch_offset(engine->gt,
 						INTEL_GT_SCRATCH_FIELD_DEFAULT) |
 			PIPE_CONTROL_GLOBAL_GTT;
 		*cs++ = 0;
@@ -110,7 +114,7 @@ int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
 			*cs++ = MI_FLUSH;
 
 		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
-		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
+		*cs++ = intel_gt_scratch_offset(engine->gt,
 						INTEL_GT_SCRATCH_FIELD_DEFAULT) |
 			PIPE_CONTROL_GLOBAL_GTT;
 		*cs++ = 0;
@@ -124,7 +128,9 @@ int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
 	return 0;
 }
 
-int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode)
+int gen4_emit_flush_vcs(const struct intel_engine_cs *engine,
+			struct i915_request *rq,
+			u32 mode)
 {
 	u32 *cs;
 
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.h b/drivers/gpu/drm/i915/gt/gen2_engine_cs.h
index 99cc752c25b6..a07b163608e2 100644
--- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.h
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.h
@@ -11,9 +11,15 @@
 struct i915_request;
 struct intel_engine_cs;
 
-int gen2_emit_flush(struct i915_request *rq, u32 mode);
-int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode);
-int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode);
+int gen2_emit_flush(const struct intel_engine_cs *engine,
+		    struct i915_request *rq,
+		    u32 mode);
+int gen4_emit_flush_rcs(const struct intel_engine_cs *engine,
+			struct i915_request *rq,
+			u32 mode);
+int gen4_emit_flush_vcs(const struct intel_engine_cs *engine,
+			struct i915_request *rq,
+			u32 mode);
 
 u32 *gen3_emit_breadcrumb(const struct intel_engine_cs *engine,
 			  struct i915_request *rq,
diff --git a/drivers/gpu/drm/i915/gt/gen6_engine_cs.c b/drivers/gpu/drm/i915/gt/gen6_engine_cs.c
index 080aaa2bcfc9..6d9279af2762 100644
--- a/drivers/gpu/drm/i915/gt/gen6_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen6_engine_cs.c
@@ -51,10 +51,11 @@
  * really our business.  That leaves only stall at scoreboard.
  */
 static int
-gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
+gen6_emit_post_sync_nonzero_flush(const struct intel_engine_cs *engine,
+				  struct i915_request *rq)
 {
 	u32 scratch_addr =
-		intel_gt_scratch_offset(rq->engine->gt,
+		intel_gt_scratch_offset(engine->gt,
 					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
 	u32 *cs;
 
@@ -85,16 +86,18 @@ gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
 	return 0;
 }
 
-int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode)
+int gen6_emit_flush_rcs(const struct intel_engine_cs *engine,
+			struct i915_request *rq,
+			u32 mode)
 {
 	u32 scratch_addr =
-		intel_gt_scratch_offset(rq->engine->gt,
+		intel_gt_scratch_offset(engine->gt,
 					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
 	u32 *cs, flags = 0;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
-	ret = gen6_emit_post_sync_nonzero_flush(rq);
+	ret = gen6_emit_post_sync_nonzero_flush(engine, rq);
 	if (ret)
 		return ret;
 
@@ -230,12 +233,16 @@ static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
 	return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
 }
 
-int gen6_emit_flush_xcs(struct i915_request *rq, u32 mode)
+int gen6_emit_flush_xcs(const struct intel_engine_cs *engine,
+			struct i915_request *rq,
+			u32 mode)
 {
 	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
 }
 
-int gen6_emit_flush_vcs(struct i915_request *rq, u32 mode)
+int gen6_emit_flush_vcs(const struct intel_engine_cs *engine,
+			struct i915_request *rq,
+			u32 mode)
 {
 	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
 }
@@ -300,10 +307,12 @@ static int gen7_stall_cs(struct i915_request *rq)
 	return 0;
 }
 
-int gen7_emit_flush_rcs(struct i915_request *rq, u32 mode)
+int gen7_emit_flush_rcs(const struct intel_engine_cs *engine,
+			struct i915_request *rq,
+			u32 mode)
 {
 	u32 scratch_addr =
-		intel_gt_scratch_offset(rq->engine->gt,
+		intel_gt_scratch_offset(engine->gt,
 					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
 	u32 *cs, flags = 0;
 
diff --git a/drivers/gpu/drm/i915/gt/gen6_engine_cs.h b/drivers/gpu/drm/i915/gt/gen6_engine_cs.h
index 75baea303dd0..9d752f9b7873 100644
--- a/drivers/gpu/drm/i915/gt/gen6_engine_cs.h
+++ b/drivers/gpu/drm/i915/gt/gen6_engine_cs.h
@@ -13,9 +13,15 @@
 struct i915_request;
 struct intel_engine_cs;
 
-int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode);
-int gen6_emit_flush_vcs(struct i915_request *rq, u32 mode);
-int gen6_emit_flush_xcs(struct i915_request *rq, u32 mode);
+int gen6_emit_flush_rcs(const struct intel_engine_cs *engine,
+			struct i915_request *rq,
+			u32 mode);
+int gen6_emit_flush_vcs(const struct intel_engine_cs *engine,
+			struct i915_request *rq,
+			u32 mode);
+int gen6_emit_flush_xcs(const struct intel_engine_cs *engine,
+			struct i915_request *rq,
+			u32 mode);
 
 u32 *gen6_emit_breadcrumb_rcs(const struct intel_engine_cs *engine,
 			      struct i915_request *rq,
@@ -24,7 +30,9 @@ u32 *gen6_emit_breadcrumb_xcs(const struct intel_engine_cs *engine,
 			      struct i915_request *rq,
 			      u32 *cs);
 
-int gen7_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen7_emit_flush_rcs(const struct intel_engine_cs *engine,
+			struct i915_request *rq,
+			u32 mode);
 
 u32 *gen7_emit_breadcrumb_rcs(const struct intel_engine_cs *engine,
 			      struct i915_request *rq,
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 8a446dad32f5..85cdd777f54f 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -9,7 +9,9 @@
 #include "intel_gpu_commands.h"
 #include "intel_ring.h"
 
-int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
+int gen8_emit_flush_rcs(const struct intel_engine_cs *engine,
+			struct i915_request *rq,
+			u32 mode)
 {
 	bool vf_flush_wa = false, dc_flush_wa = false;
 	u32 *cs, flags = 0;
@@ -38,11 +40,11 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
 		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
 		 * pipe control.
 		 */
-		if (IS_GEN(rq->engine->i915, 9))
+		if (IS_GEN(engine->i915, 9))
 			vf_flush_wa = true;
 
 		/* WaForGAMHang:kbl */
-		if (IS_KBL_GT_REVID(rq->engine->i915, 0, KBL_REVID_B0))
+		if (IS_KBL_GT_REVID(engine->i915, 0, KBL_REVID_B0))
 			dc_flush_wa = true;
 	}
 
@@ -75,7 +77,9 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
 	return 0;
 }
 
-int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)
+int gen8_emit_flush_xcs(const struct intel_engine_cs *engine,
+			struct i915_request *rq,
+			u32 mode)
 {
 	u32 cmd, *cs;
 
@@ -95,7 +99,7 @@ int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)
 
 	if (mode & EMIT_INVALIDATE) {
 		cmd |= MI_INVALIDATE_TLB;
-		if (rq->engine->class == VIDEO_DECODE_CLASS)
+		if (engine->class == VIDEO_DECODE_CLASS)
 			cmd |= MI_INVALIDATE_BSD;
 	}
 
@@ -108,7 +112,9 @@ int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)
 	return 0;
 }
 
-int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode)
+int gen11_emit_flush_rcs(const struct intel_engine_cs *engine,
+			 struct i915_request *rq,
+			 u32 mode)
 {
 	if (mode & EMIT_FLUSH) {
 		u32 *cs;
@@ -198,7 +204,9 @@ static u32 *gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs)
 	return cs;
 }
 
-int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
+int gen12_emit_flush_rcs(const struct intel_engine_cs *engine,
+			 struct i915_request *rq,
+			 u32 mode)
 {
 	if (mode & EMIT_FLUSH) {
 		u32 flags = 0;
@@ -268,7 +276,9 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
 	return 0;
 }
 
-int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
+int gen12_emit_flush_xcs(const struct intel_engine_cs *engine,
+			 struct i915_request *rq,
+			 u32 mode)
 {
 	intel_engine_mask_t aux_inv = 0;
 	u32 cmd, *cs;
@@ -300,7 +310,7 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
 
 	if (mode & EMIT_INVALIDATE) {
 		cmd |= MI_INVALIDATE_TLB;
-		if (rq->engine->class == VIDEO_DECODE_CLASS)
+		if (engine->class == VIDEO_DECODE_CLASS)
 			cmd |= MI_INVALIDATE_BSD;
 	}
 
@@ -310,13 +320,12 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
 	*cs++ = 0; /* value */
 
 	if (aux_inv) { /* hsdes: 1809175790 */
-		struct intel_engine_cs *engine;
+		struct intel_engine_cs *aux;
 		unsigned int tmp;
 
 		*cs++ = MI_LOAD_REGISTER_IMM(hweight8(aux_inv));
-		for_each_engine_masked(engine, rq->engine->gt,
-				       aux_inv, tmp) {
-			*cs++ = i915_mmio_reg_offset(aux_inv_reg(engine));
+		for_each_engine_masked(aux, engine->gt, aux_inv, tmp) {
+			*cs++ = i915_mmio_reg_offset(aux_inv_reg(aux));
 			*cs++ = AUX_INV;
 		}
 		*cs++ = MI_NOOP;
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
index be4f84670d34..3225c186e363 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
@@ -16,12 +16,22 @@
 struct i915_request;
 struct intel_engine_cs;
 
-int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode);
-int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode);
-int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen8_emit_flush_rcs(const struct intel_engine_cs *engine,
+			struct i915_request *rq,
+			u32 mode);
+int gen11_emit_flush_rcs(const struct intel_engine_cs *engine,
+			 struct i915_request *rq,
+			 u32 mode);
+int gen12_emit_flush_rcs(const struct intel_engine_cs *engine,
+			 struct i915_request *rq,
+			 u32 mode);
 
-int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode);
-int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode);
+int gen8_emit_flush_xcs(const struct intel_engine_cs *engine,
+			struct i915_request *rq,
+			u32 mode);
+int gen12_emit_flush_xcs(const struct intel_engine_cs *engine,
+			 struct i915_request *rq,
+			 u32 mode);
 
 int gen8_emit_init_breadcrumb(const struct intel_engine_cs *engine,
 			      struct i915_request *rq);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index e1aa4f9507f1..270ec0a4ce96 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -385,10 +385,13 @@ struct intel_engine_cs {
 
 	const struct intel_context_ops *cops;
 
-	int		(*emit_flush)(struct i915_request *request, u32 mode);
+	int		(*emit_flush)(const struct intel_engine_cs *engine,
+				      struct i915_request *request,
+				      u32 mode);
 #define EMIT_INVALIDATE	BIT(0)
 #define EMIT_FLUSH	BIT(1)
 #define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
+
 	int		(*emit_bb_start)(struct i915_request *rq,
 					 u64 offset, u32 length,
 					 unsigned int dispatch_flags);
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 79886d46fb83..2a8589f97932 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -2252,12 +2252,12 @@ static int emit_pdps(struct intel_context *ce, struct i915_request *rq)
 	intel_ring_advance(rq, cs);
 
 	/* Flush any residual operations from the context load */
-	err = engine->emit_flush(rq, EMIT_FLUSH);
+	err = engine->emit_flush(engine, rq, EMIT_FLUSH);
 	if (err)
 		return err;
 
 	/* Magic required to prevent forcewake errors! */
-	err = engine->emit_flush(rq, EMIT_INVALIDATE);
+	err = engine->emit_flush(engine, rq, EMIT_INVALIDATE);
 	if (err)
 		return err;
 
@@ -2314,7 +2314,7 @@ execlists_context_request(struct intel_context *ce, struct i915_request *rq)
 	}
 
 	/* Unconditionally invalidate GPU caches and TLBs. */
-	ret = engine->emit_flush(rq, EMIT_INVALIDATE);
+	ret = engine->emit_flush(engine, rq, EMIT_INVALIDATE);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
index 593aef2fb679..25acf0d8e49a 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
@@ -955,7 +955,7 @@ ring_context_request(struct intel_context *ce, struct i915_request *rq)
 	rq->reserved_space += LEGACY_REQUEST_SIZE;
 
 	/* Unconditionally invalidate GPU caches and TLBs. */
-	ret = engine->emit_flush(rq, EMIT_INVALIDATE);
+	ret = engine->emit_flush(engine, rq, EMIT_INVALIDATE);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 193b099e8048..84858d84d5ae 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -596,7 +596,7 @@ static int load_pd_dir(const struct intel_engine_cs *engine,
 
 	intel_ring_advance(rq, cs);
 
-	return engine->emit_flush(rq, EMIT_FLUSH);
+	return engine->emit_flush(engine, rq, EMIT_FLUSH);
 }
 
 static int mi_set_context(struct i915_request *rq,
@@ -772,12 +772,13 @@ static int remap_l3(struct intel_context *ce, struct i915_request *rq)
 static int switch_mm(struct intel_context *ce, struct i915_request *rq)
 {
 	struct i915_address_space *vm = vm_alias(ce->vm);
+	struct intel_engine_cs *engine = ce->engine;
 	int ret;
 
 	if (!vm)
 		return 0;
 
-	ret = ce->engine->emit_flush(rq, EMIT_FLUSH);
+	ret = engine->emit_flush(engine, rq, EMIT_FLUSH);
 	if (ret)
 		return ret;
 
@@ -789,11 +790,11 @@ static int switch_mm(struct intel_context *ce, struct i915_request *rq)
 	 * post-sync op, this extra pass appears vital before a
 	 * mm switch!
 	 */
-	ret = load_pd_dir(ce->engine, rq, vm, PP_DIR_DCLV_2G);
+	ret = load_pd_dir(engine, rq, vm, PP_DIR_DCLV_2G);
 	if (ret)
 		return ret;
 
-	return ce->engine->emit_flush(rq, EMIT_INVALIDATE);
+	return engine->emit_flush(engine, rq, EMIT_INVALIDATE);
 }
 
 static int
@@ -819,12 +820,12 @@ clear_residuals(struct intel_engine_cs *engine, struct i915_request *rq)
 	if (ret)
 		return ret;
 
-	ret = engine->emit_flush(rq, EMIT_FLUSH);
+	ret = engine->emit_flush(engine, rq, EMIT_FLUSH);
 	if (ret)
 		return ret;
 
 	/* Always invalidate before the next switch_mm() */
-	return engine->emit_flush(rq, EMIT_INVALIDATE);
+	return engine->emit_flush(engine, rq, EMIT_INVALIDATE);
 }
 
 static int switch_context(struct intel_context *ce,
@@ -909,7 +910,7 @@ ring_context_request(struct intel_context *ce, struct i915_request *rq)
 	rq->reserved_space += LEGACY_REQUEST_SIZE;
 
 	/* Unconditionally invalidate GPU caches and TLBs. */
-	ret = engine->emit_flush(rq, EMIT_INVALIDATE);
+	ret = engine->emit_flush(engine, rq, EMIT_INVALIDATE);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 71d1c19c868b..3f54a6a82112 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -771,6 +771,7 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
 int intel_engine_emit_ctx_wa(struct i915_request *rq)
 {
 	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
+	struct intel_engine_cs *engine = rq->engine;
 	struct i915_wa *wa;
 	unsigned int i;
 	u32 *cs;
@@ -779,7 +780,7 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
 	if (wal->count == 0)
 		return 0;
 
-	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
+	ret = engine->emit_flush(engine, rq, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
@@ -796,7 +797,7 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
 
 	intel_ring_advance(rq, cs);
 
-	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
+	ret = engine->emit_flush(engine, rq, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 83fbe118e9ce..8c66c1e9fa09 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -188,7 +188,9 @@ static const struct intel_context_ops mock_context_ops = {
 	.destroy = mock_context_destroy,
 };
 
-static int mock_emit_flush(struct i915_request *request, unsigned int flags)
+static int mock_emit_flush(const struct intel_engine_cs *engine,
+			   struct i915_request *request,
+			   unsigned int flags)
 {
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 2cbec392d0c2..d7772fa20f88 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -466,7 +466,7 @@ guc_context_request(struct intel_context *ce, struct i915_request *rq)
 	 */
 
 	/* Unconditionally invalidate GPU caches and TLBs. */
-	ret = engine->emit_flush(rq, EMIT_INVALIDATE);
+	ret = engine->emit_flush(engine, rq, EMIT_INVALIDATE);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index c9589e26af93..85ee0b5de8c1 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -200,13 +200,14 @@ restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
 	int ret;
 	struct engine_mmio *mmio;
 	struct intel_gvt *gvt = vgpu->gvt;
-	int ring_id = req->engine->id;
+	struct intel_engine_cs *engine = req->engine;
+	int ring_id = engine->id;
 	int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];
 
 	if (count == 0)
 		return 0;
 
-	ret = req->engine->emit_flush(req, EMIT_BARRIER);
+	ret = engine->emit_flush(engine, req, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
@@ -229,7 +230,7 @@ restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
 	*cs++ = MI_NOOP;
 	intel_ring_advance(req, cs);
 
-	ret = req->engine->emit_flush(req, EMIT_BARRIER);
+	ret = engine->emit_flush(engine, req, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
-- 
2.20.1



More information about the Intel-gfx-trybot mailing list