[PATCH 67/76] drm/i915: Hide i915_request.engine

Chris Wilson <chris@chris-wilson.co.uk>
Tue Feb 2 09:17:06 UTC 2021


In preparation for removing i915_request.engine, first wrap all access to
it with a helper. In many instances, we already have a pointer to the
engine in the local function, so we do not need to dig it out of the
request.
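
The conversion is mechanical; in brief (the helper itself is added to
i915_request.h below and simply returns rq->context->engine):

	/* before: reach into the request for its engine */
	struct intel_engine_cs *engine = rq->engine;

	/* after: go through the wrapper */
	struct intel_engine_cs *engine = i915_request_get_engine(rq);

Paths that need the engine to which the request was last submitted
(e.g. the scheduler internals, which take the engine lock) use the
field under its new name, rq->__engine, instead.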

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_busy.c      |  4 +-
 drivers/gpu/drm/i915/gem/i915_gem_context.c   |  2 +-
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 12 +--
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |  2 +-
 drivers/gpu/drm/i915/gt/intel_context_sseu.c  |  2 +-
 drivers/gpu/drm/i915/gt/intel_engine_cs.c     |  2 +-
 .../gpu/drm/i915/gt/intel_engine_heartbeat.c  |  2 +-
 drivers/gpu/drm/i915/gt/intel_engine_pm.c     |  2 +-
 .../drm/i915/gt/intel_execlists_submission.c  | 84 +++++++++++--------
 drivers/gpu/drm/i915/gt/intel_gt.c            |  2 +-
 drivers/gpu/drm/i915/gt/intel_renderstate.c   |  2 +-
 drivers/gpu/drm/i915/gt/intel_reset.c         |  2 +-
 .../gpu/drm/i915/gt/intel_ring_scheduler.c    | 38 +++++----
 .../gpu/drm/i915/gt/intel_ring_submission.c   |  4 +-
 drivers/gpu/drm/i915/gt/intel_rps.c           |  2 +-
 drivers/gpu/drm/i915/gt/intel_timeline.c      |  2 +-
 drivers/gpu/drm/i915/gt/intel_workarounds.c   |  8 +-
 drivers/gpu/drm/i915/gt/mock_engine.c         |  6 +-
 drivers/gpu/drm/i915/gt/selftest_engine_cs.c  |  5 +-
 drivers/gpu/drm/i915/gt/selftest_execlists.c  | 16 ++--
 drivers/gpu/drm/i915/gt/selftest_hangcheck.c  | 20 +++--
 drivers/gpu/drm/i915/gt/selftest_lrc.c        |  4 +-
 drivers/gpu/drm/i915/gt/selftest_mocs.c       |  5 +-
 drivers/gpu/drm/i915/gt/selftest_rc6.c        |  2 +-
 drivers/gpu/drm/i915/gt/selftest_timeline.c   |  8 +-
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 16 ++--
 drivers/gpu/drm/i915/gvt/mmio_context.c       | 10 ++-
 drivers/gpu/drm/i915/gvt/scheduler.c          | 24 +++---
 drivers/gpu/drm/i915/i915_active.c            |  2 +-
 drivers/gpu/drm/i915/i915_request.c           | 40 ++++-----
 drivers/gpu/drm/i915/i915_request.h           | 15 +++-
 drivers/gpu/drm/i915/i915_scheduler.c         | 41 +++++----
 drivers/gpu/drm/i915/i915_trace.h             | 30 +++----
 drivers/gpu/drm/i915/selftests/i915_perf.c    |  4 +-
 .../gpu/drm/i915/selftests/i915_scheduler.c   |  4 +-
 drivers/gpu/drm/i915/selftests/mock_request.c |  2 +-
 36 files changed, 234 insertions(+), 192 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 25235ef630c1..fe9d1c5bb581 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -57,8 +57,8 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
 		return 0;
 
 	/* Beware type-expansion follies! */
-	BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
-	return flag(rq->engine->uabi_class);
+	BUILD_BUG_ON(!typecheck(u16, i915_request_get_engine(rq)->uabi_class));
+	return flag(i915_request_get_engine(rq)->uabi_class);
 }
 
 static __always_inline unsigned int
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index cf36e04e715b..b53ec6d7cdbe 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1134,7 +1134,7 @@ static int pin_ppgtt_update(struct intel_context *ce, struct i915_gem_ww_ctx *ww
 static int emit_ppgtt_update(struct i915_request *rq, void *data)
 {
 	struct i915_address_space *vm = rq->context->vm;
-	struct intel_engine_cs *engine = rq->engine;
+	struct intel_engine_cs *engine = rq->context->engine;
 	u32 base = engine->mmio_base;
 	u32 *cs;
 	int i;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index efbe1e92a3ab..052ae889bfde 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1041,6 +1041,7 @@ static void reloc_cache_put_pool(struct i915_execbuffer *eb, struct reloc_cache
 static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cache)
 {
 	struct drm_i915_gem_object *obj = cache->rq->batch->obj;
+	struct intel_engine_cs *engine = i915_request_get_engine(cache->rq);
 
 	GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
 	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
@@ -1048,7 +1049,7 @@ static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cach
 	i915_gem_object_flush_map(obj);
 	i915_gem_object_unpin_map(obj);
 
-	intel_gt_chipset_flush(cache->rq->engine->gt);
+	intel_gt_chipset_flush(engine->gt);
 
 	i915_request_add(cache->rq);
 	reloc_cache_put_pool(eb, cache);
@@ -2214,13 +2215,14 @@ static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
 	return 0;
 }
 
-static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
+static int i915_reset_gen7_sol_offsets(struct i915_execbuffer *eb)
 {
+	struct i915_request *rq = eb->request;
 	u32 *cs;
 	int i;
 
-	if (!IS_GEN(rq->engine->i915, 7) || rq->engine->id != RCS0) {
-		drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n");
+	if (!IS_GEN(eb->engine->i915, 7) || eb->engine->id != RCS0) {
+		drm_dbg(&eb->i915->drm, "sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
 
@@ -2536,7 +2538,7 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
 		return err;
 
 	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
-		err = i915_reset_gen7_sol_offsets(eb->request);
+		err = i915_reset_gen7_sol_offsets(eb);
 		if (err)
 			return err;
 	}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 9935a2e59df0..2247d760cb38 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -502,7 +502,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
 	rcu_read_unlock();
 
 	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
-		engine = to_request(fence)->engine;
+		engine = i915_request_get_engine(to_request(fence));
 	dma_fence_put(fence);
 
 	return engine;
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
index 8dfd8f656aaa..9d2a1a1cbc48 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -29,7 +29,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
 	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
 	*cs++ = lower_32_bits(offset);
 	*cs++ = upper_32_bits(offset);
-	*cs++ = intel_sseu_make_rpcs(rq->engine->gt, &sseu);
+	*cs++ = intel_sseu_make_rpcs(ce->engine->gt, &sseu);
 
 	intel_ring_advance(rq, cs);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 6e717264f69c..9fc35c56fae6 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -737,7 +737,7 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
 	if (!frame)
 		return -ENOMEM;
 
-	frame->rq.engine = engine;
+	frame->rq.__engine = engine;
 	frame->rq.context = ce;
 	rcu_assign_pointer(frame->rq.timeline, ce->timeline);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 0f0bf9e4d34f..d9dee4262c58 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -59,7 +59,7 @@ static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
 static void heartbeat_commit(struct i915_request *rq,
 			     const struct i915_sched_attr *attr)
 {
-	idle_pulse(rq->engine, rq);
+	idle_pulse(i915_request_get_engine(rq), rq);
 
 	__i915_request_commit(rq);
 	__i915_request_queue(rq, attr);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 44948abe4bf8..cfcc9b491faf 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -114,7 +114,7 @@ static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	struct i915_request *rq = to_request(fence);
 
-	ewma__engine_latency_add(&rq->engine->latency,
+	ewma__engine_latency_add(&i915_request_get_engine(rq)->latency,
 				 ktime_us_delta(rq->fence.timestamp,
 						rq->duration.emitted));
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index d785c0aae51a..b528b8066cca 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -202,6 +202,11 @@ static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
 	return container_of(engine, struct virtual_engine, base);
 }
 
+static struct virtual_engine *to_virtual_context(struct intel_context *ce)
+{
+	return container_of(ce, struct virtual_engine, context);
+}
+
 static struct i915_request *
 __active_request(const struct intel_timeline * const tl,
 		 struct i915_request *rq,
@@ -366,7 +371,8 @@ static bool need_preempt(const struct intel_engine_cs *engine,
 }
 
 __maybe_unused static bool
-assert_priority_queue(const struct i915_request *prev,
+assert_priority_queue(const struct intel_engine_cs *engine,
+		      const struct i915_request *prev,
 		      const struct i915_request *next)
 {
 	/*
@@ -382,7 +388,7 @@ assert_priority_queue(const struct i915_request *prev,
 	if (rq_deadline(prev) <= rq_deadline(next))
 		return true;
 
-	ENGINE_TRACE(prev->engine,
+	ENGINE_TRACE(engine,
 		     "next %llx:%lld dl %lld is before prev %llx:%lld dl %lld\n",
 		     next->fence.context, next->fence.seqno, rq_deadline(next),
 		     prev->fence.context, prev->fence.seqno, rq_deadline(prev));
@@ -391,7 +397,9 @@ assert_priority_queue(const struct i915_request *prev,
 }
 
 static void
-execlists_context_status_change(struct i915_request *rq, unsigned long status)
+execlists_context_status_change(struct intel_engine_cs *engine,
+				struct i915_request *rq,
+				unsigned long status)
 {
 	/*
 	 * Only used when GVT-g is enabled now. When GVT-g is disabled,
@@ -400,7 +408,7 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status)
 	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
 		return;
 
-	atomic_notifier_call_chain(&rq->engine->context_status_notifier,
+	atomic_notifier_call_chain(&engine->context_status_notifier,
 				   status, rq);
 }
 
@@ -443,9 +451,8 @@ static void reset_active(struct i915_request *rq,
 }
 
 static struct intel_engine_cs *
-__execlists_schedule_in(struct i915_request *rq)
+__execlists_schedule_in(struct intel_engine_cs *engine, struct i915_request *rq)
 {
-	struct intel_engine_cs * const engine = rq->engine;
 	struct intel_context * const ce = rq->context;
 
 	intel_context_get(ce);
@@ -480,7 +487,7 @@ __execlists_schedule_in(struct i915_request *rq)
 	__intel_gt_pm_get(engine->gt);
 	if (engine->fw_domain && !engine->fw_active++)
 		intel_uncore_forcewake_get(engine->uncore, engine->fw_domain);
-	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+	execlists_context_status_change(engine, rq, INTEL_CONTEXT_SCHEDULE_IN);
 	intel_engine_context_in(engine);
 
 	CE_TRACE(ce, "schedule-in, ccid:%x\n", ce->lrc.ccid);
@@ -488,20 +495,23 @@ __execlists_schedule_in(struct i915_request *rq)
 	return engine;
 }
 
-static void execlists_schedule_in(struct i915_request *rq, int idx)
+static void
+execlists_schedule_in(struct intel_engine_cs *engine,
+		      struct i915_request *rq,
+		      int idx)
 {
 	struct intel_context * const ce = rq->context;
 	struct intel_engine_cs *old;
 
-	GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
+	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
 	trace_i915_request_in(rq, idx);
 
 	old = ce->inflight;
 	if (!__intel_context_inflight_count(old))
-		old = __execlists_schedule_in(rq);
+		old = __execlists_schedule_in(engine, rq);
 	WRITE_ONCE(ce->inflight, ptr_inc(old));
 
-	GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
+	GEM_BUG_ON(intel_context_inflight(ce) != engine);
 }
 
 static void
@@ -530,7 +540,7 @@ resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
 	/* Resubmit the queue in execution order */
 	spin_lock(&se->lock);
 	list_for_each_entry_from(pos, &tl->requests, link) {
-		if (pos->engine == &ve->base)
+		if (pos->__engine == &ve->base)
 			break;
 
 		__i915_request_requeue(pos, &ve->base);
@@ -541,7 +551,9 @@ resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
 	spin_unlock_irq(&pv->lock);
 }
 
-static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
+static void kick_siblings(struct intel_engine_cs *engine,
+			  struct i915_request *rq,
+			  struct intel_context *ce)
 {
 	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
 
@@ -552,17 +564,17 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
 	 * same as other native request.
 	 */
 	if (i915_request_in_priority_queue(rq) &&
-	    rq->execution_mask != rq->engine->mask)
+	    rq->execution_mask != engine->mask)
 		resubmit_virtual_request(rq, ve);
 
 	if (!i915_sched_is_idle(&ve->base.sched))
 		i915_sched_kick(&ve->base.sched);
 }
 
-static void __execlists_schedule_out(struct i915_request * const rq,
+static void __execlists_schedule_out(struct intel_engine_cs *engine,
+				     struct i915_request * const rq,
 				     struct intel_context * const ce)
 {
-	struct intel_engine_cs * const engine = rq->engine;
 	unsigned int ccid;
 
 	/*
@@ -609,7 +621,7 @@ static void __execlists_schedule_out(struct i915_request * const rq,
 		__set_bit(ccid - 1, &engine->context_tag);
 	}
 	intel_engine_context_out(engine);
-	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+	execlists_context_status_change(engine, rq, INTEL_CONTEXT_SCHEDULE_OUT);
 	if (engine->fw_domain && !--engine->fw_active)
 		intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
 	intel_gt_pm_put_async(engine->gt);
@@ -624,13 +636,14 @@ static void __execlists_schedule_out(struct i915_request * const rq,
 	 * each virtual tree and kick everyone again.
 	 */
 	if (ce->engine != engine)
-		kick_siblings(rq, ce);
+		kick_siblings(engine, rq, ce);
 
 	WRITE_ONCE(ce->inflight, NULL);
 	intel_context_put(ce);
 }
 
-static inline void execlists_schedule_out(struct i915_request *rq)
+static inline void
+execlists_schedule_out(struct intel_engine_cs *engine, struct i915_request *rq)
 {
 	struct intel_context * const ce = rq->context;
 
@@ -639,7 +652,7 @@ static inline void execlists_schedule_out(struct i915_request *rq)
 	GEM_BUG_ON(!ce->inflight);
 	ce->inflight = ptr_dec(ce->inflight);
 	if (!__intel_context_inflight_count(ce->inflight))
-		__execlists_schedule_out(rq, ce);
+		__execlists_schedule_out(engine, rq, ce);
 
 	i915_request_put(rq);
 }
@@ -928,11 +941,12 @@ static unsigned long i915_request_flags(const struct i915_request *rq)
 	return READ_ONCE(rq->fence.flags);
 }
 
-static bool can_merge_rq(const struct i915_request *prev,
+static bool can_merge_rq(const struct intel_engine_cs *engine,
+			 const struct i915_request *prev,
 			 const struct i915_request *next)
 {
 	GEM_BUG_ON(prev == next);
-	GEM_BUG_ON(!assert_priority_queue(prev, next));
+	GEM_BUG_ON(!assert_priority_queue(engine, prev, next));
 
 	/*
 	 * We do not submit known completed requests. Therefore if the next
@@ -1207,7 +1221,6 @@ static void virtual_requeue(struct intel_engine_cs *engine,
 			/* lost the race to a sibling */
 			goto unlock;
 
-		GEM_BUG_ON(rq->engine != &ve->base);
 		GEM_BUG_ON(rq->context != &ve->context);
 
 		if (last && !__can_merge_ctx(last->context, rq->context)) {
@@ -1251,7 +1264,7 @@ static void virtual_requeue(struct intel_engine_cs *engine,
 			if (!ve->context.inflight)
 				WRITE_ONCE(ve->context.inflight, engine);
 
-			GEM_BUG_ON(rq->engine != engine);
+			GEM_BUG_ON(rq->__engine != engine);
 			GEM_BUG_ON(ve->siblings[0] != engine);
 			GEM_BUG_ON(intel_context_inflight(rq->context) != engine);
 
@@ -1437,7 +1450,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			 * second request, and so we never need to tell the
 			 * hardware about the first.
 			 */
-			if (last && !can_merge_rq(last, rq)) {
+			if (last && !can_merge_rq(engine, last, rq)) {
 				/*
 				 * If we are on the second port and cannot
 				 * combine this request with the last, then we
@@ -1519,7 +1532,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		   (port - execlists->pending) * sizeof(*port))) {
 		*port = NULL;
 		while (port-- != execlists->pending)
-			execlists_schedule_in(*port, port - execlists->pending);
+			execlists_schedule_in(engine, *port,
+					      port - execlists->pending);
 
 		WRITE_ONCE(execlists->yield, -1);
 		set_preempt_timeout(engine, *active);
@@ -1917,11 +1931,12 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
 	return inactive;
 }
 
-static void post_process_csb(struct i915_request **port,
+static void post_process_csb(struct intel_engine_cs *engine,
+			     struct i915_request **port,
 			     struct i915_request **last)
 {
 	while (port != last)
-		execlists_schedule_out(*port++);
+		execlists_schedule_out(engine, *port++);
 }
 
 struct execlists_capture {
@@ -1934,7 +1949,6 @@ static void execlists_capture_work(struct work_struct *work)
 {
 	struct execlists_capture *cap = container_of(work, typeof(*cap), work);
 	const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
-	struct intel_engine_cs *engine = cap->rq->engine;
 	struct intel_gt_coredump *gt = cap->error->gt;
 	struct intel_engine_capture_vma *vma;
 
@@ -1956,7 +1970,7 @@ static void execlists_capture_work(struct work_struct *work)
 	i915_gpu_coredump_put(cap->error);
 
 	/* Return this request and all that depend upon it for signaling */
-	i915_sched_resume_request(engine, cap->rq);
+	i915_sched_resume_request(cap->rq->__engine, cap->rq);
 	i915_request_put(cap->rq);
 
 	kfree(cap);
@@ -2187,7 +2201,7 @@ static void execlists_submission_tasklet(struct tasklet_struct *t)
 		start_timeslice(engine);
 	}
 
-	post_process_csb(post, inactive);
+	post_process_csb(engine, post, inactive);
 	rcu_read_unlock();
 }
 
@@ -2638,7 +2652,7 @@ static void execlists_reset_csb(struct intel_engine_cs *engine, bool stalled)
 	execlists_reset_active(engine, true);
 
 	inactive = cancel_port_requests(execlists, inactive);
-	post_process_csb(post, inactive);
+	post_process_csb(engine, post, inactive);
 	rcu_read_unlock();
 }
 
@@ -3390,13 +3404,13 @@ virtual_find_bond(struct virtual_engine *ve,
 static void
 virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
 {
-	struct virtual_engine *ve = to_virtual_engine(rq->engine);
+	struct virtual_engine *ve = to_virtual_context(rq->context);
 	intel_engine_mask_t allowed, exec;
 	struct ve_bond *bond;
 
-	allowed = ~to_request(signal)->engine->mask;
+	allowed = ~to_request(signal)->__engine->mask;
 
-	bond = virtual_find_bond(ve, to_request(signal)->engine);
+	bond = virtual_find_bond(ve, to_request(signal)->__engine);
 	if (bond)
 		allowed &= bond->sibling_mask;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 35ff68ada4f1..2df53e8f7be9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -511,7 +511,7 @@ static int __engines_record_defaults(struct intel_gt *gt)
 			err = PTR_ERR(state);
 			goto out;
 		}
-		rq->engine->default_state = state;
+		i915_request_get_engine(rq)->default_state = state;
 	}
 
 out:
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index 6b98f73fa48e..68e498b37e19 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -207,7 +207,7 @@ int intel_renderstate_init(struct intel_renderstate *so,
 int intel_renderstate_emit(struct intel_renderstate *so,
 			   struct i915_request *rq)
 {
-	struct intel_engine_cs *engine = rq->engine;
+	struct intel_engine_cs *engine = i915_request_get_engine(rq);
 	int err;
 
 	if (!so->vma)
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 477b948da1f4..7d032e6a80aa 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -790,7 +790,7 @@ static void nop_submit_request(struct i915_request *request)
 
 	request = i915_request_mark_eio(request);
 	if (request) {
-		struct intel_engine_cs *engine = request->engine;
+		struct intel_engine_cs *engine = i915_request_get_engine(request);
 
 		i915_request_submit(request, engine);
 		intel_engine_signal_breadcrumbs(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
index 472e0b710d85..45962ce1e015 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
@@ -71,10 +71,10 @@ static inline void runtime_stop(struct intel_context *ce)
 	WRITE_ONCE(stats->active, 0);
 }
 
-static struct intel_engine_cs *__schedule_in(struct i915_request *rq)
+static struct intel_engine_cs *
+__schedule_in(struct intel_engine_cs *engine, struct i915_request *rq)
 {
 	struct intel_context *ce = rq->context;
-	struct intel_engine_cs *engine = rq->engine;
 
 	intel_context_get(ce);
 
@@ -89,26 +89,26 @@ static struct intel_engine_cs *__schedule_in(struct i915_request *rq)
 	return engine;
 }
 
-static void schedule_in(struct i915_request *rq)
+static void schedule_in(struct intel_engine_cs *engine, struct i915_request *rq)
 {
 	struct intel_context * const ce = rq->context;
 	struct intel_engine_cs *old;
 
-	GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
+	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
 
 	old = ce->inflight;
 	if (!old)
-		old = __schedule_in(rq);
+		old = __schedule_in(engine, rq);
 	WRITE_ONCE(ce->inflight, ptr_inc(old));
 
-	GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
+	GEM_BUG_ON(intel_context_inflight(ce) != engine);
 	GEM_BUG_ON(!intel_context_inflight_count(ce));
 }
 
-static void __schedule_out(struct i915_request *rq)
+static void
+__schedule_out(struct intel_engine_cs *engine, struct i915_request *rq)
 {
 	struct intel_context *ce = rq->context;
-	struct intel_engine_cs *engine = rq->engine;
 
 	CE_TRACE(ce, "schedule-out\n");
 
@@ -124,15 +124,16 @@ static void __schedule_out(struct i915_request *rq)
 	intel_gt_pm_put_async(engine->gt);
 }
 
-static void schedule_out(struct i915_request *rq)
+static void
+schedule_out(struct intel_engine_cs *engine, struct i915_request *rq)
 {
 	struct intel_context *ce = rq->context;
 
 	GEM_BUG_ON(!ce->inflight);
 	ce->inflight = ptr_dec(ce->inflight);
 	if (!intel_context_inflight_count(ce)) {
-		GEM_BUG_ON(ce->inflight != rq->engine);
-		__schedule_out(rq);
+		GEM_BUG_ON(ce->inflight != engine);
+		__schedule_out(engine, rq);
 		WRITE_ONCE(ce->inflight, NULL);
 		intel_context_put(ce);
 	}
@@ -412,9 +413,9 @@ static void remap_l3(struct intel_ring *ring,
 
 static void switch_context(struct intel_ring *ring, struct i915_request *rq)
 {
-	struct intel_engine_cs *engine = rq->engine;
-	struct i915_address_space *cvm = current_vm(engine);
 	struct intel_context *ce = rq->context;
+	struct intel_engine_cs *engine = ce->engine;
+	struct i915_address_space *cvm = current_vm(engine);
 	struct i915_address_space *vm;
 
 	if (engine->wa_ctx.vma && ce != engine->kernel_context) {
@@ -576,7 +577,7 @@ static void dequeue(struct i915_sched *se, struct intel_engine_cs *engine)
 
 		copy_ports(el->inflight, el->pending, port - el->pending + 1);
 		while (port-- != first)
-			schedule_in(*port);
+			schedule_in(engine, *port);
 
 		write_tail(engine);
 
@@ -589,11 +590,12 @@ static void dequeue(struct i915_sched *se, struct intel_engine_cs *engine)
 	local_irq_enable(); /* flush irq_work *after* RING_TAIL write */
 }
 
-static void post_process_csb(struct i915_request **port,
+static void post_process_csb(struct intel_engine_cs *engine,
+			     struct i915_request **port,
 			     struct i915_request **last)
 {
 	while (port != last)
-		schedule_out(*port++);
+		schedule_out(engine, *port++);
 }
 
 static struct i915_request **
@@ -631,7 +633,7 @@ static void submission_tasklet(struct tasklet_struct *t)
 	if (!i915_sched_is_idle(se))
 		dequeue(se, engine);
 
-	post_process_csb(post, inactive);
+	post_process_csb(engine, post, inactive);
 	rcu_read_unlock();
 }
 
@@ -696,7 +698,7 @@ static void ring_reset_csb(struct intel_engine_cs *engine)
 	intel_ring_reset(engine->legacy.ring, 0);
 	set_current_context(&engine->legacy.context, NULL);
 
-	post_process_csb(post, inactive);
+	post_process_csb(engine, post, inactive);
 	rcu_read_unlock();
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index f69c05433fc7..089097cafbf7 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -413,7 +413,7 @@ static void reset_cancel(struct intel_engine_cs *engine)
 
 static void i9xx_submit_request(struct i915_request *request)
 {
-	struct intel_engine_cs *engine = request->engine;
+	struct intel_engine_cs *engine = i915_request_get_engine(request);
 
 	i915_request_submit(request, engine);
 	wmb(); /* paranoid flush writes out of the WCB before mmio */
@@ -942,7 +942,7 @@ static const struct intel_context_ops ring_context_ops = {
 
 static void gen6_bsd_submit_request(struct i915_request *request)
 {
-	struct intel_uncore *uncore = request->engine->uncore;
+	struct intel_uncore *uncore = i915_request_get_engine(request)->uncore;
 
 	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 2c78d61e7ea9..174fd30d8735 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -923,7 +923,7 @@ void intel_rps_boost(struct i915_request *rq)
 
 	/* Serializes with i915_request_retire() */
 	if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
-		struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
+		struct intel_rps *rps = &i915_request_get_engine(rq)->gt->rps;
 
 		if (atomic_fetch_inc(&rps->num_waiters))
 			return;
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 69052495c64a..e4c4628cbb59 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -695,7 +695,7 @@ void intel_gt_show_timelines(struct intel_gt *gt,
 		fence = i915_active_fence_get(&tl->last_request);
 		if (fence) {
 			drm_printf(m, ", engine: %s",
-				   to_request(fence)->engine->name);
+				   i915_request_get_engine(to_request(fence))->name);
 			dma_fence_put(fence);
 		}
 		drm_printf(m, " }\n");
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 4057bd0b8a24..a9e46cf88f84 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -771,8 +771,8 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
 
 int intel_engine_emit_ctx_wa(struct i915_request *rq)
 {
-	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
-	struct intel_engine_cs *engine = rq->engine;
+	struct intel_engine_cs *engine = i915_request_get_engine(rq);
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
 	struct i915_wa *wa;
 	unsigned int i;
 	u32 *cs;
@@ -2149,7 +2149,7 @@ wa_list_srm(struct i915_request *rq,
 	    const struct i915_wa_list *wal,
 	    struct i915_vma *vma)
 {
-	struct drm_i915_private *i915 = rq->engine->i915;
+	struct drm_i915_private *i915 = i915_request_get_engine(rq)->i915;
 	unsigned int i, count = 0;
 	const struct i915_wa *wa;
 	u32 srm, *cs;
@@ -2245,7 +2245,7 @@ static int engine_wa_list_verify(struct intel_context *ce,
 
 	err = 0;
 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
-		if (mcr_range(rq->engine->i915, i915_mmio_reg_offset(wa->reg)))
+		if (mcr_range(ce->engine->i915, i915_mmio_reg_offset(wa->reg)))
 			continue;
 
 		if (!wa_verify(wa, results[i], wal->name, from))
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index cb5c5721623a..f10778bedd83 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -75,7 +75,7 @@ static void advance(struct i915_request *request)
 	i915_request_mark_complete(request);
 	GEM_BUG_ON(!i915_request_completed(request));
 
-	intel_engine_signal_breadcrumbs(request->engine);
+	intel_engine_signal_breadcrumbs(i915_request_get_engine(request));
 }
 
 static void hw_delay_complete(struct timer_list *t)
@@ -205,7 +205,9 @@ static u32 *mock_emit_breadcrumb(const struct intel_engine_cs *engine,
 static void mock_submit_request(struct i915_request *request)
 {
 	struct mock_engine *engine =
-		container_of(request->engine, typeof(*engine), base);
+		container_of(i915_request_get_engine(request),
+			     typeof(*engine),
+			     base);
 	unsigned long flags;
 
 	i915_request_submit(request, &engine->base);
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index c7f912140f2f..c4da47f91d72 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -41,6 +41,7 @@ static int perf_end(struct intel_gt *gt)
 
 static int write_timestamp(struct i915_request *rq, int slot)
 {
+	struct intel_engine_cs *engine = i915_request_get_engine(rq);
 	u32 cmd;
 	u32 *cs;
 
@@ -49,10 +50,10 @@ static int write_timestamp(struct i915_request *rq, int slot)
 		return PTR_ERR(cs);
 
 	cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
-	if (INTEL_GEN(rq->engine->i915) >= 8)
+	if (INTEL_GEN(engine->i915) >= 8)
 		cmd++;
 	*cs++ = cmd;
-	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
+	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base));
 	*cs++ = i915_request_timeline(rq)->ggtt_offset + slot * sizeof(u32);
 	*cs++ = 0;
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index c9ef891bb6ef..df38298d463b 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -1067,7 +1067,7 @@ create_rewinder(struct intel_context *ce,
 	*cs++ = 0;
 
 	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
-	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
+	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(ce->engine->mmio_base));
 	*cs++ = offset + idx * sizeof(u32);
 	*cs++ = 0;
 
@@ -3966,9 +3966,9 @@ static int mask_virtual_engine(struct intel_gt *gt,
 			goto out;
 		}
 
-		if (request[n]->engine != siblings[nsibling - n - 1]) {
+		if (i915_request_get_engine(request[n]) != siblings[nsibling - n - 1]) {
 			pr_err("Executed on wrong sibling '%s', expected '%s'\n",
-			       request[n]->engine->name,
+			       i915_request_get_engine(request[n])->name,
 			       siblings[nsibling - n - 1]->name);
 			err = -EINVAL;
 			goto out;
@@ -4068,7 +4068,7 @@ static int slicein_virtual_engine(struct intel_gt *gt,
 	i915_request_add(rq);
 	if (i915_request_wait(rq, 0, timeout) < 0) {
 		GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
-			      __func__, rq->engine->name);
+			      __func__, i915_request_get_engine(rq)->name);
 		GEM_TRACE_DUMP();
 		intel_gt_set_wedged(gt);
 		err = -EIO;
@@ -4474,7 +4474,7 @@ static int bond_virtual_engine(struct intel_gt *gt,
 
 		if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
 			pr_err("Master request did not execute (on %s)!\n",
-			       rq[0]->engine->name);
+			       i915_request_get_engine(rq[0])->name);
 			err = -EIO;
 			goto out;
 		}
@@ -4486,11 +4486,11 @@ static int bond_virtual_engine(struct intel_gt *gt,
 				goto out;
 			}
 
-			if (rq[n + 1]->engine != siblings[n]) {
+			if (i915_request_get_engine(rq[n + 1]) != siblings[n]) {
 				pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
 				       siblings[n]->name,
-				       rq[n + 1]->engine->name,
-				       rq[0]->engine->name);
+				       i915_request_get_engine(rq[n + 1])->name,
+				       i915_request_get_engine(rq[0])->name);
 				err = -EINVAL;
 				goto out;
 			}
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 3050fa68e188..bc53c3c00eb1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -810,13 +810,15 @@ static int active_request_put(struct i915_request *rq)
 		return 0;
 
 	if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
+		struct intel_engine_cs *engine = i915_request_get_engine(rq);
+
 		GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n",
-			  rq->engine->name,
+			  engine->name,
 			  rq->fence.context,
 			  rq->fence.seqno);
 		GEM_TRACE_DUMP();
 
-		intel_gt_set_wedged(rq->engine->gt);
+		intel_gt_set_wedged(engine->gt);
 		err = -EIO;
 	}
 
@@ -1198,10 +1200,11 @@ static int igt_reset_wait(void *arg)
 
 	if (!wait_until_running(&h, rq)) {
 		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
+		struct intel_engine_cs *engine = i915_request_get_engine(rq);
 
 		pr_err("%s: Failed to start request %llx, at %x\n",
 		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
-		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
+		intel_engine_dump(engine, &p, "%s\n", engine->name);
 
 		intel_gt_set_wedged(gt);
 
@@ -1382,10 +1385,11 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
 
 	if (!wait_until_running(&h, rq)) {
 		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
+		struct intel_engine_cs *engine = i915_request_get_engine(rq);
 
 		pr_err("%s: Failed to start request %llx, at %x\n",
 		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
-		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
+		intel_engine_dump(engine, &p, "%s\n", engine->name);
 
 		intel_gt_set_wedged(gt);
 		goto out_reset;
@@ -1405,9 +1409,10 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
 
 	if (wait_for(!list_empty(&rq->fence.cb_list), 10)) {
 		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
+		struct intel_engine_cs *engine = i915_request_get_engine(rq);
 
 		pr_err("igt/evict_vma kthread did not wait\n");
-		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
+		intel_engine_dump(engine, &p, "%s\n", engine->name);
 
 		intel_gt_set_wedged(gt);
 		goto out_reset;
@@ -1415,7 +1420,7 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
 
 out_reset:
 	igt_global_reset_lock(gt);
-	fake_hangcheck(gt, rq->engine->mask);
+	fake_hangcheck(gt, i915_request_get_engine(rq)->mask);
 	igt_global_reset_unlock(gt);
 
 	if (tsk) {
@@ -1671,10 +1676,11 @@ static int igt_handle_error(void *arg)
 
 	if (!wait_until_running(&h, rq)) {
 		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
+		struct intel_engine_cs *engine = i915_request_get_engine(rq);
 
 		pr_err("%s: Failed to start request %llx, at %x\n",
 		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
-		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
+		intel_engine_dump(engine, &p, "%s\n", engine->name);
 
 		intel_gt_set_wedged(gt);
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index b7dd5646c882..391a14cc135f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -729,7 +729,7 @@ create_timestamp(struct intel_context *ce, void *slot, int idx)
 	*cs++ = 0;
 
 	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
-	*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(rq->engine->mmio_base));
+	*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(ce->engine->mmio_base));
 	*cs++ = offset + idx * sizeof(u32);
 	*cs++ = 0;
 
@@ -770,7 +770,7 @@ static int __lrc_timestamp(const struct lrc_timestamp *arg, bool preempt)
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
-	err = wait_for_submit(rq->engine, rq, HZ / 2);
+	err = wait_for_submit(arg->engine, rq, HZ / 2);
 	if (err)
 		goto err;
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index 44609d1c7780..770121a774c1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -128,15 +128,16 @@ static int read_mocs_table(struct i915_request *rq,
 			   const struct drm_i915_mocs_table *table,
 			   u32 *offset)
 {
+	struct intel_engine_cs *engine = i915_request_get_engine(rq);
 	u32 addr;
 
 	if (!table)
 		return 0;
 
-	if (HAS_GLOBAL_MOCS_REGISTERS(rq->engine->i915))
+	if (HAS_GLOBAL_MOCS_REGISTERS(engine->i915))
 		addr = global_mocs_offset();
 	else
-		addr = mocs_offset(rq->engine);
+		addr = mocs_offset(engine);
 
 	return read_regs(rq, addr, table->n_entries, offset);
 }
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 285cead849dd..b49bf9dd4917 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -132,7 +132,7 @@ static const u32 *__live_rc6_ctx(struct intel_context *ce)
 	}
 
 	cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
-	if (INTEL_GEN(rq->engine->i915) >= 8)
+	if (INTEL_GEN(ce->engine->i915) >= 8)
 		cmd++;
 
 	*cs++ = cmd;
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index dcc03522b277..6f0c9a9868c0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -422,18 +422,19 @@ int intel_timeline_mock_selftests(void)
 
 static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
 {
+	struct intel_engine_cs *engine = i915_request_get_engine(rq);
 	u32 *cs;
 
 	cs = intel_ring_begin(rq, 4);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
-	if (INTEL_GEN(rq->engine->i915) >= 8) {
+	if (INTEL_GEN(engine->i915) >= 8) {
 		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
 		*cs++ = addr;
 		*cs++ = 0;
 		*cs++ = value;
-	} else if (INTEL_GEN(rq->engine->i915) >= 4) {
+	} else if (INTEL_GEN(engine->i915) >= 4) {
 		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
 		*cs++ = 0;
 		*cs++ = addr;
@@ -760,7 +761,8 @@ static int emit_read_hwsp(struct i915_request *rq,
 			  u32 seqno, u32 hwsp,
 			  u32 *addr)
 {
-	const u32 gpr = i915_mmio_reg_offset(GEN8_RING_CS_GPR(rq->engine->mmio_base, 0));
+	struct intel_engine_cs *engine = i915_request_get_engine(rq);
+	const u32 gpr = i915_mmio_reg_offset(GEN8_RING_CS_GPR(engine->mmio_base, 0));
 	u32 *cs;
 
 	cs = intel_ring_begin(rq, 12);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index c4a6bb39266d..f5cce46d5c64 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -150,7 +150,8 @@ static inline int rq_prio(const struct i915_request *rq)
 	return rq->sched.attr.priority;
 }
 
-static struct i915_request *schedule_in(struct i915_request *rq, int idx)
+static struct i915_request *
+schedule_in(struct intel_engine_cs *engine, struct i915_request *rq, int idx)
 {
 	trace_i915_request_in(rq, idx);
 
@@ -161,16 +162,17 @@ static struct i915_request *schedule_in(struct i915_request *rq, int idx)
 	 * required if we generalise the inflight tracking.
 	 */
 
-	__intel_gt_pm_get(rq->engine->gt);
+	__intel_gt_pm_get(engine->gt);
 	return i915_request_get(rq);
 }
 
-static void schedule_out(struct i915_request *rq)
+static void
+schedule_out(struct intel_engine_cs *engine, struct i915_request *rq)
 {
 	trace_i915_request_out(rq);
 
-	intel_gt_pm_put_async(rq->engine->gt);
 	i915_request_put(rq);
+	intel_gt_pm_put_async(engine->gt);
 }
 
 static void __guc_dequeue(struct intel_engine_cs *engine)
@@ -207,7 +209,7 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 				if (port == last_port)
 					goto done;
 
-				*port = schedule_in(last,
+				*port = schedule_in(engine, last,
 						    port - execlists->inflight);
 				port++;
 			}
@@ -222,7 +224,7 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 	}
 done:
 	if (submit) {
-		*port = schedule_in(last, port - execlists->inflight);
+		*port = schedule_in(engine, last, port - execlists->inflight);
 		*++port = NULL;
 		guc_submit(engine, first, port);
 	}
@@ -244,7 +246,7 @@ static void guc_submission_tasklet(struct tasklet_struct *t)
 		if (!i915_request_completed(rq))
 			break;
 
-		schedule_out(rq);
+		schedule_out(engine, rq);
 	}
 	if (port != execlists->inflight) {
 		int idx = port - execlists->inflight;
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 85ee0b5de8c1..2925fcadd83c 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -200,7 +200,7 @@ restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
 	int ret;
 	struct engine_mmio *mmio;
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_engine_cs *engine = i915_request_get_engine(req);
 	int ring_id = engine->id;
 	int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];
 
@@ -254,7 +254,8 @@ restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
 		*cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
 		*cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
 		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n",
-			      *(cs-2), *(cs-1), vgpu->id, req->engine->id);
+			      *(cs-2), *(cs-1), vgpu->id,
+			      i915_request_get_engine(req)->id);
 
 	}
 
@@ -281,7 +282,8 @@ restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
 		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
 		*cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
 		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n",
-			      *(cs-2), *(cs-1), vgpu->id, req->engine->id);
+			      *(cs-2), *(cs-1), vgpu->id,
+			      i915_request_get_engine(req)->id);
 
 	}
 
@@ -315,7 +317,7 @@ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
 		goto out;
 
 	/* no MOCS register in context except render engine */
-	if (req->engine->id != RCS0)
+	if (i915_request_get_engine(req)->id != RCS0)
 		goto out;
 
 	ret = restore_render_mocs_control_for_inhibit(vgpu, req);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 4167eeaf65f4..f308dff69485 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -285,10 +285,11 @@ static int shadow_context_status_change(struct notifier_block *nb,
 		unsigned long action, void *data)
 {
 	struct i915_request *rq = data;
+	struct intel_engine_cs *engine = i915_request_get_engine(rq);
 	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
-				shadow_ctx_notifier_block[rq->engine->id]);
+				shadow_ctx_notifier_block[engine->id]);
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	enum intel_engine_id ring_id = rq->engine->id;
+	enum intel_engine_id ring_id = engine->id;
 	struct intel_vgpu_workload *workload;
 	unsigned long flags;
 
@@ -298,7 +299,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 		    scheduler->engine_owner[ring_id]) {
 			/* Switch ring from vGPU to host. */
 			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
-					      NULL, rq->engine);
+					      NULL, engine);
 			scheduler->engine_owner[ring_id] = NULL;
 		}
 		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
@@ -316,7 +317,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
 			/* Switch ring from host to vGPU or vGPU to vGPU. */
 			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
-					      workload->vgpu, rq->engine);
+					      workload->vgpu, engine);
 			scheduler->engine_owner[ring_id] = workload->vgpu;
 		} else
 			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
@@ -325,11 +326,11 @@ static int shadow_context_status_change(struct notifier_block *nb,
 		atomic_set(&workload->shadow_ctx_active, 1);
 		break;
 	case INTEL_CONTEXT_SCHEDULE_OUT:
-		save_ring_hw_state(workload->vgpu, rq->engine);
+		save_ring_hw_state(workload->vgpu, engine);
 		atomic_set(&workload->shadow_ctx_active, 0);
 		break;
 	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
-		save_ring_hw_state(workload->vgpu, rq->engine);
+		save_ring_hw_state(workload->vgpu, engine);
 		break;
 	default:
 		WARN_ON(1);
@@ -904,9 +905,10 @@ check_shadow_context_ppgtt(struct execlist_ring_context *c, struct intel_vgpu_mm
 static void update_guest_context(struct intel_vgpu_workload *workload)
 {
 	struct i915_request *rq = workload->req;
+	struct intel_engine_cs *engine = i915_request_get_engine(rq);
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct execlist_ring_context *shadow_ring_context;
-	struct intel_context *ctx = workload->req->context;
+	struct intel_context *ctx = rq->context;
 	void *context_base;
 	void *src;
 	unsigned long context_gpa, context_page_num;
@@ -917,7 +919,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	u32 head, tail;
 	u16 wrap_count;
 
-	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
+	gvt_dbg_sched("ring id %d workload lrca %x\n", engine->id,
 		      workload->ctx_desc.lrca);
 
 	GEM_BUG_ON(!intel_context_is_pinned(ctx));
@@ -935,14 +937,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 
 	head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
 
-	ring_base = rq->engine->mmio_base;
+	ring_base = engine->mmio_base;
 	vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
 	vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
 
-	context_page_num = rq->engine->context_size;
+	context_page_num = engine->context_size;
 	context_page_num = context_page_num >> PAGE_SHIFT;
 
-	if (IS_BROADWELL(rq->engine->i915) && rq->engine->id == RCS0)
+	if (IS_BROADWELL(engine->i915) && engine->id == RCS0)
 		context_page_num = 19;
 
 	context_base = (void *) ctx->lrc_reg_state -
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 3bc616cc1ad2..02c5ab8eb57e 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -1014,7 +1014,7 @@ static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
 
 void i915_request_add_active_barriers(struct i915_request *rq)
 {
-	struct intel_engine_cs *engine = rq->engine;
+	struct intel_engine_cs *engine = i915_request_get_engine(rq);
 	struct llist_node *node, *next;
 	unsigned long flags;
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 46ae475c84f2..5ebf0e5166f8 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -58,7 +58,7 @@ static struct i915_global_request {
 
 static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 {
-	return dev_name(to_request(fence)->engine->i915->drm.dev);
+	return dev_name(to_request(fence)->__engine->i915->drm.dev);
 }
 
 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
@@ -154,7 +154,7 @@ static void i915_fence_release(struct dma_fence *fence)
 	 * can be a physical engine with the exact corresponding mask.
 	 */
 	if (is_power_of_2(rq->execution_mask) &&
-	    !cmpxchg(&rq->engine->request_pool, NULL, rq))
+	    !cmpxchg(&rq->__engine->request_pool, NULL, rq))
 		return;
 
 	kmem_cache_free(global.slab_requests, rq);
@@ -254,9 +254,9 @@ static void remove_from_engine(struct i915_request *rq)
 	 * engine lock. The simple ploy we use is to take the lock then
 	 * check that the rq still belongs to the newly locked engine.
 	 */
-	locked = READ_ONCE(rq->engine);
+	locked = READ_ONCE(rq->__engine);
 	spin_lock_irq(&locked->sched.lock);
-	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
+	while (unlikely(locked != (engine = READ_ONCE(rq->__engine)))) {
 		spin_unlock(&locked->sched.lock);
 		spin_lock(&engine->sched.lock);
 		locked = engine;
@@ -308,7 +308,7 @@ bool i915_request_retire(struct i915_request *rq)
 	}
 
 	if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
-		atomic_dec(&rq->engine->gt->rps.num_waiters);
+		atomic_dec(&i915_request_get_engine(rq)->gt->rps.num_waiters);
 
 	/*
 	 * We only loosely track inflight requests across preemption,
@@ -559,7 +559,7 @@ void i915_request_submit(struct i915_request *request,
 	/* Will be called from irq-context when using foreign fences. */
 	spin_lock_irqsave(&se->lock, flags);
 
-	__i915_request_submit(request, engine);
+	__i915_request_submit(request, i915_request_get_engine(request));
 
 	spin_unlock_irqrestore(&se->lock, flags);
 }
@@ -784,7 +784,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	}
 
 	rq->context = ce;
-	rq->engine = ce->engine;
+	rq->__engine = ce->engine;
 	rq->ring = ce->ring;
 	rq->execution_mask = ce->engine->mask;
 
@@ -831,7 +831,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	 * the beginning of the ring as well.
 	 */
 	rq->reserved_space =
-		2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);
+		2 * ce->engine->emit_fini_breadcrumb_dw * sizeof(u32);
 
 	/*
 	 * Record the position of the start of the request so that
@@ -986,12 +986,12 @@ __emit_semaphore_wait(struct i915_request *to,
 		      struct i915_request *from,
 		      u32 seqno)
 {
-	const int has_token = INTEL_GEN(to->engine->i915) >= 12;
+	const int has_token = INTEL_GEN(i915_request_get_engine(to)->i915) >= 12;
 	u32 hwsp_offset;
 	int len, err;
 	u32 *cs;
 
-	GEM_BUG_ON(INTEL_GEN(to->engine->i915) < 8);
+	GEM_BUG_ON(INTEL_GEN(i915_request_get_engine(to)->i915) < 8);
 	GEM_BUG_ON(i915_request_has_initial_breadcrumb(to));
 
 	/* We need to pin the signaler's HWSP until we are finished reading. */
@@ -1037,7 +1037,7 @@ emit_semaphore_wait(struct i915_request *to,
 		    struct i915_request *from,
 		    gfp_t gfp)
 {
-	const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
+	const intel_engine_mask_t mask = READ_ONCE(from->__engine)->mask;
 	struct i915_sw_fence *wait = &to->submit;
 
 	if (!intel_context_use_semaphores(to->context))
@@ -1072,7 +1072,7 @@ emit_semaphore_wait(struct i915_request *to,
 	if (__emit_semaphore_wait(to, from, from->fence.seqno))
 		goto await_fence;
 
-	to->sched.semaphores |= mask & ~to->engine->mask;
+	to->sched.semaphores |= mask & ~to->__engine->mask;
 	wait = &to->semaphore;
 
 await_fence:
@@ -1186,7 +1186,7 @@ __i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
 {
 	mark_external(rq);
 	return i915_sw_fence_await_dma_fence(&rq->submit, fence,
-					     i915_fence_context_timeout(rq->engine->i915,
+					     i915_fence_context_timeout(i915_request_get_engine(rq)->i915,
 									fence->context),
 					     I915_FENCE_GFP);
 }
@@ -1276,7 +1276,7 @@ await_request_submit(struct i915_request *to, struct i915_request *from)
 	 * the waiter to be submitted immediately to the physical engine
 	 * as it may then bypass the virtual request.
 	 */
-	if (to->engine == READ_ONCE(from->engine))
+	if (to->__engine == READ_ONCE(from->__engine))
 		return i915_sw_fence_await_sw_fence_gfp(&to->submit,
 							&from->submit,
 							I915_FENCE_GFP);
@@ -1444,7 +1444,7 @@ static bool in_order_submission(const struct i915_request *prev,
 	if (likely(prev->context == rq->context))
 		return true;
 
-	return is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask);
+	return is_power_of_2(READ_ONCE(prev->__engine)->mask | rq->__engine->mask);
 }
 
 static struct i915_request *
@@ -1518,7 +1518,7 @@ __i915_request_add_to_timeline(struct i915_request *rq)
  */
 struct i915_request *__i915_request_commit(struct i915_request *rq)
 {
-	struct intel_engine_cs *engine = rq->engine;
+	struct intel_engine_cs *engine = i915_request_get_engine(rq);
 	struct intel_ring *ring = rq->ring;
 	u32 *cs;
 
@@ -1661,7 +1661,7 @@ static bool __i915_spin_request(struct i915_request * const rq, int state)
 	 * takes to sleep on a request, on the order of a microsecond.
 	 */
 
-	timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
+	timeout_ns = READ_ONCE(i915_request_get_engine(rq)->props.max_busywait_duration_ns);
 	timeout_ns += local_clock_ns(&cpu);
 	do {
 		if (dma_fence_is_signaled(&rq->fence))
@@ -1731,7 +1731,7 @@ long i915_request_wait(struct i915_request *rq,
 	 * serialise wait/reset with an explicit lock, we do want
 	 * lockdep to detect potential dependency cycles.
 	 */
-	mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
+	mutex_acquire(&i915_request_get_engine(rq)->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
 
 	/*
 	 * Optimistic spin before touching IRQs.
@@ -1822,7 +1822,7 @@ long i915_request_wait(struct i915_request *rq,
 	GEM_BUG_ON(!list_empty(&wait.cb.node));
 
 out:
-	mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
+	mutex_release(&i915_request_get_engine(rq)->gt->reset.mutex.dep_map, _THIS_IP_);
 	trace_i915_request_wait_end(rq);
 	return timeout;
 }
@@ -1849,7 +1849,7 @@ static char queue_status(const struct i915_request *rq)
 		return 'S';
 
 	if (i915_request_is_ready(rq))
-		return intel_engine_is_virtual(rq->engine) ? 'V' : 'R';
+		return intel_engine_is_virtual(i915_request_get_engine(rq)) ? 'V' : 'R';
 
 	return 'U';
 }
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 45c494035cdd..9c6ea5fa7b13 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -56,7 +56,8 @@ struct i915_capture_list {
 
 #define RQ_TRACE(rq, fmt, ...) do {					\
 	const struct i915_request *rq__ = (rq);				\
-	ENGINE_TRACE(rq__->engine, "fence " RQ_FMT ", current %d " fmt,	\
+	ENGINE_TRACE(i915_request_get_engine(rq__),			\
+		     "fence " RQ_FMT ", current %d " fmt,		\
 		     RQ_ARG(rq__), hwsp_seqno(rq__), ##__VA_ARGS__);	\
 } while (0)
 
@@ -175,7 +176,7 @@ struct i915_request {
 	 * i915_request_free() will then decrement the refcount on the
 	 * context.
 	 */
-	struct intel_engine_cs *engine;
+	struct intel_engine_cs *__engine;
 	struct intel_context *context;
 	struct intel_ring *ring;
 	struct intel_timeline __rcu *timeline;
@@ -596,7 +597,13 @@ static inline void i915_request_clear_hold(struct i915_request *rq)
 static inline struct i915_sched *
 i915_request_get_scheduler(const struct i915_request *rq)
 {
-	return intel_engine_get_scheduler(rq->engine);
+	return intel_engine_get_scheduler(rq->__engine);
+}
+
+static inline struct intel_engine_cs *
+i915_request_get_engine(const struct i915_request *rq)
+{
+	return rq->context->engine;
 }
 
 static inline struct intel_timeline *
@@ -654,7 +661,7 @@ static inline bool i915_request_is_executing(const struct i915_request *rq)
 
 static inline bool i915_request_use_semaphores(const struct i915_request *rq)
 {
-	return intel_engine_has_semaphores(rq->engine);
+	return intel_engine_has_semaphores(i915_request_get_engine(rq));
 }
 
 static inline bool i915_request_use_busywait(const struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index c8b5152f28e2..75dab453bfbc 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -31,12 +31,12 @@ static struct i915_global_scheduler {
  */
 #define lock_engine_irqsave(rq, flags) ({ \
 	struct i915_request * const rq__ = (rq); \
-	struct intel_engine_cs *engine__ = READ_ONCE(rq__->engine); \
+	struct intel_engine_cs *engine__ = READ_ONCE(rq__->__engine); \
 \
 	spin_lock_irqsave(&engine__->sched.lock, (flags)); \
-	while (engine__ != READ_ONCE((rq__)->engine)) { \
+	while (engine__ != READ_ONCE((rq__)->__engine)) { \
 		spin_unlock(&engine__->sched.lock); \
-		engine__ = READ_ONCE(rq__->engine); \
+		engine__ = READ_ONCE(rq__->__engine); \
 		spin_lock(&engine__->sched.lock); \
 	} \
 \
@@ -132,7 +132,7 @@ static void i915_sched_init_ipi(struct i915_sched_ipi *ipi)
 
 static bool match_ring(const struct i915_request *rq)
 {
-	const struct intel_engine_cs *engine = rq->engine;
+	const struct intel_engine_cs *engine = i915_request_get_engine(rq);
 	const struct intel_ring *ring = rq->ring;
 
 	if (intel_ring_is_internal(ring))
@@ -598,7 +598,7 @@ static bool is_first_priolist(const struct i915_sched *se,
 
 static bool __i915_request_set_deadline(struct i915_request *rq, u64 deadline)
 {
-	struct intel_engine_cs *engine = rq->engine;
+	struct intel_engine_cs *engine = rq->__engine;
 	struct i915_sched *se = intel_engine_get_scheduler(engine);
 	struct list_head *pos = &rq->sched.signalers_list;
 	struct list_head *plist;
@@ -626,7 +626,7 @@ static bool __i915_request_set_deadline(struct i915_request *rq, u64 deadline)
 				if (__i915_request_is_complete(s))
 					continue;
 
-				if (s->engine != engine) {
+				if (s->__engine != engine) {
 					ipi_deadline(s, deadline);
 					continue;
 				}
@@ -648,7 +648,7 @@ static bool __i915_request_set_deadline(struct i915_request *rq, u64 deadline)
 		 * any preemption required, be dealt with upon submission.
 		 * See engine->submit_request()
 		 */
-		GEM_BUG_ON(rq->engine != engine);
+		GEM_BUG_ON(rq->__engine != engine);
 		if (i915_request_in_priority_queue(rq))
 			remove_from_priolist(se, rq, plist, true);
 	} while ((rq = stack_pop(rq, &pos)));
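
Note the asymmetry in the dependency walk: a signaler on the same engine is
pushed onto the DFS stack and updated directly under the lock already held,
while a signaler on another engine is handed to ipi_deadline() rather than
locking that engine as well. Nesting the two sched locks would be an ABBA
hazard, sketched here for illustration:

/*
 * CPU0: lock(A->sched.lock); ...; lock(B->sched.lock);
 * CPU1: lock(B->sched.lock); ...; lock(A->sched.lock);
 *
 * Each CPU ends up waiting for the lock the other holds. The IPI
 * instead queues the cross-engine update to be applied later, under
 * the remote engine's lock alone.
 */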
@@ -857,7 +857,7 @@ static void ipi_priority(struct i915_request *rq, int prio)
 
 static bool __i915_request_set_priority(struct i915_request *rq, int prio)
 {
-	struct intel_engine_cs *engine = rq->engine;
+	struct intel_engine_cs *engine = rq->__engine;
 	struct list_head *pos = &rq->sched.signalers_list;
 	bool kick = false;
 
@@ -899,7 +899,7 @@ static bool __i915_request_set_priority(struct i915_request *rq, int prio)
 				if (__i915_request_is_complete(s))
 					continue;
 
-				if (s->engine != engine) {
+				if (s->__engine != engine) {
 					ipi_priority(s, prio);
 					continue;
 				}
@@ -923,7 +923,7 @@ static bool __i915_request_set_priority(struct i915_request *rq, int prio)
 		 * any preemption required, be dealt with upon submission.
 		 * See engine->submit_request()
 		 */
-		GEM_BUG_ON(rq->engine != engine);
+		GEM_BUG_ON(rq->__engine != engine);
 		if (i915_request_is_ready(rq) &&
 		    set_earliest_deadline(rq, rq_deadline(rq)))
 			kick = true;
@@ -1025,7 +1025,7 @@ static void __defer_request(struct intel_engine_cs *engine,
 				continue;
 
 			/* Leave semaphores spinning on the other engines */
-			if (w->engine != engine)
+			if (w->__engine != engine)
 				continue;
 
 			/* No waiter should start before its signaler */
@@ -1060,7 +1060,7 @@ static void __defer_request(struct intel_engine_cs *engine,
 		WRITE_ONCE(rq->sched.deadline, deadline);
 
 		/* Note list is reversed for waiters wrt signal hierarchy */
-		GEM_BUG_ON(rq->engine != engine);
+		GEM_BUG_ON(rq->__engine != engine);
 		remove_from_priolist(se, rq, &dfs, false);
 
 		/* Track our visit, and prevent duplicate processing */
@@ -1102,7 +1102,7 @@ static bool hold_request(const struct i915_request *rq)
 		const struct i915_request *s =
 			container_of(p->signaler, typeof(*s), sched);
 
-		if (s->engine != rq->engine)
+		if (s->__engine != rq->__engine)
 			continue;
 
 		result = i915_request_on_hold(s);
@@ -1127,15 +1127,15 @@ bool __i915_request_requeue(struct i915_request *rq,
 	struct i915_sched *se = intel_engine_get_scheduler(engine);
 
 	RQ_TRACE(rq, "transfer from %s to %s\n",
-		 rq->engine->name, engine->name);
+		 i915_request_get_scheduler(rq)->dbg.name, se->dbg.name);
 
 	lockdep_assert_held(&se->lock);
 	lockdep_assert_held(&i915_request_get_scheduler(rq)->lock);
 	GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags));
-	GEM_BUG_ON(rq->engine == engine);
+	GEM_BUG_ON(rq->__engine == engine);
 
 	remove_from_priolist(i915_request_get_scheduler(rq), rq, NULL, false);
-	WRITE_ONCE(rq->engine, engine);
+	WRITE_ONCE(rq->__engine, engine);
 
 	if (__i915_request_is_complete(rq)) {
 		clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
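
This WRITE_ONCE() is the store that the READ_ONCE() loop in
lock_engine_irqsave() chases. Both engine locks are held here (see the
lockdep assertions above), so publishing the new pointer is what redirects a
concurrent locker from the stale engine to the new one. The pairing, spelled
out as a comment (illustration only):

/*
 * Writer, under both the old and the new sched.lock:
 *	WRITE_ONCE(rq->__engine, engine);
 *
 * Reader, lock_engine_irqsave():
 *	engine__ = READ_ONCE(rq__->__engine);
 *	spin_lock_irqsave(&engine__->sched.lock, flags);
 *	while (engine__ != READ_ONCE(rq__->__engine))
 *		drop the stale lock, follow the new pointer;
 */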
@@ -1165,8 +1165,7 @@ bool __i915_request_requeue(struct i915_request *rq,
 
 void i915_request_enqueue(struct i915_request *rq)
 {
-	struct intel_engine_cs *engine = rq->engine;
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
+	struct i915_sched *se = i915_request_get_scheduler(rq);
 	u64 dl = earliest_deadline(rq);
 	unsigned long flags;
 	bool kick = false;
@@ -1253,7 +1252,7 @@ bool __i915_sched_suspend_request(struct intel_engine_cs *engine,
 	LIST_HEAD(list);
 
 	lockdep_assert_held(&se->lock);
-	GEM_BUG_ON(rq->engine != engine);
+	GEM_BUG_ON(rq->__engine != engine);
 
 	if (__i915_request_is_complete(rq)) /* too late! */
 		return false;
@@ -1288,7 +1287,7 @@ bool __i915_sched_suspend_request(struct intel_engine_cs *engine,
 				continue;
 
 			/* Leave semaphores spinning on the other engines */
-			if (w->engine != engine)
+			if (w->__engine != engine)
 				continue;
 
 			if (!i915_request_is_ready(w))
@@ -1370,7 +1369,7 @@ void __i915_sched_resume_request(struct intel_engine_cs *engine,
 			if (rq->fence.error)
 				i915_request_set_error_once(w, rq->fence.error);
 
-			if (w->engine != engine)
+			if (w->__engine != engine)
 				continue;
 
 			/* We also treat the on-hold status as a visited bit */
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index a4addcc64978..1d9c6851e6f7 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -735,9 +735,9 @@ TRACE_EVENT(i915_request_queue,
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = rq->engine->i915->drm.primary->index;
-			   __entry->class = rq->engine->uabi_class;
-			   __entry->instance = rq->engine->uabi_instance;
+			   __entry->dev = i915_request_get_engine(rq)->i915->drm.primary->index;
+			   __entry->class = i915_request_get_engine(rq)->uabi_class;
+			   __entry->instance = i915_request_get_engine(rq)->uabi_instance;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
 			   __entry->flags = flags;
@@ -761,9 +761,9 @@ DECLARE_EVENT_CLASS(i915_request,
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = rq->engine->i915->drm.primary->index;
-			   __entry->class = rq->engine->uabi_class;
-			   __entry->instance = rq->engine->uabi_instance;
+			   __entry->dev = i915_request_get_engine(rq)->i915->drm.primary->index;
+			   __entry->class = i915_request_get_engine(rq)->uabi_class;
+			   __entry->instance = i915_request_get_engine(rq)->uabi_instance;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
 			   ),
@@ -804,9 +804,9 @@ TRACE_EVENT(i915_request_in,
 			    ),
 
 	    TP_fast_assign(
-			   __entry->dev = rq->engine->i915->drm.primary->index;
-			   __entry->class = rq->engine->uabi_class;
-			   __entry->instance = rq->engine->uabi_instance;
+			   __entry->dev = i915_request_get_engine(rq)->i915->drm.primary->index;
+			   __entry->class = i915_request_get_engine(rq)->uabi_class;
+			   __entry->instance = i915_request_get_engine(rq)->uabi_instance;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
 			   __entry->prio = rq->sched.attr.priority;
@@ -833,9 +833,9 @@ TRACE_EVENT(i915_request_out,
 			    ),
 
 	    TP_fast_assign(
-			   __entry->dev = rq->engine->i915->drm.primary->index;
-			   __entry->class = rq->engine->uabi_class;
-			   __entry->instance = rq->engine->uabi_instance;
+			   __entry->dev = i915_request_get_engine(rq)->i915->drm.primary->index;
+			   __entry->class = i915_request_get_engine(rq)->uabi_class;
+			   __entry->instance = i915_request_get_engine(rq)->uabi_instance;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
 			   __entry->completed = i915_request_completed(rq);
@@ -895,9 +895,9 @@ TRACE_EVENT(i915_request_wait_begin,
 	     * less desirable.
 	     */
 	    TP_fast_assign(
-			   __entry->dev = rq->engine->i915->drm.primary->index;
-			   __entry->class = rq->engine->uabi_class;
-			   __entry->instance = rq->engine->uabi_instance;
+			   __entry->dev = i915_request_get_engine(rq)->i915->drm.primary->index;
+			   __entry->class = i915_request_get_engine(rq)->uabi_class;
+			   __entry->instance = i915_request_get_engine(rq)->uabi_instance;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
 			   __entry->flags = flags;
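
Each TP_fast_assign() above now calls i915_request_get_engine() three times;
the helper is a trivial pointer chase that the compiler will fold, but
hoisting it into a local would read more compactly if a respin is needed. An
equivalent form, illustrative only:

	    TP_fast_assign(
			   struct intel_engine_cs *engine =
				   i915_request_get_engine(rq);

			   __entry->dev = engine->i915->drm.primary->index;
			   __entry->class = engine->uabi_class;
			   __entry->instance = engine->uabi_instance;
			   __entry->ctx = rq->fence.context;
			   __entry->seqno = rq->fence.seqno;
			   ),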
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index c5f45afb9614..056d7935dfff 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -162,7 +162,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
 		return PTR_ERR(cs);
 
 	len = 5;
-	if (INTEL_GEN(rq->engine->i915) >= 8)
+	if (INTEL_GEN(i915_request_get_engine(rq)->i915) >= 8)
 		len++;
 
 	*cs++ = GFX_OP_PIPE_CONTROL(len);
@@ -181,7 +181,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
 
 static ktime_t poll_status(struct i915_request *rq, int slot)
 {
-	while (!intel_read_status_page(rq->engine, slot) &&
+	while (!intel_read_status_page(i915_request_get_engine(rq), slot) &&
 	       !i915_request_completed(rq))
 		cpu_relax();
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_scheduler.c b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
index 506ac4c01503..0180a21fa1a7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
@@ -606,7 +606,7 @@ static int igt_deadline_chains(void *arg)
 
 static bool igt_defer(struct i915_request *rq, unsigned long v, unsigned long e)
 {
-	struct intel_engine_cs *engine = rq->engine;
+	struct intel_engine_cs *engine = rq->__engine;
 	struct i915_sched *se = intel_engine_get_scheduler(engine);
 
 	/* XXX No generic means to unwind incomplete requests yet */
@@ -624,7 +624,7 @@ static bool igt_defer(struct i915_request *rq, unsigned long v, unsigned long e)
 
 	/* Then the very first request must be the one everyone depends on */
 	rq = list_first_entry(lookup_priolist(se, 0), typeof(*rq), sched.link);
-	GEM_BUG_ON(rq->engine != engine);
+	GEM_BUG_ON(rq->__engine != engine);
 
 	/* Deferring the first request will then have to defer all requests */
 	__i915_sched_defer_request(engine, rq);
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index 09f747228dff..49f3bddfcb95 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -44,7 +44,7 @@ mock_request(struct intel_context *ce, unsigned long delay)
 bool mock_cancel_request(struct i915_request *request)
 {
 	struct mock_engine *engine =
-		container_of(request->engine, typeof(*engine), base);
+		container_of(i915_request_get_engine(request), typeof(*engine), base);
 	bool was_queued;
 
 	spin_lock_irq(&engine->hw_lock);
-- 
2.20.1


