[PATCH 60/75] drm/i915: Move request initialisation callback to intel_context_ops

Chris Wilson <chris@chris-wilson.co.uk>
Tue Feb 2 00:36:26 UTC 2021

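Request construction is a property of the context backend, not of the
engine, so move the request initialisation callback from
engine->request_alloc() to a new intel_context_ops.init_request() and
pass the intel_context explicitly. Each backend (execlists, virtual,
legacy ring, ring scheduler, GuC, mock) keeps its existing body; only
the hook point moves from struct intel_engine_cs to the context ops,
and the helpers now take the context/engine as parameters instead of
digging them out of the request.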

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_context_types.h |  2 +
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |  2 -
 .../drm/i915/gt/intel_execlists_submission.c  | 62 ++++++------
 .../gpu/drm/i915/gt/intel_ring_scheduler.c    | 48 ++++-----
 .../gpu/drm/i915/gt/intel_ring_submission.c   | 99 ++++++++++---------
 drivers/gpu/drm/i915/gt/mock_engine.c         | 22 ++---
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 64 ++++++------
 drivers/gpu/drm/i915/i915_request.c           |  2 +-
 8 files changed, 156 insertions(+), 145 deletions(-)
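
[Not part of the patch: a reviewer sketch of the resulting pattern.
The foo_* names below are hypothetical; the real backends appear in
the hunks that follow.]

After this change a backend supplies request initialisation through
its context ops rather than through the engine:

	static int
	foo_context_request(struct intel_context *ce, struct i915_request *rq)
	{
		/* reserve ring space, emit the EMIT_INVALIDATE flush, etc. */
		return 0;
	}

	static const struct intel_context_ops foo_context_ops = {
		.alloc = foo_context_alloc,
		.init_request = foo_context_request,
		/* pre_pin/pin/unpin, enter/exit, reset, destroy unchanged */
	};

and __i915_request_create() dispatches via ce->ops->init_request(ce, rq)
instead of rq->engine->request_alloc(rq).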

diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index d1a35c3055a7..3adccb1c4609 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -38,6 +38,8 @@ struct intel_context_ops {
 
 	int (*alloc)(struct intel_context *ce);
 
+	int (*init_request)(struct intel_context *ce, struct i915_request *rq);
+
 	int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
 	int (*pin)(struct intel_context *ce, void *vaddr);
 	void (*unpin)(struct intel_context *ce);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index f39eb0bc3089..fbb247ea02e6 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -385,8 +385,6 @@ struct intel_engine_cs {
 
 	const struct intel_context_ops *cops;
 
-	int		(*request_alloc)(struct i915_request *rq);
-
 	int		(*emit_flush)(struct i915_request *request, u32 mode);
 #define EMIT_INVALIDATE	BIT(0)
 #define EMIT_FLUSH	BIT(1)
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 6b2dc4091603..c05ed5d63809 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -2224,31 +2224,14 @@ static int execlists_context_alloc(struct intel_context *ce)
 	return lrc_alloc(ce, ce->engine);
 }
 
-static const struct intel_context_ops execlists_context_ops = {
-	.flags = COPS_HAS_INFLIGHT | COPS_RUNTIME_CYCLES,
-
-	.alloc = execlists_context_alloc,
-
-	.pre_pin = execlists_context_pre_pin,
-	.pin = execlists_context_pin,
-	.unpin = lrc_unpin,
-	.post_unpin = lrc_post_unpin,
-
-	.enter = intel_context_enter_engine,
-	.exit = intel_context_exit_engine,
-
-	.reset = lrc_reset,
-	.destroy = lrc_destroy,
-};
-
-static int emit_pdps(struct i915_request *rq)
+static int emit_pdps(struct intel_context *ce, struct i915_request *rq)
 {
-	const struct intel_engine_cs * const engine = rq->engine;
-	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
+	const struct intel_engine_cs * const engine = ce->engine;
+	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(ce->vm);
 	int err, i;
 	u32 *cs;
 
-	GEM_BUG_ON(intel_vgpu_active(rq->engine->i915));
+	GEM_BUG_ON(intel_vgpu_active(engine->i915));
 
 	/*
 	 * Beware ye of the dragons, this sequence is magic!
@@ -2298,18 +2281,20 @@ static int emit_pdps(struct i915_request *rq)
 	return 0;
 }
 
-static int execlists_request_alloc(struct i915_request *request)
+static int
+execlists_context_request(struct intel_context *ce, struct i915_request *rq)
 {
+	struct intel_engine_cs *engine = ce->engine;
 	int ret;
 
-	GEM_BUG_ON(!intel_context_is_pinned(request->context));
+	GEM_BUG_ON(!intel_context_is_pinned(ce));
 
 	/*
 	 * Flush enough space to reduce the likelihood of waiting after
 	 * we start building the request - in which case we will just
 	 * have to repeat work.
 	 */
-	request->reserved_space += EXECLISTS_REQUEST_SIZE;
+	rq->reserved_space += EXECLISTS_REQUEST_SIZE;
 
 	/*
 	 * Note that after this point, we have committed to using
@@ -2319,21 +2304,39 @@ static int execlists_request_alloc(struct i915_request *request)
 	 * to cancel/unwind this request now.
 	 */
 
-	if (!i915_vm_is_4lvl(request->context->vm)) {
-		ret = emit_pdps(request);
+	if (!i915_vm_is_4lvl(ce->vm)) {
+		ret = emit_pdps(ce, rq);
 		if (ret)
 			return ret;
 	}
 
 	/* Unconditionally invalidate GPU caches and TLBs. */
-	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+	ret = engine->emit_flush(rq, EMIT_INVALIDATE);
 	if (ret)
 		return ret;
 
-	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
+	rq->reserved_space -= EXECLISTS_REQUEST_SIZE;
 	return 0;
 }
 
+static const struct intel_context_ops execlists_context_ops = {
+	.flags = COPS_HAS_INFLIGHT | COPS_RUNTIME_CYCLES,
+
+	.alloc = execlists_context_alloc,
+	.init_request = execlists_context_request,
+
+	.pre_pin = execlists_context_pre_pin,
+	.pin = execlists_context_pin,
+	.unpin = lrc_unpin,
+	.post_unpin = lrc_post_unpin,
+
+	.enter = intel_context_enter_engine,
+	.exit = intel_context_exit_engine,
+
+	.reset = lrc_reset,
+	.destroy = lrc_destroy,
+};
+
 static void reset_csb_pointers(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -2927,7 +2930,6 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 	engine->resume = execlists_resume;
 
 	engine->cops = &execlists_context_ops;
-	engine->request_alloc = execlists_request_alloc;
 
 	engine->reset.prepare = execlists_reset_prepare;
 	engine->reset.rewind = execlists_reset_rewind;
@@ -3243,6 +3245,7 @@ static const struct intel_context_ops virtual_context_ops = {
 	.flags = COPS_HAS_INFLIGHT | COPS_RUNTIME_CYCLES,
 
 	.alloc = virtual_context_alloc,
+	.init_request = execlists_context_request,
 
 	.pre_pin = virtual_context_pre_pin,
 	.pin = virtual_context_pin,
@@ -3434,7 +3437,6 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 	intel_engine_init_execlists(&ve->base);
 
 	ve->base.cops = &virtual_context_ops;
-	ve->base.request_alloc = execlists_request_alloc;
 
 	ve->base.bond_execute = virtual_bond_execute;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
index 7f7c259e9ec8..cab421802c9b 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
@@ -939,10 +939,35 @@ static void ring_context_reset(struct intel_context *ce)
 	clear_bit(CONTEXT_VALID_BIT, &ce->flags);
 }
 
+static int
+ring_context_request(struct intel_context *ce, struct i915_request *rq)
+{
+	struct intel_engine_cs *engine = ce->engine;
+	int ret;
+
+	GEM_BUG_ON(!intel_context_is_pinned(ce));
+
+	/*
+	 * Flush enough space to reduce the likelihood of waiting after
+	 * we start building the request - in which case we will just
+	 * have to repeat work.
+	 */
+	rq->reserved_space += LEGACY_REQUEST_SIZE;
+
+	/* Unconditionally invalidate GPU caches and TLBs. */
+	ret = engine->emit_flush(rq, EMIT_INVALIDATE);
+	if (ret)
+		return ret;
+
+	rq->reserved_space -= LEGACY_REQUEST_SIZE;
+	return 0;
+}
+
 static const struct intel_context_ops ring_context_ops = {
 	.flags = COPS_HAS_INFLIGHT,
 
 	.alloc = ring_context_alloc,
+	.init_request = ring_context_request,
 
 	.pre_pin = ring_context_pre_pin,
 	.pin = ring_context_pin,
@@ -956,28 +981,6 @@ static const struct intel_context_ops ring_context_ops = {
 	.destroy = ring_context_destroy,
 };
 
-static int ring_request_alloc(struct i915_request *rq)
-{
-	int ret;
-
-	GEM_BUG_ON(!intel_context_is_pinned(rq->context));
-
-	/*
-	 * Flush enough space to reduce the likelihood of waiting after
-	 * we start building the request - in which case we will just
-	 * have to repeat work.
-	 */
-	rq->reserved_space += LEGACY_REQUEST_SIZE;
-
-	/* Unconditionally invalidate GPU caches and TLBs. */
-	ret = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
-	if (ret)
-		return ret;
-
-	rq->reserved_space -= LEGACY_REQUEST_SIZE;
-	return 0;
-}
-
 static void set_default_submission(struct intel_engine_cs *engine)
 {
 	engine->sched.submit_request = i915_request_enqueue;
@@ -1034,7 +1037,6 @@ static void setup_common(struct intel_engine_cs *engine)
 	engine->reset.finish = reset_finish;
 
 	engine->cops = &ring_context_ops;
-	engine->request_alloc = ring_request_alloc;
 
 	engine->emit_init_breadcrumb = gen4_emit_init_breadcrumb_xcs;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index ede148c7b2bd..193b099e8048 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -565,26 +565,11 @@ static void ring_context_reset(struct intel_context *ce)
 	clear_bit(CONTEXT_VALID_BIT, &ce->flags);
 }
 
-static const struct intel_context_ops ring_context_ops = {
-	.alloc = ring_context_alloc,
-
-	.pre_pin = ring_context_pre_pin,
-	.pin = ring_context_pin,
-	.unpin = ring_context_unpin,
-	.post_unpin = ring_context_post_unpin,
-
-	.enter = intel_context_enter_engine,
-	.exit = intel_context_exit_engine,
-
-	.reset = ring_context_reset,
-	.destroy = ring_context_destroy,
-};
-
-static int load_pd_dir(struct i915_request *rq,
+static int load_pd_dir(const struct intel_engine_cs *engine,
+		       struct i915_request *rq,
 		       struct i915_address_space *vm,
 		       u32 valid)
 {
-	const struct intel_engine_cs * const engine = rq->engine;
 	u32 *cs;
 
 	cs = intel_ring_begin(rq, 12);
@@ -611,14 +596,14 @@ static int load_pd_dir(struct i915_request *rq,
 
 	intel_ring_advance(rq, cs);
 
-	return rq->engine->emit_flush(rq, EMIT_FLUSH);
+	return engine->emit_flush(rq, EMIT_FLUSH);
 }
 
 static int mi_set_context(struct i915_request *rq,
 			  struct intel_context *ce,
 			  u32 flags)
 {
-	struct intel_engine_cs *engine = rq->engine;
+	struct intel_engine_cs *engine = ce->engine;
 	struct drm_i915_private *i915 = engine->i915;
 	enum intel_engine_id id;
 	const int num_engines =
@@ -731,10 +716,12 @@ static int mi_set_context(struct i915_request *rq,
 	return 0;
 }
 
-static int remap_l3_slice(struct i915_request *rq, int slice)
+static int remap_l3_slice(struct intel_context *ce,
+			  struct i915_request *rq,
+			  int slice)
 {
 #define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(u32))
-	u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice];
+	u32 *cs, *remap_info = ce->engine->i915->l3_parity.remap_info[slice];
 	int i;
 
 	if (!remap_info)
@@ -761,7 +748,7 @@ static int remap_l3_slice(struct i915_request *rq, int slice)
 #undef L3LOG_DW
 }
 
-static int remap_l3(struct i915_request *rq)
+static int remap_l3(struct intel_context *ce, struct i915_request *rq)
 {
 	struct i915_gem_context *ctx = i915_request_gem_context(rq);
 	int i, err;
@@ -773,7 +760,7 @@ static int remap_l3(struct i915_request *rq)
 		if (!(ctx->remap_slice & BIT(i)))
 			continue;
 
-		err = remap_l3_slice(rq, i);
+		err = remap_l3_slice(ce, rq, i);
 		if (err)
 			return err;
 	}
@@ -782,14 +769,15 @@ static int remap_l3(struct i915_request *rq)
 	return 0;
 }
 
-static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
+static int switch_mm(struct intel_context *ce, struct i915_request *rq)
 {
+	struct i915_address_space *vm = vm_alias(ce->vm);
 	int ret;
 
 	if (!vm)
 		return 0;
 
-	ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
+	ret = ce->engine->emit_flush(rq, EMIT_FLUSH);
 	if (ret)
 		return ret;
 
@@ -801,25 +789,25 @@ static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
 	 * post-sync op, this extra pass appears vital before a
 	 * mm switch!
 	 */
-	ret = load_pd_dir(rq, vm, PP_DIR_DCLV_2G);
+	ret = load_pd_dir(ce->engine, rq, vm, PP_DIR_DCLV_2G);
 	if (ret)
 		return ret;
 
-	return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+	return ce->engine->emit_flush(rq, EMIT_INVALIDATE);
 }
 
-static int clear_residuals(struct i915_request *rq)
+static int
+clear_residuals(struct intel_engine_cs *engine, struct i915_request *rq)
 {
-	struct intel_engine_cs *engine = rq->engine;
+	struct intel_context *ce = engine->kernel_context;
 	int ret;
 
-	ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
+	ret = switch_mm(ce, rq);
 	if (ret)
 		return ret;
 
-	if (engine->kernel_context->state) {
-		ret = mi_set_context(rq,
-				     engine->kernel_context,
+	if (ce->state) {
+		ret = mi_set_context(rq, ce,
 				     MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
 		if (ret)
 			return ret;
@@ -839,10 +827,10 @@ static int clear_residuals(struct i915_request *rq)
 	return engine->emit_flush(rq, EMIT_INVALIDATE);
 }
 
-static int switch_context(struct i915_request *rq)
+static int switch_context(struct intel_context *ce,
+			  struct i915_request *rq)
 {
-	struct intel_engine_cs *engine = rq->engine;
-	struct intel_context *ce = rq->context;
+	struct intel_engine_cs *engine = ce->engine;
 	void **residuals = NULL;
 	int ret;
 
@@ -851,7 +839,7 @@ static int switch_context(struct i915_request *rq)
 	if (engine->wa_ctx.vma && ce != engine->kernel_context) {
 		if (engine->wa_ctx.vma->private != ce &&
 		    i915_mitigate_clear_residuals()) {
-			ret = clear_residuals(rq);
+			ret = clear_residuals(engine, rq);
 			if (ret)
 				return ret;
 
@@ -859,7 +847,7 @@ static int switch_context(struct i915_request *rq)
 		}
 	}
 
-	ret = switch_mm(rq, vm_alias(ce->vm));
+	ret = switch_mm(ce, rq);
 	if (ret)
 		return ret;
 
@@ -883,7 +871,7 @@ static int switch_context(struct i915_request *rq)
 			return ret;
 	}
 
-	ret = remap_l3(rq);
+	ret = remap_l3(ce, rq);
 	if (ret)
 		return ret;
 
@@ -904,33 +892,51 @@ static int switch_context(struct i915_request *rq)
 	return 0;
 }
 
-static int ring_request_alloc(struct i915_request *request)
+static int
+ring_context_request(struct intel_context *ce, struct i915_request *rq)
 {
+	struct intel_engine_cs *engine = ce->engine;
 	int ret;
 
-	GEM_BUG_ON(!intel_context_is_pinned(request->context));
-	GEM_BUG_ON(intel_timeline_has_initial_breadcrumb(i915_request_timeline(request)));
+	GEM_BUG_ON(!intel_context_is_pinned(ce));
+	GEM_BUG_ON(intel_timeline_has_initial_breadcrumb(i915_request_timeline(rq)));
 
 	/*
 	 * Flush enough space to reduce the likelihood of waiting after
 	 * we start building the request - in which case we will just
 	 * have to repeat work.
 	 */
-	request->reserved_space += LEGACY_REQUEST_SIZE;
+	rq->reserved_space += LEGACY_REQUEST_SIZE;
 
 	/* Unconditionally invalidate GPU caches and TLBs. */
-	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+	ret = engine->emit_flush(rq, EMIT_INVALIDATE);
 	if (ret)
 		return ret;
 
-	ret = switch_context(request);
+	ret = switch_context(ce, rq);
 	if (ret)
 		return ret;
 
-	request->reserved_space -= LEGACY_REQUEST_SIZE;
+	rq->reserved_space -= LEGACY_REQUEST_SIZE;
 	return 0;
 }
 
+static const struct intel_context_ops ring_context_ops = {
+	.alloc = ring_context_alloc,
+	.init_request = ring_context_request,
+
+	.pre_pin = ring_context_pre_pin,
+	.pin = ring_context_pin,
+	.unpin = ring_context_unpin,
+	.post_unpin = ring_context_post_unpin,
+
+	.enter = intel_context_enter_engine,
+	.exit = intel_context_exit_engine,
+
+	.reset = ring_context_reset,
+	.destroy = ring_context_destroy,
+};
+
 static void gen6_bsd_submit_request(struct i915_request *request)
 {
 	struct intel_uncore *uncore = request->engine->uncore;
@@ -1037,7 +1043,6 @@ static void setup_common(struct intel_engine_cs *engine)
 	engine->reset.finish = intel_ring_submission_reset_finish;
 
 	engine->cops = &ring_context_ops;
-	engine->request_alloc = ring_request_alloc;
 
 	/*
 	 * Using a global execution timeline; the previous final breadcrumb is
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index cae736e34bda..1ded772fe395 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -163,8 +163,18 @@ static void mock_context_reset(struct intel_context *ce)
 {
 }
 
+static int
+mock_context_request(struct intel_context *ce, struct i915_request *rq)
+{
+	INIT_LIST_HEAD(&rq->mock.link);
+	rq->mock.delay = 0;
+
+	return 0;
+}
+
 static const struct intel_context_ops mock_context_ops = {
 	.alloc = mock_context_alloc,
+	.init_request = mock_context_request,
 
 	.pre_pin = mock_context_pre_pin,
 	.pin = mock_context_pin,
@@ -178,16 +188,7 @@ static const struct intel_context_ops mock_context_ops = {
 	.destroy = mock_context_destroy,
 };
 
-static int mock_request_alloc(struct i915_request *request)
-{
-	INIT_LIST_HEAD(&request->mock.link);
-	request->mock.delay = 0;
-
-	return 0;
-}
-
-static int mock_emit_flush(struct i915_request *request,
-			   unsigned int flags)
+static int mock_emit_flush(struct i915_request *request, unsigned int flags)
 {
 	return 0;
 }
@@ -299,7 +300,6 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	engine->base.status_page.addr = (void *)(engine + 1);
 
 	engine->base.cops = &mock_context_ops;
-	engine->base.request_alloc = mock_request_alloc;
 	engine->base.emit_flush = mock_emit_flush;
 	engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 5f4627a93fd2..42ee3f1e8f6f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -442,8 +442,41 @@ static int guc_context_pin(struct intel_context *ce, void *vaddr)
 	return lrc_pin(ce, ce->engine, vaddr);
 }
 
+static int
+guc_context_request(struct intel_context *ce, struct i915_request *rq)
+{
+	struct intel_engine_cs *engine = ce->engine;
+	int ret;
+
+	GEM_BUG_ON(!intel_context_is_pinned(ce));
+
+	/*
+	 * Flush enough space to reduce the likelihood of waiting after
+	 * we start building the request - in which case we will just
+	 * have to repeat work.
+	 */
+	rq->reserved_space += GUC_REQUEST_SIZE;
+
+	/*
+	 * Note that after this point, we have committed to using
+	 * this request as it is being used to both track the
+	 * state of engine initialisation and liveness of the
+	 * golden renderstate above. Think twice before you try
+	 * to cancel/unwind this request now.
+	 */
+
+	/* Unconditionally invalidate GPU caches and TLBs. */
+	ret = engine->emit_flush(rq, EMIT_INVALIDATE);
+	if (ret)
+		return ret;
+
+	rq->reserved_space -= GUC_REQUEST_SIZE;
+	return 0;
+}
+
 static const struct intel_context_ops guc_context_ops = {
 	.alloc = guc_context_alloc,
+	.init_request = guc_context_request,
 
 	.pre_pin = guc_context_pre_pin,
 	.pin = guc_context_pin,
@@ -457,36 +490,6 @@ static const struct intel_context_ops guc_context_ops = {
 	.destroy = lrc_destroy,
 };
 
-static int guc_request_alloc(struct i915_request *request)
-{
-	int ret;
-
-	GEM_BUG_ON(!intel_context_is_pinned(request->context));
-
-	/*
-	 * Flush enough space to reduce the likelihood of waiting after
-	 * we start building the request - in which case we will just
-	 * have to repeat work.
-	 */
-	request->reserved_space += GUC_REQUEST_SIZE;
-
-	/*
-	 * Note that after this point, we have committed to using
-	 * this request as it is being used to both track the
-	 * state of engine initialisation and liveness of the
-	 * golden renderstate above. Think twice before you try
-	 * to cancel/unwind this request now.
-	 */
-
-	/* Unconditionally invalidate GPU caches and TLBs. */
-	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
-	if (ret)
-		return ret;
-
-	request->reserved_space -= GUC_REQUEST_SIZE;
-	return 0;
-}
-
 static void sanitize_hwsp(struct intel_engine_cs *engine)
 {
 	struct intel_timeline *tl;
@@ -573,7 +576,6 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
 	engine->resume = guc_resume;
 
 	engine->cops = &guc_context_ops;
-	engine->request_alloc = guc_request_alloc;
 
 	engine->reset.prepare = guc_reset_prepare;
 	engine->reset.rewind = guc_reset_rewind;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 352083889b97..72019ba4907d 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -840,7 +840,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	 */
 	rq->head = rq->ring->emit;
 
-	ret = rq->engine->request_alloc(rq);
+	ret = ce->ops->init_request(ce, rq);
 	if (ret)
 		goto err_unwind;
 
-- 
2.20.1