[Intel-gfx] [PATCH 3/4] drm/i915: Remove FIXME_lrc_ctx backpointer

Nick Hoath nicholas.hoath at intel.com
Mon Dec 22 01:37:23 PST 2014


The first-pass implementation of execlists required a backpointer to the context to be held
in the intel_ringbuffer. However, the context is already available higher up the call stack.
Remove the backpointer from the ring buffer structure and instead pass the context down
through the call stack.
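
A minimal standalone sketch of the pattern, using hypothetical, simplified types rather
than the real i915 structures or signatures: instead of caching the context in the ring
buffer and reading it back later, every helper takes the context as an explicit parameter
and forwards it to its callees.

	#include <stdio.h>

	/* Simplified stand-ins -- not the real intel_ringbuffer/intel_context. */
	struct ctx {
		int id;
	};

	struct ringbuf {
		unsigned int tail;
		/*
		 * Before: a context backpointer (FIXME_lrc_ctx in the real code)
		 * was stored here. After: no context pointer lives in the ring
		 * buffer at all.
		 */
	};

	/* Leaf helper now takes the context explicitly. */
	static int emit_flush(struct ringbuf *rb, struct ctx *c)
	{
		printf("flush: ctx %d, tail %u\n", c->id, rb->tail);
		return 0;
	}

	/* Mid-level helper simply forwards the context it was given. */
	static int ring_begin(struct ringbuf *rb, struct ctx *c, int dwords)
	{
		rb->tail += dwords * 4;
		return emit_flush(rb, c);
	}

	int main(void)
	{
		struct ctx c = { .id = 1 };
		struct ringbuf rb = { .tail = 0 };

		/* The caller already holds the context, so it just passes it in. */
		return ring_begin(&rb, &c, 4);
	}

The explicit parameter keeps the ring buffer free of assumptions about which context it
is currently serving, at the cost of one extra argument on each emit/begin path.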

v2: Integrate this changeset with the removal of duplicate request/execlist queue item members.
v3: Rebase
Signed-off-by: Nick Hoath <nicholas.hoath at intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c         |  7 ++--
 drivers/gpu/drm/i915/intel_lrc.c        | 68 ++++++++++++++++++++-------------
 drivers/gpu/drm/i915/intel_lrc.h        |  8 +++-
 drivers/gpu/drm/i915/intel_ringbuffer.h | 15 ++++----
 4 files changed, 57 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8782a4d..73c1a22 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2435,8 +2435,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
 		return -ENOMEM;
 
 	if (i915.enable_execlists) {
-		struct intel_context *ctx = request->ctx;
-		ringbuf = ctx->engine[ring->id].ringbuf;
+		ringbuf = request->ctx->engine[ring->id].ringbuf;
 	} else
 		ringbuf = ring->buffer;
 
@@ -2449,7 +2448,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	 * what.
 	 */
 	if (i915.enable_execlists) {
-		ret = logical_ring_flush_all_caches(ringbuf);
+		ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
 		if (ret)
 			return ret;
 	} else {
@@ -2466,7 +2465,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	request_ring_position = intel_ring_get_tail(ringbuf);
 
 	if (i915.enable_execlists) {
-		ret = ring->emit_request(ringbuf);
+		ret = ring->emit_request(ringbuf, request->ctx, request);
 		if (ret)
 			return ret;
 	} else {
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index a18ea13..427d561 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -562,6 +562,8 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 			return -ENOMEM;
 		request->ctx = to;
 		request->ring = ring;
+	} else {
+		WARN_ON(to != request->ctx);
 	}
 	req->request = request;
 	i915_gem_request_reference(request);
@@ -600,7 +602,8 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 	return 0;
 }
 
-static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
+static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
+					      struct intel_context *ctx)
 {
 	struct intel_engine_cs *ring = ringbuf->ring;
 	uint32_t flush_domains;
@@ -610,7 +613,8 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
 	if (ring->gpu_caches_dirty)
 		flush_domains = I915_GEM_GPU_DOMAINS;
 
-	ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
+	ret = ring->emit_flush(ringbuf, ctx,
+			       I915_GEM_GPU_DOMAINS, flush_domains);
 	if (ret)
 		return ret;
 
@@ -619,6 +623,7 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
 }
 
 static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
+				 struct intel_context *ctx,
 				 struct list_head *vmas)
 {
 	struct intel_engine_cs *ring = ringbuf->ring;
@@ -646,7 +651,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return logical_ring_invalidate_all_caches(ringbuf);
+	return logical_ring_invalidate_all_caches(ringbuf, ctx);
 }
 
 /**
@@ -726,13 +731,13 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
 		return -EINVAL;
 	}
 
-	ret = execlists_move_to_gpu(ringbuf, vmas);
+	ret = execlists_move_to_gpu(ringbuf, ctx, vmas);
 	if (ret)
 		return ret;
 
 	if (ring == &dev_priv->ring[RCS] &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		ret = intel_logical_ring_begin(ringbuf, 4);
+		ret = intel_logical_ring_begin(ringbuf, ctx, 4);
 		if (ret)
 			return ret;
 
@@ -745,7 +750,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
 		dev_priv->relative_constants_mode = instp_mode;
 	}
 
-	ret = ring->emit_bb_start(ringbuf, exec_start, flags);
+	ret = ring->emit_bb_start(ringbuf, ctx, exec_start, flags);
 	if (ret)
 		return ret;
 
@@ -808,7 +813,8 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
 	I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
+int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
+				  struct intel_context *ctx)
 {
 	struct intel_engine_cs *ring = ringbuf->ring;
 	int ret;
@@ -816,7 +822,7 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
 	if (!ring->gpu_caches_dirty)
 		return 0;
 
-	ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS);
+	ret = ring->emit_flush(ringbuf, ctx, 0, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -834,10 +840,10 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
  * point, the tail *inside* the context is updated and the ELSP written to.
  */
 void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
+					   struct intel_context *ctx,
 					   struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *ring = ringbuf->ring;
-	struct intel_context *ctx = ringbuf->FIXME_lrc_ctx;
 
 	intel_logical_ring_advance(ringbuf);
 
@@ -975,6 +981,7 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
 }
 
 static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
+				       struct intel_context *ctx,
 				       int bytes)
 {
 	struct intel_engine_cs *ring = ringbuf->ring;
@@ -988,7 +995,7 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
 		return ret;
 
 	/* Force the context submission in case we have been skipping it */
-	intel_logical_ring_advance_and_submit(ringbuf, NULL);
+	intel_logical_ring_advance_and_submit(ringbuf, ctx, NULL);
 
 	/* With GEM the hangcheck timer should kick us out of the loop,
 	 * leaving it early runs the risk of corrupting GEM state (due
@@ -1023,13 +1030,14 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
 	return ret;
 }
 
-static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
+static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
+				    struct intel_context *ctx)
 {
 	uint32_t __iomem *virt;
 	int rem = ringbuf->size - ringbuf->tail;
 
 	if (ringbuf->space < rem) {
-		int ret = logical_ring_wait_for_space(ringbuf, rem);
+		int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
 
 		if (ret)
 			return ret;
@@ -1046,18 +1054,19 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
 	return 0;
 }
 
-static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
+static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
+				struct intel_context *ctx, int bytes)
 {
 	int ret;
 
 	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
-		ret = logical_ring_wrap_buffer(ringbuf);
+		ret = logical_ring_wrap_buffer(ringbuf, ctx);
 		if (unlikely(ret))
 			return ret;
 	}
 
 	if (unlikely(ringbuf->space < bytes)) {
-		ret = logical_ring_wait_for_space(ringbuf, bytes);
+		ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
 		if (unlikely(ret))
 			return ret;
 	}
@@ -1078,7 +1087,8 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
  *
  * Return: non-zero if the ringbuffer is not ready to be written to.
  */
-int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
+int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
+			     struct intel_context *ctx, int num_dwords)
 {
 	struct intel_engine_cs *ring = ringbuf->ring;
 	struct drm_device *dev = ring->dev;
@@ -1090,12 +1100,12 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
 	if (ret)
 		return ret;
 
-	ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
+	ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
 	if (ret)
 		return ret;
 
 	/* Preallocate the olr before touching the ring */
-	ret = logical_ring_alloc_request(ring, ringbuf->FIXME_lrc_ctx);
+	ret = logical_ring_alloc_request(ring, ctx);
 	if (ret)
 		return ret;
 
@@ -1116,11 +1126,11 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
 		return 0;
 
 	ring->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(ringbuf);
+	ret = logical_ring_flush_all_caches(ringbuf, ctx);
 	if (ret)
 		return ret;
 
-	ret = intel_logical_ring_begin(ringbuf, w->count * 2 + 2);
+	ret = intel_logical_ring_begin(ringbuf, ctx, w->count * 2 + 2);
 	if (ret)
 		return ret;
 
@@ -1134,7 +1144,7 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
 	intel_logical_ring_advance(ringbuf);
 
 	ring->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(ringbuf);
+	ret = logical_ring_flush_all_caches(ringbuf, ctx);
 	if (ret)
 		return ret;
 
@@ -1184,12 +1194,13 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
 }
 
 static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
+			      struct intel_context *ctx,
 			      u64 offset, unsigned flags)
 {
 	bool ppgtt = !(flags & I915_DISPATCH_SECURE);
 	int ret;
 
-	ret = intel_logical_ring_begin(ringbuf, 4);
+	ret = intel_logical_ring_begin(ringbuf, ctx, 4);
 	if (ret)
 		return ret;
 
@@ -1237,6 +1248,7 @@ static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
 }
 
 static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
+			   struct intel_context *ctx,
 			   u32 invalidate_domains,
 			   u32 unused)
 {
@@ -1246,7 +1258,7 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
 	uint32_t cmd;
 	int ret;
 
-	ret = intel_logical_ring_begin(ringbuf, 4);
+	ret = intel_logical_ring_begin(ringbuf, ctx, 4);
 	if (ret)
 		return ret;
 
@@ -1275,6 +1287,7 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
 }
 
 static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
+				  struct intel_context *ctx,
 				  u32 invalidate_domains,
 				  u32 flush_domains)
 {
@@ -1301,7 +1314,7 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
 		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
 	}
 
-	ret = intel_logical_ring_begin(ringbuf, 6);
+	ret = intel_logical_ring_begin(ringbuf, ctx, 6);
 	if (ret)
 		return ret;
 
@@ -1327,13 +1340,14 @@ static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
 }
 
 static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
+			     struct intel_context *ctx,
 			     struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *ring = ringbuf->ring;
 	u32 cmd;
 	int ret;
 
-	ret = intel_logical_ring_begin(ringbuf, 6);
+	ret = intel_logical_ring_begin(ringbuf, ctx, 6);
 	if (ret)
 		return ret;
 
@@ -1349,7 +1363,7 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
 		i915_gem_request_get_seqno(ring->outstanding_lazy_request));
 	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance_and_submit(ringbuf, request);
+	intel_logical_ring_advance_and_submit(ringbuf, ctx, request);
 
 	return 0;
 }
@@ -1637,6 +1651,7 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
 		return 0;
 
 	ret = ring->emit_bb_start(ringbuf,
+			ctx,
 			so.ggtt_offset,
 			I915_DISPATCH_SECURE);
 	if (ret)
@@ -1893,7 +1908,6 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 	}
 
 	ringbuf->ring = ring;
-	ringbuf->FIXME_lrc_ctx = ctx;
 
 	ringbuf->size = 32 * PAGE_SIZE;
 	ringbuf->effective_size = ringbuf->size;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 376c307..80a80ea 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -38,9 +38,11 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring);
 void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
 int intel_logical_rings_init(struct drm_device *dev);
 
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf);
+int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
+				  struct intel_context *ctx);
 void intel_logical_ring_advance_and_submit(
 				struct intel_ringbuffer *ringbuf,
+				struct intel_context *ctx,
 				struct drm_i915_gem_request *request);
 /**
  * intel_logical_ring_advance() - advance the ringbuffer tail
@@ -63,7 +65,9 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
 	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
 	ringbuf->tail += 4;
 }
-int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);
+int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
+			     struct intel_context *ctx,
+			     int num_dwords);
 
 /* Logical Ring Contexts */
 int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6dbb6f4..cabf48f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -99,13 +99,6 @@ struct intel_ringbuffer {
 
 	struct intel_engine_cs *ring;
 
-	/*
-	 * FIXME: This backpointer is an artifact of the history of how the
-	 * execlist patches came into being. It will get removed once the basic
-	 * code has landed.
-	 */
-	struct intel_context *FIXME_lrc_ctx;
-
 	u32 head;
 	u32 tail;
 	int space;
@@ -123,6 +116,8 @@ struct intel_ringbuffer {
 	u32 last_retired_head;
 };
 
+struct	intel_context;
+
 struct  intel_engine_cs {
 	const char	*name;
 	enum intel_ring_id {
@@ -239,11 +234,15 @@ struct  intel_engine_cs {
 	struct list_head execlist_retired_req_list;
 	u8 next_context_status_buffer;
 	u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
-	int		(*emit_request)(struct intel_ringbuffer *ringbuf);
+	int		(*emit_request)(struct intel_ringbuffer *ringbuf,
+					struct intel_context *ctx,
+					struct drm_i915_gem_request *request);
 	int		(*emit_flush)(struct intel_ringbuffer *ringbuf,
+				      struct intel_context *ctx,
 				      u32 invalidate_domains,
 				      u32 flush_domains);
 	int		(*emit_bb_start)(struct intel_ringbuffer *ringbuf,
+					 struct intel_context *ctx,
 					 u64 offset, unsigned flags);
 
 	/**
-- 
2.1.1