[Intel-gfx] [PATCH 1/2] drm/i915: Don't wait twice in {__intel,logical}_ring_prepare()

Dave Gordon david.s.gordon at intel.com
Fri Jun 12 13:25:37 PDT 2015


If the ringbuffer was near-full AND 'tail' was near the end of the
buffer, we could end up waiting twice: once to gain ownership of the
space between 'tail' and the end of the buffer (which we just want to
fill with padding, so as not to split a single command sequence across
the end of the ringbuffer), and then again to get enough space for the
new data that the caller wants to add.

Now we precalculate the total amount of space we'll need, *including*
any padding required at the end of the ringbuffer, and wait for that
much in one go.
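
To illustrate the new calculation, here is a minimal standalone C
sketch (not the kernel code itself; the field names mirror struct
intel_ringbuffer, but the values and the helper are invented for this
example). If a request would cross effective_size, the padding needed
to reach the end of the ring is folded into the request size, so a
single wait-for-space covers both:

    /*
     * Hypothetical illustration only -- plain userspace C, not i915 code.
     * With 'tail' near the end of a 4 KiB ring and a 128-byte request,
     * the old path could wait once for the 96 bytes of padding and then
     * again for the 128 bytes of payload; the new path waits once for 224.
     */
    #include <stdio.h>

    struct ringbuf {
            int size;               /* total ring size, in bytes */
            int effective_size;     /* usable size (excludes wrap margin) */
            int tail;               /* current write offset */
    };

    static int total_bytes_needed(const struct ringbuf *rb, int bytes)
    {
            int fill = 0;

            /* Request won't fit before the end: pad to the top of the ring. */
            if (rb->tail + bytes > rb->effective_size)
                    fill = rb->size - rb->tail;

            return bytes + fill;    /* wait for this much in one go */
    }

    int main(void)
    {
            struct ringbuf rb = { .size = 4096, .effective_size = 4088,
                                  .tail = 4000 };

            printf("need %d bytes\n", total_bytes_needed(&rb, 128)); /* 224 */
            return 0;
    }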

Signed-off-by: Dave Gordon <david.s.gordon at intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c        |   52 +++++++++++++++----------------
 drivers/gpu/drm/i915/intel_ringbuffer.c |   51 +++++++++++++++---------------
 2 files changed, 50 insertions(+), 53 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 454e836..a4bb5d3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -740,39 +740,22 @@ intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
 	execlists_context_queue(ring, ctx, ringbuf->tail, request);
 }
 
-static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
-				    struct intel_context *ctx)
-{
-	uint32_t __iomem *virt;
-	int rem = ringbuf->size - ringbuf->tail;
-
-	if (ringbuf->space < rem) {
-		int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
-
-		if (ret)
-			return ret;
-	}
-
-	virt = ringbuf->virtual_start + ringbuf->tail;
-	rem /= 4;
-	while (rem--)
-		iowrite32(MI_NOOP, virt++);
-
-	ringbuf->tail = 0;
-	intel_ring_update_space(ringbuf);
-
-	return 0;
-}
-
 static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
 				struct intel_context *ctx, int bytes)
 {
+	int fill = 0;
 	int ret;
 
+	/*
+	 * If the request will not fit between 'tail' and the effective
+	 * size of the ringbuffer, then we need to pad the end of the
+	 * ringbuffer with NOOPs, then start the request at the top of
+	 * the ring. This increases the total size that we need to check
+	 * for by however much is left at the end of the ring ...
+	 */
 	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
-		ret = logical_ring_wrap_buffer(ringbuf, ctx);
-		if (unlikely(ret))
-			return ret;
+		fill = ringbuf->size - ringbuf->tail;
+		bytes += fill;
 	}
 
 	if (unlikely(ringbuf->space < bytes)) {
@@ -781,6 +764,21 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
 			return ret;
 	}
 
+	if (unlikely(fill)) {
+		uint32_t __iomem *virt = ringbuf->virtual_start + ringbuf->tail;
+
+		/* tail should not have moved */
+		if (WARN_ON(fill != ringbuf->size - ringbuf->tail))
+			fill = ringbuf->size - ringbuf->tail;
+
+		do
+			iowrite32(MI_NOOP, virt++);
+		while ((fill -= 4) > 0);
+
+		ringbuf->tail = 0;
+		intel_ring_update_space(ringbuf);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index a3406b2..5a1cd20 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2137,29 +2137,6 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 	return 0;
 }
 
-static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
-{
-	uint32_t __iomem *virt;
-	struct intel_ringbuffer *ringbuf = ring->buffer;
-	int rem = ringbuf->size - ringbuf->tail;
-
-	if (ringbuf->space < rem) {
-		int ret = ring_wait_for_space(ring, rem);
-		if (ret)
-			return ret;
-	}
-
-	virt = ringbuf->virtual_start + ringbuf->tail;
-	rem /= 4;
-	while (rem--)
-		iowrite32(MI_NOOP, virt++);
-
-	ringbuf->tail = 0;
-	intel_ring_update_space(ringbuf);
-
-	return 0;
-}
-
 int intel_ring_idle(struct intel_engine_cs *ring)
 {
 	struct drm_i915_gem_request *req;
@@ -2197,12 +2174,19 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring,
 				int bytes)
 {
 	struct intel_ringbuffer *ringbuf = ring->buffer;
+	int fill = 0;
 	int ret;
 
+	/*
+	 * If the request will not fit between 'tail' and the effective
+	 * size of the ringbuffer, then we need to pad the end of the
+	 * ringbuffer with NOOPs, then start the request at the top of
+	 * the ring. This increases the total size that we need to check
+	 * for by however much is left at the end of the ring ...
+	 */
 	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
-		ret = intel_wrap_ring_buffer(ring);
-		if (unlikely(ret))
-			return ret;
+		fill = ringbuf->size - ringbuf->tail;
+		bytes += fill;
 	}
 
 	if (unlikely(ringbuf->space < bytes)) {
@@ -2211,6 +2195,21 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring,
 			return ret;
 	}
 
+	if (unlikely(fill)) {
+		uint32_t __iomem *virt = ringbuf->virtual_start + ringbuf->tail;
+
+		/* tail should not have moved */
+		if (WARN_ON(fill != ringbuf->size - ringbuf->tail))
+			fill = ringbuf->size - ringbuf->tail;
+
+		do
+			iowrite32(MI_NOOP, virt++);
+		while ((fill -= 4) > 0);
+
+		ringbuf->tail = 0;
+		intel_ring_update_space(ringbuf);
+	}
+
 	return 0;
 }
 
-- 
1.7.9.5


