[Intel-gfx] [RFC 38/44] drm/i915: Add early exit to execbuff_final() if insufficient ring space
John.C.Harrison at Intel.com
John.C.Harrison at Intel.com
Thu Jun 26 19:24:29 CEST 2014
From: John Harrison <John.C.Harrison at Intel.com>
One of the major purposes of the GPU scheduler is to avoid stalling the CPU when
the GPU is busy and unable to accept more work. This change adds support to the
ring submission code to allow a ring space check to be performed before
attempting to submit a batch buffer to the hardware. If insufficient space is
available then the scheduler can go away and come back later, letting the CPU
get on with other work, rather than stalling and waiting for the hardware to
catch up.
---
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 44 +++++++++++++++++++++++++++-
drivers/gpu/drm/i915/intel_ringbuffer.c | 34 +++++++++++++++++----
drivers/gpu/drm/i915/intel_ringbuffer.h | 2 ++
3 files changed, 73 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3227a39..a9570ff 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1490,6 +1490,36 @@ int i915_gem_do_execbuffer_final(struct i915_execbuffer_params *params)
goto early_err;
}
+#ifdef CONFIG_DRM_I915_SCHEDULER
+{
+ uint32_t min_space;
+
+ /*
+ * It would be a bad idea to run out of space while writing commands
+ * to the ring. One of the major aims of the scheduler is to not stall
+ * at any point for any reason. However, doing an early exit halfway
+ * through submission could result in a partial sequence being written
+ * which would leave the engine in an unknown state. Therefore, check in
+ * advance that there will be enough space for the entire submission
+ * whether emitted by the code below OR by any other functions that may
+ * be executed before the end of final().
+ *
+ * NB: This test deliberately overestimates, because that's easier than
+ * tracing every potential path that could be taken!
+ *
+ * Current measurements suggest that we may need to emit up to 744 bytes
+ * (186 dwords), so this is rounded up to 256 dwords here. Then we double
+ * that to get the free space requirement, because the block isn't allowed
+ * to span the transition from the end to the beginning of the ring.
+ */
+#define I915_BATCH_EXEC_MAX_LEN 256 /* max dwords emitted here */
+ min_space = I915_BATCH_EXEC_MAX_LEN * 2 * sizeof(uint32_t);
+ ret = intel_ring_test_space(ring, min_space);
+ if (ret)
+ goto early_err;
+}
+#endif
+
intel_runtime_pm_get(dev_priv);
/* Ensure the correct seqno gets assigned to the correct buffer: */
@@ -1500,6 +1530,16 @@ int i915_gem_do_execbuffer_final(struct i915_execbuffer_params *params)
seqno = params->seqno;
+#ifdef CONFIG_DRM_I915_SCHEDULER
+ ret = intel_ring_begin(ring, I915_BATCH_EXEC_MAX_LEN);
+ if (ret)
+ goto err;
+#endif
+
+ /* Seqno matches? */
+ BUG_ON(ring->outstanding_lazy_seqno != params->seqno);
+ BUG_ON(ring->preallocated_lazy_request != params->request);
+
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
@@ -1518,9 +1558,11 @@ int i915_gem_do_execbuffer_final(struct i915_execbuffer_params *params)
if (ring == &dev_priv->ring[RCS] &&
params->mode != dev_priv->relative_constants_mode) {
+#ifndef CONFIG_DRM_I915_SCHEDULER
ret = intel_ring_begin(ring, 4);
if (ret)
- goto err;
+ goto err;
+#endif
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1ad162b..640f26f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -49,7 +49,7 @@ static inline int __ring_space(int head, int tail, int size)
return space;
}
-static inline int ring_space(struct intel_engine_cs *ring)
+inline int intel_ring_space(struct intel_engine_cs *ring)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
@@ -546,7 +546,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
else {
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = intel_ring_space(ring);
ringbuf->last_retired_head = -1;
}
@@ -1530,7 +1530,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = intel_ring_space(ring);
if (ringbuf->space >= n)
return 0;
}
@@ -1553,7 +1553,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = intel_ring_space(ring);
return 0;
}
@@ -1582,7 +1582,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
trace_i915_ring_wait_begin(ring);
do {
ringbuf->head = I915_READ_HEAD(ring);
- ringbuf->space = ring_space(ring);
+ ringbuf->space = intel_ring_space(ring);
if (ringbuf->space >= n) {
ret = 0;
break;
@@ -1634,7 +1634,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
iowrite32(MI_NOOP, virt++);
ringbuf->tail = 0;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = intel_ring_space(ring);
return 0;
}
@@ -1767,6 +1767,28 @@ int intel_ring_cacheline_align(struct intel_engine_cs *ring)
return 0;
}
+/* Test to see if the ring has sufficient space to submit a given piece of work
+ * without causing a stall */
+int intel_ring_test_space(struct intel_engine_cs *ring, int min_space)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+
+ if (ringbuf->space < min_space) {
+ /* Need to update the actual ring space. Otherwise, the system
+ * hangs forever testing a software copy of the space value that
+ * never changes!
+ */
+ ringbuf->head = I915_READ_HEAD(ring);
+ ringbuf->space = intel_ring_space(ring);
+
+ if (ringbuf->space < min_space)
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index cc92de2..cf9a535 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -345,6 +345,8 @@ intel_write_status_page(struct intel_engine_cs *ring,
void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
+int intel_ring_space(struct intel_engine_cs *ring);
+int intel_ring_test_space(struct intel_engine_cs *ring, int min_space);
int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
int __must_check intel_ring_alloc_seqno(struct intel_engine_cs *ring);
--
1.7.9.5
More information about the Intel-gfx
mailing list