[Intel-gfx] [RFC 26/44] drm/i915: Added scheduler support to __wait_seqno() calls

John.C.Harrison at Intel.com John.C.Harrison at Intel.com
Thu Jun 26 19:24:17 CEST 2014


From: John Harrison <John.C.Harrison at Intel.com>

The scheduler can cause batch buffers, and hence seqno values, to be submitted
to the ring out of order and asynchronously to their submission to the driver.
Thus waiting for the completion of a given seqno value is not as simple as
saying 'is my value <= current ring value'. Not only might the arithmetic
comparison be invalid but the seqno in question might not even have been sent to
the hardware yet.

This change hooks the scheduler in to the wait_seqno() code to ensure correct
behaviour. That is, flush the target batch buffer through to the hardware and do
an out-of-order-safe comparison. Note that pre-emptive scheduling adds in the
further complication that even though the batch buffer might have been sent at
the start of the wait call, it could be thrown off the hardware and back into
the software queue during the wait. This means that waiting indefinitely with
the driver wide mutex lock held is a very Bad Idea. Instead, the wait call must
return EAGAIN at least as far back as necessary to release the mutex lock and
allow the scheduler's asynchronous processing to get in and handle the
pre-emption operation and eventually re-submit the work.
---
 drivers/gpu/drm/i915/i915_gem.c       |   53 +++++++++++++++++++++++++++------
 drivers/gpu/drm/i915/i915_scheduler.c |    8 +++++
 drivers/gpu/drm/i915/i915_scheduler.h |    9 ++++++
 3 files changed, 61 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7727f0f..5ed5f66 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1152,7 +1152,8 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 			unsigned reset_counter,
 			bool interruptible,
 			struct timespec *timeout,
-			struct drm_i915_file_private *file_priv)
+			struct drm_i915_file_private *file_priv,
+			bool is_locked)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1161,9 +1162,14 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 	struct timespec before, now;
 	DEFINE_WAIT(wait);
 	unsigned long timeout_expire;
-	int ret;
+	int ret = 0;
+#ifdef CONFIG_DRM_I915_SCHEDULER
+	bool    completed;
+#endif
 
+	might_sleep();
 	WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
+	BUG_ON(seqno == 0);
 
 	if (i915_seqno_passed(ring, ring->get_seqno(ring, true), seqno))
 		return 0;
@@ -1201,9 +1207,34 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 			break;
 		}
 
-		if (i915_seqno_passed(ring, ring->get_seqno(ring, false), seqno)) {
-			ret = 0;
-			break;
+#ifdef CONFIG_DRM_I915_SCHEDULER
+		if (is_locked) {
+			/* If this seqno is being tracked by the scheduler then
+			 * it is unsafe to sleep with the mutex lock held as the
+			 * scheduler may require the lock in order to progress
+			 * the seqno. */
+			if (i915_scheduler_is_seqno_in_flight(ring, seqno, &completed)) {
+				ret = completed ? 0 : -EAGAIN;
+				break;
+			}
+
+			/* If the seqno is not tracked by the scheduler then a
+			 * straight arithmetic comparison test can be done. */
+			if (i915_compare_seqno_values(ring->get_seqno(ring, false), seqno) >= 0) {
+				ret = 0;
+				break;
+			}
+		} else
+#endif
+		{
+			/* The regular 'is seqno passed' test is fine if the
+			 * mutex lock is not held. Even if the seqno is stuck
+			 * in the scheduler, it will be able to progress while
+			 * this thread waits. */
+			if (i915_seqno_passed(ring, ring->get_seqno(ring, false), seqno)) {
+				ret = 0;
+				break;
+			}
 		}
 
 		if (interruptible && signal_pending(current)) {
@@ -1265,6 +1296,10 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 	BUG_ON(seqno == 0);
 
+	ret = I915_SCHEDULER_FLUSH_SEQNO(ring, true, seqno);
+	if (ret < 0)
+		return ret;
+
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
 	if (ret)
 		return ret;
@@ -1275,7 +1310,7 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
 
 	return __wait_seqno(ring, seqno,
 			    atomic_read(&dev_priv->gpu_error.reset_counter),
-			    interruptible, NULL, NULL);
+			    interruptible, NULL, NULL, true);
 }
 
 static int
@@ -1352,7 +1387,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	mutex_unlock(&dev->struct_mutex);
-	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv, false);
 	mutex_lock(&dev->struct_mutex);
 	if (ret)
 		return ret;
@@ -2848,7 +2883,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	mutex_unlock(&dev->struct_mutex);
 
-	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv, false);
 	if (timeout)
 		args->timeout_ns = timespec_to_ns(timeout);
 	return ret;
@@ -4071,7 +4106,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (seqno == 0)
 		return 0;
 
-	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL, false);
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index b5d391c..d579bab 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -117,6 +117,14 @@ int i915_scheduler_remove(struct intel_engine_cs *ring)
 	return 0;
 }
 
+int i915_scheduler_flush_seqno(struct intel_engine_cs *ring, bool is_locked,
+			       uint32_t seqno)
+{
+	/* Do stuff... */
+
+	return 0;
+}
+
 bool i915_scheduler_is_seqno_in_flight(struct intel_engine_cs *ring,
 			       uint32_t seqno, bool *completed)
 {
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 57e001a..3811359 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -57,6 +57,13 @@ struct i915_scheduler_queue_entry {
 	int                                 num_objs;
 };
 
+#ifdef CONFIG_DRM_I915_SCHEDULER
+#   define I915_SCHEDULER_FLUSH_SEQNO(ring, locked, seqno)                   \
+		i915_scheduler_flush_seqno(ring, locked, seqno)
+#else
+#   define I915_SCHEDULER_FLUSH_SEQNO(ring, locked, seqno)      0
+#endif
+
 bool        i915_scheduler_is_enabled(struct drm_device *dev);
 int         i915_scheduler_init(struct drm_device *dev);
 int         i915_scheduler_closefile(struct drm_device *dev,
@@ -74,6 +81,8 @@ struct i915_scheduler {
 
 int         i915_scheduler_fly_seqno(struct intel_engine_cs *ring, uint32_t seqno);
 int         i915_scheduler_remove(struct intel_engine_cs *ring);
+int         i915_scheduler_flush_seqno(struct intel_engine_cs *ring,
+				       bool is_locked, uint32_t seqno);
 bool        i915_scheduler_is_seqno_in_flight(struct intel_engine_cs *ring,
 					      uint32_t seqno, bool *completed);
 
-- 
1.7.9.5




More information about the Intel-gfx mailing list