[FOR_CI 11/15] drm/i915: Port of Added scheduler support to __wait_request() calls

Arun Siluvery arun.siluvery at linux.intel.com
Mon Jul 4 09:31:06 UTC 2016


This is a partial port of the following patch from John Harrison's GPU
scheduler patch series: (patch sent to Intel-GFX with the subject line
"[Intel-gfx] [RFC 19/39] drm/i915: Added scheduler support to __wait_request()
calls" on Fri 17 July 2015)

	Author: John Harrison <John.C.Harrison at Intel.com>
	Date:   Thu Apr 10 10:48:55 2014 +0100
	Subject: drm/i915: Added scheduler support to __wait_request() calls

Removed all scheduler references and backported it to this baseline. The reason
we need this is because Chris Wilson has pointed out that threads that don't
hold the struct_mutex should not be thrown out of __i915_wait_request during
TDR hang recovery. Therefore we need a way to determine which threads are
holding the mutex and which are not.

Signed-off-by: Arun Siluvery <arun.siluvery at linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h         |  7 ++++++-
 drivers/gpu/drm/i915/i915_gem.c         | 34 ++++++++++++++++++++++-----------
 drivers/gpu/drm/i915/intel_display.c    |  5 +++--
 drivers/gpu/drm/i915/intel_ringbuffer.c |  8 +++++---
 4 files changed, 37 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7ce6901..d7caea7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3402,8 +3402,13 @@ void __i915_add_request(struct drm_i915_gem_request *req,
 	__i915_add_request(req, NULL, true)
 #define i915_add_request_no_flush(req) \
 	__i915_add_request(req, NULL, false)
+
+/* flags used by users of __i915_wait_request */
+#define I915_WAIT_REQUEST_INTERRUPTIBLE  (1 << 0)
+#define I915_WAIT_REQUEST_LOCKED         (1 << 1)
+
 int __i915_wait_request(struct drm_i915_gem_request *req,
-			bool interruptible,
+			u32 flags,
 			s64 *timeout,
 			struct intel_rps_client *rps);
 int __must_check i915_wait_request(struct drm_i915_gem_request *req);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bbe3835..495e437 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1431,7 +1431,9 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
 /**
  * __i915_wait_request - wait until execution of request has finished
  * @req: duh!
- * @interruptible: do an interruptible wait (normally yes)
+ * @flags: flags to define the nature of wait
  *    I915_WAIT_REQUEST_INTERRUPTIBLE - do an interruptible wait (normally yes)
  *    I915_WAIT_REQUEST_LOCKED - caller is holding struct_mutex
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
  * @rps: RPS client
  *
@@ -1446,7 +1448,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
  * errno with remaining time filled in timeout argument.
  */
 int __i915_wait_request(struct drm_i915_gem_request *req,
-			bool interruptible,
+			u32 flags,
 			s64 *timeout,
 			struct intel_rps_client *rps)
 {
@@ -1454,6 +1456,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	struct drm_i915_private *dev_priv = req->i915;
 	const bool irq_test_in_progress =
 		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
+	bool interruptible = flags & I915_WAIT_REQUEST_INTERRUPTIBLE;
 	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
 	DEFINE_WAIT(wait);
 	unsigned long timeout_expire;
@@ -1501,6 +1504,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 
 	for (;;) {
 		struct timer_list timer;
+		bool locked = flags & I915_WAIT_REQUEST_LOCKED;
 
 		prepare_to_wait(&engine->irq_queue, &wait, state);
 
@@ -1517,7 +1521,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 			break;
 		}
 
-		if (i915_reset_in_progress(&dev_priv->gpu_error)) {
+		if (i915_reset_in_progress(&dev_priv->gpu_error) && locked) {
 			ret = -EAGAIN;
 			break;
 		}
@@ -1679,14 +1683,15 @@ int
 i915_wait_request(struct drm_i915_gem_request *req)
 {
 	struct drm_i915_private *dev_priv = req->i915;
-	bool interruptible;
+	u32 flags;
 	int ret;
 
-	interruptible = dev_priv->mm.interruptible;
-
 	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-	ret = __i915_wait_request(req, interruptible, NULL, NULL);
+	flags = dev_priv->mm.interruptible ? I915_WAIT_REQUEST_INTERRUPTIBLE : 0;
+	flags |= I915_WAIT_REQUEST_LOCKED;
+
+	ret = __i915_wait_request(req, flags, NULL, NULL);
 	if (ret)
 		return ret;
 
@@ -1798,7 +1803,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	mutex_unlock(&dev->struct_mutex);
 	ret = 0;
 	for (i = 0; ret == 0 && i < n; i++)
-		ret = __i915_wait_request(requests[i], true, NULL, rps);
+		ret = __i915_wait_request(requests[i],
+					  I915_WAIT_REQUEST_INTERRUPTIBLE,
+					  NULL, rps);
 	mutex_lock(&dev->struct_mutex);
 
 	for (i = 0; i < n; i++) {
@@ -3423,7 +3430,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
 	for (i = 0; i < n; i++) {
 		if (ret == 0)
-			ret = __i915_wait_request(req[i], true,
+			ret = __i915_wait_request(req[i], I915_WAIT_REQUEST_INTERRUPTIBLE,
 						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
 						  to_rps_client(file));
 		i915_gem_request_unreference(req[i]);
@@ -3454,8 +3461,13 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 
 	if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
 		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+		u32 flags;
+
+		flags = i915->mm.interruptible ? I915_WAIT_REQUEST_INTERRUPTIBLE : 0;
+		flags |= I915_WAIT_REQUEST_LOCKED;
+
 		ret = __i915_wait_request(from_req,
-					  i915->mm.interruptible,
+					  flags,
 					  NULL,
 					  &i915->rps.semaphores);
 		if (ret)
@@ -4447,7 +4459,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (target == NULL)
 		return 0;
 
-	ret = __i915_wait_request(target, true, NULL, NULL);
+	ret = __i915_wait_request(target, I915_WAIT_REQUEST_INTERRUPTIBLE, NULL, NULL);
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 30c181a..1fbb94e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11584,7 +11584,7 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
 
 	if (work->flip_queued_req)
 		WARN_ON(__i915_wait_request(work->flip_queued_req,
-					    false, NULL,
+					    0, NULL,
 					    &dev_priv->rps.mmioflips));
 
 	/* For framebuffer backed by dmabuf, wait for fence */
@@ -13637,7 +13637,8 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
 				continue;
 
 			ret = __i915_wait_request(intel_plane_state->wait_req,
-						  true, NULL, NULL);
+						  I915_WAIT_REQUEST_INTERRUPTIBLE,
+						  NULL, NULL);
 			if (ret) {
 				/* Any hang should be swallowed by the wait */
 				WARN_ON(ret == -EIO);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 24cdc92..7cc93bd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2483,6 +2483,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
 int intel_engine_idle(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *req;
+	u32 flags;
 
 	/* Wait upon the last request to be completed */
 	if (list_empty(&engine->request_list))
@@ -2492,10 +2493,11 @@ int intel_engine_idle(struct intel_engine_cs *engine)
 			 struct drm_i915_gem_request,
 			 list);
 
+	flags = req->i915->mm.interruptible ? I915_WAIT_REQUEST_INTERRUPTIBLE : 0;
+	flags |= I915_WAIT_REQUEST_LOCKED;
+
 	/* Make sure we do not trigger any retires */
-	return __i915_wait_request(req,
-				   req->i915->mm.interruptible,
-				   NULL, NULL);
+	return __i915_wait_request(req, flags, NULL, NULL);
 }
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
-- 
1.9.1



More information about the Intel-gfx-trybot mailing list