[FOR_CI 11/15] drm/i915: Port of Added scheduler support to __wait_request() calls

Arun Siluvery <arun.siluvery at linux.intel.com>
Mon Jul 4 15:18:47 UTC 2016


This is a partial port of the following patch from John Harrison's GPU
scheduler patch series (sent to Intel-GFX on Fri 17 July 2015 with the subject
line "[Intel-gfx] [RFC 19/39] drm/i915: Added scheduler support to
__wait_request() calls"):

	Author: John Harrison <John.C.Harrison at Intel.com>
	Date:   Thu Apr 10 10:48:55 2014 +0100
	Subject: drm/i915: Added scheduler support to __wait_request() calls

All scheduler references have been removed and the patch has been backported
to this baseline. We need this because Chris Wilson has pointed out that
threads that do not hold struct_mutex should not be thrown out of
__i915_wait_request() during TDR hang recovery, so we need a way to tell
which waiters hold the mutex and which do not. Callers now pass this
information via an explicit flags argument, as sketched below.
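
For illustration, a caller that holds struct_mutex would build its wait flags
roughly as follows (a minimal sketch mirroring the i915_wait_request() change
below; all names are taken from this patch):

	u32 flags;

	/* Preserve the interruptible behaviour of the old bool parameter */
	flags = dev_priv->mm.interruptible ? I915_WAIT_REQUEST_INTERRUPTIBLE : 0;
	/* Tell __i915_wait_request() that this thread holds struct_mutex */
	flags |= I915_WAIT_REQUEST_LOCKED;

	ret = __i915_wait_request(req, flags, NULL, NULL);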

Signed-off-by: Arun Siluvery <arun.siluvery at linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h         |  7 ++++++-
 drivers/gpu/drm/i915/i915_gem.c         | 35 ++++++++++++++++++++++-----------
 drivers/gpu/drm/i915/intel_display.c    |  5 +++--
 drivers/gpu/drm/i915/intel_ringbuffer.c |  8 +++++---
 4 files changed, 38 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 55e2c90..a40ffb4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3417,8 +3417,13 @@ void __i915_add_request(struct drm_i915_gem_request *req,
 	__i915_add_request(req, NULL, true)
 #define i915_add_request_no_flush(req) \
 	__i915_add_request(req, NULL, false)
+
+/* flags used by users of __i915_wait_request */
+#define I915_WAIT_REQUEST_INTERRUPTIBLE  (1 << 0)
+#define I915_WAIT_REQUEST_LOCKED         (1 << 1)
+
 int __i915_wait_request(struct drm_i915_gem_request *req,
-			bool interruptible,
+			u32 flags,
 			s64 *timeout,
 			struct intel_rps_client *rps);
 int __must_check i915_wait_request(struct drm_i915_gem_request *req);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f199966..5bccb5e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1410,7 +1410,9 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
 /**
  * __i915_wait_request - wait until execution of request has finished
  * @req: duh!
- * @interruptible: do an interruptible wait (normally yes)
+ * @flags: flags to define the nature of wait
+ *    I915_WAIT_INTERRUPTIBLE - do an interruptible wait (normally yes)
+ *    I915_WAIT_LOCKED - caller is holding struct_mutex
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
  * @rps: RPS client
  *
@@ -1425,10 +1427,11 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
  * errno with remaining time filled in timeout argument.
  */
 int __i915_wait_request(struct drm_i915_gem_request *req,
-			bool interruptible,
+			u32 flags,
 			s64 *timeout,
 			struct intel_rps_client *rps)
 {
+	bool interruptible = flags & I915_WAIT_REQUEST_INTERRUPTIBLE;
 	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
 	DEFINE_WAIT(reset);
 	struct intel_wait wait;
@@ -1496,12 +1499,14 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 		goto wakeup;
 
 	for (;;) {
+		bool locked = flags & I915_WAIT_REQUEST_LOCKED;
+
 		if (signal_pending_state(state, current)) {
 			ret = -ERESTARTSYS;
 			break;
 		}
 
-		if (i915_reset_in_progress(&req->i915->gpu_error)) {
+		if (locked && i915_reset_in_progress(&req->i915->gpu_error)) {
 			ret = -EAGAIN;
 			break;
 		}
@@ -1680,14 +1685,15 @@ int
 i915_wait_request(struct drm_i915_gem_request *req)
 {
 	struct drm_i915_private *dev_priv = req->i915;
-	bool interruptible;
+	u32 flags;
 	int ret;
 
-	interruptible = dev_priv->mm.interruptible;
-
 	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-	ret = __i915_wait_request(req, interruptible, NULL, NULL);
+	flags = dev_priv->mm.interruptible ? I915_WAIT_REQUEST_INTERRUPTIBLE : 0;
+	flags |= I915_WAIT_REQUEST_LOCKED;
+
+	ret = __i915_wait_request(req, flags, NULL, NULL);
 	if (ret)
 		return ret;
 
@@ -1799,7 +1805,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	mutex_unlock(&dev->struct_mutex);
 	ret = 0;
 	for (i = 0; ret == 0 && i < n; i++)
-		ret = __i915_wait_request(requests[i], true, NULL, rps);
+		ret = __i915_wait_request(requests[i],
+					  I915_WAIT_REQUEST_INTERRUPTIBLE,
+					  NULL, rps);
 	mutex_lock(&dev->struct_mutex);
 
 	for (i = 0; i < n; i++) {
@@ -3460,7 +3468,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
 	for (i = 0; i < n; i++) {
 		if (ret == 0)
-			ret = __i915_wait_request(req[i], true,
+			ret = __i915_wait_request(req[i], I915_WAIT_REQUEST_INTERRUPTIBLE,
 						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
 						  to_rps_client(file));
 		i915_gem_request_unreference(req[i]);
@@ -3491,8 +3499,13 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 
 	if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
 		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+		u32 flags;
+
+		flags = i915->mm.interruptible ? I915_WAIT_REQUEST_INTERRUPTIBLE : 0;
+		flags |= I915_WAIT_REQUEST_LOCKED;
+
 		ret = __i915_wait_request(from_req,
-					  i915->mm.interruptible,
+					  flags,
 					  NULL,
 					  &i915->rps.semaphores);
 		if (ret)
@@ -4484,7 +4497,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (target == NULL)
 		return 0;
 
-	ret = __i915_wait_request(target, true, NULL, NULL);
+	ret = __i915_wait_request(target, I915_WAIT_REQUEST_INTERRUPTIBLE, NULL, NULL);
 	i915_gem_request_unreference(target);
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index acb89e7..e0bf913 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11559,7 +11559,7 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
 
 	if (work->flip_queued_req)
 		WARN_ON(__i915_wait_request(work->flip_queued_req,
-					    false, NULL,
+					    0, NULL,
 					    &dev_priv->rps.mmioflips));
 
 	/* For framebuffer backed by dmabuf, wait for fence */
@@ -13611,7 +13611,8 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
 				continue;
 
 			ret = __i915_wait_request(intel_plane_state->wait_req,
-						  true, NULL, NULL);
+						  I915_WAIT_REQUEST_INTERRUPTIBLE,
+						  NULL, NULL);
 			if (ret) {
 				/* Any hang should be swallowed by the wait */
 				WARN_ON(ret == -EIO);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 3fafc35..a7920ab 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2277,6 +2277,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
 int intel_engine_idle(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *req;
+	u32 flags;
 
 	/* Wait upon the last request to be completed */
 	if (list_empty(&engine->request_list))
@@ -2286,10 +2287,11 @@ int intel_engine_idle(struct intel_engine_cs *engine)
 			 struct drm_i915_gem_request,
 			 list);
 
+	flags = req->i915->mm.interruptible ? I915_WAIT_REQUEST_INTERRUPTIBLE : 0;
+	flags |= I915_WAIT_REQUEST_LOCKED;
+
 	/* Make sure we do not trigger any retires */
-	return __i915_wait_request(req,
-				   req->i915->mm.interruptible,
-				   NULL, NULL);
+	return __i915_wait_request(req, flags, NULL, NULL);
 }
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
-- 
1.9.1


