[Intel-gfx] [PATCH 2/3] drm/i915: Convert __i915_wait_request to receive flags

Chris Wilson <chris@chris-wilson.co.uk>
Wed Nov 18 01:56:07 PST 2015


Convert the bool interruptible argument of __i915_wait_request() to a flags
bitmask so that we can conveniently add further boolean parameters.
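
To illustrate the resulting calling convention (a sketch assembled from the
hunks below, not an additional change; names exactly as in this patch): a
caller derives the flags word from dev_priv->mm.interruptible instead of
passing the bool directly, and callers that need an uninterruptible wait
(e.g. the mmio flip worker) simply pass 0:

	unsigned flags = 0;

	/* Translate the caller's interruptible state into a wait flag */
	if (dev_priv->mm.interruptible)
		flags |= I915_WAIT_INTERRUPTIBLE;

	ret = __i915_wait_request(req,
				  atomic_read(&dev_priv->gpu_error.reset_counter),
				  flags, NULL, NULL);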

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h         |  3 ++-
 drivers/gpu/drm/i915/i915_gem.c         | 42 +++++++++++++++++++++++----------
 drivers/gpu/drm/i915/intel_display.c    |  2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c |  7 +++++-
 4 files changed, 39 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 16095b95d2df..f47d701f2ddb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2975,9 +2975,10 @@ void __i915_add_request(struct drm_i915_gem_request *req,
 	__i915_add_request(req, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
 			unsigned reset_counter,
-			bool interruptible,
+			unsigned flags,
 			s64 *timeout,
 			struct intel_rps_client *rps);
+#define I915_WAIT_INTERRUPTIBLE 0x1
 int __must_check i915_wait_request(struct drm_i915_gem_request *req);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index af9ffa11ef44..d4d5c6e8c02f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1224,7 +1224,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
  * __i915_wait_request - wait until execution of request has finished
  * @req: duh!
  * @reset_counter: reset sequence associated with the given request
- * @interruptible: do an interruptible wait (normally yes)
+ * @flags: flags for the wait (e.g. I915_WAIT_INTERRUPTIBLE)
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
  *
  * Note: It is of utmost importance that the passed in seqno and reset_counter
@@ -1239,7 +1239,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
  */
 int __i915_wait_request(struct drm_i915_gem_request *req,
 			unsigned reset_counter,
-			bool interruptible,
+			unsigned flags,
 			s64 *timeout,
 			struct intel_rps_client *rps)
 {
@@ -1248,7 +1248,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const bool irq_test_in_progress =
 		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
-	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+	int state = flags & I915_WAIT_INTERRUPTIBLE ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
 	DEFINE_WAIT(wait);
 	unsigned long timeout_expire;
 	s64 before, now;
@@ -1292,7 +1292,8 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
 			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
 			 * is truely gone. */
-			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+			ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+						   flags & I915_WAIT_INTERRUPTIBLE);
 			if (ret == 0)
 				ret = -EAGAIN;
 			break;
@@ -1451,24 +1452,28 @@ i915_wait_request(struct drm_i915_gem_request *req)
 {
 	struct drm_device *dev;
 	struct drm_i915_private *dev_priv;
-	bool interruptible;
+	unsigned flags;
 	int ret;
 
 	BUG_ON(req == NULL);
 
 	dev = req->ring->dev;
 	dev_priv = dev->dev_private;
-	interruptible = dev_priv->mm.interruptible;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+				   dev_priv->mm.interruptible);
 	if (ret)
 		return ret;
 
+	flags = 0;
+	if (dev_priv->mm.interruptible)
+		flags |= I915_WAIT_INTERRUPTIBLE;
+
 	ret = __i915_wait_request(req,
 				  atomic_read(&dev_priv->gpu_error.reset_counter),
-				  interruptible, NULL, NULL);
+				  flags, NULL, NULL);
 	if (ret)
 		return ret;
 
@@ -1580,7 +1585,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 
 	mutex_unlock(&dev->struct_mutex);
 	for (i = 0; ret == 0 && i < n; i++)
-		ret = __i915_wait_request(requests[i], reset_counter, true,
+		ret = __i915_wait_request(requests[i],
+					  reset_counter,
+					  I915_WAIT_INTERRUPTIBLE,
 					  NULL, rps);
 	mutex_lock(&dev->struct_mutex);
 
@@ -3096,7 +3103,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
 	for (i = 0; i < n; i++) {
 		if (ret == 0)
-			ret = __i915_wait_request(req[i], reset_counter, true,
+			ret = __i915_wait_request(req[i],
+						  reset_counter,
+						  I915_WAIT_INTERRUPTIBLE,
 						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
 						  file->driver_priv);
 		i915_gem_request_unreference__unlocked(req[i]);
@@ -3127,9 +3136,15 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 
 	if (!i915_semaphore_is_enabled(obj->base.dev)) {
 		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+		unsigned flags;
+
+		flags = 0;
+		if (i915->mm.interruptible)
+			flags |= I915_WAIT_INTERRUPTIBLE;
+
 		ret = __i915_wait_request(from_req,
 					  atomic_read(&i915->gpu_error.reset_counter),
-					  i915->mm.interruptible,
+					  flags,
 					  NULL,
 					  &i915->rps.semaphores);
 		if (ret)
@@ -4090,7 +4105,10 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (target == NULL)
 		return 0;
 
-	ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
+	ret = __i915_wait_request(target,
+				  reset_counter,
+				  I915_WAIT_INTERRUPTIBLE,
+				  NULL, NULL);
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f62ffc04c21d..edc0d313398d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11216,7 +11216,7 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
 	if (mmio_flip->req) {
 		WARN_ON(__i915_wait_request(mmio_flip->req,
 					    mmio_flip->crtc->reset_counter,
-					    false, NULL,
+					    0, NULL,
 					    &mmio_flip->i915->rps.mmioflips));
 		i915_gem_request_unreference__unlocked(mmio_flip->req);
 	}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9461a238f5d5..664ce0b20b23 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2231,6 +2231,7 @@ static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
 int intel_ring_idle(struct intel_engine_cs *ring)
 {
 	struct drm_i915_gem_request *req;
+	unsigned flags;
 
 	/* Wait upon the last request to be completed */
 	if (list_empty(&ring->request_list))
@@ -2240,10 +2241,14 @@ int intel_ring_idle(struct intel_engine_cs *ring)
 			struct drm_i915_gem_request,
 			list);
 
+	flags = 0;
+	if (to_i915(ring->dev)->mm.interruptible)
+		flags |= I915_WAIT_INTERRUPTIBLE;
+
 	/* Make sure we do not trigger any retires */
 	return __i915_wait_request(req,
 				   atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter),
-				   to_i915(ring->dev)->mm.interruptible,
+				   flags,
 				   NULL, NULL);
 }
 
-- 
2.6.2


