[Intel-gfx] [PATCH v3] drm/i915: Fix recursive calls to unmap

Ben Widawsky ben at bwidawsk.net
Mon Oct 31 04:16:45 CET 2011


After the ILK vt-d workaround patches it became clear that we had
introduced a bug. Chris tracked the issue down to recursive calls to
unmap. This happens because we try to optimize waiting on requests by
retiring requests after the wait, which may drop the last reference on
an object and end up freeing the object (and then unmapping it from the
GTT, which on ILK with vt-d idles the GPU and waits again, recursing).
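
To make the cycle concrete, here is a rough sketch of the call chain as
I understand it (the named functions are the ones touched by this patch;
the unbind entry point is paraphrased), assuming ILK with vt-d enabled:

  unbind an object from the GTT
    do_idling()                             /* ILK vt-d workaround, i915_gem_gtt.c */
      i915_gpu_idle()
        i915_ring_idle()
          i915_wait_request()
            i915_gem_retire_requests_ring() /* may drop the last reference, freeing
                                               the object and unmapping it from the
                                               GTT, which re-enters do_idling() */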

The solution here is to add a new flag to the call chain which gives the
routines the information they need to defer actions that could cause us
to recurse. An i915_gpu_idle macro wrapping the renamed function has been
defined so that existing callers keep the old behavior.
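
For illustration only (not part of the diff), the caller-side effect is:

  /* Existing callers are unchanged: retirement still runs after the wait. */
  ret = i915_gpu_idle(dev);          /* expands to i915_gem_gpu_idle(dev, false) */

  /* The ILK vt-d unmap path requests a strict idle with retirement deferred,
   * so the wait cannot free objects and recurse back into unmap. */
  ret = i915_gem_gpu_idle(dev_priv->dev, true);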

Kudos to Chris for tracking this one down.

This fixes tests/gem_linear_blits in intel-gpu-tools.

v2: v1 used a global; v2 directly modifies the call chain instead.

v3: As Keith pointed out, v2 changed retirement behavior. While that was
intentional, it wasn't related to the bugfix, and so it has been removed.
To do this, i915_wait_request has been turned into a macro to ensure the
old behavior is kept.

Cc: Keith Packard <keithp at keithp.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
Reported-by: guang.a.yang at intel.com
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=42180
Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.h     |    9 ++++++---
 drivers/gpu/drm/i915/i915_gem.c     |   17 +++++++++--------
 drivers/gpu/drm/i915/i915_gem_gtt.c |    2 +-
 3 files changed, 16 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 06a37f4..fc3e00c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1180,13 +1180,16 @@ void i915_gem_do_init(struct drm_device *dev,
 		      unsigned long start,
 		      unsigned long mappable_end,
 		      unsigned long end);
-int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_gpu_idle(struct drm_device *dev, bool strictly_idle);
+#define i915_gpu_idle(dev) i915_gem_gpu_idle(dev, false)
 int __must_check i915_gem_idle(struct drm_device *dev);
 int __must_check i915_add_request(struct intel_ring_buffer *ring,
 				  struct drm_file *file,
 				  struct drm_i915_gem_request *request);
-int __must_check i915_wait_request(struct intel_ring_buffer *ring,
-				   uint32_t seqno);
+int __must_check i915_gem_wait_request(struct intel_ring_buffer *ring,
+				   uint32_t seqno,
+				   bool defer_retirement);
+#define i915_wait_request(ring, seqno) i915_gem_wait_request(ring, seqno, false)
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6651c36..3ea017a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1942,8 +1942,9 @@ i915_gem_retire_work_handler(struct work_struct *work)
  * request and object lists appropriately for that event.
  */
 int
-i915_wait_request(struct intel_ring_buffer *ring,
-		  uint32_t seqno)
+i915_gem_wait_request(struct intel_ring_buffer *ring,
+		  uint32_t seqno,
+		  bool defer_retirement)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	u32 ier;
@@ -2027,7 +2028,7 @@ i915_wait_request(struct intel_ring_buffer *ring,
 	 * buffer to have made it to the inactive list, and we would need
 	 * a separate wait queue to handle that.
 	 */
-	if (ret == 0)
+	if (ret == 0 && !defer_retirement)
 		i915_gem_retire_requests_ring(ring);
 
 	return ret;
@@ -2172,7 +2173,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
 	return 0;
 }
 
-static int i915_ring_idle(struct intel_ring_buffer *ring)
+static int i915_ring_idle(struct intel_ring_buffer *ring, bool defer_retirement)
 {
 	int ret;
 
@@ -2186,18 +2187,18 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
 			return ret;
 	}
 
-	return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
+	return i915_gem_wait_request(ring, i915_gem_next_request_seqno(ring),
+				     defer_retirement);
 }
 
-int
-i915_gpu_idle(struct drm_device *dev)
+int i915_gem_gpu_idle(struct drm_device *dev, bool strictly_idle)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret, i;
 
 	/* Flush everything onto the inactive list. */
 	for (i = 0; i < I915_NUM_RINGS; i++) {
-		ret = i915_ring_idle(&dev_priv->ring[i]);
+		ret = i915_ring_idle(&dev_priv->ring[i], strictly_idle);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6042c5e..b90b547 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -55,7 +55,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
 
 	if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
 		dev_priv->mm.interruptible = false;
-		if (i915_gpu_idle(dev_priv->dev)) {
+		if (i915_gem_gpu_idle(dev_priv->dev, true)) {
 			DRM_ERROR("Couldn't idle GPU\n");
 			/* Wait a bit, in hopes it avoids the hang */
 			udelay(10);
-- 
1.7.7.1