[Intel-gfx] [PATCH] drm/i915: Repeat unbinding during free if interrupted (v5)

Chris Wilson <chris@chris-wilson.co.uk>
Fri Jul 23 20:20:46 CEST 2010


If, during the freeing of an object, the unbind is interrupted by a signal,
which is quite possible if we have outstanding GPU writes that must be
flushed before the unbind can complete, the unbind is silently aborted.
This still leaves the AGP region and backing pages allocated, and, perhaps
more importantly, the object remains on the various lists, exposing us to
memory corruption.

I think this is the cause of the use-after-free seen in reports such as

  Bug 15664 - Graphics hang and kernel backtrace when starting Azureus
              with Compiz enabled
  https://bugzilla.kernel.org/show_bug.cgi?id=15664
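
To make the hazard concrete, here is a minimal userspace sketch of the
invariant at stake (all names are invented and this is not driver code):
an object has to be unlinked from every list it sits on before its memory
is freed, otherwise the next walk of that list dereferences freed memory.

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct obj {
	int id;
	LIST_ENTRY(obj) node;	/* intrusive link, much like struct list_head */
};

LIST_HEAD(obj_list, obj);

int main(void)
{
	struct obj_list inactive = LIST_HEAD_INITIALIZER(inactive);
	struct obj *o = calloc(1, sizeof(*o));
	struct obj *it;

	o->id = 1;
	LIST_INSERT_HEAD(&inactive, o, node);

	/* The invariant: unlink before free.  Freeing 'o' while it is still
	 * linked would leave 'inactive' pointing at freed memory, which is
	 * exactly the use-after-free described above. */
	LIST_REMOVE(o, node);
	free(o);

	LIST_FOREACH(it, &inactive, node)	/* safe: nothing points at 'o' */
		printf("obj %d still inactive\n", it->id);

	return 0;
}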

v2: Daniel Vetter reminded me that kernel space programming is never easy.
We cannot simply spin to clear the pending signal, and so must defer the
freeing of the object until later (a simplified sketch of the scheme is
included below).
v3: Run from the top-level retire requests.
v4: Tested with P(return -ERESTARTSYS) = 0.5 from i915_gem_do_wait_request()
v5: Rebase against Eric's for-linus tree.
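
For readers who want the shape of the scheme without wading through the
diff, here is a rough userspace sketch (the names obj, unbind, free_object,
retire and deferred_free are invented; this is not the driver code): an
object whose teardown is interrupted is parked on a deferred-free list
rather than freed while still linked, and a later retire pass retries the
unbind and completes the free.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct obj {
	int id;
	TAILQ_ENTRY(obj) node;
};

TAILQ_HEAD(obj_list, obj);

static struct obj_list deferred_free = TAILQ_HEAD_INITIALIZER(deferred_free);

/* Stand-in for the unbind step: pretend a pending signal interrupted it. */
static int unbind(struct obj *o, bool interrupted)
{
	(void)o;
	return interrupted ? -1 /* think -ERESTARTSYS */ : 0;
}

static void free_object(struct obj *o, bool interrupted)
{
	if (unbind(o, interrupted) != 0) {
		/* Cannot finish now: park the object on the deferred list
		 * instead of freeing memory that is still linked/in use. */
		TAILQ_INSERT_TAIL(&deferred_free, o, node);
		return;
	}
	free(o);
}

/* Stand-in for the top-level retire pass: complete any parked frees. */
static void retire(void)
{
	while (!TAILQ_EMPTY(&deferred_free)) {
		struct obj *o = TAILQ_FIRST(&deferred_free);

		if (unbind(o, false) != 0)
			break;	/* still blocked: leave it parked for next time */
		TAILQ_REMOVE(&deferred_free, o, node);
		free(o);
	}
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->id = 1;
	free_object(o, true);	/* "interrupted": parked, not freed */
	retire();		/* a later pass retries and completes the free */
	printf("deferred list empty: %s\n",
	       TAILQ_EMPTY(&deferred_free) ? "yes" : "no");
	return 0;
}

In the patch the drain lives at the top of i915_gem_retire_requests(), and
an object whose unbind is interrupted again is simply moved back onto the
deferred_free_list to be retried on a later pass.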

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: stable@kernel.org
Cc: Daniel Vetter <daniel@ffwll.ch>
---
 drivers/gpu/drm/i915/i915_drv.c |    2 +-
 drivers/gpu/drm/i915/i915_drv.h |   11 ++++-
 drivers/gpu/drm/i915/i915_gem.c |   96 ++++++++++++++++++++++++---------------
 3 files changed, 70 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 119692f..5044f65 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -340,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
 	/*
 	 * Clear request list
 	 */
-	i915_gem_retire_requests(dev, &dev_priv->render_ring);
+	i915_gem_retire_requests(dev);
 
 	if (need_display)
 		i915_save_display(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5a0100e..e3ff35e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -547,6 +547,14 @@ typedef struct drm_i915_private {
 		struct list_head fence_list;
 
 		/**
+		 * List of objects currently pending being freed.
+		 *
+		 * These objects are no longer in use, but due to a signal
+		 * we were prevented from freeing them at the appointed time.
+		 */
+		struct list_head deferred_free_list;
+
+		/**
 		 * We leave the user IRQ off as much as possible,
 		 * but this means that requests will finish and never
 		 * be retired once the system goes idle. Set a timer to
@@ -955,8 +963,7 @@ uint32_t i915_get_gem_seqno(struct drm_device *dev,
 bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
 int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
-void i915_gem_retire_requests(struct drm_device *dev,
-		 struct intel_ring_buffer *ring);
+void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_work_handler(struct work_struct *work);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2d0e109..6a371e3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -53,6 +53,7 @@ static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 				struct drm_i915_gem_pwrite *args,
 				struct drm_file *file_priv);
+static void i915_gem_free_object_tail(struct drm_gem_object *obj);
 
 static LIST_HEAD(shrink_list);
 static DEFINE_SPINLOCK(shrink_list_lock);
@@ -1709,9 +1710,9 @@ i915_get_gem_seqno(struct drm_device *dev,
 /**
  * This function clears the request list as sequence numbers are passed.
  */
-void
-i915_gem_retire_requests(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+static void
+i915_gem_retire_requests_ring(struct drm_device *dev,
+			      struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
@@ -1751,6 +1752,25 @@ i915_gem_retire_requests(struct drm_device *dev,
 }
 
 void
+i915_gem_retire_requests(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!list_empty(&dev_priv->mm.deferred_free_list)) {
+	    struct drm_i915_gem_object *obj_priv, *tmp;
+
+	    list_for_each_entry_safe(obj_priv, tmp,
+				     &dev_priv->mm.deferred_free_list,
+				     list)
+		i915_gem_free_object_tail(&obj_priv->base);
+	}
+
+	i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
+	if (HAS_BSD(dev))
+		i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
+}
+
+void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv;
@@ -1761,10 +1781,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	dev = dev_priv->dev;
 
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_retire_requests(dev, &dev_priv->render_ring);
-
-	if (HAS_BSD(dev))
-		i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+	i915_gem_retire_requests(dev);
 
 	if (!dev_priv->mm.suspended &&
 		(!list_empty(&dev_priv->render_ring.request_list) ||
@@ -1832,7 +1849,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 	 * a separate wait queue to handle that.
 	 */
 	if (ret == 0)
-		i915_gem_retire_requests(dev, ring);
+		i915_gem_retire_requests_ring(dev, ring);
 
 	return ret;
 }
@@ -1945,11 +1962,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	 * before we unbind.
 	 */
 	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-	if (ret) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("set_domain failed: %d\n", ret);
+	if (ret == -ERESTARTSYS)
 		return ret;
-	}
+	/* Continue on if we fail due to EIO, the GPU is hung so we
+	 * should be safe and we need to cleanup or else we might
+	 * cause memory corruption through use-after-free.
+	 */
 
 	BUG_ON(obj_priv->active);
 
@@ -1985,7 +2003,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 
 	trace_i915_gem_object_unbind(obj);
 
-	return 0;
+	return ret;
 }
 
 static struct drm_gem_object *
@@ -2107,10 +2125,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 	struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
 	for (;;) {
-		i915_gem_retire_requests(dev, render_ring);
-
-		if (HAS_BSD(dev))
-			i915_gem_retire_requests(dev, bsd_ring);
+		i915_gem_retire_requests(dev);
 
 		/* If there's an inactive buffer available now, grab it
 		 * and be done.
@@ -4329,7 +4344,6 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_busy *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL) {
@@ -4344,10 +4358,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * actually unmasked, and our working set ends up being larger than
 	 * required.
 	 */
-	i915_gem_retire_requests(dev, &dev_priv->render_ring);
-
-	if (HAS_BSD(dev))
-		i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+	i915_gem_retire_requests(dev);
 
 	obj_priv = to_intel_bo(obj);
 	/* Don't count being on the flushing list against the object being
@@ -4457,20 +4468,19 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 	return 0;
 }
 
-void i915_gem_free_object(struct drm_gem_object *obj)
+static void i915_gem_free_object_tail(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	int ret;
 
-	trace_i915_gem_object_destroy(obj);
-
-	while (obj_priv->pin_count > 0)
-		i915_gem_object_unpin(obj);
-
-	if (obj_priv->phys_obj)
-		i915_gem_detach_phys_object(dev, obj);
-
-	i915_gem_object_unbind(obj);
+	ret = i915_gem_object_unbind(obj);
+	if (ret == -ERESTARTSYS) {
+		list_move_tail(&obj_priv->list,
+			       &dev_priv->mm.deferred_free_list);
+		return;
+	}
 
 	if (obj_priv->mmap_offset)
 		i915_gem_free_mmap_offset(obj);
@@ -4482,6 +4492,22 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 	kfree(obj_priv);
 }
 
+void i915_gem_free_object(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+
+	trace_i915_gem_object_destroy(obj);
+
+	while (obj_priv->pin_count > 0)
+		i915_gem_object_unpin(obj);
+
+	if (obj_priv->phys_obj)
+		i915_gem_detach_phys_object(dev, obj);
+
+	i915_gem_free_object_tail(obj);
+}
+
 /** Unbinds all inactive objects. */
 static int
 i915_gem_evict_from_inactive_list(struct drm_device *dev)
@@ -4755,6 +4781,7 @@ i915_gem_load(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
 	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
 	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
 	if (HAS_BSD(dev)) {
@@ -5043,10 +5070,7 @@ rescan:
 			continue;
 
 		spin_unlock(&shrink_list_lock);
-		i915_gem_retire_requests(dev, &dev_priv->render_ring);
-
-		if (HAS_BSD(dev))
-			i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+		i915_gem_retire_requests(dev);
 
 		list_for_each_entry_safe(obj_priv, next_obj,
 					 &dev_priv->mm.inactive_list,
-- 
1.7.1