[PATCH 76/81] drm/i915: Be more careful when unbinding vma

Chris Wilson <chris@chris-wilson.co.uk>
Sat Apr 16 12:51:11 UTC 2016


When we call i915_vma_unbind(), we will wait upon outstanding rendering.
This will also trigger a retirement phase, which may update the object
lists. If we extend request tracking to the VMA itself (rather than
keeping it on the encompassing object), then there is the potential for
obj->vma_list to be modified for other elements during i915_vma_unbind().
As a result, if we walk over the object's vma_list and call
i915_vma_unbind(), we need to be prepared for that list to change.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
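As a reference for reviewers, the shape of the new helper is sketched below with
extra commentary (it mirrors the i915_gem.c hunk; locking and callers are
elided). Each vma is moved onto a private still_in_list before it is unbound, so
the wait inside i915_vma_unbind() is free to retire requests and rewrite
obj->vma_list without invalidating the walk; whatever was moved aside is spliced
back at the end.

int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);	/* private list, invisible to retirement */
	int ret = 0;

	/* Take each vma off obj->vma_list before unbinding it: the wait in
	 * i915_vma_unbind() may retire requests and edit obj->vma_list, but
	 * it cannot touch our local still_in_list.
	 */
	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma, obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}

	/* Restore everything we moved aside, including any vma that failed. */
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

Callers that previously open-coded the list walk (drop_pages(),
unsafe_drop_pages() and cancel_userptr()) then reduce to
i915_gem_object_unbind() followed by i915_gem_object_put_pages().
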
 drivers/gpu/drm/i915/i915_drv.h          |  2 ++
 drivers/gpu/drm/i915/i915_gem.c          | 54 ++++++++++++++++++++++++--------
 drivers/gpu/drm/i915/i915_gem_shrinker.c |  7 +----
 drivers/gpu/drm/i915/i915_gem_userptr.c  |  4 +--
 4 files changed, 45 insertions(+), 22 deletions(-)
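
Where moving the vmas onto a private list is not convenient, the other idiom
used by this patch is to restart the walk from the head of obj->vma_list after
any call that might have modified it. A simplified outline of the
i915_gem_object_set_cache_level() loop is below; obj, cache_level and the pin
check are as in the hunk, and the subsequent PTE rewriting is elided:

	struct i915_vma *vma;
	int ret;

restart:
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_gem_valid_gtt_space(vma, cache_level))
			continue;

		ret = i915_vma_unbind(vma);	/* may retire and edit obj->vma_list */
		if (ret)
			return ret;

		/* Any cached "next" pointer may now be stale, so begin again. */
		goto restart;
	}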

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c1120170e119..21643a97d524 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2812,6 +2812,8 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
  * _guarantee_ VMA in question is _not in use_ anywhere.
  */
 int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
+
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9247918923e0..f20f5a55e4d5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -255,18 +255,38 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
 	.release = i915_gem_object_release_phys,
 };
 
+int
+i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma;
+	LIST_HEAD(still_in_list);
+	int ret = 0;
+
+	/* The vma will only be freed if it is marked as closed, and if we wait
+	 * upon rendering to the vma, we may unbind anything in the list.
+	 */
+	while ((vma = list_first_entry_or_null(&obj->vma_list,
+					       struct i915_vma,
+					       obj_link))) {
+		list_move_tail(&vma->obj_link, &still_in_list);
+		ret = i915_vma_unbind(vma);
+		if (ret)
+			break;
+	}
+	list_splice(&still_in_list, &obj->vma_list);
+
+	return ret;
+}
+
 static int
 drop_pages(struct drm_i915_gem_object *obj)
 {
-	struct i915_vma *vma, *next;
 	int ret;
 
 	drm_gem_object_reference(&obj->base);
-	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
-		if (i915_vma_unbind(vma))
-			break;
-
-	ret = i915_gem_object_put_pages(obj);
+	ret = i915_gem_object_unbind(obj);
+	if (ret == 0)
+		ret = i915_gem_object_put_pages(obj);
 	drm_gem_object_unreference(&obj->base);
 
 	return ret;
@@ -3051,7 +3071,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level)
 {
-	struct i915_vma *vma, *next;
+	struct i915_vma *vma;
 	int ret = 0;
 
 	if (obj->cache_level == cache_level)
@@ -3062,7 +3082,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	 * catch the issue of the CS prefetch crossing page boundaries and
 	 * reading an invalid PTE on older architectures.
 	 */
-	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
+restart:
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
@@ -3071,11 +3092,18 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 			return -EBUSY;
 		}
 
-		if (!i915_gem_valid_gtt_space(vma, cache_level)) {
-			ret = i915_vma_unbind(vma);
-			if (ret)
-				return ret;
-		}
+		if (i915_gem_valid_gtt_space(vma, cache_level))
+			continue;
+
+		ret = i915_vma_unbind(vma);
+		if (ret)
+			return ret;
+
+		/* As unbinding may affect other elements in the
+		 * obj->vma_list (due to side-effects from retiring
+		 * an active vma), play safe and restart the iterator.
+		 */
+		goto restart;
 	}
 
 	/* We can reuse the existing drm_mm nodes but need to change the
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 3c1798accafe..9955cf5d018e 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -76,12 +76,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
 
 static int unsafe_drop_pages(struct drm_i915_gem_object *obj)
 {
-	struct i915_vma *vma, *vn;
-
-	list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
-		if (i915_vma_unbind(vma))
-			break;
-
+	i915_gem_object_unbind(obj);
 	return i915_gem_object_put_pages(obj);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index ff5fa7f39d6d..52baea7215a5 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -104,7 +104,6 @@ static void cancel_userptr(struct work_struct *work)
 
 	if (obj->pages != NULL) {
 		struct drm_i915_private *dev_priv = to_i915(dev);
-		struct i915_vma *vma, *tmp;
 		bool was_interruptible;
 
 		wait_rendering(obj);
@@ -112,8 +111,7 @@ static void cancel_userptr(struct work_struct *work)
 		was_interruptible = dev_priv->mm.interruptible;
 		dev_priv->mm.interruptible = false;
 
-		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
-			WARN_ON(i915_vma_unbind(vma));
+		WARN_ON(i915_gem_object_unbind(obj));
 		WARN_ON(i915_gem_object_put_pages(obj));
 
 		dev_priv->mm.interruptible = was_interruptible;
-- 
2.8.0.rc3


