[PATCH 55/55] drm/i915/gem: Invalidate userptr vmas from invalidate_range

Chris Wilson chris at chris-wilson.co.uk
Sat Dec 26 23:23:30 UTC 2020


From: Chris Wilson <ickle at kabylake.alporthouse.com>
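
Instead of unbinding the object and dropping its pages from inside
invalidate_range_start (which has to bail out with -EAGAIN whenever the
range is not blockable), hook the invalidate_range callback and simply
mark every vma attached to the object as invalid. Each vma has
I915_VMA_INVALID_BIT set and is detached from the object's vma rbtree
under obj->vma.lock; nothing is unbound from inside the notifier.
Consumers then discard stale vmas on their next lookup: eb_lookup_vma()
drops an invalid vma from the context's handles_vma cache, closes it
and falls back to looking up the object handle afresh.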

---
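A note for the reader: callers test the new bit directly with test_bit()
for now. If more users appear, a helper in the style of the existing flag
accessors (i915_vma_is_ggtt() and friends) would be the obvious shape; a
minimal sketch, not part of the diff below, with the name
i915_vma_is_invalid invented here:

static inline bool i915_vma_is_invalid(const struct i915_vma *vma)
{
	/* Set by userptr_mn_invalidate_range() under obj->vma.lock */
	return test_bit(I915_VMA_INVALID_BIT, __i915_vma_flags(vma));
}
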
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 11 ++-
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c   | 78 +++++++------------
 drivers/gpu/drm/i915/i915_vma_types.h         | 11 ++-
 3 files changed, 42 insertions(+), 58 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index dee0e2cbc71c..b332088c5130 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -805,8 +805,15 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
 		if (likely(vma && vma->vm == vm))
 			vma = i915_vma_tryget(vma);
 		rcu_read_unlock();
-		if (likely(vma))
-			return vma;
+		if (likely(vma)) {
+			if (likely(!test_bit(I915_VMA_INVALID_BIT,
+					     __i915_vma_flags(vma))))
+				return vma;
+
+			radix_tree_delete(&eb->gem_context->handles_vma, handle);
+			i915_vma_close(vma);
+			i915_vma_put(vma);
+		}
 
 		obj = i915_gem_object_lookup(eb->file, handle);
 		if (unlikely(!obj))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index f2eaed6aca3d..a3751cae961a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -83,75 +83,49 @@ __i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
 	spin_unlock(&mo->mn->lock);
 }
 
-static int
-userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
-				  const struct mmu_notifier_range *range)
+static void
+userptr_mn_invalidate_range(struct mmu_notifier *_mn,
+			    struct mm_struct *mm,
+			    unsigned long start,
+			    unsigned long end)
 {
 	struct i915_mmu_notifier *mn =
 		container_of(_mn, struct i915_mmu_notifier, mn);
 	struct interval_tree_node *it;
-	unsigned long end;
-	int ret = 0;
 
 	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
-		return 0;
+		return;
 
 	/* interval ranges are inclusive, but invalidate range is exclusive */
-	end = range->end - 1;
+	end--;
 
 	spin_lock(&mn->lock);
-	it = interval_tree_iter_first(&mn->objects, range->start, end);
-	while (it) {
-		struct drm_i915_gem_object *obj;
-
-		if (!mmu_notifier_range_blockable(range)) {
-			ret = -EAGAIN;
-			break;
-		}
-
-		/*
-		 * The mmu_object is released late when destroying the
-		 * GEM object so it is entirely possible to gain a
-		 * reference on an object in the process of being freed
-		 * since our serialisation is via the spinlock and not
-		 * the struct_mutex - and consequently use it after it
-		 * is freed and then double free it. To prevent that
-		 * use-after-free we only acquire a reference on the
-		 * object if it is not in the process of being destroyed.
-		 */
-		obj = container_of(it, struct i915_mmu_object, it)->obj;
-		if (!kref_get_unless_zero(&obj->base.refcount)) {
-			it = interval_tree_iter_next(it, range->start, end);
+	for (it = interval_tree_iter_first(&mn->objects, start, end);
+	     it;
+	     it = interval_tree_iter_next(it, start, end)) {
+		struct drm_i915_gem_object *obj =
+			container_of(it, struct i915_mmu_object, it)->obj;
+		struct i915_vma *vma;
+
+		if (list_empty(&obj->vma.list))
 			continue;
-		}
-		spin_unlock(&mn->lock);
-
-		ret = i915_gem_object_unbind(obj,
-					     I915_GEM_OBJECT_UNBIND_ACTIVE |
-					     I915_GEM_OBJECT_UNBIND_BARRIER);
-		if (ret == 0)
-			ret = __i915_gem_object_put_pages(obj);
-		i915_gem_object_put(obj);
-		if (ret)
-			return ret;
 
-		spin_lock(&mn->lock);
-
-		/*
-		 * As we do not (yet) protect the mmu from concurrent insertion
-		 * over this range, there is no guarantee that this search will
-		 * terminate given a pathologic workload.
-		 */
-		it = interval_tree_iter_first(&mn->objects, range->start, end);
+		spin_lock(&obj->vma.lock);
+		list_for_each_entry(vma, &obj->vma.list, obj_link) {
+			set_bit(I915_VMA_INVALID_BIT, __i915_vma_flags(vma));
+			RB_CLEAR_NODE(&vma->obj_node);
+			//vma->obj = NULL;
+		}
+		//INIT_LIST_HEAD(&obj->vma.list);
+		//obj->mm.pages = NULL;
+		obj->vma.tree = RB_ROOT;
+		spin_unlock(&obj->vma.lock);
 	}
 	spin_unlock(&mn->lock);
-
-	return ret;
-
 }
 
 static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
-	.invalidate_range_start = userptr_mn_invalidate_range_start,
+	.invalidate_range = userptr_mn_invalidate_range,
 };
 
 static struct i915_mmu_notifier *
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 9e9082dc8f4b..aaa264a51956 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -239,10 +239,13 @@ struct i915_vma {
 #define I915_VMA_ERROR_BIT	13
 #define I915_VMA_ERROR		((int)BIT(I915_VMA_ERROR_BIT))
 
-#define I915_VMA_GGTT_BIT	14
-#define I915_VMA_CAN_FENCE_BIT	15
-#define I915_VMA_USERFAULT_BIT	16
-#define I915_VMA_GGTT_WRITE_BIT	17
+#define I915_VMA_INVALID_BIT	14
+#define I915_VMA_INVALID	((int)BIT(I915_VMA_INVALID_BIT))
+
+#define I915_VMA_GGTT_BIT	15
+#define I915_VMA_CAN_FENCE_BIT	16
+#define I915_VMA_USERFAULT_BIT	17
+#define I915_VMA_GGTT_WRITE_BIT	18
 
 #define I915_VMA_GGTT		((int)BIT(I915_VMA_GGTT_BIT))
 #define I915_VMA_CAN_FENCE	((int)BIT(I915_VMA_CAN_FENCE_BIT))
-- 
2.20.1


