[PATCH 12/13] drm/i915: Move vma pinning under vm->mutex

Chris Wilson <chris@chris-wilson.co.uk>
Tue Jun 12 14:04:28 UTC 2018


Use the local GTT lock (vm->mutex) to serialise pinning of the VMA,
including its insertion into and removal from the GTT, rather than
relying on struct_mutex, the driver's BKL.
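
The pin interface is now layered so that callers which already hold
vm->mutex (eviction, execbuf reservation) can pin without recursing
on the lock. In outline, summarising the helpers as introduced below:

	i915_vma_pin()        /* takes vm->mutex around __i915_vma_pin() */
	  __i915_vma_pin()    /* asserts vm->mutex; bumps the pin count,
	                       * calling __i915_vma_do_pin() to insert and
	                       * bind the vma if required */
	____i915_vma_pin()    /* asserts vm->mutex; bare pin-count bump for
	                       * callers that only need to keep the vma
	                       * alive, e.g. eviction and unbind */
	i915_vma_unpin()      /* takes vm->mutex around __i915_vma_unpin() */
	  __i915_vma_unpin()  /* asserts vm->mutex; pin-count decrement */

i915_vma_pin_count() and i915_vma_is_pinned() remain safe to call
without the lock, now reading vma->flags with READ_ONCE().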

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_evict.c      | 12 +++---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 26 +++++++-----
 drivers/gpu/drm/i915/i915_gem_stolen.c     |  2 +
 drivers/gpu/drm/i915/i915_vma.c            | 48 +++++++++++++++++++++++++++++++++++----------
 drivers/gpu/drm/i915/i915_vma.h            | 41 ++++++++++++++-----
 5 files changed, 92 insertions(+), 37 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 54814a196ee4..91ad3d23126a 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -136,7 +136,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	enum drm_mm_insert_mode mode;
 	int ret;
 
-	lockdep_assert_held(&vm->i915->drm.struct_mutex);
+	lockdep_assert_held(&vm->mutex);
 	trace_i915_gem_evict(vm, min_size, alignment, flags);
 
 	/*
@@ -237,7 +237,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	 */
 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
 		if (drm_mm_scan_remove_block(&scan, &vma->node))
-			__i915_vma_pin(vma);
+			____i915_vma_pin(vma);
 		else
 			list_del(&vma->evict_link);
 	}
@@ -281,7 +281,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 	bool check_color;
 	int ret = 0;
 
-	lockdep_assert_held(&vm->i915->drm.struct_mutex);
+	lockdep_assert_held(&vm->mutex);
 	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
 	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
 
@@ -361,7 +361,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 		 * unbinding one vma from freeing (by dropping its active
 		 * reference) another in our eviction list.
 		 */
-		__i915_vma_pin(vma);
+		____i915_vma_pin(vma);
 		list_add(&vma->evict_link, &eviction_list);
 	}
 
@@ -397,7 +397,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
 	struct i915_vma *vma, *next;
 	int ret;
 
-	lockdep_assert_held(&vm->i915->drm.struct_mutex);
+	lockdep_assert_held(&vm->mutex);
 	trace_i915_gem_evict_vm(vm);
 
 	/* Switch back to the default context in order to unpin
@@ -418,7 +418,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
 			if (i915_vma_is_pinned(vma))
 				continue;
 
-			__i915_vma_pin(vma);
+			____i915_vma_pin(vma);
 			list_add(&vma->evict_link, &eviction_list);
 		}
 	} while (*++phase);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 87dd68b5a3c7..ccbab51c8778 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -396,7 +396,7 @@ eb_pin_vma(struct i915_execbuffer *eb,
 	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
 		pin_flags |= PIN_GLOBAL;
 
-	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
+	if (unlikely(__i915_vma_pin(vma, 0, 0, pin_flags)))
 		return false;
 
 	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
@@ -609,9 +609,9 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
 	}
 
-	err = i915_vma_pin(vma,
-			   entry->pad_to_size, entry->alignment,
-			   pin_flags);
+	err = __i915_vma_pin(vma,
+			     entry->pad_to_size, entry->alignment,
+			     pin_flags);
 	if (err)
 		return err;
 
@@ -623,7 +623,7 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
 		err = i915_vma_pin_fence(vma);
 		if (unlikely(err)) {
-			i915_vma_unpin(vma);
+			__i915_vma_unpin(vma);
 			return err;
 		}
 
@@ -753,6 +753,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 
 	batch = eb_batch_index(eb);
 
+	mutex_lock(&eb->vm->mutex);
 	for (i = 0; i < eb->buffer_count; i++) {
 		u32 handle = eb->exec[i].handle;
 		struct i915_lut_handle *lut;
@@ -806,13 +807,16 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 	}
 
 	eb->args->flags |= __EXEC_VALIDATED;
-	return eb_reserve(eb);
+	err = eb_reserve(eb);
+out_unlock:
+	mutex_unlock(&eb->vm->mutex);
+	return err;
 
 err_obj:
 	i915_gem_object_put(obj);
 err_vma:
 	eb->vma[i] = NULL;
-	return err;
+	goto out_unlock;
 }
 
 static struct i915_vma *
@@ -1895,11 +1899,15 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 	 * add the active reference first and queue for it to be dropped
 	 * *last*.
 	 */
-	if (!i915_vma_is_active(vma))
+	if (!i915_vma_is_active(vma)) {
+		mutex_lock(&vma->vm->mutex);
+		list_move_tail(&vma->vm_link, &vma->vm->active_list);
+		mutex_unlock(&vma->vm->mutex);
+
 		obj->active_count++;
+	}
 	i915_vma_set_active(vma, idx);
 	i915_gem_active_set(&vma->last_read[idx], rq);
-	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 
 	obj->write_domain = 0;
 	if (flags & EXEC_OBJECT_WRITE) {
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 79a347295e00..dd3880f440f6 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -663,10 +663,12 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
+	mutex_lock(&vma->vm->mutex);
 	vma->pages = obj->mm.pages;
 	vma->flags |= I915_VMA_GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
 	list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list);
+	mutex_unlock(&vma->vm->mutex);
 
 	spin_lock(&dev_priv->mm.obj_lock);
 	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index e82aa804cdba..6b56e052823e 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -44,8 +44,10 @@ i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
 	if (i915_vma_is_active(vma))
 		return;
 
+	mutex_lock(&vma->vm->mutex);
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+	mutex_unlock(&vma->vm->mutex);
 
 	GEM_BUG_ON(!i915_gem_object_is_active(obj));
 	if (--obj->active_count)
@@ -167,7 +169,10 @@ vma_create(struct drm_i915_gem_object *obj,
 	}
 	rb_link_node(&vma->obj_node, rb, p);
 	rb_insert_color(&vma->obj_node, &obj->vma_tree);
+
+	mutex_lock(&vm->mutex);
 	list_add(&vma->vm_link, &vm->unbound_list);
+	mutex_unlock(&vm->mutex);
 
 	return vma;
 
@@ -253,6 +258,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	u32 vma_flags;
 	int ret;
 
+	lockdep_assert_held(&vma->vm->mutex);
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(vma->size > vma->node.size);
 
@@ -296,8 +302,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 
 	/* Access through the GTT requires the device to be awake. */
 	assert_rpm_wakelock_held(vma->vm->i915);
+	mutex_lock(&vma->vm->mutex);
 
-	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
 		err = -ENODEV;
 		goto err;
@@ -319,18 +325,20 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 		vma->iomap = ptr;
 	}
 
-	__i915_vma_pin(vma);
+	____i915_vma_pin(vma);
 
-	err = i915_vma_pin_fence(vma);
+	err = __i915_vma_pin_fence(vma);
 	if (err)
 		goto err_unpin;
 
 	i915_vma_set_ggtt_write(vma);
+	mutex_unlock(&vma->vm->mutex);
 	return ptr;
 
 err_unpin:
 	__i915_vma_unpin(vma);
 err:
+	mutex_unlock(&vma->vm->mutex);
 	return IO_ERR_PTR(err);
 }
 
@@ -495,6 +503,8 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	u64 start, end;
 	int ret;
 
+	lockdep_assert_held(&vma->vm->mutex);
+
 	GEM_BUG_ON(i915_vma_is_closed(vma));
 	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
@@ -607,6 +617,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
 
+	lockdep_assert_held(&vma->vm->mutex);
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 
 	if (vma->obj) {
@@ -635,6 +646,8 @@ i915_vma_remove(struct i915_vma *vma)
 {
 	struct drm_i915_private *i915 = vma->vm->i915;
 
+	lockdep_assert_held(&vma->vm->mutex);
+
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 
@@ -671,7 +684,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 	const unsigned int bound = vma->flags;
 	int ret;
 
-	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+	lockdep_assert_held(&vma->vm->mutex);
 	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
 	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
 
@@ -755,10 +768,13 @@ static void __i915_vma_destroy(struct i915_vma *vma)
 	GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
 
 	list_del(&vma->obj_link);
-	list_del(&vma->vm_link);
 	if (vma->obj)
 		rb_erase(&vma->obj_node, &vma->obj->vma_tree);
 
+	mutex_lock(&vma->vm->mutex);
+	list_del(&vma->vm_link);
+	mutex_unlock(&vma->vm->mutex);
+
 	if (!i915_vma_is_ggtt(vma))
 		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
 
@@ -855,7 +871,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 		 * we currently hold, therefore it cannot free this object
 		 * before we are finished).
 		 */
-		__i915_vma_pin(vma);
+		____i915_vma_pin(vma);
 
 		for_each_active(active, idx) {
 			ret = i915_gem_active_retire(&vma->last_read[idx],
@@ -875,11 +891,16 @@ int i915_vma_unbind(struct i915_vma *vma)
 	}
 	GEM_BUG_ON(i915_vma_is_active(vma));
 
-	if (i915_vma_is_pinned(vma))
-		return -EBUSY;
+	mutex_lock(&vma->vm->mutex);
+	if (i915_vma_is_pinned(vma)) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
 
-	if (!drm_mm_node_allocated(&vma->node))
-		return 0;
+	if (!drm_mm_node_allocated(&vma->node)) {
+		ret = 0;
+		goto out_unlock;
+	}
 
 	if (i915_vma_is_map_and_fenceable(vma)) {
 		/*
@@ -894,7 +915,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 		/* release the fence reg _after_ flushing */
 		ret = i915_vma_put_fence(vma);
 		if (ret)
-			return ret;
+			goto out_unlock;
 
 		/* Force a pagefault for domain tracking on next user access */
 		i915_vma_revoke_mmap(vma);
@@ -913,7 +934,10 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	i915_vma_remove(vma);
 
-	return 0;
+	ret = 0;
+out_unlock:
+	mutex_unlock(&vma->vm->mutex);
+	return ret;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 7c8b5434efaa..84107ba0f32e 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -292,11 +292,22 @@ void i915_vma_close(struct i915_vma *vma);
 void i915_vma_reopen(struct i915_vma *vma);
 void i915_vma_destroy(struct i915_vma *vma);
 
+static inline void ____i915_vma_pin(struct i915_vma *vma)
+{
+	lockdep_assert_held(&vma->vm->mutex);
+
+	vma->flags++;
+	GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
+}
+
 int __i915_vma_do_pin(struct i915_vma *vma,
 		      u64 size, u64 alignment, u64 flags);
+
 static inline int __must_check
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+__i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
+	lockdep_assert_held(&vma->vm->mutex);
+
 	BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
 	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
 	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
@@ -313,32 +324,42 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	return __i915_vma_do_pin(vma, size, alignment, flags);
 }
 
-static inline int i915_vma_pin_count(const struct i915_vma *vma)
+static inline int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
-	return vma->flags & I915_VMA_PIN_MASK;
+	int err;
+
+	mutex_lock(&vma->vm->mutex);
+	err = __i915_vma_pin(vma, size, alignment, flags);
+	mutex_unlock(&vma->vm->mutex);
+
+	return err;
 }
 
-static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
+static inline int i915_vma_pin_count(const struct i915_vma *vma)
 {
-	return i915_vma_pin_count(vma);
+	return READ_ONCE(vma->flags) & I915_VMA_PIN_MASK;
 }
 
-static inline void __i915_vma_pin(struct i915_vma *vma)
+static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
 {
-	vma->flags++;
-	GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
+	return i915_vma_pin_count(vma);
 }
 
 static inline void __i915_vma_unpin(struct i915_vma *vma)
 {
+	lockdep_assert_held(&vma->vm->mutex);
+	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+	GEM_BUG_ON(!i915_vma_is_pinned(vma));
+
 	vma->flags--;
 }
 
 static inline void i915_vma_unpin(struct i915_vma *vma)
 {
-	GEM_BUG_ON(!i915_vma_is_pinned(vma));
-	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+	mutex_lock(&vma->vm->mutex);
 	__i915_vma_unpin(vma);
+	mutex_unlock(&vma->vm->mutex);
 }
 
 static inline bool i915_vma_is_bound(const struct i915_vma *vma,
-- 
2.17.1


