[PATCH 11/15] drm/i915: Pull i915_vma_pin under the vm->mutex

Chris Wilson <chris@chris-wilson.co.uk>
Sun Aug 18 00:15:23 UTC 2019
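
Pin an i915_vma under the vm->mutex of the address space it belongs to
instead of requiring the device-wide struct_mutex. Insertion, binding,
eviction and unbinding now run under the per-vm lock, so the GGTT
handling in the shrinker, the GVT aperture allocator, the GTT fault
handler, i915_perf and the selftests drop their struct_mutex usage.
PIN_USER binds may now be performed asynchronously: the PTE setup is
handed to a preallocated i915_vma_work (built on the new dma_fence_work
helper) that runs once its dependency fences have signalled, and the
resulting fence is tracked as the exclusive fence of the vma's
i915_active so that unbind waits for it. vma->flags becomes an atomic_t
so that the pin count and bind/status bits can be updated without an
outer lock.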


Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/Makefile                 |   1 +
 drivers/gpu/drm/i915/display/intel_overlay.c  |  11 +-
 drivers/gpu/drm/i915/gem/i915_gem_domain.c    |   2 +-
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  14 +-
 drivers/gpu/drm/i915/gem/i915_gem_mman.c      |  26 +-
 drivers/gpu/drm/i915/gem/i915_gem_object.c    |   1 -
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |   5 +
 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c  |  67 -----
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c    |   2 +-
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c   |  27 +-
 .../gpu/drm/i915/gem/selftests/huge_pages.c   |   2 +-
 drivers/gpu/drm/i915/gt/selftest_hangcheck.c  |  19 +-
 drivers/gpu/drm/i915/gvt/aperture_gm.c        |  12 +-
 drivers/gpu/drm/i915/i915_active.c            |  63 ++++
 drivers/gpu/drm/i915/i915_active.h            |   2 +
 drivers/gpu/drm/i915/i915_active_types.h      |   4 +
 drivers/gpu/drm/i915/i915_gem.c               |  77 +++--
 drivers/gpu/drm/i915/i915_gem_evict.c         |  20 +-
 drivers/gpu/drm/i915/i915_gem_fence_reg.c     |   4 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c           |  46 ++-
 drivers/gpu/drm/i915/i915_perf.c              |   6 -
 drivers/gpu/drm/i915/i915_sw_fence_work.c     |  83 ++++++
 drivers/gpu/drm/i915/i915_sw_fence_work.h     |  31 ++
 drivers/gpu/drm/i915/i915_vma.c               | 271 +++++++++++-------
 drivers/gpu/drm/i915/i915_vma.h               | 100 ++++---
 .../gpu/drm/i915/selftests/i915_gem_evict.c   |   8 +-
 drivers/gpu/drm/i915/selftests/mock_gtt.c     |   4 +-
 27 files changed, 513 insertions(+), 395 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/i915_sw_fence_work.c
 create mode 100644 drivers/gpu/drm/i915/i915_sw_fence_work.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 45add812048b..658b930d34a8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -62,6 +62,7 @@ i915-y += \
 	i915_memcpy.o \
 	i915_mm.o \
 	i915_sw_fence.o \
+	i915_sw_fence_work.o \
 	i915_syncmap.o \
 	i915_user_extensions.o
 
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 29edfc343716..4f36557b3f3b 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -1303,15 +1303,11 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
 	struct i915_vma *vma;
 	int err;
 
-	mutex_lock(&i915->drm.struct_mutex);
-
 	obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
 	if (obj == NULL)
 		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
-	if (IS_ERR(obj)) {
-		err = PTR_ERR(obj);
-		goto err_unlock;
-	}
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
 
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
 	if (IS_ERR(vma)) {
@@ -1332,13 +1328,10 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
 	}
 
 	overlay->reg_bo = obj;
-	mutex_unlock(&i915->drm.struct_mutex);
 	return 0;
 
 err_put_bo:
 	i915_gem_object_put(obj);
-err_unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 60134c5aefa7..720e384dda2a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -277,7 +277,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 			if (!drm_mm_node_allocated(&vma->node))
 				continue;
 
-			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
+			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE, NULL);
 			if (ret)
 				return ret;
 		}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 49020720be5e..a9085c0307f4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -549,8 +549,11 @@ eb_add_vma(struct i915_execbuffer *eb,
 		eb_unreserve_vma(vma, vma->exec_flags);
 
 		list_add_tail(&vma->exec_link, &eb->unbound);
-		if (drm_mm_node_allocated(&vma->node))
+		if (drm_mm_node_allocated(&vma->node)) {
+			mutex_lock(&vma->vm->mutex);
 			err = i915_vma_unbind(vma);
+			mutex_unlock(&vma->vm->mutex);
+		}
 		if (unlikely(err))
 			vma->exec_flags = NULL;
 	}
@@ -697,7 +700,9 @@ static int eb_reserve(struct i915_execbuffer *eb)
 
 		case 1:
 			/* Too fragmented, unbind everything and retry */
+			mutex_lock(&eb->context->vm->mutex);
 			err = i915_gem_evict_vm(eb->context->vm);
+			mutex_unlock(&eb->context->vm->mutex);
 			if (err)
 				return err;
 			break;
@@ -1189,8 +1194,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		goto skip_request;
 
 	i915_vma_lock(batch);
-	GEM_BUG_ON(!dma_resv_test_signaled_rcu(batch->resv, true));
-	err = i915_vma_move_to_active(batch, rq, 0);
+	err = i915_request_await_object(rq, batch->obj, true);
+	if (err == 0)
+		err = i915_vma_move_to_active(batch, rq, 0);
 	i915_vma_unlock(batch);
 	if (err)
 		goto skip_request;
@@ -1387,7 +1393,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
 		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
 		    IS_GEN(eb->i915, 6)) {
 			err = i915_vma_bind(target, target->obj->cache_level,
-					    PIN_GLOBAL);
+					    PIN_GLOBAL, NULL);
 			if (WARN_ONCE(err,
 				      "Unexpected failure to bind target VMA!"))
 				return err;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index dba5dd779149..b94eb557cc2e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -251,16 +251,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 		goto err_rpm;
 	}
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		goto err_reset;
-
-	/* Access to snoopable pages through the GTT is incoherent. */
-	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
-		ret = -EFAULT;
-		goto err_unlock;
-	}
-
 	/* Now pin it into the GTT as needed */
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 				       PIN_MAPPABLE |
@@ -290,7 +280,13 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	}
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
-		goto err_unlock;
+		goto err_reset;
+	}
+
+	/* Access to snoopable pages through the GTT is incoherent. */
+	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
+		ret = -EFAULT;
+		goto err_unpin;
 	}
 
 	ret = i915_vma_pin_fence(vma);
@@ -307,22 +303,20 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 		goto err_fence;
 
 	/* Mark as being mmapped into userspace for later revocation */
-	assert_rpm_wakelock_held(rpm);
+	mutex_lock(&i915->ggtt.vm.mutex);
 	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
 		list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
+	mutex_unlock(&i915->ggtt.vm.mutex);
+
 	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
 		intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
 				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
-	GEM_BUG_ON(!obj->userfault_count);
 
 	i915_vma_set_ggtt_write(vma);
-
 err_fence:
 	i915_vma_unpin_fence(vma);
 err_unpin:
 	__i915_vma_unpin(vma);
-err_unlock:
-	mutex_unlock(&dev->struct_mutex);
 err_reset:
 	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
 err_rpm:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index d7855dc5a5c5..81436c6a1f4b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -163,7 +163,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
 		list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
 			GEM_BUG_ON(i915_vma_is_active(vma));
-			vma->flags &= ~I915_VMA_PIN_MASK;
 			i915_vma_destroy(vma);
 		}
 		GEM_BUG_ON(!list_empty(&obj->vma.list));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 5efb9936e05b..6e26f8e17cb5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -106,6 +106,11 @@ static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
 	dma_resv_lock(obj->base.resv, NULL);
 }
 
+static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
+{
+	return dma_resv_trylock(obj->base.resv);
+}
+
 static inline int
 i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
 {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index edd21d14e64f..9b93a8fe520b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -16,40 +16,6 @@
 
 #include "i915_trace.h"
 
-static bool shrinker_lock(struct drm_i915_private *i915,
-			  unsigned int flags,
-			  bool *unlock)
-{
-	struct mutex *m = &i915->drm.struct_mutex;
-
-	switch (mutex_trylock_recursive(m)) {
-	case MUTEX_TRYLOCK_RECURSIVE:
-		*unlock = false;
-		return true;
-
-	case MUTEX_TRYLOCK_FAILED:
-		*unlock = false;
-		if (flags & I915_SHRINK_ACTIVE &&
-		    mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0)
-			*unlock = true;
-		return *unlock;
-
-	case MUTEX_TRYLOCK_SUCCESS:
-		*unlock = true;
-		return true;
-	}
-
-	BUG();
-}
-
-static void shrinker_unlock(struct drm_i915_private *i915, bool unlock)
-{
-	if (!unlock)
-		return;
-
-	mutex_unlock(&i915->drm.struct_mutex);
-}
-
 static bool swap_available(void)
 {
 	return get_nr_swap_pages() > 0;
@@ -162,10 +128,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
 	intel_wakeref_t wakeref = 0;
 	unsigned long count = 0;
 	unsigned long scanned = 0;
-	bool unlock;
-
-	if (!shrinker_lock(i915, shrink, &unlock))
-		return 0;
 
 	/*
 	 * When shrinking the active list, we should also consider active
@@ -275,8 +237,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
 	if (shrink & I915_SHRINK_BOUND)
 		intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 
-	shrinker_unlock(i915, unlock);
-
 	if (nr_scanned)
 		*nr_scanned += scanned;
 	return count;
@@ -346,13 +306,9 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	struct drm_i915_private *i915 =
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
 	unsigned long freed;
-	bool unlock;
 
 	sc->nr_scanned = 0;
 
-	if (!shrinker_lock(i915, 0, &unlock))
-		return SHRINK_STOP;
-
 	freed = i915_gem_shrink(i915,
 				sc->nr_to_scan,
 				&sc->nr_scanned,
@@ -373,8 +329,6 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 		}
 	}
 
-	shrinker_unlock(i915, unlock);
-
 	return sc->nr_scanned ? freed : SHRINK_STOP;
 }
 
@@ -426,10 +380,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 	struct i915_vma *vma, *next;
 	unsigned long freed_pages = 0;
 	intel_wakeref_t wakeref;
-	bool unlock;
-
-	if (!shrinker_lock(i915, 0, &unlock))
-		return NOTIFY_DONE;
 
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 		freed_pages += i915_gem_shrink(i915, -1UL, NULL,
@@ -446,15 +396,11 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 		if (!vma->iomap || i915_vma_is_active(vma))
 			continue;
 
-		mutex_unlock(&i915->ggtt.vm.mutex);
 		if (i915_vma_unbind(vma) == 0)
 			freed_pages += count;
-		mutex_lock(&i915->ggtt.vm.mutex);
 	}
 	mutex_unlock(&i915->ggtt.vm.mutex);
 
-	shrinker_unlock(i915, unlock);
-
 	*(unsigned long *)ptr += freed_pages;
 	return NOTIFY_DONE;
 }
@@ -497,22 +443,9 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
 
 	fs_reclaim_acquire(GFP_KERNEL);
 
-	/*
-	 * As we invariably rely on the struct_mutex within the shrinker,
-	 * but have a complicated recursion dance, taint all the mutexes used
-	 * within the shrinker with the struct_mutex. For completeness, we
-	 * taint with all subclass of struct_mutex, even though we should
-	 * only need tainting by I915_MM_NORMAL to catch possible ABBA
-	 * deadlocks from using struct_mutex inside @mutex.
-	 */
-	mutex_acquire(&i915->drm.struct_mutex.dep_map,
-		      I915_MM_SHRINKER, 0, _RET_IP_);
-
 	mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
 	mutex_release(&mutex->dep_map, 0, _RET_IP_);
 
-	mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
-
 	fs_reclaim_release(GFP_KERNEL);
 
 	if (unlock)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 2e1bfd5e4adf..f4504b7c479f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -685,7 +685,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
 	vma->pages = obj->mm.pages;
-	vma->flags |= I915_VMA_GLOBAL_BIND;
+	atomic_or(I915_VMA_GLOBAL_BIND, &vma->flags);
 	__i915_vma_set_map_and_fenceable(vma);
 
 	mutex_lock(&ggtt->vm.mutex);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 74da35611d7c..cd36236e3faf 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -92,7 +92,6 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 	struct i915_mmu_notifier *mn =
 		container_of(_mn, struct i915_mmu_notifier, mn);
 	struct interval_tree_node *it;
-	struct mutex *unlock = NULL;
 	unsigned long end;
 	int ret = 0;
 
@@ -129,33 +128,13 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 		}
 		spin_unlock(&mn->lock);
 
-		if (!unlock) {
-			unlock = &mn->mm->i915->drm.struct_mutex;
-
-			switch (mutex_trylock_recursive(unlock)) {
-			default:
-			case MUTEX_TRYLOCK_FAILED:
-				if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
-					i915_gem_object_put(obj);
-					return -EINTR;
-				}
-				/* fall through */
-			case MUTEX_TRYLOCK_SUCCESS:
-				break;
-
-			case MUTEX_TRYLOCK_RECURSIVE:
-				unlock = ERR_PTR(-EEXIST);
-				break;
-			}
-		}
-
 		ret = i915_gem_object_unbind(obj,
 					     I915_GEM_OBJECT_UNBIND_ACTIVE);
 		if (ret == 0)
 			ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
 		i915_gem_object_put(obj);
 		if (ret)
-			goto unlock;
+			return ret;
 
 		spin_lock(&mn->lock);
 
@@ -168,10 +147,6 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 	}
 	spin_unlock(&mn->lock);
 
-unlock:
-	if (!IS_ERR_OR_NULL(unlock))
-		mutex_unlock(unlock);
-
 	return ret;
 
 }
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 8de83c6d81f5..b66f02ee3ed7 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1387,7 +1387,7 @@ static int igt_ppgtt_pin_update(void *arg)
 			goto out_unpin;
 		}
 
-		err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE);
+		err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE, NULL);
 		if (err)
 			goto out_unpin;
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index a31eed2bb00f..79a262046249 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -1124,15 +1124,14 @@ static int evict_vma(void *data)
 {
 	struct evict_vma *arg = data;
 	struct i915_address_space *vm = arg->vma->vm;
-	struct drm_i915_private *i915 = vm->i915;
 	struct drm_mm_node evict = arg->vma->node;
 	int err;
 
 	complete(&arg->completion);
 
-	mutex_lock(&i915->drm.struct_mutex);
+	mutex_lock(&vm->mutex);
 	err = i915_gem_evict_for_node(vm, &evict, 0);
-	mutex_unlock(&i915->drm.struct_mutex);
+	mutex_unlock(&vm->mutex);
 
 	return err;
 }
@@ -1140,39 +1139,33 @@ static int evict_vma(void *data)
 static int evict_fence(void *data)
 {
 	struct evict_vma *arg = data;
-	struct drm_i915_private *i915 = arg->vma->vm->i915;
 	int err;
 
 	complete(&arg->completion);
 
-	mutex_lock(&i915->drm.struct_mutex);
-
 	/* Mark the fence register as dirty to force the mmio update. */
 	err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512);
 	if (err) {
 		pr_err("Invalid Y-tiling settings; err:%d\n", err);
-		goto out_unlock;
+		return err;
 	}
 
 	err = i915_vma_pin(arg->vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
 	if (err) {
 		pr_err("Unable to pin vma for Y-tiled fence; err:%d\n", err);
-		goto out_unlock;
+		return err;
 	}
 
 	err = i915_vma_pin_fence(arg->vma);
 	i915_vma_unpin(arg->vma);
 	if (err) {
 		pr_err("Unable to pin Y-tiled fence; err:%d\n", err);
-		goto out_unlock;
+		return err;
 	}
 
 	i915_vma_unpin_fence(arg->vma);
 
-out_unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
-
-	return err;
+	return 0;
 }
 
 static int __igt_reset_evict_vma(struct intel_gt *gt,
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 5ff2437b2998..d996bbc7ea59 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -61,14 +61,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 		flags = PIN_MAPPABLE;
 	}
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&dev_priv->ggtt.vm.mutex);
 	mmio_hw_access_pre(dev_priv);
 	ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
 				  size, I915_GTT_PAGE_SIZE,
 				  I915_COLOR_UNEVICTABLE,
 				  start, end, flags);
 	mmio_hw_access_post(dev_priv);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&dev_priv->ggtt.vm.mutex);
 	if (ret)
 		gvt_err("fail to alloc %s gm space from host\n",
 			high_gm ? "high" : "low");
@@ -98,9 +98,9 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
 
 	return 0;
 out_free_aperture:
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&dev_priv->ggtt.vm.mutex);
 	drm_mm_remove_node(&vgpu->gm.low_gm_node);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&dev_priv->ggtt.vm.mutex);
 	return ret;
 }
 
@@ -108,10 +108,10 @@ static void free_vgpu_gm(struct intel_vgpu *vgpu)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&dev_priv->ggtt.vm.mutex);
 	drm_mm_remove_node(&vgpu->gm.low_gm_node);
 	drm_mm_remove_node(&vgpu->gm.high_gm_node);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&dev_priv->ggtt.vm.mutex);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index df6164591702..7d9e97fa4ecf 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -335,6 +335,45 @@ int i915_active_ref(struct i915_active *ref,
 	return err;
 }
 
+static void excl_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+	struct i915_active *ref = container_of(cb, typeof(*ref), excl_cb);
+
+	RCU_INIT_POINTER(ref->excl, NULL);
+	dma_fence_put(f);
+
+	active_retire(ref);
+}
+
+void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
+{
+	GEM_BUG_ON(i915_active_is_idle(ref));
+
+	dma_fence_get(f);
+
+	rcu_read_lock();
+	if (rcu_access_pointer(ref->excl)) {
+		struct dma_fence *old;
+
+		old = dma_fence_get_rcu_safe(&ref->excl);
+		if (old) {
+			if (dma_fence_remove_callback(old, &ref->excl_cb))
+				atomic_dec(&ref->count);
+			dma_fence_put(old);
+		}
+	}
+	rcu_read_unlock();
+
+	atomic_inc(&ref->count);
+	rcu_assign_pointer(ref->excl, f);
+
+	if (dma_fence_add_callback(f, &ref->excl_cb, excl_cb)) {
+		RCU_INIT_POINTER(ref->excl, NULL);
+		atomic_dec(&ref->count);
+		dma_fence_put(f);
+	}
+}
+
 int i915_active_acquire(struct i915_active *ref)
 {
 	int err;
@@ -393,6 +432,25 @@ void i915_active_ungrab(struct i915_active *ref)
 	__active_ungrab(ref);
 }
 
+static int excl_wait(struct i915_active *ref)
+{
+	struct dma_fence *old;
+	int err = 0;
+
+	if (!rcu_access_pointer(ref->excl))
+		return 0;
+
+	rcu_read_lock();
+	old = dma_fence_get_rcu_safe(&ref->excl);
+	rcu_read_unlock();
+	if (old) {
+		err = dma_fence_wait(old, true);
+		dma_fence_put(old);
+	}
+
+	return err;
+}
+
 int i915_active_wait(struct i915_active *ref)
 {
 	struct active_node *it, *n;
@@ -413,6 +471,10 @@ int i915_active_wait(struct i915_active *ref)
 		return 0;
 	}
 
+	err = excl_wait(ref);
+	if (err)
+		goto out;
+
 	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
 		if (is_barrier(&it->base)) { /* unconnected idle-barrier */
 			err = -EBUSY;
@@ -424,6 +486,7 @@ int i915_active_wait(struct i915_active *ref)
 			break;
 	}
 
+out:
 	__active_retire(ref);
 	if (err)
 		return err;
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index f95058f99057..bc1c9d5d4f63 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -373,6 +373,8 @@ int i915_active_ref(struct i915_active *ref,
 		    struct intel_timeline *tl,
 		    struct i915_request *rq);
 
+void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);
+
 int i915_active_wait(struct i915_active *ref);
 
 int i915_request_await_active(struct i915_request *rq,
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index 1854e7d168c1..b2f227e7de0d 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -8,6 +8,7 @@
 #define _I915_ACTIVE_TYPES_H_
 
 #include <linux/atomic.h>
+#include <linux/dma-fence.h>
 #include <linux/llist.h>
 #include <linux/mutex.h>
 #include <linux/rbtree.h>
@@ -51,6 +52,9 @@ struct i915_active {
 	struct mutex mutex;
 	atomic_t count;
 
+	struct dma_fence __rcu *excl;
+	struct dma_fence_cb excl_cb;
+
 	unsigned long flags;
 #define I915_ACTIVE_GRAB_BIT 0
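
The new exclusive slot tracks at most one fence: i915_active_set_exclusive()
replaces (and drops the callback on) any previously tracked fence, takes a
count on the i915_active, and excl_cb() releases that count when the fence
signals, so i915_active_wait() now also waits on it via excl_wait(). A rough
sketch of the intended use, mirroring the i915_vma_bind() call site added
later in this patch (error handling elided; "work" is the preallocated bind
worker):

	/* Publish the asynchronous bind as the vma's exclusive fence. */
	i915_active_set_exclusive(&vma->active, &work->base.base);

	/* Any waiter, e.g. i915_vma_unbind(), now also blocks on it. */
	err = i915_active_wait(&vma->active);
	if (err)
		return err;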
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d1bb8ec0e8b6..32802275b80d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -62,20 +62,31 @@
 #include "intel_pm.h"
 
 static int
-insert_mappable_node(struct i915_ggtt *ggtt,
-                     struct drm_mm_node *node, u32 size)
+insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
 {
+	int err;
+
+	err = mutex_lock_interruptible(&ggtt->vm.mutex);
+	if (err)
+		return err;
+
 	memset(node, 0, sizeof(*node));
-	return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
-					   size, 0, I915_COLOR_UNEVICTABLE,
-					   0, ggtt->mappable_end,
-					   DRM_MM_INSERT_LOW);
+	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
+					  size, 0, I915_COLOR_UNEVICTABLE,
+					  0, ggtt->mappable_end,
+					  DRM_MM_INSERT_LOW);
+
+	mutex_unlock(&ggtt->vm.mutex);
+
+	return err;
 }
 
 static void
-remove_mappable_node(struct drm_mm_node *node)
+remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
 {
+	mutex_lock(&ggtt->vm.mutex);
 	drm_mm_remove_node(node);
+	mutex_unlock(&ggtt->vm.mutex);
 }
 
 int
@@ -87,7 +98,8 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	struct i915_vma *vma;
 	u64 pinned;
 
-	mutex_lock(&ggtt->vm.mutex);
+	if (mutex_lock_interruptible(&ggtt->vm.mutex))
+		return -EINTR;
 
 	pinned = ggtt->vm.reserved;
 	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
@@ -109,8 +121,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 	LIST_HEAD(still_in_list);
 	int ret = 0;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
-
 	spin_lock(&obj->vma.lock);
 	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
 						       struct i915_vma,
@@ -120,8 +130,13 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 
 		ret = -EBUSY;
 		if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
-		    !i915_vma_is_active(vma))
-			ret = i915_vma_unbind(vma);
+		    !i915_vma_is_active(vma)) {
+			ret = mutex_lock_interruptible(&vma->vm->mutex);
+			if (!ret) {
+				ret = i915_vma_unbind(vma);
+				mutex_unlock(&vma->vm->mutex);
+			}
+		}
 
 		spin_lock(&obj->vma.lock);
 	}
@@ -338,10 +353,6 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 	u64 remain, offset;
 	int ret;
 
-	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
-	if (ret)
-		return ret;
-
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	vma = ERR_PTR(-ENODEV);
 	if (!i915_gem_object_is_tiled(obj))
@@ -355,12 +366,10 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 	} else {
 		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
 		if (ret)
-			goto out_unlock;
+			goto out_rpm;
 		GEM_BUG_ON(!node.allocated);
 	}
 
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	ret = i915_gem_object_lock_interruptible(obj);
 	if (ret)
 		goto out_unpin;
@@ -414,17 +423,14 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 
 	i915_gem_object_unlock_fence(obj, fence);
 out_unpin:
-	mutex_lock(&i915->drm.struct_mutex);
 	if (node.allocated) {
 		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
-		remove_mappable_node(&node);
+		remove_mappable_node(ggtt, &node);
 	} else {
 		i915_vma_unpin(vma);
 	}
-out_unlock:
+out_rpm:
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	return ret;
 }
 
@@ -531,10 +537,6 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 	void __user *user_data;
 	int ret;
 
-	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
-	if (ret)
-		return ret;
-
 	if (i915_gem_object_has_struct_page(obj)) {
 		/*
 		 * Avoid waking the device up if we can fallback, as
@@ -544,10 +546,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		 * using the cache bypass of indirect GGTT access.
 		 */
 		wakeref = intel_runtime_pm_get_if_in_use(rpm);
-		if (!wakeref) {
-			ret = -EFAULT;
-			goto out_unlock;
-		}
+		if (!wakeref)
+			return -EFAULT;
 	} else {
 		/* No backing pages, no fallback, we must force GGTT access */
 		wakeref = intel_runtime_pm_get(rpm);
@@ -569,8 +569,6 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		GEM_BUG_ON(!node.allocated);
 	}
 
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	ret = i915_gem_object_lock_interruptible(obj);
 	if (ret)
 		goto out_unpin;
@@ -634,18 +632,15 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 
 	i915_gem_object_unlock_fence(obj, fence);
 out_unpin:
-	mutex_lock(&i915->drm.struct_mutex);
 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 	if (node.allocated) {
 		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
-		remove_mappable_node(&node);
+		remove_mappable_node(ggtt, &node);
 	} else {
 		i915_vma_unpin(vma);
 	}
 out_rpm:
 	intel_runtime_pm_put(rpm, wakeref);
-out_unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
 	return ret;
 }
 
@@ -966,8 +961,6 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 	struct i915_vma *vma;
 	int ret;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
-
 	if (flags & PIN_MAPPABLE &&
 	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
 		/* If the required space is larger than the available
@@ -1021,7 +1014,9 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 		     i915_ggtt_offset(vma), alignment,
 		     !!(flags & PIN_MAPPABLE),
 		     i915_vma_is_map_and_fenceable(vma));
+		mutex_lock(&vma->vm->mutex);
 		ret = i915_vma_unbind(vma);
+		mutex_unlock(&vma->vm->mutex);
 		if (ret)
 			return ERR_PTR(ret);
 	}
@@ -1319,7 +1314,9 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 		 * from the GTT to prevent such accidents and reclaim the
 		 * space.
 		 */
+		mutex_lock(&state->vm->mutex);
 		err = i915_vma_unbind(state);
+		mutex_unlock(&state->vm->mutex);
 		if (err)
 			goto out;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 52c86c6e0673..722b03af3892 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -47,8 +47,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
 	 * bound by their active reference.
 	 */
 	return i915_gem_wait_for_idle(i915,
-				      I915_WAIT_INTERRUPTIBLE |
-				      I915_WAIT_LOCKED,
+				      I915_WAIT_INTERRUPTIBLE,
 				      MAX_SCHEDULE_TIMEOUT);
 }
 
@@ -104,7 +103,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	struct i915_vma *active;
 	int ret;
 
-	lockdep_assert_held(&vm->i915->drm.struct_mutex);
+	lockdep_assert_held(&vm->mutex);
 	trace_i915_gem_evict(vm, min_size, alignment, flags);
 
 	/*
@@ -127,15 +126,6 @@ i915_gem_evict_something(struct i915_address_space *vm,
 				    min_size, alignment, cache_level,
 				    start, end, mode);
 
-	/*
-	 * Retire before we search the active list. Although we have
-	 * reasonable accuracy in our retirement lists, we may have
-	 * a stray pin (preventing eviction) that can only be resolved by
-	 * retiring.
-	 */
-	if (!(flags & PIN_NONBLOCK))
-		i915_retire_requests(dev_priv);
-
 search_again:
 	active = NULL;
 	INIT_LIST_HEAD(&eviction_list);
@@ -269,7 +259,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 	bool check_color;
 	int ret = 0;
 
-	lockdep_assert_held(&vm->i915->drm.struct_mutex);
+	lockdep_assert_held(&vm->mutex);
 	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
 	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
 
@@ -375,7 +365,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
 	struct i915_vma *vma, *next;
 	int ret;
 
-	lockdep_assert_held(&vm->i915->drm.struct_mutex);
+	lockdep_assert_held(&vm->mutex);
 	trace_i915_gem_evict_vm(vm);
 
 	/* Switch back to the default context in order to unpin
@@ -390,7 +380,6 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
 	}
 
 	INIT_LIST_HEAD(&eviction_list);
-	mutex_lock(&vm->mutex);
 	list_for_each_entry(vma, &vm->bound_list, vm_link) {
 		if (i915_vma_is_pinned(vma))
 			continue;
@@ -398,7 +387,6 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
 		__i915_vma_pin(vma);
 		list_add(&vma->evict_link, &eviction_list);
 	}
-	mutex_unlock(&vm->mutex);
 
 	ret = 0;
 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 615a9f4ef30c..1e93f2aafc80 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -331,13 +331,15 @@ static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
 	return ERR_PTR(-EDEADLK);
 }
 
-static int __i915_vma_pin_fence(struct i915_vma *vma)
+int __i915_vma_pin_fence(struct i915_vma *vma)
 {
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
 	struct i915_fence_reg *fence;
 	struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
 	int err;
 
+	lockdep_assert_held(&vma->vm->mutex);
+
 	/* Just update our place in the LRU if our fence is getting reused. */
 	if (vma->fence) {
 		fence = vma->fence;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3e376ba35bb8..eb758be9e70c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -143,12 +143,12 @@ static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
 
 static int ppgtt_bind_vma(struct i915_vma *vma,
 			  enum i915_cache_level cache_level,
-			  u32 unused)
+			  u32 flags)
 {
 	u32 pte_flags;
 	int err;
 
-	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+	if (flags & BIND_ALLOC) {
 		err = vma->vm->allocate_va_range(vma->vm,
 						 vma->node.start, vma->size);
 		if (err)
@@ -1863,7 +1863,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
 
 	vma->size = size;
 	vma->fence_size = size;
-	vma->flags = I915_VMA_GGTT;
+	atomic_or(I915_VMA_GGTT, &vma->flags);
 	vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
 
 	INIT_LIST_HEAD(&vma->obj_link);
@@ -2422,7 +2422,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
 	 * upgrade to both bound if we bind either to avoid double-binding.
 	 */
-	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+	atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
 
 	return 0;
 }
@@ -2452,7 +2452,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 	if (flags & I915_VMA_LOCAL_BIND) {
 		struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
 
-		if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+		if (flags & BIND_ALLOC) {
 			ret = alias->vm.allocate_va_range(&alias->vm,
 							  vma->node.start,
 							  vma->size);
@@ -2480,7 +2480,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
 {
 	struct drm_i915_private *i915 = vma->vm->i915;
 
-	if (vma->flags & I915_VMA_GLOBAL_BIND) {
+	if (atomic_read(&vma->flags) & I915_VMA_GLOBAL_BIND) {
 		struct i915_address_space *vm = vma->vm;
 		intel_wakeref_t wakeref;
 
@@ -2488,7 +2488,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
 			vm->clear_range(vm, vma->node.start, vma->size);
 	}
 
-	if (vma->flags & I915_VMA_LOCAL_BIND) {
+	if (atomic_read(&vma->flags) & I915_VMA_LOCAL_BIND) {
 		struct i915_address_space *vm =
 			&i915_vm_to_ggtt(vma->vm)->alias->vm;
 
@@ -2720,15 +2720,14 @@ int i915_init_ggtt(struct drm_i915_private *i915)
 
 static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
 {
-	struct drm_i915_private *i915 = ggtt->vm.i915;
 	struct i915_vma *vma, *vn;
 
 	ggtt->vm.closed = true;
 
 	rcu_barrier(); /* flush the RCU'ed__i915_vm_release */
-	flush_workqueue(i915->wq);
+	flush_workqueue(ggtt->vm.i915->wq);
 
-	mutex_lock(&i915->drm.struct_mutex);
+	mutex_lock(&ggtt->vm.mutex);
 
 	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
 		WARN_ON(i915_vma_unbind(vma));
@@ -2745,7 +2744,7 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
 
 	ggtt->vm.cleanup(&ggtt->vm);
 
-	mutex_unlock(&i915->drm.struct_mutex);
+	mutex_unlock(&ggtt->vm.mutex);
 
 	arch_phys_wc_del(ggtt->mtrr);
 	io_mapping_fini(&ggtt->iomap);
@@ -3159,9 +3158,6 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
 static int ggtt_init_hw(struct i915_ggtt *ggtt)
 {
 	struct drm_i915_private *i915 = ggtt->vm.i915;
-	int ret = 0;
-
-	mutex_lock(&i915->drm.struct_mutex);
 
 	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
 
@@ -3177,18 +3173,14 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt)
 				ggtt->gmadr.start,
 				ggtt->mappable_end)) {
 		ggtt->vm.cleanup(&ggtt->vm);
-		ret = -EIO;
-		goto out;
+		return -EIO;
 	}
 
 	ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
 
 	i915_ggtt_init_fences(ggtt);
 
-out:
-	mutex_unlock(&i915->drm.struct_mutex);
-
-	return ret;
+	return 0;
 }
 
 /**
@@ -3272,25 +3264,19 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
 	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
 		struct drm_i915_gem_object *obj = vma->obj;
 
-		if (!(vma->flags & I915_VMA_GLOBAL_BIND))
+		if (!(atomic_read(&vma->flags) & I915_VMA_GLOBAL_BIND))
 			continue;
 
-		mutex_unlock(&ggtt->vm.mutex);
-
 		if (!i915_vma_unbind(vma))
-			goto lock;
+			continue;
 
 		WARN_ON(i915_vma_bind(vma,
 				      obj ? obj->cache_level : 0,
-				      PIN_UPDATE));
-		if (obj) {
-			i915_gem_object_lock(obj);
+				      PIN_UPDATE, NULL));
+		if (obj && i915_gem_object_trylock(obj)) {
 			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
 			i915_gem_object_unlock(obj);
 		}
-
-lock:
-		mutex_lock(&ggtt->vm.mutex);
 	}
 
 	ggtt->vm.closed = false;
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index e42b86827d6b..71794f9e0055 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1347,15 +1347,9 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
 static void
 free_oa_buffer(struct i915_perf_stream *stream)
 {
-	struct drm_i915_private *i915 = stream->dev_priv;
-
-	mutex_lock(&i915->drm.struct_mutex);
-
 	i915_vma_unpin_and_release(&stream->oa_buffer.vma,
 				   I915_VMA_RELEASE_MAP);
 
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	stream->oa_buffer.vaddr = NULL;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
new file mode 100644
index 000000000000..a6abd99b0b3d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
@@ -0,0 +1,83 @@
+#include "i915_sw_fence_work.h"
+
+static void fence_work(struct work_struct *work)
+{
+	struct dma_fence_work *f = container_of(work, typeof(*f), work);
+	int err;
+
+	err = f->fn(f);
+	if (err)
+		dma_fence_set_error(&f->base, err);
+	dma_fence_signal(&f->base);
+	dma_fence_put(&f->base);
+}
+
+static int __i915_sw_fence_call
+fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+	struct dma_fence_work *f = container_of(fence, typeof(*f), chain);
+
+	switch (state) {
+	case FENCE_COMPLETE:
+		if (f->fn) {
+			queue_work(system_unbound_wq, &f->work);
+			break;
+		}
+
+		/* fall through */
+	case FENCE_FREE:
+		dma_fence_put(&f->base);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static const char *get_driver_name(struct dma_fence *fence)
+{
+	return "dma-fence";
+}
+
+static const char *get_timeline_name(struct dma_fence *fence)
+{
+	return "work";
+}
+
+static void fence_release(struct dma_fence *fence)
+{
+	struct dma_fence_work *f = container_of(fence, typeof(*f), base);
+
+	i915_sw_fence_fini(&f->chain);
+
+	BUILD_BUG_ON(offsetof(typeof(*f), base));
+	dma_fence_free(&f->base);
+}
+
+static const struct dma_fence_ops fence_ops = {
+	.get_driver_name = get_driver_name,
+	.get_timeline_name = get_timeline_name,
+	.release = fence_release,
+};
+
+void dma_fence_work_init(struct dma_fence_work *f,
+			 int (*fn)(struct dma_fence_work *f))
+{
+	spin_lock_init(&f->lock);
+	dma_fence_init(&f->base, &fence_ops, &f->lock, 0, 0);
+
+	dma_fence_get(&f->base);
+	INIT_WORK(&f->work, fence_work);
+
+	dma_fence_get(&f->base);
+	i915_sw_fence_init(&f->chain, fence_notify);
+
+	f->fn = fn;
+}
+
+int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal)
+{
+	if (!signal)
+		return 0;
+
+	return __i915_sw_fence_await_dma_fence(&f->chain, signal, &f->cb);
+}
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.h b/drivers/gpu/drm/i915/i915_sw_fence_work.h
new file mode 100644
index 000000000000..488e0fb76a7a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.h
@@ -0,0 +1,31 @@
+#ifndef I915_SW_FENCE_WORK_H
+#define I915_SW_FENCE_WORK_H
+
+#include <linux/dma-fence.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "i915_sw_fence.h"
+
+struct dma_fence_work {
+	struct dma_fence base;
+	spinlock_t lock;
+
+	struct i915_sw_fence chain;
+	struct i915_sw_dma_fence_cb cb;
+
+	struct work_struct work;
+	int (*fn)(struct dma_fence_work *f);
+};
+
+void dma_fence_work_init(struct dma_fence_work *f,
+			 int (*fn)(struct dma_fence_work *f));
+int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal);
+
+static inline void dma_fence_work_commit(struct dma_fence_work *f)
+{
+	i915_sw_fence_commit(&f->chain);
+	dma_fence_put(&f->base);
+}
+
+#endif /* I915_SW_FENCE_WORK_H */
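
A minimal usage sketch of the new helper, following the same pattern as
i915_vma_work later in this patch; the struct, function and fence names
below (my_work, my_work_fn, prereq) are hypothetical. Keeping the embedded
dma_fence_work as the first member means dma_fence_free() in fence_release()
frees the whole allocation:

	struct my_work {
		struct dma_fence_work base;	/* keep as the first member */
		int payload;			/* hypothetical per-work data */
	};

	static int my_work_fn(struct dma_fence_work *base)
	{
		struct my_work *w = container_of(base, typeof(*w), base);

		/*
		 * Runs from system_unbound_wq once every fence chained with
		 * dma_fence_work_chain() has signalled; a non-zero return is
		 * recorded on the fence via dma_fence_set_error().
		 */
		return w->payload ? 0 : -EINVAL;
	}

	/* caller side */
	w = kzalloc(sizeof(*w), GFP_KERNEL);
	if (!w)
		return -ENOMEM;

	dma_fence_work_init(&w->base, my_work_fn);
	err = dma_fence_work_chain(&w->base, prereq); /* prereq may be NULL */
	if (err < 0)
		goto err_commit;

	/* ... publish &w->base.base to other waiters as needed ... */
err_commit:
	dma_fence_work_commit(&w->base);	/* allow the work to execute */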
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index b75a409908f6..a234aba9d95d 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -32,6 +32,7 @@
 
 #include "i915_drv.h"
 #include "i915_globals.h"
+#include "i915_sw_fence_work.h"
 #include "i915_trace.h"
 #include "i915_vma.h"
 
@@ -172,7 +173,7 @@ vma_create(struct drm_i915_gem_object *obj,
 								i915_gem_object_get_stride(obj));
 		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
 
-		vma->flags |= I915_VMA_GGTT;
+		atomic_or(I915_VMA_GGTT, &vma->flags);
 	}
 
 	spin_lock(&obj->vma.lock);
@@ -262,8 +263,6 @@ vma_lookup(struct drm_i915_gem_object *obj,
  * Once created, the VMA is kept until either the object is freed, or the
  * address space is closed.
  *
- * Must be called with struct_mutex held.
- *
  * Returns the vma, or an error pointer.
  */
 struct i915_vma *
@@ -288,6 +287,38 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
 	return vma;
 }
 
+struct i915_vma_work {
+	struct dma_fence_work base;
+	struct i915_vma *vma;
+	enum i915_cache_level cache_level;
+	unsigned int flags;
+};
+
+static int __vma_bind(struct dma_fence_work *work)
+{
+	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
+	struct i915_vma *vma = vw->vma;
+	int err;
+
+	err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
+	if (err)
+		atomic_or(I915_VMA_ERROR, &vma->flags);
+
+	return err;
+}
+
+struct i915_vma_work *i915_vma_work(void)
+{
+	struct i915_vma_work *vw;
+
+	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
+	if (!vw)
+		return NULL;
+
+	dma_fence_work_init(&vw->base, NULL);
+	return vw;
+}
+
 /**
  * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space.
  * @vma: VMA to map
@@ -298,8 +329,10 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
  * this VMA in case of non-default GGTT views) and PTE entries set up.
  * Note that DMA addresses are also the only part of the SG table we care about.
  */
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
-		  u32 flags)
+int i915_vma_bind(struct i915_vma *vma,
+		  enum i915_cache_level cache_level,
+		  u32 flags,
+		  struct i915_vma_work *work)
 {
 	u32 bind_flags;
 	u32 vma_flags;
@@ -316,13 +349,11 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	if (GEM_DEBUG_WARN_ON(!flags))
 		return -EINVAL;
 
-	bind_flags = 0;
-	if (flags & PIN_GLOBAL)
-		bind_flags |= I915_VMA_GLOBAL_BIND;
-	if (flags & PIN_USER)
-		bind_flags |= I915_VMA_LOCAL_BIND;
+	bind_flags = flags;
+	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
 
-	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+	vma_flags = atomic_read(&vma->flags);
+	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
 	if (flags & PIN_UPDATE)
 		bind_flags |= vma_flags;
 	else
@@ -333,11 +364,28 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	GEM_BUG_ON(!vma->pages);
 
 	trace_i915_vma_bind(vma, bind_flags);
-	ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
-	if (ret)
-		return ret;
+	if (work && bind_flags & I915_VMA_LOCAL_BIND) {
+		work->vma = vma;
+		work->cache_level = cache_level;
+		work->flags = bind_flags | BIND_ALLOC;
 
-	vma->flags |= bind_flags;
+		if (!i915_vma_trylock(vma))
+			return -EAGAIN;
+
+		dma_fence_work_chain(&work->base, dma_resv_get_excl(vma->resv));
+		dma_resv_add_excl_fence(vma->resv, &work->base.base);
+		i915_vma_unlock(vma);
+
+		i915_active_set_exclusive(&vma->active, &work->base.base);
+		work->base.fn = __vma_bind;
+	} else {
+		GEM_BUG_ON((bind_flags & ~vma_flags) & I915_VMA_LOCAL_BIND);
+		ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
+		if (ret)
+			return ret;
+	}
+
+	atomic_or(bind_flags, &vma->flags);
 	return 0;
 }
 
@@ -348,15 +396,13 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 
 	/* Access through the GTT requires the device to be awake. */
 	assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
-
-	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
+	if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
 		err = -ENODEV;
 		goto err;
 	}
 
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
-	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
+	GEM_BUG_ON((atomic_read(&vma->flags) & I915_VMA_GLOBAL_BIND) == 0);
 
 	ptr = vma->iomap;
 	if (ptr == NULL) {
@@ -368,7 +414,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 			goto err;
 		}
 
-		vma->iomap = ptr;
+		if (cmpxchg(&vma->iomap, NULL, ptr))
+			io_mapping_unmap(ptr);
 	}
 
 	__i915_vma_pin(vma);
@@ -388,18 +435,12 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 
 void i915_vma_flush_writes(struct i915_vma *vma)
 {
-	if (!i915_vma_has_ggtt_write(vma))
-		return;
-
-	intel_gt_flush_ggtt_writes(vma->vm->gt);
-
-	i915_vma_unset_ggtt_write(vma);
+	if (i915_vma_unset_ggtt_write(vma))
+		intel_gt_flush_ggtt_writes(vma->vm->gt);
 }
 
 void i915_vma_unpin_iomap(struct i915_vma *vma)
 {
-	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-
 	GEM_BUG_ON(vma->iomap == NULL);
 
 	i915_vma_flush_writes(vma);
@@ -469,9 +510,9 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
 
 	if (mappable && fenceable)
-		vma->flags |= I915_VMA_CAN_FENCE;
+		atomic_or(I915_VMA_CAN_FENCE, &vma->flags);
 	else
-		vma->flags &= ~I915_VMA_CAN_FENCE;
+		atomic_and(~I915_VMA_CAN_FENCE, &vma->flags);
 }
 
 static bool color_differs(struct drm_mm_node *node, unsigned long color)
@@ -538,13 +579,12 @@ static void assert_bind_count(const struct drm_i915_gem_object *obj)
 static int
 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
-	struct drm_i915_private *dev_priv = vma->vm->i915;
 	unsigned int cache_level;
 	u64 start, end;
 	int ret;
 
 	GEM_BUG_ON(i915_vma_is_closed(vma));
-	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+	GEM_BUG_ON(atomic_read(&vma->flags) & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
 
 	size = max(size, vma->size);
@@ -564,7 +604,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 
 	end = vma->vm->total;
 	if (flags & PIN_MAPPABLE)
-		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
+		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
 	if (flags & PIN_ZONE_4G)
 		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
 	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
@@ -580,21 +620,14 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		return -ENOSPC;
 	}
 
-	if (vma->obj) {
-		ret = i915_gem_object_pin_pages(vma->obj);
-		if (ret)
-			return ret;
-
-		cache_level = vma->obj->cache_level;
-	} else {
-		cache_level = 0;
-	}
-
 	GEM_BUG_ON(vma->pages);
-
 	ret = vma->ops->set_pages(vma);
 	if (ret)
-		goto err_unpin;
+		return ret;
+
+	cache_level = 0;
+	if (vma->obj)
+		cache_level = vma->obj->cache_level;
 
 	if (flags & PIN_OFFSET_FIXED) {
 		u64 offset = flags & PIN_OFFSET_MASK;
@@ -655,11 +688,10 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
 
-	mutex_lock(&vma->vm->mutex);
 	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
-	mutex_unlock(&vma->vm->mutex);
 
 	if (vma->obj) {
+		atomic_inc(&vma->obj->mm.pages_pin_count);
 		atomic_inc(&vma->obj->bind_count);
 		assert_bind_count(vma->obj);
 	}
@@ -668,9 +700,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 
 err_clear:
 	vma->ops->clear_pages(vma);
-err_unpin:
-	if (vma->obj)
-		i915_gem_object_unpin_pages(vma->obj);
 	return ret;
 }
 
@@ -678,14 +707,12 @@ static void
 i915_vma_remove(struct i915_vma *vma)
 {
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+	GEM_BUG_ON(atomic_read(&vma->flags) & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 
 	vma->ops->clear_pages(vma);
 
-	mutex_lock(&vma->vm->mutex);
 	drm_mm_remove_node(&vma->node);
 	list_del(&vma->vm_link);
-	mutex_unlock(&vma->vm->mutex);
 
 	/*
 	 * Since the unbound list is global, only move to that list if
@@ -706,51 +733,113 @@ i915_vma_remove(struct i915_vma *vma)
 	}
 }
 
-int __i915_vma_do_pin(struct i915_vma *vma,
-		      u64 size, u64 alignment, u64 flags)
+static int __i915_vma_do_pin(struct i915_vma *vma,
+			     u64 size, u64 alignment, u64 flags,
+			     struct i915_vma_work *work)
 {
-	const unsigned int bound = vma->flags;
+	const unsigned int bound = atomic_read(&vma->flags);
 	int ret;
 
-	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
 	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
 
-	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
-		ret = -EBUSY;
-		goto err_unpin;
-	}
-
 	if ((bound & I915_VMA_BIND_MASK) == 0) {
 		ret = i915_vma_insert(vma, size, alignment, flags);
 		if (ret)
-			goto err_unpin;
+			return ret;
 	}
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
-	ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
+	ret = i915_vma_bind(vma,
+			    vma->obj ? vma->obj->cache_level : 0,
+			    flags, work);
 	if (ret)
 		goto err_remove;
 
-	GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);
+	GEM_BUG_ON((atomic_read(&vma->flags) & I915_VMA_BIND_MASK) == 0);
 
-	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
+	if ((bound ^ atomic_read(&vma->flags)) & I915_VMA_GLOBAL_BIND)
 		__i915_vma_set_map_and_fenceable(vma);
 
 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
+
+	atomic_inc(&vma->flags);
 	return 0;
 
 err_remove:
 	if ((bound & I915_VMA_BIND_MASK) == 0) {
 		i915_vma_remove(vma);
 		GEM_BUG_ON(vma->pages);
-		GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
+		GEM_BUG_ON(atomic_read(&vma->flags) & I915_VMA_BIND_MASK);
 	}
-err_unpin:
-	__i915_vma_unpin(vma);
 	return ret;
 }
 
+int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+	struct i915_vma_work *work = NULL;
+	unsigned int bound;
+	int err;
+
+	BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
+	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
+	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
+
+	bound = atomic_inc_return(&vma->flags);
+	if (likely(((bound ^ flags) & I915_VMA_BIND_MASK) == 0)) {
+		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+		GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
+		return 0;
+	}
+
+	if (bound & I915_VMA_ERROR) {
+		err = -ENOMEM;
+		goto err_unpin;
+	}
+
+	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
+		err = -ENOMEM;
+		goto err_unpin;
+	}
+
+	if (vma->obj) {
+		err = i915_gem_object_pin_pages(vma->obj);
+		if (err)
+			goto err_unpin;
+	}
+
+	if (flags & PIN_USER) {
+		work = i915_vma_work();
+		if (!work) {
+			err = -ENOMEM;
+			goto err_pages;
+		}
+	}
+
+	err = i915_active_acquire(&vma->active);
+	if (err)
+		goto err_fence;
+
+	err = mutex_lock_interruptible(&vma->vm->mutex);
+	if (err)
+		goto err_active;
+
+	err = __i915_vma_do_pin(vma, size, alignment, flags, work);
+
+	mutex_unlock(&vma->vm->mutex);
+err_active:
+	i915_active_release(&vma->active);
+err_fence:
+	if (work)
+		dma_fence_work_commit(&work->base);
+err_pages:
+	if (vma->obj)
+		i915_gem_object_unpin_pages(vma->obj);
+err_unpin:
+	atomic_dec(&vma->flags);
+	return err;
+}
+
 void i915_vma_close(struct i915_vma *vma)
 {
 	struct drm_i915_private *i915 = vma->vm->i915;
@@ -813,13 +902,16 @@ static void __i915_vma_destroy(struct i915_vma *vma)
 
 void i915_vma_destroy(struct i915_vma *vma)
 {
-	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-
 	GEM_BUG_ON(i915_vma_is_pinned(vma));
 
 	__i915_vma_remove_closed(vma);
 
-	WARN_ON(i915_vma_unbind(vma));
+	if (drm_mm_node_allocated(&vma->node)) {
+		mutex_lock(&vma->vm->mutex);
+		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
+		WARN_ON(i915_vma_unbind(vma));
+		mutex_unlock(&vma->vm->mutex);
+	}
 	GEM_BUG_ON(i915_vma_is_active(vma));
 
 	__i915_vma_destroy(vma);
@@ -857,7 +949,7 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
 	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
 	u64 vma_offset;
 
-	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+	lockdep_assert_held(&vma->vm->mutex);
 
 	if (!i915_vma_has_userfault(vma))
 		return;
@@ -927,34 +1019,15 @@ int i915_vma_unbind(struct i915_vma *vma)
 {
 	int ret;
 
-	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+	lockdep_assert_held(&vma->vm->mutex);
 
 	/*
 	 * First wait upon any activity as retiring the request may
 	 * have side-effects such as unpinning or even unbinding this vma.
 	 */
-	might_sleep();
-	if (i915_vma_is_active(vma)) {
-		/*
-		 * When a closed VMA is retired, it is unbound - eek.
-		 * In order to prevent it from being recursively closed,
-		 * take a pin on the vma so that the second unbind is
-		 * aborted.
-		 *
-		 * Even more scary is that the retire callback may free
-		 * the object (last active vma). To prevent the explosion
-		 * we defer the actual object free to a worker that can
-		 * only proceed once it acquires the struct_mutex (which
-		 * we currently hold, therefore it cannot free this object
-		 * before we are finished).
-		 */
-		__i915_vma_pin(vma);
-		ret = i915_active_wait(&vma->active);
-		__i915_vma_unpin(vma);
-		if (ret)
-			return ret;
-	}
-	GEM_BUG_ON(i915_vma_is_active(vma));
+	ret = i915_active_wait(&vma->active);
+	if (ret)
+		return ret;
 
 	if (i915_vma_is_pinned(vma)) {
 		vma_print_allocator(vma, "is pinned");
@@ -975,9 +1048,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
 
 		/* release the fence reg _after_ flushing */
-		mutex_lock(&vma->vm->mutex);
 		ret = i915_vma_revoke_fence(vma);
-		mutex_unlock(&vma->vm->mutex);
 		if (ret)
 			return ret;
 
@@ -985,7 +1056,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 		i915_vma_revoke_mmap(vma);
 
 		__i915_vma_iounmap(vma);
-		vma->flags &= ~I915_VMA_CAN_FENCE;
+		atomic_and(~I915_VMA_CAN_FENCE, &vma->flags);
 	}
 	GEM_BUG_ON(vma->fence);
 	GEM_BUG_ON(i915_vma_has_userfault(vma));
@@ -994,7 +1065,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 		trace_i915_vma_unbind(vma);
 		vma->ops->unbind_vma(vma);
 	}
-	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+	atomic_and(~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND), &vma->flags);
 
 	i915_vma_remove(vma);
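
With the above, i915_vma_pin() keeps its signature but no longer requires
struct_mutex: the fast path only bumps the atomic pin count when the vma is
already bound as required, while the slow path pins the object's pages,
allocates an i915_vma_work for PIN_USER, acquires the vma's i915_active and
takes vm->mutex interruptibly around insert+bind before committing the
worker. A hypothetical caller now looks simply like:

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	/*
	 * vma->node.start is valid here; for PIN_USER the PTE writes may
	 * still be in flight, ordered via the vma's exclusive fence.
	 */

	i915_vma_unpin(vma);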
 
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 910d365d5599..de49bcaf1f68 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -72,7 +72,8 @@ struct i915_vma {
 	 * that exist in the ctx->handle_vmas LUT for this vma.
 	 */
 	atomic_t open_count;
-	unsigned long flags;
+	atomic_t flags;
+
 	/**
 	 * How many users have pinned this object in GTT space.
 	 *
@@ -97,18 +98,24 @@ struct i915_vma {
 	 * users.
 	 */
 #define I915_VMA_PIN_MASK 0xff
-#define I915_VMA_PIN_OVERFLOW	BIT(8)
+#define I915_VMA_PIN_OVERFLOW	(int)BIT(8)
+
+#define BIND_ALLOC I915_VMA_PIN_OVERFLOW
 
 	/** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND	BIT(9)
-#define I915_VMA_LOCAL_BIND	BIT(10)
-#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
+#define I915_VMA_GLOBAL_BIND	(int)BIT(9)
+#define I915_VMA_LOCAL_BIND	(int)BIT(10)
+#define I915_VMA_ERROR		(int)BIT(11)
+#define I915_VMA_BIND_MASK (I915_VMA_PIN_OVERFLOW | \
+			    I915_VMA_GLOBAL_BIND | \
+			    I915_VMA_LOCAL_BIND | \
+			    I915_VMA_ERROR)
+
+#define I915_VMA_GGTT		(int)BIT(12)
+#define I915_VMA_CAN_FENCE	(int)BIT(13)
+#define I915_VMA_USERFAULT	(int)BIT(14)
+#define I915_VMA_GGTT_WRITE	(int)BIT(15)
 
-#define I915_VMA_GGTT		BIT(11)
-#define I915_VMA_CAN_FENCE	BIT(12)
-#define I915_VMA_USERFAULT_BIT	13
-#define I915_VMA_USERFAULT	BIT(I915_VMA_USERFAULT_BIT)
-#define I915_VMA_GGTT_WRITE	BIT(14)
 
 	struct i915_active active;
 
@@ -164,46 +171,47 @@ int __must_check i915_vma_move_to_active(struct i915_vma *vma,
 
 static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
 {
-	return vma->flags & I915_VMA_GGTT;
+	return atomic_read(&vma->flags) & I915_VMA_GGTT;
 }
 
 static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
 {
-	return vma->flags & I915_VMA_GGTT_WRITE;
+	return atomic_read(&vma->flags) & I915_VMA_GGTT_WRITE;
 }
 
 static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
 {
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
-	vma->flags |= I915_VMA_GGTT_WRITE;
+	atomic_or(I915_VMA_GGTT_WRITE, &vma->flags);
 }
 
-static inline void i915_vma_unset_ggtt_write(struct i915_vma *vma)
+static inline bool i915_vma_unset_ggtt_write(struct i915_vma *vma)
 {
-	vma->flags &= ~I915_VMA_GGTT_WRITE;
+	return atomic_fetch_and(~I915_VMA_GGTT_WRITE, &vma->flags) &
+	       I915_VMA_GGTT_WRITE;
 }
 
 void i915_vma_flush_writes(struct i915_vma *vma);
 
 static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
 {
-	return vma->flags & I915_VMA_CAN_FENCE;
+	return atomic_read(&vma->flags) & I915_VMA_CAN_FENCE;
 }
 
 static inline bool i915_vma_set_userfault(struct i915_vma *vma)
 {
 	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
-	return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+	return atomic_fetch_or(I915_VMA_USERFAULT, &vma->flags) & I915_VMA_USERFAULT;
 }
 
 static inline void i915_vma_unset_userfault(struct i915_vma *vma)
 {
-	return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+	atomic_and(~I915_VMA_USERFAULT, &vma->flags);
 }
 
 static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
 {
-	return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+	return atomic_read(&vma->flags) & I915_VMA_USERFAULT;
 }
 
 static inline bool i915_vma_is_closed(const struct i915_vma *vma)
@@ -285,8 +293,12 @@ i915_vma_compare(struct i915_vma *vma,
 	return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
 }
 
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
-		  u32 flags);
+struct i915_vma_work *i915_vma_work(void);
+int i915_vma_bind(struct i915_vma *vma,
+		  enum i915_cache_level cache_level,
+		  u32 flags,
+		  struct i915_vma_work *work);
+
 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
 bool i915_vma_misplaced(const struct i915_vma *vma,
 			u64 size, u64 alignment, u64 flags);
@@ -305,35 +317,22 @@ static inline void i915_vma_lock(struct i915_vma *vma)
 	dma_resv_lock(vma->resv, NULL);
 }
 
-static inline void i915_vma_unlock(struct i915_vma *vma)
+static inline bool i915_vma_trylock(struct i915_vma *vma)
 {
-	dma_resv_unlock(vma->resv);
+	return dma_resv_trylock(vma->resv);
 }
 
-int __i915_vma_do_pin(struct i915_vma *vma,
-		      u64 size, u64 alignment, u64 flags);
-static inline int __must_check
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+static inline void i915_vma_unlock(struct i915_vma *vma)
 {
-	BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
-	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
-	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
-
-	/* Pin early to prevent the shrinker/eviction logic from destroying
-	 * our vma as we insert and bind.
-	 */
-	if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) {
-		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-		GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
-		return 0;
-	}
-
-	return __i915_vma_do_pin(vma, size, alignment, flags);
+	dma_resv_unlock(vma->resv);
 }
 
+int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
+
 static inline int i915_vma_pin_count(const struct i915_vma *vma)
 {
-	return vma->flags & I915_VMA_PIN_MASK;
+	return atomic_read(&vma->flags) & I915_VMA_PIN_MASK;
 }
 
 static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
@@ -343,13 +342,13 @@ static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
 
 static inline void __i915_vma_pin(struct i915_vma *vma)
 {
-	vma->flags++;
-	GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
+	atomic_inc(&vma->flags);
+	GEM_BUG_ON(atomic_read(&vma->flags) & I915_VMA_PIN_OVERFLOW);
 }
 
 static inline void __i915_vma_unpin(struct i915_vma *vma)
 {
-	vma->flags--;
+	atomic_dec(&vma->flags);
 }
 
 static inline void i915_vma_unpin(struct i915_vma *vma)
@@ -362,7 +361,7 @@ static inline void i915_vma_unpin(struct i915_vma *vma)
 static inline bool i915_vma_is_bound(const struct i915_vma *vma,
 				     unsigned int where)
 {
-	return vma->flags & where;
+	return atomic_read(&vma->flags) & where;
 }
 
 /**
@@ -374,8 +373,6 @@ static inline bool i915_vma_is_bound(const struct i915_vma *vma,
  * the caller must call i915_vma_unpin_iomap to relinquish the pinning
  * after the iomapping is no longer required.
  *
- * Callers must hold the struct_mutex.
- *
  * Returns a valid iomapped pointer or ERR_PTR.
  */
 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
@@ -387,8 +384,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
  *
  * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
  *
- * Callers must hold the struct_mutex. This function is only valid to be
- * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
+ * This function is only valid to be called on a VMA previously
+ * iomapped by the caller with i915_vma_pin_iomap().
  */
 void i915_vma_unpin_iomap(struct i915_vma *vma);
 
@@ -416,6 +413,8 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
 int __must_check i915_vma_pin_fence(struct i915_vma *vma);
 int __must_check i915_vma_revoke_fence(struct i915_vma *vma);
 
+int __i915_vma_pin_fence(struct i915_vma *vma);
+
 static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
 {
 	GEM_BUG_ON(atomic_read(&vma->fence->pin_count) <= 0);
@@ -433,7 +432,6 @@ static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
 static inline void
 i915_vma_unpin_fence(struct i915_vma *vma)
 {
-	/* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */
 	if (vma->fence)
 		__i915_vma_unpin_fence(vma);
 }
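
For reference, the atomic flags word now packs: bits 0-7 the pin count
(I915_VMA_PIN_MASK), bit 8 PIN_OVERFLOW (reused as BIND_ALLOC when passed to
the bind callbacks), bits 9-11 GLOBAL_BIND/LOCAL_BIND/ERROR (I915_VMA_BIND_MASK
covers bits 8-11), and bits 12-15 the GGTT/CAN_FENCE/USERFAULT/GGTT_WRITE
status bits. The BUILD_BUG_ONs in i915_vma_pin() rely on PIN_MBZ, PIN_GLOBAL
and PIN_USER lining up with bits 8-10. Reading the state is now lockless,
e.g. (hypothetical debug snippet):

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		pr_info("pinned %d times at ggtt offset %llx\n",
			i915_vma_pin_count(vma),
			(unsigned long long)vma->node.start);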
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index cb30c669b1b7..29a7de9181d4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -408,7 +408,7 @@ static int igt_evict_contexts(void *arg)
 	if (!HAS_FULL_PPGTT(i915))
 		return 0;
 
-	mutex_lock(&i915->drm.struct_mutex);
+	mutex_lock(&i915->ggtt.vm.mutex);
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
 	/* Reserve a block so that we know we have enough to fit a few rq */
@@ -445,7 +445,7 @@ static int igt_evict_contexts(void *arg)
 		count++;
 	} while (1);
 	drm_mm_remove_node(&hole);
-	mutex_unlock(&i915->drm.struct_mutex);
+	mutex_unlock(&i915->ggtt.vm.mutex);
 	pr_info("Filled GGTT with %lu 1MiB nodes\n", count);
 
 	/* Overfill the GGTT with context objects and so try to evict one. */
@@ -545,11 +545,11 @@ int i915_gem_evict_mock_selftests(void)
 	if (!i915)
 		return -ENOMEM;
 
-	mutex_lock(&i915->drm.struct_mutex);
+	mutex_lock(&i915->ggtt.vm.mutex);
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 		err = i915_subtests(tests, i915);
 
-	mutex_unlock(&i915->drm.struct_mutex);
+	mutex_unlock(&i915->ggtt.vm.mutex);
 
 	drm_dev_put(&i915->drm);
 	return err;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index e62a67e0f79c..91cec91d6712 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -43,7 +43,7 @@ static int mock_bind_ppgtt(struct i915_vma *vma,
 			   u32 flags)
 {
 	GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
-	vma->flags |= I915_VMA_LOCAL_BIND;
+	atomic_or(I915_VMA_LOCAL_BIND, &vma->flags);
 	return 0;
 }
 
@@ -86,7 +86,7 @@ static int mock_bind_ggtt(struct i915_vma *vma,
 			  enum i915_cache_level cache_level,
 			  u32 flags)
 {
-	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+	atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
 	return 0;
 }
 
-- 
2.23.0.rc1


