[PATCH 48/55] drm/i915: Move object domain management under the purview of its own locks

Chris Wilson <chris@chris-wilson.co.uk>
Sat Jun 30 13:40:01 UTC 2018

Instead of relying on struct_mutex to serialise changes to an object's
cache domain, take the object's own lock around domain transitions.
The set-to-{wc,gtt,cpu}-domain and set-cache-level helpers now assert
that the object lock is held rather than struct_mutex, drop
I915_WAIT_LOCKED from their waits and expect their caller to have
already pinned the object's pages; callers are converted to wrap these
calls in i915_gem_object_lock()/i915_gem_object_unlock().
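
As a minimal caller-side sketch (using the i915_gem_object_lock() and
i915_gem_object_unlock() helpers this series relies on), a GTT domain
transition now follows the pattern:

	/* pages already pinned by the caller */
	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err)
		return err;
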
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gvt/scheduler.c          |  14 +-
 drivers/gpu/drm/i915/i915_cmd_parser.c        |   2 +
 drivers/gpu/drm/i915/i915_gem.c               | 157 ++++++++----------
 drivers/gpu/drm/i915/i915_gem_dmabuf.c        |  18 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c    |  16 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c           |   5 +-
 drivers/gpu/drm/i915/i915_gem_render_state.c  |   2 +
 drivers/gpu/drm/i915/i915_perf.c              |   2 +
 drivers/gpu/drm/i915/intel_engine_cs.c        |   2 +
 drivers/gpu/drm/i915/intel_guc_log.c          |  20 ++-
 drivers/gpu/drm/i915/intel_lrc.c              |  38 +++--
 drivers/gpu/drm/i915/intel_overlay.c          |   2 +
 drivers/gpu/drm/i915/intel_ringbuffer.c       |  46 +++--
 drivers/gpu/drm/i915/intel_uc_fw.c            |  18 +-
 .../drm/i915/selftests/i915_gem_coherency.c   |  26 +--
 .../gpu/drm/i915/selftests/i915_gem_context.c |  14 +-
 .../gpu/drm/i915/selftests/i915_gem_evict.c   |   5 +
 .../gpu/drm/i915/selftests/i915_gem_object.c  |   6 +-
 drivers/gpu/drm/i915/selftests/i915_request.c |  12 +-
 .../gpu/drm/i915/selftests/intel_hangcheck.c  |   3 +
 drivers/gpu/drm/i915/selftests/intel_lrc.c    |   3 +
 .../drm/i915/selftests/intel_workarounds.c    |   2 +
 22 files changed, 229 insertions(+), 184 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 3af4b08fb4b9..db211e6a147d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -468,21 +468,19 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 				bb->clflush &= ~CLFLUSH_AFTER;
 			}
 
+			i915_gem_object_lock(bb->obj);
 			ret = i915_gem_object_set_to_gtt_domain(bb->obj,
 					false);
+			if (ret == 0)
+				ret = i915_vma_move_to_active(bb->vma,
+							      workload->req,
+							      0);
+			i915_gem_object_unlock(bb->obj);
 			if (ret)
 				goto err;
 
 			i915_gem_obj_finish_shmem_access(bb->obj);
 			bb->accessing = false;
-
-			i915_vma_lock(bb->vma);
-			ret = i915_vma_move_to_active(bb->vma,
-						      workload->req,
-						      0);
-			i915_vma_unlock(bb->vma);
-			if (ret)
-				goto err;
 		}
 	}
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 95478db9998b..ee542ace7019 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1057,6 +1057,8 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 	void *dst, *src;
 	int ret;
 
+	GEM_BUG_ON(dst_obj == src_obj);
+
 	ret = i915_gem_obj_prepare_shmem_read(src_obj, &src_needs_clflush);
 	if (ret)
 		return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6f141d14de10..2ef148587e35 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -814,6 +814,8 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct i915_vma *vma;
 
+	i915_gem_object_assert_held(obj);
+
 	if (!(obj->write_domain & flush_domains))
 		return;
 
@@ -907,15 +909,12 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 {
 	int ret;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
-
 	*needs_clflush = 0;
 	if (!i915_gem_object_has_struct_page(obj))
 		return -ENODEV;
 
 	ret = i915_gem_object_wait(obj,
-				   I915_WAIT_INTERRUPTIBLE |
-				   I915_WAIT_LOCKED,
+				   I915_WAIT_INTERRUPTIBLE,
 				   MAX_SCHEDULE_TIMEOUT,
 				   NULL);
 	if (ret)
@@ -925,6 +924,8 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	i915_gem_object_lock(obj);
+
 	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
 	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		ret = i915_gem_object_set_to_cpu_domain(obj, false);
@@ -946,10 +947,12 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 		*needs_clflush = CLFLUSH_BEFORE;
 
 out:
+	i915_gem_object_unlock(obj);
 	/* return with the pages pinned */
 	return 0;
 
 err_unpin:
+	i915_gem_object_unlock(obj);
 	i915_gem_object_unpin_pages(obj);
 	return ret;
 }
@@ -959,15 +962,12 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 {
 	int ret;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
-
 	*needs_clflush = 0;
 	if (!i915_gem_object_has_struct_page(obj))
 		return -ENODEV;
 
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
-				   I915_WAIT_LOCKED |
 				   I915_WAIT_ALL,
 				   MAX_SCHEDULE_TIMEOUT,
 				   NULL);
@@ -978,6 +978,8 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	i915_gem_object_lock(obj);
+
 	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
 	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		ret = i915_gem_object_set_to_cpu_domain(obj, true);
@@ -1006,12 +1008,14 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 	}
 
 out:
+	i915_gem_object_unlock(obj);
 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 	obj->mm.dirty = true;
 	/* return with the pages pinned */
 	return 0;
 
 err_unpin:
+	i915_gem_object_unlock(obj);
 	i915_gem_object_unpin_pages(obj);
 	return ret;
 }
@@ -1099,12 +1103,7 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		obj_do_bit17_swizzling = BIT(17);
 
-	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
-	if (ret)
-		return ret;
-
 	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
-	mutex_unlock(&obj->base.dev->struct_mutex);
 	if (ret)
 		return ret;
 
@@ -1170,10 +1169,6 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 	u64 remain, offset;
 	int ret;
 
-	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
-	if (ret)
-		return ret;
-
 	intel_runtime_pm_get(i915);
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 				       PIN_MAPPABLE |
@@ -1197,12 +1192,12 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 		GEM_BUG_ON(!node.allocated);
 	}
 
+	i915_gem_object_lock(obj);
 	ret = i915_gem_object_set_to_gtt_domain(obj, false);
+	i915_gem_object_unlock(obj);
 	if (ret)
 		goto out_unpin;
 
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	user_data = u64_to_user_ptr(args->data_ptr);
 	remain = args->size;
 	offset = args->offset;
@@ -1239,7 +1234,6 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 		offset += page_length;
 	}
 
-	mutex_lock(&i915->drm.struct_mutex);
 out_unpin:
 	if (node.allocated) {
 		wmb();
@@ -1252,7 +1246,6 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 	}
 out_unlock:
 	intel_runtime_pm_put(i915);
-	mutex_unlock(&i915->drm.struct_mutex);
 
 	return ret;
 }
@@ -1359,10 +1352,6 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 	void __user *user_data;
 	int ret;
 
-	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
-	if (ret)
-		return ret;
-
 	if (i915_gem_object_has_struct_page(obj)) {
 		/*
 		 * Avoid waking the device up if we can fallback, as
@@ -1402,12 +1391,12 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		GEM_BUG_ON(!node.allocated);
 	}
 
+	i915_gem_object_lock(obj);
 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
+	i915_gem_object_unlock(obj);
 	if (ret)
 		goto out_unpin;
 
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 
 	user_data = u64_to_user_ptr(args->data_ptr);
@@ -1451,7 +1440,6 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 	}
 	intel_fb_obj_flush(obj, ORIGIN_CPU);
 
-	mutex_lock(&i915->drm.struct_mutex);
 out_unpin:
 	if (node.allocated) {
 		wmb();
@@ -1465,7 +1453,6 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 out_rpm:
 	intel_runtime_pm_put(i915);
 out_unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
 	return ret;
 }
 
@@ -1534,21 +1521,15 @@ static int
 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 		      const struct drm_i915_gem_pwrite *args)
 {
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	void __user *user_data;
-	u64 remain;
 	unsigned int obj_do_bit17_swizzling;
 	unsigned int partial_cacheline_write;
 	unsigned int needs_clflush;
 	unsigned int offset, idx;
+	void __user *user_data;
+	u64 remain;
 	int ret;
 
-	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
-	if (ret)
-		return ret;
-
 	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
-	mutex_unlock(&i915->drm.struct_mutex);
 	if (ret)
 		return ret;
 
@@ -1770,22 +1751,20 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	if (err)
 		goto out;
 
-	err = i915_mutex_lock_interruptible(dev);
-	if (err)
-		goto out_unpin;
-
+	i915_gem_object_lock(obj);
 	if (read_domains & I915_GEM_DOMAIN_WC)
 		err = i915_gem_object_set_to_wc_domain(obj, write_domain);
 	else if (read_domains & I915_GEM_DOMAIN_GTT)
 		err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
 	else
 		err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
+	i915_gem_object_unlock(obj);
+	if (err)
+		goto out_unpin;
 
 	/* And bump the LRU for this access */
 	i915_gem_object_bump_inactive_ggtt(obj);
 
-	mutex_unlock(&dev->struct_mutex);
-
 	if (write_domain != 0)
 		intel_fb_obj_invalidate(obj,
 					fb_write_origin(obj, write_domain));
@@ -2042,9 +2021,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 
 	intel_runtime_pm_get(dev_priv);
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		goto err_rpm;
+	i915_gem_object_lock(obj);
 
 	/* Access to snoopable pages through the GTT is incoherent. */
 	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
@@ -2118,8 +2095,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 err_unpin:
 	i915_vma_unpin(vma);
 err_unlock:
-	mutex_unlock(&dev->struct_mutex);
-err_rpm:
+	i915_gem_object_unlock(obj);
 	intel_runtime_pm_put(dev_priv);
 	i915_gem_object_unpin_pages(obj);
 err:
@@ -3461,11 +3437,11 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
 {
 	int ret;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	i915_gem_object_assert_held(obj);
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
 
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
-				   I915_WAIT_LOCKED |
 				   (write ? I915_WAIT_ALL : 0),
 				   MAX_SCHEDULE_TIMEOUT,
 				   NULL);
@@ -3475,7 +3451,8 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
 	if (obj->write_domain == I915_GEM_DOMAIN_WC)
 		return 0;
 
-	/* Flush and acquire obj->pages so that we are coherent through
+	/*
+	 * Flush and acquire obj->pages so that we are coherent through
 	 * direct access in memory with previous cached writes through
 	 * shmemfs and that our cache domain tracking remains valid.
 	 * For example, if the obj->filp was moved to swap without us
@@ -3483,20 +3460,18 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
 	 * continue to assume that the obj remained out of the CPU cached
 	 * domain.
 	 */
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		return ret;
-
 	flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
 
-	/* Serialise direct access to this object with the barriers for
+	/*
+	 * Serialise direct access to this object with the barriers for
 	 * coherent writes from the GPU, by effectively invalidating the
 	 * WC domain upon first access.
 	 */
 	if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
 		mb();
 
-	/* It should now be out of any other write domains, and we can update
+	/*
+	 * It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
 	GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
@@ -3507,7 +3482,6 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
 		obj->mm.dirty = true;
 	}
 
-	i915_gem_object_unpin_pages(obj);
 	return 0;
 }
 
@@ -3524,11 +3498,11 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
 	int ret;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	i915_gem_object_assert_held(obj);
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
 
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
-				   I915_WAIT_LOCKED |
 				   (write ? I915_WAIT_ALL : 0),
 				   MAX_SCHEDULE_TIMEOUT,
 				   NULL);
@@ -3538,7 +3512,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	if (obj->write_domain == I915_GEM_DOMAIN_GTT)
 		return 0;
 
-	/* Flush and acquire obj->pages so that we are coherent through
+	/*
+	 * Flush and acquire obj->pages so that we are coherent through
 	 * direct access in memory with previous cached writes through
 	 * shmemfs and that our cache domain tracking remains valid.
 	 * For example, if the obj->filp was moved to swap without us
@@ -3546,20 +3521,18 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	 * continue to assume that the obj remained out of the CPU cached
 	 * domain.
 	 */
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		return ret;
-
 	flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
 
-	/* Serialise direct access to this object with the barriers for
+	/*
+	 * Serialise direct access to this object with the barriers for
 	 * coherent writes from the GPU, by effectively invalidating the
 	 * GTT domain upon first access.
 	 */
 	if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
 		mb();
 
-	/* It should now be out of any other write domains, and we can update
+	/*
+	 * It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
 	GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
@@ -3570,7 +3543,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 		obj->mm.dirty = true;
 	}
 
-	i915_gem_object_unpin_pages(obj);
 	return 0;
 }
 
@@ -3595,7 +3567,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	struct i915_vma *vma;
 	int ret;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	i915_gem_object_assert_held(obj);
 
 	if (obj->cache_level == cache_level)
 		return 0;
@@ -3732,12 +3704,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		goto out;
-
+	i915_gem_object_lock(obj);
 	ret = i915_gem_object_set_cache_level(obj, level);
-	mutex_unlock(&dev->struct_mutex);
+	i915_gem_object_unlock(obj);
 
 out:
 	i915_gem_object_put(obj);
@@ -3759,7 +3728,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	struct i915_vma *vma;
 	int ret;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	i915_gem_object_lock(obj);
 
 	/* Mark the global pin early so that we account for the
 	 * display coherency whilst setting up the cache domains.
@@ -3811,17 +3780,19 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 */
 	obj->read_domains |= I915_GEM_DOMAIN_GTT;
 
+	i915_gem_object_unlock(obj);
 	return vma;
 
 err_unpin_global:
 	obj->pin_global--;
+	i915_gem_object_unlock(obj);
 	return vma;
 }
 
 void
 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
 {
-	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+	i915_gem_object_assert_held(vma->obj);
 
 	if (WARN_ON(vma->obj->pin_global == 0))
 		return;
@@ -3848,11 +3819,11 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 {
 	int ret;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	i915_gem_object_assert_held(obj);
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
-				   I915_WAIT_LOCKED |
 				   (write ? I915_WAIT_ALL : 0),
 				   MAX_SCHEDULE_TIMEOUT,
 				   NULL);
@@ -3867,16 +3838,20 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 		obj->read_domains |= I915_GEM_DOMAIN_CPU;
 	}
 
-	/* It should now be out of any other write domains, and we can update
+	/*
+	 * It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
 	GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
 
-	/* If we're writing through the CPU, then the GPU read domains will
+	/*
+	 * If we're writing through the CPU, then the GPU read domains will
 	 * need to be invalidated at next use.
 	 */
-	if (write)
+	if (write) {
 		__start_cpu_write(obj);
+		obj->mm.dirty = true;
+	}
 
 	return 0;
 }
@@ -4646,7 +4621,7 @@ int i915_gem_suspend(struct drm_i915_private *i915)
 
 void i915_gem_suspend_late(struct drm_i915_private *i915)
 {
-	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_object *obj, *on;
 	struct list_head *phases[] = {
 		&i915->mm.unbound_list,
 		&i915->mm.bound_list,
@@ -4673,12 +4648,17 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
 	 * machine in an unusable condition.
 	 */
 
-	mutex_lock(&i915->drm.struct_mutex);
 	for (phase = phases; *phase; phase++) {
-		list_for_each_entry(obj, *phase, mm.link)
+		list_for_each_entry_safe(obj, on, *phase, mm.link) {
+			__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+			if (!i915_gem_object_has_pages(obj))
+				continue;
+
+			i915_gem_object_lock(obj);
 			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+			i915_gem_object_unlock(obj);
+		}
 	}
-	mutex_unlock(&i915->drm.struct_mutex);
 
 	intel_uc_sanitize(i915);
 	i915_gem_sanitize(i915);
@@ -4941,7 +4921,9 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 		if (err)
 			goto err_active;
 
+		i915_gem_object_lock(state->obj);
 		err = i915_gem_object_set_to_cpu_domain(state->obj, false);
+		i915_gem_object_unlock(state->obj);
 		if (err)
 			goto err_active;
 
@@ -5319,12 +5301,13 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
 	i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
 	i915_gem_drain_freed_objects(i915);
 
-	mutex_lock(&i915->drm.struct_mutex);
 	for (phase = phases; *phase; phase++) {
-		list_for_each_entry(obj, *phase, mm.link)
+		list_for_each_entry(obj, *phase, mm.link) {
+			i915_gem_object_lock(obj);
 			WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
+			i915_gem_object_unlock(obj);
+		}
 	}
-	mutex_unlock(&i915->drm.struct_mutex);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 82e2ca17a441..7aedd9cbd431 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -169,7 +169,6 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
 static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
-	struct drm_device *dev = obj->base.dev;
 	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
 	int err;
 
@@ -177,14 +176,10 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
 	if (err)
 		return err;
 
-	err = i915_mutex_lock_interruptible(dev);
-	if (err)
-		goto out;
-
+	i915_gem_object_lock(obj);
 	err = i915_gem_object_set_to_cpu_domain(obj, write);
-	mutex_unlock(&dev->struct_mutex);
+	i915_gem_object_unlock(obj);
 
-out:
 	i915_gem_object_unpin_pages(obj);
 	return err;
 }
@@ -192,21 +187,16 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
 static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
-	struct drm_device *dev = obj->base.dev;
 	int err;
 
 	err = i915_gem_object_pin_pages(obj);
 	if (err)
 		return err;
 
-	err = i915_mutex_lock_interruptible(dev);
-	if (err)
-		goto out;
-
+	i915_gem_object_lock(obj);
 	err = i915_gem_object_set_to_gtt_domain(obj, false);
-	mutex_unlock(&dev->struct_mutex);
+	i915_gem_object_unlock(obj);
 
-out:
 	i915_gem_object_unpin_pages(obj);
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 14069840f01f..37e1af2ebe7d 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1040,7 +1040,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 		if (use_cpu_reloc(cache, obj))
 			return NULL;
 
+		i915_gem_object_lock(obj);
 		err = i915_gem_object_set_to_gtt_domain(obj, true);
+		i915_gem_object_unlock(obj);
 		if (err)
 			return ERR_PTR(err);
 
@@ -1154,7 +1156,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	if (IS_ERR(cmd))
 		return PTR_ERR(cmd);
 
+	i915_gem_object_lock(obj);
 	err = i915_gem_object_set_to_wc_domain(obj, false);
+	i915_gem_object_unlock(obj);
 	if (err)
 		goto err_unmap;
 
@@ -1174,7 +1178,11 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		goto err_unpin;
 	}
 
+	i915_vma_lock(vma);
 	err = i915_request_await_object(rq, vma->obj, true);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	i915_vma_unlock(vma);
 	if (err)
 		goto err_request;
 
@@ -1184,19 +1192,13 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	if (err)
 		goto err_request;
 
-	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
 	i915_vma_lock(batch);
+	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
 	err = i915_vma_move_to_active(batch, rq, 0);
 	i915_vma_unlock(batch);
 	if (err)
 		goto skip_request;
 
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
-	if (err)
-		goto skip_request;
-
 	rq->batch = batch;
 	i915_vma_unpin(batch);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index d15c60790f2c..0d44033d4b40 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3722,8 +3722,11 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 		WARN_ON(i915_vma_bind(vma,
 				      obj ? obj->cache_level : 0,
 				      PIN_UPDATE));
-		if (obj)
+		if (obj) {
+			i915_gem_object_lock(obj);
 			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+			i915_gem_object_unlock(obj);
+		}
 	}
 
 	ggtt->vm.closed = false;
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 67cac97fcfd4..5f62361cf035 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -164,7 +164,9 @@ static int render_state_setup(struct intel_render_state *so,
 		drm_clflush_virt_range(d, i * sizeof(u32));
 	kunmap_atomic(d);
 
+	i915_gem_object_lock(so->obj);
 	ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
+	i915_gem_object_unlock(so->obj);
 out:
 	i915_gem_obj_finish_shmem_access(so->obj);
 	return ret;
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 447407fee3b8..e3279b2df67d 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1510,7 +1510,9 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
 		goto unlock;
 	}
 
+	i915_gem_object_lock(bo);
 	ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
+	i915_gem_object_unlock(bo);
 	if (ret)
 		goto err_unref;
 
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 84d1d38a600f..7ebd6dca26a1 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -573,7 +573,9 @@ static int init_status_page(struct intel_engine_cs *engine)
 		return PTR_ERR(obj);
 	}
 
+	i915_gem_object_lock(obj);
 	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	i915_gem_object_unlock(obj);
 	if (ret)
 		goto err;
 
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 6da61a71d28f..9d428f557aa3 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -340,8 +340,6 @@ static void capture_logs_work(struct work_struct *work)
 
 static int guc_log_map(struct intel_guc_log *log)
 {
-	struct intel_guc *guc = log_to_guc(log);
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	void *vaddr;
 	int ret;
 
@@ -350,12 +348,6 @@ static int guc_log_map(struct intel_guc_log *log)
 	if (!log->vma)
 		return -ENODEV;
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	ret = i915_gem_object_set_to_wc_domain(log->vma->obj, true);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-	if (ret)
-		return ret;
-
 	/*
 	 * Create a WC (Uncached for read) vmalloc mapping of log
 	 * buffer pages, so that we can directly get the data
@@ -363,13 +355,23 @@ static int guc_log_map(struct intel_guc_log *log)
 	 */
 	vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC);
 	if (IS_ERR(vaddr)) {
-		DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
+		DRM_ERROR("Couldn't map log buffer pages\n");
 		return PTR_ERR(vaddr);
 	}
 
+	i915_gem_object_lock(log->vma->obj);
+	ret = i915_gem_object_set_to_wc_domain(log->vma->obj, true);
+	i915_gem_object_unlock(log->vma->obj);
+	if (ret)
+		goto err_unmap;
+
 	log->relay.buf_addr = vaddr;
 
 	return 0;
+
+err_unmap:
+	i915_gem_object_unpin_map(log->vma->obj);
+	return ret;
 }
 
 static void guc_log_unmap(struct intel_guc_log *log)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index fa01d79dc5a3..c02121005483 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1569,25 +1569,36 @@ static void execlists_context_unpin(struct intel_context *ce)
 
 static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
 {
+	unsigned int bound = vma->flags;
 	unsigned int flags;
 	int err;
 
+	flags = PIN_GLOBAL | PIN_HIGH;
+	if (ctx->ggtt_offset_bias)
+		flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias;
+
+	err = i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags);
+	if (err)
+		return err;
+
 	/*
 	 * Clear this page out of any CPU caches for coherent swap-in/out.
 	 * We only want to do this on the first bind so that we do not stall
 	 * on an active context (which by nature is already on the GPU).
 	 */
-	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+	if (!(bound & I915_VMA_GLOBAL_BIND)) {
+		i915_gem_object_lock(vma->obj);
 		err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+		i915_gem_object_unlock(vma->obj);
 		if (err)
-			return err;
+			goto err_unpin;
 	}
 
-	flags = PIN_GLOBAL | PIN_HIGH;
-	if (ctx->ggtt_offset_bias)
-		flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias;
+	return 0;
 
-	return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags);
+err_unpin:
+	i915_vma_unpin(vma);
+	return err;
 }
 
 static struct intel_context *
@@ -2913,19 +2924,20 @@ populate_lr_context(struct i915_gem_context *ctx,
 	u32 *regs;
 	int ret;
 
-	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
-	if (ret) {
-		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
-		return ret;
-	}
-
 	vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
 		ret = PTR_ERR(vaddr);
 		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
 		return ret;
 	}
-	ctx_obj->mm.dirty = true;
+
+	i915_gem_object_lock(ctx_obj);
+	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
+	i915_gem_object_unlock(ctx_obj);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
+		return ret;
+	}
 
 	if (engine->default_state) {
 		/*
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 371eb3dbedc0..423b3ad4986c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1431,7 +1431,9 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 		}
 		overlay->flip_addr = i915_ggtt_offset(vma);
 
+		i915_gem_object_lock(reg_bo);
 		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
+		i915_gem_object_unlock(reg_bo);
 		if (ret) {
 			DRM_ERROR("failed to move overlay register bo into the GTT\n");
 			goto out_unpin_bo;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 44a0d3010059..41e6efc662d9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1042,13 +1042,13 @@ int intel_ring_pin(struct intel_ring *ring,
 {
 	enum i915_map_type map = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
 	struct i915_vma *vma = ring->vma;
+	unsigned int bound = vma->flags;
 	unsigned int flags;
 	void *addr;
 	int ret;
 
 	GEM_BUG_ON(ring->vaddr);
 
-
 	flags = PIN_GLOBAL;
 	if (offset_bias)
 		flags |= PIN_OFFSET_BIAS | offset_bias;
@@ -1057,25 +1057,29 @@ int intel_ring_pin(struct intel_ring *ring,
 	else
 		flags |= PIN_HIGH;
 
-	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+	ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
+	if (unlikely(ret))
+		return ret;
+
+	if (!(bound & I915_VMA_GLOBAL_BIND)) {
+		i915_gem_object_lock(vma->obj);
 		if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
 			ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
 		else
 			ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
+		i915_gem_object_unlock(vma->obj);
 		if (unlikely(ret))
-			return ret;
+			goto err;
 	}
 
-	ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
-	if (unlikely(ret))
-		return ret;
-
 	if (i915_vma_is_map_and_fenceable(vma))
 		addr = (void __force *)i915_vma_pin_iomap(vma);
 	else
 		addr = i915_gem_object_pin_map(vma->obj, map);
-	if (IS_ERR(addr))
+	if (IS_ERR(addr)) {
+		ret = PTR_ERR(addr);
 		goto err;
+	}
 
 	vma->obj->pin_global++;
 
@@ -1084,7 +1088,7 @@ int intel_ring_pin(struct intel_ring *ring,
 
 err:
 	i915_vma_unpin(vma);
-	return PTR_ERR(addr);
+	return ret;
 }
 
 void intel_ring_reset(struct intel_ring *ring, u32 tail)
@@ -1234,28 +1238,32 @@ static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
 static int __context_pin(struct intel_context *ce)
 {
 	struct i915_vma *vma;
+	unsigned int bound;
 	int err;
 
 	vma = ce->state;
 	if (!vma)
 		return 0;
 
+	bound = vma->flags;
+	err = i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
+			   PIN_GLOBAL | PIN_HIGH);
+	if (err)
+		return err;
+
 	/*
 	 * Clear this page out of any CPU caches for coherent swap-in/out.
 	 * We only want to do this on the first bind so that we do not stall
 	 * on an active context (which by nature is already on the GPU).
 	 */
-	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+	if (!(bound & I915_VMA_GLOBAL_BIND)) {
+		i915_gem_object_lock(vma->obj);
 		err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+		i915_gem_object_unlock(vma->obj);
 		if (err)
-			return err;
+			goto err_unpin;
 	}
 
-	err = i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
-			   PIN_GLOBAL | PIN_HIGH);
-	if (err)
-		return err;
-
 	/*
 	 * And mark is as a globally pinned object to let the shrinker know
 	 * it cannot reclaim the object until we release it.
@@ -1263,6 +1271,10 @@ static int __context_pin(struct intel_context *ce)
 	vma->obj->pin_global++;
 
 	return 0;
+
+err_unpin:
+	i915_vma_unpin(vma);
+	return err;
 }
 
 static void __context_unpin(struct intel_context *ce)
@@ -1337,7 +1349,9 @@ alloc_context_vma(struct intel_engine_cs *engine)
 	 */
 	if (IS_IVYBRIDGE(i915)) {
 		/* Ignore any error, regard it as a simple optimisation */
+		i915_gem_object_lock(obj);
 		i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+		i915_gem_object_unlock(obj);
 	}
 
 	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index 6e8e0b546743..7dc5519a5c25 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -215,13 +215,6 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
 			 intel_uc_fw_status_repr(uc_fw->load_status));
 
 	/* Pin object with firmware */
-	err = i915_gem_object_set_to_gtt_domain(uc_fw->obj, false);
-	if (err) {
-		DRM_DEBUG_DRIVER("%s fw set-domain err=%d\n",
-				 intel_uc_fw_type_repr(uc_fw->type), err);
-		goto fail;
-	}
-
 	ggtt_pin_bias = to_i915(uc_fw->obj->base.dev)->guc.ggtt_pin_bias;
 	vma = i915_gem_object_ggtt_pin(uc_fw->obj, NULL, 0, 0,
 				       PIN_OFFSET_BIAS | ggtt_pin_bias);
@@ -232,6 +225,15 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
 		goto fail;
 	}
 
+	i915_gem_object_lock(uc_fw->obj);
+	err = i915_gem_object_set_to_gtt_domain(uc_fw->obj, false);
+	i915_gem_object_unlock(uc_fw->obj);
+	if (err) {
+		DRM_DEBUG_DRIVER("%s fw set-domain err=%d\n",
+				 intel_uc_fw_type_repr(uc_fw->type), err);
+		goto err_unpin;
+	}
+
 	/* Call custom loader */
 	err = xfer(uc_fw, vma);
 
@@ -256,6 +258,8 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
 
 	return 0;
 
+err_unpin:
+	i915_vma_unpin(vma);
 fail:
 	uc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
 	DRM_DEBUG_DRIVER("%s fw load %s\n",
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index a71535db94c2..e6945f1131b4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -138,18 +138,19 @@ static int wc_set(struct drm_i915_gem_object *obj,
 	u32 *map;
 	int err;
 
-	err = i915_gem_object_set_to_wc_domain(obj, true);
-	if (err)
-		return err;
-
 	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
 	if (IS_ERR(map))
 		return PTR_ERR(map);
 
-	map[offset / sizeof(*map)] = v;
+	i915_gem_object_lock(obj);
+	err = i915_gem_object_set_to_wc_domain(obj, true);
+	if (err == 0)
+		map[offset / sizeof(*map)] = v;
+	i915_gem_object_unlock(obj);
+
 	i915_gem_object_unpin_map(obj);
 
-	return 0;
+	return err;
 }
 
 static int wc_get(struct drm_i915_gem_object *obj,
@@ -159,18 +160,19 @@ static int wc_get(struct drm_i915_gem_object *obj,
 	u32 *map;
 	int err;
 
-	err = i915_gem_object_set_to_wc_domain(obj, false);
-	if (err)
-		return err;
-
 	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
 	if (IS_ERR(map))
 		return PTR_ERR(map);
 
-	*v = map[offset / sizeof(*map)];
+	i915_gem_object_lock(obj);
+	err = i915_gem_object_set_to_wc_domain(obj, false);
+	if (err == 0)
+		*v = map[offset / sizeof(*map)];
+	i915_gem_object_unlock(obj);
+
 	i915_gem_object_unpin_map(obj);
 
-	return 0;
+	return err;
 }
 
 static int gpu_set(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 152df5a288d8..83c4cf6b24a9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -78,10 +78,6 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
 	*cmd = MI_BATCH_BUFFER_END;
 	i915_gem_object_unpin_map(obj);
 
-	err = i915_gem_object_set_to_gtt_domain(obj, false);
-	if (err)
-		goto err;
-
 	vma = i915_vma_instance(obj, vma->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
@@ -92,8 +88,16 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
 	if (err)
 		goto err;
 
+	i915_gem_object_lock(obj);
+	err = i915_gem_object_set_to_gtt_domain(obj, false);
+	i915_gem_object_unlock(obj);
+	if (err)
+		goto err_unpin;
+
 	return vma;
 
+err_unpin:
+	i915_vma_unpin(vma);
 err:
 	i915_gem_object_put(obj);
 	return ERR_PTR(err);
@@ -320,7 +324,9 @@ create_test_object(struct i915_gem_context *ctx,
 		return ERR_PTR(err);
 	}
 
+	i915_gem_object_lock(obj);
 	err = i915_gem_object_set_to_gtt_domain(obj, false);
+	i915_gem_object_unlock(obj);
 	if (err)
 		return ERR_PTR(err);
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 51b5818e7fac..de744ae67c88 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -239,7 +239,9 @@ static int igt_evict_for_cache_color(void *arg)
 		err = PTR_ERR(obj);
 		goto cleanup;
 	}
+	i915_gem_object_lock(obj);
 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	i915_gem_object_unlock(obj);
 
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 				       I915_GTT_PAGE_SIZE | flags);
@@ -254,7 +256,10 @@ static int igt_evict_for_cache_color(void *arg)
 		err = PTR_ERR(obj);
 		goto cleanup;
 	}
+
+	i915_gem_object_lock(obj);
 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	i915_gem_object_unlock(obj);
 
 	/* Neighbouring; same colour - should fit */
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index 6727f7677fd1..baedb14334d0 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -86,9 +86,9 @@ static int igt_phys_object(void *arg)
 	}
 
 	/* Make the object dirty so that put_pages must do copy back the data */
-	mutex_lock(&i915->drm.struct_mutex);
+	i915_gem_object_lock(obj);
 	err = i915_gem_object_set_to_gtt_domain(obj, true);
-	mutex_unlock(&i915->drm.struct_mutex);
+	i915_gem_object_unlock(obj);
 	if (err) {
 		pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n",
 		       err);
@@ -324,6 +324,7 @@ static int igt_partial_tiling(void *arg)
 
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
+	i915_gem_object_lock(obj);
 
 	if (1) {
 		IGT_TIMEOUT(end);
@@ -425,6 +426,7 @@ next_tiling: ;
 	}
 
 out_unlock:
+	i915_gem_object_unlock(obj);
 	intel_runtime_pm_put(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 	i915_gem_object_unpin_pages(obj);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index e5220bf5255a..c244674817a9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -577,16 +577,18 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 	if (err)
 		goto err;
 
-	err = i915_gem_object_set_to_wc_domain(obj, true);
-	if (err)
-		goto err;
-
 	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
 	if (IS_ERR(cmd)) {
 		err = PTR_ERR(cmd);
 		goto err;
 	}
 
+	i915_gem_object_lock(obj);
+	err = i915_gem_object_set_to_wc_domain(obj, true);
+	i915_gem_object_unlock(obj);
+	if (err)
+		goto err_map;
+
 	if (gen >= 8) {
 		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
 		*cmd++ = lower_32_bits(vma->node.start);
@@ -608,6 +610,8 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 
 	return vma;
 
+err_map:
+	i915_gem_object_unpin_map(obj);
 err:
 	i915_gem_object_put(obj);
 	return ERR_PTR(err);
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 61d42757f372..a5de7c4e0bab 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -66,7 +66,10 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
 		goto err_hws;
 	}
 
+	i915_gem_object_lock(h->hws);
 	i915_gem_object_set_cache_level(h->hws, I915_CACHE_LLC);
+	i915_gem_object_unlock(h->hws);
+
 	vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
 		err = PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 17463022a5d9..58ebeab6cb04 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -42,7 +42,10 @@ static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
 		goto err_hws;
 	}
 
+	i915_gem_object_lock(spin->hws);
 	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
+	i915_gem_object_unlock(spin->hws);
+
 	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
 		err = PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index 441dfa138569..00496386f09f 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -24,7 +24,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 	if (IS_ERR(result))
 		return result;
 
+	i915_gem_object_lock(result);
 	i915_gem_object_set_cache_level(result, I915_CACHE_LLC);
+	i915_gem_object_unlock(result);
 
 	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
 	if (IS_ERR(cs)) {
-- 
2.18.0
