[PATCH 43/48] drm/i915: Move object domain management under the purview of its own locks

Chris Wilson <chris@chris-wilson.co.uk>
Fri Jun 29 08:59:59 UTC 2018

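The object's GPU domain tracking and its cache level are currently
serialised by the device-global struct_mutex. Move them under the
object's own reservation lock instead: assert obj->resv->lock in
flush_write_domain(), the set-to-{wc,gtt,cpu}-domain helpers,
i915_gem_object_set_cache_level() and the display-plane pin/unpin
paths; take i915_gem_object_lock() around those calls in the callers;
and drop I915_WAIT_LOCKED from the waits that no longer run under
struct_mutex. Execbuf records which objects it has locked with a new
__EXEC_OBJECT_LOCKED flag so that __eb_unreserve_vma() can drop the
lock again.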

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c               | 119 +++++++-----------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c    |  13 +-
 drivers/gpu/drm/i915/i915_perf.c              |   2 +
 drivers/gpu/drm/i915/intel_engine_cs.c        |   2 +
 drivers/gpu/drm/i915/intel_ringbuffer.c       |   2 +
 .../gpu/drm/i915/selftests/i915_gem_evict.c   |   5 +
 .../gpu/drm/i915/selftests/intel_hangcheck.c  |   3 +
 drivers/gpu/drm/i915/selftests/intel_lrc.c    |   3 +
 .../drm/i915/selftests/intel_workarounds.c    |   2 +
 9 files changed, 77 insertions(+), 74 deletions(-)

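For illustration only, a rough userspace C analogue of the locking
pattern this patch adopts: each object carries its own mutex (standing
in for obj->resv->lock) and domain transitions are bracketed by that
per-object lock instead of one device-global mutex. All structures and
names below are hypothetical stand-ins, not the driver's definitions.

#include <pthread.h>
#include <stdio.h>

#define DOMAIN_CPU (1u << 0)
#define DOMAIN_GTT (1u << 1)

struct gem_object {
	pthread_mutex_t lock;	/* plays the role of obj->resv->lock */
	unsigned int read_domains;
	unsigned int write_domain;
};

/* Caller must hold obj->lock, mirroring the lockdep_assert_held()
 * checks this patch adds to the set-domain helpers. */
static void set_to_cpu_domain(struct gem_object *obj, int write)
{
	obj->read_domains = DOMAIN_CPU;
	obj->write_domain = write ? DOMAIN_CPU : 0;
}

int main(void)
{
	struct gem_object a = { PTHREAD_MUTEX_INITIALIZER, DOMAIN_GTT, DOMAIN_GTT };
	struct gem_object b = { PTHREAD_MUTEX_INITIALIZER, DOMAIN_GTT, 0 };

	/* Callers bracket each domain change with the object's own lock;
	 * work on different objects no longer serialises on one mutex. */
	pthread_mutex_lock(&a.lock);
	set_to_cpu_domain(&a, 1);
	pthread_mutex_unlock(&a.lock);

	pthread_mutex_lock(&b.lock);
	set_to_cpu_domain(&b, 0);
	pthread_mutex_unlock(&b.lock);

	printf("a: read=%#x write=%#x\n", a.read_domains, a.write_domain);
	printf("b: read=%#x write=%#x\n", b.read_domains, b.write_domain);
	return 0;
}

With one lock per object, concurrent domain changes on different
objects no longer contend, which is why the set-domain paths in the
diff below can stop taking struct_mutex.
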
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b4114cf2428f..cbc9d3b3f39b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -431,14 +431,15 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	LIST_HEAD(still_in_list);
 	int ret;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
-
-	/* Closed vma are removed from the obj->vma_list - but they may
+	/*
+	 * Closed vma are removed from the obj->vma_list - but they may
 	 * still have an active binding on the object. To remove those we
 	 * must wait for all rendering to complete to the object (as unbinding
 	 * must anyway), and retire the requests.
 	 */
+	i915_gem_object_lock(obj);
 	ret = i915_gem_object_set_to_cpu_domain(obj, false);
+	i915_gem_object_unlock(obj);
 	if (ret)
 		return ret;
 
@@ -830,6 +831,8 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct i915_vma *vma;
 
+	lockdep_assert_held(&obj->resv->lock.base);
+
 	if (!(obj->write_domain & flush_domains))
 		return;
 
@@ -923,15 +926,12 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 {
 	int ret;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
-
 	*needs_clflush = 0;
 	if (!i915_gem_object_has_struct_page(obj))
 		return -ENODEV;
 
 	ret = i915_gem_object_wait(obj,
-				   I915_WAIT_INTERRUPTIBLE |
-				   I915_WAIT_LOCKED,
+				   I915_WAIT_INTERRUPTIBLE,
 				   MAX_SCHEDULE_TIMEOUT,
 				   NULL);
 	if (ret)
@@ -941,6 +941,8 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	i915_gem_object_lock(obj);
+
 	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
 	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		ret = i915_gem_object_set_to_cpu_domain(obj, false);
@@ -962,10 +964,12 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 		*needs_clflush = CLFLUSH_BEFORE;
 
 out:
+	i915_gem_object_unlock(obj);
 	/* return with the pages pinned */
 	return 0;
 
 err_unpin:
+	i915_gem_object_unlock(obj);
 	i915_gem_object_unpin_pages(obj);
 	return ret;
 }
@@ -975,7 +979,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 {
 	int ret;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	lockdep_assert_held(&obj->resv->lock.base);
 
 	*needs_clflush = 0;
 	if (!i915_gem_object_has_struct_page(obj))
@@ -983,7 +987,6 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
-				   I915_WAIT_LOCKED |
 				   I915_WAIT_ALL,
 				   MAX_SCHEDULE_TIMEOUT,
 				   NULL);
@@ -994,6 +997,8 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	i915_gem_object_lock(obj);
+
 	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
 	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		ret = i915_gem_object_set_to_cpu_domain(obj, true);
@@ -1022,12 +1027,14 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 	}
 
 out:
+	i915_gem_object_unlock(obj);
 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 	obj->mm.dirty = true;
 	/* return with the pages pinned */
 	return 0;
 
 err_unpin:
+	i915_gem_object_unlock(obj);
 	i915_gem_object_unpin_pages(obj);
 	return ret;
 }
@@ -1115,12 +1122,7 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		obj_do_bit17_swizzling = BIT(17);
 
-	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
-	if (ret)
-		return ret;
-
 	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
-	mutex_unlock(&obj->base.dev->struct_mutex);
 	if (ret)
 		return ret;
 
@@ -1186,10 +1188,6 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 	u64 remain, offset;
 	int ret;
 
-	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
-	if (ret)
-		return ret;
-
 	intel_runtime_pm_get(i915);
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 				       PIN_MAPPABLE |
@@ -1213,12 +1211,12 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 		GEM_BUG_ON(!node.allocated);
 	}
 
+	i915_gem_object_lock(obj);
 	ret = i915_gem_object_set_to_gtt_domain(obj, false);
+	i915_gem_object_unlock(obj);
 	if (ret)
 		goto out_unpin;
 
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	user_data = u64_to_user_ptr(args->data_ptr);
 	remain = args->size;
 	offset = args->offset;
@@ -1255,7 +1253,6 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 		offset += page_length;
 	}
 
-	mutex_lock(&i915->drm.struct_mutex);
 out_unpin:
 	if (node.allocated) {
 		wmb();
@@ -1268,7 +1265,6 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 	}
 out_unlock:
 	intel_runtime_pm_put(i915);
-	mutex_unlock(&i915->drm.struct_mutex);
 
 	return ret;
 }
@@ -1375,10 +1371,6 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 	void __user *user_data;
 	int ret;
 
-	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
-	if (ret)
-		return ret;
-
 	if (i915_gem_object_has_struct_page(obj)) {
 		/*
 		 * Avoid waking the device up if we can fallback, as
@@ -1418,12 +1410,12 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		GEM_BUG_ON(!node.allocated);
 	}
 
+	i915_gem_object_lock(obj);
 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
+	i915_gem_object_unlock(obj);
 	if (ret)
 		goto out_unpin;
 
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 
 	user_data = u64_to_user_ptr(args->data_ptr);
@@ -1467,7 +1459,6 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 	}
 	intel_fb_obj_flush(obj, ORIGIN_CPU);
 
-	mutex_lock(&i915->drm.struct_mutex);
 out_unpin:
 	if (node.allocated) {
 		wmb();
@@ -1481,7 +1472,6 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 out_rpm:
 	intel_runtime_pm_put(i915);
 out_unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
 	return ret;
 }
 
@@ -1550,21 +1540,15 @@ static int
 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 		      const struct drm_i915_gem_pwrite *args)
 {
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	void __user *user_data;
-	u64 remain;
 	unsigned int obj_do_bit17_swizzling;
 	unsigned int partial_cacheline_write;
 	unsigned int needs_clflush;
 	unsigned int offset, idx;
+	void __user *user_data;
+	u64 remain;
 	int ret;
 
-	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
-	if (ret)
-		return ret;
-
 	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
-	mutex_unlock(&i915->drm.struct_mutex);
 	if (ret)
 		return ret;
 
@@ -1786,22 +1770,20 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	if (err)
 		goto out;
 
-	err = i915_mutex_lock_interruptible(dev);
-	if (err)
-		goto out_unpin;
-
+	i915_gem_object_lock(obj);
 	if (read_domains & I915_GEM_DOMAIN_WC)
 		err = i915_gem_object_set_to_wc_domain(obj, write_domain);
 	else if (read_domains & I915_GEM_DOMAIN_GTT)
 		err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
 	else
 		err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
+	i915_gem_object_unlock(obj);
+	if (err)
+		goto out_unpin;
 
 	/* And bump the LRU for this access */
 	i915_gem_object_bump_inactive_ggtt(obj);
 
-	mutex_unlock(&dev->struct_mutex);
-
 	if (write_domain != 0)
 		intel_fb_obj_invalidate(obj,
 					fb_write_origin(obj, write_domain));
@@ -2059,9 +2041,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 
 	intel_runtime_pm_get(dev_priv);
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		goto err_rpm;
+	i915_gem_object_lock(obj);
 
 	/* Access to snoopable pages through the GTT is incoherent. */
 	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
@@ -2128,8 +2108,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 err_unpin:
 	__i915_vma_unpin(vma);
 err_unlock:
-	mutex_unlock(&dev->struct_mutex);
-err_rpm:
+	i915_gem_object_unlock(obj);
 	intel_runtime_pm_put(dev_priv);
 	i915_gem_object_unpin_pages(obj);
 err:
@@ -3448,9 +3427,9 @@ void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
 	if (!READ_ONCE(obj->pin_global))
 		return;
 
-	mutex_lock(&obj->base.dev->struct_mutex);
+	i915_gem_object_lock(obj);
 	__i915_gem_object_flush_for_display(obj);
-	mutex_unlock(&obj->base.dev->struct_mutex);
+	i915_gem_object_unlock(obj);
 }
 
 /**
@@ -3466,11 +3445,10 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
 {
 	int ret;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	lockdep_assert_held(&obj->resv->lock.base);
 
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
-				   I915_WAIT_LOCKED |
 				   (write ? I915_WAIT_ALL : 0),
 				   MAX_SCHEDULE_TIMEOUT,
 				   NULL);
@@ -3529,11 +3507,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
 	int ret;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	lockdep_assert_held(&obj->resv->lock.base);
 
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
-				   I915_WAIT_LOCKED |
 				   (write ? I915_WAIT_ALL : 0),
 				   MAX_SCHEDULE_TIMEOUT,
 				   NULL);
@@ -3600,7 +3577,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	struct i915_vma *vma;
 	int ret;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	lockdep_assert_held(&obj->resv->lock.base);
 
 	if (obj->cache_level == cache_level)
 		return 0;
@@ -3797,12 +3774,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		goto out;
-
+	i915_gem_object_lock(obj);
 	ret = i915_gem_object_set_cache_level(obj, level);
-	mutex_unlock(&dev->struct_mutex);
+	i915_gem_object_unlock(obj);
 
 out:
 	i915_gem_object_put(obj);
@@ -3824,7 +3798,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	struct i915_vma *vma;
 	int ret;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	lockdep_assert_held(&obj->resv->lock.base);
 
 	/* Mark the global pin early so that we account for the
 	 * display coherency whilst setting up the cache domains.
@@ -3886,7 +3860,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 void
 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
 {
-	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+	lockdep_assert_held(&vma->obj->resv->lock.base);
 
 	if (WARN_ON(vma->obj->pin_global == 0))
 		return;
@@ -3913,11 +3887,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 {
 	int ret;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	lockdep_assert_held(&obj->resv->lock.base);
 
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
-				   I915_WAIT_LOCKED |
 				   (write ? I915_WAIT_ALL : 0),
 				   MAX_SCHEDULE_TIMEOUT,
 				   NULL);
@@ -4738,12 +4711,13 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
 	 * machine in an unusable condition.
 	 */
 
-	mutex_lock(&i915->drm.struct_mutex);
 	for (phase = phases; *phase; phase++) {
-		list_for_each_entry(obj, *phase, mm.link)
+		list_for_each_entry(obj, *phase, mm.link) {
+			i915_gem_object_lock(obj);
 			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+			i915_gem_object_unlock(obj);
+		}
 	}
-	mutex_unlock(&i915->drm.struct_mutex);
 
 	intel_uc_sanitize(i915);
 	i915_gem_sanitize(i915);
@@ -5006,7 +4980,9 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 		if (err)
 			goto err_active;
 
+		i915_gem_object_lock(state->obj);
 		err = i915_gem_object_set_to_cpu_domain(state->obj, false);
+		i915_gem_object_unlock(state->obj);
 		if (err)
 			goto err_active;
 
@@ -5384,12 +5360,13 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
 	i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
 	i915_gem_drain_freed_objects(i915);
 
-	mutex_lock(&i915->drm.struct_mutex);
 	for (phase = phases; *phase; phase++) {
-		list_for_each_entry(obj, *phase, mm.link)
+		list_for_each_entry(obj, *phase, mm.link) {
+			i915_gem_object_lock(obj);
 			WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
+			i915_gem_object_unlock(obj);
+		}
 	}
-	mutex_unlock(&i915->drm.struct_mutex);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 89a38bd0ed98..29542fce92ea 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -51,9 +51,10 @@ enum {
 #define __EXEC_OBJECT_HAS_REF		BIT(31)
 #define __EXEC_OBJECT_HAS_PIN		BIT(30)
 #define __EXEC_OBJECT_HAS_FENCE		BIT(29)
-#define __EXEC_OBJECT_NEEDS_MAP		BIT(28)
-#define __EXEC_OBJECT_NEEDS_BIAS	BIT(27)
-#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 27) /* all of the above */
+#define __EXEC_OBJECT_LOCKED		BIT(28)
+#define __EXEC_OBJECT_NEEDS_MAP		BIT(27)
+#define __EXEC_OBJECT_NEEDS_BIAS	BIT(26)
+#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 26) /* all of the above */
 #define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
 
 #define __EXEC_HAS_RELOC	BIT(31)
@@ -429,6 +430,9 @@ static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
 	if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
 		__i915_vma_unpin_fence(vma);
 
+	if (flags & __EXEC_OBJECT_LOCKED)
+		i915_gem_object_unlock(vma->obj);
+
 	__i915_vma_unpin(vma);
 }
 
@@ -1812,6 +1816,9 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 			eb->request->capture_list = capture;
 		}
 
+		i915_gem_object_lock(obj);
+		eb->flags[i] |= __EXEC_OBJECT_LOCKED;
+
 		/*
 		 * If the GPU is not _reading_ through the CPU cache, we need
 		 * to make sure that any writes (both previous GPU writes from
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 447407fee3b8..e3279b2df67d 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1510,7 +1510,9 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
 		goto unlock;
 	}
 
+	i915_gem_object_lock(bo);
 	ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
+	i915_gem_object_unlock(bo);
 	if (ret)
 		goto err_unref;
 
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 84d1d38a600f..7ebd6dca26a1 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -573,7 +573,9 @@ static int init_status_page(struct intel_engine_cs *engine)
 		return PTR_ERR(obj);
 	}
 
+	i915_gem_object_lock(obj);
 	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	i915_gem_object_unlock(obj);
 	if (ret)
 		goto err;
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 44a0d3010059..bd4f50c5fa21 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1337,7 +1337,9 @@ alloc_context_vma(struct intel_engine_cs *engine)
 	 */
 	if (IS_IVYBRIDGE(i915)) {
 		/* Ignore any error, regard it as a simple optimisation */
+		i915_gem_object_lock(obj);
 		i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+		i915_gem_object_unlock(obj);
 	}
 
 	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 51b5818e7fac..de744ae67c88 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -239,7 +239,9 @@ static int igt_evict_for_cache_color(void *arg)
 		err = PTR_ERR(obj);
 		goto cleanup;
 	}
+	i915_gem_object_lock(obj);
 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	i915_gem_object_unlock(obj);
 
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 				       I915_GTT_PAGE_SIZE | flags);
@@ -254,7 +256,10 @@ static int igt_evict_for_cache_color(void *arg)
 		err = PTR_ERR(obj);
 		goto cleanup;
 	}
+
+	i915_gem_object_lock(obj);
 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	i915_gem_object_unlock(obj);
 
 	/* Neighbouring; same colour - should fit */
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index e522a02343d5..570bb2c13f3b 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -66,7 +66,10 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
 		goto err_hws;
 	}
 
+	i915_gem_object_lock(h->hws);
 	i915_gem_object_set_cache_level(h->hws, I915_CACHE_LLC);
+	i915_gem_object_unlock(h->hws);
+
 	vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
 		err = PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 97f37ee0ae8f..eb631f0a6827 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -42,7 +42,10 @@ static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
 		goto err_hws;
 	}
 
+	i915_gem_object_lock(spin->hws);
 	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
+	i915_gem_object_unlock(spin->hws);
+
 	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
 		err = PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index 40fb481bbbf4..da4fe7f03666 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -24,7 +24,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 	if (IS_ERR(result))
 		return result;
 
+	i915_gem_object_lock(result);
 	i915_gem_object_set_cache_level(result, I915_CACHE_LLC);
+	i915_gem_object_unlock(result);
 
 	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
 	if (IS_ERR(cs)) {
-- 
2.18.0


