[PATCH 17/23] drm/i915: Await the vma's exclusive fence in move_to_active instead of chaining it onto the resv

Chris Wilson chris at chris-wilson.co.uk
Tue Aug 27 21:34:57 UTC 2019


---
 drivers/gpu/drm/i915/i915_vma.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 9c19d8422734..537ac940c9bc 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -380,6 +380,7 @@ int i915_vma_bind(struct i915_vma *vma,
 		work->cache_level = cache_level;
 		work->flags = bind_flags | I915_VMA_ALLOC;
 
+#if 0
 		if (!i915_vma_trylock(vma))
 			return -EAGAIN;
 
@@ -391,6 +392,7 @@ int i915_vma_bind(struct i915_vma *vma,
 		dma_fence_work_chain(&work->base, dma_resv_get_excl(vma->resv));
 		dma_resv_add_excl_fence(vma->resv, &work->base.dma);
 		i915_vma_unlock(vma);
+#endif
 
 		i915_active_set_exclusive(&vma->active, &work->base.dma);
 		work->base.dma.error = 0; /* enable the queue_work() */
@@ -1063,6 +1065,20 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 	assert_object_held(obj);
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
+	if (rcu_access_pointer(vma->active.excl)) {
+		struct dma_fence *fence;
+
+		rcu_read_lock();
+		fence = dma_fence_get_rcu_safe(&vma->active.excl);
+		rcu_read_unlock();
+		if (fence) {
+			err = i915_request_await_dma_fence(rq, fence);
+			dma_fence_put(fence);
+			if (unlikely(err))
+				return err;
+		}
+	}
+
 	/*
 	 * Add a reference if we're newly entering the active list.
 	 * The order in which we add operations to the retirement queue is
-- 
2.23.0



More information about the Intel-gfx-trybot mailing list