[PATCH 49/52] drm/i915: Drop the per-timeline request barrier

Chris Wilson <chris@chris-wilson.co.uk>
Sun Mar 31 12:08:32 UTC 2019

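The per-timeline barrier existed solely so that gen8_modify_rpcs() could
order all subsequent requests on the target context's timeline after the
request carrying the sseu reconfiguration, at the cost of every
i915_request_create() having to await the barrier first. gen8_modify_rpcs()
now achieves that ordering by installing its request as the timeline's
last_request via i915_active_request_set(), replacing both the open-coded
await on the previous request and the explicit barrier. Drop the
i915_timeline.barrier tracker, the i915_timeline_set_barrier() helper and
add_timeline_barrier() accordingly.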

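For reference, a minimal sketch of the ordering now relied upon, assuming
i915_active_request_set() awaits the previously tracked request before
publishing the new one (set_last_request() and the low-level
__i915_active_request_set() update shown here are illustrative assumptions,
not necessarily the exact in-tree code):

    static int set_last_request(struct i915_active_request *active,
                                struct i915_request *rq)
    {
            int err;

            /* Order @rq after whatever request was tracked before it. */
            err = i915_request_await_active_request(rq, active);
            if (err)
                    return err;

            /* Publish @rq as the request later users must wait upon
             * (assumed to be a plain tracker update, no extra ordering).
             */
            __i915_active_request_set(active, rq);
            return 0;
    }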
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c    | 17 ++++-------------
 drivers/gpu/drm/i915/i915_request.c            |  9 ---------
 drivers/gpu/drm/i915/i915_timeline.c           |  2 --
 drivers/gpu/drm/i915/i915_timeline.h           | 15 ---------------
 drivers/gpu/drm/i915/i915_timeline_types.h     | 10 ----------
 drivers/gpu/drm/i915/selftests/mock_timeline.c |  1 -
 6 files changed, 4 insertions(+), 50 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index ebd36fe4049e..8c0d7cb50848 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1202,7 +1202,7 @@ static int
 gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 {
 	struct drm_i915_private *i915 = ce->engine->i915;
-	struct i915_request *rq, *prev;
+	struct i915_request *rq;
 	int ret;
 
 	lockdep_assert_held(&ce->pin_mutex);
@@ -1226,13 +1226,9 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 	}
 
 	/* Queue this switch after all other activity by this context. */
-	prev = i915_active_request_raw(&ce->ring->timeline->last_request,
-				       &i915->drm.struct_mutex);
-	if (prev && !i915_request_completed(prev)) {
-		ret = i915_request_await_dma_fence(rq, &prev->fence);
-		if (ret < 0)
-			goto out_add;
-	}
+	ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
+	if (ret)
+		goto out_add;
 
 	/*
 	 * Guarantee context image and the timeline remains pinned until the
@@ -1246,11 +1242,6 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 	if (ret)
 		goto out_add;
 
-	/* Order all following requests to be after. */
-	ret = i915_timeline_set_barrier(ce->ring->timeline, rq);
-	if (ret)
-		goto out_add;
-
 	ret = gen8_emit_rpcs_config(rq, ce, sseu);
 
 out_add:
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 362eb3af86e1..706b8425a84f 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -590,11 +590,6 @@ i915_request_alloc_slow(struct intel_context *ce)
 	return kmem_cache_alloc(global.slab_requests, GFP_KERNEL);
 }
 
-static int add_timeline_barrier(struct i915_request *rq)
-{
-	return i915_request_await_active_request(rq, &rq->timeline->barrier);
-}
-
 struct i915_request *i915_request_create(struct intel_context *ce)
 {
 	struct drm_i915_private *i915 = ce->engine->i915;
@@ -711,10 +706,6 @@ struct i915_request *i915_request_create(struct intel_context *ce)
 	 */
 	rq->head = rq->ring->emit;
 
-	ret = add_timeline_barrier(rq);
-	if (ret)
-		goto err_unwind;
-
 	ret = rq->engine->request_alloc(rq);
 	if (ret)
 		goto err_unwind;
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c
index bd6fabb3be7c..000e1a9b6750 100644
--- a/drivers/gpu/drm/i915/i915_timeline.c
+++ b/drivers/gpu/drm/i915/i915_timeline.c
@@ -254,7 +254,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
 	spin_lock_init(&timeline->lock);
 	mutex_init(&timeline->mutex);
 
-	INIT_ACTIVE_REQUEST(&timeline->barrier);
 	INIT_ACTIVE_REQUEST(&timeline->last_request);
 	INIT_LIST_HEAD(&timeline->requests);
 
@@ -327,7 +326,6 @@ void i915_timeline_fini(struct i915_timeline *timeline)
 {
 	GEM_BUG_ON(timeline->pin_count);
 	GEM_BUG_ON(!list_empty(&timeline->requests));
-	GEM_BUG_ON(i915_active_request_isset(&timeline->barrier));
 
 	i915_syncmap_free(&timeline->sync);
 
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index 4ca7f80bdf6d..27668a1a69a3 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -110,19 +110,4 @@ void i915_timelines_init(struct drm_i915_private *i915);
 void i915_timelines_park(struct drm_i915_private *i915);
 void i915_timelines_fini(struct drm_i915_private *i915);
 
-/**
- * i915_timeline_set_barrier - orders submission between different timelines
- * @timeline: timeline to set the barrier on
- * @rq: request after which new submissions can proceed
- *
- * Sets the passed in request as the serialization point for all subsequent
- * submissions on @timeline. Subsequent requests will not be submitted to GPU
- * until the barrier has been completed.
- */
-static inline int
-i915_timeline_set_barrier(struct i915_timeline *tl, struct i915_request *rq)
-{
-	return i915_active_request_set(&tl->barrier, rq);
-}
-
 #endif
diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/i915_timeline_types.h
index 57c79830bb8c..1688705f4a2b 100644
--- a/drivers/gpu/drm/i915/i915_timeline_types.h
+++ b/drivers/gpu/drm/i915/i915_timeline_types.h
@@ -62,16 +62,6 @@ struct i915_timeline {
 	 */
 	struct i915_syncmap *sync;
 
-	/**
-	 * Barrier provides the ability to serialize ordering between different
-	 * timelines.
-	 *
-	 * Users can call i915_timeline_set_barrier which will make all
-	 * subsequent submissions to this timeline be executed only after the
-	 * barrier has been completed.
-	 */
-	struct i915_active_request barrier;
-
 	struct list_head link;
 	struct drm_i915_private *i915;
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c
index 416d85233263..e084476469ef 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c
@@ -16,7 +16,6 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
 	spin_lock_init(&timeline->lock);
 	mutex_init(&timeline->mutex);
 
-	INIT_ACTIVE_REQUEST(&timeline->barrier);
 	INIT_ACTIVE_REQUEST(&timeline->last_request);
 	INIT_LIST_HEAD(&timeline->requests);
 
-- 
2.20.1


