[PATCH 2/2] timeline-retire

Chris Wilson <chris@chris-wilson.co.uk>
Tue Mar 12 14:28:52 UTC 2019
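
Retire completed requests along each i915_timeline, under that
timeline's own mutex, instead of walking the global gt.active_rings
list under struct_mutex. With requests tracked per timeline, the
ring->active_link bookkeeping and the last_request_on_engine() helper
are redundant and can be removed; i915_request_retire() and
i915_request_retire_upto() now assert the timeline mutex rather than
struct_mutex.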


---
 drivers/gpu/drm/i915/i915_drv.h               |   5 +-
 drivers/gpu/drm/i915/i915_gem.c               |   2 -
 drivers/gpu/drm/i915/i915_gem_context.c       |  65 +++++------
 drivers/gpu/drm/i915/i915_request.c           | 104 +++++++++++-------
 drivers/gpu/drm/i915/intel_engine_types.h     |   1 -
 .../gpu/drm/i915/selftests/mock_gem_device.c  |   1 -
 6 files changed, 99 insertions(+), 79 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 05bb2ceda6bd..8c1eab6bb80e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1996,10 +1996,9 @@ struct drm_i915_private {
 		} timelines;
 
 		intel_engine_mask_t active_engines;
-		struct list_head active_rings;
-		struct list_head closed_vma;
-		atomic_t active_requests;
 		struct mutex active_mutex;
+		atomic_t active_requests;
+		struct list_head closed_vma;
 
 		/**
 		 * Is the GPU currently considered idle, or busy executing
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dcbe5bc6993b..83efe4030203 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -109,7 +109,6 @@ static void __i915_gem_park(struct drm_i915_private *i915)
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
 	GEM_BUG_ON(atomic_read(&i915->gt.active_requests));
-	GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
 
 	if (!i915->gt.awake)
 		return;
@@ -5047,7 +5046,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
 	int err;
 
 	mutex_init(&dev_priv->gt.active_mutex);
-	INIT_LIST_HEAD(&dev_priv->gt.active_rings);
 	INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
 
 	i915_gem_init__mm(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index b6370225dcb5..a366049346ee 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -656,27 +656,6 @@ void i915_gem_context_close(struct drm_file *file)
 	idr_destroy(&file_priv->context_idr);
 }
 
-static struct i915_request *
-last_request_on_engine(struct i915_timeline *timeline,
-		       struct intel_engine_cs *engine)
-{
-	struct i915_request *rq;
-
-	GEM_BUG_ON(timeline == &engine->timeline);
-
-	rq = i915_active_request_raw(&timeline->last_request,
-				     &engine->i915->drm.struct_mutex);
-	if (rq && rq->engine == engine) {
-		GEM_TRACE("last request for %s on engine %s: %llx:%llu\n",
-			  timeline->name, engine->name,
-			  rq->fence.context, rq->fence.seqno);
-		GEM_BUG_ON(rq->timeline != timeline);
-		return rq;
-	}
-
-	return NULL;
-}
-
 struct context_barrier_task {
 	struct i915_active base;
 	void (*task)(void *data);
@@ -754,10 +733,9 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
 				      unsigned long mask)
 {
+	struct i915_gt_timelines *gt = &i915->gt.timelines;
 	struct intel_engine_cs *engine;
 
-	GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
-
 	lockdep_assert_held(&i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915->kernel_context);
 
@@ -766,7 +744,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
 		return 0;
 
 	for_each_engine_masked(engine, i915, mask, mask) {
-		struct intel_ring *ring;
+		struct i915_timeline *tl;
 		struct i915_request *rq;
 
 		rq = i915_request_alloc(engine, i915->kernel_context);
@@ -774,24 +752,41 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
 			return PTR_ERR(rq);
 
 		/* Queue this switch after all other activity */
-		list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
+		mutex_lock(&gt->mutex);
+		list_for_each_entry(tl, &gt->active_list, link) {
 			struct i915_request *prev;
+			int err;
 
-			prev = last_request_on_engine(ring->timeline, engine);
-			if (!prev)
+			if (tl == rq->timeline)
 				continue;
 
-			if (prev->gem_context == i915->kernel_context)
+			if (i915_timeline_sync_is_later(tl, &rq->fence))
 				continue;
 
-			GEM_TRACE("add barrier on %s for %llx:%lld\n",
-				  engine->name,
-				  prev->fence.context,
-				  prev->fence.seqno);
-			i915_sw_fence_await_sw_fence_gfp(&rq->submit,
-							 &prev->submit,
-							 I915_FENCE_GFP);
+			prev = i915_active_request_raw(&tl->last_request,
+						       &i915->drm.struct_mutex);
+			GEM_BUG_ON(prev == rq);
+			if (!prev || (prev->engine->mask & engine->mask) == 0)
+				continue;
+
+			i915_timeline_get(tl);
+			mutex_unlock(&gt->mutex);
+
+			err = i915_active_request_set(&tl->last_request, rq);
+			if (err == 0)
+				err = i915_timeline_sync_set(tl, &rq->fence);
+			if (err) {
+				i915_timeline_put(tl);
+				i915_request_add(rq);
+				return err;
+			}
+
+			mutex_lock(&gt->mutex);
+			i915_timeline_put(tl);
+
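+			/* Restart the walk: entries may have been retired while unlocked */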
+			tl = list_entry(&gt->active_list, typeof(*tl), link);
 		}
+		mutex_unlock(&gt->mutex);
 
 		i915_request_add(rq);
 	}
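
Note the iterator dance in the hunk above: gt->mutex is dropped around
the blocking i915_active_request_set() call, and because timelines may
come and go while the lock is released, the walk restarts from the head
of active_list. A minimal sketch of that restart idiom, stripped of the
i915 specifics (struct obj, obj_get/obj_put and do_blocking_work are
placeholders, not real API):

    #include <linux/list.h>
    #include <linux/mutex.h>

    struct obj { struct list_head link; };

    void obj_get(struct obj *o);          /* placeholder: pin the element */
    void obj_put(struct obj *o);          /* placeholder: unpin */
    void do_blocking_work(struct obj *o); /* placeholder: may sleep */

    static void walk_restartable(struct list_head *head, struct mutex *lock)
    {
            struct obj *it;

            mutex_lock(lock);
            list_for_each_entry(it, head, link) {
                    obj_get(it);          /* element must outlive the unlock */
                    mutex_unlock(lock);

                    do_blocking_work(it); /* the list may mutate meanwhile */

                    mutex_lock(lock);
                    obj_put(it);
                    /*
                     * Restart from the head: list_for_each_entry()
                     * advances with list_next_entry(), so pointing 'it'
                     * at the head pseudo-element makes the next
                     * iteration begin at the first (possibly new) entry.
                     */
                    it = list_entry(head, typeof(*it), link);
            }
            mutex_unlock(lock);
    }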
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index cc2e83fc9c85..49021c7f7208 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -180,7 +180,6 @@ static void advance_ring(struct i915_request *request)
 		 */
 		GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
 		tail = READ_ONCE(request->tail);
-		list_del(&ring->active_link);
 	} else {
 		tail = request->postfix;
 	}
@@ -272,7 +271,7 @@ static void i915_request_retire(struct i915_request *request)
 		  request->fence.context, request->fence.seqno,
 		  hwsp_seqno(request));
 
-	lockdep_assert_held(&request->i915->drm.struct_mutex);
+	lockdep_assert_held(&request->timeline->mutex);
 	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
 	GEM_BUG_ON(!i915_request_completed(request));
 
@@ -332,11 +331,9 @@ void i915_request_retire_upto(struct i915_request *rq)
 		  rq->fence.context, rq->fence.seqno,
 		  hwsp_seqno(rq));
 
-	lockdep_assert_held(&rq->i915->drm.struct_mutex);
+	lockdep_assert_held(&rq->timeline->mutex);
 	GEM_BUG_ON(!i915_request_completed(rq));
-
-	if (list_empty(&rq->ring_link))
-		return;
+	GEM_BUG_ON(list_empty(&rq->ring_link));
 
 	do {
 		tmp = list_first_entry(&ring->request_list,
@@ -563,33 +560,32 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 	return NOTIFY_DONE;
 }
 
-static void ring_retire_requests(struct intel_ring *ring)
+static noinline struct i915_request *
+i915_request_alloc_slow(struct i915_timeline *timeline, gfp_t gfp)
 {
 	struct i915_request *rq, *rn;
 
-	list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) {
-		if (!i915_request_completed(rq))
-			break;
-
-		i915_request_retire(rq);
-	}
-}
-
-static noinline struct i915_request *
-i915_request_alloc_slow(struct intel_context *ce)
-{
-	struct intel_ring *ring = ce->ring;
-	struct i915_request *rq;
+	if (!gfpflags_allow_blocking(gfp))
+		goto out;
 
-	if (list_empty(&ring->request_list))
+	if (list_empty(&timeline->requests))
 		goto out;
 
+	lockdep_assert_held(&timeline->mutex); /* held by our sole caller */
+
 	/* Ratelimit ourselves to prevent oom from malicious clients */
-	rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
+	rq = list_last_entry(&timeline->requests, typeof(*rq), link);
 	cond_synchronize_rcu(rq->rcustate);
 
 	/* Retire our old requests in the hope that we free some */
-	ring_retire_requests(ring);
+	list_for_each_entry_safe(rq, rn, &timeline->requests, link) {
+		if (!i915_request_completed(rq))
+			break;
+
+		i915_request_retire(rq);
+	}
 
 out:
-	return kmem_cache_alloc(global.slab_requests, GFP_KERNEL);
+	return kmem_cache_alloc(global.slab_requests, gfp);
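
The ratelimit above works because every request snapshots an RCU
grace-period cookie at allocation (rq->rcustate, set via
get_state_synchronize_rcu() elsewhere in this file), and
cond_synchronize_rcu() blocks only if that grace period has not yet
elapsed. A standalone sketch of the idiom, with placeholder names
(struct item, item_cache):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    /* Placeholders for illustration; not i915 code. */
    struct item {
            unsigned long rcustate;
    };

    static struct item *item_alloc(struct kmem_cache *item_cache, gfp_t gfp)
    {
            struct item *it = kmem_cache_alloc(item_cache, gfp);

            if (it) /* cookie for a later cond_synchronize_rcu() */
                    it->rcustate = get_state_synchronize_rcu();
            return it;
    }

    static struct item *item_alloc_throttled(struct kmem_cache *item_cache,
                                             struct item *oldest, gfp_t gfp)
    {
            /*
             * Block until a grace period has elapsed since 'oldest'
             * was allocated; a client hammering the slow path is thus
             * ratelimited to roughly one allocation per grace period.
             */
            cond_synchronize_rcu(oldest->rcustate);

            return kmem_cache_alloc(item_cache, gfp);
    }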
@@ -646,7 +642,9 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 		return ERR_CAST(ce);
 
 	reserve_gt(i915);
-	mutex_lock(&ce->ring->timeline->mutex);
+
+	tl = ce->ring->timeline;
+	mutex_lock(&tl->mutex);
 
 	/* Move our oldest request to the slab-cache (if not in use!) */
 	rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
@@ -686,7 +684,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	rq = kmem_cache_alloc(global.slab_requests,
 			      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 	if (unlikely(!rq)) {
-		rq = i915_request_alloc_slow(ce);
+		rq = i915_request_alloc_slow(tl, GFP_KERNEL);
 		if (!rq) {
 			ret = -ENOMEM;
 			goto err_unreserve;
@@ -696,7 +694,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	INIT_LIST_HEAD(&rq->active_list);
 	INIT_LIST_HEAD(&rq->execute_cb);
 
-	tl = ce->ring->timeline;
 	ret = i915_timeline_get_seqno(tl, rq, &seqno);
 	if (ret)
 		goto err_free;
@@ -1075,10 +1072,6 @@ void i915_request_add(struct i915_request *request)
 	__i915_active_request_set(&timeline->last_request, request);
 
 	list_add_tail(&request->ring_link, &ring->request_list);
-	if (list_is_first(&request->ring_link, &ring->request_list)) {
-		GEM_TRACE("marking %s as active\n", ring->timeline->name);
-		list_add(&ring->active_link, &request->i915->gt.active_rings);
-	}
 	request->i915->gt.active_engines |= request->engine->mask;
 	request->emitted_jiffies = jiffies;
 
@@ -1145,8 +1138,22 @@ void i915_request_add(struct i915_request *request)
 	 * work on behalf of others -- but instead we should benefit from
 	 * improved resource management. (Well, that's the theory at least.)
 	 */
-	if (prev && i915_request_completed(prev))
-		i915_request_retire_upto(prev);
+	do {
+		prev = list_first_entry(&request->timeline->requests,
+					typeof(*prev), link);
+
+		/*
+		 * Keep the current request, the caller may not be
+		 * expecting it to be retired (and freed!) immediately,
+		 * and preserving one request from the client allows us to
+		 * carry forward frequently reused state onto the next
+		 * submission.
+		 */
+		if (prev == request || !i915_request_completed(prev))
+			break;
+
+		i915_request_retire(prev);
+	} while (1);
 
 	mutex_unlock(&request->timeline->mutex);
 }
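
For clarity: the retire loop appended to i915_request_add() runs with
request->timeline->mutex already held (taken in i915_request_alloc()
and released just above), which is exactly the lock that
i915_request_retire() now asserts. An illustrative outline of that
invariant, mirroring the loop above:

    /* Outline only; restates the loop in the hunk above. */
    static void prune_timeline_locked(struct i915_timeline *tl,
                                      struct i915_request *just_added)
    {
            struct i915_request *oldest;

            lockdep_assert_held(&tl->mutex);

            for (;;) {
                    oldest = list_first_entry(&tl->requests,
                                              typeof(*oldest), link);

                    /* Keep the caller's request and anything in flight */
                    if (oldest == just_added ||
                        !i915_request_completed(oldest))
                            break;

                    i915_request_retire(oldest);
            }
    }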
@@ -1336,15 +1343,38 @@ long i915_request_wait(struct i915_request *rq,
 
 void i915_retire_requests(struct drm_i915_private *i915)
 {
-	struct intel_ring *ring, *tmp;
-
-	lockdep_assert_held(&i915->drm.struct_mutex);
+	struct i915_gt_timelines *gt = &i915->gt.timelines;
+	struct i915_timeline *tl, *tn;
 
 	if (!atomic_read(&i915->gt.active_requests))
 		return;
 
-	list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
-		ring_retire_requests(ring);
+	mutex_lock(&gt->mutex);
+	list_for_each_entry_safe(tl, tn, &gt->active_list, link) {
+		struct i915_request *rq, *rn;
+
+		if (!rcu_access_pointer(tl->last_request.request))
+			continue;
+
+		i915_timeline_get(tl);
+
+		mutex_unlock(&gt->mutex);
+		if (!mutex_trylock(&tl->mutex))
+			goto relock;
+
+		list_for_each_entry_safe(rq, rn, &tl->requests, link) {
+			if (!i915_request_completed(rq))
+				break;
+
+			i915_request_retire(rq);
+		}
+
+		mutex_unlock(&tl->mutex);
+relock:
+		mutex_lock(&gt->mutex);
+		i915_timeline_put(tl);
+	}
+	mutex_unlock(&gt->mutex);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
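
i915_retire_requests() above walks active_list under gt->mutex but must
take each timeline's own mutex to retire; it drops the global lock
first and uses mutex_trylock(), so a contended timeline is skipped
rather than risking an inversion against a path that holds tl->mutex
and wants gt->mutex. A reduced sketch of the pattern (struct elem,
elem_get/elem_put and retire_one are placeholders):

    #include <linux/list.h>
    #include <linux/mutex.h>

    /* Placeholders for illustration; not i915 API. */
    struct elem {
            struct list_head link;
            struct mutex lock;
    };

    void elem_get(struct elem *el);   /* pin across the unlock */
    void elem_put(struct elem *el);
    void retire_one(struct elem *el); /* call only under el->lock */

    static void retire_all(struct list_head *head, struct mutex *outer)
    {
            struct elem *el, *en;

            mutex_lock(outer);
            list_for_each_entry_safe(el, en, head, link) {
                    elem_get(el);
                    mutex_unlock(outer);

                    /* Skip, never block: keeps lock ordering one-way */
                    if (!mutex_trylock(&el->lock))
                            goto relock;

                    retire_one(el);
                    mutex_unlock(&el->lock);
    relock:
                    mutex_lock(outer);
                    elem_put(el);
            }
            mutex_unlock(outer);
    }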
diff --git a/drivers/gpu/drm/i915/intel_engine_types.h b/drivers/gpu/drm/i915/intel_engine_types.h
index b0aa1f0d4e47..90953c6751b1 100644
--- a/drivers/gpu/drm/i915/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/intel_engine_types.h
@@ -56,7 +56,6 @@ struct intel_ring {
 
 	struct i915_timeline *timeline;
 	struct list_head request_list;
-	struct list_head active_link;
 
 	u32 head;
 	u32 tail;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index fe8252d0e73c..b38e5b4d7a62 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -204,7 +204,6 @@ struct drm_i915_private *mock_gem_device(void)
 	i915_timelines_init(i915);
 
 	mutex_init(&i915->gt.active_mutex);
-	INIT_LIST_HEAD(&i915->gt.active_rings);
 	INIT_LIST_HEAD(&i915->gt.closed_vma);
 
 	mutex_lock(&i915->drm.struct_mutex);
-- 
2.20.1


