[Intel-gfx] [PATCH 2/4] drm/i915: Retire requests along rings

Tvrtko Ursulin tvrtko.ursulin at linux.intel.com
Mon Apr 23 08:47:57 UTC 2018


On 20/04/2018 14:20, Chris Wilson wrote:
> In the next patch, rings are the central timeline as requests may jump
> between engines. Therefore in the future as we retire in order along the
> engine timeline, we may retire out-of-order within a ring (as the ring now
> spans multiple engines), leading to much hilarity in miscomputing
> the position of ring->head.
> 
> As an added bonus, retiring along the ring reduces the penalty of having
> one execlists client do cleanup for another (old legacy submission
> shares a ring between all clients). The downside is that the slow and
> irregular (off the critical path) process of cleaning up stale requests
> after userspace becomes a modicum less efficient.
> 
> In the long run, it will become apparent that the ordered
> ring->request_list matches the ring->timeline; a fun challenge for the
> future will be unifying the two lists to avoid duplication!
> 
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
> ---
>   drivers/gpu/drm/i915/i915_drv.h               |  3 +-
>   drivers/gpu/drm/i915/i915_gem.c               |  1 +
>   drivers/gpu/drm/i915/i915_request.c           | 43 ++++++++-----------
>   drivers/gpu/drm/i915/intel_ringbuffer.c       |  5 +++
>   drivers/gpu/drm/i915/intel_ringbuffer.h       |  1 +
>   drivers/gpu/drm/i915/selftests/mock_engine.c  | 27 +++++++++---
>   .../gpu/drm/i915/selftests/mock_gem_device.c  |  2 +
>   7 files changed, 50 insertions(+), 32 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 028691108125..e177d2bda87d 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2055,8 +2055,9 @@ struct drm_i915_private {
>   		void (*resume)(struct drm_i915_private *);
>   		void (*cleanup_engine)(struct intel_engine_cs *engine);
>   
> -		struct list_head timelines;
>   		struct i915_gem_timeline global_timeline;
> +		struct list_head timelines;
> +		struct list_head rings;
>   		u32 active_requests;
>   
>   		/**
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 795ca83aed7a..906e2395c245 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -5600,6 +5600,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
>   		goto err_dependencies;
>   
>   	mutex_lock(&dev_priv->drm.struct_mutex);
> +	INIT_LIST_HEAD(&dev_priv->gt.rings);
>   	INIT_LIST_HEAD(&dev_priv->gt.timelines);
>   	err = i915_gem_timeline_init__global(dev_priv);
>   	mutex_unlock(&dev_priv->drm.struct_mutex);
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 1437538d5bfa..0bf949ec9f1a 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -292,6 +292,7 @@ void i915_gem_retire_noop(struct i915_gem_active *active,
>   
>   static void advance_ring(struct i915_request *request)
>   {
> +	struct intel_ring *ring = request->ring;
>   	unsigned int tail;
>   
>   	/*
> @@ -303,7 +304,9 @@ static void advance_ring(struct i915_request *request)
>   	 * Note this requires that we are always called in request
>   	 * completion order.
>   	 */
> -	if (list_is_last(&request->ring_link, &request->ring->request_list)) {
> +	GEM_BUG_ON(request != list_first_entry(&ring->request_list,
> +					       typeof(*request), ring_link));
> +	if (list_is_last(&request->ring_link, &ring->request_list)) {
>   		/*
>   		 * We may race here with execlists resubmitting this request
>   		 * as we retire it. The resubmission will move the ring->tail
> @@ -318,7 +321,7 @@ static void advance_ring(struct i915_request *request)
>   	}
>   	list_del(&request->ring_link);
>   
> -	request->ring->head = tail;
> +	ring->head = tail;
>   }
>   
>   static void free_capture_list(struct i915_request *request)
> @@ -424,18 +427,18 @@ static void i915_request_retire(struct i915_request *request)
>   
>   void i915_request_retire_upto(struct i915_request *rq)
>   {
> -	struct intel_engine_cs *engine = rq->engine;
> +	struct intel_ring *ring = rq->ring;
>   	struct i915_request *tmp;
>   
>   	lockdep_assert_held(&rq->i915->drm.struct_mutex);
>   	GEM_BUG_ON(!i915_request_completed(rq));
>   
> -	if (list_empty(&rq->link))
> +	if (list_empty(&rq->ring_link))
>   		return;
>   
>   	do {
> -		tmp = list_first_entry(&engine->timeline->requests,
> -				       typeof(*tmp), link);
> +		tmp = list_first_entry(&ring->request_list,
> +				       typeof(*tmp), ring_link);
>   
>   		i915_request_retire(tmp);
>   	} while (tmp != rq);
> @@ -644,9 +647,9 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
>   	if (ret)
>   		goto err_unreserve;
>   
> -	/* Move the oldest request to the slab-cache (if not in use!) */
> -	rq = list_first_entry_or_null(&engine->timeline->requests,
> -				      typeof(*rq), link);
> +	/* Move our oldest request to the slab-cache (if not in use!) */
> +	rq = list_first_entry_or_null(&ring->request_list,
> +				      typeof(*rq), ring_link);
>   	if (rq && i915_request_completed(rq))
>   		i915_request_retire(rq);
>   
> @@ -1350,38 +1353,30 @@ long i915_request_wait(struct i915_request *rq,
>   	return timeout;
>   }
>   
> -static void engine_retire_requests(struct intel_engine_cs *engine)
> +static void ring_retire_requests(struct intel_ring *ring)
>   {
>   	struct i915_request *request, *next;
> -	u32 seqno = intel_engine_get_seqno(engine);
> -	LIST_HEAD(retire);
>   
> -	spin_lock_irq(&engine->timeline->lock);
>   	list_for_each_entry_safe(request, next,
> -				 &engine->timeline->requests, link) {
> -		if (!i915_seqno_passed(seqno, request->global_seqno))
> +				 &ring->request_list, ring_link) {
> +		if (!i915_request_completed(request))
>   			break;
>   
> -		list_move_tail(&request->link, &retire);
> -	}
> -	spin_unlock_irq(&engine->timeline->lock);
> -
> -	list_for_each_entry_safe(request, next, &retire, link)
>   		i915_request_retire(request);
> +	}
>   }
>   
>   void i915_retire_requests(struct drm_i915_private *i915)
>   {
> -	struct intel_engine_cs *engine;
> -	enum intel_engine_id id;
> +	struct intel_ring *ring, *next;
>   
>   	lockdep_assert_held(&i915->drm.struct_mutex);
>   
>   	if (!i915->gt.active_requests)
>   		return;
>   
> -	for_each_engine(engine, i915, id)
> -		engine_retire_requests(engine);
> +	list_for_each_entry_safe(ring, next, &i915->gt.rings, link)
> +		ring_retire_requests(ring);

[Continuing from previous discussion on try-bot.]

I had a thought that this could be managed more efficiently in a very 
simple manner.

We rename timeline->inflight_seqnos to something like live_requests and 
manage it per ctx timeline, from request_add to request_retire. At the 
same time we only keep ring->link on i915->gt.(live_)rings while the 
ctx timeline's live_requests count is greater than zero. In other 
words, list_add on 0->1 and list_del on 1->0.

This way the retire path does not have to walk all known rings, but only 
the rings with live (unretired) requests.
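
Roughly something like the below (an untested sketch only; the 
live_requests counter, ring->live_link and gt.live_rings names are made 
up for illustration, and the counter could just as well live on the ctx 
timeline rather than the ring):

/* Illustrative names only, not the actual fields in this patch. */

static void ring_mark_active(struct intel_ring *ring,
			     struct drm_i915_private *i915)
{
	/* First live request on this ring (0->1): make the ring
	 * visible to the retire walk.
	 */
	if (ring->live_requests++ == 0)
		list_add(&ring->live_link, &i915->gt.live_rings);
}

static void ring_mark_idle(struct intel_ring *ring)
{
	/* Last live request retired (1->0): drop the ring from the
	 * retire walk.
	 */
	if (--ring->live_requests == 0)
		list_del(&ring->live_link);
}

ring_mark_active() would be called from i915_request_add and 
ring_mark_idle() from i915_request_retire, both under struct_mutex, so 
i915_retire_requests() would only need to do:

	list_for_each_entry_safe(ring, next, &i915->gt.live_rings, live_link)
		ring_retire_requests(ring);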

What do you think?

Regards,

Tvrtko



>   }
>   
>   #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index c68ac605b8a9..792a2ca95872 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1124,6 +1124,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
>   
>   	GEM_BUG_ON(!is_power_of_2(size));
>   	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
> +	lockdep_assert_held(&engine->i915->drm.struct_mutex);
>   
>   	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
>   	if (!ring)
> @@ -1149,6 +1150,8 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
>   	}
>   	ring->vma = vma;
>   
> +	list_add(&ring->link, &engine->i915->gt.rings);
> +
>   	return ring;
>   }
>   
> @@ -1160,6 +1163,8 @@ intel_ring_free(struct intel_ring *ring)
>   	i915_vma_close(ring->vma);
>   	__i915_gem_object_release_unless_active(obj);
>   
> +	list_del(&ring->link);
> +
>   	kfree(ring);
>   }
>   
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index c5e27905b0e1..d816f8dea245 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -129,6 +129,7 @@ struct intel_ring {
>   	void *vaddr;
>   
>   	struct list_head request_list;
> +	struct list_head link;
>   
>   	u32 head;
>   	u32 tail;
> diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
> index 78a89efa1119..a0bc324edadd 100644
> --- a/drivers/gpu/drm/i915/selftests/mock_engine.c
> +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
> @@ -140,9 +140,18 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
>   	INIT_LIST_HEAD(&ring->request_list);
>   	intel_ring_update_space(ring);
>   
> +	list_add(&ring->link, &engine->i915->gt.rings);
> +
>   	return ring;
>   }
>   
> +static void mock_ring_free(struct intel_ring *ring)
> +{
> +	list_del(&ring->link);
> +
> +	kfree(ring);
> +}
> +
>   struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
>   				    const char *name,
>   				    int id)
> @@ -155,12 +164,6 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
>   	if (!engine)
>   		return NULL;
>   
> -	engine->base.buffer = mock_ring(&engine->base);
> -	if (!engine->base.buffer) {
> -		kfree(engine);
> -		return NULL;
> -	}
> -
>   	/* minimal engine setup for requests */
>   	engine->base.i915 = i915;
>   	snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
> @@ -185,7 +188,16 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
>   	timer_setup(&engine->hw_delay, hw_delay_complete, 0);
>   	INIT_LIST_HEAD(&engine->hw_queue);
>   
> +	engine->base.buffer = mock_ring(&engine->base);
> +	if (!engine->base.buffer)
> +		goto err_breadcrumbs;
> +
>   	return &engine->base;
> +
> +err_breadcrumbs:
> +	intel_engine_fini_breadcrumbs(&engine->base);
> +	kfree(engine);
> +	return NULL;
>   }
>   
>   void mock_engine_flush(struct intel_engine_cs *engine)
> @@ -219,8 +231,9 @@ void mock_engine_free(struct intel_engine_cs *engine)
>   	if (engine->last_retired_context)
>   		engine->context_unpin(engine, engine->last_retired_context);
>   
> +	mock_ring_free(engine->buffer);
> +
>   	intel_engine_fini_breadcrumbs(engine);
>   
> -	kfree(engine->buffer);
>   	kfree(engine);
>   }
> diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> index e6d4b882599a..ac4bacf8b5b9 100644
> --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> @@ -44,6 +44,7 @@ void mock_device_flush(struct drm_i915_private *i915)
>   		mock_engine_flush(engine);
>   
>   	i915_retire_requests(i915);
> +	GEM_BUG_ON(i915->gt.active_requests);
>   }
>   
>   static void mock_device_release(struct drm_device *dev)
> @@ -224,6 +225,7 @@ struct drm_i915_private *mock_gem_device(void)
>   		goto err_dependencies;
>   
>   	mutex_lock(&i915->drm.struct_mutex);
> +	INIT_LIST_HEAD(&i915->gt.rings);
>   	INIT_LIST_HEAD(&i915->gt.timelines);
>   	err = i915_gem_timeline_init__global(i915);
>   	if (err) {
> 

