[Intel-gfx] [PATCH v2 01/14] drm/i915: Keep a global seqno per-engine

Tvrtko Ursulin tvrtko.ursulin at linux.intel.com
Wed Feb 15 17:05:40 UTC 2017


On 14/02/2017 09:54, Chris Wilson wrote:
> Replace the global device seqno with one for each engine, and account
> for in-flight seqno on each separately. This is consistent with
> dma-fence as each timeline has separate fence-contexts for each engine
> and a seqno is only ordered within a fence-context (i.e.  seqno do not
> need to be ordered wrt to other engines, just ordered within a single
> engine). This is required to enable request rewinding for preemption on
> individual engines (we have to rewind the global seqno to avoid
> overflow, and we do not have to rewind all engines just to preempt one.)
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_debugfs.c      |  5 +--
>  drivers/gpu/drm/i915/i915_gem_request.c  | 68 +++++++++++++++-----------------
>  drivers/gpu/drm/i915/i915_gem_request.h  |  8 +---
>  drivers/gpu/drm/i915/i915_gem_timeline.h |  4 +-
>  drivers/gpu/drm/i915/intel_breadcrumbs.c | 33 +++++++---------
>  drivers/gpu/drm/i915/intel_engine_cs.c   |  2 -
>  drivers/gpu/drm/i915/intel_ringbuffer.h  |  4 +-
>  7 files changed, 52 insertions(+), 72 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index cda957c674ee..9b636962cab6 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -1080,10 +1080,7 @@ static const struct file_operations i915_error_state_fops = {
>  static int
>  i915_next_seqno_get(void *data, u64 *val)
>  {
> -	struct drm_i915_private *dev_priv = data;
> -
> -	*val = 1 + atomic_read(&dev_priv->gt.global_timeline.seqno);
> -	return 0;
> +	return -ENODEV;

I assume the reason for leaving this function in this state appears in a 
later patch? Does gt.global_timeline stay around for something else?

>  }
>
>  static int
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
> index 24571a5289a4..2d84da0e2b39 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.c
> +++ b/drivers/gpu/drm/i915/i915_gem_request.c
> @@ -213,7 +213,9 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
>  	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
>  	GEM_BUG_ON(!i915_sw_fence_signaled(&request->execute));
>  	GEM_BUG_ON(!i915_gem_request_completed(request));
> +
>  	GEM_BUG_ON(!request->i915->gt.active_requests);
> +	GEM_BUG_ON(!request->engine->timeline->active_seqno);
>
>  	trace_i915_gem_request_retire(request);
>
> @@ -237,6 +239,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
>  				 &request->i915->gt.idle_work,
>  				 msecs_to_jiffies(100));
>  	}
> +	request->engine->timeline->active_seqno--;

Hm, decrementing the seqno, a new concept in our codebase. There is no 
comment where the structure field is added so I guess it's reverse 
engineering time again. :(

Back a few minutes later - so this is a count of in-flight requests? 
Should you call it active_seqnos then, so it becomes obvious it is not a 
seqno as we know it?

It sucks a bit that the decrement is in the infamous retire worker. I 
wonder how hard would it be to DoS the seqno space with it.

Protected by the timeline spinlock?

Would the design work if we assumed the GuC scheduler backend is in, and 
put the in-flight seqno accounting close to where requests go in and out 
of the hardware?

>
>  	/* Walk through the active list, calling retire on each. This allows
>  	 * objects to track their GPU activity and mark themselves as idle
> @@ -325,15 +328,19 @@ static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
>  	GEM_BUG_ON(i915->gt.active_requests > 1);
>
>  	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
> -	if (!i915_seqno_passed(seqno, atomic_read(&timeline->seqno))) {
> -		while (intel_breadcrumbs_busy(i915))
> -			cond_resched(); /* spin until threads are complete */
> -	}
> -	atomic_set(&timeline->seqno, seqno);
> +	for_each_engine(engine, i915, id) {
> +		struct intel_timeline *tl = &timeline->engine[id];
>
> -	/* Finally reset hw state */
> -	for_each_engine(engine, i915, id)
> +		if (!i915_seqno_passed(seqno, tl->seqno)) {
> +			/* spin until threads are complete */
> +			while (intel_breadcrumbs_busy(engine))
> +				cond_resched();
> +		}
> +
> +		/* Finally reset hw state */
> +		tl->seqno = seqno;
>  		intel_engine_init_global_seqno(engine, seqno);
> +	}

Came back here a bit later. Shouldn't you just handle one engine in this 
function if seqnos are per-engine now?

>
>  	list_for_each_entry(timeline, &i915->gt.timelines, link) {
>  		for_each_engine(engine, i915, id) {
> @@ -361,34 +368,28 @@ int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
>  	return i915_gem_init_global_seqno(dev_priv, seqno - 1);
>  }
>
> -static int reserve_global_seqno(struct drm_i915_private *i915)
> +static int reserve_global_seqno(struct intel_engine_cs *engine)

Rename to reserve_engine_seqno?

>  {
> -	u32 active_requests = ++i915->gt.active_requests;
> -	u32 seqno = atomic_read(&i915->gt.global_timeline.seqno);
> +	u32 active = ++engine->timeline->active_seqno;
> +	u32 seqno = engine->timeline->seqno;
>  	int ret;
>
>  	/* Reservation is fine until we need to wrap around */
> -	if (likely(seqno + active_requests > seqno))
> +	if (likely(seqno + active > seqno))

A case for one of the overflow-check type helpers?

>  		return 0;
>
> -	ret = i915_gem_init_global_seqno(i915, 0);
> +	ret = i915_gem_init_global_seqno(engine->i915, 0);

i915_gem_init_engine_seqno(engine) ?

>  	if (ret) {
> -		i915->gt.active_requests--;
> +		engine->timeline->active_seqno--;

It would be better for the active seqno count to be managed on the same 
level for readability. By that I mean having the decrement in 
add_request where it was incremented.

>  		return ret;
>  	}
>
>  	return 0;
>  }
>
> -static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
> +static u32 timeline_get_seqno(struct intel_timeline *tl)
>  {
> -	/* seqno only incremented under a mutex */
> -	return ++tl->seqno.counter;
> -}
> -
> -static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
> -{
> -	return atomic_inc_return(&tl->seqno);
> +	return ++tl->seqno;
>  }
>
>  void __i915_gem_request_submit(struct drm_i915_gem_request *request)
> @@ -402,14 +403,10 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
>  	GEM_BUG_ON(timeline == request->timeline);
>  	assert_spin_locked(&timeline->lock);
>
> -	seqno = timeline_get_seqno(timeline->common);
> +	seqno = timeline_get_seqno(timeline);
>  	GEM_BUG_ON(!seqno);
>  	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
>
> -	GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno, seqno));
> -	request->previous_seqno = timeline->last_submitted_seqno;
> -	timeline->last_submitted_seqno = seqno;
> -
>  	/* We may be recursing from the signal callback of another i915 fence */
>  	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
>  	request->global_seqno = seqno;

This field could also be renamed to engine_seqno to be more 
self-documenting.

> @@ -514,7 +511,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
>  	if (ret)
>  		return ERR_PTR(ret);
>
> -	ret = reserve_global_seqno(dev_priv);
> +	ret = reserve_global_seqno(engine);
>  	if (ret)
>  		goto err_unpin;
>
> @@ -566,7 +563,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
>  		       &i915_fence_ops,
>  		       &req->lock,
>  		       req->timeline->fence_context,
> -		       __timeline_get_seqno(req->timeline->common));
> +		       timeline_get_seqno(req->timeline));
>
>  	/* We bump the ref for the fence chain */
>  	i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
> @@ -611,6 +608,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
>  	 */
>  	req->head = req->ring->tail;
>
> +	/* Check that we didn't interrupt ourselves with a new request */
> +	GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
>  	return req;
>
>  err_ctx:
> @@ -621,7 +620,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
>
>  	kmem_cache_free(dev_priv->requests, req);
>  err_unreserve:
> -	dev_priv->gt.active_requests--;
> +	engine->timeline->active_seqno--;
>  err_unpin:
>  	engine->context_unpin(engine, ctx);
>  	return ERR_PTR(ret);
> @@ -833,8 +832,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
>  	 * our i915_gem_request_alloc() and called __i915_add_request() before
>  	 * us, the timeline will hold its seqno which is later than ours.
>  	 */
> -	GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
> -				     request->fence.seqno));
> +	GEM_BUG_ON(timeline->seqno != request->fence.seqno);
>
>  	/*
>  	 * To ensure that this call will not fail, space for its emissions
> @@ -889,16 +887,14 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
>  	list_add_tail(&request->link, &timeline->requests);
>  	spin_unlock_irq(&timeline->lock);
>
> -	GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
> -				     request->fence.seqno));
> -
> -	timeline->last_submitted_seqno = request->fence.seqno;
> +	GEM_BUG_ON(timeline->seqno != request->fence.seqno);
>  	i915_gem_active_set(&timeline->last_request, request);
>
>  	list_add_tail(&request->ring_link, &ring->request_list);
>  	request->emitted_jiffies = jiffies;
>
> -	i915_gem_mark_busy(engine);
> +	if (!request->i915->gt.active_requests++)
> +		i915_gem_mark_busy(engine);
>
>  	/* Let the backend know a new request has arrived that may need
>  	 * to adjust the existing execution schedule due to a high priority
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
> index ea511f06efaf..9049936c571c 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.h
> +++ b/drivers/gpu/drm/i915/i915_gem_request.h
> @@ -145,12 +145,6 @@ struct drm_i915_gem_request {
>
>  	u32 global_seqno;
>
> -	/** GEM sequence number associated with the previous request,
> -	 * when the HWS breadcrumb is equal to this the GPU is processing
> -	 * this request.
> -	 */

Aha removing comments is fine! ...

> -	u32 previous_seqno;
> -
>  	/** Position in the ring of the start of the request */
>  	u32 head;
>
> @@ -287,7 +281,7 @@ __i915_gem_request_started(const struct drm_i915_gem_request *req)
>  {
>  	GEM_BUG_ON(!req->global_seqno);
>  	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
> -				 req->previous_seqno);
> +				 req->global_seqno - 1);
>  }
>
>  static inline bool
> diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
> index f2e51f42cc2f..bc101e644143 100644
> --- a/drivers/gpu/drm/i915/i915_gem_timeline.h
> +++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
> @@ -33,7 +33,8 @@ struct i915_gem_timeline;
>
>  struct intel_timeline {
>  	u64 fence_context;
> -	u32 last_submitted_seqno;
> +	u32 seqno;
> +	u32 active_seqno;

... But adding some back not so much? >:I

>
>  	spinlock_t lock;
>
> @@ -56,7 +57,6 @@ struct intel_timeline {
>
>  struct i915_gem_timeline {
>  	struct list_head link;
> -	atomic_t seqno;
>
>  	struct drm_i915_private *i915;
>  	const char *name;
> diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
> index 74cb7b91b5db..4f859e423ef3 100644
> --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
> +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
> @@ -655,31 +655,26 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
>  	cancel_fake_irq(engine);
>  }
>
> -unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915)
> +bool intel_breadcrumbs_busy(struct intel_engine_cs *engine)
>  {
> -	struct intel_engine_cs *engine;
> -	enum intel_engine_id id;
> -	unsigned int mask = 0;
> -
> -	for_each_engine(engine, i915, id) {
> -		struct intel_breadcrumbs *b = &engine->breadcrumbs;
> -
> -		spin_lock_irq(&b->lock);
> +	struct intel_breadcrumbs *b = &engine->breadcrumbs;
> +	bool busy = false;
>
> -		if (b->first_wait) {
> -			wake_up_process(b->first_wait->tsk);
> -			mask |= intel_engine_flag(engine);
> -		}
> +	spin_lock_irq(&b->lock);
>
> -		if (b->first_signal) {
> -			wake_up_process(b->signaler);
> -			mask |= intel_engine_flag(engine);
> -		}
> +	if (b->first_wait) {
> +		wake_up_process(b->first_wait->tsk);
> +		busy |= intel_engine_flag(engine);
> +	}
>
> -		spin_unlock_irq(&b->lock);
> +	if (b->first_signal) {
> +		wake_up_process(b->signaler);
> +		busy |= intel_engine_flag(engine);
>  	}
>
> -	return mask;
> +	spin_unlock_irq(&b->lock);
> +
> +	return busy;
>  }
>
>  #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
> diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
> index 538d845d7251..1798b87fd934 100644
> --- a/drivers/gpu/drm/i915/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
> @@ -254,8 +254,6 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
>  		engine->irq_seqno_barrier(engine);
>
>  	GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
> -	engine->timeline->last_submitted_seqno = seqno;
> -
>  	engine->hangcheck.seqno = seqno;
>
>  	/* After manually advancing the seqno, fake the interrupt in case
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index cc62e89010d3..9412c8f58619 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -564,7 +564,7 @@ static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
>  	 * wtih serialising this hint with anything, so document it as
>  	 * a hint and nothing more.
>  	 */
> -	return READ_ONCE(engine->timeline->last_submitted_seqno);
> +	return READ_ONCE(engine->timeline->seqno);
>  }
>
>  int init_workarounds_ring(struct intel_engine_cs *engine);
> @@ -637,6 +637,6 @@ static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
>
>  void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
>  void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
> -unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);
> +bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);
>
>  #endif /* _INTEL_RINGBUFFER_H_ */
>

Regards,

Tvrtko


More information about the Intel-gfx mailing list