[Intel-gfx] [PATCH v4 11/16] drm/i915: Remove the preempted request from the execution queue

Tvrtko Ursulin tvrtko.ursulin at linux.intel.com
Thu Feb 23 12:11:34 UTC 2017


On 23/02/2017 07:44, Chris Wilson wrote:
> After the request is cancelled, we then need to remove it from the
> global execution timeline and return it to the context timeline, the
> inverse of submit_request().
>
> v2: Move manipulation of struct intel_wait to helpers
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin at linux.intel.com>
> ---
>  drivers/gpu/drm/i915/i915_gem_request.c  | 55 ++++++++++++++++++++++++++++++++
>  drivers/gpu/drm/i915/i915_gem_request.h  |  3 ++
>  drivers/gpu/drm/i915/intel_breadcrumbs.c | 17 ++++++++--
>  3 files changed, 73 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
> index 5ed52521397f..76e31cd7840e 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.c
> +++ b/drivers/gpu/drm/i915/i915_gem_request.c
> @@ -441,6 +441,55 @@ void i915_gem_request_submit(struct drm_i915_gem_request *request)
>  	spin_unlock_irqrestore(&engine->timeline->lock, flags);
>  }
>
> +void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
> +{
> +	struct intel_engine_cs *engine = request->engine;
> +	struct intel_timeline *timeline;
> +
> +	assert_spin_locked(&engine->timeline->lock);
> +
> +	/* Only unwind in reverse order, required so that the per-context list
> +	 * is kept in seqno/ring order.
> +	 */
> +	GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
> +	engine->timeline->seqno--;
> +
> +	/* We may be recursing from the signal callback of another i915 fence */
> +	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
> +	request->global_seqno = 0;
> +	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
> +		intel_engine_cancel_signaling(request);
> +	spin_unlock(&request->lock);
> +
> +	/* Transfer back from the global per-engine timeline to per-context */
> +	timeline = request->timeline;
> +	GEM_BUG_ON(timeline == engine->timeline);
> +
> +	spin_lock(&timeline->lock);
> +	list_move(&request->link, &timeline->requests);
> +	spin_unlock(&timeline->lock);
> +
> +	/* We don't need to wake_up any waiters on request->execute, they
> +	 * will get woken by any other event or us re-adding this request
> +	 * to the engine timeline (__i915_gem_request_submit()). The waiters
> +	 * should be quite adept at finding that the request now has a
> +	 * different global_seqno from the one they went to sleep on.
> +	 */
> +}
> +
> +void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
> +{
> +	struct intel_engine_cs *engine = request->engine;
> +	unsigned long flags;
> +
> +	/* Will be called from irq-context when using foreign fences. */
> +	spin_lock_irqsave(&engine->timeline->lock, flags);
> +
> +	__i915_gem_request_unsubmit(request);
> +
> +	spin_unlock_irqrestore(&engine->timeline->lock, flags);
> +}
> +
>  static int __i915_sw_fence_call
>  submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
>  {
> @@ -1036,6 +1085,7 @@ long i915_wait_request(struct drm_i915_gem_request *req,
>
>  	intel_wait_init(&wait);
>
> +restart:
>  	reset_wait_queue(&req->execute, &exec);
>  	if (!intel_wait_update_request(&wait, req)) {
>  		do {
> @@ -1134,6 +1184,11 @@ long i915_wait_request(struct drm_i915_gem_request *req,
>  		/* Only spin if we know the GPU is processing this request */
>  		if (i915_spin_request(req, state, 2))
>  			break;
> +
> +		if (!intel_wait_check_request(&wait, req)) {
> +			intel_engine_remove_wait(req->engine, &wait);
> +			goto restart;
> +		}
>  	}
>
>  	intel_engine_remove_wait(req->engine, &wait);
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
> index b81f6709905c..5f73d8c0a38a 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.h
> +++ b/drivers/gpu/drm/i915/i915_gem_request.h
> @@ -274,6 +274,9 @@ void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
>  void __i915_gem_request_submit(struct drm_i915_gem_request *request);
>  void i915_gem_request_submit(struct drm_i915_gem_request *request);
>
> +void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request);
> +void i915_gem_request_unsubmit(struct drm_i915_gem_request *request);
> +
>  struct intel_rps_client;
>  #define NO_WAITBOOST ERR_PTR(-1)
>  #define IS_RPS_CLIENT(p) (!IS_ERR(p))
> diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
> index dd39e4f7a560..027c93e34c97 100644
> --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
> +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
> @@ -453,7 +453,12 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
>  	spin_unlock_irq(&b->lock);
>  }
>
> -static bool signal_complete(struct drm_i915_gem_request *request)
> +static bool signal_valid(const struct drm_i915_gem_request *request)
> +{
> +	return intel_wait_check_request(&request->signaling.wait, request);
> +}
> +
> +static bool signal_complete(const struct drm_i915_gem_request *request)
>  {
>  	if (!request)
>  		return false;
> @@ -462,7 +467,7 @@ static bool signal_complete(struct drm_i915_gem_request *request)
>  	 * signalled that this wait is already completed.
>  	 */
>  	if (intel_wait_complete(&request->signaling.wait))
> -		return true;
> +		return signal_valid(request);
>
>  	/* Carefully check if the request is complete, giving time for the
>  	 * seqno to be visible or if the GPU hung.
> @@ -542,13 +547,21 @@ static int intel_breadcrumbs_signaler(void *arg)
>
>  			i915_gem_request_put(request);
>  		} else {
> +			DEFINE_WAIT(exec);
> +
>  			if (kthread_should_stop()) {
>  				GEM_BUG_ON(request);
>  				break;
>  			}
>
> +			if (request)
> +				add_wait_queue(&request->execute, &exec);
> +
>  			schedule();
>
> +			if (request)
> +				remove_wait_queue(&request->execute, &exec);
> +
>  			if (kthread_should_park())
>  				kthread_parkme();
>  		}
>
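
For anyone following along: the intended consumer of the new unsubmit hook
is a preemption/reset path that unwinds the engine timeline in reverse
before resubmitting. That caller is not part of this patch, so the sketch
below is only my reading of the intended usage; the function name is made
up, but it sticks to helpers already in the tree or added by this patch:

	static void unwind_incomplete_requests(struct intel_engine_cs *engine)
	{
		struct drm_i915_gem_request *rq, *rn;

		assert_spin_locked(&engine->timeline->lock);

		/* Unwind in reverse submission order so that seqnos are
		 * released in the opposite order to their assignment,
		 * keeping each per-context timeline in seqno/ring order
		 * (see the GEM_BUG_ON in __i915_gem_request_unsubmit).
		 */
		list_for_each_entry_safe_reverse(rq, rn,
						 &engine->timeline->requests,
						 link) {
			if (i915_gem_request_completed(rq))
				break;

			/* Drops the global seqno and moves the request back
			 * onto its per-context timeline. */
			__i915_gem_request_unsubmit(rq);
		}
	}

Waiters sleeping in i915_wait_request() are deliberately not woken here;
they notice the lost global_seqno via intel_wait_check_request() and take
the new restart: path instead.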

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

Regards,

Tvrtko


