[Intel-gfx] [PATCH 01/18] drm/i915/guc: Use a local cancel_port_requests

Daniele Ceraolo Spurio daniele.ceraolospurio at intel.com
Mon Aug 12 20:23:29 UTC 2019



On 8/12/19 6:38 AM, Chris Wilson wrote:
> Since execlists and the guc have diverged in their port tracking, we
> cannot simply reuse the execlists cancellation code as it leads to
> unbalanced reference counting. Use a local simpler routine for the guc.
> 

LGTM, just wondering if we should put a comment somewhere to note that 
ce->inflight and execlists->pending are currently unused in the GuC 
path, so we don't incorrectly assume they're always set at certain 
stages of submission; a rough sketch of what I had in mind is below.
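Something along these lines, purely as a suggestion (wording and
placement are hypothetical, e.g. near the GuC schedule_in()/
schedule_out() helpers in intel_guc_submission.c):

	/*
	 * NOTE: unlike the execlists backend, the GuC submission path
	 * does not use ce->inflight or execlists->pending to track
	 * ports, so don't assume they are populated at any stage of
	 * submission.
	 */

But anyway: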

Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio at intel.com>

Janusz, does this work for you?

Thanks,
Daniele

> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio at intel.com>
> ---
>   drivers/gpu/drm/i915/gt/intel_engine.h        |  3 ---
>   drivers/gpu/drm/i915/gt/intel_lrc.c           |  6 ++---
>   .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 23 +++++++++++--------
>   3 files changed, 16 insertions(+), 16 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
> index e1228b0e577f..4b6a1cf80706 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine.h
> @@ -136,9 +136,6 @@ execlists_active(const struct intel_engine_execlists *execlists)
>   	return READ_ONCE(*execlists->active);
>   }
>   
> -void
> -execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
> -
>   struct i915_request *
>   execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
>   
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index b97047d58d3d..5c26c4ae139b 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -1297,8 +1297,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>   	}
>   }
>   
> -void
> -execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
> +static void
> +cancel_port_requests(struct intel_engine_execlists * const execlists)
>   {
>   	struct i915_request * const *port, *rq;
>   
> @@ -2355,7 +2355,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
>   
>   unwind:
>   	/* Push back any incomplete requests for replay after the reset. */
> -	execlists_cancel_port_requests(execlists);
> +	cancel_port_requests(execlists);
>   	__unwind_incomplete_requests(engine);
>   }
>   
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> index 449ca6357018..a37afc6266ec 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> @@ -517,11 +517,7 @@ static struct i915_request *schedule_in(struct i915_request *rq, int idx)
>   {
>   	trace_i915_request_in(rq, idx);
>   
> -	if (!rq->hw_context->inflight)
> -		rq->hw_context->inflight = rq->engine;
> -	intel_context_inflight_inc(rq->hw_context);
>   	intel_gt_pm_get(rq->engine->gt);
> -
>   	return i915_request_get(rq);
>   }
>   
> @@ -529,10 +525,6 @@ static void schedule_out(struct i915_request *rq)
>   {
>   	trace_i915_request_out(rq);
>   
> -	intel_context_inflight_dec(rq->hw_context);
> -	if (!intel_context_inflight_count(rq->hw_context))
> -		rq->hw_context->inflight = NULL;
> -
>   	intel_gt_pm_put(rq->engine->gt);
>   	i915_request_put(rq);
>   }
> @@ -636,6 +628,17 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
>   	__tasklet_disable_sync_once(&execlists->tasklet);
>   }
>   
> +static void
> +cancel_port_requests(struct intel_engine_execlists * const execlists)
> +{
> +	struct i915_request * const *port, *rq;
> +
> +	for (port = execlists->active; (rq = *port); port++)
> +		schedule_out(rq);
> +	execlists->active =
> +		memset(execlists->inflight, 0, sizeof(execlists->inflight));
> +}
> +
>   static void guc_reset(struct intel_engine_cs *engine, bool stalled)
>   {
>   	struct intel_engine_execlists * const execlists = &engine->execlists;
> @@ -644,7 +647,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
>   
>   	spin_lock_irqsave(&engine->active.lock, flags);
>   
> -	execlists_cancel_port_requests(execlists);
> +	cancel_port_requests(execlists);
>   
>   	/* Push back any incomplete requests for replay after the reset. */
>   	rq = execlists_unwind_incomplete_requests(execlists);
> @@ -687,7 +690,7 @@ static void guc_cancel_requests(struct intel_engine_cs *engine)
>   	spin_lock_irqsave(&engine->active.lock, flags);
>   
>   	/* Cancel the requests on the HW and clear the ELSP tracker. */
> -	execlists_cancel_port_requests(execlists);
> +	cancel_port_requests(execlists);
>   
>   	/* Mark all executing requests as skipped. */
>   	list_for_each_entry(rq, &engine->active.requests, sched.link) {
> 

