[Intel-gfx] [PATCH 1/8] drm/i915: Rename execlists->queue_priority to preempt_priority_hint

Tvrtko Ursulin tvrtko.ursulin at linux.intel.com
Tue Jan 29 10:14:45 UTC 2019


On 29/01/2019 08:58, Chris Wilson wrote:
> After noticing that we trigger preemption events for currently executing
> requests, as well as requests that complete before the preemption and
> attempting to suppress those preemption events, it is wise to not
> consider the queue_priority to be authoritative. As we only track the
> maximum priority seen between dequeue passes, if the maximum priority
> request is no longer available for dequeuing (it completed or is even
> executing on another engine), we have no knowledge of the previous
> queue_priority as it would require us to keep a full history of enqueued
> requests -- but we already have that history in the priolists!
> 
> Rename the queue_priority to preempt_priority_hint so that we do not
> confuse it as being the maximum priority in the queue, but merely an
> indication that we have seen a new maximum priority value and as such we
> should check whether it should preempt the currently running request.
> 
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
> ---
>   drivers/gpu/drm/i915/i915_scheduler.c       | 11 +++++------
>   drivers/gpu/drm/i915/intel_engine_cs.c      |  4 ++--
>   drivers/gpu/drm/i915/intel_guc_submission.c |  5 +++--
>   drivers/gpu/drm/i915/intel_lrc.c            | 20 +++++++++++---------
>   drivers/gpu/drm/i915/intel_ringbuffer.h     |  8 ++++++--
>   5 files changed, 27 insertions(+), 21 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> index 340faea6c08a..0da718ceab43 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler.c
> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> @@ -127,8 +127,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
>   	return rb_entry(rb, struct i915_priolist, node);
>   }
>   
> -static void assert_priolists(struct intel_engine_execlists * const execlists,
> -			     long queue_priority)
> +static void assert_priolists(struct intel_engine_execlists * const execlists)
>   {
>   	struct rb_node *rb;
>   	long last_prio, i;
> @@ -139,7 +138,7 @@ static void assert_priolists(struct intel_engine_execlists * const execlists,
>   	GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
>   		   rb_first(&execlists->queue.rb_root));
>   
> -	last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1;
> +	last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
>   	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
>   		const struct i915_priolist *p = to_priolist(rb);
>   
> @@ -166,7 +165,7 @@ i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
>   	int idx, i;
>   
>   	lockdep_assert_held(&engine->timeline.lock);
> -	assert_priolists(execlists, INT_MAX);
> +	assert_priolists(execlists);
>   
>   	/* buckets sorted from highest [in slot 0] to lowest priority */
>   	idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
> @@ -353,7 +352,7 @@ static void __i915_schedule(struct i915_request *rq,
>   				continue;
>   		}
>   
> -		if (prio <= engine->execlists.queue_priority)
> +		if (prio <= engine->execlists.preempt_priority_hint)
>   			continue;
>   
>   		/*
> @@ -366,7 +365,7 @@ static void __i915_schedule(struct i915_request *rq,
>   			continue;
>   
>   		/* Defer (tasklet) submission until after all of our updates. */
> -		engine->execlists.queue_priority = prio;
> +		engine->execlists.preempt_priority_hint = prio;
>   		tasklet_hi_schedule(&engine->execlists.tasklet);
>   	}
>   
> diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
> index ead9c4371fe1..5a80db990351 100644
> --- a/drivers/gpu/drm/i915/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
> @@ -480,7 +480,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
>   	GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
>   	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
>   
> -	execlists->queue_priority = INT_MIN;
> +	execlists->preempt_priority_hint = INT_MIN;
>   	execlists->queue = RB_ROOT_CACHED;
>   }
>   
> @@ -1178,7 +1178,7 @@ void intel_engines_park(struct drm_i915_private *i915)
>   		}
>   
>   		/* Must be reset upon idling, or we may miss the busy wakeup. */
> -		GEM_BUG_ON(engine->execlists.queue_priority != INT_MIN);
> +		GEM_BUG_ON(engine->execlists.preempt_priority_hint != INT_MIN);
>   
>   		if (engine->park)
>   			engine->park(engine);
> diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
> index 4295ade0d613..6cdd3f097209 100644
> --- a/drivers/gpu/drm/i915/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/intel_guc_submission.c
> @@ -737,7 +737,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
>   		if (intel_engine_has_preemption(engine)) {
>   			struct guc_preempt_work *preempt_work =
>   				&engine->i915->guc.preempt_work[engine->id];
> -			int prio = execlists->queue_priority;
> +			int prio = execlists->preempt_priority_hint;
>   
>   			if (__execlists_need_preempt(prio, port_prio(port))) {
>   				execlists_set_active(execlists,
> @@ -783,7 +783,8 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
>   			kmem_cache_free(engine->i915->priorities, p);
>   	}
>   done:
> -	execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
> +	execlists->preempt_priority_hint =
> +		rb ? to_priolist(rb)->priority : INT_MIN;
>   	if (submit)
>   		port_assign(port, last);
>   	if (last)
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index fdbb3fe8eac9..9219efa4ee79 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -591,7 +591,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>   		if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
>   			return;
>   
> -		if (need_preempt(engine, last, execlists->queue_priority)) {
> +		if (need_preempt(engine, last, execlists->preempt_priority_hint)) {
>   			inject_preempt_context(engine);
>   			return;
>   		}
> @@ -699,20 +699,20 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>   	/*
>   	 * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
>   	 *
> -	 * We choose queue_priority such that if we add a request of greater
> +	 * We choose the priority hint such that if we add a request of greater
>   	 * priority than this, we kick the submission tasklet to decide on
>   	 * the right order of submitting the requests to hardware. We must
>   	 * also be prepared to reorder requests as they are in-flight on the
> -	 * HW. We derive the queue_priority then as the first "hole" in
> +	 * HW. We derive the priority hint then as the first "hole" in
>   	 * the HW submission ports and if there are no available slots,
>   	 * the priority of the lowest executing request, i.e. last.
>   	 *
>   	 * When we do receive a higher priority request ready to run from the
> -	 * user, see queue_request(), the queue_priority is bumped to that
> +	 * user, see queue_request(), the priority hint is bumped to that
>   	 * request triggering preemption on the next dequeue (or subsequent
>   	 * interrupt for secondary ports).
>   	 */
> -	execlists->queue_priority =
> +	execlists->preempt_priority_hint =
>   		port != execlists->port ? rq_prio(last) : INT_MIN;
>   
>   	if (submit) {
> @@ -861,7 +861,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
>   
>   	/* Remaining _unready_ requests will be nop'ed when submitted */
>   
> -	execlists->queue_priority = INT_MIN;
> +	execlists->preempt_priority_hint = INT_MIN;
>   	execlists->queue = RB_ROOT_CACHED;
>   	GEM_BUG_ON(port_isset(execlists->port));
>   
> @@ -1092,8 +1092,8 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
>   
>   static void submit_queue(struct intel_engine_cs *engine, int prio)
>   {
> -	if (prio > engine->execlists.queue_priority) {
> -		engine->execlists.queue_priority = prio;
> +	if (prio > engine->execlists.preempt_priority_hint) {
> +		engine->execlists.preempt_priority_hint = prio;
>   		__submit_queue_imm(engine);
>   	}
>   }
> @@ -2748,7 +2748,9 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
>   
>   	last = NULL;
>   	count = 0;
> -	drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
> +	if (execlists->preempt_priority_hint != INT_MIN)
> +		drm_printf(m, "\t\tPreempt priority hint: %d\n",
> +			   execlists->preempt_priority_hint);
>   	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
>   		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
>   		int i;
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 2927b712b973..5e2231d0c2fa 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -293,14 +293,18 @@ struct intel_engine_execlists {
>   	unsigned int port_mask;
>   
>   	/**
> -	 * @queue_priority: Highest pending priority.
> +	 * @preempt_priority_hint: Highest pending priority.

Would queue_priority_hint be better since it is used not just for 
preemption but to start an idle GPU?

Regards,

Tvrtko

>   	 *
>   	 * When we add requests into the queue, or adjust the priority of
>   	 * executing requests, we compute the maximum priority of those
>   	 * pending requests. We can then use this value to determine if
>   	 * we need to preempt the executing requests to service the queue.
> +	 * However, since we may have recorded the priority of an inflight
> +	 * request we wanted to preempt but that has since completed, at the
> +	 * time of dequeuing the priority hint may no longer match the highest
> +	 * available request priority.
>   	 */
> -	int queue_priority;
> +	int preempt_priority_hint;
>   
>   	/**
>   	 * @queue: queue of requests, in priority lists
> 


More information about the Intel-gfx mailing list