[Intel-gfx] [PATCH 13/37] drm/i915: Priority boost switching to an idle ring

Tvrtko Ursulin tvrtko.ursulin at linux.intel.com
Fri Jun 29 10:08:48 UTC 2018


On 29/06/2018 08:53, Chris Wilson wrote:
> In order to maximise concurrency between engines, if we queue a request
> to a currently idle ring, reorder its dependencies to execute that request
> as early as possible and ideally improve occupancy of multiple engines
> simultaneously.
> 
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_request.c     | 6 +++++-
>   drivers/gpu/drm/i915/i915_scheduler.h   | 5 +++--
>   drivers/gpu/drm/i915/intel_ringbuffer.h | 6 ++++++
>   3 files changed, 14 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 2d7a785dd88c..d618e7127e88 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -1053,7 +1053,8 @@ void i915_request_add(struct i915_request *request)
>   	local_bh_disable();
>   	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
>   	if (engine->schedule) {
> -		struct i915_sched_attr attr = request->gem_context->sched;
> +		struct i915_gem_context *ctx = request->gem_context;
> +		struct i915_sched_attr attr = ctx->sched;
>   
>   		/*
>   		 * Boost priorities to new clients (new request flows).
> @@ -1064,6 +1065,9 @@ void i915_request_add(struct i915_request *request)
>   		if (!prev || i915_request_completed(prev))
>   			attr.priority |= I915_PRIORITY_NEWCLIENT;
>   
> +		if (intel_engine_queue_is_empty(engine))
> +			attr.priority |= I915_PRIORITY_STALL;
> +
>   		engine->schedule(request, &attr);
>   	}
>   	rcu_read_unlock();
> diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
> index e9fb6c1d5e42..be132ceb83d9 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler.h
> +++ b/drivers/gpu/drm/i915/i915_scheduler.h
> @@ -19,13 +19,14 @@ enum {
>   	I915_PRIORITY_INVALID = INT_MIN
>   };
>   
> -#define I915_USER_PRIORITY_SHIFT 1
> +#define I915_USER_PRIORITY_SHIFT 2
>   #define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
>   
>   #define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
>   #define I915_PRIORITY_MASK (-I915_PRIORITY_COUNT)
>   
> -#define I915_PRIORITY_NEWCLIENT BIT(0)
> +#define I915_PRIORITY_NEWCLIENT BIT(1)
> +#define I915_PRIORITY_STALL BIT(0)

So lower priority than new clients.

Again I have big concerns.

A client which happens to be the only one using some exotic engine would 
now be able to trash everything else running on the system, just because 
it so happens that no one else uses its favourite engine. And that's 
regardless of how much work it has queued up on other, potentially busy, 
engines.

With this and the previous patch, I'd say this - hello, slippery slope of 
scheduler tuning! :))

Regards,

Tvrtko

>   
>   struct i915_sched_attr {
>   	/**
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index b9ea5cee249f..b26587d0c70d 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -1080,6 +1080,12 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
>   
>   void intel_engines_sanitize(struct drm_i915_private *i915);
>   
> +static inline bool
> +intel_engine_queue_is_empty(const struct intel_engine_cs *engine)
> +{
> +	return RB_EMPTY_ROOT(&engine->execlists.queue.rb_root);
> +}
> +
>   bool intel_engine_is_idle(struct intel_engine_cs *engine);
>   bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
>   
> 


More information about the Intel-gfx mailing list