[Intel-gfx] [PATCH 1/2] drm/i915/selftests: Calculate maximum ring size for preemption chain

Caz Yokoyama Caz.Yokoyama at intel.com
Thu Mar 21 18:41:10 UTC 2019


Comments inline below.
-caz
On Thu, 2019-03-21 at 07:37 +0000, Chris Wilson wrote:
> 32 is too many for the likes of kbl, and in order to insert that many
Not only on kbl: ring_size is 25 on my cfl.

> requests into the ring requires us to declare the first few hung --
The hang is not caused by 32 alone; it is caused by the accumulation of
requests across all of the prime-numbered chain lengths.
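
As a rough illustration of that point (my own arithmetic, not from the
patch): if none of the per-iteration chains were flushed, just the chain
lengths that for_each_prime_number_from(count, 1, 32) walks through
(1 and then the primes up to 31, if I read the macro right) already add
up to far more requests than a small ring can hold at once:

	/* Illustration only: sum of the chain lengths the test walks. */
	static int accumulated_chain_lengths(void)
	{
		static const int counts[] = {
			1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31
		};
		int i, total = 0;

		for (i = 0; i < ARRAY_SIZE(counts); i++)
			total += counts[i];

		return total; /* 161, before counting the hi/spinner requests */
	}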

> understandably a slow and unexpected process. Instead, measure the size
> of a single request and use that to estimate the upper bound on the
> chain length we can use for our test, remembering to flush the previous
> chain between tests for safety.
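
To restate the estimate from the hunk below in isolation (my own
paraphrase, with made-up numbers rather than measured ones): one request
occupies wa_tail - head bytes of the ring, fixed up for wrap-around, and
dividing the ring size by that gives how many such requests fit at once.

	/* Paraphrase of the ring_size computation below; not the patch itself. */
	static int estimate_max_requests(u32 head, u32 wa_tail, int ring_bytes)
	{
		int request_bytes = (int)(wa_tail - head);

		if (request_bytes < 0)
			request_bytes += ring_bytes; /* request wrapped the ring */

		return ring_bytes / request_bytes;
	}

(Purely hypothetical numbers: a 4096-byte ring with ~160-byte requests
would give 25, which would at least be consistent with the 25 I see on
my cfl.)
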
> 
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
> ---
>  drivers/gpu/drm/i915/selftests/intel_lrc.c | 40 ++++++++++++++++++++--
>  1 file changed, 37 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> index d61520ea03c1..42068ed5eec0 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> @@ -615,14 +615,33 @@ static int live_chain_preempt(void *arg)
>  		struct i915_sched_attr attr = {
>  			.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
>  		};
> -		int count, i;
> +		struct i915_request *rq;
> +		int ring_size, count, i;
>  
>  		if (!intel_engine_has_preemption(engine))
>  			continue;
>  
> -		for_each_prime_number_from(count, 1, 32) { /* must fit ring! */
> -			struct i915_request *rq;
> +		rq = igt_spinner_create_request(&lo.spin,
> +						lo.ctx, engine,
> +						MI_ARB_CHECK);
> +		if (IS_ERR(rq))
> +			goto err_wedged;
> +		i915_request_add(rq);
> +
> +		ring_size = rq->wa_tail - rq->head;
> +		if (ring_size < 0)
> +			ring_size += rq->ring->size;
> +		ring_size = rq->ring->size / ring_size;
> +		pr_debug("%s(%s): Using maximum of %d requests\n",
> +			 __func__, engine->name, ring_size);
>  
> +		igt_spinner_end(&lo.spin);
> +		if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
> +			pr_err("Timed out waiting to flush %s\n", engine->name);
> +			goto err_wedged;
> +		}
> +
> +		for_each_prime_number_from(count, 1, ring_size) {
>  			rq = igt_spinner_create_request(&hi.spin,
>  							hi.ctx, engine,
>  							MI_ARB_CHECK);
> @@ -664,6 +683,21 @@ static int live_chain_preempt(void *arg)
>  				goto err_wedged;
>  			}
>  			igt_spinner_end(&lo.spin);
> +
> +			rq = i915_request_alloc(engine, lo.ctx);
> +			if (IS_ERR(rq))
> +				goto err_wedged;
> +			i915_request_add(rq);
This request alloc/add is redundant; waiting for the last rq of the lo
chain would be enough.
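
Untested sketch of what I mean (assuming the lo chain is built by the
i915_request_alloc(engine, lo.ctx) loop earlier in the test, not quoted
here, and that struct_mutex is held as elsewhere in the selftest so the
request pointer stays valid): remember the tail of the lo chain and wait
on that, instead of adding a fresh empty request just to wait on.

	struct i915_request *last = NULL;

	/* while building the lo chain, keep track of its final request */
	for (i = 0; i < count; i++) {
		rq = i915_request_alloc(engine, lo.ctx);
		if (IS_ERR(rq))
			goto err_wedged;
		i915_request_add(rq);
		last = rq;
	}

	/* ... then, after igt_spinner_end(&lo.spin), flush by waiting on it */
	if (i915_request_wait(last, I915_WAIT_LOCKED, HZ / 5) < 0) {
		pr_err("Failed to flush low priority chain of %d requests\n",
		       count);
		goto err_wedged;
	}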

> +			if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
> +				struct drm_printer p =
> +					drm_info_printer(i915->drm.dev);
> +
> +				pr_err("Failed to flush low priority chain of %d requests\n",
> +				       count);
> +				intel_engine_dump(engine, &p,
> +						  "%s\n", engine->name);
> +				goto err_wedged;
> +			}
>  		}
>  	}
>  


