[Intel-gfx] [PATCH] drm/i915/gem: Reduce ggtt_flush() from a wait-for-idle to a mere barrier flush

Tvrtko Ursulin tvrtko.ursulin at linux.intel.com
Wed Nov 20 15:58:49 UTC 2019


On 20/11/2019 13:41, Chris Wilson wrote:
> Since we use barriers, we need only explicitly flush those barriers to
> ensure that we can reclaim the available ggtt for ourselves. The barrier
> flush used to be implicit inside intel_gt_wait_for_idle(), but because
> we call i915_gem_evict from inside an active timeline during execbuf,
> we could easily end up waiting upon ourselves.
> 
> Fixes: 7936a22dd466 ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")
> Fixes: a46bfdc83fee ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")
> Testcase: igt/gem_exec_reloc/basic-range

Bugzilla: ?

Does this test get permanently stuck on some platforms?

Regards,

Tvrtko

> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
> ---
>   drivers/gpu/drm/i915/i915_gem_evict.c | 26 +++++++++++++-------------
>   1 file changed, 13 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index 7e62c310290f..78ca56c06a3c 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -28,7 +28,7 @@
>   
>   #include <drm/i915_drm.h>
>   
> -#include "gem/i915_gem_context.h"
> +#include "gt/intel_engine_heartbeat.h"
>   #include "gt/intel_gt_requests.h"
>   
>   #include "i915_drv.h"
> @@ -38,8 +38,11 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
>   	bool fail_if_busy:1;
>   } igt_evict_ctl;)
>   
> -static int ggtt_flush(struct intel_gt *gt)
> +static void ggtt_flush(struct intel_gt *gt)
>   {
> +	struct intel_engine_cs *engine;
> +	enum intel_engine_id id;
> +
>   	/*
>   	 * Not everything in the GGTT is tracked via vma (otherwise we
>   	 * could evict as required with minimal stalling) so we are forced
> @@ -47,7 +50,11 @@ static int ggtt_flush(struct intel_gt *gt)
>   	 * the hopes that we can then remove contexts and the like only
>   	 * bound by their active reference.
>   	 */
> -	return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
> +	intel_gt_retire_requests(gt);
> +	for_each_engine(engine, gt, id)
> +		intel_engine_flush_barriers(engine);
> +
> +	cond_resched();
>   }
>   
>   static bool
> @@ -197,11 +204,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
>   	if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
>   		return -EBUSY;
>   
> -	ret = ggtt_flush(vm->gt);
> -	if (ret)
> -		return ret;
> -
> -	cond_resched();
> +	ggtt_flush(vm->gt);
>   
>   	flags |= PIN_NONBLOCK;
>   	goto search_again;
> @@ -371,11 +374,8 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
>   	 * pin themselves inside the global GTT and performing the
>   	 * switch otherwise is ineffective.
>   	 */
> -	if (i915_is_ggtt(vm)) {
> -		ret = ggtt_flush(vm->gt);
> -		if (ret)
> -			return ret;
> -	}
> +	if (i915_is_ggtt(vm))
> +		ggtt_flush(vm->gt);
>   
>   	INIT_LIST_HEAD(&eviction_list);
>   	list_for_each_entry(vma, &vm->bound_list, vm_link) {
> 
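
For what it is worth, here is how I read the change and what ggtt_flush()
ends up looking like with the hunks above applied. The body is reconstructed
from the diff and the comments are my own paraphrase of the commit message,
so treat it as a sketch rather than the exact contents of the tree:

/*
 * Rough picture of the failure mode, as I read the commit message:
 *
 *   execbuf, with a request under construction on an active timeline,
 *   needs GGTT space (presumably via a relocation needing a GGTT pin,
 *   which is what igt/gem_exec_reloc/basic-range exercises)
 *     -> i915_gem_evict_something()
 *        -> ggtt_flush()
 *           -> intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT)
 *              waits for all requests, potentially including the one
 *              execbuf itself is still building, i.e. we wait upon
 *              ourselves.
 *
 * With the patch, ggtt_flush() no longer waits at all:
 */
static void ggtt_flush(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Retire already-completed requests first. */
	intel_gt_retire_requests(gt);

	/*
	 * Explicitly flush each engine's barriers so that GGTT space held
	 * only by those barriers (contexts and the like bound only by
	 * their active reference) becomes reclaimable, without waiting
	 * for the whole GT to idle.
	 */
	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	/* The caller may retry its search in a loop, so be polite. */
	cond_resched();
}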

