[Intel-gfx] [PATCH igt] igt/gem_eio: Add another variant of in-flight to avoid request coalescing

Michał Winiarski michal.winiarski at intel.com
Fri Sep 15 16:47:27 UTC 2017


On Fri, Sep 15, 2017 at 05:10:25PM +0100, Chris Wilson wrote:
> Break up request coalescing by submitting a queue of requests from
> different contexts. This makes sure that we mark all ready but not yet
> submitted-to-hw requests as completed/EIO upon wedging.
> 
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Michał Winiarski <michal.winiarski at intel.com>
> ---
>  tests/gem_eio.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 72 insertions(+)
> 
> diff --git a/tests/gem_eio.c b/tests/gem_eio.c
> index 0fa98032..608b2dfd 100644
> --- a/tests/gem_eio.c
> +++ b/tests/gem_eio.c
> @@ -207,6 +207,75 @@ static void test_inflight(int fd)
>  	}
>  }
>  
> +static uint32_t __gem_context_create(int fd)
> +{
> +	struct drm_i915_gem_context_create create;
> +
> +	memset(&create, 0, sizeof(create));
> +	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
> +		return 0;
> +
> +	return create.ctx_id;
> +}
> +

How about #define NCTX 64, since we're using the same value for both the fence and ctx arrays?
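Roughly something like this (just a sketch of the suggestion, nothing more):

#define NCTX 64

	uint32_t ctx[NCTX];
	int fence[NCTX];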

> +static void test_inflight_contexts(int fd)
> +{
> +	struct drm_i915_gem_execbuffer2 execbuf;
> +	struct drm_i915_gem_exec_object2 obj[2];
> +	uint32_t bbe = MI_BATCH_BUFFER_END;
> +	unsigned int engine;
> +	uint32_t ctx[64];
> +	int fence[64];
> +
> +	igt_require(gem_has_exec_fence(fd));
> +
> +	ctx[0] = __gem_context_create(fd);

gem_context_create skips when we don't have contexts, so I think we can drop
this.
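That is, with the __gem_context_create() helper and the igt_require(ctx[0]) check gone, the
context setup could boil down to something like (sketch only, reusing the NCTX suggestion
from above):

	for (unsigned int n = 0; n < NCTX; n++)
		ctx[n] = gem_context_create(fd);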

/* We're using multiple contexts in the hope that at least some of the
 * requests are kept queued rather than submitted to the HW.
 */

Or rephrased as you see fit. It's helpful to know what we're trying to achieve here without
having to dig through the git history.

With that:

Reviewed-by: Michał Winiarski <michal.winiarski at intel.com>

-Michał

> +	igt_require(ctx[0]);
> +	for (unsigned int n = 1; n < ARRAY_SIZE(ctx); n++)
> +		ctx[n] = gem_context_create(fd);
> +
> +	memset(obj, 0, sizeof(obj));
> +	obj[0].flags = EXEC_OBJECT_WRITE;
> +	obj[1].handle = gem_create(fd, 4096);
> +	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
> +
> +	for_each_engine(fd, engine) {
> +		igt_hang_t hang;
> +
> +		igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
> +		igt_require(i915_reset_control(false));
> +
> +		hang = igt_hang_ring(fd, engine);
> +		obj[0].handle = hang.handle;
> +
> +		memset(&execbuf, 0, sizeof(execbuf));
> +		execbuf.buffers_ptr = to_user_pointer(obj);
> +		execbuf.buffer_count = 2;
> +		execbuf.flags = engine | I915_EXEC_FENCE_OUT;
> +
> +		for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
> +			execbuf.rsvd1 = ctx[n];
> +			gem_execbuf_wr(fd, &execbuf);
> +			fence[n] = execbuf.rsvd2 >> 32;
> +			igt_assert(fence[n] != -1);
> +		}
> +
> +		igt_post_hang_ring(fd, hang);
> +
> +		igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
> +		for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
> +			igt_assert_eq(sync_fence_status(fence[n]), -EIO);
> +			close(fence[n]);
> +		}
> +
> +		igt_assert(i915_reset_control(true));
> +		trigger_reset(fd);
> +	}
> +
> +	for (unsigned int n = 0; n < ARRAY_SIZE(ctx); n++)
> +		gem_context_destroy(fd, ctx[n]);
> +}
> +
>  static void test_inflight_external(int fd)
>  {
>  	struct drm_i915_gem_execbuffer2 execbuf;
> @@ -375,6 +444,9 @@ igt_main
>  	igt_subtest("in-flight")
>  		test_inflight(fd);
>  
> +	igt_subtest("in-flight-contexts")
> +		test_inflight_contexts(fd);
> +
>  	igt_subtest("in-flight-external")
>  		test_inflight_external(fd);
>  
> -- 
> 2.14.1
> 

