[Intel-gfx] [igt-dev] [PATCH i-g-t] i915/gem_exec_schedule: Verify that using HW semaphores doesn't block
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Mon Apr 1 07:52:09 UTC 2019
On 29/03/2019 09:54, Chris Wilson wrote:
> We may use HW semaphores to schedule nearly-ready work such that they
> are already spinning on the GPU waiting for the completion on another
> engine. However, we don't want that spinning task to actually block
> any real work should it be scheduled.
>
> v2: No typeof autos
> v3: Don't cheat, check gen8 as well
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
> ---
> tests/i915/gem_exec_schedule.c | 87 ++++++++++++++++++++++++++++++++++
> 1 file changed, 87 insertions(+)
>
> diff --git a/tests/i915/gem_exec_schedule.c b/tests/i915/gem_exec_schedule.c
> index 4f0577b4e..3df319bcc 100644
> --- a/tests/i915/gem_exec_schedule.c
> +++ b/tests/i915/gem_exec_schedule.c
> @@ -48,6 +48,10 @@
>
> #define MAX_CONTEXTS 1024
>
> +#define LOCAL_I915_EXEC_BSD_SHIFT (13)
> +#define LOCAL_I915_EXEC_BSD_MASK (3 << LOCAL_I915_EXEC_BSD_SHIFT)
> +#define ENGINE_MASK (I915_EXEC_RING_MASK | LOCAL_I915_EXEC_BSD_MASK)
> +
> IGT_TEST_DESCRIPTION("Check that we can control the order of execution");
>
> static inline
> @@ -320,6 +324,86 @@ static void smoketest(int fd, unsigned ring, unsigned timeout)
> }
> }
>
> +static uint32_t __batch_create(int i915, uint32_t offset)
> +{
> + const uint32_t bbe = MI_BATCH_BUFFER_END;
> + uint32_t handle;
> +
> + handle = gem_create(i915, ALIGN(offset + 4, 4096));
> + gem_write(i915, handle, offset, &bbe, sizeof(bbe));
> +
> + return handle;
> +}
> +
> +static uint32_t batch_create(int i915)
> +{
> + return __batch_create(i915, 0);
> +}
> +
> +static void semaphore_userlock(int i915)
> +{
> + struct drm_i915_gem_exec_object2 obj = {
> + .handle = batch_create(i915),
> + };
> + igt_spin_t *spin = NULL;
> + unsigned int engine;
> + uint32_t scratch;
> +
> + igt_require(gem_scheduler_has_semaphores(i915));
> +
> + /*
> + * Given the use of semaphores to govern parallel submission
> + * of nearly-ready work to HW, we still want to run actually
> + * ready work immediately. Without semaphores, the dependent
> + * work wouldn't be submitted so our ready work will run.
> + */
> +
> + scratch = gem_create(i915, 4096);
> + for_each_physical_engine(i915, engine) {
> + if (!spin) {
> + spin = igt_spin_batch_new(i915,
> + .dependency = scratch,
> + .engine = engine);
> + } else {
> + uint64_t saved = spin->execbuf.flags;
> +
> + spin->execbuf.flags &= ~ENGINE_MASK;
> + spin->execbuf.flags |= engine;
> +
> + gem_execbuf(i915, &spin->execbuf);
> +
> + spin->execbuf.flags = saved;
> + }
> + }
> + igt_require(spin);
> + gem_close(i915, scratch);
> +
> + /*
> + * On all dependent engines, the request may be executing (busywaiting
> + * on a HW semaphore) but it should not prevent any real work from
> + * taking precedence.
> + */
> + scratch = gem_context_create(i915);
> + for_each_physical_engine(i915, engine) {
> + struct drm_i915_gem_execbuffer2 execbuf = {
> + .buffers_ptr = to_user_pointer(&obj),
> + .buffer_count = 1,
> + .flags = engine,
> + .rsvd1 = scratch,
> + };
> +
> + if (engine == (spin->execbuf.flags & ENGINE_MASK))
> + continue;
> +
> + gem_execbuf(i915, &execbuf);
> + }
> + gem_context_destroy(i915, scratch);
> + gem_sync(i915, obj.handle); /* to hang unless we can preempt */
> + gem_close(i915, obj.handle);
> +
> + igt_spin_batch_free(i915, spin);
> +}
> +
> static void reorder(int fd, unsigned ring, unsigned flags)
> #define EQUAL 1
> {
> @@ -1307,6 +1391,9 @@ igt_main
> igt_require(gem_scheduler_has_ctx_priority(fd));
> }
>
> + igt_subtest("semaphore-user")
> + semaphore_userlock(fd);
> +
> igt_subtest("smoketest-all")
> smoketest(fd, ALL_ENGINES, 30);
>
>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Regards,
Tvrtko
More information about the Intel-gfx
mailing list