[igt-dev] [PATCH i-g-t 1/2] lib/dummyload: Allow spin batches to be restarted

Chris Wilson chris at chris-wilson.co.uk
Wed Jan 31 15:44:41 UTC 2018


Quoting Tvrtko Ursulin (2018-01-31 12:34:40)
> From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
> 
> This adds the igt_spin_batch_restart API so the same spin batch can be
> re-used in tests which care about low setup cost.
> 
> The batch will be re-submitted in a manner identical to its original
> submission.

Heh, I would have just marked the objects as EXEC_OBJECT_PINNED after
execution and dropped the relocs :)
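
Something like this rough sketch (hypothetical helper, not in the
patch): after the first execbuf the kernel writes the assigned offsets
back into obj[].offset, so we can pin the objects there and drop the
relocations, and every later execbuf reuses those offsets without any
relocation processing:

static void __spin_pin_objects(igt_spin_t *spin)
{
        struct drm_i915_gem_exec_object2 *obj =
                spin->obj + (2 - spin->execbuf.buffer_count);
        unsigned int i;

        for (i = 0; i < spin->execbuf.buffer_count; i++) {
                /* obj[i].offset now holds the kernel-assigned address */
                obj[i].flags |= EXEC_OBJECT_PINNED;
                obj[i].relocation_count = 0;
                obj[i].relocs_ptr = 0;
        }
}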

> 
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
> ---
>  lib/igt_dummyload.c | 150 +++++++++++++++++++++++++++++++++-------------------
>  lib/igt_dummyload.h |   9 ++++
>  2 files changed, 106 insertions(+), 53 deletions(-)
> 
> diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c
> index 27eb402bb699..102efd197e5b 100644
> --- a/lib/igt_dummyload.c
> +++ b/lib/igt_dummyload.c
> @@ -71,6 +71,35 @@ fill_reloc(struct drm_i915_gem_relocation_entry *reloc,
>         reloc->write_domain = write_domains;
>  }
>  
> +static int __igt_spin_batch_submit(int fd, igt_spin_t *spin)
> +{
> +       int fence_fd = -1;
> +       unsigned int i;
> +
> +       for (i = 0; i < spin->nengine; i++) {
> +               spin->execbuf.flags &= ~ENGINE_MASK;
> +               spin->execbuf.flags |= spin->engines[i];
> +               gem_execbuf_wr(fd, &spin->execbuf);
> +               if (spin->out_fence_requested) {
> +                       int _fd = spin->execbuf.rsvd2 >> 32;
> +
> +                       igt_assert(_fd >= 0);
> +                       if (fence_fd == -1) {
> +                               fence_fd = _fd;
> +                       } else {
> +                               int old_fd = fence_fd;
> +
> +                               fence_fd = sync_fence_merge(old_fd, _fd);
> +                               close(old_fd);
> +                               close(_fd);
> +                       }
> +                       igt_assert(fence_fd >= 0);
> +               }
> +       }
> +
> +       return fence_fd;
> +}
> +
>  static int emit_recursive_batch(igt_spin_t *spin,
>                                 int fd, uint32_t ctx, unsigned engine,
>                                 uint32_t dep, bool out_fence)
> @@ -78,52 +107,46 @@ static int emit_recursive_batch(igt_spin_t *spin,
>  #define SCRATCH 0
>  #define BATCH 1
>         const int gen = intel_gen(intel_get_drm_devid(fd));
> -       struct drm_i915_gem_exec_object2 obj[2];
> -       struct drm_i915_gem_relocation_entry relocs[2];
> -       struct drm_i915_gem_execbuffer2 execbuf;
> -       unsigned int engines[16];
> -       unsigned int nengine;
> -       int fence_fd = -1;
>         uint32_t *batch;
> -       int i;
>  
> -       nengine = 0;
> +       spin->nengine = 0;
>         if (engine == -1) {
>                 for_each_engine(fd, engine)
>                         if (engine)
> -                               engines[nengine++] = engine;
> +                               spin->engines[spin->nengine++] = engine;
>         } else {
>                 gem_require_ring(fd, engine);
> -               engines[nengine++] = engine;
> +               spin->engines[spin->nengine++] = engine;
>         }
> -       igt_require(nengine);
> +       igt_require(spin->nengine);
>  
> -       memset(&execbuf, 0, sizeof(execbuf));
> -       memset(obj, 0, sizeof(obj));
> -       memset(relocs, 0, sizeof(relocs));
> +       memset(&spin->execbuf, 0, sizeof(spin->execbuf));
> +       memset(spin->obj, 0, sizeof(spin->obj));
> +       memset(spin->relocs, 0, sizeof(spin->relocs));
>  
> -       obj[BATCH].handle = gem_create(fd, BATCH_SIZE);
> -       batch = __gem_mmap__wc(fd, obj[BATCH].handle,
> +       spin->obj[BATCH].handle = gem_create(fd, BATCH_SIZE);
> +       batch = __gem_mmap__wc(fd, spin->obj[BATCH].handle,
>                                0, BATCH_SIZE, PROT_WRITE);
>         if (!batch)
> -               batch = __gem_mmap__gtt(fd, obj[BATCH].handle,
> +               batch = __gem_mmap__gtt(fd, spin->obj[BATCH].handle,
>                                         BATCH_SIZE, PROT_WRITE);
> -       gem_set_domain(fd, obj[BATCH].handle,
> +       gem_set_domain(fd, spin->obj[BATCH].handle,
>                         I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
> -       execbuf.buffer_count++;
> +       spin->execbuf.buffer_count++;
>  
>         if (dep) {
>                 /* dummy write to dependency */
> -               obj[SCRATCH].handle = dep;
> -               fill_reloc(&relocs[obj[BATCH].relocation_count++],
> +               spin->obj[SCRATCH].handle = dep;
> +               fill_reloc(&spin->relocs[spin->obj[BATCH].relocation_count++],
>                            dep, 1020,
>                            I915_GEM_DOMAIN_RENDER,
>                            I915_GEM_DOMAIN_RENDER);
> -               execbuf.buffer_count++;
> +               spin->execbuf.buffer_count++;
>         }
>  
>         spin->batch = batch;
> -       spin->handle = obj[BATCH].handle;
> +       spin->handle = spin->obj[BATCH].handle;
> +       spin->out_fence_requested = out_fence;
>  
>         /* Allow ourselves to be preempted */
>         *batch++ = MI_ARB_CHK;
> @@ -142,8 +165,8 @@ static int emit_recursive_batch(igt_spin_t *spin,
>         batch += 1000;
>  
>         /* recurse */
> -       fill_reloc(&relocs[obj[BATCH].relocation_count],
> -                  obj[BATCH].handle, (batch - spin->batch) + 1,
> +       fill_reloc(&spin->relocs[spin->obj[BATCH].relocation_count],
> +                  spin->obj[BATCH].handle, (batch - spin->batch) + 1,
>                    I915_GEM_DOMAIN_COMMAND, 0);
>         if (gen >= 8) {
>                 *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
> @@ -157,41 +180,21 @@ static int emit_recursive_batch(igt_spin_t *spin,
>                 *batch = 0;
>                 if (gen < 4) {
>                         *batch |= 1;
> -                       relocs[obj[BATCH].relocation_count].delta = 1;
> +                       spin->relocs[spin->obj[BATCH].relocation_count].delta = 1;
>                 }
>                 batch++;
>         }
> -       obj[BATCH].relocation_count++;
> -       obj[BATCH].relocs_ptr = to_user_pointer(relocs);
> +       spin->obj[BATCH].relocation_count++;
> +       spin->obj[BATCH].relocs_ptr = to_user_pointer(spin->relocs);
>  
> -       execbuf.buffers_ptr = to_user_pointer(obj + (2 - execbuf.buffer_count));
> -       execbuf.rsvd1 = ctx;
> +       spin->execbuf.buffers_ptr =
> +               to_user_pointer(spin->obj + (2 - spin->execbuf.buffer_count));
> +       spin->execbuf.rsvd1 = ctx;
>  
>         if (out_fence)
> -               execbuf.flags |= I915_EXEC_FENCE_OUT;
> -
> -       for (i = 0; i < nengine; i++) {
> -               execbuf.flags &= ~ENGINE_MASK;
> -               execbuf.flags |= engines[i];
> -               gem_execbuf_wr(fd, &execbuf);
> -               if (out_fence) {
> -                       int _fd = execbuf.rsvd2 >> 32;
> -
> -                       igt_assert(_fd >= 0);
> -                       if (fence_fd == -1) {
> -                               fence_fd = _fd;
> -                       } else {
> -                               int old_fd = fence_fd;
> -
> -                               fence_fd = sync_fence_merge(old_fd, _fd);
> -                               close(old_fd);
> -                               close(_fd);
> -                       }
> -                       igt_assert(fence_fd >= 0);
> -               }
> -       }
> +               spin->execbuf.flags |= I915_EXEC_FENCE_OUT;
>  
> -       return fence_fd;
> +       return __igt_spin_batch_submit(fd, spin);
>  }
>  
>  static igt_spin_t *
> @@ -273,6 +276,47 @@ igt_spin_batch_new_fence(int fd, uint32_t ctx, unsigned engine)
>         return __igt_spin_batch_new_fence(fd, ctx, engine);
>  }
>  
> +/**
> + * __igt_spin_batch_restart:
> + * @fd: open i915 drm file descriptor
> + * @spin: spin batch state from igt_spin_batch_new()
> + *
> + * Restarts the spin batch which was previously ended either explicitly
> + * or via timeout.
> + *
> + * This version does not verify that the batch is currently idle.
> + *
> + * Returns:
> + * New fence fd if the spin batch was originally created requesting
> + * an output fence.
> + */
> +int __igt_spin_batch_restart(int fd, igt_spin_t *spin)
> +{
> +       *spin->batch = MI_ARB_CHK;
> +       __sync_synchronize();

Hmm, I would maybe assert(*spin->batch == MI_BBE), but the mfence here
is part of the following submit (i.e. since this batch must be idle for
restart to work reliably, we don't need an explicit mfence; that is
handled for us by submit).
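
Folding both suggestions in, the unlocked restart might look like this
(a sketch; MI_BATCH_BUFFER_END is what igt_spin_batch_end() writes):

int __igt_spin_batch_restart(int fd, igt_spin_t *spin)
{
        /* The batch must already have been ended or timed out. */
        igt_assert_eq_u32(*spin->batch, MI_BATCH_BUFFER_END);

        *spin->batch = MI_ARB_CHK;
        /* No explicit __sync_synchronize(); the execbuf in submit
         * orders the CPU write before the GPU reads the batch. */

        return __igt_spin_batch_submit(fd, spin);
}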

> +
> +       return __igt_spin_batch_submit(fd, spin);
> +}
> +
> +/**
> + * igt_spin_batch_restart:
> + * @fd: open i915 drm file descriptor
> + * @spin: spin batch state from igt_spin_batch_new()
> + *
> + * Restarts the spin batch which was previously ended either explicitly
> + * or via timeout.
> + *
> + * Returns:
> + * New fence fd if the spin batch was originally created requesting
> + * an output fence.
> + */
> +int igt_spin_batch_restart(int fd, igt_spin_t *spin)
> +{
> +       igt_assert(!gem_bo_busy(fd, spin->handle));
> +
> +       return __igt_spin_batch_restart(fd, spin);
> +}
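
For reference, usage in a test would then be roughly (hypothetical
snippet):

        igt_spin_t *spin = igt_spin_batch_new(fd, 0, I915_EXEC_RENDER, 0);

        igt_spin_batch_end(spin);
        gem_sync(fd, spin->handle);       /* batch is now idle */

        igt_spin_batch_restart(fd, spin); /* cheap resubmit, no re-setup */

        igt_spin_batch_free(fd, spin);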
> +
>  static void notify(union sigval arg)
>  {
>         igt_spin_t *spin = arg.sival_ptr;
> diff --git a/lib/igt_dummyload.h b/lib/igt_dummyload.h
> index ffa7e351dea3..b9f201d4afb6 100644
> --- a/lib/igt_dummyload.h
> +++ b/lib/igt_dummyload.h
> @@ -36,6 +36,12 @@ typedef struct igt_spin {
>         struct igt_list link;
>         uint32_t *batch;
>         int out_fence;
> +       struct drm_i915_gem_exec_object2 obj[2];
> +       struct drm_i915_gem_relocation_entry relocs[2];
> +       struct drm_i915_gem_execbuffer2 execbuf;

I'm dubious whether we want to emit the dependency obj on resubmit. I
can see where it may be desired, and where it may be a hindrance.

I think I'm coming down on the side that re-emitting the dependency is
surprising, and would argue that if it is desired it should be an
explicit parameter to restart().
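i.e. something like (hypothetical shape, names invented):

int igt_spin_batch_restart(int fd, igt_spin_t *spin, unsigned int flags);
#define IGT_SPIN_RESTART_DEP (1 << 0) /* also resubmit the dep object */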
-Chris

