[Intel-gfx] [PATCH i-g-t 1/3] lib/igt_dummyload: libify checks for spin batch activation

Chris Wilson chris at chris-wilson.co.uk
Wed Apr 17 20:04:04 UTC 2019


Quoting Mika Kuoppala (2019-04-17 17:43:39)
> Instead of opencoding the poll into the spinner, use
> a helper to check if spinner has started.
> 
> v2: use zero as presumed offset (Chris)
> v3: cleanup the relocs (Chris)
> 
> Cc: Chris Wilson <chris at chris-wilson.co.uk>
> Signed-off-by: Mika Kuoppala <mika.kuoppala at linux.intel.com>
> ---
>  static int
>  emit_recursive_batch(igt_spin_t *spin,
>                      int fd, const struct igt_spin_factory *opts)
> @@ -128,15 +117,20 @@ emit_recursive_batch(igt_spin_t *spin,
>         if (opts->dependency) {
>                 igt_assert(!(opts->flags & IGT_SPIN_POLL_RUN));
>  
> +               r = &relocs[obj[BATCH].relocation_count++];
> +
>                 /* dummy write to dependency */
>                 obj[SCRATCH].handle = opts->dependency;
> -               fill_reloc(&relocs[obj[BATCH].relocation_count++],
> -                          opts->dependency, 1020,
> -                          I915_GEM_DOMAIN_RENDER,
> -                          I915_GEM_DOMAIN_RENDER);
> +               r->presumed_offset = 0;

fwiw, we could make this r->presumed_offset = 4096 and save the kernel
having to patch the batch.
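
Purely as an illustration of what I mean (the names and the 4096 here
are made up, not from the patch): the relocation only needs patching
when the kernel's placement disagrees with presumed_offset, so if we
seed both the reloc entry and the batch dword with the same guess, the
common case becomes a no-op:

	uint32_t *map = batch_start;  /* CPU mapping of the batch object */
	uint64_t guess = 4096;        /* where we expect the dependency to land */

	r->presumed_offset = guess;
	obj[SCRATCH].offset = guess;  /* execbuf treats this as a placement hint */

	/* the to-be-relocated dword must already carry the guessed address */
	map[r->offset / sizeof(uint32_t)] = guess + r->delta;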

> +               r->target_handle = obj[SCRATCH].handle;
> +               r->offset = sizeof(uint32_t) * 1020;
> +               r->delta = 0;
> +               r->read_domains = I915_GEM_DOMAIN_RENDER;
> +               r->write_domain = I915_GEM_DOMAIN_RENDER;
> +
>                 execbuf->buffer_count++;
>         } else if (opts->flags & IGT_SPIN_POLL_RUN) {
> -               unsigned int offset;
> +               r = &relocs[obj[BATCH].relocation_count++];
>  
>                 igt_assert(!opts->dependency);
>  
> @@ -146,39 +140,43 @@ emit_recursive_batch(igt_spin_t *spin,
>                 }
>  
>                 spin->poll_handle = gem_create(fd, 4096);
> +               obj[SCRATCH].handle = spin->poll_handle;
>  
>                 if (__gem_set_caching(fd, spin->poll_handle,
>                                       I915_CACHING_CACHED) == 0)
> -                       spin->running = gem_mmap__cpu(fd, spin->poll_handle,
> -                                                     0, 4096,
> -                                                     PROT_READ | PROT_WRITE);
> +                       spin->poll = gem_mmap__cpu(fd, spin->poll_handle,
> +                                                  0, 4096,
> +                                                  PROT_READ | PROT_WRITE);
>                 else
> -                       spin->running = gem_mmap__wc(fd, spin->poll_handle,
> -                                                    0, 4096,
> -                                                    PROT_READ | PROT_WRITE);
> -               igt_assert_eq(*spin->running, 0);
> +                       spin->poll = gem_mmap__wc(fd, spin->poll_handle,
> +                                                 0, 4096,
> +                                                 PROT_READ | PROT_WRITE);
> +
> +               igt_assert_eq(spin->poll[SPIN_POLL_START_IDX], 0);
> +
> +               r->presumed_offset = 0;
> +               r->target_handle = obj[SCRATCH].handle;
> +               r->offset = sizeof(uint32_t) * 1;

* 1.

Maybe a bit overkill in the pedantry stakes.

> +               r->delta = sizeof(uint32_t) * SPIN_POLL_START_IDX;
> +               r->read_domains = I915_GEM_DOMAIN_INSTRUCTION;
> +               r->write_domain = I915_GEM_DOMAIN_INSTRUCTION;
>  
>                 *batch++ = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
>  
>                 if (gen >= 8) {
> -                       offset = 1;
> -                       *batch++ = 0;
> +                       *batch++ = r->delta;
>                         *batch++ = 0;
>                 } else if (gen >= 4) {
> -                       offset = 2;
> -                       *batch++ = 0;
>                         *batch++ = 0;
> +                       *batch++ = r->delta;
> +                       r->offset += sizeof(uint32_t);
>                 } else {
> -                       offset = 1;
>                         batch[-1]--;
> -                       *batch++ = 0;
> +                       *batch++ = r->delta;
>                 }
>  
>                 *batch++ = 1;
>  
> -               obj[SCRATCH].handle = spin->poll_handle;
> -               fill_reloc(&relocs[obj[BATCH].relocation_count++],
> -                          spin->poll_handle, offset, 0, 0);
>                 execbuf->buffer_count++;
>         }
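
For anyone following along, a rough sketch of how r->offset tracks the
address dword of MI_STORE_DWORD_IMM across generations (dword indices
assume the store is the first instruction in the batch, as here):

	/* gen8+:  [0] MI_STORE_DWORD_IMM  [1] addr lo  [2] addr hi  [3] data
	 *         -> reloc offset stays at dword 1 (64-bit address)
	 * gen4-7: [0] MI_STORE_DWORD_IMM  [1] 0        [2] addr     [3] data
	 *         -> hence the r->offset += sizeof(uint32_t) bump to dword 2
	 * gen2-3: [0] MI_STORE_DWORD_IMM (len - 1)     [1] addr     [2] data
	 *         -> reloc offset stays at dword 1
	 */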
>  
> @@ -408,8 +406,8 @@ void igt_spin_batch_free(int fd, igt_spin_t *spin)
>         gem_munmap((void *)((unsigned long)spin->batch & (~4095UL)),
>                    BATCH_SIZE);
>  
> -       if (spin->running) {
> -               gem_munmap(spin->running, 4096);
> +       if (spin->poll) {
> +               gem_munmap(spin->poll, 4096);
>                 gem_close(fd, spin->poll_handle);
>         }
>  
> diff --git a/lib/igt_dummyload.h b/lib/igt_dummyload.h
> index 73bd035b..3793bf7f 100644
> --- a/lib/igt_dummyload.h
> +++ b/lib/igt_dummyload.h
> @@ -41,7 +41,8 @@ typedef struct igt_spin {
>         struct drm_i915_gem_exec_object2 obj[2];
>         struct drm_i915_gem_execbuffer2 execbuf;
>         uint32_t poll_handle;
> -       bool *running;
> +       uint32_t *poll;
> +#define SPIN_POLL_START_IDX 0

Planning more than one? Plausible: you could stick the timestamp in
there as well, or share the scratch with others.

Just looks very suspicious all by itself :-p
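
i.e. something like this (purely hypothetical, just to show what a
second slot might look like):

	/* hypothetical layout, not in this patch */
	#define SPIN_POLL_START_IDX     0  /* flipped to 1 once the spinner runs */
	#define SPIN_POLL_TIMESTAMP_IDX 1  /* e.g. a TIMESTAMP snapshot */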

>  } igt_spin_t;
>  
>  struct igt_spin_factory {
> @@ -70,9 +71,19 @@ void igt_spin_batch_set_timeout(igt_spin_t *spin, int64_t ns);
>  void igt_spin_batch_end(igt_spin_t *spin);
>  void igt_spin_batch_free(int fd, igt_spin_t *spin);
>  
> -static inline void igt_spin_busywait_until_running(igt_spin_t *spin)
> +static inline bool igt_spin_has_poll(const igt_spin_t *spin)
>  {
> -       while (!READ_ONCE(*spin->running))
> +       return spin->poll;
> +}
> +
> +static inline bool igt_spin_has_started(igt_spin_t *spin)
> +{
> +       return READ_ONCE(spin->poll[SPIN_POLL_START_IDX]);
> +}
> +
> +static inline void igt_spin_busywait_until_started(igt_spin_t *spin)
> +{
> +       while (!igt_spin_has_started(spin))
>                 ;

I keep fearing the GPU hang that leaves this spinning forever.


while (gem_busy(spin->handle) && !igt_spin_has_started(spin))
	;
igt_assert(igt_spin_has_started(spin));

give or take the lack of spin->fd and generalisation of gem_busy. A
problem for another day.
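
Roughly, assuming the spinner grew a copy of its fd (which it doesn't
have today) and reusing the existing gem_bo_busy() wrapper:

	static inline void igt_spin_busywait_until_started(igt_spin_t *spin)
	{
		/* stop spinning if the request retires (e.g. after a GPU
		 * hang and reset) without the batch ever flipping the
		 * poll dword
		 */
		while (gem_bo_busy(spin->fd, spin->handle) &&
		       !igt_spin_has_started(spin))
			;

		igt_assert(igt_spin_has_started(spin));
	}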

Reviewed-by: Chris Wilson <chris at chris-wilson.co.uk>
-Chris

