[igt-dev] [PATCH i-g-t] tests/i915/gem_mmap_offset: add new tests to extend coverage

Chris Wilson chris at chris-wilson.co.uk
Fri Dec 6 09:20:47 UTC 2019


Quoting Zbigniew Kempczyński (2019-12-06 08:37:29)
> Extend coverage by adding:
>  * memory detection function to find reasonable limit on lmem region
>  * big-bo test - check if single bo creation and mapping is possible
>  * coherence test - verify cpu and wc mappings are coherent
>  * test descriptions
>  * some minor changes within existing tests
> 
> Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
> Cc: Chris Wilson <chris at chris-wilson.co.uk>
> ---
>  tests/i915/gem_mmap_offset.c | 150 +++++++++++++++++++++++++++++++----
>  1 file changed, 134 insertions(+), 16 deletions(-)
> 
> diff --git a/tests/i915/gem_mmap_offset.c b/tests/i915/gem_mmap_offset.c
> index 95e1e3e6..747f1cbe 100644
> --- a/tests/i915/gem_mmap_offset.c
> +++ b/tests/i915/gem_mmap_offset.c
> @@ -31,7 +31,7 @@
>  #include "igt.h"
>  #include "igt_x86.h"
>  
> -IGT_TEST_DESCRIPTION("Basic MMAP_OFFSET IOCTL tests for mem regions\n");
> +IGT_TEST_DESCRIPTION("Basic MMAP_OFFSET IOCTL tests\n");
>  
>  static const struct mmap_offset {
>         const char *name;
> @@ -115,12 +115,19 @@ static void bad_object(int i915)
>  
>  static void bad_flags(int i915)
>  {
> +       uint64_t flags = I915_MMAP_OFFSET_UC;

No, please do not create a test that requires manually editing every
time we change the ioctl. That's a very quick way to ensure that a test
is deleted.

Besides, it's every *value*, not just every bit beyond the known ones, that is
invalid. You've barely scratched at the negative space; write a fuzzer
instead. (Or see one I've made earlier...)

> @@ -147,7 +154,7 @@ static void basic_uaf(int i915)
>  
>         for_each_mmap_offset_type(t) {
>                 uint32_t handle = gem_create(i915, obj_size);
> -               uint8_t *expected, *buf, *addr;
> +               uint8_t *buf, *addr;
>  
>                 addr = __mmap_offset(i915, handle, 0, obj_size,
>                                      PROT_READ | PROT_WRITE,
> @@ -157,14 +164,12 @@ static void basic_uaf(int i915)
>                         continue;
>                 }
>  
> -               expected = calloc(obj_size, sizeof(*expected));
> +               buf = calloc(obj_size, sizeof(*buf));
>                 gem_set_domain(i915, handle, t->domain, 0);
> -               igt_assert_f(memcmp(addr, expected, obj_size) == 0,
> +               igt_assert_f(memcmp(addr, buf, obj_size) == 0,
>                              "mmap(%s) not clear on gem_create()\n",
>                              t->name);
> -               free(expected);
>  
> -               buf = calloc(obj_size, sizeof(*buf));
>                 memset(buf + 1024, 0x01, 1024);
>                 gem_write(i915, handle, 0, buf, obj_size);
>                 gem_set_domain(i915, handle, t->domain, 0);
> @@ -184,15 +189,16 @@ static void basic_uaf(int i915)
>                 igt_assert_f(memcmp(buf, addr, obj_size) == 0,
>                              "mmap(%s) not resident after gem_close()\n",
>                              t->name);
> -               free(buf);
>  
> -               igt_debug("Testing unmapping\n");
> +               free(buf);
>                 munmap(addr, obj_size);

>         }
>  }
>  
>  static void isolation(int i915)
>  {
> +       int maps_supported = 0;
> +
>         for_each_mmap_offset_type(t) {
>                 struct drm_i915_gem_mmap_offset mmap_arg = {
>                         .flags = t->type
> @@ -218,10 +224,10 @@ static void isolation(int i915)
>                 igt_assert_eq(mmap_offset_ioctl(B, &mmap_arg), 0);
>                 offset_b = mmap_arg.offset;
>  
> -               igt_info("A[%s]: {fd:%d, handle:%d, offset:%"PRIx64"}\n",
> -                        t->name, A, a, offset_a);
> -               igt_info("B[%s]: {fd:%d, handle:%d, offset:%"PRIx64"}\n",
> -                        t->name, B, b, offset_b);
> +               igt_debug("A[%s]: {fd:%d, handle:%d, offset:%"PRIx64"}\n",
> +                         t->name, A, a, offset_a);
> +               igt_debug("B[%s]: {fd:%d, handle:%d, offset:%"PRIx64"}\n",
> +                         t->name, B, b, offset_b);
>  
>                 errno = 0;
>                 ptr = mmap64(0, 4096, PROT_READ, MAP_SHARED, i915, offset_a);
> @@ -253,7 +259,11 @@ static void isolation(int i915)
>  
>                 ptr = mmap64(0, 4096, PROT_READ, MAP_SHARED, A, offset_a);
>                 igt_assert(ptr == MAP_FAILED);
> +
> +               maps_supported++;
>         }
> +
> +       igt_assert_f(maps_supported, "No mmap offset type found!\n");

No. If you think you need to mandate that the kernel has to support at
least one of your known mapping types, create a test to do that. The
kernel does not have to subscribe to your view, though; its job is to
tell you exactly what it and the HW supports. If we have to close
security loopholes, we have to close security loopholes.

>  }
>  
>  static void pf_nonblock(int i915)
> @@ -264,8 +274,8 @@ static void pf_nonblock(int i915)
>                 uint32_t *ptr;
>  
>                 ptr = __mmap_offset(i915, spin->handle, 0, 4096,
> -                                   PROT_READ | PROT_WRITE,
> -                                   t->type);
> +                                       PROT_READ | PROT_WRITE,
> +                                       t->type);

Check your whitespace settings.

> +/*
> + * Depending on allocation region we have different memory constraints.
> + * Try to find reasonable limit to cover the test regardless the region.
> + */
> +static uint64_t __get_memory_size_in_mb(int i915)
> +{
> +       uint64_t aperture_size = gem_available_aperture_size(i915);
> +       uint32_t size = 1024*1024;
> +       uint32_t *handles;
> +       uint32_t num_handles;
> +       int i = 0;
> +
> +       igt_debug("Aperture size: %zd\n", gem_available_aperture_size(i915));
> +
> +       num_handles = aperture_size / size;
> +       handles = malloc(num_handles * sizeof(uint32_t));
> +
> +       for (i = 0; i < num_handles; i++) {
> +               if (__gem_create(i915, size, &handles[i]))
> +                       break;
> +       }
> +       igt_debug("Created %d/%u handles of size %u\n", i, num_handles, size);
> +       num_handles = i;
> +
> +       for (i = 0; i < num_handles; i++)
> +               gem_close(i915, handles[i]);
> +       free(handles);

Come back to this when there is an actual API to describe limits. This
is simply meaningless.

> +static void test_coherency(int i915)

There's quite a few coherencies to check
wc/wb
uc/wb
uc/wc
gtt/wb
gtt/wc
gtt/uc

Some of those are known not to be coherent... Are we advertising the
limits of the ABI correctly?

> +{
> +       uint64_t size = __get_memory_size_in_mb(i915) << 16; /* 1/16 is enough */
> +       uint32_t handle;
> +       uint32_t *wc, *cpu;
> +       int i;
> +
> +       igt_require(igt_setup_clflush());
> +
> +       handle = gem_create(i915, size);
> +
> +       wc = __mmap_offset(i915, handle, 0, size, PROT_READ | PROT_WRITE,
> +                          I915_MMAP_OFFSET_WC);
> +       cpu = __mmap_offset(i915, handle, 0, size, PROT_READ | PROT_WRITE,
> +                          I915_MMAP_OFFSET_WB);
> +       gem_set_domain(i915, handle, I915_GEM_DOMAIN_WC, I915_GEM_DOMAIN_WC);
> +
> +       for (i = 0; i < size / 64; i++) {
> +               int x = 16*i + (i%16);
> +               wc[x] = i;
> +               igt_clflush_range(&cpu[x], sizeof(cpu[x]));
> +               igt_assert_eq(cpu[x], i);
> +       }
> +
> +       munmap(cpu, size);
> +       munmap(wc, size);
> +       gem_close(i915, handle);
> +}
> +
> +static void big_bo(int i915)
> +{
> +       uint64_t size = __get_memory_size_in_mb(i915) << 20;
> +       uint32_t handle;
> +       uint8_t *ptr;
> +       int i;
> +
> +       for_each_mmap_offset_type(t) {
> +               __gem_create(i915, size, &handle);
> +               igt_assert_f(handle, "Cannot create big bo (size: %zu MB)\n",
> +                            size >> 20);
> +
> +               igt_debug("Mmaping bo, handle: %u, size: %zu MB\n",
> +                         handle, size >> 20);
> +               ptr = __mmap_offset(i915, handle, 0, size,
> +                                   PROT_READ | PROT_WRITE,
> +                                   t->type);
> +               gem_close(i915, handle);
> +               if (!ptr)
> +                       continue;
> +
> +               for (i = 0; i < size; i += 4096)
> +                       ptr[i] = i / 4096;
> +
> +               munmap(ptr, size);

It's a start. We also need to concurrently pagefault multiple objects of
maximum size (for which the current maximum is actually unbounded, i.e.
phys + swap; it's just that the kernel cannot handle it, bad kernel), and
concurrent pagefaults of the same large object. It would also be
sensible to check concurrent pagefaults of different types.
-Chris


More information about the igt-dev mailing list