[Mesa-dev] [PATCH v4 2/3] i965: add noise padding to buffer object and function to check if noise is correct

kevin.rogovin at intel.com
Tue Jan 30 10:51:18 UTC 2018


From: Kevin Rogovin <kevin.rogovin at intel.com>

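When the DEBUG_CHECK_OOB debug flag is set, allocate each GEM buffer
object with extra padding after its advertised size and fill that
padding with weak pseudo-random noise values. A new helper,
brw_bo_padding_is_good(), maps the padding back and reports whether
the noise is still intact, i.e. whether something wrote past the end
of the BO.
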
Signed-off-by: Kevin Rogovin <kevin.rogovin at intel.com>
---
 src/mesa/drivers/dri/i965/brw_bufmgr.c | 101 ++++++++++++++++++++++++++++++++-
 src/mesa/drivers/dri/i965/brw_bufmgr.h |  13 +++++
 2 files changed, 113 insertions(+), 1 deletion(-)
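
Review note: below is a minimal sketch of the intended calling convention
for brw_bo_padding_is_good() once a batch has completed. The
validate_bo_padding() helper and its error reporting are illustrative
only and are not part of this patch.

#include <stdio.h>
#include "brw_bufmgr.h"

static void
validate_bo_padding(struct brw_bo *bo)
{
   /* The padding check reads the BO back with the CPU, so make sure the
    * GPU has finished any writes to it first (see brw_bufmgr.h).
    */
   brw_bo_wait_rendering(bo);

   if (!brw_bo_padding_is_good(bo)) {
      fprintf(stderr, "Out-of-bounds write detected on BO %u (%s)\n",
              bo->gem_handle, bo->name);
   }
}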

diff --git a/src/mesa/drivers/dri/i965/brw_bufmgr.c b/src/mesa/drivers/dri/i965/brw_bufmgr.c
index fb180289a0..5c609b2332 100644
--- a/src/mesa/drivers/dri/i965/brw_bufmgr.c
+++ b/src/mesa/drivers/dri/i965/brw_bufmgr.c
@@ -220,6 +220,27 @@ bucket_for_size(struct brw_bufmgr *bufmgr, uint64_t size)
           &bufmgr->cache_bucket[index] : NULL;
 }
 
+/* Our goal is not noise that is good enough for crypto,
+ * but values that are unique-ish enough that it is
+ * incredibly unlikely that a buffer overwrite will
+ * reproduce the exact same values.
+ */
+static uint8_t
+next_noise_value(uint8_t prev_noise)
+{
+   uint32_t v = prev_noise;
+   return (v * 103u + 227u) & 0xFF;
+}
+
+static void
+fill_noise_buffer(uint8_t *dst, uint8_t start, uint32_t length)
+{
+   for (uint32_t i = 0; i < length; ++i) {
+      dst[i] = start;
+      start = next_noise_value(start);
+   }
+}
+
 int
 brw_bo_busy(struct brw_bo *bo)
 {
@@ -367,7 +388,18 @@ retry:
       bo->size = bo_size;
       bo->idle = true;
 
-      struct drm_i915_gem_create create = { .size = bo_size };
+      bo->padding_size = 0;
+      if (unlikely(INTEL_DEBUG & DEBUG_CHECK_OOB)) {
+         /* TODO: we want to make sure that the padding forces
+          * the BO to take another page on the (PP)GTT; 4KB
+          * may or may not be the page size for the BO. Indeed,
+          * depending on GPU, kernel version and GEM size, the
+          * page size can be one of 4KB, 64KB or 2M.
+          */
+         bo->padding_size = 4096;
+      }
+
+      struct drm_i915_gem_create create = { .size = bo_size + bo->padding_size };
 
       /* All new BOs we get from the kernel are zeroed, so we don't need to
        * worry about that here.
@@ -378,6 +410,25 @@ retry:
          goto err;
       }
 
+      if (unlikely(bo->padding_size > 0)) {
+         uint8_t *noise_values;
+         struct drm_i915_gem_mmap mmap_arg = {
+            .handle = create.handle,
+            .offset = bo->size,
+            .size = bo->padding_size,
+            .flags = 0,
+         };
+
+         ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+         if (ret != 0)
+            goto err_free;
+
+         noise_values = (uint8_t*) (uintptr_t) mmap_arg.addr_ptr;
+         fill_noise_buffer(noise_values, create.handle & 0xFF,
+                           bo->padding_size);
+         drm_munmap(noise_values, bo->padding_size);
+      }
+
       bo->gem_handle = create.handle;
 
       bo->bufmgr = bufmgr;
@@ -424,6 +475,53 @@ err:
    return NULL;
 }
 
+bool
+brw_bo_padding_is_good(struct brw_bo *bo)
+{
+   if (bo->padding_size > 0) {
+      struct drm_i915_gem_mmap mmap_arg = {
+         .handle = bo->gem_handle,
+         .offset = bo->size,
+         .size = bo->padding_size,
+         .flags = 0,
+      };
+      uint8_t *mapped;
+      int ret;
+      uint8_t expected_value;
+
+      /* We cannot use brw_bo_map() because it maps precisely the range
+       * [0, bo->size), and we wish to map the padding range, which is
+       * [bo->size, bo->size + bo->padding_size).
+       */
+      ret = drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+      if (ret != 0) {
+         DBG("Unable to map mapping buffer %d (%s) buffer for pad checking.\n",
+             bo->gem_handle, bo->name);
+         return false;
+      }
+
+      mapped = (uint8_t*) (uintptr_t) mmap_arg.addr_ptr;
+      /* We need to see the latest contents, and if the BO is
+       * not cache coherent we likely need to invalidate the
+       * cache lines to get them.
+       */
+      if (!bo->cache_coherent && !bo->bufmgr->has_llc) {
+         gen_invalidate_range(mapped, bo->padding_size);
+      }
+
+      expected_value = bo->gem_handle & 0xFF;
+      for (uint32_t i = 0; i < bo->padding_size; ++i) {
+         if (expected_value != mapped[i]) {
+            drm_munmap(mapped, bo->padding_size);
+            return false;
+         }
+         expected_value = next_noise_value(expected_value);
+      }
+      drm_munmap(mapped, bo->padding_size);
+   }
+   return true;
+}
+
 struct brw_bo *
 brw_bo_alloc(struct brw_bufmgr *bufmgr,
              const char *name, uint64_t size, uint64_t alignment)
@@ -1157,6 +1255,7 @@ brw_bo_gem_create_from_prime_internal(struct brw_bufmgr *bufmgr, int prime_fd,
    bo->name = "prime";
    bo->reusable = false;
    bo->external = true;
+   bo->padding_size = 0;
 
    if (tiling_mode < 0) {
       struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
diff --git a/src/mesa/drivers/dri/i965/brw_bufmgr.h b/src/mesa/drivers/dri/i965/brw_bufmgr.h
index a3745d6667..fcfd2a9f75 100644
--- a/src/mesa/drivers/dri/i965/brw_bufmgr.h
+++ b/src/mesa/drivers/dri/i965/brw_bufmgr.h
@@ -165,6 +165,12 @@ struct brw_bo {
     * Boolean of whether this buffer is cache coherent
     */
    bool cache_coherent;
+
+   /**
+    * Size of the padding of weak pseudo-random values placed after the
+    * BO's data; used to check for out-of-bounds buffer writes.
+    */
+   uint32_t padding_size;
 };
 
 #define BO_ALLOC_BUSY       (1<<0)
@@ -346,6 +352,13 @@ uint32_t brw_bo_export_gem_handle(struct brw_bo *bo);
 int brw_reg_read(struct brw_bufmgr *bufmgr, uint32_t offset,
                  uint64_t *result);
 
+/* Maps the padding of the brw_bo and returns true if and only if the
+ * padding holds the expected noise values. The caller must make sure
+ * that the GPU is not going to write to the BO; the best way to do
+ * that is to call brw_bo_wait_rendering() on the batchbuffer.
+ */
+bool brw_bo_padding_is_good(struct brw_bo *bo);
+
 /** @{ */
 
 #if defined(__cplusplus)
-- 
2.16.1
