[Mesa-dev] [PATCH v2 2/3] i965: add noise padding to buffer objects and a function to check that the noise padding is intact

kevin.rogovin at intel.com
Wed Dec 13 10:18:37 UTC 2017


From: Kevin Rogovin <kevin.rogovin at intel.com>

v2:
 Change from using rand() to an internal generating function
 (requested/suggested by Jason Ekstrand)

 Avoid extra pointers in the brw_bo struct by using the internal
 generator and allocating the pread buffer inside brw_bo_padding_is_good()
 (requested/suggested by Jason Ekstrand)

 Add comments noting that the pread ioctl does the required waiting
 for GPU commands to finish

Signed-off-by: Kevin Rogovin <kevin.rogovin at intel.com>
---
 src/mesa/drivers/dri/i965/brw_bufmgr.c | 107 ++++++++++++++++++++++++++++++++-
 src/mesa/drivers/dri/i965/brw_bufmgr.h |   8 +++
 2 files changed, 114 insertions(+), 1 deletion(-)
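
Note for reviewers (not part of the patch): a minimal sketch of how a caller
could consume the new check once a batch has executed, assuming
DEBUG_OUT_OF_BOUND_CHK is set and the usual i965 headers (brw_bufmgr.h,
intel_debug.h) are in scope. check_bo_padding(), bo_list and bo_count are
hypothetical names standing in for whatever tracks the BOs referenced by the
batch; the actual call site is not part of this patch.

   #include <stdio.h>

   static void
   check_bo_padding(struct brw_bo **bo_list, int bo_count)
   {
      if (!(INTEL_DEBUG & DEBUG_OUT_OF_BOUND_CHK))
         return;

      for (int i = 0; i < bo_count; i++) {
         /* brw_bo_padding_is_good() preads the padding back (the pread
          * ioctl waits for the GPU) and compares it against the expected
          * noise sequence seeded from the gem handle.
          */
         if (!brw_bo_padding_is_good(bo_list[i]))
            fprintf(stderr, "Out-of-bounds write detected on BO '%s'\n",
                    bo_list[i]->name);
      }
   }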

diff --git a/src/mesa/drivers/dri/i965/brw_bufmgr.c b/src/mesa/drivers/dri/i965/brw_bufmgr.c
index 52b5bf97a1..274147d2ce 100644
--- a/src/mesa/drivers/dri/i965/brw_bufmgr.c
+++ b/src/mesa/drivers/dri/i965/brw_bufmgr.c
@@ -220,6 +220,39 @@ bucket_for_size(struct brw_bufmgr *bufmgr, uint64_t size)
           &bufmgr->cache_bucket[index] : NULL;
 }
 
+/* Our goal is not to have noise good enough for crypto,
+ * but instead values that are unique-ish enough that
+ * it is incredibly unlikely that a buffer overwrite
+ * will produce the exact same values.
+ */
+static uint8_t
+next_noise_value(uint8_t prev_noise)
+{
+   uint32_t v = prev_noise;
+   return (v * 103u + 227u) & 0xFF;
+}
+
+static void
+fill_noise_buffer(uint8_t *dst, uint8_t start, uint32_t length)
+{
+   for (uint32_t i = 0; i < length; ++i) {
+      dst[i] = start;
+      start = next_noise_value(start);
+   }
+}
+
+static uint8_t *
+generate_noise_buffer(uint8_t start, uint32_t length)
+{
+   uint8_t *return_value;
+   return_value = malloc(length);
+   if (return_value) {
+      fill_noise_buffer(return_value, start, length);
+   }
+
+   return return_value;
+}
+
 int
 brw_bo_busy(struct brw_bo *bo)
 {
@@ -367,7 +400,18 @@ retry:
       bo->size = bo_size;
       bo->idle = true;
 
-      struct drm_i915_gem_create create = { .size = bo_size };
+      bo->padding_size = 0;
+      if (unlikely(INTEL_DEBUG & DEBUG_OUT_OF_BOUND_CHK)) {
+         /* TODO: we want to make sure that the padding forces
+          * the BO to take another page in the (PP)GTT; 4KB
+          * may or may not be the GEM object's page size. Indeed,
+          * depending on generation, kernel version and GEM size,
+          * the page size can be one of 4KB, 64KB or 2MB.
+          */
+         bo->padding_size = 4096;
+      }
+
+      struct drm_i915_gem_create create = { .size = bo_size + bo->padding_size };
 
       /* All new BOs we get from the kernel are zeroed, so we don't need to
        * worry about that here.
@@ -378,6 +422,27 @@ retry:
          goto err;
       }
 
+      if (unlikely(bo->padding_size > 0)) {
+         uint8_t *noise_values;
+         struct drm_i915_gem_pwrite pwrite;
+
+         noise_values = generate_noise_buffer(create.handle, bo->padding_size);
+         if (!noise_values)
+            goto err_free;
+
+         pwrite.handle = create.handle;
+         pwrite.pad = 0;
+         pwrite.offset = bo_size;
+         pwrite.size = bo->padding_size;
+         pwrite.data_ptr = (__u64) (uintptr_t) noise_values;
+
+         ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
+         free(noise_values);
+
+         if (ret != 0)
+            goto err_free;
+      }
+
       bo->gem_handle = create.handle;
 
       bo->bufmgr = bufmgr;
@@ -424,6 +489,44 @@ err:
    return NULL;
 }
 
+bool
+brw_bo_padding_is_good(struct brw_bo *bo)
+{
+   if (bo->padding_size > 0) {
+      uint8_t *tmp;
+      struct drm_i915_gem_pread pread;
+      int ret;
+      uint8_t start;
+
+      tmp = malloc(bo->padding_size);
+      if (!tmp)
+         return false;
+
+      pread.handle = bo->gem_handle;
+      pread.pad = 0;
+      pread.offset = bo->size;
+      pread.size = bo->padding_size;
+      pread.data_ptr = (__u64) (uintptr_t) tmp;
+
+      /* PREAD waits for any processing by the GPU that uses the buffer
+       * to finish before reading values from that buffer.
+       */
+      ret = drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
+      assert(ret == 0);
+
+      start = bo->gem_handle & 0xFF;
+      for (uint32_t i = 0; i < bo->padding_size; ++i) {
+         if (start != tmp[i]) {
+            free(tmp);
+            return false;
+         }
+         start = next_noise_value(start);
+      }
+      free(tmp);
+   }
+   return true;
+}
+
 struct brw_bo *
 brw_bo_alloc(struct brw_bufmgr *bufmgr,
              const char *name, uint64_t size, uint64_t alignment)
@@ -598,6 +701,7 @@ bo_free(struct brw_bo *bo)
       DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
           bo->gem_handle, bo->name, strerror(errno));
    }
+
    free(bo);
 }
 
@@ -1156,6 +1260,7 @@ brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr, int prime_fd)
    bo->name = "prime";
    bo->reusable = false;
    bo->external = true;
+   bo->padding_size = 0;
 
    struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
    if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
diff --git a/src/mesa/drivers/dri/i965/brw_bufmgr.h b/src/mesa/drivers/dri/i965/brw_bufmgr.h
index 0ae541cda0..e402e61c53 100644
--- a/src/mesa/drivers/dri/i965/brw_bufmgr.h
+++ b/src/mesa/drivers/dri/i965/brw_bufmgr.h
@@ -165,6 +165,12 @@ struct brw_bo {
     * Boolean of whether this buffer is cache coherent
     */
    bool cache_coherent;
+
+   /**
+    * Size of the padding filled with weak pseudo-random values, used to
+    * detect out-of-bounds writes to the buffer.
+    */
+   uint32_t padding_size;
 };
 
 #define BO_ALLOC_BUSY       (1<<0)
@@ -342,6 +348,8 @@ uint32_t brw_bo_export_gem_handle(struct brw_bo *bo);
 int brw_reg_read(struct brw_bufmgr *bufmgr, uint32_t offset,
                  uint64_t *result);
 
+bool brw_bo_padding_is_good(struct brw_bo *bo);
+
 /** @{ */
 
 #if defined(__cplusplus)
-- 
2.15.0


