[Mesa-dev] [PATCH 2/3] i965: add noise padding to buffer object and function to check if noise is correct
Jason Ekstrand
jason at jlekstrand.net
Tue Dec 12 20:29:23 UTC 2017
I can't help but think that this could be a bit simpler and involve
throwing fewer pointers around.
On Fri, Dec 8, 2017 at 2:54 AM, <kevin.rogovin at intel.com> wrote:
> From: Kevin Rogovin <kevin.rogovin at intel.com>
>
> Signed-off-by: Kevin Rogovin <kevin.rogovin at intel.com>
> ---
> src/mesa/drivers/dri/i965/brw_bufmgr.c | 68
> +++++++++++++++++++++++++++++++++-
> src/mesa/drivers/dri/i965/brw_bufmgr.h | 12 ++++++
> 2 files changed, 79 insertions(+), 1 deletion(-)
>
> diff --git a/src/mesa/drivers/dri/i965/brw_bufmgr.c
> b/src/mesa/drivers/dri/i965/brw_bufmgr.c
> index 52b5bf9..7167165 100644
> --- a/src/mesa/drivers/dri/i965/brw_bufmgr.c
> +++ b/src/mesa/drivers/dri/i965/brw_bufmgr.c
> @@ -367,7 +367,14 @@ retry:
> bo->size = bo_size;
> bo->idle = true;
>
> - struct drm_i915_gem_create create = { .size = bo_size };
> + bo->padding.size = 0;
> + bo->padding.value = NULL;
> + bo->padding.tmp = NULL;
> + if (unlikely(INTEL_DEBUG & DEBUG_OUT_OF_BOUND_CHK)) {
> + bo->padding.size = getpagesize();
>
This is 4096. I think we could just have a single uint32_t padding field
which is either 0 or 4096 (more on that later).
> + }
> +
> + struct drm_i915_gem_create create = { .size = bo_size +
> bo->padding.size };
>
> /* All new BOs we get from the kernel are zeroed, so we don't need
> to
> * worry about that here.
> @@ -378,6 +385,31 @@ retry:
> goto err;
> }
>
> + if (unlikely(bo->padding.size > 0)) {
> + struct drm_i915_gem_pwrite pwrite;
> +
> + bo->padding.value = calloc(bo->padding.size, 1);
> + bo->padding.tmp = calloc(bo->padding.size, 1);
> + if (!bo->padding.value || !bo->padding.tmp) {
> + goto err_free;
> + }
> +
> + for (uint32_t i = 0; i < bo->padding.size; ++i) {
> + bo->padding.value[i] = rand() & 0xFF;
>
Does using rand() really help us? Why not just come up with some hash-like
thing which generates consistent pseudo-random data? How about something
like "value[i] = i * 853 + 193" (some random primes)? That would mean
that we can generate the data and check it without having to store it in a
per-bo temporary. If you want it to be magic per-bo, you could also seed
it somehow with the bo handle (just add handle * 607).
If we always allocate 4096B of padding, then you don't need to heap
allocate it and can just put it on the stack for the purpose of interacting
with pread/pwrite. It's a bit big but still perfectly reasonable. If a
day came when we wanted to make the padding size adjustable, it would still
probably be reasonable to make the heap allocations temporary so we have
less random stuff in the BO we have to cleanup.
> + }
> +
> + pwrite.handle = create.handle;
> + pwrite.pad = 0;
> + pwrite.offset = bo_size;
> + pwrite.size = bo->padding.size;
> + pwrite.data_ptr = (__u64) (uintptr_t) bo->padding.value;
> + ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
>
There's a part of me that wants to kill pread/write. However, I think you
may have come up with the only good use of it I've ever seen. :-)
> +
> + if (ret != 0) {
> + goto err_free;
> + }
> + }
> +
> bo->gem_handle = create.handle;
>
> bo->bufmgr = bufmgr;
> @@ -424,6 +456,26 @@ err:
> return NULL;
> }
>
> +bool
> +brw_bo_padding_is_good(struct brw_bo *bo)
> +{
> + if (bo->padding.size > 0) {
> + struct drm_i915_gem_pread pread;
> + int ret;
> +
> + assert(bo->padding.tmp && bo->padding.value);
> + pread.handle = bo->gem_handle;
> + pread.pad = 0;
> + pread.offset = bo->size;
> + pread.size = bo->padding.size;
> + pread.data_ptr = (__u64) (uintptr_t) bo->padding.tmp;
> + ret = drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
> + assert(ret == 0);
> + return memcmp(bo->padding.tmp, bo->padding.value, bo->padding.size)
> == 0;
> + }
> + return true;
> +}
> +
> struct brw_bo *
> brw_bo_alloc(struct brw_bufmgr *bufmgr,
> const char *name, uint64_t size, uint64_t alignment)
> @@ -598,6 +650,17 @@ bo_free(struct brw_bo *bo)
> DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
> bo->gem_handle, bo->name, strerror(errno));
> }
> +
> + if (unlikely(INTEL_DEBUG & DEBUG_OUT_OF_BOUND_CHK)) {
> + if (bo->padding.value) {
> + free(bo->padding.value);
>
free will happily ignore NULL pointers. The check is redundant.
> + }
> +
> + if (bo->padding.tmp) {
> + free(bo->padding.tmp);
> + }
>
If we still keep these heap allocations, deleting them should be keyed off
of bo->padding.size or nothing at all.
> + }
> +
> free(bo);
> }
>
> @@ -1156,6 +1219,9 @@ brw_bo_gem_create_from_prime(struct brw_bufmgr
> *bufmgr, int prime_fd)
> bo->name = "prime";
> bo->reusable = false;
> bo->external = true;
> + bo->padding.size = 0;
> + bo->padding.value = NULL;
> + bo->padding.tmp = NULL;
>
> struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle
> };
> if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
> diff --git a/src/mesa/drivers/dri/i965/brw_bufmgr.h
> b/src/mesa/drivers/dri/i965/brw_bufmgr.h
> index 0ae541c..4fff866 100644
> --- a/src/mesa/drivers/dri/i965/brw_bufmgr.h
> +++ b/src/mesa/drivers/dri/i965/brw_bufmgr.h
> @@ -165,6 +165,16 @@ struct brw_bo {
> * Boolean of whether this buffer is cache coherent
> */
> bool cache_coherent;
> +
> + /**
> + * pointer and size to memory holding values for padding for the
> + * purpose of checking out-of-bound writes.
> + */
> + struct {
> + uint32_t size;
> + uint8_t *value;
> + uint8_t *tmp;
> + } padding;
> };
>
> #define BO_ALLOC_BUSY (1<<0)
> @@ -342,6 +352,8 @@ uint32_t brw_bo_export_gem_handle(struct brw_bo *bo);
> int brw_reg_read(struct brw_bufmgr *bufmgr, uint32_t offset,
> uint64_t *result);
>
> +bool brw_bo_padding_is_good(struct brw_bo *bo);
> +
> /** @{ */
>
> #if defined(__cplusplus)
> --
> 2.7.4
>
> _______________________________________________
> mesa-dev mailing list
> mesa-dev at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/mesa-dev
>
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <https://lists.freedesktop.org/archives/mesa-dev/attachments/20171212/69cf0fb2/attachment.html>
More information about the mesa-dev
mailing list