[igt-dev] [PATCH i-g-t v19 06/34] lib/intel_allocator_random: Add random allocator
Zbigniew Kempczyński
zbigniew.kempczynski at intel.com
Tue Feb 2 09:24:15 UTC 2021
Sometimes we want to experiment with addresses, so an allocator which
hands out randomized offsets can help us a little.
Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
Cc: Dominik Grzegorzek <dominik.grzegorzek at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
---
lib/intel_allocator_random.c | 204 +++++++++++++++++++++++++++++++++++
1 file changed, 204 insertions(+)
create mode 100644 lib/intel_allocator_random.c
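A minimal sketch (for illustration only) of how the vtable added below could
be driven directly, assuming a DRM fd from drm_open_driver() and a made-up
handle; real tests are expected to reach this allocator through the allocator
front-end rather than by calling the create function themselves:

    #include "igt.h"
    #include "intel_allocator.h"

    /* Normally reached via the front-end; declared here only for the sketch. */
    struct intel_allocator *intel_allocator_random_create(int fd, uint32_t ctx);

    igt_simple_main
    {
    	struct intel_allocator *ial;
    	uint64_t offset;
    	int fd = drm_open_driver(DRIVER_INTEL);

    	ial = intel_allocator_random_create(fd, 0);

    	/* 4 KiB object at a 4 KiB aligned, randomized offset. */
    	offset = ial->alloc(ial, 1 /* handle */, 4096, 1 << 12);
    	igt_info("random offset: %" PRIx64 "\n", offset);

    	ial->free(ial, 1);
    	ial->destroy(ial);
    }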
diff --git a/lib/intel_allocator_random.c b/lib/intel_allocator_random.c
new file mode 100644
index 000000000..15b930af1
--- /dev/null
+++ b/lib/intel_allocator_random.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <sys/ioctl.h>
+#include <stdlib.h>
+#include "igt.h"
+#include "igt_x86.h"
+#include "igt_rand.h"
+#include "intel_allocator.h"
+
+struct intel_allocator *intel_allocator_random_create(int fd, uint32_t ctx);
+
+struct intel_allocator_random {
+ uint64_t bias;
+ uint32_t prng;
+ uint64_t gtt_size;
+ uint64_t start;
+ uint64_t end;
+
+ /* statistics */
+ uint64_t allocated_objects;
+};
+
+#define GEN8_HIGH_ADDRESS_BIT 47
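+/*
+ * Offsets with bit 47 set must be passed to the kernel in canonical
+ * form, i.e. with bit 47 sign-extended into bits 48-63.
+ */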
+static uint64_t gen8_canonical_addr(uint64_t address)
+{
+ int shift = 63 - GEN8_HIGH_ADDRESS_BIT;
+
+ return (int64_t)(address << shift) >> shift;
+}
+
+static uint64_t get_bias(int fd)
+{
+ (void) fd;
+
+ return 256 << 10;
+}
+
+static void intel_allocator_random_get_address_range(struct intel_allocator *ial,
+ uint64_t *startp,
+ uint64_t *endp)
+{
+ struct intel_allocator_random *ialr = ial->priv;
+
+ if (startp)
+ *startp = ialr->start;
+
+ if (endp)
+ *endp = ialr->end;
+}
+
+static uint64_t intel_allocator_random_alloc(struct intel_allocator *ial,
+ uint32_t handle, uint64_t size,
+ uint64_t alignment)
+{
+ struct intel_allocator_random *ialr = ial->priv;
+ uint64_t offset;
+
+ (void) handle;
+
+ /* Randomize the address; we try to avoid relocations. */
+ offset = hars_petruska_f54_1_random64(&ialr->prng);
+ offset += ialr->bias; /* Keep the low 256k clear, for negative deltas */
+ offset &= ialr->gtt_size - 1;
+ offset &= ~(alignment - 1);
+ offset = gen8_canonical_addr(offset);
+
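+ /*
+ * Only statistics are kept; handed-out offsets are not recorded, so
+ * a duplicate offset is statistically unlikely but not impossible.
+ */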
+ ialr->allocated_objects++;
+
+ return offset;
+}
+
+static bool intel_allocator_random_free(struct intel_allocator *ial,
+ uint32_t handle)
+{
+ struct intel_allocator_random *ialr = ial->priv;
+
+ (void) handle;
+
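+ /* Nothing is tracked per object, so only the statistics are updated. */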
+ ialr->allocated_objects--;
+
+ return false;
+}
+
+static bool intel_allocator_random_is_allocated(struct intel_allocator *ial,
+ uint32_t handle, uint64_t size,
+ uint64_t offset)
+{
+ (void) ial;
+ (void) handle;
+ (void) size;
+ (void) offset;
+
+ return false;
+}
+
+static void intel_allocator_random_destroy(struct intel_allocator *ial)
+{
+ igt_assert(ial);
+
+ free(ial->priv);
+ free(ial);
+}
+
+static bool intel_allocator_random_reserve(struct intel_allocator *ial,
+ uint32_t handle,
+ uint64_t start, uint64_t end)
+{
+ (void) ial;
+ (void) handle;
+ (void) start;
+ (void) end;
+
+ return false;
+}
+
+static bool intel_allocator_random_unreserve(struct intel_allocator *ial,
+ uint32_t handle,
+ uint64_t start, uint64_t end)
+{
+ (void) ial;
+ (void) handle;
+ (void) start;
+ (void) end;
+
+ return false;
+}
+
+static bool intel_allocator_random_is_reserved(struct intel_allocator *ial,
+ uint64_t start, uint64_t end)
+{
+ (void) ial;
+ (void) start;
+ (void) end;
+
+ return false;
+}
+
+static void intel_allocator_random_print(struct intel_allocator *ial, bool full)
+{
+ struct intel_allocator_random *ialr = ial->priv;
+
+ (void) full;
+
+ igt_info("<fd: %d, ctx: %u> allocated objects: %" PRIx64 "\n",
+ ial->fd, ial->ctx, ialr->allocated_objects);
+}
+
+static bool intel_allocator_random_is_empty(struct intel_allocator *ial)
+{
+ struct intel_allocator_random *ialr = ial->priv;
+
+ return !ialr->allocated_objects;
+}
+
+struct intel_allocator *intel_allocator_random_create(int fd, uint32_t ctx)
+{
+ struct intel_allocator *ial;
+ struct intel_allocator_random *ialr;
+
+ igt_debug("Using random allocator\n");
+ ial = calloc(1, sizeof(*ial));
+ igt_assert(ial);
+
+ ial->fd = fd;
+ ial->ctx = ctx;
+ ial->get_address_range = intel_allocator_random_get_address_range;
+ ial->alloc = intel_allocator_random_alloc;
+ ial->free = intel_allocator_random_free;
+ ial->is_allocated = intel_allocator_random_is_allocated;
+ ial->reserve = intel_allocator_random_reserve;
+ ial->unreserve = intel_allocator_random_unreserve;
+ ial->is_reserved = intel_allocator_random_is_reserved;
+ ial->destroy = intel_allocator_random_destroy;
+ ial->print = intel_allocator_random_print;
+ ial->is_empty = intel_allocator_random_is_empty;
+
+ ialr = ial->priv = calloc(1, sizeof(*ialr));
+ igt_assert(ial->priv);
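+ /* Seed the PRNG with the allocator address so each instance differs. */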
+ ialr->prng = (uint32_t) to_user_pointer(ial);
+ ialr->gtt_size = gem_aperture_size(fd);
+ igt_debug("Gtt size: %" PRId64 "\n", ialr->gtt_size);
+ if (!gem_uses_full_ppgtt(fd))
+ ialr->gtt_size /= 2;
+
+ if ((ialr->gtt_size - 1) >> 32) {
+ /*
+ * We're not aware of bo sizes, so limiting to 46 bits makes sure
+ * we won't hand out addresses with bit 47 set (we use 32-bit
+ * sizes for now, so we still fit within the 47-bit address space).
+ */
+ if (ialr->gtt_size & (3ull << 47))
+ ialr->gtt_size = (1ull << 46);
+ }
+ ialr->bias = get_bias(fd);
+ ialr->start = ialr->bias;
+ ialr->end = ialr->gtt_size;
+
+ ialr->allocated_objects = 0;
+
+ return ial;
+}
--
2.26.0