[PATCH i-g-t v21 26/36] tests/api_intel_allocator: Simple allocator test suite
Zbigniew Kempczyński
zbigniew.kempczynski at intel.com
Fri Feb 26 11:11:43 UTC 2021
From: Dominik Grzegorzek <dominik.grzegorzek at intel.com>
We want to verify that the allocator works as expected, so this suite
exercises and stresses it.
Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
Signed-off-by: Dominik Grzegorzek <dominik.grzegorzek at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
---
tests/i915/api_intel_allocator.c | 538 +++++++++++++++++++++++++++++++
tests/meson.build | 1 +
2 files changed, 539 insertions(+)
create mode 100644 tests/i915/api_intel_allocator.c
diff --git a/tests/i915/api_intel_allocator.c b/tests/i915/api_intel_allocator.c
new file mode 100644
index 000000000..650c2ff5e
--- /dev/null
+++ b/tests/i915/api_intel_allocator.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <stdatomic.h>
+#include "i915/gem.h"
+#include "igt.h"
+#include "igt_aux.h"
+#include "intel_allocator.h"
+
+#define OBJ_SIZE 1024
+
+/* Book-keeping record for one object tracked by the allocator tests. */
+struct test_obj {
+ uint32_t handle; /* pseudo GEM handle from gem_handle_gen() */
+ uint64_t offset; /* address assigned by the allocator */
+ uint64_t size; /* object size in bytes */
+};
+
+/* Monotonic source of fake GEM handles, shared by all test threads. */
+static _Atomic(uint32_t) next_handle;
+
+/* Return a unique handle; atomic increment so concurrent callers never collide. */
+static inline uint32_t gem_handle_gen(void)
+{
+ return atomic_fetch_add(&next_handle, 1);
+}
+
+/*
+ * Exercise alloc/free of a single handle on the simple allocator:
+ * repeated alloc of the same handle must return the same address,
+ * is_allocated must track the state, and free succeeds exactly once.
+ */
+static void alloc_simple(int fd)
+{
+ uint64_t ahnd, addr_a, addr_b;
+ bool allocated, was_freed;
+
+ ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+
+ addr_a = intel_allocator_alloc(ahnd, 1, 0x1000, 0x1000);
+ addr_b = intel_allocator_alloc(ahnd, 1, 0x1000, 0x1000);
+ igt_assert(addr_a == addr_b);
+
+ allocated = intel_allocator_is_allocated(ahnd, 1, 0x1000, addr_a);
+ igt_assert(allocated);
+
+ was_freed = intel_allocator_free(ahnd, 1);
+ igt_assert(was_freed);
+
+ allocated = intel_allocator_is_allocated(ahnd, 1, 0x1000, addr_a);
+ igt_assert(!allocated);
+
+ /* A second free of the same handle must be rejected. */
+ was_freed = intel_allocator_free(ahnd, 1);
+ igt_assert(!was_freed);
+
+ intel_allocator_close(ahnd);
+}
+
+/*
+ * Reserve/unreserve one page at the very start of the allocator's
+ * address range and check that the is_reserved query tracks it.
+ */
+static void reserve_simple(int fd)
+{
+ uint64_t ahnd, range_start;
+ bool ok;
+
+ ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+ intel_allocator_get_address_range(ahnd, &range_start, NULL);
+
+ ok = intel_allocator_reserve(ahnd, 0, 0x1000, range_start);
+ igt_assert(ok);
+
+ ok = intel_allocator_is_reserved(ahnd, 0x1000, range_start);
+ igt_assert(ok);
+
+ /* A second reservation of the same range must fail. */
+ ok = intel_allocator_reserve(ahnd, 0, 0x1000, range_start);
+ igt_assert(!ok);
+
+ ok = intel_allocator_unreserve(ahnd, 0, 0x1000, range_start);
+ igt_assert(ok);
+
+ ok = intel_allocator_is_reserved(ahnd, 0x1000, range_start);
+ igt_assert(!ok);
+
+ intel_allocator_close(ahnd);
+}
+
+/*
+ * Exercise reserve/unreserve through the raw allocator vtable:
+ * overlapping reservations are rejected, an allocated range cannot be
+ * reserved until its object is freed, and the allocator ends up empty.
+ */
+static void reserve(int fd, uint8_t type)
+{
+ struct intel_allocator *ial;
+ struct test_obj obj;
+
+ /* Raw struct access - presumably valid only in single-process mode. */
+ ial = from_user_pointer(intel_allocator_open(fd, 0, type));
+
+ igt_assert(ial->reserve(ial, 0, 0x40000, 0x800000));
+ /* try reserve once again - overlaps the first range, must fail */
+ igt_assert_eq(ial->reserve(ial, 0, 0x40040, 0x700000), false);
+
+ obj.handle = gem_handle_gen();
+ obj.size = OBJ_SIZE;
+ obj.offset = ial->alloc(ial, obj.handle, obj.size, 0);
+
+ /* An allocated range must not be reservable... */
+ igt_assert_eq(ial->reserve(ial, 0, obj.offset,
+ obj.offset + obj.size), false);
+ ial->free(ial, obj.handle);
+ /* ...but becomes reservable once the object is freed. */
+ igt_assert_eq(ial->reserve(ial, 0, obj.offset,
+ obj.offset + obj.size), true);
+
+ ial->unreserve(ial, 0, obj.offset, obj.offset + obj.size);
+ ial->unreserve(ial, 0, 0x40000, 0x800000);
+ /* With the big range gone the previously rejected one must fit. */
+ igt_assert(ial->reserve(ial, 0, 0x40040, 0x700000));
+ ial->unreserve(ial, 0, 0x40040, 0x700000);
+
+ igt_assert(ial->is_empty(ial));
+
+ intel_allocator_close(to_user_pointer(ial));
+}
+
+/*
+ * Return true when the [offset, offset + size) ranges of @buf1 and
+ * @buf2 intersect.
+ *
+ * The previous condition required end2 > end1 in one arm and
+ * end1 > end2 in the other, so ranges with equal ends (e.g. two
+ * equal-sized objects placed at the same offset) and fully contained
+ * ranges were never reported as overlapping.  Two half-open intervals
+ * intersect iff each one begins before the other one ends.
+ */
+static bool overlaps(struct test_obj *buf1, struct test_obj *buf2)
+{
+ uint64_t begin1 = buf1->offset;
+ uint64_t end1 = buf1->offset + buf1->size;
+ uint64_t begin2 = buf2->offset;
+ uint64_t end2 = buf2->offset + buf2->size;
+
+ return begin1 < end2 && begin2 < end1;
+}
+
+/*
+ * Allocate @cnt objects with 4 KiB alignment, verify alignment and
+ * (for stateful allocators) pairwise non-overlap, then free them all
+ * and check the allocator is empty.
+ */
+static void basic_alloc(int fd, int cnt, uint8_t type)
+{
+ struct test_obj *obj;
+ struct intel_allocator *ial;
+ int i, j;
+
+ ial = from_user_pointer(intel_allocator_open(fd, 0, type));
+ obj = malloc(sizeof(struct test_obj) * cnt);
+ igt_assert(obj); /* was unchecked; NULL would crash in the loop below */
+
+ for (i = 0; i < cnt; i++) {
+ igt_progress("allocating objects: ", i, cnt);
+ obj[i].handle = gem_handle_gen();
+ obj[i].size = OBJ_SIZE;
+ obj[i].offset = ial->alloc(ial, obj[i].handle,
+ obj[i].size, 4096);
+ igt_assert_eq(obj[i].offset % 4096, 0);
+ }
+
+ for (i = 0; i < cnt; i++) {
+ igt_progress("check overlapping: ", i, cnt);
+
+ /* Random allocator is stateless, overlaps are expected there. */
+ if (type == INTEL_ALLOCATOR_RANDOM)
+ continue;
+
+ for (j = 0; j < cnt; j++) {
+ if (j == i)
+ continue;
+ igt_assert(!overlaps(&obj[i], &obj[j]));
+ }
+ }
+
+ for (i = 0; i < cnt; i++) {
+ igt_progress("freeing objects: ", i, cnt);
+ ial->free(ial, obj[i].handle);
+ }
+
+ igt_assert(ial->is_empty(ial));
+
+ free(obj);
+ intel_allocator_close(to_user_pointer(ial));
+}
+
+/*
+ * Verify offset-reuse semantics: allocating an existing handle returns
+ * its current offset, and a freed hole is handed out to the next
+ * allocation of a matching size.
+ */
+static void reuse(int fd, uint8_t type)
+{
+ struct test_obj obj[128], tmp;
+ struct intel_allocator *ial;
+ uint64_t prev_offset;
+ int i;
+
+ ial = from_user_pointer(intel_allocator_open(fd, 0, type));
+
+ for (i = 0; i < 128; i++) {
+ obj[i].handle = gem_handle_gen();
+ obj[i].size = OBJ_SIZE;
+ obj[i].offset = ial->alloc(ial, obj[i].handle,
+ obj[i].size, 0x40);
+ }
+
+ /* check simple reuse: same handle must keep its offset */
+ for (i = 0; i < 128; i++) {
+ prev_offset = obj[i].offset;
+ obj[i].offset = ial->alloc(ial, obj[i].handle, obj[i].size, 0);
+ igt_assert(prev_offset == obj[i].offset);
+ }
+ i--; /* i == 127, the last object; prev_offset is its offset */
+
+ /* free bo previously allocated */
+ ial->free(ial, obj[i].handle);
+ /* alloc different buffer to fill freed hole */
+ tmp.handle = gem_handle_gen();
+ tmp.offset = ial->alloc(ial, tmp.handle, OBJ_SIZE, 0);
+ igt_assert(prev_offset == tmp.offset);
+
+ /* the hole is taken, so obj[127] must land somewhere else */
+ obj[i].offset = ial->alloc(ial, obj[i].handle, obj[i].size, 0);
+ igt_assert(prev_offset != obj[i].offset);
+ ial->free(ial, tmp.handle);
+
+ for (i = 0; i < 128; i++)
+ ial->free(ial, obj[i].handle);
+
+ igt_assert(ial->is_empty(ial));
+
+ intel_allocator_close(to_user_pointer(ial));
+}
+
+/* Per-thread arguments for the parallel alloc/free workers. */
+struct ial_thread_args {
+ struct intel_allocator *ial;
+ pthread_t thread;
+ uint32_t *handles; /* shared array, striped between threads */
+ uint64_t *offsets; /* shared array, striped between threads */
+ uint32_t count; /* total number of objects across all threads */
+ int threads; /* stripe stride */
+ int idx; /* this worker's stripe start */
+};
+
+/*
+ * Worker: allocate every threads-th object starting at idx, with a
+ * random power-of-two alignment (2..2^20).  Calls into the raw vtable,
+ * so each alloc is serialized with the allocator's mutex.
+ */
+static void *alloc_bo_in_thread(void *arg)
+{
+ struct ial_thread_args *a = arg;
+ int i;
+
+ for (i = a->idx; i < a->count; i += a->threads) {
+ a->handles[i] = gem_handle_gen();
+ pthread_mutex_lock(&a->ial->mutex);
+ a->offsets[i] = a->ial->alloc(a->ial, a->handles[i], OBJ_SIZE,
+ 1UL << ((random() % 20) + 1));
+ pthread_mutex_unlock(&a->ial->mutex);
+ }
+
+ return NULL;
+}
+
+/*
+ * Worker: free the stripe starting at (idx + 1) % threads, i.e. the
+ * objects allocated by a neighbouring thread.  The rotation covers all
+ * residues 0..threads-1, so every object is freed exactly once.
+ */
+static void *free_bo_in_thread(void *arg)
+{
+ struct ial_thread_args *a = arg;
+ int i;
+
+ for (i = (a->idx + 1) % a->threads; i < a->count; i += a->threads) {
+ pthread_mutex_lock(&a->ial->mutex);
+ a->ial->free(a->ial, a->handles[i]);
+ pthread_mutex_unlock(&a->ial->mutex);
+ }
+
+ return NULL;
+}
+
+#define THREADS 6
+
+/*
+ * THREADS workers allocate disjoint stripes of count objects on a
+ * single allocator instance; the offsets are then re-verified (for
+ * stateful allocators) and freed by a second set of workers.
+ */
+static void parallel_one(int fd, uint8_t type)
+{
+ struct intel_allocator *ial;
+ struct ial_thread_args a[THREADS];
+ uint32_t *handles;
+ uint64_t *offsets;
+ int count, i;
+
+ srandom(0xdeadbeef);
+ ial = from_user_pointer(intel_allocator_open(fd, 0, type));
+ count = 1UL << 12;
+
+ handles = malloc(sizeof(uint32_t) * count);
+ offsets = calloc(1, sizeof(uint64_t) * count);
+
+ for (i = 0; i < THREADS; i++) {
+ a[i].ial = ial;
+ a[i].handles = handles;
+ a[i].offsets = offsets;
+ a[i].count = count;
+ a[i].threads = THREADS;
+ a[i].idx = i;
+ pthread_create(&a[i].thread, NULL, alloc_bo_in_thread, &a[i]);
+ }
+
+ for (i = 0; i < THREADS; i++)
+ pthread_join(a[i].thread, NULL);
+
+ /* Check if all objects are allocated */
+ for (i = 0; i < count; i++) {
+ /* Random allocator has no state and always returns a different offset */
+ if (type == INTEL_ALLOCATOR_RANDOM)
+ break;
+
+ /* Re-allocating an existing handle must return its offset. */
+ igt_assert_eq(offsets[i],
+ a->ial->alloc(ial, handles[i], OBJ_SIZE, 0));
+ }
+
+ for (i = 0; i < THREADS; i++)
+ pthread_create(&a[i].thread, NULL, free_bo_in_thread, &a[i]);
+
+ for (i = 0; i < THREADS; i++)
+ pthread_join(a[i].thread, NULL);
+
+ /* Check that the ranges the objects occupied are reservable again */
+ for (i = 0; i < count; i++) {
+ if (type == INTEL_ALLOCATOR_RANDOM)
+ break;
+
+ igt_assert(ial->reserve(ial, 0, offsets[i], offsets[i] + 1));
+ }
+
+ free(handles);
+ free(offsets);
+
+ intel_allocator_close(to_user_pointer(ial));
+}
+
+#define SIMPLE_GROUP_ALLOCS 8
+/*
+ * One burst of SIMPLE_GROUP_ALLOCS allocations on a randomly chosen
+ * ctx (0 or 1), each backed by a real GEM object, followed by full
+ * cleanup.  Called concurrently from forked children and threads.
+ */
+static void __simple_allocs(int fd)
+{
+ uint32_t handles[SIMPLE_GROUP_ALLOCS];
+ uint64_t ahnd;
+ uint32_t ctx;
+ int i;
+
+ ctx = rand() % 2;
+ ahnd = intel_allocator_open(fd, ctx, INTEL_ALLOCATOR_SIMPLE);
+
+ for (i = 0; i < SIMPLE_GROUP_ALLOCS; i++) {
+ uint32_t size;
+
+ /* 4-16 KiB, page multiple */
+ size = (rand() % 4 + 1) * 0x1000;
+ handles[i] = gem_create(fd, size);
+ intel_allocator_alloc(ahnd, handles[i], size, 0x1000);
+ }
+
+ for (i = 0; i < SIMPLE_GROUP_ALLOCS; i++) {
+ igt_assert_f(intel_allocator_free(ahnd, handles[i]) == 1,
+ "Error freeing handle: %u\n", handles[i]);
+ gem_close(fd, handles[i]);
+ }
+
+ intel_allocator_close(ahnd);
+}
+
+/* Smoke test: a single forked child allocates once in multiprocess mode. */
+static void fork_simple_once(int fd)
+{
+ /* Switch the allocator to its multiprocess mode for the children. */
+ intel_allocator_multiprocess_start();
+
+ igt_fork(child, 1)
+ __simple_allocs(fd);
+
+ igt_waitchildren();
+
+ intel_allocator_multiprocess_stop();
+}
+
+#define SIMPLE_TIMEOUT 5
+/* Thread body: hammer __simple_allocs() for SIMPLE_TIMEOUT seconds. */
+static void *__fork_simple_thread(void *data)
+{
+ int drm_fd = (int) (long) data;
+
+ igt_until_timeout(SIMPLE_TIMEOUT)
+ __simple_allocs(drm_fd);
+
+ return NULL;
+}
+
+/*
+ * Allocator stress in multiprocess mode: two threads in the parent and
+ * eight forked children (plus two threads per child when
+ * @two_level_inception) all run __simple_allocs() for SIMPLE_TIMEOUT
+ * seconds.  ahnd0/ahnd1 keep two allocators open for the whole run;
+ * their close() results report whether every allocation was freed.
+ */
+static void fork_simple_stress(int fd, bool two_level_inception)
+{
+ pthread_t thread0, thread1;
+ uint64_t ahnd0, ahnd1;
+ bool are_empty;
+
+ intel_allocator_multiprocess_start();
+
+ ahnd0 = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+ ahnd1 = intel_allocator_open(fd, 1, INTEL_ALLOCATOR_SIMPLE);
+
+ pthread_create(&thread0, NULL, __fork_simple_thread, (void *) (long) fd);
+ pthread_create(&thread1, NULL, __fork_simple_thread, (void *) (long) fd);
+
+ igt_fork(child, 8) {
+ if (two_level_inception) {
+ /* Children overwrite their own copies of thread0/1. */
+ pthread_create(&thread0, NULL, __fork_simple_thread,
+ (void *) (long) fd);
+ pthread_create(&thread1, NULL, __fork_simple_thread,
+ (void *) (long) fd);
+ }
+
+ igt_until_timeout(SIMPLE_TIMEOUT) {
+ __simple_allocs(fd);
+ }
+
+ if (two_level_inception) {
+ pthread_join(thread0, NULL);
+ pthread_join(thread1, NULL);
+ }
+ }
+ igt_waitchildren();
+
+ pthread_join(thread0, NULL);
+ pthread_join(thread1, NULL);
+
+ /* close() returns whether the allocator was empty at that point. */
+ are_empty = intel_allocator_close(ahnd0);
+ are_empty &= intel_allocator_close(ahnd1);
+
+ intel_allocator_multiprocess_stop();
+
+ igt_assert_f(are_empty, "Allocators were not emptied\n");
+}
+
+/*
+ * Opening allocators on two different drm fds must yield distinct
+ * handles, while reopening the same fd/ctx pair returns the same one.
+ */
+static void __reopen_allocs(int fd1, int fd2)
+{
+ uint64_t handle_a, handle_b, handle_c;
+
+ handle_a = intel_allocator_open(fd1, 0, INTEL_ALLOCATOR_SIMPLE);
+ handle_b = intel_allocator_open(fd2, 0, INTEL_ALLOCATOR_SIMPLE);
+ handle_c = intel_allocator_open(fd2, 0, INTEL_ALLOCATOR_SIMPLE);
+ igt_assert(handle_a != handle_b);
+ igt_assert(handle_b == handle_c);
+
+ intel_allocator_close(handle_a);
+ intel_allocator_close(handle_b);
+ intel_allocator_close(handle_c);
+}
+
+/* Reopen the driver and check allocator handles differ per drm fd. */
+static void reopen(int fd)
+{
+ int reopened_fd;
+
+ igt_require_gem(fd);
+
+ reopened_fd = gem_reopen_driver(fd);
+ __reopen_allocs(fd, reopened_fd);
+
+ close(reopened_fd);
+}
+
+#define REOPEN_TIMEOUT 3
+/*
+ * Same open/close cycling as reopen(), but raced between a forked
+ * child and the parent for REOPEN_TIMEOUT seconds in multiprocess mode.
+ */
+static void reopen_fork(int fd)
+{
+ int fd2;
+
+ igt_require_gem(fd);
+
+ intel_allocator_multiprocess_start();
+
+ fd2 = gem_reopen_driver(fd);
+
+ igt_fork(child, 1) {
+ igt_until_timeout(REOPEN_TIMEOUT)
+ __reopen_allocs(fd, fd2);
+ }
+ /* Parent races the child on the same pair of fds. */
+ igt_until_timeout(REOPEN_TIMEOUT)
+ __reopen_allocs(fd, fd2);
+
+ igt_waitchildren();
+
+ close(fd2);
+
+ intel_allocator_multiprocess_stop();
+}
+
+/* NULL-terminated table of allocator backends driving the dynamic subtests. */
+struct allocators {
+ const char *name;
+ uint8_t type;
+} als[] = {
+ {"simple", INTEL_ALLOCATOR_SIMPLE},
+ {"random", INTEL_ALLOCATOR_RANDOM},
+ {NULL, 0},
+};
+
+igt_main
+{
+ int fd;
+ struct allocators *a;
+
+ igt_fixture {
+ fd = drm_open_driver(DRIVER_INTEL);
+ /* Handle 0 is reserved; start fake handles at 1. */
+ atomic_init(&next_handle, 1);
+ /* Fixed seed keeps the random sizes/alignments reproducible. */
+ srandom(0xdeadbeef);
+ }
+
+ igt_subtest_f("alloc-simple")
+ alloc_simple(fd);
+
+ igt_subtest_f("reserve-simple")
+ reserve_simple(fd);
+
+ /* NOTE(review): subtest name "print" doesn't describe basic_alloc()
+ * on the random allocator - confirm intended name. */
+ igt_subtest_f("print")
+ basic_alloc(fd, 1UL << 2, INTEL_ALLOCATOR_RANDOM);
+
+ igt_subtest_f("reuse")
+ reuse(fd, INTEL_ALLOCATOR_SIMPLE);
+
+ igt_subtest_f("reserve")
+ reserve(fd, INTEL_ALLOCATOR_SIMPLE);
+
+ /* Per-backend dynamic subtests from the als[] table. */
+ for (a = als; a->name; a++) {
+ igt_subtest_with_dynamic_f("%s-allocator", a->name) {
+ igt_dynamic("basic")
+ basic_alloc(fd, 1UL << 8, a->type);
+
+ igt_dynamic("parallel-one")
+ parallel_one(fd, a->type);
+
+ /* Stateless random allocator cannot reuse or reserve. */
+ if (a->type != INTEL_ALLOCATOR_RANDOM) {
+ igt_dynamic("reuse")
+ reuse(fd, a->type);
+
+ igt_dynamic("reserve")
+ reserve(fd, a->type);
+ }
+ }
+ }
+
+ igt_subtest_f("fork-simple-once")
+ fork_simple_once(fd);
+
+ igt_subtest_f("fork-simple-stress")
+ fork_simple_stress(fd, false);
+
+ igt_subtest_f("fork-simple-stress-signal") {
+ igt_fork_signal_helper();
+ fork_simple_stress(fd, false);
+ igt_stop_signal_helper();
+ }
+
+ igt_subtest_f("two-level-inception")
+ fork_simple_stress(fd, true);
+
+ igt_subtest_f("two-level-inception-interruptible") {
+ igt_fork_signal_helper();
+ fork_simple_stress(fd, true);
+ igt_stop_signal_helper();
+ }
+
+ igt_subtest_f("reopen")
+ reopen(fd);
+
+ igt_subtest_f("reopen-fork")
+ reopen_fork(fd);
+
+ igt_fixture
+ close(fd);
+}
diff --git a/tests/meson.build b/tests/meson.build
index 825e01833..061691903 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -111,6 +111,7 @@ test_progs = [
]
i915_progs = [
+ 'api_intel_allocator',
'api_intel_bb',
'gen3_mixed_blits',
'gen3_render_linear_blits',
--
2.26.0
More information about the Intel-gfx-trybot
mailing list