[igt-dev] [PATCH i-g-t 5/5] benchmarks/intel_allocator: Test the allocator performance

Andrzej Turko andrzej.turko at linux.intel.com
Wed Jul 28 15:12:07 UTC 2021


This tests the allocator with regard to both performance and
efficient use of the address space.
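
Three subtests are added: "basic" exercises the allocator API
through a validating wrapper, "short" replays a generated query
sequence with a full consistency check after every query, and
"long" replays a much longer sequence without validation, which
is the performance-oriented path.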

Signed-off-by: Andrzej Turko <andrzej.turko at linux.intel.com>
Cc: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
---
 benchmarks/intel_allocator.c | 756 +++++++++++++++++++++++++++++++++++
 benchmarks/meson.build       |   1 +
 2 files changed, 757 insertions(+)
 create mode 100644 benchmarks/intel_allocator.c

diff --git a/benchmarks/intel_allocator.c b/benchmarks/intel_allocator.c
new file mode 100644
index 000000000..00bb2074e
--- /dev/null
+++ b/benchmarks/intel_allocator.c
@@ -0,0 +1,756 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "igt.h"
+#include "igt_map.h"
+#include "intel_allocator.h"
+
+//#define DBG
+#ifdef DBG
+#define ial_debug(...) fprintf(stderr, __VA_ARGS__)
+#define echo_query(...) fprintf(stderr, __VA_ARGS__)
+#else
+#define ial_debug(...) do { } while (0)
+#define echo_query(...) do { } while (0)
+#endif
+
+static _Atomic(uint32_t) next_handle;
+
+static inline uint32_t gem_handle_gen(void)
+{
+	return atomic_fetch_add(&next_handle, 1);
+}
+
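+/*
+ * Wrapper around the allocator under test: every allocation and reservation
+ * is mirrored in plain lists, so the allocator's answers can be
+ * cross-checked against them.
+ */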
+struct validated_allocator {
+	struct igt_list_head allocated;
+	struct igt_list_head reserved;
+
+	uint64_t allocator_handle;
+	uint64_t vas_start;
+	uint64_t vas_end;
+};
+
+struct va_block {
+	uint64_t offset;
+	uint64_t size;
+	uint32_t handle;
+	struct igt_list_head link;
+};
+
+struct alloc_block {
+	uint64_t size;
+	uint32_t handle;
+	struct igt_list_head link;
+};
+
+struct rsvd_block {
+	uint64_t size;
+	uint64_t offset;
+	uint32_t handle;
+};
+
+#define QUERY_ALLOC 1
+#define QUERY_FREE 2
+#define QUERY_IS_ALLOCATED 3
+#define QUERY_RESERVE 4
+#define QUERY_UNRESERVE 5
+#define QUERY_IS_RESERVED 6
+
+#define MAXTESTLEN (1 << 24)
+#define NO_CHECK 0
+#define CURSORY_CHECK 1
+#define THOROUGH_CHECK 2
+
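+/* A single recorded allocator query, replayed later against an allocator. */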
+struct allocator_query {
+	int type;
+
+	union {
+		struct {
+			uint32_t handle;
+			uint64_t size;
+			uint64_t alignment;
+		} alloc;
+		struct {
+			uint32_t handle;
+		} free;
+		struct {
+			uint32_t handle;
+			uint64_t size;
+			uint64_t offset;
+		} is_allocated;
+		struct {
+			uint32_t handle;
+			uint64_t size;
+			uint64_t offset;
+		} reserve;
+		struct {
+			uint32_t handle;
+			uint64_t size;
+			uint64_t offset;
+		} unreserve;
+		struct {
+			uint64_t size;
+			uint64_t offset;
+		} is_reserved;
+	};
+};
+
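+/*
+ * Full consistency check: allocated blocks must not overlap each other,
+ * reserved areas must not overlap each other or any allocated block, and
+ * the allocator must confirm every block and area the wrapper is tracking.
+ */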
+static void validate_allocator(struct validated_allocator *va)
+{
+	struct va_block *a, *b;
+	int same;
+	bool is_allocated, is_reserved;
+
+	igt_list_for_each_entry(a, &va->allocated, link) {
+
+		igt_assert(a->offset + a->size > a->offset);
+		same = 0;
+		igt_list_for_each_entry(b, &va->allocated, link) {
+
+			if (a->handle == b->handle) {
+				same++;
+			} else {
+				igt_assert_f((a->offset < b->offset &&
+					      a->offset + a->size <= b->offset) ||
+					     (b->offset < a->offset &&
+					      b->offset + b->size <= a->offset),
+					     "Two allocated blocks overlap");
+			}
+		}
+		igt_assert_f(same == 1, "A handle appears more than once.\n");
+	}
+
+	igt_list_for_each_entry(a, &va->reserved, link) {
+
+		igt_assert(a->offset + a->size > a->offset);
+		same = 0;
+		igt_list_for_each_entry(b, &va->reserved, link) {
+
+			if (a->offset == b->offset) {
+				same++;
+			} else {
+				igt_assert_f((a->offset < b->offset &&
+					      a->offset + a->size <= b->offset) ||
+					     (b->offset < a->offset &&
+					      b->offset + b->size <= a->offset),
+					     "Two reserved areas overlap");
+			}
+		}
+		igt_assert_f(same == 1, "The same area has been reserved more than once.\n");
+	}
+
+	igt_list_for_each_entry(a, &va->reserved, link) {
+		igt_list_for_each_entry(b, &va->allocated, link) {
+
+			igt_assert_f((a->offset < b->offset &&
+				      a->offset + a->size <= b->offset) ||
+				     (b->offset < a->offset &&
+				      b->offset + b->size <= a->offset),
+				     "An allocated block overlaps with a reserved area.\n");
+		}
+	}
+
+	igt_list_for_each_entry(a, &va->reserved, link) {
+		is_reserved = intel_allocator_is_reserved(va->allocator_handle,
+							  a->size,
+							  a->offset);
+		igt_assert_f(is_reserved, "The allocator has not reported a reserved area.\n");
+	}
+
+	igt_list_for_each_entry(a, &va->allocated, link) {
+		is_allocated = intel_allocator_is_allocated(va->allocator_handle,
+							    a->handle,
+							    a->size,
+							    a->offset);
+		igt_assert_f(is_allocated, "The allocator has not reported an allocated block.\n");
+	}
+}
+
+static struct va_block *
+get_by_handle(struct igt_list_head *list, uint32_t handle)
+{
+	struct va_block *entry;
+
+	igt_list_for_each_entry(entry, list, link) {
+		if (entry->handle == handle)
+			return entry;
+	}
+	return NULL;
+}
+
+static struct va_block *
+get_by_offset(struct igt_list_head *list, uint64_t offset)
+{
+	struct va_block *entry;
+
+	igt_list_for_each_entry(entry, list, link) {
+		if (entry->offset == offset)
+			return entry;
+	}
+	return NULL;
+}
+
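+/* Return any block on @list whose range overlaps [offset, offset + size). */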
+static struct va_block *
+get_overlapping(struct igt_list_head *list, uint64_t offset, uint64_t size)
+{
+	struct va_block *entry;
+	uint64_t start, end;
+	bool overlaps;
+
+	igt_list_for_each_entry(entry, list, link) {
+		start = entry->offset;
+		end = entry->offset + entry->size;
+
+		/* Ranges overlap iff each one starts before the other ends. */
+		overlaps = (start < offset + size && end > offset);
+
+		if (overlaps)
+			return entry;
+	}
+	return NULL;
+}
+
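+/*
+ * Allocate through the allocator and record the result. A new offset must
+ * lie within the address range, respect the requested alignment and not
+ * overlap any tracked block or reserved area; allocating an already known
+ * handle must return the offset recorded for it.
+ */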
+static uint64_t
+validated_allocator_alloc(struct validated_allocator *va, uint32_t handle,
+			  uint64_t size, uint64_t alignment)
+{
+	struct va_block *block;
+	uint64_t offset;
+
+	offset = __intel_allocator_alloc(va->allocator_handle, handle,
+					 size, alignment, ALLOC_STRATEGY_NONE);
+
+	block = get_by_handle(&va->allocated, handle);
+	if (block) {
+		igt_assert(block->offset == offset);
+		return offset;
+	}
+
+	/* not enough space */
+	if (offset == ALLOC_INVALID_ADDRESS)
+		return ALLOC_INVALID_ADDRESS;
+
+	igt_assert(offset >= va->vas_start &&
+		   offset + size <= va->vas_end);
+
+	if (alignment > 0)
+		igt_assert(offset % alignment == 0);
+
+	/* check that no allocated block overlaps */
+	igt_assert(!get_overlapping(&va->allocated, offset, size));
+	/* check that no reserved area overlaps */
+	igt_assert(!get_overlapping(&va->reserved, offset, size));
+
+	block = (struct va_block *) malloc(sizeof(struct va_block));
+	igt_assert(block);
+	block->offset = offset;
+	block->size = size;
+	block->handle = handle;
+	igt_list_add(&block->link, &va->allocated);
+
+	return offset;
+}
+
+static bool
+validated_allocator_free(struct validated_allocator *va, uint32_t handle)
+{
+	struct va_block *block;
+
+	block = get_by_handle(&va->allocated, handle);
+
+	if (block) {
+		igt_list_del(&block->link);
+		free(block);
+		igt_assert(intel_allocator_free(va->allocator_handle, handle));
+		return true;
+	} else {
+		igt_assert(!intel_allocator_free(va->allocator_handle, handle));
+		return false;
+	}
+}
+
+static bool
+validated_allocator_is_allocated(struct validated_allocator *va, uint32_t handle,
+				 uint64_t size, uint64_t offset)
+{
+	struct va_block *block;
+	bool is_allocated;
+
+	block = get_by_handle(&va->allocated, handle);
+
+	is_allocated = (block && block->size == size &&
+			block->offset == offset);
+
+	igt_assert(is_allocated == intel_allocator_is_allocated(va->allocator_handle,
+								handle, size, offset));
+
+	return is_allocated;
+}
+
+static bool
+validated_allocator_reserve(struct validated_allocator *va, uint32_t handle,
+			    uint64_t size, uint64_t offset)
+{
+	struct va_block *block;
+	bool reserved;
+
+	reserved = intel_allocator_reserve(va->allocator_handle, handle,
+					   size, offset);
+	if (!reserved) {
+		if (offset >= va->vas_start && offset + size <= va->vas_end)
+			igt_assert(get_overlapping(&va->reserved, offset, size) ||
+				   get_overlapping(&va->allocated, offset, size));
+
+		return false;
+	}
+	igt_assert(!get_overlapping(&va->allocated, offset, size));
+	igt_assert(!get_overlapping(&va->reserved, offset, size));
+
+	block = (struct va_block *) malloc(sizeof(struct va_block));
+	igt_assert(block);
+	block->handle = handle;
+	block->offset = offset;
+	block->size = size;
+	igt_list_add(&block->link, &va->reserved);
+
+	return true;
+}
+
+static bool
+validated_allocator_unreserve(struct validated_allocator *va, uint32_t handle,
+			      uint64_t size, uint64_t offset)
+{
+	struct va_block *block;
+
+	block = get_by_offset(&va->reserved, offset);
+
+	if (block && block->size == size && block->handle == handle) {
+		igt_assert(intel_allocator_unreserve(va->allocator_handle,
+						     handle, size, offset));
+		igt_list_del(&block->link);
+		free(block);
+		return true;
+	} else {
+		igt_assert(!intel_allocator_unreserve(va->allocator_handle,
+						      handle, size, offset));
+		return false;
+	}
+}
+
+static bool
+validated_allocator_is_reserved(struct validated_allocator *va,
+				uint64_t size, uint64_t offset)
+{
+	struct va_block *block;
+	bool is_reserved;
+
+	block = get_by_offset(&va->reserved, offset);
+	is_reserved = (block && block->size == size);
+	igt_assert(is_reserved == intel_allocator_is_reserved(va->allocator_handle,
+							      size, offset));
+	return is_reserved;
+}
+
+static struct validated_allocator *
+validated_allocator_create(int fd, int allocator_type)
+{
+	struct validated_allocator *va;
+
+	va = (struct validated_allocator *) malloc(sizeof(struct validated_allocator));
+	igt_assert(va);
+	IGT_INIT_LIST_HEAD(&va->allocated);
+	IGT_INIT_LIST_HEAD(&va->reserved);
+
+	va->allocator_handle = intel_allocator_open(fd, 0, allocator_type);
+	intel_allocator_get_address_range(va->allocator_handle,
+					  &va->vas_start,
+					  &va->vas_end);
+
+	return va;
+}
+
+static bool validated_allocator_destroy(struct validated_allocator *va)
+{
+	bool is_empty;
+	struct va_block *entry, *tmp;
+
+	is_empty = igt_list_empty(&va->allocated) &&
+		   igt_list_empty(&va->reserved);
+	igt_assert_eq(is_empty, intel_allocator_close(va->allocator_handle));
+
+	igt_list_for_each_entry_safe(entry, tmp, &va->allocated, link) {
+		igt_list_del(&entry->link);
+		free(entry);
+	}
+
+	igt_list_for_each_entry_safe(entry, tmp, &va->reserved, link) {
+		igt_list_del(&entry->link);
+		free(entry);
+	}
+
+	free(va);
+	return is_empty;
+}
+
+static void get_device_address_range(int fd, uint64_t *start, uint64_t *end)
+{
+	uint64_t ahnd;
+
+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+	intel_allocator_get_address_range(ahnd, start, end);
+	intel_allocator_close(ahnd);
+}
+
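+/*
+ * Replay the queries directly against the allocator without any validation;
+ * this is the path used for performance measurements.
+ */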
+static void
+execute_test_no_check(int fd, int alloc_type, int nqueries,
+		      struct allocator_query *queries)
+{
+	uint64_t ahnd;
+	uint64_t offset;
+	uint64_t allocated_mem = 0, requested_mem = 0;
+	uint32_t real_allocs = 0, requested_allocs = 0;
+	int i = 0;
+
+	ahnd = intel_allocator_open(fd, 0, alloc_type);
+
+	for (i = 0; i < nqueries; i++) {
+
+		switch (queries[i].type) {
+		case QUERY_ALLOC:
+			offset = __intel_allocator_alloc(ahnd, queries[i].alloc.handle,
+							 queries[i].alloc.size,
+							 queries[i].alloc.alignment,
+							 ALLOC_STRATEGY_NONE);
+
+			allocated_mem += (offset == ALLOC_INVALID_ADDRESS ?
+						  0 : queries[i].alloc.size);
+			requested_mem += queries[i].alloc.size;
+			real_allocs += (offset == ALLOC_INVALID_ADDRESS ? 0 : 1);
+			requested_allocs += 1;
+			break;
+		case QUERY_FREE:
+			intel_allocator_free(ahnd, queries[i].free.handle);
+			break;
+		case QUERY_IS_ALLOCATED:
+			intel_allocator_is_allocated(ahnd, queries[i].is_allocated.handle,
+						     queries[i].is_allocated.size,
+						     queries[i].is_allocated.offset);
+			break;
+		case QUERY_RESERVE:
+			intel_allocator_reserve(ahnd, queries[i].reserve.handle,
+						queries[i].reserve.size,
+						queries[i].reserve.offset);
+			break;
+		case QUERY_UNRESERVE:
+			intel_allocator_unreserve(ahnd, queries[i].unreserve.handle,
+						  queries[i].unreserve.size,
+						  queries[i].unreserve.offset);
+			break;
+		case QUERY_IS_RESERVED:
+			intel_allocator_is_reserved(ahnd, queries[i].is_reserved.size,
+						    queries[i].is_reserved.offset);
+			break;
+		}
+	}
+	intel_allocator_close(ahnd);
+
+	igt_info("Test summary: %d allocations out of %d succeeded (%.2f%%), "
+		 "allocated %llu of %llu requested bytes\n",
+		 real_allocs, requested_allocs,
+		 ((float) real_allocs * 100) / (float) requested_allocs,
+		 (unsigned long long) allocated_mem,
+		 (unsigned long long) requested_mem);
+}
+
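+/*
+ * Replay the queries through the validated wrapper; with THOROUGH_CHECK the
+ * full consistency check is rerun after every query.
+ */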
+static void
+execute_test_check(int fd, int alloc_type, int nqueries,
+		   struct allocator_query *queries, int mode)
+{
+	struct validated_allocator *va;
+	uint64_t offset;
+	uint64_t allocated_mem = 0, requested_mem = 0;
+	uint32_t real_allocs = 0, requested_allocs = 0;
+	int i = 0;
+
+	va = validated_allocator_create(fd, alloc_type);
+
+	for (i = 0; i < nqueries; i++) {
+
+		ial_debug("\n");
+		switch (queries[i].type) {
+		case QUERY_ALLOC:
+			ial_debug("Alloc %u 0x%llx (0x%llx)\n",
+				  queries[i].alloc.handle,
+				  (unsigned long long) queries[i].alloc.size,
+				  (unsigned long long) queries[i].alloc.alignment);
+			offset = validated_allocator_alloc(va, queries[i].alloc.handle,
+							   queries[i].alloc.size,
+							   queries[i].alloc.alignment);
+
+			allocated_mem += (offset == ALLOC_INVALID_ADDRESS ?
+						  0 : queries[i].alloc.size);
+			requested_mem += queries[i].alloc.size;
+			real_allocs += (offset == ALLOC_INVALID_ADDRESS ? 0 : 1);
+			requested_allocs += 1;
+			break;
+		case QUERY_FREE:
+			ial_debug("Free %d\n", queries[i].free.handle);
+			validated_allocator_free(va, queries[i].free.handle);
+			break;
+		case QUERY_IS_ALLOCATED:
+			validated_allocator_is_allocated(va, queries[i].is_allocated.handle,
+							 queries[i].is_allocated.size,
+							 queries[i].is_allocated.offset);
+			break;
+		case QUERY_RESERVE:
+			ial_debug("Reserve %u 0x%llx + 0x%llx\n",
+				  queries[i].reserve.handle,
+				  (unsigned long long) queries[i].reserve.size,
+				  (unsigned long long) queries[i].reserve.offset);
+			validated_allocator_reserve(va, queries[i].reserve.handle,
+						    queries[i].reserve.size,
+						    queries[i].reserve.offset);
+			break;
+		case QUERY_UNRESERVE:
+			ial_debug("Unreserve %u 0x%llx 0x%llx\n",
+				  queries[i].unreserve.handle,
+				  (unsigned long long) queries[i].unreserve.size,
+				  (unsigned long long) queries[i].unreserve.offset);
+			validated_allocator_unreserve(va, queries[i].unreserve.handle,
+						      queries[i].unreserve.size,
+						      queries[i].unreserve.offset);
+			break;
+		case QUERY_IS_RESERVED:
+			validated_allocator_is_reserved(va, queries[i].is_reserved.size,
+							queries[i].is_reserved.offset);
+			break;
+		}
+
+		if (mode == THOROUGH_CHECK)
+			validate_allocator(va);
+	}
+
+	validated_allocator_destroy(va);
+
+	igt_info("Test summary: %d allocations out of %d succeeded (%.2f%%), "
+		 "allocated %llu of %llu requested bytes\n",
+		 real_allocs, requested_allocs,
+		 ((float) real_allocs * 100) / (float) requested_allocs,
+		 (unsigned long long) allocated_mem,
+		 (unsigned long long) requested_mem);
+}
+
+static void
+execute_test(int fd, int alloc_type, int nqueries,
+	     struct allocator_query *queries, int mode)
+{
+	if (mode == NO_CHECK)
+		execute_test_no_check(fd, alloc_type, nqueries, queries);
+	else
+		execute_test_check(fd, alloc_type, nqueries, queries, mode);
+}
+
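+/* Build a 64-bit random value out of several rand() calls. */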
+static uint64_t randu64(void)
+{
+	uint64_t x = 0;
+	int rnd_bits = igt_fls(RAND_MAX);
+
+	for (int i = 0; i < 64; i += rnd_bits)
+		x = (x << rnd_bits) + rand();
+
+	return x;
+}
+
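+/*
+ * Generate a query sequence: reserve a randomly permuted partition of the
+ * whole address range, then release it piece by piece while issuing up to
+ * nq_stage random allocations and frees that fit into the space released
+ * so far. Returns the number of queries written to @queries.
+ */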
+static int
+create_simple_test(int fd, uint64_t min_size, int nq_stage,
+		   struct allocator_query *queries)
+{
+	uint64_t start, end, size, offset, free_space;
+	int i, j, stages, nq, objcount;
+	int perm[64];
+	uint64_t offsets[64], sizes[64];
+	struct igt_list_head alloc_list;
+	struct alloc_block *block, *entry;
+
+	IGT_INIT_LIST_HEAD(&alloc_list);
+	igt_assert(min_size > 0);
+	get_device_address_range(fd, &start, &end);
+
+	stages = 0;
+	for (size = min_size; start + size * 2ULL <= end; size *= 2ULL) {
+		perm[stages] = stages;
+		sizes[stages++] = size;
+	}
+
+	perm[stages] = stages;
+	sizes[stages++] = end - start - (size - min_size);
+	igt_permute_array(perm, stages, igt_exchange_int);
+	offset = start;
+
+	for (i = 0; i < stages; i++) {
+		offsets[perm[i]] = offset;
+		offset += sizes[perm[i]];
+	}
+
+	nq = 0;
+	for (i = 0; i < stages; i++) {
+		queries[nq].type = QUERY_RESERVE;
+		queries[nq].reserve.size = sizes[i];
+		queries[nq].reserve.offset = offsets[i];
+		queries[nq].reserve.handle = -1;
+		nq++;
+	}
+
+	free_space = 0;
+	objcount = 0;
+	for (i = 0; i < stages; i++) {
+
+		queries[nq].type = QUERY_UNRESERVE;
+		queries[nq].unreserve.size = sizes[i];
+		queries[nq].unreserve.offset = offsets[i];
+		queries[nq].unreserve.handle = -1;
+		nq++;
+		free_space += sizes[i];
+
+		for (j = nq_stage; j > 0; j--) {
+
+			if (objcount > 0 && rand() % 4 == 0) {
+
+				igt_list_for_each_entry(entry, &alloc_list, link) {
+					block = entry;
+					if (rand() % 10 == 0)
+						break;
+				}
+				igt_assert(block);
+				queries[nq].type = QUERY_FREE;
+				queries[nq].free.handle = block->handle;
+				nq++;
+
+				objcount--;
+				free_space += block->size;
+				igt_list_del(&block->link);
+				free(block);
+
+			} else if (free_space > 0) {
+
+				block = malloc(sizeof(struct alloc_block));
+				igt_assert(block);
+				block->handle = gem_handle_gen();
+				block->size = randu64() % free_space + 1;
+
+				igt_list_add(&block->link, &alloc_list);
+				objcount++;
+				free_space -= block->size;
+				queries[nq].type = QUERY_ALLOC;
+				queries[nq].alloc.size = block->size;
+				queries[nq].alloc.alignment = 1 << (rand() % 8);
+				queries[nq].alloc.handle = block->handle;
+				nq++;
+			}
+		}
+	}
+
+	igt_list_for_each_entry_safe(entry, block, &alloc_list, link) {
+		igt_list_del(&entry->link);
+		free(entry);
+	}
+
+	return nq;
+}
+
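+/* Exercise the allocator API through the validated wrapper. */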
+static void basic_test(int fd, int alloc_type)
+{
+	struct validated_allocator *va;
+	uint64_t offsets[5];
+	bool reserved;
+
+	va = validated_allocator_create(fd, alloc_type);
+
+	offsets[1] = validated_allocator_alloc(va, 1, 123, 8);
+	igt_assert(offsets[1] != ALLOC_INVALID_ADDRESS);
+
+	offsets[2] = validated_allocator_alloc(va, 2, 123, 8);
+	igt_assert(offsets[2] != ALLOC_INVALID_ADDRESS);
+
+	reserved = validated_allocator_reserve(va, -1, 128, 1024);
+
+	igt_assert(validated_allocator_is_allocated(va, 1, 123, offsets[1]));
+	igt_assert(!validated_allocator_is_allocated(va, 2, 120, offsets[2]));
+	igt_assert(!validated_allocator_is_allocated(va, 2, 123, offsets[1]));
+	igt_assert(!validated_allocator_is_allocated(va, 3, 123, offsets[2]));
+
+	igt_assert(validated_allocator_free(va, 1));
+	igt_assert(validated_allocator_free(va, 2));
+	igt_assert(!validated_allocator_free(va, 1));
+
+	igt_assert(reserved == validated_allocator_unreserve(va, -1, 128, 1024));
+
+	igt_assert(validated_allocator_reserve(va, 1, 300, 2047));
+	igt_assert(validated_allocator_reserve(va, 2, 450, 1024));
+	igt_assert(!validated_allocator_reserve(va, 3, 20, 1005));
+
+	igt_assert(!validated_allocator_unreserve(va, 1, 540, 1024));
+	igt_assert(validated_allocator_unreserve(va, 1, 300, 2047));
+	igt_assert(!validated_allocator_unreserve(va, 2, 450, 2047));
+	igt_assert(validated_allocator_unreserve(va, 2, 450, 1024));
+
+	offsets[1] = validated_allocator_alloc(va, 2, 500, 32);
+
+	if (offsets[1] + 500 > offsets[1]) {
+		validated_allocator_reserve(va, -1, 100, offsets[1]+499);
+		validated_allocator_reserve(va, -1, 100, offsets[1]+500);
+	}
+
+	if (offsets[1] >= 100) {
+		validated_allocator_reserve(va, -1, 100, offsets[1] - 99);
+		validated_allocator_reserve(va, -1, 100, offsets[1] - 100);
+	}
+
+	igt_assert(!validated_allocator_destroy(va));
+}
+
+struct allocator_query query_array[MAXTESTLEN];
+
+struct allocators {
+	const char *name;
+	uint8_t type;
+} als[] = {
+	{"simple", INTEL_ALLOCATOR_SIMPLE},
+	{"bst", INTEL_ALLOCATOR_BST},
+	{NULL, 0},
+};
+
+igt_main
+{
+	int fd;
+	int testlen;
+	struct allocators *a;
+
+	igt_fixture {
+		fd = drm_open_driver(DRIVER_INTEL);
+		atomic_init(&next_handle, 1);
+		srandom(0xdeadbeef);
+	}
+
+	igt_subtest_with_dynamic("basic") {
+		for (a = als; a->name; a++) {
+			igt_dynamic_f("%s-allocator", a->name)
+				basic_test(fd, a->type);
+		}
+	}
+
+	igt_subtest_with_dynamic("short") {
+		testlen = create_simple_test(fd, 1ULL << 10,
+					     80, query_array);
+
+		for (a = als; a->name; a++) {
+			igt_dynamic_f("%s-allocator", a->name)
+					execute_test(fd, a->type, testlen,
+						     query_array, THOROUGH_CHECK);
+		}
+	}
+
+	igt_subtest_with_dynamic("long") {
+		testlen = create_simple_test(fd, 1ULL << 6,
+					     200000, query_array);
+
+		for (a = als; a->name; a++) {
+			igt_dynamic_f("%s-allocator", a->name)
+					execute_test(fd, a->type, testlen,
+						     query_array, NO_CHECK);
+		}
+	}
+
+	igt_fixture
+		close(fd);
+}
diff --git a/benchmarks/meson.build b/benchmarks/meson.build
index 98a08e25c..e084cb2ce 100644
--- a/benchmarks/meson.build
+++ b/benchmarks/meson.build
@@ -13,6 +13,7 @@ benchmark_progs = [
 	'gem_syslatency',
 	'gem_userptr_benchmark',
 	'gem_wsim',
+	'intel_allocator',
 	'kms_vblank',
 	'prime_lookup',
 	'vgem_mmap',
-- 
2.25.1


