[igt-dev] [PATCH i-g-t 3/4] lib/intel_allocator_bst: Implement the allocator with a BST
Andrzej Turko
andrzej.turko at linux.intel.com
Tue Jul 20 13:27:39 UTC 2021
This implements an alternative to the simple allocator.
The BST allocator follows a best-fit strategy in order to
use the address space more effectively.
The use of a binary search tree speeds up finding free
blocks of memory (compared to a list).
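
From a test's point of view the BST backend is selected like any
other allocator type. A minimal sketch, assuming the existing
intel_allocator_open()/intel_allocator_alloc() wrappers from
lib/intel_allocator.h (values are illustrative):

	uint64_t ahnd, offset;

	/* Open a BST-backed allocator for this fd/ctx pair. */
	ahnd = intel_allocator_open(fd, ctx, INTEL_ALLOCATOR_BST);

	/* Best-fit placement of a 4 KiB object, 4 KiB aligned. */
	offset = intel_allocator_alloc(ahnd, handle, 4096, 4096);

	/* ... use offset, then release the block and the allocator. */
	intel_allocator_free(ahnd, handle);
	intel_allocator_close(ahnd);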
Signed-off-by: Andrzej Turko <andrzej.turko at linux.intel.com>
Cc: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
---
lib/intel_allocator.c | 7 +
lib/intel_allocator.h | 1 +
lib/intel_allocator_bst.c | 672 ++++++++++++++++++++++++++++++++++++++
lib/meson.build | 1 +
4 files changed, 681 insertions(+)
create mode 100644 lib/intel_allocator_bst.c
diff --git a/lib/intel_allocator.c b/lib/intel_allocator.c
index 133176ed4..2ef487a05 100644
--- a/lib/intel_allocator.c
+++ b/lib/intel_allocator.c
@@ -71,6 +71,9 @@ intel_allocator_random_create(int fd, uint64_t start, uint64_t end);
struct intel_allocator *
intel_allocator_simple_create(int fd, uint64_t start, uint64_t end,
enum allocator_strategy strategy);
+struct intel_allocator *
+intel_allocator_bst_create(int fd, uint64_t start, uint64_t end,
+ enum allocator_strategy strategy);
/*
* Instead of trying to find first empty handle just get new one. Assuming
@@ -311,6 +314,10 @@ static struct intel_allocator *intel_allocator_create(int fd,
ial = intel_allocator_simple_create(fd, start, end,
allocator_strategy);
break;
+ case INTEL_ALLOCATOR_BST:
+ ial = intel_allocator_bst_create(fd, start, end,
+ allocator_strategy);
+ break;
default:
igt_assert_f(ial, "Allocator type %d not implemented\n",
allocator_type);
diff --git a/lib/intel_allocator.h b/lib/intel_allocator.h
index 7d9d01123..93ecbc3e4 100644
--- a/lib/intel_allocator.h
+++ b/lib/intel_allocator.h
@@ -211,6 +211,7 @@ void intel_allocator_print(uint64_t allocator_handle);
#define INTEL_ALLOCATOR_RELOC 1
#define INTEL_ALLOCATOR_RANDOM 2
#define INTEL_ALLOCATOR_SIMPLE 3
+#define INTEL_ALLOCATOR_BST 4
#define GEN8_GTT_ADDRESS_WIDTH 48
diff --git a/lib/intel_allocator_bst.c b/lib/intel_allocator_bst.c
new file mode 100644
index 000000000..d7b0319ed
--- /dev/null
+++ b/lib/intel_allocator_bst.c
@@ -0,0 +1,672 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include "igt.h"
+#include "igt_map.h"
+#include "igt_bst.h"
+#include "intel_allocator.h"
+
+/* The addresses of allocated objects are
+ * strictly smaller than this value (the upper
+ * end of the address range must not exceed it).
+ * This ensures there are no overflows and
+ * prevents the existence of a hole or
+ * block of size 2^64.
+ *
+ * It also allows alloc to return -1 to
+ * indicate that the allocator has failed to
+ * find enough space for the object.
+ *
+ * IALB_MAX_ADDR can be set to any positive
+ * integer smaller than 2^64.
+ */
+#define IALB_MAX_ADDR (1ull<<63)
+
+#define RESERVED 4096
+
+/* Forward declaration to avoid a compiler warning. */
+struct intel_allocator *
+intel_allocator_bst_create(int fd, uint64_t start, uint64_t end,
+ enum allocator_strategy strategy);
+
+struct intel_allocator_bst {
+ struct igt_map *objects;
+ struct igt_map *reserved;
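+
+	/* Both trees index the same set of free holes: by_size
+	 * serves best-fit lookups, by_offset makes it possible
+	 * to merge adjacent holes when a block is freed.
+	 */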
+ struct igt_bst *by_size;
+ struct igt_bst *by_offset;
+
+ uint64_t start;
+ uint64_t end;
+
+ /* statistics */
+ uint64_t total_size;
+ uint64_t allocated_size;
+ uint64_t allocated_objects;
+ uint64_t reserved_size;
+ uint64_t reserved_areas;
+};
+
+struct intel_allocator_bst_vma_hole {
+
+ /* pointer to the node in the by_size bst */
+ void *size_ptr;
+
+ /* pointer to the node in the by_offset bst */
+ void *offset_ptr;
+
+ uint64_t size;
+ uint64_t offset;
+};
+
+struct intel_allocator_record {
+ uint32_t handle;
+ uint64_t offset;
+ uint64_t size;
+};
+
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
+
+/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
+#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
+
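+/* Multiplicative hashing: keys are multiplied by a large prime
+ * close to 2^32/phi (or 2^64/phi for 64-bit keys), which spreads
+ * consecutive handles and offsets across the hash range.
+ */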
+static inline uint32_t hash_handles(const void *val)
+{
+ uint32_t hash = *(uint32_t *) val;
+
+ hash = hash * GOLDEN_RATIO_PRIME_32;
+ return hash;
+}
+
+static int equal_handles(const void *a, const void *b)
+{
+ uint32_t *key1 = (uint32_t *) a, *key2 = (uint32_t *) b;
+
+ return *key1 == *key2;
+}
+
+static inline uint32_t hash_offsets(const void *val)
+{
+ uint64_t hash = *(uint64_t *) val;
+
+ hash = hash * GOLDEN_RATIO_PRIME_64;
+ /* High bits are more random, so use them. */
+ return hash >> 32;
+}
+
+static int equal_offsets(const void *a, const void *b)
+{
+ uint64_t *key1 = (uint64_t *) a, *key2 = (uint64_t *) b;
+
+ return *key1 == *key2;
+}
+
+static void map_entry_free_func(struct igt_map_entry *entry)
+{
+ free(entry->data);
+}
+
+static void holes_bst_validate(struct intel_allocator_bst *ialb)
+{
+ struct igt_bst *offset_bst, *size_bst;
+ struct intel_allocator_bst_vma_hole *hole;
+ void *nodeptr;
+ int by_offset_size, by_size_size;
+ uint64_t prev_offset;
+
+ offset_bst = ialb->by_offset;
+ size_bst = ialb->by_size;
+ by_offset_size = offset_bst->validate(offset_bst);
+ by_size_size = size_bst->validate(size_bst);
+ igt_assert(by_offset_size == by_size_size);
+
+ prev_offset = IALB_MAX_ADDR;
+ igt_bst_for_each_node_rev(offset_bst, nodeptr) {
+ hole = igt_bst_get_value(offset_bst, nodeptr);
+ igt_assert(hole);
+ /* Make sure the hole is stored under correct keys in
+ * the balanced search trees.
+ */
+ igt_assert(hole->offset == igt_bst_get_key(offset_bst, hole->offset_ptr));
+ igt_assert(hole->size == igt_bst_get_key(size_bst, hole->size_ptr));
+
+ igt_assert(hole->offset + hole->size > hole->offset);
+ /* Make sure no pair of holes is adjacent. */
+ igt_assert(prev_offset > hole->offset + hole->size);
+
+ prev_offset = hole->offset;
+ }
+}
+
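+/* Carve the block [offset, offset + size) out of @hole and keep
+ * both trees consistent. Illustrative example: allocating
+ * [0x2000, 0x3000) from the hole [0x1000, 0x5000) leaves two
+ * holes, [0x1000, 0x2000) and [0x3000, 0x5000).
+ */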
+static void __intel_allocator_bst_alloc(struct intel_allocator_bst *ialb,
+ struct intel_allocator_bst_vma_hole *hole,
+ uint64_t offset, uint64_t size)
+{
+ struct igt_bst *offset_bst, *size_bst;
+ struct intel_allocator_bst_vma_hole *newhole;
+ uint64_t lower, higher;
+
+ igt_assert(hole->offset <= offset);
+ igt_assert(hole->offset + hole->size >= offset + size);
+
+ offset_bst = ialb->by_offset;
+ size_bst = ialb->by_size;
+
+ lower = offset - hole->offset;
+ higher = hole->size - size - lower;
+
+ if (lower > 0 && higher > 0) {
+ /* There are leftovers on both lower and higher addresses.
+ * Create a hole for higher addresses from scratch and
+ * one for lower addresses by modifying the existing hole.
+ */
+
+ newhole = malloc(sizeof(struct intel_allocator_bst_vma_hole));
+ igt_assert(newhole);
+
+ newhole->offset = offset + size;
+ newhole->size = higher;
+
+ newhole->offset_ptr = igt_bst_insert(offset_bst, newhole, newhole->offset);
+ newhole->size_ptr = igt_bst_insert(size_bst, newhole, newhole->size);
+
+ hole->size = lower;
+ igt_bst_erase(size_bst, hole->size_ptr);
+ hole->size_ptr = igt_bst_insert(size_bst, hole, hole->size);
+
+ } else if (higher > 0) {
+ /* There is no free address space in this hole left
+ * below the allocated object. Only a portion of
+ * higher addresses is still free, so just adjust the
+ * existing hole accordingly (change offset and size).
+ */
+
+ hole->offset = offset + size;
+ hole->size = higher;
+
+ igt_bst_erase(offset_bst, hole->offset_ptr);
+ igt_bst_erase(size_bst, hole->size_ptr);
+
+ hole->offset_ptr = igt_bst_insert(offset_bst, hole, hole->offset);
+ hole->size_ptr = igt_bst_insert(size_bst, hole, hole->size);
+
+ } else if (lower > 0) {
+ /* The allocated block is aligned to the end of the
+ * hole and the lower addresses are still free,
+ * so shrink the existing hole (change size,
+ * but not the offset).
+ */
+
+ hole->size = lower;
+ igt_bst_erase(size_bst, hole->size_ptr);
+ hole->size_ptr = igt_bst_insert(size_bst, hole, hole->size);
+
+ } else {
+ /* There are no leftovers, just erase the hole. */
+
+ igt_bst_erase(offset_bst, hole->offset_ptr);
+ igt_bst_erase(size_bst, hole->size_ptr);
+ free(hole);
+ }
+}
+
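+/* Return the block [offset, offset + size) to the free pool,
+ * merging it with any adjacent holes. Illustrative example:
+ * freeing [0x2000, 0x3000) with holes [0x1000, 0x2000) and
+ * [0x3000, 0x5000) on both sides collapses all three into a
+ * single hole [0x1000, 0x5000).
+ */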
+static void __intel_allocator_bst_add_hole(struct intel_allocator_bst *ialb,
+ uint64_t offset, uint64_t size)
+{
+ struct igt_bst *offset_bst, *size_bst;
+ struct intel_allocator_bst_vma_hole *hole, *nxt, *prv;
+
+ offset_bst = ialb->by_offset;
+ size_bst = ialb->by_size;
+
+ prv = igt_bst_query_predecessor(offset_bst, offset);
+ if (prv && prv->offset + prv->size < offset)
+ prv = NULL;
+
+ nxt = igt_bst_query_successor(offset_bst, offset);
+ if (nxt && nxt->offset > offset + size)
+ nxt = NULL;
+
+ if (nxt && prv) {
+ /* There are holes adjacent to the new one
+ * both below and above it in the address space.
+ * Merge them all into a single hole:
+ * erase the upper and increase size of the
+ * lower one.
+ */
+ prv->size += size + nxt->size;
+
+ igt_bst_erase(offset_bst, nxt->offset_ptr);
+ igt_bst_erase(size_bst, nxt->size_ptr);
+ free(nxt);
+
+ igt_bst_erase(size_bst, prv->size_ptr);
+ prv->size_ptr = igt_bst_insert(size_bst, prv, prv->size);
+
+ } else if (nxt) {
+ /* The only adjacent hole is above the new one,
+ * so increase its size and update its offset.
+ */
+ nxt->size += size;
+ nxt->offset = offset;
+
+ igt_bst_erase(offset_bst, nxt->offset_ptr);
+ igt_bst_erase(size_bst, nxt->size_ptr);
+
+ nxt->offset_ptr = igt_bst_insert(offset_bst, nxt, nxt->offset);
+ nxt->size_ptr = igt_bst_insert(size_bst, nxt, nxt->size);
+
+ } else if (prv) {
+ /* The only adjacent hole is below the new one,
+ * just increase its size.
+ */
+ prv->size += size;
+ igt_bst_erase(size_bst, prv->size_ptr);
+ prv->size_ptr = igt_bst_insert(size_bst, prv, prv->size);
+
+ } else {
+ /* There are no holes neighbouring the new one,
+ * so just create a new hole.
+ */
+ hole = malloc(sizeof(struct intel_allocator_bst_vma_hole));
+ igt_assert(hole);
+
+ hole->offset = offset;
+ hole->size = size;
+ hole->offset_ptr = igt_bst_insert(offset_bst, hole, offset);
+ hole->size_ptr = igt_bst_insert(size_bst, hole, size);
+	}
+}
+
+static void intel_allocator_bst_get_address_range(struct intel_allocator *ial,
+ uint64_t *startp, uint64_t *endp)
+{
+ struct intel_allocator_bst *ialb = ial->priv;
+
+ if (startp)
+ *startp = ialb->start;
+
+ if (endp)
+ *endp = ialb->end;
+}
+
+static uint64_t intel_allocator_bst_alloc(struct intel_allocator *ial, uint32_t handle,
+ uint64_t size, uint64_t alignment,
+ enum allocator_strategy strategy)
+{
+ struct intel_allocator_bst *ialb = ial->priv;
+ struct intel_allocator_record *record;
+ struct intel_allocator_bst_vma_hole *hole;
+ uint64_t misalignment, offset;
+
+ igt_assert(size > 0);
+ alignment = (alignment > 0 ? alignment : 1);
+
+ record = igt_map_search(ialb->objects, &handle);
+
+ if (record) {
+ /* This block has already been allocated,
+	 * check that it satisfies the requirements.
+ */
+
+ igt_assert(size == record->size);
+ igt_assert(record->offset % alignment == 0);
+
+ return record->offset;
+ }
+
+ /* Look for a slightly bigger hole to make sure
+ * the block can be aligned properly.
+ */
+ hole = igt_bst_query_successor(ialb->by_size,
+ size + alignment - 1);
+
+	/* Fall back to a hole of at least @size: a proper
+	 * alignment may still be possible, e.g. when the
+	 * hole's offset happens to be aligned already.
+	 */
+	if (!hole)
+		hole = igt_bst_query_successor(ialb->by_size, size);
+
+ if (!hole) {
+ igt_warn("Failed to allocate: not enough memory\n");
+ return ALLOC_INVALID_ADDRESS;
+ }
+
+ if (strategy == ALLOC_STRATEGY_NONE)
+ strategy = ial->strategy;
+
+ switch (strategy) {
+ case ALLOC_STRATEGY_LOW_TO_HIGH:
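+		/* Round the hole's offset up to the requested
+		 * alignment; misalignment is the gap this skips.
+		 */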
+ misalignment = alignment - (hole->offset % alignment);
+ misalignment = misalignment == alignment ? 0 : misalignment;
+ offset = hole->offset + misalignment;
+
+ if (misalignment + size > hole->size) {
+ igt_warn("Failed to allocate: not enough memory\n");
+ return ALLOC_INVALID_ADDRESS;
+ }
+ break;
+ case ALLOC_STRATEGY_HIGH_TO_LOW:
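+		/* Place the block at the top of the hole and
+		 * round down to the requested alignment.
+		 */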
+ offset = hole->offset + hole->size - size;
+ offset = offset - offset % alignment;
+
+ if (offset < hole->offset) {
+ igt_warn("Failed to allocate: not enough memory\n");
+ return ALLOC_INVALID_ADDRESS;
+ }
+ break;
+ default:
+		igt_assert_f(0, "Unsupported strategy\n");
+ }
+
+ __intel_allocator_bst_alloc(ialb, hole, offset, size);
+
+ record = malloc(sizeof(struct intel_allocator_record));
+ igt_assert(record);
+ record->handle = handle;
+ record->offset = offset;
+ record->size = size;
+
+ igt_map_insert(ialb->objects, &record->handle, record);
+ ialb->allocated_objects++;
+ ialb->allocated_size += size;
+
+ return offset;
+}
+
+static bool intel_allocator_bst_is_allocated(struct intel_allocator *ial, uint32_t handle,
+ uint64_t size, uint64_t offset)
+{
+ struct intel_allocator_bst *ialb = ial->priv;
+ struct intel_allocator_record *record;
+ bool same;
+
+ record = igt_map_search(ialb->objects, &handle);
+ if (record) {
+ same = (record->handle == handle && record->size == size &&
+ record->offset == offset);
+ } else {
+ same = false;
+ }
+
+ return same;
+}
+
+static bool intel_allocator_bst_free(struct intel_allocator *ial, uint32_t handle)
+{
+ struct intel_allocator_bst *ialb = ial->priv;
+ struct intel_allocator_record *record;
+
+ record = igt_map_search(ialb->objects, &handle);
+
+ if (!record)
+ return false;
+
+ __intel_allocator_bst_add_hole(ialb, record->offset, record->size);
+ ialb->allocated_objects--;
+ ialb->allocated_size -= record->size;
+ igt_map_remove(ialb->objects, &handle, map_entry_free_func);
+
+ return true;
+}
+
+static bool intel_allocator_bst_reserve(struct intel_allocator *ial, uint32_t handle,
+ uint64_t start, uint64_t end)
+{
+ struct intel_allocator_bst *ialb = ial->priv;
+ struct intel_allocator_bst_vma_hole *hole;
+ struct intel_allocator_record *record;
+
+ igt_assert(start < end);
+ igt_assert(start >= ialb->start);
+ igt_assert(end <= ialb->end);
+
+	hole = igt_bst_query_predecessor(ialb->by_offset, start);
+ if (!hole)
+ return false;
+
+ igt_assert(hole->offset <= start);
+
+ if (hole->offset + hole->size < end)
+ return false;
+
+ __intel_allocator_bst_alloc(ialb, hole, start, end - start);
+
+	record = malloc(sizeof(struct intel_allocator_record));
+ igt_assert(record);
+ record->offset = start;
+ record->size = end - start;
+ record->handle = handle;
+ igt_map_insert(ialb->reserved, &record->offset, record);
+ ialb->reserved_areas++;
+ ialb->reserved_size += end - start;
+
+ return true;
+}
+
+static bool intel_allocator_bst_unreserve(struct intel_allocator *ial, uint32_t handle,
+ uint64_t start, uint64_t end)
+{
+ struct intel_allocator_bst *ialb = ial->priv;
+ struct intel_allocator_record *record;
+
+ record = igt_map_search(ialb->reserved, &start);
+
+ if (!record) {
+ igt_warn("Only a reserved block can be unreserved.\n");
+ return false;
+ }
+
+ if (record->size != end - start) {
+ igt_warn("Size of the block does not match the passed value.\n");
+ return false;
+ }
+
+ if (record->offset != start) {
+ igt_warn("Offset of the block does not match the passed value.\n");
+ return false;
+ }
+
+ if (record->handle != handle) {
+ igt_warn("Handle %u does not match the passed handle %u.\n",
+ record->handle, handle);
+ return false;
+ }
+
+ __intel_allocator_bst_add_hole(ialb, start, end - start);
+ ialb->reserved_areas--;
+ ialb->reserved_size -= record->size;
+ igt_map_remove(ialb->reserved, &start, map_entry_free_func);
+
+ return true;
+}
+
+static bool intel_allocator_bst_is_reserved(struct intel_allocator *ial,
+ uint64_t start, uint64_t end)
+{
+ struct intel_allocator_bst *ialb = ial->priv;
+ struct intel_allocator_record *record;
+
+ igt_assert(start < end);
+ record = igt_map_search(ialb->reserved, &start);
+
+ if (!record)
+ return false;
+
+ if (record->offset == start && record->size == end - start)
+ return true;
+
+ return false;
+}
+
+static void intel_allocator_bst_destroy(struct intel_allocator *ial)
+{
+ struct intel_allocator_bst *ialb = ial->priv;
+ struct igt_bst *size_bst, *offset_bst;
+ void *nodeptr;
+
+ size_bst = ialb->by_size;
+ offset_bst = ialb->by_offset;
+
+	igt_bst_for_each_node(offset_bst, nodeptr)
+		free(igt_bst_get_value(offset_bst, nodeptr));
+
+ igt_bst_free(size_bst);
+ igt_bst_free(offset_bst);
+ free(offset_bst);
+ free(size_bst);
+
+ igt_map_destroy(ialb->objects, map_entry_free_func);
+ igt_map_destroy(ialb->reserved, map_entry_free_func);
+
+ free(ialb);
+ free(ial);
+}
+
+static bool intel_allocator_bst_is_empty(struct intel_allocator *ial)
+{
+ struct intel_allocator_bst *ialb = ial->priv;
+
+ return ialb->allocated_objects == 0 && ialb->reserved_areas == 0;
+}
+
+static void intel_allocator_bst_print(struct intel_allocator *ial, bool full)
+{
+ struct intel_allocator_bst *ialb = ial->priv;
+ struct intel_allocator_record *record;
+ struct intel_allocator_bst_vma_hole *hole;
+ struct igt_bst *offset_bst, *size_bst;
+ struct igt_map_entry *entry;
+ void *nodeptr;
+ uint64_t total_free;
+
+ igt_info("intel_allocator_bst <ial: %p, fd: %d> on "
+		 "[0x%"PRIx64" : 0x%"PRIx64"]\n", ial,
+ ial->fd, ialb->start, ialb->end);
+
+ total_free = 0;
+ if (full) {
+ offset_bst = ialb->by_offset;
+ size_bst = ialb->by_size;
+
+ igt_info("holes by offset:\n");
+ igt_bst_for_each_node(offset_bst, nodeptr) {
+ hole = igt_bst_get_value(offset_bst, nodeptr);
+ igt_info("offset = %"PRIu64" (0x%"PRIx64") "
+ "size = %"PRIu64" (0x%"PRIx64")\n",
+ hole->offset, hole->offset, hole->size,
+ hole->size);
+ total_free += hole->size;
+ }
+
+ igt_info("holes by size:\n");
+ igt_bst_for_each_node(size_bst, nodeptr) {
+ hole = igt_bst_get_value(size_bst, nodeptr);
+ igt_info("offset = %"PRIu64" (0x%"PRIx64") "
+ "size = %"PRIu64" (0x%"PRIx64")\n",
+ hole->offset, hole->offset, hole->size,
+ hole->size);
+		}
+
+ igt_info("allocated objects:\n");
+ igt_map_foreach(ialb->objects, entry) {
+ record = entry->data;
+			igt_info("handle = %u, offset = %"PRIu64" "
+				 "(0x%"PRIx64"), size = %"PRIu64" (0x%"PRIx64")\n",
+ record->handle, record->offset, record->offset,
+ record->size, record->size);
+		}
+
+ igt_info("reserved areas:\n");
+ igt_map_foreach(ialb->reserved, entry) {
+ record = entry->data;
+			igt_info("handle = %u, offset = %"PRIu64" "
+				 "(0x%"PRIx64"), size = %"PRIu64" (0x%"PRIx64")\n",
+ record->handle, record->offset, record->offset,
+ record->size, record->size);
+		}
+
+ } else {
+ offset_bst = ialb->by_offset;
+
+ igt_bst_for_each_node(offset_bst, nodeptr) {
+			hole = igt_bst_get_value(offset_bst, nodeptr);
+ total_free += hole->size;
+ }
+ }
+
+	igt_info("free space: %"PRIu64"B (0x%"PRIx64") (%.2f%% full)\n"
+		 "allocated objects: %"PRIu64", size: %"PRIu64
+		 " (0x%"PRIx64")\n"
+		 "reserved areas: %"PRIu64", size: %"PRIu64
+		 " (0x%"PRIx64")\n", total_free, total_free,
+		 ((double) (ialb->total_size - total_free) /
+		  (double) ialb->total_size) * 100.0,
+		 ialb->allocated_objects, ialb->allocated_size,
+		 ialb->allocated_size, ialb->reserved_areas,
+		 ialb->reserved_size, ialb->reserved_size);
+
+ holes_bst_validate(ialb);
+}
+
+struct intel_allocator *
+intel_allocator_bst_create(int fd, uint64_t start, uint64_t end,
+ enum allocator_strategy strategy)
+{
+ struct intel_allocator *ial;
+ struct intel_allocator_bst *ialb;
+
+ igt_debug("Using BST allocator\n");
+
+ ialb = malloc(sizeof(struct intel_allocator_bst));
+ igt_assert(ialb);
+ ial = malloc(sizeof(struct intel_allocator));
+ igt_assert(ial);
+
+	/* Allocated objects are keyed by their 4-byte handles. */
+ ialb->objects = igt_map_create(hash_handles, equal_handles);
+	/* Reserved areas are keyed by their 8-byte offsets. */
+ ialb->reserved = igt_map_create(hash_offsets, equal_offsets);
+ igt_assert(ialb->objects && ialb->reserved);
+
+	ialb->by_offset = igt_bst_avl_create();
+	ialb->by_size = igt_bst_avl_create();
+	igt_assert(ialb->by_offset && ialb->by_size);
+
+ igt_assert(start < end);
+ igt_assert(end < IALB_MAX_ADDR);
+ ialb->start = start;
+ ialb->end = end;
+
+ ialb->total_size = ialb->end - ialb->start;
+ ialb->allocated_size = 0;
+ ialb->allocated_objects = 0;
+ ialb->reserved_size = 0;
+ ialb->reserved_areas = 0;
+ __intel_allocator_bst_add_hole(ialb, ialb->start, ialb->total_size);
+
+ ial->fd = fd;
+ ial->type = INTEL_ALLOCATOR_BST;
+ ial->strategy = strategy;
+ ial->refcount = 0;
+ ial->priv = ialb;
+ ial->get_address_range = intel_allocator_bst_get_address_range;
+ ial->alloc = intel_allocator_bst_alloc;
+ ial->is_allocated = intel_allocator_bst_is_allocated;
+ ial->free = intel_allocator_bst_free;
+ ial->reserve = intel_allocator_bst_reserve;
+ ial->unreserve = intel_allocator_bst_unreserve;
+ ial->is_reserved = intel_allocator_bst_is_reserved;
+ ial->destroy = intel_allocator_bst_destroy;
+ ial->is_empty = intel_allocator_bst_is_empty;
+ ial->print = intel_allocator_bst_print;
+
+ return ial;
+}
diff --git a/lib/meson.build b/lib/meson.build
index a1aa0ee12..62c8dbf20 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -38,6 +38,7 @@ lib_sources = [
'igt_x86.c',
'instdone.c',
'intel_allocator.c',
+ 'intel_allocator_bst.c',
'intel_allocator_msgchannel.c',
'intel_allocator_random.c',
'intel_allocator_reloc.c',
--
2.25.1