[igt-dev] [PATCH i-g-t 04/24] lib/intel_allocator_simple: Add simple allocator

Zbigniew Kempczyński zbigniew.kempczynski at intel.com
Thu Oct 22 09:58:47 UTC 2020


From: Dominik Grzegorzek <dominik.grzegorzek at intel.com>

Simple allocator borrowed from Mesa and adapted for IGT use.

Signed-off-by: Dominik Grzegorzek <dominik.grzegorzek at intel.com>
Cc: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
---
 lib/intel_allocator_simple.c | 730 +++++++++++++++++++++++++++++++++++
 1 file changed, 730 insertions(+)
 create mode 100644 lib/intel_allocator_simple.c

diff --git a/lib/intel_allocator_simple.c b/lib/intel_allocator_simple.c
new file mode 100644
index 00000000..1f52db3f
--- /dev/null
+++ b/lib/intel_allocator_simple.c
@@ -0,0 +1,730 @@
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <sys/ioctl.h>
+#include <stdlib.h>
+#include "igt.h"
+#include "igt_x86.h"
+#include "intel_allocator.h"
+#include "intel_bufops.h"
+#include "igt_map.h"
+
+/* Avoid compilation warning */
+struct intel_allocator *intel_allocator_simple_create(int fd, uint32_t ctx);
+
+/* Free-space tracker: a list of free VMA holes kept ordered high-to-low. */
+struct simple_vma_heap {
+	struct igt_list_head holes;
+
+	/* If true, simple_vma_heap_alloc will prefer high addresses
+	 *
+	 * Default is true.
+	 */
+	bool alloc_high;
+};
+
+/* One contiguous free range [offset, offset + size) within the heap. */
+struct simple_vma_hole {
+	struct igt_list_head link;
+	uint64_t offset;
+	uint64_t size;
+};
+
+/* Private state of the simple allocator (hangs off intel_allocator::priv). */
+struct intel_allocator_simple {
+	struct igt_map objects;		/* handle -> intel_allocator_record */
+	struct igt_map reserved;	/* offset -> intel_allocator_record */
+	struct simple_vma_heap heap;
+
+	uint64_t gtt_size;
+	uint64_t bias;			/* lowest address handed out */
+	uint64_t start;
+	uint64_t end;
+
+	/* statistics */
+	uint64_t total_size;
+	uint64_t allocated_size;
+	uint64_t allocated_objects;
+	uint64_t reserved_size;
+	uint64_t reserved_areas;
+};
+
+/* Bookkeeping for one allocation or reservation: handle and its range. */
+struct intel_allocator_record {
+	uint32_t handle;
+	uint64_t offset;
+	uint64_t size;
+};
+
+/* Iterate holes from highest to lowest offset (the list's invariant order). */
+#define simple_vma_foreach_hole(_hole, _heap) \
+	igt_list_for_each_entry(_hole, &(_heap)->holes, link)
+
+/* As above, but safe against deleting the current hole while iterating. */
+#define simple_vma_foreach_hole_safe(_hole, _heap, _tmp) \
+	igt_list_for_each_entry_safe(_hole, _tmp,  &(_heap)->holes, link)
+
+/* Deletion-safe iteration in reverse (lowest-to-highest) order. */
+#define simple_vma_foreach_hole_safe_rev(_hole, _heap, _tmp) \
+	igt_list_for_each_entry_safe_reverse(_hole, _tmp,  &(_heap)->holes, link)
+
+/* Strip bits [63:48] to undo the canonical (sign-extended) address form.
+ * The argument is parenthesized so expressions with operators of lower
+ * precedence than '&' (e.g. '|') expand correctly.
+ */
+#define DECANONICAL(offset) ((offset) & ((1ull << 48) - 1))
+
+/*
+ * Lowest GTT offset the allocator will hand out; keeps objects away from
+ * address 0.  Currently a fixed 256 KiB for all devices; fd is unused.
+ */
+static uint64_t get_bias(int fd)
+{
+	(void) fd;
+
+	return 256 << 10;
+}
+
+/*
+ * Assert the heap invariants: every hole is non-empty, holes are kept in
+ * high-to-low order, do not overlap, and are fully coalesced (no two holes
+ * touch - adjacent holes should have been merged on free).
+ */
+static void simple_vma_heap_validate(struct simple_vma_heap *heap)
+{
+	uint64_t prev_offset = 0;
+	struct simple_vma_hole *hole;
+
+	simple_vma_foreach_hole(hole, heap) {
+		igt_assert(hole->size > 0);
+
+		if (&hole->link == heap->holes.next) {
+			/* This must be the top-most hole.  Assert that,
+			 * if it overflows, it overflows to 0, i.e. 2^64.
+			 */
+			igt_assert(hole->size + hole->offset == 0 ||
+				   hole->size + hole->offset > hole->offset);
+		} else {
+			/* Not the top-most hole, so it must not overflow
+			 * and its end must lie strictly below the previous
+			 * (higher) hole's offset.  If hole->size +
+			 * hole->offset == prev_offset, then we failed to
+			 * join holes during a simple_vma_heap_free.
+			 */
+			igt_assert(hole->size + hole->offset > hole->offset &&
+				   hole->size + hole->offset < prev_offset);
+		}
+		prev_offset = hole->offset;
+	}
+}
+
+
+/*
+ * Return [offset, offset + size) to the heap, merging with the immediately
+ * adjacent lower and/or higher hole so that holes never touch.
+ */
+static void simple_vma_heap_free(struct simple_vma_heap *heap,
+				uint64_t offset, uint64_t size)
+{
+	struct simple_vma_hole *high_hole = NULL, *low_hole = NULL, *hole;
+	bool high_adjacent, low_adjacent;
+
+	/* Freeing something with a size of 0 is not valid. */
+	igt_assert(size > 0);
+
+	/* It's possible for offset + size to wrap around if we touch the top of
+	 * the 64-bit address space, but we cannot go any higher than 2^64.
+	 */
+	igt_assert(offset + size == 0 || offset + size > offset);
+
+	simple_vma_heap_validate(heap);
+
+	/* Find immediately higher and lower holes if they exist. */
+	simple_vma_foreach_hole(hole, heap) {
+		if (hole->offset <= offset) {
+			low_hole = hole;
+			break;
+		}
+		high_hole = hole;
+	}
+
+	if (high_hole)
+		igt_assert(offset + size <= high_hole->offset);
+	high_adjacent = high_hole && offset + size == high_hole->offset;
+
+	if (low_hole) {
+		igt_assert(low_hole->offset + low_hole->size > low_hole->offset);
+		igt_assert(low_hole->offset + low_hole->size <= offset);
+	}
+	low_adjacent = low_hole && low_hole->offset + low_hole->size == offset;
+
+	if (low_adjacent && high_adjacent) {
+		/* Merge the two holes */
+		low_hole->size += size + high_hole->size;
+		igt_list_del(&high_hole->link);
+		free(high_hole);
+	} else if (low_adjacent) {
+		/* Merge into the low hole */
+		low_hole->size += size;
+	} else if (high_adjacent) {
+		/* Merge into the high hole */
+		high_hole->offset = offset;
+		high_hole->size += size;
+	} else {
+		/* Neither hole is adjacent; make a new one */
+		hole = calloc(1, sizeof(*hole));
+		igt_assert(hole);
+
+		hole->offset = offset;
+		hole->size = size;
+		/* Add it after the high hole so we maintain high-to-low
+		 *  ordering
+		 */
+		if (high_hole)
+			igt_list_add(&hole->link, &high_hole->link);
+		else
+			igt_list_add(&hole->link, &heap->holes);
+	}
+
+	simple_vma_heap_validate(heap);
+}
+
+/* Initialize an empty heap and seed it with the range [start, start + size). */
+static void simple_vma_heap_init(struct simple_vma_heap *heap,
+		     uint64_t start, uint64_t size)
+{
+	IGT_INIT_LIST_HEAD(&heap->holes);
+	simple_vma_heap_free(heap, start, size);
+
+	/* Default to using high addresses */
+	heap->alloc_high = true;
+}
+
+/* Free all hole bookkeeping; the heap must not be used afterwards. */
+static void simple_vma_heap_finish(struct simple_vma_heap *heap)
+{
+	struct simple_vma_hole *hole, *tmp;
+
+	simple_vma_foreach_hole_safe(hole, heap, tmp)
+		free(hole);
+}
+
+/*
+ * Carve [offset, offset + size) out of the given hole, which must fully
+ * contain the range.  Shrinks, removes, or splits the hole as needed.
+ */
+static void simple_vma_hole_alloc(struct simple_vma_hole *hole,
+				uint64_t offset, uint64_t size)
+{
+	struct simple_vma_hole *high_hole;
+	uint64_t waste;
+
+	igt_assert(hole->offset <= offset);
+	igt_assert(hole->size >= offset - hole->offset + size);
+
+	if (offset == hole->offset && size == hole->size) {
+		/* Just get rid of the hole. */
+		igt_list_del(&hole->link);
+		free(hole);
+		return;
+	}
+
+	igt_assert(offset - hole->offset <= hole->size - size);
+	waste = (hole->size - size) - (offset - hole->offset);
+	if (waste == 0) {
+		/* We allocated at the top.  Shrink the hole down. */
+		hole->size -= size;
+		return;
+	}
+
+	if (offset == hole->offset) {
+		/* We allocated at the bottom.  Shrink the hole up. */
+		hole->offset += size;
+		hole->size -= size;
+		return;
+	}
+
+	/* We allocated in the middle.  We need to split the old hole into two
+	 * holes, one high and one low.
+	 */
+	high_hole = calloc(1, sizeof(*hole));
+	igt_assert(high_hole);
+
+	high_hole->offset = offset + size;
+	high_hole->size = waste;
+
+	/* Adjust the hole to be the amount of space left at the bottom of the
+	 * original hole.
+	 */
+	hole->size = offset - hole->offset;
+
+	/* Place the new hole before the old hole so that the list is in order
+	 * from high to low.
+	 */
+	igt_list_add_tail(&high_hole->link, &hole->link);
+}
+
+/*
+ * Allocate a size-byte chunk aligned to `alignment` from the heap.  Scans
+ * holes from high addresses when heap->alloc_high is set, otherwise from
+ * low.  On success writes the chosen address to *offset and returns true.
+ */
+static bool simple_vma_heap_alloc(struct simple_vma_heap *heap,
+				  uint64_t *offset, uint64_t size,
+				  uint64_t alignment)
+{
+	struct simple_vma_hole *hole, *tmp;
+	uint64_t misalign;
+
+	/* The caller is expected to reject zero-size allocations */
+	igt_assert(size > 0);
+	igt_assert(alignment > 0);
+
+	simple_vma_heap_validate(heap);
+
+	if (heap->alloc_high) {
+		simple_vma_foreach_hole_safe(hole, heap, tmp) {
+			if (size > hole->size)
+				continue;
+
+			/* Compute the offset as the highest address where a
+			 * chunk of the given size can be without going over
+			 * the top of the hole.
+			 *
+			 * This calculation is known to not overflow because
+			 * we know that hole->size + hole->offset can only
+			 * overflow to 0 and size > 0.
+			 */
+			*offset = (hole->size - size) + hole->offset;
+
+			/* Align the offset.  We align down and not up
+			 * because we are allocating from the top of the hole
+			 * and not the bottom.
+			 */
+			*offset = (*offset / alignment) * alignment;
+
+			if (*offset < hole->offset)
+				continue;
+
+			simple_vma_hole_alloc(hole, *offset, size);
+			simple_vma_heap_validate(heap);
+			return true;
+		}
+	} else {
+		simple_vma_foreach_hole_safe_rev(hole, heap, tmp) {
+			if (size > hole->size)
+				continue;
+
+			*offset = hole->offset;
+
+			/* Align the offset */
+			misalign = *offset % alignment;
+			if (misalign) {
+				uint64_t pad = alignment - misalign;
+
+				if (pad > hole->size - size)
+					continue;
+
+				*offset += pad;
+			}
+
+			simple_vma_hole_alloc(hole, *offset, size);
+			simple_vma_heap_validate(heap);
+			return true;
+		}
+	}
+
+	/* Failed to allocate */
+	return false;
+}
+
+/* Report the [start, end) address range this allocator manages; either
+ * output pointer may be NULL.
+ */
+static void intel_allocator_simple_get_address_range(struct intel_allocator *ial,
+						     uint64_t *startp,
+						     uint64_t *endp)
+{
+	struct intel_allocator_simple *ials = ial->priv;
+
+	if (startp)
+		*startp = ials->start;
+
+	if (endp)
+		*endp = ials->end;
+}
+
+/*
+ * Allocate the exact range [offset, offset + size) from the heap.  Returns
+ * false if the range is not entirely contained in a single free hole.
+ */
+static bool simple_vma_heap_alloc_addr(struct intel_allocator_simple *ials,
+				       uint64_t offset, uint64_t size)
+{
+	struct simple_vma_heap *heap = &ials->heap;
+	struct simple_vma_hole *hole, *tmp;
+	/* Allocating something with a size of 0 is not valid. */
+	igt_assert(size > 0);
+
+	/* It's possible for offset + size to wrap around if we touch the top of
+	 * the 64-bit address space, but we cannot go any higher than 2^64.
+	 */
+	igt_assert(offset + size == 0 || offset + size > offset);
+
+	/* Find the hole if one exists. */
+	simple_vma_foreach_hole_safe(hole, heap, tmp) {
+		if (hole->offset > offset)
+			continue;
+
+		/* Holes are ordered high-to-low so the first hole we find
+		 * with hole->offset <= offset is our hole.  If it's not big
+		 * enough to contain the requested range, then the allocation
+		 * fails.
+		 */
+		igt_assert(hole->offset <= offset);
+		if (hole->size < offset - hole->offset + size)
+			return false;
+
+		simple_vma_hole_alloc(hole, offset, size);
+		return true;
+	}
+
+	/* We didn't find a suitable hole */
+	return false;
+}
+
+/*
+ * Allocate (or look up) a GTT offset for `handle`.  Repeated calls with
+ * the same handle return the same offset and must pass the same size.
+ * An alignment of 0 is treated as 1.  Asserts on heap exhaustion.
+ */
+static uint64_t intel_allocator_simple_alloc(struct intel_allocator *ial,
+					     uint32_t handle, uint64_t size,
+					     uint64_t alignment)
+{
+	struct intel_allocator_record *rec;
+	struct intel_allocator_simple *ials;
+	uint64_t offset;
+
+	igt_assert(ial);
+	ials = (struct intel_allocator_simple *) ial->priv;
+	igt_assert(ials);
+	igt_assert(handle);
+	alignment = alignment > 0 ? alignment : 1;
+	rec = igt_map_find(&ials->objects, &handle);
+	if (rec) {
+		offset = rec->offset;
+		igt_assert(rec->size == size);
+	} else {
+		igt_assert(simple_vma_heap_alloc(&ials->heap, &offset,
+						 size, alignment));
+		rec = malloc(sizeof(*rec));
+		/* Match the calloc + igt_assert pattern used elsewhere. */
+		igt_assert(rec);
+		rec->handle = handle;
+		rec->offset = offset;
+		rec->size = size;
+
+		igt_map_add(&ials->objects, &rec->handle, rec);
+		ials->allocated_objects++;
+		ials->allocated_size += size;
+	}
+
+	return offset;
+}
+
+/*
+ * Release the range previously allocated for `handle` back to the heap.
+ * Returns false if the handle has no allocation.
+ */
+static bool intel_allocator_simple_free(struct intel_allocator *ial, uint32_t handle)
+{
+	struct intel_allocator_record *rec = NULL;
+	struct intel_allocator_simple *ials;
+
+	igt_assert(ial);
+	ials = (struct intel_allocator_simple *) ial->priv;
+	igt_assert(ials);
+
+	rec = igt_map_del(&ials->objects, &handle);
+	if (rec) {
+		simple_vma_heap_free(&ials->heap, rec->offset, rec->size);
+		ials->allocated_objects--;
+		ials->allocated_size -= rec->size;
+		free(rec);
+
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * True when the record matches handle, size and offset; offsets are
+ * compared with the canonical-form bits [63:48] stripped.
+ *
+ * NOTE(review): identifiers beginning with a double underscore are
+ * reserved for the implementation; consider renaming (e.g. same_record).
+ */
+static inline bool __same(const struct intel_allocator_record *rec,
+			  uint32_t handle, uint64_t size, uint64_t offset)
+{
+	return rec->handle == handle && rec->size == size &&
+			DECANONICAL(rec->offset) == DECANONICAL(offset);
+}
+
+/*
+ * Check whether `handle` currently holds an allocation matching the given
+ * size and offset.
+ */
+static bool intel_allocator_simple_is_allocated(struct intel_allocator *ial,
+						uint32_t handle, uint64_t size,
+						uint64_t offset)
+{
+	struct intel_allocator_record *rec;
+	struct intel_allocator_simple *ials;
+	bool same = false;
+
+	igt_assert(ial);
+	ials = (struct intel_allocator_simple *) ial->priv;
+	igt_assert(ials);
+	igt_assert(handle);
+
+	rec = igt_map_find(&ials->objects, &handle);
+	if (rec && __same(rec, handle, size, offset))
+		same = true;
+
+	return same;
+}
+
+/*
+ * Reserve [start, end) for `handle` so the heap will not hand it out.
+ * Offsets are decanonicalized first.  Returns false when the range is not
+ * entirely free.
+ */
+static bool intel_allocator_simple_reserve(struct intel_allocator *ial,
+					   uint32_t handle,
+					   uint64_t start, uint64_t end)
+{
+	uint64_t size = end - start;
+	struct intel_allocator_record *rec = NULL;
+	struct intel_allocator_simple *ials;
+
+	igt_assert(ial);
+	ials = (struct intel_allocator_simple *) ial->priv;
+	igt_assert(ials);
+
+	/* clear [63:48] bits to get rid of canonical form */
+	start = DECANONICAL(start);
+	end = DECANONICAL(end);
+	igt_assert(end > start || end == 0);
+
+	if (simple_vma_heap_alloc_addr(ials, start, size)) {
+		rec = malloc(sizeof(*rec));
+		/* Match the calloc + igt_assert pattern used elsewhere. */
+		igt_assert(rec);
+		rec->handle = handle;
+		rec->offset = start;
+		rec->size = size;
+
+		igt_map_add(&ials->reserved, &rec->offset, rec);
+
+		ials->reserved_areas++;
+		ials->reserved_size += rec->size;
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * Drop a reservation made with intel_allocator_simple_reserve.  The whole
+ * block must be unreserved at once and `handle` must match the one used
+ * when reserving.
+ */
+static bool intel_allocator_simple_unreserve(struct intel_allocator *ial,
+					     uint32_t handle,
+					     uint64_t start, uint64_t end)
+{
+	uint64_t size = end - start;
+	struct intel_allocator_record *rec = NULL;
+	struct intel_allocator_simple *ials;
+
+	igt_assert(ial);
+	ials = (struct intel_allocator_simple *) ial->priv;
+	igt_assert(ials);
+
+	/* clear [63:48] bits to get rid of canonical form */
+	start = DECANONICAL(start);
+	end = DECANONICAL(end);
+
+	igt_assert(end > start || end == 0);
+
+	rec = igt_map_find(&ials->reserved, &start);
+
+	if (!rec) {
+		igt_warn("Only reserved blocks can be unreserved\n");
+		return false;
+	}
+
+	if (rec->size != size) {
+		igt_warn("Only the whole block unreservation allowed\n");
+		return false;
+	}
+
+	if (rec->handle != handle) {
+		igt_warn("Handle %u doesn't match reservation handle: %u\n",
+			 rec->handle, handle);
+		return false;
+	}
+
+	igt_map_del(&ials->reserved, &start);
+
+	ials->reserved_areas--;
+	ials->reserved_size -= rec->size;
+	free(rec);
+	simple_vma_heap_free(&ials->heap, start, size);
+
+	return true;
+}
+
+/*
+ * True when [start, end) exactly matches an existing reservation (same
+ * start and size, after decanonicalizing the offsets).
+ */
+static bool intel_allocator_simple_is_reserved(struct intel_allocator *ial,
+					       uint64_t start, uint64_t end)
+{
+	uint64_t size = end - start;
+	struct intel_allocator_record *rec = NULL;
+	struct intel_allocator_simple *ials;
+
+	igt_assert(ial);
+	ials = (struct intel_allocator_simple *) ial->priv;
+	igt_assert(ials);
+
+	/* clear [63:48] bits to get rid of canonical form */
+	start = DECANONICAL(start);
+	end = DECANONICAL(end);
+
+	igt_assert(end > start || end == 0);
+
+	rec = igt_map_find(&ials->reserved, &start);
+
+	if (!rec)
+		return false;
+
+	if (rec->offset == start && rec->size == size)
+		return true;
+
+	return false;
+}
+
+/* igt_map comparator for 64-bit keys (the reserved map is keyed by offset). */
+static bool equal_8bytes(const void *key1, const void *key2)
+{
+	const uint64_t *k1 = key1, *k2 = key2;
+	return *k1 == *k2;
+}
+
+/*
+ * Tear down the allocator: free the heap, every object/reservation record,
+ * both maps, the private state and the allocator itself.
+ */
+static void intel_allocator_simple_destroy(struct intel_allocator *ial)
+{
+	struct intel_allocator_simple *ials;
+	struct igt_map_entry *pos;
+	struct igt_map *map;
+	int i;
+
+	igt_assert(ial);
+	ials = (struct intel_allocator_simple *) ial->priv;
+	simple_vma_heap_finish(&ials->heap);
+
+	map = &ials->objects;
+	igt_map_for_each(map, i, pos)
+		free(pos->value);
+	igt_map_free(&ials->objects);
+
+	map = &ials->reserved;
+	igt_map_for_each(map, i, pos)
+		free(pos->value);
+	igt_map_free(&ials->reserved);
+
+	free(ial->priv);
+	free(ial);
+}
+
+/* Space kept unused at the very top of the managed range. */
+#define RESERVE (1024 * 64)
+
+/* True when the allocator holds no objects and no reserved areas. */
+static bool intel_allocator_simple_is_empty(struct intel_allocator *ial)
+{
+	struct intel_allocator_simple *ials = ial->priv;
+
+	/* Counters are uint64_t, so use PRIu64 (PRId64 would mismatch). */
+	igt_debug("<fd: %d, ctx: %u> objects: %" PRIu64 ", reserved_areas: %" PRIu64 "\n",
+		 ial->fd, ial->ctx, ials->allocated_objects, ials->reserved_areas);
+
+	return !ials->allocated_objects && !ials->reserved_areas;
+}
+
+/*
+ * Dump allocator state.  With `full` set, list every hole, object and
+ * reserved area and cross-check them against the cached statistics;
+ * otherwise only sum up the free space.
+ */
+static void intel_allocator_simple_print(struct intel_allocator *ial, bool full)
+{
+	struct intel_allocator_simple *ials;
+	struct simple_vma_hole *hole;
+	struct simple_vma_heap *heap;
+	struct igt_map_entry *pos;
+	struct igt_map *map;
+	uint64_t total_free = 0, allocated_size = 0, allocated_objects = 0;
+	uint64_t reserved_size = 0, reserved_areas = 0;
+	int i;
+
+	igt_assert(ial);
+	ials = (struct intel_allocator_simple *) ial->priv;
+	igt_assert(ials);
+	heap = &ials->heap;
+
+	/* ctx is unsigned; print with %u as the rest of the file does. */
+	igt_info("intel_allocator_simple <fd:%d ctx:%u> on "
+		 "[0x%"PRIx64" : 0x%"PRIx64"]:\n", ial->fd, ial->ctx,
+		 ials->bias, ials->gtt_size - RESERVE);
+
+	if (full) {
+		igt_info("holes:\n");
+		simple_vma_foreach_hole(hole, heap) {
+			igt_info("offset = %"PRIu64" (0x%"PRIx64"), "
+				 "size = %"PRIu64" (0x%"PRIx64")\n",
+				 hole->offset, hole->offset, hole->size,
+				 hole->size);
+			total_free += hole->size;
+		}
+		igt_assert(total_free <= ials->total_size);
+		igt_info("total_free: %" PRIx64
+			 ", total_size: %" PRIx64
+			 ", allocated_size: %" PRIx64
+			 ", reserved_size: %" PRIx64 "\n",
+			 total_free, ials->total_size, ials->allocated_size,
+			 ials->reserved_size);
+		igt_assert(total_free ==
+			   ials->total_size - ials->allocated_size - ials->reserved_size);
+
+		igt_info("objects:\n");
+		map = &ials->objects;
+		igt_map_for_each(map, i, pos) {
+			struct intel_allocator_record *rec = pos->value;
+
+			igt_info("handle = %u, offset = %"PRIu64" "
+				"(0x%"PRIx64"), size = %"PRIu64" (0x%"PRIx64")\n",
+				 rec->handle, rec->offset, rec->offset,
+				 rec->size, rec->size);
+			allocated_objects++;
+			allocated_size += rec->size;
+		}
+		igt_assert(ials->allocated_size == allocated_size);
+		igt_assert(ials->allocated_objects == allocated_objects);
+
+		igt_info("reserved areas:\n");
+		map = &ials->reserved;
+		igt_map_for_each(map, i, pos) {
+			struct intel_allocator_record *rec = pos->value;
+
+			igt_info("offset = %"PRIu64" (0x%"PRIx64"), "
+				 "size = %"PRIu64" (0x%"PRIx64")\n",
+				 rec->offset, rec->offset,
+				 rec->size, rec->size);
+			reserved_areas++;
+			reserved_size += rec->size;
+		}
+		igt_assert(ials->reserved_areas == reserved_areas);
+		igt_assert(ials->reserved_size == reserved_size);
+	} else
+		simple_vma_foreach_hole(hole, heap)
+			total_free += hole->size;
+
+
+	igt_info("free space: %"PRIu64"B (0x%"PRIx64") (%.2f%% full)\n"
+		 "allocated objects: %"PRIu64", reserved areas: %"PRIu64"\n",
+		 total_free, total_free,
+		 ((double) (ials->total_size - total_free) /
+		  (double) ials->total_size) * 100,
+		 ials->allocated_objects, ials->reserved_areas);
+}
+
+/*
+ * Create a simple allocator for the given device fd and context.  The
+ * managed range is [bias, gtt_size - RESERVE); on non-full-ppgtt devices
+ * only half the aperture is used.  The caller owns the returned allocator
+ * and releases it via the destroy() vfunc.
+ */
+struct intel_allocator *intel_allocator_simple_create(int fd, uint32_t ctx)
+{
+	struct intel_allocator *ial;
+	struct intel_allocator_simple *ials;
+
+	igt_debug("Using simple allocator <fd: %d, ctx: %u>\n", fd, ctx);
+
+	ial = calloc(1, sizeof(*ial));
+	igt_assert(ial);
+
+	ial->fd = fd;
+	ial->ctx = ctx;
+	ial->get_address_range = intel_allocator_simple_get_address_range;
+	ial->alloc = intel_allocator_simple_alloc;
+	ial->free = intel_allocator_simple_free;
+	ial->is_allocated = intel_allocator_simple_is_allocated;
+	ial->reserve = intel_allocator_simple_reserve;
+	ial->unreserve = intel_allocator_simple_unreserve;
+	ial->is_reserved = intel_allocator_simple_is_reserved;
+	ial->destroy = intel_allocator_simple_destroy;
+	ial->is_empty = intel_allocator_simple_is_empty;
+	ial->print = intel_allocator_simple_print;
+	ials = ial->priv = malloc(sizeof(struct intel_allocator_simple));
+	igt_assert(ials);
+
+	igt_map_init(&ials->objects, NULL, NULL, 8);
+	/* Reserved addresses hashtable is indexed by an offset */
+	igt_map_init(&ials->reserved, equal_8bytes, NULL, 3);
+
+	ials->gtt_size = gem_aperture_size(fd);
+	/* gtt_size is uint64_t, so use PRIu64 (PRId64 would mismatch). */
+	igt_debug("Gtt size: %" PRIu64 "\n", ials->gtt_size);
+	if (!gem_uses_full_ppgtt(fd))
+		ials->gtt_size /= 2;
+
+	ials->bias = get_bias(fd);
+	ials->total_size = ials->gtt_size - ials->bias - RESERVE;
+	ials->start = ials->bias;
+	ials->end = ials->gtt_size - RESERVE;
+	simple_vma_heap_init(&ials->heap, ials->bias, ials->total_size);
+
+	ials->allocated_size = 0;
+	ials->allocated_objects = 0;
+	ials->reserved_size = 0;
+	ials->reserved_areas = 0;
+
+	return ial;
+}
-- 
2.26.0



More information about the igt-dev mailing list