[igt-dev] [PATCH i-g-t v26 05/35] lib/intel_allocator_simple: Add simple allocator
Jason Ekstrand
jason at jlekstrand.net
Wed Mar 17 19:38:35 UTC 2021
On Wed, Mar 17, 2021 at 9:46 AM Zbigniew Kempczyński
<zbigniew.kempczynski at intel.com> wrote:
>
> From: Dominik Grzegorzek <dominik.grzegorzek at intel.com>
>
> Simple allocator borrowed from Mesa and adapted for IGT use.
>
> By default we prefer allocating from the top of the vm address space
> (so we can catch addressing issues proactively). When
> intel_allocator_simple_create() is used we exclude the last page, as
> the HW tends to hang on the render engine when the full 3D pipeline is
> executed from the last page. For more control over the vm range, the
> user can specify a range with intel_allocator_simple_create_full()
> (respecting the gtt size).
>
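For anyone else reading along, the intended usage (as I read the patch)
is roughly the following sketch; fd and handle come from the
surrounding test and error handling is omitted:

	/* Hypothetical usage sketch, not part of the patch. */
	struct intel_allocator *ial = intel_allocator_simple_create(fd);
	uint64_t offset;

	/* Allocations come from the top of the vm range by default
	 * (ALLOC_STRATEGY_HIGH_TO_LOW), honouring the requested
	 * alignment.
	 */
	offset = ial->alloc(ial, handle, 4096, 4096);

	/* ... execute batches using the allocated offset ... */

	igt_assert(ial->free(ial, handle));
	igt_assert(ial->is_empty(ial));
	ial->destroy(ial);
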
> Signed-off-by: Dominik Grzegorzek <dominik.grzegorzek at intel.com>
> Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
> Cc: Chris Wilson <chris at chris-wilson.co.uk>
> ---
> lib/intel_allocator_simple.c | 744 +++++++++++++++++++++++++++++++++++
> 1 file changed, 744 insertions(+)
> create mode 100644 lib/intel_allocator_simple.c
>
> diff --git a/lib/intel_allocator_simple.c b/lib/intel_allocator_simple.c
> new file mode 100644
> index 000000000..cc207c8e9
> --- /dev/null
> +++ b/lib/intel_allocator_simple.c
> @@ -0,0 +1,744 @@
> +// SPDX-License-Identifier: MIT
> +/*
> + * Copyright © 2021 Intel Corporation
> + */
> +
> +#include <sys/ioctl.h>
> +#include <stdlib.h>
> +#include "igt.h"
> +#include "igt_x86.h"
> +#include "intel_allocator.h"
> +#include "intel_bufops.h"
> +#include "igt_map.h"
> +
> +/*
> + * We limit the allocator space to avoid a hang when a batch would be
> + * pinned in the last page.
> + */
> +#define RESERVED 4096
> +
> +/* Avoid compilation warning */
> +struct intel_allocator *intel_allocator_simple_create(int fd);
> +struct intel_allocator *
> +intel_allocator_simple_create_full(int fd, uint64_t start, uint64_t end,
> + enum allocator_strategy strategy);
> +
> +struct simple_vma_heap {
> + struct igt_list_head holes;
> +
> + /* If true, simple_vma_heap_alloc will prefer high addresses
> + *
> + * Default is true.
> + */
> + bool alloc_high;
> +};
> +
> +struct simple_vma_hole {
> + struct igt_list_head link;
> + uint64_t offset;
> + uint64_t size;
> +};
> +
> +struct intel_allocator_simple {
> + struct igt_map objects;
> + struct igt_map reserved;
> + struct simple_vma_heap heap;
> +
> + uint64_t start;
> + uint64_t end;
> +
> + /* statistics */
> + uint64_t total_size;
> + uint64_t allocated_size;
> + uint64_t allocated_objects;
> + uint64_t reserved_size;
> + uint64_t reserved_areas;
> +};
> +
> +struct intel_allocator_record {
> + uint32_t handle;
> + uint64_t offset;
> + uint64_t size;
> +};
> +
> +#define simple_vma_foreach_hole(_hole, _heap) \
> + igt_list_for_each_entry(_hole, &(_heap)->holes, link)
> +
> +#define simple_vma_foreach_hole_safe(_hole, _heap, _tmp) \
> + igt_list_for_each_entry_safe(_hole, _tmp, &(_heap)->holes, link)
> +
> +#define simple_vma_foreach_hole_safe_rev(_hole, _heap, _tmp) \
> + igt_list_for_each_entry_safe_reverse(_hole, _tmp, &(_heap)->holes, link)
> +
> +#define GEN8_GTT_ADDRESS_WIDTH 48
> +#define DECANONICAL(offset) ((offset) & ((1ull << GEN8_GTT_ADDRESS_WIDTH) - 1))
> +
> +static void simple_vma_heap_validate(struct simple_vma_heap *heap)
> +{
> + uint64_t prev_offset = 0;
> + struct simple_vma_hole *hole;
> +
> + simple_vma_foreach_hole(hole, heap) {
> + igt_assert(hole->size > 0);
> +
> + if (&hole->link == heap->holes.next) {
> + /* This must be the top-most hole. Assert that,
> + * if it overflows, it overflows to 0, i.e. 2^64.
> + */
This indent is weird.
> + igt_assert(hole->size + hole->offset == 0 ||
> + hole->size + hole->offset > hole->offset);
> + } else {
> + /* This is not the top-most hole so it must not overflow and,
> + * in fact, must be strictly lower than the top-most hole. If
> + * hole->size + hole->offset == prev_offset, then we failed to
> + * join holes during a simple_vma_heap_free.
> + */
Weird indent.
> + igt_assert(hole->size + hole->offset > hole->offset &&
> + hole->size + hole->offset < prev_offset);
> + }
> + prev_offset = hole->offset;
> + }
> +}
> +
> +
> +static void simple_vma_heap_free(struct simple_vma_heap *heap,
> + uint64_t offset, uint64_t size)
> +{
> + struct simple_vma_hole *high_hole = NULL, *low_hole = NULL, *hole;
> + bool high_adjacent, low_adjacent;
> +
> + /* Freeing something with a size of 0 is not valid. */
> + igt_assert(size > 0);
> +
> + /* It's possible for offset + size to wrap around if we touch the top of
> + * the 64-bit address space, but we cannot go any higher than 2^64.
> + */
> + igt_assert(offset + size == 0 || offset + size > offset);
> +
> + simple_vma_heap_validate(heap);
> +
> + /* Find immediately higher and lower holes if they exist. */
> + simple_vma_foreach_hole(hole, heap) {
> + if (hole->offset <= offset) {
> + low_hole = hole;
> + break;
> + }
> + high_hole = hole;
> + }
> +
> + if (high_hole)
> + igt_assert(offset + size <= high_hole->offset);
> + high_adjacent = high_hole && offset + size == high_hole->offset;
> +
> + if (low_hole) {
> + igt_assert(low_hole->offset + low_hole->size > low_hole->offset);
> + igt_assert(low_hole->offset + low_hole->size <= offset);
> + }
> + low_adjacent = low_hole && low_hole->offset + low_hole->size == offset;
> +
> + if (low_adjacent && high_adjacent) {
> + /* Merge the two holes */
> + low_hole->size += size + high_hole->size;
> + igt_list_del(&high_hole->link);
> + free(high_hole);
> + } else if (low_adjacent) {
> + /* Merge into the low hole */
> + low_hole->size += size;
> + } else if (high_adjacent) {
> + /* Merge into the high hole */
> + high_hole->offset = offset;
> + high_hole->size += size;
> + } else {
> + /* Neither hole is adjacent; make a new one */
> + hole = calloc(1, sizeof(*hole));
> + igt_assert(hole);
> +
> + hole->offset = offset;
> + hole->size = size;
> + /* Add it after the high hole so we maintain high-to-low
> + * ordering
> + */
> + if (high_hole)
> + igt_list_add(&hole->link, &high_hole->link);
> + else
> + igt_list_add(&hole->link, &heap->holes);
> + }
> +
> + simple_vma_heap_validate(heap);
> +}
> +
> +static void simple_vma_heap_init(struct simple_vma_heap *heap,
> + uint64_t start, uint64_t size,
> + enum allocator_strategy strategy)
> +{
> + IGT_INIT_LIST_HEAD(&heap->holes);
> + simple_vma_heap_free(heap, start, size);
> +
> + switch (strategy) {
> + case ALLOC_STRATEGY_LOW_TO_HIGH:
> + heap->alloc_high = false;
> + break;
> + case ALLOC_STRATEGY_HIGH_TO_LOW:
> + default:
> + heap->alloc_high = true;
> + }
> +}
> +
> +static void simple_vma_heap_finish(struct simple_vma_heap *heap)
> +{
> + struct simple_vma_hole *hole, *tmp;
> +
> + simple_vma_foreach_hole_safe(hole, heap, tmp)
> + free(hole);
> +}
> +
> +static void simple_vma_hole_alloc(struct simple_vma_hole *hole,
> + uint64_t offset, uint64_t size)
> +{
> + struct simple_vma_hole *high_hole;
> + uint64_t waste;
> +
> + igt_assert(hole->offset <= offset);
> + igt_assert(hole->size >= offset - hole->offset + size);
> +
> + if (offset == hole->offset && size == hole->size) {
> + /* Just get rid of the hole. */
> + igt_list_del(&hole->link);
> + free(hole);
> + return;
> + }
> +
> + igt_assert(offset - hole->offset <= hole->size - size);
> + waste = (hole->size - size) - (offset - hole->offset);
> + if (waste == 0) {
> + /* We allocated at the top. Shrink the hole down. */
> + hole->size -= size;
> + return;
> + }
> +
> + if (offset == hole->offset) {
> + /* We allocated at the bottom. Shrink the hole up. */
> + hole->offset += size;
> + hole->size -= size;
> + return;
> + }
> +
> + /* We allocated in the middle. We need to split the old hole into two
> + * holes, one high and one low.
> + */
Weird indent
> + high_hole = calloc(1, sizeof(*hole));
> + igt_assert(high_hole);
> +
> + high_hole->offset = offset + size;
> + high_hole->size = waste;
> +
> + /* Adjust the hole to be the amount of space left at the bottom of the
> + * original hole.
> + */
Weird indent
> + hole->size = offset - hole->offset;
> +
> + /* Place the new hole before the old hole so that the list is in order
> + * from high to low.
> + */
Weird indent
> + igt_list_add_tail(&high_hole->link, &hole->link);
> +}
> +
> +static bool simple_vma_heap_alloc(struct simple_vma_heap *heap,
> + uint64_t *offset, uint64_t size,
> + uint64_t alignment)
> +{
> + struct simple_vma_hole *hole, *tmp;
> + uint64_t misalign;
> +
> + /* The caller is expected to reject zero-size allocations */
> + igt_assert(size > 0);
> + igt_assert(alignment > 0);
> +
> + simple_vma_heap_validate(heap);
> +
> + if (heap->alloc_high) {
> + simple_vma_foreach_hole_safe(hole, heap, tmp) {
> + if (size > hole->size)
> + continue;
> +
> + /* Compute the offset as the highest address where a chunk of the
> + * given size can be without going over the top of the hole.
> + *
> + * This calculation is known to not overflow because we know that
> + * hole->size + hole->offset can only overflow to 0 and size > 0.
> + */
Weird indent
> + *offset = (hole->size - size) + hole->offset;
> +
> + /* Align the offset. We align down and not up because we are
> + * allocating from the top of the hole and not the bottom.
> + */
Weird indent
> + *offset = (*offset / alignment) * alignment;
> +
> + if (*offset < hole->offset)
> + continue;
> +
> + simple_vma_hole_alloc(hole, *offset, size);
> + simple_vma_heap_validate(heap);
> + return true;
> + }
> + } else {
> + simple_vma_foreach_hole_safe_rev(hole, heap, tmp) {
> + if (size > hole->size)
> + continue;
> +
> + *offset = hole->offset;
> +
> + /* Align the offset */
> + misalign = *offset % alignment;
> + if (misalign) {
> + uint64_t pad = alignment - misalign;
> +
> + if (pad > hole->size - size)
> + continue;
> +
> + *offset += pad;
> + }
> +
> + simple_vma_hole_alloc(hole, *offset, size);
> + simple_vma_heap_validate(heap);
> + return true;
> + }
> + }
> +
> + /* Failed to allocate */
> + return false;
> +}
> +
> +static void intel_allocator_simple_get_address_range(struct intel_allocator *ial,
> + uint64_t *startp,
> + uint64_t *endp)
> +{
> + struct intel_allocator_simple *ials = ial->priv;
> +
> + if (startp)
> + *startp = ials->start;
> +
> + if (endp)
> + *endp = ials->end;
> +}
> +
> +static bool simple_vma_heap_alloc_addr(struct intel_allocator_simple *ials,
> + uint64_t offset, uint64_t size)
> +{
> + struct simple_vma_heap *heap = &ials->heap;
> + struct simple_vma_hole *hole, *tmp;
> + /* Allocating something with a size of 0 is not valid. */
> + igt_assert(size > 0);
> +
> + /* It's possible for offset + size to wrap around if we touch the top of
> + * the 64-bit address space, but we cannot go any higher than 2^64.
> + */
> + igt_assert(offset + size == 0 || offset + size > offset);
> +
> + /* Find the hole if one exists. */
> + simple_vma_foreach_hole_safe(hole, heap, tmp) {
> + if (hole->offset > offset)
> + continue;
> +
> + /* Holes are ordered high-to-low so the first hole we find with
> + * hole->offset <= offset is our hole. If it's not big enough to contain the
> + * requested range, then the allocation fails.
> + */
Weird indent.
> + igt_assert(hole->offset <= offset);
> + if (hole->size < offset - hole->offset + size)
> + return false;
> +
> + simple_vma_hole_alloc(hole, offset, size);
> + return true;
> + }
> +
> + /* We didn't find a suitable hole */
> + return false;
> +}
> +
> +static uint64_t intel_allocator_simple_alloc(struct intel_allocator *ial,
> + uint32_t handle, uint64_t size,
> + uint64_t alignment)
> +{
> + struct intel_allocator_record *rec;
> + struct intel_allocator_simple *ials;
> + uint64_t offset;
> +
> + igt_assert(ial);
> + ials = (struct intel_allocator_simple *) ial->priv;
> + igt_assert(ials);
> + igt_assert(handle);
> + alignment = alignment > 0 ? alignment : 1;
> + rec = igt_map_find(&ials->objects, &handle);
> + if (rec) {
> + offset = rec->offset;
> + igt_assert(rec->size == size);
> + } else {
> + igt_assert(simple_vma_heap_alloc(&ials->heap, &offset,
> + size, alignment));
> + rec = malloc(sizeof(*rec));
> + rec->handle = handle;
> + rec->offset = offset;
> + rec->size = size;
> +
> + igt_map_add(&ials->objects, &rec->handle, rec);
> + ials->allocated_objects++;
> + ials->allocated_size += size;
> + }
> +
> + return offset;
> +}
> +
> +static bool intel_allocator_simple_free(struct intel_allocator *ial, uint32_t handle)
> +{
> + struct intel_allocator_record *rec = NULL;
> + struct intel_allocator_simple *ials;
> +
> + igt_assert(ial);
> + ials = (struct intel_allocator_simple *) ial->priv;
> + igt_assert(ials);
> +
> + rec = igt_map_del(&ials->objects, &handle);
> + if (rec) {
> + simple_vma_heap_free(&ials->heap, rec->offset, rec->size);
> + ials->allocated_objects--;
> + ials->allocated_size -= rec->size;
> + free(rec);
> +
> + return true;
> + }
> +
> + return false;
> +}
> +
> +static inline bool __same(const struct intel_allocator_record *rec,
> + uint32_t handle, uint64_t size, uint64_t offset)
> +{
> + return rec->handle == handle && rec->size == size &&
> + DECANONICAL(rec->offset) == DECANONICAL(offset);
> +}
> +
> +static bool intel_allocator_simple_is_allocated(struct intel_allocator *ial,
> + uint32_t handle, uint64_t size,
> + uint64_t offset)
> +{
> + struct intel_allocator_record *rec;
> + struct intel_allocator_simple *ials;
> + bool same = false;
> +
> + igt_assert(ial);
> + ials = (struct intel_allocator_simple *) ial->priv;
> + igt_assert(ials);
> + igt_assert(handle);
> +
> + rec = igt_map_find(&ials->objects, &handle);
> + if (rec && __same(rec, handle, size, offset))
> + same = true;
> +
> + return same;
> +}
> +
> +static bool intel_allocator_simple_reserve(struct intel_allocator *ial,
> + uint32_t handle,
> + uint64_t start, uint64_t end)
> +{
> + uint64_t size = end - start;
With canonical addresses, our size calculations aren't going to be
correct if start and end are on different sides of the 47-bit
boundary. I'm not sure how to deal with that, TBH. Most of the time,
I think you get saved by the fact that you're only really likely to
hit that if you have a > 128 TB range, which never happens. One simple
thing we could do is
igt_assert(end >> GEN8_GTT_ADDRESS_WIDTH == start >> GEN8_GTT_ADDRESS_WIDTH);
or similar. Same goes for the 3-4 other cases below.
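In intel_allocator_simple_reserve(), the guard could sit right before
the subtraction, e.g. (sketch only):

	/* Sketch: reject ranges whose canonical start/end straddle the
	 * sign-extension boundary, where end - start stops being
	 * meaningful.
	 */
	igt_assert(start >> GEN8_GTT_ADDRESS_WIDTH ==
		   end >> GEN8_GTT_ADDRESS_WIDTH);
	uint64_t size = end - start;
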
> + struct intel_allocator_record *rec = NULL;
> + struct intel_allocator_simple *ials;
> +
> + igt_assert(ial);
> + ials = (struct intel_allocator_simple *) ial->priv;
> + igt_assert(ials);
> +
> + /* clear [63:48] bits to get rid of canonical form */
> + start = DECANONICAL(start);
> + end = DECANONICAL(end);
> + igt_assert(end > start || end == 0);
With always reserving the top 4K, end = 0 should never happen.
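If so, the check could simply be tightened to (sketch):

	igt_assert(end > start);
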
> +
> + if (simple_vma_heap_alloc_addr(ials, start, size)) {
> + rec = malloc(sizeof(*rec));
> + rec->handle = handle;
> + rec->offset = start;
> + rec->size = size;
> +
> + igt_map_add(&ials->reserved, &rec->offset, rec);
> +
> + ials->reserved_areas++;
> + ials->reserved_size += rec->size;
> + return true;
> + }
> +
> + igt_debug("Failed to reserve %llx + %llx\n", (long long)start, (long long)size);
> + return false;
> +}
> +
> +static bool intel_allocator_simple_unreserve(struct intel_allocator *ial,
> + uint32_t handle,
> + uint64_t start, uint64_t end)
> +{
> + uint64_t size = end - start;
> + struct intel_allocator_record *rec = NULL;
> + struct intel_allocator_simple *ials;
> +
> + igt_assert(ial);
> + ials = (struct intel_allocator_simple *) ial->priv;
> + igt_assert(ials);
> +
> + /* clear [63:48] bits to get rid of canonical form */
> + start = DECANONICAL(start);
> + end = DECANONICAL(end);
> +
> + igt_assert(end > start || end == 0);
> +
> + rec = igt_map_find(&ials->reserved, &start);
> +
> + if (!rec) {
> + igt_debug("Only reserved blocks can be unreserved\n");
> + return false;
> + }
> +
> + if (rec->size != size) {
> + igt_debug("Only the whole block unreservation allowed\n");
> + return false;
> + }
> +
> + if (rec->handle != handle) {
> + igt_debug("Handle %u doesn't match reservation handle: %u\n",
> + rec->handle, handle);
> + return false;
> + }
> +
> + igt_map_del(&ials->reserved, &start);
> +
> + ials->reserved_areas--;
> + ials->reserved_size -= rec->size;
> + free(rec);
> + simple_vma_heap_free(&ials->heap, start, size);
> +
> + return true;
> +}
> +
> +static bool intel_allocator_simple_is_reserved(struct intel_allocator *ial,
> + uint64_t start, uint64_t end)
> +{
> + uint64_t size = end - start;
> + struct intel_allocator_record *rec = NULL;
> + struct intel_allocator_simple *ials;
> +
> + igt_assert(ial);
> + ials = (struct intel_allocator_simple *) ial->priv;
> + igt_assert(ials);
> +
> + /* clear [63:48] bits to get rid of canonical form */
> + start = DECANONICAL(start);
> + end = DECANONICAL(end);
> +
> + igt_assert(end > start || end == 0);
> +
> + rec = igt_map_find(&ials->reserved, &start);
> +
> + if (!rec)
> + return false;
> +
> + if (rec->offset == start && rec->size == size)
> + return true;
> +
> + return false;
> +}
> +
> +static bool equal_8bytes(const void *key1, const void *key2)
> +{
> + const uint64_t *k1 = key1, *k2 = key2;
> + return *k1 == *k2;
> +}
> +
> +static void intel_allocator_simple_destroy(struct intel_allocator *ial)
> +{
> + struct intel_allocator_simple *ials;
> + struct igt_map_entry *pos;
> + struct igt_map *map;
> + int i;
> +
> + igt_assert(ial);
> + ials = (struct intel_allocator_simple *) ial->priv;
> + simple_vma_heap_finish(&ials->heap);
> +
> + map = &ials->objects;
> + igt_map_for_each(map, i, pos)
> + free(pos->value);
> + igt_map_free(&ials->objects);
> +
> + map = &ials->reserved;
> + igt_map_for_each(map, i, pos)
> + free(pos->value);
> + igt_map_free(&ials->reserved);
> +
> + free(ial->priv);
> + free(ial);
> +}
> +
> +static bool intel_allocator_simple_is_empty(struct intel_allocator *ial)
> +{
> + struct intel_allocator_simple *ials = ial->priv;
> +
> + igt_debug("<ial: %p, fd: %d> objects: %" PRId64
> + ", reserved_areas: %" PRId64 "\n",
> + ial, ial->fd,
> + ials->allocated_objects, ials->reserved_areas);
> +
> + return !ials->allocated_objects && !ials->reserved_areas;
> +}
> +
> +static void intel_allocator_simple_print(struct intel_allocator *ial, bool full)
> +{
> + struct intel_allocator_simple *ials;
> + struct simple_vma_hole *hole;
> + struct simple_vma_heap *heap;
> + struct igt_map_entry *pos;
> + struct igt_map *map;
> + uint64_t total_free = 0, allocated_size = 0, allocated_objects = 0;
> + uint64_t reserved_size = 0, reserved_areas = 0;
> + int i;
> +
> + igt_assert(ial);
> + ials = (struct intel_allocator_simple *) ial->priv;
> + igt_assert(ials);
> + heap = &ials->heap;
> +
> + igt_info("intel_allocator_simple <ial: %p, fd: %d> on "
> + "[0x%"PRIx64" : 0x%"PRIx64"]:\n", ial, ial->fd,
> + ials->start, ials->end);
> +
> + if (full) {
> + igt_info("holes:\n");
> + simple_vma_foreach_hole(hole, heap) {
> + igt_info("offset = %"PRIu64" (0x%"PRIx64", "
> + "size = %"PRIu64" (0x%"PRIx64")\n",
> + hole->offset, hole->offset, hole->size,
> + hole->size);
> + total_free += hole->size;
> + }
> + igt_assert(total_free <= ials->total_size);
> + igt_info("total_free: %" PRIx64
> + ", total_size: %" PRIx64
> + ", allocated_size: %" PRIx64
> + ", reserved_size: %" PRIx64 "\n",
> + total_free, ials->total_size, ials->allocated_size,
> + ials->reserved_size);
> + igt_assert(total_free ==
> + ials->total_size - ials->allocated_size - ials->reserved_size);
> +
> + igt_info("objects:\n");
> + map = &ials->objects;
> + igt_map_for_each(map, i, pos) {
> + struct intel_allocator_record *rec = pos->value;
> +
> + igt_info("handle = %d, offset = %"PRIu64" "
> + "(0x%"PRIx64", size = %"PRIu64" (0x%"PRIx64")\n",
> + rec->handle, rec->offset, rec->offset,
> + rec->size, rec->size);
> + allocated_objects++;
> + allocated_size += rec->size;
> + }
> + igt_assert(ials->allocated_size == allocated_size);
> + igt_assert(ials->allocated_objects == allocated_objects);
> +
> + igt_info("reserved areas:\n");
> + map = &ials->reserved;
> + igt_map_for_each(map, i, pos) {
> + struct intel_allocator_record *rec = pos->value;
> +
> + igt_info("offset = %"PRIu64" (0x%"PRIx64", "
> + "size = %"PRIu64" (0x%"PRIx64")\n",
> + rec->offset, rec->offset,
> + rec->size, rec->size);
> + reserved_areas++;
> + reserved_size += rec->size;
> + }
> + igt_assert(ials->reserved_areas == reserved_areas);
> + igt_assert(ials->reserved_size == reserved_size);
> + } else {
> + simple_vma_foreach_hole(hole, heap)
> + total_free += hole->size;
> + }
> +
> + igt_info("free space: %"PRIu64"B (0x%"PRIx64") (%.2f%% full)\n"
> + "allocated objects: %"PRIu64", reserved areas: %"PRIu64"\n",
> + total_free, total_free,
> + ((double) (ials->total_size - total_free) /
> + (double) ials->total_size) * 100,
> + ials->allocated_objects, ials->reserved_areas);
> +}
> +
> +static struct intel_allocator *
> +__intel_allocator_simple_create(int fd, uint64_t start, uint64_t end,
> + enum allocator_strategy strategy)
> +{
> + struct intel_allocator *ial;
> + struct intel_allocator_simple *ials;
> +
> + igt_debug("Using simple allocator\n");
> +
> + ial = calloc(1, sizeof(*ial));
> + igt_assert(ial);
> +
> + ial->fd = fd;
> + ial->get_address_range = intel_allocator_simple_get_address_range;
> + ial->alloc = intel_allocator_simple_alloc;
> + ial->free = intel_allocator_simple_free;
> + ial->is_allocated = intel_allocator_simple_is_allocated;
> + ial->reserve = intel_allocator_simple_reserve;
> + ial->unreserve = intel_allocator_simple_unreserve;
> + ial->is_reserved = intel_allocator_simple_is_reserved;
> + ial->destroy = intel_allocator_simple_destroy;
> + ial->is_empty = intel_allocator_simple_is_empty;
> + ial->print = intel_allocator_simple_print;
> + ials = ial->priv = malloc(sizeof(struct intel_allocator_simple));
> + igt_assert(ials);
> +
> + igt_map_init(&ials->objects);
> + /* The reserved-addresses hashtable is indexed by offset */
> + __igt_map_init(&ials->reserved, equal_8bytes, NULL, 3);
We have this same problem with Mesa. Maybe just make the hash map
take a uint64_t key instead of a void*. Then it'll work for both
cases easily at the cost of a little extra memory on 32-bit platforms.
Looking a bit more, I guess it's not quite as bad as the Mesa case
because you have the uint64_t key in the object you're adding so you
don't have to malloc a bunch of uint64_t's just to use as keys.
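Roughly what I have in mind (names invented for illustration; this is
not the current igt_map API):

	/* Hypothetical u64-keyed igt_map variant: add/find take the key
	 * by value, so callers need no pointer-to-key plumbing.
	 */
	void igt_map_u64_add(struct igt_map *map, uint64_t key, void *value);
	void *igt_map_u64_find(struct igt_map *map, uint64_t key);

	/* The reservation paths would then read: */
	igt_map_u64_add(&ials->reserved, rec->offset, rec);
	rec = igt_map_u64_find(&ials->reserved, start);
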
> +
> + ials->start = start;
> + ials->end = end;
> + ials->total_size = end - start;
> + simple_vma_heap_init(&ials->heap, ials->start, ials->total_size,
> + strategy);
> +
> + ials->allocated_size = 0;
> + ials->allocated_objects = 0;
> + ials->reserved_size = 0;
> + ials->reserved_areas = 0;
> +
> + return ial;
> +}
> +
> +struct intel_allocator *
> +intel_allocator_simple_create(int fd)
> +{
> + uint64_t gtt_size = gem_aperture_size(fd);
> +
> + if (!gem_uses_full_ppgtt(fd))
> + gtt_size /= 2;
> + else
> + gtt_size -= RESERVED;
> +
> + return __intel_allocator_simple_create(fd, 0, gtt_size,
> + ALLOC_STRATEGY_HIGH_TO_LOW);
> +}
> +
> +struct intel_allocator *
> +intel_allocator_simple_create_full(int fd, uint64_t start, uint64_t end,
> + enum allocator_strategy strategy)
> +{
> + uint64_t gtt_size = gem_aperture_size(fd);
> +
> + igt_assert(end <= gtt_size);
> + if (!gem_uses_full_ppgtt(fd))
> + gtt_size /= 2;
> + igt_assert(end - start <= gtt_size);
Don't you want just `end <= gtt_size`? When is something only going
to use the top half?
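That is, something like (sketch, assuming the range only has to fit
below the usable ppgtt limit):

	uint64_t gtt_size = gem_aperture_size(fd);

	/* An aliasing ppgtt only gives us half the GTT to use. */
	if (!gem_uses_full_ppgtt(fd))
		gtt_size /= 2;
	igt_assert(end <= gtt_size);
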
> +
> + return __intel_allocator_simple_create(fd, start, end, strategy);
> +}
> --
> 2.26.0
>