[PATCH i-g-t 37/37] lib/igt_map: Adopt Mesa hash map
Zbigniew Kempczyński
zbigniew.kempczynski at intel.com
Fri Mar 19 19:29:44 UTC 2021
From: Dominik Grzegorzek <dominik.grzegorzek at intel.com>
Replace the igt_map hlist-based hashmap with an open-addressing hash table
adopted from Mesa (originally by Keith Packard and Bart Massey). The new
implementation uses double hashing over prime-sized tables with deleted-entry
tombstones, and exposes a create/destroy/insert/search/remove/foreach API
(including pre-hashed variants to avoid repeated hash computation).

Convert the intel_allocator code to the new API: allocator handles are now
tracked separately from the allocator instance (per-allocator refcount added,
the single cached al->handle is dropped in favour of handles returned via an
out parameter), and type/strategy mismatches on open become hard asserts
instead of warnings.
Signed-off-by: Dominik Grzegorzek <dominik.grzegorzek at intel.com>
Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
---
lib/igt_map.c | 461 +++++++++++++++++++++++++------
lib/igt_map.h | 164 ++++++-----
lib/intel_allocator.c | 184 ++++++------
lib/intel_allocator_simple.c | 121 +++++---
tests/i915/api_intel_allocator.c | 2 +-
tests/i915/api_intel_bb.c | 143 +++++++++-
6 files changed, 776 insertions(+), 299 deletions(-)
diff --git a/lib/igt_map.c b/lib/igt_map.c
index 12909ed5a..49f05932e 100644
--- a/lib/igt_map.c
+++ b/lib/igt_map.c
@@ -1,131 +1,434 @@
-// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
+ * Copyright © 1988-2004 Keith Packard and Bart Massey.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the names of the authors
+ * or their institutions shall not be used in advertising or
+ * otherwise to promote the sale, use or other dealings in this
+ * Software without prior written authorization from the
+ * authors.
+ *
+ * Authors:
+ * Eric Anholt <eric at anholt.net>
+ * Keith Packard <keithp at keithp.com>
*/
-#include <sys/ioctl.h>
+#include <assert.h>
#include <stdlib.h>
+
#include "igt_map.h"
-#include "igt.h"
-#include "igt_x86.h"
-#define MIN_CAPACITY_BITS 2
+#define ARRAY_SIZE(array) (sizeof(array) / sizeof(array[0]))
+
+/*
+ * From Knuth -- a good choice for hash/rehash values is p, p-2 where
+ * p and p-2 are both prime. These tables are sized to have an extra 10%
+ * free to avoid exponential performance degradation as the hash table fills
+ */
+
+static const uint32_t deleted_key_value;
+static const void *deleted_key = &deleted_key_value;
-static bool igt_map_filled(struct igt_map *map)
+static const struct {
+ uint32_t max_entries, size, rehash;
+} hash_sizes[] = {
+ { 2, 5, 3 },
+ { 4, 7, 5 },
+ { 8, 13, 11 },
+ { 16, 19, 17 },
+ { 32, 43, 41 },
+ { 64, 73, 71 },
+ { 128, 151, 149 },
+ { 256, 283, 281 },
+ { 512, 571, 569 },
+ { 1024, 1153, 1151 },
+ { 2048, 2269, 2267 },
+ { 4096, 4519, 4517 },
+ { 8192, 9013, 9011 },
+ { 16384, 18043, 18041 },
+ { 32768, 36109, 36107 },
+ { 65536, 72091, 72089 },
+ { 131072, 144409, 144407 },
+ { 262144, 288361, 288359 },
+ { 524288, 576883, 576881 },
+ { 1048576, 1153459, 1153457 },
+ { 2097152, 2307163, 2307161 },
+ { 4194304, 4613893, 4613891 },
+ { 8388608, 9227641, 9227639 },
+ { 16777216, 18455029, 18455027 },
+ { 33554432, 36911011, 36911009 },
+ { 67108864, 73819861, 73819859 },
+ { 134217728, 147639589, 147639587 },
+ { 268435456, 295279081, 295279079 },
+ { 536870912, 590559793, 590559791 },
+ { 1073741824, 1181116273, 1181116271},
+ { 2147483648ul, 2362232233ul, 2362232231ul}
+};
+
+static int
+entry_is_free(const struct igt_map_entry *entry)
{
- return IGT_MAP_CAPACITY(map) * 4 / 5 <= map->size;
+ return entry->key == NULL;
}
-static void igt_map_extend(struct igt_map *map)
+static int
+entry_is_deleted(const struct igt_map_entry *entry)
{
- struct igt_hlist_head *new_heads;
- struct igt_map_entry *pos;
- struct igt_hlist_node *tmp;
- uint32_t new_bits = map->capacity_bits + 1;
- int i;
+ return entry->key == deleted_key;
+}
- new_heads = calloc(1ULL << new_bits,
- sizeof(struct igt_hlist_head));
- igt_assert(new_heads);
+static int
+entry_is_present(const struct igt_map_entry *entry)
+{
+ return entry->key != NULL && entry->key != deleted_key;
+}
- igt_map_for_each_safe(map, i, tmp, pos)
- igt_hlist_add_head(&pos->link, &new_heads[map->hash_fn(pos->key, new_bits)]);
+struct igt_map *
+igt_map_create(uint32_t (*hash_function)(const void *key),
+ int (*key_equals_function)(const void *a, const void *b))
+{
+ struct igt_map *map;
- free(map->heads);
- map->capacity_bits++;
+ map = malloc(sizeof(*map));
+ if (map == NULL)
+ return NULL;
- map->heads = new_heads;
+ map->size_index = 0;
+ map->size = hash_sizes[map->size_index].size;
+ map->rehash = hash_sizes[map->size_index].rehash;
+ map->max_entries = hash_sizes[map->size_index].max_entries;
+ map->hash_function = hash_function;
+ map->key_equals_function = key_equals_function;
+ map->table = calloc(map->size, sizeof(*map->table));
+ map->entries = 0;
+ map->deleted_entries = 0;
+
+ if (map->table == NULL) {
+ free(map);
+ return NULL;
+ }
+
+ return map;
}
-static bool equal_4bytes(const void *key1, const void *key2)
+/**
+ * Frees the given hash table.
+ *
+ * If delete_function is passed, it gets called on each entry present before
+ * freeing.
+ */
+void
+igt_map_destroy(struct igt_map *map,
+ void (*delete_function)(struct igt_map_entry *entry))
{
- const uint32_t *k1 = key1, *k2 = key2;
- return *k1 == *k2;
+ if (!map)
+ return;
+
+ if (delete_function) {
+ struct igt_map_entry *entry;
+
+ igt_map_foreach(map, entry)
+ delete_function(entry);
+ }
+ free(map->table);
+ free(map);
}
-/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
+void *
+igt_map_search(struct igt_map *map, const void *key)
+{
+ uint32_t hash = map->hash_function(key);
+ struct igt_map_entry *entry;
-static inline uint64_t hash_64_4bytes(const void *val, unsigned int bits)
+ entry = igt_map_search_pre_hashed(map, hash, key);
+ return entry ? entry->data : NULL;
+}
+
+/**
+ * Finds a hash table entry with the given key.
+ *
+ * Returns NULL if no entry is found. Note that the data pointer may be
+ * modified by the user.
+ */
+struct igt_map_entry *
+igt_map_search_entry(struct igt_map *map, const void *key)
{
- uint64_t hash = *(uint32_t *)val;
+ uint32_t hash = map->hash_function(key);
- hash = hash * GOLDEN_RATIO_PRIME_64;
- /* High bits are more random, so use them. */
- return hash >> (64 - bits);
+ return igt_map_search_pre_hashed(map, hash, key);
}
-void __igt_map_init(struct igt_map *map, igt_map_equal_fn eq_fn,
- igt_map_hash_fn hash_fn, uint32_t initial_bits)
+/**
+ * Finds a hash table entry with the given key and hash of that key.
+ *
+ * Returns NULL if no entry is found. Note that the data pointer may be
+ * modified by the user.
+ */
+struct igt_map_entry *
+igt_map_search_pre_hashed(struct igt_map *map, uint32_t hash,
+ const void *key)
{
- map->equal_fn = eq_fn == NULL ? equal_4bytes : eq_fn;
- map->hash_fn = hash_fn == NULL ? hash_64_4bytes : hash_fn;
- map->capacity_bits = initial_bits > 0 ? initial_bits
- : MIN_CAPACITY_BITS;
- map->heads = calloc(IGT_MAP_CAPACITY(map),
- sizeof(struct igt_hlist_head));
-
- igt_assert(map->heads);
- map->size = 0;
+ uint32_t start_hash_address = hash % map->size;
+ uint32_t hash_address = start_hash_address;
+
+ do {
+ uint32_t double_hash;
+
+ struct igt_map_entry *entry = map->table + hash_address;
+
+ if (entry_is_free(entry)) {
+ return NULL;
+ } else if (entry_is_present(entry) && entry->hash == hash) {
+ if (map->key_equals_function(key, entry->key)) {
+ return entry;
+ }
+ }
+
+ double_hash = 1 + hash % map->rehash;
+
+ hash_address = (hash_address + double_hash) % map->size;
+ } while (hash_address != start_hash_address);
+
+ return NULL;
}
-void igt_map_add(struct igt_map *map, const void *key, void *value)
+static void
+igt_map_rehash(struct igt_map *map, int new_size_index)
{
- struct igt_map_entry *entry;
+ struct igt_map old_map;
+ struct igt_map_entry *table, *entry;
+
+ if (new_size_index >= ARRAY_SIZE(hash_sizes))
+ return;
+
+ table = calloc(hash_sizes[new_size_index].size, sizeof(*map->table));
+ if (table == NULL)
+ return;
- if (igt_map_filled(map))
- igt_map_extend(map);
+ old_map = *map;
- entry = malloc(sizeof(struct igt_map_entry));
- entry->value = value;
- entry->key = key;
- igt_hlist_add_head(&entry->link,
- &map->heads[map->hash_fn(key, map->capacity_bits)]);
- map->size++;
+ map->table = table;
+ map->size_index = new_size_index;
+ map->size = hash_sizes[map->size_index].size;
+ map->rehash = hash_sizes[map->size_index].rehash;
+ map->max_entries = hash_sizes[map->size_index].max_entries;
+ map->entries = 0;
+ map->deleted_entries = 0;
+
+ igt_map_foreach(&old_map, entry) {
+ igt_map_insert_pre_hashed(map, entry->hash,
+ entry->key, entry->data);
+ }
+
+ free(old_map.table);
}
-void *igt_map_del(struct igt_map *map, const void *key)
+/**
+ * Inserts the key into the table.
+ *
+ * Note that insertion may rearrange the table on a resize or rehash,
+ * so previously found hash_entries are no longer valid after this function.
+ */
+struct igt_map_entry *
+igt_map_insert(struct igt_map *map, const void *key, void *data)
{
- struct igt_map_entry *pos;
- struct igt_hlist_node *tmp;
- void *val = NULL;
-
- igt_map_for_each_possible_safe(map, pos, tmp, key) {
- if (map->equal_fn(pos->key, key)) {
- igt_hlist_del(&pos->link);
- val = pos->value;
- free(pos);
+ uint32_t hash = map->hash_function(key);
+
+ /* Make sure nobody tries to add one of the magic values as a
+ * key. If you need to do so, either do so in a wrapper, or
+ * store keys with the magic values separately in the struct
+ * igt_map.
+ */
+ assert(key != NULL);
+
+ return igt_map_insert_pre_hashed(map, hash, key, data);
+}
+
+/**
+ * Inserts the key with the given hash into the table.
+ *
+ * Note that insertion may rearrange the table on a resize or rehash,
+ * so previously found hash_entries are no longer valid after this function.
+ */
+struct igt_map_entry *
+igt_map_insert_pre_hashed(struct igt_map *map, uint32_t hash,
+ const void *key, void *data)
+{
+ uint32_t start_hash_address, hash_address;
+ struct igt_map_entry *available_entry = NULL;
+
+ if (map->entries >= map->max_entries) {
+ igt_map_rehash(map, map->size_index + 1);
+ } else if (map->deleted_entries + map->entries >= map->max_entries) {
+ igt_map_rehash(map, map->size_index);
+ }
+
+ start_hash_address = hash % map->size;
+ hash_address = start_hash_address;
+ do {
+ struct igt_map_entry *entry = map->table + hash_address;
+ uint32_t double_hash;
+
+ if (!entry_is_present(entry)) {
+ /* Stash the first available entry we find */
+ if (available_entry == NULL)
+ available_entry = entry;
+ if (entry_is_free(entry))
+ break;
+ }
+
+ /* Implement replacement when another insert happens
+ * with a matching key. This is a relatively common
+ * feature of hash tables, with the alternative
+ * generally being "insert the new value as well, and
+ * return it first when the key is searched for".
+ *
+ * Note that the hash table doesn't have a delete
+ * callback. If freeing of old data pointers is
+ * required to avoid memory leaks, perform a search
+ * before inserting.
+ */
+ if (!entry_is_deleted(entry) &&
+ entry->hash == hash &&
+ map->key_equals_function(key, entry->key)) {
+ entry->key = key;
+ entry->data = data;
+ return entry;
}
+
+
+ double_hash = 1 + hash % map->rehash;
+
+ hash_address = (hash_address + double_hash) % map->size;
+ } while (hash_address != start_hash_address);
+
+ if (available_entry) {
+ if (entry_is_deleted(available_entry))
+ map->deleted_entries--;
+ available_entry->hash = hash;
+ available_entry->key = key;
+ available_entry->data = data;
+ map->entries++;
+ return available_entry;
}
- return val;
+
+ /* We could hit here if a required resize failed. An unchecked-malloc
+ * application could ignore this result.
+ */
+ return NULL;
}
-void *igt_map_find(struct igt_map *map, const void *key)
+/**
+ * This function searches for, and removes an entry from the hash table.
+ *
+ * If the caller has previously found a struct igt_map_entry pointer,
+ * (from calling igt_map_search or remembering it from
+ * igt_map_insert), then igt_map_remove_entry can be called
+ * instead to avoid an extra search.
+ */
+void
+igt_map_remove(struct igt_map *map, const void *key,
+ void (*delete_function)(struct igt_map_entry *entry))
{
- struct igt_map_entry *pos = NULL;
+ struct igt_map_entry *entry;
- igt_map_for_each_possible(map, pos, key)
- if (map->equal_fn(pos->key, key))
- break;
+ entry = igt_map_search_entry(map, key);
+ if (delete_function)
+ delete_function(entry);
- return pos ? pos->value : NULL;
+ igt_map_remove_entry(map, entry);
}
-void igt_map_free(struct igt_map *map)
+/**
+ * This function deletes the given hash table entry.
+ *
+ * Note that deletion doesn't otherwise modify the table, so an iteration over
+ * the table deleting entries is safe.
+ */
+void
+igt_map_remove_entry(struct igt_map *map, struct igt_map_entry *entry)
{
- struct igt_map_entry *pos;
- struct igt_hlist_node *tmp;
- int i;
+ if (!entry)
+ return;
- igt_map_for_each_safe(map, i, tmp, pos) {
- igt_hlist_del(&pos->link);
- free(pos);
+ entry->key = deleted_key;
+ map->entries--;
+ map->deleted_entries++;
+}
+
+/**
+ * This function is an iterator over the hash table.
+ *
+ * Pass in NULL for the first entry, as in the start of a for loop. Note that
+ * an iteration over the table is O(table_size) not O(entries).
+ */
+struct igt_map_entry *
+igt_map_next_entry(struct igt_map *map, struct igt_map_entry *entry)
+{
+ if (entry == NULL)
+ entry = map->table;
+ else
+ entry = entry + 1;
+
+ for (; entry != map->table + map->size; entry++) {
+ if (entry_is_present(entry)) {
+ return entry;
+ }
}
- free(map->heads);
+ return NULL;
}
-bool igt_map_empty(struct igt_map *map)
+/**
+ * Returns a random entry from the hash table.
+ *
+ * This may be useful in implementing random replacement (as opposed
+ * to just removing everything) in caches based on this hash table
+ * implementation. @predicate may be used to filter entries, or may
+ * be set to NULL for no filtering.
+ */
+struct igt_map_entry *
+igt_map_random_entry(struct igt_map *map,
+ int (*predicate)(struct igt_map_entry *entry))
{
- return map->size;
+ struct igt_map_entry *entry;
+ uint32_t i = random() % map->size;
+
+ if (map->entries == 0)
+ return NULL;
+
+ for (entry = map->table + i; entry != map->table + map->size; entry++) {
+ if (entry_is_present(entry) &&
+ (!predicate || predicate(entry))) {
+ return entry;
+ }
+ }
+
+ for (entry = map->table; entry != map->table + i; entry++) {
+ if (entry_is_present(entry) &&
+ (!predicate || predicate(entry))) {
+ return entry;
+ }
+ }
+
+ return NULL;
}
diff --git a/lib/igt_map.h b/lib/igt_map.h
index d47afdbbe..f325404d8 100644
--- a/lib/igt_map.h
+++ b/lib/igt_map.h
@@ -1,104 +1,102 @@
-// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
- */
-
-#ifndef IGT_MAP_H
-#define IGT_MAP_H
-
-#include <stdint.h>
-#include "igt_list.h"
-
-/**
- * SECTION:igt_map
- * @short_description: a dynamic sized hashmap implementation
- * @title: IGT Map
- * @include: igt_map.h
- *
- * igt_map is a dynamic sized, non-thread-safe implementation of a hashmap.
- * This map grows exponentially when it's over 80% filled. The structure allows
- * indexing records by any key through the hash and equal functions.
- * By default, hashmap compares and hashes the first four bytes of a key.
*
- * Example usage:
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * |[<!-- language="C" -->
- * struct igt_map *map;
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
*
- * struct record {
- * int foo;
- * uint32_t unique_identifier;
- * };
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*
- * struct record r1, r2, *r_ptr;
+ * Authors:
+ * Eric Anholt <eric at anholt.net>
*
- * map = malloc(sizeof(*map));
- * igt_map_init(map); // initiaize the map with default parameters
- * igt_map_add(map, &r1.unique_identifier, &r1);
- * igt_map_add(map, &r2.unique_identifier, &r2);
- *
- * struct igt_map_entry *pos; int i;
- * igt_map_for_each(map, i, pos) {
- * r_ptr = pos->value;
- * printf("key: %u, foo: %d\n", *(uint32_t*) pos->key, r_ptr->foo);
- * }
- *
- * uint32_t key = r1.unique_identifier;
- * r_ptr = igt_map_find(map, &key); // get r1
- *
- * r_ptr = igt_map_del(map, &r2.unique_identifier);
- * if (r_ptr)
- * printf("record with key %u deleted\n", r_ptr->unique_identifier);
- *
- * igt_map_free(map);
- * free(map);
- * ]|
*/
-typedef bool (*igt_map_equal_fn)(const void *key1, const void *key2);
-typedef uint64_t (*igt_map_hash_fn)(const void *val, unsigned int bits);
-struct igt_map {
- uint32_t size;
- uint32_t capacity_bits;
- igt_map_equal_fn equal_fn;
- igt_map_hash_fn hash_fn;
- struct igt_hlist_head *heads;
-};
+#ifndef HASH_TABLE_H
+#define HASH_TABLE_H
+
+#include <inttypes.h>
struct igt_map_entry {
+ uint32_t hash;
const void *key;
- void *value;
- struct igt_hlist_node link;
+ void *data;
+};
+
+struct igt_map {
+ struct igt_map_entry *table;
+ uint32_t (*hash_function)(const void *key);
+ int (*key_equals_function)(const void *a, const void *b);
+ uint32_t size;
+ uint32_t rehash;
+ uint32_t max_entries;
+ uint32_t size_index;
+ uint32_t entries;
+ uint32_t deleted_entries;
};
-void __igt_map_init(struct igt_map *map, igt_map_equal_fn eq_fn,
- igt_map_hash_fn hash_fn, uint32_t initial_bits);
-void igt_map_add(struct igt_map *map, const void *key, void *value);
-void *igt_map_del(struct igt_map *map, const void *key);
-void *igt_map_find(struct igt_map *map, const void *key);
-void igt_map_free(struct igt_map *map);
-bool igt_map_empty(struct igt_map *map);
+struct igt_map *
+igt_map_create(uint32_t (*hash_function)(const void *key),
+ int (*key_equals_function)(const void *a, const void *b));
+void
+igt_map_destroy(struct igt_map *map,
+ void (*delete_function)(struct igt_map_entry *entry));
-#define igt_map_init(map) __igt_map_init(map, NULL, NULL, 8)
+struct igt_map_entry *
+igt_map_insert(struct igt_map *map, const void *key, void *data);
-#define IGT_MAP_CAPACITY(map) (1ULL << map->capacity_bits)
+void *
+igt_map_search(struct igt_map *map, const void *key);
-#define igt_map_for_each_safe(map, bkt, tmp, obj) \
- for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < IGT_MAP_CAPACITY(map); \
- (bkt)++)\
- igt_hlist_for_each_entry_safe(obj, tmp, &map->heads[bkt], link)
+struct igt_map_entry *
+igt_map_search_entry(struct igt_map *map, const void *key);
-#define igt_map_for_each(map, bkt, obj) \
- for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < IGT_MAP_CAPACITY(map); \
- (bkt)++)\
- igt_hlist_for_each_entry(obj, &map->heads[bkt], link)
+void
+igt_map_remove(struct igt_map *map, const void *key,
+ void (*delete_function)(struct igt_map_entry *entry));
+
+void
+igt_map_remove_entry(struct igt_map *map, struct igt_map_entry *entry);
+
+struct igt_map_entry *
+igt_map_next_entry(struct igt_map *map, struct igt_map_entry *entry);
+
+struct igt_map_entry *
+igt_map_random_entry(struct igt_map *map,
+ int (*predicate)(struct igt_map_entry *entry));
+
+/**
+ * This foreach function is safe against deletion (which just replaces
+ * an entry's data with the deleted marker), but not against insertion
+ * (which may rehash the table, making entry a dangling pointer).
+ */
+#define igt_map_foreach(map, entry) \
+ for (entry = igt_map_next_entry(map, NULL); \
+ entry != NULL; \
+ entry = igt_map_next_entry(map, entry))
-#define igt_map_for_each_possible(map, obj, key) \
- igt_hlist_for_each_entry(obj, \
- &map->heads[map->hash_fn(key, map->capacity_bits)], link)
+/* Alternate interfaces to reduce repeated calls to hash function. */
+struct igt_map_entry *
+igt_map_search_pre_hashed(struct igt_map *map,
+ uint32_t hash,
+ const void *key);
-#define igt_map_for_each_possible_safe(map, obj, tmp, key) \
- igt_hlist_for_each_entry_safe(obj, tmp, \
- &map->heads[map->hash_fn(key, map->capacity_bits)], link)
+struct igt_map_entry *
+igt_map_insert_pre_hashed(struct igt_map *map,
+ uint32_t hash,
+ const void *key, void *data);
-#endif /* IGT_MAP_H */
+#endif
diff --git a/lib/intel_allocator.c b/lib/intel_allocator.c
index e35d32148..418fe8f35 100644
--- a/lib/intel_allocator.c
+++ b/lib/intel_allocator.c
@@ -49,8 +49,8 @@ struct allocator {
int fd;
uint32_t ctx;
uint32_t vm;
+ _Atomic(int32_t) refcount;
struct intel_allocator *ial;
- uint64_t handle;
};
struct handle_entry {
@@ -74,27 +74,27 @@ intel_allocator_simple_create_full(int fd, uint64_t start, uint64_t end,
* allocator
* handles <fd, ctx> intel allocator
* +-----+ +--------+ +-------------+
- * | 1 +---------->+ fd: 3 +----+---->+ data: ... |
- * +-----+ | ctx: 1 | | | refcount: 2 |
- * | 2 +------ +--------+ | +-------------+
- * +-----+ +---->+ fd: 3 +----+
- * | 3 +--+ | ctx: 1 | intel allocator
- * +-----+ | +--------+ +-------------+
- * | ... | +------->+ fd: 3 +--------->+ data: ... |
- * +-----+ | ctx: 2 | | refcount: 1 |
+ * | 1 +---------->+ fd: 3 +--------->+ data: ... |
+ * +-----+ +---->+ ctx: 1 | | refcount: 2 |
+ * | 2 +-----+ | ref: 2 | +-------------+
+ * +-----+ +--------+
+ * | 3 +--+ +--------+ intel allocator
+ * +-----+ | | fd: 3 | +-------------+
+ * | ... | +------->| ctx: 2 +--------->+ data: ... |
+ * +-----+ | ref: 1 | | refcount: 1 |
* | n +--------+ +--------+ +-------------+
* +-----+ |
* | ... +-----+ | allocator
* +-----+ | | <fd, vm> intel allocator
* | ... +--+ | | +--------+ +-------------+
- * + + | | +->+ fd: 3 +--+--+--->+ data: ... |
- * | | | vm: 1 | | | | refcount: 3 |
- * | | +--------+ | | +-------------+
- * | +---->+ fd: 3 +--+ |
- * | | vm: 1 | |
+ * + + | | +->+ fd: 3 +-----+--->+ data: ... |
+ * | +---->+ vm: 1 | | | refcount: 3 |
+ * | | ref: 2 | | +-------------+
+ * | +--------+ |
* | +--------+ |
- * +------->+ fd: 3 +-----+
- * | vm: 2 |
+ * | | fd: 3 | |
+ * +------->+ vm: 2 +-----+
+ * | ref: 1 |
* +--------+
*/
static _Atomic(uint64_t) next_handle;
@@ -161,6 +161,11 @@ static int recv_resp(struct msg_channel *msgchan,
return msgchan->recv_resp(msgchan, response);
}
+static inline void map_entry_free_func(struct igt_map_entry *entry)
+{
+ free(entry->data);
+}
+
static uint64_t __handle_create(struct allocator *al)
{
struct handle_entry *h = malloc(sizeof(*h));
@@ -168,19 +173,16 @@ static uint64_t __handle_create(struct allocator *al)
igt_assert(h);
h->handle = atomic_fetch_add(&next_handle, 1);
h->al = al;
- igt_map_add(handles, h, h);
+ igt_map_insert(handles, h, h);
return h->handle;
}
static void __handle_destroy(uint64_t handle)
{
- struct handle_entry *h, he = { .handle = handle };
+ struct handle_entry he = { .handle = handle };
- h = igt_map_find(handles, &he);
- igt_assert(h);
- igt_map_del(handles, &he);
- free(h);
+ igt_map_remove(handles, &he, map_entry_free_func);
}
static struct allocator *__allocator_find(int fd, uint32_t ctx, uint32_t vm)
@@ -188,14 +190,14 @@ static struct allocator *__allocator_find(int fd, uint32_t ctx, uint32_t vm)
struct allocator al = { .fd = fd, .ctx = ctx, .vm = vm };
struct igt_map *map = GET_MAP(vm);
- return igt_map_find(map, &al);
+ return igt_map_search(map, &al);
}
static struct allocator *__allocator_find_by_handle(uint64_t handle)
{
struct handle_entry *h, he = { .handle = handle };
- h = igt_map_find(handles, &he);
+ h = igt_map_search(handles, &he);
if (!h)
return NULL;
@@ -213,9 +215,10 @@ static struct allocator *__allocator_create(int fd, uint32_t ctx, uint32_t vm,
al->fd = fd;
al->ctx = ctx;
al->vm = vm;
+ atomic_init(&al->refcount, 0);
al->ial = ial;
- igt_map_add(map, al, al);
+ igt_map_insert(map, al, al);
return al;
}
@@ -224,8 +227,7 @@ static void __allocator_destroy(struct allocator *al)
{
struct igt_map *map = GET_MAP(al->vm);
- igt_map_del(map, al);
- free(al);
+ igt_map_remove(map, al, map_entry_free_func);
}
static int __allocator_get(struct allocator *al)
@@ -233,6 +235,7 @@ static int __allocator_get(struct allocator *al)
struct intel_allocator *ial = al->ial;
int refcount;
+ atomic_fetch_add(&al->refcount, 1);
refcount = atomic_fetch_add(&ial->refcount, 1);
igt_assert(refcount >= 0);
@@ -245,6 +248,7 @@ static bool __allocator_put(struct allocator *al)
bool released = false;
int refcount;
+ atomic_fetch_sub(&al->refcount, 1);
refcount = atomic_fetch_sub(&ial->refcount, 1);
igt_assert(refcount >= 1);
if (refcount == 1) {
@@ -316,12 +320,15 @@ static void intel_allocator_destroy(struct intel_allocator *ial)
static struct allocator *allocator_open(int fd, uint32_t ctx, uint32_t vm,
uint64_t start, uint64_t end,
uint8_t allocator_type,
- uint8_t allocator_strategy)
+ uint8_t allocator_strategy,
+ uint64_t *ahndp)
{
struct intel_allocator *ial;
struct allocator *al;
const char *idstr = vm ? "vm" : "ctx";
+ igt_assert(ahndp);
+
al = __allocator_find(fd, ctx, vm);
if (!al) {
alloc_info("Allocator fd: %d, ctx: %u, vm: %u, <0x%llx : 0x%llx> "
@@ -330,34 +337,31 @@ static struct allocator *allocator_open(int fd, uint32_t ctx, uint32_t vm,
ial = intel_allocator_create(fd, start, end, allocator_type,
allocator_strategy);
al = __allocator_create(fd, ctx, vm, ial);
- } else {
- al = __allocator_create(al->fd, al->ctx, al->vm, al->ial);
- ial = al->ial;
}
- __allocator_get(al);
- al->handle = __handle_create(al);
- if (ial->type != allocator_type) {
- igt_warn("Allocator type must be same for fd/%s\n", idstr);
- ial = NULL;
- }
+ ial = al->ial;
- if (ial->strategy != allocator_strategy) {
- igt_warn("Allocator strategy must be same or fd/%s\n", idstr);
- ial = NULL;
- }
+ igt_assert_f(ial->type == allocator_type,
+ "Allocator type must be same for fd/%s\n", idstr);
+
+ igt_assert_f(ial->strategy == allocator_strategy,
+ "Allocator strategy must be same or fd/%s\n", idstr);
+
+ __allocator_get(al);
+ *ahndp = __handle_create(al);
return al;
}
static struct allocator *allocator_open_as(struct allocator *base,
- uint32_t new_vm)
+ uint32_t new_vm, uint64_t *ahndp)
{
struct allocator *al;
+ igt_assert(ahndp);
al = __allocator_create(base->fd, base->ctx, new_vm, base->ial);
__allocator_get(al);
- al->handle = __handle_create(al);
+ *ahndp = __handle_create(al);
return al;
}
@@ -378,7 +382,10 @@ static bool allocator_close(uint64_t ahnd)
is_empty = al->ial->is_empty(al->ial);
intel_allocator_destroy(al->ial);
}
- __allocator_destroy(al);
+
+ if (!atomic_load(&al->refcount))
+ __allocator_destroy(al);
+
__handle_destroy(ahnd);
return is_empty;
@@ -416,18 +423,20 @@ static int send_req_recv_resp(struct msg_channel *msgchan,
static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
{
int ret;
+ long refcnt;
memset(resp, 0, sizeof(*resp));
if (is_same_process()) {
struct intel_allocator *ial;
struct allocator *al;
- uint64_t start, end, size;
+ uint64_t start, end, size, ahnd;
uint32_t ctx, vm;
bool allocated, reserved, unreserved;
/* Used when debug is on, so avoid compilation warnings */
(void) ctx;
(void) vm;
+ (void) refcnt;
/*
* Mutex only work on allocator instance, not stop/open/close
@@ -444,7 +453,6 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
ial = al->ial;
igt_assert(ial);
-
pthread_mutex_lock(&ial->mutex);
}
@@ -459,19 +467,23 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
req->open.ctx, req->open.vm,
req->open.start, req->open.end,
req->open.allocator_type,
- req->open.allocator_strategy);
+ req->open.allocator_strategy,
+ &ahnd);
+ refcnt = atomic_load(&al->refcount);
ret = atomic_load(&al->ial->refcount);
pthread_mutex_unlock(&map_mutex);
resp->response_type = RESP_OPEN;
- resp->open.allocator_handle = al->handle;
+ resp->open.allocator_handle = ahnd;
+
alloc_info("<open> [tid: %ld] fd: %d, ahnd: %" PRIx64
", ctx: %u, vm: %u"
- ", alloc_type: %u, refcnt: %d->%d\n",
- (long) req->tid, req->open.fd, al->handle,
+ ", alloc_type: %u, al->refcnt: %ld->%ld"
+ ", refcnt: %d->%d\n",
+ (long) req->tid, req->open.fd, ahnd,
req->open.ctx,
req->open.vm, req->open.allocator_type,
- ret - 1, ret);
+ refcnt - 1, refcnt, ret - 1, ret);
break;
case REQ_OPEN_AS:
@@ -498,18 +510,21 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
}
- al = allocator_open_as(al, req->open_as.new_vm);
+ al = allocator_open_as(al, req->open_as.new_vm, &ahnd);
+ refcnt = atomic_load(&al->refcount);
ret = atomic_load(&al->ial->refcount);
pthread_mutex_unlock(&map_mutex);
resp->response_type = RESP_OPEN_AS;
- resp->open.allocator_handle = al->handle;
+ resp->open.allocator_handle = ahnd;
+
alloc_info("<open as> [tid: %ld] fd: %d, ahnd: %" PRIx64
", ctx: %u, vm: %u"
- ", alloc_type: %u, refcnt: %d->%d\n",
- (long) req->tid, al->fd, al->handle,
+ ", alloc_type: %u, al->refcnt: %ld->%ld"
+ ", refcnt: %d->%d\n",
+ (long) req->tid, al->fd, ahnd,
al->ctx, al->vm, al->ial->type,
- ret - 1, ret);
+ refcnt - 1, refcnt, ret - 1, ret);
break;
case REQ_CLOSE:
@@ -529,16 +544,18 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
ctx = al->ctx;
vm = al->vm;
+ refcnt = atomic_load(&al->refcount);
ret = atomic_load(&al->ial->refcount);
resp->close.is_empty = allocator_close(req->allocator_handle);
pthread_mutex_unlock(&map_mutex);
alloc_info("<close> [tid: %ld] ahnd: %" PRIx64
", ctx: %u, vm: %u"
- ", is_empty: %d, refcnt: %d->%d\n",
+ ", is_empty: %d, al->refcount: %ld->%ld"
+ ", refcnt: %d->%d\n",
(long) req->tid, req->allocator_handle,
ctx, vm, resp->close.is_empty,
- ret, al ? ret - 1 : 0);
+ refcnt, refcnt - 1, ret, ret - 1);
break;
@@ -1241,17 +1258,18 @@ void intel_allocator_print(uint64_t allocator_handle)
igt_assert(allocator_handle);
if (!multiprocess || is_same_process()) {
- struct intel_allocator *ial = from_user_pointer(allocator_handle);
+ struct allocator *al;
+ al = __allocator_find_by_handle(allocator_handle);
pthread_mutex_lock(&map_mutex);
- ial->print(ial, true);
+ al->ial->print(al->ial, true);
pthread_mutex_unlock(&map_mutex);
} else {
igt_warn("Print stats is in main process only\n");
}
}
-static bool equal_handles(const void *key1, const void *key2)
+static int equal_handles(const void *key1, const void *key2)
{
const struct handle_entry *h1 = key1, *h2 = key2;
@@ -1261,7 +1279,7 @@ static bool equal_handles(const void *key1, const void *key2)
return h1->handle == h2->handle;
}
-static bool equal_ctx(const void *key1, const void *key2)
+static int equal_ctx(const void *key1, const void *key2)
{
const struct allocator *a1 = key1, *a2 = key2;
@@ -1271,7 +1289,7 @@ static bool equal_ctx(const void *key1, const void *key2)
return a1->fd == a2->fd && a1->ctx == a2->ctx;
}
-static bool equal_vm(const void *key1, const void *key2)
+static int equal_vm(const void *key1, const void *key2)
{
const struct allocator *a1 = key1, *a2 = key2;
@@ -1281,43 +1299,40 @@ static bool equal_vm(const void *key1, const void *key2)
return a1->fd == a2->fd && a1->vm == a2->vm;
}
-/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
-static inline uint64_t hash_handles(const void *val, unsigned int bits)
+static inline uint32_t hash_handles(const void *val)
{
- uint64_t hash = ((struct handle_entry *) val)->handle;
+ uint32_t hash = ((struct handle_entry *) val)->handle;
- hash = hash * GOLDEN_RATIO_PRIME_64;
- return hash >> (64 - bits);
+ hash = hash * GOLDEN_RATIO_PRIME_32;
+ return hash;
}
-static inline uint64_t hash_instance(const void *val, unsigned int bits)
+static inline uint32_t hash_instance(const void *val)
{
uint64_t hash = ((struct allocator *) val)->fd;
- hash = hash * GOLDEN_RATIO_PRIME_64;
- return hash >> (64 - bits);
+ hash = hash * GOLDEN_RATIO_PRIME_32;
+ return hash;
}
static void __free_maps(struct igt_map *map, bool close_allocators)
{
struct igt_map_entry *pos;
- struct igt_hlist_node *tmp;
const struct handle_entry *h;
- int i;
if (!map)
return;
if (close_allocators)
- igt_map_for_each_safe(map, i, tmp, pos) {
+ igt_map_foreach(map, pos) {
h = pos->key;
allocator_close(h->handle);
}
- igt_map_free(map);
- free(map);
+ igt_map_destroy(map, map_entry_free_func);
}
/**
@@ -1338,19 +1353,10 @@ void intel_allocator_init(void)
__free_maps(ctx_map, false);
__free_maps(vm_map, false);
- handles = calloc(sizeof(*handles), 1);
- igt_assert(handles);
-
- ctx_map = calloc(sizeof(*ctx_map), 1);
- igt_assert(ctx_map);
-
- vm_map = calloc(sizeof(*vm_map), 1);
- igt_assert(vm_map);
-
atomic_init(&next_handle, 1);
- __igt_map_init(handles, equal_handles, hash_handles, 8);
- __igt_map_init(ctx_map, equal_ctx, hash_instance, 8);
- __igt_map_init(vm_map, equal_vm, hash_instance, 8);
+ handles = igt_map_create(hash_handles, equal_handles);
+ ctx_map = igt_map_create(hash_instance, equal_ctx);
+ vm_map = igt_map_create(hash_instance, equal_vm);
channel = intel_allocator_get_msgchannel(CHANNEL_SYSVIPC_MSGQUEUE);
}
diff --git a/lib/intel_allocator_simple.c b/lib/intel_allocator_simple.c
index 753cbb4f3..b8fa46682 100644
--- a/lib/intel_allocator_simple.c
+++ b/lib/intel_allocator_simple.c
@@ -40,8 +40,8 @@ struct simple_vma_hole {
};
struct intel_allocator_simple {
- struct igt_map objects;
- struct igt_map reserved;
+ struct igt_map *objects;
+ struct igt_map *reserved;
struct simple_vma_heap heap;
uint64_t start;
@@ -70,6 +70,48 @@ struct intel_allocator_record {
#define simple_vma_foreach_hole_safe_rev(_hole, _heap, _tmp) \
igt_list_for_each_entry_safe_reverse(_hole, _tmp, &(_heap)->holes, link)
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
+
+/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
+#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
+
/*
 * Multiplicative (Knuth-style) hash for a 32-bit handle key.
 * @val points at a uint32_t handle; returns the scrambled key.
 */
static inline uint32_t hash_handles(const void *val)
{
	const uint32_t handle = *(const uint32_t *) val;

	/* 0x9e370001 == GOLDEN_RATIO_PRIME_32: 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
	return handle * 0x9e370001UL;
}
+
/* Key-equality callback for the objects map: compares two uint32_t handles. */
static int equal_handles(const void *a, const void *b)
{
	const uint32_t *lhs = a, *rhs = b;

	return *lhs == *rhs;
}
+
/*
 * Multiplicative hash for a 64-bit offset key, folded to 32 bits.
 * @val points at a uint64_t offset.
 */
static inline uint32_t hash_offsets(const void *val)
{
	/* 0x9e37fffffffc0001 == GOLDEN_RATIO_PRIME_64: 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
	const uint64_t mixed = *(const uint64_t *) val * 0x9e37fffffffc0001UL;

	/* The multiply mixes the high bits best, so keep those. */
	return mixed >> 32;
}
+
/* Key-equality callback for the reserved map: compares two uint64_t offsets. */
static int equal_offsets(const void *a, const void *b)
{
	const uint64_t *lhs = a, *rhs = b;

	return *lhs == *rhs;
}
+
/*
 * igt_map_destroy()/igt_map_remove() delete callback: the map owns the
 * heap-allocated record stored as entry data, so release it here.
 * The key lives inside the record, so no separate free is needed.
 */
static void map_entry_free_func(struct igt_map_entry *entry)
{
	free(entry->data);
}
+
#define GEN8_GTT_ADDRESS_WIDTH 48
#define DECANONICAL(offset) (offset & ((1ull << GEN8_GTT_ADDRESS_WIDTH) - 1))
@@ -381,7 +423,8 @@ static uint64_t intel_allocator_simple_alloc(struct intel_allocator *ial,
igt_assert(ials);
igt_assert(handle);
alignment = alignment > 0 ? alignment : 1;
- rec = igt_map_find(&ials->objects, &handle);
+
+ rec = igt_map_search(ials->objects, &handle);
if (rec) {
offset = rec->offset;
igt_assert(rec->size == size);
@@ -393,7 +436,7 @@ static uint64_t intel_allocator_simple_alloc(struct intel_allocator *ial,
rec->offset = offset;
rec->size = size;
- igt_map_add(&ials->objects, &rec->handle, rec);
+ igt_map_insert(ials->objects, &rec->handle, rec);
ials->allocated_objects++;
ials->allocated_size += size;
}
@@ -405,19 +448,24 @@ static bool intel_allocator_simple_free(struct intel_allocator *ial, uint32_t ha
{
struct intel_allocator_record *rec = NULL;
struct intel_allocator_simple *ials;
+ struct igt_map_entry *entry;
igt_assert(ial);
ials = (struct intel_allocator_simple *) ial->priv;
igt_assert(ials);
- rec = igt_map_del(&ials->objects, &handle);
- if (rec) {
- simple_vma_heap_free(&ials->heap, rec->offset, rec->size);
- ials->allocated_objects--;
- ials->allocated_size -= rec->size;
- free(rec);
+ entry = igt_map_search_entry(ials->objects, &handle);
+ if (entry) {
+ igt_map_remove_entry(ials->objects, entry);
+ if (entry->data) {
+ rec = (struct intel_allocator_record *) entry->data;
+ simple_vma_heap_free(&ials->heap, rec->offset, rec->size);
+ ials->allocated_objects--;
+ ials->allocated_size -= rec->size;
+ free(rec);
- return true;
+ return true;
+ }
}
return false;
@@ -443,7 +491,7 @@ static bool intel_allocator_simple_is_allocated(struct intel_allocator *ial,
igt_assert(ials);
igt_assert(handle);
- rec = igt_map_find(&ials->objects, &handle);
+ rec = igt_map_search(ials->objects, &handle);
if (rec && __same(rec, handle, size, offset))
same = true;
@@ -484,7 +532,7 @@ static bool intel_allocator_simple_reserve(struct intel_allocator *ial,
rec->offset = start;
rec->size = size;
- igt_map_add(&ials->reserved, &rec->offset, rec);
+ igt_map_insert(ials->reserved, &rec->offset, rec);
ials->reserved_areas++;
ials->reserved_size += rec->size;
@@ -502,6 +550,7 @@ static bool intel_allocator_simple_unreserve(struct intel_allocator *ial,
uint64_t size;
struct intel_allocator_record *rec = NULL;
struct intel_allocator_simple *ials;
+ struct igt_map_entry *entry;
igt_assert(ial);
ials = (struct intel_allocator_simple *) ial->priv;
@@ -516,12 +565,13 @@ static bool intel_allocator_simple_unreserve(struct intel_allocator *ial,
igt_assert(end > start || end == 0);
size = get_size(start, end);
- rec = igt_map_find(&ials->reserved, &start);
+ entry = igt_map_search_entry(ials->reserved, &start);
- if (!rec) {
+ if (!entry || !entry->data) {
igt_debug("Only reserved blocks can be unreserved\n");
return false;
}
+ rec = entry->data;
if (rec->size != size) {
igt_debug("Only the whole block unreservation allowed\n");
@@ -534,8 +584,7 @@ static bool intel_allocator_simple_unreserve(struct intel_allocator *ial,
return false;
}
- igt_map_del(&ials->reserved, &start);
-
+ igt_map_remove_entry(ials->reserved, entry);
ials->reserved_areas--;
ials->reserved_size -= rec->size;
free(rec);
@@ -564,7 +613,7 @@ static bool intel_allocator_simple_is_reserved(struct intel_allocator *ial,
igt_assert(end > start || end == 0);
size = get_size(start, end);
- rec = igt_map_find(&ials->reserved, &start);
+ rec = igt_map_search(ials->reserved, &start);
if (!rec)
return false;
@@ -575,32 +624,17 @@ static bool intel_allocator_simple_is_reserved(struct intel_allocator *ial,
return false;
}
-static bool equal_8bytes(const void *key1, const void *key2)
-{
- const uint64_t *k1 = key1, *k2 = key2;
- return *k1 == *k2;
-}
-
static void intel_allocator_simple_destroy(struct intel_allocator *ial)
{
struct intel_allocator_simple *ials;
- struct igt_map_entry *pos;
- struct igt_map *map;
- int i;
igt_assert(ial);
ials = (struct intel_allocator_simple *) ial->priv;
simple_vma_heap_finish(&ials->heap);
- map = &ials->objects;
- igt_map_for_each(map, i, pos)
- free(pos->value);
- igt_map_free(&ials->objects);
+ igt_map_destroy(ials->objects, map_entry_free_func);
- map = &ials->reserved;
- igt_map_for_each(map, i, pos)
- free(pos->value);
- igt_map_free(&ials->reserved);
+ igt_map_destroy(ials->reserved, map_entry_free_func);
free(ial->priv);
free(ial);
@@ -624,10 +658,8 @@ static void intel_allocator_simple_print(struct intel_allocator *ial, bool full)
struct simple_vma_hole *hole;
struct simple_vma_heap *heap;
struct igt_map_entry *pos;
- struct igt_map *map;
uint64_t total_free = 0, allocated_size = 0, allocated_objects = 0;
uint64_t reserved_size = 0, reserved_areas = 0;
- int i;
igt_assert(ial);
ials = (struct intel_allocator_simple *) ial->priv;
@@ -658,9 +690,8 @@ static void intel_allocator_simple_print(struct intel_allocator *ial, bool full)
ials->total_size - ials->allocated_size - ials->reserved_size);
igt_info("objects:\n");
- map = &ials->objects;
- igt_map_for_each(map, i, pos) {
- struct intel_allocator_record *rec = pos->value;
+ igt_map_foreach(ials->objects, pos) {
+ struct intel_allocator_record *rec = pos->data;
igt_info("handle = %d, offset = %"PRIu64" "
"(0x%"PRIx64", size = %"PRIu64" (0x%"PRIx64")\n",
@@ -673,9 +704,8 @@ static void intel_allocator_simple_print(struct intel_allocator *ial, bool full)
igt_assert(ials->allocated_objects == allocated_objects);
igt_info("reserved areas:\n");
- map = &ials->reserved;
- igt_map_for_each(map, i, pos) {
- struct intel_allocator_record *rec = pos->value;
+ igt_map_foreach(ials->reserved, pos) {
+ struct intel_allocator_record *rec = pos->data;
igt_info("offset = %"PRIu64" (0x%"PRIx64", "
"size = %"PRIu64" (0x%"PRIx64")\n",
@@ -725,9 +755,8 @@ __intel_allocator_simple_create(int fd, uint64_t start, uint64_t end,
ials = ial->priv = malloc(sizeof(struct intel_allocator_simple));
igt_assert(ials);
- igt_map_init(&ials->objects);
- /* Reserved addresses hashtable is indexed by an offset */
- __igt_map_init(&ials->reserved, equal_8bytes, NULL, 3);
+ ials->objects = igt_map_create(hash_handles, equal_handles);
+ ials->reserved = igt_map_create(hash_offsets, equal_offsets);
ials->start = start;
ials->end = end;
diff --git a/tests/i915/api_intel_allocator.c b/tests/i915/api_intel_allocator.c
index 2d6f51deb..bb17c8829 100644
--- a/tests/i915/api_intel_allocator.c
+++ b/tests/i915/api_intel_allocator.c
@@ -466,7 +466,7 @@ static void reopen_fork(int fd)
fd2 = gem_reopen_driver(fd);
- igt_fork(child, 1) {
+ igt_fork(child, 2) {
igt_until_timeout(REOPEN_TIMEOUT)
__reopen_allocs(fd, fd2, false);
}
diff --git a/tests/i915/api_intel_bb.c b/tests/i915/api_intel_bb.c
index 4b6090830..d187049cf 100644
--- a/tests/i915/api_intel_bb.c
+++ b/tests/i915/api_intel_bb.c
@@ -31,12 +31,14 @@
#include <inttypes.h>
#include <cairo.h>
#include <errno.h>
-#include <sys/stat.h>
#include <sys/ioctl.h>
+#include <sys/types.h>
#include <glib.h>
#include <zlib.h>
+#include <pthread.h>
#include "intel_bufops.h"
#include "i915/gem_vm.h"
+#include "igt_map.h"
#define PAGE_SIZE 4096
@@ -1491,6 +1493,141 @@ static void render_ccs(struct buf_ops *bops)
igt_assert_f(fails == 0, "render-ccs fails: %d\n", fails);
}
+
+#define N 100000
+#define THREADS 10
+
+struct mutex_map {
+ struct igt_map *map;
+ pthread_mutex_t *mutex;
+};
+
+struct record {
+ int foo;
+ uint32_t unique_identifier;
+};
+
/*
 * Hash a 4-byte key with the FNV mix, xor-then-multiply order
 * (i.e. the FNV-1a variant, despite the function name).
 * @data must point at 4 readable bytes.
 */
static uint32_t fnv1_hash_4bytes(const void *data)
{
	const uint8_t *key = data;
	uint32_t hash = 2166136261ul;	/* 32-bit FNV offset basis */
	int i;

	for (i = 0; i < 4; i++) {
		hash ^= key[i];
		hash *= 0x01000193;	/* 32-bit FNV prime */
	}

	return hash;
}
+
/* Key-equality callback: compares two 4-byte (uint32_t) keys. */
static int key_equals_4bytes(const void *a, const void *b)
{
	const uint32_t *lhs = a, *rhs = b;

	return *lhs == *rhs;
}
+
/*
 * igt_map delete callback: each map entry's data is a malloc'd
 * struct record (the key points inside it), so one free suffices.
 */
static void map_entry_free_func(struct igt_map_entry *entry)
{
	free(entry->data);
}
+
+static void *insert_remove(void *data)
+{
+ struct mutex_map *m = (struct mutex_map *) data;
+ struct record *r_ptr;
+ uint32_t to_delete = 0;
+ int i;
+
+ srand(gettid());
+ for (i = 0; i < N; i++) {
+ r_ptr = malloc(sizeof(struct record));
+
+ pthread_mutex_lock(m->mutex);
+ do {
+ r_ptr->unique_identifier = (uint32_t) ((rand() % N) + N+1);
+ } while (igt_map_search(m->map, &r_ptr->unique_identifier));
+ igt_map_insert(m->map, &(r_ptr->unique_identifier), r_ptr);
+
+ if (to_delete)
+ igt_map_remove(m->map, &to_delete, map_entry_free_func);
+
+ to_delete = r_ptr->unique_identifier;
+ pthread_mutex_unlock(m->mutex);
+ }
+
+ pthread_mutex_lock(m->mutex);
+ igt_map_remove(m->map, &to_delete, map_entry_free_func);
+ pthread_mutex_unlock(m->mutex);
+
+ return NULL;
+}
+
+static void *search_random(void *data)
+{
+ struct mutex_map *m = (struct mutex_map *) data;
+ struct record *r_ptr;
+
+ srand(gettid());
+ for (int i = 0; i < N; i++) {
+ r_ptr = malloc(sizeof(struct record));
+ r_ptr->unique_identifier = rand() % 2*N;
+
+ pthread_mutex_lock(m->mutex);
+ igt_map_search_entry(m->map, &(r_ptr->unique_identifier));
+ pthread_mutex_unlock(m->mutex);
+ }
+
+ return NULL;
+}
+
+static void *insert_n(void *data)
+{
+ struct mutex_map *m = (struct mutex_map *) data;
+ struct record *r_ptr;
+
+ for (int i = 0; i < N; i++) {
+ r_ptr = malloc(sizeof(struct record));
+ r_ptr->unique_identifier = i;
+
+ pthread_mutex_lock(m->mutex);
+ igt_map_insert(m->map, &(r_ptr->unique_identifier), r_ptr);
+ pthread_mutex_unlock(m->mutex);
+ }
+
+ return NULL;
+}
+
+static void map(void)
+{
+ pthread_t thread[THREADS+1];
+ struct mutex_map m_map;
+ pthread_mutex_t mutex;
+ int i = 0;
+
+ igt_assert(N*2 <= RAND_MAX);
+
+ pthread_mutex_init(&mutex, NULL);
+ m_map.map = igt_map_create(fnv1_hash_4bytes, key_equals_4bytes);;
+ m_map.mutex = &mutex;
+ igt_assert(!pthread_create(&thread[0], NULL, insert_n, (void *)&m_map));
+
+ for (i = 1; i < THREADS; i += 2) {
+ igt_assert(!pthread_create(&thread[i], NULL, insert_remove, (void *)&m_map));
+ igt_assert(!pthread_create(&thread[i+1], NULL, search_random, (void *)&m_map));
+ }
+
+ for (i = 0; i <= THREADS; i++)
+ pthread_join(thread[i], NULL);
+
+ for (i = 0; i < N; i++)
+ igt_map_remove(m_map.map, &i, map_entry_free_func);
+
+ igt_assert(m_map.map->entries == 0);
+ igt_map_destroy(m_map.map, map_entry_free_func);
+}
+
static int opt_handler(int opt, int opt_index, void *data)
{
switch (opt) {
@@ -1655,4 +1792,8 @@ igt_main_args("dpib", NULL, help_str, opt_handler, NULL)
buf_ops_destroy(bops);
close(i915);
}
+
+ igt_subtest("igt-map")
+ map();
+
}
--
2.26.0
More information about the Intel-gfx-trybot
mailing list