[PATCH i-g-t 36/38] WIP: lib/intel_allocator: Add open-by-VM support
Zbigniew Kempczyński
zbigniew.kempczynski at intel.com
Tue Feb 23 06:39:40 UTC 2021
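
Teach the allocator to be keyed by a VM id in addition to a context id.
struct intel_allocator gains a vm field, REQ_OPEN carries it, and
VM-based allocators are tracked in a separate fd/vm map guarded by its
own mutex. New intel_allocator_open_vm() / intel_allocator_open_vm_full()
entry points mirror the ctx-based ones, the simple backend gets matching
_vm constructors, and a basic open-vm subtest opens the same ctx and the
same vm twice.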
---
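Not part of the commit message: a rough usage sketch for the new entry
points. It assumes the gem_vm_create()/gem_vm_destroy() helpers from
lib/i915/gem_vm.h and is only meant to illustrate the intended flow:

	static void example_open_vm(int fd)
	{
		uint32_t vm = gem_vm_create(fd);
		uint64_t ahnd, start, end;

		/* Allocations are tracked per <fd, vm>, not per <fd, ctx>. */
		ahnd = intel_allocator_open_vm(fd, vm, INTEL_ALLOCATOR_SIMPLE);
		intel_allocator_get_address_range(ahnd, &start, &end);
		igt_info("vm allocator range: 0x%llx - 0x%llx\n",
			 (long long) start, (long long) end);

		intel_allocator_close(ahnd);
		gem_vm_destroy(fd, vm);
	}
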
lib/intel_allocator.c | 203 ++++++++++++++++++++++---------
lib/intel_allocator.h | 6 +
lib/intel_allocator_msgchannel.h | 1 +
lib/intel_allocator_simple.c | 44 ++++++-
tests/i915/api_intel_allocator.c | 26 ++++
5 files changed, 222 insertions(+), 58 deletions(-)
diff --git a/lib/intel_allocator.c b/lib/intel_allocator.c
index 1295f5183..984595949 100644
--- a/lib/intel_allocator.c
+++ b/lib/intel_allocator.c
@@ -17,7 +17,7 @@
#include "intel_allocator.h"
#include "intel_allocator_msgchannel.h"
-//#define ALLOCDBG
+#define ALLOCDBG
#ifdef ALLOCDBG
#define alloc_info igt_info
#define alloc_debug igt_debug
@@ -50,9 +50,16 @@ struct intel_allocator *
intel_allocator_simple_create_full(int fd, uint32_t ctx,
uint64_t start, uint64_t end,
enum allocator_strategy strategy);
+struct intel_allocator *intel_allocator_simple_create_vm(int fd, uint32_t vm);
+struct intel_allocator *
+intel_allocator_simple_create_vm_full(int fd, uint32_t vm,
+ uint64_t start, uint64_t end,
+ enum allocator_strategy strategy);
static struct igt_map *allocators_map;
static pthread_mutex_t map_mutex = PTHREAD_MUTEX_INITIALIZER;
+static struct igt_map *allocators_vm_map;
+static pthread_mutex_t map_vm_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool multiprocess;
static pthread_t allocator_thread;
static bool allocator_thread_running;
@@ -102,6 +109,7 @@ static int recv_resp(struct msg_channel *msgchan,
}
static struct intel_allocator *intel_allocator_create(int fd, uint32_t ctx,
+ uint32_t vm,
uint64_t start, uint64_t end,
uint8_t allocator_type,
uint8_t allocator_strategy)
@@ -126,12 +134,21 @@ static struct intel_allocator *intel_allocator_create(int fd, uint32_t ctx,
ial = intel_allocator_random_create(fd, ctx);
break;
case INTEL_ALLOCATOR_SIMPLE:
- if (!start && !end)
- ial = intel_allocator_simple_create(fd, ctx);
- else
- ial = intel_allocator_simple_create_full(fd, ctx,
- start, end,
- allocator_strategy);
+ if (!start && !end) {
+ if (vm == 0)
+ ial = intel_allocator_simple_create(fd, ctx);
+ else
+ ial = intel_allocator_simple_create_vm(fd, vm);
+ } else {
+ if (vm == 0)
+ ial = intel_allocator_simple_create_full(fd, ctx,
+ start, end,
+ allocator_strategy);
+ else
+ ial = intel_allocator_simple_create_vm_full(fd, vm,
+ start, end,
+ allocator_strategy);
+ }
break;
default:
igt_assert_f(ial, "Allocator type %d not implemented\n",
@@ -144,7 +161,8 @@ static struct intel_allocator *intel_allocator_create(int fd, uint32_t ctx,
atomic_fetch_add(&ial->refcount, 1);
pthread_mutex_init(&ial->mutex, NULL);
- igt_map_add(allocators_map, ial, ial);
+
+ igt_map_add(vm ? allocators_vm_map : allocators_map, ial, ial);
return ial;
}
@@ -157,12 +175,18 @@ static void intel_allocator_destroy(struct intel_allocator *ial)
ial->destroy(ial);
}
-static struct intel_allocator *__allocator_get(int fd, uint32_t ctx)
+static struct intel_allocator *__allocator_get(int fd, uint32_t ctx, uint32_t vm)
{
- struct intel_allocator *ial, ials = { .fd = fd, .ctx = ctx };
+ struct intel_allocator *ial, ials = { .fd = fd, .ctx = ctx, .vm = vm };
+ struct igt_map *map;
int refcount;
- ial = igt_map_find(allocators_map, &ials);
+ if (vm)
+ map = allocators_vm_map;
+ else
+ map = allocators_map;
+
+ ial = igt_map_find(map, &ials);
if (!ial)
goto out_get;
@@ -176,18 +200,25 @@ out_get:
static bool __allocator_put(struct intel_allocator *ial)
{
- struct intel_allocator ials = { .fd = ial->fd, .ctx = ial->ctx };
+ struct intel_allocator ials = { .fd = ial->fd, .ctx = ial->ctx,
+ .vm = ial->vm };
+ struct igt_map *map;
bool released = false;
int refcount;
- ial = igt_map_find(allocators_map, &ials);
+ if (ial->vm)
+ map = allocators_vm_map;
+ else
+ map = allocators_map;
+
+ ial = igt_map_find(map, &ials);
igt_assert(ial);
refcount = atomic_fetch_sub(&ial->refcount, 1);
alloc_debug("Refcount: %d\n", refcount);
igt_assert(refcount >= 1);
if (refcount == 1) {
- igt_map_del(allocators_map, ial);
+ igt_map_del(map, ial);
if (!ial->is_empty(ial) && warn_if_not_empty)
igt_warn("Allocator not clear before destroy!\n");
@@ -198,28 +229,35 @@ static bool __allocator_put(struct intel_allocator *ial)
return released;
}
-static struct intel_allocator *allocator_open(int fd, uint32_t ctx,
+static struct intel_allocator *allocator_open(int fd, uint32_t ctx, uint32_t vm,
uint64_t start, uint64_t end,
uint8_t allocator_type,
uint8_t allocator_strategy)
{
struct intel_allocator *ial;
+ pthread_mutex_t *mutex;
+ const char *idstr = vm ? "vm" : "ctx";
- pthread_mutex_lock(&map_mutex);
- ial = __allocator_get(fd, ctx);
+ if (vm)
+ mutex = &map_vm_mutex;
+ else
+ mutex = &map_mutex;
+
+ pthread_mutex_lock(mutex);
+ ial = __allocator_get(fd, ctx, vm);
if (!ial) {
- alloc_debug("Allocator fd: %d, ctx: %u, <0x%llx : 0x%llx> "
+ alloc_debug("Allocator fd: %d, ctx: %u, vm: %u, <0x%llx : 0x%llx> "
"not found, creating one\n",
- fd, ctx, (long long) start, (long long) end);
- ial = intel_allocator_create(fd, ctx, start, end,
+ fd, ctx, vm, (long long) start, (long long) end);
+ ial = intel_allocator_create(fd, ctx, vm, start, end,
allocator_type, allocator_strategy);
}
- pthread_mutex_unlock(&map_mutex);
+ pthread_mutex_unlock(mutex);
igt_assert_f(ial->type == allocator_type,
- "Allocator type must be same for fd/ctx\n");
+ "Allocator type must be same for fd/%s\n", idstr);
igt_assert_f(ial->strategy == allocator_strategy,
- "Allocator strategy must be same or fd/ctx\n");
+ "Allocator strategy must be same or fd/%s\n", idstr);
return ial;
}
@@ -227,11 +265,17 @@ static struct intel_allocator *allocator_open(int fd, uint32_t ctx,
static bool allocator_close(uint64_t allocator_handle)
{
struct intel_allocator *ial = from_user_pointer(allocator_handle);
+ pthread_mutex_t *mutex;
bool released, is_empty = false;
igt_assert(ial);
- pthread_mutex_lock(&map_mutex);
+ if (ial->vm)
+ mutex = &map_vm_mutex;
+ else
+ mutex = &map_mutex;
+
+ pthread_mutex_lock(mutex);
released = __allocator_put(ial);
if (released) {
@@ -239,7 +283,7 @@ static bool allocator_close(uint64_t allocator_handle)
intel_allocator_destroy(ial);
}
- pthread_mutex_unlock(&map_mutex);
+ pthread_mutex_unlock(mutex);
return is_empty;
}
@@ -283,6 +327,7 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
if (same_process) {
struct intel_allocator *ial;
uint64_t start, end, size;
+ uint32_t ctx, vm;
bool allocated, reserved, unreserved;
/* Mutex only work on allocator instance, not stop/open/close */
@@ -300,6 +345,7 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
case REQ_OPEN:
ial = allocator_open(req->open.fd, req->open.ctx,
+ req->open.vm,
req->open.start, req->open.end,
req->open.allocator_type,
req->open.allocator_strategy);
@@ -307,10 +353,10 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
resp->response_type = RESP_OPEN;
resp->open.allocator_handle = to_user_pointer(ial);
- alloc_info("<open> [tid: %ld] fd: %d, ctx: %u, alloc_type: %u, "
- "ahnd: %p, refcnt: %d\n",
+ alloc_info("<open> [tid: %ld] fd: %d, ctx: %u, vm: %u"
+ ", alloc_type: %u, ahnd: %p, refcnt: %d\n",
(long) req->tid, req->open.fd, req->open.ctx,
- req->open.allocator_type, ial,
+ req->open.vm, req->open.allocator_type, ial,
atomic_load(&ial->refcount));
break;
@@ -320,9 +366,13 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
resp->response_type = RESP_CLOSE;
ret = atomic_load(&ial->refcount);
+ ctx = ial->ctx;
+ vm = ial->vm;
resp->close.is_empty = allocator_close(req->allocator_handle);
- alloc_info("<close> [tid: %ld] ahnd: %p, is_empty: %d, refcnt: %d\n",
- (long) req->tid, ial, resp->close.is_empty, ret);
+ alloc_info("<close> [tid: %ld] ahnd: %p, ctx: %u, vm: %u"
+ ", is_empty: %d, refcnt: %d\n",
+ (long) req->tid, ial, ctx, vm,
+ resp->close.is_empty, ret);
break;
case REQ_ADDRESS_RANGE:
@@ -608,6 +658,33 @@ void intel_allocator_multiprocess_stop(void)
}
}
+static uint64_t __intel_allocator_open_full(int fd, uint32_t ctx,
+ uint32_t vm,
+ uint64_t start, uint64_t end,
+ uint8_t allocator_type,
+ enum allocator_strategy strategy)
+{
+ struct alloc_req req = { .request_type = REQ_OPEN,
+ .open.fd = fd,
+ .open.ctx = ctx,
+ .open.vm = vm,
+ .open.start = start,
+ .open.end = end,
+ .open.allocator_type = allocator_type,
+ .open.allocator_strategy = strategy };
+ struct alloc_resp resp;
+
+ /* Get child_tid only once at open() */
+ if (child_tid == -1)
+ child_tid = gettid();
+
+ igt_assert(handle_request(&req, &resp) == 0);
+ igt_assert(resp.open.allocator_handle);
+ igt_assert(resp.response_type == RESP_OPEN);
+
+ return resp.open.allocator_handle;
+}
+
/**
* intel_allocator_open_full:
* @fd: i915 descriptor
@@ -641,24 +718,17 @@ uint64_t intel_allocator_open_full(int fd, uint32_t ctx,
uint8_t allocator_type,
enum allocator_strategy strategy)
{
- struct alloc_req req = { .request_type = REQ_OPEN,
- .open.fd = fd,
- .open.ctx = ctx,
- .open.start = start,
- .open.end = end,
- .open.allocator_type = allocator_type,
- .open.allocator_strategy = strategy };
- struct alloc_resp resp;
-
- /* Get child_tid only once at open() */
- if (child_tid == -1)
- child_tid = gettid();
-
- igt_assert(handle_request(&req, &resp) == 0);
- igt_assert(resp.open.allocator_handle);
- igt_assert(resp.response_type == RESP_OPEN);
+ return __intel_allocator_open_full(fd, ctx, 0, start, end,
+ allocator_type, strategy);
+}
- return resp.open.allocator_handle;
+uint64_t intel_allocator_open_vm_full(int fd, uint32_t vm,
+ uint64_t start, uint64_t end,
+ uint8_t allocator_type,
+ enum allocator_strategy strategy)
+{
+ return __intel_allocator_open_full(fd, 0, vm, start, end,
+ allocator_type, strategy);
}
/**
@@ -683,6 +753,12 @@ uint64_t intel_allocator_open(int fd, uint32_t ctx, uint8_t allocator_type)
ALLOC_STRATEGY_HIGH_TO_LOW);
}
+uint64_t intel_allocator_open_vm(int fd, uint32_t vm, uint8_t allocator_type)
+{
+ return intel_allocator_open_vm_full(fd, vm, 0, 0, allocator_type,
+ ALLOC_STRATEGY_HIGH_TO_LOW);
+}
+
/**
* intel_allocator_close:
* @allocator_handle: handle to the allocator that will be closed
@@ -983,6 +1059,16 @@ static bool equal_allocators(const void *key1, const void *key2)
return a1->fd == a2->fd && a1->ctx == a2->ctx;
}
+static bool equal_allocators_vm(const void *key1, const void *key2)
+{
+ const struct intel_allocator *a1 = key1, *a2 = key2;
+
+ alloc_debug("a1: <fd: %d, vm: %u>, a2 <fd: %d, vm: %u>\n",
+ a1->fd, a1->vm, a2->fd, a2->vm);
+
+ return a1->fd == a2->fd && a1->vm == a2->vm;
+}
+
/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
@@ -994,20 +1080,24 @@ static inline uint64_t hash_allocators(const void *val, unsigned int bits)
return hash >> (64 - bits);
}
-static void __free_allocators(void)
+static void __free_allocators(struct igt_map *map)
{
struct igt_map_entry *pos;
struct intel_allocator *ial;
int i;
- if (allocators_map) {
- igt_map_for_each(allocators_map, i, pos) {
+ if (!map)
+ return;
+
+ if (map) {
+ igt_map_for_each(map, i, pos) {
ial = pos->value;
ial->destroy(ial);
}
}
- igt_map_free(allocators_map);
+ igt_map_free(map);
+ free(map);
}
/**
@@ -1024,15 +1114,18 @@ void intel_allocator_init(void)
allocator_pid = getpid();
alloc_info("Allocator pid: %ld\n", (long) allocator_pid);
- if (allocators_map) {
- __free_allocators();
- free(allocators_map);
- }
+ __free_allocators(allocators_map);
+ __free_allocators(allocators_vm_map);
allocators_map = calloc(sizeof(*allocators_map), 1);
igt_assert(allocators_map);
+ allocators_vm_map = calloc(sizeof(*allocators_vm_map), 1);
+ igt_assert(allocators_vm_map);
+
__igt_map_init(allocators_map, equal_allocators, hash_allocators, 8);
+ __igt_map_init(allocators_vm_map, equal_allocators_vm,
+ hash_allocators, 8);
channel = intel_allocator_get_msgchannel(CHANNEL_SYSVIPC_MSGQUEUE);
}
diff --git a/lib/intel_allocator.h b/lib/intel_allocator.h
index 1298ce4a0..102e63570 100644
--- a/lib/intel_allocator.h
+++ b/lib/intel_allocator.h
@@ -79,6 +79,7 @@ enum allocator_strategy {
struct intel_allocator {
int fd;
uint32_t ctx;
+ uint32_t vm;
uint8_t type;
enum allocator_strategy strategy;
_Atomic(int32_t) refcount;
@@ -119,6 +120,11 @@ uint64_t intel_allocator_open_full(int fd, uint32_t ctx,
uint64_t start, uint64_t end,
uint8_t allocator_type,
enum allocator_strategy strategy);
+uint64_t intel_allocator_open_vm(int fd, uint32_t vm, uint8_t allocator_type);
+uint64_t intel_allocator_open_vm_full(int fd, uint32_t vm,
+ uint64_t start, uint64_t end,
+ uint8_t allocator_type,
+ enum allocator_strategy strategy);
bool intel_allocator_close(uint64_t allocator_handle);
void intel_allocator_get_address_range(uint64_t allocator_handle,
uint64_t *startp, uint64_t *endp);
diff --git a/lib/intel_allocator_msgchannel.h b/lib/intel_allocator_msgchannel.h
index ad5a9e901..7be1a4646 100644
--- a/lib/intel_allocator_msgchannel.h
+++ b/lib/intel_allocator_msgchannel.h
@@ -48,6 +48,7 @@ struct alloc_req {
struct {
int fd;
uint32_t ctx;
+ uint32_t vm;
uint64_t start;
uint64_t end;
uint8_t allocator_type;
diff --git a/lib/intel_allocator_simple.c b/lib/intel_allocator_simple.c
index 1163b42a6..b55844e8f 100644
--- a/lib/intel_allocator_simple.c
+++ b/lib/intel_allocator_simple.c
@@ -23,6 +23,12 @@ struct intel_allocator *
intel_allocator_simple_create_full(int fd, uint32_t ctx,
uint64_t start, uint64_t end,
enum allocator_strategy strategy);
+struct intel_allocator *
+intel_allocator_simple_create_vm(int fd, uint32_t vm);
+struct intel_allocator *
+intel_allocator_simple_create_vm_full(int fd, uint32_t vm,
+ uint64_t start, uint64_t end,
+ enum allocator_strategy strategy);
struct simple_vma_heap {
struct igt_list_head holes;
@@ -674,19 +680,22 @@ static void intel_allocator_simple_print(struct intel_allocator *ial, bool full)
static struct intel_allocator *
__intel_allocator_simple_create(int fd, uint32_t ctx,
+ uint32_t vm,
uint64_t start, uint64_t end,
enum allocator_strategy strategy)
{
struct intel_allocator *ial;
struct intel_allocator_simple *ials;
- igt_debug("Using simple allocator <fd: %d, ctx: %u>\n", fd, ctx);
+ igt_debug("Using simple allocator <fd: %d, ctx: %u, vm: %u>\n",
+ fd, ctx, vm);
ial = calloc(1, sizeof(*ial));
igt_assert(ial);
ial->fd = fd;
ial->ctx = ctx;
+ ial->vm = vm;
ial->get_address_range = intel_allocator_simple_get_address_range;
ial->alloc = intel_allocator_simple_alloc;
ial->free = intel_allocator_simple_free;
@@ -728,7 +737,7 @@ intel_allocator_simple_create(int fd, uint32_t ctx)
else
gtt_size -= RESERVED;
- return __intel_allocator_simple_create(fd, ctx, 0, gtt_size,
+ return __intel_allocator_simple_create(fd, ctx, 0, 0, gtt_size,
ALLOC_STRATEGY_HIGH_TO_LOW);
}
@@ -744,5 +753,34 @@ intel_allocator_simple_create_full(int fd, uint32_t ctx,
gtt_size /= 2;
igt_assert(end - start <= gtt_size);
- return __intel_allocator_simple_create(fd, ctx, start, end, strategy);
+ return __intel_allocator_simple_create(fd, ctx, 0, start, end, strategy);
+}
+
+struct intel_allocator *
+intel_allocator_simple_create_vm(int fd, uint32_t vm)
+{
+ uint64_t gtt_size = gem_aperture_size(fd);
+
+ if (!gem_uses_full_ppgtt(fd))
+ gtt_size /= 2;
+ else
+ gtt_size -= RESERVED;
+
+ return __intel_allocator_simple_create(fd, 0, vm, 0, gtt_size,
+ ALLOC_STRATEGY_HIGH_TO_LOW);
+}
+
+struct intel_allocator *
+intel_allocator_simple_create_vm_full(int fd, uint32_t vm,
+ uint64_t start, uint64_t end,
+ enum allocator_strategy strategy)
+{
+ uint64_t gtt_size = gem_aperture_size(fd);
+
+ igt_assert(end <= gtt_size);
+ if (!gem_uses_full_ppgtt(fd))
+ gtt_size /= 2;
+ igt_assert(end - start <= gtt_size);
+
+ return __intel_allocator_simple_create(fd, 0, vm, start, end, strategy);
}
diff --git a/tests/i915/api_intel_allocator.c b/tests/i915/api_intel_allocator.c
index 2de7a0baa..8433e395c 100644
--- a/tests/i915/api_intel_allocator.c
+++ b/tests/i915/api_intel_allocator.c
@@ -544,6 +544,29 @@ static void execbuf_with_allocator(int fd)
igt_assert(intel_allocator_close(ahnd) == true);
}
+static void open_vm(int fd)
+{
+ uint64_t ialh1, ialh2;
+ uint64_t ialh3, ialh4;
+
+ ialh1 = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+ ialh2 = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+
+ igt_info("ial1: %llx\n", (long long) ialh1);
+ igt_info("ial2: %llx\n", (long long) ialh2);
+
+ ialh3 = intel_allocator_open_vm(fd, 1, INTEL_ALLOCATOR_SIMPLE);
+ ialh4 = intel_allocator_open_vm(fd, 1, INTEL_ALLOCATOR_SIMPLE);
+
+ igt_info("ial3: %llx\n", (long long) ialh3);
+ igt_info("ial4: %llx\n", (long long) ialh4);
+
+ intel_allocator_close(ialh1);
+ intel_allocator_close(ialh2);
+ intel_allocator_close(ialh3);
+ intel_allocator_close(ialh4);
+}
+
struct allocators {
const char *name;
uint8_t type;
@@ -627,6 +650,9 @@ igt_main
igt_subtest_f("execbuf-with-allocator")
execbuf_with_allocator(fd);
+ igt_subtest_f("open-vm")
+ open_vm(fd);
+
igt_fixture
close(fd);
}
--
2.26.0