[PATCH i-g-t 37/38] WIP rework
Zbigniew Kempczyński
zbigniew.kempczynski at intel.com
Tue Feb 23 06:39:41 UTC 2021
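
Rework allocator tracking: keep every allocator instance in a
pointer-keyed map, with separate fd/ctx and fd/vm maps used for
lookups, and serialize open/close/ref/unref on a single map mutex.
Add REQ_REF/REQ_UNREF requests (exposed as intel_allocator_ref() and
intel_allocator_unref()) so the allocator refcount can be manipulated
explicitly, and make requests on stale handles fail instead of
asserting. Add basic-check and ctx subtests.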
---
lib/intel_allocator.c | 356 ++++++++++++++++++++++---------
lib/intel_allocator.h | 2 +
lib/intel_allocator_msgchannel.h | 12 ++
tests/i915/api_intel_allocator.c | 60 +++++-
4 files changed, 319 insertions(+), 111 deletions(-)
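
For reviewers, a minimal sketch of the intended ref/unref flow (it
mirrors the basic-check subtest added below; the fd setup via
drm_open_driver() is assumed):

	int fd = drm_open_driver(DRIVER_INTEL);
	uint64_t ahnd;

	/* Opening takes the first reference (refcount == 1). */
	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);

	/* Take an extra reference, e.g. before sharing ahnd with a helper. */
	igt_assert(intel_allocator_ref(ahnd));		/* refcount 1 -> 2 */

	/* Close only drops a reference while others still hold one. */
	igt_assert(!intel_allocator_close(ahnd));	/* refcount 2 -> 1 */

	/* Dropping the last reference destroys the allocator. */
	igt_assert(intel_allocator_unref(ahnd));	/* refcount 1 -> 0 */

	/* The handle is now stale; requests fail instead of asserting. */
	igt_assert(!intel_allocator_ref(ahnd));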
diff --git a/lib/intel_allocator.c b/lib/intel_allocator.c
index 984595949..8267f69b5 100644
--- a/lib/intel_allocator.c
+++ b/lib/intel_allocator.c
@@ -25,6 +25,8 @@ static const char *reqtype_str[] = {
[REQ_STOP] = "stop",
[REQ_OPEN] = "open",
[REQ_CLOSE] = "close",
+ [REQ_REF] = "ref",
+ [REQ_UNREF] = "unref",
[REQ_ADDRESS_RANGE] = "address range",
[REQ_ALLOC] = "alloc",
[REQ_FREE] = "free",
@@ -56,10 +58,10 @@ intel_allocator_simple_create_full_vm(int fd, uint32_t vm,
uint64_t start, uint64_t end,
enum allocator_strategy strategy);
-static struct igt_map *allocators_map;
static pthread_mutex_t map_mutex = PTHREAD_MUTEX_INITIALIZER;
+static struct igt_map *allocators_map;
+static struct igt_map *allocators_ctx_map;
static struct igt_map *allocators_vm_map;
-static pthread_mutex_t map_vm_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool multiprocess;
static pthread_t allocator_thread;
static bool allocator_thread_running;
@@ -108,6 +110,83 @@ static int recv_resp(struct msg_channel *msgchan,
return msgchan->recv_resp(msgchan, response);
}
+#define GET_MAP(vm) ((vm) ? allocators_vm_map : allocators_ctx_map)
+
+static struct intel_allocator *
+__allocator_find(int fd, uint32_t ctx, uint32_t vm)
+{
+ struct intel_allocator ials = { .fd = fd, .ctx = ctx, .vm = vm };
+ struct igt_map *map = GET_MAP(vm);
+
+ return igt_map_find(map, &ials);
+}
+
+static struct intel_allocator *
+__allocator_find_by_ptr(const struct intel_allocator *ial)
+{
+ struct intel_allocator *ialp;
+
+ ialp = igt_map_find(allocators_map, ial);
+ if (ialp)
+ igt_assert(ialp == ial);
+
+ return ialp;
+}
+
+static bool __allocator_is_valid(struct intel_allocator *ial)
+{
+ return __allocator_find_by_ptr(ial);
+}
+
+static void __allocator_add(struct intel_allocator *ial)
+{
+ struct igt_map *map = GET_MAP(ial->vm);
+
+ igt_assert(__allocator_is_valid(ial) == false);
+ igt_map_add(allocators_map, ial, ial);
+ igt_map_add(map, ial, ial);
+}
+
+static void __allocator_del(struct intel_allocator *ial)
+{
+ struct igt_map *map = GET_MAP(ial->vm);
+
+ igt_assert(__allocator_is_valid(ial) == true);
+ igt_map_del(allocators_map, ial);
+ igt_map_del(map, ial);
+}
+
+static int __allocator_get(struct intel_allocator *ial)
+{
+ int refcount;
+
+ refcount = atomic_fetch_add(&ial->refcount, 1);
+ alloc_debug("get: refcount: %d\n", refcount + 1);
+ igt_assert(refcount >= 0);
+
+ return refcount;
+}
+
+static bool __allocator_put(struct intel_allocator *ial)
+{
+ bool released = false;
+ int refcount;
+
+ refcount = atomic_fetch_sub(&ial->refcount, 1);
+ alloc_debug("put: refcount: %d\n", refcount - 1);
+ igt_assert(refcount >= 1);
+ if (refcount == 1) {
+ if (!ial->is_empty(ial) && warn_if_not_empty)
+ igt_warn("Allocator not clear before destroy!\n");
+
+ released = true;
+ }
+
+ return released;
+}
+
static struct intel_allocator *intel_allocator_create(int fd, uint32_t ctx,
uint32_t vm,
uint64_t start, uint64_t end,
@@ -156,13 +235,13 @@ static struct intel_allocator *intel_allocator_create(int fd, uint32_t ctx,
break;
}
+ igt_assert(ial);
+
ial->type = allocator_type;
ial->strategy = allocator_strategy;
- atomic_fetch_add(&ial->refcount, 1);
pthread_mutex_init(&ial->mutex, NULL);
-
- igt_map_add(vm ? allocators_vm_map : allocators_map, ial, ial);
+ __allocator_add(ial);
return ial;
}
@@ -172,79 +251,19 @@ static void intel_allocator_destroy(struct intel_allocator *ial)
alloc_info("Destroying allocator (empty: %d)\n",
ial->is_empty(ial));
+ __allocator_del(ial);
ial->destroy(ial);
}
-static struct intel_allocator *__allocator_get(int fd, uint32_t ctx, uint32_t vm)
-{
- struct intel_allocator *ial, ials = { .fd = fd, .ctx = ctx, .vm = vm };
- struct igt_map *map;
- int refcount;
-
- if (vm)
- map = allocators_vm_map;
- else
- map = allocators_map;
-
- ial = igt_map_find(map, &ials);
- if (!ial)
- goto out_get;
-
- refcount = atomic_fetch_add(&ial->refcount, 1);
- igt_assert(refcount > 0);
-
-out_get:
-
- return ial;
-}
-
-static bool __allocator_put(struct intel_allocator *ial)
-{
- struct intel_allocator ials = { .fd = ial->fd, .ctx = ial->ctx,
- .vm = ial->vm };
- struct igt_map *map;
- bool released = false;
- int refcount;
-
- if (ial->vm)
- map = allocators_vm_map;
- else
- map = allocators_map;
-
- ial = igt_map_find(map, &ials);
- igt_assert(ial);
-
- refcount = atomic_fetch_sub(&ial->refcount, 1);
- alloc_debug("Refcount: %d\n", refcount);
- igt_assert(refcount >= 1);
- if (refcount == 1) {
- igt_map_del(map, ial);
-
- if (!ial->is_empty(ial) && warn_if_not_empty)
- igt_warn("Allocator not clear before destroy!\n");
-
- released = true;
- }
-
- return released;
-}
-
static struct intel_allocator *allocator_open(int fd, uint32_t ctx, uint32_t vm,
uint64_t start, uint64_t end,
uint8_t allocator_type,
uint8_t allocator_strategy)
{
struct intel_allocator *ial;
- static pthread_mutex_t *mutex;
const char *idstr = vm ? "vm" : "ctx";
- if (vm)
- mutex = &map_vm_mutex;
- else
- mutex = &map_mutex;
-
- pthread_mutex_lock(mutex);
- ial = __allocator_get(fd, ctx, vm);
+ ial = __allocator_find(fd, ctx, vm);
if (!ial) {
alloc_debug("Allocator fd: %d, ctx: %u, vm: %u, <0x%llx : 0x%llx> "
"not found, creating one\n",
@@ -252,12 +271,17 @@ static struct intel_allocator *allocator_open(int fd, uint32_t ctx, uint32_t vm,
ial = intel_allocator_create(fd, ctx, vm, start, end,
allocator_type, allocator_strategy);
}
- pthread_mutex_unlock(mutex);
+ if (ial->type != allocator_type) {
+ igt_warn("Allocator type must be the same for fd/%s\n", idstr);
+ ial = NULL;
+ }
- igt_assert_f(ial->type == allocator_type,
- "Allocator type must be same for fd/%s\n", idstr);
- igt_assert_f(ial->strategy == allocator_strategy,
- "Allocator strategy must be same or fd/%s\n", idstr);
+ if (ial && ial->strategy != allocator_strategy) {
+ igt_warn("Allocator strategy must be the same for fd/%s\n", idstr);
+ ial = NULL;
+ }
+
+ if (ial)
+ __allocator_get(ial);
return ial;
}
@@ -265,26 +289,14 @@ static struct intel_allocator *allocator_open(int fd, uint32_t ctx, uint32_t vm,
static bool allocator_close(uint64_t allocator_handle)
{
struct intel_allocator *ial = from_user_pointer(allocator_handle);
- static pthread_mutex_t *mutex;
bool released, is_empty = false;
- igt_assert(ial);
-
- if (ial->vm)
- mutex = &map_vm_mutex;
- else
- mutex = &map_mutex;
-
- pthread_mutex_lock(mutex);
-
released = __allocator_put(ial);
if (released) {
is_empty = ial->is_empty(ial);
intel_allocator_destroy(ial);
}
- pthread_mutex_unlock(mutex);
-
return is_empty;
}
@@ -328,10 +340,13 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
struct intel_allocator *ial;
uint64_t start, end, size;
uint32_t ctx, vm;
- bool allocated, reserved, unreserved;
+ bool released, allocated, reserved, unreserved;
- /* Mutex only work on allocator instance, not stop/open/close */
- if (req->request_type > REQ_CLOSE) {
+ /*
+ * The mutex only guards requests on an allocator instance,
+ * not stop/open/close/ref/unref.
+ */
+ if (req->request_type > REQ_UNREF) {
ial = from_user_pointer(req->allocator_handle);
igt_assert(ial);
@@ -344,35 +359,101 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
break;
case REQ_OPEN:
+ ret = 0;
+
+ pthread_mutex_lock(&map_mutex);
+
ial = allocator_open(req->open.fd, req->open.ctx,
req->open.vm,
req->open.start, req->open.end,
req->open.allocator_type,
req->open.allocator_strategy);
- igt_assert(ial);
+ if (ial)
+ ret = atomic_load(&ial->refcount);
+
+ pthread_mutex_unlock(&map_mutex);
resp->response_type = RESP_OPEN;
resp->open.allocator_handle = to_user_pointer(ial);
alloc_info("<open> [tid: %ld] fd: %d, ctx: %u, vm: %u"
- ", alloc_type: %u, ahnd: %p, refcnt: %d\n",
+ ", alloc_type: %u, ahnd: %p, refcnt: %d->%d\n",
(long) req->tid, req->open.fd, req->open.ctx,
req->open.vm, req->open.allocator_type, ial,
- atomic_load(&ial->refcount));
+ ret - 1, ret);
break;
case REQ_CLOSE:
ial = from_user_pointer(req->allocator_handle);
- igt_assert(ial);
+ ret = ctx = vm = 0;
+
+ pthread_mutex_lock(&map_mutex);
+
+ if (__allocator_is_valid(ial)) {
+ ret = atomic_load(&ial->refcount);
+ ctx = ial->ctx;
+ vm = ial->vm;
+ resp->close.is_empty = allocator_close(req->allocator_handle);
+ } else {
+ resp->close.is_empty = false;
+ ial = NULL;
+ }
+
+ pthread_mutex_unlock(&map_mutex);
resp->response_type = RESP_CLOSE;
- ret = atomic_load(&ial->refcount);
- ctx = ial->ctx;
- vm = ial->vm;
- resp->close.is_empty = allocator_close(req->allocator_handle);
alloc_info("<close> [tid: %ld] ahnd: %p, ctx: %u, vm: %u"
- ", is_empty: %d, refcnt: %d\n",
+ ", is_empty: %d, refcnt: %d->%d\n",
+ (long) req->tid, ial, ctx, vm,
+ resp->close.is_empty,
+ ret, ial ? ret - 1 : 0);
+
+ break;
+
+ case REQ_REF:
+ ial = from_user_pointer(req->allocator_handle);
+ ret = ctx = vm = 0;
+
+ pthread_mutex_lock(&map_mutex);
+
+ if (__allocator_is_valid(ial)) {
+ ret = __allocator_get(ial);
+ ctx = ial->ctx;
+ vm = ial->vm;
+ } else {
+ ial = NULL;
+ }
+
+ pthread_mutex_unlock(&map_mutex);
+
+ resp->response_type = RESP_REF;
+ resp->ref.referenced = ial;
+ alloc_info("<ref> [tid: %ld] ahnd: %p, ctx: %u, vm: %u"
+ ", referenced: %d, refcnt: %d->%d\n",
(long) req->tid, ial, ctx, vm,
- resp->close.is_empty, ret);
+ resp->ref.referenced, ret, ret + 1);
+ break;
+
+ case REQ_UNREF:
+ ial = from_user_pointer(req->allocator_handle);
+ ret = 0;
+
+ pthread_mutex_lock(&map_mutex);
+
+ if (__allocator_is_valid(ial)) {
+ ret = atomic_load(&ial->refcount);
+ released = __allocator_put(ial);
+ } else {
+ released = false;
+ ial = NULL;
+ }
+
+ if (released)
+ intel_allocator_destroy(ial);
+
+ pthread_mutex_unlock(&map_mutex);
+
+ resp->response_type = RESP_UNREF;
+ resp->unref.unreferenced = ial;
+ alloc_info("<unref> [tid: %ld] ahnd: %p"
+ ", unreferenced: %d, refcnt: %d->%d\n",
+ (long) req->tid, ial,
+ resp->unref.unreferenced, ret, ial ? ret - 1 : 0);
break;
case REQ_ADDRESS_RANGE:
@@ -494,7 +575,7 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
}
- if (req->request_type > REQ_CLOSE)
+ if (req->request_type > REQ_UNREF)
pthread_mutex_unlock(&ial->mutex);
return 0;
@@ -727,6 +808,7 @@ uint64_t intel_allocator_open_vm_full(int fd, uint32_t vm,
uint8_t allocator_type,
enum allocator_strategy strategy)
{
+ igt_assert(vm != 0);
return __intel_allocator_open_full(fd, 0, vm, start, end,
allocator_type, strategy);
}
@@ -781,6 +863,48 @@ bool intel_allocator_close(uint64_t allocator_handle)
return resp.close.is_empty;
}
+/**
+ * intel_allocator_ref:
+ * @allocator_handle: handle to the allocator to increment the reference
+ *
+ * Takes a reference on the allocator (increments its refcount).
+ *
+ * Returns: true if the reference was taken (the allocator exists),
+ * false otherwise.
+ */
+bool intel_allocator_ref(uint64_t allocator_handle)
+{
+ struct alloc_req req = { .request_type = REQ_REF,
+ .allocator_handle = allocator_handle };
+ struct alloc_resp resp;
+
+ igt_assert(handle_request(&req, &resp) == 0);
+ igt_assert(resp.response_type == RESP_REF);
+
+ return resp.ref.referenced;
+}
+
+/**
+ * intel_allocator_unref:
+ * @allocator_handle: handle to the allocator to decrement the reference
+ *
+ * Drops a reference on the allocator (decrements its refcount).
+ * The allocator is destroyed when the last reference is dropped.
+ *
+ * Returns: true if the reference was dropped (the allocator exists),
+ * false otherwise.
+ */
+bool intel_allocator_unref(uint64_t allocator_handle)
+{
+ struct alloc_req req = { .request_type = REQ_UNREF,
+ .allocator_handle = allocator_handle };
+ struct alloc_resp resp;
+
+ igt_assert(handle_request(&req, &resp) == 0);
+ igt_assert(resp.response_type == RESP_UNREF);
+
+ return resp.unref.unreferenced;
+}
+
/**
* intel_allocator_get_address_range:
* @allocator_handle: handle to an allocator
@@ -1053,6 +1177,15 @@ static bool equal_allocators(const void *key1, const void *key2)
{
const struct intel_allocator *a1 = key1, *a2 = key2;
+ alloc_debug("a1: %p, a2: %p\n", a1, a2);
+
+ return a1 == a2;
+}
+
+static bool equal_allocators_ctx(const void *key1, const void *key2)
+{
+ const struct intel_allocator *a1 = key1, *a2 = key2;
+
alloc_debug("a1: <fd: %d, ctx: %u>, a2 <fd: %d, ctx: %u>\n",
a1->fd, a1->ctx, a2->fd, a2->ctx);
@@ -1073,6 +1206,14 @@ static bool equal_allocators_vm(const void *key1, const void *key2)
#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
static inline uint64_t hash_allocators(const void *val, unsigned int bits)
+{
+ uint64_t hash = (uint64_t) val;
+
+ hash = hash * GOLDEN_RATIO_PRIME_64;
+ return hash >> (64 - bits);
+}
+
+static inline uint64_t hash_allocators_instance(const void *val, unsigned int bits)
{
uint64_t hash = ((struct intel_allocator *) val)->fd;
@@ -1080,7 +1221,7 @@ static inline uint64_t hash_allocators(const void *val, unsigned int bits)
return hash >> (64 - bits);
}
-static void __free_allocators(struct igt_map *map)
+static void __free_allocators(struct igt_map *map, bool destroy_ials)
{
struct igt_map_entry *pos;
struct intel_allocator *ial;
@@ -1089,7 +1230,7 @@ static void __free_allocators(struct igt_map *map)
if (!map)
return;
- if (map) {
+ if (map && destroy_ials) {
igt_map_for_each(map, i, pos) {
ial = pos->value;
ial->destroy(ial);
@@ -1114,18 +1255,25 @@ void intel_allocator_init(void)
allocator_pid = getpid();
alloc_info("Allocator pid: %ld\n", (long) allocator_pid);
- __free_allocators(allocators_map);
- __free_allocators(allocators_vm_map);
+ __free_allocators(allocators_map, false);
+ __free_allocators(allocators_ctx_map, true);
+ __free_allocators(allocators_vm_map, true);
allocators_map = calloc(sizeof(*allocators_map), 1);
igt_assert(allocators_map);
+ allocators_ctx_map = calloc(sizeof(*allocators_ctx_map), 1);
+ igt_assert(allocators_ctx_map);
+
allocators_vm_map = calloc(sizeof(*allocators_vm_map), 1);
igt_assert(allocators_vm_map);
- __igt_map_init(allocators_map, equal_allocators, hash_allocators, 8);
- __igt_map_init(allocators_vm_map, equal_allocators_vm,
+ __igt_map_init(allocators_map, equal_allocators,
hash_allocators, 8);
+ __igt_map_init(allocators_ctx_map, equal_allocators_ctx,
+ hash_allocators_instance, 8);
+ __igt_map_init(allocators_vm_map, equal_allocators_vm,
+ hash_allocators_instance, 8);
channel = intel_allocator_get_msgchannel(CHANNEL_SYSVIPC_MSGQUEUE);
}
diff --git a/lib/intel_allocator.h b/lib/intel_allocator.h
index 102e63570..5493891af 100644
--- a/lib/intel_allocator.h
+++ b/lib/intel_allocator.h
@@ -126,6 +126,8 @@ uint64_t intel_allocator_open_vm_full(int fd, uint32_t vm,
uint8_t allocator_type,
enum allocator_strategy strategy);
bool intel_allocator_close(uint64_t allocator_handle);
+bool intel_allocator_ref(uint64_t allocator_handle);
+bool intel_allocator_unref(uint64_t allocator_handle);
void intel_allocator_get_address_range(uint64_t allocator_handle,
uint64_t *startp, uint64_t *endp);
uint64_t intel_allocator_alloc(uint64_t allocator_handle, uint32_t handle,
diff --git a/lib/intel_allocator_msgchannel.h b/lib/intel_allocator_msgchannel.h
index 7be1a4646..b8968bd86 100644
--- a/lib/intel_allocator_msgchannel.h
+++ b/lib/intel_allocator_msgchannel.h
@@ -14,6 +14,8 @@ enum reqtype {
REQ_STOP,
REQ_OPEN,
REQ_CLOSE,
+ REQ_REF,
+ REQ_UNREF,
REQ_ADDRESS_RANGE,
REQ_ALLOC,
REQ_FREE,
@@ -27,6 +29,8 @@ enum reqtype {
enum resptype {
RESP_OPEN,
RESP_CLOSE,
+ RESP_REF,
+ RESP_UNREF,
RESP_ADDRESS_RANGE,
RESP_ALLOC,
RESP_FREE,
@@ -98,6 +102,14 @@ struct alloc_resp {
bool is_empty;
} close;
+ struct {
+ bool referenced;
+ } ref;
+
+ struct {
+ bool unreferenced;
+ } unref;
+
struct {
uint64_t start;
uint64_t end;
diff --git a/tests/i915/api_intel_allocator.c b/tests/i915/api_intel_allocator.c
index 8433e395c..8b33ac59d 100644
--- a/tests/i915/api_intel_allocator.c
+++ b/tests/i915/api_intel_allocator.c
@@ -544,10 +544,29 @@ static void execbuf_with_allocator(int fd)
igt_assert(intel_allocator_close(ahnd) == true);
}
+static void basic_check(int fd)
+{
+ uint64_t ialh;
+
+ igt_assert_eq(intel_allocator_close(0x123), false);
+ igt_assert_eq(intel_allocator_ref(0x123), false);
+ igt_assert_eq(intel_allocator_unref(0x123), false);
+
+ ialh = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+ igt_assert_eq(intel_allocator_ref(ialh), true);
+ igt_assert_eq(intel_allocator_ref(ialh), true);
+ igt_assert_eq(intel_allocator_ref(ialh), true);
+
+ igt_assert_eq(intel_allocator_unref(ialh), true);
+ igt_assert_eq(intel_allocator_close(ialh), false);
+ igt_assert_eq(intel_allocator_unref(ialh), true);
+ igt_assert_eq(intel_allocator_close(ialh), true);
+}
+
static void open_vm(int fd)
{
uint64_t ialh1, ialh2;
- uint64_t ialh3, ialh4;
+// uint64_t ialh3, ialh4;
ialh1 = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
ialh2 = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
@@ -555,16 +574,37 @@ static void open_vm(int fd)
igt_info("ial1: %llx\n", (long long) ialh1);
igt_info("ial2: %llx\n", (long long) ialh2);
- ialh3 = intel_allocator_open_vm(fd, 1, INTEL_ALLOCATOR_SIMPLE);
- ialh4 = intel_allocator_open_vm(fd, 1, INTEL_ALLOCATOR_SIMPLE);
+// ialh3 = intel_allocator_open_vm(fd, 1, INTEL_ALLOCATOR_SIMPLE);
+// ialh4 = intel_allocator_open_vm(fd, 1, INTEL_ALLOCATOR_SIMPLE);
- igt_info("ial3: %llx\n", (long long) ialh3);
- igt_info("ial4: %llx\n", (long long) ialh4);
+// igt_info("ial3: %llx\n", (long long) ialh3);
+// igt_info("ial4: %llx\n", (long long) ialh4);
intel_allocator_close(ialh1);
intel_allocator_close(ialh2);
- intel_allocator_close(ialh3);
- intel_allocator_close(ialh4);
+// intel_allocator_close(ialh3);
+// intel_allocator_close(ialh4);
+}
+
+
+
+static void ctx(int fd)
+{
+ uint32_t ctx, vmid0, vmid1;
+ struct drm_i915_gem_context_param arg = {
+ .param = I915_CONTEXT_PARAM_VM,
+ };
+
+ ctx = gem_context_create(fd);
+ igt_info("Ctx: %u\n", ctx);
+
+ arg.ctx_id = ctx;
+ gem_context_get_param(fd, &arg);
+ igt_info("vmid: %u\n", (uint32_t) arg.value);
+
+ arg.ctx_id = 0;
+ gem_context_get_param(fd, &arg);
+ igt_info("vmid: %u\n", (uint32_t) arg.value);
}
struct allocators {
@@ -650,9 +690,15 @@ igt_main
igt_subtest_f("execbuf-with-allocator")
execbuf_with_allocator(fd);
+ igt_subtest_f("basic-check")
+ basic_check(fd);
+
igt_subtest_f("open-vm")
open_vm(fd);
+ igt_subtest_f("ctx")
+ ctx(fd);
+
igt_fixture
close(fd);
}
--
2.26.0