[PATCH i-g-t v4 09/40] SQUASH: lib/intel_allocator: Allow user to define vm range
Zbigniew Kempczyński
zbigniew.kempczynski at intel.com
Mon Feb 15 19:43:09 UTC 2021
Add intel_allocator_open_full() to allow the user to pass a vm range.
Add an allocation strategy (NONE, LOW_TO_HIGH, HIGH_TO_LOW) which is
passed to the allocator backend.
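
For illustration, a minimal usage sketch (the fd/ctx values and the
<0x1000, 0x10000) range below are hypothetical, not taken from any
existing test):

	uint64_t ahnd;

	/* Open a SIMPLE allocator confined to <0x1000, 0x10000),
	 * handing out the lowest addresses first.
	 */
	ahnd = intel_allocator_open_full(fd, ctx, 0x1000, 0x10000,
					 INTEL_ALLOCATOR_SIMPLE,
					 ALLOC_STRATEGY_LOW_TO_HIGH);
	...
	intel_allocator_close(ahnd);

Passing start == end == 0 falls back to the default range used so far
by intel_allocator_simple_create().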
Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
---
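Not part of the commit message: with this patch intel_allocator_open()
just forwards to the _full() variant, so the two calls below should be
equivalent (fd/ctx are placeholders):

	ahnd = intel_allocator_open(fd, ctx, INTEL_ALLOCATOR_SIMPLE);

	ahnd = intel_allocator_open_full(fd, ctx, 0, 0,
					 INTEL_ALLOCATOR_SIMPLE,
					 ALLOC_STRATEGY_HIGH_TO_LOW);
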
lib/intel_allocator.c | 139 +++++++++++++++++++++++--------
lib/intel_allocator.h | 11 +++
lib/intel_allocator_msgchannel.h | 4 +
3 files changed, 120 insertions(+), 34 deletions(-)
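
As a reviewer aid, a rough sketch of the ordering the SIMPLE strategies
are expected to give (assumed behaviour based on the docs added below;
the handle value 1, the range and the sizes are illustrative only):

	uint64_t ahnd, lo, hi;

	ahnd = intel_allocator_open_full(fd, ctx, 0, 0x10000,
					 INTEL_ALLOCATOR_SIMPLE,
					 ALLOC_STRATEGY_LOW_TO_HIGH);
	/* First offset should land near the bottom of the range... */
	lo = intel_allocator_alloc(ahnd, 1, 0x1000, 0x1000);
	/* Close must drop the last reference here, otherwise reopening
	 * with another strategy trips the strategy-equality assert.
	 */
	intel_allocator_close(ahnd);

	ahnd = intel_allocator_open_full(fd, ctx, 0, 0x10000,
					 INTEL_ALLOCATOR_SIMPLE,
					 ALLOC_STRATEGY_HIGH_TO_LOW);
	/* ...and near the top with the opposite strategy. */
	hi = intel_allocator_alloc(ahnd, 1, 0x1000, 0x1000);
	intel_allocator_close(ahnd);

	igt_assert(lo < hi);
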
diff --git a/lib/intel_allocator.c b/lib/intel_allocator.c
index 186608069..4d053c381 100644
--- a/lib/intel_allocator.c
+++ b/lib/intel_allocator.c
@@ -46,6 +46,10 @@ static inline const char *reqstr(enum reqtype request_type)
struct intel_allocator *intel_allocator_random_create(int fd, uint32_t ctx);
struct intel_allocator *intel_allocator_simple_create(int fd, uint32_t ctx);
+struct intel_allocator *
+intel_allocator_simple_create_full(int fd, uint32_t ctx,
+ uint64_t start, uint64_t end,
+ enum allocator_strategy strategy);
static struct igt_map *allocators_map;
static pthread_mutex_t map_mutex = PTHREAD_MUTEX_INITIALIZER;
@@ -97,7 +101,9 @@ static int recv_resp(struct msg_channel *msgchan,
}
static struct intel_allocator *intel_allocator_create(int fd, uint32_t ctx,
- uint8_t allocator_type)
+ uint64_t start, uint64_t end,
+ uint8_t allocator_type,
+ uint8_t allocator_strategy)
{
struct intel_allocator *ial = NULL;
@@ -119,7 +125,12 @@ static struct intel_allocator *intel_allocator_create(int fd, uint32_t ctx,
ial = intel_allocator_random_create(fd, ctx);
break;
case INTEL_ALLOCATOR_SIMPLE:
- ial = intel_allocator_simple_create(fd, ctx);
+ if (!start && !end)
+ ial = intel_allocator_simple_create(fd, ctx);
+ else
+ ial = intel_allocator_simple_create_full(fd, ctx,
+ start, end,
+ allocator_strategy);
break;
default:
igt_assert_f(ial, "Allocator type %d not implemented\n",
@@ -128,6 +139,7 @@ static struct intel_allocator *intel_allocator_create(int fd, uint32_t ctx,
}
ial->type = allocator_type;
+ ial->strategy = allocator_strategy;
atomic_fetch_add(&ial->refcount, 1);
pthread_mutex_init(&ial->mutex, NULL);
@@ -186,21 +198,27 @@ static bool __allocator_put(struct intel_allocator *ial)
}
static struct intel_allocator *allocator_open(int fd, uint32_t ctx,
- uint8_t allocator_type)
+ uint64_t start, uint64_t end,
+ uint8_t allocator_type,
+ uint8_t allocator_strategy)
{
struct intel_allocator *ial;
pthread_mutex_lock(&map_mutex);
ial = __allocator_get(fd, ctx);
if (!ial) {
- alloc_debug("Allocator fd: %d, ctx: %u not found, creating one\n",
- fd, ctx);
- ial = intel_allocator_create(fd, ctx, allocator_type);
+ alloc_debug("Allocator fd: %d, ctx: %u, <0x%llx : 0x%llx> "
+ "not found, creating one\n",
+ fd, ctx, (long long) start, (long long) end);
+ ial = intel_allocator_create(fd, ctx, start, end,
+ allocator_type, allocator_strategy);
}
pthread_mutex_unlock(&map_mutex);
igt_assert_f(ial->type == allocator_type,
- "Allocator must be same type for fd/ctx\n");
+ "Allocator type must be same for fd/ctx\n");
+ igt_assert_f(ial->strategy == allocator_strategy,
+ "Allocator strategy must be same or fd/ctx\n");
return ial;
}
@@ -281,7 +299,9 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
case REQ_OPEN:
ial = allocator_open(req->open.fd, req->open.ctx,
- req->open.allocator_type);
+ req->open.start, req->open.end,
+ req->open.allocator_type,
+ req->open.allocator_strategy);
igt_assert(ial);
resp->response_type = RESP_OPEN;
@@ -309,9 +329,9 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
ial->get_address_range(ial, &start, &end);
resp->address_range.start = start;
resp->address_range.end = end;
- alloc_info("<address range> [tid: %ld] start: %" PRIx64
- ", end: %" PRId64 "\n", (long) req->tid,
- start, end);
+ alloc_info("<address range> [tid: %ld] "
+ "start: 0x%" PRIx64 ", end: 0x%" PRId64 "\n",
+ (long) req->tid, start, end);
break;
case REQ_ALLOC:
@@ -320,9 +340,11 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
req->alloc.handle,
req->alloc.size,
req->alloc.alignment);
- alloc_info("<alloc> [tid: %ld] handle: %u, offset: %" PRIx64
- ", alignment: %" PRIx64 "\n",
- (long) req->tid, req->alloc.handle,
+ alloc_info("<alloc> [tid: %ld] handle: %u, "
+ "size: 0x%" PRIx64 ", offset: 0x%" PRIx64
+ ", alignment: 0x%" PRIx64 "\n",
+ (long) req->tid,
+ req->alloc.handle, req->alloc.size,
resp->alloc.offset, req->alloc.alignment);
break;
@@ -330,7 +352,8 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
resp->response_type = RESP_FREE;
resp->free.freed = ial->free(ial, req->free.handle);
alloc_info("<free> [tid: %ld] handle: %u, freed: %d\n",
- (long) req->tid, req->free.handle, resp->free.freed);
+ (long) req->tid, req->free.handle,
+ resp->free.freed);
break;
case REQ_IS_ALLOCATED:
@@ -340,7 +363,7 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
req->is_allocated.size,
req->is_allocated.offset);
resp->is_allocated.allocated = allocated;
- alloc_info("<is allocated> [tid: %ld] offset: %" PRIx64
+ alloc_info("<is allocated> [tid: %ld] offset: 0x%" PRIx64
", allocated: %d\n", (long) req->tid,
req->is_allocated.offset, allocated);
break;
@@ -352,8 +375,9 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
req->reserve.start,
req->reserve.end);
resp->reserve.reserved = reserved;
- alloc_info("<reserve> [tid: %ld] handle: %u, start: %" PRIx64
- ", end: %" PRIx64 ", reserved: %d\n",
+ alloc_info("<reserve> [tid: %ld] handle: %u, "
+ "start: 0x%" PRIx64 ", end: 0x%" PRIx64
+ ", reserved: %d\n",
(long) req->tid, req->reserve.handle,
req->reserve.start, req->reserve.end, reserved);
break;
@@ -365,8 +389,9 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
req->unreserve.start,
req->unreserve.end);
resp->unreserve.unreserved = unreserved;
- alloc_info("<unreserve> [tid: %ld] handle: %u, start: %" PRIx64
- ", end: %" PRIx64 ", unreserved: %d\n",
+ alloc_info("<unreserve> [tid: %ld] handle: %u, "
+ "start: 0x%" PRIx64 ", end: 0x%" PRIx64
+ ", unreserved: %d\n",
(long) req->tid, req->unreserve.handle,
req->unreserve.start, req->unreserve.end,
unreserved);
@@ -378,8 +403,9 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
req->is_reserved.start,
req->is_reserved.end);
resp->is_reserved.reserved = reserved;
- alloc_info("<is reserved> [tid: %ld] start: %" PRIx64
- ", end: %" PRIx64 ", reserved: %d\n",
+ alloc_info("<is reserved> [tid: %ld] "
+ "start: 0x%" PRIx64 ", end: 0x%" PRIx64
+ ", reserved: %d\n",
(long) req->tid, req->is_reserved.start,
req->is_reserved.end, reserved);
break;
@@ -392,9 +418,10 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
size, req->reserve.start);
if (allocated) {
resp->reserve_if_not_allocated.allocated = allocated;
- alloc_info("<reserve if not allocated> [tid: %ld] handle: %u "
- "size: %lx, start: %" PRIx64
- ", end: %" PRIx64 ", allocated: %d, reserved: %d\n",
+ alloc_info("<reserve if not allocated> [tid: %ld] "
+ "handle: %u, size: 0x%lx, "
+ "start: 0x%" PRIx64 ", end: 0x%" PRIx64
+ ", allocated: %d, reserved: %d\n",
(long) req->tid, req->reserve.handle,
(long) size, req->reserve.start,
req->reserve.end, allocated, false);
@@ -406,9 +433,9 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
req->reserve.start,
req->reserve.end);
resp->reserve_if_not_allocated.reserved = reserved;
- alloc_info("<reserve if not allocated> [tid: %ld] handle: %u"
- ", start: %" PRIx64
- ", end: %" PRIx64 ", allocated: %d, reserved: %d\n",
+ alloc_info("<reserve if not allocated> [tid: %ld] "
+ "handle: %u, start: 0x%" PRIx64 ", end: 0x%" PRIx64
+ ", allocated: %d, reserved: %d\n",
(long) req->tid, req->reserve.handle,
req->reserve.start, req->reserve.end,
false, reserved);
@@ -535,23 +562,45 @@ void intel_allocator_multiprocess_stop(void)
}
/**
- * intel_allocator_open:
+ * intel_allocator_open_full:
* @fd: i915 descriptor
* @ctx: context
+ * @start: start of the vm range from which allocations will be made
+ * @end: end of the vm range (exclusive)
* @allocator_type: one of INTEL_ALLOCATOR_* define
+ * @strategy: passed to the allocator to define the strategy (like order
+ * of allocation, see notes below).
*
- * Function opens an allocator instance for given @fd and @ctx and returns
- * its handle. If the allocator for such pair doesn't exist it is created
- * with refcount = 1. Parallel opens returns same handle bumping its refcount.
+ * Function opens an allocator instance within the <@start, @end) vm range
+ * for the given @fd and @ctx and returns its handle. If an allocator for
+ * such a pair doesn't exist, it is created with refcount = 1.
+ * Parallel opens return the same handle, bumping its refcount.
*
* Returns: unique handle to the currently opened allocator.
+ *
+ * Notes:
+ * Strategy is generally used internally by the underlying allocator:
+ *
+ * For SIMPLE allocator:
+ * - ALLOC_STRATEGY_HIGH_TO_LOW means topmost addresses are allocated first,
+ * - ALLOC_STRATEGY_LOW_TO_HIGH is the opposite, allocation starts from the
+ *   lowest addresses.
+ *
+ * For RANDOM allocator:
+ * - no strategy is currently implemented.
*/
-uint64_t intel_allocator_open(int fd, uint32_t ctx, uint8_t allocator_type)
+uint64_t intel_allocator_open_full(int fd, uint32_t ctx,
+ uint64_t start, uint64_t end,
+ uint8_t allocator_type,
+ enum allocator_strategy strategy)
{
struct alloc_req req = { .request_type = REQ_OPEN,
.open.fd = fd,
.open.ctx = ctx,
- .open.allocator_type = allocator_type };
+ .open.start = start,
+ .open.end = end,
+ .open.allocator_type = allocator_type,
+ .open.allocator_strategy = strategy };
struct alloc_resp resp;
/* Get child_tid only once at open() */
@@ -565,6 +614,28 @@ uint64_t intel_allocator_open(int fd, uint32_t ctx, uint8_t allocator_type)
return resp.open.allocator_handle;
}
+/**
+ * intel_allocator_open:
+ * @fd: i915 descriptor
+ * @ctx: context
+ * @allocator_type: one of INTEL_ALLOCATOR_* define
+ *
+ * Function opens an allocator instance for the given @fd and @ctx and returns
+ * its handle. If an allocator for such a pair doesn't exist, it is created
+ * with refcount = 1. Parallel opens return the same handle, bumping its
+ * refcount.
+ *
+ * Returns: unique handle to the currently opened allocator.
+ *
+ * Notes: we pass ALLOC_STRATEGY_HIGH_TO_LOW as the default; exercising higher
+ * addresses makes it easier to find addressing issues (like passing
+ * non-canonical offsets, which won't be caught unless bit 47 is set).
+ */
+uint64_t intel_allocator_open(int fd, uint32_t ctx, uint8_t allocator_type)
+{
+ return intel_allocator_open_full(fd, ctx, 0, 0, allocator_type,
+ ALLOC_STRATEGY_HIGH_TO_LOW);
+}
+
/**
* intel_allocator_close:
* @allocator_handle: handle to the allocator that will be closed
diff --git a/lib/intel_allocator.h b/lib/intel_allocator.h
index c06e3032e..d26816c99 100644
--- a/lib/intel_allocator.h
+++ b/lib/intel_allocator.h
@@ -70,10 +70,17 @@
*
*/
+enum allocator_strategy {
+ ALLOC_STRATEGY_NONE,
+ ALLOC_STRATEGY_LOW_TO_HIGH,
+ ALLOC_STRATEGY_HIGH_TO_LOW
+};
+
struct intel_allocator {
int fd;
uint32_t ctx;
uint8_t type;
+ enum allocator_strategy strategy;
_Atomic(int32_t) refcount;
pthread_mutex_t mutex;
@@ -106,6 +113,10 @@ void intel_allocator_multiprocess_start(void);
void intel_allocator_multiprocess_stop(void);
uint64_t intel_allocator_open(int fd, uint32_t ctx, uint8_t allocator_type);
+uint64_t intel_allocator_open_full(int fd, uint32_t ctx,
+ uint64_t start, uint64_t end,
+ uint8_t allocator_type,
+ enum allocator_strategy strategy);
bool intel_allocator_close(uint64_t allocator_handle);
void intel_allocator_get_address_range(uint64_t allocator_handle,
uint64_t *startp, uint64_t *endp);
diff --git a/lib/intel_allocator_msgchannel.h b/lib/intel_allocator_msgchannel.h
index ef3568407..ad5a9e901 100644
--- a/lib/intel_allocator_msgchannel.h
+++ b/lib/intel_allocator_msgchannel.h
@@ -48,7 +48,10 @@ struct alloc_req {
struct {
int fd;
uint32_t ctx;
+ uint64_t start;
+ uint64_t end;
uint8_t allocator_type;
+ uint8_t allocator_strategy;
} open;
struct {
@@ -97,6 +100,7 @@ struct alloc_resp {
struct {
uint64_t start;
uint64_t end;
+ uint8_t direction;
} address_range;
struct {
--
2.26.0