[igt-dev] [PATCH i-g-t 06/24] lib/intel_allocator: Add intel_allocator core

Zbigniew Kempczyński zbigniew.kempczynski at intel.com
Thu Oct 22 09:58:49 UTC 2020
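
Since relocations are being abandoned for newer gens, tests have to manage
GPU virtual addresses themselves. Add the intel_allocator core: a registry
of per-fd/ctx, refcounted allocator instances (simple or random backends),
plus a message channel and an allocator thread so that child processes can
allocate through the main IGT process.

A minimal sketch of the basic flow from a test's point of view (values are
illustrative, error handling omitted):

	int fd = drm_open_driver(DRIVER_INTEL);
	uint32_t handle = gem_create(fd, 4096);
	uint64_t ahnd, offset;

	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
	offset = intel_allocator_alloc(ahnd, handle, 4096, 4096);
	...
	intel_allocator_free(ahnd, handle);
	intel_allocator_close(ahnd);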


Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
Signed-off-by: Dominik Grzegorzek <dominik.grzegorzek at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Petri Latvala <petri.latvala at intel.com>
---
 .../igt-gpu-tools/igt-gpu-tools-docs.xml      |   1 +
 lib/igt_core.c                                |   2 +
 lib/intel_allocator.c                         | 939 ++++++++++++++++++
 lib/intel_allocator.h                         | 136 +++
 lib/intel_allocator_msgchannel.c              | 182 ++++
 lib/intel_allocator_msgchannel.h              | 140 +++
 lib/intel_allocator_simple.c                  |   2 -
 lib/meson.build                               |   4 +
 8 files changed, 1404 insertions(+), 2 deletions(-)
 create mode 100644 lib/intel_allocator.c
 create mode 100644 lib/intel_allocator.h
 create mode 100644 lib/intel_allocator_msgchannel.c
 create mode 100644 lib/intel_allocator_msgchannel.h

diff --git a/docs/reference/igt-gpu-tools/igt-gpu-tools-docs.xml b/docs/reference/igt-gpu-tools/igt-gpu-tools-docs.xml
index bf5ac542..192d1df7 100644
--- a/docs/reference/igt-gpu-tools/igt-gpu-tools-docs.xml
+++ b/docs/reference/igt-gpu-tools/igt-gpu-tools-docs.xml
@@ -43,6 +43,7 @@
     <xi:include href="xml/igt_vc4.xml"/>
     <xi:include href="xml/igt_vgem.xml"/>
     <xi:include href="xml/igt_x86.xml"/>
+    <xi:include href="xml/intel_allocator.xml"/>
     <xi:include href="xml/intel_batchbuffer.xml"/>
     <xi:include href="xml/intel_bufops.xml"/>
     <xi:include href="xml/intel_chipset.xml"/>
diff --git a/lib/igt_core.c b/lib/igt_core.c
index 1f725d00..86653abc 100644
--- a/lib/igt_core.c
+++ b/lib/igt_core.c
@@ -1414,6 +1414,8 @@ static void exit_subtest(const char *result)
 	}
 	num_test_children = 0;
 
+	intel_allocator_init();
+
 	if (!in_dynamic_subtest)
 		_igt_dynamic_tests_executed = -1;
 
diff --git a/lib/intel_allocator.c b/lib/intel_allocator.c
new file mode 100644
index 00000000..b0ae1d8b
--- /dev/null
+++ b/lib/intel_allocator.c
@@ -0,0 +1,939 @@
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ipc.h>
+#include <sys/msg.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include "igt.h"
+#include "igt_map.h"
+#include "intel_allocator.h"
+#include "intel_allocator_msgchannel.h"
+
+//#define ALLOCDBG
+#ifdef ALLOCDBG
+#define alloc_info igt_info
+#define alloc_debug igt_debug
+static const char *reqtype_str[] = {
+	[REQ_STOP]		= "stop",
+	[REQ_OPEN]		= "open",
+	[REQ_CLOSE]		= "close",
+	[REQ_ADDRESS_RANGE]	= "address range",
+	[REQ_ALLOC]		= "alloc",
+	[REQ_FREE]		= "free",
+	[REQ_IS_ALLOCATED]	= "is allocated",
+	[REQ_RESERVE]		= "reserve",
+	[REQ_UNRESERVE]		= "unreserve",
+	[REQ_RESERVE_IF_NOT_ALLOCATED] = "reserve-ina",
+	[REQ_IS_RESERVED]	= "is reserved",
+};
+static inline const char *reqstr(enum reqtype request_type)
+{
+	igt_assert(request_type >= REQ_STOP && request_type <= REQ_IS_RESERVED);
+	return reqtype_str[request_type];
+}
+#else
+#define alloc_info(...) {}
+#define alloc_debug(...) {}
+#endif
+
+struct intel_allocator *intel_allocator_random_create(int fd, uint32_t ctx);
+struct intel_allocator *intel_allocator_simple_create(int fd, uint32_t ctx);
+
+static struct igt_map *allocators_map;
+static pthread_mutex_t map_mutex = PTHREAD_MUTEX_INITIALIZER;
+static bool multiprocess;
+static pthread_t allocator_thread;
+
+static bool warn_if_not_empty;
+
+/* For allocator purposes we need to track pid/tid */
+static pid_t allocator_pid = -1;
+extern pid_t child_pid;
+extern __thread pid_t child_tid;
+
+static struct msg_channel *channel;
+
+static int send_alloc_stop(struct msg_channel *msgchan)
+{
+	struct alloc_req req = {0};
+
+	req.request_type = REQ_STOP;
+
+	return msgchan->send_req(msgchan, &req);
+}
+
+static int send_req(struct msg_channel *msgchan, pid_t tid,
+		    struct alloc_req *request)
+{
+	request->tid = tid;
+	return msgchan->send_req(msgchan, request);
+}
+
+static int recv_req(struct msg_channel *msgchan, struct alloc_req *request)
+{
+	return msgchan->recv_req(msgchan, request);
+}
+
+static int send_resp(struct msg_channel *msgchan,
+		     pid_t tid, struct alloc_resp *response)
+{
+	response->tid = tid;
+	return msgchan->send_resp(msgchan, response);
+}
+
+static int recv_resp(struct msg_channel *msgchan,
+		     pid_t tid, struct alloc_resp *response)
+{
+	response->tid = tid;
+	return msgchan->recv_resp(msgchan, response);
+}
+
+static struct intel_allocator *intel_allocator_create(int fd, uint32_t ctx,
+						      uint8_t allocator_type)
+{
+	struct intel_allocator *ial;
+
+	switch (allocator_type) {
+	case INTEL_ALLOCATOR_NONE:
+		igt_assert_f(allocator_type != INTEL_ALLOCATOR_NONE,
+			     "Bug, trying to use allocator with relocations");
+		break;
+	case INTEL_ALLOCATOR_RANDOM:
+		ial = intel_allocator_random_create(fd, ctx);
+		break;
+	case INTEL_ALLOCATOR_SIMPLE:
+		ial = intel_allocator_simple_create(fd, ctx);
+		break;
+	default:
+		igt_assert_f(false, "Allocator type %d not implemented\n",
+			     allocator_type);
+		break;
+	}
+
+	ial->type = allocator_type;
+	atomic_fetch_add(&ial->refcount, 1);
+	pthread_mutex_init(&ial->mutex, NULL);
+
+	igt_map_add(allocators_map, ial, ial);
+
+	return ial;
+}
+
+static void intel_allocator_destroy(struct intel_allocator *ial)
+{
+	alloc_info("Destroying allocator (empty: %d)\n",
+		   ial->is_empty(ial));
+
+	ial->destroy(ial);
+}
+
+static struct intel_allocator *__allocator_get(int fd, uint32_t ctx)
+{
+	struct intel_allocator *ial, ials = { .fd = fd, .ctx = ctx };
+	int refcount;
+
+	ial = igt_map_find(allocators_map, &ials);
+	if (!ial)
+		goto out_get;
+
+	refcount = atomic_fetch_add(&ial->refcount, 1);
+	igt_assert(refcount > 0);
+
+out_get:
+
+	return ial;
+}
+
+static bool __allocator_put(struct intel_allocator *ial)
+{
+	struct intel_allocator ials = { .fd = ial->fd, .ctx = ial->ctx };
+	bool released = false;
+	int refcount;
+
+	ial = igt_map_find(allocators_map, &ials);
+	igt_assert(ial);
+
+	refcount = atomic_fetch_sub(&ial->refcount, 1);
+	alloc_debug("Refcount: %d\n", refcount);
+	igt_assert(refcount >= 1);
+	if (refcount == 1) {
+		igt_map_del(allocators_map, ial);
+
+		if (!ial->is_empty(ial) && warn_if_not_empty)
+			igt_warn("Allocator not clear before destroy!\n");
+
+		released = true;
+	}
+
+	return released;
+}
+
+static struct intel_allocator *allocator_open(int fd, uint32_t ctx,
+					      uint8_t allocator_type)
+{
+	struct intel_allocator *ial;
+
+	pthread_mutex_lock(&map_mutex);
+
+	ial = __allocator_get(fd, ctx);
+	if (ial) {
+		if (ial->type != allocator_type)
+			pthread_mutex_unlock(&map_mutex);
+		igt_assert_f(ial->type == allocator_type,
+			     "Allocator must be same type for fd/ctx\n");
+	} else {
+		alloc_debug("Allocator fd: %d, ctx: %u not found, creating one\n",
+			    fd, ctx);
+		ial = intel_allocator_create(fd, ctx, allocator_type);
+	}
+
+	pthread_mutex_unlock(&map_mutex);
+
+	return ial;
+}
+
+static bool allocator_close(uint64_t allocator_handle)
+{
+	struct intel_allocator *ial = from_user_pointer(allocator_handle);
+	bool released, is_empty = false;
+
+	igt_assert(ial);
+
+	pthread_mutex_lock(&map_mutex);
+
+	released = __allocator_put(ial);
+	if (released) {
+		is_empty = ial->is_empty(ial);
+		intel_allocator_destroy(ial);
+	}
+
+	pthread_mutex_unlock(&map_mutex);
+
+	return is_empty;
+}
+
+static int send_req_recv_resp(struct msg_channel *msgchan,
+			      struct alloc_req *request,
+			      struct alloc_resp *response)
+{
+	int ret;
+
+	ret = send_req(msgchan, child_tid, request);
+	if (ret < 0) {
+		igt_warn("Error sending request [type: %d]: err = %d [%s]\n",
+			 request->request_type, errno, strerror(errno));
+
+		return ret;
+	}
+
+	ret = recv_resp(msgchan, child_tid, response);
+	if (ret < 0)
+		igt_warn("Error receiving response [type: %d]: err = %d [%s]\n",
+			 request->request_type, errno, strerror(errno));
+
+	/*
+	 * The main assumption here is that a successfully received message
+	 * has size > 0; if that is fulfilled we return 0 as a success.
+	 */
+	if (ret > 0)
+		ret = 0;
+
+	return ret;
+}
+
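+/*
+ * In the main process a request is handled directly (under the allocator
+ * mutex); in a child it is forwarded over the message channel to the
+ * allocator thread and we wait for its response.
+ */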
+static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
+{
+	bool same_process = child_pid == -1;
+	int ret;
+
+	memset(resp, 0, sizeof(*resp));
+
+	if (same_process) {
+		struct intel_allocator *ial;
+		uint64_t start, end, size;
+		bool allocated, reserved, unreserved;
+
+		/* The mutex protects an allocator instance, not stop/open/close */
+		if (req->request_type > REQ_CLOSE) {
+			ial = from_user_pointer(req->allocator_handle);
+			igt_assert(ial);
+
+			pthread_mutex_lock(&ial->mutex);
+		}
+
+		switch (req->request_type) {
+		case REQ_STOP:
+			alloc_info("<stop>\n");
+			break;
+
+		case REQ_OPEN:
+			ial = allocator_open(req->open.fd, req->open.ctx,
+					     req->open.allocator_type);
+			igt_assert(ial);
+
+			resp->response_type = RESP_OPEN;
+			resp->open.allocator_handle = to_user_pointer(ial);
+			alloc_info("<open> [tid: %ld] fd: %d, ctx: %u, alloc_type: %u, "
+				   "ahnd: %p, refcnt: %d\n",
+				   (long) req->tid, req->open.fd, req->open.ctx,
+				   req->open.allocator_type, ial,
+				   atomic_load(&ial->refcount));
+			break;
+
+		case REQ_CLOSE:
+			ial = from_user_pointer(req->allocator_handle);
+			igt_assert(ial);
+
+			resp->response_type = RESP_CLOSE;
+			ret = atomic_load(&ial->refcount);
+			resp->close.is_empty = allocator_close(req->allocator_handle);
+			alloc_info("<close> [tid: %ld] ahnd: %p, is_empty: %d, refcnt: %d\n",
+				   (long) req->tid, ial, resp->close.is_empty, ret);
+			break;
+
+		case REQ_ADDRESS_RANGE:
+			resp->response_type = RESP_ADDRESS_RANGE;
+			ial->get_address_range(ial, &start, &end);
+			resp->address_range.start = start;
+			resp->address_range.end = end;
+			alloc_info("<address range> [tid: %ld] start: %" PRIx64
+				   ", end: %" PRId64 "\n", (long) req->tid,
+				   start, end);
+			break;
+
+		case REQ_ALLOC:
+			resp->response_type = RESP_ALLOC;
+			resp->alloc.offset = ial->alloc(ial,
+							req->alloc.handle,
+							req->alloc.size,
+							req->alloc.alignment);
+			alloc_info("<alloc> [tid: %ld] handle: %u, offset: %" PRIx64
+				   ", alignment: %" PRIx64 "\n",
+				   (long) req->tid, req->alloc.handle,
+				   resp->alloc.offset, req->alloc.alignment);
+			break;
+
+		case REQ_FREE:
+			resp->response_type = RESP_FREE;
+			resp->free.freed = ial->free(ial, req->free.handle);
+			alloc_info("<free> [tid: %ld] handle: %u, freed: %d\n",
+				   (long) req->tid, req->free.handle, resp->free.freed);
+			break;
+
+		case REQ_IS_ALLOCATED:
+			resp->response_type = RESP_IS_ALLOCATED;
+			allocated = ial->is_allocated(ial,
+						      req->is_allocated.handle,
+						      req->is_allocated.size,
+						      req->is_allocated.offset);
+			resp->is_allocated.allocated = allocated;
+			alloc_info("<is allocated> [tid: %ld] offset: %" PRIx64
+				   ", allocated: %d\n", (long) req->tid,
+				   req->is_allocated.offset, allocated);
+			break;
+
+		case REQ_RESERVE:
+			resp->response_type = RESP_RESERVE;
+			reserved = ial->reserve(ial,
+						req->reserve.handle,
+						req->reserve.start,
+						req->reserve.end);
+			resp->reserve.reserved = reserved;
+			alloc_info("<reserve> [tid: %ld] handle: %u, start: %" PRIx64
+				   ", end: %" PRIx64 ", reserved: %d\n",
+				   (long) req->tid, req->reserve.handle,
+				   req->reserve.start, req->reserve.end, reserved);
+			break;
+
+		case REQ_UNRESERVE:
+			resp->response_type = RESP_UNRESERVE;
+			unreserved = ial->unreserve(ial,
+						    req->unreserve.handle,
+						    req->unreserve.start,
+						    req->unreserve.end);
+			resp->unreserve.unreserved = unreserved;
+			alloc_info("<unreserve> [tid: %ld] handle: %u, start: %" PRIx64
+				   ", end: %" PRIx64 ", unreserved: %d\n",
+				   (long) req->tid, req->unreserve.handle,
+				   req->unreserve.start, req->unreserve.end,
+				   unreserved);
+			break;
+
+		case REQ_IS_RESERVED:
+			resp->response_type = RESP_IS_RESERVED;
+			reserved = ial->is_reserved(ial,
+						    req->is_reserved.start,
+						    req->is_reserved.end);
+			resp->is_reserved.reserved = reserved;
+			alloc_info("<is reserved> [tid: %ld] start: %" PRIx64
+				   ", end: %" PRIx64 ", reserved: %d\n",
+				   (long) req->tid, req->is_reserved.start,
+				   req->is_reserved.end, reserved);
+			break;
+
+		case REQ_RESERVE_IF_NOT_ALLOCATED:
+			resp->response_type = RESP_RESERVE_IF_NOT_ALLOCATED;
+			size = DECANONICAL(req->reserve.end) - DECANONICAL(req->reserve.start);
+
+			allocated = ial->is_allocated(ial, req->reserve.handle,
+						      size, req->reserve.start);
+			if (allocated) {
+				resp->reserve_if_not_allocated.allocated = allocated;
+				alloc_info("<reserve if not allocated> [tid: %ld] handle: %u "
+					   "size: %lx, start: %" PRIx64
+					   ", end: %" PRIx64 ", allocated: %d, reserved: %d\n",
+					   (long) req->tid, req->reserve.handle,
+					   (long) size, req->reserve.start,
+					   req->reserve.end, allocated, false);
+				break;
+			}
+
+			reserved = ial->reserve(ial,
+						req->reserve.handle,
+						req->reserve.start,
+						req->reserve.end);
+			resp->reserve_if_not_allocated.reserved = reserved;
+			alloc_info("<reserve if not allocated> [tid: %ld] handle: %u"
+				   ", start: %" PRIx64
+				   ", end: %" PRIx64 ", allocated: %d, reserved: %d\n",
+				   (long) req->tid, req->reserve.handle,
+				   req->reserve.start, req->reserve.end,
+				   false, reserved);
+			break;
+
+		}
+
+		if (req->request_type > REQ_CLOSE)
+			pthread_mutex_unlock(&ial->mutex);
+
+		return 0;
+	}
+
+	ret = send_req_recv_resp(channel, req, resp);
+
+	if (ret < 0)
+		exit(0);
+
+	return ret;
+}
+
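+/*
+ * Temporarily ignore the signal ourselves and deliver it to the whole
+ * process group, i.e. to all forked children.
+ */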
+static void kill_children(int sig)
+{
+	signal(sig, SIG_IGN);
+	kill(-getpgrp(), sig);
+	signal(sig, SIG_DFL);
+}
+
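+/*
+ * Allocator thread loop: receive a request, handle it locally and send the
+ * response back to the requesting tid. A REQ_STOP message ends the loop.
+ */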
+static void *allocator_thread_loop(void *data)
+{
+	struct alloc_req req;
+	struct alloc_resp resp;
+	int ret;
+	(void) data;
+
+	alloc_info("Allocator pid: %ld, tid: %ld\n",
+		   (long) allocator_pid, (long) gettid());
+	alloc_info("Entering allocator loop\n");
+
+	while (1) {
+		ret = recv_req(channel, &req);
+
+		if (ret == -1) {
+			igt_warn("Error receiving request in thread, ret = %d [%s]\n",
+				 ret, strerror(errno));
+			kill_children(SIGINT);
+			return (void *) -1;
+		}
+
+		/* Fake message to stop the thread */
+		if (req.request_type == REQ_STOP) {
+			alloc_info("<stop request>\n");
+			break;
+		}
+
+		ret = handle_request(&req, &resp);
+		if (ret) {
+			igt_warn("Error handling request in thread, ret = %d [%s]\n",
+				 ret, strerror(errno));
+			break;
+		}
+
+		ret = send_resp(channel, req.tid, &resp);
+		if (ret) {
+			igt_warn("Error sending response in thread, ret = %d [%s]\n",
+				 ret, strerror(errno));
+
+			kill_children(SIGINT);
+			return (void *) -1;
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * intel_allocator_multiprocess_start:
+ *
+ * Function turns on the intel_allocator multiprocess mode, which means that
+ * all allocations requested by child processes are performed in a separate
+ * thread within the main igt process. Children are aware of this and use an
+ * interprocess communication channel to send/receive messages
+ * (open, close, alloc, free, ...) to/from the allocator thread.
+ *
+ * Must be used when the allocator is going to be accessed from more than one
+ * process. Allocations made from threads spawned within the main igt process
+ * are serialized with a mutex, not by sending/receiving messages to/from the
+ * allocator thread.
+ *
+ * Note. This destroys all previously created allocators and their contents.
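+ *
+ * A rough multiprocess sketch (the fd and the handle are assumed to exist
+ * already, error handling is omitted):
+ *
+ * |[<!-- language="c" -->
+ * intel_allocator_multiprocess_start();
+ *
+ * igt_fork(child, 2) {
+ * 	uint64_t ahnd, offset;
+ *
+ * 	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+ *
+ * 	// Requests below travel over IPC to the allocator thread.
+ * 	offset = intel_allocator_alloc(ahnd, handle, 4096, 4096);
+ * 	...
+ * 	intel_allocator_free(ahnd, handle);
+ * 	intel_allocator_close(ahnd);
+ * }
+ * igt_waitchildren();
+ *
+ * intel_allocator_multiprocess_stop();
+ * ]|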
+ */
+void intel_allocator_multiprocess_start(void)
+{
+	alloc_info("allocator multiprocess start\n");
+
+	intel_allocator_init();
+
+	multiprocess = true;
+	channel->init(channel);
+
+	pthread_create(&allocator_thread, NULL,
+		       allocator_thread_loop, NULL);
+}
+
+/**
+ * intel_allocator_multiprocess_stop:
+ *
+ * Function turns off the intel_allocator multiprocess mode, which means
+ * stopping the allocator thread and deinitializing its data.
+ */
+void intel_allocator_multiprocess_stop(void)
+{
+	alloc_info("allocator multiprocess stop\n");
+
+	if (multiprocess) {
+		send_alloc_stop(channel);
+		/* Deinit, this should stop all blocked syscalls, if any */
+		channel->deinit(channel);
+		pthread_join(allocator_thread, NULL);
+		/* We cannot be certain that no child got stuck, so clean them up */
+		kill_children(SIGINT);
+		igt_waitchildren_timeout(5, "Stopping children");
+		multiprocess = false;
+	}
+}
+
+/**
+ * intel_allocator_open:
+ * @fd: i915 descriptor
+ * @ctx: context
+ * @allocator_type: one of the INTEL_ALLOCATOR_* defines
+ *
+ * Function opens an allocator instance for the given @fd and @ctx and returns
+ * its handle. If an allocator for that pair doesn't exist yet, it is created
+ * with refcount = 1. Subsequent opens return the same handle and bump the
+ * refcount.
+ *
+ * Returns: unique handle to the currently opened allocator.
+ */
+uint64_t intel_allocator_open(int fd, uint32_t ctx, uint8_t allocator_type)
+{
+	struct alloc_req req = { .request_type = REQ_OPEN,
+				 .open.fd = fd,
+				 .open.ctx = ctx,
+				 .open.allocator_type = allocator_type };
+	struct alloc_resp resp;
+
+	/* Get child_tid only once at open() */
+	if (child_tid == -1)
+		child_tid = gettid();
+
+	igt_assert(handle_request(&req, &resp) == 0);
+	igt_assert(resp.open.allocator_handle);
+	igt_assert(resp.response_type == RESP_OPEN);
+
+	return resp.open.allocator_handle;
+}
+
+/**
+ * intel_allocator_close:
+ * @allocator_handle: handle to the allocator that will be closed
+ *
+ * Function decreases the allocator refcount for the given @allocator_handle.
+ * When the refcount reaches zero the allocator is closed (destroyed) and all
+ * allocated / reserved areas are freed.
+ *
+ * Returns: true if closed allocator was empty, false otherwise.
+ */
+bool intel_allocator_close(uint64_t allocator_handle)
+{
+	struct alloc_req req = { .request_type = REQ_CLOSE,
+				 .allocator_handle = allocator_handle };
+	struct alloc_resp resp;
+
+	igt_assert(handle_request(&req, &resp) == 0);
+	igt_assert(resp.response_type == RESP_CLOSE);
+
+	return resp.close.is_empty;
+}
+
+/**
+ * intel_allocator_get_address_range:
+ * @allocator_handle: handle to an allocator
+ * @startp: pointer to the variable where function writes starting offset
+ * @endp: pointer to the variable where function writes ending offset
+ *
+ * Function fills @startp and @endp with, respectively, the starting and ending
+ * offset of the virtual address space range the allocator works on.
+ *
+ * Note. Allocator working ranges can differ depending on the device or the
+ * allocator type, so before reserving a specific offset it is good practice
+ * to ensure the address falls within the accepted range.
+ */
+void intel_allocator_get_address_range(uint64_t allocator_handle,
+				       uint64_t *startp, uint64_t *endp)
+{
+	struct alloc_req req = { .request_type = REQ_ADDRESS_RANGE,
+				 .allocator_handle = allocator_handle };
+	struct alloc_resp resp;
+
+	igt_assert(handle_request(&req, &resp) == 0);
+	igt_assert(resp.response_type == RESP_ADDRESS_RANGE);
+
+	if (startp)
+		*startp = resp.address_range.start;
+
+	if (endp)
+		*endp = resp.address_range.end;
+}
+
+/**
+ * intel_allocator_alloc:
+ * @allocator_handle: handle to an allocator
+ * @handle: handle to an object
+ * @size: size of an object
+ * @alignment: determines object alignment
+ *
+ * Function finds and returns the most suitable offset with given @alignment
+ * for an object with @size identified by the @handle.
+ *
+ * Returns: the currently assigned address for the given object. If the object
+ * was already allocated, the same address is returned.
+ */
+uint64_t intel_allocator_alloc(uint64_t allocator_handle, uint32_t handle,
+			       uint64_t size, uint64_t alignment)
+{
+	struct alloc_req req = { .request_type = REQ_ALLOC,
+				 .allocator_handle = allocator_handle,
+				 .alloc.handle = handle,
+				 .alloc.size = size,
+				 .alloc.alignment = alignment };
+	struct alloc_resp resp;
+
+	igt_assert(handle_request(&req, &resp) == 0);
+	igt_assert(resp.response_type == RESP_ALLOC);
+
+	return resp.alloc.offset;
+}
+
+/**
+ * intel_allocator_free:
+ * @allocator_handle: handle to an allocator
+ * @handle: handle to an object to be freed
+ *
+ * Function frees the object identified by @handle in the allocator, making
+ * its offset available for allocation again.
+ *
+ * Note. Reserved areas can only be freed with the
+ * #intel_allocator_unreserve function.
+ *
+ * Returns: true if the object was successfully freed, otherwise false.
+ */
+bool intel_allocator_free(uint64_t allocator_handle, uint32_t handle)
+{
+	struct alloc_req req = { .request_type = REQ_FREE,
+				 .allocator_handle = allocator_handle,
+				 .free.handle = handle };
+	struct alloc_resp resp;
+
+	igt_assert(handle_request(&req, &resp) == 0);
+	igt_assert(resp.response_type == RESP_FREE);
+
+	return resp.free.freed;
+}
+
+/**
+ * intel_allocator_is_allocated:
+ * @allocator_handle: handle to an allocator
+ * @handle: handle to an object
+ * @size: size of an object
+ * @offset: address of an object
+ *
+ * Function checks whether the object identified by the @handle and @size
+ * is allocated at the @offset.
+ *
+ * Returns: true if the object is currently allocated at the @offset,
+ * otherwise false.
+ */
+bool intel_allocator_is_allocated(uint64_t allocator_handle, uint32_t handle,
+				  uint64_t size, uint64_t offset)
+{
+	struct alloc_req req = { .request_type = REQ_IS_ALLOCATED,
+				 .allocator_handle = allocator_handle,
+				 .is_allocated.handle = handle,
+				 .is_allocated.size = size,
+				 .is_allocated.offset = offset };
+	struct alloc_resp resp;
+
+	igt_assert(handle_request(&req, &resp) == 0);
+	igt_assert(resp.response_type == RESP_IS_ALLOCATED);
+
+	return resp.is_allocated.allocated;
+}
+
+/**
+ * intel_allocator_reserve:
+ * @allocator_handle: handle to an allocator
+ * @handle: handle to an object
+ * @size: size of an object
+ * @offset: address of an object
+ *
+ * Function reserves space that starts at @offset and spans @size bytes.
+ * Optionally @handle can be passed to mark that the space belongs to a
+ * specific object, otherwise pass -1.
+ *
+ * Note. Reserved space is identified by its offset and size, not by a handle,
+ * so an object can have multiple reserved spaces associated with its handle.
+ *
+ * Returns: true if space is successfully reserved, otherwise false.
+ */
+bool intel_allocator_reserve(uint64_t allocator_handle, uint32_t handle,
+			     uint64_t size, uint64_t offset)
+{
+	struct alloc_req req = { .request_type = REQ_RESERVE,
+				 .allocator_handle = allocator_handle,
+				 .reserve.handle = handle,
+				 .reserve.start = offset,
+				 .reserve.end = offset + size };
+	struct alloc_resp resp;
+
+	igt_assert(handle_request(&req, &resp) == 0);
+	igt_assert(resp.response_type == RESP_RESERVE);
+
+	return resp.reserve.reserved;
+}
+
+/**
+ * intel_allocator_unreserve:
+ * @allocator_handle: handle to an allocator
+ * @handle: handle to an object
+ * @size: size of an object
+ * @offset: address of an object
+ *
+ * Function unreserves the space identified by @offset, @size and @handle.
+ *
+ * Note. @handle, @size and @offset have to match those used in the
+ * reservation, i.e. unreserving with the same offset but a smaller size
+ * will fail.
+ *
+ * Returns: true if the space is successfully unreserved, otherwise false.
+ */
+bool intel_allocator_unreserve(uint64_t allocator_handle, uint32_t handle,
+			       uint64_t size, uint64_t offset)
+{
+	struct alloc_req req = { .request_type = REQ_UNRESERVE,
+				 .allocator_handle = allocator_handle,
+				 .unreserve.handle = handle,
+				 .unreserve.start = offset,
+				 .unreserve.end = offset + size };
+	struct alloc_resp resp;
+
+	igt_assert(handle_request(&req, &resp) == 0);
+	igt_assert(resp.response_type == RESP_UNRESERVE);
+
+	return resp.unreserve.unreserved;
+}
+
+/**
+ * intel_allocator_is_reserved:
+ * @allocator_handle: handle to an allocator
+ * @size: size of an object
+ * @offset: address of an object
+ *
+ * Function checks whether the space starting at @offset and spanning @size
+ * bytes is currently under reservation.
+ *
+ * Note. @size and @offset have to match those used in the reservation,
+ * i.e. checking with the same offset but a smaller size will fail.
+ *
+ * Returns: true if the space is reserved, otherwise false.
+ */
+bool intel_allocator_is_reserved(uint64_t allocator_handle,
+				 uint64_t size, uint64_t offset)
+{
+	struct alloc_req req = { .request_type = REQ_IS_RESERVED,
+				 .allocator_handle = allocator_handle,
+				 .is_reserved.start = offset,
+				 .is_reserved.end = offset + size };
+	struct alloc_resp resp;
+
+	igt_assert(handle_request(&req, &resp) == 0);
+	igt_assert(resp.response_type == RESP_IS_RESERVED);
+
+	return resp.is_reserved.reserved;
+}
+
+/**
+ * intel_allocator_reserve_if_not_allocated:
+ * @allocator_handle: handle to an allocator
+ * @handle: handle to an object
+ * @size: size of an object
+ * @offset: address of an object
+ * @is_allocatedp: if not NULL, the function writes the object's allocation
+ * status (true/false) there
+ *
+ * Function checks whether the object identified by @handle and @size is
+ * allocated at @offset and writes the result to @is_allocatedp.
+ * If it is not allocated, the space is reserved at the given @offset.
+ *
+ * Returns: true if the space for an object was reserved, otherwise false.
+ */
+bool intel_allocator_reserve_if_not_allocated(uint64_t allocator_handle,
+					      uint32_t handle,
+					      uint64_t size, uint64_t offset,
+					      bool *is_allocatedp)
+{
+	struct alloc_req req = { .request_type = REQ_RESERVE_IF_NOT_ALLOCATED,
+				 .allocator_handle = allocator_handle,
+				 .reserve.handle = handle,
+				 .reserve.start = offset,
+				 .reserve.end = offset + size };
+	struct alloc_resp resp;
+
+	igt_assert(handle_request(&req, &resp) == 0);
+	igt_assert(resp.response_type == RESP_RESERVE_IF_NOT_ALLOCATED);
+
+	if (is_allocatedp)
+		*is_allocatedp = resp.reserve_if_not_allocated.allocated;
+
+	return resp.reserve_if_not_allocated.reserved;
+}
+
+/**
+ * intel_allocator_print:
+ * @allocator_handle: handle to an allocator
+ *
+ * Function prints the statistics and contents of the allocator,
+ * mainly for debugging purposes.
+ *
+ * Note. Printing is possible only in the main process.
+ **/
+void intel_allocator_print(uint64_t allocator_handle)
+{
+	bool same_process;
+
+	igt_assert(allocator_handle);
+
+	same_process = child_pid == -1;
+
+	if (!multiprocess || same_process) {
+		struct intel_allocator *ial = from_user_pointer(allocator_handle);
+		pthread_mutex_lock(&map_mutex);
+		ial->print(ial, true);
+		pthread_mutex_unlock(&map_mutex);
+	} else {
+		igt_warn("Print stats is in main process only\n");
+	}
+}
+
+static bool equal_allocators(const void *key1, const void *key2)
+{
+	const struct intel_allocator *a1 = key1, *a2 = key2;
+
+	alloc_debug("a1: <fd: %d, ctx: %u>, a2 <fd: %d, ctx: %u>\n",
+		   a1->fd, a1->ctx, a2->fd, a2->ctx);
+
+	return a1->fd == a2->fd && a1->ctx == a2->ctx;
+}
+
+/*  2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
+#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
+
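+/*
+ * Only the fd is hashed; allocators for the same fd but different contexts
+ * land in the same bucket and are distinguished by equal_allocators().
+ */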
+static inline uint64_t hash_allocators(const void *val, unsigned int bits)
+{
+	uint64_t hash = ((struct intel_allocator *) val)->fd;
+
+	hash = hash * GOLDEN_RATIO_PRIME_64;
+	return hash >> (64 - bits);
+}
+
+static void __free_allocators(void)
+{
+	struct igt_map_entry *pos;
+	struct intel_allocator *ial;
+	int i;
+
+	if (allocators_map) {
+		igt_map_for_each(allocators_map, i, pos) {
+			ial = pos->value;
+			ial->destroy(ial);
+		}
+	}
+
+	igt_map_free(allocators_map);
+}
+
+/**
+ * intel_allocator_init:
+ *
+ * Function initializes the allocator infrastructure. Calling it again tears
+ * down the current infrastructure, destroying any existing allocators.
+ * It is called from an igt_constructor.
+ **/
+void intel_allocator_init(void)
+{
+	alloc_info("Prepare an allocator infrastructure\n");
+
+	allocator_pid = getpid();
+	alloc_info("Allocator pid: %ld\n", (long) allocator_pid);
+
+	if (allocators_map) {
+		__free_allocators();
+		free(allocators_map);
+	}
+
+	allocators_map = calloc(sizeof(*allocators_map), 1);
+	igt_assert(allocators_map);
+
+	igt_map_init(allocators_map, equal_allocators, hash_allocators, 8);
+
+	channel = intel_allocator_get_msgchannel(CHANNEL_SYSVIPC_MSGQUEUE);
+}
+
+igt_constructor {
+	intel_allocator_init();
+}
diff --git a/lib/intel_allocator.h b/lib/intel_allocator.h
new file mode 100644
index 00000000..f3747a8b
--- /dev/null
+++ b/lib/intel_allocator.h
@@ -0,0 +1,136 @@
+#ifndef __INTEL_ALLOCATOR_H__
+#define __INTEL_ALLOCATOR_H__
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <pthread.h>
+#include <stdatomic.h>
+
+/**
+ * SECTION:intel_allocator
+ * @short_description: igt implementation of allocator
+ * @title: Intel allocator
+ * @include: intel_allocator.h
+ *
+ * # Intel allocator
+ *
+ * Since the GPU driver has abandoned relocations for newer generations, we
+ * are facing the need to manage addresses in userspace. The Intel allocator
+ * supplies out-of-the-box mechanisms that provide valid virtual addresses.
+ * Specifically, intel_allocator is a multi-threading infrastructure wrapping
+ * a proper single-threaded allocator, which can be one of the following:
+ *
+ *  * INTEL_ALLOCATOR_SIMPLE - simple, list-based allocator ported from Mesa
+ *  * INTEL_ALLOCATOR_RANDOM - stateless allocator that provides random addresses
+ *  (the list may grow in the future)
+ *
+ * Usage example:
+ *
+ * |[<!-- language="c" -->
+ * struct object {
+ * 	uint32_t handle;
+ * 	uint64_t offset;
+ * 	uint64_t size;
+ * };
+ *
+ * struct object obj1, obj2;
+ * uint64_t ahnd, startp, endp;
+ * int fd = -1;
+ *
+ * fd = drm_open_driver(DRIVER_INTEL);
+ * ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+ *
+ * obj1.handle = gem_create(fd, 4096);
+ * obj2.handle = gem_create(fd, 4096);
+ *
+ * // Reserve a hole for an object at a given address,
+ * // in this example the first possible address.
+ * intel_allocator_get_address_range(ahnd, &startp, &endp);
+ * obj1.offset = startp;
+ * igt_assert(intel_allocator_reserve(ahnd, obj1.handle, 4096, startp));
+ *
+ * // Get the most suitable offset for the object. Preferred way.
+ * obj2.offset = intel_allocator_alloc(ahnd, obj2.handle, 4096, 1 << 13);
+ *
+ *  ...
+ *
+ * // Reserved addresses can only be freed by unreserve.
+ * intel_allocator_unreserve(ahnd, obj1.handle, 4096, obj1.offset);
+ * intel_allocator_free(ahnd, obj2.handle);
+ *
+ * gem_close(fd, obj1.handle);
+ * gem_close(fd, obj2.handle);
+ * ]|
+ *
+ */
+
+struct intel_allocator {
+	int fd;
+	uint32_t ctx;
+	uint8_t type;
+	_Atomic(int32_t) refcount;
+	pthread_mutex_t mutex;
+
+	/* allocator's private structure */
+	void *priv;
+
+	void (*get_address_range)(struct intel_allocator *ial,
+				  uint64_t *startp, uint64_t *endp);
+	uint64_t (*alloc)(struct intel_allocator *ial, uint32_t handle,
+			  uint64_t size, uint64_t alignment);
+	bool (*is_allocated)(struct intel_allocator *ial, uint32_t handle,
+			     uint64_t size, uint64_t offset);
+	bool (*reserve)(struct intel_allocator *ial,
+			uint32_t handle, uint64_t start, uint64_t end);
+	bool (*unreserve)(struct intel_allocator *ial,
+			  uint32_t handle, uint64_t start, uint64_t end);
+	bool (*is_reserved)(struct intel_allocator *ial,
+			    uint64_t start, uint64_t end);
+	bool (*free)(struct intel_allocator *ial, uint32_t handle);
+
+	void (*destroy)(struct intel_allocator *ial);
+
+	bool (*is_empty)(struct intel_allocator *ial);
+
+	void (*print)(struct intel_allocator *ial, bool full);
+};
+
+void intel_allocator_init(void);
+void intel_allocator_multiprocess_start(void);
+void intel_allocator_multiprocess_stop(void);
+
+uint64_t intel_allocator_open(int fd, uint32_t ctx, uint8_t allocator_type);
+bool intel_allocator_close(uint64_t allocator_handle);
+void intel_allocator_get_address_range(uint64_t allocator_handle,
+				       uint64_t *startp, uint64_t *endp);
+uint64_t intel_allocator_alloc(uint64_t allocator_handle, uint32_t handle,
+			       uint64_t size, uint64_t alignment);
+bool intel_allocator_free(uint64_t allocator_handle, uint32_t handle);
+bool intel_allocator_is_allocated(uint64_t allocator_handle, uint32_t handle,
+				  uint64_t size, uint64_t offset);
+bool intel_allocator_reserve(uint64_t allocator_handle, uint32_t handle,
+			     uint64_t size, uint64_t offset);
+bool intel_allocator_unreserve(uint64_t allocator_handle, uint32_t handle,
+			       uint64_t size, uint64_t offset);
+bool intel_allocator_is_reserved(uint64_t allocator_handle,
+				 uint64_t size, uint64_t offset);
+bool intel_allocator_reserve_if_not_allocated(uint64_t allocator_handle,
+					      uint32_t handle,
+					      uint64_t size, uint64_t offset,
+					      bool *is_allocatedp);
+
+void intel_allocator_print(uint64_t allocator_handle);
+
+#define INTEL_ALLOCATOR_NONE   0
+#define INTEL_ALLOCATOR_RANDOM 1
+#define INTEL_ALLOCATOR_SIMPLE 2
+
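+/*
+ * GPU virtual addresses are 48 bits wide here: CANONICAL() sign-extends
+ * bit 47 into the upper bits, while DECANONICAL() masks an offset back down
+ * to the 48-bit range the allocators operate on.
+ */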
+static inline uint64_t CANONICAL(uint64_t address)
+{
+	return (int64_t)(address << 16) >> 16;
+}
+
+#define DECANONICAL(offset) ((offset) & ((1ull << 48) - 1))
+
+#endif
diff --git a/lib/intel_allocator_msgchannel.c b/lib/intel_allocator_msgchannel.c
new file mode 100644
index 00000000..084a977d
--- /dev/null
+++ b/lib/intel_allocator_msgchannel.c
@@ -0,0 +1,182 @@
+#include <sys/types.h>
+#include <sys/ipc.h>
+#include <sys/msg.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "igt.h"
+#include "intel_allocator_msgchannel.h"
+
+extern __thread pid_t child_tid;
+
+/* ----- SYSVIPC MSGQUEUE ----- */
+
+#define FTOK_IGT_ALLOCATOR_KEY "/tmp/igt.allocator.key"
+#define FTOK_IGT_ALLOCATOR_PROJID 2020
+
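+/*
+ * Requests are sent with the fixed mtype below so the allocator thread can
+ * receive them all; responses use the requester's tid as mtype so that each
+ * caller picks up only its own reply.
+ */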
+#define ALLOCATOR_REQUEST 1
+
+struct msgqueue_data {
+	key_t key;
+	int queue;
+};
+
+struct msgqueue_buf {
+	long mtype;
+	union {
+		struct alloc_req request;
+		struct alloc_resp response;
+	} data;
+};
+
+static void msgqueue_init(struct msg_channel *channel)
+{
+	struct msgqueue_data *msgdata;
+	struct msqid_ds qstat;
+	key_t key;
+	int fd, queue;
+
+	igt_debug("Init msgqueue\n");
+
+	/* Create the ftok key file only if it does not exist yet */
+	fd = open(FTOK_IGT_ALLOCATOR_KEY, O_CREAT | O_EXCL | O_WRONLY, 0600);
+	igt_assert(fd >= 0 || errno == EEXIST);
+	if (fd >= 0)
+		close(fd);
+
+	key = ftok(FTOK_IGT_ALLOCATOR_KEY, FTOK_IGT_ALLOCATOR_PROJID);
+	igt_assert(key != -1);
+	igt_debug("Queue key: %x\n", (int) key);
+
+	queue = msgget(key, 0);
+	if (queue != -1) {
+		igt_assert(msgctl(queue, IPC_STAT, &qstat) == 0);
+		igt_debug("old messages: %lu\n", qstat.msg_qnum);
+		igt_assert(msgctl(queue, IPC_RMID, NULL) == 0);
+	}
+
+	queue = msgget(key, IPC_CREAT);
+	igt_debug("msg queue: %d\n", queue);
+
+	msgdata = calloc(1, sizeof(*msgdata));
+	igt_assert(msgdata);
+	msgdata->key = key;
+	msgdata->queue = queue;
+	channel->priv = msgdata;
+}
+
+static void msgqueue_deinit(struct msg_channel *channel)
+{
+	struct msgqueue_data *msgdata = channel->priv;
+
+	igt_debug("Deinit msgqueue\n");
+	msgctl(msgdata->queue, IPC_RMID, NULL);
+	free(channel->priv);
+}
+
+static int msgqueue_send_req(struct msg_channel *channel,
+			     struct alloc_req *request)
+{
+	struct msgqueue_data *msgdata = channel->priv;
+	struct msgqueue_buf buf = {0};
+	int ret;
+
+	buf.mtype = ALLOCATOR_REQUEST;
+	memcpy(&buf.data.request, request, sizeof(*request));
+
+retry:
+	ret = msgsnd(msgdata->queue, &buf, sizeof(buf) - sizeof(long), 0);
+	if (ret == -1 && errno == EINTR)
+		goto retry;
+
+	if (ret == -1)
+		igt_warn("Error: %s\n", strerror(errno));
+
+	return ret;
+}
+
+static int msgqueue_recv_req(struct msg_channel *channel,
+			     struct alloc_req *request)
+{
+	struct msgqueue_data *msgdata = channel->priv;
+	struct msgqueue_buf buf = {0};
+	int ret, size = sizeof(buf) - sizeof(long);
+
+retry:
+	ret = msgrcv(msgdata->queue, &buf, size, ALLOCATOR_REQUEST, 0);
+	if (ret == -1 && errno == EINTR)
+		goto retry;
+
+	if (ret == size)
+		memcpy(request, &buf.data.request, sizeof(*request));
+	else if (ret == -1)
+		igt_warn("Error: %s\n", strerror(errno));
+
+	return ret;
+}
+
+static int msgqueue_send_resp(struct msg_channel *channel,
+			      struct alloc_resp *response)
+{
+	struct msgqueue_data *msgdata = channel->priv;
+	struct msgqueue_buf buf = {0};
+	int ret;
+
+	buf.mtype = response->tid;
+	memcpy(&buf.data.response, response, sizeof(*response));
+
+retry:
+	ret = msgsnd(msgdata->queue, &buf, sizeof(buf) - sizeof(long), 0);
+	if (ret == -1 && errno == EINTR)
+		goto retry;
+
+	if (ret == -1)
+		igt_warn("Error: %s\n", strerror(errno));
+
+	return ret;
+}
+
+static int msgqueue_recv_resp(struct msg_channel *channel,
+			      struct alloc_resp *response)
+{
+	struct msgqueue_data *msgdata = channel->priv;
+	struct msgqueue_buf buf = {0};
+	int ret, size = sizeof(buf) - sizeof(long);
+
+retry:
+	ret = msgrcv(msgdata->queue, &buf, size, response->tid, 0);
+	if (ret == -1 && errno == EINTR)
+		goto retry;
+
+	if (ret == size)
+		memcpy(response, &buf.data.response, sizeof(*response));
+	else if (ret == -1)
+		igt_warn("Error: %s\n", strerror(errno));
+
+	return ret;
+}
+
+static struct msg_channel msgqueue_channel = {
+	.priv = NULL,
+	.init = msgqueue_init,
+	.deinit = msgqueue_deinit,
+	.send_req = msgqueue_send_req,
+	.recv_req = msgqueue_recv_req,
+	.send_resp = msgqueue_send_resp,
+	.recv_resp = msgqueue_recv_resp,
+};
+
+struct msg_channel *intel_allocator_get_msgchannel(enum msg_channel_type type)
+{
+	struct msg_channel *channel = NULL;
+
+	switch (type) {
+	case CHANNEL_SYSVIPC_MSGQUEUE:
+		channel = &msgqueue_channel;
+		break;
+	}
+
+	igt_assert(channel);
+
+	return channel;
+}
diff --git a/lib/intel_allocator_msgchannel.h b/lib/intel_allocator_msgchannel.h
new file mode 100644
index 00000000..ab46d9ea
--- /dev/null
+++ b/lib/intel_allocator_msgchannel.h
@@ -0,0 +1,140 @@
+#ifndef __INTEL_ALLOCATOR_MSGCHANNEL_H__
+#define __INTEL_ALLOCATOR_MSGCHANNEL_H__
+
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdint.h>
+
+enum reqtype {
+	REQ_STOP,
+	REQ_OPEN,
+	REQ_CLOSE,
+	REQ_ADDRESS_RANGE,
+	REQ_ALLOC,
+	REQ_FREE,
+	REQ_IS_ALLOCATED,
+	REQ_RESERVE,
+	REQ_UNRESERVE,
+	REQ_RESERVE_IF_NOT_ALLOCATED,
+	REQ_IS_RESERVED,
+};
+
+enum resptype {
+	RESP_OPEN,
+	RESP_CLOSE,
+	RESP_ADDRESS_RANGE,
+	RESP_ALLOC,
+	RESP_FREE,
+	RESP_IS_ALLOCATED,
+	RESP_RESERVE,
+	RESP_UNRESERVE,
+	RESP_IS_RESERVED,
+	RESP_RESERVE_IF_NOT_ALLOCATED,
+};
+
+struct alloc_req {
+	enum reqtype request_type;
+
+	/* Common */
+	pid_t tid;
+	uint64_t allocator_handle;
+
+	union {
+		struct {
+			int fd;
+			uint32_t ctx;
+			uint8_t allocator_type;
+		} open;
+
+		struct {
+			uint32_t handle;
+			uint64_t size;
+			uint64_t alignment;
+		} alloc;
+
+		struct {
+			uint32_t handle;
+		} free;
+
+		struct {
+			uint32_t handle;
+			uint64_t size;
+			uint64_t offset;
+		} is_allocated;
+
+		struct {
+			uint32_t handle;
+			uint64_t start;
+			uint64_t end;
+		} reserve, unreserve;
+
+		struct {
+			uint64_t start;
+			uint64_t end;
+		} is_reserved;
+
+	};
+};
+
+struct alloc_resp {
+	enum resptype response_type;
+	pid_t tid;
+
+	union {
+		struct {
+			uint64_t allocator_handle;
+		} open;
+
+		struct {
+			bool is_empty;
+		} close;
+
+		struct {
+			uint64_t start;
+			uint64_t end;
+		} address_range;
+
+		struct {
+			uint64_t offset;
+		} alloc;
+
+		struct {
+			bool freed;
+		} free;
+
+		struct {
+			bool allocated;
+		} is_allocated;
+
+		struct {
+			bool reserved;
+		} reserve, is_reserved;
+
+		struct {
+			bool unreserved;
+		} unreserve;
+
+		struct {
+			bool allocated;
+			bool reserved;
+		} reserve_if_not_allocated;
+	};
+};
+
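+/*
+ * Transport abstraction between the children (requests) and the allocator
+ * thread in the main igt process (responses).
+ */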
+struct msg_channel {
+	void *priv;
+	void (*init)(struct msg_channel *channel);
+	void (*deinit)(struct msg_channel *channel);
+	int (*send_req)(struct msg_channel *channel, struct alloc_req *request);
+	int (*recv_req)(struct msg_channel *channel, struct alloc_req *request);
+	int (*send_resp)(struct msg_channel *channel, struct alloc_resp *response);
+	int (*recv_resp)(struct msg_channel *channel, struct alloc_resp *response);
+};
+
+enum msg_channel_type {
+	CHANNEL_SYSVIPC_MSGQUEUE
+};
+
+struct msg_channel *intel_allocator_get_msgchannel(enum msg_channel_type type);
+
+#endif
diff --git a/lib/intel_allocator_simple.c b/lib/intel_allocator_simple.c
index 1f52db3f..1d0e117d 100644
--- a/lib/intel_allocator_simple.c
+++ b/lib/intel_allocator_simple.c
@@ -82,8 +82,6 @@ struct intel_allocator_record {
 #define simple_vma_foreach_hole_safe_rev(_hole, _heap, _tmp) \
 	igt_list_for_each_entry_safe_reverse(_hole, _tmp,  &(_heap)->holes, link)
 
-#define DECANONICAL(offset) (offset & ((1ull << 48) - 1))
-
 static uint64_t get_bias(int fd)
 {
 	(void) fd;
diff --git a/lib/meson.build b/lib/meson.build
index 484d3c7b..7a322a44 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -32,6 +32,10 @@ lib_sources = [
 	'igt_vgem.c',
 	'igt_x86.c',
 	'instdone.c',
+	'intel_allocator.c',
+	'intel_allocator_msgchannel.c',
+	'intel_allocator_random.c',
+	'intel_allocator_simple.c',
 	'intel_batchbuffer.c',
 	'intel_bufops.c',
 	'intel_chipset.c',
-- 
2.26.0


